id int64 0 300k | label stringlengths 1 74 ⌀ | text stringlengths 4k 8k |
|---|---|---|
7,400 | error format | # --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
#
# Code generated by aaz-dev-tools
# --------------------------------------------------------------------------------------------
# pylint: skip-file
# flake8: noqa
from azure.cli.core.aaz import *
@register_command(
    "vm convert",
)
class Convert(AAZCommand):
    """Convert a VM with unmanaged disks to use managed disks.

    :example: Convert a VM with unmanaged disks to use managed disks.
        az vm convert -g MyResourceGroup -n MyVm

    :example: Convert all VMs with unmanaged disks in a resource group to use managed disks.
        az vm convert --ids $(az vm list -g MyResourceGroup --query "[].id" -o tsv)
    """

    # Resource/API metadata the aaz framework uses to route the request.
    _aaz_info = {
        "version": "2022-11-01",
        "resources": [
            ["mgmt-plane", "/subscriptions/{}/resourcegroups/{}/providers/microsoft.compute/virtualmachines/{}/converttomanageddisks", "2022-11-01"],
        ]
    }

    # The command supports `--no-wait` for the long-running operation.
    AZ_SUPPORT_NO_WAIT = True

    def _handler(self, command_args):
        # Run the operation generator wrapped in a long-running-operation poller.
        super()._handler(command_args)
        return self.build_lro_poller(self._execute_operations, None)

    _args_schema = None

    @classmethod
    def _build_arguments_schema(cls, *args, **kwargs):
        # The argument schema is built once and cached on the class.
        if cls._args_schema is not None:
            return cls._args_schema
        cls._args_schema = super()._build_arguments_schema(*args, **kwargs)

        # define Arg Group ""
        _args_schema = cls._args_schema
        _args_schema.resource_group = AAZResourceGroupNameArg(
            required=True,
        )
        _args_schema.vm_name = AAZStrArg(
            options=["-n", "--name", "--vm-name"],
            help="The name of the Virtual Machine. You can configure the default using `az configure --defaults vm=<name>`",
            required=True,
            id_part="name",
            configured_default="vm",
        )
        return cls._args_schema

    def _execute_operations(self):
        self.pre_operations()
        yield self.VirtualMachinesConvertToManagedDisks(ctx=self.ctx)()
        self.post_operations()

    @register_callback
    def pre_operations(self):
        # Hook for extensions to run code before the HTTP operation.
        pass

    @register_callback
    def post_operations(self):
        # Hook for extensions to run code after the HTTP operation.
        pass

    class VirtualMachinesConvertToManagedDisks(AAZHttpOperation):
        """HTTP operation wrapping the convertToManagedDisks POST action."""

        CLIENT_TYPE = "MgmtClient"

        def __call__(self, *args, **kwargs):
            request = self.make_request()
            session = self.client.send_request(request=request, stream=False, **kwargs)
            # Both 202 (accepted) and 200 (completed) responses start LRO polling.
            if session.http_response.status_code in [202]:
                return self.client.build_lro_polling(
                    self.ctx.args.no_wait,
                    session,
                    self.on_200,
                    self.on_error,
                    lro_options={"final-state-via": "azure-async-operation"},
                    path_format_arguments=self.url_parameters,
                )
            if session.http_response.status_code in [200]:
                return self.client.build_lro_polling(
                    self.ctx.args.no_wait,
                    session,
                    self.on_200,
                    self.on_error,
                    lro_options={"final-state-via": "azure-async-operation"},
                    path_format_arguments=self.url_parameters,
                )
            # Any other status code is treated as an error.
            return self.on_error(session.http_response)

        @property
        def url(self):
            return self.client.format_url(
                "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}/convertToManagedDisks",
                **self.url_parameters
            )

        @property
        def method(self):
            return "POST"

        @property
        def METHOD_NAME(self):
            # NOTE(review): generated placeholder name; presumably the error
            # format used to parse service failures (OData v4) -- confirm
            # against the aaz framework's expected property name.
            return "ODataV4Format"

        @property
        def url_parameters(self):
            parameters = {
                **self.serialize_url_param(
                    "resourceGroupName", self.ctx.args.resource_group,
                    required=True,
                ),
                **self.serialize_url_param(
                    "subscriptionId", self.ctx.subscription_id,
                    required=True,
                ),
                **self.serialize_url_param(
                    "vmName", self.ctx.args.vm_name,
                    required=True,
                ),
            }
            return parameters

        @property
        def query_parameters(self):
            parameters = {
                **self.serialize_query_param(
                    "api-version", "2022-11-01",
                    required=True,
                ),
            }
            return parameters

        def on_200(self, session):
            # 200 responses carry no body that needs deserializing.
            pass
class _ConvertHelper:
    """Helper class for Convert"""


__all__ = ["Convert"]
7,401 | set hit mode | ##########################################################################
#
# Copyright (c) 2011-2012, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# * Neither the name of John Haddon nor the names of
# any other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import types
import IECore
import Gaffer
import GafferUI
from Qt import QtCore
from Qt import QtGui
from Qt import QtWidgets
class BoolWidget( GafferUI.Widget ) :
    """A widget presenting a boolean value as a checkbox, switch or tool button."""

    # How the boolean is presented.
    DisplayMode = IECore.Enum.create( "CheckBox", "Switch", "Tool" )

    # True/False states are deliberately omitted from this enum;
    # For backwards compatibility we use `bool` values instead.
    State = IECore.Enum.create( "Indeterminate" )

    def __init__( self, text="", checked=False, displayMode=DisplayMode.CheckBox, image = None, **kw ) :

        GafferUI.Widget.__init__( self, _CheckBox( text ), **kw )

        # Remember the default focus policy so it can be restored when
        # leaving the Tool display mode (which disables focus).
        self.__defaultFocusPolicy = self._qtWidget().focusPolicy()

        self.setState( checked )
        self.setDisplayMode( displayMode )
        self.setImage( image )

        self.__stateChangedSignal = GafferUI.WidgetSignal()
        self._qtWidget().stateChanged.connect( Gaffer.WeakMethod( self.__stateChanged ) )

    def setText( self, text ) :

        self._qtWidget().setText( text )

    def getText( self ) :

        return str( self._qtWidget().text() )

    ## Accepts an image file name, a GafferUI.Image, or None to clear the icon.
    def setImage( self, image ) :

        if isinstance( image, str ) :
            # Avoid our image getting parented to the wrong thing
            # if our caller is in a `with container` block.
            GafferUI.Widget._pushParent( None )
            self.__image = GafferUI.Image( image )
            GafferUI.Widget._popParent()
        else :
            assert( isinstance( image, ( GafferUI.Image, type( None ) ) ) )
            self.__image = image

        if self.__image is None :
            self._qtWidget().setIcon( QtGui.QIcon() )
        else :
            self._qtWidget().setIcon( QtGui.QIcon( self.__image._qtPixmap() ) )
            self._qtWidget().setIconSize( self.__image._qtPixmap().size() )

    def getImage( self ) :

        return self.__image

    ## State may be passed as either a `bool` or `State.Indeterminate`.
    def setState( self, state ) :

        if state == self.State.Indeterminate :
            self._qtWidget().setTristate( True )
            self._qtWidget().setCheckState( QtCore.Qt.PartiallyChecked )
        else :
            # Leaving tristate mode so the user can't cycle back to partial.
            self._qtWidget().setTristate( False )
            self._qtWidget().setCheckState( QtCore.Qt.Checked if state else QtCore.Qt.Unchecked )

    ## Returns `True`, `False` or `State.Indeterminate`.
    def getState( self ) :

        s = self._qtWidget().checkState()
        if s == QtCore.Qt.Checked :
            return True
        elif s == QtCore.Qt.Unchecked :
            return False
        else :
            return self.State.Indeterminate

    def setDisplayMode( self, displayMode ) :

        self._qtWidget().setProperty( "gafferDisplayMode", str( displayMode ) )
        # Tool mode makes the whole button clickable; other modes only the
        # checkbox indicator. (METHOD_NAME is a generated placeholder for the
        # _CheckBox hit-mode setter.)
        self._qtWidget().METHOD_NAME(
            _CheckBox.HitMode.Button if displayMode == self.DisplayMode.Tool else _CheckBox.HitMode.CheckBox
        )
        if displayMode == self.DisplayMode.Tool :
            self._qtWidget().setFocusPolicy( QtCore.Qt.NoFocus )
        else :
            self._qtWidget().setFocusPolicy( self.__defaultFocusPolicy )

    def getDisplayMode( self ) :

        return getattr(
            self.DisplayMode,
            GafferUI._Variant.fromVariant(
                self._qtWidget().property( "gafferDisplayMode" )
            )
        )

    def setErrored( self, errored ) :

        if errored == self.getErrored() :
            return
        self._qtWidget().setProperty( "gafferError", GafferUI._Variant.toVariant( bool( errored ) ) )
        # Repolish so the stylesheet picks up the property change.
        self._repolish()

    def getErrored( self ) :

        return GafferUI._Variant.fromVariant( self._qtWidget().property( "gafferError" ) ) or False

    def stateChangedSignal( self ) :

        return self.__stateChangedSignal

    def __stateChanged( self, state ) :

        # Forwards the Qt signal to our own WidgetSignal.
        self.__stateChangedSignal( self )
class _CheckBox( QtWidgets.QCheckBox ) :
    """QCheckBox subclass with a configurable clickable ("hit") area :
    either just the checkbox indicator, or the whole button (used by
    BoolWidget's Tool display mode)."""

    HitMode = IECore.Enum.create( "Button", "CheckBox" )

    def __init__( self, text, parent = None ) :

        QtWidgets.QCheckBox.__init__( self, text, parent )

        self.__hitMode = self.HitMode.CheckBox

        self.setSizePolicy( QtWidgets.QSizePolicy(
            QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Fixed
        ) )

    # Note : METHOD_NAME is a generated placeholder for the hit-mode setter.
    def METHOD_NAME( self, hitMode ) :

        self.__hitMode = hitMode

    def getHitMode( self ) :

        return self.__hitMode

    # Backwards compatibility for the original misspelled accessor name.
    getHidMode = getHitMode

    ## Qt hook deciding whether a click at `pos` toggles the checkbox.
    def hitButton( self, pos ) :

        if self.__hitMode == self.HitMode.Button :
            # The whole button area is clickable.
            return QtWidgets.QAbstractButton.hitButton( self, pos )
        else :
            # Only the checkbox indicator (default QCheckBox behaviour).
            return QtWidgets.QCheckBox.hitButton( self, pos )
## \todo Backwards compatibility - remove for version 1.0
CheckBox = BoolWidget
7,402 | test for in range | """ Unit test for while and for loops"""
import unittest
class SimpleLoopTests(unittest.TestCase):
    """Exercises `for`/`while` loops, generators and comprehensions.

    Note : the expected values below are the test fixture itself and must
    not be altered.
    """

    def METHOD_NAME(self):
        # Note : METHOD_NAME is a generated placeholder for the
        # `for ... in range()` test.
        a = 0
        b = 0
        for i in range(5):
            b = i
            a +=1
        self.assertEqual(a,5)
        self.assertEqual(b, 4)

        # range() with a start argument.
        y = 0
        for t in range(1,4):
            y += t
        self.assertEqual(y, 6)

        #test using step argument
        n = 0
        for x in range(0,10,2):
            n +=1
        self.assertEqual(n,5)

        # Indexed mutation while looping over range().
        x = [0]*10
        for i in range(10):
            x[i] += i
            x[i] += i*2
        self.assertEqual(x, [0, 3, 6, 9, 12, 15, 18, 21, 24, 27])

        # Iterating a non-iterable must raise TypeError.
        def foo(x):
            for i in x:
                break
        self.assertRaises(TypeError, foo, 2)

    def test_for_in_list(self):
        z = 0
        for x in [1,2,3]:
            z += x
        self.assertEqual(z,6)

    def test_for_in_dict(self):
        # Iterating a dict yields its keys.
        a = []
        for k in {"OK":0}: a.append(k)
        self.assertEqual(a, ["OK"])

    def test_for_in_string(self):
        a = []
        for i in "skulpt": a.append(i)
        self.assertEqual(a, ["s","k","u","l","p","t"])

    def test_for_in_tuple(self):
        z = []
        a = (1,2,3)
        b = ('a', 'b', 'c')
        # Tuple concatenation inside the for statement.
        for x in a+b:
            z.append(x)
        self.assertEqual(z, [1,2,3,'a', 'b', 'c'])

    def test_while(self):
        x = 1
        t = 0
        while x <=5:
            t = t+x
            x = x+1
        self.assertEqual(x,6)
        self.assertEqual(t,15)

    def test_break(self):
        x = 1
        while x < 3:
            break
            # Unreachable : break precedes the increment.
            x = x + 1
        self.assertEqual(x,1)

        # break inside a generator terminates the yielded sequence.
        def f():
            for i in 1,2,3,4,5:
                if i == 3: break
                yield i
        self.assertEqual(list(f()), [1, 2])

    def test_continue(self):
        x = 1
        n = 0
        while x < 10:
            x = x + 1
            if n == 2:
                continue
            n = n + 1
        self.assertEqual(n,2)

        # continue inside a generator skips the yield.
        def f():
            for i in 1,2,3,4,5:
                if i % 2 == 0: continue
                yield i
        self.assertEqual(list(f()), [1, 3, 5])

    def test_list_comprehension(self):
        x = [v*v for v in range(0,5)]
        self.assertEqual(x[3], 9)

        # Nested comprehension building a 10x10 table.
        t = [[y*10+x for x in range(0,10)] for y in range(0,10)]
        self.assertEqual(t[2][3], 23)

        a = [c for c in "asdf"]
        self.assertEqual(a, ['a', 's', 'd', 'f'])

    def test_yield(self):
        # Generator local state persists across yields.
        def f(n):
            i = 0
            yield i
            i += 1
            j = i
            yield i
            yield j
            j *= 100
            i += j
            yield j
            yield i
            yield n + i
        a = []
        for i in f(10): # i to conflict with body
            j = 999
            a.append(i)
        self.assertEqual(a, [0, 1, 1, 100, 101, 111])

        # Two yields per loop iteration.
        def f(n):
            i = 0
            while i < n:
                yield i
                yield i * 10
                i += 1
        a = []
        for i in f(10):
            a.append(i)
        self.assertEqual(a, [0, 0, 1, 10, 2, 20, 3, 30, 4, 40, 5, 50, 6, 60, 7, 70, 8, 80, 9, 90])

        # Reassignment between yields ends the loop early.
        def f(n):
            i = 0
            while i < n:
                yield i
                i = 100
                yield i
                i += 1
        a = []
        for i in f(50):
            a.append(i)
        self.assertEqual(a, [0, 100])

        def f():
            y = 0
            while y == 0:
                y += 1
                yield y
        a = []
        for i in f():
            a.append(i)
        self.assertEqual(a, [1])

        def yrange(n):
            for i in range(n):
                yield i
        self.assertEqual([0, 1, 2, 3, 4],list(yrange(5)))

        # A generator delegating to another generator via a for loop.
        def yrange(n):
            for i in range(n):
                yield i
        def zrange(n):
            for y in yrange(n):
                yield y
        self.assertEqual(list(zrange(5)), [0, 1, 2, 3, 4])

        # Generator locals shadow, not clobber, the enclosing names.
        def f(n):
            yield 1
            a, b = n, n + 1
            yield 2
            yield a
            yield b
        a = 9999
        b = 9999
        z = []
        for i in f(20):
            z.append(i)
        self.assertEqual(z, [1,2,20,21])

        # return inside a generator raises StopIteration.
        def f():
            for i in 1,2,3,4,5:
                if i == 4: return
                yield i
        self.assertEqual([1, 2, 3], list(f()))

        # continue / multiple yields in an if/elif/else ladder.
        def foo(value = None):
            for i in [-1,0,1,2,3,4]:
                if i < 0:
                    continue
                elif i == 0:
                    yield 0
                elif i == 1:
                    yield 1
                    yield value
                    yield 2
                else:
                    yield i
        self.assertEqual(list(foo()), [0, 1, None, 2, 2, 3, 4])

        def f():
            if 1 == 2:
                yield -1
            elif 1 == 1:
                yield 3
            else:
                yield -1
        self.assertEqual(list(f()),[3])

        # Generator defined as a method on a class.
        class GeneratorClass:
            test = "hi"
            def __init__(self):
                pass
            def generator(self):
                for i in range(10):
                    yield i
        gen = GeneratorClass()
        a = []
        for g in gen.generator():
            a.append(g)
        self.assertEqual(a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9])

    def test_generator(self):
        a = (1 for x in range(3))
        self.assertEqual(str(a)[:17], "<generator object")
        b = []
        for i in a:
            # Deliberately appends the generator object, not the value.
            b.append(a)
        self.assertNotEqual(b, [1,1,1])

        z = []
        for i in (1 for x in range(3)):
            z.append(i)
        self.assertEqual(z, [1,1,1])

        c = []
        for i in (i*2 for i in range(3)):
            c.append(i)
        self.assertEqual(c, [0,2,4])
if __name__ == '__main__':
    # Allows the test module to be run directly.
    unittest.main()
7,403 | srcdocs | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Module to execute jupyter notebooks and gather the output into renderable
HTML files."""
# pytype: skip-file
import html
import os
import shutil
import subprocess
from html.parser import HTMLParser
from apache_beam.runners.interactive.utils import obfuscate
try:
import nbformat
from jupyter_client.kernelspec import KernelSpecManager
from nbconvert.preprocessors import ExecutePreprocessor
_interactive_integration_ready = True
except ImportError:
_interactive_integration_ready = False
class NotebookExecutor(object):
    """Executor that reads notebooks, executes it and gathers outputs into static
    HTML pages that can be served."""

    def __init__(self, path):
        # type: (str) -> None
        assert _interactive_integration_ready, (
            '[interactive_test] dependency is not installed.')
        assert os.path.exists(path), '{} does not exist.'.format(path)
        self._paths = []
        if os.path.isdir(path):
            # Recursively collect every notebook under the directory.
            for root, _, files in os.walk(path):
                for filename in files:
                    if filename.endswith('.ipynb'):
                        self._paths.append(os.path.join(root, filename))
        elif path.endswith('.ipynb'):
            self._paths.append(path)
        assert len(
            self._paths) > 0, ('No notebooks to be executed under{}'.format(path))
        # Output html pages live in an 'output' folder next to the notebooks.
        self._dir = os.path.dirname(self._paths[0])
        self._output_html_dir = os.path.join(self._dir, 'output')
        self.cleanup()
        self._output_html_paths = {}
        self._notebook_path_to_execution_id = {}
        kernel_specs = KernelSpecManager().get_all_specs()
        if 'test' not in kernel_specs:
            # Install a test ipython kernel in current runtime environment. If this
            # errors out, it means the test env is broken and should fail the test.
            process = subprocess.run(
                ['python', '-m', 'ipykernel', 'install', '--user', '--name', 'test'],
                check=True)
            process.check_returncode()

    def cleanup(self):
        """Cleans up the output folder."""
        _cleanup(self._output_html_dir)

    def execute(self):
        """Executes all notebooks found in the scoped path and gathers their
        outputs into HTML pages stored in the output folder."""
        for path in self._paths:
            with open(path, 'r') as nb_f:
                nb = nbformat.read(nb_f, as_version=4)
                # No timeout, and errors are kept in the output rather than
                # aborting the run.
                ep = ExecutePreprocessor(
                    timeout=-1, allow_errors=True, kernel_name='test')
                ep.preprocess(nb, {'metadata': {'path': os.path.dirname(path)}})
            execution_id = obfuscate(path)
            output_html_path = os.path.join(
                self._output_html_dir, execution_id + '.html')
            with open(output_html_path, 'a+') as sink:
                sink.write('<html>\n')
                sink.write('<head>\n')
                sink.write('</head>\n')
                sink.write('<body>\n')
                # Only code cells carry executable outputs worth rendering.
                for cell in nb['cells']:
                    if cell['cell_type'] == 'code':
                        for output in cell['outputs']:
                            _extract_html(output, sink)
                sink.write('</body>\n')
                sink.write('</html>\n')
            self._output_html_paths[execution_id] = output_html_path
            self._notebook_path_to_execution_id[path] = execution_id

    @property
    def output_html_paths(self):
        """Mapping from execution ids to output html page paths.

        An execution/test id is an obfuscated value from the executed notebook path.
        It identifies the input notebook, the output html, the screenshot of the
        output html, and the golden screenshot for comparison.
        """
        return self._output_html_paths

    @property
    def output_html_dir(self):
        """The directory's path to all the output html pages generated."""
        return self._output_html_dir

    @property
    def notebook_path_to_execution_id(self):
        """Mapping from input notebook paths to their obfuscated execution ids."""
        return self._notebook_path_to_execution_id
def _cleanup(output_dir):
"""Cleans up the given output_dir."""
if os.path.exists(output_dir):
shutil.rmtree(output_dir)
os.makedirs(output_dir)
def _extract_html(output, sink):
    """Extracts html elements from the output of an executed notebook node and
    writes them into a file sink."""
    if output['output_type'] != 'display_data':
        return
    data = output['data']
    if 'application/javascript' in data:
        sink.write('<script>\n')
        sink.write(data['application/javascript'])
        sink.write('</script>\n')
    if 'text/html' in data:
        parser = IFrameParser()
        parser.feed(data['text/html'])
        # Prefer the iframe srcdoc content when present; otherwise emit the
        # html fragment as is.
        sink.write(parser.METHOD_NAME or data['text/html'])
class IFrameParser(HTMLParser):
    """A parser to extract iframe content from given HTML."""
    def __init__(self):
        # Srcdoc payloads collected in document order.
        self._srcdocs = []
        super().__init__()

    def handle_starttag(self, tag, attrs):
        if tag != 'iframe':
            return
        for name_value in attrs:
            if 'srcdoc' in name_value:
                self._srcdocs.append(html.unescape(name_value[1]))

    @property
    def METHOD_NAME(self):
        # Note : METHOD_NAME is a generated placeholder for the srcdocs
        # accessor; joins all collected srcdoc payloads.
        return '\n'.join(self._srcdocs)
7,404 | write | """Converts Md models back into the binary format used by the game"""
# Copyright 2020-2023 Capypara and the SkyTemple Contributors
#
# This file is part of SkyTemple.
#
# SkyTemple is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# SkyTemple is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with SkyTemple. If not, see <https://www.gnu.org/licenses/>.
from __future__ import annotations
from range_typed_integers import u32, u8, i8, u16, i16, i32
from skytemple_files.common.util import (
write_u32,
write_i8,
write_i32,
generate_bitfield,
write_u16,
write_i16,
write_u8,
)
from skytemple_files.data.md._model import MD_ENTRY_LEN, Md
class MdWriter:
    """Serializes an Md model back into the binary container format.

    Layout : 4-byte magic "MD\\0\\0", u32 entry count, then one fixed-size
    record of MD_ENTRY_LEN bytes per entry.
    """

    def __init__(self):
        # Output buffer and the running write cursor into it.
        self.data: bytearray = bytearray()
        self.bytes_written = 0

    def METHOD_NAME(self, model: Md) -> bytes:
        # Note : METHOD_NAME is a generated placeholder for the `write`
        # entry point. NOTE(review): returns the internal bytearray even
        # though the annotation says bytes.
        # At max we will need 8 byte header + (number entries * 68):
        self.data = bytearray(8 + len(model.entries) * MD_ENTRY_LEN)
        self.bytes_written = 4
        self.data[0:4] = b"MD\0\0"
        self._write_u32(u32(len(model.entries)))
        for entry in model.entries:
            # Field order below defines the binary record layout; it must
            # not be reordered.
            self._write_u16(entry.entid)
            self._write_u16(entry.unk31)
            self._write_u16(entry.national_pokedex_number)
            self._write_u16(entry.base_movement_speed)
            self._write_u16(entry.pre_evo_index)
            self._write_u16(entry.evo_method)
            self._write_u16(entry.evo_param1)
            self._write_u16(entry.evo_param2)
            self._write_i16(entry.sprite_index)
            self._write_u8(entry.gender)
            self._write_u8(entry.body_size)
            self._write_u8(entry.type_primary)
            self._write_u8(entry.type_secondary)
            self._write_u8(entry.movement_type)
            self._write_u8(entry.iq_group)
            self._write_u8(entry.ability_primary)
            self._write_u8(entry.ability_secondary)
            # Eight boolean flags packed into one u16 bitfield.
            self._write_u16(
                u16(
                    generate_bitfield(
                        (
                            entry.item_required_for_spawning,
                            entry.can_evolve,
                            entry.bitfield1_5,
                            entry.can_move,
                            entry.bitfield1_3,
                            entry.bitfield1_2,
                            entry.bitfield1_1,
                            entry.bitfield1_0,
                        )
                    )
                )
            )
            self._write_u16(entry.exp_yield)
            self._write_i16(entry.recruit_rate1)
            self._write_u16(entry.base_hp)
            self._write_i16(entry.recruit_rate2)
            self._write_u8(entry.base_atk)
            self._write_u8(entry.base_sp_atk)
            self._write_u8(entry.base_def)
            self._write_u8(entry.base_sp_def)
            self._write_i16(entry.weight)
            self._write_i16(entry.size)
            self._write_u8(entry.unk17)
            self._write_u8(entry.unk18)
            self._write_i8(entry.shadow_size)
            self._write_i8(
                entry.chance_spawn_asleep,
            )
            self._write_u8(entry.hp_regeneration)
            self._write_i8(entry.unk21_h)
            self._write_i16(entry.base_form_index)
            self._write_i16(entry.exclusive_item1)
            self._write_i16(entry.exclusive_item2)
            self._write_i16(entry.exclusive_item3)
            self._write_i16(entry.exclusive_item4)
            self._write_i16(entry.unk27)
            self._write_i16(entry.unk28)
            self._write_i16(entry.unk29)
            self._write_i16(entry.unk30)
        # The pre-sized buffer must be filled exactly.
        assert self.bytes_written == len(self.data)
        return self.data

    # Low-level writers : each writes at the cursor and advances it.
    def _write_u8(self, val: u8):
        write_u8(self.data, val, self.bytes_written)
        self.bytes_written += 1

    def _write_i8(self, val: i8):
        write_i8(self.data, val, self.bytes_written)
        self.bytes_written += 1

    def _write_u16(self, val: u16):
        write_u16(self.data, val, self.bytes_written)
        self.bytes_written += 2

    def _write_i16(self, val: i16):
        write_i16(self.data, val, self.bytes_written)
        self.bytes_written += 2

    def _write_u32(self, val: u32):
        write_u32(self.data, val, self.bytes_written)
        self.bytes_written += 4

    def _write_i32(self, val: i32):
        write_i32(self.data, val, self.bytes_written)
        self.bytes_written += 4
7,405 | default dtypes | """Dtypes/scalar type implementtaions with torch dtypes.
Here `dtype` is always a torch.dtype, this module knows nothing about
scalar types, wrapper dtypes or anything like that. PyTorch only.
"""
from collections import namedtuple
import torch
# defaults : mimic NumPy, allow user control
DefaultDTypes = namedtuple(
    "DefaultDTypes", ["float_dtype", "complex_dtype", "int_dtype"]
)

# a global state
# We set it the first time we call default_dtypes() to avoid importing
# torch._dynamo.config and create a circular reference
_default_dtypes = None
def METHOD_NAME():
    # Note : METHOD_NAME is a generated placeholder for the default_dtypes
    # accessor. Lazily builds the process-wide DefaultDTypes from
    # torch._dynamo.config on first use (see the comment on _default_dtypes).
    global _default_dtypes
    if _default_dtypes is None:
        import torch._dynamo.config as config

        _default_dtypes = DefaultDTypes(
            float_dtype=getattr(torch, config.numpy_default_float),
            complex_dtype=getattr(torch, config.numpy_default_complex),
            int_dtype=getattr(torch, config.numpy_default_int),
        )
        # Config values are dtype names; make sure they resolved to dtypes.
        assert isinstance(_default_dtypes.float_dtype, torch.dtype)
        assert isinstance(_default_dtypes.complex_dtype, torch.dtype)
        assert isinstance(_default_dtypes.int_dtype, torch.dtype)
    return _default_dtypes
def get_default_dtype_for(dtype):
    """Default scalar type given sctype category."""
    # bool maps to itself; every other dtype maps to the configured default
    # of its own category (complex / float / int).
    if dtype == torch.bool:
        return dtype
    if dtype.is_complex:
        return METHOD_NAME().complex_dtype
    if dtype.is_floating_point:
        return METHOD_NAME().float_dtype
    # else, it must be (some) integer
    return METHOD_NAME().int_dtype
from . import _casting_dicts as _cd
def can_cast_impl(from_torch_dtype, to_torch_dtype, casting):
    """Table lookup : may `from` cast to `to` under the given casting rule?"""
    return _cd._can_cast_dict[casting][from_torch_dtype][to_torch_dtype]
def result_type_impl(*tensors):
    """Promotion result dtype for the given tensors."""
    # NB: torch dtypes here; fold pairwise through the precomputed table.
    result = tensors[0].dtype
    for tensor in tensors[1:]:
        result = _cd._result_type_dict[result][tensor.dtype]
    return result
def python_type_for_torch(dtyp):
    """Get a python scalar type a torch dtype"""
    # bool is neither floating point nor complex, so the checks below are
    # mutually exclusive and order-independent.
    if dtyp == torch.bool:
        return bool
    if dtyp.is_floating_point:
        return float
    if dtyp.is_complex:
        return complex
    return int
# ### NEP 50 helpers ###

# Python scalar types treated as "weak" operands under NEP 50.
_SCALAR_TYPES = (int, bool, float, complex)

# The same set, extended with torch's symbolic scalar types.
_SCALAR_AND_SYMBOLIC_TYPES = (
    *_SCALAR_TYPES,
    torch.SymInt,
    torch.SymFloat,
    torch.SymBool,
)


def is_scalar(x):
    # True for plain python scalars (not 0-d tensors).
    return isinstance(x, _SCALAR_TYPES)


def is_scalar_or_symbolic(x):
    # As `is_scalar`, but also accepts torch symbolic scalars.
    return isinstance(x, _SCALAR_AND_SYMBOLIC_TYPES)
def _dtype_for_scalar(py_type):
return {
bool: torch.bool,
torch.SymBool: torch.bool,
int: torch.int64,
torch.SymInt: torch.int64,
float: torch.float64,
torch.SymFloat: torch.float64,
complex: torch.complex128,
}[py_type]
def _category(dtype):
return {
torch.bool: 0,
torch.SymBool: 0,
# int
torch.uint8: 1,
torch.int8: 1,
torch.int16: 1,
torch.int32: 1,
torch.int64: 1,
torch.SymInt: 1,
# float
torch.float16: 2,
torch.float32: 2,
torch.float64: 2,
torch.SymFloat: 2,
# complex
torch.complex64: 3,
torch.complex128: 3,
}[dtype]
def nep50_to_tensors(x1, x2, handle_weaks):
    """If either of inputs is a python scalar, type-promote with NEP 50."""

    def to_tensor(scalar, dtype=None):
        # Convert a python scalar to a 0-d tensor; with dtype=None the
        # dtype comes from the scalar's python type mapped through the
        # configured defaults.
        if dtype is None:
            dtype = _dtype_for_scalar(type(scalar))
            dtype = get_default_dtype_for(dtype)
        return torch.as_tensor(scalar, dtype=dtype)

    x1_is_weak = not isinstance(x1, torch.Tensor)
    x2_is_weak = not isinstance(x2, torch.Tensor)
    # scalar <op> scalar (or NEP 50 disabled) : just tensorify the scalars.
    if not handle_weaks or (x1_is_weak and x2_is_weak):
        x1 = to_tensor(x1) if x1_is_weak else x1
        x2 = to_tensor(x2) if x2_is_weak else x2
        return x1, x2

    # scalar <op> tensor: NEP 50
    assert x1_is_weak != x2_is_weak
    weak, not_weak = (x1, x2) if x1_is_weak else (x2, x1)

    # find the dtype for the weak's type
    weak_dtype = _dtype_for_scalar(type(weak))

    cat_weak = _category(weak_dtype)
    cat_not_weak = _category(not_weak.dtype)

    # If the scalar's category doesn't exceed the tensor's, the tensor dtype
    # wins; otherwise leave dt=None so to_tensor picks the default dtype.
    dt = not_weak.dtype if cat_weak <= cat_not_weak else None

    # special-case complex + float32
    if weak_dtype.is_complex and not_weak.dtype == torch.float32:
        dt = torch.complex64

    # detect overflows: in PyTorch, uint8(-1) wraps around to 255,
    # while NEP50 mandates an exception.
    #
    # Note that we only check if each element of the binop overflows,
    # not the result. Consider, e.g. `uint8(100) + 200`. Operands are OK
    # in uint8, but the result overflows and wrap around 255.
    # Numpy emits a RuntimeWarning, PyTorch does not, and we do not either.
    if cat_weak == 1 and cat_not_weak == 1:
        # integers
        iinfo = torch.iinfo(not_weak.dtype)
        if not (iinfo.min <= weak <= iinfo.max):
            raise OverflowError(
                f"Python integer {weak} out of bounds for {not_weak.dtype}"
            )

    # finally, can make `weak` into a 0D tensor
    weak = to_tensor(weak, dt)

    # Return in the original argument order.
    return (weak, not_weak) if x1_is_weak else (not_weak, weak)
7,406 | get method self | import builtins
import operator
import types
import unittest
from _typeshed import IdentityFunction, Unused, _KT_contra, _VT_co
from builtins import next as next
from collections.abc import Callable, ItemsView, Iterable, Iterator as _Iterator, KeysView, Mapping, ValuesView
from functools import wraps as wraps
from importlib.util import spec_from_loader as spec_from_loader
from io import BytesIO as BytesIO, StringIO as StringIO
from re import Pattern
from typing import Any, AnyStr, NoReturn, Protocol, TypeVar, overload
from typing_extensions import Literal
from six import moves as moves
# TODO: We should switch to the _typeshed version of SupportsGetItem
# once mypy updates its vendored copy of typeshed and makes a new release
class _SupportsGetItem(Protocol[_KT_contra, _VT_co]):
    # Structural type : anything indexable that also supports `in`.
    def __contains__(self, __x: Any) -> bool: ...
    def __getitem__(self, __key: _KT_contra) -> _VT_co: ...
_T = TypeVar("_T")
_K = TypeVar("_K")
_V = TypeVar("_V")

__author__: str
__version__: str

# On python 3 these flags are compile-time constants.
PY2: Literal[False]
PY3: Literal[True]
PY34: Literal[True]

string_types: tuple[type[str]]
integer_types: tuple[type[int]]
class_types: tuple[type[type]]
text_type = str
binary_type = bytes

MAXSIZE: int

callable = builtins.callable

def get_unbound_function(unbound: types.FunctionType) -> types.FunctionType: ...

create_bound_method = types.MethodType

def create_unbound_method(func: types.FunctionType, cls: type) -> types.FunctionType: ...

Iterator = object

def get_method_function(meth: types.MethodType) -> types.FunctionType: ...

# Note : METHOD_NAME is a generated placeholder for the get_method_self stub.
def METHOD_NAME(meth: types.MethodType) -> object: ...
def get_function_closure(fun: types.FunctionType) -> tuple[types._Cell, ...] | None: ...
def get_function_code(fun: types.FunctionType) -> types.CodeType: ...
def get_function_defaults(fun: types.FunctionType) -> tuple[Any, ...] | None: ...
def get_function_globals(fun: types.FunctionType) -> dict[str, Any]: ...

# Dict iteration/view helpers.
def iterkeys(d: Mapping[_K, Any]) -> _Iterator[_K]: ...
def itervalues(d: Mapping[Any, _V]) -> _Iterator[_V]: ...
def iteritems(d: Mapping[_K, _V]) -> _Iterator[tuple[_K, _V]]: ...
def viewkeys(d: Mapping[_K, Any]) -> KeysView[_K]: ...
def viewvalues(d: Mapping[Any, _V]) -> ValuesView[_V]: ...
def viewitems(d: Mapping[_K, _V]) -> ItemsView[_K, _V]: ...

def b(s: str) -> bytes: ...
def u(s: str) -> str: ...

unichr = chr

def int2byte(i: int) -> bytes: ...

# Should be `byte2int: operator.itemgetter[int]`. But a bug in mypy prevents using TypeVar in itemgetter.__call__
def byte2int(obj: _SupportsGetItem[int, _T]) -> _T: ...

indexbytes = operator.getitem
iterbytes = iter

# unittest assertion aliases.
def assertCountEqual(self: unittest.TestCase, first: Iterable[_T], second: Iterable[_T], msg: str | None = ...) -> None: ...
@overload
def assertRaisesRegex(self: unittest.TestCase, msg: str | None = ...) -> Any: ...
@overload
def assertRaisesRegex(self: unittest.TestCase, callable_obj: Callable[..., object], *args: Any, **kwargs: Any) -> Any: ...
def assertRegex(self: unittest.TestCase, text: AnyStr, expected_regex: AnyStr | Pattern[AnyStr], msg: Any = ...) -> None: ...
def assertNotRegex(self: unittest.TestCase, text: AnyStr, expected_regex: AnyStr | Pattern[AnyStr], msg: Any = ...) -> None: ...

exec_ = exec

def reraise(tp: type[BaseException] | None, value: BaseException | None, tb: types.TracebackType | None = None) -> NoReturn: ...
def raise_from(value: BaseException | type[BaseException], from_value: BaseException | None) -> NoReturn: ...

print_ = print

def with_metaclass(meta: type, *bases: type) -> type: ...
def add_metaclass(metaclass: type) -> IdentityFunction: ...
def ensure_binary(s: bytes | str, encoding: str = "utf-8", errors: str = "strict") -> bytes: ...
def ensure_str(s: bytes | str, encoding: str = "utf-8", errors: str = "strict") -> str: ...
def ensure_text(s: bytes | str, encoding: str = "utf-8", errors: str = "strict") -> str: ...
def python_2_unicode_compatible(klass: _T) -> _T: ...
class _LazyDescr:
    # Descriptor resolving a moved module/attribute on first access.
    name: str
    def __init__(self, name: str) -> None: ...
    def __get__(self, obj: object, tp: Unused) -> Any: ...
class MovedModule(_LazyDescr):
    # A module that was renamed between python 2 and 3.
    mod: str
    def __init__(self, name: str, old: str, new: str | None = None) -> None: ...
    def __getattr__(self, attr: str) -> Any: ...
class MovedAttribute(_LazyDescr):
    # An attribute that moved to a different module between python 2 and 3.
    mod: str
    attr: str
    def __init__(
        self, name: str, old_mod: str, new_mod: str, old_attr: str | None = None, new_attr: str | None = None
    ) -> None: ...
def add_move(move: MovedModule | MovedAttribute) -> None: ...
def remove_move(name: str) -> None: ...
7,407 | parse from string | # copyright (c) 2021 PaddlePaddle Authors. All Rights Reserve.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import paddle
# A global variable to record the number of calling times for profiler
# functions. It is used to specify the tracing range of training steps.
_profiler_step_id = 0

# A global variable to avoid parsing from string every time.
_profiler_options = None
class ProfilerOptions(object):
    '''
    Use a string to initialize a ProfilerOptions.
    The string should be in the format: "key1=value1;key2=value;key3=value3".
    For example:
      "profile_path=model.profile"
      "batch_range=[50, 60]; profile_path=model.profile"
      "batch_range=[50, 60]; tracer_option=OpDetail; profile_path=model.profile"
    ProfilerOptions supports following key-value pair:
      batch_range      - an integer list, e.g. [100, 110].
      state            - a string, the optional values are 'CPU', 'GPU' or 'All'.
      sorted_key       - a string, the optional values are 'calls', 'total',
                         'max', 'min' or 'ave'.
      tracer_option    - a string, the optional values are 'Default', 'OpDetail',
                         'AllOpDetail'.
      profile_path     - a string, the path to save the serialized profile data,
                         which can be used to generate a timeline.
      exit_on_finished - a boolean.
    '''

    def __init__(self, options_str):
        assert isinstance(options_str, str)

        # Defaults used for any key not present in options_str.
        self._options = {
            'batch_range': [10, 20],
            'state': 'All',
            'sorted_key': 'total',
            'tracer_option': 'Default',
            'profile_path': '/tmp/profile',
            'exit_on_finished': True
        }
        if options_str != "":
            self.METHOD_NAME(options_str)

    def METHOD_NAME(self, options_str):
        """Parse a "key1=value1;key2=value2" string into ``self._options``.

        Unknown keys and malformed batch_range values are silently ignored,
        keeping the corresponding defaults.
        """
        for kv in options_str.replace(' ', '').split(';'):
            if not kv:
                # Tolerate empty segments (e.g. a trailing ';'), which would
                # otherwise crash the unpacking below.
                continue
            # Split on the first '=' only so values may themselves contain '='.
            key, value = kv.split('=', 1)
            if key == 'batch_range':
                value_list = value.replace('[', '').replace(']', '').split(',')
                value_list = list(map(int, value_list))
                # Accept only a non-negative, strictly increasing [start, end].
                if len(value_list) >= 2 and value_list[0] >= 0 and value_list[
                        1] > value_list[0]:
                    self._options[key] = value_list
            elif key == 'exit_on_finished':
                self._options[key] = value.lower() in ("yes", "true", "t", "1")
            elif key in [
                    'state', 'sorted_key', 'tracer_option', 'profile_path'
            ]:
                self._options[key] = value

    def __getitem__(self, name):
        """Return the option value; raise ValueError for unknown option names."""
        if self._options.get(name, None) is None:
            raise ValueError(
                "ProfilerOptions does not have an option named %s." % name)
        return self._options[name]
def add_profiler_step(options_str=None):
    '''
    Enable the operator-level timing using PaddlePaddle's profiler.
    The profiler uses a independent variable to count the profiler steps.
    One call of this function is treated as a profiler step.
    Args:
        options_str - a string to initialize the ProfilerOptions.
                      Default is None, and the profiler is disabled.
    '''
    if options_str is None:
        return

    global _profiler_step_id
    global _profiler_options

    # Parse the options once and cache them for subsequent steps.
    if _profiler_options is None:
        _profiler_options = ProfilerOptions(options_str)

    # Start at the first step of batch_range, stop (and optionally exit)
    # at the second one.
    if _profiler_step_id == _profiler_options['batch_range'][0]:
        paddle.utils.profiler.start_profiler(_profiler_options['state'],
                                             _profiler_options['tracer_option'])
    elif _profiler_step_id == _profiler_options['batch_range'][1]:
        paddle.utils.profiler.stop_profiler(_profiler_options['sorted_key'],
                                            _profiler_options['profile_path'])
        if _profiler_options['exit_on_finished']:
            sys.exit(0)

    _profiler_step_id += 1
from typing import List, Tuple, Iterable, Dict
import logging
import archinfo
import ailment
from ....calling_conventions import SimRegArg
from ....code_location import CodeLocation
from .optimization_pass import OptimizationPass, OptimizationPassStage
# Module-level logger for this optimization pass.
_l = logging.getLogger(name=__name__)
def s2u(s, bits):
    """Convert a signed integer to its unsigned two's-complement value.

    :param s:    The signed integer.
    :param bits: Width of the integer in bits.
    :return:     The equivalent unsigned value in [0, 2**bits).
    """
    # Non-negative values (including 0) are already their own unsigned
    # representation; the previous `s > 0` test wrongly mapped 0 to 2**bits.
    if s >= 0:
        return s
    return (1 << bits) + s
class RegisterSaveAreaSimplifier(OptimizationPass):
    """
    Optimizes away register spilling effects, including callee-saved registers.
    """

    ARCHES = None
    PLATFORMS = None
    STAGE = OptimizationPassStage.AFTER_SINGLE_BLOCK_SIMPLIFICATION
    NAME = "Simplify register save areas"
    DESCRIPTION = __doc__.strip()

    def __init__(self, func, **kwargs):
        super().__init__(func, **kwargs)

        self.analyze()

    def _check(self):
        """Return (should_run, cache) where cache carries matched save/restore info."""
        # Check the first block to see what external registers are stored on the stack
        stored_info = self._find_registers_stored_on_stack()
        if not stored_info:
            return False, None

        # Check all return sites to see what external registers are restored to registers from the stack
        restored_info = self._find_registers_restored_from_stack()
        if not restored_info:
            return False, None

        # Find common registers and stack offsets
        info = self.METHOD_NAME(stored_info, restored_info)
        return bool(info), {"info": info}

    def _analyze(self, cache=None):
        """Remove the store/restore statements recorded by _check()."""

        def _remove_statement(old_block, stmt_idx_: int, updated_blocks_):
            # Copy-on-write: each block is copied at most once, then mutated.
            # (Consistently use the parameter instead of the enclosing
            # `updated_blocks` closure variable.)
            if old_block not in updated_blocks_:
                block = old_block.copy()
                updated_blocks_[old_block] = block
            else:
                block = updated_blocks_[old_block]
            block.statements[stmt_idx_] = None

        if cache is None:
            return

        info: Dict[int, Dict[str, List[Tuple[int, CodeLocation]]]] = cache["info"]
        updated_blocks = {}

        for data in info.values():
            # remove storing statements
            for _, codeloc in data["stored"]:
                old_block = self._get_block(codeloc.block_addr, idx=codeloc.block_idx)
                _remove_statement(old_block, codeloc.stmt_idx, updated_blocks)
            # remove restoring statements
            for _, codeloc in data["restored"]:
                old_block = self._get_block(codeloc.block_addr, idx=codeloc.block_idx)
                _remove_statement(old_block, codeloc.stmt_idx, updated_blocks)

        for old_block, new_block in updated_blocks.items():
            # remove all statements that are None
            new_block.statements = [stmt for stmt in new_block.statements if stmt is not None]
            # update it
            self._update_block(old_block, new_block)

    def _find_registers_stored_on_stack(self) -> List[Tuple[int, int, CodeLocation]]:
        """Collect (reg_offset, stack_offset, codeloc) for register spills in the entry block."""
        first_block = self._get_block(self._func.addr)
        if first_block is None:
            return []

        results = []
        for idx, stmt in enumerate(first_block.statements):
            if (
                isinstance(stmt, ailment.Stmt.Store)
                and isinstance(stmt.addr, ailment.Expr.StackBaseOffset)
                and isinstance(stmt.data, ailment.Expr.Register)
            ):
                # it's storing registers to the stack!
                stack_offset = stmt.addr.offset
                reg_offset = stmt.data.reg_offset
                codeloc = CodeLocation(first_block.addr, idx, block_idx=first_block.idx, ins_addr=stmt.ins_addr)
                results.append((reg_offset, stack_offset, codeloc))

        return results

    def _find_registers_restored_from_stack(self) -> List[List[Tuple[int, int, CodeLocation]]]:
        """Collect, per exit site, (reg_offset, stack_offset, codeloc) register restores."""
        all_results = []
        for ret_site in self._func.ret_sites + self._func.jumpout_sites:
            for block in self._get_blocks(ret_site.addr):
                results = []
                for idx, stmt in enumerate(block.statements):
                    if (
                        isinstance(stmt, ailment.Stmt.Assignment)
                        and isinstance(stmt.dst, ailment.Expr.Register)
                        and isinstance(stmt.src, ailment.Expr.Load)
                        and isinstance(stmt.src.addr, ailment.Expr.StackBaseOffset)
                    ):
                        stack_offset = stmt.src.addr.offset
                        reg_offset = stmt.dst.reg_offset
                        codeloc = CodeLocation(block.addr, idx, block_idx=block.idx, ins_addr=stmt.ins_addr)
                        results.append((reg_offset, stack_offset, codeloc))
                if results:
                    all_results.append(results)

        return all_results

    def METHOD_NAME(
        self,
        stored: List[Tuple[int, int, CodeLocation]],
        restored: Iterable[List[Tuple[int, int, CodeLocation]]],
    ) -> Dict[int, Dict[str, List[Tuple[int, CodeLocation]]]]:
        """Intersect spill and restore info, keeping only genuine save/restore pairs."""

        def _collect(info: List[Tuple[int, int, CodeLocation]], output, keystr: str):
            for reg_offset, stack_offset, codeloc in info:
                if reg_offset not in output:
                    output[reg_offset] = {}
                if keystr not in output[reg_offset]:
                    output[reg_offset][keystr] = []
                output[reg_offset][keystr].append((stack_offset, codeloc))

        result: Dict[int, Dict[str, List[Tuple[int, CodeLocation]]]] = {}
        _collect(stored, result, "stored")
        for item in restored:
            _collect(item, result, "restored")

        # remove registers that are
        # (a) stored but not restored
        # (b) restored but not stored
        # (c) from different offsets
        # (d) the same as the return value register
        cc = self._func.calling_convention
        if cc is not None and isinstance(cc.RETURN_VAL, SimRegArg):
            ret_val_reg_offset = self.project.arch.registers[cc.RETURN_VAL.reg_name][0]
        else:
            ret_val_reg_offset = None

        # link register
        if archinfo.arch_arm.is_arm_arch(self.project.arch):
            lr_reg_offset = self.project.arch.registers["lr"][0]
        elif self.project.arch.name in {"MIPS32", "MIPS64"}:
            lr_reg_offset = self.project.arch.registers["ra"][0]
        elif self.project.arch.name in {"PPC32", "PPC64"}:
            lr_reg_offset = self.project.arch.registers["lr"][0]
        else:
            lr_reg_offset = None

        for reg in list(result.keys()):
            # stored link register should always be removed
            if lr_reg_offset is not None and reg == lr_reg_offset:
                if "restored" not in result[reg]:
                    # add a dummy one
                    result[reg]["restored"] = []
                continue

            if ret_val_reg_offset is not None and reg == ret_val_reg_offset:
                # (d)
                del result[reg]
                continue

            info = result[reg]
            if len(info.keys()) != 2:
                # (a) or (b)
                del result[reg]
                continue

            stack_offsets = {stack_offset for stack_offset, _ in info["stored"]} | {
                stack_offset for stack_offset, _ in info["restored"]
            }
            if len(stack_offsets) != 1:
                # (c)
                del result[reg]
                continue

        return result
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from typing import List
from typing import Optional
import urllib.parse
from .base_api_resource import BaseAPIResource
from .data_models import Participant
from .data_models import ParticipantGroup
class ParticipantGroups(BaseAPIResource):
    """Client for the Prolific Participant Groups API endpoints."""

    list_api_endpoint = "participant-groups/"
    retrieve_api_endpoint = "participant-groups/{id}/"
    remove_api_endpoint = "participant-groups/{id}/"
    list_participants_for_group_api_endpoint = "participant-groups/{id}/participants/"

    @classmethod
    def list(
        cls,
        project_id: Optional[str] = None,
        is_active: bool = True,
    ) -> List[ParticipantGroup]:
        """
        API docs for this endpoint:
        https://docs.prolific.co/docs/api-docs/public/#tag/
        Participant-Groups/paths/~1api~1v1~1participant-groups~1/get
        """
        params = {}
        if project_id:
            params["project_id"] = project_id
        # Always send the is_active filter. The previous `if is_active:` test
        # silently dropped the parameter whenever the caller asked for
        # inactive groups (is_active=False).
        params["is_active"] = is_active

        endpoint = cls.list_api_endpoint
        if params:
            endpoint += "?" + urllib.parse.urlencode(params)

        response_json = cls.get(endpoint)
        participant_groups = [ParticipantGroup(**s) for s in response_json["results"]]
        return participant_groups

    @classmethod
    def retrieve(cls, id: str) -> ParticipantGroup:
        """
        API docs for this endpoint:
        https://docs.prolific.co/docs/api-docs/public/#tag/
        Participant-Groups/paths/~1api~1v1~1participant-groups~1%7Bid%7D~1/get
        """
        endpoint = cls.retrieve_api_endpoint.format(id=id)
        response_json = cls.get(endpoint)
        return ParticipantGroup(**response_json)

    @classmethod
    def METHOD_NAME(cls, **data) -> ParticipantGroup:
        """
        API docs for this endpoint:
        https://docs.prolific.co/docs/api-docs/public/#tag/
        Participant-Groups/paths/~1api~1v1~1participant-groups~1/post
        """
        # Validate the payload locally by round-tripping it through the model.
        participant_group = ParticipantGroup(**data)
        response_json = cls.post(cls.list_api_endpoint, params=participant_group.to_dict())
        return ParticipantGroup(**response_json)

    @classmethod
    def remove(cls, id: str) -> None:
        """
        API docs for this endpoint:
        https://docs.prolific.co/docs/api-docs/public/#tag/
        Participant-Groups/paths/~1api~1v1~1participant-groups~1%7Bid%7D~1/delete
        """
        cls.delete(cls.remove_api_endpoint.format(id=id))
        return None

    @classmethod
    def list_participants_for_group(cls, id: str) -> List[Participant]:
        """
        API docs for this endpoint:
        https://docs.prolific.co/docs/api-docs/public/#tag/
        Participant-Groups/paths/~1api~1v1~1participant-groups~1%7Bid%7D~1participants~1/get
        """
        response_json = cls.get(cls.list_participants_for_group_api_endpoint.format(id=id))
        participants = [Participant(**s) for s in response_json["results"]]
        return participants

    @classmethod
    def add_participants_to_group(cls, id: str, participant_ids: List[str]) -> List[Participant]:
        """
        API docs for this endpoint:
        https://docs.prolific.co/docs/api-docs/public/#tag/
        Participant-Groups/paths/~1api~1v1~1participant-groups~1%7Bid%7D~1participants~1/post
        """
        endpoint = cls.list_participants_for_group_api_endpoint.format(id=id)
        params = dict(participant_ids=participant_ids)
        response_json = cls.post(endpoint, params=params)
        participants = [Participant(**s) for s in response_json["results"]]
        return participants

    @classmethod
    def remove_participants_from_group(
        cls,
        id: str,
        participant_ids: List[str],
    ) -> List[Participant]:
        """
        API docs for this endpoint:
        https://docs.prolific.co/docs/api-docs/public/#tag/
        Participant-Groups/paths/~1api~1v1~1participant-groups~1%7Bid%7D~1participants~1/delete
        """
        endpoint = cls.list_participants_for_group_api_endpoint.format(id=id)
        params = dict(participant_ids=participant_ids)
        response_json = cls.delete(endpoint, params=params)
        participants = [Participant(**s) for s in response_json["results"]]
        return participants
#!/usr/bin/python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import json
import os
import sys
import subprocess as sp
# Port that every seastar endpoint in the generated endpoint map listens on.
DEFAULT_SEASTAR_PORT="3333"
# Bundled jemalloc shared libraries, selected via the MEM_USAGE_STRATEGY env var.
JEMALLOC_244 = "libjemalloc.so.2.4.4"
JEMALLOC_251 = "libjemalloc.so.2.5.1"
def METHOD_NAME(workspace):
    """Parse TF_CONFIG, write the seastar endpoint map, and return cluster info.

    Returns:
        (ps_hosts, worker_hosts, chief_hosts, job_name, task_index), or
        None when TF_CONFIG has no "cluster" or no "task" section.
    """
    tf_config_json = os.environ.get("TF_CONFIG", "{}")
    print("TF_CONFIG=", tf_config_json)
    tf_config = json.loads(tf_config_json)
    cluster = tf_config.get("cluster", {})
    # NOTE: dict.get with a {} default never returns None, so the original
    # `if cluster is None` guard was dead; test for an empty cluster instead.
    if not cluster:
        print("TF_CONFIG cluster is empty")
        return

    ps_hosts = []
    worker_hosts = []
    chief_hosts = []
    node_list = []
    for key, value in cluster.items():
        if "ps" == key:
            ps_hosts = value
        elif "worker" == key:
            worker_hosts = value
        elif "chief" == key:
            chief_hosts = value
        node_list.extend(value)

    os.environ['TF_SEASTAR_ENDPOINT_MAP_PATH'] = '/tmp/'
    print("Start to gen endpoint_map file.")
    #endpoint_map_path = os.path.join(workspace, ".endpoint_map")
    endpoint_map_path = "/tmp/.endpoint_map"
    with open(endpoint_map_path, 'w') as fout:
        for node in node_list:
            # Map each "host:grpc_port" entry to "host:DEFAULT_SEASTAR_PORT".
            host = node.split(':', 1)[0]
            fout.write(node + "=" + host + ":" + DEFAULT_SEASTAR_PORT + "\n")
    os.system("ls -ltr /tmp/.endpoint_map")

    task = tf_config.get("task", {})
    # Same fix as above: guard against a missing/empty task section.
    if not task:
        print("TF_CONFIG task is empty")
        return

    task_index = task['index']
    job_name = task['type']
    return ps_hosts, worker_hosts, chief_hosts, job_name, task_index
def copy_python_binary(local_dir):
    """Copy the system python binary into *local_dir* as ``python_bin``."""
    destination = os.path.join(local_dir, "python_bin")
    command = "cp /usr/bin/python " + destination
    return sp.call(command, shell=True)
def set_jemalloc_version(workspace):
    """Build the shell prefix exporting JEMALLOC_VERSION and MALLOC_CONF.

    The MEM_USAGE_STRATEGY environment variable selects which jemalloc
    build to preload and how aggressively it returns memory to the OS.
    Returns an empty string for the "close" strategy.
    """
    strategy = os.environ.get("MEM_USAGE_STRATEGY", "")
    if "close" == strategy:
        # jemalloc disabled entirely.
        return ""

    default_conf = "background_thread:true,metadata_thp:auto,dirty_decay_ms:240000,muzzy_decay_ms:240000"
    # strategy -> (jemalloc library file, MALLOC_CONF value or None)
    table = {
        "xmin": (JEMALLOC_244, "decay_time:0"),
        "xmid": (JEMALLOC_244, None),
        "min": (JEMALLOC_251, "dirty_decay_ms:0,muzzy_decay_ms:0"),
        "mid": (JEMALLOC_251, "background_thread:true,dirty_decay_ms:10000,muzzy_decay_ms:10000"),
        "max": (JEMALLOC_251, default_conf),
        "244": (JEMALLOC_244, None),
        "251": (JEMALLOC_251, "background_thread:true,metadata_thp:auto,dirty_decay_ms:60000,muzzy_decay_ms:60000"),
    }
    # Unknown/empty strategies fall back to jemalloc 2.5.1 with the default config.
    library, malloc_conf = table.get(strategy, (JEMALLOC_251, default_conf))

    cmd_str = "export JEMALLOC_VERSION=" + os.path.join(workspace, library) + ";"
    if malloc_conf is not None:
        cmd_str += "export MALLOC_CONF=" + malloc_conf + ";"
    return cmd_str
def pip_install_requirements(workspace):
    """Install workspace/requirements.txt if it exists; return the exit status."""
    requirements_path = os.path.join(workspace, "requirements.txt")
    if not os.path.exists(requirements_path):
        # Nothing to install counts as success.
        return 0
    print("try to install requirements.txt from " + requirements_path)
    command = "$(which pip) install -r " + requirements_path
    return sp.call(command, shell=True)
def run_tensorflow_job(workspace, tf_script, tf_args, tf_envs, set_jemalloc_version_cmd):
    """Compose the final shell command and launch the TensorFlow job."""
    command = "cd " + workspace + ";"
    if set_jemalloc_version_cmd:
        command += set_jemalloc_version_cmd
        # Preload the jemalloc build exported by set_jemalloc_version().
        command += "LD_PRELOAD=${JEMALLOC_VERSION} "
    command += " ".join(tf_envs) + " $(which python) -u "
    command += tf_script + " " + " ".join(tf_args)
    print("run tensorflow command:", command)
    return sp.call(command, shell=True)
def set_mkl_envs(job_name):
    """Return MKL/OpenMP tuning environment variables for the given TF role.

    Workers get 6 OpenMP threads; every other role (ps, chief, evaluator,
    anything else) gets 1. The remaining settings are identical for all roles.
    """
    omp_threads = "6" if "worker" == job_name else "1"
    return [
        "OMP_NUM_THREADS=" + omp_threads,
        "KMP_BLOCKTIME=0",
        "MKL_ENABLE_INSTRUCTIONS=AVX2",
    ]
def set_network_threads(job_name):
    """Return seastar network-thread env vars for the given TF role."""
    role_to_env = {
        "ps": "WORKER_DEFAULT_CORE_NUM=24",
        "worker": "PS_DEFAULT_CORE_NUM=24",
    }
    return [role_to_env[job_name]] if job_name in role_to_env else []
if __name__ == "__main__":
    print("start launching tensorflow job")

    # TF_WORKSPACE and TF_SCRIPT are mandatory; fail fast when missing.
    if "TF_WORKSPACE" not in os.environ:
        print("TF_WORKSPACE env should be set.")
        exit(1)
    workspace = os.environ.get("TF_WORKSPACE", "")

    if "TF_SCRIPT" not in os.environ:
        print("TF_SCRIPT env should be set.")
        exit(1)
    tf_script = os.environ.get("TF_SCRIPT", "")

    # The jemalloc libraries default to living in the workspace directory.
    if "JEMALLOC_PATH" not in os.environ:
        jemalloc_path = workspace
    else:
        jemalloc_path = os.environ.get("JEMALLOC_PATH", "")

    #ret_code = copy_python_binary(workspace)
    #if (ret_code != 0):
    #  exit(ret_code)

    tf_args = sys.argv[1:]

    tf_envs = []
    #tf_envs.append("TF_SEASTAR_ENDPOINT_MAP_PATH=/tmp/")
    if "TF_CONFIG" in os.environ:
        # Distributed mode: derive the cluster info and expose the task identity.
        ps_hosts, worker_hosts, chief_hosts, job_name, task_index = METHOD_NAME(workspace)
        os.environ["TASK_INDEX"] = str(task_index)
        os.environ["JOB_NAME"] = str(job_name)
        #tf_envs.extend(set_mkl_envs(job_name))

    set_jemalloc_version_cmd = set_jemalloc_version(jemalloc_path)

    ret_code = pip_install_requirements(workspace)
    if (ret_code != 0):
        exit(ret_code)

    ret_code = run_tensorflow_job(workspace, tf_script, tf_args, tf_envs, set_jemalloc_version_cmd)
    if (ret_code != 0):
        exit(ret_code)
# -------------------------------------------------------------------------------------------------
# Copyright (C) 2015-2023 Nautech Systems Pty Ltd. All rights reserved.
# https://nautechsystems.io
#
# Licensed under the GNU Lesser General Public License Version 3.0 (the "License");
# You may not use this file except in compliance with the License.
# You may obtain a copy of the License at https://www.gnu.org/licenses/lgpl-3.0.en.html
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# -------------------------------------------------------------------------------------------------
from nautilus_trader.indicators.average.sma import SimpleMovingAverage
from nautilus_trader.model.enums import PriceType
from nautilus_trader.test_kit.providers import TestInstrumentProvider
from nautilus_trader.test_kit.stubs.data import TestDataStubs
# Shared test instrument: default AUD/USD FX currency pair on the SIM venue.
AUDUSD_SIM = TestInstrumentProvider.default_fx_ccy("AUD/USD")
class TestSimpleMovingAverage:
    """Unit tests for the SimpleMovingAverage indicator."""

    def setup(self):
        # Fixture Setup
        self.sma = SimpleMovingAverage(10)

    def test_name_returns_expected_string(self):
        # Arrange, Act, Assert
        assert self.sma.name == "SimpleMovingAverage"

    def test_str_repr_returns_expected_string(self):
        # Arrange, Act, Assert
        assert str(self.sma) == "SimpleMovingAverage(10)"
        assert repr(self.sma) == "SimpleMovingAverage(10)"

    def test_period_returns_expected_value(self):
        # Arrange, Act, Assert
        assert self.sma.period == 10

    def test_initialized_without_inputs_returns_false(self):
        # Arrange, Act, Assert
        assert self.sma.initialized is False

    def test_initialized_with_required_inputs_returns_true(self):
        # Arrange: feed exactly `period` (10) values so the SMA initializes.
        for value in range(1, 11):
            self.sma.update_raw(float(value))

        # Act, Assert: mean of 1..10 is 5.5.
        assert self.sma.initialized is True
        assert self.sma.count == 10
        assert self.sma.value == 5.5

    def test_handle_quote_tick_updates_indicator(self):
        # Arrange
        indicator = SimpleMovingAverage(10, PriceType.MID)
        tick = TestDataStubs.quote_tick()

        # Act
        indicator.handle_quote_tick(tick)

        # Assert
        assert indicator.has_inputs
        assert indicator.value == 1.0

    def METHOD_NAME(self):
        # Arrange
        indicator = SimpleMovingAverage(10)
        tick = TestDataStubs.trade_tick()

        # Act
        indicator.handle_trade_tick(tick)

        # Assert
        assert indicator.has_inputs
        assert indicator.value == 1.0

    def test_handle_bar_updates_indicator(self):
        # Arrange
        indicator = SimpleMovingAverage(10)
        bar = TestDataStubs.bar_5decimal()

        # Act
        indicator.handle_bar(bar)

        # Assert
        assert indicator.has_inputs
        assert indicator.value == 1.00003

    def test_value_with_one_input_returns_expected_value(self):
        # Arrange
        self.sma.update_raw(1.0)

        # Act, Assert
        assert self.sma.value == 1.0

    def test_value_with_three_inputs_returns_expected_value(self):
        # Arrange
        self.sma.update_raw(1.0)
        self.sma.update_raw(2.0)
        self.sma.update_raw(3.0)

        # Act, Assert
        assert self.sma.value == 2.0

    def test_value_at_returns_expected_value(self):
        # Arrange
        self.sma.update_raw(1.0)
        self.sma.update_raw(2.0)
        self.sma.update_raw(3.0)

        # Act, Assert
        assert self.sma.value == 2.0

    def test_handle_quote_tick_updates_with_expected_value(self):
        # Arrange: one SMA per price type to check bid/mid/ask extraction.
        sma_for_ticks1 = SimpleMovingAverage(10, PriceType.ASK)
        sma_for_ticks2 = SimpleMovingAverage(10, PriceType.MID)
        sma_for_ticks3 = SimpleMovingAverage(10, PriceType.BID)
        tick = TestDataStubs.quote_tick(
            bid_price=1.00001,
            ask_price=1.00003,
        )

        # Act
        sma_for_ticks1.handle_quote_tick(tick)
        sma_for_ticks2.handle_quote_tick(tick)
        sma_for_ticks3.handle_quote_tick(tick)

        # Assert
        assert sma_for_ticks1.has_inputs
        assert sma_for_ticks2.has_inputs
        assert sma_for_ticks3.has_inputs
        assert sma_for_ticks1.value == 1.00003
        assert sma_for_ticks2.value == 1.00002
        assert sma_for_ticks3.value == 1.00001

    def test_handle_trade_tick_updates_with_expected_value(self):
        # Arrange
        sma_for_ticks = SimpleMovingAverage(10)
        tick = TestDataStubs.trade_tick()

        # Act
        sma_for_ticks.handle_trade_tick(tick)

        # Assert
        assert sma_for_ticks.has_inputs
        assert sma_for_ticks.value == 1.0

    def test_reset_successfully_returns_indicator_to_fresh_state(self):
        # Arrange
        for _i in range(1000):
            self.sma.update_raw(1.0)

        # Act
        self.sma.reset()

        # Assert
        assert not self.sma.initialized
        assert self.sma.value == 0
from unittest.mock import mock_open, patch
import requests
from django.test import override_settings
from checks.constants import CHECK_DEFER, CHECK_RESULT_DEFER
from tacticalrmm.constants import (
AGENT_DEFER,
CHECKS_NON_EDITABLE_FIELDS,
FIELDS_TRIGGER_TASK_UPDATE_AGENT,
ONLINE_AGENTS,
POLICY_CHECK_FIELDS_TO_COPY,
POLICY_TASK_FIELDS_TO_COPY,
)
from tacticalrmm.test import TacticalTestCase
from .utils import bitdays_to_string, generate_winagent_exe, get_bit_days, reload_nats
class TestUtils(TacticalTestCase):
    """Tests for agent utility helpers and constants-tuple integrity."""

    def setUp(self):
        self.setup_coresettings()

    @patch("requests.post")
    @patch("__main__.__builtins__.open", new_callable=mock_open)
    def test_generate_winagent_exe_success(self, m_open, mock_post):
        r = generate_winagent_exe(
            client=1,
            site=1,
            agent_type="server",
            rdp=1,
            ping=0,
            power=0,
            goarch="amd64",
            token="abc123",
            api="https://api.example.com",
            file_name="rmm-client-site-server.exe",
        )

        self.assertEqual(r.status_code, 200)

    @patch("requests.post")
    def test_generate_winagent_exe_timeout(self, mock_post):
        # A connection failure must produce a 400 response, not an exception.
        mock_post.side_effect = requests.exceptions.ConnectionError()

        r = generate_winagent_exe(
            client=1,
            site=1,
            agent_type="server",
            rdp=1,
            ping=0,
            power=0,
            goarch="amd64",
            token="abc123",
            api="https://api.example.com",
            file_name="rmm-client-site-server.exe",
        )

        self.assertEqual(r.status_code, 400)

    @override_settings(
        CERT_FILE="/tmp/asdasd.pem",
        KEY_FILE="/tmp/asds55r.pem",
        ALLOWED_HOSTS=["api.example.com"],
        SECRET_KEY="sekret",
        DOCKER_BUILD=True,
    )
    @patch("subprocess.run")
    def test_reload_nats_docker(self, mock_subprocess):
        # In docker builds, nats must not be reloaded via subprocess.
        _ = reload_nats()

        mock_subprocess.assert_not_called()

    @override_settings(
        ALLOWED_HOSTS=["api.example.com"], SECRET_KEY="sekret", DOCKER_BUILD=False
    )
    @patch("subprocess.run")
    def test_reload_nats(self, mock_subprocess):
        _ = reload_nats()

        mock_subprocess.assert_called_once()

    def test_bitdays_to_string(self):
        a = ["Monday", "Tuesday", "Wednesday", "Thursday", "Friday", "Saturday"]
        all_days = [
            "Monday",
            "Tuesday",
            "Wednesday",
            "Thursday",
            "Friday",
            "Saturday",
            "Sunday",
        ]

        bit_weekdays = get_bit_days(a)
        r = bitdays_to_string(bit_weekdays)
        self.assertEqual(r, ", ".join(a))

        # All seven days collapse to the special "Every day" string.
        bit_weekdays = get_bit_days(all_days)
        r = bitdays_to_string(bit_weekdays)
        self.assertEqual(r, "Every day")

    # for checking when removing db fields, make sure we update these tuples
    def METHOD_NAME(self) -> None:
        from agents.models import Agent
        from autotasks.models import AutomatedTask
        from checks.models import Check, CheckResult

        agent_fields = [i.name for i in Agent._meta.get_fields()]
        agent_fields.append("pk")
        autotask_fields = [i.name for i in AutomatedTask._meta.get_fields()]
        check_fields = [i.name for i in Check._meta.get_fields()]
        check_result_fields = [i.name for i in CheckResult._meta.get_fields()]

        # Every constants tuple must reference only fields that still exist.
        for i in AGENT_DEFER:
            self.assertIn(i, agent_fields)
        for i in ONLINE_AGENTS:
            self.assertIn(i, agent_fields)

        for i in FIELDS_TRIGGER_TASK_UPDATE_AGENT:
            self.assertIn(i, autotask_fields)
        for i in POLICY_TASK_FIELDS_TO_COPY:
            self.assertIn(i, autotask_fields)

        for i in CHECKS_NON_EDITABLE_FIELDS:
            self.assertIn(i, check_fields)
        for i in POLICY_CHECK_FIELDS_TO_COPY:
            self.assertIn(i, check_fields)
        for i in CHECK_DEFER:
            self.assertIn(i, check_fields)

        for i in CHECK_RESULT_DEFER:
            self.assertIn(i, check_result_fields)
import datasets
import pytest
from primeqa.mrc.metrics.tydi_f1.tydi_f1 import TyDiF1
from primeqa.mrc.data_models.target_type import TargetType
from tests.primeqa.mrc.common.base import UnitTest
class TestTyDiF1(UnitTest):
    """Tests for the TyDi F1 metric (instantiation and score computation)."""

    @pytest.fixture(scope='session')
    def METHOD_NAME(self):
        return TyDiF1()

    @pytest.fixture(scope='session')
    def n_annotators(self):
        # TyDi QA provides 5 reference annotations per example.
        return 5

    @pytest.fixture(scope='session')
    def references_and_predictions(self, n_annotators):
        # References: one list of annotator labels per example, across several
        # languages; predictions: one system output per example.
        references = [
            [dict(start_position=17, end_position=42, passage_index=1,
                  yes_no_answer=TargetType.NO_ANSWER, example_id='a', language='finnish', document_plaintext='', question='')] * n_annotators,
            [dict(start_position=-1, end_position=-1, passage_index=2,
                  yes_no_answer=TargetType.YES, example_id='b', language='finnish', document_plaintext='', question='')] * n_annotators,
            [dict(start_position=17, end_position=42, passage_index=1,
                  yes_no_answer=TargetType.NO_ANSWER, example_id='c', language='swahili', document_plaintext='', question='')] * 2 +
            [dict(start_position=-1, end_position=-1, passage_index=-1,
                  yes_no_answer=TargetType.NO_ANSWER, example_id='c', language='swahili', document_plaintext='', question='')] * (n_annotators - 2),
            [dict(start_position=-1, end_position=-1, passage_index=-1,
                  yes_no_answer=TargetType.NO_ANSWER, example_id='d', language='swahili', document_plaintext='', question='')] * n_annotators,
            [dict(start_position=-1, end_position=-1, passage_index=1,
                  yes_no_answer=TargetType.NO_ANSWER, example_id='e', language='swahili', document_plaintext='', question='')] * n_annotators,
            [dict(start_position=17, end_position=42, passage_index=1,
                  yes_no_answer=TargetType.NO_ANSWER, example_id='f', language='thai', document_plaintext='', question='')] * n_annotators,
            [dict(start_position=17, end_position=42, passage_index=1,
                  yes_no_answer=TargetType.NO_ANSWER, example_id='g', language='thai', document_plaintext='', question='')] * n_annotators,
            [dict(start_position=17, end_position=42, passage_index=1,
                  yes_no_answer=TargetType.NO_ANSWER, example_id='h', language='korean', document_plaintext='', question='')] * n_annotators,
            [dict(start_position=17, end_position=42, passage_index=1,
                  yes_no_answer=TargetType.NO_ANSWER, example_id='i', language='korean', document_plaintext='', question='')] * n_annotators,
        ]
        predictions = [
            dict(start_position=17, end_position=42, passage_index=1,
                 yes_no_answer=TargetType.NO_ANSWER, example_id='a', confidence_score=10.),
            dict(start_position=-1, end_position=-1, passage_index=2,
                 yes_no_answer=TargetType.YES, example_id='b', confidence_score=9.9),
            dict(start_position=17, end_position=42, passage_index=1,
                 yes_no_answer=TargetType.NO_ANSWER, example_id='c', confidence_score=9.8),
            dict(start_position=17, end_position=42, passage_index=1,
                 yes_no_answer=TargetType.NO_ANSWER, example_id='d', confidence_score=-10.),
            dict(start_position=17, end_position=42, passage_index=1,
                 yes_no_answer=TargetType.NO_ANSWER, example_id='e', confidence_score=-5.),
            dict(start_position=24, end_position=44, passage_index=1,
                 yes_no_answer=TargetType.NO_ANSWER, example_id='f', confidence_score=10.1),
            dict(start_position=24, end_position=44, passage_index=4,
                 yes_no_answer=TargetType.NO_ANSWER, example_id='g', confidence_score=10.1),
            dict(start_position=111, end_position=141, passage_index=1,
                 yes_no_answer=TargetType.NO_ANSWER, example_id='h', confidence_score=10.1),
            dict(start_position=111, end_position=141, passage_index=4,
                 yes_no_answer=TargetType.NO_ANSWER, example_id='i', confidence_score=10.1),
        ]
        return dict(references=references, predictions=predictions)

    def test_instantiate_metric_from_class(self, METHOD_NAME):
        _ = METHOD_NAME

    def test_instantiate_metric_from_load_metric(self):
        from primeqa.mrc.metrics.tydi_f1 import tydi_f1
        _ = datasets.load_metric(tydi_f1.__file__)

    def test_compute_metric(self, METHOD_NAME, references_and_predictions):
        METHOD_NAME.add_batch(**references_and_predictions)
        actual_metric_values = METHOD_NAME.compute()
        expected_metric_values = {
            "avg_passage_f1": 0.75, "avg_passage_recall": 0.75, "avg_passage_precision": 0.75,
            "avg_minimal_f1": 0.7, "avg_minimal_recall": 0.7, "avg_minimal_precision": 0.7
        }
        assert actual_metric_values == expected_metric_values
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test helpers for untrusted_runner."""
import os
import shutil
import subprocess
import tempfile
import unittest
from clusterfuzz._internal.bot.tasks import commands
from clusterfuzz._internal.bot.untrusted_runner import file_host
from clusterfuzz._internal.bot.untrusted_runner import host
from clusterfuzz._internal.bot.untrusted_runner import untrusted
from clusterfuzz._internal.datastore import data_types
from clusterfuzz._internal.google_cloud_utils import pubsub
from clusterfuzz._internal.system import environment
from clusterfuzz._internal.system import shell
from clusterfuzz._internal.tests.test_libs import helpers as test_helpers
from clusterfuzz._internal.tests.test_libs import test_utils
# Directory containing this test-helper module.
TEST_LIBS_DIR = os.path.dirname(os.path.realpath(__file__))
# Static fixtures (certificates/keys) used by the untrusted-runner tests.
TEST_LIBS_DATA_DIR = os.path.join(TEST_LIBS_DIR, 'data')
def untrusted_process():
  """Run this process as an untrusted worker: name the bot, serve RPCs."""
  os.environ['BOT_NAME'] = 'localhost'
  untrusted.start_server()
def _test_data_dir():
"""Return path to directory for bot and server data."""
root_dir = os.environ['ROOT_DIR']
return os.path.join(root_dir, '_test_data')
def _create_test_bot():
  """Start a worker-bot subprocess; return (process, bot clusterfuzz dir)."""
  # TODO(ochang): Use Docker container instead.
  bot_path = os.path.join(_test_data_dir(), 'worker_bot')
  # Start from a clean directory; ignore_errors makes this a no-op when absent.
  shutil.rmtree(bot_path, ignore_errors=True)

  env = dict(os.environ, UNTRUSTED_WORKER='True', BOT_NAME='localhost')
  bot_proc = subprocess.Popen(
      ['python', 'butler.py', 'run_bot', bot_path], env=env)

  return bot_proc, os.path.join(bot_path, 'clusterfuzz')
def _create_test_root():
    """Build a fresh copy of ROOT_DIR for the trusted host to run from.

    Copies the source/resource trees out of the real ROOT_DIR into a scratch
    location so tests can write data/logs without polluting the checkout.

    Returns:
        Path to the newly created test root.
    """
    test_root = os.path.join(_test_data_dir(), 'test_root')
    if os.path.exists(test_root):
        shutil.rmtree(test_root, ignore_errors=True)

    real_root = os.environ['ROOT_DIR']
    os.makedirs(test_root)

    # TODO(ochang): Make sure we don't copy files that aren't tracked in git.
    for top_level in ('bot', 'resources'):
        shutil.copytree(
            os.path.join(real_root, top_level), os.path.join(test_root, top_level))

    os.mkdir(os.path.join(test_root, 'src'))
    for src_subdir in ('appengine', 'python', 'clusterfuzz', 'third_party'):
        shutil.copytree(
            os.path.join(real_root, 'src', src_subdir),
            os.path.join(test_root, 'src', src_subdir))

    return test_root
def _which(prog):
"""Return full path to |prog| (based on $PATH)."""
for path in os.getenv('PATH', '').split(':'):
candidate = os.path.join(path, prog)
if os.path.exists(candidate):
return candidate
return None
@unittest.skipIf(not os.getenv('UNTRUSTED_RUNNER_TESTS'),
                 'Skipping untrusted runner tests.')
@test_utils.with_cloud_emulators('datastore', 'pubsub')
class UntrustedRunnerIntegrationTest(unittest.TestCase):
    """Base class for doing integration testing of untrusted_runner."""

    @classmethod
    def setUpClass(cls):
        # Preserve the full environment verbatim so tearDownClass can restore it.
        cls.saved_env = os.environ.copy()

        # Identity of the trusted host side of the host<->worker pair.
        os.environ['HOST_INSTANCE_NAME'] = 'host'
        os.environ['HOST_INSTANCE_NUM'] = '0'
        os.environ['BOT_NAME'] = 'host-0'
        os.environ['LOCAL_DEVELOPMENT'] = 'True'
        os.environ['SOURCE_VERSION_OVERRIDE'] = 'VERSION'
        os.environ['CONFIG_DIR_OVERRIDE'] = os.path.abspath(
            os.path.join(os.environ['ROOT_DIR'], 'configs', 'test'))

        # Self-signed TLS material used for the host<->worker channel.
        cert_location = os.path.join(TEST_LIBS_DATA_DIR, 'untrusted_cert.pem')
        key_location = os.path.join(TEST_LIBS_DATA_DIR, 'untrusted_key.pem')
        os.environ['UNTRUSTED_TLS_CERT_FOR_TESTING'] = cert_location
        os.environ['UNTRUSTED_TLS_KEY_FOR_TESTING'] = key_location

        # Start the worker bot first so the host has something to connect to.
        cls.bot_proc, bot_root_dir = _create_test_bot()
        os.environ['TRUSTED_HOST'] = 'True'
        os.environ['WORKER_ROOT_DIR'] = bot_root_dir
        os.environ['WORKER_BOT_TMPDIR'] = os.path.join(bot_root_dir, 'bot_tmpdir')

        environment.set_default_vars()

        # Datastore entities the host uses to find and authenticate the worker.
        data_types.HostWorkerAssignment(
            host_name='host',
            instance_num=0,
            worker_name='localhost',
            project_name='project',
            id='host-0').put()

        with open(cert_location, 'rb') as f:
            cert_contents = f.read()

        with open(key_location, 'rb') as f:
            key_contents = f.read()

        data_types.WorkerTlsCert(
            project_name='project',
            cert_contents=cert_contents,
            key_contents=key_contents,
            id='project').put()

        host.init()

    @classmethod
    def tearDownClass(cls):
        if cls.bot_proc:
            try:
                cls.bot_proc.terminate()
            except OSError:
                # Could already be killed.
                pass

        # Restore the exact environment captured in setUpClass.
        os.environ.clear()
        os.environ.update(cls.saved_env)

    def setUp(self):
        self.tmp_dir = tempfile.mkdtemp()
        os.environ['BOT_TMPDIR'] = os.path.join(self.tmp_dir, 'bot_tmpdir')

        # Stub out helpers that would hit real GCS buckets / config files.
        test_helpers.patch(self, [
            'clusterfuzz._internal.datastore.data_handler.'
            'get_data_bundle_bucket_name',
            'clusterfuzz._internal.system.environment.'
            'set_environment_parameters_from_file',
        ])
        test_helpers.patch_environ(self)

        # Our tests write data/logs into subdirs of ROOT_DIR. Pivot the ROOT_DIR to
        # a temporary one.
        new_root = _create_test_root()
        os.environ['ROOT_DIR'] = new_root
        self.saved_cwd = os.getcwd()
        os.chdir(new_root)

        environment.set_bot_environment()

        # Start each test with empty fuzz-input dirs on both host and worker.
        fuzz_inputs = os.environ['FUZZ_INPUTS']
        shell.remove_directory(fuzz_inputs, recreate=True)

        worker_fuzz_inputs = file_host.rebase_to_worker_root(fuzz_inputs)
        shell.remove_directory(worker_fuzz_inputs, recreate=True)

        environment.set_value('GSUTIL_PATH', os.path.dirname(_which('gsutil')))
        test_utils.setup_pubsub('test-clusterfuzz')
        test_utils.create_pubsub_topic(pubsub.PubSubClient(), 'test-clusterfuzz',
                                       'jobs-project-linux')

    def tearDown(self):
        shutil.rmtree(self.tmp_dir)
        os.chdir(self.saved_cwd)

    def METHOD_NAME(self, job_type=None):
        """Set up bot environment."""
        # No-op unless a job type is given.
        if not job_type:
            return

        job = data_types.Job.query(data_types.Job.name == job_type).get()
        self.fuzzer_name = 'test_fuzzer'

        environment.set_value('JOB_NAME', job_type)
        commands.update_environment_for_job(job.environment_string)
import urllib.parse
from django.contrib.auth.models import User
from django.test import TestCase
from dfirtrack_main.models import Casepriority
class CasepriorityViewTestCase(TestCase):
    """casepriority view tests"""

    @classmethod
    def setUpTestData(cls):
        """Provide one casepriority object and one test user for all tests."""
        # create object
        Casepriority.objects.create(casepriority_name='casepriority_1')
        # create user
        User.objects.create_user(
            username='testuser_casepriority', password='tRiCVI2mf531CLw7r6jQ'
        )

    def _login(self):
        """Log the shared test user in on the test client.

        Factored out because every logged-in test repeated the same
        credentials; keeping them in one place avoids drift.
        """
        self.client.login(
            username='testuser_casepriority', password='tRiCVI2mf531CLw7r6jQ'
        )

    @staticmethod
    def _get_casepriority():
        """Return the casepriority object created in setUpTestData."""
        return Casepriority.objects.get(casepriority_name='casepriority_1')

    def test_casepriority_list_not_logged_in(self):
        """Anonymous requests to the list view redirect to login."""
        # create url
        destination = '/login/?next=' + urllib.parse.quote('/casepriority/', safe='')
        # get response
        response = self.client.get('/casepriority/', follow=True)
        # compare
        self.assertRedirects(
            response, destination, status_code=302, target_status_code=200
        )

    def test_casepriority_list_logged_in(self):
        """Logged-in users can access the list view."""
        self._login()
        response = self.client.get('/casepriority/')
        self.assertEqual(response.status_code, 200)

    def test_casepriority_list_template(self):
        """The list view renders the expected template."""
        self._login()
        response = self.client.get('/casepriority/')
        self.assertTemplateUsed(
            response, 'dfirtrack_main/casepriority/casepriority_list.html'
        )

    def test_casepriority_list_get_user_context(self):
        """The list view exposes the requesting user in the context."""
        self._login()
        response = self.client.get('/casepriority/')
        self.assertEqual(str(response.context['user']), 'testuser_casepriority')

    def test_casepriority_list_redirect(self):
        """A list URL without trailing slash triggers a permanent redirect."""
        self._login()
        # create url
        destination = urllib.parse.quote('/casepriority/', safe='/')
        # get response
        response = self.client.get('/casepriority', follow=True)
        # compare
        self.assertRedirects(
            response, destination, status_code=301, target_status_code=200
        )

    def test_casepriority_detail_not_logged_in(self):
        """Anonymous requests to the detail view redirect to login."""
        casepriority_1 = self._get_casepriority()
        # create url
        destination = '/login/?next=' + urllib.parse.quote(
            '/casepriority/detail/' + str(casepriority_1.casepriority_id) + '/', safe=''
        )
        # get response
        response = self.client.get(
            '/casepriority/detail/' + str(casepriority_1.casepriority_id) + '/',
            follow=True,
        )
        # compare
        self.assertRedirects(
            response, destination, status_code=302, target_status_code=200
        )

    def test_casepriority_detail_logged_in(self):
        """Logged-in users can access the detail view."""
        casepriority_1 = self._get_casepriority()
        self._login()
        response = self.client.get(
            '/casepriority/detail/' + str(casepriority_1.casepriority_id) + '/'
        )
        self.assertEqual(response.status_code, 200)

    def test_casepriority_detail_template(self):
        """The detail view renders the expected template."""
        casepriority_1 = self._get_casepriority()
        self._login()
        response = self.client.get(
            '/casepriority/detail/' + str(casepriority_1.casepriority_id) + '/'
        )
        self.assertTemplateUsed(
            response, 'dfirtrack_main/casepriority/casepriority_detail.html'
        )

    def test_casepriority_detail_get_user_context(self):
        """The detail view exposes the requesting user in the context."""
        casepriority_1 = self._get_casepriority()
        self._login()
        response = self.client.get(
            '/casepriority/detail/' + str(casepriority_1.casepriority_id) + '/'
        )
        self.assertEqual(str(response.context['user']), 'testuser_casepriority')

    def METHOD_NAME(self):
        """A detail URL without trailing slash triggers a permanent redirect."""
        casepriority_1 = self._get_casepriority()
        self._login()
        # create url
        destination = urllib.parse.quote(
            '/casepriority/detail/' + str(casepriority_1.casepriority_id) + '/',
            safe='/',
        )
        # get response
        response = self.client.get(
            '/casepriority/detail/' + str(casepriority_1.casepriority_id), follow=True
        )
        # compare
        self.assertRedirects(
            response, destination, status_code=301, target_status_code=200
        )
# coding: utf-8
"""
Kubernetes
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
The version of the OpenAPI document: release-1.28
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from kubernetes.client.configuration import Configuration
class V1beta1ValidatingAdmissionPolicyBinding(object):
    """NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech

    Do not edit the class manually.
    """

    """
    Attributes:
      openapi_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    # Python attribute name -> declared OpenAPI type (drives to_dict()).
    openapi_types = {
        'api_version': 'str',
        'kind': 'str',
        'metadata': 'V1ObjectMeta',
        'spec': 'V1beta1ValidatingAdmissionPolicyBindingSpec'
    }

    # Python attribute name -> JSON field name in the API definition.
    attribute_map = {
        'api_version': 'apiVersion',
        'kind': 'kind',
        'metadata': 'metadata',
        'spec': 'spec'
    }

    def __init__(self, api_version=None, kind=None, metadata=None, spec=None, local_vars_configuration=None):  # noqa: E501
        """V1beta1ValidatingAdmissionPolicyBinding - a model defined in OpenAPI"""  # noqa: E501
        if local_vars_configuration is None:
            local_vars_configuration = Configuration()
        self.local_vars_configuration = local_vars_configuration

        self._api_version = None
        self._kind = None
        self._metadata = None
        self._spec = None
        # No polymorphic discriminator for this model.
        self.discriminator = None

        # Only assign fields that were explicitly provided, so unset fields
        # stay None and are distinguishable from explicit values.
        if api_version is not None:
            self.api_version = api_version
        if kind is not None:
            self.kind = kind
        if metadata is not None:
            self.metadata = metadata
        if spec is not None:
            self.spec = spec

    @property
    def api_version(self):
        """Gets the api_version of this V1beta1ValidatingAdmissionPolicyBinding.  # noqa: E501

        APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources  # noqa: E501

        :return: The api_version of this V1beta1ValidatingAdmissionPolicyBinding.  # noqa: E501
        :rtype: str
        """
        return self._api_version

    @api_version.setter
    def api_version(self, api_version):
        """Sets the api_version of this V1beta1ValidatingAdmissionPolicyBinding.

        APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources  # noqa: E501

        :param api_version: The api_version of this V1beta1ValidatingAdmissionPolicyBinding.  # noqa: E501
        :type: str
        """

        self._api_version = api_version

    @property
    def kind(self):
        """Gets the kind of this V1beta1ValidatingAdmissionPolicyBinding.  # noqa: E501

        Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds  # noqa: E501

        :return: The kind of this V1beta1ValidatingAdmissionPolicyBinding.  # noqa: E501
        :rtype: str
        """
        return self._kind

    @kind.setter
    def kind(self, kind):
        """Sets the kind of this V1beta1ValidatingAdmissionPolicyBinding.

        Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds  # noqa: E501

        :param kind: The kind of this V1beta1ValidatingAdmissionPolicyBinding.  # noqa: E501
        :type: str
        """

        self._kind = kind

    @property
    def metadata(self):
        """Gets the metadata of this V1beta1ValidatingAdmissionPolicyBinding.  # noqa: E501


        :return: The metadata of this V1beta1ValidatingAdmissionPolicyBinding.  # noqa: E501
        :rtype: V1ObjectMeta
        """
        return self._metadata

    @metadata.setter
    def metadata(self, metadata):
        """Sets the metadata of this V1beta1ValidatingAdmissionPolicyBinding.


        :param metadata: The metadata of this V1beta1ValidatingAdmissionPolicyBinding.  # noqa: E501
        :type: V1ObjectMeta
        """

        self._metadata = metadata

    @property
    def spec(self):
        """Gets the spec of this V1beta1ValidatingAdmissionPolicyBinding.  # noqa: E501


        :return: The spec of this V1beta1ValidatingAdmissionPolicyBinding.  # noqa: E501
        :rtype: V1beta1ValidatingAdmissionPolicyBindingSpec
        """
        return self._spec

    @spec.setter
    def spec(self, spec):
        """Sets the spec of this V1beta1ValidatingAdmissionPolicyBinding.


        :param spec: The spec of this V1beta1ValidatingAdmissionPolicyBinding.  # noqa: E501
        :type: V1beta1ValidatingAdmissionPolicyBindingSpec
        """

        self._spec = spec

    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}

        # Recursively convert nested models/lists/dicts; six.iteritems is the
        # generator's py2/py3 compatibility shim for dict iteration.
        for attr, _ in six.iteritems(self.openapi_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value

        return result

    def METHOD_NAME(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.METHOD_NAME()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        # Equality is structural: compare serialized dicts, not identity.
        if not isinstance(other, V1beta1ValidatingAdmissionPolicyBinding):
            return False

        return self.to_dict() == other.to_dict()

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        if not isinstance(other, V1beta1ValidatingAdmissionPolicyBinding):
            return True

        return self.to_dict() != other.to_dict()
import pytest
from awx.main.access import (
InstanceGroupAccess,
OrganizationAccess,
InventoryAccess,
JobTemplateAccess,
)
@pytest.mark.django_db
@pytest.mark.parametrize(
    "obj_perm,allowed,readonly,partial", [("admin_role", True, True, True), ("use_role", False, True, True), ("read_role", False, True, False)]
)
def METHOD_NAME(default_instance_group, rando, obj_perm, allowed, partial, readonly):
    """Check instance-group permissions granted by each object-level role.

    obj_perm is the role granted to the user; allowed/readonly/partial are the
    expected outcomes for admin-change / read / use respectively.
    """
    if obj_perm:
        getattr(default_instance_group, obj_perm).members.add(rando)
    # every role in the matrix grants read
    assert readonly == InstanceGroupAccess(rando).can_read(default_instance_group)
    # use requires at least use_role
    assert partial == InstanceGroupAccess(rando).can_use(default_instance_group)
    # creating instance groups is never allowed through this access class
    assert not InstanceGroupAccess(rando).can_add(default_instance_group)
    # admin and change both require admin_role
    assert allowed == InstanceGroupAccess(rando).can_admin(default_instance_group)
    assert allowed == InstanceGroupAccess(rando).can_change(default_instance_group, {'name': 'New Name'})
@pytest.mark.django_db
@pytest.mark.parametrize(
    "obj_perm,subobj_perm,allowed", [('admin_role', 'use_role', True), ('admin_role', 'read_role', False), ('admin_role', 'admin_role', True)]
)
def test_ig_role_based_associability(default_instance_group, rando, organization, job_template_factory, obj_perm, subobj_perm, allowed):
    """Attaching an instance group to a JT/inventory/org requires use (or
    admin) on the instance group in addition to admin on the target object."""
    objects = job_template_factory('jt', organization=organization, project='p', inventory='i', credential='c')
    if obj_perm:
        # grant the target-object role (always admin_role in this matrix)
        getattr(objects.job_template, obj_perm).members.add(rando)
        getattr(objects.inventory, obj_perm).members.add(rando)
        getattr(objects.organization, obj_perm).members.add(rando)
    if subobj_perm:
        # grant the instance-group role under test
        getattr(default_instance_group, subobj_perm).members.add(rando)

    assert allowed == JobTemplateAccess(rando).can_attach(objects.job_template, default_instance_group, 'instance_groups', None)
    assert allowed == InventoryAccess(rando).can_attach(objects.inventory, default_instance_group, 'instance_groups', None)
    assert allowed == OrganizationAccess(rando).can_attach(objects.organization, default_instance_group, 'instance_groups', None)
@pytest.mark.django_db
def test_ig_use_with_org_admin(default_instance_group, rando, org_admin):
    """use_role on an instance group grants visibility; being an org admin
    without any group role does not."""
    default_instance_group.use_role.members.add(rando)
    # org_admin has no role on the group, so it must be absent from their queryset
    assert list(InstanceGroupAccess(org_admin).get_queryset()) != [default_instance_group]
    assert list(InstanceGroupAccess(rando).get_queryset()) == [default_instance_group]
@pytest.mark.django_db
def test_ig_normal_user_visibility(organization, default_instance_group, user):
    """Plain users see no instance groups, even groups used by their org."""
    u = user('user', False)
    assert len(InstanceGroupAccess(u).get_queryset()) == 0
    organization.instance_groups.add(default_instance_group)
    organization.member_role.members.add(u)
    # membership in an org that uses the group still grants no visibility
    assert len(InstanceGroupAccess(u).get_queryset()) == 0
@pytest.mark.django_db
def test_ig_admin_user_visibility(organization, default_instance_group, admin, system_auditor, org_admin):
    """Superusers and system auditors see all instance groups; org admins none."""
    assert len(InstanceGroupAccess(admin).get_queryset()) == 1
    assert len(InstanceGroupAccess(system_auditor).get_queryset()) == 1
    assert len(InstanceGroupAccess(org_admin).get_queryset()) == 0
    organization.instance_groups.add(default_instance_group)
    # even after the group is attached to their org, org admins see nothing
    assert len(InstanceGroupAccess(org_admin).get_queryset()) == 0
@pytest.mark.django_db
def test_ig_normal_user_associability(organization, default_instance_group, user):
    """Plain org members may not attach instance groups to their organization."""
    u = user('user', False)
    access = OrganizationAccess(u)
    assert not access.can_attach(organization, default_instance_group, 'instance_groups', None)
    organization.instance_groups.add(default_instance_group)
    organization.member_role.members.add(u)
    # gaining org membership does not change associability
    assert not access.can_attach(organization, default_instance_group, 'instance_groups', None)
@pytest.mark.django_db
def test_ig_associability(organization, default_instance_group, admin, system_auditor, org_admin, org_member, job_template_factory):
    """Only the superuser may (un)attach instance groups to org/inventory/JT."""
    # (user, expected-allowed) in a fixed order: superuser yes, everyone else no.
    user_expectations = (
        (admin, True),
        (system_auditor, False),
        (org_admin, False),
        (org_member, False),
    )

    def check(access_cls, obj, operation):
        for actor, expected in user_expectations:
            method = getattr(access_cls(actor), operation)
            assert expected == method(obj, default_instance_group, 'instance_groups', None)

    check(OrganizationAccess, organization, 'can_attach')

    organization.instance_groups.add(default_instance_group)
    check(OrganizationAccess, organization, 'can_unattach')

    objects = job_template_factory('jt', organization=organization, project='p', inventory='i', credential='c')
    check(InventoryAccess, objects.inventory, 'can_attach')
    check(JobTemplateAccess, objects.job_template, 'can_attach')
# type_studio.py
import datetime
import logging
import os
import threading
from xml.dom import minidom
from xml.parsers.expat import ExpatError

from django.conf import settings
from django.template.defaultfilters import slugify

from pod.video.models import Video, get_storage_path_video
from pod.video_encode_transcript import encode

from ..models import Recording
from ..utils import add_comment, studio_clean_old_files
# Default recorder type used when none is configured in settings.
DEFAULT_RECORDER_TYPE_ID = getattr(settings, "DEFAULT_RECORDER_TYPE_ID", 1)
# Names of the encoding entry points looked up on the `encode` module.
ENCODE_VIDEO = getattr(settings, "ENCODE_VIDEO", "start_encode")
ENCODE_STUDIO = getattr(settings, "ENCODE_STUDIO", "start_encode_studio")
MEDIA_URL = getattr(settings, "MEDIA_URL", "/media/")
# Directory (under MEDIA_ROOT) where Opencast studio uploads are stored.
OPENCAST_FILES_DIR = getattr(settings, "OPENCAST_FILES_DIR", "opencast-files")
# Possible value are "mid", "piph" or "pipb"
OPENCAST_DEFAULT_PRESENTER = getattr(settings, "OPENCAST_DEFAULT_PRESENTER", "mid")

log = logging.getLogger(__name__)
def METHOD_NAME(recording):
    """Launch the encoding of `recording` in a background daemon thread.

    Returns immediately; the heavy lifting happens in encode_recording().

    Args:
        recording: the Recording to process.
    """
    log.info("START PROCESS OF RECORDING %s" % recording)
    t = threading.Thread(target=encode_recording, args=[recording])
    # Thread.setDaemon() is deprecated since Python 3.10; assign the
    # daemon attribute directly instead.
    t.daemon = True
    t.start()
def save_basic_video(recording, video_src):
    """Create a Video from a single source file, without any cutting.

    The new video inherits all publication settings (access restrictions,
    channels, themes, licence, ...) from the recording's recorder. The
    source file is moved (not copied) into the video storage location.

    Args:
        recording: Recording whose recorder provides the video settings.
        video_src: absolute path of the source media file.

    Returns:
        The saved Video instance (encoding is NOT triggered here).
    """
    # Save & encode one video corresponding to the recording without cut
    # We don't generate an intermediate video
    recorder = recording.recorder
    video = Video()
    # Video title corresponds to recording title
    video.title = recording.title
    video.owner = recording.user
    # Video type
    video.type = recorder.type
    # Video management
    storage_path = get_storage_path_video(video, os.path.basename(video_src))
    dt = str(datetime.datetime.now()).replace(":", "-")
    name, ext = os.path.splitext(os.path.basename(video_src))
    ext = ext.lower()
    # Timestamped, slugified filename avoids collisions between uploads.
    video.video = os.path.join(
        os.path.dirname(storage_path), slugify(name) + "_" + dt.replace(" ", "_") + ext
    )
    # Move source file to destination
    os.makedirs(os.path.dirname(video.video.path), exist_ok=True)
    os.rename(video_src, video.video.path)
    video.save()
    # Add any additional owners
    video.additional_owners.add(*recorder.additional_users.all())
    # Private access (draft mode)
    video.is_draft = recorder.is_draft
    # Restricted access (possibly to groups or by password)
    video.is_restricted = recorder.is_restricted
    video.restrict_access_to_groups.add(*recorder.restrict_access_to_groups.all())
    video.password = recorder.password
    # Add the possible channels
    video.channel.add(*recorder.channel.all())
    # Add the possible themes
    video.theme.add(*recorder.theme.all())
    # Add any disciplines
    video.discipline.add(*recorder.discipline.all())
    # Language choice
    video.main_lang = recorder.main_lang
    # Cursus
    video.cursus = recorder.cursus
    # Tags
    video.tags = recorder.tags
    # Transcription
    if getattr(settings, "USE_TRANSCRIPTION", False):
        video.transcript = recorder.transcript
    # Licence
    video.licence = recorder.licence
    # Allow downloading
    video.allow_downloading = recorder.allow_downloading
    # Is 360
    video.is_360 = recorder.is_360
    # Disable comments
    video.disable_comment = recorder.disable_comment
    # Add sites
    video.sites.add(*recorder.sites.all())
    # Finally save
    video.save()
    # Rename the XML file
    # os.rename(recording.source_file, recording.source_file + "_treated")
    studio_clean_old_files()
    return video
def generate_intermediate_video(recording, videos, clip_begin, clip_end, presenter):
    """Kick off the studio encoder to cut and/or merge the recorded tracks.

    The output video is written next to the recording's XML file, with the
    same basename and an .mp4 extension.
    """
    output_path = recording.source_file.replace(".xml", ".mp4")
    cut_options = get_subtime(clip_begin, clip_end)
    start_encode_studio = getattr(encode, ENCODE_STUDIO)
    start_encode_studio(recording.id, output_path, videos, cut_options, presenter)
def get_subtime(clip_begin, clip_end):
    """Build the ffmpeg cut options (-ss / -to) for the given clip bounds.

    Either bound may be falsy, in which case its option is omitted.
    """
    options = []
    if clip_begin:
        options.append("-ss %s " % str(clip_begin))
    if clip_end:
        options.append("-to %s " % str(clip_end))
    return "".join(options)
def encode_recording_id(recording_id):
    """Look up the Recording with `recording_id` and encode it."""
    encode_recording(Recording.objects.get(id=recording_id))
# flake ignore complexity with noqa: C901
def encode_recording(recording):
    """Parse the Opencast XML of `recording` and encode its video(s).

    Title and cut information are read from the attached catalogs. If a cut
    is requested or several tracks exist, an intermediate video is generated
    by the studio encoder; otherwise the single track is saved and encoded
    directly.

    Returns:
        -1 when the source XML cannot be parsed; None otherwise.
    """
    recording.comment = ""
    recording.save()
    add_comment(recording.id, "Start at %s\n--\n" % datetime.datetime.now())
    try:
        xmldoc = minidom.parse(recording.source_file)
    except (KeyError, OSError, ExpatError) as e:
        # Bug fix: minidom.parse raises ExpatError on malformed XML and
        # OSError on a missing/unreadable file — the original only caught
        # KeyError, which it never raises. KeyError kept for compatibility.
        add_comment(recording.id, "Error : %s" % e)
        return -1
    videos = getElementsByName(xmldoc, "track")
    catalogs = getElementsByName(xmldoc, "catalog")
    title = ""
    clip_begin = None
    clip_end = None
    # Presenter layout: fall back to the configured default for unknown values.
    att_presenter = getAttributeByName(xmldoc, "mediapackage", "presenter")
    presenter = (
        att_presenter
        if (att_presenter in ["mid", "piph", "pipb"])
        else OPENCAST_DEFAULT_PRESENTER
    )
    for catalog in catalogs:
        xmldoc = minidom.parse(catalog.get("src"))
        if catalog.get("type") == "dublincore/episode":
            title = getElementValueByName(xmldoc, "dcterms:title")
            change_title(recording, title)
        if catalog.get("type") == "smil/cutting":
            # Clip bounds come as e.g. "12.5s"; strip the unit and round.
            beginDefault = getAttributeByName(xmldoc, "video", "clipBegin")
            endDefault = getAttributeByName(xmldoc, "video", "clipEnd")
            clip_begin = (
                str(round(float(beginDefault.replace("s", "")), 2))
                if (beginDefault)
                else None
            )
            clip_end = (
                str(round(float(endDefault.replace("s", "")), 2))
                if (endDefault)
                else None
            )
    if clip_begin or clip_end or len(videos) > 1:
        # Cutting or multiple tracks: needs an intermediate merged video.
        msg = "*** generate_intermediate_video (%s) %s ***" % (
            videos[0].get("type"),
            videos[0].get("src"),
        )
        add_comment(recording.id, msg)
        generate_intermediate_video(recording, videos, clip_begin, clip_end, presenter)
    else:
        msg = "*** Management of basic video file (%s) %s ***" % (
            videos[0].get("type"),
            videos[0].get("src"),
        )
        add_comment(recording.id, msg)
        video = save_basic_video(
            recording,
            os.path.join(settings.MEDIA_ROOT, OPENCAST_FILES_DIR, videos[0].get("src")),
        )
        # encode video
        encode_video = getattr(encode, ENCODE_VIDEO)
        encode_video(video.id)
def change_title(recording, title):
    """Update and persist the recording title, ignoring empty strings."""
    if title == "":
        return
    recording.title = title
    recording.save()
def getAttributeByName(xmldoc, tagName, attribute):
    """Return `attribute` of the first `tagName` element, or None.

    Empty values and values containing the letter "e" (e.g. scientific
    notation floats) are rejected, matching the original behaviour.
    """
    matches = xmldoc.getElementsByTagName(tagName)
    if not matches:
        return None
    value = matches[0].getAttribute(attribute)
    return value if value and "e" not in value else None
def getElementsByName(xmldoc, name):
    """Collect {"type", "src"} dicts for each `name` element backed by a file.

    The <url> child is expected to contain a media URL; the portion after
    MEDIA_URL is rebased onto MEDIA_ROOT to get the on-disk path. Elements
    whose file does not exist on disk are silently skipped.

    NOTE(review): raises ValueError if the URL does not contain MEDIA_URL,
    and IndexError if a `name` element has no <url> child — presumably the
    Opencast XML guarantees both; confirm against the producer.
    """
    elements = []
    for element in xmldoc.getElementsByTagName(name):
        urlElement = element.getElementsByTagName("url")[0]
        if urlElement.firstChild and urlElement.firstChild.data != "":
            # Keep only the path portion after MEDIA_URL.
            element_path = urlElement.firstChild.data[
                urlElement.firstChild.data.index(MEDIA_URL) + len(MEDIA_URL):
            ]
            src = os.path.join(settings.MEDIA_ROOT, element_path)
            if os.path.isfile(src):
                elements.append(
                    {
                        "type": element.getAttribute("type"),
                        "src": src,
                    }
                )
    return elements
def getElementValueByName(xmldoc, name):
    """Return the text content of the first `name` element, or "" if empty."""
    node = xmldoc.getElementsByTagName(name)[0]
    child = node.firstChild
    if child is None or child.data == "":
        return ""
    return child.data
'''
copyright: Copyright (C) 2015-2022, Wazuh Inc.
Created by Wazuh, Inc. <info@wazuh.com>.
This program is free software; you can redistribute it and/or modify it under the terms of GPLv2
type: integration
brief: The 'wazuh-remoted' program is the server side daemon that communicates with the agents.
Specifically, these tests will check that the manager receives an event from a protocol
that is not allowed.
components:
- remoted
suite: agent_communication
targets:
- manager
daemons:
- wazuh-remoted
os_platform:
- linux
os_version:
- Arch Linux
- Amazon Linux 2
- Amazon Linux 1
- CentOS 8
- CentOS 7
- Debian Buster
- Red Hat 8
- Ubuntu Focal
- Ubuntu Bionic
references:
- https://documentation.wazuh.com/current/user-manual/reference/ossec-conf/remote.html
- https://documentation.wazuh.com/current/user-manual/agents/agent-life-cycle.html
tags:
- remoted
'''
import pytest
import os
from time import sleep
import wazuh_testing.tools.agent_simulator as ag
from wazuh_testing.tools.thread_executor import ThreadExecutor
from wazuh_testing.tools import LOG_FILE_PATH
from wazuh_testing.tools.configuration import load_wazuh_configurations
from wazuh_testing.tools.monitoring import FileMonitor
from wazuh_testing import remote as rd
from wazuh_testing import TCP, UDP
# Marks
pytestmark = pytest.mark.tier(level=0)

# Variables
current_test_path = os.path.dirname(os.path.realpath(__file__))
test_data_path = os.path.join(current_test_path, 'data')
configurations_path = os.path.join(test_data_path, 'wazuh_invalid_protocol_communication.yaml')
wazuh_log_monitor = FileMonitor(LOG_FILE_PATH)

# Set configuration
# Each case pins the manager's protocol/port; the test later sends events
# using the OPPOSITE protocol on the same port.
parameters = [
    {'PROTOCOL': TCP, 'PORT': 1514},
    {'PROTOCOL': UDP, 'PORT': 1514},
    {'PROTOCOL': TCP, 'PORT': 56000},
    {'PROTOCOL': UDP, 'PORT': 56000}
]
# Same cases in lowercase keys, exposed through get_configuration's metadata.
metadata = [
    {'protocol': TCP, 'port': 1514},
    {'protocol': UDP, 'port': 1514},
    {'protocol': TCP, 'port': 56000},
    {'protocol': UDP, 'port': 56000}
]
# Simulated agent parameters shared by every case.
agent_info = {
    'manager_address': '127.0.0.1',
    'os': 'debian7',
    'version': '4.2.0',
    'disable_all_modules': True
}
configuration_ids = [f"{item['PROTOCOL'].upper()}_{item['PORT']}" for item in parameters]

# Configuration data
configurations = load_wazuh_configurations(configurations_path, __name__, params=parameters, metadata=metadata)
def METHOD_NAME(protocol=TCP, manager_port=1514):
    """Check the communication between the agent-manager using different protocols.

    Args:
        protocol (str): It can be only TCP or UDP.
        manager_port (int): Manager remote communication port.

    Raises:
        ConnectionRefusedError: If communication could not be established with the socket.
        TimeoutError: If the event could not be found in the socket queue.
    """
    def send_event(event, protocol, manager_port):
        """Send an event to the manager"""
        print(f"Sending {protocol}")
        sender = ag.Sender(agent_info['manager_address'], protocol=protocol, manager_port=manager_port)
        try:
            sender.send_event(event)
        finally:
            # Always release the socket, even when the send fails.
            sender.socket.close()

    # Create agent and sender
    agent = ag.Agent(manager_address=agent_info['manager_address'], os=agent_info['os'], version=agent_info['version'])

    # Wait until remoted has loaded the new agent key
    rd.wait_to_remoted_key_update(wazuh_log_monitor)

    # Generate a custom event with a unique, searchable marker.
    search_pattern = f"test message from agent {agent.id}"
    agent_custom_message = f"1:/test.log:Feb 23 17:18:20 manager sshd[40657]: {search_pattern}"
    event = agent.create_event(agent_custom_message)

    send_event_thread = ThreadExecutor(send_event, {'event': event, 'protocol': protocol, 'manager_port': manager_port})

    # If protocol is TCP, then just send the message as the attempt to establish the connection will fail.
    if protocol == TCP:
        send_event_thread.start()
        send_event_thread.join()
    else:  # If protocol is UDP, then monitor the socket queue to verify that the event has not been received.
        socket_monitor_thread = ThreadExecutor(rd.check_queue_socket_event, {'raw_events': search_pattern,
                                                                             'timeout': 20})
        socket_monitor_thread.start()

        # Wait 3 seconds until socket monitor is fully initialized
        sleep(3)

        send_event_thread.start()
        send_event_thread.join()

        # Wait until socket monitor thread finishes
        socket_monitor_thread.join()
# Fixtures
@pytest.fixture(scope='module', params=configurations, ids=configuration_ids)
def get_configuration(request):
    """Get configurations from the module."""
    # One fixture instance per (protocol, port) case in `configurations`.
    return request.param
def test_invalid_protocol_communication(get_configuration, configure_environment, restart_remoted):
    '''
    description: Check that the manager does not receive any event from a protocol that is not allowed.
                 For this purpose, the test will swap the expected protocol before creating the simulated
                 agents. Then, an event will be created and a message sent using a protocol not allowed.
                 Finally, it will raise an error based on the protocol used.

    wazuh_min_version: 4.2.0

    tier: 0

    parameters:
        - get_configuration:
            type: fixture
            brief: Get configurations from the module.
        - configure_environment:
            type: fixture
            brief: Configure a custom environment for testing. Restart Wazuh is needed for applying the configuration.
        - restart_remoted:
            type: fixture
            brief: Reset ossec.log and start a new monitor.

    assertions:
        - Verify that the manager-agent connection is not established.
        - Verify that the manager does not establish a connection using a protocol not allowed.
        - Verify that no event is received in the socket queue.

    input_description: A configuration template (test_invalid_protocol_communication) is contained in an external YAML
                       file, (wazuh_invalid_protocol_communication.yaml). That template is combined with different test
                       cases defined in the module. Those include configuration settings for the 'wazuh-remoted' daemon
                       and agents info.

    expected_output:
        - The manager has established a TCP connection when only UDP is allowed.
        - The manager has received an event from a protocol not allowed.

    tags:
        - simulator
        - remoted
    '''
    manager_protocol = get_configuration['metadata']['protocol']
    manager_port = get_configuration['metadata']['port']

    # Swap protocols to send from an invalid protocol
    sender_protocol = TCP if manager_protocol == UDP else UDP

    if sender_protocol == TCP:
        # Check that the connection is not established
        with pytest.raises(ConnectionRefusedError):
            METHOD_NAME(sender_protocol, manager_port)
            # Reached only if the connection unexpectedly succeeded; the
            # ValueError does not match the expected exception type, so it
            # propagates and fails the test with a descriptive message.
            raise ValueError('The manager has established a TCP connection when only UDP is allowed.')
    else:
        # Check that no event is received in the socket queue
        with pytest.raises(TimeoutError):
            METHOD_NAME(sender_protocol, manager_port)
            # Same pattern: reached only if the event was unexpectedly received.
            raise ValueError('The manager has received an event from a protocol not allowed.')
7,420 | load coords | #A* -------------------------------------------------------------------
#B* This file contains source code for the PyMOL computer program
#C* Copyright (c) Schrodinger, LLC.
#D* -------------------------------------------------------------------
#E* It is unlawful to modify or remove this copyright notice.
#F* -------------------------------------------------------------------
#G* Please see the accompanying LICENSE file for further information.
#H* -------------------------------------------------------------------
#I* Additional authors of this source file include:
#-*
#-*
#-*
#Z* -------------------------------------------------------------------
if True:
from . import selector
from .cmd import _cmd,lock,unlock,Shortcut,QuietException, \
DEFAULT_ERROR, DEFAULT_SUCCESS, _raising, is_ok, is_error
cmd = __import__("sys").modules["pymol.cmd"]
import threading
import pymol
import string
def get_bond_print(obj, max_bond, max_type, _self=cmd):
    """Print bond information for *obj* (unsupported internal debugging helper).

    Holds the PyMOL API lock around the C-level call and re-raises failures
    as pymol.CmdException.
    """
    r = DEFAULT_ERROR
    try:
        _self.lock(_self)
        r = _cmd.get_bond_print(_self._COb, str(obj), int(max_bond), int(max_type))
    finally:
        # unlock is given the result code so errors are surfaced consistently.
        _self.unlock(r, _self)
    if _self._raising(r, _self):
        raise pymol.CmdException
    return r
def spheroid(object="", average=0, _self=cmd):  # EXPERIMENTAL
    '''
DESCRIPTION

    "spheroid" averages trajectory frames together to create
    an ellipsoid-like approximation of the actual anisotropic
    motion exhibited by the atom over a series of trajectory frames.

USAGE

    spheroid object, average

    average = number of states to average for each resulting spheroid state
    '''
    print("Warning: 'spheroid' is experimental, incomplete, and unstable.")
    # lockcm is the context-manager form of the lock/unlock pair used elsewhere.
    with _self.lockcm:
        r = _cmd.spheroid(_self._COb, str(object), int(average))
    return r
def mem(_self=cmd):
    '''
DESCRIPTION

    "mem" Dumps current memory state to standard output. This is a
    debugging feature, not an official part of the API.
    '''
    r = DEFAULT_ERROR
    try:
        _self.lock(_self)
        r = _cmd.mem(_self._COb)
    finally:
        _self.unlock(r, _self)
    if _self._raising(r, _self):
        raise pymol.CmdException
    return r
def check(selection=None, preserve=0):
    '''
DESCRIPTION

    "check" is unsupported command that may eventually have something
    to do with assigning forcefield parameters to a selection of
    atoms.
    '''
    # NOTE: the realtime module relies on code that is not yet part of PyMOL/ChemPy
    from chempy.tinker import realtime
    if selection is None:
        # Default to (at most) the first loaded object, if any exist.
        first_objects = cmd.get_names("objects")[0:1]
        if first_objects:
            selection = first_objects
    if selection is not None:
        processed = selector.process(selection)
        wrapped = "(" + processed + ")"
        realtime.assign(wrapped, int(preserve))
        realtime.setup(wrapped)
def fast_minimize(*args, **kwargs):
    '''
DESCRIPTION

    "fast_minimize" is an unsupported nonfunctional command that may
    eventually have something to do with doing a quick clean up of the
    molecular structure.
    '''
    # Same as minimize(), but forces the setup step to be skipped.
    kwargs['_setup'] = 0
    return minimize(*args, **kwargs)
def minimize(sele='', iter=500, grad=0.01, interval=50, _setup=1, _self=cmd):
    '''
DESCRIPTION

    "minimize" is an unsupported nonfunctional command that may
    eventually have something to do with minimization.
    '''
    from chempy.tinker import realtime
    if not sele:
        names = _self.get_names("objects")
        if not names:
            return
        # Default to the first loaded object.
        sele = names[0]
    sele = '(' + sele + ')'
    if not int(_setup) or realtime.setup(sele):
        # Run the minimizer asynchronously so the interface stays responsive.
        _self.async_(realtime.mini, int(iter), float(grad), int(interval), sele)
    else:
        print(" minimize: missing parameters, can't continue")
def dump(fnam, obj, state=1, quiet=1, _self=cmd):
    '''
DESCRIPTION

    The dump command writes the geometry of an isosurface, isomesh,
    isodot, or map object to a simple text file. Each line contains one
    vertex in case of representations, or one grid point in case of a map.

    For surface objects, XYZ coordinates and the normal are exported.
    Three lines make one triangle (like GL_TRIANGLES).

    For mesh objects, XYZ coordinates are exported (no normals).
    The vertices form line strips (like GL_LINE_STRIP), a blank
    line starts a new strip.

    For dot objects, XYZ coordinates are exported.

    For map objects, XYZ coordinates and the value at the point are
    exported. This forms a grid map.

USAGE

    dump filename, object, state=1, quiet=1

ARGUMENTS

    filename = str: file that will be written

    object = str: object name

EXAMPLE

    fetch 1ubq, mymap, type=2fofc, async=0
    dump gridmap.txt, mymap

    isosurface mysurface, mymap
    dump surfacegeometry.txt, mysurface

    isomesh mymesh, mymap
    dump meshgeometry.txt, mymesh

    isodot mydot, mymap, quiet=1
    dump dotgeometry.txt, mydot

SEE ALSO

    COLLADA export
    '''
    r = DEFAULT_ERROR
    try:
        _self.lock(_self)
        # The public API is 1-based; the C layer expects a 0-based state index.
        r = _cmd.dump(_self._COb, str(fnam), obj, int(state) - 1, int(quiet))
    finally:
        _self.unlock(r, _self)
    if _self._raising(r, _self):
        raise pymol.CmdException
    return r
def dummy(*arg):
    """No-op placeholder: accept any positional arguments, return None."""
    return None
def test(group=0, index=0, _self=cmd):  # generic test routine for development
    '''
DESCRIPTION

    "test" is an unsupported internal command.
    '''
    r = DEFAULT_ERROR
    try:
        _self.lock(_self)
        r = _cmd.test(_self._COb, int(group), int(index))
    finally:
        _self.unlock(r, _self)
    if _self._raising(r, _self):
        raise pymol.CmdException
    return r
def METHOD_NAME(model, oname, state=1):  # UNSUPPORTED
    '''
    WARNING: buggy argument list, state get's decremented twice!

    Legacy wrapper around pymol.importing.load_coordset. Per the warning
    above, the `int(state)-1` here appears to combine with another decrement
    further down the call chain -- TODO confirm before relying on *state*.
    '''
    return pymol.importing.load_coordset(model, oname, int(state)-1)
def focal_blur(aperture=2.0, samples=10, ray=0, filename='', quiet=1, _self=cmd):
    '''
DESCRIPTION

    Creates fancy figures by introducing a focal blur to the image.
    The object at the origin will be in focus.

USAGE

    focal_blur [ aperture [, samples [, ray [, filename ]]]]

ARGUMENTS

    aperture = float: aperture angle in degrees {default: 2.0}

    samples = int: number of images for averaging {default: 10}

    ray = 0/1: {default: 0}

    filename = str: write image to file {default: temporary}

AUTHORS

    Jarl Underhaug, Jason Vertrees and Thomas Holder

EXAMPLES

    focal_blur 3.0, 50
    '''
    # Open-source stub: implementation ships only with Incentive PyMOL builds.
    raise pymol.IncentiveOnlyException()
def callout(name, label, pos='', screen='auto', state=-1, color='front',
            quiet=1, _self=cmd):
    '''
DESCRIPTION

    Create a new screen-stabilized callout object.

ARGUMENTS

    name = str: object name

    label = str: label text

    pos = str or list: anchor in model space as 3-float coord list or atom
    selection. If empty, don't draw an arrow. {default: }

    screen = str or list: position on screen as 2-float list between [-1,-1]
    (lower left) and [1,1] (upper right) or "auto" for smart placement.
    {default: auto}
    '''
    # Open-source stub: implementation ships only with Incentive PyMOL builds.
    raise pymol.IncentiveOnlyException()
def desaturate(selection="all", a=0.5, quiet=1, _self=cmd):
    '''
DESCRIPTION

    Desaturate the colors in the given selection.

ARGUMENTS

    selection = str: atom selection {default: all}

    a = float [0..1]: desaturation factor {default: 0.5}
    '''
    # Open-source stub: implementation ships only with Incentive PyMOL builds.
    raise pymol.IncentiveOnlyException()
7,421 | list featureset version features output | # coding=utf-8
# *** WARNING: this file was generated by pulumi. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import copy
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
__all__ = [
'ListFeaturesetVersionFeaturesResult',
'AwaitableListFeaturesetVersionFeaturesResult',
'list_featureset_version_features',
'list_featureset_version_features_output',
]
@pulumi.output_type
class ListFeaturesetVersionFeaturesResult:
    """
    A paginated list of Feature entities.
    """
    def __init__(__self__, next_link=None, value=None):
        # Validate eagerly so a malformed RPC payload fails fast with a clear error.
        if next_link and not isinstance(next_link, str):
            raise TypeError("Expected argument 'next_link' to be a str")
        pulumi.set(__self__, "next_link", next_link)
        if value and not isinstance(value, list):
            raise TypeError("Expected argument 'value' to be a list")
        pulumi.set(__self__, "value", value)

    @property
    @pulumi.getter(name="nextLink")
    def next_link(self) -> Optional[str]:
        """
        The link to the next page of Feature objects. If null, there are no additional pages.
        """
        return pulumi.get(self, "next_link")

    @property
    @pulumi.getter
    def value(self) -> Optional[Sequence['outputs.FeatureResponse']]:
        """
        An array of objects of type Feature.
        """
        return pulumi.get(self, "value")
class AwaitableListFeaturesetVersionFeaturesResult(ListFeaturesetVersionFeaturesResult):
    # pylint: disable=using-constant-test
    def __await__(self):
        # The unreachable `yield` turns __await__ into a generator function,
        # which lets this already-resolved result be awaited like a coroutine.
        if False:
            yield self
        return ListFeaturesetVersionFeaturesResult(
            next_link=self.next_link,
            value=self.value)
def list_featureset_version_features(name: Optional[str] = None,
                                     resource_group_name: Optional[str] = None,
                                     skip: Optional[str] = None,
                                     tags: Optional[str] = None,
                                     version: Optional[str] = None,
                                     workspace_name: Optional[str] = None,
                                     opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableListFeaturesetVersionFeaturesResult:
    """
    A paginated list of Feature entities.


    :param str name: Featureset name. This is case-sensitive.
    :param str resource_group_name: The name of the resource group. The name is case insensitive.
    :param str skip: Continuation token for pagination.
    :param str tags: Comma-separated list of tag names (and optionally values). Example: tag1,tag2=value2
    :param str version: Featureset Version identifier. This is case-sensitive.
    :param str workspace_name: Name of Azure Machine Learning workspace.
    """
    __args__ = dict()
    __args__['name'] = name
    __args__['resourceGroupName'] = resource_group_name
    __args__['skip'] = skip
    __args__['tags'] = tags
    __args__['version'] = version
    __args__['workspaceName'] = workspace_name
    # Merge caller-supplied options with provider defaults before invoking.
    opts = pulumi.InvokeOptions.merge(_utilities.get_invoke_opts_defaults(), opts)
    __ret__ = pulumi.runtime.invoke('azure-native:machinelearningservices/v20230201preview:listFeaturesetVersionFeatures', __args__, opts=opts, typ=ListFeaturesetVersionFeaturesResult).value

    return AwaitableListFeaturesetVersionFeaturesResult(
        next_link=pulumi.get(__ret__, 'next_link'),
        value=pulumi.get(__ret__, 'value'))
@_utilities.lift_output_func(list_featureset_version_features)
def METHOD_NAME(name: Optional[pulumi.Input[str]] = None,
                resource_group_name: Optional[pulumi.Input[str]] = None,
                skip: Optional[pulumi.Input[Optional[str]]] = None,
                tags: Optional[pulumi.Input[Optional[str]]] = None,
                version: Optional[pulumi.Input[str]] = None,
                workspace_name: Optional[pulumi.Input[str]] = None,
                opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[ListFeaturesetVersionFeaturesResult]:
    """
    A paginated list of Feature entities.


    :param str name: Featureset name. This is case-sensitive.
    :param str resource_group_name: The name of the resource group. The name is case insensitive.
    :param str skip: Continuation token for pagination.
    :param str tags: Comma-separated list of tag names (and optionally values). Example: tag1,tag2=value2
    :param str version: Featureset Version identifier. This is case-sensitive.
    :param str workspace_name: Name of Azure Machine Learning workspace.
    """
    # Body intentionally empty: lift_output_func wraps the plain invoke
    # function above and supplies the Output-returning implementation.
    ...
7,422 | pformat | """
Helpers for logging.
This module needs much love to become useful.
"""
# Author: Gael Varoquaux <gael dot varoquaux at normalesup dot org>
# Copyright (c) 2008 Gael Varoquaux
# License: BSD Style, 3 clauses.
from __future__ import print_function
import time
import sys
import os
import shutil
import logging
import pprint
from .disk import mkdirp
def _squeeze_time(t):
"""Remove .1s to the time under Windows: this is the time it take to
stat files. This is needed to make results similar to timings under
Unix, for tests
"""
if sys.platform.startswith('win'):
return max(0, t - .1)
else:
return t
def format_time(t):
    """Render *t* seconds as both seconds and minutes, e.g. '90.0s, 1.5min'."""
    squeezed = _squeeze_time(t)
    minutes = squeezed / 60.
    return "%.1fs, %.1fmin" % (squeezed, minutes)
def short_format_time(t):
    """Compact single-unit rendering: minutes above 60s, seconds otherwise."""
    squeezed = _squeeze_time(t)
    if squeezed > 60:
        return "%4.1fmin" % (squeezed / 60.)
    return " %5.1fs" % (squeezed)
def METHOD_NAME(obj, indent=0, depth=3):
    """Return a pretty-printed representation of *obj*.

    If numpy has already been imported, its print options are temporarily
    tightened (precision/threshold/edgeitems) so that large arrays render
    compactly, and restored afterwards -- even if formatting raises.

    Parameters
    ----------
    obj: object
        The object to format.
    indent: int, optional
        Indentation passed to pprint.pformat.
    depth: int, optional
        Maximum nesting depth printed before eliding with '...'.
    """
    if 'numpy' in sys.modules:
        import numpy as np
        print_options = np.get_printoptions()
        np.set_printoptions(precision=6, threshold=64, edgeitems=1)
    else:
        print_options = None
    try:
        # Fixed: this previously called a non-existent pprint attribute
        # (a recursive self-reference), raising AttributeError at runtime.
        out = pprint.pformat(obj, depth=depth, indent=indent)
    finally:
        # Restore numpy's global print options even on error.
        if print_options:
            np.set_printoptions(**print_options)
    return out
###############################################################################
# class `Logger`
###############################################################################
class Logger(object):
    """ Base class for logging messages.
    """

    def __init__(self, depth=3, name=None):
        """
            Parameters
            ----------
            depth: int, optional
                The depth of objects printed.
            name: str, optional
                The namespace to log to. If None, defaults to joblib.
        """
        self.depth = depth
        self._name = name if name else 'joblib'

    def warn(self, msg):
        # Routed through the named logger so applications can filter it.
        logging.getLogger(self._name).warning("[%s]: %s" % (self, msg))

    def info(self, msg):
        # NOTE(review): goes to the root logger, unlike warn/debug which use
        # the named logger -- presumably historical; confirm before changing.
        logging.info("[%s]: %s" % (self, msg))

    def debug(self, msg):
        # XXX: This conflicts with the debug flag used in children class
        logging.getLogger(self._name).debug("[%s]: %s" % (self, msg))

    def format(self, obj, indent=0):
        """Return the formatted representation of the object."""
        return METHOD_NAME(obj, indent=indent, depth=self.depth)
###############################################################################
# class `PrintTime`
###############################################################################
class PrintTime(object):
    """ Print and log messages while keeping track of time.
    """

    def __init__(self, logfile=None, logdir=None):
        """
        Parameters
        ----------
        logfile: str, optional
            Explicit path of the log file to write to.
        logdir: str, optional
            Directory in which a 'joblib.log' file is created.
            Mutually exclusive with *logfile*.
        """
        if logfile is not None and logdir is not None:
            raise ValueError('Cannot specify both logfile and logdir')
        # XXX: Need argument docstring
        self.last_time = time.time()
        self.start_time = self.last_time
        if logdir is not None:
            logfile = os.path.join(logdir, 'joblib.log')
        self.logfile = logfile
        if logfile is not None:
            mkdirp(os.path.dirname(logfile))
            if os.path.exists(logfile):
                # Rotate the logs: shift joblib.log.1..8 up by one slot,
                # deliberately ignoring any failure (e.g. missing file).
                for i in range(1, 9):
                    try:
                        shutil.move(logfile + '.%i' % i,
                                    logfile + '.%i' % (i + 1))
                    except:  # noqa: E722
                        "No reason failing here"
                # Use a copy rather than a move, so that a process
                # monitoring this file does not get lost.
                try:
                    shutil.copy(logfile, logfile + '.1')
                except:  # noqa: E722
                    "No reason failing here"
            try:
                with open(logfile, 'w') as logfile:
                    logfile.write('\nLogging joblib python script\n')
                    logfile.write('\n---%s---\n' % time.ctime(self.last_time))
            except:  # noqa: E722
                """ Multiprocessing writing to files can create race
                    conditions. Rather fail silently than crash the
                    computation.
                """
                # XXX: We actually need a debug flag to disable this
                # silent failure.

    def __call__(self, msg='', total=False):
        """ Print the time elapsed between the last call and the current
            call, with an optional message.
        """
        if not total:
            time_lapse = time.time() - self.last_time
            full_msg = "%s: %s" % (msg, format_time(time_lapse))
        else:
            # FIXME: Too much logic duplicated
            time_lapse = time.time() - self.start_time
            full_msg = "%s: %.2fs, %.1f min" % (msg, time_lapse,
                                                time_lapse / 60)
        print(full_msg, file=sys.stderr)
        if self.logfile is not None:
            try:
                with open(self.logfile, 'a') as f:
                    print(full_msg, file=f)
            except:  # noqa: E722
                """ Multiprocessing writing to files can create race
                    conditions. Rather fail silently than crash the
                    calculation.
                """
                # XXX: We actually need a debug flag to disable this
                # silent failure.
        self.last_time = time.time()
7,423 | make table | import matplotlib.pyplot as plt
import numpy
import model_linear_head_head_1d
from amanzi_xml.observations.ObservationXMLv2 import ObservationXMLv2 as ObsXML
from amanzi_xml.observations.ObservationData import ObservationData as ObsDATA
import amanzi_xml.utils.search as search
import prettytable
import os
# load input xml file
# -- create an ObservationXML object
def loadInputXML(filename):
    """Parse the Amanzi input XML and return an ObservationXML wrapper."""
    Obs_xml = ObsXML(filename)
    return Obs_xml
# load the data file
# -- use above xml object to get observation filename
# -- create an ObservationData object
# -- load the data using this object
def loadDataFile(Obs_xml):
    """Load the observation data referenced by *Obs_xml* and attach coordinates."""
    output_file = Obs_xml.getObservationFilename()
    Obs_data = ObsDATA("amanzi-output/" + output_file)
    Obs_data.getObservationData()
    coords = Obs_xml.getAllCoordinates()

    # Tag each observation with the coordinate of its region.
    for obs in Obs_data.observations.values():
        region = obs.region
        obs.coordinate = coords[region]
    return Obs_data
def plotTestObservations(Obs_xml, Obs_data, axes1):
    """Scatter-plot the Amanzi pressure observations; return the z->color map."""
    # === SPECIAL CODE ==== for linear flow problems

    # Collect the z-values from observations
    z_vals = [coord[2] for coord in Obs_xml.coordinates.values()]
    z_vals.sort()
    # NOTE(review): converting to a set after sorting discards the ordering,
    # so the z->color pairing below is not guaranteed to follow sorted order
    # -- confirm whether deterministic color assignment is intended.
    z_vals = set(z_vals)
    colors = ['b', 'g', 'r']
    cmap = dict((zval, color) for (zval, color) in zip(z_vals, colors))

    # Create dictionary for scatter plots
    scatter_data = {}
    for key in list(cmap.keys()):
        scatter_data[key] = {}
        scatter_data[key]['x'] = []
        scatter_data[key]['pressure'] = []

    # Collect observations in scatter_data
    for obs in Obs_data.observations.values():
        scatter_data[obs.coordinate[2]]['x'].append(obs.coordinate[0])
        scatter_data[obs.coordinate[2]]['pressure'].append(obs.data)

    # Plot the observations
    for key in list(cmap.keys()):
        axes1.scatter(scatter_data[key]['x'], scatter_data[key]['pressure'], c=cmap[key], marker='s', s=25, label='Amanzi')

    # Set labels and title
    axes1.set_xlabel('x-coordinate [m]')
    axes1.set_ylabel('Pressure [Pa]')
    axes1.set_title('Aqueous Pressure vs Distance')

    return cmap
def plotTestModel(filename, cmap, axes1, Obs_xml, Obs_data):
    """Overlay the analytic pressure solution on the observation plot."""
    # Instantiate the analytic solution
    mymodel = model_linear_head_head_1d.createFromXML(filename)
    table_values = []  # NOTE(review): unused here -- presumably leftover; confirm.

    # Create a set of points to plot the solution
    x = numpy.linspace(mymodel.x_0, mymodel.x_1, 11)
    coords = numpy.zeros((11, 2))
    coords[:, 0] = x

    # Plot a line for each z-coordinate in the observations
    for (z_val, color) in cmap.items():
        coords[:, 1] = z_val
        pressure = mymodel.pressure(coords)
        axes1.plot(x, pressure, color, label='$z = %0.2f $' % z_val)
    axes1.legend(loc="upper right", fancybox=True, shadow=True)
def METHOD_NAME(Obs_data, Obs_xml, filename):
    """Write a table comparing analytic and Amanzi pressures to table_values.txt."""
    pressure_amanzi = []
    coordinates = []
    mymodel = model_linear_head_head_1d.createFromXML(filename)
    for obs in Obs_data.observations.values():
        coordinates.append([obs.coordinate[0], obs.coordinate[2]])
        # obs.data stringifies like '[value]'; strip the surrounding brackets.
        pressure_amanzi.append(str(obs.data).rstrip(']').lstrip('['))

    pressure_analytic = list(mymodel.pressure(numpy.array(coordinates)))

    x = prettytable.PrettyTable(["x [m]", "z [m]", "Analytic [Pa]", "Amanzi [Pa]"])
    x.padding_width = 2
    x.hrules = 1
    x.horizontal_header_char = "="

    for coords, p_analytic, p_amanzi in zip(coordinates, pressure_analytic, pressure_amanzi):
        x.add_row([coords[0], coords[1], "%.4f" % float(p_analytic), "%.4f" % float(p_amanzi)])

    # NOTE(review): the explicit remove looks redundant -- mode "w+" truncates
    # an existing file anyway; confirm before simplifying.
    if os.path.exists("table_values.txt"):
        os.remove("table_values.txt")

    table_file = open("table_values.txt", "w+")
    table_file.write('.. tabularcolumns:: ' + '|R|C|C|C|' + '\n\n')
    table_file.write(x.get_string(sortby="x [m]"))
    table_file.write('\n')
    table_file.close()
if __name__ == "__main__":
    import os
    import run_amanzi_standard

    input_file = "amanzi_linear_head_head_1d-u.xml"
    run_dir = "amanzi-output"

    try:
        # Run Amanzi on a single process, then post-process its observations.
        max_np = 1
        run_amanzi_standard.run_amanzi(input_file, max_np, [input_file], run_dir)
        obs_xml = loadInputXML(input_file)
        obs_data = loadDataFile(obs_xml)

        fig1 = plt.figure()
        axes1 = fig1.add_axes([.15, .15, .80, .80])

        METHOD_NAME(obs_data, obs_xml, input_file)
        cmap = plotTestObservations(obs_xml, obs_data, axes1)
        plotTestModel(input_file, cmap, axes1, obs_xml, obs_data)
        # plt.show()
    finally:
        # No cleanup currently required; the finally block is a placeholder.
        pass
7,424 | get scope | # Copyright Contributors to the Amundsen project.
# SPDX-License-Identifier: Apache-2.0
from typing import (
Any, Dict, Iterator, Optional,
)
from pyhocon import ConfigFactory, ConfigTree
from databuilder.extractor.base_extractor import Extractor
from databuilder.extractor.dashboard.databricks_sql.databricks_sql_dashboard_utils import (
DatabricksSQLPaginatedRestApiQuery, generate_dashboard_description, get_text_widgets, get_visualization_widgets,
sort_widgets,
)
from databuilder.extractor.restapi.rest_api_extractor import REST_API_QUERY, RestAPIExtractor
from databuilder.models.dashboard.dashboard_chart import DashboardChart
from databuilder.models.dashboard.dashboard_last_modified import DashboardLastModifiedTimestamp
from databuilder.models.dashboard.dashboard_metadata import DashboardMetadata
from databuilder.models.dashboard.dashboard_owner import DashboardOwner
from databuilder.models.dashboard.dashboard_query import DashboardQuery
from databuilder.rest_api.base_rest_api_query import EmptyRestApiQuerySeed
from databuilder.rest_api.rest_api_query import RestApiQuery
from databuilder.transformer.base_transformer import ChainedTransformer
from databuilder.transformer.timestamp_string_to_epoch import FIELD_NAME as TS_FIELD_NAME, TimestampStringToEpoch
class DatabricksSQLDashboardExtractor(Extractor):
    """
    An extractor for retrieving dashboards, queries, and visualizations
    from Databricks SQL (https://databricks.com/product/databricks-sql)
    """

    # Configuration keys expected in the supplied ConfigTree.
    DATABRICKS_HOST_KEY = "databricks_host"
    DATABRICKS_API_TOKEN_KEY = "databricks_api_token"

    # Identifiers grouping all extracted dashboards under one product/group.
    PRODUCT = "databricks-sql"
    DASHBOARD_GROUP_ID = "databricks-sql"
    DASHBOARD_GROUP_NAME = "Databricks SQL"

    def init(self, conf: ConfigTree) -> None:
        """Read required config values and build the REST query pipeline."""
        # Required configuration
        self._databricks_host = conf.get_string(
            DatabricksSQLDashboardExtractor.DATABRICKS_HOST_KEY
        )
        self._databricks_api_token = conf.get_string(
            DatabricksSQLDashboardExtractor.DATABRICKS_API_TOKEN_KEY
        )

        # NOTE: The dashboards api is currently in preview. When it gets moved out of preview
        # this will break and it will need to be changed
        self._databricks_sql_dashboards_api_base = (
            f"{self._databricks_host}/api/2.0/preview/sql/dashboards"
        )

        self._extractor = self._build_extractor()
        self._transformer = self._build_transformer()
        self._extract_iter: Optional[Iterator[Any]] = None

    def _get_databrick_request_headers(self) -> Dict[str, str]:
        """Bearer-token auth headers for every Databricks API request."""
        return {
            "Authorization": f"Bearer {self._databricks_api_token}",
        }

    def _get_extract_iter(self) -> Iterator[Any]:
        """Yield Amundsen dashboard models for each extracted dashboard record."""
        while True:
            record = self._extractor.extract()
            if not record:
                break

            # Normalize ISO timestamps to unix epochs via the transformer chain.
            record = next(self._transformer.transform(record=record), None)

            dashboard_identity_data = {
                "dashboard_group_id": DatabricksSQLDashboardExtractor.DASHBOARD_GROUP_ID,
                "dashboard_id": record["dashboard_id"],
                "product": "databricks-sql",
            }

            dashboard_data = {
                "dashboard_group": DatabricksSQLDashboardExtractor.DASHBOARD_GROUP_NAME,
                "dashboard_name": record["dashboard_name"],
                "dashboard_url": f"{self._databricks_host}/sql/dashboards/{record['dashboard_id']}",
                "dashboard_group_url": self._databricks_host,
                "created_timestamp": record["created_timestamp"],
                "tags": record["tags"],
            }

            dashboard_owner_data = {"email": record["user"]["email"]}
            dashboard_owner_data.update(dashboard_identity_data)
            yield DashboardOwner(**dashboard_owner_data)

            dashboard_last_modified_data = {
                "last_modified_timestamp": record["last_modified_timestamp"],
            }
            dashboard_last_modified_data.update(dashboard_identity_data)
            yield DashboardLastModifiedTimestamp(**dashboard_last_modified_data)

            if "widgets" in record:
                widgets = sort_widgets(record["widgets"])
                text_widgets = get_text_widgets(widgets)
                viz_widgets = get_visualization_widgets(widgets)

                # The dashboard description is synthesized from its widgets.
                dashboard_data["description"] = generate_dashboard_description(
                    text_widgets, viz_widgets
                )

                for viz in viz_widgets:
                    dashboard_query_data = {
                        "query_id": str(viz.query_id),
                        "query_name": viz.query_name,
                        "url": self._databricks_host + viz.query_relative_url,
                        "query_text": viz.raw_query,
                    }
                    dashboard_query_data.update(dashboard_identity_data)
                    yield DashboardQuery(**dashboard_query_data)

                    dashboard_chart_data = {
                        "query_id": str(viz.query_id),
                        "chart_id": str(viz.visualization_id),
                        "chart_name": viz.visualization_name,
                        "chart_type": viz.visualization_type,
                    }
                    dashboard_chart_data.update(dashboard_identity_data)
                    yield DashboardChart(**dashboard_chart_data)

            dashboard_data.update(dashboard_identity_data)
            yield DashboardMetadata(**dashboard_data)

    def extract(self) -> Any:
        """Return the next extracted model, or None when exhausted."""
        if not self._extract_iter:
            self._extract_iter = self._get_extract_iter()
        try:
            return next(self._extract_iter)
        except StopIteration:
            return None

    def _build_extractor(self) -> RestAPIExtractor:
        """Build the REST extractor driven by the chained dashboard queries."""
        extractor = RestAPIExtractor()
        rest_api_extractor_conf = ConfigFactory.from_dict(
            {REST_API_QUERY: self._build_restapi_query()}
        )
        extractor.init(rest_api_extractor_conf)
        return extractor

    def _build_transformer(self) -> ChainedTransformer:
        """Chain transformers that convert the two ISO timestamps to epochs."""
        transformers = []

        # transform timestamps from ISO to unix epoch
        ts_transformer_1 = TimestampStringToEpoch()
        ts_transformer_1.init(
            ConfigFactory.from_dict({TS_FIELD_NAME: "created_timestamp"})
        )
        transformers.append(ts_transformer_1)

        ts_transformer_2 = TimestampStringToEpoch()
        ts_transformer_2.init(
            ConfigFactory.from_dict({TS_FIELD_NAME: "last_modified_timestamp"})
        )
        transformers.append(ts_transformer_2)

        return ChainedTransformer(transformers=transformers)

    def _build_restapi_query(self) -> RestApiQuery:
        """List dashboards (paginated), then fetch each dashboard's widgets."""
        databricks_sql_dashboard_query = DatabricksSQLPaginatedRestApiQuery(
            query_to_join=EmptyRestApiQuerySeed(),
            url=self._databricks_sql_dashboards_api_base,
            params={"headers": self._get_databrick_request_headers()},
            json_path="results[*].[id,name,tags,updated_at,created_at,user]",
            field_names=[
                "dashboard_id",
                "dashboard_name",
                "tags",
                "last_modified_timestamp",
                "created_timestamp",
                "user",
            ],
            skip_no_results=True,
        )
        # NOTE(review): spelled skip_no_result here vs skip_no_results above --
        # the two query classes appear to use different keyword names; confirm.
        return RestApiQuery(
            query_to_join=databricks_sql_dashboard_query,
            url=f"{self._databricks_sql_dashboards_api_base}/{{dashboard_id}}",
            params={"headers": self._get_databrick_request_headers()},
            json_path="widgets",
            field_names=["widgets"],
            skip_no_result=True,
        )

    def METHOD_NAME(self) -> str:
        """Configuration scope under which this extractor is configured."""
        return "extractor.databricks_sql_extractor"
7,425 | test match optional nodes | # Copyright (c) 2021 - present / Neuralmagic, Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import onnx
import pytest
from sparseml.exporters.transforms.utils.matching import (
INITIALIZER_MATCH,
any_of,
get_structural_matches,
optional_node,
)
from sparseml.onnx.utils.graph_editor import ONNXGraph
@pytest.fixture()
def onnx_graph() -> ONNXGraph:
    """
    Creates a graph that looks like:
    init1
      |
     id1   input
      |      |
       \\   /
        add1   init2
          |   /
          |  /
          add2
           |
         output
    """
    model_input = onnx.helper.make_tensor_value_info(
        "input", onnx.TensorProto.FLOAT, (1,)
    )
    model_output = onnx.helper.make_tensor_value_info(
        "output", onnx.TensorProto.FLOAT, (1,)
    )
    # Two scalar initializers: one feeds id1, the other feeds add2 directly.
    init1 = onnx.helper.make_tensor(
        name="init1", data_type=onnx.TensorProto.FLOAT, dims=(1,), vals=[1]
    )
    init2 = onnx.helper.make_tensor(
        name="init2", data_type=onnx.TensorProto.FLOAT, dims=(1,), vals=[2]
    )
    id1 = onnx.helper.make_node("Identity", ["init1"], ["id1_output"], name="id1")
    add1 = onnx.helper.make_node(
        "Add", ["id1_output", "input"], ["add1_output"], name="add1"
    )
    add2 = onnx.helper.make_node(
        "Add", ["add1_output", "init2"], ["add2_output"], name="add2"
    )
    id2 = onnx.helper.make_node("Identity", ["add2_output"], ["output"], name="id2")
    graph = onnx.helper.make_graph(
        nodes=[id1, add1, add2, id2],
        name="g",
        inputs=[model_input],
        outputs=[model_output],
        initializer=[init1, init2],
    )
    model = onnx.helper.make_model(graph)
    return ONNXGraph(model)
def METHOD_NAME(onnx_graph: ONNXGraph):
    """optional_node matches a child when present and yields None when absent."""
    matches = get_structural_matches(
        onnx_graph, op_type="Add", children_ops=[[optional_node("Add")]]
    )
    assert list(map(str, matches)) == [
        "MatchResult(node='add1', parents=[], children=[['add2']])",
        "MatchResult(node='add2', parents=[], children=[[None]])",
    ]
def test_match_all_options(onnx_graph: ONNXGraph):
    """Combining parent_ops, op_type and children_ops narrows to a single node."""
    matches = get_structural_matches(
        onnx_graph,
        parent_ops=[
            ["Add"],
            [INITIALIZER_MATCH],
        ],
        op_type="Add",
        children_ops=[["Identity"]],
    )
    assert list(map(str, matches)) == [
        "MatchResult(node='add2', parents=[['add1'], ['init2']], children=[['id2']])"
    ]
def test_only_op_type(onnx_graph: ONNXGraph):
    """Matching on op_type alone returns every node of that type (or none)."""
    matches = get_structural_matches(onnx_graph, op_type="Conv")
    assert len(matches) == 0

    matches = get_structural_matches(onnx_graph, op_type="Identity")
    assert list(map(str, matches)) == [
        "MatchResult(node='id1', parents=[], children=[])",
        "MatchResult(node='id2', parents=[], children=[])",
    ]

    matches = get_structural_matches(onnx_graph, op_type="Add")
    assert list(map(str, matches)) == [
        "MatchResult(node='add1', parents=[], children=[])",
        "MatchResult(node='add2', parents=[], children=[])",
    ]
def test_parent_empty(onnx_graph: ONNXGraph):
    """An empty parent-op pattern acts as a wildcard for that input slot."""
    matches = get_structural_matches(
        onnx_graph,
        op_type="Add",
        parent_ops=[
            [],
            [INITIALIZER_MATCH],
        ],
    )
    assert list(map(str, matches)) == [
        "MatchResult(node='add2', parents=[[], ['init2']], children=[])"
    ]
def test_match_any(onnx_graph: ONNXGraph):
    """any_of matches a parent whose op type is any of the listed types."""
    matches = get_structural_matches(
        onnx_graph,
        op_type="Add",
        parent_ops=[
            [any_of("Add", "Identity")],
            [],
        ],
    )
    assert list(map(str, matches)) == [
        "MatchResult(node='add1', parents=[['id1'], []], children=[])",
        "MatchResult(node='add2', parents=[['add1'], []], children=[])",
    ]
7,426 | parse content | """
Krb5Configuration - files ``/etc/krb5.conf`` and ``/etc/krb5.conf.d/*``
=======================================================================
krb5 Configuration are ``/etc/krb5.conf`` and ``/etc/krb5.conf.d/*``,
and the content format is similar to ``INI config``, but they include
values that span multiple lines. Multi-line values start with a '{'
and end with a '}', and we join them together by setting the `is_squ`
variable to True while in a multi-line value.
Example:
>>> krb5_content = '''
[realms]
dns_lookup_realm = false
ticket_lifetime = 24h
default_ccache_name = KEYRING:persistent:%{uid}
EXAMPLE.COM = {
kdc = kerberos.example.com
admin_server = kerberos.example.com
}
pam = {
debug = false
krb4_convert = false
ticket_lifetime = 36000
}
[libdefaults]
dns_lookup_realm = false
ticket_lifetime = 24h
EXAMPLE.COM = {
kdc = kerberos2.example.com
admin_server = kerberos2.example.com
}
# renew_lifetime = 7d
# forwardable = true
# rdns = false
'''.strip()
>>> from insights.tests import context_wrap
>>> shared = {Krb5Configuration: Krb5Configuration(context_wrap(krb5_content))}
>>> krb5_info = shared[Krb5Configuration]
>>> krb5_info["libdefaults"]["dns_lookup_realm"]
"false"
>>> krb5_info["realms"]["EXAMPLE.COM"]["kdc"]
"kerberos.example.com"
>>> krb5_info.sections()
["libdefaults","realms"]
>>> krb5_info.has_section("realms")
True
>>> krb5_info.has_option("realms", "nosuchoption")
False
>>> krb5_info.options("libdefaults")
["dns_lookup_realm","ticket_lifetime","EXAMPLE.COM"]
"""
from .. import parser, Parser, get_active_lines, LegacyItemAccess
from insights.specs import Specs
PREFIX_FOR_LIST = ("includedir", "include", "module")
def _handle_key_value(t_dict, key, value):
"""
Function to handle key has multi value, and return the values as list.
"""
if key in t_dict:
val = t_dict[key]
if isinstance(val, str):
val = [val]
val.append(value)
return val
return value
def _handle_krb5_bool(value):
"""
Convert krb5.conf boolean
"""
# see lib/krb5/krb/libdef_parse.c _krb5_conf_boolean()
if value in set(["y", "yes", "true", "t", "1", "on"]):
return True
elif value in set(["n", "no", "false", "nil", "0", "off"]):
return False
else:
# _krb5_conf_boolean() treats any other value as "False". Return
# "None" so caller can identify this case.
return None
@parser(Specs.krb5)
class Krb5Configuration(Parser, LegacyItemAccess):
    """
    Class for ``krb5.conf`` and ``krb5.conf.d`` configuration files.

    The Kerberos format is like an ordinary .ini file except that a value can
    be a multi-line 'relation' that starts with a ``{`` and ends with a ``}``
    on a trailing line.  The parser tracks whether it is inside such a
    relation by setting ``is_squ`` on entry and clearing it on the closing
    brace.

    Attributes:
        includedir (list): The directory list that `krb5.conf` includes via
            `includedir` directive
        include (list): The configuration file list that `krb5.conf` includes
            via `include` directive
        module (list): The module list that `krb5.conf` specified via `module`
            directive
    """
    def METHOD_NAME(self, content):
        # Top-level result: section name -> dict of options for that section.
        dict_all = {}
        # True while inside a "NAME = { ... }" relation (curly-brace block).
        is_squ = False
        section_name = ""
        # Accumulates key/value pairs of the current curly-brace relation.
        squ_value = {}
        # Name (left-hand side) of the current curly-brace relation.
        squ_section_name = ""
        # Accumulates key/value pairs of the current [section].
        section_value = {}
        self.includedir = []
        self.include = []
        self.module = []
        # Keys whose value line ended with '*'; later assignments to these
        # keys are ignored (the value is treated as final).
        unchangeable_tags = []
        for line in get_active_lines(content):
            line = line.strip()
            if line.startswith(PREFIX_FOR_LIST):
                # Directive line such as "includedir /etc/krb5.conf.d".
                key, value = [i.strip() for i in line.split(None, 1)]
                # NOTE(review): startswith() above matches on a *prefix*
                # while this check requires an exact directive name, so a
                # line like "includedirs ..." is silently dropped — confirm
                # that is intended.
                getattr(self, key).append(value) if key in PREFIX_FOR_LIST else None
                continue
            if is_squ:
                # If in {} sub_section, get the key_value pair
                if "=" in line:
                    key, value = [i.strip() for i in line.split('=', 1)]
                    if key not in unchangeable_tags:
                        # Keep only the first whitespace-separated token of
                        # the value.
                        value = value.split()[0].strip()
                        squ_value[key] = _handle_key_value(squ_value, key, value)
                        # A trailing '*' marks this key's value as final.
                        if line.endswith("*"):
                            unchangeable_tags.append(key)
                # The {} sub_section should end with },
                # if it is, set the whole value to the sub_section name,
                # and clean the flag
                else:
                    section_value[squ_section_name] = squ_value
                    is_squ = False
                    squ_section_name = ""
                    squ_value = {}
            else:
                # [XXX] means a section, get the section name and the value
                # format is dict.
                if line.startswith("[") and line.endswith("]"):
                    # If first section, just get the section name,
                    # if not, set the value to the former section and
                    # get the section name
                    section_name = line.strip("[]")
                    section_value = {}
                    if section_name:
                        dict_all[section_name] = section_value
                # key value format is XXX = YYY, store as dict
                elif "=" in line and not line.endswith("{"):
                    key, value = [i.strip() for i in line.split('=', 1)]
                    if key not in unchangeable_tags:
                        value = value.split()[0].strip()
                        section_value[key] = _handle_key_value(section_value, key, value)
                        # A trailing '*' marks this key's value as final.
                        if line.endswith("*"):
                            unchangeable_tags.append(key)
                # The {} sub_section should start with format XXX = {
                else:
                    is_squ = True
                    squ_section_name = line.split("=")[0].strip()
        self.data = dict_all
    def sections(self):
        """
        Return the section names (a dict key view, iterable like a list).
        """
        return self.data.keys()
    def has_section(self, section):
        """
        Indicate whether the named section is present in the configuration.
        Return True if the given section is present, and False if not present.
        """
        return section in self.data
    def options(self, section):
        """
        Return the option names for the given section name (empty list when
        the section does not exist).
        """
        return self.data[section].keys() if self.has_section(section) else []
    def has_option(self, section, option):
        """
        Check for the existence of a given option in a given section.
        Return True if the given option is present, and False if not present.
        """
        if section not in self.data:
            return False
        return option in self.data[section]
    def getboolean(self, section, option):
        """Parse option as bool.

        Returns None when the stored value is not a krb5.conf boolean string.
        Raises KeyError if the section or option does not exist.
        """
        value = self.data[section][option]
        return _handle_krb5_bool(value)
7,427 | s3 client | # pylint: disable=redefined-outer-name
import datetime
import json
import os
from typing import Dict
from unittest import mock
import boto3
import botocore.client
import botocore.session
import pytest as pytest
from aws_embedded_metrics.logger.metrics_logger import MetricsLogger
from botocore.stub import Stubber, ANY
from stats import handle_sqs, handle_pageload, handle_compiler_stats
SOME_DATE = datetime.datetime(2020, 1, 2, 3, 4, 5, 12312)
@pytest.fixture
def sqs_client():
    # Real botocore client (no credentials needed) so botocore.stub.Stubber
    # can intercept and assert on API calls; the region name is a dummy.
    return botocore.session.get_session().create_client("sqs", region_name="not-real")
@pytest.fixture
def dynamodb_client():
    # Stubbable DynamoDB client for the compiler-stats tests; dummy region.
    return botocore.session.get_session().create_client("dynamodb", region_name="not-real")
@pytest.fixture
def METHOD_NAME():
    # Stubbable S3 client for the SQS-to-S3 storage test; dummy region.
    return botocore.session.get_session().create_client("s3", region_name="not-real")
def make_expected_body(msg_type: str, value: str, date: str = "2020-01-02", time: str = "03:04:05") -> str:
    """Build the JSON message body the stats handlers are expected to enqueue.

    The ``date``/``time`` defaults match ``SOME_DATE`` so existing call sites
    are unchanged; pass them explicitly to test other timestamps.  Using
    ``json.dumps`` (instead of hand-building the string) keeps the output
    byte-identical for the existing tests while also escaping any special
    characters in ``msg_type``/``value`` correctly.
    """
    return json.dumps({"date": date, "time": time, "type": msg_type, "value": value})
@mock.patch.dict(os.environ, dict(S3_BUCKET_NAME="not-a-real-bucket"))
def test_should_store_results_from_sqs_correctly(METHOD_NAME):
context = mock.Mock(function_name="some_func")
event = dict(Records=[dict(body="first"), dict(body="second")])
with Stubber(METHOD_NAME) as stubber:
stubber.add_response(
"put_object",
{},
dict(
Body="first\nsecond", Bucket="not-a-real-bucket", Key="stats/some_func-2020-01-02-03:04:05.012312.log"
),
)
handle_sqs(event, context, METHOD_NAME, SOME_DATE)
def test_pageloads_should_return_a_200_doc():
metrics = mock.Mock(spec_set=MetricsLogger)
queue_url = "some-queue-url"
result = handle_pageload(dict(queryStringParameters={}), metrics, SOME_DATE, queue_url, mock.Mock())
assert result["statusCode"] == 200
assert result["body"] == "Ok"
metrics.put_metric.assert_called_once_with("PageLoad", 1)
def test_should_handle_pageloads_with_no_sponsors(sqs_client):
metrics = mock.Mock(spec_set=MetricsLogger)
queue_url = "some-queue-url"
with Stubber(sqs_client) as stubber:
stubber.add_response(
"send_message", {}, dict(QueueUrl=queue_url, MessageBody=make_expected_body("PageLoad", ""))
)
handle_pageload(dict(queryStringParameters={}), metrics, SOME_DATE, queue_url, sqs_client)
metrics.set_property.assert_called_once_with("sponsors", [])
metrics.put_metric.assert_called_once_with("PageLoad", 1)
def test_should_handle_pageloads_with_empty_sponsors(sqs_client):
metrics = mock.Mock(spec_set=MetricsLogger)
queue_url = "some-queue-url"
with Stubber(sqs_client) as stubber:
stubber.add_response(
"send_message", {}, dict(QueueUrl=queue_url, MessageBody=make_expected_body("PageLoad", ""))
)
handle_pageload(dict(queryStringParameters=dict(icons="")), metrics, SOME_DATE, queue_url, sqs_client)
metrics.set_property.assert_called_once_with("sponsors", [])
def test_should_handle_pageloads_with_one_sponsor(sqs_client):
metrics = mock.Mock(spec_set=MetricsLogger)
queue_url = "some-queue-url"
with Stubber(sqs_client) as stubber:
stubber.add_response("send_message", {}, dict(QueueUrl=queue_url, MessageBody=ANY))
stubber.add_response(
"send_message", {}, dict(QueueUrl=queue_url, MessageBody=make_expected_body("SponsorView", "bob"))
)
handle_pageload(dict(queryStringParameters=dict(icons="bob")), metrics, SOME_DATE, queue_url, sqs_client)
metrics.set_property.assert_called_once_with("sponsors", ["bob"])
def test_should_handle_pageloads_with_many_sponsors(sqs_client):
metrics = mock.Mock(spec_set=MetricsLogger)
queue_url = "some-queue-url"
with Stubber(sqs_client) as stubber:
stubber.add_response("send_message", {}, dict(QueueUrl=queue_url, MessageBody=ANY))
for expectation in ("bob", "alice", "crystal"):
stubber.add_response(
"send_message", {}, dict(QueueUrl=queue_url, MessageBody=make_expected_body("SponsorView", expectation))
)
handle_pageload(
dict(queryStringParameters=dict(icons="bob,alice,crystal")), metrics, SOME_DATE, queue_url, sqs_client
)
metrics.set_property.assert_called_once_with("sponsors", ["bob", "alice", "crystal"])
def test_should_handle_pageloads_with_many_sponsors_uri_encoded(sqs_client):
metrics = mock.Mock(spec_set=MetricsLogger)
queue_url = "some-queue-url"
with Stubber(sqs_client) as stubber:
stubber.add_response("send_message", {}, dict(QueueUrl=queue_url, MessageBody=ANY))
for expectation in ("bob", "alice", "crystal"):
stubber.add_response(
"send_message", {}, dict(QueueUrl=queue_url, MessageBody=make_expected_body("SponsorView", expectation))
)
handle_pageload(
dict(queryStringParameters=dict(icons="bob%2Calice%2Ccrystal")), metrics, SOME_DATE, queue_url, sqs_client
)
metrics.set_property.assert_called_once_with("sponsors", ["bob", "alice", "crystal"])
@pytest.mark.skip("run manually with creds")
def test_should_find_stats_on_a_compiler():
res = handle_compiler_stats("gcc", "compiler-builds", boto3.client("dynamodb"))
print(res)
def test_should_query_compilers_with_the_right_query(dynamodb_client):
with Stubber(dynamodb_client) as stubber:
stubber.add_response(
"query",
dict(Count=0, Items=[]),
dict(
TableName="compiler-table",
Limit=100,
ScanIndexForward=False,
KeyConditionExpression="#key = :compiler",
FilterExpression="#status = :status_filter",
ExpressionAttributeNames={"#key": "compiler", "#status": "status"},
ExpressionAttributeValues={":status_filter": dict(S="OK"), ":compiler": dict(S="some-compiler")},
),
)
stubber.add_response(
"query",
dict(Count=0, Items=[]),
dict(
TableName="compiler-table",
Limit=100,
ScanIndexForward=False,
KeyConditionExpression="#key = :compiler",
ExpressionAttributeNames={"#key": "compiler"},
ExpressionAttributeValues={":compiler": dict(S="some-compiler")},
),
)
handle_compiler_stats("some-compiler", "compiler-table", dynamodb_client)
def test_should_mention_most_recent_compiler_build(dynamodb_client):
def make_fake_item(run_id: str) -> Dict:
return dict(
path=dict(S="path"),
github_run_id=dict(S=run_id),
timestamp=dict(S="some time"),
duration=dict(N="123"),
)
with Stubber(dynamodb_client) as stubber:
stubber.add_response(
"query", dict(Count=3, Items=[make_fake_item("first"), make_fake_item("second"), make_fake_item("third")])
)
stubber.add_response("query", dict(Count=2, Items=[make_fake_item("first_b"), make_fake_item("second_b")]))
result = handle_compiler_stats("some-compiler", "compiler-table", dynamodb_client)
assert result["statusCode"] == 200
assert json.loads(result["body"]) == dict(
last_success=dict(duration=123, github_run_id="first", path="path", timestamp="some time"),
last_build=dict(duration=123, github_run_id="first_b", path="path", timestamp="some time"),
)
def test_should_handle_when_no_valid_compiler_builds(dynamodb_client):
with Stubber(dynamodb_client) as stubber:
stubber.add_response("query", dict(Count=0, Items=[]))
stubber.add_response("query", dict(Count=0, Items=[]))
result = handle_compiler_stats("some-compiler", "compiler-table", dynamodb_client)
assert result["statusCode"] == 200
assert json.loads(result["body"]) == dict(last_success=None, last_build=None) |
7,428 | init | # Copyright 2022 MosaicML Composer authors
# SPDX-License-Identifier: Apache-2.0
import json
from typing import Type
import mcli
import pytest
import torch
from torch.utils.data import DataLoader
from composer.core import Callback
from composer.loggers import WandBLogger
from composer.loggers.mosaicml_logger import (MOSAICML_ACCESS_TOKEN_ENV_VAR, MOSAICML_PLATFORM_ENV_VAR, MosaicMLLogger,
format_data_to_json_serializable)
from composer.trainer import Trainer
from composer.utils import dist
from tests.callbacks.callback_settings import get_cb_kwargs, get_cb_model_and_datasets, get_cbs_and_marks
from tests.common import RandomClassificationDataset, SimpleModel
from tests.common.markers import world_size
class MockMAPI:
    """In-memory stand-in for the MosaicML API used in tests.

    Records metadata per run name and round-trips each update through
    ``json.dumps`` so that any non-JSON-serializable value raises
    immediately.
    """

    def __init__(self):
        # run name -> flat metadata dict
        self.run_metadata = {}

    def update_run_metadata(self, run_name, new_metadata):
        metadata = self.run_metadata.setdefault(run_name, {})
        metadata.update(new_metadata)
        # Serialize the data to ensure it is json serializable
        json.dumps(metadata)
def test_format_data_to_json_serializable():
    """JSON-friendly values pass through untouched; tensors are summarized
    as a shape description string."""
    tensor = torch.tensor([1, 2, 3])
    data = {
        'key1': 'value1',
        'key2': 42,
        'key3': 3.14,
        'key4': True,
        'key5': tensor,
        'key6': {
            'inner_key': 'inner_value'
        },
        'key7': [1, 2, 3],
    }
    # Only the tensor entry should be rewritten.
    expected_formatted_data = dict(data, key5='Tensor of shape torch.Size([3])')
    assert format_data_to_json_serializable(data) == expected_formatted_data
@pytest.mark.parametrize('callback_cls', get_cbs_and_marks(callbacks=True))
@world_size(1, 2)
def test_logged_data_is_json_serializable(monkeypatch, callback_cls: Type[Callback], world_size):
"""Test that all logged data is json serializable, which is a requirement to use MAPI."""
mock_mapi = MockMAPI()
monkeypatch.setattr(mcli, 'update_run_metadata', mock_mapi.update_run_metadata)
run_name = 'small_chungus'
monkeypatch.setenv('RUN_NAME', run_name)
callback_kwargs = get_cb_kwargs(callback_cls)
callback = callback_cls(**callback_kwargs)
train_dataset = RandomClassificationDataset()
model, train_dataloader, _ = get_cb_model_and_datasets(callback, sampler=dist.get_sampler(train_dataset))
trainer = Trainer(
model=model,
train_dataloader=train_dataloader,
train_subset_num_batches=1,
max_duration='1ep',
callbacks=callback,
loggers=MosaicMLLogger(),
)
trainer.fit()
if dist.get_global_rank() == 0:
assert len(mock_mapi.run_metadata[run_name].keys()) > 0
else:
assert len(mock_mapi.run_metadata.keys()) == 0
def test_metric_partial_filtering(monkeypatch):
mock_mapi = MockMAPI()
monkeypatch.setattr(mcli, 'update_run_metadata', mock_mapi.update_run_metadata)
run_name = 'small_chungus'
monkeypatch.setenv('RUN_NAME', run_name)
trainer = Trainer(
model=SimpleModel(),
train_dataloader=DataLoader(RandomClassificationDataset()),
train_subset_num_batches=2,
max_duration='1ep',
loggers=MosaicMLLogger(ignore_keys=['loss', 'accuracy']),
)
trainer.fit()
assert 'mosaicml/num_nodes' in mock_mapi.run_metadata[run_name]
assert 'mosaicml/loss' not in mock_mapi.run_metadata[run_name]
def test_metric_full_filtering(monkeypatch):
mock_mapi = MockMAPI()
monkeypatch.setattr(mcli, 'update_run_metadata', mock_mapi.update_run_metadata)
run_name = 'small_chungus'
monkeypatch.setenv('RUN_NAME', run_name)
trainer = Trainer(
model=SimpleModel(),
train_dataloader=DataLoader(RandomClassificationDataset()),
train_subset_num_batches=2,
max_duration='1ep',
loggers=MosaicMLLogger(ignore_keys=['*']),
)
trainer.fit()
assert len(mock_mapi.run_metadata[run_name].keys()) == 0
class SetWandBRunURL(Callback):
    """Sets run_url attribute on WandB for offline unit testing."""

    def __init__(self, run_url) -> None:
        self.run_url = run_url

    def METHOD_NAME(self, state, event) -> None:
        # Stamp the URL onto every WandB logger registered on the state.
        wandb_loggers = (cb for cb in state.callbacks if isinstance(cb, WandBLogger))
        for wandb_logger in wandb_loggers:
            wandb_logger.run_url = self.run_url
def test_wandb_run_url(monkeypatch):
mock_mapi = MockMAPI()
monkeypatch.setattr(mcli, 'update_run_metadata', mock_mapi.update_run_metadata)
run_name = 'small_chungus'
monkeypatch.setenv('RUN_NAME', run_name)
run_url = 'my_run_url'
monkeypatch.setenv('WANDB_MODE', 'offline')
Trainer(model=SimpleModel(), loggers=[
MosaicMLLogger(),
WandBLogger(),
], callbacks=[
SetWandBRunURL(run_url),
])
assert mock_mapi.run_metadata[run_name]['mosaicml/wandb/run_url'] == run_url
@pytest.mark.parametrize('platform_env_var', ['True', 'None'])
@pytest.mark.parametrize('access_token_env_var', ['my-token', 'None'])
@pytest.mark.parametrize('logger_set', [True, False])
def test_auto_add_logger(monkeypatch, platform_env_var, access_token_env_var, logger_set):
    """A MosaicMLLogger should appear exactly once: either because it was
    passed explicitly, or because the Trainer auto-adds it when the platform
    environment variables indicate a MosaicML platform run."""
    mock_mapi = MockMAPI()
    monkeypatch.setattr(mcli, 'update_run_metadata', mock_mapi.update_run_metadata)
    run_name = 'small_chungus'
    monkeypatch.setenv('RUN_NAME', run_name)
    # The string 'None' simulates "unset" for the parametrized cases.
    monkeypatch.setenv(MOSAICML_PLATFORM_ENV_VAR, platform_env_var)
    monkeypatch.setenv(MOSAICML_ACCESS_TOKEN_ENV_VAR, access_token_env_var)
    trainer = Trainer(
        model=SimpleModel(),
        train_dataloader=DataLoader(RandomClassificationDataset()),
        train_subset_num_batches=2,
        max_duration='1ep',
        loggers=MosaicMLLogger() if logger_set else None,
    )
    # Count how many MosaicMLLogger instances ended up on the trainer.
    logger_count = 0
    for callback in trainer.state.callbacks:
        if isinstance(callback, MosaicMLLogger):
            logger_count += 1
    # If logger is specified manually, ensure only 1
    if logger_set:
        assert logger_count == 1
    # Otherwise, auto-add only if platform and access token are set
    # NOTE(review): both parametrized values are non-empty strings ('True' or
    # 'None'), so `platform_env_var` is always truthy and
    # `access_token_env_var is not None` is always True — this elif always
    # fires when logger_set is False and the final else branch is
    # unreachable.  Likely intended something like
    # `platform_env_var == 'True' and access_token_env_var != 'None'`;
    # confirm against the Trainer's auto-add logic before changing.
    elif platform_env_var and access_token_env_var is not None:
        assert logger_count == 1
    # Otherwise, no logger
    else:
        assert logger_count == 0
7,429 | pz | #!/usr/bin/python
##################
# init.py
#
# Copyright David Baddeley, 2009
# d.baddeley@auckland.ac.nz
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##################
#!/usr/bin/python
from PYME.Acquire.ExecTools import joinBGInit, HWNotPresent, init_gui, init_hardware
import scipy
import time
@init_hardware('Fake Piezos')
def METHOD_NAME(scope):
    """Create simulated x/y/z piezo stages and register them with the scope."""
    from PYME.Acquire.Hardware.Simulator import fakePiezo
    # z stage (range argument 100 — units defined by FakePiezo; presumably
    # micrometres, confirm).  Registering with needCamRestart=True forces a
    # camera restart when this piezo is registered.
    scope.fakePiezo = fakePiezo.FakePiezo(100)
    scope.register_piezo(scope.fakePiezo, 'z', needCamRestart=True)
    # Lateral stages with range argument 10.
    scope.fakeXPiezo = fakePiezo.FakePiezo(10)
    scope.register_piezo(scope.fakeXPiezo, 'x')
    scope.fakeYPiezo = fakePiezo.FakePiezo(10)
    scope.register_piezo(scope.fakeYPiezo, 'y')

METHOD_NAME.join() #piezo must be there before we start camera
@init_hardware('Fake Camera')
def cm(scope):
import numpy as np
from PYME.Acquire.Hardware.Simulator import fakeCam
from PYME.Acquire.Hardware.Simulator import rend_im
PIXELSIZE_NM = 50.
rend_im.set_pixelsize_nm(PIXELSIZE_NM)
scope.register_camera(fakeCam.FakeCamera(PIXELSIZE_NM*np.arange(-128.0, 768.0 + 128.0),
PIXELSIZE_NM*np.arange(-128.0, 128.0),
fakeCam.NoiseMaker(),
scope.fakePiezo, xpiezo = scope.fakeXPiezo, ypiezo = scope.fakeYPiezo),'Fake Camera')
#scope.EnableJoystick = 'foo'
#InitBG('Should Fail', """
#raise Exception, 'test error'
#time.sleep(1)
#""")
#
#InitBG('Should not be there', """
#raise HWNotPresent, 'test error'
#time.sleep(1)
#""")
@init_gui('Simulation UI')
def sim_controls(MainFrame, scope):
from PYME.Acquire.Hardware.Simulator import dSimControl
dsc = dSimControl.dSimControl(MainFrame, scope)
MainFrame.AddPage(page=dsc, select=False, caption='Simulation Settings')
@init_gui('Camera controls')
def cam_controls(MainFrame, scope):
from PYME.Acquire.Hardware.AndorIXon import AndorControlFrame
scope.camControls['Fake Camera'] = AndorControlFrame.AndorPanel(MainFrame, scope.cam, scope)
MainFrame.camPanels.append((scope.camControls['Fake Camera'], 'EMCCD Properties'))
@init_gui('Sample database')
def samp_db(MainFrame, scope):
from PYME.Acquire import sampleInformation
sampPan = sampleInformation.slidePanel(MainFrame)
MainFrame.camPanels.append((sampPan, 'Current Slide'))
@init_gui('Analysis settings')
def anal_settings(MainFrame, scope):
from PYME.Acquire.ui import AnalysisSettingsUI
AnalysisSettingsUI.Plug(scope, MainFrame)
@init_gui('Fake DMD')
def fake_dmd(MainFrame, scope):
from PYMEnf.Hardware import FakeDMD, DMDGui
scope.LC = FakeDMD.FakeDMD(scope)
LCGui = DMDGui.DMDPanel(MainFrame,scope.LC, scope)
MainFrame.camPanels.append((LCGui, 'DMD Control', False))
#InitGUI("""
#from PYME.Acquire.Hardware import ccdAdjPanel
##import wx
##f = wx.Frame(None)
#snrPan = ccdAdjPanel.sizedCCDPanel(notebook1, scope, acf)
#notebook1.AddPage(page=snrPan, select=False, caption='Image SNR')
##camPanels.append((snrPan, 'SNR etc ...'))
##f.Show()
##time1.WantNotification.append(snrPan.ccdPan.draw)
#""")
cm.join()
@init_hardware('Lasers')
def lasers(scope):
from PYME.Acquire.Hardware import lasers
scope.l488 = lasers.FakeLaser('l488',scope.cam,1, initPower=10)
scope.l488.register(scope)
scope.l405 = lasers.FakeLaser('l405',scope.cam,0, initPower=10)
scope.l405.register(scope)
@init_gui('Laser controls')
def laser_controls(MainFrame, scope):
from PYME.Acquire.ui import lasersliders
lcf = lasersliders.LaserToggles(MainFrame.toolPanel, scope.state)
MainFrame.time1.WantNotification.append(lcf.update)
MainFrame.camPanels.append((lcf, 'Laser Control'))
lsf = lasersliders.LaserSliders(MainFrame.toolPanel, scope.state)
MainFrame.time1.WantNotification.append(lsf.update)
MainFrame.camPanels.append((lsf, 'Laser Powers'))
@init_gui('Focus Keys')
def focus_keys(MainFrame, scope):
from PYME.Acquire.Hardware import focusKeys
fk = focusKeys.FocusKeys(MainFrame, None, scope.piezos[0])
#InitGUI("""
#from PYME.Acquire.Hardware import splitter
#splt = splitter.Splitter(MainFrame, None, scope, scope.cam)
#""")
@init_gui('Action manager')
def action_manager(MainFrame, scope):
from PYME.Acquire.ui import actionUI
ap = actionUI.ActionPanel(MainFrame, scope.actions, scope)
MainFrame.AddPage(ap, caption='Queued Actions')
#must be here!!!
joinBGInit() #wait for anyhting which was being done in a separate thread
#import numpy
#psf = numpy.load(r'd:\psf647.npy')
#psf = numpy.maximum(psf, 0.)
#from PYME.Analysis import MetaData
#fakeCam.rend_im.setModel(psf, MetaData.TIRFDefault)
#time.sleep(.5)
scope.initDone = True
|
7,430 | expect flow mapping | from typing import Any, Protocol, TypeVar
from yaml.error import YAMLError
_T_contra = TypeVar("_T_contra", str, bytes, contravariant=True)
class _WriteStream(Protocol[_T_contra]):
def write(self, __data: _T_contra) -> object: ...
# Optional fields:
# encoding: str
# def flush(self) -> object: ...
class EmitterError(YAMLError): ...
class ScalarAnalysis:
    """Result bundle from ``Emitter.analyze_scalar``: the scalar text plus
    flags describing which YAML quoting/block styles can represent it."""
    scalar: Any
    empty: Any
    multiline: Any
    allow_flow_plain: Any
    allow_block_plain: Any
    allow_single_quoted: Any
    allow_double_quoted: Any
    allow_block: Any
    def __init__(
        self, scalar, empty, multiline, allow_flow_plain, allow_block_plain, allow_single_quoted, allow_double_quoted, allow_block
    ) -> None: ...
class Emitter:
DEFAULT_TAG_PREFIXES: Any
stream: _WriteStream[Any]
encoding: Any
states: Any
state: Any
events: Any
event: Any
indents: Any
indent: Any
flow_level: Any
root_context: Any
sequence_context: Any
mapping_context: Any
simple_key_context: Any
line: Any
column: Any
whitespace: Any
indention: Any
open_ended: Any
canonical: Any
allow_unicode: Any
best_indent: Any
best_width: Any
best_line_break: Any
tag_prefixes: Any
prepared_anchor: Any
prepared_tag: Any
analysis: Any
style: Any
def __init__(
self, stream: _WriteStream[Any], canonical=None, indent=None, width=None, allow_unicode=None, line_break=None
) -> None: ...
def dispose(self): ...
def emit(self, event): ...
def need_more_events(self): ...
def need_events(self, count): ...
def increase_indent(self, flow=False, indentless=False): ...
def expect_stream_start(self): ...
def expect_nothing(self): ...
def expect_first_document_start(self): ...
def expect_document_start(self, first=False): ...
def expect_document_end(self): ...
def expect_document_root(self): ...
def expect_node(self, root=False, sequence=False, mapping=False, simple_key=False): ...
def expect_alias(self): ...
def expect_scalar(self): ...
def expect_flow_sequence(self): ...
def expect_first_flow_sequence_item(self): ...
def expect_flow_sequence_item(self): ...
def METHOD_NAME(self): ...
def expect_first_flow_mapping_key(self): ...
def expect_flow_mapping_key(self): ...
def expect_flow_mapping_simple_value(self): ...
def expect_flow_mapping_value(self): ...
def expect_block_sequence(self): ...
def expect_first_block_sequence_item(self): ...
def expect_block_sequence_item(self, first=False): ...
def expect_block_mapping(self): ...
def expect_first_block_mapping_key(self): ...
def expect_block_mapping_key(self, first=False): ...
def expect_block_mapping_simple_value(self): ...
def expect_block_mapping_value(self): ...
def check_empty_sequence(self): ...
def check_empty_mapping(self): ...
def check_empty_document(self): ...
def check_simple_key(self): ...
def process_anchor(self, indicator): ...
def process_tag(self): ...
def choose_scalar_style(self): ...
def process_scalar(self): ...
def prepare_version(self, version): ...
def prepare_tag_handle(self, handle): ...
def prepare_tag_prefix(self, prefix): ...
def prepare_tag(self, tag): ...
def prepare_anchor(self, anchor): ...
def analyze_scalar(self, scalar): ...
def flush_stream(self): ...
def write_stream_start(self): ...
def write_stream_end(self): ...
def write_indicator(self, indicator, need_whitespace, whitespace=False, indention=False): ...
def write_indent(self): ...
def write_line_break(self, data=None): ...
def write_version_directive(self, version_text): ...
def write_tag_directive(self, handle_text, prefix_text): ...
def write_single_quoted(self, text, split=True): ...
ESCAPE_REPLACEMENTS: Any
def write_double_quoted(self, text, split=True): ...
def determine_block_hints(self, text): ...
def write_folded(self, text): ...
def write_literal(self, text): ...
def write_plain(self, text, split=True): ... |
7,431 | build arguments schema | # --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
#
# Code generated by aaz-dev-tools
# --------------------------------------------------------------------------------------------
# pylint: skip-file
# flake8: noqa
from azure.cli.core.aaz import *
@register_command(
"storage-mover agent unregister",
confirmation="WARNING: Deleting this agent will stop ongoing migrations on this agent. Job definitions that reference this agent can’t be started until their agent reference is updated to a working agent. Registering this agent again will result in a new identity and not fix existing job definitions. Note that the Azure ARC trust is not broken. The Hybrid Compute resource must be manually removed to invalidate the agent identity that may still be allowed access to target storage containers. \nAre you sure you want to delete this storage mover agent?",
)
class Unregister(AAZCommand):
"""Unregisters an Agent resource.
:example: agent unregister
az storage-mover agent unregister -g {rg} -n {agent_name} --storage-mover-name {mover_name}
"""
_aaz_info = {
"version": "2023-07-01-preview",
"resources": [
["mgmt-plane", "/subscriptions/{}/resourcegroups/{}/providers/microsoft.storagemover/storagemovers/{}/agents/{}", "2023-07-01-preview"],
]
}
AZ_SUPPORT_NO_WAIT = True
def _handler(self, command_args):
super()._handler(command_args)
return self.build_lro_poller(self._execute_operations, None)
_args_schema = None
@classmethod
def METHOD_NAME(cls, *args, **kwargs):
if cls._args_schema is not None:
return cls._args_schema
cls._args_schema = super().METHOD_NAME(*args, **kwargs)
# define Arg Group ""
_args_schema = cls._args_schema
_args_schema.agent_name = AAZStrArg(
options=["-n", "--name", "--agent-name"],
help="The name of the Agent resource.",
required=True,
id_part="child_name_1",
)
_args_schema.resource_group = AAZResourceGroupNameArg(
required=True,
)
_args_schema.storage_mover_name = AAZStrArg(
options=["--storage-mover-name"],
help="The name of the Storage Mover resource.",
required=True,
id_part="name",
)
return cls._args_schema
def _execute_operations(self):
self.pre_operations()
yield self.AgentsDelete(ctx=self.ctx)()
self.post_operations()
@register_callback
def pre_operations(self):
pass
@register_callback
def post_operations(self):
pass
class AgentsDelete(AAZHttpOperation):
CLIENT_TYPE = "MgmtClient"
def __call__(self, *args, **kwargs):
request = self.make_request()
session = self.client.send_request(request=request, stream=False, **kwargs)
if session.http_response.status_code in [202]:
return self.client.build_lro_polling(
self.ctx.args.no_wait,
session,
self.on_200,
self.on_error,
lro_options={"final-state-via": "location"},
path_format_arguments=self.url_parameters,
)
if session.http_response.status_code in [200]:
return self.client.build_lro_polling(
self.ctx.args.no_wait,
session,
self.on_200,
self.on_error,
lro_options={"final-state-via": "location"},
path_format_arguments=self.url_parameters,
)
if session.http_response.status_code in [204]:
return self.client.build_lro_polling(
self.ctx.args.no_wait,
session,
self.on_204,
self.on_error,
lro_options={"final-state-via": "location"},
path_format_arguments=self.url_parameters,
)
return self.on_error(session.http_response)
@property
def url(self):
return self.client.format_url(
"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.StorageMover/storageMovers/{storageMoverName}/agents/{agentName}",
**self.url_parameters
)
@property
def method(self):
return "DELETE"
@property
def error_format(self):
return "MgmtErrorFormat"
@property
def url_parameters(self):
parameters = {
**self.serialize_url_param(
"agentName", self.ctx.args.agent_name,
required=True,
),
**self.serialize_url_param(
"resourceGroupName", self.ctx.args.resource_group,
required=True,
),
**self.serialize_url_param(
"storageMoverName", self.ctx.args.storage_mover_name,
required=True,
),
**self.serialize_url_param(
"subscriptionId", self.ctx.subscription_id,
required=True,
),
}
return parameters
@property
def query_parameters(self):
parameters = {
**self.serialize_query_param(
"api-version", "2023-07-01-preview",
required=True,
),
}
return parameters
def on_200(self, session):
pass
def on_204(self, session):
pass
class _UnregisterHelper:
    """Helper class for Unregister"""


# Only the command class is part of this generated module's public API.
__all__ = ["Unregister"]
7,432 | build updaters and get charts cached | """Handle submission of chart revisions."""
from http.client import RemoteDisconnected
from typing import Any, Dict, List
from urllib.error import URLError
import streamlit as st
from pydantic import BaseModel
from structlog import get_logger
import etl.grapher_model as gm
# from etl.chart_revision.v2.base import ChartUpdater
from apps.wizard.charts.utils import OWIDEnv
from apps.wizard.charts.variable_config import VariableConfig
from etl.chart_revision.v2.core import (
build_updaters_and_get_charts,
create_chart_comparison,
submit_chart_comparisons,
update_chart_config,
)
# Logger
log = get_logger()
def create_submission(variable_config: VariableConfig, schema_chart_config: Dict[str, Any]) -> "SubmissionConfig":
    """Create submission config.

    Builds the chart updaters and the list of charts affected by the variable
    mapping, renders a preview, and returns a ``SubmissionConfig``.  Fresh
    values are fetched when the user just submitted the variable mapping
    (clicked "Next"); otherwise the cached result is reused so that simple
    widget interactions (checkboxes, dropdowns) do not trigger a slow
    re-fetch.  This emulates the experience of having a "form".
    """
    submission = SubmissionConfig()
    if st.session_state.submitted_variables:
        with st.spinner("Retrieving charts to be updated. This can take up to 1 minute..."):
            try:
                log.info("chart_revision: building updaters and getting charts!")
                st.session_state.variable_mapping = variable_config.variable_mapping
                updaters, charts = METHOD_NAME(
                    variable_mapping=variable_config.variable_mapping,
                    schema_chart_config=schema_chart_config,
                    skip_slider_check_limit=variable_config.skip_slider_check_limit,
                )
            except (URLError, RemoteDisconnected) as e:
                # BUG FIX: previously displayed `e.__traceback__` (the repr of a
                # traceback object); show the exception itself instead.
                st.error(e)
            else:
                submission = SubmissionConfig(charts=charts, updaters=updaters)
    # Otherwise, get cached values
    else:
        try:
            log.info("chart_revision: building updaters and getting charts CACHED!")
            updaters, charts = METHOD_NAME(
                variable_mapping=st.session_state.variable_mapping,
                schema_chart_config=schema_chart_config,
                skip_slider_check_limit=variable_config.skip_slider_check_limit,
            )
        except (URLError, RemoteDisconnected) as e:
            # BUG FIX: see above — show the exception, not its traceback attribute.
            st.error(e)
        else:
            submission = SubmissionConfig(charts=charts, updaters=updaters)
            # st.session_state.charts_obtained = True

    # If we managed to get the charts and updaters, show results.
    if submission.is_valid:
        log.info(f"chart_revision: Submission is valid: {submission}")
        # Display details
        num_charts = len(charts)  # type: ignore
        with st.container():
            st.info(f"""Number of charts to be updated: {num_charts}""")
            with st.expander("🔎 Show variable id mapping"):
                st.write(variable_config.variable_mapping)
            with st.expander("📊 Show affected charts (before update)"):
                st.warning("Charts that are not public at ourworldindata.org will not be rendered correctly.")
                for chart in charts:  # type: ignore
                    slug = chart.config["slug"]
                    st.markdown(
                        f"""<iframe src="https://ourworldindata.org/grapher/{slug}" loading="lazy" style="width: 100%; height: 600px; border: 0px none;"></iframe>""",
                        unsafe_allow_html=True,
                    )
        # Button to finally submit the revisions
        submitted_revisions = st.button(label="🚀 SUBMIT CHART REVISIONS", use_container_width=True, type="primary")
        if submitted_revisions:
            st.session_state.submitted_revisions = True
            log.info(
                f"{st.session_state.submitted_datasets}, {st.session_state.submitted_variables}, {st.session_state.submitted_revisions}"
            )
        else:
            st.session_state.submitted_revisions = False
        st.divider()
    st.session_state.submitted_variables = False
    return submission
def push_submission(submission_config: "SubmissionConfig", owid_env: OWIDEnv) -> None:
    """Push submissions to the database.

    Builds one chart comparison (old vs. updated config) per chart, submits
    them all, and reports the outcome in the UI with links to the approval tool.
    """
    # Create chart comparisons
    progress_text = "Submitting chart revisions..."
    bar = st.progress(0, progress_text)
    comparisons = []
    for i, chart in enumerate(submission_config.charts):
        log.info(f"chart_revision: creating comparison for chart {chart.id}")
        # Update chart config
        config_new = update_chart_config(chart.config, submission_config.updaters)
        # Create chart comparison and add to list
        comparison = create_chart_comparison(chart.config, config_new)
        comparisons.append(comparison)
        # Show progress bar
        percent_complete = int(100 * (i + 1) / submission_config.num_charts)
        bar.progress(percent_complete, text=f"{progress_text} {percent_complete}%")
    # Submit chart comparisons
    try:
        submit_chart_comparisons(comparisons)
    except Exception as e:
        st.error(f"Something went wrong! {e}")
    else:
        st.balloons()
        # When the environment cannot be identified, list every known approval-tool URL.
        if owid_env.env_type_id == "unknown":
            live_link = "https://owid.cloud/admin/suggested-chart-revisions/review"
            staging_link = "https://staging.owid.cloud/admin/suggested-chart-revisions/review"
            local_link = "http://localhost:3030/admin/suggested-chart-revisions/review"
            st.success(
                f"""
            Chart revisions submitted successfully!
            Now review these at the approval tool:
            - [Live]({live_link})
            - [Staging]({staging_link})
            - [Local]({local_link})
            """
            )
        else:
            st.success(
                f"Chart revisions submitted successfully! Now review these at the [approval tool]({owid_env.chart_approval_tool_url})!"
            )
@st.cache_data(show_spinner=False)
def METHOD_NAME(variable_mapping, schema_chart_config, skip_slider_check_limit):
    """Cached wrapper around ``build_updaters_and_get_charts``.

    Streamlit memoises the result keyed on the arguments, so repeated UI
    interactions with the same variable mapping do not re-fetch the charts.
    """
    # st.write(variable_mapping)
    return build_updaters_and_get_charts(
        variable_mapping=variable_mapping,
        schema_chart_config=schema_chart_config,
        skip_slider_check_limit=skip_slider_check_limit,
    )
class SubmissionConfig(BaseModel):
    """Charts and updaters of a chart-revision submission ("Form 1")."""

    is_valid: bool = False
    charts: List[gm.Chart]
    updaters: List[Any]

    def __init__(self, **data: Any) -> None:
        """Constructor.

        The submission is valid only when both ``charts`` and ``updaters``
        were explicitly provided.  BUG FIX: the previous implementation
        checked membership *after* inserting the defaults, so every instance
        — including the empty placeholder — was marked valid.
        """
        data["is_valid"] = "charts" in data and "updaters" in data
        data.setdefault("charts", [])
        data.setdefault("updaters", [])
        super().__init__(**data)

    @property
    def num_charts(self) -> int:
        """Number of charts in the submission."""
        if self.charts is not None:
            return len(self.charts)
        raise ValueError("Charts have not been set yet! Invalid submission configuration.")
# -*- coding: utf-8 -*-
# Copyright 2023 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import abc
from typing import Awaitable, Callable, Dict, Optional, Sequence, Union
import google.api_core
from google.api_core import exceptions as core_exceptions
from google.api_core import gapic_v1
from google.api_core import retry as retries
import google.auth # type: ignore
from google.auth import credentials as ga_credentials # type: ignore
from google.longrunning import operations_pb2
from google.oauth2 import service_account # type: ignore
from google.cloud.talent_v4 import gapic_version as package_version
from google.cloud.talent_v4.types import completion_service
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
gapic_version=package_version.__version__
)
class CompletionTransport(abc.ABC):
    """Abstract transport class for Completion.

    Concrete subclasses supply the actual RPC plumbing; this base resolves
    credentials and precomputes retry/timeout-wrapped methods.
    """

    AUTH_SCOPES = (
        "https://www.googleapis.com/auth/cloud-platform",
        "https://www.googleapis.com/auth/jobs",
    )

    DEFAULT_HOST: str = "jobs.googleapis.com"

    def __init__(
        self,
        *,
        host: str = DEFAULT_HOST,
        credentials: Optional[ga_credentials.Credentials] = None,
        credentials_file: Optional[str] = None,
        scopes: Optional[Sequence[str]] = None,
        quota_project_id: Optional[str] = None,
        client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
        always_use_jwt_access: Optional[bool] = False,
        api_audience: Optional[str] = None,
        **kwargs,
    ) -> None:
        """Instantiate the transport.

        Args:
            host (Optional[str]):
                The hostname to connect to.
            credentials (Optional[google.auth.credentials.Credentials]): The
                authorization credentials to attach to requests. These
                credentials identify the application to the service; if none
                are specified, the client will attempt to ascertain the
                credentials from the environment.
            credentials_file (Optional[str]): A file with credentials that can
                be loaded with :func:`google.auth.load_credentials_from_file`.
                This argument is mutually exclusive with credentials.
            scopes (Optional[Sequence[str]]): A list of scopes.
            quota_project_id (Optional[str]): An optional project to use for billing
                and quota.
            client_info (google.api_core.gapic_v1.client_info.ClientInfo):
                The client info used to send a user-agent string along with
                API requests. If ``None``, then default info will be used.
                Generally, you only need to set this if you're developing
                your own client library.
            always_use_jwt_access (Optional[bool]): Whether self signed JWT should
                be used for service account credentials.
            api_audience (Optional[str]): Intended audience applied to the
                credentials when they support GDCH audiences; defaults to ``host``.
        """
        scopes_kwargs = {"scopes": scopes, "default_scopes": self.AUTH_SCOPES}

        # Save the scopes.
        self._scopes = scopes

        # If no credentials are provided, then determine the appropriate
        # defaults.
        if credentials and credentials_file:
            raise core_exceptions.DuplicateCredentialArgs(
                "'credentials_file' and 'credentials' are mutually exclusive"
            )

        if credentials_file is not None:
            credentials, _ = google.auth.load_credentials_from_file(
                credentials_file, **scopes_kwargs, quota_project_id=quota_project_id
            )
        elif credentials is None:
            credentials, _ = google.auth.default(
                **scopes_kwargs, quota_project_id=quota_project_id
            )
            # Don't apply audience if the credentials file passed from user.
            if hasattr(credentials, "with_gdch_audience"):
                credentials = credentials.with_gdch_audience(
                    api_audience if api_audience else host
                )

        # If the credentials are service account credentials, then always try to use self signed JWT.
        if (
            always_use_jwt_access
            and isinstance(credentials, service_account.Credentials)
            and hasattr(service_account.Credentials, "with_always_use_jwt_access")
        ):
            credentials = credentials.with_always_use_jwt_access(True)

        # Save the credentials.
        self._credentials = credentials

        # Save the hostname. Default to port 443 (HTTPS) if none is specified.
        if ":" not in host:
            host += ":443"
        self._host = host

    def _prep_wrapped_messages(self, client_info):
        # Precompute the wrapped methods: CompleteQuery retries on transient
        # errors (DeadlineExceeded / ServiceUnavailable) with 30s deadline.
        self._wrapped_methods = {
            self.complete_query: gapic_v1.method.wrap_method(
                self.complete_query,
                default_retry=retries.Retry(
                    initial=0.1,
                    maximum=60.0,
                    multiplier=1.3,
                    predicate=retries.if_exception_type(
                        core_exceptions.DeadlineExceeded,
                        core_exceptions.ServiceUnavailable,
                    ),
                    deadline=30.0,
                ),
                default_timeout=30.0,
                client_info=client_info,
            ),
        }

    def METHOD_NAME(self):
        """Closes resources associated with the transport.

        .. warning::
            Only call this method if the transport is NOT shared
            with other clients - this may cause errors in other clients!
        """
        raise NotImplementedError()

    @property
    def complete_query(
        self,
    ) -> Callable[
        [completion_service.CompleteQueryRequest],
        Union[
            completion_service.CompleteQueryResponse,
            Awaitable[completion_service.CompleteQueryResponse],
        ],
    ]:
        # Subclasses return a sync or async callable for CompleteQuery.
        raise NotImplementedError()

    @property
    def get_operation(
        self,
    ) -> Callable[
        [operations_pb2.GetOperationRequest],
        Union[operations_pb2.Operation, Awaitable[operations_pb2.Operation]],
    ]:
        # Subclasses return the callable for the GetOperation method.
        raise NotImplementedError()

    @property
    def kind(self) -> str:
        # Short identifier of the concrete transport; provided by subclasses.
        raise NotImplementedError()
__all__ = ("CompletionTransport",)
## @package concat
# Module caffe2.python.layers.concat
from caffe2.python import schema
from caffe2.python.layers.layers import (
ModelLayer,
)
import numpy as np
from collections import defaultdict
import logging
logger = logging.getLogger(__name__)
def METHOD_NAME(blobs_to_concat):
    """Merge the feature_to_index maps of the scalars being concatenated.

    Each scalar's per-feature indices are shifted by the running offset of the
    concatenated output.  Returns a plain dict, or None when no scalar carried
    a feature_to_index mapping.
    """
    merged = defaultdict(list)
    offset = 0
    for scalar in blobs_to_concat:
        width = scalar.dtype.shape[0]
        specs = getattr(getattr(scalar, 'metadata', None), 'feature_specs', None)
        feature_to_index = getattr(specs, 'feature_to_index', None)
        if isinstance(feature_to_index, dict):
            for feature, indices in feature_to_index.items():
                merged[feature].extend(offset + idx for idx in indices)
        offset += width
    return dict(merged) if merged else None
class Concat(ModelLayer):
    """
    Construct Concat layer
    Assume that first dimension is batch,

    Example:

        embedding_dim = 64
        input_record = self.new_record(schema.Struct(
            ('input1', schema.Scalar((np.float32, (embedding_dim, )))),
            ('input2', schema.Scalar((np.float32, (embedding_dim, )))),
            ('input3', schema.Scalar((np.float32, (embedding_dim, )))),
        ))
        output = self.model.Concat(input_record)
        self.assertEqual(
            schema.Scalar((np.float32, ((len(input_record.fields) * embedding_dim, )))),
            output
        )

        # Note that in Concat layer we assume first dimension is batch.
        # so input is B * embedding_dim
        # add_axis=1 make it B * 1 * embedding_dim
        # Concat on axis=1 make it B * N * embedding_dim
        output = self.model.Concat(input_record, axis=1, add_axis=1)
        self.assertEqual(
            schema.Scalar((np.float32, ((len(input_record.fields), embedding_dim)))),
            output
        )
    """

    def __init__(self, model, input_record, axis=1, add_axis=0,
                 name='concat', **kwargs):
        super().__init__(model, name, input_record, **kwargs)
        self.axis = axis
        self.add_axis = add_axis
        assert not (axis == 0 and add_axis == 1), \
            "It's not allowed to add axis=0"
        assert isinstance(input_record, schema.Struct),\
            "Incorrect input type. Expected Struct, but received: {0}".\
            format(input_record)

        shapes = []
        for field_name, field_type in input_record.fields.items():
            assert isinstance(field_type, schema.Scalar),\
                "Incorrect input type for {}. Expected Scalar, but got: {}".\
                format(field_name, field_type)
            # Assume that first dimension is batch, so actual axis in shape is
            # axis - 1
            shape = list(field_type.field_type().shape)
            if add_axis:
                shape.insert(axis - 1, 1)
            assert len(shape) >= axis,\
                "Concat expects that limited dimensions of the input tensor"
            shapes.append(shape)
        logger.info('Concat Layer input shapes: ' + str(shapes))

        if axis == 0:
            # Batch-axis concat: output schema is the first input's schema
            # bound to a fresh output blob.
            self.output_schema = schema.from_blob_list(
                input_record[0],
                [self.get_next_blob_reference('output')]
            )
            return

        concat_dim = 0
        for shape in shapes:
            concat_dim += shape[axis - 1]
            # Zero the concat axis so the remaining dims can be compared for
            # compatibility against the first shape.
            shape[axis - 1] = 0
            assert shape == shapes[0],\
                "Shapes {0} and {1} are not compatible for Concat".\
                format(shape, shapes[0])
        output_dims = shapes[0]
        output_dims[axis - 1] = concat_dim

        logger.info('Concat Layer output_dims: ' + str(output_dims))
        self.output_schema = schema.Scalar(
            (np.float32, output_dims),
            self.get_next_blob_reference('output'))

        # Carry per-feature index metadata across the concatenation, with
        # indices shifted to the concatenated coordinate space.
        record_to_concat = input_record.fields.values()
        concated_feature_to_index = METHOD_NAME(
            record_to_concat
        )
        if concated_feature_to_index:
            metadata = schema.Metadata(
                feature_specs=schema.FeatureSpec(
                    feature_to_index=concated_feature_to_index
                )
            )
            self.output_schema.set_metadata(metadata)

    def add_ops(self, net):
        net.Concat(
            self.input_record.field_blobs(),
            [
                self.output_schema.field_blobs()[0],
                # Second output holds the per-input sizes along the concat axis.
                self.output_schema.field_blobs()[0] + "_concat_dims"
            ],
            axis=self.axis,
            add_axis=self.add_axis,
        )
__all__ = [
    # BUG FIX: a missing comma after 'StorageLinkSerializer' previously fused
    # it with 'LocFileSerializer' via implicit string concatenation.
    'StorageLinkSerializer',
    'LocFileSerializer',
    'AccFileSerializer',
    'ReinsInfoFileSerializer',
    'ReinsScopeFileSerializer',
    'AnalysisSettingsSerializer',
    'ModelParametersSerializer',
]
import json
from rest_framework import serializers
# import jsonschema
from jsonschema.exceptions import ValidationError as JSONSchemaValidationError
from jsonschema.exceptions import SchemaError as JSONSchemaError
from ods_tools.oed.setting_schema import ModelSettingSchema, AnalysisSettingSchema
from ods_tools.oed.common import OdsException
class TokenObtainPairResponseSerializer(serializers.Serializer):
    """Read-only schema of the token-obtain response (refresh/access pair)."""

    refresh_token = serializers.CharField(read_only=True)
    access_token = serializers.CharField(read_only=True)
    token_type = serializers.CharField(read_only=True, default="Bearer")
    expires_in = serializers.IntegerField(read_only=True, default=86400)

    def create(self, validated_data):
        # Read-only serializer: object creation is unsupported.
        raise NotImplementedError()

    def update(self, instance, validated_data):
        # Read-only serializer: object updates are unsupported.
        raise NotImplementedError()
class TokenRefreshResponseSerializer(serializers.Serializer):
    """Read-only schema of the token-refresh response."""

    access_token = serializers.CharField()
    token_type = serializers.CharField()
    expires_in = serializers.IntegerField()

    def create(self, validated_data):
        # Read-only serializer: object creation is unsupported.
        raise NotImplementedError()

    def update(self, instance, validated_data):
        # Read-only serializer: object updates are unsupported.
        raise NotImplementedError()
class StorageLinkSerializer(serializers.Serializer):
    """Names of the four OED input files linked to a storage location."""

    accounts_file = serializers.CharField()
    location_file = serializers.CharField()
    reinsurance_info_file = serializers.CharField()
    reinsurance_scope_file = serializers.CharField()

    def create(self, validated_data):
        # Representation-only serializer: creation is unsupported.
        raise NotImplementedError()

    def update(self, instance, validated_data):
        # Representation-only serializer: updates are unsupported.
        raise NotImplementedError()
class LocFileSerializer(serializers.Serializer):
    """Reference to a location file: uri, display name and stored identifier."""

    uri = serializers.URLField()
    name = serializers.CharField()
    stored = serializers.CharField()

    def create(self, validated_data):
        # Representation-only serializer: creation is unsupported.
        raise NotImplementedError()

    def update(self, instance, validated_data):
        # Representation-only serializer: updates are unsupported.
        raise NotImplementedError()
class AccFileSerializer(serializers.Serializer):
    """Reference to an accounts file: uri, display name and stored identifier."""

    uri = serializers.URLField()
    name = serializers.CharField()
    stored = serializers.CharField()

    def create(self, validated_data):
        # Representation-only serializer: creation is unsupported.
        raise NotImplementedError()

    def update(self, instance, validated_data):
        # Representation-only serializer: updates are unsupported.
        raise NotImplementedError()
class ReinsInfoFileSerializer(serializers.Serializer):
    """Reference to a reinsurance-info file: uri, name and stored identifier."""

    uri = serializers.URLField()
    name = serializers.CharField()
    stored = serializers.CharField()

    def create(self, validated_data):
        # Representation-only serializer: creation is unsupported.
        raise NotImplementedError()

    def update(self, instance, validated_data):
        # Representation-only serializer: updates are unsupported.
        raise NotImplementedError()
class ReinsScopeFileSerializer(serializers.Serializer):
    """Reference to a reinsurance-scope file: uri, name and stored identifier."""

    uri = serializers.URLField()
    name = serializers.CharField()
    stored = serializers.CharField()

    def create(self, validated_data):
        # Representation-only serializer: creation is unsupported.
        raise NotImplementedError()

    def update(self, instance, validated_data):
        # Representation-only serializer: updates are unsupported.
        raise NotImplementedError()
def update_links(link_prefix, d):
    """
    Linking in pre-defined schemas with path links will be nested
    into the overall swagger schema, breaking preset links.
    Remap based on 'link_prefix' value:
    '#definitions/option' -> #definitions/SWAGGER_OBJECT/definitions/option
    """
    for k, v in d.items():
        if isinstance(v, dict):
            update_links(link_prefix, v)
        elif isinstance(v, list):
            for el in v:
                if isinstance(el, dict):
                    update_links(link_prefix, el)
        else:
            # BUG FIX: the previous test `k in '$ref'` was a substring check,
            # so any key that is a substring of "$ref" (e.g. 'r', 'ref', '$')
            # was rewritten too.  Compare for equality instead.
            if k == '$ref':
                link = v.split('#')[-1]
                d[k] = "{}{}".format(link_prefix, link)
def METHOD_NAME(schema, link_prefix=None):
    """
    Load a json schema stored in the .schema dir.

    When ``link_prefix`` is given, the schema's internal '$ref' links are
    remapped in place under that prefix before the schema is returned.
    """
    if not link_prefix:
        return schema
    update_links(link_prefix, schema)
    return schema
class JsonSettingsSerializer(serializers.Serializer):
    """Base serializer that validates a JSON payload against an ods_tools schema.

    Subclasses must set ``self.schemaClass`` to a schema object exposing
    ``validate(data, raise_error=False) -> (valid, errors)``.
    """

    def to_internal_value(self, data):
        # Pass the parsed payload through untouched.
        return data

    def validate_json(self, data):
        """Validate ``data`` against ``self.schemaClass``; return it JSON-serialized."""
        try:
            valid, errors = self.schemaClass.validate(data, raise_error=False)
            if not valid:
                raise serializers.ValidationError(errors)
        except (JSONSchemaValidationError, JSONSchemaError, OdsException) as e:
            raise serializers.ValidationError(e.message)
        return self.to_internal_value(json.dumps(data))
class ModelParametersSerializer(JsonSettingsSerializer):
    """Validates posted model-settings JSON against the ods_tools ModelSettingSchema."""

    class Meta:
        # Embed the JSON schema in the generated swagger docs, remapping its
        # internal '$ref' links under '#/definitions/ModelSettings'.
        swagger_schema_fields = METHOD_NAME(
            schema=ModelSettingSchema().schema,
            link_prefix='#/definitions/ModelSettings'
        )

    def __init__(self, *args, **kwargs):
        super(ModelParametersSerializer, self).__init__(*args, **kwargs)
        # NOTE(review): attribute name 'filenmame' is misspelled but kept
        # as-is — external code may read it; confirm before renaming.
        self.filenmame = 'model_settings.json'  # Store POSTED JSON using this fname
        self.schemaClass = ModelSettingSchema()

    def validate(self, data):
        """Validate the posted JSON against the model-settings schema."""
        return super(ModelParametersSerializer, self).validate_json(data)
class AnalysisSettingsSerializer(JsonSettingsSerializer):
    """Validates posted analysis-settings JSON against the ods_tools AnalysisSettingSchema."""

    class Meta:
        # Embed the JSON schema in the generated swagger docs, remapping its
        # internal '$ref' links under '#/definitions/AnalysisSettings'.
        swagger_schema_fields = METHOD_NAME(
            schema=AnalysisSettingSchema().schema,
            link_prefix='#/definitions/AnalysisSettings'
        )

    def __init__(self, *args, **kwargs):
        super(AnalysisSettingsSerializer, self).__init__(*args, **kwargs)
        # NOTE(review): attribute name 'filenmame' is misspelled but kept
        # as-is — external code may read it; confirm before renaming.
        self.filenmame = 'analysis_settings.json'  # Store POSTED JSON using this fname
        self.schemaClass = AnalysisSettingSchema()

    def validate(self, data):
        """Upgrade legacy payloads for compatibility, then validate against the schema."""
        data = self.schemaClass.compatibility(data)
        return super(AnalysisSettingsSerializer, self).validate_json(data)
#!/usr/bin/env python3
# Created by Shuffle, AS. <frikky@shuffler.io>.
# Based on the Slack integration using Webhooks
import json
import sys
import time
import os
try:
import requests
from requests.auth import HTTPBasicAuth
except Exception as e:
print("No module 'requests' found. Install: pip install requests")
sys.exit(1)
# ADD THIS TO ossec.conf configuration:
# <integration>
# <name>custom-shuffle</name>
# <hook_url>http://<IP>:3001/api/v1/hooks/<HOOK_ID></hook_url>
# <level>3</level>
# <alert_format>json</alert_format>
# </integration>
# Global vars
debug_enabled = False
pwd = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
json_alert = {}
now = time.strftime("%a %b %d %H:%M:%S %Z %Y")
# Set paths
log_file = '{0}/logs/integrations.log'.format(pwd)
# Best-effort marker so operators can tell the integration was invoked at all;
# failures (e.g. permissions) are deliberately ignored.
try:
    with open("/tmp/shuffle_start.txt", "w+") as tmp:
        tmp.write("Script started")
except:
    pass
def main(args):
    """Read the alert file and forward a generated message to the Shuffle webhook.

    Follows the Wazuh integration argument convention:
    args[1] = alert file location, args[3] = webhook URL.
    """
    debug("# Starting")
    # Read args
    alert_file_location = args[1]
    webhook = args[3]
    debug("# Webhook")
    debug(webhook)
    debug("# File location")
    debug(alert_file_location)
    # Load alert. Parse JSON object.
    try:
        with open(alert_file_location) as alert_file:
            json_alert = json.load(alert_file)
    except:
        debug("# Alert file %s doesn't exist" % alert_file_location)
    debug("# Processing alert")
    try:
        # json_alert is unbound here if the file could not be read above,
        # which is what triggers the exit path below.
        debug(json_alert)
    except Exception as e:
        debug("Failed getting json_alert %s" % e)
        sys.exit(1)
    debug("# Generating message")
    msg = METHOD_NAME(json_alert)
    if isinstance(msg, str):
        if len(msg) == 0:
            # Alert was filtered out (self-recursive rule); nothing to send.
            return
    debug(msg)
    debug("# Sending message")
    # Best-effort end-of-run marker; ignore failures.
    try:
        with open("/tmp/shuffle_end.txt", "w+") as tmp:
            tmp.write("Script done pre-msg sending")
    except:
        pass
    send_msg(msg, webhook)
def debug(msg):
    """Echo ``msg`` and append it to the integrations log when debugging is enabled."""
    if debug_enabled:
        msg = "{0}: {1}\n".format(now, msg)
        print(msg)
        # Context manager guarantees the handle is closed even if the write fails
        # (the previous open/write/close left the file open on error).
        with open(log_file, "a") as f:
            f.write(msg)
# Skips container kills to stop self-recursion
def filter_msg(alert):
    """Return True when the alert should be forwarded.

    Rules in the skip set fire recursively because Shuffle itself starts
    Docker containers, so forwarding them would loop forever.
    """
    recursive_rule_ids = {
        "87924", "87900", "87901", "87902", "87903", "87904",
        "86001", "86002", "86003", "87932", "80710", "87929",
        "87928", "5710",
    }
    return alert["rule"]["id"] not in recursive_rule_ids
def METHOD_NAME(alert):
    """Build the JSON payload forwarded to Shuffle from a Wazuh alert dict.

    Returns "" when the alert is filtered out (self-recursive rules);
    otherwise a JSON string with a 1-3 severity mapped from the rule level,
    rule metadata, and the complete alert under "all_fields".
    """
    if not filter_msg(alert):
        print("Skipping rule %s" % alert["rule"]["id"])
        return ""

    # Map Wazuh rule levels: <=4 -> 1, 5-7 -> 2, >=8 -> 3.
    level = alert['rule']['level']
    if (level <= 4):
        severity = 1
    elif (level >= 5 and level <= 7):
        severity = 2
    else:
        severity = 3

    msg = {}
    msg['severity'] = severity
    msg['pretext'] = "WAZUH Alert"
    msg['title'] = alert['rule']['description'] if 'description' in alert['rule'] else "N/A"
    msg['text'] = alert.get('full_log')
    msg['rule_id'] = alert["rule"]["id"]
    msg['timestamp'] = alert["timestamp"]
    msg['id'] = alert['id']
    msg["all_fields"] = alert

    #msg['fields'] = []
    # msg['fields'].append({
    #     "title": "Agent",
    #     "value": "({0}) - {1}".format(
    #         alert['agent']['id'],
    #         alert['agent']['name']
    #     ),
    # })
    #if 'agentless' in alert:
    #    msg['fields'].append({
    #        "title": "Agentless Host",
    #        "value": alert['agentless']['host'],
    #    })
    #msg['fields'].append({"title": "Location", "value": alert['location']})
    #msg['fields'].append({
    #    "title": "Rule ID",
    #    "value": "{0} _(Level {1})_".format(alert['rule']['id'], level),
    #})
    #attach = {'attachments': [msg]}
    return json.dumps(msg)
def send_msg(msg, url):
    """POST the JSON message to the Shuffle webhook ``url``."""
    debug("# In send msg")
    headers = {'content-type': 'application/json', 'Accept-Charset': 'UTF-8'}
    # NOTE(review): verify=False disables TLS certificate validation — fine for
    # a self-hosted Shuffle on a trusted network, otherwise enable verification.
    res = requests.post(url, data=msg, headers=headers, verify=False)
    debug("# After send msg: %s" % res)
if __name__ == "__main__":
    try:
        # Read arguments
        bad_arguments = False
        if len(sys.argv) >= 4:
            msg = '{0} {1} {2} {3} {4}'.format(
                now,
                sys.argv[1],
                sys.argv[2],
                sys.argv[3],
                sys.argv[4] if len(sys.argv) > 4 else '',
            )
            #debug_enabled = (len(sys.argv) > 4 and sys.argv[4] == 'debug')
            debug_enabled = True
        else:
            msg = '{0} Wrong arguments'.format(now)
            bad_arguments = True
        # Logging the call
        # Create the log file on first use, then append the invocation record.
        try:
            f = open(log_file, 'a')
        except:
            f = open(log_file, 'w+')
            f.write("")
            f.close()
            f = open(log_file, 'a')
        f.write(msg + '\n')
        f.close()
        if bad_arguments:
            debug("# Exiting: Bad arguments. Inputted: %s" % sys.argv)
            sys.exit(1)
        # Main function
        main(sys.argv)
    except Exception as e:
        # Log and re-raise so Wazuh sees the failure exit status.
        debug(str(e))
        raise
import collections
import itertools
import pathlib
import operator
from . import abc
from ._itertools import only
from ._compat import ZipPath
def remove_duplicates(items):
    """Yield the elements of *items* in first-seen order, dropping repeats."""
    # OrderedDict.fromkeys keeps only the first occurrence of each key.
    unique = collections.OrderedDict.fromkeys(items)
    return iter(unique)
class FileReader(abc.TraversableResources):
    """TraversableResources backed by the directory containing a loader's file."""

    def __init__(self, loader):
        # Resources live next to the module file the loader points at.
        self.path = pathlib.Path(loader.path).parent

    def resource_path(self, resource):
        """
        Return the file system path to prevent
        `resources.path()` from creating a temporary
        copy.
        """
        return str(self.path.joinpath(resource))

    def files(self):
        # Root Traversable for this package's resources.
        return self.path
class ZipReader(abc.TraversableResources):
    """TraversableResources for modules loaded from a zip archive."""

    def __init__(self, loader, module):
        _, _, name = module.rpartition('.')
        # Resources live under "<loader prefix>/<module name>/" in the archive;
        # normalize backslashes so the prefix is a valid zip path.
        self.prefix = loader.prefix.replace('\\', '/') + name + '/'
        self.archive = loader.archive

    def open_resource(self, resource):
        try:
            return super().open_resource(resource)
        except KeyError as exc:
            # Normalize zipfile's KeyError to the FileNotFoundError callers expect.
            raise FileNotFoundError(exc.args[0])

    def is_resource(self, path):
        """
        Workaround for `zipfile.Path.is_file` returning true
        for non-existent paths.
        """
        target = self.files().joinpath(path)
        return target.METHOD_NAME() and target.exists()

    def files(self):
        return ZipPath(self.archive, self.prefix)
class MultiplexedPath(abc.Traversable):
    """
    Given a series of Traversable objects, implement a merged
    version of the interface across all objects. Useful for
    namespace packages which may be multihomed at a single
    name.
    """

    def __init__(self, *paths):
        self._paths = list(map(pathlib.Path, remove_duplicates(paths)))
        if not self._paths:
            message = 'MultiplexedPath must contain at least one path'
            raise FileNotFoundError(message)
        if not all(path.is_dir() for path in self._paths):
            raise NotADirectoryError('MultiplexedPath only supports directories')

    def iterdir(self):
        # Merge children from every underlying path, grouping same-named
        # entries so each name is yielded exactly once (see _follow).
        children = (child for path in self._paths for child in path.iterdir())
        by_name = operator.attrgetter('name')
        groups = itertools.groupby(sorted(children, key=by_name), key=by_name)
        return map(self._follow, (locs for name, locs in groups))

    def read_bytes(self):
        raise FileNotFoundError(f'{self} is not a file')

    def read_text(self, *args, **kwargs):
        raise FileNotFoundError(f'{self} is not a file')

    def is_dir(self):
        # Always a directory — enforced in __init__.
        return True

    def METHOD_NAME(self):
        # Never a file; only directories are multiplexed.
        return False

    def joinpath(self, *descendants):
        try:
            return super().joinpath(*descendants)
        except abc.TraversalError:
            # One of the paths did not resolve (a directory does not exist).
            # Just return something that will not exist.
            return self._paths[0].joinpath(*descendants)

    @classmethod
    def _follow(cls, children):
        """
        Construct a MultiplexedPath if needed.

        If children contains a sole element, return it.
        Otherwise, return a MultiplexedPath of the items.
        Unless one of the items is not a Directory, then return the first.
        """
        # tee: the group iterator can only be consumed once, but each fallback
        # branch needs its own pass over the same elements.
        subdirs, one_dir, one_file = itertools.tee(children, 3)
        try:
            return only(one_dir)
        except ValueError:
            try:
                return cls(*subdirs)
            except NotADirectoryError:
                return next(one_file)

    def open(self, *args, **kwargs):
        raise FileNotFoundError(f'{self} is not a file')

    @property
    def name(self):
        # All underlying paths share the same logical name; use the first.
        return self._paths[0].name

    def __repr__(self):
        paths = ', '.join(f"'{path}'" for path in self._paths)
        return f'MultiplexedPath({paths})'
class NamespaceReader(abc.TraversableResources):
    """TraversableResources for namespace packages (multiple search locations)."""

    def __init__(self, namespace_path):
        # Guard: only accept the _NamespacePath object namespace packages carry.
        if 'NamespacePath' not in str(namespace_path):
            raise ValueError('Invalid path')
        self.path = MultiplexedPath(*list(namespace_path))

    def resource_path(self, resource):
        """
        Return the file system path to prevent
        `resources.path()` from creating a temporary
        copy.
        """
        return str(self.path.joinpath(resource))

    def files(self):
        return self.path
#!/usr/bin/env python
"""Input / Output Routines for ROI files.
You can load the records sequentially one by one by iterating over a
RoiFile object:
f1 = ngs_roi.io.RoiFile('in.roi')
for r in f1:
print r.ref, r.start_pos, r.end_pos
f2 = ngs_roi.io.RoiFile('in.roi')
print f1.next().ref
print f1.next().ref
print f1.next().ref
Alternatively, you can load all or a subset of the records:
ten_records = ngs_roi.io.load('in.roi', 10) # first ten
all_records = ngs_roi.io.load('in.roi')
"""
__author__ = 'Manuel Holtgrewe <manuel.holtgrewe@fu-berlin.de>'
__copyright__ = 'Copyring 2013, Freie Universitaet Berlin'
__license__ = 'BSD 3-clause'
class RoiRecord(object):
    """A single record of a ROI file.

    :ivar ref: name of reference (str)
    :ivar start_pos: 0-based start position (int)
    :ivar end_pos: 0-based end position (int)
    :ivar region_name: name of the region (str)
    :ivar region_length: length of the region (int)
    :ivar strand: strand of the region, one of ['+', '-'] (str)
    :ivar max_count: highest coverage (int)
    :ivar data: values of the extended data fields (list of str)
    :ivar points: the coverage over the region length (list of int)
    :ivar data_keys: key names for the data fields (list of str, or None)
    """

    def __init__(self, ref, start_pos, end_pos, region_name, region_length,
                 strand, max_count, data, points, data_keys=None):
        """Store all record fields verbatim."""
        self.ref = ref
        self.start_pos = start_pos
        self.end_pos = end_pos
        self.region_name = region_name
        self.region_length = region_length
        self.strand = strand
        self.max_count = max_count
        self.data = data
        self.points = points
        self.data_keys = data_keys

    def __str__(self):
        # points can be long, so only its length is shown.
        return (
            f'RoiRecord({self.ref!r}, {self.start_pos}, {self.end_pos}, '
            f'{self.region_name}, {self.region_length}, {self.strand!r}, '
            f'{self.max_count}, {self.data}, len([...])=={len(self.points)}, '
            f'{self.data_keys})'
        )

    def __repr__(self):
        return str(self)
class RoiFile(object):
    """File of ROI records.

    Can be used as an iterator.

    :ivar data_keys: Keys of additional data
    :type data_keys: list of str
    """

    def __init__(self, path):
        """Open ROI file and load header."""
        self.path = path
        # BUG FIX: open in text mode.  The parser compares lines against str
        # literals ('', '#', '##'); with the previous 'rb' mode every line was
        # bytes, so the EOF check never matched and startswith('##') raised
        # TypeError under Python 3.
        self.f = open(path, 'r')
        self.line = None
        self.data_keys = self._loadKeys(path)

    def _loadKeys(self, path):
        """Load keys from the '##' header line, skipping '#' comments.

        Leaves the first record line in ``self.line``.  Returns the extra
        data-field names, or None when the file is empty / has no header.
        """
        while True:
            self.line = self.f.readline()
            if self.line == '':  # EOF
                return None
            if self.line.startswith('##'):
                # Columns 0-6 are the fixed ROI fields, the last column is the
                # points list; everything in between names the extra data.
                keys = self.line[1:].strip().split('\t')
                self.line = self.f.readline()
                return keys[7:-1]
            if self.line.startswith('#'):
                continue  # comment
            break

    def __iter__(self):
        """Return iterator (self)."""
        return self

    def __next__(self):
        """Return next record."""
        if self.line == '':  # EOF
            raise StopIteration
        l = self.line
        self.line = self.f.readline()
        return self.METHOD_NAME(l)

    def METHOD_NAME(self, line):
        """Build RoiRecord from ROI line."""
        vals = line.split()
        region_length = int(vals[4])
        data = vals[7:-1]
        points = [int(x) for x in vals[-1].split(',')]
        # Start position is converted from the file's 1-based coordinate.
        return RoiRecord(vals[0], int(vals[1]) - 1, int(vals[2]), vals[3],
                         region_length, vals[5], int(vals[6]), data, points,
                         self.data_keys)
def load(path, max_count=0):
    """Load ROI file and return it as a list of RoiRecord objects.

    ``max_count`` > 0 limits how many records are read; 0 loads everything.
    NA values are translated to 0.
    """
    records = []
    for index, record in enumerate(RoiFile(path)):
        if 0 < max_count <= index:
            break
        records.append(record)
    return records
# Copyright (c) 2017 Linaro Limited.
#
# SPDX-License-Identifier: Apache-2.0
'''Runner for pyOCD .'''
import os
from os import path
from runners.core import ZephyrBinaryRunner, RunnerCaps, BuildConfiguration
DEFAULT_PYOCD_GDB_PORT = 3333
DEFAULT_PYOCD_TELNET_PORT = 4444
class PyOcdBinaryRunner(ZephyrBinaryRunner):
'''Runner front-end for pyOCD.'''
    def __init__(self, cfg, target,
                 pyocd='pyocd',
                 dev_id=None, flash_addr=0x0, erase=False, flash_opts=None,
                 gdb_port=DEFAULT_PYOCD_GDB_PORT,
                 telnet_port=DEFAULT_PYOCD_TELNET_PORT, tui=False,
                 pyocd_config=None,
                 daparg=None, frequency=None, tool_opt=None):
        """Build a pyOCD runner from the runner config and pyocd options.

        Pre-computes the pyocd command-line argument fragments used by the
        flash/debug commands.
        """
        super().__init__(cfg)

        # Prefer a board-provided pyocd.yaml when one exists.
        # NOTE(review): the `pyocd_config` parameter is accepted but never
        # used below — the board support file always wins; confirm intended.
        default = path.join(cfg.board_dir, 'support', 'pyocd.yaml')
        if path.exists(default):
            self.pyocd_config = default
        else:
            self.pyocd_config = None

        self.target_args = ['-t', target]
        self.pyocd = pyocd
        # Only pass -a when a non-zero flash address was configured.
        self.flash_addr_args = ['-a', hex(flash_addr)] if flash_addr else []
        self.erase = erase
        self.gdb_cmd = [cfg.gdb] if cfg.gdb is not None else None
        self.gdb_port = gdb_port
        self.telnet_port = telnet_port
        self.tui_args = ['-tui'] if tui else []
        self.hex_name = cfg.hex_file
        self.bin_name = cfg.bin_file
        self.elf_name = cfg.elf_file

        pyocd_config_args = []
        if self.pyocd_config is not None:
            pyocd_config_args = ['--config', self.pyocd_config]
        self.pyocd_config_args = pyocd_config_args

        board_args = []
        if dev_id is not None:
            board_args = ['-u', dev_id]
        self.board_args = board_args

        daparg_args = []
        if daparg is not None:
            daparg_args = ['-da', daparg]
        self.daparg_args = daparg_args

        frequency_args = []
        if frequency is not None:
            frequency_args = ['-f', frequency]
        self.frequency_args = frequency_args

        self.tool_opt_args = tool_opt or []
        self.flash_extra = flash_opts if flash_opts else []
@classmethod
def name(cls):
return 'pyocd'
@classmethod
def capabilities(cls):
return RunnerCaps(commands={'flash', 'debug', 'debugserver', 'attach'},
dev_id=True, flash_addr=True, erase=True,
tool_opt=True)
@classmethod
def dev_id_help(cls) -> str:
return '''Device identifier. Use it to select the probe's unique ID
or substring thereof.'''
@classmethod
def do_add_parser(cls, parser):
parser.add_argument('--target', required=True,
help='target override')
parser.add_argument('--daparg',
help='Additional -da arguments to pyocd tool')
parser.add_argument('--pyocd', default='pyocd',
help='path to pyocd tool, default is pyocd')
parser.add_argument('--flash-opt', default=[], action='append',
help='''Additional options for pyocd flash,
e.g. --flash-opt="-e=chip" to chip erase''')
parser.add_argument('--frequency',
help='SWD clock frequency in Hz')
parser.add_argument('--gdb-port', default=DEFAULT_PYOCD_GDB_PORT,
help='pyocd gdb port, defaults to {}'.format(
DEFAULT_PYOCD_GDB_PORT))
parser.add_argument('--telnet-port', default=DEFAULT_PYOCD_TELNET_PORT,
help='pyocd telnet port, defaults to {}'.format(
DEFAULT_PYOCD_TELNET_PORT))
parser.add_argument('--tui', default=False, action='store_true',
help='if given, GDB uses -tui')
parser.add_argument('--board-id', dest='dev_id',
help='obsolete synonym for -i/--dev-id')
@classmethod
def tool_opt_help(cls) -> str:
return """Additional options for pyocd commander,
e.g. '--script=user.py'"""
@classmethod
def METHOD_NAME(cls, cfg, args):
build_conf = BuildConfiguration(cfg.build_dir)
flash_addr = cls.get_flash_address(args, build_conf)
ret = PyOcdBinaryRunner(
cfg, args.target,
pyocd=args.pyocd,
flash_addr=flash_addr, erase=args.erase, flash_opts=args.flash_opt,
gdb_port=args.gdb_port, telnet_port=args.telnet_port, tui=args.tui,
dev_id=args.dev_id, daparg=args.daparg,
frequency=args.frequency,
tool_opt=args.tool_opt)
daparg = os.environ.get('PYOCD_DAPARG')
if not ret.daparg_args and daparg:
ret.logger.warning('PYOCD_DAPARG is deprecated; use --daparg')
ret.logger.debug('--daparg={} via PYOCD_DAPARG'.format(daparg))
ret.daparg_args = ['-da', daparg]
return ret
def port_args(self):
return ['-p', str(self.gdb_port), '-T', str(self.telnet_port)]
def do_run(self, command, **kwargs):
self.require(self.pyocd)
if command == 'flash':
self.flash(**kwargs)
else:
self.debug_debugserver(command, **kwargs)
def flash(self, **kwargs):
if self.hex_name is not None and os.path.isfile(self.hex_name):
fname = self.hex_name
elif self.bin_name is not None and os.path.isfile(self.bin_name):
self.logger.warning(
'hex file ({}) does not exist; falling back on .bin ({}). '.
format(self.hex_name, self.bin_name) +
'Consider enabling CONFIG_BUILD_OUTPUT_HEX.')
fname = self.bin_name
else:
raise ValueError(
'Cannot flash; no hex ({}) or bin ({}) files found. '.format(
self.hex_name, self.bin_name))
erase_method = 'chip' if self.erase else 'sector'
cmd = ([self.pyocd] +
['flash'] +
self.pyocd_config_args +
['-e', erase_method] +
self.flash_addr_args +
self.daparg_args +
self.target_args +
self.board_args +
self.frequency_args +
self.tool_opt_args +
self.flash_extra +
[fname])
self.logger.info('Flashing file: {}'.format(fname))
self.check_call(cmd)
def log_gdbserver_message(self):
self.logger.info('pyOCD GDB server running on port {}'.
format(self.gdb_port))
def debug_debugserver(self, command, **kwargs):
server_cmd = ([self.pyocd] +
['gdbserver'] +
self.daparg_args +
self.port_args() +
self.target_args +
self.board_args +
self.frequency_args +
self.tool_opt_args)
if command == 'debugserver':
self.log_gdbserver_message()
self.check_call(server_cmd)
else:
if self.gdb_cmd is None:
raise ValueError('Cannot debug; gdb is missing')
if self.elf_name is None:
raise ValueError('Cannot debug; elf is missing')
client_cmd = (self.gdb_cmd +
self.tui_args +
[self.elf_name] +
['-ex', 'target remote :{}'.format(self.gdb_port)])
if command == 'debug':
client_cmd += ['-ex', 'monitor halt',
'-ex', 'monitor reset',
'-ex', 'load']
self.require(client_cmd[0])
self.log_gdbserver_message()
self.run_server_and_client(server_cmd, client_cmd) |
7,440 | list list effective virtual network by network | # coding=utf-8
# *** WARNING: this file was generated by pulumi. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import copy
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
__all__ = [
'ListListEffectiveVirtualNetworkByNetworkGroupResult',
'AwaitableListListEffectiveVirtualNetworkByNetworkGroupResult',
'list_list_effective_virtual_network_by_network_group',
'list_list_effective_virtual_network_by_network_group_output',
]
@pulumi.output_type
class ListListEffectiveVirtualNetworkByNetworkGroupResult:
    """
    Result of the request to list Effective Virtual Network. It contains a list of groups and a URL link to get the next set of results.
    """
    # Generated pulumi output type: attribute values are stored via
    # pulumi.set/get rather than plain instance attributes.
    def __init__(__self__, skip_token=None, value=None):
        if skip_token and not isinstance(skip_token, str):
            raise TypeError("Expected argument 'skip_token' to be a str")
        pulumi.set(__self__, "skip_token", skip_token)
        if value and not isinstance(value, list):
            raise TypeError("Expected argument 'value' to be a list")
        pulumi.set(__self__, "value", value)

    @property
    @pulumi.getter(name="skipToken")
    def skip_token(self) -> Optional[str]:
        """
        When present, the value can be passed to a subsequent query call (together with the same query and scopes used in the current request) to retrieve the next page of data.
        """
        return pulumi.get(self, "skip_token")

    @property
    @pulumi.getter
    def value(self) -> Optional[Sequence['outputs.EffectiveVirtualNetworkResponse']]:
        """
        Gets a page of EffectiveVirtualNetwork
        """
        return pulumi.get(self, "value")
class AwaitableListListEffectiveVirtualNetworkByNetworkGroupResult(ListListEffectiveVirtualNetworkByNetworkGroupResult):
    # Awaitable wrapper so the result can be used with 'await'; the
    # generator body never actually suspends (the 'if False: yield' only
    # marks __await__ as a generator), it immediately returns a plain result.
    # pylint: disable=using-constant-test
    def __await__(self):
        if False:
            yield self
        return ListListEffectiveVirtualNetworkByNetworkGroupResult(
            skip_token=self.skip_token,
            value=self.value)
def list_list_effective_virtual_network_by_network_group(network_group_name: Optional[str] = None,
                                                         network_manager_name: Optional[str] = None,
                                                         resource_group_name: Optional[str] = None,
                                                         skip_token: Optional[str] = None,
                                                         opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableListListEffectiveVirtualNetworkByNetworkGroupResult:
    """
    Lists all effective virtual networks by specified network group.


    :param str network_group_name: The name of the network group.
    :param str network_manager_name: The name of the network manager.
    :param str resource_group_name: The name of the resource group.
    :param str skip_token: When present, the value can be passed to a subsequent query call (together with the same query and scopes used in the current request) to retrieve the next page of data.
    """
    # Marshal arguments into the camelCase names the provider API expects.
    __args__ = dict()
    __args__['networkGroupName'] = network_group_name
    __args__['networkManagerName'] = network_manager_name
    __args__['resourceGroupName'] = resource_group_name
    __args__['skipToken'] = skip_token
    opts = pulumi.InvokeOptions.merge(_utilities.get_invoke_opts_defaults(), opts)
    # Synchronous invoke against the Azure Native provider.
    __ret__ = pulumi.runtime.invoke('azure-native:network/v20220401preview:listListEffectiveVirtualNetworkByNetworkGroup', __args__, opts=opts, typ=ListListEffectiveVirtualNetworkByNetworkGroupResult).value

    return AwaitableListListEffectiveVirtualNetworkByNetworkGroupResult(
        skip_token=pulumi.get(__ret__, 'skip_token'),
        value=pulumi.get(__ret__, 'value'))
# Output-lifted variant: accepts pulumi Inputs/Outputs and returns an Output,
# delegating to the plain function above via lift_output_func (hence the
# '...' body -- the decorator supplies the implementation).
@_utilities.lift_output_func(list_list_effective_virtual_network_by_network_group)
def METHOD_NAME(network_group_name: Optional[pulumi.Input[str]] = None,
                network_manager_name: Optional[pulumi.Input[str]] = None,
                resource_group_name: Optional[pulumi.Input[str]] = None,
                skip_token: Optional[pulumi.Input[Optional[str]]] = None,
                opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[ListListEffectiveVirtualNetworkByNetworkGroupResult]:
    """
    Lists all effective virtual networks by specified network group.


    :param str network_group_name: The name of the network group.
    :param str network_manager_name: The name of the network manager.
    :param str resource_group_name: The name of the resource group.
    :param str skip_token: When present, the value can be passed to a subsequent query call (together with the same query and scopes used in the current request) to retrieve the next page of data.
    """
    ...
7,441 | default deduplicate | from functools import cache
import joblib
import numpy as np
import pandas as pd
import pytest
from scipy.cluster.hierarchy import linkage
from scipy.spatial.distance import squareform
from sklearn.utils._testing import assert_array_equal, skip_if_no_parallel
from skrub._deduplicate import (
_create_spelling_correction,
_guess_clusters,
compute_ngram_distance,
deduplicate,
)
from skrub.datasets import make_deduplication_data
@pytest.mark.parametrize(
    ["entries_per_category", "prob_mistake_per_letter"],
    [[[500, 100, 1500], 0.05], [[100, 100], 0.02], [[200, 50, 30, 200, 800], 0.01]],
)
def test_deduplicate(
    entries_per_category: list[int],
    prob_mistake_per_letter: float,
    seed: int = 123,
) -> None:
    """End-to-end check that deduplicate() recovers the clean categories
    from synthetically corrupted data, both with automatic and explicit
    cluster counts, and with an alternate analyzer."""
    rng = np.random.RandomState(seed)
    # hard coded to fix ground truth string similarities
    clean_categories = [
        "Example Category",
        "Generic",
        "Random Word",
        "Pretty similar category",
        "Final cluster",
    ]
    n_clusters = len(entries_per_category)
    clean_categories = clean_categories[:n_clusters]
    data = make_deduplication_data(
        clean_categories, entries_per_category, prob_mistake_per_letter, rng
    )
    # With n_clusters=None the number of clusters is guessed automatically.
    deduplicated_data = np.array(deduplicate(data, n_clusters=None))
    assert deduplicated_data.shape[0] == len(data)
    recovered_categories = np.unique(deduplicated_data)
    assert recovered_categories.shape[0] == n_clusters
    assert np.isin(clean_categories, recovered_categories).all()
    # Explicit cluster count: results must be consistent with a per-input
    # translation table (same noisy input always maps to the same output).
    deduplicated_data = deduplicate(data, n_clusters=n_clusters)
    translation_table = pd.Series(deduplicated_data, index=data)
    translation_table = translation_table[
        ~translation_table.index.duplicated(keep="first")
    ]
    assert np.isin(np.unique(deduplicated_data), recovered_categories).all()
    assert np.all(translation_table[data] == np.array(deduplicated_data))
    # A different n-gram analyzer should still land on the same categories.
    deduplicated_other_analyzer = np.array(
        deduplicate(data, n_clusters=n_clusters, analyzer="char")
    )
    unique_other_analyzer = np.unique(deduplicated_other_analyzer)
    assert np.isin(unique_other_analyzer, recovered_categories).all()
def test_compute_ngram_distance() -> None:
    """compute_ngram_distance yields a condensed distance matrix in which
    identical strings are at distance zero from each other."""
    samples = np.array(["aac", "aaa", "aaab", "aaa", "aaab", "aaa", "aaab", "aaa"])
    dist_matrix = squareform(compute_ngram_distance(samples))
    # Square matrix: one row per input word, zero self-distance.
    assert dist_matrix.shape[0] == samples.shape[0]
    assert np.allclose(np.diag(dist_matrix), 0)
    # Every pair of identical words must be at distance zero.
    for word in np.unique(samples):
        mask = samples == word
        assert np.allclose(dist_matrix[mask][:, mask], 0)
def test__guess_clusters() -> None:
    """_guess_clusters recovers the true number of distinct strings from an
    average-linkage dendrogram."""
    samples = np.array(["aac", "aaa", "aaab", "aaa", "aaab", "aaa", "aaab", "aaa"])
    condensed = compute_ngram_distance(samples)
    dendrogram = linkage(condensed, method="average")
    assert _guess_clusters(dendrogram, condensed) == len(np.unique(samples))
def test__create_spelling_correction(seed: int = 123) -> None:
    """_create_spelling_correction picks, per cluster, the sample with the
    highest count as the 'correct' spelling for every member of the cluster.

    :param seed: RNG seed fixing the random per-sample counts.
    """
    rng = np.random.RandomState(seed)
    n_clusters = 3
    samples_per_cluster = 10
    counts = np.concatenate(
        [rng.randint(0, 100, samples_per_cluster) for _ in range(n_clusters)]
    )
    clusters = (
        np.repeat(np.arange(n_clusters), samples_per_cluster).astype("int").tolist()
    )
    spelling_correction = _create_spelling_correction(
        counts.astype("str"),
        counts,
        clusters,
    )
    # Check that the most common sample per cluster is chosen as the 'correct' spelling.
    # BUG FIX: `clusters` is a plain Python list, so `clusters == n` evaluated
    # to the scalar False; indexing an ndarray with a boolean scalar selects an
    # *empty* array, and `.all()` on an empty array is True -- every assertion
    # below passed vacuously.  Build a real element-wise boolean mask instead.
    cluster_ids = np.asarray(clusters)
    for n in np.arange(n_clusters):
        mask = cluster_ids == n
        assert (
            spelling_correction.values[mask].astype("int") == counts[mask].max()
        ).all()
@cache
def METHOD_NAME(n: int = 500):
    """
    Create a default deduplication dataset.

    Memoized with functools.cache, so repeated calls with the same ``n``
    reuse the previously computed (X, y) pair.

    :param n: number of entries generated per example category.
    :return: tuple (X, y) of the noisy data and its deduplicated form.
    """
    # Pin the RNG so the generated corruptions -- and therefore the
    # deduplication result cached across tests -- are reproducible between
    # runs (consistent with test_backend_respected, which also fixes the seed).
    X = make_deduplication_data(
        examples=["black", "white", "red"],
        entries_per_example=[n, n, n],
        prob_mistake_per_letter=0.3,
        random_state=0,
    )
    y = deduplicate(X)
    return X, y
def test_parallelism() -> None:
    """Tests that parallelism works with different backends and n_jobs."""
    # The n_jobs=2 run must produce exactly the same labels as the cached
    # serial reference from the fixture above.
    X, y = METHOD_NAME(n=200)
    y_parallel = deduplicate(X, n_jobs=2)
    assert_array_equal(y, y_parallel)
# Capture whatever joblib backend class is active so DummyBackend inherits
# real behavior and only adds call counting.
DEFAULT_JOBLIB_BACKEND = joblib.parallel.get_active_backend()[0].__class__


class DummyBackend(DEFAULT_JOBLIB_BACKEND):  # type: ignore
    """
    A dummy backend used to check that specifying a backend works
    in deduplicate.
    The `count` attribute is used to check that the backend is used.

    Copied from https://github.com/scikit-learn/scikit-learn/blob/36958fb240fbe435673a9e3c52e769f01f36bec0/sklearn/ensemble/tests/test_forest.py # noqa
    """

    def __init__(self, *args, **kwargs):
        # Number of times joblib dispatched work through this backend.
        self.count = 0
        super().__init__(*args, **kwargs)

    def start_call(self):
        self.count += 1
        return super().start_call()


# Make the backend selectable by name in joblib.parallel_backend("testing").
joblib.register_parallel_backend("testing", DummyBackend)
@skip_if_no_parallel
def test_backend_respected():
    """
    Test that the joblib backend is used.
    Copied from https://github.com/scikit-learn/scikit-learn/blob/36958fb240fbe435673a9e3c52e769f01f36bec0/sklearn/ensemble/tests/test_forest.py # noqa
    """
    # Test that parallelism works
    X = make_deduplication_data(
        examples=["black", "white"], entries_per_example=[15, 15], random_state=0
    )

    deduplicate(X, n_jobs=2)

    # TODO: switch to joblib.parallel_config when we support joblib 1.3
    with joblib.parallel_backend("testing") as (ba, n_jobs):
        deduplicate(X, n_jobs=n_jobs)

    # The dummy backend increments 'count' on every dispatch, so a positive
    # count proves deduplicate actually routed work through it.
    assert ba.count > 0
7,442 | is expired | from threading import RLock
from typing import Dict, Optional
import maya
class TTLCache:
    """
    Thread-safe cache that stores keyed data with auto-expiring values via a time-to-live.

    Expired items are not proactively removed from the cache unless functionality
    necessitates it. Either specific items get proactively removed for example trying
    to access a keyed-value that is already expired, OR a wholistic purge occurs because
    consistent global state is needed, for example, the length of the cache queried or
    a list of key-value pairs requested.

    Expired entries can be forcibly purged at any time using the purge_expired() function.
    """

    class TTLEntry:
        """A cached value paired with its absolute expiration time."""

        def __init__(self, value: object, ttl: int):
            self._value = value
            self._expiration = maya.now().add(seconds=ttl)

        @property
        def value(self) -> Optional[object]:
            """
            Return the value if not expired, None otherwise
            """
            if self.METHOD_NAME():
                return None
            return self._value

        def METHOD_NAME(self) -> bool:
            """
            Return true if the entry has exceeded its time-to-live.
            """
            return self._expiration < maya.now()

    def __init__(self, ttl: int, *args, **kwargs):
        super().__init__(*args, **kwargs)
        if ttl <= 0:
            raise ValueError(f"Invalid time-to-live {ttl}")
        self.ttl = ttl
        self.__cache: Dict[object, TTLCache.TTLEntry] = {}
        # Re-entrant so public methods may call each other under the lock
        # (e.g. __len__ -> purge_expired).
        self.__cache_lock = RLock()

    def __setitem__(self, key, value):
        """
        Add the provided key entry to be the provided value.
        """
        # None is rejected, but falsy values (0, '', False, ...) are valid.
        if key is None or value is None:
            raise ValueError(f"Invalid key-value pair ({key}, {value})")
        with self.__cache_lock:
            self.__cache[key] = self.TTLEntry(value=value, ttl=self.ttl)

    def __getitem__(self, key):
        """
        Return the associated keyed item, else None if expired or not present.
        """
        with self.__cache_lock:
            ttl_entry = self.__cache.get(key)
            if ttl_entry is None:
                # no value stored
                return None
            # BUG FIX: expiry used to be detected via the *truthiness* of the
            # stored value, so legitimately falsy values (0, '', False, empty
            # containers) were treated as expired and evicted.  Check the
            # entry's expiration explicitly instead.
            if ttl_entry.METHOD_NAME():
                # entry is expired; evict it opportunistically
                del self.__cache[key]
                return None
            return ttl_entry.value

    def items(self):
        """
        Returns a copy of the cache's list of non-expired (key, value) pairs.
        """
        key_value_pairs = []
        with self.__cache_lock:
            # Iterate over a snapshot of the keys so expired entries can be
            # deleted while walking the cache.
            for key in list(self.__cache):
                ttl_entry = self.__cache[key]
                if ttl_entry.METHOD_NAME():
                    # expired entry, opportunity to remove it
                    del self.__cache[key]
                else:
                    key_value_pairs.append((key, ttl_entry.value))
        return key_value_pairs

    def pop(self, key, default=None):
        """
        Get item from the cache and remove it.
        Return default if expired or does not exist.
        """
        with self.__cache_lock:
            ttl_entry = self.__cache.get(key)
            if ttl_entry is None:
                return default
            # Remove the entry regardless of expiry; only the return value
            # depends on whether it was still live.
            del self.__cache[key]
            if ttl_entry.METHOD_NAME():
                # entry expired
                return default
            return ttl_entry.value

    def remove(self, key):
        """
        Remove keyed item from the cache.
        """
        with self.__cache_lock:
            if key in self.__cache:
                del self.__cache[key]

    def purge_expired(self):
        """
        Remove all expired items from the cache.
        """
        with self.__cache_lock:
            for key in list(self.__cache):
                entry = self.__cache[key]
                if entry.METHOD_NAME():
                    del self.__cache[key]

    def __len__(self):
        """
        Returns the current (non-expired entries) size of the cache.
        """
        with self.__cache_lock:
            # Purge first so the length reflects only live entries.
            self.purge_expired()
            return len(self.__cache)

    def clear(self):
        """
        Remove all items from the cache.
        """
        with self.__cache_lock:
            self.__cache.clear()
7,443 | get symbol | # Protocol Buffers - Google's data interchange format
# Copyright 2008 Google Inc. All rights reserved.
# https://developers.google.com/protocol-buffers/
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""A database of Python protocol buffer generated symbols.
SymbolDatabase is the MessageFactory for messages generated at compile time,
and makes it easy to create new instances of a registered type, given only the
type's protocol buffer symbol name.
Example usage:
db = symbol_database.SymbolDatabase()
# Register symbols of interest, from one or multiple files.
db.RegisterFileDescriptor(my_proto_pb2.DESCRIPTOR)
db.RegisterMessage(my_proto_pb2.MyMessage)
db.RegisterEnumDescriptor(my_proto_pb2.MyEnum.DESCRIPTOR)
# The database can be used as a MessageFactory, to generate types based on
# their name:
types = db.GetMessages(['my_proto.proto'])
my_message_instance = types['MyMessage']()
# The database's underlying descriptor pool can be queried, so it's not
# necessary to know a type's filename to be able to generate it:
filename = db.pool.FindFileContainingSymbol('MyMessage')
my_message_instance = db.GetMessages([filename])['MyMessage']()
# This functionality is also provided directly via a convenience method:
my_message_instance = db.GetSymbol('MyMessage')()
"""
from google.protobuf import descriptor_pool
from google.protobuf import message_factory
class SymbolDatabase(message_factory.MessageFactory):
  """A database of Python generated symbols."""

  def RegisterMessage(self, message):
    """Registers the given message type in the local database.

    Calls to GetSymbol() and GetMessages() will return messages registered here.

    Args:
      message: a message.Message, to be registered.

    Returns:
      The provided message.
    """
    desc = message.DESCRIPTOR
    self._classes[desc.full_name] = message
    self.pool.AddDescriptor(desc)
    return message

  def RegisterEnumDescriptor(self, enum_descriptor):
    """Registers the given enum descriptor in the local database.

    Args:
      enum_descriptor: a descriptor.EnumDescriptor.

    Returns:
      The provided descriptor.
    """
    self.pool.AddEnumDescriptor(enum_descriptor)
    return enum_descriptor

  def RegisterFileDescriptor(self, file_descriptor):
    """Registers the given file descriptor in the local database.

    Args:
      file_descriptor: a descriptor.FileDescriptor.

    Returns:
      The provided descriptor.
    """
    self.pool.AddFileDescriptor(file_descriptor)
    # FIX: the docstring promises the descriptor is returned, and the sibling
    # Register* methods all return their argument; previously this fell
    # through and returned None.
    return file_descriptor

  def METHOD_NAME(self, symbol):
    """Tries to find a symbol in the local database.

    Currently, this method only returns message.Message instances, however, if
    may be extended in future to support other symbol types.

    Args:
      symbol: A str, a protocol buffer symbol.

    Returns:
      A Python class corresponding to the symbol.

    Raises:
      KeyError: if the symbol could not be found.
    """
    return self._classes[symbol]

  def GetMessages(self, files):
    # TODO(amauryfa): Fix the differences with MessageFactory.
    """Gets all registered messages from a specified file.

    Only messages already created and registered will be returned; (this is the
    case for imported _pb2 modules)
    But unlike MessageFactory, this version also returns already defined nested
    messages, but does not register any message extensions.

    Args:
      files: The file names to extract messages from.

    Returns:
      A dictionary mapping proto names to the message classes.

    Raises:
      KeyError: if a file could not be found.
    """

    def _GetAllMessageNames(desc):
      """Walk a message Descriptor and recursively yields all message names."""
      yield desc.full_name
      for msg_desc in desc.nested_types:
        for full_name in _GetAllMessageNames(msg_desc):
          yield full_name

    result = {}
    for file_name in files:
      file_desc = self.pool.FindFileByName(file_name)
      for msg_desc in file_desc.message_types_by_name.values():
        for full_name in _GetAllMessageNames(msg_desc):
          try:
            result[full_name] = self._classes[full_name]
          except KeyError:
            # This descriptor has no registered class, skip it.
            pass
    return result
# Process-wide default database, backed by the default descriptor pool.
_DEFAULT = SymbolDatabase(pool=descriptor_pool.Default())


def Default():
  """Returns the default SymbolDatabase."""
  return _DEFAULT
7,444 | terminate provider | # vision/providerBase.py
# A part of NonVisual Desktop Access (NVDA)
# This file is covered by the GNU General Public License.
# See the file COPYING for more details.
# Copyright (C) 2018-2019 NV Access Limited, Babbage B.V.
"""Module within the vision framework that contains the base vision enhancement provider class.
"""
from abc import abstractmethod
import config
from autoSettingsUtils.autoSettings import AutoSettings
from baseObject import AutoPropertyObject
from .visionHandlerExtensionPoints import EventExtensionPoints
from typing import Optional, Any
class VisionEnhancementProviderSettings(AutoSettings):
    """
    Base class for settings for a vision enhancement provider.
    Ensure that the following are implemented:
    - AutoSettings.getId:
    This is case sensitive. Used in the config file. Does not have to match the module name.
    - AutoSettings.getDisplayName:
    The string that should appear in the GUI as the name.
    - AutoSettings._get_supportedSettings:
    The settings for your provider, the returned list is permitted to change during
    start / termination of the provider.
    The implementation must handle how to modify the returned settings based on external (software,
    hardware) dependencies.

    @note
    If the vision enhancement provider has settings, it will provide an implementation of this class.
    The provider will hold a reference to an instance of this class, this is accessed through the class method
    L{VisionEnhancementProvider.getSettings}.

    One way to handle settings that are strictly runtime:
    - During initialization, the vision enhancement provider can instruct the settings instance what it should
    expose using the L{utoSettings._get_supportedSettings} property.
    - "_exampleProvider_autoGui.py" provides an example of this.
    """

    def __init__(self):
        super().__init__()
        # Load persisted settings eagerly so the provider sees configured
        # values as soon as the settings object exists.
        self.initSettings()  # ensure that settings are loaded at construction time.

    @classmethod
    def _getConfigSection(cls) -> str:
        # All vision enhancement providers share the "vision" config section;
        # per-provider keys are namespaced by getId() within it.
        return "vision"  # all providers should be in the "vision" section.
class VisionProviderStateControl:
    """ Stub showing the interface for controlling the start/termination of a single provider.
    Implementors of this class should handle the outcome when things go wrong.
    """

    # The methods below are abstract stubs: docstring-only bodies define the
    # interface; concrete subclasses supply the behavior.

    @abstractmethod
    def getProviderInfo(self):
        """
        @return: The provider info
        @rtype: providerInfo.ProviderInfo
        """

    @abstractmethod
    def getProviderInstance(self):
        """Gets an instance for the provider if it already exists
        @rtype: Optional[VisionEnhancementProvider]
        """

    @abstractmethod
    def startProvider(self, shouldPromptOnError: bool) -> bool:
        """Initializes the provider, prompting user with the error if necessary.
        @param shouldPromptOnError: True if the user should be presented with any errors that may occur.
        @return: True on success
        """

    @abstractmethod
    def METHOD_NAME(self, shouldPromptOnError: bool) -> bool:
        """Terminate the provider, prompting user with the error if necessary.
        @param shouldPromptOnError: True if the user should be presented with any errors that may occur.
        @return: True on success
        """
class VisionEnhancementProvider(AutoPropertyObject):
    """A class for vision enhancement providers.
    Derived classes should implement:
    - terminate:
    How to shutdown the provider
    - registerEventExtensionPoints:
    Allows the provider to receive updates form NVDA
    - canStart:
    Checks startup dependencies are satisfied
    - getSettings:
    Returns your implementation of VisionEnhancementProviderSettings
    Optional: To provide a custom GUI, return a SettingsPanel class type from:
    - getSettingsPanelClass
    """
    # AutoPropertyObject: computed properties are cached per core cycle.
    cachePropertiesByDefault = True

    @classmethod
    @abstractmethod
    def getSettings(cls) -> VisionEnhancementProviderSettings:
        """
        @remarks: The L{VisionEnhancementProviderSettings} class should be implemented to define the settings
        for your provider
        """
        ...

    @classmethod
    def getSettingsPanelClass(cls) -> Optional[Any]:
        """Returns the class to be used in order to construct a settingsPanel instance for the provider.
        The returned class must have a constructor which accepts:
        - parent: wx.Window
        - providerControl: VisionProviderStateControl
        EG:
        ``` python
        class mySettingsPanel(gui.settingsDialogs.SettingsPanel):
            def __init__(self, parent: wx.Window, providerControl: VisionProviderStateControl):
                super().__init__(parent=parent)
        ```
        @rtype: Optional[SettingsPanel]
        @remarks: When None is returned, L{gui.settingsDialogs.VisionProviderSubPanel_Wrapper} is used.
        """
        return None

    def reinitialize(self) -> None:
        """Reinitialize a vision enhancement provider, reusing the same instance.
        This base implementation simply calls terminate and __init__ consecutively.
        """
        self.terminate()
        self.__init__()

    @abstractmethod
    def terminate(self) -> None:
        """Terminate this provider.
        This should be used for any required clean up.
        @precondition: L{initialize} has been called.
        @postcondition: This provider can no longer be used.
        """
        ...

    @abstractmethod
    def registerEventExtensionPoints(self, extensionPoints: EventExtensionPoints) -> None:
        """
        Called at provider initialization time, this method should register the provider
        to the several event extension points that it is interested in.
        This method should only register itself with the extension points,
        and should refrain from doing anything else,
        as it might be called again several times between initialization and termination.
        @param extensionPoints: An object containing available extension points as attributes.
        """
        ...

    @classmethod
    @abstractmethod
    def canStart(cls) -> bool:
        """Returns whether this provider is able to start."""
        return False

    @classmethod
    def enableInConfig(cls, enable: bool) -> None:
        """Enables or disables the provider in the current configuration.
        @param enable: Whether to enable (C{True}) or disable (C{False}) the provider in the configuration.
        """
        settings = cls.getSettings()
        config.conf[settings._getConfigSection()][settings.getId()]["enabled"] = enable

    @classmethod
    def isEnabledInConfig(cls) -> bool:
        """Returns whether the provider is enabled in the configuration."""
        settings = cls.getSettings()
        return config.conf[settings._getConfigSection()][settings.getId()]["enabled"]
7,445 | create | from __future__ import annotations
import importlib
import logging
import os
import sys
from typing import TYPE_CHECKING
from feeluown.config import Config
from feeluown.utils.dispatch import Signal
from .consts import USER_PLUGINS_DIR
if TYPE_CHECKING:
from feeluown.app import App
__all__ = (
'plugins_mgr',
'Plugin',
)
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
class InvalidPluginError(Exception):
    """Raised when a module lacks the metadata required of a plugin."""
    pass
class Plugin:
    """
    A plugin can be a Python module or package which implements
    `enable(app)` and `disable(app)` function. It can also implements
    `init_config(config)` and initialize its configurations.
    """

    def __init__(self, module, alias='', version='', desc='',
                 author='', homepage='', dist_name=''):
        """Plugin object.

        :param alias: plugin display name
        :param version: plugin version
        :param module: plugin module object, exposing enable/disable functions
        :param desc: plugin description
        :param author: plugin author
        :param homepage: plugin homepage
        :param dist_name: plugin distribution name
        """
        # pylint: disable=too-many-arguments
        self.alias = alias
        # FIXME(cosven): use entry point name as plugin name, instead of the module name.
        self.name = module.__name__.split('.')[-1]
        self._module = module
        self.version = version
        self.desc = desc
        self.author = author
        self.homepage = homepage
        self.dist_name = dist_name
        self.is_enabled = False

    @classmethod
    def METHOD_NAME(cls, module):
        """Plugin factory.

        :param module: candidate plugin module
        :return: a Plugin built from the module's metadata attributes
        :raises InvalidPluginError: if a required attribute is missing
        """
        try:
            # alias, desc and version are required fields.
            alias = module.__alias__
            desc = module.__desc__
            version = module.__version__
            author = getattr(module, '__author__', '')
            homepage = getattr(module, '__homepage__', '')
            dist_name = getattr(module, '__dist_name__', '')
        except AttributeError as e:
            raise InvalidPluginError(str(e)) from None
        else:
            # BUG FIX: __version__ was previously read only to validate its
            # presence and then discarded, so Plugin.version was always ''.
            # Pass it through to the constructor.
            return Plugin(module,
                          alias=alias,
                          version=version,
                          desc=desc,
                          author=author,
                          homepage=homepage,
                          dist_name=dist_name)

    def init_config(self, config: Config):
        """Call plugin.init_config function if possible

        :param config: app config instance.

        .. versionadded: 3.7.15
        """
        myconfig = Config()
        names = [self.name]
        # Currently, plugin name looks like fuo_xxx and xxx is the real name.
        # User maye want to define config like app.xxx.X=Y,
        # instead of app.fuo_xxx.X=Y.
        if self.name.startswith('fuo_'):
            names.append(self.name[4:])
        # Define a subconfig(namespace) for plugin so that plugin can
        # define its own configuration fields.
        for name in names:
            config.deffield(name,
                            type_=Config,
                            default=myconfig,
                            desc=f'Configurations for plugin {self.name}')
        try:
            fn = self._module.init_config
        except AttributeError:
            # The plugin does not define any config field.
            pass
        else:
            fn(myconfig)

    def enable(self, app):
        # Delegate to the module's enable() hook and record the state.
        self._module.enable(app)
        self.is_enabled = True

    def disable(self, app):
        # Delegate to the module's disable() hook and record the state.
        self._module.disable(app)
        self.is_enabled = False
class PluginsManager:
    """Discovers plugin modules, initializes their config, and enables them."""

    def __init__(self):
        # name -> Plugin, populated by load_plugin_from_module.
        self._plugins = {}

        #: A plugin is about to enable.
        # The payload is the plugin object `(Plugin)`.
        # .. versionadded: 3.7.15
        self.about_to_enable = Signal()

        # scan_finished means all found plugins are enabled.
        # TODO: maybe rename scan_finished to plugins_enabled?
        self.scan_finished = Signal()

    def light_scan(self):
        """Scan plugins without enabling them."""
        logger.info('Light scan plugins.')
        self._scan_dirs()
        self._scan_entry_points()

    def init_plugins_config(self, config):
        """Try to init config for the plugin.

        Plugin can declare their configuration items.
        """
        for plugin in self._plugins.values():
            try:
                plugin.init_config(config)
            except Exception:  # noqa
                # One bad plugin must not prevent others from configuring.
                logger.exception(f'Init config for plugin:{plugin.name} failed')

    def enable_plugins(self, app: App):
        logger.info(f'Enable plugins that are scaned. total: {len(self._plugins)} ')
        for plugin in self._plugins.values():
            # Try to enbale the plugin.
            self.about_to_enable.emit(plugin)
            try:
                plugin.enable(app)
            except Exception:  # noqa
                logger.exception(f'Enable plugin:{plugin.name} failed')
        self.scan_finished.emit(list(self._plugins.values()))

    def load_plugin_from_module(self, module):
        """Load module and try to load the plugin"""
        logger.info('Try to load plugin from module: %s', module.__name__)
        # Try to create a new plugin.
        try:
            plugin = Plugin.METHOD_NAME(module)
        except InvalidPluginError:
            # Module lacks plugin metadata; silently ignore it.
            return
        self._plugins[plugin.name] = plugin

    def _scan_dirs(self):
        """Scan for plugins in the user plugins directory."""
        if not os.path.exists(USER_PLUGINS_DIR):
            return
        # Accept both package directories and single-file .py modules.
        module_name_list = []
        for fname in os.listdir(USER_PLUGINS_DIR):
            if os.path.isdir(os.path.join(USER_PLUGINS_DIR, fname)):
                module_name_list.append(fname)
            else:
                if fname.endswith('.py'):
                    module_name_list.append(fname[:-3])
        sys.path.append(USER_PLUGINS_DIR)
        for module_name in module_name_list:
            try:
                module = importlib.import_module(module_name)
            except Exception:  # noqa
                logger.exception('Failed to import module %s', module_name)
            else:
                self.load_plugin_from_module(module)

    def _scan_entry_points(self):
        """Scan for plugins registered through the setuptools entry-point mechanism.

        https://packaging.python.org/guides/creating-and-discovering-plugins/
        """
        try:
            import importlib.metadata  # pylint: disable=redefined-outer-name
            # NOTE(review): entry_points().get(...) is deprecated since
            # Python 3.10 and removed in 3.12; entry_points(group=...) may be
            # required on newer interpreters -- confirm supported versions.
            entry_points = importlib.metadata.entry_points().get('fuo.plugins_v1', [])
        except ImportError:
            # Fallback for interpreters without importlib.metadata.
            import pkg_resources
            entry_points = pkg_resources.iter_entry_points('fuo.plugins_v1')
        for entry_point in entry_points:
            try:
                module = entry_point.load()
            except Exception:  # noqa
                logger.exception('Failed to load module %s', entry_point.name)
            else:
                self.load_plugin_from_module(module)
# Module-level singleton used by the rest of the application.
plugins_mgr = PluginsManager()
7,446 | validate token request | import logging
from ..rfc6749.errors import (
InvalidRequestError,
UnauthorizedClientError,
AccessDeniedError,
)
from ..rfc6749 import BaseGrant, TokenEndpointMixin
from .errors import (
AuthorizationPendingError,
ExpiredTokenError,
SlowDownError,
)
log = logging.getLogger(__name__)
DEVICE_CODE_GRANT_TYPE = 'urn:ietf:params:oauth:grant-type:device_code'
class DeviceCodeGrant(BaseGrant, TokenEndpointMixin):
    """This OAuth 2.0 [RFC6749] protocol extension enables OAuth clients to
    request user authorization from applications on devices that have
    limited input capabilities or lack a suitable browser. Such devices
    include smart TVs, media consoles, picture frames, and printers,
    which lack an easy input method or a suitable browser required for
    traditional OAuth interactions. Here is the authorization flow::

        +----------+ +----------------+
        | |>---(A)-- Client Identifier --->| |
        | | | |
        | |<---(B)-- Device Code, ---<| |
        | | User Code, | |
        | Device | & Verification URI | |
        | Client | | |
        | | [polling] | |
        | |>---(E)-- Device Code --->| |
        | | & Client Identifier | |
        | | | Authorization |
        | |<---(F)-- Access Token ---<| Server |
        +----------+ (& Optional Refresh Token) | |
        v | |
        : | |
        (C) User Code & Verification URI | |
        : | |
        v | |
        +----------+ | |
        | End User | | |
        | at |<---(D)-- End user reviews --->| |
        | Browser | authorization request | |
        +----------+ +----------------+

    This DeviceCodeGrant is the implementation of step (E) and (F).

    (E) While the end user reviews the client's request (step D), the
    client repeatedly polls the authorization server to find out if
    the user completed the user authorization step. The client
    includes the device code and its client identifier.

    (F) The authorization server validates the device code provided by
    the client and responds with the access token if the client is
    granted access, an error if they are denied access, or an
    indication that the client should continue to poll.
    """
    GRANT_TYPE = DEVICE_CODE_GRANT_TYPE
    TOKEN_ENDPOINT_AUTH_METHODS = ['client_secret_basic', 'client_secret_post', 'none']
    def METHOD_NAME(self):
        """After displaying instructions to the user, the client creates an
        access token request and sends it to the token endpoint with the
        following parameters:

        grant_type
            REQUIRED. Value MUST be set to
            "urn:ietf:params:oauth:grant-type:device_code".

        device_code
            REQUIRED. The device verification code, "device_code" from the
            device authorization response.

        client_id
            REQUIRED if the client is not authenticating with the
            authorization server as described in Section 3.2.1. of [RFC6749].
            The client identifier as described in Section 2.2 of [RFC6749].

        For example, the client makes the following HTTPS request::

            POST /token HTTP/1.1
            Host: server.example.com
            Content-Type: application/x-www-form-urlencoded

            grant_type=urn%3Aietf%3Aparams%3Aoauth%3Agrant-type%3Adevice_code
            &device_code=GmRhmhcxhwAzkoEqiMEg_DnyEysNkuNhszIySk9eS
            &client_id=1406020730
        """
        device_code = self.request.data.get('device_code')
        if not device_code:
            raise InvalidRequestError('Missing "device_code" in payload')
        # Authenticate the polling client (basic/post secret or public
        # "none") and make sure it may use the device_code grant type.
        client = self.authenticate_token_endpoint_client()
        if not client.check_grant_type(self.GRANT_TYPE):
            raise UnauthorizedClientError()
        credential = self.query_device_credential(device_code)
        if not credential:
            raise InvalidRequestError('Invalid "device_code" in payload')
        # The device code must belong to the very client that is polling.
        if credential.get_client_id() != client.get_client_id():
            raise UnauthorizedClientError()
        user = self.validate_device_credential(credential)
        # Stash the validated parties for create_token_response().
        self.request.user = user
        self.request.client = client
        self.request.credential = credential
    def create_token_response(self):
        """If the access token request is valid and authorized, the
        authorization server issues an access token and optional refresh
        token.
        """
        client = self.request.client
        # Scope was fixed at device-authorization time; reuse it here.
        scope = self.request.credential.get_scope()
        token = self.generate_token(
            user=self.request.user,
            scope=scope,
            include_refresh_token=client.check_grant_type('refresh_token'),
        )
        log.debug('Issue token %r to %r', token, client)
        self.save_token(token)
        self.execute_hook('process_token', token=token)
        return 200, token, self.TOKEN_RESPONSE_HEADER
    def validate_device_credential(self, credential):
        # Expired device codes can never be redeemed.
        if credential.is_expired():
            raise ExpiredTokenError()
        user_code = credential.get_user_code()
        user_grant = self.query_user_grant(user_code)
        if user_grant is not None:
            user, approved = user_grant
            if not approved:
                # The end user explicitly denied the request.
                raise AccessDeniedError()
            return user
        # No decision yet: either tell the client to poll more slowly or
        # report that the authorization is still pending.
        if self.should_slow_down(credential):
            raise SlowDownError()
        raise AuthorizationPendingError()
    def query_device_credential(self, device_code):
        """Get device credential from previously savings via ``DeviceAuthorizationEndpoint``.
        Developers MUST implement it in subclass::

            def query_device_credential(self, device_code):
                return DeviceCredential.get(device_code)

        :param device_code: a string represent the code.
        :return: DeviceCredential instance
        """
        raise NotImplementedError()
    def query_user_grant(self, user_code):
        """Get user and grant via the given user code. Developers MUST
        implement it in subclass::

            def query_user_grant(self, user_code):
                # e.g. we saved user grant info in redis
                data = redis.get('oauth_user_grant:' + user_code)
                if not data:
                    return None
                user_id, allowed = data.split()
                user = User.get(user_id)
                return user, bool(allowed)

        Note, user grant information is saved by verification endpoint.
        """
        raise NotImplementedError()
    def should_slow_down(self, credential):
        """The authorization request is still pending and polling should
        continue, but the interval MUST be increased by 5 seconds for this
        and all subsequent requests.
        """
        raise NotImplementedError()
7,447 | invoke | from dataclasses import dataclass
from typing import Optional, Tuple, Dict, ClassVar
import struct
import posixpath
import zlib
import gdb
def get_file_crc32(filename):
    """Return the CRC32 checksum of the file's contents.

    Reads the file in fixed-size chunks so large ELF files are not loaded
    into memory at once; the running checksum is identical to
    ``zlib.crc32`` over the whole file.

    :param filename: path of the file to checksum.
    :return: unsigned CRC32 value as an int.
    """
    crc = 0
    with open(filename, "rb") as f:
        while chunk := f.read(65536):
            crc = zlib.crc32(chunk, crc)
    return crc
@dataclass
class AppState:
    """Debug-relevant state of a single externally loaded Flipper app."""

    name: str
    text_address: int = 0
    entry_address: int = 0
    # Section name -> load address, for sections other than .text.
    # Default stays None (fixed up in __post_init__) to keep the public
    # constructor signature backward compatible.
    other_sections: Dict[str, int] = None
    # Name of the original ELF with debug info, relative to DEBUG_ELF_ROOT.
    debug_link_elf: str = ""
    # CRC32 of that ELF, as recorded in the debug link section.
    debug_link_crc: int = 0

    # Lookup root for original ELF files, set via the
    # fap-set-debug-elf-root GDB command.
    DEBUG_ELF_ROOT: ClassVar[Optional[str]] = None

    def __post_init__(self):
        # Avoid sharing one dict across instances (mutable default pitfall).
        if self.other_sections is None:
            self.other_sections = {}

    def get_original_elf_path(self) -> str:
        """Return the path of the debuggable ELF for this app."""
        if self.DEBUG_ELF_ROOT is None:
            raise ValueError("DEBUG_ELF_ROOT not set; call fap-set-debug-elf-root")
        return (
            posixpath.join(self.DEBUG_ELF_ROOT, self.debug_link_elf)
            if self.DEBUG_ELF_ROOT
            else self.debug_link_elf
        )

    def is_debug_available(self) -> bool:
        """Check that debug info exists and matches the running binary."""
        have_debug_info = bool(self.debug_link_elf and self.debug_link_crc)
        if not have_debug_info:
            print("No debug info available for this app")
            return False
        debug_elf_path = self.get_original_elf_path()
        debug_elf_crc32 = get_file_crc32(debug_elf_path)
        # A stale ELF would produce misleading symbols; require exact CRC match.
        if self.debug_link_crc != debug_elf_crc32:
            print(
                f"Debug info ({debug_elf_path}) CRC mismatch: {self.debug_link_crc:08x} != {debug_elf_crc32:08x}, rebuild app"
            )
            return False
        return True

    def get_gdb_load_command(self) -> str:
        """Build the add-symbol-file command registering all known sections."""
        load_path = self.get_original_elf_path()
        print(f"Loading debug information from {load_path}")
        load_command = (
            f"add-symbol-file -readnow {load_path} 0x{self.text_address:08x} "
        )
        load_command += " ".join(
            f"-s {name} 0x{address:08x}"
            for name, address in self.other_sections.items()
        )
        return load_command

    def get_gdb_unload_command(self) -> str:
        """Build the remove-symbol-file command for this app's .text address."""
        return f"remove-symbol-file -a 0x{self.text_address:08x}"

    @staticmethod
    def get_gdb_app_ep(app) -> int:
        """Extract the entry point address from a gdb.Value of the app struct."""
        return int(app["state"]["entry"])

    @staticmethod
    def parse_debug_link_data(section_data: bytes) -> Tuple[str, int]:
        # Debug link format: a null-terminated string with debuggable file name
        # Padded with 0's to multiple of 4 bytes
        # Followed by 4 bytes of CRC32 checksum of that file
        elf_name = section_data[:-4].decode("utf-8").split("\x00")[0]
        crc32 = struct.unpack("<I", section_data[-4:])[0]
        return (elf_name, crc32)

    @classmethod
    def from_gdb(cls, gdb_app: "gdb.Value") -> "AppState":
        """Build an AppState from the firmware's FlipperApplication struct."""
        # Use ``cls`` consistently (the parameter was previously ignored and
        # mis-annotated as "AppState" — it is a gdb.Value).
        state = cls(str(gdb_app["manifest"]["name"].string()))
        state.entry_address = cls.get_gdb_app_ep(gdb_app)
        app_state = gdb_app["state"]
        if debug_link_size := int(app_state["debug_link_info"]["debug_link_size"]):
            debug_link_data = (
                gdb.selected_inferior()
                .read_memory(
                    int(app_state["debug_link_info"]["debug_link"]), debug_link_size
                )
                .tobytes()
            )
            state.debug_link_elf, state.debug_link_crc = cls.parse_debug_link_data(
                debug_link_data
            )
        for idx in range(app_state["mmap_entry_count"]):
            mmap_entry = app_state["mmap_entries"][idx]
            section_name = mmap_entry["name"].string()
            section_addr = int(mmap_entry["address"])
            # .text gets special handling; everything else is a named section.
            if section_name == ".text":
                state.text_address = section_addr
            else:
                state.other_sections[section_name] = section_addr
        return state
class SetFapDebugElfRoot(gdb.Command):
    """Set path to original ELF files for debug info"""
    def __init__(self):
        super().__init__(
            "fap-set-debug-elf-root", gdb.COMMAND_FILES, gdb.COMPLETE_FILENAME
        )
        # NOTE(review): gdb documents dont_repeat() as taking effect when
        # called from within invoke(); calling it here in __init__ looks
        # like a no-op — confirm against the GDB Python API.
        self.dont_repeat()
    def METHOD_NAME(self, arg, from_tty):
        # GDB command entry point; *arg* is the user-supplied lookup path.
        AppState.DEBUG_ELF_ROOT = arg
        try:
            global helper
            print(f"Set '{arg}' as debug info lookup path for Flipper external apps")
            # Attach now and keep app symbols in sync on stop/exit events.
            helper.attach_to_fw()
            gdb.events.stop.connect(helper.handle_stop)
            gdb.events.exited.connect(helper.handle_exit)
        except gdb.error as e:
            print(f"Support for Flipper external apps debug is not available: {e}")
class FlipperAppStateHelper:
    """Track externally loaded Flipper apps and sync their debug info into GDB."""

    def __init__(self):
        self.app_type_ptr = None
        self.app_list_ptr = None
        self.app_list_entry_type = None
        # Apps whose symbols are currently registered with GDB.
        self._current_apps: list[AppState] = []
        self.set_debug_mode(True)

    def _walk_app_list(self, list_head):
        # Walk the firmware's singly linked list of loaded applications.
        while list_head:
            if app := list_head["data"]:
                yield app.dereference()
            list_head = list_head["next"]

    def _exec_gdb_command(self, command: str) -> bool:
        """Run *command* in GDB; return True on success, False on gdb.error."""
        try:
            gdb.execute(command)
            return True
        except gdb.error as e:
            print(f"Failed to execute GDB command '{command}': {e}")
            return False

    def _sync_apps(self) -> None:
        """Reconcile GDB's loaded symbol files with the firmware's app list."""
        self.set_debug_mode(True)
        if not (app_list := self.app_list_ptr.value()):
            print("Reset app loader state")
            for app in self._current_apps:
                self._exec_gdb_command(app.get_gdb_unload_command())
            self._current_apps = []
            return
        loaded_apps: dict[int, gdb.Value] = {
            AppState.get_gdb_app_ep(app): app
            for app in self._walk_app_list(app_list[0])
        }
        # Drop debug info for apps that were unloaded from the firmware.
        for app in self._current_apps.copy():
            if app.entry_address not in loaded_apps:
                print(f"Application {app.name} is no longer loaded")
                if not self._exec_gdb_command(app.get_gdb_unload_command()):
                    print(f"Failed to unload debug info for {app.name}")
                self._current_apps.remove(app)
        # Register debug info for newly loaded apps. Build the set of known
        # entry points once (previously rebuilt on every loop iteration) and
        # avoid reusing the name ``app`` for two different things.
        known_entry_points = {app.entry_address for app in self._current_apps}
        for entry_point, gdb_app in loaded_apps.items():
            if entry_point not in known_entry_points:
                new_app_state = AppState.from_gdb(gdb_app)
                print("New application loaded. Adding debug info")
                if self._exec_gdb_command(new_app_state.get_gdb_load_command()):
                    self._current_apps.append(new_app_state)
                else:
                    print(f"Failed to load debug info for {new_app_state}")

    def attach_to_fw(self) -> None:
        """Look up the firmware's app-list symbol and types, then sync once."""
        print("Attaching to Flipper firmware")
        self.app_list_ptr = gdb.lookup_global_symbol(
            "flipper_application_loaded_app_list"
        )
        self.app_type_ptr = gdb.lookup_type("FlipperApplication").pointer()
        self.app_list_entry_type = gdb.lookup_type("struct FlipperApplicationList_s")
        self._sync_apps()

    def handle_stop(self, event) -> None:
        # Re-sync on every debugger stop: apps may have (un)loaded meanwhile.
        self._sync_apps()

    def handle_exit(self, event) -> None:
        self.set_debug_mode(False)

    def set_debug_mode(self, mode: bool) -> None:
        """Tell the firmware whether a GDB session is currently active."""
        try:
            gdb.execute(f"set variable furi_hal_debug_gdb_session_active = {int(mode)}")
        except gdb.error as e:
            print(f"Failed to set debug mode: {e}")
# Init additional 'fap-set-debug-elf-root' command and set up hooks
SetFapDebugElfRoot()
# Module-level helper; the command above reaches it via ``global helper``.
helper = FlipperAppStateHelper()
print("Support for Flipper external apps debug is loaded")
7,448 | valid instance file extension | from django.utils.datastructures import MultiValueDictKeyError
from couchforms.const import MAGIC_PROPERTY, VALID_ATTACHMENT_FILE_EXTENSIONS
import logging
from datetime import datetime
from django.conf import settings
from couchforms.exceptions import (
EmptyPayload,
MultipartEmptyPayload,
MultipartFilenameError,
PayloadTooLarge,
InvalidAttachmentFileError,
InvalidSubmissionFileExtensionError,
AttachmentSizeTooLarge,
)
from dimagi.utils.parsing import string_to_utc_datetime
from dimagi.utils.web import get_ip, get_site_domain, IP_RE
__all__ = ['get_path', 'get_instance_and_attachment',
'get_location', 'get_received_on', 'get_date_header',
'get_submit_ip', 'get_last_sync_token', 'get_openrosa_headers']
# Header that formplayer adds to request to store user ip address on form submission
COMMCAREHQ_ORIGIN_IP = 'HTTP_X_COMMCAREHQ_ORIGIN_IP'
def get_path(request):
    """Return the URL path of the incoming request."""
    return request.path
class MultimediaBug(Exception):
    """Raised when a multipart submission unexpectedly carries POST data."""
def get_instance_and_attachment(request):
    """Extract the XML instance and attachments from a submission request.

    Handles both ODK-style multipart submissions and plain-body
    (touchforms-style) submissions. The result is cached on the request
    so repeated calls do not re-read the payload.

    :return: tuple of ``(instance bytes, {name: uploaded file})``
    :raises MultimediaBug: multipart submission with POST data.
    :raises MultipartFilenameError: missing xml_submission_file part.
    :raises PayloadTooLarge, InvalidSubmissionFileExtensionError: bad instance file.
    :raises AttachmentSizeTooLarge, InvalidAttachmentFileError: bad attachment.
    :raises MultipartEmptyPayload, EmptyPayload: empty submission body.
    """
    try:
        # Return the cached result if this request was already parsed.
        return request._instance_and_attachment
    except AttributeError:
        pass
    attachments = {}
    if request.META['CONTENT_TYPE'].startswith('multipart/form-data'):
        # ODK submission; of the form
        # $ curl --form 'xml_submission_file=@form.xml' $URL
        if list(request.POST):
            raise MultimediaBug("Received a submission with POST.keys()")
        try:
            instance_file = request.FILES[MAGIC_PROPERTY]
        except MultiValueDictKeyError:
            raise MultipartFilenameError()
        else:
            if instance_file.size > settings.MAX_UPLOAD_SIZE:
                # BUG FIX: this message was a plain string containing
                # f-string placeholders and logged the literal
                # "{request.domain}" — use lazy %-style args instead.
                logging.info(
                    "Domain %s attempted to submit a form exceeding the allowed size",
                    getattr(request, "domain", "unknown"),
                )
                raise PayloadTooLarge()
            if not METHOD_NAME(instance_file):
                raise InvalidSubmissionFileExtensionError()
            instance = instance_file.read()
            for key, item in request.FILES.items():
                if key != MAGIC_PROPERTY:
                    # Validate size first, then file type, per attachment.
                    if _attachment_exceeds_size_limit(item):
                        raise AttachmentSizeTooLarge()
                    if not _valid_attachment_file(item):
                        raise InvalidAttachmentFileError()
                    attachments[key] = item
        if not instance:
            raise MultipartEmptyPayload()
    else:
        # touchforms; of the form
        # $ curl --data '@form.xml' $URL
        instance = request.body
        if not instance:
            raise EmptyPayload()
    request._instance_and_attachment = (instance, attachments)
    return instance, attachments
def METHOD_NAME(file):
    """Return True when the submitted instance file has an ``.xml`` extension."""
    return _valid_file_extension(file.name, ['xml'])


def _valid_file_extension(filename, valid_extensions):
    """Check the text after the final dot against *valid_extensions* (case-sensitive)."""
    _, dot, extension = filename.rpartition(".")
    if not dot:
        # No dot at all -> no extension to validate.
        return False
    return extension in valid_extensions
def _valid_attachment_file(file):
    """Accept an attachment when either its extension or its mimetype is valid."""
    if _valid_attachment_file_extension(file):
        return True
    return _valid_attachment_file_mimetype(file)


def _valid_attachment_file_extension(file):
    """Check the attachment filename against the allowed extension list."""
    return _valid_file_extension(file.name, VALID_ATTACHMENT_FILE_EXTENSIONS)


def _valid_attachment_file_mimetype(file):
    """Accept media mimetypes plus the two non-media types we support."""
    content_type = file.content_type
    if content_type.startswith(("audio/", "image/", "video/")):
        return True
    # "application/octet-stream" is the default mimetype set by CommCare;
    # "application/pdf" is supported by formplayer.
    return content_type in ("application/octet-stream", "application/pdf")


def _attachment_exceeds_size_limit(file):
    """Return True when the attachment is larger than the configured limit."""
    return file.size > settings.MAX_UPLOAD_SIZE_ATTACHMENT
def get_location(request=None):
    """Return the public base URL (scheme + domain) of this deployment.

    www.commcarehq.org always uses https but sits behind a proxy that may
    not look like https, hence the explicit override hook.
    """
    if hasattr(settings, "OVERRIDE_LOCATION"):
        return settings.OVERRIDE_LOCATION
    if request is None:
        scheme = settings.DEFAULT_PROTOCOL
    else:
        scheme = "https" if request.is_secure() else "http"
    return f"{scheme}://{get_site_domain()}"
def get_received_on(request):
    """Parse the X-SUBMIT-TIME header into a UTC datetime, or None if absent."""
    header = request.META.get('HTTP_X_SUBMIT_TIME')
    return string_to_utc_datetime(header) if header else None
def get_date_header(request):
    """Normalize the HTTP Date header to ISO-8601 with a 'Z' suffix.

    Incoming format: ``Mon, 11 Apr 2011 18:24:43 GMT``
    Outgoing format: ``2011-04-11T18:24:43Z``

    :return: the normalized date string; the raw header unchanged when it
        cannot be parsed; None when the header is absent or empty.
    """
    date_header = request.META.get('HTTP_DATE')
    if not date_header:
        return None
    try:
        parsed = datetime.strptime(date_header, "%a, %d %b %Y %H:%M:%S GMT")
    except ValueError:
        # BUG FIX: this was a bare ``except:``; only a parse failure is
        # expected here, so catch ValueError and keep the raw header.
        logging.error((
            "Receiver app: incoming submission has a date header "
            "that we can't parse: '%s'"
        ) % date_header)
        return date_header
    return datetime.strftime(parsed, "%Y-%m-%dT%H:%M:%SZ")
def get_submit_ip(request):
    """Return the submitting client's IP address.

    Formplayer forwards the original client IP in the
    X-CommCareHQ-Origin-IP header; trust it only when it looks like an
    IP address and the accompanying origin token validates. Otherwise
    fall back to the connection's own IP.
    """
    from corehq.apps.ota.decorators import ORIGIN_TOKEN_HEADER, validate_origin_token
    origin_ip = request.META.get(COMMCAREHQ_ORIGIN_IP, None)
    if origin_ip and IP_RE.match(origin_ip):
        origin_token = request.META.get(ORIGIN_TOKEN_HEADER, None)
        if validate_origin_token(origin_token):
            return origin_ip
    return get_ip(request)
def get_last_sync_token(request):
    """Return the sync token attached to the request, if any."""
    return getattr(request, 'last_sync_token', None)


def get_openrosa_headers(request):
    """Return the OpenRosa headers attached to the request, if any."""
    return getattr(request, 'openrosa_headers', None)
7,449 | test bitorr1 | # Copyright 2019-2021 ETH Zurich and the DaCe authors. All rights reserved.
import numpy as np
import dace
from common import compare_numpy_output
### Left, match first pos ######################################################
# 1-D B aligns with A's trailing axis and broadcasts over the leading axis.
@compare_numpy_output()
def test_subl1(A: dace.float64[5, 3], B: dace.float64[3]):
    return A - B
@compare_numpy_output()
def test_multl1(A: dace.int64[5, 3], B: dace.int64[3]):
    return A * B
@compare_numpy_output()
def test_bitorl1(A: dace.int64[5, 3], B: dace.int64[3]):
    return A | B
@compare_numpy_output()
def test_bitxorl1(A: dace.int64[5, 3], B: dace.int64[3]):
    return A ^ B
@compare_numpy_output()
def test_noteql1(A: dace.int64[5, 3], B: dace.int64[3]):
    return A != B
@compare_numpy_output()
def test_ltl1(A: dace.int64[5, 3], B: dace.int64[3]):
    return A < B
### Right, match first pos #####################################################
# Mirror of the section above with the 1-D operand on the left-hand side.
@compare_numpy_output()
def test_subr1(A: dace.float64[5], B: dace.float64[3, 5]):
    return A - B
@compare_numpy_output()
def test_multr1(A: dace.int64[5], B: dace.int64[3, 5]):
    return A * B
@compare_numpy_output()
def METHOD_NAME(A: dace.int64[5], B: dace.int64[3, 5]):
    return A | B
@compare_numpy_output()
def test_bitxorr1(A: dace.int64[5], B: dace.int64[3, 5]):
    return A ^ B
@compare_numpy_output()
def test_noteqr1(A: dace.int64[5], B: dace.int64[3, 5]):
    return A != B
@compare_numpy_output()
def test_ltr1(A: dace.int64[5], B: dace.int64[3, 5]):
    return A < B
### Left, first pos 1, match second pos ########################################
# B's size-1 trailing axis broadcasts against A's trailing axis.
@compare_numpy_output()
def test_subl2(A: dace.float64[5, 3], B: dace.float64[5, 1]):
    return A - B
@compare_numpy_output()
def test_multl2(A: dace.int64[5, 3], B: dace.int64[5, 1]):
    return A * B
@compare_numpy_output()
def test_bitorl2(A: dace.int64[5, 3], B: dace.int64[5, 1]):
    return A | B
@compare_numpy_output()
def test_bitxorl2(A: dace.int64[5, 3], B: dace.int64[5, 1]):
    return A ^ B
@compare_numpy_output()
def test_noteql2(A: dace.int64[5, 3], B: dace.int64[5, 1]):
    return A != B
@compare_numpy_output()
def test_ltl2(A: dace.int64[5, 3], B: dace.int64[5, 1]):
    return A < B
### Right, first pos 1, match second ###########################################
# Mirror of the section above: the size-1 axis is on the left operand.
@compare_numpy_output()
def test_subr2(A: dace.float64[3, 1], B: dace.float64[3, 5]):
    return A - B
@compare_numpy_output()
def test_multr2(A: dace.int64[3, 1], B: dace.int64[3, 5]):
    return A * B
@compare_numpy_output()
def test_bitorr2(A: dace.int64[3, 1], B: dace.int64[3, 5]):
    return A | B
@compare_numpy_output()
def test_bitxorr2(A: dace.int64[3, 1], B: dace.int64[3, 5]):
    return A ^ B
@compare_numpy_output()
def test_noteqr2(A: dace.int64[3, 1], B: dace.int64[3, 5]):
    return A != B
@compare_numpy_output()
def test_ltr2(A: dace.int64[3, 1], B: dace.int64[3, 5]):
    return A < B
### Left, first pos 1, match second pos, None last pos ########################
# B carries an extra leading axis absent from A; the result gains that axis.
@compare_numpy_output()
def test_subl3(A: dace.float64[5, 3], B: dace.float64[2, 5, 1]):
    return A - B
@compare_numpy_output()
def test_bitxorl3(A: dace.int64[5, 3], B: dace.int64[2, 5, 1]):
    return A ^ B
@compare_numpy_output()
def test_ltl3(A: dace.int64[5, 3], B: dace.int64[2, 5, 1]):
    return A < B
### Right, first pos 1, match second pos, None last pos #######################
# Mirror: the extra leading axis is on the left operand.
@compare_numpy_output()
def test_multr3(A: dace.int64[4, 3, 1], B: dace.int64[3, 5]):
    return A * B
@compare_numpy_output()
def test_bitorr3(A: dace.int64[4, 3, 1], B: dace.int64[3, 5]):
    return A | B
@compare_numpy_output()
def test_noteqr3(A: dace.int64[4, 3, 1], B: dace.int64[3, 5]):
    return A != B
### Left Errors ###############################################################
# Incompatible shapes: dace is expected to match numpy's error behavior.
@compare_numpy_output()
def test_subl4(A: dace.float64[5, 3], B: dace.float64[2]):
    return A - B
@compare_numpy_output()
def test_bitxorl4(A: dace.int64[5, 3], B: dace.int64[2, 3]):
    return A ^ B
@compare_numpy_output()
def test_ltl4(A: dace.int64[5, 3], B: dace.int64[3, 2, 3]):
    return A < B
### Right Errors ##############################################################
# Same as above with the offending operand on the left.
@compare_numpy_output()
def test_multr4(A: dace.int64[4], B: dace.int64[3, 5]):
    return A * B
@compare_numpy_output()
def test_bitorr4(A: dace.int64[4, 1], B: dace.int64[3, 5]):
    return A | B
# this is broken as of numpy 1.18: numpy doesn't raise an error
#
# >>> import numpy as np
# >>> a = np.random.rand(3, 2)
# >>> b = np.random.rand(2)
# >>> a == b # this works as expected
# array([[False, False],
# [False, False],
# [False, False]])
# >>> b = np.random.rand(3)
# >>> a == b # ?
# <stdin>:1: DeprecationWarning: elementwise comparison failed; this will raise an error in the future.
# False
#
# this test can be reenabled when this is fixed
#@compare_numpy_output()
#def test_noteqr4(A: dace.int64[3, 3, 2], B: dace.int64[3, 5]):
# return A != B
@compare_numpy_output()
def test_regression_result_none(A: dace.int32[1, 3], B: dace.int32[3]):
    return A + B
@compare_numpy_output()
def test_both_match(A: dace.float64[5, 1], B: dace.float64[1, 3]):
    return A + B
def test_symbolic_bcast_same():
    # Broadcasting where one dimension is a symbolic expression that
    # simplifies to the other: N*(I+1) - N*I == N.
    N = dace.symbol("N")
    I = dace.symbol("I")
    @dace.program
    def symbolic_bcast(A: dace.float64[N, 4], B: dace.float64[N * (I + 1) - N * I, 1]):
        return A + B
    A = np.arange(40).astype(np.float64).reshape(10, 4)
    B = np.arange(10).astype(np.float64).reshape(10, 1)
    result = symbolic_bcast(A.copy(), B.copy(), I=42, N=10)
    expected = A + B
    np.testing.assert_allclose(result, expected)
if __name__ == '__main__':
    # generate this with
    # cat binop_broadcasting_test.py | grep -oP '(?<=f ).*(?=\()' | awk '{print $0 "()"}'
    test_subl1()
    test_multl1()
    test_bitorl1()
    test_bitxorl1()
    test_noteql1()
    test_ltl1()
    test_subr1()
    test_multr1()
    METHOD_NAME()
    test_bitxorr1()
    test_noteqr1()
    test_ltr1()
    test_subl2()
    test_multl2()
    test_bitorl2()
    test_bitxorl2()
    test_noteql2()
    test_ltl2()
    test_subr2()
    test_multr2()
    test_bitorr2()
    test_bitxorr2()
    test_noteqr2()
    test_ltr2()
    test_subl3()
    test_bitxorl3()
    test_ltl3()
    test_multr3()
    test_bitorr3()
    test_noteqr3()
    test_subl4()
    test_bitxorl4()
    test_ltl4()
    test_multr4()
    test_bitorr4()
    test_regression_result_none()
    # BUG FIX: test_both_match is defined above but was missing from the
    # generated call list, so it never ran under direct execution.
    test_both_match()
    test_symbolic_bcast_same()
7,450 | initializer botocore session | import logging
import os
from aws_lambda_powertools.shared.version import VERSION
powertools_version = VERSION
# Whether we are able to inject the Powertools User-Agent header at all.
inject_header = True
try:
    import botocore
except ImportError:
    # if botocore failed to import, user might be using custom runtime and we can't inject header
    inject_header = False
logger = logging.getLogger(__name__)
# Lambda execution environment identifier; "NA" when not running in Lambda.
EXEC_ENV = os.environ.get("AWS_EXECUTION_ENV", "NA")
# botocore event hook fired right before an HTTP request is sent.
TARGET_SDK_EVENT = "request-created"
FEATURE_PREFIX = "PT"
DEFAULT_FEATURE = "no-op"
# Marker appended to Powertools-created clients; replaced once a real feature registers.
HEADER_NO_OP = f"{FEATURE_PREFIX}/{DEFAULT_FEATURE}/{powertools_version} PTEnv/{EXEC_ENV}"
def METHOD_NAME(session):
    """
    This function is used to add an extra header for the User-Agent in the Botocore session,
    as described in the pull request: https://github.com/boto/botocore/pull/2682

    Parameters
    ----------
    session : botocore.session.Session
        The Botocore session to which the user-agent function will be registered.

    Raises
    ------
    Exception
        If there is an issue while adding the extra header for the User-Agent.
    """
    try:
        session.register(TARGET_SDK_EVENT, _create_feature_function(DEFAULT_FEATURE))
    except Exception:
        # Best effort: user-agent decoration must never break SDK clients.
        logger.debug("Can't add extra header User-Agent")
def _create_feature_function(feature):
    """
    Create and return the `add_powertools_feature` function.

    The `add_powertools_feature` function is designed to be registered in boto3's event system.
    When registered, it appends the given feature string to the User-Agent header of AWS SDK requests.

    Parameters
    ----------
    feature : str
        The feature string to be appended to the User-Agent header.

    Returns
    -------
    add_powertools_feature : Callable
        The `add_powertools_feature` function that modifies the User-Agent header.
    """
    def add_powertools_feature(request, **kwargs):
        try:
            headers = request.headers
            # Append "PT/<feature>/<version> PTEnv/<env>" to the SDK's own UA.
            header_user_agent = (
                f"{headers['User-Agent']} {FEATURE_PREFIX}/{feature}/{powertools_version} PTEnv/{EXEC_ENV}"
            )
            # This function is exclusive to client and resources objects created in Powertools
            # and must remove the no-op header, if present
            if HEADER_NO_OP in headers["User-Agent"] and feature != DEFAULT_FEATURE:
                # Remove HEADER_NO_OP + space
                header_user_agent = header_user_agent.replace(f"{HEADER_NO_OP} ", "")
            headers["User-Agent"] = f"{header_user_agent}"
        except Exception:
            # Best effort: never fail the request over user-agent decoration.
            logger.debug("Can't find User-Agent header")
    return add_powertools_feature
# Add feature user-agent to given sdk boto3.session
def register_feature_to_session(session, feature):
    """
    Register the given feature string to the event system of the provided boto3 session
    and append the feature to the User-Agent header of the request

    Parameters
    ----------
    session : boto3.session.Session
        The boto3 session to which the feature will be registered.
    feature : str
        The feature string to be appended to the User-Agent header, e.g., "streaming" in Powertools.

    Raises
    ------
    AttributeError
        If the provided session does not have an event system.
    """
    try:
        session.events.register(TARGET_SDK_EVENT, _create_feature_function(feature))
    except AttributeError as e:
        # Best effort: log and continue when the session has no event system.
        logger.debug(f"session passed in doesn't have a event system:{e}")
# Add feature user-agent to given sdk botocore.session.Session
def register_feature_to_botocore_session(botocore_session, feature):
    """
    Register the given feature string to the event system of the provided botocore session

    Please notice this function is for patching botocore session and is different from
    previous one which is for patching boto3 session

    Parameters
    ----------
    botocore_session : botocore.session.Session
        The botocore session to which the feature will be registered.
    feature : str
        The feature string to be appended to the User-Agent header, e.g., "data-masking" in Powertools.

    Raises
    ------
    AttributeError
        If the provided session does not have an event system.

    Examples
    --------
    **register data-masking user-agent to botocore session**

    >>> from aws_lambda_powertools.shared.user_agent import (
    >>>    register_feature_to_botocore_session
    >>> )
    >>>
    >>> session = botocore.session.Session()
    >>> register_feature_to_botocore_session(botocore_session=session, feature="data-masking")
    >>> key_provider = StrictAwsKmsMasterKeyProvider(key_ids=self.keys, botocore_session=session)
    """
    try:
        # botocore sessions expose register() directly (no .events attribute).
        botocore_session.register(TARGET_SDK_EVENT, _create_feature_function(feature))
    except AttributeError as e:
        logger.debug(f"botocore session passed in doesn't have a event system:{e}")
# Add feature user-agent to given sdk boto3.client
def register_feature_to_client(client, feature):
    """
    Register the given feature string to the event system of the provided boto3 client
    and append the feature to the User-Agent header of the request

    Parameters
    ----------
    client : boto3.session.Session.client
        The boto3 client to which the feature will be registered.
    feature : str
        The feature string to be appended to the User-Agent header, e.g., "streaming" in Powertools.

    Raises
    ------
    AttributeError
        If the provided client does not have an event system.
    """
    try:
        client.meta.events.register(TARGET_SDK_EVENT, _create_feature_function(feature))
    except AttributeError as e:
        # BUG FIX: the message previously said "session" even though this
        # function receives a client; mirror register_feature_to_resource.
        logger.debug(f"client passed in doesn't have a event system:{e}")
# Add feature user-agent to given sdk boto3.resource
def register_feature_to_resource(resource, feature):
    """
    Register the given feature string to the event system of the provided boto3 resource
    and append the feature to the User-Agent header of the request

    Parameters
    ----------
    resource : boto3.session.Session.resource
        The boto3 resource to which the feature will be registered.
    feature : str
        The feature string to be appended to the User-Agent header, e.g., "streaming" in Powertools.

    Raises
    ------
    AttributeError
        If the provided resource does not have an event system.
    """
    try:
        # Resources wrap a client; register on the underlying client's events.
        resource.meta.client.meta.events.register(TARGET_SDK_EVENT, _create_feature_function(feature))
    except AttributeError as e:
        logger.debug(f"resource passed in doesn't have a event system:{e}")
def inject_user_agent():
    """Install the Powertools no-op User-Agent marker into new botocore sessions."""
    if inject_header:
        # Some older botocore versions doesn't support register_initializer. In those cases, we disable the feature.
        if not hasattr(botocore, "register_initializer"):
            return
        # Customize botocore session to inject Powertools header
        # See: https://github.com/boto/botocore/pull/2682
        botocore.register_initializer(METHOD_NAME)
7,451 | update model | import json
import requests
import tempfile
from datetime import datetime
from resotolib.args import ArgumentParser
from resotolib.logger import log
from resotolib.jwt import encode_jwt_to_headers
from resotolib.graph import Graph, GraphExportIterator, export_model
from resotolib.config import Config
from resotolib.core import resotocore
from typing import Callable, Optional
from tenacity import Retrying
from tenacity.stop import stop_after_attempt
from tenacity.wait import wait_fixed
class Resotocore:
    def __init__(
        self,
        send_request: Callable[[requests.Request], requests.Response],
        config: Config,
    ) -> None:
        # ``send_request`` performs the actual HTTP round-trip; injected so
        # callers/tests can control transport and authentication.
        self._send_request = send_request
        self._config = config
def create_graph_and_update_model(self, tempdir: str) -> None:
base_uri = resotocore.http_uri
resotocore_graph = self._config.resotoworker.graph
dump_json = self._config.resotoworker.debug_dump_json
self.create_graph(base_uri, resotocore_graph)
self.METHOD_NAME(base_uri, dump_json=dump_json, tempdir=tempdir)
    def send_to_resotocore(self, graph: Graph, task_id: str, tempdir: str) -> None:
        """Export *graph* and merge it into resotocore, if a core URI is configured."""
        if not ArgumentParser.args.resotocore_uri:
            return None
        base_uri = resotocore.http_uri
        resotocore_graph = self._config.resotoworker.graph
        dump_json = self._config.resotoworker.debug_dump_json
        graph_merge_kind = self._config.resotoworker.graph_merge_kind
        graph_export_iterator = GraphExportIterator(
            graph,
            delete_tempfile=not dump_json,
            tempdir=tempdir,
            graph_merge_kind=graph_merge_kind,
        )
        # The graph is not required any longer and can be released.
        del graph
        graph_export_iterator.export_graph()
        # A replace node marks the subtree resotocore should swap in; without
        # one a merge would be destructive, so bail out instead.
        if not graph_export_iterator.found_replace_node:
            log.error("No replace node found, not sending graph to resotocore")
            return
        self.send_graph(graph_export_iterator, base_uri, resotocore_graph, task_id)
def create_graph(self, resotocore_base_uri: str, resotocore_graph: str) -> None:
graph_uri = f"{resotocore_base_uri}/graph/{resotocore_graph}"
log.debug(f"Creating graph {resotocore_graph} via {graph_uri}")
headers = {
"accept": "application/json",
"Content-Type": "application/json",
}
if getattr(ArgumentParser.args, "psk", None):
encode_jwt_to_headers(headers, {}, ArgumentParser.args.psk)
for attempt in Retrying(reraise=True, stop=stop_after_attempt(3), wait=wait_fixed(10)):
with attempt:
request = requests.Request(method="POST", url=graph_uri, data="", headers=headers)
r = self._send_request(request)
if r.status_code != 200:
log.error(r.content)
raise RuntimeError(f"Failed to create graph: {r.content}") # type: ignore
def METHOD_NAME(
self,
resotocore_base_uri: str,
dump_json: bool = False,
tempdir: Optional[str] = None,
) -> None:
model_uri = f"{resotocore_base_uri}/model"
log.debug(f"Updating model via {model_uri}")
model_json = json.dumps(export_model(), indent=4)
if dump_json:
ts = datetime.now().strftime("%Y-%m-%d-%H-%M")
with tempfile.NamedTemporaryFile(
prefix=f"resoto-model-{ts}-",
suffix=".json",
delete=not dump_json,
dir=tempdir,
) as model_outfile:
log.info(f"Writing model json to file {model_outfile.name}")
model_outfile.write(model_json.encode())
headers = {"Content-Type": "application/json"}
if getattr(ArgumentParser.args, "psk", None):
encode_jwt_to_headers(headers, {}, ArgumentParser.args.psk)
for attempt in Retrying(reraise=True, stop=stop_after_attempt(3), wait=wait_fixed(10)):
with attempt:
request = requests.Request(method="PATCH", url=model_uri, data=model_json, headers=headers)
r = self._send_request(request)
if r.status_code != 200:
log.error(r.content)
raise RuntimeError(f"Failed to create model: {r.content}") # type: ignore
def send_graph(
self,
graph_export_iterator: GraphExportIterator,
resotocore_base_uri: str,
resotocore_graph: str,
task_id: str,
) -> None:
merge_uri = f"{resotocore_base_uri}/graph/{resotocore_graph}/merge"
log.debug(f"Sending graph via {merge_uri}")
headers = {
"Content-Type": "application/x-ndjson",
"Resoto-Worker-Nodes": str(graph_export_iterator.number_of_nodes),
"Resoto-Worker-Edges": str(graph_export_iterator.number_of_edges),
"Resoto-Worker-Task-Id": task_id,
}
params = dict(wait_for_result=False)
if getattr(ArgumentParser.args, "psk", None):
encode_jwt_to_headers(headers, {}, ArgumentParser.args.psk)
for attempt in Retrying(reraise=True, stop=stop_after_attempt(3), wait=wait_fixed(10)):
with attempt:
request = requests.Request(
method="POST", url=merge_uri, data=graph_export_iterator, params=params, headers=headers
)
r = self._send_request(request)
if r.status_code not in (200, 204):
log.error(r.content)
raise RuntimeError(f"Failed to send graph: {r.content}") # type: ignore
log.debug(f"Sent {graph_export_iterator.total_lines} items to resotocore") |
7,452 | defpanellist | # Module 'panel'
#
# Support for the Panel library.
# Uses built-in module 'pnl'.
# Applications should use 'panel.function' instead of 'pnl.function';
# most 'pnl' functions are transparently exported by 'panel',
# but dopanel() is overridden and you have to use this version
# if you want to use callbacks.
# Emit the standard Py3k removal warning as soon as this module is imported.
from warnings import warnpy3k
warnpy3k("the panel module has been removed in Python 3.0", stacklevel=2)
del warnpy3k
import pnl
# Module-wide debug switch: set to 1 to trace generated assignments/exec calls.
debug = 0
# Test if an object is a list.
#
def is_list(x):
    """Return True only when *x* is exactly a list (subclasses excluded)."""
    return type(x) is type([])
# Reverse a list.
#
def reverse(list):
    """Return a new list containing the items of *list* in reverse order."""
    return [entry for entry in list][::-1]
# Get an attribute of a list, which may itself be another list.
# Don't use 'prop' for name.
#
def getattrlist(list, name):
    """Return the tail of the first sub-list of *list* whose head is *name*.

    Returns [] when no such attribute entry exists. Don't use 'prop' for
    *name*; properties are handled by getproplist().
    """
    for entry in list:
        if entry and type(entry) is type([]) and entry[0] == name:
            return entry[1:]
    return []
# Get a property of a list, which may itself be another list.
#
def getproplist(list, name):
    """Return the value tail of the first ('prop', name, ...) entry in *list*.

    Returns [] when the property is absent.
    """
    for entry in list:
        if not (entry and type(entry) is type([])):
            continue
        if entry[0] != 'prop':
            continue
        if len(entry) > 1 and entry[1] == name:
            return entry[2:]
    return []
# Test if an actuator description contains the property 'end-of-group'
#
def is_endgroup(list):
    """Truthy when the description carries the 'end-of-group' property set
    to '#t'. Returns the (falsy) empty property list when the property is
    absent, matching the original short-circuit semantics."""
    props = getproplist(list, 'end-of-group')
    if not props:
        return props
    return props[0] == '#t'
# Neatly display an actuator definition given as S-expression
# the prefix string is printed before each line.
#
def show_actuator(prefix, a):
    """Pretty-print the actuator description *a* (a parsed S-expression),
    prefixing every output line with *prefix*. Python 2 only (print stmt)."""
    for item in a:
        if not is_list(item):
            print prefix, item
        elif item and item[0] == 'al':
            # Nested sub-actuator list: recurse with a deeper indent.
            print prefix, 'Subactuator list:'
            for a in item[1:]:
                show_actuator(prefix + '    ', a)
        elif len(item) == 2:
            print prefix, item[0], '=>', item[1]
        elif len(item) == 3 and item[0] == 'prop':
            # Trailing comma keeps the value on the same output line.
            print prefix, 'Prop', item[1], '=>',
            print item[2]
        else:
            print prefix, '?', item
# Neatly display a panel.
#
def show_panel(prefix, p):
    """Pretty-print the panel description *p* (a parsed S-expression),
    prefixing every output line with *prefix*. Python 2 only (print stmt)."""
    for item in p:
        if not is_list(item):
            print prefix, item
        elif item and item[0] == 'al':
            # The actuator list is rendered via show_actuator, indented.
            print prefix, 'Actuator list:'
            for a in item[1:]:
                show_actuator(prefix + '    ', a)
        elif len(item) == 2:
            print prefix, item[0], '=>', item[1]
        elif len(item) == 3 and item[0] == 'prop':
            # Trailing comma keeps the value on the same output line.
            print prefix, 'Prop', item[1], '=>',
            print item[2]
        else:
            print prefix, '?', item
# Exception raised by build_actuator or build_panel.
# NOTE: a Python 2 string exception; callers must catch it by identity.
#
panel_error = 'panel error'
# Dummy callback used to initialize the callbacks.
#
def dummy_callback(arg):
    """Placeholder callback installed on new actuators; ignores *arg*."""
    return None
# Assign attributes to members of the target.
# Attribute names in exclist are ignored.
# The member name is the attribute name prefixed with the prefix.
#
def assign_members(target, attrlist, exclist, prefix):
    """Assign each (name, value) pair from *attrlist* as an attribute of
    *target*; names in *exclist* are skipped and the member name is
    *prefix* + name. Values are decoded from their textual S-expression
    form; unrecognized values are reported and ignored. Python 2 only."""
    for item in attrlist:
        if is_list(item) and len(item) == 2 and item[0] not in exclist:
            name, value = item[0], item[1]
            ok = 1
            if value[0] in '-0123456789':
                # Numeric literal: eval() turns it into an int/float.
                value = eval(value)
            elif value[0] == '"':
                # Quoted string: strip the surrounding double quotes.
                value = value[1:-1]
            elif value == 'move-then-resize':
                # Strange default set by Panel Editor...
                ok = 0
            else:
                print 'unknown value', value, 'for', name
                ok = 0
            if ok:
                # Build and exec "target.<prefix><name> = <value>".
                lhs = 'target.' + prefix + name
                stmt = lhs + '=' + repr(value)
                if debug: print 'exec', stmt
                try:
                    exec stmt + '\n'
                except KeyboardInterrupt: # Don't catch this!
                    raise KeyboardInterrupt
                except:
                    # Best-effort: a failed assignment is reported, not fatal.
                    print 'assign failed:', stmt
# Build a real actuator from an actuator description.
# Return a pair (actuator, name).
#
def build_actuator(descr):
    """Build a real pnl actuator from the S-expression description *descr*.

    Returns a pair (actuator, name); name is '' when the description has
    no name attribute.
    """
    namelist = getattrlist(descr, 'name')
    if namelist:
        # Assume it is a string
        actuatorname = namelist[0][1:-1]
    else:
        actuatorname = ''
    # The head of the description is the actuator type, minus any pnl_ prefix.
    type = descr[0]
    if type[:4] == 'pnl_': type = type[4:]
    act = pnl.mkact(type)
    # Callbacks default to a no-op until the application installs real ones.
    act.downfunc = act.activefunc = act.upfunc = dummy_callback
    #
    assign_members(act, descr[1:], ['al', 'data', 'name'], '')
    #
    # Treat actuator-specific data
    #
    datalist = getattrlist(descr, 'data')
    prefix = ''
    if type[-4:] == 'puck':
        prefix = 'puck_'
    elif type == 'mouse':
        prefix = 'mouse_'
    assign_members(act, datalist, [], prefix)
    #
    return act, actuatorname
# Build all sub-actuators and add them to the super-actuator.
# The super-actuator must already have been added to the panel.
# Sub-actuators with defined names are added as members to the panel
# so they can be referenced as p.name.
#
# Note: I have no idea how panel.endgroup() works when applied
# to a sub-actuator.
#
def build_subactuators(panel, super_act, al):
    """Recursively build sub-actuators from *al*, attach them to *super_act*,
    and register named ones as attributes of *panel*. Python 2 only."""
    #
    # This is nearly the same loop as below in build_panel(),
    # except a call is made to addsubact() instead of addact().
    #
    for a in al:
        act, name = build_actuator(a)
        act.addsubact(super_act)
        if name:
            # Expose the named sub-actuator as panel.<name>.
            stmt = 'panel.' + name + ' = act'
            if debug: print 'exec', stmt
            exec stmt + '\n'
        if is_endgroup(a):
            panel.endgroup()
        sub_al = getattrlist(a, 'al')
        if sub_al:
            build_subactuators(panel, act, sub_al)
    #
    # Fix the actuator to which whe just added subactuators.
    # This can't hurt (I hope) and is needed for the scroll actuator.
    #
    super_act.fixact()
# Build a real panel from a panel definition.
# Return a panel object p, where for each named actuator a, p.name is a
# reference to a.
#
def build_panel(descr):
    """Build a real panel from the S-expression *descr*.

    Returns a panel object p where each named actuator a is reachable as
    p.name. Raises panel_error (a string exception) when *descr* is not
    a panel form. Python 2 only.
    """
    #
    # Sanity check
    #
    if (not descr) or descr[0] != 'panel':
        raise panel_error, 'panel description must start with "panel"'
    #
    if debug: show_panel('', descr)
    #
    # Create an empty panel
    #
    panel = pnl.mkpanel()
    #
    # Assign panel attributes
    #
    assign_members(panel, descr[1:], ['al'], '')
    #
    # Look for actuator list
    #
    al = getattrlist(descr, 'al')
    #
    # The order in which actuators are created is important
    # because of the endgroup() operator.
    # Unfortunately the Panel Editor outputs the actuator list
    # in reverse order, so we reverse it here.
    #
    al = reverse(al)
    #
    for a in al:
        act, name = build_actuator(a)
        act.addact(panel)
        if name:
            # Expose the named actuator as panel.<name>.
            stmt = 'panel.' + name + ' = act'
            exec stmt + '\n'
        if is_endgroup(a):
            panel.endgroup()
        sub_al = getattrlist(a, 'al')
        if sub_al:
            build_subactuators(panel, act, sub_al)
    #
    return panel
# Wrapper around pnl.dopanel() which calls call-back functions.
#
def my_dopanel():
    """Poll the panel once and dispatch down/active/up callbacks.

    Returns the actuator reported by pnl.dopanel() (may be None).
    """
    # Extract only the first 4 elements to allow for future expansion
    a, down, active, up = pnl.dopanel()[:4]
    if down:
        down.downfunc(down)
    if active:
        active.activefunc(active)
    if up:
        up.upfunc(up)
    return a
# Create one or more panels from a description file (S-expressions)
# generated by the Panel Editor.
#
def METHOD_NAME(file):
    """Parse the Panel-Editor output *file* (S-expressions) and return the
    list of panels built from its descriptions."""
    import panelparser
    descrlist = panelparser.parse_file(open(file, 'r'))
    panellist = []
    for descr in descrlist:
        panellist.append(build_panel(descr))
    return panellist
# Import everything from built-in method pnl, so the user can always
# use panel.foo() instead of pnl.foo().
# This gives *no* performance penalty once this module is imported.
#
from pnl import * # for export
# Export the callback-aware wrapper under the standard pnl name.
dopanel = my_dopanel # override pnl.dopanel
7,453 | forward | import torch
import torch.nn as nn
import numpy as np
import torch.nn.functional as F
from torch.nn.utils import weight_norm
from .pcmer import PCmer
class Unit2MelNaive(nn.Module):
    """Naive unit-to-mel decoder: embeds f0/volume/speaker information,
    runs a conv stack plus a PCmer transformer, and projects to mel bins.

    In training mode (infer=False) forward returns the MSE loss against
    gt_spec (optionally plus an L2 penalty); in inference mode it returns
    the predicted mel tensor.
    """
    def __init__(
        self,
        input_channel,
        n_spk,
        use_pitch_aug=False,
        out_dims=128,
        n_layers=3,
        n_chans=256,
        n_hidden=None, # deprecated; kept only for config compatibility
        use_speaker_encoder=False,
        speaker_encoder_out_channels=256,
        use_full_siren=False,
        l2reg_loss=0
    ):
        super().__init__()
        # Treat a None config value the same as "no L2 regularization".
        self.l2reg_loss = l2reg_loss if (l2reg_loss is not None) else 0
        self.f0_embed = nn.Linear(1, n_chans)
        self.volume_embed = nn.Linear(1, n_chans)
        if use_pitch_aug:
            self.aug_shift_embed = nn.Linear(1, n_chans, bias=False)
        else:
            self.aug_shift_embed = None
        self.n_spk = n_spk
        self.use_speaker_encoder = use_speaker_encoder
        if use_speaker_encoder:
            # External speaker embeddings are projected into the channel space.
            self.spk_embed = nn.Linear(speaker_encoder_out_channels, n_chans, bias=False)
        else:
            if n_spk is not None and n_spk > 1:
                self.spk_embed = nn.Embedding(n_spk, n_chans)
        # conv in stack
        self.stack = nn.Sequential(
            nn.Conv1d(input_channel, n_chans, 3, 1, 1),
            nn.GroupNorm(4, n_chans),
            nn.LeakyReLU(),
            nn.Conv1d(n_chans, n_chans, 3, 1, 1))
        # transformer
        if use_full_siren:
            from .pcmer_siren_full import PCmer as PCmerfs
            self.decoder = PCmerfs(
                num_layers=n_layers,
                num_heads=8,
                dim_model=n_chans,
                dim_keys=n_chans,
                dim_values=n_chans,
                residual_dropout=0.1,
                attention_dropout=0.1)
        else:
            self.decoder = PCmer(
                num_layers=n_layers,
                num_heads=8,
                dim_model=n_chans,
                dim_keys=n_chans,
                dim_values=n_chans,
                residual_dropout=0.1,
                attention_dropout=0.1)
        self.norm = nn.LayerNorm(n_chans)
        # out
        self.n_out = out_dims
        self.dense_out = weight_norm(
            nn.Linear(n_chans, self.n_out))
    def METHOD_NAME(self, units, f0, volume, spk_id=None, spk_mix_dict=None, aug_shift=None,
                gt_spec=None, infer=True, infer_speedup=10, method='dpm-solver', k_step=None, use_tqdm=True,
                spk_emb=None, spk_emb_dict=None):
        '''
        input:
            units: B x n_frames x n_unit
            f0, volume: per-frame conditioning (f0 presumably in Hz -- TODO confirm)
        return:
            infer=True : B x n_frames x out_dims mel prediction
            infer=False: scalar MSE loss vs gt_spec (+ optional L2 penalty)
        Note: infer_speedup/method/k_step/use_tqdm are accepted only for
        interface compatibility with the diffusion decoder; unused here.
        '''
        x = self.stack(units.transpose(1, 2)).transpose(1, 2)
        # log(1 + f0/700) is the mel-style frequency compression.
        x = x + self.f0_embed((1 + f0 / 700).log()) + self.volume_embed(volume)
        if self.use_speaker_encoder:
            if spk_mix_dict is not None:
                # Weighted mix of pre-computed speaker embeddings.
                assert spk_emb_dict is not None
                for k, v in spk_mix_dict.items():
                    spk_id_torch = spk_emb_dict[str(k)]
                    spk_id_torch = np.tile(spk_id_torch, (len(units), 1))
                    spk_id_torch = torch.from_numpy(spk_id_torch).float().to(units.device)
                    x = x + v * self.spk_embed(spk_id_torch)
            else:
                x = x + self.spk_embed(spk_emb)
        else:
            if self.n_spk is not None and self.n_spk > 1:
                if spk_mix_dict is not None:
                    # NOTE(review): assumes mix-dict keys are 1-based integer
                    # speaker ids; string keys would break LongTensor here.
                    for k, v in spk_mix_dict.items():
                        spk_id_torch = torch.LongTensor(np.array([[k]])).to(units.device)
                        x = x + v * self.spk_embed(spk_id_torch - 1)
                else:
                    x = x + self.spk_embed(spk_id - 1)
        if self.aug_shift_embed is not None and aug_shift is not None:
            # Pitch-augmentation shift is scaled down before embedding.
            x = x + self.aug_shift_embed(aug_shift / 5)
        x = self.decoder(x)
        x = self.norm(x)
        x = self.dense_out(x)
        if not infer:
            x = F.mse_loss(x, gt_spec)
            if self.l2reg_loss > 0:
                # l2_regularization is the module-level helper defined below.
                x = x + l2_regularization(model=self, l2_alpha=self.l2reg_loss)
        return x
def l2_regularization(model, l2_alpha):
    """Return l2_alpha * sum of ||W||^2 / 2 over all exact nn.Conv2d weights.

    NOTE(review): only nn.Conv2d modules are counted; Unit2MelNaive contains
    Conv1d layers only, so for that model this evaluates to 0 -- confirm
    whether Conv1d/Linear weights were meant to be included.
    """
    penalties = [
        (module.weight ** 2).sum() / 2.0
        for module in model.modules()
        if type(module) is nn.Conv2d
    ]
    return l2_alpha * sum(penalties)
7,454 | test return code existing variable | from http import client
import json
import pytest
import re
def assert_items_equal(x, y):
    """Assert that *x* and *y* contain the same distinct items, ignoring order."""
    left, right = set(x), set(y)
    assert left == right
# Versioned GitHub blob link to a variable's source file, e.g.
# https://github.com/openfisca/country-template/blob/1.2.3/openfisca_country_template/variables/foo.py#L1-L10
GITHUB_URL_REGEX = r"^https://github\.com/openfisca/country-template/blob/\d+\.\d+\.\d+((.dev|rc)\d+)?/openfisca_country_template/variables/(.)+\.py#L\d+-L\d+$"
# /variables
@pytest.fixture(scope="module")
def variables_response(test_client):
variables_response = test_client.get("/variables")
return variables_response
def test_return_code(variables_response):
    """GET /variables answers 200 OK."""
    assert variables_response.status_code == client.OK
def test_response_data(variables_response):
    """The /variables payload describes the 'birth' input variable."""
    payload = json.loads(variables_response.data.decode("utf-8"))
    expected = {
        "description": "Birth date",
        "href": "http://localhost/variable/birth",
    }
    assert payload["birth"] == expected
# /variable/<id>
def test_error_code_non_existing_variable(test_client):
    """Unknown variable ids answer 404 NOT FOUND."""
    status = test_client.get("/variable/non_existing_variable").status_code
    assert status == client.NOT_FOUND
@pytest.fixture(scope="module")
def input_variable_response(test_client):
    """Module-scoped response for the 'birth' input variable."""
    return test_client.get("/variable/birth")
def test_return_code_existing_input_variable(input_variable_response):
    """GET /variable/birth answers 200 OK."""
    assert input_variable_response.status_code == client.OK
def check_input_variable_value(key, expected_value, input_variable=None):
    """Assert that input_variable[key] equals *expected_value*."""
    actual = input_variable[key]
    assert actual == expected_value
@pytest.mark.parametrize(
    "expected_values",
    [
        ("description", "Birth date"),
        ("valueType", "Date"),
        ("defaultValue", "1970-01-01"),
        ("definitionPeriod", "ETERNITY"),
        ("entity", "person"),
        ("references", ["https://en.wiktionary.org/wiki/birthdate"]),
    ],
)
def test_input_variable_value(expected_values, input_variable_response):
    """Each metadata field of the 'birth' input variable has the expected value."""
    input_variable = json.loads(input_variable_response.data.decode("utf-8"))
    check_input_variable_value(*expected_values, input_variable=input_variable)
def test_input_variable_github_url(test_client):
    """The variable's 'source' field links to a versioned GitHub blob URL."""
    payload = json.loads(test_client.get("/variable/income_tax").data.decode("utf-8"))
    assert re.match(GITHUB_URL_REGEX, payload["source"])
def METHOD_NAME(test_client):
    """GET /variable/income_tax answers 200 OK for a formula-backed variable."""
    variable_response = test_client.get("/variable/income_tax")
    assert variable_response.status_code == client.OK
def check_variable_value(key, expected_value, variable=None):
    """Assert that variable[key] equals *expected_value*."""
    actual = variable[key]
    assert actual == expected_value
@pytest.mark.parametrize(
    "expected_values",
    [
        ("description", "Income tax"),
        ("valueType", "Float"),
        ("defaultValue", 0),
        ("definitionPeriod", "MONTH"),
        ("entity", "person"),
    ],
)
def test_variable_value(expected_values, test_client):
    """Each metadata field of the 'income_tax' variable has the expected value."""
    variable_response = test_client.get("/variable/income_tax")
    variable = json.loads(variable_response.data.decode("utf-8"))
    check_variable_value(*expected_values, variable=variable)
def test_variable_formula_github_link(test_client):
    """The formula's 'source' field links to a versioned GitHub blob URL."""
    variable_response = test_client.get("/variable/income_tax")
    variable = json.loads(variable_response.data.decode("utf-8"))
    assert re.match(GITHUB_URL_REGEX, variable["formulas"]["0001-01-01"]["source"])
def test_variable_formula_content(test_client):
    """The formula body is exposed verbatim in the 'content' field."""
    response = test_client.get("/variable/income_tax")
    variable = json.loads(response.data.decode("utf-8"))
    content = variable["formulas"]["0001-01-01"]["content"]
    expected_snippets = (
        "def formula(person, period, parameters):",
        'return person("salary", period) * parameters(period).taxes.income_tax_rate',
    )
    for snippet in expected_snippets:
        assert snippet in content
def test_null_values_are_dropped(test_client):
    """Fields with null values (here: references) are omitted from the payload."""
    variable_response = test_client.get("/variable/age")
    variable = json.loads(variable_response.data.decode("utf-8"))
    assert "references" not in variable.keys()
def test_variable_with_start_and_stop_date(test_client):
    """A variable that stopped applying exposes a null formula from its end date."""
    response = test_client.get("/variable/housing_allowance")
    variable = json.loads(response.data.decode("utf-8"))
    assert_items_equal(variable["formulas"], ["1980-01-01", "2016-12-01"])
    assert variable["formulas"]["2016-12-01"] is None
    assert "formula" in variable["formulas"]["1980-01-01"]["content"]
def test_variable_with_enum(test_client):
    """Enum-typed variables expose their possible values and a string default."""
    response = test_client.get("/variable/housing_occupancy_status")
    variable = json.loads(response.data.decode("utf-8"))
    assert variable["valueType"] == "String"
    assert variable["defaultValue"] == "tenant"
    assert "possibleValues" in variable.keys()
    assert variable["possibleValues"] == {
        "free_lodger": "Free lodger",
        "homeless": "Homeless",
        "owner": "Owner",
        "tenant": "Tenant",
    }
@pytest.fixture(scope="module")
def dated_variable_response(test_client):
    """Module-scoped response for the dated 'basic_income' variable."""
    return test_client.get("/variable/basic_income")
def test_return_code_existing_dated_variable(dated_variable_response):
    """GET /variable/basic_income answers 200 OK."""
    assert dated_variable_response.status_code == client.OK
def test_dated_variable_formulas_dates(dated_variable_response):
    """A dated variable lists one formula per start date."""
    dated_variable = json.loads(dated_variable_response.data.decode("utf-8"))
    assert_items_equal(dated_variable["formulas"], ["2016-12-01", "2015-12-01"])
def test_dated_variable_formulas_content(dated_variable_response):
    """Each dated formula exposes its own def signature and a return statement."""
    dated_variable = json.loads(dated_variable_response.data.decode("utf-8"))
    for start, signature in (
        ("2016-12-01", "def formula_2016_12(person, period, parameters):"),
        ("2015-12-01", "def formula_2015_12(person, period, parameters):"),
    ):
        code = dated_variable["formulas"][start]["content"]
        assert signature in code
        assert "return" in code
def test_variable_encoding(test_client):
    """Variables with non-ASCII content are still served successfully."""
    variable_response = test_client.get("/variable/pension")
    assert variable_response.status_code == client.OK
def test_variable_documentation(test_client):
    """Variable- and formula-level documentation strings are exposed verbatim."""
    response = test_client.get("/variable/housing_allowance")
    variable = json.loads(response.data.decode("utf-8"))
    assert (
        variable["documentation"]
        == "This allowance was introduced on the 1st of Jan 1980.\nIt disappeared in Dec 2016."
    )
    formula_documentation = variable["formulas"]["1980-01-01"]["documentation"]
    assert "Housing allowance." in formula_documentation
    assert (
        "Calculating it before this date will always return the variable default value, 0."
        in formula_documentation
    )
7,455 | test nested ignore user abort rpc work | import time
import urllib
import socket
from python.lib.testcase import KphpServerAutoTestCase
class TestIgnoreUserAbort(KphpServerAutoTestCase):
    """Integration tests for ignore_user_abort() handling in the KPHP server.

    Each test fires an HTTP request with a deliberately tiny client timeout
    so the connection aborts early, then inspects the server log to see
    whether the script kept running (ignore variants) or was terminated.
    """
    def _send_request(self, uri="/", timeout=0.05):
        # The short timeout makes the client abort the connection on purpose;
        # the resulting client-side exception is expected and swallowed.
        try:
            self.kphp_server.http_request(uri=uri, timeout=timeout)
        except Exception:
            pass
    """
    Changing the name leads to different tests run order and for some reason it helps to get rid of ASAN warning.
    As we decided that the previous ASAN warning was false-positive, this kind of fix might be acceptable for us.
    Old name was - "test_user_abort_rpc_work"
    """
    def test_user_abort_of_rpc_work(self):
        # Without ignore_user_abort the script must die on connection close...
        self._send_request(uri='/test_ignore_user_abort?type=rpc&level=no_ignore&port={}'.format(str(self.kphp_server.master_port)))
        self.kphp_server.assert_log(['Critical error during script execution: http connection close'], timeout=10)
        # ...so its finish marker must NOT appear in the log within 2s.
        error = False
        try:
            self.kphp_server.assert_log(["test_ignore_user_abort/finish_rpc_work_" + "no_ignore"], timeout=2)
        except Exception:
            error = True
        self.assertTrue(error)
    def test_user_abort_resumable_work(self):
        # Same as above, but the aborted work is a resumable function.
        self._send_request(uri='/test_ignore_user_abort?type=resumable&level=no_ignore')
        self.kphp_server.assert_log(['Critical error during script execution: http connection close'], timeout=10)
        error = False
        try:
            self.kphp_server.assert_log(["test_ignore_user_abort/finish_resumable_work_" + "no_ignore"], timeout=2)
        except Exception:
            error = True
        self.assertTrue(error)
    def test_ignore_user_abort_rpc_work(self):
        # With ignore enabled the rpc work must run to completion despite the abort.
        self._send_request(uri='/test_ignore_user_abort?type=rpc&level=ignore&port={}'.format(str(self.kphp_server.master_port)))
        self.kphp_server.assert_log(["test_ignore_user_abort/finish_rpc_work_" + "ignore"], timeout=5)
        self.kphp_server.assert_log(["test_ignore_user_abort/finish_ignore_" + "rpc"], timeout=5)
        self.kphp_server.assert_log(["shutdown_function was called"], timeout=5)
    def test_ignore_user_abort_resumable_work(self):
        self._send_request(uri='/test_ignore_user_abort?type=resumable&level=ignore')
        self.kphp_server.assert_log(["test_ignore_user_abort/finish_resumable_work_" + "ignore"], timeout=5)
        self.kphp_server.assert_log(["test_ignore_user_abort/finish_ignore_" + "resumable"], timeout=5)
        self.kphp_server.assert_log(["shutdown_function was called"], timeout=5)
    def METHOD_NAME(self):
        # Nested ignore_user_abort sections around rpc work.
        self._send_request(uri='/test_ignore_user_abort?type=rpc&level=nested_ignore&port={}'.format(str(self.kphp_server.master_port)))
        self.kphp_server.assert_log(["test_ignore_user_abort/finish_rpc_work_" + "nested_ignore"], timeout=5)
        self.kphp_server.assert_log(["test_ignore_user_abort/finish_nested_ignore_" + "rpc"], timeout=5)
        self.kphp_server.assert_log(["shutdown_function was called"], timeout=5)
    def test_nested_ignore_user_abort_resumable_work(self):
        self._send_request(uri='/test_ignore_user_abort?type=resumable&level=nested_ignore')
        self.kphp_server.assert_log(["test_ignore_user_abort/finish_resumable_work_" + "nested_ignore"], timeout=5)
        self.kphp_server.assert_log(["test_ignore_user_abort/finish_nested_ignore_" + "resumable"], timeout=5)
        self.kphp_server.assert_log(["shutdown_function was called"], timeout=5)
    def test_multi_ignore_user_abort_rpc_work(self):
        # Repeated sequential ignore_user_abort toggles around rpc work.
        self._send_request(uri='/test_ignore_user_abort?type=rpc&level=multi_ignore&port={}'.format(str(self.kphp_server.master_port)))
        self.kphp_server.assert_log(["test_ignore_user_abort/finish_rpc_work_" + "multi_ignore"], timeout=5)
        self.kphp_server.assert_log(["test_ignore_user_abort/finish_multi_ignore_" + "rpc"], timeout=5)
        self.kphp_server.assert_log(["shutdown_function was called"], timeout=5)
    def test_multi_ignore_user_abort_resumable_work(self):
        self._send_request(uri='/test_ignore_user_abort?type=resumable&level=multi_ignore')
        self.kphp_server.assert_log(["test_ignore_user_abort/finish_resumable_work_" + "multi_ignore"], timeout=5)
        self.kphp_server.assert_log(["test_ignore_user_abort/finish_multi_ignore_" + "resumable"], timeout=5)
        self.kphp_server.assert_log(["shutdown_function was called"], timeout=5)
    def test_idempotence_ignore_user_abort(self):
        # Two different requests back to back: abort state from the first run
        # must not leak into the second worker invocation.
        self._send_request(uri='/test_ignore_user_abort?type=rpc&level=ignore&port={}'.format(str(self.kphp_server.master_port)))
        self.kphp_server.assert_log(["test_ignore_user_abort/finish_rpc_work_" + "ignore"], timeout=5)
        self.kphp_server.assert_log(["test_ignore_user_abort/finish_ignore_" + "rpc"], timeout=5)
        self.kphp_server.assert_log(["shutdown_function was called"], timeout=5)
        time.sleep(2)
        self._send_request(uri='/test_ignore_user_abort?type=resumable&level=nested_ignore')
        self.kphp_server.assert_log(["test_ignore_user_abort/finish_resumable_work_" + "nested_ignore"], timeout=5)
        self.kphp_server.assert_log(["test_ignore_user_abort/finish_nested_ignore_" + "resumable"], timeout=5)
        self.kphp_server.assert_log(["shutdown_function was called"], timeout=5)
7,456 | select | from __future__ import absolute_import
###############################################################################
# ilastik: interactive learning and segmentation toolkit
#
# Copyright (C) 2011-2014, the ilastik developers
# <team@ilastik.org>
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# In addition, as a special exception, the copyright holders of
# ilastik give you permission to combine ilastik with applets,
# workflows and plugins which are not covered under the GNU
# General Public License.
#
# See the LICENSE file for details. License information is also available
# on the ilastik web site at:
# http://ilastik.org/license.html
###############################################################################
from builtins import range
from PyQt5.QtGui import QColor, QPixmap, QIcon, QImage
from PyQt5.QtCore import Qt, pyqtSignal, QModelIndex, QItemSelectionModel
from .listModel import ListModel, ListElement, _NPIXELS
import logging
logger = logging.getLogger(__name__)
class Crop(ListElement):
    """List entry describing a named 4D crop region with a brush color and a
    probability-map (pmap) color; emits a Qt signal on every change."""
    # Qt signals must be declared as class attributes (PyQt convention).
    changed = pyqtSignal()
    colorChanged = pyqtSignal(QColor)
    pmapColorChanged = pyqtSignal(QColor)
    roi_4dChanged = pyqtSignal()
    def __init__(self, name, roi_4d, color, parent=None, pmapColor=None):
        """Create a crop; *pmapColor* falls back to the brush *color* when omitted."""
        ListElement.__init__(self, name, parent)
        self._brushColor = color
        if pmapColor is None:
            # Default the pmap color to the brush color.
            self._pmapColor = color
        else:
            self._pmapColor = pmapColor
        self._roi_4d = roi_4d
    def roi_4d(self):
        # Accessor for the current 4D region of interest.
        return self._roi_4d
    def setRoi_4d(self, roi_4d):
        # Assign and emit only when the value actually changed.
        if self._roi_4d != roi_4d:
            logger.debug("Crop '{}' has new RoI {}".format(self._roi_4d, roi_4d))
            self._roi_4d = roi_4d
            self.roi_4dChanged.emit()
    def brushColor(self):
        return self._brushColor
    def setBrushColor(self, c):
        # Assign and emit only when the color actually changed.
        if self._brushColor != c:
            logger.debug("Crop '{}' has new brush color {}".format(self._brushColor, c))
            self._brushColor = c
            self.colorChanged.emit(c)
    def pmapColor(self):
        return self._pmapColor
    def setPmapColor(self, c):
        # Assign and emit only when the color actually changed.
        if self._pmapColor != c:
            logger.debug("Crop '{}' has new pmapColor {}".format(self._pmapColor, c))
            self._pmapColor = c
            self.pmapColorChanged.emit(c)
    def __repr__(self):
        return "<Crop name={}, color={}>".format(self.name, self._brushColor)
class CropListModel(ListModel):
    """Qt item model backing the crop list widget (color / name / delete columns)."""
    cropSelected = pyqtSignal(int)
    # Shared cache: (brush rgba, pmap rgba) -> QIcon, so icons are built only once.
    icon_cache = {}
    class ColumnID(object):
        # Column indices used throughout the model.
        Color = 0
        Name = 1
        Delete = 2
        ncols = 3
    def __init__(self, crops=None, parent=None):
        ListModel.__init__(self, crops, parent)
        # Alias: the base class stores the entries in _elements.
        self._crops = self._elements
        self.elementSelected.connect(self.cropSelected.emit)
    def __len__(self):
        return len(self._crops)
    def __getitem__(self, i):
        return self._crops[i]
    def rowCount(self, parent=None):
        return len(self._elements)
    def removeRow(self, position, parent=QModelIndex()):
        # Refuse protected rows and never remove the last remaining crop.
        if position in self.unremovable_rows:
            return False
        if self.rowCount() <= 1:
            return False
        self.beginRemoveRows(parent, position, position)
        value = self._elements[position]
        logger.debug("removing row: " + str(value))
        self._elements.remove(value)
        self.endRemoveRows()
        return True
    def data(self, index, role):
        # The color column answers edit/tooltip/decoration roles itself;
        # everything else is delegated to the base ListModel.
        if role == Qt.EditRole and index.column() == self.ColumnID.Color:
            return (self._elements[index.row()].brushColor(), self._elements[index.row()].pmapColor())
        elif role == Qt.ToolTipRole and index.column() == self.ColumnID.Color:
            return "Hex code : {}\nDouble click to change".format(self._elements[index.row()].brushColor().name())
        elif role == Qt.DecorationRole and index.column() == self.ColumnID.Color:
            row = index.row()
            return self.createIconForCrop(row)
        else:
            return ListModel.data(self, index, role)
    def createIconForCrop(self, row):
        """Return (and cache) the icon for *row*: a solid square when brush and
        pmap colors match, otherwise a diagonal split between the two colors."""
        value = self._elements[row]
        a = value.brushColor().rgba()
        b = value.pmapColor().rgba()
        try:
            # Return a cached icon if we already generated one.
            return CropListModel.icon_cache[(a, b)]
        except KeyError:
            if a == b:
                pixmap = QPixmap(_NPIXELS, _NPIXELS)
                pixmap.fill(value.brushColor())
            else:
                # Upper-left triangle in the brush color, lower-right in pmap color.
                img = QImage(_NPIXELS, _NPIXELS, QImage.Format_RGB32)
                for i in range(_NPIXELS):
                    for j in range(0, _NPIXELS - i):
                        img.setPixel(i, j, a)
                for i in range(_NPIXELS):
                    for j in range(_NPIXELS - i, _NPIXELS):
                        img.setPixel(i, j, b)
                pixmap = QPixmap.fromImage(img)
            icon = QIcon(pixmap)
            # Cache this icon so we don't have to make it again
            CropListModel.icon_cache[(a, b)] = icon
            return icon
    def flags(self, index):
        # The color cell is selectable but not editable in place; edits go
        # through the color dialog and setData().
        if index.column() == self.ColumnID.Color:
            return Qt.ItemIsEnabled | Qt.ItemIsSelectable
        else:
            return ListModel.flags(self, index)
    def setData(self, index, value, role=Qt.EditRole):
        # For the color column, *value* is a (brushColor, pmapColor) pair.
        if role == Qt.EditRole and index.column() == self.ColumnID.Color:
            row = index.row()
            brushColor = QColor(value[0])
            pmapColor = QColor(value[1])
            if brushColor.isValid() and pmapColor.isValid():
                logger.debug("setData: brushColor = {}, pmapColor = {}".format(brushColor.name(), pmapColor.name()))
                logger.debug(" self._elements[row] has type {}" "".format(type(self._elements[row])))
                self._elements[row].setBrushColor(brushColor)
                self._elements[row].setPmapColor(pmapColor)
                logger.debug(" self._elements[row].brushColor = {}".format(self._elements[row].brushColor().name()))
                logger.debug(" self._elements[row].pmapColor = {}".format(self._elements[row].pmapColor().name()))
                self.dataChanged.emit(index, index)
            return True
        else:
            return ListModel.setData(self, index, value, role)
    def METHOD_NAME(self, row):
        """Select *row*: clears the current selection, then selects the row's
        color and name cells."""
        self._selectionModel.clear()
        self._selectionModel.METHOD_NAME(self.index(row, self.ColumnID.Color), QItemSelectionModel.Select)
        self._selectionModel.METHOD_NAME(self.index(row, self.ColumnID.Name), QItemSelectionModel.Select)
7,457 | add shim | # don't import any costly modules
import sys
import os
# True only on PyPy: the __pypy__ module is a PyPy-specific builtin.
is_pypy = '__pypy__' in sys.builtin_module_names
def warn_distutils_present():
    """Warn when stdlib distutils was imported before this shim, since
    importing Setuptools will replace it in sys.modules."""
    if 'distutils' not in sys.modules:
        return
    if is_pypy and sys.version_info < (3, 7):
        # PyPy for 3.6 unconditionally imports distutils, so bypass the warning
        # https://foss.heptapod.net/pypy/pypy/-/blob/be829135bc0d758997b3566062999ee8b23872b4/lib-python/3/site.py#L250
        return
    import warnings
    message = (
        "Distutils was imported before Setuptools, but importing Setuptools "
        "also replaces the `distutils` module in `sys.modules`. This may lead "
        "to undesirable behaviors or errors. To avoid these issues, avoid "
        "using distutils directly, ensure that setuptools is installed in the "
        "traditional way (e.g. not an editable install), and/or make sure "
        "that setuptools is always imported before distutils."
    )
    warnings.warn(message)
def clear_distutils():
    """Drop distutils and every distutils.* submodule from sys.modules."""
    if 'distutils' not in sys.modules:
        return
    import warnings
    warnings.warn("Setuptools is replacing distutils.")
    doomed = [
        name for name in sys.modules
        if name == "distutils" or name.startswith("distutils.")
    ]
    for name in doomed:
        del sys.modules[name]
def enabled():
    """
    Allow selection of distutils by environment variable.

    Returns True only when SETUPTOOLS_USE_DISTUTILS is explicitly set to
    'local'; the default here is 'stdlib'.
    """
    return os.environ.get('SETUPTOOLS_USE_DISTUTILS', 'stdlib') == 'local'
def ensure_local_distutils():
    """Force-load setuptools' bundled distutils and sanity-check the result.

    Clears any previously imported stdlib distutils, then imports distutils
    with the shim finder temporarily installed so the import resolves to
    setuptools._distutils.
    """
    import importlib
    clear_distutils()
    # With the DistutilsMetaFinder in place,
    # perform an import to cause distutils to be
    # loaded from setuptools._distutils. Ref #2906.
    with shim():
        importlib.import_module('distutils')
    # check that submodules load as expected
    core = importlib.import_module('distutils.core')
    assert '_distutils' in core.__file__, core.__file__
    assert 'setuptools._distutils.log' not in sys.modules
def do_override():
    """
    Ensure that the local copy of distutils is preferred over stdlib.
    See https://github.com/pypa/setuptools/issues/417#issuecomment-392298401
    for more motivation.
    """
    if not enabled():
        return
    warn_distutils_present()
    ensure_local_distutils()
class _TrivialRe:
def __init__(self, *patterns):
self._patterns = patterns
def match(self, string):
return all(pat in string for pat in self._patterns)
class DistutilsMetaFinder:
    """Meta-path finder that redirects `import distutils` to the copy bundled
    with setuptools, with opt-outs for pip, CPython builds, and selected
    CPython test-suite modules."""
    def find_spec(self, fullname, path, target=None):
        # optimization: only consider top level modules and those
        # found in the CPython test suite.
        if path is not None and not fullname.startswith('test.'):
            return
        # Dispatch to a spec_for_<fullname> handler when one exists.
        method_name = 'spec_for_{fullname}'.format(**locals())
        method = getattr(self, method_name, lambda: None)
        return method()
    def spec_for_distutils(self):
        if self.is_cpython():
            return
        import importlib
        import importlib.abc
        import importlib.util
        try:
            mod = importlib.import_module('setuptools._distutils')
        except Exception:
            # There are a couple of cases where setuptools._distutils
            # may not be present:
            # - An older Setuptools without a local distutils is
            #   taking precedence. Ref #2957.
            # - Path manipulation during sitecustomize removes
            #   setuptools from the path but only after the hook
            #   has been loaded. Ref #2980.
            # In either case, fall back to stdlib behavior.
            return
        class DistutilsLoader(importlib.abc.Loader):
            # Loader that hands back the already-imported
            # setuptools._distutils module under the name 'distutils'.
            def create_module(self, spec):
                mod.__name__ = 'distutils'
                return mod
            def exec_module(self, module):
                pass
        return importlib.util.spec_from_loader(
            'distutils', DistutilsLoader(), origin=mod.__file__
        )
    @staticmethod
    def is_cpython():
        """
        Suppress supplying distutils for CPython (build and tests).
        Ref #2965 and #3007.
        """
        # pybuilddir.txt only exists inside a CPython source/build tree.
        return os.path.isfile('pybuilddir.txt')
    def spec_for_pip(self):
        """
        Ensure stdlib distutils when running under pip.
        See pypa/pip#8761 for rationale.
        """
        if sys.version_info >= (3, 12) or self.pip_imported_during_build():
            return
        clear_distutils()
        # Disable the distutils redirection for the rest of this process.
        self.spec_for_distutils = lambda: None
    @classmethod
    def pip_imported_during_build(cls):
        """
        Detect if pip is being imported in a build script. Ref #2355.
        """
        import traceback
        return any(
            cls.frame_file_is_setup(frame) for frame, line in traceback.walk_stack(None)
        )
    @staticmethod
    def frame_file_is_setup(frame):
        """
        Return True if the indicated frame suggests a setup.py file.
        """
        # some frames may not have __file__ (#2940)
        return frame.f_globals.get('__file__', '').endswith('setup.py')
    def spec_for_sensitive_tests(self):
        """
        Ensure stdlib distutils when running select tests under CPython.
        python/cpython#91169
        """
        clear_distutils()
        self.spec_for_distutils = lambda: None
    # Test modules that must see the stdlib distutils; a spec_for_<name>
    # alias is installed for each one right after this class definition.
    sensitive_tests = (
        [
            'test.test_distutils',
            'test.test_peg_generator',
            'test.test_importlib',
        ]
        if sys.version_info < (3, 10)
        else [
            'test.test_distutils',
        ]
    )
# Alias spec_for_sensitive_tests under spec_for_<module> for each sensitive
# CPython test module, so find_spec's getattr-based dispatch picks it up.
for name in DistutilsMetaFinder.sensitive_tests:
    setattr(
        DistutilsMetaFinder,
        f'spec_for_{name}',
        DistutilsMetaFinder.spec_for_sensitive_tests,
    )
# Shared singleton finder; inserted into / removed from sys.meta_path below.
DISTUTILS_FINDER = DistutilsMetaFinder()


def add_shim():
    """Install DISTUTILS_FINDER on sys.meta_path, exactly once (idempotent).

    Replaces the obscure ``X in Y or f()`` short-circuit with an explicit
    membership check.
    """
    if DISTUTILS_FINDER not in sys.meta_path:
        insert_shim()
class shim:
    """Context manager that temporarily installs the distutils finder."""

    def __enter__(self):
        # Note: returns None; the finder is installed purely as a side effect.
        insert_shim()

    def __exit__(self, exc, value, tb):
        _remove_shim()
def insert_shim():
    # Prepend so this finder wins over the default path-based finders.
    sys.meta_path.insert(0, DISTUTILS_FINDER)
def _remove_shim():
    """Uninstall the finder; a no-op when it is not currently installed."""
    try:
        sys.meta_path.remove(DISTUTILS_FINDER)
    except ValueError:
        # Finder was never inserted (or was already removed).
        pass
if sys.version_info < (3, 12):
# DistutilsMetaFinder can only be disabled in Python < 3.12 (PEP 632)
    remove_shim = _remove_shim
"""ProxyProvider API Views"""
from typing import Any, Optional
from django.utils.translation import gettext_lazy as _
from drf_spectacular.utils import extend_schema_field
from rest_framework.exceptions import ValidationError
from rest_framework.fields import CharField, ListField, ReadOnlyField, SerializerMethodField
from rest_framework.serializers import ModelSerializer
from rest_framework.viewsets import ModelViewSet, ReadOnlyModelViewSet
from authentik.core.api.providers import ProviderSerializer
from authentik.core.api.used_by import UsedByMixin
from authentik.core.api.utils import PassiveSerializer
from authentik.lib.utils.time import timedelta_from_string
from authentik.providers.oauth2.models import ScopeMapping
from authentik.providers.oauth2.views.provider import ProviderInfoView
from authentik.providers.proxy.models import ProxyMode, ProxyProvider
class OpenIDConnectConfigurationSerializer(PassiveSerializer):
    """rest_framework Serializer for OIDC Configuration.

    Field names mirror an OpenID Connect discovery document
    (``.well-known/openid-configuration``) as embedded in the outpost config.
    """

    issuer = CharField()
    authorization_endpoint = CharField()
    token_endpoint = CharField()
    userinfo_endpoint = CharField()
    end_session_endpoint = CharField()
    introspection_endpoint = CharField()
    jwks_uri = CharField()
    response_types_supported = ListField(child=CharField())
    id_token_signing_alg_values_supported = ListField(child=CharField())
    subject_types_supported = ListField(child=CharField())
    token_endpoint_auth_methods_supported = ListField(child=CharField())
class ProxyProviderSerializer(ProviderSerializer):
    """ProxyProvider Serializer"""

    # Generated server-side; never accepted from clients.
    client_id = CharField(read_only=True)
    redirect_uris = CharField(read_only=True)
    outpost_set = ListField(child=CharField(), read_only=True, source="outpost_set.all")

    def validate_basic_auth_enabled(self, value: bool) -> bool:
        """Ensure user and password attributes are set.

        Must be named ``validate_<field_name>`` so rest_framework invokes it
        automatically for the ``basic_auth_enabled`` field.
        """
        if value:
            if (
                self.initial_data.get("basic_auth_password_attribute", "") == ""
                or self.initial_data.get("basic_auth_user_attribute", "") == ""
            ):
                raise ValidationError(
                    _("User and password attributes must be set when basic auth is enabled.")
                )
        return value

    def validate(self, attrs) -> dict[Any, str]:
        """Check that internal_host is set when mode is Proxy"""
        if (
            attrs.get("mode", ProxyMode.PROXY) == ProxyMode.PROXY
            and attrs.get("internal_host", "") == ""
        ):
            raise ValidationError(
                {"internal_host": _("Internal host cannot be empty when forward auth is disabled.")}
            )
        return attrs

    def create(self, validated_data: dict):
        # Apply OAuth defaults after creation so derived values line up with
        # the saved host configuration, then persist them.
        instance: ProxyProvider = super().create(validated_data)
        instance.set_oauth_defaults()
        instance.save()
        return instance

    def update(self, instance: ProxyProvider, validated_data: dict):
        # Keep OAuth defaults in sync whenever the provider is edited.
        instance = super().update(instance, validated_data)
        instance.set_oauth_defaults()
        instance.save()
        return instance

    class Meta:
        model = ProxyProvider
        fields = ProviderSerializer.Meta.fields + [
            "client_id",
            "internal_host",
            "external_host",
            "internal_host_ssl_validation",
            "certificate",
            "skip_path_regex",
            "basic_auth_enabled",
            "basic_auth_password_attribute",
            "basic_auth_user_attribute",
            "mode",
            "intercept_header_auth",
            "redirect_uris",
            "cookie_domain",
            "jwks_sources",
            "access_token_validity",
            "refresh_token_validity",
            "outpost_set",
        ]
        extra_kwargs = ProviderSerializer.Meta.extra_kwargs
class ProxyProviderViewSet(UsedByMixin, ModelViewSet):
    """ProxyProvider Viewset"""

    queryset = ProxyProvider.objects.all()
    serializer_class = ProxyProviderSerializer
    # Lookup expressions exposed as query-string filters on the list endpoint.
    filterset_fields = {
        "application": ["isnull"],
        "name": ["iexact"],
        "authorization_flow__slug": ["iexact"],
        "property_mappings": ["iexact"],
        "internal_host": ["iexact"],
        "external_host": ["iexact"],
        "internal_host_ssl_validation": ["iexact"],
        "certificate__kp_uuid": ["iexact"],
        "certificate__name": ["iexact"],
        "skip_path_regex": ["iexact"],
        "basic_auth_enabled": ["iexact"],
        "basic_auth_password_attribute": ["iexact"],
        "basic_auth_user_attribute": ["iexact"],
        "mode": ["iexact"],
        "redirect_uris": ["iexact"],
        "cookie_domain": ["iexact"],
    }
    search_fields = ["name"]
    ordering = ["name"]
class ProxyOutpostConfigSerializer(ModelSerializer):
    """Proxy provider serializer for outposts"""

    # Flattened from the related application for outpost consumption.
    assigned_application_slug = ReadOnlyField(source="application.slug")
    assigned_application_name = ReadOnlyField(source="application.name")

    oidc_configuration = SerializerMethodField()
    access_token_validity = SerializerMethodField()
    scopes_to_request = SerializerMethodField()

    @extend_schema_field(OpenIDConnectConfigurationSerializer)
    def get_oidc_configuration(self, obj: ProxyProvider):
        """Embed OpenID Connect provider information"""
        # Uses the underlying Django HttpRequest of the DRF request.
        return ProviderInfoView(request=self.context["request"]._request).get_info(obj)

    def get_access_token_validity(self, obj: ProxyProvider) -> Optional[float]:
        """Get token validity as second count"""
        return timedelta_from_string(obj.access_token_validity).total_seconds()

    def get_scopes_to_request(self, obj: ProxyProvider) -> list[str]:
        """Get all the scope names the outpost should request,
        including custom-defined ones"""
        # set() de-duplicates scope names shared by multiple mappings.
        scope_names = set(
            ScopeMapping.objects.filter(provider__in=[obj]).values_list("scope_name", flat=True)
        )
        return list(scope_names)

    class Meta:
        model = ProxyProvider
        fields = [
            "pk",
            "name",
            "internal_host",
            "external_host",
            "internal_host_ssl_validation",
            "client_id",
            "client_secret",
            "oidc_configuration",
            "cookie_secret",
            "certificate",
            "skip_path_regex",
            "basic_auth_enabled",
            "basic_auth_password_attribute",
            "basic_auth_user_attribute",
            "mode",
            "cookie_domain",
            "access_token_validity",
            "intercept_header_auth",
            "scopes_to_request",
            "assigned_application_slug",
            "assigned_application_name",
        ]
class ProxyOutpostConfigViewSet(ReadOnlyModelViewSet):
    """ProxyProvider Viewset"""

    # Only providers bound to an application are exposed to outposts.
    queryset = ProxyProvider.objects.filter(application__isnull=False)
    serializer_class = ProxyOutpostConfigSerializer
    ordering = ["name"]
    search_fields = ["name"]
    filterset_fields = ["name"]
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""TensorFlow Eager Execution: Sanity tests."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tempfile
from tensorflow.contrib.eager.python import tfe
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import numerics
from tensorflow.python.platform import test
from tensorflow.python.summary import summary
from tensorflow.python.summary.writer import writer
class TFETest(test_util.TensorFlowTestCase):
  """Sanity checks for core TensorFlow APIs under eager execution."""

  def testMatmul(self):
    x = [[2.]]
    y = math_ops.matmul(x, x)  # tf.matmul
    self.assertAllEqual([[4.]], y.numpy())

  def testInstantError(self):
    if test_util.is_gpu_available():
      # TODO(nareshmodi): make this test better
      self.skipTest("Gather doesn't do index checking on GPUs")
    with self.assertRaisesRegexp(errors.InvalidArgumentError,
                                 r'indices = 7 is not in \[0, 3\)'):
      array_ops.gather([0, 1, 2], 7)

  def testGradients(self):

    def square(x):
      return math_ops.multiply(x, x)

    grad = tfe.gradients_function(square)
    # assertEqual, not the deprecated assertEquals alias.
    self.assertEqual([6], [x.numpy() for x in grad(3.)])

  def testGradOfGrad(self):

    def square(x):
      return math_ops.multiply(x, x)

    grad = tfe.gradients_function(square)
    gradgrad = tfe.gradients_function(lambda x: grad(x)[0])
    self.assertEqual([2], [x.numpy() for x in gradgrad(3.)])

  def testCustomGrad(self):

    @tfe.custom_gradient
    def f(x):
      y = math_ops.multiply(x, x)

      def grad_fn(_):
        return [x + y]

      return y, grad_fn

    grad = tfe.gradients_function(f)
    self.assertEqual([12], [x.numpy() for x in grad(3.)])

  @test_util.run_gpu_only
  def testGPU(self):
    # tf.Tensor.as_gpu_device() moves a tensor to GPU.
    x = constant_op.constant([[1., 2.], [3., 4.]]).gpu()
    # Alternatively, tf.device() as a context manager places tensors and
    # operations.
    with ops.device('gpu:0'):
      x += 1.
    # Without a device context, heuristics are used to place ops.
    # In this case, ops.reduce_mean runs on the GPU.
    axis = range(x.shape.ndims)
    m = math_ops.reduce_mean(x, axis)
    # m is on GPU, bring it back to CPU and compare.
    self.assertEqual(3.5, m.cpu().numpy())

  def testListDevices(self):
    # Expect at least one device.
    self.assertTrue(tfe.list_devices())

  def testAddCheckNumericsOpsRaisesError(self):
    with self.assertRaisesRegexp(
        RuntimeError,
        r'add_check_numerics_ops\(\) is not compatible with eager execution'):
      numerics.add_check_numerics_ops()

  def testClassicSummaryOpsErrorOut(self):
    x = constant_op.constant(42)
    x_summary = summary.scalar('x', x)
    y = constant_op.constant([1, 3, 3, 7])
    y_summary = summary.histogram('hist', y)
    with self.assertRaisesRegexp(
        RuntimeError,
        r'Merging tf\.summary\.\* ops is not compatible with eager execution'):
      summary.merge([x_summary, y_summary])
    with self.assertRaisesRegexp(
        RuntimeError,
        r'Merging tf\.summary\.\* ops is not compatible with eager execution'):
      summary.merge_all()

  def testClassicSummaryFileWriterErrorsOut(self):
    """Graph-based FileWriter must refuse to run under eager execution.

    Named with the ``test`` prefix so unittest discovery actually runs it.
    """
    with self.assertRaisesRegexp(
        RuntimeError,
        r'tf\.summary\.FileWriter is not compatible with eager execution'):
      writer.FileWriter(tempfile.mkdtemp())
if __name__ == '__main__':
tfe.enable_eager_execution()
  test.main()
# Copyright 2023 The Flax Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Write Summaries from JAX for use with Tensorboard."""
import contextlib
import functools
import os
# pylint: disable=g-import-not-at-top
from flax import io
import numpy as np
import tensorflow as tf # pytype: disable=import-error
from tensorboard.plugins.hparams import api as hparams_api
def _flatten_dict(input_dict, parent_key='', sep='.'):
"""Flattens and simplifies dict such that it can be used by hparams.
Args:
input_dict: Input dict, e.g., from ConfigDict.
parent_key: String used in recursion.
sep: String used to separate parent and child keys.
Returns:
Flattened dict.
"""
items = []
for k, v in input_dict.items():
new_key = parent_key + sep + k if parent_key else k
# Valid types according to https://github.com/tensorflow/tensorboard/blob/1204566da5437af55109f7a4af18f9f8b7c4f864/tensorboard/plugins/hparams/summary_v2.py
valid_types = (
bool,
int,
float,
str,
np.bool_,
np.integer,
np.floating,
np.character,
)
if isinstance(v, dict):
# Recursively flatten the dict.
items.extend(_flatten_dict(v, new_key, sep=sep).items())
continue
elif not isinstance(v, valid_types):
# Cast any incompatible values as strings such that they can be handled by hparams
v = str(v)
items.append((new_key, v))
return dict(items)
@contextlib.contextmanager
def _as_default(summary_writer: tf.summary.SummaryWriter, auto_flush: bool):
  """No-flush variation of summary_writer.as_default()."""
  context_manager = summary_writer.as_default()
  try:
    context_manager.__enter__()
    yield summary_writer
  finally:
    # as_default().__exit__ flushes the writer unconditionally; when
    # auto_flush is disabled, temporarily swap flush for a no-op during
    # __exit__, then restore the real flush method afterwards.
    old_flush = summary_writer.flush
    new_flush = old_flush if auto_flush else lambda: None
    summary_writer.flush = new_flush
    context_manager.__exit__()
    summary_writer.flush = old_flush
class SummaryWriter:
  """Saves data in event and summary protos for tensorboard."""

  def __init__(self, log_dir, auto_flush=True):
    """Create a new SummaryWriter.

    Args:
      log_dir: path to record tfevents files in.
      auto_flush: if true, flush after every reported metric.
    """
    log_dir = os.fspath(log_dir)

    # If needed, create log_dir directory as well as missing parent directories.
    if not io.isdir(log_dir):
      io.makedirs(log_dir)

    self._event_writer = tf.summary.create_file_writer(log_dir)
    self._as_default = functools.partial(_as_default, auto_flush=auto_flush)
    self._closed = False

  def close(self):
    """Close SummaryWriter. Final!"""
    if not self._closed:
      self._event_writer.close()
      self._closed = True
      del self._event_writer

  def flush(self):
    self._event_writer.flush()

  def scalar(self, tag, value, step):
    """Saves scalar value.

    Args:
      tag: str: label for this data
      value: int/float: number to log
      step: int: training step
    """
    value = float(np.array(value))
    with self._as_default(self._event_writer):
      tf.summary.scalar(name=tag, data=value, step=step)

  def image(self, tag, image, step, max_outputs=3):
    """Saves RGB image summary from np.ndarray [H,W], [H,W,1], or [H,W,3].

    Args:
      tag: str: label for this data
      image: ndarray: [H,W], [H,W,1], [H,W,3], [K,H,W], [K,H,W,1], [K,H,W,3]
        Save image in greyscale or colors.
        Pixel values could be either uint8 or float.
        Floating point values should be in range [0, 1).
      step: int: training step
      max_outputs: At most this many images will be emitted at each step.
        Defaults to 3.
    """
    image = np.array(image)
    # tf.summary.image expects image to have shape [k, h, w, c] where,
    # k = number of samples, h = height, w = width, c = number of channels.
    if len(np.shape(image)) == 2:
      image = image[np.newaxis, :, :, np.newaxis]
    elif len(np.shape(image)) == 3:
      # this could be either [k, h, w] or [h, w, c]
      if np.shape(image)[-1] in (1, 3):
        image = image[np.newaxis, :, :, :]
      else:
        image = image[:, :, :, np.newaxis]
    # Replicate a single greyscale channel into RGB.
    if np.shape(image)[-1] == 1:
      image = np.repeat(image, 3, axis=-1)

    # Convert to tensor value as tf.summary.image expects data to be a tensor.
    image = tf.convert_to_tensor(image)
    with self._as_default(self._event_writer):
      tf.summary.image(name=tag, data=image, step=step, max_outputs=max_outputs)

  def audio(self, tag, audiodata, step, sample_rate=44100, max_outputs=3):
    """Saves audio as wave.

    NB: single channel only right now.

    Args:
      tag: str: label for this data
      audiodata: ndarray [Nsamples, Nframes, Nchannels]: audio data to
        be saved as wave. The data will be clipped to [-1.0, 1.0].
      step: int: training step
      sample_rate: sample rate of passed in audio buffer
      max_outputs: At most this many audio clips will be emitted at each
        step. Defaults to 3.
    """
    # tf.summary.audio expects the audio data to have floating values in
    # [-1.0, 1.0].
    audiodata = np.clip(np.array(audiodata), -1, 1)

    # Convert to tensor value as tf.summary.audio expects data to be a tensor.
    audio = tf.convert_to_tensor(audiodata, dtype=tf.float32)
    with self._as_default(self._event_writer):
      tf.summary.audio(
          name=tag,
          data=audio,
          sample_rate=sample_rate,
          step=step,
          max_outputs=max_outputs,
          encoding='wav',
      )

  def histogram(self, tag, values, step, bins=None):
    """Saves histogram of values.

    Args:
      tag: str: label for this data
      values: ndarray: will be flattened by this routine
      step: int: training step
      bins: number of bins in histogram
    """
    values = np.array(values)
    values = np.reshape(values, -1)
    with self._as_default(self._event_writer):
      tf.summary.histogram(name=tag, data=values, step=step, buckets=bins)

  def text(self, tag, textdata, step):
    """Saves a text summary.

    Args:
      tag: str: label for this data
      textdata: string
      step: int: training step

    Note: markdown formatting is rendered by tensorboard.
    """
    if not isinstance(textdata, (str, bytes)):
      raise ValueError('`textdata` should be of the type `str` or `bytes`.')
    with self._as_default(self._event_writer):
      tf.summary.text(name=tag, data=tf.constant(textdata), step=step)

  def write(self, tag, tensor, step, metadata=None):
    """Saves an arbitrary tensor summary.

    Useful when working with custom plugins or constructing a summary directly.

    Args:
      tag: str: label for this data
      tensor: ndarray: tensor data to save.
      step: int: training step
      metadata: Optional SummaryMetadata, as a proto or serialized bytes.

    Note: markdown formatting is rendered by tensorboard.
    """
    with self._as_default(self._event_writer):
      tf.summary.write(tag=tag, tensor=tensor, step=step, metadata=metadata)

  def hparams(self, hparams):
    """Saves hyper parameters.

    Args:
      hparams: Flat mapping from hyper parameter name to value.
    """
    with self._as_default(self._event_writer):
      hparams_api.hparams(hparams=_flatten_dict(hparams))
import unittest, sys
from ctypes.test import need_symbol
import test.support
class SimpleTypesTestCase(unittest.TestCase):
    """Tests for from_param conversion of simple ctypes types."""

    def setUp(self):
        import ctypes
        try:
            from _ctypes import set_conversion_mode
        except ImportError:
            pass
        else:
            # Remember the previous mode so tearDown can restore it.
            self.prev_conv_mode = set_conversion_mode("ascii", "strict")

    def tearDown(self):
        try:
            from _ctypes import set_conversion_mode
        except ImportError:
            pass
        else:
            set_conversion_mode(*self.prev_conv_mode)

    def test_subclasses(self):
        from ctypes import c_void_p, c_char_p
        # ctypes 0.9.5 and before did overwrite from_param in SimpleType_new
        class CVOIDP(c_void_p):
            def from_param(cls, value):
                return value * 2
            from_param = classmethod(from_param)

        class CCHARP(c_char_p):
            def from_param(cls, value):
                return value * 4
            from_param = classmethod(from_param)

        self.assertEqual(CVOIDP.from_param("abc"), "abcabc")
        self.assertEqual(CCHARP.from_param("abc"), "abcabcabcabc")

    @need_symbol('c_wchar_p')
    def test_subclasses_c_wchar_p(self):
        from ctypes import c_wchar_p

        class CWCHARP(c_wchar_p):
            def from_param(cls, value):
                return value * 3
            from_param = classmethod(from_param)

        self.assertEqual(CWCHARP.from_param("abc"), "abcabcabc")

    # XXX Replace by c_char_p tests
    def test_cstrings(self):
        """Renamed from the masked METHOD_NAME so unittest discovery runs it."""
        from ctypes import c_char_p, byref

        # c_char_p.from_param on a Python String packs the string
        # into a cparam object
        s = "123"
        self.assertIs(c_char_p.from_param(s)._obj, s)

        # new in 0.9.1: convert (encode) unicode to ascii
        self.assertEqual(c_char_p.from_param(u"123")._obj, "123")
        self.assertRaises(UnicodeEncodeError, c_char_p.from_param, u"123\377")
        self.assertRaises(TypeError, c_char_p.from_param, 42)

        # calling c_char_p.from_param with a c_char_p instance
        # returns the argument itself:
        a = c_char_p("123")
        self.assertIs(c_char_p.from_param(a), a)

    @need_symbol('c_wchar_p')
    def test_cw_strings(self):
        from ctypes import byref, c_wchar_p
        s = u"123"
        # NOTE(review): block structure reconstructed from a whitespace-mangled
        # source — confirm which asserts are win32-only against the original.
        if sys.platform == "win32":
            self.assertTrue(c_wchar_p.from_param(s)._obj is s)
            self.assertRaises(TypeError, c_wchar_p.from_param, 42)

            # new in 0.9.1: convert (decode) ascii to unicode
            self.assertEqual(c_wchar_p.from_param("123")._obj, u"123")
        self.assertRaises(UnicodeDecodeError, c_wchar_p.from_param, "123\377")

        pa = c_wchar_p.from_param(c_wchar_p(u"123"))
        self.assertEqual(type(pa), c_wchar_p)

    def test_int_pointers(self):
        from ctypes import c_short, c_uint, c_int, c_long, POINTER, pointer
        LPINT = POINTER(c_int)

##        p = pointer(c_int(42))
##        x = LPINT.from_param(p)
        x = LPINT.from_param(pointer(c_int(42)))
        self.assertEqual(x.contents.value, 42)
        self.assertEqual(LPINT(c_int(42)).contents.value, 42)

        self.assertEqual(LPINT.from_param(None), None)

        if c_int != c_long:
            self.assertRaises(TypeError, LPINT.from_param, pointer(c_long(42)))
        self.assertRaises(TypeError, LPINT.from_param, pointer(c_uint(42)))
        self.assertRaises(TypeError, LPINT.from_param, pointer(c_short(42)))

    def test_byref_pointer(self):
        # The from_param class method of POINTER(typ) classes accepts what is
        # returned by byref(obj), it type(obj) == typ
        from ctypes import c_short, c_uint, c_int, c_long, pointer, POINTER, byref
        LPINT = POINTER(c_int)

        LPINT.from_param(byref(c_int(42)))

        self.assertRaises(TypeError, LPINT.from_param, byref(c_short(22)))
        if c_int != c_long:
            self.assertRaises(TypeError, LPINT.from_param, byref(c_long(22)))
        self.assertRaises(TypeError, LPINT.from_param, byref(c_uint(22)))

    def test_byref_pointerpointer(self):
        # See above
        from ctypes import c_short, c_uint, c_int, c_long, pointer, POINTER, byref

        LPLPINT = POINTER(POINTER(c_int))
        LPLPINT.from_param(byref(pointer(c_int(42))))

        self.assertRaises(TypeError, LPLPINT.from_param, byref(pointer(c_short(22))))
        if c_int != c_long:
            self.assertRaises(TypeError, LPLPINT.from_param, byref(pointer(c_long(22))))
        self.assertRaises(TypeError, LPLPINT.from_param, byref(pointer(c_uint(22))))

    def test_array_pointers(self):
        from ctypes import c_short, c_uint, c_int, c_long, POINTER
        INTARRAY = c_int * 3
        ia = INTARRAY()
        self.assertEqual(len(ia), 3)
        self.assertEqual([ia[i] for i in range(3)], [0, 0, 0])

        # Pointers are only compatible with arrays containing items of
        # the same type!
        LPINT = POINTER(c_int)
        LPINT.from_param((c_int*3)())
        self.assertRaises(TypeError, LPINT.from_param, c_short*3)
        self.assertRaises(TypeError, LPINT.from_param, c_long*3)
        self.assertRaises(TypeError, LPINT.from_param, c_uint*3)

    def test_noctypes_argtype(self):
        import _ctypes_test
        from ctypes import CDLL, c_void_p, ArgumentError

        func = CDLL(_ctypes_test.__file__)._testfunc_p_p
        func.restype = c_void_p
        # TypeError: has no from_param method
        self.assertRaises(TypeError, setattr, func, "argtypes", (object,))

        class Adapter(object):
            def from_param(cls, obj):
                return None

        func.argtypes = (Adapter(),)
        self.assertEqual(func(None), None)
        self.assertEqual(func(object()), None)

        class Adapter(object):
            def from_param(cls, obj):
                return obj

        func.argtypes = (Adapter(),)
        # don't know how to convert parameter 1
        self.assertRaises(ArgumentError, func, object())
        self.assertEqual(func(c_void_p(42)), 42)

        class Adapter(object):
            def from_param(cls, obj):
                raise ValueError(obj)

        func.argtypes = (Adapter(),)
        # ArgumentError: argument 1: ValueError: 99
        self.assertRaises(ArgumentError, func, 99)

    def test_abstract(self):
        from ctypes import (Array, Structure, Union, _Pointer,
                            _SimpleCData, _CFuncPtr)

        self.assertRaises(TypeError, Array.from_param, 42)
        self.assertRaises(TypeError, Structure.from_param, 42)
        self.assertRaises(TypeError, Union.from_param, 42)
        self.assertRaises(TypeError, _CFuncPtr.from_param, 42)
        self.assertRaises(TypeError, _Pointer.from_param, 42)
        self.assertRaises(TypeError, _SimpleCData.from_param, 42)

    @test.support.cpython_only
    def test_issue31311(self):
        # __setstate__ should neither raise a SystemError nor crash in case
        # of a bad __dict__.
        from ctypes import Structure

        class BadStruct(Structure):
            @property
            def __dict__(self):
                pass
        with self.assertRaises(TypeError):
            BadStruct().__setstate__({}, b'foo')

        class WorseStruct(Structure):
            @property
            def __dict__(self):
                1/0.0
        with self.assertRaises(ZeroDivisionError):
            WorseStruct().__setstate__({}, b'foo')
################################################################
if __name__ == '__main__':
    unittest.main()
"""
Base class for the internal managers. Both BlockManager and ArrayManager
inherit from this class.
"""
from __future__ import annotations
from typing import (
Literal,
TypeVar,
final,
)
import numpy as np
from pandas._typing import (
ArrayLike,
DtypeObj,
Shape,
)
from pandas.errors import AbstractMethodError
from pandas.core.dtypes.cast import (
find_common_type,
np_can_hold_element,
)
from pandas.core.base import PandasObject
from pandas.core.indexes.api import (
Index,
default_index,
)
T = TypeVar("T", bound="DataManager")
class DataManager(PandasObject):
    """Shared base for BlockManager and ArrayManager."""

    # TODO share more methods/attributes
    axes: list[Index]

    @property
    def items(self) -> Index:
        raise AbstractMethodError(self)

    @final
    def __len__(self) -> int:
        return len(self.items)

    @property
    def ndim(self) -> int:
        return len(self.axes)

    @property
    def shape(self) -> Shape:
        return tuple(len(ax) for ax in self.axes)

    @final
    def _validate_set_axis(self, axis: int, new_labels: Index) -> None:
        # Caller is responsible for ensuring we have an Index object.
        old_len = len(self.axes[axis])
        new_len = len(new_labels)

        if axis == 1 and len(self.items) == 0:
            # If we are setting the index on a DataFrame with no columns,
            # it is OK to change the length.
            pass
        elif new_len != old_len:
            raise ValueError(
                f"Length mismatch: Expected axis has {old_len} elements, new "
                f"values have {new_len} elements"
            )

    def reindex_indexer(
        self: T,
        new_axis,
        indexer,
        axis: int,
        fill_value=None,
        allow_dups: bool = False,
        copy: bool = True,
        only_slice: bool = False,
    ) -> T:
        raise AbstractMethodError(self)

    @final
    def reindex_axis(
        self: T,
        new_index: Index,
        axis: int,
        fill_value=None,
        only_slice: bool = False,
    ) -> T:
        """
        Conform data manager to new index.
        """
        new_index, indexer = self.axes[axis].reindex(new_index)

        return self.reindex_indexer(
            new_index,
            indexer,
            axis=axis,
            fill_value=fill_value,
            copy=False,
            only_slice=only_slice,
        )

    def _equal_values(self: T, other: T) -> bool:
        """
        To be implemented by the subclasses. Only check the column values
        assuming shape and indexes have already been checked.
        """
        raise AbstractMethodError(self)

    @final
    def equals(self, other: object) -> bool:
        """
        Implementation for DataFrame.equals
        """
        if not isinstance(other, DataManager):
            return False

        self_axes, other_axes = self.axes, other.axes
        if len(self_axes) != len(other_axes):
            return False
        if not all(ax1.equals(ax2) for ax1, ax2 in zip(self_axes, other_axes)):
            return False

        return self._equal_values(other)

    def apply(
        self: T,
        f,
        align_keys: list[str] | None = None,
        ignore_failures: bool = False,
        **kwargs,
    ) -> T:
        # Name restored to `apply` (overridden by concrete managers); the
        # masked placeholder broke the isna() call below and subclass overrides.
        raise AbstractMethodError(self)

    @final
    def isna(self: T, func) -> T:
        return self.apply("apply", func=func)

    # --------------------------------------------------------------------
    # Consolidation: No-ops for all but BlockManager

    def is_consolidated(self) -> bool:
        return True

    def consolidate(self: T) -> T:
        return self

    def _consolidate_inplace(self) -> None:
        return
class SingleDataManager(DataManager):
    """Base for managers backing a single column/Series (1-dimensional)."""

    @property
    def ndim(self) -> Literal[1]:
        return 1

    @final
    @property
    def array(self) -> ArrayLike:
        """
        Quick access to the backing array of the Block or SingleArrayManager.
        """
        # error: "SingleDataManager" has no attribute "arrays"; maybe "array"
        return self.arrays[0]  # type: ignore[attr-defined]

    def setitem_inplace(self, indexer, value) -> None:
        """
        Set values with indexer.

        For Single[Block/Array]Manager, this backs s[indexer] = value

        This is an inplace version of `setitem()`, mutating the manager/values
        in place, not returning a new Manager (and Block), and thus never changing
        the dtype.
        """
        arr = self.array

        # EAs will do this validation in their own __setitem__ methods.
        if isinstance(arr, np.ndarray):
            # Note: checking for ndarray instead of np.dtype means we exclude
            # dt64/td64, which do their own validation.
            value = np_can_hold_element(arr.dtype, value)
        arr[indexer] = value

    def grouped_reduce(self, func, ignore_failures: bool = False):
        """
        ignore_failures : bool, default False
            Not used; for compatibility with ArrayManager/BlockManager.
        """
        arr = self.array
        res = func(arr)
        # Reduction result gets a fresh RangeIndex of matching length.
        index = default_index(len(res))

        mgr = type(self).from_array(res, index)
        return mgr

    @classmethod
    def from_array(cls, arr: ArrayLike, index: Index):
        # Alternate constructor; implemented by concrete subclasses.
        raise AbstractMethodError(cls)
def interleaved_dtype(dtypes: list[DtypeObj]) -> DtypeObj | None:
    """
    Find the common dtype for `blocks`.

    Parameters
    ----------
    blocks : List[DtypeObj]

    Returns
    -------
    dtype : np.dtype, ExtensionDtype, or None
        None is returned when `blocks` is empty.
    """
    # Idiomatic emptiness check (was `if not len(dtypes)`); also drops a stray
    # trailing artifact that broke the original return statement.
    if not dtypes:
        return None

    return find_common_type(dtypes)
#!/usr/bin/env python3
import os, sys
import unittest
from gppylib import gplog
from gppylib.commands.base import CommandResult
from gpsegstop import SegStop, SegStopStatus
from mock import patch
logger = gplog.get_unittest_logger()
class SegStopTestCase(unittest.TestCase):
def setUp(self):
self.segstop = SegStop(name='Segment Stop',
db=None,
mode=None,
timeout=None)
def test_get_datadir_and_port(self):
self.segstop.db = '/tmp/gpseg0:1234'
self.assertEqual(['/tmp/gpseg0', '1234'], self.segstop.get_datadir_and_port())
def test_get_datadir_and_port_empty_port(self):
self.segstop.db = '/tmp/gpseg0'
self.assertEqual(['/tmp/gpseg0'], self.segstop.get_datadir_and_port())
def test_get_datadir_and_port_empty_datadir(self):
self.segstop.db = '1234'
self.assertEqual(['1234'], self.segstop.get_datadir_and_port())
def METHOD_NAME(self):
self.segstop.db = ''
self.assertEqual([''], self.segstop.get_datadir_and_port())
@patch('gppylib.commands.base.Command.run')
@patch('gppylib.commands.base.Command.get_results', return_value=CommandResult(0, b'', b'',True, False))
@patch('gppylib.commands.gp.SegmentIsShutDown.is_shutdown', return_value=True)
@patch('gpsegstop.unix.kill_9_segment_processes')
@patch('gpsegstop.pg.ReadPostmasterTempFile.getResults', return_value=(True, 1234, '/tmp/gpseg0'))
@patch('gpsegstop.unix.check_pid', return_value=False)
def test_run(self, mock1, mock2, mock3, mock4, mock5, mock6):
self.segstop.db = '/tmp/gpseg0:1234'
self.segstop.mode = 'smart'
self.segstop.timeout = '10'
expected = SegStopStatus('/tmp/gpseg0', True, 'Shutdown Succeeded')
result = self.segstop.run()
self.assertEqual(str(expected), str(result))
@patch('gppylib.commands.base.Command.run')
@patch('gppylib.commands.base.Command.get_results', return_value=CommandResult(1, b'', b'',True, False))
@patch('gpsegstop.pg.ReadPostmasterTempFile.getResults', return_value=(True, 1234, '/tmp/gpseg0'))
@patch('gpsegstop.unix.kill_sequence')
@patch('gpsegstop.unix.kill_9_segment_processes')
@patch('gpsegstop.unix.check_pid', return_value=False)
def test_run_with_error(self, mock1, mock2, mock3, mock4, mock5, mock6):
self.segstop.db = '/tmp/gpseg0:1234'
self.segstop.mode = 'smart'
self.segstop.timeout = '10'
expected = SegStopStatus('/tmp/gpseg0', True, 'Forceful termination success')
result = self.segstop.run()
self.assertIn(str(expected), str(result))
    @patch('gppylib.commands.base.Command.run')
    @patch('gppylib.commands.base.Command.get_results', return_value=CommandResult(0, b'', b'',True, False))
    @patch('gppylib.commands.gp.SegmentIsShutDown.is_shutdown', return_value=False)
    @patch('gpsegstop.pg.ReadPostmasterTempFile.getResults', return_value=(True, 1234, '/tmp/gpseg0'))
    @patch('gpsegstop.unix.kill_sequence')
    @patch('gpsegstop.unix.kill_9_segment_processes')
    @patch('gpsegstop.unix.check_pid', return_value=False)
    def test_run_with_pg_controldata_error(self, mock1, mock2, mock3, mock4, mock5, mock6, mock7):
        # Stop command returns 0 but pg_controldata says the segment is not
        # shut down: smart mode escalates to forceful termination.
        self.segstop.db = '/tmp/gpseg0:1234'
        self.segstop.mode = 'smart'
        self.segstop.timeout = '10'
        expected = SegStopStatus('/tmp/gpseg0', True, 'Forceful termination success')
        result = self.segstop.run()
        self.assertIn(str(expected), str(result))
    @patch('gppylib.commands.base.Command.run')
    @patch('gppylib.commands.base.Command.get_results', return_value=CommandResult(0, b'', b'',True, False))
    @patch('gppylib.commands.gp.SegmentIsShutDown.is_shutdown', return_value=False)
    @patch('gpsegstop.pg.ReadPostmasterTempFile.getResults', return_value=(True, 1234, '/tmp/gpseg0'))
    @patch('gpsegstop.unix.kill_9_segment_processes')
    @patch('gpsegstop.unix.check_pid', return_value=False)
    def test_run_with_pg_controldata_error_in_immediate_mode(self, mock1, mock2, mock3, mock4, mock5, mock6):
        # In immediate mode the pg_controldata shutdown check is not consulted,
        # so the result is still 'Shutdown Immediate'.
        self.segstop.db = '/tmp/gpseg0:1234'
        self.segstop.mode = 'immediate'
        self.segstop.timeout = '10'
        expected = SegStopStatus('/tmp/gpseg0', True, 'Shutdown Immediate')
        result = self.segstop.run()
        self.assertEqual(str(expected), str(result))
    @patch('gppylib.commands.base.Command.run')
    @patch('gppylib.commands.base.Command.get_results', return_value=CommandResult(0, b'', b'',True, False))
    @patch('gppylib.commands.gp.SegmentIsShutDown.is_shutdown', return_value=False)
    @patch('gpsegstop.pg.ReadPostmasterTempFile.getResults', return_value=(True, 1234, '/tmp/gpseg0'))
    @patch('gpsegstop.unix.kill_sequence')
    @patch('gpsegstop.unix.check_pid', return_value=False)
    @patch('gpsegstop.unix.kill_9_segment_processes')
    def test_run_with_kill_error(self, mock1, mock2, mock3, mock4, mock5, mock6, mock7):
        # Even with kill helpers mocked out, immediate mode reports success
        # as 'Shutdown Immediate'.
        self.segstop.db = '/tmp/gpseg0:1234'
        self.segstop.mode = 'immediate'
        self.segstop.timeout = '10'
        expected = SegStopStatus('/tmp/gpseg0', True, 'Shutdown Immediate')
        result = self.segstop.run()
        self.assertEqual(str(expected), str(result))
#------------------------------- Mainline --------------------------------
# Allow running this test module directly with the unittest runner.
if __name__ == '__main__':
    unittest.main()
"""
*****************
Version Functions
*****************
Check for latest version and recommend upgrade::
synapseclient.check_for_updates()
Print release notes for installed version of client::
synapseclient.release_notes()
.. automethod:: synapseclient.core.version_check.check_for_updates
.. automethod:: synapseclient.core.version_check.release_notes
"""
import importlib.resources
import re
import sys
import json
import requests
import synapseclient
_VERSION_URL = "https://raw.githubusercontent.com/Sage-Bionetworks/synapsePythonClient/master/synapseclient/synapsePythonClient" # noqa
def version_check(
    current_version=None, version_url=_VERSION_URL, check_for_point_releases=False
):
    """
    Gets the latest version information from version_url and check against the current version.
    Recommends upgrade, if a newer version exists.

    :param current_version: version string to check; defaults to the installed
        synapseclient version.
    :param version_url: URL of the JSON document describing the latest release.
    :param check_for_point_releases: when True compare three version levels
        (major.minor.patch) instead of two.

    :returns: True if current version is the latest release (or higher) version,
              False otherwise.
    """
    try:
        if not current_version:
            current_version = synapseclient.__version__
        version_info = _get_version_info(version_url)
        current_base_version = METHOD_NAME(current_version)

        # Check blacklist: either the exact version or its base (dev suffix
        # stripped) being blacklisted forces an upgrade.
        if (
            current_base_version in version_info["blacklist"]
            or current_version in version_info["blacklist"]
        ):
            msg = (
                "\nPLEASE UPGRADE YOUR CLIENT\n\nUpgrading your SynapseClient is required. "
                "Please upgrade your client by typing:\n"
                "    pip install --upgrade synapseclient\n\n"
            )
            raise SystemExit(msg)

        if "message" in version_info:
            sys.stderr.write(version_info["message"] + "\n")

        levels = 3 if check_for_point_releases else 2
        # Compare with latest version
        if _version_tuple(current_version, levels=levels) < _version_tuple(
            version_info["latestVersion"], levels=levels
        ):
            sys.stderr.write(
                "\nUPGRADE AVAILABLE\n\nA more recent version of the Synapse Client (%s) "
                "is available. Your version (%s) can be upgraded by typing:\n"
                "    pip install --upgrade synapseclient\n\n"
                % (
                    version_info["latestVersion"],
                    current_version,
                )
            )
            if "releaseNotes" in version_info:
                sys.stderr.write(
                    "Python Synapse Client version %s release notes\n\n"
                    % version_info["latestVersion"]
                )
                sys.stderr.write(version_info["releaseNotes"] + "\n\n")
            return False
    except Exception as e:
        # Don't prevent the client from running if something goes wrong
        sys.stderr.write("Exception in version check: %s\n" % (str(e),))
        return False

    return True
def check_for_updates():
    """
    Check for the existence of newer versions of the client, reporting both current release version and development
    version.

    For help installing development versions of the client, see the docs for
    :py:mod:`synapseclient` or the `README.md <https://github.com/Sage-Bionetworks/synapsePythonClient>`_.
    """
    sys.stderr.write("Python Synapse Client\n")
    sys.stderr.write("currently running version: %s\n" % synapseclient.__version__)

    # Fetch the published release metadata from GitHub.
    release_version_info = _get_version_info(_VERSION_URL)
    sys.stderr.write(
        "latest release version:    %s\n" % release_version_info["latestVersion"]
    )

    # Compare all three version levels (major.minor.patch).
    if _version_tuple(synapseclient.__version__, levels=3) < _version_tuple(
        release_version_info["latestVersion"], levels=3
    ):
        print(
            (
                "\nUPGRADE AVAILABLE\n\nA more recent version of the Synapse Client (%s) is available. "
                "Your version (%s) can be upgraded by typing:\n"
                "    pip install --upgrade synapseclient\n\n"
            )
            % (
                release_version_info["latestVersion"],
                synapseclient.__version__,
            )
        )
    else:
        sys.stderr.write("\nYour Synapse client is up to date!\n")
def release_notes(version_url=None):
    """
    Print release notes for the installed version of the client or latest release or development version if version_url
    is supplied.

    :param version_url: Defaults to None, meaning release notes for the installed version. Alternatives are:

                        - synapseclient.version_check._VERSION_URL
                        - synapseclient.version_check._DEV_VERSION_URL
    """
    version_info = _get_version_info(version_url)
    sys.stderr.write(
        "Python Synapse Client version %s release notes\n\n"
        % version_info["latestVersion"]
    )
    # Older metadata documents may not carry release notes at all.
    if "releaseNotes" in version_info:
        sys.stderr.write(version_info["releaseNotes"] + "\n")
def METHOD_NAME(version):
    """Return *version* with any ``.devN`` development segment removed."""
    dev_segment = re.compile(r"\.dev\d+")
    return dev_segment.sub("", version)
def _version_tuple(version, levels=2):
    """
    Take a version number as a string delimited by periods and return a tuple
    of ints with the desired number of levels.

    Components are converted to int so that comparisons are numeric: the
    previous string tuples compared lexicographically, so ('1', '10') < ('1', '9')
    wrongly reported 1.10 as older than 1.9.

    For example::

        print(version_tuple('0.5.1.dev1', levels=2))
        (0, 5)
    """

    def _to_int(part):
        # Tolerate stray non-numeric fragments (e.g. '1rc1') by using the
        # leading digits only; a component with no digits counts as 0.
        match = re.match(r"\d+", part)
        return int(match.group()) if match else 0

    v = METHOD_NAME(version).split(".")
    v = v[0 : min(len(v), levels)]
    if len(v) < levels:
        v = v + ["0"] * (levels - len(v))
    return tuple(_to_int(part) for part in v)
def _get_version_info(version_url=_VERSION_URL):
    """Return the version metadata dict, either from the bundled
    ``synapsePythonClient`` package file (``version_url=None``) or fetched
    from *version_url*.
    """
    if version_url is None:
        # ref = importlib_resources.files("synapseclient").joinpath("synapsePythonClient")
        # with ref.open("r") as fp:
        #     pkg_metadata = json.loads(fp.read())
        # TODO: switch to the above after python 3.8 is deprecated
        # importlib.resources.path yields a pathlib.Path, which has no .read()
        # method; json.load() requires a file object, so open the path first.
        with importlib.resources.path(__name__, "synapsePythonClient") as ref:
            with open(ref) as fp:
                pkg_metadata = json.load(fp)
        return pkg_metadata
    else:
        headers = {"Accept": "application/json; charset=UTF-8"}
        headers.update(synapseclient.USER_AGENT)
        return requests.get(version_url, headers=headers).json()
# If this file is run as a script, print current version
# then perform version check
if __name__ == "__main__":
    print("Version check")
    print("=============")
    print("Python Synapse Client version %s" % synapseclient.__version__)

    print("Check against production version:")
    if version_check():
        print("ok")

    print("Check against local copy of version file:")
    # version_url=None reads the bundled synapsePythonClient metadata file
    # instead of fetching it from GitHub.
    if version_check(version_url=None):
        print("ok")
import json
import falcon
from mock import MagicMock, Mock, patch, call
from tests import RestTestBase, TimeMock
from monitorrent.rest.execute_logs_details import ExecuteLogsDetails, ExecuteLogManager
class ExecuteLogDetailsTest(RestTestBase):
    """REST tests for the /api/execute/logs/{execute_id}/details endpoint."""

    def test_get_all(self):
        entries = [{}, {}, {}]
        log_manager = MagicMock()
        log_manager.get_execute_log_details = MagicMock(return_value=entries)
        log_manager.is_running = MagicMock(return_value=False)
        # noinspection PyTypeChecker
        execute_logs = ExecuteLogsDetails(log_manager)
        self.api.add_route('/api/execute/logs/{execute_id}/details', execute_logs)

        body = self.simulate_request('/api/execute/logs/1/details', decode='utf-8')

        self.assertEqual(self.srmock.status, falcon.HTTP_OK)
        self.assertTrue('application/json' in self.srmock.headers_dict['Content-Type'])
        result = json.loads(body)
        self.assertEqual(result, {'is_running': False, 'logs': entries})

    def test_bad_request(self):
        # A non-integer execute_id must be rejected with 400.
        log_manager = MagicMock()
        # noinspection PyTypeChecker
        execute_logs = ExecuteLogsDetails(log_manager)
        self.api.add_route('/api/execute/logs/{execute_id}/details', execute_logs)

        self.simulate_request('/api/execute/logs/abcd/details')

        self.assertEqual(self.srmock.status, falcon.HTTP_BAD_REQUEST)

    def test_empty_get(self):
        # Renamed from the METHOD_NAME placeholder so the runner discovers it.
        log_manager = ExecuteLogManager()
        log_manager.get_execute_log_details = Mock(return_value=[])
        log_manager.is_running = Mock(return_value=False)
        time = TimeMock()
        # Patch time in the module under test (execute_logs_details) like the
        # sibling tests do; patching monitorrent.rest.execute left the real
        # time module in place for this handler.
        with patch("monitorrent.rest.execute_logs_details.time", time):
            execute_log_details = ExecuteLogsDetails(log_manager)
            self.api.add_route('/api/execute/logs/{execute_id}/details', execute_log_details)
            body = self.simulate_request('/api/execute/logs/1/details', decode='utf-8')
            self.assertEqual(self.srmock.status, falcon.HTTP_OK)
            result = json.loads(body)
            self.assertEqual(result, {'is_running': False, 'logs': []})

    def test_no_wait_get(self):
        # Logs already available: the handler responds without polling.
        log_manager = ExecuteLogManager()
        log_manager.get_execute_log_details = Mock(return_value=[{}])
        log_manager.is_running = Mock(return_value=True)
        time = TimeMock()
        with patch("monitorrent.rest.execute_logs_details.time", time):
            execute_log_details = ExecuteLogsDetails(log_manager)
            self.api.add_route('/api/execute/logs/{execute_id}/details', execute_log_details)
            body = self.simulate_request('/api/execute/logs/1/details', decode='utf-8')
            self.assertEqual(self.srmock.status, falcon.HTTP_OK)
            result = json.loads(body)
            self.assertEqual(result, {'is_running': True, 'logs': [{}]})

    def test_no_wait_after_get(self):
        log_manager = ExecuteLogManager()
        get_execute_log_details_mock = Mock(return_value=[{}])
        log_manager.get_execute_log_details = get_execute_log_details_mock
        log_manager.is_running = Mock(return_value=True)
        time = TimeMock()
        with patch("monitorrent.rest.execute_logs_details.time", time):
            execute_log_details = ExecuteLogsDetails(log_manager)
            self.api.add_route('/api/execute/logs/{execute_id}/details', execute_log_details)
            body = self.simulate_request('/api/execute/logs/1/details', query_string="after=17", decode='utf-8')
            self.assertEqual(self.srmock.status, falcon.HTTP_OK)
            result = json.loads(body)
            self.assertEqual(result, {'is_running': True, 'logs': [{}]})
        # The 'after' query parameter must be forwarded to the log manager.
        get_execute_log_details_mock.assert_has_calls([call(1, 17)])

    def test_half_wait_get(self):
        result = {'r': None}

        def set_result():
            result['r'] = [{}]

        log_manager = ExecuteLogManager()
        log_manager.get_execute_log_details = Mock(side_effect=lambda *a, **ka: result['r'])
        log_manager.is_running = Mock(return_value=True)
        time = TimeMock()
        # Logs only appear at mocked time 115, so the handler has to poll
        # before it can respond.
        time.call_on(115, set_result)
        with patch("monitorrent.rest.execute_logs_details.time", time):
            execute_log_details = ExecuteLogsDetails(log_manager)
            self.api.add_route('/api/execute/logs/{execute_id}/details', execute_log_details)
            body = self.simulate_request('/api/execute/logs/1/details', query_string="after=17", decode='utf-8')
            self.assertEqual(self.srmock.status, falcon.HTTP_OK)
            result = json.loads(body)
            self.assertEqual(result, {'is_running': True, 'logs': [{}]})
"""
Copyright(C) 2023 Altom Consulting
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <https://www.gnu.org/licenses/>.
"""
import pytest
from alttester import By
from .utils import Scenes
class TestScene07A:
    """New-Input-System interaction tests (tap/click/tilt) for Scene 7A."""

    @pytest.fixture(autouse=True)
    def setup(self, altdriver):
        self.altdriver = altdriver
        self.altdriver.reset_input()
        self.altdriver.load_scene(Scenes.Scene07A)

    def test_tap_element_NIS(self):
        # Renamed from the METHOD_NAME placeholder so pytest collects it;
        # the name mirrors test_click_element_NIS below.
        capsule = self.altdriver.find_object(By.NAME, "Capsule")
        capsule.tap()

        component_name = "AltExampleNewInputSystem"
        property_name = "jumpCounter"
        property_value = capsule.get_component_property(
            component_name, property_name, "Assembly-CSharp",
            max_depth=1
        )
        assert property_value == 1

    def test_tap_coordinates_NIS(self):
        capsule = self.altdriver.find_object(By.NAME, "Capsule")
        self.altdriver.tap(capsule.get_screen_position())

        action_info = self.altdriver.wait_for_object(
            By.PATH, "//ActionText[@text=Capsule was tapped!]", timeout=1)
        assert action_info.get_text() == "Capsule was tapped!"

    def test_click_element_NIS(self):
        capsule = self.altdriver.find_object(By.NAME, "Capsule")
        capsule.click()

        component_name = "AltExampleNewInputSystem"
        property_name = "jumpCounter"
        property_value = capsule.get_component_property(
            component_name, property_name, "Assembly-CSharp",
            max_depth=1
        )
        assert property_value == 1

    def test_click_coordinates_NIS(self):
        capsule = self.altdriver.find_object(By.NAME, "Capsule")
        self.altdriver.click(capsule.get_screen_position())

        action_info = self.altdriver.wait_for_object(
            By.PATH, "//ActionText[@text=Capsule was clicked!]",
            timeout=1
        )
        assert action_info.get_text() == "Capsule was clicked!"

    def test_tilt(self):
        cube = self.altdriver.find_object(By.NAME, "Cube (1)")
        initial_position = cube.get_world_position()
        self.altdriver.tilt([1000, 10, 10], duration=1)
        # The cube must both have moved and have flagged the move.
        assert initial_position != self.altdriver.find_object(By.NAME, "Cube (1)").get_world_position()
        is_moved = cube.get_component_property("AltCubeNIS", "isMoved", "Assembly-CSharp")
        assert is_moved
class TestScene07B:
    """Drag-and-drop (multipoint swipe) tests for Scene 7B."""

    @pytest.fixture(autouse=True)
    def setup(self, altdriver):
        self.altdriver = altdriver
        self.altdriver.load_scene(Scenes.Scene07B)

    def get_sprite_name(self, source_image_name, image_source_drop_zone_name):
        # Returns the sprite names of the dragged image and of the drop zone
        # so tests can check the drop actually replaced the zone's sprite.
        image_source = self.altdriver.find_object(By.NAME, source_image_name).get_component_property(
            "UnityEngine.UI.Image", "sprite.name", assembly="UnityEngine.UI")
        image_source_drop_zone = self.altdriver.find_object(
            By.NAME, image_source_drop_zone_name).get_component_property(
            "UnityEngine.UI.Image", "sprite.name", assembly="UnityEngine.UI")
        return image_source, image_source_drop_zone

    def drop_image_with_multipoint_swipe(self, object_names, duration, wait):
        # Swipes through the screen positions of the named objects in order.
        positions = []
        for name in object_names:
            alt_object = self.altdriver.find_object(By.NAME, name)
            positions.append(alt_object.get_screen_position())
        self.altdriver.multipoint_swipe(positions, duration=duration, wait=wait)

    def test_multipoint_swipe_NIS(self):
        self.drop_image_with_multipoint_swipe(["Drag Image1", "Drop Box1"], 1, False)
        self.drop_image_with_multipoint_swipe(["Drag Image2", "Drop Box1", "Drop Box2"], 1, False)

        image_source, image_source_drop_zone = self.get_sprite_name("Drag Image1", "Drop Image")
        assert image_source == image_source_drop_zone
        image_source, image_source_drop_zone = self.get_sprite_name("Drag Image2", "Drop")
        assert image_source == image_source_drop_zone
#!/usr/bin/env python
# Copyright (C) 2006-2021 Music Technology Group - Universitat Pompeu Fabra
#
# This file is part of Essentia
#
# Essentia is free software: you can redistribute it and/or modify it under
# the terms of the GNU Affero General Public License as published by the Free
# Software Foundation (FSF), either version 3 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the Affero GNU General Public License
# version 3 along with this program. If not, see http://www.gnu.org/licenses/
from essentia_test import *
import os
def taglibVersion():
    """Best-effort detection of the installed taglib version string.

    Returns '??' when the version cannot be determined on this platform.
    """
    # sys.platform is 'linux' on Python 3 ('linux2' was Python 2 only), so
    # match by prefix instead of the exact Python-2 value.
    if sys.platform.startswith('linux'):
        first_line = os.popen('dpkg -l libtag*dev | grep ii').read().split('\n')[0]
        parts = first_line.split()
        # Guard against dpkg/grep producing no matching package line.
        return parts[2] if len(parts) > 2 else '??'
    if sys.platform == 'darwin':
        return os.popen('taglib-config --version').read().split('\n')[0]
    return '??'
class TestMetadataReader(TestCase):
    """Tests for the MetadataReader algorithm over several audio container formats."""

    audioDir = join(testdata.audio_dir, 'generated', 'metadata')

    def testFlac(self):
        result = MetadataReader(filename = join(self.audioDir, 'test.flac'))()
        tagsPool = result[7]
        tags = tagsPool.descriptorNames() + [tagsPool[t][0] for t in tagsPool.descriptorNames()]
        self.assertEqualVector(result[:7], ('test flac', 'mtg', 'essentia', '', 'Thrash Metal', '01', '2009'))
        # FIXME: Taglib 1.11.0 on OSX outputs bitrate inconsistent with 1.9.1 on Linux for FLAC and OGG
        # It might be due to different versions of Taglib or due to different platforms (we have not tested)
        # Therefore accept both bitrates as correct
        self.assertEqualVector([result[8]] + list(result[10:]), (5, 44100, 4))
        self.assertTrue(result[9] == 2201 or result[9] == 2202)
        self.assertEqualVector(
            tags,
            ['metadata.tags.album', 'metadata.tags.artist', 'metadata.tags.composer', 'metadata.tags.copyright',
             'metadata.tags.date', 'metadata.tags.description', 'metadata.tags.discnumber', 'metadata.tags.genre',
             'metadata.tags.performer', 'metadata.tags.title', 'metadata.tags.tracknumber', 'metadata.tags.tracktotal',
             'essentia', 'mtg', 'roberto.toscano', 'mtg.upf.edu', '2009', 'This is not thrash metal', '01', 'Thrash Metal',
             'roberto.toscano', 'test flac', '01', '01']
        )

    def testOgg(self):
        result = MetadataReader(filename = join(self.audioDir, 'test.ogg'))()
        tagsPool = result[7]
        tags = tagsPool.descriptorNames() + [tagsPool[t][0] for t in tagsPool.descriptorNames()]
        self.assertEqualVector(result[:7], ('test ogg', 'mtg', 'essentia', 'this is not psychadelic', 'Psychadelic', '01', '2009'))
        # see the FIXME note above
        self.assertEqualVector([result[8]] + list(result[10:]), (5, 44100, 1))
        self.assertTrue(result[9] == 96 or result[9] == 20)
        self.assertEqualVector(
            tags,
            ['metadata.tags.album', 'metadata.tags.artist', 'metadata.tags.comment', 'metadata.tags.composer',
             'metadata.tags.copyright', 'metadata.tags.date', 'metadata.tags.description', 'metadata.tags.discnumber',
             'metadata.tags.genre', 'metadata.tags.performer', 'metadata.tags.title', 'metadata.tags.tracknumber',
             'metadata.tags.tracktotal', 'essentia', 'mtg', 'this is not psychadelic', 'roberto.toscano', 'mtg.upf.edu',
             '2009', 'this is not psychadelic', '1', 'Psychadelic', 'roberto.toscano', 'test ogg', '01', '01']
        )

    def testMp3(self):
        result = MetadataReader(filename = join(self.audioDir, 'test.mp3'))()
        tagsPool = result[7]
        tags = tagsPool.descriptorNames() + [tagsPool[t][0] for t in tagsPool.descriptorNames()]
        self.assertEqualVector(result[:7], ('test sound', 'mtg', 'essentia', 'this is not reggae', 'Reggae', '01', '2009'))
        self.assertEqualVector(result[8:], (5, 128, 44100, 1))
        self.assertEqualVector(
            tags,
            ['metadata.tags.album', 'metadata.tags.artist', 'metadata.tags.comment', 'metadata.tags.date',
             'metadata.tags.genre', 'metadata.tags.title', 'metadata.tags.tracknumber', 'essentia', 'mtg',
             'this is not reggae', '2009', 'Reggae', 'test sound', '01']
        )

    def testApe(self):
        result = MetadataReader(filename = join(self.audioDir, 'test.ape'))()
        tagsPool = result[7]
        tags = tagsPool.descriptorNames() + [tagsPool[t][0] for t in tagsPool.descriptorNames()]
        self.assertEqualVector(result[:7], ('ape test file', 'mtg', 'essentia', 'this is not porn', 'Porn Groove', "01/01", "2009"))
        self.assertEqualVector(result[8:], (5, 722, 44100, 1))
        self.assertEqualVector(
            tags,
            ['metadata.tags.album', 'metadata.tags.artist', 'metadata.tags.comment', 'metadata.tags.composer',
             'metadata.tags.copyright', 'metadata.tags.date', 'metadata.tags.genre', 'metadata.tags.original artist',
             'metadata.tags.part', 'metadata.tags.title', 'metadata.tags.tracknumber', 'essentia', 'mtg', 'this is not porn',
             'roberto.toscano', 'mtg.upf.edu', '2009', 'Porn Groove', 'roberto.toscano', '1', 'ape test file', '01/01']
        )

    def testPCM(self):
        # Plain WAV carries no tags at all; only stream parameters are filled.
        result = MetadataReader(filename = join(testdata.audio_dir, 'recorded', 'musicbox.wav'), failOnError=True)()
        self.assertTrue(not len(result[7].descriptorNames()))
        self.assertEqualVector(result[:7], ('', '', '', '', '', '', ''))
        self.assertEqualVector(result[8:], (45, 1444, 44100, 2))

    def testFailOnError(self):
        self.assertComputeFails(
            MetadataReader(filename = join(self.audioDir, 'random_file_that_doesnt_exist.ape'), failOnError=True))
        result = MetadataReader(filename = join(self.audioDir, 'random_file_that_doesnt_exist.ape'), failOnError=False)()
        self.assertTrue(result[7].descriptorNames() == [])
        self.assertEqualVector(result[:7], ('', '', '', '', '', '', ''))
        self.assertEqualVector(result[8:], (0, 0, 0, 0))

    def testUnicode(self):
        result = MetadataReader(filename = join(self.audioDir, 'test-unicode.flac'))()
        self.assertEqualVector(result[:7], ('test flac &n"jef\';:/?.>,<-_=+)(*&^%$#@!~`', '?mtg $#@!$"&', '', '', '', '', ''))

    def test_empty(self):
        # Renamed from the METHOD_NAME placeholder so the runner discovers it:
        # configuring MetadataReader with no filename must fail.
        self.assertComputeFails(MetadataReader())

    def testEmptyTags(self):
        result = MetadataReader(filename = join(self.audioDir, 'empty.mp3'))()
        self.assertEqualVector(result[:7], ('', '', '', '', '', '', ''))
        self.assertTrue(result[7].descriptorNames() == [])
        self.assertEqual(result[8], 0)
        # Outputs [-3:] correspond to bitrate, samplerate and channels, and will differ depending on taglib version
        # Therefore, not testing them.
# Collect every test in TestMetadataReader into a single suite for the runner.
suite = allTests(TestMetadataReader)

if __name__ == '__main__':
    TextTestRunner(verbosity=2).run(suite)
# -*- coding: utf-8 -*-
import time
from threading import Thread
import pytest
import env # noqa: F401
from pybind11_tests import callbacks as m
def test_callbacks():
    # Plain functions, functools.partial objects and lambdas must all be
    # callable from C++ through std::function bindings.
    from functools import partial

    def func1():
        return "func1"

    def func2(a, b, c, d):
        return "func2", a, b, c, d

    def func3(a):
        return "func3({})".format(a)

    assert m.test_callback1(func1) == "func1"
    assert m.test_callback2(func2) == ("func2", "Hello", "x", True, 5)
    assert m.test_callback1(partial(func2, 1, 2, 3, 4)) == ("func2", 1, 2, 3, 4)
    assert m.test_callback1(partial(func3, "partial")) == "func3(partial)"
    assert m.test_callback3(lambda i: i + 1) == "func(43) = 44"

    # C++ can also hand callables back to Python (positional and keyword call).
    f = m.test_callback4()
    assert f(43) == 44
    f = m.test_callback5()
    assert f(number=43) == 44
def test_bound_method_callback():
    # Bound Python method:
    class MyClass:
        def double(self, val):
            return 2 * val

    z = MyClass()
    assert m.test_callback3(z.double) == "func(43) = 86"

    # Bound method of a C++-defined class works the same way.
    z = m.CppBoundMethodTest()
    assert m.test_callback3(z.triple) == "func(43) = 129"
def test_keyword_args_and_generalized_unpacking():
    # f just echoes what it received so the tests can inspect how the C++
    # side expanded *args/**kwargs.
    def f(*args, **kwargs):
        return args, kwargs

    assert m.test_tuple_unpacking(f) == (("positional", 1, 2, 3, 4, 5, 6), {})
    assert m.test_dict_unpacking(f) == (
        ("positional", 1),
        {"key": "value", "a": 1, "b": 2},
    )
    assert m.test_keyword_args(f) == ((), {"x": 10, "y": 20})
    assert m.test_unpacking_and_keywords1(f) == ((1, 2), {"c": 3, "d": 4})
    assert m.test_unpacking_and_keywords2(f) == (
        ("positional", 1, 2, 3, 4, 5),
        {"key": "value", "a": 1, "b": 2, "c": 3, "d": 4, "e": 5},
    )

    # Conflicting/unconvertible expansions must raise, not silently drop args.
    with pytest.raises(TypeError) as excinfo:
        m.test_unpacking_error1(f)
    assert "Got multiple values for keyword argument" in str(excinfo.value)

    with pytest.raises(TypeError) as excinfo:
        m.test_unpacking_error2(f)
    assert "Got multiple values for keyword argument" in str(excinfo.value)

    with pytest.raises(RuntimeError) as excinfo:
        m.test_arg_conversion_error1(f)
    assert "Unable to convert call argument" in str(excinfo.value)

    with pytest.raises(RuntimeError) as excinfo:
        m.test_arg_conversion_error2(f)
    assert "Unable to convert call argument" in str(excinfo.value)
def test_lambda_closure_cleanup():
    # The C++ lambda's captured payload must be destroyed once the lambda is.
    m.test_lambda_closure_cleanup()
    cstats = m.payload_cstats()
    assert cstats.alive() == 0
    assert cstats.copy_constructions == 1
    assert cstats.move_constructions >= 1
def test_cpp_callable_cleanup():
    # The sequence of alive counts is produced by the C++ side as callables
    # are created and released.
    alive_counts = m.test_cpp_callable_cleanup()
    assert alive_counts == [0, 1, 2, 1, 2, 1, 0]
def test_cpp_function_roundtrip():
    """Test if passing a function pointer from C++ -> Python -> C++ yields the original pointer"""

    assert (
        m.test_dummy_function(m.dummy_function) == "matches dummy_function: eval(1) = 2"
    )
    assert (
        m.test_dummy_function(m.roundtrip(m.dummy_function))
        == "matches dummy_function: eval(1) = 2"
    )
    assert (
        m.test_dummy_function(m.dummy_function_overloaded)
        == "matches dummy_function: eval(1) = 2"
    )
    assert m.roundtrip(None, expect_none=True) is None
    # A genuine Python callable cannot be unwrapped into a raw pointer.
    assert (
        m.test_dummy_function(lambda x: x + 2)
        == "can't convert to function pointer: eval(1) = 3"
    )

    with pytest.raises(TypeError) as excinfo:
        m.test_dummy_function(m.dummy_function2)
    assert "incompatible function arguments" in str(excinfo.value)

    with pytest.raises(TypeError) as excinfo:
        m.test_dummy_function(lambda x, y: x + y)
    # Error wording differs between CPython and PyPy.
    assert any(
        s in str(excinfo.value)
        for s in ("missing 1 required positional argument", "takes exactly 2 arguments")
    )
def test_function_signatures(doc):
    # Generated docstrings should spell out std::function args as Callable.
    assert doc(m.test_callback3) == "test_callback3(arg0: Callable[[int], int]) -> str"
    assert doc(m.test_callback4) == "test_callback4() -> Callable[[int], int]"
def test_movable_object():
    # Move-only arguments must survive being passed through a callback.
    assert m.callback_with_movable(lambda _: None) is True
@pytest.mark.skipif(
    "env.PYPY",
    reason="PyPy segfaults on here. See discussion on #1413.",
)
def test_python_builtins():
    """Test if python builtins like sum() can be used as callbacks"""
    assert m.test_sum_builtin(sum, [1, 2, 3]) == 6
    assert m.test_sum_builtin(sum, []) == 0
def test_async_callbacks():
    # Renamed from the METHOD_NAME placeholder so pytest collects it directly;
    # test_async_async_callbacks below reuses it as a thread target.

    # serves as state for async callback
    class Item:
        def __init__(self, value):
            self.value = value

    res = []

    # generate stateful lambda that will store result in `res`
    def gen_f():
        s = Item(3)
        return lambda j: res.append(s.value + j)

    # do some work async
    work = [1, 2, 3, 4]
    m.test_async_callback(gen_f(), work)
    # wait until work is done
    from time import sleep

    sleep(0.5)
    assert sum(res) == sum(x + 3 for x in work)


def test_async_async_callbacks():
    # Same scenario, but driven from a worker thread rather than the main one.
    t = Thread(target=test_async_callbacks)
    t.start()
    t.join()
def test_callback_num_times():
    # Super-simple micro-benchmarking related to PR #2919.
    # Example runtimes (Intel Xeon 2.2GHz, fully optimized):
    #   num_millions  1, repeats  2:  0.1 secs
    #   num_millions 20, repeats 10: 11.5 secs
    one_million = 1000000
    num_millions = 1  # Try 20 for actual micro-benchmarking.
    repeats = 2  # Try 10.
    rates = []
    for rep in range(repeats):
        t0 = time.time()
        m.callback_num_times(lambda: None, num_millions * one_million)
        td = time.time() - t0
        # Guard against a zero elapsed time on very coarse clocks.
        rate = num_millions / td if td else 0
        rates.append(rate)
        if not rep:
            print()
        print(
            "callback_num_times: {:d} million / {:.3f} seconds = {:.3f} million / second".format(
                num_millions, td, rate
            )
        )
    if len(rates) > 1:
        print("Min    Mean   Max")
        print(
            "{:6.3f} {:6.3f} {:6.3f}".format(
                min(rates), sum(rates) / len(rates), max(rates)
            )
        )
from homeassistant.components.climate.const import (
PRESET_AWAY,
PRESET_COMFORT,
PRESET_ECO,
ClimateEntityFeature,
HVACMode,
)
from homeassistant.const import UnitOfTemperature, UnitOfTime
from ..const import NEDIS_HTPL20F_PAYLOAD
from ..helpers import assert_device_properties_set
from ..mixins.climate import TargetTemperatureTests
from ..mixins.lock import BasicLockTests
from ..mixins.number import BasicNumberTests
from .base_device_tests import TuyaDeviceTestCase
HVACMODE_DPS = "1"
TEMPERATURE_DPS = "2"
CURRENTTEMP_DPS = "3"
PRESET_DPS = "4"
LOCK_DPS = "7"
UNKNOWN11_DPS = "11"
TIMER_DPS = "13"
UNKNOWN101_DPS = "101"
class TestNedisHtpl20fHeater(
    BasicLockTests, BasicNumberTests, TargetTemperatureTests, TuyaDeviceTestCase
):
    """Tests for the Nedis HTPL20F heater config (climate + lock + timer)."""

    __test__ = True

    def setUp(self):
        # Renamed from the METHOD_NAME placeholder: unittest only calls the
        # per-test fixture hook when it is named setUp, so without this the
        # entities were never configured before each test.
        self.setUpForConfig("nedis_htpl20f_heater.yaml", NEDIS_HTPL20F_PAYLOAD)
        self.subject = self.entities.get("climate")
        self.setUpTargetTemperature(
            TEMPERATURE_DPS,
            self.subject,
            min=15,
            max=35,
        )
        self.setUpBasicLock(
            LOCK_DPS,
            self.entities.get("lock_child_lock"),
        )
        self.setUpBasicNumber(
            TIMER_DPS,
            self.entities.get("number_timer"),
            max=1440,
            unit=UnitOfTime.MINUTES,
        )
        self.mark_secondary(["lock_child_lock", "number_timer"])

    def test_supported_features(self):
        self.assertEqual(
            self.subject.supported_features,
            (
                ClimateEntityFeature.TARGET_TEMPERATURE
                | ClimateEntityFeature.PRESET_MODE
            ),
        )

    def test_icon(self):
        self.dps[HVACMODE_DPS] = True
        self.assertEqual(self.subject.icon, "mdi:radiator")

        self.dps[HVACMODE_DPS] = False
        self.assertEqual(self.subject.icon, "mdi:radiator-disabled")

    def test_temperature_unit_returns_celsius(self):
        self.assertEqual(self.subject.temperature_unit, UnitOfTemperature.CELSIUS)

    def test_current_temperature(self):
        self.dps[CURRENTTEMP_DPS] = 25
        self.assertEqual(self.subject.current_temperature, 25)

    def test_hvac_mode(self):
        self.dps[HVACMODE_DPS] = True
        self.assertEqual(self.subject.hvac_mode, HVACMode.HEAT)

        self.dps[HVACMODE_DPS] = False
        self.assertEqual(self.subject.hvac_mode, HVACMode.OFF)

    def test_hvac_modes(self):
        self.assertCountEqual(self.subject.hvac_modes, [HVACMode.OFF, HVACMode.HEAT])

    async def test_turn_on(self):
        async with assert_device_properties_set(
            self.subject._device, {HVACMODE_DPS: True}
        ):
            await self.subject.async_set_hvac_mode(HVACMode.HEAT)

    async def test_turn_off(self):
        async with assert_device_properties_set(
            self.subject._device, {HVACMODE_DPS: False}
        ):
            await self.subject.async_set_hvac_mode(HVACMode.OFF)

    def test_preset_modes(self):
        self.assertCountEqual(
            self.subject.preset_modes,
            [PRESET_COMFORT, PRESET_ECO, PRESET_AWAY],
        )

    def test_preset_mode(self):
        self.dps[PRESET_DPS] = "1"
        self.assertEqual(self.subject.preset_mode, PRESET_ECO)
        self.dps[PRESET_DPS] = "2"
        self.assertEqual(self.subject.preset_mode, PRESET_COMFORT)
        self.dps[PRESET_DPS] = "3"
        self.assertEqual(self.subject.preset_mode, PRESET_AWAY)

    async def test_set_preset_more_to_eco(self):
        async with assert_device_properties_set(
            self.subject._device, {PRESET_DPS: "1"}
        ):
            await self.subject.async_set_preset_mode(PRESET_ECO)

    async def test_set_preset_more_to_comfort(self):
        async with assert_device_properties_set(
            self.subject._device, {PRESET_DPS: "2"}
        ):
            await self.subject.async_set_preset_mode(PRESET_COMFORT)

    async def test_set_preset_more_to_away(self):
        async with assert_device_properties_set(
            self.subject._device, {PRESET_DPS: "3"}
        ):
            await self.subject.async_set_preset_mode(PRESET_AWAY)

    def test_extra_state_attributes(self):
        self.dps[UNKNOWN11_DPS] = "11"
        self.dps[UNKNOWN101_DPS] = True
        self.assertCountEqual(
            self.subject.extra_state_attributes,
            {"unknown_11": "11", "unknown_101": True},
        )
""" StatesAccountingAgent sends periodically numbers of jobs and pilots in various states for various
sites to the Monitoring system to create historical plots.
.. literalinclude:: ../ConfigTemplate.cfg
:start-after: ##BEGIN StatesAccountingAgent
:end-before: ##END
:dedent: 2
:caption: StatesAccountingAgent options
"""
import datetime
from DIRAC import S_OK, S_ERROR
from DIRAC.ConfigurationSystem.Client.Helpers.Operations import Operations
from DIRAC.Core.Base.AgentModule import AgentModule
from DIRAC.Core.Utilities import TimeUtilities
from DIRAC.AccountingSystem.Client.Types.WMSHistory import WMSHistory
from DIRAC.AccountingSystem.Client.DataStoreClient import DataStoreClient
from DIRAC.MonitoringSystem.Client.MonitoringReporter import MonitoringReporter
from DIRAC.WorkloadManagementSystem.DB.JobDB import JobDB
from DIRAC.WorkloadManagementSystem.DB.PilotAgentsDB import PilotAgentsDB
class StatesAccountingAgent(AgentModule):
    """Agent that every 15 minutes will report
    to the AccountingDB (MySQL) or the Monitoring DB (ElasticSearch), or both,
    a snapshot of the JobDB.

    Also sends a snapshot of PilotAgentsDB to Monitoring.
    """

    # WMSHistory fields
    __summaryKeyFieldsMapping = [
        "Status",
        "Site",
        "User",
        "UserGroup",
        "JobGroup",
        "JobType",
        "ApplicationStatus",
        "MinorStatus",
    ]
    __summaryDefinedFields = [("ApplicationStatus", "unset"), ("MinorStatus", "unset")]
    __summaryValueFieldsMapping = ["Jobs", "Reschedules"]
    __renameFieldsMapping = {"JobType": "JobSplitType"}

    # PilotsHistory fields
    __pilotsMapping = ["TaskQueueID", "GridSite", "GridType", "Status", "NumOfPilots"]

    def METHOD_NAME(self):
        """Standard initialization: configure polling interval, pick the
        configured backends and create the Accounting/Monitoring clients.

        :return: S_OK()
        """
        # This agent will always loop every 15 minutes
        self.am_setOption("PollingTime", 900)

        # Check whether to send to Monitoring or Accounting or both
        self.jobMonitoringOption = Operations().getMonitoringBackends(monitoringType="WMSHistory")
        self.pilotMonitoringOption = Operations().getMonitoringBackends(monitoringType="PilotsHistory")
        messageQueue = self.am_getOption("MessageQueue", "dirac.wmshistory")

        self.datastores = {}  # For storing the clients to Accounting and Monitoring
        if "Accounting" in self.jobMonitoringOption:
            self.datastores["Accounting"] = DataStoreClient(retryGraceTime=900)
        if "Monitoring" in self.jobMonitoringOption:
            self.datastores["Monitoring"] = MonitoringReporter(
                monitoringType="WMSHistory", failoverQueueName=messageQueue
            )
        if "Monitoring" in self.pilotMonitoringOption:
            self.pilotReporter = MonitoringReporter(monitoringType="PilotsHistory", failoverQueueName=messageQueue)

        # JobDB stores the WMSHistory "User"/"UserGroup" fields under
        # "Owner"/"OwnerGroup", so translate the field list once here.
        self.__jobDBFields = []
        for field in self.__summaryKeyFieldsMapping:
            if field == "User":
                field = "Owner"
            elif field == "UserGroup":
                field = "OwnerGroup"
            self.__jobDBFields.append(field)
        return S_OK()

    def execute(self):
        """Main execution method: snapshot PilotAgentsDB and JobDB and ship
        the records to the configured backends.
        """
        # PilotsHistory to Monitoring
        if "Monitoring" in self.pilotMonitoringOption:
            self.log.info("Committing PilotsHistory to Monitoring")
            result = PilotAgentsDB().getSummarySnapshot()
            now = datetime.datetime.utcnow()
            if not result["OK"]:
                # BUGFIX: the previous code fell through and dereferenced
                # result["Value"] even on failure (KeyError on an S_ERROR dict);
                # now the pilots commit is skipped for this cycle instead.
                self.log.error(
                    "Can't get the PilotAgentsDB summary",
                    f"{result['Message']}: won't commit PilotsHistory at this cycle",
                )
            else:
                values = result["Value"][1]
                for record in values:
                    rD = {}
                    for iP, _ in enumerate(self.__pilotsMapping):
                        rD[self.__pilotsMapping[iP]] = record[iP]
                    rD["timestamp"] = int(TimeUtilities.toEpochMilliSeconds(now))
                    self.pilotReporter.addRecord(rD)

                self.log.info("Committing to Monitoring...")
                result = self.pilotReporter.commit()
                if not result["OK"]:
                    self.log.error("Could not commit to Monitoring", result["Message"])
                self.log.verbose("Done committing PilotsHistory to Monitoring")

        # WMSHistory to Monitoring or Accounting
        # BUGFIX: the separator was "and " which rendered "Accountingand Monitoring"
        self.log.info(f"Committing WMSHistory to {' and '.join(self.jobMonitoringOption)} backend")
        result = JobDB().getSummarySnapshot(self.__jobDBFields)
        now = datetime.datetime.utcnow()
        if not result["OK"]:
            self.log.error("Can't get the JobDB summary", f"{result['Message']}: won't commit WMSHistory at this cycle")
            return S_ERROR()

        values = result["Value"][1]
        self.log.info("Start sending WMSHistory records")
        for record in values:
            rD = {}
            # Fill defaults first, then the (possibly renamed) key fields,
            # then the integer value fields that follow them in the record.
            for fV in self.__summaryDefinedFields:
                rD[fV[0]] = fV[1]
            for iP, _ in enumerate(self.__summaryKeyFieldsMapping):
                fieldName = self.__summaryKeyFieldsMapping[iP]
                rD[self.__renameFieldsMapping.get(fieldName, fieldName)] = record[iP]
            record = record[len(self.__summaryKeyFieldsMapping) :]
            for iP, _ in enumerate(self.__summaryValueFieldsMapping):
                rD[self.__summaryValueFieldsMapping[iP]] = int(record[iP])

            for backend in self.datastores:
                if backend.lower() == "monitoring":
                    rD["timestamp"] = int(TimeUtilities.toEpochMilliSeconds(now))
                    self.datastores["Monitoring"].addRecord(rD)
                elif backend.lower() == "accounting":
                    acWMS = WMSHistory()
                    acWMS.setStartTime(now)
                    acWMS.setEndTime(now)
                    acWMS.setValuesFromDict(rD)
                    retVal = acWMS.checkValues()
                    if not retVal["OK"]:
                        self.log.error("Invalid WMSHistory accounting record ", f"{retVal['Message']} -> {rD}")
                    else:
                        self.datastores["Accounting"].addRegister(acWMS)

        for backend, datastore in self.datastores.items():
            self.log.info(f"Committing WMSHistory records to {backend} backend")
            result = datastore.commit()
            if not result["OK"]:
                self.log.error(f"Couldn't commit WMSHistory to {backend}", result["Message"])
                return S_ERROR()
            self.log.verbose(f"Done committing WMSHistory to {backend} backend")
        return S_OK()
#!/usr/bin/env python3
# Copyright (c) 2014-2016 The Bitcoin Core developers
# Copyright (c) 2016-2022 The Zcash developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or https://www.opensource.org/licenses/mit-license.php .
from test_framework.test_framework import BitcoinTestFramework
from test_framework.authproxy import JSONRPCException
from test_framework.util import connect_nodes_bi
from binascii import a2b_hex, b2a_hex
from hashlib import sha256
from struct import pack
def check_array_result(object_array, to_match, expected):
    """
    Pass in array of JSON objects, a dictionary with key/value pairs
    to match against, and another dictionary with expected key/value
    pairs.  Raises AssertionError when a matching object has a wrong
    expected value, or when no object matches to_match at all.
    """
    num_matched = 0
    for item in object_array:
        # Skip objects that do not satisfy every to_match pair.
        if any(item[key] != value for key, value in to_match.items()):
            continue
        for key, value in expected.items():
            if item[key] != value:
                raise AssertionError("%s : expected %s=%s"%(str(item), str(key), str(value)))
        num_matched += 1
    if num_matched == 0:
        raise AssertionError("No objects matched %s"%(str(to_match)))
def b2x(b):
    """Return the lowercase ASCII hex representation of a bytes-like object."""
    return b.hex()
# NOTE: This does not work for signed numbers (set the high bit) or zero (use b'\0')
def encodeUNum(n):
    """Encode a positive integer as a length-prefixed little-endian byte
    string (a script-number push: one length byte, then the LE payload).
    """
    payload = bytearray()
    # Emit base-256 limbs until the most significant limb fits in 7 bits.
    while n > 127:
        payload.append(n % 256)
        n //= 256
    payload.append(n)
    return bytes([len(payload)]) + bytes(payload)
def varlenEncode(n):
    """Encode n as a Bitcoin-style CompactSize integer (1, 3, 5 or 9 bytes)."""
    for threshold, prefix, fmt in (
        (0xfd, b'', '<B'),
        (0x10000, b'\xfd', '<H'),
        (0x100000000, b'\xfe', '<L'),
    ):
        if n < threshold:
            return prefix + pack(fmt, n)
    return b'\xff' + pack('<Q', n)
def METHOD_NAME(b):
    """Return SHA256(SHA256(b)), the double-SHA256 digest used for hashes here."""
    inner_digest = sha256(b).digest()
    return sha256(inner_digest).digest()
def genmrklroot(leaflist):
    """Compute the merkle root of a list of leaf hashes.

    An odd-sized level duplicates its last entry before pairing.  Unlike the
    previous version, the caller's list is never modified (the old code
    appended the duplicated hash directly onto leaflist on the first level).
    """
    cur = list(leaflist)  # work on a copy so the input list is untouched
    while len(cur) > 1:
        if len(cur) & 1:
            cur.append(cur[-1])
        # Hash adjacent pairs to form the next level up.
        cur = [METHOD_NAME(cur[i] + cur[i + 1]) for i in range(0, len(cur), 2)]
    return cur[0]
def template_to_bytearray(tmpl, txlist):
    """Serialize a getblocktemplate result plus a tx list into a raw block.

    Header layout: version, prev-hash (byte-reversed), merkle root,
    32 reserved bytes, time, bits (byte-reversed), 32-byte nonce and an
    empty solution, followed by the varint tx count and the transactions.
    """
    merkle_root = genmrklroot(list(METHOD_NAME(tx) for tx in txlist))
    header = b''.join((
        pack('<L', tmpl['version']),
        a2b_hex(tmpl['previousblockhash'])[::-1],
        merkle_root,
        b'\0' * 32,   # reserved
        pack('<L', tmpl['curtime']),
        a2b_hex(tmpl['bits'])[::-1],
        b'\0' * 32,   # nonce
        b'\0',        # empty solution
    ))
    body = varlenEncode(len(txlist)) + b''.join(bytes(tx) for tx in txlist)
    return bytearray(header + body)
def template_to_hex(tmpl, txlist):
    """Serialize the proposed block and return it as a hex string."""
    raw_block = template_to_bytearray(tmpl, txlist)
    return b2x(raw_block)
def assert_template(node, tmpl, txlist, expect):
    """Submit tmpl+txlist as a block proposal and require verdict == expect."""
    request = {'data': template_to_hex(tmpl, txlist), 'mode': 'proposal'}
    rsp = node.getblocktemplate(request)
    if rsp != expect:
        raise AssertionError('unexpected: %s' % (rsp,))
class GetBlockTemplateProposalTest(BitcoinTestFramework):
    '''
    Test block proposals with getblocktemplate.
    '''

    def __init__(self):
        super().__init__()
        # Two nodes so the proposal is checked against a connected network.
        self.num_nodes = 2

    def setup_network(self):
        self.nodes = self.setup_nodes()
        connect_nodes_bi(self.nodes, 0, 1)

    def run_test(self):
        """Submit a series of deliberately corrupted block proposals, check
        each rejection reason, then finish with a valid and an orphan block."""
        node = self.nodes[0]
        node.generate(1) # Mine a block to leave initial block download
        tmpl = node.getblocktemplate()
        if 'coinbasetxn' not in tmpl:
            # Build a minimal coinbase transaction by hand when the template
            # does not supply one.
            rawcoinbase = encodeUNum(tmpl['height'])
            rawcoinbase += b'\x01-'
            hexcoinbase = b2x(rawcoinbase)
            hexoutval = b2x(pack('<Q', tmpl['coinbasevalue']))
            tmpl['coinbasetxn'] = {'data': '01000000' + '01' + '0000000000000000000000000000000000000000000000000000000000000000ffffffff' + ('%02x' % (len(rawcoinbase),)) + hexcoinbase + 'fffffffe' + '01' + hexoutval + '00' + '00000000'}
        # txlist[0] is always the coinbase; entries are mutable bytearrays so
        # individual bytes can be corrupted and restored below.
        txlist = list(bytearray(a2b_hex(a['data'])) for a in (tmpl['coinbasetxn'],) + tuple(tmpl['transactions']))

        # Test 0: Capability advertised
        assert('proposal' in tmpl['capabilities'])

        # NOTE: This test currently FAILS (regtest mode doesn't enforce block height in coinbase)
        ## Test 1: Bad height in coinbase
        #txlist[0][4+1+36+1+1] += 1
        #assert_template(node, tmpl, txlist, 'FIXME')
        #txlist[0][4+1+36+1+1] -= 1

        # Test 2: Bad input hash for gen tx
        txlist[0][4+1] += 1
        assert_template(node, tmpl, txlist, 'bad-cb-missing')
        txlist[0][4+1] -= 1

        # Test 3: Truncated final tx
        lastbyte = txlist[-1].pop()
        try:
            assert_template(node, tmpl, txlist, 'n/a')
        except JSONRPCException:
            pass # Expected
        txlist[-1].append(lastbyte)

        # Test 4: Add an invalid tx to the end (duplicate of gen tx)
        txlist.append(txlist[0])
        assert_template(node, tmpl, txlist, 'bad-txns-duplicate')
        txlist.pop()

        # Test 5: Add an invalid tx to the end (non-duplicate)
        txlist.append(bytearray(txlist[0]))
        txlist[-1][4+1] = 0xff
        assert_template(node, tmpl, txlist, 'bad-txns-inputs-missingorspent')
        txlist.pop()

        # Test 6: Future tx lock time
        # (the final 4 bytes of a serialized tx hold nLockTime)
        txlist[0][-4:] = b'\xff\xff\xff\xff'
        assert_template(node, tmpl, txlist, 'bad-txns-nonfinal')
        txlist[0][-4:] = b'\0\0\0\0'

        # Test 7: Bad tx count
        txlist.append(b'')
        try:
            assert_template(node, tmpl, txlist, 'n/a')
        except JSONRPCException:
            pass # Expected
        txlist.pop()

        # Test 8: Bad bits
        realbits = tmpl['bits']
        tmpl['bits'] = '1c0000ff' # impossible in the real world
        assert_template(node, tmpl, txlist, 'bad-diffbits')
        tmpl['bits'] = realbits

        # Test 9: Bad merkle root
        # (flip a byte just past the 4-byte version + 32-byte prev hash)
        rawtmpl = template_to_bytearray(tmpl, txlist)
        rawtmpl[4+32] = (rawtmpl[4+32] + 1) % 0x100
        rsp = node.getblocktemplate({'data':b2x(rawtmpl),'mode':'proposal'})
        if rsp != 'bad-txnmrklroot':
            raise AssertionError('unexpected: %s' % (rsp,))

        # Test 10: Bad timestamps
        realtime = tmpl['curtime']
        tmpl['curtime'] = 0x7fffffff
        assert_template(node, tmpl, txlist, 'time-too-new')
        tmpl['curtime'] = 0
        assert_template(node, tmpl, txlist, 'time-too-old')
        tmpl['curtime'] = realtime

        # Test 11: Valid block
        assert_template(node, tmpl, txlist, None)

        # Test 12: Orphan block
        tmpl['previousblockhash'] = 'ff00' * 16
        assert_template(node, tmpl, txlist, 'inconclusive-not-best-prevblk')
# Allow running this test directly from the command line.
if __name__ == '__main__':
    GetBlockTemplateProposalTest().main()
import uuid
from unittest.mock import patch
from django.test import SimpleTestCase, TestCase
from corehq.apps.change_feed import data_sources, topics
from corehq.apps.change_feed.document_types import change_meta_from_doc
from corehq.apps.change_feed.producer import producer
from corehq.apps.change_feed.topics import get_topic_offset
from corehq.apps.es.client import manager
from corehq.apps.es.tests.utils import es_test
from corehq.apps.es.users import user_adapter
from corehq.apps.groups.models import Group
from corehq.apps.groups.tests.test_utils import delete_all_groups
from corehq.apps.hqcase.management.commands.ptop_reindexer_v2 import (
reindex_and_clean,
)
from corehq.apps.users.models import CommCareUser
from corehq.pillows.groups_to_user import (
get_group_pillow,
remove_group_from_users,
update_es_user_with_groups,
)
@es_test(requires=[user_adapter])
class GroupToUserPillowTest(SimpleTestCase):
    """Unit tests for update_es_user_with_groups / remove_group_from_users,
    driving them directly with group doc dicts (no DB, ES only)."""

    domain = 'grouptouser-pillowtest-domain'

    def setUp(self):
        super(GroupToUserPillowTest, self).setUp()
        self.user_id = 'user1'
        self._create_es_user_without_db_calls(self.user_id, self.domain)

    def _check_es_user(self, group_ids=None, group_names=None):
        # Assert the ES user doc carries exactly these group ids/names
        # (None means "no groups expected").
        _assert_es_user_and_groups(
            self, self.user_id, group_ids, group_names
        )

    def _create_es_user_without_db_calls(self, *args):
        # Patch the couch accessor so creating the ES user touches no DB.
        with patch('corehq.apps.groups.dbaccessors.get_group_id_name_map_by_user', return_value=[]):
            METHOD_NAME(*args)

    def test_update_es_user_with_groups(self):
        """Adding a user to successive groups accumulates ids/names in ES."""
        group_doc = {
            'name': 'g1',
            '_id': 'group1',
            'users': []
        }

        # no change if user not in group
        update_es_user_with_groups(group_doc)
        self._check_es_user(None, None)

        # user added to group
        group_doc['users'] = [self.user_id]
        update_es_user_with_groups(group_doc)
        self._check_es_user(['group1'], ['g1'])

        # re-process group with no change
        update_es_user_with_groups(group_doc)
        self._check_es_user(['group1'], ['g1'])

        # user added to new group
        new_group = {
            'name': 'g2',
            '_id': 'group2',
            'users': [self.user_id]
        }
        update_es_user_with_groups(new_group)
        self._check_es_user(['group1', 'group2'], ['g1', 'g2'])

    def test_update_es_user_with_groups_remove_user(self):
        """A user listed in removed_users is dropped from the ES doc."""
        group_doc = {
            'name': 'g1',
            '_id': 'group1',
            'users': [self.user_id],
            'removed_users': set([]),
        }

        # re-process group with no change
        update_es_user_with_groups(group_doc)
        self._check_es_user(['group1'], ['g1'])

        group_doc['removed_users'].add(self.user_id)
        group_doc['users'] = []
        update_es_user_with_groups(group_doc)
        self._check_es_user(None, None)

    def test_remove_user_from_groups_partial_match(self):
        """remove_group_from_users with a same-named but different-id group
        must not clear the original group's membership."""
        original_id = uuid.uuid4().hex
        group_doc = {
            'name': 'original',
            '_id': original_id,
            'users': [self.user_id]
        }

        # set original groups on the user
        update_es_user_with_groups(group_doc)
        self._check_es_user([original_id], ['original'])

        new_id = uuid.uuid4().hex
        group_doc = {
            'name': 'original',
            '_id': new_id,
            'users': [self.user_id]
        }
        remove_group_from_users(group_doc)
def _assert_es_user_and_groups(test_case, user_id, group_ids=None, group_names=None):
    """Fetch user_id's doc from ES (after a refresh) and assert its group
    ids and names match the expected sets; None means "expect none set"."""
    manager.index_refresh(user_adapter.index_name)
    user_doc = user_adapter.get(user_id)
    for attr, expected in (("__group_ids", group_ids), ("__group_names", group_names)):
        if expected is None:
            # Attribute should be absent or empty.
            test_case.assertFalse(user_doc.get(attr))
        else:
            test_case.assertEqual(set(user_doc[attr]), set(expected))
def METHOD_NAME(user_id, domain):
    """Build a minimal active CommCareUser and index it straight into ES.

    Returns the user object (never saved to the DB).
    """
    es_user = CommCareUser(
        _id=user_id,
        domain=domain,
        username='hc',
        first_name='Harry',
        last_name='Casual',
        is_active=True,
    )
    user_adapter.index(es_user, refresh=True)
    return es_user
@es_test(requires=[user_adapter])
class GroupToUserPillowDbTest(TestCase):
    """End-to-end test: group change -> Kafka -> group pillow -> user doc in ES."""

    def test_pillow(self):
        """A saved group processed through the pillow shows up on the ES user."""
        user_id = uuid.uuid4().hex
        domain = 'dbtest-group-user'
        METHOD_NAME(user_id, domain)
        _assert_es_user_and_groups(self, user_id, None, None)

        # create and save a group
        group = Group(domain=domain, name='g1', users=[user_id])
        group.save()

        # send to kafka
        since = get_topic_offset(topics.GROUP)
        producer.send_change(topics.GROUP, _group_to_change_meta(group.to_json()))

        # process using pillow
        pillow = get_group_pillow()
        pillow.process_changes(since=since, forever=False)

        # confirm updated in elasticsearch
        manager.index_refresh(user_adapter.index_name)
        _assert_es_user_and_groups(self, user_id, [group._id], [group.name])
        # returned so test_pillow_deletion can build on this state
        return user_id, group

    def test_pillow_deletion(self):
        """Soft-deleting the group and re-processing clears it from the user."""
        user_id, group = self.test_pillow()
        group.soft_delete()

        # send to kafka
        since = get_topic_offset(topics.GROUP)
        producer.send_change(topics.GROUP, _group_to_change_meta(group.to_json()))

        pillow = get_group_pillow()
        pillow.process_changes(since=since, forever=False)

        # confirm removed in elasticsearch
        manager.index_refresh(user_adapter.index_name)
        _assert_es_user_and_groups(self, user_id, [], [])
def _group_to_change_meta(group):
    """Wrap a group JSON doc in change metadata for the Kafka feed
    (couch data source, the groups database)."""
    meta_kwargs = {
        "document": group,
        "data_source_type": data_sources.SOURCE_COUCH,
        "data_source_name": Group.get_db().dbname,
    }
    return change_meta_from_doc(**meta_kwargs)
@es_test(requires=[user_adapter], setup_class=True)
class GroupsToUserReindexerTest(TestCase):
    """Verify the 'groups-to-user' reindexer back-fills group info onto ES user docs."""

    def setUp(self):
        super(GroupsToUserReindexerTest, self).setUp()
        # Start from a clean slate so only this test's group is indexed.
        delete_all_groups()

    def test_groups_to_user_reindexer(self):
        user_id = uuid.uuid4().hex
        domain = 'test-groups-to-user-reindex'
        METHOD_NAME(user_id, domain)

        # create and save a group
        group = Group(domain=domain, name='g1', users=[user_id])
        group.save()

        reindex_and_clean('groups-to-user')
        manager.index_refresh(user_adapter.index_name)
        _assert_es_user_and_groups(self, user_id, [group._id], [group.name])
import re
import logging
from virttest import error_context
from virttest import data_dir
from virttest import storage
from avocado.utils import process
from provider.in_place_upgrade_base import IpuTest
LOG_JOB = logging.getLogger('avocado.test')
class IpuLegacyTest(IpuTest):
    """
    Provide basic functions for in place upgrade test cases
    """

    def __init__(self, test, params):
        super(IpuLegacyTest, self).__init__(test, params)
        self.session = None
        self.test = test
        self.params = params

    def pre_upgrade_whitelist(self, test):
        """
        Fix known issues before executing pre-upgrade
        """
        try:
            # Record leapp's and grubby's versions for debugging
            leapp_version = self.session.cmd_output("rpm -qa|grep ^leapp")
            test.log.info("leapp version: %s", str(leapp_version))
            grubby_version = self.session.cmd_output("rpm -qa|grep ^grubby")
            test.log.info("grubby version: %s", str(grubby_version))
            # Possible problems with remote login using root account
            self.session.cmd(self.params.get("fix_permit"))
            # Answer file missing will be fixed
            self.session.cmd(self.params.get("fix_answer_file"), timeout=1200)
            self.session.cmd(self.params.get("fix_answer_section"), timeout=1200)
            # Clean up old kernels; a known error message in the output is
            # tolerated (matching it is deliberately a no-op).
            status, output = self.session.cmd_status_output(
                self.params.get("clean_up_old_kernel"), timeout=1200
            )
            if re.search(self.params.get("error_info"), output):
                pass
        except Exception as info:
            test.fail("Failed to fix known issues in advance :%s" % str(info))
@error_context.context_aware
def METHOD_NAME(test, params, env):
    """
    Run in place upgrade cases rhel7.9 guest:
    a) without RHSM
    1.configure vm
    2.install leapp tool
    3.download new rhel content repo
    4.pre_upgrade test in the vm
    5.upgrade test in the vm
    6.check if it's target system
    b) with rhsm
    1.configure vm
    2.install leapp tool
    3.subscribe vm
    4.pre_upgrade test in the vm
    5.upgrade test in the vm
    6.check if it's target system

    :param test: QEMU test object
    :param params: Dictionary with the test parameters
    :param env: Dictionary with test environment.
    """
    upgrade_test = IpuLegacyTest(test, params)
    vm = env.get_vm(params["main_vm"])
    vm.verify_alive()
    login_timeout = int(params.get("login_timeout", 360))
    upgrade_test.session = vm.wait_for_login(timeout=login_timeout)
    # Remember the pre-upgrade RHEL version so the upgraded image can be
    # renamed in the finally block below.
    check_rhel_ver = params.get("check_rhel_ver")
    pre_rhel_ver = upgrade_test.run_guest_cmd(check_rhel_ver)
    try:
        # set post_release
        pre_release = params.get("pre_release")
        release_chk = params.get("release_check")
        if pre_release not in upgrade_test.run_guest_cmd(release_chk):
            test.cancel("your image is not for rhel 7.9 product, please check")
        post_release = params.get("post_release")
        # Bring the old system up to date first, with or without RHSM.
        if params.get("rhsm_type") == "no_rhsm":
            # internal repo for doing yum update
            # please specify the old_custom_internal_repo_7
            # in the cfg in advance this parameter should
            # contain the repo files, by which you can upgrade
            # old system to the newer version before you do in place upgade
            old_custom_repo = params.get("old_custom_internal_repo_7")
            upgrade_test.yum_update_no_rhsm(test, old_custom_repo)
        elif params.get("rhsm_type") == "rhsm":
            # doing rhsm and update the old system
            # prepare_env, get_answer_files_source and get_answer_files
            # download and use private script to prepare test env
            upgrade_test.run_guest_cmd(params.get("prepare_env"))
            upgrade_test.run_guest_cmd(params.get("get_answer_files_source"))
            upgrade_test.rhsm(test)
        upgrade_test.session = vm.reboot(upgrade_test.session)
        # repo_leapp_7 it's leapp tool's repo
        # repo_leppp_7_seed and ins_leapp_cmd, install leapp tool command
        if not params.get_boolean("com_install"):
            upgrade_test.run_guest_cmd(params.get("repo_leapp_7"))
            upgrade_test.run_guest_cmd(params.get("repo_leppp_7_seed"))
        upgrade_test.run_guest_cmd(params.get("ins_leapp_cmd_7"))
        if params.get("rhsm_type") == "rhsm":
            upgrade_test.run_guest_cmd(params.get("get_answer_files_source"))
        elif params.get("rhsm_type") == "no_rhsm":
            upgrade_test.run_guest_cmd(params.get("prepare_env"))
            upgrade_test.run_guest_cmd(params.get("get_answer_files_source"))
            # get_custom_7 and export_type_7, set env for no_rhsm test
            upgrade_test.run_guest_cmd(params.get("get_custom_7"))
            upgrade_test.run_guest_cmd(params.get("export_type_7"))
            # please specify the new_rhel_content_7 in the cfg in advance
            # this parameter should contain your upgraded system's repo files
            upgrade_test.run_guest_cmd(params.get("new_rhel_content_7"))
        upgrade_test.pre_upgrade_whitelist(test)
        if params.get("rhsm_type") == "no_rhsm":
            # do preugprade test without rhsm
            upgrade_test.run_guest_cmd(params.get("pre_upgrade_no_rhsm"))
            # do upgrade test without rhsm
            upgrade_test.upgrade_process(params.get("process_upgrade_no_rhsm"))
        elif params.get("rhsm_type") == "rhsm":
            # if you want to use the below method to get answer files
            # please disable "get_answer_files_source" and "get_answer_files"
            # two steps above and run the following two commented steps
            # upgrade_test.run_guest_cmd(params.get("leapp_proxy_host"))
            # upgrade_test.run_guest_cmd(params.get("leapp_service_host"))
            # do preugprade test with rhsm
            upgrade_test.run_guest_cmd(params.get("pre_upgrade_rhsm"))
            # do ugprade test with rhsm
            upgrade_test.upgrade_process(params.get("process_upgrade_rhsm"))
        # after run upgrade, reboot the guest after finish preupgrade
        upgrade_test.session.sendline(params.get("reboot_cmd"))
        # post checking
        upgrade_test.session = vm.wait_for_login(timeout=6000)
        upgrade_test.post_upgrade_check(test, post_release)
        post_rhel_ver = upgrade_test.run_guest_cmd(check_rhel_ver)
        vm.verify_kernel_crash()
    finally:
        vm.graceful_shutdown(timeout=300)
        # Best-effort: rename the image to carry the upgraded version.
        # NOTE(review): post_rhel_ver is unbound if the try block failed early;
        # the resulting NameError is swallowed by the except below.
        try:
            image_name = params.objects("images")[0]
            image_params = params.object_params(image_name)
            image_path = params.get("images_base_dir", data_dir.get_data_dir())
            old_name = storage.get_image_filename(image_params, image_path)
            upgraded_name = old_name.replace(pre_rhel_ver, post_rhel_ver + "0")
            process.METHOD_NAME(params.get("image_clone_command") %
                                (old_name, upgraded_name))
        except Exception as error:
            test.log.warning("Failed to rename upgraded image:%s" % str(error))
import json
import os
import subprocess
import pytest
import tarfile
from ruamel.yaml import YAML
import random
import string
import shutil
from sagemaker.image_uris import retrieve
def get_region():
    """Return the AWS region for the test run (AWS_REGION env var, or None)."""
    return os.getenv("AWS_REGION")
def get_sagemaker_role_arn():
    """Return the SageMaker execution role ARN (SAGEMAKER_ROLE_ARN env var, or None)."""
    return os.getenv("SAGEMAKER_ROLE_ARN")
def get_robomaker_role_arn():
    """Return the RoboMaker role ARN (ROBOMAKER_ROLE_ARN env var, or None)."""
    return os.getenv("ROBOMAKER_ROLE_ARN")
def get_s3_data_bucket():
    """Return the S3 data bucket name (S3_DATA_BUCKET env var, or None)."""
    return os.getenv("S3_DATA_BUCKET")
def get_minio_service_port():
    """Return the MinIO service port (MINIO_SERVICE_PORT env var, or None)."""
    return os.getenv("MINIO_SERVICE_PORT")
def get_kfp_namespace():
    """Return the Kubeflow Pipelines namespace (NAMESPACE env var, or None)."""
    return os.getenv("NAMESPACE")
def get_fsx_subnet():
    """Return the FSx subnet id (FSX_SUBNET env var, or None)."""
    return os.getenv("FSX_SUBNET")
def get_fsx_security_group():
    """Return the FSx security group id (FSX_SECURITY_GROUP env var, or None)."""
    return os.getenv("FSX_SECURITY_GROUP")
def METHOD_NAME():
    """Return the FSx filesystem id (FSX_ID env var, or None)."""
    return os.getenv("FSX_ID")
def get_algorithm_image_registry(framework, region, version=None):
    """Return the registry (account id) portion of the SageMaker image URI
    for the given framework/region/version."""
    image_uri = retrieve(framework, region, version)
    return image_uri.split(".", 1)[0]
def get_assume_role_arn():
    """Return the role ARN to assume (ASSUME_ROLE_ARN env var, or None)."""
    return os.getenv("ASSUME_ROLE_ARN")
def run_command(cmd, *popenargs, **kwargs):
    """Run cmd (string or argv list), returning its combined stdout/stderr
    as bytes; a non-zero exit fails the surrounding pytest test."""
    args = cmd.split(" ") if isinstance(cmd, str) else cmd
    print("executing command: {}".format(" ".join(args)))
    try:
        return subprocess.check_output(
            args, *popenargs, stderr=subprocess.STDOUT, **kwargs
        )
    except subprocess.CalledProcessError as e:
        pytest.fail(f"Command failed. Error code: {e.returncode}, Log: {e.output}")
def read_from_file_in_tar(file_path, file_name="data", decode=True):
    """Opens a local tarball and reads the contents of the file as specified.

    Arguments:
    - file_path: The local path of the tarball file.
    - file_name: The name of the file inside the tarball to be read. (Default `"data"`)
    - decode: Ensures the contents of the file is decoded to type `str`. (Default `True`)

    See: https://github.com/kubeflow/pipelines/blob/2e14fe732b3f878a710b16d1a63beece6c19330a/sdk/python/kfp/components/_components.py#L182
    """
    # BUGFIX: the TarFile object itself was previously leaked
    # (tarfile.open(...).extractfile(...) never closed the archive handle).
    with tarfile.open(file_path) as tar:
        with tar.extractfile(file_name) as member:
            content = member.read()
    return content.decode() if decode else content
def replace_placeholders(input_filename, output_filename, shallow_canary=False):
    """Copy input_filename to output_filename, substituting the ((TOKEN))
    placeholders with environment-derived values; tokens whose value is
    None are left untouched.  Returns output_filename."""
    region = get_region()
    substitutions = {
        "((REGION))": region,
        "((SAGEMAKER_ROLE_ARN))": get_sagemaker_role_arn(),
        "((DATA_BUCKET))": get_s3_data_bucket(),
        "((KMEANS_REGISTRY))": get_algorithm_image_registry("kmeans", region, "1"),
    }
    if not shallow_canary:
        # The full (non-shallow) run needs the extra images and FSx/role info.
        substitutions.update(
            {
                "((XGBOOST_REGISTRY))": get_algorithm_image_registry(
                    "xgboost", region, "1.0-1"
                ),
                "((BUILTIN_RULE_IMAGE))": get_algorithm_image_registry("debugger", region),
                "((MODEL_MONITOR_IMAGE))": get_algorithm_image_registry(
                    "model-monitor", region
                ),
                "((CLARIFY_IMAGE))": get_algorithm_image_registry("clarify", region),
                "((FSX_ID))": METHOD_NAME(),
                "((FSX_SUBNET))": get_fsx_subnet(),
                "((FSX_SECURITY_GROUP))": get_fsx_security_group(),
                "((ASSUME_ROLE_ARN))": get_assume_role_arn(),
                "((ROBOMAKER_ROLE_ARN))": get_robomaker_role_arn(),
            }
        )
    with open(input_filename, "r") as f:
        contents = f.read()
    for token, value in substitutions.items():
        if value is not None:
            contents = contents.replace(token, value)
    with open(output_filename, "w") as f:
        f.write(contents)
    return output_filename
def load_params(file_name):
    """Parse the YAML file at file_name (safe loader) and return its contents."""
    yaml_parser = YAML(typ="safe")
    with open(file_name, "r") as f:
        return yaml_parser.load(f)
def generate_random_string(length):
    """Generate a random string with twice the length of input parameter:
    `length` lowercase letters followed by `length` digits."""
    assert isinstance(length, int)
    letters = [random.choice(string.ascii_lowercase) for _ in range(length)]
    digits = [random.choice(string.digits) for _ in range(length)]
    return "".join(letters + digits)
def mkdir(directory_path):
    """Create directory_path (including parents) if needed; return the path.

    Uses exist_ok=True so the call is race-free: the previous
    check-then-create pattern could raise if another process created the
    directory between the existence check and makedirs.
    """
    os.makedirs(directory_path, exist_ok=True)
    return directory_path
def remove_dir(dir_path):
    """Recursively delete dir_path and everything beneath it."""
    shutil.rmtree(dir_path)
from conan import ConanFile
from conan.tools.files import rmdir, mkdir, save, load, get, apply_conandata_patches, export_conandata_patches, copy
from conan.tools.gnu import AutotoolsToolchain, Autotools
from conan.tools.layout import basic_layout
from conan.tools.microsoft import is_msvc, unix_path
import glob
import os
import re
import yaml
required_conan_version = ">=1.54.0"
class XorgProtoConan(ConanFile):
    """Conan recipe for the xorgproto headers (header-only package)."""

    name = "xorg-proto"
    package_type = "header-library"
    description = "This package provides the headers and specification documents defining " \
                  "the core protocol and (many) extensions for the X Window System."
    topics = ("specification", "x-window")
    license = "X11"
    homepage = "https://gitlab.freedesktop.org/xorg/proto/xorgproto"
    url = "https://github.com/conan-io/conan-center-index"
    settings = "os", "arch", "compiler", "build_type"
    generators = "PkgConfigDeps"

    def layout(self):
        basic_layout(self, src_folder="src")

    @property
    def _settings_build(self):
        # Fall back to self.settings when there is no two-profile build context.
        return getattr(self, "settings_build", self.settings)

    def build_requirements(self):
        self.tool_requires("automake/1.16.5")
        self.tool_requires("xorg-macros/1.19.3")
        self.tool_requires("pkgconf/1.9.3")
        if self._settings_build.os == "Windows":
            self.win_bash = True
            if not self.conf.get("tools.microsoft.bash:path", check_type=str):
                self.tool_requires("msys2/cci.latest")

    def requirements(self):
        if hasattr(self, "settings_build"):
            self.requires("xorg-macros/1.19.3")

    def METHOD_NAME(self):
        # Header-only: drop compiler/arch/build_type from the package id.
        # self.info.clear() would be fine too, but keep the os to add c3i test coverage for Windows.
        del self.info.settings.arch
        del self.info.settings.build_type
        del self.info.settings.compiler

    def export_sources(self):
        export_conandata_patches(self)

    def source(self):
        get(self, **self.conan_data["sources"][self.version], destination=self.source_folder, strip_root=True)

    def generate(self):
        tc = AutotoolsToolchain(self)
        env = tc.environment()
        if is_msvc(self):
            compile_wrapper = unix_path(self, self.conf.get("user.automake:compile-wrapper"))
            env.define("CC", f"{compile_wrapper} cl -nologo")
        tc.generate(env)

    def build(self):
        apply_conandata_patches(self)
        autotools = Autotools(self)
        autotools.configure()
        autotools.make()

    @property
    def _pc_data_path(self):
        # Where the harvested pkg-config metadata is stored inside the package.
        return os.path.join(self.package_folder, "res", "pc_data.yml")

    def package(self):
        copy(self, "COPYING-*", src=self.source_folder, dst=os.path.join(self.package_folder, "licenses"))
        autotools = Autotools(self)
        autotools.install()
        # Harvest Name/Version from the generated .pc files so package_info()
        # can reconstruct the components after share/ is removed.
        pc_data = {}
        for fn in glob.glob(os.path.join(self.package_folder, "share", "pkgconfig", "*.pc")):
            pc_text = load(self, fn)
            filename = os.path.basename(fn)[:-3]
            name = next(re.finditer("^Name: ([^\n$]+)[$\n]", pc_text, flags=re.MULTILINE)).group(1)
            version = next(re.finditer("^Version: ([^\n$]+)[$\n]", pc_text, flags=re.MULTILINE)).group(1)
            pc_data[filename] = {
                "version": version,
                "name": name,
            }
        mkdir(self, os.path.dirname(self._pc_data_path))
        save(self, self._pc_data_path, yaml.dump(pc_data))
        rmdir(self, os.path.join(self.package_folder, "share"))

    def package_info(self):
        # BUGFIX: open pc_data.yml via a context manager so the file handle is
        # closed (previously leaked by yaml.safe_load(open(...))).
        with open(self._pc_data_path) as f:
            pc_data = yaml.safe_load(f)
        for filename, name_version in pc_data.items():
            self.cpp_info.components[filename].filenames["pkg_config"] = filename
            self.cpp_info.components[filename].libdirs = []
            if hasattr(self, "settings_build"):
                self.cpp_info.components[filename].requires = ["xorg-macros::xorg-macros"]
            self.cpp_info.components[filename].version = name_version["version"]
            self.cpp_info.components[filename].set_property("pkg_config_name", filename)
        self.cpp_info.components["xproto"].includedirs.append(os.path.join("include", "X11"))
#!/usr/bin/env python3
# encoding: utf-8
# Copyright 2019 Johns Hopkins University (Xuankai Chang)
# Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
import argparse
import codecs
import json
import logging
import re
import sys
import numpy as np
def permutationDFS(source, start, res):
    """Collect every permutation of `source` into `res` (as plain lists)
    by depth-first swapping, e.g. [[1, 2], [2, 1]] for two elements or
    [[1, 2, 3], [1, 3, 2], [2, 1, 3], [2, 3, 1], [3, 2, 1], [3, 1, 2]]
    for three."""
    last = len(source) - 1
    if start == last:  # reached a complete permutation
        res.append(source.tolist())
    for i in range(start, len(source)):
        # Swap position `start` with `i`, recurse, then undo the swap.
        source[start], source[i] = source[i], source[start]
        permutationDFS(source, start + 1, res)
        source[start], source[i] = source[i], source[start]
# pre-set the permutation scheme (ref_idx, hyp_idx)
def permutation_schemes(num_spkrs):
    """Enumerate ref->hyp pairing keys for every speaker permutation.

    Returns (flat_keys, grouped_keys): grouped_keys holds one list of
    "r<ref>h<hyp>" keys per permutation of [1, ..., num_spkrs]
    (e.g. [[r1h1, r2h2], [r1h2, r2h1]] for two speakers), and
    flat_keys is their concatenation.
    """
    speakers = list(range(1, num_spkrs + 1))
    perms = []
    permutationDFS(np.array(speakers), 0, perms)
    grouped = [
        ["r%dh%d" % (ref_idx, hyp_idx) for ref_idx, hyp_idx in enumerate(perm, 1)]
        for perm in perms
    ]
    flat = [key for group in grouped for key in group]
    return flat, grouped
def convert_score(keys, dic):
    """Extract the integer [#correct, #sub, #del, #ins] counts from each
    dic[key]["Scores"] string and return them keyed by key."""
    digit_pattern = re.compile(r"\d+")
    ret = {}
    for key in keys:
        counts = [int(v) for v in digit_pattern.findall(dic[key]["Scores"])]
        assert len(counts) == 4  # exactly c, s, d, i
        ret[key] = counts
    return ret
def METHOD_NAME(old_dic, num_spkrs=2):
    """For each utterance, keep only the ref->hyp speaker permutation with
    the lowest error rate (s+d+i)/(c+s+d), plus a summed "Scores" line.

    :param old_dic: {utt_id: {"rXhY": {"Scores": ..., ...}, ...}}
    :param num_spkrs: number of mixed speakers.
    :return: {utt_id: {best "rXhY" entries..., "Scores": "(#C #S #D #I) ..."}}
    """
    perm, keys = permutation_schemes(num_spkrs)
    new_dic = {}
    for id in old_dic.keys():
        # compute error rate for each utt
        in_dic = old_dic[id]
        score = convert_score(perm, in_dic)
        # Sum the [c, s, d, i] counts over the speaker pairs of each permutation.
        perm_score = []
        for ks in keys:
            tmp_score = [0, 0, 0, 0]
            for k in ks:
                tmp_score = [tmp_score[i] + score[k][i] for i in range(4)]
            perm_score.append(tmp_score)
        error_rate = [
            sum(s[1:4]) / float(sum(s[0:3])) for s in perm_score
        ]  # (s+d+i) / (c+s+d)
        # min() keeps the first minimum, so ties resolve to the earlier
        # permutation in the enumeration order.
        min_idx, min_v = min(enumerate(error_rate), key=lambda x: x[1])
        dic = {}
        for k in keys[min_idx]:
            dic[k] = in_dic[k]
        dic["Scores"] = "(#C #S #D #I) " + " ".join(map(str, perm_score[min_idx]))
        new_dic[id] = dic
    return new_dic
def get_results(result_file, result_key):
    """Parse an sclite-style result file into
    {"utts": {utt_id: {result_key: {field: text, ...}}}}.

    Recognised per-utterance fields are "Speaker", "Scores", "REF" and
    "HYP"; each value is the rest of the line after the first token.
    """
    id_pattern = re.compile(r"^id: ")
    field_patterns = {
        "Speaker": re.compile(r"^Speaker sentences"),
        "Scores": re.compile(r"^Scores: "),
        "REF": re.compile(r"^REF: "),
        "HYP": re.compile(r"^HYP: "),
    }
    results = {}
    current_id = None
    current_fields = {}
    with codecs.open(result_file, "r", encoding="utf-8") as f:
        for raw_line in f:
            stripped = raw_line.rstrip()
            tokens = stripped.split()
            if id_pattern.match(stripped):
                # Flush the previous utterance before starting a new one.
                if current_id:
                    results[current_id] = {result_key: current_fields}
                    current_fields = {}
                current_id = tokens[1]
            for field_name, pattern in field_patterns.items():
                if pattern.match(stripped):
                    current_fields[field_name] = " ".join(tokens[1:])
    if current_fields != {}:
        results[current_id] = {result_key: current_fields}
    return {"utts": results}
def merge_results(results):
    """Merge the per-permutation result dicts on their common utterance ids.

    :param results: mapping name -> {"utts": {utt_id: {key: entry}}}
    :return: dict utt_id -> union of the per-permutation entries
    """
    result_list = []
    common_keys = []
    # Intersect the utterance-id sets across all inputs.
    for name, result in results.items():
        utt_keys = result["utts"].keys()
        logging.info(name + ": has " + str(len(utt_keys)) + " utterances")
        if len(common_keys) > 0:
            common_keys = common_keys.intersection(set(utt_keys))
        else:
            common_keys = set(utt_keys)
        result_list.append(result)
    logging.info(
        "After merge, the result has " + str(len(common_keys)) + " utterances"
    )
    # Fold every input's per-utterance fields into a single dict per utt.
    merged = dict()
    for key in common_keys:
        entry = result_list[0]["utts"][key]
        for other in result_list[1:]:
            entry.update(other["utts"][key])
        merged[key] = entry
    return merged
def get_parser():
    """Build the command-line parser for the permutation-free error script."""
    parser = argparse.ArgumentParser(description="evaluate permutation-free error")
    parser.add_argument(
        "--num-spkrs", type=int, default=2, help="number of mixed speakers."
    )
    parser.add_argument(
        "results",
        type=str,
        nargs="+",
        help=(
            "the scores between references and hypotheses, "
            "in ascending order of references (1st) and hypotheses (2nd), "
            "e.g. [r1h1, r1h2, r2h1, r2h2] in 2-speaker-mix case."
        ),
    )
    return parser
def main():
    """Read all rXhY score files, pick the best permutation, return totals.

    :return: (score, best) where score is an (n_utts, 4) array of
        [c, s, d, i] counts and best is the per-utterance result dict
    """
    parser = get_parser()
    args = parser.parse_args()
    num_spkrs = args.num_spkrs

    # Every (reference, hypothesis) pair must have its own score file.
    if len(args.results) != num_spkrs**2:
        parser.print_help()
        sys.exit(1)

    # Read results from files, keyed as r{ref}h{hyp} in row-major order.
    results = {}
    for ref in range(1, num_spkrs + 1):
        for hyp in range(1, num_spkrs + 1):
            key = "r{}h{}".format(ref, hyp)
            file_idx = (ref - 1) * num_spkrs + hyp - 1
            results[key] = get_results(args.results[file_idx], key)

    # Merge the results of every permutation, then keep the best one per utt.
    merged = merge_results(results)
    best = METHOD_NAME(merged, num_spkrs)

    # Collect the [c, s, d, i] counts of the winning permutations.
    digits = re.compile(r"\d+")
    score = np.zeros((len(best.keys()), 4))
    for idx, key in enumerate(best.keys()):
        score[idx] = [int(m) for m in digits.findall(best[key]["Scores"])]
    return score, best
if __name__ == "__main__":
    # Force UTF-8 output so REF/HYP text prints regardless of the locale.
    sys.stdout = codecs.getwriter("utf-8")(sys.stdout.buffer)
    scores, new_results = main()
    # Column-wise totals over all utterances: [C, S, D, I].
    score_sum = np.sum(scores, axis=0, dtype=int)
    # Print results
    print(sys.argv)
    print("Total Scores: (#C #S #D #I) " + " ".join(map(str, list(score_sum))))
    # Error rate = (S + D + I) / (C + S + D), as a percentage.
    print(
        "Error Rate: {:0.2f}".format(
            100 * sum(score_sum[1:4]) / float(sum(score_sum[0:3]))
        )
    )
    print("Total Utts: ", str(scores.shape[0]))
    # Dump the per-utterance best-permutation results as pretty JSON.
    print(
        json.dumps(
            {"utts": new_results},
            indent=4,
            ensure_ascii=False,
            sort_keys=True,
            separators=(",", ": "),
        )
    )
#! /usr/bin/env python
# encoding: utf-8
# Alexander Afanasyev (UCLA), 2014
"""
Enable precompiled C++ header support (currently only clang++ and g++ are supported)
To use this tool, wscript should look like:
def options(opt):
opt.load('pch')
# This will add `--with-pch` configure option.
# Unless --with-pch during configure stage specified, the precompiled header support is disabled
def configure(conf):
conf.load('pch')
# this will set conf.env.WITH_PCH if --with-pch is specified and the supported compiler is used
# Unless conf.env.WITH_PCH is set, the precompiled header support is disabled
def build(bld):
bld(features='cxx pch',
target='precompiled-headers',
name='precompiled-headers',
headers='a.h b.h c.h', # headers to pre-compile into `precompiled-headers`
# Other parameters to compile precompiled headers
# includes=...,
# export_includes=...,
# use=...,
# ...
# Exported parameters will be propagated even if precompiled headers are disabled
)
bld(
target='test',
features='cxx cxxprogram',
source='a.cpp b.cpp d.cpp main.cpp',
use='precompiled-headers',
)
# or
bld(
target='test',
features='pch cxx cxxprogram',
source='a.cpp b.cpp d.cpp main.cpp',
headers='a.h b.h c.h',
)
Note that precompiled header must have multiple inclusion guards. If the guards are missing, any benefit of precompiled header will be voided and compilation may fail in some cases.
"""
import os
from waflib import Task, TaskGen, Utils
from waflib.Tools import c_preproc, cxx
# Per-compiler PCH settings, keyed by conf.env['COMPILER_CXX']:
#   [0] flag list used to force-include the precompiled header,
#   [1] file extension of the generated precompiled header,
#   [2] extra flags selecting the C++-header language mode.
PCH_COMPILER_OPTIONS = {
    'clang++': [['-include'], '.pch', ['-x', 'c++-header']],
    'g++': [['-include'], '.gch', ['-x', 'c++-header']],
}
def options(opt):
    """Register the --without-pch flag (PCH support is on by default)."""
    opt.add_option(
        '--without-pch',
        action='store_false',
        default=True,
        dest='with_pch',
        help='''Try to use precompiled header to speed up compilation (only g++ and clang++)''',
    )
def configure(conf):
    """Enable PCH support when requested and the C++ compiler supports it.

    Sets conf.env.WITH_PCH and the CXXPCH_* variables used by the build
    step; leaves the environment untouched otherwise.
    """
    compiler = conf.env['COMPILER_CXX']
    if not conf.options.with_pch or compiler not in PCH_COMPILER_OPTIONS:
        return
    conf.env.WITH_PCH = True
    include_flag, pch_ext, lang_flags = PCH_COMPILER_OPTIONS[compiler]
    conf.env.CXXPCH_F = include_flag
    conf.env.CXXPCH_EXT = pch_ext
    conf.env.CXXPCH_FLAGS = lang_flags
@TaskGen.feature('pch')
@TaskGen.before('process_source')
def apply_pch(self):
    """Create the precompiled-header build task for a 'pch' task generator.

    Runs before 'process_source' so the gchx task exists by the time C++
    sources are scheduled.  Registers the task in bld.pch_tasks under the
    key "<name>.<idx>" so dependent generators can find it through `use`.
    """
    # PCH support disabled at configure time: do nothing.
    if not self.env.WITH_PCH:
        return
    # Lazily create the build-context-wide registry of pch tasks.
    if getattr(self.bld, 'pch_tasks', None) is None:
        self.bld.pch_tasks = {}
    # No headers to precompile on this generator.
    if getattr(self, 'headers', None) is None:
        return
    self.headers = self.to_nodes(self.headers)
    if getattr(self, 'name', None):
        try:
            # If the key lookup succeeds, a pch task with this name/idx
            # already exists -> abort the build.
            # NOTE(review): the chained %-formatting relies on left
            # associativity ((msg % "%s.%s") % (name, idx)) and happens
            # to produce the intended message -- verify before touching.
            task = self.bld.pch_tasks["%s.%s" % (self.name, self.idx)]
            self.bld.fatal("Duplicated 'pch' task with name %r" % "%s.%s" % (self.name, self.idx))
        except KeyError:
            pass
    out = '%s.%d%s' % (self.target, self.idx, self.env['CXXPCH_EXT'])
    out = self.path.find_or_declare(out)
    task = self.create_task('gchx', self.headers, out)
    # target should be an absolute path of `out`, but without precompiled header extension
    task.target = out.abspath()[:-len(out.suffix())]
    self.pch_task = task
    if getattr(self, 'name', None):
        self.bld.pch_tasks["%s.%s" % (self.name, self.idx)] = task
@TaskGen.feature('cxx')
@TaskGen.after_method('process_source', 'propagate_uselib_vars')
def METHOD_NAME(self):
    """Append the precompiled-header flags to every C++ compile task.

    Finds a pch task either directly on this task generator or through the
    names listed in `use`, then adds the compiler's force-include flag
    (CXXPCH_F + the pch target path) to each compiled task's CXXFLAGS.
    """
    # Only applies when PCH is enabled and this generator actually
    # compiles something and declares dependencies.
    if not (self.env['WITH_PCH'] and getattr(self, 'use', None) and getattr(self, 'compiled_tasks', None) and getattr(self.bld, 'pch_tasks', None)):
        return
    pch = None
    # find pch task, if any
    if getattr(self, 'pch_task', None):
        pch = self.pch_task
    else:
        # NOTE(review): if several `use` entries name pch tasks, the last
        # match wins -- presumably only one is expected.
        for use in Utils.to_list(self.use):
            try:
                pch = self.bld.pch_tasks[use]
            except KeyError:
                pass
    if pch:
        for x in self.compiled_tasks:
            x.env.append_value('CXXFLAGS', self.env['CXXPCH_F'] + [pch.target])
class gchx(Task.Task):
    """Waf task that compiles C++ headers into a precompiled header file."""

    # Same command line as a regular C++ compile, plus the PCH language
    # flags (CXXPCH_FLAGS) and the force-include flag (CXXPCH_F).
    run_str = '${CXX} ${ARCH_ST:ARCH} ${CXXFLAGS} ${CXXPCH_FLAGS} ${FRAMEWORKPATH_ST:FRAMEWORKPATH} ${CPPPATH_ST:INCPATHS} ${DEFINES_ST:DEFINES} ${CXXPCH_F:SRC} ${CXX_SRC_F}${SRC[0].abspath()} ${CXX_TGT_F}${TGT[0].abspath()} ${CPPFLAGS}'
    scan = c_preproc.scan
    color = 'BLUE'
    ext_out=['.h']

    def runnable_status(self):
        """Decide whether the task must run.

        When waf would skip the task and the compiler is clang, re-check
        the mtimes of the header inputs (and scanned dependencies) against
        the generated output and force a rebuild if any is newer.
        NOTE(review): presumably a workaround for clang PCH staleness not
        being caught by the default signature check -- verify.
        """
        try:
            node_deps = self.generator.bld.node_deps[self.uid()]
        except KeyError:
            node_deps = []
        ret = Task.Task.runnable_status(self)
        if ret == Task.SKIP_ME and self.env.CXX_NAME == 'clang':
            t = os.stat(self.outputs[0].abspath()).st_mtime
            for n in self.inputs + node_deps:
                if os.stat(n.abspath()).st_mtime > t:
                    return Task.RUN_ME
        return ret
#!/usr/bin/env python3
# coding: utf-8 -*-
#
# Author: zaraki673 & pipiche38
#
import json
import Domoticz
from Classes.WebServer.headerResponse import (prepResponseMessage,
setupHeadersResponse)
from Modules.bindings import bindGroup, unbindGroup, webBind, webUnBind
from Modules.zb_tables_management import mgt_binding_table_req
from Modules.zigateConsts import ZCL_CLUSTERS_ACT
def rest_bindLSTcluster(self, verb, data, parameters):
    """REST handler: list every actuator cluster found on known devices.

    Returns a response whose Data is a JSON list of unique
    {"ClusterId", "ClusterDesc"} entries, skipping the coordinator (0000).
    """
    _response = prepResponseMessage(self, setupHeadersResponse())
    clusters = []
    for nwkid in self.ListOfDevices:
        if nwkid == "0000":
            continue
        for ep in self.ListOfDevices[nwkid]["Ep"]:
            for cluster_id in self.ListOfDevices[nwkid]["Ep"][ep]:
                if cluster_id not in ZCL_CLUSTERS_ACT:
                    continue
                entry = {"ClusterId": cluster_id, "ClusterDesc": ZCL_CLUSTERS_ACT[cluster_id]}
                if entry not in clusters:
                    clusters.append(entry)
    _response["Data"] = json.dumps(clusters)
    return _response
def rest_bindLSTdevice(self, verb, data, parameters):
    """REST handler: list the devices exposing the requested cluster.

    parameters[0] is the cluster id; the coordinator (0000) is always
    listed on endpoint 01.  Data is a JSON list of device descriptors.
    """
    _response = prepResponseMessage(self, setupHeadersResponse())
    if len(parameters) != 1:
        Domoticz.Error("Must have 1 argument. %s" % parameters)
        return _response
    devices = []
    cluster_to_bind = parameters[0]
    for nwkid in self.ListOfDevices:
        device = self.ListOfDevices[nwkid]
        if nwkid == "0000":
            # The coordinator is always included, on endpoint 01.
            devices.append(
                {
                    "IEEE": device["IEEE"],
                    "NwkId": nwkid,
                    "Ep": "01",
                    "ZDeviceName": device["ZDeviceName"],
                }
            )
            continue
        for ep in device["Ep"]:
            if cluster_to_bind not in device["Ep"][ep]:
                continue
            entry = {
                "IEEE": device["IEEE"],
                "NwkId": nwkid,
                "Ep": ep,
                "ZDeviceName": device["ZDeviceName"],
            }
            if entry not in devices:
                devices.append(entry)
    _response["Data"] = json.dumps(devices)
    return _response
def rest_binding(self, verb, data, parameters):
    """REST handler (PUT): bind a cluster between two devices.

    Expects a JSON body with sourceIeee, sourceEp, destIeee, destEp and
    cluster; returns a response whose Data is a confirmation string, or
    an error string when the payload is incomplete.
    """
    _response = prepResponseMessage(self, setupHeadersResponse())
    if verb != "PUT" or len(parameters) != 0:
        return _response
    _response["Data"] = None
    data = data.decode("utf8")
    data = json.loads(data)
    # Bug fix: the original chained the checks with `and`, so the payload
    # was only rejected when *every* field was missing.  All five fields
    # are required, so reject if *any* is absent.
    required = ("sourceIeee", "sourceEp", "destIeee", "destEp", "cluster")
    if any(field not in data for field in required):
        Domoticz.Error("-----> uncomplet json %s" % data)
        _response["Data"] = json.dumps("uncomplet json %s" % data)
        return _response
    self.logging(
        "Debug",
        "rest_binding - Source: %s/%s Dest: %s/%s Cluster: %s" % (data["sourceIeee"], data["sourceEp"], data["destIeee"], data["destEp"], data["cluster"]),
    )
    webBind(self, data["sourceIeee"], data["sourceEp"], data["destIeee"], data["destEp"], data["cluster"])
    _response["Data"] = json.dumps("Binding cluster %s between %s/%s and %s/%s" % (data["cluster"], data["sourceIeee"], data["sourceEp"], data["destIeee"], data["destEp"]))
    return _response
def rest_unbinding(self, verb, data, parameters):
    """REST handler (PUT): remove a cluster binding between two devices.

    Expects the same JSON payload as rest_binding; Data carries a
    confirmation string, or an error string when the payload is incomplete.
    """
    _response = prepResponseMessage(self, setupHeadersResponse())
    if verb != "PUT" or len(parameters) != 0:
        return _response
    _response["Data"] = None
    data = data.decode("utf8")
    data = json.loads(data)
    # Bug fix: `and` only rejected the payload when *all* fields were
    # missing; every field is required, so reject if *any* is absent.
    required = ("sourceIeee", "sourceEp", "destIeee", "destEp", "cluster")
    if any(field not in data for field in required):
        Domoticz.Log("-----> uncomplet json %s" % data)
        _response["Data"] = json.dumps("uncomplet json %s" % data)
        return _response
    self.logging(
        "Debug",
        "rest_unbinding - Source: %s/%s Dest: %s/%s Cluster: %s" % (data["sourceIeee"], data["sourceEp"], data["destIeee"], data["destEp"], data["cluster"]),
    )
    webUnBind(self, data["sourceIeee"], data["sourceEp"], data["destIeee"], data["destEp"], data["cluster"])
    _response["Data"] = json.dumps("Binding cluster %s between %s/%s and %s/%s" % (data["cluster"], data["sourceIeee"], data["sourceEp"], data["destIeee"], data["destEp"]))
    return _response
def METHOD_NAME(self, verb, data, parameters):
    """REST handler (PUT): bind a device cluster to a group.

    Expects a JSON body with sourceIeee, sourceEp, groupId and cluster.
    """
    # curl -X PUT -d '{"sourceIeee":" 84fd27fffe17e4c5", "sourceEp": "01", "groupId": " 4ca3", "cluster": "0006"}' http://127.0.0.1:9441/rest-zigate/1/binding-group
    _response = prepResponseMessage(self, setupHeadersResponse())
    if verb != "PUT" or len(parameters) != 0:
        return _response
    _response["Data"] = None
    data = data.decode("utf8")
    data = json.loads(data)
    # Bug fix: `and` only rejected the payload when *all* fields were
    # missing; every field is required, so reject if *any* is absent.
    required = ("sourceIeee", "sourceEp", "groupId", "cluster")
    if any(field not in data for field in required):
        Domoticz.Error("-----> uncomplet json %s" % data)
        _response["Data"] = json.dumps("uncomplet json %s" % data)
        return _response
    self.logging(
        "Debug",
        "rest_binding - Source: %s/%s Dest: %s Cluster: %s" % (data["sourceIeee"], data["sourceEp"], data["groupId"], data["cluster"]),
    )
    bindGroup(self, data["sourceIeee"], data["sourceEp"], data["cluster"], data["groupId"])
    _response["Data"] = json.dumps("Binding cluster %s between %s/%s and %s" % (data["cluster"], data["sourceIeee"], data["sourceEp"], data["groupId"]))
    return _response
def rest_group_unbinding(self, verb, data, parameters):
    """REST handler (PUT): remove a device-cluster-to-group binding.

    Expects a JSON body with sourceIeee, sourceEp, groupId and cluster.
    """
    # curl -X PUT -d '{"sourceIeee":" 84fd27fffe17e4c5", "sourceEp": "01", "groupId": " 4ca3", "cluster": "0006"}' http://127.0.0.1:9441/rest-zigate/1/unbinding-group
    _response = prepResponseMessage(self, setupHeadersResponse())
    if verb != "PUT" or len(parameters) != 0:
        return _response
    _response["Data"] = None
    data = data.decode("utf8")
    data = json.loads(data)
    # Bug fix: `and` only rejected the payload when *all* fields were
    # missing; every field is required, so reject if *any* is absent.
    required = ("sourceIeee", "sourceEp", "groupId", "cluster")
    if any(field not in data for field in required):
        Domoticz.Error("-----> uncomplet json %s" % data)
        _response["Data"] = json.dumps("uncomplet json %s" % data)
        return _response
    self.logging(
        "Debug",
        "rest_group_unbinding - Source: %s/%s Dest: %s Cluster: %s" % (data["sourceIeee"], data["sourceEp"], data["groupId"], data["cluster"]),
    )
    unbindGroup(self, data["sourceIeee"], data["sourceEp"], data["cluster"], data["groupId"])
    _response["Data"] = json.dumps("UnBinding cluster %s between %s/%s and %s" % (data["cluster"], data["sourceIeee"], data["sourceEp"], data["groupId"]))
    return _response
def rest_binding_table_req(self, verb, data, parameters):
    """REST handler (GET): request the binding table of one device.

    parameters[0] is the network id; triggers an asynchronous
    Mgmt_Bind_req and returns an acknowledgement message.
    """
    # curl http://127.0.0.1:9440/rest-zigate/1/binding-table-req/bd92
    _response = prepResponseMessage(self, setupHeadersResponse())
    _response["Headers"]["Content-Type"] = "application/json; charset=utf-8"
    if verb != "GET" or len(parameters) != 1:
        return _response
    nwkid = parameters[0]
    if nwkid not in self.ListOfDevices:
        return _response
    mgt_binding_table_req(self, nwkid, start_index="00")
    action = {"Name": "Requested Binding table for device: %s" % nwkid}
    _response["Data"] = json.dumps(action, sort_keys=True)
    return _response
def rest_binding_table_disp(self, verb, data, parameters):
    """REST handler (GET): return the cached binding table of one device.

    parameters[0] is the network id; Data is the JSON-encoded
    "BindingTable" entry previously collected for that device.
    """
    _response = prepResponseMessage(self, setupHeadersResponse())
    if verb != "GET" or len(parameters) != 1:
        return _response
    nwkid = parameters[0]
    if nwkid not in self.ListOfDevices:
        return _response
    device = self.ListOfDevices[nwkid]
    if "BindingTable" not in device:
        return _response
    _response["Data"] = json.dumps(device["BindingTable"], sort_keys=True)
    return _response
import numpy as np
from vispy.scene.visuals import Compound, Line, Markers, Polygon
from napari._vispy.overlays.base import LayerOverlayMixin, VispySceneOverlay
from napari.components.overlays import LabelsPolygonOverlay
from napari.layers import Labels
from napari.layers.labels._labels_constants import Mode
from napari.layers.labels._labels_utils import mouse_event_to_labels_coordinate
def _only_when_enabled(callback):
    """Wrap a VispyLabelsPolygonOverlay mouse callback with usability guards.

    The wrapped callback only runs when all of the following hold:
    1) the overlay is enabled;
    2) exactly 2 dimensions are displayed (the tool only works in 2D);
    3) labels are edited across exactly 2 dimensions.
    When 2) or 3) fail, the Labels layer is switched back to PAN_ZOOM mode.
    """

    def wrapper(self, layer: Labels, event):
        if not self.overlay.enabled:
            return
        is_editable_2d = (
            layer._slice_input.ndisplay == 2 and layer.n_edit_dimensions == 2
        )
        if is_editable_2d:
            callback(self, layer, event)
        else:
            layer.mode = Mode.PAN_ZOOM

    return wrapper
class VispyLabelsPolygonOverlay(LayerOverlayMixin, VispySceneOverlay):
    """Vispy overlay rendering the polygon a user sketches on a Labels layer.

    Draws the committed vertices as markers, the current edge as a line
    (while fewer than 3 points exist) and the filled polygon otherwise.
    Mouse interaction: left click adds a vertex, right click removes the
    last one, double (left) click near the start closes the polygon and
    paints it into the layer.
    """

    def __init__(
        self, *, layer: Labels, overlay: LabelsPolygonOverlay, parent=None
    ):
        # Two dummy points so the vispy visuals can be constructed; real
        # positions come from the overlay's `points` event.
        points = [(0, 0), (1, 1)]

        self._nodes_kwargs = {
            'face_color': (1, 1, 1, 1),
            'size': 8.0,
            'edge_width': 1.0,
            'edge_color': (0, 0, 0, 1),
        }

        self._nodes = Markers(pos=np.array(points), **self._nodes_kwargs)

        self._polygon = Polygon(
            pos=points,
            border_method='agg',
        )

        self._line = Line(pos=points, method='agg')

        super().__init__(
            node=Compound([self._polygon, self._nodes, self._line]),
            layer=layer,
            overlay=overlay,
            parent=parent,
        )

        # Hook the mouse callbacks into the layer and listen to overlay /
        # layer events that require a redraw or recolor.
        self.layer.mouse_move_callbacks.append(self.METHOD_NAME)
        self.layer.mouse_drag_callbacks.append(self._on_mouse_press)
        self.layer.mouse_double_click_callbacks.append(
            self._on_mouse_double_click
        )

        self.overlay.events.points.connect(self._on_points_change)
        self.overlay.events.enabled.connect(self._on_enabled_change)
        layer.events.selected_label.connect(self._update_color)
        layer.events.colormap.connect(self._update_color)
        layer.events.color_mode.connect(self._update_color)
        layer.events.opacity.connect(self._update_color)

        # Screen position of the first vertex; used to detect whether a
        # double click is close enough to the start to close the polygon.
        self._first_point_pos = np.zeros(2)

        self.reset()
        self._update_color()
        # If there are no points, it won't be visible
        self.overlay.visible = True

    def _on_enabled_change(self):
        # Refresh the visuals when the overlay is (re-)enabled.
        if self.overlay.enabled:
            self._on_points_change()

    def _on_points_change(self):
        """Redraw markers/line/polygon from the overlay's point list."""
        num_points = len(self.overlay.points)
        if num_points:
            # Keep only the displayed dims, reversed to match vispy (x, y).
            points = np.array(self.overlay.points)[
                :, self._dims_displayed[::-1]
            ]
        else:
            points = np.empty((0, 2))

        # 3+ points form a polygon; exactly 2 show a line segment.
        if num_points > 2:
            self._polygon.visible = True
            self._line.visible = False
            self._polygon.pos = points
        else:
            self._polygon.visible = False
            self._line.visible = num_points == 2
            if self._line.visible:
                self._line.set_data(pos=points)

        self._nodes.set_data(
            pos=points,
            **self._nodes_kwargs,
        )

    def _set_color(self, color):
        border_color = tuple(color[:3]) + (1,)  # always opaque
        polygon_color = color

        # Clean up polygon faces before making it transparent, otherwise
        # it keeps the previous visualization of the polygon without cleaning
        if polygon_color[-1] == 0:
            self._polygon.mesh.set_data(faces=[])
        self._polygon.color = polygon_color

        self._polygon.border_color = border_color
        self._line.set_data(color=border_color)

    def _update_color(self):
        """Recolor the overlay from the layer's selected label and opacity."""
        layer = self.layer
        if layer._selected_label == layer._background_label:
            # Background label: fully transparent fill (red border base).
            self._set_color((1, 0, 0, 0))
        else:
            self._set_color(
                layer._selected_color.tolist()[:3] + [layer.opacity]
            )

    @_only_when_enabled
    def METHOD_NAME(self, layer, event):
        """Continuously redraw the latest polygon point with the current mouse position."""
        if self._num_points == 0:
            return

        pos = self._get_mouse_coordinates(event)
        # NOTE(review): _get_mouse_coordinates may return None when the
        # event is outside the layer; pos.tolist() would then raise --
        # verify whether that can happen during a move.
        self.overlay.points = self.overlay.points[:-1] + [pos.tolist()]

    @_only_when_enabled
    def _on_mouse_press(self, layer, event):
        """Left click: append a vertex; right click: remove the last one."""
        pos = self._get_mouse_coordinates(event)
        dims_displayed = self._dims_displayed

        if event.button == 1:  # left mouse click
            orig_pos = pos.copy()
            # recenter the point in the center of the image pixel
            pos[dims_displayed] = np.floor(pos[dims_displayed]) + 0.5

            if not self.overlay.points:
                self._first_point_pos = np.array(event.pos)

            prev_point = (
                self.overlay.points[-2] if self._num_points > 1 else None
            )
            # Add a new point only if it differs from the previous one
            if prev_point is None or np.linalg.norm(pos - prev_point) > 0:
                self.overlay.points = self.overlay.points[:-1] + [
                    pos.tolist(),
                    # add some epsilon to avoid points duplication,
                    # the latest point is used only for visualization of the cursor
                    (orig_pos + 1e-3).tolist(),
                ]
        elif event.button == 2 and self._num_points > 0:  # right mouse click
            if self._num_points < 3:
                self.overlay.points = []
            else:
                self.overlay.points = self.overlay.points[:-2] + [pos.tolist()]

    @_only_when_enabled
    def _on_mouse_double_click(self, layer, event):
        """Close the polygon (and paint it) when double-clicking near the start."""
        if event.button == 2:
            # A double right click behaves like two single right clicks.
            self._on_mouse_press(layer, event)
            return None

        first_point_dist = np.linalg.norm(event.pos - self._first_point_pos)
        if (
            not self.overlay.double_click_completion
            or first_point_dist > self.overlay.completion_radius
        ):
            return self._on_mouse_press(layer, event)

        # Remove the latest 2 points as double click always follows a simple click
        # and another point is reserved for the visualization purpose
        self.overlay.points = self.overlay.points[:-2]
        self.overlay.add_polygon_to_labels(layer)
        return None

    def _get_mouse_coordinates(self, event):
        """Map a mouse event to data coordinates, centered on the pixel grid."""
        pos = mouse_event_to_labels_coordinate(self.layer, event)
        if pos is None:
            return None

        pos = np.array(pos, dtype=float)
        pos[self._dims_displayed] += 0.5

        return pos

    @property
    def _dims_displayed(self):
        # The layer's currently displayed dimensions (2 in polygon mode).
        return self.layer._slice_input.displayed

    @property
    def _num_points(self):
        return len(self.overlay.points)

    def reset(self):
        super().reset()
        self._on_points_change()
# (C) Copyright 2004-2023 Enthought, Inc., Austin, TX
# All rights reserved.
#
# This software is provided without warranty under the terms of the BSD
# license included in LICENSE.txt and may be redistributed only under
# the conditions described in the aforementioned license. The license
# is also available online at http://www.enthought.com/licenses/BSD.txt
#
# Thanks for using Enthought open source!
""" Editor that displays an interactive Python shell.
"""
from traits.api import Bool, Str, Event, Property, observe
from traits.observation.api import match
from traitsui.basic_editor_factory import BasicEditorFactory
from traitsui.editor import Editor
from traitsui.toolkit import toolkit_object
class _ShellEditor(Editor):
    """Base class for an editor that displays an interactive Python shell."""

    #: An event fired to execute a command in the shell.
    command_to_execute = Event()

    #: An event fired whenever the user executes a command in the shell.
    command_executed = Event(Bool)

    #: Is the shell editor scrollable? This value overrides the default.
    scrollable = True

    # -------------------------------------------------------------------------
    # 'Editor' Interface
    # -------------------------------------------------------------------------

    def init(self, parent):
        """Finishes initializing the editor by creating the underlying toolkit
        widget.
        """
        # Moving the import here, since PythonShell is implemented in the
        # Pyface backend packages, and we want to delay loading this toolkit
        # specific class until this editor is actually used.
        from pyface.python_shell import PythonShell

        locals = None
        self._base_locals = None
        value = self.value
        # In "share" mode with a dict value, the dict itself becomes the
        # shell's namespace.
        if self.factory.share and isinstance(value, dict):
            locals = value
        self._shell = shell = PythonShell(parent)
        shell.create()
        self.control = shell.control
        if locals:
            for item in locals.items():
                shell.bind(*item)
        if locals is None:
            # Not sharing: expose the edited object as `self` and mirror
            # shell activity back into the object / dict value.
            object = self.object
            shell.bind("self", object)
            shell.observe(
                self.METHOD_NAME, "command_executed", dispatch="ui"
            )
            if not isinstance(value, dict):
                # Observe *all* traits of the object so external changes
                # are pushed into the shell namespace (see update_any).
                self._any_trait_observer = lambda name, ctrait: True
                object.observe(
                    self.update_any,
                    match(self._any_trait_observer),
                    dispatch="ui",
                )
            else:
                # Remember the interpreter's initial names so only *new*
                # names created by the user are copied into the dict value.
                self._base_locals = locals = {}
                for name in self._shell.interpreter().locals.keys():
                    locals[name] = None

        # Synchronize any editor events:
        self.sync_value(
            self.factory.command_to_execute, "command_to_execute", "from"
        )
        self.sync_value(
            self.factory.command_executed, "command_executed", "to"
        )

        self.set_tooltip()

    def METHOD_NAME(self, event):
        """Handles the user entering input data in the edit control."""
        # Propagate shell-side assignments back into the edited object
        # (or, in dict mode, into the dict value).
        locals = self._shell.interpreter().locals
        base_locals = self._base_locals
        if base_locals is None:
            object = self.object
            for name in object.trait_names():
                if name in locals:
                    try:
                        setattr(object, name, locals[name])
                    except:
                        # Best-effort: ignore traits that reject the value.
                        pass
        else:
            dic = self.value
            # Copy only names the user created (not the interpreter's own).
            for name in locals.keys():
                if name not in base_locals:
                    try:
                        dic[name] = locals[name]
                    except:
                        # Best-effort: ignore values the dict cannot accept.
                        pass
        self.command_executed = True

    def update_editor(self):
        """Updates the editor when the object trait changes externally to the
        editor.
        """
        if self.factory.share:
            value = self.value
            if isinstance(value, dict):
                self._shell.interpreter().locals = value
        else:
            # Push the current object traits / dict items into the shell.
            locals = self._shell.interpreter().locals
            base_locals = self._base_locals
            if base_locals is None:
                object = self.object
                for name in object.trait_names():
                    locals[name] = getattr(object, name, None)
            else:
                dic = self.value
                for name, value in dic.items():
                    locals[name] = value

    def update_any(self, event):
        """Updates the editor when the object trait changes externally to the
        editor.
        """
        # Mirror a single trait change into the shell namespace (or dict).
        name, new = event.name, event.new
        locals = self._shell.interpreter().locals
        if self._base_locals is None:
            locals[name] = new
        else:
            self.value[name] = new

    def dispose(self):
        """Disposes of the contents of an editor."""
        # Undo the observers installed in init() (only set up when not
        # sharing a dict namespace).
        if not (self.factory.share and isinstance(self.value, dict)):
            self._shell.observe(
                self.METHOD_NAME,
                "command_executed",
                remove=True,
                dispatch="ui",
            )
            if self._base_locals is None:
                self.object.observe(
                    self.update_any,
                    match(self._any_trait_observer),
                    remove=True,
                    dispatch="ui",
                )

        super().dispose()

    def restore_prefs(self, prefs):
        """Restores any saved user preference information associated with the
        editor.
        """
        shell = self._shell
        try:
            history = prefs.get("history", [])
            history_index = prefs.get("history_index", -1)
            shell.set_history(history, history_index)
        except:
            # Best-effort: malformed preferences are simply ignored.
            pass

    def save_prefs(self):
        """Returns any user preference information associated with the editor."""
        history, history_index = self._shell.get_history()
        return {"history": history, "history_index": history_index}

    # -------------------------------------------------------------------------
    # Private Interface
    # -------------------------------------------------------------------------

    # Trait change handlers --------------------------------------------------

    @observe("command_to_execute")
    def _execute_command(self, event):
        """Handles the 'command_to_execute' trait being fired."""
        # Show the command. A 'hidden' command should be executed directly on
        # the namespace trait!
        command = event.new
        self._shell.execute_command(command, hidden=False)
class ShellEditor(BasicEditorFactory):
    """Editor factory for shell editors."""

    #: The editor class to be instantiated.
    klass = Property()

    #: Should the shell interpreter use the object value's dictionary?
    share = Bool(False)

    #: Extended trait name of the object event trait which triggers a command
    #: execution in the shell when fired.
    command_to_execute = Str()

    #: Extended trait name of the object event trait which is fired when a
    #: command is executed.
    command_executed = Str()

    def _get_klass(self):
        """Returns the toolkit-specific editor class to be used in the UI."""
        # Resolved lazily so the factory works with whichever GUI toolkit
        # backend is active at runtime.
        return toolkit_object("shell_editor:_ShellEditor")


# This alias is deprecated and will be removed in TraitsUI 8.
ToolkitEditorFactory = ShellEditor
import binascii
import json
import argparse
import os.path
from collections import defaultdict
import logging
import scapy.all as scapy
logging.getLogger("scapy.runtime").setLevel(logging.ERROR)
class ARPResponder(object):
    """Scapy-based responder answering ARP requests (IPv4) and NDP neighbor
    solicitations (IPv6) for the addresses configured in ``ip_sets``.
    """

    # ARP opcode for "who-has" requests (replies use opcode 2).
    ARP_OP_REQUEST = 1
    # interface name -> {ip -> mac to answer with, 'vlan' -> [tags]};
    # populated by main() before sniffing starts.
    ip_sets = {}

    @staticmethod
    def action(packet):
        """Dispatch a sniffed packet to the ARP or NDP reply handler."""
        if "ARP" in packet:  # IPv4
            return ARPResponder.METHOD_NAME(packet)
        elif "ICMPv6ND_NS" in packet and "ICMPv6NDOptSrcLLAddr" in packet:  # IPv6
            return ARPResponder.reply_to_ndp(packet)
        else:
            # Handle other Ethernet types
            pass

    @staticmethod
    def METHOD_NAME(data):
        """Answer an ARP request if the target IP is configured for the
        interface the packet arrived on.
        """
        remote_mac = data["ARP"].hwsrc
        remote_ip = data["ARP"].psrc
        request_ip = data["ARP"].pdst
        op_type = data["ARP"].op
        # Don't send ARP response if the ARP op code is not request
        if op_type != ARPResponder.ARP_OP_REQUEST:
            return

        interface = data.sniffed_on
        if interface not in ARPResponder.ip_sets:
            return
        if request_ip not in ARPResponder.ip_sets[interface]:
            return
        # Reply once per configured VLAN tag, or untagged if none.
        if 'vlan' in ARPResponder.ip_sets[interface]:
            vlan_list = ARPResponder.ip_sets[interface]['vlan']
        else:
            vlan_list = [None]
        for vlan_id in vlan_list:
            arp_reply = ARPResponder.generate_arp_reply(ARPResponder.ip_sets[interface][request_ip],
                                                        remote_mac, request_ip, remote_ip, vlan_id)
            scapy.sendp(arp_reply, iface=interface)

    @staticmethod
    def reply_to_ndp(data):
        """Answer an IPv6 neighbor solicitation for a configured target IP."""
        remote_mac = data["ICMPv6NDOptSrcLLAddr"].lladdr
        remote_ip = data["IPv6"].src
        request_ip = data["ICMPv6ND_NS"].tgt
        interface = data.sniffed_on
        if interface not in ARPResponder.ip_sets:
            return
        if request_ip not in ARPResponder.ip_sets[interface]:
            return
        ndp_reply = ARPResponder.generate_neigh_adv(ARPResponder.ip_sets[interface][request_ip],
                                                    remote_mac, request_ip, remote_ip)
        scapy.sendp(ndp_reply, iface=interface)

    @staticmethod
    def generate_arp_reply(local_mac, remote_mac, local_ip, remote_ip, vlan_id):
        """Build an ARP is-at reply, 802.1Q-tagged when vlan_id is set.

        NOTE(review): main() stores VLAN entries as raw bytes
        (binascii.unhexlify), while Dot1Q's `vlan` field is normally an
        integer VLAN id -- verify the tagged path actually works.
        """
        l2 = scapy.Ether(dst=remote_mac, src=local_mac, type=(0x8100 if vlan_id else 0x0806))
        l3 = scapy.ARP(op=2, hwsrc=local_mac, psrc=local_ip, hwdst=remote_mac, pdst=remote_ip)
        if vlan_id:
            l2 /= scapy.Dot1Q(vlan=vlan_id, type=0x0806)
        return l2 / l3

    @staticmethod
    def generate_neigh_adv(local_mac, remote_mac, target_ip, remote_ip):
        """Build a solicited neighbor advertisement (S=1, O=1) for target_ip."""
        neigh_adv_pkt = scapy.Ether(src=local_mac, dst=remote_mac)
        neigh_adv_pkt /= scapy.IPv6(src=target_ip, dst=remote_ip)
        neigh_adv_pkt /= scapy.ICMPv6ND_NA(tgt=target_ip, R=0, S=1, O=1)
        neigh_adv_pkt /= scapy.ICMPv6NDOptDstLLAddr(lladdr=local_mac)
        return neigh_adv_pkt
def parse_args(argv=None):
    """Parse command-line arguments for the ARP autoresponder.

    :param argv: optional list of argument strings; defaults to
        ``sys.argv[1:]`` (backward compatible with the zero-arg call),
        which also makes the function unit-testable.
    :return: parsed ``argparse.Namespace`` with ``conf`` and ``extended``
    """
    parser = argparse.ArgumentParser(description='ARP autoresponder')
    parser.add_argument('--conf', '-c', type=str, dest='conf',
                        default='/tmp/from_t1.json', help='path to json file with configuration')
    parser.add_argument('--extended', '-e', action='store_true',
                        dest='extended', default=False, help='enable extended mode')
    args = parser.parse_args(argv)

    return args
def main():
    """Load the JSON config and start sniffing/answering ARP + NDP requests.

    Config shape: {"iface[@vlan]": {"ip": "mac-hex", ...}, ...}; in
    non-extended mode the MAC values are ignored and the interface's own
    MAC is used for every IP.
    """
    args = parse_args()

    if not os.path.exists(args.conf):
        print(("Can't find file %s" % args.conf))
        return

    with open(args.conf) as fp:
        data = json.load(fp)

    # generate ip_sets. every ip address will have it's own uniq mac address
    ip_sets = {}
    for iface, ip_dict in list(data.items()):
        vlan = None
        iface = str(iface)
        # Interface names may carry a VLAN suffix, e.g. "eth0@100".
        if iface.find('@') != -1:
            iface, vlan = iface.split('@')
            # VLAN id as a zero-padded 4-char hex string.
            vlan_tag = format(int(vlan), 'x')
            vlan_tag = vlan_tag.zfill(4)
        if iface not in ip_sets:
            ip_sets[iface] = defaultdict(list)
        if args.extended:
            # Extended mode: the config supplies an explicit MAC (hex
            # string) per IP, stored as raw bytes.
            for ip, mac in list(ip_dict.items()):
                ip_sets[iface][str(ip)] = binascii.unhexlify(str(mac))
        else:
            # Default mode: answer with the interface's own MAC.
            for ip in ip_dict:
                ip_sets[iface][str(ip)] = scapy.get_if_hwaddr(iface)
        if vlan is not None:
            # NOTE(review): stores the VLAN tag as 2 raw bytes while the
            # reply path passes these entries to Dot1Q(vlan=...), which
            # normally expects an integer VLAN id -- verify.
            ip_sets[iface]['vlan'].append(binascii.unhexlify(vlan_tag))

    ARPResponder.ip_sets = ip_sets
    # Blocks here, answering matching ARP / ICMPv6 packets as they arrive.
    scapy.sniff(prn=ARPResponder.action, filter="arp or icmp6", iface=list(ip_sets.keys()), store=False)


if __name__ == '__main__':
    main()
from homeassistant.components.fan import (
DIRECTION_FORWARD,
DIRECTION_REVERSE,
FanEntityFeature,
)
from homeassistant.components.light import ATTR_BRIGHTNESS, ATTR_COLOR_TEMP, ColorMode
from ..const import ARLEC_FAN_LIGHT_PAYLOAD
from ..helpers import assert_device_properties_set
from ..mixins.select import BasicSelectTests
from ..mixins.switch import SwitchableTests
from .base_device_tests import TuyaDeviceTestCase
# Tuya data-point (DPS) ids used by the Arlec fan-with-light device.
SWITCH_DPS = "1"  # fan power switch
SPEED_DPS = "3"  # fan speed (6 steps, per the tests below)
DIRECTION_DPS = "4"  # fan rotation: "forward" / "reverse"
LIGHT_DPS = "9"  # light power switch
BRIGHTNESS_DPS = "10"  # light brightness (device scale 0-100)
COLORTEMP_DPS = "11"  # light color temperature (device scale)
PRESET_DPS = "102"  # fan preset mode: "normal" / "nature" / "sleep"
TIMER_DPS = "103"  # auto-off timer selection
class TestArlecFan(SwitchableTests, BasicSelectTests, TuyaDeviceTestCase):
    """Tests for the Arlec fan-with-light Tuya device config
    (arlec_fan_light.yaml): fan, light and auto-off timer entities.
    """

    __test__ = True

    def setUp(self):
        # Load the device config and grab the entities under test.
        self.setUpForConfig("arlec_fan_light.yaml", ARLEC_FAN_LIGHT_PAYLOAD)
        self.subject = self.entities.get("fan")
        self.light = self.entities.get("light")
        self.timer = self.entities.get("select_timer")
        self.setUpSwitchable(SWITCH_DPS, self.subject)
        self.setUpBasicSelect(
            TIMER_DPS,
            self.entities["select_timer"],
            {
                "off": "Off",
                "2hour": "2 hours",
                "4hour": "4 hours",
                "8hour": "8 hours",
            },
        )
        self.mark_secondary(["select_timer"])

    def test_supported_features(self):
        self.assertEqual(
            self.subject.supported_features,
            (
                FanEntityFeature.DIRECTION
                | FanEntityFeature.PRESET_MODE
                | FanEntityFeature.SET_SPEED
            ),
        )

    def test_preset_mode(self):
        self.dps[PRESET_DPS] = "nature"
        self.assertEqual(self.subject.preset_mode, "nature")

        self.dps[PRESET_DPS] = "sleep"
        self.assertEqual(self.subject.preset_mode, "sleep")

        self.dps[PRESET_DPS] = None
        self.assertIs(self.subject.preset_mode, None)

    def test_preset_modes(self):
        self.assertCountEqual(self.subject.preset_modes, ["nature", "sleep"])

    async def test_set_preset_mode_to_nature(self):
        async with assert_device_properties_set(
            self.subject._device,
            {PRESET_DPS: "nature"},
        ):
            await self.subject.async_set_preset_mode("nature")

    async def test_set_preset_mode_to_sleep(self):
        async with assert_device_properties_set(
            self.subject._device,
            {PRESET_DPS: "sleep"},
        ):
            await self.subject.async_set_preset_mode("sleep")

    def test_direction(self):
        self.dps[DIRECTION_DPS] = "forward"
        self.assertEqual(self.subject.current_direction, DIRECTION_FORWARD)
        self.dps[DIRECTION_DPS] = "reverse"
        self.assertEqual(self.subject.current_direction, DIRECTION_REVERSE)

    async def test_set_direction_forward(self):
        async with assert_device_properties_set(
            self.subject._device, {DIRECTION_DPS: "forward"}
        ):
            await self.subject.async_set_direction(DIRECTION_FORWARD)

    async def test_set_direction_reverse(self):
        async with assert_device_properties_set(
            self.subject._device, {DIRECTION_DPS: "reverse"}
        ):
            await self.subject.async_set_direction(DIRECTION_REVERSE)

    def test_speed(self):
        # Speed 3 of 6 maps to 50%.
        self.dps[SPEED_DPS] = "3"
        self.assertEqual(self.subject.percentage, 50)

    def test_speed_step(self):
        # 6 discrete speeds -> 100/6 percent per step.
        self.assertAlmostEqual(self.subject.percentage_step, 16.67, 2)
        self.assertEqual(self.subject.speed_count, 6)

    async def test_set_speed(self):
        # 33% rounds to speed step 2.
        async with assert_device_properties_set(self.subject._device, {SPEED_DPS: 2}):
            await self.subject.async_set_percentage(33)

    async def test_set_speed_in_normal_mode_snaps(self):
        # 80% snaps to the nearest valid step (5 of 6).
        self.dps[PRESET_DPS] = "normal"
        async with assert_device_properties_set(self.subject._device, {SPEED_DPS: 5}):
            await self.subject.async_set_percentage(80)

    def test_light_is_on(self):
        self.dps[LIGHT_DPS] = False
        self.assertFalse(self.light.is_on)
        self.dps[LIGHT_DPS] = True
        self.assertTrue(self.light.is_on)

    def test_light_supported_color_modes(self):
        self.assertCountEqual(
            self.light.supported_color_modes,
            [ColorMode.COLOR_TEMP],
        )

    def test_light_color_mode(self):
        self.assertEqual(self.light.color_mode, ColorMode.COLOR_TEMP)

    def METHOD_NAME(self):
        # Device brightness 0-100 scales to HA's 0-255 (50 -> ~128).
        self.dps[BRIGHTNESS_DPS] = 50
        self.assertAlmostEqual(self.light.brightness, 128, 0)

    def test_light_color_temp(self):
        # Device color-temp value 70 maps to 396 in HA's scale.
        self.dps[COLORTEMP_DPS] = 70
        self.assertEqual(self.light.color_temp, 396)

    async def test_light_async_turn_on(self):
        async with assert_device_properties_set(
            self.light._device,
            {LIGHT_DPS: True, BRIGHTNESS_DPS: 44, COLORTEMP_DPS: 70},
        ):
            await self.light.async_turn_on(
                brightness=112,
                color_temp=396,
            )
7,483 | resolve publication date | from typing import List
import graphene
from ...attribute import models as attribute_models
from ...page import models
from ...permission.enums import PagePermissions, PageTypePermissions
from ..attribute.filters import AttributeFilterInput, AttributeWhereInput
from ..attribute.types import Attribute, AttributeCountableConnection, SelectedAttribute
from ..core import ResolveInfo
from ..core.connection import (
CountableConnection,
create_connection_slice,
filter_connection_queryset,
)
from ..core.descriptions import ADDED_IN_33, DEPRECATED_IN_3X_FIELD, RICH_CONTENT
from ..core.doc_category import DOC_CATEGORY_PAGES
from ..core.federation import federated_entity, resolve_federation_references
from ..core.fields import FilterConnectionField, JSONString, PermissionsField
from ..core.scalars import Date
from ..core.types import ModelObjectType, NonNullList
from ..meta.types import ObjectWithMetadata
from ..translations.fields import TranslationField
from ..translations.types import PageTranslation
from .dataloaders import (
PageAttributesByPageTypeIdLoader,
PagesByPageTypeIdLoader,
PageTypeByIdLoader,
SelectedAttributesByPageIdLoader,
)
@federated_entity("id")
class PageType(ModelObjectType[models.PageType]):
id = graphene.GlobalID(required=True, description="ID of the page type.")
name = graphene.String(required=True, description="Name of the page type.")
slug = graphene.String(required=True, description="Slug of the page type.")
attributes = NonNullList(
Attribute, description="Page attributes of that page type."
)
available_attributes = FilterConnectionField(
AttributeCountableConnection,
filter=AttributeFilterInput(),
where=AttributeWhereInput(),
description="Attributes that can be assigned to the page type.",
permissions=[
PagePermissions.MANAGE_PAGES,
PageTypePermissions.MANAGE_PAGE_TYPES_AND_ATTRIBUTES,
],
)
has_pages = PermissionsField(
graphene.Boolean,
description="Whether page type has pages assigned.",
permissions=[
PagePermissions.MANAGE_PAGES,
PageTypePermissions.MANAGE_PAGE_TYPES_AND_ATTRIBUTES,
],
)
class Meta:
description = (
"Represents a type of page. It defines what attributes are available to "
"pages of this type."
)
interfaces = [graphene.relay.Node, ObjectWithMetadata]
model = models.PageType
@staticmethod
def get_model():
return models.PageType
@staticmethod
def resolve_attributes(root: models.PageType, info: ResolveInfo):
return PageAttributesByPageTypeIdLoader(info.context).load(root.pk)
@staticmethod
def resolve_available_attributes(
root: models.PageType, info: ResolveInfo, **kwargs
):
qs = attribute_models.Attribute.objects.get_unassigned_page_type_attributes(
root.pk
)
qs = filter_connection_queryset(qs, kwargs, info.context)
return create_connection_slice(qs, info, kwargs, AttributeCountableConnection)
@staticmethod
def resolve_has_pages(root: models.PageType, info: ResolveInfo):
return (
PagesByPageTypeIdLoader(info.context)
.load(root.pk)
.then(lambda pages: bool(pages))
)
@staticmethod
def __resolve_references(roots: List["PageType"], _info: ResolveInfo):
return resolve_federation_references(PageType, roots, models.PageType.objects)
class PageTypeCountableConnection(CountableConnection):
class Meta:
doc_category = DOC_CATEGORY_PAGES
node = PageType
class Page(ModelObjectType[models.Page]):
    # GraphQL type for a static CMS page. Several fields are deprecated
    # aliases kept for API compatibility (publication_date, content_json).
    id = graphene.GlobalID(required=True, description="ID of the page.")
    seo_title = graphene.String(description="Title of the page for SEO.")
    seo_description = graphene.String(description="Description of the page for SEO.")
    title = graphene.String(required=True, description="Title of the page.")
    content = JSONString(description="Content of the page." + RICH_CONTENT)
    publication_date = Date(
        deprecation_reason=(
            f"{DEPRECATED_IN_3X_FIELD} "
            "Use the `publishedAt` field to fetch the publication date."
        ),
    )
    published_at = graphene.DateTime(
        description="The page publication date." + ADDED_IN_33
    )
    is_published = graphene.Boolean(
        required=True, description="Determines if the page is published."
    )
    slug = graphene.String(required=True, description="Slug of the page.")
    page_type = graphene.Field(
        PageType, required=True, description="Determines the type of page"
    )
    created = graphene.DateTime(
        required=True, description="Date and time at which page was created."
    )
    content_json = JSONString(
        description="Content of the page." + RICH_CONTENT,
        deprecation_reason=f"{DEPRECATED_IN_3X_FIELD} Use the `content` field instead.",
        required=True,
    )
    translation = TranslationField(PageTranslation, type_name="page")
    # NOTE(review): the description below says "product" but this is a page —
    # runtime string left untouched; confirm before changing the public schema.
    attributes = NonNullList(
        SelectedAttribute,
        required=True,
        description="List of attributes assigned to this product.",
    )

    class Meta:
        description = (
            "A static page that can be manually added by a shop operator through the "
            "dashboard."
        )
        interfaces = [graphene.relay.Node, ObjectWithMetadata]
        model = models.Page

    @staticmethod
    def METHOD_NAME(root: models.Page, _info: ResolveInfo):
        # Deprecated `publicationDate` is backed by the `published_at` column.
        return root.published_at

    @staticmethod
    def resolve_created(root: models.Page, _info: ResolveInfo):
        # `created` maps onto the model's `created_at` column.
        return root.created_at

    @staticmethod
    def resolve_page_type(root: models.Page, info: ResolveInfo):
        # Dataloader batches page-type lookups across resolved pages.
        return PageTypeByIdLoader(info.context).load(root.page_type_id)

    @staticmethod
    def resolve_content_json(root: models.Page, _info: ResolveInfo):
        # Deprecated alias of `content`; substitutes {} for NULL content
        # because this field is declared required.
        content = root.content
        return content if content is not None else {}

    @staticmethod
    def resolve_attributes(root: models.Page, info: ResolveInfo):
        return SelectedAttributesByPageIdLoader(info.context).load(root.id)


class PageCountableConnection(CountableConnection):
    # Relay connection wrapper exposing totalCount for Page.
    class Meta:
        doc_category = DOC_CATEGORY_PAGES
        node = Page
7,484 | assert wrapper failure | # pylint: disable=g-bad-file-header
# Copyright 2019 The Bazel Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import os
import subprocess
import textwrap
import unittest
from src.test.py.bazel import test_base
class MockPythonLines(object):
    # Shell-script bodies (split into line lists, as ScratchFile expects)
    # used to fake a `python` executable in the wrapper tests below.

    # Answers `-V` with a version banner; otherwise prints a marker line.
    NORMAL = textwrap.dedent(
        r"""\
        if [ "$1" = "-V" ]; then
          echo "Mock Python 3.xyz!"
        else
          echo "I am mock Python!"
        fi
        """
    ).split("\n")

    # Always fails, regardless of arguments.
    FAIL = textwrap.dedent(r"""\
        echo "Mock failure!"
        exit 1
        """).split("\n")

    # Fails only when asked for its version (`-V`); runs normally otherwise.
    VERSION_ERROR = textwrap.dedent(r"""\
        if [ "$1" = "-V" ]; then
          echo "Error!"
          exit 1
        else
          echo "I am mock Python!"
        fi
        """).split("\n")
# TODO(brandjon): Switch to shutil.which when the test is moved to PY3.
def which(cmd):
    """Locate *cmd* on PATH — a PY2-era stand-in for `shutil.which()`.

    Args:
      cmd: The command (executable) name to lookup; should not contain path
        separators

    Returns:
      The absolute path to the first match in PATH, or None if not found.
    """
    search_dirs = os.environ["PATH"].split(os.pathsep)
    candidates = (os.path.abspath(os.path.join(d, cmd)) for d in search_dirs)
    # Lazily scan PATH entries; stop at the first existing candidate.
    return next((path for path in candidates if os.path.exists(path)), None)
# TODO(brandjon): Move this test to PY3. Blocked (ironically!) on the fix for
# #4815 being available in the host version of Bazel used to run this test.
class PywrapperTest(test_base.TestBase):
"""Unit tests for pywrapper_template.txt.
These tests are based on the instantiation of the template for Python 2. They
ensure that the wrapper can locate, validate, and launch a Python 2 executable
on PATH. To ensure hermeticity, the tests launch the wrapper with PATH
restricted to the scratch directory.
Unix only.
"""
def setup_tool(self, cmd):
    """Copies a command from its system location to the test directory."""
    path = which(cmd)
    self.assertIsNotNone(
        path, msg="Could not locate '%s' command on PATH" % cmd)
    # On recent MacOs versions, copying the coreutils tools elsewhere doesn't
    # work -- they simply fail with "Killed: 9". To workaround that, just
    # re-exec the actual binary.
    self.ScratchFile("dir/" + cmd,
                     ["#!/bin/sh", 'exec {} "$@"'.format(path)],
                     executable=True)

def locate_runfile(self, runfile_path):
    """Resolve *runfile_path* via the runfiles tree; fail the test if absent."""
    resolved_path = self.Rlocation(runfile_path)
    self.assertIsNotNone(
        resolved_path, msg="Could not locate %s in runfiles" % runfile_path)
    return resolved_path
def setUp(self):
super(PywrapperTest, self).setUp()
# Locate scripts under test.
self.wrapper_path = self.locate_runfile(
"io_bazel/tools/python/py3wrapper.sh"
)
self.nonstrict_wrapper_path = self.locate_runfile(
"io_bazel/tools/python/py3wrapper_nonstrict.sh"
)
# Setup scratch directory with all executables the script depends on.
#
# This is brittle, but we need to make sure we can run the script when only
# the scratch directory is on PATH, so that we can control whether or not
# the python executables exist on PATH.
self.setup_tool("which")
self.setup_tool("echo")
self.setup_tool("grep")
def run_with_restricted_path(self, program, title_for_logging=None):
new_env = dict(os.environ)
new_env["PATH"] = self.Path("dir")
proc = subprocess.Popen([program],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
universal_newlines=True,
cwd=self.Path("dir"),
env=new_env)
# TODO(brandjon): Add a timeout arg here when upgraded to PY3.
out, err = proc.communicate()
if title_for_logging is not None:
print(textwrap.dedent("""\
----------------
%s
Exit code: %d
stdout:
%s
stderr:
%s
----------------
""") % (title_for_logging, proc.returncode, out, err))
return proc.returncode, out, err
def run_wrapper(self, title_for_logging):
    """Run the strict wrapper with PATH restricted to the scratch dir."""
    return self.run_with_restricted_path(self.wrapper_path, title_for_logging)

def run_nonstrict_wrapper(self, title_for_logging):
    """Run the non-strict wrapper with PATH restricted to the scratch dir."""
    return self.run_with_restricted_path(self.nonstrict_wrapper_path,
                                         title_for_logging)

def assert_wrapper_success(self, returncode, out, err):
    """Assert the wrapper exited 0 and launched the mock interpreter."""
    self.assertEqual(returncode, 0, msg="Expected to exit without error")
    self.assertEqual(
        out, "I am mock Python!\n", msg="stdout was not as expected")
    self.assertEqual(err, "", msg="Expected to produce no stderr output")

def METHOD_NAME(self, returncode, out, err, message):
    """Assert the wrapper failed (exit 1) with stderr matching *message*."""
    self.assertEqual(returncode, 1, msg="Expected to exit with error code 1")
    # assertRegexpMatches is the PY2-era spelling of assertRegex.
    self.assertRegexpMatches(
        err, message, msg="stderr did not contain expected string")
def test_finds_python(self):
self.ScratchFile("dir/python", MockPythonLines.NORMAL, executable=True)
returncode, out, err = self.run_wrapper("test_finds_python")
self.assert_wrapper_success(returncode, out, err)
def test_no_interpreter_found(self):
returncode, out, err = self.run_wrapper("test_no_interpreter_found")
self.METHOD_NAME(
returncode, out, err, "Neither 'python3' nor 'python' were found"
)
def test_error_getting_version(self):
self.ScratchFile(
"dir/python", MockPythonLines.VERSION_ERROR, executable=True
)
returncode, out, err = self.run_wrapper("test_error_getting_version")
self.METHOD_NAME(returncode, out, err,
"Could not get interpreter version")
def test_interpreter_not_executable(self):
self.ScratchFile(
"dir/python", MockPythonLines.VERSION_ERROR, executable=False
)
returncode, out, err = self.run_wrapper("test_interpreter_not_executable")
self.METHOD_NAME(
returncode, out, err, "Neither 'python3' nor 'python' were found"
)
if __name__ == "__main__":
unittest.main() |
7,485 | execute | from types import ModuleType
from typing import Any, List, Optional, Type
from digitalpy.core.zmanager.action_mapper import ActionMapper
from digitalpy.core.digipy_configuration.configuration import Configuration
from digitalpy.core.zmanager.request import Request
from digitalpy.core.zmanager.response import Response
from digitalpy.core.parsing.load_configuration import Configuration
from digitalpy.core.domain.node import Node
from digitalpy.core.main.controller import Controller
from digitalpy.core.main.object_factory import ObjectFactory
import uuid
from .. import domain
class Domain(Controller):
    """Controller exposing generic node/domain operations.

    Results are generally written onto the response object (via
    ``self.response.set_value``) rather than returned to the caller.
    """

    def __init__(
        self,
        request: Request,
        response: Response,
        domain_action_mapper: ActionMapper,
        configuration: Configuration,
        **kwargs,
    ):
        # Extra **kwargs are accepted (and ignored) for factory compatibility.
        super().__init__(request, response, domain_action_mapper, configuration)
        # Base domain package; may be extended per-call via 'extended_domain'.
        self.domain = domain

    def METHOD_NAME(self, method=None):
        # Dispatch: look up *method* on this controller and invoke it with the
        # request's values expanded as keyword arguments.
        return getattr(self, method)(**self.request.get_values())
def add_child(self, node: Node, child: Node, **kwargs) -> None:
    """Attach *child* as a child of *node*.

    Args:
        node (Node): the parent node
        child (Node): the node to attach under *node*

    Returns:
        the result of the delegated Node.add_child call
    """
    result = node.add_child(child)
    return result
def create_node(self, configuration: Configuration, object_class_name: str, id: str = None, **kwargs) -> None:
    """Create a new node (model object) instance from the domain package.

    The created instance is stored on the response under "model_object".

    Args:
        configuration (Configuration): configuration passed to the node class
        object_class_name (str): name of the domain class to instantiate
        id (str, optional): id for the created node. A fresh UUID is now
            generated per call when omitted. (The old default evaluated
            str(uuid.uuid1()) once at import time, so every call without an
            explicit id silently shared the same id.)
        **kwargs: may carry 'extended_domain', a mapping of extra classes
            used to extend the base domain package.
    """
    if id is None:
        id = str(uuid.uuid1())
    # BUG FIX: kwargs is a dict, so getattr(kwargs, 'extended_domain', {})
    # always returned the default {}; dict.get actually honours the caller's
    # extension mapping.
    self.domain = self._extend_domain(self.domain, kwargs.get("extended_domain", {}))
    # retrieve the requested class from the (possibly extended) domain
    object_class = getattr(self.domain, object_class_name)
    # instantiate an oid for the instance
    oid = ObjectFactory.get_instance("ObjectId", {"id": id, "type": object_class_name})
    # instantiate the object class
    object_class_instance = object_class(configuration, self.domain, oid=oid)
    # expose the new model object on the response
    self.response.set_value("model_object", object_class_instance)
def _extend_domain(self, domain: ModuleType, extended_domain: dict) -> ModuleType:
    """Attach domain extensions from a component onto the base domain package.

    Args:
        domain (ModuleType): the base domain package (a module object)
        extended_domain (dict): mapping of attribute name -> class/object to add

    Returns:
        ModuleType: the same module object, with the extra attributes set
    """
    for attr_name in extended_domain:
        setattr(domain, attr_name, extended_domain[attr_name])
    return domain
def delete_child(self, node: Node, child_id: str, **kwargs):
    """delete a child node
    Args:
        node (Node): the node from which to remove the child
        child_id (str): the id of the child to be deleted
    Returns:
        None
    """
    return node.delete_child(child_id)

def get_children_ex(
    self,
    id,
    node: Node,
    children_type,
    values,
    properties,
    use_regex=True,
    **kwargs,
):
    """Query *node* for matching children; stored under response key "children".

    NOTE(review): *node* is passed both as the receiver and as the second
    positional argument of Node.get_children_ex — confirm this matches the
    Node.get_children_ex signature.
    """
    self.response.set_value(
        "children",
        node.get_children_ex(
            id, node, children_type, values, properties, use_regex
        ),
    )
def get_first_child(self, node: Node, child_type: Type[Node], values: "dict[str, Any]", properties: "dict[str, Any]", use_regex: bool = True, **kwargs) -> None:
    """Find the first matching child of *node*.

    The match (or None) is stored under the response key "first_child";
    nothing is returned directly.

    Args:
        node (Node): The node to get the first child of.
        child_type (Type[Node]): The type of the child to find.
        values (dict[str, Any]): The values the child must have.
        properties (dict[str, Any]): The properties the child must have.
        use_regex (bool, optional): Whether to use regular expressions to match values and properties. Defaults to True.
        **kwargs: Additional keyword arguments (ignored).
    """
    self.response.set_value("first_child", node.get_first_child(child_type, values, properties, use_regex))

def get_next_sibling(self, node: Node, **kwargs) -> None:
    """Store the next sibling of *node* under response key "next_sibling".

    Args:
        node (Node): The node to get the next sibling of.
        **kwargs: Additional keyword arguments (ignored).
    """
    self.response.set_value("next_sibling", node.get_next_sibling())

def get_num_children(self, node: Node, children_type: Optional[Type[Node]] = None, **kwargs) -> None:
    """Store the number of children of *node* under response key "num_children".

    Args:
        node (Node): The node to get the number of children of.
        children_type (Optional[Type[Node]], optional): The type of children to count. If not specified, all children are counted. Defaults to None.
        **kwargs: Additional keyword arguments (ignored).
    """
    self.response.set_value("num_children", node.get_num_children(children_type))

def get_num_parents(self, node: Node, parent_types: Optional[List[Type[Node]]] = None, **kwargs) -> None:
    """Store the number of parents of *node* under response key "num_parents".

    Args:
        node (Node): The node to get the number of parents of.
        parent_types (Optional[List[Type[Node]]], optional): The types of parents to count. If not specified, all parents are counted. Defaults to None.
        **kwargs: Additional keyword arguments (ignored).
    """
    self.response.set_value("num_parents", node.get_num_parents(parent_types))

def get_previous_sibling(self, node: Node) -> None:
    """Store the previous sibling of *node* under response key "previous_sibling".

    Args:
        node (Node): The node to get the previous sibling of.
    """
    self.response.set_value("previous_sibling", node.get_previous_sibling())

def get_parent(self, node: Node) -> None:
    """Store the parent of *node* under response key "parent".

    Args:
        node (Node): The node to get the parent of.
    """
    self.response.set_value("parent", node.get_parent())
7,486 | test invalid file upload | from datetime import timedelta
from unittest import mock
from django.core.files.uploadedfile import SimpleUploadedFile
from django.urls import reverse
from django.utils import timezone
from sentry.models import Environment, File
from sentry.monitors.models import (
CheckInStatus,
Monitor,
MonitorCheckIn,
MonitorEnvironment,
MonitorType,
)
from sentry.testutils.cases import MonitorIngestTestCase
from sentry.testutils.silo import region_silo_test
@region_silo_test(stable=True)
class MonitorIngestCheckinAttachmentEndpointTest(MonitorIngestTestCase):
    # Endpoint tests for uploading an attachment file to a monitor check-in.
    endpoint = "sentry-api-0-organization-monitor-check-in-attachment"

    def get_path(self, monitor, checkin):
        """URL for the attachment endpoint of the given monitor/check-in."""
        return reverse(self.endpoint, args=[self.organization.slug, monitor.slug, checkin.guid])

    def _create_monitor(self):
        """Create a cron monitor in the test org/project, backdated a minute."""
        return Monitor.objects.create(
            organization_id=self.organization.id,
            project_id=self.project.id,
            type=MonitorType.CRON_JOB,
            config={"schedule": "* * * * *"},
            date_added=timezone.now() - timedelta(minutes=1),
        )

    def _create_monitor_environment(self, monitor, name="production", **kwargs):
        """Create a MonitorEnvironment for *monitor* in environment *name*.

        Extra **kwargs override the defaults (status, next_checkin).
        """
        environment = Environment.get_or_create(project=self.project, name=name)
        monitorenvironment_defaults = {
            "status": monitor.status,
            "next_checkin": timezone.now() - timedelta(minutes=1),
            **kwargs,
        }
        return MonitorEnvironment.objects.create(
            monitor=monitor, environment=environment, **monitorenvironment_defaults
        )
def test_upload(self):
monitor = self._create_monitor()
monitor_environment = self._create_monitor_environment(monitor)
checkin = MonitorCheckIn.objects.create(
monitor=monitor,
monitor_environment=monitor_environment,
project_id=self.project.id,
date_added=monitor.date_added,
status=CheckInStatus.IN_PROGRESS,
)
path = self.get_path(monitor, checkin)
resp = self.client.post(
path,
{
"file": SimpleUploadedFile(
"log.txt", b"test log data", content_type="application/text"
),
},
format="multipart",
**self.token_auth_headers,
)
assert resp.status_code == 200, resp.content
checkin = MonitorCheckIn.objects.get(id=checkin.id)
assert checkin.status == CheckInStatus.IN_PROGRESS
file = File.objects.get(id=checkin.attachment_id)
assert file.name == "log.txt"
assert file.getfile().read() == b"test log data"
def test_upload_no_file(self):
monitor = self._create_monitor()
monitor_environment = self._create_monitor_environment(monitor)
checkin = MonitorCheckIn.objects.create(
monitor=monitor,
monitor_environment=monitor_environment,
project_id=self.project.id,
date_added=monitor.date_added,
status=CheckInStatus.IN_PROGRESS,
)
path = self.get_path(monitor, checkin)
resp = self.client.post(
path,
{},
format="multipart",
**self.token_auth_headers,
)
assert resp.status_code == 400
assert resp.data["detail"] == "Missing uploaded file"
@mock.patch(
"sentry.monitors.endpoints.monitor_ingest_checkin_attachment.MAX_ATTACHMENT_SIZE", 1
)
def test_upload_file_too_big(self):
monitor = self._create_monitor()
monitor_environment = self._create_monitor_environment(monitor)
checkin = MonitorCheckIn.objects.create(
monitor=monitor,
monitor_environment=monitor_environment,
project_id=self.project.id,
date_added=monitor.date_added,
status=CheckInStatus.IN_PROGRESS,
)
path = self.get_path(monitor, checkin)
resp = self.client.post(
path,
{
"file": SimpleUploadedFile(
"log.txt", b"test log data", content_type="application/text"
),
},
format="multipart",
**self.token_auth_headers,
)
assert resp.status_code == 400
assert resp.data["detail"] == "Please keep uploads below 100kb"
def test_duplicate_upload(self):
monitor = self._create_monitor()
monitor_environment = self._create_monitor_environment(monitor)
checkin = MonitorCheckIn.objects.create(
monitor=monitor,
monitor_environment=monitor_environment,
project_id=self.project.id,
date_added=monitor.date_added,
status=CheckInStatus.IN_PROGRESS,
)
path = self.get_path(monitor, checkin)
resp = self.client.post(
path,
{
"file": SimpleUploadedFile(
"log.txt", b"test log data", content_type="application/text"
),
},
format="multipart",
**self.token_auth_headers,
)
assert resp.status_code == 200, resp.content
checkin = MonitorCheckIn.objects.get(id=checkin.id)
assert checkin.status == CheckInStatus.IN_PROGRESS
file = File.objects.get(id=checkin.attachment_id)
assert file.name == "log.txt"
assert file.getfile().read() == b"test log data"
resp = self.client.post(
path,
{
"file": SimpleUploadedFile(
"log.txt", b"test log data", content_type="application/text"
),
},
format="multipart",
**self.token_auth_headers,
)
assert resp.status_code == 400
assert resp.data["detail"] == "Check-in already has an attachment"
def METHOD_NAME(self):
    """A non-file "file" field must be rejected with a 400."""
    monitor = self._create_monitor()
    monitor_environment = self._create_monitor_environment(monitor)
    checkin = MonitorCheckIn.objects.create(
        monitor=monitor,
        monitor_environment=monitor_environment,
        project_id=self.project.id,
        date_added=monitor.date_added,
        status=CheckInStatus.IN_PROGRESS,
    )
    path = self.get_path(monitor, checkin)
    # A plain string is not an upload object, so the endpoint rejects it.
    resp = self.client.post(
        path,
        {"file": "invalid_file"},
        format="multipart",
        **self.token_auth_headers,
    )
    assert resp.status_code == 400
    assert resp.data["detail"] == "Please upload a valid file object"
7,487 | add file | #
# Copyright (c) 2008--2016 Red Hat, Inc.
#
# This software is licensed to you under the GNU General Public License,
# version 2 (GPLv2). There is NO WARRANTY for this software, express or
# implied, including the implied warranties of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. You should have received a copy of GPLv2
# along with this software; if not, see
# http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
#
# Red Hat trademarks are not licensed under GPLv2. No permission is
# granted to use or replicate Red Hat trademarks that are incorporated
# in this software or its documentation.
#
import os
#Base class for all modes.
class BaseMode:
    """Base class for all modes: a named boolean on/off state holder."""

    def __init__(self):
        # state is True while the mode is enabled; name identifies the mode.
        self.state = False
        self.name = ""

    def on(self):
        """Enable the mode."""
        self.state = True

    def off(self):
        """Disable the mode."""
        self.state = False

    def is_on(self):
        """Return True when the mode is enabled."""
        # Simplified from an if/else that returned literal True/False.
        return bool(self.state)

    def is_off(self):
        """Return True when the mode is disabled."""
        return not self.state

    def set_name(self, name):
        """Set the mode's name."""
        self.name = name

    def get_name(self):
        """Return the mode's name."""
        return self.name
#Contains the directory and file manipulation stuff
class PathHandler:
    """Creates, removes and checks marker files under the rhn_root directory."""

    def __init__(self):
        # Default marker directory; subclasses/callers may point elsewhere
        # via set_rhn_root().
        self.rhn_root = "/etc/sysconfig/rhn/allowed-actions/configfiles"

    # Set the rhn_root variable.
    def set_rhn_root(self, rhn_root):
        self.rhn_root = rhn_root

    # Creates the self.rhn_root directories if they don't already exist. This
    # allows subclasses to implement modes in different locations.
    def _create_rhnconfig_path(self):
        if not os.path.exists(self.rhn_root):
            os.makedirs(self.rhn_root, int('0770', 8))

    # Create the marker file if it doesn't already exist.
    def METHOD_NAME(self, filename):
        self._create_rhnconfig_path()
        if not self.check_for_file(filename):
            # The old 'except Exception: raise' wrapper was a no-op; errors
            # propagate naturally. The context manager guarantees the handle
            # is closed even if creation partially fails.
            with open(os.path.join(self.rhn_root, filename), "w"):
                pass

    # Remove the marker file if it's present.
    def remove_file(self, filename):
        self._create_rhnconfig_path()
        if self.check_for_file(filename):
            os.remove(os.path.join(self.rhn_root, filename))

    # Returns True if filename exists under rhn_root.
    def check_for_file(self, filename):
        self._create_rhnconfig_path()
        return os.path.exists(os.path.join(self.rhn_root, filename))
#Stuff that's common to the Mode subclasses.
class ConfigFilesBaseMode(BaseMode):
    """Mode whose on/off state is mirrored by a marker file on disk.

    is_on()/is_off() consult the marker file (not self.state), so the file
    on disk is the authoritative source of truth.
    """

    def __init__(self):
        BaseMode.__init__(self)
        self.ph = PathHandler()
        self.name = None  # Must be set in subclass

    def on(self):
        """Enable the mode by creating its marker file."""
        self.ph.METHOD_NAME(self.name)
        self.state = True

    def off(self):
        """Disable the mode by removing its marker file."""
        self.ph.remove_file(self.name)
        self.state = False

    def is_on(self):
        """Return True when the marker file exists."""
        return self.ph.check_for_file(self.name)

    def is_off(self):
        """Return True when the marker file does not exist."""
        # Simplified from an if/elif pair that could structurally fall
        # through and return None.
        return not self.ph.check_for_file(self.name)
# Concrete modes. Each pairs a marker-file name with the rhn_root directory
# it lives in; the Solaris variants use the /opt/redhat/rhn/solaris prefix,
# the plain variants the default /etc/sysconfig/rhn locations.

class RunMode(ConfigFilesBaseMode):
    # "run" marker under allowed-actions/script.
    def __init__(self):
        ConfigFilesBaseMode.__init__(self)
        self.name = "run"
        self.ph.set_rhn_root("/etc/sysconfig/rhn/allowed-actions/script")

class RunAllMode(ConfigFilesBaseMode):
    # "all" marker under allowed-actions/script.
    def __init__(self):
        ConfigFilesBaseMode.__init__(self)
        self.name = "all"
        self.ph.set_rhn_root("/etc/sysconfig/rhn/allowed-actions/script")

class AllMode(ConfigFilesBaseMode):
    # "all" marker under the default configfiles root.
    def __init__(self):
        ConfigFilesBaseMode.__init__(self)
        self.name = "all"

class DeployMode(ConfigFilesBaseMode):
    def __init__(self):
        ConfigFilesBaseMode.__init__(self)
        self.name = "deploy"

class DiffMode(ConfigFilesBaseMode):
    def __init__(self):
        ConfigFilesBaseMode.__init__(self)
        self.name = "diff"

class UploadMode(ConfigFilesBaseMode):
    def __init__(self):
        ConfigFilesBaseMode.__init__(self)
        self.name = "upload"

class MTimeUploadMode(ConfigFilesBaseMode):
    def __init__(self):
        ConfigFilesBaseMode.__init__(self)
        self.name = "mtime_upload"

# Solaris Specific Modes

class SolarisRunMode(ConfigFilesBaseMode):
    def __init__(self):
        ConfigFilesBaseMode.__init__(self)
        self.name = "run"
        self.ph.set_rhn_root("/opt/redhat/rhn/solaris/etc/sysconfig/rhn/allowed-actions/script")

class SolarisAllRunMode(ConfigFilesBaseMode):
    def __init__(self):
        ConfigFilesBaseMode.__init__(self)
        self.name = "all"
        self.ph.set_rhn_root("/opt/redhat/rhn/solaris/etc/sysconfig/rhn/allowed-actions/script")

class SolarisAllMode(ConfigFilesBaseMode):
    def __init__(self):
        ConfigFilesBaseMode.__init__(self)
        self.name = "all"
        self.ph.set_rhn_root("/opt/redhat/rhn/solaris/etc/sysconfig/rhn/allowed-actions/configfiles")

class SolarisDeployMode(ConfigFilesBaseMode):
    def __init__(self):
        ConfigFilesBaseMode.__init__(self)
        self.name = "deploy"
        self.ph.set_rhn_root("/opt/redhat/rhn/solaris/etc/sysconfig/rhn/allowed-actions/configfiles")

class SolarisDiffMode(ConfigFilesBaseMode):
    def __init__(self):
        ConfigFilesBaseMode.__init__(self)
        self.name = "diff"
        self.ph.set_rhn_root("/opt/redhat/rhn/solaris/etc/sysconfig/rhn/allowed-actions/configfiles")

class SolarisUploadMode(ConfigFilesBaseMode):
    def __init__(self):
        ConfigFilesBaseMode.__init__(self)
        self.name = "upload"
        self.ph.set_rhn_root("/opt/redhat/rhn/solaris/etc/sysconfig/rhn/allowed-actions/configfiles")

class SolarisMTimeUploadMode(ConfigFilesBaseMode):
    def __init__(self):
        ConfigFilesBaseMode.__init__(self)
        self.name = "mtime_upload"
        self.ph.set_rhn_root("/opt/redhat/rhn/solaris/etc/sysconfig/rhn/allowed-actions/configfiles")
|
7,488 | last modified | # coding=utf-8
# *** WARNING: this file was generated by pulumi. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import copy
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from . import outputs
__all__ = [
'GetMediaGraphResult',
'AwaitableGetMediaGraphResult',
'get_media_graph',
'get_media_graph_output',
]
@pulumi.output_type
class GetMediaGraphResult:
"""
The Media Graph.
"""
def __init__(__self__, created=None, description=None, id=None, METHOD_NAME=None, name=None, sinks=None, sources=None, state=None, type=None):
if created and not isinstance(created, str):
raise TypeError("Expected argument 'created' to be a str")
pulumi.set(__self__, "created", created)
if description and not isinstance(description, str):
raise TypeError("Expected argument 'description' to be a str")
pulumi.set(__self__, "description", description)
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if METHOD_NAME and not isinstance(METHOD_NAME, str):
raise TypeError("Expected argument 'last_modified' to be a str")
pulumi.set(__self__, "last_modified", METHOD_NAME)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if sinks and not isinstance(sinks, list):
raise TypeError("Expected argument 'sinks' to be a list")
pulumi.set(__self__, "sinks", sinks)
if sources and not isinstance(sources, list):
raise TypeError("Expected argument 'sources' to be a list")
pulumi.set(__self__, "sources", sources)
if state and not isinstance(state, str):
raise TypeError("Expected argument 'state' to be a str")
pulumi.set(__self__, "state", state)
if type and not isinstance(type, str):
raise TypeError("Expected argument 'type' to be a str")
pulumi.set(__self__, "type", type)
@property
@pulumi.getter
def created(self) -> str:
"""
Date the Media Graph was created.
"""
return pulumi.get(self, "created")
@property
@pulumi.getter
def description(self) -> Optional[str]:
"""
Media Graph description.
"""
return pulumi.get(self, "description")
@property
@pulumi.getter
def id(self) -> str:
"""
Fully qualified resource ID for the resource.
"""
return pulumi.get(self, "id")
@property
@pulumi.getter(name="lastModified")
def METHOD_NAME(self) -> str:
"""
Date the Media Graph was last modified.
"""
return pulumi.get(self, "last_modified")
@property
@pulumi.getter
def name(self) -> str:
"""
The name of the resource.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter
def sinks(self) -> Sequence['outputs.MediaGraphAssetSinkResponse']:
"""
Media Graph sinks.
"""
return pulumi.get(self, "sinks")
@property
@pulumi.getter
def sources(self) -> Sequence['outputs.MediaGraphRtspSourceResponse']:
"""
Media Graph sources.
"""
return pulumi.get(self, "sources")
@property
@pulumi.getter
def state(self) -> str:
"""
Media Graph state which indicates the resource allocation status for running the media graph pipeline.
"""
return pulumi.get(self, "state")
@property
@pulumi.getter
def type(self) -> str:
"""
The type of the resource.
"""
return pulumi.get(self, "type")
class AwaitableGetMediaGraphResult(GetMediaGraphResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetMediaGraphResult(
created=self.created,
description=self.description,
id=self.id,
METHOD_NAME=self.METHOD_NAME,
name=self.name,
sinks=self.sinks,
sources=self.sources,
state=self.state,
type=self.type)
def get_media_graph(account_name: Optional[str] = None,
                    media_graph_name: Optional[str] = None,
                    resource_group_name: Optional[str] = None,
                    opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetMediaGraphResult:
    """
    Get the details of a Media Graph in the Media Services account.
    Azure REST API version: 2020-02-01-preview.

    :param str account_name: The Media Services account name.
    :param str media_graph_name: The Media Graph name.
    :param str resource_group_name: The name of the resource group within the Azure subscription.
    """
    # Map python_case arguments onto the camelCase invoke arguments the
    # provider expects.
    __args__ = dict()
    __args__['accountName'] = account_name
    __args__['mediaGraphName'] = media_graph_name
    __args__['resourceGroupName'] = resource_group_name
    opts = pulumi.InvokeOptions.merge(_utilities.get_invoke_opts_defaults(), opts)
    # Invoke the provider function and unwrap the typed result value.
    __ret__ = pulumi.runtime.invoke('azure-native:media:getMediaGraph', __args__, opts=opts, typ=GetMediaGraphResult).value
    return AwaitableGetMediaGraphResult(
        created=pulumi.get(__ret__, 'created'),
        description=pulumi.get(__ret__, 'description'),
        id=pulumi.get(__ret__, 'id'),
        METHOD_NAME=pulumi.get(__ret__, 'last_modified'),
        name=pulumi.get(__ret__, 'name'),
        sinks=pulumi.get(__ret__, 'sinks'),
        sources=pulumi.get(__ret__, 'sources'),
        state=pulumi.get(__ret__, 'state'),
        type=pulumi.get(__ret__, 'type'))
@_utilities.lift_output_func(get_media_graph)
def get_media_graph_output(account_name: Optional[pulumi.Input[str]] = None,
                           media_graph_name: Optional[pulumi.Input[str]] = None,
                           resource_group_name: Optional[pulumi.Input[str]] = None,
                           opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetMediaGraphResult]:
    """
    Get the details of a Media Graph in the Media Services account.
    Azure REST API version: 2020-02-01-preview.

    :param str account_name: The Media Services account name.
    :param str media_graph_name: The Media Graph name.
    :param str resource_group_name: The name of the resource group within the Azure subscription.
    """
    # Body intentionally empty: lift_output_func generates the Output-aware
    # implementation from get_media_graph above.
    ...
7,489 | test converts max bandwidth as string | # Copyright 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
from awscli.testutils import unittest
from awscli.customizations.s3 import transferconfig
from awscli.compat import six
class TestTransferConfig(unittest.TestCase):
    """Tests for RuntimeConfig: validation and normalization of the s3
    transfer settings a user can set in their CLI config."""
    def build_config_with(self, **config_from_user):
        # Helper: run only the supplied overrides through build_config().
        return transferconfig.RuntimeConfig().build_config(**config_from_user)
    def test_user_provides_no_config_uses_default(self):
        # If the user does not provide any config overrides,
        # we should just use the default values defined in
        # the module.
        config = transferconfig.RuntimeConfig()
        runtime_config = config.build_config()
        self.assertEqual(runtime_config, transferconfig.DEFAULTS)
    def test_user_provides_partial_overrides(self):
        # Values arrive from the config file as strings and must be coerced.
        config_from_user = {
            'max_concurrent_requests': '20',
            'multipart_threshold': str(64 * (1024 ** 2)),
        }
        runtime_config = self.build_config_with(**config_from_user)
        # Our overrides were accepted.
        self.assertEqual(runtime_config['multipart_threshold'],
                         int(config_from_user['multipart_threshold']))
        self.assertEqual(runtime_config['max_concurrent_requests'],
                         int(config_from_user['max_concurrent_requests']))
        # And defaults were used for values not specified.
        self.assertEqual(runtime_config['max_queue_size'],
                         int(transferconfig.DEFAULTS['max_queue_size']))
    def test_validates_integer_types(self):
        with self.assertRaises(transferconfig.InvalidConfigError):
            self.build_config_with(max_concurrent_requests="not an int")
    def test_validates_positive_integers(self):
        with self.assertRaises(transferconfig.InvalidConfigError):
            self.build_config_with(max_concurrent_requests="-10")
    def test_min_value(self):
        # Zero concurrency is rejected, not just negatives.
        with self.assertRaises(transferconfig.InvalidConfigError):
            self.build_config_with(max_concurrent_requests="0")
    def test_human_readable_sizes_converted_to_bytes(self):
        runtime_config = self.build_config_with(multipart_threshold="10MB")
        self.assertEqual(runtime_config['multipart_threshold'],
                         10 * 1024 * 1024)
    def test_long_value(self):
        # MAXSIZE is the max size of an int on python 2 and the maximum size
        # of Py_ssize_t on python 3, but notably not the maximum size of an
        # int since they are effectively unbounded.
        long_value = six.MAXSIZE + 1
        runtime_config = self.build_config_with(
            multipart_threshold=long_value)
        self.assertEqual(runtime_config['multipart_threshold'], long_value)
    def METHOD_NAME(self):
        # NOTE(review): METHOD_NAME is presumably a placeholder for something
        # like test_converts_max_bandwidth_as_string — confirm before renaming.
        runtime_config = self.build_config_with(max_bandwidth='1MB/s')
        self.assertEqual(runtime_config['max_bandwidth'], 1024 * 1024)
    def test_validates_max_bandwidth_no_seconds(self):
        # A bare size with no rate suffix is ambiguous and must be rejected.
        with self.assertRaises(transferconfig.InvalidConfigError):
            self.build_config_with(max_bandwidth='1MB')
    def test_validates_max_bandwidth_in_bits_per_second(self):
        # Lowercase 'b' (bits) is rejected; only bytes-per-second is accepted.
        with self.assertRaises(transferconfig.InvalidConfigError):
            self.build_config_with(max_bandwidth='1Mb/s')
class TestConvertToS3TransferConfig(unittest.TestCase):
    """Verify create_transfer_config_from_runtime_config maps runtime keys
    onto the corresponding s3transfer TransferConfig attributes."""
    def test_convert(self):
        runtime_config = {
            'multipart_threshold': 1,
            'multipart_chunksize': 2,
            'max_concurrent_requests': 3,
            'max_queue_size': 4,
            'max_bandwidth': 1024 * 1024,
            'addressing_style': 'path',
            'use_accelerate_endpoint': True,
            # This is a TransferConfig only option, it should
            # just be ignored if it's in the ~/.aws/config for now.
            'max_in_memory_upload_chunks': 1000,
        }
        converted = transferconfig.create_transfer_config_from_runtime_config(
            runtime_config)
        # runtime-config key -> TransferConfig attribute expectations.
        expected_attrs = {
            'multipart_threshold': 1,
            'multipart_chunksize': 2,
            'max_request_concurrency': 3,
            'max_request_queue_size': 4,
            'max_bandwidth': 1024 * 1024,
        }
        for attr_name, expected in expected_attrs.items():
            self.assertEqual(getattr(converted, attr_name), expected)
        # The TransferConfig-only option must not have been copied over.
        self.assertNotEqual(converted.max_in_memory_upload_chunks, 1000)
7,490 | get response for test | # pylint: disable=redefined-outer-name
from unittest.mock import Mock
import pytest
from django.contrib.sessions.middleware import SessionMiddleware
from django.http import HttpResponse
from oscar.apps.checkout.forms import ShippingAddressForm
from oscar.apps.checkout.utils import CheckoutSessionData
def METHOD_NAME(request):
    """Minimal view (always an empty 200 response) used as the get_response
    callable required by SessionMiddleware in the csdf fixture below.

    NOTE(review): METHOD_NAME is presumably a placeholder for something like
    get_response_for_test — confirm before renaming.
    """
    return HttpResponse()
@pytest.fixture
def csdf(rf):
    """CheckoutSessionData bound to a fresh GET request whose session has
    been initialised by SessionMiddleware."""
    request = rf.get("/")
    middleware = SessionMiddleware(METHOD_NAME)
    middleware.process_request(request)
    return CheckoutSessionData(request)
def get_address_fields():
    """Return a dict of concrete field values for a valid shipping address,
    shaped like the input ship_to_new_address()/bill_to_new_address() expect.
    """
    # Replace ShippingAddressForm.__init__ with a bare super() call.
    # NOTE(review): this presumably strips request-dependent setup so the
    # form can be built standalone — confirm intent. The patch is applied
    # module-wide and is never undone.
    def new_init(self, *args, **kwargs):
        super(ShippingAddressForm, self).__init__(*args, **kwargs)
    ShippingAddressForm.__init__ = new_init
    data = {
        "first_name": "John",
        "last_name": "Doe",
        "line1": "1 Egg Road",
        "line4": "Shell City",
        "postcode": "N12 9RT",
        "phone_number": "+49231555555",
    }
    form = ShippingAddressForm(data)
    # is_valid() populates form.instance; the return value is ignored.
    form.is_valid()
    # Collect field values from the unsaved model instance, skipping
    # Django-internal attributes (leading underscore).
    address_fields = dict(
        (k, v) for (k, v) in form.instance.__dict__.items() if not k.startswith("_")
    )
    return address_fields
def test__get(csdf):
    # Unknown namespace/key lookups fall back to the supplied default.
    assert (
        csdf._get("non-existent-namespace", "non-existent-key", "default-value")
        == "default-value"
    )
def test__unset(csdf):
    # Setting then unsetting a key removes it from the session namespace.
    csdf._set("test-namespace", "test-key", "test-value")
    csdf._unset("test-namespace", "test-key")
    namespace = csdf.request.session[csdf.SESSION_KEY]["test-namespace"]
    assert "test-key" not in namespace
def test_flush(csdf):
    # flush() wipes every checkout namespace from the session.
    csdf._set("test-namespace", "the-key", "the-value")
    csdf.flush()
    session_data = csdf.request.session[csdf.SESSION_KEY]
    assert session_data == {}
def test_set_get_guest_email(csdf):
    # The guest email round-trips through the session storage.
    guest_email = "info@example.com"
    csdf.set_guest_email(guest_email)
    stored = csdf.request.session[csdf.SESSION_KEY]["guest"]["email"]
    assert stored == guest_email
    assert csdf.get_guest_email() == guest_email
def test_reset_shipping_data(csdf):
    # Resetting leaves an empty shipping namespace behind.
    csdf.reset_shipping_data()
    shipping = csdf.request.session[csdf.SESSION_KEY]["shipping"]
    assert shipping == {}
def test_ship_to_user_address(csdf):
    # Selecting a saved user address stores its id in the shipping namespace.
    user_address = Mock(id=1)
    csdf.ship_to_user_address(user_address)
    shipping = csdf.request.session[csdf.SESSION_KEY]["shipping"]
    assert shipping["user_address_id"] == 1
    assert csdf.shipping_user_address_id() == 1
def test_serialize_new_address_with_phone_number(csdf):
    """PhoneNumber objects must survive session serialization: after storing
    the address for both shipping and billing, the session must still encode,
    and the stored phone number reads back in international string form."""
    address_fields = get_address_fields()
    csdf.ship_to_new_address(address_fields)
    csdf.bill_to_new_address(address_fields)
    # Force-encode the raw session payload to prove it is serializable.
    data = csdf.request.session._get_session(no_load=True)
    assert csdf.request.session.encode(data)
    address_fields["phone_number"] = address_fields["phone_number"].as_international
    assert (
        address_fields
        == csdf.new_billing_address_fields()
        == csdf.new_shipping_address_fields()
    )
def test_new_shipping_address_fields(csdf):
    # The stored fields read back with the phone number normalised to its
    # international notation.
    fields = get_address_fields()
    csdf.ship_to_new_address(fields)
    fields["phone_number"] = fields["phone_number"].as_international
    assert csdf.new_shipping_address_fields() == fields
def test_use_free_shipping(csdf):
    # Free shipping is recorded under the sentinel method code "__free__".
    csdf.use_free_shipping()
    shipping = csdf.request.session[csdf.SESSION_KEY]["shipping"]
    assert shipping["method_code"] == "__free__"
def test_bill_to_shipping_address(csdf):
    """Choosing "bill to shipping address" must clear any previously selected
    billing user address and set the same-as-shipping flag."""
    address = Mock(id=1)
    csdf.bill_to_user_address(address)
    assert csdf.billing_user_address_id() == 1
    csdf.bill_to_shipping_address()
    # The explicit billing address selection is removed...
    assert "user_address_id" not in csdf.request.session[csdf.SESSION_KEY]["billing"]
    # ...and replaced by the same-as-shipping flag.
    assert (
        csdf.request.session[csdf.SESSION_KEY]["billing"][
            "billing_address_same_as_shipping"
        ]
        is True
    )
    assert csdf.is_billing_address_same_as_shipping() is True
    assert csdf.is_billing_address_set() is True
def test_payment_methods(csdf):
    # The chosen payment method is stored and read back unchanged.
    csdf.pay_by("paypal")
    payment = csdf.request.session[csdf.SESSION_KEY]["payment"]
    assert payment["method"] == "paypal"
    assert csdf.payment_method() == "paypal"
def test_order_number(csdf):
    """
    The order number round-trips through set_order_number()/get_order_number().

    :param CheckoutSessionData csdf: fixture defined above
    :return: None
    """
    csdf.set_order_number("55555")
    assert csdf.get_order_number() == "55555"
7,491 | sync services | from django.conf import settings
from django.db.models import Q
import os
from geotrek.diving import models
from geotrek.diving import views as diving_views
if 'geotrek.sensitivity' in settings.INSTALLED_APPS:
from geotrek.sensitivity import views as sensitivity_views
if 'geotrek.tourism' in settings.INSTALLED_APPS:
from geotrek.tourism import views as tourism_views
class SyncRando:
    """Sync diving content (dives plus their POIs, services, media, PDFs,
    metas and related tourism/sensitivity layers) for the rando export.

    All file generation is delegated to the global sync object given to the
    constructor; this class only decides what to sync per language/dive.
    """
    def __init__(self, sync):
        self.global_sync = sync
    def sync(self, lang):
        """Sync pictograms, the dives geojson and the detail of every dive
        published in ``lang`` (restricted by source/portal when configured)."""
        models_picto = [models.Practice, models.Difficulty, models.Level]
        self.global_sync.sync_pictograms(lang, models_picto, zipfile=self.global_sync.zipfile)
        self.global_sync.sync_geojson(lang, diving_views.DiveAPIViewSet, 'dives.geojson')
        dives = models.Dive.objects.existing().order_by('pk')
        dives = dives.filter(**{'published_{lang}'.format(lang=lang): True})
        if self.global_sync.source:
            dives = dives.filter(source__name__in=self.global_sync.source)
        if self.global_sync.portal:
            # Dives without a portal are visible on every portal.
            dives = dives.filter(Q(portal__name=self.global_sync.portal) | Q(portal=None))
        for dive in dives:
            self.sync_detail(lang, dive)
    def sync_pois(self, lang, dive):
        """Write the dive's POIs as api/<lang>/dives/<pk>/pois.geojson."""
        params = {'format': 'geojson'}
        view = diving_views.DivePOIViewSet.as_view({'get': 'list'})
        name = os.path.join('api', lang, 'dives', str(dive.pk), 'pois.geojson')
        self.global_sync.sync_view(lang, view, name, params=params, pk=dive.pk)
    def METHOD_NAME(self, lang, dive):
        """Write the dive's services as api/<lang>/dives/<pk>/services.geojson.

        NOTE(review): METHOD_NAME is presumably a placeholder for
        ``sync_services`` — confirm before renaming.
        """
        view = diving_views.DiveServiceViewSet.as_view({'get': 'list'})
        name = os.path.join('api', lang, 'dives', str(dive.pk), 'services.geojson')
        self.global_sync.sync_view(lang, view, name, params={'format': 'geojson'}, pk=dive.pk)
    def sync_detail(self, lang, dive):
        """Sync one dive: metas, PDF, POIs/services, media, and the optional
        tourism/sensitivity layers depending on installed apps and options."""
        self.global_sync.sync_metas(lang, diving_views.DiveMeta, dive)
        self.global_sync.sync_pdf(lang, dive, diving_views.DiveDocumentPublic.as_view(model=type(dive)))
        if 'geotrek.trekking' in settings.INSTALLED_APPS:
            self.sync_pois(lang, dive)
            self.METHOD_NAME(lang, dive)
        for picture, resized in dive.resized_pictures:
            self.global_sync.sync_media_file(lang, resized)
        for poi in dive.published_pois:
            if poi.resized_pictures:
                self.global_sync.sync_media_file(lang, poi.resized_pictures[0][1])
            for picture, resized in poi.resized_pictures[1:]:
                self.global_sync.sync_media_file(lang, resized)
            for other_file in poi.files:
                self.global_sync.sync_media_file(lang, other_file.attachment_file)
        if 'geotrek.tourism' in settings.INSTALLED_APPS:
            if self.global_sync.with_events:
                self.sync_touristicevents(lang, dive)
            if self.global_sync.categories:
                self.sync_touristiccontents(lang, dive)
        if 'geotrek.sensitivity' in settings.INSTALLED_APPS:
            self.sync_sensitiveareas(lang, dive)
    def sync_touristiccontents(self, lang, dive):
        """Sync the dive's touristic contents (filtered by category) and
        their media files."""
        params = {'format': 'geojson',
                  'categories': ','.join(category for category in self.global_sync.categories)}
        self.global_sync.get_params_portal(params)
        view = tourism_views.DiveTouristicContentViewSet.as_view({'get': 'list'})
        name = os.path.join('api', lang, 'dives', str(dive.pk), 'touristiccontents.geojson')
        self.global_sync.sync_view(lang, view, name, params=params, pk=dive.pk)
        for content in dive.touristic_contents.all():
            self.sync_touristiccontent_media(lang, content)
    def sync_touristicevents(self, lang, dive):
        """Sync the dive's touristic events and their media files."""
        params = {'format': 'geojson'}
        self.global_sync.get_params_portal(params)
        view = tourism_views.DiveTouristicEventViewSet.as_view({'get': 'list'})
        name = os.path.join('api', lang, 'dives', str(dive.pk), 'touristicevents.geojson')
        self.global_sync.sync_view(lang, view, name, params=params, pk=dive.pk)
        for event in dive.touristic_events.all():
            self.sync_touristicevent_media(lang, event)
    def sync_sensitiveareas(self, lang, dive):
        """Sync the dive's terrestrial sensitive areas geojson."""
        params = {'format': 'geojson', 'practices': 'Terrestre'}
        view = sensitivity_views.DiveSensitiveAreaViewSet.as_view({'get': 'list'})
        name = os.path.join('api', lang, 'dives', str(dive.pk), 'sensitiveareas.geojson')
        self.global_sync.sync_view(lang, view, name, params=params, pk=dive.pk)
    def sync_touristicevent_media(self, lang, event, zipfile=None):
        """Sync an event's pictures (only the first goes into the zipfile)."""
        if event.resized_pictures:
            self.global_sync.sync_media_file(lang, event.resized_pictures[0][1], zipfile=zipfile)
        for picture, resized in event.resized_pictures[1:]:
            self.global_sync.sync_media_file(lang, resized)
    def sync_touristiccontent_media(self, lang, content, zipfile=None):
        """Sync a content's pictures (only the first goes into the zipfile)."""
        if content.resized_pictures:
            self.global_sync.sync_media_file(lang, content.resized_pictures[0][1], zipfile=zipfile)
        for picture, resized in content.resized_pictures[1:]:
            self.global_sync.sync_media_file(lang, resized)
7,492 | create schema | # Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
import grpc
from ..monitoring import MonitoredEntity_pb2 as monitoring_dot_MonitoredEntity__pb2
from ..monitoring import Schema_pb2 as monitoring_dot_Schema__pb2
class SchemaServiceStub(object):
    # missing associated documentation comment in .proto file
    # (generated client stub for ai.verta.monitoring.SchemaService)
    pass

    def __init__(self, channel):
        """Constructor.

        Args:
          channel: A grpc.Channel.
        """
        # One unary-unary callable per RPC, wired with the protobuf
        # (de)serializers for its request/response messages.
        # NOTE(review): METHOD_NAME is presumably a placeholder for the
        # `createSchema` attribute — confirm against the generator.
        self.METHOD_NAME = channel.unary_unary(
            '/ai.verta.monitoring.SchemaService/createSchema',
            request_serializer=monitoring_dot_Schema__pb2.CreateSchema.SerializeToString,
            response_deserializer=monitoring_dot_Schema__pb2.Schema.FromString,
            )
        self.updateSchema = channel.unary_unary(
            '/ai.verta.monitoring.SchemaService/updateSchema',
            request_serializer=monitoring_dot_Schema__pb2.UpdateSchema.SerializeToString,
            response_deserializer=monitoring_dot_Schema__pb2.Schema.FromString,
            )
        self.findSchema = channel.unary_unary(
            '/ai.verta.monitoring.SchemaService/findSchema',
            request_serializer=monitoring_dot_Schema__pb2.FindSchemas.SerializeToString,
            response_deserializer=monitoring_dot_Schema__pb2.FindSchemas.Response.FromString,
            )
        self.deleteSchema = channel.unary_unary(
            '/ai.verta.monitoring.SchemaService/deleteSchema',
            request_serializer=monitoring_dot_Schema__pb2.DeleteSchema.SerializeToString,
            response_deserializer=monitoring_dot_MonitoredEntity__pb2.Empty.FromString,
            )
class SchemaServiceServicer(object):
    # missing associated documentation comment in .proto file
    # (generated server-side base class; subclass and override the handlers)
    pass

    def METHOD_NAME(self, request, context):
        # missing associated documentation comment in .proto file
        # (placeholder for the createSchema handler — see the registration
        # mapping in add_SchemaServiceServicer_to_server)
        pass
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def updateSchema(self, request, context):
        # missing associated documentation comment in .proto file
        pass
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def findSchema(self, request, context):
        # missing associated documentation comment in .proto file
        pass
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def deleteSchema(self, request, context):
        # missing associated documentation comment in .proto file
        pass
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')
def add_SchemaServiceServicer_to_server(servicer, server):
    # Generated registration helper: map each RPC name to the servicer's
    # handler with its protobuf (de)serializers, then attach the generic
    # handler for the full service to the given grpc server.
    rpc_method_handlers = {
        'createSchema': grpc.unary_unary_rpc_method_handler(
            servicer.METHOD_NAME,
            request_deserializer=monitoring_dot_Schema__pb2.CreateSchema.FromString,
            response_serializer=monitoring_dot_Schema__pb2.Schema.SerializeToString,
        ),
        'updateSchema': grpc.unary_unary_rpc_method_handler(
            servicer.updateSchema,
            request_deserializer=monitoring_dot_Schema__pb2.UpdateSchema.FromString,
            response_serializer=monitoring_dot_Schema__pb2.Schema.SerializeToString,
        ),
        'findSchema': grpc.unary_unary_rpc_method_handler(
            servicer.findSchema,
            request_deserializer=monitoring_dot_Schema__pb2.FindSchemas.FromString,
            response_serializer=monitoring_dot_Schema__pb2.FindSchemas.Response.SerializeToString,
        ),
        'deleteSchema': grpc.unary_unary_rpc_method_handler(
            servicer.deleteSchema,
            request_deserializer=monitoring_dot_Schema__pb2.DeleteSchema.FromString,
            response_serializer=monitoring_dot_MonitoredEntity__pb2.Empty.SerializeToString,
        ),
    }
    generic_handler = grpc.method_handlers_generic_handler(
        'ai.verta.monitoring.SchemaService', rpc_method_handlers)
    server.add_generic_rpc_handlers((generic_handler,))
7,493 | pull image | # -*- coding: utf-8 -*-
#
# Copyright (c) 2017-2020 VMware, Inc. All Rights Reserved.
# SPDX-License-Identifier: BSD-2-Clause
"""
Interactions with the Docker daemon
"""
import docker
import logging
import os
import requests
import sys
import time
from tern.utils import constants
from tern.utils import rootfs
from tern.utils import general
# global logger
logger = logging.getLogger(constants.logger_name)
def check_docker_setup():
    """Check if the docker daemon is up and running. This should return a
    docker client if everything is running fine. Else it should exit
    gracefully. The intent is that this function is run before any docker
    operations are invoked"""
    try:
        # Generous 180s timeout: saving/pulling large images can be slow.
        client = docker.from_env(timeout=180)
        client.ping()
        return client
    except (requests.exceptions.ConnectionError, docker.errors.DockerException) as e:
        logger.critical('Critical Docker error: %s', str(e))
        # The docker SDK embeds the underlying OS error name in the message,
        # so string-match it to give the user an actionable hint.
        if 'FileNotFoundError' in str(e) or 'ConnectionRefusedError' in str(e):
            logger.critical('Docker is not installed or the daemon is not '
                            'running.')
        if 'PermissionError' in str(e):
            logger.critical('The user id is not in the docker group.')
        logger.critical('Aborting...')
        sys.exit(1)
def build_image(dfile, client):
    """Invoke docker build with the given dockerfile. It is assumed that
    docker is installed and the docker daemon is running.

    Returns the built image object, or None when the Dockerfile is missing
    or the build/API call fails."""
    df_path = os.path.abspath(dfile)
    # Tag with the current unix timestamp so repeated builds don't collide.
    image_tag = '{name}:{tag}'.format(name=constants.image,
                                      tag=str(int(time.time())))
    # try to build the image
    # TODO: docker's upstream API does not support build
    # contexts yet. You are expected to provide that as
    # a tarball as of the 4.3.1 release
    # This is a hack to get around that
    # source:
    # https://github.com/docker/docker-py/issues/2105#issuecomment-613685891
    dfcontents = ''
    dfcontext = os.path.dirname(df_path)
    try:
        with open(df_path, encoding='utf-8') as f:
            dfcontents = f.read()
        # terrible bypass of the API
        docker.api.build.process_dockerfile = lambda dockerfile, path: (
            df_path, dockerfile)
        image_obj, _ = client.images.build(
            tag=image_tag, path=dfcontext, dockerfile=dfcontents, nocache=True,
            forcerm=True)
        return image_obj
    except FileNotFoundError as e:
        logger.critical('Dockerfile not found: %s', e)
        return None
    except (docker.errors.BuildError, docker.errors.APIError) as e:
        logger.warning('Build failed: %s', e)
        return None
def extract_image(image_obj):
    """Run docker save and extract the resulting tarball into the working
    directory.

    Returns True on success, False when saving or extraction failed."""
    temp_path = rootfs.get_working_dir()
    placeholder = os.path.join(general.get_top_dir(), constants.temp_tarfile)
    # try to save the image
    try:
        # Stream the image tarball in 2 MiB chunks to bound memory use.
        result = image_obj.save(chunk_size=2097152, named=True)
        # write all of the tar byte stream into temporary tar file
        with open(placeholder, 'wb') as f:
            for chunk in result:
                f.write(chunk)
        # extract temporary tar file into the working directory
        rootfs.extract_tarfile(placeholder, temp_path)
        # remove the tar file
        os.remove(placeholder)
        # If these operations didn't work, return False
        if not os.listdir(temp_path):
            logger.critical('Unable to extract Docker image')
            return False
        return True
    except docker.errors.APIError as e:
        logger.critical(
            'Something happened with the Docker client: %s', e)
        return False
def remove_image(image_obj, client):
    """Remove every tag of the given Docker image from the local daemon.

    Returns True when all tags were removed, False on a Docker API failure.
    """
    try:
        for image_tag in image_obj.tags:
            client.images.remove(image_tag)
    except docker.errors.APIError as err:
        logger.warning(
            'Unable to remove the image: %s', err)
        return False
    return True
def close_client(client):
    """End docker interactions by closing the client. This is meant to be
    used after loading of the image is done.

    Best-effort: teardown must never mask the outcome of the real work."""
    try:
        client.close()
    except (AttributeError, requests.exceptions.ConnectionError):
        # it should either already be closed, no socket is in use,
        # or docker is not setup -- either way, the socket is closed
        pass
def build_and_dump(dockerfile):
    """Given a path to the dockerfile, use the Docker API to build the
    container image and extract the image into a working directory.

    Returns the image metadata dict (docker attrs) on success, or None when
    the build or the extraction failed. (The code returns metadata, not a
    boolean, despite what an older summary suggested.)"""
    image_metadata = None
    # open up a client first
    # if this fails we cannot proceed further so we will exit
    client = check_docker_setup()
    image = build_image(dockerfile, client)
    if image:
        # the build succeeded, so we should be able to extract it
        if extract_image(image):
            image_metadata = image.attrs
        # Always clean up the locally built image, even if extraction failed.
        remove_image(image, client)
    else:
        # we didn't succeed building the image
        logger.warning("Could not build Docker image")
    close_client(client)
    return image_metadata
# These functions should be deprecated
def check_image(image_tag_string, client):
    """Look up the image locally; return the image object or None if the
    daemon has no image with that tag/digest."""
    logger.debug(
        "Checking if image \"%s\" is available on disk...", image_tag_string)
    try:
        found = client.images.get(image_tag_string)
    except docker.errors.ImageNotFound:
        return None
    logger.debug("Image \"%s\" found", image_tag_string)
    return found
def METHOD_NAME(image_tag_string, client):
    """Pull an image from a container registry using Docker
    Note: this function uses the Docker API to pull from a container
    registry and is not responsible for configuring what registry to use.

    NOTE(review): METHOD_NAME is presumably a placeholder for `pull_image`
    (see the callers below) — confirm before renaming.

    Returns the pulled image object, or None if no such image exists."""
    logger.debug("Attempting to pull image \"%s\"", image_tag_string)
    try:
        image = client.images.pull(image_tag_string)
        logger.debug("Image \"%s\" downloaded", image_tag_string)
        return image
    except (docker.errors.ImageNotFound, docker.errors.NotFound):
        logger.error("No such image: \"%s\"", image_tag_string)
        return None
def get_docker_image(image_tag_string, client):
    """Resolve a docker image: prefer the local copy, fall back to pulling.
    image_tag_string: can be in image:tag or image@digest_type:digest format
    Returns the image object, or None when it is unavailable everywhere."""
    image = check_image(image_tag_string, client)
    if image is not None:
        return image
    return METHOD_NAME(image_tag_string, client)
def get_docker_image_digest(image_tag):
    """Given an image and tag, get the image's digest in
    'image@sha_type:digest' format.

    Returns an empty string when the image cannot be resolved."""
    digest = ""
    # open up a client first
    # if this fails we cannot proceed further so we will exit
    client = check_docker_setup()
    # get the image
    image = get_docker_image(image_tag, client)
    if image:
        # RepoDigests[0] already has the 'image@sha_type:digest' shape.
        digest = image.attrs['RepoDigests'][0]
        # cleanup
        remove_image(image, client)
    close_client(client)
    return digest
def dump_docker_image(image_tag):
    """Given an image and tag or image and digest, use the Docker API to get
    a container image representation into the working directory.

    Returns the image metadata dict (docker attrs), or None on failure.
    Note: unlike build_and_dump, the fetched image is left on the daemon."""
    image_metadata = None
    # open up a client first
    # if this fails we cannot proceed further so we will exit
    client = check_docker_setup()
    image = get_docker_image(image_tag, client)
    if image:
        if extract_image(image):
            image_metadata = image.attrs
    # now the client can be closed
    close_client(client)
    return image_metadata
7,494 | run | #!/usr/bin/env python3
import sys, os, argparse, pathlib
###############################################################################
def parse_command_line(args, description):
###############################################################################
    """Parse the command line (argv-style list; args[0] is the program name).

    :param args: full argv list, e.g. sys.argv
    :param description: human-readable text shown by --help
    :return: argparse.Namespace with bin_dir, install_lib_dir, cmake_generator
    """
    # Fix: `description` was accepted but never forwarded to the parser,
    # so --help showed no program description.
    parser = argparse.ArgumentParser(description=description)
    parser.add_argument("--bin-dir", help="binary dir", default=None)
    parser.add_argument("--install-lib-dir", help="albany install lib dir", default=None)
    parser.add_argument("--cmake-generator", help="generator used by cmake", default=None)
    return parser.parse_args(args[1:])
###############################################################################
def get_link_line_ninja(bin_dir):
###############################################################################
    """Extract the LINK_LIBRARIES tokens for the 'dummy' executable from the
    CMake-generated build.ninja in ``bin_dir``.

    Raises RuntimeError when the expected line does not look like
    ``LINK_LIBRARIES = ...``.
    """
    infile = bin_dir / "build.ninja"
    with infile.open('r') as fd:
        break_in_two = False
        break_next = False
        # Stop two lines after the 'build dummy/dummy' statement; that line
        # is expected to carry the LINK_LIBRARIES variable.
        # NOTE(review): the fixed two-line offset is tied to the layout CMake
        # emits for ninja — re-verify when upgrading CMake.
        for line_no,line in enumerate(fd):
            if break_next:
                break
            if break_in_two:
                break_next = True
            if 'build dummy/dummy: CXX_EXECUTABLE_LINKER' in line:
                break_in_two = True
        # `line` is now the candidate 'LINK_LIBRARIES = <tokens>' line.
        tokens = line.strip().split('=')
        if len(tokens)!=2:
            print(f"ERROR! Unexpected format of LINK_LIBRARIES line in build.ninja")
            print(f"link line: {line}")
            raise RuntimeError
        if tokens[0].strip() != "LINK_LIBRARIES":
            print(f"ERROR! Unexpected format of LINK_LIBRARIES line in build.ninja")
            raise RuntimeError
        return tokens[1].split()
###############################################################################
def get_link_line_make(bin_dir):
###############################################################################
    """Extract the library/flag tokens from the link.txt file CMake generates
    for the 'dummy' executable under ``bin_dir`` (Unix Makefiles generator).
    """
    link_txt = bin_dir / "dummy" / "CMakeFiles" / "dummy.dir" / "link.txt"
    with link_txt.open('r') as fd:
        tokens = fd.read().strip().split()
    # Everything after the 'dummy' output name on the link command line is
    # the list of libraries and link flags.
    return tokens[tokens.index('dummy') + 1:]
###############################################################################
def METHOD_NAME (bin_dir,install_lib_dir, cmake_generator):
###############################################################################
    """Build the export link line and write it to <bin_dir>/export_albany.in.

    Reads the 'dummy' executable's link line from the generator-specific
    build files, rewrites rpath-style path lists to point at the install lib
    dir, collects -L directories and -l library names, and writes the result
    as ALBANY_LINK_LIBS.

    NOTE(review): METHOD_NAME is presumably a placeholder for `run` (see
    _main_func) — confirm before renaming.
    """
    if cmake_generator=="Unix Makefiles":
        items = get_link_line_make (bin_dir)
    elif cmake_generator=="Ninja":
        items = get_link_line_ninja (bin_dir)
    else:
        print(f"ERROR! Unknown/unsupported cmake generator '{cmake_generator}'")
        raise RuntimeError
    libs = []
    libs_dirs = [install_lib_dir]
    for item in items:
        if "," in item:
            # This are link options, so process them one by one
            tokens = item.split(',')
            for i,t in enumerate(tokens):
                if ':' in t:
                    paths = t.split(':')
                    # Loop over all paths, replace them with install dir
                    for j,p in enumerate(paths):
                        path = pathlib.Path(p).resolve()
                        paths[j] = str(install_lib_dir)
                    # Replace the rpath list with the unique list
                    tokens[i] = ':'.join(list(set(paths)))
                else:
                    # Just a link flag, keep it
                    tokens[i] = t
            # Re-join proecessed tokens with commas
            libs.append(",".join(tokens))
        elif item.startswith("-l"):
            # It's either a -lXYZ lib or a link flag. Keep it as is.
            libs.append(item)
        elif item.startswith("-") and not item.startswith("-L"):
            # This is some funky link option (e.g., -mkl or -cxxlib with Intel).
            # Simply add the item to the "libs" list
            libs.append(item)
        elif item.startswith("-L"):
            print (f"-L dir: {item}")
            # We want to get an abs path, with symlinks resolved (if any)
            lib_dir_full = pathlib.Path(item[2:]).resolve()
            if not lib_dir_full.exists() or not lib_dir_full.is_dir():
                print (f"could not parse token {item}")
                print (f" -> It appears to be a lib dir entry, but the path does not exist, or is not a directory")
                raise ValueError
            libs_dirs.append(lib_dir_full)
        else:
            # This should be a library name, expressed by full/relative path.
            # We want to get an abs path, with symlinks resolved *except* for symlinks
            # in the file name, to avoid an error we're seeing where libdl.so points
            # to the file libdl-2.28.so (an odd name: usually we see libdl.so.2.28)
            lib_file_full = pathlib.Path(item).parent.resolve() / pathlib.Path(item).name
            if not lib_file_full.exists():
                print (f"could not locate lib: {lib_file_full}")
                print (f"cwd: {os.getcwd()}")
                raise ValueError
            lib_file = lib_file_full.name
            lib_path = lib_file_full.parent
            # Remove all extensions (such as .so.5), and remove first 3 chars (lib)
            lib_name = str(lib_file).split('.')[0][3:]
            libs.append (f"-l{lib_name}")
            # Skip directories inside the build tree; they won't exist after install.
            if not lib_path in libs_dirs and not str(lib_path).startswith(str(bin_dir)):
                libs_dirs.append(lib_path)
    outfile = pathlib.Path(bin_dir) / "export_albany.in"
    link_line = ""
    for dir in libs_dirs:
        link_line += f"-L{dir} "
    for lib in libs:
        link_line += f"{lib} "
    print (f"writing to {outfile}")
    with outfile.open('w') as fd:
        fd.write(f'ALBANY_LINK_LIBS="{link_line.strip()}"')
###############################################################################
def _main_func(description):
###############################################################################
    """Parse CLI arguments, run the export-link-line extraction, and exit 0."""
    args = vars(parse_command_line(sys.argv, description))
    bin_dir = pathlib.Path(args["bin_dir"]).resolve()
    install_lib_dir = pathlib.Path(args["install_lib_dir"]).resolve()
    generator = args["cmake_generator"]
    METHOD_NAME (bin_dir,install_lib_dir,generator)
    sys.exit(0)
###############################################################################
if __name__ == "__main__":
    # Script entry point: the module docstring doubles as the --help text.
    _main_func(__doc__)
|
7,495 | test can not delete other repo file | import json
from django.urls import reverse
from seahub.base.models import FileComment
from seahub.test_utils import BaseTestCase
class FileCommentTest(BaseTestCase):
def setUp(self):
self.tmp_user = self.create_user()
self.tmp_repo_1_id = self.create_repo(
name='tmp-repo-1', desc='', username=self.tmp_user.username, passwd=None)
self.file1 = self.create_file(repo_id=self.tmp_repo_1_id, parent_dir='/',
filename='test1.txt',
username=self.tmp_user.username)
self.tmp_repo_2_id = self.create_repo(
name='tmp-repo-2', desc='', username=self.user.username, passwd=None)
self.file2 = self.create_file(repo_id=self.tmp_repo_2_id, parent_dir='/',
filename='test2.txt',
username=self.user.username)
o = FileComment.objects.add_by_file_path(repo_id=self.repo.id,
file_path=self.file,
author=self.tmp_user.username,
comment='test comment')
o1 = FileComment.objects.add_by_file_path(repo_id=self.tmp_repo_1_id,
file_path='/test1.txt',
author=self.tmp_user.username,
comment='test comment1')
o2 = FileComment.objects.add_by_file_path(repo_id=self.tmp_repo_2_id,
file_path='/test2.txt',
author=self.user.username,
comment='test comment2')
self.login_as(self.user)
self.endpoint = reverse('api2-file-comment', args=[self.repo.id, o.pk]) + '?p=' + self.file
self.endpoint1 = reverse('api2-file-comment', args=[self.repo.id, o1.pk]) + '?p=' + '/test1.txt'
self.endpoint2 = reverse('api2-file-comment', args=[self.repo.id, o2.pk]) + '?p=' + '/test2.txt'
def tearDown(self):
self.remove_repo()
self.remove_repo(repo_id=self.tmp_repo_1_id)
self.remove_repo(repo_id=self.tmp_repo_2_id)
self.remove_user()
self.remove_user(self.tmp_user.email)
FileComment.objects.all().delete()
def test_can_get(self):
resp = self.client.get(self.endpoint)
self.assertEqual(200, resp.status_code)
json_resp = json.loads(resp.content)
assert json_resp['repo_id'] == self.repo.id
assert json_resp['parent_path'] == '/'
assert json_resp['item_name'] == 'test.txt'
assert json_resp['user_email'] == self.tmp_user.email
assert 'avatars' in json_resp['avatar_url']
def test_can_not_get_other_repo_file_comment(self):
resp = self.client.get(self.endpoint1)
self.assertEqual(404, resp.status_code)
def test_can_not_get_other_user_file_comment(self):
resp = self.client.get(self.endpoint2)
self.assertEqual(404, resp.status_code)
def test_can_get_with_avatar_size(self):
    """Supplying avatar_size still returns the comment with an avatar URL."""
    response = self.client.get(self.endpoint + '&avatar_size=20')
    self.assertEqual(200, response.status_code)

    payload = json.loads(response.content)
    assert payload['parent_path'] == '/'
    assert payload['item_name'] == 'test.txt'
    assert payload['user_email'] == self.tmp_user.email
    assert 'avatars' in payload['avatar_url']
def test_can_delete(self):
    """Deleting an own-repo comment succeeds and removes exactly one record."""
    assert len(FileComment.objects.all()) == 3
    response = self.client.delete(self.endpoint)
    self.assertEqual(204, response.status_code)
    assert len(FileComment.objects.all()) == 2
def test_can_not_delete_other_repo_file_comment(self):
    """Deleting via this repo's endpoint must not touch a comment stored in another repo.

    The original method was named ``METHOD_NAME`` — without the ``test_``
    prefix, unittest discovery silently skips it, so this case was never run.
    """
    assert len(FileComment.objects.all()) == 3
    resp = self.client.delete(self.endpoint1)
    self.assertEqual(404, resp.status_code)
    # Nothing was deleted: the 404 is a no-op.
    assert len(FileComment.objects.all()) == 3
def test_can_not_delete_other_user_file_comment(self):
    """Another user's comment cannot be deleted through this repo's endpoint."""
    assert len(FileComment.objects.all()) == 3
    response = self.client.delete(self.endpoint2)
    self.assertEqual(404, response.status_code)
    assert len(FileComment.objects.all()) == 3
def test_invalid_user_can_not_delete(self):
    """A user without repo access receives 403 and the comment survives."""
    self.logout()
    self.login_as(self.admin)

    assert len(FileComment.objects.all()) == 3
    response = self.client.delete(self.endpoint)
    self.assertEqual(403, response.status_code)
    assert len(FileComment.objects.all()) == 3
7,496 | timeout handler | """Timeout management for tests."""
from __future__ import annotations
import dataclasses
import datetime
import functools
import os
import signal
import time
import typing as t
from .io import (
read_json_file,
)
from .config import (
CommonConfig,
TestConfig,
)
from .util import (
display,
TimeoutExpiredError,
)
from .thread import (
WrappedThread,
)
from .constants import (
TIMEOUT_PATH,
)
from .test import (
TestTimeout,
)
@dataclasses.dataclass(frozen=True)
class TimeoutDetail:
    """Details required to enforce a timeout on test execution."""

    # Format used to maintain backwards compatibility with previous versions of ansible-test.
    _DEADLINE_FORMAT = '%Y-%m-%dT%H:%M:%SZ'

    deadline: datetime.datetime
    duration: int | float  # minutes

    @property
    def remaining(self) -> datetime.timedelta:
        """The amount of time remaining before the timeout occurs. If the timeout has passed, this will be a negative duration."""
        now = datetime.datetime.now(tz=datetime.timezone.utc).replace(microsecond=0)
        return self.deadline - now

    def to_dict(self) -> dict[str, t.Any]:
        """Return timeout details as a dictionary suitable for JSON serialization."""
        return {
            'deadline': self.deadline.strftime(self._DEADLINE_FORMAT),
            'duration': self.duration,
        }

    @staticmethod
    def from_dict(value: dict[str, t.Any]) -> TimeoutDetail:
        """Return a TimeoutDetail instance using the value previously returned by to_dict."""
        parsed = datetime.datetime.strptime(value['deadline'], TimeoutDetail._DEADLINE_FORMAT)
        return TimeoutDetail(
            deadline=parsed.replace(tzinfo=datetime.timezone.utc),
            duration=value['duration'],
        )

    @staticmethod
    def create(duration: int | float) -> TimeoutDetail | None:
        """Return a new TimeoutDetail instance for the specified duration (in minutes), or None if the duration is zero."""
        if not duration:
            return None

        # Whole-number floats are normalized to int for clean serialization.
        if duration == int(duration):
            duration = int(duration)

        start = datetime.datetime.now(datetime.timezone.utc).replace(microsecond=0)
        return TimeoutDetail(
            deadline=start + datetime.timedelta(seconds=int(duration * 60)),
            duration=duration,
        )
def get_timeout() -> TimeoutDetail | None:
    """Return details about the currently set timeout, if any, otherwise return None."""
    try:
        raw = read_json_file(TIMEOUT_PATH)
    except FileNotFoundError:
        # No timeout file means no timeout is configured.
        return None
    return TimeoutDetail.from_dict(raw)
def configure_timeout(args: CommonConfig) -> None:
    """Configure the timeout."""
    # Only tests are subject to the timeout.
    if not isinstance(args, TestConfig):
        return
    configure_test_timeout(args)
def configure_test_timeout(args: TestConfig) -> None:
    """Configure the test timeout.

    If the deadline has already passed, record the timeout and raise
    immediately. Otherwise install a SIGUSR1 handler plus a background
    daemon thread that signals this process when the deadline elapses.
    """
    timeout = get_timeout()

    if not timeout:
        return

    timeout_remaining = timeout.remaining

    test_timeout = TestTimeout(timeout.duration)

    if timeout_remaining <= datetime.timedelta():
        # Deadline already passed: record the timeout result and abort now.
        test_timeout.write(args)

        raise TimeoutExpiredError(f'The {timeout.duration} minute test timeout expired {timeout_remaining * -1} ago at {timeout.deadline}.')

    display.info(f'The {timeout.duration} minute test timeout expires in {timeout_remaining} at {timeout.deadline}.', verbosity=1)

    def METHOD_NAME(_dummy1: t.Any, _dummy2: t.Any) -> None:
        """Runs when SIGUSR1 is received."""
        # Record the timeout for result reporting, then abort the run by
        # raising on the main thread (signal handlers run there).
        test_timeout.write(args)

        raise TimeoutExpiredError(f'Tests aborted after exceeding the {timeout.duration} minute time limit.')

    def timeout_waiter(timeout_seconds: int) -> None:
        """Background thread which will kill the current process if the timeout elapses."""
        time.sleep(timeout_seconds)
        # Signal our own process; the SIGUSR1 handler above does the abort.
        os.kill(os.getpid(), signal.SIGUSR1)

    signal.signal(signal.SIGUSR1, METHOD_NAME)

    # Daemon thread so it never prevents normal interpreter shutdown.
    instance = WrappedThread(functools.partial(timeout_waiter, timeout_remaining.total_seconds()))
    instance.daemon = True
    instance.start()
7,497 | remove punc | # Copyright (c) 2019-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
""" Official evaluation script for the MLQA dataset. """
from __future__ import print_function
from collections import Counter
import string
import re
import argparse
import json
import sys
import unicodedata
PUNCT = {chr(i) for i in range(sys.maxunicode) if unicodedata.category(chr(i)).startswith('P')}.union(string.punctuation)
WHITESPACE_LANGS = ['en', 'es', 'hi', 'vi', 'de', 'ar']
MIXED_SEGMENTATION_LANGS = ['zh']
def whitespace_tokenize(text):
    """Split text into tokens on runs of whitespace."""
    tokens = text.split()
    return tokens
def mixed_segmentation(text):
    """Tokenize mixed-script text: CJK characters and punctuation become
    single tokens; any other runs are buffered and split on whitespace."""
    tokens = []
    buffer = ""

    def flush():
        nonlocal buffer
        if buffer != "":
            tokens.extend(whitespace_tokenize(buffer))
            buffer = ""

    for ch in text:
        if ch in PUNCT or re.search(r'[\u4e00-\u9fa5]', ch):
            flush()
            tokens.append(ch)
        else:
            buffer += ch

    flush()
    return tokens
def normalize_answer(s, lang):
    """Lower text and remove punctuation, articles and extra whitespace."""
    def remove_articles(text, lang):
        # Strip language-specific articles; languages without formal
        # articles pass the text through unchanged.
        if lang == 'en':
            return re.sub(r'\b(a|an|the)\b', ' ', text)
        elif lang == 'es':
            return re.sub(r'\b(un|una|unos|unas|el|la|los|las)\b', ' ', text)
        elif lang == 'hi':
            return text # Hindi does not have formal articles
        elif lang == 'vi':
            return re.sub(r'\b(của|là|cái|chiếc|những)\b', ' ', text)
        elif lang == 'de':
            return re.sub(r'\b(ein|eine|einen|einem|eines|einer|der|die|das|den|dem|des)\b', ' ', text)
        elif lang == 'ar':
            # NOTE(review): appears intended to drop the Arabic definite
            # article; the '\sال^' alternative looks unusual — confirm
            # against the official MLQA evaluation script before changing.
            return re.sub('\sال^|ال', ' ', text)
        elif lang == 'zh':
            return text # Chinese does not have formal articles
        else:
            raise Exception('Unknown Language {}'.format(lang))

    def white_space_fix(text, lang):
        # Tokenize per language family, then rejoin with single spaces,
        # discarding whitespace-only tokens.
        if lang in WHITESPACE_LANGS:
            tokens = whitespace_tokenize(text)
        elif lang in MIXED_SEGMENTATION_LANGS:
            tokens = mixed_segmentation(text)
        else:
            raise Exception('Unknown Language {}'.format(lang))
        return ' '.join([t for t in tokens if t.strip() != ''])

    def METHOD_NAME(text):
        # Drop every character in the module-level PUNCT set.
        return ''.join(ch for ch in text if ch not in PUNCT)

    def lower(text):
        return text.lower()

    # Pipeline: lowercase -> strip punctuation -> strip articles -> fix spacing.
    return white_space_fix(remove_articles(METHOD_NAME(lower(s)), lang), lang)
def f1_score(prediction, ground_truth, lang):
    """Token-level F1 between a prediction and a single ground-truth answer."""
    pred_tokens = normalize_answer(prediction, lang).split()
    truth_tokens = normalize_answer(ground_truth, lang).split()

    # Multiset intersection counts each shared token at its minimum frequency.
    overlap = Counter(pred_tokens) & Counter(truth_tokens)
    num_same = sum(overlap.values())
    if num_same == 0:
        return 0

    precision = num_same / len(pred_tokens)
    recall = num_same / len(truth_tokens)
    return (2 * precision * recall) / (precision + recall)
def exact_match_score(prediction, ground_truth, lang):
    """True when prediction and ground truth are identical after normalization."""
    return normalize_answer(prediction, lang) == normalize_answer(ground_truth, lang)
def metric_max_over_ground_truths(metric_fn, prediction, ground_truths, lang):
    """Best score of the prediction over all ground-truth answers."""
    return max(metric_fn(prediction, truth, lang) for truth in ground_truths)
def evaluate(dataset, predictions, lang):
    """Score predictions against the dataset.

    Returns a dict with percentage 'exact_match' and 'f1' over all
    questions; unanswered questions count 0 but stay in the denominator.
    """
    f1 = exact_match = total = 0
    for article in dataset:
        for paragraph in article['paragraphs']:
            for qa in paragraph['qas']:
                total += 1
                if qa['id'] not in predictions:
                    # Warn but keep going: missing predictions score 0.
                    message = 'Unanswered question ' + qa['id'] + \
                              ' will receive score 0.'
                    print(message, file=sys.stderr)
                    continue
                ground_truths = list(map(lambda x: x['text'], qa['answers']))
                prediction = predictions[qa['id']]
                # Each question is scored against its best-matching ground truth.
                exact_match += metric_max_over_ground_truths(
                    exact_match_score, prediction, ground_truths, lang)
                f1 += metric_max_over_ground_truths(
                    f1_score, prediction, ground_truths, lang)

    # Convert raw sums to percentages.
    exact_match = 100.0 * exact_match / total
    f1 = 100.0 * f1 / total

    return {'exact_match': exact_match, 'f1': f1}
def evaluate_with_path(dataset_file, prediction_file, answer_language):
    """Load a dataset and a predictions file from disk, then score them."""
    with open(dataset_file) as reader:
        dataset = json.load(reader)['data']
    with open(prediction_file) as reader:
        predictions = json.load(reader)
    return evaluate(dataset, predictions, answer_language)
if __name__ == '__main__':
    # Command-line entry point: score a prediction file against a dataset.
    expected_version = '1.0'
    parser = argparse.ArgumentParser(
        description='Evaluation for MLQA ' + expected_version)
    parser.add_argument('dataset_file', help='Dataset file')
    parser.add_argument('prediction_file', help='Prediction File')
    parser.add_argument('answer_language', help='Language code of answer language')
    args = parser.parse_args()
    with open(args.dataset_file) as dataset_file:
        dataset_json = json.load(dataset_file)
        # A version mismatch is only a warning; evaluation proceeds anyway.
        if (str(dataset_json['version']) != expected_version):
            print('Evaluation expects v-' + expected_version +
                  ', but got dataset with v-' + dataset_json['version'],
                  file=sys.stderr)
        dataset = dataset_json['data']
    with open(args.prediction_file) as prediction_file:
        predictions = json.load(prediction_file)
    # Results are printed as a JSON object on stdout.
    print(json.dumps(evaluate(dataset, predictions, args.answer_language)))
7,498 | cleanup | import copy
from functools import partial
import itertools
import json
import math
import multiprocessing as mp
import os
import platform
import posixpath
import signal
import traceback
import numpy as np
import concurrent.futures
from tqdm import tqdm
from ....lib import (
xyzrange, min2, max2, Vec, Bbox,
sip, totalfn
)
from .... import sharedmemory as shm
error_queue = None
progress_queue = None
def check_error_queue():
    """Raise an aggregate Exception if any worker reported errors on the shared queue."""
    if error_queue.empty():
        return

    errors = []
    while not error_queue.empty():
        item = error_queue.get()
        # StopIteration entries are sentinels, not real errors.
        if item is not StopIteration:
            errors.append(item)

    if errors:
        raise Exception(errors)
def progress_queue_listener(q, total, desc):
    """Consume progress counts from q and render them on a tqdm bar until None arrives."""
    bar = tqdm(total=total, desc=desc)
    while True:
        count = q.get()
        if count is None:
            # Sentinel posted by the parent: stop listening.
            break
        bar.update(count)
def error_capturing_fn(fn, *args, **kwargs):
    """Run fn, forwarding any exception to the shared error_queue instead of raising."""
    try:
        return fn(*args, **kwargs)
    except Exception as exc:
        traceback.print_exception(type(exc), exc, exc.__traceback__)
        error_queue.put(exc)
        return 0
def initialize_synchronization(progress_queue, fs_lock):
    """Install the shared progress queue and filesystem lock into the rx/tx worker modules."""
    from . import rx, tx
    for module in (rx, tx):
        module.progress_queue = progress_queue
        module.fs_lock = fs_lock
def parallel_execution(
    fn, items, parallel,
    progress, desc="Progress",
    total=None, cleanup_shm=None,
    block_size=1000, min_block_size=10
):
    """Run fn over blocks of items in a process pool.

    fn: callable applied to each block of items (wrapped to capture errors)
    items: iterable of work items, dispatched in blocks of block_size
    parallel: worker count; True means one per CPU
    progress: when truthy, a listener process renders a progress bar
    cleanup_shm: shared-memory name to unlink if interrupted by a signal
    Raises: ValueError for non-positive parallel; an aggregate Exception
    if any worker reported errors.
    """
    global error_queue
    error_queue = mp.Queue()
    progress_queue = mp.Queue()
    fs_lock = mp.Lock()

    if parallel is True:
        parallel = mp.cpu_count()
    elif parallel <= 0:
        raise ValueError(f"Parallel must be a positive number or boolean (True: all cpus). Got: {parallel}")

    def METHOD_NAME(signum, frame):
        # SIGINT/SIGTERM handler: best-effort shared-memory cleanup.
        if cleanup_shm:
            shm.unlink(cleanup_shm)

    # Wrap fn so worker exceptions land on error_queue instead of crashing the pool.
    fn = partial(error_capturing_fn, fn)

    total = totalfn(items, total)

    # Shrink block size so every worker gets work, but not below the minimum.
    if total is not None and (total / parallel) < block_size:
        block_size = int(math.ceil(total / parallel))
        block_size = max(block_size, min_block_size)

    # Save previous handlers so they can be restored afterwards.
    prevsigint = signal.getsignal(signal.SIGINT)
    prevsigterm = signal.getsignal(signal.SIGTERM)

    signal.signal(signal.SIGINT, METHOD_NAME)
    signal.signal(signal.SIGTERM, METHOD_NAME)

    # Fix for MacOS which can segfault due to
    # urllib calling libdispatch which is not fork-safe
    # https://bugs.python.org/issue30385
    no_proxy = os.environ.get("no_proxy", "")
    if platform.system().lower() == "darwin":
        os.environ["no_proxy"] = "*"

    try:
        if progress:
            proc = mp.Process(
                target=progress_queue_listener,
                args=(progress_queue, total, desc)
            )
            proc.start()

        with concurrent.futures.ProcessPoolExecutor(
            max_workers=parallel,
            initializer=initialize_synchronization,
            initargs=(progress_queue, fs_lock),
        ) as pool:
            # sip() yields blocks of block_size items to each worker call.
            pool.map(fn, sip(items, block_size))
    finally:
        # Undo the darwin workaround and restore the prior signal handlers.
        if platform.system().lower() == "darwin":
            os.environ["no_proxy"] = no_proxy
        signal.signal(signal.SIGINT, prevsigint)
        signal.signal(signal.SIGTERM, prevsigterm)

        if progress:
            # None is the sentinel that stops the listener process.
            progress_queue.put(None)
            proc.join()
            proc.close()

            progress_queue.close()
            progress_queue.join_thread()

    # Surface any worker errors after teardown.
    check_error_queue()
    error_queue.close()
    error_queue.join_thread()
def chunknames(bbox, volume_bbox, key, chunk_size, protocol=None):
    """Return a sized iterable of storage paths ("key/x0-x1_y0-y1_z0-z1")
    for every chunk covering bbox, clamped to volume_bbox.

    The result supports len() without materializing the names, so callers
    can show progress while streaming.
    """
    # File storage uses OS-native separators; all other protocols are POSIX.
    path = posixpath if protocol != 'file' else os.path

    class ChunkNamesIterator():
        def __len__(self):
            # round up and avoid conversion to float
            n_chunks = (bbox.dx + chunk_size[0] - 1) // chunk_size[0]
            n_chunks *= (bbox.dy + chunk_size[1] - 1) // chunk_size[1]
            n_chunks *= (bbox.dz + chunk_size[2] - 1) // chunk_size[2]
            return n_chunks

        def __iter__(self):
            for x, y, z in xyzrange(bbox.minpt, bbox.maxpt, chunk_size):
                # Clamp the chunk's far corner to the volume boundary so
                # edge chunks are named by their true (partial) extent.
                xf = min(x + chunk_size.x, volume_bbox.maxpt.x)
                yf = min(y + chunk_size.y, volume_bbox.maxpt.y)
                zf = min(z + chunk_size.z, volume_bbox.maxpt.z)
                filename = f"{x}-{xf}_{y}-{yf}_{z}-{zf}"
                yield path.join(key, filename)

    return ChunkNamesIterator()
def gridpoints(bbox, volume_bbox, chunk_size):
    """
    Consider a volume as divided into a grid with the
    first chunk labeled 1, the second 2, etc.

    Return the grid x,y,z coordinates of a cutout as a
    sequence (a generator of Vec(x, y, z)).
    """
    # Fix: removed the unused local `grid_size` (the full-volume grid
    # dimensions were computed on every call but never read).
    chunk_size = Vec(*chunk_size)

    # Size of the cutout in grid cells, and its offset from the volume origin.
    cutout_grid_size = np.ceil(bbox.size3() / chunk_size).astype(np.int64)
    cutout_grid_offset = np.ceil((bbox.minpt - volume_bbox.minpt) / chunk_size).astype(np.int64)

    grid_cutout = Bbox(cutout_grid_offset, cutout_grid_offset + cutout_grid_size)

    for x, y, z in xyzrange(grid_cutout.minpt, grid_cutout.maxpt, (1, 1, 1)):
        yield Vec(x, y, z)
def compressed_morton_code(gridpt, grid_size):
    """Interleave the bits of one or more 3D grid points into compressed
    morton codes for the given grid size. Axes stop contributing bits once
    their extent is exhausted (the "compressed" part)."""
    # Sized empty inputs short-circuit; generators don't have len().
    if hasattr(gridpt, "__len__") and len(gridpt) == 0:
        return np.zeros((0,), dtype=np.uint32)

    points = np.asarray(gridpt, dtype=np.uint32)
    single_input = points.ndim == 1
    if single_input:
        points = np.atleast_2d(points)

    codes = np.zeros((points.shape[0],), dtype=np.uint64)
    num_bits = [math.ceil(math.log2(size)) for size in grid_size]

    if sum(num_bits) > 64:
        raise ValueError(f"Unable to represent grids that require more than 64 bits. Grid size {grid_size} requires {num_bits} bits.")

    if np.any(np.max(points, axis=0) >= grid_size):
        raise ValueError(f"Unable to represent grid points larger than the grid. Grid size: {grid_size} Grid points: {points}")

    one = np.uint64(1)
    out_bit = np.uint64(0)

    for in_bit in range(max(num_bits)):
        for axis in range(3):
            # Only interleave a bit for axes that still have range left.
            if 2 ** in_bit < grid_size[axis]:
                codes |= ((np.uint64(points[:, axis]) >> np.uint64(in_bit)) & one) << out_bit
                out_bit += one

    return codes[0] if single_input else codes
def shade(dest_img, dest_bbox, src_img, src_bbox):
    """
    Shade dest_img at coordinates dest_bbox using the
    image contained in src_img at coordinates src_bbox.

    The buffer will only be painted in the overlapping
    region of the content.

    Returns: void
    """
    if not Bbox.intersects(dest_bbox, src_bbox) or src_img is None:
        return

    # Corners of the overlap region in global coordinates.
    spt = np.maximum(src_bbox.minpt, dest_bbox.minpt)
    ept = np.minimum(src_bbox.maxpt, dest_bbox.maxpt)
    # Overlap translated into dest_img's local index space.
    dest_minpt = np.minimum(spt, ept) - dest_bbox.minpt
    dest_maxpt = np.maximum(spt, ept) - dest_bbox.minpt

    ZERO3 = np.zeros((3,), dtype=spt.dtype)
    # Overlap translated into src_img's local index space, clamped to
    # the source array's actual extent.
    istart = np.maximum(spt - src_bbox.minpt, ZERO3)
    iend = np.minimum(ept - src_bbox.maxpt, ZERO3) + src_img.shape[:3]

    # Ensure a channel axis exists so the assignment broadcasts against
    # a 4D destination.
    while src_img.ndim < 4:
        src_img = src_img[..., np.newaxis]

    dest_img[
        dest_minpt[0]:dest_maxpt[0],
        dest_minpt[1]:dest_maxpt[1],
        dest_minpt[2]:dest_maxpt[2],
    ] = src_img[
        istart[0]:iend[0],
        istart[1]:iend[1],
        istart[2]:iend[2]
    ]
7,499 | prep metadata migrate | # import ganga
import os
import time
import pickle
import pymongo
from GangaCore.Core.GangaRepository.VStreamer import from_file
from GangaCore.Core.GangaRepository.DStreamer import (
EmptyGangaObject,
index_from_database, index_to_database,
object_from_database, object_to_database
)
from GangaCore.GPIDev.Base.Proxy import getName, addProxy
from GangaCore.Runtime.Repository_runtime import getLocalRoot
from GangaCore.Core.GangaRepository.VStreamer import from_file
from GangaCore.test.GPI.newXMLTest.utilFunctions import getXMLDir, getXMLFile
from GangaCore.Utility.Config import getConfig
from GangaCore.GPIDev.Base.Proxy import getName, addProxy
from GangaCore.Runtime.Repository_runtime import getLocalRoot
from GangaCore.Core.GangaRepository.VStreamer import from_file
from GangaCore.test.GPI.newXMLTest.utilFunctions import getXMLDir, getXMLFile
from GangaCore.Core.GangaRepository.container_controllers import (
native_handler,
docker_handler,
udocker_handler,
singularity_handler, get_database_config
)
controller_map = {
"native": native_handler,
"docker": docker_handler,
"udocker": udocker_handler,
"singularity": singularity_handler,
}
def job_migrate(connection):
    """Convert the XML Job files to Database.

    Reads each job's XML payload and its pickled index from the legacy
    "jobs/0xxx" layout, then writes both into the MongoDB `jobs` and
    `index` collections. Subjobs (when a subjobs.idx exists) are migrated
    first, linked to their master via index["master"].
    """
    jobs_path = os.path.join(getLocalRoot(), '6.0', 'jobs')
    # Job folders live under "0xxx"; skip the pickled ".index" files.
    job_ids = [i for i in os.listdir(os.path.join(jobs_path, "0xxx"))
               if "index" not in i]
    for idx in sorted(job_ids):
        # ignore_subs = []
        # NOTE(review): ignore_subs is assigned but never passed to
        # object_to_database below (which gets ignore_subs=[]) — confirm intent.
        ignore_subs = ["subjobs"]
        job_file = getXMLFile(int(idx))
        job_folder = os.path.dirname(job_file)
        jeb, err = from_file(open(job_file, "rb"))
        # The on-disk index is a pickle whose third element is the index dict.
        _, _, index = pickle.load(
            open(job_file.replace("/data", ".index"), "rb"))
        # check for subjobs
        if "subjobs.idx" in os.listdir(job_folder):
            subjob_ids = [i for i in os.listdir(job_folder) if i.isdecimal()]
            subjob_files = [os.path.join(job_folder, i, "data")
                            for i in subjob_ids]
            subjob_indexes = pickle.load(
                open(os.path.join(job_folder, "subjobs.idx"), "rb"))
            # Pair sorted index keys with sorted data files; relies on the
            # numeric folder names sorting consistently with the index keys.
            for s_idx, file in zip(sorted(subjob_indexes), sorted(subjob_files)):
                s_index = subjob_indexes[s_idx]
                s_jeb, er = from_file(open(file, "rb"))
                if isinstance(s_jeb, EmptyGangaObject):
                    # Unreadable/broken subjob payload: skip it.
                    continue
                s_index["master"] = jeb.id
                s_index["classname"] = getName(s_jeb)
                s_index["category"] = s_jeb._category
                index_to_database(data=s_index, document=connection.index)
                object_to_database(j=s_jeb, document=connection.jobs,
                                   master=jeb.id, ignore_subs=[])
        index["master"] = -1  # normal object do not have a master/parent
        index["classname"] = getName(jeb)
        index["category"] = jeb._category
        index_to_database(data=index, document=connection.index)
        object_to_database(j=jeb, document=connection.jobs,
                          master=-1, ignore_subs=[])
def job_metadata_migrate(connection):
    """Copy the jobs.metadata XML registry into the database."""
    metadata_root = os.path.join(getLocalRoot(), '6.0', 'jobs.metadata')

    for entry in os.listdir(os.path.join(metadata_root, "0xxx")):
        entry_path = os.path.join(metadata_root, "0xxx", entry)
        if not os.path.isdir(entry_path):
            continue
        obj, _ = from_file(open(os.path.join(entry_path, "data"), "rb"))
        object_to_database(j=obj, document=connection["jobs.metadata"],
                           master=-1, ignore_subs=[])
def METHOD_NAME(connection):
    """Copy the prep.metadata XML registry into the database."""
    metadata_root = os.path.join(getLocalRoot(), '6.0', 'prep.metadata')

    for entry in os.listdir(os.path.join(metadata_root, "0xxx")):
        entry_path = os.path.join(metadata_root, "0xxx", entry)
        if not os.path.isdir(entry_path):
            continue
        obj, _ = from_file(open(os.path.join(entry_path, "data"), "rb"))
        object_to_database(j=obj, document=connection["prep.metadata"],
                           master=-1, ignore_subs=[])
def templates_metadata_migrate(connection):
    """Copy the templates.metadata XML registry into the database."""
    metadata_root = os.path.join(getLocalRoot(), '6.0', 'templates.metadata')

    for entry in os.listdir(os.path.join(metadata_root, "0xxx")):
        entry_path = os.path.join(metadata_root, "0xxx", entry)
        if not os.path.isdir(entry_path):
            continue
        obj, _ = from_file(open(os.path.join(entry_path, "data"), "rb"))
        object_to_database(j=obj, document=connection["templates.metadata"],
                           master=-1, ignore_subs=[])
def get_job_done():
    """Start the configured database container, migrate the job and
    metadata registries into MongoDB, then shut the container down."""
    # tasks_path = os.path.join(getLocalRoot(), '6.0', 'tasks')
    # preps_path = os.path.join(getLocalRoot(), '6.0', 'preps')
    # templates_path = os.path.join(getLocalRoot(), '6.0', 'templates')
    # box_metadata_path = os.path.join(getLocalRoot(), '6.0', 'box.metadata')
    # jobs_metadata_path = os.path.join(getLocalRoot(), '6.0', 'jobs.metadata')
    # prep_metadata_path = os.path.join(getLocalRoot(), '6.0', 'prep.metadata')
    # box_path = os.path.join(getLocalRoot(), '6.0', 'box')
    gangadir = getConfig("Configuration")['gangadir']
    database_config = get_database_config(gangadir)
    # Pick the container backend (native/docker/udocker/singularity) and start it.
    container_controller = controller_map[database_config["controller"]]
    container_controller(database_config=database_config,
                         action="start", gangadir=gangadir)
    PORT = database_config["port"]
    HOST = database_config["host"]
    connection_string = f"mongodb://{HOST}:{PORT}/"
    client = pymongo.MongoClient(connection_string)
    connection = client[database_config['dbname']]
    job_migrate(connection)
    job_metadata_migrate(connection)
    METHOD_NAME(connection)  # prep.metadata migration (placeholder name)
    # NOTE(review): templates_metadata_migrate is defined above but never
    # called here — confirm whether templates should also be migrated.
    container_controller(database_config=database_config, action="quit", gangadir=gangadir)


# Run the migration immediately when this module is executed/imported.
get_job_done()
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.