| blob_id (stringlengths 40..40) | directory_id (stringlengths 40..40) | path (stringlengths 4..721) | content_id (stringlengths 40..40) | detected_licenses (listlengths 0..57) | license_type (stringclasses 2 values) | repo_name (stringlengths 5..91) | snapshot_id (stringlengths 40..40) | revision_id (stringlengths 40..40) | branch_name (stringclasses 321 values) | visit_date (timestamp[ns], 2016-08-12 09:31:09..2023-09-06 10:45:07) | revision_date (timestamp[ns], 2010-09-28 14:01:40..2023-09-06 06:22:19) | committer_date (timestamp[ns], 2010-09-28 14:01:40..2023-09-06 06:22:19) | github_id (int64, 426..681M) | star_events_count (int64, 101..243k) | fork_events_count (int64, 0..110k) | gha_license_id (stringclasses 23 values) | gha_event_created_at (timestamp[ns], 2012-06-28 18:51:49..2023-09-14 21:59:16, nullable) | gha_created_at (timestamp[ns], 2008-02-11 22:55:26..2023-08-10 11:14:58, nullable) | gha_language (stringclasses 147 values) | src_encoding (stringclasses 26 values) | language (stringclasses 2 values) | is_vendor (bool, 2 classes) | is_generated (bool, 2 classes) | length_bytes (int64, 6..10.2M) | extension (stringclasses 115 values) | filename (stringlengths 3..113) | content (stringlengths 6..10.2M) |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
d70ac3d09b70a6a5a1a071b9d8ed5bad7fdd3bc4
|
6146e33102797407ede06ce2daa56c28fdfa2812
|
/python/GafferUITest/FrameTest.py
|
076407c7619ede58440ae47a00dbfbe04fe9e68d
|
[ "BSD-3-Clause" ] |
permissive
|
GafferHQ/gaffer
|
e1eb78ba8682bfbb7b17586d6e7b47988c3b7d64
|
59cab96598c59b90bee6d3fc1806492a5c03b4f1
|
refs/heads/main
| 2023-09-01T17:36:45.227956
| 2023-08-30T09:10:56
| 2023-08-30T09:10:56
| 9,043,124
| 707
| 144
|
BSD-3-Clause
| 2023-09-14T09:05:37
| 2013-03-27T00:04:53
|
Python
|
UTF-8
|
Python
| false
| false
| 3,084
|
py
|
FrameTest.py
|
##########################################################################
#
# Copyright (c) 2011-2012, John Haddon. All rights reserved.
# Copyright (c) 2011-2012, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# * Neither the name of John Haddon nor the names of
# any other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import unittest
import IECore
import Gaffer
import GafferTest
import GafferUI
import GafferUITest
class FrameTest( GafferUITest.TestCase ) :
@unittest.expectedFailure
def testGadget( self ) :
# because we're not putting gadgets and widgets in different namespaces,
# we have clashes where we want to name them the same. we need to resolve this.
self.assertTrue( issubclass( GafferUI.Frame, GafferUI.Gadget ) )
def testBorderStyle( self ) :
f = GafferUI.Frame()
self.assertEqual( f.getBorderStyle(), GafferUI.Frame.BorderStyle.Flat )
f.setBorderStyle( GafferUI.Frame.BorderStyle.None_ )
self.assertEqual( f.getBorderStyle(), GafferUI.Frame.BorderStyle.None_ )
def testRemoveChild( self ) :
f = GafferUI.Frame()
b = GafferUI.Button()
f.setChild( b )
self.assertTrue( b.parent() is f )
f.removeChild( b )
self.assertIsNone( b.parent() )
def testTransferChild( self ) :
f = GafferUI.Frame()
l = GafferUI.ListContainer()
b = GafferUI.Button()
l.append( b )
self.assertEqual( len( l ), 1 )
f.setChild( b )
self.assertTrue( b.parent() is f )
self.assertTrue( f.getChild() is b )
self.assertEqual( len( l ), 0 )
if __name__ == "__main__":
unittest.main()
|
75c417275544c5d9faf8c430ac76ad405b576c65
|
e8b38b8dfa348ff006eb197a7906ca8e491a23dc
|
/pyccel/ast/operators.py
|
d863a85d00d3d2aa679d646e47ce06ea687dd6e9
|
[ "MIT" ] |
permissive
|
pyccel/pyccel
|
d79a81dbdff1172839a6a1227abfcc1f97e6c97b
|
1896b761ba662c90b14c195bbb6eb5cddc57cbfc
|
refs/heads/devel
| 2023-08-30T12:15:25.244401
| 2023-08-28T09:31:32
| 2023-08-28T09:31:32
| 100,463,736
| 307
| 39
|
MIT
| 2023-09-14T19:29:26
| 2017-08-16T07:59:14
|
Python
|
UTF-8
|
Python
| false
| false
| 35,482
|
py
|
operators.py
|
#------------------------------------------------------------------------------------------#
# This file is part of Pyccel which is released under MIT License. See the LICENSE file or #
# go to https://github.com/pyccel/pyccel/blob/master/LICENSE for full license details. #
#------------------------------------------------------------------------------------------#
"""
Module handling all python builtin operators
These operators all have a precedence as detailed here:
https://docs.python.org/3/reference/expressions.html#operator-precedence
They also have specific rules to determine the dtype, precision, rank, shape
"""
from pyccel.utilities.stage import PyccelStage
from ..errors.errors import Errors, PyccelSemanticError
from .basic import PyccelAstNode
from .datatypes import (NativeBool, NativeInteger, NativeFloat,
NativeComplex, NativeString,
NativeNumeric)
from .internals import max_precision
from .literals import Literal, LiteralInteger, LiteralFloat, LiteralComplex
from .literals import Nil, NilArgument
from .literals import convert_to_literal
errors = Errors()
pyccel_stage = PyccelStage()
__all__ = (
'PyccelOperator',
'PyccelArithmeticOperator',
'PyccelBinaryOperator',
'PyccelBooleanOperator',
'PyccelComparisonOperator',
'PyccelUnaryOperator',
'PyccelPow',
'PyccelAdd',
'PyccelMinus',
'PyccelMul',
'PyccelDiv',
'PyccelMod',
'PyccelFloorDiv',
'PyccelEq',
'PyccelNe',
'PyccelLt',
'PyccelLe',
'PyccelGt',
'PyccelGe',
'PyccelAnd',
'PyccelOr',
'PyccelNot',
'PyccelAssociativeParenthesis',
'PyccelUnary',
'PyccelUnarySub',
'Relational',
'PyccelIs',
'PyccelIsNot',
'IfTernaryOperator'
)
#==============================================================================
def broadcast(shape_1, shape_2):
""" This function broadcast two shapes using numpy broadcasting rules """
from pyccel.ast.sympy_helper import pyccel_to_sympy
if shape_1 is None and shape_2 is None:
return None
elif shape_1 is None:
new_shape_1 = (LiteralInteger(1),)*len(shape_2)
new_shape_2 = shape_2
elif shape_2 is None:
new_shape_1 = shape_1
new_shape_2 = (LiteralInteger(1),)*len(shape_1)
else:
a = len(shape_1)
b = len(shape_2)
if a>b:
new_shape_2 = (LiteralInteger(1),)*(a-b) + tuple(shape_2)
new_shape_1 = shape_1
elif b>a:
new_shape_1 = (LiteralInteger(1),)*(b-a) + tuple(shape_1)
new_shape_2 = shape_2
else:
new_shape_2 = shape_2
new_shape_1 = shape_1
new_shape = []
for e1,e2 in zip(new_shape_1, new_shape_2):
used_names = set()
symbol_map = {}
sy_e1 = pyccel_to_sympy(e1, symbol_map, used_names)
sy_e2 = pyccel_to_sympy(e2, symbol_map, used_names)
if sy_e1 == sy_e2:
new_shape.append(e1)
elif sy_e1 == 1:
new_shape.append(e2)
elif sy_e2 == 1:
new_shape.append(e1)
elif sy_e1.is_constant() and not sy_e2.is_constant():
new_shape.append(e1)
elif sy_e2.is_constant() and not sy_e1.is_constant():
new_shape.append(e2)
elif not sy_e2.is_constant() and not sy_e1.is_constant()\
and not (sy_e1 - sy_e2).is_constant():
new_shape.append(e1)
else:
shape1_code = '-'
shape2_code = '-'
if shape_1:
shape1_code = ' '.join(f'{s},' for s in shape_1)
shape1_code = f"({shape1_code})"
if shape_2:
shape2_code = ' '.join(f"{s}," for s in shape_2)
shape2_code = f"({shape2_code})"
msg = 'operands could not be broadcast together with shapes {} {}'
msg = msg.format(shape1_code, shape2_code)
raise PyccelSemanticError(msg)
return tuple(new_shape)
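# Illustrative sketch of the rules above (shapes are tuples of LiteralInteger):
# a length-1 extent stretches to match the other operand, and a shorter shape
# is padded with leading 1s, mirroring numpy's broadcasting semantics.
#   broadcast((LiteralInteger(3),), (LiteralInteger(2), LiteralInteger(3)))
#   -> (LiteralInteger(2), LiteralInteger(3))
#   broadcast((LiteralInteger(2),), (LiteralInteger(3),))
#   -> raises PyccelSemanticError (extents 2 and 3 are incompatible)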
#==============================================================================
class PyccelOperator(PyccelAstNode):
"""
Abstract superclass for all builtin operators.
The __init__ function is common
but the functions called by __init__ are specialised
Parameters
----------
args: tuple
The arguments passed to the operator
"""
__slots__ = ('_args', )
_attribute_nodes = ('_args',)
def __init__(self, *args):
self._args = tuple(self._handle_precedence(args))
if pyccel_stage == 'syntactic':
super().__init__()
return
self._set_dtype()
self._set_shape_rank()
# rank is None for lambda functions
self._set_order()
super().__init__()
def _set_dtype(self):
self._dtype, self._precision = self._calculate_dtype(*self._args) # pylint: disable=no-member
def _set_shape_rank(self):
self._shape, self._rank = self._calculate_shape_rank(*self._args) # pylint: disable=no-member
@property
def precedence(self):
""" The precedence of the operator as defined here:
https://docs.python.org/3/reference/expressions.html#operator-precedence
"""
return self._precedence
def _handle_precedence(self, args):
"""
Insert parentheses where necessary by examining the precedence of the operator
e.g:
PyccelMul(a,PyccelAdd(b,c))
means:
a*(b+c)
so this input will give:
PyccelMul(a, PyccelAssociativeParenthesis(PyccelAdd(b,c)))
Parentheses are also added where they are required for clarity
Parameters
----------
args: tuple
The arguments passed to the operator
Returns
-------
args: tuple
The arguments with the parentheses inserted
"""
precedence = [getattr(a, 'precedence', 17) for a in args]
if min(precedence) <= self._precedence:
new_args = []
for i, (a,p) in enumerate(zip(args, precedence)):
if (p < self._precedence or (p == self._precedence and i != 0)):
new_args.append(PyccelAssociativeParenthesis(a))
else:
new_args.append(a)
args = tuple(new_args)
return args
def __str__(self):
return repr(self)
def _set_order(self):
""" Sets the shape and rank
This is chosen to match the arguments if they are in agreement.
Otherwise it defaults to 'C'
"""
if self._rank is not None and self._rank > 1:
orders = [a.order for a in self._args if a.order is not None]
my_order = orders[0]
if all(o == my_order for o in orders):
self._order = my_order
else:
self._order = 'C'
else:
self._order = None
@property
def args(self):
""" Arguments of the operator
"""
return self._args
#==============================================================================
class PyccelUnaryOperator(PyccelOperator):
""" Abstract superclass representing a python
operator with only one argument
Parameters
----------
arg: PyccelAstNode
The argument passed to the operator
"""
__slots__ = ('_dtype', '_precision','_shape','_rank','_order')
@staticmethod
def _calculate_dtype(*args):
""" Sets the dtype and precision
They are chosen to match the argument
"""
a = args[0]
dtype = a.dtype
precision = a.precision
return dtype, precision
@staticmethod
def _calculate_shape_rank(*args):
""" Sets the shape and rank
They are chosen to match the argument
"""
a = args[0]
rank = a.rank
shape = a.shape
return shape, rank
#==============================================================================
class PyccelUnary(PyccelUnaryOperator):
"""
Class representing a call to the python positive operator.
I.e:
+a
is equivalent to:
PyccelUnary(a)
Parameters
----------
arg: PyccelAstNode
The argument passed to the operator
"""
__slots__ = ()
_precedence = 14
def _handle_precedence(self, args):
args = PyccelUnaryOperator._handle_precedence(self, args)
args = tuple(PyccelAssociativeParenthesis(a) if isinstance(a, PyccelUnary) else a for a in args)
return args
def __repr__(self):
return '+{}'.format(repr(self.args[0]))
#==============================================================================
class PyccelUnarySub(PyccelUnary):
"""
Class representing a call to the python negative operator.
I.e:
-a
is equivalent to:
PyccelUnarySub(a)
Parameters
----------
arg: PyccelAstNode
The argument passed to the operator
"""
__slots__ = ()
def __repr__(self):
return '-{}'.format(repr(self.args[0]))
#==============================================================================
class PyccelNot(PyccelUnaryOperator):
"""
Class representing a call to the python not operator.
I.e:
not a
is equivalent to:
PyccelNot(a)
Parameters
----------
arg: PyccelAstNode
The argument passed to the operator
"""
__slots__ = ()
_precedence = 6
@staticmethod
def _calculate_dtype(*args):
""" Sets the dtype and precision
The result of `not` is always a boolean
with default precision
"""
dtype = NativeBool()
precision = -1
return dtype, precision
@staticmethod
def _calculate_shape_rank(*args):
""" Sets the shape and rank
The result of `not` is always a scalar,
so the rank is 0 and the shape is None
"""
rank = 0
shape = None
return shape, rank
def __repr__(self):
return 'not {}'.format(repr(self.args[0]))
#==============================================================================
class PyccelAssociativeParenthesis(PyccelUnaryOperator):
"""
Class representing parentheses
Parameters
----------
arg: PyccelAstNode
The argument in the PyccelAssociativeParenthesis
"""
__slots__ = () # ok
_precedence = 18
def _handle_precedence(self, args):
return args
def __repr__(self):
return '({})'.format(repr(self.args[0]))
#==============================================================================
class PyccelBinaryOperator(PyccelOperator):
""" Abstract superclass representing a python
operator with two arguments
Parameters
----------
arg1: PyccelAstNode
The first argument passed to the operator
arg2: PyccelAstNode
The second argument passed to the operator
"""
__slots__ = ('_dtype','_precision','_shape','_rank','_order')
def __init__(self, arg1, arg2, simplify = False):
super().__init__(arg1, arg2)
@classmethod
def _calculate_dtype(cls, *args):
""" Sets the dtype and precision
If one argument is a string then all arguments must be strings
If the arguments are numeric then the dtype and precision
match the broadest type and the largest precision
e.g.
1 + 2j -> PyccelAdd(LiteralInteger, LiteralComplex) -> complex
"""
integers = [a for a in args if a.dtype in (NativeInteger(),NativeBool())]
floats = [a for a in args if a.dtype is NativeFloat()]
complexes = [a for a in args if a.dtype is NativeComplex()]
strs = [a for a in args if a.dtype is NativeString()]
if strs:
assert len(integers + floats + complexes) == 0
return cls._handle_str_type(strs)
elif complexes:
return cls._handle_complex_type(args)
elif floats:
return cls._handle_float_type(args)
elif integers:
return cls._handle_integer_type(args)
else:
raise TypeError('cannot determine the type of {}'.format(args))
@staticmethod
def _handle_str_type(strs):
"""
Set dtype and precision when both arguments are strings
"""
raise TypeError("unsupported operand type(s) for /: 'str' and 'str'")
@staticmethod
def _handle_complex_type(complexes):
"""
Set dtype and precision when the result is a complex
"""
dtype = NativeComplex()
precision = max_precision(complexes)
return dtype, precision
@staticmethod
def _handle_float_type(floats):
"""
Set dtype and precision when the result is a float
"""
dtype = NativeFloat()
precision = max_precision(floats)
return dtype, precision
@staticmethod
def _handle_integer_type(integers):
"""
Set dtype and precision when the result is an integer
"""
dtype = NativeInteger()
precision = max_precision(integers)
return dtype, precision
@staticmethod
def _calculate_shape_rank(*args):
""" Sets the shape and rank
Strings must be scalars.
For numeric types the rank and shape are determined according
to numpy broadcasting rules where possible
"""
strs = [a for a in args if a.dtype is NativeString()]
if strs:
other = [a for a in args if a.dtype in (NativeInteger(), NativeBool(), NativeFloat(), NativeComplex())]
assert len(other) == 0
rank = 0
shape = None
else:
s = broadcast(args[0].shape, args[1].shape)
shape = s
rank = 0 if s is None else len(s)
return shape, rank
#==============================================================================
class PyccelArithmeticOperator(PyccelBinaryOperator):
""" Abstract superclass representing a python
arithmetic operator
This class is necessary to handle specific precedence
rules for arithmetic operators
I.e. to handle the error:
Extension: Unary operator following arithmetic operator (use parentheses)
Parameters
----------
arg1: PyccelAstNode
The first argument passed to the operator
arg2: PyccelAstNode
The second argument passed to the operator
"""
__slots__ = ()
def _handle_precedence(self, args):
args = PyccelBinaryOperator._handle_precedence(self, args)
args = tuple(PyccelAssociativeParenthesis(a) if isinstance(a, PyccelUnary) else a for a in args)
return args
#==============================================================================
class PyccelPow(PyccelArithmeticOperator):
"""
Class representing a call to the python exponent operator.
I.e:
a ** b
is equivalent to:
PyccelPow(a, b)
Parameters
----------
arg1: PyccelAstNode
The first argument passed to the operator
arg2: PyccelAstNode
The second argument passed to the operator
"""
__slots__ = ()
_precedence = 15
def __repr__(self):
return '{} ** {}'.format(self.args[0], self.args[1])
def _handle_precedence(self, args):
precedence = [getattr(a, 'precedence', 17) for a in args]
if min(precedence) <= self._precedence:
new_args = []
for i, (a,p) in enumerate(zip(args, precedence)):
if (p < self._precedence or (p == self._precedence and i != 1)):
new_args.append(PyccelAssociativeParenthesis(a))
else:
new_args.append(a)
args = tuple(new_args)
return args
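# Note the `i != 1` test above, where the base class uses `i != 0`: ** is
# right-associative, so only a left-hand operand of equal precedence needs
# parentheses. A sketch of the resulting reprs:
#   PyccelPow(PyccelPow(a, b), c)  ->  (a ** b) ** c
#   PyccelPow(a, PyccelPow(b, c))  ->  a ** b ** c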
#==============================================================================
class PyccelAdd(PyccelArithmeticOperator):
"""
Class representing a call to the python addition operator.
I.e:
a + b
is equivalent to:
PyccelAdd(a, b)
Parameters
----------
arg1: PyccelAstNode
The first argument passed to the operator
arg2: PyccelAstNode
The second argument passed to the operator
"""
__slots__ = ()
_precedence = 12
def __new__(cls, arg1, arg2, simplify = False):
if simplify:
if isinstance(arg2, PyccelUnarySub):
return PyccelMinus(arg1, arg2.args[0], simplify = True)
dtype, precision = cls._calculate_dtype(arg1, arg2)
if isinstance(arg1, Literal) and isinstance(arg2, Literal):
return convert_to_literal(arg1.python_value + arg2.python_value,
dtype, precision)
if dtype == arg2.dtype and precision == arg2.precision and \
isinstance(arg1, Literal) and arg1.python_value == 0:
return arg2
if dtype == arg1.dtype and precision == arg1.precision and \
isinstance(arg2, Literal) and arg2.python_value == 0:
return arg1
if isinstance(arg1, (LiteralInteger, LiteralFloat)) and \
isinstance(arg2, LiteralComplex) and \
arg2.real == LiteralFloat(0):
return LiteralComplex(arg1, arg2.imag)
elif isinstance(arg2, (LiteralInteger, LiteralFloat)) and \
isinstance(arg1, LiteralComplex) and \
arg1.real == LiteralFloat(0):
return LiteralComplex(arg2, arg1.imag)
else:
return super().__new__(cls)
@staticmethod
def _handle_str_type(strs):
dtype = NativeString()
precision = None
return dtype, precision
def __repr__(self):
return '{} + {}'.format(self.args[0], self.args[1])
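# Sketch of the simplify behaviour implemented in __new__ above (x stands for
# any integer-typed PyccelAstNode):
#   PyccelAdd(LiteralInteger(1), LiteralInteger(2), simplify=True)
#   -> LiteralInteger(3)   # constants are folded
#   PyccelAdd(x, LiteralInteger(0), simplify=True)
#   -> x                   # the additive identity is dropped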
#==============================================================================
class PyccelMul(PyccelArithmeticOperator):
"""
Class representing a call to the python multiplication operator.
I.e:
a * b
is equivalent to:
PyccelMul(a, b)
Parameters
----------
arg1: PyccelAstNode
The first argument passed to the operator
arg2: PyccelAstNode
The second argument passed to the operator
"""
__slots__ = ()
_precedence = 13
def __new__(cls, arg1, arg2, simplify = False):
if simplify:
if (arg1 == 1):
return arg2
if (arg2 == 1):
return arg1
if (arg1 == 0 or arg2 == 0):
dtype, precision = cls._calculate_dtype(arg1, arg2)
return convert_to_literal(0, dtype, precision)
if (isinstance(arg1, PyccelUnarySub) and arg1.args[0] == 1):
return PyccelUnarySub(arg2)
if (isinstance(arg2, PyccelUnarySub) and arg2.args[0] == 1):
return PyccelUnarySub(arg1)
if isinstance(arg1, Literal) and isinstance(arg2, Literal):
dtype, precision = cls._calculate_dtype(arg1, arg2)
return convert_to_literal(arg1.python_value * arg2.python_value,
dtype, precision)
return super().__new__(cls)
def __repr__(self):
return '{} * {}'.format(self.args[0], self.args[1])
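# The corresponding simplifications for multiplication (x stands for any
# integer-typed PyccelAstNode):
#   PyccelMul(x, LiteralInteger(1), simplify=True) -> x
#   PyccelMul(x, LiteralInteger(0), simplify=True) -> LiteralInteger(0)
#   PyccelMul(x, PyccelUnarySub(LiteralInteger(1)), simplify=True) -> PyccelUnarySub(x)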
#==============================================================================
class PyccelMinus(PyccelArithmeticOperator):
"""
Class representing a call to the python subtraction operator.
I.e:
a - b
is equivalent to:
PyccelMinus(a, b)
Parameters
----------
arg1: PyccelAstNode
The first argument passed to the operator
arg2: PyccelAstNode
The second argument passed to the operator
"""
__slots__ = ()
_precedence = 12
def __new__(cls, arg1, arg2, simplify = False):
if simplify:
if isinstance(arg2, PyccelUnarySub):
return PyccelAdd(arg1, arg2.args[0], simplify = True)
elif isinstance(arg1, Literal) and isinstance(arg2, Literal):
dtype, precision = cls._calculate_dtype(arg1, arg2)
return convert_to_literal(arg1.python_value - arg2.python_value,
dtype, precision)
if isinstance(arg1, LiteralFloat) and \
isinstance(arg2, LiteralComplex) and \
arg2.real == LiteralFloat(0):
return LiteralComplex(arg1, -arg2.imag.python_value)
elif isinstance(arg2, LiteralFloat) and \
isinstance(arg1, LiteralComplex) and \
arg1.real == LiteralFloat(0):
return LiteralComplex(-arg2.python_value, arg1.imag)
else:
return super().__new__(cls)
def __repr__(self):
return '{} - {}'.format(repr(self.args[0]), repr(self.args[1]))
#==============================================================================
class PyccelDiv(PyccelArithmeticOperator):
"""
Class representing a call to the python division operator.
I.e:
a / b
is equivalent to:
PyccelDiv(a, b)
Parameters
----------
arg1: PyccelAstNode
The first argument passed to the operator
arg2: PyccelAstNode
The second argument passed to the operator
"""
__slots__ = ()
_precedence = 13
def __new__(cls, arg1, arg2, simplify=False):
if simplify:
if (arg2 == 1):
return arg1
return super().__new__(cls)
@staticmethod
def _handle_integer_type(integers):
dtype = NativeFloat()
precision = -1
return dtype, precision
def __repr__(self):
return '{} / {}'.format(self.args[0], self.args[1])
#==============================================================================
class PyccelMod(PyccelArithmeticOperator):
"""
Class representing a call to the python modulo operator.
I.e:
a % b
is equivalent to:
PyccelMod(a, b)
Parameters
----------
arg1: PyccelAstNode
The first argument passed to the operator
arg2: PyccelAstNode
The second argument passed to the operator
"""
__slots__ = ()
_precedence = 13
def __repr__(self):
return '{} % {}'.format(self.args[0], self.args[1])
#==============================================================================
class PyccelFloorDiv(PyccelArithmeticOperator):
"""
Class representing a call to the python integer division operator.
I.e:
a // b
is equivalent to:
PyccelFloorDiv(a, b)
Parameters
----------
arg1: PyccelAstNode
The first argument passed to the operator
arg2: PyccelAstNode
The second argument passed to the operator
"""
__slots__ = ()
_precedence = 13
def __repr__(self):
return '{} // {}'.format(self.args[0], self.args[1])
#==============================================================================
class PyccelComparisonOperator(PyccelBinaryOperator):
""" Abstract superclass representing a python
comparison operator with two arguments
Parameters
----------
arg1: PyccelAstNode
The first argument passed to the operator
arg2: PyccelAstNode
The second argument passed to the operator
"""
__slots__ = ()
_precedence = 7
@staticmethod
def _calculate_dtype(*args):
dtype = NativeBool()
precision = -1
return dtype, precision
#==============================================================================
class PyccelEq(PyccelComparisonOperator):
"""
Class representing a call to the python equality operator.
I.e:
a == b
is equivalent to:
PyccelEq(a, b)
Parameters
----------
arg1: PyccelAstNode
The first argument passed to the operator
arg2: PyccelAstNode
The second argument passed to the operator
"""
__slots__ = ()
def __new__(cls, arg1, arg2, simplify = False):
if isinstance(arg1, Nil) or isinstance(arg2, Nil):
return PyccelIs(arg1, arg2)
else:
return super().__new__(cls)
def __repr__(self):
return '{} == {}'.format(self.args[0], self.args[1])
class PyccelNe(PyccelComparisonOperator):
"""
Class representing a call to the python inequality operator.
I.e:
a != b
is equivalent to:
PyccelNe(a, b)
Parameters
----------
arg1: PyccelAstNode
The first argument passed to the operator
arg2: PyccelAstNode
The second argument passed to the operator
"""
__slots__ = ()
def __new__(cls, arg1, arg2, simplify = False):
if isinstance(arg1, Nil) or isinstance(arg2, Nil):
return PyccelIsNot(arg1, arg2)
else:
return super().__new__(cls)
def __repr__(self):
return '{} != {}'.format(self.args[0], self.args[1])
class PyccelLt(PyccelComparisonOperator):
"""
Class representing a call to the python less than operator.
I.e:
a < b
is equivalent to:
PyccelLt(a, b)
Parameters
----------
arg1: PyccelAstNode
The first argument passed to the operator
arg2: PyccelAstNode
The second argument passed to the operator
"""
__slots__ = ()
def __repr__(self):
return '{} < {}'.format(self.args[0], self.args[1])
class PyccelLe(PyccelComparisonOperator):
"""
Class representing a call to the python less or equal operator.
I.e:
a <= b
is equivalent to:
PyccelLe(a, b)
Parameters
----------
arg1: PyccelAstNode
The first argument passed to the operator
arg2: PyccelAstNode
The second argument passed to the operator
"""
__slots__ = ()
def __repr__(self):
return '{} <= {}'.format(self.args[0], self.args[1])
class PyccelGt(PyccelComparisonOperator):
"""
Class representing a call to the python greater than operator.
I.e:
a > b
is equivalent to:
PyccelGt(a, b)
Parameters
----------
arg1: PyccelAstNode
The first argument passed to the operator
arg2: PyccelAstNode
The second argument passed to the operator
"""
__slots__ = ()
def __repr__(self):
return '{} > {}'.format(self.args[0], self.args[1])
class PyccelGe(PyccelComparisonOperator):
"""
Class representing a call to the python greater or equal operator.
I.e:
a >= b
is equivalent to:
PyccelGe(a, b)
Parameters
----------
arg1: PyccelAstNode
The first argument passed to the operator
arg2: PyccelAstNode
The second argument passed to the operator
"""
__slots__ = ()
def __repr__(self):
return '{} >= {}'.format(self.args[0], self.args[1])
#==============================================================================
class PyccelBooleanOperator(PyccelOperator):
""" Abstract superclass representing a python
boolean operator with two arguments
Parameters
----------
arg1: PyccelAstNode
The first argument passed to the operator
arg2: PyccelAstNode
The second argument passed to the operator
"""
dtype = NativeBool()
precision = -1
rank = 0
shape = None
order = None
__slots__ = ()
def _set_order(self):
pass
def _set_dtype(self):
pass
def _set_shape_rank(self):
pass
#==============================================================================
class PyccelAnd(PyccelBooleanOperator):
"""
Class representing a call to the python AND operator.
I.e:
a and b
is equivalent to:
PyccelAnd(a, b)
Parameters
----------
arg1: PyccelAstNode
The first argument passed to the operator
arg2: PyccelAstNode
The second argument passed to the operator
"""
__slots__ = ()
_precedence = 5
def _handle_precedence(self, args):
args = PyccelBooleanOperator._handle_precedence(self, args)
args = tuple(PyccelAssociativeParenthesis(a) if isinstance(a, PyccelOr) else a for a in args)
return args
def __repr__(self):
return '{} and {}'.format(self.args[0], self.args[1])
#==============================================================================
class PyccelOr(PyccelBooleanOperator):
"""
Class representing a call to the python OR operator.
I.e:
a or b
is equivalent to:
PyccelOr(a, b)
Parameters
----------
arg1: PyccelAstNode
The first argument passed to the operator
arg2: PyccelAstNode
The second argument passed to the operator
"""
__slots__ = ()
_precedence = 4
def _handle_precedence(self, args):
args = PyccelBooleanOperator._handle_precedence(self, args)
args = tuple(PyccelAssociativeParenthesis(a) if isinstance(a, PyccelAnd) else a for a in args)
return args
def __repr__(self):
return '{} or {}'.format(self.args[0], self.args[1])
#==============================================================================
class PyccelIs(PyccelBooleanOperator):
"""Represents a is expression in the code.
Examples
--------
>>> from pyccel.ast.operators import PyccelIs
>>> from pyccel.ast.literals import Nil
>>> from pyccel.ast.internals import PyccelSymbol
>>> x = PyccelSymbol('x')
>>> PyccelIs(x, Nil())
PyccelIs(x, None)
"""
__slots__ = ()
_precedence = 7
@property
def lhs(self):
""" First operator argument"""
return self._args[0]
@property
def rhs(self):
""" First operator argument"""
return self._args[1]
def __repr__(self):
return '{} is {}'.format(self.args[0], self.args[1])
def eval(self):
""" Determines the value of the expression `x is None` when `x` is known.
If a boolean value cannot be computed, return the string "unknown".
"""
# evaluate `x is None` when x = None
if self.rhs is Nil() and isinstance(self.lhs, NilArgument):
return True
# evaluate `x is None` when x is known and different from None
elif self.rhs is Nil() and not getattr(self.lhs, 'is_optional', False):
return False
# The result of the expression is unknown if the rhs is not None
# or the lhs is an optional variable
else:
return "unknown"
#==============================================================================
class PyccelIsNot(PyccelIs):
"""Represents a is expression in the code.
Examples
--------
>>> from pyccel.ast.operators import PyccelIsNot
>>> from pyccel.ast.literals import Nil
>>> from pyccel.ast.internals import PyccelSymbol
>>> x = PyccelSymbol('x')
>>> PyccelIsNot(x, Nil())
PyccelIsNot(x, None)
"""
__slots__ = ()
def __repr__(self):
return '{} is not {}'.format(self.args[0], self.args[1])
def eval(self):
""" Determines the value of the expression `x is not None` when `x` is known.
If a boolean value cannot be computed, return the string "unknown".
"""
# evaluate `x is not None` when x = None
if self.rhs is Nil() and isinstance(self.lhs, NilArgument):
return False
# evaluate `x is not None` when x is known and different from None
elif self.rhs is Nil() and not getattr(self.lhs, 'is_optional', False):
return True
# The result of the expression is unknown if the rhs is not None
# or the lhs is an optional variable
else:
return "unknown"
#==============================================================================
class IfTernaryOperator(PyccelOperator):
"""Represent a ternary conditional operator in the code, of the form (a if cond else b)
Parameters
----------
cond : PyccelAstNode
The condition
value_true : PyccelAstNode
The value returned if the condition is true
value_false : PyccelAstNode
The value returned if the condition is false
Examples
--------
>>> from pyccel.ast.internals import PyccelSymbol
>>> from pyccel.ast.core import Assign
>>> from pyccel.ast.operators import IfTernaryOperator
>>> n = PyccelSymbol('n')
>>> x = 5 if n > 1 else 2
>>> IfTernaryOperator(PyccelGt(n, 1), 5, 2)
IfTernaryOperator(PyccelGt(n, 1), 5, 2)
"""
__slots__ = ('_dtype','_precision','_shape','_rank','_order')
_precedence = 3
def __init__(self, cond, value_true, value_false):
super().__init__(cond, value_true, value_false)
if pyccel_stage == 'syntactic':
return
if isinstance(value_true , Nil) or isinstance(value_false, Nil):
errors.report('None is not implemented for Ternary Operator', severity='fatal')
if isinstance(value_true , NativeString) or isinstance(value_false, NativeString):
errors.report('String is not implemented for Ternary Operator', severity='fatal')
if value_true.dtype != value_false.dtype:
if value_true.dtype not in NativeNumeric or value_false.dtype not in NativeNumeric:
errors.report('The types are incompatible in IfTernaryOperator', severity='fatal')
if value_false.rank != value_true.rank :
errors.report('Ternary Operator results should have the same rank', severity='fatal')
if value_false.shape != value_true.shape :
errors.report('Ternary Operator results should have the same shape', severity='fatal')
@staticmethod
def _calculate_dtype(cond, value_true, value_false):
"""
Sets the dtype and precision for IfTernaryOperator
"""
if value_true.dtype in NativeNumeric and value_false.dtype in NativeNumeric:
dtype = max([value_true.dtype, value_false.dtype], key = NativeNumeric.index)
else:
dtype = value_true.dtype
precision = max_precision([value_true, value_false])
return dtype, precision
@staticmethod
def _calculate_shape_rank(cond, value_true, value_false):
"""
Sets the shape and rank and the order for IfTernaryOperator
"""
shape = value_true.shape
rank = value_true.rank
if rank is not None and rank > 1:
if value_false.order != value_true.order :
errors.report('Ternary Operator results should have the same order', severity='fatal')
return shape, rank
@property
def cond(self):
"""
The condition property for IfTernaryOperator class
"""
return self._args[0]
@property
def value_true(self):
"""
The value_if_cond_true property for IfTernaryOperator class
"""
return self._args[1]
@property
def value_false(self):
"""
The value_if_cond_false property for IfTernaryOperator class
"""
return self._args[2]
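# Sketch of the dtype promotion in _calculate_dtype above: with mixed numeric
# branches the wider type wins, assuming NativeNumeric is ordered from
# narrowest (bool) to widest (complex). E.g. for `x if cond else y` with an
# integer x and a float y, the result dtype is NativeFloat.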
#==============================================================================
Relational = (PyccelEq, PyccelNe, PyccelLt, PyccelLe, PyccelGt, PyccelGe, PyccelAnd, PyccelOr, PyccelNot, PyccelIs, PyccelIsNot)
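# Usage sketch: Relational groups every operator whose result is a boolean
# (comparisons, boolean logic and identity tests), so client code can write
#   if isinstance(expr, Relational):
#       ...  # expr evaluates to a boolean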
|
13e483275b169b32de20488e3b4307ff82a8b8c1
|
a3e2d421f94a8adf2c41ff1d093b5a06de1448d6
|
/server/pypi/packages/cmake-example/test.py
|
dd8e7dc66a8a0b82d7e074b0609c2260799fa921
|
[ "MIT" ] |
permissive
|
chaquo/chaquopy
|
09ef057015a756ce9b862732477b2549562720b4
|
e09bbe6ca5efd859d484b01e30131ccc944aa2b6
|
refs/heads/master
| 2023-08-31T22:09:22.230601
| 2023-08-31T13:07:57
| 2023-08-31T13:07:57
| 95,140,462
| 607
| 121
|
MIT
| 2023-09-13T19:17:29
| 2017-06-22T17:33:02
|
Python
|
UTF-8
|
Python
| false
| false
| 416
|
py
|
test.py
|
import unittest
class TestCmakeExample(unittest.TestCase):
def test_basic(self):
import cmake_example
self.assertEqual(4, cmake_example.add(2, 2))
with self.assertRaisesRegex(TypeError, "incompatible function arguments"):
cmake_example.add("one", "two")
self.assertEqual(1, cmake_example.subtract(3, 2))
self.assertEqual(-1, cmake_example.subtract(2, 3))
|
5aef60e68f47c840dbe451b1f13345c1fc12d8f7
|
0d1af55a4299a17a4e44ab2cc7ccd7ff259747ac
|
/tests/scanner/scanner.py
|
d3dc4aa542e43dcaef8e552cef59446cd69f76c8
|
[ "Apache-2.0" ] |
permissive
|
AdoptOpenJDK/openjdk-docker
|
551468f7d883bfdd0b64eec803c6c75a482321cb
|
e499808a1fd1d009c2855db9aee274add22f9aa0
|
refs/heads/master
| 2023-05-10T19:43:30.873783
| 2023-04-27T13:52:14
| 2023-04-27T13:52:14
| 104,258,329
| 442
| 285
|
Apache-2.0
| 2023-04-27T13:52:16
| 2017-09-20T19:21:31
|
Slim
|
UTF-8
|
Python
| false
| false
| 42,458
|
py
|
scanner.py
|
# ------------------------------------------------------------------------------
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ------------------------------------------------------------------------------
import requests
import json
import copy
import argparse
import logging
from logging import config
from datetime import datetime, timedelta
from pathlib import Path
LOGGER = logging.getLogger(__name__)
def load_logging_config(debug, file_path):
"""
Loads and configures a logging config
:param debug: True or False if debugging for console should be turned on
:param file_path: File path to storage the log file
:return: None
"""
logging_config = {
"version": 1,
"disable_existing_loggers": False,
"formatters": {
"debugFormater": {
"format": "%(asctime)s.%(msecs)03d %(levelname)s:%(message)s",
"datefmt": "%Y-%m-%d %H:%M:%S"
},
"simpleFormater": {
"format": "%(message)s"
}
},
"handlers": {
"file": {
"class": "logging.FileHandler",
"formatter": "debugFormater",
"level": "DEBUG",
"filename": "adoptopenjdk_scanner.log"
},
"console": {
"class": "logging.StreamHandler",
"formatter": "simpleFormater",
"level": "INFO",
"stream": "ext://sys.stdout"
}
},
"loggers": {
"": {
"level": "DEBUG",
"handlers": ["file"]
},
"__main__": {
"level": "DEBUG",
"handlers": ["console"],
"propagate": True
}
}
}
# If debugging, then switch the console format to be verbose
if debug:
logging_config["handlers"]["console"]["formatter"] = "debugFormater"
# If a file path is passed in then handle the prefix and append the file name
if file_path:
log_path = Path(file_path)
log_path = log_path.joinpath(logging_config["handlers"]["file"]["filename"])
logging_config["handlers"]["file"]["filename"] = str(log_path)
# Apply logging config
logging.config.dictConfig(logging_config)
LOGGER.debug("Logging Config: " + str(logging_config))
LOGGER.debug("Logging is configured")
def sanitize_build(build):
"""
Takes a build name and processes it for tagging
:param build: String - Name of build - (full/slim)
:return: String - Name of processed build - (""/-slim)
"""
if build == "full":
return ""
elif build == "slim":
return "-" + build
def sanitize_jvm(jvm):
"""
Takes a JVM name and processes it for tagging
:param jvm: String - Name of JVM - (hotspot/openj9)
:return: String - Name of processed JVM - (""/-openj9)
"""
if jvm == "hotspot":
return ""
elif jvm == "openj9":
return "-" + jvm
def docker_arch_names(arch):
"""
Convert architecture names to a friendly name
:param arch: String of the arch
:return: String of the "friendly" arch name
"""
if arch == "armv7l":
return "arm"
elif arch == "aarch64":
return "arm64"
elif arch == "x86_64":
return "amd64"
elif arch == "ppc64le":
return "ppc64le"
elif arch == "s390x":
return "s390x"
else:
LOGGER.error("{arch} is an unsupport architecture!".format(arch=arch))
raise ValueError("{arch} is an unsupport architecture!".format(arch=arch))
def convert_timedelta(duration):
"""
Takes in Timedelta and converts it to days, hours, minutes and seconds
:param duration: Timedelta Timestamp
:return: Days, Hours, Minutes and Seconds
"""
days, seconds = duration.days, duration.seconds
hours = seconds // 3600
minutes = (seconds % 3600) // 60
seconds = (seconds % 60)
# Make sure if negative numbers are rounded up to 0
days = max(0, days)
hours = max(0, hours)
minutes = max(0, minutes)
seconds = max(0, seconds)
return days, hours, minutes, seconds
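# Example: a delta of 26 hours and 30 minutes splits into
#   convert_timedelta(timedelta(hours=26, minutes=30))  -> (1, 2, 30, 0)
# i.e. 1 day, 2 hours, 30 minutes and 0 seconds.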
def enrich_list_with_image_json(image_list, docker_org="adoptopenjdk"):
"""
Enriches an image list with the image json data from docker api
:param image_list: List of images
:param docker_org: Name of the docker organization
:return: Enriched image list
"""
# Get a list that has only one copy of each possible image to save on image checks
manifest_list = get_manifest_list(image_list=image_list)
# Enrich the manifest list with image json
for image in manifest_list:
image_json = get_image_information(docker_org=docker_org, docker_repo="openjdk{version}{jvm}".format(version=image["version"], jvm=sanitize_jvm(image["jvm"])), tag_name=image["tag"])
image["image_json"] = image_json
# Enrich the full image list with image json to avoid calling the same manifest 4 or 5 times (for each arch)
for image in image_list:
for enrich_image in manifest_list:
if image["tag"] == enrich_image["tag"] and image["jvm"] == enrich_image["jvm"]:
image["image_json"] = enrich_image["image_json"]
return image_list
def deenrich_list_with_image_json(enriched_image_list):
"""
De-enrich the image list
:param enriched_image_list: List of enriched images
:return: De-enriched image list
"""
# For each image delete image json
for image in enriched_image_list:
if "image_json" in image:
del image["image_json"]
return enriched_image_list
def get_image_information(docker_org, docker_repo, tag_name):
"""
Fetch image json from DockerHub for an image
:param docker_org: Name of docker organization
:param docker_repo: Name of docker repo
:param tag_name: Name of tag
:return: JSON of the image
"""
LOGGER.debug("Getting image information for: {org}/{repo}:{tag}".format(org=docker_org, repo=docker_repo, tag=tag_name))
response = requests.get("https://hub.docker.com/v2/repositories/{org}/{repo}/tags/{tag}".format(org=docker_org, repo=docker_repo, tag=tag_name))
# Checks if the response is not a 5XX or 4XX status code
if response.ok:
return response.json()
else:
# If "bad" status code print error
LOGGER.error("ERROR: Something went wrong grabbing image, {org}/{repo}:{tag}. HTTP Status Code: {code}".format(org=docker_org, repo=docker_repo, tag=tag_name, code=response.status_code))
return None
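# Usage sketch (hypothetical repo and tag names, built the same way as in
# enrich_list_with_image_json above):
#   info = get_image_information("adoptopenjdk", "openjdk11", "jdk11u-ubuntu-nightly")
#   if info is not None:
#       print(info.get("last_updated"))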
def get_last_updated_for_image(image_json):
"""
Grab "last_updated" timestamp from docker image json
:param image_json: Image JSON
:return: Datetime Object
"""
# Grab the timestamp for last time updated
last_updated = image_json.get("last_updated")
# If last_update is not empty
if last_updated is not None:
# Parse timestamp string to datetime object
timestamp = datetime.strptime(last_updated, "%Y-%m-%dT%H:%M:%S.%fZ")
return timestamp
else:
# This should not happen unless Docker API changes the format/response
LOGGER.error("last_updated value in the image json is not there. Has the DockerHub API changed?")
raise ValueError("last_updated value in the image json is not there. Has the DockerHub API changed?")
def get_manifest_list(image_list):
"""
Get a list with only the "manifest" images
:param image_list: Full image list
:return: "Unique" image list
"""
# Create a copy of the image list
manifest_list = copy.deepcopy(image_list)
# Delete arch from image dicts, thus produce a list with duplicate values
for image in manifest_list:
del image["arch"]
# Get unique list for manifest checking. Normal images list contain an entry for each arch thus "duplicates"
# Converts dict to tuple to be able to generate hash for comparison
manifest_list = [dict(t) for t in {tuple(d.items()) for d in manifest_list}]
return manifest_list
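# Sketch of the deduplication above: entries differing only by arch collapse
# into a single "manifest" entry once the arch key is removed.
#   get_manifest_list([{"tag": "jdk8u-ubuntu-nightly", "arch": "x86_64"},
#                      {"tag": "jdk8u-ubuntu-nightly", "arch": "aarch64"}])
#   -> [{"tag": "jdk8u-ubuntu-nightly"}]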
def get_unique_image_name_and_last_updated(enriched_image_list):
"""
Generate a list with "manifest" images only and last_update timestamp
:param enriched_image_list: Image list with image JSON
:return: List of tuples(image name and timestamp)
"""
# Use a set to avoid adding the same image twice
unique_list = set()
for image in enriched_image_list:
image_name = "adoptopenjdk/openjdk{version}{jvm}:{tag}".format(version=image["version"],
jvm=sanitize_jvm(image["jvm"]), tag=image["tag"])
last_updated = get_last_updated_for_image(image_json=image["image_json"])
# Need to use tuples not dicts to take advantage of a set
unique_list.add((image_name, last_updated))
return list(unique_list)
def generate_all_image(supported_versions, supported_jvms, supported_os, supported_packages, supported_builds, supported_archs, dict_image_template):
"""
Generates all possible combinations of images. Should take in any parameters that make up your image/tag
:param supported_versions: String - List of Versions
:param supported_jvms: String - List of JVMs
:param supported_os: String - List of OSs
:param supported_packages: String - List of Packages
:param supported_builds: String - List of Builds
:param supported_archs: String - List of Architectures
:param dict_image_template: Dict - Template Dict to store needed information about said image/tag
:return: List - All generated image/tag possibilities
"""
# A list to hold all the possible images
master_list = []
# Loop over every possible image and check if it needs to be tested
for version in supported_versions:
for jvm in supported_jvms:
for os in supported_os:
for package in supported_packages:
for build in supported_builds:
for arch in supported_archs:
# Using deep copy to make a new dict rather than overwriting the same one
template = copy.deepcopy(dict_image_template)
template["version"] = version
template["jvm"] = jvm
template["arch"] = arch
template["os"] = os
template["package"] = package
template["build"] = build
template["tag"] = "{package}{version}u-{os}-nightly{build}".format(package=package,
version=version, os=os,
build=sanitize_build(
build))
master_list.append(template)
return master_list
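# Sketch of the tag format produced above, for a hypothetical single-element
# call:
#   generate_all_image(["8"], ["hotspot"], ["ubuntu"], ["jdk"], ["slim"],
#                      ["x86_64"], {})
#   -> one dict whose "tag" is "jdk8u-ubuntu-nightly-slim"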
def is_valid_package_and_build(package, build):
"""
Returns false if the package and build combination is jre and slim, true otherwise
:param package: String - Package name - (jdk/jre)
:param build: String - Build name - (slim/full)
:return: Boolean - (True/False)
"""
# Currently we do not produce JRE SLIM builds
if package == "jre" and build == "slim":
LOGGER.debug("Package & Build Check Failed with {package} and {build}".format(package=package, build=build))
return False
return True
def filter_valid_package_and_build(image_list):
"""
Filter out any non-valid image by package and build
:param image_list: List - Collection of possible images/tags
:return: Tuple - First list is valid images and second list is non-valid images
"""
filtered_list = []
removed_list = []
# Loop over all the images
for image in image_list:
# If valid, add it to the filtered list
if is_valid_package_and_build(package=image["package"], build=image["build"]):
filtered_list.append(image)
# If non-valid add it to the "removed" list
else:
removed_list.append(image)
return (filtered_list, removed_list)
def is_valid_os_and_arch(os, arch):
"""
Returns true or false depending on arch and os combination
:param os: String - Name of OS - See supported OSs
:param arch: String - Name of Arch - See supported Archs
:return: Boolean - (True/False)
"""
# ClefOS only runs on s390x
if os == "clefos" and arch != "s390x":
LOGGER.debug("OS Check Failed with {os} and {arch}".format(os=os, arch=arch))
return False
# CentOS does not support s390x
elif os == "centos" and arch == "s390x":
LOGGER.debug("OS Check Failed with {os} and {arch}".format(os=os, arch=arch))
return False
# Ubi based images do not support armv7l
elif os == "ubi" and arch == "armv7l":
LOGGER.debug("OS Check Failed with {os} and {arch}".format(os=os, arch=arch))
return False
# Ubi-minimal based images do not support armv7l
elif os == "ubi-minimal" and arch == "armv7l":
LOGGER.debug("OS Check Failed with {os} and {arch}".format(os=os, arch=arch))
return False
return True
def filter_valid_os_and_arch(image_list):
"""
Filter out any non-valid image by package and build
:param image_list: List - Collection of possible images/tags
:return: Tuple - First list is valid images and second list is non-valid images
"""
filtered_list = []
removed_list = []
# Loop over all the images
for image in image_list:
# If valid, add it to the filtered list
if is_valid_os_and_arch(os=image["os"], arch=image["arch"]):
filtered_list.append(image)
# If non-valid add it to the "removed" list
else:
removed_list.append(image)
return (filtered_list, removed_list)
def is_valid_jvm_and_arch(jvm, arch):
"""
Check if the jvm and arch are a valid combination
:param jvm: Name of JVM
:param arch: Name of arch
:return: Boolean
"""
# Currently OpenJ9 does not support armv7l or aarch64
# But Hotspot supports all supported_archs
if jvm == "openj9" and (arch == "armv7l" or arch == "aarch64"):
LOGGER.debug("JVM Check Failed with {jvm} and {arch}".format(jvm=jvm, arch=arch))
return False
return True
def filter_valid_jvm_and_arch(image_list):
"""
Filter list based on jvm and arch
:param image_list: List of images
:return: Dict of filtered and removed images
"""
filtered_list = []
removed_list = []
# Loop over all the images
for image in image_list:
# If valid, add it to the filtered list
if is_valid_jvm_and_arch(jvm=image["jvm"], arch=image["arch"]):
filtered_list.append(image)
# If non-valid add it to the "removed" list
else:
removed_list.append(image)
return (filtered_list, removed_list)
def is_image_exist(docker_org, docker_repo, tag_name):
"""
Checks if image exists on DockerHub
:param docker_org: Name of docker organization
:param docker_repo: Name of docker repo
:param tag_name: Name of tag
:return: Boolean
"""
# Issue GET request to get a HTTP Status code to check if it is a valid image
# Using GET instead of HEAD because HEAD is not handled correctly; enable streaming so only the headers are fetched
response = requests.get("https://hub.docker.com/v2/repositories/{org}/{repo}/tags/{tag}".format(org=docker_org, repo=docker_repo, tag=tag_name), stream=True)
LOGGER.debug("HTTP Status Code: {code}".format(code=response.status_code))
# Checks if the response is not a 5XX or 4XX status code
if response.ok:
return True
elif response.status_code == 404:
LOGGER.debug("ERROR: Image, {org}/{repo}:{tag}, does not exist!".format(org=docker_org, repo=docker_repo, tag=tag_name))
return False
else:
# Should never get another type of status code from Dockerhub then 200 or 404
LOGGER.error("ERROR: When requesting the image, {org}/{repo}:{tag}, we got the HTTP status code, {code}. Network issues?".format(org=docker_org, repo=docker_repo, tag=tag_name, code=response.status_code))
raise ValueError("ERROR: When requesting the image, {org}/{repo}:{tag}, we got the HTTP status code, {code}. Network issues?".format(org=docker_org, repo=docker_repo, tag=tag_name, code=response.status_code))
def filter_image_exist(docker_org, image_list):
"""
Filter images based on if they exist or not
:param docker_org: Name of docker organization
:param image_list: List of images
:return: Dict of filtered and removed images
"""
filtered_list = []
removed_list = []
# Get a list that has only one copy of each possible image to save on image checks
manifest_list = get_manifest_list(image_list=image_list)
removed_manifest_list = []
# Loop over all possible images
for image in manifest_list:
if is_image_exist(docker_org=docker_org, docker_repo="openjdk{version}{jvm}".format(version=image["version"], jvm=sanitize_jvm(image["jvm"])), tag_name=image["tag"]) is not True:
removed_manifest_list.append(image["tag"])
# Filter the image list based on if the image did not exist
for image in image_list:
# Add image to the removed list if in removed_manifest_list
if image["tag"] in removed_manifest_list:
removed_list.append(image)
else:
filtered_list.append(image)
return (filtered_list, removed_list)
def is_arch_in_manifest(arch, image_json):
"""
Check if the architecture is in the manifest/image json
:param arch: Name of architecture
:param image_json: JSON of image
:return: Boolean
"""
# Grab the images value the image json. Should be a list if its a manifest
manifest_images = image_json.get("images")
if manifest_images is not None:
for image in manifest_images:
if docker_arch_names(arch=arch) == image.get("architecture"):
return True
return False
else:
LOGGER.error("images value in the image json is not there. Has the DockerHub API changed?")
raise ValueError("images value in the image json is not there. Has the DockerHub API changed?")
def filter_arch_in_manifest(enriched_image_list, filter_images=True):
"""
Filter image list based on if the architecture is in the manifest/image json
:param enriched_image_list: List of images with image JSON
:param filter_images: If set to False, images that are not in the manifest will remain in the list
:return: Dict of filtered and removed images
"""
filtered_list = []
removed_list = []
for image in enriched_image_list:
if is_arch_in_manifest(arch=image["arch"], image_json=image["image_json"]):
filtered_list.append(image)
else:
removed_list.append(image)
# If filter_images is false, we want to keep the "bad" images in the list
if filter_images is False:
filtered_list = copy.deepcopy(enriched_image_list)
return (filtered_list, removed_list)
def is_timedelta(timestamp, current_time=None, delta_hours=2):
"""
Check if the given timestamp is within a time delta
:param timestamp: Timestamp of image(UTC)
:param current_time: Current UTC timestamp (defaults to now)
:param delta_hours: An integer of hours
:return: Boolean
"""
# Default to the current time here rather than in the signature: a
# datetime.utcnow() default argument would be evaluated only once, at
# function definition time.
if current_time is None:
current_time = datetime.utcnow()
# Check the timestamps are within a given delta
# If the given timestamp + delta hours is greater than the current time, the image is "new"
# If it is less than the current time, the image is deemed "old"
if (timestamp + timedelta(hours=delta_hours)) > current_time:
return True
else:
return False
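# Example: an image pushed one hour ago is "new" for a two hour delta, while
# one pushed three hours ago is "old".
#   now = datetime.utcnow()
#   is_timedelta(now - timedelta(hours=1), current_time=now)  # True
#   is_timedelta(now - timedelta(hours=3), current_time=now)  # False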
def filter_timedelta(enriched_image_list, delta_hours=2):
"""
Filter images based on time delta
:param enriched_image_list: List of images with image JSON
:param delta_hours: An integer of hours
:return: Dict of filtered and removed images
"""
filtered_list = []
removed_list = []
for image in enriched_image_list:
if is_timedelta(timestamp=get_last_updated_for_image(image_json=image["image_json"]), current_time=datetime.utcnow(), delta_hours=delta_hours):
filtered_list.append(image)
else:
removed_list.append(image)
return (filtered_list, removed_list)
def general_filters(image_list, dict_images_template):
"""
Filters out images that are not valid and should not exist
:param image_list: List of images
:param dict_images_template: Images template
:return: Dict of images
"""
# Filter by package and build
image_list, dict_images_template["package_and_build"] = filter_valid_package_and_build(image_list)
# Filter by os and arch
image_list, dict_images_template["os_and_arch"] = filter_valid_os_and_arch(image_list)
# Filter by jvm and arch
image_list, dict_images_template["jvm_and_arch"] = filter_valid_jvm_and_arch(image_list)
# Remaining images must be valid after going through the above filters
dict_images_template["filtered_images"] = image_list
return dict_images_template
def verify_images(image_list, dict_images_template, docker_org="adoptopenjdk"):
"""
Verify a list of images exists
:param image_list: List of images
:param dict_images_template: Images template
:param docker_org: Name of docker organization
:return: Dict of images
"""
# Apply general filter for the image list. This makes sure all images are valid
dict_images_template = general_filters(image_list, dict_images_template)
# Check if the images exist by using the filter
dict_images_template["filtered_images"], dict_images_template["bad_requests"] = filter_image_exist(docker_org=docker_org, image_list=dict_images_template["filtered_images"])
return dict_images_template
def verify_manifests(image_list, dict_images_template, docker_org="adoptopenjdk", filter_bad_manifests=True):
"""
Verify a list of images have valid manifests
:param image_list: List of images
:param dict_images_template: Images template
:param docker_org: Name of docker organization
:param filter_bad_manifests: Filter out bad manifests from list if set to true
:return: Dict of images
"""
# Call verify images to make sure they all exist before further processing
dict_images_template = verify_images(image_list=image_list, dict_images_template=dict_images_template, docker_org=docker_org)
# Enrich the images with image JSON
enriched_image_list = enrich_list_with_image_json(image_list=dict_images_template["filtered_images"], docker_org=docker_org)
# Check if the manifests are "bad" by using the filter
dict_images_template["filtered_images"], dict_images_template["bad_manifests"] = filter_arch_in_manifest(enriched_image_list=enriched_image_list, filter_images=filter_bad_manifests)
# De-enrich the images before storing them into the dict
dict_images_template["bad_manifests"] = deenrich_list_with_image_json(enriched_image_list=dict_images_template["bad_manifests"])
return dict_images_template
def verify_timedelta(image_list, dict_images_template, docker_org="adoptopenjdk", filter_bad_manifests=True, delta_hours=2, force_old_images=False):
"""
Verify a list of images meet a given time delta
:param image_list: List of images
:param dict_images_template: Images template
:param docker_org: Name of docker organization
:param filter_bad_manifests: Filter out bad manifests from list if set to true
:param delta_hours: An integer of hours to deem an image "old"
:param force_old_images: Forces old images to not be filtered out
:return: Dict of images
"""
# Call verify manifests to make sure all manifests are okay. Verifying manifests also checks that the images exist
dict_images_template = verify_manifests(image_list=image_list, dict_images_template=dict_images_template, docker_org=docker_org, filter_bad_manifests=filter_bad_manifests)
    # When force_old_images is true, skip the delta time check
if force_old_images is not True:
dict_images_template["filtered_images"], dict_images_template["old_images"] = filter_timedelta(enriched_image_list=dict_images_template["filtered_images"], delta_hours=delta_hours)
return dict_images_template
def verify(image_list, dict_images_template, docker_org="adoptopenjdk", filter_bad_manifests=False, delta_hours=2, force_old_images=False):
"""
    Verify that a list of images passes all filters. Used to generate the list of images that need to be tested
:param image_list: List of images
:param dict_images_template: Images template
:param docker_org: Name of docker organization
:param filter_bad_manifests: Filter out bad manifests from list if set to true
    :param delta_hours: Number of hours after which an image is deemed "old"
:param force_old_images: Forces old images to not be filtered out
:return: Dict of images
"""
    # Call verify_timedelta to make sure no images are "old"; this also verifies the manifests and that the images exist
dict_images_template = verify_timedelta(image_list=image_list, dict_images_template=dict_images_template, docker_org=docker_org, filter_bad_manifests=filter_bad_manifests, delta_hours=delta_hours, force_old_images=force_old_images)
# De-enrich images before storing them in image dict
dict_images_template["filtered_images"] = deenrich_list_with_image_json(enriched_image_list=dict_images_template["filtered_images"])
return dict_images_template
def output_package_and_build(image_dict, json_output):
"""
Outputs a list of images that failed the package and build filter
:param image_dict: Dictionary of images
    :param json_output: Boolean for JSON output versus formatted printing
:return: None
"""
LOGGER.info("\nPackage and Build Image Issues({number}):".format(number=str(len(image_dict["package_and_build"]))))
for image in image_dict["package_and_build"]:
if json_output is False:
image_name = "adoptopenjdk/openjdk{version}{jvm}:{tag}".format(version=image["version"], jvm=sanitize_jvm(image["jvm"]), tag=image["tag"])
LOGGER.info("Package & Build Check Failed with {package} and {build} for image: {image_name}".format(package=image["package"], build=image["build"], image_name=image_name))
else:
LOGGER.info(json.dumps(image))
def output_os_and_arch(image_dict, json_output):
"""
Outputs a list of images that failed the os and arch filter
:param image_dict: Dictionary of images
    :param json_output: Boolean for JSON output versus formatted printing
:return: None
"""
LOGGER.info("\nOS and Image Image Issues({number}):".format(number=str(len(image_dict["os_and_arch"]))))
for image in image_dict["os_and_arch"]:
if json_output is False:
image_name = "adoptopenjdk/openjdk{version}{jvm}:{tag}".format(version=image["version"], jvm=sanitize_jvm(image["jvm"]), tag=image["tag"])
LOGGER.info("OS Check Failed with {os} and {arch} for image: {image_name}".format(os=image["os"], arch=image["arch"], image_name=image_name))
else:
LOGGER.info(json.dumps(image))
def output_jvm_and_arch(image_dict, json_output):
"""
Outputs a list of images that failed the jvm and arch filter
:param image_dict: Dictionary of images
    :param json_output: Boolean for JSON output versus formatted printing
:return: None
"""
LOGGER.info("\nJVM and Architecture Image Issues({number}):".format(number=str(len(image_dict["jvm_and_arch"]))))
for image in image_dict["jvm_and_arch"]:
if json_output is False:
image_name = "adoptopenjdk/openjdk{version}{jvm}:{tag}".format(version=image["version"], jvm=sanitize_jvm(image["jvm"]), tag=image["tag"])
LOGGER.info("JVM Check Failed with {jvm} and {arch} for image: {image_name}".format(jvm=image["jvm"], arch=image["arch"], image_name=image_name))
else:
LOGGER.info(json.dumps(image))
def output_bad_requests(image_dict, json_output, valid_images):
"""
    Outputs a list of images that do not exist in DockerHub (i.e. generated a bad request). Can also print "valid" images
:param image_dict: Dictionary of images
    :param json_output: Boolean for JSON output versus formatted printing
    :param valid_images: Boolean for whether valid images should be shown
:return: None
"""
manifest_list = get_manifest_list(image_list=image_dict["bad_requests"])
LOGGER.info("\nNonexistent(Bad Requests) Image Issues({number}):".format(number=str(len(manifest_list))))
for image in manifest_list:
if json_output is False:
image_name = "adoptopenjdk/openjdk{version}{jvm}:{tag}".format(version=image["version"], jvm=sanitize_jvm(image["jvm"]), tag=image["tag"])
LOGGER.info("Got a bad request for image: {image_name}".format(image_name=image_name))
else:
LOGGER.info(json.dumps(image))
if valid_images is True:
valid_manifest_list = get_manifest_list(image_list=image_dict["filtered_images"])
LOGGER.info("\nExistent(Good Requests) Images({number}):".format(number=str(len(valid_manifest_list))))
for image in valid_manifest_list:
if json_output is False:
image_name = "adoptopenjdk/openjdk{version}{jvm}:{tag}".format(version=image["version"], jvm=sanitize_jvm(image["jvm"]), tag=image["tag"])
LOGGER.info("Got a good request for image: {image_name}".format(image_name=image_name))
else:
LOGGER.info(json.dumps(image))
def output_old_images(image_dict, json_output, valid_images, delta_hours):
"""
    Outputs a list of images that are deemed "old" by a given time delta. Can also print "valid" images
:param image_dict: Dictionary of images
    :param json_output: Boolean for JSON output versus formatted printing
    :param valid_images: Boolean for whether valid images should be shown
    :param delta_hours: Number of hours after which an image is deemed "old"
:return: None
"""
if json_output is False:
image_name_and_last_updated = get_unique_image_name_and_last_updated(enriched_image_list=image_dict["old_images"])
LOGGER.info("\nDelta Time(Old) Image Issues({number}):".format(number=str(len(image_name_and_last_updated))))
for image_name, timestamp in image_name_and_last_updated:
age_of_image = datetime.utcnow() - timestamp
days, hours, minutes, seconds = convert_timedelta(age_of_image)
LOGGER.info("Failed delta time check of {delta_hours} hours with the age of {days} days, {hours:02d}:{minutes:02d}.{seconds:02d} for image: {image_name}".format(delta_hours=delta_hours, days=days, hours=hours, minutes=minutes, seconds=seconds, image_name=image_name))
if valid_images is True:
image_name_and_last_updated = get_unique_image_name_and_last_updated(enriched_image_list=image_dict["filtered_images"])
LOGGER.info("\nDelta Time(NEW) Images({number}):".format(number=str(len(image_name_and_last_updated))))
for image_name, timestamp in image_name_and_last_updated:
                age_of_image = datetime.utcnow() - timestamp
days, hours, minutes, seconds = convert_timedelta(age_of_image)
LOGGER.info("Passed delta time check of {delta_hours} hours with the age of {days} days, {hours:02d}:{minutes:02d}.{seconds:02d} for image: {image_name}".format(delta_hours=delta_hours, days=days, hours=hours, minutes=minutes, seconds=seconds, image_name=image_name))
else:
LOGGER.info("\nDelta Time(Old) RAW Image Issues({number}):".format(number=str(len(image_dict["old_images"]))))
for image in image_dict["old_images"]:
LOGGER.info(json.dumps(image))
if valid_images is True:
LOGGER.info("\nDelta Time(NEW) RAW Images({number}):".format(number=str(len(image_dict["filtered_images"]))))
for image in image_dict["filtered_images"]:
LOGGER.info(json.dumps(image))
def output_bad_manifests(image_dict, json_output):
"""
    Outputs a list of images that have manifest issues
:param image_dict: Dictionary of images
    :param json_output: Boolean for JSON output versus formatted printing
:return: None
"""
if json_output is False:
manifest_dict = {}
for image in image_dict["bad_manifests"]:
image_name = "adoptopenjdk/openjdk{version}{jvm}:{tag}".format(version=image["version"], jvm=sanitize_jvm(image["jvm"]), tag=image["tag"])
if image_name in manifest_dict:
manifest_dict[image_name] = manifest_dict[image_name] + ", " + image["arch"]
else:
manifest_dict[image_name] = image["arch"]
LOGGER.info("\nManifest Image Issues({number}):".format(number=str(len(image_dict["bad_manifests"]))))
for key, value in manifest_dict.items():
LOGGER.info(key + " : " + value)
else:
LOGGER.info("\nManifest RAW Image Issues({number}):".format(number=str(len(image_dict["bad_manifests"]))))
for image in image_dict["bad_manifests"]:
LOGGER.info(json.dumps(image))
def output_filtered_images(image_dict, json_output):
"""
Outputs a list of images that need to be tested
:param image_dict: Dictionary of images
    :param json_output: Boolean for JSON output versus formatted printing
:return: None
"""
if json_output is False:
manifest_list = get_manifest_list(image_list=image_dict["filtered_images"])
LOGGER.info("Valid(Filtered) Images({number}):".format(number=str(len(manifest_list))))
for image in manifest_list:
image_name = "adoptopenjdk/openjdk{version}{jvm}:{tag}".format(version=image["version"], jvm=sanitize_jvm(image["jvm"]), tag=image["tag"])
LOGGER.info("All attributes have been verified for image: {image_name}".format(image_name=image_name))
else:
LOGGER.info("Valid(Filtered) RAW Images({number}):".format(number=str(len(image_dict["filtered_images"]))))
for image in image_dict["filtered_images"]:
LOGGER.info(json.dumps(image))
def get_args():
"""
Processes and handles command line arguments
:return: Dict of command line arguments
"""
parser = argparse.ArgumentParser(description="AdoptOpenJDK Scanner allows a user to verify attributes about images")
parser.add_argument("--verify",
help="Name of the attribute you want to verify",
type=str,
choices=["all", "timedelta", "manifests", "images"],
default=None,
required=True)
parser.add_argument("--versions",
help="Java Versions",
nargs='+',
type=str,
choices=["8", "11", "14"],
default=["8", "11", "14"])
parser.add_argument("--jvms",
help="Name of the JVMs",
nargs='+',
type=str,
choices=["hotspot", "openj9"],
default=["hotspot", "openj9"])
parser.add_argument("--oss",
help="Names of the OSs",
nargs='+',
type=str,
choices=["alpine", "debian", "debianslim", "ubi", "ubi-minimal", "centos", "clefos", "ubuntu"],
default=["alpine", "debian", "debianslim", "ubi", "ubi-minimal", "centos", "clefos", "ubuntu"])
parser.add_argument("--packages",
help="Names of the Packages",
nargs='+',
type=str,
choices=["jdk", "jre"],
default=["jdk", "jre"])
parser.add_argument("--archs",
help="Architectures",
nargs='+',
type=str,
choices=["armv7l", "aarch64", "ppc64le", "s390x", "x86_64"],
default=["armv7l", "aarch64", "ppc64le", "s390x", "x86_64"])
parser.add_argument("--builds",
help="Name of the Builds",
nargs='+',
type=str,
choices=["slim", "full"],
default=["slim", "full"])
parser.add_argument("--filter-bad-manifests",
help="Filter out bad manifest images",
action="store_true",
default=False)
parser.add_argument("--delta-hours",
help="Number of hours to deem an image 'old'",
type=int,
default=2)
parser.add_argument("--force-old-images",
help="Force old images not to be filtered out",
action="store_true",
default=False)
parser.add_argument("--debug",
help="Enable Debug output",
action="store_true",
default=False)
parser.add_argument("--log-path",
help="Path to where the log file will be generated",
type=str,
default=None)
parser.add_argument("--json",
help="Prints JSON output for results instead of formatted strings",
action="store_true",
default=False)
parser.add_argument("--show-valid",
help="Prints valid objects in addition to the problematic objects. Only works for certain verify values",
action="store_true",
default=False)
return vars(parser.parse_args())
def run(parsed_args):
"""
Main function that takes in arguments and processes them
:param parsed_args: Dict of command line arguments
:return: None
"""
docker_organization = "adoptopenjdk"
image_template = {
"version": "",
"jvm": "",
"arch": "",
"os": "",
"package": "",
"build": "",
"tag": ""
}
images_template = {
"filtered_images": [],
"package_and_build": [],
"os_and_arch": [],
"jvm_and_arch": [],
"bad_requests": [],
"bad_manifests": [],
"old_images": []
}
LOGGER.info("Generating All Possible Images.......")
all_images = generate_all_image(supported_versions=parsed_args["versions"], supported_jvms=parsed_args["jvms"], supported_os=parsed_args["oss"], supported_packages=parsed_args["packages"], supported_builds=parsed_args["builds"], supported_archs=parsed_args["archs"], dict_image_template=image_template)
LOGGER.info("Processing images.......")
if parsed_args["verify"] == "all":
processed_dict = verify(image_list=all_images, dict_images_template=images_template, docker_org=docker_organization, filter_bad_manifests=parsed_args["filter_bad_manifests"], delta_hours=parsed_args["delta_hours"], force_old_images=parsed_args["force_old_images"])
if parsed_args["debug"]:
output_package_and_build(image_dict=processed_dict, json_output=parsed_args["json"])
output_os_and_arch(image_dict=processed_dict, json_output=parsed_args["json"])
output_jvm_and_arch(image_dict=processed_dict, json_output=parsed_args["json"])
output_bad_requests(image_dict=processed_dict, json_output=parsed_args["json"], valid_images=parsed_args["show_valid"])
output_bad_manifests(image_dict=processed_dict, json_output=parsed_args["json"])
output_old_images(image_dict=processed_dict, json_output=parsed_args["json"], valid_images=parsed_args["show_valid"], delta_hours=parsed_args["delta_hours"])
output_filtered_images(image_dict=processed_dict, json_output=parsed_args["json"])
elif parsed_args["verify"] == "timedelta":
processed_dict = verify_timedelta(image_list=all_images, dict_images_template=images_template, docker_org=docker_organization, filter_bad_manifests=parsed_args["filter_bad_manifests"], delta_hours=parsed_args["delta_hours"], force_old_images=parsed_args["force_old_images"])
output_old_images(image_dict=processed_dict, json_output=parsed_args["json"], valid_images=parsed_args["show_valid"], delta_hours=parsed_args["delta_hours"])
elif parsed_args["verify"] == "manifests":
processed_dict = verify_manifests(image_list=all_images, dict_images_template=images_template, docker_org=docker_organization, filter_bad_manifests=parsed_args["filter_bad_manifests"])
output_bad_manifests(image_dict=processed_dict, json_output=parsed_args["json"])
elif parsed_args["verify"] == "images":
processed_dict = verify_images(image_list=all_images, dict_images_template=images_template, docker_org=docker_organization)
output_bad_requests(image_dict=processed_dict, json_output=parsed_args["json"], valid_images=parsed_args["show_valid"])
if __name__ == "__main__":
# Parse the arguments passed in
args = get_args()
# Configure logging
load_logging_config(args["debug"], args["log_path"])
LOGGER.debug("Parsed arguments: " + str(args))
run(parsed_args=args)
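# Illustrative CLI usage (the script filename below is an assumption, not
# taken from this file):
#   python3 scanner.py --verify all --versions 8 11 --jvms hotspot --json
#   python3 scanner.py --verify timedelta --delta-hours 6 --show-valid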
|
034c94f5cb075231c3ca0ffea5b510d47a7805bd
|
fce81b804cae23f525a5ad4370b684bf0dc531a5
|
/tools/ci/push_docs_to_repo.py
|
0471e38246e3215ee91090f545d67d354a1044c4
|
[
"Zlib",
"BSD-3-Clause",
"MIT",
"Apache-2.0"
] |
permissive
|
numpy/numpy
|
ba2abcc1d2d46affbb6aabe5aed6407b4b57507e
|
dc2ff125493777a1084044e6cd6857a42ee323d4
|
refs/heads/main
| 2023-09-05T10:10:52.767363
| 2023-09-04T18:03:29
| 2023-09-04T18:03:29
| 908,607
| 25,725
| 11,968
|
BSD-3-Clause
| 2023-09-14T21:26:09
| 2010-09-13T23:02:39
|
Python
|
UTF-8
|
Python
| false
| false
| 2,596
|
py
|
push_docs_to_repo.py
|
#!/usr/bin/env python3
import argparse
import subprocess
import tempfile
import os
import sys
import shutil
parser = argparse.ArgumentParser(
description='Upload files to a remote repo, replacing existing content'
)
parser.add_argument('dir', help='directory of which content will be uploaded')
parser.add_argument('remote', help='remote to which content will be pushed')
parser.add_argument('--message', default='Commit bot upload',
help='commit message to use')
parser.add_argument('--committer', default='numpy-commit-bot',
help='Name of the git committer')
parser.add_argument('--email', default='numpy-commit-bot@nomail',
help='Email of the git committer')
parser.add_argument('--count', default=1, type=int,
help="minimum number of expected files, defaults to 1")
parser.add_argument(
'--force', action='store_true',
help='hereby acknowledge that remote repo content will be overwritten'
)
args = parser.parse_args()
args.dir = os.path.abspath(args.dir)
if not os.path.exists(args.dir):
print('Content directory does not exist')
sys.exit(1)
count = len([name for name in os.listdir(args.dir) if os.path.isfile(os.path.join(args.dir, name))])
if count < args.count:
print(f"Expected {args.count} top-directory files to upload, got {count}")
sys.exit(1)
def run(cmd, stdout=True):
pipe = None if stdout else subprocess.DEVNULL
try:
subprocess.check_call(cmd, stdout=pipe, stderr=pipe)
except subprocess.CalledProcessError:
print("\n! Error executing: `%s;` aborting" % ' '.join(cmd))
sys.exit(1)
workdir = tempfile.mkdtemp()
os.chdir(workdir)
run(['git', 'init'])
# ensure the working branch is called "main"
# (`--initial-branch=main` appeared to have failed on older git versions):
run(['git', 'checkout', '-b', 'main'])
run(['git', 'remote', 'add', 'origin', args.remote])
run(['git', 'config', '--local', 'user.name', args.committer])
run(['git', 'config', '--local', 'user.email', args.email])
print('- committing new content: "%s"' % args.message)
run(['cp', '-R', os.path.join(args.dir, '.'), '.'])
run(['git', 'add', '.'], stdout=False)
run(['git', 'commit', '--allow-empty', '-m', args.message], stdout=False)
print('- uploading as %s <%s>' % (args.committer, args.email))
if args.force:
run(['git', 'push', 'origin', 'main', '--force'])
else:
print('\n!! No `--force` argument specified; aborting')
print('!! Before enabling that flag, make sure you know what it does\n')
sys.exit(1)
shutil.rmtree(workdir)
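# Illustrative invocation (the remote URL is a placeholder, not taken from
# this script):
#   python tools/ci/push_docs_to_repo.py build/html \
#       git@github.com:numpy/doc.git --message "Docs build of <commit>" --force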
|
9b6bbc7b2e3e8430749a0010ee9e371d539997c9
|
4c4deee160ee4a3056b2973f6202e5cbaafb5925
|
/tests/functional/directory/test_proxy.py
|
371e87414740084cd374061021d35adcef705e16
|
[
"AGPL-3.0-only",
"LGPL-3.0-only"
] |
permissive
|
open-io/oio-sds
|
b9bc7cec283838a59b83b5279cb181cded69034f
|
08abd65aac86e47cf324826487ab9b475e014938
|
refs/heads/master
| 2023-09-05T05:09:29.653325
| 2023-08-01T16:37:37
| 2023-08-31T16:05:19
| 32,169,193
| 663
| 121
|
MIT
| 2022-03-04T13:08:03
| 2015-03-13T17:06:25
|
Python
|
UTF-8
|
Python
| false
| false
| 1,218
|
py
|
test_proxy.py
|
# Copyright (C) 2018 OpenIO SAS, as part of OpenIO SDS
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 3.0 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library.
from tests.utils import BaseTestCase, random_str
class TestProxyFailure(BaseTestCase):
def setUp(self):
super(TestProxyFailure, self).setUp()
def _test_admin_debug_on_srvtype(self, srvtype):
params = {"ref": random_str(64), "acct": random_str(64), "type": srvtype}
self.request("POST", self._url("admin/debug"), params=params)
def test_admin_debug_on_meta1(self):
self._test_admin_debug_on_srvtype("meta1")
def test_admin_debug_on_meta0(self):
self._test_admin_debug_on_srvtype("meta0")
|
653be7576bca6574e38f581a43d0275caea23cc9
|
d08cf46d3e16ab8e6a958731168469ba38daf069
|
/demo/poisson1D.py
|
126ff98eb63f178d558ad735a5b50d7fc1912518
|
[
"BSD-2-Clause"
] |
permissive
|
spectralDNS/shenfun
|
ce808edc5258c896f2cccfbd88e67153e3f621c9
|
bcda39d8d8e4741df1cafe719d81733cc1024def
|
refs/heads/master
| 2023-07-27T20:29:57.075970
| 2023-07-11T12:33:04
| 2023-07-11T12:33:04
| 79,914,066
| 190
| 46
|
BSD-2-Clause
| 2022-05-11T19:10:33
| 2017-01-24T13:29:02
|
Python
|
UTF-8
|
Python
| false
| false
| 2,768
|
py
|
poisson1D.py
|
r"""
Solve the Poisson equation in 1D
.. math::
-\nabla^2 u(x) = f(x), \quad x \in [a, b]
where :math:`a < b`. The equation to solve is
.. math::
-(\nabla^2 u, v) = (f, v)
and we need two boundary conditions. These boundary conditions
can be any combination of Dirichlet or Neumann, specified on
either side of the domain.
We create a function `main` that solves the problem by specifying
either one of::
0 : u(a), u(b)
1 : u'(a), u'(b)
2 : u(a), u'(b)
3 : u'(a), u(b)
4 : u(a), u'(a)
5 : u(b), u'(b)
Option 1 requires a constraint since it is a pure Neumann problem.
The constraint is set by fixing the zeroth basis function such
that :math:`\int_a^b u w dx` is in agreement with the analytical
solution.
"""
import os
import sympy as sp
import numpy as np
from shenfun import inner, div, grad, TestFunction, TrialFunction, FunctionSpace, \
Array, Function, la, dx
# Use sympy to compute a rhs, given an analytical solution
x = sp.symbols("x", real=True)
ue = sp.cos(5*sp.pi*(x+0.1)/2)
fe = -ue.diff(x, 2)
a = 0
b = 1
domain = (a, b)
bcs = {
0: f"u({a})={ue.subs(x, a).n()} && u({b})={ue.subs(x, b).n()}",
1: f"u'({a})={ue.diff(x, 1).subs(x, a).n()} && u'({b})={ue.diff(x, 1).subs(x, b).n()}",
2: f"u({a})={ue.subs(x, a).n()} && u'({b})={ue.diff(x, 1).subs(x, b).n()}",
3: f"u'({a})={ue.diff(x, 1).subs(x, a).n()} && u({b})={ue.subs(x, b).n()}",
4: f"u({a})={ue.subs(x, a).n()} && u'({a})={ue.diff(x, 1).subs(x, a).n()}",
5: f"u({b})={ue.subs(x, b).n()} && u'({b})={ue.diff(x, 1).subs(x, b).n()}",
}
def main(N, family, bc):
SD = FunctionSpace(N, family=family, domain=domain, bc=bcs[bc], alpha=0, beta=0) # alpha, beta are ignored by all other than jacobi
u = TrialFunction(SD)
v = TestFunction(SD)
constraint = ()
if bc == 1:
        # The Poisson equation with only Neumann boundary conditions requires a constraint
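        # The constraint below pins the zeroth expansion coefficient so that
        # the weighted integral of u matches the analytical solution (see the
        # module docstring).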
constraint = ((0, dx(Array(SD, buffer=ue), weighted=True)/dx(Array(SD, val=1), weighted=True)),)
# Compute right hand side of Poisson equation
f_hat = inner(v, fe)
# Get left hand side of Poisson equation
A0 = inner(v, -div(grad(u)))
# Solve
u_hat = Function(SD)
M = la.Solver(A0)
u_hat = M(f_hat, u_hat, constraints=constraint)
# Transform to real space
uj = u_hat.backward()
# Compare with analytical solution
ua = Array(SD, buffer=ue)
error = np.sqrt(inner(1, (uj-ua)**2))
print(f'poisson1D {SD.family()} L2 error = {error:2.6e}')
    if 'pytest' in os.environ:
assert error < 1e-5
return error
if __name__ == '__main__':
N = 36
for family in ('legendre', 'chebyshev', 'chebyshevu', 'jacobi'):
for bc in range(6):
error = main(N, family, bc)
|
9dd49e156892e28636d9e9708c9fdd3dc22bee6c
|
ffdc77394c5b5532b243cf3c33bd584cbdc65cb7
|
/tests/st/ops/gpu/test_instancenorm2d.py
|
fc2af91a23ea51d7a8709541f0fca7f960fb236c
|
[
"Apache-2.0",
"LicenseRef-scancode-proprietary-license",
"MPL-1.0",
"OpenSSL",
"LGPL-3.0-only",
"LicenseRef-scancode-warranty-disclaimer",
"BSD-3-Clause-Open-MPI",
"MIT",
"MPL-2.0-no-copyleft-exception",
"NTP",
"BSD-3-Clause",
"GPL-1.0-or-later",
"0BSD",
"MPL-2.0",
"LicenseRef-scancode-free-unknown",
"AGPL-3.0-only",
"Libpng",
"MPL-1.1",
"IJG",
"GPL-2.0-only",
"BSL-1.0",
"Zlib",
"LicenseRef-scancode-public-domain",
"LicenseRef-scancode-python-cwi",
"BSD-2-Clause",
"LicenseRef-scancode-gary-s-brown",
"LGPL-2.1-only",
"LicenseRef-scancode-other-permissive",
"Python-2.0",
"LicenseRef-scancode-mit-nagy",
"LicenseRef-scancode-other-copyleft",
"LicenseRef-scancode-unknown-license-reference",
"Unlicense"
] |
permissive
|
mindspore-ai/mindspore
|
ca7d5bb51a3451c2705ff2e583a740589d80393b
|
54acb15d435533c815ee1bd9f6dc0b56b4d4cf83
|
refs/heads/master
| 2023-07-29T09:17:11.051569
| 2023-07-17T13:14:15
| 2023-07-17T13:14:15
| 239,714,835
| 4,178
| 768
|
Apache-2.0
| 2023-07-26T22:31:11
| 2020-02-11T08:43:48
|
C++
|
UTF-8
|
Python
| false
| false
| 2,377
|
py
|
test_instancenorm2d.py
|
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
import numpy as np
import pytest
import mindspore.context as context
import mindspore.nn as nn
from mindspore import Tensor
from mindspore.common.api import jit
from mindspore.ops import functional as F
from mindspore.ops.composite import GradOperation
context.set_context(mode=context.GRAPH_MODE, device_target="GPU")
class Grad(nn.Cell):
def __init__(self, network):
super(Grad, self).__init__()
self.grad = GradOperation(get_all=True, sens_param=True)
self.network = network
@jit
def construct(self, input_x, grad):
return self.grad(self.network)(input_x, grad)
class Net(nn.Cell):
def __init__(self, n):
super(Net, self).__init__()
self.ops = nn.BatchNorm2d(n, use_batch_statistics=True, gamma_init=0.5, beta_init=0.5)
def construct(self, x):
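        # Emulate InstanceNorm2d with BatchNorm2d: folding N and C into a
        # single channel axis of size N*C normalizes each (sample, channel)
        # plane over H and W independently, which is instance normalization.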
shape = F.shape(x)
return F.reshape(self.ops(F.reshape(x, (1, -1, shape[2], shape[3]))), shape)
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_InstanceNorm2d_fp32():
x_np = np.random.randn(3, 3, 2, 2).astype(np.float32)
bn_instance_comp = Net(3 * 3)
bn_instance_op = nn.InstanceNorm2d(3, gamma_init=0.5, beta_init=0.5)
comp_out = bn_instance_comp(Tensor(x_np))
op_out = bn_instance_op(Tensor(x_np))
assert np.allclose(comp_out.asnumpy(), op_out.asnumpy())
sens = np.random.randn(3, 3, 2, 2).astype(np.float32)
bn_comp_backward_net = Grad(bn_instance_comp)
bn_op_backward_net = Grad(bn_instance_op)
output1 = bn_comp_backward_net(Tensor(x_np), Tensor(sens))
output2 = bn_op_backward_net(Tensor(x_np), Tensor(sens))
assert np.allclose(output1[0].asnumpy(), output2[0].asnumpy())
|
a12d9dec752772afa2a55829420b239d5644fd9f
|
65078b8087c2040cf0188e2550ea298d20518f62
|
/src/bentoml/grpc/types.py
|
942aa44db0f686519ef483354e92ff6b6b2f17a9
|
[
"Apache-2.0"
] |
permissive
|
bentoml/BentoML
|
20ab6f8351b1c5cd116d6d60a28098246a1581b3
|
4a14f073d8a3e700aff29483b17ea053058c0c63
|
refs/heads/main
| 2023-09-05T16:03:08.909692
| 2023-09-04T18:54:33
| 2023-09-04T18:54:33
| 178,976,529
| 5,712
| 732
|
Apache-2.0
| 2023-09-14T20:07:54
| 2019-04-02T01:39:27
|
Python
|
UTF-8
|
Python
| false
| false
| 3,246
|
py
|
types.py
|
# pragma: no cover
"""
Specific types for BentoService gRPC server.
"""
from __future__ import annotations
from typing import TYPE_CHECKING
if TYPE_CHECKING:
import typing as t
from functools import partial
import grpc
from grpc import aio
from bentoml.grpc.v1.service_pb2 import Request
from bentoml.grpc.v1.service_pb2 import Response
from bentoml.grpc.v1.service_pb2_grpc import BentoServiceServicer
P = t.TypeVar("P")
BentoServicerContext = aio.ServicerContext[Request, Response]
RequestDeserializerFn = t.Callable[[Request | None], object] | None
ResponseSerializerFn = t.Callable[[bytes], Response | None] | None
HandlerMethod = t.Callable[[Request, BentoServicerContext], P]
AsyncHandlerMethod = t.Callable[[Request, BentoServicerContext], t.Awaitable[P]]
class RpcMethodHandler(
t.NamedTuple(
"RpcMethodHandler",
request_streaming=bool,
response_streaming=bool,
request_deserializer=RequestDeserializerFn,
response_serializer=ResponseSerializerFn,
unary_unary=t.Optional[HandlerMethod[Response]],
unary_stream=t.Optional[HandlerMethod[Response]],
stream_unary=t.Optional[HandlerMethod[Response]],
stream_stream=t.Optional[HandlerMethod[Response]],
),
grpc.RpcMethodHandler,
):
"""An implementation of a single RPC method."""
request_streaming: bool
response_streaming: bool
request_deserializer: RequestDeserializerFn
response_serializer: ResponseSerializerFn
unary_unary: t.Optional[HandlerMethod[Response]]
unary_stream: t.Optional[HandlerMethod[Response]]
stream_unary: t.Optional[HandlerMethod[Response]]
stream_stream: t.Optional[HandlerMethod[Response]]
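    # Inheriting from both the functional NamedTuple and grpc.RpcMethodHandler
    # keeps the handler immutable while still satisfying type checkers (and
    # isinstance checks) that expect a grpc.RpcMethodHandler.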
class HandlerCallDetails(
t.NamedTuple(
"HandlerCallDetails", method=str, invocation_metadata=aio.Metadata
),
grpc.HandlerCallDetails,
):
"""Describes an RPC that has just arrived for service.
Attributes:
method: The method name of the RPC.
invocation_metadata: A sequence of metadatum, a key-value pair included in the HTTP header.
An example is: ``('binary-metadata-bin', b'\\x00\\xFF')``
"""
method: str
invocation_metadata: aio.Metadata
# Servicer types
ServicerImpl = t.TypeVar("ServicerImpl")
Servicer = t.Annotated[ServicerImpl, object]
ServicerClass = t.Type[Servicer[t.Any]]
AddServicerFn = t.Callable[[Servicer[t.Any], aio.Server | grpc.Server], None]
# accepted proto fields
ProtoField = t.Annotated[
str,
t.Literal[
"dataframe",
"file",
"json",
"ndarray",
"series",
"text",
"multipart",
"serialized_bytes",
],
]
Interceptors = list[
t.Callable[[], aio.ServerInterceptor] | partial[aio.ServerInterceptor]
]
__all__ = [
"Request",
"Response",
"BentoServicerContext",
"BentoServiceServicer",
"HandlerCallDetails",
"RpcMethodHandler",
]
|
f402ad4897823824f14d4b27eb03888a384b62ee
|
a53076722d9696422b2d9f8b6166c21ed7876607
|
/misc/linkify_changelog.py
|
9656677f73c22c4863416683e89e3cb32bcc2361
|
[
"BSD-3-Clause"
] |
permissive
|
mahmoud/boltons
|
e22ef2b596d64240a2cbd924aedaa8f9e17f0c8c
|
46599bc0d498dd8adc3aea833ce1445feed349dd
|
refs/heads/master
| 2023-09-04T07:43:37.357470
| 2023-05-06T17:25:38
| 2023-05-06T17:25:38
| 8,307,391
| 6,607
| 449
|
NOASSERTION
| 2023-09-07T02:09:03
| 2013-02-20T06:17:12
|
Python
|
UTF-8
|
Python
| false
| false
| 1,367
|
py
|
linkify_changelog.py
|
from __future__ import print_function
import re
import sys
BASE_RTD_URL = 'http://boltons.readthedocs.org/en/latest/'
BASE_ISSUES_URL = 'https://github.com/mahmoud/boltons/issues/'
_issues_re = re.compile(r'#(\d+)')
_member_re = re.compile(r'((\w+utils)\.[a-zA-Z0-9_.]+)')
URL_MAP = {}
def sub_member_match(match):
full_name = match.group(1)
mod_name = match.group(2)
url = BASE_RTD_URL + mod_name + '.html#boltons.' + full_name
ret = '[%s][%s]' % (full_name, full_name)
URL_MAP[full_name] = url
# print ret
return ret
def sub_issue_match(match):
link_text = match.group(0)
issue_num = match.group(1)
link_target = 'i%s' % issue_num
link_url = BASE_ISSUES_URL + issue_num
ret = '[%s][%s]' % (link_text, link_target)
URL_MAP[link_target] = link_url
# print ret
return ret
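# Illustrative transforms (reference targets accumulate in URL_MAP):
#   "see iterutils.chunked" -> "see [iterutils.chunked][iterutils.chunked]"
#   "fixes #42"             -> "fixes [#42][i42]"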
def main():
try:
cl_filename = sys.argv[1]
except IndexError:
cl_filename = 'CHANGELOG.md'
    cl_text = open(cl_filename, 'rb').read().decode('utf-8')
ret = _member_re.sub(sub_member_match, cl_text)
ret = _issues_re.sub(sub_issue_match, ret)
link_map_lines = []
for (name, url) in sorted(URL_MAP.items()):
link_map_lines.append('[%s]: %s' % (name, url))
print(ret)
print()
print()
print('\n'.join(link_map_lines))
print()
if __name__ == '__main__':
main()
|
8e21580cc115d18c614a983b597fd58a35df8a23
|
a3559134386a4c00ab910b6da469fd566f7bc660
|
/examples/keystore/main.py
|
a81fb504e8d342cf01cf7a05db4686f04d28cf6d
|
[
"OLDAP-2.8"
] |
permissive
|
jnwatson/py-lmdb
|
a3fd6c39464a9cfbe4daf3c39646b306103c678e
|
57c692050b8d4f67ff7bcdec7acf38598de7c295
|
refs/heads/master
| 2023-04-24T12:15:09.041804
| 2023-04-06T06:32:04
| 2023-04-06T06:32:04
| 8,014,135
| 285
| 46
|
NOASSERTION
| 2023-04-06T06:20:40
| 2013-02-04T19:10:07
|
C
|
UTF-8
|
Python
| false
| false
| 622
|
py
|
main.py
|
from __future__ import absolute_import
import webbrowser
import twisted.internet.reactor
import lmdb
import keystore.lmdb
import keystore.webapi
def main():
port = 9999
interface = '127.0.0.1'
url = 'http://%s:%d/' % (interface, port)
env = lmdb.open('/tmp/foo')
reactor = twisted.internet.reactor
pool = reactor.getThreadPool()
store = keystore.lmdb.LmdbKeyStore(reactor, pool, env)
site = keystore.webapi.create_site(store)
reactor.listenTCP(port, site, interface=interface)
reactor.callLater(0, webbrowser.open, url)
reactor.run()
if __name__ == '__main__':
main()
|
85166de0b04cd1e463709d886d815e354752177d
|
04142fdda9b3fb29fb7456d5bc3e504985f24cbe
|
/tests/test_cnn/test_context_block.py
|
864cb417937603d162235c4a72b4eff09b151518
|
[
"Apache-2.0"
] |
permissive
|
open-mmlab/mmcv
|
419e301bbc1d7d45331d67eccfd673f290a796d5
|
6e9ee26718b22961d5c34caca4108413b1b7b3af
|
refs/heads/main
| 2023-08-31T07:08:27.223321
| 2023-08-28T09:02:10
| 2023-08-28T09:02:10
| 145,670,155
| 5,319
| 1,900
|
Apache-2.0
| 2023-09-14T02:37:16
| 2018-08-22T07:05:26
|
Python
|
UTF-8
|
Python
| false
| false
| 2,204
|
py
|
test_context_block.py
|
# Copyright (c) OpenMMLab. All rights reserved.
import pytest
import torch
from mmcv.cnn.bricks import ContextBlock
def test_context_block():
with pytest.raises(AssertionError):
# pooling_type should be in ['att', 'avg']
ContextBlock(16, 1. / 4, pooling_type='unsupport_type')
with pytest.raises(AssertionError):
# fusion_types should be of type list or tuple
ContextBlock(16, 1. / 4, fusion_types='unsupport_type')
with pytest.raises(AssertionError):
# fusion_types should be in ['channel_add', 'channel_mul']
ContextBlock(16, 1. / 4, fusion_types=('unsupport_type', ))
# test pooling_type='att'
imgs = torch.randn(2, 16, 20, 20)
context_block = ContextBlock(16, 1. / 4, pooling_type='att')
out = context_block(imgs)
assert context_block.conv_mask.in_channels == 16
assert context_block.conv_mask.out_channels == 1
assert out.shape == imgs.shape
# test pooling_type='avg'
imgs = torch.randn(2, 16, 20, 20)
context_block = ContextBlock(16, 1. / 4, pooling_type='avg')
out = context_block(imgs)
assert hasattr(context_block, 'avg_pool')
assert out.shape == imgs.shape
# test fusion_types=('channel_add',)
imgs = torch.randn(2, 16, 20, 20)
context_block = ContextBlock(16, 1. / 4, fusion_types=('channel_add', ))
out = context_block(imgs)
assert context_block.channel_add_conv is not None
assert context_block.channel_mul_conv is None
assert out.shape == imgs.shape
# test fusion_types=('channel_mul',)
imgs = torch.randn(2, 16, 20, 20)
context_block = ContextBlock(16, 1. / 4, fusion_types=('channel_mul', ))
out = context_block(imgs)
assert context_block.channel_add_conv is None
assert context_block.channel_mul_conv is not None
assert out.shape == imgs.shape
# test fusion_types=('channel_add', 'channel_mul')
imgs = torch.randn(2, 16, 20, 20)
context_block = ContextBlock(
16, 1. / 4, fusion_types=('channel_add', 'channel_mul'))
out = context_block(imgs)
assert context_block.channel_add_conv is not None
assert context_block.channel_mul_conv is not None
assert out.shape == imgs.shape
|
310b50818bc77e547af0d5a602a9c9bcc3505de6
|
1e528494a929deada984822438b3ab569762e6c6
|
/rx/concurrency/__init__.py
|
c546822d68bd30343d24b9e0494c9af6c679b1ab
|
[
"MIT"
] |
permissive
|
Sprytile/Sprytile
|
a0233a00a243f263691921d7e1f6af05c5eb5442
|
6b68d0069aef5bfed6ab40d1d5a94a3382b41619
|
refs/heads/master
| 2022-07-10T06:54:01.003723
| 2020-09-26T07:25:35
| 2020-09-26T07:25:35
| 72,276,917
| 860
| 91
|
MIT
| 2022-07-07T23:37:19
| 2016-10-29T09:47:09
|
Python
|
UTF-8
|
Python
| false
| false
| 1,130
|
py
|
__init__.py
|
from .scheduleditem import ScheduledItem
from .immediatescheduler import ImmediateScheduler, immediate_scheduler
from .currentthreadscheduler import CurrentThreadScheduler, \
current_thread_scheduler
from .virtualtimescheduler import VirtualTimeScheduler
from .timeoutscheduler import TimeoutScheduler, timeout_scheduler
from .newthreadscheduler import NewThreadScheduler, new_thread_scheduler
try:
from .threadpoolscheduler import ThreadPoolScheduler, thread_pool_scheduler
except ImportError:
pass
from .eventloopscheduler import EventLoopScheduler
from .historicalscheduler import HistoricalScheduler
from .catchscheduler import CatchScheduler
from .mainloopscheduler import AsyncIOScheduler
from .mainloopscheduler import IOLoopScheduler
from .mainloopscheduler import GEventScheduler
from .mainloopscheduler import GtkScheduler
from .mainloopscheduler import TwistedScheduler
from .mainloopscheduler import TkinterScheduler
from .mainloopscheduler import PyGameScheduler
from .mainloopscheduler import QtScheduler
from .mainloopscheduler import WxScheduler
from .mainloopscheduler import EventLetEventScheduler
|
2c03d9e004f463da65c7f09ab9544195e475ad9b
|
300a5e74fda0966941faf72646e38f3be54d77b5
|
/library/test/is31fl3730_test.py
|
36085dfa2ae6821941e691205de252d15403a657
|
[
"MIT"
] |
permissive
|
pimoroni/scroll-phat
|
c79f5f38a07546e5c82ee6546d9d09d85e20af36
|
5a0a47f6e4faac649d89054135baa542889ff3fa
|
refs/heads/master
| 2023-08-05T22:50:09.221052
| 2023-05-25T12:46:19
| 2023-05-25T12:46:19
| 46,125,454
| 119
| 76
|
MIT
| 2023-07-20T13:10:50
| 2015-11-13T14:08:48
|
Python
|
UTF-8
|
Python
| false
| false
| 2,196
|
py
|
is31fl3730_test.py
|
import unittest
from scrollphat.IS31FL3730 import IS31FL3730, I2cConstants
# Fakes i2c to allow testing off-device
# - May return canned data (stubbing)
# - May record method calls (mocking)
class FakeI2c:
def __init__(self):
self.write_i2c_block_data_calls = []
    def write_i2c_block_data(self, addr, mode, size):
        call = {"addr": addr, "mode": mode, "size": size}
self.write_i2c_block_data_calls.append(call)
def get_write_i2c_block_data(self):
return self.write_i2c_block_data_calls
class DeviceTest(unittest.TestCase):
def test_set_brightness_after_setting_it(self):
        font = {}
fakeI2c = FakeI2c()
sut = IS31FL3730(fakeI2c, font)
constants = I2cConstants()
sut.set_brightness(5)
        self.assertEqual(fakeI2c.write_i2c_block_data_calls[0]["size"], [5])
        self.assertEqual(sut.get_brightness(), 5)
def test_set_brightness_when_it_was_never_set(self):
        font = {}
fakeI2c = FakeI2c()
sut = IS31FL3730(fakeI2c, font)
constants = I2cConstants()
        self.assertEqual(sut.get_brightness(), -1)
def test_rotate5bits_inrange(self):
        font = {}
sut = IS31FL3730(FakeI2c(), font)
        self.assertEqual(sut.rotate5bits(1), 16)
        self.assertEqual(sut.rotate5bits(2), 8)
        self.assertEqual(sut.rotate5bits(4), 4)
        self.assertEqual(sut.rotate5bits(8), 2)
        self.assertEqual(sut.rotate5bits(16), 1)
def test_rotate5bits_outrange_returns_zero(self):
        font = {}
sut = IS31FL3730(FakeI2c(), font)
        self.assertEqual(sut.rotate5bits(0), 0)
        self.assertEqual(sut.rotate5bits(32), 0)
def test_create_scrollphat_with_fake_i2c(self):
        font = {}
sut = IS31FL3730(FakeI2c(), font)
self.assertTrue(sut is not None)
def test_set_mode_default(self):
        font = {}
fakeI2c = FakeI2c()
sut = IS31FL3730(fakeI2c, font)
constants = I2cConstants()
sut.set_mode(constants.MODE_5X11)
        self.assertEqual(fakeI2c.write_i2c_block_data_calls[0]["size"], [constants.MODE_5X11])
if __name__ == '__main__':
unittest.main()
|
83ae29b19b987bc6dd06c467dff2f5d9afff9b57
|
09d0d2143026b731ad3c906581291a3c0a83e849
|
/src/poliastro/twobody/propagation/cowell.py
|
26b8af2df65ca516b65885d7fe41227f53bd13a0
|
[
"MIT",
"LicenseRef-scancode-proprietary-license"
] |
permissive
|
poliastro/poliastro
|
5fbfeddd44ba5af234452d42f56bbd47c90a8bc4
|
55e96432b27301c5dffb4ef6b4f383d970c6e9c0
|
refs/heads/main
| 2023-08-21T17:20:58.631529
| 2023-05-07T07:54:00
| 2023-05-07T07:54:00
| 11,178,845
| 814
| 357
|
MIT
| 2023-08-29T10:03:59
| 2013-07-04T14:14:45
|
Python
|
UTF-8
|
Python
| false
| false
| 2,029
|
py
|
cowell.py
|
import sys
from astropy import units as u
from poliastro.core.propagation import cowell
from poliastro.core.propagation.base import func_twobody
from poliastro.twobody.propagation.enums import PropagatorKind
from poliastro.twobody.states import RVState
from ._compat import OldPropagatorModule
sys.modules[__name__].__class__ = OldPropagatorModule
class CowellPropagator:
"""Propagates orbit using Cowell's formulation.
Notes
-----
This method uses the Dormand & Prince integration method of order 8(5,3) (DOP853).
If multiple tofs are provided, the method propagates to the maximum value
(unless a terminal event is defined) and calculates the other values via dense output.
"""
kind = (
PropagatorKind.ELLIPTIC
| PropagatorKind.PARABOLIC
| PropagatorKind.HYPERBOLIC
)
def __init__(self, rtol=1e-11, events=None, f=func_twobody):
self._rtol = rtol
self._events = events
self._f = f
def propagate(self, state, tof):
state = state.to_vectors()
tofs = tof.reshape(-1)
rrs, vvs = cowell(
state.attractor.k.to_value(u.km**3 / u.s**2),
*state.to_value(),
tofs.to_value(u.s),
self._rtol,
events=self._events,
f=self._f,
)
r = rrs[-1] << u.km
v = vvs[-1] << (u.km / u.s)
new_state = RVState(state.attractor, (r, v), state.plane)
return new_state
def propagate_many(self, state, tofs):
state = state.to_vectors()
rrs, vvs = cowell(
state.attractor.k.to_value(u.km**3 / u.s**2),
*state.to_value(),
tofs.to_value(u.s),
self._rtol,
events=self._events,
f=self._f,
)
# TODO: This should probably return a RVStateArray instead,
# see discussion at https://github.com/poliastro/poliastro/pull/1492
return (
rrs << u.km,
vvs << (u.km / u.s),
)
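if __name__ == "__main__":
    # Minimal usage sketch (assumes the public poliastro Orbit API; the names
    # below are illustrative and not part of this module): propagate a
    # circular LEO orbit for 30 minutes with Cowell's method.
    from poliastro.bodies import Earth
    from poliastro.twobody import Orbit
    orb = Orbit.circular(Earth, alt=400 << u.km)
    later = orb.propagate(30 << u.min, method=CowellPropagator(rtol=1e-10))
    print(later.r)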
|
7549e10c6a908e57167bfac141c1c81b43f36d00
|
6f36df6219f8e50374068bb4b3e1a5387c7a2f34
|
/_setup/build_docs.py
|
acf634904c19da95124f5b066bc3dd6a4ff56e8b
|
[
"NIST-PD"
] |
permissive
|
usnistgov/fipy
|
0a3db715fea452ae710eea3999d9cd42dfe76fe7
|
fdc17193bc293da7511be9021e6d4766757e1966
|
refs/heads/master
| 2023-08-31T21:59:36.611448
| 2023-06-27T16:28:58
| 2023-06-27T16:28:58
| 23,316,495
| 444
| 171
|
NOASSERTION
| 2023-09-06T19:21:19
| 2014-08-25T14:27:58
|
Python
|
UTF-8
|
Python
| false
| false
| 2,786
|
py
|
build_docs.py
|
from distutils.core import Command
import os
__all__ = ["build_docs"]
class build_docs(Command):
description = "build the FiPy documentation"
# List of option tuples: long name, short name (None if no short
# name), and help string.
user_options = [('pdf', None, "compile the PDF variant of the documentation"),
('html', None, "compile the HTML variant of the documentation"),
('cathartic', None, "rewrite all the files (default is to only rewrite changed files)"),
]
    def initialize_options(self):
self.pdf = 0
self.html = 0
self.cathartic = 0
    def finalize_options(self):
pass
    def run(self):
import sphinx.cmd.build
import sphinx.ext.apidoc
sphinx_args = ['-P', '-n', '-c', 'documentation/', '.']
apidoc_args = []
if self.cathartic:
sphinx_args = ['-a', '-E'] + sphinx_args
apidoc_args = ['--force'] + apidoc_args
sphinx.ext.apidoc.main(['--output-dir=fipy/generated', '--suffix=rst']
+ apidoc_args + ['fipy'])
sphinx.ext.apidoc.main(['--output-dir=documentation/tutorial/package/generated', '--suffix=rst']
+ apidoc_args + ['documentation/tutorial/package'])
if self.html:
sphinx.cmd.build.main(['-b', 'redirecting_html'] + sphinx_args + ['documentation/_build/html/'])
if self.pdf:
try:
sphinx.cmd.build.main(['-b', 'latex'] + sphinx_args + ['documentation/_build/latex/'])
except SystemExit:
pass
outdir = os.path.join('documentation', '_build', 'latex')
from docutils.core import publish_file
for xtra in ("LICENSE", "DISCLAIMER"):
publish_file(source_path="%s.rst" % xtra,
destination_path=os.path.join(outdir, "%s.tex" % xtra),
reader_name='standalone',
parser_name='restructuredtext',
writer_name='latex',
settings_overrides= {
'template': 'documentation/_templates/empty.tex'
})
savedir = os.getcwd()
os.chdir(outdir)
os.system("pdflatex fipy")
os.system("pdflatex fipy")
os.system("pdflatex fipy")
os.system("makeindex -s python.ist fipy")
os.system("makeindex -s python.ist modfipy")
os.system("pdflatex fipy")
os.system("pdflatex fipy")
os.chdir(savedir)
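# Illustrative invocation, assuming this command is registered as "build_docs"
# in the package's setup.py cmdclass:
#   python setup.py build_docs --html
#   python setup.py build_docs --pdf --cathartic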
|
98f831f6a4aaf6e4d1597151a4fc94fb6746aa96
|
1ab5036a95066a18d889aa1186fd1609ff4b5923
|
/petl/test/io/test_sqlite3.py
|
5e9e72b46c54d44c2cd003355ea88aee8f89b68b
|
[
"MIT"
] |
permissive
|
petl-developers/petl
|
b52d6ee6e0ab16a1ba17f98a4aa2f791d93ba796
|
e829532e2ed350d00b96680d2d6774dec4a7f2e0
|
refs/heads/master
| 2023-08-30T02:36:09.816551
| 2023-08-22T15:07:19
| 2023-08-22T15:22:53
| 2,233,194
| 663
| 109
|
MIT
| 2023-08-22T15:22:55
| 2011-08-19T09:51:03
|
Python
|
UTF-8
|
Python
| false
| false
| 4,804
|
py
|
test_sqlite3.py
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import, print_function, division
from tempfile import NamedTemporaryFile
import sqlite3
from petl.test.helpers import ieq
from petl.io.db import fromdb, todb, appenddb
def test_fromsqlite3():
# initial data
f = NamedTemporaryFile(delete=False)
f.close()
data = (('a', 1),
('b', 2),
('c', 2.0))
connection = sqlite3.connect(f.name)
c = connection.cursor()
c.execute('CREATE TABLE foobar (foo, bar)')
for row in data:
c.execute('INSERT INTO foobar VALUES (?, ?)', row)
connection.commit()
c.close()
connection.close()
# test the function
actual = fromdb(f.name, 'SELECT * FROM foobar')
expect = (('foo', 'bar'),
('a', 1),
('b', 2),
('c', 2.0))
ieq(expect, actual, cast=tuple)
ieq(expect, actual, cast=tuple) # verify can iterate twice
def test_fromsqlite3_connection():
# initial data
data = (('a', 1),
('b', 2),
('c', 2.0))
connection = sqlite3.connect(':memory:')
c = connection.cursor()
c.execute('CREATE TABLE foobar (foo, bar)')
for row in data:
c.execute('INSERT INTO foobar VALUES (?, ?)', row)
connection.commit()
c.close()
# test the function
actual = fromdb(connection, 'SELECT * FROM foobar')
expect = (('foo', 'bar'),
('a', 1),
('b', 2),
('c', 2.0))
ieq(expect, actual, cast=tuple)
ieq(expect, actual, cast=tuple) # verify can iterate twice
def test_fromsqlite3_withargs():
# initial data
data = (('a', 1),
('b', 2),
('c', 2.0))
connection = sqlite3.connect(':memory:')
c = connection.cursor()
c.execute('CREATE TABLE foobar (foo, bar)')
for row in data:
c.execute('INSERT INTO foobar VALUES (?, ?)', row)
connection.commit()
c.close()
# test the function
actual = fromdb(
connection,
'SELECT * FROM foobar WHERE bar > ? AND bar < ?',
(1, 3)
)
expect = (('foo', 'bar'),
('b', 2),
('c', 2.0))
ieq(expect, actual)
ieq(expect, actual) # verify can iterate twice
def test_tosqlite3_appendsqlite3():
# exercise function
table = (('foo', 'bar'),
('a', 1),
('b', 2),
('c', 2))
f = NamedTemporaryFile(delete=False)
f.close()
conn = sqlite3.connect(f.name)
conn.execute('CREATE TABLE foobar (foo TEXT, bar INT)')
conn.close()
todb(table, f.name, 'foobar')
# check what it did
conn = sqlite3.connect(f.name)
actual = conn.execute('SELECT * FROM foobar')
expect = (('a', 1),
('b', 2),
('c', 2))
ieq(expect, actual)
# check appending
table2 = (('foo', 'bar'),
('d', 7),
('e', 9),
('f', 1))
appenddb(table2, f.name, 'foobar')
# check what it did
conn = sqlite3.connect(f.name)
actual = conn.execute('SELECT * FROM foobar')
expect = (('a', 1),
('b', 2),
('c', 2),
('d', 7),
('e', 9),
('f', 1))
ieq(expect, actual)
def test_tosqlite3_appendsqlite3_connection():
conn = sqlite3.connect(':memory:')
conn.execute('CREATE TABLE foobar (foo TEXT, bar INT)')
# exercise function
table = (('foo', 'bar'),
('a', 1),
('b', 2),
('c', 2))
todb(table, conn, 'foobar')
# check what it did
actual = conn.execute('SELECT * FROM foobar')
expect = (('a', 1),
('b', 2),
('c', 2))
ieq(expect, actual)
# check appending
table2 = (('foo', 'bar'),
('d', 7),
('e', 9),
('f', 1))
appenddb(table2, conn, 'foobar')
# check what it did
actual = conn.execute('SELECT * FROM foobar')
expect = (('a', 1),
('b', 2),
('c', 2),
('d', 7),
('e', 9),
('f', 1))
ieq(expect, actual)
def test_tosqlite3_identifiers():
# exercise function
table = (('foo foo', 'bar.baz.spong`'),
('a', 1),
('b', 2),
('c', 2))
f = NamedTemporaryFile(delete=False)
f.close()
conn = sqlite3.connect(f.name)
conn.execute('CREATE TABLE "foo "" bar`" '
'("foo foo" TEXT, "bar.baz.spong`" INT)')
conn.close()
todb(table, f.name, 'foo " bar`')
# check what it did
conn = sqlite3.connect(f.name)
actual = conn.execute('SELECT * FROM `foo " bar```')
expect = (('a', 1),
('b', 2),
('c', 2))
ieq(expect, actual)
# TODO test uneven rows
|
2b24f423b58ccdc071aa333fbf8597a2124bbbdf
|
bca3d1e208a5d0e3365a8a1766a0bfd09932367b
|
/tests/test_iterators.py
|
17d6846aaffa2c9c90b17d191beb2b2bcf4ab503
|
[
"Apache-2.0",
"LicenseRef-scancode-public-domain"
] |
permissive
|
trailofbits/polyfile
|
998f57d9ca015248019ec55c37f43fa743df8d6c
|
a116740e4baa5765699ca6b5049e9c41d262c325
|
refs/heads/master
| 2023-09-02T07:33:10.380904
| 2023-05-24T23:48:39
| 2023-05-24T23:48:39
| 193,975,534
| 303
| 18
|
Apache-2.0
| 2023-09-05T08:27:51
| 2019-06-26T20:48:20
|
Python
|
UTF-8
|
Python
| false
| false
| 1,258
|
py
|
test_iterators.py
|
import random
from typing import Iterator
from unittest import TestCase
from polyfile.iterators import LazyIterableSequence, LazyIterableSet, unique
class TestIterators(TestCase):
@staticmethod
def random_sequence(min_len: int = 0, max_len: int = 1000) -> Iterator[str]:
seq_length = random.randint(min_len, max_len)
for _ in range(seq_length):
yield chr(random.randint(ord('a'), ord('z')))
def test_lazy_sequence(self):
for _ in range(100):
ground_truth = list(TestIterators.random_sequence())
seq = LazyIterableSequence(ground_truth)
seq_iter = iter(seq)
if len(ground_truth) > 0:
self.assertEqual(next(seq_iter), ground_truth[0])
for i, s in enumerate(seq):
self.assertEqual(ground_truth[i], s)
self.assertEqual("".join(ground_truth), "".join(seq))
if len(ground_truth) > 1:
self.assertEqual(next(seq_iter), ground_truth[1])
def test_lazy_set(self):
for _ in range(100):
ground_truth = list(TestIterators.random_sequence())
seq = LazyIterableSet(ground_truth)
self.assertEqual("".join(unique(ground_truth)), "".join(seq))
|
39feee5e45db39f08963061a31c3f6424ec095dc
|
5b0ff689a3e14f42bdf688864cae40c931a5f685
|
/msa/modulos/apertura/constants.py
|
2d278d7676dd3dfcb4a526cc3b97e7bfdf424a1f
|
[] |
no_license
|
prometheus-ar/vot.ar
|
cd7012f2792a2504fb7f0ee43796a197fc82bd28
|
72d8fa1ea08fe417b64340b98dff68df8364afdf
|
refs/heads/2017-ago-salta
| 2021-01-02T22:19:41.591077
| 2017-08-25T11:55:49
| 2017-08-25T11:55:49
| 37,735,555
| 171
| 110
| null | 2020-06-30T13:33:49
| 2015-06-19T17:15:52
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 626
|
py
|
constants.py
|
"""Constantes para el modulo de apertura."""
TEXTOS = (
"muchas_gracias", "introduzca_acta_apertura", "puede_retirar_boleta",
"no_retirar_boleta", "agradecimiento", "aguarde_unos_minutos",
"aceptar", "cancelar", "retire_acta_apertura", "confirmar",
"acta_contiene_informacion", "volver_al_inicio",
"aguarde_procesando_acta", "aguarde_configurando_mesa",
"apertura_no_almacenada", "papel_no_puesto", "acta_apertura_mesa",
"acerque_acta_apertura", "acerque_acta_cierre", "mensaje_imprimiendo",
"introduzca_otra_apertura"
)
# Number of opening certificates (actas de apertura) to print
CANTIDAD_APERTURAS = 1
|
2850a2a63d500349e7e955f4acf95828b005193d
|
f3e2559f9f03135ef71a9573932d6e09360a6a38
|
/miniupnpc/setup.py
|
bbb102e2663f778a681df08d91bc6ad52783804e
|
[
"BSD-3-Clause"
] |
permissive
|
miniupnp/miniupnp
|
75751b7513dbf7dbf9aef2be7c3f49b459fdaee2
|
fb5c328a5e8fd57a3ec0f5d33915377a5d3581f3
|
refs/heads/master
| 2023-08-22T19:12:55.862346
| 2023-08-06T22:50:37
| 2023-08-06T22:50:37
| 2,435,778
| 1,246
| 475
|
BSD-3-Clause
| 2023-08-06T22:50:38
| 2011-09-22T08:25:20
|
C
|
UTF-8
|
Python
| false
| false
| 1,149
|
py
|
setup.py
|
#! /usr/bin/env python
# vim: tabstop=8 shiftwidth=8 expandtab
# $Id: setup.py,v 1.14 2020/04/06 10:23:02 nanard Exp $
# the MiniUPnP Project (c) 2007-2021 Thomas Bernard
# https://miniupnp.tuxfamily.org/ or http://miniupnp.free.fr/
#
# python script to build the miniupnpc module under unix
#
# Uses MAKE environment variable (defaulting to 'make')
from setuptools import setup, Extension
from setuptools.command import build_ext
import subprocess
import os
EXT = ['build/libminiupnpc.a']
class make_then_build_ext(build_ext.build_ext):
def run(self):
subprocess.check_call([os.environ.get('MAKE', 'make')] + EXT)
build_ext.build_ext.run(self)
setup(name="miniupnpc",
version=open('VERSION').read().strip(),
author='Thomas BERNARD',
author_email='miniupnp@free.fr',
license=open('LICENSE').read(),
url='http://miniupnp.free.fr/',
description='miniUPnP client',
cmdclass={'build_ext': make_then_build_ext},
ext_modules=[
Extension(name="miniupnpc", sources=["src/miniupnpcmodule.c"],
include_dirs=['include'], extra_objects=EXT)
])
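# Illustrative build, honoring the MAKE override mentioned in the header:
#   MAKE=gmake python setup.py build_ext --inplace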
|
877766bc25a42f31d47e921a55c9f651493b5072
|
3a24f63c8742560993b5465b26339e7c0ed05a27
|
/crates/ruff/resources/test/fixtures/pyflakes/F821_0.py
|
82c098db1c8d891ded67acb6971e479d5feca1dd
|
[
"BSD-3-Clause",
"0BSD",
"LicenseRef-scancode-free-unknown",
"GPL-1.0-or-later",
"MIT",
"Apache-2.0"
] |
permissive
|
astral-sh/ruff
|
8f1de11263474c6293454b02c728df2f113801db
|
82410524d9612f11387c2675a03869d489bb97ef
|
refs/heads/main
| 2023-08-02T23:20:34.351174
| 2023-08-02T21:32:43
| 2023-08-02T21:32:43
| 523,043,277
| 2,264
| 122
|
MIT
| 2023-09-14T20:08:59
| 2022-08-09T17:17:44
|
Rust
|
UTF-8
|
Python
| false
| false
| 2,320
|
py
|
F821_0.py
|
def get_name():
return self.name
def get_name():
return (self.name,)
def get_name():
del self.name
def get_name(self):
return self.name
x = list()
def randdec(maxprec, maxexp):
return numeric_string(maxprec, maxexp)
def ternary_optarg(prec, exp_range, itr):
for _ in range(100):
a = randdec(prec, 2 * exp_range)
b = randdec(prec, 2 * exp_range)
c = randdec(prec, 2 * exp_range)
yield a, b, c, None
yield a, b, c, None, None
class Foo:
CLASS_VAR = 1
REFERENCES_CLASS_VAR = {"CLASS_VAR": CLASS_VAR}
ANNOTATED_CLASS_VAR: int = 2
from typing import Literal
class Class:
copy_on_model_validation: Literal["none", "deep", "shallow"]
post_init_call: Literal["before_validation", "after_validation"]
def __init__(self):
Class
try:
x = 1 / 0
except Exception as e:
print(e)
y: int = 1
x: "Bar" = 1
[first] = ["yup"]
from typing import List, TypedDict
class Item(TypedDict):
nodes: List[TypedDict("Node", {"name": str})]
from enum import Enum
class Ticket:
class Status(Enum):
OPEN = "OPEN"
CLOSED = "CLOSED"
def set_status(self, status: Status):
self.status = status
def update_tomato():
print(TOMATO)
TOMATO = "cherry tomato"
A = f'{B}'
A = (
f'B'
f'{B}'
)
C = f'{A:{B}}'
C = f'{A:{f"{B}"}}'
from typing import Annotated, Literal
def arbitrary_callable() -> None:
...
class PEP593Test:
field: Annotated[
int,
"base64",
arbitrary_callable(),
123,
(1, 2, 3),
]
field_with_stringified_type: Annotated[
"PEP593Test",
123,
]
field_with_undefined_stringified_type: Annotated[
"PEP593Test123",
123,
]
field_with_nested_subscript: Annotated[
dict[Literal["foo"], str],
123,
]
field_with_undefined_nested_subscript: Annotated[
dict["foo", "bar"], # Expected to fail as undefined.
123,
]
def in_ipython_notebook() -> bool:
try:
# autoimported by notebooks
get_ipython() # type: ignore[name-defined]
except NameError:
return False # not in notebook
return True
def named_expr():
if any((key := (value := x)) for x in ["ok"]):
print(key)
|
93fb1e8d4ea4a42863555a1383900a011a08a350
|
a5a99f646e371b45974a6fb6ccc06b0a674818f2
|
/EgammaAnalysis/ElectronTools/python/calibratedElectronsRun2_cfi.py
|
c3f7c180dec8880d3fd56342ea6540a589dbab4d
|
[
"Apache-2.0"
] |
permissive
|
cms-sw/cmssw
|
4ecd2c1105d59c66d385551230542c6615b9ab58
|
19c178740257eb48367778593da55dcad08b7a4f
|
refs/heads/master
| 2023-08-23T21:57:42.491143
| 2023-08-22T20:22:40
| 2023-08-22T20:22:40
| 10,969,551
| 1,006
| 3,696
|
Apache-2.0
| 2023-09-14T19:14:28
| 2013-06-26T14:09:07
|
C++
|
UTF-8
|
Python
| false
| false
| 2,291
|
py
|
calibratedElectronsRun2_cfi.py
|
import FWCore.ParameterSet.Config as cms
correctionType = "80Xapproval"
files = {"Prompt2015":"EgammaAnalysis/ElectronTools/data/ScalesSmearings/74X_Prompt_2015",
"76XReReco" :"EgammaAnalysis/ElectronTools/data/ScalesSmearings/76X_16DecRereco_2015_Etunc",
"80Xapproval" : "EgammaAnalysis/ElectronTools/data/ScalesSmearings/80X_ichepV1_2016_ele"}
calibratedElectrons = cms.EDProducer("CalibratedElectronProducerRun2",
# input collections
electrons = cms.InputTag('gedGsfElectrons'),
gbrForestName = cms.string("gedelectron_p4combination_25ns"),
# data or MC corrections
# if isMC is false, data corrections are applied
isMC = cms.bool(False),
# set to True to get special "fake" smearing for synchronization. Use JUST in case of synchronization
isSynchronization = cms.bool(False),
correctionFile = cms.string(files[correctionType])
)
calibratedPatElectrons = cms.EDProducer("CalibratedPatElectronProducerRun2",
# input collections
electrons = cms.InputTag('slimmedElectrons'),
gbrForestName = cms.string("gedelectron_p4combination_25ns"),
# data or MC corrections
# if isMC is false, data corrections are applied
isMC = cms.bool(False),
# set to True to get special "fake" smearing for synchronization. Use JUST in case of synchronization
isSynchronization = cms.bool(False),
correctionFile = cms.string(files[correctionType])
)
|
5c40c48a25d3b92de02f849009b1f60de2d171da
|
d818edebb14d90c0b82186fb80e547e41a66e339
|
/src/Python/Filtering/PerlinNoise.py
|
77e0abfa721fe89cbedf7c192ec687f818db0332
|
[
"Apache-2.0"
] |
permissive
|
Kitware/vtk-examples
|
900de7c3cbbb42c014d2bc1ae70fe9783df01997
|
e696d92dcdc452cbde5df77d06877404542a3aae
|
refs/heads/master
| 2023-09-05T17:18:32.943962
| 2023-09-05T06:20:11
| 2023-09-05T06:20:11
| 281,112,041
| 162
| 64
|
Apache-2.0
| 2023-01-26T00:37:52
| 2020-07-20T12:36:50
|
C++
|
UTF-8
|
Python
| false
| false
| 1,761
|
py
|
PerlinNoise.py
|
#!/usr/bin/env python
# noinspection PyUnresolvedReferences
import vtkmodules.vtkInteractionStyle
# noinspection PyUnresolvedReferences
import vtkmodules.vtkRenderingOpenGL2
from vtkmodules.vtkCommonColor import vtkNamedColors
from vtkmodules.vtkCommonDataModel import vtkPerlinNoise
from vtkmodules.vtkFiltersCore import vtkContourFilter
from vtkmodules.vtkImagingHybrid import vtkSampleFunction
from vtkmodules.vtkRenderingCore import (
vtkActor,
vtkPolyDataMapper,
vtkRenderWindow,
vtkRenderWindowInteractor,
vtkRenderer
)
def main():
colors = vtkNamedColors()
perlinNoise = vtkPerlinNoise()
perlinNoise.SetFrequency(2, 1.25, 1.5)
perlinNoise.SetPhase(0, 0, 0)
sample = vtkSampleFunction()
sample.SetImplicitFunction(perlinNoise)
sample.SetSampleDimensions(65, 65, 20)
sample.ComputeNormalsOff()
surface = vtkContourFilter()
surface.SetInputConnection(sample.GetOutputPort())
surface.SetValue(0, 0.0)
mapper = vtkPolyDataMapper()
mapper.SetInputConnection(surface.GetOutputPort())
mapper.ScalarVisibilityOff()
actor = vtkActor()
actor.SetMapper(mapper)
actor.GetProperty().SetColor(colors.GetColor3d('SteelBlue'))
renderer = vtkRenderer()
renderWindow = vtkRenderWindow()
renderWindow.AddRenderer(renderer)
interactor = vtkRenderWindowInteractor()
interactor.SetRenderWindow(renderWindow)
# Add the actors to the renderer, set the background and size
renderer.AddActor(actor)
renderer.SetBackground(colors.GetColor3d('SlateGray'))
renderWindow.SetWindowName('PerlinNoise')
renderWindow.SetSize(300, 300)
renderer.ResetCamera()
renderWindow.Render()
interactor.Start()
if __name__ == '__main__':
main()
|
1d2ff8c540cbea4ca53395552c246d61342fb877
|
5142e81b50d15202ff79a34c9b888f18d2baec27
|
/plotnine/facets/__init__.py
|
9be638ce416b81a746e24d70ef37413988e4b4e1
|
[
"MIT"
] |
permissive
|
has2k1/plotnine
|
03c0e979b6b05b5e92cb869cca903cfce20988dc
|
ef5650c4aabb29dcfe810043fb0fc8a4ea83f14b
|
refs/heads/main
| 2023-08-30T22:17:07.835055
| 2023-08-08T07:57:53
| 2023-08-08T07:57:53
| 89,276,692
| 3,719
| 233
|
MIT
| 2023-08-08T13:09:24
| 2017-04-24T19:00:44
|
Python
|
UTF-8
|
Python
| false
| false
| 392
|
py
|
__init__.py
|
"""
Facets
"""
from .facet_grid import facet_grid
from .facet_null import facet_null
from .facet_wrap import facet_wrap
from .labelling import (
as_labeller,
label_both,
label_context,
label_value,
labeller,
)
__all__ = (
"facet_grid",
"facet_null",
"facet_wrap",
"label_value",
"label_both",
"label_context",
"labeller",
"as_labeller",
)
|
f0fe31dbbd0a55b703e3e6b637f0042777c43e5a
|
e1ea5f615a2ee3130926cdb506d4804742bc9fee
|
/tests/gold_tests/forward_proxy/forward_proxy.test.py
|
71bcc87630c19ab51a41d1c2343ccacaf477e2a0
|
[
"TCL",
"LicenseRef-scancode-proprietary-license",
"BSD-3-Clause",
"LicenseRef-scancode-openssl",
"LicenseRef-scancode-ssleay-windows",
"ISC",
"OpenSSL",
"MIT",
"HPND",
"Apache-2.0",
"BSD-2-Clause",
"LicenseRef-scancode-unknown-license-reference",
"LicenseRef-scancode-public-domain",
"LicenseRef-scancode-unknown"
] |
permissive
|
apache/trafficserver
|
1507804151ab2c2ac50012fdecee6200f2c52c1f
|
c5d7b19d60646de902fc847f80f57030779a4bfc
|
refs/heads/master
| 2023-08-31T20:10:00.381763
| 2023-08-31T19:15:34
| 2023-08-31T19:15:34
| 356,066
| 1,570
| 981
|
Apache-2.0
| 2023-09-14T21:04:36
| 2009-10-31T08:00:10
|
C++
|
UTF-8
|
Python
| false
| false
| 4,904
|
py
|
forward_proxy.test.py
|
"""
"""
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Union
Test.Summary = 'Verify ATS can function as a forward proxy'
Test.ContinueOnFail = True
class ForwardProxyTest:
_scheme_proto_mismatch_policy: Union[int, None]
_ts_counter: int = 0
_server_counter: int = 0
def __init__(self, verify_scheme_matches_protocol: Union[int, None]):
"""Construct a ForwardProxyTest object.
:param verify_scheme_matches_protocol: The value with which to
configure Traffic Server's
proxy.config.ssl.client.scheme_proto_mismatch_policy. A value of None
means that no value will be explicitly set in the records.yaml.
:type verify_scheme_matches_protocol: int or None
"""
self._scheme_proto_mismatch_policy = verify_scheme_matches_protocol
self.setupOriginServer()
self.setupTS()
def setupOriginServer(self):
"""Configure the Proxy Verifier server."""
proc_name = f"server{ForwardProxyTest._server_counter}"
self.server = Test.MakeVerifierServerProcess(proc_name, "forward_proxy.replay.yaml")
ForwardProxyTest._server_counter += 1
if self._scheme_proto_mismatch_policy in (2, None):
self.server.Streams.All = Testers.ExcludesExpression(
'Received an HTTP/1 request with key 1',
'Verify that the server did not receive the request.')
else:
self.server.Streams.All = Testers.ContainsExpression(
'Received an HTTP/1 request with key 1',
'Verify that the server received the request.')
def setupTS(self):
"""Configure the Traffic Server process."""
proc_name = f"ts{ForwardProxyTest._ts_counter}"
self.ts = Test.MakeATSProcess(proc_name, enable_tls=True, enable_cache=False)
ForwardProxyTest._ts_counter += 1
self.ts.addDefaultSSLFiles()
self.ts.Disk.ssl_multicert_config.AddLine("dest_ip=* ssl_cert_name=server.pem ssl_key_name=server.key")
self.ts.Disk.remap_config.AddLine(
f"map / http://127.0.0.1:{self.server.Variables.http_port}/")
self.ts.Disk.records_config.update({
'proxy.config.ssl.server.cert.path': self.ts.Variables.SSLDir,
'proxy.config.ssl.server.private_key.path': self.ts.Variables.SSLDir,
'proxy.config.ssl.client.verify.server.policy': 'PERMISSIVE',
'proxy.config.diags.debug.enabled': 1,
'proxy.config.diags.debug.tags': "http",
})
if self._scheme_proto_mismatch_policy is not None:
self.ts.Disk.records_config.update({
'proxy.config.ssl.client.scheme_proto_mismatch_policy': self._scheme_proto_mismatch_policy,
})
def addProxyHttpsToHttpCase(self):
"""Test ATS as an HTTPS forward proxy behind an HTTP server."""
tr = Test.AddTestRun()
tr.Processes.Default.StartBefore(self.server)
tr.Processes.Default.StartBefore(self.ts)
tr.Processes.Default.Command = (
f'curl --proxy-insecure -v -H "uuid: 1" '
f'--proxy "https://127.0.0.1:{self.ts.Variables.ssl_port}/" '
f'http://example.com/')
tr.Processes.Default.ReturnCode = 0
tr.StillRunningAfter = self.server
tr.StillRunningAfter = self.ts
if self._scheme_proto_mismatch_policy in (2, None):
tr.Processes.Default.Streams.All = Testers.ContainsExpression(
'< HTTP/1.1 400 Invalid HTTP Request',
'Verify that the request was rejected.')
else:
tr.Processes.Default.Streams.All = Testers.ContainsExpression(
'< HTTP/1.1 200 OK',
'Verify that curl received a 200 OK response.')
def run(self):
"""Configure the TestRun instances for this set of tests."""
self.addProxyHttpsToHttpCase()
ForwardProxyTest(verify_scheme_matches_protocol=None).run()
ForwardProxyTest(verify_scheme_matches_protocol=0).run()
ForwardProxyTest(verify_scheme_matches_protocol=1).run()
ForwardProxyTest(verify_scheme_matches_protocol=2).run()
|
912867eaa54dd5367a60384fc33dac48cc46e2b9
|
2ed0210bc41f848a0e67fce3ad6b7a3e85228261
|
/src/pykeen/hpo/__init__.py
|
6ac6b50862ef812553972021e17c2b8a1510fe08
|
[
"MIT"
] |
permissive
|
pykeen/pykeen
|
f7483445bd99d3a404bc4ff42538550d56702b66
|
5ff3597b18ab9a220e34361d3c3f262060811df1
|
refs/heads/master
| 2023-08-25T20:29:55.021639
| 2023-08-24T20:05:20
| 2023-08-24T20:05:20
| 242,672,435
| 1,308
| 199
|
MIT
| 2023-09-13T18:18:36
| 2020-02-24T07:26:03
|
Python
|
UTF-8
|
Python
| false
| false
| 25,342
|
py
|
__init__.py
|
# -*- coding: utf-8 -*-
"""The easiest way to optimize a model is with the :func:`pykeen.hpo.hpo_pipeline` function.
All of the following examples are about getting the best model
when training :class:`pykeen.models.TransE` on the :class:`pykeen.datasets.Nations` dataset.
Each gives a bit of insight into usage of the :func:`hpo_pipeline` function.
The minimal usage of the hyper-parameter optimization is to specify the
dataset, the model, and how much to run. The following example shows how to
optimize the TransE model on the Nations dataset a given number of times using
the ``n_trials`` argument.
>>> from pykeen.hpo import hpo_pipeline
>>> hpo_pipeline_result = hpo_pipeline(
... n_trials=30,
... dataset='Nations',
... model='TransE',
... )
Alternatively, the ``timeout`` can be set. In the following example,
as many trials as possible will be run in 60 seconds.
>>> from pykeen.hpo import hpo_pipeline
>>> hpo_pipeline_result = hpo_pipeline(
... timeout=60,
... dataset='Nations',
... model='TransE',
... )
The hyper-parameter optimization pipeline has the ability to optimize hyper-parameters for the corresponding
``*_kwargs`` arguments in the :func:`pykeen.pipeline.pipeline`:
- ``model``
- ``loss``
- ``regularizer``
- ``optimizer``
- ``negative_sampler``
- ``training``
Defaults
--------
Each component's hyper-parameters have reasonable default values. For example, every model in PyKEEN has
defaults for its hyper-parameters chosen from the best-reported values in each model's
original paper, unless otherwise stated on the model's reference page. In case hyper-parameters for a model for a
specific dataset were not available, we chose the hyper-parameters based on the findings in our
large-scale benchmarking [ali2020a]_. For most components (e.g., models, losses, regularizers, negative
samplers, training loops), these values are stored in the default values of the respective classes'
``__init__()`` functions. They can be viewed in the corresponding reference section of the docs.
Some components contain strategies for doing hyper-parameter optimization. When you call
:func:`pykeen.hpo.hpo_pipeline`, the following steps are taken to determine what happens for each hyper-parameter
in each component:
1. If an explicit value was passed, use it.
2. If no explicit value was passed and an HPO strategy was passed, use the explicit strategy.
3. If no explicit value was passed and no HPO strategy was passed and there is a default
HPO strategy, use the default strategy.
4. If no explicit value was passed, no HPO strategy was passed, and there is no default HPO strategy, use
   the default hyper-parameter value.
5. If no explicit value was passed, no HPO strategy was passed, there is no default HPO strategy, and
   there is no default hyper-parameter value, raise a :class:`TypeError`.
For example, the TransE model's default HPO strategy for its ``embedding_dim`` argument is to search between
$[16, 256]$ with a step size of 16. The $l_p$ norm is set to search as either 1 or 2. This will be overridden
with 50 in the following code:
>>> from pykeen.hpo import hpo_pipeline
>>> hpo_pipeline_result = hpo_pipeline(
... dataset='Nations',
... model='TransE',
... model_kwargs=dict(embedding_dim=50),
... )
The strategy can be explicitly overridden with:
>>> from pykeen.hpo import hpo_pipeline
>>> hpo_pipeline_result = hpo_pipeline(
... dataset='Nations',
... model='TransE',
... model_kwargs_ranges=dict(
... embedding_dim=dict(type=int, low=16, high=256, step=32),
... ),
... )
Each model, loss, regularizer, negative sampler, and training loop specifies a class variable called ``hpo_defaults``
in which there's a dictionary with all of the default strategies. The keys match up to the arguments in their
respective ``__init__()`` functions.
Since optimizers aren't re-implemented in PyKEEN, there's a specific dictionary at
:py:attr:`pykeen.optimizers.optimizers_hpo_defaults` containing their strategies. It's debatable whether
you should optimize the optimizers (yo dawg), so you can always choose to set the learning rate ``lr`` to a constant
value.
Strategies
----------
An HPO strategy is a Python :class:`dict` with a ``type`` key corresponding to a categorical variable, boolean
variable, integer variable, or floating point number variable. The value itself for ``type``
should be one of the following:
1. ``"categorical"``
2. ``bool`` or ``"bool"``
3. ``int`` or ``"int"``
4. ``float`` or ``"float"``
Several strategies can be grouped together in a dictionary where the key is the name of the hyper-parameter
for the component in the ``*_kwargs_ranges`` arguments to the HPO pipeline.
Categorical
~~~~~~~~~~~
The only other key to use inside a categorical variable is ``choices``. For example, if you want to
choose between Kullback-Leibler divergence or expected likelihood as similarity used in the KG2E model,
you can write a strategy like:
>>> from pykeen.hpo import hpo_pipeline
>>> hpo_pipeline_result = hpo_pipeline(
... dataset='Nations',
... model='KG2E',
... model_kwargs_ranges=dict(
... dist_similarity=dict(type='categorical', choices=['KL', 'EL']),
... ),
... )
Boolean
~~~~~~~
The boolean variable actually doesn't need any extra keys besides the type, so a strategy for a boolean
variable always looks like ``dict(type='bool')``. Under the hood, this is automatically translated to a categorical
variable with ``choices=[True, False]``.
>>> from pykeen.hpo import hpo_pipeline
>>> hpo_pipeline_result = hpo_pipeline(
... dataset='Nations',
... model='TransE',
... training_loop='sLCWA',
... negative_sampler_kwargs_ranges=dict(
...         filtered=dict(type=bool),
... ),
... )
Integers and Floating Point Numbers
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
The integer and floating point number strategies share several aspects. Both require a ``low`` and ``high`` entry
like in ``dict(type=float, low=0.0, high=1.0)`` or ``dict(type=int, low=1, high=10)``.
Linear Scale
************
By default, you don't need to specify a ``scale``, but you can be explicit by setting ``scale='linear'``.
This behavior should be self-explanatory - there is no rescaling, and you get back a uniform distribution
within the bounds specified by the ``low`` and ``high`` arguments. This applies to both ``type=int`` and
``type=float``. The following example uniformly chooses from $[1, 100]$:
>>> from pykeen.hpo import hpo_pipeline
>>> hpo_pipeline_result = hpo_pipeline(
... dataset='Nations',
... model='TransE',
... training_loop='sLCWA',
... negative_sampler_kwargs_ranges=dict(
... num_negs_per_pos=dict(type=int, low=1, high=100),
... ),
... )
Power Scale (``type=int`` only)
*******************************
The power scale was originally implemented as ``scale='power_two'`` to support
:class:`pykeen.models.ConvE`'s ``output_channels`` parameter. However, using two as a base is a bit limiting, so we
also implemented a more general ``scale='power'`` where you can set ``base``. Here's an example to optimize over
the number of negatives per positive ratio using ``base=10``:
>>> from pykeen.hpo import hpo_pipeline
>>> hpo_pipeline_result = hpo_pipeline(
... dataset='Nations',
... model='TransE',
... training_loop='sLCWA',
... negative_sampler_kwargs_ranges=dict(
... num_negs_per_pos=dict(type=int, scale='power', base=10, low=0, high=2),
... ),
... )
The power scale can only be used with ``type=int`` and not bool, categorical, or float. I like this scale because
it can quickly discretize a large search space. In this example, you will get ``[10**0, 10**1, 10**2]`` as
choices, then uniformly choose from them.
Logarithmic Reweighting
***********************
The evil twin to the power scale is logarithmic reweighting on the linear scale. This is applicable to ``type=int``
and ``type=float``. Rather than changing the choices themselves, the log scale uses Optuna's built-in ``log``
functionality to reassign the probabilities uniformly over the log'd distribution. The same example as above could be
accomplished with:
>>> from pykeen.hpo import hpo_pipeline
>>> hpo_pipeline_result = hpo_pipeline(
... dataset='Nations',
... model='TransE',
... training_loop='sLCWA',
... negative_sampler_kwargs_ranges=dict(
... num_negs_per_pos=dict(type=int, low=1, high=100, log=True),
... ),
... )
but this time, it's not discretized. However, you're just as likely to pick from $[1,10]$ as $[10, 100]$.
Stepping
********
With the linear scale, you can specify the ``step`` size. This discretizes the distribution in linear space,
so if you want to pick from $10, 20, ... 100$, you can do:
>>> from pykeen.hpo import hpo_pipeline
>>> hpo_pipeline_result = hpo_pipeline(
... dataset='Nations',
... model='TransE',
... training_loop='sLCWA',
... negative_sampler_kwargs_ranges=dict(
... num_negs_per_pos=dict(type=int, low=10, high=100, step=10),
... ),
... )
This actually also works with logarithmic reweighting, since it is still technically on a linear scale,
but with probabilities reweighted logarithmically. So now you'd pick from one of $[10]$ or $[20, 30, 40, ..., 100]$
with the same probability:
>>> from pykeen.hpo import hpo_pipeline
>>> hpo_pipeline_result = hpo_pipeline(
... dataset='Nations',
... model='TransE',
... training_loop='sLCWA',
... negative_sampler_kwargs_ranges=dict(
... num_negs_per_pos=dict(type=int, low=10, high=100, step=10, log=True),
... ),
... )
Custom Strategies
-----------------
While the default values for hyper-parameters are encoded with the python syntax
for default values of the ``__init__()`` function of each model, the ranges/scales can be
found in the class variable :py:attr:`pykeen.models.Model.hpo_default`. For
example, the range for TransE's embedding dimension is set to optimize
between 50 and 350 at increments of 25 in :py:attr:`pykeen.models.TransE.hpo_default`.
TransE also has a scoring function norm that will be optimized by a categorical
selection of {1, 2} by default.
.. note ::
These hyper-parameter ranges were chosen as reasonable defaults for the benchmark
datasets FB15k-237 / WN18RR. When using different datasets, the ranges might be suboptimal.
All hyper-parameters defined in the ``hpo_default`` of your chosen model will be
optimized by default. If you already have a value that you're happy with for
one of them, you can specify it with the ``model_kwargs`` attribute. In the
following example, the ``embedding_dim`` for a TransE model is fixed at 200,
while the rest of the parameters will be optimized using the pre-defined HPO strategies in
the model. For TransE, that means that the scoring function norm will be optimized
as 1 or 2.
>>> from pykeen.hpo import hpo_pipeline
>>> hpo_pipeline_result = hpo_pipeline(
... model='TransE',
... model_kwargs=dict(
... embedding_dim=200,
... ),
... dataset='Nations',
... n_trials=30,
... )
If you would like to set your own HPO strategy for the model's hyper-parameters, you can do so with the
``model_kwargs_ranges`` argument. In the example below, the embeddings are
searched over a larger range (``low`` and ``high``), but with a higher step
size (``q``), such that 100, 200, 300, 400, and 500 are searched.
>>> from pykeen.hpo import hpo_pipeline
>>> hpo_result = hpo_pipeline(
... n_trials=30,
... dataset='Nations',
... model='TransE',
... model_kwargs_ranges=dict(
... embedding_dim=dict(type=int, low=100, high=500, q=100),
... ),
... )
.. warning::
If the given range is not divisible by the step size, then the
upper bound will be omitted.
If you want to optimize the entity initializer, you can use the ``type='categorical'`` type,
which requires a ``choices=[...]`` key with a list of choices. This works for strings, integers,
floats, etc.
>>> from pykeen.hpo import hpo_pipeline
>>> hpo_result = hpo_pipeline(
... n_trials=30,
... dataset='Nations',
... model='TransE',
... model_kwargs_ranges=dict(
... entity_initializer=dict(type='categorical', choices=[
... 'xavier_uniform',
... 'xavier_uniform_norm',
... 'uniform',
... ]),
... ),
... )
The same could be used for constrainers, normalizers, and regularizers over both entities and
relations. However, different models might have different names for the initializer, normalizer,
constrainer and regularizer since there could be multiple representations for either the entity,
relation, or both. Check your desired model's documentation page for the kwargs that you can
optimize over.
Keys of :data:`pykeen.nn.representation.initializers` can be passed as initializers as strings and
keys of :data:`pykeen.nn.representation.constrainers` can be passed as constrainers as strings.
The HPO pipeline does not support optimizing over the hyper-parameters for each
initializer. If you are interested in this, consider rolling your own ablation
study pipeline.
Optimizing the Loss
~~~~~~~~~~~~~~~~~~~
While each model has its own default loss, you can explicitly specify a loss
the same way as in :func:`pykeen.pipeline.pipeline`.
>>> from pykeen.hpo import hpo_pipeline
>>> hpo_pipeline_result = hpo_pipeline(
... n_trials=30,
... dataset='Nations',
... model='TransE',
... loss='MarginRankingLoss',
... )
As stated in the documentation for :func:`pykeen.pipeline.pipeline`, each model
specifies its own default loss function in :py:attr:`pykeen.models.Model.loss_default`.
For example, the TransE model defines the margin ranking loss as its default in
:py:attr:`pykeen.models.TransE.loss_default`.
Each model also specifies default hyper-parameters for the loss function in
:py:attr:`pykeen.models.Model.loss_default_kwargs`. For example, DistMultLiteral
explicitly sets the margin to `0.0` in :py:attr:`pykeen.models.DistMultLiteral.loss_default_kwargs`.
Unlike the model's hyper-parameters, the models don't store the strategies for
optimizing the loss functions' hyper-parameters. The pre-configured strategies
are stored in the loss function's class variable :py:attr:`pykeen.models.Loss.hpo_default`.
However, similarly to how you would specify ``model_kwargs_ranges``, you can
specify the ``loss_kwargs_ranges`` explicitly, as in the following example.
>>> from pykeen.hpo import hpo_pipeline
>>> hpo_pipeline_result = hpo_pipeline(
... n_trials=30,
... dataset='Nations',
... model='TransE',
... loss='MarginRankingLoss',
... loss_kwargs_ranges=dict(
... margin=dict(type=float, low=1.0, high=2.0),
... ),
... )
Optimizing the Negative Sampler
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
When the stochastic local closed world assumption (sLCWA) training approach is used for training, a negative sampler
(subclass of :py:class:`pykeen.sampling.NegativeSampler`) is chosen.
Each has a strategy stored in :py:attr:`pykeen.sampling.NegativeSampler.hpo_default`.
Like models and regularizers, the rules are the same for specifying ``negative_sampler``,
``negative_sampler_kwargs``, and ``negative_sampler_kwargs_ranges``.
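As a sketch following that pattern (the bounds for ``num_negs_per_pos`` below are illustrative,
not recommended values), the basic negative sampler's ratio could be optimized with:
>>> from pykeen.hpo import hpo_pipeline
>>> hpo_pipeline_result = hpo_pipeline(
...     n_trials=30,
...     dataset='Nations',
...     model='TransE',
...     training_loop='sLCWA',
...     negative_sampler='basic',
...     negative_sampler_kwargs_ranges=dict(
...         num_negs_per_pos=dict(type=int, low=1, high=100, log=True),
...     ),
... )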
Optimizing the Optimizer
~~~~~~~~~~~~~~~~~~~~~~~~
Yo dawg, I heard you liked optimization, so we put an optimizer around your
optimizer so you can optimize while you optimize. Since all optimizers used
in PyKEEN come from the PyTorch implementations, they obviously do not have
``hpo_defaults`` class variables. Instead, every optimizer has a default
optimization strategy stored in :py:attr:`pykeen.optimizers.optimizers_hpo_defaults`,
the same way that the default strategies for losses are stored externally.
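As a hedged sketch (the learning-rate bounds here are illustrative, not a recommendation),
the optimizer's strategy can be overridden just like any other component's:
>>> from pykeen.hpo import hpo_pipeline
>>> hpo_pipeline_result = hpo_pipeline(
...     n_trials=30,
...     dataset='Nations',
...     model='TransE',
...     optimizer='SGD',
...     optimizer_kwargs_ranges=dict(
...         lr=dict(type=float, low=0.0001, high=0.1, log=True),
...     ),
... )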
Optimizing the Optimized Optimizer - a.k.a. Learning Rate Schedulers
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
If optimizing your optimizer doesn't cut it for you, you can turn it up a notch and use learning
rate schedulers (lr_scheduler) that vary the learning rate of the optimizer. This can be useful,
e.g., for starting with a more aggressive learning rate to quickly make progress while lowering
the learning rate over time to allow the model to smoothly converge to the optimum.
PyKEEN allows you to use the learning rate schedulers provided by PyTorch, which you can
simply specify as you would in the :func:`pykeen.pipeline.pipeline`.
>>> from pykeen.hpo import hpo_pipeline
>>> hpo_pipeline_result = hpo_pipeline(
... dataset='Nations',
... model='TransE',
... lr_scheduler='ExponentialLR',
... )
>>> hpo_pipeline_result.save_to_directory('nations_transe')
Just as the optimizers don't come with ``hpo_defaults`` class variables, lr_schedulers rely
on their own optimization strategies, provided in :py:attr:`pykeen.lr_schedulers.lr_schedulers_hpo_defaults`.
In case you are ready to explore even more, you can of course also set your own ranges with the
``lr_scheduler_kwargs_ranges`` keyword argument, as in:
>>> from pykeen.hpo import hpo_pipeline
>>> hpo_pipeline_result = hpo_pipeline(
... dataset='Nations',
... model='TransE',
... lr_scheduler='ExponentialLR',
... lr_scheduler_kwargs_ranges=dict(
... gamma=dict(type=float, low=0.8, high=1.0),
... ),
... )
>>> hpo_pipeline_result.save_to_directory('nations_transe')
Optimizing Everything Else
~~~~~~~~~~~~~~~~~~~~~~~~~~
Without loss of generality, the following arguments to :func:`pykeen.pipeline.pipeline`
have corresponding ``*_kwargs`` and ``*_kwargs_ranges`` (a sketch follows this list):
- ``training_loop`` (only kwargs, not kwargs_ranges)
- ``evaluator``
- ``evaluation``
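For example, a minimal sketch of tuning the evaluation batch size (the choices below are
illustrative; check the evaluator's documentation for the arguments it actually supports):
>>> from pykeen.hpo import hpo_pipeline
>>> hpo_pipeline_result = hpo_pipeline(
...     n_trials=30,
...     dataset='Nations',
...     model='TransE',
...     evaluation_kwargs_ranges=dict(
...         batch_size=dict(type='categorical', choices=[16, 32, 64]),
...     ),
... )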
Early Stopping
--------------
Early stopping can be baked directly into the :mod:`optuna` optimization.
The important keys are ``stopper='early'`` and ``stopper_kwargs``.
When using early stopping, the :func:`hpo_pipeline` automatically takes
care of adding appropriate callbacks to interface with :mod:`optuna`.
>>> from pykeen.hpo import hpo_pipeline
>>> hpo_pipeline_result = hpo_pipeline(
... n_trials=30,
... dataset='Nations',
... model='TransE',
... stopper='early',
... stopper_kwargs=dict(frequency=5, patience=2, relative_delta=0.002),
... )
These stopper kwargs were chosen to make the example run faster. You will
likely want to use different ones.
Configuring Optuna
------------------
Choosing a Search Algorithm
~~~~~~~~~~~~~~~~~~~~~~~~~~~
Because PyKEEN's hyper-parameter optimization pipeline is powered by Optuna, it can directly use all of
Optuna's built-in samplers listed on :mod:`optuna.samplers` or any custom subclass of
:class:`optuna.samplers.BaseSampler`.
By default, PyKEEN uses the Tree-structured Parzen Estimator (TPE; :class:`optuna.samplers.TPESampler`),
a probabilistic search algorithm. You can explicitly set the sampler using the ``sampler`` argument
(not to be confused with the negative sampler used when training under the sLCWA):
>>> from pykeen.hpo import hpo_pipeline
>>> from optuna.samplers import TPESampler
>>> hpo_pipeline_result = hpo_pipeline(
... n_trials=30,
... sampler=TPESampler,
... dataset='Nations',
... model='TransE',
... )
You can alternatively pass a string so you don't have to worry about importing Optuna. PyKEEN knows that sampler
classes always end in "Sampler" so you can pass either "TPE" or "TPESampler" as a string. This is case-insensitive.
>>> from pykeen.hpo import hpo_pipeline
>>> hpo_pipeline_result = hpo_pipeline(
... n_trials=30,
... sampler="tpe",
... dataset='Nations',
... model='TransE',
... )
It's also possible to pass a sampler instance directly:
>>> from pykeen.hpo import hpo_pipeline
>>> from optuna.samplers import TPESampler
>>> sampler = TPESampler(prior_weight=1.1)
>>> hpo_pipeline_result = hpo_pipeline(
... n_trials=30,
... sampler=sampler,
... dataset='Nations',
... model='TransE',
... )
If you're working in a JSON-based configuration setting, you won't be able to instantiate the sampler
with your desired settings like this. As a solution, you can pass the keyword arguments via the
``sampler_kwargs`` argument in combination with specifying the sampler as a string/class to the HPO pipeline like in:
>>> from pykeen.hpo import hpo_pipeline
>>> hpo_pipeline_result = hpo_pipeline(
... n_trials=30,
... sampler="tpe",
... sampler_kwargs=dict(prior_weight=1.1),
... dataset='Nations',
... model='TransE',
... )
To emulate most hyper-parameter optimizations that have used random
sampling, use :class:`optuna.samplers.RandomSampler` like in:
>>> from pykeen.hpo import hpo_pipeline
>>> from optuna.samplers import RandomSampler
>>> hpo_pipeline_result = hpo_pipeline(
... n_trials=30,
... sampler=RandomSampler,
... dataset='Nations',
... model='TransE',
... )
Grid search can be performed using :class:`optuna.samplers.GridSampler`. Notice that this sampler
expects an additional ``search_space`` argument in its ``sampler_kwargs``, e.g.,
>>> from pykeen.hpo import hpo_pipeline
>>> from optuna.samplers import GridSampler
>>> hpo_pipeline_result = hpo_pipeline(
... n_trials=30,
... sampler=GridSampler,
... sampler_kwargs=dict(
... search_space={
... "model.embedding_dim": [32, 64, 128],
... "model.scoring_fct_norm": [1, 2],
... "loss.margin": [1.0],
... "optimizer.lr": [1.0e-03],
... "negative_sampler.num_negs_per_pos": [32],
... "training.num_epochs": [100],
... "training.batch_size": [128],
... },
... ),
... dataset='Nations',
... model='TransE',
... )
Also notice that the search space of grid search grows quickly with the number of studied hyper-parameters,
and thus grid search is less efficient than other search strategies at finding good configurations;
cf. https://jmlr.csail.mit.edu/papers/v13/bergstra12a.html.
Full Examples
-------------
The examples above have shown the permutation of one setting at a time. This
section has some more complete examples.
The following example sets the optimizer, loss, training, negative sampling,
evaluation, and early stopping settings.
>>> from pykeen.hpo import hpo_pipeline
>>> hpo_pipeline_result = hpo_pipeline(
... n_trials=30,
... dataset='Nations',
... model='TransE',
... model_kwargs=dict(embedding_dim=20, scoring_fct_norm=1),
... optimizer='SGD',
... optimizer_kwargs=dict(lr=0.01),
... loss='marginranking',
... loss_kwargs=dict(margin=1),
... training_loop='slcwa',
... training_kwargs=dict(num_epochs=100, batch_size=128),
... negative_sampler='basic',
... negative_sampler_kwargs=dict(num_negs_per_pos=1),
... evaluator_kwargs=dict(filtered=True),
... evaluation_kwargs=dict(batch_size=128),
... stopper='early',
... stopper_kwargs=dict(frequency=5, patience=2, relative_delta=0.002),
... )
If you have the configuration as a dictionary:
>>> from pykeen.hpo import hpo_pipeline_from_config
>>> config = {
... 'optuna': dict(
... n_trials=30,
... ),
... 'pipeline': dict(
... dataset='Nations',
... model='TransE',
... model_kwargs=dict(embedding_dim=20, scoring_fct_norm=1),
... optimizer='SGD',
... optimizer_kwargs=dict(lr=0.01),
... loss='marginranking',
... loss_kwargs=dict(margin=1),
... training_loop='slcwa',
... training_kwargs=dict(num_epochs=100, batch_size=128),
... negative_sampler='basic',
... negative_sampler_kwargs=dict(num_negs_per_pos=1),
... evaluator_kwargs=dict(filtered=True),
... evaluation_kwargs=dict(batch_size=128),
... stopper='early',
... stopper_kwargs=dict(frequency=5, patience=2, relative_delta=0.002),
... )
... }
>>> hpo_pipeline_result = hpo_pipeline_from_config(config)
If you have a configuration (in the same format) in a JSON file:
>>> import json
>>> from pykeen.hpo import hpo_pipeline_from_path
>>> config = {
... 'optuna': dict(
... n_trials=30,
... ),
... 'pipeline': dict(
... dataset='Nations',
... model='TransE',
... model_kwargs=dict(embedding_dim=20, scoring_fct_norm=1),
... optimizer='SGD',
... optimizer_kwargs=dict(lr=0.01),
... loss='marginranking',
... loss_kwargs=dict(margin=1),
... training_loop='slcwa',
... training_kwargs=dict(num_epochs=100, batch_size=128),
... negative_sampler='basic',
... negative_sampler_kwargs=dict(num_negs_per_pos=1),
... evaluator_kwargs=dict(filtered=True),
... evaluation_kwargs=dict(batch_size=128),
... stopper='early',
... stopper_kwargs=dict(frequency=5, patience=2, relative_delta=0.002),
... )
... }
>>> with open('config.json', 'w') as file:
...     json.dump(config, file, indent=2)
>>> hpo_pipeline_result = hpo_pipeline_from_path('config.json')
.. seealso::
- https://towardsdatascience.com/a-conceptual-explanation-of-bayesian-model-based-hyperparameter-optimization-for-machine-learning-b8172278050f # noqa:E501
"""
from .hpo import HpoPipelineResult, hpo_pipeline, hpo_pipeline_from_config, hpo_pipeline_from_path # noqa: F401
__all__ = [
"HpoPipelineResult",
"hpo_pipeline_from_path",
"hpo_pipeline_from_config",
"hpo_pipeline",
]
|
847cc979b61de7d7f0536c6a2b1e51770a20505d
|
b5ce6908490cfb8e6a1e1cbe4745d675122ddce0
|
/questions/restore-ip-addresses/Solution.py
|
ad926eb01c689049e406b86513b398a510227014
|
[
"MIT"
] |
permissive
|
franklingu/leetcode-solutions
|
8895910f13208e1d8e604100d84c2dd35684cde4
|
7ad7e5c1c040510b7b7bd225ed4297054464dbc6
|
refs/heads/master
| 2023-01-09T01:34:08.097518
| 2023-01-02T02:05:35
| 2023-01-02T02:05:35
| 43,345,677
| 155
| 66
|
MIT
| 2020-10-02T03:41:36
| 2015-09-29T04:54:38
|
Python
|
UTF-8
|
Python
| false
| false
| 2,329
|
py
|
Solution.py
|
"""
Given a string s containing only digits, return all possible valid IP addresses that can be obtained from s. You can return them in any order.
A valid IP address consists of exactly four integers, each integer is between 0 and 255, separated by single dots and cannot have leading zeros. For example, "0.1.2.201" and "192.168.1.1" are valid IP addresses and "0.011.255.245", "192.168.1.312" and "192.168@1.1" are invalid IP addresses.
Example 1:
Input: s = "25525511135"
Output: ["255.255.11.135","255.255.111.35"]
Example 2:
Input: s = "0000"
Output: ["0.0.0.0"]
Example 3:
Input: s = "1111"
Output: ["1.1.1.1"]
Example 4:
Input: s = "010010"
Output: ["0.10.0.10","0.100.1.0"]
Example 5:
Input: s = "101023"
Output: ["1.0.10.23","1.0.102.3","10.1.0.23","10.10.2.3","101.0.2.3"]
Constraints:
0 <= s.length <= 3000
s consists of digits only.
"""
from typing import List
class Solution:
def restoreIpAddresses(self, s: str) -> List[str]:
def generateInterpretations(s, index, positions, ret):
if index > len(s) or (index == len(s) and len(positions) < 3):
return
if index == len(s) or len(positions) == 3:
runner = 0
tmp = []
for i, ch in enumerate(s):
if runner == 3:
tmp.append(ch)
continue
if i == positions[runner]:
runner += 1
tmp.append('.')
tmp.append(ch)
else:
tmp.append(ch)
ret.append(''.join(tmp))
return
generateInterpretations(s, index + 1, positions, ret)
start = 0 if not positions else positions[-1]
if index > start + 1 and s[start] == '0':
return
elif int(s[start:index]) > 255:
return
elif len(positions) == 2:
if s[index] == '0' and index < len(s) - 1:
return
elif int(s[index:]) > 255:
return
positions.append(index)
generateInterpretations(s, index + 1, positions, ret)
positions.pop()
ret = []
generateInterpretations(s, 1, [], ret)
return ret
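# Quick sanity check (hypothetical driver, not part of the original solution):
if __name__ == '__main__':
    # Expected output (order may vary): ['255.255.11.135', '255.255.111.35']
    print(Solution().restoreIpAddresses('25525511135'))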
|
e01a1e3106b36f5201f5f3e55c485052641118dc
|
60f3761284aa01d309446850bf9b87e0bf32ad4c
|
/braintree/merchant_account/merchant_account.py
|
13f81cf26f17d896e754e763a1f8d157f6b38f80
|
[
"MIT"
] |
permissive
|
braintree/braintree_python
|
e8cc05016e65a79ad6b40c3fef557a196dbfce44
|
673f70a60d1db03d633f0758b5b2d40a28c79f67
|
refs/heads/master
| 2023-09-04T15:38:50.037101
| 2023-08-29T22:51:14
| 2023-08-29T22:51:14
| 579,729
| 212
| 119
|
MIT
| 2023-06-15T01:05:55
| 2010-03-25T21:29:00
|
Python
|
UTF-8
|
Python
| false
| false
| 1,759
|
py
|
merchant_account.py
|
from braintree.configuration import Configuration
from braintree.resource import Resource
from braintree.merchant_account import BusinessDetails, FundingDetails, IndividualDetails
class MerchantAccount(Resource):
class Status(object):
Active = "active"
Pending = "pending"
Suspended = "suspended"
class FundingDestination(object):
Bank = "bank"
Email = "email"
MobilePhone = "mobile_phone"
FundingDestinations = FundingDestination
def __init__(self, gateway, attributes):
Resource.__init__(self, gateway, attributes)
self.individual_details = IndividualDetails(attributes.get("individual", {}))
self.business_details = BusinessDetails(attributes.get("business", {}))
self.funding_details = FundingDetails(attributes.get("funding", {}))
if "master_merchant_account" in attributes:
self.master_merchant_account = MerchantAccount(gateway, attributes.pop("master_merchant_account"))
def __repr__(self):
detail_list = [
"id",
"business_details",
"currency_iso_code",
"default",
"funding_details",
"individual_details",
"master_merchant_account",
"status",
]
return super(MerchantAccount, self).__repr__(detail_list)
@staticmethod
def create(params=None):
if params is None:
params = {}
return Configuration.gateway().merchant_account.create(params)
@staticmethod
def update(id, attributes):
return Configuration.gateway().merchant_account.update(id, attributes)
@staticmethod
def find(id):
return Configuration.gateway().merchant_account.find(id)
|
0dd07270bcdc22bba5210cb0acf1683ae4a860be
|
52107637d2687db30f168ba15ffd1e1e534f4cb4
|
/tests/datasets/test_acousticbrainz_genre.py
|
0ddf024d227d09f449b514800b13e23e506a6120
|
[
"BSD-3-Clause"
] |
permissive
|
mir-dataset-loaders/mirdata
|
9be10e0201b08abf51fc72338ccaaacc8216145b
|
496eb4a9120aa16ff6963792f0c8b738a0c3f310
|
refs/heads/master
| 2023-05-07T13:15:16.517429
| 2023-03-27T13:54:37
| 2023-03-27T13:54:37
| 170,765,267
| 297
| 65
|
BSD-3-Clause
| 2023-08-05T22:48:48
| 2019-02-14T22:11:33
|
Python
|
UTF-8
|
Python
| false
| false
| 4,957
|
py
|
test_acousticbrainz_genre.py
|
import os
import shutil
from mirdata import download_utils
from mirdata.datasets import acousticbrainz_genre
from tests.test_utils import run_track_tests
def test_track():
default_trackid = "tagtraum#validation#be9e01e5-8f93-494d-bbaa-ddcc5a52f629#2b6bfcfd-46a5-3f98-a58f-2c51d7c9e960#trance########"
data_home = os.path.normpath("tests/resources/mir_datasets/acousticbrainz_genre")
dataset = acousticbrainz_genre.Dataset(data_home, version="test")
track = dataset.track(default_trackid)
expected_attributes = {
"path": os.path.normpath(
"tests/resources/mir_datasets/acousticbrainz_genre/acousticbrainz-mediaeval-validation/be/be9e01e5-8f93-494d-bbaa-ddcc5a52f629.json"
),
"track_id": "tagtraum#validation#be9e01e5-8f93-494d-bbaa-ddcc5a52f629#2b6bfcfd-46a5-3f98-a58f-2c51d7c9e960#trance########",
"genre": ["trance"],
"mbid": "be9e01e5-8f93-494d-bbaa-ddcc5a52f629",
"mbid_group": "2b6bfcfd-46a5-3f98-a58f-2c51d7c9e960",
"split": "validation",
}
expected_property_types = {
"artist": list,
"title": list,
"date": list,
"file_name": str,
"album": list,
"tracknumber": list,
"tonal": dict,
"low_level": dict,
"rhythm": dict,
"acousticbrainz_metadata": dict,
}
run_track_tests(track, expected_attributes, expected_property_types)
def test_load_extractor():
path = os.path.normpath(
"tests/resources/mir_datasets/acousticbrainz_genre/acousticbrainz-mediaeval-validation/be/be9e01e5-8f93-494d-bbaa-ddcc5a52f629.json"
)
extractor_data = acousticbrainz_genre.load_extractor(path)
assert isinstance(extractor_data, dict)
def test_to_jams():
data_home = os.path.normpath("tests/resources/mir_datasets/acousticbrainz_genre")
trackid = "tagtraum#validation#be9e01e5-8f93-494d-bbaa-ddcc5a52f629#2b6bfcfd-46a5-3f98-a58f-2c51d7c9e960#trance########"
dataset = acousticbrainz_genre.Dataset(data_home, version="test")
track = dataset.track(trackid)
jam = track.to_jams()
def test_filter_index():
data_home = os.path.normpath("tests/resources/mir_datasets/acousticbrainz_genre")
dataset = acousticbrainz_genre.Dataset(data_home, version="test")
index = dataset.load_all_train()
assert len(index) == 8
index = dataset.load_all_validation()
assert len(index) == 8
index = dataset.load_tagtraum_validation()
assert len(index) == 2
index = dataset.load_tagtraum_train()
assert len(index) == 2
index = dataset.load_allmusic_validation()
assert len(index) == 2
index = dataset.load_lastfm_train()
assert len(index) == 2
index = dataset.load_lastfm_validation()
assert len(index) == 2
index = dataset.load_discogs_train()
assert len(index) == 2
index = dataset.load_discogs_validation()
assert len(index) == 2
def test_download(httpserver):
data_home = os.path.normpath(
"tests/resources/mir_datasets/acousticbrainz_genre_download"
)
if os.path.exists(data_home):
shutil.rmtree(data_home)
httpserver.serve_content(
open(
os.path.normpath(
"tests/resources/download/acousticbrainz_genre_index.json.zip"
),
"rb",
).read()
)
remotes = {
"index": download_utils.RemoteFileMetadata(
filename="acousticbrainz_genre_index.json.zip",
url=httpserver.url,
checksum="b32a663449c1da55de424d845521eb79",
)
}
dataset = acousticbrainz_genre.Dataset(data_home, version="test")
dataset.remotes = remotes
dataset.download()
assert os.path.exists(data_home)
assert os.path.exists(
os.path.join(data_home, "acousticbrainz_genre_index.json.zip")
)
httpserver.serve_content(
open(
os.path.normpath(
"tests/resources/download/acousticbrainz-mediaeval-features-train-01.tar.bz2"
),
"rb",
).read()
)
remotes = {
"train-01": download_utils.RemoteFileMetadata(
filename="acousticbrainz-mediaeval-features-train-01.tar.bz2",
url=httpserver.url,
checksum="eb155784e1d4de0f35aa23ded4d34849",
destination_dir="acousticbrainz-mediaeval-train",
unpack_directories=["acousticbrainz-mediaeval-train"],
)
}
dataset.remotes = remotes
dataset.download()
assert os.path.exists(data_home)
assert os.path.exists(os.path.join(data_home, "acousticbrainz-mediaeval-train"))
assert os.path.exists(
os.path.join(data_home, "acousticbrainz-mediaeval-train", "01")
)
assert os.path.exists(
os.path.join(
data_home,
"acousticbrainz-mediaeval-train",
"01",
"01a0a332-d340-4806-a88b-cb60a05355c0.json",
)
)
shutil.rmtree(data_home)
|
666b35da46d5e4584dd3dfd71837db46b7417ac7
|
90f9bdb537573ae3081e2a93e05cfc93b5f99612
|
/LoopStructural/analysis/__init__.py
|
899d41accb58666c860d0e3cf330a76c8be913fa
|
[
"MIT"
] |
permissive
|
Loop3D/LoopStructural
|
d0fa201d9ff8d99fb47006e3def0fbfb30ece5c4
|
c6175623450dbc79ed06ed8d8bbff21b63fc8b4c
|
refs/heads/master
| 2023-06-25T21:17:47.595673
| 2023-06-19T00:40:20
| 2023-06-19T00:40:20
| 181,411,760
| 123
| 21
|
MIT
| 2023-06-19T00:49:32
| 2019-04-15T04:25:27
|
Python
|
UTF-8
|
Python
| false
| false
| 546
|
py
|
__init__.py
|
"""
Analysis
========
Various tools for analysing LoopStructural models, including calculating fault intersections and fault topologies
"""
from ..utils import getLogger
import LoopStructural
logger = getLogger(__name__)
if LoopStructural.experimental:
logger.warning(
"LoopStructural.analysis is experimental and may not perform as expected"
)
from ._fault_displacement import displacement_missfit
from ._fault_intersection import calculate_fault_intersections
from ._topology import calculate_fault_topology_matrix
|
d85b11e8e2a59452b86b286381226d3d078c8bac
|
bed34365a9dab825fd9f4a4ff1b0863f441266ac
|
/neutron/db/models/dns.py
|
651a04275ec5df95754c616c0474414e3c74bbc7
|
[
"Apache-2.0"
] |
permissive
|
openstack/neutron
|
0913ee3cd69d5bdb9c10aa084d4e1803abee320c
|
dde31aae392b80341f6440eb38db1583563d7d1f
|
refs/heads/master
| 2023-08-31T13:09:41.831598
| 2023-08-31T11:37:30
| 2023-08-31T11:37:30
| 2,400,289
| 1,174
| 1,325
|
Apache-2.0
| 2022-06-29T08:00:05
| 2011-09-16T16:04:08
|
Python
|
UTF-8
|
Python
| false
| false
| 5,200
|
py
|
dns.py
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from neutron_lib.db import constants
from neutron_lib.db import model_base
import sqlalchemy as sa
from sqlalchemy import orm
from sqlalchemy import sql
from neutron.db.models import l3 as l3_models
from neutron.db import models_v2
class NetworkDNSDomain(model_base.BASEV2):
network_id = sa.Column(sa.String(36),
sa.ForeignKey('networks.id', ondelete="CASCADE"),
primary_key=True)
dns_domain = sa.Column(sa.String(255),
nullable=False)
# Add a relationship to the Network model in order to instruct
# SQLAlchemy to eagerly load this association
network = orm.relationship(models_v2.Network,
load_on_pending=True,
backref=orm.backref("dns_domain",
lazy='joined',
uselist=False,
cascade='delete'))
revises_on_change = ('network', )
class FloatingIPDNS(model_base.BASEV2):
__tablename__ = 'floatingipdnses'
floatingip_id = sa.Column(sa.String(36),
sa.ForeignKey('floatingips.id',
ondelete="CASCADE"),
primary_key=True)
dns_name = sa.Column(sa.String(255),
nullable=False)
dns_domain = sa.Column(sa.String(255),
nullable=False)
published_dns_name = sa.Column(sa.String(255),
nullable=False)
published_dns_domain = sa.Column(sa.String(255),
nullable=False)
# Add a relationship to the FloatingIP model in order to instruct
# SQLAlchemy to eagerly load this association
floatingip = orm.relationship(l3_models.FloatingIP,
load_on_pending=True,
backref=orm.backref("dns",
lazy='joined',
uselist=False,
cascade='delete'))
revises_on_change = ('floatingip', )
class PortDNS(model_base.BASEV2):
__tablename__ = 'portdnses'
port_id = sa.Column(sa.String(36),
sa.ForeignKey('ports.id',
ondelete="CASCADE"),
primary_key=True)
current_dns_name = sa.Column(sa.String(255),
nullable=False)
current_dns_domain = sa.Column(sa.String(255),
nullable=False)
previous_dns_name = sa.Column(sa.String(255),
nullable=False)
previous_dns_domain = sa.Column(sa.String(255),
nullable=False)
dns_name = sa.Column(sa.String(255), nullable=False)
dns_domain = sa.Column(sa.String(constants.FQDN_FIELD_SIZE),
nullable=False,
server_default='')
# Add a relationship to the Port model in order to instruct
# SQLAlchemy to eagerly load this association
port = orm.relationship(models_v2.Port,
load_on_pending=True,
backref=orm.backref("dns",
lazy='joined',
uselist=False,
cascade='delete'))
revises_on_change = ('port', )
class SubnetDNSPublishFixedIP(model_base.BASEV2):
__tablename__ = "subnet_dns_publish_fixed_ips"
subnet_id = sa.Column(sa.String(constants.UUID_FIELD_SIZE),
sa.ForeignKey('subnets.id', ondelete="CASCADE"),
primary_key=True)
dns_publish_fixed_ip = sa.Column(sa.Boolean(),
nullable=False,
server_default=sql.false())
# Add a relationship to the Subnet model in order to instruct
# SQLAlchemy to eagerly load this association
subnet = orm.relationship(models_v2.Subnet,
load_on_pending=True,
backref=orm.backref("dns_publish_fixed_ip",
lazy='joined',
uselist=False,
cascade='delete'))
revises_on_change = ('subnet', )
|
6fd017be79633bc0cfa1be483d9f56306122c7f5
|
6923f79f1eaaba0ab28b25337ba6cb56be97d32d
|
/Mastering_Probabilistic_Graphical_Models_Using_Python/pgmpy/tests/test_models/test_JunctionTree.py
|
01042a6ed4843267e2a638238e0a5bc143234e97
|
[
"MIT"
] |
permissive
|
burakbayramli/books
|
9fe7ba0cabf06e113eb125d62fe16d4946f4a4f0
|
5e9a0e03aa7ddf5e5ddf89943ccc68d94b539e95
|
refs/heads/master
| 2023-08-17T05:31:08.885134
| 2023-08-14T10:05:37
| 2023-08-14T10:05:37
| 72,460,321
| 223
| 174
| null | 2022-10-24T12:15:06
| 2016-10-31T17:24:00
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 1,530
|
py
|
test_JunctionTree.py
|
from pgmpy.models import JunctionTree
from pgmpy.tests import help_functions as hf
import unittest
class TestJunctionTreeCreation(unittest.TestCase):
def setUp(self):
self.graph = JunctionTree()
def test_add_single_node(self):
self.graph.add_node(('a', 'b'))
self.assertListEqual(self.graph.nodes(), [('a', 'b')])
def test_add_single_node_raises_error(self):
self.assertRaises(TypeError, self.graph.add_node, 'a')
def test_add_multiple_nodes(self):
self.graph.add_nodes_from([('a', 'b'), ('b', 'c')])
self.assertListEqual(hf.recursive_sorted(self.graph.nodes()),
[['a', 'b'], ['b', 'c']])
def test_add_single_edge(self):
self.graph.add_edge(('a', 'b'), ('b', 'c'))
self.assertListEqual(hf.recursive_sorted(self.graph.nodes()),
[['a', 'b'], ['b', 'c']])
self.assertListEqual(sorted([node for edge in self.graph.edges()
for node in edge]),
[('a', 'b'), ('b', 'c')])
def test_add_single_edge_raises_error(self):
self.assertRaises(ValueError, self.graph.add_edge,
('a', 'b'), ('c', 'd'))
def test_add_cyclic_path_raises_error(self):
self.graph.add_edge(('a', 'b'), ('b', 'c'))
self.graph.add_edge(('b', 'c'), ('c', 'd'))
self.assertRaises(ValueError, self.graph.add_edge, ('c', 'd'), ('a', 'b'))
def tearDown(self):
del self.graph
|
c5d88a3f9609d934a391b53ff64fe869cc632c95
|
eb752c0386a217c95ce640bfb78f627dc56f2cad
|
/tests/test_endpoints.py
|
c9d71a30d8c7d8931fe2e6895b41050332607eb5
|
[
"MIT"
] |
permissive
|
MikeWooster/api-client
|
8cacad002406766e85f507f2a66a6374efcf8c69
|
380c28c9d28f05139e4d41aab1ea7d2729cbbbe1
|
refs/heads/master
| 2023-02-08T08:27:56.599632
| 2022-08-10T17:20:51
| 2022-08-10T17:20:51
| 170,163,866
| 153
| 41
|
MIT
| 2023-02-02T18:43:51
| 2019-02-11T16:39:57
|
Python
|
UTF-8
|
Python
| false
| false
| 2,082
|
py
|
test_endpoints.py
|
import os
import pytest
from apiclient import endpoint
@endpoint(base_url="http://foo.com")
class Endpoint:
search = "search"
integer = 3
search_id = "search/{id}"
_protected = "protected"
@endpoint(base_url="http://foo.com///")
class EndpointWithExtraSlash:
search = "///search"
class EndpointNotDecorated:
search = "search"
@endpoint(base_url=os.environ["ENDPOINT_BASE_URL"])
class EndpointFromEnvironment:
search = "search"
class BaseEndpoint:
get_apples = "apples"
get_grapes = "grapes"
def method(self):
pass
@endpoint(base_url="https://fruits.com")
class SubEndpoint(BaseEndpoint):
get_hamburgers = "hamburgers"
_ignore_attr = "ignored"
def test_endpoint():
assert Endpoint.search == "http://foo.com/search"
assert Endpoint.integer == "http://foo.com/3"
def test_decorator_removes_trailing_slashes_from_base_url():
assert EndpointWithExtraSlash.search == "http://foo.com/search"
def test_endpoint_must_contain_base_url():
with pytest.raises(RuntimeError) as exc_info:
endpoint(EndpointNotDecorated)
expected_message = "A decorated endpoint must define a base_url as @endpoint(base_url='http://foo.com')."
assert str(exc_info.value) == expected_message
def test_endpoint_with_formatting():
assert Endpoint.search_id == "http://foo.com/search/{id}"
assert Endpoint.search_id.format(id=34) == "http://foo.com/search/34"
def test_decorator_does_not_modify_protected_attributes():
assert Endpoint._protected == "protected"
def test_decorated_endpoint_loaded_from_environment_variable():
assert EndpointFromEnvironment.search == "http://environment.com/search"
def test_decorator_inherits_attributes():
assert BaseEndpoint.get_apples == "apples"
assert BaseEndpoint.get_grapes == "grapes"
assert SubEndpoint.get_apples == "https://fruits.com/apples"
assert SubEndpoint.get_grapes == "https://fruits.com/grapes"
assert SubEndpoint.get_hamburgers == "https://fruits.com/hamburgers"
assert SubEndpoint._ignore_attr == "ignored"
|
026975265d317e6008383b39429b27fa8deee35c
|
cfa35dc2ea93ee0eceb2399a9e6112e987579c09
|
/stonesoup/hypothesiser/categorical.py
|
669a7f475eb7dda8e2731825bf83ad19a4c76595
|
[
"LicenseRef-scancode-proprietary-license",
"MIT",
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0",
"Python-2.0",
"LicenseRef-scancode-secret-labs-2011"
] |
permissive
|
dstl/Stone-Soup
|
227e6a9e6fbdceca14af3f0259f311ec74095597
|
f24090cc919b3b590b84f965a3884ed1293d181d
|
refs/heads/main
| 2023-09-01T14:33:14.626428
| 2023-09-01T11:35:46
| 2023-09-01T11:35:46
| 98,420,803
| 315
| 126
|
MIT
| 2023-09-14T14:55:34
| 2017-07-26T12:34:28
|
Python
|
UTF-8
|
Python
| false
| false
| 3,818
|
py
|
categorical.py
|
from .base import Hypothesiser
from ..base import Property
from ..measures import ObservationAccuracy
from ..predictor.categorical import HMMPredictor
from ..types.detection import MissedDetection
from ..types.hypothesis import SingleProbabilityHypothesis
from ..types.multihypothesis import MultipleHypothesis
from ..types.numeric import Probability
from ..updater.categorical import HMMUpdater
class HMMHypothesiser(Hypothesiser):
r"""Hypothesiser based on categorical distribution accuracy.
This hypothesiser generates track predictions at detection times and scores each hypothesised
prediction-detection pair according to the accuracy of the corresponding measurement
prediction compared to the detection.
"""
predictor: HMMPredictor = Property(doc="Predictor used to predict tracks to detection times")
updater: HMMUpdater = Property(doc="Updater used to get measurement prediction")
prob_detect: Probability = Property(default=Probability(0.99),
doc="Target Detection Probability")
prob_gate: Probability = Property(default=Probability(0.95),
doc="Gate Probability - prob. gate contains true "
"measurement if detected")
def hypothesise(self, track, detections, timestamp):
""" Evaluate and return all track association hypotheses.
        For a given track and a set of N available detections, return a MultipleHypothesis object
        with N+1 hypotheses (the first corresponding to a 'MissedDetection'), each with an associated
        accuracy (of prediction emission to measurement), interpreted as the probability of the
        hypothesis being true.
Parameters
----------
track: :class:`~.Track`
The track object to hypothesise on. Composed of :class:`~.CategoricalState` types.
detections: :class:`set`
A set of :class:`~.CategoricalDetection` objects, representing the available
detections.
timestamp: :class:`datetime.datetime`
A timestamp used when evaluating the state and measurement predictions. Note that if a
        given detection has a non-empty timestamp, then prediction will be performed according
to the timestamp of the detection.
Returns
-------
: :class:`~.MultipleHypothesis`
A container of :class:`~.SingleProbabilityHypothesis` objects
"""
hypotheses = list()
prediction = self.predictor.predict(track, timestamp=timestamp)
probability = Probability(1 - self.prob_detect * self.prob_gate)
hypotheses.append(
SingleProbabilityHypothesis(
prediction,
MissedDetection(timestamp=timestamp),
probability
))
for detection in detections:
prediction = self.predictor.predict(track, timestamp=detection.timestamp)
measurement_prediction = self.updater.predict_measurement(
predicted_state=prediction,
measurement_model=detection.measurement_model,
noise=False
)
probability = self.measure(measurement_prediction, detection)
probability = probability * self.prob_detect
probability = Probability(probability, log_value=False)
# True detection hypothesis
hypotheses.append(
SingleProbabilityHypothesis(
prediction,
detection,
probability,
measurement_prediction))
return MultipleHypothesis(hypotheses, normalise=False, total_weight=1)
@property
def measure(self):
return ObservationAccuracy()
|
21c49a035e7518ea567a5c8083a8d1d6136d7a49
|
5c363c50c54175a982330ec888401b3e394373ab
|
/examples/training_scripts/height_with_cost/train_height_with_cost.py
|
bbb844ef6d6478406ce74d46a92db9d923b5b36c
|
[
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
awslabs/syne-tune
|
b14fb008f63def6a172bea6cc451f4e1906647f5
|
c35686e1b5947d45384fd1d41a44e013da53ef43
|
refs/heads/main
| 2023-08-14T14:21:48.995716
| 2023-08-03T12:57:13
| 2023-08-03T12:57:13
| 417,499,108
| 313
| 47
|
Apache-2.0
| 2023-09-14T14:06:54
| 2021-10-15T12:54:45
|
Python
|
UTF-8
|
Python
| false
| false
| 4,189
|
py
|
train_height_with_cost.py
|
# Copyright 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
"""
Derived from ``train_height.py``, but adds a variable cost (elapsed time).
"""
import argparse
import logging
import math
import os
import time
from syne_tune.utils import (
resume_from_checkpointed_model,
checkpoint_model_at_rung_level,
add_checkpointing_to_argparse,
parse_bool,
)
from syne_tune import Reporter
from syne_tune.config_space import randint, add_to_argparse
_config_space = {
"width": randint(0, 20),
"height": randint(-100, 100),
}
def height_with_cost_default_params(params=None):
dont_sleep = str(params is not None and params.get("backend") == "simulated")
return {
"max_resource_level": 100,
"grace_period": 1,
"reduction_factor": 3,
"dont_sleep": dont_sleep,
}
def height_with_cost_benchmark(params):
config_space = dict(
_config_space,
epochs=params["max_resource_level"],
dont_sleep=params["dont_sleep"],
)
return {
"script": __file__,
"metric": "mean_loss",
"mode": "min",
"resource_attr": "epoch",
"elapsed_time_attr": "elapsed_time",
"max_resource_attr": "epochs",
"config_space": config_space,
"supports_simulated": True,
}
def objective(config):
dont_sleep = parse_bool(config["dont_sleep"])
width = config["width"]
height = config["height"]
ts_start = time.time()
report = Reporter()
# Checkpointing
    # Since this is a tabular benchmark, checkpointing is not really needed.
    # Still, we use a "checkpoint" file in order to store the epoch at which
    # the evaluation was paused, since this information is not passed to the
    # script when it is resumed.
def load_model_fn(local_path: str) -> int:
local_filename = os.path.join(local_path, "checkpoint.json")
try:
with open(local_filename, "r") as f:
data = json.load(f)
resume_from = int(data["epoch"])
except Exception:
resume_from = 0
return resume_from
def save_model_fn(local_path: str, epoch: int):
os.makedirs(local_path, exist_ok=True)
local_filename = os.path.join(local_path, "checkpoint.json")
with open(local_filename, "w") as f:
json.dump({"epoch": str(epoch)}, f)
resume_from = resume_from_checkpointed_model(config, load_model_fn)
# Loop over epochs
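    # The per-epoch cost is deterministic and config-dependent, lying in
    # [0.05, 0.15] seconds; with the simulated backend ("dont_sleep") it is
    # accumulated instead of slept.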
cost_epoch = 0.1 + 0.05 * math.sin(width * height)
elapsed_time_raw = 0
for epoch in range(resume_from + 1, config["epochs"] + 1):
mean_loss = 1.0 / (0.1 + width * epoch / 100) + 0.1 * height
if dont_sleep:
elapsed_time_raw += cost_epoch
else:
time.sleep(cost_epoch)
elapsed_time = time.time() - ts_start + elapsed_time_raw
report(epoch=epoch, mean_loss=mean_loss, elapsed_time=elapsed_time)
# Write checkpoint (optional)
if epoch == config["epochs"]:
checkpoint_model_at_rung_level(config, save_model_fn, epoch)
if __name__ == "__main__":
# Benchmark-specific imports are done here, in order to avoid import
# errors if the dependencies are not installed (such errors should happen
# only when the code is really called)
import json
root = logging.getLogger()
root.setLevel(logging.INFO)
parser = argparse.ArgumentParser()
parser.add_argument("--epochs", type=int, required=True)
parser.add_argument("--dont_sleep", type=str, required=True)
add_to_argparse(parser, _config_space)
add_checkpointing_to_argparse(parser)
args, _ = parser.parse_known_args()
objective(config=vars(args))
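# Example invocation (a sketch; Syne Tune normally launches this script through
# a trial backend, and the checkpointing arguments added by
# add_checkpointing_to_argparse are optional):
#
#     python train_height_with_cost.py --epochs 100 --dont_sleep False \
#         --width 5 --height 10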
|
80d76bd7381bec237d63a7f6fffe7a3f02b5ed98
|
767dae79df18f9868855774464d08864a1d8629b
|
/protonfixes/gamefixes/242640.py
|
c70822ade5ec6ab21c8d77ec152227e57c6673bf
|
[
"BSD-2-Clause"
] |
permissive
|
simons-public/protonfixes
|
05cd9c2c37c35ce56ec4c3cdcdba375c6eadf530
|
681411ba8ceb5d2d790e674eb7a5b98951d426e6
|
refs/heads/master
| 2022-11-16T04:16:32.764931
| 2022-11-15T00:24:24
| 2022-11-15T00:24:24
| 150,211,569
| 245
| 75
|
NOASSERTION
| 2022-11-15T00:24:25
| 2018-09-25T05:20:02
|
Python
|
UTF-8
|
Python
| false
| false
| 486
|
py
|
242640.py
|
""" Game fix for Styx: Master of Shadows
"""
#pylint: disable=C0103
from protonfixes import util
def main():
""" Install dotnet40
Works, but gives a popup twice at the beginning of launch:
'Unable to find a version of the runtime to run this
    application.' (OK)
"""
# https://github.com/ValveSoftware/Proton/issues/810
# https://steamcommunity.com/app/242640/discussions/0/620700960990638817/
util.protontricks('xact')
util.protontricks('dotnet40')
|
76667c5e0e94e1efaaa39849d3c234d9f2b2eff4
|
2a1b8a671aceda6bc446f8ce26400aa84fa444a6
|
/Packs/CarbonBlackProtect/Scripts/CBPFindRule/CBPFindRule_test.py
|
32d105d3dae78729b13a206a14f8f2f1a1f9b256
|
[
"MIT"
] |
permissive
|
demisto/content
|
6d4722d46f0ff0beea2748e9f7de585bf91a78b4
|
890def5a0e0ae8d6eaa538148249ddbc851dbb6b
|
refs/heads/master
| 2023-09-04T00:02:25.618032
| 2023-09-03T21:56:22
| 2023-09-03T21:56:22
| 60,525,392
| 1,023
| 1,921
|
MIT
| 2023-09-14T20:55:24
| 2016-06-06T12:17:02
|
Python
|
UTF-8
|
Python
| false
| false
| 746
|
py
|
CBPFindRule_test.py
|
import demistomock as demisto # noqa: F401
from CommonServerPython import * # noqa: F401
def test_cbp_find_rule(mocker):
from CBPFindRule import cbp_find_rule
args = {"hash": "some_hash"}
rule = [{"Type": 3, "Contents": [{"hash": "some_hash", "fileState": 1}]}]
mocker.patch.object(demisto, 'executeCommand', return_value=rule)
mocker.patch.object(demisto, 'results')
cbp_find_rule(args)
res = demisto.results
content = res.call_args[0][0]
expected_res = [{'Type': 1, 'ContentsFormat': 'markdown', 'Contents': 'Hash some_hash is in state **Unapproved**\n'},
{'Type': 1, 'ContentsFormat': 'table', 'Contents': [{'hash': 'some_hash', 'fileState': 1}]}]
assert expected_res == content
|
978bcf394bcbe1c1f0b34ed0b45836d133fbedd0
|
bf001de41a72db2f0fc957fd4ba24a5b38ef1db5
|
/packages/camphr/camphr/tokenizer/juman/__init__.py
|
a17029a62398b62fb2a20a108b86457ebbc0790e
|
[
"Apache-2.0"
] |
permissive
|
PKSHATechnology-Research/camphr
|
b8e6e896238d0ea2c04db9709f6e68445e66cbee
|
d464d079e71fed0d53f78d45a42c1fdf6637c10a
|
refs/heads/master
| 2023-08-05T14:39:46.209210
| 2021-08-18T06:06:51
| 2021-08-18T06:06:51
| 239,420,805
| 373
| 23
|
Apache-2.0
| 2022-12-09T05:56:02
| 2020-02-10T03:39:58
|
Python
|
UTF-8
|
Python
| false
| false
| 5,083
|
py
|
__init__.py
|
"""The package juman defines Japanese spacy.Language with JUMAN tokenizer."""
from dataclasses import dataclass
from camphr.doc import Doc, UserDataProto
import itertools
import distutils.spawn
from typing import Any, Callable, Dict, Iterator, List, Optional
from typing_extensions import Literal
from camphr.serde import SerializationMixin
def get_juman_command() -> Optional[Literal["juman", "jumanpp"]]:
for cmd in ["jumanpp", "juman"]:
if distutils.spawn.find_executable(cmd):
return cmd # type: ignore
return None
@dataclass
class ShortUnitWord:
surface: str
lemma: str
pos: str
fstring: str
space: str
_REPLACE_STRINGS = {"\t": " ", "\r": "", "\n": " "}
def han_to_zen_normalize(text: str):
try:
import mojimoji
except ImportError:
raise ValueError("juman or knp Language requires mojimoji.")
text = mojimoji.han_to_zen(text)
for k, v in _REPLACE_STRINGS.items():
text = text.replace(k, v)
return text
class Tokenizer(SerializationMixin):
"""Juman tokenizer
Note:
        `spacy.Token._.fstring` is set. Juman's output is stored in it during tokenization.
"""
serialization_fields = ["preprocessor", "juman_kwargs"]
KEY_FSTRING = "juman_fstring"
@classmethod
def get_juman_fstring(cls, e: UserDataProto) -> str:
if cls.KEY_FSTRING not in e.user_data:
raise ValueError(f"{cls.KEY_FSTRING} is not set in {e}")
return e.user_data[cls.KEY_FSTRING]
@classmethod
def set_juman_fstring(cls, e: UserDataProto, fstring: str):
e.user_data[cls.KEY_FSTRING] = fstring
def __init__(
self,
juman_kwargs: Optional[Dict[str, Any]] = None,
preprocessor: Optional[Callable[[str], str]] = han_to_zen_normalize,
):
"""
Args:
juman_kwargs: passed to `pyknp.Juman.__init__`
preprocessor: applied to text before tokenizing. `mojimoji.han_to_zen` is often used.
"""
juman_kwargs = juman_kwargs or {}
default_command = get_juman_command()
assert default_command
juman_kwargs.setdefault("command", default_command)
self.juman_kwargs = juman_kwargs
self.preprocessor = preprocessor
self.set_tokenizer()
def set_tokenizer(self):
from pyknp import Juman
self.tokenizer = Juman(**self.juman_kwargs) if self.juman_kwargs else Juman()
def __call__(self, text: str) -> Doc:
"""Make doc from text. Juman's `fstring` is stored in `Token._.fstring`"""
if self.preprocessor:
text = self.preprocessor(text)
juman_lines = self._juman_parse(text)
dtokens = self._detailed_tokens(juman_lines)
doc = self._dtokens_to_doc(dtokens)
self.set_juman_fstring(doc, juman_lines)
return doc
def _juman_parse(self, text: str) -> str:
texts = _split_text_for_juman(text)
while True:
try:
lines: str = "".join(
itertools.chain.from_iterable(
self.tokenizer.juman_lines(text) for text in texts # type: ignore
)
)
break
except BrokenPipeError:
# Juman is sometimes broken due to its subprocess management.
self.set_tokenizer()
return lines
def _dtokens_to_doc(self, dtokens: List[ShortUnitWord]) -> Doc:
words = [x.surface + x.space for x in dtokens]
doc = Doc.from_words(words)
for token, dtoken in zip(doc, dtokens):
token.tag_ = dtoken.pos
token.lemma_ = dtoken.lemma
self.set_juman_fstring(token, dtoken.fstring)
return doc
def _detailed_tokens(self, juman_lines: str) -> List[ShortUnitWord]:
"""Tokenize text with Juman and format the outputs for further processing"""
from pyknp import MList, Morpheme # type: ignore
ml: List[Morpheme] = MList(juman_lines).mrph_list()
words: List[ShortUnitWord] = []
for m in ml:
surface: str = m.midasi # type: ignore
pos: str = m.hinsi + "," + m.bunrui # type: ignore
lemma: str = m.genkei or surface # type: ignore
words.append(ShortUnitWord(surface, lemma, pos, m.fstring, "")) # type: ignore
return words
_SEPS = ["。", ".", "."]
def _split_text_for_juman(text: str) -> Iterator[str]:
"""Juman denies long text (maybe >4096 bytes) so split text"""
n = 1000
if len(text) == 0:
return
if len(text) < n:
yield text
return
for sep in _SEPS:
if sep in text:
i = text.index(sep)
head, tail = text[: i + 1], text[i + 1 :]
if len(head) < n:
yield from _split_text_for_juman(head)
yield from _split_text_for_juman(tail)
return
# If any separator is not found in text, split roughly
yield text[:n]
yield from _split_text_for_juman(text[n:])
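# Minimal usage sketch (assumes Juman/Juman++ and pyknp are installed; the
# example sentence is arbitrary):
#
#     tokenizer = Tokenizer()
#     doc = tokenizer("すもももももももものうち")
#     for token in doc:
#         print(token.lemma_, token.tag_)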
|
20eb1f1ae0787830101c73542659053f904f1cc8
|
eb9f655206c43c12b497c667ba56a0d358b6bc3a
|
/python/testData/completion/className/alreadyImportedModulesPreference/alreadyImportedModulesPreference.py
|
54fc8eda0a2a3034e70c3213611a8f2acd83d374
|
[
"Apache-2.0"
] |
permissive
|
JetBrains/intellij-community
|
2ed226e200ecc17c037dcddd4a006de56cd43941
|
05dbd4575d01a213f3f4d69aa4968473f2536142
|
refs/heads/master
| 2023-09-03T17:06:37.560889
| 2023-09-03T11:51:00
| 2023-09-03T12:12:27
| 2,489,216
| 16,288
| 6,635
|
Apache-2.0
| 2023-09-12T07:41:58
| 2011-09-30T13:33:05
| null |
UTF-8
|
Python
| false
| false
| 28
|
py
|
alreadyImportedModulesPreference.py
|
from pkg3 import B
A<caret>
|
b134774d45d80b19ff3e533a77c6f76cb84f6690
|
3079aff5691cc3452c86c9a76c0309bf66c920a1
|
/python/cufinufft/tests/test_array_ordering.py
|
d42fd8fa717c43037a582d77a350d4eea02eaa79
|
[
"Apache-2.0"
] |
permissive
|
flatironinstitute/finufft
|
dd332853a8900b9ea8a5a6ad21a3842f07bc9fc8
|
0e5f3f3b3ec07ac8b7e7ccd26d88d2c912c1b9a6
|
refs/heads/master
| 2023-08-30T12:19:36.644780
| 2023-08-22T11:44:39
| 2023-08-23T10:08:35
| 78,682,506
| 209
| 80
|
NOASSERTION
| 2023-09-08T21:51:05
| 2017-01-11T21:42:14
|
C++
|
UTF-8
|
Python
| false
| false
| 1,592
|
py
|
test_array_ordering.py
|
import pytest
import numpy as np
import pycuda.autoinit # NOQA:401
import pycuda.gpuarray as gpuarray
from cufinufft import Plan
import utils
def test_type2_ordering(dtype=np.float32, shape=(16, 16, 16), M=4096, tol=1e-3):
complex_dtype = utils._complex_dtype(dtype)
k = utils.gen_nu_pts(M).astype(dtype)
fk = utils.gen_uniform_data(shape).astype(complex_dtype)
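    # Same values as fk, but F-ordered (transpose, copy, transpose back).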
fkTT = fk.T.copy().T
k_gpu = gpuarray.to_gpu(k)
fk_gpu = gpuarray.to_gpu(fk)
fkTT_gpu = gpuarray.to_gpu(fkTT)
plan = Plan(2, shape, eps=tol, dtype=complex_dtype)
plan.setpts(k_gpu[0], k_gpu[1], k_gpu[2])
c_gpu = plan.execute(fk_gpu)
with pytest.raises(TypeError, match="following requirement: C") as err:
cTT_gpu = plan.execute(fkTT_gpu)
    # Ideally, it should be possible to get this to align with the true
    # output, but currently it does not look like it does.
# c = c_gpu.get()
# cTT = cTT_gpu.get()
# assert np.allclose(c, cTT, rtol=1e-2)
def test_type1_ordering(dtype=np.float32, shape=(16, 16, 16), M=4096, tol=1e-3):
complex_dtype = utils._complex_dtype(dtype)
k, c = utils.type1_problem(dtype, shape, M)
k_gpu = gpuarray.to_gpu(k)
c_gpu = gpuarray.to_gpu(c)
plan = Plan(1, shape, eps=tol, dtype=complex_dtype)
plan.setpts(*k_gpu)
out_gpu = gpuarray.GPUArray(shape, dtype=complex_dtype)
plan.execute(c_gpu, out=out_gpu)
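    # An F-ordered output array should be rejected: the plan requires
    # C-contiguous arrays.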
out_gpu = gpuarray.GPUArray(shape, dtype=complex_dtype, order="F")
with pytest.raises(TypeError, match="following requirement: C") as err:
plan.execute(c_gpu, out=out_gpu)
|
f8225930346faaadd6e7e65cf25d3ec28c91ee66
|
1658f312f3aee37c6c4e2714ac081e081e73a7b8
|
/python/ctranslate2/converters/transformers.py
|
3e54354d0c6c21155614ef1ad029c3ee1dac92b0
|
[
"MIT"
] |
permissive
|
OpenNMT/CTranslate2
|
c96ac260e5a910ba8c7bec1f2ad7945599d90ec4
|
61d34502325bfa3c5ef8a11cd2e391d0efed1bf9
|
refs/heads/master
| 2023-08-16T03:02:30.003902
| 2023-08-04T13:33:06
| 2023-08-04T13:33:06
| 210,299,376
| 1,744
| 185
|
MIT
| 2023-09-13T07:58:59
| 2019-09-23T08:10:42
|
C++
|
UTF-8
|
Python
| false
| false
| 63,756
|
py
|
transformers.py
|
import abc
import argparse
import gc
import itertools
import os
from typing import List, Optional
import numpy as np
try:
import huggingface_hub
import torch
import transformers
except ImportError:
pass
from ctranslate2.converters import utils
from ctranslate2.converters.converter import Converter
from ctranslate2.specs import common_spec, model_spec, transformer_spec, whisper_spec
_SUPPORTED_ACTIVATIONS = {
"gelu": common_spec.Activation.GELU,
"gelu_fast": common_spec.Activation.GELUTanh,
"gelu_new": common_spec.Activation.GELUTanh,
"gelu_python": common_spec.Activation.GELU,
"gelu_pytorch_tanh": common_spec.Activation.GELUTanh,
"quick_gelu": common_spec.Activation.GELUSigmoid,
"relu": common_spec.Activation.RELU,
"silu": common_spec.Activation.SWISH,
"swish": common_spec.Activation.SWISH,
}
_MODEL_LOADERS = {}
def register_loader(config_name):
"""Registers a model loader for this configuration name."""
def decorator(cls):
_MODEL_LOADERS[config_name] = cls()
return cls
return decorator
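# Registration sketch: decorating a ModelLoader subclass with
# `@register_loader("FooConfig")` stores an *instance* of it in _MODEL_LOADERS
# under the Transformers config class name, which TransformersConverter._load
# then uses to dispatch on `config.__class__.__name__` ("FooConfig" is a
# hypothetical name).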
class TransformersConverter(Converter):
"""Converts models from Hugging Face Transformers."""
def __init__(
self,
model_name_or_path: str,
activation_scales: Optional[str] = None,
copy_files: Optional[List[str]] = None,
load_as_float16: bool = False,
revision: Optional[str] = None,
low_cpu_mem_usage: bool = False,
trust_remote_code: bool = False,
):
"""Initializes the converter.
Arguments:
model_name_or_path: Name of the pretrained model to download, or path to the
directory containing the pretrained model.
activation_scales: Path to the pre-computed activation scales. Models may
use them to rescale some weights to smooth the intermediate activations
and improve the quantization accuracy. See
https://github.com/mit-han-lab/smoothquant.
copy_files: List of filenames to copy from the Hugging Face model to the
converted model directory.
load_as_float16: Load the model weights as float16. More precisely, the model
will be loaded with ``from_pretrained(..., torch_dtype=torch.float16)``.
revision: Revision of the model to download from the Hugging Face Hub.
low_cpu_mem_usage: Enable the flag ``low_cpu_mem_usage`` when loading the model
with ``from_pretrained``.
trust_remote_code: Allow converting models using custom code.
"""
self._model_name_or_path = model_name_or_path
self._activation_scales = activation_scales
self._copy_files = copy_files
self._load_as_float16 = load_as_float16
self._revision = revision
self._low_cpu_mem_usage = low_cpu_mem_usage
self._trust_remote_code = trust_remote_code
def _load(self):
with torch.no_grad():
config = transformers.AutoConfig.from_pretrained(
self._model_name_or_path, trust_remote_code=self._trust_remote_code
)
config_name = config.__class__.__name__
loader = _MODEL_LOADERS.get(config_name)
if loader is None:
raise ValueError(
"No conversion is registered for the model configuration %s "
"(supported configurations are: %s)"
% (config_name, ", ".join(sorted(_MODEL_LOADERS.keys())))
)
model_class = getattr(transformers, loader.architecture_name)
tokenizer_class = transformers.AutoTokenizer
kwargs = {
"torch_dtype": (
torch.float16
if self._load_as_float16
else getattr(config, "torch_dtype", None)
)
}
if self._revision:
kwargs["revision"] = self._revision
if self._low_cpu_mem_usage:
kwargs["low_cpu_mem_usage"] = self._low_cpu_mem_usage
if self._trust_remote_code:
kwargs["trust_remote_code"] = self._trust_remote_code
model = self.load_model(model_class, self._model_name_or_path, **kwargs)
tokenizer_kwargs = {}
if self._trust_remote_code:
tokenizer_kwargs["trust_remote_code"] = self._trust_remote_code
tokenizer = self.load_tokenizer(
tokenizer_class, self._model_name_or_path, **tokenizer_kwargs
)
spec = loader(model, tokenizer)
if self._activation_scales:
activation_scales = torch.load(
self._activation_scales, map_location="cpu"
)
loader.smooth_activation(spec, activation_scales)
if self._copy_files:
for filename in self._copy_files:
spec.register_file(self.get_model_file(filename))
return spec
def load_model(self, model_class, model_name_or_path, **kwargs):
return model_class.from_pretrained(model_name_or_path, **kwargs)
def load_tokenizer(self, tokenizer_class, model_name_or_path, **kwargs):
return tokenizer_class.from_pretrained(model_name_or_path, **kwargs)
def get_model_file(self, filename):
if os.path.isdir(self._model_name_or_path):
path = os.path.join(self._model_name_or_path, filename)
else:
try:
path = huggingface_hub.hf_hub_download(
repo_id=self._model_name_or_path, filename=filename
)
except huggingface_hub.utils.EntryNotFoundError:
path = None
if path is None or not os.path.isfile(path):
raise ValueError(
"File %s does not exist in model %s"
% (filename, self._model_name_or_path)
)
return path
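# Minimal usage sketch (the model name and output directory are hypothetical;
# convert() is inherited from the base Converter class):
#
#     converter = TransformersConverter("facebook/opt-125m")
#     converter.convert("opt-125m-ct2")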
class ModelLoader(abc.ABC):
"""Base class for loading Transformers models into a CTranslate2 model specification."""
@property
def architecture_name(self):
return None
@abc.abstractmethod
def get_model_spec(self, model):
raise NotImplementedError()
def __call__(self, model, tokenizer):
spec = self.get_model_spec(model)
self.set_config(spec.config, model, tokenizer)
tokens = self.get_vocabulary(model, tokenizer)
self.set_vocabulary(spec, tokens)
return spec
def get_vocabulary(self, model, tokenizer):
return [
token
for token, _ in sorted(
tokenizer.get_vocab().items(), key=lambda item: item[1]
)
]
def set_vocabulary(self, spec, tokens):
pass
def set_config(self, config, model, tokenizer):
pass
def set_layer_norm(self, spec, module):
spec.gamma = module.weight
spec.beta = module.bias
def set_linear(self, spec, module):
spec.weight = module.weight
if isinstance(module, transformers.Conv1D):
spec.weight = spec.weight.transpose(0, 1)
if module.bias is not None:
spec.bias = module.bias
def set_embeddings(self, spec, module):
spec.weight = module.weight
def set_position_encodings(self, spec, module):
spec.encodings = module.weight
offset = getattr(module, "offset", 0)
if offset > 0:
spec.encodings = spec.encodings[offset:]
def smooth_activation(self, spec, activation_scales):
raise NotImplementedError(
"No activation smoothing logic is defined for this model"
)
@register_loader("BartConfig")
class BartLoader(ModelLoader):
@property
def architecture_name(self):
return "BartForConditionalGeneration"
def get_model_spec(self, model):
spec = transformer_spec.TransformerSpec.from_config(
(model.config.encoder_layers, model.config.decoder_layers),
model.config.encoder_attention_heads,
pre_norm=model.config.normalize_before,
activation=_SUPPORTED_ACTIVATIONS[model.config.activation_function],
layernorm_embedding=getattr(model.config, "normalize_embedding", True),
)
self.set_encoder(spec.encoder, model.model.encoder)
self.set_decoder(spec.decoder, model.model.decoder)
self.set_linear(spec.decoder.projection, model.lm_head)
final_logits_bias = getattr(model, "final_logits_bias", None)
if final_logits_bias is not None and final_logits_bias.nonzero().numel() != 0:
spec.decoder.projection.bias = final_logits_bias.squeeze()
return spec
def get_vocabulary(self, model, tokenizer):
tokens = super().get_vocabulary(model, tokenizer)
if model.config.vocab_size < len(tokens):
tokens = tokens[: model.config.vocab_size]
return tokens
def set_vocabulary(self, spec, tokens):
spec.register_source_vocabulary(tokens)
spec.register_target_vocabulary(tokens)
def set_config(self, config, model, tokenizer):
config.bos_token = tokenizer.bos_token
config.eos_token = tokenizer.eos_token
config.unk_token = tokenizer.unk_token
config.decoder_start_token = tokenizer.convert_ids_to_tokens(
model.config.decoder_start_token_id
)
def set_encoder(self, spec, encoder):
self.set_common_layers(spec, encoder)
for layer_spec, layer in zip(spec.layer, encoder.layers):
self.set_attention(
layer_spec.self_attention,
layer.self_attn,
self_attention=True,
)
self.set_layer_norm(
layer_spec.self_attention.layer_norm,
layer.self_attn_layer_norm,
)
self.set_linear(layer_spec.ffn.linear_0, layer.fc1)
self.set_linear(layer_spec.ffn.linear_1, layer.fc2)
self.set_layer_norm(layer_spec.ffn.layer_norm, layer.final_layer_norm)
def set_decoder(self, spec, decoder):
self.set_common_layers(spec, decoder)
for layer_spec, layer in zip(spec.layer, decoder.layers):
self.set_attention(
layer_spec.self_attention,
layer.self_attn,
self_attention=True,
)
self.set_layer_norm(
layer_spec.self_attention.layer_norm,
layer.self_attn_layer_norm,
)
if hasattr(layer, "encoder_attn"):
self.set_attention(
layer_spec.attention,
layer.encoder_attn,
self_attention=False,
)
self.set_layer_norm(
layer_spec.attention.layer_norm,
layer.encoder_attn_layer_norm,
)
self.set_linear(layer_spec.ffn.linear_0, layer.fc1)
self.set_linear(layer_spec.ffn.linear_1, layer.fc2)
self.set_layer_norm(layer_spec.ffn.layer_norm, layer.final_layer_norm)
def set_attention(self, spec, attention, self_attention=False):
split_layers = [common_spec.LinearSpec() for _ in range(3)]
self.set_linear(split_layers[0], attention.q_proj)
self.set_linear(split_layers[1], attention.k_proj)
self.set_linear(split_layers[2], attention.v_proj)
if self_attention:
utils.fuse_linear(spec.linear[0], split_layers)
else:
utils.fuse_linear(spec.linear[0], split_layers[:1])
utils.fuse_linear(spec.linear[1], split_layers[1:])
self.set_linear(spec.linear[-1], attention.out_proj)
def set_common_layers(self, spec, module):
spec.scale_embeddings = module.embed_scale
self.set_position_encodings(spec.position_encodings, module.embed_positions)
self.set_embeddings(
spec.embeddings[0]
if isinstance(spec.embeddings, list)
else spec.embeddings,
module.embed_tokens,
)
if hasattr(module, "layer_norm"):
self.set_layer_norm(spec.layer_norm, module.layer_norm)
if hasattr(module, "layernorm_embedding"):
self.set_layer_norm(spec.layernorm_embedding, module.layernorm_embedding)
@register_loader("MarianConfig")
class MarianMTLoader(BartLoader):
@property
def architecture_name(self):
return "MarianMTModel"
def get_model_spec(self, model):
model.config.normalize_before = False
model.config.normalize_embedding = False
spec = super().get_model_spec(model)
self._remove_pad_weights(spec)
return spec
def set_config(self, config, model, tokenizer):
config.eos_token = tokenizer.eos_token
config.unk_token = tokenizer.unk_token
# The decoder start token can be any token because the decoder always starts
# from a zero embedding.
config.decoder_start_token = tokenizer.eos_token
def set_decoder(self, spec, decoder):
spec.start_from_zero_embedding = True
super().set_decoder(spec, decoder)
def get_vocabulary(self, model, tokenizer):
# The <pad> token is added by Transformers to start the decoder from a zero embedding,
# but we already have a dedicated option "start_from_zero_embedding". We remove this token
# to match the original Marian vocabulary and prevent this token from being generated.
tokens = super().get_vocabulary(model, tokenizer)
if tokens[-1] == "<pad>":
tokens.pop()
return tokens
def _remove_pad_weights(self, spec):
vocab_specs = [
spec.encoder.embeddings[0],
spec.decoder.embeddings,
spec.decoder.projection,
]
# Weights may be shared so we check against the expected size to prevent
# updating the same weight multiple times.
new_vocab_size = vocab_specs[0].weight.shape[0] - 1
for vocab_spec in vocab_specs:
if vocab_spec.weight.shape[0] == new_vocab_size + 1:
vocab_spec.weight = vocab_spec.weight[:-1]
if (
isinstance(vocab_spec, common_spec.LinearSpec)
and vocab_spec.has_bias()
and vocab_spec.bias.shape[0] == new_vocab_size + 1
):
vocab_spec.bias = vocab_spec.bias[:-1]
@register_loader("M2M100Config")
class M2M100Loader(BartLoader):
@property
def architecture_name(self):
return "M2M100ForConditionalGeneration"
def get_model_spec(self, model):
model.config.normalize_before = True
model.config.normalize_embedding = False
return super().get_model_spec(model)
def set_position_encodings(self, spec, module):
spec.encodings = module.weights[module.offset :]
def get_vocabulary(self, model, tokenizer):
tokens = super().get_vocabulary(model, tokenizer)
# Workaround for issue https://github.com/OpenNMT/CTranslate2/issues/1039.
if tokens[-1] == tokenizer.unk_token:
tokens.insert(tokenizer.unk_token_id, tokens.pop())
for token in tokenizer.additional_special_tokens:
if token not in tokens:
tokens.append(token)
num_madeup_words = getattr(
tokenizer, "num_madeup_words", model.config.vocab_size - len(tokens)
)
if num_madeup_words > 0:
tokens += ["madeupword%d" % i for i in range(num_madeup_words)]
return tokens
@register_loader("MBartConfig")
class MBartLoader(BartLoader):
@property
def architecture_name(self):
return "MBartForConditionalGeneration"
def set_config(self, config, model, tokenizer):
config.bos_token = tokenizer.bos_token
config.eos_token = tokenizer.eos_token
config.unk_token = tokenizer.unk_token
# MBart-25 passes the language code as the decoder start token.
if model.config.tokenizer_class in ("MBartTokenizer", None):
config.decoder_start_token = None
else:
config.decoder_start_token = tokenizer.eos_token
@register_loader("PegasusConfig")
class PegasusLoader(BartLoader):
@property
def architecture_name(self):
return "PegasusForConditionalGeneration"
def set_config(self, config, model, tokenizer):
config.bos_token = tokenizer.pad_token
config.eos_token = tokenizer.eos_token
config.unk_token = tokenizer.unk_token
config.decoder_start_token = tokenizer.pad_token
@register_loader("OPTConfig")
class OPTLoader(BartLoader):
@property
def architecture_name(self):
return "OPTForCausalLM"
def get_model_spec(self, model):
spec = transformer_spec.TransformerDecoderModelSpec.from_config(
model.config.num_hidden_layers,
model.config.num_attention_heads,
pre_norm=model.config.do_layer_norm_before,
activation=_SUPPORTED_ACTIVATIONS[model.config.activation_function],
project_in_out=model.config.word_embed_proj_dim != model.config.hidden_size,
)
self.set_decoder(spec.decoder, model.model.decoder)
self.set_linear(spec.decoder.projection, model.lm_head)
return spec
def smooth_activation(self, spec, activation_scales):
for i, layer in enumerate(spec.decoder.layer):
layer_scope = "model.decoder.layers.%d" % i
utils.smooth_activation(
layer.self_attention.layer_norm,
layer.self_attention.linear[0],
activation_scales["%s.self_attn.q_proj" % layer_scope],
)
utils.smooth_activation(
layer.ffn.layer_norm,
layer.ffn.linear_0,
activation_scales["%s.fc1" % layer_scope],
)
def set_vocabulary(self, spec, tokens):
spec.register_vocabulary(tokens)
def set_config(self, config, model, tokenizer):
config.bos_token = tokenizer.bos_token
config.eos_token = tokenizer.eos_token
config.unk_token = tokenizer.unk_token
def set_decoder(self, spec, decoder):
super().set_decoder(spec, decoder)
if decoder.project_in is not None:
self.set_linear(spec.project_in, decoder.project_in)
if decoder.project_out is not None:
self.set_linear(spec.project_out, decoder.project_out)
if decoder.final_layer_norm is not None:
self.set_layer_norm(spec.layer_norm, decoder.final_layer_norm)
def set_common_layers(self, spec, module):
spec.scale_embeddings = False
self.set_position_encodings(spec.position_encodings, module.embed_positions)
self.set_embeddings(spec.embeddings, module.embed_tokens)
def get_vocabulary(self, model, tokenizer):
tokens = super().get_vocabulary(model, tokenizer)
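        # Pad the vocabulary to a multiple of 8 with made-up tokens, matching
        # the padded embedding matrices of OPT checkpoints.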
i = 0
while len(tokens) % 8 != 0:
symbol = "madeupword{:04d}".format(i)
if symbol not in tokens:
tokens.append(symbol)
i += 1
return tokens
@register_loader("GPTBigCodeConfig")
class GPTBigCodeMHALoader(ModelLoader):
@property
def architecture_name(self):
return "GPTBigCodeForCausalLM"
def get_model_spec(self, model):
spec = transformer_spec.TransformerDecoderModelSpec.from_config(
model.config.n_layer,
model.config.n_head,
pre_norm=True,
activation=_SUPPORTED_ACTIVATIONS[model.config.activation_function],
multi_query_attention=True,
)
self.set_decoder(spec.decoder, model.transformer)
self.set_linear(spec.decoder.projection, model.lm_head)
return spec
def set_vocabulary(self, spec, tokens):
spec.register_vocabulary(tokens)
def get_vocabulary(self, model, tokenizer):
tokens = super().get_vocabulary(model, tokenizer)
extra_ids = model.config.vocab_size - len(tokens)
for i in range(extra_ids):
tokens.append("<extra_id_%d>" % i)
return tokens
def set_config(self, config, model, tokenizer):
config.bos_token = tokenizer.bos_token
config.eos_token = tokenizer.eos_token
config.unk_token = tokenizer.unk_token
def set_decoder(self, spec, module):
spec.scale_embeddings = False
self.set_embeddings(spec.embeddings, module.wte)
self.set_position_encodings(spec.position_encodings, module.wpe)
self.set_layer_norm(spec.layer_norm, module.ln_f)
for layer_spec, layer in zip(spec.layer, module.h):
self.set_layer_norm(layer_spec.self_attention.layer_norm, layer.ln_1)
self.set_linear(layer_spec.self_attention.linear[0], layer.attn.c_attn)
self.set_linear(layer_spec.self_attention.linear[1], layer.attn.c_proj)
self.set_layer_norm(layer_spec.ffn.layer_norm, layer.ln_2)
self.set_linear(layer_spec.ffn.linear_0, layer.mlp.c_fc)
self.set_linear(layer_spec.ffn.linear_1, layer.mlp.c_proj)
@register_loader("GPT2Config")
class GPT2Loader(ModelLoader):
@property
def architecture_name(self):
return "GPT2LMHeadModel"
def get_model_spec(self, model):
spec = transformer_spec.TransformerDecoderModelSpec.from_config(
model.config.n_layer,
model.config.n_head,
pre_norm=True,
activation=_SUPPORTED_ACTIVATIONS[model.config.activation_function],
)
self.set_decoder(spec.decoder, model.transformer)
self.set_linear(spec.decoder.projection, model.lm_head)
return spec
def set_vocabulary(self, spec, tokens):
spec.register_vocabulary(tokens)
def set_config(self, config, model, tokenizer):
config.bos_token = tokenizer.bos_token
config.eos_token = tokenizer.eos_token
config.unk_token = tokenizer.unk_token
def set_decoder(self, spec, module):
spec.scale_embeddings = False
self.set_embeddings(spec.embeddings, module.wte)
self.set_position_encodings(spec.position_encodings, module.wpe)
self.set_layer_norm(spec.layer_norm, module.ln_f)
for layer_spec, layer in zip(spec.layer, module.h):
self.set_layer_norm(layer_spec.self_attention.layer_norm, layer.ln_1)
self.set_linear(layer_spec.self_attention.linear[0], layer.attn.c_attn)
self.set_linear(layer_spec.self_attention.linear[1], layer.attn.c_proj)
self.set_layer_norm(layer_spec.ffn.layer_norm, layer.ln_2)
self.set_linear(layer_spec.ffn.linear_0, layer.mlp.c_fc)
self.set_linear(layer_spec.ffn.linear_1, layer.mlp.c_proj)
@register_loader("GPTJConfig")
class GPTJLoader(ModelLoader):
@property
def architecture_name(self):
return "GPTJForCausalLM"
def get_model_spec(self, model):
spec = transformer_spec.TransformerDecoderModelSpec.from_config(
model.config.n_layer,
model.config.n_head,
pre_norm=True,
activation=_SUPPORTED_ACTIVATIONS[model.config.activation_function],
rotary_dim=model.config.rotary_dim,
rotary_interleave=False,
parallel_residual=True,
shared_layer_norm=True,
)
self.set_decoder(
spec.decoder,
model.transformer,
model.config.rotary_dim,
model.config.n_head,
)
self.set_linear(spec.decoder.projection, model.lm_head)
return spec
def set_vocabulary(self, spec, tokens):
spec.register_vocabulary(tokens)
def set_config(self, config, model, tokenizer):
config.bos_token = tokenizer.bos_token
config.eos_token = tokenizer.eos_token
config.unk_token = tokenizer.unk_token
def set_decoder(self, spec, module, rotary_dim, num_heads):
spec.scale_embeddings = False
self.set_embeddings(spec.embeddings, module.wte)
self.set_layer_norm(spec.layer_norm, module.ln_f)
for layer_spec, layer in zip(spec.layer, module.h):
self.set_layer_norm(layer_spec.shared_layer_norm, layer.ln_1)
qw = layer.attn.q_proj.weight
kw = layer.attn.k_proj.weight
vw = layer.attn.v_proj.weight
qw = utils.permute_for_sliced_rotary(qw, num_heads, rotary_dim)
kw = utils.permute_for_sliced_rotary(kw, num_heads, rotary_dim)
layer_spec.self_attention.linear[0].weight = torch.cat((qw, kw, vw))
self.set_linear(layer_spec.self_attention.linear[1], layer.attn.out_proj)
self.set_linear(layer_spec.ffn.linear_0, layer.mlp.fc_in)
self.set_linear(layer_spec.ffn.linear_1, layer.mlp.fc_out)
@register_loader("CodeGenConfig")
class CodeGenLoader(ModelLoader):
@property
def architecture_name(self):
return "CodeGenForCausalLM"
def get_model_spec(self, model):
spec = transformer_spec.TransformerDecoderModelSpec.from_config(
model.config.n_layer,
model.config.n_head,
pre_norm=True,
activation=_SUPPORTED_ACTIVATIONS[model.config.activation_function],
rotary_dim=model.config.rotary_dim,
rotary_interleave=False,
parallel_residual=True,
shared_layer_norm=True,
)
mp_num = 4
if hasattr(model.config, "head_dim") and model.config.head_dim in [128, 256]:
            # Models forked from "Salesforce/codegen2-1B" and "Salesforce/codegen2-3_7B"
            # use a special setting of mp_num=8; all others use 4.
            # These configs are identified by their special head_dim setting.
mp_num = 8
self.set_decoder(
spec.decoder,
model.transformer,
model.config.rotary_dim,
model.config.n_head,
model.config.n_embd,
mp_num=mp_num,
)
self.set_linear(spec.decoder.projection, model.lm_head)
return spec
def get_vocabulary(self, model, tokenizer):
tokens = super().get_vocabulary(model, tokenizer)
extra_ids = model.config.vocab_size - len(tokens)
for i in range(extra_ids):
            # Fix for additional vocabulary entries; see the GPTNeoX converter.
tokens.append("<extra_id_%d>" % i)
return tokens
def set_vocabulary(self, spec, tokens):
spec.register_vocabulary(tokens)
def set_config(self, config, model, tokenizer):
config.bos_token = tokenizer.bos_token
config.eos_token = tokenizer.eos_token
config.unk_token = tokenizer.unk_token
def set_decoder(self, spec, module, rotary_dim, num_heads, embed_dim, mp_num):
spec.scale_embeddings = False
self.set_embeddings(spec.embeddings, module.wte)
self.set_layer_norm(spec.layer_norm, module.ln_f)
base_permutation = np.arange(0, mp_num * 3).reshape(-1, 3).T.flatten().tolist()
local_dim = embed_dim // mp_num
permutation = torch.cat(
[torch.arange(i * local_dim, (i + 1) * local_dim) for i in base_permutation]
)
for layer_spec, layer in zip(spec.layer, module.h):
self.set_layer_norm(layer_spec.shared_layer_norm, layer.ln_1)
# [start convert CodeGen to GPT-J format]
# see https://github.com/fauxpilot/fauxpilot/blob/fb4073a9078dd001ebeb7dfefb8cb2ecc8a88f4b/converter/codegen_gptj_convert.py # noqa
qkv_proj = layer.attn.qkv_proj.weight
            # GPT-J and CodeGen slice up the QKV projection slightly differently.
            # The following permutation brings the CodeGen 'qkv_proj' into the
            # GPT-J order of qw, vw, kw.
            # We permute the *rows* here because the computation is xA.T.
            new_qkv_proj = qkv_proj[permutation, :]
            # The name QKV is misleading here: the chunks are actually stored
            # in Q, V, K order.
            qw, vw, kw = new_qkv_proj.chunk(3, dim=0)
# [end convert CodeGen to GPT-J.]
qw = utils.permute_for_sliced_rotary(qw, num_heads, rotary_dim)
kw = utils.permute_for_sliced_rotary(kw, num_heads, rotary_dim)
layer_spec.self_attention.linear[0].weight = torch.cat((qw, kw, vw))
self.set_linear(layer_spec.self_attention.linear[1], layer.attn.out_proj)
self.set_linear(layer_spec.ffn.linear_0, layer.mlp.fc_in)
self.set_linear(layer_spec.ffn.linear_1, layer.mlp.fc_out)
@register_loader("GPTNeoXConfig")
class GPTNeoXLoader(ModelLoader):
@property
def architecture_name(self):
return "GPTNeoXForCausalLM"
def get_model_spec(self, model):
spec = transformer_spec.TransformerDecoderModelSpec.from_config(
model.config.num_hidden_layers,
model.config.num_attention_heads,
pre_norm=True,
activation=_SUPPORTED_ACTIVATIONS[model.config.hidden_act],
rotary_dim=int(
model.config.rotary_pct
* (model.config.hidden_size // model.config.num_attention_heads)
),
rotary_interleave=False,
parallel_residual=model.config.use_parallel_residual,
shared_layer_norm=False,
)
self.set_decoder(spec.decoder, model.gpt_neox, model.config.num_attention_heads)
self.set_linear(spec.decoder.projection, model.embed_out)
return spec
def get_vocabulary(self, model, tokenizer):
tokens = super().get_vocabulary(model, tokenizer)
extra_ids = model.config.vocab_size - len(tokens)
for i in range(extra_ids):
tokens.append("<extra_id_%d>" % i)
return tokens
def set_vocabulary(self, spec, tokens):
spec.register_vocabulary(tokens)
def set_config(self, config, model, tokenizer):
config.bos_token = tokenizer.bos_token
config.eos_token = tokenizer.eos_token
config.unk_token = tokenizer.unk_token
def set_decoder(self, spec, module, num_heads):
spec.scale_embeddings = False
self.set_embeddings(spec.embeddings, module.embed_in)
self.set_layer_norm(spec.layer_norm, module.final_layer_norm)
for layer_spec, layer in zip(spec.layer, module.layers):
if hasattr(layer_spec, "input_layer_norm"): # Use parallel residual.
self.set_layer_norm(layer_spec.input_layer_norm, layer.input_layernorm)
self.set_layer_norm(
layer_spec.post_attention_layer_norm, layer.post_attention_layernorm
)
else:
self.set_layer_norm(
layer_spec.self_attention.layer_norm, layer.input_layernorm
)
self.set_layer_norm(
layer_spec.ffn.layer_norm, layer.post_attention_layernorm
)
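            # GPT-NeoX stores the fused QKV interleaved per head ([q, k, v]
            # blocks); reorder it into contiguous Q, K, V blocks.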
qkv_w = layer.attention.query_key_value.weight
qkv_b = layer.attention.query_key_value.bias
qkv_w = (
qkv_w.reshape(num_heads, 3, -1, qkv_w.shape[-1])
.swapaxes(0, 1)
.reshape(-1, qkv_w.shape[-1])
)
qkv_b = qkv_b.reshape(num_heads, 3, -1).swapaxes(0, 1).reshape(-1)
layer_spec.self_attention.linear[0].weight = qkv_w
layer_spec.self_attention.linear[0].bias = qkv_b
self.set_linear(layer_spec.self_attention.linear[1], layer.attention.dense)
self.set_linear(layer_spec.ffn.linear_0, layer.mlp.dense_h_to_4h)
self.set_linear(layer_spec.ffn.linear_1, layer.mlp.dense_4h_to_h)
@register_loader("WhisperConfig")
class WhisperLoader(BartLoader):
@property
def architecture_name(self):
return "WhisperForConditionalGeneration"
def get_model_spec(self, model):
spec = whisper_spec.WhisperSpec(
model.config.encoder_layers,
model.config.encoder_attention_heads,
)
self.set_encoder(spec.encoder, model.model.encoder)
self.set_decoder(spec.decoder, model.model.decoder)
self.set_linear(spec.decoder.projection, model.proj_out)
return spec
def set_config(self, config, model, tokenizer):
config.suppress_ids = model.config.suppress_tokens
config.suppress_ids_begin = model.config.begin_suppress_tokens
config.lang_ids = tokenizer.additional_special_tokens_ids[2:-6]
config.alignment_heads = _WHISPER_ALIGNMENT_HEADS.get(model.name_or_path)
if config.alignment_heads is None:
# Use the last half layers for alignment by default.
num_layers = model.config.decoder_layers
num_heads = model.config.decoder_attention_heads
config.alignment_heads = list(
itertools.product(
range(num_layers // 2, num_layers),
range(num_heads),
)
)
def get_vocabulary(self, model, tokenizer):
tokens = super().get_vocabulary(model, tokenizer)
# Add timestamp tokens.
tokens.extend(
"<|%.2f|>" % (i * 0.02)
for i in range(model.config.vocab_size - len(tokens))
)
return tokens
def set_vocabulary(self, spec, tokens):
spec.register_vocabulary(tokens)
def set_encoder(self, spec, encoder):
self.set_conv1d(spec.conv1, encoder.conv1)
self.set_conv1d(spec.conv2, encoder.conv2)
super().set_encoder(spec, encoder)
def set_decoder(self, spec, decoder):
self.set_embeddings(spec.embeddings, decoder.embed_tokens)
super().set_decoder(spec, decoder)
def set_common_layers(self, spec, module):
self.set_position_encodings(spec.position_encodings, module.embed_positions)
self.set_layer_norm(spec.layer_norm, module.layer_norm)
def set_conv1d(self, spec, module):
spec.weight = module.weight
spec.bias = module.bias
@register_loader("T5Config")
class T5Loader(ModelLoader):
@property
def architecture_name(self):
return "T5ForConditionalGeneration"
def get_model_spec(self, model):
spec = transformer_spec.TransformerSpec.from_config(
(model.config.num_layers, model.config.num_decoder_layers),
model.config.num_heads,
pre_norm=True,
activation=_SUPPORTED_ACTIVATIONS[model.config.dense_act_fn],
ffn_glu=model.config.is_gated_act,
relative_attention_bias=True,
rms_norm=True,
)
self.set_stack(spec.encoder, model.encoder)
self.set_stack(spec.decoder, model.decoder, is_decoder=True)
self.set_linear(spec.decoder.projection, model.lm_head)
if model.config.tie_word_embeddings:
spec.decoder.scale_outputs = model.config.d_model**-0.5
return spec
def get_vocabulary(self, model, tokenizer):
tokens = super().get_vocabulary(model, tokenizer)
extra_ids = model.config.vocab_size - len(tokens)
for i in range(extra_ids):
tokens.append("<extra_id_%d>" % i)
return tokens
def set_vocabulary(self, spec, tokens):
spec.register_source_vocabulary(tokens)
spec.register_target_vocabulary(tokens)
def set_config(self, config, model, tokenizer):
config.bos_token = tokenizer.pad_token
config.eos_token = tokenizer.eos_token
config.unk_token = tokenizer.unk_token
config.decoder_start_token = tokenizer.pad_token
def set_stack(self, spec, module, is_decoder=False):
self.set_layer_norm(spec.layer_norm, module.final_layer_norm)
self.set_embeddings(
spec.embeddings[0]
if isinstance(spec.embeddings, list)
else spec.embeddings,
module.embed_tokens,
)
spec.scale_embeddings = False
for i, (layer_spec, block) in enumerate(zip(spec.layer, module.block)):
self.set_self_attention(layer_spec.self_attention, block.layer[0])
if i > 0:
# Reuse relative attention bias from the first layer.
first_self_attention = spec.layer[0].self_attention
layer_spec.self_attention.relative_attention_bias = (
first_self_attention.relative_attention_bias
)
layer_spec.self_attention.relative_attention_max_distance = (
first_self_attention.relative_attention_max_distance
)
if is_decoder:
self.set_cross_attention(layer_spec.attention, block.layer[1])
self.set_ffn(layer_spec.ffn, block.layer[-1])
def set_ffn(self, spec, module):
if hasattr(spec, "linear_0_noact"):
self.set_linear(spec.linear_0, module.DenseReluDense.wi_0)
self.set_linear(spec.linear_0_noact, module.DenseReluDense.wi_1)
else:
self.set_linear(spec.linear_0, module.DenseReluDense.wi)
self.set_linear(spec.linear_1, module.DenseReluDense.wo)
self.set_layer_norm(spec.layer_norm, module.layer_norm)
def set_self_attention(self, spec, module):
self.set_attention(spec, module.SelfAttention, self_attention=True)
self.set_layer_norm(spec.layer_norm, module.layer_norm)
def set_cross_attention(self, spec, module):
self.set_attention(spec, module.EncDecAttention)
self.set_layer_norm(spec.layer_norm, module.layer_norm)
def set_attention(self, spec, attention, self_attention=False):
spec.queries_scale = 1.0
split_layers = [common_spec.LinearSpec() for _ in range(3)]
self.set_linear(split_layers[0], attention.q)
self.set_linear(split_layers[1], attention.k)
self.set_linear(split_layers[2], attention.v)
if self_attention:
utils.fuse_linear(spec.linear[0], split_layers)
else:
utils.fuse_linear(spec.linear[0], split_layers[:1])
utils.fuse_linear(spec.linear[1], split_layers[1:])
self.set_linear(spec.linear[-1], attention.o)
if attention.has_relative_attention_bias:
spec.relative_attention_bias = attention.relative_attention_bias.weight
spec.relative_attention_max_distance = np.dtype("int32").type(
attention.relative_attention_max_distance
)
def set_layer_norm(self, spec, layer_norm):
spec.gamma = layer_norm.weight
@register_loader("MT5Config")
class MT5Loader(T5Loader):
@property
def architecture_name(self):
return "MT5ForConditionalGeneration"
@register_loader("BloomConfig")
class BloomLoader(ModelLoader):
@property
def architecture_name(self):
return "BloomForCausalLM"
def get_model_spec(self, model):
spec = transformer_spec.TransformerDecoderModelSpec.from_config(
model.config.n_layer,
model.config.n_head,
pre_norm=True,
activation=common_spec.Activation.GELUTanh,
layernorm_embedding=True,
alibi=True,
alibi_use_positive_positions=True,
)
self.set_decoder(spec.decoder, model.transformer)
self.set_linear(spec.decoder.projection, model.lm_head)
return spec
def get_vocabulary(self, model, tokenizer):
tokens = super().get_vocabulary(model, tokenizer)
extra_ids = model.config.vocab_size - len(tokens)
for i in range(extra_ids):
tokens.append("<extra_id_%d>" % i)
return tokens
def set_vocabulary(self, spec, tokens):
spec.register_vocabulary(tokens)
def set_config(self, config, model, tokenizer):
config.bos_token = tokenizer.bos_token
config.eos_token = tokenizer.eos_token
config.unk_token = tokenizer.unk_token
def set_decoder(self, spec, module):
spec.scale_embeddings = False
self.set_embeddings(spec.embeddings, module.word_embeddings)
self.set_layer_norm(spec.layernorm_embedding, module.word_embeddings_layernorm)
self.set_layer_norm(spec.layer_norm, module.ln_f)
for layer_spec, layer in zip(spec.layer, module.h):
self.set_layer_norm(
layer_spec.self_attention.layer_norm, layer.input_layernorm
)
self.set_qkv_linear(
layer_spec.self_attention.linear[0],
layer.self_attention.query_key_value,
layer.self_attention.num_heads,
)
self.set_linear(
layer_spec.self_attention.linear[1], layer.self_attention.dense
)
self.set_layer_norm(
layer_spec.ffn.layer_norm, layer.post_attention_layernorm
)
self.set_linear(layer_spec.ffn.linear_0, layer.mlp.dense_h_to_4h)
self.set_linear(layer_spec.ffn.linear_1, layer.mlp.dense_4h_to_h)
def set_qkv_linear(self, spec, module, num_heads):
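        # Bloom also interleaves QKV per head; reorder the fused weight and
        # bias into contiguous Q, K, V blocks.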
weight = module.weight
weight = weight.reshape(num_heads, 3, -1, weight.shape[-1])
weight = weight.transpose(0, 1)
weight = weight.reshape(-1, weight.shape[-1])
bias = module.bias
bias = bias.reshape(num_heads, 3, -1)
bias = bias.transpose(0, 1)
bias = bias.reshape(-1)
spec.weight = weight
spec.bias = bias
@register_loader("MPTConfig")
class MPTLoader(ModelLoader):
@property
def architecture_name(self):
return "AutoModelForCausalLM"
def get_model_spec(self, model):
spec = transformer_spec.TransformerDecoderModelSpec.from_config(
model.config.n_layers,
model.config.n_heads,
pre_norm=True,
activation=common_spec.Activation.GELU,
alibi=True,
)
self.set_decoder(spec.decoder, model.transformer)
return spec
def get_vocabulary(self, model, tokenizer):
tokens = super().get_vocabulary(model, tokenizer)
extra_ids = model.config.vocab_size - len(tokens)
for i in range(extra_ids):
tokens.append("<extra_id_%d>" % i)
return tokens
def set_vocabulary(self, spec, tokens):
spec.register_vocabulary(tokens)
def set_config(self, config, model, tokenizer):
config.bos_token = tokenizer.bos_token
config.eos_token = tokenizer.eos_token
config.unk_token = tokenizer.unk_token
def set_decoder(self, spec, module):
self.set_embeddings(spec.embeddings, module.wte)
self.set_layer_norm(spec.layer_norm, module.norm_f)
spec.scale_embeddings = False
spec.projection.weight = spec.embeddings.weight
for layer_spec, layer in zip(spec.layer, module.blocks):
self.set_layer_norm(layer_spec.self_attention.layer_norm, layer.norm_1)
self.set_linear(layer_spec.self_attention.linear[0], layer.attn.Wqkv)
self.set_linear(layer_spec.self_attention.linear[1], layer.attn.out_proj)
self.set_layer_norm(layer_spec.ffn.layer_norm, layer.norm_2)
self.set_linear(layer_spec.ffn.linear_0, layer.ffn.up_proj)
self.set_linear(layer_spec.ffn.linear_1, layer.ffn.down_proj)
def set_layer_norm(self, spec, module):
spec.gamma = module.weight
spec.beta = torch.zeros_like(spec.gamma)
@register_loader("LlamaConfig")
class LlamaLoader(ModelLoader):
@property
def architecture_name(self):
return "LlamaForCausalLM"
def get_model_spec(self, model):
num_layers = model.config.num_hidden_layers
num_heads = model.config.num_attention_heads
num_heads_kv = getattr(model.config, "num_key_value_heads", num_heads)
if num_heads_kv == num_heads:
num_heads_kv = None
spec = transformer_spec.TransformerDecoderModelSpec.from_config(
num_layers,
num_heads,
activation=common_spec.Activation.SWISH,
pre_norm=True,
ffn_glu=True,
rms_norm=True,
rotary_dim=0,
rotary_interleave=False,
num_heads_kv=num_heads_kv,
)
self.set_decoder(spec.decoder, model.model)
self.set_linear(spec.decoder.projection, model.lm_head)
return spec
def get_vocabulary(self, model, tokenizer):
tokens = super().get_vocabulary(model, tokenizer)
extra_ids = model.config.vocab_size - len(tokens)
for i in range(extra_ids):
tokens.append("<extra_id_%d>" % i)
return tokens
def set_vocabulary(self, spec, tokens):
spec.register_vocabulary(tokens)
def set_config(self, config, model, tokenizer):
config.bos_token = tokenizer.bos_token
config.eos_token = tokenizer.eos_token
config.unk_token = tokenizer.unk_token
config.layer_norm_epsilon = model.config.rms_norm_eps
def set_layer_norm(self, spec, layer_norm):
spec.gamma = layer_norm.weight
def set_decoder(self, spec, module):
spec.scale_embeddings = False
self.set_embeddings(spec.embeddings, module.embed_tokens)
self.set_layer_norm(spec.layer_norm, module.norm)
for layer_spec, layer in zip(spec.layer, module.layers):
self.set_layer_norm(
layer_spec.self_attention.layer_norm, layer.input_layernorm
)
self.set_layer_norm(
layer_spec.ffn.layer_norm, layer.post_attention_layernorm
)
wq = layer.self_attn.q_proj.weight
wk = layer.self_attn.k_proj.weight
wv = layer.self_attn.v_proj.weight
wo = layer.self_attn.o_proj.weight
layer_spec.self_attention.linear[0].weight = torch.cat([wq, wk, wv])
layer_spec.self_attention.linear[1].weight = wo
self.set_linear(layer_spec.ffn.linear_0, layer.mlp.gate_proj)
self.set_linear(layer_spec.ffn.linear_0_noact, layer.mlp.up_proj)
self.set_linear(layer_spec.ffn.linear_1, layer.mlp.down_proj)
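            # Release the original attention and MLP modules layer by layer to
            # reduce peak memory usage during conversion.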
delattr(layer, "self_attn")
delattr(layer, "mlp")
gc.collect()
@register_loader("RWConfig")
class RWLoader(ModelLoader):
@property
def architecture_name(self):
return "AutoModelForCausalLM"
def get_model_spec(self, model):
if getattr(model.config, "multi_query", False):
num_heads_kv = 1
else:
num_heads_kv = getattr(model.config, "n_head_kv", None)
spec = transformer_spec.TransformerDecoderModelSpec.from_config(
model.config.n_layer,
model.config.n_head,
pre_norm=True,
activation=common_spec.Activation.GELU,
alibi=model.config.alibi,
alibi_use_positive_positions=True,
rotary_dim=0,
rotary_interleave=False,
parallel_residual=model.config.parallel_attn,
shared_layer_norm=num_heads_kv == 1,
num_heads_kv=num_heads_kv,
)
self.set_decoder(spec.decoder, model.transformer)
self.set_linear(spec.decoder.projection, model.lm_head)
return spec
def get_vocabulary(self, model, tokenizer):
tokens = super().get_vocabulary(model, tokenizer)
extra_ids = model.config.vocab_size - len(tokens)
for i in range(extra_ids):
tokens.append("<extra_id_%d>" % i)
return tokens
def set_vocabulary(self, spec, tokens):
spec.register_vocabulary(tokens)
def set_config(self, config, model, tokenizer):
config.bos_token = tokenizer.eos_token
config.eos_token = tokenizer.eos_token
config.unk_token = tokenizer.eos_token
def set_decoder(self, spec, module):
spec.scale_embeddings = False
self.set_embeddings(spec.embeddings, module.word_embeddings)
self.set_layer_norm(spec.layer_norm, module.ln_f)
for layer_spec, layer in zip(spec.layer, module.h):
if hasattr(layer, "ln_attn"):
self.set_layer_norm(layer_spec.input_layer_norm, layer.ln_attn)
self.set_layer_norm(layer_spec.post_attention_layer_norm, layer.ln_mlp)
elif hasattr(layer_spec, "shared_layer_norm"):
self.set_layer_norm(layer_spec.shared_layer_norm, layer.input_layernorm)
else:
self.set_layer_norm(
layer_spec.self_attention.layer_norm, layer.input_layernorm
)
self.set_layer_norm(
layer_spec.ffn.layer_norm, layer.post_attention_layernorm
)
if layer.self_attention.num_kv == 1:
self.set_linear(
layer_spec.self_attention.linear[0],
layer.self_attention.query_key_value,
)
else:
self.set_qkv_linear(
layer_spec.self_attention.linear[0],
layer.self_attention.query_key_value,
layer.self_attention.num_heads,
layer.self_attention.num_kv
if layer.self_attention.num_kv < layer.self_attention.num_heads
else None,
)
self.set_linear(
layer_spec.self_attention.linear[1], layer.self_attention.dense
)
self.set_linear(layer_spec.ffn.linear_0, layer.mlp.dense_h_to_4h)
self.set_linear(layer_spec.ffn.linear_1, layer.mlp.dense_4h_to_h)
def set_qkv_linear(self, spec, module, num_heads, num_kv=None):
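        # RW (Falcon) checkpoints fuse QKV either interleaved per head (when
        # num_kv is None) or grouped with num_heads // num_kv query heads per
        # KV head; both layouts are reordered into contiguous Q, K, V blocks.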
weight = module.weight
if num_kv is None:
weight = weight.reshape(num_heads, 3, -1, weight.shape[-1])
weight = weight.transpose(0, 1)
weight = weight.reshape(-1, weight.shape[-1])
else:
head_dim = weight.shape[0] // (num_heads + num_kv * 2)
weight = weight.reshape(
-1, num_heads // num_kv + 2, head_dim, weight.shape[-1]
)
q, k, v = weight.split([num_heads // num_kv, 1, 1], dim=1)
weight = torch.cat(
[
q.reshape(num_heads * head_dim, -1),
k.reshape(num_kv * head_dim, -1),
v.reshape(num_kv * head_dim, -1),
]
)
spec.weight = weight
if module.bias is not None:
bias = module.bias
if num_kv is None:
bias = bias.reshape(num_heads, 3, -1)
bias = bias.transpose(0, 1)
bias = bias.reshape(-1)
else:
bias = bias.reshape(-1, num_heads // num_kv + 2, head_dim)
q, k, v = bias.split([num_heads // num_kv, 1, 1], dim=1)
bias = torch.cat(
[
q.reshape(num_heads * head_dim),
k.reshape(num_kv * head_dim),
v.reshape(num_kv * head_dim),
]
)
spec.bias = bias
@register_loader("DistilBertConfig")
class DistilBertLoader(ModelLoader):
@property
def architecture_name(self):
return "DistilBertModel"
def get_model_spec(self, model):
encoder_spec = transformer_spec.TransformerEncoderSpec(
model.config.n_layers,
model.config.n_heads,
pre_norm=False,
activation=_SUPPORTED_ACTIVATIONS[model.config.activation],
layernorm_embedding=True,
)
spec = transformer_spec.TransformerEncoderModelSpec(
encoder_spec,
)
spec.encoder.scale_embeddings = False
self.set_embeddings(
spec.encoder.embeddings[0], model.embeddings.word_embeddings
)
self.set_position_encodings(
spec.encoder.position_encodings, model.embeddings.position_embeddings
)
self.set_layer_norm(
spec.encoder.layernorm_embedding, model.embeddings.LayerNorm
)
for layer_spec, layer in zip(spec.encoder.layer, model.transformer.layer):
split_layers = [common_spec.LinearSpec() for _ in range(3)]
self.set_linear(split_layers[0], layer.attention.q_lin)
self.set_linear(split_layers[1], layer.attention.k_lin)
self.set_linear(split_layers[2], layer.attention.v_lin)
utils.fuse_linear(layer_spec.self_attention.linear[0], split_layers)
self.set_linear(
layer_spec.self_attention.linear[1], layer.attention.out_lin
)
self.set_layer_norm(
layer_spec.self_attention.layer_norm, layer.sa_layer_norm
)
self.set_linear(layer_spec.ffn.linear_0, layer.ffn.lin1)
self.set_linear(layer_spec.ffn.linear_1, layer.ffn.lin2)
self.set_layer_norm(layer_spec.ffn.layer_norm, layer.output_layer_norm)
return spec
def set_vocabulary(self, spec, tokens):
spec.register_vocabulary(tokens)
def set_config(self, config, model, tokenizer):
config.unk_token = tokenizer.unk_token
config.layer_norm_epsilon = 1e-12
@register_loader("BertConfig")
class BertLoader(ModelLoader):
@property
def architecture_name(self):
return "BertModel"
def get_model_spec(self, model):
assert model.config.position_embedding_type == "absolute"
encoder_spec = transformer_spec.TransformerEncoderSpec(
model.config.num_hidden_layers,
model.config.num_attention_heads,
pre_norm=False,
activation=_SUPPORTED_ACTIVATIONS[model.config.hidden_act],
layernorm_embedding=True,
num_source_embeddings=2,
embeddings_merge=common_spec.EmbeddingsMerge.ADD,
)
spec = transformer_spec.TransformerEncoderModelSpec(
encoder_spec,
pooling_layer=True,
pooling_activation=common_spec.Activation.Tanh,
)
spec.encoder.scale_embeddings = False
self.set_embeddings(
spec.encoder.embeddings[0], model.embeddings.word_embeddings
)
self.set_embeddings(
spec.encoder.embeddings[1], model.embeddings.token_type_embeddings
)
self.set_position_encodings(
spec.encoder.position_encodings, model.embeddings.position_embeddings
)
self.set_layer_norm(
spec.encoder.layernorm_embedding, model.embeddings.LayerNorm
)
self.set_linear(spec.pooler_dense, model.pooler.dense)
for layer_spec, layer in zip(spec.encoder.layer, model.encoder.layer):
split_layers = [common_spec.LinearSpec() for _ in range(3)]
self.set_linear(split_layers[0], layer.attention.self.query)
self.set_linear(split_layers[1], layer.attention.self.key)
self.set_linear(split_layers[2], layer.attention.self.value)
utils.fuse_linear(layer_spec.self_attention.linear[0], split_layers)
self.set_linear(
layer_spec.self_attention.linear[1], layer.attention.output.dense
)
self.set_layer_norm(
layer_spec.self_attention.layer_norm, layer.attention.output.LayerNorm
)
self.set_linear(layer_spec.ffn.linear_0, layer.intermediate.dense)
self.set_linear(layer_spec.ffn.linear_1, layer.output.dense)
self.set_layer_norm(layer_spec.ffn.layer_norm, layer.output.LayerNorm)
return spec
def get_vocabulary(self, model, tokenizer):
tokens = super().get_vocabulary(model, tokenizer)
extra_ids = model.config.vocab_size - len(tokens)
for i in range(extra_ids):
tokens.append("<extra_id_%d>" % i)
return tokens
def set_vocabulary(self, spec, tokens):
spec.register_vocabulary(tokens)
def set_config(self, config, model, tokenizer):
config.unk_token = tokenizer.unk_token
config.layer_norm_epsilon = model.config.layer_norm_eps
@register_loader("XLMRobertaConfig")
class XLMRobertaLoader(ModelLoader):
@property
def architecture_name(self):
return "XLMRobertaForSequenceClassification"
def get_model_spec(self, model):
assert model.config.position_embedding_type == "absolute"
encoder_spec = transformer_spec.TransformerEncoderSpec(
model.config.num_hidden_layers,
model.config.num_attention_heads,
pre_norm=False,
activation=_SUPPORTED_ACTIVATIONS[model.config.hidden_act],
layernorm_embedding=True,
num_source_embeddings=2,
embeddings_merge=common_spec.EmbeddingsMerge.ADD,
)
        pooling_layer = model.roberta.pooler is not None
spec = transformer_spec.TransformerEncoderModelSpec(
encoder_spec,
pooling_layer=pooling_layer,
pooling_activation=common_spec.Activation.Tanh,
)
spec.encoder.scale_embeddings = False
self.set_embeddings(
spec.encoder.embeddings[0], model.roberta.embeddings.word_embeddings
)
self.set_embeddings(
spec.encoder.embeddings[1], model.roberta.embeddings.token_type_embeddings
)
self.set_position_encodings(
spec.encoder.position_encodings,
model.roberta.embeddings.position_embeddings,
)
self.set_layer_norm(
spec.encoder.layernorm_embedding, model.roberta.embeddings.LayerNorm
)
if pooling_layer:
self.set_linear(spec.pooler_dense, model.roberta.pooler.dense)
for layer_spec, layer in zip(spec.encoder.layer, model.roberta.encoder.layer):
split_layers = [common_spec.LinearSpec() for _ in range(3)]
self.set_linear(split_layers[0], layer.attention.self.query)
self.set_linear(split_layers[1], layer.attention.self.key)
self.set_linear(split_layers[2], layer.attention.self.value)
utils.fuse_linear(layer_spec.self_attention.linear[0], split_layers)
self.set_linear(
layer_spec.self_attention.linear[1], layer.attention.output.dense
)
self.set_layer_norm(
layer_spec.self_attention.layer_norm, layer.attention.output.LayerNorm
)
self.set_linear(layer_spec.ffn.linear_0, layer.intermediate.dense)
self.set_linear(layer_spec.ffn.linear_1, layer.output.dense)
self.set_layer_norm(layer_spec.ffn.layer_norm, layer.output.LayerNorm)
return spec
def set_vocabulary(self, spec, tokens):
spec.register_vocabulary(tokens)
def set_config(self, config, model, tokenizer):
config.unk_token = tokenizer.unk_token
config.layer_norm_epsilon = model.config.layer_norm_eps
def set_position_encodings(self, spec, module):
spec.encodings = module.weight
offset = getattr(module, "padding_idx", 0)
if offset > 0:
spec.encodings = spec.encodings[offset + 1 :]
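# Note: every loader above collapses the separate query/key/value projections
# into one fused layer via ``utils.fuse_linear``. The sketch below is an
# illustration of what such a fusion amounts to; it is ours, not the library
# routine, and assumes the weights are fused by concatenation along the
# output axis so a single GEMM serves Q, K and V.
def _fuse_qkv_sketch(weights, biases):
    """Concatenate per-projection (W, b) pairs along the output dimension."""
    import numpy as np

    fused_weight = np.concatenate(weights, axis=0)  # (3 * d_out, d_in)
    fused_bias = np.concatenate(biases, axis=0)  # (3 * d_out,)
    return fused_weight, fused_bias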
def main():
parser = argparse.ArgumentParser(
formatter_class=argparse.ArgumentDefaultsHelpFormatter
)
parser.add_argument(
"--model",
required=True,
help=(
"Name of the pretrained model to download, "
"or path to a directory containing the pretrained model."
),
)
parser.add_argument(
"--activation_scales",
help=(
"Path to the pre-computed activation scales. Models may "
"use them to rescale some weights to smooth the intermediate activations "
"and improve the quantization accuracy. See "
"https://github.com/mit-han-lab/smoothquant."
),
)
parser.add_argument(
"--copy_files",
nargs="+",
help=(
"List of filenames to copy from the Hugging Face model to the converted "
"model directory."
),
)
parser.add_argument(
"--revision",
help="Revision of the model to download from the Hugging Face Hub.",
)
parser.add_argument(
"--low_cpu_mem_usage",
action="store_true",
help="Enable the flag low_cpu_mem_usage when loading the model with from_pretrained.",
)
parser.add_argument(
"--trust_remote_code",
action="store_true",
help="Allow converting models using custom code.",
)
Converter.declare_arguments(parser)
args = parser.parse_args()
converter = TransformersConverter(
args.model,
activation_scales=args.activation_scales,
copy_files=args.copy_files,
load_as_float16=args.quantization in ("float16", "int8_float16"),
revision=args.revision,
low_cpu_mem_usage=args.low_cpu_mem_usage,
trust_remote_code=args.trust_remote_code,
)
converter.convert_from_args(args)
if __name__ == "__main__":
main()
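# Example invocation (the module path is hypothetical; ``--quantization`` is
# read above via ``args.quantization`` and ``--output_dir`` is assumed to be
# declared by ``Converter.declare_arguments``, while ``--model`` is declared
# in this file):
#
#     python converter.py --model distilbert-base-uncased \
#         --output_dir distilbert_ct2 --quantization int8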
# Cross-attention heads that are highly correlated to the word-level timing,
# i.e. the alignment between audio and text tokens.
# Obtained from https://github.com/openai/whisper/blob/v20230306/whisper/__init__.py#L31-L45
_WHISPER_ALIGNMENT_HEADS = {
"openai/whisper-tiny.en": [
(1, 0),
(2, 0),
(2, 5),
(3, 0),
(3, 1),
(3, 2),
(3, 3),
(3, 4),
],
"openai/whisper-tiny": [(2, 2), (3, 0), (3, 2), (3, 3), (3, 4), (3, 5)],
"openai/whisper-base.en": [(3, 3), (4, 7), (5, 1), (5, 5), (5, 7)],
"openai/whisper-base": [
(3, 1),
(4, 2),
(4, 3),
(4, 7),
(5, 1),
(5, 2),
(5, 4),
(5, 6),
],
"openai/whisper-small.en": [
(6, 6),
(7, 0),
(7, 3),
(7, 8),
(8, 2),
(8, 5),
(8, 7),
(9, 0),
(9, 4),
(9, 8),
(9, 10),
(10, 0),
(10, 1),
(10, 2),
(10, 3),
(10, 6),
(10, 11),
(11, 2),
(11, 4),
],
"openai/whisper-small": [
(5, 3),
(5, 9),
(8, 0),
(8, 4),
(8, 7),
(8, 8),
(9, 0),
(9, 7),
(9, 9),
(10, 5),
],
"openai/whisper-medium.en": [
(11, 4),
(14, 1),
(14, 12),
(14, 14),
(15, 4),
(16, 0),
(16, 4),
(16, 9),
(17, 12),
(17, 14),
(18, 7),
(18, 10),
(18, 15),
(20, 0),
(20, 3),
(20, 9),
(20, 14),
(21, 12),
],
"openai/whisper-medium": [(13, 15), (15, 4), (15, 15), (16, 1), (20, 0), (23, 4)],
"openai/whisper-large": [
(9, 19),
(11, 2),
(11, 4),
(11, 17),
(22, 7),
(22, 11),
(22, 17),
(23, 2),
(23, 15),
],
"openai/whisper-large-v2": [
(10, 12),
(13, 17),
(16, 11),
(16, 12),
(16, 13),
(17, 15),
(17, 16),
(18, 4),
(18, 11),
(18, 19),
(19, 11),
(21, 2),
(21, 3),
(22, 3),
(22, 9),
(22, 12),
(23, 5),
(23, 7),
(23, 13),
(25, 5),
(26, 1),
(26, 12),
(27, 15),
],
}
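# Sketch of one way to consume the table above: turn the (layer, head) pairs
# into a boolean mask over a model's cross-attention heads. The helper name
# and the layer/head counts are ours; this is not the upstream alignment code.
def _alignment_head_mask(model_name, num_layers, num_heads):
    import numpy as np

    mask = np.zeros((num_layers, num_heads), dtype=bool)
    for layer, head in _WHISPER_ALIGNMENT_HEADS.get(model_name, []):
        mask[layer, head] = True
    return mask


# e.g. _alignment_head_mask("openai/whisper-tiny", 4, 6) flags 6 of 24 heads.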
# =============================================================================
# FedML-AI/FedML · python/fedml/core/mlops/mlops_utils.py (Apache-2.0)
# =============================================================================
import os
from os.path import expanduser
import time
class MLOpsUtils:
_ntp_offset = None
@staticmethod
def calc_ntp_from_config(mlops_config):
if mlops_config is None:
return
ntp_response = mlops_config.get("NTP_RESPONSE", None)
if ntp_response is None:
return
# setup ntp time from the configs
device_recv_time = int(time.time() * 1000)
device_send_time = ntp_response.get("deviceSendTime", None)
server_recv_time = ntp_response.get("serverRecvTime", None)
server_send_time = ntp_response.get("serverSendTime", None)
if device_send_time is None or server_recv_time is None or server_send_time is None:
return
# calculate the time offset(int)
ntp_time = (server_recv_time + server_send_time + device_recv_time - device_send_time) // 2
ntp_offset = ntp_time - device_recv_time
# set the time offset
MLOpsUtils.set_ntp_offset(ntp_offset)
@staticmethod
def set_ntp_offset(ntp_offset):
MLOpsUtils._ntp_offset = ntp_offset
@staticmethod
def get_ntp_time():
if MLOpsUtils._ntp_offset is not None:
return int(time.time() * 1000) + MLOpsUtils._ntp_offset
return int(time.time() * 1000)
@staticmethod
def get_ntp_offset():
return MLOpsUtils._ntp_offset
@staticmethod
def write_log_trace(log_trace):
log_trace_dir = os.path.join(expanduser("~"), "fedml_log")
if not os.path.exists(log_trace_dir):
os.makedirs(log_trace_dir, exist_ok=True)
        with open(os.path.join(log_trace_dir, "logs.txt"), "a") as log_file_obj:
            log_file_obj.write("{}\n".format(log_trace))
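# Worked example of the offset arithmetic above (all values in ms, made up):
# the device sends at 1000, the server receives at 1500 and replies at 1510,
# and the device's clock reads 1020 on receipt. Then
#   ntp_time   = (1500 + 1510 + 1020 - 1000) // 2 = 1515
#   ntp_offset = 1515 - 1020 = 495
# so get_ntp_time() adds 495 ms to the local wall clock:
if __name__ == "__main__":
    MLOpsUtils.set_ntp_offset(495)
    print(MLOpsUtils.get_ntp_time() - int(time.time() * 1000))  # ~495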
# =============================================================================
# hotosm/tasking-manager · migrations/versions/dc250d726600_.py (BSD-2-Clause)
# =============================================================================
"""empty message
Revision ID: dc250d726600
Revises: ee5315dcf3e1
Create Date: 2017-05-29 10:14:06.958352
"""
from alembic import op
import sqlalchemy as sa
import geoalchemy2
# revision identifiers, used by Alembic.
revision = "dc250d726600"
down_revision = "ee5315dcf3e1"
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.add_column(
"projects",
sa.Column(
"centroid",
geoalchemy2.types.Geometry(geometry_type="POINT", srid=4326),
nullable=True,
),
)
op.add_column(
"projects",
sa.Column(
"geometry",
geoalchemy2.types.Geometry(geometry_type="MULTIPOLYGON", srid=4326),
nullable=True,
),
)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_column("projects", "geometry")
op.drop_column("projects", "centroid")
# ### end Alembic commands ###
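# A minimal sketch of applying this revision programmatically (the location of
# ``alembic.ini`` is an assumption about the project layout):
#
#     from alembic import command
#     from alembic.config import Config
#
#     command.upgrade(Config("alembic.ini"), "dc250d726600")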
# =============================================================================
# gil9red/SimplePyScripts · get_actual_news_from_rss_ya/webserver/web.py
# (CC-BY-4.0)
# =============================================================================
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
__author__ = "ipetrash"
from collections import defaultdict
from flask import Flask, jsonify, redirect, request
# Alias the storage helpers so the Flask views below (which reuse these names)
# do not shadow them and recurse into themselves.
from common import (
    get_news_list as db_get_news_list,
    get_news_list_and_mark_as_read as db_get_news_list_and_mark_as_read,
    reset_all_is_read as db_reset_all_is_read,
)
app = Flask(__name__)
@app.route("/")
def index():
# return "Тут ничего нет интересного"
return redirect("/get_news_list?last=15")
@app.route("/get_news_list", defaults={"interest": None})
@app.route("/get_news_list/<interest>")
def get_news_list(interest=None):
"""
Функция возвращает новости.
:param interest:
:return:
"""
last = request.args.get("last")
if last:
last = int(last)
    news_list, total = db_get_news_list(interest, last)
interest_by_news_list = defaultdict(list)
for title, url, interest in news_list:
interest_by_news_list[interest].append({
"title": title,
"url": url,
})
return jsonify({
"items": interest_by_news_list,
"count": len(news_list),
"total": total,
})
@app.route("/get_news_list_and_mark_as_read", defaults={"interest": None})
@app.route("/get_news_list_and_mark_as_read/<interest>")
def get_news_list_and_mark_as_read(interest=None):
"""
Функция возвращает непрочитанные еще новости и помечает их как помеченные.
:param interest:
:return:
"""
count = request.args.get("count")
if count:
count = int(count)
    news_list, total = db_get_news_list_and_mark_as_read(interest, count)
interest_by_news_list = defaultdict(list)
for title, url, interest in news_list:
interest_by_news_list[interest].append({
"title": title,
"url": url,
})
return jsonify({
"items": interest_by_news_list,
"count": len(news_list),
"total": total,
})
@app.route("/reset_all_is_read")
def reset_all_is_read():
"""
Функция сбрасывает у всех новостей флаг is_read -- то, что они прочитаны.
:return:
"""
reset_all_is_read()
return jsonify({"status": "ok"})
if __name__ == "__main__":
app.debug = True
# Localhost
app.run()
# # Public IP
# app.run(host='0.0.0.0')
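# Quick smoke test without binding a port, using Flask's built-in test client
# (the endpoint and ``last`` parameter are the ones defined above):
#
#     with app.test_client() as client:
#         resp = client.get("/get_news_list?last=5")
#         print(resp.get_json()["count"])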
# =============================================================================
# chromium/chromium · testing/merge_scripts/standard_gtest_merge_test.py
# (BSD-3-Clause)
# =============================================================================
#!/usr/bin/env vpython3
# Copyright 2014 The Chromium Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import json
import logging
import os
import shutil
import six
import sys
import tempfile
import unittest
import common_merge_script_tests
THIS_DIR = os.path.dirname(os.path.abspath(__file__))
# For 'standard_gtest_merge.py'.
sys.path.insert(
0, os.path.abspath(os.path.join(THIS_DIR, '..', 'resources')))
import mock
import standard_gtest_merge
# gtest json output for successfully finished shard #0.
GOOD_GTEST_JSON_0 = {
'all_tests': [
'AlignedMemoryTest.DynamicAllocation',
'AlignedMemoryTest.ScopedDynamicAllocation',
'AlignedMemoryTest.StackAlignment',
'AlignedMemoryTest.StaticAlignment',
],
'disabled_tests': [
'ConditionVariableTest.TimeoutAcrossSetTimeOfDay',
'FileTest.TouchGetInfo',
'MessageLoopTestTypeDefault.EnsureDeletion',
],
'global_tags': ['CPU_64_BITS', 'MODE_DEBUG', 'OS_LINUX', 'OS_POSIX'],
'per_iteration_data': [{
'AlignedMemoryTest.DynamicAllocation': [{
'elapsed_time_ms': 0,
'losless_snippet': True,
'output_snippet': 'blah\\n',
'output_snippet_base64': 'YmxhaAo=',
'status': 'SUCCESS',
}],
'AlignedMemoryTest.ScopedDynamicAllocation': [{
'elapsed_time_ms': 0,
'losless_snippet': True,
'output_snippet': 'blah\\n',
'output_snippet_base64': 'YmxhaAo=',
'status': 'SUCCESS',
}],
}],
'test_locations': {
'AlignedMemoryTest.DynamicAllocation': {
'file': 'foo/bar/allocation_test.cc',
'line': 123,
},
'AlignedMemoryTest.ScopedDynamicAllocation': {
'file': 'foo/bar/allocation_test.cc',
'line': 456,
},
# This is a test from a different shard, but this happens in practice and we
# should not fail if information is repeated.
'AlignedMemoryTest.StaticAlignment': {
'file': 'foo/bar/allocation_test.cc',
'line': 12,
},
},
}
# gtest json output for successfully finished shard #1.
GOOD_GTEST_JSON_1 = {
'all_tests': [
'AlignedMemoryTest.DynamicAllocation',
'AlignedMemoryTest.ScopedDynamicAllocation',
'AlignedMemoryTest.StackAlignment',
'AlignedMemoryTest.StaticAlignment',
],
'disabled_tests': [
'ConditionVariableTest.TimeoutAcrossSetTimeOfDay',
'FileTest.TouchGetInfo',
'MessageLoopTestTypeDefault.EnsureDeletion',
],
'global_tags': ['CPU_64_BITS', 'MODE_DEBUG', 'OS_LINUX', 'OS_POSIX'],
'per_iteration_data': [{
'AlignedMemoryTest.StackAlignment': [{
'elapsed_time_ms': 0,
'losless_snippet': True,
'output_snippet': 'blah\\n',
'output_snippet_base64': 'YmxhaAo=',
'status': 'SUCCESS',
}],
'AlignedMemoryTest.StaticAlignment': [{
'elapsed_time_ms': 0,
'losless_snippet': True,
'output_snippet': 'blah\\n',
'output_snippet_base64': 'YmxhaAo=',
'status': 'SUCCESS',
}],
}],
'test_locations': {
'AlignedMemoryTest.StackAlignment': {
'file': 'foo/bar/allocation_test.cc',
'line': 789,
},
'AlignedMemoryTest.StaticAlignment': {
'file': 'foo/bar/allocation_test.cc',
'line': 12,
},
},
}
TIMED_OUT_GTEST_JSON_1 = {
'disabled_tests': [],
'global_tags': [],
'all_tests': [
'AlignedMemoryTest.DynamicAllocation',
'AlignedMemoryTest.ScopedDynamicAllocation',
'AlignedMemoryTest.StackAlignment',
'AlignedMemoryTest.StaticAlignment',
],
'per_iteration_data': [{
'AlignedMemoryTest.StackAlignment': [{
'elapsed_time_ms': 54000,
'losless_snippet': True,
'output_snippet': 'timed out',
'output_snippet_base64': '',
'status': 'FAILURE',
}],
'AlignedMemoryTest.StaticAlignment': [{
'elapsed_time_ms': 0,
'losless_snippet': True,
'output_snippet': '',
'output_snippet_base64': '',
'status': 'NOTRUN',
}],
}],
'test_locations': {
'AlignedMemoryTest.StackAlignment': {
'file': 'foo/bar/allocation_test.cc',
'line': 789,
},
'AlignedMemoryTest.StaticAlignment': {
'file': 'foo/bar/allocation_test.cc',
'line': 12,
},
},
}
# GOOD_GTEST_JSON_0 and GOOD_GTEST_JSON_1 merged.
GOOD_GTEST_JSON_MERGED = {
'all_tests': [
'AlignedMemoryTest.DynamicAllocation',
'AlignedMemoryTest.ScopedDynamicAllocation',
'AlignedMemoryTest.StackAlignment',
'AlignedMemoryTest.StaticAlignment',
],
'disabled_tests': [
'ConditionVariableTest.TimeoutAcrossSetTimeOfDay',
'FileTest.TouchGetInfo',
'MessageLoopTestTypeDefault.EnsureDeletion',
],
'global_tags': ['CPU_64_BITS', 'MODE_DEBUG', 'OS_LINUX', 'OS_POSIX'],
'missing_shards': [],
'per_iteration_data': [{
'AlignedMemoryTest.DynamicAllocation': [{
'elapsed_time_ms': 0,
'losless_snippet': True,
'output_snippet': 'blah\\n',
'output_snippet_base64': 'YmxhaAo=',
'status': 'SUCCESS',
}],
'AlignedMemoryTest.ScopedDynamicAllocation': [{
'elapsed_time_ms': 0,
'losless_snippet': True,
'output_snippet': 'blah\\n',
'output_snippet_base64': 'YmxhaAo=',
'status': 'SUCCESS',
}],
'AlignedMemoryTest.StackAlignment': [{
'elapsed_time_ms': 0,
'losless_snippet': True,
'output_snippet': 'blah\\n',
'output_snippet_base64': 'YmxhaAo=',
'status': 'SUCCESS',
}],
'AlignedMemoryTest.StaticAlignment': [{
'elapsed_time_ms': 0,
'losless_snippet': True,
'output_snippet': 'blah\\n',
'output_snippet_base64': 'YmxhaAo=',
'status': 'SUCCESS',
}],
}],
'swarming_summary': {
u'shards': [
{
u'state': u'COMPLETED',
u'outputs_ref': {
u'view_url': u'blah',
},
}
],
},
'test_locations': {
'AlignedMemoryTest.StackAlignment': {
'file': 'foo/bar/allocation_test.cc',
'line': 789,
},
'AlignedMemoryTest.StaticAlignment': {
'file': 'foo/bar/allocation_test.cc',
'line': 12,
},
'AlignedMemoryTest.DynamicAllocation': {
'file': 'foo/bar/allocation_test.cc',
'line': 123,
},
'AlignedMemoryTest.ScopedDynamicAllocation': {
'file': 'foo/bar/allocation_test.cc',
'line': 456,
},
},
}
# Only shard #1 finished. UNRELIABLE_RESULTS is set.
BAD_GTEST_JSON_ONLY_1_SHARD = {
'all_tests': [
'AlignedMemoryTest.DynamicAllocation',
'AlignedMemoryTest.ScopedDynamicAllocation',
'AlignedMemoryTest.StackAlignment',
'AlignedMemoryTest.StaticAlignment',
],
'disabled_tests': [
'ConditionVariableTest.TimeoutAcrossSetTimeOfDay',
'FileTest.TouchGetInfo',
'MessageLoopTestTypeDefault.EnsureDeletion',
],
'global_tags': [
'CPU_64_BITS',
'MODE_DEBUG',
'OS_LINUX',
'OS_POSIX',
'UNRELIABLE_RESULTS',
],
'missing_shards': [0],
'per_iteration_data': [{
'AlignedMemoryTest.StackAlignment': [{
'elapsed_time_ms': 0,
'losless_snippet': True,
'output_snippet': 'blah\\n',
'output_snippet_base64': 'YmxhaAo=',
'status': 'SUCCESS',
}],
'AlignedMemoryTest.StaticAlignment': [{
'elapsed_time_ms': 0,
'losless_snippet': True,
'output_snippet': 'blah\\n',
'output_snippet_base64': 'YmxhaAo=',
'status': 'SUCCESS',
}],
}],
'test_locations': {
'AlignedMemoryTest.StackAlignment': {
'file': 'foo/bar/allocation_test.cc',
'line': 789,
},
'AlignedMemoryTest.StaticAlignment': {
'file': 'foo/bar/allocation_test.cc',
'line': 12,
},
},
}
# GOOD_GTEST_JSON_0 and TIMED_OUT_GTEST_JSON_1 merged.
TIMED_OUT_GTEST_JSON_MERGED = {
'all_tests': [
'AlignedMemoryTest.DynamicAllocation',
'AlignedMemoryTest.ScopedDynamicAllocation',
'AlignedMemoryTest.StackAlignment',
'AlignedMemoryTest.StaticAlignment',
],
'disabled_tests': [
'ConditionVariableTest.TimeoutAcrossSetTimeOfDay',
'FileTest.TouchGetInfo',
'MessageLoopTestTypeDefault.EnsureDeletion',
],
'global_tags': ['CPU_64_BITS', 'MODE_DEBUG', 'OS_LINUX', 'OS_POSIX'],
'missing_shards': [],
'per_iteration_data': [{
'AlignedMemoryTest.DynamicAllocation': [{
'elapsed_time_ms': 0,
'losless_snippet': True,
'output_snippet': 'blah\\n',
'output_snippet_base64': 'YmxhaAo=',
'status': 'SUCCESS',
}],
'AlignedMemoryTest.ScopedDynamicAllocation': [{
'elapsed_time_ms': 0,
'losless_snippet': True,
'output_snippet': 'blah\\n',
'output_snippet_base64': 'YmxhaAo=',
'status': 'SUCCESS',
}],
'AlignedMemoryTest.StackAlignment': [{
'elapsed_time_ms': 54000,
'losless_snippet': True,
'output_snippet': 'timed out',
'output_snippet_base64': '',
'status': 'FAILURE',
}],
'AlignedMemoryTest.StaticAlignment': [{
'elapsed_time_ms': 0,
'losless_snippet': True,
'output_snippet': '',
'output_snippet_base64': '',
'status': 'NOTRUN',
}],
}],
'swarming_summary': {
u'shards': [
{
u'state': u'COMPLETED',
},
{
u'state': u'TIMED_OUT',
},
],
},
'test_locations': {
'AlignedMemoryTest.StackAlignment': {
'file': 'foo/bar/allocation_test.cc',
'line': 789,
},
'AlignedMemoryTest.StaticAlignment': {
'file': 'foo/bar/allocation_test.cc',
'line': 12,
},
'AlignedMemoryTest.DynamicAllocation': {
'file': 'foo/bar/allocation_test.cc',
'line': 123,
},
'AlignedMemoryTest.ScopedDynamicAllocation': {
'file': 'foo/bar/allocation_test.cc',
'line': 456,
},
},
}
class _StandardGtestMergeTest(unittest.TestCase):
def setUp(self):
self.temp_dir = tempfile.mkdtemp()
def tearDown(self):
shutil.rmtree(self.temp_dir)
def _write_temp_file(self, path, content):
abs_path = os.path.join(self.temp_dir, path.replace('/', os.sep))
if not os.path.exists(os.path.dirname(abs_path)):
os.makedirs(os.path.dirname(abs_path))
with open(abs_path, 'w') as f:
if isinstance(content, dict):
json.dump(content, f)
else:
assert isinstance(content, str)
f.write(content)
return abs_path
class LoadShardJsonTest(_StandardGtestMergeTest):
def test_double_digit_jsons(self):
jsons_to_merge = []
for i in range(15):
json_dir = os.path.join(self.temp_dir, str(i))
json_path = os.path.join(json_dir, 'output.json')
if not os.path.exists(json_dir):
os.makedirs(json_dir)
with open(json_path, 'w') as f:
json.dump({'all_tests': ['LoadShardJsonTest.test%d' % i]}, f)
jsons_to_merge.append(json_path)
content, err = standard_gtest_merge.load_shard_json(
0, None, jsons_to_merge)
self.assertEqual({'all_tests': ['LoadShardJsonTest.test0']}, content)
self.assertIsNone(err)
content, err = standard_gtest_merge.load_shard_json(
12, None, jsons_to_merge)
self.assertEqual({'all_tests': ['LoadShardJsonTest.test12']}, content)
self.assertIsNone(err)
def test_double_task_id_jsons(self):
jsons_to_merge = []
for i in range(15):
json_dir = os.path.join(self.temp_dir, 'deadbeef%d' % i)
json_path = os.path.join(json_dir, 'output.json')
if not os.path.exists(json_dir):
os.makedirs(json_dir)
with open(json_path, 'w') as f:
json.dump({'all_tests': ['LoadShardJsonTest.test%d' % i]}, f)
jsons_to_merge.append(json_path)
content, err = standard_gtest_merge.load_shard_json(
0, 'deadbeef0', jsons_to_merge)
self.assertEqual({'all_tests': ['LoadShardJsonTest.test0']},
content)
self.assertIsNone(err)
content, err = standard_gtest_merge.load_shard_json(
12, 'deadbeef12', jsons_to_merge)
self.assertEqual({'all_tests': ['LoadShardJsonTest.test12']},
content)
self.assertIsNone(err)
class MergeShardResultsTest(_StandardGtestMergeTest):
"""Tests for merge_shard_results function."""
# pylint: disable=super-with-arguments
def setUp(self):
super(MergeShardResultsTest, self).setUp()
self.summary = None
self.test_files = []
# pylint: enable=super-with-arguments
def stage(self, summary, files):
self.summary = self._write_temp_file('summary.json', summary)
for path, content in files.items():
abs_path = self._write_temp_file(path, content)
self.test_files.append(abs_path)
def call(self):
stdout = six.StringIO()
with mock.patch('sys.stdout', stdout):
merged = standard_gtest_merge.merge_shard_results(
self.summary, self.test_files)
return merged, stdout.getvalue().strip()
def assertUnicodeEquals(self, expectation, result):
def convert_to_unicode(key_or_value):
if isinstance(key_or_value, str):
return six.text_type(key_or_value)
if isinstance(key_or_value, dict):
return {convert_to_unicode(k): convert_to_unicode(v)
for k, v in key_or_value.items()}
if isinstance(key_or_value, list):
return [convert_to_unicode(x) for x in key_or_value]
return key_or_value
unicode_expectations = convert_to_unicode(expectation)
unicode_result = convert_to_unicode(result)
    self.assertEqual(unicode_expectations, unicode_result)
def test_ok(self):
# Two shards, both successfully finished.
self.stage({
u'shards': [
{
u'state': u'COMPLETED',
},
{
u'state': u'COMPLETED',
},
],
},
{
'0/output.json': GOOD_GTEST_JSON_0,
'1/output.json': GOOD_GTEST_JSON_1,
})
merged, stdout = self.call()
merged['swarming_summary'] = {
'shards': [
{
u'state': u'COMPLETED',
u'outputs_ref': {
u'view_url': u'blah',
},
}
],
}
self.assertUnicodeEquals(GOOD_GTEST_JSON_MERGED, merged)
self.assertEqual('', stdout)
def test_timed_out(self):
    # Two shards: one completed successfully, one timed out.
self.stage({
'shards': [
{
'state': 'COMPLETED',
},
{
'state': 'TIMED_OUT',
},
],
},
{
'0/output.json': GOOD_GTEST_JSON_0,
'1/output.json': TIMED_OUT_GTEST_JSON_1,
})
merged, stdout = self.call()
self.assertUnicodeEquals(TIMED_OUT_GTEST_JSON_MERGED, merged)
self.assertIn(
'Test runtime exceeded allocated time\n', stdout)
def test_missing_summary_json(self):
# summary.json is missing, should return None and emit warning.
self.summary = os.path.join(self.temp_dir, 'summary.json')
merged, output = self.call()
self.assertEqual(None, merged)
self.assertIn('@@@STEP_WARNINGS@@@', output)
self.assertIn('summary.json is missing or can not be read', output)
def test_unfinished_shards(self):
# Only one shard (#1) finished. Shard #0 did not.
self.stage({
u'shards': [
None,
{
u'state': u'COMPLETED',
},
],
},
{
u'1/output.json': GOOD_GTEST_JSON_1,
})
merged, stdout = self.call()
merged.pop('swarming_summary')
self.assertUnicodeEquals(BAD_GTEST_JSON_ONLY_1_SHARD, merged)
self.assertIn(
'@@@STEP_WARNINGS@@@\nsome shards did not complete: 0\n', stdout)
self.assertIn(
'@@@STEP_LOG_LINE@some shards did not complete: 0@'
'Missing results from the following shard(s): 0@@@\n', stdout)
def test_missing_output_json(self):
# Shard #0 output json is missing.
self.stage({
u'shards': [
{
u'state': u'COMPLETED',
},
{
u'state': u'COMPLETED',
},
],
},
{
u'1/output.json': GOOD_GTEST_JSON_1,
})
merged, stdout = self.call()
merged.pop('swarming_summary')
self.assertUnicodeEquals(BAD_GTEST_JSON_ONLY_1_SHARD, merged)
self.assertIn(
'No result was found: '
'shard 0 test output was missing', stdout)
def test_large_output_json(self):
# a shard is too large.
self.stage({
u'shards': [
{
u'state': u'COMPLETED',
},
{
u'state': u'COMPLETED',
},
],
},
{
'0/output.json': GOOD_GTEST_JSON_0,
'1/output.json': GOOD_GTEST_JSON_1,
})
old_json_limit = standard_gtest_merge.OUTPUT_JSON_SIZE_LIMIT
len0 = len(json.dumps(GOOD_GTEST_JSON_0))
len1 = len(json.dumps(GOOD_GTEST_JSON_1))
large_shard = "0" if len0 > len1 else "1"
try:
# Override max output.json size just for this test.
      standard_gtest_merge.OUTPUT_JSON_SIZE_LIMIT = min(len0, len1)
merged, stdout = self.call()
merged.pop('swarming_summary')
self.assertUnicodeEquals(BAD_GTEST_JSON_ONLY_1_SHARD, merged)
self.assertIn(
'No result was found: '
'shard %s test output exceeded the size limit' % large_shard, stdout)
finally:
standard_gtest_merge.OUTPUT_JSON_SIZE_LIMIT = old_json_limit
class CommandLineTest(common_merge_script_tests.CommandLineTest):
# pylint: disable=super-with-arguments
def __init__(self, methodName='runTest'):
super(CommandLineTest, self).__init__(methodName, standard_gtest_merge)
# pylint: enable=super-with-arguments
if __name__ == '__main__':
logging.basicConfig(
level=logging.DEBUG if '-v' in sys.argv else logging.ERROR)
if '-v' in sys.argv:
unittest.TestCase.maxDiff = None
unittest.main()
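# For orientation, the merge these tests exercise boils down to a dict union:
# each shard contributes its ``per_iteration_data`` entries, while
# ``all_tests``/``test_locations`` are deduplicated across shards. A toy
# sketch of that core step (not the real standard_gtest_merge implementation):
#
#     merged = {}
#     for shard_json in (GOOD_GTEST_JSON_0, GOOD_GTEST_JSON_1):
#         for iteration in shard_json['per_iteration_data']:
#             merged.update(iteration)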
# =============================================================================
# PennyLaneAI/pennylane · pennylane/devices/default_qubit_autograd.py
# (Apache-2.0)
# =============================================================================
# Copyright 2018-2021 Xanadu Quantum Technologies Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This module contains an autograd implementation of the :class:`~.DefaultQubit`
reference plugin.
"""
from pennylane import numpy as np
from pennylane.devices import DefaultQubit
class DefaultQubitAutograd(DefaultQubit):
"""Simulator plugin based on ``"default.qubit"``, written using Autograd.
**Short name:** ``default.qubit.autograd``
This device provides a pure-state qubit simulator written using Autograd. As a result, it
supports classical backpropagation as a means to compute the gradient. This can be faster than
the parameter-shift rule for analytic quantum gradients when the number of parameters to be
optimized is large.
To use this device, you will need to install Autograd:
.. code-block:: console
pip install autograd
**Example**
The ``default.qubit.autograd`` is designed to be used with end-to-end classical backpropagation
(``diff_method="backprop"``) with the Autograd interface. This is the default method of
differentiation when creating a QNode with this device.
Using this method, the created QNode is a 'white-box', and is
tightly integrated with your Autograd computation:
>>> dev = qml.device("default.qubit.autograd", wires=1)
>>> @qml.qnode(dev, interface="autograd", diff_method="backprop")
... def circuit(x):
... qml.RX(x[1], wires=0)
... qml.Rot(x[0], x[1], x[2], wires=0)
... return qml.expval(qml.PauliZ(0))
>>> weights = np.array([0.2, 0.5, 0.1], requires_grad=True)
>>> grad_fn = qml.grad(circuit)
>>> print(grad_fn(weights))
    [-2.2526717e-01 -1.0086454e+00  1.3877788e-17]
There are a couple of things to keep in mind when using the ``"backprop"``
differentiation method for QNodes:
* You must use the ``"autograd"`` interface for classical backpropagation, as Autograd is
used as the device backend.
* Only exact expectation values, variances, and probabilities are differentiable.
When instantiating the device with ``analytic=False``, differentiating QNode
outputs will result in an error.
Args:
wires (int): the number of wires to initialize the device with
shots (None, int): How many times the circuit should be evaluated (or sampled) to estimate
the expectation values. Defaults to ``None`` if not specified, which means that the device
returns analytical results.
analytic (bool): Indicates if the device should calculate expectations
and variances analytically. In non-analytic mode, the ``diff_method="backprop"``
QNode differentiation method is not supported and it is recommended to consider
switching device to ``default.qubit`` and using ``diff_method="parameter-shift"``.
"""
name = "Default qubit (Autograd) PennyLane plugin"
short_name = "default.qubit.autograd"
_dot = staticmethod(np.dot)
_abs = staticmethod(np.abs)
_reduce_sum = staticmethod(lambda array, axes: np.sum(array, axis=tuple(axes)))
_reshape = staticmethod(np.reshape)
_flatten = staticmethod(lambda array: array.flatten())
_einsum = staticmethod(np.einsum)
_cast = staticmethod(np.asarray)
_transpose = staticmethod(np.transpose)
_tensordot = staticmethod(np.tensordot)
_conj = staticmethod(np.conj)
_real = staticmethod(np.real)
_imag = staticmethod(np.imag)
_roll = staticmethod(np.roll)
_stack = staticmethod(np.stack)
_size = staticmethod(np.size)
_ndim = staticmethod(np.ndim)
@staticmethod
def _asarray(array, dtype=None):
res = np.asarray(array, dtype=dtype)
if res.dtype is np.dtype("O"):
return np.hstack(array).flatten().astype(dtype)
return res
@staticmethod
def _const_mul(constant, array):
return constant * array
def __init__(self, wires, *, shots=None, analytic=None):
r_dtype = np.float64
c_dtype = np.complex128
super().__init__(wires, shots=shots, r_dtype=r_dtype, c_dtype=c_dtype, analytic=analytic)
# prevent using special apply methods for these gates due to slowdown in Autograd
# implementation
del self._apply_ops["PauliY"]
del self._apply_ops["Hadamard"]
del self._apply_ops["CZ"]
@classmethod
def capabilities(cls):
capabilities = super().capabilities().copy()
capabilities.update(passthru_interface="autograd")
return capabilities
@staticmethod
def _scatter(indices, array, new_dimensions):
new_array = np.zeros(new_dimensions, dtype=array.dtype.type)
new_array[indices] = array
return new_array
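# Small demonstration of the _scatter helper above: place two ones at indices
# 0 and 3 of a length-4 zero vector (the values and shape are arbitrary).
if __name__ == "__main__":
    out = DefaultQubitAutograd._scatter(
        (np.array([0, 3]),), np.array([1.0, 1.0]), (4,)
    )
    print(out)  # [1. 0. 0. 1.]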
# =============================================================================
# benadida/helios-server · helios/management/commands/__init__.py
# (Apache-2.0/GPL)
# =============================================================================
"""
commands
"""
# =============================================================================
# JohnDoee/autotorrent · autotorrent/clients/tests/test_qbittorrent.py (MIT)
# =============================================================================
import os
import shutil
import tempfile
from unittest import TestCase
from ...bencode import bencode, bdecode
from ..qbittorrent import QBittorrentClient, UnableToLoginException
current_path = os.path.dirname(__file__)
class FakeSession:
status_code = None
_response = None
def __init__(self):
self.r = []
def post(self, url, **kwargs):
self.r.append(('post', url, kwargs))
return self
def get(self, url, **kwargs):
self.r.append(('get', url, kwargs))
return self
def json(self):
return self._response
@property
def text(self):
return self._response
class TestQBittorrentClient(TestCase):
def setUp(self):
self.session = FakeSession()
self.client = QBittorrentClient('http://127.0.0.1', 'username', 'password', 'category')
self.client._session = self.session
self._temp_path = tempfile.mkdtemp()
def tearDown(self):
if self._temp_path.startswith('/tmp'): # paranoid-mon, the best pokemon.
shutil.rmtree(self._temp_path)
def test_login_check(self):
self.session.status_code = 200
self.client._login_check()
def test_login_check_failed(self):
self.session.status_code = 401
try:
self.client._login_check()
except UnableToLoginException:
pass
else:
self.fail('Failed login did not raise an exception')
def test_test_connection(self):
self.test_login_check()
self.session.status_code = 200
self.session._response = '4.0.0'
self.assertIn('4.0.0', self.client.test_connection())
def test_get_torrents(self):
self.test_login_check()
self.session._response = [{'hash': 'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa'},
{'hash': 'ffffffffffffffffffffffffffffffffffffffff'}]
self.assertEqual(set(['aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa',
'ffffffffffffffffffffffffffffffffffffffff']), self.client.get_torrents())
def test_add_torrent(self):
self.test_login_check()
with open(os.path.join(current_path, 'test.torrent'), 'rb') as f:
torrent_data = f.read()
torrent = bdecode(torrent_data)
files = []
for letter in ['a', 'b', 'c']:
filename = 'file_%s.txt' % letter
files.append({
'completed': True,
'length': 11,
'path': ['tmp', filename],
})
with open(os.path.join(self._temp_path, filename), 'wb') as f:
f.write(b'b' * 11)
self.assertTrue(self.client.add_torrent(torrent, self._temp_path, files))
method, url, kwargs = self.session.r[-1]
self.assertEqual(kwargs['data'], {
'savepath': self._temp_path,
'category': 'category',
'skip_checking': 'true',
})
# =============================================================================
# YeoLab/flotilla · flotilla/external/combat.py (BSD-3-Clause)
# =============================================================================
"""
"""
from __future__ import (absolute_import, division,
print_function, unicode_literals)
import sys
import pandas as pd
import patsy
import numpy.linalg as la
import numpy as np
def adjust_nums(numerical_covariates, drop_idxs):
# if we dropped some values, have to adjust those with a larger index.
if numerical_covariates is None: return drop_idxs
return [nc - sum(nc < di for di in drop_idxs)
for nc in numerical_covariates]
def design_mat(mod, numerical_covariates, batch_levels):
# require levels to make sure they are in the same order as we use in the
# rest of the script.
design = patsy.dmatrix("~ 0 + C(batch, levels=%s)" % str(batch_levels),
mod, return_type="dataframe")
mod = mod.drop(["batch"], axis=1)
numerical_covariates = list(numerical_covariates)
sys.stderr.write("found %i batches\n" % design.shape[1])
    other_cols = [c for i, c in enumerate(mod.columns)
                  if i not in numerical_covariates]
factor_matrix = mod[other_cols]
design = pd.concat((design, factor_matrix), axis=1)
if numerical_covariates is not None:
sys.stderr.write("found %i numerical covariates...\n"
% len(numerical_covariates))
for i, nC in enumerate(numerical_covariates):
cname = mod.columns[nC]
sys.stderr.write("\t{0}\n".format(cname))
design[cname] = mod[mod.columns[nC]]
sys.stderr.write("found %i categorical variables:" % len(other_cols))
sys.stderr.write("\t" + ", ".join(other_cols) + '\n')
return design
def combat(data, batch, model=None, numerical_covariates=None):
"""Correct for batch effects in a dataset
Parameters
----------
data : pandas.DataFrame
A (n_features, n_samples) dataframe of the expression or methylation
data to batch correct
batch : List-like
A column corresponding to the batches in the data, in the same order
as the samples in ``data``
model : patsy.design_info.DesignMatrix, optional
A model matrix describing metadata on the samples which could be
causing batch effects. If not provided, then will attempt to coarsely
correct just from the information provided in ``batch``
numerical_covariates : list-like
List of covariates in the model which are numerical, rather than
categorical
Returns
-------
corrected : pandas.DataFrame
A (n_features, n_samples) dataframe of the batch-corrected data
"""
if isinstance(numerical_covariates, str):
numerical_covariates = [numerical_covariates]
if numerical_covariates is None:
numerical_covariates = []
if model is not None and isinstance(model, pd.DataFrame):
model["batch"] = list(batch)
else:
model = pd.DataFrame({'batch': batch})
batch_items = model.groupby("batch").groups.items()
batch_levels = [k for k, v in batch_items]
batch_info = [v for k, v in batch_items]
n_batch = len(batch_info)
n_batches = np.array([len(v) for v in batch_info])
    n_array = int(sum(n_batches))  # int: also used to size numpy arrays below
# drop intercept
    drop_cols = [cname for cname, inter in ((model == 1).all()).items()
                 if inter]
drop_idxs = [list(model.columns).index(cdrop) for cdrop in drop_cols]
    model = model[[c for c in model.columns if c not in drop_cols]]
numerical_covariates = [list(model.columns).index(c)
if isinstance(c, str)
else c
for c in numerical_covariates if not c in drop_cols]
design = design_mat(model, numerical_covariates, batch_levels)
sys.stderr.write("Standardizing Data across genes.\n")
B_hat = np.dot(np.dot(la.inv(np.dot(design.T, design)), design.T), data.T)
grand_mean = np.dot((n_batches / n_array).T, B_hat[:n_batch,:])
var_pooled = np.dot(((data - np.dot(design, B_hat).T)**2),
np.ones((n_array, 1)) / n_array)
stand_mean = np.dot(grand_mean.T.reshape((len(grand_mean), 1)),
np.ones((1, n_array)))
tmp = np.array(design.copy())
tmp[:,:n_batch] = 0
stand_mean += np.dot(tmp, B_hat).T
s_data = ((data - stand_mean) / np.dot(np.sqrt(var_pooled),
np.ones((1, n_array))))
sys.stderr.write("Fitting L/S model and finding priors\n")
batch_design = design[design.columns[:n_batch]]
gamma_hat = np.dot(np.dot(la.inv(np.dot(batch_design.T, batch_design)),
batch_design.T), s_data.T)
delta_hat = []
for i, batch_idxs in enumerate(batch_info):
#batches = [list(model.columns).index(b) for b in batches]
delta_hat.append(s_data[batch_idxs].var(axis=1))
gamma_bar = gamma_hat.mean(axis=1)
t2 = gamma_hat.var(axis=1)
a_prior = list(map(aprior, delta_hat))
b_prior = list(map(bprior, delta_hat))
sys.stderr.write("Finding parametric adjustments\n")
gamma_star, delta_star = [], []
for i, batch_idxs in enumerate(batch_info):
#print '18 20 22 28 29 31 32 33 35 40 46'
#print batch_info[batch_id]
temp = it_sol(s_data[batch_idxs], gamma_hat[i],
delta_hat[i], gamma_bar[i], t2[i], a_prior[i], b_prior[i])
gamma_star.append(temp[0])
delta_star.append(temp[1])
sys.stdout.write("Adjusting data\n")
bayesdata = s_data
gamma_star = np.array(gamma_star)
delta_star = np.array(delta_star)
for j, batch_idxs in enumerate(batch_info):
dsq = np.sqrt(delta_star[j,:])
dsq = dsq.reshape((len(dsq), 1))
denom = np.dot(dsq, np.ones((1, n_batches[j])))
        numer = np.array(bayesdata[batch_idxs]
                         - np.dot(batch_design.loc[batch_idxs], gamma_star).T)
bayesdata[batch_idxs] = numer / denom
vpsq = np.sqrt(var_pooled).reshape((len(var_pooled), 1))
bayesdata = bayesdata * np.dot(vpsq, np.ones((1, n_array))) + stand_mean
return bayesdata
def it_sol(sdat, g_hat, d_hat, g_bar, t2, a, b, conv=0.0001):
n = (1 - np.isnan(sdat)).sum(axis=1)
g_old = g_hat.copy()
d_old = d_hat.copy()
change = 1
count = 0
while change > conv:
#print g_hat.shape, g_bar.shape, t2.shape
g_new = postmean(g_hat, g_bar, n, d_old, t2)
sum2 = ((sdat - np.dot(g_new.reshape((g_new.shape[0], 1)),
np.ones((1, sdat.shape[1])))) ** 2).sum(axis=1)
d_new = postvar(sum2, n, a, b)
change = max((abs(g_new - g_old) / g_old).max(),
(abs(d_new - d_old) / d_old).max())
g_old = g_new #.copy()
d_old = d_new #.copy()
count = count + 1
adjust = (g_new, d_new)
return adjust
def aprior(gamma_hat):
m = gamma_hat.mean()
s2 = gamma_hat.var()
return (2 * s2 +m**2) / s2
def bprior(gamma_hat):
m = gamma_hat.mean()
s2 = gamma_hat.var()
return (m*s2+m**3)/s2
def postmean(g_hat, g_bar, n, d_star, t2):
return (t2*n*g_hat+d_star * g_bar) / (t2*n+d_star)
def postvar(sum2, n, a, b):
return (0.5 * sum2 + b) / (n / 2.0 + a - 1.0)
if __name__ == "__main__":
# NOTE: run this first to get the bladder batch stuff written to files.
"""
source("http://bioconductor.org/biocLite.R")
biocLite("sva")
library("sva")
options(stringsAsFactors=FALSE)
library(bladderbatch)
data(bladderdata)
pheno = pData(bladderEset)
# add fake age variable for numeric
pheno$age = c(1:7, rep(1:10, 5))
write.table(data.frame(cel=rownames(pheno), pheno),
row.names=F, quote=F, sep="\t", file="bladder-pheno.txt")
edata = exprs(bladderEset)
write.table(edata, row.names=T, quote=F, sep="\t", file="bladder-expr.txt")
# use dataframe instead of matrix
mod = model.matrix(~as.factor(cancer) + age, data=pheno)
t = Sys.time()
cdata = ComBat(dat=edata, batch=as.factor(pheno$batch),
mod=mod, numCov=match("age", colnames(mod)))
print(Sys.time() - t)
print(cdata[1:5, 1:5])
write.table(cdata, row.names=True, quote=F, sep="\t", file="r-batch.txt")
"""
pheno = pd.read_table('bladder-pheno.txt', index_col=0)
dat = pd.read_table('bladder-expr.txt', index_col=0)
mod = patsy.dmatrix("~ age + cancer", pheno, return_type="dataframe")
import time
t = time.time()
ebat = combat(dat, pheno.batch, mod, "age")
sys.stdout.write("%.2f seconds\n" % (time.time() - t))
    sys.stdout.write(str(ebat.iloc[:5, :5]))
ebat.to_csv("py-batch.txt", sep="\t")
    mod = None  # no model matrix: correct from the batch labels alone
ebat = combat(dat, pheno.batch, mod)
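    # Synthetic smoke test for combat() that needs no external files (sizes
    # and batch labels are arbitrary):
    #
    #     genes, samples = 100, 9
    #     data = pd.DataFrame(np.random.randn(genes, samples))
    #     batch = ['a'] * 3 + ['b'] * 3 + ['c'] * 3
    #     corrected = combat(data, batch)
    #     assert corrected.shape == (genes, samples)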
# =============================================================================
# vyos/vyos-documentation · languagechecker.py (no license)
# =============================================================================
'''
Parse gettext pot files and extract path:line and msgid information
compare this with downloaded files from localazy
the output are the elements which are downloaded but not needed anymore.
TODO: better output
'''
import os
from babel.messages.pofile import read_po
def extract_content(file):
content = []
with open(file) as f:
data = read_po(f)
for message in data:
if message.id:
content.append(message)
return content
gettext_dir = "docs/_build/gettext"
gettext_ext = ".pot"
original_content = list()
language_dir = "docs/_locale"
language_ext = ".pot"
language_content = dict()
# get gettext filepath
for (dirpath, dirnames, filenames) in os.walk(gettext_dir):
for file in filenames:
        if file.endswith(gettext_ext):
original_content.extend(extract_content(f"{dirpath}/{file}"))
# get filepath per language
languages = next(os.walk(language_dir))[1]
for language in languages:
language_content[language] = list()
for (dirpath, dirnames, filenames) in os.walk(f"{language_dir}/{language}"):
for file in filenames:
            if file.endswith(language_ext):
language_content[language].extend(extract_content(f"{dirpath}/{file}"))
for lang in language_content.keys():
for message in language_content[lang]:
        found = any(ori_message.id == message.id
                    for ori_message in original_content)
if not found:
print()
print(f"{lang}: {message.id}")
for loc in message.locations:
print(f"{loc[0]}:{loc[1]}")
# =============================================================================
# FederatedAI/FATE · python/federatedml/model_selection/stepwise/step.py
# (Apache-2.0)
# =============================================================================
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import copy
import numpy as np
from federatedml.statistic.data_overview import get_header, get_anonymous_header
from federatedml.util import consts
from federatedml.util import LOGGER
from federatedml.util.data_transform import set_schema
class Step(object):
def __init__(self):
self.feature_list = []
self.step_direction = ""
self.n_step = 0
self.n_model = 0
def set_step_info(self, step_info):
n_step, n_model = step_info
self.n_step = n_step
self.n_model = n_model
def get_flowid(self):
flowid = "train.step{}.model{}".format(self.n_step, self.n_model)
return flowid
@staticmethod
def slice_data_instance(data_instance, feature_mask):
"""
return data_instance with features at given indices
Parameters
----------
data_instance: data Instance object, input data
feature_mask: mask to filter data_instance
"""
data_instance.features = data_instance.features[feature_mask]
return data_instance
@staticmethod
def get_new_schema(original_data, feature_mask):
schema = copy.deepcopy(original_data.schema)
old_header = get_header(original_data)
new_header = [old_header[i] for i in np.where(feature_mask > 0)[0]]
schema["header"] = new_header
old_anonymous_header = get_anonymous_header(original_data)
if old_anonymous_header:
new_anonymous_header = [old_anonymous_header[i] for i in np.where(feature_mask > 0)[0]]
schema["anonymous_header"] = new_anonymous_header
LOGGER.debug(f"given feature_mask: {feature_mask}, new anonymous header is: {new_anonymous_header}")
return schema
def run(self, original_model, train_data, validate_data, feature_mask):
model = copy.deepcopy(original_model)
current_flowid = self.get_flowid()
model.set_flowid(current_flowid)
if original_model.role != consts.ARBITER:
curr_train_data = train_data.mapValues(lambda v: Step.slice_data_instance(v, feature_mask))
new_schema = Step.get_new_schema(train_data, feature_mask)
# LOGGER.debug("new schema is: {}".format(new_schema))
set_schema(curr_train_data, new_schema)
model.header = new_schema.get("header")
else:
curr_train_data = train_data
model.fit(curr_train_data)
return model
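# Minimal illustration of Step.slice_data_instance with a stand-in for FATE's
# data Instance (only the ``features`` attribute used above is mimicked):
#
#     class _FakeInstance:
#         def __init__(self, features):
#             self.features = features
#
#     inst = _FakeInstance(np.array([0.1, 0.2, 0.3, 0.4]))
#     mask = np.array([True, False, True, False])
#     Step.slice_data_instance(inst, mask)
#     # inst.features -> array([0.1, 0.3])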
# =============================================================================
# ANTsX/ANTsPyNet · antspynet/architectures/create_transformer_model.py
# (Apache-2.0)
# =============================================================================
import tensorflow as tf
from tensorflow.keras.models import Model
from tensorflow.keras.layers import (Add, Dense, Dropout, Flatten,
Input, LayerNormalization, MultiHeadAttention)
from antspynet.utilities import (ExtractPatches2D, ExtractPatches3D, EncodePatches,
ExtractConvolutionalPatches2D, ExtractConvolutionalPatches3D,
StochasticDepth)
import numpy as np
def multilayer_perceptron(x, hidden_units, dropout_rate=0.0):
for units in hidden_units:
x = Dense(units, activation=tf.nn.gelu)(x)
if dropout_rate > 0.0:
x = Dropout(dropout_rate)(x)
return x
def create_vision_transformer_model_2d(input_image_size,
number_of_classification_labels=1000,
mode='classification',
patch_size=6,
number_of_transformer_layers=8,
transformer_units=[128, 64],
projection_dimension=64,
number_of_attention_heads=4,
mlp_head_units=[2048, 1024],
dropout_rate=0.5):
"""
Implementation of the Vision transformer architecture.
https://keras.io/examples/vision/image_classification_with_vision_transformer/
Arguments
---------
input_image_size : tuple of length 4
Used for specifying the input tensor shape. The shape (or dimension) of
that tensor is the image dimensions followed by the number of channels
(e.g., red, green, and blue).
number_of_classification_labels : int
Number of classification labels.
patch_size : int
Size of a single patch dimension.
number_of_transformer_layers : int
Number of transformer layers.
transformer_units : tuple or list
Size of the hidden units in the layers of the MLP.
projection_dimension : int
Multi-head attention layer parameter
mlp_head_units : tuple or list
Size of the dense layers of the final classifier.
dropout_rate : float between 0 and 1
Dropout rate of the multilayer perceptron and the previous dropout layer.
Returns
-------
Keras model
A 2-D keras model.
Example
-------
>>> model = create_vision_transformer_model_2d((224, 224, 1))
>>> model.summary()
"""
inputs = Input(shape=input_image_size)
patches = ExtractPatches2D(patch_size)(inputs)
number_of_patches = ((input_image_size[0] * input_image_size[1]) // (patch_size ** 2))
encoded_patches = EncodePatches(number_of_patches,
projection_dimension)(patches)
for _ in range(number_of_transformer_layers):
x1 = LayerNormalization(epsilon=1e-6)(encoded_patches)
attention_output = MultiHeadAttention(num_heads=number_of_attention_heads,
key_dim=projection_dimension,
dropout=dropout_rate/5.0)(x1, x1)
x2 = Add()([attention_output, encoded_patches])
x3 = LayerNormalization(epsilon=1e-6)(x2)
x3 = multilayer_perceptron(x3,
hidden_units=transformer_units,
dropout_rate=dropout_rate/5.0)
encoded_patches = Add()([x3, x2])
representation = LayerNormalization(epsilon=1e-6)(encoded_patches)
representation = Flatten()(representation)
representation = Dropout(dropout_rate)(representation)
features = multilayer_perceptron(representation,
hidden_units=mlp_head_units,
dropout_rate=dropout_rate)
layer_activation = ''
if mode == 'classification':
layer_activation = 'softmax'
elif mode == 'regression':
layer_activation = 'linear'
elif mode == 'sigmoid':
layer_activation = 'sigmoid'
else:
        raise ValueError('mode must be `classification`, `regression`, or `sigmoid`.')
outputs = Dense(number_of_classification_labels,
activation=layer_activation)(features)
model = Model(inputs=inputs, outputs=outputs)
return model
def create_vision_transformer_model_3d(input_image_size,
number_of_classification_labels=1000,
mode="classification",
patch_size=6,
number_of_transformer_layers=8,
transformer_units=[128, 64],
projection_dimension=64,
number_of_attention_heads=4,
mlp_head_units=[2048, 1024],
dropout_rate=0.5):
"""
Implementation of the Vision transformer architecture.
https://keras.io/examples/vision/image_classification_with_vision_transformer/
Arguments
---------
input_image_size : tuple of length 5
Used for specifying the input tensor shape. The shape (or dimension) of
that tensor is the image dimensions followed by the number of channels
(e.g., red, green, and blue).
number_of_classification_labels : int
Number of classification labels.
patch_size : int
Size of a single patch dimension.
number_of_transformer_layers : int
Number of transformer layers.
transformer_units : tuple or list
Size of the hidden units in the layers of the MLP.
projection_dimension : int
Multi-head attention layer parameter
mlp_head_units : tuple or list
Size of the dense layers of the final classifier.
dropout_rate : float between 0 and 1
Dropout rate of the multilayer perceptron and the previous dropout layer.
Returns
-------
Keras model
A 3-D keras model.
Example
-------
    >>> model = create_vision_transformer_model_3d((224, 224, 224, 1))
>>> model.summary()
"""
inputs = Input(shape=input_image_size)
patches = ExtractPatches3D(patch_size)(inputs)
number_of_patches = ((input_image_size[0] * input_image_size[1] * input_image_size[2]) // (patch_size ** 3))
encoded_patches = EncodePatches(number_of_patches,
projection_dimension)(patches)
for _ in range(number_of_transformer_layers):
x1 = LayerNormalization(epsilon=1e-6)(encoded_patches)
attention_output = MultiHeadAttention(num_heads=number_of_attention_heads,
key_dim=projection_dimension,
dropout=dropout_rate/5.0)(x1, x1)
x2 = Add()([attention_output, encoded_patches])
x3 = LayerNormalization(epsilon=1e-6)(x2)
x3 = multilayer_perceptron(x3,
hidden_units=transformer_units,
dropout_rate=dropout_rate/5.0)
encoded_patches = Add()([x3, x2])
representation = LayerNormalization(epsilon=1e-6)(encoded_patches)
representation = Flatten()(representation)
representation = Dropout(dropout_rate)(representation)
features = multilayer_perceptron(representation,
hidden_units=mlp_head_units,
dropout_rate=dropout_rate)
layer_activation = ''
if mode == 'classification':
layer_activation = 'softmax'
elif mode == 'regression':
layer_activation = 'linear'
elif mode == 'sigmoid':
layer_activation = 'sigmoid'
else:
        raise ValueError('mode must be `classification`, `regression`, or `sigmoid`.')
outputs = Dense(number_of_classification_labels,
activation=layer_activation)(features)
model = Model(inputs=inputs, outputs=outputs)
return model
def create_compact_convolutional_transformer_model_2d(input_image_size,
number_of_classification_labels=1000,
mode="classification",
number_of_transformer_layers=2,
transformer_units=[128, 128],
projection_dimension=64,
number_of_attention_heads=4,
stochastic_depth_rate=0.1):
"""
Implementation of the Vision transformer architecture.
https://keras.io/examples/vision/cct/
Arguments
---------
input_image_size : tuple of length 4
Used for specifying the input tensor shape. The shape (or dimension) of
that tensor is the image dimensions followed by the number of channels
(e.g., red, green, and blue).
number_of_classification_labels : int
Number of classification labels.
patch_size : int
Size of a single patch dimension.
number_of_transformer_layers : int
Number of transformer layers.
transformer_units : tuple or list
Size of the hidden units in the layers of the MLP.
projection_dimension : int
Multi-head attention layer parameter
stochastic_depth_rate : float between 0 and 1
Dropout rate of the stochastic depth layer
Returns
-------
Keras model
A 2-D keras model.
Example
-------
>>> model = antspynet.create_compact_convolutional_transformer_model_2d((224, 224, 1))
>>> model.summary()
"""
inputs = Input(shape=input_image_size)
ExtractPatches = ExtractConvolutionalPatches2D(kernel_size=3,
stride=1,
padding=1,
pooling_kernel_size=3,
pooling_stride=2,
number_of_filters=[64, 128],
do_positional_embedding=True)
encoded_patches = ExtractPatches(inputs)
# Apply positional embedding.
positional_embedding, sequence_length = ExtractPatches.positional_embedding(input_image_size)
positions = tf.range(start=0, limit=sequence_length, delta=1)
position_embeddings = positional_embedding(positions)
encoded_patches += position_embeddings
# Calculate Stochastic Depth probabilities.
dpr = [x for x in np.linspace(0, stochastic_depth_rate, number_of_transformer_layers)]
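    # e.g., with stochastic_depth_rate=0.1 and number_of_transformer_layers=2,
    # dpr == [0.0, 0.1]: the drop probability increases linearly with depth.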
for i in range(number_of_transformer_layers):
x1 = LayerNormalization(epsilon=1e-5)(encoded_patches)
attention_output = MultiHeadAttention(num_heads=number_of_attention_heads,
key_dim=projection_dimension,
dropout=0.1)(x1, x1)
attention_output = StochasticDepth(dpr[i])(attention_output)
x2 = Add()([attention_output, encoded_patches])
x3 = LayerNormalization(epsilon=1e-5)(x2)
x3 = multilayer_perceptron(x3,
hidden_units=transformer_units,
dropout_rate=0.1)
x3 = StochasticDepth(dpr[i])(x3)
encoded_patches = Add()([x3, x2])
representation = LayerNormalization(epsilon=1e-5)(encoded_patches)
attention_weights = tf.nn.softmax(Dense(1)(representation), axis=1)
weighted_representation = tf.matmul(attention_weights, representation, transpose_a=True)
weighted_representation = tf.squeeze(weighted_representation, -2)
layer_activation = ''
if mode == 'classification':
layer_activation = 'softmax'
elif mode == 'regression':
layer_activation = 'linear'
elif mode == 'sigmoid':
layer_activation = 'sigmoid'
else:
        raise ValueError('mode must be either `classification`, `regression`, or `sigmoid`.')
outputs = Dense(number_of_classification_labels,
activation=layer_activation)(weighted_representation)
model = Model(inputs=inputs, outputs=outputs)
return model
def create_compact_convolutional_transformer_model_3d(input_image_size,
number_of_classification_labels=1000,
mode="classification",
number_of_transformer_layers=2,
transformer_units=[128, 128],
projection_dimension=64,
number_of_attention_heads=4,
stochastic_depth_rate=0.1):
"""
    Implementation of the compact convolutional transformer (CCT) architecture.
https://keras.io/examples/vision/cct/
Arguments
---------
    input_image_size : tuple of length 4
Used for specifying the input tensor shape. The shape (or dimension) of
that tensor is the image dimensions followed by the number of channels
(e.g., red, green, and blue).
number_of_classification_labels : int
Number of classification labels.
    mode : string
        `classification`, `regression`, or `sigmoid`.
number_of_transformer_layers : int
Number of transformer layers.
transformer_units : tuple or list
Size of the hidden units in the layers of the MLP.
    projection_dimension : int
        Projection (key) dimension of the multi-head attention layers.
    number_of_attention_heads : int
        Number of attention heads.
stochastic_depth_rate : float between 0 and 1
Dropout rate of the stochastic depth layer
Returns
-------
Keras model
A 3-D keras model.
Example
-------
>>> model = antspynet.create_compact_convolutional_transformer_model_3d((224, 224, 224, 1))
>>> model.summary()
"""
inputs = Input(shape=input_image_size)
ExtractPatches = ExtractConvolutionalPatches3D(kernel_size=3,
stride=1,
padding=1,
pooling_kernel_size=3,
pooling_stride=2,
number_of_filters=[64, 128],
do_positional_embedding=True)
encoded_patches = ExtractPatches(inputs)
# Apply positional embedding.
positional_embedding, sequence_length = ExtractPatches.positional_embedding(input_image_size)
positions = tf.range(start=0, limit=sequence_length, delta=1)
position_embeddings = positional_embedding(positions)
encoded_patches += position_embeddings
# Calculate Stochastic Depth probabilities.
dpr = [x for x in np.linspace(0, stochastic_depth_rate, number_of_transformer_layers)]
for i in range(number_of_transformer_layers):
x1 = LayerNormalization(epsilon=1e-5)(encoded_patches)
attention_output = MultiHeadAttention(num_heads=number_of_attention_heads,
key_dim=projection_dimension,
dropout=0.1)(x1, x1)
attention_output = StochasticDepth(dpr[i])(attention_output)
x2 = Add()([attention_output, encoded_patches])
x3 = LayerNormalization(epsilon=1e-5)(x2)
x3 = multilayer_perceptron(x3,
hidden_units=transformer_units,
dropout_rate=0.1)
x3 = StochasticDepth(dpr[i])(x3)
encoded_patches = Add()([x3, x2])
representation = LayerNormalization(epsilon=1e-5)(encoded_patches)
attention_weights = tf.nn.softmax(Dense(1)(representation), axis=1)
weighted_representation = tf.matmul(attention_weights, representation, transpose_a=True)
weighted_representation = tf.squeeze(weighted_representation, -2)
layer_activation = ''
if mode == 'classification':
layer_activation = 'softmax'
elif mode == 'regression':
layer_activation = 'linear'
elif mode == 'sigmoid':
layer_activation = 'sigmoid'
else:
        raise ValueError('mode must be either `classification`, `regression`, or `sigmoid`.')
outputs = Dense(number_of_classification_labels,
activation=layer_activation)(weighted_representation)
model = Model(inputs=inputs, outputs=outputs)
return model
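# A minimal usage sketch (an illustrative addition, not part of the original
# module); it assumes the Keras imports earlier in this module are available.
if __name__ == "__main__":
    model = create_compact_convolutional_transformer_model_2d(
        input_image_size=(224, 224, 1),
        number_of_classification_labels=10,
        mode="classification")
    model.summary()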
|
47a88cc1376deb4d5305c38cdecdc8bb94948842
|
376e1818d427b5e4d32fa6dd6c7b71e9fd88afdb
|
/x11/qt5-qtwebengine/patches/patch-src_3rdparty_chromium_third__party_blink_renderer_bindings_scripts_bind__gen_codegen__expr.py
|
53f40a109daa8f8ea463d5f23280e6398f255f79
|
[] |
no_license
|
NetBSD/pkgsrc
|
a0732c023519650ef821ab89c23ab6ab59e25bdb
|
d042034ec4896cc5b47ed6f2e5b8802d9bc5c556
|
refs/heads/trunk
| 2023-09-01T07:40:12.138283
| 2023-09-01T05:25:19
| 2023-09-01T05:25:19
| 88,439,572
| 321
| 138
| null | 2023-07-12T22:34:14
| 2017-04-16T20:04:15
| null |
UTF-8
|
Python
| false
| false
| 2,164
|
py
|
patch-src_3rdparty_chromium_third__party_blink_renderer_bindings_scripts_bind__gen_codegen__expr.py
|
$NetBSD: patch-src_3rdparty_chromium_third__party_blink_renderer_bindings_scripts_bind__gen_codegen__expr.py,v 1.1 2023/06/08 20:38:06 markd Exp $
build with python3
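In Python 3, filter() and map() return lazy, single-pass iterators instead
of lists, so checks in the code below such as `if not terms:` and
`len(terms)` no longer behave as the Python 2 code expected. A minimal
illustration (hypothetical values):

    terms = filter(lambda x: x, [0, 0])
    bool(terms)   # True in Python 3 -- a filter object is always truthy
    terms = list(filter(lambda x: x, [0, 0]))
    bool(terms)   # False -- the list is empty, as the code expects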
--- src/3rdparty/chromium/third_party/blink/renderer/bindings/scripts/bind_gen/codegen_expr.py.orig 2022-03-30 09:48:18.000000000 +0000
+++ src/3rdparty/chromium/third_party/blink/renderer/bindings/scripts/bind_gen/codegen_expr.py
@@ -109,7 +109,7 @@ def expr_and(terms):
if any(term.is_always_false for term in terms):
return _Expr(False)
- terms = filter(lambda x: not x.is_always_true, terms)
+ terms = list(filter(lambda x: not x.is_always_true, terms))
if not terms:
return _Expr(True)
if len(terms) == 1:
@@ -124,7 +124,7 @@ def expr_or(terms):
if any(term.is_always_true for term in terms):
return _Expr(True)
- terms = filter(lambda x: not x.is_always_false, terms)
+ terms = list(filter(lambda x: not x.is_always_false, terms))
if not terms:
return _Expr(False)
if len(terms) == 1:
@@ -222,7 +222,7 @@ def expr_from_exposure(exposure,
elif exposure.only_in_secure_contexts is False:
secure_context_term = _Expr(True)
else:
- terms = map(ref_enabled, exposure.only_in_secure_contexts)
+ terms = list(map(ref_enabled, exposure.only_in_secure_contexts))
secure_context_term = expr_or(
[_Expr("${is_in_secure_context}"),
expr_not(expr_and(terms))])
@@ -275,10 +275,11 @@ def expr_from_exposure(exposure,
# [ContextEnabled]
if exposure.context_enabled_features:
- terms = map(
- lambda feature: _Expr(
- "${{context_feature_settings}}->is{}Enabled()".format(
- feature)), exposure.context_enabled_features)
+ terms = list(
+ map(
+ lambda feature: _Expr(
+ "${{context_feature_settings}}->is{}Enabled()".format(
+ feature)), exposure.context_enabled_features))
context_enabled_terms.append(
expr_and([_Expr("${context_feature_settings}"),
expr_or(terms)]))
|
fd9a6d8b72ada10e23c6a6865720428b0edd8670
|
8ab40da4bdee2c0941251379fcf32bc096c63094
|
/modules/auth.py
|
1d356db3fb335267cb6417c522430a5f4683943a
|
[
"Apache-2.0"
] |
permissive
|
flipkart-incubator/Astra
|
e4c795a94c0bcfb9d38e201b67a03e0877812996
|
57c1e41a4355cdf849c662c59f0e0a4211db1672
|
refs/heads/master
| 2023-08-01T07:54:20.104528
| 2023-02-16T09:33:40
| 2023-02-16T09:33:40
| 116,951,658
| 2,347
| 391
|
Apache-2.0
| 2023-07-11T17:47:23
| 2018-01-10T11:56:11
|
Python
|
UTF-8
|
Python
| false
| false
| 5,747
|
py
|
auth.py
|
import requests
import utils.logger as logger
import utils.logs as logs
from . import sendrequest as req
import json
import ast
import base64
import sys
try:
import requests
requests.packages.urllib3.disable_warnings()
except ImportError:
    print("[-] Failed to import the requests module")
sys.path.append('../')
from utils.db import Database_update
from utils.config import get_value,get_allvalues
from http.cookies import SimpleCookie
from core.login import APILogin
dbupdate = Database_update()
api_logger = logger.logger()
api_login = APILogin()
def get_authdata():
# Fetching login and logout data
login_data = get_allvalues('config.property','login')
logout_data = get_allvalues('config.property','logout')
return login_data,logout_data
def fetch_auth_config(name):
    # Returns the list of common authentication header names and auth error strings from the config file.
auth_config_value = get_value('scan.property','modules',name)
return auth_config_value.split(',')
def session_fixation(url,method,headers,body,scanid):
    # Check for session fixation issues.
attack_result = {}
login_result = get_value('config.property','login','loginresult')
logout_result = get_value('config.property','logout','logoutresult')
if login_result == 'Y' and logout_result == 'Y':
login_data, logout_data = get_authdata()
if url == login_data['loginurl']:
logs.logging.info("Checking for Sesion fixation: %s", url)
url,method,headers,body = logout_data['logouturl'],logout_data['logoutmethod'], logout_data['logoutheaders'] ,logout_data['logoutbody']
logout_headers,auth_old = add_authheader(headers)
            try:
                logout_body = ast.literal_eval(base64.b64decode(body).decode())
            except (ValueError, SyntaxError, TypeError):
                logout_body = None
logs.logging.info("Logout request %s %s %s",url, logout_headers,logout_body)
logout_req = req.api_request(url,method,logout_headers,logout_body)
if logout_req == None or str(logout_req.status_code)[0] == '4' or str(logout_req.status_code)[0] == '5':
print("%s[!]Failed to logout. Session fixation attack won't be tested. Check log file for more information.%s"% (api_logger.Y, api_logger.W))
return
# Try to relogin and check if the application is serving the previous session
login_url,login_method,login_headers,login_body = login_data['loginurl'],login_data['loginmethod'], login_data['loginheaders'] ,login_data['loginbody']
logs.logging.info("Login request %s %s %s",url, headers,body)
login_req = api_login.fetch_logintoken(login_url,login_method,ast.literal_eval(login_headers),ast.literal_eval(login_body))
if login_req is True:
logs.logging.info("Relogin Successful")
auth_new = get_value('config.property','login','auth')
if auth_old == auth_new:
attack_result.update({"id" : 5,
"scanid": scanid,
"url" : login_url,
"alert": "Session Fixation",
"impact" : "Medium",
"req_headers" : login_headers,
"req_body" : req_body,
"res_headers" : "NA",
"res_body" : "NA"
})
dbupdate.insert_record(attack_result)
def auth_check(url,method,headers,body,scanid=None):
    # This function removes the auth header and checks whether the server accepts the request without it.
temp_headers = {}
temp_headers.update(headers)
try:
attack_result = {}
auth_headers = fetch_auth_config("auth_headers")
auth_fail = fetch_auth_config("auth_fail")
session_headers = headers
for auth_header in auth_headers:
for key,value in list(temp_headers.items()):
if key.lower() == auth_header.lower():
del temp_headers[auth_header]
updated_headers = temp_headers
logs.logging.info("Auth header is %s", auth_header)
auth_request = req.api_request(url,method,updated_headers,body)
if auth_request.status_code == 401:
logs.logging.info("API requires authentication hence it's not vulnerable %s", url)
return
elif auth_request.status_code == 200 or auth_request.status_code == 400:
# Check for false positive
for fail_name in auth_fail:
                    if fail_name.lower() in auth_request.text.lower():
logs.logging.info("API requires authentication hence it's not vulnerable %s", url)
else:
attack_result.update({"id" : 3,
"scanid": scanid,
"url" : url,
"alert": "Broken Authentication and session management",
"impact" : "High",
"req_headers" : updated_headers,
"req_body" : body,
"res_headers" : auth_request.headers,
"res_body" : auth_request.text
})
dbupdate.insert_record(attack_result)
print("%s[+]{0} is vulnerable to broken Authentication and session management %s ".format(url)% (api_logger.R, api_logger.W))
return
session_fixation(url,method,temp_headers,body,scanid)
else:
result = False
if result is False:
            # Mark it as vulnerable if no authentication header is present in the HTTP request
brokenauth_request = req.api_request(url,method,headers,body)
attack_result.update({"id" : 4,
"scanid": scanid,
"url" : url,
"alert": "Broken Authentication and session management",
"impact" : "High",
"req_headers" : headers,
"req_body" : body,
"res_headers" : brokenauth_request.headers,
"res_body" : brokenauth_request.text
})
dbupdate.insert_record(attack_result)
print("%s[+]{0} is vulnerable to broken Authentication and session management %s ".format(url)% (api_logger.R, api_logger.W))
# Test for session fixation
session_fixation(url,method,updated_headers,body,scanid)
return
    except Exception:
        logs.logging.exception("auth_check failed for %s", url)
|
cd991b06da3a261a9db46c799b6a95e4663acdab
|
e76a79816ff5203be2c4061e263a09d31072c940
|
/test/com/facebook/buck/features/lua/testdata/lua_binary/packager.py
|
dbb86d4792a233406e01b0e151b1b4740b7744e1
|
[
"Apache-2.0"
] |
permissive
|
facebook/buck
|
ef3a833334499b1b44c586e9bc5e2eec8d930e09
|
9c7c421e49f4d92d67321f18c6d1cd90974c77c4
|
refs/heads/main
| 2023-08-25T19:30:28.803205
| 2023-04-19T11:32:59
| 2023-04-19T11:32:59
| 9,504,214
| 8,481
| 1,338
|
Apache-2.0
| 2023-05-04T22:13:59
| 2013-04-17T18:12:18
|
Java
|
UTF-8
|
Python
| false
| false
| 337
|
py
|
packager.py
|
import json
import optparse
import shutil
import sys
def main(argv):
parser = optparse.OptionParser()
parser.add_option("--entry-point")
parser.add_option("--interpreter")
options, args = parser.parse_args(argv[1:])
with open(args[0], "w") as f:
shutil.copyfileobj(sys.stdin, f)
sys.exit(main(sys.argv))
|
3ec52d7fb92b0d4800a863bce513e4fd21d8fe2f
|
0ca218c0f54dac33a2ade4accfdf8f5be3207588
|
/test/ext/mypy/plugin_files/mixin_not_mapped.py
|
9a4865eb6d351ac7efaba477f0cb28d4a0532554
|
[
"MIT"
] |
permissive
|
sqlalchemy/sqlalchemy
|
9d949c67c9b5396b1f33e7ff0f3230c81babf5be
|
b382bff6e3464f039db0fd1f2ce1b79038675e48
|
refs/heads/main
| 2023-08-31T17:40:59.565421
| 2023-08-30T15:01:41
| 2023-08-30T15:01:41
| 159,271,175
| 8,083
| 1,489
|
MIT
| 2023-09-12T18:53:55
| 2018-11-27T03:35:03
|
Python
|
UTF-8
|
Python
| false
| false
| 814
|
py
|
mixin_not_mapped.py
|
from sqlalchemy import Column
from sqlalchemy import Integer
from sqlalchemy import String
from sqlalchemy.orm import declarative_base
from sqlalchemy.orm import registry
reg: registry = registry()
Base = declarative_base()
class SomeAbstract(Base):
__abstract__ = True
class HasUpdatedAt:
updated_at = Column(Integer)
@reg.mapped
class Foo(SomeAbstract):
__tablename__ = "foo"
id: int = Column(Integer(), primary_key=True)
name: str = Column(String)
class Bar(HasUpdatedAt, Base):
__tablename__ = "bar"
id = Column(Integer(), primary_key=True)
num = Column(Integer)
Bar.__mapper__
# EXPECTED_MYPY: "Type[HasUpdatedAt]" has no attribute "__mapper__"
HasUpdatedAt.__mapper__
# EXPECTED_MYPY: "Type[SomeAbstract]" has no attribute "__mapper__"
SomeAbstract.__mapper__
|
d5535100bc3f285cefbce3e09fa11ef6815ee18b
|
8bc2bfc34352aac01bef774209e651e81c61c681
|
/pox/lib/packet/packet_utils.py
|
aba8cdc268d65ed303519cee9792055556e95c26
|
[
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
noxrepo/pox
|
0925260bc723cda49dd0dab7e827f61548e1370c
|
5f82461e01f8822bd7336603b361bff4ffbd2380
|
refs/heads/gar-experimental
| 2023-04-30T00:04:33.991794
| 2020-05-20T12:00:13
| 2020-05-20T12:00:13
| 3,382,021
| 467
| 370
|
Apache-2.0
| 2023-06-01T02:19:33
| 2012-02-07T22:18:46
|
Python
|
UTF-8
|
Python
| false
| false
| 3,744
|
py
|
packet_utils.py
|
# Copyright 2011,2012 James McCauley
# Copyright 2008 (C) Nicira, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This file is derived from the packet library in NOX, which was
# developed by Nicira, Inc.
"""
Various functionality and data for the packet library
"""
import array
import struct
from socket import ntohs
_ethtype_to_str = {}
_ipproto_to_str = {}
# Map ethernet type to string
_ethtype_to_str[0x0800] = 'IP'
_ethtype_to_str[0x0806] = 'ARP'
_ethtype_to_str[0x8035] = 'RARP'
_ethtype_to_str[0x8100] = 'VLAN'
_ethtype_to_str[0x88cc] = 'LLDP'
_ethtype_to_str[0x888e] = 'PAE'
_ethtype_to_str[0x8847] = 'MPLS'
_ethtype_to_str[0x8848] = 'MPLS_MC' # Multicast
_ethtype_to_str[0x86dd] = 'IPV6'
_ethtype_to_str[0x880b] = 'PPP'
_ethtype_to_str[0x88bb] = 'LWAPP'
_ethtype_to_str[0x880c] = 'GSMP'
_ethtype_to_str[0x8137] = 'IPX'
_ethtype_to_str[0x0842] = 'WOL' # Wake On LAN
_ethtype_to_str[0x22f3] = 'TRILL'
_ethtype_to_str[0x8870] = 'JUMBO'
_ethtype_to_str[0x889a] = 'SCSI' # SCSI Over Ethernet
_ethtype_to_str[0x88a2] = 'ATA' # ATA Over Ethernet
_ethtype_to_str[0x9100] = 'QINQ'
_ethtype_to_str[0xffff] = 'BAD'
# IP protocol to string
#TODO: This should probably be integrated with the decorator used in
# the ipv6 module.
_ipproto_to_str[0] = 'HOP_OPTS'
_ipproto_to_str[1] = 'ICMP'
_ipproto_to_str[2] = 'IGMP'
_ipproto_to_str[4] = 'IPIP'
_ipproto_to_str[6] = 'TCP'
_ipproto_to_str[9] = 'IGRP'
_ipproto_to_str[17] = 'UDP'
_ipproto_to_str[43] = 'IPV6_ROUTING'
_ipproto_to_str[44] = 'IPV6_FRAG'
_ipproto_to_str[47] = 'GRE'
_ipproto_to_str[58] = 'ICMP6'
_ipproto_to_str[59] = 'IPV6_NO_NEXT'
_ipproto_to_str[60] = 'DEST_OPTS'
_ipproto_to_str[89] = 'OSPF'
class MalformedException (RuntimeError):
pass
class TruncatedException (RuntimeError):
pass
def checksum (data, start = 0, skip_word = None):
"""
Calculate standard internet checksum over data starting at start'th byte
skip_word: If specified, it's the word offset of a word in data to "skip"
(as if it were zero). The purpose is when data is received
data which contains a computed checksum that you are trying to
verify -- you want to skip that word since it was zero when
the checksum was initially calculated.
"""
if len(data) % 2 != 0:
arr = array.array('H', data[:-1])
else:
arr = array.array('H', data)
if skip_word is not None:
for i in range(0, len(arr)):
if i == skip_word:
continue
start += arr[i]
else:
for i in range(0, len(arr)):
start += arr[i]
if len(data) % 2 != 0:
    start += struct.unpack('H', data[-1:] + b'\0')[0] # Specify order?
start = (start >> 16) + (start & 0xffff)
start += (start >> 16)
#while start >> 16:
# start = (start >> 16) + (start & 0xffff)
return ntohs(~start & 0xffff)
def ethtype_to_str (t):
"""
Given numeric ethernet type or length, return human-readable representation
"""
if t <= 0x05dc:
return "802.3/%04x" % (t,)
return _ethtype_to_str.get(t, "%04x" % (t,))
def ipproto_to_str (t):
"""
Given a numeric IP protocol number (or IPv6 next_header), give human name
"""
if t in _ipproto_to_str:
return _ipproto_to_str[t]
else:
return "%02x" % (t,)
|
a2fcf2aa89699d426de719ea64033b3e511bee26
|
e441a2f416c83f04889ecd43d6b6bdcf5172b287
|
/tests/ethpm/tools/test_builder.py
|
d05f060e2c51ebcd5750328bf92e281a35888c8a
|
[
"MIT"
] |
permissive
|
ethereum/web3.py
|
f8d66eefaa84d30fa51a0978d1d1c44c6807b355
|
76da2146267fa03760f35c33ca8b9a96d9e24835
|
refs/heads/main
| 2023-08-31T18:34:30.144026
| 2023-08-29T15:43:25
| 2023-08-29T15:43:25
| 56,251,096
| 4,403
| 1,680
|
MIT
| 2023-09-14T20:46:08
| 2016-04-14T15:59:35
|
Python
|
UTF-8
|
Python
| false
| false
| 29,738
|
py
|
test_builder.py
|
import json
from pathlib import (
Path,
)
import pytest
from eth_utils import (
to_canonical_address,
)
from eth_utils.toolz import (
assoc,
assoc_in,
)
from ethpm import (
ASSETS_DIR,
Package,
)
from ethpm.backends.ipfs import (
get_ipfs_backend,
)
from ethpm.exceptions import (
EthPMValidationError,
ManifestBuildingError,
)
from ethpm.tools import (
get_ethpm_local_manifest,
get_ethpm_spec_manifest,
)
from ethpm.tools.builder import (
as_package,
authors,
build,
build_dependency,
contract_type,
deployment,
deployment_type,
description,
init_manifest,
inline_source,
keywords,
license,
links,
manifest_version,
normalize_contract_type,
package_name,
pin_source,
source_inliner,
source_pinner,
validate,
version,
write_to_disk,
)
from web3.tools.pytest_ethereum.linker import (
deploy,
link,
linker,
)
BASE_MANIFEST = {"name": "package", "manifest": "ethpm/3", "version": "1.0.0"}
@pytest.fixture
def owned_package(ethpm_spec_dir):
manifest = get_ethpm_spec_manifest("owned", "v3.json")
# source_id missing `./` prefix in ethpm-spec
# ("Owned.sol"/"./Owned.sol" though both are valid)
source_obj = manifest["sources"].pop("Owned.sol")
updated_manifest = assoc_in(manifest, ["sources", "./Owned.sol"], source_obj)
compiler = get_ethpm_local_manifest("owned", "output_v3.json")["contracts"]
contracts_dir = ethpm_spec_dir / "examples" / "owned" / "contracts"
return contracts_dir, updated_manifest, compiler
# todo validate no duplicate contracts in package
@pytest.fixture
def standard_token_package(ethpm_spec_dir):
standard_token_dir = ethpm_spec_dir / "examples" / "standard-token"
manifest = get_ethpm_spec_manifest("standard-token", "v3.json")
compiler = get_ethpm_local_manifest("standard-token", "output_v3.json")["contracts"]
contracts_dir = standard_token_dir / "contracts"
return contracts_dir, manifest, compiler
@pytest.fixture
def registry_package():
root = ASSETS_DIR / "registry"
compiler = json.loads(Path(root / "solc_output.json").read_text())["contracts"]
contracts_dir = root / "contracts"
manifest = json.loads((root / "v3.json").read_text())
return contracts_dir, manifest, compiler
@pytest.fixture
def manifest_dir(tmpdir):
return Path(tmpdir.mkdir("sub"))
def test_builder_simple_with_package(w3):
package = build(
{},
package_name("package"),
manifest_version("ethpm/3"),
version("1.0.0"),
validate(),
as_package(w3),
)
assert isinstance(package, Package)
assert package.version == "1.0.0"
PRETTY_MANIFEST = """{
"manifest": "ethpm/3",
"name": "package",
"version": "1.0.0"
}"""
MINIFIED_MANIFEST = '{"manifest":"ethpm/3","name":"package","version":"1.0.0"}'
OWNED_CONTRACT = "// SPDX-License-Identifier: MIT\npragma solidity ^0.6.8;\n\ncontract Owned {\n address owner;\n \n modifier onlyOwner { require(msg.sender == owner); _; }\n\n constructor() public {\n owner = msg.sender;\n }\n}" # noqa: E501
def test_builder_writes_manifest_to_disk(manifest_dir):
build(
{},
package_name("package"),
manifest_version("ethpm/3"),
version("1.0.0"),
validate(),
write_to_disk(
manifest_root_dir=manifest_dir, manifest_name="1.0.0.json", prettify=True
),
)
actual_manifest = (manifest_dir / "1.0.0.json").read_text()
assert actual_manifest == PRETTY_MANIFEST
def test_builder_to_disk_uses_default_cwd(manifest_dir, monkeypatch):
monkeypatch.chdir(manifest_dir)
build(
{},
package_name("package"),
manifest_version("ethpm/3"),
version("1.0.0"),
write_to_disk(),
validate(),
)
actual_manifest = (manifest_dir / "1.0.0.json").read_text()
assert actual_manifest == MINIFIED_MANIFEST
def test_to_disk_writes_minified_manifest_as_default(manifest_dir):
build(
{},
package_name("package"),
manifest_version("ethpm/3"),
version("1.0.0"),
write_to_disk(manifest_root_dir=manifest_dir, manifest_name="1.0.0.json"),
validate(),
)
actual_manifest = (manifest_dir / "1.0.0.json").read_text()
assert actual_manifest == MINIFIED_MANIFEST
def test_to_disk_uses_default_manifest_name(manifest_dir):
build(
{},
package_name("package"),
manifest_version("ethpm/3"),
version("1.0.0"),
write_to_disk(manifest_root_dir=manifest_dir),
validate(),
)
actual_manifest = (manifest_dir / "1.0.0.json").read_text()
assert actual_manifest == MINIFIED_MANIFEST
@pytest.mark.parametrize(
"write_to_disk_fn",
(
write_to_disk(manifest_root_dir=Path("not/a/directory")),
write_to_disk(manifest_name="invalid_name"),
),
)
def test_to_disk_with_invalid_args_raises_exception(manifest_dir, write_to_disk_fn):
with pytest.raises(ManifestBuildingError):
build(
{},
package_name("package"),
manifest_version("ethpm/3"),
version("1.0.0"),
write_to_disk_fn,
)
def test_builder_with_manifest_validation():
with pytest.raises(EthPMValidationError, match="_invalid_package_name"):
build(
{},
package_name("_invalid_package_name"),
manifest_version("ethpm/3"),
version("1.0.0"),
validate(),
)
@pytest.mark.parametrize(
"fn,value",
(
(authors("some", "guy"), {"authors": ["some", "guy"]}),
(license("MIT"), {"license": "MIT"}),
(description("This is a package."), {"description": "This is a package."}),
(keywords("awesome", "package"), {"keywords": ["awesome", "package"]}),
(
links(documentation="ipfs..", website="www"),
{"links": {"documentation": "ipfs..", "website": "www"}},
),
),
)
def test_builder_with_simple_meta_fields(fn, value):
manifest = build(BASE_MANIFEST, fn, validate())
expected = assoc(BASE_MANIFEST, "meta", value)
assert manifest == expected
def test_builder_simple_with_multi_meta_field():
manifest = build(
BASE_MANIFEST,
authors("some", "guy"),
license("MIT"),
description("description"),
keywords("awesome", "package"),
links(website="www", repository="github"),
validate(),
)
expected = assoc(
BASE_MANIFEST,
"meta",
{
"license": "MIT",
"authors": ["some", "guy"],
"description": "description",
"keywords": ["awesome", "package"],
"links": {"website": "www", "repository": "github"},
},
)
assert manifest == expected
def test_builder_with_inline_source(owned_package, monkeypatch):
root, _, compiler_output = owned_package
monkeypatch.chdir(root)
manifest = build(BASE_MANIFEST, inline_source("Owned", compiler_output), validate())
expected = assoc(
BASE_MANIFEST,
"sources",
{
"./Owned.sol": {
"content": OWNED_CONTRACT,
"installPath": "./Owned.sol",
"type": "solidity",
}
},
)
assert manifest == expected
def test_builder_with_source_inliner(owned_package, monkeypatch):
root, _, compiler_output = owned_package
monkeypatch.chdir(root)
inliner = source_inliner(compiler_output)
manifest = build(BASE_MANIFEST, inliner("Owned"), validate())
expected = assoc(
BASE_MANIFEST,
"sources",
{
"./Owned.sol": {
"content": OWNED_CONTRACT,
"installPath": "./Owned.sol",
"type": "solidity",
}
},
)
assert manifest == expected
def test_builder_with_inline_source_with_package_root_dir_arg(owned_package):
root, _, compiler_output = owned_package
manifest = build(
BASE_MANIFEST,
inline_source("Owned", compiler_output, package_root_dir=root),
validate(),
)
expected = assoc(
BASE_MANIFEST,
"sources",
{
"./Owned.sol": {
"content": OWNED_CONTRACT,
"installPath": "./Owned.sol",
"type": "solidity",
}
},
)
print(manifest)
print("-")
print(expected)
assert manifest == expected
def test_builder_with_pin_source(owned_package, dummy_ipfs_backend):
root, expected, compiler_output = owned_package
ipfs_backend = get_ipfs_backend()
manifest = build(
{},
package_name("owned"),
manifest_version("ethpm/3"),
version("1.0.0"),
authors("Piper Merriam <pipermerriam@gmail.com>"),
description(
"Reusable contracts which implement a privileged 'owner' model for authorization." # noqa: E501
),
keywords("authorization"),
license("MIT"),
links(documentation="ipfs://QmUYcVzTfSwJoigggMxeo2g5STWAgJdisQsqcXHws7b1FW"),
pin_source("Owned", compiler_output, ipfs_backend, root),
validate(),
)
assert manifest == expected
def test_builder_with_pinner(owned_package, dummy_ipfs_backend):
root, expected, compiler_output = owned_package
ipfs_backend = get_ipfs_backend()
pinner = source_pinner(compiler_output, ipfs_backend, root)
manifest = build(
{},
package_name("owned"),
manifest_version("ethpm/3"),
version("1.0.0"),
authors("Piper Merriam <pipermerriam@gmail.com>"),
description(
"Reusable contracts which implement a privileged 'owner' model for authorization." # noqa: E501
),
keywords("authorization"),
license("MIT"),
links(documentation="ipfs://QmUYcVzTfSwJoigggMxeo2g5STWAgJdisQsqcXHws7b1FW"),
pinner("Owned"),
validate(),
)
assert manifest == expected
def test_builder_with_init_manifest(owned_package, dummy_ipfs_backend):
root, expected, compiler_output = owned_package
ipfs_backend = get_ipfs_backend()
manifest = build(
init_manifest(package_name="owned", version="1.0.0"),
authors("Piper Merriam <pipermerriam@gmail.com>"),
description(
"Reusable contracts which implement a privileged 'owner' model for authorization." # noqa: E501
),
keywords("authorization"),
license("MIT"),
links(documentation="ipfs://QmUYcVzTfSwJoigggMxeo2g5STWAgJdisQsqcXHws7b1FW"),
pin_source("Owned", compiler_output, ipfs_backend, root),
validate(),
)
assert manifest == expected
def test_builder_with_default_contract_types(owned_package):
_, _, compiler_output = owned_package
manifest = build(BASE_MANIFEST, contract_type("Owned", compiler_output), validate())
contract_type_data = normalize_contract_type(
compiler_output["Owned.sol"]["Owned"], "Owned.sol"
)
compilers_data = contract_type_data.pop("compiler")
compilers_data["contractTypes"] = ["Owned"]
expected_with_contract_type = assoc(
BASE_MANIFEST, "contractTypes", {"Owned": contract_type_data}
)
expected = assoc(expected_with_contract_type, "compilers", [compilers_data])
assert manifest == expected
def test_builder_with_single_alias_kwarg(owned_package):
_, _, compiler_output = owned_package
manifest = build(
BASE_MANIFEST,
contract_type("Owned", compiler_output, alias="OwnedAlias"),
validate(),
)
contract_type_data = normalize_contract_type(
compiler_output["Owned.sol"]["Owned"], "Owned.sol"
)
compilers_data = contract_type_data.pop("compiler")
compilers_data["contractTypes"] = ["OwnedAlias"]
expected_with_contract_type = assoc(
BASE_MANIFEST,
"contractTypes",
{"OwnedAlias": assoc(contract_type_data, "contractType", "Owned")},
)
expected = assoc(expected_with_contract_type, "compilers", [compilers_data])
assert manifest == expected
def test_builder_without_alias_and_with_select_contract_types(owned_package):
_, _, compiler_output = owned_package
manifest = build(
BASE_MANIFEST,
contract_type("Owned", compiler_output, abi=True, source_id=True),
validate(),
)
contract_type_data = normalize_contract_type(
compiler_output["Owned.sol"]["Owned"], "Owned.sol"
)
omitted_fields = ("deploymentBytecode", "userdoc", "devdoc", "compiler")
selected_data = {
k: v for k, v in contract_type_data.items() if k not in omitted_fields
}
expected = assoc(BASE_MANIFEST, "contractTypes", {"Owned": selected_data})
assert manifest == expected
def test_builder_with_alias_and_select_contract_types(owned_package):
_, _, compiler_output = owned_package
manifest = build(
BASE_MANIFEST,
contract_type(
"Owned",
compiler_output,
alias="OwnedAlias",
abi=True,
compiler=False,
devdoc=True,
userdoc=True,
deployment_bytecode=True,
runtime_bytecode=False,
source_id=True,
),
validate(),
)
contract_type_data = normalize_contract_type(
compiler_output["Owned.sol"]["Owned"], "Owned.sol"
)
contract_type_data.pop("compiler")
expected = assoc(
BASE_MANIFEST,
"contractTypes",
{"OwnedAlias": assoc(contract_type_data, "contractType", "Owned")},
)
assert manifest == expected
def test_builder_manages_duplicate_compilers(owned_package):
_, _, compiler_output = owned_package
manifest = build(
BASE_MANIFEST,
contract_type(
"Owned",
compiler_output,
abi=True,
compiler=True,
source_id=True,
),
contract_type(
"Owned",
compiler_output,
alias="OwnedAlias",
abi=True,
compiler=True,
source_id=True,
),
validate(),
)
contract_type_data = normalize_contract_type(
compiler_output["Owned.sol"]["Owned"], "Owned.sol"
)
compiler_data = contract_type_data.pop("compiler")
contract_type_data.pop("deploymentBytecode")
contract_type_data.pop("devdoc")
contract_type_data.pop("userdoc")
compiler_data_with_contract_types = assoc(
compiler_data, "contractTypes", ["Owned", "OwnedAlias"]
)
expected_with_contract_types = assoc(
BASE_MANIFEST,
"contractTypes",
{
"Owned": assoc(contract_type_data, "contractType", "Owned"),
"OwnedAlias": assoc(contract_type_data, "contractType", "Owned"),
},
)
expected_with_contract_types["contractTypes"]["Owned"].pop("contractType")
expected = assoc(
expected_with_contract_types, "compilers", [compiler_data_with_contract_types]
)
assert manifest == expected
def test_builder_raises_exception_if_selected_contract_type_missing_from_solc(
owned_package,
):
_, _, compiler_output = owned_package
with pytest.raises(ManifestBuildingError, match="runtimeBytecode not available"):
build(
BASE_MANIFEST,
contract_type("Owned", compiler_output, abi=True, runtime_bytecode=True),
)
def test_builder_with_standard_token_manifest(
standard_token_package, dummy_ipfs_backend, monkeypatch
):
root, expected_manifest, compiler_output = standard_token_package
ipfs_backend = get_ipfs_backend()
monkeypatch.chdir(root)
manifest = build(
{},
package_name("standard-token"),
manifest_version("ethpm/3"),
version("1.0.0"),
pin_source("StandardToken", compiler_output, ipfs_backend),
pin_source("Token", compiler_output, ipfs_backend),
contract_type(
"StandardToken", compiler_output, abi=True, devdoc=True, source_id=True
),
contract_type(
"Token",
compiler_output,
abi=True,
devdoc=True,
userdoc=True,
source_id=True,
),
validate(),
)
assert manifest == expected_manifest
def test_builder_with_link_references(
registry_package, dummy_ipfs_backend, monkeypatch
):
root, expected_manifest, compiler_output = registry_package
monkeypatch.chdir(root)
inliner = source_inliner(compiler_output)
manifest = build(
{},
package_name("solidity-registry"),
manifest_version("ethpm/3"),
version("2.0.0"),
inliner("Authorized"),
inliner("IndexedOrderedSetLib"),
inliner("PackageDB"),
inliner("PackageRegistry"),
inliner("PackageRegistryInterface"),
inliner("ReleaseDB"),
inliner("ReleaseValidator"),
contract_type(
"AuthorityInterface",
compiler_output,
abi=True,
deployment_bytecode=True,
runtime_bytecode=True,
devdoc=True,
source_id=True,
),
contract_type(
"Authorized",
compiler_output,
abi=True,
compiler=True,
deployment_bytecode=True,
runtime_bytecode=True,
devdoc=True,
source_id=True,
),
contract_type(
"AuthorizedInterface",
compiler_output,
abi=True,
deployment_bytecode=True,
runtime_bytecode=True,
devdoc=True,
source_id=True,
),
contract_type(
"WhitelistAuthority",
compiler_output,
abi=True,
compiler=True,
deployment_bytecode=True,
runtime_bytecode=True,
devdoc=True,
source_id=True,
),
contract_type(
"WhitelistAuthorityInterface",
compiler_output,
abi=True,
deployment_bytecode=True,
runtime_bytecode=True,
devdoc=True,
source_id=True,
),
contract_type(
"IndexedOrderedSetLib",
compiler_output,
abi=True,
compiler=True,
deployment_bytecode=True,
runtime_bytecode=True,
devdoc=True,
source_id=True,
),
contract_type(
"PackageDB",
compiler_output,
abi=True,
compiler=True,
deployment_bytecode=True,
runtime_bytecode=True,
devdoc=True,
source_id=True,
),
contract_type(
"PackageRegistry",
compiler_output,
abi=True,
compiler=True,
deployment_bytecode=True,
runtime_bytecode=True,
devdoc=True,
source_id=True,
),
contract_type(
"PackageRegistryInterface",
compiler_output,
abi=True,
deployment_bytecode=True,
runtime_bytecode=True,
devdoc=True,
source_id=True,
),
contract_type(
"ReleaseDB",
compiler_output,
abi=True,
compiler=True,
deployment_bytecode=True,
runtime_bytecode=True,
devdoc=True,
source_id=True,
),
contract_type(
"ReleaseValidator",
compiler_output,
abi=True,
compiler=True,
deployment_bytecode=True,
runtime_bytecode=True,
devdoc=True,
source_id=True,
),
validate(),
)
assert manifest == expected_manifest
def test_builder_deployment_simple(w3):
expected = json.dumps(
{
"name": "package",
"version": "1.0.0",
"manifest": "ethpm/3",
"deployments": {
"blockchain://1234567890123456789012345678901234567890123456789012345678901234/block/1234567890abcdef1234567890abcdef1234567890abcdef1234567890abcdef": { # noqa: E501
"Owned": {
"contractType": "Owned",
"address": "0xd3CdA913deB6f67967B99D67aCDFa1712C293601",
}
}
},
}
)
manifest = build(
BASE_MANIFEST,
deployment(
block_uri="blockchain://1234567890123456789012345678901234567890123456789012345678901234/block/1234567890abcdef1234567890abcdef1234567890abcdef1234567890abcdef", # noqa: E501
contract_instance="Owned",
contract_type="Owned",
address=to_canonical_address("0xd3cda913deb6f67967b99d67acdfa1712c293601"),
),
validate(),
)
assert manifest == json.loads(expected)
@pytest.fixture
def escrow_package(w3, deployer, ethpm_spec_dir):
manifest = ethpm_spec_dir / "examples" / "escrow" / "v3.json"
escrow_deployer = deployer(manifest)
escrow_strategy = linker(
deploy("SafeSendLib"),
link("Escrow", "SafeSendLib"),
deploy("Escrow", w3.eth.accounts[0]),
)
escrow_deployer.register_strategy("Escrow", escrow_strategy)
escrow_package = escrow_deployer.deploy("Escrow")
return escrow_package, w3
def test_builder_deployment_type_complex(escrow_package):
escrow, w3 = escrow_package
escrow_dep_type = deployment_type(
contract_instance="Escrow",
contract_type="Escrow",
deployment_bytecode={
"bytecode": "0x608060405234801561001057600080fd5b5060405160208061045383398101604081815291516002819055336000818152602081815285822084905583855294519294919390927fddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef929181900390910190a3506103d2806100816000396000f3006080604052600436106100775763ffffffff7c0100000000000000000000000000000000000000000000000000000000600035041663095ea7b3811461007c57806318160ddd146100b457806323b872dd146100db57806370a0823114610105578063a9059cbb14610126578063dd62ed3e1461014a575b600080fd5b34801561008857600080fd5b506100a0600160a060020a0360043516602435610171565b604080519115158252519081900360200190f35b3480156100c057600080fd5b506100c96101d8565b60408051918252519081900360200190f35b3480156100e757600080fd5b506100a0600160a060020a03600435811690602435166044356101de565b34801561011157600080fd5b506100c9600160a060020a03600435166102c9565b34801561013257600080fd5b506100a0600160a060020a03600435166024356102e4565b34801561015657600080fd5b506100c9600160a060020a036004358116906024351661037b565b336000818152600160209081526040808320600160a060020a038716808552908352818420869055815186815291519394909390927f8c5be1e5ebec7d5bd14f71427d1e84f3dd0314c0f7b2291e5b200ac8c7c3b925928290030190a35060015b92915050565b60025481565b600160a060020a03831660009081526020819052604081205482118015906102295750600160a060020a03841660009081526001602090815260408083203384529091529020548211155b80156102355750600082115b156102be57600160a060020a0380841660008181526020818152604080832080548801905593881680835284832080548890039055600182528483203384528252918490208054879003905583518681529351929391927fddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef9281900390910190a35060016102c2565b5060005b9392505050565b600160a060020a031660009081526020819052604090205490565b3360009081526020819052604081205482118015906103035750600082115b15610373573360008181526020818152604080832080548790039055600160a060020a03871680845292819020805487019055805186815290519293927fddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef929181900390910190a35060016101d2565b5060006101d2565b600160a060020a039182166000908152600160209081526040808320939094168252919091522054905600a165627a7a72305820cf9d6a3f751ca1e6b9bc2324e42633a4cde513d64c3e6cc32d6359629249e90200290000000000000000000000000000000000000000000000000000000000000001" # noqa: E501
},
runtime_bytecode={
"bytecode": "0x6080604052600436106100775763ffffffff7c0100000000000000000000000000000000000000000000000000000000600035041663095ea7b3811461007c57806318160ddd146100b457806323b872dd146100db57806370a0823114610105578063a9059cbb14610126578063dd62ed3e1461014a575b600080fd5b34801561008857600080fd5b506100a0600160a060020a0360043516602435610171565b604080519115158252519081900360200190f35b3480156100c057600080fd5b506100c96101d8565b60408051918252519081900360200190f35b3480156100e757600080fd5b506100a0600160a060020a03600435811690602435166044356101de565b34801561011157600080fd5b506100c9600160a060020a03600435166102c9565b34801561013257600080fd5b506100a0600160a060020a03600435166024356102e4565b34801561015657600080fd5b506100c9600160a060020a036004358116906024351661037b565b336000818152600160209081526040808320600160a060020a038716808552908352818420869055815186815291519394909390927f8c5be1e5ebec7d5bd14f71427d1e84f3dd0314c0f7b2291e5b200ac8c7c3b925928290030190a35060015b92915050565b60025481565b600160a060020a03831660009081526020819052604081205482118015906102295750600160a060020a03841660009081526001602090815260408083203384529091529020548211155b80156102355750600082115b156102be57600160a060020a0380841660008181526020818152604080832080548801905593881680835284832080548890039055600182528483203384528252918490208054879003905583518681529351929391927fddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef9281900390910190a35060016102c2565b5060005b9392505050565b600160a060020a031660009081526020819052604090205490565b3360009081526020819052604081205482118015906103035750600082115b15610373573360008181526020818152604080832080548790039055600160a060020a03871680845292819020805487019055805186815290519293927fddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef929181900390910190a35060016101d2565b5060006101d2565b600160a060020a039182166000908152600160209081526040808320939094168252919091522054905600a165627a7a72305820cf9d6a3f751ca1e6b9bc2324e42633a4cde513d64c3e6cc32d6359629249e9020029" # noqa: E501
},
compiler={
"name": "solc",
"version": "0.4.24+commit.e67f0147.Emscripten.clang",
"settings": {"optimize": True},
},
)
safesendlib_dep_type = deployment_type(
contract_instance="SafeSendLib", contract_type="SafeSendLib"
)
manifest = build(
{},
package_name("escrow"),
version("1.0.0"),
manifest_version("ethpm/3"),
escrow_dep_type(
block_uri="blockchain://1111111111111111111111111111111111111111111111111111111111111111/block/1234567890abcdef1234567890abcdef1234567890abcdef1234567890abcdef", # noqa: E501
address=escrow.deployments.get_instance("Escrow").address,
),
# dep_type with block uri
safesendlib_dep_type(
block_uri="blockchain://1111111111111111111111111111111111111111111111111111111111111111/block/1234567890abcdef1234567890abcdef1234567890abcdef1234567890abcdef", # noqa: E501
address=escrow.deployments.get_instance("SafeSendLib").address,
),
# simple deployment
deployment(
block_uri="blockchain://1234567890123456789012345678901234567890123456789012345678901234/block/1234567890abcdef1234567890abcdef1234567890abcdef1234567890abcdef", # noqa: E501
contract_instance="Escrow",
contract_type="Escrow",
address=escrow.deployments.get_instance("Escrow").address,
),
# simple deployment
deployment(
block_uri="blockchain://1234567890123456789012345678901234567890123456789012345678901234/block/1234567890abcdef1234567890abcdef1234567890abcdef1234567890abcdef", # noqa: E501
contract_instance="SafeSendLib",
contract_type="SafeSendLib",
address=escrow.deployments.get_instance("SafeSendLib").address,
),
validate(),
)
assert len(manifest["deployments"].keys()) == 2
assert len(list(manifest["deployments"].values())[0]) == 2
assert len(list(manifest["deployments"].values())[1]) == 2
def test_builder_with_single_build_dependency():
expected_build_dep = {
"package": "ipfs://QmUYcVzTfSwJoigggMxeo2g5STWAgJdisQsqcXHws7b1FW"
}
expected = assoc_in(BASE_MANIFEST, ["buildDependencies"], expected_build_dep)
actual = build(
BASE_MANIFEST,
build_dependency(
"package", "ipfs://QmUYcVzTfSwJoigggMxeo2g5STWAgJdisQsqcXHws7b1FW"
),
validate(),
)
assert actual == expected
def test_builder_with_multiple_build_dependencies():
expected_build_deps = {
"escrow": "ipfs://QmPDwMHk8e1aMEZg3iKsUiPSkhHkywpGB3KHKM52RtGrkv",
"package": "ipfs://QmUYcVzTfSwJoigggMxeo2g5STWAgJdisQsqcXHws7b1FW",
}
expected = assoc_in(BASE_MANIFEST, ["buildDependencies"], expected_build_deps)
actual = build(
BASE_MANIFEST,
build_dependency(
"package", "ipfs://QmUYcVzTfSwJoigggMxeo2g5STWAgJdisQsqcXHws7b1FW"
),
build_dependency(
"escrow", "ipfs://QmPDwMHk8e1aMEZg3iKsUiPSkhHkywpGB3KHKM52RtGrkv"
),
validate(),
)
assert actual == expected
def test_builder_with_invalid_uri():
with pytest.raises(
EthPMValidationError, match="is not a supported content-addressed URI"
):
build(
{},
package_name("package"),
version("1.0.0"),
manifest_version("ethpm/3"),
build_dependency("package", "www.google.com"),
)
|
fadd346c6906427ba5103641f49084cd6582024e
|
66af5573eef648ba76fcf0156de41b411ca38c6c
|
/sikuli-ide/sample-scripts/skype.sikuli/skype.py
|
73eb9698be32b5c2d9ed4e5b7ba0ca1b9fe5458f
|
[
"MIT"
] |
permissive
|
sikuli/sikuli
|
5221f35a5fb9114bcaaab12d75fcf67ae341966b
|
4adaab7880d2f3e14702ca7287ae9c9e4f4de9ab
|
refs/heads/develop
| 2023-08-28T09:39:58.135194
| 2019-10-27T08:34:31
| 2019-10-27T08:34:31
| 2,393,437
| 1,486
| 302
|
MIT
| 2018-10-04T11:47:59
| 2011-09-15T15:47:51
|
HTML
|
UTF-8
|
Python
| false
| false
| 275
|
py
|
skype.py
|
v = VDict({
"1254515416894.png" : "1254512196727.png",
"1254512579147.png" : "1254512182329.png"})
while True:
call = find(Pattern("1254810120075.png").similar(0.27).firstN(1))
if call:
face = capture(call[0].x, call[0].y, 65, 65)
doubleClick(v[face][0])
|
65989fb372ce2bf455c2f22d52bc5885e48d7b89
|
b74320ad439e37dfa48cd8db38dab3b7a20a36ff
|
/src/diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_img2img.py
|
82e609ce7cd1d373ff9b286830fa74d1eb309112
|
[
"Apache-2.0"
] |
permissive
|
huggingface/diffusers
|
c82beba1ec5f0aba01b6744040a5accc41ec2493
|
5eeedd9e3336882d598091e191559f67433b6427
|
refs/heads/main
| 2023-08-29T01:22:52.237910
| 2023-08-28T18:16:27
| 2023-08-28T18:16:27
| 498,011,141
| 17,308
| 3,158
|
Apache-2.0
| 2023-09-14T20:57:44
| 2022-05-30T16:04:02
|
Python
|
UTF-8
|
Python
| false
| false
| 16,967
|
py
|
pipeline_kandinsky2_2_img2img.py
|
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Callable, List, Optional, Union
import numpy as np
import PIL
import torch
from PIL import Image
from ...models import UNet2DConditionModel, VQModel
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
logger = logging.get_logger(__name__) # pylint: disable=invalid-name
EXAMPLE_DOC_STRING = """
Examples:
```py
>>> from diffusers import KandinskyV22Img2ImgPipeline, KandinskyV22PriorPipeline
>>> from diffusers.utils import load_image
>>> import torch
>>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(
... "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16
... )
>>> pipe_prior.to("cuda")
>>> prompt = "A red cartoon frog, 4k"
>>> image_emb, zero_image_emb = pipe_prior(prompt, return_dict=False)
>>> pipe = KandinskyV22Img2ImgPipeline.from_pretrained(
... "kandinsky-community/kandinsky-2-2-decoder", torch_dtype=torch.float16
... )
>>> pipe.to("cuda")
>>> init_image = load_image(
... "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
... "/kandinsky/frog.png"
... )
>>> image = pipe(
... image=init_image,
... image_embeds=image_emb,
... negative_image_embeds=zero_image_emb,
... height=768,
... width=768,
... num_inference_steps=100,
... strength=0.2,
... ).images
>>> image[0].save("red_frog.png")
```
"""
# Copied from diffusers.pipelines.kandinsky2_2.pipeline_kandinsky2_2.downscale_height_and_width
def downscale_height_and_width(height, width, scale_factor=8):
new_height = height // scale_factor**2
if height % scale_factor**2 != 0:
new_height += 1
new_width = width // scale_factor**2
if width % scale_factor**2 != 0:
new_width += 1
return new_height * scale_factor, new_width * scale_factor
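# e.g., downscale_height_and_width(768, 768, scale_factor=8) returns (96, 96):
# 768 // 64 == 12 with no remainder, and 12 * 8 == 96; any remainder would
# round the result up to the next multiple of scale_factor.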
# Copied from diffusers.pipelines.kandinsky.pipeline_kandinsky_img2img.prepare_image
def prepare_image(pil_image, w=512, h=512):
pil_image = pil_image.resize((w, h), resample=Image.BICUBIC, reducing_gap=1)
arr = np.array(pil_image.convert("RGB"))
arr = arr.astype(np.float32) / 127.5 - 1
arr = np.transpose(arr, [2, 0, 1])
image = torch.from_numpy(arr).unsqueeze(0)
return image
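# e.g., prepare_image(pil_image) returns a float32 tensor of shape
# (1, 3, 512, 512) with pixel values rescaled from [0, 255] to [-1, 1].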
class KandinskyV22Img2ImgPipeline(DiffusionPipeline):
"""
Pipeline for image-to-image generation using Kandinsky
This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the
library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.)
Args:
scheduler ([`DDIMScheduler`]):
A scheduler to be used in combination with `unet` to generate image latents.
unet ([`UNet2DConditionModel`]):
Conditional U-Net architecture to denoise the image embedding.
movq ([`VQModel`]):
MoVQ Decoder to generate the image from the latents.
"""
def __init__(
self,
unet: UNet2DConditionModel,
scheduler: DDPMScheduler,
movq: VQModel,
):
super().__init__()
self.register_modules(
unet=unet,
scheduler=scheduler,
movq=movq,
)
self.movq_scale_factor = 2 ** (len(self.movq.config.block_out_channels) - 1)
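        # e.g., with 4 entries in block_out_channels the scale factor is
        # 2 ** 3 == 8, i.e. the MoVQ latents are 8x smaller per spatial dim.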
# Copied from diffusers.pipelines.kandinsky.pipeline_kandinsky_img2img.KandinskyImg2ImgPipeline.get_timesteps
def get_timesteps(self, num_inference_steps, strength, device):
# get the original timestep using init_timestep
init_timestep = min(int(num_inference_steps * strength), num_inference_steps)
t_start = max(num_inference_steps - init_timestep, 0)
timesteps = self.scheduler.timesteps[t_start:]
return timesteps, num_inference_steps - t_start
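        # e.g., with num_inference_steps=100 and strength=0.2:
        # init_timestep == 20 and t_start == 80, so only the last 20 scheduler
        # timesteps are run -- lower strength keeps more of the input image.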
def prepare_latents(self, image, timestep, batch_size, num_images_per_prompt, dtype, device, generator=None):
if not isinstance(image, (torch.Tensor, PIL.Image.Image, list)):
raise ValueError(
f"`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(image)}"
)
image = image.to(device=device, dtype=dtype)
batch_size = batch_size * num_images_per_prompt
if image.shape[1] == 4:
init_latents = image
else:
if isinstance(generator, list) and len(generator) != batch_size:
raise ValueError(
f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
f" size of {batch_size}. Make sure the batch size matches the length of the generators."
)
elif isinstance(generator, list):
init_latents = [
self.movq.encode(image[i : i + 1]).latent_dist.sample(generator[i]) for i in range(batch_size)
]
init_latents = torch.cat(init_latents, dim=0)
else:
init_latents = self.movq.encode(image).latent_dist.sample(generator)
init_latents = self.movq.config.scaling_factor * init_latents
init_latents = torch.cat([init_latents], dim=0)
shape = init_latents.shape
noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
# get latents
init_latents = self.scheduler.add_noise(init_latents, noise, timestep)
latents = init_latents
return latents
# Copied from diffusers.pipelines.kandinsky2_2.pipeline_kandinsky2_2.KandinskyV22Pipeline.enable_model_cpu_offload
def enable_model_cpu_offload(self, gpu_id=0):
r"""
Offloads all models to CPU using accelerate, reducing memory usage with a low impact on performance. Compared
to `enable_sequential_cpu_offload`, this method moves one whole model at a time to the GPU when its `forward`
method is called, and the model remains in GPU until the next model runs. Memory savings are lower than with
`enable_sequential_cpu_offload`, but performance is much better due to the iterative execution of the `unet`.
"""
if is_accelerate_available() and is_accelerate_version(">=", "0.17.0.dev0"):
from accelerate import cpu_offload_with_hook
else:
raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.")
device = torch.device(f"cuda:{gpu_id}")
if self.device.type != "cpu":
self.to("cpu", silence_dtype_warnings=True)
torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)
hook = None
for cpu_offloaded_model in [self.unet, self.movq]:
_, hook = cpu_offload_with_hook(cpu_offloaded_model, device, prev_module_hook=hook)
# We'll offload the last model manually.
self.final_offload_hook = hook
@torch.no_grad()
@replace_example_docstring(EXAMPLE_DOC_STRING)
def __call__(
self,
image_embeds: Union[torch.FloatTensor, List[torch.FloatTensor]],
image: Union[torch.FloatTensor, PIL.Image.Image, List[torch.FloatTensor], List[PIL.Image.Image]],
negative_image_embeds: Union[torch.FloatTensor, List[torch.FloatTensor]],
height: int = 512,
width: int = 512,
num_inference_steps: int = 100,
guidance_scale: float = 4.0,
strength: float = 0.3,
num_images_per_prompt: int = 1,
generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
output_type: Optional[str] = "pil",
callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
callback_steps: int = 1,
return_dict: bool = True,
):
"""
Function invoked when calling the pipeline for generation.
Args:
image_embeds (`torch.FloatTensor` or `List[torch.FloatTensor]`):
                The CLIP image embeddings for the text prompt, which will be used to condition the image generation.
image (`torch.FloatTensor`, `PIL.Image.Image`, `np.ndarray`, `List[torch.FloatTensor]`, `List[PIL.Image.Image]`, or `List[np.ndarray]`):
`Image`, or tensor representing an image batch, that will be used as the starting point for the
process. Can also accept image latents as `image`, if passing latents directly, it will not be encoded
again.
            strength (`float`, *optional*, defaults to 0.3):
Conceptually, indicates how much to transform the reference `image`. Must be between 0 and 1. `image`
will be used as a starting point, adding more noise to it the larger the `strength`. The number of
denoising steps depends on the amount of noise initially added. When `strength` is 1, added noise will
be maximum and the denoising process will run for the full number of iterations specified in
`num_inference_steps`. A value of 1, therefore, essentially ignores `image`.
negative_image_embeds (`torch.FloatTensor` or `List[torch.FloatTensor]`):
                The CLIP image embeddings for the negative text prompt, which will be used to condition the image generation.
height (`int`, *optional*, defaults to 512):
The height in pixels of the generated image.
width (`int`, *optional*, defaults to 512):
The width in pixels of the generated image.
num_inference_steps (`int`, *optional*, defaults to 100):
The number of denoising steps. More denoising steps usually lead to a higher quality image at the
expense of slower inference.
guidance_scale (`float`, *optional*, defaults to 4.0):
Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598).
`guidance_scale` is defined as `w` of equation 2. of [Imagen
Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale >
                1`. Higher guidance scale encourages generating images that are closely linked to the text `prompt`,
usually at the expense of lower image quality.
num_images_per_prompt (`int`, *optional*, defaults to 1):
The number of images to generate per prompt.
generator (`torch.Generator` or `List[torch.Generator]`, *optional*):
One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html)
to make generation deterministic.
output_type (`str`, *optional*, defaults to `"pil"`):
                The output format of the generated image. Choose between: `"pil"` (`PIL.Image.Image`), `"np"`
(`np.array`) or `"pt"` (`torch.Tensor`).
callback (`Callable`, *optional*):
                A function that will be called every `callback_steps` steps during inference. The function is called with the
following arguments: `callback(step: int, timestep: int, latents: torch.FloatTensor)`.
callback_steps (`int`, *optional*, defaults to 1):
The frequency at which the `callback` function is called. If not specified, the callback is called at
every step.
return_dict (`bool`, *optional*, defaults to `True`):
Whether or not to return a [`~pipelines.ImagePipelineOutput`] instead of a plain tuple.
Examples:
Returns:
[`~pipelines.ImagePipelineOutput`] or `tuple`
"""
device = self._execution_device
do_classifier_free_guidance = guidance_scale > 1.0
if isinstance(image_embeds, list):
image_embeds = torch.cat(image_embeds, dim=0)
batch_size = image_embeds.shape[0]
if isinstance(negative_image_embeds, list):
negative_image_embeds = torch.cat(negative_image_embeds, dim=0)
if do_classifier_free_guidance:
image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
negative_image_embeds = negative_image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
image_embeds = torch.cat([negative_image_embeds, image_embeds], dim=0).to(
dtype=self.unet.dtype, device=device
)
if not isinstance(image, list):
image = [image]
if not all(isinstance(i, (PIL.Image.Image, torch.Tensor)) for i in image):
raise ValueError(
f"Input is in incorrect format: {[type(i) for i in image]}. Currently, we only support PIL image and pytorch tensor"
)
image = torch.cat([prepare_image(i, width, height) for i in image], dim=0)
image = image.to(dtype=image_embeds.dtype, device=device)
latents = self.movq.encode(image)["latents"]
latents = latents.repeat_interleave(num_images_per_prompt, dim=0)
self.scheduler.set_timesteps(num_inference_steps, device=device)
timesteps, num_inference_steps = self.get_timesteps(num_inference_steps, strength, device)
latent_timestep = timesteps[:1].repeat(batch_size * num_images_per_prompt)
height, width = downscale_height_and_width(height, width, self.movq_scale_factor)
latents = self.prepare_latents(
latents, latent_timestep, batch_size, num_images_per_prompt, image_embeds.dtype, device, generator
)
for i, t in enumerate(self.progress_bar(timesteps)):
# expand the latents if we are doing classifier free guidance
latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
added_cond_kwargs = {"image_embeds": image_embeds}
noise_pred = self.unet(
sample=latent_model_input,
timestep=t,
encoder_hidden_states=None,
added_cond_kwargs=added_cond_kwargs,
return_dict=False,
)[0]
if do_classifier_free_guidance:
noise_pred, variance_pred = noise_pred.split(latents.shape[1], dim=1)
noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
_, variance_pred_text = variance_pred.chunk(2)
noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
noise_pred = torch.cat([noise_pred, variance_pred_text], dim=1)
if not (
hasattr(self.scheduler.config, "variance_type")
and self.scheduler.config.variance_type in ["learned", "learned_range"]
):
noise_pred, _ = noise_pred.split(latents.shape[1], dim=1)
# compute the previous noisy sample x_t -> x_t-1
latents = self.scheduler.step(
noise_pred,
t,
latents,
generator=generator,
)[0]
if callback is not None and i % callback_steps == 0:
callback(i, t, latents)
# post-processing
image = self.movq.decode(latents, force_not_quantize=True)["sample"]
# Offload last model to CPU
if hasattr(self, "final_offload_hook") and self.final_offload_hook is not None:
self.final_offload_hook.offload()
if output_type not in ["pt", "np", "pil"]:
raise ValueError(f"Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}")
if output_type in ["np", "pil"]:
image = image * 0.5 + 0.5
image = image.clamp(0, 1)
image = image.cpu().permute(0, 2, 3, 1).float().numpy()
if output_type == "pil":
image = self.numpy_to_pil(image)
if not return_dict:
return (image,)
return ImagePipelineOutput(images=image)
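# --- Hedged illustration (not part of the original pipeline file) ---
# A minimal, self-contained sketch of the classifier-free guidance arithmetic
# used in the denoising loop above: the batch is doubled into [unconditional,
# conditioned] halves and the guided prediction extrapolates from the
# unconditional half toward the conditioned one. All shapes are toy values.
if __name__ == "__main__":
    import torch  # the pipeline module already depends on torch

    example = torch.randn(2, 4, 64, 64)  # CFG-doubled batch of noise predictions
    uncond, cond = example.chunk(2)
    guided = uncond + 4.0 * (cond - uncond)  # guidance_scale = 4.0
    assert guided.shape == uncond.shape == (1, 4, 64, 64)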
|
f5832499c2bb7215b1c6c223f417876830e4c064
|
7ed2ef754060465709897be60ff14a0f4e2c9578
|
/delfin/drivers/hpe/hpe_msa/ssh_handler.py
|
9d58b20d6f4656ec416dae8cdc342eafed6db8cf
|
[
"Apache-2.0"
] |
permissive
|
sodafoundation/delfin
|
967b7ff276c20ea546e07538c2b02a7920aaddf4
|
978eff481945203bfbc3d84123e151f836748428
|
refs/heads/master
| 2023-09-04T11:27:21.103714
| 2023-07-13T09:02:14
| 2023-07-13T09:02:14
| 254,367,182
| 220
| 336
|
Apache-2.0
| 2023-09-13T07:04:15
| 2020-04-09T12:37:29
|
Python
|
UTF-8
|
Python
| false
| false
| 36,674
|
py
|
ssh_handler.py
|
import hashlib
import time
import six
from oslo_log import log as logging
from operator import itemgetter
from itertools import groupby
from delfin import exception
from delfin.common import constants, alert_util
from delfin.drivers.utils.ssh_client import SSHPool
from delfin.drivers.utils.tools import Tools
from delfin.drivers.hpe.hpe_msa import consts
try:
import defusedxml.cElementTree as Et
except ImportError:
import defusedxml.ElementTree as Et
LOG = logging.getLogger(__name__)
class SSHHandler(object):
def __init__(self, **kwargs):
self.ssh_pool = SSHPool(**kwargs)
def login(self):
try:
self.ssh_pool.do_exec('show pools')
except Exception as e:
LOG.error("Failed to login msa %s" %
(six.text_type(e)))
raise e
def get_storage(self, storage_id):
try:
system_info = self.ssh_pool.do_exec('show system')
system_data = self.handle_xml_to_dict(system_info, 'system')
version_info = self.ssh_pool.do_exec('show version')
version_arr = self.handle_xml_to_json(version_info, 'versions')
version_id = ""
if version_arr:
version_id = version_arr[0].get('bundle-version')
if system_data:
pools_list = self.list_storage_pools(storage_id)
total_capacity = 0
if pools_list:
for pool in pools_list:
total_capacity += int(pool.get('total_capacity'))
disks_list = self.list_storage_disks(storage_id)
raw_capacity = 0
if disks_list:
for disk in disks_list:
raw_capacity += int(disk.get('capacity'))
volumes_list = self.list_storage_volume(storage_id)
volume_all_size = 0
if volumes_list:
for volume in volumes_list:
volume_all_size += int(volume.get('total_capacity'))
health = system_data.get('health')
status = constants.StorageStatus.OFFLINE
if health == 'OK':
status = constants.StorageStatus.NORMAL
elif health == 'Degraded':
status = constants.StorageStatus.DEGRADED
serial_num = system_data.get('midplane-serial-number')
storage_map = {
'name': system_data.get('system-name'),
'vendor': consts.StorageVendor.HPE_MSA_VENDOR,
'model': system_data.get('product-id'),
'status': status,
'serial_number': serial_num,
'firmware_version': version_id,
'location': system_data.get('system-location'),
'raw_capacity': int(raw_capacity),
'total_capacity': int(total_capacity),
'used_capacity': int(volume_all_size),
'free_capacity': int(total_capacity - volume_all_size)
}
return storage_map
except Exception as e:
err_msg = "Failed to get system info : %s" % (six.text_type(e))
LOG.error(err_msg)
raise e
def list_storage_disks(self, storage_id):
try:
disk_info = self.ssh_pool.do_exec('show disks')
disk_detail = self.handle_xml_to_json(disk_info, 'drives')
disks_arr = []
for data in disk_detail:
health = data.get('health')
status = constants.StoragePoolStatus.OFFLINE
if health == 'OK':
status = constants.StoragePoolStatus.NORMAL
size = self.parse_string_to_bytes(data.get('size'))
physical_type = consts.DiskPhysicalType.\
DISK_PHYSICAL_TYPE.get(data.get('description'),
constants.DiskPhysicalType.
UNKNOWN)
rpm = data.get('rpm')
if rpm:
rpm = int(rpm) * consts.RpmSpeed.RPM_SPEED
data_map = {
'native_disk_id': data.get('location'),
'name': data.get('location'),
'physical_type': physical_type,
'status': status,
'storage_id': storage_id,
'native_disk_group_id': data.get('disk-group'),
'serial_number': data.get('serial-number'),
'manufacturer': data.get('vendor'),
'model': data.get('model'),
'speed': rpm,
'capacity': int(size),
'health_score': status
}
disks_arr.append(data_map)
return disks_arr
except Exception as e:
err_msg = "Failed to get storage disk: %s" % (six.text_type(e))
LOG.error(err_msg)
raise e
def list_storage_ports(self, storage_id):
try:
ports_info = self.ssh_pool.do_exec('show ports')
ports_split = ports_info.split('\n')
ports_array = ports_split[1:len(ports_split) - 1]
ports_xml_data = ''.join(ports_array)
xml_element = Et.fromstring(ports_xml_data)
ports_json = []
for element_data in xml_element.iter('OBJECT'):
property_name = element_data.get('basetype')
if property_name != 'status':
msg = {}
for child in element_data.iter('PROPERTY'):
msg[child.get('name')] = child.text
ports_json.append(msg)
ports_elements_info = []
for i in range(0, len(ports_json) - 1, 2):
port_element = ports_json[i].copy()
port_element.update(ports_json[i + 1])
ports_elements_info.append(port_element)
list_ports = []
for data in ports_elements_info:
status = constants.PortHealthStatus.NORMAL
conn_status = constants.PortConnectionStatus.CONNECTED
if data.get('health') != 'OK':
status = constants.PortHealthStatus.ABNORMAL
conn_status = constants.PortConnectionStatus.\
DISCONNECTED
wwn = None
port_type = constants.PortType.FC
location_port_type = data.get('port-type')
if location_port_type:
location_port_type = location_port_type.upper()
if location_port_type == 'ISCSI':
port_type = constants.PortType.ETH
else:
target_id = data.get('target-id')
if target_id:
wwn = target_id
location = '%s_%s' % (data.get('port'),
location_port_type)
speed = data.get('configured-speed', None)
max_speed = 0
if speed != 'Auto' and speed is not None:
max_speed = self.parse_string_to_bytes(speed)
data_map = {
'native_port_id': data.get('durable-id'),
'name': data.get('port'),
'type': port_type,
'connection_status': conn_status,
'health_status': status,
'location': location,
'storage_id': storage_id,
'speed': max_speed,
'max_speed': max_speed,
'mac_address': data.get('mac-address'),
'ipv4': data.get('ip-address'),
'wwn': wwn
}
list_ports.append(data_map)
return list_ports
except Exception as e:
err_msg = "Failed to get storage ports: %s" % (six.text_type(e))
LOG.error(err_msg)
raise e
def list_storage_controller(self, storage_id):
try:
controller_info = self.ssh_pool.do_exec('show controllers')
controller_detail = self.handle_xml_to_json(
controller_info, 'controllers')
controller_arr = []
for data in controller_detail:
health = data.get('health')
status = constants.StoragePoolStatus.OFFLINE
if health == 'OK':
status = constants.StoragePoolStatus.NORMAL
cpu_info = data.get('sc-cpu-type')
cpu_count = None
if cpu_info:
cpu_count = 1
memory_size = data.get('system-memory-size')
if memory_size is not None:
memory_size += "MB"
system_memory_size = self.parse_string_to_bytes(
memory_size)
data_map = {
'native_controller_id': data.get('controller-id'),
'name': data.get('durable-id'),
'storage_id': storage_id,
'status': status,
'location': data.get('position'),
'soft_version': data.get('sc-fw'),
'cpu_info': cpu_info,
'cpu_count': cpu_count,
'memory_size': int(system_memory_size)
}
controller_arr.append(data_map)
return controller_arr
except Exception as e:
err_msg = "Failed to get storage controllers: %s"\
% (six.text_type(e))
LOG.error(err_msg)
raise e
def list_storage_volume(self, storage_id):
try:
volume_infos = self.ssh_pool.do_exec('show volumes')
volume_detail = self.handle_xml_to_json(volume_infos, 'volumes')
pools_info = self.ssh_pool.do_exec('show pools')
pool_detail = self.handle_xml_to_json(pools_info, 'pools')
list_volumes = []
for data in volume_detail:
health = data.get('health')
status = constants.StoragePoolStatus.OFFLINE
if health == 'OK':
status = constants.StoragePoolStatus.NORMAL
total_size = self.parse_string_to_bytes(data.get('total-size'))
total_avail = self.parse_string_to_bytes(
data.get('allocated-size'))
native_storage_pool_id = ''
if pool_detail:
native_storage_pool_id = pool_detail[0]. \
get('serial-number')
for pools in pool_detail:
if data.get('virtual-disk-name') == pools.\
get('name'):
native_storage_pool_id = pools.\
get('serial-number')
blocks = data.get('blocks')
if blocks is not None:
blocks = int(blocks)
volume_map = {
'name': data.get('volume-name'),
'storage_id': storage_id,
'description': data.get('volume-name'),
'status': status,
'native_volume_id': str(data.get('durable-id')),
'native_storage_pool_id': native_storage_pool_id,
'wwn': str(data.get('wwn')),
'type': data.get('volume-type'),
'total_capacity': int(total_size),
'free_capacity': int(total_size - total_avail),
'used_capacity': int(total_avail),
'blocks': int(blocks),
'compressed': True,
'deduplicated': True
}
list_volumes.append(volume_map)
return list_volumes
except Exception as e:
err_msg = "Failed to get storage volume: %s" % (six.text_type(e))
LOG.error(err_msg)
raise e
def list_storage_pools(self, storage_id):
try:
pool_infos = self.ssh_pool.do_exec('show pools')
pool_detail = self.handle_xml_to_json(pool_infos, 'pools')
volume_list = self.list_storage_volume(storage_id)
pools_list = []
for data in pool_detail:
volume_size = 0
blocks = 0
if volume_list:
for volume in volume_list:
if volume.get('native_storage_pool_id') == data.\
get('serial-number'):
volume_size += volume.get('total_capacity')
blocks += volume.get('blocks')
health = data.get('health')
status = constants.StoragePoolStatus.OFFLINE
if health == 'OK':
status = constants.StoragePoolStatus.NORMAL
total_size = self.parse_string_to_bytes(
data.get('total-size'))
pool_map = {
'name': data.get('name'),
'storage_id': storage_id,
'native_storage_pool_id': data.get('serial-number'),
'status': status,
'storage_type': constants.StorageType.BLOCK,
'total_capacity': int(total_size),
'subscribed_capacity': int(blocks),
'used_capacity': volume_size,
'free_capacity': int(total_size - volume_size)
}
pools_list.append(pool_map)
return pools_list
except Exception as e:
err_msg = "Failed to get storage pool: %s" % (six.text_type(e))
LOG.error(err_msg)
raise e
@staticmethod
def parse_string_to_bytes(value):
capacity = 0
if value:
if value.isdigit():
capacity = float(value)
else:
if value == '0B':
capacity = 0
else:
unit = value[-2:]
capacity = float(value[:-2]) * int(
Tools.change_capacity_to_bytes(unit))
return capacity
@staticmethod
def handle_xml_to_json(detail_info, element):
detail_arr = []
detail_data = detail_info.split('\n')
detail = detail_data[1:len(detail_data) - 1]
detail_xml = ''.join(detail)
xml_element = Et.fromstring(detail_xml)
for children in xml_element.iter('OBJECT'):
property_name = children.get('basetype')
if element == property_name:
msg = {}
for child in children.iter('PROPERTY'):
msg[child.get('name')] = child.text
detail_arr.append(msg)
return detail_arr
def list_alerts(self, query_para):
alert_list = []
try:
alert_infos = self.ssh_pool.do_exec('show events error')
alert_json = self.handle_xml_to_json(alert_infos, 'events')
for alert_map in alert_json:
now = time.time()
occur_time = int(round(now * consts.SecondsNumber
.SECONDS_TO_MS))
time_stamp = alert_map.get('time-stamp-numeric')
if time_stamp is not None:
occur_time = int(time_stamp) * consts.SecondsNumber\
.SECONDS_TO_MS
if not alert_util.is_alert_in_time_range(query_para,
occur_time):
continue
event_code = alert_map.get('event-code')
event_id = alert_map.get('event-id')
location = alert_map.get('message')
resource_type = alert_map.get('event-code')
severity = alert_map.get('severity')
additional_info = str(alert_map.get('additional-information'))
match_key = None
if event_code:
match_key = event_code
if severity:
match_key += severity
if location:
match_key += location
description = None
if additional_info:
description = additional_info
if severity == 'Informational' or severity == 'RESOLVED':
continue
alert_model = {
'alert_id': event_id,
'alert_name': event_code,
'severity': severity,
'category': constants.Category.FAULT,
'type': 'EquipmentAlarm',
'sequence_number': event_id,
'occur_time': occur_time,
'description': description,
'resource_type': resource_type,
'location': location,
'match_key': hashlib.md5(match_key.encode()).hexdigest()
}
alert_list.append(alert_model)
alert_list_data = SSHHandler.get_last_alert_data(alert_list)
return alert_list_data
except Exception as e:
err_msg = "Failed to get storage alert: %s" % (six.text_type(e))
LOG.error(err_msg)
raise e
@staticmethod
def get_last_alert_data(alert_json):
alert_list = []
alert_json.sort(key=itemgetter('alert_name', 'location', 'severity'))
for key, item in groupby(alert_json, key=itemgetter(
'alert_name', 'location', 'severity')):
alert_last_index = 0
alert_list.append(list(item)[alert_last_index])
return alert_list
@staticmethod
def parse_alert(alert):
try:
alert_model = dict()
alert_id = None
description = None
severity = consts.TrapSeverity.TRAP_SEVERITY_MAP.get('8')
sequence_number = None
event_type = None
for alert_key, alert_value in alert.items():
if consts.AlertOIDNumber.OID_ERR_ID in alert_key:
alert_id = str(alert_value)
elif consts.AlertOIDNumber.OID_EVENT_TYPE in alert_key:
event_type = alert_value
elif consts.AlertOIDNumber.OID_EVENT_DESC in alert_key:
description = alert_value
elif consts.AlertOIDNumber.OID_SEVERITY in alert_key:
severity = consts.TrapSeverity.TRAP_SEVERITY_MAP\
.get(alert.get(consts.AlertOIDNumber.OID_SEVERITY),
constants.Severity.INFORMATIONAL)
elif consts.AlertOIDNumber.OID_EVENT_ID in alert_key:
sequence_number = alert_value
if description:
desc_arr = description.split(",")
if desc_arr:
alert_id = SSHHandler.split_by_char_and_number(
desc_arr[0], ":", 1)
alert_model['alert_id'] = str(alert_id)
alert_model['alert_name'] = event_type
alert_model['severity'] = severity
alert_model['category'] = constants.Category.FAULT
alert_model['type'] = constants.EventType.EQUIPMENT_ALARM
alert_model['sequence_number'] = sequence_number
now = time.time()
alert_model['occur_time'] = int(round(now * consts.
SecondsNumber.SECONDS_TO_MS))
alert_model['description'] = description
alert_model['location'] = description
return alert_model
except Exception as e:
LOG.error(e)
msg = "Failed to build alert model: %s." % (six.text_type(e))
raise exception.InvalidResults(msg)
@staticmethod
def split_by_char_and_number(split_str, split_char, arr_number):
split_value = ''
if split_str:
tmp_value = split_str.split(split_char, 1)
if arr_number == 1 and len(tmp_value) > 1:
split_value = tmp_value[arr_number].strip()
elif arr_number == 0:
split_value = tmp_value[arr_number].strip()
return split_value
@staticmethod
def handle_xml_to_dict(xml_info, element):
msg = {}
xml_split = xml_info.split('\n')
xml_data = xml_split[1:len(xml_split) - 1]
detail_xml = ''.join(xml_data)
xml_element = Et.fromstring(detail_xml)
for children in xml_element.iter('OBJECT'):
property_name = children.get('basetype')
if element == property_name:
for child in children.iter('PROPERTY'):
msg[child.get('name')] = child.text
return msg
def list_storage_host_initiators(self, storage_id):
try:
initiator_list = []
host_groups_info = self.ssh_pool.do_exec("show initiators")
host_groups_json = self.handle_xml_to_json(host_groups_info,
"initiator")
type_switch = {
consts.InitiatorType.ISCSI_INITIATOR_TYPE:
consts.InitiatorType.ISCSI_INITIATOR_DESCRIPTION,
consts.InitiatorType.FC_INITIATOR_TYPE:
consts.InitiatorType.FC_INITIATOR_DESCRIPTION,
}
for initiator in host_groups_json:
description = type_switch.get(
initiator.get('host-bus-type-numeric'),
consts.InitiatorType.UNKNOWN_INITIATOR_DESCRIPTION)
initiator_item = {
"name": initiator.get('nickname'),
"type": description,
"alias": initiator.get('durable-id'),
"storage_id": storage_id,
"native_storage_host_initiator_id":
initiator.get('durable-id'),
"wwn": initiator.get('id'),
"status": constants.InitiatorStatus.ONLINE,
"native_storage_host_id": initiator.get('host-id')
}
initiator_list.append(initiator_item)
return initiator_list
except Exception as e:
LOG.error("Failed to get initiator "
"from msa storage_id: %s" % storage_id)
raise e
def list_storage_hosts(self, storage_id):
try:
hosts_info = self.ssh_pool.do_exec('show host-groups')
host_list = []
hosts = self.handle_xml_to_json(hosts_info, 'host')
host_set = set()
for host in hosts:
status = constants.HostStatus.NORMAL
os_type = constants.HostOSTypes.HP_UX
host_member_count = int(host.get('member-count'))
if host_member_count > 0:
serial_number = host.get('serial-number')
if serial_number not in host_set:
host_set.add(host.get('serial-number'))
host_dict = {
"name": host.get('name'),
"description": host.get('durable-id'),
"storage_id": storage_id,
"native_storage_host_id":
host.get('serial-number'),
"os_type": os_type,
"status": status
}
host_list.append(host_dict)
return host_list
except Exception as e:
LOG.error("Failed to get host "
"from msa storage_id: %s" % storage_id)
raise e
def list_storage_host_groups(self, storage_id):
try:
host_groups_info = self.ssh_pool.do_exec('show host-groups')
host_group_list = []
storage_host_grp_relation_list = []
host_groups = self.handle_xml_to_json(
host_groups_info, 'host-group')
host_info_list = self.handle_xml_to_json(host_groups_info, 'host')
for host_group in host_groups:
member_count = int(host_group.get('member-count'))
if member_count > 0:
hosts_list = []
storage_host_group_id = host_group.get('serial-number')
for host_info in host_info_list:
host_id = host_info.get('serial-number')
host_group_id = host_info.get('host-group')
if host_id != 'NOHOST' and \
host_group_id == storage_host_group_id:
hosts_list.append(host_id)
storage_host_group_relation = {
'storage_id': storage_id,
'native_storage_host_group_id':
storage_host_group_id,
'native_storage_host_id': host_id
}
storage_host_grp_relation_list.\
append(storage_host_group_relation)
host_group_map = {
"name": host_group.get('name'),
"description": host_group.get('durable-id'),
"storage_id": storage_id,
"native_storage_host_group_id": storage_host_group_id,
"storage_hosts": ','.join(hosts_list)
}
host_group_list.append(host_group_map)
storage_host_groups_result = {
'storage_host_groups': host_group_list,
'storage_host_grp_host_rels':
storage_host_grp_relation_list
}
return storage_host_groups_result
except Exception as e:
LOG.error("Failed to get host_group from msa "
"storage_id: %s" % storage_id)
raise e
def list_volume_groups(self, storage_id):
try:
volume_group_list = []
volume_group_relation_list = []
volume_groups_info = self.ssh_pool.do_exec('show volume-groups')
volume_groups_json = self.handle_xml_to_json(
volume_groups_info, 'volume-groups')
volumes_json = self.handle_xml_to_json(
volume_groups_info, 'volumes')
for volume_group in volume_groups_json:
volumes_list = []
durable_id = volume_group.get('durable-id')
if volumes_json:
for volume_info in volumes_json:
group_key = volume_info.get('group-key')
volume_id = volume_info.get('durable-id')
if group_key == durable_id:
volumes_list.append(volume_id)
volume_group_relation = {
'storage_id': storage_id,
'native_volume_group_id': durable_id,
'native_volume_id': volume_id
}
volume_group_relation_list.\
append(volume_group_relation)
volume_groups_map = {
"name": volume_group.get('group-name'),
"description": volume_group.get('durable-id'),
"storage_id": storage_id,
"native_volume_group_id": durable_id,
"volumes": ','.join(volumes_list)
}
volume_group_list.append(volume_groups_map)
volume_group_result = {
'volume_groups': volume_group_list,
'vol_grp_vol_rels': volume_group_relation_list
}
return volume_group_result
except Exception as e:
LOG.error("Failed to get volume_group"
" from msa storage_id: %s" % storage_id)
raise e
def list_port_groups(self, storage_id):
try:
port_group_list = []
port_group_relation_list = []
storage_view_info = self.ssh_pool.do_exec('show maps all ')
storage_port_list = self.list_storage_ports(storage_id)
storage_host_view = self.handle_xml_to_json(
storage_view_info, 'volume-view-mappings')
reduce_set = set()
for storage_view in storage_host_view:
port_number = storage_view.get('ports')
port_group_dict = self.get_port_group_id_and_name(
port_number, storage_port_list)
native_port_group_id = port_group_dict.get(
'native_port_group_id')
native_port_group_name = port_group_dict.get(
'native_port_group_name')
if native_port_group_name:
native_port_group_id = "port_group_" + \
native_port_group_id
if native_port_group_id in reduce_set:
continue
reduce_set.add(native_port_group_id)
port_group_map = {
'name': native_port_group_id,
'description': native_port_group_id,
'storage_id': storage_id,
'native_port_group_id': native_port_group_id,
'ports': native_port_group_name
}
port_ids = native_port_group_name.split(',')
for port_id in port_ids:
port_group_relation = {
'storage_id': storage_id,
'native_port_group_id': native_port_group_id,
'native_port_id': port_id
}
port_group_relation_list.append(
port_group_relation)
port_group_list.append(port_group_map)
result = {
'port_groups': port_group_list,
'port_grp_port_rels': port_group_relation_list
}
return result
except Exception as e:
LOG.error("Failed to get port_group"
" from msa storage_id: %s" % storage_id)
raise e
@staticmethod
def get_port_group_id_and_name(port_number, storage_port_list):
native_port_group_id = []
native_port_group_name = []
if port_number:
port_codes = port_number.split(',')
for port_code in port_codes:
for port in storage_port_list:
port_name = port.get('name')
durable_id = port.get('native_port_id')
if port_code in port_name:
native_port_group_id.append(port_name)
native_port_group_name.append(durable_id)
port_group_dict = {
'native_port_group_id': ''.join(native_port_group_id),
'native_port_group_name': ','.join(native_port_group_name)
}
return port_group_dict
def list_masking_views(self, storage_id):
try:
views_list = []
storage_view_info = self.ssh_pool.do_exec('show maps all ')
if storage_view_info:
storage_port_list = self.list_storage_ports(storage_id)
host_list = self.list_storage_hosts(storage_id)
initiators_list = self.list_storage_host_initiators(storage_id)
host_group_list = self.list_storage_host_groups(storage_id)
storage_host_group = host_group_list.get('storage_host_groups')
storage_host_view = self.handle_xml_to_json(
storage_view_info, 'volume-view-mappings')
views_list.extend(
self.get_storage_view_list(storage_host_view, 'volume',
storage_id, storage_port_list,
host_list, initiators_list,
storage_host_group))
storage_host_volume_groups_view = self.handle_xml_to_json(
storage_view_info, 'volume-group-view-mappings')
views_list.extend(self.get_storage_view_list(
storage_host_volume_groups_view, 'group',
storage_id, storage_port_list, host_list, initiators_list,
storage_host_group))
return views_list
except Exception as e:
LOG.error("Failed to get view "
"from msa storage_id: %s" % storage_id)
raise e
def get_storage_view_list(self, storage_view_list, vol_type, storage_id,
storage_port_list, host_list, initiators_list,
storage_host_groups):
views_list = []
if storage_view_list:
native_volume_group_name = 'native_volume_group_id'\
if vol_type == 'group' else 'native_volume_id'
for host_view in storage_view_list:
access = host_view.get('access')
if access != 'not-mapped':
mapped_id = host_view.get('mapped-id')
native_masking_view_id = host_view.get('durable-id')
volume_id = host_view.get('parent-id')
port_number = host_view.get('ports')
view_name = host_view.get('nickname')
host_group_name = 'native_storage_host_group_id'\
if '.*.*' in view_name else 'native_storage_host_id'
native_port_group_dict = \
self.get_port_group_id_and_name(port_number,
storage_port_list)
native_port_group_id = native_port_group_dict.get(
'native_port_group_id')
native_storage_host_id = self.get_storage_host_id(
host_list, mapped_id, initiators_list,
storage_host_groups, view_name)
view_map = {
"name": view_name,
"description": view_name,
"storage_id": storage_id,
"native_masking_view_id":
native_masking_view_id + volume_id,
native_volume_group_name: volume_id,
host_group_name: native_storage_host_id
}
if native_port_group_id:
view_map['native_port_group_id'] = \
"port_group_" + native_port_group_id
views_list.append(view_map)
return views_list
@staticmethod
def get_storage_host_id(host_list, mapped_id, initiators_list,
storage_host_groups, view_name):
for host_value in host_list:
host_durable_id = host_value.get('description')
if host_durable_id == mapped_id:
native_storage_host_id = \
host_value.get('native_storage_host_id')
return native_storage_host_id
for initiators in initiators_list:
initiators_durable_id = initiators.get(
'native_storage_host_initiator_id')
if initiators_durable_id == mapped_id:
native_storage_host_id = \
initiators.get('native_storage_host_id')
return native_storage_host_id
group_name = view_name.split('.')[0]
for host_group in storage_host_groups:
if group_name == host_group.get('name'):
native_storage_host_id = \
host_group.get('native_storage_host_group_id')
return native_storage_host_id
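# --- Hedged usage sketch (not part of the original driver) ---
# handle_xml_to_json expects raw CLI output whose first line is the echoed
# command and whose last line is trailing shell noise; only the XML between
# them is parsed. The payload below is an illustrative stub, not real MSA
# output.
if __name__ == "__main__":
    sample = ('# show pools\n'
              '<RESPONSE><OBJECT basetype="pools">'
              '<PROPERTY name="name">A</PROPERTY>'
              '<PROPERTY name="total-size">1.2TB</PROPERTY>'
              '</OBJECT></RESPONSE>\n'
              'ok')
    print(SSHHandler.handle_xml_to_json(sample, 'pools'))
    # -> [{'name': 'A', 'total-size': '1.2TB'}]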
|
c18f225d6fa7241802f2d152422467353f1da04c
|
59b43642e6d96af4ec0f9e89228576d196342b0e
|
/finplot/examples/btc-long-term.py
|
cd0499c9c4bbde3f920064c41e80daf6fec78a1d
|
[
"MIT"
] |
permissive
|
highfestiva/finplot
|
a6065e8db3e924193755b62fbeab3f92f1c36ad1
|
3c0f3c63ac46654299ec3e5a40eaa92d4a649990
|
refs/heads/master
| 2023-08-21T18:55:33.778807
| 2023-08-14T16:01:41
| 2023-08-14T16:01:41
| 145,775,657
| 746
| 173
|
MIT
| 2023-05-28T18:25:47
| 2018-08-22T23:56:19
|
Python
|
UTF-8
|
Python
| false
| false
| 917
|
py
|
btc-long-term.py
|
#!/usr/bin/env python3
from datetime import date
import finplot as fplt
import requests
import pandas as pd
now = date.today().strftime('%Y-%m-%d')
r = requests.get('https://www.bitstamp.net/api-internal/tradeview/price-history/BTC/USD/?step=86400&start_datetime=2011-08-18T00:00:00.000Z&end_datetime=%sT00:00:00.000Z' % now)
df = pd.DataFrame(r.json()['data']).astype({'timestamp':int, 'open':float, 'close':float, 'high':float, 'low':float}).set_index('timestamp')
# plot price
fplt.create_plot('Bitcoin 2011-%s'%now.split('-')[0], yscale='log')
fplt.candlestick_ochl(df['open close high low'.split()])
# monthly separator lines
months = pd.to_datetime(df.index, unit='s').strftime('%m')
last_month = ''
for x,(month,price) in enumerate(zip(months, df.close)):
if month != last_month:
fplt.add_line((x-0.5, price*0.5), (x-0.5, price*2), color='#bbb', style='--')
last_month = month
fplt.show()
|
4d385785d90cb8845335d9489b52081e5cfbc503
|
aaabf5c6cea75fb6649fafff57d4e92e9b7142b8
|
/smote_variants/base/_metrics.py
|
5a3ad472d5d9a15712f7b6044d5e7c0d3db82868
|
[
"MIT"
] |
permissive
|
analyticalmindsltd/smote_variants
|
febcdad7c64c5d6bce5a69a51d7228ea629aa7c4
|
708568e5b44abdc798d9329f993ac6561ad1439d
|
refs/heads/master
| 2023-05-12T21:24:57.222534
| 2023-04-17T10:43:06
| 2023-04-17T10:43:06
| 151,773,885
| 434
| 106
|
MIT
| 2023-05-04T14:18:28
| 2018-10-05T20:21:54
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 6,519
|
py
|
_metrics.py
|
"""
This module implements some imbalanced classification metrics.
"""
import numpy as np
from sklearn.metrics import roc_auc_score, log_loss
__all__ = ['prediction_labels',
'calculate_atoms',
'calculate_label_scores',
'calculate_prob_scores',
'calculate_all_scores',
'all_scores']
all_scores = ['acc', 'sens', 'spec', 'ppv', 'npv', 'fpr', 'fdr',
'fnr', 'bacc', 'gacc', 'f1', 'mcc', 'l', 'ltp', 'lfp', 'lfn',
'ltn', 'lp', 'ln', 'uc', 'informedness', 'markedness', 'p_top20',
'brier', 'log_loss', 'auc']
def prediction_labels(probabilities_maj):
"""
Determine the labels from the probabilities.
Args:
probabilities_maj (np.array): the majority probabilities
Returns:
np.array: the labels row-by-row
"""
labels = (probabilities_maj > 0.5) * 1
equals = probabilities_maj == 0.5
indices = np.where(equals)[0]
if len(indices) <= 1:
return labels
half = int(len(indices)/2)
labels[indices[:half]] = 0
labels[indices[half:]] = 1
return labels
def calculate_atoms(test_labels, predicted_labels, min_label=1):
"""
Calculate the atoms used for the measures.
Args:
test_labels (np.array): the true labels
predicted_labels (np.array): the predicted labels
min_label (int): the minority label
Returns:
dict: the atoms
"""
atoms = {}
equals = np.equal(test_labels, predicted_labels)
not_equals = np.logical_not(equals)
min_sample = test_labels == min_label
maj_sample = np.logical_not(min_sample)
atoms['tp'] = int(np.sum(np.logical_and(equals, min_sample)))
atoms['tn'] = int(np.sum(np.logical_and(equals, maj_sample)))
atoms['fp'] = int(np.sum(np.logical_and(not_equals, maj_sample)))
atoms['fn'] = int(np.sum(np.logical_and(not_equals, min_sample)))
return atoms
def _log_score(multiplier, value):
"""
Calculates a log score and returns None if not computable.
Args:
multiplier (float): the multiplier
value (float): the value to take the log of
Returns:
float: the score
"""
if value > 0:
log_value = np.log(value)
else:
log_value = np.nan
if not np.isfinite(log_value):
return None
return float(multiplier * log_value)
def _log_score_div(numerator, denominator):
"""
Calculates a log score and returns None if not computable.
Args:
numerator (float): the numerator
denominator (float): the denominator
Returns:
float: the score
"""
if denominator > 0:
return _log_score(numerator, numerator / denominator)
return None
def calculate_label_scores(atoms):
"""
Calculate scores from labels.
Args:
atoms (dict): the atomic scores
Returns:
dict: the label scores
"""
atoms['p'] = atoms['tp'] + atoms['fn']
atoms['n'] = atoms['fp'] + atoms['tn']
atoms['acc'] = (atoms['tp'] + atoms['tn']) / (atoms['p'] + atoms['n'])
atoms['sens'] = atoms['tp'] / atoms['p']
atoms['spec'] = atoms['tn'] / atoms['n']
if atoms['tp'] + atoms['fp'] > 0:
atoms['ppv'] = atoms['tp'] / (atoms['tp'] + atoms['fp'])
else:
atoms['ppv'] = 0.0
if atoms['tn'] + atoms['fn'] > 0:
atoms['npv'] = atoms['tn'] / (atoms['tn'] + atoms['fn'])
else:
atoms['npv'] = 0.0
atoms['fpr'] = 1.0 - atoms['spec']
atoms['fdr'] = 1.0 - atoms['ppv']
atoms['fnr'] = 1.0 - atoms['sens']
atoms['bacc'] = (atoms['sens'] + atoms['spec'])/2.0
atoms['gacc'] = float(np.sqrt(atoms['sens']*atoms['spec']))
atoms['f1'] = 2 * atoms['tp'] / (2 * atoms['tp'] + atoms['fp'] + atoms['fn'])
tp_fp = (atoms['tp'] + atoms['fp'])
tp_fn = (atoms['tp'] + atoms['fn'])
tn_fp = (atoms['fp'] + atoms['tn'])
tn_fn = (atoms['fn'] + atoms['tn'])
mcc_num = atoms['tp']*atoms['tn'] - atoms['fp']*atoms['fn']
mcc_denom = float(np.prod([tp_fp, tp_fn, tn_fp, tn_fn]))
if mcc_denom == 0:
atoms['mcc'] = None
else:
atoms['mcc'] = float(mcc_num/np.sqrt(mcc_denom))
atoms['l'] = float((atoms['p'] + atoms['n']) * np.log(atoms['p'] + atoms['n']))
atoms['ltp'] = _log_score_div(atoms['tp'], tp_fp * tp_fn)
atoms['lfp'] = _log_score_div(atoms['fp'], tp_fp * tn_fp)
atoms['lfn'] = _log_score_div(atoms['fn'], tp_fn * tn_fn)
atoms['ltn'] = _log_score_div(atoms['tn'], tn_fp * tn_fn)
atoms['lp'] = float(atoms['p'] * np.log(atoms['p']/(atoms['p'] + atoms['n'])))
atoms['ln'] = float(atoms['n'] * np.log(atoms['n']/(atoms['p'] + atoms['n'])))
items = [atoms['ltp'], atoms['lfp'], atoms['lfn'], atoms['ltn']]
if np.all([item is not None for item in items]):
uc_num = atoms['l'] + np.sum(items)
uc_denom = atoms['l'] + atoms['lp'] + atoms['ln']
atoms['uc'] = uc_num / uc_denom
else:
atoms['uc'] = None
atoms['informedness'] = atoms['sens'] + atoms['spec'] - 1.0
atoms['markedness'] = atoms['ppv'] + atoms['npv'] - 1.0
return atoms
def calculate_prob_scores(test_labels, probabilities, min_label=1):
"""
Calculate scores from probabilities.
Args:
test_labels (np.array): the true labels
probabilities (np.array): the probabilities
min_label (int): the minority label
Returns:
dict: the probability scores
"""
results = {}
thres = max(int(0.2*len(test_labels)), 1)
results['p_top20'] = float(np.sum(test_labels[:thres] == min_label)/thres)
results['brier'] = float(np.mean((probabilities - test_labels)**2))
results['log_loss'] = float(log_loss(test_labels, probabilities))
results['auc'] = float(roc_auc_score(test_labels, probabilities))
return results
def calculate_all_scores(test_labels, probabilities, min_label=1):
"""
Calculate all scores.
Args:
test_labels (np.array): the true labels
probabilities (np.array): the probabilities
min_label (int): the minority label
Returns:
dict: all scores
"""
pred_labels = prediction_labels(probabilities)
results = calculate_atoms(test_labels, pred_labels, min_label)
results = calculate_label_scores(results)
results = {**results, **calculate_prob_scores(test_labels,
probabilities,
min_label)}
return results
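# --- Hedged usage sketch (not part of the original module) ---
# Scoring a toy prediction end to end. prediction_labels maps probabilities
# above 0.5 to label 1, so the probabilities here are oriented so that the
# minority class (min_label=1) receives the high values.
if __name__ == "__main__":
    test_labels = np.array([1, 0, 1, 0, 1, 0])
    probabilities = np.array([0.9, 0.2, 0.7, 0.4, 0.6, 0.1])
    scores = calculate_all_scores(test_labels, probabilities, min_label=1)
    print(scores['acc'], scores['f1'], scores['auc'])  # 1.0 1.0 1.0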
|
f6969fe69f2261ab6b24b8bc5c3ab3b1cba5c340
|
3b6b6a580bf6127b288a42ab4519565adc720fbd
|
/days/053-056-django-registration/demo/mysite/urls.py
|
60443a059f4ef79b066cd64a990272163c4d4bdc
|
[] |
no_license
|
talkpython/100daysofweb-with-python-course
|
f1f296a5e52670fccba895e078318a5098f96e2f
|
c6f2fb22a29f74284b2d52ee019e0ace6a6353fc
|
refs/heads/master
| 2023-07-19T11:21:46.515974
| 2023-04-25T21:34:27
| 2023-04-25T21:34:27
| 134,765,291
| 627
| 495
| null | 2023-04-25T21:34:28
| 2018-05-24T20:28:21
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 313
|
py
|
urls.py
|
from django.contrib import admin
from django.urls import include, path
urlpatterns = [
path('', include('quotes.urls')),
path('my-backend/', admin.site.urls),
path(r'accounts/', include('django_registration.backends.activation.urls')),
path(r'accounts/', include('django.contrib.auth.urls')),
]
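# --- Hedged sketch (not part of the original file) ---
# With the includes above, auth and registration views resolve under
# /accounts/. The URL names below come from django.contrib.auth.urls and
# django-registration's activation backend; verify them against the installed
# versions before relying on them.
#
#     from django.urls import reverse
#     reverse('login')                         # -> '/accounts/login/'
#     reverse('django_registration_register')  # -> '/accounts/register/'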
|
d02742d016aee7d6badd5b07b4d55628ac543647
|
c33e5b0d8182b5aa05bd31fa052f4d23e83ec6a7
|
/parakeet/modules/attention.py
|
154625cc3c1d9426ed2d21edc3064798b73ccd3a
|
[
"Apache-2.0"
] |
permissive
|
PaddlePaddle/Parakeet
|
baad66e2ca4f89cc996b865fcb0cb814f7ae4f7f
|
8705a2a8405e3c63f2174d69880d2b5525a6c9fd
|
refs/heads/develop
| 2021-11-27T20:22:08.102845
| 2021-11-19T02:21:01
| 2021-11-19T02:21:01
| 243,677,660
| 609
| 92
|
NOASSERTION
| 2022-01-27T02:27:56
| 2020-02-28T04:24:13
|
Python
|
UTF-8
|
Python
| false
| false
| 12,163
|
py
|
attention.py
|
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
import numpy as np
import paddle
from paddle import nn
from paddle.nn import functional as F
def scaled_dot_product_attention(q, k, v, mask=None, dropout=0.0,
training=True):
r"""Scaled dot product attention with masking.
Assume that q, k, v all have the same leading dimensions (denoted as * in
descriptions below). Dropout is applied to attention weights before
weighted sum of values.
Parameters
-----------
q : Tensor [shape=(\*, T_q, d)]
the query tensor.
k : Tensor [shape=(\*, T_k, d)]
the key tensor.
v : Tensor [shape=(\*, T_k, d_v)]
the value tensor.
mask : Tensor, [shape=(\*, T_q, T_k) or broadcastable shape], optional
the mask tensor, zeros correspond to paddings. Defaults to None.
Returns
----------
out : Tensor [shape=(\*, T_q, d_v)]
the context vector.
attn_weights : Tensor [shape=(\*, T_q, T_k)]
the attention weights.
"""
d = q.shape[-1] # we only support imperative execution
qk = paddle.matmul(q, k, transpose_y=True)
scaled_logit = paddle.scale(qk, 1.0 / math.sqrt(d))
if mask is not None:
scaled_logit += paddle.scale((1.0 - mask), -1e9) # hard coded here
attn_weights = F.softmax(scaled_logit, axis=-1)
attn_weights = F.dropout(attn_weights, dropout, training=training)
out = paddle.matmul(attn_weights, v)
return out, attn_weights
def drop_head(x, drop_n_heads, training=True):
"""Drop n context vectors from multiple ones.
Parameters
----------
x : Tensor [shape=(batch_size, num_heads, time_steps, channels)]
The input, multiple context vectors.
drop_n_heads : int [0<= drop_n_heads <= num_heads]
Number of vectors to drop.
training : bool
A flag indicating whether it is in training. If `False`, no dropout is
applied.
Returns
-------
Tensor
The output.
"""
if not training or (drop_n_heads == 0):
return x
batch_size, num_heads, _, _ = x.shape
# drop all heads
if num_heads == drop_n_heads:
return paddle.zeros_like(x)
mask = np.ones([batch_size, num_heads])
mask[:, :drop_n_heads] = 0
for subarray in mask:
np.random.shuffle(subarray)
scale = float(num_heads) / (num_heads - drop_n_heads)
mask = scale * np.reshape(mask, [batch_size, num_heads, 1, 1])
out = x * paddle.to_tensor(mask)
return out
def _split_heads(x, num_heads):
batch_size, time_steps, _ = x.shape
x = paddle.reshape(x, [batch_size, time_steps, num_heads, -1])
x = paddle.transpose(x, [0, 2, 1, 3])
return x
def _concat_heads(x):
batch_size, _, time_steps, _ = x.shape
x = paddle.transpose(x, [0, 2, 1, 3])
x = paddle.reshape(x, [batch_size, time_steps, -1])
return x
# Standard implementations of Monohead Attention & Multihead Attention
class MonoheadAttention(nn.Layer):
"""Monohead Attention module.
Parameters
----------
model_dim : int
Feature size of the query.
dropout : float, optional
Dropout probability of scaled dot product attention and final context
vector. Defaults to 0.0.
k_dim : int, optional
Feature size of the key of each scaled dot product attention. If not
provided, it is set to ``model_dim``. Defaults to None.
v_dim : int, optional
Feature size of the value of each scaled dot product attention. If not
provided, it is set to ``model_dim``. Defaults to None.
"""
def __init__(self,
model_dim: int,
dropout: float=0.0,
k_dim: int=None,
v_dim: int=None):
super(MonoheadAttention, self).__init__()
k_dim = k_dim or model_dim
v_dim = v_dim or model_dim
self.affine_q = nn.Linear(model_dim, k_dim)
self.affine_k = nn.Linear(model_dim, k_dim)
self.affine_v = nn.Linear(model_dim, v_dim)
self.affine_o = nn.Linear(v_dim, model_dim)
self.model_dim = model_dim
self.dropout = dropout
def forward(self, q, k, v, mask):
"""Compute context vector and attention weights.
Parameters
-----------
q : Tensor [shape=(batch_size, time_steps_q, model_dim)]
The queries.
k : Tensor [shape=(batch_size, time_steps_k, model_dim)]
The keys.
v : Tensor [shape=(batch_size, time_steps_k, model_dim)]
The values.
mask : Tensor [shape=(batch_size, time_steps_q, time_steps_k)] or broadcastable shape
The mask.
Returns
----------
out : Tensor [shape=(batch_size, time_steps_q, model_dim)]
The context vector.
attention_weights : Tensor [shape=(batch_size, time_steps_q, time_steps_k)]
The attention weights.
"""
q = self.affine_q(q) # (B, T, C)
k = self.affine_k(k)
v = self.affine_v(v)
context_vectors, attention_weights = scaled_dot_product_attention(
q, k, v, mask, self.dropout, self.training)
out = self.affine_o(context_vectors)
return out, attention_weights
class MultiheadAttention(nn.Layer):
"""Multihead Attention module.
Parameters
-----------
model_dim: int
The feature size of query.
num_heads : int
The number of attention heads.
dropout : float, optional
Dropout probability of scaled dot product attention and final context
vector. Defaults to 0.0.
k_dim : int, optional
Feature size of the key of each scaled dot product attention. If not
provided, it is set to ``model_dim / num_heads``. Defaults to None.
v_dim : int, optional
Feature size of the value of each scaled dot product attention. If not
provided, it is set to ``model_dim / num_heads``. Defaults to None.
Raises
---------
ValueError
If ``model_dim`` is not divisible by ``num_heads``.
"""
def __init__(self,
model_dim: int,
num_heads: int,
dropout: float=0.0,
k_dim: int=None,
v_dim: int=None):
super(MultiheadAttention, self).__init__()
if model_dim % num_heads != 0:
raise ValueError("model_dim must be divisible by num_heads")
depth = model_dim // num_heads
k_dim = k_dim or depth
v_dim = v_dim or depth
self.affine_q = nn.Linear(model_dim, num_heads * k_dim)
self.affine_k = nn.Linear(model_dim, num_heads * k_dim)
self.affine_v = nn.Linear(model_dim, num_heads * v_dim)
self.affine_o = nn.Linear(num_heads * v_dim, model_dim)
self.num_heads = num_heads
self.model_dim = model_dim
self.dropout = dropout
def forward(self, q, k, v, mask):
"""Compute context vector and attention weights.
Parameters
-----------
q : Tensor [shape=(batch_size, time_steps_q, model_dim)]
The queries.
k : Tensor [shape=(batch_size, time_steps_k, model_dim)]
The keys.
v : Tensor [shape=(batch_size, time_steps_k, model_dim)]
The values.
mask : Tensor [shape=(batch_size, time_steps_q, time_steps_k)] or broadcastable shape
The mask.
Returns
----------
out : Tensor [shape=(batch_size, time_steps_q, model_dim)]
The context vector.
attention_weights : Tensor [shape=(batch_size, time_steps_q, time_steps_k)]
The attention weights.
"""
q = _split_heads(self.affine_q(q), self.num_heads) # (B, h, T, C)
k = _split_heads(self.affine_k(k), self.num_heads)
v = _split_heads(self.affine_v(v), self.num_heads)
mask = paddle.unsqueeze(mask, 1) # unsqueeze for the h dim
context_vectors, attention_weights = scaled_dot_product_attention(
q, k, v, mask, self.dropout, self.training)
# NOTE: there is more sophisticated implementation: Scheduled DropHead
context_vectors = _concat_heads(context_vectors) # (B, T, h*C)
out = self.affine_o(context_vectors)
return out, attention_weights
class LocationSensitiveAttention(nn.Layer):
"""Location Sensitive Attention module.
Reference: `Attention-Based Models for Speech Recognition <https://arxiv.org/pdf/1506.07503.pdf>`_
Parameters
-----------
d_query: int
The feature size of query.
d_key : int
The feature size of key.
d_attention : int
The feature size of the attention hidden representation.
location_filters : int
Filter size of attention convolution.
location_kernel_size : int
Kernel size of attention convolution.
"""
def __init__(self,
d_query: int,
d_key: int,
d_attention: int,
location_filters: int,
location_kernel_size: int):
super().__init__()
self.query_layer = nn.Linear(d_query, d_attention, bias_attr=False)
self.key_layer = nn.Linear(d_key, d_attention, bias_attr=False)
self.value = nn.Linear(d_attention, 1, bias_attr=False)
# Location Layer
self.location_conv = nn.Conv1D(
2,
location_filters,
kernel_size=location_kernel_size,
padding=int((location_kernel_size - 1) / 2),
bias_attr=False,
data_format='NLC')
self.location_layer = nn.Linear(
location_filters, d_attention, bias_attr=False)
def forward(self,
query,
processed_key,
value,
attention_weights_cat,
mask=None):
"""Compute context vector and attention weights.
Parameters
-----------
query : Tensor [shape=(batch_size, d_query)]
The queries.
processed_key : Tensor [shape=(batch_size, time_steps_k, d_attention)]
The keys after linear layer.
value : Tensor [shape=(batch_size, time_steps_k, d_key)]
The values.
attention_weights_cat : Tensor [shape=(batch_size, time_step_k, 2)]
Attention weights concat.
mask : Tensor, optional
The mask. Shape should be (batch_size, time_steps_k, 1).
Defaults to None.
Returns
----------
attention_context : Tensor [shape=(batch_size, d_attention)]
The context vector.
attention_weights : Tensor [shape=(batch_size, time_steps_k)]
The attention weights.
"""
processed_query = self.query_layer(paddle.unsqueeze(query, axis=[1]))
processed_attention_weights = self.location_layer(
self.location_conv(attention_weights_cat))
# (B, T_enc, 1)
alignment = self.value(
paddle.tanh(processed_attention_weights + processed_key +
processed_query))
if mask is not None:
alignment = alignment + (1.0 - mask) * -1e9
attention_weights = F.softmax(alignment, axis=1)
attention_context = paddle.matmul(
attention_weights, value, transpose_x=True)
attention_weights = paddle.squeeze(attention_weights, axis=-1)
attention_context = paddle.squeeze(attention_context, axis=1)
return attention_context, attention_weights
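# --- Hedged usage sketch (not part of the original module) ---
# Calling the functional attention above with toy tensors. Shapes follow the
# docstring: q is (batch, T_q, d), k and v are (batch, T_k, d), and the mask
# broadcasts to (batch, T_q, T_k) with zeros marking padding positions.
if __name__ == "__main__":
    q = paddle.randn([2, 5, 16])
    k = paddle.randn([2, 7, 16])
    v = paddle.randn([2, 7, 16])
    mask = paddle.ones([2, 1, 7])  # all positions valid
    out, attn = scaled_dot_product_attention(q, k, v, mask, training=False)
    print(out.shape, attn.shape)  # [2, 5, 16] [2, 5, 7]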
|
2251bdbb0e4646f046e0f32c606ef857a555a74e
|
63cb78527bcb90f984788587a29f8f115e94ab64
|
/dash_bio/Jsme.py
|
83d47b6a34e47121e55222170a385f143db023c0
|
[
"BSD-3-Clause",
"MIT",
"Apache-2.0",
"BSD-2-Clause",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
plotly/dash-bio
|
2b3468626c7f021c083c8b9170e61862d5dc151d
|
8a97db7811cc586d7e0bf1d33c17b898052b2e8f
|
refs/heads/master
| 2023-09-03T13:30:45.743959
| 2023-08-16T15:26:27
| 2023-08-16T15:26:27
| 141,365,566
| 505
| 228
|
MIT
| 2023-08-23T01:28:46
| 2018-07-18T01:40:23
|
Python
|
UTF-8
|
Python
| false
| false
| 3,171
|
py
|
Jsme.py
|
# AUTO GENERATED FILE - DO NOT EDIT
from dash.development.base_component import Component, _explicitize_args
class Jsme(Component):
"""A Jsme component.
JSME is a molecule editor that supports drawing and
editing of molecules and reactions in a web app,
supporting both desktop and mobile devices. A built-in
substituent menu and several keyboard shortcuts
provide speedy access to the most common editing features and allow easy
and fast creation of even large and complex molecules. The editor
is able to export molecules as SMILES, MDL/Symyx/Accelrys Molfile or
in its own compact format (one line textual representation of a molecule or
reaction including also atomic 2D coordinates). The SMILES code generated by the JSME
is canonical, i.e. independent of the way the molecule was drawn.
See more detailed documentation here: https://jsme-editor.github.io/help.html
Keyword arguments:
- id (string; default 'jsme'):
The ID of this component, used to identify dash components in
callbacks. The ID needs to be unique across all of the components
in an app.
- eventSmiles (string; optional):
A Dash prop that returns data when the SMILES is changed.
- height (string; default '600px'):
The height of the JSME container. Can be set in px, % etc.
- loading_state (dict; optional):
Object that holds the loading state object coming from
dash-renderer.
`loading_state` is a dict with keys:
- component_name (string; optional):
Holds the name of the component that is loading.
- is_loading (boolean; optional):
Determines if the component is loading or not.
- prop_name (string; optional):
Holds which property is loading.
- options (string; default 'newLook'):
String that is a comma separated string of JSME options. The
available options are described on the
https://wiki.jmol.org/index.php/Jmol_JavaScript_Object/JME/Options.
- smiles (string; optional):
The molecule SMILES to display.
- style (dict; optional):
Generic style overrides on the plot div.
- width (string; default '600px'):
The width of the JSME container. Can be set in px, % etc."""
_children_props = []
_base_nodes = ['children']
_namespace = 'dash_bio'
_type = 'Jsme'
@_explicitize_args
def __init__(self, id=Component.UNDEFINED, style=Component.UNDEFINED, options=Component.UNDEFINED, height=Component.UNDEFINED, width=Component.UNDEFINED, eventSmiles=Component.UNDEFINED, smiles=Component.UNDEFINED, loading_state=Component.UNDEFINED, **kwargs):
self._prop_names = ['id', 'eventSmiles', 'height', 'loading_state', 'options', 'smiles', 'style', 'width']
self._valid_wildcard_attributes = []
self.available_properties = ['id', 'eventSmiles', 'height', 'loading_state', 'options', 'smiles', 'style', 'width']
self.available_wildcard_properties = []
_explicit_args = kwargs.pop('_explicit_args')
_locals = locals()
_locals.update(kwargs) # For wildcard attrs and excess named props
args = {k: _locals[k] for k in _explicit_args}
super(Jsme, self).__init__(**args)
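# --- Hedged usage sketch (not part of the generated file) ---
# Embedding the component in a minimal Dash app. Requires the dash and
# dash_bio packages; the SMILES string (caffeine) is just sample input.
#
#     import dash
#     import dash_bio
#
#     app = dash.Dash(__name__)
#     app.layout = dash_bio.Jsme(id='jsme', smiles='CN1C=NC2=C1C(=O)N(C)C(=O)N2C')
#     app.run_server(debug=True)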
|
8b1f800dc9a2f296d7dddb38290fff9df438f8cb
|
f24fff4a91925831a92aff11581da59fbe0e719a
|
/lib/locast_service.py
|
9655749d21e156e71801b2d2b71bbcac78032a21
|
[
"MIT"
] |
permissive
|
tgorgdotcom/locast2plex
|
6e1672185f41d7df75bc6726b312b2caeb2bb834
|
e57aeadae2ce6bbd92b515038da805209838cf1f
|
refs/heads/main
| 2023-08-31T11:32:02.580614
| 2021-08-26T03:29:52
| 2021-08-26T03:29:52
| 237,564,821
| 263
| 72
|
MIT
| 2021-08-05T21:27:22
| 2020-02-01T05:02:06
|
Python
|
UTF-8
|
Python
| false
| false
| 8,153
|
py
|
locast_service.py
|
# pylama:ignore=E722,E303
import json
import sys
import urllib.error
import urllib.parse
import urllib.request
import pathlib
from datetime import datetime
import lib.m3u8 as m3u8
import lib.stations as stations
from lib.l2p_tools import handle_url_except
class LocastService:
location = {
"latitude": None,
"longitude": None,
"DMA": None,
"city": None,
"active": False
}
current_token = None
DEFAULT_USER_AGENT = 'Mozilla/5.0'
def __init__(self, location):
self.location = location
@handle_url_except
def login(self, username, password):
# check environment vars
if (username is None):
print("Usernanme not specified in config.ini. Exiting...")
return False
if (password is None):
print("Password not specified in config.ini. Exiting...")
return False
# login
print("Logging into Locast using username " + username + "...")
# https://api.locastnet.org/api/user/login
# POST
# {"username":"thomas_vg1@hotmail.com","password":"xxxxxxxx"}
loginReq = urllib.request.Request('https://api.locastnet.org/api/user/login?client_id=9qXBrVzpTjUZmVGsZRnnWQ-7GvGeJ48QWtV9v%2Bbsen4%3D',
('{"username":"' + username + '","password":"' + password + '"}').encode("utf-8"),
{'Content-Type': 'application/json', 'User-agent': self.DEFAULT_USER_AGENT})
loginOpn = urllib.request.urlopen(loginReq)
loginRes = json.load(loginOpn)
loginOpn.close()
self.current_token = loginRes["token"]
return True
@handle_url_except
def validate_user(self):
print("Validating User Info...")
# get user info and make sure we donated
userReq = urllib.request.Request('https://api.locastnet.org/api/user/me',
headers={'Content-Type': 'application/json',
'authorization': 'Bearer ' + self.current_token,
'User-agent': self.DEFAULT_USER_AGENT})
userOpn = urllib.request.urlopen(userReq)
userRes = json.load(userOpn)
userOpn.close()
print("User Info obtained.")
print("User didDonate: {}".format(userRes['didDonate']))
# Check if the user has donated, and we got an actual expiration date.
if userRes['didDonate'] and userRes['donationExpire']:
# Check if donation has expired.
donateExp = datetime.fromtimestamp(userRes['donationExpire'] / 1000)
print("User donationExpire: {}".format(donateExp))
if datetime.now() > donateExp:
print("Error! User's donation ad-free period has expired.")
return False
else:
print("Error! User must donate for this to work.")
return False
return True
def get_stations(self):
# TODO: check if we don't return any results
print("Getting list of stations based on DMA...")
try:
# https://api.locastnet.org/api/watch/epg/504
# get stations
stationsReq = urllib.request.Request('https://api.locastnet.org/api/watch/epg/' + str(self.location["DMA"]),
headers={'Content-Type': 'application/json',
'authorization': 'Bearer ' + self.current_token,
'User-agent': self.DEFAULT_USER_AGENT})
stationsOpn = urllib.request.urlopen(stationsReq)
stationsRes = json.load(stationsOpn)
stationsOpn.close()
except urllib.error.URLError as urlError:
print("Error when getting the list of stations: " + str(urlError.reason))
return False
except urllib.error.HTTPError as httpError:
print("Error when getting the list of stations: " + str(httpError.reason))
return False
except:
stationErr = sys.exc_info()[0]
if hasattr(stationErr, 'message'):
print("Error when getting the list of stations: " + stationErr.message)
elif hasattr(stationErr, 'reason'):
print("Error when getting the list of stations: " + stationErr.reason)
else:
print("Error when getting the list of stations: " + str(stationErr))
return False
return stationsRes
def get_station_stream_uri(self, station_id):
print("Getting station info for " + station_id + "...")
try:
videoUrlReq = urllib.request.Request('https://api.locastnet.org/api/watch/station/' +
str(station_id) + '/' +
self.location['latitude'] + '/' +
self.location['longitude'],
headers={'Content-Type': 'application/json',
'authorization': 'Bearer ' + self.current_token,
'User-agent': self.DEFAULT_USER_AGENT})
videoUrlOpn = urllib.request.urlopen(videoUrlReq)
videoUrlRes = json.load(videoUrlOpn)
videoUrlOpn.close()
except urllib.error.URLError as urlError:
print("Error when getting the video URL: " + str(urlError.reason))
return False
except urllib.error.HTTPError as httpError:
print("Error when getting the video URL: " + str(httpError.reason))
return False
except:
videoUrlReqErr = sys.exc_info()[0]
if hasattr(videoUrlReqErr, 'message'):
print("Error when getting the video URL: " + videoUrlReqErr.message)
elif hasattr(videoUrlReqErr, 'reason'):
print("Error when getting the video URL: " + videoUrlReqErr.reason)
else:
print("Error when getting the video URL: " + str(videoUrlReqErr))
return False
print("Determining best video stream for " + station_id + "...")
bestStream = None
# find the highest stream URL resolution and save it to the list
videoUrlM3u = m3u8.load(videoUrlRes['streamUrl'], headers={'authorization': 'Bearer ' + self.current_token,
'User-agent': self.DEFAULT_USER_AGENT})
print("Found " + str(len(videoUrlM3u.playlists)) + " Playlists")
if len(videoUrlM3u.playlists) > 0:
for videoStream in videoUrlM3u.playlists:
if bestStream is None:
bestStream = videoStream
elif ((videoStream.stream_info.resolution[0] > bestStream.stream_info.resolution[0]) and
(videoStream.stream_info.resolution[1] > bestStream.stream_info.resolution[1])):
bestStream = videoStream
elif ((videoStream.stream_info.resolution[0] == bestStream.stream_info.resolution[0]) and
(videoStream.stream_info.resolution[1] == bestStream.stream_info.resolution[1]) and
(videoStream.stream_info.bandwidth > bestStream.stream_info.bandwidth)):
bestStream = videoStream
if bestStream is not None:
print(station_id + " will use " +
str(bestStream.stream_info.resolution[0]) + "x" + str(bestStream.stream_info.resolution[1]) +
" resolution at " + str(bestStream.stream_info.bandwidth) + "bps")
return bestStream.absolute_uri
else:
print("No variant streams found for this station. Assuming single stream only.")
return videoUrlRes['streamUrl']
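# --- Hedged usage sketch (not part of the original file) ---
# The service expects a location dict shaped like the class-level default,
# with DMA and coordinates filled in. Every value below is a placeholder, not
# a real account, location, or station.
#
#     svc = LocastService({'latitude': '38.89', 'longitude': '-77.03',
#                          'DMA': 511, 'city': 'Washington', 'active': True})
#     if svc.login('user@example.com', 'password') and svc.validate_user():
#         uri = svc.get_station_stream_uri('12345')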
|
54bd12b037e5ab01bef178aeb58d7e6ed030cbaf
|
da1721d2783ea4d67ff4e73cee6eee71292f2ef7
|
/toontown/effects/FireworkShow.py
|
2f1302f6590bc657cb6dc3159a9114d51f33337e
|
[
"BSD-3-Clause"
] |
permissive
|
open-toontown/open-toontown
|
bbdeb1b7bf0fb2861eba2df5483738c0112090ca
|
464c2d45f60551c31397bd03561582804e760b4a
|
refs/heads/develop
| 2023-07-07T01:34:31.959657
| 2023-05-30T23:49:10
| 2023-05-30T23:49:10
| 219,221,570
| 143
| 104
|
BSD-3-Clause
| 2023-09-11T09:52:34
| 2019-11-02T22:24:38
|
Python
|
UTF-8
|
Python
| false
| false
| 60,564
|
py
|
FireworkShow.py
|
from panda3d.core import *
from direct.interval.IntervalGlobal import *
from toontown.effects.FireworkGlobals import *
from toontown.effects.Firework import Firework
from toontown.toonbase import ToontownGlobals
from toontown.parties import PartyGlobals
import random
colors = [Vec4(1, 1, 1, 1),
Vec4(1, 0.1, 0.1, 1),
Vec4(0.1, 1, 0.1, 1),
Vec4(0.3, 1, 0.3, 1),
Vec4(0.2, 0.2, 1, 1),
Vec4(1, 1, 0.1, 1),
Vec4(1, 0.5, 0.1, 1),
Vec4(1, 0.1, 1, 1),
Vec4(0.1, 1, 1, 1),
Vec4(0.1, 0.5, 1, 1)]
fireworkShowTypes = [ToontownGlobals.JULY4_FIREWORKS,
PartyGlobals.FireworkShows.Summer,
ToontownGlobals.NEWYEARS_FIREWORKS,
ToontownGlobals.COMBO_FIREWORKS]
class FireworkShow(NodePath):
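    # These helpers are evaluated once, while the class body below is being
    # defined, to randomize the showData entries (velocity, position, scale,
    # colors, trail duration).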
def r():
return random.randint(8, 12) / 10.0
def rV():
return Vec3(random.randint(-60, 60), random.randint(10, 30), random.randint(125, 150))
def rP():
return Point3(0, 0, 0)
def rS():
return 1.0 + random.random() / 2.0
def rC():
return random.choice(colors)
def rT():
return random.randint(12, 20) / 10.0
def rD():
return random.randint(1, 20) / 10.0
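
    # Each showData entry is [fireworkType, velocity, position, scale,
    # color1, color2, trailDuration, delay-before-next-firework]; see the
    # unpacking in beginSection().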
showData = {ToontownGlobals.JULY4_FIREWORKS: [[FireworkType.GlowFlare,
Vec3(-90, 0, 80),
Vec3(120, 0, 0),
rS(),
Vec4(1, 1, 1, 1),
Vec4(1, 1, 1, 1),
1.5,
0.0],
[FireworkType.GlowFlare,
Vec3(90, 0, 80),
Vec3(-120, 0, 0),
rS(),
Vec4(1, 1, 1, 1),
Vec4(1, 1, 1, 1),
1.5,
1.0],
[FireworkType.BasicPeony,
Vec3(50, 0, 140),
rP(),
rS(),
Vec4(1, 1, 1, 1),
Vec4(1, 1, 1, 1),
rT(),
0.0],
[FireworkType.BasicPeony,
Vec3(-50, 0, 140),
rP(),
rS(),
Vec4(1, 1, 1, 1),
Vec4(1, 1, 1, 1),
rT(),
3.0],
[FireworkType.AdvancedPeony,
Vec3(-90, 0, 110),
rP(),
rS(),
rC(),
rC(),
rT(),
0.25],
[FireworkType.AdvancedPeony,
Vec3(0, 0, 90),
rP(),
rS(),
rC(),
rC(),
rT(),
0.25],
[FireworkType.AdvancedPeony,
Vec3(90, 0, 110),
rP(),
rS(),
rC(),
rC(),
rT(),
4.0],
[FireworkType.GlowFlare,
Vec3(-90, 0, 80),
Vec3(120, 0, 0),
1.5,
Vec4(1, 1, 1, 1),
Vec4(1, 1, 1, 1),
3.0,
3.0],
[FireworkType.Ring,
Vec3(-90, 0, 120),
rP(),
rS(),
rC(),
rC(),
rT(),
0.2],
[FireworkType.Ring,
Vec3(-30, 0, 100),
rP(),
rS(),
rC(),
rC(),
rT(),
0.2],
[FireworkType.Ring,
Vec3(30, 0, 120),
rP(),
rS(),
rC(),
rC(),
rT(),
0.2],
[FireworkType.Ring,
Vec3(90, 0, 100),
rP(),
rS(),
rC(),
rC(),
rT(),
3.0],
[FireworkType.Bees,
Vec3(0, 50, 120),
rP(),
rS(),
rC(),
rC(),
rT(),
2.0],
[FireworkType.TrailBurst,
Vec3(-70, 0, 120),
rP(),
rS(),
rC(),
rC(),
rT(),
0.25],
[FireworkType.TrailBurst,
Vec3(70, 0, 120),
rP(),
rS(),
rC(),
rC(),
rT(),
3.0],
[FireworkType.DiademPeony,
Vec3(90, 0, 120),
rP(),
rS(),
rC(),
rC(),
rT(),
0.15],
[FireworkType.DiademPeony,
Vec3(-30, 0, 120),
rP(),
rS(),
rC(),
rC(),
rT(),
0.15],
[FireworkType.DiademPeony,
Vec3(30, 0, 100),
rP(),
rS(),
rC(),
rC(),
rT(),
0.15],
[FireworkType.DiademPeony,
Vec3(-90, 0, 100),
rP(),
rS(),
rC(),
rC(),
rT(),
3.0],
[FireworkType.PalmTree,
Vec3(0, 40, 100),
rP(),
rS(),
rC(),
rC(),
rT(),
4.0],
[FireworkType.Chrysanthemum,
rV(),
rP(),
rS(),
rC(),
rC(),
rT(),
0.0],
[FireworkType.DiademChrysanthemum,
rV(),
rP(),
rS(),
rC(),
rC(),
rT(),
0.5],
[FireworkType.DiademChrysanthemum,
rV(),
rP(),
rS(),
rC(),
rC(),
rT(),
3.0],
[FireworkType.Saturn,
Vec3(90, 0, 120),
rP(),
rS(),
rC(),
rC(),
rT(),
0.0],
[FireworkType.Saturn,
Vec3(-90, 0, 120),
rP(),
rS(),
rC(),
rC(),
rT(),
2.5],
[FireworkType.GlowFlare,
Vec3(0, 0, 90),
Vec3(-120, 0, 0),
rS(),
Vec4(0.1, 0.5, 1, 1),
Vec4(1, 1, 1, 1),
1.5,
1.0],
[FireworkType.GlowFlare,
Vec3(0, 0, 100),
Vec3(-60, 0, 0),
rS(),
Vec4(0.1, 0.5, 1, 1),
Vec4(1, 1, 1, 1),
1.5,
1.0],
[FireworkType.GlowFlare,
Vec3(0, 0, 110),
Vec3(0, 0, 0),
rS(),
Vec4(0.1, 0.5, 1, 1),
Vec4(1, 1, 1, 1),
1.5,
1.0],
[FireworkType.GlowFlare,
Vec3(0, 0, 120),
Vec3(60, 0, 0),
rS(),
Vec4(0.1, 0.5, 1, 1),
Vec4(1, 1, 1, 1),
1.5,
1.0],
[FireworkType.GlowFlare,
Vec3(0, 0, 130),
Vec3(120, 0, 0),
rS(),
Vec4(0.1, 0.5, 1, 1),
Vec4(1, 1, 1, 1),
1.5,
2.0],
[FireworkType.DiademChrysanthemum,
rV(),
rP(),
rS(),
rC(),
rC(),
rT(),
0.5],
[FireworkType.DiademChrysanthemum,
rV(),
rP(),
rS(),
rC(),
rC(),
rT(),
2.0],
[FireworkType.DiademChrysanthemum,
rV(),
rP(),
rS(),
rC(),
rC(),
rT(),
0.5],
[FireworkType.DiademChrysanthemum,
rV(),
rP(),
rS(),
rC(),
rC(),
rT(),
2.0],
[FireworkType.DiademChrysanthemum,
rV(),
rP(),
rS(),
rC(),
rC(),
rT(),
0.5],
[FireworkType.DiademChrysanthemum,
rV(),
rP(),
rS(),
rC(),
rC(),
rT(),
1.0],
[FireworkType.DiademChrysanthemum,
rV(),
rP(),
rS(),
rC(),
rC(),
rT(),
0.5],
[FireworkType.DiademChrysanthemum,
rV(),
rP(),
rS(),
rC(),
rC(),
rT(),
3.0],
[FireworkType.AmericanFlag,
Vec3(0, 0, 230),
Vec3(-50, 0, 0),
rS(),
rC(),
rC(),
rT(),
6],
[FireworkType.DiademPeony,
Vec3(90, 0, 120),
rP(),
rS(),
rC(),
rC(),
2.5,
0.15],
[FireworkType.DiademPeony,
Vec3(30, 0, 140),
rP(),
rS(),
rC(),
rC(),
2.5,
0.15],
[FireworkType.DiademPeony,
Vec3(-30, 0, 120),
rP(),
rS(),
rC(),
rC(),
2.5,
0.15],
[FireworkType.DiademPeony,
Vec3(-90, 0, 140),
rP(),
rS(),
rC(),
rC(),
2.5,
3.0],
[FireworkType.Mickey,
Vec3(0, 0, 100),
rP(),
1.4,
rC(),
rC(),
2.0,
10.0]],
PartyGlobals.FireworkShows.Summer: [[FireworkType.DiademPeony,
Vec3(90, 0, 120),
rP(),
rS(),
rC(),
rC(),
rT(),
0.0],
[FireworkType.DiademPeony,
Vec3(0, 0, 70),
rP(),
rS(),
rC(),
rC(),
rT(),
0.0],
[FireworkType.DiademPeony,
Vec3(-90, 0, 100),
rP(),
rS(),
rC(),
rC(),
rT(),
3.0],
[FireworkType.GlowFlare,
Vec3(0, 0, 130),
Vec3(0, 0, 0),
rS(),
Vec4(0.1, 0.5, 1, 1),
Vec4(1, 1, 1, 1),
3.5,
1.0],
[FireworkType.GlowFlare,
Vec3(0, 0, 90),
Vec3(-50, 0, 0),
rS(),
Vec4(0.1, 0.5, 1, 1),
Vec4(1, 1, 1, 1),
2.5,
0.0],
[FireworkType.GlowFlare,
Vec3(0, 0, 90),
Vec3(50, 0, 0),
rS(),
Vec4(0.1, 0.5, 1, 1),
Vec4(1, 1, 1, 1),
2.5,
2.0],
[FireworkType.DiademChrysanthemum,
Vec3(40, 50, 140),
rP(),
rS(),
rC(),
rC(),
rT(),
1.5],
[FireworkType.DiademChrysanthemum,
Vec3(-40, -50, 140),
rP(),
rS(),
rC(),
rC(),
rT(),
3.0],
[FireworkType.GlowFlare,
Vec3(-90, 0, 80),
Vec3(120, 0, 0),
1.5,
Vec4(1, 1, 1, 1),
Vec4(1, 1, 1, 1),
3.0,
5.5],
[FireworkType.DiademChrysanthemum,
Vec3(0, 0, 100),
Vec3(-120, 0, 0),
rS(),
rC(),
rC(),
rT(),
0.5],
[FireworkType.GlowFlare,
Vec3(0, 0, 100),
Vec3(-120, 0, 0),
rS(),
rC(),
rC(),
1.5,
1.0],
[FireworkType.DiademChrysanthemum,
Vec3(0, 0, 100),
Vec3(0, 20, 0),
rS(),
rC(),
rC(),
rT(),
0.5],
[FireworkType.GlowFlare,
Vec3(0, 0, 100),
Vec3(0, 20, 0),
rS(),
rC(),
rC(),
1.5,
1.0],
[FireworkType.DiademChrysanthemum,
Vec3(0, 0, 100),
Vec3(120, 0, 0),
rS(),
rC(),
rC(),
rT(),
0.5],
[FireworkType.GlowFlare,
Vec3(0, 0, 100),
Vec3(120, 0, 0),
rS(),
rC(),
rC(),
1.5,
5.0],
[FireworkType.AdvancedPeony,
Vec3(-90, 0, 110),
rP(),
rS(),
rC(),
rC(),
rT(),
0.25],
[FireworkType.AdvancedPeony,
Vec3(0, 0, 90),
rP(),
rS(),
rC(),
rC(),
rT(),
0.25],
[FireworkType.AdvancedPeony,
Vec3(90, 0, 110),
rP(),
rS(),
rC(),
rC(),
rT(),
4.0],
[FireworkType.Mickey,
Vec3(70, 0, 120),
rP(),
rS(),
rC(),
rC(),
rT(),
3.0],
[FireworkType.DiademPeony,
Vec3(90, 0, 120),
rP(),
rS(),
rC(),
rC(),
rT(),
0.15],
[FireworkType.DiademPeony,
Vec3(-30, 0, 120),
rP(),
rS(),
rC(),
rC(),
rT(),
0.15],
[FireworkType.DiademPeony,
Vec3(30, 0, 100),
rP(),
rS(),
rC(),
rC(),
rT(),
0.15],
[FireworkType.DiademPeony,
Vec3(-90, 0, 100),
rP(),
rS(),
rC(),
rC(),
rT(),
3.0],
[FireworkType.Bees,
Vec3(0, 0, 100),
rP(),
1.4,
rC(),
rC(),
2.0,
4.0],
[FireworkType.Chrysanthemum,
rV(),
rP(),
rS(),
rC(),
rC(),
rT(),
0.0],
[FireworkType.DiademChrysanthemum,
rV(),
rP(),
rS(),
rC(),
rC(),
rT(),
0.5],
[FireworkType.DiademChrysanthemum,
rV(),
rP(),
rS(),
rC(),
rC(),
rT(),
3.0],
[FireworkType.GlowFlare,
Vec3(200, 0, 180),
Vec3(-60, 0, 0),
rS(),
rC(),
Vec4(1, 1, 1, 1),
1.5,
2.0],
[FireworkType.GlowFlare,
Vec3(150, 10, 180),
Vec3(-60, 0, 0),
rS(),
rC(),
Vec4(1, 1, 1, 1),
1.5,
1.0],
[FireworkType.GlowFlare,
Vec3(100, 20, 180),
Vec3(-60, 0, 0),
rS(),
rC(),
Vec4(1, 1, 1, 1),
1.5,
1.0],
[FireworkType.GlowFlare,
Vec3(50, 30, 180),
Vec3(-60, 0, 0),
rS(),
rC(),
Vec4(1, 1, 1, 1),
1.5,
1.0],
[FireworkType.GlowFlare,
Vec3(0, 40, 180),
Vec3(-60, 0, 0),
rS(),
rC(),
Vec4(1, 1, 1, 1),
1.5,
2.0],
[FireworkType.Saturn,
Vec3(0, 0, 100),
Vec3(-120, 0, 0),
rS(),
rC(),
rC(),
rT(),
0.5],
[FireworkType.GlowFlare,
Vec3(0, 0, 100),
Vec3(-120, 0, 0),
rS(),
rC(),
rC(),
1.5,
1.0],
[FireworkType.Saturn,
Vec3(0, 0, 100),
Vec3(0, 0, 0),
rS(),
rC(),
rC(),
rT(),
0.5],
[FireworkType.GlowFlare,
Vec3(0, 0, 100),
Vec3(0, 0, 0),
rS(),
rC(),
rC(),
1.5,
1.0],
[FireworkType.Saturn,
Vec3(0, 0, 100),
Vec3(120, 0, 0),
rS(),
rC(),
rC(),
rT(),
0.5],
[FireworkType.GlowFlare,
Vec3(0, 0, 100),
Vec3(120, 0, 0),
rS(),
rC(),
rC(),
1.5,
5.0],
[FireworkType.GlowFlare,
Vec3(-15, 0, 60),
Vec3(0, 0, 0),
rS(),
Vec4(1, 1, 0.4, 1),
Vec4(1, 1, 1, 1),
2.5,
0.0],
[FireworkType.GlowFlare,
Vec3(15, 0, 60),
Vec3(0, 0, 0),
rS(),
Vec4(1, 1, 0.4, 1),
Vec4(1, 1, 1, 1),
2.5,
0.0],
[FireworkType.IceCream,
Vec3(0, 0, 80),
Vec3(0, 0, 0),
1.0,
Vec4(1, 1, 1, 1),
Vec4(1, 1, 1, 1),
1.5,
0.0],
[FireworkType.IceCream,
Vec3(0, 0, 110),
Vec3(0, 0, 0),
0.6,
Vec4(1, 1, 1, 1),
Vec4(1, 1, 1, 1),
1.5,
0.0],
[FireworkType.IceCream,
Vec3(0, 0, 130),
Vec3(0, 0, 0),
0.3,
Vec4(1, 1, 1, 1),
Vec4(1, 1, 1, 1),
1.5,
10.0]],
ToontownGlobals.NEWYEARS_FIREWORKS: [[FireworkType.GlowFlare,
Vec3(0, 0, 180),
Vec3(-120, 0, 0),
rS(),
rC(),
Vec4(1, 1, 1, 1),
1.5,
1.0],
[FireworkType.GlowFlare,
Vec3(0, 0, 120),
Vec3(-60, 0, 0),
rS(),
rC(),
Vec4(1, 1, 1, 1),
1.5,
1.0],
[FireworkType.GlowFlare,
Vec3(0, 0, 80),
Vec3(-10, 0, 0),
rS(),
rC(),
Vec4(1, 1, 1, 1),
1.5,
1.0],
[FireworkType.GlowFlare,
Vec3(0, 0, 80),
Vec3(10, 0, 0),
rS(),
rC(),
Vec4(1, 1, 1, 1),
1.5,
1.0],
[FireworkType.GlowFlare,
Vec3(0, 0, 120),
Vec3(60, 0, 0),
rS(),
rC(),
Vec4(1, 1, 1, 1),
1.5,
1.0],
[FireworkType.GlowFlare,
Vec3(0, 0, 180),
Vec3(120, 0, 0),
rS(),
rC(),
Vec4(1, 1, 1, 1),
1.5,
2.0],
[FireworkType.GlowFlare,
Vec3(0, 0, 80),
Vec3(120, 0, 0),
rS(),
rC(),
Vec4(1, 1, 1, 1),
1.5,
1.0],
[FireworkType.GlowFlare,
Vec3(0, 0, 120),
Vec3(60, 0, 0),
rS(),
rC(),
Vec4(1, 1, 1, 1),
1.5,
1.0],
[FireworkType.GlowFlare,
Vec3(0, 0, 180),
Vec3(10, 0, 0),
rS(),
rC(),
Vec4(1, 1, 1, 1),
1.5,
1.0],
[FireworkType.GlowFlare,
Vec3(0, 0, 180),
Vec3(-10, 0, 0),
rS(),
rC(),
Vec4(1, 1, 1, 1),
1.5,
1.0],
[FireworkType.GlowFlare,
Vec3(0, 0, 120),
Vec3(-60, 0, 0),
rS(),
rC(),
Vec4(1, 1, 1, 1),
1.5,
1.0],
[FireworkType.GlowFlare,
Vec3(0, 0, 80),
Vec3(-120, 0, 0),
rS(),
rC(),
Vec4(1, 1, 1, 1),
1.5,
2.0],
[FireworkType.GlowFlare,
Vec3(-180, 0, 180),
Vec3(-60, 0, 0),
rS(),
rC(),
Vec4(1, 1, 1, 1),
2.5,
0.15],
[FireworkType.GlowFlare,
Vec3(180, 0, 180),
Vec3(60, 0, 0),
rS(),
rC(),
Vec4(1, 1, 1, 1),
2.5,
0.15],
[FireworkType.DiademChrysanthemum,
Vec3(40, 50, 140),
rP(),
rS(),
rC(),
rC(),
rT(),
1.5],
[FireworkType.DiademChrysanthemum,
Vec3(-40, -50, 140),
rP(),
rS(),
rC(),
rC(),
rT(),
3.0],
[FireworkType.DiademChrysanthemum,
Vec3(-140, 50, 120),
Vec3(0, 0, 0),
rS(),
rC(),
rC(),
rT(),
0.25],
[FireworkType.DiademChrysanthemum,
Vec3(70, -40, 90),
Vec3(0, 0, 0),
rS(),
rC(),
rC(),
1.5,
0.25],
[FireworkType.DiademChrysanthemum,
Vec3(-100, 30, 60),
Vec3(0, 0, 0),
rS(),
rC(),
rC(),
rT(),
0.25],
[FireworkType.DiademChrysanthemum,
Vec3(0, 20, 100),
Vec3(0, 0, 0),
rS(),
rC(),
rC(),
1.5,
0.25],
[FireworkType.DiademChrysanthemum,
Vec3(-70, 0, 130),
Vec3(0, 0, 0),
rS(),
rC(),
rC(),
rT(),
0.5],
[FireworkType.DiademChrysanthemum,
Vec3(120, 50, 100),
Vec3(0, 0, 0),
rS(),
rC(),
rC(),
1.5,
3.5],
[FireworkType.Mickey,
Vec3(70, 0, 120),
rP(),
rS(),
rC(),
rC(),
rT(),
3.5],
[FireworkType.DiademPeony,
Vec3(90, 0, 120),
rP(),
rS(),
rC(),
rC(),
rT(),
0.15],
[FireworkType.DiademPeony,
Vec3(-30, 0, 120),
rP(),
rS(),
rC(),
rC(),
rT(),
0.15],
[FireworkType.DiademPeony,
Vec3(30, 0, 100),
rP(),
rS(),
rC(),
rC(),
rT(),
0.15],
[FireworkType.DiademPeony,
Vec3(-90, 0, 100),
rP(),
rS(),
rC(),
rC(),
rT(),
3.0],
[FireworkType.Chrysanthemum,
rV(),
rP(),
rS(),
rC(),
rC(),
rT(),
0.15],
[FireworkType.DiademChrysanthemum,
rV(),
rP(),
rS(),
rC(),
rC(),
rT(),
0.5],
[FireworkType.DiademChrysanthemum,
rV(),
rP(),
rS(),
rC(),
rC(),
rT(),
1.5],
[FireworkType.DiademChrysanthemum,
rV(),
rP(),
rS(),
rC(),
rC(),
rT(),
3.0],
[FireworkType.Saturn,
Vec3(0, 0, 100),
Vec3(-120, 0, 0),
rS(),
rC(),
rC(),
rT(),
0.5],
[FireworkType.Saturn,
Vec3(20, 0, 70),
Vec3(-120, 0, 0),
rS(),
rC(),
rC(),
rT(),
0.5],
[FireworkType.DiademPeony,
Vec3(-30, 0, 120),
Vec3(120, 0, 0),
rS(),
rC(),
rC(),
rT(),
0.5],
[FireworkType.DiademPeony,
Vec3(0, 0, 90),
Vec3(120, 0, 0),
rS(),
rC(),
rC(),
rT(),
4.0],
[FireworkType.DiademPeony,
Vec3(-140, 50, 120),
Vec3(0, 0, 0),
rS(),
rC(),
rC(),
rT(),
0.25],
[FireworkType.DiademChrysanthemum,
Vec3(70, -40, 90),
Vec3(0, 0, 0),
rS(),
rC(),
rC(),
1.5,
0.25],
[FireworkType.DiademPeony,
Vec3(-100, 30, 60),
Vec3(0, 0, 0),
rS(),
rC(),
rC(),
2.25,
0.25],
[FireworkType.DiademChrysanthemum,
Vec3(0, 20, 100),
Vec3(0, 0, 0),
rS(),
rC(),
rC(),
1.5,
2.0],
[FireworkType.DiademPeony,
Vec3(-70, 0, 130),
Vec3(0, 0, 0),
rS(),
rC(),
rC(),
rT(),
1.5],
[FireworkType.DiademChrysanthemum,
Vec3(120, 50, 100),
Vec3(0, 0, 0),
rS(),
rC(),
rC(),
1.5,
5.0],
[FireworkType.Bees,
Vec3(0, 0, 100),
rP(),
1.4,
rC(),
rC(),
2.0,
10.0]]}
showData[ToontownGlobals.COMBO_FIREWORKS] = showData[ToontownGlobals.NEWYEARS_FIREWORKS]
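    # sectionData maps each show type to (startIndex, endIndex) slices of its
    # showData; begin() schedules one beginSection task per slice.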
sectionData = {ToontownGlobals.JULY4_FIREWORKS: [(0, 24), (24, len(showData[ToontownGlobals.JULY4_FIREWORKS]))],
PartyGlobals.FireworkShows.Summer: [(0, 24), (24, len(showData[PartyGlobals.FireworkShows.Summer]))],
ToontownGlobals.NEWYEARS_FIREWORKS: [(0, len(showData[PartyGlobals.FireworkShows.Summer]))],
ToontownGlobals.COMBO_FIREWORKS: [(0, len(showData[PartyGlobals.FireworkShows.Summer]))]}
showMusic = {}
@classmethod
def isValidShowType(cls, showType = -1):
        return showType in cls.showData
def __init__(self, showType = ToontownGlobals.NEWYEARS_FIREWORKS):
NodePath.__init__(self, 'FireworkShow')
self.showType = showType
self.sectionIvals = []
self.fireworks = []
self.delaySectionStart = None
self.curSection = None
self.curOffset = 0.0
return
def beginSection(self, startIndex, endIndex, offset):
taskMgr.remove('beginSection' + str(startIndex) + str(endIndex))
sectionIval = Parallel()
time = 2.0
showMusic = self.showMusic.get(self.showType)
if showMusic:
base.musicMgr.load(showMusic, looping=False)
musicOffset = self.getDuration(0, startIndex) - self.getDuration(startIndex, startIndex) + offset
sectionIval.append(Func(base.musicMgr.request, showMusic, priority=2, looping=False))
sectionIval.append(Func(base.musicMgr.offsetMusic, musicOffset))
sectionData = self.showData.get(self.showType)[startIndex:endIndex]
for fireworkInfo in sectionData:
typeId = fireworkInfo[0]
velocity = fireworkInfo[1]
pos = fireworkInfo[2]
scale = fireworkInfo[3]
color1 = fireworkInfo[4]
color2 = fireworkInfo[5]
if color2 == -1:
color2 = color1
trailDur = fireworkInfo[6]
delay = fireworkInfo[7]
firework = Firework(typeId, velocity, scale, color1, color2, trailDur)
firework.reparentTo(self)
firework.setPos(pos)
self.fireworks.append(firework)
sectionIval.append(Sequence(Wait(time), firework.generateFireworkIval()))
time += delay
self.sectionIvals.append(sectionIval)
self.curSection = sectionIval
self.curOffset = offset
self.delaySectionStart = FrameDelayedCall('delaySectionStart', self.startCurSection, frames=24)
def startCurSection(self):
self.curSection.start(self.curOffset)
def begin(self, timestamp):
time = 0.0
for section in self.sectionData.get(self.showType):
startIndex = section[0]
endIndex = section[1]
sectionDur = self.getDuration(startIndex, endIndex)
if timestamp < sectionDur:
timestamp = max(0.0, timestamp)
taskMgr.doMethodLater(time, self.beginSection, 'beginSection' + str(startIndex) + str(endIndex), extraArgs=[startIndex, endIndex, timestamp])
time = time + sectionDur - timestamp
timestamp -= sectionDur
def getDuration(self, startIndex = 0, endIndex = None):
duration = 0.0
        if endIndex is None:
endIndex = len(self.showData.get(self.showType))
for firework in self.showData.get(self.showType)[startIndex:endIndex]:
duration += firework[7]
return duration
def getShowDuration(self, eventId = None):
duration = 0.0
if eventId:
for firework in self.showData[eventId]:
duration += firework[7]
else:
for firework in self.showData[self.showType]:
duration += firework[7]
return duration
def isPlaying(self):
for ival in self.sectionIvals:
if ival.isPlaying():
return True
return False
def cleanupShow(self):
if self.delaySectionStart:
self.delaySectionStart.destroy()
del self.delaySectionStart
self.delaySectionStart = None
showMusic = self.showMusic.get(self.showType)
if showMusic:
base.musicMgr.requestFadeOut(showMusic)
for section in self.sectionData.get(self.showType):
startIndex = section[0]
endIndex = section[1]
taskMgr.remove('beginSection' + str(startIndex) + str(endIndex))
        for ival in self.sectionIvals:
            ival.pause()
        self.sectionIvals = []
        for firework in self.fireworks:
            firework.cleanup()
        self.fireworks = []
return
|
15088fb2c72a2fc53fc483c4cd2495f36ebd4828
|
bc5f2a47f94b026b4f63316954fa1c50ef69fcd7
|
/syntaxeditor/Data/syntaxeditor/PythonSource.py
|
b859616a8a5478ab41bc554e6df384eb32388cdb
|
[] |
no_license
|
syncfusion/wpf-demos
|
a3d74687af51159ead5fb8802541df866275e2bd
|
4de3b817518bdcf2aaece7776008e476d3d534e4
|
refs/heads/master
| 2023-08-09T21:05:44.170803
| 2023-07-28T06:03:54
| 2023-07-28T06:03:54
| 144,170,538
| 224
| 129
| null | 2023-03-24T10:19:34
| 2018-08-09T15:21:03
| null |
UTF-8
|
Python
| false
| false
| 5,066
|
py
|
PythonSource.py
|
import clr
clr.AddReference("WindowsBase")
clr.AddReference("PresentationCore")
clr.AddReference("PresentationFramework")
from System.Collections.Generic import *
from System import *
from System.ComponentModel import *
from System.Collections.ObjectModel import *
from System.Windows.Threading import *
from System.Text import *
# NotifyPropertyChangedBase class implements INotifyPropertyChanged interface
class NotifyPropertyChangedBase(INotifyPropertyChanged):
"""http://sdlsdk.codeplex.com/Thread/View.aspx?ThreadId=30322"""
PropertyChanged = None
def __init__(self):
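        # make_event is not defined in this sample; it is assumed to be
        # supplied by the hosting environment (see the thread linked above)
        # and to return an (event, raiser) pair.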
(self.PropertyChanged, self._propertyChangedCaller) = make_event()
def add_PropertyChanged(self, value):
self.PropertyChanged += value
def remove_PropertyChanged(self, value):
self.PropertyChanged -= value
def OnPropertyChanged(self, propertyName):
self._propertyChangedCaller(self, PropertyChangedEventArgs(propertyName))
class StockData(NotifyPropertyChangedBase):
"""
Implementing business object StockData
"""
def Symbol(self, value):
self.Symbol = value
self.OnPropertyChanged("Symbol")
def Account(self, value):
self.Account = value
self.OnPropertyChanged("Account")
def LastTrade(self, value):
self.LastTrade = value
self.OnPropertyChanged("LastTrade")
def Change(self, value):
self.Change = value
self.OnPropertyChanged("Change")
def PreviousClose(self, value):
self.PreviousClose = value
self.OnPropertyChanged("PreviousClose")
def Open(self, value):
self.Open = value
self.OnPropertyChanged("Open")
def Volume(self, value):
self.Volume = value
self.OnPropertyChanged("Volume")
def InitializeOn(self, other):
self.Symbol = other.Symbol
self.LastTrade = other.LastTrade
self.Change = other.Change
        self.PreviousClose = other.PreviousClose
self.Open = other.Open
self.Volume = other.Volume
class StocksViewModel(object):
def __init__(self):
self.r = Random()
self.data = ObservableCollection[StockData]()
self.AddRows(10)
self.timer = DispatcherTimer()
self.timer.Interval = TimeSpan.FromMilliseconds(500)
self.timer.Tick += self.OnTimerTick
self.StartTimer()
@property
def Stocks(self):
return self.data
""" Timer related code """
def StartTimer(self):
if not self.timer.IsEnabled:
self.timer.Start()
def StopTimer(self):
self.timer.Stop()
def OnTimerTick(self, sender, eventargs):
self.AddRows(self.r.Next(5))
self.ChangeRows(self.r.Next(20))
self.DeleteRows(self.r.Next(5))
def AddRows(self, count):
for i in range(0, count):
newRec = StockData()
newRec.Symbol = self.ChangeSymbol()
newRec.Account = self.ChangeAccount()
newRec.Open = Math.Round(self.r.NextDouble() * 30, 2)
newRec.LastTrade = Math.Round(1 + self.r.NextDouble() * 50)
d = self.r.NextDouble()
if d < 0.5:
newRec.Change = Math.Round(d, 2)
else:
newRec.Change = Math.Round(d, 2) * -1
newRec.PreviousClose = Math.Round(self.r.NextDouble() * 30, 2)
newRec.Volume = self.r.Next()
self.data.Add(newRec)
def ChangeSymbol(self):
builder = StringBuilder()
random = Random()
for i in range(0, 3):
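            # pick a random uppercase ASCII letter (character codes 65-90)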
ch = Convert.ToChar(Convert.ToInt32(Math.Floor(26 * random.NextDouble() + 65)))
builder.Append(ch)
return builder.ToString()
def ChangeAccount(self):
random = Random()
next = random.Next(1, 5)
if next == 1:
return "American Funds"
elif next == 2:
return "ChildrenCollegeSavings"
elif next == 3:
return "DayTrading"
elif next == 4:
return "RetirementSavings"
else:
return "FidelityFunds"
def DeleteRows(self, count):
if count < self.data.Count:
for i in range(0, count):
row = self.r.Next(self.data.Count)
self.data.RemoveAt(row)
def ChangeRows(self, count):
if count < self.data.Count:
for i in range(0, count):
recNo = self.r.Next(self.data.Count)
recRow = self.data[recNo]
recRow.LastTrade = Math.Round((1 + self.r.NextDouble() * 50))
d = self.r.NextDouble()
if d < 0.5:
recRow.Change = Math.Round(d, 2)
else:
recRow.Change = Math.Round(d, 2) * -1
recRow.PreviousClose = Math.Round(self.r.NextDouble() * 30, 2)
recRow.Volume = self.r.Next()
s = StocksViewModel()
grid = Application.FindName('dataGrid')
grid.ItemsSource = s.Stocks
|
b64616e6b19affdcce9f61dd1cf5cb9f8a719aca
|
e05d08569d36c7bc2b3d5ac5c90dceb63e4a7719
|
/setup.py
|
699e12536d7e0c19f3a0ed51a8ac0166ae324861
|
[
"MIT"
] |
permissive
|
korymath/talk-generator
|
612c7619edccc5ec33fdaa62f1f85f89b6b24dfc
|
bc1fa3a2a628e7f3033513236603d6902668ae29
|
refs/heads/master
| 2022-12-21T04:37:30.718955
| 2021-11-23T00:01:39
| 2021-11-23T00:01:39
| 139,507,918
| 125
| 11
|
MIT
| 2022-11-22T08:49:03
| 2018-07-03T00:19:42
|
Python
|
UTF-8
|
Python
| false
| false
| 1,943
|
py
|
setup.py
|
from os import listdir
from os.path import isfile, join
from setuptools import setup
from setuptools import find_packages
# Build a list of text-templates to install
DATA_PATH = "talkgenerator/data/"
text_templates_path = DATA_PATH + "text-templates/"
text_template_files = [
f for f in listdir(text_templates_path) if isfile(join(text_templates_path, f))
]
all_text_templates = []
for f in text_template_files:
all_text_templates.append(text_templates_path + f)
prohibited_images_path = DATA_PATH + "prohibited_images/"
prohibited_images_files = [
f
for f in listdir(prohibited_images_path)
if isfile(join(prohibited_images_path, f))
]
prohibited_images = []
for f in prohibited_images_files:
prohibited_images.append(prohibited_images_path + f)
with open('requirements.txt') as f:
required = f.read().splitlines()
setup(
name="talkgenerator",
version="3.0",
description="Automatically generating presentation slide decks based on a given topic for improvised presentations",
long_description="Check our GitHub repository on https://github.com/korymath/talk-generator for more information!",
author="Thomas Winters, Kory Mathewson",
author_email="info@thomaswinters.be",
url="https://github.com/korymath/talk-generator",
license="MIT License",
platforms=["Mac", "Linux"],
    packages=find_packages(),  # auto-discover submodules (previously: ["talkgenerator"])
package_dir={"talkgenerator": "talkgenerator"},
data_files=[
("images", [DATA_PATH + "images/black-transparent.png"]),
("images", [DATA_PATH + "images/error_placeholder.png"]),
("powerpoint", [DATA_PATH + "powerpoint/template.pptx"]),
("prohibited_images", prohibited_images),
("text-templates", all_text_templates),
],
include_package_data=True,
install_requires=required,
entry_points={"console_scripts": ["talkgenerator = talkgenerator.run:main_cli"]},
)
|
14ee4915b5a597f898cf2e2201fcbd81c366d164
|
a4c119e6990f3aae298136b389a36afe190991ca
|
/test/test_fee_setting.py
|
b9ddf150dac9d73ad80ec766333b487457c5c971
|
[
"LicenseRef-scancode-warranty-disclaimer",
"MIT"
] |
permissive
|
bitromortac/lndmanage
|
1a429b3d8431a5df1c44c236bb1b3431bd05fe87
|
7885f02f13eda3c89fda3d53f76310b6f50960d7
|
refs/heads/master
| 2023-08-04T22:20:15.289217
| 2023-04-26T06:33:06
| 2023-04-26T06:33:06
| 180,768,481
| 178
| 22
|
MIT
| 2023-07-20T12:13:22
| 2019-04-11T10:24:07
|
Python
|
UTF-8
|
Python
| false
| false
| 2,820
|
py
|
test_fee_setting.py
|
from unittest import TestCase
import sys
import logging
from test import testing_common
from lndmanage.lib.fee_setting import delta_demand, delta_min, optimization_parameters
testing_common.logger.addHandler(logging.StreamHandler(sys.stdout))
class TestFeeSetter(TestCase):
def test_delta_min(self):
cap = 2000000
# maximal upward adjustment for empty local balance
self.assertAlmostEqual(
1 + optimization_parameters["delta_min_up"],
delta_min(optimization_parameters, local_balance=0, capacity=cap),
)
# no adjustment if local balance is balance reserve
self.assertAlmostEqual(
1,
delta_min(
optimization_parameters,
local_balance=optimization_parameters["local_balance_reserve"],
capacity=cap,
),
)
# maximal downward adjustment for full local balance
self.assertAlmostEqual(
1 - optimization_parameters["delta_min_dn"],
delta_min(optimization_parameters, local_balance=cap, capacity=cap),
)
def test_factor_demand_fee_rate(self):
cap = 2000000
interval_days = 7
# maximal downward adjustment for full local balance and no demand
self.assertAlmostEqual(
1 - optimization_parameters["delta_min_dn"],
delta_demand(
optimization_parameters,
time_interval=interval_days,
amount_out=0,
local_balance=cap,
capacity=cap,
),
places=6,
)
# maximal upward adjustment in the case of empty local balance and no demand
self.assertAlmostEqual(
1 + optimization_parameters["delta_min_up"],
delta_demand(
optimization_parameters,
time_interval=interval_days,
amount_out=0,
local_balance=0,
capacity=cap,
),
places=6,
)
# optimal amount: no change
self.assertAlmostEqual(
1,
delta_demand(
optimization_parameters,
time_interval=interval_days,
amount_out=optimization_parameters["r_t"] * interval_days,
local_balance=cap,
capacity=cap,
),
places=6,
)
# maximal demand: highest change
self.assertAlmostEqual(
optimization_parameters["delta_max"],
delta_demand(
optimization_parameters,
time_interval=interval_days,
amount_out=1000000,
local_balance=cap,
capacity=cap,
),
places=6,
)
|
1222e7f26419435a7d4d2884c5e9ba70b2c2d33c
|
5e8e7a9f9ece885b2182d3a08a4f41d1812222c5
|
/alibi/models/pytorch/model.py
|
f0ad44addb6ba04a3ddc9065ad870f48b903b170
|
[
"Apache-2.0"
] |
permissive
|
SeldonIO/alibi
|
a69007b635b0fe7e02e817baf49355e3a9ed4697
|
54d0c957fb01c7ebba4e2a0d28fcbde52d9c6718
|
refs/heads/master
| 2023-08-29T13:16:14.970241
| 2023-08-01T10:17:08
| 2023-08-01T10:17:08
| 172,687,028
| 2,143
| 244
|
Apache-2.0
| 2023-09-05T00:34:00
| 2019-02-26T10:10:56
|
Python
|
UTF-8
|
Python
| false
| false
| 12,988
|
py
|
model.py
|
"""
This module tries to provide a class wrapper that mimics the TensorFlow API of `tensorflow.keras.Model`. It
is intended to simplify the training of a model through methods like `compile`, `fit` and `evaluate`, which allow
the user to define custom loss functions, optimizers and evaluation metrics, train a model, and evaluate it.
Currently it is used internally to test the functionality of the Pytorch backend. Whether the module will be
exposed to the user in future versions is still to be discussed.
"""
import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import DataLoader
from tqdm import tqdm
from typing import List, Dict, Callable, Union, Tuple, Optional
from alibi.models.pytorch.metrics import Metric, LossContainer
class Model(nn.Module):
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
def compile(self,
optimizer: optim.Optimizer,
loss: Union[Callable, List[Callable]],
loss_weights: Optional[List[float]] = None,
metrics: Optional[List[Metric]] = None):
"""
Compiles a model by setting the optimizer and the loss functions, loss weights and metrics to monitor
the training of the model.
Parameters
----------
optimizer
Optimizer to be used.
loss
            Loss function to be used. Can be a list of loss functions, which will be weighted and summed up to
            compute the total loss.
loss_weights
Weights corresponding to each loss function. Only used if the `loss` argument is a list.
metrics
Metrics used to monitor the training process.
"""
self.optimizer = optimizer
self.metrics = [] if (metrics is None) else metrics
self.loss_weights = [] if (loss_weights is None) else loss_weights
self.loss: Union[LossContainer, List[LossContainer]]
if isinstance(loss, list):
# check if the number of weights is the same as the number of partial losses
if len(self.loss_weights) != len(loss):
raise ValueError("The number of loss weights differs from the number of losses")
self.loss = []
for i, partial_loss in enumerate(loss):
self.loss.append(LossContainer(partial_loss, name=f"output_{i+1}_loss"))
else:
self.loss = LossContainer(loss, name="loss")
def validate_prediction_labels(self,
y_pred: Union[torch.Tensor, List[torch.Tensor]],
y_true: Union[torch.Tensor, List[torch.Tensor]]):
"""
Validates the loss functions, loss weights, training labels and prediction labels.
Parameters
        ----------
y_pred
Prediction labels.
y_true
True labels.
"""
if isinstance(self.loss, list):
# check that prediction is a list
if not isinstance(y_pred, list):
raise ValueError("The prediction should be a list since list of losses have been passed.")
# check that the labels is a list
if not isinstance(y_true, list):
raise ValueError("The label should be a list since list of losses have been passed.")
# check if the number of predictions matches the number of labels
if len(y_true) != len(y_pred):
raise ValueError("Number of predictions differs from the number of labels.")
# check if the number of output heads matches the number of output losses
if len(y_pred) != len(self.loss):
raise ValueError("Number of model's heads differs from the number of losses.")
            # TODO: remove this case since it is already considered
if len(self.loss_weights) != 0 and (len(self.loss_weights) != len(self.loss)):
raise ValueError("Number of loss weights should be equal to the number of losses.")
else:
# check that the prediction is not a list
if isinstance(y_pred, list):
raise ValueError("The prediction is a list and should be a tensor since only one loss has been passed")
# check that the label is not a list
if isinstance(y_true, list):
raise ValueError("The label is a list and should be a tensor since only one loss has been passed")
# check if metrics and predictions agree
if (len(self.metrics) > 0) and (not isinstance(self.metrics, dict)) and isinstance(y_pred, list):
raise ValueError("Multiple model's head require dictionary of metrics.")
def compute_loss(self,
y_pred: Union[torch.Tensor, List[torch.Tensor]],
y_true: Union[torch.Tensor, List[torch.Tensor]]) -> Tuple[torch.Tensor, Dict[str, float]]:
"""
Computes the loss given the prediction labels and the true labels.
Parameters
        ----------
y_pred
Prediction labels.
y_true
True labels.
Returns
-------
A tuple consisting of the total loss computed as a weighted sum of individual losses and a dictionary \
        of individual losses used for logging.
"""
# compute loss
if isinstance(self.loss, list):
assert isinstance(y_pred, list)
assert isinstance(y_true, list)
loss = torch.tensor(0.).to(self.device) # necessary for mypy otherwise use `type: ignore`
results = dict()
for i, partial_loss in enumerate(self.loss):
weight = self.loss_weights[i] if len(self.loss_weights) else 1.
loss += weight * partial_loss(y_pred[i], y_true[i])
results.update({key: weight * val for key, val in partial_loss.result().items()})
# compute total loss
results.update({"loss": sum(results.values())})
else:
assert isinstance(y_pred, torch.Tensor)
assert isinstance(y_true, torch.Tensor)
loss = self.loss(y_pred, y_true)
results = self.loss.result()
return loss, results
def compute_metrics(self,
y_pred: Union[torch.Tensor, List[torch.Tensor]],
y_true: Union[torch.Tensor, List[torch.Tensor]]) -> Dict[str, float]:
"""
Computes the metrics given the prediction labels and the true labels.
Parameters
----------
y_pred
Prediction labels.
y_true
True labels.
"""
results = dict()
if isinstance(self.metrics, dict):
for name in self.metrics:
i = int(name.split("_")[1]) - 1 # name is of the form output_1_... . Maybe we use re?
self.metrics[name].compute_metric(y_pred=y_pred[i], y_true=y_true[i])
# add output prefix in front of the results
result = {name + "_" + key: val for key, val in self.metrics[name].result().items()}
results.update(result)
else: # this is just for one head
assert isinstance(y_pred, torch.Tensor)
assert isinstance(y_true, torch.Tensor)
for metric in self.metrics:
metric.compute_metric(y_pred=y_pred, y_true=y_true)
results.update(metric.result())
return results
def train_step(self, x: torch.Tensor, y: Union[torch.Tensor, List[torch.Tensor]]) -> Dict[str, float]:
"""
Performs a train step.
Parameters
----------
x
Input tensor.
y
Label tensor.
"""
# set model to train
self.train()
# send tensors to device
x = x.to(self.device)
y_true: Union[torch.Tensor, List[torch.Tensor]] = \
[y_i.to(self.device) for y_i in y] if isinstance(y, list) else y.to(self.device)
# compute output
y_pred: Union[torch.Tensor, List[torch.Tensor]] = self.forward(x)
# validate prediction and labels
self.validate_prediction_labels(y_pred=y_pred, y_true=y_true)
# compute loss
loss, results = self.compute_loss(y_pred=y_pred, y_true=y_true)
# perform gradient update
self.optimizer.zero_grad()
loss.backward()
self.optimizer.step()
# update metrics
metrics = self.compute_metrics(y_pred=y_pred, y_true=y_true)
results.update(metrics)
return results
@torch.no_grad()
def test_step(self,
x: torch.Tensor,
y: Union[torch.Tensor, List[torch.Tensor]]):
"""
Performs a test step.
Parameters
----------
x
Input tensor.
y
Label tensor.
"""
# set to evaluation
self.eval()
        # send tensors to device
x = x.to(self.device)
y_true: Union[torch.Tensor, List[torch.Tensor]] = \
[y_i.to(self.device) for y_i in y] if isinstance(y, list) else y.to(self.device)
# compute output
y_pred: torch.Tensor = self.forward(x)
# validate prediction and labels
self.validate_prediction_labels(y_pred=y_pred, y_true=y_true)
# compute loss
loss, results = self.compute_loss(y_pred=y_pred, y_true=y_true)
# update metrics
metrics = self.compute_metrics(y_pred=y_pred, y_true=y_true)
results.update(metrics)
return results
def fit(self, trainloader: DataLoader, epochs: int) -> Dict[str, float]:
"""
Fit method. Equivalent of a training loop.
Parameters
----------
trainloader
Training data loader.
epochs
Number of epochs to train the model.
Returns
-------
Final epoch monitoring metrics.
"""
for epoch in range(epochs):
print("Epoch %d/%d" % (epoch, epochs))
# reset losses and metrics
self._reset_loss()
self._reset_metrics()
# perform train steps in batches
for data in tqdm(trainloader):
if len(data) < 2:
raise ValueError("An input and at least a label should be provided")
x = data[0]
y = data[1] if len(data) == 2 else data[1:]
metrics_vals = self.train_step(x, y)
# print metrics
print(Model._metrics_to_str(metrics_vals))
return metrics_vals
def evaluate(self, testloader: DataLoader) -> Dict[str, float]:
"""
Evaluation function. The function reports the evaluation metrics used for monitoring the training loop.
Parameters
----------
testloader
Test dataloader.
Returns
-------
Evaluation metrics.
"""
self._reset_loss()
self._reset_metrics()
# perform test steps in batches
for data in tqdm(testloader):
if len(data) < 2:
raise ValueError("An input and at least a label should be provided.")
x = data[0]
y = data[1] if len(data) == 2 else data[1:]
metrics_vals = self.test_step(x, y)
# log losses
print(Model._metrics_to_str(metrics_vals))
return metrics_vals
@staticmethod
def _metrics_to_str(metrics: Dict[str, float]) -> str:
"""
Converts a dictionary of metrics into a string for logging purposes.
Parameters
----------
metrics
Dictionary of metrics to be converted into a string.
Returns
-------
String representation of the metrics.
"""
str_losses = ''
for key in metrics:
str_losses += "%s: %.4f\t" % (key, metrics[key])
return str_losses
def _reset_loss(self):
"""
        Resets the losses. Called at the beginning of each epoch.
"""
if isinstance(self.loss, list):
for partial_loss in self.loss:
partial_loss.reset()
else:
self.loss.reset()
def _reset_metrics(self):
"""
Resets the monitoring metrics. Called at the beginning of each epoch.
"""
metrics = self.metrics.values() if isinstance(self.metrics, dict) else self.metrics
for metric in metrics:
metric.reset()
def save_weights(self, path: str) -> None:
"""
        Saves the weights of the current model.
"""
torch.save(self.state_dict(), path)
def load_weights(self, path: str) -> None:
"""
        Loads the weights of the current model.
"""
self.load_state_dict(torch.load(path))
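

if __name__ == "__main__":
    # Illustrative usage sketch (editor addition, not part of alibi): a
    # minimal example of the keras-like workflow described in the module
    # docstring -- subclass `Model`, `compile` with an optimizer and a loss,
    # then `fit` on a DataLoader. The layer sizes and random data below are
    # assumptions chosen purely for illustration.
    from torch.utils.data import TensorDataset

    class _TinyClassifier(Model):
        def __init__(self):
            super().__init__()
            self.fc = nn.Linear(4, 3)

        def forward(self, x):
            return self.fc(x)

    model = _TinyClassifier()
    model.to(model.device)  # move parameters to the device used by train_step
    model.compile(
        optimizer=optim.Adam(model.parameters(), lr=1e-3),
        loss=nn.CrossEntropyLoss(),
    )

    # 64 random 4-feature rows with integer class labels in {0, 1, 2}
    x = torch.randn(64, 4)
    y = torch.randint(0, 3, (64,))
    loader = DataLoader(TensorDataset(x, y), batch_size=16)

    model.fit(loader, epochs=2)
    print(model.evaluate(loader))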
|
edf158b15e9df4962d436123a3f854f83143fd52
|
e22fd36933c9114a9df1694e7a6274bf059de2a6
|
/tools/joystick/joystickd.py
|
82847e3fa11a4c1f2917535bd84efc197a85ab4f
|
[
"LicenseRef-scancode-warranty-disclaimer",
"MIT"
] |
permissive
|
commaai/openpilot
|
66dfb7f31290bc8f58c9ead95d56697a52b45afb
|
a0b49d54222c52ff0112c402bc0e0d9262e77a66
|
refs/heads/master
| 2023-09-05T21:34:14.076796
| 2023-09-05T21:15:18
| 2023-09-05T21:15:18
| 74,627,617
| 46,071
| 9,878
|
MIT
| 2023-09-14T21:51:23
| 2016-11-24T01:33:30
|
Python
|
UTF-8
|
Python
| false
| false
| 4,579
|
py
|
joystickd.py
|
#!/usr/bin/env python
import os
import argparse
import threading
from inputs import get_gamepad
import cereal.messaging as messaging
from openpilot.common.realtime import Ratekeeper
from openpilot.common.numpy_fast import interp, clip
from openpilot.common.params import Params
from openpilot.tools.lib.kbhit import KBHit
class Keyboard:
def __init__(self):
self.kb = KBHit()
self.axis_increment = 0.05 # 5% of full actuation each key press
self.axes_map = {'w': 'gb', 's': 'gb',
'a': 'steer', 'd': 'steer'}
self.axes_values = {'gb': 0., 'steer': 0.}
self.axes_order = ['gb', 'steer']
self.cancel = False
def update(self):
key = self.kb.getch().lower()
self.cancel = False
if key == 'r':
self.axes_values = {ax: 0. for ax in self.axes_values}
elif key == 'c':
self.cancel = True
elif key in self.axes_map:
axis = self.axes_map[key]
incr = self.axis_increment if key in ['w', 'a'] else -self.axis_increment
self.axes_values[axis] = clip(self.axes_values[axis] + incr, -1, 1)
else:
return False
return True
class Joystick:
def __init__(self, gamepad=False):
# TODO: find a way to get this from API, perhaps "inputs" doesn't support it
if gamepad:
self.cancel_button = 'BTN_NORTH' # (BTN_NORTH=X, ABS_RZ=Right Trigger)
accel_axis = 'ABS_Y'
steer_axis = 'ABS_RX'
else:
self.cancel_button = 'BTN_TRIGGER'
accel_axis = 'ABS_Y'
steer_axis = 'ABS_RZ'
self.min_axis_value = {accel_axis: 0., steer_axis: 0.}
self.max_axis_value = {accel_axis: 255., steer_axis: 255.}
self.axes_values = {accel_axis: 0., steer_axis: 0.}
self.axes_order = [accel_axis, steer_axis]
self.cancel = False
def update(self):
joystick_event = get_gamepad()[0]
event = (joystick_event.code, joystick_event.state)
if event[0] == self.cancel_button:
if event[1] == 1:
self.cancel = True
elif event[1] == 0: # state 0 is falling edge
self.cancel = False
elif event[0] in self.axes_values:
self.max_axis_value[event[0]] = max(event[1], self.max_axis_value[event[0]])
self.min_axis_value[event[0]] = min(event[1], self.min_axis_value[event[0]])
norm = -interp(event[1], [self.min_axis_value[event[0]], self.max_axis_value[event[0]]], [-1., 1.])
self.axes_values[event[0]] = norm if abs(norm) > 0.05 else 0. # center can be noisy, deadzone of 5%
else:
return False
return True
def send_thread(joystick):
joystick_sock = messaging.pub_sock('testJoystick')
rk = Ratekeeper(100, print_delay_threshold=None)
while 1:
dat = messaging.new_message('testJoystick')
dat.testJoystick.axes = [joystick.axes_values[a] for a in joystick.axes_order]
dat.testJoystick.buttons = [joystick.cancel]
joystick_sock.send(dat.to_bytes())
print('\n' + ', '.join(f'{name}: {round(v, 3)}' for name, v in joystick.axes_values.items()))
if "WEB" in os.environ:
import requests
requests.get("http://"+os.environ["WEB"]+":5000/control/%f/%f" % tuple([joystick.axes_values[a] for a in joystick.axes_order][::-1]), timeout=None)
rk.keep_time()
def joystick_thread(joystick):
Params().put_bool('JoystickDebugMode', True)
threading.Thread(target=send_thread, args=(joystick,), daemon=True).start()
while True:
joystick.update()
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Publishes events from your joystick to control your car.\n' +
                                   'openpilot must be offroad before starting joystickd.',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('--keyboard', action='store_true', help='Use your keyboard instead of a joystick')
parser.add_argument('--gamepad', action='store_true', help='Use gamepad configuration instead of joystick')
args = parser.parse_args()
if not Params().get_bool("IsOffroad") and "ZMQ" not in os.environ and "WEB" not in os.environ:
print("The car must be off before running joystickd.")
exit()
print()
if args.keyboard:
print('Gas/brake control: `W` and `S` keys')
print('Steering control: `A` and `D` keys')
print('Buttons')
print('- `R`: Resets axes')
print('- `C`: Cancel cruise control')
else:
print('Using joystick, make sure to run cereal/messaging/bridge on your device if running over the network!')
joystick = Keyboard() if args.keyboard else Joystick(args.gamepad)
joystick_thread(joystick)
|
033b83a31252ca3adfa7f1a743d551b05f021430
|
61673ab9a42f7151de7337608c442fa6247f13bb
|
/__scraping__/vanglaini.org - requests, BS/main.py
|
c10b93a127d343d8abd836f5e78b4c26deb71bd7
|
[
"MIT"
] |
permissive
|
furas/python-examples
|
22d101670ecd667a29376d7c7d7d86f8ec71f6cf
|
95cb53b664f312e0830f010c0c96be94d4a4db90
|
refs/heads/master
| 2022-08-23T23:55:08.313936
| 2022-08-01T14:48:33
| 2022-08-01T14:48:33
| 45,575,296
| 176
| 91
|
MIT
| 2021-02-17T23:33:37
| 2015-11-04T23:54:32
|
Python
|
UTF-8
|
Python
| false
| false
| 573
|
py
|
main.py
|
#!/usr/bin/env python3
# date: 2019.10.12
import requests
from bs4 import BeautifulSoup
import pandas as pd
response = requests.get('https://www.vanglaini.org/')
soup = BeautifulSoup(response.text, 'lxml')
data = []
for article in soup.find_all('article'):
if article.a is None:
continue
row = [
article.a.text.strip(), # headline
article.p.text.strip(), # summary
"https://www.vanglaini.org" + article.a['href'] # link
]
data.append(row)
df = pd.DataFrame(data, columns=['Headline', 'Summary', 'Link'])
print(df)
|
fa9cef351f55a41432d90b73fe3f8dca2db3c031
|
e9b2c7531440e99afa208f63f8a0fa814b422d58
|
/treenode/metadata.py
|
f5dc9492e0f83f30e1fe9411493eacc4e9814aaa
|
[
"MIT"
] |
permissive
|
fabiocaccamo/django-treenode
|
1adffcac9f178a40c83a45ffe274a817d08d5fb5
|
4bb8ec3d2f4f76d293351e4153b7109f40d38d0f
|
refs/heads/main
| 2023-08-29T03:22:34.982127
| 2023-07-18T08:33:50
| 2023-07-18T08:33:50
| 127,927,721
| 487
| 33
|
MIT
| 2023-09-08T06:16:46
| 2018-04-03T15:16:32
|
Python
|
UTF-8
|
Python
| false
| false
| 285
|
py
|
metadata.py
|
__author__ = "Fabio Caccamo"
__copyright__ = "Copyright (c) 2018-present Fabio Caccamo"
__description__ = "probably the best abstract model/admin for your tree based stuff."
__email__ = "fabio.caccamo@gmail.com"
__license__ = "MIT"
__title__ = "django-treenode"
__version__ = "0.20.1"
|
3d09c933cde265aa3fc2d2e189ba48423ba15dfd
|
fdbfbcf4d6a0ef6f3c1b600e7b8037eed0f03f9e
|
/bindings/pydrake/multibody/_math_extra.py
|
08c6aba11ad595436e8344031861fb4f67b1c4c6
|
[
"BSD-3-Clause"
] |
permissive
|
RobotLocomotion/drake
|
4529c397f8424145623dd70665531b5e246749a0
|
3905758e8e99b0f2332461b1cb630907245e0572
|
refs/heads/master
| 2023-08-30T21:45:12.782437
| 2023-08-30T15:59:07
| 2023-08-30T15:59:07
| 16,256,144
| 2,904
| 1,270
|
NOASSERTION
| 2023-09-14T20:51:30
| 2014-01-26T16:11:05
|
C++
|
UTF-8
|
Python
| false
| false
| 1,392
|
py
|
_math_extra.py
|
import pydrake.autodiffutils as _ad
from pydrake.common import (
_MangledName,
pretty_class_name as _pretty_class_name,
)
import pydrake.symbolic as _sym
def _indented_repr(o):
"""Returns repr(o), with any lines beyond the first one indented +2."""
return repr(o).replace("\n", "\n ")
def _spatial_vector_repr(rotation_name, translation_name):
def repr_with_closure(self):
rotation = self.rotational().tolist()
translation = self.translational().tolist()
return (
f"{_pretty_class_name(type(self))}(\n"
f" {rotation_name}={_indented_repr(rotation)},\n"
f" {translation_name}={_indented_repr(translation)},\n"
f")")
return repr_with_closure
def _add_repr_functions():
for T in [float, _ad.AutoDiffXd, _sym.Expression]:
SpatialVelocity_[T].__repr__ = _spatial_vector_repr("w", "v")
SpatialMomentum_[T].__repr__ = _spatial_vector_repr("h", "l")
SpatialAcceleration_[T].__repr__ = _spatial_vector_repr("alpha", "a")
SpatialForce_[T].__repr__ = _spatial_vector_repr("tau", "f")
_add_repr_functions()
def __getattr__(name):
"""Rewrites requests for Foo[bar] into their mangled form, for backwards
compatibility with unpickling.
"""
return _MangledName.module_getattr(
module_name=__name__, module_globals=globals(), name=name)
|
bea23c6975592423e288042416c2a71e082be802
|
612a0320d504226f80d25be53fb9564e1c7aa2ac
|
/lbworkflow/flowgen/__init__.py
|
20d3c2c466b0fa85875a90e2d06114db85af22c0
|
[
"MIT"
] |
permissive
|
vicalloy/django-lb-workflow
|
e174510f41bddb33004fd73989989dc78ec30117
|
117dedd331032841540d8bc6b9056fa9d05faecf
|
refs/heads/master
| 2023-06-26T19:48:28.600744
| 2023-06-14T07:21:51
| 2023-06-14T07:21:51
| 86,919,849
| 217
| 73
|
MIT
| 2023-04-21T21:47:52
| 2017-04-01T14:43:42
|
Python
|
UTF-8
|
Python
| false
| false
| 5,452
|
py
|
__init__.py
|
import inspect
import os
import shutil
import stat
from jinja2 import Environment, FileSystemLoader
__all__ = ("FlowAppGenerator", "clean_generated_files")
def clean_generated_files(model_class):
folder_path = os.path.dirname(inspect.getfile(model_class))
for path, dirs, files in os.walk(folder_path):
if not path.endswith(model_class.__name__.lower()):
shutil.rmtree(path)
for file in files:
if file not in ["models.py", "wfdata.py", "__init__.py"]:
try:
os.remove(os.path.join(path, file))
except: # NOQA
pass
def get_fields(model_class):
fields = []
ignore_fields = ["id", "pinstance", "created_on", "created_by"]
for f in model_class._meta.fields:
if f.name not in ignore_fields:
fields.append(f)
return fields
def get_field_names(model_class):
fields = get_fields(model_class)
return ", ".join(["'%s'" % e.name for e in fields])
def group(flat_list):
    # pad to an even length so the elements can be paired up
    for _ in range(len(flat_list) % 2):
        flat_list.append(None)
    return list(zip(flat_list[0::2], flat_list[1::2]))
class FlowAppGenerator(object):
def __init__(self, app_template_path=None):
if not app_template_path:
app_template_path = os.path.join(
os.path.dirname(os.path.abspath(__file__)), "app_template"
)
self.app_template_path = app_template_path
super().__init__()
def init_env(self, template_path):
loader = FileSystemLoader(template_path)
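        # Custom [[ ]] / [% %] delimiters are used so that Django's own
        # {{ }} / {% %} syntax inside the copied template files passes
        # through untouched.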
self.env = Environment(
block_start_string="[%",
block_end_string="%]",
variable_start_string="[[",
variable_end_string="]]",
comment_start_string="[#",
comment_end_string="#]",
loader=loader,
)
def gen(
self,
model_class,
item_model_class_list=None,
wf_code=None,
replace=False,
ignores=["wfdata.py"],
):
dest = os.path.dirname(inspect.getfile(model_class))
app_name = model_class.__module__.split(".")[-2]
if not wf_code:
wf_code = app_name
ctx = {
"app_name": app_name,
"wf_code": wf_code,
"class_name": model_class.__name__,
"wf_name": model_class._meta.verbose_name,
"field_names": get_field_names(model_class),
"fields": get_fields(model_class),
"grouped_fields": group(get_fields(model_class)),
}
if item_model_class_list:
item_list = []
for item_model_class in item_model_class_list:
item_ctx = {
"class_name": item_model_class.__name__,
"lowercase_class_name": item_model_class.__name__.lower(),
"field_names": get_field_names(item_model_class),
"fields": get_fields(item_model_class),
"grouped__fields": group(get_fields(item_model_class)),
}
item_list.append(item_ctx)
ctx["item_list"] = item_list
self.copy_template(self.app_template_path, dest, ctx, replace, ignores)
def copy_template(self, src, dest, ctx={}, replace=False, ignores=[]):
self.init_env(src)
for path, dirs, files in os.walk(src):
relative_path = path[len(src) :].lstrip(os.path.sep)
dest_path = os.path.join(dest, relative_path)
dest_path = dest_path.replace(
"app_name", ctx.get("app_name", "app_name")
)
if not os.path.exists(dest_path):
os.mkdir(dest_path)
            # prune hidden directories in place so os.walk() skips them;
            # deleting entries while enumerating can skip items
            dirs[:] = [d for d in dirs if not d.startswith(".")]
for filename in files:
if filename.endswith(".pyc") or filename.startswith("."):
continue
src_file_path = os.path.join(path, filename)
src_file_path = src_file_path[len(src) :].strip(os.path.sep)
dest_file_path = os.path.join(dest, relative_path, filename)
dest_file_path = dest_file_path.replace(
"app_name", ctx.get("app_name", "app_name")
)
if dest_file_path.endswith("-tpl"):
dest_file_path = dest_file_path[:-4]
is_exists = os.path.isfile(dest_file_path)
for ignore in ignores:
if dest_file_path.endswith(ignore):
replace = False
if is_exists and not replace:
continue
self.copy_template_file(src_file_path, dest_file_path, ctx)
def copy_template_file(self, src, dest, ctx={}):
if os.path.sep != "/":
# https://github.com/pallets/jinja/issues/767
# Jinja template names are not fileystem paths.
# They always use forward slashes so this is working as intended.
src = src.replace(os.path.sep, "/")
template = self.env.get_template(src)
template.stream(ctx).dump(dest, encoding="utf-8")
# Make new file writable.
if os.access(dest, os.W_OK):
st = os.stat(dest)
new_permissions = stat.S_IMODE(st.st_mode) | stat.S_IWUSR
os.chmod(dest, new_permissions)
|
af59b17d4a09471653bec122798c097cb831525c
|
2bbc6975dec5786a1895611d9ca391de841a5616
|
/mkdocs-material/setup.py
|
b2816a043203877e7f0bd247b189a01a298f9eaf
|
[
"MIT"
] |
permissive
|
youzan/ZanRedisDB
|
8a7a2b2fa084f218f18097f3f79eeae9ceece3e4
|
b620268e208d6c09ea2da6eef0811af04ff24843
|
refs/heads/master
| 2023-07-15T00:04:18.356283
| 2022-07-08T03:00:44
| 2022-07-08T03:00:44
| 134,360,234
| 413
| 68
|
MIT
| 2021-06-18T16:21:06
| 2018-05-22T04:23:45
|
Go
|
UTF-8
|
Python
| false
| false
| 2,025
|
py
|
setup.py
|
# Copyright (c) 2016-2017 Martin Donath <martin.donath@squidfunk.com>
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
import json
from setuptools import setup, find_packages
# Load package.json contents
with open("package.json") as data:
package = json.load(data)
# Load list of dependencies
with open("requirements.txt") as data:
install_requires = [
line for line in data.read().split("\n")
if line and not line.startswith("#")
]
# Package description
setup(
name = package["name"],
version = package["version"],
url = package["homepage"],
license = package["license"],
description = package["description"],
author = package["author"]["name"],
author_email = package["author"]["email"],
keywords = package["keywords"],
packages = find_packages(),
include_package_data = True,
install_requires = install_requires,
entry_points = {
"mkdocs.themes": [
"material = material",
]
},
zip_safe = False
)
|
d392124362118b5b68b5b4b70e7c29ff0367bbbf
|
adb83e5cec36bf7c079bc1b69b45eba3f495f5f4
|
/toggl/cli/themes.py
|
f0bba4130a3267eef2c15064ab45b2dfe536326b
|
[
"MIT"
] |
permissive
|
AuHau/toggl-cli
|
b1cda6b361c6cf6759bd857f33a5a18b394310b9
|
7ee7aa8ace000a88035c00a0de7842dbbf83d293
|
refs/heads/master
| 2023-05-10T19:14:07.328960
| 2023-02-18T08:20:38
| 2023-02-18T08:20:38
| 4,233,630
| 224
| 41
|
NOASSERTION
| 2023-05-01T04:57:31
| 2012-05-05T12:32:49
|
Python
|
UTF-8
|
Python
| false
| false
| 824
|
py
|
themes.py
|
"""
The values of the themes are derived from click.style
https://click.palletsprojects.com/en/7.x/api/#click.style
"""
class PlainTheme:
code = 'plain'
name = 'Plain theme'
title = {}
title_id = {}
header = {}
success = {}
error_color = 'red'
class LightTheme:
code = 'light'
name = 'Light theme'
title = {'fg': 'green'}
title_id = {'fg': 'green', 'dim': 1}
header = {'fg': 'bright_black', 'dim': 1}
success = {'fg': 'green'}
error_color = 'red'
class DarkTheme:
code = 'dark'
name = 'Dark theme'
title = {'fg': 'green'}
title_id = {'fg': 'green', 'dim': 1}
header = {'fg': 'white', 'dim': 1}
success = {'fg': 'green'}
error_color = 'red'
themes = {
'dark': DarkTheme,
'light': LightTheme,
'plain': PlainTheme
}
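

# Illustrative usage (editor sketch, not part of the original module): each
# theme attribute is a dict of keyword arguments for click.style/click.secho,
# as noted in the module docstring, e.g.:
#
#     import click
#     click.secho("My time entry", **themes['dark'].title)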
|
b5fe2c35be834481c85b14a5bb21e11edeb46dbf
|
9a101fbc437b37e4f263781dd9a329b97a7587b8
|
/scripts/benchmark_vincenty.py
|
be1688d8b62af2d40af6161ea87d4d9bb9697d82
|
[
"BSD-2-Clause"
] |
permissive
|
geospace-code/pymap3d
|
2aa3e8dfd764df1f248036f1b3b9f1dd6fd2d2cd
|
81172e221a4c9884450a38ec8a7ee382198cb7e3
|
refs/heads/main
| 2023-05-22T20:33:31.022682
| 2023-03-05T03:08:28
| 2023-03-05T05:31:37
| 22,567,162
| 202
| 46
|
BSD-2-Clause
| 2023-05-03T09:05:44
| 2014-08-03T04:28:03
|
Python
|
UTF-8
|
Python
| false
| false
| 1,394
|
py
|
benchmark_vincenty.py
|
#!/usr/bin/env python
"""
vreckon and vdist are iterative algorithms.
How much does PyPy help over Cpython?
Hmm, PyPy is slower than Cpython..
$ pypy3 tests/benchmark_vincenty.py 10000
2.1160879135131836
0.06056046485900879
$ python tests/benchmark_vincenty.py 10000
0.3325080871582031
0.02107095718383789
"""
import argparse
import shutil
import subprocess
import time
from pathlib import Path
import numpy as np
from pymap3d.vincenty import vdist, vreckon
R = Path(__file__).parent
MATLAB = shutil.which("matlab")
ll0 = (42.0, 82.0)
def bench_vreckon(N: int) -> float:
sr = np.random.random(N)
az = np.random.random(N)
tic = time.monotonic()
_, _ = vreckon(ll0[0], ll0[1], sr, az)
return time.monotonic() - tic
def bench_vdist(N: int) -> float:
lat = np.random.random(N)
lon = np.random.random(N)
tic = time.monotonic()
_, _ = vdist(ll0[0], ll0[1], lat, lon)
return time.monotonic() - tic
if __name__ == "__main__":
p = argparse.ArgumentParser()
p.add_argument("N", help="number of iterations", type=int)
args = p.parse_args()
N = args.N
print(f"vreckon: {bench_vreckon(N):.3f}")
print(f"vdist: {bench_vdist(N):.3f}")
if MATLAB:
print(f"matlab path {R}")
subprocess.check_call(
f'matlab -batch "helper_vdist({ll0[0]}, {ll0[1]}, {N})"', text=True, timeout=90, cwd=R
)
|
27a856a479b8bacf391d09b47154f5cbbc122c67
|
bb7300cb147c439241684dd0086dafb3aadbe495
|
/MAXIMUM_SUBARRAY/maximum_subarray_brownbear.py
|
451254b80dfec9c97b7273fdcde47e683b463616
|
[] |
no_license
|
29rithm/algospot
|
2f043ea9f20346e246d28e40d99cc4abb5f01929
|
3b4c671d089b58e49a558573b4e05abb80de04d8
|
refs/heads/master
| 2020-09-12T08:15:28.560875
| 2020-02-23T07:14:59
| 2020-02-23T07:14:59
| 222,365,096
| 174
| 8
| null | 2020-03-08T11:47:45
| 2019-11-18T04:42:29
|
Python
|
UTF-8
|
Python
| false
| false
| 358
|
py
|
maximum_subarray_brownbear.py
|
class Solution:
def maxSubArray(self, nums):
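        # Kadane's algorithm: restart the running sum whenever it goes
        # negative, and track the best sum seen so far.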
        sequence_sum = 0
        max_sum = float("-inf")  # safe lower bound instead of a magic number
for num in nums:
if sequence_sum < 0:
sequence_sum = num
else:
sequence_sum += num
if sequence_sum > max_sum:
max_sum = sequence_sum
return max_sum
|
7436db4e95755e7079ce646881cad8f16a6beee1
|
7b7c570b30d6d7a0e9b904c7cb378cfb0d0f0e07
|
/tests/entities/test_metric.py
|
66821a9b5e22e62c03f07f75331dd181756175d3
|
[
"Apache-2.0"
] |
permissive
|
mlflow/mlflow
|
ca97bfbbf32f8e59f454e428f5e46eb3d34d062f
|
37298ffafcd34002352d01d579d4524790544267
|
refs/heads/master
| 2023-09-01T13:15:53.902815
| 2023-09-01T09:00:42
| 2023-09-01T09:00:42
| 136,202,695
| 14,102
| 3,748
|
Apache-2.0
| 2023-09-14T21:52:42
| 2018-06-05T16:05:58
|
Python
|
UTF-8
|
Python
| false
| false
| 905
|
py
|
test_metric.py
|
from mlflow.entities import Metric
from mlflow.utils.time_utils import get_current_time_millis
from tests.helper_functions import random_int, random_str
def _check(metric, key, value, timestamp, step):
assert type(metric) == Metric
assert metric.key == key
assert metric.value == value
assert metric.timestamp == timestamp
assert metric.step == step
def test_creation_and_hydration():
key = random_str()
value = 10000
ts = get_current_time_millis()
step = random_int()
metric = Metric(key, value, ts, step)
_check(metric, key, value, ts, step)
as_dict = {"key": key, "value": value, "timestamp": ts, "step": step}
assert dict(metric) == as_dict
proto = metric.to_proto()
metric2 = metric.from_proto(proto)
_check(metric2, key, value, ts, step)
metric3 = Metric.from_dictionary(as_dict)
_check(metric3, key, value, ts, step)
|
693321e5bb697ac3c139e283cbe9952c9aa5adfe
|
6189f34eff2831e3e727cd7c5e43bc5b591adffc
|
/WebMirror/management/rss_parser_funcs/feed_parse_extractLostInTranslation.py
|
c3942407124ff98f865a2ac3d8001064fb39381b
|
[
"BSD-3-Clause"
] |
permissive
|
fake-name/ReadableWebProxy
|
24603660b204a9e7965cfdd4a942ff62d7711e27
|
ca2e086818433abc08c014dd06bfd22d4985ea2a
|
refs/heads/master
| 2023-09-04T03:54:50.043051
| 2023-08-26T16:08:46
| 2023-08-26T16:08:46
| 39,611,770
| 207
| 20
|
BSD-3-Clause
| 2023-09-11T15:48:15
| 2015-07-24T04:30:43
|
Python
|
UTF-8
|
Python
| false
| false
| 772
|
py
|
feed_parse_extractLostInTranslation.py
|
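# Note: extractVolChapterFragmentPostfix and buildReleaseMessageWithType are helpers
# assumed to be provided by the surrounding WebMirror feed-parser framework; they are
# not defined in this module.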
def extractLostInTranslation(item):
"""
"""
vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])
if not (chp or vol) or 'preview' in item['title'].lower():
return None
if 'Third Prince Elmer' in item['tags']:
return buildReleaseMessageWithType(item, 'Third Prince Elmer', vol, chp, frag=frag, postfix=postfix)
if 'Otoko Aruji' in item['tags']:
return buildReleaseMessageWithType(item, 'Otoko Aruji', vol, chp, frag=frag, postfix=postfix)
if "Sword Saint's Disciple" in item['tags']:
return buildReleaseMessageWithType(item, "Sword Saint's Disciple", vol, chp, frag=frag, postfix=postfix)
if 'Doll Dungeon' in item['tags']:
return buildReleaseMessageWithType(item, 'Doll Dungeon', vol, chp, frag=frag, postfix=postfix)
return False
|
de707a8ead1682a410685a1eaffecb181ea75025
|
578db86c51d44ebddd0dc7b1738985b3dc69eb74
|
/corehq/form_processor/migrations/0031_add_details_field_to_case_transaction.py
|
26bd10533b9aa2171f2c14397cc5c188a5771d25
|
[
"BSD-3-Clause"
] |
permissive
|
dimagi/commcare-hq
|
a43c7dd32b5f89c89fd5aa1b1359ab7301f4ff6b
|
e7391ddae1af1dbf118211ecb52c83fc508aa656
|
refs/heads/master
| 2023-08-16T22:38:27.853437
| 2023-08-16T19:07:19
| 2023-08-16T19:07:19
| 247,278
| 499
| 203
|
BSD-3-Clause
| 2023-09-14T19:03:24
| 2009-07-09T17:00:07
|
Python
|
UTF-8
|
Python
| false
| false
| 788
|
py
|
0031_add_details_field_to_case_transaction.py
|
from django.db import models, migrations
import jsonfield.fields
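# Adds a JSON "details" field to CaseTransaction and extends the "type" choices with rebuild reasons.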
class Migration(migrations.Migration):
dependencies = [
('form_processor', '0030_casetransaction_revoked'),
]
operations = [
migrations.AddField(
model_name='casetransaction',
name='details',
field=jsonfield.fields.JSONField(default=dict),
preserve_default=True,
),
migrations.AlterField(
model_name='casetransaction',
name='type',
field=models.PositiveSmallIntegerField(choices=[(0, 'form'), (1, 'rebuild_with_reason'), (2, 'user_requested_rebuild'), (3, 'user_archived_rebuild'), (4, 'form_archive_rebuild'), (5, 'form_edit_rebuild')]),
preserve_default=True,
),
]
|
3aeff222d6503d0d1454708e6371bdd654344f73
|
afd2087e80478010d9df66e78280f75e1ff17d45
|
/test/test_complex.py
|
36f17b3b01f6a7d472c39f5aec0485756f758157
|
[
"BSD-3-Clause",
"BSD-2-Clause",
"LicenseRef-scancode-secret-labs-2011",
"LicenseRef-scancode-generic-cla",
"BSL-1.0",
"Apache-2.0"
] |
permissive
|
pytorch/pytorch
|
7521ac50c47d18b916ae47a6592c4646c2cb69b5
|
a6f7dd4707ac116c0f5fb5f44f42429f38d23ab4
|
refs/heads/main
| 2023-08-03T05:05:02.822937
| 2023-08-03T00:40:33
| 2023-08-03T04:14:52
| 65,600,975
| 77,092
| 24,610
|
NOASSERTION
| 2023-09-14T21:58:39
| 2016-08-13T05:26:41
|
Python
|
UTF-8
|
Python
| false
| false
| 9,347
|
py
|
test_complex.py
|
# Owner(s): ["module: complex"]
import torch
from torch.testing._internal.common_device_type import (
instantiate_device_type_tests,
dtypes,
onlyCPU,
)
from torch.testing._internal.common_utils import TestCase, run_tests
from torch.testing._internal.common_dtype import complex_types
devices = (torch.device('cpu'), torch.device('cuda:0'))
class TestComplexTensor(TestCase):
@dtypes(*complex_types())
def test_to_list(self, device, dtype):
# test that the complex float tensor has expected values and
# there's no garbage value in the resultant list
self.assertEqual(torch.zeros((2, 2), device=device, dtype=dtype).tolist(), [[0j, 0j], [0j, 0j]])
@dtypes(torch.float32, torch.float64)
def test_dtype_inference(self, device, dtype):
# issue: https://github.com/pytorch/pytorch/issues/36834
default_dtype = torch.get_default_dtype()
torch.set_default_dtype(dtype)
x = torch.tensor([3., 3. + 5.j], device=device)
torch.set_default_dtype(default_dtype)
self.assertEqual(x.dtype, torch.cdouble if dtype == torch.float64 else torch.cfloat)
@onlyCPU
@dtypes(*complex_types())
def test_eq(self, device, dtype):
"Test eq on complex types"
nan = float("nan")
# Non-vectorized operations
for a, b in (
(torch.tensor([-0.0610 - 2.1172j], device=device, dtype=dtype),
torch.tensor([-6.1278 - 8.5019j], device=device, dtype=dtype)),
(torch.tensor([-0.0610 - 2.1172j], device=device, dtype=dtype),
torch.tensor([-6.1278 - 2.1172j], device=device, dtype=dtype)),
(torch.tensor([-0.0610 - 2.1172j], device=device, dtype=dtype),
torch.tensor([-0.0610 - 8.5019j], device=device, dtype=dtype)),
):
actual = torch.eq(a, b)
expected = torch.tensor([False], device=device, dtype=torch.bool)
self.assertEqual(actual, expected, msg=f"\neq\nactual {actual}\nexpected {expected}")
actual = torch.eq(a, a)
expected = torch.tensor([True], device=device, dtype=torch.bool)
self.assertEqual(actual, expected, msg=f"\neq\nactual {actual}\nexpected {expected}")
actual = torch.full_like(b, complex(2, 2))
torch.eq(a, b, out=actual)
expected = torch.tensor([complex(0)], device=device, dtype=dtype)
self.assertEqual(actual, expected, msg=f"\neq(out)\nactual {actual}\nexpected {expected}")
actual = torch.full_like(b, complex(2, 2))
torch.eq(a, a, out=actual)
expected = torch.tensor([complex(1)], device=device, dtype=dtype)
self.assertEqual(actual, expected, msg=f"\neq(out)\nactual {actual}\nexpected {expected}")
# Vectorized operations
for a, b in (
(torch.tensor([
-0.0610 - 2.1172j, 5.1576 + 5.4775j, complex(2.8871, nan), -6.6545 - 3.7655j, -2.7036 - 1.4470j, 0.3712 + 7.989j,
-0.0610 - 2.1172j, 5.1576 + 5.4775j, complex(nan, -3.2650), -6.6545 - 3.7655j, -2.7036 - 1.4470j, 0.3712 + 7.989j],
device=device, dtype=dtype),
torch.tensor([
-6.1278 - 8.5019j, 0.5886 + 8.8816j, complex(2.8871, nan), 6.3505 + 2.2683j, 0.3712 + 7.9659j, 0.3712 + 7.989j,
-6.1278 - 2.1172j, 5.1576 + 8.8816j, complex(nan, -3.2650), 6.3505 + 2.2683j, 0.3712 + 7.9659j, 0.3712 + 7.989j],
device=device, dtype=dtype)),
):
actual = torch.eq(a, b)
expected = torch.tensor([False, False, False, False, False, True,
False, False, False, False, False, True],
device=device, dtype=torch.bool)
self.assertEqual(actual, expected, msg=f"\neq\nactual {actual}\nexpected {expected}")
actual = torch.eq(a, a)
expected = torch.tensor([True, True, False, True, True, True,
True, True, False, True, True, True],
device=device, dtype=torch.bool)
self.assertEqual(actual, expected, msg=f"\neq\nactual {actual}\nexpected {expected}")
actual = torch.full_like(b, complex(2, 2))
torch.eq(a, b, out=actual)
expected = torch.tensor([complex(0), complex(0), complex(0), complex(0), complex(0), complex(1),
complex(0), complex(0), complex(0), complex(0), complex(0), complex(1)],
device=device, dtype=dtype)
self.assertEqual(actual, expected, msg=f"\neq(out)\nactual {actual}\nexpected {expected}")
actual = torch.full_like(b, complex(2, 2))
torch.eq(a, a, out=actual)
expected = torch.tensor([complex(1), complex(1), complex(0), complex(1), complex(1), complex(1),
complex(1), complex(1), complex(0), complex(1), complex(1), complex(1)],
device=device, dtype=dtype)
self.assertEqual(actual, expected, msg=f"\neq(out)\nactual {actual}\nexpected {expected}")
@onlyCPU
@dtypes(*complex_types())
def test_ne(self, device, dtype):
"Test ne on complex types"
nan = float("nan")
# Non-vectorized operations
for a, b in (
(torch.tensor([-0.0610 - 2.1172j], device=device, dtype=dtype),
torch.tensor([-6.1278 - 8.5019j], device=device, dtype=dtype)),
(torch.tensor([-0.0610 - 2.1172j], device=device, dtype=dtype),
torch.tensor([-6.1278 - 2.1172j], device=device, dtype=dtype)),
(torch.tensor([-0.0610 - 2.1172j], device=device, dtype=dtype),
torch.tensor([-0.0610 - 8.5019j], device=device, dtype=dtype)),
):
actual = torch.ne(a, b)
expected = torch.tensor([True], device=device, dtype=torch.bool)
self.assertEqual(actual, expected, msg=f"\nne\nactual {actual}\nexpected {expected}")
actual = torch.ne(a, a)
expected = torch.tensor([False], device=device, dtype=torch.bool)
self.assertEqual(actual, expected, msg=f"\nne\nactual {actual}\nexpected {expected}")
actual = torch.full_like(b, complex(2, 2))
torch.ne(a, b, out=actual)
expected = torch.tensor([complex(1)], device=device, dtype=dtype)
self.assertEqual(actual, expected, msg=f"\nne(out)\nactual {actual}\nexpected {expected}")
actual = torch.full_like(b, complex(2, 2))
torch.ne(a, a, out=actual)
expected = torch.tensor([complex(0)], device=device, dtype=dtype)
self.assertEqual(actual, expected, msg=f"\nne(out)\nactual {actual}\nexpected {expected}")
# Vectorized operations
for a, b in (
(torch.tensor([
-0.0610 - 2.1172j, 5.1576 + 5.4775j, complex(2.8871, nan), -6.6545 - 3.7655j, -2.7036 - 1.4470j, 0.3712 + 7.989j,
-0.0610 - 2.1172j, 5.1576 + 5.4775j, complex(nan, -3.2650), -6.6545 - 3.7655j, -2.7036 - 1.4470j, 0.3712 + 7.989j],
device=device, dtype=dtype),
torch.tensor([
-6.1278 - 8.5019j, 0.5886 + 8.8816j, complex(2.8871, nan), 6.3505 + 2.2683j, 0.3712 + 7.9659j, 0.3712 + 7.989j,
-6.1278 - 2.1172j, 5.1576 + 8.8816j, complex(nan, -3.2650), 6.3505 + 2.2683j, 0.3712 + 7.9659j, 0.3712 + 7.989j],
device=device, dtype=dtype)),
):
actual = torch.ne(a, b)
expected = torch.tensor([True, True, True, True, True, False,
True, True, True, True, True, False],
device=device, dtype=torch.bool)
self.assertEqual(actual, expected, msg=f"\nne\nactual {actual}\nexpected {expected}")
actual = torch.ne(a, a)
expected = torch.tensor([False, False, True, False, False, False,
False, False, True, False, False, False],
device=device, dtype=torch.bool)
self.assertEqual(actual, expected, msg=f"\nne\nactual {actual}\nexpected {expected}")
actual = torch.full_like(b, complex(2, 2))
torch.ne(a, b, out=actual)
expected = torch.tensor([complex(1), complex(1), complex(1), complex(1), complex(1), complex(0),
complex(1), complex(1), complex(1), complex(1), complex(1), complex(0)],
device=device, dtype=dtype)
self.assertEqual(actual, expected, msg=f"\nne(out)\nactual {actual}\nexpected {expected}")
actual = torch.full_like(b, complex(2, 2))
torch.ne(a, a, out=actual)
expected = torch.tensor([complex(0), complex(0), complex(1), complex(0), complex(0), complex(0),
complex(0), complex(0), complex(1), complex(0), complex(0), complex(0)],
device=device, dtype=dtype)
self.assertEqual(actual, expected, msg=f"\nne(out)\nactual {actual}\nexpected {expected}")
instantiate_device_type_tests(TestComplexTensor, globals())
if __name__ == '__main__':
run_tests()
|
5814711b2c55cc3e647495ad752b0d8381ca039e
|
8fa191cd4a67431a04eff62d35122ee83cc7b0af
|
/bookwyrm/tests/views/lists/test_embed.py
|
4191ffe0d712c420c7a01915a6c0d997d88baddc
|
[
"LicenseRef-scancode-warranty-disclaimer"
] |
no_license
|
bookwyrm-social/bookwyrm
|
24678676a7a58dba96641194dfae3fffbf01574d
|
0f8da5b738047f3c34d60d93f59bdedd8f797224
|
refs/heads/main
| 2023-08-20T21:45:30.957277
| 2023-08-19T23:41:50
| 2023-08-19T23:41:50
| 236,415,735
| 1,398
| 216
|
NOASSERTION
| 2023-09-08T20:43:06
| 2020-01-27T03:51:54
|
Python
|
UTF-8
|
Python
| false
| false
| 3,429
|
py
|
test_embed.py
|
""" test for app action functionality """
from unittest.mock import patch
from django.contrib.auth.models import AnonymousUser
from django.http.response import Http404
from django.template.response import TemplateResponse
from django.test import TestCase
from django.test.client import RequestFactory
from bookwyrm import models, views
from bookwyrm.tests.validate_html import validate_html
# pylint: disable=unused-argument
class ListViews(TestCase):
"""list view"""
def setUp(self):
"""we need basic test data and mocks"""
self.factory = RequestFactory()
with patch("bookwyrm.suggested_users.rerank_suggestions_task.delay"), patch(
"bookwyrm.activitystreams.populate_stream_task.delay"
), patch("bookwyrm.lists_stream.populate_lists_task.delay"):
self.local_user = models.User.objects.create_user(
"mouse@local.com",
"mouse@mouse.com",
"mouseword",
local=True,
localname="mouse",
remote_id="https://example.com/users/mouse",
)
work = models.Work.objects.create(title="Work")
self.book = models.Edition.objects.create(
title="Example Edition",
remote_id="https://example.com/book/1",
parent_work=work,
)
with patch(
"bookwyrm.models.activitypub_mixin.broadcast_task.apply_async"
), patch("bookwyrm.lists_stream.remove_list_task.delay"):
self.list = models.List.objects.create(
name="Test List", user=self.local_user
)
self.anonymous_user = AnonymousUser
self.anonymous_user.is_authenticated = False
models.SiteSettings.objects.create()
def test_embed_call_without_key(self):
"""there are so many views, this just makes sure it DOESN’T load"""
view = views.unsafe_embed_list
request = self.factory.get("")
request.user = self.anonymous_user
with patch("bookwyrm.models.activitypub_mixin.broadcast_task.apply_async"):
models.ListItem.objects.create(
book_list=self.list,
user=self.local_user,
book=self.book,
approved=True,
order=1,
)
with patch("bookwyrm.views.list.list.is_api_request") as is_api:
is_api.return_value = False
with self.assertRaises(Http404):
view(request, self.list.id, "")
def test_embed_call_with_key(self):
"""there are so many views, this just makes sure it LOADS"""
view = views.unsafe_embed_list
request = self.factory.get("")
request.user = self.anonymous_user
with patch("bookwyrm.models.activitypub_mixin.broadcast_task.apply_async"):
models.ListItem.objects.create(
book_list=self.list,
user=self.local_user,
book=self.book,
approved=True,
order=1,
)
embed_key = str(self.list.embed_key.hex)
with patch("bookwyrm.views.list.list.is_api_request") as is_api:
is_api.return_value = False
result = view(request, self.list.id, embed_key)
self.assertIsInstance(result, TemplateResponse)
validate_html(result.render())
self.assertEqual(result.status_code, 200)
|
a35577f57ad33639bd5813b42ff47c4f48ce4c56
|
0d1e4c4a75bae5d5516b2066f680b62a25e1dd81
|
/vdirsyncer/storage/http.py
|
177e769326b79f32f55f25d1df69223fc3c9066f
|
[
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
pimutils/vdirsyncer
|
3e79768183598a8328a8b6d1391acdb408374239
|
adc974bdd1b2c6d4b34163beb66b10a627de5777
|
refs/heads/main
| 2023-08-18T23:25:49.567099
| 2023-08-05T22:05:06
| 2023-08-06T10:45:42
| 16,865,424
| 1,155
| 167
|
NOASSERTION
| 2023-09-08T11:03:01
| 2014-02-15T15:33:41
|
Python
|
UTF-8
|
Python
| false
| false
| 2,718
|
py
|
http.py
|
import urllib.parse as urlparse
import aiohttp
from .. import exceptions
from ..http import USERAGENT
from ..http import prepare_auth
from ..http import prepare_client_cert
from ..http import prepare_verify
from ..http import request
from ..vobject import Item
from ..vobject import split_collection
from .base import Storage
class HttpStorage(Storage):
storage_name = "http"
read_only = True
_repr_attributes = ["username", "url"]
_items = None
# Required for tests.
_ignore_uids = True
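    # When UIDs are ignored, list() below replaces each item's UID with its content hash.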
def __init__(
self,
url,
username="",
password="",
verify=None,
auth=None,
useragent=USERAGENT,
verify_fingerprint=None,
auth_cert=None,
*,
connector,
**kwargs
) -> None:
super().__init__(**kwargs)
self._settings = {
"cert": prepare_client_cert(auth_cert),
"latin1_fallback": False,
}
auth = prepare_auth(auth, username, password)
if auth:
self._settings["auth"] = auth
ssl = prepare_verify(verify, verify_fingerprint)
if ssl:
self._settings["ssl"] = ssl
self.username, self.password = username, password
self.useragent = useragent
assert connector is not None
self.connector = connector
collection = kwargs.get("collection")
if collection is not None:
url = urlparse.urljoin(url, collection)
self.url = url
self.parsed_url = urlparse.urlparse(self.url)
def _default_headers(self):
return {"User-Agent": self.useragent}
async def list(self):
async with aiohttp.ClientSession(
connector=self.connector,
connector_owner=False,
trust_env=True,
            # TODO: use raise_for_status=True, though this needs traces first.
) as session:
r = await request(
"GET",
self.url,
headers=self._default_headers(),
session=session,
**self._settings,
)
self._items = {}
for item in split_collection((await r.read()).decode("utf-8")):
item = Item(item)
if self._ignore_uids:
item = item.with_uid(item.hash)
self._items[item.ident] = item, item.hash
for href, (_, etag) in self._items.items():
yield href, etag
async def get(self, href):
if self._items is None:
async for _ in self.list():
pass
try:
return self._items[href]
except KeyError:
raise exceptions.NotFoundError(href)
|
35263c3375cddcee3d69a9972b21257dfa56e798
|
38e5c18fdb3da2fd51d6ffcdbd30fca1f4197220
|
/events/models/locale.py
|
8277990855f01b480edf8bdbf8d841b477796471
|
[
"BSD-2-Clause"
] |
permissive
|
GetTogetherComm/GetTogether
|
3472c00e94c25930bb5f854bdf5ddf6f0b25fe70
|
6708944bbcecb6d3d1467b096b2d72e991583d51
|
refs/heads/master
| 2023-08-20T17:57:30.082021
| 2022-04-18T22:22:54
| 2022-04-18T22:22:54
| 115,438,321
| 462
| 106
|
BSD-2-Clause
| 2023-02-15T18:23:18
| 2017-12-26T16:34:28
|
Python
|
UTF-8
|
Python
| false
| false
| 3,882
|
py
|
locale.py
|
from django.db import models
from django.utils.translation import ugettext_lazy as _
import pytz
from rest_framework import serializers
class Language(models.Model):
class Meta:
ordering = ("name",)
name = models.CharField(_("Language"), max_length=150, null=True)
code = models.CharField(_("Language Code"), max_length=20, null=True)
def __str__(self):
return u"%s" % (self.name)
class Continent(models.Model):
name = models.CharField(_("Name"), max_length=50)
class Meta:
ordering = ("name",)
def __str__(self):
return u"%s" % (self.name)
class Country(models.Model):
name = models.CharField(_("Name"), max_length=100)
code = models.CharField(_("Country Code"), max_length=8)
continents = models.ManyToManyField(Continent)
class Meta:
ordering = ("name",)
verbose_name_plural = "Countries"
def __str__(self):
return u"%s" % (self.name)
@property
def slug(self):
if self.name is not None:
return self.name.replace(",", "").replace(" ", "_")
else:
return "no_country"
class CountrySerializer(serializers.ModelSerializer):
display = serializers.CharField(source="__str__", read_only=True)
class Meta:
model = Country
fields = ("id", "name", "code", "display")
class SPR(models.Model):
name = models.CharField(_("Name"), max_length=100)
code = models.CharField(_("Admin Code"), max_length=8)
country = models.ForeignKey(Country, on_delete=models.CASCADE)
class Meta:
ordering = ("name",)
def __str__(self):
return u"%s, %s" % (self.name, self.country.name)
@property
def slug(self):
if self.name is not None:
return self.name.replace(",", "").replace(" ", "_")
else:
return "no_spr"
class SPRSerializer(serializers.ModelSerializer):
display = serializers.CharField(source="__str__", read_only=True)
class Meta:
model = SPR
fields = ("id", "name", "code", "country", "slug", "display")
class City(models.Model):
class Meta:
ordering = ("name",)
verbose_name = _("City")
verbose_name_plural = _("Cities")
name = models.CharField(_("Name"), max_length=100)
spr = models.ForeignKey(SPR, on_delete=models.CASCADE)
tz = models.CharField(
max_length=32,
verbose_name=_("Default Timezone"),
default="UTC",
choices=[(tz, tz) for tz in pytz.all_timezones],
blank=False,
null=False,
help_text=_("The most commonly used timezone for this Team."),
)
longitude = models.FloatField(
help_text=_("Longitude in Degrees East"), null=True, blank=True
)
latitude = models.FloatField(
help_text=_("Latitude in Degrees North"), null=True, blank=True
)
population = models.IntegerField(
help_text=_("Population"), null=False, blank=False, default=0
)
@property
def short_name(self):
if self.spr.country.name == "United States":
return u"%s, %s" % (self.name, self.spr.name)
else:
return u"%s, %s" % (self.name, self.spr.country.name)
def __str__(self):
return u"%s, %s, %s" % (self.name, self.spr.name, self.spr.country.name)
@property
def slug(self):
if self.name is not None:
return self.name.replace(",", "").replace(" ", "_")
else:
return "no_city"
class CitySerializer(serializers.ModelSerializer):
display = serializers.CharField(source="__str__", read_only=True)
class Meta:
model = City
fields = (
"id",
"name",
"short_name",
"spr",
"tz",
"latitude",
"longitude",
"slug",
"display",
)
|
3abd3d7ffc64594c3343447ac72511df6c25b98c
|
becf2cde221ca33b165d348203400e3290630f81
|
/winpython/_vendor/qtpy/tests/QtPrintSupport.py
|
263747d2c8f1f5d8ed7d62c582cfdff56fb90856
|
[
"MIT"
] |
permissive
|
winpython/winpython
|
620fe3c7aa91e593f614d5806184f28eb6c8f480
|
323c6fef4100220a84daf964ed0b78058862bc29
|
refs/heads/master
| 2023-09-05T00:43:32.719477
| 2023-08-26T14:59:41
| 2023-08-26T14:59:41
| 24,275,324
| 1,796
| 373
|
MIT
| 2023-08-26T14:59:42
| 2014-09-20T21:47:26
|
Python
|
UTF-8
|
Python
| false
| false
| 1,064
|
py
|
QtPrintSupport.py
|
# -----------------------------------------------------------------------------
# Copyright © 2009- The Spyder Development Team
#
# Licensed under the terms of the MIT License
# (see LICENSE.txt for details)
# -----------------------------------------------------------------------------
"""Provides QtPrintSupport classes and functions."""
from . import PYQT5, PYQT6, PYSIDE6, PYSIDE2
if PYQT5:
from PyQt5.QtPrintSupport import *
elif PYQT6:
from PyQt6.QtPrintSupport import *
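    # Restore the Qt5-style exec_() / print_() aliases that PyQt6 removed.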
QPageSetupDialog.exec_ = lambda self, *args, **kwargs: self.exec(*args, **kwargs)
QPrintDialog.exec_ = lambda self, *args, **kwargs: self.exec(*args, **kwargs)
QPrintPreviewWidget.print_ = lambda self, *args, **kwargs: self.print(*args, **kwargs)
elif PYSIDE6:
from PySide6.QtPrintSupport import *
# Map DeprecationWarning methods
QPageSetupDialog.exec_ = lambda self, *args, **kwargs: self.exec(*args, **kwargs)
QPrintDialog.exec_ = lambda self, *args, **kwargs: self.exec(*args, **kwargs)
elif PYSIDE2:
from PySide2.QtPrintSupport import *
|
f93eb94b4e7dd5088fc1c671d15f73e5cc049461
|
2c91404d3f2061426135fc6022bd590180693b05
|
/src/query_spec.py
|
246a10b85099e22d96ea411de65b12bcd5947ba4
|
[] |
no_license
|
logv/snorkel
|
2b7e2b8ad306bf99cfd80b24ba42dc525141b73b
|
f8f33dc7d9b23d1e5bb208601731439ba4bad594
|
refs/heads/slite
| 2023-02-21T12:42:53.915381
| 2020-05-22T02:58:06
| 2020-05-22T02:58:06
| 8,814,504
| 153
| 21
| null | 2023-02-15T23:43:18
| 2013-03-16T05:10:17
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 2,644
|
py
|
query_spec.py
|
import werkzeug
try:
    import dotmap
except ImportError:
    dotmap = None
try:
    import addict
except ImportError:
    addict = None
class QuerySpec(object):
def __init__(self, query):
# TODO: list all attributes of a query spec up front so others know what to expect
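        # Normalize the query (list of dicts, list of pairs, or a mapping) into a
        # MultiDict so repeated keys are preserved.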
md = werkzeug.MultiDict()
for q in query:
if type(q) == dict:
md.add(q['name'], q['value'].strip())
elif type(q) == list or type(q) == tuple:
md.add(q[0], q[1].strip())
else:
md.add(q, query[q])
self.ismultidict = False
self.isdotmap = False
if isinstance(query, werkzeug.MultiDict):
self.ismultidict = True
elif addict and isinstance(query, addict.Dict):
self.isdotmap = True
elif dotmap and isinstance(query, dotmap.DotMap):
self.isdotmap = True
elif isinstance(query, list):
self.ismultidict = True
else:
raise Exception("Unknown entry for query spec")
self.md = md
# we will need to put together an exported interface
self.fields = self.get_fields()
self.groupby = self.get_groupby()
    def __makedict__(self):
        ret = {}
for f in self.md:
if f.endswith("[]"):
if self.ismultidict:
ret[f] = self.md.getlist(f)
else:
ret[f] = self.md.get(f)
else:
ret[f] = self.md.get(f)
return ret
def __json__(self):
return self.__makedict__()
def setlist(self, k, v):
self.md.setlist(k, v)
def set(self, k, v):
if k in self.md:
self.md.pop(k)
        self.md.add(k, v)
def add(self, k, v):
self.md.add(k, v)
    def getlist(self, k, d=None):
if self.ismultidict:
return self.md.getlist(k)
return self.md.get(k) or []
def get(self, k, d=None):
return self.md.get(k, d)
def get_metric(self):
op = self.md.get('metric')
if not op:
op = self.md.get('agg', '')
op = op.lstrip("$")
return op
def get_groupby(self):
g = self.getlist('groupby[]')
if not g:
g = self.getlist('group_by')
return g
def get_fields(self):
g = self.getlist('fields[]')
if not g:
g = self.getlist('fieldset')
return g
def get_custom_fields(self):
g = self.getlist('custom_fields[]')
if not g:
g = self.getlist('custom_fields')
return g
|
4d525e9841a651b6cac4fc7f35461dc1addc5134
|
156623a5f9fcef7d0bf42f245dcea624a834a4eb
|
/twitch/helix/models/follow.py
|
d5a48a6526c22afb6186337075d18a3d3e482f58
|
[
"MIT"
] |
permissive
|
PetterKraabol/Twitch-Python
|
2129998114be85e62527cf2cca03c3c3dbda9f26
|
1c60f04030c5fad379e4de290474da4d36297152
|
refs/heads/main
| 2023-01-13T11:50:42.676901
| 2022-05-01T12:17:50
| 2022-05-01T12:17:50
| 151,992,604
| 224
| 57
|
MIT
| 2022-12-26T22:05:26
| 2018-10-07T22:59:11
|
Python
|
UTF-8
|
Python
| false
| false
| 921
|
py
|
follow.py
|
from typing import Dict, Any
import twitch.helix as helix
from twitch.api import API
from .model import Model
class Follow(Model):
def __init__(self, api: API, data: Dict[str, Any]):
super().__init__(api, data)
self.from_id: str = data.get('from_id')
self.from_name: str = data.get('from_name')
self.to_id: str = data.get('to_id')
self.to_name: str = data.get('to_name')
self.followed_at: str = data.get('followed_at')
@property
def follower(self) -> 'helix.User':
"""
This user follows the followed
:return: User following the user
"""
return helix.Users(self._api, int(self.from_id))[0]
@property
def followed(self) -> 'helix.User':
"""
This user is being followed by the follower
:return: User being followed
"""
return helix.Users(self._api, int(self.to_id))[0]
|
8571570b626f14b914df8edf6eb1ffd4c1de7c35
|
160f08e768d7271f9522ad2597ac4ee79c04477a
|
/src/c3nav/mapdata/migrations/0022_remove_space_category.py
|
9110b31afbe7a2e9a49767eee701df63ff9ada4b
|
[
"Apache-2.0"
] |
permissive
|
c3nav/c3nav
|
6254724dfc8589ee03c6028577befd7c65b05857
|
1a4ef5caa06ddacc8d9370b5adcee248fd4f55f7
|
refs/heads/main
| 2023-08-04T08:36:18.431458
| 2023-07-24T09:57:18
| 2023-07-24T09:57:18
| 56,852,994
| 140
| 47
|
Apache-2.0
| 2023-07-05T22:55:27
| 2016-04-22T12:13:51
|
Python
|
UTF-8
|
Python
| false
| false
| 1,401
|
py
|
0022_remove_space_category.py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.2 on 2017-07-11 15:10
from __future__ import unicode_literals
from django.db import migrations
def convert_space_category_to_location_group(apps, schema_editor):
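    # Replace the old Space.category field with membership in a new single-choice
    # "Space Category" location group.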
Space = apps.get_model('mapdata', 'Space')
LocationGroupCategory = apps.get_model('mapdata', 'LocationGroupCategory')
category = LocationGroupCategory.objects.create(name='spacecategory', titles={
'en': 'Space Category',
'de': 'Raumkategorie',
}, single=True, allow_levels=False, allow_spaces=True, allow_areas=False, allow_pois=False, priority=-1)
space_categories = (
('stairs', {'en': 'Stairs', 'de': 'Treppe'}, '#dddddd'),
('escalator', {'en': 'Escalator', 'de': 'Rolltreppe'}, '#bbbbbb'),
('elevator', {'en': 'Elevator', 'de': 'Aufzug'}, '#00ffff')
)
for i, (name, titles, color) in enumerate(space_categories):
group = category.groups.create(titles=titles, can_search=False, can_describe=False, color=color, priority=i+1)
group.spaces.set(Space.objects.filter(category=name))
class Migration(migrations.Migration):
dependencies = [
('mapdata', '0021_auto_20170710_1916'),
]
operations = [
migrations.RunPython(convert_space_category_to_location_group),
migrations.RemoveField(
model_name='space',
name='category',
),
]
|
ba85b7e28eed7e23d6232abfa8ba01c3422f2942
|
0c9e50d589a934b1d4bfd4448b793a75c0950e3f
|
/examples/streaming/server.py
|
fce98a8f5a644282db3241efb3a2fbfbae9f0f9d
|
[
"BSD-3-Clause"
] |
permissive
|
vmagamedov/grpclib
|
6eb4471e23d152e95325d9d6f5317cfa8d60fa80
|
0b9b171c8d439a76b8cad10b86a52f8585348dba
|
refs/heads/master
| 2023-08-31T22:34:57.510125
| 2023-08-02T05:49:52
| 2023-08-02T05:49:52
| 79,909,496
| 905
| 103
|
BSD-3-Clause
| 2023-07-28T05:29:17
| 2017-01-24T12:27:42
|
Python
|
UTF-8
|
Python
| false
| false
| 2,181
|
py
|
server.py
|
import asyncio
from grpclib.utils import graceful_exit
from grpclib.server import Server, Stream
from .helloworld_pb2 import HelloRequest, HelloReply
from .helloworld_grpc import GreeterBase
class Greeter(GreeterBase):
# UNARY_UNARY - simple RPC
async def UnaryUnaryGreeting(
self,
stream: Stream[HelloRequest, HelloReply],
) -> None:
request = await stream.recv_message()
assert request is not None
message = f'Hello, {request.name}!'
await stream.send_message(HelloReply(message=message))
# UNARY_STREAM - response streaming RPC
async def UnaryStreamGreeting(
self,
stream: Stream[HelloRequest, HelloReply],
) -> None:
request = await stream.recv_message()
assert request is not None
await stream.send_message(
HelloReply(message=f'Hello, {request.name}!'))
await stream.send_message(
HelloReply(message=f'Goodbye, {request.name}!'))
# STREAM_UNARY - request streaming RPC
async def StreamUnaryGreeting(
self,
stream: Stream[HelloRequest, HelloReply],
) -> None:
names = []
async for request in stream:
names.append(request.name)
message = 'Hello, {}!'.format(' and '.join(names))
await stream.send_message(HelloReply(message=message))
# STREAM_STREAM - bidirectional streaming RPC
async def StreamStreamGreeting(
self,
stream: Stream[HelloRequest, HelloReply],
) -> None:
async for request in stream:
message = f'Hello, {request.name}!'
await stream.send_message(HelloReply(message=message))
# Send another message to demonstrate responses are not
# coupled to requests.
message = 'Goodbye, all!'
await stream.send_message(HelloReply(message=message))
async def main(*, host: str = '127.0.0.1', port: int = 50051) -> None:
server = Server([Greeter()])
with graceful_exit([server]):
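        # graceful_exit closes the server on SIGINT/SIGTERM so wait_closed() returns cleanly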
await server.start(host, port)
print(f'Serving on {host}:{port}')
await server.wait_closed()
if __name__ == '__main__':
asyncio.run(main())
|
a28a0a654e82d65cf6e44b11bf88eac75c3a14ae
|
a5a99f646e371b45974a6fb6ccc06b0a674818f2
|
/EventFilter/RPCRawToDigi/test/testRPCDigiMerger_cfg.py
|
0e0272426146dd0025c67222367c84a60a7b7b18
|
[
"Apache-2.0"
] |
permissive
|
cms-sw/cmssw
|
4ecd2c1105d59c66d385551230542c6615b9ab58
|
19c178740257eb48367778593da55dcad08b7a4f
|
refs/heads/master
| 2023-08-23T21:57:42.491143
| 2023-08-22T20:22:40
| 2023-08-22T20:22:40
| 10,969,551
| 1,006
| 3,696
|
Apache-2.0
| 2023-09-14T19:14:28
| 2013-06-26T14:09:07
|
C++
|
UTF-8
|
Python
| false
| false
| 4,736
|
py
|
testRPCDigiMerger_cfg.py
|
import FWCore.ParameterSet.Config as cms
from FWCore.PythonUtilities.LumiList import LumiList
from FWCore.ParameterSet.VarParsing import VarParsing
options = VarParsing("analysis")
process = cms.Process("testRPCDigiMerger")
process.load("FWCore.MessageService.MessageLogger_cfi")
process.MessageLogger.cerr.FwkReport.reportEvery = 1000
process.load("Configuration.StandardSequences.FrontierConditions_GlobalTag_cff")
process.GlobalTag.globaltag = "102X_dataRun2_Sep2018Rereco_test_v1"
#######################################################
### RPC RawToDigi
### RPC RawToDigi - from Legacy
process.load("EventFilter.RPCRawToDigi.rpcUnpackingModule_cfi")
### RPC RawToDigi - from TwinMux
process.load("EventFilter.RPCRawToDigi.RPCTwinMuxRawToDigi_cff")
### RPC RawToDigi - from CPPF
process.load("EventFilter.RPCRawToDigi.RPCCPPFRawToDigi_cff")
# process.load("EventFilter.RPCRawToDigi.RPCCPPFRawToDigi_sqlite_cff") #to load CPPF link maps from the local DB
### RPC RawToDigi - from OMTF
process.load('Configuration.StandardSequences.RawToDigi_Data_cff')
process.omtfStage2Digis = cms.EDProducer("OmtfUnpacker",
inputLabel = cms.InputTag('rawDataCollector'),
)
process.load("EventFilter.RPCRawToDigi.RPCDigiMerger_cff")
process.options = cms.untracked.PSet( wantSummary = cms.untracked.bool(True) )
# Source
process.source = cms.Source("PoolSource"
, fileNames = cms.untracked.vstring(
"/store/data/Run2018D/SingleMuon/RAW/v1/000/321/909/00000/34C0D29C-E3AA-E811-84FB-FA163EF5DB03.root",
"/store/data/Run2018D/SingleMuon/RAW/v1/000/321/909/00000/DEF27B09-E7AA-E811-B671-FA163EDF3211.root",
"/store/data/Run2018D/SingleMuon/RAW/v1/000/321/909/00000/163BB720-E7AA-E811-988D-FA163EC62C4D.root",
"/store/data/Run2018D/SingleMuon/RAW/v1/000/321/909/00000/B06CF0A5-E9AA-E811-AC1F-FA163EF7BA8C.root",
"/store/data/Run2018D/SingleMuon/RAW/v1/000/321/909/00000/AADE09C6-E9AA-E811-97DC-FA163E516F48.root",
"/store/data/Run2018D/SingleMuon/RAW/v1/000/321/909/00000/F4EB1DA6-E9AA-E811-8CF8-FA163ECCF441.root",
"/store/data/Run2018D/SingleMuon/RAW/v1/000/321/909/00000/A867DCA3-E9AA-E811-B8B3-FA163E1F69DA.root",
"/store/data/Run2018D/SingleMuon/RAW/v1/000/321/909/00000/DC0E29CC-E9AA-E811-AC0B-FA163E41F45F.root",
"/store/data/Run2018D/SingleMuon/RAW/v1/000/321/909/00000/8A5DAFA8-E9AA-E811-89C2-FA163EFF6119.root",
"/store/data/Run2018D/SingleMuon/RAW/v1/000/321/909/00000/B00828AF-E9AA-E811-8117-FA163E10FE53.root",
"/store/data/Run2018D/SingleMuon/RAW/v1/000/321/909/00000/28D7492E-E4AA-E811-9635-02163E013E90.root",
"/store/data/Run2018D/SingleMuon/RAW/v1/000/321/909/00000/FE2A5BAC-E9AA-E811-BC34-FA163E0639A2.root",
"/store/data/Run2018D/SingleMuon/RAW/v1/000/321/909/00000/7602E088-EDAA-E811-ABCA-FA163E749606.root",
"/store/data/Run2018D/SingleMuon/RAW/v1/000/321/909/00000/B49BB2AB-E9AA-E811-A0A0-FA163E3F57D2.root",
"/store/data/Run2018D/SingleMuon/RAW/v1/000/321/909/00000/14822C7E-EDAA-E811-8189-02163E01A095.root",
"/store/data/Run2018D/SingleMuon/RAW/v1/000/321/909/00000/B05E9DA8-EDAA-E811-B422-02163E016528.root",
)
)
process.maxEvents = cms.untracked.PSet( input = cms.untracked.int32(1000) )
process.p = cms.Path(
(
process.rpcUnpackingModule
+ process.rpcTwinMuxRawToDigi
+ process.rpcCPPFRawToDigi
+ process.omtfStage2Digis
)
* process.rpcDigiMerger
)
# Output
process.out = cms.OutputModule("PoolOutputModule"
, outputCommands = cms.untracked.vstring("drop *"
, "keep *_*_*_testRPCDigiMerger")
# , fileName = cms.untracked.string(options.outputFile)
, fileName = cms.untracked.string("testRPCDigiMerger.root")
, SelectEvents = cms.untracked.PSet(SelectEvents = cms.vstring("p"))
)
process.e = cms.EndPath(process.out)
|
7dff3edeeb8b8ef90574789552f5b1ce3b78a06d
|
8d8eef5fdc9228f079ae4351df726eb017707872
|
/tests/modisco/test_core.py
|
0127af159c21302947aae46bd1f2ff9cbd41503c
|
[
"MIT"
] |
permissive
|
kundajelab/bpnet
|
046afe1c9b275a481c36865c502bc01d4db95c50
|
1c6e5c4caf97cf34ccf716ef5b8b9c8de231cca2
|
refs/heads/master
| 2022-05-28T02:28:27.199847
| 2022-05-15T19:46:21
| 2022-05-15T19:46:21
| 199,090,254
| 128
| 31
|
MIT
| 2022-04-15T17:00:58
| 2019-07-26T23:10:04
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 531
|
py
|
test_core.py
|
"""Test core objects
"""
from pytest import fixture
from bpnet.modisco.files import ModiscoFile, ModiscoFileGroup
from bpnet.modisco.core import Seqlet
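# The modisco_dir fixture is expected to be supplied by the test suite's conftest.py.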
@fixture
def pattern(modisco_dir):
mf = ModiscoFile(modisco_dir / 'modisco.h5')
return mf.get_pattern("metacluster_0/pattern_0")
@fixture
def seqlet():
return Seqlet(seqname='1',
start=10,
end=20,
name='m0_p0',
strand='-')
def test_pattern(pattern):
assert len(pattern.seq.shape) == 2
|
f062e4e7fe8655a79a12d6cead9f5957ae77cbab
|
e210c28eeed9d38eb78c14b3a6388eca1e0e85d8
|
/nvflare/fuel/f3/drivers/aio_grpc_driver.py
|
38c122eef7e51d0fffb4d386fee215b47f3a5358
|
[
"Apache-2.0"
] |
permissive
|
NVIDIA/NVFlare
|
5a2d2e4c85a3fd0948e25f1ba510449727529a15
|
1433290c203bd23f34c29e11795ce592bc067888
|
refs/heads/main
| 2023-08-03T09:21:32.779763
| 2023-07-05T21:17:16
| 2023-07-05T21:17:16
| 388,876,833
| 442
| 140
|
Apache-2.0
| 2023-09-14T19:12:35
| 2021-07-23T17:26:12
|
Python
|
UTF-8
|
Python
| false
| false
| 16,267
|
py
|
aio_grpc_driver.py
|
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import asyncio
import logging
import threading
import time
from typing import Any, Dict, List
import grpc
from nvflare.fuel.f3.comm_config import CommConfigurator
from nvflare.fuel.f3.comm_error import CommError
from nvflare.fuel.f3.connection import BytesAlike, Connection
from nvflare.fuel.f3.drivers.aio_context import AioContext
from nvflare.fuel.f3.drivers.driver import ConnectorInfo
from nvflare.fuel.f3.drivers.grpc.streamer_pb2_grpc import (
StreamerServicer,
StreamerStub,
add_StreamerServicer_to_server,
)
from nvflare.security.logging import secure_format_exception, secure_format_traceback
from .base_driver import BaseDriver
from .driver_params import DriverCap, DriverParams
from .grpc.streamer_pb2 import Frame
from .net_utils import MAX_FRAME_SIZE, get_address, get_tcp_urls, ssl_required
GRPC_DEFAULT_OPTIONS = [
("grpc.max_send_message_length", MAX_FRAME_SIZE),
("grpc.max_receive_message_length", MAX_FRAME_SIZE),
]
class _ConnCtx:
def __init__(self):
self.conn = None
self.error = None
self.waiter = threading.Event()
class AioStreamSession(Connection):
seq_num = 0
def __init__(self, aio_ctx: AioContext, connector: ConnectorInfo, conn_props: dict, context=None, channel=None):
super().__init__(connector)
self.aio_ctx = aio_ctx
self.logger = logging.getLogger(self.__class__.__name__)
self.oq = asyncio.Queue(16)
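        # bounded output queue (16 frames) applies backpressure to fast senders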
self.closing = False
self.conn_props = conn_props
self.context = context # for server side
self.channel = channel # for client side
self.lock = threading.Lock()
def get_conn_properties(self) -> dict:
return self.conn_props
def close(self):
self.closing = True
with self.lock:
if self.context:
self.aio_ctx.run_coro(self.context.abort(grpc.StatusCode.CANCELLED, "service closed"))
self.context = None
if self.channel:
self.aio_ctx.run_coro(self.channel.close())
self.channel = None
def send_frame(self, frame: BytesAlike):
try:
AioStreamSession.seq_num += 1
seq = AioStreamSession.seq_num
f = Frame(seq=seq, data=bytes(frame))
self.aio_ctx.run_coro(self.oq.put(f))
except Exception as ex:
if not self.closing:
raise CommError(CommError.ERROR, f"Error sending frame on conn {self}: {secure_format_exception(ex)}")
async def read_loop(self, msg_iter):
ct = threading.current_thread()
self.logger.debug(f"{self}: started read_loop in thread {ct.name}")
try:
async for f in msg_iter:
if self.closing:
return
self.process_frame(f.data)
except grpc.aio.AioRpcError as error:
if not self.closing:
if error.code() == grpc.StatusCode.CANCELLED:
self.logger.debug(f"Connection {self} is closed by peer")
else:
self.logger.debug(f"Connection {self} Error: {error.details()}")
self.logger.debug(secure_format_traceback())
else:
self.logger.debug(f"Connection {self} is closed locally")
except Exception as ex:
if not self.closing:
self.logger.debug(f"{self}: exception {type(ex)} in read_loop: {secure_format_exception(ex)}")
self.logger.debug(secure_format_traceback())
self.logger.debug(f"{self}: in {ct.name}: done read_loop")
async def generate_output(self):
ct = threading.current_thread()
self.logger.debug(f"{self}: generate_output in thread {ct.name}")
try:
while True:
item = await self.oq.get()
yield item
except Exception as ex:
if self.closing:
self.logger.debug(f"{self}: connection closed by {type(ex)}: {secure_format_exception(ex)}")
else:
self.logger.debug(f"{self}: generate_output exception {type(ex)}: {secure_format_exception(ex)}")
self.logger.debug(secure_format_traceback())
self.logger.debug(f"{self}: done generate_output")
class Servicer(StreamerServicer):
def __init__(self, server, aio_ctx: AioContext):
self.server = server
self.aio_ctx = aio_ctx
self.logger = logging.getLogger(self.__class__.__name__)
async def _write_loop(self, connection, grpc_context):
self.logger.debug("started _write_loop")
try:
while True:
f = await connection.oq.get()
await grpc_context.write(f)
except Exception as ex:
self.logger.debug(f"_write_loop except: {type(ex)}: {secure_format_exception(ex)}")
self.logger.debug("finished _write_loop")
async def Stream(self, request_iterator, context):
connection = None
ct = threading.current_thread()
try:
self.logger.debug(f"SERVER started Stream CB in thread {ct.name}")
conn_props = {
DriverParams.PEER_ADDR.value: context.peer(),
DriverParams.LOCAL_ADDR.value: get_address(self.server.connector.params),
}
cn_names = context.auth_context().get("x509_common_name")
if cn_names:
conn_props[DriverParams.PEER_CN.value] = cn_names[0].decode("utf-8")
connection = AioStreamSession(
aio_ctx=self.aio_ctx,
connector=self.server.connector,
conn_props=conn_props,
context=context,
)
self.logger.debug(f"SERVER created connection in thread {ct.name}")
self.server.driver.add_connection(connection)
try:
await asyncio.gather(self._write_loop(connection, context), connection.read_loop(request_iterator))
except asyncio.CancelledError:
self.logger.debug("SERVER: RPC cancelled")
except Exception as ex:
self.logger.debug(f"await gather except: {type(ex)}: {secure_format_exception(ex)}")
self.logger.debug(f"SERVER: done await gather in thread {ct.name}")
except Exception as ex:
self.logger.debug(f"Connection closed due to error: {secure_format_exception(ex)}")
finally:
if connection:
with connection.lock:
connection.context = None
self.logger.debug(f"SERVER: closing connection {connection.name}")
self.server.driver.close_connection(connection)
self.logger.debug(f"SERVER: cleanly finished Stream CB in thread {ct.name}")
class Server:
def __init__(self, driver, connector, aio_ctx: AioContext, options, conn_ctx: _ConnCtx):
self.logger = logging.getLogger(self.__class__.__name__)
self.driver = driver
self.connector = connector
self.grpc_server = grpc.aio.server(options=options)
servicer = Servicer(self, aio_ctx)
add_StreamerServicer_to_server(servicer, self.grpc_server)
params = connector.params
host = params.get(DriverParams.HOST.value)
if not host:
host = "0.0.0.0"
port = int(params.get(DriverParams.PORT.value))
addr = f"{host}:{port}"
try:
self.logger.debug(f"SERVER: connector params: {params}")
secure = ssl_required(params)
if secure:
credentials = AioGrpcDriver.get_grpc_server_credentials(params)
self.grpc_server.add_secure_port(addr, server_credentials=credentials)
else:
self.grpc_server.add_insecure_port(addr)
except Exception as ex:
conn_ctx.error = f"cannot listen on {addr}: {type(ex)}: {secure_format_exception(ex)}"
self.logger.debug(conn_ctx.error)
async def start(self, conn_ctx: _ConnCtx):
self.logger.debug("starting grpc server")
try:
await self.grpc_server.start()
await self.grpc_server.wait_for_termination()
except Exception as ex:
conn_ctx.error = f"cannot start server: {type(ex)}: {secure_format_exception(ex)}"
raise ex
async def shutdown(self):
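        # give in-flight RPCs up to 0.5 seconds to finish before they are cancelled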
await self.grpc_server.stop(grace=0.5)
class AioGrpcDriver(BaseDriver):
aio_ctx = None
def __init__(self):
super().__init__()
self.server = None
self.options = GRPC_DEFAULT_OPTIONS
self.logger = logging.getLogger(self.__class__.__name__)
configurator = CommConfigurator()
config = configurator.get_config()
if config:
my_params = config.get("grpc")
if my_params:
self.options = my_params.get("options")
self.logger.debug(f"GRPC Config: options={self.options}")
self.closing = False
@staticmethod
def supported_transports() -> List[str]:
return ["grpc", "grpcs"]
@staticmethod
def capabilities() -> Dict[str, Any]:
return {DriverCap.HEARTBEAT.value: True, DriverCap.SUPPORT_SSL.value: True}
async def _start_server(self, connector: ConnectorInfo, aio_ctx: AioContext, conn_ctx: _ConnCtx):
self.connector = connector
self.server = Server(self, connector, aio_ctx, options=self.options, conn_ctx=conn_ctx)
if not conn_ctx.error:
try:
conn_ctx.conn = True
await self.server.start(conn_ctx)
except Exception as ex:
if not self.closing:
self.logger.debug(secure_format_traceback())
conn_ctx.error = f"failed to start server: {type(ex)}: {secure_format_exception(ex)}"
conn_ctx.waiter.set()
def listen(self, connector: ConnectorInfo):
self.logger.debug(f"listen called from thread {threading.current_thread().name}")
self.connector = connector
aio_ctx = AioContext.get_global_context()
conn_ctx = _ConnCtx()
aio_ctx.run_coro(self._start_server(connector, aio_ctx, conn_ctx))
while not conn_ctx.conn and not conn_ctx.error:
self.logger.debug("SERVER: waiting for server to be started")
time.sleep(0.1)
if conn_ctx.error:
raise CommError(code=CommError.ERROR, message=conn_ctx.error)
self.logger.debug("SERVER: waiting for server to finish")
conn_ctx.waiter.wait()
self.logger.debug("SERVER: server is done")
async def _start_connect(self, connector: ConnectorInfo, aio_ctx: AioContext, conn_ctx: _ConnCtx):
self.logger.debug("Started _start_connect coro")
self.connector = connector
params = connector.params
address = get_address(params)
self.logger.debug(f"CLIENT: trying to connect {address}")
        connection = None
        try:
secure = ssl_required(params)
if secure:
grpc_channel = grpc.aio.secure_channel(
address, options=self.options, credentials=self.get_grpc_client_credentials(params)
)
else:
grpc_channel = grpc.aio.insecure_channel(address, options=self.options)
async with grpc_channel as channel:
self.logger.debug(f"CLIENT: connected to {address}")
stub = StreamerStub(channel)
self.logger.debug("CLIENT: got stub!")
conn_props = {DriverParams.PEER_ADDR.value: address}
if secure:
conn_props[DriverParams.PEER_CN.value] = "N/A"
connection = AioStreamSession(
aio_ctx=aio_ctx, connector=connector, conn_props=conn_props, channel=channel
)
try:
self.logger.debug(f"CLIENT: start streaming on connection {connection}")
msg_iter = stub.Stream(connection.generate_output())
conn_ctx.conn = connection
await connection.read_loop(msg_iter)
except asyncio.CancelledError as error:
self.logger.debug(f"CLIENT: RPC cancelled: {error}")
except Exception as ex:
if self.closing:
self.logger.debug(
f"Connection {connection} closed by {type(ex)}: {secure_format_exception(ex)}"
)
else:
self.logger.debug(
f"Connection {connection} client read exception {type(ex)}: {secure_format_exception(ex)}"
)
self.logger.debug(secure_format_traceback())
with connection.lock:
connection.channel = None
connection.close()
except asyncio.CancelledError:
self.logger.debug("CLIENT: RPC cancelled")
except Exception as ex:
conn_ctx.error = f"connection {connection} error: {type(ex)}: {secure_format_exception(ex)}"
if self.closing:
self.logger.debug(conn_ctx.error)
else:
self.logger.debug(conn_ctx.error)
self.logger.debug(secure_format_traceback())
conn_ctx.waiter.set()
def connect(self, connector: ConnectorInfo):
self.logger.debug("CLIENT: connect called")
aio_ctx = AioContext.get_global_context()
conn_ctx = _ConnCtx()
aio_ctx.run_coro(self._start_connect(connector, aio_ctx, conn_ctx))
time.sleep(0.2)
while not conn_ctx.conn and not conn_ctx.error:
self.logger.debug("CLIENT: waiting for connection")
time.sleep(0.1)
if conn_ctx.error:
raise CommError(CommError.ERROR, conn_ctx.error)
self.add_connection(conn_ctx.conn)
conn_ctx.waiter.wait()
self.close_connection(conn_ctx.conn)
def shutdown(self):
if self.closing:
return
self.closing = True
self.close_all()
if self.server:
aio_ctx = AioContext.get_global_context()
aio_ctx.run_coro(self.server.shutdown())
@staticmethod
def get_urls(scheme: str, resources: dict) -> (str, str):
secure = resources.get(DriverParams.SECURE)
if secure:
scheme = "grpcs"
return get_tcp_urls(scheme, resources)
@staticmethod
def get_grpc_client_credentials(params: dict):
root_cert = AioGrpcDriver.read_file(params.get(DriverParams.CA_CERT.value))
        cert_chain = AioGrpcDriver.read_file(params.get(DriverParams.CLIENT_CERT.value))
        private_key = AioGrpcDriver.read_file(params.get(DriverParams.CLIENT_KEY.value))
return grpc.ssl_channel_credentials(
certificate_chain=cert_chain, private_key=private_key, root_certificates=root_cert
)
@staticmethod
def get_grpc_server_credentials(params: dict):
root_cert = AioGrpcDriver.read_file(params.get(DriverParams.CA_CERT.value))
        cert_chain = AioGrpcDriver.read_file(params.get(DriverParams.SERVER_CERT.value))
        private_key = AioGrpcDriver.read_file(params.get(DriverParams.SERVER_KEY.value))
return grpc.ssl_server_credentials(
[(private_key, cert_chain)],
root_certificates=root_cert,
require_client_auth=True,
)
@staticmethod
def read_file(file_name: str):
if not file_name:
return None
with open(file_name, "rb") as f:
return f.read()
|
4b822e4a906918402694ff484087674ea1eb0c23
|
1518698c3f7c70912f4079261a3b7b81608bed63
|
/python/pyxel/editor/app.py
|
11c94c4284e5a39a06574a6dbf08cf1cdbd12100
|
[
"MIT"
] |
permissive
|
kitao/pyxel
|
2cff908a302ed316b31f55511977769e4c356fa5
|
5bffc3516e6f961b45098512ea90bf08c804d71e
|
refs/heads/main
| 2023-07-13T19:46:27.473953
| 2023-07-12T14:23:47
| 2023-07-12T14:23:47
| 136,780,445
| 13,103
| 1,060
|
MIT
| 2023-08-27T13:58:10
| 2018-06-10T04:58:54
|
Python
|
UTF-8
|
Python
| false
| false
| 7,290
|
py
|
app.py
|
import os
import pyxel
from .image_editor import ImageEditor
from .music_editor import MusicEditor
from .settings import APP_HEIGHT, APP_WIDTH, EDITOR_IMAGE, HELP_MESSAGE_COLOR
from .sound_editor import SoundEditor
from .tilemap_editor import TilemapEditor
from .widgets import ImageButton, RadioButton, Widget
from .widgets.settings import (
WIDGET_BACKGROUND_COLOR,
WIDGET_HOLD_TIME,
WIDGET_PANEL_COLOR,
WIDGET_REPEAT_TIME,
WIDGET_SHADOW_COLOR,
)
class App(Widget):
"""
Variables:
editor_no_var
help_message_var
"""
def __init__(self, resource_file, starting_editor):
# Get absolute path of resource file before initializing Pyxel
original_resource_file = resource_file
resource_file = os.path.abspath(resource_file)
# Check if resource file can be saved
if os.path.isdir(resource_file):
print(f"A directory named '{original_resource_file}' exists")
exit(1)
if not os.path.isdir(os.path.dirname(resource_file)):
print(f"Directory for '{original_resource_file}' does not exist")
exit(1)
# Initialize Pyxel
pyxel.init(APP_WIDTH, APP_HEIGHT, quit_key=pyxel.KEY_NONE)
pyxel.mouse(True)
pyxel.pal2()
self._set_title(original_resource_file)
if os.path.exists(resource_file):
pyxel.load(resource_file)
# Start initializing application
super().__init__(None, 0, 0, pyxel.width, pyxel.height)
self._resource_file = resource_file
# Initialize help_message_var
self.new_var("help_message_var", "")
# Initialize editor button
self._editor_button = RadioButton(
self,
1,
1,
img=EDITOR_IMAGE,
u=0,
v=0,
num_buttons=4,
value={"image": 0, "tilemap": 1, "sound": 2, "music": 3}.get(
starting_editor, 0
),
)
self._editor_button.add_event_listener("change", self.__on_editor_button_change)
self._editor_button.add_event_listener(
"mouse_hover", self.__on_editor_button_mouse_hover
)
self.copy_var("editor_no_var", self._editor_button, "value_var")
# Initialize undo button
self._undo_button = ImageButton(
self,
48,
1,
img=EDITOR_IMAGE,
u=36,
v=0,
)
self._undo_button.add_event_listener("press", self.__on_undo_button_press)
self._undo_button.add_event_listener(
"mouse_hover", self.__on_undo_button_mouse_hover
)
# Initialize redo button
self._redo_button = ImageButton(
self,
57,
1,
img=EDITOR_IMAGE,
u=45,
v=0,
)
self._redo_button.add_event_listener("press", self.__on_redo_button_press)
self._redo_button.add_event_listener(
"mouse_hover", self.__on_redo_button_mouse_hover
)
# Initialize save button
self._save_button = ImageButton(
self,
75,
1,
img=EDITOR_IMAGE,
u=54,
v=0,
)
self._save_button.add_event_listener("press", self.__on_save_button_press)
self._save_button.add_event_listener(
"mouse_hover", self.__on_save_button_mouse_hover
)
# Initialize editors
self._editors = [
ImageEditor(self),
TilemapEditor(self),
SoundEditor(self),
MusicEditor(self),
]
self.__on_editor_button_change(self.editor_no_var)
# Set event listeners
self.add_event_listener("update", self.__on_update)
self.add_event_listener("draw", self.__on_draw)
# Start application
pyxel.run(self.update_all, self.draw_all)
@property
def _editor(self):
return self._editors[self.editor_no_var]
@staticmethod
def _set_title(filename):
pyxel.title(f"Pyxel Editor - {filename}")
def __on_editor_button_change(self, value):
for i, editor in enumerate(self._editors):
editor.is_visible_var = i == value
def __on_editor_button_mouse_hover(self, x, y):
self.help_message_var = "EDITOR:ALT+LEFT/RIGHT"
def __on_undo_button_press(self):
self._editor.undo()
def __on_undo_button_mouse_hover(self, x, y):
self.help_message_var = "UNDO:CTRL+Z"
def __on_redo_button_press(self):
self._editor.redo()
def __on_redo_button_mouse_hover(self, x, y):
self.help_message_var = "REDO:CTRL+Y"
def __on_save_button_press(self):
pyxel.save(self._resource_file)
def __on_save_button_mouse_hover(self, x, y):
self.help_message_var = "SAVE:CTRL+S"
def __on_update(self):
if pyxel.drop_files:
drop_file = pyxel.drop_files[-1]
file_ext = os.path.splitext(drop_file)[1]
if file_ext == pyxel.RESOURCE_FILE_EXTENSION:
pyxel.stop()
if pyxel.btn(pyxel.KEY_CTRL) or pyxel.btn(pyxel.KEY_GUI):
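                    # With Ctrl/Cmd held, load only the resource type handled by the active editor.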
self._editor.reset_history()
pyxel.load(
                        drop_file,
image=(self.editor_no_var == 0),
tilemap=(self.editor_no_var == 1),
sound=(self.editor_no_var == 2),
music=(self.editor_no_var == 3),
)
else:
for editor in self._editors:
editor.reset_history()
pyxel.load(drop_file)
self._set_title(drop_file)
else:
self._editor.trigger_event("drop", drop_file)
if pyxel.btn(pyxel.KEY_ALT):
# Alt+Left: Switch editor
if pyxel.btnp(pyxel.KEY_LEFT):
self.editor_no_var = (self.editor_no_var - 1) % len(self._editors)
# Alt+Right: Switch editor
elif pyxel.btnp(pyxel.KEY_RIGHT):
self.editor_no_var = (self.editor_no_var + 1) % len(self._editors)
self._undo_button.is_enabled_var = self._editor.can_undo
self._redo_button.is_enabled_var = self._editor.can_redo
if pyxel.btn(pyxel.KEY_CTRL) or pyxel.btn(pyxel.KEY_GUI):
# Ctrl+S: Save
if pyxel.btnp(pyxel.KEY_S):
self._save_button.is_pressed_var = True
# Ctrl+Z: Undo
if self._editor.can_undo and pyxel.btnp(
pyxel.KEY_Z, WIDGET_HOLD_TIME, WIDGET_REPEAT_TIME
):
self._undo_button.is_pressed_var = True
# Ctrl+Y: Redo
elif self._editor.can_redo and pyxel.btnp(
pyxel.KEY_Y, WIDGET_HOLD_TIME, WIDGET_REPEAT_TIME
):
self._redo_button.is_pressed_var = True
def __on_draw(self):
pyxel.cls(WIDGET_BACKGROUND_COLOR)
pyxel.rect(0, 0, 240, 9, WIDGET_PANEL_COLOR)
pyxel.line(0, 9, 239, 9, WIDGET_SHADOW_COLOR)
pyxel.text(93, 2, self.help_message_var, HELP_MESSAGE_COLOR)
self.help_message_var = ""
|
4cd6996760a560df50e41009821418ca5cddabe2
|
25a6ea5c1cfbec72c5899ea49984de14080af680
|
/tests/input_files/config.py
|
edf04d5e3350b105419eb8a71e6d0d333c689001
|
[
"MIT"
] |
permissive
|
statgen/pheweb
|
a08aa0cabcffdcd5bd727ef518a1b2f05c64333c
|
1ecace294c1313a6efff11d775d4bc6cfde54b23
|
refs/heads/master
| 2023-08-31T14:37:15.397202
| 2023-08-21T16:33:29
| 2023-08-21T16:33:29
| 52,133,265
| 135
| 67
|
MIT
| 2023-07-18T19:28:00
| 2016-02-20T02:53:21
|
Python
|
UTF-8
|
Python
| false
| false
| 1,115
|
py
|
config.py
|
# this file will be interpreted as python3
urlprefix = '/test'
# Minor allele frequency (MAF) filters:
# Note:
# "Association" means an association between a variant and a phenotype.
# Every association has a p-value. It may also have other attributes.
# MAF-filters will apply to allele frequency (AF) and allele count (AC) (if PheWeb knows num_samples for the phenotype)
# First, PheWeb drops any association with a MAF < assoc_min_maf.
# Next, PheWeb drops any variant where every association has MAF < variant_inclusion_maf.
# In a dataset where all associations to a given variant have the same MAF, the two filters do the same thing.
# - in that case, use `assoc_min_maf` to save disk space and parse time.
# If variant_inclusion_maf <= assoc_min_maf, it won't have any effect.
# Using assoc_min_maf will save disk space, even if you're already using variant_inclusion_maf.
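# Example with the values below: an association with MAF 0.003 is dropped outright
# (assoc_min_maf = 0.005), and a variant is kept only if at least one of its
# associations has MAF >= 0.01 (variant_inclusion_maf).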
assoc_min_maf = 0.005
variant_inclusion_maf = 0.01
# num_procs = 1 # for debugging convenience.
# directory for caching large (~1GB) common files like dbsnp
cache_dir = './fake-cache'
disallow_downloads = True
|
404a2a3212a2771d6460fc63786e1acdd3cd4adf
|
f576f0ea3725d54bd2551883901b25b863fe6688
|
/sdk/remoterendering/azure-mixedreality-remoterendering/tests/test_client_async.py
|
199eed12fc29e29c0f3bd2c1569d75eb8913898c
|
[
"LicenseRef-scancode-generic-cla",
"MIT",
"LGPL-2.1-or-later"
] |
permissive
|
Azure/azure-sdk-for-python
|
02e3838e53a33d8ba27e9bcc22bd84e790e4ca7c
|
c2ca191e736bb06bfbbbc9493e8325763ba990bb
|
refs/heads/main
| 2023-09-06T09:30:13.135012
| 2023-09-06T01:08:06
| 2023-09-06T01:08:06
| 4,127,088
| 4,046
| 2,755
|
MIT
| 2023-09-14T21:48:49
| 2012-04-24T16:46:12
|
Python
|
UTF-8
|
Python
| false
| false
| 12,404
|
py
|
test_client_async.py
|
# -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
import pytest
import uuid
from azure.core.credentials import AzureKeyCredential
from azure.core.exceptions import HttpResponseError
from azure.mixedreality.remoterendering.aio import RemoteRenderingClient
from azure.mixedreality.remoterendering import (
AssetConversionInputSettings,
AssetConversionOutputSettings,
AssetConversionStatus,
RenderingSessionSize,
RenderingSessionStatus
)
from devtools_testutils import AzureRecordedTestCase
class TestRemoteRenderingClientAsync(AzureRecordedTestCase):
def test_create_client(self, account_info):
client = RemoteRenderingClient(
endpoint=account_info["service_endpoint"],
account_id=account_info["account_id"],
account_domain=account_info["account_domain"],
credential=account_info["key_credential"]
)
assert client is not None
def test_create_client_with_invalid_arguments(self, account_info):
with pytest.raises(ValueError):
RemoteRenderingClient(
endpoint=None,
account_id=account_info["account_id"],
account_domain=account_info["account_domain"],
credential=account_info["key_credential"])
with pytest.raises(ValueError):
RemoteRenderingClient(
endpoint=account_info["service_endpoint"],
account_id=None,
account_domain=account_info["account_domain"],
credential=account_info["key_credential"])
with pytest.raises(ValueError):
RemoteRenderingClient(
endpoint=account_info["service_endpoint"],
account_id=account_info["account_id"],
account_domain=None,
credential=account_info["key_credential"])
with pytest.raises(ValueError):
RemoteRenderingClient(
endpoint=account_info["service_endpoint"],
account_id=account_info["account_id"],
account_domain=account_info["account_domain"],
credential=None)
with pytest.raises(ValueError):
RemoteRenderingClient(
endpoint=account_info["service_endpoint"],
account_id=account_info["account_id"],
account_domain=account_info["account_domain"],
credential=account_info["key_credential"],
authentication_endpoint_url="#")
@pytest.mark.asyncio
async def test_simple_conversion(self, recorded_test, account_info, async_arr_client):
conversion_id = account_info["id_placeholder"]
if self.is_live:
conversion_id += str(uuid.uuid4())
storage_container_uri = "https://"+account_info["storage_account_name"] + \
".blob."+account_info["storage_endpoint_suffix"]+"/"+account_info["blob_container_name"]
input_settings = AssetConversionInputSettings(
storage_container_uri=storage_container_uri,
relative_input_asset_path="testBox.fbx",
blob_prefix="Input",
storage_container_read_list_sas="?"+account_info["sas_token"]
)
output_settings = AssetConversionOutputSettings(
storage_container_uri=storage_container_uri,
blob_prefix=conversion_id,
storage_container_write_sas="?"+account_info["sas_token"]
)
conversion_poller = await async_arr_client.begin_asset_conversion(
conversion_id=conversion_id, input_settings=input_settings, output_settings=output_settings
)
conversion = await async_arr_client.get_asset_conversion(conversion_id)
assert conversion.id == conversion_id
assert conversion.settings.input_settings.relative_input_asset_path == input_settings.relative_input_asset_path
assert conversion.status != AssetConversionStatus.FAILED
finished_conversion = await conversion_poller.result()
assert finished_conversion.id == conversion_id
assert finished_conversion.settings.input_settings.relative_input_asset_path == input_settings.relative_input_asset_path
assert finished_conversion.status == AssetConversionStatus.SUCCEEDED
        assert finished_conversion.output.asset_uri.endswith(conversion_id + "/testBox.arrAsset")
foundConversion = False
conversions = await async_arr_client.list_asset_conversions()
async for c in conversions:
            if c.id == conversion_id:
foundConversion = True
break
        assert foundConversion
@pytest.mark.asyncio
async def test_failed_conversion_unauthorized(self, recorded_test, account_info):
client = RemoteRenderingClient(
endpoint=account_info["service_endpoint"],
account_id=account_info["account_id"],
account_domain=account_info["account_domain"],
credential=AzureKeyCredential("wrong_key")
)
conversion_id = account_info["id_placeholder"]
if self.is_live:
conversion_id += str(uuid.uuid4())
storage_container_uri = "https://"+account_info["storage_account_name"] + \
".blob."+account_info["storage_endpoint_suffix"]+"/"+account_info["blob_container_name"]
input_settings = AssetConversionInputSettings(
storage_container_uri=storage_container_uri,
relative_input_asset_path="testBox.fbx",
blob_prefix="Input"
# Do not provide SAS access to the container, and assume the test account is not linked to the storage.
)
output_settings = AssetConversionOutputSettings(
storage_container_uri=storage_container_uri,
blob_prefix=conversion_id
# Do not provide SAS access to the container, and assume the test account is not linked to the storage.
)
with pytest.raises(HttpResponseError) as excinfo:
# make the request which cannot access the storage account
conversion_poller = await client.begin_asset_conversion(
conversion_id=conversion_id, input_settings=input_settings, output_settings=output_settings
)
exception = excinfo.value
assert exception.status_code == 401
assert "Unauthorized" in exception.message
@pytest.mark.asyncio
async def test_failed_conversion_no_access(self, recorded_test, account_info, async_arr_client):
conversion_id = account_info["id_placeholder"]
if self.is_live:
conversion_id += str(uuid.uuid4())
storage_container_uri = "https://"+account_info["storage_account_name"] + \
".blob."+account_info["storage_endpoint_suffix"]+"/"+account_info["blob_container_name"]
input_settings = AssetConversionInputSettings(
storage_container_uri=storage_container_uri,
relative_input_asset_path="testBox.fbx",
blob_prefix="Input"
# Do not provide SAS access to the container, and assume the test account is not linked to the storage.
)
output_settings = AssetConversionOutputSettings(
storage_container_uri=storage_container_uri,
blob_prefix=conversion_id
# Do not provide SAS access to the container, and assume the test account is not linked to the storage.
)
with pytest.raises(HttpResponseError) as excinfo:
# make the request which cannot access the storage account
conversion_poller = await async_arr_client.begin_asset_conversion(
conversion_id=conversion_id, input_settings=input_settings, output_settings=output_settings
)
assert excinfo.value.status_code == 403
error_details = excinfo.value
assert "storage" in error_details.message
assert "permissions" in error_details.message
@pytest.mark.asyncio
async def test_failed_conversion_missing_asset(self, recorded_test, account_info, async_arr_client):
conversion_id = account_info["id_placeholder"]
if self.is_live:
conversion_id += str(uuid.uuid4())
storage_container_uri = "https://"+account_info["storage_account_name"] + \
".blob."+account_info["storage_endpoint_suffix"]+"/"+account_info["blob_container_name"]
input_settings = AssetConversionInputSettings(
storage_container_uri=storage_container_uri,
relative_input_asset_path="testBoxWhichDoesNotExist.fbx",
blob_prefix="Input",
storage_container_read_list_sas="?"+account_info["sas_token"]
)
output_settings = AssetConversionOutputSettings(
storage_container_uri=storage_container_uri,
blob_prefix=conversion_id,
storage_container_write_sas="?"+account_info["sas_token"]
)
with pytest.raises(HttpResponseError) as excinfo:
conversion_poller = await async_arr_client.begin_asset_conversion(
conversion_id=conversion_id, input_settings=input_settings, output_settings=output_settings
)
await conversion_poller.result()
error_details = excinfo.value
assert "invalid input" in error_details.error.message.lower()
assert "logs" in error_details.error.message.lower()
@pytest.mark.asyncio
async def test_simple_session(self, recorded_test, account_info, async_arr_client):
session_id = account_info["id_placeholder"]
if self.is_live:
session_id += str(uuid.uuid4())
session_poller = await async_arr_client.begin_rendering_session(
session_id=session_id, size=RenderingSessionSize.STANDARD, lease_time_minutes=15)
session = await async_arr_client.get_rendering_session(session_id)
assert session.id == session_id
assert session.size == RenderingSessionSize.STANDARD
assert session.lease_time_minutes == 15
assert session.status != RenderingSessionStatus.ERROR
ready_session = await session_poller.result()
assert ready_session.id == session_id
assert ready_session.size == RenderingSessionSize.STANDARD
assert ready_session.lease_time_minutes == 15
assert ready_session.status == RenderingSessionStatus.READY
assert ready_session.hostname
assert ready_session.arr_inspector_port is not None
assert ready_session.handshake_port is not None
extended_session = await async_arr_client.update_rendering_session(session_id=session_id, lease_time_minutes=20)
assert extended_session.id == session_id
assert extended_session.size == RenderingSessionSize.STANDARD
        assert extended_session.lease_time_minutes in (15, 20)
assert extended_session.status == RenderingSessionStatus.READY
foundSession = False
async for s in await async_arr_client.list_rendering_sessions():
if s.id == session_id:
foundSession = True
break
        assert foundSession
await async_arr_client.stop_rendering_session(session_id)
stopped_session = await async_arr_client.get_rendering_session(session_id)
assert stopped_session.status == RenderingSessionStatus.STOPPED
@pytest.mark.asyncio
async def test_failed_session_request(self, recorded_test, account_info, async_arr_client):
session_id = account_info["id_placeholder"]
if self.is_live:
session_id += str(uuid.uuid4())
with pytest.raises(HttpResponseError) as excinfo:
# Make an invalid request (negative lease time).
session_poller = await async_arr_client.begin_rendering_session(
session_id=session_id, size=RenderingSessionSize.STANDARD, lease_time_minutes=-4)
assert excinfo.value.status_code == 400
exception = excinfo.value
assert "lease" in exception.message.lower()
assert "negative" in exception.message.lower()
|
25cf5359e3541b5140fde55ef897374246d2cb4e
|
eda6e7b8f399dedcdb960f4b48a2134b978f8d83
|
/tests/zzz_deprecated_unmaintained/movetests/MergeMoveEndToEndTest.py
|
6d2c41e148ee92c3e7f203ea9a6d6dd31b4be4d5
|
[
"BSD-3-Clause"
] |
permissive
|
bnpy/bnpy
|
8ed61bc4fe2f0ed99e0254c11a21c27c0cee59b2
|
ffc2242427451aa6a61dcac1473c47577a5ade6f
|
refs/heads/master
| 2023-08-16T06:49:58.716279
| 2022-10-15T15:59:12
| 2022-10-15T15:59:12
| 75,731,181
| 197
| 54
|
NOASSERTION
| 2023-07-21T20:59:10
| 2016-12-06T12:56:07
|
Python
|
UTF-8
|
Python
| false
| false
| 20,554
|
py
|
MergeMoveEndToEndTest.py
|
'''
Generic tests for using merge moves during model training with bnpy.
'''
import os
import sys
import numpy as np
import unittest
from nose.plugins.attrib import attr
import bnpy
def arg2name(aArg):
    if isinstance(aArg, dict):
        aName = aArg['name']
    elif isinstance(aArg, str):
        aName = aArg
    else:
        # Fail loudly instead of raising UnboundLocalError at the return below.
        raise TypeError("aArg must be a dict with a 'name' key or a str")
    return aName
def pprintResult(model, Info, Ktrue=0):
""" Pretty print the result of a learning algorithm.
"""
hdist_str = ''
if 'outputdir' in Info and Info['outputdir'] is not None:
hdistfile = os.path.join(Info['outputdir'], 'hamming-distance.txt')
if os.path.exists(hdistfile):
hdist_str = 'hdist=' + '%.3f' % (float(np.loadtxt(hdistfile)[-1]))
print(" %25s after %4.1f sec and %4d laps. ELBO=% 7.5f %s K=%d Ktrue=%d"\
% (Info['status'][:25],
Info['elapsedTimeInSec'],
Info['lapTrace'][-1],
Info['evBound'],
hdist_str,
model.allocModel.K,
Ktrue,
))
def pprint(val):
""" Pretty print the provided value.
"""
if isinstance(val, str):
print(' %s' % (val[:40]))
elif hasattr(val, 'items'):
firstMsg = ''
msg = ''
for (k, v) in list(val.items()):
if k.count('name'):
firstMsg = str(v)
else:
msg += " %s=%s" % (k, str(v))
print(' ' + firstMsg + ' ' + msg)
def pprintCommandToReproduceError(dataArg, aArg, oArg, algName, **kwargs):
for key, val in list(dataArg.items()):
if key == 'name':
continue
kwargs[key] = val
del kwargs['doWriteStdOut']
del kwargs['doSaveToDisk']
kwargs['printEvery'] = 1
kwstr = ' '.join(['--%s %s' % (key, kwargs[key]) for key in kwargs])
print("python -m bnpy.Run %s %s %s %s %s" % (
dataArg['name'],
aArg['name'],
oArg['name'],
algName,
kwstr,
))
def is_monotonic(ELBOvec, aArg=None, atol=1e-5, verbose=True):
''' Returns True if provided vector monotonically increases, False o.w.
Returns
-------
result : boolean (True or False)
'''
if aArg is not None:
if 'name' in aArg:
if aArg['name'] == 'HDPTopicModel':
# ELBO can fluctuate more due to no caching at localstep
atol = 1e-3
ELBOvec = np.asarray(ELBOvec, dtype=np.float64)
assert ELBOvec.ndim == 1
diff = ELBOvec[1:] - ELBOvec[:-1]
maskIncrease = diff > 0
maskWithinTol = np.abs(diff) < atol
maskOK = np.logical_or(maskIncrease, maskWithinTol)
isMonotonic = np.all(maskOK)
    if not isMonotonic and verbose:
        print("NOT MONOTONIC!")
        print('  %d violations in vector of size %d. Biggest drop %.8f' \
            % (np.sum(1 - maskOK), ELBOvec.size, diff[diff < 0].min()))
return isMonotonic
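# Example usage (illustrative; not part of the original test suite):
#   is_monotonic([1.0, 1.5, 1.5 - 1e-7, 2.0])   # -> True: the tiny dip is within atol
#   is_monotonic([1.0, 0.5, 2.0])               # -> False, and prints a violation summary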
class MergeMoveEndToEndTest(unittest.TestCase):
""" Defines test exercises for executing bnpy.run on provided dataset.
Attributes
----
Data : bnpy.data.DataObj
dataset under testing
"""
__test__ = False # Do not execute this abstract module!
def shortDescription(self):
return None
    def makeAllKwArgs(self, aArg, obsArg, initArg=None,
                      **kwargs):
        # Use None as the default to avoid a shared mutable default argument.
        if initArg is None:
            initArg = dict()
        allKwargs = dict(
doSaveToDisk=False,
doWriteStdOut=False,
saveEvery=-1,
printEvery=-1,
traceEvery=1,
convergeThr=0.0001,
doFullPassBeforeMstep=1,
nLap=300,
nBatch=2,
mergeStartLap=2,
deleteStartLap=2,
nCoordAscentItersLP=50,
convThrLP=0.001,
creationProposalName='randBlocks',
minBlockSize=10,
maxBlockSize=50,
doVizSeqCreate=0,
)
allKwargs.update(kwargs)
allKwargs.update(aArg)
allKwargs.update(obsArg)
allKwargs.update(initArg)
allKwargs.update(self.datasetArg)
if allKwargs['moves'].count('delete'):
try:
MaxSize = 0.5 * int(self.datasetArg['nDocTotal'])
except KeyError:
MaxSize = 0.5 * int(self.datasetArg['nObsTotal'])
allKwargs['dtargetMaxSize'] = int(MaxSize)
if aArg['name'] == 'HDPTopicModel':
allKwargs['mergePairSelection'] = 'corrlimitdegree'
else:
allKwargs['mergePairSelection'] = 'wholeELBObetter'
return allKwargs
def run_MOVBWithMoves(self, aArg, oArg,
moves='merge',
algName='moVB',
nWorkers=0,
**kwargs):
""" Execute single run with merge moves enabled.
Post Condition
--------------
Will raise AssertionError if any bad results detected.
"""
Ktrue = self.Data.TrueParams['K']
pprint(aArg)
pprint(oArg)
initArg = dict(**kwargs)
pprint(initArg)
kwargs = self.makeAllKwArgs(
aArg, oArg, initArg,
moves=moves, nWorkers=nWorkers, **kwargs)
model, Info = bnpy.run(
self.Data, arg2name(aArg), arg2name(oArg), algName, **kwargs)
pprintResult(model, Info, Ktrue=Ktrue)
afterFirstLapMask = Info['lapTrace'] >= 1.0
evTraceAfterFirstLap = Info['evTrace'][afterFirstLapMask]
isMonotonic = is_monotonic(evTraceAfterFirstLap,
aArg=aArg)
try:
assert isMonotonic
assert model.allocModel.K == model.obsModel.K
assert model.allocModel.K == Ktrue
except AssertionError as e:
pprintCommandToReproduceError(
self.datasetArg, aArg, oArg, algName, **kwargs)
assert isMonotonic
assert model.allocModel.K == model.obsModel.K
if not model.allocModel.K == Ktrue:
print('>>>>>> WHOA! Kfinal != Ktrue <<<<<<')
return Info
def run_MOVBWithMoves_SegmentManySeq(
self, aArg, oArg, moves='merge,delete,shuffle,seqcreate',
algName='moVB',
nWorkers=0,
**kwargs):
""" Execute single run with all moves enabled.
Post Condition
--------------
Will raise AssertionError if any bad results detected.
"""
self.Data.alwaysTrackTruth = 1
Ktrue = np.unique(self.Data.TrueParams['Z']).size
pprint(aArg)
pprint(oArg)
initArg = dict(**kwargs)
pprint(initArg)
viterbiPath = os.path.expandvars(
'$BNPYROOT/bnpy/learnalg/extras/XViterbi.py')
kwargs = self.makeAllKwArgs(aArg, oArg, initArg,
moves=moves, nWorkers=nWorkers,
customFuncPath=viterbiPath,
doSaveToDisk=1,
doWriteStdOut=1,
printEvery=1,
saveEvery=1000,
**kwargs)
kwargs['jobname'] += '-creationProposalName=%s' % (
kwargs['creationProposalName'])
model, Info = bnpy.run(
self.Data, arg2name(aArg), arg2name(oArg), algName, **kwargs)
pprintResult(model, Info, Ktrue=Ktrue)
try:
assert model.allocModel.K == model.obsModel.K
assert model.allocModel.K == Ktrue
except AssertionError as e:
pprintCommandToReproduceError(
self.datasetArg, aArg, oArg, algName, **kwargs)
assert model.allocModel.K == model.obsModel.K
if not model.allocModel.K == Ktrue:
print('>>>>>> WHOA! Kfinal != Ktrue <<<<<<')
print('')
return Info
def run_MOVBWithMoves_SegmentSingleSeq(
self, aArg, oArg,
moves='merge,delete,shuffle,seqcreate',
algName='moVB', nWorkers=0, n=0, **kwargs):
""" Execute single run with all moves enabled.
Post Condition
--------------
Will raise AssertionError if any bad results detected.
"""
if hasattr(self.Data, 'nDoc'):
Data_n = self.Data.select_subset_by_mask(
[n], doTrackTruth=1, doTrackFullSize=0)
assert Data_n.nDocTotal == 1
else:
# Make GroupXData dataset from XData
# This code block rearranges rows so that we
# cycle thru the true labels twice as contig blocks.
zTrue = self.Data.TrueParams['Z']
half1 = list()
half2 = list()
for uID in np.unique(zTrue):
dataIDs = np.flatnonzero(zTrue == uID)
Nk = dataIDs.size
                half1.append(dataIDs[:Nk // 2])  # //: slice indices must be ints in Python 3
                half2.append(dataIDs[Nk // 2:])
dIDs_1 = np.hstack([x for x in half1])
dIDs_2 = np.hstack([x for x in half2])
dIDs = np.hstack([dIDs_1, dIDs_2])
Data_n = bnpy.data.GroupXData(
X=self.Data.X[dIDs],
doc_range=np.asarray([0, self.Data.nObs]),
TrueZ=self.Data.TrueParams['Z'][dIDs])
aArg['name'] = 'HDPHMM'
aArg['hmmKappa'] = 50
aArg['alpha'] = 0.5
aArg['gamma'] = 10.0
aArg['startAlpha'] = 10.0
Data_n.name = self.Data.name
Data_n.alwaysTrackTruth = 1
if hasattr(self.Data, 'TrueParams'):
assert hasattr(Data_n, 'TrueParams')
Ktrue = np.unique(Data_n.TrueParams['Z']).size
pprint(aArg)
pprint(oArg)
initArg = dict(**kwargs)
pprint(initArg)
viterbiPath = os.path.expandvars(
'$BNPYROOT/bnpy/learnalg/extras/XViterbi.py')
kwargs = self.makeAllKwArgs(aArg, oArg, initArg,
moves=moves, nWorkers=nWorkers,
customFuncPath=viterbiPath,
doSaveToDisk=1,
doWriteStdOut=1,
printEvery=1,
saveEvery=1000,
nBatch=1,
**kwargs)
kwargs['jobname'] += '-creationProposalName=%s' % (
kwargs['creationProposalName'])
model, Info = bnpy.run(
Data_n, arg2name(aArg), arg2name(oArg), algName, **kwargs)
        pprintResult(model, Info, Ktrue=Ktrue)
try:
assert model.allocModel.K == model.obsModel.K
assert model.allocModel.K == Ktrue
except AssertionError as e:
pprintCommandToReproduceError(
self.datasetArg, aArg, oArg, algName, **kwargs)
assert model.allocModel.K == model.obsModel.K
if not model.allocModel.K == Ktrue:
print('>>>>>> WHOA! Kfinal != Ktrue <<<<<<')
print('')
'''
from bnpy.viz import SequenceViz
SequenceViz.plotSingleJob(
self.Data.name, kwargs['jobname'],
taskids='1', lap='final',
sequences=[1],
showELBOInTitle=False,
dispTrue=True,
aspectFactor=4.0,
specialStateIDs=None,
cmap='Set1',
maxT=None,
)
SequenceViz.pylab.show(block=1)
'''
return Info
def runMany_MOVBWithMoves(self,
initnames=['truelabels',
'repeattruelabels',
'truelabelsandempty'],
algName='moVB',
nWorkers=0,
moves='merge,delete,shuffle'):
print('')
for aKwArgs in self.nextAllocKwArgsForVB():
for oKwArgs in self.nextObsKwArgsForVB():
Info = dict()
for iname in initnames:
if iname.count('junk') or iname.count('empty'):
initKextra = 1
else:
initKextra = 0
Info[iname] = self.run_MOVBWithMoves(
aKwArgs, oKwArgs,
moves=moves,
algName=algName,
nWorkers=nWorkers,
initKextra=initKextra,
initname=iname)
def test_MOVBWithMerges(self):
self.runMany_MOVBWithMoves(moves='merge')
def test_MOVBWithDeletes(self):
self.runMany_MOVBWithMoves(moves='delete')
def test_MOVBWithMergeDeletes(self):
self.runMany_MOVBWithMoves(moves='merge,delete')
def test_MOVBWithShuffleMergeDeletes(self):
self.runMany_MOVBWithMoves(moves='shuffle,merge,delete')
def test_MOVBWithMerges_0ParallelWorkers(self):
self.runMany_MOVBWithMoves(moves='merge', algName='pmoVB',
nWorkers=0)
def test_MOVBWithMerges_2ParallelWorkers(self):
self.runMany_MOVBWithMoves(moves='merge', algName='pmoVB',
nWorkers=2)
def test_MOVBCreateDestroy_SingleSeq(self):
print('')
argDict = parseCmdLineArgs()
for aKwArgs in self.nextAllocKwArgsForVB():
for oKwArgs in self.nextObsKwArgsForVB():
Info = dict()
for iPattern in argDict['initnameVals'].split(','):
fields = iPattern.split('-')
initargs = dict()
for kvstr in fields:
kvpair = kvstr.split('=')
key = kvpair[0]
val = kvpair[1]
initargs[key] = val
initargs.update(argDict)
initargs['jobname'] = 'nosetest-initname=%s-K=%s' % (
initargs['initname'], initargs['K'])
self.run_MOVBWithMoves_SegmentSingleSeq(
aKwArgs, oKwArgs,
moves='merge,delete,shuffle,seqcreate',
**initargs)
print('')
print('')
print('')
print('')
return
def test_MOVBCreateDestroy_ManySeq(self):
print('')
argDict = parseCmdLineArgs()
for aKwArgs in self.nextAllocKwArgsForVB():
for oKwArgs in self.nextObsKwArgsForVB():
Info = dict()
for iPattern in argDict['initnameVals'].split(','):
fields = iPattern.split('-')
initargs = dict()
for kvstr in fields:
kvpair = kvstr.split('=')
key = kvpair[0]
val = kvpair[1]
initargs[key] = val
initargs.update(argDict)
initargs['jobname'] = 'nosetest-initname=%s-K=%s' % (
initargs['initname'], initargs['K'])
self.run_MOVBWithMoves_SegmentManySeq(
aKwArgs, oKwArgs,
moves='merge,delete,shuffle,seqcreate',
**initargs)
print('')
print('')
print('')
print('')
return
def interactivetest_findBestCut_SingleSeq(self, n=0, **kwargs):
""" Interactively try out findBestCut.
Post Condition
--------------
Will raise AssertionError if any bad results detected.
"""
print('')
argDict = parseCmdLineArgs()
for aArg in self.nextAllocKwArgsForVB():
for oArg in self.nextObsKwArgsForVB():
for iPattern in argDict['initnameVals'].split(','):
fields = iPattern.split('-')
for kvstr in fields:
kvpair = kvstr.split('=')
key = kvpair[0]
val = kvpair[1]
argDict[key] = val
break
if hasattr(self.Data, 'nDoc'):
Data_n = self.Data.select_subset_by_mask(
[n], doTrackTruth=1, doTrackFullSize=0)
assert Data_n.nDocTotal == 1
else:
# Make GroupXData dataset from XData
# This code block rearranges rows so that we
# cycle thru the true labels twice as contig blocks.
zTrue = self.Data.TrueParams['Z']
half1 = list()
half2 = list()
for uID in np.unique(zTrue):
dataIDs = np.flatnonzero(zTrue == uID)
Nk = dataIDs.size
                half1.append(dataIDs[:Nk // 2])  # //: slice indices must be ints in Python 3
                half2.append(dataIDs[Nk // 2:])
dIDs_1 = np.hstack([x for x in half1])
dIDs_2 = np.hstack([x for x in half2])
dIDs = np.hstack([dIDs_1, dIDs_2])
Data_n = bnpy.data.GroupXData(
X=self.Data.X[dIDs],
doc_range=np.asarray([0, self.Data.nObs]),
TrueZ=self.Data.TrueParams['Z'][dIDs])
aArg['name'] = 'HDPHMM'
aArg['hmmKappa'] = 50
aArg['alpha'] = 0.5
aArg['gamma'] = 10.0
aArg['startAlpha'] = 10.0
Data_n.name = self.Data.name
Data_n.alwaysTrackTruth = 1
assert hasattr(Data_n, 'TrueParams')
# Create and initialize model
hmodel = bnpy.HModel.CreateEntireModel(
'VB', aArg['name'], oArg['name'], aArg, oArg, Data_n)
print(argDict)
hmodel.init_global_params(Data_n, **argDict)
# Run initial segmentation
LP_n = hmodel.calc_local_params(Data_n)
Z_n = LP_n['resp'].argmax(axis=1)
Ztrue = np.asarray(Data_n.TrueParams['Z'], dtype=np.int32)
Ktrue = np.max(Ztrue) + 1
# Explore the findBestCut idea
from matplotlib import pylab
from bnpy.init.SeqCreateProposals import findBestCutForBlock
while True:
keypress = input("Enter start stop stride >>> ")
fields = keypress.split(" ")
if len(fields) < 2:
break
a = int(fields[0])
b = int(fields[1])
if len(fields) > 2:
stride = int(fields[2])
else:
stride = 3
m = findBestCutForBlock(Data_n, hmodel, a=a, b=b, stride=stride)
print("Best Cut: [a=%d, m=%d, b=%d]" % (a, m, b))
Kcur = LP_n['resp'].shape[1]
Kmax_cur = np.maximum(Kcur, Ktrue)
CMap = bnpy.util.StateSeqUtil.makeStateColorMap(
nTrue=Kmax_cur, nExtra=2)
Z_n_mod = Z_n.copy()
Z_n_mod[a:m] = Kmax_cur
Z_n_mod[m:b] = Kmax_cur + 1
imshowArgs = dict(interpolation='nearest',
aspect=Z_n.size / 1.0,
cmap=CMap,
vmin=0, vmax=Kmax_cur + 1)
pylab.close()
pylab.subplots(nrows=3, ncols=1)
ax = pylab.subplot(3, 1, 1)
pylab.imshow(Ztrue[np.newaxis, :], **imshowArgs)
pylab.yticks([])
pylab.subplot(3, 1, 2, sharex=ax)
pylab.imshow(Z_n[np.newaxis, :], **imshowArgs)
pylab.yticks([])
pylab.subplot(3, 1, 3, sharex=ax)
pylab.imshow(Z_n_mod[np.newaxis, :], **imshowArgs)
pylab.yticks([])
L = b - a
amin = np.maximum(0, a - L / 5)
bmax = np.minimum(Z_n.size - 1, b + L / 5)
pylab.xlim([amin, bmax])
pylab.show(block=0)
def parseCmdLineArgs():
cmdlineArgList = sys.argv[1:]
argList = list()
for aa, arg in enumerate(cmdlineArgList):
if arg.startswith('-'):
continue
elif arg.count('.py'):
continue
argList.append(arg)
assert len(argList) % 2 == 0
argDict = dict()
argDict['initnameVals'] = 'initname=randcontigblocks-K=1'
for ii in range(0, len(argList), 2):
key = argList[ii]
val = argList[ii + 1]
argDict[key] = val
return argDict
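# Example invocation (illustrative, assuming the positional key/value
# convention parsed above; not part of the original test suite):
#   python MergeMoveEndToEndTest.py initnameVals initname=randcontigblocks-K=5
# yields argDict == {'initnameVals': 'initname=randcontigblocks-K=5'}.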
|