Each record in this dataset describes a single source file. The columns and their observed value ranges are:

| column | dtype | observed range / distinct values |
|---|---|---|
| blob_id | string | length 40 |
| directory_id | string | length 40 |
| path | string | length 4–721 |
| content_id | string | length 40 |
| detected_licenses | list | length 0–57 |
| license_type | string | 2 distinct values |
| repo_name | string | length 5–91 |
| snapshot_id | string | length 40 |
| revision_id | string | length 40 |
| branch_name | string | 321 distinct values |
| visit_date | timestamp[ns] | 2016-08-12 09:31:09 – 2023-09-06 10:45:07 |
| revision_date | timestamp[ns] | 2010-09-28 14:01:40 – 2023-09-06 06:22:19 |
| committer_date | timestamp[ns] | 2010-09-28 14:01:40 – 2023-09-06 06:22:19 |
| github_id | int64 | 426 – 681M |
| star_events_count | int64 | 101 – 243k |
| fork_events_count | int64 | 0 – 110k |
| gha_license_id | string | 23 distinct values |
| gha_event_created_at | timestamp[ns], nullable | 2012-06-28 18:51:49 – 2023-09-14 21:59:16 |
| gha_created_at | timestamp[ns], nullable | 2008-02-11 22:55:26 – 2023-08-10 11:14:58 |
| gha_language | string | 147 distinct values |
| src_encoding | string | 26 distinct values |
| language | string | 2 distinct values |
| is_vendor | bool | 2 distinct values |
| is_generated | bool | 2 distinct values |
| length_bytes | int64 | 6 – 10.2M |
| extension | string | 115 distinct values |
| filename | string | length 3–113 |
| content | string | length 6 – 10.2M |
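A minimal sketch of how rows with this schema could be loaded and filtered with the Hugging Face `datasets` library; the dataset identifier below is a placeholder, and streaming is assumed so that the large `content` column is not materialized all at once:

```python
from datasets import load_dataset

# Placeholder identifier: substitute the actual dataset path for these records.
ds = load_dataset("org/source-code-files", split="train", streaming=True)

# Keep non-vendored, non-generated Python files with a permissive license.
python_rows = (
    row for row in ds
    if row["language"] == "Python"
    and row["license_type"] == "permissive"
    and not row["is_vendor"]
    and not row["is_generated"]
)

first = next(python_rows)
print(first["repo_name"], first["path"], first["length_bytes"])
```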
Record 1: test_types.py (apache/flink)

| field | value |
|---|---|
| blob_id | 10c59f0cd6e1f09c725478e829a1e86b68eb2d07 |
| directory_id | c26304a54824faa7c1b34bb7882ee7a335a8e7fb |
| path | /flink-python/pyflink/table/tests/test_types.py |
| content_id | d3bd37c6ffd8fa2c08572a6199c4a5f0a9def8dd |
| detected_licenses | BSD-3-Clause, OFL-1.1, ISC, MIT, Apache-2.0 |
| license_type | permissive |
| repo_name | apache/flink |
| snapshot_id | 905e0709de6389fc9212a7c48a82669706c70b4a |
| revision_id | fbef3c22757a2352145599487beb84e02aaeb389 |
| branch_name | refs/heads/master |
| visit_date | 2023-09-04T08:11:07.253750 |
| revision_date | 2023-09-04T01:33:25 |
| committer_date | 2023-09-04T01:33:25 |
| github_id | 20,587,599 |
| star_events_count | 23,573 |
| fork_events_count | 14,781 |
| gha_license_id | Apache-2.0 |
| gha_event_created_at | 2023-09-14T21:49:04 |
| gha_created_at | 2014-06-07T07:00:10 |
| gha_language | Java |
| src_encoding | UTF-8 |
| language | Python |
| is_vendor | false |
| is_generated | false |
| length_bytes | 39,663 |
| extension | py |
| filename | test_types.py |

content (test_types.py):
################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
import array
import ctypes
import datetime
import pickle
import sys
import tempfile
import unittest
from pyflink.pyflink_gateway_server import on_windows
from pyflink.serializers import BatchedSerializer, PickleSerializer
from pyflink.java_gateway import get_gateway
from pyflink.table.types import (_infer_schema_from_data, _infer_type,
_array_signed_int_typecode_ctype_mappings,
_array_unsigned_int_typecode_ctype_mappings,
_array_type_mappings, _merge_type,
_create_type_verifier, UserDefinedType, DataTypes, Row, RowField,
RowType, ArrayType, BigIntType, VarCharType, MapType, DataType,
_from_java_data_type, ZonedTimestampType,
LocalZonedTimestampType, _to_java_data_type)
from pyflink.testing.test_case_utils import PyFlinkTestCase
class ExamplePointUDT(UserDefinedType):
"""
User-defined type (UDT) for ExamplePoint.
"""
@classmethod
def sql_type(cls):
return DataTypes.ARRAY(DataTypes.DOUBLE(False))
@classmethod
def module(cls):
return 'pyflink.table.tests.test_types'
@classmethod
def java_udt(cls):
return 'org.apache.flink.table.types.python.ExamplePointUserDefinedType'
def serialize(self, obj):
return [obj.x, obj.y]
def deserialize(self, datum):
return ExamplePoint(datum[0], datum[1])
class ExamplePoint:
"""
An example class to demonstrate UDT in Java, and Python.
"""
__UDT__ = ExamplePointUDT()
def __init__(self, x, y):
self.x = x
self.y = y
def __repr__(self):
return "ExamplePoint(%s,%s)" % (self.x, self.y)
def __str__(self):
return "(%s,%s)" % (self.x, self.y)
def __eq__(self, other):
return isinstance(other, self.__class__) and other.x == self.x and other.y == self.y
class PythonOnlyUDT(UserDefinedType):
"""
User-defined type (UDT) for ExamplePoint.
"""
@classmethod
def sql_type(cls):
return DataTypes.ARRAY(DataTypes.DOUBLE(False))
@classmethod
def module(cls):
return '__main__'
def serialize(self, obj):
return [obj.x, obj.y]
def deserialize(self, datum):
return PythonOnlyPoint(datum[0], datum[1])
class PythonOnlyPoint(ExamplePoint):
"""
An example class to demonstrate UDT in only Python
"""
__UDT__ = PythonOnlyUDT()
class UTCOffsetTimezone(datetime.tzinfo):
"""
Specifies timezone in UTC offset
"""
def __init__(self, offset=0):
self.OFFSET = datetime.timedelta(hours=offset)
def utcoffset(self, dt):
return self.OFFSET
def dst(self, dt):
return self.OFFSET
class TypesTests(PyFlinkTestCase):
def test_infer_schema(self):
from decimal import Decimal
class A(object):
def __init__(self):
self.a = 1
from collections import namedtuple
Point = namedtuple('Point', 'x y')
data = [
True,
1,
"a",
u"a",
datetime.date(1970, 1, 1),
datetime.time(0, 0, 0),
datetime.datetime(1970, 1, 1, 0, 0),
1.0,
array.array("d", [1]),
[1],
(1,),
Point(1.0, 5.0),
{"a": 1},
bytearray(1),
Decimal(1),
Row(a=1),
Row("a")(1),
A(),
]
expected = [
'BooleanType(true)',
'BigIntType(true)',
'VarCharType(2147483647, true)',
'VarCharType(2147483647, true)',
'DateType(true)',
'TimeType(0, true)',
'LocalZonedTimestampType(6, true)',
'DoubleType(true)',
"ArrayType(DoubleType(false), true)",
"ArrayType(BigIntType(true), true)",
'RowType(RowField(_1, BigIntType(true), ...))',
'RowType(RowField(x, DoubleType(true), ...),RowField(y, DoubleType(true), ...))',
'MapType(VarCharType(2147483647, false), BigIntType(true), true)',
'VarBinaryType(2147483647, true)',
'DecimalType(38, 18, true)',
'RowType(RowField(a, BigIntType(true), ...))',
'RowType(RowField(a, BigIntType(true), ...))',
'RowType(RowField(a, BigIntType(true), ...))',
]
schema = _infer_schema_from_data([data])
self.assertEqual(expected, [repr(f.data_type) for f in schema.fields])
def test_infer_schema_nulltype(self):
elements = [Row(c1=[], c2={}, c3=None),
Row(c1=[Row(a=1, b='s')], c2={"key": Row(c=1.0, d="2")}, c3="")]
schema = _infer_schema_from_data(elements)
self.assertTrue(isinstance(schema, RowType))
self.assertEqual(3, len(schema.fields))
# first column is array
self.assertTrue(isinstance(schema.fields[0].data_type, ArrayType))
# element type of first column is struct
self.assertTrue(isinstance(schema.fields[0].data_type.element_type, RowType))
self.assertTrue(isinstance(schema.fields[0].data_type.element_type.fields[0].data_type,
BigIntType))
self.assertTrue(isinstance(schema.fields[0].data_type.element_type.fields[1].data_type,
VarCharType))
# second column is map
self.assertTrue(isinstance(schema.fields[1].data_type, MapType))
self.assertTrue(isinstance(schema.fields[1].data_type.key_type, VarCharType))
self.assertTrue(isinstance(schema.fields[1].data_type.value_type, RowType))
# third column is varchar
self.assertTrue(isinstance(schema.fields[2].data_type, VarCharType))
def test_infer_schema_not_enough_names(self):
schema = _infer_schema_from_data([["a", "b"]], ["col1"])
self.assertTrue(schema.names, ['col1', '_2'])
def test_infer_schema_fails(self):
with self.assertRaises(TypeError):
_infer_schema_from_data([[1, 1], ["x", 1]], names=["a", "b"])
def test_infer_nested_schema(self):
NestedRow = Row("f1", "f2")
data1 = [NestedRow([1, 2], {"row1": 1.0}), NestedRow([2, 3], {"row2": 2.0})]
schema1 = _infer_schema_from_data(data1)
expected1 = [
'ArrayType(BigIntType(true), true)',
'MapType(VarCharType(2147483647, false), DoubleType(true), true)'
]
self.assertEqual(expected1, [repr(f.data_type) for f in schema1.fields])
data2 = [NestedRow([[1, 2], [2, 3]], [1, 2]), NestedRow([[2, 3], [3, 4]], [2, 3])]
schema2 = _infer_schema_from_data(data2)
expected2 = [
'ArrayType(ArrayType(BigIntType(true), true), true)',
'ArrayType(BigIntType(true), true)'
]
self.assertEqual(expected2, [repr(f.data_type) for f in schema2.fields])
def test_convert_row_to_dict(self):
row = Row(l=[Row(a=1, b='s')], d={"key": Row(c=1.0, d="2")})
self.assertEqual(1, row.as_dict()['l'][0].a)
self.assertEqual(1.0, row.as_dict()['d']['key'].c)
def test_udt(self):
p = ExamplePoint(1.0, 2.0)
self.assertEqual(_infer_type(p), ExamplePointUDT())
_create_type_verifier(ExamplePointUDT())(ExamplePoint(1.0, 2.0))
self.assertRaises(ValueError, lambda: _create_type_verifier(ExamplePointUDT())([1.0, 2.0]))
p = PythonOnlyPoint(1.0, 2.0)
self.assertEqual(_infer_type(p), PythonOnlyUDT())
_create_type_verifier(PythonOnlyUDT())(PythonOnlyPoint(1.0, 2.0))
self.assertRaises(ValueError, lambda: _create_type_verifier(PythonOnlyUDT())([1.0, 2.0]))
def test_nested_udt_in_df(self):
expected_schema = DataTypes.ROW() \
.add("_1", DataTypes.BIGINT()).add("_2", DataTypes.ARRAY(PythonOnlyUDT()))
data = (1, [PythonOnlyPoint(float(1), float(2))])
self.assertEqual(expected_schema, _infer_type(data))
expected_schema = DataTypes.ROW().add("_1", DataTypes.BIGINT()).add(
"_2", DataTypes.MAP(DataTypes.BIGINT(False), PythonOnlyUDT()))
p = (1, {1: PythonOnlyPoint(1, float(2))})
self.assertEqual(expected_schema, _infer_type(p))
def test_struct_type(self):
row1 = DataTypes.ROW().add("f1", DataTypes.STRING(nullable=True)) \
.add("f2", DataTypes.STRING(nullable=True))
row2 = DataTypes.ROW([DataTypes.FIELD("f1", DataTypes.STRING(nullable=True)),
DataTypes.FIELD("f2", DataTypes.STRING(nullable=True), None)])
self.assertEqual(row1.field_names(), row2.names)
self.assertEqual(row1, row2)
row1 = DataTypes.ROW().add("f1", DataTypes.STRING(nullable=True)) \
.add("f2", DataTypes.STRING(nullable=True))
row2 = DataTypes.ROW([DataTypes.FIELD("f1", DataTypes.STRING(nullable=True))])
self.assertNotEqual(row1.field_names(), row2.names)
self.assertNotEqual(row1, row2)
row1 = (DataTypes.ROW().add(DataTypes.FIELD("f1", DataTypes.STRING(nullable=True)))
.add("f2", DataTypes.STRING(nullable=True)))
row2 = DataTypes.ROW([DataTypes.FIELD("f1", DataTypes.STRING(nullable=True)),
DataTypes.FIELD("f2", DataTypes.STRING(nullable=True))])
self.assertEqual(row1.field_names(), row2.names)
self.assertEqual(row1, row2)
row1 = (DataTypes.ROW().add(DataTypes.FIELD("f1", DataTypes.STRING(nullable=True)))
.add("f2", DataTypes.STRING(nullable=True)))
row2 = DataTypes.ROW([DataTypes.FIELD("f1", DataTypes.STRING(nullable=True))])
self.assertNotEqual(row1.field_names(), row2.names)
self.assertNotEqual(row1, row2)
# Catch exception raised during improper construction
self.assertRaises(ValueError, lambda: DataTypes.ROW().add("name"))
row1 = DataTypes.ROW().add("f1", DataTypes.STRING(nullable=True)) \
.add("f2", DataTypes.STRING(nullable=True))
for field in row1:
self.assertIsInstance(field, RowField)
row1 = DataTypes.ROW().add("f1", DataTypes.STRING(nullable=True)) \
.add("f2", DataTypes.STRING(nullable=True))
self.assertEqual(len(row1), 2)
row1 = DataTypes.ROW().add("f1", DataTypes.STRING(nullable=True)) \
.add("f2", DataTypes.STRING(nullable=True))
self.assertIs(row1["f1"], row1.fields[0])
self.assertIs(row1[0], row1.fields[0])
self.assertEqual(row1[0:1], DataTypes.ROW(row1.fields[0:1]))
self.assertRaises(KeyError, lambda: row1["f9"])
self.assertRaises(IndexError, lambda: row1[9])
self.assertRaises(TypeError, lambda: row1[9.9])
def test_infer_bigint_type(self):
longrow = [Row(f1='a', f2=100000000000000)]
schema = _infer_schema_from_data(longrow)
self.assertEqual(DataTypes.BIGINT(), schema.fields[1].data_type)
self.assertEqual(DataTypes.BIGINT(), _infer_type(1))
self.assertEqual(DataTypes.BIGINT(), _infer_type(2 ** 10))
self.assertEqual(DataTypes.BIGINT(), _infer_type(2 ** 20))
self.assertEqual(DataTypes.BIGINT(), _infer_type(2 ** 31 - 1))
self.assertEqual(DataTypes.BIGINT(), _infer_type(2 ** 31))
self.assertEqual(DataTypes.BIGINT(), _infer_type(2 ** 61))
self.assertEqual(DataTypes.BIGINT(), _infer_type(2 ** 71))
def test_merge_type(self):
self.assertEqual(_merge_type(DataTypes.BIGINT(), DataTypes.NULL()), DataTypes.BIGINT())
self.assertEqual(_merge_type(DataTypes.NULL(), DataTypes.BIGINT()), DataTypes.BIGINT())
self.assertEqual(_merge_type(DataTypes.BIGINT(), DataTypes.BIGINT()), DataTypes.BIGINT())
self.assertEqual(_merge_type(
DataTypes.ARRAY(DataTypes.BIGINT()),
DataTypes.ARRAY(DataTypes.BIGINT())
), DataTypes.ARRAY(DataTypes.BIGINT()))
with self.assertRaises(TypeError):
_merge_type(DataTypes.ARRAY(DataTypes.BIGINT()), DataTypes.ARRAY(DataTypes.DOUBLE()))
self.assertEqual(_merge_type(
DataTypes.MAP(DataTypes.STRING(), DataTypes.BIGINT()),
DataTypes.MAP(DataTypes.STRING(), DataTypes.BIGINT())
), DataTypes.MAP(DataTypes.STRING(), DataTypes.BIGINT()))
with self.assertRaises(TypeError):
_merge_type(
DataTypes.MAP(DataTypes.STRING(), DataTypes.BIGINT()),
DataTypes.MAP(DataTypes.DOUBLE(), DataTypes.BIGINT()))
with self.assertRaises(TypeError):
_merge_type(
DataTypes.MAP(DataTypes.STRING(), DataTypes.BIGINT()),
DataTypes.MAP(DataTypes.STRING(), DataTypes.DOUBLE()))
self.assertEqual(_merge_type(
DataTypes.ROW([DataTypes.FIELD('f1', DataTypes.BIGINT()),
DataTypes.FIELD('f2', DataTypes.STRING())]),
DataTypes.ROW([DataTypes.FIELD('f1', DataTypes.BIGINT()),
DataTypes.FIELD('f2', DataTypes.STRING())])
), DataTypes.ROW([DataTypes.FIELD('f1', DataTypes.BIGINT()),
DataTypes.FIELD('f2', DataTypes.STRING())]))
with self.assertRaises(TypeError):
_merge_type(
DataTypes.ROW([DataTypes.FIELD('f1', DataTypes.BIGINT()),
DataTypes.FIELD('f2', DataTypes.STRING())]),
DataTypes.ROW([DataTypes.FIELD('f1', DataTypes.DOUBLE()),
DataTypes.FIELD('f2', DataTypes.STRING())]))
self.assertEqual(_merge_type(
DataTypes.ROW([DataTypes.FIELD(
'f1', DataTypes.ROW([DataTypes.FIELD('f2', DataTypes.BIGINT())]))]),
DataTypes.ROW([DataTypes.FIELD(
'f1', DataTypes.ROW([DataTypes.FIELD('f2', DataTypes.BIGINT())]))])
), DataTypes.ROW([DataTypes.FIELD(
'f1', DataTypes.ROW([DataTypes.FIELD('f2', DataTypes.BIGINT())]))]))
with self.assertRaises(TypeError):
_merge_type(
DataTypes.ROW([DataTypes.FIELD('f1', DataTypes.ROW(
[DataTypes.FIELD('f2', DataTypes.BIGINT())]))]),
DataTypes.ROW([DataTypes.FIELD('f1', DataTypes.ROW(
[DataTypes.FIELD('f2', DataTypes.STRING())]))]))
self.assertEqual(_merge_type(
DataTypes.ROW([DataTypes.FIELD('f1', DataTypes.ARRAY(DataTypes.BIGINT())),
DataTypes.FIELD('f2', DataTypes.STRING())]),
DataTypes.ROW([DataTypes.FIELD('f1', DataTypes.ARRAY(DataTypes.BIGINT())),
DataTypes.FIELD('f2', DataTypes.STRING())])
), DataTypes.ROW([DataTypes.FIELD('f1', DataTypes.ARRAY(DataTypes.BIGINT())),
DataTypes.FIELD('f2', DataTypes.STRING())]))
with self.assertRaises(TypeError):
_merge_type(
DataTypes.ROW([
DataTypes.FIELD('f1', DataTypes.ARRAY(DataTypes.BIGINT())),
DataTypes.FIELD('f2', DataTypes.STRING())]),
DataTypes.ROW([
DataTypes.FIELD('f1', DataTypes.ARRAY(DataTypes.DOUBLE())),
DataTypes.FIELD('f2', DataTypes.STRING())]))
self.assertEqual(_merge_type(
DataTypes.ROW([
DataTypes.FIELD('f1', DataTypes.MAP(DataTypes.STRING(), DataTypes.BIGINT())),
DataTypes.FIELD('f2', DataTypes.STRING())]),
DataTypes.ROW([
DataTypes.FIELD('f1', DataTypes.MAP(DataTypes.STRING(), DataTypes.BIGINT())),
DataTypes.FIELD('f2', DataTypes.STRING())])
), DataTypes.ROW([
DataTypes.FIELD('f1', DataTypes.MAP(DataTypes.STRING(), DataTypes.BIGINT())),
DataTypes.FIELD('f2', DataTypes.STRING())]))
with self.assertRaises(TypeError):
_merge_type(
DataTypes.ROW([
DataTypes.FIELD('f1', DataTypes.MAP(DataTypes.STRING(), DataTypes.BIGINT())),
DataTypes.FIELD('f2', DataTypes.STRING())]),
DataTypes.ROW([
DataTypes.FIELD('f1', DataTypes.MAP(DataTypes.STRING(), DataTypes.DOUBLE())),
DataTypes.FIELD('f2', DataTypes.STRING())]))
self.assertEqual(_merge_type(
DataTypes.ROW([DataTypes.FIELD('f1', DataTypes.ARRAY(
DataTypes.MAP(DataTypes.STRING(), DataTypes.BIGINT())))]),
DataTypes.ROW([DataTypes.FIELD('f1', DataTypes.ARRAY(
DataTypes.MAP(DataTypes.STRING(), DataTypes.BIGINT())))])
), DataTypes.ROW([DataTypes.FIELD('f1', DataTypes.ARRAY(
DataTypes.MAP(DataTypes.STRING(), DataTypes.BIGINT())))]))
with self.assertRaises(TypeError):
_merge_type(
DataTypes.ROW([DataTypes.FIELD('f1', DataTypes.ARRAY(
DataTypes.MAP(DataTypes.STRING(), DataTypes.BIGINT())))]),
DataTypes.ROW([DataTypes.FIELD('f1', DataTypes.ARRAY(
DataTypes.MAP(DataTypes.DOUBLE(), DataTypes.BIGINT())))])
)
def test_array_types(self):
# This test needs to make sure that the Scala type selected is at least
# as large as the python's types. This is necessary because python's
# array types depend on C implementation on the machine. Therefore there
# is no machine independent correspondence between python's array types
# and Scala types.
# See: https://docs.python.org/2/library/array.html
def assert_collect_success(typecode, value, element_type):
self.assertEqual(element_type,
str(_infer_type(array.array(typecode, [value])).element_type))
# supported string types
#
# String types in python's array are "u" for Py_UNICODE and "c" for char.
# "u" will be removed in python 4, and "c" is not supported in python 3.
supported_string_types = []
if sys.version_info[0] < 4:
supported_string_types += ['u']
# test unicode
assert_collect_success('u', u'a', 'CHAR')
# supported float and double
#
# Test max, min, and precision for float and double, assuming IEEE 754
# floating-point format.
supported_fractional_types = ['f', 'd']
assert_collect_success('f', ctypes.c_float(1e+38).value, 'FLOAT')
assert_collect_success('f', ctypes.c_float(1e-38).value, 'FLOAT')
assert_collect_success('f', ctypes.c_float(1.123456).value, 'FLOAT')
assert_collect_success('d', sys.float_info.max, 'DOUBLE')
assert_collect_success('d', sys.float_info.min, 'DOUBLE')
assert_collect_success('d', sys.float_info.epsilon, 'DOUBLE')
def get_int_data_type(size):
if size <= 8:
return "TINYINT"
if size <= 16:
return "SMALLINT"
if size <= 32:
return "INT"
if size <= 64:
return "BIGINT"
# supported signed int types
#
# The size of C types changes with implementation, we need to make sure
# that there is no overflow error on the platform running this test.
supported_signed_int_types = list(
set(_array_signed_int_typecode_ctype_mappings.keys()).intersection(
set(_array_type_mappings.keys())))
for t in supported_signed_int_types:
ctype = _array_signed_int_typecode_ctype_mappings[t]
max_val = 2 ** (ctypes.sizeof(ctype) * 8 - 1)
assert_collect_success(t, max_val - 1, get_int_data_type(ctypes.sizeof(ctype) * 8))
assert_collect_success(t, -max_val, get_int_data_type(ctypes.sizeof(ctype) * 8))
# supported unsigned int types
#
# JVM does not have unsigned types. We need to be very careful to make
# sure that there is no overflow error.
supported_unsigned_int_types = list(
set(_array_unsigned_int_typecode_ctype_mappings.keys()).intersection(
set(_array_type_mappings.keys())))
for t in supported_unsigned_int_types:
ctype = _array_unsigned_int_typecode_ctype_mappings[t]
max_val = 2 ** (ctypes.sizeof(ctype) * 8 - 1)
assert_collect_success(t, max_val, get_int_data_type(ctypes.sizeof(ctype) * 8 + 1))
# all supported types
#
# Make sure the types tested above:
# 1. are all supported types
# 2. cover all supported types
supported_types = (supported_string_types +
supported_fractional_types +
supported_signed_int_types +
supported_unsigned_int_types)
self.assertEqual(set(supported_types), set(_array_type_mappings.keys()))
# all unsupported types
#
# Keys in _array_type_mappings is a complete list of all supported types,
# and types not in _array_type_mappings are considered unsupported.
all_types = set(array.typecodes)
unsupported_types = all_types - set(supported_types)
# test unsupported types
for t in unsupported_types:
with self.assertRaises(TypeError):
_infer_schema_from_data([Row(myarray=array.array(t))])
def test_data_type_eq(self):
lt = DataTypes.BIGINT()
lt2 = pickle.loads(pickle.dumps(DataTypes.BIGINT()))
self.assertEqual(lt, lt2)
def test_decimal_type(self):
t1 = DataTypes.DECIMAL(10, 0)
t2 = DataTypes.DECIMAL(10, 2)
self.assertTrue(t2 is not t1)
self.assertNotEqual(t1, t2)
def test_datetype_equal_zero(self):
dt = DataTypes.DATE()
self.assertEqual(dt.from_sql_type(0), datetime.date(1970, 1, 1))
@unittest.skipIf(on_windows(), "Windows x64 system only support the datetime not larger "
"than time.ctime(32536799999), so this test can't run "
"under Windows platform")
def test_timestamp_microsecond(self):
tst = DataTypes.TIMESTAMP()
self.assertEqual(tst.to_sql_type(datetime.datetime.max) % 1000000, 999999)
@unittest.skipIf(on_windows(), "Windows x64 system only support the datetime not larger "
"than time.ctime(32536799999), so this test can't run "
"under Windows platform")
def test_local_zoned_timestamp_type(self):
lztst = DataTypes.TIMESTAMP_WITH_LOCAL_TIME_ZONE()
last_abbreviation = DataTypes.TIMESTAMP_LTZ()
self.assertEqual(lztst, last_abbreviation)
ts = datetime.datetime(1970, 1, 1, 0, 0, 0, 0000)
self.assertEqual(0, lztst.to_sql_type(ts))
import pytz
# suppose the timezone of the data is +9:00
timezone = pytz.timezone("Asia/Tokyo")
orig_epoch = LocalZonedTimestampType.EPOCH_ORDINAL
try:
# suppose the local timezone is +8:00
LocalZonedTimestampType.EPOCH_ORDINAL = 28800000000
ts_tokyo = timezone.localize(ts)
self.assertEqual(-3600000000, lztst.to_sql_type(ts_tokyo))
finally:
LocalZonedTimestampType.EPOCH_ORDINAL = orig_epoch
if sys.version_info >= (3, 6):
ts2 = lztst.from_sql_type(0)
self.assertEqual(ts.astimezone(), ts2.astimezone())
def test_zoned_timestamp_type(self):
ztst = ZonedTimestampType()
ts = datetime.datetime(1970, 1, 1, 0, 0, 0, 0000, tzinfo=UTCOffsetTimezone(1))
self.assertEqual((0, 3600), ztst.to_sql_type(ts))
ts2 = ztst.from_sql_type((0, 3600))
self.assertEqual(ts, ts2)
def test_day_time_inteval_type(self):
ymt = DataTypes.INTERVAL(DataTypes.DAY(), DataTypes.SECOND())
td = datetime.timedelta(days=1, seconds=10)
self.assertEqual(86410000000, ymt.to_sql_type(td))
td2 = ymt.from_sql_type(86410000000)
self.assertEqual(td, td2)
def test_empty_row(self):
row = Row()
self.assertEqual(len(row), 0)
def test_invalid_create_row(self):
row_class = Row("c1", "c2")
self.assertRaises(ValueError, lambda: row_class(1, 2, 3))
def test_nullable(self):
t = DataType(nullable=False)
self.assertEqual(t._nullable, False)
t_nullable = t.nullable()
self.assertEqual(t_nullable._nullable, True)
def test_not_null(self):
t = DataType(nullable=True)
self.assertEqual(t._nullable, True)
t_notnull = t.not_null()
self.assertEqual(t_notnull._nullable, False)
class DataTypeVerificationTests(PyFlinkTestCase):
def test_verify_type_exception_msg(self):
self.assertRaises(
ValueError,
lambda: _create_type_verifier(
DataTypes.STRING(nullable=False), name="test_name")(None))
schema = DataTypes.ROW(
[DataTypes.FIELD('a', DataTypes.ROW([DataTypes.FIELD('b', DataTypes.INT())]))])
self.assertRaises(
TypeError,
lambda: _create_type_verifier(schema)([["data"]]))
def test_verify_type_ok_nullable(self):
obj = None
types = [DataTypes.INT(), DataTypes.FLOAT(), DataTypes.STRING(), DataTypes.ROW([])]
for data_type in types:
try:
_create_type_verifier(data_type)(obj)
except (TypeError, ValueError):
self.fail("verify_type(%s, %s, nullable=True)" % (obj, data_type))
def test_verify_type_not_nullable(self):
import array
import datetime
import decimal
schema = DataTypes.ROW([
DataTypes.FIELD('s', DataTypes.STRING(nullable=False)),
DataTypes.FIELD('i', DataTypes.INT(True))])
class MyObj:
def __init__(self, **kwargs):
for k, v in kwargs.items():
setattr(self, k, v)
# obj, data_type
success_spec = [
# String
("", DataTypes.STRING()),
(u"", DataTypes.STRING()),
# UDT
(ExamplePoint(1.0, 2.0), ExamplePointUDT()),
# Boolean
(True, DataTypes.BOOLEAN()),
# TinyInt
(-(2 ** 7), DataTypes.TINYINT()),
(2 ** 7 - 1, DataTypes.TINYINT()),
# SmallInt
(-(2 ** 15), DataTypes.SMALLINT()),
(2 ** 15 - 1, DataTypes.SMALLINT()),
# Int
(-(2 ** 31), DataTypes.INT()),
(2 ** 31 - 1, DataTypes.INT()),
# BigInt
(2 ** 64, DataTypes.BIGINT()),
# Float & Double
(1.0, DataTypes.FLOAT()),
(1.0, DataTypes.DOUBLE()),
# Decimal
(decimal.Decimal("1.0"), DataTypes.DECIMAL(10, 0)),
# Binary
(bytearray([1]), DataTypes.BINARY(1)),
# Date/Time/Timestamp
(datetime.date(2000, 1, 2), DataTypes.DATE()),
(datetime.datetime(2000, 1, 2, 3, 4), DataTypes.DATE()),
(datetime.time(1, 1, 2), DataTypes.TIME()),
(datetime.datetime(2000, 1, 2, 3, 4), DataTypes.TIMESTAMP()),
# Array
([], DataTypes.ARRAY(DataTypes.INT())),
(["1", None], DataTypes.ARRAY(DataTypes.STRING(nullable=True))),
([1, 2], DataTypes.ARRAY(DataTypes.INT())),
((1, 2), DataTypes.ARRAY(DataTypes.INT())),
(array.array('h', [1, 2]), DataTypes.ARRAY(DataTypes.INT())),
# Map
({}, DataTypes.MAP(DataTypes.STRING(), DataTypes.INT())),
({"a": 1}, DataTypes.MAP(DataTypes.STRING(), DataTypes.INT())),
({"a": None}, DataTypes.MAP(DataTypes.STRING(nullable=False), DataTypes.INT(True))),
# Struct
({"s": "a", "i": 1}, schema),
({"s": "a", "i": None}, schema),
({"s": "a"}, schema),
({"s": "a", "f": 1.0}, schema),
(Row(s="a", i=1), schema),
(Row(s="a", i=None), schema),
(Row(s="a", i=1, f=1.0), schema),
(["a", 1], schema),
(["a", None], schema),
(("a", 1), schema),
(MyObj(s="a", i=1), schema),
(MyObj(s="a", i=None), schema),
(MyObj(s="a"), schema),
]
# obj, data_type, exception class
failure_spec = [
# Char/VarChar (match anything but None)
(None, DataTypes.VARCHAR(1), ValueError),
(None, DataTypes.CHAR(1), ValueError),
# VarChar (length exceeds maximum length)
("abc", DataTypes.VARCHAR(1), ValueError),
# Char (length exceeds length)
("abc", DataTypes.CHAR(1), ValueError),
# UDT
(ExamplePoint(1.0, 2.0), PythonOnlyUDT(), ValueError),
# Boolean
(1, DataTypes.BOOLEAN(), TypeError),
("True", DataTypes.BOOLEAN(), TypeError),
([1], DataTypes.BOOLEAN(), TypeError),
# TinyInt
(-(2 ** 7) - 1, DataTypes.TINYINT(), ValueError),
(2 ** 7, DataTypes.TINYINT(), ValueError),
("1", DataTypes.TINYINT(), TypeError),
(1.0, DataTypes.TINYINT(), TypeError),
# SmallInt
(-(2 ** 15) - 1, DataTypes.SMALLINT(), ValueError),
(2 ** 15, DataTypes.SMALLINT(), ValueError),
# Int
(-(2 ** 31) - 1, DataTypes.INT(), ValueError),
(2 ** 31, DataTypes.INT(), ValueError),
# Float & Double
(1, DataTypes.FLOAT(), TypeError),
(1, DataTypes.DOUBLE(), TypeError),
# Decimal
(1.0, DataTypes.DECIMAL(10, 0), TypeError),
(1, DataTypes.DECIMAL(10, 0), TypeError),
("1.0", DataTypes.DECIMAL(10, 0), TypeError),
# Binary
(1, DataTypes.BINARY(1), TypeError),
# VarBinary (length exceeds maximum length)
(bytearray([1, 2]), DataTypes.VARBINARY(1), ValueError),
# Char (length exceeds length)
(bytearray([1, 2]), DataTypes.BINARY(1), ValueError),
# Date/Time/Timestamp
("2000-01-02", DataTypes.DATE(), TypeError),
("10:01:02", DataTypes.TIME(), TypeError),
(946811040, DataTypes.TIMESTAMP(), TypeError),
# Array
(["1", None], DataTypes.ARRAY(DataTypes.VARCHAR(1, nullable=False)), ValueError),
([1, "2"], DataTypes.ARRAY(DataTypes.INT()), TypeError),
# Map
({"a": 1}, DataTypes.MAP(DataTypes.INT(), DataTypes.INT()), TypeError),
({"a": "1"}, DataTypes.MAP(DataTypes.VARCHAR(1), DataTypes.INT()), TypeError),
({"a": None}, DataTypes.MAP(DataTypes.VARCHAR(1), DataTypes.INT(False)), ValueError),
# Struct
({"s": "a", "i": "1"}, schema, TypeError),
(Row(s="a"), schema, ValueError), # Row can't have missing field
(Row(s="a", i="1"), schema, TypeError),
(["a"], schema, ValueError),
(["a", "1"], schema, TypeError),
(MyObj(s="a", i="1"), schema, TypeError),
(MyObj(s=None, i="1"), schema, ValueError),
]
# Check success cases
for obj, data_type in success_spec:
try:
_create_type_verifier(data_type.not_null())(obj)
except (TypeError, ValueError):
self.fail("verify_type(%s, %s, nullable=False)" % (obj, data_type))
# Check failure cases
for obj, data_type, exp in failure_spec:
msg = "verify_type(%s, %s, nullable=False) == %s" % (obj, data_type, exp)
with self.assertRaises(exp, msg=msg):
_create_type_verifier(data_type.not_null())(obj)
class DataTypeConvertTests(PyFlinkTestCase):
def test_basic_type(self):
test_types = [DataTypes.STRING(),
DataTypes.BOOLEAN(),
DataTypes.BYTES(),
DataTypes.TINYINT(),
DataTypes.SMALLINT(),
DataTypes.INT(),
DataTypes.BIGINT(),
DataTypes.FLOAT(),
DataTypes.DOUBLE(),
DataTypes.DATE(),
DataTypes.TIME(),
DataTypes.TIMESTAMP(3)]
java_types = [_to_java_data_type(item) for item in test_types]
converted_python_types = [_from_java_data_type(item) for item in java_types]
self.assertEqual(test_types, converted_python_types)
def test_atomic_type_with_data_type_with_parameters(self):
gateway = get_gateway()
JDataTypes = gateway.jvm.DataTypes
java_types = [JDataTypes.TIME(3).notNull(),
JDataTypes.TIMESTAMP(3).notNull(),
JDataTypes.VARBINARY(100).notNull(),
JDataTypes.BINARY(2).notNull(),
JDataTypes.VARCHAR(30).notNull(),
JDataTypes.CHAR(50).notNull(),
JDataTypes.DECIMAL(20, 10).notNull()]
converted_python_types = [_from_java_data_type(item) for item in java_types]
expected = [DataTypes.TIME(3, False),
DataTypes.TIMESTAMP(3).not_null(),
DataTypes.VARBINARY(100, False),
DataTypes.BINARY(2, False),
DataTypes.VARCHAR(30, False),
DataTypes.CHAR(50, False),
DataTypes.DECIMAL(20, 10, False)]
self.assertEqual(converted_python_types, expected)
def test_array_type(self):
# nullable/not_null flag will be lost during the conversion.
test_types = [DataTypes.ARRAY(DataTypes.BIGINT()),
DataTypes.ARRAY(DataTypes.BIGINT()),
DataTypes.ARRAY(DataTypes.STRING()),
DataTypes.ARRAY(DataTypes.ARRAY(DataTypes.BIGINT())),
DataTypes.ARRAY(DataTypes.ARRAY(DataTypes.STRING()))]
java_types = [_to_java_data_type(item) for item in test_types]
converted_python_types = [_from_java_data_type(item) for item in java_types]
self.assertEqual(test_types, converted_python_types)
def test_multiset_type(self):
test_types = [DataTypes.MULTISET(DataTypes.BIGINT()),
DataTypes.MULTISET(DataTypes.STRING()),
DataTypes.MULTISET(DataTypes.MULTISET(DataTypes.BIGINT())),
DataTypes.MULTISET(DataTypes.MULTISET(DataTypes.STRING()))]
java_types = [_to_java_data_type(item) for item in test_types]
converted_python_types = [_from_java_data_type(item) for item in java_types]
self.assertEqual(test_types, converted_python_types)
def test_map_type(self):
test_types = [DataTypes.MAP(DataTypes.BIGINT(), DataTypes.BIGINT()),
DataTypes.MAP(DataTypes.STRING(), DataTypes.STRING()),
DataTypes.MAP(DataTypes.STRING(),
DataTypes.MAP(DataTypes.STRING(), DataTypes.BIGINT())),
DataTypes.MAP(DataTypes.STRING(),
DataTypes.MAP(DataTypes.STRING(), DataTypes.STRING()))]
java_types = [_to_java_data_type(item) for item in test_types]
converted_python_types = [_from_java_data_type(item) for item in java_types]
self.assertEqual(test_types, converted_python_types)
def test_row_type(self):
test_types = [DataTypes.ROW([DataTypes.FIELD("a", DataTypes.INT()),
DataTypes.FIELD("b",
DataTypes.ROW(
[DataTypes.FIELD("c",
DataTypes.STRING())]))])]
java_types = [_to_java_data_type(item) for item in test_types]
converted_python_types = [_from_java_data_type(item) for item in java_types]
self.assertEqual(test_types, converted_python_types)
def test_list_view_type(self):
test_types = [DataTypes.LIST_VIEW(DataTypes.BIGINT()),
DataTypes.LIST_VIEW(DataTypes.STRING())]
java_types = [_to_java_data_type(item) for item in test_types]
converted_python_types = [_from_java_data_type(item) for item in java_types]
self.assertEqual(test_types, converted_python_types)
def test_map_view_type(self):
test_types = [DataTypes.MAP_VIEW(DataTypes.STRING(), DataTypes.BIGINT()),
DataTypes.MAP_VIEW(DataTypes.INT(), DataTypes.STRING())]
java_types = [_to_java_data_type(item) for item in test_types]
converted_python_types = [_from_java_data_type(item) for item in java_types]
self.assertEqual(test_types, converted_python_types)
class DataSerializerTests(PyFlinkTestCase):
def test_java_pickle_deserializer(self):
temp_file = tempfile.NamedTemporaryFile(delete=False, dir=tempfile.mkdtemp())
serializer = PickleSerializer()
data = [(1, 2), (3, 4), (5, 6), (7, 8)]
try:
serializer.serialize(data, temp_file)
finally:
temp_file.close()
gateway = get_gateway()
result = [tuple(int_pair) for int_pair in
list(gateway.jvm.PythonBridgeUtils.readPythonObjects(temp_file.name, False))]
self.assertEqual(result, [(1, 2), (3, 4), (5, 6), (7, 8)])
def test_java_batch_deserializer(self):
temp_file = tempfile.NamedTemporaryFile(delete=False, dir=tempfile.mkdtemp())
serializer = BatchedSerializer(PickleSerializer(), 2)
data = [(1, 2), (3, 4), (5, 6), (7, 8)]
try:
serializer.serialize(data, temp_file)
finally:
temp_file.close()
gateway = get_gateway()
result = [tuple(int_pair) for int_pair in
list(gateway.jvm.PythonBridgeUtils.readPythonObjects(temp_file.name, True))]
self.assertEqual(result, [(1, 2), (3, 4), (5, 6), (7, 8)])
if __name__ == "__main__":
try:
import xmlrunner
testRunner = xmlrunner.XMLTestRunner(output='target/test-reports')
except ImportError:
testRunner = None
unittest.main(testRunner=testRunner, verbosity=2)
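The test module above exercises PyFlink's schema-inference helpers. As a minimal standalone sketch (assuming a working `pyflink` installation matching the revision above), the same inference can be run directly on a sample row:

```python
from pyflink.table.types import Row, _infer_schema_from_data

# Infer a RowType schema from one sample row, as the tests above do.
sample = [Row(name="flink", stars=23573, ratio=0.5)]
schema = _infer_schema_from_data(sample)
for name, field in zip(schema.names, schema.fields):
    print(name, repr(field.data_type))
# Expected per the tests above: VarCharType for str, BigIntType for int,
# DoubleType for float, each nullable by default.
```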
Record 2: constants.py (gordicaleksa/pytorch-learn-reinforcement-learning)

| field | value |
|---|---|
| blob_id | b7f4020697dfedb5a1021564f4317816092e04f2 |
| directory_id | f9587ccd1dbf6b9dcdedc1333db8d05c62c5011b |
| path | /utils/constants.py |
| content_id | 206b7a0e4dd421ae227db4090434f4e1d08273ba |
| detected_licenses | MIT |
| license_type | permissive |
| repo_name | gordicaleksa/pytorch-learn-reinforcement-learning |
| snapshot_id | 472e9146dec592554c9143746aa8a1b09bacd276 |
| revision_id | 26dd439e73bb804b2065969caa5fa5429becfdd5 |
| branch_name | refs/heads/main |
| visit_date | 2023-04-25T05:19:26.368001 |
| revision_date | 2021-05-09T12:01:17 |
| committer_date | 2021-05-09T12:01:17 |
| github_id | 355,128,000 |
| star_events_count | 138 |
| fork_events_count | 26 |
| gha_license_id | null |
| gha_event_created_at | null |
| gha_created_at | null |
| gha_language | null |
| src_encoding | UTF-8 |
| language | Python |
| is_vendor | false |
| is_generated | false |
| length_bytes | 419 |
| extension | py |
| filename | constants.py |

content (constants.py):
import os
BINARIES_PATH = os.path.join(os.path.dirname(__file__), os.pardir, 'models', 'binaries')
CHECKPOINTS_PATH = os.path.join(os.path.dirname(__file__), os.pardir, 'models', 'checkpoints')
DATA_DIR_PATH = os.path.join(os.path.dirname(__file__), os.pardir, 'data')
# Make sure these exist, as the rest of the code assumes they do
os.makedirs(BINARIES_PATH, exist_ok=True)
os.makedirs(CHECKPOINTS_PATH, exist_ok=True)
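A small usage sketch for the module above (the checkpoint file name is hypothetical): the directories are created at import time, so callers can join paths against them directly.

```python
import os
from utils.constants import CHECKPOINTS_PATH  # module shown above

# Hypothetical checkpoint name; CHECKPOINTS_PATH itself is guaranteed to exist.
checkpoint_path = os.path.join(CHECKPOINTS_PATH, "dqn_checkpoint.pth")
print(checkpoint_path)
```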
Record 3: test_mount_hook.py (eth-cscs/sarus)

| field | value |
|---|---|
| blob_id | 283e985b3352395a4bd72cc8b571c15aa2f55916 |
| directory_id | 274d099df5e08c56de00fe7fade47854a056ab4c |
| path | /CI/src/integration_tests/test_mount_hook.py |
| content_id | 66bef3c2f369715822e73576a34078cf51dd7128 |
| detected_licenses | BSD-3-Clause |
| license_type | permissive |
| repo_name | eth-cscs/sarus |
| snapshot_id | 95e36bf21c77f2bb822ea0a97dfd7d64a71f6971 |
| revision_id | 9c01d76736940feb360175c515e5778e408e631e |
| branch_name | refs/heads/master |
| visit_date | 2023-08-31T02:33:19.008492 |
| revision_date | 2023-05-05T12:09:57 |
| committer_date | 2023-05-05T12:09:57 |
| github_id | 160,826,280 |
| star_events_count | 116 |
| fork_events_count | 11 |
| gha_license_id | BSD-3-Clause |
| gha_event_created_at | 2023-03-01T13:11:28 |
| gha_created_at | 2018-12-07T13:19:07 |
| gha_language | C++ |
| src_encoding | UTF-8 |
| language | Python |
| is_vendor | false |
| is_generated | false |
| length_bytes | 6,434 |
| extension | py |
| filename | test_mount_hook.py |

content (test_mount_hook.py):
# Sarus
#
# Copyright (c) 2018-2023, ETH Zurich. All rights reserved.
#
# Please, refer to the LICENSE file in the root directory.
# SPDX-License-Identifier: BSD-3-Clause
import unittest
import os
import shutil
import pytest
import common.util as util
class TestMountHook(unittest.TestCase):
"""
These tests verify that the features of the Mount hook (bind mounts, device mounts, wildcard resolution)
work correctly.
"""
OCIHOOK_CONFIG_FILE = os.environ["CMAKE_INSTALL_PREFIX"] + "/etc/hooks.d/mount_hook.json"
CONTAINER_IMAGE = "quay.io/ethcscs/ubuntu:20.04"
_CI_DIR = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
_DUMMY_LIB_PATH = _CI_DIR + "/dummy_libs/lib_dummy_0.so"
HOST_LIB_HASH = util.generate_file_md5_hash(_DUMMY_LIB_PATH, "md5")
@classmethod
def setUpClass(cls):
cls._pull_docker_images()
@classmethod
def _pull_docker_images(cls):
util.pull_image_if_necessary(is_centralized_repository=False,
image=cls.CONTAINER_IMAGE)
util.pull_image_if_necessary(is_centralized_repository=False,
image="quay.io/ethcscs/sarus-integration-tests:libfabric")
@staticmethod
def _generate_hook_config(args, with_ldconfig=False):
hook_config = {
"version": "1.0.0",
"hook": {
"path": os.environ["CMAKE_INSTALL_PREFIX"] + "/bin/mount_hook",
"args": ["mount_hook"] + args
},
"when": {
"always": True
},
"stages": ["prestart"]
}
if with_ldconfig:
hook_config["hook"]["env"] = ["LDCONFIG_PATH=" + shutil.which("ldconfig")]
return hook_config
def test_bind_mount(self):
mount_destination = "/usr/lib64/libMountHook.so.1"
self._check_hook_mount(hook_config=self._generate_hook_config_with_dummylib_mount(mount_destination),
expected_destination=mount_destination)
def test_ldcache_update(self):
mount_destination = "/usr/local/lib/libMountHook.so.1"
hook_config = self._generate_hook_config_with_dummylib_mount(mount_destination, with_ldconfig=True)
with util.temporary_hook_files((hook_config, self.OCIHOOK_CONFIG_FILE)):
ldcache_list = util.run_command_in_container(is_centralized_repository=False,
image=self.CONTAINER_IMAGE,
command=["/sbin/ldconfig", "-p"])
assert any(mount_destination in line for line in ldcache_list)
def test_fi_provider_path_wildcard_default(self):
self._check_hook_mount(hook_config=self._generate_hook_config_with_wildcard_mount(),
expected_destination="/usr/lib/provider-fi.so")
def test_fi_provider_path_wildcard_from_environment(self):
env_var_value = "/fi/provider/path"
self._check_hook_mount(hook_config=self._generate_hook_config_with_wildcard_mount(),
expected_destination=f"{env_var_value}/provider-fi.so",
options=[f"--env=FI_PROVIDER_PATH={env_var_value}"])
def test_fi_provider_path_wildcard_from_environment_precedes_ldcache(self):
env_var_value = "/fi/provider/path"
self._check_hook_mount(hook_config=self._generate_hook_config_with_wildcard_mount(with_ldconfig=True),
expected_destination=f"{env_var_value}/provider-fi.so",
image="quay.io/ethcscs/sarus-integration-tests:libfabric",
options=[f"--env=FI_PROVIDER_PATH={env_var_value}"])
def test_fi_provider_path_wildcard_from_ldcache(self):
self._check_hook_mount(hook_config=self._generate_hook_config_with_wildcard_mount(with_ldconfig=True),
expected_destination="/usr/lib/libfabric/provider-fi.so",
image="quay.io/ethcscs/sarus-integration-tests:libfabric")
def _generate_hook_config_with_dummylib_mount(self, mount_destination, **kwargs):
return self._generate_hook_config([f"--mount=type=bind,src={self._DUMMY_LIB_PATH},dst={mount_destination}"],
**kwargs)
def _generate_hook_config_with_wildcard_mount(self, **kwargs):
return self._generate_hook_config_with_dummylib_mount("<FI_PROVIDER_PATH>/provider-fi.so", **kwargs)
def _check_hook_mount(self, hook_config, expected_destination, image=CONTAINER_IMAGE, **kwargs):
with util.temporary_hook_files((hook_config, self.OCIHOOK_CONFIG_FILE)):
file_hash = util.get_hash_of_file_in_container(expected_destination, image, **kwargs)
assert file_hash == self.HOST_LIB_HASH
@pytest.mark.asroot
class TestMountHookDevices(unittest.TestCase):
"""
These tests verify that the mount hook is able to mount and whitelist devices
for access in the container devices cgroup.
"""
OCIHOOK_CONFIG_FILE = os.environ["CMAKE_INSTALL_PREFIX"] + "/etc/hooks.d/mount_hook.json"
DEVICE_FILENAME = "/dev/test0"
CONTAINER_IMAGE = "quay.io/ethcscs/ubuntu:20.04"
@classmethod
def setUpClass(cls):
util.pull_image_if_necessary(is_centralized_repository=False, image=cls.CONTAINER_IMAGE)
cls._create_device_file()
@classmethod
def tearDownClass(cls):
os.remove(cls.DEVICE_FILENAME)
@classmethod
def _create_device_file(cls):
import stat
device_mode = 0o666 | stat.S_IFCHR
device_id = os.makedev(511, 511)
os.mknod(cls.DEVICE_FILENAME, device_mode, device_id)
def test_whitelist_device(self):
hook_config = TestMountHook._generate_hook_config([f"--device={self.DEVICE_FILENAME}:rw"])
with util.temporary_hook_files((hook_config, self.OCIHOOK_CONFIG_FILE)):
devices_list = self._get_devices_list_from_cgroup_in_container()
assert "c 511:511 rw" in devices_list
def _get_devices_list_from_cgroup_in_container(self):
return util.run_command_in_container(is_centralized_repository=False,
image=self.CONTAINER_IMAGE,
command=["cat", "/sys/fs/cgroup/devices/devices.list"])
Record 4: coadd.py (pypeit/PypeIt)

| field | value |
|---|---|
| blob_id | 880cf60db35d739f599be62883611d92b352b457 |
| directory_id | c3e0a6919caf85c35239ef23084df9bbf8dd61c3 |
| path | /pypeit/core/coadd.py |
| content_id | 6a4dd41db9002eef00f4c87fc9a3d2eb8ccd7cae |
| detected_licenses | BSD-3-Clause |
| license_type | permissive |
| repo_name | pypeit/PypeIt |
| snapshot_id | 6eb9e5afd62acc9d363e497cd9e367d620f86ea4 |
| revision_id | 0d2e2196afc6904050b1af4d572f5c643bb07e38 |
| branch_name | refs/heads/release |
| visit_date | 2023-08-25T21:15:59.113114 |
| revision_date | 2023-06-04T15:23:39 |
| committer_date | 2023-06-04T15:23:39 |
| github_id | 36,958,428 |
| star_events_count | 136 |
| fork_events_count | 98 |
| gha_license_id | BSD-3-Clause |
| gha_event_created_at | 2023-09-12T17:42:15 |
| gha_created_at | 2015-06-05T22:25:37 |
| gha_language | Python |
| src_encoding | UTF-8 |
| language | Python |
| is_vendor | false |
| is_generated | false |
| length_bytes | 160,195 |
| extension | py |
| filename | coadd.py |

content (coadd.py):
"""
Coadding module.
.. include common links, assuming primary doc root is up one directory
.. include:: ../include/links.rst
"""
import os
import sys
from IPython import embed
import numpy as np
import scipy
import matplotlib.pyplot as plt
from matplotlib.ticker import NullFormatter, NullLocator, MaxNLocator
from astropy import stats
from astropy import convolution
from pypeit import utils
from pypeit.core import fitting
from pypeit import specobjs
from pypeit import msgs
from pypeit.core import combine
from pypeit.core.wavecal import wvutils
from pypeit.core import pydl
from pypeit import data
def renormalize_errors_qa(chi, maskchi, sigma_corr, sig_range = 6.0,
title:str='', qafile:str=None):
'''
Generate a histogram QA plot of the input chi distribution.
Args:
chi (`numpy.ndarray`_):
your chi values
maskchi (`numpy.ndarray`_):
True = good, mask for your chi array of type bool
sigma_corr (float):
corrected sigma
sig_range (float):
used to set binsize, default +- 6-sigma
title (str, optional):
plot title
qafile (str, optional):
Write figure to this output QA file, if provided
'''
# Prep
n_bins = 50
binsize = 2.0*sig_range/n_bins
bins_histo = -sig_range + np.arange(n_bins)*binsize+binsize/2.0
xvals = np.arange(-10.0,10,0.02)
gauss = scipy.stats.norm(loc=0.0,scale=1.0)
gauss_corr = scipy.stats.norm(loc=0.0,scale=sigma_corr)
# Plot
plt.figure(figsize=(12, 8))
plt.hist(chi[maskchi],bins=bins_histo,density=True,histtype='step', align='mid',color='k',linewidth=3,label='Chi distribution')
plt.plot(xvals,gauss.pdf(xvals),'c-',lw=3,label='sigma=1')
plt.plot(xvals,gauss_corr.pdf(xvals),'m--',lw=2,label='new sigma={:4.2f}'.format(round(sigma_corr,2)))
plt.ylabel('Residual distribution')
plt.xlabel('chi')
plt.xlim([-6.05,6.05])
plt.legend(fontsize=13,loc=2)
plt.title(title, fontsize=16, color='red')
if qafile is not None:
if len(qafile.split('.'))==1:
msgs.info("No fomat given for the qafile, save to PDF format.")
qafile = qafile+'.pdf'
plt.savefig(qafile,dpi=300)
msgs.info("Wrote QA: {:s}".format(qafile))
plt.show()
plt.close()
def renormalize_errors(chi, mask, clip=6.0, max_corr=5.0, title = '', debug=False):
"""
Function for renormalizing errors. The distribution of input chi (defined by chi = (data - model)/sigma) values is
analyzed, and a correction factor to the standard deviation sigma_corr is returned. This should be multiplied into
the errors. In this way, a rejection threshold of, e.g., 3-sigma will always correspond to roughly the same percentile.
This renormalization guarantees that rejection is not too aggressive in cases where the empirical errors determined
from the chi-distribution differ significantly from the noise model which was used to determine chi.
Args:
chi (`numpy.ndarray`_):
input chi values
mask (`numpy.ndarray`_):
True = good, mask for your chi array of type bool
clip (float, optional):
threshold for outliers which will be clipped for the purpose of computing the renormalization factor
max_corr (float, optional):
maximum corrected sigma allowed.
title (str, optional):
title for QA plot, passed to renormalize_errors_qa
debug (bool, optional):
If True, show the QA plot created by renormalize_errors_qa
Returns:
tuple: (1) sigma_corr (float), corrected new sigma; (2) maskchi
(`numpy.ndarray`_, bool): new mask (True=good) which indicates the values
used to compute the correction (i.e it includes clipping)
"""
chi2 = chi**2
maskchi = (chi2 < clip**2) & mask
if (np.sum(maskchi) > 0):
gauss_prob = 1.0 - 2.0 * scipy.stats.norm.cdf(-1.0)
chi2_sigrej = np.percentile(chi2[maskchi], 100.0*gauss_prob)
sigma_corr = np.sqrt(chi2_sigrej)
if sigma_corr < 1.0:
msgs.warn("Error renormalization found correction factor sigma_corr = {:f}".format(sigma_corr) +
" < 1." + msgs.newline() +
" Errors are overestimated so not applying correction")
sigma_corr = 1.0
if sigma_corr > max_corr:
msgs.warn(("Error renormalization found sigma_corr/sigma = {:f} > {:f}." + msgs.newline() +
"Errors are severely underestimated." + msgs.newline() +
"Setting correction to sigma_corr = {:4.2f}").format(sigma_corr, max_corr, max_corr))
sigma_corr = max_corr
if debug:
renormalize_errors_qa(chi, maskchi, sigma_corr, title=title)
else:
msgs.warn('No good pixels in error_renormalize. There are probably issues with your data')
sigma_corr = 1.0
return sigma_corr, maskchi
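# Illustrative check of the renormalization above (not part of PypeIt): for chi
# values drawn from a Gaussian 1.5x wider than the assumed noise model, the
# returned correction factor should come out close to 1.5:
#
#     rng = np.random.default_rng(0)
#     chi = rng.normal(scale=1.5, size=100_000)
#     sigma_corr, maskchi = renormalize_errors(chi, np.ones(chi.size, dtype=bool))
#     # sigma_corr ~ 1.5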
def poly_model_eval(theta, func, model, wave, wave_min, wave_max):
"""
Routine to evaluate the polynomial fit
Args:
theta (`numpy.ndarray`_):
coefficient parameter vector of type=float
func (str):
polynomial type
model (str):
model type, valid model types are 'poly', 'square', or 'exp', corresponding to normal polynomial,
squared polynomial, or exponentiated polynomial
wave (`numpy.ndarray`_):
array of wavelength values of type=float
wave_min (float):
minimum wavelength for polynomial fit range
wave_max (float):
maximum wavelength for polynomial fit range
Returns:
`numpy.ndarray`_: Array of evaluated polynomial with same shape as wave
"""
# Evaluate the polynomial for rescaling
if 'poly' in model:
ymult = fitting.evaluate_fit(theta, func, wave, minx=wave_min, maxx=wave_max)
elif 'square' in model:
ymult = (fitting.evaluate_fit(theta, func, wave, minx=wave_min, maxx=wave_max)) ** 2
elif 'exp' in model:
# Clipping to avoid overflow.
ymult = np.exp(np.clip(fitting.evaluate_fit(theta, func, wave, minx=wave_min, maxx=wave_max)
, None, 0.8 * np.log(sys.float_info.max)))
else:
msgs.error('Unrecognized value of model requested')
return ymult
def poly_ratio_fitfunc_chi2(theta, gpm, arg_dict):
"""
Function for computing the chi^2 loss function for solving for the polynomial rescaling of one spectrum to another.
There are two non-standard things implemented here which increase the robustness. The first is a non-standard error used for the
chi, which adds robustness and increases the stability of the optimization. This was taken from the idlutils
solve_poly_ratio code. The second thing is that the chi is remapped using the scipy huber loss function to
reduce sensitivity to outliers, based on the scipy cookbook on robust optimization.
Args:
theta (`numpy.ndarray`_): parameter vector for the polynomial fit
gpm (`numpy.ndarray`_): boolean mask for the current iteration of the optimization, True=good
arg_dict (dict): dictionary containing arguments
Returns:
float: this is effectively the chi^2, i.e. the quantity to be
minimized by the optimizer. Note that this is not formally the
chi^2 since the huber loss function re-maps the chi to be less
sensitive to outliers.
"""
# Unpack the data to be rescaled, the mask for the reference spectrum, and the wavelengths
mask = arg_dict['mask']
flux_med = arg_dict['flux_med']
ivar_med = arg_dict['ivar_med']
flux_ref_med = arg_dict['flux_ref_med']
ivar_ref_med = arg_dict['ivar_ref_med']
wave = arg_dict['wave']
wave_min = arg_dict['wave_min']
wave_max = arg_dict['wave_max']
func = arg_dict['func']
model = arg_dict['model']
ymult = poly_model_eval(theta, func, model, wave, wave_min, wave_max)
flux_scale = ymult*flux_med
mask_both = mask & gpm
# This is the formally correct ivar used for the rejection, but not used in the fitting. This appears to yield
# unstable results
#totvar = utils.inverse(ivar_ref, positive=True) + ymult**2*utils.inverse(ivar, positive=True)
#ivartot = mask_both*utils.inverse(totvar, positive=True)
# The errors are rescaled at every function evaluation, but we only allow the errors to get smaller by up to a
# factor of 1e4, and we only allow them to get larger slowly (as the square root). This should very strongly
# constrain the flux-correction vectors from going too small (or negative), or too large.
## Schlegel's version here
vmult = np.fmax(ymult,1e-4)*(ymult <= 1.0) + np.sqrt(ymult)*(ymult > 1.0)
ivarfit = mask_both/(1.0/(ivar_med + np.invert(mask_both)) + np.square(vmult)/(ivar_ref_med + np.invert(mask_both)))
chi_vec = mask_both * (flux_ref_med - flux_scale) * np.sqrt(ivarfit)
# Changing the Huber loss parameter from step to step results in instability during optimization --MSR.
# Robustly characterize the dispersion of this distribution
#chi_mean, chi_median, chi_std = stats.sigma_clipped_stats(
# chi_vec, np.invert(mask_both), cenfunc='median', stdfunc=utils.nan_mad_std, maxiters=5, sigma=2.0)
chi_std = np.std(chi_vec)
# The Huber loss function smoothly interpolates between being chi^2/2 for standard chi^2 rejection and
# a linear function of residual in the outlying tails for large residuals. This transition occurs at the
# value of the first argument, which we have set to be 2.0*chi_std, which is 2-sigma given the modified
# errors described above from Schlegel's code.
robust_scale = 2.0
huber_vec = scipy.special.huber(robust_scale*chi_std, chi_vec)
loss_function = np.sum(huber_vec*mask_both)
#chi2 = np.sum(np.square(chi_vec))
return loss_function
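# Illustration of the Huber remapping used above: scipy.special.huber(delta, r)
# equals r**2 / 2 for |r| <= delta and delta * (|r| - delta / 2) otherwise, e.g.
#     scipy.special.huber(2.0, 1.0)  == 0.5    # quadratic regime
#     scipy.special.huber(2.0, 10.0) == 18.0   # linear regime
# so with delta = 2 * chi_std, residuals beyond roughly 2-sigma contribute only linearly.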
def poly_ratio_fitfunc(flux_ref, gpm, arg_dict, init_from_last=None, **kwargs_opt):
"""
Function to be optimized by robust_optimize for solve_poly_ratio
polynomial rescaling of one spectrum to match a reference
spectrum. This function has the correct format for running
robust_optimize optimization. In addition to running the
optimization, this function recomputes the error vector ivartot
for the error rejection that takes place at each iteration of the
robust_optimize optimization. The ivartot is also renormalized
using the renormalize_errors function enabling rejection. A scale
factor is multiplied into the true errors to allow one to reject
based on the statistics of the actual error distribution.
Args:
flux_ref (`numpy.ndarray`_):
Reference flux that we are trying to rescale our spectrum
to match
gpm (`numpy.ndarray`_):
Boolean array with mask for the current iteration of the
optimization. True=good
arg_dict (:obj:`dict`):
dictionary containing arguments for the optimizing
function. See poly_ratio_fitfunc_chi2 for how arguments
are used. They are mask, flux_med, flux_ref_med,
ivar_ref_med, wave, wave_min, wave_max, func
init_from_last (obj, optional):
Use this scipy optimization object from a previous iteration as the guess
kwargs_opt (:obj:`dict`):
arguments to be passed to the optimizer, which in this
case is just vanilla scipy.minimize with the default
optimizer
Returns:
tuple:
Three objects are returned. (1) scipy optimization object,
(2) scale factor to be applied to the data to match the
reference spectrum flux_ref, (3) error vector to be used for
the rejection that takes place at each iteration of the
robust_optimize optimization
"""
# flux_ref, ivar_ref act like the 'data', the rescaled flux will be the 'model'
guess = arg_dict['guess'] if init_from_last is None else init_from_last.x
result = scipy.optimize.minimize(poly_ratio_fitfunc_chi2, guess, args=(gpm, arg_dict), **kwargs_opt)
flux = arg_dict['flux']
ivar = arg_dict['ivar']
mask = arg_dict['mask']
ivar_ref = arg_dict['ivar_ref']
wave = arg_dict['wave']
wave_min = arg_dict['wave_min']
wave_max = arg_dict['wave_max']
func = arg_dict['func']
model = arg_dict['model']
# Evaluate the polynomial for rescaling
ymult = poly_model_eval(result.x, func, model, wave, wave_min, wave_max)
flux_scale = ymult*flux
mask_both = mask & gpm
totvar = utils.inverse(ivar_ref) + ymult**2*utils.inverse(ivar)
ivartot1 = mask_both*utils.inverse(totvar)
# Now rescale the errors
chi = (flux_scale - flux_ref)*np.sqrt(ivartot1)
try:
debug = arg_dict['debug']
except KeyError:
debug = False
sigma_corr, maskchi = renormalize_errors(chi, mask=gpm, title = 'poly_ratio_fitfunc', debug=debug)
ivartot = ivartot1/sigma_corr**2
return result, flux_scale, ivartot
def median_filt_spec(flux, ivar, gpm, med_width):
'''
Utility routine to median filter a spectrum using the mask and propagating the errors using the
utils.fast_running_median function.
Parameters
----------
flux : `numpy.ndarray`_
flux array with shape (nspec,)
ivar : `numpy.ndarray`_
inverse variance with shape (nspec,)
gpm : `numpy.ndarray`_
Boolean mask on the spectrum with shape (nspec,). True = good
med_width : float
width for median filter in pixels
Returns
-------
flux_med : `numpy.ndarray`_
Median filtered flux
ivar_med : `numpy.ndarray`_
corresponding propagated variance
'''
flux_med = np.zeros_like(flux)
ivar_med = np.zeros_like(ivar)
flux_med0 = utils.fast_running_median(flux[gpm], med_width)
flux_med[gpm] = flux_med0
var = utils.inverse(ivar)
var_med0 = utils.smooth(var[gpm], med_width)
ivar_med[gpm] = utils.inverse(var_med0)
return flux_med, ivar_med
def solve_poly_ratio(wave, flux, ivar, flux_ref, ivar_ref, norder, mask = None, mask_ref = None,
scale_min = 0.05, scale_max = 100.0, func='legendre', model ='square',
maxiter=3, sticky=True, lower=3.0, upper=3.0, median_frac=0.01,
ref_percentile=70.0, debug=False):
"""
Routine for solving for the polynomial rescaling of an input
spectrum flux to match a reference spectrum flux_ref. The two
spectra need to be defined on the same wavelength grid. The code
will work best if you choose the reference to be the higher S/N
ratio spectrum. Note that the code multiplies in the square of a
polynomial of order norder to ensure positivity of the scale
factor. It also operates on median filtered spectra to be more
robust against outliers
Parameters
----------
wave : `numpy.ndarray`_
wavelength array of shape (nspec,). flux, ivar, flux_ref, and ivar_ref
must all be on the same wavelength grid
flux : `numpy.ndarray`_
flux that you want to rescale to match flux_ref
ivar : `numpy.ndarray`_
inverse variance of the array that you want to rescale to match flux_ref
flux_ref : `numpy.ndarray`_
reference flux that you want to rescale flux to match.
ivar_ref : `numpy.ndarray`_
inverse variance for reference flux
norder : int
Order of polynomial rescaling; norder=1 is a linear fit and norder must
be >= 1 otherwise the code will fault.
mask : `numpy.ndarray`_, optional
boolean mask for spectrum that you want to rescale, True=Good
mask_ref : `numpy.ndarray`_, optional
boolean mask for reference flux
scale_min : float, optional
minimum scaling factor allowed. default =0.05
scale_max : float, optional
maximum scaling factor allowed. default=100.0
func : str, optional
function you want to use. default='legendre'
model : str, optional
model type, valid model types are 'poly', 'square', or 'exp',
corresponding to normal polynomial, squared polynomial, or exponentiated
polynomial. default = 'square'
maxiter : int, optional
maximum number of iterations for robust_optimize. default=3
sticky : bool, optional
whether you want the rejection to be sticky or not with robust_optimize.
See docs for djs_reject for definition of sticky. default=True
lower : float, optional
lower sigrej rejection threshold for robust_optimize. default=3.0
upper : float, optional
upper sigrej rejection threshold for robust_optimize. default=3.0
median_frac : float, optional
the code rescales median filtered spectra with 'reflect' boundary
conditions. The width of the median filter will be median_frac*nspec,
where nspec is the number of spectral pixels. default = 0.01,
debug : bool, optional
If True, show interactive QA plot. default=False
Returns
-------
ymult : `numpy.ndarray`_, (nspec,)
rescaling factor to be multiplied into flux to match flux_ref.
fit_tuple : :obj:`tuple`
Tuple with the polynomial coefficients, the minimum wavelength
coordinate and maximum wavelength coordinate used in the fit.
flux_rescale : `numpy.ndarray`_, (nspec,)
rescaled flux, i.e. ymult multiplied into flux.
ivar_rescale : `numpy.ndarray`_, (nspec,)
rescaled inverse variance
outmask : `numpy.ndarray`_, bool, (nspec,)
output mask determined from the robust_optimize optimization/rejection
iterations. True=Good
"""
if norder < 1:
msgs.error('You cannot solve for the polynomial ratio for norder < 1. For rescaling by a constant use robust_median_ratio')
if mask is None:
mask = (ivar > 0.0)
if mask_ref is None:
mask_ref = (ivar_ref > 0.0)
#
nspec = wave.size
# Determine an initial guess
ratio = robust_median_ratio(flux, ivar, flux_ref, ivar_ref, mask=mask, mask_ref=mask_ref,
ref_percentile=ref_percentile, max_factor=scale_max)
# guess = np.append(ratio, np.zeros(norder))
wave_min = wave.min()
wave_max = wave.max()
# Now compute median filtered versions of the spectra which we will actually operate on for the fitting. Note
# that rejection will however work on the non-filtered spectra.
med_width = (2.0*np.ceil(median_frac/2.0*nspec) + 1).astype(int)
flux_med, ivar_med = median_filt_spec(flux, ivar, mask, med_width)
flux_ref_med, ivar_ref_med = median_filt_spec(flux_ref, ivar_ref, mask_ref, med_width)
if 'poly' in model:
guess = np.append(ratio, np.zeros(norder))
elif 'square' in model:
guess = np.append(np.sqrt(ratio), np.zeros(norder))
elif 'exp' in model:
guess = np.append(np.log(ratio), np.zeros(norder))
else:
msgs.error('Unrecognized model type')
## JFH I'm not convinced any of this below is right or necessary. Going back to previous logic but
## leaving this here for now
# Use robust_fit to get a best-guess linear fit as the starting point. The logic below deals with whether
# we re fitting a polynomial model to the data model='poly', to the square model='square', or taking the exponential
# of a polynomial fit model='exp'
#if 'poly' in model:
# #guess = np.append(ratio, np.zeros(norder))
# yval = flux_ref_med
# yval_ivar = ivar_ref_med
# scale_mask = np.ones_like(flux_ref_med, dtype=bool) & (wave > 1.0)
#elif 'square' in model:
# #guess = np.append(np.sqrt(ratio), np.zeros(norder))
# yval = np.sqrt(flux_ref_med + (flux_ref_med < 0))
# yval_ivar = 4.0*flux_ref_med*ivar_ref_med
# scale_mask = (flux_ref_med >= 0) & (wave > 1.0)
#elif 'exp' in model:
# #guess = np.append(np.log(ratio), np.zeros(norder))
# yval = np.log(flux_ref_med + (flux_ref_med <= 0))
# yval_ivar = flux_ref_med**2*ivar_ref_med
# scale_mask = (flux_ref_med > 0) & (wave > 1.0)
#else:
# msgs.error('Unrecognized model type')
#pypfit = fitting.robust_fit(wave, yval, 1, function=func, in_gpm=scale_mask, invvar=yval_ivar,
# sticky=False, use_mad=False, debug=debug, upper=3.0, lower=3.0)
#guess = np.append(pypfit.fitc, np.zeros(norder - 2)) if norder > 1 else pypfit.fitc
arg_dict = dict(flux = flux, ivar = ivar, mask = mask,
flux_med = flux_med, ivar_med = ivar_med,
flux_ref_med = flux_ref_med, ivar_ref_med = ivar_ref_med,
ivar_ref = ivar_ref, wave = wave, wave_min = wave_min,
wave_max = wave_max, func = func, model=model, norder = norder, guess = guess, debug=debug)
result, ymodel, ivartot, outmask = fitting.robust_optimize(flux_ref, poly_ratio_fitfunc, arg_dict, inmask=mask_ref,
maxiter=maxiter, lower=lower, upper=upper, sticky=sticky)
ymult1 = poly_model_eval(result.x, func, model, wave, wave_min, wave_max)
ymult = np.fmin(np.fmax(ymult1, scale_min), scale_max)
flux_rescale = ymult*flux
ivar_rescale = ivar/ymult**2
if debug:
# Determine the y-range for the QA plots
scale_spec_qa(wave, flux_med, ivar_med, wave, flux_ref_med, ivar_ref_med, ymult, 'poly', mask = mask, mask_ref=mask_ref,
title='Median Filtered Spectra that were poly_ratio Fit')
return ymult, (result.x, wave_min, wave_max), flux_rescale, ivar_rescale, outmask
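# Usage sketch (illustrative only; the array names below are placeholders, not
# variables defined in this module): rescaling `flux` onto a reference spectrum on the
# same wavelength grid with a quadratic (norder=2) polynomial ratio might look like
#
#   ymult, fit_tuple, flux_resc, ivar_resc, outmask = solve_poly_ratio(
#       wave, flux, ivar, flux_ref, ivar_ref, 2,
#       mask=(ivar > 0.0), mask_ref=(ivar_ref > 0.0))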
def interp_oned(wave_new, wave_old, flux_old, ivar_old, gpm_old, sensfunc=False):
"""
Interpolate a 1D spectrum onto a new wavelength grid.
Interpolation is done using `scipy.interpolate.interp1d` with ``cubic``
interpolation. Any wavelengths in ``wave_new`` that are beyond the range
of ``wave_old`` are set to ``np.nan`` and masked via the output
good-pixel mask.
.. warning::
Any wavelength in ``wave_old`` that is less than 1 is assumed to
indicate that the wavelength is invalid!
Args:
wave_new (`numpy.ndarray`_):
New wavelength grid for the output spectra. Must be 1D.
wave_old (`numpy.ndarray`_):
Old wavelength grid. Must be 1D, need not have the same size as
``wave_new``.
flux_old (`numpy.ndarray`_):
Old flux on the wave_old grid. Shape must match ``wave_old``.
ivar_old (`numpy.ndarray`_):
Old ivar on the wave_old grid. Shape must match ``wave_old``.
gpm_old (`numpy.ndarray`_):
Old good-pixel mask (True=Good) on the wave_old grid. Shape must
match ``wave_old``.
sensfunc (:obj:`bool`, optional):
If True, the quantities ``flux*delta_wave`` and the corresponding
``ivar/delta_wave**2`` will be interpolated and returned instead of
``flux`` and ``ivar``. This is useful for sensitivity function
computation where we need flux*(wavelength bin width). Because
delta_wave is a difference of the wavelength grid, interpolating
in the presence of masked data requires special care.
Returns:
:obj:`tuple`: Returns three `numpy.ndarray`_ objects with the
interpolated flux, inverse variance, and good-pixel mask arrays with
the length matching the new wavelength grid.
"""
# Check input
if wave_new.ndim != 1 or wave_old.ndim != 1:
msgs.error('All input vectors must be 1D.')
if flux_old.shape != wave_old.shape or ivar_old.shape != wave_old.shape \
or gpm_old.shape != wave_old.shape:
msgs.error('All vectors to interpolate must have the same size.')
# Do not interpolate if the old wavelength grid is exactly the same as wave_new
if np.array_equal(wave_new, wave_old):
return flux_old, ivar_old, gpm_old
wave_gpm = wave_old > 1.0 # Deal with the zero wavelengths
if sensfunc:
delta_wave_interp = wvutils.get_delta_wave(wave_old, wave_gpm)
flux_interp = flux_old[wave_gpm]/delta_wave_interp[wave_gpm]
ivar_interp = ivar_old[wave_gpm]*delta_wave_interp[wave_gpm]**2
else:
flux_interp = flux_old[wave_gpm]
ivar_interp = ivar_old[wave_gpm]
flux_new = scipy.interpolate.interp1d(wave_old[wave_gpm], flux_interp, kind='cubic',
bounds_error=False, fill_value=np.nan)(wave_new)
ivar_new = scipy.interpolate.interp1d(wave_old[wave_gpm], ivar_interp, kind='cubic',
bounds_error=False, fill_value=np.nan)(wave_new)
# Interpolate a floating-point version of the mask
gpm_new_tmp = scipy.interpolate.interp1d(wave_old[wave_gpm], gpm_old.astype(float)[wave_gpm],
kind='cubic', bounds_error=False,
fill_value=np.nan)(wave_new)
# Don't allow the ivar to ever be less than zero
ivar_new = (ivar_new > 0.0)*ivar_new
gpm_new = (gpm_new_tmp > 0.8) & (ivar_new > 0.0) & np.isfinite(flux_new) & np.isfinite(ivar_new)
return flux_new, ivar_new, gpm_new
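# Usage sketch (illustrative only; placeholder names): putting a single spectrum onto
# a new 1D wavelength grid while propagating its good-pixel mask,
#
#   flux_new, ivar_new, gpm_new = interp_oned(wave_new, wave_old, flux_old, ivar_old,
#                                             ivar_old > 0.0, sensfunc=False)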
# TODO: ``sensfunc`` should be something like "conserve_flux". It would be
# useful to compare these resampling routines against
# `pypeit.sampling.Resample`.
def interp_spec(wave_new, waves, fluxes, ivars, gpms, sensfunc=False):
"""
Interpolate a set of spectra onto a new wavelength grid.
The method can perform two types of interpolation, depending on the
shapes of the input arrays.
1. If the new wavelength grid (``wave_new``) is 1D, all input spectra
are interpolated to this new grid. The input spectra can be
provided as either 1D or 2D arrays.
2. If the new wavelength grid (``wave_new``) is 2D, all input spectra
*must* be 1D. The single spectrum is then interpolated onto each of
the new wavelength grids.
Parameters
----------
wave_new : `numpy.ndarray`_, shape (nspec,) or (nspec, nimgs),
New wavelength grid for output spectra. Shape can be 1D or 2D. See the
method description for how this affects the code flow above.
waves : `numpy.ndarray`_, shape (nspec,) or (nspec, nexp)
Wavelength vector for current spectra. Shape can be 1D or 2D, where
nexp need not equal nimgs. See the method description for how this
affects the code flow above.
fluxes : `numpy.ndarray`_
Flux vectors. Shape must match ``waves``.
ivars : `numpy.ndarray`_
Inverse variance vectors. Shape must match ``waves``.
gpms : `numpy.ndarray`_
Boolean good-pixel masks for each spectrum (True=Good). Shape must match
``waves``.
sensfunc : :obj:`bool`, optional
If True, the quantities ``flux*delta_wave`` and the corresponding
``ivar/delta_wave**2`` will be interpolated and returned instead of
``flux`` and ``ivar``. This is useful for sensitivity function
computation where we need flux*(wavelength bin width). Because
delta_wave is a difference of the wavelength grid, interpolating in the
presence of masked data requires special care.
Returns
-------
fluxes_inter : `numpy.ndarray`_
interpolated flux with size and shape matching the new wavelength grid.
ivars_inter : `numpy.ndarray`_
interpolated inverse variance with size and shape matching the new
wavelength grid.
gpms_inter : `numpy.ndarray`_
interpolated good-pixel mask with size and shape matching the new
wavelength grid.
"""
# Check input
if wave_new.ndim > 2:
msgs.error('Invalid shape for wave_new; must be 1D or 2D')
if wave_new.ndim == 2 and fluxes.ndim != 1:
msgs.error('If new wavelength grid is 2D, all other input arrays must be 1D.')
if fluxes.shape != waves.shape or ivars.shape != waves.shape or gpms.shape != waves.shape:
msgs.error('Input spectral arrays must all have the same shape.')
# First case: interpolate either an (nspec, nexp) array of spectra onto a
# single wavelength grid
if wave_new.ndim == 1:
if fluxes.ndim == 1:
return interp_oned(wave_new, waves, fluxes, ivars, gpms, sensfunc=sensfunc)
nexp = fluxes.shape[1]
# Interpolate each exposure onto the new wavelength grid.
fluxes_inter = np.zeros((wave_new.size, nexp), dtype=float)
ivars_inter = np.zeros((wave_new.size, nexp), dtype=float)
gpms_inter = np.zeros((wave_new.size, nexp), dtype=bool)
for ii in range(nexp):
fluxes_inter[:,ii], ivars_inter[:,ii], gpms_inter[:,ii] \
= interp_oned(wave_new, waves[:,ii], fluxes[:,ii], ivars[:,ii], gpms[:,ii],
sensfunc=sensfunc)
return fluxes_inter, ivars_inter, gpms_inter
# Second case: interpolate a single spectrum onto an (nspec, nexp) array of
# wavelengths. To make it here, wave_new.ndim must be 2.
fluxes_inter = np.zeros_like(wave_new, dtype=float)
ivars_inter = np.zeros_like(wave_new, dtype=float)
gpms_inter = np.zeros_like(wave_new, dtype=bool)
for ii in range(wave_new.shape[1]):
fluxes_inter[:,ii], ivars_inter[:,ii], gpms_inter[:,ii] \
= interp_oned(wave_new[:,ii], waves, fluxes, ivars, gpms, sensfunc=sensfunc)
return fluxes_inter, ivars_inter, gpms_inter
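# Usage sketch (illustrative only; placeholder names): the same call signature covers
# both supported cases, i.e. a stack of spectra interpolated onto one 1D grid, or a
# single spectrum interpolated onto a 2D (nspec, nimgs) set of grids,
#
#   fluxes_i, ivars_i, gpms_i = interp_spec(wave_new, waves, fluxes, ivars, gpms)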
def smooth_weights(inarr, gdmsk, sn_smooth_npix):
"""Smooth the input weights with a Gaussian 1D kernel.
Args:
inarr (`numpy.ndarray`_):
S/N spectrum to be smoothed. shape = (nspec,)
gdmsk (`numpy.ndarray`_):
Boolean mask of good pixels. shape = (nspec,)
sn_smooth_npix (float):
Number of pixels used for determining smoothly varying S/N ratio weights.
The sigma of the kernel is set by
sig_res = max(sn_smooth_npix / 10.0, 3.0)
Returns:
`numpy.ndarray`_: smoothed version of inarr.
"""
spec_vec = np.arange(gdmsk.size)
sn_med1 = np.zeros(inarr.size)
sn_med1[gdmsk] = utils.fast_running_median(inarr[gdmsk], sn_smooth_npix)
sn_med2 = scipy.interpolate.interp1d(spec_vec[gdmsk], sn_med1[gdmsk], kind='cubic',
bounds_error=False, fill_value=-999)(spec_vec)
# Fill the S/N weight to the left and right with the nearest value
mask_good = np.where(sn_med2 != -999)[0]
idx_mn, idx_mx = np.min(mask_good), np.max(mask_good)
sn_med2[:idx_mn] = sn_med2[idx_mn]
sn_med2[idx_mx:] = sn_med2[idx_mx]
# Smooth with a Gaussian kernel
sig_res = np.fmax(sn_smooth_npix / 10.0, 3.0)
gauss_kernel = convolution.Gaussian1DKernel(sig_res)
sn_conv = convolution.convolve(sn_med2, gauss_kernel, boundary='extend')
return sn_conv
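# Usage sketch (illustrative only; placeholder names and an arbitrary smoothing
# length): smoothing a (S/N)^2 vector before it is used as a coadd weight,
#
#   weight_iexp = smooth_weights(sn_val**2, gpm, sn_smooth_npix=101)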
def sn_weights(waves, fluxes, ivars, masks, sn_smooth_npix, const_weights=False,
ivar_weights=False, relative_weights=False, verbose=False):
"""
Calculate the S/N of each input spectrum and create an array of
(S/N)^2 weights to be used for coadding.
Parameters
----------
waves : `numpy.ndarray`_
Reference wavelength grid for all the spectra. If wave is a
1d array the routine will assume that all spectra are on the
same wavelength grid. If wave is a 2-d array, it will use the
individual wavelength grid of each exposure. shape = (nspec,) or (nspec, nexp)
fluxes : `numpy.ndarray`_
Stack of (nspec, nexp) spectra where nexp = number of
exposures, and nspec is the length of the spectrum.
ivars : `numpy.ndarray`_
Inverse variance noise vectors for the spectra; shape = (nspec, nexp)
masks : `numpy.ndarray`_
Mask for stack of spectra. True=Good, False=Bad; shape = (nspec, nexp)
sn_smooth_npix : float
Number of pixels used for determining smoothly varying S/N ratio weights.
const_weights : bool, optional
Use constant weights for each spectrum?
ivar_weights : bool, optional
Use inverse variance weighted scheme?
relative_weights : bool, optional
Calculate weights by fitting to the ratio of spectra? Note, relative weighting will
only work well when there is at least one spectrum with a reasonable S/N, and a continuum.
RJC note - This argument may only be better when the object being used has a strong
continuum + emission lines. The reference spectrum is assigned a value of 1 for all
wavelengths, and the weights of all other spectra will be determined relative to the
reference spectrum. This is particularly useful if you are dealing with highly variable
spectra (e.g. emission lines) and require a precision better than ~1 per cent.
verbose : bool, optional
Verbosity of print out.
Returns
-------
rms_sn : `numpy.ndarray`_
Root mean square S/N value for each input spectra; shape (nexp,)
weights : `numpy.ndarray`_
Weights to be applied to the spectra. These are
signal-to-noise squared weights. shape = (nspec, nexp)
"""
# Give preference to ivar_weights
if ivar_weights and relative_weights:
msgs.warn("Performing inverse variance weights instead of relative weighting")
relative_weights = False
# Check input
if fluxes.ndim == 1:
nstack = 1
nspec = fluxes.shape[0]
wave_stack = waves.reshape((nspec, nstack))
flux_stack = fluxes.reshape((nspec, nstack))
ivar_stack = ivars.reshape((nspec, nstack))
mask_stack = masks.reshape((nspec, nstack))
elif fluxes.ndim == 2:
nspec, nstack = fluxes.shape
wave_stack = waves
flux_stack = fluxes
ivar_stack = ivars
mask_stack = masks
elif fluxes.ndim == 3:
nspec, norder, nexp = fluxes.shape
wave_stack = np.reshape(waves, (nspec, norder * nexp), order='F')
flux_stack = np.reshape(fluxes, (nspec, norder * nexp), order='F')
ivar_stack = np.reshape(ivars, (nspec, norder * nexp), order='F')
mask_stack = np.reshape(masks, (nspec, norder * nexp), order='F')
nstack = norder*nexp
else:
msgs.error('Unrecognized dimensionality for flux')
# Calculate S/N
sn_val = flux_stack*np.sqrt(ivar_stack)
sn_val_ma = np.ma.array(sn_val, mask=np.logical_not(mask_stack))
sn_sigclip = stats.sigma_clip(sn_val_ma, sigma=3, maxiters=5)
# TODO: Update with sigma_clipped stats with our new cenfunc and std_func = mad_std
sn2 = (sn_sigclip.mean(axis=0).compressed())**2 #S/N^2 value for each spectrum
if sn2.shape[0] != nstack:
msgs.error('No unmasked value in one of the exposures. Check inputs.')
rms_sn = np.sqrt(sn2) # Root Mean S/N**2 value for all spectra
# Check if relative weights input
if relative_weights:
# Relative weights are requested, use the highest S/N spectrum as a reference
ref_spec = np.argmax(sn2)
if verbose:
msgs.info(
"The reference spectrum (ref_spec={0:d}) has a typical S/N = {1:.3f}".format(ref_spec, sn2[ref_spec]))
# Adjust the arrays to be relative
refscale = (sn_val[:, ref_spec] > 0) / (sn_val[:, ref_spec] + (sn_val[:, ref_spec] == 0))
for iexp in range(nstack):
# Compute the relative (S/N)^2 and update the mask
sn2[iexp] /= sn2[ref_spec]
mask_stack[:, iexp] = mask_stack[:, iexp] & ((mask_stack[:, ref_spec]) | (sn_val[:, ref_spec] != 0))
sn_val[:, iexp] *= refscale
# TODO: ivar weights is better than SN**2 or const_weights for merging orders. Eventually, we will change it to
# TODO: Should ivar weights be deprecated??
# Initialise weights
weights = np.zeros_like(flux_stack)
if ivar_weights:
if verbose:
msgs.info("Using ivar weights for merging orders")
for iexp in range(nstack):
weights[:, iexp] = smooth_weights(ivar_stack[:, iexp], mask_stack[:, iexp], sn_smooth_npix)
else:
for iexp in range(nstack):
# Now
if (rms_sn[iexp] < 3.0) or const_weights:
weight_method = 'constant'
weights[:, iexp] = np.full(nspec, np.fmax(sn2[iexp], 1e-2)) # set the minimum to be 1e-2 to avoid zeros
else:
weight_method = 'wavelength dependent'
# JFH THis line is experimental but it deals with cases where the spectrum drops to zero. We thus
# transition to using ivar_weights. This needs more work because the spectra are not rescaled at this point.
# RJC - also note that nothing should be changed to sn_val is relative_weights=True
#sn_val[sn_val[:, iexp] < 1.0, iexp] = ivar_stack[sn_val[:, iexp] < 1.0, iexp]
weights[:, iexp] = smooth_weights(sn_val[:, iexp]**2, mask_stack[:, iexp], sn_smooth_npix)
if verbose:
msgs.info('Using {:s} weights for coadding, S/N '.format(weight_method) +
'= {:4.2f}, weight = {:4.2f} for {:}th exposure'.format(
rms_sn[iexp], np.mean(weights[:, iexp]), iexp))
if fluxes.ndim == 3:
rms_sn = np.reshape(rms_sn, (norder, nexp), order='F')
weights = np.reshape(weights, (nspec, norder, nexp), order='F')
# Finish
return rms_sn, weights
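# Usage sketch (illustrative only; placeholder names and an arbitrary smoothing
# length): for a set of exposures stored as (nspec, nexp) arrays, compute the
# per-exposure rms S/N and the (S/N)^2 weights used for coadding,
#
#   rms_sn, weights = sn_weights(waves, fluxes, ivars, gpms, sn_smooth_npix=101)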
# TODO: This was commented out and would need to be refactored if brought back
# because of changes to the SensFunc and Telluric datamodels.
## TODO Rename this function to something sensfunc related
#def get_tell_from_file(sensfile, waves, masks, iord=None):
# '''
# Get the telluric model from the sensfile.
#
# Args:
# sensfile (str): the name of your fits format sensfile
# waves (ndarray): wavelength grid for your output telluric model
# masks (ndarray, bool): mask for the wave
# iord (int or None): if None returns telluric model for all orders, otherwise return the order you want
#
# Returns:
# ndarray: telluric model on your wavelength grid
# '''
#
#
# sens_param = Table.read(sensfile, 1)
# sens_table = Table.read(sensfile, 2)
# telluric = np.zeros_like(waves)
#
# if (waves.ndim == 1) and (iord is None):
# msgs.info('Loading Telluric from Longslit sensfiles.')
# tell_interp = scipy.interpolate.interp1d(sens_table[0]['WAVE'], sens_table[0]['TELLURIC'], kind='cubic',
# bounds_error=False, fill_value=np.nan)(waves[masks])
# telluric[masks] = tell_interp
# elif (waves.ndim == 1) and (iord is not None):
# msgs.info('Loading order {:} Telluric from Echelle sensfiles.'.format(iord))
# wave_tell_iord = sens_table[iord]['WAVE']
# tell_mask = (wave_tell_iord > 1.0)
# tell_iord = sens_table[iord]['TELLURIC']
# tell_iord_interp = scipy.interpolate.interp1d(wave_tell_iord[tell_mask], tell_iord[tell_mask], kind='cubic',
# bounds_error=False, fill_value=np.nan)(waves[masks])
# telluric[masks] = tell_iord_interp
# else:
# norder = np.shape(waves)[1]
# for iord in range(norder):
# wave_iord = waves[:, iord]
# mask_iord = masks[:, iord]
#
# # Interpolate telluric to the same grid with waves
# # Since it will be only used for plotting, I just simply interpolate it rather than evaluate it based on the model
# wave_tell_iord = sens_table[iord]['WAVE']
# tell_mask = (wave_tell_iord > 1.0)
# tell_iord = sens_table[iord]['TELLURIC']
# tell_iord_interp = scipy.interpolate.interp1d(wave_tell_iord[tell_mask], tell_iord[tell_mask], kind='cubic',
# bounds_error=False, fill_value=np.nan)(wave_iord[mask_iord])
# telluric[mask_iord, iord] = tell_iord_interp
#
# return telluric
def robust_median_ratio(flux, ivar, flux_ref, ivar_ref, mask=None, mask_ref=None, ref_percentile=70.0, min_good=0.05,
maxiters=5, sigrej=3.0, max_factor=10.0, snr_do_not_rescale=1.0,
verbose=False):
"""
Robustly determine the ratio between input spectrum flux and reference spectrum flux_ref. The code will perform
best if the reference spectrum is chosen to be the higher S/N ratio spectrum, i.e. a preliminary stack that you want
to scale each exposure to match. Note that the flux and flux_ref need to be on the same wavelength grid!!
Parameters
----------
flux: `numpy.ndarray`_
spectrum that will be rescaled. shape=(nspec,)
ivar: `numpy.ndarray`_
inverse variance for the spectrum that will be rescaled.
Same shape as flux
flux_ref: `numpy.ndarray`_
reference spectrum. Same shape as flux
ivar_ref: `numpy.ndarray`_
inverse variance of reference spectrum.
mask: `numpy.ndarray`_, optional
boolean mask for the spectrum that will be rescaled. True=Good.
If not input, computed from inverse variance
mask_ref: `numpy.ndarray`_, optional
Boolean mask for reference spectrum. True=Good. If not input, computed from inverse variance.
ref_percentile: float, optional, default=70.0
Percentile fraction used for selecting the minimum SNR cut from the reference spectrum. Pixels above this
percentile cut are deemed the "good" pixels and are used to compute the ratio. This must be a number
between 0 and 100.
min_good: float, optional, default = 0.05
Minimum fraction of good pixels determined as a fraction of the total pixels for estimating the median ratio
maxiters: int, optional, default = 5
Maximum number of iterations for astropy.stats.SigmaClip
sigrej: float, optional, default = 3.0
Rejection threshold for astropy.stats.SigmaClip
max_factor: float, optional, default = 10.0,
Maximum allowed value of the returned ratio
snr_do_not_rescale: float, optional default = 1.0
If the S/N ratio of the set of pixels (defined by upper ref_percentile in the reference spectrum) in the
input spectrum have a median value below snr_do_not_rescale, median rescaling will not be attempted
and the code returns ratio = 1.0. We also use this parameter to define the set of pixels (determined from
the reference spectrum) to compare for the rescaling.
Returns
-------
ratio: float
the number that must be multiplied into flux in order to get it to match up with flux_ref
"""
## Mask for reference spectrum and your spectrum
if mask is None:
mask = ivar > 0.0
if mask_ref is None:
mask_ref = ivar_ref > 0.0
nspec = flux.size
snr_ref = flux_ref * np.sqrt(ivar_ref)
snr_ref_best = np.fmax(np.percentile(snr_ref[mask_ref], ref_percentile),snr_do_not_rescale)
calc_mask = (snr_ref > snr_ref_best) & mask_ref & mask
snr_resc = flux*np.sqrt(ivar)
snr_resc_med = np.median(snr_resc[calc_mask])
if (np.sum(calc_mask) > min_good*nspec) & (snr_resc_med > snr_do_not_rescale):
# Take the best part of the higher SNR reference spectrum
sigclip = stats.SigmaClip(sigma=sigrej, maxiters=maxiters, cenfunc='median', stdfunc=utils.nan_mad_std)
flux_ref_ma = np.ma.MaskedArray(flux_ref, np.invert(calc_mask))
flux_ref_clipped, lower, upper = sigclip(flux_ref_ma, masked=True, return_bounds=True)
mask_ref_clipped = np.invert(flux_ref_clipped.mask) # mask_stack = True are good values
flux_ma = np.ma.MaskedArray(flux, np.invert(calc_mask))
flux_clipped, lower, upper = sigclip(flux_ma, masked=True, return_bounds=True)
mask_clipped = np.invert(flux_clipped.mask) # mask_stack = True are good values
new_mask = mask_ref_clipped & mask_clipped
flux_ref_median = np.median(flux_ref[new_mask])
flux_dat_median = np.median(flux[new_mask])
if (flux_ref_median < 0.0) or (flux_dat_median < 0.0):
msgs.warn('Negative median flux found. Not rescaling')
ratio = 1.0
else:
if verbose:
msgs.info('Used {:} good pixels for computing median flux ratio'.format(np.sum(new_mask)))
ratio = np.fmax(np.fmin(flux_ref_median/flux_dat_median, max_factor), 1.0/max_factor)
else:
if (np.sum(calc_mask) <= min_good*nspec):
msgs.warn('Found only {:} good pixels for computing median flux ratio.'.format(np.sum(calc_mask))
+ msgs.newline() + 'No median rescaling applied')
if (snr_resc_med <= snr_do_not_rescale):
msgs.warn('Median S/N of the pixels used for scaling {:} <= snr_do_not_rescale = {:}.'.format(snr_resc_med, snr_do_not_rescale)
+ msgs.newline() + 'No median rescaling applied')
ratio = 1.0
return ratio
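# Usage sketch (illustrative only; placeholder names): a single multiplicative factor
# matching `flux` to a higher-S/N reference spectrum on the same wavelength grid,
#
#   ratio = robust_median_ratio(flux, ivar, flux_ref, ivar_ref)
#   flux_scaled, ivar_scaled = ratio*flux, ivar/ratio**2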
def order_median_scale(waves, fluxes, ivars, masks, min_good=0.05, maxiters=5,
max_factor=10., sigrej=3, debug=False, show=False):
'''
Function to scale the different orders to a common flux level using robust median flux ratios computed in the order overlap regions.
Args:
waves (`numpy.ndarray`_): wavelength array of your spectra with the shape of (nspec, norder)
fluxes (`numpy.ndarray`_): flux array of your spectra with the shape of (nspec, norder)
ivars (`numpy.ndarray`_): ivar array of your spectra with the shape of (nspec, norder)
masks (`numpy.ndarray`_): mask for your spectra with the shape of (nspec, norder)
min_good (float, optional): minimum fraction of the total number of good pixels needed to estimate the median ratio
maxiters (int or float, optional): maximum iterations for rejecting outliers
max_factor (float, optional): maximum scale factor
sigrej (float, optional): sigma used for rejecting outliers
debug (bool, optional): if True show intermediate QA
show (bool, optional): if True show the final QA
Returns:
tuple: (1) fluxes_new (`numpy.ndarray`_): re-scaled fluxes with the shape
of (nspec, norder). (2) ivars_new (`numpy.ndarray`_): re-scaled ivars
with the shape of (nspec, norder) (3) order_ratios (`numpy.ndarray`_): an
array of scale factor with the length of norder
'''
norder = np.shape(waves)[1]
order_ratios = np.ones(norder)
## re-scale bluer orders to match the reddest order.
# scaling spectrum order by order. We use the reddest order as the reference since slit loss in redder is smaller
for ii in range(norder - 1):
iord = norder - ii - 1
wave_blue, flux_blue, ivar_blue, mask_blue = waves[:, iord-1], fluxes[:, iord-1],\
ivars[:, iord-1], masks[:, iord-1]
wave_red_tmp, flux_red_tmp = waves[:, iord], fluxes[:, iord]*order_ratios[iord]
ivar_red_tmp, mask_red_tmp = ivars[:, iord]*1.0/order_ratios[iord]**2, masks[:, iord]
wave_mask = wave_red_tmp>1.0
wave_red, flux_red, ivar_red, mask_red = wave_red_tmp[wave_mask], flux_red_tmp[wave_mask], \
ivar_red_tmp[wave_mask], mask_red_tmp[wave_mask],
# interpolate iord-1 (bluer) onto the wavelength grid of iord (redder)
flux_blue_inter, ivar_blue_inter, mask_blue_inter = interp_spec(wave_red, wave_blue, flux_blue, ivar_blue, mask_blue)
npix_overlap = np.sum(mask_blue_inter & mask_red)
percentile_iord = np.fmax(100.0 * (npix_overlap / np.sum(mask_red)-0.05), 10)
mask_both = mask_blue_inter & mask_red
snr_median_red = np.median(flux_red[mask_both]*np.sqrt(ivar_red[mask_both]))
snr_median_blue = np.median(flux_blue_inter[mask_both]*np.sqrt(ivar_blue_inter[mask_both]))
## TODO: we set the SNR to be minimum of 300 to turn off the scaling but we need the QA plot
## need to think more about whether we need to scale different orders, it seems make the spectra
## much bluer than what it should be.
if (snr_median_blue>300.0) & (snr_median_red>300.0):
order_ratio_iord = robust_median_ratio(flux_blue_inter, ivar_blue_inter, flux_red, ivar_red, mask=mask_blue_inter,
mask_ref=mask_red, ref_percentile=percentile_iord, min_good=min_good,
maxiters=maxiters, max_factor=max_factor, sigrej=sigrej)
order_ratios[iord - 1] = np.fmax(np.fmin(order_ratio_iord, max_factor), 1.0/max_factor)
msgs.info('Scaled {}th order to {}th order by {:}'.format(iord-1, iord, order_ratios[iord-1]))
else:
if ii>0:
order_ratios[iord - 1] = order_ratios[iord]
msgs.warn('Scaled {}th order to {}th order by {:} using the redder order scaling '
'factor'.format(iord-1, iord, order_ratios[iord-1]))
else:
msgs.warn('The SNR in the overlapped region is too low or there is not enough overlapped pixels.'+ msgs.newline() +
'Median scale between order {:} and order {:} was not attempted'.format(iord-1, iord))
if debug:
plt.figure(figsize=(12, 8))
plt.plot(wave_red[mask_red], flux_red[mask_red], 'k-', label='reference spectrum')
plt.plot(wave_blue[mask_blue], flux_blue[mask_blue],color='dodgerblue', lw=3, label='raw spectrum')
plt.plot(wave_blue[mask_blue], flux_blue[mask_blue]*order_ratios[iord-1], color='r',
alpha=0.5, label='re-scaled spectrum')
ymin, ymax = get_ylim(flux_blue, ivar_blue, mask_blue)
plt.ylim([ymin, ymax])
plt.xlim([np.min(wave_blue[mask_blue]), np.max(wave_red[mask_red])])
plt.legend()
plt.xlabel('wavelength')
plt.ylabel('Flux')
plt.show()
# Update flux and ivar
fluxes_new = np.copy(fluxes)
ivars_new = np.copy(ivars)
for ii in range(norder):
fluxes_new[:, ii] *= order_ratios[ii]
ivars_new[:, ii] *= 1.0/order_ratios[ii]**2
if show:
plt.figure(figsize=(12, 8))
ymin = []
ymax = []
for ii in range(norder):
wave_stack_iord = waves[:, ii]
flux_stack_iord = fluxes_new[:, ii]
ivar_stack_iord = ivars_new[:, ii]
mask_stack_iord = masks[:, ii]
med_width = (2.0 * np.ceil(0.1 / 10.0 * np.size(wave_stack_iord[mask_stack_iord])) + 1).astype(int)
flux_med, ivar_med = median_filt_spec(flux_stack_iord, ivar_stack_iord, mask_stack_iord, med_width)
plt.plot(wave_stack_iord[mask_stack_iord], flux_med[mask_stack_iord], alpha=0.7)
#plt.plot(wave_stack_iord[mask_stack_iord], flux_stack_iord[mask_stack_iord], alpha=0.5)
# plt.plot(wave_stack_iord[mask_stack_iord],1.0/np.sqrt(ivar_stack_iord[mask_stack_iord]))
ymin_ii, ymax_ii = get_ylim(flux_stack_iord, ivar_stack_iord, mask_stack_iord)
ymax.append(ymax_ii)
ymin.append(ymin_ii)
plt.xlim([np.min(waves[masks]), np.max(waves[masks])])
plt.ylim([-0.15*np.median(ymax), 1.5*np.median(ymax)])
plt.xlabel('Wavelength ($\\rm\\AA$)')
plt.ylabel('Flux')
plt.show()
return fluxes_new, ivars_new, order_ratios
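# Usage sketch (illustrative only; placeholder names): rescale echelle orders stored as
# (nspec, norder) arrays so that adjacent orders agree in their overlap regions,
#
#   fluxes_new, ivars_new, order_ratios = order_median_scale(waves, fluxes, ivars, gpms)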
def scale_spec(wave, flux, ivar, sn, wave_ref, flux_ref, ivar_ref, mask=None, mask_ref=None, scale_method='auto', min_good=0.05,
ref_percentile=70.0, maxiters=5, sigrej=3, max_median_factor=10.0,
npoly=None, hand_scale=None, sn_min_polyscale=2.0, sn_min_medscale=0.5, debug=False, show=False):
"""
Routine for solving for the best way to rescale an input spectrum
flux to match a reference spectrum flux_ref. The code will work
best if you choose the reference to be the highest S/N ratio
spectrum. If the scale_method is not specified, the code will
make a decision about which method to use based on the input S/N
ratio.
Parameters
----------
wave: `numpy.ndarray`_
wavelengths grid for the spectra of shape (nspec,)
flux: `numpy.ndarray`_
spectrum that will be rescaled.
ivar: `numpy.ndarray`_
inverse variance for the spectrum that will be rescaled.
sn: float
S/N of the spectrum that is being scaled used to make decisions about the scaling method.
This can be computed by sn_weights and passed in.
flux_ref: `numpy.ndarray`_, (nspec,)
reference spectrum.
ivar_ref: `numpy.ndarray`_, (nspec,)
inverse variance of reference spectrum.
mask: `numpy.ndarray`_
Boolean mask for the spectrum that will be rescaled. True=Good. If not input, computed from inverse variance
mask_ref: `numpy.ndarray`_
Boolean mask for reference spectrum. True=Good. If not input, computed from inverse variance.
min_good: float, optional, default = 0.05
minimum fraction of the total number of good pixels needed to estimate the median ratio
maxiters: int, optional
maximum number of iterations for rejecting outliers used
by the robust_median_ratio routine if median rescaling is
the method used.
max_median_factor: float, optional, default=10.0
maximum scale factor for median rescaling for robust_median_ratio if median rescaling is the method used.
sigrej: float, optional, default=3.0
rejection threshold used for rejecting outliers by robust_median_ratio
ref_percentile: float, optional, default=70.0
percentile fraction cut used for selecting minimum SNR cut for robust_median_ratio
npoly: int, optional, default=None
order for the poly ratio scaling if polynomial rescaling
is the method used. Default is to automatically compute
this based on S/N ratio of data.
scale_method: str, optional
scale method, str, default='auto'. Options are auto,
poly, median, none, or hand. Hand is not well tested.
User can optionally specify the rescaling method. Default
is to let the code determine this automatically, which
works well.
hand_scale: `numpy.ndarray`_, optional
array of hand scale factors, not well tested. shape=(nexp,)
sn_min_polyscale: float, optional, default=2.0
maximum S/N for performing median scaling; above this, polynomial scaling is used
sn_min_medscale: float, optional, default=0.5
minimum S/N for performing median scaling
debug: bool, optional, default=False
show interactive QA plot
Returns
-------
Multiple items: tuple
(1) flux_scale: ndarray (nspec,) scaled spectrum; (2)
ivar_scale: ndarray (nspec,) inverse variance for scaled
spectrum; (3) scale: `numpy.ndarray`_ (nspec,) scale factor applied to
the spectrum and inverse variance; (4) scale_method: str, method
that was used to scale the spectra.
"""
if mask is None:
mask = ivar > 0.0
if mask_ref is None:
mask_ref = ivar_ref > 0.0
# Interpolate the reference spectrum onto the wavelengths of the spectrum that will be rescaled
flux_ref_int, ivar_ref_int, mask_ref_int = interp_spec(wave, wave_ref, flux_ref, ivar_ref, mask_ref)
# estimates the SNR of each spectrum and the stacked mean SNR
#rms_sn, weights = sn_weights(wave, flux, ivar, mask, sn_smooth_npix)
#sn = np.sqrt(np.mean(rms_sn**2))
if scale_method == 'auto':
if sn > sn_min_polyscale:
method_used = 'poly'
elif ((sn <= sn_min_polyscale) and (sn > sn_min_medscale)):
method_used = 'median'
else:
method_used = 'none'
else:
method_used = scale_method
# Estimate the scale factor
if method_used == 'poly':
# Decide on the order of the polynomial rescaling
if npoly is None:
if sn > 25.0:
npoly = 5 # quintic, Is this stable?
elif sn > 8.0:
npoly = 3 # cubic
elif sn >= 5.0:
npoly = 2 # quadratic
else:
npoly = 1 # linear
scale, fit_tuple, flux_scale, ivar_scale, outmask = solve_poly_ratio(
wave, flux, ivar, flux_ref_int, ivar_ref_int, npoly,mask=mask, mask_ref=mask_ref_int,
ref_percentile=ref_percentile, debug=debug)
elif method_used == 'median':
# Median ratio (reference to spectrum)
med_scale = robust_median_ratio(flux, ivar, flux_ref_int, ivar_ref_int,ref_percentile=ref_percentile,min_good=min_good,
mask=mask, mask_ref=mask_ref_int, maxiters=maxiters,
max_factor=max_median_factor,sigrej=sigrej)
# Apply
flux_scale = flux * med_scale
ivar_scale = ivar * 1.0/med_scale**2
scale = np.full_like(flux,med_scale)
elif method_used == 'hand':
# Input?
if hand_scale is None:
msgs.error("Need to provide hand_scale parameter, single value")
flux_scale = flux * hand_scale
ivar_scale = ivar * 1.0 / hand_scale ** 2
scale = np.full(flux.size, hand_scale)
elif method_used == 'none':
flux_scale = flux.copy()
ivar_scale = ivar.copy()
scale = np.ones_like(flux)
else:
msgs.error("Scale method not recognized! Check documentation for available options")
# Finish
if show:
scale_spec_qa(wave, flux, ivar, wave_ref, flux_ref, ivar_ref, scale, method_used, mask = mask, mask_ref=mask_ref,
title='Scaling Applied to the Data')
return flux_scale, ivar_scale, scale, method_used
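# Usage sketch (illustrative only; placeholder names): letting the code choose the
# scaling method from the per-exposure S/N estimate `sn_iexp` (e.g. from sn_weights),
#
#   flux_sc, ivar_sc, scale, method = scale_spec(wave, flux, ivar, sn_iexp,
#                                                wave_ref, flux_ref, ivar_ref,
#                                                scale_method='auto')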
def compute_stack(wave_grid, waves, fluxes, ivars, masks, weights, min_weight=1e-8):
'''
Compute a stacked spectrum from a set of exposures on the specified wave_grid with proper treatment of
weights and masking. This code uses np.histogram to combine the data using NGP and does not perform any
interpolations and thus does not correlate errors. It uses wave_grid to determine the set of wavelength bins that
the data are averaged on. The final spectrum will be on an output wavelength grid which is not the same as wave_grid.
The output wavelength grid is the weighted average of the individual wavelengths used for each exposure that fell into
a given wavelength bin in the input wave_grid. This 1d coadding routine thus maintains the independence of the
errors for each pixel in the combined spectrum and computes the weighted averaged wavelengths of each pixel
in an analogous way to the 2d extraction procedure which also never interpolates to avoid correlating errors.
Parameters
----------
wave_grid: `numpy.ndarray`_
new wavelength grid desired. This will typically be a regularly spaced grid created by the get_wave_grid routine.
The reason for the ngrid+1 is that this is the general way to specify a set of bins if you desire ngrid
bin centers, i.e. the output stacked spectra have ngrid elements. The spacing of this grid can be regular in
lambda (better for multislit) or log lambda (better for echelle). This new wavelength grid should be designed
with the sampling of the data in mind. For example, the code will work fine if you choose the sampling to be
too fine, but then the number of exposures contributing to any given wavelength bin will be one or zero in the
limiting case of very small wavelength bins. For larger wavelength bins, the number of exposures contributing
to a given bin will be larger. shape=(ngrid +1,)
waves: `numpy.ndarray`_
wavelength arrays for spectra to be stacked. Note that the wavelength grids can in general be different for
each exposure and irregularly spaced.
shape=(nspec, nexp)
fluxes: `numpy.ndarray`_
fluxes for each exposure on the waves grid
shape=(nspec, nexp)
ivars: `numpy.ndarray`_
Inverse variances for each exposure on the waves grid
shape=(nspec, nexp)
masks: `numpy.ndarray`_
Boolean masks for each exposure on the waves grid. True=Good.
shape=(nspec, nexp)
weights: `numpy.ndarray`_
Weights to be used for combining your spectra. These are computed using sn_weights
shape=(nspec, nexp)
min_weight: float, optional
Minimum allowed weight for any individual spectrum
Returns
-------
wave_stack: `numpy.ndarray`_
Wavelength grid for stacked
spectrum. As discussed above, this is the weighted average
of the wavelengths of each spectrum that contributed to a
bin in the input wave_grid wavelength grid. It thus has
ngrid elements, whereas wave_grid has ngrid+1 elements to
specify the ngrid total number of bins. Note that
wave_stack is NOT simply the wave_grid bin centers, since
it computes the weighted average. shape=(ngrid,)
flux_stack: `numpy.ndarray`_
Final stacked spectrum on wave_stack wavelength grid
shape=(ngrid,)
ivar_stack: `numpy.ndarray`_
Inverse variance spectrum on wave_stack wavelength grid.
Errors are propagated according to weighting and masking.
shape=(ngrid,)
mask_stack: `numpy.ndarray`_
Boolean Mask for stacked
spectrum on wave_stack wavelength grid. True=Good.
shape=(ngrid,)
nused: `numpy.ndarray`_
Number of exposures which contributed to
each pixel in the wave_stack. Note that this is in general
different from nexp because of masking, but also because of
the sampling specified by wave_grid. In other words,
sometimes more spectral pixels in the irregularly gridded
input wavelength array waves will land in one bin versus
another depending on the sampling.
shape=(ngrid,)
'''
#mask bad values and extreme values (usually caused by extreme low sensitivity at the edge of detectors)
ubermask = masks & (weights > 0.0) & (waves > 1.0) & (ivars > 0.0) & (utils.inverse(ivars)<1e10)
waves_flat = waves[ubermask].flatten()
fluxes_flat = fluxes[ubermask].flatten()
ivars_flat = ivars[ubermask].flatten()
vars_flat = utils.inverse(ivars_flat)
weights_flat = weights[ubermask].flatten()
# Counts how many pixels in each wavelength bin
nused, wave_edges = np.histogram(waves_flat,bins=wave_grid,density=False)
# Calculate the summed weights for the denominator
weights_total, wave_edges = np.histogram(waves_flat,bins=wave_grid,density=False,weights=weights_flat)
# Calculate the stacked wavelength
## TODO: JFH Made the minimum weight 1e-8 from 1e-4. I'm not sure what this min_weight is necessary for, or
# is achieving FW.
wave_stack_total, wave_edges = np.histogram(waves_flat,bins=wave_grid,density=False,weights=waves_flat*weights_flat)
wave_stack = (weights_total > min_weight)*wave_stack_total/(weights_total+(weights_total==0.))
# Calculate the stacked flux
flux_stack_total, wave_edges = np.histogram(waves_flat,bins=wave_grid,density=False,weights=fluxes_flat*weights_flat)
flux_stack = (weights_total > min_weight)*flux_stack_total/(weights_total+(weights_total==0.))
# Calculate the stacked ivar
var_stack_total, wave_edges = np.histogram(waves_flat,bins=wave_grid,density=False,weights=vars_flat*weights_flat**2)
var_stack = (weights_total > min_weight)*var_stack_total/(weights_total+(weights_total==0.))**2
ivar_stack = utils.inverse(var_stack)
# New mask for the stack
mask_stack = (weights_total > min_weight) & (nused > 0.0)
return wave_stack, flux_stack, ivar_stack, mask_stack, nused
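# Minimal numpy illustration (not module code) of the weighted-histogram trick used in
# compute_stack: within each wavelength bin the weighted mean of a quantity y is the
# histogram of y*w divided by the histogram of w,
#
#   w_tot, _ = np.histogram(x, bins=bin_edges, weights=w)
#   y_tot, _ = np.histogram(x, bins=bin_edges, weights=y*w)
#   y_mean = y_tot/(w_tot + (w_tot == 0.0))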
def get_ylim(flux, ivar, mask):
"""
Utility routine for setting the plot limits for QA plots.
Args:
flux (`numpy.ndarray`_):
(nspec,) flux array
ivar (`numpy.ndarray`_):
(nspec,) inverse variance array
mask (`numpy.ndarray`_):
bool, (nspec,) mask array. True=Good
Returns:
tuple: lower and upper limits for plotting.
"""
med_width = (2.0 * np.ceil(0.1 / 2.0 * np.size(flux[mask])) + 1).astype(int)
flux_med, ivar_med = median_filt_spec(flux, ivar, mask, med_width)
mask_lim = ivar_med > np.percentile(ivar_med, 20)
ymax = 2.5 * np.max(flux_med[mask_lim])
ymin = -0.15 * ymax
return ymin, ymax
def scale_spec_qa(wave, flux, ivar, wave_ref, flux_ref, ivar_ref, ymult,
scale_method, mask=None, mask_ref=None, ylim = None, title=''):
'''
QA plot for spectrum scaling.
Parameters
----------
wave: `numpy.ndarray`_
wavelength array for spectrum to be scaled and reference spectrum.
shape=(nspec,)
flux: `numpy.ndarray`_
flux for spectrum to be scaled; shape=(nspec,)
ivar: `numpy.ndarray`_
inverse variance for spectrum to be scaled. shape=(nspec,)
wave_ref: `numpy.ndarray`_
reference wavelengths; shape=(nspec,)
flux_ref: `numpy.ndarray`_
reference flux; shape=(nspec,)
ivar_ref: `numpy.ndarray`_
inverse variance of reference flux; shape=(nspec,)
ymult: `numpy.ndarray`_
scale factor array; shape=(nspec,)
scale_method: str
label of method used for rescaling which will be shown on QA plot.
mask: `numpy.ndarray`_, optional
Boolean mask for spectrum to be scaled. True=Good. If not specified, determined from inverse variance
shape=(nspec,)
mask_ref: `numpy.ndarray`_, optional
Boolean mask for reference flux. True=Good.
shape=(nspec,)
ylim: tuple, optional
tuple for limits of the QA plot. If None, will be determined automatically with get_ylim
title: str, optional
QA plot title
'''
if mask is None:
mask = ivar > 0.0
if mask_ref is None:
mask_ref = ivar_ref > 0.0
# This deals with spectrographs that have zero wavelength values. They are masked in mask, but this impacts plotting
wave_mask = wave > 1.0
wave_mask_ref = wave_ref > 1.0
#dwave = wave[wave_mask].max() - wave[wave_mask].min()
#dwave_ref = wave_ref[wave_mask_ref].max() - wave_ref[wave_mask_ref].min()
# Get limits
if ylim is None:
ylim = get_ylim(flux_ref, ivar_ref, mask_ref)
nullfmt = NullFormatter() # no labels
fig = plt.figure(figsize=(12, 8))
# [left, bottom, width, height]
poly_plot = fig.add_axes([0.1, 0.75, 0.8, 0.20])
spec_plot = fig.add_axes([0.1, 0.10, 0.8, 0.65])
poly_plot.xaxis.set_major_formatter(nullfmt) # no x-axis labels for polynomial plot
poly_plot.plot(wave[wave_mask], ymult[wave_mask], color='black', linewidth=3.0, label=scale_method + ' scaling')
poly_plot.legend()
# This logic below allows more of the spectrum to be plotted if wave_ref is a multi-order stack which has broader
# wavelength coverage. For the longslit or single order case, this will plot the correct range as well
wave_min = np.fmax(0.8*wave[wave_mask].min(), wave_ref[wave_mask_ref].min())
wave_max = np.fmin(1.2*wave[wave_mask].max(), wave_ref[wave_mask_ref].max())
poly_plot.set_xlim((wave_min, wave_max))
spec_plot.set_xlim((wave_min, wave_max))
spec_plot.set_ylim(ylim)
spec_plot.plot(wave[wave_mask], flux[wave_mask], color='red', zorder=10,
marker='o', markersize=1.0, mfc='k', fillstyle='full', linestyle='None', label='original spectrum')
spec_plot.plot(wave[wave_mask], flux[wave_mask]*ymult[wave_mask], color='dodgerblue', drawstyle='steps-mid', alpha=0.5, zorder=5, linewidth=2,
label='rescaled spectrum')
spec_plot.plot(wave_ref[wave_mask_ref], flux_ref[wave_mask_ref], color='black', drawstyle='steps-mid', zorder=7, alpha = 0.5, label='reference spectrum')
spec_plot.legend()
fig.suptitle(title)
plt.show()
# TODO: Change mask to gpm
def coadd_iexp_qa(wave, flux, rejivar, mask, wave_stack, flux_stack, ivar_stack, mask_stack,
outmask, norder=None, title='', qafile=None):
"""
Routine to create a QA plot showing the individual spectrum
compared to the combined stacked spectrum, indicating which pixels
were rejected.
Args:
wave (`numpy.ndarray`_):
Wavelength array for spectrum of the exposure in
question. Shape is (nspec,).
flux (`numpy.ndarray`_):
Flux for the exposure in question. Shape is (nspec,).
rejivar (`numpy.ndarray`_):
Inverse variance (as used for rejection) for the exposure in
question. Shape is (nspec,).
mask (`numpy.ndarray`_):
Boolean array with mask for the exposure in question,
True=Good. If not specified, determined from inverse
variance. Shape is (nspec,).
wave_stack (`numpy.ndarray`_):
Wavelength array for the stacked spectrum. Shape must match flux_stack.
flux_stack (`numpy.ndarray`_):
Stacked spectrum to be compared to the exposure in
question. Shape is (nspec,).
ivar_stack (`numpy.ndarray`_):
Inverse variance of the stacked spectrum. Shape is
(nspec,).
mask_stack (`numpy.ndarray`_):
Boolean array with mask for stacked spectrum. Shape is
(nspec,).
outmask (`numpy.ndarray`_):
Boolean array (True=Good) indicating which pixels of the exposure in
question survived rejection. Shape is (nspec,).
norder (:obj:`int`, optional):
Indicate the number of orders if this is an echelle
stack. If None, the exposure is treated as a single longslit/single-order spectrum.
title (:obj:`str`, optional):
Plot title
qafile (:obj:`str`, optional):
QA file name
"""
fig = plt.figure(figsize=(14, 8))
spec_plot = fig.add_axes([0.1, 0.1, 0.8, 0.85])
# Get limits
ymin, ymax = get_ylim(flux_stack, ivar_stack, mask_stack)
# Plot spectrum
rejmask = mask & np.invert(outmask)
wave_mask = wave > 1.0
wave_stack_mask = wave_stack > 1.0
spec_plot.plot(wave[rejmask], flux[rejmask],'s',zorder=10,mfc='None', mec='r', label='rejected pixels')
spec_plot.plot(wave[np.invert(mask)], flux[np.invert(mask)],'v', zorder=10, mfc='None', mec='orange',
label='originally masked')
if norder is None:
spec_plot.plot(wave[wave_mask], flux[wave_mask], color='dodgerblue', drawstyle='steps-mid',
zorder=2, alpha=0.5,label='single exposure')
spec_plot.plot(wave[wave_mask], np.sqrt(utils.inverse(rejivar[wave_mask])),zorder=3,
color='0.7', alpha=0.5, drawstyle='steps-mid')
spec_plot.plot(wave_stack[wave_stack_mask],flux_stack[wave_stack_mask]*mask_stack[wave_stack_mask],color='k',
drawstyle='steps-mid',lw=2,zorder=3, alpha=0.5, label='coadd')
# TODO Use one of our telluric models here instead
# Plot transmission
if (np.max(wave[mask]) > 9000.0):
skytrans_file = data.get_skisim_filepath('atm_transmission_secz1.5_1.6mm.dat')
skycat = np.genfromtxt(skytrans_file, dtype='float')
scale = 0.8 * ymax
spec_plot.plot(skycat[:, 0] * 1e4, skycat[:, 1] * scale, 'm-', alpha=0.5, zorder=11)
else:
npix = np.size(flux)
nspec = int(npix / norder)
spec_plot.plot(wave_stack[wave_stack_mask], flux_stack[wave_stack_mask] * mask_stack[wave_stack_mask],
color='k', drawstyle='steps-mid', lw=1, zorder=3, alpha=0.5, label='coadd')
for iord in range(norder):
spec_plot.plot(wave[nspec*iord:nspec*(iord+1)][wave_mask[nspec*iord:nspec*(iord+1)]],
flux[nspec*iord:nspec*(iord+1)][wave_mask[nspec*iord:nspec*(iord+1)]],
drawstyle='steps-mid', zorder=1, alpha=0.7, label='order {:d}'.format(iord))
# This logic below allows more of the spectrum to be plotted if wave_stack is a multi-order stack which has broader
# wavelength coverage. For the longslit or single order case, this will plot the correct range as well
wave_min = np.fmax(0.8*wave[wave_mask].min(), wave_stack[wave_stack_mask].min())
wave_max = np.fmin(1.2*wave[wave_mask].max(), wave_stack[wave_stack_mask].max())
# properties
spec_plot.legend(fontsize=13)
spec_plot.set_ylim([ymin, ymax])
spec_plot.set_xlim((wave_min, wave_max))
spec_plot.set_xlabel('Wavelength ($\\rm\\AA$)')
spec_plot.set_ylabel('Flux')
spec_plot.set_title(title, fontsize=16, color='red')
if qafile is not None:
if len(qafile.split('.'))==1:
msgs.info("No format given for the qafile; saving to PDF format.")
qafile = qafile+'.pdf'
plt.savefig(qafile,dpi=300)
msgs.info("Wrote QA: {:s}".format(qafile))
plt.show()
def weights_qa(waves, weights, masks, title=''):
'''
Routine to make a QA plot for the weights used to compute a stacked spectrum.
Parameters
----------
waves: `numpy.ndarray`_
wavelength array for spectra that went into a stack;
shape=(nspec, nexp)
weights: `numpy.ndarray`_
(S/N)^2 weights for the exposures that went into a stack. This would have been computed by sn_weights
shape=(nspec, nexp)
masks: `numpy.ndarray`_
Boolean good-pixel masks (True=Good) for each individual exposure
that went into the stack.
shape=(nspec, nexp)
title: str, optional
Title for the plot.
'''
if waves.ndim == 1:
nstack = 1
nspec = waves.shape[0]
waves_stack = waves.reshape((nspec, nstack))
weights_stack = weights.reshape((nspec, nstack))
masks_stack = masks.reshape((nspec, nstack))
elif waves.ndim == 2:
nspec, nstack = waves.shape
waves_stack = waves
weights_stack = weights
masks_stack = masks
elif waves.ndim == 3:
nspec, norder, nexp = waves.shape
waves_stack = np.reshape(waves, (nspec, norder * nexp), order='F')
weights_stack = np.reshape(weights, (nspec, norder * nexp), order='F')
masks_stack = np.reshape(masks, (nspec, norder * nexp), order='F')
nstack = norder*nexp
else:
msgs.error('Unrecognized dimensionality for waves')
fig = plt.figure(figsize=(12, 8))
for iexp in range(nstack):
wave_mask = waves_stack[:, iexp] > 1.0
plt.plot(waves_stack[wave_mask,iexp], weights_stack[wave_mask,iexp]*masks_stack[wave_mask,iexp])
plt.xlim(waves_stack[(waves_stack > 1.0)].min(), waves_stack[(waves_stack > 1.0)].max())
plt.xlabel('Wavelength (Angstrom)')
plt.ylabel('Weights')
plt.title(title, fontsize=16, color='red')
plt.show()
def coadd_qa(wave, flux, ivar, nused, mask=None, tell=None,
title=None, qafile=None):
'''
Routine to make QA plot of the final stacked spectrum. It works for longslit/multislit spectra, coadds of
individual Echelle orders, and the final coadd of the Echelle data.
Parameters
----------
wave: `numpy.ndarray`_
one-d wavelength array of your spectrum;
shape=(nspec,)
flux: `numpy.ndarray`_
one-d flux array of your spectrum;
shape=(nspec,)
ivar: `numpy.ndarray`_
one-d ivar array of your spectrum;
shape=(nspec,)
nused: `numpy.ndarray`_
number of exposures used in the stack for each pixel; same shape as flux
shape=(nspec,)
mask: `numpy.ndarray`_, optional
boolean mask array for your spectrum;
shape=(nspec,)
tell: `numpy.ndarray`_, optional
one-d telluric array for your spectrum; shape=(nspec,)
title: str, optional
plot title
qafile: str, optional
QA file name
'''
#TODO: This routine should take a parset
if mask is None:
mask = ivar > 0.0
wave_mask = wave > 1.0
wave_min = wave[wave_mask].min()
wave_max = wave[wave_mask].max()
fig = plt.figure(figsize=(12, 8))
# plot how many exposures were used at each pixel
# [left, bottom, width, height]
num_plot = fig.add_axes([0.10, 0.70, 0.80, 0.23])
spec_plot = fig.add_axes([0.10, 0.10, 0.80, 0.60])
num_plot.plot(wave[wave_mask],nused[wave_mask],drawstyle='steps-mid',color='k',lw=2)
num_plot.set_xlim([wave_min, wave_max])
num_plot.set_ylim([0.0, np.fmax(1.1*nused.max(), nused.max()+1.0)])
num_plot.set_ylabel('$\\rm N_{EXP}$')
num_plot.yaxis.set_major_locator(MaxNLocator(integer=True))
num_plot.yaxis.set_minor_locator(NullLocator())
# Plot spectrum
spec_plot.plot(wave[wave_mask], flux[wave_mask], color='black', drawstyle='steps-mid',zorder=1,alpha=0.8, label='Single exposure')
spec_plot.plot(wave[wave_mask], np.sqrt(utils.inverse(ivar[wave_mask])),zorder=2, color='red', alpha=0.7,
drawstyle='steps-mid', linestyle=':')
# Get limits
ymin, ymax = get_ylim(flux, ivar, mask)
# Plot transmission
if (np.max(wave[mask])>9000.0) and (tell is None):
skytrans_file = data.get_skisim_filepath('atm_transmission_secz1.5_1.6mm.dat')
skycat = np.genfromtxt(skytrans_file,dtype='float')
scale = 0.8*ymax
spec_plot.plot(skycat[:,0]*1e4,skycat[:,1]*scale,'m-',alpha=0.5,zorder=11)
elif tell is not None:
scale = 0.8*ymax
spec_plot.plot(wave[wave_mask], tell[wave_mask]*scale, drawstyle='steps-mid', color='m',alpha=0.5,zorder=11)
spec_plot.set_ylim([ymin, ymax])
spec_plot.set_xlim([wave_min, wave_max])
spec_plot.set_xlabel('Wavelength ($\\rm\\AA$)')
spec_plot.set_ylabel('Flux')
if title is not None:
num_plot.set_title(title,color='red',fontsize=16)
if qafile is not None:
if len(qafile.split('.'))==1:
msgs.info("No format given for the qafile; saving to PDF format.")
qafile = qafile+'.pdf'
plt.savefig(qafile,dpi=300)
msgs.info("Wrote QA: {:s}".format(qafile))
plt.show()
def update_errors(fluxes, ivars, masks, fluxes_stack, ivars_stack, masks_stack,
sn_clip=30.0, title='', debug=False):
'''
Determine corrections to errors using the residuals of each exposure about a preliminary stack. This routine is
used as part of the iterative masking/stacking loop to determine the corrections to the errors used to reject pixels
for the next iteration of the stack. The routine returns a set of corrections for each of the exposures that is input.
Args:
fluxes (`numpy.ndarray`_):
fluxes for each exposure on the native wavelength grids
shape=(nspec, nexp)
ivars (`numpy.ndarray`_):
Inverse variances for each exposure on the native wavelength grids
shape=(nspec, nexp)
masks (`numpy.ndarray`_):
Boolean masks for each exposure on the native wavelength grids. True=Good.
shape=(nspec, nexp)
fluxes_stack (`numpy.ndarray`_):
Stacked spectrum for this iteration interpolated on the native wavelength grid of the fluxes exposures.
shape=(nspec, nexp)
ivars_stack (`numpy.ndarray`_):
Inverse variances of stacked spectrum for this iteration interpolated on the native wavelength grid of the
fluxes exposures.
shape=(nspec, nexp)
masks_stack (`numpy.ndarray`_):
Boolean mask of stacked spectrum for this iteration interpolated on the native wavelength grid of the fluxes exposures.
shape=(nspec, nexp)
sn_clip (float, optional):
Errors are capped in output rejivars so that the S/N is never greater than sn_clip. This prevents overly
aggressive rejection in high S/N ratio spectra which nevertheless differ at a level greater than the implied S/N due to
systematics.
title (str, optional):
Title for QA plot
debug (bool, optional):
If True, show QA plots useful for debugging.
Returns:
tuple:
- rejivars: `numpy.ndarray`_, (nspec, nexp): Updated inverse
variances to be used in rejection
- sigma_corrs, `numpy.ndarray`_, (nexp): Array of correction factors
applied to the original ivars to get the new rejivars
- outchi: `numpy.ndarray`_, (nspec, nexp): The original
chi=(fluxes-fluxes_stack)*np.sqrt(ivars) used to determine
the correction factors. This quantity is useful for
plotting. Note that the outchi is computed using the
original non-corrected errors.
- maskchi: `numpy.ndarray`_, bool, (nspec, nexp): Mask returned by
renormalize_errors indicating which pixels were used in
the computation of the correction factors. This is
basically the union of the input masks but with chi > clip
(clip=6.0 is the default) values clipped out.
'''
if fluxes.ndim == 1:
nexp = 1
else:
nexp = np.shape(fluxes)[1]
outchi = np.zeros_like(ivars)
maskchi = np.zeros_like(outchi,dtype=bool)
rejivars = np.zeros_like(outchi)
sigma_corrs = np.zeros(nexp)
outmasks = np.copy(masks)
# Loop on images to update noise model for rejection
for iexp in range(nexp):
if fluxes.ndim>1:
# Grab the spectrum
thisflux = fluxes[:, iexp]
thisivar = ivars[:, iexp]
thismask = outmasks[:,iexp]
# Grab the stack interpolated with the same grid as the current exposure
thisflux_stack = fluxes_stack[:, iexp]
thisvar_stack = utils.inverse(ivars_stack[:, iexp])
thismask_stack = masks_stack[:, iexp]
else:
thisflux = fluxes
thisivar = ivars
thismask = outmasks
# Grab the stack interpolated with the same grid as the current exposure
thisflux_stack = fluxes_stack
thisvar_stack = utils.inverse(ivars_stack)
thismask_stack = masks_stack
# var_tot = total variance of the quantity (fluxes - fluxes_stack), i.e. the quadrature sum of the two variances
var_tot = thisvar_stack + utils.inverse(thisivar)
mask_tot = thismask & thismask_stack
ivar_tot = utils.inverse(var_tot)
# Impose the S/N clipping threshold before computing chi and renormalizing the errors
ivar_clip = mask_tot*utils.clip_ivar(thisflux_stack, ivar_tot, sn_clip, gpm=mask_tot)
# TODO Do we need the offset code to re-center the chi? If so add it right here into the chi
chi = np.sqrt(ivar_clip)*(thisflux - thisflux_stack)
# Adjust errors to reflect the statistics of the distribution of errors. This fixes cases where the
# noise model is not quite right
this_sigma_corr, igood = renormalize_errors(chi, mask_tot, clip=6.0, max_corr=5.0, title=title, debug=debug)
ivar_tot_corr = ivar_clip/this_sigma_corr ** 2
# TODO is this correct below? JFH Thinks no
#ivar_cap = utils.clip_ivar(thisflux_stack, ivar_tot_corr, sn_clip, mask=mask_tot)
#ivar_cap = np.minimum(ivar_tot_corr, (sn_clip/(thisflux_stack + (thisflux_stack <= 0.0))) ** 2)
if fluxes.ndim>1:
sigma_corrs[iexp] = this_sigma_corr
rejivars[:, iexp] = ivar_tot_corr
outchi[:, iexp] = chi
maskchi[:, iexp] = igood
else:
sigma_corrs = np.array([this_sigma_corr])
rejivars = ivar_tot_corr
outchi = chi
maskchi = igood
return rejivars, sigma_corrs, outchi, maskchi
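# Usage sketch (illustrative only; placeholder names): given the exposures and a
# preliminary stack already interpolated onto each exposure's native grid, the
# renormalized ivars used for rejection are obtained via
#
#   rejivars, sigma_corrs, outchi, maskchi = update_errors(
#       fluxes, ivars, gpms, fluxes_stack_nat, ivars_stack_nat, gpms_stack_nat,
#       sn_clip=30.0)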
def spec_reject_comb(wave_grid, waves, fluxes, ivars, masks, weights, sn_clip=30.0, lower=3.0, upper=3.0,
maxrej=None, maxiter_reject=5, title='', debug=False,
verbose=False):
"""
Routine for executing the iterative combine and rejection of a set of spectra to compute a final stacked spectrum.
Parameters
----------
wave_grid: `numpy.ndarray`_
new wavelength grid desired. This will typically be a regularly spaced grid created by the get_wave_grid routine.
The reason for the ngrid+1 is that this is the general way to specify a set of bins if you desire ngrid
bin centers, i.e. the output stacked spectra have ngrid elements. The spacing of this grid can be regular in
lambda (better for multislit) or log lambda (better for echelle). This new wavelength grid should be designed
with the sampling of the data in mind. For example, the code will work fine if you choose the sampling to be
too fine, but then the number of exposures contributing to any given wavelength bin will be one or zero in the
limiting case of very small wavelength bins. For larger wavelength bins, the number of exposures contributing
to a given bin will be larger.
shape=(ngrid +1,)
waves: `numpy.ndarray`_
wavelength arrays for spectra to be stacked. Note that the wavelength grids can in general be different for
each exposure and irregularly spaced.
shape=(nspec, nexp)
fluxes: `numpy.ndarray`_
fluxes for each exposure on the waves grid
ivars: `numpy.ndarray`_
Inverse variances for each exposure on the waves grid
masks: `numpy.ndarray`_
Boolean masks for each exposure on the waves grid. True=Good.
weights: `numpy.ndarray`_
Weights to be used for combining your spectra. These are computed using sn_weights
sn_clip: float, optional, default=30.0
Errors are capped during rejection so that the S/N is never greater than sn_clip. This prevents overly aggressive rejection
        in high S/N ratio spectra which nevertheless differ at a level greater than the implied S/N due to
systematics.
lower: float, optional, default=3.0
lower rejection threshold for djs_reject
upper: float, optional, default=3.0
upper rejection threshold for djs_reject
maxrej: int, optional, default=None
maximum number of pixels to reject in each iteration for djs_reject.
maxiter_reject: int, optional, default=5
maximum number of iterations for stacking and rejection. The code stops iterating either when
        the output mask does not change between successive iterations or when maxiter_reject is reached.
title: str, optional
Title for QA plot
debug: bool, optional, default=False
Show QA plots useful for debugging.
verbose: bool, optional, default=False
        If True, print out the number of rejected pixels for each exposure.
Returns
-------
wave_stack: `numpy.ndarray`_
Wavelength grid for stacked
spectrum. As discussed above, this is the weighted average
        of the wavelengths of each spectrum that contributed to a
bin in the input wave_grid wavelength grid. It thus has
ngrid elements, whereas wave_grid has ngrid+1 elements to
specify the ngrid total number of bins. Note that
wave_stack is NOT simply the wave_grid bin centers, since
it computes the weighted average.
shape=(ngrid,)
flux_stack: `numpy.ndarray`_
Final stacked spectrum on
wave_stack wavelength grid
shape=(ngrid,)
ivar_stack: `numpy.ndarray`_
Inverse variance spectrum
        on wave_stack wavelength grid. Errors are propagated
according to weighting and masking.
shape=(ngrid,)
mask_stack: `numpy.ndarray`_
Boolean mask for stacked
spectrum on wave_stack wavelength grid. True=Good.
shape=(ngrid,)
outmask: `numpy.ndarray`_
Output bool mask with shape=(nspec, nexp)
indicating which pixels are rejected in each exposure of
the original input spectra after performing all of the
iterations of combine/rejection
nused: `numpy.ndarray`_
Number of exposures which
contributed to each pixel in the wave_stack. Note that
this is in general different from nexp because of masking,
        but also because of the sampling specified by wave_grid. In
other words, sometimes more spectral pixels in the
irregularly gridded input wavelength array waves will land
in one bin versus another depending on the sampling.
shape=(ngrid,)
"""
thismask = np.copy(masks)
iter = 0
qdone = False
while (not qdone) and (iter < maxiter_reject):
wave_stack, flux_stack, ivar_stack, mask_stack, nused = compute_stack(
wave_grid, waves, fluxes, ivars, thismask, weights)
flux_stack_nat, ivar_stack_nat, mask_stack_nat = interp_spec(
waves, wave_stack, flux_stack, ivar_stack, mask_stack)
rejivars, sigma_corrs, outchi, maskchi = update_errors(fluxes, ivars, thismask,
flux_stack_nat, ivar_stack_nat, mask_stack_nat,
sn_clip=sn_clip)
thismask, qdone = pydl.djs_reject(fluxes, flux_stack_nat, outmask=thismask,inmask=masks, invvar=rejivars,
lower=lower,upper=upper, maxrej=maxrej, sticky=False)
iter += 1
if (iter == maxiter_reject) & (maxiter_reject != 0):
msgs.warn('Maximum number of iterations maxiter={:}'.format(maxiter_reject) + ' reached in spec_reject_comb')
outmask = np.copy(thismask)
# print out a summary of how many pixels were rejected
nexp = waves.shape[1]
nrej = np.sum(np.invert(outmask) & masks, axis=0)
norig = np.sum((waves > 1.0) & np.invert(masks), axis=0)
if verbose:
for iexp in range(nexp):
# nrej = pixels that are now masked that were previously good
msgs.info("Rejected {:d} pixels in exposure {:d}/{:d}".format(nrej[iexp], iexp, nexp))
# Compute the final stack using this outmask
wave_stack, flux_stack, ivar_stack, mask_stack, nused = compute_stack(wave_grid, waves, fluxes, ivars, outmask, weights)
# Used only for plotting below
if debug:
# TODO Add a line here to optionally show the distribution of all pixels about the stack as we do for X-shooter.
#flux_stack_nat, ivar_stack_nat, mask_stack_nat = interp_spec(waves, wave_stack, flux_stack, ivar_stack, mask_stack)
for iexp in range(nexp):
# plot the residual distribution for each exposure
            title_renorm = title + ': Error distribution about stack for exposure {:d}/{:d}'.format(iexp,nexp)
renormalize_errors_qa(outchi[:, iexp], maskchi[:, iexp], sigma_corrs[iexp], title=title_renorm)
# plot the rejections for each exposures
title_coadd_iexp = title + ': nrej={:d} pixels rejected,'.format(nrej[iexp]) + \
' norig={:d} originally masked,'.format(norig[iexp]) + \
' for exposure {:d}/{:d}'.format(iexp,nexp)
coadd_iexp_qa(waves[:, iexp], fluxes[:, iexp], rejivars[:, iexp], masks[:, iexp], wave_stack, flux_stack,
ivar_stack, mask_stack, outmask[:, iexp], qafile=None, title=title_coadd_iexp)
# weights qa
title_weights = title + ': Weights Used -- nrej={:d} total pixels rejected,'.format(np.sum(nrej)) + \
' norig={:d} originally masked'.format(np.sum(norig))
weights_qa(waves, weights, outmask, title=title_weights)
return wave_stack, flux_stack, ivar_stack, mask_stack, outmask, nused
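# Illustrative sketch (not executed; hypothetical inputs): a minimal call to spec_reject_comb, assuming
# wave_grid comes from wvutils.get_wave_grid and weights from sn_weights, as in combspec below.
#
#   wave_stack, flux_stack, ivar_stack, mask_stack, outmask, nused = spec_reject_comb(
#       wave_grid, waves, fluxes, ivars, masks, weights, sn_clip=30.0, lower=3.0, upper=3.0)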
def scale_spec_stack(wave_grid, waves, fluxes, ivars, masks, sn, weights,
ref_percentile=70.0, maxiter_scale=5,
sigrej_scale=3.0, scale_method='auto',
hand_scale=None, sn_min_polyscale=2.0, sn_min_medscale=0.5,
debug=False, show=False):
"""
    Scale a set of spectra onto a common flux scale. An initial stack of the input spectra is computed
    and each exposure is then rescaled to match that stack via scale_spec.
Parameters
----------
wave_grid: `numpy.ndarray`_
        New wavelength grid desired. This will typically be a regularly spaced grid created by the get_wave_grid routine.
The reason for the ngrid+1 is that this is the general way to specify a set of bins if you desire ngrid
bin centers, i.e. the output stacked spectra have ngrid elements. The spacing of this grid can be regular in
lambda (better for multislit) or log lambda (better for echelle). This new wavelength grid should be designed
with the sampling of the data in mind. For example, the code will work fine if you choose the sampling to be
too fine, but then the number of exposures contributing to any given wavelength bin will be one or zero in the
limiting case of very small wavelength bins. For larger wavelength bins, the number of exposures contributing
to a given bin will be larger.
shape=(ngrid +1,)
waves: `numpy.ndarray`_
wavelength arrays for spectra to be stacked. Note that the wavelength grids can in general be different for
each exposure and irregularly spaced.
shape=(nspec, nexp)
fluxes: `numpy.ndarray`_
fluxes for each exposure on the waves grid
shape=(nspec, nexp)
ivars: `numpy.ndarray`_
Inverse variances for each exposure on the waves grid
shape=(nspec, nexp)
masks: `numpy.ndarray`_
Bool masks for each exposure on the waves grid. True=Good.
shape=(nspec, nexp)
sn: `numpy.ndarray`_
sn of each spectrum in the stack used to determine which scaling method should be used. This can
be computed using sn_weights. shape=(nexp,)
sigrej_scale: float, optional, default=3.0
Rejection threshold used for rejecting pixels when rescaling spectra with scale_spec.
ref_percentile: float, optional, default=70.0
percentile fraction cut used for selecting minimum SNR cut for robust_median_ratio
maxiter_scale: int, optional, default=5
Maximum number of iterations performed for rescaling spectra.
scale_method: str, optional, default='auto'
Options are auto, poly, median, none, or hand. Hand is not well tested.
User can optionally specify the rescaling method. Default is to let the
        code determine this automatically, which works well.
hand_scale: `numpy.ndarray`_, optional
array of hand scale factors, not well tested
sn_min_polyscale: float, optional, default=2.0
        maximum SNR for performing median scaling
sn_min_medscale: float, optional default=0.5
        minimum SNR for performing median scaling
debug: bool, optional, default=False
show interactive QA plot
Returns
-------
    fluxes_scale: `numpy.ndarray`_
        Rescaled fluxes
        shape=(nspec, nexp)
    ivars_scale: `numpy.ndarray`_
        Rescaled inverse variances
        shape=(nspec, nexp)
scales: `numpy.ndarray`_
shape=(nspec, nexp); Scale factors applied to
each individual spectrum before the combine computed by
scale_spec
scale_method_used: list
List of methods used for rescaling spectra.
"""
# Compute an initial stack as the reference, this has its own wave grid based on the weighted averages
wave_stack, flux_stack, ivar_stack, mask_stack, nused = compute_stack(wave_grid, waves, fluxes, ivars, masks, weights)
# Rescale spectra to line up with our preliminary stack so that we can sensibly reject outliers
nexp = np.shape(fluxes)[1]
fluxes_scale = np.zeros_like(fluxes)
ivars_scale = np.zeros_like(ivars)
scales = np.zeros_like(fluxes)
scale_method_used = []
for iexp in range(nexp):
hand_scale_iexp = None if hand_scale is None else hand_scale[iexp]
fluxes_scale[:, iexp], ivars_scale[:, iexp], scales[:, iexp], scale_method_iexp = scale_spec(
waves[:, iexp], fluxes[:, iexp], ivars[:, iexp], sn[iexp], wave_stack, flux_stack, ivar_stack,
mask=masks[:, iexp], mask_ref=mask_stack, ref_percentile=ref_percentile, maxiters=maxiter_scale,
sigrej=sigrej_scale, scale_method=scale_method, hand_scale=hand_scale_iexp, sn_min_polyscale=sn_min_polyscale,
sn_min_medscale=sn_min_medscale, debug=debug, show=show)
scale_method_used.append(scale_method_iexp)
return fluxes_scale, ivars_scale, scales, scale_method_used
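# Illustrative sketch (not executed; hypothetical inputs): scale_spec_stack as it is used in combspec
# below, with rms_sn and weights assumed to come from sn_weights on the same (nspec, nexp) arrays.
#
#   fluxes_scale, ivars_scale, scales, scale_methods = scale_spec_stack(
#       wave_grid, waves, fluxes, ivars, masks, rms_sn, weights, scale_method='auto')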
def combspec(waves, fluxes, ivars, masks, sn_smooth_npix,
wave_method='linear', dwave=None, dv=None, dloglam=None,
spec_samp_fact=1.0, wave_grid_min=None, wave_grid_max=None,
ref_percentile=70.0, maxiter_scale=5, wave_grid_input=None,
sigrej_scale=3.0, scale_method='auto', hand_scale=None,
sn_min_polyscale=2.0, sn_min_medscale=0.5,
const_weights=False, maxiter_reject=5, sn_clip=30.0,
lower=3.0, upper=3.0, maxrej=None, qafile=None, title='', debug=False,
debug_scale=False, show_scale=False, show=False,
verbose=True):
'''
    Main driver routine for coadding longslit/multi-slit spectra. A common wavelength grid is generated,
    S/N weights are computed, the exposures are rescaled onto a common flux scale with scale_spec_stack,
    and the result is iteratively stacked with outlier rejection via spec_reject_comb.
Parameters
----------
waves: `numpy.ndarray`_
Wavelength arrays for spectra to be stacked.
shape=(nspec, nexp)
fluxes: `numpy.ndarray`_
Flux arrays for spectra to be stacked.
shape=(nspec, nexp)
ivars: `numpy.ndarray`_
ivar arrays for spectra to be stacked.
shape=(nspec, nexp)
sn_smooth_npix: int
Number of pixels to median filter by when computing S/N used to decide how to scale and weight spectra
wave_method: str, optional
        method for generating new wavelength grid with get_wave_grid. Default is 'linear' which creates a uniformly
        spaced grid in lambda. See documentation on get_wave_grid for description of the options.
dwave: float, optional
dispersion in units of A in case you want to specify it for get_wave_grid, otherwise the code computes the
median spacing from the data.
dv: float, optional
Dispersion in units of km/s in case you want to specify it in the get_wave_grid (for the 'velocity' option),
otherwise a median value is computed from the data.
spec_samp_fact: float, optional
Make the wavelength grid sampling finer (spec_samp_fact < 1.0) or coarser (spec_samp_fact > 1.0) by this
        sampling factor. This basically multiplies the 'native' spectral pixels by spec_samp_fact, i.e. the units
        of spec_samp_fact are pixels.
wave_grid_min: float, optional
In case you want to specify the minimum wavelength in your wavelength grid, default=None computes from data.
wave_grid_max: float, optional
In case you want to specify the maximum wavelength in your wavelength grid, default=None computes from data.
    ref_percentile: float, optional, default=70.0
percentile fraction cut used for selecting minimum SNR cut for robust_median_ratio
maxiter_scale: int, optional, default=5
Maximum number of iterations performed for rescaling spectra.
wave_grid_input : `numpy.ndarray`_
User input wavelength grid to be used with the 'user_input' wave_method. Shape is (nspec_input,)
maxiter_reject: int, optional, default=5
maximum number of iterations for stacking and rejection. The code stops iterating either when
        the output mask does not change between successive iterations or when maxiter_reject is reached.
sigrej_scale: float, optional, default=3.0
Rejection threshold used for rejecting pixels when rescaling spectra with scale_spec.
scale_method: str, optional
Options are auto, poly, median, none, or hand. Hand is not well tested.
User can optionally specify the rescaling method.
        Default is 'auto', which lets the code determine the method automatically; this works well.
hand_scale: `numpy.ndarray`_, optional
Array of hand scale factors, not well tested
sn_min_polyscale: float, optional, default = 2.0
        maximum SNR for performing median scaling
sn_min_medscale: float, optional, default = 0.5
        minimum SNR for performing median scaling
const_weights: bool, optional
If True, apply constant weight
sn_clip: float, optional, default=30.0
Errors are capped during rejection so that the S/N is never greater than sn_clip. This prevents overly aggressive rejection
        in high S/N ratio spectra which nevertheless differ at a level greater than the implied S/N due to
systematics.
lower: float, optional, default=3.0
lower rejection threshold for djs_reject
upper: float: optional, default=3.0
upper rejection threshold for djs_reject
maxrej: int, optional
maximum number of pixels to reject in each iteration for djs_reject.
qafile: str, default=None
Root name for QA, if None, it will be determined from the outfile
title: str, optional
Title for QA plots
debug: bool, default=False
Show all QA plots useful for debugging. Note there are lots of QA plots, so only set this to True if you want to inspect them all.
debug_scale: bool, default=False
show interactive QA plots for the rescaling of the spectra
show: bool, default=False
If True, show key QA plots or not
show_scale: bool, default=False
If True, show interactive QA plots for the rescaling of the spectra
Returns
-------
wave_grid_mid: `numpy.ndarray`_
Wavelength grid (in Angstrom) evaluated at the bin centers,
uniformly-spaced either in lambda or log10-lambda/velocity. See core.wavecal.wvutils.py for more.
shape=(ngrid,)
wave_stack: `numpy.ndarray`_
Wavelength grid for stacked
spectrum. As discussed above, this is the weighted average
        of the wavelengths of each spectrum that contributed to a
bin in the input wave_grid wavelength grid. It thus has
ngrid elements, whereas wave_grid has ngrid+1 elements to
specify the ngrid total number of bins. Note that
wave_stack is NOT simply the wave_grid bin centers, since
it computes the weighted average.
shape=(ngrid,)
flux_stack: `numpy.ndarray`_
Final stacked spectrum on
wave_stack wavelength grid
shape=(ngrid,)
ivar_stack: `numpy.ndarray`_
Inverse variance spectrum on wave_stack
        wavelength grid. Errors are propagated according to
weighting and masking.
shape=(ngrid,)
mask_stack: `numpy.ndarray`_
Boolean mask for stacked
spectrum on wave_stack wavelength grid. True=Good.
shape=(ngrid,)
'''
# We cast to float64 because of a bug in np.histogram
waves = np.float64(waves)
fluxes = np.float64(fluxes)
ivars = np.float64(ivars)
# Generate a giant wave_grid
wave_grid, wave_grid_mid, _ = wvutils.get_wave_grid(
waves=waves, masks = masks, wave_method=wave_method,
wave_grid_min=wave_grid_min, wave_grid_max=wave_grid_max,
wave_grid_input=wave_grid_input,
dwave=dwave, dv=dv, dloglam=dloglam, spec_samp_fact=spec_samp_fact)
# Evaluate the sn_weights. This is done once at the beginning
rms_sn, weights = sn_weights(waves, fluxes, ivars, masks, sn_smooth_npix, const_weights=const_weights, verbose=verbose)
fluxes_scale, ivars_scale, scales, scale_method_used = scale_spec_stack(
wave_grid, waves, fluxes, ivars, masks, rms_sn, weights, ref_percentile=ref_percentile, maxiter_scale=maxiter_scale,
sigrej_scale=sigrej_scale, scale_method=scale_method, hand_scale=hand_scale,
sn_min_polyscale=sn_min_polyscale, sn_min_medscale=sn_min_medscale, debug=debug_scale, show=show_scale)
# Rejecting and coadding
wave_stack, flux_stack, ivar_stack, mask_stack, outmask, nused = spec_reject_comb(
wave_grid, waves, fluxes_scale, ivars_scale, masks, weights, sn_clip=sn_clip, lower=lower, upper=upper,
maxrej=maxrej, maxiter_reject=maxiter_reject, debug=debug, title=title)
if show:
coadd_qa(wave_stack, flux_stack, ivar_stack, nused, mask=mask_stack, title='Stacked spectrum', qafile=qafile)
return wave_grid_mid, wave_stack, flux_stack, ivar_stack, mask_stack
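# Illustrative sketch (not executed; hypothetical inputs): a minimal combspec call for a set of longslit
# exposures with shape (nspec, nexp); the sn_smooth_npix value here is arbitrary.
#
#   wave_grid_mid, wave_stack, flux_stack, ivar_stack, mask_stack = combspec(
#       waves, fluxes, ivars, masks, sn_smooth_npix=101, wave_method='linear')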
def multi_combspec(waves, fluxes, ivars, masks, sn_smooth_npix=None,
wave_method='linear', dwave=None, dv=None, dloglam=None, spec_samp_fact=1.0, wave_grid_min=None,
wave_grid_max=None, ref_percentile=70.0, maxiter_scale=5,
sigrej_scale=3.0, scale_method='auto', hand_scale=None, sn_min_polyscale=2.0, sn_min_medscale=0.5,
const_weights=False, maxiter_reject=5, sn_clip=30.0, lower=3.0, upper=3.0,
maxrej=None,
qafile=None, debug=False, debug_scale=False, show_scale=False, show=False):
"""
Routine for coadding longslit/multi-slit spectra. Calls combspec which is
the main stacking algorithm.
Args:
waves (`numpy.ndarray`_):
Wavelength array with shape (nspec, nexp) containing the spectra to be coadded.
fluxes (`numpy.ndarray`_):
Flux array with shape (nspec, nexp) containing the spectra to be coadded.
ivars (`numpy.ndarray`_):
Ivar array with shape (nspec, nexp) containing the spectra to be coadded.
masks (`numpy.ndarray`_):
            Mask array with shape (nspec, nexp) containing the spectra to be coadded.
        sn_smooth_npix (int, optional):
Number of pixels to median filter by when computing S/N used to decide how to scale and weight spectra. If
set to None, the code will determine the effective number of good pixels per spectrum
in the stack that is being co-added and use 10% of this neff.
wave_method (str, optional):
            method for generating new wavelength grid with get_wave_grid. Default is 'linear' which creates a uniformly
            spaced grid in lambda. See documentation on get_wave_grid for description of the options.
dwave (float, optional):
dispersion in units of A in case you want to specify it for get_wave_grid, otherwise the code computes the
median spacing from the data.
dv (float, optional):
Dispersion in units of km/s in case you want to specify it in the get_wave_grid (for the 'velocity' option),
otherwise a median value is computed from the data.
spec_samp_fact (float, optional):
Make the wavelength grid sampling finer (spec_samp_fact < 1.0) or coarser (spec_samp_fact > 1.0) by this
            sampling factor. This basically multiplies the 'native' spectral pixels by spec_samp_fact, i.e. the units
            of spec_samp_fact are pixels.
wave_grid_min (float, optional):
In case you want to specify the minimum wavelength in your wavelength grid, default=None computes from data.
wave_grid_max (float, optional):
In case you want to specify the maximum wavelength in your wavelength grid, default=None computes from data.
        maxiter_reject (int, optional):
            maximum number of iterations for stacking and rejection. The code stops iterating either when
            the output mask does not change between successive iterations or when maxiter_reject is reached. Default=5.
ref_percentile (float, optional):
percentile fraction cut used for selecting minimum SNR cut for robust_median_ratio. Should be a number between
0 and 100, default = 70.0
maxiter_scale (int, optional):
Maximum number of iterations performed for rescaling spectra. Default=5.
sigrej_scale (float, optional):
Rejection threshold used for rejecting pixels when rescaling spectra with scale_spec. Default=3.0
scale_method (str, optional):
Options are auto, poly, median, none, or hand. Hand is not well tested.
User can optionally specify the rescaling method. Default='auto' will let the
            code determine this automatically, which works well.
hand_scale (`numpy.ndarray`_, optional):
Array of hand scale factors, not well tested
sn_min_polyscale (float, optional):
            maximum SNR for performing median scaling
sn_min_medscale (float, optional):
            minimum SNR for performing median scaling
        const_weights (bool, optional):
            If True, apply constant weights.
sn_clip (float, optional):
Errors are capped during rejection so that the S/N is never greater than sn_clip. This prevents overly aggressive rejection
            in high S/N ratio spectra which nevertheless differ at a level greater than the implied S/N due to
systematics.
lower (float, optional):
lower rejection threshold for djs_reject
upper (float, optional):
upper rejection threshold for djs_reject
maxrej (int, optional):
maximum number of pixels to reject in each iteration for djs_reject.
        qafile (str, optional): default=None
            Root name for QA, if None, it will be determined from the outfile
        debug (bool, optional): default=False,
            Show all QA plots useful for debugging. Note there are lots of QA plots, so only set this to True if you want to inspect them all.
        debug_scale (bool, optional): default=False
            show interactive QA plots for the rescaling of the spectra
        show (bool, optional): default=False,
            Show key QA plots or not
Returns:
:obj:`tuple`:
- wave_grid_mid: `numpy.ndarray`_, (ngrid,): Wavelength grid (in Angstrom)
evaluated at the bin centers, uniformly-spaced either in lambda or
log10-lambda/velocity. See core.wavecal.wvutils.py for more.
- wave_stack: `numpy.ndarray`_, (ngrid,): Wavelength grid for stacked
spectrum. As discussed above, this is the weighted average of the
                wavelengths of each spectrum that contributed to a bin in the input
wave_grid wavelength grid. It thus has ngrid elements, whereas
wave_grid has ngrid+1 elements to specify the ngrid total number
of bins. Note that wave_stack is NOT simply the wave_grid bin
centers, since it computes the weighted average.
- flux_stack: `numpy.ndarray`_, (ngrid,): Final stacked spectrum on
wave_stack wavelength grid
- ivar_stack: `numpy.ndarray`_, (ngrid,): Inverse variance spectrum on
                wave_stack wavelength grid. Errors are propagated according to
weighting and masking.
- mask_stack: `numpy.ndarray`_, bool, (ngrid,): Mask for stacked spectrum on
wave_stack wavelength grid. True=Good.
"""
# Decide how much to smooth the spectra by if this number was not passed in
if sn_smooth_npix is None:
nspec, nexp = waves.shape
# This is the effective good number of spectral pixels in the stack
nspec_eff = np.sum(waves > 1.0)/nexp
sn_smooth_npix = int(np.round(0.1*nspec_eff))
msgs.info('Using a sn_smooth_npix={:d} to decide how to scale and weight your spectra'.format(sn_smooth_npix))
wave_grid_mid, wave_stack, flux_stack, ivar_stack, mask_stack = combspec(
waves, fluxes,ivars, masks, wave_method=wave_method, dwave=dwave, dv=dv, dloglam=dloglam,
spec_samp_fact=spec_samp_fact, wave_grid_min=wave_grid_min, wave_grid_max=wave_grid_max, ref_percentile=ref_percentile,
maxiter_scale=maxiter_scale, sigrej_scale=sigrej_scale, scale_method=scale_method, hand_scale=hand_scale,
sn_min_medscale=sn_min_medscale, sn_min_polyscale=sn_min_polyscale, sn_smooth_npix=sn_smooth_npix,
const_weights=const_weights, maxiter_reject=maxiter_reject, sn_clip=sn_clip, lower=lower, upper=upper,
maxrej=maxrej, qafile=qafile, title='multi_combspec', debug=debug, debug_scale=debug_scale, show_scale=show_scale,
show=show)
# Write to disk?
#if outfile is not None:
# save.save_coadd1d_to_fits(outfile, wave_stack, flux_stack, ivar_stack, mask_stack, header=header,
# ex_value=ex_value, overwrite=True)
return wave_grid_mid, wave_stack, flux_stack, ivar_stack, mask_stack
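# Illustrative sketch (not executed; hypothetical inputs): multi_combspec with the smoothing width left
# to its default heuristic (10% of the effective number of good pixels per spectrum).
#
#   wave_grid_mid, wave_stack, flux_stack, ivar_stack, mask_stack = multi_combspec(
#       waves, fluxes, ivars, masks)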
def ech_combspec(waves, fluxes, ivars, masks, weights_sens, nbest=None,
wave_method='log10', dwave=None, dv=None, dloglam=None,
spec_samp_fact=1.0, wave_grid_min=None, wave_grid_max=None,
ref_percentile=70.0, maxiter_scale=5, niter_order_scale=3,
sigrej_scale=3.0, scale_method='auto',
hand_scale=None, sn_min_polyscale=2.0, sn_min_medscale=0.5,
sn_smooth_npix=None, const_weights=False, maxiter_reject=5,
sn_clip=30.0, lower=3.0, upper=3.0,
maxrej=None, qafile=None, debug_scale=False, debug=False,
show_order_stacks=False, show_order_scale=False,
show_exp=False, show=False, verbose=False):
"""
    Driver routine for coadding Echelle spectra. Calls combspec which is the main stacking algorithm. It returns
    both the stacked individual order spectra and a giant stack of all exposures and all orders. In most cases,
    you should use the giant stack for your scientific analyses since it rejects most outliers.
    .. todo:: Clean up the doc formatting
Parameters
----------
waves: `numpy.ndarray`_
Wavelength arrays for spectra to be stacked.
shape=(nspec, norder, nexp)
fluxes: `numpy.ndarray`_
Flux arrays for spectra to be stacked.
shape=(nspec, norder, nexp)
ivars: `numpy.ndarray`_
ivar arrays for spectra to be stacked.
shape=(nspec, norder, nexp)
masks: `numpy.ndarray`_
Mask array with shape (nspec, norders, nexp) containing the spectra to be coadded.
weights_sens: `numpy.ndarray`_
Sensitivity function weights required for relatively weighting of the
orders. Must have the same shape as waves, etc.
nbest: int, optional
Number of orders to use for estimating the per exposure weights.
Default is nbest=None, which will just use one fourth of the orders.
wave_method: str, optional
        method for generating new wavelength grid with get_wave_grid. Default is 'log10' which creates a uniformly
        spaced grid in log10(lambda), which is typically the best for echelle spectrographs
dwave: float, optional
dispersion in units of A in case you want to specify it for get_wave_grid, otherwise the code computes the
median spacing from the data.
dv: float, optional
Dispersion in units of km/s in case you want to specify it in the get_wave_grid (for the 'velocity' option),
otherwise a median value is computed from the data.
dloglam: float, optional
Dispersion in dimensionless units
spec_samp_fact: float, optional
Make the wavelength grid sampling finer (spec_samp_fact < 1.0) or coarser (spec_samp_fact > 1.0) by this
        sampling factor. This basically multiplies the 'native' spectral pixels by spec_samp_fact, i.e. the units
        of spec_samp_fact are pixels.
wave_grid_min: float, optional
In case you want to specify the minimum wavelength in your wavelength grid, default=None computes from data.
wave_grid_max: float, optional
In case you want to specify the maximum wavelength in your wavelength grid, default=None computes from data.
    ref_percentile: float, optional, default=70.0
        percentile fraction cut used for selecting minimum SNR cut for robust_median_ratio
maxiter_scale: int, optional, default=5
Maximum number of iterations performed for rescaling spectra.
sigrej_scale: float, optional, default=3.0
Rejection threshold used for rejecting pixels when rescaling spectra with scale_spec.
hand_scale: `numpy.ndarray`_, optional
Array of hand scale factors, not well tested
sn_min_polyscale: float, optional, default = 2.0
        maximum SNR for performing median scaling
sn_min_medscale: float, optional, default = 0.5
        minimum SNR for performing median scaling
const_weights: bool, optional
If True, apply constant weight
maxiter_reject: int, optional, default=5
maximum number of iterations for stacking and rejection. The code stops iterating either when
        the output mask does not change between successive iterations or when maxiter_reject is reached.
sn_clip: float, optional, default=30.0
Errors are capped during rejection so that the S/N is never greater than sn_clip. This prevents overly aggressive rejection
        in high S/N ratio spectra which nevertheless differ at a level greater than the implied S/N due to systematics.
lower: float, optional, default=3.0
lower rejection threshold for djs_reject
upper: float: optional, default=3.0
upper rejection threshold for djs_reject
maxrej: int, optional
maximum number of pixels to reject in each iteration for djs_reject.
Returns
-------
wave_grid_mid: `numpy.ndarray`_
Wavelength grid (in Angstrom) evaluated at the bin centers,
uniformly-spaced either in lambda or log10-lambda/velocity. See core.wavecal.wvutils.py for more.
shape=(ngrid,)
a_tuple: tuple
(wave_giant_stack: ndarray, (ngrid,): Wavelength grid for
stacked spectrum. As discussed above, this is the weighted
average of the wavelengths of each spectrum that
        contributed to a bin in the input wave_grid wavelength
grid. It thus has ngrid elements, whereas wave_grid has
ngrid+1 elements to specify the ngrid total number of
bins. Note that wave_giant_stack is NOT simply the
wave_grid bin centers, since it computes the weighted
average;
flux_giant_stack: ndarray, (ngrid,): Final stacked
spectrum on wave_stack wavelength grid;
ivar_giant_stack: ndarray, (ngrid,): Inverse variance
        spectrum on wave_stack wavelength grid. Errors are
propagated according to weighting and masking.;
mask_giant_stack: ndarray, bool, (ngrid,): Mask for
stacked spectrum on wave_stack wavelength grid. True=Good.
)
another_tuple: tuple
(waves_stack_orders,
fluxes_stack_orders,
ivars_stack_orders,
        masks_stack_orders)
"""
# TODO: Please leave this commented docstring entry here for now.
# merge_stack: bool, default=False,
# Compute an experimental combine of the high S/N combined orders in addition to the default algorithm,
# which is to compute one giant stack using all order overlaps
# output filenams for fits and QA plots
#outfile_order = outfile.replace('.fits', '_order.fits') if outfile is not None else None
if qafile is not None:
qafile_stack = qafile.replace('.pdf', '_stack.pdf')
qafile_chi = qafile.replace('.pdf', '_chi.pdf')
else:
qafile_stack = None
qafile_chi = None
# data shape
nspec, norder, nexp = waves.shape
if nbest is None:
# Decide how many orders to use for estimating the per exposure weights. If nbest was not passed in
# default to using one fourth of the orders
nbest = int(np.ceil(norder/4))
# Decide how much to smooth the spectra by if this number was not passed in
if sn_smooth_npix is None:
# This is the effective good number of spectral pixels in the stack
nspec_eff = np.sum(waves > 1.0)/(norder*nexp)
sn_smooth_npix = int(np.round(0.1 * nspec_eff))
        msgs.info('Using a sn_smooth_npix={:d} to decide how to scale and weight your spectra'.format(sn_smooth_npix))
# create some arrays
scales = np.zeros_like(waves)
# Generate a giant wave_grid
wave_grid, wave_grid_mid, _ = wvutils.get_wave_grid(waves, masks=masks, wave_method=wave_method,
wave_grid_min=wave_grid_min,
wave_grid_max=wave_grid_max, dwave=dwave, dv=dv,
dloglam=dloglam, spec_samp_fact=spec_samp_fact)
# Evaluate the sn_weights. This is done once at the beginning
rms_sn, weights_sn = sn_weights(waves, fluxes, ivars, masks, sn_smooth_npix, const_weights=const_weights, verbose=verbose)
# Isolate the nbest best orders, and then use the average S/N of these to determine the per exposure relative weights.
mean_sn_ord = np.mean(rms_sn, axis=1)
best_orders = np.argsort(mean_sn_ord)[::-1][0:nbest]
rms_sn_per_exp = np.mean(rms_sn[best_orders, :], axis=0)
weights_exp = np.tile(rms_sn_per_exp**2, (nspec, norder, 1))
weights = weights_exp*weights_sens
#
# Old code below for ivar weights if the sensfile was not passed in
#msgs.error('Using ivar weights is deprecated.')
#msgs.warn('No sensfunc is available for weighting, using smoothed ivar weights which is not optimal!')
#_, weights_ivar = sn_weights(waves, fluxes, ivars, masks, sn_smooth_npix, const_weights=const_weights,
# ivar_weights=True, verbose=True)
#weights = weights_exp*weights_ivar
if debug:
weights_qa(waves, weights, masks, title='ech_combspec')
fluxes_scl_interord = np.zeros_like(fluxes)
ivars_scl_interord = np.zeros_like(ivars)
scales_interord = np.zeros_like(fluxes)
# First perform inter-order scaling once
for iord in range(norder):
# TODO Add checking here such that orders with low S/N ratio are instead scaled using scale factors from
# higher S/N ratio. The point is it makes no sense to take 0.0/0.0. In the low S/N regime, i.e. DLAs,
# GP troughs, we should be rescaling using scale factors from orders with signal. This also applies
# to the echelle combine below.
fluxes_scl_interord[:, iord], ivars_scl_interord[:,iord], scales_interord[:,iord], scale_method_used = \
scale_spec_stack(wave_grid, waves[:, iord, :], fluxes[:, iord, :], ivars[:, iord, :], masks[:, iord, :],
rms_sn[iord, :], weights[:, iord, :], ref_percentile=ref_percentile,
maxiter_scale=maxiter_scale, sigrej_scale=sigrej_scale, scale_method=scale_method,
hand_scale=hand_scale,
sn_min_polyscale=sn_min_polyscale, sn_min_medscale=sn_min_medscale, debug=debug_scale)
# Arrays to store rescaled spectra. Need Fortran like order reshaping to create a (nspec, norder*nexp) stack of spectra.
# The order of the reshaping in the second dimension is such that blocks norder long for each exposure are stacked
    # sequentially, i.e. [:, 0:norder] would be the 1st exposure, [:, norder:2*norder] would be the
    # 2nd exposure, etc.
shape_2d = (nspec, norder * nexp)
waves_2d = np.reshape(waves, shape_2d, order='F')
fluxes_2d = np.reshape(fluxes_scl_interord, shape_2d, order='F')
ivars_2d = np.reshape(ivars_scl_interord, shape_2d, order='F')
masks_2d = np.reshape(masks, shape_2d, order='F')
scales_2d = np.reshape(scales_interord, shape_2d, order='F')
weights_2d = np.reshape(weights, shape_2d, order='F')
rms_sn_2d = np.reshape(rms_sn, (norder*nexp), order='F')
    # Iteratively scale and stack the spectra; this takes over the order re-scaling we were doing previously
fluxes_pre_scale = fluxes_2d.copy()
ivars_pre_scale = ivars_2d.copy()
    # For the first iteration use the scale_method input as an argument (default='auto', which can allow
    # solve_poly_ratio scaling which is very slow). For all the other iterations simply use median rescaling since
    # we are then applying tiny corrections and median scaling is much faster
scale_method_iter = [scale_method] + ['median']*(niter_order_scale - 1)
for iter in range(niter_order_scale):
fluxes_scale_2d, ivars_scale_2d, scales_iter, scale_method_used = scale_spec_stack(
wave_grid, waves_2d, fluxes_pre_scale, ivars_pre_scale, masks_2d, rms_sn_2d, weights_2d, ref_percentile=ref_percentile,
maxiter_scale=maxiter_scale, sigrej_scale=sigrej_scale, scale_method=scale_method_iter[iter], hand_scale=hand_scale,
sn_min_polyscale=sn_min_polyscale, sn_min_medscale=sn_min_medscale,
show=(show_order_scale & (iter == (niter_order_scale-1))))
scales_2d *= scales_iter
fluxes_pre_scale = fluxes_scale_2d.copy()
ivars_pre_scale = ivars_scale_2d.copy()
# Reshape the outputs to be (nspec, norder, nexp)
fluxes_scale = np.reshape(fluxes_scale_2d, (nspec, norder, nexp), order='F')
ivars_scale = np.reshape(ivars_scale_2d, (nspec, norder, nexp), order='F')
scales = np.reshape(scales_2d, (nspec, norder, nexp), order='F')
# Arrays to store stacked individual order spectra.
waves_stack_orders = np.zeros((np.size(wave_grid) - 1, norder))
fluxes_stack_orders = np.zeros_like(waves_stack_orders)
ivars_stack_orders = np.zeros_like(waves_stack_orders)
masks_stack_orders = np.zeros_like(waves_stack_orders, dtype=bool)
outmasks_orders = np.zeros_like(masks)
# Now perform stacks order by order
for iord in range(norder):
# Rejecting and coadding
waves_stack_orders[:, iord], fluxes_stack_orders[:, iord], ivars_stack_orders[:, iord], \
masks_stack_orders[:, iord], outmasks_orders[:,iord,:], nused_iord = spec_reject_comb(
wave_grid, waves[:, iord, :], fluxes_scale[:, iord, :], ivars_scale[:, iord, :], masks[:, iord, :], weights[:, iord, :],
sn_clip=sn_clip, lower=lower, upper=upper, maxrej=maxrej, maxiter_reject=maxiter_reject, debug=debug,
title='order_stacks')
if show_order_stacks:
            # TODO This will probably crash since sensfile is not guaranteed to have telluric.
#if sensfile is not None:
# tell_iord = get_tell_from_file(sensfile, waves_stack_orders[:, iord], masks_stack_orders[:, iord], iord=iord)
#else:
# tell_iord = None
tell_iord=None
coadd_qa(waves_stack_orders[:, iord], fluxes_stack_orders[:, iord], ivars_stack_orders[:, iord], nused_iord,
mask=masks_stack_orders[:, iord], tell=tell_iord,
title='Coadded spectrum of order {:d}/{:d}'.format(iord, norder))
# Now compute the giant stack
wave_giant_stack, flux_giant_stack, ivar_giant_stack, mask_giant_stack, outmask_giant_stack, nused_giant_stack = \
spec_reject_comb(wave_grid, waves_2d, fluxes_2d, ivars_2d, masks_2d, weights_2d, sn_clip=sn_clip,
lower=lower, upper=upper, maxrej=maxrej, maxiter_reject=maxiter_reject, debug=debug)
# Reshape everything now exposure-wise
waves_2d_exps = waves_2d.reshape((nspec * norder, nexp), order='F')
fluxes_2d_exps = fluxes_2d.reshape(np.shape(waves_2d_exps), order='F')
ivars_2d_exps = ivars_2d.reshape(np.shape(waves_2d_exps), order='F')
masks_2d_exps = masks_2d.reshape(np.shape(waves_2d_exps), order='F')
outmasks_2d_exps = outmask_giant_stack.reshape(np.shape(waves_2d_exps), order='F')
# rejection statistics, exposure by exposure
nrej = np.sum(np.invert(outmasks_2d_exps) & masks_2d_exps, axis=0) # rejected pixels
norig = np.sum((waves_2d_exps > 1.0) & np.invert(masks_2d_exps), axis=0) # originally masked pixels
if debug or show:
# Interpolate stack onto native 2d wavelength grids reshaped exposure-wise
flux_stack_2d_exps, ivar_stack_2d_exps, mask_stack_2d_exps = interp_spec(
waves_2d_exps, wave_giant_stack, flux_giant_stack, ivar_giant_stack, mask_giant_stack)
if show_exp:
# Show QA plots for each exposure
rejivars_2d_exps, sigma_corrs_2d_exps, outchi_2d_exps, maskchi_2d_exps = update_errors(
fluxes_2d_exps, ivars_2d_exps, outmasks_2d_exps, flux_stack_2d_exps, ivar_stack_2d_exps,
mask_stack_2d_exps, sn_clip=sn_clip)
# QA for individual exposures
for iexp in range(nexp):
# plot the residual distribution
msgs.info('QA plots for exposure {:} with new_sigma = {:}'.format(iexp, sigma_corrs_2d_exps[iexp]))
# plot the residual distribution for each exposure
title_renorm = 'ech_combspec: Error distribution about stack for exposure {:d}/{:d}'.format(iexp, nexp)
renormalize_errors_qa(outchi_2d_exps[:, iexp], maskchi_2d_exps[:, iexp], sigma_corrs_2d_exps[iexp],
title=title_renorm)
title_coadd_iexp = 'ech_combspec: nrej={:d} pixels rejected,'.format(nrej[iexp]) + \
' norig={:d} originally masked,'.format(norig[iexp]) + \
' for exposure {:d}/{:d}'.format(iexp, nexp)
coadd_iexp_qa(waves_2d_exps[:,iexp], fluxes_2d_exps[:,iexp], rejivars_2d_exps[:,iexp], masks_2d_exps[:,iexp],
wave_giant_stack, flux_giant_stack, ivar_giant_stack, mask_giant_stack, outmasks_2d_exps[:, iexp],
norder=norder, qafile=None, title=title_coadd_iexp)
# Global QA
rejivars_1d, sigma_corrs_1d, outchi_1d, maskchi_1d = update_errors(
fluxes_2d_exps.flatten(), ivars_2d_exps.flatten(), outmasks_2d_exps.flatten(),
flux_stack_2d_exps.flatten(), ivar_stack_2d_exps.flatten(), mask_stack_2d_exps.flatten(), sn_clip=sn_clip)
renormalize_errors_qa(outchi_1d, maskchi_1d, sigma_corrs_1d[0], qafile=qafile_chi, title='Global Chi distribution')
# show the final coadded spectrum
coadd_qa(wave_giant_stack, flux_giant_stack, ivar_giant_stack, nused_giant_stack, mask=mask_giant_stack,
title='Final stacked spectrum', qafile=qafile_stack)
# TODO: Please leave this commented code in for now.
    # ## Stack with an alternative method: combine the stacked individual order spectra directly. This is deprecated
# merge_stack = False
# if merge_stack:
# order_weights = sensfunc_weights(sensfile, waves_stack_orders, debug=debug)
# wave_merge, flux_merge, ivar_merge, mask_merge, nused_merge = compute_stack(
# wave_grid, waves_stack_orders, fluxes_stack_orders, ivars_stack_orders, masks_stack_orders, order_weights)
# if show_order_stacks:
# qafile_merge = 'spec1d_merge_{:}'.format(qafile)
# coadd_qa(wave_merge, flux_merge, ivar_merge, nused_merge, mask=mask_merge, tell = None,
# title='Straight combined spectrum of the stacked individual orders', qafile=qafile_merge)
# #if outfile is not None:
# # outfile_merge = outfile.replace('.fits', '_merge.fits')
# # save.save_coadd1d_to_fits(outfile_merge, wave_merge, flux_merge, ivar_merge, mask_merge, header=header,
# # ex_value=ex_value, overwrite=True)
# Save stacked individual order spectra
#save.save_coadd1d_to_fits(outfile_order, waves_stack_orders, fluxes_stack_orders, ivars_stack_orders, masks_stack_orders,
# header=header, ex_value = ex_value, overwrite=True)
#save.save_coadd1d_to_fits(outfile, wave_giant_stack, flux_giant_stack, ivar_giant_stack, mask_giant_stack,
# header=header, ex_value=ex_value, overwrite=True)
return wave_grid_mid, (wave_giant_stack, flux_giant_stack, ivar_giant_stack, mask_giant_stack), \
(waves_stack_orders, fluxes_stack_orders, ivars_stack_orders, masks_stack_orders,)
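# Illustrative sketch (not executed; hypothetical inputs): ech_combspec on a (nspec, norder, nexp)
# echelle data set; weights_sens must have the same shape as waves and would typically come from a
# sensitivity function.
#
#   wave_grid_mid, giant_stack, order_stacks = ech_combspec(
#       waves, fluxes, ivars, masks, weights_sens, wave_method='log10')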
# ####################################################################
# ####################################################################
# Coadd2d routines follow this point
# ####################################################################
def get_wave_ind(wave_grid, wave_min, wave_max):
"""
Utility routine used by coadd2d to determine the starting and ending indices of a wavelength grid.
Parameters
----------
wave_grid: `numpy.ndarray`_
Wavelength grid.
wave_min: float
Minimum wavelength covered by the data in question.
wave_max: float
Maximum wavelength covered by the data in question.
Returns
-------
ind_lower: int
        Integer lower index corresponding to wave_min
    ind_upper: int
        Integer upper index corresponding to wave_max
"""
diff = wave_grid - wave_min
diff[diff > 0] = np.inf
if not np.any(diff < 0):
ind_lower = 0
msgs.warn('Your wave grid does not extend blue enough. Taking bluest point')
else:
ind_lower = np.argmin(np.abs(diff))
diff = wave_max - wave_grid
diff[diff > 0] = np.inf
if not np.any(diff < 0):
ind_upper = wave_grid.size-1
msgs.warn('Your wave grid does not extend red enough. Taking reddest point')
else:
ind_upper = np.argmin(np.abs(diff))
return ind_lower, ind_upper
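# Illustrative sketch (not executed): selecting the sub-grid that covers a given wavelength range,
# as done in get_wave_bins below; the grid and limits here are hypothetical.
#
#   wave_grid = np.linspace(4000.0, 9000.0, 1001)
#   ind_lower, ind_upper = get_wave_ind(wave_grid, 4500.0, 8200.0)
#   wave_sub = wave_grid[ind_lower:ind_upper + 1]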
def get_wave_bins(thismask_stack, waveimg_stack, wave_grid):
"""
Utility routine to get the wavelength bins for 2d coadds from a mask
Parameters
----------
thismask_stack: list
List of boolean arrays containing the masks indicating which pixels are on
the slit in question. `True` values are on the slit;
`False` values are off the slit. Length of the list is nimgs. Shapes of the individual elements in the list
are (nspec, nspat), but each image can have a different shape.
waveimg_stack: list
List of the wavelength images, each of which is a float `numpy.ndarray`_. Length of the list is nimgs.
Shapes of the individual elements in the list are (nspec, nspat), but each image can have a different shape.
    wave_grid: `numpy.ndarray`_
        The wavelength grid created for the 2d coadd, shape=(ngrid,)
Returns
-------
wave_bins : `numpy.ndarray`_
shape (ind_upper-ind_lower + 1, )
Wavelength bins that are relevant given the illuminated pixels (thismask_stack) and
wavelength coverage (waveimg_stack) of the image stack
"""
# Determine the wavelength grid that we will use for the current slit/order
# TODO This cut on waveimg_stack should not be necessary
wave_lower = np.inf
wave_upper = -np.inf
for thismask, waveimg in zip(thismask_stack, waveimg_stack):
wavemask = thismask & (waveimg > 1.0)
wave_lower = min(wave_lower, np.amin(waveimg[wavemask]))
wave_upper = max(wave_upper, np.amax(waveimg[wavemask]))
# wave_min = waveimg[wavemask].min()
# wave_max = waveimg[wavemask].max()
# wave_lower = wave_min if wave_min < wave_lower else wave_lower
# wave_upper = wave_max if wave_max > wave_upper else wave_upper
ind_lower, ind_upper = get_wave_ind(wave_grid, wave_lower, wave_upper)
return wave_grid[ind_lower:ind_upper + 1]
def get_spat_bins(thismask_stack, trace_stack, spat_samp_fact=1.0):
"""
Determine the spatial bins for a 2d coadd and relative pixel coordinate
images. This routine loops over all the images being coadded and creates an
image of spatial pixel positions relative to the reference trace for each
image in units of the desired rebinned spatial pixel sampling
spat_samp_fact. The minimum and maximum relative pixel positions in this
frame are then used to define a spatial position grid with whatever desired
pixel spatial sampling.
Parameters
----------
thismask_stack : list
List of boolean arrays containing the masks indicating which pixels are
on the slit in question. `True` values are on the slit; `False` values
are off the slit. Length of the list is nimgs. Shapes of the
individual elements in the list are (nspec, nspat), but each image can
have a different shape.
    trace_stack : list
List of reference traces about which the images are rectified and
coadded. If the images were not dithered then this reference trace can
simply be the center of the slit:
.. code-block:: python
slitcen = (slit_left + slit_righ)/2
If the images were dithered, then this object can either be the slitcen
appropriately shifted with the dither pattern, or it could be the trace
of the object of interest in each exposure determined by running PypeIt
on the individual images. The list has nimgs elements, each of which is
a 1D `numpy.ndarray`_ of shape (nspec,).
spat_samp_fact : float, optional
Spatial sampling for 2d coadd spatial bins in pixels. A value > 1.0
(i.e. bigger pixels) will downsample the images spatially, whereas < 1.0
will oversample. Default = 1.0
Returns
-------
dspat_bins : `numpy.ndarray`_
shape (spat_max_int +1 - spat_min_int,)
Array of spatial bins for rectifying the image.
dspat_stack : `numpy.ndarray`_
shape (nimgs, nspec, nspat)
Image stack which has the spatial position of each exposure relative to the trace in the trace_stack for that
image.
"""
nimgs = len(thismask_stack)
dspat_stack = []
spat_min = np.inf
spat_max = -np.inf
for thismask, trace in zip(thismask_stack, trace_stack):
nspec, nspat = thismask.shape
dspat_iexp = (np.arange(nspat)[np.newaxis, :] - trace[:, np.newaxis]) / spat_samp_fact
dspat_stack.append(dspat_iexp)
spat_min = min(spat_min, np.amin(dspat_iexp[thismask]))
spat_max = max(spat_max, np.amax(dspat_iexp[thismask]))
# spat_min_iexp = dspat_iexp[thismask].min()
# spat_max_iexp = dspat_iexp[thismask].max()
# spat_min = spat_min_iexp if spat_min_iexp < spat_min else spat_min
# spat_max = spat_max_iexp if spat_max_iexp > spat_max else spat_max
spat_min_int = np.floor(spat_min)
spat_max_int = np.ceil(spat_max)
dspat_bins = np.arange(spat_min_int, spat_max_int + 1.0, 1.0,dtype=float)
return dspat_bins, dspat_stack
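# Illustrative sketch (not executed; hypothetical single exposure): the core relative-coordinate
# computation used in get_spat_bins above. dspat gives each pixel's spatial offset from the
# reference trace in units of spat_samp_fact-rebinned pixels.
#
#   nspec, nspat = thismask.shape
#   dspat = (np.arange(nspat)[np.newaxis, :] - trace[:, np.newaxis]) / spat_samp_fact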
# TODO JFH I would like to modify this to take a stack of coordinate or spatial
# position images instead of a stack of reference traces
def compute_coadd2d(ref_trace_stack, sciimg_stack, sciivar_stack, skymodel_stack,
inmask_stack, thismask_stack, waveimg_stack,
wave_grid, spat_samp_fact=1.0, maskdef_dict=None,
weights='uniform', interp_dspat=True):
"""
Construct a 2d co-add of a stack of PypeIt spec2d reduction outputs.
Slits are 'rectified' onto a spatial and spectral grid, which
encompasses the spectral and spatial coverage of the image stacks.
The rectification uses nearest grid point interpolation to avoid
covariant errors. Dithering is supported as all images are centered
relative to a set of reference traces in trace_stack.
.. todo::
These docs appear out-of-date
Args:
ref_trace_stack (list):
List of reference traces about which the images are
rectified and coadded. If the images were not dithered then
this reference trace can simply be the center of the slit::
slitcen = (slit_left + slit_righ)/2
If the images were dithered, then this object can either be
the slitcen appropriately shifted with the dither pattern,
or it could be the trace of the object of interest in each
exposure determined by running PypeIt on the individual
            images. The list has nimgs elements, each a 1D `numpy.ndarray`_ of shape (nspec,).
sciimg_stack (list):
List of science images, each of which is a float `numpy.ndarray`_. Length of the list is nimgs.
Shapes of the individual elements in the list are (nspec, nspat), but each image can have a different shape.
sciivar_stack (list):
List of inverse variance images, each of which is a float `numpy.ndarray`_. Length of the list is nimgs.
Shapes of the individual elements in the list are (nspec, nspat), but each image can have a different shape.
skymodel_stack (list):
            List of the model sky images, each of which is a float `numpy.ndarray`_. Length of the list is nimgs.
Shapes of the individual elements in the list are (nspec, nspat), but each image can have a different shape.
inmask_stack (list):
List of input good pixel masks (i.e. `True` values are *good*, `False` values are *bad*.), each of which
is a boolean `numpy.ndarray`_. Length of the list is nimgs. Shapes of the individual elements in the list
are (nspec, nspat), but each image can have a different shape.
waveimg_stack (list):
            List of the wavelength images, each of which is a float `numpy.ndarray`_. Length of the list is nimgs.
Shapes of the individual elements in the list are (nspec, nspat), but each image can have a different shape.
thismask_stack (list):
List of boolean arrays containing the masks indicating which pixels are on
the slit in question. `True` values are on the slit;
`False` values are off the slit. Length of the list is nimgs. Shapes of the individual elements in the list
are (nspec, nspat), but each image can have a different shape.
weights (list or str, optional):
The weights used when combining the rectified images (see
:func:`weighted_combine`). If weights is set to 'uniform' then a
            uniform weighting is used. Weights are broadcast to the correct size
            of the image stacks (see :func:`broadcast_weights`), as necessary.
            If a list is passed it must have a length of nimgs. The individual elements
            of the list can either be floats, indicating the weight to be used for each image, or
            arrays with shape = (nspec,) or shape = (nspec, nspat), indicating pixel weights
            for the individual images. Weights are broadcast to the correct size
of the image stacks (see :func:`broadcast_weights`), as necessary.
(TODO: JFH I think the str option should be changed here, but am leaving it for now.)
spat_samp_fact (float, optional):
Spatial sampling for 2d coadd spatial bins in pixels. A value > 1.0
(i.e. bigger pixels) will downsample the images spatially, whereas <
1.0 will oversample. Default = 1.0
        wave_grid (`numpy.ndarray`_):
            Wavelength grid in angstroms onto which the image stacks
            will be rectified. The code will automatically choose the
            subset of this grid encompassing the wavelength coverage of
            the image stacks provided (see `waveimg_stack`).
maskdef_dict (:obj:`dict`, optional): Dictionary containing all the maskdef info. The quantities saved
are: maskdef_id, maskdef_objpos, maskdef_slitcen, maskdef_designtab. To learn what
they are see :class:`~pypeit.slittrace.SlitTraceSet` datamodel.
interp_dspat (bool, optional):
            Interpolate in the spatial coordinate image to facilitate running
through core.extract.local_skysub_extract. This can be slow. Default=True.
Returns:
dict: Returns a dict with the following keys:
            - wave_bins: Wavelength bin edges used for the rectification
            - dspat_bins: Spatial (relative position) bin edges used for the rectification
            - wave_mid: Wavelength bin centers of the rectified image
            - wave_min: Lower edge of each wavelength bin
            - wave_max: Upper edge of each wavelength bin
            - dspat_mid: Spatial bin centers of the rectified image
- sciimg: float ndarray shape = (nspec_coadd, nspat_coadd):
Rectified and coadded science image
- sciivar: float ndarray shape = (nspec_coadd, nspat_coadd):
Rectified and coadded inverse variance image with correct
error propagation
- imgminsky: float ndarray shape = (nspec_coadd,
nspat_coadd): Rectified and coadded sky subtracted image
- outmask: bool ndarray shape = (nspec_coadd, nspat_coadd):
Output mask for rectified and coadded images. True = Good,
False=Bad.
- nused: int ndarray shape = (nspec_coadd, nspat_coadd):
Image of integers indicating the number of images from the
image stack that contributed to each pixel
- waveimg: float ndarray shape = (nspec_coadd, nspat_coadd):
The averaged wavelength image corresponding to the
rectified and coadded data.
- dspat: float ndarray shape = (nspec_coadd, nspat_coadd):
The average spatial offsets in pixels from the reference
trace trace_stack corresponding to the rectified and
coadded data.
- nspec: int
- nspat: int
- maskdef_id: int
- maskdef_slitcen: int
- maskdef_objpos: int
- maskdef_designtab: int
"""
#nimgs, nspec, nspat = sciimg_stack.shape
# TODO -- If weights is a numpy.ndarray, how can this not crash?
# Maybe the doc string above is inaccurate?
    nimgs = len(sciimg_stack)
    if isinstance(weights, str) and weights == 'uniform':
msgs.info('No weights were provided. Using uniform weights.')
weights = np.ones(nimgs)/float(nimgs)
shape_list = [sciimg.shape for sciimg in sciimg_stack]
weights_stack = combine.broadcast_lists_of_weights(weights, shape_list)
# Determine the wavelength grid that we will use for the current slit/order
wave_bins = get_wave_bins(thismask_stack, waveimg_stack, wave_grid)
dspat_bins, dspat_stack = get_spat_bins(thismask_stack, ref_trace_stack, spat_samp_fact=spat_samp_fact)
skysub_stack = [sciimg - skymodel for sciimg, skymodel in zip(sciimg_stack, skymodel_stack)]
#sci_list = [weights_stack, sciimg_stack, skysub_stack, tilts_stack,
# waveimg_stack, dspat_stack]
sci_list = [weights_stack, sciimg_stack, skysub_stack, waveimg_stack, dspat_stack]
var_list = [[utils.inverse(sciivar) for sciivar in sciivar_stack]]
sci_list_rebin, var_list_rebin, norm_rebin_stack, nsmp_rebin_stack \
= rebin2d(wave_bins, dspat_bins, waveimg_stack, dspat_stack, thismask_stack,
inmask_stack, sci_list, var_list)
# Now compute the final stack with sigma clipping
sigrej = 3.0
maxiters = 10
# sci_list_rebin[0] = rebinned weights image stack
# sci_list_rebin[1:] = stacks of images that we want to weighted combine
# sci_list_rebin[2] = rebinned sciimg-sky_model images that we used for the sigma clipping
# NOTE: outmask is a gpm
sci_list_out, var_list_out, outmask, nused \
= combine.weighted_combine(sci_list_rebin[0], sci_list_rebin[1:], var_list_rebin,
norm_rebin_stack != 0, sigma_clip=True,
sigma_clip_stack=sci_list_rebin[2], sigrej=sigrej,
maxiters=maxiters)
sciimg, imgminsky, waveimg, dspat = sci_list_out
sciivar = utils.inverse(var_list_out[0])
# Compute the midpoints vectors, and lower/upper bins of the rectified image in spectral and spatial directions
wave_mid = ((wave_bins + np.roll(wave_bins,1))/2.0)[1:]
wave_min = wave_bins[:-1]
wave_max = wave_bins[1:]
dspat_mid = ((dspat_bins + np.roll(dspat_bins,1))/2.0)[1:]
# Interpolate the dspat images wherever the coadds are masked
# because a given pixel was not sampled. This is done because the
# dspat image is not allowed to have holes if it is going to work
# with local_skysub_extract
nspec_coadd, nspat_coadd = imgminsky.shape
spat_img_coadd, spec_img_coadd = np.meshgrid(np.arange(nspat_coadd), np.arange(nspec_coadd))
if np.any(np.logical_not(outmask)) and interp_dspat:
points_good = np.stack((spec_img_coadd[outmask], spat_img_coadd[outmask]), axis=1)
points_bad = np.stack((spec_img_coadd[np.logical_not(outmask)],
spat_img_coadd[np.logical_not(outmask)]), axis=1)
values_dspat = dspat[outmask]
# JFH Changed to nearest on 5-26-20 because cubic is incredibly slow
dspat_bad = scipy.interpolate.griddata(points_good, values_dspat, points_bad,
method='nearest')
dspat[np.logical_not(outmask)] = dspat_bad
# Points outside the convex hull of the data are set to nan. We
# identify those and simply assume them values from the
# dspat_img_fake, which is what dspat would be on a regular
# perfectly rectified image grid.
nanpix = np.isnan(dspat)
if np.any(nanpix):
dspat_img_fake = spat_img_coadd + dspat_mid[0]
dspat[nanpix] = dspat_img_fake[nanpix]
else:
dspat_img_fake = spat_img_coadd + dspat_mid[0]
dspat[np.logical_not(outmask)] = dspat_img_fake[np.logical_not(outmask)]
# initiate maskdef parameters
# TODO I don't think this maskdef code belongs here. It should be rather be moved to the coadd2d class. This
# is a core method that coadds images without any references to mask design tables
maskdef_id = None
maskdef_designtab = None
new_maskdef_objpos = None
new_maskdef_slitcen = None
if maskdef_dict is not None and maskdef_dict['maskdef_id'] is not None:
maskdef_id = maskdef_dict['maskdef_id']
# update maskdef_objpos and maskdef_slitcen with the new value in the new slit
if maskdef_dict['maskdef_objpos'] is not None and maskdef_dict['maskdef_slitcen'] is not None:
new_maskdef_objpos = np.searchsorted(dspat[nspec_coadd//2, :], maskdef_dict['maskdef_objpos'])
# maskdef_slitcen is the old slit center
new_maskdef_slitcen = np.searchsorted(dspat[nspec_coadd//2, :], maskdef_dict['maskdef_slitcen'])
if maskdef_dict['maskdef_designtab'] is not None:
maskdef_designtab = maskdef_dict['maskdef_designtab']
    # TODO The rebin_stacks are included now for debugging but keeping them all may blow up memory usage so consider
# removing
return dict(wave_bins=wave_bins, dspat_bins=dspat_bins, wave_mid=wave_mid, wave_min=wave_min,
wave_max=wave_max, dspat_mid=dspat_mid, sciimg=sciimg, sciivar=sciivar,
imgminsky=imgminsky, outmask=outmask, nused=nused, waveimg=waveimg, # tilts=tilts,
dspat=dspat, nspec=imgminsky.shape[0], nspat=imgminsky.shape[1],
maskdef_id=maskdef_id, maskdef_slitcen=new_maskdef_slitcen, maskdef_objpos=new_maskdef_objpos,
maskdef_designtab=maskdef_designtab)
# rebin_weights_stack=sci_list_rebin[0], rebin_sciimg_stack=sci_list_rebin[1],
# rebin_imgminsky_stack=sci_list_rebin[2], rebin_tilts_stack=sci_list_rebin[3],
# rebin_waveimg_stack=sci_list_rebin[4], rebin_dspat_stack=sci_list_rebin[5],
# rebin_var_stack=var_list_rebin[0], rebin_nsmp_stack=nsmp_rebin_stack,
def rebin2d(spec_bins, spat_bins, waveimg_stack, spatimg_stack,
thismask_stack, inmask_stack, sci_list, var_list):
"""
    Rebin a set of images and propagate variance onto a new spectral and spatial grid. This routine effectively
    "rectifies" images using np.histogram2d, which is extremely fast and performs
    nearest grid point interpolation.
Parameters
----------
spec_bins: `numpy.ndarray`_
Spectral bins to rebin to.
float, shape = (nspec_rebin)
spat_bins: `numpy.ndarray`_
Spatial bins to rebin to.
float ndarray, shape = (nspat_rebin)
waveimg_stack: `numpy.ndarray`_
Stack of nimgs wavelength images with shape = (nspec, nspat) each
float , shape = (nimgs, nspec, nspat)
spatimg_stack: `numpy.ndarray`_
Stack of nimgs spatial position images with shape = (nspec, nspat) each
float, shape = (nimgs, nspec, nspat)
thismask_stack: `numpy.ndarray`_
        Stack of nimgs images with shape = (nspec, nspat) indicating the locations of the pixels on an image that
are on the slit in question.
bool, shape = (nimgs, nspec, nspat)
inmask_stack: `numpy.ndarray`_
        Stack of nimgs images with shape = (nspec, nspat) indicating which pixels on an image are good (unmasked).
True = Good, False = Bad
bool ndarray, shape = (nimgs, nspec, nspat)
sci_list: list
        Nested list of images, i.e. list of lists of images, where sci_list[i][j] is an image of shape = (nspec, nspat);
        the shape can be different for each image. The ith index is the image type, i.e. sciimg, skysub, tilts, waveimg,
        and the jth index is the exposure or image number, i.e. nimgs. These images are to be rebinned onto
        the common grid.
var_list: list
Nested list of variance images, i.e. list of lists of images. The format is the same as for sci_list, but
        note that sci_list and var_list can have different lengths. Since this routine performs an NGP rebinning,
        it effectively computes the average of the science values landing on a pixel. This means that the science
        is weighted by 1/norm_rebin_stack, and hence the variances must be weighted by that factor squared,
        which is why they must be input here as a separate list.
Returns
-------
sci_list_out: list. The list of ndarray rebinned images
with new shape (nimgs, nspec_rebin, nspat_rebin)
var_list_out: list. The list of ndarray rebinned variance
images with correct error propagation with shape (nimgs,
nspec_rebin, nspat_rebin)
norm_rebin_stack: int ndarray, shape (nimgs, nspec_rebin,
nspat_rebin). An image stack indicating the integer
occupation number of a given pixel. In other words, this
number would be zero for empty bins, one for bins that
were populated by a single pixel, etc. This image takes
the input inmask_stack into account. The output mask for
        each image can be formed via outmask_rebin_stack =
(norm_rebin_stack > 0)
nsmp_rebin_stack: int ndarray, shape (nimgs, nspec_rebin,
nspat_rebin). An image stack indicating the integer
        occupation number of a given pixel taking only the
        thismask_stack into account, but not the inmask_stack.
        This image is mainly constructed for
        bookkeeping purposes, as it represents the number of times
each pixel in the rebin image was populated taking only
the "geometry" of the rebinning into account (i.e. the
thismask_stack), but not the masking (inmask_stack).
"""
    # allocate the output images
nimgs = len(sci_list[0])
nspec_rebin = spec_bins.size - 1
nspat_rebin = spat_bins.size - 1
shape_out = (nimgs, nspec_rebin, nspat_rebin)
nsmp_rebin_stack = np.zeros(shape_out)
norm_rebin_stack = np.zeros(shape_out)
sci_list_out = []
for ii in range(len(sci_list)):
sci_list_out.append(np.zeros(shape_out))
var_list_out = []
for jj in range(len(var_list)):
var_list_out.append(np.zeros(shape_out))
for img, (waveimg, spatimg, thismask, inmask) in enumerate(zip(waveimg_stack, spatimg_stack, thismask_stack, inmask_stack)):
spec_rebin_this = waveimg[thismask]
spat_rebin_this = spatimg[thismask]
        # This first image is purely for bookkeeping purposes to determine the number of times each pixel
        # could have been sampled
nsmp_rebin_stack[img, :, :], spec_edges, spat_edges = np.histogram2d(spec_rebin_this, spat_rebin_this,
bins=[spec_bins, spat_bins], density=False)
finmask = thismask & inmask
spec_rebin = waveimg[finmask]
spat_rebin = spatimg[finmask]
norm_img, spec_edges, spat_edges = np.histogram2d(spec_rebin, spat_rebin,
bins=[spec_bins, spat_bins], density=False)
norm_rebin_stack[img, :, :] = norm_img
# Rebin the science images
for indx, sci in enumerate(sci_list):
weigh_sci, spec_edges, spat_edges = np.histogram2d(spec_rebin, spat_rebin,
bins=[spec_bins, spat_bins], density=False,
weights=sci[img][finmask])
sci_list_out[indx][img, :, :] = (norm_img > 0.0) * weigh_sci/(norm_img + (norm_img == 0.0))
# Rebin the variance images, note the norm_img**2 factor for correct error propagation
for indx, var in enumerate(var_list):
weigh_var, spec_edges, spat_edges = np.histogram2d(spec_rebin, spat_rebin,
bins=[spec_bins, spat_bins], density=False,
weights=var[img][finmask])
var_list_out[indx][img, :, :] = (norm_img > 0.0)*weigh_var/(norm_img + (norm_img == 0.0))**2
return sci_list_out, var_list_out, norm_rebin_stack.astype(int), nsmp_rebin_stack.astype(int)
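# The following is a minimal, self-contained sketch (added for illustration, not
# part of the reduction pipeline above) of the nearest-grid-point rebinning idea
# that rebin2d is built on: a weighted np.histogram2d accumulates the science
# values landing in each output bin, an unweighted np.histogram2d counts how many
# pixels landed there, and their ratio gives the per-bin average. All array names
# and sizes here are invented for the example and the function is never called.
def _example_ngp_rebin():
    rng = np.random.default_rng(0)
    nspec, nspat = 100, 50
    # Fake wavelength and spatial-offset coordinate images plus a fake science image
    waveimg = np.linspace(4000., 5000., nspec)[:, None] * np.ones((1, nspat))
    spatimg = np.ones((nspec, 1)) * np.arange(nspat, dtype=float)[None, :]
    sciimg = np.full((nspec, nspat), 2.0) + rng.normal(scale=0.1, size=(nspec, nspat))
    gpm = np.ones((nspec, nspat), dtype=bool)  # good-pixel mask
    # Edges of the rectified output grid in wavelength and spatial offset
    wave_bins = np.linspace(4000., 5000., 51)
    spat_bins = np.linspace(-0.5, nspat - 0.5, 26)
    spec_pix, spat_pix = waveimg[gpm], spatimg[gpm]
    # Occupation number of each output bin (plays the role of norm_img above)
    norm_img, _, _ = np.histogram2d(spec_pix, spat_pix, bins=[wave_bins, spat_bins])
    # Weighted sum of the science values that landed in each output bin
    weigh_sci, _, _ = np.histogram2d(spec_pix, spat_pix, bins=[wave_bins, spat_bins],
                                     weights=sciimg[gpm])
    # NGP average, protected against division by zero in empty bins
    sci_rebin = (norm_img > 0) * weigh_sci / (norm_img + (norm_img == 0))
    return sci_rebin, norm_img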
# ---- /leetcode.com/python/322_Coin_Change.py (partho-maple/coding-interview-gym, MIT) ----
import sys
# Top-Down approach. But this solution has exceeded the Time Limit
class Solution(object):
def coinChange(self, coins, amount):
"""
:type coins: List[int]
:type amount: int
:rtype: int
"""
if amount < 1:
return 0
numOfCoins = [float('inf') for n in range(amount + 1)]
numOfCoins[0] = 0
return self.coinChangeHelper(coins, amount, numOfCoins)
def coinChangeHelper(self, coins, remainder, numOfCoins):
        # minimum coins to make change for a negative amount is -1.
        # this is just a base case we arbitrarily define
if remainder < 0:
return -1
# the minimum number of coins to make change for 0 is always 0
if remainder == 0:
return 0
# If we already have the answer cached, just return it
if numOfCoins[remainder] != float('inf'):
return numOfCoins[remainder]
# No answer yet. Try each coin as the last coin in the change that we make for the amount
for coin in coins:
changeResultForRestAmount = self.coinChangeHelper(coins, remainder - coin, numOfCoins)
if changeResultForRestAmount >= 0:
numOfCoins[remainder] = min(numOfCoins[remainder], changeResultForRestAmount + 1)
return numOfCoins[remainder] if numOfCoins[remainder] != float('inf') else -1
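# Note on the two top-down variants: the version above never caches the
# "cannot be formed" outcome (numOfCoins[remainder] is left at infinity when no
# combination works), so unreachable sub-amounts are re-explored on every call.
# The version below also stores -1 for those sub-amounts, which is the main
# reason it stays within the time limit.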
# Top-Down approach (Recursive). This solution has been accepted
class Solution(object):
def coinChange(self, coins, amount):
"""
:type coins: List[int]
:type amount: int
:rtype: int
"""
if amount < 1:
return 0
numOfCoins = [float('inf') for n in range(amount + 1)]
numOfCoins[0] = 0
return self.coinChangeHelper(coins, amount, numOfCoins)
def coinChangeHelper(self, coins, remainder, numOfCoins):
        # minimum coins to make change for a negative amount is -1.
        # this is just a base case we arbitrarily define
if remainder < 0:
return -1
# the minimum number of coins to make change for 0 is always 0
if remainder == 0:
return 0
# If we already have the answer cached, just return it
if numOfCoins[remainder] != float('inf'):
return numOfCoins[remainder]
# No answer yet. Try each coin as the last coin in the change that we make for the amount
        minimum = sys.maxsize  # sentinel larger than any attainable coin count
for coin in coins:
changeResultForRestAmount = self.coinChangeHelper(coins, remainder - coin, numOfCoins)
if changeResultForRestAmount >= 0 and changeResultForRestAmount < minimum:
minimum = changeResultForRestAmount + 1
        numOfCoins[remainder] = -1 if minimum == sys.maxsize else minimum
return numOfCoins[remainder]
# Bottom-Up approach (Iterative). This solution has been accepted
class Solution(object):
def coinChange(self, coins, amount):
"""
:type coins: List[int]
:type amount: int
:rtype: int
"""
if amount < 1:
return 0
numOfCoins = [float('inf') for n in range(amount + 1)]
numOfCoins[0] = 0
for coin in coins:
for n in range(amount + 1):
if coin <= n:
numOfCoins[n] = min(numOfCoins[n], numOfCoins[n - coin] + 1)
return numOfCoins[amount] if numOfCoins[amount] != float('inf') else -1
sol = Solution()
minCoin = sol.coinChange([2], 3)
print("Minimum number of coin: ", minCoin)
# ---- /python/testData/refactoring/extractmethod/ConditionOfConditionalExpression.before.py (JetBrains/intellij-community, Apache-2.0) ----
def f(n):
return n * 2 if <selection>n</selection> else n + 1
# ---- /core/domain/platform_parameter_domain_test.py (oppia/oppia, Apache-2.0) ----
# coding: utf-8
#
# Copyright 2020 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for the domain objects relating to platform parameters."""
from __future__ import annotations
import collections
from core import feconf
from core import utils
from core.domain import platform_parameter_domain as parameter_domain
from core.tests import test_utils
from typing import Dict, Final, List, Optional, Union
ServerMode = parameter_domain.ServerMode
class PlatformParameterChangeTests(test_utils.GenericTestBase):
"""Test for the PlatformParameterChange class."""
CMD_EDIT_RULES: Final = (
parameter_domain.PlatformParameterChange.CMD_EDIT_RULES
)
def test_param_change_object_with_missing_cmd_raises_exception(
self
) -> None:
with self.assertRaisesRegex(
utils.ValidationError, 'Missing cmd key in change dict'):
parameter_domain.PlatformParameterChange({'invalid': 'data'})
def test_param_change_object_with_invalid_cmd_raises_exception(
self
) -> None:
with self.assertRaisesRegex(
utils.ValidationError, 'Command invalid is not allowed'):
parameter_domain.PlatformParameterChange({'cmd': 'invalid'})
def test_param_change_object_missing_attribute_in_cmd_raises_exception(
self
) -> None:
with self.assertRaisesRegex(
utils.ValidationError,
'The following required attributes are missing: new_rules'):
parameter_domain.PlatformParameterChange({
'cmd': self.CMD_EDIT_RULES
})
def test_param_change_object_with_extra_attribute_in_cmd_raises_exception(
self
) -> None:
param_change_dict: Dict[str, Union[str, List[str]]] = {
'cmd': self.CMD_EDIT_RULES,
'new_rules': [],
'invalid': 'invalid'
}
with self.assertRaisesRegex(
utils.ValidationError,
'The following extra attributes are present: invalid'):
parameter_domain.PlatformParameterChange(param_change_dict)
def test_param_change_object_with_valid_data_success(self) -> None:
param_change_dict: Dict[str, Union[str, List[str]]] = {
'cmd': self.CMD_EDIT_RULES,
'new_rules': []
}
param_change_object = (
parameter_domain.PlatformParameterChange(param_change_dict)
)
self.assertEqual(
param_change_object.cmd, self.CMD_EDIT_RULES)
self.assertEqual(
param_change_object.new_rules, [])
def test_to_dict_returns_correct_dict(self) -> None:
param_change_dict: Dict[str, Union[str, List[str]]] = {
'cmd': self.CMD_EDIT_RULES,
'new_rules': []
}
param_change_object = parameter_domain.PlatformParameterChange(
param_change_dict)
self.assertEqual(
param_change_object.to_dict(),
param_change_dict)
class EvaluationContextTests(test_utils.GenericTestBase):
"""Test for the EvaluationContext."""
def test_create_context_from_dict_returns_correct_instance(self) -> None:
context = parameter_domain.EvaluationContext.from_dict(
{
'platform_type': 'Android',
'app_version': '1.0.0',
},
{
'server_mode': ServerMode.DEV,
},
)
self.assertEqual(context.platform_type, 'Android')
self.assertEqual(context.app_version, '1.0.0')
self.assertEqual(context.server_mode, ServerMode.DEV)
def test_is_valid_with_invalid_platform_type_returns_false(self) -> None:
context = parameter_domain.EvaluationContext.from_dict(
{
'platform_type': 'invalid',
'app_version': '1.0.0',
},
{
'server_mode': ServerMode.DEV,
},
)
self.assertFalse(context.is_valid)
def test_is_valid_with_valid_android_context_returns_true(self) -> None:
context = parameter_domain.EvaluationContext.from_dict(
{
'platform_type': 'Android',
'app_version': '1.0.0',
},
{
'server_mode': ServerMode.DEV,
},
)
self.assertTrue(context.is_valid)
def test_is_valid_with_valid_web_context_returns_true(self) -> None:
context = parameter_domain.EvaluationContext.from_dict(
{
'platform_type': 'Web',
'app_version': None,
},
{
'server_mode': ServerMode.DEV,
},
)
self.assertTrue(context.is_valid)
def test_is_valid_with_valid_backend_context_returns_true(self) -> None:
context = parameter_domain.EvaluationContext.from_dict(
{
'platform_type': 'Backend',
'app_version': '3.0.0',
},
{
'server_mode': ServerMode.DEV,
},
)
self.assertTrue(context.is_valid)
def test_validate_with_valid_context_passes_without_exception(self) -> None:
context = parameter_domain.EvaluationContext.from_dict(
{
'platform_type': 'Android',
'app_version': '1.0.0',
},
{
'server_mode': ServerMode.DEV,
},
)
context.validate()
def test_validate_with_invalid_platform_type_does_not_raise_exception(
self
) -> None:
context = parameter_domain.EvaluationContext.from_dict(
{
'platform_type': 'invalid',
'app_version': '1.0.0',
},
{
'server_mode': ServerMode.DEV,
},
)
# No exception should be raised since invalid platform types are
# ignored.
context.validate()
def test_validate_with_invalid_app_version_raises_exception(self) -> None:
context = parameter_domain.EvaluationContext.from_dict(
{
'platform_type': 'Android',
'app_version': 'a.a.a',
},
{
'server_mode': ServerMode.DEV,
},
)
with self.assertRaisesRegex(
utils.ValidationError, 'Invalid version \'a.a.a\''):
context.validate()
def test_validate_with_invalid_app_sub_version_numbers_raises_exception(
self
) -> None:
context = parameter_domain.EvaluationContext.from_dict(
{
'platform_type': 'Android',
'app_version': '1.0.0.0',
},
{
'server_mode': ServerMode.DEV,
},
)
with self.assertRaisesRegex(
utils.ValidationError, 'Invalid version \'1.0.0.0\''):
context.validate()
def test_validate_with_invalid_app_version_flavor_raises_exception(
self
) -> None:
context = parameter_domain.EvaluationContext.from_dict(
{
'platform_type': 'Android',
'app_version': '1.0.0-abcedef-invalid',
},
{
'server_mode': ServerMode.DEV,
},
)
with self.assertRaisesRegex(
utils.ValidationError, 'Invalid version flavor \'invalid\''):
context.validate()
def test_validate_with_invalid_server_mode_raises_exception(self) -> None:
MockEnum = collections.namedtuple('MockEnum', ['value'])
mock_enum = MockEnum('invalid')
context = parameter_domain.EvaluationContext.from_dict(
{
'platform_type': 'Android',
'app_version': '1.0.0',
},
{
                # Here we use MyPy ignore because the expected type of the
                # 'server_mode' key is the Enum defined under the name ServerMode,
                # but for testing purposes we are providing a namedtuple
                # (MockEnum), which causes MyPy to throw an error. Thus, to
                # avoid the error, we use ignore here.
'server_mode': mock_enum, # type: ignore[typeddict-item]
},
)
with self.assertRaisesRegex(
utils.ValidationError, 'Invalid server mode \'invalid\''
):
context.validate()
class PlatformParameterFilterTests(test_utils.GenericTestBase):
"""Test for the PlatformParameterFilter."""
def _create_example_context(
self,
platform_type: str = 'Android',
app_version: Optional[str] = '1.2.3',
mode: str = 'DEV'
) -> parameter_domain.EvaluationContext:
"""Creates and returns an EvaluationContext using the given
arguments.
"""
return parameter_domain.EvaluationContext.from_dict(
{
'platform_type': platform_type,
'app_version': app_version,
},
{
'server_mode': getattr(ServerMode, mode),
},
)
def _test_flavor_relation_holds(
self,
version: str,
op: str,
flavor_b: str
) -> None:
"""Helper method to test relation 'flavor_a <op> flavor_b' hold,
where flavor_a is the flavor of the argument 'version'.
"""
filter_domain = (
parameter_domain
.PlatformParameterFilter.from_dict(
{'type': 'app_version_flavor', 'conditions': [[op, flavor_b]]}
)
)
self.assertTrue(filter_domain.evaluate(
self._create_example_context(
app_version=version)))
def _test_flavor_relation_does_not_hold(
self,
version: str,
op: str,
flavor_b: str
) -> None:
"""Helper method to test relation 'flavor_a <op> flavor_b' doesn't
holds, where flavor_a is the flavor of the argument 'version'.
"""
filter_domain = (
parameter_domain
.PlatformParameterFilter.from_dict(
{'type': 'app_version_flavor', 'conditions': [[op, flavor_b]]}
)
)
self.assertFalse(filter_domain.evaluate(
self._create_example_context(
app_version=version)))
def test_create_from_dict_returns_correct_instance(self) -> None:
filter_dict: parameter_domain.PlatformParameterFilterDict = {
'type': 'app_version',
'conditions': [['=', '1.2.3']]
}
filter_domain = (
parameter_domain
.PlatformParameterFilter.from_dict(filter_dict))
self.assertEqual(filter_domain.type, 'app_version')
self.assertEqual(filter_domain.conditions, [['=', '1.2.3']])
def test_to_dict_returns_correct_dict(self) -> None:
filter_dict: parameter_domain.PlatformParameterFilterDict = {
'type': 'app_version',
'conditions': [['=', '1.2.3']]
}
filter_domain = (
parameter_domain
.PlatformParameterFilter.from_dict(filter_dict))
self.assertEqual(filter_domain.to_dict(), filter_dict)
def test_evaluate_dev_server_mode_filter_with_dev_env_returns_true(
self
) -> None:
filter_dict: parameter_domain.PlatformParameterFilterDict = {
'type': 'server_mode',
'conditions': [['=', 'dev']]
}
filter_domain = (
parameter_domain
.PlatformParameterFilter.from_dict(filter_dict))
dev_context = self._create_example_context(mode='DEV')
self.assertTrue(filter_domain.evaluate(dev_context))
def test_evaluate_dev_server_mode_filter_with_prod_env_returns_false(
self
) -> None:
filter_dict: parameter_domain.PlatformParameterFilterDict = {
'type': 'server_mode',
'conditions': [['=', 'dev']]
}
filter_domain = (
parameter_domain
.PlatformParameterFilter.from_dict(filter_dict))
prod_context = self._create_example_context(mode='PROD')
self.assertFalse(filter_domain.evaluate(prod_context))
def test_eval_backend_client_filter_with_backend_client_returns_true(
self
) -> None:
filter_dict: parameter_domain.PlatformParameterFilterDict = {
'type': 'platform_type',
'conditions': [['=', 'Backend']]
}
filter_domain = (
parameter_domain
.PlatformParameterFilter.from_dict(filter_dict))
web_context = self._create_example_context(platform_type='Backend')
self.assertTrue(filter_domain.evaluate(web_context))
def test_evaluate_web_client_filter_with_web_client_returns_true(
self
) -> None:
filter_dict: parameter_domain.PlatformParameterFilterDict = {
'type': 'platform_type',
'conditions': [['=', 'Web']]
}
filter_domain = (
parameter_domain
.PlatformParameterFilter.from_dict(filter_dict))
web_context = self._create_example_context(platform_type='Web')
self.assertTrue(filter_domain.evaluate(web_context))
def test_evaluate_web_client_filter_with_native_client_returns_false(
self
) -> None:
filter_dict: parameter_domain.PlatformParameterFilterDict = {
'type': 'platform_type',
'conditions': [['=', 'Web']]
}
filter_domain = (
parameter_domain
.PlatformParameterFilter.from_dict(filter_dict))
native_context = self._create_example_context(platform_type='Android')
self.assertFalse(filter_domain.evaluate(native_context))
def test_evaluate_eq_version_filter_with_same_version_returns_true(
self
) -> None:
filter_dict: parameter_domain.PlatformParameterFilterDict = {
'type': 'app_version',
'conditions': [['=', '1.2.3']]
}
filter_domain = (
parameter_domain
.PlatformParameterFilter.from_dict(filter_dict))
self.assertTrue(filter_domain.evaluate(
self._create_example_context(app_version='1.2.3')))
def test_evaluate_eq_version_filter_with_diff_version_returns_false(
self
) -> None:
filter_dict: parameter_domain.PlatformParameterFilterDict = {
'type': 'app_version',
'conditions': [['=', '1.2.3']]
}
filter_domain = (
parameter_domain
.PlatformParameterFilter.from_dict(filter_dict))
self.assertFalse(filter_domain.evaluate(
self._create_example_context(app_version='1.2.4')))
def test_evaluate_gt_version_filter_with_small_version_returns_false(
self
) -> None:
filter_dict: parameter_domain.PlatformParameterFilterDict = {
'type': 'app_version',
'conditions': [['>', '1.2.3']]
}
filter_domain = (
parameter_domain
.PlatformParameterFilter.from_dict(filter_dict))
self.assertFalse(filter_domain.evaluate(
self._create_example_context(app_version='0.2.3')))
self.assertFalse(filter_domain.evaluate(
self._create_example_context(app_version='1.1.2')))
def test_evaluate_gt_version_filter_with_same_version_returns_false(
self
) -> None:
filter_dict: parameter_domain.PlatformParameterFilterDict = {
'type': 'app_version',
'conditions': [['>', '1.2.3']]
}
filter_domain = (
parameter_domain
.PlatformParameterFilter.from_dict(filter_dict))
self.assertFalse(filter_domain.evaluate(
self._create_example_context(app_version='1.2.3')))
def test_evaluate_gt_version_filter_with_large_version_returns_true(
self
) -> None:
filter_dict: parameter_domain.PlatformParameterFilterDict = {
'type': 'app_version',
'conditions': [['>', '1.2.3']]
}
filter_domain = (
parameter_domain
.PlatformParameterFilter.from_dict(filter_dict))
self.assertTrue(filter_domain.evaluate(
self._create_example_context(app_version='1.2.4')))
self.assertTrue(filter_domain.evaluate(
self._create_example_context(app_version='1.3.0')))
self.assertTrue(filter_domain.evaluate(
self._create_example_context(app_version='2.0.0')))
def test_evaluate_gte_version_filter_with_small_version_returns_false(
self
) -> None:
filter_dict: parameter_domain.PlatformParameterFilterDict = {
'type': 'app_version',
'conditions': [['>=', '1.2.3']]
}
filter_domain = (
parameter_domain
.PlatformParameterFilter.from_dict(filter_dict))
self.assertFalse(filter_domain.evaluate(
self._create_example_context(app_version='0.2.3')))
self.assertFalse(filter_domain.evaluate(
self._create_example_context(app_version='1.1.2')))
def test_evaluate_gte_version_filter_with_same_version_returns_true(
self
) -> None:
filter_dict: parameter_domain.PlatformParameterFilterDict = {
'type': 'app_version',
'conditions': [['>=', '1.2.3']]
}
filter_domain = (
parameter_domain
.PlatformParameterFilter.from_dict(filter_dict))
self.assertTrue(filter_domain.evaluate(
self._create_example_context(app_version='1.2.3')))
def test_evaluate_gte_version_filter_with_large_version_returns_true(
self
) -> None:
filter_dict: parameter_domain.PlatformParameterFilterDict = {
'type': 'app_version',
'conditions': [['>=', '1.2.3']]
}
filter_domain = (
parameter_domain
.PlatformParameterFilter.from_dict(filter_dict))
self.assertTrue(filter_domain.evaluate(
self._create_example_context(app_version='1.2.4')))
self.assertTrue(filter_domain.evaluate(
self._create_example_context(app_version='1.3.0')))
self.assertTrue(filter_domain.evaluate(
self._create_example_context(app_version='2.0.0')))
def test_evaluate_lt_version_filter_with_small_version_returns_true(
self
) -> None:
filter_dict: parameter_domain.PlatformParameterFilterDict = {
'type': 'app_version',
'conditions': [['<', '1.2.3']]
}
filter_domain = (
parameter_domain
.PlatformParameterFilter.from_dict(filter_dict))
self.assertTrue(filter_domain.evaluate(
self._create_example_context(app_version='0.3.4')))
self.assertTrue(filter_domain.evaluate(
self._create_example_context(app_version='1.1.0')))
self.assertTrue(filter_domain.evaluate(
self._create_example_context(app_version='1.1.2')))
self.assertTrue(filter_domain.evaluate(
self._create_example_context(app_version='1.2.2')))
def test_evaluate_lt_version_filter_with_same_version_returns_false(
self
) -> None:
filter_dict: parameter_domain.PlatformParameterFilterDict = {
'type': 'app_version',
'conditions': [['<', '1.2.3']]
}
filter_domain = (
parameter_domain
.PlatformParameterFilter.from_dict(filter_dict))
self.assertFalse(filter_domain.evaluate(
self._create_example_context(app_version='1.2.3')))
def test_evaluate_lt_version_filter_with_large_version_returns_false(
self
) -> None:
filter_dict: parameter_domain.PlatformParameterFilterDict = {
'type': 'app_version',
'conditions': [['<', '1.2.3']]
}
filter_domain = (
parameter_domain
.PlatformParameterFilter.from_dict(filter_dict))
self.assertFalse(filter_domain.evaluate(
self._create_example_context(app_version='1.2.4')))
self.assertFalse(filter_domain.evaluate(
self._create_example_context(app_version='1.3.0')))
self.assertFalse(filter_domain.evaluate(
self._create_example_context(app_version='1.10.0')))
self.assertFalse(filter_domain.evaluate(
self._create_example_context(app_version='2.0.0')))
def test_evaluate_lte_version_filter_with_small_version_returns_true(
self
) -> None:
filter_dict: parameter_domain.PlatformParameterFilterDict = {
'type': 'app_version',
'conditions': [['<=', '1.2.3']]
}
filter_domain = (
parameter_domain
.PlatformParameterFilter.from_dict(filter_dict))
self.assertTrue(filter_domain.evaluate(
self._create_example_context(app_version='0.3.4')))
self.assertTrue(filter_domain.evaluate(
self._create_example_context(app_version='1.1.0')))
self.assertTrue(filter_domain.evaluate(
self._create_example_context(app_version='1.2.2')))
def test_evaluate_lte_version_filter_with_same_version_returns_true(
self
) -> None:
filter_dict: parameter_domain.PlatformParameterFilterDict = {
'type': 'app_version',
'conditions': [['<=', '1.2.3']]
}
filter_domain = (
parameter_domain
.PlatformParameterFilter.from_dict(filter_dict))
self.assertTrue(filter_domain.evaluate(
self._create_example_context(app_version='1.2.3')))
def test_evaluate_lte_version_filter_with_large_version_returns_false(
self
) -> None:
filter_dict: parameter_domain.PlatformParameterFilterDict = {
'type': 'app_version',
'conditions': [['<=', '1.2.3']]
}
filter_domain = (
parameter_domain
.PlatformParameterFilter.from_dict(filter_dict))
self.assertFalse(filter_domain.evaluate(
self._create_example_context(app_version='1.2.4')))
self.assertFalse(filter_domain.evaluate(
self._create_example_context(app_version='1.3.0')))
self.assertFalse(filter_domain.evaluate(
self._create_example_context(app_version='1.10.0')))
self.assertFalse(filter_domain.evaluate(
self._create_example_context(app_version='2.0.0')))
def test_evaluate_test_version_with_eq_test_cond_returns_true(self) -> None:
self._test_flavor_relation_holds(
'1.0.0-abcdef-test', '=', 'test')
def test_evaluate_test_version_with_eq_alpha_cond_returns_false(
self
) -> None:
self._test_flavor_relation_does_not_hold(
'1.0.0-abcdef-test', '=', 'alpha')
def test_evaluate_test_version_with_eq_beta_cond_returns_false(
self
) -> None:
self._test_flavor_relation_does_not_hold(
'1.0.0-abcdef-test', '=', 'beta')
def test_evaluate_test_version_with_eq_release_cond_returns_false(
self
) -> None:
self._test_flavor_relation_does_not_hold(
'1.0.0-abcdef-test', '=', 'release')
def test_evaluate_test_version_with_lt_test_cond_returns_false(
self
) -> None:
self._test_flavor_relation_does_not_hold(
'1.0.0-abcdef-test', '<', 'test')
def test_evaluate_test_version_with_lt_alpha_cond_returns_true(
self
) -> None:
self._test_flavor_relation_holds(
'1.0.0-abcdef-test', '<', 'alpha')
def test_evaluate_test_version_with_lt_beta_cond_returns_true(
self
) -> None:
self._test_flavor_relation_holds(
'1.0.0-abcdef-test', '<', 'beta')
def test_evaluate_test_version_with_lt_release_cond_returns_true(
self
) -> None:
self._test_flavor_relation_holds(
'1.0.0-abcdef-test', '<', 'release')
def test_evaluate_test_version_with_lte_test_cond_returns_true(
self
) -> None:
self._test_flavor_relation_holds(
'1.0.0-abcdef-test', '<=', 'test')
def test_evaluate_test_version_with_lte_alpha_cond_returns_true(
self
) -> None:
self._test_flavor_relation_holds(
'1.0.0-abcdef-test', '<=', 'alpha')
def test_evaluate_test_version_with_lte_beta_cond_returns_true(
self
) -> None:
self._test_flavor_relation_holds(
'1.0.0-abcdef-test', '<=', 'beta')
def test_evaluate_test_version_with_lte_release_cond_returns_true(
self
) -> None:
self._test_flavor_relation_holds(
'1.0.0-abcdef-test', '<=', 'release')
def test_evaluate_test_version_with_gt_test_cond_returns_false(
self
) -> None:
self._test_flavor_relation_does_not_hold(
'1.0.0-abcdef-test', '>', 'test')
def test_evaluate_test_version_with_gt_alpha_cond_returns_false(
self
) -> None:
self._test_flavor_relation_does_not_hold(
'1.0.0-abcdef-test', '>', 'alpha')
def test_evaluate_test_version_with_gt_beta_cond_returns_false(
self
) -> None:
self._test_flavor_relation_does_not_hold(
'1.0.0-abcdef-test', '>', 'beta')
def test_evaluate_test_version_with_gt_release_cond_returns_false(
self
) -> None:
self._test_flavor_relation_does_not_hold(
'1.0.0-abcdef-test', '>', 'release')
def test_evaluate_test_version_with_gte_test_cond_returns_true(
self
) -> None:
self._test_flavor_relation_holds(
'1.0.0-abcdef-test', '>=', 'test')
def test_evaluate_test_version_with_gte_alpha_cond_returns_false(
self
) -> None:
self._test_flavor_relation_does_not_hold(
'1.0.0-abcdef-test', '>=', 'alpha')
def test_evaluate_test_version_with_gte_beta_cond_returns_false(
self
) -> None:
self._test_flavor_relation_does_not_hold(
'1.0.0-abcdef-test', '>=', 'beta')
def test_evaluate_test_version_with_gte_release_cond_returns_false(
self
) -> None:
self._test_flavor_relation_does_not_hold(
'1.0.0-abcdef-test', '>=', 'release')
def test_evaluate_alpha_version_with_eq_test_cond_returns_false(
self
) -> None:
self._test_flavor_relation_does_not_hold(
'1.0.0-abcdef-alpha', '=', 'test')
def test_evaluate_alpha_version_with_eq_alpha_cond_returns_true(
self
) -> None:
self._test_flavor_relation_holds(
'1.0.0-abcdef-alpha', '=', 'alpha')
def test_evaluate_alpha_version_with_eq_beta_cond_returns_false(
self
) -> None:
self._test_flavor_relation_does_not_hold(
'1.0.0-abcdef-alpha', '=', 'beta')
def test_evaluate_alpha_version_with_eq_release_cond_returns_false(
self
) -> None:
self._test_flavor_relation_does_not_hold(
'1.0.0-abcdef-alpha', '=', 'release')
def test_evaluate_alpha_version_with_lt_test_cond_returns_false(
self
) -> None:
self._test_flavor_relation_does_not_hold(
'1.0.0-abcdef-alpha', '<', 'test')
def test_evaluate_alpha_version_with_lt_alpha_cond_returns_false(
self
) -> None:
self._test_flavor_relation_does_not_hold(
'1.0.0-abcdef-alpha', '<', 'alpha')
def test_evaluate_alpha_version_with_lt_beta_cond_returns_true(
self
) -> None:
self._test_flavor_relation_holds(
'1.0.0-abcdef-alpha', '<', 'beta')
def test_evaluate_alpha_version_with_lt_release_cond_returns_true(
self
) -> None:
self._test_flavor_relation_holds(
'1.0.0-abcdef-alpha', '<', 'release')
def test_evaluate_alpha_version_with_lte_test_cond_returns_false(
self
) -> None:
self._test_flavor_relation_does_not_hold(
'1.0.0-abcdef-alpha', '<=', 'test')
def test_evaluate_alpha_version_with_lte_alpha_cond_returns_true(
self
) -> None:
self._test_flavor_relation_holds(
'1.0.0-abcdef-alpha', '<=', 'alpha')
def test_evaluate_alpha_version_with_lte_beta_cond_returns_true(
self
) -> None:
self._test_flavor_relation_holds(
'1.0.0-abcdef-alpha', '<=', 'beta')
def test_evaluate_alpha_version_with_lte_release_cond_returns_true(
self
) -> None:
self._test_flavor_relation_holds(
'1.0.0-abcdef-alpha', '<=', 'release')
def test_evaluate_alpha_version_with_gt_test_cond_returns_true(
self
) -> None:
self._test_flavor_relation_holds(
'1.0.0-abcdef-alpha', '>', 'test')
def test_evaluate_alpha_version_with_gt_alpha_cond_returns_false(
self
) -> None:
self._test_flavor_relation_does_not_hold(
'1.0.0-abcdef-alpha', '>', 'alpha')
def test_evaluate_alpha_version_with_gt_beta_cond_returns_false(
self
) -> None:
self._test_flavor_relation_does_not_hold(
'1.0.0-abcdef-alpha', '>', 'beta')
def test_evaluate_alpha_version_with_gt_release_cond_returns_false(
self
) -> None:
self._test_flavor_relation_does_not_hold(
'1.0.0-abcdef-alpha', '>', 'release')
def test_evaluate_alpha_version_with_gte_test_cond_returns_true(
self
) -> None:
self._test_flavor_relation_holds(
'1.0.0-abcdef-alpha', '>=', 'test')
def test_evaluate_alpha_version_with_gte_alpha_cond_returns_true(
self
) -> None:
self._test_flavor_relation_holds(
'1.0.0-abcdef-alpha', '>=', 'alpha')
def test_evaluate_alpha_version_with_gte_beta_cond_returns_false(
self
) -> None:
self._test_flavor_relation_does_not_hold(
'1.0.0-abcdef-alpha', '>=', 'beta')
def test_evaluate_alpha_version_with_gte_release_cond_returns_false(
self
) -> None:
self._test_flavor_relation_does_not_hold(
'1.0.0-abcdef-alpha', '>=', 'release')
def test_evaluate_beta_version_with_eq_test_cond_returns_false(
self
) -> None:
self._test_flavor_relation_does_not_hold(
'1.0.0-abcdef-beta', '=', 'test')
def test_evaluate_beta_version_with_eq_alpha_cond_returns_false(
self
) -> None:
self._test_flavor_relation_does_not_hold(
'1.0.0-abcdef-beta', '=', 'alpha')
def test_evaluate_beta_version_with_eq_beta_cond_returns_true(
self
) -> None:
self._test_flavor_relation_holds(
'1.0.0-abcdef-beta', '=', 'beta')
def test_evaluate_beta_version_with_eq_release_cond_returns_false(
self
) -> None:
self._test_flavor_relation_does_not_hold(
'1.0.0-abcdef-beta', '=', 'release')
def test_evaluate_beta_version_with_lt_test_cond_returns_false(
self
) -> None:
self._test_flavor_relation_does_not_hold(
'1.0.0-abcdef-beta', '<', 'test')
def test_evaluate_beta_version_with_lt_alpha_cond_returns_false(
self
) -> None:
self._test_flavor_relation_does_not_hold(
'1.0.0-abcdef-beta', '<', 'alpha')
def test_evaluate_beta_version_with_lt_beta_cond_returns_false(
self
) -> None:
self._test_flavor_relation_does_not_hold(
'1.0.0-abcdef-beta', '<', 'beta')
def test_evaluate_beta_version_with_lt_release_cond_returns_true(
self
) -> None:
self._test_flavor_relation_holds(
'1.0.0-abcdef-beta', '<', 'release')
def test_evaluate_beta_version_with_lte_test_cond_returns_false(
self
) -> None:
self._test_flavor_relation_does_not_hold(
'1.0.0-abcdef-beta', '<=', 'test')
def test_evaluate_beta_version_with_lte_alpha_cond_returns_false(
self
) -> None:
self._test_flavor_relation_does_not_hold(
'1.0.0-abcdef-beta', '<=', 'alpha')
def test_evaluate_beta_version_with_lte_beta_cond_returns_true(
self
) -> None:
self._test_flavor_relation_holds(
'1.0.0-abcdef-beta', '<=', 'beta')
def test_evaluate_beta_version_with_lte_release_cond_returns_true(
self
) -> None:
self._test_flavor_relation_holds(
'1.0.0-abcdef-beta', '<=', 'release')
def test_evaluate_beta_version_with_gt_test_cond_returns_true(
self
) -> None:
self._test_flavor_relation_holds(
'1.0.0-abcdef-beta', '>', 'test')
def test_evaluate_beta_version_with_gt_alpha_cond_returns_true(
self
) -> None:
self._test_flavor_relation_holds(
'1.0.0-abcdef-beta', '>', 'alpha')
def test_evaluate_beta_version_with_gt_beta_cond_returns_false(
self
) -> None:
self._test_flavor_relation_does_not_hold(
'1.0.0-abcdef-beta', '>', 'beta')
def test_evaluate_beta_version_with_gt_release_cond_returns_false(
self
) -> None:
self._test_flavor_relation_does_not_hold(
'1.0.0-abcdef-beta', '>', 'release')
def test_evaluate_beta_version_with_gte_test_cond_returns_true(
self
) -> None:
self._test_flavor_relation_holds(
'1.0.0-abcdef-beta', '>=', 'test')
def test_evaluate_beta_version_with_gte_alpha_cond_returns_true(
self
) -> None:
self._test_flavor_relation_holds(
'1.0.0-abcdef-beta', '>=', 'alpha')
def test_evaluate_beta_version_with_gte_beta_cond_returns_true(
self
) -> None:
self._test_flavor_relation_holds(
'1.0.0-abcdef-beta', '>=', 'beta')
def test_evaluate_beta_version_with_gte_release_cond_returns_false(
self
) -> None:
self._test_flavor_relation_does_not_hold(
'1.0.0-abcdef-beta', '>=', 'release')
def test_evaluate_release_version_with_eq_test_cond_returns_false(
self
) -> None:
self._test_flavor_relation_does_not_hold(
'1.0.0-abcdef-release', '=', 'test')
def test_evaluate_release_version_with_eq_alpha_cond_returns_false(
self
) -> None:
self._test_flavor_relation_does_not_hold(
'1.0.0-abcdef-release', '=', 'alpha')
def test_evaluate_release_version_with_eq_beta_cond_returns_false(
self
) -> None:
self._test_flavor_relation_does_not_hold(
'1.0.0-abcdef-release', '=', 'beta')
def test_evaluate_release_version_with_eq_release_cond_returns_true(
self
) -> None:
self._test_flavor_relation_holds(
'1.0.0-abcdef-release', '=', 'release')
def test_evaluate_release_version_with_lt_test_cond_returns_false(
self
) -> None:
self._test_flavor_relation_does_not_hold(
'1.0.0-abcdef-release', '<', 'test')
def test_evaluate_release_version_with_lt_alpha_cond_returns_false(
self
) -> None:
self._test_flavor_relation_does_not_hold(
'1.0.0-abcdef-release', '<', 'alpha')
def test_evaluate_release_version_with_lt_beta_cond_returns_false(
self
) -> None:
self._test_flavor_relation_does_not_hold(
'1.0.0-abcdef-release', '<', 'beta')
def test_evaluate_release_version_with_lt_release_cond_returns_false(
self
) -> None:
self._test_flavor_relation_does_not_hold(
'1.0.0-abcdef-release', '<', 'release')
def test_evaluate_release_version_with_lte_test_cond_returns_false(
self
) -> None:
self._test_flavor_relation_does_not_hold(
'1.0.0-abcdef-release', '<=', 'test')
def test_evaluate_release_version_with_lte_alpha_cond_returns_false(
self
) -> None:
self._test_flavor_relation_does_not_hold(
'1.0.0-abcdef-release', '<=', 'alpha')
def test_evaluate_release_version_with_lte_beta_cond_returns_false(
self
) -> None:
self._test_flavor_relation_does_not_hold(
'1.0.0-abcdef-release', '<=', 'beta')
def test_evaluate_release_version_with_lte_release_cond_returns_true(
self
) -> None:
self._test_flavor_relation_holds(
'1.0.0-abcdef-release', '<=', 'release')
def test_evaluate_release_version_with_gt_test_cond_returns_true(
self
) -> None:
self._test_flavor_relation_holds(
'1.0.0-abcdef-release', '>', 'test')
def test_evaluate_release_version_with_gt_alpha_cond_returns_true(
self
) -> None:
self._test_flavor_relation_holds(
'1.0.0-abcdef-release', '>', 'alpha')
def test_evaluate_release_version_with_gt_beta_cond_returns_true(
self
) -> None:
self._test_flavor_relation_holds(
'1.0.0-abcdef-release', '>', 'beta')
def test_evaluate_release_version_with_gt_release_cond_returns_false(
self
) -> None:
self._test_flavor_relation_does_not_hold(
'1.0.0-abcdef-release', '>', 'release')
def test_evaluate_release_version_with_gte_test_cond_returns_true(
self
) -> None:
self._test_flavor_relation_holds(
'1.0.0-abcdef-release', '>=', 'test')
def test_evaluate_release_version_with_gte_alpha_cond_returns_true(
self
) -> None:
self._test_flavor_relation_holds(
'1.0.0-abcdef-release', '>=', 'alpha')
def test_evaluate_release_version_with_gte_beta_cond_returns_true(
self
) -> None:
self._test_flavor_relation_holds(
'1.0.0-abcdef-release', '>=', 'beta')
def test_evaluate_release_version_with_gte_release_cond_returns_true(
self
) -> None:
self._test_flavor_relation_holds(
'1.0.0-abcdef-release', '>=', 'release')
def test_evaluate_unspecified_version_with_eq_test_cond_returns_false(
self
) -> None:
self._test_flavor_relation_does_not_hold(
'1.0.0-abcdef', '=', 'test')
def test_evaluate_unspecified_version_with_eq_alpha_cond_returns_false(
self
) -> None:
self._test_flavor_relation_does_not_hold(
'1.0.0-abcdef', '=', 'alpha')
def test_evaluate_unspecified_version_with_eq_beta_cond_returns_false(
self
) -> None:
self._test_flavor_relation_does_not_hold(
'1.0.0-abcdef', '=', 'beta')
def test_evaluate_unspecified_version_with_eq_release_cond_returns_false(
self
) -> None:
self._test_flavor_relation_does_not_hold(
'1.0.0-abcdef', '=', 'release')
def test_evaluate_unspecified_version_with_lt_test_cond_returns_false(
self
) -> None:
self._test_flavor_relation_does_not_hold(
'1.0.0-abcdef', '<', 'test')
def test_evaluate_unspecified_version_with_lt_alpha_cond_returns_false(
self
) -> None:
self._test_flavor_relation_does_not_hold(
'1.0.0-abcdef', '<', 'alpha')
def test_evaluate_unspecified_version_with_lt_beta_cond_returns_false(
self
) -> None:
self._test_flavor_relation_does_not_hold(
'1.0.0-abcdef', '<', 'beta')
def test_evaluate_unspecified_version_with_lt_release_cond_returns_false(
self
) -> None:
self._test_flavor_relation_does_not_hold(
'1.0.0-abcdef', '<', 'release')
def test_evaluate_unspecified_version_with_lte_test_cond_returns_false(
self
) -> None:
self._test_flavor_relation_does_not_hold(
'1.0.0-abcdef', '<=', 'test')
def test_evaluate_unspecified_version_with_lte_alpha_cond_returns_false(
self
) -> None:
self._test_flavor_relation_does_not_hold(
'1.0.0-abcdef', '<=', 'alpha')
def test_evaluate_unspecified_version_with_lte_beta_cond_returns_false(
self
) -> None:
self._test_flavor_relation_does_not_hold(
'1.0.0-abcdef', '<=', 'beta')
def test_evaluate_unspecified_version_with_lte_release_cond_returns_false(
self
) -> None:
self._test_flavor_relation_does_not_hold(
'1.0.0-abcdef', '<=', 'release')
def test_evaluate_unspecified_version_with_gt_test_cond_returns_false(
self
) -> None:
self._test_flavor_relation_does_not_hold(
'1.0.0-abcdef', '>', 'test')
def test_evaluate_unspecified_version_with_gt_alpha_cond_returns_false(
self
) -> None:
self._test_flavor_relation_does_not_hold(
'1.0.0-abcdef', '>', 'alpha')
def test_evaluate_unspecified_version_with_gt_beta_cond_returns_false(
self
) -> None:
self._test_flavor_relation_does_not_hold(
'1.0.0-abcdef', '>', 'beta')
def test_evaluate_unspecified_version_with_gt_release_cond_returns_false(
self
) -> None:
self._test_flavor_relation_does_not_hold(
'1.0.0-abcdef', '>', 'release')
def test_evaluate_unspecified_version_with_gte_test_cond_returns_false(
self
) -> None:
self._test_flavor_relation_does_not_hold(
'1.0.0-abcdef', '>=', 'test')
def test_evaluate_unspecified_version_with_gte_alpha_cond_returns_false(
self
) -> None:
self._test_flavor_relation_does_not_hold(
'1.0.0-abcdef', '>=', 'alpha')
def test_evaluate_unspecified_version_with_gte_beta_cond_returns_false(
self
) -> None:
self._test_flavor_relation_does_not_hold(
'1.0.0-abcdef', '>=', 'beta')
def test_evaluate_unspecified_version_with_gte_release_cond_returns_false(
self
) -> None:
self._test_flavor_relation_does_not_hold(
'1.0.0-abcdef', '>=', 'release')
def test_evaluate_multi_value_filter_with_one_matched_returns_true(
self
) -> None:
filter_dict: parameter_domain.PlatformParameterFilterDict = {
'type': 'server_mode',
'conditions': [['=', 'dev'], ['=', 'prod']]
}
filter_domain = (
parameter_domain
.PlatformParameterFilter.from_dict(filter_dict))
dev_context = self._create_example_context(mode='DEV')
self.assertTrue(filter_domain.evaluate(dev_context))
    def test_evaluate_multi_value_filter_with_none_matched_returns_false(
self
) -> None:
filter_dict: parameter_domain.PlatformParameterFilterDict = {
'type': 'server_mode',
'conditions': [['=', 'dev'], ['=', 'prod']]
}
filter_domain = (
parameter_domain
.PlatformParameterFilter.from_dict(filter_dict))
test_context = self._create_example_context(mode='TEST')
self.assertFalse(filter_domain.evaluate(test_context))
def test_evaluate_app_version_filter_without_version_returns_false(
self
) -> None:
filter_dict: parameter_domain.PlatformParameterFilterDict = {
'type': 'app_version',
'conditions': [['=', '1.2.3'], ['=', '1.2.4']]
}
filter_domain = parameter_domain.PlatformParameterFilter.from_dict(
filter_dict)
self.assertFalse(filter_domain.evaluate(
self._create_example_context(app_version=None)))
def test_evaluate_filter_with_unsupported_operation_raises_exception(
self
) -> None:
filter_domain = (
parameter_domain
.PlatformParameterFilter.from_dict(
{'type': 'server_mode', 'conditions': [['!=', 'dev']]}
))
with self.assertRaisesRegex(
Exception, 'Unsupported comparison operator \'!=\''):
filter_domain.evaluate(self._create_example_context())
filter_dict: parameter_domain.PlatformParameterFilterDict = {
'type': 'app_version',
'conditions': [['>>', '1.2.3']]
}
filter_domain = parameter_domain.PlatformParameterFilter.from_dict(
filter_dict)
with self.assertRaisesRegex(
Exception, 'Unsupported comparison operator \'>>\''):
self.assertFalse(filter_domain.evaluate(
self._create_example_context(app_version='1.0.0-abcdef-test')))
filter_domain = (
parameter_domain
.PlatformParameterFilter.from_dict(
{'type': 'app_version_flavor', 'conditions': [['==', 'beta']]}
))
with self.assertRaisesRegex(
Exception, 'Unsupported comparison operator \'==\''):
filter_domain.evaluate(
self._create_example_context(app_version='1.0.0-abcdef-test')
)
def test_validate_filter_passes_without_exception(self) -> None:
filter_dict: parameter_domain.PlatformParameterFilterDict = {
'type': 'server_mode',
'conditions': [['=', 'dev'], ['=', 'prod']]
}
filter_domain = (
parameter_domain
.PlatformParameterFilter.from_dict(filter_dict))
filter_domain.validate()
def test_validate_filter_with_invalid_type_raises_exception(self) -> None:
filter_domain = (
parameter_domain
.PlatformParameterFilter.from_dict(
{'type': 'invalid', 'conditions': [['=', 'value1']]}
))
with self.assertRaisesRegex(
utils.ValidationError, 'Unsupported filter type \'invalid\''):
filter_domain.validate()
def test_validate_filter_with_unsupported_operation_raises_exception(
self
) -> None:
filter_domain = (
parameter_domain
.PlatformParameterFilter.from_dict(
{'type': 'server_mode', 'conditions': [['!=', 'dev']]}
))
with self.assertRaisesRegex(
utils.ValidationError, 'Unsupported comparison operator \'!=\''):
filter_domain.validate()
def test_validate_filter_with_invalid_server_mode_raises_exception(
self
) -> None:
filter_domain = (
parameter_domain
.PlatformParameterFilter.from_dict(
{'type': 'server_mode', 'conditions': [['=', 'invalid']]}
))
with self.assertRaisesRegex(
utils.ValidationError, 'Invalid server mode \'invalid\''):
filter_domain.validate()
def test_validate_filter_with_invalid_platform_type_raises_exception(
self
) -> None:
filter_domain = (
parameter_domain
.PlatformParameterFilter.from_dict(
{'type': 'platform_type', 'conditions': [['=', 'invalid']]}
))
with self.assertRaisesRegex(
utils.ValidationError, 'Invalid platform type \'invalid\''):
filter_domain.validate()
def test_validate_filter_with_invalid_version_expr_raises_exception(
self
) -> None:
filter_domain = (
parameter_domain
.PlatformParameterFilter.from_dict(
{'type': 'app_version', 'conditions': [['=', '1.a.2']]}
))
with self.assertRaisesRegex(
utils.ValidationError, 'Invalid version expression \'1.a.2\''):
filter_domain.validate()
def test_validate_filter_with_invalid_version_flavor_raises_exception(
self
) -> None:
filter_domain = (
parameter_domain
.PlatformParameterFilter.from_dict(
{'type': 'app_version_flavor', 'conditions': [['=', 'invalid']]}
))
with self.assertRaisesRegex(
utils.ValidationError, 'Invalid app version flavor \'invalid\''):
filter_domain.validate()
class PlatformParameterRuleTests(test_utils.GenericTestBase):
"""Test for the PlatformParameterRule."""
def test_create_from_dict_returns_correct_instance(self) -> None:
filters: List[parameter_domain.PlatformParameterFilterDict] = [
{
'type': 'app_version',
'conditions': [['=', '1.2.3']]
},
{
'type': 'server_mode',
'conditions': [['=', 'dev'], ['=', 'test']]
}
]
rule = parameter_domain.PlatformParameterRule.from_dict(
{
'filters': filters,
'value_when_matched': False,
},
)
self.assertIsInstance(rule, parameter_domain.PlatformParameterRule)
filter_domain = rule.filters[0]
self.assertIsInstance(
filter_domain, parameter_domain.PlatformParameterFilter)
self.assertEqual(len(rule.filters), 2)
self.assertEqual(filter_domain.type, 'app_version')
self.assertEqual(filter_domain.conditions, [['=', '1.2.3']])
self.assertEqual(rule.value_when_matched, False)
def test_to_dict_returns_correct_dict(self) -> None:
rule_dict: parameter_domain.PlatformParameterRuleDict = {
'filters': [
{
'type': 'app_version',
'conditions': [['=', '1.2.3']]
}
],
'value_when_matched': False,
}
rule = parameter_domain.PlatformParameterRule.from_dict(rule_dict)
self.assertEqual(rule.to_dict(), rule_dict)
def test_evaluation_with_matching_context_returns_true(self) -> None:
rule = parameter_domain.PlatformParameterRule.from_dict(
{
'filters': [
{'type': 'app_version', 'conditions': [['=', '1.2.3']]},
{'type': 'platform_type', 'conditions': [['=', 'Android']]},
],
'value_when_matched': 'matched_val',
},
)
context = parameter_domain.EvaluationContext.from_dict(
{
'platform_type': 'Android',
'app_version': '1.2.3',
},
{
'server_mode': ServerMode.DEV,
},
)
self.assertTrue(rule.evaluate(context))
def test_evaluation_with_unmatching_context_returns_false(self) -> None:
rule = parameter_domain.PlatformParameterRule.from_dict(
{
'filters': [
{'type': 'app_version', 'conditions': [['=', '1.2.3']]},
{'type': 'platform_type', 'conditions': [['=', 'Web']]},
],
'value_when_matched': 'matched_val',
},
)
context = parameter_domain.EvaluationContext.from_dict(
{
'platform_type': 'Android',
'app_version': '1.2.3',
},
{
'server_mode': ServerMode.DEV,
},
)
self.assertFalse(rule.evaluate(context))
def test_validate_with_invalid_filter_raises_exception(self) -> None:
filters: List[parameter_domain.PlatformParameterFilterDict] = [
{'type': 'app_version', 'conditions': [['=', '1.2.3']]},
{'type': 'invalid', 'conditions': [['=', '1.2.3']]},
]
rule = parameter_domain.PlatformParameterRule.from_dict(
{
'filters': filters,
'value_when_matched': False,
}
)
with self.assertRaisesRegex(
utils.ValidationError, 'Unsupported filter type \'invalid\''):
rule.validate()
class PlatformParameterTests(test_utils.GenericTestBase):
"""Test for the PlatformParameter."""
def test_create_from_dict_returns_correct_instance(self) -> None:
param = parameter_domain.PlatformParameter.from_dict({
'name': 'parameter_a',
'description': 'for test',
'data_type': 'string',
'rules': [
{
'filters': [
{
'type': 'server_mode',
'conditions': [['=', 'dev']]
}
],
'value_when_matched': '222'
}
],
'rule_schema_version': (
feconf.CURRENT_PLATFORM_PARAMETER_RULE_SCHEMA_VERSION),
'default_value': '333',
'is_feature': False,
'feature_stage': None,
})
self.assertIsInstance(param, parameter_domain.PlatformParameter)
self.assertEqual(param.name, 'parameter_a')
self.assertEqual(param.description, 'for test')
self.assertEqual(param.data_type, 'string')
self.assertEqual(len(param.rules), 1)
self.assertEqual(param.is_feature, False)
self.assertIsNone(param.feature_stage)
self.assertEqual(param.default_value, '333')
self.assertEqual(
param.rule_schema_version,
feconf.CURRENT_PLATFORM_PARAMETER_RULE_SCHEMA_VERSION)
def test_validate_with_invalid_name_raises_exception(self) -> None:
param = parameter_domain.PlatformParameter.from_dict({
'name': 'Invalid~Name',
'description': 'for test',
'data_type': 'string',
'rules': [
{
'filters': [
{
'type': 'server_mode',
'conditions': [['=', 'dev']]
}
],
'value_when_matched': '222'
}
],
'rule_schema_version': (
feconf.CURRENT_PLATFORM_PARAMETER_RULE_SCHEMA_VERSION),
'default_value': '333',
'is_feature': False,
'feature_stage': None,
})
with self.assertRaisesRegex(
utils.ValidationError,
'Invalid parameter name \'%s\'' % param.name):
param.validate()
param1 = parameter_domain.PlatformParameter.from_dict({
'name': 'parameter.name',
'description': 'for test',
'data_type': 'string',
'rules': [
{
'filters': [
{
'type': 'server_mode',
'conditions': [['=', 'dev']]
}
],
'value_when_matched': '222'
}
],
'rule_schema_version': (
feconf.CURRENT_PLATFORM_PARAMETER_RULE_SCHEMA_VERSION),
'default_value': '333',
'is_feature': False,
'feature_stage': None,
})
with self.assertRaisesRegex(
utils.ValidationError,
'Invalid parameter name \'%s\'' % param1.name):
param1.validate()
def test_validate_with_long_name_raises_exception(self) -> None:
long_name = 'Long_' * 50 + 'Name'
param = parameter_domain.PlatformParameter.from_dict({
'name': long_name,
'description': 'for test',
'data_type': 'string',
'rules': [
{
'filters': [
{
'type': 'server_mode',
'conditions': [['=', 'dev']]
}
],
'value_when_matched': '222'
}
],
'rule_schema_version': (
feconf.CURRENT_PLATFORM_PARAMETER_RULE_SCHEMA_VERSION),
'default_value': '333',
'is_feature': False,
'feature_stage': None,
})
with self.assertRaisesRegex(
utils.ValidationError,
'Invalid parameter name \'%s\'' % long_name):
param.validate()
def test_validate_with_unsupported_data_type_raises_exception(self) -> None:
param = parameter_domain.PlatformParameter.from_dict({
'name': 'parameter_a',
'description': 'for test',
'data_type': 'InvalidType',
'rules': [
{
'filters': [
{
'type': 'server_mode',
'conditions': [['=', 'dev']]
}
],
'value_when_matched': '222'
}
],
'rule_schema_version': (
feconf.CURRENT_PLATFORM_PARAMETER_RULE_SCHEMA_VERSION),
'default_value': '333',
'is_feature': False,
'feature_stage': None,
})
with self.assertRaisesRegex(
utils.ValidationError, 'Unsupported data type \'InvalidType\''):
param.validate()
def test_validate_with_inconsistent_data_type_in_rules_raises_exception(
self
) -> None:
param = parameter_domain.PlatformParameter.from_dict({
'name': 'parameter_a',
'description': 'for test',
'data_type': 'bool',
'rules': [
{
'filters': [
{
'type': 'server_mode',
'conditions': [['=', 'dev']]
}
],
'value_when_matched': '222'
},
],
'rule_schema_version': (
feconf.CURRENT_PLATFORM_PARAMETER_RULE_SCHEMA_VERSION),
'default_value': False,
'is_feature': False,
'feature_stage': None,
})
with self.assertRaisesRegex(
utils.ValidationError,
'Expected bool, received \'222\' in value_when_matched'):
param.validate()
def test_validate_with_inconsistent_default_value_type_raises_exception(
self
) -> None:
param = parameter_domain.PlatformParameter.from_dict({
'name': 'parameter_a',
'description': 'for test',
'data_type': 'bool',
'rules': [],
'rule_schema_version': (
feconf.CURRENT_PLATFORM_PARAMETER_RULE_SCHEMA_VERSION),
'default_value': '111',
'is_feature': False,
'feature_stage': None,
})
with self.assertRaisesRegex(
utils.ValidationError,
'Expected bool, received \'111\' in default value'):
param.validate()
def test_create_with_old_rule_schema_version_failure(self) -> None:
with self.swap(
feconf, 'CURRENT_PLATFORM_PARAMETER_RULE_SCHEMA_VERSION', 2):
with self.assertRaisesRegex(
Exception,
'Current platform parameter rule schema version is v2, '
'received v1'):
parameter_domain.PlatformParameter.from_dict({
'name': 'parameter_a',
'description': 'for test',
'data_type': 'string',
'rules': [
{
'filters': [
{
'type': 'server_mode',
'conditions': [['=', 'dev']]
}
],
'value_when_matched': '222'
}
],
'rule_schema_version': 1,
'default_value': '333',
'is_feature': False,
'feature_stage': None,
})
def test_to_dict_returns_correct_dict(self) -> None:
param_dict: parameter_domain.PlatformParameterDict = {
'name': 'parameter_a',
'description': 'for test',
'data_type': 'string',
'rules': [
{
'filters': [
{
'type': 'server_mode',
'conditions': [['=', 'dev']]
}
],
'value_when_matched': '222'
}
],
'rule_schema_version': (
feconf.CURRENT_PLATFORM_PARAMETER_RULE_SCHEMA_VERSION),
'default_value': '333',
'is_feature': False,
'feature_stage': None
}
parameter = parameter_domain.PlatformParameter.from_dict(param_dict)
self.assertDictEqual(parameter.to_dict(), param_dict)
def test_set_rules_correctly_changes_rules(self) -> None:
param = parameter_domain.PlatformParameter.from_dict({
'name': 'parameter_a',
'description': 'for test',
'data_type': 'string',
'rules': [
{
'filters': [
{
'type': 'server_mode',
'conditions': [['=', 'dev']]
}
],
'value_when_matched': '222'
},
{
'filters': [
{
'type': 'server_mode',
'conditions': [['=', 'test']]
}
],
'value_when_matched': '555'
}
],
'rule_schema_version': (
feconf.CURRENT_PLATFORM_PARAMETER_RULE_SCHEMA_VERSION),
'default_value': '333',
'is_feature': False,
'feature_stage': None
})
new_rule_dict: parameter_domain.PlatformParameterRuleDict = {
'filters': [
{'type': 'server_mode', 'conditions': [['=', 'test']]}
],
'value_when_matched': 'new rule value',
}
new_rule = parameter_domain.PlatformParameterRule.from_dict(
new_rule_dict)
param.set_rules([new_rule])
self.assertEqual(len(param.rules), 1)
self.assertEqual(param.rules[0].to_dict(), new_rule_dict)
def test_set_default_value_correctly_changes_default_value(self) -> None:
param = parameter_domain.PlatformParameter.from_dict({
'name': 'parameter_a',
'description': 'for test',
'data_type': 'string',
'rules': [
{
'filters': [
{
'type': 'server_mode',
'conditions': [['=', 'dev']]
}
],
'value_when_matched': '222'
}
],
'rule_schema_version': (
feconf.CURRENT_PLATFORM_PARAMETER_RULE_SCHEMA_VERSION),
'default_value': '333',
'is_feature': False,
'feature_stage': None
})
param.set_default_value('default')
self.assertEqual(param.default_value, 'default')
def test_evaluate_with_matched_rule_returns_correct_value(self) -> None:
parameter = parameter_domain.PlatformParameter.from_dict({
'name': 'parameter_a',
'description': 'for test',
'data_type': 'string',
'rules': [
{
'filters': [
{
'type': 'server_mode',
'conditions': [['=', 'dev']]
}
],
'value_when_matched': '222'
}
],
'rule_schema_version': (
feconf.CURRENT_PLATFORM_PARAMETER_RULE_SCHEMA_VERSION),
'default_value': '333',
'is_feature': False,
'feature_stage': None,
})
dev_context = parameter_domain.EvaluationContext.from_dict(
{
'platform_type': 'Android',
'app_version': '1.2.3',
},
{
'server_mode': ServerMode.DEV,
},
)
self.assertEqual(parameter.evaluate(dev_context), '222')
def test_evaluate_without_matched_rule_returns_default_value(self) -> None:
parameter = parameter_domain.PlatformParameter.from_dict({
'name': 'parameter_a',
'description': 'for test',
'data_type': 'string',
'rules': [
{
'filters': [
{
'type': 'server_mode',
'conditions': [['=', 'dev']]
}
],
'value_when_matched': '222'
}
],
'rule_schema_version': (
feconf.CURRENT_PLATFORM_PARAMETER_RULE_SCHEMA_VERSION),
'default_value': '111',
'is_feature': False,
'feature_stage': None,
})
prod_context = parameter_domain.EvaluationContext.from_dict(
{
'platform_type': 'Android',
'app_version': '1.2.3',
},
{
'server_mode': ServerMode.PROD,
},
)
self.assertEqual(parameter.evaluate(prod_context), '111')
def test_evaluate_matching_feature_invalid_platform_type_returns_def(
self
) -> None:
parameter = parameter_domain.PlatformParameter.from_dict({
'name': 'parameter_a',
'description': 'for test',
'data_type': 'string',
'rules': [
{
'filters': [
{
'type': 'server_mode',
'conditions': [['=', 'dev']]
}
],
'value_when_matched': '222'
}
],
'rule_schema_version': (
feconf.CURRENT_PLATFORM_PARAMETER_RULE_SCHEMA_VERSION),
'default_value': '111',
'is_feature': False,
'feature_stage': None,
})
dev_context = parameter_domain.EvaluationContext.from_dict(
{
'platform_type': 'invalid',
'app_version': '1.2.3',
},
{
'server_mode': ServerMode.DEV,
},
)
self.assertEqual(parameter.evaluate(dev_context), '111')
def test_evaluate_matching_feature_missing_platform_type_returns_def(
self
) -> None:
parameter = parameter_domain.PlatformParameter.from_dict({
'name': 'parameter_a',
'description': 'for test',
'data_type': 'string',
'rules': [
{
'filters': [
{
'type': 'server_mode',
'conditions': [['=', 'dev']]
}
],
'value_when_matched': '222'
}
],
'rule_schema_version': (
feconf.CURRENT_PLATFORM_PARAMETER_RULE_SCHEMA_VERSION),
'default_value': '111',
'is_feature': False,
'feature_stage': None,
})
dev_context = parameter_domain.EvaluationContext.from_dict(
{
'platform_type': '',
'app_version': '1.2.3',
},
{
'server_mode': ServerMode.DEV,
},
)
self.assertEqual(parameter.evaluate(dev_context), '111')
def test_validate_feature_passes_without_exception(self) -> None:
parameter = parameter_domain.PlatformParameter.from_dict({
'name': 'parameter_a',
'description': 'for test',
'data_type': 'bool',
'rules': [
{
'filters': [
{'type': 'server_mode', 'conditions': [['=', 'dev']]}
],
'value_when_matched': False
}
],
'rule_schema_version': (
feconf.CURRENT_PLATFORM_PARAMETER_RULE_SCHEMA_VERSION),
'default_value': False,
'is_feature': True,
'feature_stage': 'dev',
})
parameter.validate()
def test_validate_feature_with_invalid_type_raises_exception(self) -> None:
parameter = parameter_domain.PlatformParameter.from_dict({
'name': 'parameter_a',
'description': 'for test',
'data_type': 'string',
'rules': [],
'rule_schema_version': (
feconf.CURRENT_PLATFORM_PARAMETER_RULE_SCHEMA_VERSION),
'default_value': '111',
'is_feature': True,
'feature_stage': 'dev',
})
with self.assertRaisesRegex(
utils.ValidationError,
'Data type of feature flags must be bool, got \'string\' instead'):
parameter.validate()
def test_validate_feature_with_invalid_stage_raises_exception(self) -> None:
parameter = parameter_domain.PlatformParameter.from_dict({
'name': 'parameter_a',
'description': 'for test',
'data_type': 'bool',
'rules': [],
'rule_schema_version': (
feconf.CURRENT_PLATFORM_PARAMETER_RULE_SCHEMA_VERSION),
'default_value': False,
'is_feature': True,
'feature_stage': 'Invalid',
})
with self.assertRaisesRegex(
utils.ValidationError, 'Invalid feature stage, got \'Invalid\''):
parameter.validate()
def test_validate_dev_feature_for_test_env_raises_exception(self) -> None:
parameter = parameter_domain.PlatformParameter.from_dict({
'name': 'parameter_a',
'description': '',
'data_type': 'bool',
'rules': [
{
'filters': [
{'type': 'server_mode', 'conditions': [['=', 'test']]}],
'value_when_matched': True
}
],
'rule_schema_version': (
feconf.CURRENT_PLATFORM_PARAMETER_RULE_SCHEMA_VERSION),
'default_value': False,
'is_feature': True,
'feature_stage': 'dev',
})
with self.assertRaisesRegex(
utils.ValidationError, 'cannot be enabled in test or production'):
parameter.validate()
def test_validate_dev_feature_for_prod_env_raises_exception(self) -> None:
parameter = parameter_domain.PlatformParameter.from_dict({
'name': 'parameter_a',
'description': '',
'data_type': 'bool',
'rules': [
{
'filters': [
{'type': 'server_mode', 'conditions': [['=', 'prod']]}],
'value_when_matched': True
}
],
'rule_schema_version': (
feconf.CURRENT_PLATFORM_PARAMETER_RULE_SCHEMA_VERSION),
'default_value': False,
'is_feature': True,
'feature_stage': 'dev',
})
with self.assertRaisesRegex(
utils.ValidationError, 'cannot be enabled in test or production'):
parameter.validate()
def test_validate_test_feature_for_prod_env_raises_exception(
self
) -> None:
parameter = parameter_domain.PlatformParameter.from_dict({
'name': 'parameter_a',
'description': '',
'data_type': 'bool',
'rules': [
{
'filters': [
{'type': 'server_mode', 'conditions': [['=', 'prod']]}],
'value_when_matched': True
}
],
'rule_schema_version': (
feconf.CURRENT_PLATFORM_PARAMETER_RULE_SCHEMA_VERSION),
'default_value': False,
'is_feature': True,
'feature_stage': 'test',
})
with self.assertRaisesRegex(
utils.ValidationError, 'cannot be enabled in production'):
parameter.validate()
def test_serialize_and_deserialize_returns_unchanged_platform_parameter(
self
) -> None:
"""Checks that serializing and then deserializing a default parameter
works as intended by leaving the parameter unchanged.
"""
parameter = parameter_domain.PlatformParameter.from_dict({
'name': 'parameter_a',
'description': '',
'data_type': 'bool',
'rules': [
{
'filters': [
{
'type': 'server_mode', 'conditions': [['=', 'prod']]
}
],
'value_when_matched': True
}
],
'rule_schema_version': (
feconf.CURRENT_PLATFORM_PARAMETER_RULE_SCHEMA_VERSION),
'default_value': False,
'is_feature': True,
'feature_stage': 'test',
})
self.assertEqual(
parameter.to_dict(),
parameter_domain.PlatformParameter.deserialize(
parameter.serialize()).to_dict())
| facebook/openbmc | /tests2/tools/gen_i2c_tree.py | no_license | Python | UTF-8 | 4,018 bytes | gen_i2c_tree.py |
#!/usr/bin/env python3
#
# Copyright 2019-present Facebook. All Rights Reserved.
#
# This program file is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; version 2 of the License.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
# for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program in a file named COPYING; if not, write to the
# Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor,
# Boston, MA 02110-1301 USA
#
import argparse
import json
import os
import sys
from utils.i2c_utils import I2cSysfsUtils
class Log_Simple:
def __init__(self, verbose=False):
self._verbose = verbose
def verbose(self, message):
if self._verbose:
print(message)
def info(self, message):
print(message)
def load_i2c_tree(logger):
i2c_tree = {}
for filename in os.listdir(I2cSysfsUtils.i2c_device_dir()):
if not I2cSysfsUtils.is_i2c_device_entry(filename):
continue
logger.verbose("parsing i2c device %s" % filename)
dev_info = {}
dev_info["name"] = I2cSysfsUtils.i2c_device_get_name(filename)
dev_info["driver"] = I2cSysfsUtils.i2c_device_get_driver(filename)
i2c_tree[filename] = dev_info
return i2c_tree
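# Illustrative shape of the tree returned by load_i2c_tree() -- the bus
# addresses, device names and drivers below are hypothetical examples, not
# values read from a real system:
#   {'0-0050': {'name': '24c64', 'driver': 'at24'},
#    '1-0070': {'name': 'pca9548', 'driver': ''}}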
def dump_i2c_py_format(i2c_tree, filename, logger):
logger.verbose("writing i2c tree to %s.." % filename)
with open(filename, "w") as fp:
fp.write("plat_i2c_tree = {\n")
for dev_name, info in i2c_tree.items():
fp.write(" '%s': {\n" % dev_name)
fp.write(" 'name': '%s',\n" % info["name"])
fp.write(" 'driver': '%s',\n" % info["driver"])
fp.write(" },\n")
fp.write("}\n")
def dump_i2c_json_format(i2c_tree, filename, logger):
    logger.verbose("writing i2c tree to %s.." % filename)
    # Use the json module so the output is actually valid JSON
    # (double-quoted keys, no trailing commas).
    with open(filename, "w") as fp:
        json.dump(i2c_tree, fp, indent=4, sort_keys=True)
        fp.write("\n")
def dump_summary(i2c_tree, logger):
bindings = 0
dev_list = []
for dev_name, info in i2c_tree.items():
if info["driver"] != "":
bindings += 1
else:
dev_list.append("%s (name=%s)" % (dev_name, info["name"]))
total = len(i2c_tree.keys())
total_msg = "Total %d i2c devices" % total
    if total == bindings:
        bind_msg = "all of them are bound to drivers"
    else:
        bind_msg = "%d of them are bound to drivers" % bindings
logger.info("%s: %s" % (total_msg, bind_msg))
if dev_list:
logger.info("List of devices without drivers:")
for entry in dev_list:
logger.info("\t%s" % entry)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"-v",
"--verbose",
action="store_true",
default=False,
help="increase output verbosity",
)
parser.add_argument(
"-j",
"--json-format",
action="store_true",
default=False,
help="generate i2c tree in json format",
)
parser.add_argument("outfile", action="store")
args = parser.parse_args()
logger = Log_Simple(verbose=args.verbose)
i2c_tree = load_i2c_tree(logger)
if args.json_format:
dump_i2c_json_format(i2c_tree, args.outfile, logger)
else:
dump_i2c_py_format(i2c_tree, args.outfile, logger)
dump_summary(i2c_tree, logger)
logger.info("Commmand completed successfully!")
sys.exit(0)
| project-chip/connectedhomeip | /src/controller/python/py_matter_yamltest_repl_adapter/matter_yamltest_repl_adapter/adapter.py | Apache-2.0, LicenseRef-scancode-warranty-disclaimer (permissive) | Python | UTF-8 | 1,262 bytes | adapter.py |
# Copyright (c) 2023 Project CHIP Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from chip.yaml.runner import ReplTestRunner
from matter_yamltests.adapter import TestAdapter
class Adapter(TestAdapter):
def __init__(self, specifications):
self._adapter = ReplTestRunner(specifications, None, None)
def encode(self, request):
return self._adapter.encode(request)
def decode(self, response):
        # TODO: We should provide more meaningful logs here, but to adhere to
        # the abstract function definition we need to return a list here.
logs = []
decoded_response = self._adapter.decode(response)
if len(decoded_response) == 0:
decoded_response = [{}]
return decoded_response, logs
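# Illustrative usage sketch of the Adapter class above -- the `specifications`,
# `request` and `response` objects are assumed to come from the surrounding
# matter_yamltests harness and are not defined here:
#   adapter = Adapter(specifications)
#   encoded_request = adapter.encode(request)
#   decoded_response, logs = adapter.decode(response)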
| holoviz/panel | /panel/tests/ui/widgets/test_tabulator.py | BSD-3-Clause (permissive) | Python | UTF-8 | 110,863 bytes | test_tabulator.py |
from __future__ import annotations
import datetime as dt
import time
import numpy as np
import pandas as pd
import param
import pytest
from bokeh.models.widgets.tables import (
BooleanFormatter, CheckboxEditor, DateEditor, DateFormatter,
HTMLTemplateFormatter, IntEditor, NumberEditor, NumberFormatter,
ScientificFormatter, SelectEditor, StringEditor, StringFormatter,
TextEditor,
)
from panel.layout.base import Column
try:
from playwright.sync_api import expect
except ImportError:
pytestmark = pytest.mark.skip('playwright not available')
pytestmark = pytest.mark.ui
from panel import state
from panel.depends import bind
from panel.io.server import serve
from panel.models.tabulator import _TABULATOR_THEMES_MAPPING
from panel.tests.util import get_ctrl_modifier, wait_until
from panel.widgets import Select, Tabulator
@pytest.fixture
def df_mixed():
df = pd.DataFrame({
'int': [1, 2, 3, 4],
'float': [3.14, 6.28, 9.42, -2.45],
'str': ['A', 'B', 'C', 'D'],
'bool': [True, True, True, False],
'date': [dt.date(2019, 1, 1), dt.date(2020, 1, 1), dt.date(2020, 1, 10), dt.date(2019, 1, 10)],
'datetime': [dt.datetime(2019, 1, 1, 10), dt.datetime(2020, 1, 1, 12), dt.datetime(2020, 1, 10, 13), dt.datetime(2020, 1, 15, 13)]
}, index=['idx0', 'idx1', 'idx2', 'idx3'])
return df
@pytest.fixture(scope='session')
def df_mixed_as_string():
return """index
int
float
str
bool
date
datetime
idx0
1
3.14
A
true
2019-01-01
2019-01-01 10:00:00
idx1
2
6.28
B
true
2020-01-01
2020-01-01 12:00:00
idx2
3
9.42
C
true
2020-01-10
2020-01-10 13:00:00
idx3
4
-2.45
D
false
2019-01-10
2020-01-15 13:00:00
"""
@pytest.fixture
def df_multiindex(df_mixed):
df_mi = df_mixed.copy()
df_mi.index = pd.MultiIndex.from_tuples([
('group0', 'subgroup0'),
('group0', 'subgroup1'),
('group1', 'subgroup0'),
('group1', 'subgroup1'),
], names=['groups', 'subgroups'])
return df_mi
def count_per_page(count: int, page_size: int):
"""
>>> count_per_page(12, 7)
[7, 5]
"""
original_count = count
count_per_page = []
while True:
page_count = min(count, page_size)
count_per_page.append(page_count)
count -= page_count
if count == 0:
break
assert sum(count_per_page) == original_count
return count_per_page
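# A second worked example of the helper above (illustrative, not part of the
# original doctest): count_per_page(10, 4) == [4, 4, 2].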
def tabulator_column_values(page, col_name: str) -> list[str]:
"""Get the values of a column.
>>> tabulator_column_values(page, 'color')
['blue', 'red']
"""
cells = page.locator(f'[tabulator-field={col_name}][role=gridcell]')
return cells.all_inner_texts()
def test_tabulator_no_console_error(page, port, df_mixed):
widget = Tabulator(df_mixed)
serve(widget, port=port, threaded=True, show=False)
msgs = []
page.on("console", lambda msg: msgs.append(msg))
time.sleep(0.2)
page.goto(f"http://localhost:{port}")
time.sleep(1)
assert [msg for msg in msgs if msg.type == 'error' and 'favicon' not in msg.location['url']] == []
def test_tabulator_default(page, port, df_mixed, df_mixed_as_string):
nrows, ncols = df_mixed.shape
widget = Tabulator(df_mixed)
serve(widget, port=port, threaded=True, show=False)
time.sleep(0.2)
page.goto(f"http://localhost:{port}")
expected_ncols = ncols + 2 # _index + index + data columns
# Check that the whole table content is on the page
table = page.locator('.pnx-tabulator.tabulator')
expect(table).to_have_text(
df_mixed_as_string,
use_inner_text=True
)
# Check that the default layout is fitDataTable
assert widget.layout == 'fit_data_table'
assert table.get_attribute('tabulator-layout') == 'fitDataTable'
# Check the table has the right number of rows
rows = page.locator('.tabulator-row')
assert rows.count() == nrows
# Check that the hidden _index column is added by Panel
cols = page.locator(".tabulator-col")
assert cols.count() == expected_ncols
assert cols.nth(0).get_attribute('tabulator-field') == '_index'
assert cols.nth(0).is_hidden()
# Check that the first visible is the index column
assert widget.show_index
assert page.locator('text="index"').is_visible()
assert cols.nth(1).is_visible()
# Check that the columns are sortable by default
assert page.locator(".tabulator-sortable").count() == expected_ncols
# And that none of them is sorted on start
for i in range(expected_ncols):
assert cols.nth(i).get_attribute('aria-sort') == 'none'
@pytest.mark.flaky(max_runs=3)
def test_tabulator_value_changed(page, port, df_mixed):
widget = Tabulator(df_mixed)
serve(widget, port=port, threaded=True, show=False)
time.sleep(0.2)
page.goto(f"http://localhost:{port}")
df_mixed.loc['idx0', 'str'] = 'AA'
# Need to trigger the value as the dataframe was modified
# in place which is not detected.
widget.param.trigger('value')
changed_cell = page.locator('text="AA"')
expect(changed_cell).to_have_count(1)
def test_tabulator_disabled(page, port, df_mixed):
widget = Tabulator(df_mixed, disabled=True)
serve(widget, port=port, threaded=True, show=False)
time.sleep(0.2)
page.goto(f"http://localhost:{port}")
cell = page.locator('text="A"')
cell.click()
# If the cell was editable then this input element should
# be found.
expect(page.locator('input[type="text"]')).to_have_count(0)
def test_tabulator_show_index_disabled(page, port, df_mixed):
widget = Tabulator(df_mixed, show_index=False)
serve(widget, port=port, threaded=True, show=False)
time.sleep(0.2)
page.goto(f"http://localhost:{port}")
expect(page.locator('text="index"')).to_have_count(0)
def test_tabulator_titles(page, port, df_mixed):
titles = {col: col.upper() for col in df_mixed.columns}
widget = Tabulator(df_mixed, titles=titles)
serve(widget, port=port, threaded=True, show=False)
time.sleep(0.2)
page.goto(f"http://localhost:{port}")
for col in df_mixed.columns:
expected_title = titles[col]
expect(page.locator(f'text="{expected_title}"')).to_have_count(1)
def test_tabulator_hidden_columns(page, port, df_mixed):
widget = Tabulator(df_mixed, hidden_columns=['float', 'date', 'datetime'])
serve(widget, port=port, threaded=True, show=False)
time.sleep(0.5)
page.goto(f"http://localhost:{port}")
expected_text = """
index
int
str
bool
idx0
1
A
true
idx1
2
B
true
idx2
3
C
true
idx3
4
D
false
"""
# Check that the whole table content is on the page
table = page.locator('.pnx-tabulator.tabulator')
expect(table).to_have_text(expected_text, use_inner_text=True)
def test_tabulator_buttons_display(page, port, df_mixed):
nrows, ncols = df_mixed.shape
icon_text = 'icon'
widget = Tabulator(df_mixed, buttons={'Print': icon_text})
serve(widget, port=port, threaded=True, show=False)
time.sleep(0.2)
page.goto(f"http://localhost:{port}")
expected_ncols = ncols + 3 # _index + index + data columns + button col
# Check that an additional column has been added to the table
# with no header title
cols = page.locator(".tabulator-col")
expect(cols).to_have_count(expected_ncols)
button_col_idx = expected_ncols - 1
assert not cols.nth(button_col_idx).get_attribute('tabulator-field')
assert cols.nth(button_col_idx).inner_text() == '\xa0'
assert cols.nth(button_col_idx).is_visible()
# Check the button column has the right content
icons = page.locator(f'text="{icon_text}"')
assert icons.all_inner_texts() == [icon_text] * nrows
# Check the buttons are centered
for i in range(icons.count()):
assert 'text-align: center' in icons.nth(i).get_attribute('style')
def test_tabulator_buttons_event(page, port, df_mixed):
button_col_name = 'Print'
widget = Tabulator(df_mixed, buttons={button_col_name: 'icon'})
state = []
expected_state = [(button_col_name, 0, None)]
def cb(e):
state.append((e.column, e.row, e.value))
widget.on_click(cb)
serve(widget, port=port, threaded=True, show=False)
time.sleep(0.2)
page.goto(f"http://localhost:{port}")
icon = page.locator("text=icon").first
icon.wait_for()
# Click on the first button
icon.click()
time.sleep(0.2)
assert state == expected_state
wait_until(lambda: state == expected_state, page)
def test_tabulator_formatters_bokeh_bool(page, port, df_mixed):
s = [True] * len(df_mixed)
s[-1] = False
df_mixed['bool'] = s
widget = Tabulator(df_mixed, formatters={'bool': BooleanFormatter()})
serve(widget, port=port, threaded=True, show=False)
time.sleep(0.2)
page.goto(f"http://localhost:{port}")
# The BooleanFormatter renders with svg icons.
cells = page.locator(".tabulator-cell", has=page.locator("svg"))
expect(cells).to_have_count(len(df_mixed))
for i in range(len(df_mixed) - 1):
assert cells.nth(i).get_attribute('aria-checked') == 'true'
assert cells.last.get_attribute('aria-checked') == 'false'
def test_tabulator_formatters_bokeh_date(page, port, df_mixed):
widget = Tabulator(
df_mixed,
formatters={
'date': DateFormatter(format='COOKIE'),
'datetime': DateFormatter(format='%H:%M'),
},
)
serve(widget, port=port, threaded=True, show=False)
time.sleep(0.2)
page.goto(f"http://localhost:{port}")
expect(page.locator('text="10:00"')).to_have_count(1)
assert page.locator('text="Tue, 01 Jan 2019"').count() == 1
def test_tabulator_formatters_bokeh_date_with_nan(page, port, df_mixed):
df_mixed.loc['idx1', 'date'] = np.nan
df_mixed.loc['idx1', 'datetime'] = np.nan
widget = Tabulator(
df_mixed,
formatters={
'date': DateFormatter(format='COOKIE', nan_format='nan-date'),
'datetime': DateFormatter(format='%H:%M', nan_format= 'nan-datetime'),
},
)
serve(widget, port=port, threaded=True, show=False)
time.sleep(0.2)
page.goto(f"http://localhost:{port}")
expect(page.locator('text="10:00"')).to_have_count(1)
assert page.locator('text="Tue, 01 Jan 2019"').count() == 1
assert page.locator('text="nan-date"').count() == 1
assert page.locator('text="nan-datetime"').count() == 1
def test_tabulator_formatters_bokeh_number(page, port, df_mixed):
df_mixed.loc['idx1', 'int'] = np.nan
df_mixed.loc['idx1', 'float'] = np.nan
widget = Tabulator(
df_mixed,
formatters={
'int': NumberFormatter(format='0.000', nan_format='nan-int'),
'float': NumberFormatter(format='0.000', nan_format='nan-float'),
},
)
serve(widget, port=port, threaded=True, show=False)
time.sleep(0.2)
page.goto(f"http://localhost:{port}")
expect(page.locator('text="1.000"')).to_have_count(1)
assert page.locator('text="3.140"').count() == 1
assert page.locator('text="nan-int"').count() == 1
assert page.locator('text="nan-float"').count() == 1
def test_tabulator_formatters_bokeh_string(page, port, df_mixed):
widget = Tabulator(
df_mixed,
formatters={
'str': StringFormatter(font_style='bold', text_align='center', text_color='red'),
},
)
serve(widget, port=port, threaded=True, show=False)
time.sleep(0.2)
page.goto(f"http://localhost:{port}")
expect(page.locator('text="A"')).to_have_attribute(
"style",
"font-weight: bold; text-align: center; color: rgb(255, 0, 0);"
)
def test_tabulator_formatters_bokeh_html(page, port, df_mixed):
widget = Tabulator(
df_mixed,
formatters={
'str': HTMLTemplateFormatter(template='<p style="font-weight: bold;"><%= value %></p>'),
},
)
serve(widget, port=port, threaded=True, show=False)
time.sleep(0.2)
page.goto(f"http://localhost:{port}")
expect(page.locator('text="A"')).to_have_attribute(
"style",
"font-weight: bold;"
)
def test_tabulator_formatters_bokeh_scientific(page, port, df_mixed):
df_mixed['float'] = df_mixed['float'] * 1e6
df_mixed.loc['idx1', 'float'] = np.nan
widget = Tabulator(
df_mixed,
formatters={
'float': ScientificFormatter(precision=3, nan_format='nan-float'),
},
)
serve(widget, port=port, threaded=True, show=False)
time.sleep(0.2)
page.goto(f"http://localhost:{port}")
expect(page.locator('text="3.140e+6"')).to_have_count(1)
assert page.locator('text="nan-float"').count() == 1
def test_tabulator_formatters_tabulator_str(page, port, df_mixed):
widget = Tabulator(
df_mixed,
formatters={'int': 'star'},
)
serve(widget, port=port, threaded=True, show=False)
time.sleep(0.2)
page.goto(f"http://localhost:{port}")
# The star formatter renders with svg icons.
cells = page.locator(".tabulator-cell", has=page.locator("svg"))
expect(cells).to_have_count(len(df_mixed))
def test_tabulator_formatters_tabulator_dict(page, port, df_mixed):
nstars = 10
widget = Tabulator(
df_mixed,
formatters={'int': {'type': 'star', 'stars': nstars}},
)
serve(widget, port=port, threaded=True, show=False)
time.sleep(0.2)
page.goto(f"http://localhost:{port}")
# The star formatter renders with svg icons.
cells = page.locator(".tabulator-cell", has=page.locator("svg"))
expect(cells).to_have_count(len(df_mixed))
stars = page.locator('svg')
assert stars.count() == len(df_mixed) * nstars
def test_tabulator_formatters_after_init(page, port, df_mixed):
widget = Tabulator(df_mixed)
serve(widget, port=port, threaded=True, show=False)
time.sleep(0.2)
page.goto(f"http://localhost:{port}")
# Wait until the table is rendered
expect(page.locator('.tabulator-row')).to_have_count(len(df_mixed))
# Formatters can be set after initialization, the table should be
# updated accordingly
widget.formatters = {
'str': HTMLTemplateFormatter(template='<p style="font-weight: bold;"><%= value %></p>'),
}
expect(page.locator('text="A"')).to_have_attribute(
"style",
"font-weight: bold;"
)
def test_tabulator_editors_bokeh_string(page, port, df_mixed):
widget = Tabulator(df_mixed, editors={'str': StringEditor()})
serve(widget, port=port, threaded=True, show=False)
time.sleep(0.2)
page.goto(f"http://localhost:{port}")
cell = page.locator('text="A"')
cell.click()
# A StringEditor is turned into an input text tabulator editor
expect(page.locator('input[type="text"]')).to_have_count(1)
def test_tabulator_editors_bokeh_string_completions(page, port, df_mixed):
widget = Tabulator(df_mixed, editors={'str': StringEditor(completions=['AAA'])})
serve(widget, port=port, threaded=True, show=False)
time.sleep(0.2)
page.goto(f"http://localhost:{port}")
cell = page.locator('text="A"')
cell.click()
# A StringEditor with completions is turned into an autocomplete
# tabulator editor.
expect(page.locator('text="AAA"')).to_have_count(1)
def test_tabulator_editors_bokeh_text(page, port, df_mixed):
widget = Tabulator(df_mixed, editors={'str': TextEditor()})
serve(widget, port=port, threaded=True, show=False)
time.sleep(0.2)
page.goto(f"http://localhost:{port}")
cell = page.locator('text="A"')
cell.click()
# A TextEditor with completions is turned into a textarea
# tabulator editor.
expect(page.locator('textarea')).to_have_count(1)
def test_tabulator_editors_bokeh_int(page, port, df_mixed):
step = 2
widget = Tabulator(df_mixed, editors={'int': IntEditor(step=step)})
serve(widget, port=port, threaded=True, show=False)
time.sleep(0.2)
page.goto(f"http://localhost:{port}")
cell = page.locator('text="1"').first
cell.click()
# An IntEditor with step is turned into a number tabulator editor
# with step respected
input = page.locator('input[type="number"]')
expect(input).to_have_count(1)
assert int(input.get_attribute('step')) == step
def test_tabulator_editors_bokeh_number(page, port, df_mixed):
step = 0.1
widget = Tabulator(df_mixed, editors={'float': NumberEditor(step=step)})
serve(widget, port=port, threaded=True, show=False)
time.sleep(0.2)
page.goto(f"http://localhost:{port}")
cell = page.locator('text="3.14"')
cell.click()
# A NumberEditor with step is turned into a number tabulator editor
# with step respected
input = page.locator('input[type="number"]')
expect(input).to_have_count(1)
assert input.get_attribute('step') == str(step)
def test_tabulator_editors_bokeh_checkbox(page, port, df_mixed):
widget = Tabulator(df_mixed, editors={'bool': CheckboxEditor()})
serve(widget, port=port, threaded=True, show=False)
time.sleep(0.2)
page.goto(f"http://localhost:{port}")
cell = page.locator('text="true"').first
cell.click()
# A CheckboxEditor is turned into a tickCross tabulator editor
input = page.locator('input[type="checkbox"]')
expect(input).to_have_count(1)
assert input.get_attribute('value') == "true"
def test_tabulator_editors_bokeh_date(page, port, df_mixed):
widget = Tabulator(df_mixed, editors={'date': DateEditor()})
serve(widget, port=port, threaded=True, show=False)
time.sleep(0.2)
page.goto(f"http://localhost:{port}")
cell = page.locator('text="2019-01-01"')
cell.click()
# A DateEditor is turned into a Panel date editor
expect(page.locator('input[type="date"]')).to_have_count(1)
def test_tabulator_editors_bokeh_select(page, port, df_mixed):
widget = Tabulator(df_mixed, editors={'str': SelectEditor(options=['option1'])})
serve(widget, port=port, threaded=True, show=False)
time.sleep(0.2)
page.goto(f"http://localhost:{port}")
cell = page.locator('text="A"')
cell.click()
# A SelectEditor with options is turned into a select tabulator editor.
expect(page.locator('text="option1"')).to_have_count(1)
def test_tabulator_editors_panel_date(page, port, df_mixed):
widget = Tabulator(df_mixed, editors={'date': 'date'})
serve(widget, port=port, threaded=True, show=False)
time.sleep(0.2)
page.goto(f"http://localhost:{port}")
cell = page.locator('text="2019-01-01"')
cell.click()
# A date editor is turned into an date input
cell_edit = page.locator('input[type="date"]')
new_date = "1980-01-01"
cell_edit.fill(new_date)
# Need to Enter to validate the change
page.locator('input[type="date"]').press('Enter')
expect(page.locator(f'text="{new_date}"')).to_have_count(1)
new_date = dt.datetime.strptime(new_date, '%Y-%m-%d').date()
assert new_date in widget.value['date'].tolist()
cell = page.locator(f'text="{new_date}"')
cell.click()
cell_edit = page.locator('input[type="date"]')
new_date2 = "1990-01-01"
cell_edit.fill(new_date2)
# Escape invalidates the change
page.locator('input[type="date"]').press('Escape')
expect(page.locator(f'text="{new_date2}"')).to_have_count(0)
new_date2 = dt.datetime.strptime(new_date2, '%Y-%m-%d').date()
assert new_date2 not in widget.value['date'].tolist()
def test_tabulator_editors_panel_datetime(page, port, df_mixed):
widget = Tabulator(df_mixed, editors={'datetime': 'datetime'})
serve(widget, port=port, threaded=True, show=False)
time.sleep(0.2)
page.goto(f"http://localhost:{port}")
cell = page.locator('text="2019-01-01 10:00:00"')
cell.click()
# A date editor is turned into an date input
cell_edit = page.locator('input[type="datetime-local"]')
new_datetime = dt.datetime(1980, 11, 30, 4, 51, 0)
time_to_fill = new_datetime.isoformat()
# Somehow the seconds don't seem to be handled by datetime-local
time_to_fill = time_to_fill[:-3]
cell_edit.fill(time_to_fill)
# Need to Enter to validate the change
page.locator('input[type="datetime-local"]').press('Enter')
new_datetime_display = new_datetime.strftime('%Y-%m-%d %H:%M:%S')
expect(page.locator(f'text="{new_datetime_display}"')).to_have_count(1)
wait_until(lambda: new_datetime in widget.value['datetime'].tolist(), page)
cell = page.locator(f'text="{new_datetime_display}"')
cell.click()
cell_edit = page.locator('input[type="datetime-local"]')
new_datetime2 = dt.datetime(1990, 3, 31, 12, 45, 0)
time_to_fill2 = new_datetime2.isoformat()
time_to_fill2 = time_to_fill2[:-3]
cell_edit.fill(time_to_fill2)
# Escape invalidates the change
page.locator('input[type="datetime-local"]').press('Escape')
new_datetime_display2 = new_datetime2.strftime('%Y-%m-%d %H:%M:%S')
expect(page.locator(f'text="{new_datetime_display2}"')).to_have_count(0)
assert new_datetime2 not in widget.value['datetime'].tolist()
def test_tabulator_editors_tabulator_disable_one(page, port, df_mixed):
widget = Tabulator(
df_mixed,
editors={'float': None},
)
serve(widget, port=port, threaded=True, show=False)
time.sleep(0.2)
page.goto(f"http://localhost:{port}")
page.locator('text="3.14"').click()
page.wait_for_timeout(200)
expect(page.locator('input[type="number"]')).to_have_count(0)
def test_tabulator_editors_tabulator_str(page, port, df_mixed):
widget = Tabulator(df_mixed, editors={'str': 'textarea'})
serve(widget, port=port, threaded=True, show=False)
time.sleep(0.2)
page.goto(f"http://localhost:{port}")
cell = page.locator('text="A"')
cell.click()
expect(page.locator('textarea')).to_have_count(1)
def test_tabulator_editors_tabulator_dict(page, port, df_mixed):
widget = Tabulator(
df_mixed,
editors={'str': {'type': 'textarea', 'elementAttributes': {'maxlength': '10'}}}
)
serve(widget, port=port, threaded=True, show=False)
time.sleep(0.2)
page.goto(f"http://localhost:{port}")
cell = page.locator('text="A"')
cell.click()
textarea = page.locator('textarea')
expect(textarea).to_have_count(1)
assert textarea.get_attribute('maxlength') == "10"
def test_tabulator_editors_tabulator_list_default(page, port):
df = pd.DataFrame({'values': ['A', 'B']})
widget = Tabulator(df, header_filters={'values': 'list'})
serve(widget, port=port, threaded=True, show=False)
time.sleep(0.2)
page.goto(f"http://localhost:{port}")
header = page.locator('input[type="search"]')
expect(header).to_have_count(1)
header.click()
# There should be a select element with the list of unique values
# found in the column.
expect(page.locator('.tabulator-edit-list')).to_have_text('AB')
@pytest.mark.parametrize('layout', Tabulator.param['layout'].objects)
def test_tabulator_column_layouts(page, port, df_mixed, layout):
widget = Tabulator(df_mixed, layout=layout)
serve(widget, port=port, threaded=True, show=False)
time.sleep(0.2)
page.goto(f"http://localhost:{port}")
layout_mapping = {
"fit_data": "fitData",
"fit_data_fill": "fitDataFill",
"fit_data_stretch": "fitDataStretch",
"fit_data_table": "fitDataTable",
"fit_columns": "fitColumns",
}
expected_layout = layout_mapping[layout]
expect(page.locator('.pnx-tabulator')).to_have_attribute('tabulator-layout', expected_layout)
def test_tabulator_alignment_header_default(page, port, df_mixed):
widget = Tabulator(df_mixed)
serve(widget, port=port, threaded=True, show=False)
time.sleep(0.2)
page.goto(f"http://localhost:{port}")
# The default header alignment is left
for col in df_mixed.columns:
expect(page.locator(f'text="{col}"')).to_have_css('text-align', 'left')
def test_tabulator_alignment_text_default(page, port, df_mixed):
widget = Tabulator(df_mixed)
serve(widget, port=port, threaded=True, show=False)
time.sleep(0.2)
page.goto(f"http://localhost:{port}")
findex = df_mixed.index[0]
cell = page.locator(f'text="{findex}"')
# Indexes are left aligned
expect(cell).to_have_css('text-align', 'left')
val = df_mixed.at[findex, 'int']
    # Selecting the visible 1 as there's a non-displayed 1 in the hidden index
cell = page.locator(f'text="{val}"').first
# Integers are right aligned
expect(cell).to_have_css('text-align', 'right')
val = df_mixed.at[findex, 'float']
cell = page.locator(f'text="{val}"')
# Floats are right aligned
expect(cell).to_have_css('text-align', 'right')
val = df_mixed.at[findex, 'bool']
val = 'true' if val else 'false'
cell = page.locator(f'text="{val}"').first
# Booleans are centered
expect(cell).to_have_css('text-align', 'center')
val = df_mixed.at[findex, 'datetime']
val = val.strftime('%Y-%m-%d %H:%M:%S')
cell = page.locator(f'text="{val}"')
# Datetimes are right aligned
expect(cell).to_have_css('text-align', 'right')
val = df_mixed.at[findex, 'str']
cell = page.locator(f'text="{val}"')
# Other types are left aligned
expect(cell).to_have_css('text-align', 'left')
def test_tabulator_alignment_header_str(page, port, df_mixed):
halign = 'center'
widget = Tabulator(df_mixed, header_align=halign)
serve(widget, port=port, threaded=True, show=False)
time.sleep(0.2)
page.goto(f"http://localhost:{port}")
for col in df_mixed.columns:
expect(page.locator(f'text="{col}"')).to_have_css('text-align', halign)
def test_tabulator_alignment_header_dict(page, port, df_mixed):
halign = {'int': 'left'}
widget = Tabulator(df_mixed, header_align=halign)
serve(widget, port=port, threaded=True, show=False)
time.sleep(0.2)
page.goto(f"http://localhost:{port}")
# for col in df_mixed.columns:
for col, align in halign.items():
expect(page.locator(f'text="{col}"')).to_have_css('text-align', align)
def test_tabulator_alignment_text_str(page, port, df_mixed):
talign = 'center'
widget = Tabulator(df_mixed, text_align=talign)
serve(widget, port=port, threaded=True, show=False)
time.sleep(0.2)
page.goto(f"http://localhost:{port}")
cells = page.locator('.tabulator-cell:visible')
expect(cells).to_have_count(len(df_mixed) * (df_mixed.shape[1] + 1))
for i in range(cells.count()):
expect(cells.nth(i)).to_have_css('text-align', talign)
def test_tabulator_frozen_columns(page, port, df_mixed):
widths = 100
width = int(((df_mixed.shape[1] + 1) * widths) / 2)
frozen_cols = ['float', 'int']
widget = Tabulator(df_mixed, frozen_columns=frozen_cols, width=width, widths=widths)
serve(widget, port=port, threaded=True, show=False)
time.sleep(0.2)
page.goto(f"http://localhost:{port}")
expected_text = """
float
int
index
str
bool
date
datetime
3.14
1
idx0
A
true
2019-01-01
2019-01-01 10:00:00
6.28
2
idx1
B
true
2020-01-01
2020-01-01 12:00:00
9.42
3
idx2
C
true
2020-01-10
2020-01-10 13:00:00
-2.45
4
idx3
D
false
2019-01-10
2020-01-15 13:00:00
"""
    # Check that the whole table content is on the page; it is not in the
    # same order as when the table is displayed without frozen columns
table = page.locator('.pnx-tabulator.tabulator')
expect(table).to_have_text(
expected_text,
use_inner_text=True
)
float_bb = page.locator('text="float"').bounding_box()
int_bb = page.locator('text="int"').bounding_box()
bool_bb = page.locator('text="bool"').bounding_box()
# Check that the float column is rendered before the int column
assert float_bb['x'] < int_bb['x']
# Scroll to the right, and give it a little extra time
page.locator('text="2019-01-01 10:00:00"').scroll_into_view_if_needed()
page.wait_for_timeout(200)
# Check that the two frozen columns haven't moved after scrolling right
assert float_bb == page.locator('text="float"').bounding_box()
assert int_bb == page.locator('text="int"').bounding_box()
# But check that the position of one of the non frozen columns has indeed moved
assert bool_bb['x'] > page.locator('text="bool"').bounding_box()['x']
def test_tabulator_frozen_rows(page, port):
arr = np.array(['a'] * 10)
arr[1] = 'X'
arr[-2] = 'Y'
arr[-1] = 'T'
df = pd.DataFrame({'col': arr})
height, width = 200, 200
widget = Tabulator(df, frozen_rows=[-2, 1], height=height, width=width)
serve(widget, port=port, threaded=True, show=False)
time.sleep(0.2)
page.goto(f"http://localhost:{port}")
expected_text = """
index
col
1
X
8
Y
0
a
2
a
3
a
4
a
5
a
6
a
7
a
9
T
"""
expect(page.locator('.tabulator')).to_have_text(
expected_text,
use_inner_text=True
)
X_bb = page.locator('text="X"').bounding_box()
Y_bb = page.locator('text="Y"').bounding_box()
# Scroll to the bottom, and give it a little extra time
page.locator('text="T"').scroll_into_view_if_needed()
page.wait_for_timeout(200)
    # Check that the two frozen rows haven't moved after scrolling down
assert X_bb == page.locator('text="X"').bounding_box()
assert Y_bb == page.locator('text="Y"').bounding_box()
@pytest.mark.xfail(reason='See https://github.com/holoviz/panel/issues/3669')
def test_tabulator_patch_no_horizontal_rescroll(page, port, df_mixed):
widths = 100
width = int(((df_mixed.shape[1] + 1) * widths) / 2)
df_mixed['tomodify'] = 'target'
widget = Tabulator(df_mixed, width=width, widths=widths)
serve(widget, port=port, threaded=True, show=False)
time.sleep(0.2)
page.goto(f"http://localhost:{port}")
cell = page.locator('text="target"').first
# Scroll to the right
cell.scroll_into_view_if_needed()
page.wait_for_timeout(200)
bb = page.locator('text="tomodify"').bounding_box()
# Patch a cell in the latest column
widget.patch({'tomodify': [(0, 'target-modified')]}, as_index=False)
# Catch a potential rescroll
page.wait_for_timeout(400)
# The table should keep the same scroll position
# This fails
assert bb == page.locator('text="tomodify"').bounding_box()
@pytest.mark.xfail(reason='See https://github.com/holoviz/panel/issues/3249')
def test_tabulator_patch_no_vertical_rescroll(page, port):
size = 10
arr = np.random.choice(list('abcd'), size=size)
target, new_val = 'X', 'Y'
arr[-1] = target
df = pd.DataFrame({'col': arr})
height, width = 100, 200
widget = Tabulator(df, height=height, width=width)
serve(widget, port=port, threaded=True, show=False)
time.sleep(0.2)
page.goto(f"http://localhost:{port}")
# Scroll to the bottom
target_cell = page.locator(f'text="{target}"')
target_cell.scroll_into_view_if_needed()
page.wait_for_timeout(400)
    # Unfortunately that doesn't scroll down quite enough, so we do the
    # remaining scroll manually, which is more brittle: set the mouse
    # somewhere in the table and scroll down.
page.mouse.move(x=int(width/2), y=int(height/2))
page.mouse.wheel(delta_x=0, delta_y=10000)
# Give it time to scroll
page.wait_for_timeout(400)
bb = page.locator(f'text="{target}"').bounding_box()
# Patch a cell in the latest row
widget.patch({'col': [(size-1, new_val)]})
# Wait to catch a potential rescroll
page.wait_for_timeout(400)
# The table should keep the same scroll position
# This fails
assert bb == page.locator(f'text="{new_val}"').bounding_box()
@pytest.mark.parametrize(
'pagination',
(
pytest.param('local', marks=pytest.mark.xfail(reason='See https://github.com/holoviz/panel/issues/3553')),
pytest.param('remote', marks=pytest.mark.xfail(reason='See https://github.com/holoviz/panel/issues/3553')),
None,
)
)
def test_tabulator_header_filter_no_horizontal_rescroll(page, port, df_mixed, pagination):
widths = 100
width = int(((df_mixed.shape[1] + 1) * widths) / 2)
col_name = 'newcol'
df_mixed[col_name] = 'on'
widget = Tabulator(
df_mixed,
width=width,
widths=widths,
header_filters={col_name: {'type': 'input', 'func': 'like'}},
pagination=pagination
)
serve(widget, port=port, threaded=True, show=False)
time.sleep(0.3)
page.goto(f"http://localhost:{port}")
header = page.locator(f'text="{col_name}"')
# Scroll to the right
header.scroll_into_view_if_needed()
bb = header.bounding_box()
header = page.locator('input[type="search"]')
header.click()
header.fill('off')
header.press('Enter')
# Wait to catch a potential rescroll
page.wait_for_timeout(400)
header = page.locator(f'text="{col_name}"')
header.wait_for()
# The table should keep the same scroll position, this fails
assert bb == header.bounding_box()
# assert bb == page.locator(f'text="{col_name}"').bounding_box()
def test_tabulator_header_filter_always_visible(page, port, df_mixed):
col_name = 'newcol'
df_mixed[col_name] = 'on'
widget = Tabulator(
df_mixed,
header_filters={col_name: {'type': 'input', 'func': 'like'}},
)
serve(widget, port=port, threaded=True, show=False)
time.sleep(0.2)
page.goto(f"http://localhost:{port}")
header = page.locator('input[type="search"]')
expect(header).to_have_count(1)
header.click()
header.fill('off')
header.press('Enter')
wait_until(lambda: widget.current_view.empty, page)
header = page.locator('input[type="search"]')
expect(header).to_have_count(1)
@pytest.mark.parametrize('theme', Tabulator.param['theme'].objects)
def test_tabulator_theming(page, port, df_mixed, df_mixed_as_string, theme):
# Subscribe the response events to check that the CSS is loaded
responses = []
page.on("response", lambda response: responses.append(response))
widget = Tabulator(df_mixed, theme=theme)
serve(widget, port=port, threaded=True, show=False)
time.sleep(0.2)
page.goto(f"http://localhost:{port}")
# Check that the whole table content is on the page
table = page.locator('.pnx-tabulator.tabulator')
expect(table).to_have_text(
df_mixed_as_string,
use_inner_text=True
)
found = False
theme = _TABULATOR_THEMES_MAPPING.get(theme, theme)
for response in responses:
base = response.url.split('/')[-1]
if base == f'tabulator_{theme}.min.css':
found = True
break
# default theme
elif base == 'tabulator.min.css':
found = True
break
assert found
assert response.status
def test_tabulator_selection_selectable_by_default(page, port, df_mixed):
widget = Tabulator(df_mixed)
serve(widget, port=port, threaded=True, show=False)
time.sleep(0.2)
page.goto(f"http://localhost:{port}")
assert widget.selectable
# Click on the first row of the index column to select the row
rows = page.locator('.tabulator-row')
c0 = page.locator('text="idx0"')
c0.wait_for()
c0.click()
wait_until(lambda: widget.selection == [0], page)
assert 'tabulator-selected' in rows.first.get_attribute('class')
for i in range(1, rows.count()):
assert 'tabulator-selected' not in rows.nth(i).get_attribute('class')
expected_selected = df_mixed.loc[['idx0'], :]
assert widget.selected_dataframe.equals(expected_selected)
def test_tabulator_selection_selectable_one_at_a_time(page, port, df_mixed):
widget = Tabulator(df_mixed)
serve(widget, port=port, threaded=True, show=False)
time.sleep(0.2)
page.goto(f"http://localhost:{port}")
rows = page.locator('.tabulator-row')
# Click on the first row of the index column to select the row
c0 = page.locator('text="idx0"')
c0.wait_for()
c0.click()
wait_until(lambda: widget.selection == [0], page)
expected_selected = df_mixed.loc[['idx0'], :]
assert widget.selected_dataframe.equals(expected_selected)
# Click on the second row should deselect the first one
page.locator('text="idx1"').click()
wait_until(lambda: widget.selection == [1], page)
expected_selected = df_mixed.loc[['idx1'], :]
assert widget.selected_dataframe.equals(expected_selected)
for i in range(rows.count()):
if i == 1:
assert 'tabulator-selected' in rows.nth(i).get_attribute('class')
else:
assert 'tabulator-selected' not in rows.nth(i).get_attribute('class')
# Clicking again on the second row should not change anything
page.locator('text="idx1"').click()
wait_until(lambda: widget.selection == [1], page)
assert widget.selected_dataframe.equals(expected_selected)
for i in range(rows.count()):
if i == 1:
assert 'tabulator-selected' in rows.nth(i).get_attribute('class')
else:
assert 'tabulator-selected' not in rows.nth(i).get_attribute('class')
def test_tabulator_selection_selectable_ctrl(page, port, df_mixed):
widget = Tabulator(df_mixed)
serve(widget, port=port, threaded=True, show=False)
time.sleep(0.2)
page.goto(f"http://localhost:{port}")
rows = page.locator('.tabulator-row')
# Click on the first row of the index column to select the row
c0 = page.locator('text="idx0"')
c0.wait_for()
c0.click()
# Click on the third row with CTRL pressed should add that row to the selection
modifier = get_ctrl_modifier()
page.locator("text=idx2").click(modifiers=[modifier])
expected_selection = [0, 2]
wait_until(lambda: widget.selection == expected_selection, page)
expected_selected = df_mixed.loc[['idx0', 'idx2'], :]
assert widget.selected_dataframe.equals(expected_selected)
for i in range(rows.count()):
if i in expected_selection:
assert 'tabulator-selected' in rows.nth(i).get_attribute('class')
else:
assert 'tabulator-selected' not in rows.nth(i).get_attribute('class')
# Clicking again on the third row with CTRL pressed should remove the row from the selection
page.locator("text=idx2").click(modifiers=[modifier])
expected_selection = [0]
wait_until(lambda: widget.selection == expected_selection, page)
expected_selected = df_mixed.loc[['idx0'], :]
assert widget.selected_dataframe.equals(expected_selected)
for i in range(rows.count()):
if i in expected_selection:
assert 'tabulator-selected' in rows.nth(i).get_attribute('class')
else:
assert 'tabulator-selected' not in rows.nth(i).get_attribute('class')
def test_tabulator_selection_selectable_shift(page, port, df_mixed):
widget = Tabulator(df_mixed)
serve(widget, port=port, threaded=True, show=False)
time.sleep(0.2)
page.goto(f"http://localhost:{port}")
rows = page.locator('.tabulator-row')
# Click on the first row of the index column to select the row
c0 = page.locator('text="idx0"')
c0.wait_for()
c0.click()
# Click on the third row with SHIFT pressed should select the 2nd row too
page.locator("text=idx2").click(modifiers=['Shift'])
expected_selection = [0, 1, 2]
wait_until(lambda: widget.selection == expected_selection, page)
expected_selected = df_mixed.loc['idx0':'idx2', :]
assert widget.selected_dataframe.equals(expected_selected)
for i in range(rows.count()):
if i in expected_selection:
assert 'tabulator-selected' in rows.nth(i).get_attribute('class')
else:
assert 'tabulator-selected' not in rows.nth(i).get_attribute('class')
def test_tabulator_selection_selectable_disabled(page, port, df_mixed):
widget = Tabulator(df_mixed, selectable=False)
serve(widget, port=port, threaded=True, show=False)
time.sleep(0.2)
page.goto(f"http://localhost:{port}")
# Click on the first row of the index column
rows = page.locator('.tabulator-row')
c0 = page.locator('text="idx0"')
c0.wait_for()
c0.click()
    # Wait long enough for a potential selection event to propagate; none
    # should occur since selection is disabled.
page.wait_for_timeout(200)
assert widget.selection == []
assert widget.selected_dataframe.empty
for i in range(rows.count()):
assert 'tabulator-selected' not in rows.nth(i).get_attribute('class')
def test_tabulator_selection_default_selection(page, port, df_mixed):
selection = [0, 2]
widget = Tabulator(df_mixed, selection=[0, 2])
serve(widget, port=port, threaded=True, show=False)
time.sleep(0.2)
page.goto(f"http://localhost:{port}")
rows = page.locator('.tabulator-row')
# Check that the rows in the selection are selected in the front-end
for i in range(rows.count()):
if i in selection:
assert 'tabulator-selected' in rows.nth(i).get_attribute('class')
else:
assert 'tabulator-selected' not in rows.nth(i).get_attribute('class')
expected_selected = df_mixed.loc[['idx0', 'idx2'], :]
assert widget.selected_dataframe.equals(expected_selected)
def test_tabulator_selection_selectable_checkbox_all(page, port, df_mixed):
widget = Tabulator(df_mixed, selectable='checkbox')
serve(widget, port=port, threaded=True, show=False)
time.sleep(0.2)
page.goto(f"http://localhost:{port}")
# Select the first checkbox and check it
checkboxes = page.locator('input[type="checkbox"]')
checkboxes.first.wait_for()
checkboxes.first.check()
# All the checkboxes should be checked
for i in range(checkboxes.count()):
assert checkboxes.nth(i).is_checked()
# And all the rows should be selected
rows = page.locator('.tabulator-row')
for i in range(rows.count()):
assert 'tabulator-selected' in rows.nth(i).get_attribute('class')
# The selection should have all the indexes
wait_until(lambda: widget.selection == list(range(len(df_mixed))), page)
assert widget.selected_dataframe.equals(df_mixed)
def test_tabulator_selection_selectable_checkbox_multiple(page, port, df_mixed):
widget = Tabulator(df_mixed, selectable='checkbox')
serve(widget, port=port, threaded=True, show=False)
time.sleep(0.2)
page.goto(f"http://localhost:{port}")
checkboxes = page.locator('input[type="checkbox"]')
checkboxes.first.wait_for()
checkboxes.nth(1).check()
checkboxes.last.check()
expected_selection = [0, len(df_mixed) - 1]
for i in range(1, checkboxes.count()):
if (i - 1) in expected_selection:
assert checkboxes.nth(i).is_checked()
else:
assert not checkboxes.nth(i).is_checked()
rows = page.locator('.tabulator-row')
for i in range(rows.count()):
if i in expected_selection:
assert 'tabulator-selected' in rows.nth(i).get_attribute('class')
else:
assert 'tabulator-selected' not in rows.nth(i).get_attribute('class')
wait_until(lambda: widget.selection == expected_selection, page)
expected_selected = df_mixed.iloc[expected_selection, :]
assert widget.selected_dataframe.equals(expected_selected)
def test_tabulator_selection_selectable_checkbox_single(page, port, df_mixed):
widget = Tabulator(df_mixed, selectable='checkbox-single')
serve(widget, port=port, threaded=True, show=False)
time.sleep(0.2)
page.goto(f"http://localhost:{port}")
checkboxes = page.locator('input[type="checkbox"]')
expect(checkboxes).to_have_count(len(df_mixed))
checkboxes.first.check()
checkboxes.last.check()
expected_selection = [0, len(df_mixed) - 1]
for i in range(checkboxes.count()):
if i in expected_selection:
assert checkboxes.nth(i).is_checked()
else:
assert not checkboxes.nth(i).is_checked()
rows = page.locator('.tabulator-row')
for i in range(rows.count()):
if i in expected_selection:
assert 'tabulator-selected' in rows.nth(i).get_attribute('class')
else:
assert 'tabulator-selected' not in rows.nth(i).get_attribute('class')
wait_until(lambda: widget.selection == expected_selection, page)
expected_selected = df_mixed.iloc[expected_selection, :]
assert widget.selected_dataframe.equals(expected_selected)
def test_tabulator_selection_selectable_toggle(page, port, df_mixed):
widget = Tabulator(df_mixed, selectable='toggle')
serve(widget, port=port, threaded=True, show=False)
time.sleep(0.2)
page.goto(f"http://localhost:{port}")
rows = page.locator('.tabulator-row')
# Click on the first row of the index column to select the row
c0 = page.locator('text="idx0"')
c0.wait_for()
c0.click()
for i in range(rows.count()):
if i == 0:
assert 'tabulator-selected' in rows.nth(i).get_attribute('class')
else:
assert 'tabulator-selected' not in rows.nth(i).get_attribute('class')
wait_until(lambda: widget.selection == [0], page)
expected_selected = df_mixed.loc[['idx0'], :]
assert widget.selected_dataframe.equals(expected_selected)
# Click on the second row, the first row should still be selected
page.locator('text="idx1"').click()
for i in range(rows.count()):
if i in [0, 1]:
assert 'tabulator-selected' in rows.nth(i).get_attribute('class')
else:
assert 'tabulator-selected' not in rows.nth(i).get_attribute('class')
wait_until(lambda: widget.selection == [0, 1], page)
expected_selected = df_mixed.loc[['idx0', 'idx1'], :]
assert widget.selected_dataframe.equals(expected_selected)
    # Clicking on a selected row should deselect it
page.locator('text="idx1"').click()
for i in range(rows.count()):
if i == 0:
assert 'tabulator-selected' in rows.nth(i).get_attribute('class')
else:
assert 'tabulator-selected' not in rows.nth(i).get_attribute('class')
wait_until(lambda: widget.selection == [0], page)
expected_selected = df_mixed.loc[['idx0'], :]
assert widget.selected_dataframe.equals(expected_selected)
def test_tabulator_selection_selectable_rows(page, port, df_mixed):
widget = Tabulator(df_mixed, selectable_rows=lambda df: [1])
serve(widget, port=port, threaded=True, show=False)
time.sleep(0.2)
page.goto(f"http://localhost:{port}")
rows = page.locator('.tabulator-row')
# Click on the first row of the index column to select the row
c1 = page.locator('text="idx1"')
c1.wait_for()
c1.click()
wait_until(lambda: widget.selection == [1], page)
expected_selected = df_mixed.loc[['idx1'], :]
assert widget.selected_dataframe.equals(expected_selected)
    # Clicking on the first row with CTRL pressed should NOT add that row to the
    # selection, as this row is not selectable
modifier = get_ctrl_modifier()
page.locator("text=idx0").click(modifiers=[modifier])
page.wait_for_timeout(200)
assert widget.selection == [1]
for i in range(rows.count()):
if i == 1:
assert 'tabulator-selected' in rows.nth(i).get_attribute('class')
else:
assert 'tabulator-selected' not in rows.nth(i).get_attribute('class')
assert widget.selected_dataframe.equals(expected_selected)
@pytest.mark.flaky(max_runs=3)
def test_tabulator_row_content(page, port, df_mixed):
widget = Tabulator(df_mixed, row_content=lambda i: f"{i['str']}-row-content")
serve(widget, port=port, threaded=True, show=False)
time.sleep(0.2)
page.goto(f"http://localhost:{port}")
openables = page.locator('text="►"')
expect(openables).to_have_count(len(df_mixed))
expected_expanded = []
for i in range(len(df_mixed)):
openables = page.locator('text="►"')
openables.first.click()
row_content = page.locator(f'text="{df_mixed.iloc[i]["str"]}-row-content"')
expect(row_content).to_have_count(1)
closables = page.locator('text="▼"')
expect(closables).to_have_count(i + 1)
assert row_content.is_visible()
expected_expanded.append(i)
wait_until(lambda: widget.expanded == expected_expanded, page)
for i in range(len(df_mixed)):
closables = page.locator('text="▼"')
closables.first.click()
row_content = page.locator(f'text="{df_mixed.iloc[i]["str"]}-row-content"')
expect(row_content).to_have_count(0) # timeout here?
expected_expanded.remove(i)
wait_until(lambda: widget.expanded == expected_expanded, page)
def test_tabulator_row_content_expand_from_python_init(page, port, df_mixed):
widget = Tabulator(
df_mixed,
row_content=lambda i: f"{i['str']}-row-content",
        expanded=[0, 2],
)
serve(widget, port=port, threaded=True, show=False)
time.sleep(0.2)
page.goto(f"http://localhost:{port}")
for i in range(len(df_mixed)):
row_content = page.locator(f'text="{df_mixed.iloc[i]["str"]}-row-content"')
if i in widget.expanded:
expect(row_content).to_have_count(1)
else:
expect(row_content).to_have_count(0)
openables = page.locator('text="►"')
closables = page.locator('text="▼"')
assert closables.count() == len(widget.expanded)
assert openables.count() == len(df_mixed) - len(widget.expanded)
def test_tabulator_row_content_expand_from_python_after(page, port, df_mixed):
widget = Tabulator(df_mixed, row_content=lambda i: f"{i['str']}-row-content")
serve(widget, port=port, threaded=True, show=False)
time.sleep(0.2)
page.goto(f"http://localhost:{port}")
# Expanding the rows after the server is launched
widget.expanded = [0, 2]
for i in range(len(df_mixed)):
row_content = page.locator(f'text="{df_mixed.iloc[i]["str"]}-row-content"')
if i in widget.expanded:
expect(row_content).to_have_count(1)
else:
expect(row_content).to_have_count(0)
openables = page.locator('text="►"')
closables = page.locator('text="▼"')
assert closables.count() == len(widget.expanded)
assert openables.count() == len(df_mixed) - len(widget.expanded)
widget.expanded = []
time.sleep(0.2)
openables = page.locator('text="►"')
closables = page.locator('text="▼"')
assert closables.count() == 0
assert openables.count() == len(df_mixed)
def test_tabulator_groups(page, port, df_mixed):
widget = Tabulator(
df_mixed,
groups={'Group1': ['int', 'float'], 'Group2': ['date', 'datetime']},
)
serve(widget, port=port, threaded=True, show=False)
time.sleep(0.2)
page.goto(f"http://localhost:{port}")
expected_text = """
index
Group1
int
float
str
bool
Group2
date
datetime
idx0
1
3.14
A
true
2019-01-01
2019-01-01 10:00:00
idx1
2
6.28
B
true
2020-01-01
2020-01-01 12:00:00
idx2
3
9.42
C
true
2020-01-10
2020-01-10 13:00:00
idx3
4
-2.45
D
false
2019-01-10
2020-01-15 13:00:00
"""
expect(page.locator('.tabulator')).to_have_text(
expected_text,
use_inner_text=True,
)
expect(page.locator('.tabulator-col-group')).to_have_count(2)
def test_tabulator_groupby(page, port):
df = pd.DataFrame({
'cat1': ['A', 'B', 'A', 'A', 'B', 'B', 'B'],
'cat2': ['X', 'X', 'X', 'X', 'Y', 'Y', 'Y'],
'value': list(range(7)),
})
widget = Tabulator(df, groupby=['cat1', 'cat2'])
serve(widget, port=port, threaded=True, show=False)
time.sleep(0.2)
page.goto(f"http://localhost:{port}")
expected_text = """
index
cat1
cat2
value
cat1: A, cat2: X(3 items)
0
A
X
0
2
A
X
2
3
A
X
3
cat1: B, cat2: X(1 item)
1
B
X
1
cat1: B, cat2: Y(3 items)
4
B
Y
4
5
B
Y
5
6
B
Y
6
"""
expect(page.locator('.tabulator')).to_have_text(
expected_text,
use_inner_text=True,
)
expect(page.locator('.tabulator-group')).to_have_count(3)
@pytest.mark.xfail(reason='See https://github.com/holoviz/panel/issues/3564')
def test_tabulator_hierarchical(page, port, df_multiindex):
widget = Tabulator(df_multiindex, hierarchical=True)
serve(widget, port=port, threaded=True, show=False)
time.sleep(0.2)
page.goto(f"http://localhost:{port}")
expect(page.locator('text="Index: groups | subgroups"')).to_have_count(1)
for i in range(len(df_multiindex.index.get_level_values(0).unique())):
gr = page.locator(f'text="group{i}"')
expect(gr).to_have_count(1)
assert gr.is_visible()
for i in range(len(df_multiindex.index.get_level_values(1).unique())):
subgr = page.locator(f'text="subgroup{i}"')
expect(subgr).to_have_count(0)
# This fails
page.locator("text=group1 >> div").first.click(timeout=2000)
for i in range(len(df_multiindex.index.get_level_values(1).unique())):
subgr = page.locator(f'text="subgroup{i}"')
expect(subgr).to_have_count(1)
assert subgr.is_visible()
def test_tabulator_cell_click_event(page, port, df_mixed):
widget = Tabulator(df_mixed)
values = []
widget.on_click(lambda e: values.append((e.column, e.row, e.value)))
serve(widget, port=port, threaded=True, show=False)
time.sleep(0.5)
page.goto(f"http://localhost:{port}")
page.locator('text="idx0"').click()
wait_until(lambda: len(values) >= 1, page)
assert values[-1] == ('index', 0, 'idx0')
page.locator('text="A"').click()
wait_until(lambda: len(values) >= 2, page)
assert values[-1] == ('str', 0, 'A')
def test_tabulator_edit_event(page, port, df_mixed):
widget = Tabulator(df_mixed)
values = []
widget.on_edit(lambda e: values.append((e.column, e.row, e.old, e.value)))
serve(widget, port=port, threaded=True, show=False)
time.sleep(0.2)
page.goto(f"http://localhost:{port}")
cell = page.locator('text="A"')
cell.click()
editable_cell = page.locator('input[type="text"]')
editable_cell.fill("AA")
editable_cell.press('Enter')
wait_until(lambda: len(values) >= 1, page)
assert values[0] == ('str', 0, 'A', 'AA')
assert df_mixed.at['idx0', 'str'] == 'AA'
def test_tabulator_edit_event_abort(page, port, df_mixed):
widget = Tabulator(df_mixed)
values = []
widget.on_edit(lambda e: values.append((e.column, e.row, e.old, e.value)))
serve(widget, port=port, threaded=True, show=False)
time.sleep(0.2)
page.goto(f"http://localhost:{port}")
cell = page.locator('text="3.14"')
cell.click()
editable_cell = page.locator('input[type="number"]')
editable_cell.fill('0')
editable_cell.press('Escape')
time.sleep(0.2)
assert not values
assert cell.text_content() == '3.14'
def test_tabulator_edit_event_empty_to_nan(page, port, df_mixed):
widget = Tabulator(df_mixed)
values = []
widget.on_edit(lambda e: values.append((e.column, e.row, e.old, e.value)))
serve(widget, port=port, threaded=True, show=False)
time.sleep(0.5)
page.goto(f"http://localhost:{port}")
cell = page.locator('text="3.14"')
cell.click()
editable_cell = page.locator('input[type="number"]')
editable_cell.fill('')
editable_cell.press('Enter')
wait_until(lambda: len(values) == 1, page)
assert values[0][:-1] == ('float', 0, 3.14)
assert np.isnan(values[0][-1])
assert page.query_selector('text="-"') is not None
@pytest.mark.parametrize('pagination', ['remote', 'local'])
def test_tabulator_pagination(page, port, df_mixed, pagination):
page_size = 2
widget = Tabulator(df_mixed, pagination=pagination, page_size=page_size)
serve(widget, port=port, threaded=True, show=False)
time.sleep(0.2)
page.goto(f"http://localhost:{port}")
counts = count_per_page(len(df_mixed), page_size)
i = 0
while True:
wait_until(lambda: widget.page == i + 1, page)
rows = page.locator('.tabulator-row')
expect(rows).to_have_count(counts[i])
assert page.locator(f'[aria-label="Show Page {i+1}"]').count() == 1
df_page = df_mixed.iloc[i * page_size: (i + 1) * page_size]
for idx in df_page.index:
assert page.locator(f'text="{idx}"').count() == 1
if i < len(counts) - 1:
page.locator(f'[aria-label="Show Page {i+2}"]').click()
i += 1
else:
break
def test_tabulator_pagination_programmatic_update(page, port, df_mixed):
widget = Tabulator(df_mixed, pagination='local', page_size=2)
serve(widget, port=port, threaded=True, show=False)
time.sleep(0.2)
page.goto(f"http://localhost:{port}")
widget.page = 2
time.sleep(0.2)
expect(page.locator('.tabulator-page.active')).to_have_text('2')
def test_tabulator_filter_constant_scalar(page, port, df_mixed):
widget = Tabulator(df_mixed)
fltr, col = 'A', 'str'
widget.add_filter(fltr, col)
serve(widget, port=port, threaded=True, show=False)
time.sleep(0.2)
page.goto(f"http://localhost:{port}")
# Check the table has the right number of rows
expect(page.locator('.tabulator-row')).to_have_count(1)
assert page.locator('text="A"').count() == 1
assert page.locator('text="B"').count() == 0
expected_current_view = df_mixed.loc[ df_mixed[col] == fltr, :]
assert widget.current_view.equals(expected_current_view)
def test_tabulator_filter_constant_list(page, port, df_mixed):
widget = Tabulator(df_mixed)
fltr, col = ['A', 'B'], 'str'
widget.add_filter(fltr, col)
serve(widget, port=port, threaded=True, show=False)
time.sleep(0.2)
page.goto(f"http://localhost:{port}")
# Check the table has the right number of rows
expect(page.locator('.tabulator-row')).to_have_count(2)
assert page.locator('text="A"').count() == 1
assert page.locator('text="B"').count() == 1
assert page.locator('text="C"').count() == 0
expected_current_view = df_mixed.loc[df_mixed[col].isin(fltr), :]
assert widget.current_view.equals(expected_current_view)
def test_tabulator_filter_constant_tuple_range(page, port, df_mixed):
widget = Tabulator(df_mixed)
fltr, col = (1, 2), 'int'
widget.add_filter(fltr, col)
serve(widget, port=port, threaded=True, show=False)
time.sleep(0.2)
page.goto(f"http://localhost:{port}")
# Check the table has the right number of rows
expect(page.locator('.tabulator-row')).to_have_count(2)
assert page.locator('text="A"').count() == 1
assert page.locator('text="B"').count() == 1
assert page.locator('text="C"').count() == 0
expected_current_view = df_mixed.loc[(df_mixed[col] >= fltr[0]) & (df_mixed[col] <= fltr[1]), : ]
assert widget.current_view.equals(expected_current_view)
def test_tabulator_filter_param(page, port, df_mixed):
widget = Tabulator(df_mixed)
class P(param.Parameterized):
s = param.String()
filt_val, filt_col = 'A', 'str'
p = P(s=filt_val)
widget.add_filter(p.param['s'], column=filt_col)
serve(widget, port=port, threaded=True, show=False)
time.sleep(0.2)
page.goto(f"http://localhost:{port}")
df_filtered = df_mixed.loc[df_mixed[filt_col] == filt_val, :]
wait_until(lambda: widget.current_view.equals(df_filtered), page)
# Check the table has the right number of rows
expect(page.locator('.tabulator-row')).to_have_count(len(df_filtered))
for filt_val in ['B', 'NOT']:
p.s = filt_val
page.wait_for_timeout(200)
df_filtered = df_mixed.loc[df_mixed[filt_col] == filt_val, :]
wait_until(lambda: widget.current_view.equals(df_filtered), page)
# Check the table has the right number of rows
expect(page.locator('.tabulator-row')).to_have_count(len(df_filtered))
def test_tabulator_filter_bound_function(page, port, df_mixed):
widget = Tabulator(df_mixed)
def filt_(df, val):
return df[df['str'] == val]
filt_val = 'A'
w_filter = Select(value='A', options=['A', 'B', ''])
widget.add_filter(bind(filt_, val=w_filter))
serve(widget, port=port, threaded=True, show=False)
time.sleep(0.2)
page.goto(f"http://localhost:{port}")
df_filtered = filt_(df_mixed, w_filter.value)
wait_until(lambda: widget.current_view.equals(df_filtered), page)
# Check the table has the right number of rows
expect(page.locator('.tabulator-row')).to_have_count(len(df_filtered))
for filt_val in w_filter.options[1:]:
w_filter.value = filt_val
page.wait_for_timeout(200)
df_filtered = filt_(df_mixed, filt_val)
wait_until(lambda: widget.current_view.equals(df_filtered), page)
# Check the table has the right number of rows
expect(page.locator('.tabulator-row')).to_have_count(len(df_filtered))
@pytest.mark.parametrize(
'cols',
[
['int', 'float', 'str', 'bool'],
pytest.param(['date', 'datetime'], marks=pytest.mark.xfail(reason='See https://github.com/holoviz/panel/issues/3655')),
],
)
def test_tabulator_header_filters_default(page, port, df_mixed, cols):
df_mixed = df_mixed[cols]
widget = Tabulator(df_mixed, header_filters=True)
serve(widget, port=port, threaded=True, show=False)
time.sleep(0.2)
page.goto(f"http://localhost:{port}")
# Check that all the columns have a header filter, including the index column
expect(page.locator('.tabulator-header-filter')).to_have_count(len(cols) + 1)
# Check the table has the right number of rows, i.e. no filter is applied by default
assert page.locator('.tabulator-row').count() == len(df_mixed)
assert widget.filters == []
assert widget.current_view.equals(df_mixed)
@pytest.mark.parametrize(
('index', 'expected_selector'),
(
(['idx0', 'idx1'], 'input[type="search"]'),
([0, 1], 'input[type="number"]'),
(np.array([0, 1], dtype=np.uint64), 'input[type="number"]'),
([0.1, 1.1], 'input[type="number"]'),
# ([True, False], 'input[type="checkbox"]'), # Pandas cannot have boolean indexes apparently
),
)
def test_tabulator_header_filters_default_index(page, port, index, expected_selector):
df = pd.DataFrame(index=index)
widget = Tabulator(df, header_filters=True)
serve(widget, port=port, threaded=True, show=False)
time.sleep(0.2)
page.goto(f"http://localhost:{port}")
    # The numeric index columns (uint, int and float) are expected to have a number input
expect(page.locator(expected_selector)).to_have_count(1)
def test_tabulator_header_filters_init_from_editors(page, port, df_mixed):
df_mixed = df_mixed[['float']]
editors = {
'float': {'type': 'number', 'step': 0.5},
'str': {'type': 'autocomplete', 'values': True}
}
widget = Tabulator(df_mixed, header_filters=True, editors=editors)
serve(widget, port=port, threaded=True, show=False)
time.sleep(0.2)
page.goto(f"http://localhost:{port}")
number_header = page.locator('input[type="number"]')
expect(number_header).to_have_count(1)
assert number_header.get_attribute('step') == '0.5'
def test_tabulator_header_filters_init_explicitly(page, port, df_mixed):
header_filters = {
'float': {'type': 'number', 'func': '>=', 'placeholder': 'Placeholder float'},
'str': {'type': 'input', 'func': 'like', 'placeholder': 'Placeholder str'},
}
widget = Tabulator(df_mixed, header_filters=header_filters)
serve(widget, port=port, threaded=True, show=False)
time.sleep(0.2)
page.goto(f"http://localhost:{port}")
# Check that only the columns explicitly given a header filter spec have a header filter
expect(page.locator('.tabulator-header-filter')).to_have_count(len(header_filters))
number_header = page.locator('input[type="number"]')
expect(number_header).to_have_count(1)
assert number_header.get_attribute('placeholder') == 'Placeholder float'
str_header = page.locator('input[type="search"]')
expect(str_header).to_have_count(1)
assert str_header.get_attribute('placeholder') == 'Placeholder str'
def test_tabulator_header_filters_set_from_client(page, port, df_mixed):
header_filters = {
'float': {'type': 'number', 'func': '>=', 'placeholder': 'Placeholder float'},
'str': {'type': 'input', 'func': 'like', 'placeholder': 'Placeholder str'},
}
widget = Tabulator(df_mixed, header_filters=header_filters)
serve(widget, port=port, threaded=True, show=False)
time.sleep(0.2)
page.goto(f"http://localhost:{port}")
number_header = page.locator('input[type="number"]')
number_header.click()
val, cmp, col = '0', '>=', 'float'
number_header.fill(val)
number_header.press('Enter')
query1 = f'{col} {cmp} {val}'
expected_filter_df = df_mixed.query(query1)
expected_filter1 = {'field': col, 'type': cmp, 'value': val}
expect(page.locator('.tabulator-row')).to_have_count(len(expected_filter_df))
wait_until(lambda: widget.filters == [expected_filter1], page)
wait_until(lambda: widget.current_view.equals(expected_filter_df), page)
str_header = page.locator('input[type="search"]')
str_header.click()
val, cmp, col = 'A', 'like', 'str'
str_header.fill(val)
str_header.press('Enter')
query2 = f'{col} == {val!r}'
expected_filter_df = df_mixed.query(f'{query1} and {query2}')
expected_filter2 = {'field': col, 'type': cmp, 'value': val}
expect(page.locator('.tabulator-row')).to_have_count(len(expected_filter_df))
wait_until(lambda: widget.filters == [expected_filter1, expected_filter2], page)
wait_until(lambda: widget.current_view.equals(expected_filter_df), page)
def test_tabulator_download(page, port, df_mixed, df_mixed_as_string):
widget = Tabulator(df_mixed)
serve(widget, port=port, threaded=True, show=False)
time.sleep(0.2)
page.goto(f"http://localhost:{port}")
# Check that the whole table content is on the page, just
# to make sure the page is loaded before triggering the
# download.
table = page.locator('.tabulator')
expect(table).to_have_text(
df_mixed_as_string,
use_inner_text=True
)
# Start waiting for the download
with page.expect_download() as download_info:
widget.download()
download = download_info.value
# Wait for the download process to complete
path = download.path()
saved_df = pd.read_csv(path, index_col='index')
    # Some transformations are required to restore the dataframe to its original form.
saved_df['date'] = pd.to_datetime(saved_df['date'], unit='ms')
saved_df['date'] = [d.to_pydatetime().date() for d in saved_df['date']]
saved_df['datetime'] = pd.to_datetime(saved_df['datetime'], unit='ms')
saved_df.index.name = None
pd.testing.assert_frame_equal(df_mixed, saved_df)
def test_tabulator_streaming_default(page, port):
df = pd.DataFrame(np.random.random((3, 2)), columns=['A', 'B'])
widget = Tabulator(df)
serve(widget, port=port, threaded=True, show=False)
time.sleep(0.2)
page.goto(f"http://localhost:{port}")
expect(page.locator('.tabulator-row')).to_have_count(len(df))
height_start = page.locator('.pnx-tabulator.tabulator').bounding_box()['height']
def stream_data():
widget.stream(df) # follow is True by default
repetitions = 3
state.add_periodic_callback(stream_data, period=100, count=repetitions)
expected_len = len(df) * (repetitions + 1)
expect(page.locator('.tabulator-row')).to_have_count(expected_len)
assert len(widget.value) == expected_len
assert widget.current_view.equals(widget.value)
assert page.locator('.pnx-tabulator.tabulator').bounding_box()['height'] > height_start
def test_tabulator_streaming_no_follow(page, port):
nrows1 = 10
arr = np.random.randint(10, 20, (nrows1, 2))
val = [-1]
arr[0, :] = val[0]
df = pd.DataFrame(arr, columns=['A', 'B'])
widget = Tabulator(df, height=100)
serve(widget, port=port, threaded=True, show=False)
time.sleep(0.2)
page.goto(f"http://localhost:{port}")
expect(page.locator('.tabulator-row')).to_have_count(len(df))
assert page.locator('text="-1"').count() == 2
height_start = page.locator('.pnx-tabulator.tabulator').bounding_box()['height']
recs = []
nrows2 = 5
def stream_data():
arr = np.random.randint(10, 20, (nrows2, 2))
val[0] = val[0] - 1
arr[-1, :] = val[0]
recs.append(val[0])
new_df = pd.DataFrame(arr, columns=['A', 'B'])
widget.stream(new_df, follow=False)
repetitions = 3
state.add_periodic_callback(stream_data, period=100, count=repetitions)
# Explicit wait to make sure the periodic callback has completed
page.wait_for_timeout(500)
expect(page.locator('text="-1"')).to_have_count(2)
# As we're not in follow mode the last row isn't visible
# and seems to be out of reach to the selector. How visibility
# is used here seems brittle though, may need to be revisited.
expect(page.locator(f'text="{val[0]}"')).to_have_count(0)
assert len(widget.value) == nrows1 + repetitions * nrows2
assert widget.current_view.equals(widget.value)
assert page.locator('.pnx-tabulator.tabulator').bounding_box()['height'] == height_start
def test_tabulator_patching(page, port, df_mixed):
widget = Tabulator(df_mixed)
serve(widget, port=port, threaded=True, show=False)
time.sleep(0.2)
page.goto(f"http://localhost:{port}")
new_vals = {
'str': ['AA', 'BB'],
'int': [100, 101],
}
widget.patch({
'str': [(0, new_vals['str'][0]), (1, new_vals['str'][1])],
'int': [(slice(0, 2), new_vals['int'])]
}, as_index=False)
for v in new_vals:
expect(page.locator(f'text="{v}"')).to_have_count(1)
assert list(widget.value['str'].iloc[[0, 1]]) == new_vals['str']
assert list(widget.value['int'].iloc[0 : 2]) == new_vals['int']
assert df_mixed.equals(widget.current_view)
assert df_mixed.equals(widget.value)
def test_tabulator_patching_no_event(page, port, df_mixed):
    # Patching should not emit any event when watching `value`
widget = Tabulator(df_mixed)
events = []
widget.param.watch(lambda e: events.append(e), 'value')
serve(widget, port=port, threaded=True, show=False)
time.sleep(0.2)
page.goto(f"http://localhost:{port}")
new_vals = {
'str': ['AA', 'BB'],
}
widget.patch({
'str': [(0, new_vals['str'][0]), (1, new_vals['str'][1])],
}, as_index=False)
for v in new_vals:
expect(page.locator(f'text="{v}"')).to_have_count(1)
assert list(widget.value['str'].iloc[[0, 1]]) == new_vals['str']
assert df_mixed.equals(widget.value)
assert len(events) == 0
def color_false(val):
color = 'red' if not val else 'black'
return 'color: %s' % color
def highlight_max(s):
is_max = s == s.max()
return ['background-color: yellow' if v else '' for v in is_max]
# Playwright returns the colors as RGB
_color_mapping = {
'red': 'rgb(255, 0, 0)',
'black': 'rgb(0, 0, 0)',
'yellow': 'rgb(255, 255, 0)',
}
def test_tabulator_styling_init(page, port, df_mixed):
df_styled = (
df_mixed.style
.apply(highlight_max, subset=['int'])
.applymap(color_false, subset=['bool'])
)
widget = Tabulator(df_styled)
serve(widget, port=port, threaded=True, show=False)
time.sleep(0.2)
page.goto(f"http://localhost:{port}")
max_int = df_mixed['int'].max()
max_cell = page.locator('.tabulator-cell', has=page.locator(f'text="{max_int}"'))
expect(max_cell).to_have_count(1)
expect(max_cell).to_have_css('background-color', _color_mapping['yellow'])
expect(page.locator('text="false"')).to_have_css('color', _color_mapping['red'])
def test_tabulator_patching_and_styling(page, port, df_mixed):
df_styled = df_mixed.style.apply(highlight_max, subset=['int'])
widget = Tabulator(df_styled)
serve(widget, port=port, threaded=True, show=False)
time.sleep(0.2)
page.goto(f"http://localhost:{port}")
# Changing the highest value in the int column should
# update the style so that this cell gets a yellow background
widget.patch({'int': [(0, 100)]}, as_index=False)
max_int = df_mixed['int'].max()
max_cell = page.locator('.tabulator-cell', has=page.locator(f'text="{max_int}"'))
expect(max_cell).to_have_count(1)
expect(max_cell).to_have_css('background-color', _color_mapping['yellow'])
def test_tabulator_filters_and_styling(page, port, df_mixed):
df_styled = df_mixed.style.apply(highlight_max, subset=['int'])
    select = Select(options=[None, 'A', 'B', 'C', 'D'], size=5)
table = Tabulator(df_styled)
table.add_filter(select, 'str')
layout = Column(select, table)
serve(layout, port=port, threaded=True, show=False)
time.sleep(0.2)
page.goto(f"http://localhost:{port}")
# Filtering to one field and then clicking None again should display all data, with styling
page.locator('option').nth(1).click()
page.locator('option').nth(0).click()
max_int = df_mixed['int'].max()
max_cell = page.locator('.tabulator-cell', has=page.locator(f'text="{max_int}"'))
expect(max_cell).to_have_count(1)
expect(max_cell).to_have_css('background-color', _color_mapping['yellow'])
def test_tabulator_configuration(page, port, df_mixed):
# By default the Tabulator widget has sortable columns.
# Pass a configuration property to disable this behaviour.
widget = Tabulator(df_mixed, configuration={'columnDefaults': {'headerSort': False}})
serve(widget, port=port, threaded=True, show=False)
time.sleep(0.2)
page.goto(f"http://localhost:{port}")
expect(page.locator(".tabulator-sortable")).to_have_count(0)
@pytest.mark.xfail(reason='See https://github.com/holoviz/panel/issues/3620')
def test_tabulator_editor_datetime_nan(page, port, df_mixed):
df_mixed.at['idx0', 'datetime'] = np.nan
widget = Tabulator(df_mixed, configuration={'headerSort': False})
events = []
def callback(e):
events.append(e)
widget.on_edit(callback)
serve(widget, port=port, threaded=True, show=False)
time.sleep(0.2)
page.goto(f"http://localhost:{port}")
# Doesn't trigger a table edit event
cell = page.locator('text="-"')
cell.wait_for()
cell.click()
page.locator('input[type="date"]').press("Escape")
    # Error: these two actions trigger a table edit event, i.e. hitting Enter
    # or clicking away
page.locator('text="-"').click()
page.locator('input[type="date"]').press("Enter")
page.locator('text="-"').click()
page.locator("html").click()
wait_until(lambda: len(events) == 0, page)
@pytest.mark.parametrize('col', ['index', 'int', 'float', 'str', 'date', 'datetime'])
@pytest.mark.parametrize('dir', ['ascending', 'descending'])
def test_tabulator_sorters_on_init(page, port, df_mixed, col, dir):
dir_ = 'asc' if dir == 'ascending' else 'desc'
widget = Tabulator(df_mixed, sorters=[{'field': col, 'dir': dir_}])
serve(widget, port=port, threaded=True, show=False)
time.sleep(0.2)
page.goto(f"http://localhost:{port}")
sorted_header = page.locator(f'[aria-sort="{dir}"]:visible')
expect(sorted_header).to_have_attribute('tabulator-field', col)
ascending = dir == 'ascending'
if col == 'index':
expected_current_view = df_mixed.sort_index(ascending=ascending)
else:
expected_current_view = df_mixed.sort_values(col, ascending=ascending)
assert widget.current_view.equals(expected_current_view)
@pytest.mark.xfail(reason='See https://github.com/holoviz/panel/issues/3657')
def test_tabulator_sorters_on_init_multiple(page, port):
df = pd.DataFrame({
'col1': [1, 2, 3, 4],
'col2': [1, 4, 3, 2],
})
sorters = [{'field': 'col1', 'dir': 'desc'}, {'field': 'col2', 'dir': 'asc'}]
widget = Tabulator(df, sorters=sorters)
serve(widget, port=port, threaded=True, show=False)
time.sleep(0.2)
page.goto(f"http://localhost:{port}")
s1 = page.locator('[aria-sort="descending"]:visible')
expect(s1).to_have_attribute('tabulator-field', 'col1')
s2 = page.locator('[aria-sort="ascending"]:visible')
expect(s2).to_have_attribute('tabulator-field', 'col2')
first_index_rendered = page.locator('.tabulator-cell:visible').first.inner_text()
df_sorted = df.sort_values('col1', ascending=True).sort_values('col2', ascending=False)
expected_first_index = df_sorted.index[0]
# This fails
assert int(first_index_rendered) == expected_first_index
def test_tabulator_sorters_set_after_init(page, port, df_mixed):
widget = Tabulator(df_mixed)
serve(widget, port=port, threaded=True, show=False)
time.sleep(0.2)
page.goto(f"http://localhost:{port}")
widget.sorters = [{'field': 'int', 'dir': 'desc'}]
sheader = page.locator('[aria-sort="descending"]:visible')
expect(sheader).to_have_count(1)
assert sheader.get_attribute('tabulator-field') == 'int'
expected_df_sorted = df_mixed.sort_values('int', ascending=False)
assert widget.current_view.equals(expected_df_sorted)
def test_tabulator_sorters_from_client(page, port, df_mixed):
widget = Tabulator(df_mixed)
serve(widget, port=port, threaded=True, show=False)
time.sleep(0.2)
page.goto(f"http://localhost:{port}")
page.locator('.tabulator-col', has_text='float').locator('.tabulator-col-sorter').click()
sheader = page.locator('[aria-sort="ascending"]:visible')
expect(sheader).to_have_count(1)
assert sheader.get_attribute('tabulator-field') == 'float'
wait_until(lambda: widget.sorters == [{'field': 'float', 'dir': 'asc'}], page)
expected_df_sorted = df_mixed.sort_values('float', ascending=True)
assert widget.current_view.equals(expected_df_sorted)
@pytest.mark.xfail(reason='See https://github.com/holoviz/panel/issues/3658')
def test_tabulator_sorters_pagination_no_page_reset(page, port, df_mixed):
widget = Tabulator(df_mixed, pagination='remote', page_size=2)
serve(widget, port=port, threaded=True, show=False)
time.sleep(0.2)
page.goto(f"http://localhost:{port}")
page.locator('text="Next"').click()
expect(page.locator('text="idx2"')).to_have_count(1)
widget.sorters = [{'field': 'float', 'dir': 'asc'}]
page.locator('.tabulator-col', has_text='index').locator('.tabulator-col-sorter').click()
# This fails, explicit timeout required
page.wait_for_timeout(500)
expect(page.locator('text="idx2"')).to_have_count(1, timeout=1000)
assert widget.page == 2
@pytest.mark.flaky(max_runs=3)
@pytest.mark.parametrize('pagination', ['remote', 'local'])
def test_tabulator_sorters_pagination(page, port, df_mixed, pagination):
widget = Tabulator(df_mixed, pagination=pagination, page_size=2)
serve(widget, port=port, threaded=True, show=False)
time.sleep(0.2)
page.goto(f"http://localhost:{port}")
s = page.locator('.tabulator-col', has_text='str').locator('.tabulator-col-sorter')
s.click()
# Having to wait when pagination is set to remote before the next click,
# maybe there's a better way.
page.wait_for_timeout(100)
s.click()
sheader = page.locator('[aria-sort="descending"]:visible')
expect(sheader).to_have_count(1)
assert sheader.get_attribute('tabulator-field') == 'str'
expected_sorted_df = df_mixed.sort_values('str', ascending=False)
wait_until(lambda: widget.current_view.equals(expected_sorted_df), page)
# Check that if we go to the next page the current_view hasn't changed
page.locator('text="Next"').click()
page.wait_for_timeout(200)
wait_until(lambda: widget.current_view.equals(expected_sorted_df), page)
def test_tabulator_edit_event_sorters_not_automatically_applied(page, port, df_mixed):
widget = Tabulator(df_mixed, sorters=[{'field': 'str', 'dir': 'desc'}])
values = []
widget.on_edit(lambda e: values.append((e.column, e.row, e.old, e.value)))
serve(widget, port=port, threaded=True, show=False)
time.sleep(0.2)
page.goto(f"http://localhost:{port}")
expected_vals = list(df_mixed['str'].sort_values(ascending=False))
wait_until(lambda: tabulator_column_values(page, 'str') == expected_vals, page)
    # Change the cell that contains B to Z
cell = page.locator('text="B"')
cell.click()
editable_cell = page.locator('input[type="text"]')
editable_cell.fill("Z")
editable_cell.press('Enter')
wait_until(lambda: len(values) == 1, page)
expected_vals = [item if item != 'B' else 'Z' for item in expected_vals]
wait_until(lambda: tabulator_column_values(page, 'str') == expected_vals, page)
def test_tabulator_click_event_and_header_filters(page, port):
df = pd.DataFrame({
'col1': list('ABCDD'),
'col2': list('XXXXZ'),
})
widget = Tabulator(
df,
header_filters={'col1': {'type': 'input', 'func': 'like'}},
)
values = []
widget.on_click(lambda e: values.append((e.column, e.row, e.value)))
serve(widget, port=port, threaded=True, show=False)
time.sleep(0.2)
page.goto(f"http://localhost:{port}")
# Set a filter on col1
str_header = page.locator('input[type="search"]')
str_header.click()
str_header.fill('D')
str_header.press('Enter')
wait_until(lambda: len(widget.filters) == 1, page)
# Click on the last cell
cell = page.locator('text="Z"')
cell.click()
wait_until(lambda: len(values) == 1, page)
# This cell was at index 4 in col2 of the original dataframe
assert values[0] == ('col2', 4, 'Z')
def test_tabulator_click_event_and_header_filters_and_streamed_data(page, port):
df = pd.DataFrame({
'col1': list('ABCDD'),
'col2': list('XXXXZ'),
})
widget = Tabulator(
df,
header_filters={'col1': {'type': 'input', 'func': 'like'}},
)
values = []
widget.on_click(lambda e: values.append((e.column, e.row, e.value)))
serve(widget, port=port, threaded=True, show=False)
time.sleep(0.5)
page.goto(f"http://localhost:{port}")
# Set a filter on col1
str_header = page.locator('input[type="search"]')
str_header.click()
str_header.fill('D')
str_header.press('Enter')
wait_until(lambda: len(widget.filters) == 1, page)
# Stream data in ensuring that it does not mess up the index
widget.stream(pd.DataFrame([('D', 'Y')], columns=['col1', 'col2'], index=[5]))
# Click on the last cell
cell = page.locator('text="Z"')
cell.click()
wait_until(lambda: len(values) == 1, page)
# This cell was at index 4 in col2 of the original dataframe
assert values[0] == ('col2', 4, 'Z')
cell = page.locator('text="Y"')
cell.click()
wait_until(lambda: len(values) == 2, page)
# This cell was at index 5 in col2 of the original dataframe
assert values[1] == ('col2', 5, 'Y')
def test_tabulator_edit_event_and_header_filters_last_row(page, port):
df = pd.DataFrame({
'col1': list('ABCDD'),
'col2': list('XXXXZ'),
})
widget = Tabulator(
df,
header_filters={'col1': {'type': 'input', 'func': 'like'}},
)
values = []
widget.on_edit(lambda e: values.append((e.column, e.row, e.old, e.value)))
serve(widget, port=port, threaded=True, show=False)
time.sleep(0.2)
page.goto(f"http://localhost:{port}")
# Set a filter on col1
str_header = page.locator('input[type="search"]')
str_header.click()
str_header.fill('D')
str_header.press('Enter')
wait_until(lambda: len(widget.filters) == 1, page)
# Click on the last cell
cell = page.locator('text="Z"')
cell.click()
editable_cell = page.locator('input[type="text"]')
editable_cell.fill("ZZ")
editable_cell.press('Enter')
wait_until(lambda: len(values) == 1, page)
# This cell was at index 4 in col2 of the original dataframe
assert values[0] == ('col2', 4, 'Z', 'ZZ')
assert df['col2'].iloc[-1] == 'ZZ'
assert widget.value.equals(df)
assert widget.current_view.equals(df.query('col1 == "D"'))
def test_tabulator_edit_event_and_header_filters(page, port):
df = pd.DataFrame({
'col1': list('aaabcd'),
'col2': list('ABCDEF')
})
widget = Tabulator(
df,
header_filters={'col1': {'type': 'input', 'func': 'like'}},
)
values = []
widget.on_edit(lambda e: values.append((e.column, e.row, e.old, e.value)))
serve(widget, port=port, threaded=True, show=False)
time.sleep(0.2)
page.goto(f"http://localhost:{port}")
# Set a filter on col1
str_header = page.locator('input[type="search"]')
str_header.click()
str_header.fill('a')
str_header.press('Enter')
# Change the cell that contains B to BB
cell = page.locator('text="B"')
cell.click()
editable_cell = page.locator('input[type="text"]')
editable_cell.fill("BB")
editable_cell.press('Enter')
wait_until(lambda: len(values) == 1, page)
# This cell was at index 1 in col2 of the original dataframe
assert values[0] == ('col2', 1, 'B', 'BB')
assert df['col2'][1] == 'BB'
assert widget.value.equals(df)
assert widget.current_view.equals(df.query('col1 == "a"'))
@pytest.mark.flaky(max_runs=3)
@pytest.mark.parametrize('show_index', [True, False])
@pytest.mark.parametrize('index_name', ['index', 'foo'])
def test_tabulator_edit_event_and_header_filters_same_column(page, port, show_index, index_name):
df = pd.DataFrame({
'values': ['A', 'A', 'B', 'B'],
}, index=['idx0', 'idx1', 'idx2', 'idx3'])
df.index.name = index_name
widget = Tabulator(
df,
header_filters={'values': {'type': 'input', 'func': 'like'}},
show_index=show_index,
)
values = []
widget.on_edit(lambda e: values.append((e.column, e.row, e.old, e.value)))
serve(widget, port=port, threaded=True, show=False)
time.sleep(0.2)
page.goto(f"http://localhost:{port}")
header = page.locator('input[type="search"]')
header.click()
header.fill('B')
header.press('Enter')
# Check the table has the right number of rows
expect(page.locator('.tabulator-row')).to_have_count(2)
# Edit a cell in the filtered column, from B to X
cell = page.locator('text="B"').nth(1)
cell.click()
editable_cell = page.locator('input[type="text"]')
# For some reason there's sometimes an edit event sent with the old
# value as new value. Waiting here helps.
page.wait_for_timeout(200)
editable_cell.fill("X")
editable_cell.press('Enter')
wait_until(lambda: len(values) == 1, page)
assert values[0] == ('values', len(df) - 1, 'B', 'X')
assert df.at['idx3', 'values'] == 'X'
# The current view should show the edited value
assert len(widget.current_view) == 2
# In the same column, edit X to Y
cell = page.locator('text="X"')
cell.click()
editable_cell = page.locator('input[type="text"]')
editable_cell.fill("Y")
editable_cell.press('Enter')
wait_until(lambda: len(values) == 2, page)
assert values[-1] == ('values', len(df) - 1, 'X', 'Y')
assert df.at['idx3', 'values'] == 'Y'
assert len(widget.current_view) == 2
# Edit the last B value found in that column, from B to Z
cell = page.locator('text="B"')
cell.click()
editable_cell = page.locator('input[type="text"]')
editable_cell.fill("Z")
editable_cell.press('Enter')
wait_until(lambda: len(values) == 3, page)
assert values[-1] == ('values', len(df) - 2, 'B', 'Z')
assert df.at['idx2', 'values'] == 'Z'
# current_view should show Y and Z, there's no more B
assert len(widget.current_view) == 2
@pytest.mark.parametrize('pagination', ['remote', 'local'])
def test_tabulator_edit_event_and_header_filters_same_column_pagination(page, port, pagination):
df = pd.DataFrame({
'values': ['A', 'A', 'B', 'B', 'B', 'B'],
}, index=['idx0', 'idx1', 'idx2', 'idx3', 'idx4', 'idx5'])
widget = Tabulator(
df,
header_filters={'values': {'type': 'input', 'func': 'like'}},
pagination=pagination,
page_size=2,
)
values = []
widget.on_edit(lambda e: values.append((e.column, e.row, e.old, e.value)))
serve(widget, port=port, threaded=True, show=False)
time.sleep(0.2)
page.goto(f"http://localhost:{port}")
header = page.locator('input[type="search"]')
header.click()
header.fill('B')
header.press('Enter')
wait_until(lambda: widget.current_view.equals(df[df['values'] == 'B']))
cell = page.locator('text="B"').first
cell.click()
editable_cell = page.locator('input[type="text"]')
editable_cell.fill("Q")
editable_cell.press('Enter')
wait_until(lambda: len(values) == 1, page)
assert values[-1] == ('values', 2, 'B', 'Q')
assert df.at['idx2', 'values'] == 'Q'
    # The header filter is not re-applied after the edit, so current_view still has 4 rows
assert len(widget.current_view) == 4
page.locator('text="Last"').click()
page.wait_for_timeout(200)
# Check the table has the right number of rows
expect(page.locator('.tabulator-row')).to_have_count(2)
# Edit a cell in the filtered column, from B to X
cell = page.locator('text="B"').nth(1)
cell.click()
editable_cell = page.locator('input[type="text"]')
editable_cell.fill("X")
editable_cell.press('Enter')
wait_until(lambda: len(values) == 2, page)
assert values[-1] == ('values', len(df) - 1, 'B', 'X')
assert df.at['idx5', 'values'] == 'X'
# The current view should show the edited value
assert len(widget.current_view) == 4
# In the same column, edit X to Y
cell = page.locator('text="X"')
cell.click()
editable_cell = page.locator('input[type="text"]')
editable_cell.fill("Y")
editable_cell.press('Enter')
wait_until(lambda: len(values) == 3, page)
assert values[-1] == ('values', len(df) - 1, 'X', 'Y')
assert df.at['idx5', 'values'] == 'Y'
assert len(widget.current_view) == 4
# Edit the last B value found in that column, from B to Z
cell = page.locator('text="B"')
cell.click()
editable_cell = page.locator('input[type="text"]')
editable_cell.fill("Z")
editable_cell.press('Enter')
wait_until(lambda: len(values) == 4, page)
assert values[-1] == ('values', len(df) - 2, 'B', 'Z')
assert df.at['idx4', 'values'] == 'Z'
    # The view is not re-filtered after the edits, so current_view still has 4 rows
assert len(widget.current_view) == 4
@pytest.mark.parametrize('sorter', ['sorter', 'no_sorter'])
@pytest.mark.parametrize('python_filter', ['python_filter', 'no_python_filter'])
@pytest.mark.parametrize('header_filter', ['header_filter', 'no_header_filter'])
@pytest.mark.parametrize('pagination', ['remote', 'local', 'no_pagination'])
def test_tabulator_edit_event_integrations(page, port, sorter, python_filter, header_filter, pagination):
sorter_col = 'col3'
python_filter_col = 'col2'
python_filter_val = 'd'
header_filter_col = 'col1'
header_filter_val = 'Y'
target_col = 'col4'
target_val = 'G'
new_val = 'GG'
df = pd.DataFrame({
'col1': list('XYYYYYYZ'),
'col2': list('abcddddd'),
'col3': list(range(8)),
'col4': list('ABCDEFGH')
})
target_index = df.set_index(target_col).index.get_loc(target_val)
kwargs = {}
if pagination != 'no_pagination':
kwargs = dict(pagination=pagination, page_size=3)
if header_filter == 'header_filter':
kwargs.update(dict(header_filters={header_filter_col: {'type': 'input', 'func': 'like'}}))
widget = Tabulator(df, **kwargs)
if python_filter == 'python_filter':
widget.add_filter(python_filter_val, python_filter_col)
values = []
widget.on_edit(lambda e: values.append((e.column, e.row, e.old, e.value)))
serve(widget, port=port, threaded=True, show=False)
time.sleep(0.2)
page.goto(f"http://localhost:{port}")
if sorter == 'sorter':
s = page.locator('.tabulator-col', has_text=sorter_col).locator('.tabulator-col-sorter')
s.click()
# Having to wait when pagination is set to remote before the next click,
# maybe there's a better way.
page.wait_for_timeout(200)
s.click()
page.wait_for_timeout(200)
if header_filter == 'header_filter':
str_header = page.locator('input[type="search"]')
str_header.click()
str_header.fill(header_filter_val)
str_header.press('Enter')
wait_until(lambda: len(widget.filters) == 1, page)
if pagination != 'no_pagination' and sorter == 'no_sorter':
page.locator('text="Last"').click()
page.wait_for_timeout(200)
    # Change the cell content
cell = page.locator(f'text="{target_val}"')
cell.click()
editable_cell = page.locator('input[type="text"]')
editable_cell.fill(new_val)
editable_cell.press('Enter')
wait_until(lambda: len(values) == 1, page)
assert values[0] == (target_col, target_index, target_val, new_val)
assert df[target_col][target_index] == new_val
assert widget.value.equals(df)
if sorter == 'sorter':
expected_current_view = widget.value.sort_values(sorter_col, ascending=False)
else:
expected_current_view = widget.value
if python_filter == 'python_filter':
expected_current_view = expected_current_view.query(f'{python_filter_col} == @python_filter_val')
if header_filter == 'header_filter':
expected_current_view = expected_current_view.query(f'{header_filter_col} == @header_filter_val')
assert widget.current_view.equals(expected_current_view)
@pytest.mark.parametrize('sorter', ['sorter', 'no_sorter'])
@pytest.mark.parametrize('python_filter', ['python_filter', 'no_python_filter'])
@pytest.mark.parametrize('header_filter', ['header_filter', 'no_header_filter'])
@pytest.mark.parametrize('pagination', ['remote', 'local', 'no_pagination'])
def test_tabulator_click_event_selection_integrations(page, port, sorter, python_filter, header_filter, pagination):
sorter_col = 'col3'
python_filter_col = 'col2'
python_filter_val = 'd'
header_filter_col = 'col1'
header_filter_val = 'Y'
target_col = 'col4'
target_val = 'G'
df = pd.DataFrame({
'col1': list('XYYYYYYZ'),
'col2': list('abcddddd'),
'col3': list(range(8)),
'col4': list('ABCDEFGH')
})
target_index = df.set_index(target_col).index.get_loc(target_val)
kwargs = {}
if pagination != 'no_pagination':
kwargs.update(dict(pagination=pagination, page_size=3))
if header_filter == 'header_filter':
kwargs.update(dict(header_filters={header_filter_col: {'type': 'input', 'func': 'like'}}))
widget = Tabulator(df, disabled=True, **kwargs)
if python_filter == 'python_filter':
widget.add_filter(python_filter_val, python_filter_col)
values = []
widget.on_click(lambda e: values.append((e.column, e.row, e.value)))
serve(widget, port=port, threaded=True, show=False)
time.sleep(0.2)
page.goto(f"http://localhost:{port}")
if sorter == 'sorter':
s = page.locator('.tabulator-col', has_text=sorter_col).locator('.tabulator-col-sorter')
s.click()
# Having to wait when pagination is set to remote before the next click,
# maybe there's a better way.
page.wait_for_timeout(200)
s.click()
page.wait_for_timeout(200)
if header_filter == 'header_filter':
str_header = page.locator('input[type="search"]')
str_header.click()
str_header.fill(header_filter_val)
str_header.press('Enter')
wait_until(lambda: len(widget.filters) == 1, page)
if pagination != 'no_pagination' and sorter == 'no_sorter':
page.locator('text="Last"').click()
page.wait_for_timeout(200)
# Click on the cell
cell = page.locator(f'text="{target_val}"')
cell.click()
wait_until(lambda: len(values) == 1, page)
assert values[0] == (target_col, target_index, target_val)
target_selection_index = widget.current_view.index.get_loc(target_index)
if python_filter == 'python_filter' or header_filter == 'header_filter' or sorter == 'sorter':
pytest.xfail(reason='See https://github.com/holoviz/panel/issues/3664')
wait_until(lambda: widget.selection == [target_selection_index], page)
if header_filter == 'header_filter' or sorter == 'sorter' or (pagination == 'remote' and python_filter == 'python_filter'):
pytest.xfail(reason='See https://github.com/holoviz/panel/issues/3664')
expected_selected = df.iloc[[target_index], :]
assert widget.selected_dataframe.equals(expected_selected)
@pytest.mark.xfail(reason='See https://github.com/holoviz/panel/issues/3664')
def test_tabulator_selection_sorters_on_init(page, port, df_mixed):
widget = Tabulator(df_mixed, sorters=[{'field': 'int', 'dir': 'desc'}])
serve(widget, port=port, threaded=True, show=False)
time.sleep(0.2)
page.goto(f"http://localhost:{port}")
# Click on the last index cell to select it
last_index = df_mixed.index[-1]
cell = page.locator(f'text="{last_index}"')
cell.click()
wait_until(lambda: widget.selection == [len(df_mixed) - 1], page)
expected_selected = df_mixed.loc[[last_index], :]
assert widget.selected_dataframe.equals(expected_selected)
@pytest.mark.xfail(reason='https://github.com/holoviz/panel/issues/3664')
def test_tabulator_selection_header_filter_unchanged(page, port):
df = pd.DataFrame({
'col1': list('XYYYYY'),
'col2': list('abcddd'),
'col3': list('ABCDEF')
})
selection = [2, 3]
widget = Tabulator(
df,
selection=selection,
header_filters={'col1': {'type': 'input', 'func': 'like'}}
)
serve(widget, port=port, threaded=True, show=False)
time.sleep(0.2)
page.goto(f"http://localhost:{port}")
str_header = page.locator('input[type="search"]')
str_header.click()
str_header.fill('Y')
str_header.press('Enter')
page.wait_for_timeout(300)
assert widget.selection == selection
expected_selected = df.iloc[selection, :]
assert widget.selected_dataframe.equals(expected_selected)
@pytest.mark.xfail(reason='See https://github.com/holoviz/panel/issues/3670')
def test_tabulator_selection_header_filter_changed(page, port):
df = pd.DataFrame({
'col1': list('XYYYYY'),
'col2': list('abcddd'),
'col3': list('ABCDEF')
})
selection = [0, 3]
widget = Tabulator(
df,
selection=selection,
header_filters={'col1': {'type': 'input', 'func': 'like'}}
)
serve(widget, port=port, threaded=True, show=False)
time.sleep(0.2)
page.goto(f"http://localhost:{port}")
str_header = page.locator('input[type="search"]')
str_header.click()
str_header.fill('Y')
str_header.press('Enter')
page.wait_for_timeout(300)
assert widget.selection == selection
expected_selected = df.iloc[selection, :]
assert widget.selected_dataframe.equals(expected_selected)
def test_tabulator_loading_no_horizontal_rescroll(page, port, df_mixed):
widths = 100
width = int(((df_mixed.shape[1] + 1) * widths) / 2)
df_mixed['Target'] = 'target'
widget = Tabulator(df_mixed, width=width, widths=widths)
serve(widget, port=port, threaded=True, show=False)
time.sleep(0.2)
page.goto(f"http://localhost:{port}")
cell = page.locator('text="target"').first
# Scroll to the right
page.wait_for_timeout(200)
cell.scroll_into_view_if_needed()
page.wait_for_timeout(200)
bb = page.locator('text="Target"').bounding_box()
widget.loading = True
page.wait_for_timeout(200)
widget.loading = False
# To catch a potential rescroll
page.wait_for_timeout(400)
# The table should keep the same scroll position
assert bb == page.locator('text="Target"').bounding_box()
def test_tabulator_loading_no_vertical_rescroll(page, port):
arr = np.array(['a'] * 10)
arr[-1] = 'T'
df = pd.DataFrame({'col': arr})
height, width = 200, 200
widget = Tabulator(df, height=height, width=width)
serve(widget, port=port, threaded=True, show=False)
time.sleep(0.2)
page.goto(f"http://localhost:{port}")
# Scroll to the bottom, and give it a little extra time
cell = page.locator('text="T"')
page.wait_for_timeout(200)
cell.scroll_into_view_if_needed()
page.wait_for_timeout(200)
bb = page.locator('text="T"').bounding_box()
widget.loading = True
page.wait_for_timeout(200)
widget.loading = False
# To catch a potential rescroll
page.wait_for_timeout(400)
# The table should keep the same scroll position
assert bb == page.locator('text="T"').bounding_box()
def test_tabulator_trigger_value_update(page, port):
# Checking that this issue is resolved:
# https://github.com/holoviz/panel/issues/3695
nrows = 25
df = pd.DataFrame(np.random.rand(nrows, 2), columns=['a', 'b'])
widget = Tabulator(df)
serve(widget, port=port, threaded=True, show=False)
time.sleep(0.2)
page.goto(f"http://localhost:{port}")
expect(page.locator('.tabulator-row')).to_have_count(nrows)
widget.param.trigger('value')
page.wait_for_timeout(200)
    # This currently fails because of a Tabulator JS issue:
    # only the first 20 rows are displayed.
expect(page.locator('.tabulator-row')).to_have_count(nrows)
@pytest.mark.parametrize('pagination', ['remote', 'local'])
def test_tabulator_selection_header_filter_pagination_updated(page, port, df_mixed, pagination):
widget = Tabulator(
df_mixed,
header_filters={'str': {'type': 'input', 'func': 'like'}},
pagination=pagination,
page_size=3,
)
serve(widget, port=port, threaded=True, show=False)
time.sleep(0.2)
page.goto(f"http://localhost:{port}")
page.locator('text="Last"').click()
wait_until(lambda: widget.page == 2, page)
str_header = page.locator('input[type="search"]')
str_header.click()
str_header.fill('D')
str_header.press('Enter')
wait_until(lambda: widget.page == 1, page)
def test_tabulator_sort_algorithm(page, port):
    df = pd.DataFrame({
        'vals': list('AiWgrlanzNalsmJCw'),
        'groups': list('ABCBCCCCCCCCCCCAA'),
    })
target_col = 'vals'
widget = Tabulator(df, sorters=[{'field': 'groups', 'dir': 'asc'}])
values = []
widget.on_click(lambda e: values.append((e.column, e.row, e.value)))
serve(widget, port=port, threaded=True, show=False)
time.sleep(0.2)
page.goto(f"http://localhost:{port}")
# Click on the cell
target_val = 'i'
target_index = df.set_index(target_col).index.get_loc(target_val)
cell = page.locator(f'text="{target_val}"')
cell.click()
wait_until(lambda: len(values) == 1, page)
assert values[0] == (target_col, target_index, target_val)
# Click on the cell
target_val = 'W'
target_index = df.set_index(target_col).index.get_loc(target_val)
cell = page.locator(f'text="{target_val}"')
cell.click()
wait_until(lambda: len(values) == 2, page)
assert values[1] == (target_col, target_index, target_val)
def test_tabulator_sort_algorithm_no_show_index(page, port):
    df = pd.DataFrame({
        'vals': list('AiWgrlanzNalsmJCw'),
        'groups': list('ABCBCCCCCCCCCCCAA'),
    }, index=np.random.choice(list(range(17)), size=17, replace=False))
target_col = 'vals'
widget = Tabulator(df, sorters=[{'field': 'groups', 'dir': 'asc'}], show_index=False)
values = []
widget.on_click(lambda e: values.append((e.column, e.row, e.value)))
serve(widget, port=port, threaded=True, show=False)
time.sleep(0.2)
page.goto(f"http://localhost:{port}")
# Click on the cell
target_val = 'i'
target_index = df.set_index(target_col).index.get_loc(target_val)
cell = page.locator(f'text="{target_val}"')
cell.click()
wait_until(lambda: len(values) == 1, page)
assert values[0] == (target_col, target_index, target_val)
# Click on the cell
target_val = 'W'
target_index = df.set_index(target_col).index.get_loc(target_val)
cell = page.locator(f'text="{target_val}"')
cell.click()
wait_until(lambda: len(values) == 2, page)
assert values[1] == (target_col, target_index, target_val)
@pytest.mark.parametrize(
('col', 'vals'),
(
('string', [np.nan, '', 'B', 'a', '', np.nan]),
('number', [1.0, 1.0, 0.0, 0.0]),
('boolean', [True, True, False, False]),
('datetime', [dt.datetime(2019, 1, 1, 1), np.nan, dt.datetime(2019, 12, 1, 1), dt.datetime(2019, 12, 1, 1), np.nan, dt.datetime(2019, 6, 1, 1), np.nan])
),
)
def test_tabulator_sort_algorithm_by_type(page, port, col, vals):
df = pd.DataFrame({
col: vals,
})
widget = Tabulator(df, sorters=[{'field': col, 'dir': 'asc'}])
serve(widget, port=port, threaded=True, show=False)
time.sleep(0.2)
page.goto(f"http://localhost:{port}")
# Attempt at making this test more robust.
page.wait_for_timeout(200)
client_index = [int(i) for i in tabulator_column_values(page, 'index')]
def indexes_equal():
assert client_index == list(widget.current_view.index)
wait_until(indexes_equal, page)
def test_tabulator_python_filter_edit(page, port):
df = pd.DataFrame({
'values': ['A', 'A', 'B', 'B'],
}, index=['idx0', 'idx1', 'idx2', 'idx3'])
widget = Tabulator(df)
fltr, col = 'B', 'values'
widget.add_filter(fltr, col)
values = []
widget.on_edit(lambda e: values.append((e.column, e.row, e.old, e.value)))
serve(widget, port=port, threaded=True, show=False)
time.sleep(0.2)
page.goto(f"http://localhost:{port}")
# Check the table has the right number of rows
expect(page.locator('.tabulator-row')).to_have_count(2)
cell = page.locator('text="B"').nth(1)
cell.click()
editable_cell = page.locator('input[type="text"]')
editable_cell.fill("X")
editable_cell.press('Enter')
wait_until(lambda: len(values) == 1, page)
assert values[0] == ('values', len(df) - 1, 'B', 'X')
assert df.at['idx3', 'values'] == 'X'
cell = page.locator('text="X"')
cell.click()
editable_cell = page.locator('input[type="text"]')
editable_cell.fill("Y")
editable_cell.press('Enter')
wait_until(lambda: len(values) == 2, page)
assert values[-1] == ('values', len(df) - 1, 'X', 'Y')
assert df.at['idx3', 'values'] == 'Y'
def test_tabulator_sorter_default_number(page, port):
df = pd.DataFrame({'x': []}).astype({'x': int})
widget = Tabulator(df, sorters=[{"field": "x", "dir": "desc"}])
serve(widget, port=port, threaded=True, show=False)
time.sleep(0.2)
page.goto(f"http://localhost:{port}")
df2 = pd.DataFrame({'x': [0, 96, 116]})
widget.value = df2
def x_values():
table_values = [int(v) for v in tabulator_column_values(page, 'x')]
assert table_values == list(df2['x'].sort_values(ascending=False))
wait_until(x_values, page)
def test_tabulator_update_hidden_columns(page, port):
df = pd.DataFrame({
'a': [1, 2, 3],
'b': [1, 2, 3]
})
widget = Tabulator(
df, hidden_columns=['a', 'b'], sizing_mode='stretch_width'
)
serve(widget, port=port, threaded=True, show=False)
time.sleep(0.2)
page.goto(f"http://localhost:{port}")
time.sleep(0.2)
col_a_cells = page.locator('text="3"')
assert not col_a_cells.nth(0).is_visible()
assert not col_a_cells.nth(1).is_visible()
widget.hidden_columns = ['b']
time.sleep(0.2)
col_a_cells = page.locator('text="3"')
title_bbox = page.locator('text="a"').bounding_box()
cell_bbox = col_a_cells.first.bounding_box()
assert col_a_cells.nth(0).is_visible()
assert not col_a_cells.nth(1).is_visible()
assert title_bbox['x'] == cell_bbox['x']
assert title_bbox['width'] == cell_bbox['width']
# --- TheAlgorithms/Python: project_euler/problem_113/sol1.py (licenses: MIT, CC-BY-NC-4.0, CC-BY-NC-SA-4.0) ---
"""
Project Euler Problem 113: https://projecteuler.net/problem=113
Working from left-to-right, if no digit is exceeded by the digit to its left, it is
called an increasing number; for example, 134468.
Similarly, if no digit is exceeded by the digit to its right, it is called a decreasing
number; for example, 66420.
We shall call a positive integer that is neither increasing nor decreasing a
"bouncy" number; for example, 155349.
As n increases, the proportion of bouncy numbers below n increases such that there
are only 12951 numbers below one-million that are not bouncy and only 277032
non-bouncy numbers below 10^10.
How many numbers below a googol (10^100) are not bouncy?
"""
def choose(n: int, r: int) -> int:
"""
Calculate the binomial coefficient c(n,r) using the multiplicative formula.
>>> choose(4,2)
6
>>> choose(5,3)
10
>>> choose(20,6)
38760
"""
ret = 1.0
for i in range(1, r + 1):
ret *= (n + 1 - i) / i
return round(ret)
def non_bouncy_exact(n: int) -> int:
"""
    Calculate the number of non-bouncy numbers with exactly n digits.
    (There are choose(8 + n, n) increasing and choose(9 + n, n) - 1 decreasing such
    numbers, and the 9 constant numbers such as 777 are counted in both groups,
    which yields the closed form returned below.)
>>> non_bouncy_exact(1)
9
>>> non_bouncy_exact(6)
7998
>>> non_bouncy_exact(10)
136126
"""
return choose(8 + n, n) + choose(9 + n, n) - 10
def non_bouncy_upto(n: int) -> int:
"""
Calculate the number of non-bouncy numbers with at most n digits.
>>> non_bouncy_upto(1)
9
>>> non_bouncy_upto(6)
12951
>>> non_bouncy_upto(10)
277032
"""
return sum(non_bouncy_exact(i) for i in range(1, n + 1))
def solution(num_digits: int = 100) -> int:
"""
Calculate the number of non-bouncy numbers less than a googol.
>>> solution(6)
12951
>>> solution(10)
277032
"""
return non_bouncy_upto(num_digits)
if __name__ == "__main__":
print(f"{solution() = }")
|
89a3177aa2884eef02736d30a7c0cb301e34652e
|
117aaf186609e48230bff9f4f4e96546d3484963
|
/templates/TemplateQmlWidget.py
|
f8af17db2102fd8c6e8e2012e55c1bf7462009bc
|
[
"MIT"
] |
permissive
|
eyllanesc/stackoverflow
|
8d1c4b075e578496ea8deecbb78ef0e08bcc092e
|
db738fbe10e8573b324d1f86e9add314f02c884d
|
refs/heads/master
| 2022-08-19T22:23:34.697232
| 2022-08-10T20:59:17
| 2022-08-10T20:59:17
| 76,124,222
| 355
| 433
|
MIT
| 2022-08-10T20:59:18
| 2016-12-10T16:29:34
|
C++
|
UTF-8
|
Python
| false
| false
| 489
|
py
|
TemplateQmlWidget.py
|
from PyQt5 import QtCore, QtGui, QtQml
if __name__ == '__main__':
import os
import sys
# sys.argv += ['--style', 'material']
app = QtGui.QGuiApplication(sys.argv)
engine = QtQml.QQmlApplicationEngine()
# engine.rootContext().setContextProperty()
file = os.path.join(os.path.dirname(
os.path.realpath(__file__)), "main.qml")
engine.load(QtCore.QUrl.fromLocalFile(file))
if not engine.rootObjects():
sys.exit(-1)
sys.exit(app.exec())
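# Variant sketch (assumption: the template expects a sibling main.qml whose root is an
# ApplicationWindow). For a fully self-contained demo, the QML document can instead be
# embedded as a string and handed to the engine with loadData() rather than load():
def run_inline_qml_demo():
    """Bootstrap the same engine from an inline QML string (illustrative only)."""
    import sys
    inline_qml = b"""
    import QtQuick 2.12
    import QtQuick.Controls 2.12
    ApplicationWindow {
        visible: true
        width: 320
        height: 200
        Label { anchors.centerIn: parent; text: "Hello from inline QML" }
    }
    """
    app = QtGui.QGuiApplication(sys.argv)
    engine = QtQml.QQmlApplicationEngine()
    engine.loadData(inline_qml)
    if not engine.rootObjects():
        sys.exit(-1)
    sys.exit(app.exec())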
|
2c1e1e102b22c13fca2d72813670425ad133cbf9
|
fc01018efee841513e097bd1609a07b0f1c9a6a9
|
/scripts/bot/gui.py
|
8d1618eec9896efd4c65aad9dfbf882b9629cda9
|
[
"Apache-2.0"
] |
permissive
|
baidu/amis
|
f50c09267a1994448c95b0d3eae48f97a3f9029b
|
cdaeb01902e73047519b89c7ba82c78d00713f4f
|
refs/heads/master
| 2023-09-05T23:45:28.887028
| 2023-09-04T10:36:20
| 2023-09-04T10:36:20
| 183,874,244
| 15,321
| 2,373
|
Apache-2.0
| 2023-09-14T12:38:14
| 2019-04-28T07:42:14
|
TypeScript
|
UTF-8
|
Python
| false
| false
| 3,536
|
py
|
gui.py
|
from vector_store import get_client
from split_markdown import split_markdown
from embedding import get_embedding
import gradio as gr
import os
import pickle
from llm.wenxin import Wenxin, ModelName
from dotenv import load_dotenv
load_dotenv()
chroma_client = get_client()
collection = chroma_client.get_collection(name="amis")
wenxin = Wenxin()
text_blocks_by_id = {}
with open(os.path.join(os.path.dirname(__file__), 'text.pickle'), 'rb') as f:
text_blocks_by_id = pickle.load(f)
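# For readers who do not read Chinese: the prompt template below says, roughly,
# "Answer the question using only the material below; if it cannot be answered from
# that material, reply 'no relevant answer found'", followed by the retrieved context
# and the user's question. The widget labels further down follow the same pattern
# (question, prompt, answers from ERNIE Bot / ERNIE Bot Turbo / BLOOMZ, and the raw
# vector-search hits, which are shown purely as a debugging aid).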
def get_prompt(context, query):
return f"""
请只根据下面的资料回答问题,如果无法根据这些资料回答,回答“找不到相关答案”:
资料:
{context}
问题是:{query}
回答:"""
def get_context(search_result, include_code=True, max_length=1024):
context = ""
doc_ids = []
for doc_id in search_result['ids'][0]:
doc_id = doc_id.split("_")[0]
if doc_id not in doc_ids:
doc_ids.append(doc_id)
for doc_id in doc_ids:
markdown_block = text_blocks_by_id[doc_id]
block_text = markdown_block.gen_text(512, include_code)
if (len(context) + len(block_text)) < max_length:
context += block_text + "\n\n"
return context
query = gr.Textbox(label="问题")
include_code = gr.Checkbox(value=True, label="提示词中是否要包含 amis schema",
info="包含的好处是大模型会返回 json,但也会导致内容太长,只能提供少量段落给大模型,导致错过重要资料")
n_result = gr.Number(
value=10, precision=0, label="向量搜索查询返回个数")
bot_result = gr.Textbox(label="文心的回答")
bot_turbo_result = gr.Textbox(label="文心 Turbo 的回答")
bloomz_result = gr.Textbox(label="开源 BLOOMZ 的回答")
prompt = gr.Textbox(label="提示词")
vector_search_result = gr.Dataframe(
label="向量相关搜索结果,这个结果只是为了辅助调试,确认是因为没找到相关内容还是大模型没能理解",
headers=["相关段落", "所属文档"],
datatype=["str", "str"],
col_count=(2, "dynamic"),
wrap=True
)
def amis_search(query, n_result=10, include_code=True):
    if query.strip() == "":
        # The interface below wires up three outputs (answer, prompt, search results),
        # so the early returns must also yield exactly three values.
        return "必须有输入", "", []
search_result = collection.query(
query_embeddings=get_embedding(query).tolist(),
n_results=n_result
)
context = get_context(search_result, include_code)
    if context == "":
        return "检索不到相关内容", "", []
prompt = get_prompt(context, query)
bot_result = wenxin.generate(prompt, ModelName.ERNIE_BOT)
# bloomz_result = wenxin.generate(prompt, ModelName.BLOOMZ_7B)
markdown_blocks = []
index = 0
for doc in search_result['documents'][0]:
markdown_block = []
markdown_block.append(doc)
if index < len(search_result['metadatas'][0]):
source = search_result['metadatas'][0][index]['source'].replace(
'docs/zh-CN/', '')
markdown_block.append(
source)
else:
print("index out of range", doc)
markdown_blocks.append(markdown_block)
index += 1
return bot_result, prompt, markdown_blocks
demo = gr.Interface(amis_search, title="amis 文档问答机器人", inputs=[
query, n_result, include_code], outputs=[bot_result, prompt, vector_search_result])
if __name__ == '__main__':
demo.queue(concurrency_count=10).launch(share=False, server_name="0.0.0.0")
|
bfd459329d25f1178e946f9373cc842822bd1450
|
7ae27ce9a8c477855f8fd5fac54685716d868349
|
/invokeai/backend/image_util/__init__.py
|
0be5a78a9373bebd2b9e2a53cfed61bcc45467f2
|
[
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
invoke-ai/InvokeAI
|
5f7a2c1f19b1f686099a8cf4cec85aa9c7b6d81d
|
2bd3cf28eabff2dcf3339669be222061dd208cb8
|
refs/heads/main
| 2023-08-31T07:06:56.721576
| 2023-08-30T19:05:17
| 2023-08-30T19:05:17
| 525,592,995
| 15,987
| 1,678
|
Apache-2.0
| 2023-09-14T20:29:39
| 2022-08-17T01:04:27
|
TypeScript
|
UTF-8
|
Python
| false
| false
| 770
|
py
|
__init__.py
|
"""
Initialization file for invokeai.backend.image_util methods.
"""
from .patchmatch import PatchMatch # noqa: F401
from .pngwriter import PngWriter, PromptFormatter, retrieve_metadata, write_metadata # noqa: F401
from .seamless import configure_model_padding # noqa: F401
from .txt2mask import Txt2Mask # noqa: F401
from .util import InitImageResizer, make_grid # noqa: F401
def debug_image(debug_image, debug_text, debug_show=True, debug_result=False, debug_status=False):
from PIL import ImageDraw
if not debug_status:
return
image_copy = debug_image.copy().convert("RGBA")
ImageDraw.Draw(image_copy).text((5, 5), debug_text, (255, 0, 0))
if debug_show:
image_copy.show()
if debug_result:
return image_copy
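# Illustrative call of the debug helper above (the image here is just a placeholder):
# nothing is drawn or shown unless debug_status is truthy, and the annotated copy is
# only handed back when debug_result is set.
#
#     from PIL import Image
#
#     img = Image.new("RGB", (64, 64))
#     annotated = debug_image(
#         img, "after resize", debug_show=False, debug_result=True, debug_status=True
#     )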
|
87a69191ee521c9ffcc50f15d1b1f12d30107294
|
51819802a13fbf4c71ea0f6ee3771b86fcf1834c
|
/srsly/tests/cloudpickle/cloudpickle_test.py
|
b293c53f2b2b6c2242fb415c93780ffbdefa6002
|
[
"MIT"
] |
permissive
|
explosion/srsly
|
1860eda76b79bce49e46a8edeb39828774d1d900
|
1aa4ae1b690b513092ce1e58257427cddf38e97f
|
refs/heads/master
| 2023-08-23T00:56:04.762619
| 2023-07-24T11:40:07
| 2023-07-24T11:40:07
| 159,904,634
| 383
| 43
|
MIT
| 2023-07-25T12:13:06
| 2018-12-01T03:21:56
|
Python
|
UTF-8
|
Python
| false
| false
| 106,164
|
py
|
cloudpickle_test.py
|
import _collections_abc
import abc
import collections
import base64
import functools
import io
import itertools
import logging
import math
import multiprocessing
from operator import itemgetter, attrgetter
import pickletools
import platform
import random
import re
import shutil
import subprocess
import sys
import tempfile
import textwrap
import types
import unittest
import weakref
import os
import enum
import typing
from functools import wraps
import pytest
try:
# try importing numpy and scipy. These are not hard dependencies and
# tests should be skipped if these modules are not available
import numpy as np
import scipy.special as spp
except (ImportError, RuntimeError):
np = None
spp = None
try:
# Ditto for Tornado
import tornado
except ImportError:
tornado = None
import srsly.cloudpickle as cloudpickle
from srsly.cloudpickle.compat import pickle
from srsly.cloudpickle import register_pickle_by_value
from srsly.cloudpickle import unregister_pickle_by_value
from srsly.cloudpickle import list_registry_pickle_by_value
from srsly.cloudpickle.cloudpickle import _should_pickle_by_reference
from srsly.cloudpickle.cloudpickle import _make_empty_cell, cell_set
from srsly.cloudpickle.cloudpickle import _extract_class_dict, _whichmodule
from srsly.cloudpickle.cloudpickle import _lookup_module_and_qualname
from .testutils import subprocess_pickle_echo
from .testutils import subprocess_pickle_string
from .testutils import assert_run_python_script
from .testutils import subprocess_worker
_TEST_GLOBAL_VARIABLE = "default_value"
_TEST_GLOBAL_VARIABLE2 = "another_value"
class RaiserOnPickle:
def __init__(self, exc):
self.exc = exc
def __reduce__(self):
raise self.exc
def pickle_depickle(obj, protocol=cloudpickle.DEFAULT_PROTOCOL):
"""Helper function to test whether object pickled with cloudpickle can be
depickled with pickle
"""
return pickle.loads(cloudpickle.dumps(obj, protocol=protocol))
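# Illustrative round trips (not additional tests): plain containers survive unchanged,
# and so do locally defined callables (the point of cloudpickle) that the stock pickle
# module cannot serialize on its own, e.g.
#
#     pickle_depickle([1, 2, 3]) == [1, 2, 3]
#     pickle_depickle(lambda x: x + 1)(41) == 42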
def _escape(raw_filepath):
# Ugly hack to embed filepaths in code templates for windows
return raw_filepath.replace("\\", r"\\\\")
def _maybe_remove(list_, item):
try:
list_.remove(item)
except ValueError:
pass
return list_
def test_extract_class_dict():
class A(int):
"""A docstring"""
def method(self):
return "a"
class B:
"""B docstring"""
B_CONSTANT = 42
def method(self):
return "b"
class C(A, B):
C_CONSTANT = 43
def method_c(self):
return "c"
clsdict = _extract_class_dict(C)
assert sorted(clsdict.keys()) == ["C_CONSTANT", "__doc__", "method_c"]
assert clsdict["C_CONSTANT"] == 43
assert clsdict["__doc__"] is None
assert clsdict["method_c"](C()) == C().method_c()
class CloudPickleTest(unittest.TestCase):
protocol = cloudpickle.DEFAULT_PROTOCOL
def setUp(self):
self.tmpdir = tempfile.mkdtemp(prefix="tmp_cloudpickle_test_")
def tearDown(self):
shutil.rmtree(self.tmpdir)
@pytest.mark.skipif(
platform.python_implementation() != "CPython" or
(sys.version_info >= (3, 8, 0) and sys.version_info < (3, 8, 2)),
reason="Underlying bug fixed upstream starting Python 3.8.2")
def test_reducer_override_reference_cycle(self):
# Early versions of Python 3.8 introduced a reference cycle between a
        # Pickler and its reducer_override method. Because a Pickler
# object references every object it has pickled through its memo, this
# cycle prevented the garbage-collection of those external pickled
# objects. See #327 as well as https://bugs.python.org/issue39492
# This bug was fixed in Python 3.8.2, but is still present using
# cloudpickle and Python 3.8.0/1, hence the skipif directive.
class MyClass:
pass
my_object = MyClass()
wr = weakref.ref(my_object)
cloudpickle.dumps(my_object)
del my_object
assert wr() is None, "'del'-ed my_object has not been collected"
def test_itemgetter(self):
d = range(10)
getter = itemgetter(1)
getter2 = pickle_depickle(getter, protocol=self.protocol)
self.assertEqual(getter(d), getter2(d))
getter = itemgetter(0, 3)
getter2 = pickle_depickle(getter, protocol=self.protocol)
self.assertEqual(getter(d), getter2(d))
def test_attrgetter(self):
class C:
def __getattr__(self, item):
return item
d = C()
getter = attrgetter("a")
getter2 = pickle_depickle(getter, protocol=self.protocol)
self.assertEqual(getter(d), getter2(d))
getter = attrgetter("a", "b")
getter2 = pickle_depickle(getter, protocol=self.protocol)
self.assertEqual(getter(d), getter2(d))
d.e = C()
getter = attrgetter("e.a")
getter2 = pickle_depickle(getter, protocol=self.protocol)
self.assertEqual(getter(d), getter2(d))
getter = attrgetter("e.a", "e.b")
getter2 = pickle_depickle(getter, protocol=self.protocol)
self.assertEqual(getter(d), getter2(d))
# Regression test for SPARK-3415
@pytest.mark.skip(reason="Requires pytest -s to pass")
def test_pickling_file_handles(self):
out1 = sys.stderr
out2 = pickle.loads(cloudpickle.dumps(out1, protocol=self.protocol))
self.assertEqual(out1, out2)
def test_func_globals(self):
class Unpicklable:
def __reduce__(self):
raise Exception("not picklable")
global exit
exit = Unpicklable()
self.assertRaises(Exception, lambda: cloudpickle.dumps(
exit, protocol=self.protocol))
def foo():
sys.exit(0)
self.assertTrue("exit" in foo.__code__.co_names)
cloudpickle.dumps(foo)
def test_buffer(self):
try:
buffer_obj = buffer("Hello")
buffer_clone = pickle_depickle(buffer_obj, protocol=self.protocol)
self.assertEqual(buffer_clone, str(buffer_obj))
buffer_obj = buffer("Hello", 2, 3)
buffer_clone = pickle_depickle(buffer_obj, protocol=self.protocol)
self.assertEqual(buffer_clone, str(buffer_obj))
        except NameError:  # Python 3 no longer supports buffers
pass
def test_memoryview(self):
buffer_obj = memoryview(b"Hello")
self.assertEqual(pickle_depickle(buffer_obj, protocol=self.protocol),
buffer_obj.tobytes())
def test_dict_keys(self):
keys = {"a": 1, "b": 2}.keys()
results = pickle_depickle(keys)
self.assertEqual(results, keys)
assert isinstance(results, _collections_abc.dict_keys)
def test_dict_values(self):
values = {"a": 1, "b": 2}.values()
results = pickle_depickle(values)
self.assertEqual(sorted(results), sorted(values))
assert isinstance(results, _collections_abc.dict_values)
def test_dict_items(self):
items = {"a": 1, "b": 2}.items()
results = pickle_depickle(items)
self.assertEqual(results, items)
assert isinstance(results, _collections_abc.dict_items)
def test_odict_keys(self):
keys = collections.OrderedDict([("a", 1), ("b", 2)]).keys()
results = pickle_depickle(keys)
self.assertEqual(results, keys)
assert type(keys) == type(results)
def test_odict_values(self):
values = collections.OrderedDict([("a", 1), ("b", 2)]).values()
results = pickle_depickle(values)
self.assertEqual(list(results), list(values))
assert type(values) == type(results)
def test_odict_items(self):
items = collections.OrderedDict([("a", 1), ("b", 2)]).items()
results = pickle_depickle(items)
self.assertEqual(results, items)
assert type(items) == type(results)
def test_sliced_and_non_contiguous_memoryview(self):
buffer_obj = memoryview(b"Hello!" * 3)[2:15:2]
self.assertEqual(pickle_depickle(buffer_obj, protocol=self.protocol),
buffer_obj.tobytes())
def test_large_memoryview(self):
buffer_obj = memoryview(b"Hello!" * int(1e7))
self.assertEqual(pickle_depickle(buffer_obj, protocol=self.protocol),
buffer_obj.tobytes())
def test_lambda(self):
self.assertEqual(
pickle_depickle(lambda: 1, protocol=self.protocol)(), 1)
def test_nested_lambdas(self):
a, b = 1, 2
f1 = lambda x: x + a
f2 = lambda x: f1(x) // b
self.assertEqual(pickle_depickle(f2, protocol=self.protocol)(1), 1)
def test_recursive_closure(self):
def f1():
def g():
return g
return g
def f2(base):
def g(n):
return base if n <= 1 else n * g(n - 1)
return g
g1 = pickle_depickle(f1(), protocol=self.protocol)
self.assertEqual(g1(), g1)
g2 = pickle_depickle(f2(2), protocol=self.protocol)
self.assertEqual(g2(5), 240)
def test_closure_none_is_preserved(self):
def f():
"""a function with no closure cells
"""
self.assertTrue(
f.__closure__ is None,
msg='f actually has closure cells!',
)
g = pickle_depickle(f, protocol=self.protocol)
self.assertTrue(
g.__closure__ is None,
msg='g now has closure cells even though f does not',
)
def test_empty_cell_preserved(self):
def f():
if False: # pragma: no cover
cell = None
def g():
cell # NameError, unbound free variable
return g
g1 = f()
with pytest.raises(NameError):
g1()
g2 = pickle_depickle(g1, protocol=self.protocol)
with pytest.raises(NameError):
g2()
def test_unhashable_closure(self):
def f():
s = {1, 2} # mutable set is unhashable
def g():
return len(s)
return g
g = pickle_depickle(f(), protocol=self.protocol)
self.assertEqual(g(), 2)
def test_dynamically_generated_class_that_uses_super(self):
class Base:
def method(self):
return 1
class Derived(Base):
"Derived Docstring"
def method(self):
return super().method() + 1
self.assertEqual(Derived().method(), 2)
# Pickle and unpickle the class.
UnpickledDerived = pickle_depickle(Derived, protocol=self.protocol)
self.assertEqual(UnpickledDerived().method(), 2)
# We have special logic for handling __doc__ because it's a readonly
# attribute on PyPy.
self.assertEqual(UnpickledDerived.__doc__, "Derived Docstring")
# Pickle and unpickle an instance.
orig_d = Derived()
d = pickle_depickle(orig_d, protocol=self.protocol)
self.assertEqual(d.method(), 2)
def test_cycle_in_classdict_globals(self):
class C:
def it_works(self):
return "woohoo!"
C.C_again = C
C.instance_of_C = C()
depickled_C = pickle_depickle(C, protocol=self.protocol)
depickled_instance = pickle_depickle(C())
# Test instance of depickled class.
self.assertEqual(depickled_C().it_works(), "woohoo!")
self.assertEqual(depickled_C.C_again().it_works(), "woohoo!")
self.assertEqual(depickled_C.instance_of_C.it_works(), "woohoo!")
self.assertEqual(depickled_instance.it_works(), "woohoo!")
def test_locally_defined_function_and_class(self):
LOCAL_CONSTANT = 42
def some_function(x, y):
# Make sure the __builtins__ are not broken (see #211)
sum(range(10))
return (x + y) / LOCAL_CONSTANT
# pickle the function definition
self.assertEqual(pickle_depickle(some_function, protocol=self.protocol)(41, 1), 1)
self.assertEqual(pickle_depickle(some_function, protocol=self.protocol)(81, 3), 2)
hidden_constant = lambda: LOCAL_CONSTANT
class SomeClass:
"""Overly complicated class with nested references to symbols"""
def __init__(self, value):
self.value = value
def one(self):
return LOCAL_CONSTANT / hidden_constant()
def some_method(self, x):
return self.one() + some_function(x, 1) + self.value
# pickle the class definition
clone_class = pickle_depickle(SomeClass, protocol=self.protocol)
self.assertEqual(clone_class(1).one(), 1)
self.assertEqual(clone_class(5).some_method(41), 7)
clone_class = subprocess_pickle_echo(SomeClass, protocol=self.protocol)
self.assertEqual(clone_class(5).some_method(41), 7)
# pickle the class instances
self.assertEqual(pickle_depickle(SomeClass(1)).one(), 1)
self.assertEqual(pickle_depickle(SomeClass(5)).some_method(41), 7)
new_instance = subprocess_pickle_echo(SomeClass(5),
protocol=self.protocol)
self.assertEqual(new_instance.some_method(41), 7)
# pickle the method instances
self.assertEqual(pickle_depickle(SomeClass(1).one)(), 1)
self.assertEqual(pickle_depickle(SomeClass(5).some_method)(41), 7)
new_method = subprocess_pickle_echo(SomeClass(5).some_method,
protocol=self.protocol)
self.assertEqual(new_method(41), 7)
def test_partial(self):
partial_obj = functools.partial(min, 1)
partial_clone = pickle_depickle(partial_obj, protocol=self.protocol)
self.assertEqual(partial_clone(4), 1)
@pytest.mark.skipif(platform.python_implementation() == 'PyPy',
reason="Skip numpy and scipy tests on PyPy")
def test_ufunc(self):
# test a numpy ufunc (universal function), which is a C-based function
# that is applied on a numpy array
if np:
# simple ufunc: np.add
self.assertEqual(pickle_depickle(np.add, protocol=self.protocol),
np.add)
else: # skip if numpy is not available
pass
if spp:
# custom ufunc: scipy.special.iv
self.assertEqual(pickle_depickle(spp.iv, protocol=self.protocol),
spp.iv)
else: # skip if scipy is not available
pass
def test_loads_namespace(self):
obj = 1, 2, 3, 4
returned_obj = cloudpickle.loads(cloudpickle.dumps(
obj, protocol=self.protocol))
self.assertEqual(obj, returned_obj)
def test_load_namespace(self):
obj = 1, 2, 3, 4
bio = io.BytesIO()
cloudpickle.dump(obj, bio)
bio.seek(0)
returned_obj = cloudpickle.load(bio)
self.assertEqual(obj, returned_obj)
def test_generator(self):
def some_generator(cnt):
for i in range(cnt):
yield i
gen2 = pickle_depickle(some_generator, protocol=self.protocol)
assert type(gen2(3)) == type(some_generator(3))
assert list(gen2(3)) == list(range(3))
def test_classmethod(self):
class A:
@staticmethod
def test_sm():
return "sm"
@classmethod
def test_cm(cls):
return "cm"
sm = A.__dict__["test_sm"]
cm = A.__dict__["test_cm"]
A.test_sm = pickle_depickle(sm, protocol=self.protocol)
A.test_cm = pickle_depickle(cm, protocol=self.protocol)
self.assertEqual(A.test_sm(), "sm")
self.assertEqual(A.test_cm(), "cm")
def test_bound_classmethod(self):
class A:
@classmethod
def test_cm(cls):
return "cm"
A.test_cm = pickle_depickle(A.test_cm, protocol=self.protocol)
self.assertEqual(A.test_cm(), "cm")
def test_method_descriptors(self):
f = pickle_depickle(str.upper)
self.assertEqual(f('abc'), 'ABC')
def test_instancemethods_without_self(self):
class F:
def f(self, x):
return x + 1
g = pickle_depickle(F.f, protocol=self.protocol)
self.assertEqual(g.__name__, F.f.__name__)
# self.assertEqual(g(F(), 1), 2) # still fails
def test_module(self):
pickle_clone = pickle_depickle(pickle, protocol=self.protocol)
self.assertEqual(pickle, pickle_clone)
def test_dynamic_module(self):
mod = types.ModuleType('mod')
code = '''
x = 1
def f(y):
return x + y
class Foo:
def method(self, x):
return f(x)
'''
exec(textwrap.dedent(code), mod.__dict__)
mod2 = pickle_depickle(mod, protocol=self.protocol)
self.assertEqual(mod.x, mod2.x)
self.assertEqual(mod.f(5), mod2.f(5))
self.assertEqual(mod.Foo().method(5), mod2.Foo().method(5))
if platform.python_implementation() != 'PyPy':
# XXX: this fails with excessive recursion on PyPy.
mod3 = subprocess_pickle_echo(mod, protocol=self.protocol)
self.assertEqual(mod.x, mod3.x)
self.assertEqual(mod.f(5), mod3.f(5))
self.assertEqual(mod.Foo().method(5), mod3.Foo().method(5))
        # Test that dynamic modules are singletons when imported back
mod1, mod2 = pickle_depickle([mod, mod])
self.assertEqual(id(mod1), id(mod2))
# Ensure proper pickling of mod's functions when module "looks" like a
# file-backed module even though it is not:
try:
sys.modules['mod'] = mod
depickled_f = pickle_depickle(mod.f, protocol=self.protocol)
self.assertEqual(mod.f(5), depickled_f(5))
finally:
sys.modules.pop('mod', None)
def test_module_locals_behavior(self):
# Makes sure that a local function defined in another module is
# correctly serialized. This notably checks that the globals are
# accessible and that there is no issue with the builtins (see #211)
pickled_func_path = os.path.join(self.tmpdir, 'local_func_g.pkl')
child_process_script = '''
from srsly.cloudpickle.compat import pickle
import gc
with open("{pickled_func_path}", 'rb') as f:
func = pickle.load(f)
assert func(range(10)) == 45
'''
child_process_script = child_process_script.format(
pickled_func_path=_escape(pickled_func_path))
try:
from srsly.tests.cloudpickle.testutils import make_local_function
g = make_local_function()
with open(pickled_func_path, 'wb') as f:
cloudpickle.dump(g, f, protocol=self.protocol)
assert_run_python_script(textwrap.dedent(child_process_script))
finally:
os.unlink(pickled_func_path)
def test_dynamic_module_with_unpicklable_builtin(self):
# Reproducer of https://github.com/cloudpipe/cloudpickle/issues/316
# Some modules such as scipy inject some unpicklable objects into the
# __builtins__ module, which appears in every module's __dict__ under
# the '__builtins__' key. In such cases, cloudpickle used to fail
# when pickling dynamic modules.
class UnpickleableObject:
def __reduce__(self):
raise ValueError('Unpicklable object')
mod = types.ModuleType("mod")
exec('f = lambda x: abs(x)', mod.__dict__)
assert mod.f(-1) == 1
assert '__builtins__' in mod.__dict__
unpicklable_obj = UnpickleableObject()
with pytest.raises(ValueError):
cloudpickle.dumps(unpicklable_obj)
# Emulate the behavior of scipy by injecting an unpickleable object
# into mod's builtins.
# The __builtins__ entry of mod's __dict__ can either be the
# __builtins__ module, or the __builtins__ module's __dict__. #316
# happens only in the latter case.
if isinstance(mod.__dict__['__builtins__'], dict):
mod.__dict__['__builtins__']['unpickleable_obj'] = unpicklable_obj
elif isinstance(mod.__dict__['__builtins__'], types.ModuleType):
mod.__dict__['__builtins__'].unpickleable_obj = unpicklable_obj
depickled_mod = pickle_depickle(mod, protocol=self.protocol)
assert '__builtins__' in depickled_mod.__dict__
if isinstance(depickled_mod.__dict__['__builtins__'], dict):
assert "abs" in depickled_mod.__builtins__
elif isinstance(
depickled_mod.__dict__['__builtins__'], types.ModuleType):
assert hasattr(depickled_mod.__builtins__, "abs")
assert depickled_mod.f(-1) == 1
# Additional check testing that the issue #425 is fixed: without the
# fix for #425, `mod.f` would not have access to `__builtins__`, and
# thus calling `mod.f(-1)` (which relies on the `abs` builtin) would
# fail.
assert mod.f(-1) == 1
def test_load_dynamic_module_in_grandchild_process(self):
# Make sure that when loaded, a dynamic module preserves its dynamic
# property. Otherwise, this will lead to an ImportError if pickled in
# the child process and reloaded in another one.
# We create a new dynamic module
mod = types.ModuleType('mod')
code = '''
x = 1
'''
exec(textwrap.dedent(code), mod.__dict__)
        # This script will be run in a separate child process. It will import
# the pickled dynamic module, and then re-pickle it under a new name.
# Finally, it will create a child process that will load the re-pickled
# dynamic module.
parent_process_module_file = os.path.join(
self.tmpdir, 'dynamic_module_from_parent_process.pkl')
child_process_module_file = os.path.join(
self.tmpdir, 'dynamic_module_from_child_process.pkl')
child_process_script = '''
from srsly.cloudpickle.compat import pickle
import textwrap
import srsly.cloudpickle as cloudpickle
from srsly.tests.cloudpickle.testutils import assert_run_python_script
child_of_child_process_script = {child_of_child_process_script}
with open('{parent_process_module_file}', 'rb') as f:
mod = pickle.load(f)
with open('{child_process_module_file}', 'wb') as f:
cloudpickle.dump(mod, f, protocol={protocol})
assert_run_python_script(textwrap.dedent(child_of_child_process_script))
'''
        # The script run by the process created by the child process
child_of_child_process_script = """ '''
from srsly.cloudpickle.compat import pickle
with open('{child_process_module_file}','rb') as fid:
mod = pickle.load(fid)
''' """
# Filling the two scripts with the pickled modules filepaths and,
# for the first child process, the script to be executed by its
# own child process.
child_of_child_process_script = child_of_child_process_script.format(
child_process_module_file=child_process_module_file)
child_process_script = child_process_script.format(
parent_process_module_file=_escape(parent_process_module_file),
child_process_module_file=_escape(child_process_module_file),
child_of_child_process_script=_escape(child_of_child_process_script),
protocol=self.protocol)
try:
with open(parent_process_module_file, 'wb') as fid:
cloudpickle.dump(mod, fid, protocol=self.protocol)
assert_run_python_script(textwrap.dedent(child_process_script))
finally:
# Remove temporary created files
if os.path.exists(parent_process_module_file):
os.unlink(parent_process_module_file)
if os.path.exists(child_process_module_file):
os.unlink(child_process_module_file)
def test_correct_globals_import(self):
def nested_function(x):
return x + 1
def unwanted_function(x):
return math.exp(x)
def my_small_function(x, y):
return nested_function(x) + y
b = cloudpickle.dumps(my_small_function, protocol=self.protocol)
# Make sure that the pickle byte string only includes the definition
# of my_small_function and its dependency nested_function while
# extra functions and modules such as unwanted_function and the math
# module are not included so as to keep the pickle payload as
# lightweight as possible.
assert b'my_small_function' in b
assert b'nested_function' in b
assert b'unwanted_function' not in b
assert b'math' not in b
def test_module_importability(self):
pytest.importorskip("_cloudpickle_testpkg")
from srsly.cloudpickle.compat import pickle
import os.path
import distutils
import distutils.ccompiler
assert _should_pickle_by_reference(pickle)
assert _should_pickle_by_reference(os.path) # fake (aliased) module
assert _should_pickle_by_reference(distutils) # package
assert _should_pickle_by_reference(distutils.ccompiler) # module in package
dynamic_module = types.ModuleType('dynamic_module')
assert not _should_pickle_by_reference(dynamic_module)
if platform.python_implementation() == 'PyPy':
import _codecs
assert _should_pickle_by_reference(_codecs)
# #354: Check that modules created dynamically during the import of
# their parent modules are considered importable by cloudpickle.
# See the mod_with_dynamic_submodule documentation for more
# details of this use case.
import _cloudpickle_testpkg.mod.dynamic_submodule as m
assert _should_pickle_by_reference(m)
assert pickle_depickle(m, protocol=self.protocol) is m
# Check for similar behavior for a module that cannot be imported by
# attribute lookup.
from _cloudpickle_testpkg.mod import dynamic_submodule_two as m2
# Note: import _cloudpickle_testpkg.mod.dynamic_submodule_two as m2
# works only for Python 3.7+
assert _should_pickle_by_reference(m2)
assert pickle_depickle(m2, protocol=self.protocol) is m2
# Submodule_three is a dynamic module only importable via module lookup
with pytest.raises(ImportError):
import _cloudpickle_testpkg.mod.submodule_three # noqa
from _cloudpickle_testpkg.mod import submodule_three as m3
assert not _should_pickle_by_reference(m3)
# This module cannot be pickled using attribute lookup (as it does not
        # have a `__module__` attribute like classes and functions).
assert not hasattr(m3, '__module__')
depickled_m3 = pickle_depickle(m3, protocol=self.protocol)
assert depickled_m3 is not m3
assert m3.f(1) == depickled_m3.f(1)
# Do the same for an importable dynamic submodule inside a dynamic
# module inside a file-backed module.
import _cloudpickle_testpkg.mod.dynamic_submodule.dynamic_subsubmodule as sm # noqa
assert _should_pickle_by_reference(sm)
assert pickle_depickle(sm, protocol=self.protocol) is sm
expected = "cannot check importability of object instances"
with pytest.raises(TypeError, match=expected):
_should_pickle_by_reference(object())
def test_Ellipsis(self):
self.assertEqual(Ellipsis,
pickle_depickle(Ellipsis, protocol=self.protocol))
def test_NotImplemented(self):
ExcClone = pickle_depickle(NotImplemented, protocol=self.protocol)
self.assertEqual(NotImplemented, ExcClone)
def test_NoneType(self):
res = pickle_depickle(type(None), protocol=self.protocol)
self.assertEqual(type(None), res)
def test_EllipsisType(self):
res = pickle_depickle(type(Ellipsis), protocol=self.protocol)
self.assertEqual(type(Ellipsis), res)
def test_NotImplementedType(self):
res = pickle_depickle(type(NotImplemented), protocol=self.protocol)
self.assertEqual(type(NotImplemented), res)
def test_builtin_function(self):
        # Note that builtin_function_or_method objects are special-cased by
        # cloudpickle only in Python 2.
# builtin function from the __builtin__ module
assert pickle_depickle(zip, protocol=self.protocol) is zip
from os import mkdir
# builtin function from a "regular" module
assert pickle_depickle(mkdir, protocol=self.protocol) is mkdir
def test_builtin_type_constructor(self):
# This test makes sure that cloudpickling builtin-type
# constructors works for all python versions/implementation.
# pickle_depickle some builtin methods of the __builtin__ module
for t in list, tuple, set, frozenset, dict, object:
cloned_new = pickle_depickle(t.__new__, protocol=self.protocol)
assert isinstance(cloned_new(t), t)
# The next 4 tests cover all cases into which builtin python methods can
# appear.
# There are 4 kinds of method: 'classic' methods, classmethods,
# staticmethods and slotmethods. They will appear under different types
# depending on whether they are called from the __dict__ of their
# class, their class itself, or an instance of their class. This makes
# 12 total combinations.
# This discussion and the following tests are relevant for the CPython
    # implementation only. In PyPy, there are no builtin method or builtin
    # function types/flavours. The only way in which a builtin method can be
    # identified is via its builtin-code __code__ attribute.
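    # Concretely, for the objects exercised by the four tests below (CPython):
    #   (1.5).hex                       -> builtin_function_or_method (bound classic method)
    #   float.hex                       -> method_descriptor          (unbound classic method)
    #   (1.5).fromhex / float.fromhex   -> builtin_function_or_method (classmethod)
    #   float.__dict__['fromhex']       -> classmethod_descriptor
    #   (1.5).__repr__                  -> method-wrapper             (bound slot method)
    #   float.__repr__                  -> wrapper_descriptor         (unbound slot method)
    #   "foo".maketrans / str.maketrans -> builtin_function_or_method (staticmethod)
    #   str.__dict__['maketrans']       -> staticmethod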
def test_builtin_classicmethod(self):
obj = 1.5 # float object
bound_classicmethod = obj.hex # builtin_function_or_method
unbound_classicmethod = type(obj).hex # method_descriptor
clsdict_classicmethod = type(obj).__dict__['hex'] # method_descriptor
assert unbound_classicmethod is clsdict_classicmethod
depickled_bound_meth = pickle_depickle(
bound_classicmethod, protocol=self.protocol)
depickled_unbound_meth = pickle_depickle(
unbound_classicmethod, protocol=self.protocol)
depickled_clsdict_meth = pickle_depickle(
clsdict_classicmethod, protocol=self.protocol)
        # No identity check on the bound methods: they are bound to different
        # float instances
assert depickled_bound_meth() == bound_classicmethod()
assert depickled_unbound_meth is unbound_classicmethod
assert depickled_clsdict_meth is clsdict_classicmethod
@pytest.mark.skipif(
(platform.machine() == "aarch64" and sys.version_info[:2] >= (3, 10))
or platform.python_implementation() == "PyPy"
or (sys.version_info[:2] == (3, 10) and sys.version_info >= (3, 10, 8))
# Skipping tests on 3.11 due to https://github.com/cloudpipe/cloudpickle/pull/486.
or sys.version_info[:2] == (3, 11),
reason="Fails on aarch64 + python 3.10+ in cibuildwheel, currently unable to replicate failure elsewhere; fails sometimes for pypy on conda-forge; fails for python 3.10.8+ and 3.11")
def test_builtin_classmethod(self):
obj = 1.5 # float object
bound_clsmethod = obj.fromhex # builtin_function_or_method
unbound_clsmethod = type(obj).fromhex # builtin_function_or_method
clsdict_clsmethod = type(
obj).__dict__['fromhex'] # classmethod_descriptor
depickled_bound_meth = pickle_depickle(
bound_clsmethod, protocol=self.protocol)
depickled_unbound_meth = pickle_depickle(
unbound_clsmethod, protocol=self.protocol)
depickled_clsdict_meth = pickle_depickle(
clsdict_clsmethod, protocol=self.protocol)
# float.fromhex takes a string as input.
arg = "0x1"
# Identity on both the bound and the unbound methods cannot be
# tested: the bound methods are bound to different objects, and the
# unbound methods are actually recreated at each call.
assert depickled_bound_meth(arg) == bound_clsmethod(arg)
assert depickled_unbound_meth(arg) == unbound_clsmethod(arg)
if platform.python_implementation() == 'CPython':
# Roundtripping a classmethod_descriptor results in a
# builtin_function_or_method (CPython upstream issue).
assert depickled_clsdict_meth(arg) == clsdict_clsmethod(float, arg)
if platform.python_implementation() == 'PyPy':
            # builtin classmethods are plain classmethods in PyPy (not
            # callable). We test equality of types and the functionality of the
            # __func__ attribute instead. We do not test the identity of
            # the functions as __func__ attributes of classmethods are not
            # pickleable and must be reconstructed at depickling time.
assert type(depickled_clsdict_meth) == type(clsdict_clsmethod)
assert depickled_clsdict_meth.__func__(
float, arg) == clsdict_clsmethod.__func__(float, arg)
def test_builtin_slotmethod(self):
obj = 1.5 # float object
bound_slotmethod = obj.__repr__ # method-wrapper
unbound_slotmethod = type(obj).__repr__ # wrapper_descriptor
clsdict_slotmethod = type(obj).__dict__['__repr__'] # ditto
depickled_bound_meth = pickle_depickle(
bound_slotmethod, protocol=self.protocol)
depickled_unbound_meth = pickle_depickle(
unbound_slotmethod, protocol=self.protocol)
depickled_clsdict_meth = pickle_depickle(
clsdict_slotmethod, protocol=self.protocol)
        # No identity tests on the bound slotmethods as they are bound to
        # different float instances
assert depickled_bound_meth() == bound_slotmethod()
assert depickled_unbound_meth is unbound_slotmethod
assert depickled_clsdict_meth is clsdict_slotmethod
@pytest.mark.skipif(
platform.python_implementation() == "PyPy",
reason="No known staticmethod example in the pypy stdlib")
def test_builtin_staticmethod(self):
obj = "foo" # str object
bound_staticmethod = obj.maketrans # builtin_function_or_method
unbound_staticmethod = type(obj).maketrans # ditto
clsdict_staticmethod = type(obj).__dict__['maketrans'] # staticmethod
assert bound_staticmethod is unbound_staticmethod
depickled_bound_meth = pickle_depickle(
bound_staticmethod, protocol=self.protocol)
depickled_unbound_meth = pickle_depickle(
unbound_staticmethod, protocol=self.protocol)
depickled_clsdict_meth = pickle_depickle(
clsdict_staticmethod, protocol=self.protocol)
assert depickled_bound_meth is bound_staticmethod
assert depickled_unbound_meth is unbound_staticmethod
# staticmethod objects are recreated at depickling time, but the
# underlying __func__ object is pickled by attribute.
assert depickled_clsdict_meth.__func__ is clsdict_staticmethod.__func__
        assert type(depickled_clsdict_meth) is type(clsdict_staticmethod)
@pytest.mark.skipif(tornado is None,
reason="test needs Tornado installed")
def test_tornado_coroutine(self):
# Pickling a locally defined coroutine function
from tornado import gen, ioloop
@gen.coroutine
def f(x, y):
yield gen.sleep(x)
raise gen.Return(y + 1)
@gen.coroutine
def g(y):
res = yield f(0.01, y)
raise gen.Return(res + 1)
data = cloudpickle.dumps([g, g], protocol=self.protocol)
f = g = None
g2, g3 = pickle.loads(data)
self.assertTrue(g2 is g3)
loop = ioloop.IOLoop.current()
res = loop.run_sync(functools.partial(g2, 5))
self.assertEqual(res, 7)
@pytest.mark.skipif(
(3, 11, 0, 'beta') <= sys.version_info < (3, 11, 0, 'beta', 4),
reason="https://github.com/python/cpython/issues/92932"
)
def test_extended_arg(self):
# Functions with more than 65535 global vars prefix some global
# variable references with the EXTENDED_ARG opcode.
nvars = 65537 + 258
names = ['g%d' % i for i in range(1, nvars)]
r = random.Random(42)
d = {name: r.randrange(100) for name in names}
# def f(x):
# x = g1, g2, ...
# return zlib.crc32(bytes(bytearray(x)))
code = """
import zlib
def f():
x = {tup}
return zlib.crc32(bytes(bytearray(x)))
""".format(tup=', '.join(names))
exec(textwrap.dedent(code), d, d)
f = d['f']
res = f()
data = cloudpickle.dumps([f, f], protocol=self.protocol)
d = f = None
f2, f3 = pickle.loads(data)
self.assertTrue(f2 is f3)
self.assertEqual(f2(), res)
def test_submodule(self):
# Function that refers (by attribute) to a sub-module of a package.
# Choose any module NOT imported by __init__ of its parent package
# examples in standard library include:
# - http.cookies, unittest.mock, curses.textpad, xml.etree.ElementTree
global xml # imitate performing this import at top of file
import xml.etree.ElementTree
def example():
x = xml.etree.ElementTree.Comment # potential AttributeError
s = cloudpickle.dumps(example, protocol=self.protocol)
# refresh the environment, i.e., unimport the dependency
del xml
for item in list(sys.modules):
if item.split('.')[0] == 'xml':
del sys.modules[item]
# deserialise
f = pickle.loads(s)
f() # perform test for error
def test_submodule_closure(self):
# Same as test_submodule except the package is not a global
def scope():
import xml.etree.ElementTree
def example():
x = xml.etree.ElementTree.Comment # potential AttributeError
return example
example = scope()
s = cloudpickle.dumps(example, protocol=self.protocol)
# refresh the environment (unimport dependency)
for item in list(sys.modules):
if item.split('.')[0] == 'xml':
del sys.modules[item]
f = cloudpickle.loads(s)
f() # test
def test_multiprocess(self):
# running a function pickled by another process (a la dask.distributed)
def scope():
def example():
x = xml.etree.ElementTree.Comment
return example
global xml
import xml.etree.ElementTree
example = scope()
s = cloudpickle.dumps(example, protocol=self.protocol)
# choose "subprocess" rather than "multiprocessing" because the latter
# library uses fork to preserve the parent environment.
command = ("import base64; "
"from srsly.cloudpickle.compat import pickle; "
"pickle.loads(base64.b32decode('" +
base64.b32encode(s).decode('ascii') +
"'))()")
assert not subprocess.call([sys.executable, '-c', command])
def test_import(self):
# like test_multiprocess except subpackage modules referenced directly
# (unlike test_submodule)
global etree
def scope():
import xml.etree as foobar
def example():
x = etree.Comment
x = foobar.ElementTree
return example
example = scope()
import xml.etree.ElementTree as etree
s = cloudpickle.dumps(example, protocol=self.protocol)
command = ("import base64; "
"from srsly.cloudpickle.compat import pickle; "
"pickle.loads(base64.b32decode('" +
base64.b32encode(s).decode('ascii') +
"'))()")
assert not subprocess.call([sys.executable, '-c', command])
def test_multiprocessing_lock_raises(self):
lock = multiprocessing.Lock()
with pytest.raises(RuntimeError, match="only be shared between processes through inheritance"):
cloudpickle.dumps(lock)
def test_cell_manipulation(self):
cell = _make_empty_cell()
with pytest.raises(ValueError):
cell.cell_contents
ob = object()
cell_set(cell, ob)
self.assertTrue(
cell.cell_contents is ob,
msg='cell contents not set correctly',
)
def check_logger(self, name):
logger = logging.getLogger(name)
pickled = pickle_depickle(logger, protocol=self.protocol)
self.assertTrue(pickled is logger, (pickled, logger))
dumped = cloudpickle.dumps(logger)
code = """if 1:
import base64, srsly.cloudpickle as cloudpickle, logging
logging.basicConfig(level=logging.INFO)
logger = cloudpickle.loads(base64.b32decode(b'{}'))
logger.info('hello')
""".format(base64.b32encode(dumped).decode('ascii'))
proc = subprocess.Popen([sys.executable, "-W ignore", "-c", code],
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
out, _ = proc.communicate()
self.assertEqual(proc.wait(), 0)
self.assertEqual(out.strip().decode(),
f'INFO:{logger.name}:hello')
def test_logger(self):
# logging.RootLogger object
self.check_logger(None)
# logging.Logger object
self.check_logger('cloudpickle.dummy_test_logger')
def test_getset_descriptor(self):
assert isinstance(float.real, types.GetSetDescriptorType)
depickled_descriptor = pickle_depickle(float.real)
self.assertIs(depickled_descriptor, float.real)
def test_abc_cache_not_pickled(self):
# cloudpickle issue #302: make sure that cloudpickle does not pickle
# the caches populated during instance/subclass checks of abc.ABCMeta
# instances.
MyClass = abc.ABCMeta('MyClass', (), {})
class MyUnrelatedClass:
pass
class MyRelatedClass:
pass
MyClass.register(MyRelatedClass)
assert not issubclass(MyUnrelatedClass, MyClass)
assert issubclass(MyRelatedClass, MyClass)
s = cloudpickle.dumps(MyClass)
assert b"MyUnrelatedClass" not in s
assert b"MyRelatedClass" in s
depickled_class = cloudpickle.loads(s)
assert not issubclass(MyUnrelatedClass, depickled_class)
assert issubclass(MyRelatedClass, depickled_class)
def test_abc(self):
class AbstractClass(abc.ABC):
@abc.abstractmethod
def some_method(self):
"""A method"""
@classmethod
@abc.abstractmethod
def some_classmethod(cls):
"""A classmethod"""
@staticmethod
@abc.abstractmethod
def some_staticmethod():
"""A staticmethod"""
@property
@abc.abstractmethod
def some_property():
"""A property"""
class ConcreteClass(AbstractClass):
def some_method(self):
return 'it works!'
@classmethod
def some_classmethod(cls):
assert cls == ConcreteClass
return 'it works!'
@staticmethod
def some_staticmethod():
return 'it works!'
@property
def some_property(self):
return 'it works!'
# This abstract class is locally defined so we can safely register
        # tuple in it to verify the unpickled class also registers tuple.
AbstractClass.register(tuple)
concrete_instance = ConcreteClass()
depickled_base = pickle_depickle(AbstractClass, protocol=self.protocol)
depickled_class = pickle_depickle(ConcreteClass,
protocol=self.protocol)
depickled_instance = pickle_depickle(concrete_instance)
assert issubclass(tuple, AbstractClass)
assert issubclass(tuple, depickled_base)
self.assertEqual(depickled_class().some_method(), 'it works!')
self.assertEqual(depickled_instance.some_method(), 'it works!')
self.assertEqual(depickled_class.some_classmethod(), 'it works!')
self.assertEqual(depickled_instance.some_classmethod(), 'it works!')
self.assertEqual(depickled_class().some_staticmethod(), 'it works!')
self.assertEqual(depickled_instance.some_staticmethod(), 'it works!')
self.assertEqual(depickled_class().some_property, 'it works!')
self.assertEqual(depickled_instance.some_property, 'it works!')
self.assertRaises(TypeError, depickled_base)
class DepickledBaseSubclass(depickled_base):
def some_method(self):
return 'it works for realz!'
@classmethod
def some_classmethod(cls):
assert cls == DepickledBaseSubclass
return 'it works for realz!'
@staticmethod
def some_staticmethod():
return 'it works for realz!'
@property
def some_property():
return 'it works for realz!'
self.assertEqual(DepickledBaseSubclass().some_method(),
'it works for realz!')
class IncompleteBaseSubclass(depickled_base):
def some_method(self):
return 'this class lacks some concrete methods'
self.assertRaises(TypeError, IncompleteBaseSubclass)
def test_abstracts(self):
# Same as `test_abc` but using deprecated `abc.abstract*` methods.
# See https://github.com/cloudpipe/cloudpickle/issues/367
class AbstractClass(abc.ABC):
@abc.abstractmethod
def some_method(self):
"""A method"""
@abc.abstractclassmethod
def some_classmethod(cls):
"""A classmethod"""
@abc.abstractstaticmethod
def some_staticmethod():
"""A staticmethod"""
@abc.abstractproperty
def some_property(self):
"""A property"""
class ConcreteClass(AbstractClass):
def some_method(self):
return 'it works!'
@classmethod
def some_classmethod(cls):
assert cls == ConcreteClass
return 'it works!'
@staticmethod
def some_staticmethod():
return 'it works!'
@property
def some_property(self):
return 'it works!'
# This abstract class is locally defined so we can safely register
        # tuple in it to verify the unpickled class also registers tuple.
AbstractClass.register(tuple)
concrete_instance = ConcreteClass()
depickled_base = pickle_depickle(AbstractClass, protocol=self.protocol)
depickled_class = pickle_depickle(ConcreteClass,
protocol=self.protocol)
depickled_instance = pickle_depickle(concrete_instance)
assert issubclass(tuple, AbstractClass)
assert issubclass(tuple, depickled_base)
self.assertEqual(depickled_class().some_method(), 'it works!')
self.assertEqual(depickled_instance.some_method(), 'it works!')
self.assertEqual(depickled_class.some_classmethod(), 'it works!')
self.assertEqual(depickled_instance.some_classmethod(), 'it works!')
self.assertEqual(depickled_class().some_staticmethod(), 'it works!')
self.assertEqual(depickled_instance.some_staticmethod(), 'it works!')
self.assertEqual(depickled_class().some_property, 'it works!')
self.assertEqual(depickled_instance.some_property, 'it works!')
self.assertRaises(TypeError, depickled_base)
class DepickledBaseSubclass(depickled_base):
def some_method(self):
return 'it works for realz!'
@classmethod
def some_classmethod(cls):
assert cls == DepickledBaseSubclass
return 'it works for realz!'
@staticmethod
def some_staticmethod():
return 'it works for realz!'
@property
def some_property(self):
return 'it works for realz!'
self.assertEqual(DepickledBaseSubclass().some_method(),
'it works for realz!')
class IncompleteBaseSubclass(depickled_base):
def some_method(self):
return 'this class lacks some concrete methods'
self.assertRaises(TypeError, IncompleteBaseSubclass)
def test_weakset_identity_preservation(self):
# Test that weaksets don't lose all their inhabitants if they're
# pickled in a larger data structure that includes other references to
# their inhabitants.
class SomeClass:
def __init__(self, x):
self.x = x
obj1, obj2, obj3 = SomeClass(1), SomeClass(2), SomeClass(3)
things = [weakref.WeakSet([obj1, obj2]), obj1, obj2, obj3]
result = pickle_depickle(things, protocol=self.protocol)
weakset, depickled1, depickled2, depickled3 = result
self.assertEqual(depickled1.x, 1)
self.assertEqual(depickled2.x, 2)
self.assertEqual(depickled3.x, 3)
self.assertEqual(len(weakset), 2)
self.assertEqual(set(weakset), {depickled1, depickled2})
def test_non_module_object_passing_whichmodule_test(self):
# https://github.com/cloudpipe/cloudpickle/pull/326: cloudpickle should
        # not try to introspect non-module objects when trying to discover the
        # module of a function/class. This happened because codecov injects
        # tuples (and not modules) into sys.modules, but type-checks were not
        # carried out on the entries of sys.modules, causing cloudpickle to
        # then error out in unexpected ways
def func(x):
return x ** 2
# Trigger a loop during the execution of whichmodule(func) by
# explicitly setting the function's module to None
func.__module__ = None
class NonModuleObject:
def __ini__(self):
self.some_attr = None
def __getattr__(self, name):
# We whitelist func so that a _whichmodule(func, None) call
# returns the NonModuleObject instance if a type check on the
                # entries of sys.modules is not carried out; manipulating
                # this instance as if it really were a module later on in the
                # pickling process of func then errors out
if name == 'func':
return func
else:
raise AttributeError
non_module_object = NonModuleObject()
assert func(2) == 4
assert func is non_module_object.func
# Any manipulation of non_module_object relying on attribute access
# will raise an Exception
with pytest.raises(AttributeError):
_ = non_module_object.some_attr
try:
sys.modules['NonModuleObject'] = non_module_object
func_module_name = _whichmodule(func, None)
assert func_module_name != 'NonModuleObject'
assert func_module_name is None
depickled_func = pickle_depickle(func, protocol=self.protocol)
assert depickled_func(2) == 4
finally:
sys.modules.pop('NonModuleObject')
def test_unrelated_faulty_module(self):
# Check that pickling a dynamically defined function or class does not
# fail when introspecting the currently loaded modules in sys.modules
# as long as those faulty modules are unrelated to the class or
# function we are currently pickling.
for base_class in (object, types.ModuleType):
for module_name in ['_missing_module', None]:
class FaultyModule(base_class):
def __getattr__(self, name):
# This throws an exception while looking up within
# pickle.whichmodule or getattr(module, name, None)
raise Exception()
class Foo:
__module__ = module_name
def foo(self):
return "it works!"
def foo():
return "it works!"
foo.__module__ = module_name
if base_class is types.ModuleType: # noqa
faulty_module = FaultyModule('_faulty_module')
else:
faulty_module = FaultyModule()
sys.modules["_faulty_module"] = faulty_module
try:
# Test whichmodule in save_global.
self.assertEqual(pickle_depickle(Foo()).foo(), "it works!")
# Test whichmodule in save_function.
cloned = pickle_depickle(foo, protocol=self.protocol)
self.assertEqual(cloned(), "it works!")
finally:
sys.modules.pop("_faulty_module", None)
@pytest.mark.skip(reason="fails for pytest v7.2.0")
def test_dynamic_pytest_module(self):
# Test case for pull request https://github.com/cloudpipe/cloudpickle/pull/116
import py
def f():
s = py.builtin.set([1])
return s.pop()
# some setup is required to allow pytest apimodules to be correctly
# serializable.
from srsly.cloudpickle import CloudPickler
from srsly.cloudpickle import cloudpickle_fast as cp_fast
CloudPickler.dispatch_table[type(py.builtin)] = cp_fast._module_reduce
g = cloudpickle.loads(cloudpickle.dumps(f, protocol=self.protocol))
result = g()
self.assertEqual(1, result)
def test_function_module_name(self):
func = lambda x: x
cloned = pickle_depickle(func, protocol=self.protocol)
self.assertEqual(cloned.__module__, func.__module__)
def test_function_qualname(self):
def func(x):
return x
# Default __qualname__ attribute (Python 3 only)
if hasattr(func, '__qualname__'):
cloned = pickle_depickle(func, protocol=self.protocol)
self.assertEqual(cloned.__qualname__, func.__qualname__)
# Mutated __qualname__ attribute
func.__qualname__ = '<modifiedlambda>'
cloned = pickle_depickle(func, protocol=self.protocol)
self.assertEqual(cloned.__qualname__, func.__qualname__)
def test_property(self):
# Note that the @property decorator only has an effect on new-style
# classes.
class MyObject:
_read_only_value = 1
_read_write_value = 1
@property
def read_only_value(self):
"A read-only attribute"
return self._read_only_value
@property
def read_write_value(self):
return self._read_write_value
@read_write_value.setter
def read_write_value(self, value):
self._read_write_value = value
my_object = MyObject()
assert my_object.read_only_value == 1
assert MyObject.read_only_value.__doc__ == "A read-only attribute"
with pytest.raises(AttributeError):
my_object.read_only_value = 2
my_object.read_write_value = 2
depickled_obj = pickle_depickle(my_object)
assert depickled_obj.read_only_value == 1
assert depickled_obj.read_write_value == 2
# make sure the depickled read_only_value attribute is still read-only
with pytest.raises(AttributeError):
my_object.read_only_value = 2
# make sure the depickled read_write_value attribute is writeable
depickled_obj.read_write_value = 3
assert depickled_obj.read_write_value == 3
        assert type(depickled_obj).read_only_value.__doc__ == "A read-only attribute"
def test_namedtuple(self):
MyTuple = collections.namedtuple('MyTuple', ['a', 'b', 'c'])
t1 = MyTuple(1, 2, 3)
t2 = MyTuple(3, 2, 1)
depickled_t1, depickled_MyTuple, depickled_t2 = pickle_depickle(
[t1, MyTuple, t2], protocol=self.protocol)
assert isinstance(depickled_t1, MyTuple)
assert depickled_t1 == t1
assert depickled_MyTuple is MyTuple
assert isinstance(depickled_t2, MyTuple)
assert depickled_t2 == t2
@pytest.mark.skipif(platform.python_implementation() == "PyPy",
reason="fails sometimes for pypy on conda-forge")
def test_interactively_defined_function(self):
# Check that callables defined in the __main__ module of a Python
# script (or jupyter kernel) can be pickled / unpickled / executed.
code = """\
from srsly.tests.cloudpickle.testutils import subprocess_pickle_echo
CONSTANT = 42
class Foo(object):
def method(self, x):
return x
foo = Foo()
def f0(x):
return x ** 2
def f1():
return Foo
def f2(x):
return Foo().method(x)
def f3():
return Foo().method(CONSTANT)
def f4(x):
return foo.method(x)
def f5(x):
# Recursive call to a dynamically defined function.
if x <= 0:
return f4(x)
return f5(x - 1) + 1
cloned = subprocess_pickle_echo(lambda x: x**2, protocol={protocol})
assert cloned(3) == 9
cloned = subprocess_pickle_echo(f0, protocol={protocol})
assert cloned(3) == 9
cloned = subprocess_pickle_echo(Foo, protocol={protocol})
assert cloned().method(2) == Foo().method(2)
cloned = subprocess_pickle_echo(Foo(), protocol={protocol})
assert cloned.method(2) == Foo().method(2)
cloned = subprocess_pickle_echo(f1, protocol={protocol})
assert cloned()().method('a') == f1()().method('a')
cloned = subprocess_pickle_echo(f2, protocol={protocol})
assert cloned(2) == f2(2)
cloned = subprocess_pickle_echo(f3, protocol={protocol})
assert cloned() == f3()
cloned = subprocess_pickle_echo(f4, protocol={protocol})
assert cloned(2) == f4(2)
cloned = subprocess_pickle_echo(f5, protocol={protocol})
assert cloned(7) == f5(7) == 7
""".format(protocol=self.protocol)
assert_run_python_script(textwrap.dedent(code))
def test_interactively_defined_global_variable(self):
# Check that callables defined in the __main__ module of a Python
# script (or jupyter kernel) correctly retrieve global variables.
code_template = """\
from srsly.tests.cloudpickle.testutils import subprocess_pickle_echo
from srsly.cloudpickle import dumps, loads
def local_clone(obj, protocol=None):
return loads(dumps(obj, protocol=protocol))
VARIABLE = "default_value"
def f0():
global VARIABLE
VARIABLE = "changed_by_f0"
def f1():
return VARIABLE
assert f0.__globals__ is f1.__globals__
# pickle f0 and f1 inside the same pickle_string
cloned_f0, cloned_f1 = {clone_func}([f0, f1], protocol={protocol})
# cloned_f0 and cloned_f1 now share a global namespace that is isolated
# from any previously existing namespace
assert cloned_f0.__globals__ is cloned_f1.__globals__
assert cloned_f0.__globals__ is not f0.__globals__
# pickle f1 another time, but in a new pickle string
pickled_f1 = dumps(f1, protocol={protocol})
# Change the value of the global variable in f0's new global namespace
cloned_f0()
# thanks to cloudpickle isolation, depickling and calling f0 and f1
# should not affect the globals of already existing modules
assert VARIABLE == "default_value", VARIABLE
# Ensure that cloned_f1 and cloned_f0 share the same globals, as f1 and
# f0 shared the same globals at pickling time, and cloned_f1 was
# depickled from the same pickle string as cloned_f0
shared_global_var = cloned_f1()
assert shared_global_var == "changed_by_f0", shared_global_var
# f1 is unpickled another time, but because it comes from another
# pickle string than pickled_f1 and pickled_f0, it will not share the
# same globals as the latter two.
new_cloned_f1 = loads(pickled_f1)
assert new_cloned_f1.__globals__ is not cloned_f1.__globals__
assert new_cloned_f1.__globals__ is not f1.__globals__
# get the value of new_cloned_f1's VARIABLE
new_global_var = new_cloned_f1()
assert new_global_var == "default_value", new_global_var
"""
for clone_func in ['local_clone', 'subprocess_pickle_echo']:
code = code_template.format(protocol=self.protocol,
clone_func=clone_func)
assert_run_python_script(textwrap.dedent(code))
def test_closure_interacting_with_a_global_variable(self):
global _TEST_GLOBAL_VARIABLE
assert _TEST_GLOBAL_VARIABLE == "default_value"
orig_value = _TEST_GLOBAL_VARIABLE
try:
def f0():
global _TEST_GLOBAL_VARIABLE
_TEST_GLOBAL_VARIABLE = "changed_by_f0"
def f1():
return _TEST_GLOBAL_VARIABLE
# pickle f0 and f1 inside the same pickle_string
cloned_f0, cloned_f1 = pickle_depickle([f0, f1],
protocol=self.protocol)
# cloned_f0 and cloned_f1 now share a global namespace that is
# isolated from any previously existing namespace
assert cloned_f0.__globals__ is cloned_f1.__globals__
assert cloned_f0.__globals__ is not f0.__globals__
# pickle f1 another time, but in a new pickle string
pickled_f1 = cloudpickle.dumps(f1, protocol=self.protocol)
# Change the global variable's value in f0's new global namespace
cloned_f0()
# depickling f0 and f1 should not affect the globals of already
# existing modules
assert _TEST_GLOBAL_VARIABLE == "default_value"
# Ensure that cloned_f1 and cloned_f0 share the same globals, as f1
# and f0 shared the same globals at pickling time, and cloned_f1
# was depickled from the same pickle string as cloned_f0
shared_global_var = cloned_f1()
assert shared_global_var == "changed_by_f0", shared_global_var
# f1 is unpickled another time, but because it comes from another
# pickle string than pickled_f1 and pickled_f0, it will not share
# the same globals as the latter two.
new_cloned_f1 = pickle.loads(pickled_f1)
assert new_cloned_f1.__globals__ is not cloned_f1.__globals__
assert new_cloned_f1.__globals__ is not f1.__globals__
# get the value of new_cloned_f1's VARIABLE
new_global_var = new_cloned_f1()
assert new_global_var == "default_value", new_global_var
finally:
_TEST_GLOBAL_VARIABLE = orig_value
def test_interactive_remote_function_calls(self):
code = """if __name__ == "__main__":
from srsly.tests.cloudpickle.testutils import subprocess_worker
def interactive_function(x):
return x + 1
with subprocess_worker(protocol={protocol}) as w:
assert w.run(interactive_function, 41) == 42
# Define a new function that will call an updated version of
# the previously called function:
def wrapper_func(x):
return interactive_function(x)
def interactive_function(x):
return x - 1
# The change in the definition of interactive_function in the main
# module of the main process should be reflected transparently
# in the worker process: the worker process does not recall the
# previous definition of `interactive_function`:
assert w.run(wrapper_func, 41) == 40
""".format(protocol=self.protocol)
assert_run_python_script(code)
def test_interactive_remote_function_calls_no_side_effect(self):
code = """if __name__ == "__main__":
from srsly.tests.cloudpickle.testutils import subprocess_worker
import sys
with subprocess_worker(protocol={protocol}) as w:
GLOBAL_VARIABLE = 0
class CustomClass(object):
def mutate_globals(self):
global GLOBAL_VARIABLE
GLOBAL_VARIABLE += 1
return GLOBAL_VARIABLE
custom_object = CustomClass()
assert w.run(custom_object.mutate_globals) == 1
# The caller global variable is unchanged in the main process.
assert GLOBAL_VARIABLE == 0
# Calling the same function again starts again from zero. The
# worker process is stateless: it has no memory of the past call:
assert w.run(custom_object.mutate_globals) == 1
# The symbols defined in the main process __main__ module are
# not set in the worker process main module to leave the worker
# as stateless as possible:
def is_in_main(name):
return hasattr(sys.modules["__main__"], name)
assert is_in_main("CustomClass")
assert not w.run(is_in_main, "CustomClass")
assert is_in_main("GLOBAL_VARIABLE")
assert not w.run(is_in_main, "GLOBAL_VARIABLE")
""".format(protocol=self.protocol)
assert_run_python_script(code)
def test_interactive_dynamic_type_and_remote_instances(self):
code = """if __name__ == "__main__":
from srsly.tests.cloudpickle.testutils import subprocess_worker
with subprocess_worker(protocol={protocol}) as w:
class CustomCounter:
def __init__(self):
self.count = 0
def increment(self):
self.count += 1
return self
counter = CustomCounter().increment()
assert counter.count == 1
returned_counter = w.run(counter.increment)
assert returned_counter.count == 2, returned_counter.count
# Check that the class definition of the returned instance was
# matched back to the original class definition living in __main__.
assert isinstance(returned_counter, CustomCounter)
# Check that memoization does not break provenance tracking:
def echo(*args):
return args
C1, C2, c1, c2 = w.run(echo, CustomCounter, CustomCounter,
CustomCounter(), returned_counter)
assert C1 is CustomCounter
assert C2 is CustomCounter
assert isinstance(c1, CustomCounter)
assert isinstance(c2, CustomCounter)
""".format(protocol=self.protocol)
assert_run_python_script(code)
def test_interactive_dynamic_type_and_stored_remote_instances(self):
"""Simulate objects stored on workers to check isinstance semantics
Such instances stored in the memory of running worker processes are
similar to dask-distributed futures for instance.
"""
code = """if __name__ == "__main__":
import srsly.cloudpickle as cloudpickle, uuid
from srsly.tests.cloudpickle.testutils import subprocess_worker
with subprocess_worker(protocol={protocol}) as w:
class A:
'''Original class definition'''
pass
def store(x):
storage = getattr(cloudpickle, "_test_storage", None)
if storage is None:
storage = cloudpickle._test_storage = dict()
obj_id = uuid.uuid4().hex
storage[obj_id] = x
return obj_id
def lookup(obj_id):
return cloudpickle._test_storage[obj_id]
id1 = w.run(store, A())
# The stored object on the worker is matched to a singleton class
# definition thanks to provenance tracking:
assert w.run(lambda obj_id: isinstance(lookup(obj_id), A), id1)
# Retrieving the object from the worker yields a local copy that
        # is matched back to the local class definition this instance
# originally stems from.
assert isinstance(w.run(lookup, id1), A)
# Changing the local class definition should be taken into account
# in all subsequent calls. In particular the old instances on the
# worker do not map back to the new class definition, neither on
# the worker itself, nor locally on the main program when the old
# instance is retrieved:
class A:
'''Updated class definition'''
pass
assert not w.run(lambda obj_id: isinstance(lookup(obj_id), A), id1)
retrieved1 = w.run(lookup, id1)
assert not isinstance(retrieved1, A)
assert retrieved1.__class__ is not A
assert retrieved1.__class__.__doc__ == "Original class definition"
# New instances on the other hand are proper instances of the new
# class definition everywhere:
a = A()
id2 = w.run(store, a)
assert w.run(lambda obj_id: isinstance(lookup(obj_id), A), id2)
assert isinstance(w.run(lookup, id2), A)
        # Monkeypatch the class definition in the main process to a new
# class method:
A.echo = lambda cls, x: x
# Calling this method on an instance will automatically update
# the remote class definition on the worker to propagate the monkey
# patch dynamically.
assert w.run(a.echo, 42) == 42
# The stored instance can therefore also access the new class
# method:
assert w.run(lambda obj_id: lookup(obj_id).echo(43), id2) == 43
""".format(protocol=self.protocol)
assert_run_python_script(code)
@pytest.mark.skip(reason="Seems to have issues outside of linux and CPython")
def test_interactive_remote_function_calls_no_memory_leak(self):
code = """if __name__ == "__main__":
from srsly.tests.cloudpickle.testutils import subprocess_worker
import struct
with subprocess_worker(protocol={protocol}) as w:
reference_size = w.memsize()
assert reference_size > 0
def make_big_closure(i):
# Generate a byte string of size 1MB
itemsize = len(struct.pack("l", 1))
data = struct.pack("l", i) * (int(1e6) // itemsize)
def process_data():
return len(data)
return process_data
for i in range(100):
func = make_big_closure(i)
result = w.run(func)
assert result == int(1e6), result
import gc
w.run(gc.collect)
# By this time the worker process has processed 100MB worth of data
# passed in the closures. The worker memory size should not have
# grown by more than a few MB as closures are garbage collected at
# the end of each remote function call.
growth = w.memsize() - reference_size
# For some reason, the memory growth after processing 100MB of
# data is ~10MB on MacOS, and ~1MB on Linux, so the upper bound on
# memory growth we use is only tight for MacOS. However,
# - 10MB is still 10x lower than the expected memory growth in case
# of a leak (which would be the total size of the processed data,
# 100MB)
# - the memory usage growth does not increase if using 10000
# iterations instead of 100 as used now (100x more data)
assert growth < 1.5e7, growth
""".format(protocol=self.protocol)
assert_run_python_script(code)
def test_pickle_reraise(self):
for exc_type in [Exception, ValueError, TypeError, RuntimeError]:
obj = RaiserOnPickle(exc_type("foo"))
with pytest.raises((exc_type, pickle.PicklingError)):
cloudpickle.dumps(obj, protocol=self.protocol)
def test_unhashable_function(self):
d = {'a': 1}
depickled_method = pickle_depickle(d.get, protocol=self.protocol)
self.assertEqual(depickled_method('a'), 1)
self.assertEqual(depickled_method('b'), None)
def test_itertools_count(self):
counter = itertools.count(1, step=2)
# advance the counter a bit
next(counter)
next(counter)
new_counter = pickle_depickle(counter, protocol=self.protocol)
self.assertTrue(counter is not new_counter)
for _ in range(10):
self.assertEqual(next(counter), next(new_counter))
def test_wraps_preserves_function_name(self):
from functools import wraps
def f():
pass
@wraps(f)
def g():
f()
f2 = pickle_depickle(g, protocol=self.protocol)
self.assertEqual(f2.__name__, f.__name__)
def test_wraps_preserves_function_doc(self):
from functools import wraps
def f():
"""42"""
pass
@wraps(f)
def g():
f()
f2 = pickle_depickle(g, protocol=self.protocol)
self.assertEqual(f2.__doc__, f.__doc__)
def test_wraps_preserves_function_annotations(self):
def f(x):
pass
f.__annotations__ = {'x': 1, 'return': float}
@wraps(f)
def g(x):
f(x)
f2 = pickle_depickle(g, protocol=self.protocol)
self.assertEqual(f2.__annotations__, f.__annotations__)
def test_type_hint(self):
t = typing.Union[list, int]
assert pickle_depickle(t) == t
def test_instance_with_slots(self):
for slots in [["registered_attribute"], "registered_attribute"]:
class ClassWithSlots:
__slots__ = slots
def __init__(self):
self.registered_attribute = 42
initial_obj = ClassWithSlots()
depickled_obj = pickle_depickle(
initial_obj, protocol=self.protocol)
for obj in [initial_obj, depickled_obj]:
self.assertEqual(obj.registered_attribute, 42)
with pytest.raises(AttributeError):
obj.non_registered_attribute = 1
class SubclassWithSlots(ClassWithSlots):
def __init__(self):
self.unregistered_attribute = 1
obj = SubclassWithSlots()
s = cloudpickle.dumps(obj, protocol=self.protocol)
del SubclassWithSlots
depickled_obj = cloudpickle.loads(s)
assert depickled_obj.unregistered_attribute == 1
@unittest.skipIf(not hasattr(types, "MappingProxyType"),
"Old versions of Python do not have this type.")
def test_mappingproxy(self):
mp = types.MappingProxyType({"some_key": "some value"})
assert mp == pickle_depickle(mp, protocol=self.protocol)
def test_dataclass(self):
dataclasses = pytest.importorskip("dataclasses")
DataClass = dataclasses.make_dataclass('DataClass', [('x', int)])
data = DataClass(x=42)
pickle_depickle(DataClass, protocol=self.protocol)
assert data.x == pickle_depickle(data, protocol=self.protocol).x == 42
def test_locally_defined_enum(self):
class StringEnum(str, enum.Enum):
"""Enum when all members are also (and must be) strings"""
class Color(StringEnum):
"""3-element color space"""
RED = "1"
GREEN = "2"
BLUE = "3"
def is_green(self):
return self is Color.GREEN
green1, green2, ClonedColor = pickle_depickle(
[Color.GREEN, Color.GREEN, Color], protocol=self.protocol)
assert green1 is green2
assert green1 is ClonedColor.GREEN
assert green1 is not ClonedColor.BLUE
assert isinstance(green1, str)
assert green1.is_green()
# cloudpickle systematically tracks provenance of class definitions
# and ensure reconciliation in case of round trips:
assert green1 is Color.GREEN
assert ClonedColor is Color
green3 = pickle_depickle(Color.GREEN, protocol=self.protocol)
assert green3 is Color.GREEN
def test_locally_defined_intenum(self):
# Try again with a IntEnum defined with the functional API
DynamicColor = enum.IntEnum("Color", {"RED": 1, "GREEN": 2, "BLUE": 3})
green1, green2, ClonedDynamicColor = pickle_depickle(
[DynamicColor.GREEN, DynamicColor.GREEN, DynamicColor],
protocol=self.protocol)
assert green1 is green2
assert green1 is ClonedDynamicColor.GREEN
assert green1 is not ClonedDynamicColor.BLUE
assert ClonedDynamicColor is DynamicColor
def test_interactively_defined_enum(self):
code = """if __name__ == "__main__":
from enum import Enum
from srsly.tests.cloudpickle.testutils import subprocess_worker
with subprocess_worker(protocol={protocol}) as w:
class Color(Enum):
RED = 1
GREEN = 2
def check_positive(x):
return Color.GREEN if x >= 0 else Color.RED
result = w.run(check_positive, 1)
# Check that the returned enum instance is reconciled with the
# locally defined Color enum type definition:
assert result is Color.GREEN
# Check that changing the definition of the Enum class is taken
# into account on the worker for subsequent calls:
class Color(Enum):
RED = 1
BLUE = 2
def check_positive(x):
return Color.BLUE if x >= 0 else Color.RED
result = w.run(check_positive, 1)
assert result is Color.BLUE
""".format(protocol=self.protocol)
assert_run_python_script(code)
def test_relative_import_inside_function(self):
pytest.importorskip("_cloudpickle_testpkg")
        # Make sure relative imports inside round-tripped functions are not
# broken. This was a bug in cloudpickle versions <= 0.5.3 and was
# re-introduced in 0.8.0.
from _cloudpickle_testpkg import relative_imports_factory
f, g = relative_imports_factory()
for func, source in zip([f, g], ["module", "package"]):
# Make sure relative imports are initially working
assert func() == f"hello from a {source}!"
# Make sure relative imports still work after round-tripping
cloned_func = pickle_depickle(func, protocol=self.protocol)
assert cloned_func() == f"hello from a {source}!"
def test_interactively_defined_func_with_keyword_only_argument(self):
# fixes https://github.com/cloudpipe/cloudpickle/issues/263
def f(a, *, b=1):
return a + b
depickled_f = pickle_depickle(f, protocol=self.protocol)
for func in (f, depickled_f):
assert func(2) == 3
assert func.__kwdefaults__ == {'b': 1}
@pytest.mark.skipif(not hasattr(types.CodeType, "co_posonlyargcount"),
reason="Requires positional-only argument syntax")
def test_interactively_defined_func_with_positional_only_argument(self):
# Fixes https://github.com/cloudpipe/cloudpickle/issues/266
        # The source code of this test is bundled in a string and is run from
# the __main__ module of a subprocess in order to avoid a SyntaxError
# in versions of python that do not support positional-only argument
# syntax.
code = """
import pytest
from srsly.cloudpickle import loads, dumps
def f(a, /, b=1):
return a + b
depickled_f = loads(dumps(f, protocol={protocol}))
for func in (f, depickled_f):
assert func(2) == 3
assert func.__code__.co_posonlyargcount == 1
with pytest.raises(TypeError):
func(a=2)
""".format(protocol=self.protocol)
assert_run_python_script(textwrap.dedent(code))
def test___reduce___returns_string(self):
        # Non-regression test for objects with a __reduce__ method returning a
# string, meaning "save by attribute using save_global"
pytest.importorskip("_cloudpickle_testpkg")
from _cloudpickle_testpkg import some_singleton
assert some_singleton.__reduce__() == "some_singleton"
depickled_singleton = pickle_depickle(
some_singleton, protocol=self.protocol)
assert depickled_singleton is some_singleton
def test_cloudpickle_extract_nested_globals(self):
def function_factory():
def inner_function():
global _TEST_GLOBAL_VARIABLE
return _TEST_GLOBAL_VARIABLE
return inner_function
globals_ = set(cloudpickle.cloudpickle._extract_code_globals(
function_factory.__code__).keys())
assert globals_ == {'_TEST_GLOBAL_VARIABLE'}
depickled_factory = pickle_depickle(function_factory,
protocol=self.protocol)
inner_func = depickled_factory()
assert inner_func() == _TEST_GLOBAL_VARIABLE
def test_recursion_during_pickling(self):
class A:
def __getattribute__(self, name):
return getattr(self, name)
a = A()
with pytest.raises(pickle.PicklingError, match='recursion'):
cloudpickle.dumps(a)
def test_out_of_band_buffers(self):
if self.protocol < 5:
pytest.skip("Need Pickle Protocol 5 or later")
np = pytest.importorskip("numpy")
class LocallyDefinedClass:
data = np.zeros(10)
data_instance = LocallyDefinedClass()
buffers = []
pickle_bytes = cloudpickle.dumps(data_instance, protocol=self.protocol,
buffer_callback=buffers.append)
assert len(buffers) == 1
reconstructed = pickle.loads(pickle_bytes, buffers=buffers)
np.testing.assert_allclose(reconstructed.data, data_instance.data)
def test_pickle_dynamic_typevar(self):
T = typing.TypeVar('T')
depickled_T = pickle_depickle(T, protocol=self.protocol)
attr_list = [
"__name__", "__bound__", "__constraints__", "__covariant__",
"__contravariant__"
]
for attr in attr_list:
assert getattr(T, attr) == getattr(depickled_T, attr)
def test_pickle_dynamic_typevar_tracking(self):
T = typing.TypeVar("T")
T2 = subprocess_pickle_echo(T, protocol=self.protocol)
assert T is T2
def test_pickle_dynamic_typevar_memoization(self):
T = typing.TypeVar('T')
depickled_T1, depickled_T2 = pickle_depickle((T, T),
protocol=self.protocol)
assert depickled_T1 is depickled_T2
def test_pickle_importable_typevar(self):
pytest.importorskip("_cloudpickle_testpkg")
from _cloudpickle_testpkg import T
T1 = pickle_depickle(T, protocol=self.protocol)
assert T1 is T
# Standard Library TypeVar
from typing import AnyStr
assert AnyStr is pickle_depickle(AnyStr, protocol=self.protocol)
def test_generic_type(self):
T = typing.TypeVar('T')
class C(typing.Generic[T]):
pass
assert pickle_depickle(C, protocol=self.protocol) is C
# Identity is not part of the typing contract: only test for
# equality instead.
assert pickle_depickle(C[int], protocol=self.protocol) == C[int]
with subprocess_worker(protocol=self.protocol) as worker:
def check_generic(generic, origin, type_value, use_args):
assert generic.__origin__ is origin
assert len(origin.__orig_bases__) == 1
ob = origin.__orig_bases__[0]
assert ob.__origin__ is typing.Generic
if use_args:
assert len(generic.__args__) == 1
assert generic.__args__[0] is type_value
else:
assert len(generic.__parameters__) == 1
assert generic.__parameters__[0] is type_value
assert len(ob.__parameters__) == 1
return "ok"
            # backward-compat for old Python 3.5 versions that sometimes rely
# on __parameters__
use_args = getattr(C[int], '__args__', ()) != ()
assert check_generic(C[int], C, int, use_args) == "ok"
assert worker.run(check_generic, C[int], C, int, use_args) == "ok"
def test_generic_subclass(self):
T = typing.TypeVar('T')
class Base(typing.Generic[T]):
pass
class DerivedAny(Base):
pass
class LeafAny(DerivedAny):
pass
class DerivedInt(Base[int]):
pass
class LeafInt(DerivedInt):
pass
class DerivedT(Base[T]):
pass
class LeafT(DerivedT[T]):
pass
klasses = [
Base, DerivedAny, LeafAny, DerivedInt, LeafInt, DerivedT, LeafT
]
for klass in klasses:
assert pickle_depickle(klass, protocol=self.protocol) is klass
with subprocess_worker(protocol=self.protocol) as worker:
def check_mro(klass, expected_mro):
assert klass.mro() == expected_mro
return "ok"
for klass in klasses:
mro = klass.mro()
assert check_mro(klass, mro)
assert worker.run(check_mro, klass, mro) == "ok"
def test_locally_defined_class_with_type_hints(self):
with subprocess_worker(protocol=self.protocol) as worker:
for type_ in _all_types_to_test():
class MyClass:
def method(self, arg: type_) -> type_:
return arg
MyClass.__annotations__ = {'attribute': type_}
def check_annotations(obj, expected_type, expected_type_str):
assert obj.__annotations__["attribute"] == expected_type
assert (
obj.method.__annotations__["arg"] == expected_type
)
assert (
obj.method.__annotations__["return"]
== expected_type
)
return "ok"
obj = MyClass()
assert check_annotations(obj, type_, "type_") == "ok"
assert (
worker.run(check_annotations, obj, type_, "type_") == "ok"
)
def test_generic_extensions_literal(self):
typing_extensions = pytest.importorskip('typing_extensions')
for obj in [typing_extensions.Literal, typing_extensions.Literal['a']]:
depickled_obj = pickle_depickle(obj, protocol=self.protocol)
assert depickled_obj == obj
def test_generic_extensions_final(self):
typing_extensions = pytest.importorskip('typing_extensions')
for obj in [typing_extensions.Final, typing_extensions.Final[int]]:
depickled_obj = pickle_depickle(obj, protocol=self.protocol)
assert depickled_obj == obj
def test_class_annotations(self):
class C:
pass
C.__annotations__ = {'a': int}
C1 = pickle_depickle(C, protocol=self.protocol)
assert C1.__annotations__ == C.__annotations__
def test_function_annotations(self):
def f(a: int) -> str:
pass
f1 = pickle_depickle(f, protocol=self.protocol)
assert f1.__annotations__ == f.__annotations__
def test_always_use_up_to_date_copyreg(self):
# test that updates of copyreg.dispatch_table are taken in account by
# cloudpickle
import copyreg
try:
class MyClass:
pass
def reduce_myclass(x):
return MyClass, (), {'custom_reduce': True}
copyreg.dispatch_table[MyClass] = reduce_myclass
my_obj = MyClass()
depickled_myobj = pickle_depickle(my_obj, protocol=self.protocol)
assert hasattr(depickled_myobj, 'custom_reduce')
finally:
copyreg.dispatch_table.pop(MyClass)
def test_literal_misdetection(self):
# see https://github.com/cloudpipe/cloudpickle/issues/403
class MyClass:
@property
def __values__(self):
return ()
o = MyClass()
pickle_depickle(o, protocol=self.protocol)
def test_final_or_classvar_misdetection(self):
# see https://github.com/cloudpipe/cloudpickle/issues/403
class MyClass:
@property
def __type__(self):
return int
o = MyClass()
pickle_depickle(o, protocol=self.protocol)
@pytest.mark.skip(reason="Requires pytest -s to pass")
def test_pickle_constructs_from_module_registered_for_pickling_by_value(self): # noqa
_prev_sys_path = sys.path.copy()
try:
# We simulate an interactive session that:
# - we start from the /path/to/cloudpickle/tests directory, where a
# local .py file (mock_local_file) is located.
# - uses constructs from mock_local_file in remote workers that do
# not have access to this file. This situation is
# the justification behind the
# (un)register_pickle_by_value(module) api that cloudpickle
# exposes.
_mock_interactive_session_cwd = os.path.dirname(__file__)
# First, remove sys.path entries that could point to
            # /path/to/cloudpickle/tests and be inherited by the worker
_maybe_remove(sys.path, '')
_maybe_remove(sys.path, _mock_interactive_session_cwd)
# Add the desired session working directory
sys.path.insert(0, _mock_interactive_session_cwd)
with subprocess_worker(protocol=self.protocol) as w:
# Make the module unavailable in the remote worker
w.run(
lambda p: sys.path.remove(p), _mock_interactive_session_cwd
)
# Import the actual file after starting the module since the
                # worker is started using fork on Linux, which will inherit
                # the parent sys.modules. On Python>3.6, the worker can be
                # started using spawn using mp_context in ProcessPoolExecutor.
# TODO Once Python 3.6 reaches end of life, rely on mp_context
# instead.
import mock_local_folder.mod as mod
# The constructs whose pickling mechanism is changed using
# register_pickle_by_value are functions, classes, TypeVar and
# modules.
from mock_local_folder.mod import (
local_function, LocalT, LocalClass
)
# Make sure the module/constructs are unimportable in the
# worker.
with pytest.raises(ImportError):
w.run(lambda: __import__("mock_local_folder.mod"))
with pytest.raises(ImportError):
w.run(
lambda: __import__("mock_local_folder.subfolder.mod")
)
for o in [mod, local_function, LocalT, LocalClass]:
with pytest.raises(ImportError):
w.run(lambda: o)
register_pickle_by_value(mod)
# function
assert w.run(lambda: local_function()) == local_function()
# typevar
assert w.run(lambda: LocalT.__name__) == LocalT.__name__
# classes
assert (
w.run(lambda: LocalClass().method())
== LocalClass().method()
)
# modules
assert (
w.run(lambda: mod.local_function()) == local_function()
)
# Constructs from modules inside subfolders should be pickled
# by value if a namespace module pointing to some parent folder
# was registered for pickling by value. A "mock_local_folder"
# namespace module falls into that category, but a
# "mock_local_folder.mod" one does not.
from mock_local_folder.subfolder.submod import (
LocalSubmodClass, LocalSubmodT, local_submod_function
)
# Shorter aliases to comply with line-length limits
_t, _func, _class = (
LocalSubmodT, local_submod_function, LocalSubmodClass
)
with pytest.raises(ImportError):
w.run(
lambda: __import__("mock_local_folder.subfolder.mod")
)
with pytest.raises(ImportError):
w.run(lambda: local_submod_function)
unregister_pickle_by_value(mod)
with pytest.raises(ImportError):
w.run(lambda: local_function)
with pytest.raises(ImportError):
w.run(lambda: __import__("mock_local_folder.mod"))
# Test the namespace folder case
import mock_local_folder
register_pickle_by_value(mock_local_folder)
assert w.run(lambda: local_function()) == local_function()
assert w.run(lambda: _func()) == _func()
unregister_pickle_by_value(mock_local_folder)
with pytest.raises(ImportError):
w.run(lambda: local_function)
with pytest.raises(ImportError):
w.run(lambda: local_submod_function)
# Test the case of registering a single module inside a
# subfolder.
import mock_local_folder.subfolder.submod
register_pickle_by_value(mock_local_folder.subfolder.submod)
assert w.run(lambda: _func()) == _func()
assert w.run(lambda: _t.__name__) == _t.__name__
assert w.run(lambda: _class().method()) == _class().method()
# Registering a module from a subfolder for pickling by value
# should not make constructs from modules from the parent
# folder pickleable
with pytest.raises(ImportError):
w.run(lambda: local_function)
with pytest.raises(ImportError):
w.run(lambda: __import__("mock_local_folder.mod"))
unregister_pickle_by_value(
mock_local_folder.subfolder.submod
)
with pytest.raises(ImportError):
w.run(lambda: local_submod_function)
# Test the subfolder namespace module case
import mock_local_folder.subfolder
register_pickle_by_value(mock_local_folder.subfolder)
assert w.run(lambda: _func()) == _func()
assert w.run(lambda: _t.__name__) == _t.__name__
assert w.run(lambda: _class().method()) == _class().method()
unregister_pickle_by_value(mock_local_folder.subfolder)
finally:
_fname = "mock_local_folder"
sys.path = _prev_sys_path
for m in [_fname, f"{_fname}.mod", f"{_fname}.subfolder",
f"{_fname}.subfolder.submod"]:
mod = sys.modules.pop(m, None)
if mod and mod.__name__ in list_registry_pickle_by_value():
unregister_pickle_by_value(mod)
def test_pickle_constructs_from_installed_packages_registered_for_pickling_by_value( # noqa
self
):
pytest.importorskip("_cloudpickle_testpkg")
for package_or_module in ["package", "module"]:
if package_or_module == "package":
import _cloudpickle_testpkg as m
f = m.package_function_with_global
_original_global = m.global_variable
elif package_or_module == "module":
import _cloudpickle_testpkg.mod as m
f = m.module_function_with_global
_original_global = m.global_variable
try:
with subprocess_worker(protocol=self.protocol) as w:
assert w.run(lambda: f()) == _original_global
# Test that f is pickled by value by modifying a global
# variable that f uses, and making sure that this
# modification shows up when calling the function remotely
register_pickle_by_value(m)
assert w.run(lambda: f()) == _original_global
m.global_variable = "modified global"
assert m.global_variable != _original_global
assert w.run(lambda: f()) == "modified global"
unregister_pickle_by_value(m)
finally:
m.global_variable = _original_global
if m.__name__ in list_registry_pickle_by_value():
unregister_pickle_by_value(m)
def test_pickle_various_versions_of_the_same_function_with_different_pickling_method( # noqa
self
):
pytest.importorskip("_cloudpickle_testpkg")
# Make sure that different versions of the same function (possibly
# pickled in a different way - by value and/or by reference) can
# peacefully co-exist (e.g. without globals interaction) in a remote
# worker.
import _cloudpickle_testpkg
from _cloudpickle_testpkg import package_function_with_global as f
_original_global = _cloudpickle_testpkg.global_variable
def _create_registry():
_main = __import__("sys").modules["__main__"]
_main._cloudpickle_registry = {}
# global _cloudpickle_registry
def _add_to_registry(v, k):
_main = __import__("sys").modules["__main__"]
_main._cloudpickle_registry[k] = v
def _call_from_registry(k):
_main = __import__("sys").modules["__main__"]
return _main._cloudpickle_registry[k]()
try:
with subprocess_worker(protocol=self.protocol) as w:
w.run(_create_registry)
w.run(_add_to_registry, f, "f_by_ref")
register_pickle_by_value(_cloudpickle_testpkg)
_cloudpickle_testpkg.global_variable = "modified global"
w.run(_add_to_registry, f, "f_by_val")
assert (
w.run(_call_from_registry, "f_by_ref") == _original_global
)
assert (
w.run(_call_from_registry, "f_by_val") == "modified global"
)
finally:
_cloudpickle_testpkg.global_variable = _original_global
if "_cloudpickle_testpkg" in list_registry_pickle_by_value():
unregister_pickle_by_value(_cloudpickle_testpkg)
@pytest.mark.skipif(
sys.version_info < (3, 7),
reason="Determinism can only be guaranteed for Python 3.7+"
)
def test_deterministic_pickle_bytes_for_function(self):
# Ensure that functions with references to several global names are
# pickled to fixed bytes that do not depend on the PYTHONHASHSEED of
# the Python process.
vals = set()
def func_with_globals():
return _TEST_GLOBAL_VARIABLE + _TEST_GLOBAL_VARIABLE2
for i in range(5):
vals.add(
subprocess_pickle_string(func_with_globals,
protocol=self.protocol,
add_env={"PYTHONHASHSEED": str(i)}))
if len(vals) > 1:
# Print additional debug info on stdout with dis:
for val in vals:
pickletools.dis(val)
pytest.fail(
"Expected a single deterministic payload, got %d/5" % len(vals)
)
class Protocol2CloudPickleTest(CloudPickleTest):
protocol = 2
def test_lookup_module_and_qualname_dynamic_typevar():
T = typing.TypeVar('T')
module_and_name = _lookup_module_and_qualname(T, name=T.__name__)
assert module_and_name is None
def test_lookup_module_and_qualname_importable_typevar():
pytest.importorskip("_cloudpickle_testpkg")
import _cloudpickle_testpkg
T = _cloudpickle_testpkg.T
module_and_name = _lookup_module_and_qualname(T, name=T.__name__)
assert module_and_name is not None
module, name = module_and_name
assert module is _cloudpickle_testpkg
assert name == 'T'
def test_lookup_module_and_qualname_stdlib_typevar():
module_and_name = _lookup_module_and_qualname(typing.AnyStr,
name=typing.AnyStr.__name__)
assert module_and_name is not None
module, name = module_and_name
assert module is typing
assert name == 'AnyStr'
def test_register_pickle_by_value():
pytest.importorskip("_cloudpickle_testpkg")
import _cloudpickle_testpkg as pkg
import _cloudpickle_testpkg.mod as mod
assert list_registry_pickle_by_value() == set()
register_pickle_by_value(pkg)
assert list_registry_pickle_by_value() == {pkg.__name__}
register_pickle_by_value(mod)
assert list_registry_pickle_by_value() == {pkg.__name__, mod.__name__}
unregister_pickle_by_value(mod)
assert list_registry_pickle_by_value() == {pkg.__name__}
msg = f"Input should be a module object, got {pkg.__name__} instead"
with pytest.raises(ValueError, match=msg):
unregister_pickle_by_value(pkg.__name__)
unregister_pickle_by_value(pkg)
assert list_registry_pickle_by_value() == set()
msg = f"{pkg} is not registered for pickle by value"
with pytest.raises(ValueError, match=re.escape(msg)):
unregister_pickle_by_value(pkg)
msg = f"Input should be a module object, got {pkg.__name__} instead"
with pytest.raises(ValueError, match=msg):
register_pickle_by_value(pkg.__name__)
dynamic_mod = types.ModuleType('dynamic_mod')
msg = (
f"{dynamic_mod} was not imported correctly, have you used an "
f"`import` statement to access it?"
)
with pytest.raises(ValueError, match=re.escape(msg)):
register_pickle_by_value(dynamic_mod)
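# Hedged usage sketch (not part of the original suite): the round trip that
# test_register_pickle_by_value exercises above, written as a standalone
# helper. `module.some_function` is a hypothetical attribute used purely for
# illustration; the register/unregister helpers and cloudpickle are names this
# file already imports.
def _example_register_by_value_roundtrip(module):
    """Serialize one of `module`'s functions by value and load it back."""
    register_pickle_by_value(module)
    try:
        # While registered, the function's code is embedded in the payload, so
        # the loading side does not need `module` to be importable.
        payload = cloudpickle.dumps(module.some_function)
    finally:
        unregister_pickle_by_value(module)
    return pickle.loads(payload)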
def _all_types_to_test():
T = typing.TypeVar('T')
class C(typing.Generic[T]):
pass
types_to_test = [
C, C[int],
T, typing.Any, typing.Optional,
typing.Generic, typing.Union,
typing.Optional[int],
typing.Generic[T],
typing.Callable[[int], typing.Any],
typing.Callable[..., typing.Any],
typing.Callable[[], typing.Any],
typing.Tuple[int, ...],
typing.Tuple[int, C[int]],
typing.List[int],
typing.Dict[int, str],
typing.ClassVar,
typing.ClassVar[C[int]],
typing.NoReturn,
]
return types_to_test
def test_module_level_pickler():
# #366: cloudpickle should expose its pickle.Pickler subclass as
# cloudpickle.Pickler
assert hasattr(cloudpickle, "Pickler")
assert cloudpickle.Pickler is cloudpickle.CloudPickler
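# Hedged sketch (illustrative only): since cloudpickle.Pickler is the
# CloudPickler subclass checked above, it can be driven like the standard
# pickle.Pickler with any writable binary buffer.
def _example_use_module_level_pickler(obj, protocol=None):
    import io
    buf = io.BytesIO()
    cloudpickle.Pickler(buf, protocol=protocol).dump(obj)
    return buf.getvalue()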
if __name__ == '__main__':
unittest.main()
# Next file: src/azure-cli/azure/cli/command_modules/vm/aaz/latest/capacity/reservation/_list.py (repo: Azure/azure-cli)
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
#
# Code generated by aaz-dev-tools
# --------------------------------------------------------------------------------------------
# pylint: skip-file
# flake8: noqa
from azure.cli.core.aaz import *
@register_command(
"capacity reservation list",
)
class List(AAZCommand):
"""List all of the capacity reservations in the specified capacity reservation group. Use the nextLink property in the response to get the next page of capacity reservations.
:example: List capacity reservation.
az capacity reservation list -c ReservationGroupName -g MyResourceGroup
"""
_aaz_info = {
"version": "2022-08-01",
"resources": [
["mgmt-plane", "/subscriptions/{}/resourcegroups/{}/providers/microsoft.compute/capacityreservationgroups/{}/capacityreservations", "2022-08-01"],
]
}
def _handler(self, command_args):
super()._handler(command_args)
return self.build_paging(self._execute_operations, self._output)
_args_schema = None
@classmethod
def _build_arguments_schema(cls, *args, **kwargs):
if cls._args_schema is not None:
return cls._args_schema
cls._args_schema = super()._build_arguments_schema(*args, **kwargs)
# define Arg Group ""
_args_schema = cls._args_schema
_args_schema.capacity_reservation_group_name = AAZStrArg(
options=["-c", "--capacity-reservation-group", "--capacity-reservation-group-name"],
help="The name of the capacity reservation group.",
required=True,
)
_args_schema.resource_group = AAZResourceGroupNameArg(
required=True,
)
return cls._args_schema
def _execute_operations(self):
self.pre_operations()
self.CapacityReservationsListByCapacityReservationGroup(ctx=self.ctx)()
self.post_operations()
@register_callback
def pre_operations(self):
pass
@register_callback
def post_operations(self):
pass
def _output(self, *args, **kwargs):
result = self.deserialize_output(self.ctx.vars.instance.value, client_flatten=True)
next_link = self.deserialize_output(self.ctx.vars.instance.next_link)
return result, next_link
class CapacityReservationsListByCapacityReservationGroup(AAZHttpOperation):
CLIENT_TYPE = "MgmtClient"
def __call__(self, *args, **kwargs):
request = self.make_request()
session = self.client.send_request(request=request, stream=False, **kwargs)
if session.http_response.status_code in [200]:
return self.on_200(session)
return self.on_error(session.http_response)
@property
def url(self):
return self.client.format_url(
"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/capacityReservationGroups/{capacityReservationGroupName}/capacityReservations",
**self.url_parameters
)
@property
def method(self):
return "GET"
@property
def error_format(self):
return "ODataV4Format"
@property
def url_parameters(self):
parameters = {
**self.serialize_url_param(
"capacityReservationGroupName", self.ctx.args.capacity_reservation_group_name,
required=True,
),
**self.serialize_url_param(
"resourceGroupName", self.ctx.args.resource_group,
required=True,
),
**self.serialize_url_param(
"subscriptionId", self.ctx.subscription_id,
required=True,
),
}
return parameters
@property
def query_parameters(self):
parameters = {
**self.serialize_query_param(
"api-version", "2022-08-01",
required=True,
),
}
return parameters
@property
def header_parameters(self):
parameters = {
**self.serialize_header_param(
"Accept", "application/json",
),
}
return parameters
def on_200(self, session):
data = self.deserialize_http_content(session)
self.ctx.set_var(
"instance",
data,
schema_builder=self._build_schema_on_200
)
_schema_on_200 = None
@classmethod
def _build_schema_on_200(cls):
if cls._schema_on_200 is not None:
return cls._schema_on_200
cls._schema_on_200 = AAZObjectType()
_schema_on_200 = cls._schema_on_200
_schema_on_200.next_link = AAZStrType(
serialized_name="nextLink",
)
_schema_on_200.value = AAZListType(
flags={"required": True},
)
value = cls._schema_on_200.value
value.Element = AAZObjectType()
_element = cls._schema_on_200.value.Element
_element.id = AAZStrType(
flags={"read_only": True},
)
_element.location = AAZStrType(
flags={"required": True},
)
_element.name = AAZStrType(
flags={"read_only": True},
)
_element.properties = AAZObjectType(
flags={"client_flatten": True},
)
_element.sku = AAZObjectType(
flags={"required": True},
)
_element.tags = AAZDictType()
_element.type = AAZStrType(
flags={"read_only": True},
)
_element.zones = AAZListType()
properties = cls._schema_on_200.value.Element.properties
properties.instance_view = AAZObjectType(
serialized_name="instanceView",
)
properties.platform_fault_domain_count = AAZIntType(
serialized_name="platformFaultDomainCount",
flags={"read_only": True},
)
properties.provisioning_state = AAZStrType(
serialized_name="provisioningState",
flags={"read_only": True},
)
properties.provisioning_time = AAZStrType(
serialized_name="provisioningTime",
flags={"read_only": True},
)
properties.reservation_id = AAZStrType(
serialized_name="reservationId",
flags={"read_only": True},
)
properties.time_created = AAZStrType(
serialized_name="timeCreated",
flags={"read_only": True},
)
properties.virtual_machines_associated = AAZListType(
serialized_name="virtualMachinesAssociated",
flags={"read_only": True},
)
instance_view = cls._schema_on_200.value.Element.properties.instance_view
instance_view.statuses = AAZListType()
instance_view.utilization_info = AAZObjectType(
serialized_name="utilizationInfo",
)
statuses = cls._schema_on_200.value.Element.properties.instance_view.statuses
statuses.Element = AAZObjectType()
_element = cls._schema_on_200.value.Element.properties.instance_view.statuses.Element
_element.code = AAZStrType()
_element.display_status = AAZStrType(
serialized_name="displayStatus",
)
_element.level = AAZStrType()
_element.message = AAZStrType()
_element.time = AAZStrType()
utilization_info = cls._schema_on_200.value.Element.properties.instance_view.utilization_info
utilization_info.current_capacity = AAZIntType(
serialized_name="currentCapacity",
flags={"read_only": True},
)
utilization_info.virtual_machines_allocated = AAZListType(
serialized_name="virtualMachinesAllocated",
flags={"read_only": True},
)
virtual_machines_allocated = cls._schema_on_200.value.Element.properties.instance_view.utilization_info.virtual_machines_allocated
virtual_machines_allocated.Element = AAZObjectType()
_ListHelper._build_schema_sub_resource_read_only_read(virtual_machines_allocated.Element)
virtual_machines_associated = cls._schema_on_200.value.Element.properties.virtual_machines_associated
virtual_machines_associated.Element = AAZObjectType()
_ListHelper._build_schema_sub_resource_read_only_read(virtual_machines_associated.Element)
sku = cls._schema_on_200.value.Element.sku
sku.capacity = AAZIntType()
sku.name = AAZStrType()
sku.tier = AAZStrType()
tags = cls._schema_on_200.value.Element.tags
tags.Element = AAZStrType()
zones = cls._schema_on_200.value.Element.zones
zones.Element = AAZStrType()
return cls._schema_on_200
class _ListHelper:
"""Helper class for List"""
_schema_sub_resource_read_only_read = None
@classmethod
def _build_schema_sub_resource_read_only_read(cls, _schema):
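        # Build the shared SubResourceReadOnly schema once and memoize it on
        # this helper class; later calls only attach the cached `id` field to
        # the caller's schema object.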
if cls._schema_sub_resource_read_only_read is not None:
_schema.id = cls._schema_sub_resource_read_only_read.id
return
cls._schema_sub_resource_read_only_read = _schema_sub_resource_read_only_read = AAZObjectType()
sub_resource_read_only_read = _schema_sub_resource_read_only_read
sub_resource_read_only_read.id = AAZStrType(
flags={"read_only": True},
)
_schema.id = cls._schema_sub_resource_read_only_read.id
__all__ = ["List"]
# Next file: Keras_tensorflow_nightly/source2.7/tensorflow/contrib/quantize/python/quantize.py (repo: ryfeus/lambda-packs)
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Logic to update a TensorFlow model graph with quantization operations."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import re
from tensorflow.contrib import graph_editor
from tensorflow.contrib.quantize.python import common
from tensorflow.contrib.quantize.python import graph_matcher
from tensorflow.contrib.quantize.python import input_to_ops
from tensorflow.contrib.quantize.python import quant_ops
from tensorflow.python.framework import ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import tf_logging as logging
# Quantizable operation types that are supported by the quantization rewrite.
_QUANTIZABLE_TYPES = {'Conv2D', 'MatMul', 'DepthwiseConv2dNative'}
# Activations that are supported by the quantization rewrite.
_ACTIVATION_TYPES = {'Relu', 'Relu6', 'Identity'}
def Quantize(graph,
is_training,
weight_bits=8,
activation_bits=8,
ema_decay=0.999,
quant_delay=None,
vars_collection=ops.GraphKeys.GLOBAL_VARIABLES,
scope=None):
"""Updates graph with quantization operations.
Currently we quantize the following tensors:
* Conv/MatMul: Quantize the weights if it matches.
* Activation: Quantize the output if it matches.
* Bypass/Post-activation Bypass: Quantize both input and output
if it matches.
Args:
graph: Graph to modify.
is_training: Whether quantizing training graph or eval graph.
weight_bits: Number of bits to use for quantizing weights.
activation_bits: Number of bits to use for quantizing activations.
ema_decay: (Optional) Float, EMA decay parameter. EMA is used to update
quantization intervals for quantizing activations (see here about EMA:
https://en.wikipedia.org/wiki/Moving_average#Exponential_moving_average).
quant_delay: (Optional, default None) Int, count of global steps for which
to delay quantization. This helps weights stabilize at the start of
training.
vars_collection: (Optional) Collection where to store the variables for
quantization interval ends.
scope: The scope to be transformed. If it's not None, only the ops which
are in this scope will be transformed.
Raises:
ValueError: When quantization fails.
"""
if scope and not scope.endswith('/'):
scope += '/'
input_to_ops_map = input_to_ops.InputToOps(graph)
for layer_match in _FindLayersToQuantize(graph):
# Quantize the weights.
context = _GetContextFromOp(layer_match.layer_op)
# If `scope` is given, only quantize it if the consumer of weights
# (the layer op) is in the right scope.
_InsertQuantOp(
context,
'weights_quant',
layer_match.weight_tensor.op, [layer_match.layer_op],
is_training,
moving_avg=False,
ema_decay=ema_decay,
quant_delay=quant_delay,
narrow_range=True,
vars_collection=vars_collection,
bits=weight_bits,
consumer_scope=scope)
# Quantize the activations.
consumer_ops = input_to_ops_map.ConsumerOperations(
layer_match.activation_op)
add_context = context
if layer_match.bypass_op:
add_context = re.search(r'^(.*)/([^/]+)', context).group(1)
# If `scope` is given, only quantize it if the producer of weights
# (usually it's the layer op) is in the right scope.
_InsertQuantOp(
add_context,
'act_quant',
layer_match.activation_op,
consumer_ops,
is_training,
moving_avg=True,
ema_decay=ema_decay,
quant_delay=quant_delay,
vars_collection=vars_collection,
bits=activation_bits,
init_min=0.0,
producer_scope=scope)
# Quantize the inputs and output to the bypass (if it exists). The input to
# the bypass is the bias add, and the output is the activation.
if layer_match.bypass_op is not None:
      # If `scope` is given, only quantize it if both the producer and the
# consumer are in the right scope.
_InsertQuantOp(
context,
'conv_quant',
layer_match.bias_add_op, [layer_match.bypass_op],
is_training,
moving_avg=True,
ema_decay=ema_decay,
quant_delay=quant_delay,
vars_collection=vars_collection,
bits=activation_bits,
producer_scope=scope,
consumer_scope=scope)
_InsertQuantOp(
add_context,
'add_quant',
layer_match.bypass_op,
input_to_ops_map.ConsumerOperations(layer_match.bypass_op),
is_training,
moving_avg=True,
ema_decay=ema_decay,
quant_delay=quant_delay,
vars_collection=vars_collection,
bits=activation_bits,
producer_scope=scope,
consumer_scope=scope)
# Quantize bypass ops that occur after the activation.
if layer_match.post_activation_bypass_op is not None:
post_activation_bypass_context = re.search(
r'^(.*)/([^/]+)', layer_match.post_activation_bypass_op.name).group(1)
# If `scope` is given, only quantize it if the producer is in the right
# scope.
_InsertQuantOp(
post_activation_bypass_context,
'post_activation_bypass_quant',
layer_match.post_activation_bypass_op,
input_to_ops_map.ConsumerOperations(
layer_match.post_activation_bypass_op),
is_training,
moving_avg=True,
ema_decay=ema_decay,
quant_delay=quant_delay,
vars_collection=vars_collection,
bits=activation_bits,
producer_scope=scope)
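# Hedged usage sketch (not part of the original module): how Quantize() above
# is typically applied to a freshly built training graph. `build_model` is a
# hypothetical user-supplied callable that constructs the network; all other
# names are defined or imported in this file.
def _example_quantize_training_graph(build_model):
  """Builds a graph with `build_model` and rewrites it for quantized training."""
  g = ops.Graph()
  with g.as_default():
    build_model()
    # Insert FakeQuant ops for weights and activations; quant_delay postpones
    # quantization for the first steps so the float model can stabilize.
    Quantize(
        g,
        is_training=True,
        weight_bits=8,
        activation_bits=8,
        quant_delay=2000)
  return g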
def _FindLayersToQuantize(graph):
"""Matches layers in graph to quantize.
The following patterns get matched. Nodes surrounded by [] will be
optionally matched:
weight|folded_weight
/
conv|fc
|
[post_conv_correction]
|
biasadd|folded_bias
|
[bypass]
|
activation
|
[post_activation_bypass]
Match replacements:
If weight|folded_weight is found, FakeQuant is added afterwards.
If bypass is found, FakeQuant is added before and after.
If activation is found, FakeQuant is added afterwards.
If post_activation_bypass is found, FakeQuant is added afterwards.
Args:
graph: Graph to perform match on.
Returns:
list of _LayerMatches.
"""
input_pattern = graph_matcher.OpTypePattern('*')
weight_var_pattern = graph_matcher.OpTypePattern('Variable|VariableV2')
weight_identity_pattern = graph_matcher.OpTypePattern(
'Identity', inputs=[weight_var_pattern])
weight_resource_var_pattern = graph_matcher.OpTypePattern('ReadVariableOp')
folded_weight_pattern = graph_matcher.OpTypePattern('Mul')
# The weights inputs to the layer operation can either be from the Variable or
# the folded weight (Mul).
layer_pattern = graph_matcher.OpTypePattern(
'|'.join(_QUANTIZABLE_TYPES),
inputs=[
input_pattern,
graph_matcher.OneofPattern([
weight_identity_pattern, weight_resource_var_pattern,
folded_weight_pattern
])
])
folded_bias_mul_pattern = graph_matcher.OpTypePattern(
'Mul', inputs=[graph_matcher.OpTypePattern('*'), layer_pattern])
post_layer_op_correction_pattern = graph_matcher.OpTypePattern(
'Add', inputs=[folded_bias_mul_pattern,
graph_matcher.OpTypePattern('*')])
folded_bias_add_pattern = graph_matcher.OpTypePattern(
'Add',
inputs=[
post_layer_op_correction_pattern,
graph_matcher.OpTypePattern('*')
])
bias_add_pattern = graph_matcher.OpTypePattern(
'Add|BiasAdd', inputs=[layer_pattern, '*'])
# The bias can come from the bias add or the folded bias add.
bypass_pattern_a = graph_matcher.OpTypePattern(
'Add',
inputs=[
graph_matcher.OneofPattern(
[bias_add_pattern, folded_bias_add_pattern]), '*'
])
bypass_pattern_b = graph_matcher.OpTypePattern(
'Add',
inputs=[
'*',
graph_matcher.OneofPattern(
[bias_add_pattern, folded_bias_add_pattern])
])
  # The input to the activation can come from the bias add, the folded bias
  # add, or the bypasses.
activation_pattern = graph_matcher.OpTypePattern(
'|'.join(_ACTIVATION_TYPES),
inputs=[
graph_matcher.OneofPattern([
bias_add_pattern, folded_bias_add_pattern, bypass_pattern_a,
bypass_pattern_b
])
])
post_activation_bypass_pattern_a = graph_matcher.OpTypePattern(
'Add', inputs=['*', activation_pattern])
post_activation_bypass_pattern_b = graph_matcher.OpTypePattern(
'Add', inputs=[activation_pattern, '*'])
# The order of the following matching blocks is very important. Since matches
# aren't guaranteed to be disjoint, we structure matches from largest to
# smallest to guarantee that the largest match always wins. Additionally, we
# ensure that we don't match layers multiple times.
layer_matches = []
# We use matched_layer_set to ensure that layers aren't matched multiple
# times.
matched_layer_set = set()
# First, we match layers that have a post activation bypass. We do this first
# to ensure we don't match only the first part of this layer, missing the
# post activation bypass node.
post_activation_bypass_layer_matcher = graph_matcher.GraphMatcher(
graph_matcher.OneofPattern([
post_activation_bypass_pattern_a,
post_activation_bypass_pattern_b,
]))
for match_result in post_activation_bypass_layer_matcher.match_graph(graph):
layer_op = match_result.get_op(layer_pattern)
weight_tensor = match_result.get_tensor(weight_identity_pattern)
if weight_tensor is None:
weight_tensor = match_result.get_tensor(weight_resource_var_pattern)
if weight_tensor is None:
weight_tensor = match_result.get_tensor(folded_weight_pattern)
activation_op = match_result.get_op(activation_pattern)
bias_add_op = match_result.get_op(bias_add_pattern)
if bias_add_op is None:
bias_add_op = match_result.get_op(folded_bias_add_pattern)
bypass_op = match_result.get_op(bypass_pattern_a)
if bypass_op is None:
bypass_op = match_result.get_op(bypass_pattern_b)
post_activation_bypass_op = match_result.get_op(
post_activation_bypass_pattern_a)
if post_activation_bypass_op is None:
post_activation_bypass_op = match_result.get_op(
post_activation_bypass_pattern_b)
if layer_op not in matched_layer_set:
matched_layer_set.add(layer_op)
layer_matches.append(
_LayerMatch(layer_op, weight_tensor, activation_op, bypass_op,
post_activation_bypass_op, bias_add_op))
# Now, we match the basic layer ending at an activation. We may get duplicate
# matches from above, but we don't add them to layer_matches.
layer_matcher = graph_matcher.GraphMatcher(activation_pattern)
for match_result in layer_matcher.match_graph(graph):
layer_op = match_result.get_op(layer_pattern)
weight_tensor = match_result.get_tensor(weight_identity_pattern)
if weight_tensor is None:
weight_tensor = match_result.get_tensor(weight_resource_var_pattern)
if weight_tensor is None:
weight_tensor = match_result.get_tensor(folded_weight_pattern)
activation_op = match_result.get_op(activation_pattern)
bias_add_op = match_result.get_op(bias_add_pattern)
if bias_add_op is None:
bias_add_op = match_result.get_op(folded_bias_add_pattern)
bypass_op = match_result.get_op(bypass_pattern_a)
if bypass_op is None:
bypass_op = match_result.get_op(bypass_pattern_b)
if layer_op not in matched_layer_set:
matched_layer_set.add(layer_op)
layer_matches.append(
_LayerMatch(layer_op, weight_tensor, activation_op, bypass_op, None,
bias_add_op))
# Match the final layer, where there may not be an activation and instead
# the output of the final BiasAdd must be quantized. So we treat the BiasAdd
  # as the 'activation_op' in the _LayerMatch, to ensure that its output is
# quantized.
final_layer_matcher = graph_matcher.GraphMatcher(
graph_matcher.OneofPattern([bias_add_pattern, folded_bias_add_pattern]))
for match_result in final_layer_matcher.match_graph(graph):
layer_op = match_result.get_op(layer_pattern)
weight_tensor = match_result.get_tensor(weight_identity_pattern)
if weight_tensor is None:
weight_tensor = match_result.get_tensor(weight_resource_var_pattern)
if weight_tensor is None:
weight_tensor = match_result.get_tensor(folded_weight_pattern)
activation_op = match_result.get_op(bias_add_pattern)
if activation_op is None:
activation_op = match_result.get_op(folded_bias_add_pattern)
if layer_op not in matched_layer_set:
matched_layer_set.add(layer_op)
layer_matches.append(
_LayerMatch(layer_op, weight_tensor, activation_op, None, None, None))
return layer_matches
def _HasPostActivationBypass(activation_op):
for activation_tensor in activation_op.outputs:
for output_op in activation_tensor.consumers():
if output_op.type == 'Add':
return True
return False
class _LayerMatch(object):
"""Contains all information related to a matched Layer."""
def __init__(self, layer_op, weight_tensor, activation_op, bypass_op,
post_activation_bypass_op, bias_add_op):
self._layer_op = layer_op
self._weight_tensor = weight_tensor
self._activation_op = activation_op
self._bypass_op = bypass_op
self._post_activation_bypass_op = post_activation_bypass_op
self._bias_add_op = bias_add_op
@property
def layer_op(self):
return self._layer_op
@property
def weight_tensor(self):
return self._weight_tensor
@property
def activation_op(self):
return self._activation_op
@property
def bypass_op(self):
return self._bypass_op
@property
def post_activation_bypass_op(self):
return self._post_activation_bypass_op
@property
def bias_add_op(self):
return self._bias_add_op
def _InsertQuantOp(context,
name,
producer,
consumers,
is_training,
moving_avg=True,
init_min=-6.0,
init_max=6.0,
bits=8,
ema_decay=0.999,
quant_delay=None,
vars_collection=ops.GraphKeys.GLOBAL_VARIABLES,
narrow_range=False,
producer_scope=None,
consumer_scope=None):
"""Inserts a quant op between a producer op and (multiple) consumer ops.
Args:
context: Context where producer and consumer operations are nested.
name: Name for the new quantization op within the context.
producer: Producer operation of the pairs where quantization will be
inserted.
consumers: Consumer operations of the pairs.
is_training: Whether quantizing training graph or eval graph.
moving_avg: Specifies whether to use exponential moving average or just
the last value seen.
init_min: Starting minimum value for the new quantization op.
init_max: Starting maximum value for the new quantization op.
bits: Number of bits to use for quantization, must be between 2 and 8.
ema_decay: (Optional) Float, EMA decay parameter. EMA is used to update
quantization intervals for quantizing activations (see here about EMA:
https://en.wikipedia.org/wiki/Moving_average#Exponential_moving_average).
quant_delay: (Optional, default None) Int, count of global steps for which
to delay quantization. This helps weights stabilize at the start of
training.
vars_collection: (Optional) Collection where to store the variables for
quantization interval ends.
narrow_range: Whether to use the narrow quantization range
[1; 2^bits - 1] or wide range [0; 2^bits - 1].
producer_scope: The restriction of producer scope. If not None, the new op
will be inserted only when the producer is in this scope.
    consumer_scope: The restriction of consumer scope. If not None, the new op
will be inserted only when all the consumers are in this scope.
Raises:
ValueError: When producer operation is not directly connected to the
consumer operation.
"""
if producer_scope and not producer.name.startswith(producer_scope):
logging.info(
'_InsertQuantOp ignores context="%s" name="%s" '
'because producer "%s" is not in scope "%s"',
context, name, producer.name, producer_scope)
return
if consumer_scope:
consumers_in_scope = []
for consumer in consumers:
if consumer.name.startswith(consumer_scope):
consumers_in_scope.append(consumer)
else:
logging.info(
'_InsertQuantOp context="%s" name="%s" ignores '
'consumer "%s" because it is not in scope "%s"',
context, name, consumer.name, consumer_scope)
return
consumers = consumers_in_scope
name_prefix = _AddContextToName(context, name)
# This is needed on TPU where name_scope == 'TPUReplicate/loop', and
# name_prefix starts with 'TPUReplicate/loop/'; without dropping it
# variables are created as TPUReplicate/loop/TPUReplicate/loop/..., which
# breaks things later.
name_scope = ops.get_name_scope()
if name_scope:
name_prefix = common.DropStringPrefix(name_prefix, name_scope + '/')
inputs = producer.outputs[0]
# Prevent ops from being quantized multiple times. Bypass ops can sometimes
# overlap between multiple matches, so we need to ensure that we don't
# add duplicate FakeQuant operations.
fake_quant_ops = set([
'FakeQuantWithMinMaxVars',
'FakeQuantWithMinMaxArgs'
])
if fake_quant_ops.intersection(set([c.type for c in inputs.consumers()])):
return
if moving_avg:
quant = (
quant_ops.MovingAvgQuantize(
inputs,
init_min=init_min,
init_max=init_max,
ema_decay=ema_decay,
is_training=is_training,
num_bits=bits,
narrow_range=narrow_range,
vars_collection=vars_collection,
name_prefix=name_prefix))
else:
quant = (
quant_ops.LastValueQuantize(
inputs,
init_min=init_min,
init_max=init_max,
is_training=is_training,
num_bits=bits,
narrow_range=narrow_range,
vars_collection=vars_collection,
name_prefix=name_prefix))
if quant_delay and quant_delay > 0:
activate_quant = math_ops.greater_equal(
common.CreateOrGetQuantizationStep(),
quant_delay,
name=name_prefix + '/activate_quant')
quant = control_flow_ops.cond(
activate_quant,
lambda: quant,
lambda: inputs,
name=name_prefix + '/delayed_quant')
if consumers:
tensors_modified_count = graph_editor.reroute_ts(
[quant], [inputs], can_modify=consumers)
# Some operations can have multiple output tensors going to the same
# consumer. Since consumers is a set, we need to ensure that
# tensors_modified_count is greater than or equal to the length of the set
# of consumers.
if tensors_modified_count < len(consumers):
raise ValueError('No inputs quantized for ops: [%s]' % ', '.join(
[consumer.name for consumer in consumers]))
def _GetContextFromOp(op):
"""Gets the root context name from the op name."""
context_re = re.search(r'^(.*)/([^/]+)', op.name)
if context_re:
return context_re.group(1)
return ''
def _AddContextToName(context, name):
"""Adds the context to the name if it exists."""
if not context:
return name
return context + '/' + name
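# Hedged sketch (not part of the original module): a quick illustration of the two
# context helpers above, using a fake op object; the op name below is hypothetical.
if __name__ == '__main__':
  class _FakeOp(object):
    name = 'InceptionV3/Conv2d_1a_3x3/BiasAdd'
  assert _GetContextFromOp(_FakeOp()) == 'InceptionV3/Conv2d_1a_3x3'
  assert _AddContextToName('InceptionV3/Conv2d_1a_3x3', 'act_quant') == (
      'InceptionV3/Conv2d_1a_3x3/act_quant')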
|
1c5102a23d9cc3031a2a16761c880e58fb9fa720
|
a61bf859ceeb1ba98de3863225e07b29e1d7ce8a
|
/thonny/locale/register_updates.py
|
9974850ea6dc04564015534d6c062ee29a81e367
|
[
"MIT"
] |
permissive
|
thonny/thonny
|
3974b1860703e8450b837863682117f525a886c6
|
8fc9f5c7cbbe1d1c82aa5503ec4b684e28aa608c
|
refs/heads/master
| 2023-08-31T03:04:34.685140
| 2023-08-24T11:38:36
| 2023-08-24T11:38:36
| 163,728,962
| 2,788
| 1,048
|
MIT
| 2023-08-10T18:59:37
| 2019-01-01T10:29:50
|
Python
|
UTF-8
|
Python
| false
| false
| 1,554
|
py
|
register_updates.py
|
import json
import os.path
import polib
import pyperclip
locale_dir = os.path.dirname(__file__)
def register_locale(name: str) -> None:
print(f"Processing {name}")
po_path = os.path.join(locale_dir, name, "LC_MESSAGES", "thonny.po")
po = polib.pofile(po_path)
registered_path = os.path.join(locale_dir, name, "LC_MESSAGES", "registered.json")
if not os.path.exists(registered_path):
with open(registered_path, "w", encoding="utf-8") as fp:
fp.write("{}")
with open(registered_path, encoding="utf-8") as fp:
registered = json.load(fp)
new_registered = {}
review_messages = []
for entry in po:
if entry.msgstr and (
entry.msgid not in registered or registered[entry.msgid] != entry.msgstr
):
msg = entry.msgstr.strip().replace("\n", " ")
if not msg.endswith("."):
msg = msg + "."
review_messages.append(msg)
if entry.msgstr:
new_registered[entry.msgid] = entry.msgstr
if review_messages:
print("\n".join(review_messages))
pyperclip.copy("\n".join(review_messages))
input(f"... Press ENTER to confirm {name}! ...")
with open(registered_path, "w", encoding="utf-8") as fp:
json.dump(new_registered, fp, sort_keys=True, indent=4, ensure_ascii=False)
print("--------------------------------------")
for name in os.listdir(locale_dir):
path = os.path.join(locale_dir, name)
if os.path.isdir(path):
register_locale(name)
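# Hedged sketch (hypothetical in-memory data, no polib/pyperclip needed): the review
# queue built in register_locale only contains translations that are new or changed
# relative to the registered.json snapshot.
_registered_demo = {"Open": "Ava"}
_po_demo = {"Open": "Ava", "Save": "Salvesta"}  # msgid -> msgstr
_to_review = [v for k, v in _po_demo.items() if _registered_demo.get(k) != v]
assert _to_review == ["Salvesta"]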
|
5855490725f0f94cc7bec638489bf2cd70cdfc40
|
9784a90cac667e8e0aaba0ca599b4255b215ec67
|
/gluon/gluoncv2/models/crunet.py
|
38409e79421febc9e21ecbadc68e756106494888
|
[
"MIT"
] |
permissive
|
osmr/imgclsmob
|
d2f48f01ca541b20119871393eca383001a96019
|
f2993d3ce73a2f7ddba05da3891defb08547d504
|
refs/heads/master
| 2022-07-09T14:24:37.591824
| 2021-12-14T10:15:31
| 2021-12-14T10:15:31
| 140,285,687
| 3,017
| 624
|
MIT
| 2022-07-04T15:18:37
| 2018-07-09T12:57:46
|
Python
|
UTF-8
|
Python
| false
| false
| 21,164
|
py
|
crunet.py
|
"""
CRU-Net, implemented in Gluon.
Original paper: 'Sharing Residual Units Through Collective Tensor Factorization To Improve Deep Neural Networks,'
https://www.ijcai.org/proceedings/2018/88.
"""
__all__ = ['CRUNet', 'crunet56', 'crunet116']
import os
import math
from mxnet import cpu
from mxnet.gluon import nn, HybridBlock
from .common import pre_conv1x1_block, pre_conv3x3_block
from .resnet import ResInitBlock
from .preresnet import PreResActivation
def cru_conv3x3(in_channels,
out_channels,
strides=1,
padding=1,
groups=1,
use_bias=False,
conv_params=None):
"""
CRU-Net specific convolution 3x3 layer.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
strides : int or tuple/list of 2 int, default 1
Strides of the convolution.
padding : int or tuple/list of 2 int, default 1
Padding value for convolution layer.
groups : int, default 1
Number of groups.
use_bias : bool, default False
        Whether the layer uses a bias vector.
    conv_params : ParameterDict, default None
        Weights for the convolution layer.
    """
return nn.Conv2D(
channels=out_channels,
kernel_size=3,
strides=strides,
padding=padding,
groups=groups,
use_bias=use_bias,
in_channels=in_channels,
params=conv_params)
class CRUConvBlock(HybridBlock):
"""
CRU-Net specific convolution block.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
kernel_size : int or tuple/list of 2 int
Convolution window size.
strides : int or tuple/list of 2 int
Strides of the convolution.
padding : int or tuple/list of 2 int
Padding value for convolution layer.
groups : int, default 1
Number of groups.
bn_use_global_stats : bool, default False
Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
return_preact : bool, default False
Whether return pre-activation. It's used by PreResNet.
conv_params : ParameterDict, default None
Weights for the convolution layer.
"""
def __init__(self,
in_channels,
out_channels,
kernel_size,
strides,
padding,
groups=1,
bn_use_global_stats=False,
return_preact=False,
conv_params=None,
**kwargs):
super(CRUConvBlock, self).__init__(**kwargs)
self.return_preact = return_preact
with self.name_scope():
self.bn = nn.BatchNorm(
in_channels=in_channels,
use_global_stats=bn_use_global_stats)
self.activ = nn.Activation("relu")
self.conv = nn.Conv2D(
channels=out_channels,
kernel_size=kernel_size,
strides=strides,
padding=padding,
groups=groups,
use_bias=False,
in_channels=in_channels,
params=conv_params)
def hybrid_forward(self, F, x):
x = self.bn(x)
x = self.activ(x)
if self.return_preact:
x_pre_activ = x
x = self.conv(x)
if self.return_preact:
return x, x_pre_activ
else:
return x
def cru_conv1x1_block(in_channels,
out_channels,
strides=1,
bn_use_global_stats=False,
return_preact=False,
conv_params=None):
"""
1x1 version of the CRU-Net specific convolution block.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
strides : int or tuple/list of 2 int, default 1
Strides of the convolution.
bn_use_global_stats : bool, default False
Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
return_preact : bool, default False
Whether return pre-activation.
conv_params : ParameterDict, default None
Weights for the convolution layer.
"""
return CRUConvBlock(
in_channels=in_channels,
out_channels=out_channels,
kernel_size=1,
strides=strides,
padding=0,
bn_use_global_stats=bn_use_global_stats,
return_preact=return_preact,
conv_params=conv_params)
class ResBottleneck(HybridBlock):
"""
Pre-ResNeXt bottleneck block.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
strides : int or tuple/list of 2 int
Strides of the convolution.
cardinality: int
Number of groups.
bottleneck_width: int
Width of bottleneck block.
bn_use_global_stats : bool
Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
"""
def __init__(self,
in_channels,
out_channels,
strides,
cardinality,
bottleneck_width,
bn_use_global_stats,
**kwargs):
super(ResBottleneck, self).__init__(**kwargs)
mid_channels = out_channels // 4
D = int(math.floor(mid_channels * (bottleneck_width / 64.0)))
group_width = cardinality * D
with self.name_scope():
self.conv1 = pre_conv1x1_block(
in_channels=in_channels,
out_channels=group_width,
bn_use_global_stats=bn_use_global_stats)
self.conv2 = pre_conv3x3_block(
in_channels=group_width,
out_channels=group_width,
strides=strides,
groups=cardinality,
bn_use_global_stats=bn_use_global_stats)
self.conv3 = pre_conv1x1_block(
in_channels=group_width,
out_channels=out_channels,
bn_use_global_stats=bn_use_global_stats)
def hybrid_forward(self, F, x):
x = self.conv1(x)
x = self.conv2(x)
x = self.conv3(x)
return x
class CRUBottleneck(HybridBlock):
"""
CRU-Net bottleneck block.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
strides : int or tuple/list of 2 int
Strides of the convolution.
group_width: int
Group width parameter.
bn_use_global_stats : bool
Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
conv1_params : ParameterDict, default None
Weights for the convolution layer #1.
conv2_params : ParameterDict, default None
Weights for the convolution layer #2.
"""
def __init__(self,
in_channels,
out_channels,
strides,
group_width,
bn_use_global_stats,
conv1_params=None,
conv2_params=None,
**kwargs):
super(CRUBottleneck, self).__init__(**kwargs)
with self.name_scope():
self.conv1 = cru_conv1x1_block(
in_channels=in_channels,
out_channels=group_width,
bn_use_global_stats=bn_use_global_stats,
conv_params=conv1_params)
self.conv2 = cru_conv3x3(
in_channels=group_width,
out_channels=group_width,
strides=strides,
groups=group_width,
conv_params=conv2_params)
self.conv3 = pre_conv1x1_block(
in_channels=group_width,
out_channels=group_width,
bn_use_global_stats=bn_use_global_stats)
self.conv4 = pre_conv1x1_block(
in_channels=group_width,
out_channels=out_channels,
bn_use_global_stats=bn_use_global_stats)
def hybrid_forward(self, F, x):
x = self.conv1(x)
x = self.conv2(x)
x = self.conv3(x)
x = self.conv4(x)
return x
class ResUnit(HybridBlock):
"""
CRU-Net residual unit.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
strides : int or tuple/list of 2 int
Strides of the convolution.
cardinality: int
Number of groups.
bottleneck_width: int
Width of bottleneck block.
bn_use_global_stats : bool
Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
"""
def __init__(self,
in_channels,
out_channels,
strides,
cardinality,
bottleneck_width,
bn_use_global_stats,
**kwargs):
super(ResUnit, self).__init__(**kwargs)
self.resize_identity = (in_channels != out_channels) or (strides != 1)
with self.name_scope():
self.body = ResBottleneck(
in_channels=in_channels,
out_channels=out_channels,
strides=strides,
cardinality=cardinality,
bottleneck_width=bottleneck_width,
bn_use_global_stats=bn_use_global_stats)
if self.resize_identity:
self.identity_conv = pre_conv1x1_block(
in_channels=in_channels,
out_channels=out_channels,
strides=strides,
bn_use_global_stats=bn_use_global_stats)
def hybrid_forward(self, F, x):
if self.resize_identity:
identity = self.identity_conv(x)
else:
identity = x
x = self.body(x)
x = x + identity
return x
class CRUUnit(HybridBlock):
"""
CRU-Net collective residual unit.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
strides : int or tuple/list of 2 int
Strides of the convolution.
group_width: int
Group width parameter.
bn_use_global_stats : bool
Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
conv1_params : ParameterDict, default None
Weights for the convolution layer #1.
conv2_params : ParameterDict, default None
Weights for the convolution layer #2.
"""
def __init__(self,
in_channels,
out_channels,
strides,
group_width,
bn_use_global_stats,
conv1_params=None,
conv2_params=None,
**kwargs):
super(CRUUnit, self).__init__(**kwargs)
assert (strides == 1) or ((conv1_params is None) and (conv2_params is None))
self.resize_input = (in_channels != out_channels)
self.resize_identity = (in_channels != out_channels) or (strides != 1)
with self.name_scope():
if self.resize_input:
self.input_conv = pre_conv1x1_block(
in_channels=in_channels,
out_channels=out_channels,
bn_use_global_stats=bn_use_global_stats)
self.body = CRUBottleneck(
in_channels=out_channels,
out_channels=out_channels,
strides=strides,
group_width=group_width,
bn_use_global_stats=bn_use_global_stats,
conv1_params=conv1_params,
conv2_params=conv2_params)
if self.resize_identity:
self.identity_conv = cru_conv1x1_block(
in_channels=in_channels,
out_channels=out_channels,
strides=strides,
bn_use_global_stats=bn_use_global_stats,
conv_params=self.input_conv.conv.params)
def hybrid_forward(self, F, x):
if self.resize_identity:
identity = self.identity_conv(x)
else:
identity = x
if self.resize_input:
x = self.input_conv(x)
x = self.body(x)
x = x + identity
return x
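# Hedged illustration (not used by the model itself): CRUUnit shares convolution
# weights across units by passing an existing block's ParameterDict through the
# conv1_params/conv2_params plumbing above; the channel sizes below are arbitrary.
def _cru_weight_sharing_sketch():
    master = nn.Conv2D(channels=8, kernel_size=1, in_channels=8, use_bias=False)
    follower = nn.Conv2D(channels=8, kernel_size=1, in_channels=8, use_bias=False,
                         params=master.params)
    return master, follower  # both blocks now reference the same weight parameters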
class CRUNet(HybridBlock):
"""
CRU-Net model from 'Sharing Residual Units Through Collective Tensor Factorization To Improve Deep Neural Networks,'
https://www.ijcai.org/proceedings/2018/88.
Parameters:
----------
channels : list of list of int
Number of output channels for each unit.
init_block_channels : int
Number of output channels for the initial unit.
cardinality: int
Number of groups.
bottleneck_width: int
Width of bottleneck block.
group_widths: list of int
List of group width parameters.
refresh_steps: list of int
List of refresh step parameters.
bn_use_global_stats : bool, default False
Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
Useful for fine-tuning.
in_channels : int, default 3
Number of input channels.
in_size : tuple of two ints, default (224, 224)
Spatial size of the expected input image.
classes : int, default 1000
Number of classification classes.
"""
def __init__(self,
channels,
init_block_channels,
cardinality,
bottleneck_width,
group_widths,
refresh_steps,
bn_use_global_stats=False,
in_channels=3,
in_size=(224, 224),
classes=1000,
**kwargs):
super(CRUNet, self).__init__(**kwargs)
self.in_size = in_size
self.classes = classes
with self.name_scope():
self.features = nn.HybridSequential(prefix="")
self.features.add(ResInitBlock(
in_channels=in_channels,
out_channels=init_block_channels,
bn_use_global_stats=bn_use_global_stats))
in_channels = init_block_channels
for i, channels_per_stage in enumerate(channels):
stage = nn.HybridSequential(prefix="stage{}_".format(i + 1))
group_width = group_widths[i]
refresh_step = refresh_steps[i]
with stage.name_scope():
for j, out_channels in enumerate(channels_per_stage):
strides = 2 if (j == 0) and (i != 0) else 1
if group_width != 0:
if ((refresh_step == 0) and (j == 0)) or ((refresh_step != 0) and (j % refresh_step == 0)):
conv1_params = None
conv2_params = None
unit = CRUUnit(
in_channels=in_channels,
out_channels=out_channels,
strides=strides,
group_width=group_width,
bn_use_global_stats=bn_use_global_stats,
conv1_params=conv1_params,
conv2_params=conv2_params)
if conv1_params is None:
conv1_params = unit.body.conv1.conv.params
conv2_params = unit.body.conv2.params
stage.add(unit)
else:
stage.add(ResUnit(
in_channels=in_channels,
out_channels=out_channels,
strides=strides,
cardinality=cardinality,
bottleneck_width=bottleneck_width,
bn_use_global_stats=bn_use_global_stats))
in_channels = out_channels
self.features.add(stage)
self.features.add(PreResActivation(
in_channels=in_channels,
bn_use_global_stats=bn_use_global_stats))
self.features.add(nn.AvgPool2D(
pool_size=7,
strides=1))
self.output = nn.HybridSequential(prefix="")
self.output.add(nn.Flatten())
self.output.add(nn.Dense(
units=classes,
in_units=in_channels))
def hybrid_forward(self, F, x):
x = self.features(x)
x = self.output(x)
return x
def get_crunet(blocks,
model_name=None,
pretrained=False,
ctx=cpu(),
root=os.path.join("~", ".mxnet", "models"),
**kwargs):
"""
Create CRU-Net model with specific parameters.
Parameters:
----------
blocks : int
Number of blocks.
model_name : str or None, default None
Model name for loading pretrained model.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
cardinality = 32
bottleneck_width = 4
if blocks == 56:
layers = [3, 4, 6, 3]
group_widths = [0, 0, 640, 0]
refresh_steps = [0, 0, 0, 0]
elif blocks == 116:
layers = [3, 6, 18, 3]
group_widths = [0, 352, 704, 0]
refresh_steps = [0, 0, 6, 0]
else:
raise ValueError("Unsupported CRU-Net with number of blocks: {}".format(blocks))
init_block_channels = 64
channels_per_layers = [256, 512, 1024, 2048]
channels = [[ci] * li for (ci, li) in zip(channels_per_layers, layers)]
net = CRUNet(
channels=channels,
init_block_channels=init_block_channels,
cardinality=cardinality,
bottleneck_width=bottleneck_width,
group_widths=group_widths,
refresh_steps=refresh_steps,
**kwargs)
if pretrained:
if (model_name is None) or (not model_name):
raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
from .model_store import get_model_file
net.load_parameters(
filename=get_model_file(
model_name=model_name,
local_model_store_dir_path=root),
ctx=ctx)
return net
def crunet56(**kwargs):
"""
CRU-Net-56 model from 'Sharing Residual Units Through Collective Tensor Factorization To Improve Deep Neural
Networks,' https://www.ijcai.org/proceedings/2018/88.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_crunet(blocks=56, model_name="crunet56", **kwargs)
def crunet116(**kwargs):
"""
CRU-Net-116 model from 'Sharing Residual Units Through Collective Tensor Factorization To Improve Deep Neural
Networks,' https://www.ijcai.org/proceedings/2018/88.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_crunet(blocks=116, model_name="crunet116", **kwargs)
def _test():
import numpy as np
import mxnet as mx
pretrained = False
models = [
crunet56,
crunet116,
]
for model in models:
net = model(pretrained=pretrained)
ctx = mx.cpu()
if not pretrained:
net.initialize(ctx=ctx)
# net.hybridize()
net_params = net.collect_params()
weight_count = 0
for param in net_params.values():
if (param.shape is None) or (not param._differentiable):
continue
weight_count += np.prod(param.shape)
print("m={}, {}".format(model.__name__, weight_count))
assert (model != crunet56 or weight_count == 25609384)
assert (model != crunet116 or weight_count == 43656136)
x = mx.nd.zeros((1, 3, 224, 224), ctx=ctx)
y = net(x)
assert (y.shape == (1, 1000))
if __name__ == "__main__":
_test()
|
5f0d1b987c2726efb211cf861c277753d90622a6
|
975b2d421d3661e6770b601929d5f11d981d8985
|
/msgraph/generated/models/mobile_app_content_file.py
|
d487143c68cf245982ab22403f848783c817854b
|
[
"MIT"
] |
permissive
|
microsoftgraph/msgraph-sdk-python
|
a7c551b85daadeebf76ec4ae12668664ea639b42
|
27de7ccbe688d7614b2f6bde0fdbcda4bc5cc949
|
refs/heads/main
| 2023-09-03T21:45:27.989672
| 2023-08-31T06:22:18
| 2023-08-31T06:22:18
| 534,665,999
| 135
| 18
|
MIT
| 2023-09-14T11:04:11
| 2022-09-09T14:00:17
|
Python
|
UTF-8
|
Python
| false
| false
| 4,470
|
py
|
mobile_app_content_file.py
|
from __future__ import annotations
import datetime
from dataclasses import dataclass, field
from kiota_abstractions.serialization import Parsable, ParseNode, SerializationWriter
from typing import Any, Callable, Dict, List, Optional, TYPE_CHECKING, Union
if TYPE_CHECKING:
from .entity import Entity
from .mobile_app_content_file_upload_state import MobileAppContentFileUploadState
from .entity import Entity
@dataclass
class MobileAppContentFile(Entity):
"""
Contains properties for a single installer file that is associated with a given mobileAppContent version.
"""
# The Azure Storage URI.
azure_storage_uri: Optional[str] = None
# The time the Azure storage Uri expires.
azure_storage_uri_expiration_date_time: Optional[datetime.datetime] = None
# The time the file was created.
created_date_time: Optional[datetime.datetime] = None
# A value indicating whether the file is committed.
is_committed: Optional[bool] = None
# The manifest information.
manifest: Optional[bytes] = None
    # The file name.
name: Optional[str] = None
# The OdataType property
odata_type: Optional[str] = None
# The size of the file prior to encryption.
size: Optional[int] = None
# The size of the file after encryption.
size_encrypted: Optional[int] = None
# Contains properties for upload request states.
upload_state: Optional[MobileAppContentFileUploadState] = None
@staticmethod
def create_from_discriminator_value(parse_node: Optional[ParseNode] = None) -> MobileAppContentFile:
"""
Creates a new instance of the appropriate class based on discriminator value
Args:
parse_node: The parse node to use to read the discriminator value and create the object
Returns: MobileAppContentFile
"""
if not parse_node:
raise TypeError("parse_node cannot be null.")
return MobileAppContentFile()
def get_field_deserializers(self,) -> Dict[str, Callable[[ParseNode], None]]:
"""
The deserialization information for the current model
Returns: Dict[str, Callable[[ParseNode], None]]
"""
        from .entity import Entity
        from .mobile_app_content_file_upload_state import MobileAppContentFileUploadState
fields: Dict[str, Callable[[Any], None]] = {
"azureStorageUri": lambda n : setattr(self, 'azure_storage_uri', n.get_str_value()),
"azureStorageUriExpirationDateTime": lambda n : setattr(self, 'azure_storage_uri_expiration_date_time', n.get_datetime_value()),
"createdDateTime": lambda n : setattr(self, 'created_date_time', n.get_datetime_value()),
"isCommitted": lambda n : setattr(self, 'is_committed', n.get_bool_value()),
"manifest": lambda n : setattr(self, 'manifest', n.get_bytes_value()),
"name": lambda n : setattr(self, 'name', n.get_str_value()),
"size": lambda n : setattr(self, 'size', n.get_int_value()),
"sizeEncrypted": lambda n : setattr(self, 'size_encrypted', n.get_int_value()),
"uploadState": lambda n : setattr(self, 'upload_state', n.get_enum_value(MobileAppContentFileUploadState)),
}
super_fields = super().get_field_deserializers()
fields.update(super_fields)
return fields
def serialize(self,writer: SerializationWriter) -> None:
"""
Serializes information the current object
Args:
writer: Serialization writer to use to serialize this model
"""
if not writer:
raise TypeError("writer cannot be null.")
super().serialize(writer)
writer.write_str_value("azureStorageUri", self.azure_storage_uri)
writer.write_datetime_value("azureStorageUriExpirationDateTime", self.azure_storage_uri_expiration_date_time)
writer.write_datetime_value("createdDateTime", self.created_date_time)
writer.write_bool_value("isCommitted", self.is_committed)
writer.write_bytes_value("manifest", self.manifest)
writer.write_str_value("name", self.name)
writer.write_int_value("size", self.size)
writer.write_int_value("sizeEncrypted", self.size_encrypted)
writer.write_enum_value("uploadState", self.upload_state)
|
81c6bac6151560187fa410c73ea06cb74abd7200
|
89420cda57f03791a5448ed4eeb967d06a4aade3
|
/arviz/data/converters.py
|
2961f0aaf1f35f3ccac1472b024cb8942978f056
|
[
"Apache-2.0"
] |
permissive
|
arviz-devs/arviz
|
fa2423e28f7a8c1b22986dbef317579c00744f75
|
24c260a0390d030e106943f21811652ea82aebc7
|
refs/heads/main
| 2023-09-03T12:22:12.075948
| 2023-07-18T22:29:35
| 2023-07-18T22:29:35
| 39,890,704
| 1,421
| 413
|
Apache-2.0
| 2023-09-13T15:53:19
| 2015-07-29T11:51:10
|
Python
|
UTF-8
|
Python
| false
| false
| 7,662
|
py
|
converters.py
|
"""High level conversion functions."""
import numpy as np
import xarray as xr
from .base import dict_to_dataset
from .inference_data import InferenceData
from .io_beanmachine import from_beanmachine
from .io_cmdstan import from_cmdstan
from .io_cmdstanpy import from_cmdstanpy
from .io_emcee import from_emcee
from .io_numpyro import from_numpyro
from .io_pyro import from_pyro
from .io_pystan import from_pystan
# pylint: disable=too-many-return-statements
def convert_to_inference_data(obj, *, group="posterior", coords=None, dims=None, **kwargs):
r"""Convert a supported object to an InferenceData object.
This function sends `obj` to the right conversion function. It is idempotent,
in that it will return arviz.InferenceData objects unchanged.
Parameters
----------
obj : dict, str, np.ndarray, xr.Dataset, pystan fit
A supported object to convert to InferenceData:
| InferenceData: returns unchanged
| str: Attempts to load the cmdstan csv or netcdf dataset from disk
| pystan fit: Automatically extracts data
| cmdstanpy fit: Automatically extracts data
| cmdstan csv-list: Automatically extracts data
| emcee sampler: Automatically extracts data
| pyro MCMC: Automatically extracts data
| beanmachine MonteCarloSamples: Automatically extracts data
| xarray.Dataset: adds to InferenceData as only group
| xarray.DataArray: creates an xarray dataset as the only group, gives the
array an arbitrary name, if name not set
| dict: creates an xarray dataset as the only group
| numpy array: creates an xarray dataset as the only group, gives the
array an arbitrary name
group : str
If `obj` is a dict or numpy array, assigns the resulting xarray
dataset to this group. Default: "posterior".
coords : dict[str, iterable]
A dictionary containing the values that are used as index. The key
is the name of the dimension, the values are the index values.
dims : dict[str, List(str)]
A mapping from variables to a list of coordinate names for the variable
kwargs
Rest of the supported keyword arguments transferred to conversion function.
Returns
-------
InferenceData
"""
kwargs[group] = obj
kwargs["coords"] = coords
kwargs["dims"] = dims
# Cases that convert to InferenceData
if isinstance(obj, InferenceData):
if coords is not None or dims is not None:
raise TypeError("Cannot use coords or dims arguments with InferenceData value.")
return obj
elif isinstance(obj, str):
if obj.endswith(".csv"):
if group == "sample_stats":
kwargs["posterior"] = kwargs.pop(group)
elif group == "sample_stats_prior":
kwargs["prior"] = kwargs.pop(group)
return from_cmdstan(**kwargs)
else:
if coords is not None or dims is not None:
raise TypeError(
"Cannot use coords or dims arguments reading InferenceData from netcdf."
)
return InferenceData.from_netcdf(obj)
elif (
obj.__class__.__name__ in {"StanFit4Model", "CmdStanMCMC"}
or obj.__class__.__module__ == "stan.fit"
):
if group == "sample_stats":
kwargs["posterior"] = kwargs.pop(group)
elif group == "sample_stats_prior":
kwargs["prior"] = kwargs.pop(group)
if obj.__class__.__name__ == "CmdStanMCMC":
return from_cmdstanpy(**kwargs)
else: # pystan or pystan3
return from_pystan(**kwargs)
elif obj.__class__.__name__ == "EnsembleSampler": # ugly, but doesn't make emcee a requirement
return from_emcee(sampler=kwargs.pop(group), **kwargs)
elif obj.__class__.__name__ == "MonteCarloSamples":
return from_beanmachine(sampler=kwargs.pop(group), **kwargs)
elif obj.__class__.__name__ == "MCMC" and obj.__class__.__module__.startswith("pyro"):
return from_pyro(posterior=kwargs.pop(group), **kwargs)
elif obj.__class__.__name__ == "MCMC" and obj.__class__.__module__.startswith("numpyro"):
return from_numpyro(posterior=kwargs.pop(group), **kwargs)
# Cases that convert to xarray
if isinstance(obj, xr.Dataset):
dataset = obj
elif isinstance(obj, xr.DataArray):
if obj.name is None:
obj.name = "x"
dataset = obj.to_dataset()
elif isinstance(obj, dict):
dataset = dict_to_dataset(obj, coords=coords, dims=dims)
elif isinstance(obj, np.ndarray):
dataset = dict_to_dataset({"x": obj}, coords=coords, dims=dims)
elif isinstance(obj, (list, tuple)) and isinstance(obj[0], str) and obj[0].endswith(".csv"):
if group == "sample_stats":
kwargs["posterior"] = kwargs.pop(group)
elif group == "sample_stats_prior":
kwargs["prior"] = kwargs.pop(group)
return from_cmdstan(**kwargs)
else:
allowable_types = (
"xarray dataarray",
"xarray dataset",
"dict",
"netcdf filename",
"numpy array",
"pystan fit",
"emcee fit",
"pyro mcmc fit",
"numpyro mcmc fit",
"cmdstan fit csv filename",
"cmdstanpy fit",
)
raise ValueError(
f'Can only convert {", ".join(allowable_types)} to InferenceData, '
f"not {obj.__class__.__name__}"
)
return InferenceData(**{group: dataset})
def convert_to_dataset(obj, *, group="posterior", coords=None, dims=None):
"""Convert a supported object to an xarray dataset.
This function is idempotent, in that it will return xarray.Dataset functions
unchanged. Raises `ValueError` if the desired group can not be extracted.
Note this goes through a DataInference object. See `convert_to_inference_data`
for more details. Raises ValueError if it can not work out the desired
conversion.
Parameters
----------
obj : dict, str, np.ndarray, xr.Dataset, pystan fit
A supported object to convert to InferenceData:
- InferenceData: returns unchanged
- str: Attempts to load the netcdf dataset from disk
- pystan fit: Automatically extracts data
- xarray.Dataset: adds to InferenceData as only group
- xarray.DataArray: creates an xarray dataset as the only group, gives the
array an arbitrary name, if name not set
- dict: creates an xarray dataset as the only group
- numpy array: creates an xarray dataset as the only group, gives the
array an arbitrary name
group : str
If `obj` is a dict or numpy array, assigns the resulting xarray
dataset to this group.
coords : dict[str, iterable]
A dictionary containing the values that are used as index. The key
is the name of the dimension, the values are the index values.
dims : dict[str, List(str)]
A mapping from variables to a list of coordinate names for the variable
Returns
-------
xarray.Dataset
"""
inference_data = convert_to_inference_data(obj, group=group, coords=coords, dims=dims)
dataset = getattr(inference_data, group, None)
if dataset is None:
raise ValueError(
"Can not extract {group} from {obj}! See {filename} for other "
"conversion utilities.".format(group=group, obj=obj, filename=__file__)
)
return dataset
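# Hedged usage sketch (synthetic data; relies only on numpy and the functions above):
# a plain dict of arrays with (chain, draw) leading dimensions becomes a one-group
# InferenceData, and convert_to_dataset pulls that group back out as an xarray.Dataset.
if __name__ == "__main__":
    draws = np.random.randn(4, 100)  # 4 chains, 100 draws
    idata = convert_to_inference_data({"mu": draws})
    dataset = convert_to_dataset({"mu": draws})
    print(idata.groups(), dataset["mu"].shape)  # ['posterior'] (4, 100)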
|
1a8ed51b31b9014c642e0f1a2c2a1d60f7f8b5cd
|
c2fd38213935cc65cdb6df69aee6960f1c9b2afb
|
/tests_integration/helpers/get_advertiser_by_name.py
|
4b71b33112888988cac031283b1fe67be9f4fcdb
|
[
"MIT"
] |
permissive
|
kmjennison/dfp-prebid-setup
|
5b1cf7fce94f019693c7f024fe1e48df3ab366cf
|
f17baee334e9b5c9ec70a3b1d3585bd6ca4826fc
|
refs/heads/master
| 2022-10-27T06:13:17.854584
| 2022-10-11T19:50:17
| 2022-10-11T19:50:17
| 83,600,304
| 117
| 92
|
MIT
| 2023-08-24T21:46:49
| 2017-03-01T20:49:55
|
Python
|
UTF-8
|
Python
| false
| false
| 746
|
py
|
get_advertiser_by_name.py
|
#!/usr/bin/env python
import logging
from googleads import ad_manager
from dfp.client import get_client
def get_advertiser_by_name(advertiser_name):
"""
  Returns a DFP company (advertiser) record from the company name.
  Args:
    advertiser_name (str): the name of the DFP advertiser
  Returns:
    the advertiser's DFP company record, or None if no match is found
"""
client = get_client()
company_service = client.GetService('CompanyService',
version='v202208')
statement = (ad_manager.StatementBuilder()
.Where('name = :name')
.WithBindVariable('name', advertiser_name))
response = company_service.getCompaniesByStatement(
statement.ToStatement())
if 'results' in response and len(response['results']) > 0:
return response['results'][0]
else:
return None
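# Hedged usage sketch (hypothetical advertiser name; assumes Ad Manager credentials
# are already configured for dfp.client.get_client):
if __name__ == '__main__':
    advertiser = get_advertiser_by_name('Example Advertiser')
    if advertiser is None:
        print('No advertiser found.')
    else:
        print('Found advertiser: {}'.format(advertiser['name']))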
|
4b5e17d22aef4ab8b3b862d71bb7f423dc8f9c07
|
90e76adae07c81392d64fdfcb95f659e8a0c3f11
|
/tests/unit/retries/test_quota.py
|
9a735950c382a8a6589e19b9f4540d3b7c3ea397
|
[
"Apache-2.0",
"MPL-2.0",
"MIT"
] |
permissive
|
boto/botocore
|
b9468d08c83372cf6930643a15f87801b79ffddd
|
7275c5d6e9273caf3804e0ce9491af080518798c
|
refs/heads/develop
| 2023-09-01T18:11:40.617674
| 2023-08-31T18:58:50
| 2023-08-31T18:58:50
| 6,670,942
| 1,289
| 1,234
|
Apache-2.0
| 2023-09-13T17:23:42
| 2012-11-13T13:25:36
|
Python
|
UTF-8
|
Python
| false
| false
| 1,289
|
py
|
test_quota.py
|
from botocore.retries import quota
from tests import unittest
class TestRetryQuota(unittest.TestCase):
def setUp(self):
self.retry_quota = quota.RetryQuota(50)
def test_can_acquire_amount(self):
self.assertTrue(self.retry_quota.acquire(5))
self.assertEqual(self.retry_quota.available_capacity, 45)
def test_can_release_amount(self):
self.assertTrue(self.retry_quota.acquire(5))
self.assertEqual(self.retry_quota.available_capacity, 45)
self.retry_quota.release(5)
self.assertEqual(self.retry_quota.available_capacity, 50)
def test_cant_exceed_max_capacity(self):
self.assertTrue(self.retry_quota.acquire(5))
self.assertEqual(self.retry_quota.available_capacity, 45)
self.retry_quota.release(10)
self.assertEqual(self.retry_quota.available_capacity, 50)
def test_noop_if_at_max_capacity(self):
self.retry_quota.release(10)
self.assertEqual(self.retry_quota.available_capacity, 50)
def test_cant_go_below_zero(self):
self.assertTrue(self.retry_quota.acquire(49))
self.assertEqual(self.retry_quota.available_capacity, 1)
self.assertFalse(self.retry_quota.acquire(10))
self.assertEqual(self.retry_quota.available_capacity, 1)
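# Hedged sketch of the pattern the tests above exercise (uses only
# botocore.retries.quota): a caller reserves capacity before a retry attempt and
# hands it back afterwards; the quota never exceeds its initial capacity.
if __name__ == '__main__':
    q = quota.RetryQuota(50)
    if q.acquire(5):             # reserve capacity before attempting a retry
        q.release(5)             # return it once the retried call succeeds
    print(q.available_capacity)  # back to 50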
|
8660c177c18005e7f04795c095860bd847fe7bd8
|
e8e934415e66ff45c22eecd37c3201c64dfd955b
|
/vocab/wd/test_frames.py
|
766d97845f6e3f7036a1c53ecda95194d6431b51
|
[] |
no_license
|
w3c/web-annotation
|
883156ca37a31e679db32391bba759d442047b0a
|
74992e5d3ca3ac10b3289f6c855c876cc9b024a1
|
refs/heads/gh-pages
| 2021-04-22T06:43:12.205460
| 2017-05-04T09:27:14
| 2017-05-04T09:27:14
| 24,441,429
| 149
| 49
| null | 2017-03-24T10:59:09
| 2014-09-25T02:59:09
|
HTML
|
UTF-8
|
Python
| false
| false
| 1,100
|
py
|
test_frames.py
|
from pyld import jsonld
from pyld.jsonld import compact, expand, frame
import json
import os
contextUri = "http://www.w3.org/ns/anno.jsonld"
contextmap = {
contextUri: "../../jsonld/anno.jsonld"
}
# Stop code from looking up the contexts online EVERY TIME
def load_document_local(url):
doc = {
'contextUrl': None,
'documentUrl': None,
'document': ''
}
fn = contextmap.get(url, "")
if fn:
        with open(fn) as fh:
            data = fh.read()
        doc['document'] = data
return doc
jsonld.set_document_loader(load_document_local)
with open('../../jsonld/annotation_frame.jsonld') as fh:
    data = fh.read()
annoframe = json.loads(data)
# read in examples and reframe
egdir = '../../model/wd2/examples/correct/'
examples = os.listdir(egdir)
for eg in examples:
if eg.endswith('.json'):
        print(eg)
        with open(os.path.join(egdir, eg)) as fh:
            data = fh.read()
anno = json.loads(data)
anno['@context'] = contextUri
framed = frame(anno, annoframe)
out = compact(framed, contextUri)
        print(json.dumps(out, sort_keys=True, indent=2))
|
df1fbcdfce60e7891887c5a323a50726aca2dc5f
|
74967d0f8d918b86993a18aa5727753ff841070c
|
/src/wechaty/user/mini_program.py
|
4d6b2466c4e712eb3ef13fb955e6ee7b729b738a
|
[
"Apache-2.0",
"LicenseRef-scancode-free-unknown"
] |
permissive
|
wechaty/python-wechaty
|
799969661efa70f2b0ba448a80163f685e406e07
|
e9a04a98a3b01f287760e2d2a4514e4a80ecd15f
|
refs/heads/main
| 2023-09-05T21:30:49.806145
| 2023-07-15T10:11:02
| 2023-07-15T10:11:02
| 146,103,950
| 1,266
| 239
|
Apache-2.0
| 2023-08-16T10:02:57
| 2018-08-25T14:51:18
|
Python
|
UTF-8
|
Python
| false
| false
| 3,871
|
py
|
mini_program.py
|
"""
Python Wechaty - https://github.com/wechaty/python-wechaty
Authors: Huan LI (李卓桓) <https://github.com/huan>
Jingjing WU (吴京京) <https://github.com/wj-Mcat>
2020-now @ Copyright Wechaty
Licensed under the Apache License, Version 2.0 (the 'License');
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an 'AS IS' BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import annotations
from typing import TYPE_CHECKING
from dataclasses import asdict
from wechaty import Accessory
from wechaty_puppet import MiniProgramPayload, get_logger
from wechaty.utils import default_str
if TYPE_CHECKING:
from wechaty.user import Message
log = get_logger('MiniProgram')
class MiniProgram(Accessory[MiniProgramPayload]):
"""
mini_program object which handle the url_link content
"""
def __init__(self, payload: MiniProgramPayload):
"""
initialization for mini_program
:param payload:
"""
super().__init__()
log.info('MiniProgram created')
self._payload: MiniProgramPayload = payload
@classmethod
async def create_from_message(cls, message: Message) -> MiniProgram:
"""
static create MiniProgram method
:return:
"""
log.info(f'loading the mini-program from message <{message}>')
mini_program_payload = await cls.get_puppet().message_mini_program(
message_id=message.message_id)
mini_program = MiniProgram(mini_program_payload)
return mini_program
@classmethod
def create_from_json(cls, payload_data: dict) -> MiniProgram:
"""
create the mini_program from json data
"""
log.info(f'loading the mini-program from json data <{payload_data}>')
payload = MiniProgramPayload(**payload_data)
mini_program = cls(payload=payload)
return mini_program
def to_json(self) -> dict:
"""
save the mini-program to dict data
"""
log.info(f'save the mini-program to json data : <{self.payload}>')
mini_program_data = asdict(self.payload)
return mini_program_data
@property
def app_id(self) -> str:
"""
get mini_program app_id
:return:
"""
return default_str(self._payload.appid)
@property
def title(self) -> str:
"""
get mini_program title
:return:
"""
return default_str(self._payload.title)
@property
def icon_url(self) -> str:
"""
get mini_program icon url
"""
return default_str(self._payload.iconUrl)
@property
def page_path(self) -> str:
"""
get mini_program page_path
:return:
"""
return default_str(self._payload.pagePath)
@property
def user_name(self) -> str:
"""
get mini_program user_name
:return:
"""
return default_str(self._payload.username)
@property
def description(self) -> str:
"""
get mini_program description
:return:
"""
return default_str(self._payload.description)
@property
def thumb_url(self) -> str:
"""
get mini_program thumb_url
:return:
"""
return default_str(self._payload.thumbUrl)
@property
def thumb_key(self) -> str:
"""
get mini_program thumb_key
:return:
"""
return default_str(self._payload.thumbKey)
|
48b4aad10abfa0b6d66406df04ff8acc77d87cad
|
3c2ee998c99a693b3b04d44f8c5af0fc5fb2c49d
|
/migrations/versions/ee5315dcf3e1_.py
|
e78b03d4747d0dab127be0e272d5eb1f04e5a3f8
|
[
"BSD-2-Clause"
] |
permissive
|
hotosm/tasking-manager
|
4520a56b31b35ebfc82a337bc7e676f1f8bc946a
|
45bf3937c74902226096aee5b49e7abea62df524
|
refs/heads/develop
| 2023-09-01T02:43:43.875659
| 2023-08-16T21:26:02
| 2023-08-29T13:15:52
| 80,733,077
| 526
| 316
|
BSD-2-Clause
| 2023-09-14T10:15:55
| 2017-02-02T14:31:35
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 1,170
|
py
|
ee5315dcf3e1_.py
|
"""empty message
Revision ID: ee5315dcf3e1
Revises: 9f5b73af01db
Create Date: 2017-05-24 10:39:46.586986
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = "ee5315dcf3e1"
down_revision = "9f5b73af01db"
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.add_column("users", sa.Column("email_address", sa.String(), nullable=True))
op.add_column("users", sa.Column("facebook_id", sa.String(), nullable=True))
op.add_column("users", sa.Column("is_email_verified", sa.Boolean(), nullable=True))
op.add_column("users", sa.Column("linkedin_id", sa.String(), nullable=True))
op.add_column("users", sa.Column("twitter_id", sa.String(), nullable=True))
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_column("users", "twitter_id")
op.drop_column("users", "linkedin_id")
op.drop_column("users", "is_email_verified")
op.drop_column("users", "facebook_id")
op.drop_column("users", "email_address")
# ### end Alembic commands ###
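# Hedged usage note (assumes an alembic environment configured for this project):
#   alembic upgrade ee5315dcf3e1     # adds the user contact/social columns above
#   alembic downgrade 9f5b73af01db   # drops them again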
|
158a761dc20d935cabb85e51622a3f2bbf96820a
|
4bcc9806152542ab43fc2cf47c499424f200896c
|
/tensorflow/compiler/tests/einsum_op_test.py
|
445420723116e49ab821be0a2218021ca5f9b8c2
|
[
"Apache-2.0",
"LicenseRef-scancode-generic-cla",
"BSD-2-Clause"
] |
permissive
|
tensorflow/tensorflow
|
906276dbafcc70a941026aa5dc50425ef71ee282
|
a7f3934a67900720af3d3b15389551483bee50b8
|
refs/heads/master
| 2023-08-25T04:24:41.611870
| 2023-08-25T04:06:24
| 2023-08-25T04:14:08
| 45,717,250
| 208,740
| 109,943
|
Apache-2.0
| 2023-09-14T20:55:50
| 2015-11-07T01:19:20
|
C++
|
UTF-8
|
Python
| false
| false
| 3,332
|
py
|
einsum_op_test.py
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Test cases for einsum op."""
import numpy as np
from tensorflow.compiler.tests import xla_test
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import special_math_ops
from tensorflow.python.platform import googletest
class EinsumOpTest(xla_test.XLATestCase):
"""Test cases for einsum op."""
def _testUnary(self, op, inp, expected):
"""Verifies that unary 'op' produces 'expected' when fed input 'inp'."""
with self.session() as session:
with self.test_scope():
pinp = array_ops.placeholder(
dtypes.as_dtype(inp.dtype), inp.shape, name='a')
output = op(pinp)
result = session.run(output, {pinp: inp})
self.assertEqual(output.dtype, expected.dtype)
self.assertAllCloseAccordingToType(
expected, result, rtol=1e-3, atol=1e-5, bfloat16_rtol=0.03)
def _testBinary(self, op, a, b, expected):
"""Verifies that binary 'op' produces 'expected' when fed 'a' and 'b'."""
with self.session() as session:
with self.test_scope():
pa = array_ops.placeholder(dtypes.as_dtype(a.dtype), a.shape, name='a')
pb = array_ops.placeholder(dtypes.as_dtype(b.dtype), b.shape, name='b')
output = op(pa, pb)
result = session.run(output, {pa: a, pb: b})
self.assertAllCloseAccordingToType(result, expected, rtol=1e-3)
def testMatMul(self):
for dtype in self.float_types:
self._testBinary(
lambda x, y: special_math_ops.einsum('ij,jk->ik', x, y),
np.array([[-0.25]], dtype=dtype),
np.array([[8]], dtype=dtype),
expected=np.array([[-2]], dtype=dtype))
def testImplicitForm(self):
for dtype in self.float_types:
self._testBinary(
lambda x, y: special_math_ops.einsum('ijk,kji', x, y),
np.array([[[1, 3], [2, 5], [6, 8]]], dtype=dtype),
np.array([[[1], [3], [2]], [[5], [6], [8]]], dtype=dtype),
expected=np.array(128, dtype=dtype))
def testReducedIndices(self):
for dtype in self.float_types:
self._testBinary(
lambda x, y: special_math_ops.einsum('ij,j->', x, y),
np.array([[1, 3], [2, 5], [6, 8]], dtype=dtype),
np.array([3, 2], dtype=dtype),
expected=np.array(59, dtype=dtype))
def testUnary(self):
for dtype in self.float_types:
self._testUnary(
lambda x: special_math_ops.einsum('ijk->kji', x),
np.array([[[1, 3], [2, 5], [6, 8]]], dtype=dtype),
expected=np.array([[[1], [2], [6]], [[3], [5], [8]]], dtype=dtype))
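# Hedged cross-check sketch (plain NumPy, independent of XLA; not invoked by the test
# runner): dense equivalents of the einsum specs exercised in the tests above.
def _numpy_reference_sketch():
  x = np.array([[1., 3.], [2., 5.], [6., 8.]])
  y = np.array([3., 2.])
  assert np.einsum('ij,j->', x, y) == 59.0  # matches testReducedIndices
  assert np.allclose(np.einsum('ij,jk->ik', np.array([[-0.25]]), np.array([[8.]])),
                     [[-2.]])               # matches testMatMul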
if __name__ == '__main__':
googletest.main()
|
82a930f739e72f6da303b4cfa3f015d7718d3ce3
|
04142fdda9b3fb29fb7456d5bc3e504985f24cbe
|
/mmcv/ops/three_interpolate.py
|
286bd0472ebae83f405534178a19fefe9ffbc384
|
[
"Apache-2.0"
] |
permissive
|
open-mmlab/mmcv
|
419e301bbc1d7d45331d67eccfd673f290a796d5
|
6e9ee26718b22961d5c34caca4108413b1b7b3af
|
refs/heads/main
| 2023-08-31T07:08:27.223321
| 2023-08-28T09:02:10
| 2023-08-28T09:02:10
| 145,670,155
| 5,319
| 1,900
|
Apache-2.0
| 2023-09-14T02:37:16
| 2018-08-22T07:05:26
|
Python
|
UTF-8
|
Python
| false
| false
| 2,240
|
py
|
three_interpolate.py
|
from typing import Any, Tuple
import torch
from torch.autograd import Function
from ..utils import ext_loader
ext_module = ext_loader.load_ext(
'_ext', ['three_interpolate_forward', 'three_interpolate_backward'])
class ThreeInterpolate(Function):
"""Performs weighted linear interpolation on 3 features.
Please refer to `Paper of PointNet++ <https://arxiv.org/abs/1706.02413>`_
for more details.
"""
@staticmethod
def forward(ctx: Any, features: torch.Tensor, indices: torch.Tensor,
weight: torch.Tensor) -> torch.Tensor:
"""
Args:
features (torch.Tensor): (B, C, M) Features descriptors to be
interpolated.
indices (torch.Tensor): (B, n, 3) indices of three nearest
neighbor features for the target features.
weight (torch.Tensor): (B, n, 3) weights of three nearest
neighbor features for the target features.
Returns:
torch.Tensor: (B, C, N) tensor of the interpolated features
"""
assert features.is_contiguous()
assert indices.is_contiguous()
assert weight.is_contiguous()
B, c, m = features.size()
n = indices.size(1)
ctx.three_interpolate_for_backward = (indices, weight, m)
output = features.new_empty(B, c, n)
ext_module.three_interpolate_forward(
features, indices, weight, output, b=B, c=c, m=m, n=n)
return output
@staticmethod
def backward(
ctx, grad_out: torch.Tensor
) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
"""
Args:
grad_out (torch.Tensor): (B, C, N) tensor with gradients of outputs
Returns:
torch.Tensor: (B, C, M) tensor with gradients of features
"""
idx, weight, m = ctx.three_interpolate_for_backward
B, c, n = grad_out.size()
grad_features = grad_out.new_zeros(B, c, m)
grad_out_data = grad_out.data.contiguous()
ext_module.three_interpolate_backward(
grad_out_data, idx, weight, grad_features.data, b=B, c=c, n=n, m=m)
return grad_features, None, None
three_interpolate = ThreeInterpolate.apply
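# Hedged usage sketch (shapes follow the docstrings above; assumes a CUDA build of
# the mmcv _ext extension and an available GPU):
if __name__ == '__main__':
    B, C, M, N = 2, 16, 64, 128
    features = torch.randn(B, C, M).cuda()
    indices = torch.randint(0, M, (B, N, 3)).int().cuda()
    weight = torch.rand(B, N, 3).cuda()
    weight = weight / weight.sum(dim=-1, keepdim=True)  # normalize the 3-NN weights
    interpolated = three_interpolate(features, indices, weight)
    assert interpolated.shape == (B, C, N)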
|
35289967afbb81a7cdcafdebb49bfb5090467759
|
66a9c25cf0c53e2c3029b423018b856103d709d4
|
/sleekxmpp/plugins/xep_0045.py
|
858b3cbeda840167b52a9d1f4a7e505b084f28a1
|
[
"MIT",
"BSD-3-Clause",
"BSD-2-Clause"
] |
permissive
|
fritzy/SleekXMPP
|
1b02d3e2b22efeb6bf3f8f487e6c0343b9b85baf
|
cc1d470397de768ffcc41d2ed5ac3118d19f09f5
|
refs/heads/develop
| 2020-05-22T04:14:58.568822
| 2020-02-18T22:54:57
| 2020-02-18T22:54:57
| 463,405
| 658
| 254
|
NOASSERTION
| 2023-06-27T20:05:54
| 2010-01-08T05:54:45
|
Python
|
UTF-8
|
Python
| false
| false
| 15,689
|
py
|
xep_0045.py
|
"""
SleekXMPP: The Sleek XMPP Library
Copyright (C) 2010 Nathanael C. Fritz
This file is part of SleekXMPP.
See the file LICENSE for copying permission.
"""
from __future__ import with_statement
import logging
from sleekxmpp import Presence
from sleekxmpp.plugins import BasePlugin, register_plugin
from sleekxmpp.xmlstream import register_stanza_plugin, ElementBase, JID, ET
from sleekxmpp.xmlstream.handler.callback import Callback
from sleekxmpp.xmlstream.matcher.xpath import MatchXPath
from sleekxmpp.xmlstream.matcher.xmlmask import MatchXMLMask
from sleekxmpp.exceptions import IqError, IqTimeout
log = logging.getLogger(__name__)
class MUCPresence(ElementBase):
name = 'x'
namespace = 'http://jabber.org/protocol/muc#user'
plugin_attrib = 'muc'
interfaces = set(('affiliation', 'role', 'jid', 'nick', 'room'))
affiliations = set(('', ))
roles = set(('', ))
def getXMLItem(self):
item = self.xml.find('{http://jabber.org/protocol/muc#user}item')
if item is None:
item = ET.Element('{http://jabber.org/protocol/muc#user}item')
self.xml.append(item)
return item
def getAffiliation(self):
        #TODO if no affiliation, set it to the default and return default
item = self.getXMLItem()
return item.get('affiliation', '')
def setAffiliation(self, value):
item = self.getXMLItem()
#TODO check for valid affiliation
item.attrib['affiliation'] = value
return self
def delAffiliation(self):
item = self.getXMLItem()
#TODO set default affiliation
if 'affiliation' in item.attrib: del item.attrib['affiliation']
return self
def getJid(self):
item = self.getXMLItem()
return JID(item.get('jid', ''))
def setJid(self, value):
item = self.getXMLItem()
if not isinstance(value, str):
value = str(value)
item.attrib['jid'] = value
return self
def delJid(self):
item = self.getXMLItem()
if 'jid' in item.attrib: del item.attrib['jid']
return self
def getRole(self):
item = self.getXMLItem()
#TODO get default role, set default role if none
return item.get('role', '')
def setRole(self, value):
item = self.getXMLItem()
#TODO check for valid role
item.attrib['role'] = value
return self
def delRole(self):
item = self.getXMLItem()
#TODO set default role
if 'role' in item.attrib: del item.attrib['role']
return self
def getNick(self):
return self.parent()['from'].resource
def getRoom(self):
return self.parent()['from'].bare
def setNick(self, value):
log.warning("Cannot set nick through mucpresence plugin.")
return self
def setRoom(self, value):
log.warning("Cannot set room through mucpresence plugin.")
return self
def delNick(self):
log.warning("Cannot delete nick through mucpresence plugin.")
return self
def delRoom(self):
log.warning("Cannot delete room through mucpresence plugin.")
return self
class XEP_0045(BasePlugin):
"""
Implements XEP-0045 Multi-User Chat
"""
name = 'xep_0045'
description = 'XEP-0045: Multi-User Chat'
dependencies = set(['xep_0030', 'xep_0004'])
def plugin_init(self):
self.rooms = {}
self.ourNicks = {}
self.xep = '0045'
# load MUC support in presence stanzas
register_stanza_plugin(Presence, MUCPresence)
self.xmpp.register_handler(Callback('MUCPresence', MatchXMLMask("<presence xmlns='%s' />" % self.xmpp.default_ns), self.handle_groupchat_presence))
self.xmpp.register_handler(Callback('MUCError', MatchXMLMask("<message xmlns='%s' type='error'><error/></message>" % self.xmpp.default_ns), self.handle_groupchat_error_message))
self.xmpp.register_handler(Callback('MUCMessage', MatchXMLMask("<message xmlns='%s' type='groupchat'><body/></message>" % self.xmpp.default_ns), self.handle_groupchat_message))
self.xmpp.register_handler(Callback('MUCSubject', MatchXMLMask("<message xmlns='%s' type='groupchat'><subject/></message>" % self.xmpp.default_ns), self.handle_groupchat_subject))
self.xmpp.register_handler(Callback('MUCConfig', MatchXMLMask("<message xmlns='%s' type='groupchat'><x xmlns='http://jabber.org/protocol/muc#user'><status/></x></message>" % self.xmpp.default_ns), self.handle_config_change))
self.xmpp.register_handler(Callback('MUCInvite', MatchXPath("{%s}message/{%s}x/{%s}invite" % (
self.xmpp.default_ns,
'http://jabber.org/protocol/muc#user',
'http://jabber.org/protocol/muc#user')), self.handle_groupchat_invite))
def handle_groupchat_invite(self, inv):
""" Handle an invite into a muc.
"""
logging.debug("MUC invite to %s from %s: %s", inv['to'], inv["from"], inv)
if inv['from'] not in self.rooms.keys():
self.xmpp.event("groupchat_invite", inv)
def handle_config_change(self, msg):
"""Handle a MUC configuration change (with status code)."""
self.xmpp.event('groupchat_config_status', msg)
self.xmpp.event('muc::%s::config_status' % msg['from'].bare , msg)
def handle_groupchat_presence(self, pr):
""" Handle a presence in a muc.
"""
got_offline = False
got_online = False
if pr['muc']['room'] not in self.rooms.keys():
return
entry = pr['muc'].getStanzaValues()
entry['show'] = pr['show']
entry['status'] = pr['status']
entry['alt_nick'] = pr['nick']
if pr['type'] == 'unavailable':
if entry['nick'] in self.rooms[entry['room']]:
del self.rooms[entry['room']][entry['nick']]
if '{}/{}'.format(entry['room'], entry['nick']) == self.getOurJidInRoom(entry['room']):
log.debug("I got kicked :( from %s" % entry['room'])
del self.rooms[entry['room']]
got_offline = True
else:
if entry['nick'] not in self.rooms[entry['room']]:
got_online = True
self.rooms[entry['room']][entry['nick']] = entry
log.debug("MUC presence from %s/%s : %s", entry['room'],entry['nick'], entry)
self.xmpp.event("groupchat_presence", pr)
self.xmpp.event("muc::%s::presence" % entry['room'], pr)
if got_offline:
self.xmpp.event("muc::%s::got_offline" % entry['room'], pr)
if got_online:
self.xmpp.event("muc::%s::got_online" % entry['room'], pr)
def handle_groupchat_message(self, msg):
""" Handle a message event in a muc.
"""
self.xmpp.event('groupchat_message', msg)
self.xmpp.event("muc::%s::message" % msg['from'].bare, msg)
def handle_groupchat_error_message(self, msg):
""" Handle a message error event in a muc.
"""
self.xmpp.event('groupchat_message_error', msg)
self.xmpp.event("muc::%s::message_error" % msg['from'].bare, msg)
def handle_groupchat_subject(self, msg):
""" Handle a message coming from a muc indicating
a change of subject (or announcing it when joining the room)
"""
self.xmpp.event('groupchat_subject', msg)
def jidInRoom(self, room, jid):
for nick in self.rooms[room]:
entry = self.rooms[room][nick]
if entry is not None and entry['jid'].full == jid:
return True
return False
def getNick(self, room, jid):
for nick in self.rooms[room]:
entry = self.rooms[room][nick]
if entry is not None and entry['jid'].full == jid:
return nick
def configureRoom(self, room, form=None, ifrom=None):
if form is None:
form = self.getRoomConfig(room, ifrom=ifrom)
iq = self.xmpp.makeIqSet()
iq['to'] = room
if ifrom is not None:
iq['from'] = ifrom
query = ET.Element('{http://jabber.org/protocol/muc#owner}query')
form = form.getXML('submit')
query.append(form)
iq.append(query)
# For now, swallow errors to preserve existing API
try:
result = iq.send()
except IqError:
return False
except IqTimeout:
return False
return True
def joinMUC(self, room, nick, maxhistory="0", password='', wait=False, pstatus=None, pshow=None, pfrom=None):
""" Join the specified room, requesting 'maxhistory' lines of history.
"""
stanza = self.xmpp.makePresence(pto="%s/%s" % (room, nick), pstatus=pstatus, pshow=pshow, pfrom=pfrom)
x = ET.Element('{http://jabber.org/protocol/muc}x')
if password:
passelement = ET.Element('{http://jabber.org/protocol/muc}password')
passelement.text = password
x.append(passelement)
if maxhistory:
history = ET.Element('{http://jabber.org/protocol/muc}history')
if maxhistory == "0":
history.attrib['maxchars'] = maxhistory
else:
history.attrib['maxstanzas'] = maxhistory
x.append(history)
stanza.append(x)
if not wait:
self.xmpp.send(stanza)
else:
#wait for our own room presence back
expect = ET.Element("{%s}presence" % self.xmpp.default_ns, {'from':"%s/%s" % (room, nick)})
self.xmpp.send(stanza, expect)
self.rooms[room] = {}
self.ourNicks[room] = nick
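    # Hedged usage sketch (room and JID values below are hypothetical):
    #
    #   xmpp.register_plugin('xep_0045')
    #   muc = xmpp.plugin['xep_0045']
    #   muc.joinMUC('room@conference.example.org', 'mynick',
    #               maxhistory='20', wait=True)
    #   muc.invite('room@conference.example.org', 'friend@example.org',
    #              reason='join us')
    #   muc.leaveMUC('room@conference.example.org', 'mynick')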
def destroy(self, room, reason='', altroom = '', ifrom=None):
iq = self.xmpp.makeIqSet()
if ifrom is not None:
iq['from'] = ifrom
iq['to'] = room
query = ET.Element('{http://jabber.org/protocol/muc#owner}query')
destroy = ET.Element('{http://jabber.org/protocol/muc#owner}destroy')
if altroom:
destroy.attrib['jid'] = altroom
xreason = ET.Element('{http://jabber.org/protocol/muc#owner}reason')
xreason.text = reason
destroy.append(xreason)
query.append(destroy)
iq.append(query)
# For now, swallow errors to preserve existing API
try:
r = iq.send()
except IqError:
return False
except IqTimeout:
return False
return True
def setAffiliation(self, room, jid=None, nick=None, affiliation='member', ifrom=None):
""" Change room affiliation."""
if affiliation not in ('outcast', 'member', 'admin', 'owner', 'none'):
raise TypeError
query = ET.Element('{http://jabber.org/protocol/muc#admin}query')
if nick is not None:
item = ET.Element('{http://jabber.org/protocol/muc#admin}item', {'affiliation':affiliation, 'nick':nick})
else:
item = ET.Element('{http://jabber.org/protocol/muc#admin}item', {'affiliation':affiliation, 'jid':jid})
query.append(item)
iq = self.xmpp.makeIqSet(query)
iq['to'] = room
iq['from'] = ifrom
# For now, swallow errors to preserve existing API
try:
result = iq.send()
except IqError:
return False
except IqTimeout:
return False
return True
def setRole(self, room, nick, role):
""" Change role property of a nick in a room.
Typically, roles are temporary (they last only as long as you are in the
room), whereas affiliations are permanent (they last across groupchat
sessions).
"""
if role not in ('moderator', 'participant', 'visitor', 'none'):
raise TypeError
query = ET.Element('{http://jabber.org/protocol/muc#admin}query')
item = ET.Element('item', {'role':role, 'nick':nick})
query.append(item)
iq = self.xmpp.makeIqSet(query)
iq['to'] = room
result = iq.send()
if result is False or result['type'] != 'result':
raise ValueError
return True
def invite(self, room, jid, reason='', mfrom=''):
""" Invite a jid to a room."""
msg = self.xmpp.makeMessage(room)
msg['from'] = mfrom
x = ET.Element('{http://jabber.org/protocol/muc#user}x')
invite = ET.Element('{http://jabber.org/protocol/muc#user}invite', {'to': jid})
if reason:
rxml = ET.Element('{http://jabber.org/protocol/muc#user}reason')
rxml.text = reason
invite.append(rxml)
x.append(invite)
msg.append(x)
self.xmpp.send(msg)
def leaveMUC(self, room, nick, msg='', pfrom=None):
""" Leave the specified room.
"""
if msg:
self.xmpp.sendPresence(pshow='unavailable', pto="%s/%s" % (room, nick), pstatus=msg, pfrom=pfrom)
else:
self.xmpp.sendPresence(pshow='unavailable', pto="%s/%s" % (room, nick), pfrom=pfrom)
del self.rooms[room]
def getRoomConfig(self, room, ifrom=''):
iq = self.xmpp.makeIqGet('http://jabber.org/protocol/muc#owner')
iq['to'] = room
iq['from'] = ifrom
# For now, swallow errors to preserve existing API
try:
result = iq.send()
except IqError:
raise ValueError
except IqTimeout:
raise ValueError
form = result.xml.find('{http://jabber.org/protocol/muc#owner}query/{jabber:x:data}x')
if form is None:
raise ValueError
return self.xmpp.plugin['xep_0004'].buildForm(form)
def cancelConfig(self, room, ifrom=None):
query = ET.Element('{http://jabber.org/protocol/muc#owner}query')
x = ET.Element('{jabber:x:data}x', type='cancel')
query.append(x)
iq = self.xmpp.makeIqSet(query)
iq['to'] = room
iq['from'] = ifrom
iq.send()
def setRoomConfig(self, room, config, ifrom=''):
query = ET.Element('{http://jabber.org/protocol/muc#owner}query')
x = config.getXML('submit')
query.append(x)
iq = self.xmpp.makeIqSet(query)
iq['to'] = room
iq['from'] = ifrom
iq.send()
def getJoinedRooms(self):
return self.rooms.keys()
def getOurJidInRoom(self, roomJid):
""" Return the jid we're using in a room.
"""
return "%s/%s" % (roomJid, self.ourNicks[roomJid])
def getJidProperty(self, room, nick, jidProperty):
""" Get the property of a nick in a room, such as its 'jid' or 'affiliation'
If not found, return None.
"""
if room in self.rooms and nick in self.rooms[room] and jidProperty in self.rooms[room][nick]:
return self.rooms[room][nick][jidProperty]
else:
return None
def getRoster(self, room):
""" Get the list of nicks in a room.
"""
if room not in self.rooms.keys():
return None
return self.rooms[room].keys()
    def getUsersByAffiliation(self, room, affiliation='member', ifrom=None):
if affiliation not in ('outcast', 'member', 'admin', 'owner', 'none'):
raise TypeError
query = ET.Element('{http://jabber.org/protocol/muc#admin}query')
item = ET.Element('{http://jabber.org/protocol/muc#admin}item', {'affiliation': affiliation})
query.append(item)
        iq = self.xmpp.Iq(sto=room, sfrom=ifrom, stype='get')
iq.append(query)
return iq.send()
xep_0045 = XEP_0045
register_plugin(XEP_0045)
|
f975f5a6f9bc487f0a6cd1b82b88ebe552ebee21
|
68a76875beffd7636bb6913f8e97b83b1638a3cb
|
/flaskshop/product/views.py
|
b1b1e374e3bc835a4960114afda4411c498bd66b
|
[] |
permissive
|
hjlarry/flask-shop
|
57d0d7f4ee82753041bdfbbcdd8c75517cffffc3
|
3ef48ed3b1899438df9ca9ae4a8ca8c722eab1f7
|
refs/heads/master
| 2023-07-20T07:19:13.168919
| 2023-07-16T09:35:18
| 2023-07-16T09:35:18
| 137,964,415
| 257
| 117
|
BSD-3-Clause
| 2023-07-16T09:35:19
| 2018-06-20T01:30:23
|
Python
|
UTF-8
|
Python
| false
| false
| 2,018
|
py
|
views.py
|
# -*- coding: utf-8 -*-
"""Product views."""
from flask import Blueprint, jsonify, redirect, render_template, request, url_for
from flask_login import login_required
from pluggy import HookimplMarker
from flaskshop.checkout.models import Cart
from .forms import AddCartForm
from .models import Category, Product, ProductCollection, ProductVariant
impl = HookimplMarker("flaskshop")
def show(id, form=None):
product = Product.get_by_id(id)
if not form:
form = AddCartForm(request.form, product=product)
return render_template("products/details.html", product=product, form=form)
@login_required
def product_add_to_cart(id):
"""this method return to the show method and use a form instance for display validater errors"""
product = Product.get_by_id(id)
form = AddCartForm(request.form, product=product)
if form.validate_on_submit():
Cart.add_to_currentuser_cart(form.quantity.data, form.variant.data)
return redirect(url_for("product.show", id=id))
def variant_price(id):
variant = ProductVariant.get_by_id(id)
return jsonify({"price": float(variant.price), "stock": variant.stock})
def show_category(id):
page = request.args.get("page", 1, type=int)
ctx = Category.get_product_by_category(id, page)
return render_template("category/index.html", **ctx)
def show_collection(id):
page = request.args.get("page", 1, type=int)
ctx = ProductCollection.get_product_by_collection(id, page)
return render_template("category/index.html", **ctx)
@impl
def flaskshop_load_blueprints(app):
bp = Blueprint("product", __name__)
bp.add_url_rule("/<int:id>", view_func=show)
bp.add_url_rule("/api/variant_price/<int:id>", view_func=variant_price)
bp.add_url_rule("/<int:id>/add", view_func=product_add_to_cart, methods=["POST"])
bp.add_url_rule("/category/<int:id>", view_func=show_category)
bp.add_url_rule("/collection/<int:id>", view_func=show_collection)
app.register_blueprint(bp, url_prefix="/products")
|
3ab41de242ae496e897c542a35d7dfff6956a6f8
|
abe6c00f9790df7e6ef20dc02d0b1b225b5020cb
|
/src/prefect/server/database/migrations/versions/postgresql/2022_06_29_135432_813ddf14e2de_add_descriptions_to_deployments.py
|
20265f886952c08b695e79ef3bbbfc90eeb1fe37
|
[
"Apache-2.0"
] |
permissive
|
PrefectHQ/prefect
|
000e6c5f7df80f76a181f0a30f8661c96417c8bd
|
2c50d2b64c811c364cbc5faa2b5c80a742572090
|
refs/heads/main
| 2023-09-05T20:25:42.965208
| 2023-09-05T18:58:06
| 2023-09-05T18:58:06
| 139,199,684
| 12,917
| 1,539
|
Apache-2.0
| 2023-09-14T20:25:45
| 2018-06-29T21:59:26
|
Python
|
UTF-8
|
Python
| false
| false
| 483
|
py
|
2022_06_29_135432_813ddf14e2de_add_descriptions_to_deployments.py
|
"""Add descriptions to deployments.
Revision ID: 813ddf14e2de
Revises: 2f46fc3f3beb
Create Date: 2022-06-29 13:54:32.981105
"""
import sqlalchemy as sa
from alembic import op
# revision identifiers, used by Alembic.
revision = "813ddf14e2de"
down_revision = "2f46fc3f3beb"
branch_labels = None
depends_on = None
def upgrade():
op.add_column("deployment", sa.Column("description", sa.TEXT(), nullable=True))
def downgrade():
op.drop_column("deployment", "description")
|
59b3a03ba54fa9bef2ea13101d9b75d687bc0f7c
|
620323fc090cebaf7aca456ff3f7fbbe1e210394
|
/winapi__windows__ctypes/ctypes__MessageBox.py
|
beaa1d2ec605be422ff76663ae45b6bfef375a87
|
[
"CC-BY-4.0"
] |
permissive
|
gil9red/SimplePyScripts
|
bd2733372728bf9b9f00570e90316fa12116516b
|
773c2c9724edd8827a1dbd91694d780e03fcb05a
|
refs/heads/master
| 2023-08-31T04:26:09.120173
| 2023-08-30T17:22:59
| 2023-08-30T17:22:59
| 22,650,442
| 157
| 46
| null | 2023-09-08T17:51:33
| 2014-08-05T16:19:52
|
Python
|
UTF-8
|
Python
| false
| false
| 838
|
py
|
ctypes__MessageBox.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
__author__ = "ipetrash"
import ctypes
# SOURCE: https://docs.microsoft.com/en-us/windows/desktop/api/Winuser/nf-winuser-messagebox
# int MessageBox(
# HWND hWnd,
# LPCTSTR lpText,
# LPCTSTR lpCaption,
# UINT uType
# );
MessageBox = ctypes.windll.user32.MessageBoxW
MB_ICONWARNING = 0x00000030
MB_CANCELTRYCONTINUE = 0x00000006
MB_DEFBUTTON2 = 0x00000100
IDCANCEL = 2
IDTRYAGAIN = 10
IDCONTINUE = 11
button_id = MessageBox(
None,
"Resource not available\nDo you want to try again?",
"Account Details",
MB_ICONWARNING | MB_CANCELTRYCONTINUE | MB_DEFBUTTON2,
)
print("button_id:", button_id)
if button_id == IDCANCEL:
print("IDCANCEL")
elif button_id == IDTRYAGAIN:
print("IDTRYAGAIN")
elif button_id == IDCONTINUE:
print("IDCONTINUE")
|
e2ff8ff67c0b25d633fa4b7d5245cb51fd44794a
|
55c5ebdfa89ba924fe81323de79002d597d5f4c1
|
/arraytool/src/misc/Fourier_related.py
|
034ecf85171a3c801554ff988a70c69797242c8a
|
[
"BSD-3-Clause"
] |
permissive
|
zinka/arraytool
|
4ce31dc17a25e457974e7220e4024f47f167a051
|
9acc5a90a1771750e70590a3d508e2c24a55be87
|
refs/heads/master
| 2021-01-18T15:18:56.202570
| 2016-10-22T05:42:27
| 2016-10-22T05:42:27
| 871,685
| 116
| 30
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,017
|
py
|
Fourier_related.py
|
#! /usr/bin/env python
# Author: Srinivasa Rao Zinka (srinivas . zinka [at] gmail . com)
# Copyright (c) 2014 Srinivasa Rao Zinka
# License: New BSD License.
import numpy as np
from scipy import integrate
def FS(fun_str_re, fun_str_im='0', T0=2 * np.pi, m_start=-5, m_stop=5, err_lim=1e-8):
"""Function to generate a finite number of Fourier series coefficients of
a periodic function."""
N = m_stop - m_start + 1
FS = np.zeros((N, 1), dtype='complex')
m_index = range(m_start, m_stop + 1)
w0 = 2 * np.pi / T0
for m in m_index:
fun_re = lambda x: (eval(fun_str_re)) * np.cos(m * w0 * x) + (eval(fun_str_im)) * np.sin(m * w0 * x)
fun_img = lambda x:-(eval(fun_str_re)) * np.sin(m * w0 * x) + (eval(fun_str_im)) * np.cos(m * w0 * x)
FS_re = integrate.quad(fun_re, 0, 2 * np.pi)
FS_img = integrate.quad(fun_img, 0, 2 * np.pi)
if ((FS_re[1] + FS_img[1]) < err_lim):
FS[m - m_start] = (1 / T0) * (FS_re[0] + 1j * FS_img[0])
else:
print "Absolute error of the integration is not less than 1e-10 while calculating Fourier series"
print "error(FS_re): ", FS_re[1]
print "error(FS_img): ", FS_img[1]
m_index = np.array(m_index) * (2 * np.pi / T0)
m_index = np.reshape(m_index, (m_index.size, -1))
return m_index, FS
def IFS(FS, T0=2 * np.pi, m_start=-4, m_stop=4, x_min=0, x_max=2 * np.pi, x_num=10):
"""Function to reconstruct (or check) the periodic function from the
obtained Fourier coefficients"""
m = np.arange(m_start, m_stop + 1)
m = np.reshape(m, (-1, m.size))
M = np.tile(m, (x_num, 1))
x = np.linspace(x_min, x_max, num=x_num)
x = np.reshape(x, (x.size, -1))
X = np.tile(x, (1, m.size))
FS = np.reshape(FS, (FS.size,-1))
# Evaluating the inverse of the Fourier series
IFS = np.dot(np.exp(1j * M * (2 * np.pi / T0) * X), FS)
return x, IFS
if __name__ == '__main__':
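    # Illustrative usage sketch (the original __main__ body is not shown here; the
    # function string and coefficient range below are assumptions).
    w_vals, coeffs = FS('np.cos(x)', m_start=-3, m_stop=3)
    x_vals, recon = IFS(coeffs, m_start=-3, m_stop=3, x_num=50)
    print("max reconstruction error:",
          np.max(np.abs(recon.ravel().real - np.cos(x_vals.ravel()))))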
|
dddb5e43d6793a97e9dc12e4ff92f4e97d2dfe10
|
98e36c61361f94c37a909e7c28b5978a232e8748
|
/deepgp/inference/svi_ratio.py
|
9958c4b6ab338fc43d736ec20a305be7459bf798
|
[
"BSD-3-Clause"
] |
permissive
|
SheffieldML/PyDeepGP
|
327e8b7ad6496fc523021a070b41eb0d7585aa7d
|
f2a1f568a7462633a58ed433520dcf7f0c98515c
|
refs/heads/master
| 2023-04-12T11:53:58.712752
| 2021-05-04T14:29:51
| 2021-05-04T14:29:51
| 57,047,931
| 227
| 64
|
BSD-3-Clause
| 2019-10-08T19:07:50
| 2016-04-25T14:21:54
|
Python
|
UTF-8
|
Python
| false
| false
| 7,212
|
py
|
svi_ratio.py
|
#from .posterior import Posterior
from GPy.util.linalg import jitchol, backsub_both_sides, tdot, dtrtrs, dtrtri,pdinv
from GPy.util import diag
from GPy.core.parameterization.variational import VariationalPosterior
import numpy as np
from GPy.inference.latent_function_inference import LatentFunctionInference
from GPy.inference.latent_function_inference.posterior import Posterior
log_2_pi = np.log(2*np.pi)
try:
from mpi4py import MPI
except ImportError:
pass
class SVI_Ratio(LatentFunctionInference):
"""
    Infer the marginal likelihood through the ratio p(y, y*) / p(y).
"""
const_jitter = 1e-6
def __init__(self, mpi_comm=None):
self.mpi_comm = mpi_comm
def get_trYYT(self, Y):
return np.sum(np.square(Y))
def get_YYTfactor(self, Y):
N, D = Y.shape
if (N>=D):
return Y.view(np.ndarray)
else:
return jitchol(tdot(Y))
def gatherPsiStat(self, kern, X, Z, Y, beta, uncertain_inputs, D, missing_data):
assert beta.size == 1
if uncertain_inputs:
psi0 = kern.psi0(Z, X)
psi1 = kern.psi1(Z, X)*beta
psi2 = kern.psi2(Z, X)*beta if not missing_data else kern.psi2n(Z, X)*beta
else:
psi0 = kern.Kdiag(X)
psi1 = kern.K(X, Z)
if missing_data:
psi2 = psi1[:,None,:]*psi1[:,:,None]*beta
else:
psi2 = tdot(psi1.T)*beta
psi1 = psi1*beta
if isinstance(Y, VariationalPosterior):
m, s = Y.mean, Y.variance
psi1Y = np.dot(m.T,psi1) # DxM
YRY = (np.square(m).sum()+s.sum())*beta
psi0 = (D*psi0).sum()*beta
elif missing_data:
psi1Y = np.dot((Y).T,psi1) # DxM
trYYT = self.get_trYYT(Y)
YRY = trYYT*beta
psi0 = (psi0*D).sum()*beta
else:
psi1Y = np.dot(Y.T,psi1) # DxM
trYYT = self.get_trYYT(Y)
YRY = trYYT*beta
psi0 = (psi0*D).sum()*beta
return psi0, psi2, YRY, psi1, psi1Y
def inference(self, kern, X, Z, likelihood, Y, qU):
"""
The SVI-VarDTC inference
"""
if isinstance(Y, np.ndarray) and np.any(np.isnan(Y)):
missing_data = True
N, M, Q = Y.shape[0], Z.shape[0], Z.shape[1]
Ds = Y.shape[1] - (np.isnan(Y)*1).sum(1)
Ymask = 1-np.isnan(Y)*1
Y_masked = np.zeros_like(Y)
Y_masked[Ymask==1] = Y[Ymask==1]
ND = Ymask.sum()
else:
missing_data = False
N, D, M, Q = Y.shape[0], Y.shape[1], Z.shape[0], Z.shape[1]
ND = N*D
uncertain_inputs = isinstance(X, VariationalPosterior)
uncertain_outputs = isinstance(Y, VariationalPosterior)
beta = 1./np.fmax(likelihood.variance, 1e-6)
psi0, psi2, YRY, psi1, psi1Y = self.gatherPsiStat(kern, X, Z, Y if not missing_data else Y_masked, beta, uncertain_inputs, D if not missing_data else Ds, missing_data)
#======================================================================
# Compute Common Components
#======================================================================
mu, S = qU.mean, qU.covariance
mupsi1Y = mu.dot(psi1Y)
Kmm = kern.K(Z).copy()
diag.add(Kmm, self.const_jitter)
Lm = jitchol(Kmm)
if missing_data:
S_mu = S[None,:,:]+mu.T[:,:,None]*mu.T[:,None,:]
NS_mu = S_mu.T.dot(Ymask.T).T
LmInv = dtrtri(Lm)
LmInvPsi2LmInvT = np.swapaxes(psi2.dot(LmInv.T),1,2).dot(LmInv.T)
LmInvSmuLmInvT = np.swapaxes(NS_mu.dot(LmInv.T),1,2).dot(LmInv.T)
B = mupsi1Y+ mupsi1Y.T +(Ds[:,None,None]*psi2).sum(0)
tmp = backsub_both_sides(Lm, B,'right')
logL = -ND*log_2_pi/2. +ND*np.log(beta)/2. - psi0/2. - YRY/2. \
-(LmInvSmuLmInvT*LmInvPsi2LmInvT).sum()/2. +np.trace(tmp)/2.
else:
S_mu = S*D+tdot(mu)
if uncertain_inputs:
LmInvPsi2LmInvT = backsub_both_sides(Lm, psi2, 'right')
else:
LmInvPsi2LmInvT = tdot(dtrtrs(Lm, psi1.T)[0])/beta #tdot(psi1.dot(LmInv.T).T) /beta
LmInvSmuLmInvT = backsub_both_sides(Lm, S_mu, 'right')
B = mupsi1Y+ mupsi1Y.T +D*psi2
tmp = backsub_both_sides(Lm, B,'right')
logL = -ND*log_2_pi/2. +ND*np.log(beta)/2. - psi0/2. - YRY/2. \
-(LmInvSmuLmInvT*LmInvPsi2LmInvT).sum()/2. +np.trace(tmp)/2.
#======================================================================
# Compute dL_dKmm
#======================================================================
dL_dKmm = np.eye(M)
#======================================================================
        # Compute dL_dthetaL for uncertain inputs and non-heteroscedastic noise
#======================================================================
dL_dthetaL = None #(YRY*beta + beta*output_dim*psi0 - num_data*output_dim*beta)/2. - beta*(dL_dpsi2R*psi2).sum() - beta*np.trace(LLinvPsi1TYYTPsi1LLinvT)
#======================================================================
# Compute dL_dpsi
#======================================================================
if missing_data:
dL_dpsi0 = -Ds * (beta * np.ones((N,)))/2.
else:
dL_dpsi0 = -D * (beta * np.ones((N,)))/2.
if uncertain_outputs:
Ym,Ys = Y.mean, Y.variance
dL_dpsi1 = dtrtrs(Lm, dtrtrs(Lm, Ym.dot(mu.T).T)[0], trans=1)[0].T*beta
else:
if missing_data:
dL_dpsi1 = dtrtrs(Lm, dtrtrs(Lm, (Y_masked).dot(mu.T).T)[0], trans=1)[0].T*beta
else:
dL_dpsi1 = dtrtrs(Lm, dtrtrs(Lm, Y.dot(mu.T).T)[0], trans=1)[0].T*beta
if uncertain_inputs:
if missing_data:
dL_dpsi2 = np.swapaxes((Ds[:,None,None]*np.eye(M)[None,:,:]-LmInvSmuLmInvT).dot(LmInv),1,2).dot(LmInv)*beta/2.
else:
dL_dpsi2 = beta*backsub_both_sides(Lm, D*np.eye(M)-LmInvSmuLmInvT, 'left')/2.
else:
dL_dpsi1 += beta*psi1.dot(dL_dpsi2+dL_dpsi2.T)
dL_dpsi2 = None
if uncertain_inputs:
grad_dict = {'dL_dKmm': dL_dKmm,
'dL_dpsi0':dL_dpsi0,
'dL_dpsi1':dL_dpsi1,
'dL_dpsi2':dL_dpsi2,
'dL_dthetaL':dL_dthetaL}
else:
grad_dict = {'dL_dKmm': dL_dKmm,
'dL_dKdiag':dL_dpsi0,
'dL_dKnm':dL_dpsi1,
'dL_dthetaL':dL_dthetaL}
if uncertain_outputs:
Ym = Y.mean
grad_dict['dL_dYmean'] = -Ym*beta+ dtrtrs(Lm,psi1.T)[0].T.dot(dtrtrs(Lm,mu)[0])
grad_dict['dL_dYvar'] = beta/-2.
return logL, grad_dict
|
46e5ebcffc8aa30d6e8a59b1215f9277bc17eda3
|
f576f0ea3725d54bd2551883901b25b863fe6688
|
/sdk/search/azure-search-documents/tests/async_tests/test_search_indexer_client_live_async.py
|
0a59e36daa6fdc4a474f3519b85e8afa0d686dd0
|
[
"MIT",
"LicenseRef-scancode-generic-cla",
"LGPL-2.1-or-later"
] |
permissive
|
Azure/azure-sdk-for-python
|
02e3838e53a33d8ba27e9bcc22bd84e790e4ca7c
|
c2ca191e736bb06bfbbbc9493e8325763ba990bb
|
refs/heads/main
| 2023-09-06T09:30:13.135012
| 2023-09-06T01:08:06
| 2023-09-06T01:08:06
| 4,127,088
| 4,046
| 2,755
|
MIT
| 2023-09-14T21:48:49
| 2012-04-24T16:46:12
|
Python
|
UTF-8
|
Python
| false
| false
| 8,005
|
py
|
test_search_indexer_client_live_async.py
|
# -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
import pytest
from azure.core import MatchConditions
from azure.core.exceptions import HttpResponseError
from azure.search.documents.indexes.aio import SearchIndexClient, SearchIndexerClient
from azure.search.documents.indexes.models import (
SearchIndex,
SearchIndexer,
SearchIndexerDataContainer,
SearchIndexerDataSourceConnection,
)
from devtools_testutils import AzureRecordedTestCase
from devtools_testutils.aio import recorded_by_proxy_async
from search_service_preparer import SearchEnvVarPreparer, search_decorator
class TestSearchIndexerClientTestAsync(AzureRecordedTestCase):
@SearchEnvVarPreparer()
@search_decorator(schema="hotel_schema.json", index_batch="hotel_small.json")
@recorded_by_proxy_async
async def test_search_indexers(self, endpoint, api_key, **kwargs):
storage_cs = kwargs.get("search_storage_connection_string")
container_name = kwargs.get("search_storage_container_name")
client = SearchIndexerClient(endpoint, api_key, retry_backoff_factor=60)
index_client = SearchIndexClient(endpoint, api_key, retry_backoff_factor=60)
async with client:
async with index_client:
await self._test_create_indexer(client, index_client, storage_cs, container_name)
await self._test_delete_indexer(client, index_client, storage_cs, container_name)
await self._test_get_indexer(client, index_client, storage_cs, container_name)
await self._test_list_indexer(client, index_client, storage_cs, container_name)
await self._test_create_or_update_indexer(client, index_client, storage_cs, container_name)
await self._test_reset_indexer(client, index_client, storage_cs, container_name)
await self._test_run_indexer(client, index_client, storage_cs, container_name)
await self._test_get_indexer_status(client, index_client, storage_cs, container_name)
await self._test_create_or_update_indexer_if_unchanged(client, index_client, storage_cs, container_name)
await self._test_delete_indexer_if_unchanged(client, index_client, storage_cs, container_name)
async def _prepare_indexer(self, client, index_client, storage_cs, name, container_name):
data_source_connection = SearchIndexerDataSourceConnection(
name=f"{name}-ds",
type="azureblob",
connection_string=storage_cs,
container=SearchIndexerDataContainer(name=container_name),
)
ds = await client.create_data_source_connection(data_source_connection)
fields = [{"name": "hotelId", "type": "Edm.String", "key": True, "searchable": False}]
index = SearchIndex(name=f"{name}-hotels", fields=fields)
ind = await index_client.create_index(index)
return SearchIndexer(name=name, data_source_name=ds.name, target_index_name=ind.name)
async def _test_create_indexer(self, client, index_client, storage_cs, container_name):
name = "create"
indexer = await self._prepare_indexer(client, index_client, storage_cs, name, container_name)
result = await client.create_indexer(indexer)
assert result.name == name
assert result.target_index_name == f"{name}-hotels"
assert result.data_source_name == f"{name}-ds"
async def _test_delete_indexer(self, client, index_client, storage_cs, container_name):
name = "delete"
indexer = await self._prepare_indexer(client, index_client, storage_cs, name, container_name)
await client.create_indexer(indexer)
expected = len(await client.get_indexers()) - 1
await client.delete_indexer(name)
assert len(await client.get_indexers()) == expected
async def _test_get_indexer(self, client, index_client, storage_cs, container_name):
name = "get"
indexer = await self._prepare_indexer(client, index_client, storage_cs, name, container_name)
await client.create_indexer(indexer)
result = await client.get_indexer(name)
assert result.name == name
async def _test_list_indexer(self, client, index_client, storage_cs, container_name):
name1 = "list1"
name2 = "list2"
indexer1 = await self._prepare_indexer(client, index_client, storage_cs, name1, container_name)
indexer2 = await self._prepare_indexer(client, index_client, storage_cs, name2, container_name)
await client.create_indexer(indexer1)
await client.create_indexer(indexer2)
result = await client.get_indexers()
assert isinstance(result, list)
assert set(x.name for x in result).intersection([name1, name2]) == set([name1, name2])
async def _test_create_or_update_indexer(self, client, index_client, storage_cs, container_name):
name = "cou"
indexer = await self._prepare_indexer(client, index_client, storage_cs, name, container_name)
await client.create_indexer(indexer)
expected = len(await client.get_indexers())
indexer.description = "updated"
await client.create_or_update_indexer(indexer)
assert len(await client.get_indexers()) == expected
result = await client.get_indexer(name)
assert result.name == name
assert result.description == "updated"
async def _test_reset_indexer(self, client, index_client, storage_cs, container_name):
name = "reset"
indexer = await self._prepare_indexer(client, index_client, storage_cs, name, container_name)
await client.create_indexer(indexer)
await client.reset_indexer(name)
assert (await client.get_indexer_status(name)).last_result.status.lower() in ("inprogress", "reset")
async def _test_run_indexer(self, client, index_client, storage_cs, container_name):
name = "run"
indexer = await self._prepare_indexer(client, index_client, storage_cs, name, container_name)
await client.create_indexer(indexer)
await client.run_indexer(name)
assert (await client.get_indexer_status(name)).status == "running"
async def _test_get_indexer_status(self, client, index_client, storage_cs, container_name):
name = "get-status"
indexer = await self._prepare_indexer(client, index_client, storage_cs, name, container_name)
await client.create_indexer(indexer)
status = await client.get_indexer_status(name)
assert status.status is not None
async def _test_create_or_update_indexer_if_unchanged(self, client, index_client, storage_cs, container_name):
name = "couunch"
indexer = await self._prepare_indexer(client, index_client, storage_cs, name, container_name)
created = await client.create_indexer(indexer)
etag = created.e_tag
indexer.description = "updated"
await client.create_or_update_indexer(indexer)
indexer.e_tag = etag
with pytest.raises(HttpResponseError):
await client.create_or_update_indexer(indexer, match_condition=MatchConditions.IfNotModified)
async def _test_delete_indexer_if_unchanged(self, client, index_client, storage_cs, container_name):
name = "delunch"
indexer = await self._prepare_indexer(client, index_client, storage_cs, name, container_name)
result = await client.create_indexer(indexer)
etag = result.e_tag
indexer.description = "updated"
await client.create_or_update_indexer(indexer)
indexer.e_tag = etag
with pytest.raises(HttpResponseError):
await client.delete_indexer(indexer, match_condition=MatchConditions.IfNotModified)
|
5ff9d312895efc0043353f9673e703ba6d1a187a
|
974d04d2ea27b1bba1c01015a98112d2afb78fe5
|
/test/legacy_test/test_dist_tree_index.py
|
b336de40cb02296de94e88c650daccef277a6b79
|
[
"Apache-2.0"
] |
permissive
|
PaddlePaddle/Paddle
|
b3d2583119082c8e4b74331dacc4d39ed4d7cff0
|
22a11a60e0e3d10a3cf610077a3d9942a6f964cb
|
refs/heads/develop
| 2023-08-17T21:27:30.568889
| 2023-08-17T12:38:22
| 2023-08-17T12:38:22
| 65,711,522
| 20,414
| 5,891
|
Apache-2.0
| 2023-09-14T19:20:51
| 2016-08-15T06:59:08
|
C++
|
UTF-8
|
Python
| false
| false
| 5,876
|
py
|
test_dist_tree_index.py
|
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import tempfile
import unittest
import paddle
from paddle.dataset.common import download
from paddle.distributed.fleet.dataset import TreeIndex
paddle.enable_static()
def create_feeds():
user_input = paddle.static.data(
name="item_id", shape=[-1, 1], dtype="int64", lod_level=1
)
item = paddle.static.data(
name="unit_id", shape=[-1, 1], dtype="int64", lod_level=1
)
label = paddle.static.data(
name="label", shape=[-1, 1], dtype="int64", lod_level=1
)
labels = paddle.static.data(
name="labels", shape=[-1, 1], dtype="int64", lod_level=1
)
feed_list = [user_input, item, label, labels]
return feed_list
class TestTreeIndex(unittest.TestCase):
def test_tree_index(self):
path = download(
"https://paddlerec.bj.bcebos.com/tree-based/data/mini_tree.pb",
"tree_index_unittest",
"e2ba4561c2e9432b532df40546390efa",
)
'''
path = download(
"https://paddlerec.bj.bcebos.com/tree-based/data/mini_tree.pb",
"tree_index_unittest", "cadec20089f5a8a44d320e117d9f9f1a")
'''
tree = TreeIndex("demo", path)
height = tree.height()
branch = tree.branch()
self.assertTrue(height == 5)
self.assertTrue(branch == 2)
self.assertEqual(tree.total_node_nums(), 25)
self.assertEqual(tree.emb_size(), 30)
# get_layer_codes
layer_node_ids = []
layer_node_codes = []
for i in range(tree.height()):
layer_node_codes.append(tree.get_layer_codes(i))
layer_node_ids.append(
[node.id() for node in tree.get_nodes(layer_node_codes[-1])]
)
all_leaf_ids = [node.id() for node in tree.get_all_leafs()]
self.assertEqual(sum(all_leaf_ids), sum(layer_node_ids[-1]))
# get_travel
travel_codes = tree.get_travel_codes(all_leaf_ids[0])
travel_ids = [node.id() for node in tree.get_nodes(travel_codes)]
for i in range(height):
self.assertIn(travel_ids[i], layer_node_ids[height - 1 - i])
self.assertIn(travel_codes[i], layer_node_codes[height - 1 - i])
# get_ancestor
ancestor_codes = tree.get_ancestor_codes([all_leaf_ids[0]], height - 2)
ancestor_ids = [node.id() for node in tree.get_nodes(ancestor_codes)]
self.assertEqual(ancestor_ids[0], travel_ids[1])
self.assertEqual(ancestor_codes[0], travel_codes[1])
# get_pi_relation
pi_relation = tree.get_pi_relation([all_leaf_ids[0]], height - 2)
self.assertEqual(pi_relation[all_leaf_ids[0]], ancestor_codes[0])
# get_travel_path
travel_path_codes = tree.get_travel_path(
travel_codes[0], travel_codes[-1]
)
travel_path_ids = [
node.id() for node in tree.get_nodes(travel_path_codes)
]
self.assertEqual(travel_path_ids + [travel_ids[-1]], travel_ids)
self.assertEqual(travel_path_codes + [travel_codes[-1]], travel_codes)
# get_children
children_codes = tree.get_children_codes(travel_codes[1], height - 1)
children_ids = [node.id() for node in tree.get_nodes(children_codes)]
self.assertIn(all_leaf_ids[0], children_ids)
class TestIndexSampler(unittest.TestCase):
def setUp(self):
self.temp_dir = tempfile.TemporaryDirectory()
def tearDown(self):
self.temp_dir.cleanup()
def test_layerwise_sampler(self):
path = download(
"https://paddlerec.bj.bcebos.com/tree-based/data/mini_tree.pb",
"tree_index_unittest",
"e2ba4561c2e9432b532df40546390efa",
)
tdm_layer_counts = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
# tree = TreeIndex("demo", path)
file_name = os.path.join(
self.temp_dir.name, "test_in_memory_dataset_tdm_sample_run.txt"
)
with open(file_name, "w") as f:
# data = "29 d 29 d 29 29 29 29 29 29 29 29 29 29 29 29\n"
data = "1 1 1 15 15 15\n"
data += "1 1 1 15 15 15\n"
f.write(data)
slots = ["slot1", "slot2", "slot3"]
slots_vars = []
for slot in slots:
var = paddle.static.data(name=slot, shape=[-1, 1], dtype="int64")
slots_vars.append(var)
dataset = paddle.distributed.InMemoryDataset()
dataset.init(
batch_size=1,
pipe_command="cat",
download_cmd="cat",
use_var=slots_vars,
)
dataset.set_filelist([file_name])
# dataset.update_settings(pipe_command="cat")
# dataset._init_distributed_settings(
# parse_ins_id=True,
# parse_content=True,
# fea_eval=True,
# candidate_size=10000)
dataset.load_into_memory()
dataset.tdm_sample(
'demo',
tree_path=path,
tdm_layer_counts=tdm_layer_counts,
start_sample_layer=1,
with_hierachy=False,
seed=0,
id_slot=2,
)
self.assertTrue(dataset.get_shuffle_data_size() == 8)
if __name__ == '__main__':
unittest.main()
|
e22126b163cfbb7b1ff137d14beb67498c62f84e
|
86f3973554eb61b12528835851cbdc96aba9ccc0
|
/io_scene_xray/prefs/ops.py
|
76bc7ee288dd8937b1447a0683e923be097f1c67
|
[
"BSD-3-Clause",
"BSD-2-Clause"
] |
permissive
|
PavelBlend/blender-xray
|
02d68e424ae9088221bafc1d0d9019690323d9da
|
a3abb9eb805182eec8ed8de4058dd744aee0e291
|
refs/heads/develop
| 2023-09-03T15:10:56.022070
| 2023-08-22T17:50:23
| 2023-08-22T17:50:23
| 20,459,902
| 150
| 40
|
BSD-2-Clause
| 2023-08-10T15:01:24
| 2014-06-03T21:12:53
|
Python
|
UTF-8
|
Python
| false
| false
| 1,889
|
py
|
ops.py
|
# blender modules
import bpy
# addon modules
from . import props
from .. import utils
class XRAY_OT_reset_prefs_settings(utils.ie.BaseOperator):
bl_idname = 'io_scene_xray.reset_preferences_settings'
bl_label = 'Reset All Settings'
def execute(self, context):
prefs = utils.version.get_preferences()
# reset main settings
for prop_name in props.plugin_preferences_props:
prefs.property_unset(prop_name)
# reset custom properties settings
for prop_name in props.xray_custom_properties:
prefs.custom_props.property_unset(prop_name)
return {'FINISHED'}
def invoke(self, context, event): # pragma: no cover
return context.window_manager.invoke_confirm(self, event)
op_props = {
'path': bpy.props.StringProperty(),
}
class XRAY_OT_explicit_path(utils.ie.BaseOperator):
bl_idname = 'io_scene_xray.explicit_path'
bl_label = 'Make Explicit'
bl_description = 'Make this path explicit using the automatically calculated value'
props = op_props
if not utils.version.IS_28:
for prop_name, prop_value in props.items():
exec('{0} = props.get("{0}")'.format(prop_name))
def execute(self, context):
pref = utils.version.get_preferences()
if pref.paths_mode == 'BASE':
settings = pref
else:
settings = pref.paths_presets[pref.paths_presets_index]
auto_prop = props.build_auto_id(self.path)
value = getattr(settings, auto_prop)
setattr(settings, self.path, value)
setattr(settings, auto_prop, '')
return {'FINISHED'}
classes = (
XRAY_OT_explicit_path,
XRAY_OT_reset_prefs_settings
)
def register():
utils.version.register_operators(classes)
def unregister():
for clas in reversed(classes):
bpy.utils.unregister_class(clas)
|
14bcdcbc9211b037794363f441dd96eea5d406d5
|
f7fc6aef0330115cc46af1ad7e68f98402bda86c
|
/src/package-py/02/zipf.py
|
c004bc8cbb0943448e5259ed4c17e56c1b751c02
|
[
"LicenseRef-scancode-other-permissive",
"MIT",
"CC-BY-4.0",
"LicenseRef-scancode-public-domain"
] |
permissive
|
merely-useful/py-rse
|
56a55ebbca21bb16a4800b26c000cf903d50874f
|
f732112a4a6569f08d92eb10d95ab289f22a442f
|
refs/heads/book
| 2023-05-23T03:44:48.319108
| 2021-11-09T21:55:12
| 2021-11-09T21:55:12
| 144,218,444
| 212
| 51
|
NOASSERTION
| 2021-11-09T21:55:16
| 2018-08-10T00:52:26
|
TeX
|
UTF-8
|
Python
| false
| false
| 742
|
py
|
zipf.py
|
import sys
from pytest import approx
USAGE = '''zipf num [num...]: are the given values Zipfy?'''
RELATIVE_ERROR = 0.05
def make_zipf(length):
assert length > 0, 'Zipf distribution must have at least one element'
result = [1/(1 + i) for i in range(length)]
return result
def is_zipf(hist, rel=RELATIVE_ERROR):
assert len(hist) > 0, 'Cannot test Zipfiness without data'
scaled = [h/hist[0] for h in hist]
perfect = make_zipf(len(hist))
return scaled == approx(perfect, rel=rel)
if __name__ == '__main__':
if len(sys.argv) == 1:
print(USAGE)
else:
values = [int(a) for a in sys.argv[1:]]
result = is_zipf(values)
print('{}: {}'.format(result, values))
sys.exit(0)
|
226927449c9371d22851a45b2e650bd054bbbbc7
|
3a50c0712e0a31b88d0a5e80a0c01dbefc6a6e75
|
/thrift/test/py/adapter_test.py
|
71e81b8607b58e63dd479f11d3d2e263fe9ac74b
|
[
"Apache-2.0"
] |
permissive
|
facebook/fbthrift
|
3b7b94a533666c965ce69cfd6054041218b1ea6f
|
53cf6f138a7648efe5aef9a263aabed3d282df91
|
refs/heads/main
| 2023-08-24T12:51:32.367985
| 2023-08-24T08:28:35
| 2023-08-24T08:28:35
| 11,131,631
| 2,347
| 666
|
Apache-2.0
| 2023-09-01T01:44:39
| 2013-07-02T18:15:51
|
C++
|
UTF-8
|
Python
| false
| false
| 3,977
|
py
|
adapter_test.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from unittest.mock import patch
from thrift.protocol import ( # type: ignore # noqa: F401
fastproto,
TBinaryProtocol,
TCompactProtocol,
TJSONProtocol,
TSimpleJSONProtocol,
)
from thrift.util import Serializer
from .adapter.ttypes import Foo, FooWithoutAdapters
from .adapter_bar.ttypes import Bar
PROTOCOLS = [
TBinaryProtocol.TBinaryProtocolFactory(),
TBinaryProtocol.TBinaryProtocolAcceleratedFactory(),
TCompactProtocol.TCompactProtocolFactory(),
TCompactProtocol.TCompactProtocolAcceleratedFactory(),
TJSONProtocol.TJSONProtocolFactory(),
TSimpleJSONProtocol.TSimpleJSONProtocolFactory(),
]
class AdapterTest(unittest.TestCase):
def test_roundtrip(self) -> None:
INPUTS = {
"empty": (Foo(), FooWithoutAdapters()),
"default_values": (
Foo(
structField={},
oStructField={},
mapField={},
),
FooWithoutAdapters(
structField=Bar(),
oStructField=Bar(),
mapField={},
),
),
"basic": (
Foo(
structField={"field": 42},
oStructField={"field": 43},
mapField={
1: {"field": 44},
2: {"field": 45},
},
),
FooWithoutAdapters(
structField=Bar(field=42),
oStructField=Bar(field=43),
mapField={
1: Bar(field=44),
2: Bar(field=45),
},
),
),
}
for protocol in PROTOCOLS:
for (name, (foo, foo_without_adapters)) in INPUTS.items():
with self.subTest(case=name, protocol=type(protocol).__name__):
serialized = Serializer.serialize(protocol, foo)
deserialized = Serializer.deserialize(protocol, serialized, Foo())
self.assertEqual(deserialized, foo)
no_adapter = Serializer.deserialize(
protocol, serialized, FooWithoutAdapters()
)
self.assertEqual(no_adapter, foo_without_adapters)
def test_exception_safety(self) -> None:
for protocol in PROTOCOLS:
with self.subTest(protocol=type(protocol).__name__):
foo = Foo(structField={})
with patch(
"thrift.test.py.adapter_for_tests.AdapterTestStructToDict.to_thrift"
) as mock_to_thrift, self.assertRaises(RuntimeError):
mock_to_thrift.side_effect = RuntimeError()
Serializer.serialize(protocol, foo)
serialized = Serializer.serialize(
protocol,
FooWithoutAdapters(structField=Bar()),
)
with patch(
"thrift.test.py.adapter_for_tests.AdapterTestStructToDict.from_thrift"
) as mock_from_thrift, self.assertRaises(RuntimeError):
mock_from_thrift.side_effect = RuntimeError()
Serializer.deserialize(protocol, serialized, Foo())
|
0f635979a404081e61d0074fbfdd74530d909fd1
|
568910f475177d168c4a0cf7bf2744ad782e37b3
|
/docs/source/code_snippets/tg51_class.py
|
f9f0bedfbfc4f74d78fd15e305d0ee47f9769651
|
[
"MIT"
] |
permissive
|
jrkerns/pylinac
|
2366304d318d3a274d8236cb9c3fa0245265c5ed
|
5c2cfe971f2f6a8d27b0d81159e0f1bacba007b8
|
refs/heads/master
| 2023-08-13T15:02:07.959095
| 2023-08-08T21:21:26
| 2023-08-08T21:21:26
| 25,171,290
| 129
| 96
|
MIT
| 2023-09-12T16:58:59
| 2014-10-13T18:17:21
|
Python
|
UTF-8
|
Python
| false
| false
| 1,158
|
py
|
tg51_class.py
|
"""A script to calculate TG-51 dose using pylinac classes and following the TG-51 photon form"""
from pylinac.calibration import tg51
ENERGY = 6
TEMP = 22.1
PRESS = tg51.mmHg2kPa(755.0)
CHAMBER = "30013" # PTW
P_ELEC = 1.000
ND_w = 5.443 # Gy/nC
MU = 200
CLINICAL_PDD = 66.5
tg51_6x = tg51.TG51Photon(
unit="TrueBeam1",
chamber=CHAMBER,
temp=TEMP,
press=PRESS,
n_dw=ND_w,
p_elec=P_ELEC,
measured_pdd10=66.4,
lead_foil=None,
clinical_pdd10=66.5,
energy=ENERGY,
voltage_reference=-300,
voltage_reduced=-150,
m_reference=(25.65, 25.66, 25.65),
m_opposite=(25.64, 25.65, 25.65),
m_reduced=(25.64, 25.63, 25.63),
mu=MU,
tissue_correction=1.0,
)
# Done!
print(tg51_6x.dose_mu_dmax)
# examine other parameters
print(tg51_6x.pddx)
print(tg51_6x.kq)
print(tg51_6x.p_ion)
# change readings if you adjust output
tg51_6x.m_reference_adjusted = (25.44, 25.44, 25.43)
# print new dose value
print(tg51_6x.dose_mu_dmax_adjusted)
# generate a PDF for record-keeping
tg51_6x.publish_pdf(
"TB1 6MV TG-51.pdf",
notes=["My notes", "I used Pylinac to do this; so easy!"],
open_file=False,
)
|
67e44407a01b63a65c613d4c34b3a98e7ca7ce8c
|
99fcc34c668b9728fc8488d30774d4dcbb7da6fd
|
/backend/django/core/migrations/0070_merge_20220727_1406.py
|
f5f4fcb8c9ab4af61d0ebae16622ffea6afeaffc
|
[
"MIT"
] |
permissive
|
RTIInternational/SMART
|
7ce21daafb94b698bde2147b2859ef791271af8e
|
cfbb7f001cb1774f9cd6cc9e72b8abdf74cb1323
|
refs/heads/master
| 2023-09-04T08:12:04.525439
| 2023-04-28T19:30:02
| 2023-04-28T19:30:02
| 150,633,826
| 221
| 35
|
MIT
| 2023-08-25T19:35:14
| 2018-09-27T18:53:25
|
Python
|
UTF-8
|
Python
| false
| false
| 274
|
py
|
0070_merge_20220727_1406.py
|
# Generated by Django 3.2.9 on 2022-07-27 14:06
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
("core", "0069_externaldatabase_daily_ingest"),
("core", "0069_merge_20220628_1849"),
]
operations = []
|
4fbc3f598e8e992a25a604f865b5c026ede553d7
|
ce1c91c33d9b612e97361527e5a974996208c90d
|
/glue/utils/geometry.py
|
2b6ae1dbaf33dea02a6998beddff9b9872baf1b3
|
[
"BSD-3-Clause"
] |
permissive
|
glue-viz/glue
|
5f52faaf91e1ca4822d3983b6a4b9b60e8807f38
|
1a5c7676c025a1a025068b806f6f90ed53bba543
|
refs/heads/main
| 2023-09-04T09:24:00.519833
| 2023-08-17T09:40:04
| 2023-08-17T09:40:04
| 1,768,238
| 609
| 149
|
NOASSERTION
| 2023-09-13T20:56:14
| 2011-05-18T20:58:54
|
Python
|
UTF-8
|
Python
| false
| false
| 4,865
|
py
|
geometry.py
|
import numpy as np
from glue.utils import unbroadcast
__all__ = ['points_inside_poly', 'polygon_line_intersections', 'floodfill', 'rotation_matrix_2d']
def rotation_matrix_2d(alpha):
"""
Return rotation matrix for angle alpha around origin.
Parameters
----------
alpha : float
        Rotation angle in radians, increasing for anticlockwise rotation.
"""
if np.asarray(alpha).ndim > 0:
# In principle this works on an array as well; would have to return matrix.T then
raise ValueError("Only scalar input for angle accepted")
return np.array([[np.cos(alpha), -np.sin(alpha)], [np.sin(alpha), np.cos(alpha)]])
def points_inside_poly(x, y, vx, vy):
"""
Test if coordinates ``x``, ``y`` fall inside polygon of vertices ``vx``, ``vy``.
Parameters
----------
x, y : `~numpy.ndarray`
Coordinates of the points to test
vx, vy : `~numpy.ndarray`
The vertices of the polygon
Returns
-------
contains : `~numpy.ndarray` of bool
Array indicating whether each coordinate pair is inside the polygon.
"""
if x.dtype.kind == 'M' and vx.dtype.kind == 'M':
vx = vx.astype(x.dtype).astype(float)
x = x.astype(float)
if y.dtype.kind == 'M' and vy.dtype.kind == 'M':
vy = vy.astype(y.dtype).astype(float)
y = y.astype(float)
original_shape = x.shape
x = unbroadcast(x)
y = unbroadcast(y)
x = x.astype(float)
y = y.astype(float)
x, y = np.broadcast_arrays(x, y)
reduced_shape = x.shape
x = x.flat
y = y.flat
from matplotlib.path import Path
p = Path(np.column_stack((vx, vy)))
keep = ((x >= np.min(vx)) &
(x <= np.max(vx)) &
(y >= np.min(vy)) &
(y <= np.max(vy)))
inside = np.zeros(len(x), bool)
x = x[keep]
y = y[keep]
coords = np.column_stack((x, y))
    good = np.isfinite(x) & np.isfinite(y)
    # Assigning through the chained fancy index inside[keep][~good] would only write
    # to a temporary copy, so fold the finiteness mask into the containment test.
    inside[keep] = p.contains_points(coords).astype(bool) & good
inside = inside.reshape(reduced_shape)
inside = np.broadcast_to(inside, original_shape)
return inside
def polygon_line_intersections(px, py, xval=None, yval=None):
"""
Find all the segments of intersection between a polygon and an infinite
horizontal/vertical line.
The polygon is assumed to be closed. Due to numerical precision, the
behavior at the edges of polygons is not always predictable, i.e. a point
on the edge of a polygon may be considered inside or outside the polygon.
Parameters
----------
px, py : `~numpy.ndarray`
The vertices of the polygon
xval : float, optional
The x coordinate of the line (for vertical lines). This should only be
specified if yval is not specified.
yval : float, optional
The y coordinate of the line (for horizontal lines). This should only be
specified if xval is not specified.
Returns
-------
segments : list
A list of segments given as tuples of coordinates along the line.
"""
if xval is not None and yval is not None:
raise ValueError("Only one of xval or yval should be specified")
elif xval is None and yval is None:
raise ValueError("xval or yval should be specified")
if yval is not None:
return polygon_line_intersections(py, px, xval=yval)
px = np.asarray(px, dtype=float)
py = np.asarray(py, dtype=float)
# Make sure that the polygon is closed
if px[0] != px[-1] or py[0] != py[-1]:
px = np.hstack([px, px[0]])
py = np.hstack([py, py[0]])
# For convenience
x1, x2 = px[:-1], px[1:]
y1, y2 = py[:-1], py[1:]
# Vertices that intersect
keep1 = (px == xval)
points1 = py[keep1]
# Segments (excluding vertices) that intersect
keep2 = ((x1 < xval) & (x2 > xval)) | ((x2 < xval) & (x1 > xval))
points2 = (y1 + (y2 - y1) * (xval - x1) / (x2 - x1))[keep2]
# Make unique and sort
points = np.array(np.sort(np.unique(np.hstack([points1, points2]))))
# Because of various corner cases, we don't actually know which pairs of
# points are inside the polygon, so we check this using the mid-points
ymid = 0.5 * (points[:-1] + points[1:])
xmid = np.repeat(xval, len(ymid))
keep = points_inside_poly(xmid, ymid, px, py)
segments = list(zip(points[:-1][keep], points[1:][keep]))
return segments
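# Illustrative usage sketch (the vertex values below are assumed, not part of this module):
#   >>> px, py = np.array([0., 2., 2., 0.]), np.array([0., 0., 2., 2.])
#   >>> polygon_line_intersections(px, py, xval=1.0)
#   [(0.0, 2.0)]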
def floodfill(data, start_coords, threshold):
from scipy.ndimage import label
# Determine value at the starting coordinates
value = data[start_coords]
# Determine all pixels that match
mask = (data > value * (2 - threshold)) & (data < value * threshold)
# Determine all individual chunks
labels, num_features = label(mask)
mask = labels == labels[start_coords]
return mask
|
fdb256073de785149322a6ad9b1151fda638591d
|
082cb56436631f16585dc6c667a8b384cee3335f
|
/script/talk/source/t400131.py
|
23bdb120d783398fd5ea7bc3f54355f9629c926c
|
[] |
no_license
|
vawser/Cinders-DS3
|
abf2c5e1c163f2e556a0d89e437eead3ddd6992c
|
d086ebce45b27806f757e04778dad1615e405dab
|
refs/heads/master
| 2023-09-01T00:48:00.500866
| 2023-08-07T12:25:24
| 2023-08-07T12:25:24
| 230,333,994
| 192
| 203
| null | 2022-02-13T21:09:26
| 2019-12-26T22:08:06
|
Python
|
UTF-8
|
Python
| false
| false
| 8,702
|
py
|
t400131.py
|
# -*- coding: utf-8 -*-
def t400131_1():
""" State 0,1 """
assert GetCurrentStateElapsedTime() > 1
""" State 2 """
while True:
call = t400131_x11()
assert IsClientPlayer() == 1
""" State 3 """
call = t400131_x12()
assert not IsClientPlayer()
def t400131_x0(weapon1=6260000, weapon2=6280000):
""" State 0,3 """
# action:12003001:Transpose <?weaponNameId@6260000?> \nand <?weaponNameId@6280000?> into a single weapon?
call = t400131_x6(action2=12003001)
if call.Get() == 0:
""" State 1,5 """
assert t400131_x5(weapon1=weapon1, weapon2=weapon2)
""" State 4 """
# action:13003000:The brothers' blades were made one
assert t400131_x10(action1=13003000)
elif call.Done():
""" State 2 """
pass
""" State 6 """
return 0
def t400131_x1(weapon1=6260000, weapon2=6280000):
""" State 0,7 """
c1110()
""" State 1 """
while True:
ClearTalkListData()
""" State 10 """
assert t400131_x2(weapon1=weapon1, weapon2=weapon2)
""" State 2 """
# Transpose
AddTalkListDataIf(GetEventStatus(25009850) == 0, 1, 15003000, 74000171)
# Transpose brothers' blades
AddTalkListDataIf(GetEventStatus(25009850) == 0 and GetEventStatus(74000171) == 1 and GetEventStatus(74000182) == 1, 3, 15003002,
-1)
# Leave
AddTalkListData(99, 15000005, -1)
""" State 3 """
ShowShopMessage(1)
if GetTalkListEntryResult() == 1:
""" State 4,5 """
c1111(30000, 31000)
assert not (CheckSpecificPersonMenuIsOpen(18, 0) == 1 and not CheckSpecificPersonGenericDialogIsOpen(0))
elif GetTalkListEntryResult() == 3:
""" State 8,9 """
assert t400131_x0(weapon1=weapon1, weapon2=weapon2)
elif not (CheckSpecificPersonMenuIsOpen(1, 0) == 1 and not CheckSpecificPersonGenericDialogIsOpen(0)):
""" State 6,11 """
return 0
def t400131_x2(weapon1=6260000, weapon2=6280000):
""" State 0,1 """
# weapon:6260000:Lorian's Greatsword
if (ComparePlayerInventoryNumber(0, weapon1 + 0, 0, 0, 0) and ComparePlayerInventoryNumber(0, weapon1
+ 1, 0, 0, 0) and ComparePlayerInventoryNumber(0, weapon1 + 2, 0, 0, 0) and ComparePlayerInventoryNumber(0,
weapon1 + 3, 0, 0, 0) and ComparePlayerInventoryNumber(0, weapon1 + 4, 0, 0, 0) and ComparePlayerInventoryNumber(0,
weapon1 + 5, 0, 0, 0)):
""" State 2,4 """
Label('L0')
""" State 3 """
SetEventState(74000182, 0)
else:
""" State 5,6 """
# weapon:6280000:Lothric's Holy Sword
if (ComparePlayerInventoryNumber(0, weapon2 + 0, 0, 0, 0) and ComparePlayerInventoryNumber(0,
weapon2 + 1, 0, 0, 0) and ComparePlayerInventoryNumber(0, weapon2 + 2, 0, 0, 0) and ComparePlayerInventoryNumber(0,
weapon2 + 3, 0, 0, 0) and ComparePlayerInventoryNumber(0, weapon2 + 4, 0, 0, 0) and ComparePlayerInventoryNumber(0,
weapon2 + 5, 0, 0, 0)):
Goto('L0')
else:
""" State 7,8 """
SetEventState(74000182, 1)
""" State 9 """
return 0
def t400131_x3(weapon1=_):
""" State 0,2 """
# weapon:6260000:Lorian's Greatsword, weapon:6280000:Lothric's Holy Sword
call = t400131_x4(weapon1=weapon1, weapon3=0)
if call.Get() == 0:
""" State 3 """
call = t400131_x4(weapon1=weapon1, weapon3=1)
if call.Get() == 0:
""" State 4 """
call = t400131_x4(weapon1=weapon1, weapon3=2)
if call.Get() == 0:
""" State 5 """
call = t400131_x4(weapon1=weapon1, weapon3=3)
if call.Get() == 0:
""" State 6 """
call = t400131_x4(weapon1=weapon1, weapon3=4)
if call.Get() == 0:
""" State 7 """
call = t400131_x4(weapon1=weapon1, weapon3=5)
if call.Get() == 0:
""" State 1 """
Quit()
elif call.Done():
pass
elif call.Done():
pass
elif call.Done():
pass
elif call.Done():
pass
elif call.Done():
pass
elif call.Done():
pass
""" State 8 """
return 0
def t400131_x4(weapon1=_, weapon3=_):
""" State 0,2 """
if ComparePlayerInventoryNumber(0, weapon1 + weapon3, 0, 0, 0):
""" State 3,5 """
return 0
else:
""" State 4,1 """
PlayerEquipmentQuantityChange(0, weapon1 + weapon3, -1)
""" State 6 """
return 1
def t400131_x5(weapon1=6260000, weapon2=6280000):
""" State 0,1 """
assert t400131_x3(weapon1=weapon1)
""" State 2 """
assert t400131_x3(weapon1=weapon2)
""" State 3 """
# lot:60310:Twin Princes' Greatsword
assert t400131_x9(lot1=60310)
""" State 4 """
return 0
def t400131_x6(action2=12003001):
""" State 0,1 """
# action:12003001:Transpose <?weaponNameId@6260000?> \nand <?weaponNameId@6280000?> into a single weapon?
OpenGenericDialog(8, action2, 3, 4, 2)
assert not CheckSpecificPersonGenericDialogIsOpen(0)
""" State 2 """
if GetGenericDialogButtonResult() == 1:
""" State 3 """
return 0
else:
""" State 4 """
return 1
def t400131_x7(z3=6132, flag1=6001, flag2=6000, flag3=6000, flag4=6000, flag5=6000):
""" State 0,1 """
while True:
assert (not GetOneLineHelpStatus() and not IsTalkingToSomeoneElse() and not IsClientPlayer()
and not IsPlayerDead() and not IsCharacterDisabled())
""" State 3 """
assert (GetEventStatus(flag1) == 1 or GetEventStatus(flag2) == 1 or GetEventStatus(flag3) ==
1 or GetEventStatus(flag4) == 1 or GetEventStatus(flag5) == 1)
""" State 2 """
if (not (not GetOneLineHelpStatus() and not IsTalkingToSomeoneElse() and not IsClientPlayer()
and not IsPlayerDead() and not IsCharacterDisabled())):
pass
elif (not GetEventStatus(flag1) and not GetEventStatus(flag2) and not GetEventStatus(flag3) and
not GetEventStatus(flag4) and not GetEventStatus(flag5)):
pass
elif CheckActionButtonArea(z3):
break
""" State 4 """
return 0
def t400131_x8():
""" State 0,1 """
if not CheckSpecificPersonTalkHasEnded(0):
""" State 7 """
ClearTalkProgressData()
StopEventAnimWithoutForcingConversationEnd(0)
""" State 6 """
ReportConversationEndToHavokBehavior()
else:
pass
""" State 2 """
if CheckSpecificPersonGenericDialogIsOpen(0) == 1:
""" State 3 """
ForceCloseGenericDialog()
else:
pass
""" State 4 """
if CheckSpecificPersonMenuIsOpen(-1, 0) == 1 and not CheckSpecificPersonGenericDialogIsOpen(0):
""" State 5 """
ForceCloseMenu()
else:
pass
""" State 8 """
return 0
def t400131_x9(lot1=60310):
""" State 0,1 """
# lot:60310:Twin Princes' Greatsword
GetItemFromItemLot(lot1)
assert not IsMenuOpen(63) and GetCurrentStateElapsedFrames() > 1
""" State 2 """
return 0
def t400131_x10(action1=13003000):
""" State 0,1 """
# action:13003000:The brothers' blades were made one
OpenGenericDialog(7, action1, 1, 0, 1)
assert not CheckSpecificPersonGenericDialogIsOpen(0)
""" State 2 """
return 0
def t400131_x11():
""" State 0,1 """
while True:
call = t400131_x13(z1=2135, z2=390)
assert not GetEventStatus(1021) or not GetEventStatus(74000171)
""" State 2 """
call = t400131_x14()
assert GetEventStatus(1021) == 1 and GetEventStatus(74000171) == 1
def t400131_x12():
""" State 0,1 """
assert t400131_x8()
""" State 2 """
return 0
def t400131_x13(z1=2135, z2=390):
""" State 0,1 """
while True:
assert t400131_x7(z3=6132, flag1=6001, flag2=6000, flag3=6000, flag4=6000, flag5=6000)
""" State 3 """
# weapon:6260000:Lorian's Greatsword, weapon:6280000:Lothric's Holy Sword
call = t400131_x1(weapon1=6260000, weapon2=6280000)
if call.Done():
pass
elif GetDistanceToPlayer() > 6:
""" State 2 """
assert t400131_x8() and GetDistanceToPlayer() < 5.9
def t400131_x14():
""" State 0 """
|
6a7998b6bff821faa26da8742be93c5555a0d681
|
8e6bb9c1a620a162b7d017c2373dd01be54ea86d
|
/utils/datavzrd/wrapper.py
|
4159f810d422c66e9665e9f82036a915c3009798
|
[] |
no_license
|
snakemake/snakemake-wrappers
|
5d0963502c26eb709513567e25422871fe477cf2
|
996bdcf2a96535b967dfa483c363a5496f4b3906
|
refs/heads/master
| 2023-08-19T05:18:44.337503
| 2023-08-18T12:03:38
| 2023-08-18T12:03:38
| 213,319,194
| 184
| 189
| null | 2023-09-12T11:38:35
| 2019-10-07T07:20:59
|
CAP CDS
|
UTF-8
|
Python
| false
| false
| 373
|
py
|
wrapper.py
|
__author__ = "Johannes Köster"
__copyright__ = "Copyright 2017, Johannes Köster"
__email__ = "johannes.koester@protonmail.com"
__license__ = "MIT"
from snakemake.shell import shell
log = snakemake.log_fmt_shell(stdout=True, stderr=True)
extra = snakemake.params.get("extra", "")
shell("datavzrd {snakemake.input.config} {extra} --output {snakemake.output[0]} {log}")
|
3160c00532ba5cd31a389acc18b77de1d7b6982b
|
b08798b5b9b1aefa557fcf5aae2d7fcfc8310f32
|
/test/test_vfs.py
|
4cd04e21314154617072970c844e58b32ff64a09
|
[
"MIT"
] |
permissive
|
beetbox/beets
|
f0f361fafd57977497e1981f27946fd52d428b27
|
0e5ade4f711dbf563d35c290affb0254eee41235
|
refs/heads/master
| 2023-09-01T20:50:06.125904
| 2023-08-27T19:07:13
| 2023-08-27T19:07:13
| 827,590
| 8,977
| 1,768
|
MIT
| 2023-09-13T02:33:14
| 2010-08-09T23:17:20
|
Python
|
UTF-8
|
Python
| false
| false
| 1,615
|
py
|
test_vfs.py
|
# This file is part of beets.
# Copyright 2016, Adrian Sampson.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""Tests for the virtual filesystem builder.."""
import unittest
from test import _common
from beets import library
from beets import vfs
class VFSTest(_common.TestCase):
def setUp(self):
super().setUp()
self.lib = library.Library(':memory:', path_formats=[
('default', 'albums/$album/$title'),
('singleton:true', 'tracks/$artist/$title'),
])
self.lib.add(_common.item())
self.lib.add_album([_common.item()])
self.tree = vfs.libtree(self.lib)
def test_singleton_item(self):
self.assertEqual(self.tree.dirs['tracks'].dirs['the artist'].
files['the title'], 1)
def test_album_item(self):
self.assertEqual(self.tree.dirs['albums'].dirs['the album'].
files['the title'], 2)
def suite():
return unittest.TestLoader().loadTestsFromName(__name__)
if __name__ == '__main__':
unittest.main(defaultTest='suite')
|
50fe7c4fcbc99f7fb154c40dbca78f360c566d5b
|
f305f84ea6f721c2391300f0a60e21d2ce14f2a5
|
/7_graph/带权图最短路和最小生成树/floyd多源/acwing习题/343. 排序-传递闭包/拓扑排序.py
|
cbd4c352fe01b7e3f7dd3027a2d9cde1ac8c3fcf
|
[] |
no_license
|
981377660LMT/algorithm-study
|
f2ada3e6959338ae1bc21934a84f7314a8ecff82
|
7e79e26bb8f641868561b186e34c1127ed63c9e0
|
refs/heads/master
| 2023-09-01T18:26:16.525579
| 2023-09-01T12:21:58
| 2023-09-01T12:21:58
| 385,861,235
| 225
| 24
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 577
|
py
|
拓扑排序.py
|
# 2 ≤ n ≤ 26
# Variables can only be the uppercase letters A–Z
# This problem can also be solved with topological sorting
# Loop m times, `adding one edge to the graph each time`, then run a topological sort
# After sorting, if the ordering contains fewer than n nodes, there is a cycle, i.e. a contradiction; break out of the loop
# After sorting, if the ordering contains n nodes but at some point two or more nodes were in the queue at once, the topological order is not unique, so the order of all nodes cannot be determined yet; continue with the next iteration
# After sorting, if the ordering contains n nodes and the queue always held exactly one node, the topological order is unique; output the result and break out of the loop
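# A minimal Python sketch of the approach described above (added for illustration;
# it is not part of the original file, and the function and variable names are assumptions).
from collections import deque
def _topo_state(n, edges):
    """Kahn's algorithm; classify the current graph as 'cycle', 'unique' or 'undetermined'."""
    adj = [[] for _ in range(n)]
    indeg = [0] * n
    for u, v in edges:
        adj[u].append(v)
        indeg[v] += 1
    queue = deque(i for i in range(n) if indeg[i] == 0)
    order, unique = [], True
    while queue:
        if len(queue) > 1:
            unique = False  # two or more candidates at once -> order not (yet) unique
        u = queue.popleft()
        order.append(u)
        for v in adj[u]:
            indeg[v] -= 1
            if indeg[v] == 0:
                queue.append(v)
    if len(order) < n:
        return 'cycle', None
    return ('unique', order) if unique else ('undetermined', None)
def solve(n, relations):
    """relations: list of (u, v) pairs meaning u < v, with 0 <= u, v < n."""
    edges = []
    for i, (u, v) in enumerate(relations, 1):  # add one edge per iteration
        edges.append((u, v))
        state, order = _topo_state(n, edges)
        if state == 'cycle':
            return 'Inconsistency found after %d relations.' % i
        if state == 'unique':
            seq = ''.join(chr(ord('A') + x) for x in order)
            return 'Sorted sequence determined after %d relations: %s.' % (i, seq)
    return 'Sorted sequence cannot be determined.'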
|
f23f8f4846575acc3b863d246af13e53e71bf6ef
|
518bf342bc4138982af3e2724e75f1d9ca3ba56c
|
/solutions/0485. Max Consecutive Ones/0485.py
|
f9b4afde74425f2b2fce40369d60031e66104484
|
[
"MIT"
] |
permissive
|
walkccc/LeetCode
|
dae85af7cc689882a84ee5011f0a13a19ad97f18
|
a27be41c174565d365cbfe785f0633f634a01b2a
|
refs/heads/main
| 2023-08-28T01:32:43.384999
| 2023-08-20T19:00:45
| 2023-08-20T19:00:45
| 172,231,974
| 692
| 302
|
MIT
| 2023-08-13T14:48:42
| 2019-02-23T15:46:23
|
C++
|
UTF-8
|
Python
| false
| false
| 236
|
py
|
0485.py
|
class Solution:
def findMaxConsecutiveOnes(self, nums: List[int]) -> int:
ans = 0
summ = 0
for num in nums:
if num == 0:
summ = 0
else:
summ += num
ans = max(ans, summ)
return ans
|
02ce96546a9f47bbd9db499413c27d4370a08251
|
2989f47a57cf23935a159180283463bc92d0ac18
|
/tests/learning/test_evaluators.py
|
44e4ea6866df933964c5fa39a2b2db8210dc0647
|
[
"Apache-2.0"
] |
permissive
|
Rostlab/nalaf
|
a5d9fa7931242c8b757064da4870a176a6364994
|
f266480174107d5c8fbff0f4431b2bb54565907e
|
refs/heads/develop
| 2022-12-13T22:08:56.464434
| 2021-06-02T11:32:07
| 2021-06-02T11:32:07
| 33,038,465
| 112
| 29
|
Apache-2.0
| 2022-12-08T05:12:45
| 2015-03-28T15:27:20
|
Python
|
UTF-8
|
Python
| false
| false
| 26,003
|
py
|
test_evaluators.py
|
import unittest
from nalaf.structures.data import Dataset, Document, Part, Entity, Relation
from nalaf.learning.evaluators import Evaluator, MentionLevelEvaluator, DocumentLevelRelationEvaluator
from nalaf.preprocessing.spliters import NLTKSplitter
from nalaf.preprocessing.tokenizers import NLTK_TOKENIZER
STUB_E_ID_1 = 'e_x_1'
STUB_E_ID_2 = 'e_x_2'
STUB_R_ID_1 = 'r_x_1'
class TestEvaluators(unittest.TestCase):
@classmethod
def setUpClass(cls):
# create a sample dataset1 (1) to test
cls.dataset1 = Dataset()
doc_1 = Document()
text = '.... aaaa .... bbbb .... cccc .... dddd .... eeee .... ffff .... gggg .... hhhh .... jjjj'
part_1 = Part(text)
cls.dataset1.documents['doc_1'] = doc_1
doc_1.parts['part_1'] = part_1
exact_1 = Entity(STUB_E_ID_1, 5, 'aaaa')
exact_1.subclass = 1
exact_2 = Entity(STUB_E_ID_1, 55, 'ffff')
exact_2.subclass = 2
exact_3 = Entity(STUB_E_ID_1, 75, 'hhhh')
exact_3.subclass = 2
overlap_1_1 = Entity(STUB_E_ID_1, 25, 'cccc')
overlap_1_1.subclass = 1
overlap_1_2 = Entity(STUB_E_ID_1, 26, 'cc')
overlap_1_2.subclass = 1
overlap_2_1 = Entity(STUB_E_ID_1, 32, '.. ddd')
overlap_2_1.subclass = 2
overlap_2_2 = Entity(STUB_E_ID_1, 36, 'ddd ...')
overlap_2_2.subclass = 2
overlap_3_1 = Entity(STUB_E_ID_1, 65, 'gggg')
overlap_3_1.subclass = 1
overlap_3_2 = Entity(STUB_E_ID_1, 62, '.. gggg ..')
overlap_3_2.subclass = 2
missing_1 = Entity('e2', 45, 'eeee')
missing_1.subclass = 1
missing_2 = Entity('e2', 84, 'jjjj')
missing_2.subclass = 1
spurios = Entity('e2', 15, 'bbbb')
spurios.subclass = 1
part_1.annotations = [exact_1, exact_2, exact_3, overlap_1_1, overlap_2_1, overlap_3_1, missing_1, missing_2]
part_1.predicted_annotations = [exact_1, exact_2, exact_3, overlap_1_2, overlap_2_2, overlap_3_2, spurios]
def test_implements_evaluator_interface(self):
self.assertIsInstance(MentionLevelEvaluator(), Evaluator)
def test_exact_strictness(self):
evaluator = MentionLevelEvaluator()
evaluation = (evaluator.evaluate(self.dataset1))(MentionLevelEvaluator.TOTAL_LABEL)
self.assertEqual(evaluation.tp, 3) # the 3 exact matches
self.assertEqual(evaluation.fp, 4) # the 3 overlapping + 1 spurious
self.assertEqual(evaluation.fn, 5) # the 3 overlapping + 2 missing
ret = evaluation.compute('exact')
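# With exact matching: precision = tp / (tp + fp) = 3 / 7, recall = tp / (tp + fn) = 3 / 8,
# and the F-measure asserted below is their harmonic mean.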
self.assertEqual(ret.precision, 3 / 7)
self.assertEqual(ret.recall, 3 / 8)
self.assertEqual(ret.f_measure, 2 * (3 / 7 * 3 / 8) / (3 / 7 + 3 / 8))
def test_overlapping_strictness(self):
evaluator = MentionLevelEvaluator()
evaluation = (evaluator.evaluate(self.dataset1))(MentionLevelEvaluator.TOTAL_LABEL)
self.assertEqual(evaluation.tp, 3) # the 3 exact matches
self.assertEqual(evaluation.fp - evaluation.fp_ov, 1) # the 1 spurious
self.assertEqual(evaluation.fn - evaluation.fn_ov, 2) # the 2 missing
self.assertEqual(evaluation.fp_ov, 3) # the 3 overlapping
self.assertEqual(evaluation.fn_ov, 3) # the 3 overlapping
ret = evaluation.compute('overlapping')
self.assertEqual(ret.precision, 9 / 10)
self.assertEqual(ret.recall, 9 / 11)
self.assertAlmostEqual(ret.f_measure, 2 * (9 / 10 * 9 / 11) / (9 / 10 + 9 / 11), places=5)
def test_half_overlapping_strictness(self):
evaluator = MentionLevelEvaluator()
evaluation = (evaluator.evaluate(self.dataset1))(MentionLevelEvaluator.TOTAL_LABEL)
self.assertEqual(evaluation.tp, 3) # the 3 exact matches
self.assertEqual(evaluation.fp - evaluation.fp_ov, 1) # the 1 spurious
self.assertEqual(evaluation.fn - evaluation.fn_ov, 2) # the 2 missing
self.assertEqual(evaluation.fp_ov, 3) # the 3 overlapping
self.assertEqual(evaluation.fn_ov, 3) # the 3 overlapping
ret = evaluation.compute('half_overlapping')
self.assertEqual(ret.precision, (3 + 6 / 2) / 10)
self.assertEqual(ret.recall, (3 + 6 / 2) / 11)
self.assertEqual(ret.f_measure, 2 * ((3 + 6 / 2) / 10 * (3 + 6 / 2) / 11) / ((3 + 6 / 2) / 10 + (3 + 6 / 2) / 11))
def test_exception_on_equality_operator(self):
ann_1 = Entity(STUB_E_ID_1, 1, 'text_1')
ann_2 = Entity(STUB_E_ID_1, 2, 'text_2')
Entity.equality_operator = 'not valid'
self.assertRaises(ValueError, lambda: ann_1 == ann_2)
def test_exception_on_strictness(self):
evaluator = MentionLevelEvaluator() # this is fine
evaluation = (evaluator.evaluate(self.dataset1))(MentionLevelEvaluator.TOTAL_LABEL) # this is fine
self.assertRaises(ValueError, evaluation.compute, 'strictness not valid')
def test_subclass_analysis(self):
evaluator = MentionLevelEvaluator(subclass_analysis=True)
evaluations = evaluator.evaluate(self.dataset1)
self.assertEqual(evaluations(1).tp, 1)
self.assertEqual(evaluations(2).tp, 2)
self.assertEqual(evaluations(1).fp, 3)
self.assertEqual(evaluations(2).fp, 1)
self.assertEqual(evaluations(1).fn, 4)
self.assertEqual(evaluations(2).fn, 1)
self.assertEqual(evaluations(1).fp_ov, 2)
self.assertEqual(evaluations(1).fn_ov, 2)
self.assertEqual(evaluations(2).fp_ov, 1)
self.assertEqual(evaluations(2).fn_ov, 1)
# -------
def _apply_pipeline(self, dataset):
# Apply through pipeline
NLTKSplitter().split(dataset)
NLTK_TOKENIZER.tokenize(dataset)
# nlp = get_spacy_nlp_english(load_parser=False)
# cls.parser = SpacyParser(nlp)
# cls.parser.parse(cls.dataset)
return dataset
def test_DocumentLevelRelationEvaluator_default_entities_case_irrelevant(self):
evaluator = DocumentLevelRelationEvaluator(rel_type=STUB_R_ID_1)
dataset = Dataset()
doc_1 = Document()
part_1 = Part('_irrelevant_')
dataset.documents['doc_1'] = doc_1
doc_1.parts['part_1'] = part_1
part_1.relations = [
Relation(
STUB_R_ID_1,
Entity(STUB_E_ID_1, 0, "TOOL"),
Entity(STUB_E_ID_2, 0, "maynard")
),
]
# -
part_1.predicted_relations = [
# empty
]
self._apply_pipeline(dataset)
# -
evals = evaluator.evaluate(dataset)
evaluation = evals(STUB_R_ID_1)
self.assertEqual(evaluation.tp, 0)
computation = evals(STUB_R_ID_1).compute(strictness="exact")
self.assertEqual(computation.f_measure, 0.0)
# ---
part_1.predicted_relations = [
Relation(
STUB_R_ID_1,
Entity(STUB_E_ID_1, 0, "TOOL"),
Entity(STUB_E_ID_2, 0, "maynard")
),
]
evals = evaluator.evaluate(dataset)
evaluation = evals(STUB_R_ID_1)
self.assertEqual(evaluation.tp, 1)
computation = evals(STUB_R_ID_1).compute(strictness="exact")
self.assertEqual(computation.f_measure, 1.0)
# -
part_1.predicted_relations = [
Relation(
STUB_R_ID_1,
Entity(STUB_E_ID_1, 0, "tool"),
Entity(STUB_E_ID_2, 0, "MAYNARD")
),
]
evals = evaluator.evaluate(dataset)
evaluation = evals(STUB_R_ID_1)
self.assertEqual(evaluation.tp, 1)
computation = evals(STUB_R_ID_1).compute(strictness="exact")
self.assertEqual(computation.f_measure, 1.0)
def test_DocumentLevelRelationEvaluator_order_irrelevant(self):
evaluator = DocumentLevelRelationEvaluator(rel_type=STUB_R_ID_1)
dataset = Dataset()
doc_1 = Document()
part_1 = Part('_irrelevant_')
dataset.documents['doc_1'] = doc_1
doc_1.parts['part_1'] = part_1
part_1.relations = [
Relation(
STUB_R_ID_1,
Entity(STUB_E_ID_1, 0, "TOOL"),
Entity(STUB_E_ID_2, 0, "maynard")
),
]
# -
part_1.predicted_relations = [
Relation(
STUB_R_ID_1,
Entity(STUB_E_ID_2, 0, "maynard"),
Entity(STUB_E_ID_1, 0, "TOOL")
),
]
self._apply_pipeline(dataset)
# ---
evals = evaluator.evaluate(dataset)
evaluation = evals(STUB_R_ID_1)
self.assertEqual(evaluation.tp, 1)
self.assertEqual(evaluation.fn, 0)
self.assertEqual(evaluation.fp, 0)
computation = evals(STUB_R_ID_1).compute(strictness="exact")
self.assertEqual(computation.f_measure, 1.0)
def test_DocumentLevelRelationEvaluator_false_positives(self):
evaluator = DocumentLevelRelationEvaluator(rel_type=STUB_R_ID_1)
dataset = Dataset()
doc_1 = Document()
part_1 = Part('_irrelevant_ PART *1*')
dataset.documents['doc_1'] = doc_1
doc_1.parts['part_1'] = part_1
part_2 = Part('_irrelevant_ PART *2*')
dataset.documents['doc_1'] = doc_1
doc_1.parts['part_2'] = part_2
part_1.relations = [
Relation(STUB_R_ID_1, Entity(STUB_E_ID_1, 0, "TOOL"), Entity(STUB_E_ID_2, 0, "Maynard")),
]
# -
part_2.predicted_relations = [
Relation(STUB_R_ID_1, Entity(STUB_E_ID_2, 0, "TOOL"), Entity(STUB_E_ID_1, 0, "Snoop Dog")),
]
self._apply_pipeline(dataset)
# ---
evals = evaluator.evaluate(dataset)
evaluation = evals(STUB_R_ID_1)
self.assertEqual(evaluation.tp, 0)
self.assertEqual(evaluation.fn, 1)
self.assertEqual(evaluation.fp, 1)
computation = evals(STUB_R_ID_1).compute(strictness="exact")
self.assertEqual(computation.f_measure, 0.0)
def test_DocumentLevelRelationEvaluator_parts_irrelevant(self):
evaluator = DocumentLevelRelationEvaluator(rel_type=STUB_R_ID_1)
dataset = Dataset()
doc_1 = Document()
part_1 = Part('_irrelevant_ PART *1*')
dataset.documents['doc_1'] = doc_1
doc_1.parts['part_1'] = part_1
part_2 = Part('_irrelevant_ PART *2*')
dataset.documents['doc_1'] = doc_1
doc_1.parts['part_2'] = part_2
part_1.relations = [
Relation(STUB_R_ID_1, Entity(STUB_E_ID_1, 0, "TOOL"), Entity(STUB_E_ID_2, 0, "maynard")),
]
# -
part_2.predicted_relations = [
Relation(STUB_R_ID_1, Entity(STUB_E_ID_2, 0, "maynard"), Entity(STUB_E_ID_1, 0, "TOOL")),
]
self._apply_pipeline(dataset)
# ---
evals = evaluator.evaluate(dataset)
evaluation = evals(STUB_R_ID_1)
self.assertEqual(evaluation.tp, 1)
self.assertEqual(evaluation.fn, 0)
self.assertEqual(evaluation.fp, 0)
computation = evals(STUB_R_ID_1).compute(strictness="exact")
self.assertEqual(computation.f_measure, 1.0)
def test_DocumentLevelRelationEvaluator_repeated_relations_irrelevant(self):
evaluator = DocumentLevelRelationEvaluator(rel_type=STUB_R_ID_1)
dataset = Dataset()
doc_1 = Document()
part_1 = Part('_irrelevant_')
dataset.documents['doc_1'] = doc_1
doc_1.parts['part_1'] = part_1
part_1.relations = [
Relation(STUB_R_ID_1, Entity(STUB_E_ID_1, 0, "TOOL"), Entity(STUB_E_ID_2, 0, "maynard")),
Relation(STUB_R_ID_1, Entity(STUB_E_ID_1, 0, "TOOL"), Entity(STUB_E_ID_2, 0, "Danny Carey")),
Relation(STUB_R_ID_1, Entity(STUB_E_ID_1, 1, "TOOL"), Entity(STUB_E_ID_2, 1, "Danny Carey")),
]
# -
part_1.predicted_relations = [
Relation(STUB_R_ID_1, Entity(STUB_E_ID_1, 0, "TOOL"), Entity(STUB_E_ID_2, 0, "maynard")),
Relation(STUB_R_ID_1, Entity(STUB_E_ID_1, 1, "TOOL"), Entity(STUB_E_ID_2, 1, "maynard")),
]
self._apply_pipeline(dataset)
# ---
evals = evaluator.evaluate(dataset)
evaluation = evals(STUB_R_ID_1)
self.assertEqual(evaluation.tp, 1)
self.assertEqual(evaluation.fn, 1)
self.assertEqual(evaluation.fp, 0)
computation = evals(STUB_R_ID_1).compute(strictness="exact")
self.assertEqual(computation.f_measure, 0.6666666666666666)
# -
part_1.predicted_relations = [
Relation(STUB_R_ID_1, Entity(STUB_E_ID_1, 2, "TOOL"), Entity(STUB_E_ID_2, 2, "maynard")),
Relation(STUB_R_ID_1, Entity(STUB_E_ID_1, 3, "TOOL"), Entity(STUB_E_ID_2, 3, "maynard")),
Relation(STUB_R_ID_1, Entity(STUB_E_ID_1, 4, "TOOL"), Entity(STUB_E_ID_2, 4, "Danny Carey")),
]
evals = evaluator.evaluate(dataset)
evaluation = evals(STUB_R_ID_1)
self.assertEqual(evaluation.tp, 2)
self.assertEqual(evaluation.fn, 0)
self.assertEqual(evaluation.fp, 0)
computation = evals(STUB_R_ID_1).compute(strictness="exact")
self.assertEqual(computation.f_measure, 1.0)
def test_DocumentLevelRelationEvaluator_normalized_entities(self):
evaluator = DocumentLevelRelationEvaluator(
rel_type=STUB_R_ID_1,
entity_map_fun=DocumentLevelRelationEvaluator.COMMON_ENTITY_MAP_FUNS['normalized_fun'](
{STUB_E_ID_1: 'n_1', STUB_E_ID_2: 'n_1'},
penalize_unknown_normalizations="no")
)
dataset = Dataset()
doc_1 = Document()
part_1 = Part('_irrelevant_')
dataset.documents['doc_1'] = doc_1
doc_1.parts['part_1'] = part_1
part_1.relations = [
Relation(
STUB_R_ID_1,
Entity(STUB_E_ID_1, 0, "Tool", norms={"n_1": "1964"}),
Entity(STUB_E_ID_2, 0, "Maynard", norms={"n_1": "1961"})),
]
# -
part_1.predicted_relations = [
Relation(
# One without normalization, one with another different normalization
STUB_R_ID_1,
Entity(STUB_E_ID_1, 0, "Tool"),
Entity(STUB_E_ID_2, 0, "Maynard", norms={"n_x": "1961"})),
Relation(
# One with different normalization, one with another different normalization
STUB_R_ID_1,
Entity(STUB_E_ID_1, 0, "Tool", norms={"n_1": "666"}),
Entity(STUB_E_ID_2, 0, "Maynard", norms={"n_x": "1961"})),
Relation(
# Both with the correct normalization ids, but one has wrong normalization
STUB_R_ID_1,
Entity(STUB_E_ID_1, 0, "Tool", norms={"n_1": "666"}),
Entity(STUB_E_ID_2, 0, "Maynard", norms={"n_1": "1961"})),
Relation(
# Both with another different normalization
STUB_R_ID_1,
Entity(STUB_E_ID_1, 0, "Tool", norms={"n_another_key": "1964"}),
Entity(STUB_E_ID_2, 0, "Maynard", norms={"n_another_key": "1961"})),
]
self._apply_pipeline(dataset)
# ---
evals = evaluator.evaluate(dataset)
evaluation = evals(STUB_R_ID_1)
self.assertEqual(evaluation.tp, 0)
self.assertEqual(evaluation.fn, 1)
self.assertEqual(evaluation.fp, 1)
computation = evals(STUB_R_ID_1).compute(strictness="exact")
self.assertEqual(computation.f_measure, 0.0)
# -
part_1.predicted_relations = [
Relation(
STUB_R_ID_1,
Entity(STUB_E_ID_1, 0, "Tool band", norms={"n_1": "1964"}),
Entity(STUB_E_ID_2, 0, "Maynard James Keenan", norms={"n_1": "1961"})),
]
evals = evaluator.evaluate(dataset)
evaluation = evals(STUB_R_ID_1)
self.assertEqual(evaluation.tp, 1)
self.assertEqual(evaluation.fn, 0)
self.assertEqual(evaluation.fp, 0)
computation = evals(STUB_R_ID_1).compute(strictness="exact")
self.assertEqual(computation.f_measure, 1.0)
def _create_basic_dataset(self):
dataset = Dataset()
doc_1 = Document()
part_1 = Part('_irrelevant_')
dataset.documents['doc_1'] = doc_1
doc_1.parts['part_1'] = part_1
self._apply_pipeline(dataset)
return (dataset, part_1)
def test_DocumentLevelRelationEvaluator_arbitrary_relation_accept_fun_order_does_not_matter(self):
entity_map_fun = (lambda e: "SAME")
def relation_accept_fun(gold, pred):
print('gold:', gold, ' <---> ', 'pred:', pred)
return gold == pred
r1 = Relation(STUB_R_ID_1, Entity(STUB_E_ID_1, 0, "yin"), Entity(STUB_E_ID_2, 0, "yan"))
r2 = Relation(STUB_R_ID_1, Entity(STUB_E_ID_1, 0, "yan"), Entity(STUB_E_ID_2, 0, "yin"))
self.assertTrue(relation_accept_fun(r1.map(entity_map_fun), r1.map(entity_map_fun)))
self.assertTrue(relation_accept_fun(r1.map(entity_map_fun), r2.map(entity_map_fun)))
self.assertTrue(relation_accept_fun(r2.map(entity_map_fun), r1.map(entity_map_fun)))
evaluator = DocumentLevelRelationEvaluator(STUB_R_ID_1, entity_map_fun, relation_accept_fun)
(dataset, part) = self._create_basic_dataset()
# -
part.relations = [r1]
part.predicted_relations = [r1]
evals = evaluator.evaluate(dataset)
evaluation = evals(STUB_R_ID_1)
print(evaluation)
self.assertEqual(evaluation.tp, 1)
self.assertEqual(evaluation.fn, 0)
self.assertEqual(evaluation.fp, 0)
computation = evals(STUB_R_ID_1).compute(strictness="exact")
self.assertEqual(computation.f_measure, 1.0)
def test_DocumentLevelRelationEvaluator_arbitrary_relation_accept_fun_order_matters(self):
entity_map_fun = (lambda e: e.text)
def relation_accept_fun(gold, pred):
print('gold:', gold, ' <---> ', 'pred:', pred)
return gold < pred
r1 = Relation(STUB_R_ID_1, Entity(STUB_E_ID_1, 0, "1"), Entity(STUB_E_ID_2, 0, "2"))
r2 = Relation(STUB_R_ID_1, Entity(STUB_E_ID_1, 0, "2"), Entity(STUB_E_ID_2, 0, "1"))
# r1 is not accepted against r1 because the accept function tests strict less-than, not equality (r1 is not < r1)
self.assertFalse(relation_accept_fun(r1.map(entity_map_fun), r1.map(entity_map_fun)))
# r1 < r2
self.assertTrue(relation_accept_fun(r1.map(entity_map_fun), r2.map(entity_map_fun)))
# r2 not < r1
self.assertFalse(relation_accept_fun(r2.map(entity_map_fun), r1.map(entity_map_fun)))
evaluator = DocumentLevelRelationEvaluator(STUB_R_ID_1, entity_map_fun, relation_accept_fun)
(dataset, part) = self._create_basic_dataset()
# -
part.relations = [r1]
part.predicted_relations = [r1]
evals = evaluator.evaluate(dataset)
evaluation = evals(STUB_R_ID_1)
print(evaluation)
self.assertEqual(evaluation.tp, 0)
self.assertEqual(evaluation.fn, 1)
self.assertEqual(evaluation.fp, 1)
computation = evals(STUB_R_ID_1).compute(strictness="exact")
self.assertEqual(computation.f_measure, 0.0)
# -
part.relations = [r1]
part.predicted_relations = [r2]
evals = evaluator.evaluate(dataset)
evaluation = evals(STUB_R_ID_1)
print(evaluation)
self.assertEqual(evaluation.tp, 1)
self.assertEqual(evaluation.fn, 0)
self.assertEqual(evaluation.fp, 0)
computation = evals(STUB_R_ID_1).compute(strictness="exact")
self.assertEqual(computation.f_measure, 1.0)
# -
part.relations = [r2]
part.predicted_relations = [r1]
evals = evaluator.evaluate(dataset)
evaluation = evals(STUB_R_ID_1)
self.assertEqual(evaluation.tp, 0)
self.assertEqual(evaluation.fn, 1)
self.assertEqual(evaluation.fp, 1)
computation = evals(STUB_R_ID_1).compute(strictness="exact")
self.assertEqual(computation.f_measure, 0.0)
def test_DocumentLevelRelationEvaluator_arbitrary_relation_accept_fun_ignore_some_predictions(self):
entity_map_fun = (lambda e: e.text)
def relation_accept_fun(gold, pred):
gold_pred_char_num = int(gold[-1])
pred_last_char_num = int(pred[-1])
print('gold:', gold, ' <---> ', 'pred:', pred,)
if gold == pred: # 1 == 1
return True
elif gold < pred: # 1 < 2
return None
else:
return False # 1 !<= 0
return gold == pred
r1 = Relation(STUB_R_ID_1, Entity(STUB_E_ID_1, 0, "xxx"), Entity(STUB_E_ID_2, 0, "1"))
r2 = Relation(STUB_R_ID_1, Entity(STUB_E_ID_1, 0, "xxx"), Entity(STUB_E_ID_2, 0, "1")) # Accept
r3 = Relation(STUB_R_ID_1, Entity(STUB_E_ID_1, 0, "xxx"), Entity(STUB_E_ID_2, 0, "2")) # Ignore
r4 = Relation(STUB_R_ID_1, Entity(STUB_E_ID_1, 0, "xxx"), Entity(STUB_E_ID_2, 0, "0")) # Reject
self.assertEqual(True, relation_accept_fun(r1.map(entity_map_fun), r2.map(entity_map_fun)))
self.assertEqual(None, relation_accept_fun(r1.map(entity_map_fun), r3.map(entity_map_fun)))
self.assertEqual(False, relation_accept_fun(r1.map(entity_map_fun), r4.map(entity_map_fun)))
evaluator = DocumentLevelRelationEvaluator(STUB_R_ID_1, entity_map_fun, relation_accept_fun)
(dataset, part) = self._create_basic_dataset()
# -
part.relations = [r1]
part.predicted_relations = [r2, r4] + [r3, r3, r3, r3, r3] # All the r3's should be ignored
evals = evaluator.evaluate(dataset)
evaluation = evals(STUB_R_ID_1)
print(evaluation)
self.assertEqual(evaluation.tp, 1)
self.assertEqual(evaluation.fn, 0)
self.assertEqual(evaluation.fp, 1)
computation = evals(STUB_R_ID_1).compute(strictness="exact")
self.assertEqual(computation.f_measure, 0.6666666666666666)
def test_DocumentLevelRelationEvaluator_arbitrary_relation_accept_fun_dont_count_multiple_same_hits(self):
entity_map_fun = (lambda e: e.text)
def relation_accept_fun(gold, pred):
print('gold:', gold, ' <---> ', 'pred:', pred,)
gold = int(gold[-1])
pred = int(pred[-1])
if gold <= pred and ((pred - gold) < 3): # e.g., 1 <= 1, 2, 3
return True
else:
return False
return gold == pred
r1 = Relation(STUB_R_ID_1, Entity(STUB_E_ID_1, 0, "xxx"), Entity(STUB_E_ID_2, 0, "1"))
r5 = Relation(STUB_R_ID_1, Entity(STUB_E_ID_1, 0, "xxx"), Entity(STUB_E_ID_2, 0, "9")) # Missing == fn
r6 = Relation(STUB_R_ID_1, Entity(STUB_E_ID_1, 0, "xxx"), Entity(STUB_E_ID_2, 0, "5"))
r8 = Relation(STUB_R_ID_1, Entity(STUB_E_ID_1, 0, "xxx"), Entity(STUB_E_ID_2, 0, "2")) # (maps to 1) Own repetition in gold, so 1 should be counted twice
r2 = Relation(STUB_R_ID_1, Entity(STUB_E_ID_1, 0, "xxx"), Entity(STUB_E_ID_2, 0, "1")) # Accept 1 --> do count == tp
r3 = Relation(STUB_R_ID_1, Entity(STUB_E_ID_1, 0, "xxx"), Entity(STUB_E_ID_2, 0, "2")) # repeated Accept 1,2 --> do count because of own repetition in gold == tp
r4 = Relation(STUB_R_ID_1, Entity(STUB_E_ID_1, 0, "xxx"), Entity(STUB_E_ID_2, 0, "3")) # repeated Accept 1,2 --> do not count because it's over repetition
r7 = Relation(STUB_R_ID_1, Entity(STUB_E_ID_1, 0, "xxx"), Entity(STUB_E_ID_2, 0, "6")) # Accept 5 --> do count == tp
r9 = Relation(STUB_R_ID_1, Entity(STUB_E_ID_1, 0, "xxx"), Entity(STUB_E_ID_2, 0, "5")) # Accept 5 --> do not count because it's over repetition
self.assertEqual(True, relation_accept_fun(r1.map(entity_map_fun), r2.map(entity_map_fun)))
self.assertEqual(True, relation_accept_fun(r1.map(entity_map_fun), r3.map(entity_map_fun)))
self.assertEqual(True, relation_accept_fun(r1.map(entity_map_fun), r4.map(entity_map_fun)))
self.assertEqual(False, relation_accept_fun(r1.map(entity_map_fun), r7.map(entity_map_fun)))
self.assertEqual(False, relation_accept_fun(r5.map(entity_map_fun), r2.map(entity_map_fun)))
self.assertEqual(False, relation_accept_fun(r5.map(entity_map_fun), r3.map(entity_map_fun)))
self.assertEqual(False, relation_accept_fun(r5.map(entity_map_fun), r4.map(entity_map_fun)))
self.assertEqual(False, relation_accept_fun(r5.map(entity_map_fun), r7.map(entity_map_fun)))
self.assertEqual(True, relation_accept_fun(r6.map(entity_map_fun), r7.map(entity_map_fun)))
self.assertEqual(False, relation_accept_fun(r8.map(entity_map_fun), r2.map(entity_map_fun)))
self.assertEqual(True, relation_accept_fun(r8.map(entity_map_fun), r3.map(entity_map_fun)))
self.assertEqual(True, relation_accept_fun(r8.map(entity_map_fun), r4.map(entity_map_fun)))
self.assertEqual(False, relation_accept_fun(r8.map(entity_map_fun), r7.map(entity_map_fun)))
evaluator = DocumentLevelRelationEvaluator(STUB_R_ID_1, entity_map_fun, relation_accept_fun)
(dataset, part) = self._create_basic_dataset()
# -
part.relations = [r1, r5, r6, r8]
part.predicted_relations = [r2, r3, r4, r7, r9] # Only one should be accepted
evals = evaluator.evaluate(dataset)
evaluation = evals(STUB_R_ID_1)
print(evaluation)
self.assertEqual(evaluation.tp, 3, evaluation)
self.assertEqual(evaluation.fn, 1)
self.assertEqual(evaluation.fp, 0)
computation = evals(STUB_R_ID_1).compute(strictness="exact")
self.assertEqual(computation.f_measure, 0.8571428571428571)
if __name__ == '__main__':
unittest.main()
|
d6bbfc3f90daa01f7a1fb8777f6ff0ac7659e351
|
448bf7d804b6300514c15d43468e591414baead9
|
/voila/tornado/handler.py
|
13d8756a65bdf318fbaf41ebe9a80ddf354d7ab2
|
[
"BSD-3-Clause"
] |
permissive
|
voila-dashboards/voila
|
f505122f7b7534eb1dbbbfc65e9a14c1cac78ed6
|
d52a6486e2b99481c5ca8e2a4ac69fc8d5fb6929
|
refs/heads/main
| 2023-09-04T19:35:51.729159
| 2023-09-04T09:40:15
| 2023-09-04T09:40:15
| 145,574,616
| 3,930
| 435
|
NOASSERTION
| 2023-09-14T10:00:24
| 2018-08-21T14:21:42
|
Python
|
UTF-8
|
Python
| false
| false
| 919
|
py
|
handler.py
|
#############################################################################
# Copyright (c) 2018, Voilà Contributors #
# Copyright (c) 2018, QuantStack #
# #
# Distributed under the terms of the BSD 3-Clause License. #
# #
# The full license is in the file LICENSE, distributed with this software. #
#############################################################################
import tornado.web
from ..handler import VoilaHandler
class TornadoVoilaHandler(VoilaHandler):
@tornado.web.authenticated
async def get(self, path=None):
gen = self.get_generator(path=path)
async for html in gen:
self.write(html)
self.flush()
|
b6430888af7c233b7c29da2c106d1d9d9ee8c55d
|
d28eb4c3c4bc8c3e3fa8714a8ee34400cd3b1b65
|
/script/deploy_platform.py
|
635ee4b52630351eb770e68d52e759942dce266e
|
[
"Apache-2.0"
] |
permissive
|
JetBrains/skija
|
98d0c18a7ca2b9885b4a038db180e63288ae347a
|
8581a6c04808c0ada7863aabed9f2a9d77353b39
|
refs/heads/master
| 2023-08-29T22:47:49.168005
| 2023-08-14T13:50:36
| 2023-08-14T13:50:36
| 253,568,386
| 2,768
| 143
|
Apache-2.0
| 2023-08-14T13:50:38
| 2020-04-06T17:25:23
|
Java
|
UTF-8
|
Python
| false
| false
| 1,746
|
py
|
deploy_platform.py
|
#! /usr/bin/env python3
import argparse, build, clean, common, glob, os, platform, revision, subprocess, sys
def main():
parser = argparse.ArgumentParser()
parser.add_argument("--dry-run", action="store_true")
(args, _) = parser.parse_known_args()
# Build
build.main()
# Update poms
rev = revision.revision()
os.chdir(common.root + '/platform')
artifact = "skija-" + common.classifier
with common.replaced('deploy/META-INF/maven/org.jetbrains.skija/' + artifact + '/pom.xml', {'${version}': rev}):
with common.replaced('deploy/META-INF/maven/org.jetbrains.skija/' + artifact + '/pom.properties', {'${version}': rev}):
with open('target/classes/org/jetbrains/skija/' + common.classifier.replace('-', '/') + '/skija.version', 'w') as f:
f.write(rev)
print("Packaging " + artifact + "-" + rev + ".jar")
subprocess.check_call(["jar",
"--create",
"--file", "target/" + artifact + "-" + rev + ".jar",
"-C", "target/classes", ".",
"-C", "deploy", "META-INF/maven/org.jetbrains.skija/" + artifact
])
if not args.dry_run:
print("Deploying", artifact + "-" + rev + ".jar")
subprocess.check_call([
common.mvn,
"--batch-mode",
"--settings", "deploy/settings.xml",
"-Dspace.username=" + os.getenv("USER_NAME"),
"-Dspace.password=" + os.getenv("SPACE_TOKEN"),
"deploy:deploy-file",
"-Dfile=target/" + artifact + "-" + rev + ".jar",
"-DpomFile=deploy/META-INF/maven/org.jetbrains.skija/" + artifact + "/pom.xml",
"-DrepositoryId=space-maven",
"-Durl=" + common.space_skija,
])
return 0
if __name__ == "__main__":
sys.exit(main())
|
5e03706e6d4aaf513fd3785b0e555b537b02f9f6
|
92768f2f4c732583d469212c7e1b00a9ab136752
|
/rplugin/python/floobits/view.py
|
04d360208292d63320129a4fe826314d7d381ff9
|
[
"Apache-2.0"
] |
permissive
|
Floobits/floobits-neovim
|
c02e7ccfcb32c80cd1adece8b1896e3defbe120e
|
dbfa051e4f097dfa3f46997a2019556a62861258
|
refs/heads/master
| 2021-11-22T19:23:22.984948
| 2021-10-18T05:48:14
| 2021-10-18T05:48:14
| 26,030,646
| 170
| 11
|
Apache-2.0
| 2021-11-03T16:41:50
| 2014-10-31T19:40:06
|
Python
|
UTF-8
|
Python
| false
| false
| 8,238
|
py
|
view.py
|
import editor
from common import msg, utils, shared as G
from collections import defaultdict
vim = None
# Foreground: background
COLORS = (
('white', 'red'),
('black', 'yellow'),
('black', 'green'),
('white', 'blue'),
)
HL_RULES = ['ctermfg=%s ctermbg=%s guifg=%s guibg=%s' % (fg, bg, fg, bg) for fg, bg in COLORS]
def user_id_to_region(user_id):
return "floobitsuser%s" % user_id
def vim_buf_to_text(vim_buf):
# Work around EOF new line handling in Vim. Vim always puts a newline at the end of a file,
# but never exposes that newline in the view text.
tail = '\n'
if vim_buf[-1] == '':
tail = ''
text = '\n'.join(vim_buf[:]) + tail
return text.decode('utf-8')
class View(object):
"""editors representation of the buffer"""
current_highlights = defaultdict(list)
pending_highlights = {}
def __init__(self, vim_buf):
self.vim_buf = vim_buf
def __repr__(self):
return '%s %s' % (self.native_id, self.vim_buf.name)
def __str__(self):
return repr(self)
def _offset_to_vim(self, offset):
current_offset = 0
for line_num, line in enumerate(self.vim_buf):
next_offset = len(line) + 1
if current_offset + next_offset > offset:
break
current_offset += next_offset
col = offset - current_offset
msg.debug('offset %s is line %s column %s' % (offset, line_num + 1, col + 1))
return line_num + 1, col + 1
@property
def native_id(self):
return self.vim_buf.number
def is_loading(self):
return False
def get_text(self):
return vim_buf_to_text(self.vim_buf)
def update(self, data, message=True):
self.set_text(data["buf"])
def set_text(self, text):
msg.debug('About to patch %s %s' % (str(self), self.vim_buf.name))
lines = text.encode('utf-8').split('\n')
new_len = len(lines)
end = start = -1
i = 0
def stomp_buffer():
msg.debug('Stomping buffer.')
G.AGENT.patching += 1
self.vim_buf[:] = lines
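# Scan below for a single contiguous run of changed lines; if the buffer length changed
# or more than one separate run differs, fall back to stomping the whole buffer.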
try:
if new_len != len(self.vim_buf):
stomp_buffer()
return
while i < new_len:
if lines[i] != self.vim_buf[i]:
msg.debug('Lines are not the same. "%s" "%s"' % (self.vim_buf[i], lines[i]))
if start > -1:
if end > -1:
stomp_buffer() # More than one contiguous change in patch.
return
else:
start = i
else:
msg.debug('Lines are the same. "%s"' % lines[i])
if start > -1 and end == -1:
end = i
i += 1
if start == -1 and end == -1:
msg.debug("Nothing to do here, buffers are the same.")
return
if start > -1 and end == -1:
end = i
msg.debug('Stomping lines %d to %d: "%s" -> "%s"' % (start, end, self.vim_buf[start:end],
lines[start:end]))
G.AGENT.patching += 1
self.vim_buf[start:end] = lines[start:end]
except Exception as e:
msg.error('Couldn\'t apply patches because: %s!\nThe unencoded text was: "%s"' % (
str(e), text))
raise
msg.debug('All done patching.')
def set_read_only(self, read_only=True):
pass
def set_status(self, *args):
pass
def apply_patches(self, buf, patches, username):
cursor_offset = self.get_cursor_offset()
msg.debug('cursor offset is %s bytes' % cursor_offset)
self.set_text(patches[0])
for patch in patches[2]:
offset = patch[0]
length = patch[1]
patch_text = patch[2]
if cursor_offset > offset:
new_offset = len(patch_text) - length
cursor_offset += new_offset
self.set_cursor_position(cursor_offset)
def focus(self):
editor.open_file(self.vim_buf.name)
def set_cursor_position(self, offset):
line_num, col = self._offset_to_vim(offset)
command = ':silent! call setpos(".", [%s, %s, %s, %s])' % (self.native_id, line_num, col, 0)
msg.debug('setting pos: %s' % command)
vim.command(command)
def get_cursor_offset(self):
return int(vim.eval('line2byte(line("."))+col(".")')) - 2
def get_selections(self):
# Vim likes to return strings for numbers even if you use str2nr:
return [[int(pos) for pos in range_] for range_ in vim.eval("g:FloobitsGetSelection()")]
def clear_highlight(self, user_id):
msg.debug('clearing selections for user %s in view %s' % (user_id, self.vim_buf.name))
if user_id not in self.current_highlights:
return
for hl in self.current_highlights[user_id]:
vim.command(":silent! :call matchdelete(%s)" % (hl,))
del self.current_highlights[user_id]
def clear_all_highlights(self):
for user_id in self.current_highlights.keys():
self.clear_highlight(user_id)
def highlight(self, ranges, user_id):
msg.debug("got a highlight %s" % ranges)
def doit():
msg.debug("doing timed highlights")
stored_ranges = self.pending_highlights[user_id]
del self.pending_highlights[user_id]
self._set_highlight(stored_ranges, user_id)
if user_id not in self.pending_highlights:
utils.set_timeout(doit, 150)
self.pending_highlights[user_id] = ranges
def _set_highlight(self, ranges, user_id):
msg.debug('highlighting ranges %s' % (ranges))
if vim.current.buffer.number != self.vim_buf.number:
return
region = user_id_to_region(user_id)
hl_rule = HL_RULES[user_id % len(HL_RULES)]
vim.command(":silent! highlight %s %s" % (region, hl_rule))
self.clear_highlight(user_id)
for _range in ranges:
start_row, start_col = self._offset_to_vim(_range[0])
end_row, end_col = self._offset_to_vim(_range[1])
if start_row == end_row and start_col == end_col:
if end_col >= len(self.vim_buf[end_row - 1]):
end_row += 1
end_col = 1
else:
end_col += 1
vim_region = "matchadd('{region}', '\%{start_row}l\%{start_col}v\_.*\%{end_row}l\%{end_col}v', 100)".\
format(region=region, start_row=start_row, start_col=start_col, end_row=end_row, end_col=end_col)
msg.debug("vim_region: %s" % (vim_region,))
try:
self.current_highlights[user_id].append(vim.eval(vim_region))
except vim.api.NvimError:
pass
def rename(self, name):
msg.debug('renaming %s to %s' % (self.vim_buf.name, name))
current = vim.current.buffer
text = self.get_text()
old_name = self.vim_buf.name
old_number = self.native_id
with open(name, 'wb') as fd:
fd.write(text.encode('utf-8'))
vim.command('edit! %s' % name)
self.vim_buf = vim.current.buffer
vim.command('edit! %s' % current.name)
try:
vim.command('bdelete! %s' % old_number)
except Exception as e:
msg.debug("couldn't bdelete %s... maybe thats OK? err: %s" % (old_number, str(e)))
try:
utils.rm(old_name)
except Exception as e:
msg.debug("couldn't delete %s... maybe thats OK? err: %s" % (old_name, str(e)))
def save(self):
# TODO: switch to the correct buffer, then save, then switch back (or use writefile)
if vim.current.buffer.name != self.vim_buf.name:
return
try:
vim.command('silent w!')
except Exception as e:
msg.log('Error saving %s: %s' % (self.vim_buf.name, str(e)))
def file_name(self):
return self.vim_buf.name
|
518520c995c6a523a20ed6cd5a2dcedc95234e4f
|
fa1ad2e2ac7e376fc7cb3b3a6e1bb88eed3e80be
|
/govern/data-meta/OpenMetadata/ingestion/src/metadata/ingestion/source/dashboard/metabase/metadata.py
|
6b8bf78849036a89a9d9ab8a0364c62143bc44b4
|
[
"Apache-2.0",
"BSD-3-Clause",
"MIT"
] |
permissive
|
alldatacenter/alldata
|
7bc7713c9f1d56ad6b8e59ea03206d1073b7e047
|
8d5f9a2d49ab8f9e85ccf058cb02c2fda287afc6
|
refs/heads/master
| 2023-08-05T07:32:25.442740
| 2023-08-03T13:17:24
| 2023-08-03T13:17:24
| 213,321,771
| 774
| 250
|
Apache-2.0
| 2023-09-06T17:35:32
| 2019-10-07T07:36:18
| null |
UTF-8
|
Python
| false
| false
| 12,537
|
py
|
metadata.py
|
# Copyright 2021 Collate
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Metabase source module"""
import traceback
from typing import Iterable, List, Optional
import requests
from metadata.generated.schema.api.data.createChart import CreateChartRequest
from metadata.generated.schema.api.data.createDashboard import CreateDashboardRequest
from metadata.generated.schema.api.lineage.addLineage import AddLineageRequest
from metadata.generated.schema.entity.data.dashboard import (
Dashboard as LineageDashboard,
)
from metadata.generated.schema.entity.services.connections.dashboard.metabaseConnection import (
MetabaseConnection,
)
from metadata.generated.schema.entity.services.connections.metadata.openMetadataConnection import (
OpenMetadataConnection,
)
from metadata.generated.schema.metadataIngestion.workflow import (
Source as WorkflowSource,
)
from metadata.generated.schema.type.entityReference import EntityReference
from metadata.ingestion.api.source import InvalidSourceException
from metadata.ingestion.lineage.parser import LineageParser
from metadata.ingestion.lineage.sql_lineage import search_table_entities
from metadata.ingestion.source.dashboard.dashboard_service import DashboardServiceSource
from metadata.ingestion.source.database.common_db_source import SQLSourceStatus
from metadata.utils import fqn
from metadata.utils.filters import filter_by_chart
from metadata.utils.helpers import get_standard_chart_type, replace_special_with
from metadata.utils.logger import ingestion_logger
HEADERS = {"Content-Type": "application/json", "Accept": "*/*"}
logger = ingestion_logger()
class MetabaseSource(DashboardServiceSource):
"""
Metabase Source Class
"""
config: WorkflowSource
metadata_config: OpenMetadataConnection
status: SQLSourceStatus
def __init__(
self,
config: WorkflowSource,
metadata_config: OpenMetadataConnection,
):
super().__init__(config, metadata_config)
self.metabase_session = self.client["metabase_session"]
@classmethod
def create(cls, config_dict, metadata_config: OpenMetadataConnection):
config = WorkflowSource.parse_obj(config_dict)
connection: MetabaseConnection = config.serviceConnection.__root__.config
if not isinstance(connection, MetabaseConnection):
raise InvalidSourceException(
f"Expected MetabaseConnection, but got {connection}"
)
return cls(config, metadata_config)
def get_dashboards_list(self) -> Optional[List[dict]]:
"""
Get List of all dashboards
"""
resp_dashboards = self.req_get("/api/dashboard")
if resp_dashboards.status_code == 200:
return resp_dashboards.json()
return []
def get_dashboard_name(self, dashboard: dict) -> str:
"""
Get Dashboard Name
"""
return dashboard["name"]
def get_dashboard_details(self, dashboard: dict) -> dict:
"""
Get Dashboard Details
"""
resp_dashboard = self.req_get(f"/api/dashboard/{dashboard['id']}")
return resp_dashboard.json()
def yield_dashboard(
self, dashboard_details: dict
) -> Iterable[CreateDashboardRequest]:
"""
Method to Get Dashboard Entity
"""
dashboard_url = (
f"/dashboard/{dashboard_details['id']}-"
f"{replace_special_with(raw=dashboard_details['name'].lower(), replacement='-')}"
)
yield CreateDashboardRequest(
name=dashboard_details["id"],
dashboardUrl=dashboard_url,
displayName=dashboard_details.get("name"),
description=dashboard_details.get("description", ""),
charts=[
EntityReference(id=chart.id.__root__, type="chart")
for chart in self.context.charts
],
service=EntityReference(
id=self.context.dashboard_service.id.__root__, type="dashboardService"
),
)
def yield_dashboard_chart(
self, dashboard_details: dict
) -> Optional[Iterable[CreateChartRequest]]:
"""Get chart method
Args:
dashboard_details:
Returns:
Iterable[CreateChartRequest]
"""
charts = dashboard_details["ordered_cards"]
for chart in charts:
try:
chart_details = chart["card"]
if "id" not in chart_details:
continue
chart_url = (
f"/question/{chart_details['id']}-"
f"{replace_special_with(raw=chart_details['name'].lower(), replacement='-')}"
)
if "name" not in chart_details:
continue
if filter_by_chart(
self.source_config.chartFilterPattern, chart_details["name"]
):
self.status.filter(
chart_details["name"], "Chart Pattern not allowed"
)
continue
yield CreateChartRequest(
name=chart_details["id"],
displayName=chart_details.get("name"),
description=chart_details.get("description", ""),
chartType=get_standard_chart_type(
str(chart_details["display"])
).value,
chartUrl=chart_url,
service=EntityReference(
id=self.context.dashboard_service.id.__root__,
type="dashboardService",
),
)
self.status.scanned(chart_details["name"])
except Exception as exc: # pylint: disable=broad-except
logger.debug(traceback.format_exc())
logger.warning(f"Error creating chart [{chart}]: {exc}")
continue
def yield_dashboard_lineage_details(
self, dashboard_details: dict, db_service_name
) -> Optional[Iterable[AddLineageRequest]]:
"""Get lineage method
Args:
dashboard_details
"""
if not db_service_name:
return
chart_list, dashboard_name = (
dashboard_details["ordered_cards"],
str(dashboard_details["id"]),
)
for chart in chart_list:
try:
chart_details = chart["card"]
if (
"dataset_query" not in chart_details
or "type" not in chart_details["dataset_query"]
):
continue
if chart_details["dataset_query"]["type"] == "native":
if not chart_details.get("database_id"):
continue
yield from self._yield_lineage_from_query(
chart_details=chart_details,
db_service_name=db_service_name,
dashboard_name=dashboard_name,
) or []
# TODO: this method below only gets a single table, but if the chart of type query has a join the other
# table_ids will be ignored within a nested object
elif chart_details["dataset_query"]["type"] == "query":
if not chart_details.get("table_id"):
continue
yield from self._yield_lineage_from_api(
chart_details=chart_details,
db_service_name=db_service_name,
dashboard_name=dashboard_name,
) or []
except Exception as exc: # pylint: disable=broad-except
logger.debug(traceback.format_exc())
logger.error(f"Error creating chart [{chart}]: {exc}")
def req_get(self, path):
"""Send get request method
Args:
path:
"""
return requests.get(
self.service_connection.hostPort + path,
headers=self.metabase_session,
timeout=30,
)
def _yield_lineage_from_query(
self, chart_details: dict, db_service_name: str, dashboard_name: str
) -> Optional[AddLineageRequest]:
resp_database = self.req_get(f"/api/database/{chart_details['database_id']}")
if resp_database.status_code == 200:
database = resp_database.json()
query = (
chart_details.get("dataset_query", {})
.get("native", {})
.get("query", "")
)
lineage_parser = LineageParser(query)
for table in lineage_parser.source_tables:
database_schema_name, table = fqn.split(str(table))[-2:]
database_schema_name = (
None
if database_schema_name == "<default>"
else database_schema_name
)
database = database.get("details", {}).get("db", None)
if database:
from_entities = search_table_entities(
metadata=self.metadata,
database=database,
service_name=db_service_name,
database_schema=database_schema_name,
table=table,
)
else:
from_entities = search_table_entities(
metadata=self.metadata,
service_name=db_service_name,
database=None,
database_schema=database_schema_name,
table=table,
)
to_fqn = fqn.build(
self.metadata,
entity_type=LineageDashboard,
service_name=self.config.serviceName,
dashboard_name=dashboard_name,
)
to_entity = self.metadata.get_by_name(
entity=LineageDashboard,
fqn=to_fqn,
)
for from_entity in from_entities:
yield self._get_add_lineage_request(
to_entity=to_entity, from_entity=from_entity
)
def _yield_lineage_from_api(
self, chart_details: dict, db_service_name: str, dashboard_name: str
) -> Optional[AddLineageRequest]:
resp_tables = self.req_get(f"/api/table/{chart_details['table_id']}")
if resp_tables.status_code == 200:
table = resp_tables.json()
database_name = table.get("db", {}).get("details", {}).get("db", None)
if database_name:
from_entities = search_table_entities(
metadata=self.metadata,
database=database_name,
service_name=db_service_name,
database_schema=table.get("schema"),
table=table.get("display_name"),
)
else:
from_entities = search_table_entities(
metadata=self.metadata,
service_name=db_service_name,
database=None,
database_schema=table.get("schema"),
table=table.get("display_name"),
)
to_fqn = fqn.build(
self.metadata,
entity_type=LineageDashboard,
service_name=self.config.serviceName,
dashboard_name=dashboard_name,
)
to_entity = self.metadata.get_by_name(
entity=LineageDashboard,
fqn=to_fqn,
)
for from_entity in from_entities:
yield self._get_add_lineage_request(
to_entity=to_entity, from_entity=from_entity
)
|
28833bd99ec5db07523c8f1d9a511f0dfb03ace0
|
dab10c721000fd9eb38676d6b2730f155eedd54e
|
/recirq/kpz/experiment.py
|
49d0cc5d9a662052e8c30b33d768e0d668417640
|
[
"Apache-2.0"
] |
permissive
|
quantumlib/ReCirq
|
f45e55e432f2e29fb8f2fe35a3d436a629219e86
|
d021621a3837693ae9c5fdc5c05058de20fba314
|
refs/heads/master
| 2023-09-03T19:35:55.281836
| 2023-09-01T01:12:40
| 2023-09-01T01:12:40
| 246,951,354
| 260
| 116
|
Apache-2.0
| 2023-09-09T00:41:35
| 2020-03-12T23:51:33
|
Python
|
UTF-8
|
Python
| false
| false
| 23,807
|
py
|
experiment.py
|
# Copyright 2023 Google
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module for simulating 1D Floquet XXZ dynamics and measuring the transferred magnetization.
This module allows you to perform a numerical version of the experiment detailed in
[arXiv:2306.09333](https://arxiv.org/abs/2306.09333). In particular, it is conjectured that the
late-time dynamics of the 1D Heisenberg spin chain (or the corresponding Floquet system) are
described by the Kardar-Parisi-Zhang (KPZ) universality class. By studying higher moments of
the transferred magnetization, one can see disagreement from the KPZ predictions, at least at
times accessible to us. To measure these moments, we initialize a 1D chain of qubits in product
states sampled from Eq. 3 of [arXiv:2306.09333](https://arxiv.org/abs/2306.09333). Then we apply
alternating layers of fSim gates to implement the Floquet dynamics. Finally, we measure the qubits
and check how many excitations crossed the center of the chain. Twice this quantity is the
transferred magnetization.
The class `KPZExperiment` sets up this experiment for a particular number, $t$, of cycles of fSim
gates, where a cycle is depicted in Figure 1 of [arXiv:2306.09333](https://arxiv.org/abs/2306.09333).
By default, the simulation uses the minimum number of qubits, $2t$. You can then run the
experiment using either the `run_experiment_amplitudes()` method or the `run_experiment()` method.
The former requires you to input a Cirq sampler that supports statevector simulations, whereas the
latter requires only one that can sample bitstrings, closer to what is done in the experiment. A
`KPZExperimentResultsFromAmplitudes` or `KPZExperimentResults` object is returned, from which the
probability distribution of the transferred magnetization can be seen, as well as its first
four moments. The statistical uncertainties of the moments can be computed using the methods
`jackknife_mean()`, `jackknife_variance()`, `jackknife_skew()`, and `jackknife_kurtosis()`.
The `run_experiment()` method differs from what is done on hardware in several important ways.
It does not include any of the post-selection that we do as part of our error mitigation
(since in this tutorial, it is run on a noiseless simulator). Further, in the experiment,
we use 46 qubits for all cycles instead of $2t$ qubits, which is beyond brute-force classical
simulation. On hardware, we also use the same initial bitstrings across cycle numbers, whereas
here they are chosen independently.
"""
from typing import Iterator, List, Set, Tuple, Union, Optional
import cirq
import numpy as np
import warnings
from tqdm import tqdm
import scipy.stats as sstats
import matplotlib.pyplot as plt
# initialize a random number generator
rng = np.random.default_rng()
def _dec_to_binary_right(d: Union[np.ndarray, int], n: int) -> Union[np.ndarray, int]:
i = np.arange(n // 2)
return (
np.floor(np.outer(d, 1 / 2**i)) - np.floor(np.outer(d, 1 / 2 ** (i + 1))) * 2
).astype("int")
class KPZExperimentResultsFromAmplitudes:
"""A class for processing and storing numerical KPZ results.
An object of this type is returned by `KPZExperiment.run_experiment_amplitudes()`,
which uses statevector simulations to obtain the probabilities, `prob_right[trial, nR]`, of having
`nR` excitations on the right side of the chain, given the initial state `initial_states[trial]`.
The probabilities and initial bitstrings are used to obtain the probability distribution of
the transferred magnetization, `transferred_magnetization_probs`, which can be plotted with the
`plot_histogram()` method and is also used to compute the first four moments.
"""
def __init__(self, prob_right: np.ndarray, initial_states: np.ndarray):
"""
Args:
`prob_right`: `prob_right[trial, num_right]` is the probability of measuring num_right excitations
on the right side of the chain, given the initial state `initial_states[trial]`.
`initial_states`: An array of the initial bitstrings used in the experiment.
"""
num_trials, n = initial_states.shape
num_right = np.sum(initial_states[:, n // 2 :], 1)
self.num_initial_states = num_trials
self.num_right_initial = num_right
self.transferred_magnetization_vals = np.arange(-n // 2, n // 2 + 1) * 2
transferred_magnetization_probs = np.zeros((num_trials, n + 1))
for trial in range(num_trials):
transferred_magnetization_probs[
trial, (n // 2 - num_right[trial]) : (n - num_right[trial] + 1)
] = prob_right[trial, :]
self.transferred_magnetization_probs_all = transferred_magnetization_probs
self.transferred_magnetization_probs = np.mean(
transferred_magnetization_probs, 0
)
self.mean = self._mean()
self.variance = self._variance()
self.skewness = self._skewness()
self.kurtosis = self._kurtosis()
def _mean(self) -> float:
return (
self.transferred_magnetization_probs @ self.transferred_magnetization_vals
)
def _variance(self) -> float:
return (
self.transferred_magnetization_probs
@ (self.transferred_magnetization_vals - self.mean) ** 2
)
def _skewness(self) -> float:
return (
self.transferred_magnetization_probs
@ (self.transferred_magnetization_vals - self.mean) ** 3
/ self.variance ** (3 / 2)
)
def _kurtosis(self) -> float:
return (
self.transferred_magnetization_probs
@ (self.transferred_magnetization_vals - self.mean) ** 4
/ self.variance**2
- 3
)
def _mean_excluding_i(self, i: int) -> float:
p = np.mean(
np.delete(self.transferred_magnetization_probs_all, i, axis=0), axis=0
)
return p @ self.transferred_magnetization_vals
def _variance_excluding_i(self, i: int) -> float:
p = np.mean(
np.delete(self.transferred_magnetization_probs_all, i, axis=0), axis=0
)
mean_i = p @ self.transferred_magnetization_vals
return p @ (self.transferred_magnetization_vals - mean_i) ** 2
def _skew_excluding_i(self, i: int) -> float:
p = np.mean(
np.delete(self.transferred_magnetization_probs_all, i, axis=0), axis=0
)
mean_i = p @ self.transferred_magnetization_vals
variance_i = p @ (self.transferred_magnetization_vals - mean_i) ** 2
return (
p
@ (self.transferred_magnetization_vals - mean_i) ** 3
/ variance_i ** (3 / 2)
)
def _kurtosis_excluding_i(self, i: int) -> float:
p = np.mean(
np.delete(self.transferred_magnetization_probs_all, i, axis=0), axis=0
)
mean_i = p @ self.transferred_magnetization_vals
variance_i = p @ (self.transferred_magnetization_vals - mean_i) ** 2
return (
p @ (self.transferred_magnetization_vals - mean_i) ** 4 / variance_i**2
- 3
)
def jackknife_mean(self) -> float:
"""Compute the statistical uncertainty of the mean using the remove-one jackknife.
If there is only one initial state (for example if $\mu = \infty$), zero uncertainty
is returned.
"""
if self.num_initial_states == 1:
return 0
mean_i = [self._mean_excluding_i(i) for i in range(self.num_initial_states)]
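# Remove-one jackknife standard error: since np.std defaults to the population standard
# deviation, std(mean_i) * sqrt(n - 1) equals sqrt((n - 1) / n * sum_i (mean_i - mean)^2).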
return np.std(mean_i) * np.sqrt(self.num_initial_states - 1)
def jackknife_variance(self) -> float:
"""Compute the statistical uncertainty of the variance using the remove-one jackknife.
If there is only one initial state (for example if $\mu = \infty$), zero uncertainty
is returned.
"""
if self.num_initial_states == 1:
return 0
variance_i = [
self._variance_excluding_i(i) for i in range(self.num_initial_states)
]
return np.std(variance_i) * np.sqrt(self.num_initial_states - 1)
def jackknife_skew(self) -> float:
"""Compute the statistical uncertainty of the skewness using the remove-one jackknife.
If there is only one initial state (for example if $\mu = \infty$), zero uncertainty
is returned.
"""
if self.num_initial_states == 1:
return 0
skew_i = [self._skew_excluding_i(i) for i in range(self.num_initial_states)]
return np.std(skew_i) * np.sqrt(self.num_initial_states - 1)
def jackknife_kurtosis(self) -> float:
"""Compute the statistical uncertainty of the kurtosis using the remove-one jackknife.
If there is only one initial state (for example if $\mu = \infty$), zero uncertainty
is returned.
"""
if self.num_initial_states == 1:
return 0
kurtosis_i = [
self._kurtosis_excluding_i(i) for i in range(self.num_initial_states)
]
return np.std(kurtosis_i) * np.sqrt(self.num_initial_states - 1)
def plot_histogram(self, ax: Optional[Union[None, plt.Axes]] = None) -> plt.Axes:
"""Plot a histogram of transferred magnetization.
Args:
ax: Optional. A matplotlib axes on which to draw the histogram.
Returns:
A matplotlib axes on which the histogram is drawn.
"""
if not ax:
fig, ax = plt.subplots(facecolor="white", dpi=200)
bins = (
np.append(
self.transferred_magnetization_vals // 2,
self.transferred_magnetization_vals[-1] // 2 + 1,
)
- 0.5
)
ax.hist(
self.transferred_magnetization_vals // 2,
weights=self.transferred_magnetization_probs,
bins=bins,
edgecolor="k",
)
ax.tick_params(direction="in", top=True, right=True)
ax.set_xlabel("Number of 1s that crossed center, $\mathcal{M}/2$")
ax.set_ylabel("Probability")
return ax
class KPZExperimentResults:
"""A class for processing and storing KPZ experiment results.
An object of this type is returned by `KPZExperiment.run_experiment()`, which uses
the `run()` method of the Cirq sampler to sample final bitstrings given the initial
bitstrings specified by the initial_states array. The outputs of `sampler.run()` are
inputted as `raw_results`. Pooling these results together, the transferred
magnetization is computed. Its histogram can be visualized using the `plot_histogram()`
method. The first four moments can also be computed, as well as their statistical
uncertainties.
"""
def __init__(
self,
raw_results: List[List[cirq.study.result.ResultDict]],
initial_states: np.ndarray,
):
num_trials, n = initial_states.shape
num_right = np.sum(initial_states[:, n // 2 :], 1)
self.num_initial_states = num_trials
self.num_right_initial = num_right
self.bitstrs = np.array([res.measurements["m"] for res in raw_results])
self.num_right_final = np.sum(self.bitstrs[:, :, n // 2 :], 2)
self.transferred_magnetization = self._transferred_magnetization()
self.mean = self._mean()
self.variance = self._variance()
self.skewness = self._skewness()
self.kurtosis = self._kurtosis()
def _transferred_magnetization(self) -> np.ndarray:
final = self.num_right_final
num_trials, num_reps = final.shape
initial = np.outer(self.num_right_initial, np.ones(num_reps, dtype=int))
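# The transferred magnetization is twice the net number of excitations that crossed the
# center: 2 * (right-half excitations after the circuit minus before it).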
return 2 * (final - initial)
def _mean(self) -> float:
return np.mean(self.transferred_magnetization.flatten())
def _variance(self) -> float:
return np.var(self.transferred_magnetization.flatten())
def _skewness(self) -> float:
return sstats.skew(self.transferred_magnetization.flatten())
def _kurtosis(self) -> float:
return sstats.kurtosis(self.transferred_magnetization.flatten(), fisher=True)
def _mean_excluding_i(self, i: int, axis: Optional[int] = 0) -> float:
tm = np.delete(self.transferred_magnetization, i, axis=axis)
return np.mean(tm.flatten())
def _variance_excluding_i(self, i: int, axis: Optional[int] = 0) -> float:
tm = np.delete(self.transferred_magnetization, i, axis=axis)
return np.var(tm.flatten())
def _skew_excluding_i(self, i: int, axis: Optional[int] = 0) -> float:
tm = np.delete(self.transferred_magnetization, i, axis=axis)
return sstats.skew(tm.flatten())
def _kurtosis_excluding_i(self, i: int, axis: Optional[int] = 0) -> float:
tm = np.delete(self.transferred_magnetization, i, axis=axis)
return sstats.kurtosis(tm.flatten(), fisher=True)
def jackknife_mean(self) -> float:
"""Compute the statistical uncertainty of the mean using the remove-one jackknife.
In the case that there is only one initial state, use the standard deviation of
the measured transferred magnetization to estimate the uncertainty instead.
"""
if self.num_initial_states == 1:
tm = self.transferred_magnetization.flatten()
return np.std(tm) / np.sqrt(len(tm))
mean_i = [self._mean_excluding_i(i) for i in range(self.num_initial_states)]
return np.std(mean_i) * np.sqrt(self.num_initial_states - 1)
def jackknife_variance(self) -> float:
"""Compute the statistical uncertainty of the variance using the remove-one jackknife.
One initial state is removed, and the variation depending on which state is removed
is used to estimate the uncertainty. In the case that there is only one initial state,
a repetition is removed instead.
"""
if self.num_initial_states == 1:
axis = 1
tot = self.transferred_magnetization.size
else:
axis = 0
tot = self.num_initial_states
variance_i = [self._variance_excluding_i(i, axis=axis) for i in range(tot)]
return np.std(variance_i) * np.sqrt(tot - 1)
def jackknife_skew(self) -> float:
"""Compute the statistical uncertainty of the skewness using the remove-one jackknife.
One initial state is removed, and the variation depending on which state is removed
is used to estimate the uncertainty. In the case that there is only one initial state,
a repetition is removed instead.
"""
if self.num_initial_states == 1:
axis = 1
tot = self.transferred_magnetization.size
else:
axis = 0
tot = self.num_initial_states
skew_i = [self._skew_excluding_i(i, axis=axis) for i in range(tot)]
return np.std(skew_i) * np.sqrt(tot - 1)
def jackknife_kurtosis(self) -> float:
"""Compute the statistical uncertainty of the kurtosis using the remove-one jackknife.
One initial state is removed, and the variation depending on which state is removed
is used to estimate the uncertainty. In the case that there is only one initial state,
a repetition is removed instead.
"""
if self.num_initial_states == 1:
axis = 1
tot = self.transferred_magnetization.size
else:
axis = 0
tot = self.num_initial_states
kurtosis_i = [self._kurtosis_excluding_i(i, axis=axis) for i in range(tot)]
return np.std(kurtosis_i) * np.sqrt(tot - 1)
def plot_histogram(self, ax: Optional[Union[None, plt.Axes]] = None) -> plt.Axes:
"""
Plot a histogram of transferred magnetization.
Args:
ax: Optional. A matplotlib axes on which to draw the histogram.
Returns:
A matplotlib axes on which the histogram is drawn.
"""
if not ax:
fig, ax = plt.subplots(facecolor="white", dpi=200)
num_crossed = self.transferred_magnetization // 2
lower = min(num_crossed.flatten())
upper = max(num_crossed.flatten())
ax.hist(
num_crossed.flatten(),
bins=np.arange(lower - 0.5, upper + 1.5),
density=True,
edgecolor="k",
)
ax.tick_params(direction="in", top=True, right=True)
ax.set_xlabel("Number of 1s that crossed center, $\mathcal{M}/2$")
ax.set_ylabel("Probability")
return ax
class KPZExperiment:
"""A class for running/simulating the KPZ experiment.
This class implements 1D Floquet XXZ dynamics, realized as alternating layers of fSim
gates. The initial states, parameterized by mu, interpolate between an
infinite-temperature/maximally mixed state at $\mu=0$ and a pure domain wall
state at $\mu=\infty$. (See Eq. 3 of [arXiv:2306.09333](https://arxiv.org/pdf/2306.09333.pdf).) The transferred
magnetization (the number of 1s that cross the center) is measured and
its moments are computed.
The fSim gates are parameterized by `theta` and `phi`. The isotropic Heisenberg point, at which
the KPZ conjecture applies, corresponds to `phi = 2*theta`.
The transferred magnetization is independent of system size up to $N/2$ cycles, where $N$
is the number of qubits. Therefore, in this class, we use $2t$ qubits to simulate cycle $t$.
In the experiment, we use 46 qubits to simulate cycles 0-23.
"""
def __init__(
self,
num_cycles: int,
mu: float,
num_init_states: int,
theta: float,
phi: float,
num_qubits: Optional[int] = None,
):
"""
Args:
`num_cycles`: The number of cycles to simulate.
`mu`: A parameter that controls the initial state.
`num_init_states`: The number of initial bitstrings to sample.
`theta`: fSim swap angle in radians.
`phi`: fSim cphase angle in radians.
`num_qubits`: The number of qubits to use. Defaults to `2*num_cycles`. The actual
experiment uses 46.
"""
if mu == np.inf and num_init_states > 1:
warnings.warn(
"When mu=inf, there is only 1 initial state. Setting num_init_states = 1"
)
num_init_states = 1
self.num_cycles = num_cycles
self.mu = mu
self.num_init_states = num_init_states
self.num_qubits = 2 * num_cycles if num_qubits is None else num_qubits
if self.num_qubits == 0:
self.num_qubits = 2
self.initial_states = self._generate_initial_states()
self.theta = theta
self.phi = phi
self.circuits = self._generate_all_circuits()
def _generate_initial_states(self) -> np.ndarray:
"""Generate the initial bitstrings."""
mu = self.mu
n = self.num_qubits
nTrials = self.num_init_states
if mu == np.inf:
bitstrs = np.array(
[np.append(np.ones(n // 2, dtype=int), np.zeros(n // 2, dtype=int))]
)
elif mu < np.inf:
p = np.exp(mu) / (np.exp(mu) + np.exp(-mu))
bitstrs_L = rng.choice(2, p=[1 - p, p], size=(nTrials, n // 2))
bitstrs_R = rng.choice(2, p=[p, 1 - p], size=(nTrials, n // 2))
bitstrs = np.append(bitstrs_L, bitstrs_R, axis=1)
return bitstrs
def _generate_cycle(self) -> cirq.Circuit:
"""Generate the Cirq circuit for one cycle."""
n = self.num_qubits
theta = self.theta
phi = self.phi
qubits = cirq.LineQubit.range(n)
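# One cycle is a brick-wall pattern of fSim gates: a layer on even bonds
# (qubits 0-1, 2-3, ...) followed by a layer on odd bonds (qubits 1-2, 3-4, ...).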
qc = cirq.Circuit(
cirq.FSimGate(theta, phi)(*qubits[q : q + 2]) for q in range(0, n - 1, 2)
)
qc += cirq.Circuit(
cirq.FSimGate(theta, phi)(*qubits[q : q + 2]) for q in range(1, n - 1, 2)
)
return qc
def _generate_circuit(self, locs: Iterator[int]) -> cirq.Circuit:
"""Generate a single circuit.
Args:
locs: Locations of 1 in the initial bitstring.
Returns:
A Cirq circuit that implements the Floquet dynamics for the specified initial state.
"""
n = self.num_qubits
num_cycles = self.num_cycles
qubits = cirq.LineQubit.range(n)
qc = cirq.Circuit(cirq.X(qubits[q]) for q in locs)
if num_cycles > 0:
cycle = self._generate_cycle()
qc += cycle * num_cycles
qc.append(cirq.measure(*qubits, key="m"))
return qc
def _generate_all_circuits(self) -> List[cirq.Circuit]:
"""Generate Cirq circuits for all of the initial states."""
locs_all = [np.nonzero(bitstr)[0] for bitstr in self.initial_states]
return [self._generate_circuit(locs) for locs in locs_all]
def run_experiment(self, sampler: cirq.Sampler, reps: int) -> KPZExperimentResults:
"""Run the experiment using the provided Cirq sampler.
Args:
`sampler`: The cirq sampler to use for the simulation.
`reps`: The number of bitstrings to sample per initial state.
Returns:
A KPZExperimentResults object containing the measured bitstrings and histogram of
transferred magnetization, as well as the extracted moments.
"""
result = [
sampler.run(circuit, repetitions=reps)
for circuit in tqdm(self.circuits, total=self.num_init_states)
]
return KPZExperimentResults(result, self.initial_states)
def run_experiment_amplitudes(
self, sampler: cirq.SimulatesAmplitudes
) -> KPZExperimentResultsFromAmplitudes:
"""Run the experiment using the provided Cirq sampler. Computes amplitudes instead of sampling bitstrings.
Args:
`sampler`: The cirq sampler to use for the simulation.
Returns:
A `KPZExperimentResultsFromAmplitudes` object containing the distribution of
transferred magnetization computed from the amplitudes, as well as the extracted moments.
"""
all_states = np.arange(2**self.num_qubits)
binary_states = _dec_to_binary_right(all_states, self.num_qubits)
num_right = np.sum(binary_states, 1)
del binary_states # this is exponentially large; delete to save memory
prob_right = np.zeros((self.num_init_states, self.num_qubits // 2 + 1))
for idx, (qc, initial_bitstr) in tqdm(
enumerate(zip(self.circuits, self.initial_states)),
total=self.num_init_states,
):
num_right_initial = np.sum(initial_bitstr)
probs = (
np.abs(
sampler.compute_amplitudes(
cirq.drop_terminal_measurements(qc), all_states
)
)
** 2
)
for num_right_val in np.arange(
min(self.num_qubits // 2 + 1, num_right_initial + 1)
):
prob_right[idx, num_right_val] = probs @ (num_right == num_right_val)
return KPZExperimentResultsFromAmplitudes(prob_right, self.initial_states)
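# Editor's illustrative sketch (not part of the original module): a minimal way this
# class might be driven, assuming the module-level `rng` used by
# `_generate_initial_states` has been seeded earlier in the file. The parameter values
# below are placeholders for a quick local simulation, not the experimental settings.
#
#     expt = KPZExperiment(num_cycles=3, mu=0.5, num_init_states=5,
#                          theta=0.4 * np.pi, phi=0.8 * np.pi)  # phi = 2*theta
#     results = expt.run_experiment(cirq.Simulator(), reps=200)
#     print(results.jackknife_mean(), results.jackknife_variance())
#     results.plot_histogram()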
|
e304c4afec0cf3ab95cb8475a22860871c34973a
|
c001930958cb94f8b91b1f734108671f1db9e9f1
|
/tests/integration/dash/org.py
|
9cf7a3aecabadee88b6f638a55fcee4bef0c3408
|
[
"MIT"
] |
permissive
|
plotly/dash
|
73c752135937e27975071fbd144e3fb21618e7b4
|
6eaf2e17c25f7ca1847c41aafeb18e87c586cb9f
|
refs/heads/dev
| 2023-08-30T21:21:06.056499
| 2023-08-29T16:49:04
| 2023-08-29T16:49:04
| 33,702,544
| 20,553
| 2,355
|
MIT
| 2023-08-31T20:51:14
| 2015-04-10T01:53:08
|
Python
|
UTF-8
|
Python
| false
| false
| 140
|
py
|
org.py
|
# used implicitly by dash_import_test
# to test https://github.com/plotly/dash/issues/1143
import dash_core_components as dcc # noqa: F401
|
f0968572f50a97d3b31a4f7fe18292b3e7e72488
|
fa1ad2e2ac7e376fc7cb3b3a6e1bb88eed3e80be
|
/dts/airbyte/airbyte-cdk/python/airbyte_cdk/sources/declarative/extractors/__init__.py
|
5c361598d351d2508a8d64eb6fe476b7f14deb30
|
[
"MIT",
"Elastic-2.0",
"Apache-2.0",
"BSD-3-Clause"
] |
permissive
|
alldatacenter/alldata
|
7bc7713c9f1d56ad6b8e59ea03206d1073b7e047
|
8d5f9a2d49ab8f9e85ccf058cb02c2fda287afc6
|
refs/heads/master
| 2023-08-05T07:32:25.442740
| 2023-08-03T13:17:24
| 2023-08-03T13:17:24
| 213,321,771
| 774
| 250
|
Apache-2.0
| 2023-09-06T17:35:32
| 2019-10-07T07:36:18
| null |
UTF-8
|
Python
| false
| false
| 478
|
py
|
__init__.py
|
#
# Copyright (c) 2023 Airbyte, Inc., all rights reserved.
#
from airbyte_cdk.sources.declarative.extractors.dpath_extractor import DpathExtractor
from airbyte_cdk.sources.declarative.extractors.http_selector import HttpSelector
from airbyte_cdk.sources.declarative.extractors.record_filter import RecordFilter
from airbyte_cdk.sources.declarative.extractors.record_selector import RecordSelector
__all__ = ["HttpSelector", "DpathExtractor", "RecordFilter", "RecordSelector"]
|
7e74f16b6489e386bc86b71abfc67dc4e366643d
|
c5d140c4e6ba0c98c34a76048a819bca84f30026
|
/docs/code.py
|
e8cc92763944bf89e4c915a0f6dded959527c73f
|
[
"MIT"
] |
permissive
|
tadeuzagallo/verve-lang
|
76af344ed1a5f17de244cbddbe17767f99870837
|
c7db1f5d4bb399b6c2623dd2444a981b5aba1aa4
|
refs/heads/master
| 2021-12-03T06:09:56.044643
| 2021-11-30T02:18:36
| 2021-11-30T02:18:36
| 54,283,802
| 378
| 12
|
MIT
| 2018-01-10T21:31:29
| 2016-03-19T19:37:51
|
Haskell
|
UTF-8
|
Python
| false
| false
| 936
|
py
|
code.py
|
import cgi
from docutils import nodes
from docutils.parsers.rst import Directive, directives
class code_node(nodes.Structural, nodes.Element):
pass
class CodeDirective(Directive):
has_content = True
required_arguments = 1
optional_arguments = 0
final_argument_whitespace = False
option_spec = {}
def run(self):
code = code_node()
code['language'] = self.arguments[0]
code['source'] = '\n'.join(self.content)
return [code]
def visit_code_node(self, node):
pass
def depart_code_node(self, node):
klass = 'class="language-{}"'.format(node['language'])
source = cgi.escape(node['source']).encode('ascii', 'xmlcharrefreplace')
link = """<pre {0}><code {0}>{1}</code></pre>""".format(klass, source)
self.body.append(link)
def setup(app):
app.add_directive('prismjs', CodeDirective)
app.add_node(code_node, html=(visit_code_node, depart_code_node))
|
804002f2e3202eda04c364e2fa2010fdea5f1ea2
|
b04cc98a746d1df457183bc14908094a8be00ba1
|
/demo/models/__init__.py
|
4c9deb0855393b41c7cd55046fd8ff7992513f2f
|
[
"Apache-2.0"
] |
permissive
|
PaddlePaddle/PaddleSlim
|
a3bcaef0c92016b7f6946d58787f87c7db8ff3f8
|
bb02b103a89a09635941bc0bbbd38506d7412468
|
refs/heads/develop
| 2023-08-31T01:47:27.824722
| 2023-08-25T08:06:08
| 2023-08-25T08:06:08
| 228,290,594
| 1,534
| 402
|
Apache-2.0
| 2023-08-29T09:37:55
| 2019-12-16T02:56:50
|
Python
|
UTF-8
|
Python
| false
| false
| 737
|
py
|
__init__.py
|
from __future__ import absolute_import
from .mobilenet import MobileNet
from .resnet import ResNet34, ResNet50
from .resnet_vd import ResNet50_vd, ResNet101_vd
from .mobilenet_v2 import MobileNetV2_x0_25, MobileNetV2
from .pvanet import PVANet
from .slimfacenet import SlimFaceNet_A_x0_60, SlimFaceNet_B_x0_75, SlimFaceNet_C_x0_75
from .mobilenet_v3 import *
from .dygraph import *
__all__ = [
"model_list", "MobileNet", "ResNet34", "ResNet50", "MobileNetV2", "PVANet",
"ResNet50_vd", "ResNet101_vd", "MobileNetV2_x0_25"
]
model_list = [
'MobileNet', 'ResNet34', 'ResNet50', 'MobileNetV2', 'PVANet', 'ResNet50_vd',
"ResNet101_vd", "MobileNetV2_x0_25"
]
__all__ += mobilenet_v3.__all__
model_list += mobilenet_v3.__all__
|
4cb990acec9eea0be3f056bcdfb00b4ca523f522
|
e7e536df0263ae2a7ac44ef30f19110f891213a9
|
/src/tests/schedule/test_schedule_models_slot.py
|
c0a9ac506d20b8ac6674edb6b3936a6c869a5081
|
[
"Apache-2.0"
] |
permissive
|
pretalx/pretalx
|
b3b3808266f4810dfc8445dc1ed33ba398e7a9c2
|
269dce90a6fb1ce0064008c40ce5dd4dad61e2e3
|
refs/heads/main
| 2023-09-05T11:09:23.538325
| 2023-09-04T19:57:47
| 2023-09-04T19:57:47
| 83,081,285
| 563
| 195
|
Apache-2.0
| 2023-09-13T19:12:28
| 2017-02-24T20:46:51
|
Python
|
UTF-8
|
Python
| false
| false
| 3,735
|
py
|
test_schedule_models_slot.py
|
import datetime as dt
import pytest
from django.utils.timezone import now
from django_scopes import scope
from pretalx.schedule.models import TalkSlot
@pytest.mark.django_db
@pytest.mark.parametrize(
"start,end,duration,has_submission",
(
(None, None, "sub", True),
(0, None, "sub", True),
(None, 0, "sub", True),
(0, 1, 1, True),
(None, None, None, False),
(0, None, None, False),
(None, 0, None, False),
(0, 1, 1, False),
),
)
def test_slot_duration(submission, start, end, duration, has_submission):
_now = now()
if start is not None:
start = _now + dt.timedelta(minutes=start)
if end is not None:
end = _now + dt.timedelta(minutes=end)
slot = TalkSlot(
start=start, end=end, submission=submission if has_submission else None
)
if duration == "sub":
assert slot.duration == submission.get_duration()
else:
assert slot.duration == duration
@pytest.mark.django_db
def test_slot_string(slot, room):
str(slot)
str(room)
@pytest.mark.django_db
def test_slot_build_empty_ical(slot):
slot.room = None
assert not slot.build_ical(None)
@pytest.mark.django_db
def test_slot_warnings_without_room(slot):
with scope(event=slot.event):
slot.room = None
slot.save()
assert slot.start
assert not slot.schedule.get_talk_warnings(slot)
@pytest.mark.django_db
def test_slot_warnings_when_room_unavailable(slot, room_availability):
with scope(event=slot.event):
slot.start = room_availability.end
slot.end = slot.start + dt.timedelta(minutes=30)
slot.save()
warnings = slot.schedule.get_talk_warnings(slot)
assert len(warnings) == 1
assert warnings[0]["type"] == "room"
@pytest.mark.django_db
def test_slot_no_warnings_when_room_available(slot, room_availability):
with scope(event=slot.event):
warnings = slot.schedule.get_talk_warnings(slot)
assert not warnings
@pytest.mark.django_db
def test_slot_warning_when_speaker_unavailable(slot, availability, room_availability):
with scope(event=slot.event):
availability.start -= dt.timedelta(days=7)
availability.end -= dt.timedelta(days=7)
availability.person = slot.submission.speakers.first().event_profile(slot.event)
availability.pk = None
availability.save()
warnings = slot.schedule.get_talk_warnings(slot)
assert len(warnings) == 1
assert warnings[0]["type"] == "speaker"
assert (
warnings[0]["message"]
== "Jane Speaker is not available at the scheduled time."
)
@pytest.mark.django_db
def test_slot_warning_when_speaker_overbooked(
slot, availability, other_slot, room_availability
):
with scope(event=slot.event):
availability.person = slot.submission.speakers.first().event_profile(slot.event)
availability.pk = None
availability.save()
other_slot.start = slot.start + dt.timedelta(minutes=10)
other_slot.end = slot.end - dt.timedelta(minutes=10)
other_slot.submission.speakers.add(availability.person.user)
other_slot.save()
other_slot.submission.save()
warnings = slot.schedule.get_talk_warnings(slot)
assert len(warnings) == 2
assert warnings[0]["type"] == "room_overlap"
assert (
warnings[0]["message"]
== "Another session in the same room overlaps with this one."
)
assert warnings[1]["type"] == "speaker"
assert (
warnings[1]["message"]
== "Jane Speaker is scheduled for another session at the same time."
)
|
25aa7e4c9acec1226445a5133279a2875845e1ce
|
eb9f655206c43c12b497c667ba56a0d358b6bc3a
|
/python/helpers/typeshed/stubs/braintree/braintree/disbursement.pyi
|
fc4634d3eedf8d3c0cd4cf5ff86ce291ee9a7ad9
|
[
"Apache-2.0",
"MIT"
] |
permissive
|
JetBrains/intellij-community
|
2ed226e200ecc17c037dcddd4a006de56cd43941
|
05dbd4575d01a213f3f4d69aa4968473f2536142
|
refs/heads/master
| 2023-09-03T17:06:37.560889
| 2023-09-03T11:51:00
| 2023-09-03T12:12:27
| 2,489,216
| 16,288
| 6,635
|
Apache-2.0
| 2023-09-12T07:41:58
| 2011-09-30T13:33:05
| null |
UTF-8
|
Python
| false
| false
| 504
|
pyi
|
disbursement.pyi
|
from typing import Any
from braintree.merchant_account import MerchantAccount as MerchantAccount
from braintree.resource import Resource as Resource
from braintree.transaction_search import TransactionSearch as TransactionSearch
class Disbursement(Resource):
class Type:
Credit: str
Debit: str
amount: Any
merchant_account: Any
def __init__(self, gateway, attributes) -> None: ...
def transactions(self): ...
def is_credit(self): ...
def is_debit(self): ...
|
b4b3d692e9af7c0de03b9a3cb31c7843ecd766d8
|
077ad1330f77172a59c7e7621c72bb653d9ff40a
|
/gfauto/gfauto/recipe_pb2.pyi
|
463f04b8c74e4a37250c4ec8d38a6ded84e4581a
|
[
"Apache-2.0",
"LicenseRef-scancode-generic-cla"
] |
permissive
|
google/graphicsfuzz
|
48e82803e0354c4a92dc913fe36b8f6bae2eaa11
|
aa32d4cb556647ddaaf2048815bd6bca07d1bdab
|
refs/heads/master
| 2023-08-22T18:28:49.278862
| 2022-03-10T09:08:51
| 2022-03-10T09:08:51
| 150,133,859
| 573
| 155
|
Apache-2.0
| 2023-09-06T18:14:58
| 2018-09-24T16:31:05
|
Java
|
UTF-8
|
Python
| false
| false
| 3,045
|
pyi
|
recipe_pb2.pyi
|
# @generated by generate_proto_mypy_stubs.py. Do not edit!
import sys
from gfauto.common_pb2 import (
ArchiveSet as gfauto___common_pb2___ArchiveSet,
)
from google.protobuf.descriptor import (
Descriptor as google___protobuf___descriptor___Descriptor,
FileDescriptor as google___protobuf___descriptor___FileDescriptor,
)
from google.protobuf.message import (
Message as google___protobuf___message___Message,
)
from typing import (
Optional as typing___Optional,
Union as typing___Union,
)
from typing_extensions import (
Literal as typing_extensions___Literal,
)
builtin___bool = bool
builtin___bytes = bytes
builtin___float = float
builtin___int = int
if sys.version_info < (3,):
builtin___buffer = buffer
builtin___unicode = unicode
DESCRIPTOR: google___protobuf___descriptor___FileDescriptor = ...
class Recipe(google___protobuf___message___Message):
DESCRIPTOR: google___protobuf___descriptor___Descriptor = ...
@property
def download_and_extract_archive_set(self) -> type___RecipeDownloadAndExtractArchiveSet: ...
def __init__(self,
*,
download_and_extract_archive_set : typing___Optional[type___RecipeDownloadAndExtractArchiveSet] = None,
) -> None: ...
if sys.version_info >= (3,):
@classmethod
def FromString(cls, s: builtin___bytes) -> Recipe: ...
else:
@classmethod
def FromString(cls, s: typing___Union[builtin___bytes, builtin___buffer, builtin___unicode]) -> Recipe: ...
def HasField(self, field_name: typing_extensions___Literal[u"download_and_extract_archive_set",b"download_and_extract_archive_set",u"recipe",b"recipe"]) -> builtin___bool: ...
def ClearField(self, field_name: typing_extensions___Literal[u"download_and_extract_archive_set",b"download_and_extract_archive_set",u"recipe",b"recipe"]) -> None: ...
def WhichOneof(self, oneof_group: typing_extensions___Literal[u"recipe",b"recipe"]) -> typing_extensions___Literal["download_and_extract_archive_set"]: ...
type___Recipe = Recipe
class RecipeDownloadAndExtractArchiveSet(google___protobuf___message___Message):
DESCRIPTOR: google___protobuf___descriptor___Descriptor = ...
@property
def archive_set(self) -> gfauto___common_pb2___ArchiveSet: ...
def __init__(self,
*,
archive_set : typing___Optional[gfauto___common_pb2___ArchiveSet] = None,
) -> None: ...
if sys.version_info >= (3,):
@classmethod
def FromString(cls, s: builtin___bytes) -> RecipeDownloadAndExtractArchiveSet: ...
else:
@classmethod
def FromString(cls, s: typing___Union[builtin___bytes, builtin___buffer, builtin___unicode]) -> RecipeDownloadAndExtractArchiveSet: ...
def HasField(self, field_name: typing_extensions___Literal[u"archive_set",b"archive_set"]) -> builtin___bool: ...
def ClearField(self, field_name: typing_extensions___Literal[u"archive_set",b"archive_set"]) -> None: ...
type___RecipeDownloadAndExtractArchiveSet = RecipeDownloadAndExtractArchiveSet
|
72524f60253a4cc9792e52737b0d2cc2da6219c6
|
d1c2d00078520cd556f60b7213c27856f8b3460d
|
/sdks/python/apache_beam/runners/direct/consumer_tracking_pipeline_visitor.py
|
2a6fc3ee6093496d16b66b8b5ef3ea4ac85fe412
|
[
"BSD-3-Clause",
"MIT",
"LicenseRef-scancode-protobuf",
"Apache-2.0",
"Python-2.0"
] |
permissive
|
apache/beam
|
ed11b9e043465c720659eac20ac71b5b171bfa88
|
6d5048e05087ea54abc889ce402ae2a0abb9252b
|
refs/heads/master
| 2023-09-04T07:41:07.002653
| 2023-09-01T23:01:05
| 2023-09-01T23:01:05
| 50,904,245
| 7,061
| 4,522
|
Apache-2.0
| 2023-09-14T21:43:38
| 2016-02-02T08:00:06
|
Java
|
UTF-8
|
Python
| false
| false
| 2,709
|
py
|
consumer_tracking_pipeline_visitor.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""ConsumerTrackingPipelineVisitor, a PipelineVisitor object."""
# pytype: skip-file
from typing import TYPE_CHECKING
from typing import Dict
from typing import Set
from apache_beam import pvalue
from apache_beam.pipeline import PipelineVisitor
if TYPE_CHECKING:
from apache_beam.pipeline import AppliedPTransform
class ConsumerTrackingPipelineVisitor(PipelineVisitor):
"""For internal use only; no backwards-compatibility guarantees.
Visitor for extracting value-consumer relations from the graph.
Tracks the AppliedPTransforms that consume each PValue in the Pipeline. This
is used to schedule consuming PTransforms to consume input after the upstream
transform has produced and committed output.
"""
def __init__(self):
self.value_to_consumers = {
} # type: Dict[pvalue.PValue, Set[AppliedPTransform]]
self.root_transforms = set() # type: Set[AppliedPTransform]
self.step_names = {} # type: Dict[AppliedPTransform, str]
self._num_transforms = 0
self._views = set()
@property
def views(self):
"""Returns a list of side inputs extracted from the graph.
Returns:
A list of pvalue.AsSideInput.
"""
return list(self._views)
def visit_transform(self, applied_ptransform):
# type: (AppliedPTransform) -> None
inputs = list(applied_ptransform.inputs)
if inputs:
for input_value in inputs:
if isinstance(input_value, pvalue.PBegin):
self.root_transforms.add(applied_ptransform)
if input_value not in self.value_to_consumers:
self.value_to_consumers[input_value] = set()
self.value_to_consumers[input_value].add(applied_ptransform)
else:
self.root_transforms.add(applied_ptransform)
self.step_names[applied_ptransform] = 's%d' % (self._num_transforms)
self._num_transforms += 1
for side_input in applied_ptransform.side_inputs:
self._views.add(side_input)
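# Editor's illustrative sketch (not part of the original module): the direct runner
# applies this visitor to a fully constructed pipeline roughly as follows; the names
# below are placeholders.
#
#     visitor = ConsumerTrackingPipelineVisitor()
#     pipeline.visit(visitor)
#     # visitor.root_transforms, visitor.value_to_consumers and visitor.step_names
#     # now describe the graph used to schedule the consuming transforms.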
|
ba4465c9d5d77d325386e2a050f1c832a4cdad2c
|
c1ff870879152fba2b54eddfb7591ec322eb3061
|
/plugins/languageAPI/jsAPI/3rdParty/nbind/src/nbind.gypi
|
49c9f42f40044c6e77b5d864e86eaec9454bb776
|
[
"LicenseRef-scancode-free-unknown",
"MIT"
] |
permissive
|
MTASZTAKI/ApertusVR
|
1a9809fb7af81c3cd7fb732ed481ebe4ce66fefa
|
424ec5515ae08780542f33cc4841a8f9a96337b3
|
refs/heads/0.9
| 2022-12-11T20:03:42.926813
| 2019-10-11T09:29:45
| 2019-10-11T09:29:45
| 73,708,854
| 188
| 55
|
MIT
| 2022-12-11T08:53:21
| 2016-11-14T13:48:00
|
C++
|
UTF-8
|
Python
| false
| false
| 2,075
|
gypi
|
nbind.gypi
|
{
"variables": {
"asmjs%": 0
},
"target_name": "nbind",
"type": "loadable_module",
"sources": [
"common.cc",
"reflect.cc"
],
"include_dirs": [
"../include"
],
"conditions": [
['asmjs==1', {
"product_name": "nbind.js",
"type": "executable",
"sources": [ "em/Binding.cc" ],
"ldflags": [ "<@(_cflags)" ],
"copies": [{"destination": "<(INTERMEDIATE_DIR)", "files": ["pre.js", "post.js", "../dist/em-api.js"]}],
"prejs_path": "<(INTERMEDIATE_DIR)/pre.js",
"postjs_path": "<(INTERMEDIATE_DIR)/post.js",
"jslib_path": "<(INTERMEDIATE_DIR)/em-api.js",
"cflags": [
"-O3",
"--pre-js", "<(_prejs_path)",
"--post-js", "<(_postjs_path)",
"--js-library", "<(_jslib_path)",
"-s", "NO_FILESYSTEM=1",
"-s", "EXPORTED_FUNCTIONS=[\"_nbind_init\",\"_nbind_value\"]",
"-s", "DEFAULT_LIBRARY_FUNCS_TO_INCLUDE=[\"nbind_value\",\"\\$$Browser\"]"
],
"cflags_cc": [
"-std=c++11",
"-fno-exceptions"
],
"xcode_settings": {
"GCC_GENERATE_DEBUGGING_SYMBOLS": "NO",
"OTHER_CFLAGS": [ "<@(_cflags)" ],
"OTHER_CPLUSPLUSFLAGS": [ "<@(_cflags_cc)" ],
"OTHER_LDFLAGS": [ "<@(_cflags)" ]
}
}, {
"copies": [{"destination": "<(INTERMEDIATE_DIR)", "files": ["symbols.txt"]}],
"symbols_path": "<(INTERMEDIATE_DIR)/symbols.txt",
"sources": [
"v8/Buffer.cc",
"v8/Binding.cc"
],
"cflags": [
"-O3",
"-fPIC"
],
"cflags_cc": [
"-std=c++11",
"-fexceptions",
"-fPIC"
],
"msbuild_settings": {
"ClCompile": {
"RuntimeTypeInfo": "false",
"ExceptionHandling": "Sync", # /EHsc
"MultiProcessorCompilation": "true"
}
},
"xcode_settings": {
"GCC_ENABLE_CPP_EXCEPTIONS": "YES",
"CLANG_CXX_LANGUAGE_STANDARD": "c++11",
"MACOSX_DEPLOYMENT_TARGET": "10.7",
"OTHER_CFLAGS": [ "<@(_cflags)" ],
"OTHER_CPLUSPLUSFLAGS": [
"<@(_cflags_cc)",
"-stdlib=libc++"
],
"OTHER_LDFLAGS": [
"-stdlib=libc++",
"-exported_symbols_list", "<(_symbols_path)"
]
}
}]
]
}
|
4e598ab69b3f94051f5c3ff8839ee8daf6db7c0f
|
0f3125015ab1f484d22ff77705e464f1ff844370
|
/projects/causal_scene_generation/causal_model/game_characters/vae_svi/utils/game_character_dataloader.py
|
d2465abb0c96e0b7c590ed9497ef053eac66a6cc
|
[
"MIT"
] |
permissive
|
altdeep/causalML
|
8498ff2abc38b992593c1ae9e96a9652ba2ae9d7
|
06c825c30b56a13d2092be2df7e06dbfc2f54cdb
|
refs/heads/master
| 2023-08-22T03:51:55.707797
| 2023-08-15T17:50:41
| 2023-08-15T17:50:41
| 162,078,689
| 318
| 78
|
MIT
| 2023-07-20T13:37:09
| 2018-12-17T05:22:47
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 4,239
|
py
|
game_character_dataloader.py
|
import os
import torch
import pandas as pd
import numpy as np
from PIL import Image
from torch.utils.data import Dataset, DataLoader ,random_split
from torchvision import datasets ,models , transforms
from torch import nn
import torch.nn.functional as F
from functools import reduce
def split_tensor(idx, sample, length):
if idx==0:
return sample[..., 0:length]
return sample[..., length:]
values = {
"action": ["Attacking", "Taunt", "Walking"],
"reaction": ["Dying", "Hurt", "Idle", "Attacking"],
"strength": ["Low", "High"],
"defense": ["Low", "High"],
"attack": ["Low", "High"],
"actor": ["Satyr", "Golem"],
"reactor": ["Satyr", "Golem"],
"Satyr": ["satyr1", "satyr2", "satyr3"],
"Golem": ["golem1", "golem2", "golem3"]
}
class GameCharacterFullData(Dataset):
def __init__(self, transforms, root_path, mode):
# TODO: make the following path more general, e.g. by downloading the data from
# GitHub, so that the dataset is usable by anyone.
self.root_path = root_path
self.train_path = self.root_path + 'train/'
self.test_path = self.root_path + 'test/'
self.train_csv = self.train_path + 'train.csv'
self.test_csv = self.test_path + 'test.csv'
self.mode = mode
self.train_df = pd.read_csv(self.train_csv)
self.test_df = pd.read_csv(self.test_csv)
self.transforms = transforms
def __getitem__(self, idx):
if self.mode == "train":
d = self.train_df.iloc[idx]
image = Image.open(self.train_path + d["img_name"] + ".png").convert("RGB")
else:
d = self.test_df.iloc[idx]
image = Image.open(self.test_path + d["img_name"] + ".png").convert("RGB")
# Extract only the action/reaction labels, because those are what we condition on.
actor = torch.tensor(d[["actor_name_Satyr", "actor_name_Golem"]].tolist(), dtype=torch.float32)
reactor = torch.tensor(d[["reactor_name_Satyr", "reactor_name_Golem"]].tolist(), dtype=torch.float32)
actor_type = torch.tensor(d[["actor_type_type1", "actor_type_type2", "actor_type_type3"]].tolist(), dtype=torch.float32)
#actor_type = split_tensor(actor_idx, actor_type, len(values[(values["actor"][actor_idx])]))
reactor_type = torch.tensor(d[["reactor_type_type1", "reactor_type_type2", "reactor_type_type3"]].tolist(), dtype=torch.float32)
#reactor_type = split_tensor(reactor_idx, reactor_type, len(values[(values["actor"][reactor_idx])]))
action = torch.tensor(d[["actor_action_Attacking", "actor_action_Taunt", "actor_action_Walking"]].tolist(), dtype=torch.float32)
reaction = torch.tensor(d[["reactor_action_Dying", "reactor_action_Hurt", "reactor_action_Idle", "reactor_action_Attacking", ]].tolist(), dtype=torch.float32)
cols_order = ["actor_name_Satyr", "actor_name_Golem", "actor_type_type1",
"actor_type_type2", "actor_type_type3", "actor_action_Attacking", "actor_action_Taunt", "actor_action_Walking",
"reactor_name_Satyr", "reactor_name_Golem", "reactor_type_type1", "reactor_type_type2",
"reactor_type_type3","reactor_action_Dying", "reactor_action_Hurt", "reactor_action_Idle", "reactor_action_Attacking"]
label = torch.tensor(d[cols_order].tolist(), dtype=torch.float32)
if self.transforms is not None:
xp = self.transforms(image)
# transform x to a linear tensor from bx * a1 * a2 * ... --> bs * A
#xp_1d_size = reduce(lambda a, b: a * b, xp.size()[1:])
#xp = xp.view(-1, xp_1d_size)
#xp = xp.squeeze(0)
assert not np.isnan(xp.sum())
return xp, label, actor, reactor, actor_type, reactor_type, action, reaction
def __len__(self):
if self.mode == "train":
return self.train_df.shape[0]
else:
return self.test_df.shape[0]
def setup_data_loaders(dataset, root_path, batch_size, transforms):
train_dataset = dataset(transforms["train"], root_path, mode="train")
test_dataset = dataset(transforms["test"], root_path, mode="test")
train_loader = DataLoader(train_dataset, shuffle=True, batch_size=batch_size)
test_loader = DataLoader(test_dataset, shuffle=True, batch_size=batch_size)
return train_loader, test_loader
|
517c7db8f6a25b1d0f71e488d66811a587e3348c
|
fa1ad2e2ac7e376fc7cb3b3a6e1bb88eed3e80be
|
/govern/data-quality/soda-core/soda/core/soda/sodacl/antlr/SodaCLAntlrVisitor.py
|
3c4bfd4d808d35e803803fd89cedf190e90d14ad
|
[
"Apache-2.0",
"BSD-3-Clause",
"MIT",
"LicenseRef-scancode-free-unknown"
] |
permissive
|
alldatacenter/alldata
|
7bc7713c9f1d56ad6b8e59ea03206d1073b7e047
|
8d5f9a2d49ab8f9e85ccf058cb02c2fda287afc6
|
refs/heads/master
| 2023-08-05T07:32:25.442740
| 2023-08-03T13:17:24
| 2023-08-03T13:17:24
| 213,321,771
| 774
| 250
|
Apache-2.0
| 2023-09-06T17:35:32
| 2019-10-07T07:36:18
| null |
UTF-8
|
Python
| false
| false
| 8,712
|
py
|
SodaCLAntlrVisitor.py
|
# Generated from /Users/vijay/work/soda/code/soda-core/soda/core/soda/sodacl/antlr/SodaCLAntlr.g4 by ANTLR 4.11.1
from antlr4 import *
if __name__ is not None and "." in __name__:
from .SodaCLAntlrParser import SodaCLAntlrParser
else:
from SodaCLAntlrParser import SodaCLAntlrParser
# This class defines a complete generic visitor for a parse tree produced by SodaCLAntlrParser.
class SodaCLAntlrVisitor(ParseTreeVisitor):
# Visit a parse tree produced by SodaCLAntlrParser#check.
def visitCheck(self, ctx:SodaCLAntlrParser.CheckContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by SodaCLAntlrParser#freshness_check.
def visitFreshness_check(self, ctx:SodaCLAntlrParser.Freshness_checkContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by SodaCLAntlrParser#freshness_variable.
def visitFreshness_variable(self, ctx:SodaCLAntlrParser.Freshness_variableContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by SodaCLAntlrParser#warn_qualifier.
def visitWarn_qualifier(self, ctx:SodaCLAntlrParser.Warn_qualifierContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by SodaCLAntlrParser#failed_rows_check.
def visitFailed_rows_check(self, ctx:SodaCLAntlrParser.Failed_rows_checkContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by SodaCLAntlrParser#group_by_check.
def visitGroup_by_check(self, ctx:SodaCLAntlrParser.Group_by_checkContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by SodaCLAntlrParser#row_count_comparison_check.
def visitRow_count_comparison_check(self, ctx:SodaCLAntlrParser.Row_count_comparison_checkContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by SodaCLAntlrParser#metric_check.
def visitMetric_check(self, ctx:SodaCLAntlrParser.Metric_checkContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by SodaCLAntlrParser#default_anomaly_threshold.
def visitDefault_anomaly_threshold(self, ctx:SodaCLAntlrParser.Default_anomaly_thresholdContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by SodaCLAntlrParser#change_over_time.
def visitChange_over_time(self, ctx:SodaCLAntlrParser.Change_over_timeContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by SodaCLAntlrParser#change_over_time_config.
def visitChange_over_time_config(self, ctx:SodaCLAntlrParser.Change_over_time_configContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by SodaCLAntlrParser#change_aggregation.
def visitChange_aggregation(self, ctx:SodaCLAntlrParser.Change_aggregationContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by SodaCLAntlrParser#same_day_last_week.
def visitSame_day_last_week(self, ctx:SodaCLAntlrParser.Same_day_last_weekContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by SodaCLAntlrParser#percent.
def visitPercent(self, ctx:SodaCLAntlrParser.PercentContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by SodaCLAntlrParser#anomaly_score.
def visitAnomaly_score(self, ctx:SodaCLAntlrParser.Anomaly_scoreContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by SodaCLAntlrParser#metric.
def visitMetric(self, ctx:SodaCLAntlrParser.MetricContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by SodaCLAntlrParser#metric_name.
def visitMetric_name(self, ctx:SodaCLAntlrParser.Metric_nameContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by SodaCLAntlrParser#metric_args.
def visitMetric_args(self, ctx:SodaCLAntlrParser.Metric_argsContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by SodaCLAntlrParser#metric_arg.
def visitMetric_arg(self, ctx:SodaCLAntlrParser.Metric_argContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by SodaCLAntlrParser#threshold.
def visitThreshold(self, ctx:SodaCLAntlrParser.ThresholdContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by SodaCLAntlrParser#between_threshold.
def visitBetween_threshold(self, ctx:SodaCLAntlrParser.Between_thresholdContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by SodaCLAntlrParser#comparator_threshold.
def visitComparator_threshold(self, ctx:SodaCLAntlrParser.Comparator_thresholdContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by SodaCLAntlrParser#zones_threshold.
def visitZones_threshold(self, ctx:SodaCLAntlrParser.Zones_thresholdContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by SodaCLAntlrParser#outcome.
def visitOutcome(self, ctx:SodaCLAntlrParser.OutcomeContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by SodaCLAntlrParser#zone_comparator.
def visitZone_comparator(self, ctx:SodaCLAntlrParser.Zone_comparatorContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by SodaCLAntlrParser#comparator.
def visitComparator(self, ctx:SodaCLAntlrParser.ComparatorContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by SodaCLAntlrParser#threshold_value.
def visitThreshold_value(self, ctx:SodaCLAntlrParser.Threshold_valueContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by SodaCLAntlrParser#freshness_threshold_value.
def visitFreshness_threshold_value(self, ctx:SodaCLAntlrParser.Freshness_threshold_valueContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by SodaCLAntlrParser#reference_check.
def visitReference_check(self, ctx:SodaCLAntlrParser.Reference_checkContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by SodaCLAntlrParser#source_column_name.
def visitSource_column_name(self, ctx:SodaCLAntlrParser.Source_column_nameContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by SodaCLAntlrParser#target_column_name.
def visitTarget_column_name(self, ctx:SodaCLAntlrParser.Target_column_nameContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by SodaCLAntlrParser#section_header.
def visitSection_header(self, ctx:SodaCLAntlrParser.Section_headerContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by SodaCLAntlrParser#table_checks_header.
def visitTable_checks_header(self, ctx:SodaCLAntlrParser.Table_checks_headerContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by SodaCLAntlrParser#partition_name.
def visitPartition_name(self, ctx:SodaCLAntlrParser.Partition_nameContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by SodaCLAntlrParser#table_filter_header.
def visitTable_filter_header(self, ctx:SodaCLAntlrParser.Table_filter_headerContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by SodaCLAntlrParser#column_configurations_header.
def visitColumn_configurations_header(self, ctx:SodaCLAntlrParser.Column_configurations_headerContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by SodaCLAntlrParser#checks_for_each_dataset_header.
def visitChecks_for_each_dataset_header(self, ctx:SodaCLAntlrParser.Checks_for_each_dataset_headerContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by SodaCLAntlrParser#checks_for_each_column_header.
def visitChecks_for_each_column_header(self, ctx:SodaCLAntlrParser.Checks_for_each_column_headerContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by SodaCLAntlrParser#signed_number.
def visitSigned_number(self, ctx:SodaCLAntlrParser.Signed_numberContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by SodaCLAntlrParser#number.
def visitNumber(self, ctx:SodaCLAntlrParser.NumberContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by SodaCLAntlrParser#integer.
def visitInteger(self, ctx:SodaCLAntlrParser.IntegerContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by SodaCLAntlrParser#identifier.
def visitIdentifier(self, ctx:SodaCLAntlrParser.IdentifierContext):
return self.visitChildren(ctx)
del SodaCLAntlrParser
|
bf0121bddb63ed61a74523b05ffc06b3ee6b4176
|
b2bcf07493b5a1bbfb7e29c7f13ac0b380cefead
|
/deprecated/scripts/ica_demo_uniform.py
|
0213484484ff108ff8f4fdfcf4ce5c7f8c341924
|
[
"MIT"
] |
permissive
|
probml/pyprobml
|
e1952927bceec676eb414f9342470ba4b8e6703b
|
9cc22f3238ae092c2b9bff65d6283c93d38d25d4
|
refs/heads/master
| 2023-08-31T07:36:11.603301
| 2023-08-13T02:47:12
| 2023-08-13T02:47:12
| 65,924,871
| 6,263
| 1,598
|
MIT
| 2023-01-20T23:34:23
| 2016-08-17T16:42:24
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 1,264
|
py
|
ica_demo_uniform.py
|
# Comparing ICA and PCA on data from a 2d uniform distribution
# Author : Aleyna Kara
# This file is based on https://github.com/probml/pmtk3/blob/master/demos/icaDemoUniform.m
import superimport
from sklearn.decomposition import PCA, FastICA
import numpy as np
import matplotlib.pyplot as plt
import pyprobml_utils as pml
def plot_samples(S, title, file_name):
min_x, max_x = -4, 4
min_y, max_y = -3, 3
plt.scatter(S[:, 0], S[:, 1], marker='o',s=16)
plt.hlines(0, min_x, max_x, linewidth=2)
plt.vlines(0, min_y, max_y, linewidth=2)
plt.xlim(min_x, max_x)
plt.ylim(min_y, max_y)
plt.title(title)
pml.savefig(f'{file_name}.pdf')
plt.show()
np.random.seed(2)
N = 100
A = np.array([[2,3],[2,1]])* 0.3 # Mixing matrix
S_uni = (np.random.rand(N, 2)* 2 - 1)* np.sqrt(3)
X_uni = S_uni @ A.T
pca = PCA(whiten=True)
S_pca = pca.fit(X_uni).transform(X_uni)
ica = FastICA()
S_ica = ica.fit_transform(X_uni)
S_ica /= S_ica.std(axis=0)
plot_samples(S_uni, 'Uniform Data', 'ica-uniform-source')
plot_samples(X_uni, 'Uniform Data after Linear Mixing', 'ica-uniform-mixed')
plot_samples(S_pca, 'PCA Applied to Mixed Data from Uniform Source', 'ica-uniform-PCA')
plot_samples(S_ica, 'ICA Applied to Mixed Data from Uniform Source', 'ica-uniform-ICA')
|
217c4077a32ac64f55b3bd2fa8e9308f7a643385
|
e5e0d729f082999a9bec142611365b00f7bfc684
|
/tensorflow/contrib/model_pruning/python/layers/rnn_cells_test.py
|
586c6c7bfcbd188fc6fca4a9accbe0e5533c90c7
|
[
"Apache-2.0"
] |
permissive
|
NVIDIA/tensorflow
|
ed6294098c7354dfc9f09631fc5ae22dbc278138
|
7cbba04a2ee16d21309eefad5be6585183a2d5a9
|
refs/heads/r1.15.5+nv23.03
| 2023-08-16T22:25:18.037979
| 2023-08-03T22:09:23
| 2023-08-03T22:09:23
| 263,748,045
| 763
| 117
|
Apache-2.0
| 2023-07-03T15:45:19
| 2020-05-13T21:34:32
|
C++
|
UTF-8
|
Python
| false
| false
| 3,613
|
py
|
rnn_cells_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for creating different number of masks in rnn_cells."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.model_pruning.python import pruning
from tensorflow.contrib.model_pruning.python.layers import rnn_cells
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import rnn_cell as tf_rnn_cells
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
class RnnCellsTest(test.TestCase):
def setUp(self):
super(RnnCellsTest, self).setUp()
self.batch_size = 8
self.dim = 10
def testMaskedBasicLSTMCell(self):
expected_num_masks = 1
expected_num_rows = 2 * self.dim
expected_num_cols = 4 * self.dim
with self.cached_session():
inputs = variables.Variable(
random_ops.random_normal([self.batch_size, self.dim]))
c = variables.Variable(
random_ops.random_normal([self.batch_size, self.dim]))
h = variables.Variable(
random_ops.random_normal([self.batch_size, self.dim]))
state = tf_rnn_cells.LSTMStateTuple(c, h)
lstm_cell = rnn_cells.MaskedBasicLSTMCell(self.dim)
lstm_cell(inputs, state)
self.assertEqual(len(pruning.get_masks()), expected_num_masks)
self.assertEqual(len(pruning.get_masked_weights()), expected_num_masks)
self.assertEqual(len(pruning.get_thresholds()), expected_num_masks)
self.assertEqual(len(pruning.get_weights()), expected_num_masks)
for mask in pruning.get_masks():
self.assertEqual(mask.shape, (expected_num_rows, expected_num_cols))
for weight in pruning.get_weights():
self.assertEqual(weight.shape, (expected_num_rows, expected_num_cols))
def testMaskedLSTMCell(self):
expected_num_masks = 1
expected_num_rows = 2 * self.dim
expected_num_cols = 4 * self.dim
with self.cached_session():
inputs = variables.Variable(
random_ops.random_normal([self.batch_size, self.dim]))
c = variables.Variable(
random_ops.random_normal([self.batch_size, self.dim]))
h = variables.Variable(
random_ops.random_normal([self.batch_size, self.dim]))
state = tf_rnn_cells.LSTMStateTuple(c, h)
lstm_cell = rnn_cells.MaskedLSTMCell(self.dim)
lstm_cell(inputs, state)
self.assertEqual(len(pruning.get_masks()), expected_num_masks)
self.assertEqual(len(pruning.get_masked_weights()), expected_num_masks)
self.assertEqual(len(pruning.get_thresholds()), expected_num_masks)
self.assertEqual(len(pruning.get_weights()), expected_num_masks)
for mask in pruning.get_masks():
self.assertEqual(mask.shape, (expected_num_rows, expected_num_cols))
for weight in pruning.get_weights():
self.assertEqual(weight.shape, (expected_num_rows, expected_num_cols))
if __name__ == '__main__':
test.main()
|
1141ee781f35cb009f1124dd059214817d297925
|
96dcea595e7c16cec07b3f649afd65f3660a0bad
|
/homeassistant/components/escea/discovery.py
|
0d7f3024bfc2254f297d6a73bb4825d583073003
|
[
"Apache-2.0"
] |
permissive
|
home-assistant/core
|
3455eac2e9d925c92d30178643b1aaccf3a6484f
|
80caeafcb5b6e2f9da192d0ea6dd1a5b8244b743
|
refs/heads/dev
| 2023-08-31T15:41:06.299469
| 2023-08-31T14:50:53
| 2023-08-31T14:50:53
| 12,888,993
| 35,501
| 20,617
|
Apache-2.0
| 2023-09-14T21:50:15
| 2013-09-17T07:29:48
|
Python
|
UTF-8
|
Python
| false
| false
| 2,469
|
py
|
discovery.py
|
"""Internal discovery service for Escea Fireplace."""
from __future__ import annotations
from pescea import (
AbstractDiscoveryService,
Controller,
Listener,
discovery_service as pescea_discovery_service,
)
from homeassistant.core import HomeAssistant
from homeassistant.helpers.dispatcher import async_dispatcher_send
from .const import (
DATA_DISCOVERY_SERVICE,
DISPATCH_CONTROLLER_DISCONNECTED,
DISPATCH_CONTROLLER_DISCOVERED,
DISPATCH_CONTROLLER_RECONNECTED,
DISPATCH_CONTROLLER_UPDATE,
)
class DiscoveryServiceListener(Listener):
"""Listener that forwards pescea discovery events to Home Assistant dispatcher signals."""
def __init__(self, hass: HomeAssistant) -> None:
"""Initialise discovery service."""
super().__init__()
self.hass = hass
# Listener interface
def controller_discovered(self, ctrl: Controller) -> None:
"""Handle new controller discovery."""
async_dispatcher_send(self.hass, DISPATCH_CONTROLLER_DISCOVERED, ctrl)
def controller_disconnected(self, ctrl: Controller, ex: Exception) -> None:
"""On disconnect from controller."""
async_dispatcher_send(self.hass, DISPATCH_CONTROLLER_DISCONNECTED, ctrl, ex)
def controller_reconnected(self, ctrl: Controller) -> None:
"""On reconnect to controller."""
async_dispatcher_send(self.hass, DISPATCH_CONTROLLER_RECONNECTED, ctrl)
def controller_update(self, ctrl: Controller) -> None:
"""System update message is received from the controller."""
async_dispatcher_send(self.hass, DISPATCH_CONTROLLER_UPDATE, ctrl)
async def async_start_discovery_service(
hass: HomeAssistant,
) -> AbstractDiscoveryService:
"""Set up the pescea internal discovery."""
discovery_service = hass.data.get(DATA_DISCOVERY_SERVICE)
if discovery_service:
# Already started
return discovery_service
# Discover local services
listener = DiscoveryServiceListener(hass)
discovery_service = pescea_discovery_service(listener)
hass.data[DATA_DISCOVERY_SERVICE] = discovery_service
await discovery_service.start_discovery()
return discovery_service
async def async_stop_discovery_service(hass: HomeAssistant) -> None:
"""Stop the discovery service."""
discovery_service = hass.data.get(DATA_DISCOVERY_SERVICE)
if not discovery_service:
return
await discovery_service.close()
del hass.data[DATA_DISCOVERY_SERVICE]
|
29226e5956f063d18d6e5fde7ad19b65eeb5a7a7
|
ffd5befd5ac5cc31b8ee41cb0992c1ad33c101f3
|
/stacker/blueprints/variables/types.py
|
53e20a9b852f45668f449e277b9be350b444ee75
|
[
"BSD-2-Clause"
] |
permissive
|
cloudtools/stacker
|
53334320b858a83a4b9f130902cca6805e4c6549
|
b357f83596e0f2044a147553ac4fbc16fe3ef97c
|
refs/heads/master
| 2023-08-28T21:55:41.064554
| 2023-08-15T16:22:56
| 2023-08-15T16:22:56
| 28,885,080
| 396
| 104
|
BSD-2-Clause
| 2023-08-15T16:22:58
| 2015-01-06T21:46:05
|
Python
|
UTF-8
|
Python
| false
| false
| 8,785
|
py
|
types.py
|
class TroposphereType(object):
def __init__(self, defined_type, many=False, optional=False,
validate=True):
"""Represents a Troposphere type.
:class:`TroposphereType` will convert the value provided to the variable to
the specified Troposphere type.
Both resource and parameter classes (which are just used to configure
other resources) are acceptable as configuration values.
Complete resource definitions must be dictionaries, with the keys
identifying the resource titles, and the values being used as the
constructor parameters.
Parameter classes can be defined as a dictionary or a list of
dictionaries. In either case, the keys and values will be used directly
as constructor parameters.
Args:
defined_type (type): Troposphere type
many (bool): Whether or not multiple resources can be constructed.
If the defined type is a resource, multiple resources can be
passed as a dictionary of dictionaries.
If it is a parameter class, multiple resources are passed as
a list.
optional (bool): Whether an undefined/null configured value is
acceptable. In that case a value of ``None`` will be passed to
the template, even if ``many`` is enabled.
validate (bool): Whether to validate the generated object on
creation. Should be left enabled unless the object will be
augmented with mandatory parameters in the template code, such
that it must be validated at a later point.
"""
self._validate_type(defined_type)
self._type = defined_type
self._many = many
self._optional = optional
self._validate = validate
def _validate_type(self, defined_type):
if not hasattr(defined_type, "from_dict"):
raise ValueError("Type must have `from_dict` attribute")
@property
def resource_name(self):
return (
getattr(self._type, 'resource_name', None) or self._type.__name__
)
def create(self, value):
"""Create the troposphere type from the value.
Args:
value (Union[dict, list]): A dictionary or list of dictionaries
(see class documentation for details) to use as parameters to
create the Troposphere type instance.
Each dictionary will be passed to the `from_dict` method of the
type.
Returns:
Union[list, type]: Returns the value converted to the troposphere
type
"""
# Explicitly check with len such that non-sequence types throw.
if self._optional and (value is None or len(value) == 0):
return None
if hasattr(self._type, 'resource_type'):
# Our type is a resource, so ensure we have a dict of title to
# parameters
if not isinstance(value, dict):
raise ValueError("Resources must be specified as a dict of "
"title to parameters")
if not self._many and len(value) > 1:
raise ValueError("Only one resource can be provided for this "
"TroposphereType variable")
result = [
self._type.from_dict(title, v) for title, v in value.items()
]
else:
# Our type is for properties, not a resource, so don't use
# titles
if self._many:
result = [self._type.from_dict(None, v) for v in value]
elif not isinstance(value, dict):
raise ValueError("TroposphereType for a single non-resource "
"type must be specified as a dict of "
"parameters")
else:
result = [self._type.from_dict(None, value)]
if self._validate:
for v in result:
v._validate_props()
return result[0] if not self._many else result
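# Editor's illustrative sketch (not part of the original module): a blueprint variable
# of this type might be declared and resolved roughly as follows, assuming
# troposphere's s3 module is available; titles and property values are placeholders.
#
#     from troposphere import s3
#     bucket_type = TroposphereType(s3.Bucket, many=True)
#     buckets = bucket_type.create({
#         "LogBucket": {"BucketName": "example-logs"},
#         "DataBucket": {"BucketName": "example-data"},
#     })  # -> list of validated troposphere s3.Bucket resources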
class CFNType(object):
def __init__(self, parameter_type):
"""Represents a CloudFormation Parameter Type.
:class:`CFNType` can be used as the `type` for a Blueprint variable.
Unlike other variables, a variable with `type` :class:`CFNType`, will
be submitted to CloudFormation as a Parameter.
Args:
parameter_type (str): An AWS specific parameter type
(http://goo.gl/PthovJ)
"""
self.parameter_type = parameter_type
# General CFN types
CFNString = CFNType("String")
CFNNumber = CFNType("Number")
CFNNumberList = CFNType("List<Number>")
CFNCommaDelimitedList = CFNType("CommaDelimitedList")
# AWS-Specific Parameter Types
# https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/parameters-section-structure.html#aws-specific-parameter-types
EC2AvailabilityZoneName = CFNType("AWS::EC2::AvailabilityZone::Name")
EC2ImageId = CFNType("AWS::EC2::Image::Id")
EC2InstanceId = CFNType("AWS::EC2::Instance::Id")
EC2KeyPairKeyName = CFNType("AWS::EC2::KeyPair::KeyName")
EC2SecurityGroupGroupName = CFNType("AWS::EC2::SecurityGroup::GroupName")
EC2SecurityGroupId = CFNType("AWS::EC2::SecurityGroup::Id")
EC2SubnetId = CFNType("AWS::EC2::Subnet::Id")
EC2VolumeId = CFNType("AWS::EC2::Volume::Id")
EC2VPCId = CFNType("AWS::EC2::VPC::Id")
Route53HostedZoneId = CFNType("AWS::Route53::HostedZone::Id")
EC2AvailabilityZoneNameList = CFNType("List<AWS::EC2::AvailabilityZone::Name>")
EC2ImageIdList = CFNType("List<AWS::EC2::Image::Id>")
EC2InstanceIdList = CFNType("List<AWS::EC2::Instance::Id>")
EC2SecurityGroupGroupNameList = CFNType(
"List<AWS::EC2::SecurityGroup::GroupName>")
EC2SecurityGroupIdList = CFNType("List<AWS::EC2::SecurityGroup::Id>")
EC2SubnetIdList = CFNType("List<AWS::EC2::Subnet::Id>")
EC2VolumeIdList = CFNType("List<AWS::EC2::Volume::Id>")
EC2VPCIdList = CFNType("List<AWS::EC2::VPC::Id>")
Route53HostedZoneIdList = CFNType("List<AWS::Route53::HostedZone::Id>")
# SSM Parameter Types
# https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/parameters-section-structure.html#aws-ssm-parameter-types
SSMParameterName = CFNType("AWS::SSM::Parameter::Name")
SSMParameterValueString = CFNType("AWS::SSM::Parameter::Value<String>")
SSMParameterValueStringList = CFNType(
"AWS::SSM::Parameter::Value<List<String>>")
SSMParameterValueCommaDelimitedList = CFNType(
"AWS::SSM::Parameter::Value<CommaDelimitedList>")
# Each AWS-specific type here is repeated from the list above
SSMParameterValueEC2AvailabilityZoneName = CFNType(
"AWS::SSM::Parameter::Value<AWS::EC2::AvailabilityZone::Name>")
SSMParameterValueEC2ImageId = CFNType(
"AWS::SSM::Parameter::Value<AWS::EC2::Image::Id>")
SSMParameterValueEC2InstanceId = CFNType(
"AWS::SSM::Parameter::Value<AWS::EC2::Instance::Id>")
SSMParameterValueEC2KeyPairKeyName = CFNType(
"AWS::SSM::Parameter::Value<AWS::EC2::KeyPair::KeyName>")
SSMParameterValueEC2SecurityGroupGroupName = CFNType(
"AWS::SSM::Parameter::Value<AWS::EC2::SecurityGroup::GroupName>")
SSMParameterValueEC2SecurityGroupId = CFNType(
"AWS::SSM::Parameter::Value<AWS::EC2::SecurityGroup::Id>")
SSMParameterValueEC2SubnetId = CFNType(
"AWS::SSM::Parameter::Value<AWS::EC2::Subnet::Id>")
SSMParameterValueEC2VolumeId = CFNType(
"AWS::SSM::Parameter::Value<AWS::EC2::Volume::Id>")
SSMParameterValueEC2VPCId = CFNType(
"AWS::SSM::Parameter::Value<AWS::EC2::VPC::Id>")
SSMParameterValueRoute53HostedZoneId = CFNType(
"AWS::SSM::Parameter::Value<AWS::Route53::HostedZone::Id>")
SSMParameterValueEC2AvailabilityZoneNameList = CFNType(
"AWS::SSM::Parameter::Value<List<AWS::EC2::AvailabilityZone::Name>>")
SSMParameterValueEC2ImageIdList = CFNType(
"AWS::SSM::Parameter::Value<List<AWS::EC2::Image::Id>>")
SSMParameterValueEC2InstanceIdList = CFNType(
"AWS::SSM::Parameter::Value<List<AWS::EC2::Instance::Id>>")
SSMParameterValueEC2SecurityGroupGroupNameList = CFNType(
"AWS::SSM::Parameter::Value<List<AWS::EC2::SecurityGroup::GroupName>>")
SSMParameterValueEC2SecurityGroupIdList = CFNType(
"AWS::SSM::Parameter::Value<List<AWS::EC2::SecurityGroup::Id>>")
SSMParameterValueEC2SubnetIdList = CFNType(
"AWS::SSM::Parameter::Value<List<AWS::EC2::Subnet::Id>>")
SSMParameterValueEC2VolumeIdList = CFNType(
"AWS::SSM::Parameter::Value<List<AWS::EC2::Volume::Id>>")
SSMParameterValueEC2VPCIdList = CFNType(
"AWS::SSM::Parameter::Value<List<AWS::EC2::VPC::Id>>")
SSMParameterValueRoute53HostedZoneIdList = CFNType(
"AWS::SSM::Parameter::Value<List<AWS::Route53::HostedZone::Id>>")
|
c2d175830a1bcad21e63ba9075acc2bb49beed83
|
977f7a7386899a5d0152b29b57ec26682b430437
|
/tools/tbl2gff3/tbl2gff3.py
|
29c622e8e93222e1e23cca18c8baf0e74a0460c2
|
[
"GPL-3.0-only",
"MIT"
] |
permissive
|
galaxyproject/tools-iuc
|
0b87e21e1cb075ca6dc6b12622bc4e538a7c6507
|
96f8a533278b4b6394aebd7a8f537513b0d29b1a
|
refs/heads/main
| 2023-08-31T16:14:34.563541
| 2023-08-31T04:31:22
| 2023-08-31T04:31:22
| 23,992,530
| 164
| 508
|
MIT
| 2023-09-13T19:41:14
| 2014-09-13T11:18:49
|
HTML
|
UTF-8
|
Python
| false
| false
| 2,986
|
py
|
tbl2gff3.py
|
#!/usr/bin/env python
import argparse
import collections
import csv
import sys
from BCBio import GFF
from Bio.Seq import Seq
from Bio.SeqFeature import FeatureLocation, SeqFeature
from Bio.SeqRecord import SeqRecord
def c(row, v, default=None):
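# Resolve a column spec: if `v` parses as an integer it is treated as a 1-based
# column index into `row`; otherwise `v` itself (or `default` when `v` is None) is
# returned as a literal value.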
if v is None:
return default
try:
_ = int(v)
return row[int(v) - 1]
except ValueError:
return v
def tbl2gff3(
table,
rid,
begin,
end,
source=None,
type=None,
score=None,
frame=None,
a=None,
strand_column=None,
strand_value=None,
strand_infer=False,
):
records = collections.OrderedDict()
for row in csv.reader(table, delimiter="\t"):
# print(', '.join(row))
# if we haven't seen this record before, populate it.
recid = c(row, rid)
if recid not in records:
records[recid] = SeqRecord(Seq("ACTG"), id=recid)
r = records[recid]
q = {}
if c(row, score) is not None:
q["score"] = float(c(row, score))
q["source"] = c(row, source, "tbl2gff3")
begin_i = int(c(row, begin))
end_i = int(c(row, end))
begin_f = min(begin_i, end_i)
end_f = max(begin_i, end_i)
_str = None
if strand_column is not None:
_str = int(c(row, strand_column))
elif strand_value is not None:
_str = int(strand_value)
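# When inferring, begin_i > begin_f can only happen if the original begin exceeded
# the end, which marks the feature as lying on the reverse strand.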
if strand_infer:
if begin_i > begin_f:
_str = -1
else:
_str = 1
if a is not None:
for x in a:
k, v = x.split(":", 1)
_v = c(row, v)
if k in q:
q[k].append(_v)
else:
q[k] = [_v]
f = SeqFeature(
FeatureLocation(begin_f, end_f),
type=c(row, type),
strand=_str,
qualifiers=q,
)
r.features.append(f)
return records
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Convert tables to gff3", epilog="")
parser.add_argument("table", type=argparse.FileType("r"), help="Tabular Input")
parser.add_argument("rid", help="id column")
parser.add_argument("begin", help="begin column")
parser.add_argument("end", help="end column")
parser.add_argument("--type", help="feature type column")
parser.add_argument("--score", help="score column")
parser.add_argument("--source", help="source column")
parser.add_argument("--strand_infer", action='store_true', help="infer strand")
parser.add_argument("--strand_column", help="strand column")
parser.add_argument("--strand_value", help="strand value")
# parser.add_argument('--frame', help='frame column')
parser.add_argument("-a", action="append", help="attribute column (-a k:v)")
args = parser.parse_args()
for rid, rec in tbl2gff3(**vars(args)).items():
GFF.write([rec], sys.stdout)
|
d64d3ef95a878a152653353d4c08a4245aa6f714
|
3395a234e7c80d011607e79c49cd48bf516f256b
|
/dependencies/jedi/inference/compiled/__init__.py
|
b5d435fc4e2f3802928043ee3f809ab1829bcbcb
|
[
"MIT"
] |
permissive
|
srusskih/SublimeJEDI
|
67329b72e184bc9584843968dcc534a002c797a1
|
95c185d778425c04536d53517b0e3fe6dedf8e59
|
refs/heads/master
| 2023-08-24T11:30:37.801834
| 2022-08-30T09:04:17
| 2022-08-30T09:04:17
| 6,241,108
| 669
| 125
|
MIT
| 2022-08-30T09:04:18
| 2012-10-16T08:23:57
|
Python
|
UTF-8
|
Python
| false
| false
| 2,590
|
py
|
__init__.py
|
from jedi._compatibility import unicode
from jedi.inference.compiled.value import CompiledValue, CompiledName, \
CompiledValueFilter, CompiledValueName, create_from_access_path
from jedi.inference.base_value import LazyValueWrapper
def builtin_from_name(inference_state, string):
typing_builtins_module = inference_state.builtins_module
if string in ('None', 'True', 'False'):
builtins, = typing_builtins_module.non_stub_value_set
filter_ = next(builtins.get_filters())
else:
filter_ = next(typing_builtins_module.get_filters())
name, = filter_.get(string)
value, = name.infer()
return value
class ExactValue(LazyValueWrapper):
"""
This class represents exact values, that makes operations like additions
and exact boolean values possible, while still being a "normal" stub.
"""
def __init__(self, compiled_value):
self.inference_state = compiled_value.inference_state
self._compiled_value = compiled_value
def __getattribute__(self, name):
if name in ('get_safe_value', 'execute_operation', 'access_handle',
'negate', 'py__bool__', 'is_compiled'):
return getattr(self._compiled_value, name)
return super(ExactValue, self).__getattribute__(name)
def _get_wrapped_value(self):
instance, = builtin_from_name(
self.inference_state, self._compiled_value.name.string_name).execute_with_values()
return instance
def __repr__(self):
return '<%s: %s>' % (self.__class__.__name__, self._compiled_value)
def create_simple_object(inference_state, obj):
"""
Only allows creations of objects that are easily picklable across Python
versions.
"""
assert type(obj) in (int, float, str, bytes, unicode, slice, complex, bool), obj
compiled_value = create_from_access_path(
inference_state,
inference_state.compiled_subprocess.create_simple_object(obj)
)
return ExactValue(compiled_value)
def get_string_value_set(inference_state):
return builtin_from_name(inference_state, u'str').execute_with_values()
def load_module(inference_state, dotted_name, **kwargs):
# Temporary, some tensorflow builtins cannot be loaded, so it's tried again
# and again and it's really slow.
if dotted_name.startswith('tensorflow.'):
return None
access_path = inference_state.compiled_subprocess.load_module(dotted_name=dotted_name, **kwargs)
if access_path is None:
return None
return create_from_access_path(inference_state, access_path)
|
09c3e033224070c58ab14e0d39e2c73effa625b1
|
9b1a345a2377bbac949856f2aac8947ab2e26205
|
/tests/unit-tests/mocks/gdblib.py
|
0d5fdf71b19372e557035ce3bef30b029d8e893d
|
[
"MIT"
] |
permissive
|
pwndbg/pwndbg
|
4577c02d213fa347891d7e0f5415beb2a497c002
|
f642efbd92e30c7b892b5e816d353cc5a03e5cb5
|
refs/heads/dev
| 2023-08-31T10:48:45.172197
| 2023-08-27T09:55:46
| 2023-08-27T10:12:02
| 31,181,767
| 5,954
| 1,080
|
MIT
| 2023-09-11T11:03:02
| 2015-02-22T21:35:19
|
Python
|
UTF-8
|
Python
| false
| false
| 1,012
|
py
|
gdblib.py
|
from __future__ import annotations
import sys
import types
from unittest.mock import MagicMock
from mocks.arch import Amd64Arch
from mocks.config import Config
from mocks.typeinfo import Amd64TypeInfo
class GdbLib(types.ModuleType):
def __init__(self, module_name):
super().__init__(module_name)
self.config_mod = Config(module_name + ".config")
self.arch = Amd64Arch(module_name + ".arch")
self.typeinfo = Amd64TypeInfo(module_name + ".typeinfo")
self.regs = MagicMock(__name__=module_name + ".regs")
self.prompt = MagicMock()
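# Register the mock submodules in sys.modules so imports such as
# pwndbg.gdblib.config resolve to these mocks.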
sys.modules[self.config_mod.__name__] = self.config_mod
sys.modules[self.arch.__name__] = self.arch
sys.modules[self.typeinfo.__name__] = self.typeinfo
sys.modules[self.regs.__name__] = self.regs
def load_gdblib(self):
pass
module_name = "pwndbg.gdblib"
module = GdbLib(module_name)
sys.modules[module_name] = module
import pwndbg
pwndbg.gdblib = sys.modules["pwndbg.gdblib"]
|
36838f739fbad35a7970639f2d9c3d6cd4bd95e2
|
767b09cdf51803d533ebb5906042ed1f92f91a7c
|
/tests/lm/modules/token_embedders/bidirectional_lm_test.py
|
2151baafaa3ea2e80e47ee3331c781d1c1135933
|
[
"Apache-2.0"
] |
permissive
|
allenai/allennlp-models
|
e93bb3b084e99e211d5ebb515b765de117e41970
|
b1f372248c17ad12684d344955fbcd98e957e77e
|
refs/heads/main
| 2023-09-05T01:57:37.434101
| 2022-11-24T00:06:05
| 2022-11-24T00:06:05
| 246,170,605
| 520
| 172
|
Apache-2.0
| 2022-11-24T00:06:06
| 2020-03-10T00:22:21
|
Python
|
UTF-8
|
Python
| false
| false
| 911
|
py
|
bidirectional_lm_test.py
|
from tests import FIXTURES_ROOT
from tests.lm.modules.token_embedders.language_model_test import TestLanguageModelTokenEmbedder
class TestBidirectionalLanguageModelTokenEmbedder(TestLanguageModelTokenEmbedder):
def setup_method(self):
super().setup_method()
self.set_up_model(
FIXTURES_ROOT
/ "lm"
/ "language_model"
/ "bidirectional_lm_characters_token_embedder.jsonnet",
FIXTURES_ROOT / "lm" / "conll2003.txt",
)
class TestBidirectionalLanguageModelTokenEmbedderWithoutBosEos(TestLanguageModelTokenEmbedder):
def setup_method(self):
super().setup_method()
self.set_up_model(
FIXTURES_ROOT
/ "lm"
/ "language_model"
/ "bidirectional_lm_characters_token_embedder_without_bos_eos.jsonnet",
FIXTURES_ROOT / "lm" / "conll2003.txt",
)
|
26d4a54dba1da8255e067b19957ad060d125b831
|
b8d80a23cb27af08a1c4d34b478c76228ae5fbb4
|
/insights/parsers/initscript.py
|
863e310a89fc6ef323ddb0f632460d28ee6ad401
|
[
"Apache-2.0"
] |
permissive
|
RedHatInsights/insights-core
|
bb243e2bf8a52446fefb95ebe05478d6e35efe2e
|
b0ea07fc3f4dd8801b505fe70e9b36e628152c4a
|
refs/heads/master
| 2023-09-04T21:15:40.456257
| 2023-09-04T10:46:56
| 2023-09-04T10:46:56
| 92,518,221
| 144
| 290
|
Apache-2.0
| 2023-09-14T02:40:13
| 2017-05-26T14:23:11
|
Python
|
UTF-8
|
Python
| false
| false
| 3,456
|
py
|
initscript.py
|
"""
InitScript - files ``/etc/rc.d/init.d/*``
=========================================
InitScript is a parser for the initscripts in ``/etc/rc.d/init.d``.
Because this parser read multiple files, the initscripts are stored as a list
within the parser and need to be iterated through in order to find specific
initscripts.
Examples:
>>> for initscript in shared[InitScript]: # Parser contains list of all initscripts
... print("Name:", initscript.file_name)
...
Name: netconsole
Name: rhnsd
"""
import re
from insights.core import Parser
from insights.core.exceptions import ParseException
from insights.core.plugins import parser
from insights.specs import Specs
CHKCONFIG_REGEX = re.compile(r"^#\s+chkconfig:\s+.*$")
LSB_REGEX = re.compile(r"^#\s+Provides:\s+.*$")
SHEBANG_REGEX = re.compile(r"^#!\s*/.*$")
COMMENT_REGEX = re.compile(r"^\s*#.*$")
STARTSTOPSTATUS_REGEX = re.compile(r"\b(start|stop|status)\b")
class EmptyFileException(ParseException):
pass
class NotInitscriptException(ParseException):
pass
@parser(Specs.initscript)
class InitScript(Parser):
"""
Parse initscript files. Each item is a dictionary with following fields:
Attributes:
file_name (str): initscript name
file_path (str): initscript path without leading '/'
file_content (list): initscript content, line by line
Because some files may not be real initscripts, to determine whether a file
in ``etc/rc.d/init.d/`` is an initscript, the parser checks for
``# chkconfig: <values>`` or ``# Provides: <names>`` strings in the script.
If that matches, then it assumes it is an initscript.
Otherwise, it tries to find out if it is by searching for
* shebang (e.g. ``#!/bin/bash``) on first line
* ``start``/``stop``/``status`` tokens in non-commented out lines
If 3 or more items are found (half the items searched for + 1), called
*confidence* in the code (e.g. shebang + start + stop), then we assume it
is an initscript.
Otherwise the parser raises a ``ParseException``.
"""
def parse_content(self, content):
"""
Raises:
EmptyFileException: Raised if file is empty.
NotInitscriptException: Raised if likely not an initscript.
"""
self.file_content = content
# If we find 'chkconfig: XYZ' or 'Provides: NAME', assume we have an
# initscript
for line in content:
if CHKCONFIG_REGEX.match(line) or LSB_REGEX.match(line):
return
# Otherwise, check for hints (shebang + presence of start/stop/status
# keywords)
#
# Allocate 1 point for each item found. And declare as valid if
# confidence >= 3 (half of the number of items + 1).
confidence = 0
try:
line = content[0]
if SHEBANG_REGEX.match(line):
confidence += 1
except IndexError:
raise EmptyFileException(self.file_path)
key = {'start': 0, 'stop': 0, 'status': 0}
for line in content:
if COMMENT_REGEX.match(line):
continue
m = STARTSTOPSTATUS_REGEX.search(line)
if m:
key[m.group(1)] += 1
confidence += len([v for v in key.values() if v != 0])
if confidence < 3:
raise NotInitscriptException("path: %s, confidence: %d" % (self.file_path, confidence))
|
dc39f36d442bf78f5b575b2a2ea63c44fd19bf58
|
2c6e9374fe1216a99d833354213ca676407e1197
|
/mqtt_as/esp32_gateway/gwconfig.py
|
8e1293f9d3bab9e90cb62893fe7850bfe1461421
|
[
"MIT"
] |
permissive
|
peterhinch/micropython-mqtt
|
7729d2b00ad2e41440fe278a0aec938537edebf8
|
041b0213caee0d990cb0eb454b9e627bf515edde
|
refs/heads/master
| 2023-09-01T08:37:26.372051
| 2023-08-24T09:52:42
| 2023-08-24T09:52:42
| 94,690,722
| 456
| 118
|
MIT
| 2023-02-03T14:54:07
| 2017-06-18T14:00:48
|
Python
|
UTF-8
|
Python
| false
| false
| 967
|
py
|
gwconfig.py
|
# gwconfig.py Config file for ESPNow gateway
from collections import namedtuple, defaultdict
PubIn = namedtuple('PubIn', 'topic qos') # Publication to gateway/nodes from outside
PubOut = namedtuple('PubOut', 'topic retain qos') # Publication by gateway
gwcfg = defaultdict(lambda : None)
# Mandatory keys
gwcfg["debug"] = True # Print debug info. Also causes more status messages to be published.
gwcfg["qlen"] = 10 # No. of messages to queue (for each node)
gwcfg["lpmode"] = True # Set True if all nodes are micropower
gwcfg["use_ap_if"] = True # Enable ESP8266 nodes by using AP interface
gwcfg["pub_all"] = PubIn("allnodes", 1) # Publish to all nodes
# Optional keys
gwcfg["errors"] = PubOut("gw_errors", False, 1) # Gateway publishes any errors.
gwcfg["status"] = PubOut("gw_status", False, 0) # Status report
gwcfg["statreq"] = PubIn("gw_query", 0) # Status request (not yet implemented)
gwcfg["ntp_host"] = "192.168.0.10"
gwcfg["ntp_offset"] = 1
|
8b134c9bf252998443a7424f17256cf119944f7c
|
1c790b0adc648ff466913cf4aed28ace905357ff
|
/model_zoo/jag_utils/python/sanity.py
|
01f5c7e9636e8ed30a3755a6074ddfd87397b9f1
|
[
"Apache-2.0"
] |
permissive
|
LLNL/lbann
|
04d5fdf443d6b467be4fa91446d40b620eade765
|
e8cf85eed2acbd3383892bf7cb2d88b44c194f4f
|
refs/heads/develop
| 2023-08-23T18:59:29.075981
| 2023-08-22T22:16:48
| 2023-08-22T22:16:48
| 58,576,874
| 225
| 87
|
NOASSERTION
| 2023-09-11T22:43:32
| 2016-05-11T20:04:20
|
C++
|
UTF-8
|
Python
| false
| false
| 4,409
|
py
|
sanity.py
|
#!/usr/tce/bin/python
'''
quick script to test that sample lists generated by build_trainer_lists.py
contain unique indices.
usage: sanity.py id_mapping_fn sample_list_dir sample_list_base_name num_sample_lists
'''
import sys
if len(sys.argv) == 1 :
print '''
usage: sanity.py id_mapping_fn sample_list_dir sample_list_base_name num_sample_lists
where: the sample lists and the exclusion (bar) file are outputs from build_trainer_lists.py
function: test that the intersection of the sample IDs in the
sample lists are empty, and that every sample_ID
is in either one sample list or in the exclusion (bar) file\n
example usage:
python sanity.py \\
/p/lustre2/brainusr/datasets/10MJAG/1M_A/id_mapping.txt \\
/p/lustre2/brainusr/datasets/10MJAG/1M_A/select_samples_test/another_dir \\
my_samples.txt \\
10
CAUTION: this script is fragile: it may break if/when model_zoo/jag_utils/select_samples.cpp is modified
'''
exit(9)
#======================================================================
def buildInc(mp, fn) :
r = set()
print 'buildInc; opening:', fn
a = open(fn)
a.readline()
a.readline()
a.readline()
for line in a :
t = line.split()
for j in t[3:] :
r.add(j)
print ' num sample IDs:', len(r)
return r
#======================================================================
#returns (excluded, included) sample IDs from an input EXCLUSION sample list
def buildExc(mp, fn) :
s = set()
print 'buildExc; opening:', fn
a = open(fn)
a.readline()
a.readline()
a.readline()
for line in a :
t = line.split()
for j in t[3:] :
s.add(j)
#at this point, 's' contains all excluded sample IDs (these are the IDs
#that are explicitly listed in the exclusion bar file);
#mp is the set of all sample IDs, whether included or excluded (unsuccessful)
r = set()
for sample_id in mp :
if sample_id not in s :
r.add(sample_id)
print ' num sample IDs:', len(r)
return (s, r)
#======================================================================
#build set that contains all sample names
mp = set()
a = open(sys.argv[1])
for line in a :
t = line.split()
for j in t[1:] :
mp.add(j)
print '\nlen(map):', len(mp)
sample_list_dir = sys.argv[2]
sample_list_base_name = sys.argv[3]
#build exclusion set; this set contains all valid (successful) sample IDs
(excluded, included) = buildExc(mp, sample_list_dir + '/t_exclusion_' + sample_list_base_name + '_bar')
print '\nlen(included):', len(included), 'len(excluded):', len(excluded), 'intersection:', len(included.intersection(excluded))
data = []
data.append(included)
#build bar inclusion set
(included2, excluded2) = buildExc(mp, sample_list_dir + '/t_inclusion_' + sample_list_base_name + '_bar')
#(excluded2, included2) = buildExc(mp, sample_list_dir + '/t_inclusion_' + sample_list_base_name + '_bar')
print '\nlen(included):', len(included2), 'len(excluded):', len(excluded2), 'intersection:', len(included2.intersection(excluded2))
print
print 'checking that the bar files do not intersect'
r = len(excluded.intersection(included2))
if r != 0 :
print 'FAILED!'
print 'len(intersection):', r
exit(0)
#print 'bar inclusion file contains', len(bar), 'sample IDs'
#data.append(bar)
for j in range(int(sys.argv[4])) :
s2 = buildInc(mp, sample_list_dir + '/t' + str(j) + '_' + sample_list_base_name)
data.append(s2)
print len(s2)
print
print '===================================================================='
print 'running intersection test ...'
success = True
for j in range(0, len(data)-1) :
for k in range(1, len(data)) :
if j != k :
a = data[j]
b = data[k]
print 'testing', j, 'against', k, 'len:', len(a), len(b)
r = len(a.intersection(b))
if r != 0 :
print 'FAILED: ', j, 'intersection with',k, '=' , r
tt = 0
for x in a :
if x in b :
print x,
tt += 1
print
print 'total:', tt
exit(9)
success = False
if success :
print ' SUCCESS!'
print
print 'testing that all samples appear in one sample list, or the exclusion bar file'
s2 = set()
for j in range(0, len(data)) :
for sample_id in data[j] :
assert(sample_id in mp)
mp.remove(sample_id)
if len(mp) == 0 :
print ' SUCCESS!'
else :
print ' FAILED; len(mp)= ', len(mp), 'should be zero'
|
f559846f0c5383ee2f163ed85d151ef98e0741da
|
f623b9536e91b7f2e4303e3f59a012432c3eef44
|
/tests/test_archive.py
|
637800b76a0fe2ee1e54ea08b68fb1ecdde38c63
|
[
"MIT"
] |
permissive
|
ezbz/gitlabber
|
511480e8181127c0decf7143763ac614f3011e21
|
15e5c011c65ae4b92bf2ef3e9e84f14cd1fc21fc
|
refs/heads/master
| 2023-08-21T14:55:54.238535
| 2023-06-18T09:19:39
| 2023-06-18T09:19:39
| 255,058,951
| 451
| 78
|
MIT
| 2023-09-06T18:27:30
| 2020-04-12T10:31:58
|
Python
|
UTF-8
|
Python
| false
| false
| 688
|
py
|
test_archive.py
|
from gitlabber.archive import ArchivedResults
import pytest
import re
def test_archive_parse():
assert ArchivedResults.INCLUDE == ArchivedResults.argparse("include")
def test_archive_string():
assert "exclude" == ArchivedResults.__str__(ArchivedResults.EXCLUDE)
def test_repr():
retval = repr(ArchivedResults.ONLY)
match = re.match("^<ArchivedResults: ({.*})>$", retval)
assert match is not None
def test_archive_api_value():
assert True == ArchivedResults.ONLY.api_value
assert False == ArchivedResults.EXCLUDE.api_value
assert None == ArchivedResults.INCLUDE.api_value
def test_archive_invalid():
assert "invalid_value" == ArchivedResults.argparse("invalid_value")
|
bf88a8c0cbdbfd02a53d6976764ad768bec4c668
|
578db86c51d44ebddd0dc7b1738985b3dc69eb74
|
/corehq/apps/es/tests/test_esqueryset.py
|
e9cad4658dfac52a739f39b22f95f3bff7cd7aaa
|
[
"BSD-3-Clause"
] |
permissive
|
dimagi/commcare-hq
|
a43c7dd32b5f89c89fd5aa1b1359ab7301f4ff6b
|
e7391ddae1af1dbf118211ecb52c83fc508aa656
|
refs/heads/master
| 2023-08-16T22:38:27.853437
| 2023-08-16T19:07:19
| 2023-08-16T19:07:19
| 247,278
| 499
| 203
|
BSD-3-Clause
| 2023-09-14T19:03:24
| 2009-07-09T17:00:07
|
Python
|
UTF-8
|
Python
| false
| false
| 3,830
|
py
|
test_esqueryset.py
|
from unittest import TestCase, skipIf
from django.conf import settings
from corehq.apps.es.es_query import ESQuerySet, HQESQuery
from corehq.apps.es.tests.utils import es_test
from corehq.elastic import ESError
@es_test
class TestESQuerySet(TestCase):
example_response = {
'_shards': {'failed': 0, 'successful': 5, 'total': 5},
'hits': {'hits': [ {
'_id': '8063dff5-460b-46f2-b4d0-5871abfd97d4',
'_index': 'xforms_1cce1f049a1b4d864c9c25dc42648a45',
'_score': 1.0,
'_type': 'xform',
'_source': {
'app_id': 'fe8481a39c3738749e6a4766fca99efd',
'doc_type': 'xforminstance',
'domain': 'mikesproject',
'xmlns': 'http://openrosa.org/formdesigner/3a7cc07c-551c-4651-ab1a-d60be3017485'
}
},
{
'_id': 'dc1376cd-0869-4c13-a267-365dfc2fa754',
'_index': 'xforms_1cce1f049a1b4d864c9c25dc42648a45',
'_score': 1.0,
'_type': 'xform',
'_source': {
'app_id': '3d622620ca00d7709625220751a7b1f9',
'doc_type': 'xforminstance',
'domain': 'mikesproject',
'xmlns': 'http://openrosa.org/formdesigner/54db1962-b938-4e2b-b00e-08414163ead4'
}
}
],
'max_score': 1.0,
'total': 5247
},
'timed_out': False,
'took': 4
}
example_error = {'error': 'IndexMissingException[[xforms_123jlajlaf] missing]',
'status': 404}
def test_response(self):
hits = [
{
'app_id': 'fe8481a39c3738749e6a4766fca99efd',
'doc_type': 'xforminstance',
'domain': 'mikesproject',
'xmlns': 'http://openrosa.org/formdesigner/3a7cc07c-551c-4651-ab1a-d60be3017485'
},
{
'app_id': '3d622620ca00d7709625220751a7b1f9',
'doc_type': 'xforminstance',
'domain': 'mikesproject',
'xmlns': 'http://openrosa.org/formdesigner/54db1962-b938-4e2b-b00e-08414163ead4'
}
]
fields = ['app_id', 'doc_type', 'domain', 'xmlns']
response = ESQuerySet(
self.example_response,
HQESQuery('forms').fields(fields)
)
self.assertEqual(response.total, 5247)
self.assertEqual(response.hits, hits)
def test_error(self):
with self.assertRaises(ESError):
ESQuerySet(self.example_error, HQESQuery('forms'))
@skipIf(settings.ELASTICSEARCH_MAJOR_VERSION > 2, 'Only applicable for older versions')
def test_flatten_field_dicts(self):
example_response = {
'hits': {'hits': [{
'_source': {
'domains': ['joesproject'],
}
},
{
'_source': {
'domains': ['mikesproject']
}
}
],
},
}
hits = [
{
'domains': 'joesproject',
},
{
'domains': 'mikesproject',
}
]
fields = ['domain']
response = ESQuerySet(
example_response,
HQESQuery('forms').fields(fields)
)
self.assertEqual(response.hits, hits)
def test_exclude_source(self):
hits = ['8063dff5-460b-46f2-b4d0-5871abfd97d4', 'dc1376cd-0869-4c13-a267-365dfc2fa754']
response = ESQuerySet(
self.example_response,
HQESQuery('forms').exclude_source()
)
self.assertEqual(response.hits, hits)
|
c218b3ad6665c0b85a37259689647332578a2178
|
4ae34a5179d7adf1037eb9a3cb249f9a5c06684e
|
/cmd/metricscollector/v1beta1/tfevent-metricscollector/main.py
|
29c95849bff7a1707d60ce351282fba8f33cb91f
|
[
"Apache-2.0"
] |
permissive
|
kubeflow/katib
|
367373c0452d49a7a115b86893f4dab9e1f278ea
|
e3e0aa24aeea1edfab0fd42f55392af651d2b3ae
|
refs/heads/master
| 2023-09-04T05:02:05.752156
| 2023-08-24T22:40:54
| 2023-08-24T22:40:54
| 127,941,481
| 1,385
| 422
|
Apache-2.0
| 2023-09-14T13:17:29
| 2018-04-03T17:07:12
|
Go
|
UTF-8
|
Python
| false
| false
| 3,132
|
py
|
main.py
|
# Copyright 2022 The Kubeflow Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import grpc
import argparse
import api_pb2
from pns import WaitMainProcesses
import const
from tfevent_loader import MetricsCollector
from logging import getLogger, StreamHandler, INFO
timeout_in_seconds = 60
def parse_options():
parser = argparse.ArgumentParser(
description='TF-Event MetricsCollector',
add_help=True
)
# TODO (andreyvelich): Add early stopping flags.
parser.add_argument("-s-db", "--db_manager_server_addr", type=str, default="")
parser.add_argument("-t", "--trial_name", type=str, default="")
parser.add_argument("-path", "--metrics_file_dir", type=str, default=const.DEFAULT_METRICS_FILE_DIR)
parser.add_argument("-m", "--metric_names", type=str, default="")
parser.add_argument("-o-type", "--objective_type", type=str, default="")
parser.add_argument("-f", "--metric_filters", type=str, default="")
parser.add_argument("-p", "--poll_interval", type=int, default=const.DEFAULT_POLL_INTERVAL)
parser.add_argument("-timeout", "--timeout", type=int, default=const.DEFAULT_TIMEOUT)
parser.add_argument("-w", "--wait_all_processes", type=str, default=const.DEFAULT_WAIT_ALL_PROCESSES)
opt = parser.parse_args()
return opt
if __name__ == '__main__':
logger = getLogger(__name__)
handler = StreamHandler()
handler.setLevel(INFO)
logger.setLevel(INFO)
logger.addHandler(handler)
logger.propagate = False
opt = parse_options()
wait_all_processes = opt.wait_all_processes.lower() == "true"
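# The Katib DB manager address is expected in host:port form; anything else is rejected below.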
db_manager_server = opt.db_manager_server_addr.split(':')
if len(db_manager_server) != 2:
raise Exception("Invalid Katib DB manager service address: %s" %
opt.db_manager_server_addr)
WaitMainProcesses(
pool_interval=opt.poll_interval,
timout=opt.timeout,
wait_all=wait_all_processes,
completed_marked_dir=opt.metrics_file_dir)
mc = MetricsCollector(opt.metric_names.split(';'))
observation_log = mc.parse_file(opt.metrics_file_dir)
channel = grpc.beta.implementations.insecure_channel(
db_manager_server[0], int(db_manager_server[1]))
with api_pb2.beta_create_DBManager_stub(channel) as client:
logger.info("In " + opt.trial_name + " " +
str(len(observation_log.metric_logs)) + " metrics will be reported.")
client.ReportObservationLog(api_pb2.ReportObservationLogRequest(
trial_name=opt.trial_name,
observation_log=observation_log
), timeout=timeout_in_seconds)
|
c3ad30793e917280d2908ec08495ab097a0d9291
|
a5a99f646e371b45974a6fb6ccc06b0a674818f2
|
/Geometry/RPCGeometry/test/rpcgeo2.py
|
bf88e43458466c257b5be145dba4853abd865917
|
[
"Apache-2.0"
] |
permissive
|
cms-sw/cmssw
|
4ecd2c1105d59c66d385551230542c6615b9ab58
|
19c178740257eb48367778593da55dcad08b7a4f
|
refs/heads/master
| 2023-08-23T21:57:42.491143
| 2023-08-22T20:22:40
| 2023-08-22T20:22:40
| 10,969,551
| 1,006
| 3,696
|
Apache-2.0
| 2023-09-14T19:14:28
| 2013-06-26T14:09:07
|
C++
|
UTF-8
|
Python
| false
| false
| 517
|
py
|
rpcgeo2.py
|
import FWCore.ParameterSet.Config as cms
process = cms.Process("Demo")
process.load("Geometry.MuonCommonData.muonIdealGeometryXML_cfi")
process.load("Geometry.RPCGeometry.rpcGeometry_cfi")
process.load("Geometry.MuonNumbering.muonNumberingInitialization_cfi")
process.maxEvents = cms.untracked.PSet(
input = cms.untracked.int32(1)
)
process.source = cms.Source("EmptySource")
process.MessageLogger = cms.Service("MessageLogger")
process.demo = cms.EDAnalyzer("RPCGEO2")
process.p = cms.Path(process.demo)
|
8f9284e02e2f675cd4bfdd754c230275f262ff58
|
86092e7c45eeb677576edab301cebb592a8baa31
|
/anycastcheck.py
|
71621efa596323521963a04660f2c267008cbf2f
|
[] |
no_license
|
RIPE-Atlas-Community/ripe-atlas-community-contrib
|
73b745aec4845452ba7eaa30932e7bee934171e8
|
2fb7d7e8143ee3884d0ad556612ba759ee414955
|
refs/heads/master
| 2023-06-08T18:13:14.845089
| 2023-06-06T14:08:08
| 2023-06-06T14:08:08
| 8,092,696
| 144
| 31
| null | 2021-11-25T14:15:35
| 2013-02-08T11:52:59
|
Python
|
UTF-8
|
Python
| false
| false
| 5,742
|
py
|
anycastcheck.py
|
#!/usr/bin/env python
#
# https://atlas.ripe.net/contrib/root_anycast.json?msm_id=1&af=6&qst=IS
# https://atlas.ripe.net/contrib/root_anycast.json?msm_id=1&af=4&qst=IS
#
# http://www.infornografia.net/atlas/anycastcheck.html
# http://www.infornografia.net/atlas/anycastcheck.kml
#
# UNK/UNK -> invalid
# inst4 = inst6 -> ok
# inst4 != inst6 -> nok
# inst4 != inst6 (UNK) -> nok-unknowns
# Arguably, nok-unknowns could be set as invalid, as well
#
# ["id_3_31", "52.2985", "4.9375",
# [
# "Probe ID", "2 (<a href='https://stat.ripe.net/AS5615' target='_new'>AS5615</a>)",
# "Instance", "linx",
# "Reply time", "27 ms",
# "As of (UTC)", "2012-02-24 13:35:56"
# ]
# ]
import json
from xml.dom.minidom import getDOMImplementation
import urllib
import re
def xml_newdoc():
impl= getDOMImplementation()
newdoc= impl.createDocument(None, "kml", None)
return newdoc
def main():
doc= xml_newdoc()
kml_el= doc.documentElement
kml_el.setAttribute("xmlns", "http://www.opengis.net/kml/2.2")
doc_el = doc.createElement("Document")
kml_el.appendChild(doc_el)
add_style(doc, doc_el, "chk_ok", "http://maps.google.com/mapfiles/kml/paddle/grn-blank.png")
add_style(doc, doc_el, "chk_nok", "http://maps.google.com/mapfiles/kml/paddle/pink-blank.png")
add_style(doc, doc_el, "chk_unknowns", "http://maps.google.com/mapfiles/kml/paddle/pink-circle.png")
add_style(doc, doc_el, "chk_invalid", "http://maps.google.com/mapfiles/kml/paddle/wht-blank.png")
probes4= get_probes("4")
probes6= get_probes("6")
for probe, list in probes6.iteritems():
probe_id= probe
if (probe_id in probes4):
chk_result= check_instance(probe_id,probes4,probes6)
descr = print_descr(probe_id,probes4,probes6)
lat = get_lat(probe_id,probes6)
lon = get_lon(probe_id,probes6)
add_probe(doc, doc_el, lat, lon, descr, chk_result)
print doc.toprettyxml(encoding="UTF-8")
def get_lat(probe_id,probes6):
data6= {}
list = probes6[probe_id]
for i in range(0,len(list),2):
data6[list[i]]= list[i+1]
return data6["Latitude"]
def get_lon(probe_id,probes6):
data6= {}
list= probes6[probe_id]
for i in range(0,len(list),2):
data6[list[i]]= list[i+1]
return data6["Longitude"]
def check_instance(probe_id,probes4,probes6):
data4= {}
data6= {}
list= probes6[probe_id]
for i in range(0,len(list),2):
data6[str(list[i])]= str(list[i+1])
list= probes4[probe_id]
for i in range(0,len(list),2):
data4[str(list[i])]= str(list[i+1])
if data6["Response"]==data4["Response"]:
if (data6["Response"] == "UNKNOWN") or (data4["Response"] == "UNKNOWN"):
result= "invalid"
else:
result= "ok"
elif (data6["Response"] == "UNKNOWN") or (data4["Response"] == "UNKNOWN"):
result= "unknowns"
else:
result= "nok"
return result
def get_probes(af):
probes= {}
url= "https://atlas.ripe.net/contrib/root_anycast.json?msm_id=1&af=" + str(af) + "&qst=IS"
data = json.load(urllib.urlopen(url))
for probe in data["probes"]:
items= probe[3]
# xxx populate items properly
items.append(str("Latitude"))
items.append(str(probe[1]))
items.append(str("Longitude"))
items.append(str(probe[2]))
d= {}
for i in range(0,len(items),2):
d[str(items[i])]= str(items[i+1])
probeid= re.search('^(\d+)',str(d["Probe ID"]))
probes[str(probeid.group(0))]= items
return probes
def print_descr(probe_id, probes4, probes6):
data4= {}
data6= {}
list= probes6[probe_id]
for i in range(0,len(list),2):
data6[str(list[i])]= str(list[i+1])
list= probes4[probe_id]
for i in range(0,len(list),2):
data4[str(list[i])]= str(list[i+1])
table= "<table>"
table += "<tr><td><b>Probe ID:</b></td><td>" + str(data6["Probe ID"]) + "</td></tr>"
table += "<tr><td><b>Instance (v6):</b></td><td>" + data6["Response"] + " (" + data6["Reply time"] + ")</td></tr>"
table += "<tr><td><b>Instance (v4):</b></td><td>" + data4["Response"] + " (" + data4["Reply time"] + ")</td></tr>"
table += "<tr><td><b>As of (UTC):</b></td><td>" + data6["As of (UTC)"] + "</td></tr>"
table += "</table>"
return str(table)
def add_probe(doc, parent, lat_str, lon_str, descr_str, result_str):
place_el = doc.createElement("Placemark")
parent.appendChild(place_el)
descr_el = doc.createElement("description")
place_el.appendChild(descr_el)
text_node= doc.createCDATASection(str(descr_str))
descr_el.appendChild(text_node)
if str(result_str) == "ok":
styleurl= "#chk_ok"
elif str(result_str) == "nok":
styleurl= "#chk_nok"
elif str(result_str) == "unknowns":
styleurl= "#chk_unknowns"
else:
styleurl= "#chk_invalid"
styleurl_el = doc.createElement("styleUrl")
place_el.appendChild(styleurl_el)
text_node= doc.createTextNode(styleurl)
styleurl_el.appendChild(text_node)
point_el = doc.createElement("Point")
place_el.appendChild(point_el)
coor_el = doc.createElement("coordinates")
point_el.appendChild(coor_el)
lat= float(lat_str)
lon= float(lon_str)
text_node= doc.createTextNode("%f,%f,0" % (lon,lat))
coor_el.appendChild(text_node)
def add_style(doc, parent, name, url):
style_el= doc.createElement("Style")
parent.appendChild(style_el)
style_el.setAttribute("id", name)
iconstyle_el= doc.createElement("IconStyle")
style_el.appendChild(iconstyle_el)
icon_el= doc.createElement("Icon")
iconstyle_el.appendChild(icon_el)
href_el= doc.createElement("href")
icon_el.appendChild(href_el)
text_node= doc.createTextNode(url)
href_el.appendChild(text_node)
main()
|
c28d138366be0150354cf48683293c1102125967
|
6fd2ab69501d71844a7329f62a3e62718fe9a9dd
|
/tests/test_fast_scan_ivf.py
|
5a57a39ca9d53282ea68d29b109a477e2b4e6adf
|
[
"MIT"
] |
permissive
|
facebookresearch/faiss
|
f6a7b9df838309e8a231653df2dc764ef43d355e
|
9dc75d026d25b340771a7ef5d99b0f81a0dc5e34
|
refs/heads/main
| 2023-09-02T15:12:01.311542
| 2023-09-01T14:06:14
| 2023-09-01T14:06:14
| 81,227,005
| 24,723
| 3,437
|
MIT
| 2023-09-14T20:41:39
| 2017-02-07T16:07:05
|
C++
|
UTF-8
|
Python
| false
| false
| 26,274
|
py
|
test_fast_scan_ivf.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import os
import unittest
import tempfile
import numpy as np
import faiss
from faiss.contrib import datasets
from faiss.contrib.inspect_tools import get_invlist
# the tests tend to timeout in stress modes + dev otherwise
faiss.omp_set_num_threads(4)
class TestLUTQuantization(unittest.TestCase):
def compute_dis_float(self, codes, LUT, bias):
nprobe, nt, M = codes.shape
dis = np.zeros((nprobe, nt), dtype='float32')
if bias is not None:
dis[:] = bias.reshape(-1, 1)
if LUT.ndim == 2:
LUTp = LUT
for p in range(nprobe):
if LUT.ndim == 3:
LUTp = LUT[p]
for i in range(nt):
dis[p, i] += LUTp[np.arange(M), codes[p, i]].sum()
return dis
def compute_dis_quant(self, codes, LUT, bias, a, b):
nprobe, nt, M = codes.shape
dis = np.zeros((nprobe, nt), dtype='uint16')
if bias is not None:
dis[:] = bias.reshape(-1, 1)
if LUT.ndim == 2:
LUTp = LUT
for p in range(nprobe):
if LUT.ndim == 3:
LUTp = LUT[p]
for i in range(nt):
dis[p, i] += LUTp[np.arange(M), codes[p, i]].astype('uint16').sum()
return dis / a + b
def do_test(self, LUT, bias, nprobe, alt_3d=False):
M, ksub = LUT.shape[-2:]
nt = 200
rs = np.random.RandomState(123)
codes = rs.randint(ksub, size=(nprobe, nt, M)).astype('uint8')
dis_ref = self.compute_dis_float(codes, LUT, bias)
LUTq = np.zeros(LUT.shape, dtype='uint8')
biasq = (
np.zeros(bias.shape, dtype='uint16')
if (bias is not None) and not alt_3d else None
)
atab = np.zeros(1, dtype='float32')
btab = np.zeros(1, dtype='float32')
def sp(x):
return faiss.swig_ptr(x) if x is not None else None
faiss.quantize_LUT_and_bias(
nprobe, M, ksub, LUT.ndim == 3,
sp(LUT), sp(bias), sp(LUTq), M, sp(biasq),
sp(atab), sp(btab)
)
a = atab[0]
b = btab[0]
dis_new = self.compute_dis_quant(codes, LUTq, biasq, a, b)
# print(a, b, dis_ref.sum())
avg_relative_error = np.abs(dis_new - dis_ref).sum() / dis_ref.sum()
# print('a=', a, 'avg_relative_error=', avg_relative_error)
self.assertLess(avg_relative_error, 0.0005)
def test_no_residual_ip(self):
ksub = 16
M = 20
nprobe = 10
rs = np.random.RandomState(1234)
LUT = rs.rand(M, ksub).astype('float32')
bias = None
self.do_test(LUT, bias, nprobe)
def test_by_residual_ip(self):
ksub = 16
M = 20
nprobe = 10
rs = np.random.RandomState(1234)
LUT = rs.rand(M, ksub).astype('float32')
bias = rs.rand(nprobe).astype('float32')
bias *= 10
self.do_test(LUT, bias, nprobe)
def test_by_residual_L2(self):
ksub = 16
M = 20
nprobe = 10
rs = np.random.RandomState(1234)
LUT = rs.rand(nprobe, M, ksub).astype('float32')
bias = rs.rand(nprobe).astype('float32')
bias *= 10
self.do_test(LUT, bias, nprobe)
def test_by_residual_L2_v2(self):
ksub = 16
M = 20
nprobe = 10
rs = np.random.RandomState(1234)
LUT = rs.rand(nprobe, M, ksub).astype('float32')
bias = rs.rand(nprobe).astype('float32')
bias *= 10
self.do_test(LUT, bias, nprobe, alt_3d=True)
##########################################################
# Tests for various IndexPQFastScan implementations
##########################################################
def verify_with_draws(testcase, Dref, Iref, Dnew, Inew):
""" verify a list of results where there are draws in the distances (because
they are integer). """
np.testing.assert_array_almost_equal(Dref, Dnew, decimal=5)
# here we have to be careful because of draws
for i in range(len(Iref)):
if np.all(Iref[i] == Inew[i]): # easy case
continue
# the last (k-th) distance may have tied results cut off at k, so entries at that distance cannot be verified
skip_dis = Dref[i, -1]
for dis in np.unique(Dref):
if dis == skip_dis: continue
mask = Dref[i, :] == dis
testcase.assertEqual(set(Iref[i, mask]), set(Inew[i, mask]))
def three_metrics(Dref, Iref, Dnew, Inew):
nq = Iref.shape[0]
recall_at_1 = (Iref[:, 0] == Inew[:, 0]).sum() / nq
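# recall_at_10: whether the reference top-1 appears anywhere in the new top-10 (NumPy broadcast against the first column of Iref)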
recall_at_10 = (Iref[:, :1] == Inew[:, :10]).sum() / nq
ninter = 0
for i in range(nq):
ninter += len(np.intersect1d(Inew[i], Iref[i]))
intersection_at_10 = ninter / nq
return recall_at_1, recall_at_10, intersection_at_10
##########################################################
# Tests for various IndexIVFPQFastScan implementations
##########################################################
class TestIVFImplem1(unittest.TestCase):
""" Verify implem 1 (search from original invlists)
against IndexIVFPQ """
def do_test(self, by_residual, metric_type=faiss.METRIC_L2,
use_precomputed_table=0):
ds = datasets.SyntheticDataset(32, 2000, 5000, 1000)
index = faiss.index_factory(32, "IVF32,PQ16x4np", metric_type)
index.use_precomputed_table
index.use_precomputed_table = use_precomputed_table
index.train(ds.get_train())
index.add(ds.get_database())
index.nprobe = 4
index.by_residual = by_residual
Da, Ia = index.search(ds.get_queries(), 10)
index2 = faiss.IndexIVFPQFastScan(index)
index2.implem = 1
Db, Ib = index2.search(ds.get_queries(), 10)
# self.assertLess((Ia != Ib).sum(), Ia.size * 0.005)
np.testing.assert_array_equal(Ia, Ib)
np.testing.assert_almost_equal(Da, Db, decimal=5)
def test_no_residual(self):
self.do_test(False)
def test_by_residual(self):
self.do_test(True)
def test_by_residual_no_precomputed(self):
self.do_test(True, use_precomputed_table=-1)
def test_no_residual_ip(self):
self.do_test(False, faiss.METRIC_INNER_PRODUCT)
def test_by_residual_ip(self):
self.do_test(True, faiss.METRIC_INNER_PRODUCT)
class TestIVFImplem2(unittest.TestCase):
""" Verify implem 2 (search with original invlists with uint8 LUTs)
against IndexIVFPQ. Entails some loss in accuracy. """
def eval_quant_loss(self, by_residual, metric=faiss.METRIC_L2):
ds = datasets.SyntheticDataset(32, 2000, 5000, 1000)
index = faiss.index_factory(32, "IVF32,PQ16x4np", metric)
index.train(ds.get_train())
index.add(ds.get_database())
index.nprobe = 4
index.by_residual = by_residual
Da, Ia = index.search(ds.get_queries(), 10)
# loss due to int8 quantization of LUTs
index2 = faiss.IndexIVFPQFastScan(index)
index2.implem = 2
Db, Ib = index2.search(ds.get_queries(), 10)
m3 = three_metrics(Da, Ia, Db, Ib)
# print(by_residual, metric, recall_at_1, recall_at_10, intersection_at_10)
ref_results = {
(True, 1): [0.985, 1.0, 9.872],
(True, 0): [ 0.987, 1.0, 9.914],
(False, 1): [0.991, 1.0, 9.907],
(False, 0): [0.986, 1.0, 9.917],
}
ref = ref_results[(by_residual, metric)]
self.assertGreaterEqual(m3[0], ref[0] * 0.995)
self.assertGreaterEqual(m3[1], ref[1] * 0.995)
self.assertGreaterEqual(m3[2], ref[2] * 0.995)
def test_qloss_no_residual(self):
self.eval_quant_loss(False)
def test_qloss_by_residual(self):
self.eval_quant_loss(True)
def test_qloss_no_residual_ip(self):
self.eval_quant_loss(False, faiss.METRIC_INNER_PRODUCT)
def test_qloss_by_residual_ip(self):
self.eval_quant_loss(True, faiss.METRIC_INNER_PRODUCT)
class TestEquivPQ(unittest.TestCase):
def test_equiv_pq(self):
ds = datasets.SyntheticDataset(32, 2000, 200, 4)
index = faiss.index_factory(32, "IVF1,PQ16x4np")
index.by_residual = False
# force coarse quantizer
index.quantizer.add(np.zeros((1, 32), dtype='float32'))
index.train(ds.get_train())
index.add(ds.get_database())
Dref, Iref = index.search(ds.get_queries(), 4)
index_pq = faiss.index_factory(32, "PQ16x4np")
index_pq.pq = index.pq
index_pq.is_trained = True
index_pq.codes = faiss.downcast_InvertedLists(
index.invlists).codes.at(0)
index_pq.ntotal = index.ntotal
Dnew, Inew = index_pq.search(ds.get_queries(), 4)
np.testing.assert_array_equal(Iref, Inew)
np.testing.assert_array_equal(Dref, Dnew)
index_pq2 = faiss.IndexPQFastScan(index_pq)
index_pq2.implem = 12
Dref, Iref = index_pq2.search(ds.get_queries(), 4)
index2 = faiss.IndexIVFPQFastScan(index)
index2.implem = 12
Dnew, Inew = index2.search(ds.get_queries(), 4)
np.testing.assert_array_equal(Iref, Inew)
np.testing.assert_array_equal(Dref, Dnew)
class TestIVFImplem12(unittest.TestCase):
IMPLEM = 12
def do_test(self, by_residual, metric=faiss.METRIC_L2, d=32, nq=200):
ds = datasets.SyntheticDataset(d, 2000, 5000, nq)
index = faiss.index_factory(d, f"IVF32,PQ{d//2}x4np", metric)
# force coarse quantizer
# index.quantizer.add(np.zeros((1, 32), dtype='float32'))
index.by_residual = by_residual
index.train(ds.get_train())
index.add(ds.get_database())
index.nprobe = 4
index2 = faiss.IndexIVFPQFastScan(index)
index2.implem = 2
Dref, Iref = index2.search(ds.get_queries(), 4)
index2 = faiss.IndexIVFPQFastScan(index)
index2.implem = self.IMPLEM
Dnew, Inew = index2.search(ds.get_queries(), 4)
verify_with_draws(self, Dref, Iref, Dnew, Inew)
stats = faiss.cvar.indexIVF_stats
stats.reset()
# also verify with single result
Dnew, Inew = index2.search(ds.get_queries(), 1)
for q in range(len(Dref)):
if Dref[q, 1] == Dref[q, 0]:
# then we cannot conclude
continue
self.assertEqual(Iref[q, 0], Inew[q, 0])
np.testing.assert_almost_equal(Dref[q, 0], Dnew[q, 0], decimal=5)
self.assertGreater(stats.ndis, 0)
def test_no_residual(self):
self.do_test(False)
def test_by_residual(self):
self.do_test(True)
def test_no_residual_ip(self):
self.do_test(False, metric=faiss.METRIC_INNER_PRODUCT)
def test_by_residual_ip(self):
self.do_test(True, metric=faiss.METRIC_INNER_PRODUCT)
def test_no_residual_odd_dim(self):
self.do_test(False, d=30)
def test_by_residual_odd_dim(self):
self.do_test(True, d=30)
# testing single query
def test_no_residual_single_query(self):
self.do_test(False, nq=1)
def test_by_residual_single_query(self):
self.do_test(True, nq=1)
def test_no_residual_ip_single_query(self):
self.do_test(False, metric=faiss.METRIC_INNER_PRODUCT, nq=1)
def test_by_residual_ip_single_query(self):
self.do_test(True, metric=faiss.METRIC_INNER_PRODUCT, nq=1)
def test_no_residual_odd_dim_single_query(self):
self.do_test(False, d=30, nq=1)
def test_by_residual_odd_dim_single_query(self):
self.do_test(True, d=30, nq=1)
class TestIVFImplem10(TestIVFImplem12):
IMPLEM = 10
class TestIVFImplem11(TestIVFImplem12):
IMPLEM = 11
class TestIVFImplem13(TestIVFImplem12):
IMPLEM = 13
class TestIVFImplem14(TestIVFImplem12):
IMPLEM = 14
class TestIVFImplem15(TestIVFImplem12):
IMPLEM = 15
class TestAdd(unittest.TestCase):
def do_test(self, by_residual=False, metric=faiss.METRIC_L2, d=32, bbs=32):
bbs = 32
ds = datasets.SyntheticDataset(d, 2000, 5000, 200)
index = faiss.index_factory(d, f"IVF32,PQ{d//2}x4np", metric)
index.by_residual = by_residual
index.train(ds.get_train())
index.nprobe = 4
xb = ds.get_database()
index.add(xb[:1235])
index2 = faiss.IndexIVFPQFastScan(index, bbs)
index.add(xb[1235:])
index3 = faiss.IndexIVFPQFastScan(index, bbs)
Dref, Iref = index3.search(ds.get_queries(), 10)
index2.add(xb[1235:])
Dnew, Inew = index2.search(ds.get_queries(), 10)
np.testing.assert_array_equal(Dref, Dnew)
np.testing.assert_array_equal(Iref, Inew)
# direct verification of code content. Not sure the test is correct
# if codes are shuffled.
for list_no in range(32):
ref_ids, ref_codes = get_invlist(index3.invlists, list_no)
new_ids, new_codes = get_invlist(index2.invlists, list_no)
self.assertEqual(set(ref_ids), set(new_ids))
new_code_per_id = {
new_ids[i]: new_codes[i // bbs, :, i % bbs]
for i in range(new_ids.size)
}
for i, the_id in enumerate(ref_ids):
ref_code_i = ref_codes[i // bbs, :, i % bbs]
new_code_i = new_code_per_id[the_id]
np.testing.assert_array_equal(ref_code_i, new_code_i)
def test_add(self):
self.do_test()
def test_odd_d(self):
self.do_test(d=30)
def test_bbs64(self):
self.do_test(bbs=64)
class TestTraining(unittest.TestCase):
def do_test(self, by_residual=False, metric=faiss.METRIC_L2, d=32, bbs=32):
bbs = 32
ds = datasets.SyntheticDataset(d, 2000, 5000, 200)
index = faiss.index_factory(d, f"IVF32,PQ{d//2}x4np", metric)
index.by_residual = by_residual
index.train(ds.get_train())
index.add(ds.get_database())
index.nprobe = 4
Dref, Iref = index.search(ds.get_queries(), 10)
index2 = faiss.IndexIVFPQFastScan(
index.quantizer, d, 32, d // 2, 4, metric, bbs)
index2.by_residual = by_residual
index2.train(ds.get_train())
index2.add(ds.get_database())
index2.nprobe = 4
Dnew, Inew = index2.search(ds.get_queries(), 10)
m3 = three_metrics(Dref, Iref, Dnew, Inew)
# print((by_residual, metric, d), ":", m3)
ref_m3_tab = {
(True, 1, 32): (0.995, 1.0, 9.91),
(True, 0, 32): (0.99, 1.0, 9.91),
(True, 1, 30): (0.989, 1.0, 9.885),
(False, 1, 32): (0.99, 1.0, 9.875),
(False, 0, 32): (0.99, 1.0, 9.92),
(False, 1, 30): (1.0, 1.0, 9.895)
}
ref_m3 = ref_m3_tab[(by_residual, metric, d)]
self.assertGreaterEqual(m3[0], ref_m3[0] * 0.99)
self.assertGreater(m3[1], ref_m3[1] * 0.99)
self.assertGreater(m3[2], ref_m3[2] * 0.99)
# Test I/O
data = faiss.serialize_index(index2)
index3 = faiss.deserialize_index(data)
D3, I3 = index3.search(ds.get_queries(), 10)
np.testing.assert_array_equal(I3, Inew)
np.testing.assert_array_equal(D3, Dnew)
def test_no_residual(self):
self.do_test(by_residual=False)
def test_by_residual(self):
self.do_test(by_residual=True)
def test_no_residual_ip(self):
self.do_test(by_residual=False, metric=faiss.METRIC_INNER_PRODUCT)
def test_by_residual_ip(self):
self.do_test(by_residual=True, metric=faiss.METRIC_INNER_PRODUCT)
def test_no_residual_odd_dim(self):
self.do_test(by_residual=False, d=30)
def test_by_residual_odd_dim(self):
self.do_test(by_residual=True, d=30)
class TestIsTrained(unittest.TestCase):
def test_issue_2019(self):
index = faiss.index_factory(
32,
"PCAR16,IVF200(IVF10,PQ2x4fs,RFlat),PQ4x4fsr"
)
des = faiss.rand((1000, 32))
index.train(des)
class TestIVFAQFastScan(unittest.TestCase):
def subtest_accuracy(self, aq, st, by_residual, implem, metric_type='L2'):
"""
Compare IndexIVFAdditiveQuantizerFastScan with
IndexIVFAdditiveQuantizer
"""
nlist, d = 16, 8
ds = datasets.SyntheticDataset(d, 1000, 1000, 500, metric_type)
gt = ds.get_groundtruth(k=1)
if metric_type == 'L2':
metric = faiss.METRIC_L2
postfix1 = '_Nqint8'
postfix2 = f'_N{st}2x4'
else:
metric = faiss.METRIC_INNER_PRODUCT
postfix1 = postfix2 = ''
index = faiss.index_factory(d, f'IVF{nlist},{aq}3x4{postfix1}', metric)
index.by_residual = by_residual
index.train(ds.get_train())
index.add(ds.get_database())
index.nprobe = 16
Dref, Iref = index.search(ds.get_queries(), 1)
indexfs = faiss.index_factory(
d, f'IVF{nlist},{aq}3x4fs_32{postfix2}', metric)
indexfs.by_residual = by_residual
indexfs.train(ds.get_train())
indexfs.add(ds.get_database())
indexfs.nprobe = 16
indexfs.implem = implem
D1, I1 = indexfs.search(ds.get_queries(), 1)
nq = Iref.shape[0]
recall_ref = (Iref == gt).sum() / nq
recall1 = (I1 == gt).sum() / nq
print(aq, st, by_residual, implem, metric_type, recall_ref, recall1)
assert abs(recall_ref - recall1) < 0.051
def xx_test_accuracy(self):
# generated programmatically below
for metric in 'L2', 'IP':
for byr in True, False:
for implem in 0, 10, 11, 12, 13, 14, 15:
self.subtest_accuracy('RQ', 'rq', byr, implem, metric)
self.subtest_accuracy('LSQ', 'lsq', byr, implem, metric)
def subtest_rescale_accuracy(self, aq, st, by_residual, implem):
"""
we set norm_scale to 2 and compare it with IndexIVFAQ
"""
nlist, d = 16, 8
ds = datasets.SyntheticDataset(d, 1000, 1000, 500)
gt = ds.get_groundtruth(k=1)
metric = faiss.METRIC_L2
postfix1 = '_Nqint8'
postfix2 = f'_N{st}2x4'
index = faiss.index_factory(
d, f'IVF{nlist},{aq}3x4{postfix1}', metric)
index.by_residual = by_residual
index.train(ds.get_train())
index.add(ds.get_database())
index.nprobe = 16
Dref, Iref = index.search(ds.get_queries(), 1)
indexfs = faiss.index_factory(
d, f'IVF{nlist},{aq}3x4fs_32{postfix2}', metric)
indexfs.by_residual = by_residual
indexfs.norm_scale = 2
indexfs.train(ds.get_train())
indexfs.add(ds.get_database())
indexfs.nprobe = 16
indexfs.implem = implem
D1, I1 = indexfs.search(ds.get_queries(), 1)
nq = Iref.shape[0]
recall_ref = (Iref == gt).sum() / nq
recall1 = (I1 == gt).sum() / nq
print(aq, st, by_residual, implem, recall_ref, recall1)
assert abs(recall_ref - recall1) < 0.05
def xx_test_rescale_accuracy(self):
for byr in True, False:
for implem in 0, 10, 11, 12, 13, 14, 15:
self.subtest_accuracy('RQ', 'rq', byr, implem, 'L2')
self.subtest_accuracy('LSQ', 'lsq', byr, implem, 'L2')
def subtest_from_ivfaq(self, implem):
d = 8
ds = datasets.SyntheticDataset(d, 1000, 2000, 1000, metric='IP')
gt = ds.get_groundtruth(k=1)
index = faiss.index_factory(d, 'IVF16,RQ8x4', faiss.METRIC_INNER_PRODUCT)
index.train(ds.get_train())
index.add(ds.get_database())
index.nprobe = 16
Dref, Iref = index.search(ds.get_queries(), 1)
indexfs = faiss.IndexIVFAdditiveQuantizerFastScan(index)
D1, I1 = indexfs.search(ds.get_queries(), 1)
nq = Iref.shape[0]
recall_ref = (Iref == gt).sum() / nq
recall1 = (I1 == gt).sum() / nq
print(recall_ref, recall1)
assert abs(recall_ref - recall1) < 0.02
def test_from_ivfaq(self):
for implem in 0, 1, 2:
self.subtest_from_ivfaq(implem)
def subtest_factory(self, aq, M, bbs, st, r='r'):
"""
Format: IVF{nlist},{AQ}{M}x4fs{r}_{bbs}_N{st}
nlist (int): number of inverted lists
AQ (str): `LSQ` or `RQ`
M (int): number of sub-quantizers
bbs (int): build block size
st (str): search type, `lsq2x4` or `rq2x4`
r (str): `r` or ``, by_residual or not
"""
AQ = faiss.AdditiveQuantizer
nlist, d = 128, 16
if bbs > 0:
index = faiss.index_factory(
d, f'IVF{nlist},{aq}{M}x4fs{r}_{bbs}_N{st}2x4')
else:
index = faiss.index_factory(
d, f'IVF{nlist},{aq}{M}x4fs{r}_N{st}2x4')
bbs = 32
assert index.nlist == nlist
assert index.bbs == bbs
q = faiss.downcast_Quantizer(index.aq)
assert q.M == M
if aq == 'LSQ':
assert isinstance(q, faiss.LocalSearchQuantizer)
if aq == 'RQ':
assert isinstance(q, faiss.ResidualQuantizer)
if st == 'lsq':
assert q.search_type == AQ.ST_norm_lsq2x4
if st == 'rq':
assert q.search_type == AQ.ST_norm_rq2x4
assert index.by_residual == (r == 'r')
def test_factory(self):
self.subtest_factory('LSQ', 16, 64, 'lsq')
self.subtest_factory('LSQ', 16, 64, 'rq')
self.subtest_factory('RQ', 16, 64, 'rq')
self.subtest_factory('RQ', 16, 64, 'lsq')
self.subtest_factory('LSQ', 64, 0, 'lsq')
self.subtest_factory('LSQ', 64, 0, 'lsq', r='')
def subtest_io(self, factory_str):
d = 8
ds = datasets.SyntheticDataset(d, 1000, 2000, 1000)
index = faiss.index_factory(d, factory_str)
index.train(ds.get_train())
index.add(ds.get_database())
D1, I1 = index.search(ds.get_queries(), 1)
fd, fname = tempfile.mkstemp()
os.close(fd)
try:
faiss.write_index(index, fname)
index2 = faiss.read_index(fname)
D2, I2 = index2.search(ds.get_queries(), 1)
np.testing.assert_array_equal(I1, I2)
finally:
if os.path.exists(fname):
os.unlink(fname)
def test_io(self):
self.subtest_io('IVF16,LSQ4x4fs_Nlsq2x4')
self.subtest_io('IVF16,LSQ4x4fs_Nrq2x4')
self.subtest_io('IVF16,RQ4x4fs_Nrq2x4')
self.subtest_io('IVF16,RQ4x4fs_Nlsq2x4')
# add more tests programmatically
def add_TestIVFAQFastScan_subtest_accuracy(
aq, st, by_residual, implem, metric='L2'):
setattr(
TestIVFAQFastScan,
f"test_accuracy_{metric}_{aq}_implem{implem}_residual{by_residual}",
lambda self:
self.subtest_accuracy(aq, st, by_residual, implem, metric)
)
def add_TestIVFAQFastScan_subtest_rescale_accuracy(aq, st, by_residual, implem):
setattr(
TestIVFAQFastScan,
f"test_rescale_accuracy_{aq}_implem{implem}_residual{by_residual}",
lambda self:
self.subtest_rescale_accuracy(aq, st, by_residual, implem)
)
for byr in True, False:
for implem in 0, 10, 11, 12, 13, 14, 15:
for mt in 'L2', 'IP':
add_TestIVFAQFastScan_subtest_accuracy('RQ', 'rq', byr, implem, mt)
add_TestIVFAQFastScan_subtest_accuracy('LSQ', 'lsq', byr, implem, mt)
add_TestIVFAQFastScan_subtest_rescale_accuracy('LSQ', 'lsq', byr, implem)
add_TestIVFAQFastScan_subtest_rescale_accuracy('RQ', 'rq', byr, implem)
class TestIVFPAQFastScan(unittest.TestCase):
def subtest_accuracy(self, paq):
"""
Compare IndexIVFAdditiveQuantizerFastScan with
IndexIVFAdditiveQuantizer
"""
nlist, d = 16, 8
ds = datasets.SyntheticDataset(d, 1000, 1000, 500)
gt = ds.get_groundtruth(k=1)
index = faiss.index_factory(d, f'IVF{nlist},{paq}2x3x4_Nqint8')
index.train(ds.get_train())
index.add(ds.get_database())
index.nprobe = 4
Dref, Iref = index.search(ds.get_queries(), 1)
indexfs = faiss.index_factory(d, f'IVF{nlist},{paq}2x3x4fsr_Nlsq2x4')
indexfs.train(ds.get_train())
indexfs.add(ds.get_database())
indexfs.nprobe = 4
D1, I1 = indexfs.search(ds.get_queries(), 1)
nq = Iref.shape[0]
recall_ref = (Iref == gt).sum() / nq
recall1 = (I1 == gt).sum() / nq
print(paq, recall_ref, recall1)
assert abs(recall_ref - recall1) < 0.05
def test_accuracy_PLSQ(self):
self.subtest_accuracy("PLSQ")
def test_accuracy_PRQ(self):
self.subtest_accuracy("PRQ")
def subtest_factory(self, paq):
nlist, d = 128, 16
index = faiss.index_factory(d, f'IVF{nlist},{paq}2x3x4fsr_Nlsq2x4')
q = faiss.downcast_Quantizer(index.aq)
self.assertEqual(index.nlist, nlist)
self.assertEqual(q.nsplits, 2)
self.assertEqual(q.subquantizer(0).M, 3)
self.assertTrue(index.by_residual)
def test_factory(self):
self.subtest_factory('PLSQ')
self.subtest_factory('PRQ')
def subtest_io(self, factory_str):
d = 8
ds = datasets.SyntheticDataset(d, 1000, 2000, 1000)
index = faiss.index_factory(d, factory_str)
index.train(ds.get_train())
index.add(ds.get_database())
D1, I1 = index.search(ds.get_queries(), 1)
fd, fname = tempfile.mkstemp()
os.close(fd)
try:
faiss.write_index(index, fname)
index2 = faiss.read_index(fname)
D2, I2 = index2.search(ds.get_queries(), 1)
np.testing.assert_array_equal(I1, I2)
finally:
if os.path.exists(fname):
os.unlink(fname)
def test_io(self):
self.subtest_io('IVF16,PLSQ2x3x4fsr_Nlsq2x4')
self.subtest_io('IVF16,PRQ2x3x4fs_Nrq2x4')
|
06f172396b3314995ad3e352e29118a7d059a82c
|
e145f05e919cad040c23587eadd11bed1e6bd5e8
|
/test/functional/feature_evm_transferdomain.py
|
4f3f763eafcb005f212d6d42f7b1e68ab4ad8606
|
[
"MIT"
] |
permissive
|
DeFiCh/ain
|
f819ea586a9c17f99f8f8ea32982554b7c4fb5e2
|
6908107c4a7ee0e30dabc4ea773820637b42bbfe
|
refs/heads/master
| 2023-08-16T18:51:18.460486
| 2023-08-16T14:11:56
| 2023-08-16T14:11:56
| 228,198,960
| 435
| 146
|
MIT
| 2023-09-14T16:23:37
| 2019-12-15T14:37:57
|
C++
|
UTF-8
|
Python
| false
| false
| 39,024
|
py
|
feature_evm_transferdomain.py
|
#!/usr/bin/env python3
# Copyright (c) 2014-2019 The Bitcoin Core developers
# Copyright (c) DeFi Blockchain Developers
# Distributed under the MIT software license, see the accompanying
# file LICENSE or http://www.opensource.org/licenses/mit-license.php.
from test_framework.test_framework import DefiTestFramework
from test_framework.util import assert_equal, assert_raises_rpc_error, int_to_eth_u256
from decimal import Decimal
class EVMTest(DefiTestFramework):
def set_test_params(self):
self.num_nodes = 2
self.setup_clean_chain = True
self.extra_args = [
[
"-txordering=2",
"-dummypos=0",
"-txnotokens=0",
"-amkheight=50",
"-bayfrontheight=51",
"-eunosheight=80",
"-fortcanningheight=82",
"-fortcanninghillheight=84",
"-fortcanningroadheight=86",
"-fortcanningcrunchheight=88",
"-fortcanningspringheight=90",
"-fortcanninggreatworldheight=94",
"-fortcanningepilogueheight=96",
"-grandcentralheight=101",
"-nextnetworkupgradeheight=150",
"-subsidytest=1",
"-txindex=1",
],
[
"-txordering=2",
"-dummypos=0",
"-txnotokens=0",
"-amkheight=50",
"-bayfrontheight=51",
"-eunosheight=80",
"-fortcanningheight=82",
"-fortcanninghillheight=84",
"-fortcanningroadheight=86",
"-fortcanningcrunchheight=88",
"-fortcanningspringheight=90",
"-fortcanninggreatworldheight=94",
"-fortcanningepilogueheight=96",
"-grandcentralheight=101",
"-nextnetworkupgradeheight=150",
"-subsidytest=1",
"-txindex=1",
],
]
def setup(self):
self.address = self.nodes[0].get_genesis_keys().ownerAuthAddress
self.address_2nd = self.nodes[0].get_genesis_keys().ownerAuthAddress
self.address1 = self.nodes[1].get_genesis_keys().ownerAuthAddress
self.eth_address = "0x9b8a4af42140d8a4c153a822f02571a1dd037e89"
self.eth_address_bech32 = "bcrt1qta8meuczw0mhqupzjl5wplz47xajz0dn0wxxr8"
self.eth_address_privkey = (
"af990cc3ba17e776f7f57fcc59942a82846d75833fa17d2ba59ce6858d886e23"
)
self.eth_address1 = self.nodes[0].getnewaddress("", "erc55")
self.no_auth_eth_address = "0x6c34cbb9219d8caa428835d2073e8ec88ba0a110"
symbolDFI = "DFI"
symbolBTC = "BTC"
self.symbolBTCDFI = "BTC-DFI"
symbolUSER = "USER"
# Import eth_address and validate that the Bech32 equivalent is part of the wallet
self.nodes[0].importprivkey(self.eth_address_privkey)
result = self.nodes[0].getaddressinfo(self.eth_address_bech32)
assert_equal(
result["scriptPubKey"], "00145f4fbcf30273f770702297e8e0fc55f1bb213db3"
)
assert_equal(
result["pubkey"],
"021286647f7440111ab928bdea4daa42533639c4567d81eca0fff622fb6438eae3",
)
assert_equal(result["ismine"], True)
assert_equal(result["iswitness"], True)
# Generate chain
self.nodes[0].generate(145)
# Create DAT token to be used in tests
self.nodes[0].createtoken(
{
"symbol": symbolBTC,
"name": "BTC token",
"isDAT": True,
"collateralAddress": self.address,
}
)
# Create non-DAT token to be used in tests
userTx = self.nodes[0].createtoken(
{
"symbol": symbolUSER,
"name": "Non-DAT token",
"isDAT": False,
"collateralAddress": self.address,
}
)
# Fund DFI address
self.nodes[0].utxostoaccount({self.address: "201@DFI"})
self.nodes[0].generate(1)
self.nodes[0].minttokens("100@BTC")
self.nodes[0].minttokens("100@USER#128")
idDFI = list(self.nodes[0].gettoken(symbolDFI).keys())[0]
idBTC = list(self.nodes[0].gettoken(symbolBTC).keys())[0]
idUSER = list(self.nodes[0].gettoken(userTx).keys())[0]
self.symbolUSER = self.nodes[0].gettoken(idUSER)[idUSER]["symbolKey"]
# create pool
self.nodes[0].createpoolpair(
{
"tokenA": idBTC,
"tokenB": idDFI,
"commission": 1,
"status": True,
"ownerAddress": self.address,
"pairSymbol": self.symbolBTCDFI,
},
[],
)
self.nodes[0].generate(1)
# check tokens id
pool = self.nodes[0].getpoolpair(self.symbolBTCDFI)
idDFIBTC = list(self.nodes[0].gettoken(self.symbolBTCDFI).keys())[0]
assert pool[idDFIBTC]["idTokenA"] == idBTC
assert pool[idDFIBTC]["idTokenB"] == idDFI
# transfer
self.nodes[0].addpoolliquidity(
{self.address: ["1@" + symbolBTC, "100@" + symbolDFI]}, self.address, []
)
self.nodes[0].generate(1)
def check_initial_balance(self):
# Check initial balances
self.dfi_balance = self.nodes[0].getaccount(self.address, {}, True)["0"]
self.eth_balance = self.nodes[0].eth_getBalance(self.eth_address)
assert_equal(self.dfi_balance, Decimal("101"))
assert_equal(self.eth_balance, int_to_eth_u256(0))
assert_equal(len(self.nodes[0].getaccount(self.eth_address, {}, True)), 0)
def invalid_before_fork_and_disabled(self):
assert_raises_rpc_error(
-32600,
"called before NextNetworkUpgrade height",
self.nodes[0].transferdomain,
[
{
"src": {"address": self.address, "amount": "100@DFI", "domain": 2},
"dst": {
"address": self.eth_address,
"amount": "100@DFI",
"domain": 3,
},
}
],
)
# Move to fork height
self.nodes[0].generate(2)
assert_raises_rpc_error(
-32600,
"Cannot create tx, transfer domain is not enabled",
self.nodes[0].transferdomain,
[
{
"src": {"address": self.address, "amount": "100@DFI", "domain": 2},
"dst": {
"address": self.eth_address,
"amount": "100@DFI",
"domain": 3,
},
}
],
)
# Activate EVM
self.nodes[0].setgov({"ATTRIBUTES": {"v0/params/feature/evm": "true"}})
# TODO: Check EVM disabled on gen +1
# TODO: Check EVM enabled on gen +2
self.nodes[0].generate(2)
# Check error before transferdomain enabled
assert_raises_rpc_error(
-32600,
"Cannot create tx, transfer domain is not enabled",
self.nodes[0].transferdomain,
[
{
"src": {"address": self.address, "amount": "100@DFI", "domain": 2},
"dst": {
"address": self.eth_address,
"amount": "100@DFI",
"domain": 3,
},
}
],
)
# Activate transferdomain
self.nodes[0].setgov(
{
"ATTRIBUTES": {
"v0/params/feature/transferdomain": "true",
"v0/transferdomain/dvm-evm/enabled": "false",
"v0/transferdomain/dvm-evm/src-formats": ["p2pkh", "bech32"],
"v0/transferdomain/dvm-evm/dest-formats": ["erc55"],
"v0/transferdomain/evm-dvm/enabled": "false",
"v0/transferdomain/evm-dvm/src-formats": ["erc55"],
"v0/transferdomain/evm-dvm/auth-formats": ["bech32-erc55"],
"v0/transferdomain/evm-dvm/dest-formats": ["p2pkh", "bech32"],
}
}
)
self.nodes[0].generate(1)
assert_raises_rpc_error(
-32600,
"DVM to EVM is not currently enabled",
self.nodes[0].transferdomain,
[
{
"src": {"address": self.address, "amount": "100@DFI", "domain": 2},
"dst": {
"address": self.eth_address,
"amount": "100@DFI",
"domain": 3,
},
}
],
)
assert_raises_rpc_error(
-32600,
"EVM to DVM is not currently enabled",
self.nodes[0].transferdomain,
[
{
"src": {"address": self.address, "amount": "100@DFI", "domain": 3},
"dst": {
"address": self.eth_address,
"amount": "100@DFI",
"domain": 2,
},
}
],
)
self.nodes[0].setgov(
{
"ATTRIBUTES": {
"v0/transferdomain/dvm-evm/enabled": "true",
"v0/transferdomain/evm-dvm/enabled": "true",
}
}
)
self.nodes[0].generate(1)
assert_raises_rpc_error(
-32600,
"transferdomain for DST20 from DVM to EVM is not enabled",
self.nodes[0].transferdomain,
[
{
"src": {"address": self.address, "amount": "1@BTC", "domain": 2},
"dst": {
"address": self.eth_address,
"amount": "1@BTC",
"domain": 3,
},
}
],
)
assert_raises_rpc_error(
-32600,
"transferdomain for DST20 from EVM to DVM is not enabled",
self.nodes[0].transferdomain,
[
{
"src": {"address": self.address, "amount": "1@BTC", "domain": 3},
"dst": {
"address": self.eth_address,
"amount": "1@BTC",
"domain": 2,
},
}
],
)
# Activate DAT transferdomain
self.nodes[0].setgov(
{
"ATTRIBUTES": {
"v0/transferdomain/dvm-evm/dat-enabled": "true",
"v0/transferdomain/evm-dvm/dat-enabled": "true",
}
}
)
self.nodes[0].generate(1)
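        # Record the post-setup height so later scenarios can roll back to this clean state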
self.start_height = self.nodes[0].getblockcount()
def invalid_parameters(self):
# Check for invalid parameters in transferdomain rpc
assert_raises_rpc_error(
-8,
'Invalid parameters, src argument "address" must not be null',
self.nodes[0].transferdomain,
[
{
"src": {"amount": "100@DFI", "domain": 2},
"dst": {
"address": self.eth_address,
"amount": "100@DFI",
"domain": 3,
},
}
],
)
assert_raises_rpc_error(
-8,
'Invalid parameters, src argument "amount" must not be null',
self.nodes[0].transferdomain,
[
{
"src": {"address": self.address, "domain": 2},
"dst": {
"address": self.eth_address,
"amount": "100@DFI",
"domain": 3,
},
}
],
)
assert_raises_rpc_error(
-8,
'Invalid parameters, src argument "domain" must not be null',
self.nodes[0].transferdomain,
[
{
"src": {"address": self.address, "amount": "100@DFI"},
"dst": {
"address": self.eth_address,
"amount": "100@DFI",
"domain": 3,
},
}
],
)
assert_raises_rpc_error(
-8,
"JSON value is not an integer as expected",
self.nodes[0].transferdomain,
[
{
"src": {
"address": self.address,
"amount": "100@DFI",
"domain": "dvm",
},
"dst": {
"address": self.eth_address,
"amount": "100@DFI",
"domain": 3,
},
}
],
)
assert_raises_rpc_error(
-8,
"JSON value is not an integer as expected",
self.nodes[0].transferdomain,
[
{
"src": {"address": self.address, "amount": "100@DFI", "domain": 2},
"dst": {
"address": self.eth_address,
"amount": "100@DFI",
"domain": "evm",
},
}
],
)
assert_raises_rpc_error(
-8,
'Invalid parameters, src argument "domain" must be either 2 (DFI token to EVM) or 3 (EVM to DFI token)',
self.nodes[0].transferdomain,
[
{
"src": {"address": self.address, "amount": "100@DFI", "domain": 0},
"dst": {
"address": self.eth_address,
"amount": "100@DFI",
"domain": 2,
},
}
],
)
assert_raises_rpc_error(
-32600,
"Unknown transfer domain aspect",
self.nodes[0].transferdomain,
[
{
"src": {"address": self.address, "amount": "100@DFI", "domain": 2},
"dst": {
"address": self.eth_address,
"amount": "100@DFI",
"domain": 4,
},
}
],
)
assert_raises_rpc_error(
-5,
"recipient (blablabla) does not refer to any valid address",
self.nodes[0].transferdomain,
[
{
"src": {"address": "blablabla", "amount": "100@DFI", "domain": 2},
"dst": {
"address": self.eth_address,
"amount": "100@DFI",
"domain": 3,
},
}
],
)
assert_raises_rpc_error(
-5,
"recipient (blablabla) does not refer to any valid address",
self.nodes[0].transferdomain,
[
{
"src": {"address": self.address, "amount": "100@DFI", "domain": 2},
"dst": {"address": "blablabla", "amount": "100@DFI", "domain": 3},
}
],
)
def invalid_values_dvm_evm(self):
        # Check for invalid values DVM->EVM in transferdomain rpc
assert_raises_rpc_error(
-32600,
'Src address must be a legacy or Bech32 address in case of "DVM" domain',
self.nodes[0].transferdomain,
[
{
"src": {
"address": self.eth_address,
"amount": "100@DFI",
"domain": 2,
},
"dst": {
"address": self.eth_address,
"amount": "100@DFI",
"domain": 3,
},
}
],
)
assert_raises_rpc_error(
-32600,
'Dst address must be an ERC55 address in case of "EVM" domain',
self.nodes[0].transferdomain,
[
{
"src": {"address": self.address, "amount": "100@DFI", "domain": 2},
"dst": {"address": self.address, "amount": "100@DFI", "domain": 3},
}
],
)
assert_raises_rpc_error(
-32600,
"Cannot transfer inside same domain",
self.nodes[0].transferdomain,
[
{
"src": {"address": self.address, "amount": "100@DFI", "domain": 2},
"dst": {
"address": self.eth_address,
"amount": "100@DFI",
"domain": 2,
},
}
],
)
assert_raises_rpc_error(
-32600,
"Source amount must be equal to destination amount",
self.nodes[0].transferdomain,
[
{
"src": {"address": self.address, "amount": "100@DFI", "domain": 2},
"dst": {
"address": self.eth_address,
"amount": "101@DFI",
"domain": 3,
},
}
],
)
assert_raises_rpc_error(
-32600,
"Excess data set, maximum allow is 0",
self.nodes[0].transferdomain,
[
{
"src": {
"address": self.address,
"amount": "100@DFI",
"domain": 2,
"data": "1",
},
"dst": {
"address": self.eth_address,
"amount": "100@DFI",
"domain": 3,
},
}
],
)
assert_raises_rpc_error(
-32600,
"Excess data set, maximum allow is 0",
self.nodes[0].transferdomain,
[
{
"src": {"address": self.address, "amount": "100@DFI", "domain": 2},
"dst": {
"address": self.eth_address,
"amount": "100@DFI",
"domain": 3,
"data": "1",
},
}
],
)
assert_raises_rpc_error(
-32600,
"Non-DAT or LP tokens are not supported for transferdomain",
self.nodes[0].transferdomain,
[
{
"src": {
"address": self.address,
"amount": "1@" + self.symbolUSER,
"domain": 2,
},
"dst": {
"address": self.eth_address,
"amount": "1@" + self.symbolUSER,
"domain": 3,
},
}
],
)
assert_raises_rpc_error(
-32600,
"Non-DAT or LP tokens are not supported for transferdomain",
self.nodes[0].transferdomain,
[
{
"src": {
"address": self.address,
"amount": "1@" + self.symbolBTCDFI,
"domain": 2,
},
"dst": {
"address": self.eth_address,
"amount": "1@" + self.symbolBTCDFI,
"domain": 3,
},
}
],
)
def valid_transfer_dvm_evm(self):
# Transfer 100 DFI from DVM to EVM
tx1 = self.nodes[0].transferdomain(
[
{
"src": {"address": self.address, "amount": "100@DFI", "domain": 2},
"dst": {
"address": self.eth_address,
"amount": "100@DFI",
"domain": 3,
},
}
]
)
self.nodes[0].generate(1)
# Check tx1 fields
result = self.nodes[0].getcustomtx(tx1)["results"]["transfers"][0]
assert_equal(result["src"]["address"], self.address)
assert_equal(result["src"]["amount"], "100.00000000@0")
assert_equal(result["src"]["domain"], "DVM")
assert_equal(result["dst"]["address"], self.eth_address)
assert_equal(result["dst"]["amount"], "100.00000000@0")
assert_equal(result["dst"]["domain"], "EVM")
# Check that EVM balance shows in gettokenbalances
assert_equal(
self.nodes[0].gettokenbalances({}, False, False, True),
["101.00000000@0", "99.00000000@1", "9.99999000@2", "100.00000000@128"],
)
# Check new balances
new_dfi_balance = self.nodes[0].getaccount(self.address, {}, True)["0"]
new_eth_balance = self.nodes[0].eth_getBalance(self.eth_address)
assert_equal(new_dfi_balance, self.dfi_balance - Decimal("100"))
assert_equal(new_eth_balance, int_to_eth_u256(100))
assert_equal(len(self.nodes[0].getaccount(self.eth_address, {}, True)), 1)
assert_equal(self.nodes[0].getaccount(self.eth_address)[0], "100.00000000@DFI")
# Check accounting of DVM->EVM transfer
attributes = self.nodes[0].getgov("ATTRIBUTES")["ATTRIBUTES"]
assert_equal(
attributes["v0/live/economy/transferdomain/dvm-evm/0/total"],
Decimal("100.00000000"),
)
# assert_equal(attributes['v0/live/economy/transferdomain/dvm/0/current'], Decimal('-100.00000000'))
assert_equal(
attributes["v0/live/economy/transferdomain/dvm/0/out"],
Decimal("100.00000000"),
)
# assert_equal(attributes['v0/live/economy/transferdomain/evm/0/current'], Decimal('100.00000000'))
assert_equal(
attributes["v0/live/economy/transferdomain/evm/0/in"],
Decimal("100.00000000"),
)
# Check accounting of EVM fees
attributes = self.nodes[0].getgov("ATTRIBUTES")["ATTRIBUTES"]
assert_equal(attributes["v0/live/economy/evm/block/fee_burnt"], Decimal("0E-8"))
assert_equal(
attributes["v0/live/economy/evm/block/fee_priority"], Decimal("0E-8")
)
def invalid_values_evm_dvm(self):
        # Check for invalid values EVM->DVM in transferdomain rpc
assert_raises_rpc_error(
-32600,
'Src address must be an ERC55 address in case of "EVM" domain',
self.nodes[0].transferdomain,
[
{
"src": {"address": self.address, "amount": "100@DFI", "domain": 3},
"dst": {"address": self.address, "amount": "100@DFI", "domain": 2},
}
],
)
assert_raises_rpc_error(
-32600,
'Dst address must be a legacy or Bech32 address in case of "DVM" domain',
self.nodes[0].transferdomain,
[
{
"src": {
"address": self.eth_address,
"amount": "100@DFI",
"domain": 3,
},
"dst": {
"address": self.eth_address,
"amount": "100@DFI",
"domain": 2,
},
}
],
)
assert_raises_rpc_error(
-32600,
"Cannot transfer inside same domain",
self.nodes[0].transferdomain,
[
{
"src": {
"address": self.eth_address,
"amount": "100@DFI",
"domain": 3,
},
"dst": {"address": self.address, "amount": "100@DFI", "domain": 3},
}
],
)
assert_raises_rpc_error(
-32600,
"Source amount must be equal to destination amount",
self.nodes[0].transferdomain,
[
{
"src": {
"address": self.eth_address,
"amount": "100@DFI",
"domain": 3,
},
"dst": {"address": self.address, "amount": "101@DFI", "domain": 2},
}
],
)
assert_raises_rpc_error(
-32600,
"TransferDomain currently only supports a single transfer per transaction",
self.nodes[0].transferdomain,
[
{
"src": {
"address": self.eth_address1,
"amount": "10@DFI",
"domain": 3,
},
"dst": {"address": self.address, "amount": "10@DFI", "domain": 2},
},
{
"src": {
"address": self.eth_address1,
"amount": "10@DFI",
"domain": 3,
},
"dst": {
"address": self.address_2nd,
"amount": "10@DFI",
"domain": 2,
},
},
],
)
assert_raises_rpc_error(
-32600,
"Non-DAT or LP tokens are not supported for transferdomain",
self.nodes[0].transferdomain,
[
{
"src": {
"address": self.address,
"amount": "1@" + self.symbolUSER,
"domain": 3,
},
"dst": {
"address": self.eth_address,
"amount": "1@" + self.symbolUSER,
"domain": 2,
},
}
],
)
assert_raises_rpc_error(
-32600,
"Non-DAT or LP tokens are not supported for transferdomain",
self.nodes[0].transferdomain,
[
{
"src": {
"address": self.address,
"amount": "1@" + self.symbolBTCDFI,
"domain": 3,
},
"dst": {
"address": self.eth_address,
"amount": "1@" + self.symbolBTCDFI,
"domain": 2,
},
}
],
)
def valid_transfer_evm_dvm(self):
self.rollback_to(self.start_height)
# Transfer 100 DFI from DVM to EVM
self.valid_transfer_dvm_evm()
# Transfer 100 DFI from EVM to DVM
tx = self.nodes[0].transferdomain(
[
{
"src": {
"address": self.eth_address,
"amount": "100@DFI",
"domain": 3,
},
"dst": {"address": self.address, "amount": "100@DFI", "domain": 2},
}
]
)
self.nodes[0].generate(1)
# Check tx fields
result = self.nodes[0].getcustomtx(tx)["results"]["transfers"][0]
assert_equal(result["src"]["address"], self.eth_address)
assert_equal(result["src"]["amount"], "100.00000000@0")
assert_equal(result["src"]["domain"], "EVM")
assert_equal(result["dst"]["address"], self.address)
assert_equal(result["dst"]["amount"], "100.00000000@0")
assert_equal(result["dst"]["domain"], "DVM")
# Check new balances
new_dfi_balance = self.nodes[0].getaccount(self.address, {}, True)["0"]
new_eth_balance = self.nodes[0].eth_getBalance(self.eth_address)
assert_equal(new_dfi_balance, self.dfi_balance)
assert_equal(new_eth_balance, self.eth_balance)
assert_equal(len(self.nodes[0].getaccount(self.eth_address, {}, True)), 0)
# Check accounting of DVM->EVM transfer
attributes = self.nodes[0].getgov("ATTRIBUTES")["ATTRIBUTES"]
assert_equal(
attributes["v0/live/economy/transferdomain/dvm-evm/0/total"],
Decimal("100.00000000"),
)
assert_equal(
attributes["v0/live/economy/transferdomain/dvm/0/current"],
Decimal("0.00000000"),
)
assert_equal(
attributes["v0/live/economy/transferdomain/dvm/0/out"],
Decimal("100.00000000"),
)
assert_equal(
attributes["v0/live/economy/transferdomain/dvm/0/in"],
Decimal("100.00000000"),
)
assert_equal(
attributes["v0/live/economy/transferdomain/evm-dvm/0/total"],
Decimal("100.00000000"),
)
assert_equal(
attributes["v0/live/economy/transferdomain/evm/0/current"],
Decimal("0.00000000"),
)
assert_equal(
attributes["v0/live/economy/transferdomain/evm/0/in"],
Decimal("100.00000000"),
)
assert_equal(
attributes["v0/live/economy/transferdomain/evm/0/out"],
Decimal("100.00000000"),
)
# Check accounting of EVM fees
attributes = self.nodes[0].getgov("ATTRIBUTES")["ATTRIBUTES"]
assert_equal(attributes["v0/live/economy/evm/block/fee_burnt"], Decimal("0E-8"))
assert_equal(
attributes["v0/live/economy/evm/block/fee_priority"], Decimal("0E-8")
)
def invalid_transfer_no_auth(self):
assert_raises_rpc_error(
-5,
"Incorrect authorization for " + self.address1,
self.nodes[0].transferdomain,
[
{
"src": {"address": self.address1, "amount": "1@DFI", "domain": 2},
"dst": {
"address": self.eth_address,
"amount": "1@DFI",
"domain": 3,
},
}
],
)
assert_raises_rpc_error(
-5,
"no full public key for address",
self.nodes[0].transferdomain,
[
{
"src": {
"address": self.no_auth_eth_address,
"amount": "1@DFI",
"domain": 3,
},
"dst": {"address": self.address, "amount": "1@DFI", "domain": 2},
}
],
)
def valid_transfer_to_evm_then_move_then_back_to_dvm(self):
self.rollback_to(self.start_height)
        # Transfer 101 DFI from DVM to EVM
tx1 = self.nodes[0].transferdomain(
[
{
"src": {"address": self.address, "amount": "101@DFI", "domain": 2},
"dst": {
"address": self.eth_address,
"amount": "101@DFI",
"domain": 3,
},
}
]
)
self.nodes[0].generate(1)
# Check tx1 fields
result = self.nodes[0].getcustomtx(tx1)["results"]["transfers"][0]
assert_equal(result["src"]["address"], self.address)
assert_equal(result["src"]["amount"], "101.00000000@0")
assert_equal(result["src"]["domain"], "DVM")
assert_equal(result["dst"]["address"], self.eth_address)
assert_equal(result["dst"]["amount"], "101.00000000@0")
assert_equal(result["dst"]["domain"], "EVM")
# Check that EVM balance shows in gettokenbalances
assert_equal(
self.nodes[0].gettokenbalances({}, False, False, True),
["101.00000000@0", "99.00000000@1", "9.99999000@2", "100.00000000@128"],
)
# Check new balances
new_eth_balance = self.nodes[0].eth_getBalance(self.eth_address)
assert_equal(new_eth_balance, int_to_eth_u256(101))
assert_equal(len(self.nodes[0].getaccount(self.eth_address, {}, True)), 1)
assert_equal(self.nodes[0].getaccount(self.eth_address)[0], "101.00000000@DFI")
# Check accounting of DVM->EVM transfer
attributes = self.nodes[0].getgov("ATTRIBUTES")["ATTRIBUTES"]
assert_equal(
attributes["v0/live/economy/transferdomain/dvm-evm/0/total"],
Decimal("101.00000000"),
)
assert_equal(
attributes["v0/live/economy/transferdomain/dvm/0/current"],
Decimal("-101.00000000"),
)
assert_equal(
attributes["v0/live/economy/transferdomain/dvm/0/out"],
Decimal("101.00000000"),
)
assert_equal(
attributes["v0/live/economy/transferdomain/evm/0/current"],
Decimal("101.00000000"),
)
assert_equal(
attributes["v0/live/economy/transferdomain/evm/0/in"],
Decimal("101.00000000"),
)
# Move from one EVM address to another
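        # evmtx args as used here: sender, nonce, gas price in Gwei, gas limit, recipient, amount in DFI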
self.nodes[0].evmtx(self.eth_address, 0, 21, 21001, self.eth_address1, 100)
self.nodes[0].generate(1)
blockHash = self.nodes[0].getblockhash(self.nodes[0].getblockcount())
new_eth1_balance = self.nodes[0].eth_getBalance(self.eth_address1)
assert_equal(new_eth1_balance, int_to_eth_u256(100))
        # Check accounting of EVM fees: 21 Gwei * 21000 gas = 44100 sat total; 21000 sat burnt, 44100 - 21000 = 23100 sat paid as priority fee
attributes = self.nodes[0].getgov("ATTRIBUTES")["ATTRIBUTES"]
self.burnt_fee = Decimal("0.00021000")
self.priority_fee = Decimal("0.00023100")
assert_equal(attributes["v0/live/economy/evm/block/fee_burnt"], self.burnt_fee)
assert_equal(
attributes["v0/live/economy/evm/block/fee_burnt_min"], self.burnt_fee
)
assert_equal(
attributes["v0/live/economy/evm/block/fee_burnt_min_hash"], blockHash
)
assert_equal(
attributes["v0/live/economy/evm/block/fee_burnt_max"], self.burnt_fee
)
assert_equal(
attributes["v0/live/economy/evm/block/fee_burnt_max_hash"], blockHash
)
assert_equal(
attributes["v0/live/economy/evm/block/fee_priority"], self.priority_fee
)
assert_equal(
attributes["v0/live/economy/evm/block/fee_priority_max"], self.priority_fee
)
assert_equal(
attributes["v0/live/economy/evm/block/fee_priority_min_hash"], blockHash
)
assert_equal(
attributes["v0/live/economy/evm/block/fee_priority_max"], self.priority_fee
)
assert_equal(
attributes["v0/live/economy/evm/block/fee_priority_max_hash"], blockHash
)
dfi_balance = self.nodes[0].getaccount(self.address, {}, True)["0"]
# Transfer 100 DFI from EVM to DVM
tx = self.nodes[0].transferdomain(
[
{
"src": {
"address": self.eth_address1,
"amount": "100@DFI",
"domain": 3,
},
"dst": {"address": self.address, "amount": "100@DFI", "domain": 2},
}
]
)
self.nodes[0].generate(1)
# Check tx fields
result = self.nodes[0].getcustomtx(tx)["results"]["transfers"][0]
assert_equal(result["src"]["address"], self.eth_address1)
assert_equal(result["src"]["amount"], "100.00000000@0")
assert_equal(result["src"]["domain"], "EVM")
assert_equal(result["dst"]["address"], self.address)
assert_equal(result["dst"]["amount"], "100.00000000@0")
assert_equal(result["dst"]["domain"], "DVM")
# Check new balances
new_dfi_balance = self.nodes[0].getaccount(self.address, {}, True)["0"]
assert_equal(new_dfi_balance, dfi_balance + Decimal("100"))
new_eth1_balance = self.nodes[0].eth_getBalance(self.eth_address1)
assert_equal(new_eth1_balance, "0x0")
assert_equal(len(self.nodes[0].getaccount(self.eth_address1, {}, True)), 0)
# Check accounting of DVM->EVM transfer
attributes = self.nodes[0].getgov("ATTRIBUTES")["ATTRIBUTES"]
assert_equal(
attributes["v0/live/economy/transferdomain/dvm-evm/0/total"],
Decimal("101.00000000"),
)
assert_equal(
attributes["v0/live/economy/transferdomain/dvm/0/current"],
Decimal("-1.00000000"),
)
assert_equal(
attributes["v0/live/economy/transferdomain/dvm/0/out"],
Decimal("101.00000000"),
)
assert_equal(
attributes["v0/live/economy/transferdomain/dvm/0/in"],
Decimal("100.00000000"),
)
assert_equal(
attributes["v0/live/economy/transferdomain/evm-dvm/0/total"],
Decimal("100.00000000"),
)
assert_equal(
attributes["v0/live/economy/transferdomain/evm/0/current"],
Decimal("1.00000000"),
)
assert_equal(
attributes["v0/live/economy/transferdomain/evm/0/in"],
Decimal("101.00000000"),
)
assert_equal(
attributes["v0/live/economy/transferdomain/evm/0/out"],
Decimal("100.00000000"),
)
def run_test(self):
self.setup()
self.invalid_before_fork_and_disabled()
self.check_initial_balance()
self.invalid_parameters()
# Transfer DVM->EVM
self.invalid_values_dvm_evm()
self.valid_transfer_dvm_evm()
# Transfer EVM->DVM
self.invalid_values_evm_dvm()
self.valid_transfer_evm_dvm()
# Invalid authorisation
self.invalid_transfer_no_auth()
self.valid_transfer_to_evm_then_move_then_back_to_dvm()
if __name__ == "__main__":
EVMTest().main()
|
d6fb87034a42778e86c5b18c810468d337090f2c
|
dea1c40b5bac7e8dfbcc33e587b15b4487fe25f4
|
/azure/durable_functions/models/actions/ActionType.py
|
a7bea219c5582fa2e311edb64606f13b8eaf51d6
|
[
"MIT"
] |
permissive
|
Azure/azure-functions-durable-python
|
93503441d7ec26c7a54acc0843a88440765def1d
|
5d30ae3b6b1158b021eb848629c1399381d783a8
|
refs/heads/dev
| 2023-08-10T22:22:33.381414
| 2023-08-04T17:41:38
| 2023-08-04T17:41:38
| 167,911,661
| 104
| 54
|
MIT
| 2023-09-07T22:58:23
| 2019-01-28T06:38:12
|
Python
|
UTF-8
|
Python
| false
| false
| 489
|
py
|
ActionType.py
|
from enum import IntEnum
class ActionType(IntEnum):
"""Defines the values associated to the types of activities that can be scheduled."""
CALL_ACTIVITY: int = 0
CALL_ACTIVITY_WITH_RETRY: int = 1
CALL_SUB_ORCHESTRATOR: int = 2
CALL_SUB_ORCHESTRATOR_WITH_RETRY: int = 3
CONTINUE_AS_NEW: int = 4
CREATE_TIMER: int = 5
WAIT_FOR_EXTERNAL_EVENT: int = 6
CALL_ENTITY = 7
CALL_HTTP: int = 8
SIGNAL_ENTITY: int = 9
WHEN_ANY = 11
WHEN_ALL = 12
|
d0204b4259b6b129a0406c1a16b2460ee046abab
|
2c1ab7305ef08008d07bd30836d5bf25e717fc18
|
/python/sparsemap/layers_pt/tests/test_tree_layer.py
|
4110f7676e346f91534b5fc9cbcd5e51ef706409
|
[
"MIT"
] |
permissive
|
vene/sparsemap
|
b08e05ea9c0a2c60822125f0694a65c653046f5b
|
98128d5065dcdc5325fe741ccb6ce9dd27da31b7
|
refs/heads/master
| 2020-03-19T03:35:55.026844
| 2018-11-08T10:43:04
| 2018-11-08T10:43:04
| 135,742,630
| 106
| 11
|
MIT
| 2019-02-10T22:04:04
| 2018-06-01T16:53:27
|
C++
|
UTF-8
|
Python
| false
| false
| 1,323
|
py
|
test_tree_layer.py
|
from ..tree_layer import TreeSparseMarginalsFast
import torch
from torch.autograd import gradcheck, Variable
def test_fasttree_sparse_decode():
torch.manual_seed(42)
n_nodes = 5
tsm = TreeSparseMarginalsFast(n_nodes, max_iter=1000)
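    # gradcheck verifies the layer's analytical gradients against finite differences on random potentials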
for _ in range(20):
W = torch.randn(n_nodes, n_nodes + 1).view(-1)
W = Variable(W, requires_grad=True)
res = gradcheck(tsm, (W,), eps=1e-4,
atol=1e-3)
print(res)
assert res
def test_meaning_sparse_decode():
n_nodes = 4
w = torch.zeros(n_nodes, n_nodes + 1)
w[2, 1] = 100
w = Variable(w)
tsm = TreeSparseMarginalsFast(n_nodes, verbose=3)
u = tsm(w.view(-1))
for config in tsm.status['active_set']:
assert config[1 + 2] == 1
def test_fast_tree_ignores_diag():
n_nodes = 4
# w = torch.zeros(n_nodes, n_nodes + 1)
w_init = torch.randn(n_nodes * (n_nodes + 1))
w = Variable(w_init)
tsm = TreeSparseMarginalsFast(n_nodes)
u = tsm(w.view(-1))
k = 0
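    # zero out the entries where head == modifier (the diagonal); the marginals should be unchanged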
for m in range(1, n_nodes + 1):
for h in range(0, n_nodes + 1):
if h == m:
w_init[k] = 0
k += 1
w = Variable(w_init)
tsm = TreeSparseMarginalsFast(n_nodes)
u_zeroed = tsm(w.view(-1))
assert (u_zeroed - u).data.norm() < 1e-12
|
251c4132ded0425fdf9a8037a8a9999b7ed32b9d
|
f50f63aafee2b41fa51fd012af66da5fdd93e3f4
|
/unyt/tests/test_unyt_array.py
|
b2b53623606a791394fefb9717f7e55b0c0e9b9e
|
[
"BSD-3-Clause"
] |
permissive
|
yt-project/unyt
|
7f11c6a9806b37026c1ebc1519a59844142a830c
|
369d1c9858ec733428632c9381e67d5377645dd0
|
refs/heads/main
| 2023-09-01T17:01:10.439740
| 2023-08-30T18:21:00
| 2023-08-30T18:21:00
| 127,202,410
| 353
| 59
|
BSD-3-Clause
| 2023-09-09T12:30:29
| 2018-03-28T21:46:56
|
Python
|
UTF-8
|
Python
| false
| false
| 90,499
|
py
|
test_unyt_array.py
|
"""
Test ndarray subclass that handles symbolic units.
"""
import copy
import itertools
import math
import operator
import os
import pickle
import re
import shutil
import tempfile
from importlib.metadata import version
from pathlib import Path
import numpy as np
import pytest
from numpy import array
from numpy.testing import (
assert_almost_equal,
assert_array_almost_equal,
assert_array_equal,
assert_equal,
)
from packaging.version import Version
from unyt import K, R, Unit, degC, degF, delta_degC, delta_degF, dimensions
from unyt._on_demand_imports import _astropy, _h5py, _pint
from unyt._physical_ratios import metallicity_sun, speed_of_light_cm_per_s
from unyt.array import (
binary_operators,
loadtxt,
savetxt,
uconcatenate,
ucross,
udot,
uintersect1d,
unary_operators,
unorm,
unyt_array,
unyt_quantity,
ustack,
uunion1d,
uvstack,
)
from unyt.exceptions import (
InvalidUnitEquivalence,
InvalidUnitOperation,
IterableUnitCoercionError,
UnitConversionError,
UnitOperationError,
UnitParseError,
UnitsNotReducible,
)
from unyt.testing import (
_process_warning,
assert_allclose_units,
assert_array_equal_units,
)
from unyt.unit_registry import UnitRegistry
from unyt.unit_symbols import cm, degree, g, m
NUMPY_VERSION = Version(version("numpy"))
def operate_and_compare(a, b, op, answer):
# Test generator for unyt_arrays tests
assert_array_almost_equal(op(a, b), answer)
def assert_isinstance(a, type):
assert isinstance(a, type)
def test_addition():
"""
Test addition of two unyt_arrays
"""
# Same units
a1 = unyt_array([1, 2, 3], "cm")
a2 = unyt_array([4, 5, 6], "cm")
a3 = [4 * cm, 5 * cm, 6 * cm]
answer = unyt_array([5, 7, 9], "cm")
operate_and_compare(a1, a2, operator.add, answer)
operate_and_compare(a2, a1, operator.add, answer)
operate_and_compare(a1, a3, operator.add, answer)
operate_and_compare(a3, a1, operator.add, answer)
operate_and_compare(a2, a1, np.add, answer)
operate_and_compare(a1, a2, np.add, answer)
operate_and_compare(a1, a3, np.add, answer)
operate_and_compare(a3, a1, np.add, answer)
# different units
a1 = unyt_array([1, 2, 3], "cm")
a2 = unyt_array([4, 5, 6], "m")
a3 = [4 * m, 5 * m, 6 * m]
answer1 = unyt_array([401, 502, 603], "cm")
answer2 = unyt_array([4.01, 5.02, 6.03], "m")
operate_and_compare(a1, a2, operator.add, answer1)
operate_and_compare(a2, a1, operator.add, answer2)
operate_and_compare(a1, a3, operator.add, answer1)
operate_and_compare(a3, a1, operator.add, answer2)
operate_and_compare(a1, a2, np.add, answer1)
operate_and_compare(a2, a1, np.add, answer2)
operate_and_compare(a1, a3, np.add, answer1)
operate_and_compare(a3, a1, np.add, answer2)
# Test dimensionless quantities
a1 = unyt_array([1, 2, 3])
a2 = array([4, 5, 6])
a3 = [4, 5, 6]
answer = unyt_array([5, 7, 9])
operate_and_compare(a1, a2, operator.add, answer)
operate_and_compare(a2, a1, operator.add, answer)
operate_and_compare(a1, a3, operator.add, answer)
operate_and_compare(a3, a1, operator.add, answer)
operate_and_compare(a1, a2, np.add, answer)
operate_and_compare(a2, a1, np.add, answer)
operate_and_compare(a1, a3, np.add, answer)
operate_and_compare(a3, a1, np.add, answer)
# Catch the different dimensions error
a1 = unyt_array([1, 2, 3], "m")
a2 = unyt_array([4, 5, 6], "kg")
a3 = [7, 8, 9]
a4 = unyt_array([10, 11, 12], "")
with pytest.raises(UnitOperationError):
operator.add(a1, a2)
with pytest.raises(UnitOperationError):
operator.iadd(a1, a2)
with pytest.raises(UnitOperationError):
operator.add(a1, a3)
with pytest.raises(UnitOperationError):
operator.iadd(a1, a3)
with pytest.raises(UnitOperationError):
operator.add(a3, a1)
with pytest.raises(UnitOperationError):
operator.iadd(a3, a1)
with pytest.raises(UnitOperationError):
operator.add(a1, a4)
with pytest.raises(UnitOperationError):
operator.iadd(a1, a4)
with pytest.raises(UnitOperationError):
operator.add(a4, a1)
with pytest.raises(UnitOperationError):
operator.iadd(a4, a1)
# adding with zero is allowed irrespective of the units
zeros = np.zeros(3)
zeros_yta_dimless = unyt_array(zeros, "dimensionless")
zeros_yta_length = unyt_array(zeros, "m")
zeros_yta_mass = unyt_array(zeros, "kg")
operands = [0, zeros, zeros_yta_length]
for op in [operator.add, np.add]:
for operand in operands:
operate_and_compare(a1, operand, op, a1)
operate_and_compare(operand, a1, op, a1)
operate_and_compare(4 * m, operand, op, 4 * m)
operate_and_compare(operand, 4 * m, op, 4 * m)
operands = [
unyt_quantity(0),
unyt_quantity(0, "kg"),
zeros_yta_dimless,
zeros_yta_mass,
]
for op in [operator.add, np.add]:
for operand in operands:
with pytest.raises(UnitOperationError):
operate_and_compare(a1, operand, op, a1)
with pytest.raises(UnitOperationError):
operate_and_compare(operand, a1, op, a1)
with pytest.raises(UnitOperationError):
operate_and_compare(4 * m, operand, op, 4 * m)
with pytest.raises(UnitOperationError):
operate_and_compare(operand, 4 * m, op, 4 * m)
def test_subtraction():
"""
Test subtraction of two unyt_arrays
"""
# Same units
a1 = unyt_array([1, 2, 3], "cm")
a2 = unyt_array([4, 5, 6], "cm")
a3 = [4 * cm, 5 * cm, 6 * cm]
answer1 = unyt_array([-3, -3, -3], "cm")
answer2 = unyt_array([3, 3, 3], "cm")
operate_and_compare(a1, a2, operator.sub, answer1)
operate_and_compare(a2, a1, operator.sub, answer2)
operate_and_compare(a1, a3, operator.sub, answer1)
operate_and_compare(a3, a1, operator.sub, answer2)
operate_and_compare(a1, a2, np.subtract, answer1)
operate_and_compare(a2, a1, np.subtract, answer2)
operate_and_compare(a1, a3, np.subtract, answer1)
operate_and_compare(a3, a1, np.subtract, answer2)
# different units
a1 = unyt_array([1, 2, 3], "cm")
a2 = unyt_array([4, 5, 6], "m")
a3 = [4 * m, 5 * m, 6 * m]
answer1 = unyt_array([-399, -498, -597], "cm")
answer2 = unyt_array([3.99, 4.98, 5.97], "m")
answer3 = unyt_array([399, 498, 597], "cm")
operate_and_compare(a1, a2, operator.sub, answer1)
operate_and_compare(a2, a1, operator.sub, answer2)
operate_and_compare(a1, a3, operator.sub, answer1)
operate_and_compare(a3, a1, operator.sub, answer3)
operate_and_compare(a1, a2, np.subtract, answer1)
operate_and_compare(a2, a1, np.subtract, answer2)
operate_and_compare(a1, a3, np.subtract, answer1)
operate_and_compare(a3, a1, np.subtract, answer3)
# Test dimensionless quantities
a1 = unyt_array([1, 2, 3])
a2 = array([4, 5, 6])
a3 = [4, 5, 6]
answer1 = unyt_array([-3, -3, -3])
answer2 = unyt_array([3, 3, 3])
operate_and_compare(a1, a2, operator.sub, answer1)
operate_and_compare(a2, a1, operator.sub, answer2)
operate_and_compare(a1, a3, operator.sub, answer1)
operate_and_compare(a3, a1, operator.sub, answer2)
operate_and_compare(a1, a2, np.subtract, answer1)
operate_and_compare(a2, a1, np.subtract, answer2)
operate_and_compare(a1, a3, np.subtract, answer1)
operate_and_compare(a3, a1, np.subtract, answer2)
# Catch the different dimensions error
a1 = unyt_array([1, 2, 3], "m")
a2 = unyt_array([4, 5, 6], "kg")
a3 = [7, 8, 9]
a4 = unyt_array([10, 11, 12], "")
with pytest.raises(UnitOperationError):
operator.sub(a1, a2)
with pytest.raises(UnitOperationError):
operator.isub(a1, a2)
with pytest.raises(UnitOperationError):
operator.sub(a1, a3)
with pytest.raises(UnitOperationError):
operator.isub(a1, a3)
with pytest.raises(UnitOperationError):
operator.sub(a3, a1)
with pytest.raises(UnitOperationError):
operator.isub(a3, a1)
with pytest.raises(UnitOperationError):
operator.sub(a1, a4)
with pytest.raises(UnitOperationError):
operator.isub(a1, a4)
with pytest.raises(UnitOperationError):
operator.sub(a4, a1)
with pytest.raises(UnitOperationError):
operator.isub(a4, a1)
# subtracting with zero is allowed irrespective of the units
zeros = np.zeros(3)
zeros_yta_dimless = unyt_array(zeros, "dimensionless")
zeros_yta_length = unyt_array(zeros, "m")
zeros_yta_mass = unyt_array(zeros, "kg")
operands = [0, zeros, zeros_yta_length]
for op in [operator.sub, np.subtract]:
for operand in operands:
operate_and_compare(a1, operand, op, a1)
operate_and_compare(operand, a1, op, -a1)
operate_and_compare(4 * m, operand, op, 4 * m)
operate_and_compare(operand, 4 * m, op, -4 * m)
operands = [
unyt_quantity(0),
unyt_quantity(0, "kg"),
zeros_yta_dimless,
zeros_yta_mass,
]
for op in [operator.sub, np.subtract]:
for operand in operands:
with pytest.raises(UnitOperationError):
operate_and_compare(a1, operand, op, a1)
with pytest.raises(UnitOperationError):
operate_and_compare(operand, a1, op, -a1)
with pytest.raises(UnitOperationError):
operate_and_compare(4 * m, operand, op, 4 * m)
with pytest.raises(UnitOperationError):
operate_and_compare(operand, 4 * m, op, -4 * m)
def test_multiplication():
"""
Test multiplication of two unyt_arrays
"""
# Same units
a1 = unyt_array([1, 2, 3], "cm")
a2 = unyt_array([4, 5, 6], "cm")
a3 = [4 * cm, 5 * cm, 6 * cm]
answer = unyt_array([4, 10, 18], "cm**2")
operate_and_compare(a1, a2, operator.mul, answer)
operate_and_compare(a2, a1, operator.mul, answer)
operate_and_compare(a1, a3, operator.mul, answer)
operate_and_compare(a3, a1, operator.mul, answer)
operate_and_compare(a1, a2, np.multiply, answer)
operate_and_compare(a2, a1, np.multiply, answer)
operate_and_compare(a1, a3, np.multiply, answer)
operate_and_compare(a3, a1, np.multiply, answer)
# different units, same dimension
a1 = unyt_array([1, 2, 3], "cm")
a2 = unyt_array([4, 5, 6], "m")
a3 = [4 * m, 5 * m, 6 * m]
answer1 = unyt_array([400, 1000, 1800], "cm**2")
answer2 = unyt_array([0.04, 0.10, 0.18], "m**2")
answer3 = unyt_array([4, 10, 18], "cm*m")
operate_and_compare(a1, a2, operator.mul, answer1)
operate_and_compare(a2, a1, operator.mul, answer2)
operate_and_compare(a1, a3, operator.mul, answer1)
operate_and_compare(a3, a1, operator.mul, answer2)
operate_and_compare(a1, a2, np.multiply, answer3)
operate_and_compare(a2, a1, np.multiply, answer3)
operate_and_compare(a1, a3, np.multiply, answer3)
operate_and_compare(a3, a1, np.multiply, answer3)
# different dimensions
a1 = unyt_array([1, 2, 3], "cm")
a2 = unyt_array([4, 5, 6], "g")
a3 = [4 * g, 5 * g, 6 * g]
answer = unyt_array([4, 10, 18], "cm*g")
operate_and_compare(a1, a2, operator.mul, answer)
operate_and_compare(a2, a1, operator.mul, answer)
operate_and_compare(a1, a3, operator.mul, answer)
operate_and_compare(a3, a1, operator.mul, answer)
operate_and_compare(a1, a2, np.multiply, answer)
operate_and_compare(a2, a1, np.multiply, answer)
operate_and_compare(a1, a3, np.multiply, answer)
operate_and_compare(a3, a1, np.multiply, answer)
# One dimensionless, one unitful
a1 = unyt_array([1, 2, 3], "cm")
a2 = array([4, 5, 6])
a3 = [4, 5, 6]
answer = unyt_array([4, 10, 18], "cm")
operate_and_compare(a1, a2, operator.mul, answer)
operate_and_compare(a2, a1, operator.mul, answer)
operate_and_compare(a1, a3, operator.mul, answer)
operate_and_compare(a3, a1, operator.mul, answer)
operate_and_compare(a1, a2, np.multiply, answer)
operate_and_compare(a2, a1, np.multiply, answer)
operate_and_compare(a1, a3, np.multiply, answer)
operate_and_compare(a3, a1, np.multiply, answer)
# Both dimensionless quantities
a1 = unyt_array([1, 2, 3])
a2 = array([4, 5, 6])
a3 = [4, 5, 6]
answer = unyt_array([4, 10, 18])
operate_and_compare(a1, a2, operator.mul, answer)
operate_and_compare(a2, a1, operator.mul, answer)
operate_and_compare(a1, a3, operator.mul, answer)
operate_and_compare(a3, a1, operator.mul, answer)
operate_and_compare(a1, a2, np.multiply, answer)
operate_and_compare(a2, a1, np.multiply, answer)
operate_and_compare(a1, a3, np.multiply, answer)
operate_and_compare(a3, a1, np.multiply, answer)
# With np.multiply.reduce
a = unyt_array([1.0, 2.0, 3.0], "cm")
answer = unyt_quantity(6.0, "cm**3")
assert_equal(np.multiply.reduce(a), answer)
a = unyt_array([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]], "cm")
answer = unyt_array([6.0, 120.0], "cm**3")
assert_equal(np.multiply.reduce(a, axis=1), answer)
def test_division():
"""
Test division of two unyt_arrays
"""
# Same units
a1 = unyt_array([1.0, 2.0, 3.0], "cm")
a2 = unyt_array([4.0, 5.0, 6.0], "cm")
a3 = [4 * cm, 5 * cm, 6 * cm]
answer1 = unyt_array([0.25, 0.4, 0.5])
answer2 = unyt_array([4, 2.5, 2])
op = operator.truediv
operate_and_compare(a1, a2, op, answer1)
operate_and_compare(a2, a1, op, answer2)
operate_and_compare(a1, a3, op, answer1)
operate_and_compare(a3, a1, op, answer2)
operate_and_compare(a1, a2, np.divide, answer1)
operate_and_compare(a2, a1, np.divide, answer2)
operate_and_compare(a1, a3, np.divide, answer1)
operate_and_compare(a3, a1, np.divide, answer2)
# different units, same dimension
a1 = unyt_array([1.0, 2.0, 3.0], "cm")
a2 = unyt_array([4.0, 5.0, 6.0], "m")
a3 = [4 * m, 5 * m, 6 * m]
answer1 = unyt_array([0.0025, 0.004, 0.005])
answer2 = unyt_array([400, 250, 200])
operate_and_compare(a1, a2, op, answer1)
operate_and_compare(a2, a1, op, answer2)
operate_and_compare(a1, a3, op, answer1)
operate_and_compare(a3, a1, op, answer2)
operate_and_compare(a1, a2, np.divide, answer1)
operate_and_compare(a2, a1, np.divide, answer2)
operate_and_compare(a1, a3, np.divide, answer1)
operate_and_compare(a3, a1, np.divide, answer2)
# different dimensions
a1 = unyt_array([1.0, 2.0, 3.0], "cm")
a2 = unyt_array([4.0, 5.0, 6.0], "g")
a3 = [4 * g, 5 * g, 6 * g]
answer1 = unyt_array([0.25, 0.4, 0.5], "cm/g")
answer2 = unyt_array([4, 2.5, 2], "g/cm")
operate_and_compare(a1, a2, op, answer1)
operate_and_compare(a2, a1, op, answer2)
operate_and_compare(a1, a3, op, answer1)
operate_and_compare(a3, a1, op, answer2)
operate_and_compare(a1, a2, np.divide, answer1)
operate_and_compare(a2, a1, np.divide, answer2)
operate_and_compare(a1, a3, np.divide, answer1)
operate_and_compare(a3, a1, np.divide, answer2)
# One dimensionless, one unitful
a1 = unyt_array([1.0, 2.0, 3.0], "cm")
a2 = array([4.0, 5.0, 6.0])
a3 = [4, 5, 6]
answer1 = unyt_array([0.25, 0.4, 0.5], "cm")
answer2 = unyt_array([4, 2.5, 2], "1/cm")
operate_and_compare(a1, a2, op, answer1)
operate_and_compare(a2, a1, op, answer2)
operate_and_compare(a1, a3, op, answer1)
operate_and_compare(a3, a1, op, answer2)
operate_and_compare(a1, a2, np.divide, answer1)
operate_and_compare(a2, a1, np.divide, answer2)
operate_and_compare(a1, a3, np.divide, answer1)
operate_and_compare(a3, a1, np.divide, answer2)
# Both dimensionless quantities
a1 = unyt_array([1.0, 2.0, 3.0])
a2 = array([4.0, 5.0, 6.0])
a3 = [4, 5, 6]
answer1 = unyt_array([0.25, 0.4, 0.5])
answer2 = unyt_array([4, 2.5, 2])
operate_and_compare(a1, a2, op, answer1)
operate_and_compare(a2, a1, op, answer2)
operate_and_compare(a1, a3, op, answer1)
operate_and_compare(a3, a1, op, answer2)
operate_and_compare(a1, a3, np.divide, answer1)
operate_and_compare(a3, a1, np.divide, answer2)
operate_and_compare(a1, a3, np.divide, answer1)
operate_and_compare(a3, a1, np.divide, answer2)
# With np.multiply.reduce
a = unyt_array([3.0, 2.0, 1.0], "cm")
answer = unyt_quantity(1.5, "cm**-1")
assert_equal(np.divide.reduce(a), answer)
a = unyt_array([[3.0, 2.0, 1.0], [6.0, 5.0, 4.0]], "cm")
answer = unyt_array([1.5, 0.3], "cm**-1")
assert_equal(np.divide.reduce(a, axis=1), answer)
def test_power():
"""
Test power operator ensure units are correct.
"""
from unyt import cm
cm_arr = np.array([1.0, 1.0]) * cm
cm_quant = 1.0 * cm
assert_equal((1 * cm) ** 3, unyt_quantity(1, "cm**3"))
assert_equal(np.power((1 * cm), 3), unyt_quantity(1, "cm**3"))
assert_equal((1 * cm) ** unyt_quantity(3), unyt_quantity(1, "cm**3"))
with pytest.raises(UnitOperationError):
np.power((1 * cm), unyt_quantity(3, "g"))
with pytest.raises(InvalidUnitOperation):
np.power(cm, cm)
assert_equal(cm_arr**3, unyt_array([1, 1], "cm**3"))
assert_equal(np.power(cm_arr, 3), unyt_array([1, 1], "cm**3"))
assert_equal(cm_arr ** unyt_quantity(3), unyt_array([1, 1], "cm**3"))
with pytest.raises(UnitOperationError):
np.power(cm_arr, unyt_quantity(3, "g"))
try:
np.power(cm_arr, unyt_quantity(3, "g"))
except UnitOperationError as err:
assert isinstance(err.unit1, Unit)
assert isinstance(err.unit2, Unit)
# when the power is 0.0 numpy short-circuits via ones_like so we
# need to test the special handling for that case
assert_array_equal_units(cm_quant**0, unyt_quantity(1.0, "dimensionless"))
assert_array_equal_units(cm_arr**0, unyt_quantity(1.0, "dimensionless"))
def test_comparisons():
"""
Test numpy ufunc comparison operators for unit consistency.
"""
from unyt.array import unyt_array
a1 = unyt_array([1, 2, 3], "cm")
a2 = unyt_array([2, 1, 3], "cm")
a3 = unyt_array([0.02, 0.01, 0.03], "m")
a4 = unyt_array([1, 2, 3], "g")
dimless = np.array([2, 1, 3])
ops = (np.less, np.less_equal, np.greater, np.greater_equal, np.equal, np.not_equal)
answers = (
[True, False, False],
[True, False, True],
[False, True, False],
[False, True, True],
[False, False, True],
[True, True, False],
)
for op, answer in zip(ops, answers):
operate_and_compare(a1, a2, op, answer)
for op, answer in zip(ops, answers):
operate_and_compare(a1, dimless, op, answer)
for op, answer in zip(ops, answers):
operate_and_compare(a1, a3, op, answer)
for op, answer in zip(ops, answers):
operate_and_compare(a1, a3.in_units("cm"), op, answer)
# Check that comparisons with dimensionless quantities work in both
# directions.
operate_and_compare(a3, dimless, np.less, [True, True, True])
operate_and_compare(dimless, a3, np.less, [False, False, False])
assert_equal(a1 < 2, [True, False, False])
assert_equal(a1 < 2, np.less(a1, 2))
assert_equal(2 < a1, [False, False, True])
assert_equal(2 < a1, np.less(2, a1))
# Check that comparisons with arrays that have different units with
# different dimensions work properly
operate_and_compare(a1, a4, np.equal, [False, False, False])
operate_and_compare(a1, a4, np.not_equal, [True, True, True])
# check that comparing quantities returns bools and not 0-D arrays
el1, el4 = a1[0], a4[0]
assert (el1 == el4) is False
assert (el1 != el4) is True
# comparisons that aren't == and !=
with pytest.raises(UnitOperationError):
np.greater(a1, a4)
with pytest.raises(UnitOperationError):
a1 > a4 # noqa: B015
with pytest.raises(UnitOperationError):
np.greater(el1, el4)
with pytest.raises(UnitOperationError):
el1 > el4 # noqa: B015
def test_unit_conversions():
"""
Test operations that convert to different units or cast to ndarray
"""
from unyt.array import unyt_quantity
from unyt.unit_object import Unit
km = unyt_quantity(1.0, "km", dtype="float64")
km_in_cm = km.in_units("cm")
cm_unit = Unit("cm")
kpc_unit = Unit("kpc")
assert_equal(km_in_cm, km)
assert_equal(km_in_cm.in_cgs(), 1e5)
assert_equal(km_in_cm.in_mks(), 1e3)
assert_equal(km_in_cm.units, cm_unit)
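    # in-place conversion reuses the same underlying buffer, so existing views stay valid (checked via .base)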
km_view = km.ndarray_view()
km.convert_to_units("cm")
assert km_view.base is km.base
assert_equal(km, unyt_quantity(1, "km"))
assert_equal(km.in_cgs(), 1e5)
assert_equal(km.in_mks(), 1e3)
assert_equal(km.units, cm_unit)
km.convert_to_units("kpc")
assert km_view.base is km.base
assert_array_almost_equal(km, unyt_quantity(1, "km"))
assert_array_almost_equal(km.in_cgs(), unyt_quantity(1e5, "cm"))
assert_array_almost_equal(km.in_mks(), unyt_quantity(1e3, "m"))
assert_equal(km.units, kpc_unit)
assert_isinstance(km.to_ndarray(), np.ndarray)
assert_isinstance(km.ndarray_view(), np.ndarray)
dyne = unyt_quantity(1.0, "dyne")
assert_equal(dyne.in_cgs(), dyne)
assert_equal(dyne.in_cgs(), 1.0)
assert_equal(dyne.in_mks(), dyne)
assert_equal(dyne.in_mks(), 1e-5)
assert_equal(str(dyne.in_mks().units), "N")
assert_equal(str(dyne.in_cgs().units), "dyn")
em3 = unyt_quantity(1.0, "erg/m**3")
assert_equal(em3.in_cgs(), em3)
assert_equal(em3.in_cgs(), 1e-6)
assert_equal(em3.in_mks(), em3)
assert_equal(em3.in_mks(), 1e-7)
assert_equal(str(em3.in_mks().units), "Pa")
assert_equal(str(em3.in_cgs().units), "dyn/cm**2")
em3_converted = unyt_quantity(1545436840.386756, "Msun/(Myr**2*kpc)")
assert_equal(em3.in_base(unit_system="galactic"), em3)
assert_array_almost_equal(em3.in_base(unit_system="galactic"), em3_converted)
assert_equal(str(em3.in_base(unit_system="galactic").units), "Msun/(Myr**2*kpc)")
dimless = unyt_quantity(1.0, "")
assert_equal(dimless.in_cgs(), dimless)
assert_equal(dimless.in_cgs(), 1.0)
assert_equal(dimless.in_mks(), dimless)
assert_equal(dimless.in_mks(), 1.0)
assert_equal(str(dimless.in_cgs().units), "dimensionless")
kg = unyt_quantity(1.0, "kg")
assert kg.to(g).v == 1000
assert kg.in_units(g).v == 1000
kg.convert_to_units(g)
assert kg.v == 1000
ten_grams = 10 * g
assert kg.to(ten_grams).v == 100
assert kg.in_units(ten_grams).v == 100
kg.convert_to_units(ten_grams)
assert kg.v == 100
with pytest.raises(UnitParseError):
kg.to([1, 2] * g)
with pytest.raises(UnitParseError):
kg.in_units([1, 2] * g)
with pytest.raises(UnitParseError):
kg.convert_to_units([1, 2] * g)
def test_temperature_conversions():
"""
    Test conversions between various supported temperature scales.
Also ensure we only allow compound units with temperature
scales that have a proper zero point.
"""
from unyt.unit_object import InvalidUnitOperation
km = unyt_quantity(1, "km", dtype="float64")
balmy = unyt_quantity(300, "K", dtype="float64")
balmy_F = unyt_quantity(80.33, "degF")
balmy_C = unyt_quantity(26.85, "degC")
balmy_R = unyt_quantity(540, "R")
assert_array_almost_equal(balmy.in_units("degF").d, balmy_F.d)
    assert balmy.in_units("degF").units == balmy_F.units
assert_array_almost_equal(balmy.in_units("degC").d, balmy_C.d)
    assert balmy.in_units("degC").units == balmy_C.units
assert_array_almost_equal(balmy.in_units("R").d, balmy_R.d)
assert balmy.in_units("R").units == balmy_R.units
balmy_view = balmy.ndarray_view()
balmy.convert_to_units("degF")
assert balmy_view.base is balmy.base
assert_array_almost_equal(np.array(balmy), np.array(balmy_F))
balmy.convert_to_units("degC")
assert balmy_view.base is balmy.base
assert_array_almost_equal(np.array(balmy), np.array(balmy_C))
balmy.convert_to_units("R")
assert balmy_view.base is balmy.base
assert_array_almost_equal(np.array(balmy), np.array(balmy_R))
balmy.convert_to_units("degF")
assert balmy_view.base is balmy.base
assert_array_almost_equal(np.array(balmy), np.array(balmy_F))
with pytest.raises(InvalidUnitOperation):
np.multiply(balmy, km)
with pytest.raises(InvalidUnitOperation):
np.multiply(balmy, balmy)
with pytest.raises(InvalidUnitOperation):
np.multiply(balmy_F, balmy_F)
with pytest.raises(InvalidUnitOperation):
np.multiply(balmy_F, balmy_C)
with pytest.raises(InvalidUnitOperation):
np.divide(balmy, balmy)
with pytest.raises(InvalidUnitOperation):
np.divide(balmy_F, balmy_F)
with pytest.raises(InvalidUnitOperation):
np.divide(balmy_F, balmy_C)
with pytest.raises(InvalidUnitOperation):
balmy * km
with pytest.raises(InvalidUnitOperation):
balmy * balmy
with pytest.raises(InvalidUnitOperation):
balmy_F * balmy_F
with pytest.raises(InvalidUnitOperation):
balmy_F * balmy_C
with pytest.raises(InvalidUnitOperation):
2 * balmy_F
with pytest.raises(InvalidUnitOperation):
balmy / balmy
with pytest.raises(InvalidUnitOperation):
balmy_F / balmy_F
with pytest.raises(InvalidUnitOperation):
balmy_F / balmy_C
assert np.add(balmy_F, balmy_F) == unyt_quantity(80.33 * 2, "degF")
with pytest.raises(InvalidUnitOperation):
np.add(balmy_F, balmy_C)
with pytest.raises(InvalidUnitOperation):
balmy_F + balmy_C
assert_equal(np.subtract(balmy_C, balmy_C), unyt_quantity(0, "degC"))
with pytest.raises(InvalidUnitOperation):
np.subtract(balmy_F, balmy_C)
with pytest.raises(InvalidUnitOperation):
balmy_F - balmy_C
# Does CGS conversion from F to K work?
assert_array_almost_equal(balmy.in_cgs(), unyt_quantity(300, "K"))
def test_unyt_array_unyt_quantity_ops():
"""
Test operations that combine unyt_array and unyt_quantity
"""
a = unyt_array(range(10, 1), "cm")
b = unyt_quantity(5, "g")
assert_isinstance(a * b, unyt_array)
assert_isinstance(b * a, unyt_array)
assert_isinstance(a / b, unyt_array)
assert_isinstance(b / a, unyt_array)
assert_isinstance(a * a, unyt_array)
assert_isinstance(a / a, unyt_array)
assert_isinstance(b * b, unyt_quantity)
assert_isinstance(b / b, unyt_quantity)
def test_selecting():
"""
Test slicing of two unyt_arrays
"""
a = unyt_array(range(10), "cm")
a_slice = a[:3]
a_fancy_index = a[[1, 1, 3, 5]]
a_array_fancy_index = a[array([[1, 1], [3, 5]])]
a_boolean_index = a[a > 5]
a_selection = a[0]
assert_array_equal(a_slice, unyt_array([0, 1, 2], "cm"))
assert_equal(a_slice.units, a.units)
assert_array_equal(a_fancy_index, unyt_array([1, 1, 3, 5], "cm"))
assert_equal(a_fancy_index.units, a.units)
assert_array_equal(a_array_fancy_index, unyt_array([[1, 1], [3, 5]], "cm"))
assert_equal(a_array_fancy_index.units, a.units)
assert_array_equal(a_boolean_index, unyt_array([6, 7, 8, 9], "cm"))
assert_equal(a_boolean_index.units, a.units)
assert_isinstance(a_selection, unyt_quantity)
assert_equal(a_selection.units, a.units)
# .base points to the original array for a numpy view. If it is not a
# view, .base is None.
assert a_slice.base is a
def test_iteration():
"""
Test that iterating over a unyt_array returns a sequence of unyt_quantity
instances
"""
a = np.arange(3)
b = unyt_array(np.arange(3), "cm")
for ia, ib in zip(a, b):
assert_equal(ia, ib.value)
assert_equal(ib.units, b.units)
def test_unyt_array_pickle():
test_data = [unyt_quantity(12.0, "cm"), unyt_array([1, 2, 3], "km")]
for data in test_data:
tempf = tempfile.NamedTemporaryFile(delete=False)
pickle.dump(data, tempf)
tempf.close()
with open(tempf.name, "rb") as fname:
loaded_data = pickle.load(fname)
os.unlink(tempf.name)
assert_array_equal(data, loaded_data)
assert_equal(data.units, loaded_data.units)
assert_array_equal(array(data.in_cgs()), array(loaded_data.in_cgs()))
assert_equal(float(data.units.base_value), float(loaded_data.units.base_value))
SYMPY_VERSION = Version(version("sympy"))
@pytest.mark.xfail(
condition=(SYMPY_VERSION == Version("1.12")),
reason="regression in sympy 1.12",
raises=AssertionError,
strict=True,
)
@pytest.mark.xfail(
condition=(SYMPY_VERSION in (Version("1.9"), Version("1.10"))),
reason="Not resolved upstream as of sympy 1.10",
raises=AttributeError,
strict=True,
)
def test_unpickling_old_array():
# see https://github.com/sympy/sympy/issues/22241
# the expected error is "AttributeError: 'One' object has no attribute '__dict__'"
PFILE = Path(__file__).parent / "data" / "unyt_array_sympy1.8.pickle"
with open(PFILE, "rb") as fh:
arr = pickle.load(fh)
# this comparison fails with sympy==1.12
# see https://github.com/sympy/sympy/issues/25134
assert arr.units.dimensions == cm.dimensions
def test_copy():
quan = unyt_quantity(1, "g")
arr = unyt_array([1, 2, 3], "cm")
assert_equal(copy.copy(quan), quan)
assert_array_equal(copy.copy(arr), arr)
assert_equal(copy.deepcopy(quan), quan)
assert_array_equal(copy.deepcopy(arr), arr)
memo = {}
assert_equal(copy.deepcopy(quan, memo), quan)
assert_array_equal(copy.deepcopy(arr), arr)
assert_equal(quan.copy(), quan)
assert_array_equal(arr.copy(), arr)
assert_equal(np.copy(quan), quan)
assert_array_equal(np.copy(arr), arr)
# needed so the tests function on older numpy versions that have
# different sets of ufuncs
def yield_np_ufuncs(ufunc_list):
for u in ufunc_list:
ufunc = getattr(np, u, None)
if ufunc is not None:
yield ufunc
def unary_ufunc_comparison(ufunc, a):
out = a.copy()
a_array = a.to_ndarray()
if ufunc in (np.isreal, np.iscomplex):
# According to the numpy docs, these two explicitly do not do
# in-place copies.
ret = ufunc(a)
assert not hasattr(ret, "units")
assert_array_equal(ret, ufunc(a))
elif ufunc in yield_np_ufuncs(
[
"exp",
"exp2",
"log",
"log2",
"log10",
"expm1",
"log1p",
"sin",
"cos",
"tan",
"arcsin",
"arccos",
"arctan",
"sinh",
"cosh",
"tanh",
"arccosh",
"arcsinh",
"arctanh",
"deg2rad",
"rad2deg",
"isfinite",
"isinf",
"isnan",
"signbit",
"sign",
"rint",
"logical_not",
]
):
# These operations should return identical results compared to numpy.
with np.errstate(invalid="ignore"):
ret = ufunc(a, out=out)
assert_array_equal(ret, out)
assert_array_equal(ret, ufunc(a_array))
# In-place copies do not drop units.
assert hasattr(out, "units")
assert not hasattr(ret, "units")
elif ufunc in yield_np_ufuncs(
[
"absolute",
"fabs",
"conjugate",
"floor",
"ceil",
"trunc",
"negative",
"spacing",
"positive",
]
):
ret = ufunc(a, out=out)
assert_array_equal(ret, out)
assert_array_equal(ret.to_ndarray(), ufunc(a_array))
assert ret.units == out.units
elif ufunc in yield_np_ufuncs(["ones_like", "square", "sqrt", "reciprocal"]):
if ufunc is np.ones_like:
ret = ufunc(a)
else:
with np.errstate(invalid="ignore"):
ret = ufunc(a, out=out)
assert_array_equal(ret, out)
with np.errstate(invalid="ignore"):
assert_array_equal(ret.to_ndarray(), ufunc(a_array))
if ufunc is np.square:
assert out.units == a.units**2
assert ret.units == a.units**2
elif ufunc is np.sqrt:
assert out.units == a.units**0.5
assert ret.units == a.units**0.5
elif ufunc is np.reciprocal:
assert out.units == a.units**-1
assert ret.units == a.units**-1
elif ufunc is np.modf:
ret1, ret2 = ufunc(a)
npret1, npret2 = ufunc(a_array)
assert_array_equal(ret1.to_ndarray(), npret1)
assert_array_equal(ret2.to_ndarray(), npret2)
elif ufunc is np.frexp:
ret1, ret2 = ufunc(a)
npret1, npret2 = ufunc(a_array)
assert_array_equal(ret1, npret1)
assert_array_equal(ret2, npret2)
elif ufunc is np.invert:
with pytest.raises(TypeError):
ufunc(a.astype("int64"))
elif hasattr(np, "isnat") and ufunc is np.isnat:
# numpy 1.13 raises ValueError, numpy 1.14 and newer raise TypeError
with pytest.raises((TypeError, ValueError)):
ufunc(a)
    # make sure no ufunc passed in goes untested
assert ufunc in yield_np_ufuncs(
[
"isreal",
"iscomplex",
"exp",
"exp2",
"log",
"log2",
"log10",
"expm1",
"log1p",
"sin",
"cos",
"tan",
"arcsin",
"arccos",
"arctan",
"sinh",
"cosh",
"tanh",
"arccosh",
"arcsinh",
"arctanh",
"deg2rad",
"rad2deg",
"isfinite",
"isinf",
"isnan",
"signbit",
"sign",
"rint",
"logical_not",
"absolute",
"fabs",
"conjugate",
"floor",
"ceil",
"trunc",
"negative",
"spacing",
"positive",
"ones_like",
"square",
"sqrt",
"reciprocal",
"invert",
"isnat",
"modf",
"frexp",
]
)
def binary_ufunc_comparison(ufunc, a, b):
if ufunc in [np.divmod]:
out = (b.copy(), b.copy())
else:
out = b.copy()
if ufunc in yield_np_ufuncs(
[
"add",
"subtract",
"remainder",
"fmod",
"mod",
"arctan2",
"hypot",
"greater",
"greater_equal",
"less",
"less_equal",
"logical_and",
"logical_or",
"logical_xor",
"maximum",
"minimum",
"fmax",
"fmin",
"nextafter",
"heaviside",
]
):
if a.units != b.units and a.units.dimensions != b.units.dimensions:
with pytest.raises(UnitOperationError):
ufunc(a, b)
return
if ufunc in yield_np_ufuncs(
[
"bitwise_and",
"bitwise_or",
"bitwise_xor",
"left_shift",
"right_shift",
"ldexp",
]
):
with pytest.raises(TypeError):
ufunc(a, b)
return
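    # exercise the out= code path and the plain call; ret is compared against out below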
ret = ufunc(a, b, out=out)
ret = ufunc(a, b)
if ufunc is np.multiply:
assert ret.units == (a.units * b.units).simplify().as_coeff_unit()[1]
elif ufunc in (np.divide, np.true_divide, np.arctan2):
assert ret.units.dimensions == (a.units / b.units).dimensions
elif ufunc in (
np.greater,
np.greater_equal,
np.less,
np.less_equal,
np.not_equal,
np.equal,
np.logical_and,
np.logical_or,
np.logical_xor,
):
assert not isinstance(ret, unyt_array) and isinstance(ret, np.ndarray)
if isinstance(ret, tuple):
assert isinstance(out, tuple)
assert len(out) == len(ret)
for o, r in zip(out, ret):
assert_array_equal(r, o)
else:
assert_array_equal(ret, out)
if ufunc in (np.divide, np.true_divide, np.arctan2) and (
a.units.dimensions == b.units.dimensions
):
assert_array_almost_equal(
np.array(ret), ufunc(np.array(a.in_cgs()), np.array(b.in_cgs()))
)
def test_ufuncs():
for ufunc in unary_operators:
unary_ufunc_comparison(
ufunc, unyt_array([0.3, 0.4, 0.5], "cm", dtype="float64")
)
unary_ufunc_comparison(ufunc, unyt_array([12, 23, 47], "g", dtype="float64"))
unary_ufunc_comparison(
ufunc, unyt_array([2, 4, -6], "erg/m**3", dtype="float64")
)
for ufunc in binary_operators:
# arr**arr is undefined for arrays with units because
# each element of the result would have different units.
if ufunc is np.power:
a = unyt_array([0.3, 0.4, 0.5], "cm")
b = unyt_array([0.1, 0.2, 0.3], "dimensionless")
c = np.array(b)
d = unyt_array([1.0, 2.0, 3.0], "g")
with pytest.raises(UnitOperationError):
ufunc(a, b)
with pytest.raises(UnitOperationError):
ufunc(a, c)
with pytest.raises(UnitOperationError):
ufunc(a, d)
binary_ufunc_comparison(ufunc, np.array(2.0), b)
continue
a = unyt_array([0.3, 0.4, 0.5], "cm")
b = unyt_array([0.1, 0.2, 0.3], "cm")
c = unyt_array([0.1, 0.2, 0.3], "m")
d = unyt_array([0.1, 0.2, 0.3], "g")
e = unyt_array([0.1, 0.2, 0.3], "erg/m**3")
for pair in itertools.product([a, b, c, d, e], repeat=2):
binary_ufunc_comparison(ufunc, pair[0], pair[1])
@pytest.mark.skipif(
np.__version__ < "1.16", reason="matmul is broken on old numpy versions"
)
def test_dot_matmul():
arr = unyt_array([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]], "cm")
ev_result = arr.dot(unyt_array([1.0, 2.0, 3.0], "kg"))
matmul_result = arr @ unyt_array([1.0, 2.0, 3.0], "kg")
res = unyt_array([14.0, 32.0], "cm*kg")
assert_equal(ev_result, res)
assert_equal(ev_result.units, res.units)
assert_isinstance(ev_result, unyt_array)
assert_equal(matmul_result, res)
assert_equal(matmul_result.units, res.units)
assert_isinstance(matmul_result, unyt_array)
ev_result = arr.dot(np.array([1.0, 2.0, 3.0]))
matmul_result = arr @ np.array([1.0, 2.0, 3.0])
res = unyt_array([14.0, 32.0], "cm")
assert_equal(ev_result, res)
assert_equal(ev_result.units, res.units)
assert_isinstance(ev_result, unyt_array)
assert_equal(matmul_result, res)
assert_equal(matmul_result.units, res.units)
assert_isinstance(matmul_result, unyt_array)
ev_result = arr.dot(arr.T)
matmul_result = arr @ arr.T
res = unyt_array([[14.0, 32.0], [32.0, 77.0]], "cm**2")
assert_equal(ev_result, res)
assert_equal(ev_result.units, res.units)
assert_isinstance(ev_result, unyt_array)
assert_equal(matmul_result, res)
assert_equal(matmul_result.units, res.units)
assert_isinstance(matmul_result, unyt_array)
ev_result = arr.v.dot(arr.T)
matmul_result = arr.v @ arr.T
res = unyt_array([[14.0, 32.0], [32.0, 77.0]], "cm")
assert_equal(ev_result, res)
assert_equal(ev_result.units, res.units)
assert_isinstance(ev_result, unyt_array)
assert_equal(matmul_result, res)
assert_equal(matmul_result.units, res.units)
assert_isinstance(matmul_result, unyt_array)
ev_result = arr.dot(arr.T.v)
matmul_result = arr @ arr.T.v
res = unyt_array([[14.0, 32.0], [32.0, 77.0]], "cm")
assert_equal(ev_result, res)
assert_equal(ev_result.units, res.units)
assert_isinstance(ev_result, unyt_array)
assert_equal(matmul_result, res)
assert_equal(matmul_result.units, res.units)
assert_isinstance(matmul_result, unyt_array)
arr = unyt_array([[1.0, 2.0], [3.0, 4.0]], "kg")
arr.dot(arr.T, out=arr)
res = unyt_array([[5.0, 11.0], [11.0, 25.0]], "kg**2")
assert_equal(arr, res)
assert_equal(arr.units, res.units)
assert_isinstance(arr, unyt_array)
qv = unyt_array([1, 2, 3], "cm").dot(unyt_array([1, 2, 3], "cm"))
mv = unyt_array([1, 2, 3], "cm") @ unyt_array([1, 2, 3], "cm")
qa = unyt_quantity(14, "cm**2")
assert qv == qa
assert qv.units == qa.units
assert_isinstance(qv, unyt_quantity)
assert mv == qa
assert mv.units == qa.units
assert_isinstance(mv, unyt_quantity)
qv = unyt_array([1, 2, 3], "cm").dot(np.array([1, 2, 3]))
mv = unyt_array([1, 2, 3], "cm") @ np.array([1, 2, 3])
qa = unyt_quantity(14, "cm")
assert qv == qa
assert qv.units == qa.units
assert_isinstance(qv, unyt_quantity)
assert mv == qa
assert mv.units == qa.units
assert_isinstance(mv, unyt_quantity)
def test_reductions():
arr = unyt_array([[1, 2, 3], [4, 5, 6]], "cm")
answers = {
"prod": (
unyt_quantity(720, "cm**6"),
unyt_array([4, 10, 18], "cm**2"),
unyt_array([6, 120], "cm**3"),
),
"sum": (
unyt_quantity(21, "cm"),
unyt_array([5.0, 7.0, 9.0], "cm"),
unyt_array([6, 15], "cm"),
),
"mean": (
unyt_quantity(3.5, "cm"),
unyt_array([2.5, 3.5, 4.5], "cm"),
unyt_array([2, 5], "cm"),
),
"std": (
unyt_quantity(1.707825127659933, "cm"),
unyt_array([1.5, 1.5, 1.5], "cm"),
unyt_array([0.81649658, 0.81649658], "cm"),
),
}
for op, (result1, result2, result3) in answers.items():
ev_result = getattr(arr, op)()
assert_almost_equal(ev_result, result1)
assert_equal(ev_result.units, result1.units)
assert_isinstance(ev_result, unyt_quantity)
for axis, result in [(0, result2), (1, result3), (-1, result3)]:
ev_result = getattr(arr, op)(axis=axis)
assert_almost_equal(ev_result, result)
assert_equal(ev_result.units, result.units)
assert_isinstance(ev_result, unyt_array)
def test_convenience():
for orig in [
[1.0, 2.0, 3.0],
(1.0, 2.0, 3.0),
np.array([1.0, 2.0, 3.0]),
[[1.0], [2.0], [3.0]],
np.array([[1.0], [2.0], [3.0]]),
[[1.0, 2.0, 3.0]],
np.array([[1.0, 2.0, 3.0]]),
]:
arr = unyt_array(orig, "cm")
arrou = unyt_array(orig, "1/cm")
uoarr = unyt_array(1.0 / np.array(orig), "cm")
assert_equal(arr.unit_quantity, unyt_quantity(1, "cm"))
assert_equal(arr.uq, unyt_quantity(1, "cm"))
assert_isinstance(arr.unit_quantity, unyt_quantity)
assert_isinstance(arr.uq, unyt_quantity)
assert_array_equal(arr.unit_array, unyt_array(np.ones_like(arr), "cm"))
assert_array_equal(arr.ua, unyt_array(np.ones_like(arr), "cm"))
assert_isinstance(arr.unit_array, unyt_array)
assert_isinstance(arr.ua, unyt_array)
for u in [arr.units, arr.unit_quantity, arr.unit_array, arr.uq, arr.ua]:
assert_array_equal(u * orig, arr)
assert_array_equal(orig * u, arr)
assert_array_equal(orig / u, arrou)
assert_array_equal(u / orig, uoarr)
assert_array_equal(arr.ndview, arr.view(np.ndarray))
assert_array_equal(arr.d, arr.view(np.ndarray))
assert arr.ndview.base is arr.base
assert arr.d.base is arr.base
assert_array_equal(arr.value, np.array(arr))
assert_array_equal(arr.v, np.array(arr))
def test_registry_association():
reg = UnitRegistry()
a = unyt_quantity(3, "cm", registry=reg)
b = unyt_quantity(4, "m")
c = unyt_quantity(6, "", registry=reg)
d = 5
assert_equal(id(a.units.registry), id(reg))
def binary_op_registry_comparison(op):
e = op(a, b)
f = op(b, a)
g = op(c, d)
h = op(d, c)
assert_equal(id(e.units.registry), id(reg))
assert_equal(id(f.units.registry), id(b.units.registry))
assert_equal(id(g.units.registry), id(h.units.registry))
assert_equal(id(g.units.registry), id(reg))
def unary_op_registry_comparison(op):
c = op(a)
d = op(b)
assert_equal(id(c.units.registry), id(reg))
assert_equal(id(d.units.registry), id(b.units.registry))
binary_ops = [operator.add, operator.sub, operator.mul, operator.truediv]
for op in binary_ops:
binary_op_registry_comparison(op)
for op in [operator.abs, operator.neg, operator.pos]:
unary_op_registry_comparison(op)
def test_to_value():
a = unyt_array([1.0, 2.0, 3.0], "kpc")
assert_equal(a.to_value(), np.array([1.0, 2.0, 3.0]))
assert_equal(a.to_value(), a.value)
assert_equal(a.to_value("km"), a.in_units("km").value)
b = unyt_quantity(5.5, "Msun")
assert_equal(b.to_value(), 5.5)
assert_equal(b.to_value("g"), b.in_units("g").value)
def test_astropy():
pytest.importorskip("astropy")
ap_arr = np.arange(10) * _astropy.units.km / _astropy.units.hr
un_arr = unyt_array(np.arange(10), "km/hr")
un_arr2 = unyt_array.from_astropy(ap_arr)
ap_quan = 10.0 * _astropy.units.Msun**0.5 / (_astropy.units.kpc**3)
un_quan = unyt_quantity(10.0, "sqrt(Msun)/kpc**3")
un_quan2 = unyt_quantity.from_astropy(ap_quan)
assert_array_equal(ap_arr, un_arr.to_astropy())
assert_array_equal(un_arr, unyt_array.from_astropy(ap_arr))
assert_array_equal(un_arr, un_arr2)
assert_equal(ap_quan, un_quan.to_astropy())
assert_equal(un_quan, unyt_quantity.from_astropy(ap_quan))
assert_equal(un_quan, un_quan2)
assert_array_equal(un_arr, unyt_array.from_astropy(un_arr.to_astropy()))
assert_equal(un_quan, unyt_quantity.from_astropy(un_quan.to_astropy()))
def test_astropy_dimensionless():
# see https://github.com/yt-project/unyt/issues/436
pytest.importorskip("astropy")
arr = unyt_array([1, 2, 3], "")
ap_arr = np.array([1, 2, 3]) * _astropy.units.Unit("")
assert_array_equal(ap_arr, arr.to_astropy())
assert_array_equal(arr, unyt_array.from_astropy(ap_arr))
def test_pint():
pytest.importorskip("pint")
def assert_pint_array_equal(arr1, arr2):
assert_array_equal(arr1.magnitude, arr2.magnitude)
assert str(arr1.units) == str(arr2.units)
ureg = _pint.UnitRegistry()
p_arr = np.arange(10) * ureg.km / ureg.year
un_arr = unyt_array(np.arange(10), "km/yr")
un_arr2 = unyt_array.from_pint(p_arr)
p_quan = 10.0 * ureg.g**0.5 / (ureg.mm**3)
un_quan = unyt_quantity(10.0, "sqrt(g)/mm**3")
un_quan2 = unyt_quantity.from_pint(p_quan)
assert_pint_array_equal(p_arr, un_arr.to_pint())
assert_array_equal(un_arr, unyt_array.from_pint(p_arr))
assert_array_equal(un_arr, un_arr2)
assert_pint_array_equal(p_quan, un_quan.to_pint())
assert_equal(un_quan, unyt_quantity.from_pint(p_quan))
assert_equal(un_quan, un_quan2)
assert_array_equal(un_arr, unyt_array.from_pint(un_arr.to_pint()))
assert_equal(un_quan, unyt_quantity.from_pint(un_quan.to_pint()))
def test_subclass():
class unyt_a_subclass(unyt_array):
def __new__(
cls, input_array, units=None, registry=None, bypass_validation=None
):
return super().__new__(
cls,
input_array,
units,
registry=registry,
bypass_validation=bypass_validation,
)
a = unyt_a_subclass([4, 5, 6], "g")
b = unyt_a_subclass([7, 8, 9], "kg")
nu = unyt_a_subclass([10, 11, 12], "")
nda = np.array([3, 4, 5])
yta = unyt_array([6, 7, 8], "mg")
loq = [unyt_quantity(6, "mg"), unyt_quantity(7, "mg"), unyt_quantity(8, "mg")]
ytq = unyt_quantity(4, "cm")
ndf = np.float64(3)
def op_comparison(op, inst1, inst2, compare_class):
assert_isinstance(op(inst1, inst2), compare_class)
assert_isinstance(op(inst2, inst1), compare_class)
ops = [operator.mul, operator.truediv]
for op in ops:
for inst in (b, ytq, ndf, yta, nda, loq):
op_comparison(op, a, inst, unyt_a_subclass)
op_comparison(op, ytq, nda, unyt_array)
op_comparison(op, ytq, yta, unyt_array)
for op in (operator.add, operator.sub):
op_comparison(op, nu, nda, unyt_a_subclass)
op_comparison(op, a, b, unyt_a_subclass)
op_comparison(op, a, yta, unyt_a_subclass)
op_comparison(op, a, loq, unyt_a_subclass)
assert_isinstance(a[0], unyt_quantity)
assert_isinstance(a[:], unyt_a_subclass)
assert_isinstance(a[:2], unyt_a_subclass)
assert_isinstance(unyt_a_subclass(yta), unyt_a_subclass)
assert_isinstance(a.to("kg"), unyt_a_subclass)
assert_isinstance(a.copy(), unyt_a_subclass)
assert_isinstance(copy.deepcopy(a), unyt_a_subclass)
def test_string_operations_raise_errors():
a = unyt_array([1, 2, 3], "g")
with pytest.raises(IterableUnitCoercionError):
a + "hello"
with pytest.raises(IterableUnitCoercionError):
a * "hello"
with pytest.raises(IterableUnitCoercionError):
a ** "hello"
def test_string_ne():
a = unyt_array([1, 2, 3], "g")
if NUMPY_VERSION >= Version("1.25.0.dev0"):
ctx = pytest.raises(ValueError)
else:
ctx = pytest.warns(FutureWarning)
with ctx:
assert a != "hello"
def test_string_operations_raise_errors_quantity():
q = 2 * g
with pytest.raises(IterableUnitCoercionError):
q + "hello"
with pytest.raises(IterableUnitCoercionError):
q * "hello"
with pytest.raises(IterableUnitCoercionError):
q ** "hello"
assert q != "hello"
def test_h5_io():
pytest.importorskip("h5py")
tmpdir = tempfile.mkdtemp()
curdir = os.getcwd()
os.chdir(tmpdir)
reg = UnitRegistry()
reg.add("code_length", 10.0, dimensions.length)
warr = unyt_array(np.random.random((256, 256)), "code_length", registry=reg)
warr.write_hdf5("test.h5")
iarr = unyt_array.from_hdf5("test.h5")
assert_equal(warr, iarr)
assert_equal(warr.units.registry["code_length"], iarr.units.registry["code_length"])
# test code to overwrite existing dataset
warr.write_hdf5("test.h5")
giarr = unyt_array.from_hdf5("test.h5")
assert_equal(warr, giarr)
# test code to overwrite existing dataset with data that has a different
# shape
warr = unyt_array(np.random.random((255, 255)), "code_length", registry=reg)
warr.write_hdf5("test.h5")
giarr = unyt_array.from_hdf5("test.h5")
assert_equal(warr, giarr)
os.remove("test.h5")
# write to a group that doesn't exist
warr.write_hdf5(
"test.h5", dataset_name="test_dset", group_name="/arrays/test_group"
)
giarr = unyt_array.from_hdf5(
"test.h5", dataset_name="test_dset", group_name="/arrays/test_group"
)
assert_equal(warr, giarr)
os.remove("test.h5")
# write to a group that does exist
with _h5py.File("test.h5", "a") as f:
f.create_group("/arrays/test_group")
warr.write_hdf5(
"test.h5", dataset_name="test_dset", group_name="/arrays/test_group"
)
giarr = unyt_array.from_hdf5(
"test.h5", dataset_name="test_dset", group_name="/arrays/test_group"
)
assert_equal(warr, giarr)
os.remove("test.h5")
os.chdir(curdir)
shutil.rmtree(tmpdir)
def test_equivalencies():
import unyt as u
# equivalence is ignored if the conversion doesn't need one
data = 12.0 * u.g
data.convert_to_equivalent("kg", None)
assert data.value == 0.012
assert data.units == u.kg
data = 12.0 * u.g
data = data.to_equivalent("kg", None)
assert data.value == 0.012
assert data.units == u.kg
# incorrect usage of an equivalence raises errors
with pytest.raises(InvalidUnitEquivalence):
data.convert_to_equivalent("erg", "thermal")
with pytest.raises(InvalidUnitEquivalence) as excinfo:
data.convert_to_equivalent("m", "mass_energy")
assert (
str(excinfo.value)
== "The unit equivalence 'mass_energy: mass <-> energy' does not "
"exist for units 'kg' to convert to a new unit with dimensions "
"'(length)'."
)
with pytest.raises(InvalidUnitEquivalence):
data.to_equivalent("erg", "thermal")
with pytest.raises(InvalidUnitEquivalence):
data.to_equivalent("m", "mass_energy")
# Mass-energy
mp = u.mp.copy()
mp.convert_to_units("keV", "mass_energy")
assert_allclose_units(u.mp.in_units("keV", "mass_energy"), mp)
assert_allclose_units(mp, u.mp * u.clight * u.clight)
assert_allclose_units(u.mp, mp.in_units("g", "mass_energy"))
mp.convert_to_units("g", "mass_energy")
assert_allclose_units(u.mp, mp)
# Thermal
T = 1e8 * u.K
E = T.in_units("W*hr", "thermal")
assert_allclose_units(E, (u.kboltz * T).in_units("W*hr"))
assert_allclose_units(T, E.in_units("K", "thermal"))
T.convert_to_units("W*hr", "thermal")
assert_allclose_units(E, T)
T.convert_to_units("K", "thermal")
assert_allclose_units(T, 1e8 * u.K)
# Spectral
# wavelength to frequency
lam = 4000 * u.angstrom
nu = lam.in_units("Hz", "spectral")
assert_allclose_units(nu, u.clight / lam)
lam.convert_to_units("MHz", "spectral")
assert_allclose_units(lam, nu)
assert lam.units == u.MHz.units
assert nu.units == u.Hz.units
# wavelength to photon energy
lam = 4000 * u.angstrom
hnu = lam.in_units("erg", "spectral")
assert_allclose_units(hnu, u.h_mks * u.clight / lam)
lam.convert_to_units("eV", "spectral")
assert_allclose_units(lam, hnu)
assert lam.units == u.eV.units
assert hnu.units == u.erg.units
# wavelength to spatial frequency
lam = 4000 * u.angstrom
nubar = lam.in_units("1/angstrom", "spectral")
assert_allclose_units(nubar, 1 / lam)
lam.convert_to_units("1/cm", "spectral")
assert_allclose_units(lam, nubar)
assert lam.units == (1 / u.cm).units
assert nubar.units == (1 / u.angstrom).units
# frequency to wavelength
nu = 1.0 * u.MHz
lam = nu.to("km", "spectral")
assert_allclose_units(lam, u.clight / nu)
nu.convert_to_units("m", "spectral")
assert_allclose_units(lam, nu)
assert lam.units == u.km.units
assert nu.units == u.m.units
# frequency to spatial frequency
nu = 1.0 * u.MHz
nubar = nu.to("1/km", "spectral")
assert_allclose_units(nubar, nu / u.clight)
nu.convert_to_units("1/m", "spectral")
assert_allclose_units(nubar, nu)
assert nubar.units == (1 / u.km).units
assert nu.units == (1 / u.m).units
# frequency to photon energy
nu = 1.0 * u.MHz
E = nu.to("erg", "spectral")
assert_allclose_units(E, u.h_mks * nu)
nu.convert_to_units("J", "spectral")
assert_allclose_units(nu, E)
assert nu.units == u.J.units
assert E.units == u.erg.units
# photon energy to frequency
E = 13.6 * u.eV
nu = E.to("Hz", "spectral")
assert_allclose_units(nu, E / u.h_mks)
E.convert_to_units("MHz", "spectral")
assert_allclose_units(nu, E)
assert E.units == u.MHz.units
assert nu.units == u.Hz.units
# photon energy to wavelength
E = 13.6 * u.eV
lam = E.to("nm", "spectral")
assert_allclose_units(lam, u.h_mks * u.clight / E)
E.convert_to_units("angstrom", "spectral")
assert_allclose_units(E, lam)
assert E.units == u.angstrom.units
assert lam.units == u.nm.units
# photon energy to spatial frequency
E = 13.6 * u.eV
nubar = E.to("1/nm", "spectral")
assert_allclose_units(nubar, E / (u.h_mks * u.clight))
E.convert_to_units("1/angstrom", "spectral")
assert_allclose_units(E, nubar)
assert E.units == (1 / u.angstrom).units
assert nubar.units == (1 / u.nm).units
# spatial frequency to frequency
nubar = 1500.0 / u.cm
nu = nubar.to("Hz", "spectral")
assert_allclose_units(nu, nubar * u.clight)
nubar.convert_to_units("MHz", "spectral")
assert_allclose_units(nu, nubar)
assert nubar.units == u.MHz.units
assert nu.units == u.Hz.units
# spatial frequency to wavelength
nubar = 1500.0 / u.cm
lam = nubar.to("nm", "spectral")
assert_allclose_units(lam, 1 / nubar)
nubar.convert_to_units("angstrom", "spectral")
assert_allclose_units(nubar, lam)
assert nubar.units == u.angstrom.units
assert lam.units == u.nm.units
# spatial frequency to photon energy
nubar = 1500.0 / u.cm
E = nubar.to("erg", "spectral")
assert_allclose_units(E, u.h_mks * u.clight * nubar)
nubar.convert_to_units("J", "spectral")
assert_allclose_units(nubar, E)
assert nubar.units == u.J.units
assert E.units == u.erg.units
# Sound-speed
    # temperature <-> velocity
mu = 0.6
gg = 5.0 / 3.0
T = 1e8 * u.K
c_s = T.in_units("km/s", equivalence="sound_speed")
assert_allclose_units(c_s, np.sqrt(gg * u.kboltz * T / (mu * u.mh)))
assert_allclose_units(T, c_s.in_units("K", "sound_speed"))
T.convert_to_units("m/s", "sound_speed")
assert_allclose_units(c_s, T)
assert T.units == u.m.units / u.s.units
assert c_s.units == u.km.units / u.s.units
mu = 0.5
gg = 4.0 / 3.0
T = 1e8 * u.K
c_s = T.in_units("km/s", "sound_speed", mu=mu, gamma=gg)
assert_allclose_units(c_s, np.sqrt(gg * u.kboltz * T / (mu * u.mh)))
assert_allclose_units(T, c_s.in_units("K", "sound_speed", mu=mu, gamma=gg))
T.convert_to_units("m/s", "sound_speed", mu=mu, gamma=gg)
assert_allclose_units(c_s, T)
assert T.units == u.m.units / u.s.units
assert c_s.units == u.km.units / u.s.units
    # temperature <-> energy
mu = 0.5
gg = 4.0 / 3.0
T = 1e8 * u.K
kT = T.in_units("eV", "sound_speed", mu=mu, gamma=gg)
assert_allclose_units(kT, u.kboltz * T)
T.convert_to_units("erg", "sound_speed", mu=mu, gamma=gg)
assert_allclose_units(T, kT)
assert T.units == u.erg.units
assert kT.units == u.eV.units
assert_allclose_units(T.in_units("K", "sound_speed", mu=mu, gamma=gg), 1e8 * u.K)
kT.convert_to_units("K", "sound_speed", mu=mu, gamma=gg)
assert_allclose_units(kT, 1e8 * u.K)
# velocity <-> energy
c_s = 300 * u.m / u.s
kT = c_s.in_units("erg", "sound_speed", mu=mu, gamma=gg)
assert_allclose_units(kT, c_s**2 * mu * u.mh / gg)
c_s.convert_to_units("J", "sound_speed", mu=mu, gamma=gg)
assert_allclose_units(c_s, kT)
assert c_s.units == u.J.units
assert kT.units == u.erg.units
assert_allclose_units(
kT.in_units("m/s", "sound_speed", mu=mu, gamma=gg), 300 * u.m / u.s
)
c_s.convert_to_units("m/s", "sound_speed", mu=mu, gamma=gg)
assert_allclose_units(c_s, 300 * u.m / u.s)
# Lorentz
v = 0.8 * u.clight
g = v.in_units("dimensionless", "lorentz")
g2 = unyt_quantity(1.0 / np.sqrt(1.0 - 0.8 * 0.8), "dimensionless")
assert_allclose_units(g, g2)
v.convert_to_units("", "lorentz")
assert_allclose_units(v, g2)
v.convert_to_units("c", "lorentz")
v2 = g2.in_units("mile/hr", "lorentz")
assert_allclose_units(v2, v.in_units("mile/hr"))
# Schwarzschild
msun = 1.0 * u.unit_symbols.Msun
msun.convert_to_equivalent("km", "schwarzschild")
R = u.mass_sun_mks.in_units("kpc", "schwarzschild")
assert_allclose_units(msun, R)
assert_allclose_units(R.in_mks(), 2 * u.G * u.mass_sun_mks / (u.clight**2))
assert_allclose_units(u.mass_sun_mks, R.in_units("kg", "schwarzschild"))
R.convert_to_units("Msun", "schwarzschild")
assert_allclose_units(u.mass_sun_mks, R)
assert R.units == u.unit_symbols.Msun.units
assert msun.units == u.km.units
# Compton
me = 1.0 * u.me
me.convert_to_units("nm", "compton")
length = u.me.in_units("angstrom", "compton")
assert_allclose_units(length, me)
assert_allclose_units(length, u.h_mks / (u.me * u.clight))
assert_allclose_units(u.me, length.in_units("g", "compton"))
assert me.units == u.nm.units
assert length.units == u.angstrom.units
me.convert_to_units("me", "compton")
assert_almost_equal(me.value, 1.0)
# Number density
rho = u.mp / u.m**3
n = rho.in_units("m**-3", "number_density")
assert_allclose_units(n, rho / (u.mh * 0.6))
assert_allclose_units(rho, n.in_units("kg/m**3", "number_density"))
rho.convert_to_units("cm**-3", "number_density")
assert rho.units == (1 / u.cm**3).units
assert n.units == (1 / u.m**3).units
assert_allclose_units(n, rho)
rho.convert_to_units("kg/m**3", "number_density")
assert_allclose_units(u.mp / u.m**3, rho)
assert rho.units == (u.kg / u.m**3).units
rho = u.mp / u.m**3
n = rho.in_units("m**-3", equivalence="number_density", mu=0.75)
assert_allclose_units(n, rho / (u.mh * 0.75))
assert_allclose_units(
rho, n.in_units("kg/m**3", equivalence="number_density", mu=0.75)
)
rho.convert_to_units("cm**-3", "number_density", mu=0.75)
assert rho.units == (1 / u.cm**3).units
assert n.units == (1 / u.m**3).units
assert_allclose_units(n, rho)
rho.convert_to_units("kg/m**3", "number_density", mu=0.75)
assert_allclose_units(u.mp / u.m**3, rho)
assert rho.units == (u.kg / u.m**3).units
# Effective temperature
T = 1e4 * u.K
F = T.in_units("W/m**2", equivalence="effective_temperature")
assert_allclose_units(F, u.stefan_boltzmann_constant * T**4)
assert_allclose_units(T, F.in_units("K", equivalence="effective_temperature"))
T.convert_to_units("erg/s/cm**2", "effective_temperature")
assert_allclose_units(T, F)
assert T.units == u.Unit("erg/cm**2/s")
assert F.units == u.W / u.m**2
assert_almost_equal(T.in_units("K", "effective_temperature").value, 1e4)
T.convert_to_units("K", "effective_temperature")
assert_almost_equal(T.value, 1e4)
assert T.units == u.K
# to_value test
assert_allclose_units(
F.value, T.to_value("W/m**2", equivalence="effective_temperature")
)
assert_allclose_units(
n.value, rho.to_value("m**-3", equivalence="number_density", mu=0.75)
)
def test_electromagnetic():
import unyt as u
# Various tests of SI and CGS electromagnetic units
t = 1.0 * u.Tesla
g = 1.0 * u.gauss
assert t.to("gauss") == 1e4 * u.gauss
assert g.to("T") == 1e-4 * u.Tesla
assert t.in_mks() == t
assert g.in_cgs() == g
t.convert_to_mks()
assert t == 1.0 * u.Tesla
g.convert_to_cgs()
assert g == 1.0 * u.gauss
qp_mks = u.qp_cgs.in_units("C")
assert_equal(qp_mks.units.dimensions, dimensions.charge_mks)
assert_almost_equal(qp_mks.v, 10.0 * u.qp.v / speed_of_light_cm_per_s)
qp = 1.0 * u.qp_cgs
assert_equal(qp, u.qp_cgs.in_units("esu"))
qp.convert_to_units("C")
assert_equal(qp.units.dimensions, dimensions.charge_mks)
assert_almost_equal(qp.v, 10 * u.qp.v / u.clight.v)
qp_cgs = u.qp.in_units("esu")
assert_array_almost_equal(qp_cgs, u.qp_cgs)
assert_equal(qp_cgs.units.dimensions, u.qp_cgs.units.dimensions)
qp = u.qp.copy()
qp.convert_to_units("esu")
assert_almost_equal(qp_cgs, qp_cgs)
assert qp.units == u.esu.units
qp.convert_to_units("C")
assert_almost_equal(u.qp, qp)
assert qp.units == u.C.units
qp_mks_k = u.qp_cgs.in_units("kC")
assert_array_almost_equal(qp_mks_k.v, 1.0e-2 * u.qp_cgs.v / speed_of_light_cm_per_s)
qp = 1.0 * u.qp_cgs
qp.convert_to_units("kC")
assert_almost_equal(qp, qp_mks_k)
B = 1.0 * u.T
B_cgs = B.in_units("gauss")
assert_equal(B.units.dimensions, dimensions.magnetic_field_mks)
assert_equal(B_cgs.units.dimensions, dimensions.magnetic_field_cgs)
assert_array_almost_equal(B_cgs, unyt_quantity(1.0e4, "gauss"))
B_cgs = B.in_cgs()
assert_equal(B.units.dimensions, dimensions.magnetic_field_mks)
assert_equal(B_cgs.units.dimensions, dimensions.magnetic_field_cgs)
assert_array_almost_equal(B_cgs, unyt_quantity(1.0e4, "gauss"))
B_cgs = B.in_base("cgs")
assert_equal(B.units.dimensions, dimensions.magnetic_field_mks)
assert_equal(B_cgs.units.dimensions, dimensions.magnetic_field_cgs)
assert_array_almost_equal(B_cgs, unyt_quantity(1.0e4, "gauss"))
B.convert_to_cgs()
assert_almost_equal(B, B_cgs)
B.convert_to_mks()
B_cgs2 = B.to("gauss")
assert_almost_equal(B_cgs, B_cgs2)
B_mks2 = B_cgs2.to("T")
assert_almost_equal(B, B_mks2)
B = 1.0 * u.T
u_mks = B * B / (2 * u.mu_0)
assert_equal(u_mks.units.dimensions, dimensions.pressure)
u_cgs = B_cgs * B_cgs / (8 * np.pi)
assert_equal(u_mks, u_cgs.to(u_mks.units))
assert_equal(u_mks.to(u_cgs.units), u_cgs)
assert_equal(u_mks.in_cgs(), u_cgs)
assert_equal(u_cgs.in_mks(), u_mks)
current = 1.0 * u.A
I_cgs = current.in_units("statA")
assert_array_almost_equal(
I_cgs, unyt_quantity(0.1 * speed_of_light_cm_per_s, "statA")
)
assert_array_almost_equal(I_cgs.in_units("mA"), current.in_units("mA"))
assert_equal(I_cgs.units.dimensions, dimensions.current_cgs)
current.convert_to_units("statA")
assert current.units == u.statA.units
current.convert_to_units("A")
assert current.units == u.A.units
I_cgs2 = current.to("statA")
assert I_cgs2.units == u.statA.units
assert_array_almost_equal(
I_cgs2, unyt_quantity(0.1 * speed_of_light_cm_per_s, "statA")
)
current = 1.0 * u.A
R = unyt_quantity(1.0, "ohm")
R_cgs = R.in_units("statohm")
P_mks = current * current * R
P_cgs = I_cgs * I_cgs * R_cgs
assert_equal(P_mks.units.dimensions, dimensions.power)
assert_equal(P_cgs.units.dimensions, dimensions.power)
assert_almost_equal(P_cgs.in_cgs(), P_cgs)
assert_almost_equal(P_mks.in_cgs(), P_cgs)
assert_almost_equal(P_cgs.in_mks(), P_mks)
assert_almost_equal(P_mks.in_mks(), P_mks)
V = unyt_quantity(1.0, "statV")
V_mks = V.in_units("V")
assert_array_almost_equal(V_mks.v, 1.0e8 * V.v / speed_of_light_cm_per_s)
data = 1.0 * u.C * u.T * u.V
with pytest.raises(UnitConversionError):
data.to("statC*G*statV")
with pytest.raises(UnitConversionError):
data.convert_to_units("statC*G*statV")
with pytest.raises(UnitsNotReducible):
data.in_cgs()
data = 1.0 * u.statC * u.G * u.statV
with pytest.raises(UnitConversionError):
data.to("C*T*V")
with pytest.raises(UnitConversionError):
data.convert_to_units("C*T*V")
assert_almost_equal(data.in_mks(), 6.67408e-18 * u.m**5 / u.s**4)
mu_0 = 4.0e-7 * math.pi * u.N / u.A**2
eps_0 = 8.85418781782e-12 * u.m**-3 / u.kg * u.s**4 * u.A**2
assert_almost_equal((1.0 / (u.clight**2 * mu_0)).in_units(eps_0.units), eps_0)
def test_unyt_array_coercion():
a = unyt_array([1, 2, 3], "cm")
q = unyt_quantity(3, "cm")
na = np.array([1, 2, 3])
assert_isinstance(a * q, unyt_array)
assert_isinstance(q * na, unyt_array)
assert_isinstance(q * 3, unyt_quantity)
assert_isinstance(q * np.float64(3), unyt_quantity)
assert_isinstance(q * np.array(3), unyt_quantity)
def test_numpy_wrappers():
a1 = unyt_array([1, 2, 3], "cm")
a2 = unyt_array([2, 3, 4, 5, 6], "cm")
a3 = unyt_array([[1, 2, 3], [4, 5, 6]], "cm")
a4 = unyt_array([7, 8, 9, 10, 11], "cm")
catenate_answer = [1, 2, 3, 2, 3, 4, 5, 6]
intersect_answer = [2, 3]
union_answer = [1, 2, 3, 4, 5, 6]
vstack_answer = [[2, 3, 4, 5, 6], [7, 8, 9, 10, 11]]
vstack_answer_last_axis = [[2, 7], [3, 8], [4, 9], [5, 10], [6, 11]]
cross_answer = [-2, 4, -2]
norm_answer = np.sqrt(1**2 + 2**2 + 3**2)
arr_norm_answer = [norm_answer, np.sqrt(4**2 + 5**2 + 6**2)]
dot_answer = 14
with pytest.warns(DeprecationWarning):
assert_array_equal(unyt_array(catenate_answer, "cm"), uconcatenate((a1, a2)))
assert_array_equal(catenate_answer, np.concatenate((a1, a2)))
with pytest.warns(DeprecationWarning):
assert_array_equal(unyt_array(intersect_answer, "cm"), uintersect1d(a1, a2))
assert_array_equal(intersect_answer, np.intersect1d(a1, a2))
with pytest.warns(DeprecationWarning):
assert_array_equal(unyt_array(union_answer, "cm"), uunion1d(a1, a2))
assert_array_equal(union_answer, np.union1d(a1, a2))
with pytest.warns(DeprecationWarning):
assert_array_equal(
unyt_array(cross_answer, "cm**2"), ucross(a1, a1 + (2 * a1.units))
)
assert_array_equal(cross_answer, np.cross(a1.v, a1.v + 2))
with pytest.warns(DeprecationWarning):
assert_array_equal(unorm(a1), unyt_quantity(norm_answer, "cm"))
assert_array_equal(np.linalg.norm(a1), norm_answer)
with pytest.warns(DeprecationWarning):
assert_array_equal(unorm(a3, axis=1), unyt_array(arr_norm_answer, "cm"))
assert_array_equal(np.linalg.norm(a3, axis=1), arr_norm_answer)
with pytest.warns(DeprecationWarning):
assert_array_equal(udot(a1, a1), unyt_quantity(dot_answer, "cm**2"))
with pytest.warns(DeprecationWarning):
assert_array_equal(np.array(catenate_answer), uconcatenate((a1.v, a2.v)))
with pytest.raises(RuntimeError):
with pytest.warns(DeprecationWarning):
uconcatenate((a1, a2.v))
with pytest.raises(RuntimeError):
with pytest.warns(DeprecationWarning):
uconcatenate((a1.to("m"), a2))
with pytest.warns(DeprecationWarning):
assert_array_equal(unyt_array(vstack_answer, "cm"), uvstack([a2, a4]))
assert_array_equal(vstack_answer, np.vstack([a2, a4]))
with pytest.warns(DeprecationWarning):
assert_array_equal(unyt_array(vstack_answer, "cm"), ustack([a2, a4]))
assert_array_equal(vstack_answer, np.stack([a2, a4]))
with pytest.warns(DeprecationWarning):
assert_array_equal(
unyt_array(vstack_answer_last_axis, "cm"), ustack([a2, a4], axis=-1)
)
assert_array_equal(vstack_answer_last_axis, np.stack([a2, a4], axis=-1))
def test_dimensionless_conversion():
a = unyt_quantity(1, "Zsun")
b = a.in_units("Zsun")
a.convert_to_units("Zsun")
assert a.units.base_value == metallicity_sun
assert b.units.base_value == metallicity_sun
def test_modified_unit_division():
reg1 = UnitRegistry()
reg2 = UnitRegistry()
reg1.modify("g", 50)
a = unyt_quantity(3, "g", registry=reg1)
b = unyt_quantity(3, "g", registry=reg2)
ret = a / b
assert ret == 50000.0
assert ret.units.is_dimensionless
assert ret.units.base_value == 1.0
def test_loadtxt_and_savetxt():
tmpdir = tempfile.mkdtemp()
curdir = os.getcwd()
os.chdir(tmpdir)
a = unyt_array(np.random.random(10), "kpc")
b = unyt_array(np.random.random(10), "Msun")
c = unyt_array(np.random.random(10), "km/s")
savetxt("arrays.dat", [a, b, c], delimiter=",")
d, e = loadtxt("arrays.dat", usecols=(1, 2), delimiter=",")
assert_array_equal(b, d)
assert_array_equal(c, e)
# adding newlines to the file doesn't matter
savetxt("arrays.dat", [a, b, c], delimiter=",")
with open("arrays.dat", "r+") as f:
content = f.read()
f.seek(0, 0)
f.write("\n" + content)
d, e = loadtxt("arrays.dat", usecols=(1, 2), delimiter=",")
assert_array_equal(b, d)
assert_array_equal(c, e)
# data saved by numpy savetxt are loaded without units
np.savetxt("arrays.dat", np.squeeze(np.transpose([a.v, b.v, c.v])), delimiter=",")
d, e = loadtxt("arrays.dat", usecols=(1, 2), delimiter=",")
assert_array_equal(b.v, d)
assert_array_equal(c.v, e)
# save a single array
savetxt("arrays.dat", a)
d = loadtxt("arrays.dat")
assert_array_equal(a, d)
# save an array with no units and an array with units with a header
savetxt("arrays.dat", [a.v, b], header="this is a header!")
d, e = loadtxt("arrays.dat")
assert_array_equal(a.v, d)
assert_array_equal(b, e)
os.chdir(curdir)
shutil.rmtree(tmpdir)
def test_trig_ufunc_degrees():
for ufunc in (np.sin, np.cos, np.tan):
degree_values = np.random.random(10) * degree
radian_values = degree_values.in_units("radian")
assert_array_equal(ufunc(degree_values), ufunc(radian_values))
def test_builtin_sum():
from unyt import km
arr = [1, 2, 3] * km
assert_equal(sum(arr), 6 * km)
def test_initialization_different_registries():
reg1 = UnitRegistry()
reg2 = UnitRegistry()
reg1.add("code_length", 1.0, dimensions.length)
reg2.add("code_length", 3.0, dimensions.length)
l1 = unyt_quantity(1.0, "code_length", registry=reg1)
l2 = unyt_quantity(1.0, "code_length", registry=reg2)
assert_almost_equal(float(l1.in_mks()), 1.0)
assert_almost_equal(float(l2.in_mks()), 3.0)
def test_ones_and_zeros_like():
data = unyt_array([1, 2, 3], "cm")
zd = np.zeros_like(data)
od = np.ones_like(data)
assert_equal(zd, unyt_array([0, 0, 0], "cm"))
assert_equal(zd.units, data.units)
assert_equal(od, unyt_array([1, 1, 1], "cm"))
assert_equal(od.units, data.units)
def test_coerce_iterable():
from unyt import cm, g, m
a = unyt_array([1, 2, 3], "cm")
b = [1 * cm, 2 * m, 3 * cm]
c = [1 * g, 2 * m, 3 * cm]
assert_equal(a + b, unyt_array([2, 202, 6], "cm"))
assert_equal(b + a, unyt_array([2, 202, 6], "cm"))
with pytest.raises(IterableUnitCoercionError):
a + c
with pytest.raises(IterableUnitCoercionError):
c + a
assert_equal(unyt_array(b), unyt_array([1, 200, 3], "cm"))
with pytest.raises(IterableUnitCoercionError):
unyt_array(c)
def test_bypass_validation():
from unyt import UnitRegistry, cm, unyt_array
obj = unyt_array(np.array([1.0, 2.0, 3.0]), cm, bypass_validation=True)
assert obj.units is cm
reg = UnitRegistry()
obj = unyt_array(
np.array([1.0, 2.0, 3.0]), cm, registry=reg, bypass_validation=True
)
assert obj.units == cm
assert obj.units.registry is reg
def test_creation():
from unyt import UnitRegistry, cm
data = [1, 2, 3] * cm
new_data = unyt_array(data)
assert new_data.units is cm
assert_array_equal(new_data.v, np.array([1, 2, 3], dtype="float64"))
reg = UnitRegistry()
new_data = unyt_array(data, registry=reg)
assert_array_equal(new_data.v, np.array([1, 2, 3], dtype="float64"))
assert new_data.units is not cm
assert new_data.units == cm
assert new_data.units.registry is reg
new_data = unyt_array([1, 2, 3], cm)
assert_array_equal(new_data.v, np.array([1, 2, 3], dtype="float64"))
assert new_data.units is cm
new_data = unyt_array([1, 2, 3], cm, registry=reg)
assert_array_equal(new_data.v, np.array([1, 2, 3], dtype="float64"))
assert new_data.units is not cm
assert new_data.units == cm
assert new_data.units.registry is reg
with pytest.raises(RuntimeError):
unyt_quantity("hello", "cm")
with pytest.raises(RuntimeError):
unyt_quantity(np.array([1, 2, 3]), "cm")
def test_round():
from unyt import km
assert_equal(round(3.3 * km), 3.0)
assert_equal(round(3.5 * km), 4.0)
assert_equal(round(3 * km), 3)
assert_equal(round(3.7 * km), 4)
with pytest.raises(TypeError):
round([1, 2, 3] * km)
@pytest.mark.parametrize("itemsize", (8, 16, 32, 64))
def test_conversion_from_int_types(itemsize):
a = unyt_array([1], "cm", dtype=f"int{itemsize}")
# check copy conversion
a.in_units("m")
# check in place conversion
if itemsize == 8:
with pytest.raises(
ValueError,
match=re.escape(
"Can't convert memory buffer in place. "
"Input dtype (int8) has a smaller itemsize than the "
"smallest floating point representation possible."
),
):
a.convert_to_units("m")
else:
a.convert_to_units("m")
assert a.dtype == f"float{itemsize}"
def test_integer_arrays():
from unyt import km, m, mile, ms, s
def integer_semantics(inp):
arr = inp * km
assert arr.dtype == np.int_
arr = np.array(inp, dtype="int32") * km
assert arr.dtype.name == "int32"
ret = arr.in_units("mile")
assert arr.dtype.name == "int32"
answer = (inp * km).astype("int32").to("mile")
assert_array_equal(ret, answer)
assert ret.dtype.name == "float32"
ret = arr.in_units("m")
assert arr.dtype != ret.dtype
assert ret.dtype.name == "float32"
arr.convert_to_units("m")
assert arr.dtype.name == "float32"
arr = inp * km
arr.convert_to_units("mile")
assert arr.dtype.name == "float" + str(np.int_().dtype.itemsize * 8)
for foo in [[1, 2, 3], 12, -8, 0, [1, -2, 3]]:
integer_semantics(foo)
arr1 = [1, 2, 3] * km
arr2 = [4, 5, 6] * mile
assert (arr1 + arr2).dtype.name == "float64"
assert (arr1 * arr2).dtype == np.int_
assert (arr1 / arr2).dtype.name == "float64"
arr1 = [1, 2, 3] * km
arr2 = [4, 5, 6] * m
assert (arr1 + arr2).dtype.name == "float64"
assert (arr1 * arr2).dtype == np.int_
assert (arr1 / arr2).dtype.name == "float64"
arr1 = [1, 2, 3] * km
arr2 = [4, 5, 6] * km
assert (arr1 + arr2).dtype == np.int_
assert (arr1 * arr2).dtype == np.int_
assert (arr1 / arr2).dtype.name == "float64"
# see issue #118 for details
assert 1000 * ms == 1 * s
assert 1 * s == 1000 * ms
def test_overflow_warnings():
from unyt import km
data = [2**53, 2**54] * km
message = "Overflow encountered while converting to units 'mile'"
_process_warning(data.to, message, RuntimeWarning, ("mile",))
_process_warning(data.in_units, message, RuntimeWarning, ("mile",))
_process_warning(data.convert_to_units, message, RuntimeWarning, ("mile",))
def test_clip():
from unyt import km
data = [1, 2, 3, 4, 5, 6] * km
answer = [2, 2, 3, 4, 4, 4] * km
ret = np.clip(data, 2, 4)
assert_array_equal(ret, answer)
assert ret.units == answer.units
np.clip(data, 2, 4, out=data)
assert_array_equal(data, answer)
assert data.units == answer.units
left_edge = [0.0, 0.0, 0.0] * km
right_edge = [1.0, 1.0, 1.0] * km
positions = [[0.0, 0.0, 0.0], [1.0, 1.0, -0.1], [1.5, 1.0, 0.9]] * km
np.clip(positions, left_edge, right_edge, positions)
assert positions.units == left_edge.units
assert positions.max() == 1.0 * km
assert positions.min() == 0.0 * km
def test_name_attribute():
a = unyt_array([0, 1, 2], "s")
assert a.name is None
a.name = "time"
assert a.name == "time"
assert a[0].name == "time"
a.convert_to_units("ms")
assert a.name == "time"
b = unyt_quantity(1, "m", name="distance")
assert b.name == "distance"
c = b.copy()
assert c.name == "distance"
c_1 = copy.deepcopy(b)
assert c_1.name == "distance"
d = b.in_units("mm")
assert d.name == "distance"
e = b.to("mm")
assert e.name == "distance"
f = unyt_array([3, 4, 5], "K", name="temperature")
g = f.in_units("J", equivalence="thermal")
assert g.name is None
g_1 = f.to_equivalent("J", equivalence="thermal")
assert g_1.name is None
f.convert_to_equivalent("J", equivalence="thermal")
assert f.name is None
h = f.to("J", equivalence="thermal")
assert h.name is None
def test_neper_bel():
assert 0 * Unit("dB") + 20 * Unit("dB") == unyt_quantity(20, "dB")
with pytest.raises(InvalidUnitOperation):
unyt_array([1, 10], "V") * Unit("dB")
with pytest.raises(InvalidUnitOperation):
Unit("Np") * unyt_array([1, 10], "s")
with pytest.raises(InvalidUnitOperation):
unyt_array([0, 20], "dB") ** 2
with pytest.raises(InvalidUnitOperation):
np.power(unyt_array([0, 20], "dB"), -2)
def test_delta_degC():
t1 = 10 * degC
t2 = 1 * K
assert t1 + t2 == 11 * degC
with pytest.raises(UnitOperationError):
t2 + t1
t3 = 1 * delta_degC
assert t1 + t3 == 11 * degC
assert t3 + t1 == 11 * degC
assert 1 * delta_degC + 2 * delta_degC == 3 * delta_degC
assert 2 * delta_degC == unyt_quantity(2, "delta_degC")
def test_delta_degF():
t1 = 10 * degF
t2 = 1 * R
assert t1 + t2 == 11 * degF
with pytest.raises(UnitOperationError):
t2 + t1
t3 = 1 * delta_degF
assert t1 + t3 == 11 * degF
assert t3 + t1 == 11 * degF
assert 1 * delta_degF + 2 * delta_degF == 3 * delta_degF
assert 2 * delta_degF == unyt_quantity(2, "delta_degF")
@pytest.mark.parametrize(
("u0", "u1", "uout"),
[
(K, K, K),
(R, R, R),
(degC, degC, delta_degC),
(degF, degF, delta_degF),
(degC, delta_degC, degC),
(delta_degC, degC, degC),
(degF, delta_degF, degF),
(delta_degF, degF, degF),
],
)
def test_delta_temperature_diff(u0, u1, uout):
# using repr comparison because
# 1) we don't care that Unit instances might not be the same
# 2) some temperature units will compare as equal even when they are not identical (e.g. K and delta_degC)
assert repr((2 * u0 - 1 * u1).units) == repr(uout)
def test_mil():
assert_allclose_units(unyt_quantity(1, "mil"), unyt_quantity(0.001, "inch"))
def test_kip():
assert_allclose_units(unyt_quantity(1, "lbf"), unyt_quantity(0.001, "kip"))
def test_ksi():
assert_allclose_units(unyt_quantity(1, "lbf/inch**2"), unyt_quantity(0.001, "ksi"))
def test_masked_array():
data = unyt_array([1, 2, 3], "s")
mask = [False, False, True]
marr = np.ma.MaskedArray(data, mask)
assert_array_equal(marr.data, data)
assert all(marr.mask == mask)
assert marr.sum() == unyt_quantity(3, "s")
assert np.ma.notmasked_contiguous(marr) == [slice(0, 2, None)]
assert marr.argmax() == 1
assert marr.max() == unyt_quantity(2, "s")
data = unyt_array([1, 2, np.inf], "s")
marr = np.ma.MaskedArray(data)
marr_masked = np.ma.masked_invalid(marr)
assert all(marr_masked.mask == [False, False, True])
marr_masked.set_fill_value(unyt_quantity(3, "s"))
assert_array_equal(marr_masked.filled(), unyt_array([1, 2, 3], "s"))
marr_fixed = np.ma.fix_invalid(marr)
assert_array_equal(marr_fixed.data, unyt_array([1, 2, 1e20], "s"))
assert_array_equal(np.ma.filled(marr, unyt_quantity(3, "s")), data)
assert_array_equal(np.ma.compressed(marr_masked), unyt_array([1, 2], "s"))
# executing the repr should not raise an exception
marr.__repr__()
def test_complexvalued(tmp_path):
freq = unyt_array([1j, 1j * 10], "Hz")
arr = 1 / (Unit("F") * Unit("Ω") * freq)
arr = arr.to("dimensionless")
assert arr.units.is_dimensionless
assert np.all(arr.v == np.asarray([-1j, -1j * 0.1]))
arr = unyt_array([1j, 1j * 10], "mJ")
arr.convert_to_base()
assert_allclose_units(arr, unyt_array([1j * 0.001, 1j * 0.01], "J"))
arr.convert_to_units("mJ")
assert_allclose_units(arr, unyt_array([1j, 1j * 10], "mJ"))
arr.convert_to_mks()
assert_allclose_units(arr, unyt_array([1j * 0.001, 1j * 0.01], "J"))
arr.convert_to_cgs()
assert_allclose_units(arr, unyt_array([1j * 10000, 1j * 100000], "erg"))
arr.convert_to_equivalent("K", "thermal")
assert_allclose_units(
arr, unyt_array([1j * 7.24297157e19, 1j * 7.24297157e20], "K")
)
arr = arr.to_equivalent("J", "thermal")
assert_allclose_units(arr, unyt_array([1j * 0.001, 1j * 0.01], "J"))
assert_allclose_units(arr.to_ndarray(), np.asarray([1j * 0.001, 1j * 0.01]))
assert_allclose_units(arr.to_value(), np.asarray([1j * 0.001, 1j * 0.01]))
assert arr.tolist() == [1j * 0.001, 1j * 0.01]
assert_allclose_units(arr.in_units("mJ"), unyt_array([1j, 1j * 10], "mJ"))
assert_allclose_units(arr.in_base(), unyt_array([1j * 0.001, 1j * 0.01], "J"))
assert_allclose_units(arr.in_cgs(), unyt_array([1j * 10000, 1j * 100000], "erg"))
assert_allclose_units(arr.in_mks(), unyt_array([1j * 0.001, 1j * 0.01], "J"))
fname = tmp_path / "testcomplexvalued.txt"
savetxt(fname, arr)
farr = loadtxt(fname, dtype=np.complex128)
assert_allclose_units(farr, unyt_array([1j * 0.001, 1j * 0.01], "J"))
def test_string_formatting():
d = unyt_array((1, 2, 3), "Msun")
expected = "[1 2 3] Msun"
assert f"{d}" == expected
@pytest.mark.parametrize(
"s, expected, normalized",
[
("+1cm", 1.0 * Unit("cm"), "1 cm"),
("1cm", 1.0 * Unit("cm"), "1 cm"),
("1.cm", 1.0 * Unit("cm"), "1.0 cm"),
("1.0 cm", 1.0 * Unit("cm"), "1.0 cm"),
("1.0\tcm", 1.0 * Unit("cm"), "1.0 cm"),
("1.0\t cm", 1.0 * Unit("cm"), "1.0 cm"),
("1.0 cm", 1.0 * Unit("cm"), "1.0 cm"),
("1.0\t\tcm", 1.0 * Unit("cm"), "1.0 cm"),
("10e-1cm", 1.0 * Unit("cm"), "1.0 cm"),
("10E-1cm", 1.0 * Unit("cm"), "1.0 cm"),
("+1cm", 1.0 * Unit("cm"), "1 cm"),
("1um", 1.0 * Unit("μm"), "1 μm"),
("1μm", 1.0 * Unit("μm"), "1 μm"),
("-5 Msun", -5.0 * Unit("Msun"), "-5 Msun"),
("1e3km", 1e3 * Unit("km"), "1000.0 km"),
("-1e3 km", -1e3 * Unit("km"), "-1000.0 km"),
("1.0 g/cm**3", 1.0 * Unit("g/cm**3"), "1.0 g/cm**3"),
("1 g*cm**-3", 1.0 * Unit("g/cm**3"), "1 g/cm**3"),
("1.0 g*cm", 1.0 * Unit("g*cm"), "1.0 cm*g"),
("nan g", float("nan") * Unit("g"), "nan g"),
("-nan g", float("nan") * Unit("g"), "nan g"),
("inf g", float("inf") * Unit("g"), "inf g"),
("+inf g", float("inf") * Unit("g"), "inf g"),
("-inf g", -float("inf") * Unit("g"), "-inf g"),
("1", 1.0 * Unit(), "1 dimensionless"),
("g", 1.0 * Unit("g"), "1 g"),
# from https://github.com/yt-project/unyt/issues/361
("1 g**2/cm**2", 1.0 * Unit("g") ** 2 / Unit("cm") ** 2, "1 g**2/cm**2"),
("g**2/cm**2", 1.0 * Unit("g") ** 2 / Unit("cm") ** 2, "1 g**2/cm**2"),
("1*cm**2", 1.0 * Unit("cm") ** 2, "1 cm**2"),
("1/cm**2", 1.0 / Unit("cm") ** 2, "1 cm**(-2)"),
("1 / cm**2", 1.0 / Unit("cm") ** 2, "1 cm**(-2)"),
(
"1e-3 g**2 / cm**2",
1e-3 * Unit("g") ** 2 / Unit("cm") ** 2,
"0.001 g**2/cm**2",
),
],
)
def test_valid_quantity_from_string(s, expected, normalized):
actual = unyt_quantity.from_string(s)
assert actual.to_string() == normalized
roundtrip = unyt_quantity.from_string(actual.to_string())
if "nan" not in s:
assert actual == expected
assert roundtrip == expected
assert actual.to_string() == normalized
assert roundtrip.to_string() == normalized
@pytest.mark.parametrize(
"s",
[
"++1cm",
"--1cm",
"cm10",
"cm 10.",
".cm",
"1cm**(-1",
"1cm**/2",
"1cm**3 hello",
],
)
def test_invalid_expression_quantity_from_string(s):
with pytest.raises(ValueError, match=r"^(Received invalid quantity expression )"):
unyt_quantity.from_string(s)
@pytest.mark.parametrize(
"s",
[
"10 cmmmm",
"50. Km",
".6 MSUN",
"infcm", # space sep is required here
],
)
def test_invalid_unit_quantity_from_string(s):
# using a lazy solution here
# this test would need to be refactored if we want to add other cases
# without a space separator between number and unit.
un_str = s.split()[-1]
with pytest.raises(
UnitParseError,
match=f"Could not find unit symbol '{un_str}' in the provided symbols.",
):
unyt_quantity.from_string(s)
def test_constant_type():
# see https://github.com/yt-project/unyt/issues/224
a = [1] * cm
assert type(a) is unyt_array
b = 2 * a
assert type(b) is unyt_array
def test_composite_meshgrid():
# see https://github.com/yt-project/unyt/issues/224
a = np.array(1)
# pure numpy call to illustrate that the problem
# is only with units
np.meshgrid(np.array([1, 2]), a)
np.meshgrid(np.array([1, 2]), a * m)
@pytest.mark.parametrize(
"shape, expected_output_shape",
[
(1, (1,)),
((1,), (1,)),
((1, 1), (1, 1)),
((1, -1), (1, 1)),
],
)
def test_reshape_quantity_to_array(shape, expected_output_shape):
a = unyt_quantity(1, "m")
b = a.reshape(shape)
assert b.shape == expected_output_shape
assert type(b) is unyt_array
@pytest.mark.parametrize("shape", ((), None))
def test_reshape_quantity_noop(shape):
a = unyt_quantity(1, "m")
b = a.reshape(shape)
assert b.shape == a.shape == ()
assert type(b) is unyt_quantity
def test_reshape_quantity_via_shape_tuple():
# this is necessary to support np.tile
a = unyt_quantity(1, "m")
b = a.reshape(-1, 1)
assert b.shape == (1, 1)
assert type(b) is unyt_array
def test_string_comparison():
# exercise comparison between a unyt_quantity object and a string
# see regression https://github.com/numpy/numpy/issues/22744
a = 1 * cm
assert not (a == "hello")
assert a != "hello"
def test_int8_comparison():
# see regression https://github.com/yt-project/unyt/issues/369
a = unyt_array(np.zeros(5, dtype=np.int8))
assert all(e == 0 for e in a)
def test_setitem():
# see https://github.com/yt-project/unyt/issues/373
a = [1, 2, 3] * cm
a[1] = 2 * m
assert a[1].value == 200
assert a[1].units == cm
with pytest.raises(UnitConversionError):
a[1] = 2 * g
a[1] = 2
assert a[1].value == 2
assert a[1].units == cm
a[1] = unyt_quantity(2)
assert a[1].value == 2
assert a[1].units == cm
|
e0f59d54664cc530146f63becf82d1a4bc1be7ea
|
1e4c5a3412e4acdebe87d9a374515b058fc8d99c
|
/chembl_structure_pipeline/standardizer.py
|
a71429b7c6879535b89cab5d0a03acb19f1c090a
|
[
"MIT"
] |
permissive
|
chembl/ChEMBL_Structure_Pipeline
|
38c88a87dd2557b7611adf0f60ce2363cf9f4def
|
02fae41903d35e6bb853ee56cf6b799d774bfe92
|
refs/heads/master
| 2023-05-25T16:54:18.007029
| 2022-11-25T16:21:04
| 2022-11-25T16:21:04
| 169,412,514
| 164
| 35
|
MIT
| 2023-05-02T13:39:09
| 2019-02-06T13:43:57
|
Python
|
UTF-8
|
Python
| false
| false
| 17,834
|
py
|
standardizer.py
|
#
# Copyright (c) 2019 Greg Landrum
# All rights reserved.
#
# This file is part of the ChEMBL_StructurePipeline project.
# The contents are covered by the terms of the MIT license
# which is included in the file LICENSE, found at the root
# of the source tree.
import os
from rdkit import Chem
from rdkit.Chem.MolStandardize import rdMolStandardize
from rdkit.Chem import rdMolTransforms
from .exclude_flag import exclude_flag
import rdkit
import math
rdkversion = rdkit.__version__.split(".")
if rdkversion < ["2019", "09", "2"]:
raise ValueError("need an RDKit version >= 2019.09.2")
def kekulize_mol(m):
Chem.Kekulize(m)
return m
def update_mol_valences(m):
m = Chem.Mol(m)
m.UpdatePropertyCache(strict=False)
return m
# derived from the MolVS set, with ChEMBL-specific additions
_normalization_transforms = """
// Name SMIRKS
Nitro to N+(O-)=O [N;X3:1](=[O:2])=[O:3]>>[*+1:1]([*-1:2])=[*:3]
Diazonium N [*:1]-[N;X2:2]#[N;X1:3]>>[*:1]-[*+1:2]#[*:3]
Quaternary N [N;X4;v4;+0:1]>>[*+1:1]
Trivalent O [*:1]=[O;X2;v3;+0:2]-[#6:3]>>[*:1]=[*+1:2]-[*:3]
Sulfoxide to -S+(O-) [!O:1][S+0;D3:2](=[O:3])[!O:4]>>[*:1][S+1:2]([O-:3])[*:4]
// this form addresses a pathological case that came up a few times in testing:
Sulfoxide to -S+(O-) 2 [!O:1][SH1+1;D3:2](=[O:3])[!O:4]>>[*:1][S+1:2]([O-:3])[*:4]
Trivalent S [O:1]=[S;D2;+0:2]-[#6:3]>>[*:1]=[*+1:2]-[*:3]
// Note that the next one doesn't work properly because repeated applications
// don't carry the cations from the previous rounds through. This should be
// fixed by implementing single-molecule transformations, but that's a longer-term
// project
//Alkaline oxide to ions [Li,Na,K;+0:1]-[O+0:2]>>([*+1:1].[O-:2])
Bad amide tautomer1 [C:1]([OH1;D1:2])=;!@[NH1:3]>>[C:1](=[OH0:2])-[NH2:3]
Bad amide tautomer2 [C:1]([OH1;D1:2])=;!@[NH0:3]>>[C:1](=[OH0:2])-[NH1:3]
Halogen with no neighbors [F,Cl,Br,I;X0;+0:1]>>[*-1:1]
Odd pyridine/pyridazine oxide structure [C,N;-;D2,D3:1]-[N+2;D3:2]-[O-;D1:3]>>[*-0:1]=[*+1:2]-[*-:3]
Odd azide [*:1][N-:2][N+:3]#[N:4]>>[*:1][N+0:2]=[N+:3]=[N-:4]
"""
_normalizer_params = rdMolStandardize.CleanupParameters()
_normalizer = rdMolStandardize.NormalizerFromData(
_normalization_transforms, _normalizer_params
)
_alkoxide_pattern = Chem.MolFromSmarts("[Li,Na,K;+0]-[#7,#8;+0]")
def normalize_mol(m):
""" """
Chem.FastFindRings(m)
if m.HasSubstructMatch(_alkoxide_pattern):
m = Chem.RWMol(m)
for match in m.GetSubstructMatches(_alkoxide_pattern):
m.RemoveBond(match[0], match[1])
m.GetAtomWithIdx(match[0]).SetFormalCharge(1)
m.GetAtomWithIdx(match[1]).SetFormalCharge(-1)
res = _normalizer.normalize(m)
return res
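# Illustrative sketch (editorial addition, not part of the original module):
# exercises the "Nitro to N+(O-)=O" transform listed above. The SMILES input is
# a hypothetical example; sanitization is skipped because RDKit rejects the
# pentavalent-nitrogen form of the nitro group.
def _example_normalize_nitro():
    m = Chem.MolFromSmiles("CN(=O)=O", sanitize=False)
    m.UpdatePropertyCache(strict=False)
    # the nitro group should come back written in the charge-separated form
    return Chem.MolToSmiles(normalize_mol(m))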
def remove_hs_from_mol(m):
"""removes most Hs
Hs that are preserved by the RDKit's Chem.RemoveHs() will not
be removed.
Additional exceptions:
- Hs with a wedged/dashed bond to them
- Hs bonded to atoms with tetrahedral stereochemistry set
    - Hs bonded to atoms that are not simply protonated and have three (or more) ring bonds
    - Hs bonded to atoms that are not simply protonated and are in a non-default valence state
For the above, the definition of "simply protonated" is an atom with charge = +1 and
a valence that is one higher than the default.
"""
# we need ring info, so be sure it's there (this won't do anything if the rings
# have already been found)
Chem.FastFindRings(m)
if m.NeedsUpdatePropertyCache():
m.UpdatePropertyCache(strict=False)
SENTINEL = 100
for atom in m.GetAtoms():
if atom.GetAtomicNum() == 1 and atom.GetDegree() == 1 and not atom.GetIsotope():
nbr = atom.GetNeighbors()[0]
bnd = atom.GetBonds()[0]
preserve = False
if bnd.GetBondDir() in (
Chem.BondDir.BEGINWEDGE,
Chem.BondDir.BEGINDASH,
) or (
bnd.HasProp("_MolFileBondStereo")
and bnd.GetUnsignedProp("_MolFileBondStereo") in (1, 6)
):
preserve = True
else:
is_protonated = (
nbr.GetFormalCharge() == 1
and nbr.GetExplicitValence()
== Chem.GetPeriodicTable().GetDefaultValence(nbr.GetAtomicNum()) + 1
)
if nbr.GetChiralTag() in (
Chem.ChiralType.CHI_TETRAHEDRAL_CCW,
Chem.ChiralType.CHI_TETRAHEDRAL_CW,
):
preserve = True
elif not is_protonated:
if (
nbr.GetExplicitValence()
> Chem.GetPeriodicTable().GetDefaultValence(nbr.GetAtomicNum())
):
preserve = True
else:
ringBonds = [
b
for b in nbr.GetBonds()
if m.GetRingInfo().NumBondRings(b.GetIdx())
]
if len(ringBonds) >= 3:
preserve = True
if preserve:
# we're safe picking an arbitrary high value since you can't do this in a mol block:
atom.SetIsotope(SENTINEL)
res = Chem.RemoveHs(m, sanitize=False)
for atom in res.GetAtoms():
if atom.GetAtomicNum() == 1 and atom.GetIsotope() == SENTINEL:
atom.SetIsotope(0)
return res
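# Illustrative sketch (editorial addition, not part of the original module):
# demonstrates the removal rules documented above on hypothetical inputs. An
# ordinary explicit H is stripped, while an H attached to an atom carrying a
# tetrahedral chirality tag is preserved.
def _example_remove_hs():
    plain = update_mol_valences(Chem.AddHs(Chem.MolFromSmiles("CC(N)C(=O)O")))
    chiral = update_mol_valences(Chem.AddHs(Chem.MolFromSmiles("C[C@H](N)C(=O)O")))
    n_plain = remove_hs_from_mol(plain).GetNumAtoms()    # back to 6 heavy atoms
    n_chiral = remove_hs_from_mol(chiral).GetNumAtoms()  # 6 heavy atoms + the stereo H
    return n_plain, n_chiral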
def remove_sgroups_from_mol(m):
"""removes all Sgroups"""
Chem.ClearMolSubstanceGroups(m)
return m
def uncharge_mol(m):
"""
>>> def uncharge_smiles(smi): return Chem.MolToSmiles(uncharge_mol(Chem.MolFromSmiles(smi)))
>>> uncharge_smiles('[NH3+]CCC')
'CCCN'
>>> uncharge_smiles('[NH3+]CCC[O-]')
'NCCCO'
>>> uncharge_smiles('C[N+](C)(C)CCC[O-]')
'C[N+](C)(C)CCC[O-]'
>>> uncharge_smiles('CC[NH+](C)C.[Cl-]')
'CCN(C)C.Cl'
>>> uncharge_smiles('CC(=O)[O-]')
'CC(=O)O'
>>> uncharge_smiles('CC(=O)[O-].[Na+]')
'CC(=O)[O-].[Na+]'
>>> uncharge_smiles('[NH3+]CC(=O)[O-].[Na+]')
'NCC(=O)[O-].[Na+]'
>>> uncharge_smiles('CC(=O)[O-].C[NH+](C)C')
'CC(=O)O.CN(C)C'
Alcohols are protonated before acids:
>>> uncharge_smiles('[O-]C([N+](C)C)CC(=O)[O-]')
'C[N+](C)C(O)CC(=O)[O-]'
And the neutralization is done in a canonical order, so atom ordering of the input
structure isn't important:
>>> uncharge_smiles('C[N+](C)(C)CC([O-])CC[O-]')
'C[N+](C)(C)CC([O-])CCO'
>>> uncharge_smiles('C[N+](C)(C)CC(CC[O-])[O-]')
'C[N+](C)(C)CC([O-])CCO'
"""
uncharger = rdMolStandardize.Uncharger(canonicalOrder=True)
res = uncharger.uncharge(m)
res.UpdatePropertyCache(strict=False)
return res
def _getAtomsToOtherSide(startAt, bond):
oAt = bond.GetOtherAtomIdx(startAt.GetIdx())
res = []
q = [x for x in startAt.GetNeighbors() if x.GetIdx() != oAt]
while q:
hd = q.pop(0)
if hd.GetIdx() in res:
continue
res.append(hd.GetIdx())
for nbr in hd.GetNeighbors():
if nbr.GetIdx() == startAt.GetIdx():
continue
if nbr.GetIdx() == oAt:
raise ValueError(f"cycle found {oAt} {res}")
if nbr.GetIdx() not in res:
q.append(nbr)
return res
def _check_and_straighten_at_triple_bond(at, bond, conf):
if at.GetDegree() != 2:
raise ValueError("only works with degree 2")
nbrs = [x.GetIdx() for x in at.GetNeighbors()]
angle = rdMolTransforms.GetAngleRad(conf, nbrs[0], at.GetIdx(), nbrs[1])
# are we off by more than a degree?
if abs(abs(angle) - math.pi) > 0.017:
rdMolTransforms.SetAngleRad(conf, nbrs[0], at.GetIdx(), nbrs[1], math.pi)
def _cleanup_triple_bonds(m):
conf = m.GetConformer()
if conf.Is3D():
raise ValueError("can only operate on 2D conformers")
for bond in m.GetBonds():
if (
bond.GetBondType() == Chem.BondType.TRIPLE
and m.GetRingInfo().NumBondRings(bond.GetIdx()) == 0
):
at = bond.GetBeginAtom()
if at.GetDegree() == 2:
_check_and_straighten_at_triple_bond(at, bond, conf)
at = bond.GetEndAtom()
if at.GetDegree() == 2:
_check_and_straighten_at_triple_bond(at, bond, conf)
def _cleanup_allenes(m):
conf = m.GetConformer()
if conf.Is3D():
raise ValueError("can only operate on 2D conformers")
p = Chem.MolFromSmarts("*=[C;R0]=*")
for match in m.GetSubstructMatches(p):
angle = rdMolTransforms.GetAngleRad(conf, match[0], match[1], match[2])
# are we off by more than a degree?
if abs(abs(angle) - math.pi) > 0.017:
rdMolTransforms.SetAngleRad(conf, match[0], match[1], match[2], math.pi)
def cleanup_drawing_mol(m):
m = Chem.Mol(m)
if not m.GetNumConformers():
# if we don't have a conformer, just return
return m
conf = m.GetConformer()
if conf.Is3D():
for i in range(m.GetNumAtoms()):
if abs(conf.GetAtomPosition(i).z) >= 0.0001:
raise ValueError("cleanup_drawing_mol() only works for 2D molecules")
conf.Set3D(False)
Chem.FastFindRings(m)
_cleanup_triple_bonds(m)
_cleanup_allenes(m)
return m
def flatten_tartrate_mol(m):
tartrate = Chem.MolFromSmarts("OC(=O)C(O)C(O)C(=O)O")
# make sure we only match free tartrate/tartaric acid fragments
params = Chem.AdjustQueryParameters.NoAdjustments()
params.adjustDegree = True
params.adjustDegreeFlags = Chem.AdjustQueryWhichFlags.ADJUST_IGNORENONE
tartrate = Chem.AdjustQueryProperties(tartrate, params)
matches = m.GetSubstructMatches(tartrate)
if matches:
m = Chem.Mol(m)
for match in matches:
m.GetAtomWithIdx(match[3]).SetChiralTag(Chem.ChiralType.CHI_UNSPECIFIED)
m.GetAtomWithIdx(match[5]).SetChiralTag(Chem.ChiralType.CHI_UNSPECIFIED)
return m
_data_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), "data")
_solvents_file = os.path.join(_data_dir, "solvents.smi")
_salts_file = os.path.join(_data_dir, "salts.smi")
def get_fragment_parent_mol(m, check_exclusion=False, neutralize=False, verbose=False):
with open(_solvents_file) as inf:
solvents = []
for l in inf:
if not l or l[0] == "#":
continue
l = l.strip().split("\t")
if len(l) != 2:
continue
solvents.append((l[0], Chem.MolFromSmarts(l[1])))
# there are a number of special cases for the ChEMBL salt stripping, so we
# can't use the salt remover that's built into the RDKit standardizer.
frags = []
inputFrags = Chem.GetMolFrags(m, asMols=True, sanitizeFrags=False)
for frag in inputFrags:
frag = Chem.RemoveHs(frag, sanitize=False)
frag.UpdatePropertyCache(strict=False)
Chem.SetAromaticity(frag)
frags.append(frag)
keep = [1] * len(frags)
for nm, solv in solvents:
for i, frag in enumerate(frags):
if (
keep[i]
and frag.GetNumAtoms() == solv.GetNumAtoms()
and frag.GetNumBonds() == solv.GetNumBonds()
and frag.HasSubstructMatch(solv)
):
keep[i] = 0
if verbose:
print(f"matched solvent {nm}")
if not max(keep):
break
if not max(keep):
# everything removed, we can just return the input molecule:
if check_exclusion:
exclude = exclude_flag(m, includeRDKitSanitization=False)
else:
exclude = False
if neutralize:
res = uncharge_mol(m)
else:
res = Chem.Mol(m)
return res, exclude
with open(_salts_file) as inf:
salts = []
for l in inf:
if not l or l[0] == "#":
continue
l = l.strip().split("\t")
if len(l) != 2:
continue
salts.append((l[0], Chem.MolFromSmarts(l[1])))
keepFrags1 = []
keepFrags2 = []
for i, v in enumerate(keep):
if v:
keepFrags1.append(frags[i])
keepFrags2.append(inputFrags[i])
frags = keepFrags1
inputFrags = keepFrags2
keep = [1] * len(frags)
for nm, salt in salts:
for i, frag in enumerate(frags):
if (
keep[i]
and frag.GetNumAtoms() == salt.GetNumAtoms()
and frag.GetNumBonds() == salt.GetNumBonds()
and frag.HasSubstructMatch(salt)
):
if verbose:
print(f"matched salt {nm}")
keep[i] = 0
if not max(keep):
break
if not max(keep):
# everything removed, keep everything:
keep = [1] * len(frags)
keepFrags = []
seenSmis = set()
for i, v in enumerate(keep):
if not v:
continue
frag = inputFrags[i]
if neutralize:
cfrag = uncharge_mol(frag)
else:
cfrag = Chem.Mol(frag)
keepFrags.append(i)
# make sure there are no extraneous H atoms in the fragment:
cfrag = Chem.RemoveHs(cfrag, sanitize=False)
# need aromaticity perception to get a reasonable SMILES, but don't
# want to risk a full sanitization:
cfrag.ClearComputedProps()
cfrag.UpdatePropertyCache(False)
Chem.SanitizeMol(
cfrag,
sanitizeOps=Chem.SANITIZE_SYMMRINGS
| Chem.SANITIZE_FINDRADICALS
| Chem.SANITIZE_SETAROMATICITY
| Chem.SANITIZE_ADJUSTHS,
)
seenSmis.add(Chem.MolToSmiles(cfrag))
if len(seenSmis) == 1:
# if we just have one fragment left, this is easy:
# just copy the fragment
res = inputFrags[keepFrags[0]]
else:
# otherwise we need to create a molecule from the remaining fragments
res = inputFrags[keepFrags[0]]
for idx in keepFrags[1:]:
frag = inputFrags[idx]
res = Chem.CombineMols(res, frag)
if check_exclusion:
exclude = exclude_flag(res, includeRDKitSanitization=False)
else:
exclude = False
# if we still match the exclude flag after stripping salts, go
# back to the parent species after solvent stripping. These are now
# in the inputFrags list
if exclude:
res = inputFrags[0]
for frag in inputFrags[1:]:
res = Chem.CombineMols(res, frag)
if neutralize:
res = uncharge_mol(res)
return res, exclude
def get_isotope_parent_mol(m):
m = Chem.Mol(m)
for at in m.GetAtoms():
if at.GetIsotope():
at.SetIsotope(0)
return remove_hs_from_mol(m)
def get_parent_mol(m, neutralize=True, check_exclusion=True, verbose=False):
ipar = get_isotope_parent_mol(m)
res, exclude = get_fragment_parent_mol(
ipar, neutralize=neutralize, check_exclusion=check_exclusion, verbose=verbose
)
return res, exclude
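# Illustrative sketch (editorial addition, not part of the original module):
# strips a chloride counter-ion from a hypothetical amine hydrochloride and
# neutralizes what remains, assuming chloride appears in the bundled salts.smi.
def _example_get_parent():
    m = Chem.MolFromSmiles("[NH3+]CCc1ccccc1.[Cl-]")
    parent, excluded = get_parent_mol(m)
    # expected: the free base of the amine, with excluded == False
    return Chem.MolToSmiles(parent), excluded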
def get_parent_molblock(ctab, neutralize=True, check_exclusion=True, verbose=False):
m = Chem.MolFromMolBlock(ctab, sanitize=False, removeHs=False)
reapply_molblock_wedging(m)
parent, exclude = get_parent_mol(
m, neutralize=neutralize, check_exclusion=check_exclusion, verbose=verbose
)
return Chem.MolToMolBlock(parent, kekulize=False), exclude
def standardize_mol(m, check_exclusion=True, sanitize=True):
if check_exclusion:
exclude = exclude_flag(m, includeRDKitSanitization=False)
else:
exclude = False
if not exclude:
m = update_mol_valences(m)
m = remove_sgroups_from_mol(m)
m = kekulize_mol(m)
m = remove_hs_from_mol(m)
m = normalize_mol(m)
m = uncharge_mol(m)
m = flatten_tartrate_mol(m)
m = cleanup_drawing_mol(m)
if sanitize:
Chem.SanitizeMol(m)
return m
def reapply_molblock_wedging(m):
for b in m.GetBonds():
# only do the wedging if the bond doesn't already have something there:
if b.GetBondDir() == Chem.BondDir.NONE and b.HasProp("_MolFileBondStereo"):
val = b.GetProp("_MolFileBondStereo")
if val == "1":
b.SetBondDir(Chem.BondDir.BEGINWEDGE)
elif val == "6":
b.SetBondDir(Chem.BondDir.BEGINDASH)
def parse_molblock(ctab, useRDKitChemistry=False):
if useRDKitChemistry:
m = Chem.MolFromMolBlock(ctab, sanitize=True, removeHs=True)
else:
m = Chem.MolFromMolBlock(ctab, sanitize=False, removeHs=False)
if not m:
return None
# the RDKit has, by default, removed bond wedging information from the molecule
# put that back in:
reapply_molblock_wedging(m)
# Set the stereochemistry of double bonds
# This block can be removed if github #X ends up being accepted and fixed
anybonds = []
for bond in m.GetBonds():
if bond.GetStereo() == Chem.BondStereo.STEREOANY:
anybonds.append(bond.GetIdx())
Chem.SetBondStereoFromDirections(m)
for bidx in anybonds:
m.GetBondWithIdx(bidx).SetStereo(Chem.BondStereo.STEREOANY)
return m
def standardize_molblock(ctab, check_exclusion=True):
m = parse_molblock(ctab, useRDKitChemistry=False)
if check_exclusion:
if exclude_flag(m, includeRDKitSanitization=False):
return ctab
return Chem.MolToMolBlock(standardize_mol(m, check_exclusion=False, sanitize=False))
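# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the pipeline above). It shows how the
# molblock-level helpers defined here might be called; the sodium acetate
# example is made up, and any CTAB string would work the same way. `Chem` is
# already imported at the top of this module.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    example_mol = Chem.MolFromSmiles("CC(=O)[O-].[Na+]")
    example_ctab = Chem.MolToMolBlock(example_mol)
    # standardize the raw molblock
    std_ctab = standardize_molblock(example_ctab)
    # strip salts/solvents and neutralize to get the parent structure
    parent_ctab, excluded = get_parent_molblock(example_ctab)
    print("excluded:", excluded)
    print(parent_ctab)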
|
1620dc6b043a1b2cb3c551b7f6f80bf874bff468
|
4e558281ab352b745e970936650e479bbb687982
|
/videoflow/processors/vision/pose.py
|
343859b270f7ecf18eecf931392be70c32c8d5e3
|
[
"MIT"
] |
permissive
|
videoflow/videoflow
|
e3b84b3acd3591837d30ce51d5023f3dee9823f3
|
c49d3fe6c814574bcda1a4e907ce52ea86e1617c
|
refs/heads/master
| 2023-01-24T06:51:56.141621
| 2022-01-20T14:23:58
| 2022-01-20T14:23:58
| 181,554,939
| 1,065
| 96
|
MIT
| 2022-01-20T14:23:59
| 2019-04-15T19:47:22
|
Python
|
UTF-8
|
Python
| false
| false
| 108
|
py
|
pose.py
|
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
|
987a0efb3aa852e73165c6efd6b5db98a52f6ecb
|
9b1eda0abdc5dea7c6e9695ff4e1098abe0a708b
|
/tests/test_message_pump.py
|
c6f9d921ca2890090f7f1dd8984f9daa54681f89
|
[
"MIT"
] |
permissive
|
Textualize/textual
|
b8cf4b5d18069fccc7623b3116436f479e1ef446
|
b74ac1e47fdd16133ca567390c99ea19de278c5a
|
refs/heads/main
| 2023-08-30T21:40:21.563823
| 2023-08-30T10:18:27
| 2023-08-30T10:18:27
| 355,959,597
| 14,818
| 588
|
MIT
| 2023-09-14T20:22:02
| 2021-04-08T15:24:47
|
Python
|
UTF-8
|
Python
| false
| false
| 3,510
|
py
|
test_message_pump.py
|
import pytest
from textual.app import App, ComposeResult
from textual.errors import DuplicateKeyHandlers
from textual.events import Key
from textual.widget import Widget
from textual.widgets import Input
class ValidWidget(Widget):
called_by = None
def key_x(self):
self.called_by = self.key_x
def key_ctrl_i(self):
self.called_by = self.key_ctrl_i
async def test_dispatch_key_valid_key():
widget = ValidWidget()
result = await widget.dispatch_key(Key(key="x", character="x"))
assert result is True
assert widget.called_by == widget.key_x
async def test_dispatch_key_valid_key_alias():
"""When you press tab or ctrl+i, it comes through as a tab key event, but handlers for
tab and ctrl+i are both considered valid."""
widget = ValidWidget()
result = await widget.dispatch_key(Key(key="tab", character="\t"))
assert result is True
assert widget.called_by == widget.key_ctrl_i
class DuplicateHandlersWidget(Widget):
called_by = None
def key_x(self):
self.called_by = self.key_x
def _key_x(self):
self.called_by = self._key_x
def key_tab(self):
self.called_by = self.key_tab
def key_ctrl_i(self):
self.called_by = self.key_ctrl_i
async def test_dispatch_key_raises_when_conflicting_handler_aliases():
"""If you've got a handler for e.g. ctrl+i and a handler for tab, that's probably a mistake.
In the terminal, they're the same thing, so we fail fast via exception here."""
widget = DuplicateHandlersWidget()
with pytest.raises(DuplicateKeyHandlers):
await widget.dispatch_key(Key(key="tab", character="\t"))
assert widget.called_by == widget.key_tab
class PreventTestApp(App):
def __init__(self) -> None:
self.input_changed_events = []
super().__init__()
def compose(self) -> ComposeResult:
yield Input()
def on_input_changed(self, event: Input.Changed) -> None:
self.input_changed_events.append(event)
async def test_prevent() -> None:
app = PreventTestApp()
async with app.run_test() as pilot:
assert not app.input_changed_events
input = app.query_one(Input)
input.value = "foo"
await pilot.pause()
assert len(app.input_changed_events) == 1
assert app.input_changed_events[0].value == "foo"
with input.prevent(Input.Changed):
input.value = "bar"
await pilot.pause()
assert len(app.input_changed_events) == 1
assert app.input_changed_events[0].value == "foo"
async def test_prevent_with_call_next() -> None:
"""Test for https://github.com/Textualize/textual/issues/3166.
Does a callback scheduled with `call_next` respect messages that
were prevented when it was scheduled?
"""
hits = 0
class PreventTestApp(App[None]):
def compose(self) -> ComposeResult:
yield Input()
def change_input(self) -> None:
self.query_one(Input).value += "a"
def on_input_changed(self) -> None:
nonlocal hits
hits += 1
app = PreventTestApp()
async with app.run_test() as pilot:
app.call_next(app.change_input)
await pilot.pause()
assert hits == 1
with app.prevent(Input.Changed):
app.call_next(app.change_input)
await pilot.pause()
assert hits == 1
app.call_next(app.change_input)
await pilot.pause()
assert hits == 2
|
0560c5f188b45701df3ea452289d521c81e79b08
|
d4be43f95987606c377a9ecedf3f3ce4080dc268
|
/linguee_api/utils.py
|
2440ef89025b709e39522a9bb8a4e970b87034e1
|
[
"MIT"
] |
permissive
|
imankulov/linguee-api
|
0220b655037c750a683fe0373d17d447467d8f9c
|
be2c6a496f547f71931a3b51ecbaf7732c729fd3
|
refs/heads/master
| 2023-07-20T05:17:32.046813
| 2023-07-15T01:23:37
| 2023-07-19T22:01:23
| 74,171,706
| 165
| 45
|
MIT
| 2023-07-19T22:01:25
| 2016-11-18T22:34:06
|
Python
|
UTF-8
|
Python
| false
| false
| 592
|
py
|
utils.py
|
import importlib
def import_string(import_name: str):
"""
Import an object based on the import string.
Separate module name from the object name with ":". For example,
"linuguee_api.downloaders:HTTPXDownloader"
"""
if ":" not in import_name:
raise RuntimeError(
f'{import_name} must separate module from object with ":". '
f'For example, "linguee_api.downloaders:HTTPXDownloader"'
)
module_name, object_name = import_name.rsplit(":", 1)
mod = importlib.import_module(module_name)
return getattr(mod, object_name)
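# Hedged usage sketch (not part of the module above): import_string resolves a
# "module:attribute" string to the named object. json:dumps is used here only
# because it is always importable; the docstring's HTTPXDownloader example
# resolves the same way.
if __name__ == "__main__":
    dumps = import_string("json:dumps")
    print(dumps({"resolved": True}))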
|
9a23d87a654a908a8252d60912a30685d6b5dca2
|
11f7558e56bcfb742495cec766baeea7650a3103
|
/python/Multi-Service/content_moderator_cs.py
|
c12dd1c744e5dea194f9d1c35589412955d565fb
|
[
"MIT"
] |
permissive
|
Azure-Samples/cognitive-services-quickstart-code
|
ee6936fae3bdfc902e6e8c74080f598845eb9c49
|
3ec40229ae753720605319e2e4d0955f9039449a
|
refs/heads/master
| 2023-08-21T21:19:28.114774
| 2023-06-06T20:12:38
| 2023-06-06T20:12:38
| 198,896,181
| 323
| 511
|
MIT
| 2023-09-08T06:29:25
| 2019-07-25T20:20:09
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 3,063
|
py
|
content_moderator_cs.py
|
import os
from pprint import pprint
from msrest.authentication import CognitiveServicesCredentials
from azure.cognitiveservices.vision.contentmoderator import ContentModeratorClient
from azure.cognitiveservices.vision.contentmoderator.models import ( Evaluate, OCR, FoundFaces )
'''
This quickstart uses Content Moderator to moderate a list of images.
Uses the general Cognitive Services key/endpoint. It's used when you want to
combine many Cognitive Services with just one authentication key/endpoint.
Services are not combined here, but could be potentially.
Install the Content Moderator SDK from a command prompt or IDE terminal:
pip install --upgrade azure-cognitiveservices-vision-contentmoderator
The Content Moderator SDK:
https://docs.microsoft.com/en-us/python/api/azure-cognitiveservices-vision-contentmoderator/?view=azure-python
'''
subscription_key = "PASTE_YOUR_CONTENT_MODERATOR_SUBSCRIPTION_KEY_HERE"
endpoint = "PASTE_YOUR_CONTENT_MODERATOR_ENDPOINT_HERE"
# List of URL images used to moderate.
IMAGE_LIST = [
"https://moderatorsampleimages.blob.core.windows.net/samples/sample2.jpg",
"https://moderatorsampleimages.blob.core.windows.net/samples/sample5.png"
]
'''
AUTHENTICATE
Create a Content Moderator client.
'''
client = ContentModeratorClient(
endpoint=endpoint,
credentials=CognitiveServicesCredentials(subscription_key)
)
'''
CONTENT MODERATOR
This quickstart moderates an image, then text and faces within the image.
'''
print('IMAGE MODERATION')
print()
# Image moderation, using image at [0]
print("Evaluate the image '{}' for adult and racy content:".format(os.path.basename(IMAGE_LIST[0])))
mod_image = client.image_moderation.evaluate_url_input(content_type="application/json", cache_image=True,
data_representation="URL", value=IMAGE_LIST[0])
assert isinstance(mod_image, Evaluate)
# Format for printing
mod_results = list(mod_image.as_dict().items())
for result in mod_results:
print(result)
# Moderating text in an image, using image at [0]
print("\nDetect, extract, and moderate text for image {}:".format(
os.path.basename(IMAGE_LIST[0])))
mod_image = client.image_moderation.ocr_url_input(language="eng", content_type="application/json",
data_representation="URL", value=IMAGE_LIST[0], cache_image=True)
assert isinstance(mod_image, OCR)
# Format for printing
mod_results = list(mod_image.as_dict().items())
for result in mod_results:
print(result)
# Moderating faces in an image, using image at [1]
print("\nDetect faces and moderate for image {}:".format(
os.path.basename(IMAGE_LIST[1])))
mod_image = client.image_moderation.find_faces_url_input(content_type="application/json", cache_image=True,
data_representation="URL", value=IMAGE_LIST[1])
assert isinstance(mod_image, FoundFaces)
# Format for printing
mod_results = list(mod_image.as_dict().items())
for result in mod_results:
print(result)
print()
|
1ccc56b1d943d20577152cfd3120663262326445
|
e5de04caeb8ae7fae9a0c0604aa93008c27569fd
|
/examples/calculations/Parse_Angles.py
|
ac2a9cdac3f26fc1564705f88c8c6e01ada43983
|
[
"BSD-3-Clause"
] |
permissive
|
Unidata/MetPy
|
2c92c93173ce2458b54212fa7c18761048a8520e
|
c7124e6f375eb5810ce49d53c9d5501c2efdfb75
|
refs/heads/main
| 2023-08-31T15:37:55.649855
| 2023-08-30T18:48:58
| 2023-08-30T18:48:58
| 1,409,621
| 1,041
| 395
|
BSD-3-Clause
| 2023-09-14T13:47:18
| 2011-02-25T04:20:25
|
Python
|
UTF-8
|
Python
| false
| false
| 1,063
|
py
|
Parse_Angles.py
|
# Copyright (c) 2015-2018 MetPy Developers.
# Distributed under the terms of the BSD 3-Clause License.
# SPDX-License-Identifier: BSD-3-Clause
"""
============
Parse angles
============
Demonstrate how to convert direction strings to angles.
The code below shows how to parse directional text into angles.
It also demonstrates the function's flexibility
in handling various string formatting.
"""
import metpy.calc as mpcalc
###########################################
# Create a test value of a directional text
dir_str = 'SOUTH SOUTH EAST'
print(dir_str)
###########################################
# Now throw that string into the function to calculate
# the corresponding angle
angle_deg = mpcalc.parse_angle(dir_str)
print(angle_deg)
###########################################
# The function can also handle arrays of strings
# with different abbreviations and capitalizations
dir_str_list = ['ne', 'NE', 'NORTHEAST', 'NORTH_EAST', 'NORTH east']
angle_deg_list = mpcalc.parse_angle(dir_str_list)
print(angle_deg_list)
|
9d9c45d3eddac28ef34bdbc90c150d3ff350c739
|
7e1c4dd6a2cae0597b4f4e961063cf077acdfd4c
|
/tests/environments/analytics_environment.py
|
490072ac3ba458251f3b8461bbe108da42820280
|
[
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
couchbase/couchbase-python-client
|
753fa434db910d175bf9ea53a5829a40ba36e938
|
c7d80434be3f917d6f25439a918aed30273f63f4
|
refs/heads/master
| 2023-08-29T14:04:13.532717
| 2023-08-24T22:53:30
| 2023-08-25T03:35:21
| 2,122,194
| 223
| 87
|
Apache-2.0
| 2023-05-30T16:05:59
| 2011-07-29T04:24:46
|
Python
|
UTF-8
|
Python
| false
| false
| 13,159
|
py
|
analytics_environment.py
|
# Copyright 2016-2023. Couchbase, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import annotations
import time
from typing import Optional
from couchbase.result import AnalyticsResult
from tests.environments import CollectionType
from tests.environments.test_environment import AsyncTestEnvironment, TestEnvironment
class AnalyticsTestEnvironment(TestEnvironment):
DATASET_NAME = 'test-dataset'
@property
def fqdn(self) -> Optional[str]:
return f'`{self.bucket.name}`.`{self.scope.name}`.`{self.collection.name}`'
def assert_rows(self,
result, # type: AnalyticsResult
expected_count):
count = 0
assert isinstance(result, (AnalyticsResult,))
for row in result.rows():
assert row is not None
count += 1
assert count >= expected_count
def create_analytics_collections(self):
"""
Setup queries:
Create dataverse:
CREATE DATAVERSE `default`.`test-scope` IF NOT EXISTS;
Create dataset:
USE `default`.`test-scope`;
CREATE DATASET IF NOT EXISTS `test-collection` ON `default`.`test-scope`.`test-collection`;
Connect Link:
USE `default`.`test-scope`; CONNECT LINK Local;
"""
dv_fqdn = f'`{self.bucket.name}`.`{self.scope.name}`'
q_str = f'CREATE DATAVERSE {dv_fqdn} IF NOT EXISTS;'
res = self.cluster.analytics_query(q_str)
[_ for _ in res.rows()]
q_str = f'USE {dv_fqdn}; CREATE DATASET IF NOT EXISTS `{self.collection.name}` ON {self.fqdn}'
res = self.cluster.analytics_query(q_str)
[_ for _ in res.rows()]
q_str = f'USE {dv_fqdn}; CONNECT LINK Local;'
res = self.cluster.analytics_query(q_str)
[_ for _ in res.rows()]
def get_batch_id(self):
if hasattr(self, '_batch_id'):
return self._batch_id
doc = list(self._loaded_docs.values())[0]
self._batch_id = doc['batch']
return self._batch_id
def setup(self,
collection_type, # type: CollectionType
):
if collection_type == CollectionType.NAMED:
self.enable_collection_mgmt().enable_named_collections()
TestEnvironment.try_n_times(5, 3, self.setup_named_collections)
self.create_analytics_collections()
query_namespace = f'`{self.collection.name}`'
query_context = f'default:`{self.bucket.name}`.`{self.scope.name}`'
else:
TestEnvironment.try_n_times(10,
3,
self.aixm.create_dataset,
self.DATASET_NAME,
self.bucket.name,
ignore_if_exists=True)
self.aixm.connect_link()
query_namespace = f'`{self.DATASET_NAME}`'
query_context = None
TestEnvironment.try_n_times(5, 3, self.load_data)
for _ in range(5):
row_count_good = self._check_row_count(self.cluster,
query_namespace,
10,
query_context=query_context)
if row_count_good:
break
print('Waiting for index to load, sleeping a bit...')
time.sleep(5)
def teardown(self,
collection_type, # type: CollectionType
):
TestEnvironment.try_n_times(5, 3, self.purge_data)
if collection_type == CollectionType.NAMED:
self.teardown_analytics_collections()
TestEnvironment.try_n_times(5, 3, self.teardown_named_collections)
else:
self.aixm.disconnect_link()
TestEnvironment.try_n_times(10,
3,
self.aixm.drop_dataset,
self.DATASET_NAME,
ignore_if_not_exists=True)
def teardown_analytics_collections(self):
"""
Tear-down queries:
Disconnect Link:
USE `default`.`test-scope`; DISCONNECT LINK Local;
Drop dataset:
USE `default`.`test-scope`; DROP DATASET `test-collection` IF EXISTS;
Drop dataverse:
DROP DATAVERSE `default`.`test-scope` IF EXISTS;
"""
dv_fqdn = f'`{self.bucket.name}`.`{self.scope.name}`'
q_str = f'USE {dv_fqdn}; DISCONNECT LINK Local;'
res = self.cluster.analytics_query(q_str)
[_ for _ in res.rows()]
q_str = f'USE {dv_fqdn}; DROP DATASET `{self.collection.name}` IF EXISTS;'
res = self.cluster.analytics_query(q_str)
[_ for _ in res.rows()]
q_str = f'DROP DATAVERSE {dv_fqdn} IF EXISTS;'
res = self.cluster.analytics_query(q_str)
[_ for _ in res.rows()]
def _check_row_count(self,
cb,
query_namespace, # type: str
min_count, # type: int
query_context=None, # type: Optional[str]
) -> bool:
q_str = f'SELECT COUNT(1) AS doc_count FROM {query_namespace}'
if query_context is not None:
result = cb.analytics_query(q_str, query_context=query_context)
else:
result = cb.analytics_query(q_str)
rows = [r for r in result.rows()]
return len(rows) > 0 and rows[0].get('doc_count', 0) > min_count
@classmethod
def from_environment(cls,
env # type: TestEnvironment
) -> AnalyticsTestEnvironment:
env_args = {
'bucket': env.bucket,
'cluster': env.cluster,
'default_collection': env.default_collection,
'couchbase_config': env.config,
'data_provider': env.data_provider,
}
cb_env = cls(**env_args)
return cb_env
class AsyncAnalyticsTestEnvironment(AsyncTestEnvironment):
DATASET_NAME = 'test-dataset'
@property
def fqdn(self) -> Optional[str]:
return f'`{self.bucket.name}`.`{self.scope.name}`.`{self.collection.name}`'
async def assert_rows(self,
result, # type: AnalyticsResult
expected_count):
count = 0
assert isinstance(result, (AnalyticsResult,))
async for row in result.rows():
assert row is not None
count += 1
assert count >= expected_count
async def create_analytics_collections(self):
"""
Setup queries:
Create dataverse:
CREATE DATAVERSE `default`.`test-scope` IF NOT EXISTS;
Create dataset:
USE `default`.`test-scope`;
CREATE DATASET IF NOT EXISTS `test-collection` ON `default`.`test-scope`.`test-collection`;
Connect Link:
USE `default`.`test-scope`; CONNECT LINK Local;
"""
dv_fqdn = f'`{self.bucket.name}`.`{self.scope.name}`'
q_str = f'CREATE DATAVERSE {dv_fqdn} IF NOT EXISTS;'
res = self.cluster.analytics_query(q_str)
[_ async for _ in res.rows()]
q_str = f'USE {dv_fqdn}; CREATE DATASET IF NOT EXISTS `{self.collection.name}` ON {self.fqdn}'
res = self.cluster.analytics_query(q_str)
[_ async for _ in res.rows()]
q_str = f'USE {dv_fqdn}; CONNECT LINK Local;'
res = self.cluster.analytics_query(q_str)
[_ async for _ in res.rows()]
def get_batch_id(self):
if hasattr(self, '_batch_id'):
return self._batch_id
doc = list(self._loaded_docs.values())[0]
self._batch_id = doc['batch']
return self._batch_id
async def setup(self,
collection_type, # type: CollectionType
):
if collection_type == CollectionType.NAMED:
self.enable_collection_mgmt().enable_named_collections()
await AsyncTestEnvironment.try_n_times(5, 3, self.setup_named_collections)
await self.create_analytics_collections()
query_namespace = f'`{self.collection.name}`'
query_context = f'default:`{self.bucket.name}`.`{self.scope.name}`'
else:
await AsyncTestEnvironment.try_n_times(10,
3,
self.aixm.create_dataset,
self.DATASET_NAME,
self.bucket.name,
ignore_if_exists=True)
await self.aixm.connect_link()
query_namespace = f'`{self.DATASET_NAME}`'
query_context = None
await AsyncTestEnvironment.try_n_times(5, 3, self.load_data)
for _ in range(5):
row_count_good = await self._check_row_count(self.cluster,
query_namespace,
10,
query_context=query_context)
if row_count_good:
break
print('Waiting for index to load, sleeping a bit...')
await AsyncTestEnvironment.sleep(5)
async def teardown(self,
collection_type, # type: CollectionType
):
await AsyncTestEnvironment.try_n_times(5, 3, self.purge_data)
if collection_type == CollectionType.NAMED:
await self.teardown_analytics_collections()
await AsyncTestEnvironment.try_n_times(5, 3, self.teardown_named_collections)
else:
await self.aixm.disconnect_link()
await AsyncTestEnvironment.try_n_times(10,
3,
self.aixm.drop_dataset,
self.DATASET_NAME,
ignore_if_not_exists=True)
async def teardown_analytics_collections(self):
"""
Tear-down queries:
Disconnect Link:
USE `default`.`test-scope`; DISCONNECT LINK Local;
Drop dataset:
USE `default`.`test-scope`; DROP DATASET `test-collection` IF EXISTS;
Drop dataverse:
DROP DATAVERSE `default`.`test-scope` IF EXISTS;
"""
dv_fqdn = f'`{self.bucket.name}`.`{self.scope.name}`'
q_str = f'USE {dv_fqdn}; DISCONNECT LINK Local;'
res = self.cluster.analytics_query(q_str)
[_ async for _ in res.rows()]
q_str = f'USE {dv_fqdn}; DROP DATASET `{self.collection.name}` IF EXISTS;'
res = self.cluster.analytics_query(q_str)
[_ async for _ in res.rows()]
q_str = f'DROP DATAVERSE {dv_fqdn} IF EXISTS;'
res = self.cluster.analytics_query(q_str)
[_ async for _ in res.rows()]
async def _check_row_count(self,
cb,
query_namespace, # type: str
min_count, # type: int
query_context=None, # type: Optional[str]
) -> bool:
q_str = f'SELECT COUNT(1) AS doc_count FROM {query_namespace}'
if query_context is not None:
result = cb.analytics_query(q_str, query_context=query_context)
else:
result = cb.analytics_query(q_str)
rows = [r async for r in result.rows()]
return len(rows) > 0 and rows[0].get('doc_count', 0) > min_count
@classmethod
def from_environment(cls,
env # type: TestEnvironment
) -> AnalyticsTestEnvironment:
env_args = {
'bucket': env.bucket,
'cluster': env.cluster,
'default_collection': env.default_collection,
'couchbase_config': env.config,
'data_provider': env.data_provider,
}
cb_env = cls(**env_args)
return cb_env
|
d48469d441e1f6d07951ff8b5237e22cf25f6f6f
|
40195e6f86bf8620850f0c56e98eae5693e88277
|
/coremltools/converters/mil/mil/passes/defs/optimize_conv.py
|
f31360f12657532014772926b4466e86e3c5fc31
|
[
"MIT",
"BSD-3-Clause"
] |
permissive
|
apple/coremltools
|
009dfa7154d34cab8edcafa618e689e407521f50
|
feed174188f7773631a3d574e1ff9889a135c986
|
refs/heads/main
| 2023-09-01T23:26:13.491955
| 2023-08-31T18:44:31
| 2023-08-31T18:44:31
| 95,862,535
| 3,742
| 705
|
BSD-3-Clause
| 2023-09-14T17:33:58
| 2017-06-30T07:39:02
|
Python
|
UTF-8
|
Python
| false
| false
| 41,725
|
py
|
optimize_conv.py
|
# Copyright (c) 2023, Apple Inc. All rights reserved.
#
# Use of this source code is governed by a BSD-3-clause license that can be
# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause
import copy
import numpy as np
from coremltools import _logger as logger
from coremltools.converters.mil.mil import Block
from coremltools.converters.mil.mil import Builder as mb
from coremltools.converters.mil.mil import Operation, types
from coremltools.converters.mil.mil.passes.graph_pass import AbstractGraphPass
from coremltools.converters.mil.mil.passes.helper import (
_check_child_op_type,
_check_no_output_connection,
block_context_manager,
)
from coremltools.converters.mil.mil.passes.pass_registry import register_pass
from coremltools.converters.mil.mil.types.symbolic import any_symbolic
@register_pass(namespace="common")
class add_conv_transpose_output_shape(AbstractGraphPass):
"""
The ``conv_transpose`` input ``output_shape`` is an optional input.
Since we can infer the output shape from ``type_inference``, we add
``output_shape`` input whenever it is known to be constant at
compile time. For example:
.. code-block::
Given:
%1: (1, 5, 39, fp32) = conv_transpose(...) # no output_shape input.
Result:
%2: (3, i32) = const(val=[1,5,39])
%3: (1, 5, 39, fp32) = conv_transpose(..., output_shape=%2)
"""
def apply(self, prog):
for f in prog.functions.values():
self._handle_block(f)
@staticmethod
def _match_pattern(op):
return (
op.op_type == "conv_transpose"
and op.output_shape is None
and not any_symbolic(op.outputs[0].shape)
)
@block_context_manager
def _handle_block(self, block):
for op in list(block.operations):
for b in op.blocks:
self._handle_block(b)
if not self._match_pattern(op):
continue
# matched pattern
x = mb.conv_transpose(
**op.inputs,
output_shape=op.outputs[0].shape,
name=op.name + "_has_output_shape",
before_op=op,
)
op.enclosing_block.replace_uses_of_var_after_op(
anchor_op=op, old_var=op.outputs[0], new_var=x
)
block.remove_ops([op])
@register_pass(namespace="common")
class compose_conv1d(AbstractGraphPass):
"""
In `TensorFlow <https://github.com/tensorflow/tensorflow/blob/r1.15/tensorflow/python/ops/nn_ops.py#L1657>`_,
``tf.keras.layers.Conv1D`` is a composite op:
.. code-block::
expand a dummy dim -> Conv2D -> squeeze the dummy dim
In `PyTorch <https://github.com/pytorch/pytorch/blob/release/1.13/aten/src/ATen/native/Convolution.cpp#L1087>`_,
this is also true for some backends (``mkldnn`` and ``xpu``).
This decomposition wrecks the coremltools ``conv1d`` graph passes,
so we should recompose the fragments back to MIL ``conv``, which natively supports ``conv1d``:
.. code-block::
Pattern 1:
Given:
%2 = expand_dims(%1, axes=-2) or expand_dims(%1, axes=2), %1.rank = 3
%3 = conv(%2)
%4 = squeeze(%3, axes=-2) or squeeze(%3, axes=2)
...
Result:
%4 = conv(%1)
...
Pattern 2 (TensorFlow channel_last):
Given:
%2 = expand_dims(%1, axes=-3) or expand_dims(%1, axes=1), %1.rank = 3
%3 = transpose(%2, perm=(0, 3, 1, 2))
%4 = conv(%3)
%5 = transpose(%4, perm=(0, 2, 3, 1))
%6 = squeeze(%5, axes=-3) or squeeze(%5, axes=1)
...
Result:
%3 = transpose(%1, perm=(0, 2, 1))
%4 = conv(%3)
%6 = transpose(%4, perm=(0, 2, 1))
...
"""
def apply(self, prog):
for f in prog.functions.values():
self._compose_conv1d_block(f)
@block_context_manager
def _compose_conv1d_block(self, block: Block):
def help_compose_conv1d_block(block: Block) -> bool:
for op in list(block.operations):
for b in op.blocks:
self._compose_conv1d_block(b)
# must start with expanding a 3-D tensor,
# who has batch, channel, length dimensions
if op.op_type != "expand_dims" or op.x.rank != 3:
continue
# try pattern `expand_dim` -> `conv2d` -> `squeeze`
if self._try_match_and_transform_pattern(op, block):
# has to break as the downstream iterator is affected
return True
# try pattern `expand_dim` -> `transpose` -> `conv2d` -> `transpose` -> `squeeze`
if self._try_match_and_transform_pattern_channel_last(op, block):
# has to break as the downstream iterator is affected
return True
return False
block_changed = True
while block_changed:
block_changed = help_compose_conv1d_block(block)
def _try_match_and_transform_pattern(self, expand_op: Operation, block: Block) -> bool:
"""
identify the pattern: `expand_dim` -> `conv2d` -> `squeeze`
"""
# abort composition if dummy dimension is not added as height
if expand_op.axes.rank != 1 or expand_op.axes.val[0] not in (-2, 2):
return False
# `expand_dims` -> `conv`
if not _check_child_op_type(expand_op, "conv"):
return False
conv_op = expand_op.outputs[0].child_ops[0]
# `conv` -> `squeeze`
if not _check_child_op_type(conv_op, "squeeze"):
return False
squeeze_op = conv_op.outputs[0].child_ops[0]
# abort composition if not squeezing the dummy height
if squeeze_op.axes.rank != 1 or squeeze_op.axes.val[0] not in (-2, 2):
return False
# everything looks good
return self._try_apply_transform(expand_op, conv_op, squeeze_op, block)
def _try_match_and_transform_pattern_channel_last(
self, expand_op: Operation, block: Block
) -> bool:
"""
identify the pattern: `expand_dim` -> `transpose` -> `conv2d` -> `transpose` -> `squeeze`
"""
# abort composition if dummy dimension is not added as height
if expand_op.axes.rank != 1 or expand_op.axes.val[0] not in (-3, 1):
return False
# `expand_dims` -> `transpose`
if not _check_child_op_type(expand_op, "transpose"):
return False
transpose1_op = expand_op.outputs[0].child_ops[0]
# abort composition if permutation is not (0, 3, 1, 2)
perm1 = transpose1_op.perm.val.copy()
perm1[np.where(perm1 < 0)] += 4
if np.any(perm1 != (0, 3, 1, 2)):
return False
# `transpose` -> `conv`
if not _check_child_op_type(transpose1_op, "conv"):
return False
conv_op = transpose1_op.outputs[0].child_ops[0]
# `conv` -> `transpose`
if not _check_child_op_type(conv_op, "transpose"):
return False
transpose2_op = conv_op.outputs[0].child_ops[0]
# abort composition if permutation is not (0, 2, 3, 1)
perm2 = transpose2_op.perm.val.copy()
perm2[np.where(perm2 < 0)] += 4
if np.any(perm2 != (0, 2, 3, 1)):
return False
# `transpose` -> `squeeze`
if not _check_child_op_type(transpose2_op, "squeeze"):
return False
squeeze_op = transpose2_op.outputs[0].child_ops[0]
# abort composition if not squeezing the dummy height
if squeeze_op.axes.rank != 1 or squeeze_op.axes.val[0] not in (-3, 1):
return False
# everything looks good
return self._try_apply_transform_channel_last(
expand_op, transpose1_op, conv_op, transpose2_op, squeeze_op, block
)
@staticmethod
def _try_apply_transform(
expand_op: Operation, conv_op: Operation, squeeze_op: Operation, block: Block
) -> bool:
ops_to_remove = [expand_op, conv_op, squeeze_op]
if not _check_no_output_connection(block, ops_to_remove):
return False
# prepare `conv1d`
conv_kwargs = {"name": squeeze_op.outputs[0].name, "before_op": conv_op}
# inherit `x` from `expand_dim`
conv_kwargs["x"] = expand_op.x
# inherit `pad_type`, `groups`, `bias` from `conv2d`
conv_kwargs["pad_type"] = conv_op.inputs["pad_type"].val
conv_kwargs["groups"] = conv_op.inputs["groups"].val
bias = conv_op.inputs.get("bias", None)
if bias is not None:
conv_kwargs["bias"] = bias
# squeeze `weight`, `strides`, `pad`, `dilations` from `conv2d`
conv_kwargs["weight"] = mb.squeeze(
x=conv_op.inputs["weight"], axes=(-2,), before_op=conv_op
)
conv_kwargs["strides"] = (conv_op.inputs["strides"].val[-1],)
conv_kwargs["pad"] = (conv_op.inputs["pad"].val[-2], conv_op.inputs["pad"].val[-1])
conv_kwargs["dilations"] = (conv_op.inputs["dilations"].val[-1],)
# compose `conv1d`
out = mb.conv(**conv_kwargs)
# try replacing `expand_dim` -> `conv2d` -> `squeeze` output
# with the new `conv1d` output
if squeeze_op.enclosing_block.try_replace_uses_of_var_after_op(
anchor_op=squeeze_op, old_var=squeeze_op.outputs[0], new_var=out
):
# remove `expand_dim` -> `conv2d` -> `squeeze`
block.remove_ops(ops_to_remove)
return True
return False
@staticmethod
def _try_apply_transform_channel_last(
expand_op: Operation,
transpose1_op: Operation,
conv_op: Operation,
transpose2_op: Operation,
squeeze_op: Operation,
block: Block,
) -> bool:
ops_to_remove = [expand_op, transpose1_op, conv_op, transpose2_op, squeeze_op]
if not _check_no_output_connection(block, ops_to_remove):
return False
# create `transpose1`
transpose1_out = mb.transpose(
x=expand_op.x, perm=(0, 2, 1), name=transpose1_op.outputs[0].name, before_op=expand_op
)
# prepare `conv1d`
conv_kwargs = {"name": conv_op.outputs[0].name, "x": transpose1_out, "before_op": conv_op}
# inherit `pad_type`, `groups`, `bias` from `conv2d`
conv_kwargs["pad_type"] = conv_op.inputs["pad_type"].val
conv_kwargs["groups"] = conv_op.inputs["groups"].val
bias = conv_op.inputs.get("bias", None)
if bias is not None:
conv_kwargs["bias"] = bias
# squeeze `weight`, `strides`, `pad`, `dilations` from `conv2d`
conv_kwargs["weight"] = mb.squeeze(
x=conv_op.inputs["weight"], axes=(-2,), before_op=conv_op
)
conv_kwargs["strides"] = (conv_op.inputs["strides"].val[-1],)
conv_kwargs["pad"] = (conv_op.inputs["pad"].val[-2], conv_op.inputs["pad"].val[-1])
conv_kwargs["dilations"] = (conv_op.inputs["dilations"].val[-1],)
# compose `conv1d`
conv_out = mb.conv(**conv_kwargs)
# create `transpose2`
transpose2_out = mb.transpose(
x=conv_out, perm=(0, 2, 1), name=squeeze_op.outputs[0].name, before_op=transpose2_op
)
# try replacing `expand_dim` -> `transpose` -> `conv2d` -> `transpose` -> `squeeze` output
# with the new `transpose` -> `conv1d` -> `transpose` output
if squeeze_op.enclosing_block.try_replace_uses_of_var_after_op(
anchor_op=squeeze_op, old_var=squeeze_op.outputs[0], new_var=transpose2_out
):
# remove `expand_dim` -> `transpose` -> `conv2d` -> `transpose` -> `squeeze`
block.remove_ops(ops_to_remove)
return True
return False
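# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the pass above): the decomposition that
# compose_conv1d recognizes. With a dummy height axis inserted at position -2,
# expand_dims -> conv2d -> squeeze reproduces a plain conv1d, which is why the
# fragments can be recomposed into a single MIL conv. Shapes and values are
# arbitrary; torch is used here only for the numerical check.
# ---------------------------------------------------------------------------
def _sketch_conv1d_recomposition_equivalence():
    import torch
    import torch.nn.functional as F

    x = torch.randn(2, 3, 16)  # (batch, C_in, length)
    w = torch.randn(5, 3, 4)   # (C_out, C_in, kernel)
    direct = F.conv1d(x, w)
    decomposed = F.conv2d(x.unsqueeze(-2), w.unsqueeze(-2)).squeeze(-2)
    assert torch.allclose(direct, decomposed, atol=1e-5)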
@register_pass(namespace="common")
class fuse_conv_batchnorm(AbstractGraphPass):
"""
Fuse the following ``batch_norm`` layer into ``conv`` and ``conv_transpose``.
That is, convert ``conv + batch_norm`` to ``conv``, by modifying the weight and bias in the ``conv`` layer.
.. code-block::
Given:
%2 = conv(%1)
...
%3 = batch_norm(%2)
...
Result:
%3 = conv(%1)
...
"""
def apply(self, prog):
for f in prog.functions.values():
block_changed = True
while block_changed:
block_changed = self._fuse_conv_batchnorm_block(f)
@staticmethod
def _try_to_transform(conv_op, bn_op):
# get parameters from batch_norm layer
gamma = bn_op.gamma.val
beta = bn_op.beta.val
mean = bn_op.mean.val
variance = bn_op.variance.val
epsilon = bn_op.epsilon.val
# get weight, bias and groups from conv layer
if conv_op.weight.val is None:
return False
conv_weight = conv_op.weight.val
conv_bias = conv_op.bias
groups = conv_op.groups.val
# get type of the conv layer
is_deconv = conv_op.op_type == "conv_transpose"
# The deconv weight transpose axes is determined by the dimension of convolution.
# Conv1d should be [1, 0, 2], Conv2d should be [1, 0, 2, 3], Conv3d should be [1, 0, 2, 3, 4]
if not 3 <= len(conv_weight.shape) <= 5:
raise AssertionError(
f"Only supports Conv1/2/3d, which means weight's dimension should"
f"between 3 and 5, but got weight with {len(conv_weight.shape)} "
f"dimensions. "
)
deconv_weight_transpose_axes = [1, 0] + [axis for axis in range(2, len(conv_weight.shape))]
# D_in denotes the spatial dimensions for conv kernel weight
# for conv_transpose, conv_weight has shape [Cin, Cout / groups, *D_in]
# for conv, conv_weight has shape [Cout, Cin / groups, *D_in]
if is_deconv:
Cout = conv_weight.shape[1] * groups
Cin = conv_weight.shape[0]
else:
Cout = conv_weight.shape[0]
Cin = conv_weight.shape[1] * groups
# get the type of the conv weight
conv_weight_type = conv_weight.dtype
# create bias for conv if not exist
if conv_bias is None:
conv_bias = np.zeros(Cout)
else:
conv_bias = conv_bias.val
conv_bias = conv_bias.astype(conv_weight_type)
# get the original shape of weight and bias
origin_weight_shape = conv_weight.shape
origin_bias_shape = conv_bias.shape
# update the weight for conv layer
new_conv_weight = []
new_conv_bias = []
if is_deconv:
conv_weight = np.transpose(conv_weight, deconv_weight_transpose_axes)
conv_weight = np.reshape(
conv_weight, [Cout, Cin // groups] + list(conv_weight.shape[2:])
)
for i in range(Cout):
# get batch norm parameters for each channel
_gamma = gamma[i]
_beta = beta[i]
_mean = mean[i]
_variance = variance[i]
_scale = _gamma / np.sqrt(_variance + epsilon)
# get conv weight and bias for each channel
_conv_weight = conv_weight[i]
_conv_bias = conv_bias[i]
# update the conv weight and bias
_conv_weight = _conv_weight * _scale
_conv_bias = _scale * (_conv_bias - _mean) + _beta
new_conv_weight.append(_conv_weight)
new_conv_bias.append(_conv_bias)
new_conv_weight = np.array(new_conv_weight).astype(conv_weight_type)
new_conv_bias = np.array(new_conv_bias).astype(conv_weight_type)
if is_deconv:
new_conv_weight = np.reshape(
new_conv_weight, [Cout // groups, Cin] + list(new_conv_weight.shape[2:])
)
new_conv_weight = np.transpose(new_conv_weight, deconv_weight_transpose_axes)
# make sure the updated weight and bias have the same shape as the original ones
if new_conv_weight.shape != origin_weight_shape:
raise AssertionError(
"conv weight should have the same shape before and after the fuse_"
"conv_batchnorm pass. "
)
if new_conv_bias.shape != origin_bias_shape:
raise AssertionError(
"conv bias should have the same shape before and after the fuse_"
"conv_batchnorm pass. "
)
# create a new conv op with the new bias value, copying rest of the attributes
out_name = bn_op.outputs[0].name
conv_kargs = {
"weight": new_conv_weight,
"bias": new_conv_bias,
"name": out_name,
"before_op": conv_op,
}
for k, v in conv_op.inputs.items():
if k in ["weight", "bias"]:
continue
conv_kargs[k] = v
if is_deconv:
x = mb.conv_transpose(**conv_kargs)
else:
x = mb.conv(**conv_kargs)
if bn_op.enclosing_block.try_replace_uses_of_var_after_op(
anchor_op=bn_op,
old_var=bn_op.outputs[0],
new_var=x,
):
bn_op.enclosing_block.remove_ops([conv_op, bn_op])
return True
return False
@block_context_manager
def _fuse_conv_batchnorm_block(self, block):
def _match_pattern(op):
if op.op_type == "conv" or op.op_type == "conv_transpose":
# abort fusion if op output is also a block output
if op.outputs[0] in op.enclosing_block.outputs:
return None
# find batch_norm op
child_ops = op.outputs[0].child_ops
if len(child_ops) == 1:
bn_op_candidate = list(child_ops)[0]
if bn_op_candidate.op_type == "batch_norm":
return bn_op_candidate
return None
fusion_occurred = False
for op in list(block.operations):
for b in op.blocks:
block_changed = True
while block_changed:
block_changed = self._fuse_conv_batchnorm_block(b)
if len(op.blocks) > 0:
# This op can't be conv or conv_transpose
continue
bn_op = _match_pattern(op)
if bn_op is not None:
fusion_occurred = self._try_to_transform(op, bn_op)
# has to break as the downstream iterator is affected.
if fusion_occurred:
return fusion_occurred
return fusion_occurred
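# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the pass above): a numerical check of the
# folding identity used by fuse_conv_batchnorm. A 1x1 convolution is just a
# per-channel linear map, so a dense matmul is enough to verify
#   W' = W * scale,  b' = (b - mean) * scale + beta,  scale = gamma / sqrt(var + eps).
# All values below are made up.
# ---------------------------------------------------------------------------
def _sketch_conv_batchnorm_folding():
    rng = np.random.default_rng(0)
    Cin, Cout, N = 4, 3, 10
    x = rng.normal(size=(N, Cin))
    W = rng.normal(size=(Cout, Cin))  # 1x1 conv weight
    b = rng.normal(size=Cout)         # conv bias
    gamma, beta = rng.normal(size=Cout), rng.normal(size=Cout)
    mean, var, eps = rng.normal(size=Cout), rng.uniform(0.5, 2.0, size=Cout), 1e-5

    scale = gamma / np.sqrt(var + eps)
    y_ref = gamma * ((x @ W.T + b) - mean) / np.sqrt(var + eps) + beta
    y_folded = x @ (W * scale[:, None]).T + ((b - mean) * scale + beta)
    assert np.allclose(y_ref, y_folded)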
@register_pass(namespace="common")
class fuse_conv_bias(AbstractGraphPass):
"""
Fold ``add``/``sub`` into ``bias`` of ``conv`` and ``conv_transpose``.
That is, convert ``conv + add/sub`` to ``conv``, when ``add``/``sub`` is adding a constant.
Two patterns are supported:
.. code-block::
Pattern 1:
Given:
%2 = conv(%1)
...
%3 = add(%2, constant) # where constant has shape (1,C,1)/(C,1) for 1d conv, (1,C,1,1)/(C,1,1) for 2d conv etc
...
Result:
%3 = conv(%1)
...
Pattern 2:
Given:
%2 = conv(%1)
%3 = transpose(%2)
...
%4 = add(%3, constant) # where constant has a broadcastable shape
...
Result:
%2 = conv(%1)
%4 = transpose(%2)
...
"""
child_op_types = ["add", "sub"]
def apply(self, prog):
for f in prog.functions.values():
block_changed = True
while block_changed:
block_changed = self._fuse_conv_bias_block(f)
def _match_pattern(self, op):
if op.op_type == "conv" or op.op_type == "conv_transpose":
# abort fusion if op output is also a block output
if op.outputs[0] in op.enclosing_block.outputs:
return None
# find add
child_ops = op.outputs[0].child_ops
if len(child_ops) == 1:
add_op_candidate = list(child_ops)[0]
if add_op_candidate.op_type in self.child_op_types:
return add_op_candidate
return None
@staticmethod
def _try_to_transform_transpose_pattern(conv_op, block):
ops_to_remove = []
# conv layer
if conv_op.op_type != "conv" and conv_op.op_type != "conv_transpose":
return False
is_deconv = conv_op.op_type == "conv_transpose"
ops_to_remove.append(conv_op)
# transpose layer
if not _check_child_op_type(conv_op, "transpose"):
return False
transpose_op = list(conv_op.outputs[0].child_ops)[0]
ops_to_remove.append(transpose_op)
# add/sub layer
if not _check_child_op_type(transpose_op, "add") and not _check_child_op_type(
transpose_op, "sub"
):
return False
add_or_sub_op = list(transpose_op.outputs[0].child_ops)[0]
ops_to_remove.append(add_or_sub_op)
# get the bias
if add_or_sub_op.x.val is None and add_or_sub_op.y.val is None:
return False
bias = add_or_sub_op.x.val if add_or_sub_op.x.val is not None else add_or_sub_op.y.val
is_first_input = add_or_sub_op.y.val is not None
is_sub = add_or_sub_op.op_type == "sub"
# get the conv bias/weight
conv_shape = conv_op.outputs[0].shape
Cout = conv_shape[1]
conv_weight = conv_op.weight.val
conv_weight_type = conv_weight.dtype
conv_bias = (
np.zeros(Cout).astype(conv_weight_type) if conv_op.bias is None else conv_op.bias.val
)
# check if the bias is compatible for fusion
is_bias_scalar = True
if isinstance(bias, np.ndarray):
if bias.shape == ():
bias = bias.tolist()
elif np.prod(bias.shape) == 1:
bias = np.squeeze(bias).tolist()
else:
is_bias_scalar = False
if not is_bias_scalar:
if np.prod(bias.shape) != Cout:
return False
rank = transpose_op.outputs[0].rank
cout_dim = transpose_op.perm.val.tolist().index(1) - rank
if bias.shape[cout_dim] != Cout:
return False
bias = np.reshape(bias, (Cout))
# compute the new bias
if is_sub:
if is_first_input:
bias = -bias
else:
conv_bias = -conv_bias
new_bias = conv_bias + bias
# compute the new weight
if is_sub and not is_first_input:
new_weight = -conv_weight
else:
new_weight = conv_weight
if not _check_no_output_connection(block, ops_to_remove):
return False
# create a new conv op with the new weight, bias value, copying rest of the attributes
conv_kargs = {"weight": new_weight, "bias": new_bias, "before_op": conv_op}
for k, v in conv_op.inputs.items():
if k in ["weight", "bias"]:
continue
conv_kargs[k] = v
if is_deconv:
x = mb.conv_transpose(**conv_kargs)
else:
x = mb.conv(**conv_kargs)
# create a new transpose op
out_name = add_or_sub_op.outputs[0].name
tranpose_kargs = {"x": x, "name": out_name, "before_op": transpose_op}
for k, v in transpose_op.inputs.items():
if k == "x":
continue
tranpose_kargs[k] = v
x = mb.transpose(**tranpose_kargs)
if add_or_sub_op.enclosing_block.try_replace_uses_of_var_after_op(
anchor_op=add_or_sub_op,
old_var=add_or_sub_op.outputs[0],
new_var=x,
):
add_or_sub_op.enclosing_block.remove_ops(ops_to_remove)
return True
return False
@staticmethod
def _try_to_transform(conv_op, add_op):
if add_op.op_type == "sub":
bias_var = add_op.y
else:
bias_var = add_op.x if add_op.x.val is not None else add_op.y
bias_value = bias_var.val
is_conv_op = conv_op.op_type == "conv"
# check that the bias value is a constant array or a scalar constant
if not isinstance(bias_value, (np.ndarray, np.generic)):
return False
is_bias_scalar = False
if not isinstance(bias_value, np.ndarray):
is_bias_scalar = True
# find rank of the conv input
rank = conv_op.x.rank
if rank is None:
return False
if not (rank == 3 or rank == 4 or rank == 5):
return False
# check compatibility of bias value with the rank of the conv op
# either bias value should be a scalar or:
# rank=3 ==> (B,C,D), which means bias must be (1,C,1) or (C,1)
# rank=4 ==> (B,C,D1,D2), which means bias must be (1,C,1,1) or (C,1,1)
# rank=5 ==> (B,C,D1,D2,D3), which means bias must be (1,C,1,1,1) or (C,1,1,1)
if is_bias_scalar:
bias_value = np.array([bias_value])
else:
# check that there is at most one dimension in the shape that is not 1
if len(np.squeeze(bias_value).shape) > 1:
return False
# check that addition is not happening on the batch dimension
if len(bias_value.shape) == rank:
if bias_value.shape[0] != 1:
return False
# check that last rank-2 entries in the shape vector are all 1s
if np.prod(bias_value.shape[-(rank - 2) :]) != 1:
return False
bias_value = np.squeeze(bias_value)
if add_op.op_type == "sub":
bias_value *= -1
# everything looks good, now find the new updated bias
old_bias = conv_op.inputs.get("bias", None)
old_bias_value = None
if old_bias is not None and old_bias.val is not None:
old_bias_value = old_bias.val
if old_bias is None:
# need to create a fresh numpy array for bias
if np.prod(bias_value.shape) == 1:
# its a scalar bias
# need to find the value of Cout to form a new bias
if conv_op.weight.val is None:
return False
# conv_transpose has weight format [K, C_out, spatial dims]
# conv has weight format [C_out, K, spatial dims]
Cout = conv_op.weight.val.shape[0 if is_conv_op else 1]
new_bias_value = np.broadcast_to(bias_value, (Cout,))
else:
new_bias_value = bias_value
else:
# just need to update the existing bias array
try:
new_bias_value = old_bias_value + bias_value
except:
return False
# create a new conv op with the new bias value, copying rest of the attributes
out_name = add_op.outputs[0].name
if new_bias_value.dtype != np.float32 and new_bias_value.dtype != np.float16:
# cast the bias to match the weight type
weight_np_type = types.nptype_from_builtin(
conv_op.inputs["weight"].sym_type.get_primitive()
)
logger.warning(
"conv_bias_fusion pass: casting bias "
"from {} to {} to match the dtype of the weight of the conv layer".format(
new_bias_value.dtype, weight_np_type
)
)
new_bias_value = new_bias_value.astype(weight_np_type)
new_bias_var = mb.const(val=new_bias_value, before_op=conv_op)
conv_kargs = {"bias": new_bias_var, "name": out_name, "before_op": conv_op}
for k, v in conv_op.inputs.items():
if k == "bias":
continue
conv_kargs[k] = v
if is_conv_op:
x = mb.conv(**conv_kargs)
else:
x = mb.conv_transpose(**conv_kargs)
if add_op.enclosing_block.try_replace_uses_of_var_after_op(
anchor_op=add_op,
old_var=add_op.outputs[0],
new_var=x,
):
add_op.enclosing_block.remove_ops([conv_op, add_op])
return True
return False
@block_context_manager
def _fuse_conv_bias_block(self, block):
fusion_status = False
for op in list(block.operations):
for b in op.blocks:
block_changed = True
while block_changed:
block_changed = self._fuse_conv_bias_block(b)
if len(op.blocks) > 0:
# This op can't be conv or conv_transpose
continue
# pattern 1 : conv + add/sub
add_op = self._match_pattern(op)
if add_op is not None:
fusion_status = self._try_to_transform(op, add_op)
# has to break as the downstream iterator is affected.
if fusion_status:
return fusion_status
# pattern 2 : conv + transpose + add/sub
fusion_status = self._try_to_transform_transpose_pattern(op, block)
if fusion_status:
return fusion_status
return fusion_status
@register_pass(namespace="common")
class fuse_conv_scale(AbstractGraphPass):
"""
Fold ``mul``/``div`` into ``conv``/``conv_transpose`` by updating the weight/bias of the convolution layers.
The scale ``const`` can be a single number (scalar) or a vector with a broadcastable shape.
For example, if the output of the ``conv``/``deconv`` layer is ``(B, Cout, H, W)``,
``const`` of shape ``(Cout, 1, 1)`` and ``(1, Cout, 1, 1)`` are allowed.
.. code-block::
Given:
%2 = conv(%1)
...
%3 = mul(%2, constant) # where constant is the scale constant
...
Result:
%3 = conv(%1)
...
"""
def apply(self, prog):
for f in prog.functions.values():
block_changed = True
while block_changed:
block_changed = self._fuse_conv_scale_block(f)
@staticmethod
def _try_to_transform(conv_op, scale_op):
# get the scale
if scale_op.x.val is None and scale_op.y.val is None:
return False
scale_var = scale_op.x if scale_op.x.val is not None else scale_op.y
scale = scale_var.val
# for the scalar case, the scalar can be either
# 1. a python int/float
# 2. a 0d numpy array
# 3. a 1d numpy array with shape (1,)
is_scalar = True
if isinstance(scale, np.ndarray):
if scale.shape == ():
scale = scale.tolist()
elif scale.shape == (1,):
scale = scale[0]
else:
is_scalar = False
# get weight and bias and groups from conv layer
if conv_op.weight.val is None:
return False
conv_weight = conv_op.weight.val
conv_bias = conv_op.bias
groups = conv_op.groups.val
# get type of the conv layer
is_deconv = conv_op.op_type == "conv_transpose"
is_conv_1d = len(conv_weight.shape) == 3
# D_in denotes the spatial dimensions for conv kernel weight
# for conv_transpose, conv_weight has shape [Cin, Cout / groups, *D_in]
# for conv, conv_weight has shape [Cout, Cin / groups, *D_in]
if is_deconv:
Cout = conv_weight.shape[1] * groups
Cin = conv_weight.shape[0]
else:
Cout = conv_weight.shape[0]
Cin = conv_weight.shape[1] * groups
# for the vector scale case, check if the shape is broadcastable
if not is_scalar:
if not np.prod(scale.shape) == Cout:
return False
if len(scale.shape) == len(conv_weight.shape):
if not scale.shape[1] == Cout:
return False
elif len(scale.shape) == len(conv_weight.shape) - 1:
if not scale.shape[0] == Cout:
return False
else:
return False
# transform the scale to 1./scale for the real_div case
if scale_op.op_type == "real_div":
scale = 1.0 / scale
# get the type of the conv weight
conv_weight_type = conv_weight.dtype
# create bias for conv if not exist
if conv_bias is None:
conv_bias = np.zeros(Cout)
else:
conv_bias = conv_bias.val
conv_bias = conv_bias.astype(conv_weight_type)
# get the original shape of weight and bias
origin_weight_shape = conv_weight.shape
origin_bias_shape = conv_bias.shape
# update the weight/bias for conv layer
if is_scalar:
new_conv_bias = np.array(conv_bias * scale).astype(conv_weight_type)
new_conv_weight = np.array(conv_weight * scale).astype(conv_weight_type)
else:
scale = np.reshape(scale, (Cout))
new_conv_bias = np.array(conv_bias * scale).astype(conv_weight_type)
new_conv_weight = []
if is_deconv:
conv_weight = np.transpose(conv_weight, [1, 0, 2] if is_conv_1d else [1, 0, 2, 3])
conv_weight = np.reshape(
conv_weight, [Cout, Cin // groups] + list(conv_weight.shape[2:])
)
for i in range(Cout):
_conv_weight = conv_weight[i] * scale[i]
new_conv_weight.append(_conv_weight)
new_conv_weight = np.array(new_conv_weight).astype(conv_weight_type)
if is_deconv:
new_conv_weight = np.reshape(
new_conv_weight, [Cout // groups, Cin] + list(new_conv_weight.shape[2:])
)
new_conv_weight = np.transpose(
new_conv_weight, [1, 0, 2] if is_conv_1d else [1, 0, 2, 3]
)
# make sure the updated weight and bias have the same shape as the original ones
assert (
new_conv_weight.shape == origin_weight_shape
), "conv weight should have the same shape before and after the fuse_conv_scale pass."
assert (
new_conv_bias.shape == origin_bias_shape
), "conv bias should have the same shape before and after the fuse_conv_scale pass."
# create a new conv op with the new weight, bias value, copying rest of the attributes
out_name = scale_op.outputs[0].name
conv_kargs = {
"weight": new_conv_weight,
"bias": new_conv_bias,
"name": out_name,
"before_op": conv_op,
}
for k, v in conv_op.inputs.items():
if k in ["weight", "bias"]:
continue
conv_kargs[k] = v
if is_deconv:
x = mb.conv_transpose(**conv_kargs)
else:
x = mb.conv(**conv_kargs)
if scale_op.enclosing_block.try_replace_uses_of_var_after_op(
anchor_op=scale_op,
old_var=scale_op.outputs[0],
new_var=x,
):
scale_op.enclosing_block.remove_ops([conv_op, scale_op])
return True
return False
@block_context_manager
def _fuse_conv_scale_block(self, block):
def _match_pattern(op):
if op.op_type == "conv" or op.op_type == "conv_transpose":
# abort fusion if op output is also a block output
if op.outputs[0] in op.enclosing_block.outputs:
return None
# find batch_norm op
child_ops = op.outputs[0].child_ops
if len(child_ops) == 1:
scale_op_candidate = list(child_ops)[0]
if scale_op_candidate.op_type in ["mul", "real_div"]:
return scale_op_candidate
return None
fusion_occurred = False
for op in list(block.operations):
for b in op.blocks:
block_changed = True
while block_changed:
block_changed = self._fuse_conv_scale_block(b)
if len(op.blocks) > 0:
# This op can't be conv or conv_transpose
continue
scale_op = _match_pattern(op)
if scale_op is not None:
fusion_occurred = self._try_to_transform(op, scale_op)
# has to break as the downstream iterator is affected.
if fusion_occurred:
return fusion_occurred
return fusion_occurred
@register_pass(namespace="common")
class fuse_pad_conv(AbstractGraphPass):
"""
When we observe ``pad -> transpose -> conv``, we move the ``pad`` to be next to ``conv``.
This allows us to meld ``pad + conv`` if possible.
.. code-block::
Given:
%1 = pad(%0, ...)
%2 = transpose(%1, ...)
%3 = conv(%2, ...)
...
Result:
%1.a = transpose(%0, ...)
%2.a = pad(%1.a, ...)
%3 = conv(%2.a)
...
"""
def apply(self, prog):
for f in prog.functions.values():
block_changed = True
while block_changed:
block_changed = self._pad_conv_connect_block(f)
@staticmethod
def _match_pattern(op):
ret = set([])
child_ops = op.outputs[0].child_ops
for child_op in child_ops:
if child_op.op_type != "transpose":
continue
skip_ops = child_op.outputs[0].child_ops
for skip_op in skip_ops:
if "conv" not in skip_op.op_type:
continue
ret.update([child_op])
return ret if len(ret) != 0 else None
@staticmethod
def _try_to_transform(pad_op, transpose_ops, block):
def _compute_new_pad_values(transpose_op):
if pad_op.inputs["pad"].val is None:
return None
pad_amounts = np.reshape(pad_op.inputs["pad"].val, [-1, 2])
transpose_axes = transpose_op.inputs["perm"].val
rank_diff = len(transpose_axes) - pad_amounts.shape[0]
pad_amounts_new = copy.deepcopy(pad_amounts)
# append "rank_diff" rows of zeros to the top
pad_amounts_new = np.concatenate(
(np.zeros((2 * rank_diff)).reshape(-1, 2), pad_amounts_new)
)
pad_amounts_new = pad_amounts_new.astype(pad_amounts.dtype)
pad_amounts = np.concatenate((np.zeros((2 * rank_diff)).reshape(-1, 2), pad_amounts))
for i, axis in enumerate(transpose_axes):
pad_amounts_new[i][0] = pad_amounts[axis][0]
pad_amounts_new[i][1] = pad_amounts[axis][1]
# get the top "rank_diff" rows
top_rows = pad_amounts_new[:rank_diff, :]
if not np.all(top_rows == 0):
return False
# cut "rank_diff" from the top
pad_amounts_new = pad_amounts_new[rank_diff:, :]
pad_amounts_new = pad_amounts_new.flatten()
return pad_amounts_new
if pad_op.outputs[0] in pad_op.enclosing_block.outputs:
return False
if len(set(pad_op.outputs[0].child_ops)) != len(transpose_ops):
return False
for transpose_op in transpose_ops:
pad_amounts_new = _compute_new_pad_values(transpose_op)
if pad_amounts_new is None:
continue
with pad_op.enclosing_block:
new_transpose_var = mb.transpose(
x=pad_op.inputs["x"],
perm=transpose_op.inputs["perm"].val,
before_op=transpose_op,
)
new_pad_inputs = {"x": new_transpose_var, "pad": pad_amounts_new}
for k, v in pad_op.inputs.items():
if k not in new_pad_inputs:
new_pad_inputs[k] = v
new_pad_var = mb.pad(before_op=transpose_op, **new_pad_inputs)
pad_op.enclosing_block.replace_uses_of_var_after_op(
anchor_op=transpose_op, old_var=transpose_op.outputs[0], new_var=new_pad_var
)
pad_op.enclosing_block.remove_ops(list(transpose_ops) + [pad_op])
return True
@block_context_manager
def _pad_conv_connect_block(self, block):
fusion_status = False
for op in list(block.operations):
for b in op.blocks:
block_changed = True
while block_changed:
block_changed = self._pad_conv_connect_block(b)
if op.op_type != "pad":
continue
transpose_ops = self._match_pattern(op)
if transpose_ops is not None:
fusion_status = self._try_to_transform(op, transpose_ops, block)
# has to break as the downstream iterator is affected.
if fusion_status:
return fusion_status
return fusion_status
|
871c35613a93a9e32a94a2397ecba40e3ce23d6b
|
ac2f43c8e0d9649a7f063c59b3dffdfed9fd7ed7
|
/common/recipes-rest/rest-api/files/redfish_session_service.py
|
0ee827e24250582d2ca7ef19d23f7176f30df514
|
[] |
no_license
|
facebook/openbmc
|
bef10604ced226288600f55248b7f1be9945aea4
|
32777c66a8410d767eae15baabf71c61a0bef13c
|
refs/heads/helium
| 2023-08-17T03:13:54.729494
| 2023-08-16T23:24:18
| 2023-08-16T23:24:18
| 31,917,712
| 684
| 331
| null | 2023-07-25T21:19:08
| 2015-03-09T19:18:35
|
C
|
UTF-8
|
Python
| false
| false
| 1,208
|
py
|
redfish_session_service.py
|
from aiohttp import web
from common_utils import dumps_bytestr
from redfish_base import validate_keys
async def get_session_service(request: web.Request) -> web.Response:
body = {
"@odata.type": "#SessionService.v1_1_8.SessionService",
"Id": "SessionService",
"Name": "Session Service",
"Description": "Session Service",
"Status": {"State": "Enabled", "Health": "OK"},
"ServiceEnabled": True,
"SessionTimeout": 30,
"Sessions": {
"@odata.id": "/redfish/v1/SessionService/Sessions",
},
"@odata.id": "/redfish/v1/SessionService",
}
await validate_keys(body)
return web.json_response(body, dumps=dumps_bytestr)
async def get_session(request: web.Request) -> web.Response:
headers = {"Link": "</redfish/v1/schemas/SessionCollection.json>; rel=describedby"}
body = {
"@odata.type": "#SessionCollection.SessionCollection",
"Name": "Session Collection",
"Members@odata.count": 0,
"Members": [],
"@odata.id": "/redfish/v1/SessionService/Sessions",
}
await validate_keys(body)
return web.json_response(body, headers=headers, dumps=dumps_bytestr)
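# Hedged sketch (not part of the handlers above): wiring the two handlers into an
# aiohttp application. The route paths mirror the @odata.id values in the payloads;
# the real REST stack registers its routes elsewhere, so this is illustrative only.
def setup_session_service_routes(app: web.Application) -> None:
    app.router.add_get("/redfish/v1/SessionService", get_session_service)
    app.router.add_get("/redfish/v1/SessionService/Sessions", get_session)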
|
dce87e925a73f960b5e3433f93fd1aab5b510e62
|
7b8f1903ca25b20f2d62c8a49222e9ae9c3cb35b
|
/RST/ODSAextensions/odsa/odsatoctree/__init__.py
|
79303de421309ee24ae6991cee09a6fb31f6d83e
|
[
"MIT"
] |
permissive
|
OpenDSA/OpenDSA
|
21c1bc9b170e2143dec8c7c209783d2852be73e0
|
13c4d2c614c495b2f9c000f3e6734831698d3c8a
|
refs/heads/master
| 2023-09-06T09:58:51.148497
| 2023-09-05T02:16:37
| 2023-09-05T02:16:37
| 3,028,364
| 271
| 126
|
NOASSERTION
| 2023-08-27T20:45:33
| 2011-12-21T17:20:24
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 28
|
py
|
__init__.py
|
from .odsatoctree import *
|
9b6990e999ba66ab49b580404d6679c5cc886afc | 682459e3cea53ef14e531597dd612d4b5733fbea | /examples/multiple_linear_regression.py | a5f759cde44df004893db168a71837acc023e76c | ["BSD-3-Clause"] | permissive | pymanopt/pymanopt | 4bcdc2983631befcf88b194449158d5163df37be | acb52b216538ba5ed4871f025a0e49080b4475da | refs/heads/master | 2023-09-01T01:50:27.468578 | 2023-04-04T17:52:10 | 2023-04-04T17:52:10 | 45,385,612 | 647 | 155 | BSD-3-Clause | 2023-09-13T05:41:44 | 2015-11-02T09:45:08 | Python | UTF-8 | Python | false | false | 3,011 | py | multiple_linear_regression.py |
import autograd.numpy as np
import jax.numpy as jnp
import tensorflow as tf
import torch
import pymanopt
from examples._tools import ExampleRunner
from pymanopt.manifolds import Euclidean
from pymanopt.optimizers import TrustRegions
SUPPORTED_BACKENDS = ("autograd", "jax", "numpy", "pytorch", "tensorflow")
def create_cost_and_derivates(manifold, samples, targets, backend):
euclidean_gradient = euclidean_hessian = None
if backend == "autograd":
@pymanopt.function.autograd(manifold)
def cost(weights):
return np.linalg.norm(targets - samples @ weights) ** 2
elif backend == "jax":
@pymanopt.function.jax(manifold)
def cost(weights):
return jnp.linalg.norm(targets - samples @ weights) ** 2
elif backend == "numpy":
@pymanopt.function.numpy(manifold)
def cost(weights):
return np.linalg.norm(targets - samples @ weights) ** 2
@pymanopt.function.numpy(manifold)
def euclidean_gradient(weights):
return -2 * samples.T @ (targets - samples @ weights)
@pymanopt.function.numpy(manifold)
def euclidean_hessian(weights, vector):
return 2 * samples.T @ samples @ vector
elif backend == "pytorch":
samples_ = torch.from_numpy(samples)
targets_ = torch.from_numpy(targets)
@pymanopt.function.pytorch(manifold)
def cost(weights):
return torch.norm(targets_ - samples_ @ weights) ** 2
elif backend == "tensorflow":
@pymanopt.function.tensorflow(manifold)
def cost(weights):
return (
tf.norm(targets - tf.tensordot(samples, weights, axes=1)) ** 2
)
else:
raise ValueError(f"Unsupported backend '{backend}'")
return cost, euclidean_gradient, euclidean_hessian
def run(backend=SUPPORTED_BACKENDS[0], quiet=True):
num_samples, num_weights = 200, 3
optimizer = TrustRegions(verbosity=0)
manifold = Euclidean(3)
for k in range(5):
samples = np.random.normal(size=(num_samples, num_weights))
targets = np.random.normal(size=num_samples)
(
cost,
euclidean_gradient,
euclidean_hessian,
) = create_cost_and_derivates(manifold, samples, targets, backend)
problem = pymanopt.Problem(
manifold,
cost,
euclidean_gradient=euclidean_gradient,
euclidean_hessian=euclidean_hessian,
)
estimated_weights = optimizer.run(problem).point
if not quiet:
print(f"Run {k + 1}")
print(
"Weights found by pymanopt (top) / "
"closed form solution (bottom)"
)
print(estimated_weights)
print(np.linalg.pinv(samples) @ targets)
print("")
if __name__ == "__main__":
runner = ExampleRunner(
run, "Multiple linear regression", SUPPORTED_BACKENDS
)
runner.run()
|
e8fd5ac85389eb2c08fec389de03abc205a59661 | 438f82adbaa27bcb97cce0171f377ddc92586f48 | /pulumi/hcl2_type_reflection/hcl2_type_reflection/hcl2_type_reflection.py | 49a3ebce9f812c05ee76d4e16098825d5db4ad13 | ["Apache-2.0"] | permissive | grapl-security/grapl | 5f93599969ec604df25712c1d16648d16de67072 | b2c7ef263fb8134add2febb770da164ea7b4936f | refs/heads/main | 2023-08-12T11:38:11.167343 | 2022-12-26T15:28:55 | 2022-12-26T15:28:55 | 151,994,099 | 386 | 60 | Apache-2.0 | 2022-12-10T05:56:55 | 2018-10-07T23:28:27 | Rust | UTF-8 | Python | false | false | 2,305 | py | hcl2_type_reflection.py |
from typing import Any
from lark import Lark, Transformer
class HCL2TypeTransformer(Transformer):
def hcl2_type(self, item: list) -> Any:
return item[0]
def value(self, item: list) -> Any:
return item[0]
def map(self, item: list) -> dict[str, Any]:
return {"string": item[0]}
def pair(self, key_value: tuple) -> tuple[str, Any]:
k, v = key_value
return k[1:-1], v
list = list
object = dict
string_type = lambda self, _: "string"
number_type = lambda self, _: "number"
bool_type = lambda self, _: "bool"
class HCL2TypeParser:
def __init__(self) -> None:
self.parser = Lark(
r"""
hcl2_type: "${" value "}"
value: list
| object
| map
| "string" -> string_type
| "number" -> number_type
| "bool" -> bool_type
list : "[" [value ("," value)*] "]"
map : "map(" value ")"
object : "object({" [pair ("," pair)*] "})"
pair : STRING_LITERAL ":" "'" hcl2_type "'"
STRING_LITERAL : "'" _STRING_ESC_INNER "'"
%import common._STRING_ESC_INNER
%import common.WS
%ignore WS
""",
start="hcl2_type",
# Start speedup optimizations
parser="lalr",
# Disabling propagate_positions and placeholders slightly improves speed
propagate_positions=False,
maybe_placeholders=False,
# Using an internal transformer is faster and more memory efficient
transformer=HCL2TypeTransformer(),
)
def mock_hcl2_type(hcl2_type: Any) -> Any:
mock_string = "MOCK_STRING"
mock_bool = True
mock_number = 1
if isinstance(hcl2_type, str):
if hcl2_type == "string":
return mock_string
elif hcl2_type == "bool":
return mock_bool
elif hcl2_type == "number":
return mock_number
else:
return hcl2_type
if isinstance(hcl2_type, dict):
mocked_dict = {
mock_hcl2_type(k): mock_hcl2_type(v) for (k, v) in hcl2_type.items()
}
return mocked_dict
return hcl2_type
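# A minimal usage sketch (illustrative; the exact input strings are an
# assumption based on the grammar above): parse an HCL2 type expression,
# then substitute mock values for the leaf types.
#
#     parser = HCL2TypeParser().parser
#     parsed = parser.parse("${map(number)}")  # -> {"string": "number"}
#     mocked = mock_hcl2_type(parsed)          # -> {"MOCK_STRING": 1}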
|
fa23891fe01f2a82da3da272c9c150ff0365cc9d | 3405736c71d6224374437dba141815d03e95b89f | /test/sanity/user-agent/echo-user-agent.py | 95326a24830cb2ebe466f632260be9c55d56e56d | ["MIT"] | permissive | nwjs/nw.js | dd1338335ec7306b94203f46629fa83081e1c364 | c2184a6edae6d6cca99d199ad5fedd37e4af3fad | refs/heads/nw78 | 2023-08-31T13:00:22.395612 | 2023-07-30T22:47:46 | 2023-07-30T22:47:46 | 3,100,121 | 28,744 | 3,512 | MIT | 2023-08-17T22:23:55 | 2012-01-04T06:21:10 | JavaScript | UTF-8 | Python | false | false | 599 | py | echo-user-agent.py |
#!/usr/bin/env python
from http.server import HTTPServer, BaseHTTPRequestHandler
class RequestHandler(BaseHTTPRequestHandler):
def do_GET(self):
request_path = self.path
print(self.headers['User-Agent'])
self.send_response(200)
self.send_header("Set-Cookie", "foo=bar")
def log_request(self, code):
pass
def main():
port = 3456
    print('Listening on localhost:%s' % port)
server = HTTPServer(('', port), RequestHandler)
server.serve_forever()
if __name__ == "__main__":
main()
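# Quick manual check (not part of the original script): with the server
# running, `curl -A "test-agent" http://localhost:3456/` should print
# "test-agent" on the server's stdout.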
|
f774119e676a42a0c2bd067551e85c04f24faf2d | 66a9c25cf0c53e2c3029b423018b856103d709d4 | /tests/test_stanza_error.py | d95a33ce2fc4c0b9f7c5e7a083240b9cd7113767 | ["MIT", "BSD-3-Clause", "BSD-2-Clause"] | permissive | fritzy/SleekXMPP | 1b02d3e2b22efeb6bf3f8f487e6c0343b9b85baf | cc1d470397de768ffcc41d2ed5ac3118d19f09f5 | refs/heads/develop | 2020-05-22T04:14:58.568822 | 2020-02-18T22:54:57 | 2020-02-18T22:54:57 | 463,405 | 658 | 254 | NOASSERTION | 2023-06-27T20:05:54 | 2010-01-08T05:54:45 | Python | UTF-8 | Python | false | false | 2,520 | py | test_stanza_error.py |
import unittest
from sleekxmpp.test import SleekTest
class TestErrorStanzas(SleekTest):
def setUp(self):
# Ensure that the XEP-0086 plugin has been loaded.
self.stream_start()
self.stream_close()
def testSetup(self):
"""Test setting initial values in error stanza."""
msg = self.Message()
msg.enable('error')
self.check(msg, """
<message type="error">
<error type="cancel" code="501">
<feature-not-implemented xmlns="urn:ietf:params:xml:ns:xmpp-stanzas" />
</error>
</message>
""")
def testCondition(self):
"""Test modifying the error condition."""
msg = self.Message()
msg['error']['condition'] = 'item-not-found'
self.check(msg, """
<message type="error">
<error type="cancel" code="404">
<item-not-found xmlns="urn:ietf:params:xml:ns:xmpp-stanzas" />
</error>
</message>
""")
        self.assertTrue(msg['error']['condition'] == 'item-not-found', "Error condition doesn't match.")
msg['error']['condition'] = 'resource-constraint'
self.check(msg, """
<message type="error">
<error type="wait" code="500">
<resource-constraint xmlns="urn:ietf:params:xml:ns:xmpp-stanzas" />
</error>
</message>
""")
def testDelCondition(self):
"""Test that deleting error conditions doesn't remove extra elements."""
msg = self.Message()
msg['error']['text'] = 'Error!'
msg['error']['condition'] = 'internal-server-error'
del msg['error']['condition']
self.check(msg, """
<message type="error">
<error type="wait" code="500">
<text xmlns="urn:ietf:params:xml:ns:xmpp-stanzas">Error!</text>
</error>
</message>
""", use_values=False)
def testDelText(self):
"""Test deleting the text of an error."""
msg = self.Message()
        msg['error']['text'] = 'Error!'
msg['error']['condition'] = 'internal-server-error'
del msg['error']['text']
self.check(msg, """
<message type="error">
<error type="wait" code="500">
<internal-server-error xmlns="urn:ietf:params:xml:ns:xmpp-stanzas" />
</error>
</message>
""")
suite = unittest.TestLoader().loadTestsFromTestCase(TestErrorStanzas)
|
1039d7976791fe7cba700507abb37e2b7a3555c5 | fa3f6d4e9169fb95f828013d179d03accdff381b | /grr/server/grr_response_server/databases/mem_flows.py | ef8051deb8a871330ab87d68084f516ad187f6c0 | ["Apache-2.0"] | permissive | google/grr | c51a2bd251ed2f7adae538541990a2cc01fdcc8c | 44c0eb8c938302098ef7efae8cfd6b90bcfbb2d6 | refs/heads/master | 2023-09-05T20:02:36.823914 | 2023-07-26T09:34:09 | 2023-07-26T09:34:09 | 14,909,673 | 4,683 | 927 | Apache-2.0 | 2023-07-26T09:34:10 | 2013-12-04T00:17:53 | Python | UTF-8 | Python | false | false | 36,743 | py | mem_flows.py |
#!/usr/bin/env python
"""The in memory database methods for flow handling."""
import collections
import logging
import sys
import threading
import time
from typing import Dict
from typing import Iterable
from typing import List
from typing import Optional
from typing import Sequence
from typing import Text
from grr_response_core.lib import rdfvalue
from grr_response_core.lib import utils
from grr_response_core.lib.rdfvalues import flows as rdf_flows
from grr_response_server.databases import db
from grr_response_server.rdfvalues import flow_objects as rdf_flow_objects
from grr_response_server.rdfvalues import hunt_objects as rdf_hunt_objects
from grr_response_server.rdfvalues import objects as rdf_objects
class Error(Exception):
"""Base class for exceptions triggered in this package."""
class TimeOutWhileWaitingForFlowsToBeProcessedError(Error):
"""Raised by WaitUntilNoFlowsToProcess when waiting longer than time limit."""
class InMemoryDBFlowMixin(object):
"""InMemoryDB mixin for flow handling."""
@utils.Synchronized
def WriteMessageHandlerRequests(self, requests):
"""Writes a list of message handler requests to the database."""
now = rdfvalue.RDFDatetime.Now()
for r in requests:
flow_dict = self.message_handler_requests.setdefault(r.handler_name, {})
cloned_request = r.Copy()
cloned_request.timestamp = now
flow_dict[cloned_request.request_id] = cloned_request
@utils.Synchronized
def ReadMessageHandlerRequests(self):
"""Reads all message handler requests from the database."""
res = []
leases = self.message_handler_leases
for requests in self.message_handler_requests.values():
for r in requests.values():
res.append(r.Copy())
existing_lease = leases.get(r.handler_name, {}).get(r.request_id, None)
res[-1].leased_until = existing_lease
return sorted(res, key=lambda r: r.timestamp, reverse=True)
@utils.Synchronized
def DeleteMessageHandlerRequests(self, requests):
"""Deletes a list of message handler requests from the database."""
for r in requests:
flow_dict = self.message_handler_requests.get(r.handler_name, {})
if r.request_id in flow_dict:
del flow_dict[r.request_id]
flow_dict = self.message_handler_leases.get(r.handler_name, {})
if r.request_id in flow_dict:
del flow_dict[r.request_id]
def RegisterMessageHandler(self, handler, lease_time, limit=1000):
"""Leases a number of message handler requests up to the indicated limit."""
self.UnregisterMessageHandler()
self.handler_stop = False
self.handler_thread = threading.Thread(
name="message_handler",
target=self._MessageHandlerLoop,
args=(handler, lease_time, limit))
self.handler_thread.daemon = True
self.handler_thread.start()
def UnregisterMessageHandler(self, timeout=None):
"""Unregisters any registered message handler."""
if self.handler_thread:
self.handler_stop = True
self.handler_thread.join(timeout)
if self.handler_thread.is_alive():
raise RuntimeError("Message handler thread did not join in time.")
self.handler_thread = None
def _MessageHandlerLoop(self, handler, lease_time, limit):
while not self.handler_stop:
try:
msgs = self._LeaseMessageHandlerRequests(lease_time, limit)
if msgs:
handler(msgs)
else:
time.sleep(0.2)
except Exception as e: # pylint: disable=broad-except
logging.exception("_LeaseMessageHandlerRequests raised %s.", e)
@utils.Synchronized
def _LeaseMessageHandlerRequests(self, lease_time, limit):
"""Read and lease some outstanding message handler requests."""
leased_requests = []
now = rdfvalue.RDFDatetime.Now()
zero = rdfvalue.RDFDatetime.FromSecondsSinceEpoch(0)
expiration_time = now + lease_time
leases = self.message_handler_leases
for requests in self.message_handler_requests.values():
for r in requests.values():
existing_lease = leases.get(r.handler_name, {}).get(r.request_id, zero)
if existing_lease < now:
leases.setdefault(r.handler_name, {})[r.request_id] = expiration_time
r.leased_until = expiration_time
r.leased_by = utils.ProcessIdString()
leased_requests.append(r)
if len(leased_requests) >= limit:
break
return leased_requests
@utils.Synchronized
def ReadAllClientActionRequests(self, client_id):
"""Reads all client action requests available for a given client_id."""
res = []
for key, orig_request in self.client_action_requests.items():
request_client_id, _, _ = key
if request_client_id != client_id:
continue
request = orig_request.Copy()
current_lease = self.client_action_request_leases.get(key)
request.ttl = db.Database.CLIENT_MESSAGES_TTL
if current_lease is not None:
request.leased_until, request.leased_by, leased_count = current_lease
request.ttl -= leased_count
else:
request.leased_until = None
request.leased_by = None
res.append(request)
return res
def _DeleteClientActionRequest(self, client_id, flow_id, request_id):
key = (client_id, flow_id, request_id)
self.client_action_requests.pop(key, None)
self.client_action_request_leases.pop(key, None)
@utils.Synchronized
def DeleteClientActionRequests(self, requests):
"""Deletes a list of client action requests from the db."""
to_delete = []
for r in requests:
to_delete.append((r.client_id, r.flow_id, r.request_id))
if len(set(to_delete)) != len(to_delete):
raise ValueError(
"Received multiple copies of the same action request to delete.")
for client_id, flow_id, request_id in to_delete:
self._DeleteClientActionRequest(client_id, flow_id, request_id)
@utils.Synchronized
def LeaseClientActionRequests(self,
client_id,
lease_time=None,
limit=sys.maxsize):
"""Leases available client action requests for a client."""
leased_requests = []
now = rdfvalue.RDFDatetime.Now()
expiration_time = now + lease_time
process_id_str = utils.ProcessIdString()
leases = self.client_action_request_leases
# Can't use an iterator here since the dict might change when requests get
# deleted.
for key, request in sorted(self.client_action_requests.items()):
if key[0] != client_id:
continue
existing_lease = leases.get(key)
if not existing_lease or existing_lease[0] < now:
if existing_lease:
lease_count = existing_lease[-1] + 1
if lease_count > db.Database.CLIENT_MESSAGES_TTL:
self._DeleteClientActionRequest(*key)
continue
else:
lease_count = 1
leases[key] = (expiration_time, process_id_str, lease_count)
request.leased_until = expiration_time
request.leased_by = process_id_str
request.ttl = db.Database.CLIENT_MESSAGES_TTL - lease_count
leased_requests.append(request)
if len(leased_requests) >= limit:
break
return leased_requests
@utils.Synchronized
def WriteClientActionRequests(self, requests):
"""Writes messages that should go to the client to the db."""
for r in requests:
req_dict = self.flow_requests.get((r.client_id, r.flow_id), {})
if r.request_id not in req_dict:
request_keys = [(r.client_id, r.flow_id, r.request_id) for r in requests
]
raise db.AtLeastOneUnknownRequestError(request_keys)
for r in requests:
request_key = (r.client_id, r.flow_id, r.request_id)
self.client_action_requests[request_key] = r
@utils.Synchronized
def WriteFlowObject(self, flow_obj, allow_update=True):
"""Writes a flow object to the database."""
if flow_obj.client_id not in self.metadatas:
raise db.UnknownClientError(flow_obj.client_id)
key = (flow_obj.client_id, flow_obj.flow_id)
if not allow_update and key in self.flows:
raise db.FlowExistsError(flow_obj.client_id, flow_obj.flow_id)
now = rdfvalue.RDFDatetime.Now()
clone = flow_obj.Copy()
clone.last_update_time = now
clone.create_time = now
self.flows[key] = clone
@utils.Synchronized
def ReadFlowObject(self, client_id, flow_id):
"""Reads a flow object from the database."""
try:
return self.flows[(client_id, flow_id)].Copy()
except KeyError:
raise db.UnknownFlowError(client_id, flow_id)
@utils.Synchronized
def ReadAllFlowObjects(
self,
client_id: Optional[Text] = None,
parent_flow_id: Optional[str] = None,
min_create_time: Optional[rdfvalue.RDFDatetime] = None,
max_create_time: Optional[rdfvalue.RDFDatetime] = None,
include_child_flows: bool = True,
not_created_by: Optional[Iterable[str]] = None,
) -> List[rdf_flow_objects.Flow]:
"""Returns all flow objects."""
res = []
for flow in self.flows.values():
if ((client_id is None or flow.client_id == client_id) and
(parent_flow_id is None or flow.parent_flow_id == parent_flow_id) and
(min_create_time is None or flow.create_time >= min_create_time) and
(max_create_time is None or flow.create_time <= max_create_time) and
(include_child_flows or not flow.parent_flow_id) and
(not_created_by is None or flow.creator not in not_created_by)):
res.append(flow.Copy())
return res
@utils.Synchronized
def LeaseFlowForProcessing(self, client_id, flow_id, processing_time):
"""Marks a flow as being processed on this worker and returns it."""
rdf_flow = self.ReadFlowObject(client_id, flow_id)
if rdf_flow.parent_hunt_id:
rdf_hunt = self.ReadHuntObject(rdf_flow.parent_hunt_id)
if not rdf_hunt_objects.IsHuntSuitableForFlowProcessing(
rdf_hunt.hunt_state):
raise db.ParentHuntIsNotRunningError(client_id, flow_id,
rdf_hunt.hunt_id,
rdf_hunt.hunt_state)
now = rdfvalue.RDFDatetime.Now()
if rdf_flow.processing_on and rdf_flow.processing_deadline > now:
raise ValueError("Flow %s on client %s is already being processed." %
(flow_id, client_id))
processing_deadline = now + processing_time
process_id_string = utils.ProcessIdString()
self.UpdateFlow(
client_id,
flow_id,
processing_on=process_id_string,
processing_since=now,
processing_deadline=processing_deadline)
rdf_flow.processing_on = process_id_string
rdf_flow.processing_since = now
rdf_flow.processing_deadline = processing_deadline
return rdf_flow
@utils.Synchronized
def UpdateFlow(self,
client_id,
flow_id,
flow_obj=db.Database.unchanged,
flow_state=db.Database.unchanged,
client_crash_info=db.Database.unchanged,
processing_on=db.Database.unchanged,
processing_since=db.Database.unchanged,
processing_deadline=db.Database.unchanged):
"""Updates flow objects in the database."""
try:
flow = self.flows[(client_id, flow_id)]
except KeyError:
raise db.UnknownFlowError(client_id, flow_id)
if flow_obj != db.Database.unchanged:
new_flow = flow_obj.Copy()
# Some fields cannot be updated.
new_flow.client_id = flow.client_id
new_flow.flow_id = flow.flow_id
new_flow.long_flow_id = flow.long_flow_id
new_flow.parent_flow_id = flow.parent_flow_id
new_flow.parent_hunt_id = flow.parent_hunt_id
new_flow.flow_class_name = flow.flow_class_name
new_flow.creator = flow.creator
self.flows[(client_id, flow_id)] = new_flow
flow = new_flow
if flow_state != db.Database.unchanged:
flow.flow_state = flow_state
if client_crash_info != db.Database.unchanged:
flow.client_crash_info = client_crash_info
if processing_on != db.Database.unchanged:
flow.processing_on = processing_on
if processing_since != db.Database.unchanged:
flow.processing_since = processing_since
if processing_deadline != db.Database.unchanged:
flow.processing_deadline = processing_deadline
flow.last_update_time = rdfvalue.RDFDatetime.Now()
@utils.Synchronized
def WriteFlowRequests(self, requests):
"""Writes a list of flow requests to the database."""
flow_processing_requests = []
for request in requests:
if (request.client_id, request.flow_id) not in self.flows:
raise db.AtLeastOneUnknownFlowError([(request.client_id,
request.flow_id)])
for request in requests:
key = (request.client_id, request.flow_id)
request_dict = self.flow_requests.setdefault(key, {})
request_dict[request.request_id] = request.Copy()
request_dict[request.request_id].timestamp = rdfvalue.RDFDatetime.Now()
if request.needs_processing:
flow = self.flows[(request.client_id, request.flow_id)]
if (
flow.next_request_to_process == request.request_id
or request.start_time is not None
):
flow_processing_requests.append(
rdf_flows.FlowProcessingRequest(
client_id=request.client_id,
flow_id=request.flow_id,
delivery_time=request.start_time))
if flow_processing_requests:
self.WriteFlowProcessingRequests(flow_processing_requests)
@utils.Synchronized
def UpdateIncrementalFlowRequests(
self, client_id: str, flow_id: str,
next_response_id_updates: Dict[int, int]) -> None:
"""Updates incremental flow requests."""
if (client_id, flow_id) not in self.flows:
raise db.UnknownFlowError(client_id, flow_id)
request_dict = self.flow_requests[(client_id, flow_id)]
for request_id, next_response_id in next_response_id_updates.items():
request_dict[request_id].next_response_id = next_response_id
request_dict[request_id].timestamp = rdfvalue.RDFDatetime.Now()
@utils.Synchronized
def DeleteFlowRequests(self, requests):
"""Deletes a list of flow requests from the database."""
for request in requests:
if (request.client_id, request.flow_id) not in self.flows:
raise db.UnknownFlowError(request.client_id, request.flow_id)
for request in requests:
key = (request.client_id, request.flow_id)
request_dict = self.flow_requests.get(key, {})
try:
del request_dict[request.request_id]
except KeyError:
raise db.UnknownFlowRequestError(request.client_id, request.flow_id,
request.request_id)
response_dict = self.flow_responses.get(key, {})
try:
del response_dict[request.request_id]
except KeyError:
pass
@utils.Synchronized
def WriteFlowResponses(self, responses):
"""Writes FlowMessages and updates corresponding requests."""
status_available = {}
requests_updated = set()
task_ids_by_request = {}
for response in responses:
flow_key = (response.client_id, response.flow_id)
if flow_key not in self.flows:
logging.error("Received response for unknown flow %s, %s.",
response.client_id, response.flow_id)
continue
request_dict = self.flow_requests.get(flow_key, {})
if response.request_id not in request_dict:
logging.error("Received response for unknown request %s, %s, %d.",
response.client_id, response.flow_id, response.request_id)
continue
response_dict = self.flow_responses.setdefault(flow_key, {})
clone = response.Copy()
clone.timestamp = rdfvalue.RDFDatetime.Now()
response_dict.setdefault(response.request_id,
{})[response.response_id] = clone
if isinstance(response, rdf_flow_objects.FlowStatus):
status_available[(response.client_id, response.flow_id,
response.request_id, response.response_id)] = response
request_key = (response.client_id, response.flow_id, response.request_id)
requests_updated.add(request_key)
try:
task_ids_by_request[request_key] = response.task_id
except AttributeError:
pass
# Every time we get a status we store how many responses are expected.
for status in status_available.values():
request_dict = self.flow_requests[(status.client_id, status.flow_id)]
request = request_dict[status.request_id]
request.nr_responses_expected = status.response_id
# And we check for all updated requests if we need to process them.
needs_processing = []
for client_id, flow_id, request_id in requests_updated:
flow_key = (client_id, flow_id)
flow = self.flows[flow_key]
request_dict = self.flow_requests[flow_key]
request = request_dict[request_id]
added_for_processing = False
if request.nr_responses_expected and not request.needs_processing:
response_dict = self.flow_responses.setdefault(flow_key, {})
responses = response_dict.get(request_id, {})
if len(responses) == request.nr_responses_expected:
request.needs_processing = True
self._DeleteClientActionRequest(client_id, flow_id, request_id)
if flow.next_request_to_process == request_id:
added_for_processing = True
needs_processing.append(
rdf_flows.FlowProcessingRequest(
client_id=client_id, flow_id=flow_id))
if (request.callback_state and
flow.next_request_to_process == request_id and
not added_for_processing):
needs_processing.append(
rdf_flows.FlowProcessingRequest(
client_id=client_id, flow_id=flow_id))
if needs_processing:
self.WriteFlowProcessingRequests(needs_processing)
return needs_processing
@utils.Synchronized
def ReadAllFlowRequestsAndResponses(self, client_id, flow_id):
"""Reads all requests and responses for a given flow from the database."""
flow_key = (client_id, flow_id)
try:
self.flows[flow_key]
except KeyError:
return []
request_dict = self.flow_requests.get(flow_key, {})
response_dict = self.flow_responses.get(flow_key, {})
res = []
for request_id in sorted(request_dict):
res.append((request_dict[request_id], response_dict.get(request_id, {})))
return res
@utils.Synchronized
def DeleteAllFlowRequestsAndResponses(self, client_id, flow_id):
"""Deletes all requests and responses for a given flow from the database."""
flow_key = (client_id, flow_id)
try:
self.flows[flow_key]
except KeyError:
raise db.UnknownFlowError(client_id, flow_id)
try:
del self.flow_requests[flow_key]
except KeyError:
pass
try:
del self.flow_responses[flow_key]
except KeyError:
pass
@utils.Synchronized
def ReadFlowRequestsReadyForProcessing(self,
client_id,
flow_id,
next_needed_request=None):
"""Reads all requests for a flow that can be processed by the worker."""
request_dict = self.flow_requests.get((client_id, flow_id), {})
response_dict = self.flow_responses.get((client_id, flow_id), {})
# Do a pass for completed requests.
res = {}
for request_id in sorted(request_dict):
# Ignore outdated requests.
if request_id < next_needed_request:
continue
# The request we are currently looking for is not in yet, we are done.
if request_id != next_needed_request:
break
request = request_dict[request_id]
if not request.needs_processing:
break
responses = sorted(
response_dict.get(request_id, {}).values(),
key=lambda response: response.response_id)
# Serialize/deserialize responses to better simulate the
# real DB behavior (where serialization/deserialization is almost
# guaranteed to be done).
# TODO(user): change mem-db implementation to do
# serialization/deserialization everywhere in a generic way.
responses = [
r.__class__.FromSerializedBytes(r.SerializeToBytes())
for r in responses
]
res[request_id] = (request, responses)
next_needed_request += 1
# Do a pass for incremental requests.
for request_id in request_dict:
# Ignore outdated and processed requests.
if request_id < next_needed_request:
continue
request = request_dict[request_id]
if not request.callback_state:
continue
responses = response_dict.get(request_id, {}).values()
responses = [
r for r in responses if r.response_id >= request.next_response_id
]
responses = sorted(responses, key=lambda response: response.response_id)
# Serialize/deserialize responses to better simulate the
# real DB behavior (where serialization/deserialization is almost
# guaranteed to be done).
# TODO(user): change mem-db implementation to do
# serialization/deserialization everywhere in a generic way.
responses = [
r.__class__.FromSerializedBytes(r.SerializeToBytes())
for r in responses
]
res[request_id] = (request, responses)
return res
@utils.Synchronized
def ReleaseProcessedFlow(self, flow_obj):
"""Releases a flow that the worker was processing to the database."""
key = (flow_obj.client_id, flow_obj.flow_id)
next_id_to_process = flow_obj.next_request_to_process
request_dict = self.flow_requests.get(key, {})
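    # If the next request in line is already fully answered, the flow still has
    # work queued, so keep the lease and signal the caller to continue.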
if (
next_id_to_process in request_dict
and request_dict[next_id_to_process].needs_processing
):
start_time = request_dict[next_id_to_process].start_time
if start_time is None or start_time < rdfvalue.RDFDatetime.Now():
return False
self.UpdateFlow(
flow_obj.client_id,
flow_obj.flow_id,
flow_obj=flow_obj,
processing_on=None,
processing_since=None,
processing_deadline=None)
return True
def _InlineProcessingOK(self, requests):
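    """Returns True iff none of the given requests has a delayed delivery time."""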
for r in requests:
if r.delivery_time is not None:
return False
return True
@utils.Synchronized
def WriteFlowProcessingRequests(self, requests):
"""Writes a list of flow processing requests to the database."""
# If we don't have a handler thread running, we might be able to process the
# requests inline. If we are not, we start the handler thread for real and
# queue the requests normally.
if not self.flow_handler_thread and self.flow_handler_target:
if self._InlineProcessingOK(requests):
for r in requests:
self.flow_handler_target(r)
return
else:
self._RegisterFlowProcessingHandler(self.flow_handler_target)
self.flow_handler_target = None
now = rdfvalue.RDFDatetime.Now()
for r in requests:
cloned_request = r.Copy()
cloned_request.timestamp = now
key = (r.client_id, r.flow_id)
self.flow_processing_requests[key] = cloned_request
@utils.Synchronized
def ReadFlowProcessingRequests(self):
"""Reads all flow processing requests from the database."""
return list(self.flow_processing_requests.values())
@utils.Synchronized
def AckFlowProcessingRequests(self, requests):
"""Deletes a list of flow processing requests from the database."""
for r in requests:
key = (r.client_id, r.flow_id)
if key in self.flow_processing_requests:
del self.flow_processing_requests[key]
@utils.Synchronized
def DeleteAllFlowProcessingRequests(self):
self.flow_processing_requests = {}
def RegisterFlowProcessingHandler(self, handler):
"""Registers a message handler to receive flow processing messages."""
self.UnregisterFlowProcessingHandler()
# For the in memory db, we just call the handler straight away if there is
# no delay in starting times so we don't run the thread here.
self.flow_handler_target = handler
for request in self._GetFlowRequestsReadyForProcessing():
handler(request)
with self.lock:
self.flow_processing_requests.pop((request.client_id, request.flow_id),
None)
def _RegisterFlowProcessingHandler(self, handler):
"""Registers a handler to receive flow processing messages."""
self.flow_handler_stop = False
self.flow_handler_thread = threading.Thread(
name="flow_processing_handler",
target=self._HandleFlowProcessingRequestLoop,
args=(handler,))
self.flow_handler_thread.daemon = True
self.flow_handler_thread.start()
def UnregisterFlowProcessingHandler(self, timeout=None):
"""Unregisters any registered flow processing handler."""
self.flow_handler_target = None
if self.flow_handler_thread:
self.flow_handler_stop = True
self.flow_handler_thread.join(timeout)
if self.flow_handler_thread.is_alive():
raise RuntimeError("Flow processing handler did not join in time.")
self.flow_handler_thread = None
@utils.Synchronized
def _GetFlowRequestsReadyForProcessing(self):
now = rdfvalue.RDFDatetime.Now()
todo = []
for r in list(self.flow_processing_requests.values()):
if r.delivery_time is None or r.delivery_time <= now:
todo.append(r)
return todo
def WaitUntilNoFlowsToProcess(self, timeout=None):
"""Waits until flow processing thread is done processing flows.
Args:
timeout: If specified, is a max number of seconds to spend waiting.
Raises:
TimeOutWhileWaitingForFlowsToBeProcessedError: if timeout is reached.
"""
t = self.flow_handler_thread
if not t:
return
start_time = time.time()
while True:
with self.lock:
# If the thread is dead, or there are no requests
# to be processed/being processed, we stop waiting
# and return from the function.
if (not t.is_alive() or
(not self._GetFlowRequestsReadyForProcessing() and
not self.flow_handler_num_being_processed)):
return
time.sleep(0.2)
if timeout and time.time() - start_time > timeout:
raise TimeOutWhileWaitingForFlowsToBeProcessedError(
"Flow processing didn't finish in time.")
def _HandleFlowProcessingRequestLoop(self, handler):
"""Handler thread for the FlowProcessingRequest queue."""
while not self.flow_handler_stop:
with self.lock:
todo = self._GetFlowRequestsReadyForProcessing()
for request in todo:
self.flow_handler_num_being_processed += 1
del self.flow_processing_requests[(request.client_id,
request.flow_id)]
for request in todo:
handler(request)
with self.lock:
self.flow_handler_num_being_processed -= 1
time.sleep(0.2)
@utils.Synchronized
def _WriteFlowResultsOrErrors(self, container, items):
for i in items:
dest = container.setdefault((i.client_id, i.flow_id), [])
to_write = i.Copy()
to_write.timestamp = rdfvalue.RDFDatetime.Now()
dest.append(to_write)
def WriteFlowResults(self, results):
"""Writes flow results for a given flow."""
self._WriteFlowResultsOrErrors(self.flow_results, results)
@utils.Synchronized
def _ReadFlowResultsOrErrors(self,
container,
client_id,
flow_id,
offset,
count,
with_tag=None,
with_type=None,
with_substring=None):
"""Reads flow results/errors of a given flow using given query options."""
results = sorted(
[x.Copy() for x in container.get((client_id, flow_id), [])],
key=lambda r: r.timestamp)
# This is done in order to pass the tests that try to deserialize
# value of an unrecognized type.
for r in results:
cls_name = r.payload.__class__.__name__
if cls_name not in rdfvalue.RDFValue.classes:
r.payload = rdf_objects.SerializedValueOfUnrecognizedType(
type_name=cls_name, value=r.payload.SerializeToBytes())
if with_tag is not None:
results = [i for i in results if i.tag == with_tag]
if with_type is not None:
results = [
i for i in results if i.payload.__class__.__name__ == with_type
]
if with_substring is not None:
encoded_substring = with_substring.encode("utf8")
results = [
i for i in results
if encoded_substring in i.payload.SerializeToBytes()
]
return results[offset:offset + count]
def ReadFlowResults(self,
client_id,
flow_id,
offset,
count,
with_tag=None,
with_type=None,
with_substring=None):
"""Reads flow results of a given flow using given query options."""
return self._ReadFlowResultsOrErrors(
self.flow_results,
client_id,
flow_id,
offset,
count,
with_tag=with_tag,
with_type=with_type,
with_substring=with_substring)
@utils.Synchronized
def CountFlowResults(self, client_id, flow_id, with_tag=None, with_type=None):
"""Counts flow results of a given flow using given query options."""
return len(
self.ReadFlowResults(
client_id,
flow_id,
0,
sys.maxsize,
with_tag=with_tag,
with_type=with_type))
@utils.Synchronized
def CountFlowResultsByType(self, client_id, flow_id):
"""Returns counts of flow results grouped by result type."""
result = collections.Counter()
for hr in self.ReadFlowResults(client_id, flow_id, 0, sys.maxsize):
key = hr.payload.__class__.__name__
result[key] += 1
return result
def WriteFlowErrors(self, errors):
"""Writes flow errors for a given flow."""
# Errors are similar to results, as they represent a somewhat related
# concept. Error is a kind of a negative result. Given the structural
# similarity, we can share large chunks of implementation between
# errors and results DB code.
self._WriteFlowResultsOrErrors(self.flow_errors, errors)
def ReadFlowErrors(self,
client_id,
flow_id,
offset,
count,
with_tag=None,
with_type=None):
"""Reads flow errors of a given flow using given query options."""
# Errors are similar to results, as they represent a somewhat related
# concept. Error is a kind of a negative result. Given the structural
# similarity, we can share large chunks of implementation between
# errors and results DB code.
return self._ReadFlowResultsOrErrors(
self.flow_errors,
client_id,
flow_id,
offset,
count,
with_tag=with_tag,
with_type=with_type)
@utils.Synchronized
def CountFlowErrors(self, client_id, flow_id, with_tag=None, with_type=None):
"""Counts flow errors of a given flow using given query options."""
return len(
self.ReadFlowErrors(
client_id,
flow_id,
0,
sys.maxsize,
with_tag=with_tag,
with_type=with_type))
@utils.Synchronized
def CountFlowErrorsByType(self, client_id, flow_id):
"""Returns counts of flow errors grouped by error type."""
result = collections.Counter()
for hr in self.ReadFlowErrors(client_id, flow_id, 0, sys.maxsize):
key = hr.payload.__class__.__name__
result[key] += 1
return result
@utils.Synchronized
def WriteFlowLogEntry(self, entry: rdf_flow_objects.FlowLogEntry) -> None:
"""Writes a single flow log entry to the database."""
key = (entry.client_id, entry.flow_id)
if key not in self.flows:
raise db.UnknownFlowError(entry.client_id, entry.flow_id)
entry = entry.Copy()
entry.timestamp = rdfvalue.RDFDatetime.Now()
self.flow_log_entries.setdefault(key, []).append(entry)
@utils.Synchronized
def ReadFlowLogEntries(self,
client_id,
flow_id,
offset,
count,
with_substring=None):
"""Reads flow log entries of a given flow using given query options."""
entries = sorted(
self.flow_log_entries.get((client_id, flow_id), []),
key=lambda e: e.timestamp)
if with_substring is not None:
entries = [i for i in entries if with_substring in i.message]
return entries[offset:offset + count]
@utils.Synchronized
def CountFlowLogEntries(self, client_id, flow_id):
"""Returns number of flow log entries of a given flow."""
return len(self.ReadFlowLogEntries(client_id, flow_id, 0, sys.maxsize))
@utils.Synchronized
def WriteFlowOutputPluginLogEntry(
self,
entry: rdf_flow_objects.FlowOutputPluginLogEntry,
) -> None:
"""Writes a single output plugin log entry to the database."""
key = (entry.client_id, entry.flow_id)
if key not in self.flows:
raise db.UnknownFlowError(entry.client_id, entry.flow_id)
entry = entry.Copy()
entry.timestamp = rdfvalue.RDFDatetime.Now()
self.flow_output_plugin_log_entries.setdefault(key, []).append(entry)
@utils.Synchronized
def ReadFlowOutputPluginLogEntries(self,
client_id,
flow_id,
output_plugin_id,
offset,
count,
with_type=None):
"""Reads flow output plugin log entries."""
entries = sorted(
self.flow_output_plugin_log_entries.get((client_id, flow_id), []),
key=lambda e: e.timestamp)
entries = [e for e in entries if e.output_plugin_id == output_plugin_id]
if with_type is not None:
entries = [e for e in entries if e.log_entry_type == with_type]
return entries[offset:offset + count]
@utils.Synchronized
def CountFlowOutputPluginLogEntries(self,
client_id,
flow_id,
output_plugin_id,
with_type=None):
"""Returns number of flow output plugin log entries of a given flow."""
return len(
self.ReadFlowOutputPluginLogEntries(
client_id,
flow_id,
output_plugin_id,
0,
sys.maxsize,
with_type=with_type))
@utils.Synchronized
def WriteScheduledFlow(
self, scheduled_flow: rdf_flow_objects.ScheduledFlow) -> None:
"""See base class."""
if scheduled_flow.client_id not in self.metadatas:
raise db.UnknownClientError(scheduled_flow.client_id)
if scheduled_flow.creator not in self.users:
raise db.UnknownGRRUserError(scheduled_flow.creator)
full_id = (scheduled_flow.client_id, scheduled_flow.creator,
scheduled_flow.scheduled_flow_id)
self.scheduled_flows[full_id] = scheduled_flow.Copy()
@utils.Synchronized
def DeleteScheduledFlow(self, client_id: str, creator: str,
scheduled_flow_id: str) -> None:
"""See base class."""
try:
self.scheduled_flows.pop((client_id, creator, scheduled_flow_id))
except KeyError:
raise db.UnknownScheduledFlowError(
client_id=client_id,
creator=creator,
scheduled_flow_id=scheduled_flow_id)
@utils.Synchronized
def ListScheduledFlows(
self, client_id: str,
creator: str) -> Sequence[rdf_flow_objects.ScheduledFlow]:
"""See base class."""
return [
sf.Copy()
for sf in self.scheduled_flows.values()
if sf.client_id == client_id and sf.creator == creator
]
|