id | label | text |
|---|---|---|
5,300 | test serialize snapshot without sequence number | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint:disable=redefined-outer-name,eval-used
import pytest
from pyiceberg.table.snapshots import Operation, Snapshot, Summary
@pytest.fixture
def snapshot() -> Snapshot:
return Snapshot(
snapshot_id=25,
parent_snapshot_id=19,
sequence_number=200,
timestamp_ms=1602638573590,
manifest_list="s3:/a/b/c.avro",
summary=Summary(Operation.APPEND),
schema_id=3,
)
@pytest.fixture
def snapshot_with_properties() -> Snapshot:
return Snapshot(
snapshot_id=25,
parent_snapshot_id=19,
sequence_number=200,
timestamp_ms=1602638573590,
manifest_list="s3:/a/b/c.avro",
summary=Summary(Operation.APPEND, foo="bar"),
schema_id=3,
)
def test_serialize_summary() -> None:
assert Summary(Operation.APPEND).model_dump_json() == """{"operation":"append"}"""
def test_serialize_summary_with_properties() -> None:
summary = Summary(Operation.APPEND, property="yes")
assert summary.model_dump_json() == """{"operation":"append","property":"yes"}"""
def test_serialize_snapshot(snapshot: Snapshot) -> None:
assert (
snapshot.model_dump_json()
== """{"snapshot-id":25,"parent-snapshot-id":19,"sequence-number":200,"timestamp-ms":1602638573590,"manifest-list":"s3:/a/b/c.avro","summary":{"operation":"append"},"schema-id":3}"""
)
def METHOD_NAME() -> None:
snapshot = Snapshot(
snapshot_id=25,
parent_snapshot_id=19,
timestamp_ms=1602638573590,
manifest_list="s3:/a/b/c.avro",
summary=Summary(Operation.APPEND),
schema_id=3,
)
actual = snapshot.model_dump_json()
expected = """{"snapshot-id":25,"parent-snapshot-id":19,"timestamp-ms":1602638573590,"manifest-list":"s3:/a/b/c.avro","summary":{"operation":"append"},"schema-id":3}"""
assert actual == expected
def test_serialize_snapshot_with_properties(snapshot_with_properties: Snapshot) -> None:
assert (
snapshot_with_properties.model_dump_json()
== """{"snapshot-id":25,"parent-snapshot-id":19,"sequence-number":200,"timestamp-ms":1602638573590,"manifest-list":"s3:/a/b/c.avro","summary":{"operation":"append","foo":"bar"},"schema-id":3}"""
)
def test_deserialize_summary() -> None:
summary = Summary.model_validate_json("""{"operation": "append"}""")
assert summary.operation == Operation.APPEND
def test_deserialize_summary_with_properties() -> None:
summary = Summary.model_validate_json("""{"operation": "append", "property": "yes"}""")
assert summary.operation == Operation.APPEND
assert summary.additional_properties == {"property": "yes"}
def test_deserialize_snapshot(snapshot: Snapshot) -> None:
payload = """{"snapshot-id": 25, "parent-snapshot-id": 19, "sequence-number": 200, "timestamp-ms": 1602638573590, "manifest-list": "s3:/a/b/c.avro", "summary": {"operation": "append"}, "schema-id": 3}"""
actual = Snapshot.model_validate_json(payload)
assert actual == snapshot
def test_deserialize_snapshot_with_properties(snapshot_with_properties: Snapshot) -> None:
payload = """{"snapshot-id":25,"parent-snapshot-id":19,"sequence-number":200,"timestamp-ms":1602638573590,"manifest-list":"s3:/a/b/c.avro","summary":{"operation":"append","foo":"bar"},"schema-id":3}"""
snapshot = Snapshot.model_validate_json(payload)
assert snapshot == snapshot_with_properties
def test_snapshot_repr(snapshot: Snapshot) -> None:
assert (
repr(snapshot)
== """Snapshot(snapshot_id=25, parent_snapshot_id=19, sequence_number=200, timestamp_ms=1602638573590, manifest_list='s3:/a/b/c.avro', summary=Summary(Operation.APPEND), schema_id=3)"""
)
assert snapshot == eval(repr(snapshot))
def test_snapshot_with_properties_repr(snapshot_with_properties: Snapshot) -> None:
assert (
repr(snapshot_with_properties)
== """Snapshot(snapshot_id=25, parent_snapshot_id=19, sequence_number=200, timestamp_ms=1602638573590, manifest_list='s3:/a/b/c.avro', summary=Summary(Operation.APPEND, **{'foo': 'bar'}), schema_id=3)"""
)
assert snapshot_with_properties == eval(repr(snapshot_with_properties)) |
5,301 | tear down | # --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
import unittest
from argparse import Namespace
from six import StringIO
from knack import CLI
from azure.cli.core._config import GLOBAL_CONFIG_DIR, ENV_VAR_PREFIX
from azure.cli.core.cloud import get_active_cloud
from azure.cli.core.profiles import get_sdk, supported_api_version, register_resource_type
from azure.cli.testsdk import api_version_constraint
from ..._validators import (get_datetime_type, ipv4_range_type, validate_encryption_source,
validate_encryption_services)
from ...profiles import CUSTOM_DATA_STORAGE, CUSTOM_MGMT_STORAGE
class MockCLI(CLI):
def __init__(self):
super(MockCLI, self).__init__(cli_name='mock_cli', config_dir=GLOBAL_CONFIG_DIR,
config_env_var_prefix=ENV_VAR_PREFIX, commands_loader_cls=MockLoader)
self.cloud = get_active_cloud(self)
class MockLoader(object):
def __init__(self, ctx):
self.ctx = ctx
register_resource_type('latest', CUSTOM_DATA_STORAGE, '2018-03-28')
def get_models(self, *attr_args, **_):
from azure.cli.core.profiles import get_sdk
return get_sdk(self.ctx, CUSTOM_DATA_STORAGE, *attr_args, mod='models')
class MockCmd(object):
def __init__(self, ctx):
self.cli_ctx = ctx
self.loader = MockLoader(self.cli_ctx)
def get_models(self, *attr_args, **kwargs):
return get_sdk(self.cli_ctx, CUSTOM_DATA_STORAGE, *attr_args, **kwargs)
class TestStorageValidators(unittest.TestCase):
def setUp(self):
self.io = StringIO()
self.cli = MockCLI()
self.loader = MockLoader(self.cli)
def METHOD_NAME(self):
self.io.close()
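# Inferred from the two tests below: get_datetime_type(True) returns a parser
# that keeps the validated string, while get_datetime_type(False) parses the
# input into a datetime object.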
def test_datetime_string_type(self):
input = "2017-01-01T12:30Z"
actual = get_datetime_type(True)(input)
expected = "2017-01-01T12:30Z"
self.assertEqual(actual, expected)
input = "2017-01-01 12:30"
with self.assertRaises(ValueError):
get_datetime_type(True)(input)
def test_datetime_type(self):
import datetime
input = "2017-01-01T12:30Z"
actual = get_datetime_type(False)(input)
expected = datetime.datetime(2017, 1, 1, 12, 30, 0)
self.assertEqual(actual, expected)
input = "2017-01-01 12:30"
with self.assertRaises(ValueError):
actual = get_datetime_type(False)(input)
def test_ipv4_range_type(self):
input = "111.22.3.111"
actual = ipv4_range_type(input)
expected = input
self.assertEqual(actual, expected)
input = "111.22.3.111-222.11.44.111"
actual = ipv4_range_type(input)
expected = input
self.assertEqual(actual, expected)
input = "111.22"
with self.assertRaises(ValueError):
actual = ipv4_range_type(input)
input = "111.22.33.44-"
with self.assertRaises(ValueError):
actual = ipv4_range_type(input)
@api_version_constraint(resource_type=CUSTOM_MGMT_STORAGE, min_api='2016-12-01')
class TestEncryptionValidators(unittest.TestCase):
def setUp(self):
self.cli = MockCLI()
def test_validate_encryption_services(self):
ns = Namespace(encryption_services=['blob'], _cmd=MockCmd(self.cli))
validate_encryption_services(MockCmd(self.cli), ns)
self.assertIsNotNone(ns.encryption_services.blob)
self.assertTrue(ns.encryption_services.blob.enabled)
self.assertIsNone(ns.encryption_services.file)
ns = Namespace(encryption_services=['file'], _cmd=MockCmd(self.cli))
validate_encryption_services(MockCmd(self.cli), ns)
self.assertIsNotNone(ns.encryption_services.file)
self.assertTrue(ns.encryption_services.file.enabled)
self.assertIsNone(ns.encryption_services.blob)
ns = Namespace(encryption_services=['blob', 'file'], _cmd=MockCmd(self.cli))
validate_encryption_services(MockCmd(self.cli), ns)
self.assertIsNotNone(ns.encryption_services.blob)
self.assertTrue(ns.encryption_services.blob.enabled)
self.assertIsNotNone(ns.encryption_services.file)
self.assertTrue(ns.encryption_services.file.enabled)
def test_validate_encryption_source(self):
with self.assertRaises(ValueError):
validate_encryption_source(
Namespace(encryption_key_source='Microsoft.Keyvault', encryption_key_name=None,
encryption_key_version=None, encryption_key_vault=None, _cmd=MockCmd(self.cli)))
with self.assertRaises(ValueError):
validate_encryption_source(
Namespace(encryption_key_source='Microsoft.Storage', encryption_key_name='key_name',
encryption_key_version='key_version', encryption_key_vault='https://example.com/key_uri'))
if __name__ == '__main__':
unittest.main() |
5,302 | deserialize rfc | # --------------------------------------------------------------------------
#
# Copyright (c) Microsoft Corporation. All rights reserved.
#
# The MIT License (MIT)
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the ""Software""), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED *AS IS*, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
# --------------------------------------------------------------------------
import isodate
import email
import calendar
import re
import datetime
from base64 import b64encode, b64decode
import xml.etree.ElementTree as ET
try:
from datetime import timezone as _FixedOffset
except ImportError: # Python 2.7
class _FixedOffset(datetime.tzinfo): # type: ignore
"""Fixed offset in minutes east from UTC.
Copy/pasted from Python doc
:param datetime.timedelta offset: offset in timedelta format
"""
def __init__(self, offset):
self.__offset = offset
def utcoffset(self, dt):
return self.__offset
def tzname(self, dt):
return str(self.__offset.total_seconds()/3600)
def __repr__(self):
return "<FixedOffset {}>".format(self.tzname(None))
def dst(self, dt):
return datetime.timedelta(0)
def __getinitargs__(self):
return (self.__offset,)
class UTC(datetime.tzinfo):
"""Time Zone info for handling UTC"""
def utcoffset(self, dt):
"""UTC offset for UTC is 0."""
return datetime.timedelta(0)
def tzname(self, dt):
"""Timestamp representation."""
return "Z"
def dst(self, dt):
"""No daylight saving for UTC."""
return datetime.timedelta(0)
try:
from datetime import timezone
TZ_UTC = timezone.utc # type: ignore
except ImportError:
TZ_UTC = UTC() # type: ignore
DAYS = {0: "Mon", 1: "Tue", 2: "Wed", 3: "Thu",
4: "Fri", 5: "Sat", 6: "Sun"}
MONTHS = {1: "Jan", 2: "Feb", 3: "Mar", 4: "Apr", 5: "May", 6: "Jun",
7: "Jul", 8: "Aug", 9: "Sep", 10: "Oct", 11: "Nov", 12: "Dec"}
VALID_DATE = re.compile(
r'\d{4}[-]\d{2}[-]\d{2}T\d{2}:\d{2}:\d{2}'
r'\.?\d*Z?[-+]?[\d{2}]?:?[\d{2}]?')
def serialize_base64(attr):
encoded = b64encode(attr).decode('ascii')
return encoded.strip('=').replace('+', '-').replace('/', '_')
def deserialize_base64(attr):
padding = '=' * (3 - (len(attr) + 3) % 4)
attr = attr + padding
encoded = attr.replace('-', '+').replace('_', '/')
return b64decode(encoded)
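# Illustrative round trip (assumed example, not from the original source):
# the pair implements unpadded URL-safe base64, so values round-trip, e.g.
#   serialize_base64(b'\xfb\xef') == '--8'
#   deserialize_base64('--8') == b'\xfb\xef'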
def deserialize_date(attr):
return isodate.parse_date(attr)
def deserialize_datetime(attr):
return isodate.parse_datetime(attr)
def serialize_rfc(attr):
try:
utc = attr.utctimetuple()
except AttributeError:
raise TypeError("RFC1123 object must be valid Datetime object.")
return "{}, {:02} {} {:04} {:02}:{:02}:{:02} GMT".format(
DAYS[utc.tm_wday], utc.tm_mday,
MONTHS[utc.tm_mon], utc.tm_year,
utc.tm_hour, utc.tm_min, utc.tm_sec
)
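# Illustrative output (assumed example):
# serialize_rfc(datetime.datetime(2018, 7, 9, 2, 9, 2)) returns
# 'Mon, 09 Jul 2018 02:09:02 GMT'.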
def serialize_iso(attr):
if isinstance(attr, str):
attr = isodate.parse_datetime(attr)
utc = attr.utctimetuple()
if utc.tm_year > 9999 or utc.tm_year < 1:
raise OverflowError("Hit max or min date")
microseconds = str(attr.microsecond).rjust(6,'0').rstrip('0').ljust(3, '0')
if microseconds:
microseconds = '.'+microseconds
date = "{:04}-{:02}-{:02}T{:02}:{:02}:{:02}".format(
utc.tm_year, utc.tm_mon, utc.tm_mday,
utc.tm_hour, utc.tm_min, utc.tm_sec)
return date + microseconds + 'Z'
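# Illustrative output (assumed example):
# serialize_iso(datetime.datetime(2018, 7, 9, 2, 9, 2, 500000)) returns
# '2018-07-09T02:09:02.500Z' -- trailing zeros of the fraction are trimmed,
# but the fraction is padded back to at least three digits.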
def deserialize_iso(attr):
if isinstance(attr, ET.Element):
attr = attr.text
attr = attr.upper()
match = VALID_DATE.match(attr)
if not match:
raise ValueError("Invalid datetime string: " + attr)
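# Fractional seconds are truncated to 6 digits below, since datetime objects
# only carry microsecond precision.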
check_decimal = attr.split('.')
if len(check_decimal) > 1:
decimal_str = ""
for digit in check_decimal[1]:
if digit.isdigit():
decimal_str += digit
else:
break
if len(decimal_str) > 6:
attr = attr.replace(decimal_str, decimal_str[0:6])
date_obj = isodate.parse_datetime(attr)
test_utc = date_obj.utctimetuple()
if test_utc.tm_year > 9999 or test_utc.tm_year < 1:
raise OverflowError("Hit max or min date")
return date_obj
def deserialize_bytearray(attr):
if isinstance(attr, ET.Element):
attr = attr.text
return bytearray(b64decode(attr))
def serialize_bytearray(attr):
return b64encode(attr).decode()
def serialize_date(attr):
if isinstance(attr, str):
attr = isodate.parse_date(attr)
t = "{:04}-{:02}-{:02}".format(attr.year, attr.month, attr.day)
return t
def deserialize_duration(attr):
if isinstance(attr, ET.Element):
attr = attr.text
return isodate.parse_duration(attr)
def serialize_duration(attr):
if isinstance(attr, str):
attr = isodate.parse_duration(attr)
return isodate.duration_isoformat(attr)
def METHOD_NAME(attr):
if isinstance(attr, ET.Element):
attr = attr.text
parsed_date = email.utils.parsedate_tz(attr)
date_obj = datetime.datetime(
*parsed_date[:6],
tzinfo=_FixedOffset(datetime.timedelta(minutes=(parsed_date[9] or 0)/60))
)
if not date_obj.tzinfo:
date_obj = date_obj.astimezone(tz=TZ_UTC)
return date_obj
def deserialize_unix(attr):
if isinstance(attr, ET.Element):
attr = int(attr.text)
return datetime.datetime.fromtimestamp(attr, TZ_UTC)
def serialize_unix(attr):
if isinstance(attr, int):
return attr
return int(calendar.timegm(attr.utctimetuple()))
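# Illustrative round trip (assumed example): deserialize_unix(0) is
# 1970-01-01T00:00:00 UTC, and serialize_unix(deserialize_unix(0)) == 0.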
def deserialize_time(attr):
if isinstance(attr, ET.Element):
attr = attr.text
return isodate.parse_time(attr)
def serialize_time(attr):
if isinstance(attr, str):
attr = isodate.parse_time(attr)
t = "{:02}:{:02}:{:02}".format(attr.hour, attr.minute, attr.second)
if attr.microsecond:
t += ".{:06}".format(attr.microsecond)
return t |
5,303 | main | #!/usr/bin/python
# Wflow is Free software, see below:
#
# Copyright (c) J. Schellekens 2005-2011
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
wflow_extract -- cut out a section of a working wflow model and optionally resample it to a higher resolution
Usage::
-C CaseName
-N NewCaseName
-c clone map
-I skip input mapstacks if specified
-f force overwrite an existing model
The script uses the pcraster resample program to reduce the maps.
"""
from wflow.wflow_lib import *
import wflow.pcrut as pcrut
import os
import os.path
import glob
import getopt
import shutil
import sys
def usage(*args):
sys.stdout = sys.stderr
for msg in args: print(msg)
print(__doc__)
sys.exit(0)
def METHOD_NAME(argv=None):
if argv is None:
argv = sys.argv[1:]
if len(argv) == 0:
usage()
return
opts, args = getopt.getopt(argv, 'fhC:N:Ir:c:')
factor = 1
Verbose=1
inmaps = True
force = False
caseName = "rhineNew"
caseNameNew = "rhineNew_resampaa"
cloneMap = "clone.map"
for o, a in opts:
if o == '-C': caseName = a
if o == '-c': cloneMap = a
if o == '-N': caseNameNew = a
if o == '-r': factor = int(a)
if o == '-I': inmaps = False
if o == '-h': usage()
if o == '-f': force = True
dirs = ['/intbl/', '/inmaps/', '/staticmaps/', '/intss/', '/instate/', '/outstate/']
if os.path.isdir(caseNameNew) and not force:
print("Refusing to write into an existing directory: " + caseNameNew)
sys.exit(1)
if not os.path.isdir(caseNameNew):
for ddir in dirs:
os.makedirs(caseNameNew + ddir)
for inifile in glob.glob(caseName + "/*.ini"):
shutil.copy(inifile, inifile.replace(caseName,caseNameNew))
for ddir in dirs:
for mfile in glob.glob(caseName + ddir + '/*.map'):
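# Clip/resample each static map onto the new clone grid via PCRaster's
# command-line `resample` tool.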
mstr = "resample --clone " + cloneMap + ' ' + mfile + " " + mfile.replace(caseName,caseNameNew)
print(mstr)
os.system(mstr)
if inmaps:
for mfile in glob.glob(caseName + ddir + '/*.[0-9][0-9][0-9]'):
mstr = "resample --clone " + cloneMap + ' ' + mfile + " " + mfile.replace(caseName,caseNameNew)
if not os.path.exists(mfile.replace(caseName,caseNameNew)):
print(mstr)
os.system(mstr)
else:
print("skipping " + mfile.replace(caseName,caseNameNew))
for mfile in glob.glob(caseName + ddir + '*.tbl'):
shutil.copy(mfile, mfile.replace(caseName,caseNameNew))
for mfile in glob.glob(caseName + ddir + '*.col'):
shutil.copy(mfile, mfile.replace(caseName,caseNameNew))
for mfile in glob.glob(caseName + ddir + '*.tss'):
shutil.copy(mfile, mfile.replace(caseName,caseNameNew))
print("recreating static maps ...")
# Create new ldd using old river network
dem = readmap(caseNameNew + "/staticmaps/wflow_dem.map")
# orig low res river
riverburn = readmap(caseNameNew + "/staticmaps/wflow_river.map")
# save it
report(riverburn,caseNameNew + "/staticmaps/wflow_riverburnin.map")
demburn = cover(ifthen(boolean(riverburn), dem - 600) ,dem)
print("Creating ldd...")
ldd = lddcreate_save(caseNameNew + "/staticmaps/wflow_ldd.map",demburn, True, 10.0E35)
#
## Find catchment (overall)
outlet = find_outlet(ldd)
sub = subcatch(ldd,outlet)
report(sub,caseNameNew + "/staticmaps/wflow_catchment.map")
report(outlet,caseNameNew + "/staticmaps/wflow_outlet.map")
#os.system("col2map --clone " + caseNameNew + "/staticmaps/wflow_subcatch.map " + caseNameNew + "/staticmaps/gauges.col " + caseNameNew + "/staticmaps/wflow_gauges.map")
gmap = readmap(caseNameNew + "/staticmaps/wflow_gauges.map")
scatch = subcatch(ldd,gmap)
report(scatch,caseNameNew + "/staticmaps/wflow_subcatch.map")
if __name__ == "__main__":
METHOD_NAME() |
5,304 | test reset | # -*- coding: utf-8 -*-
#
# test_corr_det.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
"""
Feeds correlation detector with two hand-crafted spike trains with
known correlation. Correlation detector parameters are set in model.
Remarks:
The test does not test weighted correlations.
"""
import nest
import pytest
import numpy as np
@pytest.fixture(autouse=True)
def prepare():
nest.ResetKernel()
def test_changing_params():
new_params = {"delta_tau": 2.0, "tau_max": 20.0}
original_model_instance = nest.Create("correlation_detector")
original_model_instance.set(new_params)
nest.SetDefaults("correlation_detector", new_params)
modified_model_instance = nest.Create("correlation_detector")
original = original_model_instance.get(new_params.keys())
modified = modified_model_instance.get(new_params.keys())
assert modified == original
def test_setting_invalid_delta_tau():
nest.resolution = 0.1
with pytest.raises(Exception):
nest.SetDefaults("correlation_detector", {"delta_tau": 0.25})
def test_setting_invalid_tau_max():
nest.resolution = 0.1
with pytest.raises(Exception):
nest.SetDefaults("correlation_detector", {"delta_tau": 1.0, "tau_max": 2.5})
def test_setting_invalid_resolution():
nest.resolution = 0.1
nest.SetDefaults("correlation_detector", {"delta_tau": 0.1})
with pytest.raises(Exception):
nest.resolution = 1.0
detector = nest.Create("correlation_detector")
def test_setting_num_of_histogram_bins():
nest.resolution = 0.2
nest.SetDefaults("correlation_detector", {"delta_tau": 1.0, "tau_max": 5.0})
detector = nest.Create("correlation_detector")
nest.Simulate(1)
histogram_size = len(detector.get("histogram"))
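# Expected bin count, inferred from the parameters above:
# 2 * tau_max / delta_tau + 1 = 2 * 5.0 / 1.0 + 1 = 11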
assert histogram_size == 11
def prepare_correlation_detector(spike_times_array):
detector = nest.Create("correlation_detector")
sg1 = nest.Create("spike_generator")
sg1.set(precise_times=False, spike_times=spike_times_array[0])
nest.SetDefaults("static_synapse", {"receptor_type": 0})
nest.Connect(sg1, detector)
sg2 = nest.Create("spike_generator")
sg2.set(precise_times=False, spike_times=spike_times_array[1])
nest.SetDefaults("static_synapse", {"receptor_type": 1})
nest.Connect(sg2, detector)
all_spike_times = []
all_spike_times.extend(spike_times_array[0])
all_spike_times.extend(spike_times_array[1])
max_value = np.max(all_spike_times)
min_delay = nest.GetKernelStatus()["min_delay"]
t_sim = min_delay * 2 + max_value
nest.Simulate(t_sim)
return detector
def diff_at_center():
spikes_times = [[1.0, 2.0, 6.0], [2.0, 4.0]]
histogram = [0, 1, 0, 1, 0, 1, 1, 1, 1, 0, 0]
return (spikes_times, histogram)
def diff_at_edge():
spikes_times = [[6.0], [0.5, 5.4, 5.5, 5.6, 6.4, 6.5, 6.6, 11.5]]
histogram = [1, 0, 0, 0, 1, 3, 2, 0, 0, 0, 0]
return (spikes_times, histogram)
@pytest.mark.parametrize("spikes_times, histogram", [diff_at_center(), diff_at_edge()])
def test_histogram_correlation(spikes_times, histogram):
nest.resolution = 0.1
nest.SetDefaults("correlation_detector", {"delta_tau": 1.0, "tau_max": 5.0})
detector = prepare_correlation_detector(spikes_times)
n_events = detector.get("n_events")
spikes_times_size = [len(x) for x in spikes_times]
assert (n_events == spikes_times_size).all()
detector_histogram = detector.get("histogram")
assert (detector_histogram == histogram).all()
def test_setting_invalid_n_events():
"""
Ensure that n_events cannot be set to [1, 1].
"""
detector = nest.Create("correlation_detector")
with pytest.raises(Exception):
detector.set(n_events=[1, 1])
def METHOD_NAME():
nest.resolution = 0.1
nest.SetDefaults("correlation_detector", {"delta_tau": 1.0, "tau_max": 5.0})
spikes_times = [[1.0, 2.0, 6.0], [2.0, 4.0]]
detector = prepare_correlation_detector(spikes_times)
n_events = detector.get("n_events")
has_zero_entries = np.any(n_events == 0)
if not has_zero_entries:
detector.set(n_events=[0, 0])
assert np.all(detector.get("n_events") == 0)
assert np.all(detector.get("histogram") == 0) |
5,305 | a | # type: ignore
from __future__ import annotations
import sys
import textwrap
from typing import List
from typing_extensions import Annotated
import pytest
import strawberry
from strawberry.printer import print_schema
from strawberry.scalars import JSON
from strawberry.type import StrawberryList, StrawberryOptional
from tests.METHOD_NAME import A
def test_forward_reference():
global MyType
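# Binding MyType at module scope lets the string annotation below be resolved
# when the schema is built; the name is removed again via `del MyType`.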
@strawberry.type
class Query:
me: MyType = strawberry.field(name="myself")
@strawberry.type
class MyType:
id: strawberry.ID
scalar: JSON
optional_scalar: JSON | None
expected_representation = '''
"""
The `JSON` scalar type represents JSON values as specified by [ECMA-404](http://www.ecma-international.org/publications/files/ECMA-ST/ECMA-404.pdf).
"""
scalar JSON @specifiedBy(url: "http://www.ecma-international.org/publications/files/ECMA-ST/ECMA-404.pdf")
type MyType {
id: ID!
scalar: JSON!
optionalScalar: JSON
}
type Query {
myself: MyType!
}
'''
schema = strawberry.Schema(Query)
assert print_schema(schema) == textwrap.dedent(expected_representation).strip()
del MyType
@pytest.mark.skipif(
sys.version_info < (3, 9),
reason="Python 3.8 and previous can't properly resolve this.",
)
def test_lazy_forward_reference():
@strawberry.type
class Query:
@strawberry.field
async def METHOD_NAME(self) -> A:
return A(id=strawberry.ID("1"))
expected_representation = """
type A {
id: ID!
b: B!
optionalB: B
optionalB2: B
}
type B {
id: ID!
a: A!
optionalA: A
optionalA2: A
}
type Query {
a: A!
}
"""
schema = strawberry.Schema(query=Query)
assert print_schema(schema) == textwrap.dedent(expected_representation).strip()
def test_with_resolver():
global User
@strawberry.type
class User:
name: str
def get_users() -> List[User]:
return []
@strawberry.type
class Query:
users: List[User] = strawberry.field(resolver=get_users)
definition = Query.__strawberry_definition__
assert definition.name == "Query"
[field] = definition.fields
assert field.python_name == "users"
assert isinstance(field.type, StrawberryList)
assert field.type.of_type is User
del User
def test_union_or_notation():
global User
@strawberry.type
class User:
name: str
def get_users() -> List[User] | None:
return []
@strawberry.type
class Query:
users: List[User] | None = strawberry.field(resolver=get_users)
definition = Query.__strawberry_definition__
assert definition.name == "Query"
[field] = definition.fields
assert field.python_name == "users"
assert isinstance(field.type, StrawberryOptional)
assert isinstance(field.type.of_type, StrawberryList)
assert field.type.of_type.of_type is User
del User
@pytest.mark.skipif(
sys.version_info < (3, 9),
reason="generic type alias only available on python 3.9+",
)
def test_union_or_notation_generic_type_alias():
global User
@strawberry.type
class User:
name: str
def get_users() -> list[User] | None:
return []
@strawberry.type
class Query:
users: list[User] | None = strawberry.field(resolver=get_users)
definition = Query.__strawberry_definition__
assert definition.name == "Query"
[field] = definition.fields
assert field.python_name == "users"
assert isinstance(field.type, StrawberryOptional)
assert isinstance(field.type.of_type, StrawberryList)
assert field.type.of_type.of_type is User
del User
def test_annotated():
global User
@strawberry.type
class User:
name: str
def get_users() -> List[User]:
return []
@strawberry.type
class Query:
users: Annotated[List[User], object()] = strawberry.field(resolver=get_users)
definition = Query.__strawberry_definition__
assert definition.name == "Query"
[field] = definition.fields
assert field.python_name == "users"
assert isinstance(field.type, StrawberryList)
assert field.type.of_type is User
del User
def test_annotated_or_notation():
global User
@strawberry.type
class User:
name: str
def get_users() -> List[User] | None:
return []
@strawberry.type
class Query:
users: Annotated[List[User] | None, object()] = strawberry.field(
resolver=get_users
)
definition = Query.__strawberry_definition__
assert definition.name == "Query"
[field] = definition.fields
assert field.python_name == "users"
assert isinstance(field.type, StrawberryOptional)
assert isinstance(field.type.of_type, StrawberryList)
assert field.type.of_type.of_type is User
del User
@pytest.mark.skipif(
sys.version_info < (3, 9),
reason="generic type alias only available on python 3.9+",
)
def test_annotated_or_notation_generic_type_alias():
global User
@strawberry.type
class User:
name: str
def get_users() -> list[User]:
return []
@strawberry.type
class Query:
users: Annotated[list[User] | None, object()] = strawberry.field(
resolver=get_users
)
definition = Query.__strawberry_definition__
assert definition.name == "Query"
[field] = definition.fields
assert field.python_name == "users"
assert isinstance(field.type, StrawberryOptional)
assert isinstance(field.type.of_type, StrawberryList)
assert field.type.of_type.of_type is User
del User |
5,306 | test dim from tblock size | # -*- coding: utf-8 -*-
#
# Copyright (c) 2023, the cclib development team
#
# This file is part of cclib (http://cclib.github.io) and is distributed under
# the terms of the BSD 3-Clause License.
"""Unit tests for utilities."""
import unittest
from cclib.parser import utils
import numpy
import scipy.spatial.transform
class FloatTest(unittest.TestCase):
def test_float_basic(self) -> None:
"""Are floats converted from strings correctly?"""
assert utils.float("0.0") == 0.0
assert utils.float("1.0") == 1.0
assert utils.float("-1.0") == -1.0
def test_float_numeric_format(self) -> None:
"""Does numeric formatting get converted correctly?"""
assert utils.float("1.2345E+02") == 123.45
assert utils.float("1.2345D+02") == 123.45
def test_float_stars(self) -> None:
"""Does the function return nan for stars?"""
assert numpy.isnan(utils.float("*"))
assert numpy.isnan(utils.float("*****"))
class ConvertorTest(unittest.TestCase):
def test_convertor(self) -> None:
assert f"{utils.convertor(8.0, 'eV', 'wavenumber'):.3f}" == "64524.354"
class GetRotationTest(unittest.TestCase):
delta = 1e-14
def setUp(self) -> None:
self.r = scipy.spatial.transform.Rotation.from_euler('xyz', [15, 25, 35], degrees=True)
self.t = numpy.array([-1, 0, 2])
self.a = numpy.array([[1., 1., 1.],
[0., 1., 2.],
[0., 0., 0.],
[0., 0., 4.]])
self.b = self.r.apply(self.a + self.t)
def test_default(self) -> None:
"""Is the rotation correct?"""
_r = utils.get_rotation(self.a, self.b)
# as_dcm was renamed to as_matrix in scipy 1.4.0 and removed in scipy 1.6.0
if hasattr(self.r, "as_matrix"):
numpy.testing.assert_allclose(self.r.as_matrix(), _r.as_matrix(), atol=self.delta)
else:
numpy.testing.assert_allclose(self.r.as_dcm(), _r.as_dcm(), atol=self.delta)
def test_two_atoms(self) -> None:
"""Is the rotation correct for 2 atoms?"""
a2 = self.a[:2]
b2 = self.b[:2]
rotated_diff = self.r.apply(a2) - utils.get_rotation(a2, b2).apply(a2)
# rotated_diff should be translation
numpy.testing.assert_allclose(rotated_diff[0], rotated_diff[1], atol=self.delta)
def test_one_atom(self) -> None:
"""Is the rotation the identity for 1 atom?"""
a1 = self.a[:1]
b1 = self.b[:1]
if hasattr(self.r, "as_matrix"):
numpy.testing.assert_allclose(numpy.eye(3), utils.get_rotation(a1, b1).as_matrix(), atol=self.delta)
else:
numpy.testing.assert_allclose(numpy.eye(3), utils.get_rotation(a1, b1).as_dcm(), atol=self.delta)
class PeriodicTableTest(unittest.TestCase):
def setUp(self) -> None:
self.t = utils.PeriodicTable()
def test_periodictable(self) -> None:
assert self.t.element[6] == 'C'
assert self.t.number['C'] == 6
assert self.t.element[44] == 'Ru'
assert self.t.number['Au'] == 79
class WidthSplitterTest(unittest.TestCase):
def test_default(self) -> None:
"""Does the splitter remove empty fields by default properly?"""
fixed_splitter = utils.WidthSplitter((4, 3, 5, 6, 10, 10, 10, 10, 10, 10))
line_full = " 60 H 10 s 0.14639 0.00000 0.00000 -0.00000 -0.00000 0.00000"
line_truncated = " 1 C 1 s -0.00000 -0.00000 0.00000"
ref_full = ['60', 'H', '10', 's', '0.14639', '0.00000', '0.00000', '-0.00000', '-0.00000', '0.00000']
ref_truncated = ['1', 'C', '1', 's', '-0.00000', '-0.00000', '0.00000']
tokens_full = fixed_splitter.split(line_full)
tokens_truncated = fixed_splitter.split(line_truncated)
assert ref_full == tokens_full
assert ref_truncated == tokens_truncated
def test_no_truncation(self) -> None:
"""Does the splitter return even the empty fields when asked?"""
fixed_splitter = utils.WidthSplitter((4, 3, 5, 6, 10, 10, 10, 10, 10, 10))
line = " 1 C 1 s -0.00000 -0.00000 0.00000"
ref_not_truncated = ['1', 'C', '1', 's', '-0.00000', '-0.00000', '0.00000', '', '', '']
tokens_not_truncated = fixed_splitter.split(line, truncate=False)
assert ref_not_truncated == tokens_not_truncated
class SymmetrizeTest(unittest.TestCase):
def METHOD_NAME(self) -> None:
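# The lower triangle of an n x n symmetric matrix packs n * (n + 1) / 2
# elements, so the only valid block sizes are 1, 3, 6, 10, ...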
assert utils._dim_from_tblock_size(1) == 1
# This isn't possible until we fully move to pytest.
# with pytest.raises(
# RuntimeError,
# match="The number of elements (2) isn't possible for a matrix triangle"
# ):
# assert utils._dim_from_tblock_size(2)
assert utils._dim_from_tblock_size(3) == 2
assert utils._dim_from_tblock_size(6) == 3
assert utils._dim_from_tblock_size(10) == 4
def test_block_to_matrix(self) -> None:
inp = numpy.array([1, 2, 3, 4, 5, 6], dtype=int)
ref = numpy.array([[1, 2, 4], [2, 3, 5], [4, 5, 6]], dtype=int)
numpy.testing.assert_equal(utils.block_to_matrix(inp), ref)
def test_symmetrize(self) -> None:
inp = numpy.array([[1, 9, 7],
[4, 8, 3],
[6, 2, 5]], dtype=int)
ref_lower = numpy.array([[1, 4, 6],
[4, 8, 2],
[6, 2, 5]], dtype=int)
ref_upper = numpy.array([[1, 9, 7],
[9, 8, 3],
[7, 3, 5]], dtype=int)
numpy.testing.assert_equal(utils.symmetrize(inp, "lower"), ref_lower)
numpy.testing.assert_equal(utils.symmetrize(inp, "upper"), ref_upper)
if __name__ == "__main__":
unittest.main() |
5,307 | on sendcmpct | #!/usr/bin/env python3
# Copyright (c) 2017-2019 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file LICENSE or http://www.opensource.org/licenses/mit-license.php.
"""Test message sending before handshake completion.
A node should never send anything other than VERSION/VERACK/REJECT until it's
received a VERACK.
This test connects to a node and sends it a few messages, trying to entice it
into sending us something it shouldn't."""
import time
from test_framework.messages import msg_getaddr, msg_ping, msg_verack
from test_framework.mininode import mininode_lock, P2PInterface
from test_framework.test_framework import DefiTestFramework
from test_framework.util import wait_until
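# Threshold passed to the node via -banscore below; CNodeNoVersionBan sends
# exactly this many veracks to trigger a ban.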
banscore = 10
class CLazyNode(P2PInterface):
def __init__(self):
super().__init__()
self.unexpected_msg = False
self.ever_connected = False
def bad_message(self, message):
self.unexpected_msg = True
self.log.info("should not have received message: %s" % message.command)
def on_open(self):
self.ever_connected = True
def on_version(self, message):
self.bad_message(message)
def on_verack(self, message):
self.bad_message(message)
def on_reject(self, message):
self.bad_message(message)
def on_inv(self, message):
self.bad_message(message)
def on_addr(self, message):
self.bad_message(message)
def on_getdata(self, message):
self.bad_message(message)
def on_getblocks(self, message):
self.bad_message(message)
def on_tx(self, message):
self.bad_message(message)
def on_block(self, message):
self.bad_message(message)
def on_getaddr(self, message):
self.bad_message(message)
def on_headers(self, message):
self.bad_message(message)
def on_anchorauth(self, message):
self.bad_message(message)
def on_getheaders(self, message):
self.bad_message(message)
def on_ping(self, message):
self.bad_message(message)
def on_mempool(self, message):
self.bad_message(message)
def on_pong(self, message):
self.bad_message(message)
def on_feefilter(self, message):
self.bad_message(message)
def on_sendheaders(self, message):
self.bad_message(message)
def METHOD_NAME(self, message):
self.bad_message(message)
def on_cmpctblock(self, message):
self.bad_message(message)
def on_getblocktxn(self, message):
self.bad_message(message)
def on_blocktxn(self, message):
self.bad_message(message)
# Node that never sends a version. We'll use this to send a bunch of messages
# anyway, and eventually get disconnected.
class CNodeNoVersionBan(CLazyNode):
# send a bunch of veracks without sending a message. This should get us disconnected.
# NOTE: implementation-specific check here. Remove if defid ban behavior changes
def on_open(self):
super().on_open()
for i in range(banscore):
self.send_message(msg_verack())
def on_reject(self, message):
pass
# Node that never sends a version. This one just sits idle and hopes to receive
# any message (it shouldn't!)
class CNodeNoVersionIdle(CLazyNode):
def __init__(self):
super().__init__()
# Node that sends a version but not a verack.
class CNodeNoVerackIdle(CLazyNode):
def __init__(self):
self.version_received = False
super().__init__()
def on_reject(self, message):
pass
def on_verack(self, message):
pass
# When version is received, don't reply with a verack. Instead, see if the
# node will give us a message that it shouldn't. This is not an exhaustive
# list!
def on_version(self, message):
self.version_received = True
self.send_message(msg_ping())
self.send_message(msg_getaddr())
class P2PLeakTest(DefiTestFramework):
def set_test_params(self):
self.num_nodes = 1
self.extra_args = [["-banscore=" + str(banscore)]]
def run_test(self):
no_version_bannode = self.nodes[0].add_p2p_connection(
CNodeNoVersionBan(), send_version=False, wait_for_verack=False
)
no_version_idlenode = self.nodes[0].add_p2p_connection(
CNodeNoVersionIdle(), send_version=False, wait_for_verack=False
)
no_verack_idlenode = self.nodes[0].add_p2p_connection(CNodeNoVerackIdle())
wait_until(
lambda: no_version_bannode.ever_connected, timeout=10, lock=mininode_lock
)
wait_until(
lambda: no_version_idlenode.ever_connected, timeout=10, lock=mininode_lock
)
wait_until(
lambda: no_verack_idlenode.version_received, timeout=10, lock=mininode_lock
)
# Mine a block and make sure that it's not sent to the connected nodes
self.nodes[0].generate(1)
# Give the node enough time to possibly leak out a message
time.sleep(5)
# This node should have been banned
assert not no_version_bannode.is_connected
self.nodes[0].disconnect_p2ps()
# Wait until all connections are closed
wait_until(lambda: len(self.nodes[0].getpeerinfo()) == 0)
# Make sure no unexpected messages came in
assert not no_version_bannode.unexpected_msg
assert not no_version_idlenode.unexpected_msg
assert not no_verack_idlenode.unexpected_msg
if __name__ == "__main__":
P2PLeakTest().main() |
5,308 | bq table unversioned | """Methods for working with stable table schemas."""
import json
import tarfile
import urllib.request
from dataclasses import dataclass
from io import BytesIO
from itertools import groupby
from typing import List
from bigquery_etl.config import ConfigLoader
from bigquery_etl.dryrun import DryRun
@dataclass
class SchemaFile:
"""Container for metadata about a JSON schema and corresponding BQ table."""
schema: dict
schema_id: str
bq_dataset_family: str
bq_table: str
document_namespace: str
document_type: str
document_version: int
@property
def METHOD_NAME(self):
"""Return table_id with version suffix stripped."""
return "_".join(self.bq_table.split("_")[:-1])
@property
def stable_table(self):
"""Return BQ stable table name in <dataset>.<table> form."""
return f"{self.bq_dataset_family}_stable.{self.bq_table}"
@property
def user_facing_view(self):
"""Return user-facing view name in <dataset>.<view> form."""
return f"{self.bq_dataset_family}.{self.METHOD_NAME}"
@property
def sortkey(self):
"""Return variant of stable_table with zero-padded version for sorting."""
return (
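# Hypothetical example: "telemetry_stable.main_v4" with version 4 becomes
# "telemetry_stable.main0004" (version re-appended zero-padded, no separator).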
"_".join(self.stable_table.split("_")[:-1]) + f"{self.document_version:04d}"
)
def prod_schemas_uri():
"""Return URI for the schemas tarball deployed to shared-prod.
We construct a fake query and send it to the dry run service in order
to read dataset labels, which contains the commit hash associated
with the most recent production schemas deploy.
"""
dryrun = DryRun("telemetry_derived/foo/query.sql", content="SELECT 1")
build_id = dryrun.get_dataset_labels()["schemas_build_id"]
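# The label value is assumed to have the form '<timestamp>_<commit-hash>';
# only the trailing commit hash is kept.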
commit_hash = build_id.split("_")[-1]
mps_uri = ConfigLoader.get("schema", "mozilla_pipeline_schemas_uri")
return f"{mps_uri}/archive/{commit_hash}.tar.gz"
def get_stable_table_schemas() -> List[SchemaFile]:
"""Fetch last schema metadata per doctype by version."""
schemas_uri = prod_schemas_uri()
with urllib.request.urlopen(schemas_uri) as f:
tarbytes = BytesIO(f.read())
schemas = []
with tarfile.open(fileobj=tarbytes, mode="r:gz") as tar:
for tarinfo in tar:
if tarinfo.name.endswith(".schema.json"):
*_, document_namespace, document_type, basename = tarinfo.name.split(
"/"
)
version = int(basename.split(".")[1])
schema = json.load(tar.extractfile(tarinfo.name)) # type: ignore
bq_schema = {}
# Schemas without pipeline metadata (like glean/glean)
# do not have corresponding BQ tables, so we skip them here.
pipeline_meta = schema.get("mozPipelineMetadata", None)
if pipeline_meta is None:
continue
try:
bq_schema_file = tar.extractfile(
tarinfo.name.replace(".schema.json", ".bq")
)
bq_schema = json.load(bq_schema_file) # type: ignore
except KeyError as e:
print(f"Cannot get Bigquery schema for {tarinfo.name}: {e}")
schemas.append(
SchemaFile(
schema=bq_schema,
schema_id=schema.get("$id", ""),
bq_dataset_family=pipeline_meta["bq_dataset_family"],
bq_table=pipeline_meta["bq_table"],
document_namespace=document_namespace,
document_type=document_type,
document_version=version,
)
)
# Exclude doctypes maintained in separate projects.
for prefix in ConfigLoader.get("schema", "skip_prefixes", fallback=[]):
schemas = [
schema
for schema in schemas
if not schema.document_namespace.startswith(prefix)
]
# Retain only the highest version per doctype.
schemas = sorted(
schemas,
key=lambda t: f"{t.document_namespace}/{t.document_type}/{t.document_version:03d}",
)
schemas = [
last
for k, (*_, last) in groupby(
schemas, lambda t: f"{t.document_namespace}/{t.document_type}"
)
]
return schemas |
5,309 | schools yishuvs api | # pylint: disable=no-member
import json
import logging
import pandas as pd
from flask import Response, request
from sqlalchemy import and_, not_
from anyway.app_and_db import db
from anyway.models import School, SchoolWithDescription2020
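# Note: the `== None` and `== 0` comparisons below are intentional --
# SQLAlchemy overloads them into SQL `IS NULL` / `= 0` expressions, so they
# must not be rewritten as `is None`.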
def schools_api():
logging.debug("getting schools")
schools = (
db.session.query(School)
.filter(
not_(and_(School.latitude == 0, School.longitude == 0)),
not_(and_(School.latitude == None, School.longitude == None)),
)
.with_entities(
School.yishuv_symbol,
School.yishuv_name,
School.school_name,
School.longitude,
School.latitude,
)
.all()
)
schools_list = [
{
"yishuv_symbol": x.yishuv_symbol,
"yishuv_name": x.yishuv_name,
"school_name": x.school_name,
"longitude": x.longitude,
"latitude": x.latitude,
}
for x in schools
]
response = Response(json.dumps(schools_list, default=str), mimetype="application/json")
response.headers.add("Access-Control-Allow-Origin", "*")
return response
def schools_description_api():
# Disable all the no-member violations in this function
# pylint: disable=no-member
logging.debug("getting schools with description")
query_obj = (
db.session.query(SchoolWithDescription2020)
.filter(
not_(
and_(
SchoolWithDescription2020.latitude == 0,
SchoolWithDescription2020.longitude == 0,
)
),
not_(
and_(
SchoolWithDescription2020.latitude == None,
SchoolWithDescription2020.longitude == None,
)
),
)
.with_entities(
SchoolWithDescription2020.school_id,
SchoolWithDescription2020.school_name,
SchoolWithDescription2020.municipality_name,
SchoolWithDescription2020.yishuv_name,
SchoolWithDescription2020.institution_type,
SchoolWithDescription2020.location_accuracy,
SchoolWithDescription2020.longitude,
SchoolWithDescription2020.latitude,
)
)
df = pd.read_sql_query(query_obj.statement, query_obj.session.bind)
schools_list = df.to_dict(orient="records")
response = Response(json.dumps(schools_list, default=str), mimetype="application/json")
response.headers.add("Access-Control-Allow-Origin", "*")
return response
def METHOD_NAME():
logging.debug("getting schools yishuvs")
schools_yishuvs = (
db.session.query(SchoolWithDescription2020)
.filter(
not_(
and_(
SchoolWithDescription2020.latitude == 0,
SchoolWithDescription2020.longitude == 0,
)
),
not_(
and_(
SchoolWithDescription2020.latitude == None,
SchoolWithDescription2020.longitude == None,
)
),
)
.group_by(SchoolWithDescription2020.yishuv_name)
.with_entities(SchoolWithDescription2020.yishuv_name)
.all()
)
schools_yishuvs_list = sorted([x[0] for x in schools_yishuvs])
response = Response(json.dumps(schools_yishuvs_list, default=str), mimetype="application/json")
response.headers.add("Access-Control-Allow-Origin", "*")
return response
def schools_names_api():
# Disable all the no-member violations in this function
# pylint: disable=no-member
logging.debug("getting schools names")
schools_data = json.load(open("static/data/schools/schools_names.json"))
response = Response(json.dumps(schools_data, default=str), mimetype="application/json")
response.headers.add("Access-Control-Allow-Origin", "*")
return response
def injured_around_schools_api():
# Disable all the no-member violations in this function
# pylint: disable=no-member
logging.debug("getting injured around schools api")
school_id = request.values.get("school_id")
all_data = json.load(open("static/data/schools/injured_around_schools_api_2023.json"))
school_data = all_data[school_id]
response = Response(json.dumps(school_data, default=str), mimetype="application/json")
response.headers.add("Access-Control-Allow-Origin", "*")
return response
def injured_around_schools_sex_graphs_data_api():
# Disable all the no-member violations in this function
# pylint: disable=no-member
logging.debug("getting injured around schools sex graphs data api")
school_id = request.values.get("school_id")
all_data = json.load(
open("static/data/schools/injured_around_schools_sex_graphs_data_api_2023.json")
)
school_data = all_data[school_id]
response = Response(json.dumps(school_data, default=str), mimetype="application/json")
response.headers.add("Access-Control-Allow-Origin", "*")
return response
def injured_around_schools_months_graphs_data_api():
logging.debug("getting injured around schools months graphs data api")
school_id = request.values.get("school_id")
all_data = json.load(
open("static/data/schools/injured_around_schools_months_graphs_data_api_2023.json")
)
school_data = all_data[school_id]
response = Response(json.dumps(school_data, default=str), mimetype="application/json")
response.headers.add("Access-Control-Allow-Origin", "*")
return response |
5,310 | type | # coding=utf-8
# *** WARNING: this file was generated by pulumi. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import copy
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
__all__ = [
'GetRegistryResult',
'AwaitableGetRegistryResult',
'get_registry',
'get_registry_output',
]
@pulumi.output_type
class GetRegistryResult:
def __init__(__self__, id=None, identity=None, kind=None, location=None, name=None, registry_properties=None, sku=None, system_data=None, tags=None, METHOD_NAME=None):
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if identity and not isinstance(identity, dict):
raise TypeError("Expected argument 'identity' to be a dict")
pulumi.set(__self__, "identity", identity)
if kind and not isinstance(kind, str):
raise TypeError("Expected argument 'kind' to be a str")
pulumi.set(__self__, "kind", kind)
if location and not isinstance(location, str):
raise TypeError("Expected argument 'location' to be a str")
pulumi.set(__self__, "location", location)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if registry_properties and not isinstance(registry_properties, dict):
raise TypeError("Expected argument 'registry_properties' to be a dict")
pulumi.set(__self__, "registry_properties", registry_properties)
if sku and not isinstance(sku, dict):
raise TypeError("Expected argument 'sku' to be a dict")
pulumi.set(__self__, "sku", sku)
if system_data and not isinstance(system_data, dict):
raise TypeError("Expected argument 'system_data' to be a dict")
pulumi.set(__self__, "system_data", system_data)
if tags and not isinstance(tags, dict):
raise TypeError("Expected argument 'tags' to be a dict")
pulumi.set(__self__, "tags", tags)
if METHOD_NAME and not isinstance(METHOD_NAME, str):
raise TypeError("Expected argument 'type' to be a str")
pulumi.set(__self__, "type", METHOD_NAME)
@property
@pulumi.getter
def id(self) -> str:
"""
Fully qualified resource ID for the resource. Ex - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}
"""
return pulumi.get(self, "id")
@property
@pulumi.getter
def identity(self) -> Optional['outputs.ManagedServiceIdentityResponse']:
"""
Managed service identity (system assigned and/or user assigned identities)
"""
return pulumi.get(self, "identity")
@property
@pulumi.getter
def kind(self) -> Optional[str]:
"""
Metadata used by portal/tooling/etc to render different UX experiences for resources of the same type.
"""
return pulumi.get(self, "kind")
@property
@pulumi.getter
def location(self) -> str:
"""
The geo-location where the resource lives
"""
return pulumi.get(self, "location")
@property
@pulumi.getter
def name(self) -> str:
"""
The name of the resource
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="registryProperties")
def registry_properties(self) -> 'outputs.RegistryResponse':
"""
[Required] Additional attributes of the entity.
"""
return pulumi.get(self, "registry_properties")
@property
@pulumi.getter
def sku(self) -> Optional['outputs.SkuResponse']:
"""
Sku details required for ARM contract for Autoscaling.
"""
return pulumi.get(self, "sku")
@property
@pulumi.getter(name="systemData")
def system_data(self) -> 'outputs.SystemDataResponse':
"""
Azure Resource Manager metadata containing createdBy and modifiedBy information.
"""
return pulumi.get(self, "system_data")
@property
@pulumi.getter
def tags(self) -> Optional[Mapping[str, str]]:
"""
Resource tags.
"""
return pulumi.get(self, "tags")
@property
@pulumi.getter
def METHOD_NAME(self) -> str:
"""
The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or "Microsoft.Storage/storageAccounts"
"""
return pulumi.get(self, "type")
class AwaitableGetRegistryResult(GetRegistryResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetRegistryResult(
id=self.id,
identity=self.identity,
kind=self.kind,
location=self.location,
name=self.name,
registry_properties=self.registry_properties,
sku=self.sku,
system_data=self.system_data,
tags=self.tags,
METHOD_NAME=self.METHOD_NAME)
def get_registry(registry_name: Optional[str] = None,
resource_group_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetRegistryResult:
"""
Use this data source to access information about an existing resource.
:param str registry_name: Name of Azure Machine Learning registry. This is case-insensitive
:param str resource_group_name: The name of the resource group. The name is case insensitive.
"""
__args__ = dict()
__args__['registryName'] = registry_name
__args__['resourceGroupName'] = resource_group_name
opts = pulumi.InvokeOptions.merge(_utilities.get_invoke_opts_defaults(), opts)
__ret__ = pulumi.runtime.invoke('azure-native:machinelearningservices/v20230401:getRegistry', __args__, opts=opts, typ=GetRegistryResult).value
return AwaitableGetRegistryResult(
id=pulumi.get(__ret__, 'id'),
identity=pulumi.get(__ret__, 'identity'),
kind=pulumi.get(__ret__, 'kind'),
location=pulumi.get(__ret__, 'location'),
name=pulumi.get(__ret__, 'name'),
registry_properties=pulumi.get(__ret__, 'registry_properties'),
sku=pulumi.get(__ret__, 'sku'),
system_data=pulumi.get(__ret__, 'system_data'),
tags=pulumi.get(__ret__, 'tags'),
METHOD_NAME=pulumi.get(__ret__, 'type'))
@_utilities.lift_output_func(get_registry)
def get_registry_output(registry_name: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetRegistryResult]:
"""
Use this data source to access information about an existing resource.
:param str registry_name: Name of Azure Machine Learning registry. This is case-insensitive
:param str resource_group_name: The name of the resource group. The name is case insensitive.
"""
... |
5,311 | get pattern to quantize handlers | import torch
from torch.fx.graph import (
Node,
)
from .utils import (
all_node_args_have_no_tensors,
)
from torch.ao.quantization.backend_config import (
BackendConfig,
DTypeConfig,
ObservationType,
)
from torch.ao.quantization.utils import (
NodePattern,
Pattern,
QuantizerCls,
)
from abc import ABC
from typing import Callable, Dict, List, Type, Optional
__all__ = [
"QuantizeHandler",
"BinaryOpQuantizeHandler",
"CatQuantizeHandler",
"ConvReluQuantizeHandler",
"LinearReLUQuantizeHandler",
"BatchNormQuantizeHandler",
"EmbeddingQuantizeHandler",
"RNNDynamicQuantizeHandler",
"DefaultNodeQuantizeHandler",
"FixedQParamsOpQuantizeHandler",
"CopyNodeQuantizeHandler",
"GeneralTensorShapeOpQuantizeHandler",
"CustomModuleQuantizeHandler",
"StandaloneModuleQuantizeHandler",
]
def _default_root_node_getter(node_pattern):
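# Assumed illustration of the reverse-nested pattern format: for a pattern
# like (relu_node, (bn_node, conv_node)) this walks to conv_node, the root.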
if node_pattern is None:
return node_pattern
while not isinstance(node_pattern, Node):
node_pattern = node_pattern[-1]
return node_pattern
# Base Pattern Handler
class QuantizeHandler(ABC):
""" Base handler class for the quantizer patterns
"""
def __init__(
self,
node_pattern: NodePattern,
modules: Dict[str, torch.nn.Module],
root_node_getter: Optional[Callable] = None,
is_custom_module=False,
is_standalone_module=False):
""" Records pattern information in __init__, which will be used
in convert
"""
self.node_pattern = node_pattern
self.modules = modules
if root_node_getter is None:
root_node_getter = _default_root_node_getter
self.root_node = root_node_getter(node_pattern)
self.is_custom_module_ = is_custom_module
self.is_standalone_module_ = is_standalone_module
self.num_tensor_args = 0
# determine how many of the first two args are Tensors (versus scalars)
# this distinguishes things like "x + y" from "x + 2" or "2 + x"
if isinstance(self.root_node, Node):
cache_for_no_tensor_check: Dict[Node, bool] = {}
for arg_idx in range(len(self.root_node.args)):
arg = self.root_node.args[arg_idx]
if isinstance(arg, Node) and (
not all_node_args_have_no_tensors(
arg, self.modules, cache_for_no_tensor_check)):
self.num_tensor_args += 1
def is_general_tensor_value_op(self) -> bool:
"""
Returns True if the operator works for both floating point and
quantized input, and either does some computation based on the input
Tensor or only re-arranges the Tensor values / queries some metadata
about the Tensor. In that case we need to insert an observer/fake_quant
for the output of the operator (the same observer instance as for the
input), since the distribution of values differs between the input and
output Tensors (for HistogramObserver) while they share the same
quantization parameters.
Example operator: avgpool2d, reshape, transpose, maxpool2d
Example observed operator:
observer_0 - avgpool2d - observer_0 (same observer instance as input)
"""
return False
def is_custom_module(self):
return self.is_custom_module_
def is_standalone_module(self):
return self.is_standalone_module_
def _get_quantize_handler_cls(
observation_type: ObservationType,
dtype_configs: List[DTypeConfig],
num_tensor_args_to_observation_type: Dict[int, ObservationType]) -> Type[QuantizeHandler]:
"""
Return a configurable QuantizeHandler that matches the given specifications from the backend.
"""
class ConfigurableQuantizeHandler(QuantizeHandler):
def __init__(
self,
node_pattern: NodePattern,
modules: Dict[str, torch.nn.Module],
root_node_getter: Optional[Callable] = None):
super().__init__(node_pattern, modules, root_node_getter)
if num_tensor_args_to_observation_type:
assert self.num_tensor_args in num_tensor_args_to_observation_type, \
f"Must provide observation_type config for tensor number {self.num_tensor_args}" \
f" in num_tensor_args_to_observation_type for {node_pattern}"
self.observation_type = num_tensor_args_to_observation_type[self.num_tensor_args]
else:
self.observation_type = observation_type
self.dtype_configs = dtype_configs
def is_general_tensor_value_op(self) -> bool:
return self.observation_type == ObservationType.OUTPUT_SHARE_OBSERVER_WITH_INPUT
return ConfigurableQuantizeHandler
def METHOD_NAME(backend_config: BackendConfig) -> Dict[Pattern, QuantizerCls]:
"""
Note: Quantize handler is just a holder for some check methods like
(should_insert_observer_for_output), maybe this can be a enum as well,
we can refactor this after we convert the path for fbgemm/qnnpack fully to the
new path, this is not exposed to backend developers
"""
pattern_to_quantize_handlers = {}
for pattern, config in backend_config._pattern_complex_format_to_config.items():
observation_type = config.observation_type
dtype_configs = config.dtype_configs
num_tensor_args_to_observation_type = config._num_tensor_args_to_observation_type
pattern_to_quantize_handlers[pattern] = \
_get_quantize_handler_cls(
observation_type,
dtype_configs,
num_tensor_args_to_observation_type)
return pattern_to_quantize_handlers
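# Hypothetical usage sketch: during prepare, the mapping produced above is
# consulted for each matched pattern and a handler is instantiated per match:
#   handler_cls = pattern_to_quantize_handlers[matched_pattern]
#   handler = handler_cls(node_pattern, named_modules)
#   share_observer = handler.is_general_tensor_value_op()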
# TODO: remove this class, this is still exposed in torch.ao.quantization
# but we should be able to break bc
class BinaryOpQuantizeHandler(QuantizeHandler):
pass
class CatQuantizeHandler(QuantizeHandler):
pass
# TODO: remove this class
class ConvReluQuantizeHandler(QuantizeHandler):
pass
# TODO: remove this class
class LinearReLUQuantizeHandler(QuantizeHandler):
pass
# TODO: remove this class
class BatchNormQuantizeHandler(QuantizeHandler):
pass
# TODO: remove this class
class EmbeddingQuantizeHandler(QuantizeHandler):
pass
# TODO: remove this class
class RNNDynamicQuantizeHandler(QuantizeHandler):
pass
# TODO: remove this class
class DefaultNodeQuantizeHandler(QuantizeHandler):
""" Common quantized op, first input and first output will be quantized
"""
pass
# TODO: remove this class
class FixedQParamsOpQuantizeHandler(QuantizeHandler):
pass
# TODO: remove
class CopyNodeQuantizeHandler(QuantizeHandler):
pass
# TODO: remove
class GeneralTensorShapeOpQuantizeHandler(QuantizeHandler):
pass
# TODO: not used, can be removed after torch.ao.quantization namespace is deprecated
class CustomModuleQuantizeHandler(QuantizeHandler):
pass
# TODO: not used, can be removed after torch.ao.quantization namespace is deprecated
class StandaloneModuleQuantizeHandler(QuantizeHandler):
pass |
5,312 | default node factory | #
# Copyright (c) 2022, Neptune Labs Sp. z o.o.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
__all__ = ["ContainerStructure"]
from collections import deque
from typing import (
Callable,
Generic,
List,
Optional,
TypeVar,
Union,
)
from neptune.exceptions import MetadataInconsistency
from neptune.internal.utils.paths import path_to_str
T = TypeVar("T")
Node = TypeVar("Node")
def METHOD_NAME(path):
return {}
class ContainerStructure(Generic[T, Node]):
def __init__(self, node_factory: Optional[Callable[[List[str]], Node]] = None):
if node_factory is None:
node_factory = METHOD_NAME
self._structure = node_factory(path=[])
self._node_factory = node_factory
self._node_type = type(self._structure)
def get_structure(self) -> Node:
return self._structure
def _iterate_node(self, node, path_prefix: List[str]):
"""this iterates in BFS order in order to more meaningful suggestions before cutoff"""
nodes_queue = deque([(node, path_prefix)])
while nodes_queue:
node, prefix = nodes_queue.popleft()
for key, value in node.items():
if not isinstance(value, self._node_type):
yield prefix + [key]
else:
nodes_queue.append((value, prefix + [key]))
def iterate_subpaths(self, path_prefix: List[str]):
root = self.get(path_prefix)
for path in self._iterate_node(root or {}, path_prefix):
yield path_to_str(path)
def get(self, path: List[str]) -> Union[T, Node, None]:
ref = self._structure
for index, part in enumerate(path):
if not isinstance(ref, self._node_type):
raise MetadataInconsistency(
"Cannot access path '{}': '{}' is already defined as an attribute, "
"not a namespace".format(path_to_str(path), path_to_str(path[:index]))
)
if part not in ref:
return None
ref = ref[part]
return ref
def set(self, path: List[str], attr: T) -> None:
ref = self._structure
location, attribute_name = path[:-1], path[-1]
for idx, part in enumerate(location):
if part not in ref:
ref[part] = self._node_factory(location[: idx + 1])
ref = ref[part]
if not isinstance(ref, self._node_type):
raise MetadataInconsistency(
"Cannot access path '{}': '{}' is already defined as an attribute, "
"not a namespace".format(path_to_str(path), part)
)
if attribute_name in ref and isinstance(ref[attribute_name], self._node_type):
if isinstance(attr, self._node_type):
                # in-between nodes are auto-created, so ignoring this is OK unless we want to change the type
return
raise MetadataInconsistency("Cannot set attribute '{}'. It's a namespace".format(path_to_str(path)))
ref[attribute_name] = attr
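    # Illustrative usage with the default dict-based node factory:
    #   s = ContainerStructure()
    #   s.set(["params", "lr"], 0.01)
    #   s.get(["params", "lr"])  # -> 0.01
    #   s.get(["params"])        # -> {"lr": 0.01} (a namespace node)
    #   s.set(["params"], 1)     # -> raises MetadataInconsistency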
def pop(self, path: List[str]) -> None:
self._pop_impl(self._structure, path, path)
def _pop_impl(self, ref, sub_path: List[str], attr_path: List[str]):
if not sub_path:
return
head, tail = sub_path[0], sub_path[1:]
if head not in ref:
raise MetadataInconsistency("Cannot delete {}. Attribute not found.".format(path_to_str(attr_path)))
if not tail:
if isinstance(ref[head], self._node_type):
raise MetadataInconsistency(
"Cannot delete {}. It's a namespace, not an attribute.".format(path_to_str(attr_path))
)
del ref[head]
else:
self._pop_impl(ref[head], tail, attr_path)
if not ref[head]:
del ref[head]
def clear(self):
self._structure.clear() |
5,313 | tags | # coding=utf-8
# *** WARNING: this file was generated by pulumi. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import copy
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
__all__ = [
'GetNamespacePnsCredentialsResult',
'AwaitableGetNamespacePnsCredentialsResult',
'get_namespace_pns_credentials',
'get_namespace_pns_credentials_output',
]
@pulumi.output_type
class GetNamespacePnsCredentialsResult:
"""
    Description of NotificationHub PNS Credentials. This is the response of the POST requests that return namespace or hub
PNS credentials.
"""
def __init__(__self__, id=None, location=None, name=None, properties=None, system_data=None, METHOD_NAME=None, type=None):
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if location and not isinstance(location, str):
raise TypeError("Expected argument 'location' to be a str")
pulumi.set(__self__, "location", location)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if properties and not isinstance(properties, dict):
raise TypeError("Expected argument 'properties' to be a dict")
pulumi.set(__self__, "properties", properties)
if system_data and not isinstance(system_data, dict):
raise TypeError("Expected argument 'system_data' to be a dict")
pulumi.set(__self__, "system_data", system_data)
if METHOD_NAME and not isinstance(METHOD_NAME, dict):
raise TypeError("Expected argument 'tags' to be a dict")
pulumi.set(__self__, "tags", METHOD_NAME)
if type and not isinstance(type, str):
raise TypeError("Expected argument 'type' to be a str")
pulumi.set(__self__, "type", type)
@property
@pulumi.getter
def id(self) -> str:
"""
Fully qualified resource ID for the resource. E.g. "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}"
"""
return pulumi.get(self, "id")
@property
@pulumi.getter
def location(self) -> Optional[str]:
"""
Deprecated - only for compatibility.
"""
return pulumi.get(self, "location")
@property
@pulumi.getter
def name(self) -> str:
"""
The name of the resource
"""
return pulumi.get(self, "name")
@property
@pulumi.getter
def properties(self) -> 'outputs.PnsCredentialsResponse':
"""
Collection of Notification Hub or Notification Hub Namespace PNS credentials.
"""
return pulumi.get(self, "properties")
@property
@pulumi.getter(name="systemData")
def system_data(self) -> 'outputs.SystemDataResponse':
"""
Azure Resource Manager metadata containing createdBy and modifiedBy information.
"""
return pulumi.get(self, "system_data")
@property
@pulumi.getter
def METHOD_NAME(self) -> Optional[Mapping[str, str]]:
"""
Deprecated - only for compatibility.
"""
return pulumi.get(self, "tags")
@property
@pulumi.getter
def type(self) -> str:
"""
The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or "Microsoft.Storage/storageAccounts"
"""
return pulumi.get(self, "type")
class AwaitableGetNamespacePnsCredentialsResult(GetNamespacePnsCredentialsResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetNamespacePnsCredentialsResult(
id=self.id,
location=self.location,
name=self.name,
properties=self.properties,
system_data=self.system_data,
METHOD_NAME=self.METHOD_NAME,
type=self.type)
def get_namespace_pns_credentials(namespace_name: Optional[str] = None,
resource_group_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetNamespacePnsCredentialsResult:
"""
    Description of NotificationHub PNS Credentials. This is the response of the POST requests that return namespace or hub
PNS credentials.
:param str namespace_name: Namespace name
:param str resource_group_name: The name of the resource group. The name is case insensitive.
"""
__args__ = dict()
__args__['namespaceName'] = namespace_name
__args__['resourceGroupName'] = resource_group_name
opts = pulumi.InvokeOptions.merge(_utilities.get_invoke_opts_defaults(), opts)
__ret__ = pulumi.runtime.invoke('azure-native:notificationhubs/v20230101preview:getNamespacePnsCredentials', __args__, opts=opts, typ=GetNamespacePnsCredentialsResult).value
return AwaitableGetNamespacePnsCredentialsResult(
id=pulumi.get(__ret__, 'id'),
location=pulumi.get(__ret__, 'location'),
name=pulumi.get(__ret__, 'name'),
properties=pulumi.get(__ret__, 'properties'),
system_data=pulumi.get(__ret__, 'system_data'),
METHOD_NAME=pulumi.get(__ret__, 'tags'),
type=pulumi.get(__ret__, 'type'))
@_utilities.lift_output_func(get_namespace_pns_credentials)
def get_namespace_pns_credentials_output(namespace_name: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetNamespacePnsCredentialsResult]:
"""
    Description of NotificationHub PNS Credentials. This is the response of the POST requests that return namespace or hub
PNS credentials.
:param str namespace_name: Namespace name
:param str resource_group_name: The name of the resource group. The name is case insensitive.
"""
... |
5,314 | test domain range scale oetf inverse b | """
Define the unit tests for the
:mod:`colour.models.rgb.transfer_functions.itur_bt_709` module.
"""
import numpy as np
import unittest
from colour.models.rgb.transfer_functions import oetf_BT709, oetf_inverse_BT709
from colour.utilities import domain_range_scale, ignore_numpy_errors
__author__ = "Colour Developers"
__copyright__ = "Copyright 2013 Colour Developers"
__license__ = "BSD-3-Clause - https://opensource.org/licenses/BSD-3-Clause"
__maintainer__ = "Colour Developers"
__email__ = "colour-developers@colour-science.org"
__status__ = "Production"
__all__ = [
"TestOetf_BT709",
"TestOetf_inverse_BT709",
]
class TestOetf_BT709(unittest.TestCase):
"""
Define :func:`colour.models.rgb.transfer_functions.itur_bt_709.oetf_BT709`
definition unit tests methods.
"""
def test_oetf_BT709(self):
"""
Test :func:`colour.models.rgb.transfer_functions.itur_bt_709.\
oetf_BT709` definition.
"""
self.assertAlmostEqual(oetf_BT709(0.0), 0.0, places=7)
self.assertAlmostEqual(oetf_BT709(0.015), 0.067500000000000, places=7)
self.assertAlmostEqual(oetf_BT709(0.18), 0.409007728864150, places=7)
self.assertAlmostEqual(oetf_BT709(1.0), 1.0, places=7)
def test_n_dimensional_oetf_BT709(self):
"""
Test :func:`colour.models.rgb.transfer_functions.itur_bt_709.\
oetf_BT709` definition n-dimensional arrays support.
"""
L = 0.18
V = oetf_BT709(L)
L = np.tile(L, 6)
V = np.tile(V, 6)
np.testing.assert_array_almost_equal(oetf_BT709(L), V, decimal=7)
L = np.reshape(L, (2, 3))
V = np.reshape(V, (2, 3))
np.testing.assert_array_almost_equal(oetf_BT709(L), V, decimal=7)
L = np.reshape(L, (2, 3, 1))
V = np.reshape(V, (2, 3, 1))
np.testing.assert_array_almost_equal(oetf_BT709(L), V, decimal=7)
def test_domain_range_scale_oetf_BT709(self):
"""
Test :func:`colour.models.rgb.transfer_functions.itur_bt_709.\
oetf_BT709` definition domain and range scale support.
"""
L = 0.18
V = oetf_BT709(L)
d_r = (("reference", 1), ("1", 1), ("100", 100))
for scale, factor in d_r:
with domain_range_scale(scale):
np.testing.assert_array_almost_equal(
oetf_BT709(L * factor), V * factor, decimal=7
)
@ignore_numpy_errors
def test_nan_oetf_BT709(self):
"""
Test :func:`colour.models.rgb.transfer_functions.itur_bt_709.\
oetf_BT709` definition nan support.
"""
oetf_BT709(np.array([-1.0, 0.0, 1.0, -np.inf, np.inf, np.nan]))
class TestOetf_inverse_BT709(unittest.TestCase):
"""
Define :func:`colour.models.rgb.transfer_functions.itur_bt_709.\
oetf_inverse_BT709` definition unit tests methods.
"""
def test_oetf_inverse_BT709(self):
"""
Test :func:`colour.models.rgb.transfer_functions.itur_bt_709.\
oetf_inverse_BT709` definition.
"""
self.assertAlmostEqual(oetf_inverse_BT709(0.0), 0.0, places=7)
self.assertAlmostEqual(
oetf_inverse_BT709(0.067500000000000), 0.015, places=7
)
self.assertAlmostEqual(
oetf_inverse_BT709(0.409007728864150), 0.18, places=7
)
self.assertAlmostEqual(oetf_inverse_BT709(1.0), 1.0, places=7)
def test_n_dimensional_oetf_inverse_BT709(self):
"""
Test :func:`colour.models.rgb.transfer_functions.itur_bt_709.\
oetf_inverse_BT709` definition n-dimensional arrays support.
"""
V = 0.409007728864150
L = oetf_inverse_BT709(V)
V = np.tile(V, 6)
L = np.tile(L, 6)
np.testing.assert_array_almost_equal(
oetf_inverse_BT709(V), L, decimal=7
)
V = np.reshape(V, (2, 3))
L = np.reshape(L, (2, 3))
np.testing.assert_array_almost_equal(
oetf_inverse_BT709(V), L, decimal=7
)
V = np.reshape(V, (2, 3, 1))
L = np.reshape(L, (2, 3, 1))
np.testing.assert_array_almost_equal(
oetf_inverse_BT709(V), L, decimal=7
)
def METHOD_NAME(self):
"""
Test :func:`colour.models.rgb.transfer_functions.itur_bt_709.\
oetf_inverse_BT709` definition domain and range scale support.
"""
V = 0.409007728864150
L = oetf_inverse_BT709(V)
d_r = (("reference", 1), ("1", 1), ("100", 100))
for scale, factor in d_r:
with domain_range_scale(scale):
np.testing.assert_array_almost_equal(
oetf_inverse_BT709(V * factor), L * factor, decimal=7
)
@ignore_numpy_errors
def test_nan_oetf_inverse_BT709(self):
"""
Test :func:`colour.models.rgb.transfer_functions.itur_bt_709.\
oetf_inverse_BT709` definition nan support.
"""
oetf_inverse_BT709(np.array([-1.0, 0.0, 1.0, -np.inf, np.inf, np.nan]))
if __name__ == "__main__":
unittest.main() |
5,315 | test create inference graph without mfcc | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for data input for speech commands."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.examples.speech_commands import freeze
from tensorflow.python.framework import test_util
from tensorflow.python.platform import test
class FreezeTest(test.TestCase):
@test_util.run_deprecated_v1
def testCreateInferenceGraphWithMfcc(self):
with self.cached_session() as sess:
freeze.create_inference_graph(
wanted_words='a,b,c,d',
sample_rate=16000,
clip_duration_ms=1000.0,
clip_stride_ms=30.0,
window_size_ms=30.0,
window_stride_ms=10.0,
feature_bin_count=40,
model_architecture='conv',
preprocess='mfcc')
self.assertIsNotNone(sess.graph.get_tensor_by_name('wav_data:0'))
self.assertIsNotNone(
sess.graph.get_tensor_by_name('decoded_sample_data:0'))
self.assertIsNotNone(sess.graph.get_tensor_by_name('labels_softmax:0'))
ops = [node.op for node in sess.graph_def.node]
self.assertEqual(1, ops.count('Mfcc'))
@test_util.run_deprecated_v1
def METHOD_NAME(self):
with self.cached_session() as sess:
freeze.create_inference_graph(
wanted_words='a,b,c,d',
sample_rate=16000,
clip_duration_ms=1000.0,
clip_stride_ms=30.0,
window_size_ms=30.0,
window_stride_ms=10.0,
feature_bin_count=40,
model_architecture='conv',
preprocess='average')
self.assertIsNotNone(sess.graph.get_tensor_by_name('wav_data:0'))
self.assertIsNotNone(
sess.graph.get_tensor_by_name('decoded_sample_data:0'))
self.assertIsNotNone(sess.graph.get_tensor_by_name('labels_softmax:0'))
ops = [node.op for node in sess.graph_def.node]
self.assertEqual(0, ops.count('Mfcc'))
@test_util.run_deprecated_v1
def testCreateInferenceGraphWithMicro(self):
with self.cached_session() as sess:
freeze.create_inference_graph(
wanted_words='a,b,c,d',
sample_rate=16000,
clip_duration_ms=1000.0,
clip_stride_ms=30.0,
window_size_ms=30.0,
window_stride_ms=10.0,
feature_bin_count=40,
model_architecture='conv',
preprocess='micro')
self.assertIsNotNone(sess.graph.get_tensor_by_name('wav_data:0'))
self.assertIsNotNone(
sess.graph.get_tensor_by_name('decoded_sample_data:0'))
self.assertIsNotNone(sess.graph.get_tensor_by_name('labels_softmax:0'))
@test_util.run_deprecated_v1
def testFeatureBinCount(self):
with self.cached_session() as sess:
freeze.create_inference_graph(
wanted_words='a,b,c,d',
sample_rate=16000,
clip_duration_ms=1000.0,
clip_stride_ms=30.0,
window_size_ms=30.0,
window_stride_ms=10.0,
feature_bin_count=80,
model_architecture='conv',
preprocess='average')
self.assertIsNotNone(sess.graph.get_tensor_by_name('wav_data:0'))
self.assertIsNotNone(
sess.graph.get_tensor_by_name('decoded_sample_data:0'))
self.assertIsNotNone(sess.graph.get_tensor_by_name('labels_softmax:0'))
ops = [node.op for node in sess.graph_def.node]
self.assertEqual(0, ops.count('Mfcc'))
if __name__ == '__main__':
test.main() |
5,316 | feed | from operator import attrgetter
from snakeoil.mappings import defaultdictkey
from snakeoil.strings import pluralism
from .. import addons, results, sources
from . import Check
class RedundantVersion(results.VersionResult, results.Info):
"""Redundant version(s) of a package in a specific slot."""
def __init__(self, slot, later_versions, **kwargs):
super().__init__(**kwargs)
self.slot = slot
self.later_versions = tuple(later_versions)
@property
def desc(self):
s = pluralism(self.later_versions)
versions = ", ".join(self.later_versions)
return f"slot({self.slot}) keywords are overshadowed by version{s}: {versions}"
class RedundantVersionCheck(Check):
"""Scan for overshadowed package versions.
Scan for versions that are likely shadowed by later versions from a
keywords standpoint (ignoring live packages that erroneously have
keywords).
Example: pkga-1 is keyworded amd64, pkga-2 is amd64.
pkga-1 can potentially be removed.
"""
_source = sources.PackageRepoSource
required_addons = (addons.profiles.ProfileAddon,)
known_results = frozenset([RedundantVersion])
@staticmethod
def mangle_argparser(parser):
parser.plugin.add_argument(
"--stable-only",
action="store_true",
help="consider redundant versions only within stable",
docs="""
If enabled, for each slot, only consider redundant versions
with stable keywords. This is useful for cases of cleanup after
successful stabilization.
""",
)
def __init__(self, *args, profile_addon):
super().__init__(*args)
self.keywords_profiles = {
keyword: sorted(profiles, key=attrgetter("name"))
for keyword, profiles in profile_addon.items()
}
def filter_later_profiles_masks(self, visible_cache, pkg, later_versions):
# check both stable/unstable profiles for stable KEYWORDS and only
# unstable profiles for unstable KEYWORDS
keywords = []
for keyword in pkg.sorted_keywords:
if keyword[0] != "~":
keywords.append("~" + keyword)
keywords.append(keyword)
# if a profile exists, where the package is visible, but the later aren't
# then it isn't redundant
visible_profiles = tuple(
profile
for keyword in keywords
for profile in self.keywords_profiles.get(keyword, ())
if visible_cache[(profile, pkg)]
)
return tuple(
later
for later in later_versions
if all(visible_cache[(profile, later)] for profile in visible_profiles)
)
def METHOD_NAME(self, pkgset):
if len(pkgset) == 1:
return
        # the algorithm is roughly this: spot stable versions, hunt for
        # subset-keyworded pkgs that are less than the max version;
        # repeat this for every overshadowing detected;
        # finally, do version comparison down slot lines
stack = []
bad = []
for pkg in reversed(pkgset):
            # skip live ebuilds to reduce false positives from erroneously applied keywords
if pkg.live:
continue
curr_set = {x for x in pkg.keywords if not x.startswith("-")}
if not curr_set:
continue
matches = [
ver for ver, keys in stack if ver.slot == pkg.slot and not curr_set.difference(keys)
]
# we've done our checks; now we inject unstable for any stable
# via this, earlier versions that are unstable only get flagged
# as "not needed" since their unstable flag is a subset of the
# stable.
# also, yes, have to use list comp here- we're adding as we go
curr_set.update([f"~{x}" for x in curr_set if not x.startswith("~")])
stack.append((pkg, curr_set))
if matches:
bad.append((pkg, matches))
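        # Illustrative run (hypothetical versions): with pkg-2 KEYWORDS="amd64"
        # already on the stack (curr_set == {"amd64", "~amd64"} after the
        # unstable injection above), an earlier pkg-1 KEYWORDS="~amd64" in the
        # same slot has no keywords outside that set, so it lands in `bad`.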
visible_cache = defaultdictkey(lambda profile_pkg: profile_pkg[0].visible(profile_pkg[1]))
for pkg, matches in reversed(bad):
if self.options.stable_only and all(
key.startswith("~") for x in matches for key in x.keywords
):
continue
if matches := self.filter_later_profiles_masks(visible_cache, pkg, matches):
later_versions = (x.fullver for x in sorted(matches))
yield RedundantVersion(pkg.slot, later_versions, pkg=pkg) |
5,317 | get noc base addr | #!/usr/bin/python3
# -*- coding:utf-8 -*-
# Copyright 2022-2023 Advanced Micro Devices Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ctypes import *
import logging
import tracer.tracerBase
import copy
import os
idcode_ddrmc_num_dict = {'0x4c9b093':3, '0x4c93093':2, '0x4c98093':3, '0x4ca9093':4, '0x4ca8093':4, '0x4cd2093':3, '0x4cd0093':3,
                             '0x4c9a093':3, '0x4cc1093':1, '0x4cc0093':1, '0x4cc9093':1, '0x4cc8093':1, '0x4cd3093':3}
class APMRecord(Structure):
_fields_ = [('time', c_double), ('data', c_ulonglong * 10)]
def output(self):
return "APM %.7f %d %d %d %d %d %d %d %d %d %d\n" % (self.time, *self.data)
class APM:
def __init__(self, mem_type, devid, path='/usr/lib/libmemperf.so', enable=True):
self.apmLib = cdll.LoadLibrary(path)
self.data = []
self.base_addr = []
self.interval = 0
self.act_period = 0
self.record_data_len = 0
self.type = mem_type
self.enabled = enable
if mem_type == "noc":
self.base_addr = self.METHOD_NAME(self.type, devid)
if self.enabled == False:
return
self.noc_base_addr = (c_int * len(self.base_addr))(*self.base_addr)
self.apmLib.create_noc_instance(
self.type.encode(), self.noc_base_addr, int(len(self.base_addr)))
elif mem_type == "apm":
self.apmLib.create_apm_instance(self.type.encode())
def start(self, interval=1.0):
if self.enabled == False:
return
self.interval = interval
self.apmLib.start.restype = c_double
return self.apmLib.start(c_double(interval))
def pushData(self, data):
if self.enabled == False:
return
self.data.append(copy.deepcopy(data))
def printData(self, data):
if self.enabled == False:
return
info = str()
info += "TimeStamp: %.7f\n" % data.time
read = [data.data[i]/self.act_period/1024 /
1024 for i in range(0, 10) if i % 2 == 0]
info += "Read Ports: "
for d in read:
info += "%8.1f" % d
info += " MB/s\n"
write = [data.data[i]/self.act_period/1024 /
1024 for i in range(0, 10) if i % 2 == 1]
info += "Write Ports: "
for d in write:
info += "%8.1f" % d
info += " MB/s\n "
return info
def METHOD_NAME(self, mem_type, devid):
if mem_type != "noc":
return
ddrmc_number = idcode_ddrmc_num_dict.get(devid, 0)
if ddrmc_number == 0:
logging.info("Can not get ddrmc/noc number!")
self.enabled = False
return
pre_base_addr = []
for (dirpath, dirnames, filenames) in os.walk("/proc/device-tree/axi"):
for file in dirnames:
if file.startswith("memory-controller"):
compatible = open(os.path.join(os.path.abspath(os.path.join(
dirpath, file)), "compatible"), "rt").read().strip().split(',')
driver_name = compatible[1].split('-')
if driver_name[1] != 'ddrmc':
continue
reg = os.path.join(os.path.abspath(
os.path.join(dirpath, file)), "reg")
f = open(reg, "rb")
numl = list(f.read())
addr_off = ((numl[20] << 24) + (numl[21] <<
16) + (numl[22] << 8) + (numl[23] << 0))
pre_base_addr.append(int(addr_off))
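                    # bytes 20-23 of the 'reg' property are read as one
                    # big-endian 32-bit value, i.e. equivalent to
                    # int.from_bytes(numl[20:24], "big"); which reg cell this
                    # corresponds to depends on the device tree's
                    # #address-cells/#size-cells layout (assumed here)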
pre_base_addr.sort()
return pre_base_addr[0:ddrmc_number]
def stop(self):
if self.enabled == False:
return
self.apmLib.stop()
data = APMRecord()
pd = pointer(data)
while (self.apmLib.pop_data(pd) == 0):
self.data.append(copy.deepcopy(data))
self.apmLib.get_act_period.restype = c_double
self.act_period = self.apmLib.get_act_period()
self.apmLib.get_record_data_len.restype = c_int
self.record_data_len = self.apmLib.get_record_data_len()
def transTimebase(self):
if self.enabled == False:
return
for i in range(0, len(self.data)):
for j in range(0, self.record_data_len):
self.data[i].data[j] = int(
self.data[i].data[j] / self.act_period)
def checkAPM():
if os.path.exists("/dev/uio1") == False:
return False
"""/sys/class/uio/uio1/device/of_node/name"""
if os.path.exists("/sys/class/uio/uio1/device/of_node/name") == False:
return False
with open("/sys/class/uio/uio1/device/of_node/name", "rt") as f:
try:
name = f.read().strip()
if name.startswith('perf-monitor') == False:
return False
        except Exception:
return False
return True
class xapmTracer(tracer.tracerBase.Tracer):
def __init__(self):
super().__init__('xapm', source=[],
compatible={'machine': ["aarch64"]})
self.apm = None
self.apm_type = None
self.devid = None
self.apm_json_enable = None
self.sample_base_addr = []
def prepare(self, option: dict, debug: bool):
"Handle Input Options"
xapmOption = option.get('tracer', {}).get('xapm', {})
self.interval = xapmOption.get("APM_interval", 0.002)
self.apm = APM(self.apm_type, self.devid)
if self.apm_type == "noc":
self.sample_base_addr = self.apm.base_addr
option.update({"ddrmc_base_addr": self.sample_base_addr})
"Handle Output Options"
self.apm_json_enable = option.get('tracer', {}).get('noc_perf', {}).get('ddrmc_json_enable', {})
option['ddrmc_json_enable'] = self.apm_json_enable
return option
def start(self):
super().start()
# self.timesync = self.apm.start(self.interval)
self.apm.start(self.interval)
def stop(self):
super().stop()
self.apm.stop()
def process(self, data, t_range=[]):
self.apm.transTimebase()
    def compatible(self, platform: dict):
if super().compatible(platform) == False:
return False
if platform.get('model').startswith('xlnx,zocl-versal'):
self.apm_type = "noc"
self.devid = '0x' + platform.get('idcode')[-7:]
return True
elif platform.get('model').startswith('xlnx,zocl'):
self.apm_type = "apm"
return checkAPM()
else:
return False
def getData(self):
if self.apm.enabled == False:
return
return [d.output() for d in self.apm.data]
if __name__ == '__main__':
    import time  # time is used below but not imported at module scope
    apm = APM("apm", None)  # APM requires mem_type and devid; devid is unused for "apm"
apm.start(0.001)
time.sleep(1)
apm.stop()
for a in apm.data:
print(apm.printData(a))
else:
tracer.tracerBase.register(xapmTracer()) |
5,318 | service highstate | """
Functions to perform introspection on a minion, and return data in a format
usable by Salt States
"""
import os
def running_service_owners(
exclude=("/dev", "/home", "/media", "/proc", "/run", "/sys/", "/tmp", "/var")
):
"""
Determine which packages own the currently running services. By default,
excludes files whose full path starts with ``/dev``, ``/home``, ``/media``,
``/proc``, ``/run``, ``/sys``, ``/tmp`` and ``/var``. This can be
overridden by passing in a new list to ``exclude``.
CLI Example:
.. code-block:: bash
salt myminion introspect.running_service_owners
"""
error = {}
if "pkg.owner" not in __salt__:
error["Unsupported Package Manager"] = (
"The module for the package manager on this system does not "
"support looking up which package(s) owns which file(s)"
)
if "file.open_files" not in __salt__:
error["Unsupported File Module"] = (
"The file module on this system does not "
"support looking up open files on the system"
)
if error:
return {"Error": error}
ret = {}
open_files = __salt__["file.open_files"]()
execs = __salt__["service.execs"]()
for path in open_files:
ignore = False
for bad_dir in exclude:
if path.startswith(bad_dir):
ignore = True
if ignore:
continue
if not os.access(path, os.X_OK):
continue
for service in execs:
if path == execs[service]:
pkg = __salt__["pkg.owner"](path)
ret[service] = next(iter(pkg.values()))
return ret
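# Illustrative return value of running_service_owners (hypothetical names):
#   {"sshd": "openssh-server", "nginx": "nginx"}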
def enabled_service_owners():
"""
Return which packages own each of the services that are currently enabled.
CLI Example:
.. code-block:: bash
salt myminion introspect.enabled_service_owners
"""
error = {}
if "pkg.owner" not in __salt__:
error["Unsupported Package Manager"] = (
"The module for the package manager on this system does not "
"support looking up which package(s) owns which file(s)"
)
if "service.show" not in __salt__:
error["Unsupported Service Manager"] = (
"The module for the service manager on this system does not "
"support showing descriptive service data"
)
if error:
return {"Error": error}
ret = {}
services = __salt__["service.get_enabled"]()
for service in services:
data = __salt__["service.show"](service)
if "ExecStart" not in data:
continue
start_cmd = data["ExecStart"]["path"]
pkg = __salt__["pkg.owner"](start_cmd)
ret[service] = next(iter(pkg.values()))
return ret
def METHOD_NAME(requires=True):
"""
Return running and enabled services in a highstate structure. By default
also returns package dependencies for those services, which means that
package definitions must be created outside this function. To drop the
package dependencies, set ``requires`` to False.
CLI Example:
.. code-block:: bash
salt myminion introspect.service_highstate
salt myminion introspect.service_highstate requires=False
"""
ret = {}
running = running_service_owners()
for service in running:
ret[service] = {"service": ["running"]}
if requires:
ret[service]["service"].append({"require": {"pkg": running[service]}})
enabled = enabled_service_owners()
for service in enabled:
if service in ret:
ret[service]["service"].append({"enabled": True})
else:
ret[service] = {"service": [{"enabled": True}]}
if requires:
exists = False
for item in ret[service]["service"]:
if isinstance(item, dict) and next(iter(item.keys())) == "require":
exists = True
if not exists:
ret[service]["service"].append({"require": {"pkg": enabled[service]}})
return ret |
5,319 | list disaster recovery config keys | # coding=utf-8
# *** WARNING: this file was generated by pulumi. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import copy
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
__all__ = [
'ListDisasterRecoveryConfigKeysResult',
'AwaitableListDisasterRecoveryConfigKeysResult',
'list_disaster_recovery_config_keys',
'list_disaster_recovery_config_keys_output',
]
@pulumi.output_type
class ListDisasterRecoveryConfigKeysResult:
"""
Namespace/ServiceBus Connection String
"""
def __init__(__self__, alias_primary_connection_string=None, alias_secondary_connection_string=None, key_name=None, primary_connection_string=None, primary_key=None, secondary_connection_string=None, secondary_key=None):
if alias_primary_connection_string and not isinstance(alias_primary_connection_string, str):
raise TypeError("Expected argument 'alias_primary_connection_string' to be a str")
pulumi.set(__self__, "alias_primary_connection_string", alias_primary_connection_string)
if alias_secondary_connection_string and not isinstance(alias_secondary_connection_string, str):
raise TypeError("Expected argument 'alias_secondary_connection_string' to be a str")
pulumi.set(__self__, "alias_secondary_connection_string", alias_secondary_connection_string)
if key_name and not isinstance(key_name, str):
raise TypeError("Expected argument 'key_name' to be a str")
pulumi.set(__self__, "key_name", key_name)
if primary_connection_string and not isinstance(primary_connection_string, str):
raise TypeError("Expected argument 'primary_connection_string' to be a str")
pulumi.set(__self__, "primary_connection_string", primary_connection_string)
if primary_key and not isinstance(primary_key, str):
raise TypeError("Expected argument 'primary_key' to be a str")
pulumi.set(__self__, "primary_key", primary_key)
if secondary_connection_string and not isinstance(secondary_connection_string, str):
raise TypeError("Expected argument 'secondary_connection_string' to be a str")
pulumi.set(__self__, "secondary_connection_string", secondary_connection_string)
if secondary_key and not isinstance(secondary_key, str):
raise TypeError("Expected argument 'secondary_key' to be a str")
pulumi.set(__self__, "secondary_key", secondary_key)
@property
@pulumi.getter(name="aliasPrimaryConnectionString")
def alias_primary_connection_string(self) -> str:
"""
Primary connection string of the alias if GEO DR is enabled
"""
return pulumi.get(self, "alias_primary_connection_string")
@property
@pulumi.getter(name="aliasSecondaryConnectionString")
def alias_secondary_connection_string(self) -> str:
"""
Secondary connection string of the alias if GEO DR is enabled
"""
return pulumi.get(self, "alias_secondary_connection_string")
@property
@pulumi.getter(name="keyName")
def key_name(self) -> str:
"""
A string that describes the authorization rule.
"""
return pulumi.get(self, "key_name")
@property
@pulumi.getter(name="primaryConnectionString")
def primary_connection_string(self) -> str:
"""
Primary connection string of the created namespace authorization rule.
"""
return pulumi.get(self, "primary_connection_string")
@property
@pulumi.getter(name="primaryKey")
def primary_key(self) -> str:
"""
A base64-encoded 256-bit primary key for signing and validating the SAS token.
"""
return pulumi.get(self, "primary_key")
@property
@pulumi.getter(name="secondaryConnectionString")
def secondary_connection_string(self) -> str:
"""
Secondary connection string of the created namespace authorization rule.
"""
return pulumi.get(self, "secondary_connection_string")
@property
@pulumi.getter(name="secondaryKey")
def secondary_key(self) -> str:
"""
        A base64-encoded 256-bit secondary key for signing and validating the SAS token.
"""
return pulumi.get(self, "secondary_key")
class AwaitableListDisasterRecoveryConfigKeysResult(ListDisasterRecoveryConfigKeysResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return ListDisasterRecoveryConfigKeysResult(
alias_primary_connection_string=self.alias_primary_connection_string,
alias_secondary_connection_string=self.alias_secondary_connection_string,
key_name=self.key_name,
primary_connection_string=self.primary_connection_string,
primary_key=self.primary_key,
secondary_connection_string=self.secondary_connection_string,
secondary_key=self.secondary_key)
def METHOD_NAME(alias: Optional[str] = None,
authorization_rule_name: Optional[str] = None,
namespace_name: Optional[str] = None,
resource_group_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableListDisasterRecoveryConfigKeysResult:
"""
Gets the primary and secondary connection strings for the namespace.
:param str alias: The Disaster Recovery configuration name
:param str authorization_rule_name: The authorization rule name.
:param str namespace_name: The namespace name
:param str resource_group_name: Name of the Resource group within the Azure subscription.
"""
__args__ = dict()
__args__['alias'] = alias
__args__['authorizationRuleName'] = authorization_rule_name
__args__['namespaceName'] = namespace_name
__args__['resourceGroupName'] = resource_group_name
opts = pulumi.InvokeOptions.merge(_utilities.get_invoke_opts_defaults(), opts)
__ret__ = pulumi.runtime.invoke('azure-native:servicebus/v20220101preview:listDisasterRecoveryConfigKeys', __args__, opts=opts, typ=ListDisasterRecoveryConfigKeysResult).value
return AwaitableListDisasterRecoveryConfigKeysResult(
alias_primary_connection_string=pulumi.get(__ret__, 'alias_primary_connection_string'),
alias_secondary_connection_string=pulumi.get(__ret__, 'alias_secondary_connection_string'),
key_name=pulumi.get(__ret__, 'key_name'),
primary_connection_string=pulumi.get(__ret__, 'primary_connection_string'),
primary_key=pulumi.get(__ret__, 'primary_key'),
secondary_connection_string=pulumi.get(__ret__, 'secondary_connection_string'),
secondary_key=pulumi.get(__ret__, 'secondary_key'))
@_utilities.lift_output_func(METHOD_NAME)
def list_disaster_recovery_config_keys_output(alias: Optional[pulumi.Input[str]] = None,
authorization_rule_name: Optional[pulumi.Input[str]] = None,
namespace_name: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[ListDisasterRecoveryConfigKeysResult]:
"""
Gets the primary and secondary connection strings for the namespace.
:param str alias: The Disaster Recovery configuration name
:param str authorization_rule_name: The authorization rule name.
:param str namespace_name: The namespace name
:param str resource_group_name: Name of the Resource group within the Azure subscription.
"""
... |
5,320 | handler | # --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
#
# Code generated by aaz-dev-tools
# --------------------------------------------------------------------------------------------
# pylint: skip-file
# flake8: noqa
from azure.cli.core.aaz import *
@register_command(
"servicebus namespace private-link-resource show",
)
class Show(AAZCommand):
"""List lists of resources that supports Privatelinks.
"""
_aaz_info = {
"version": "2022-10-01-preview",
"resources": [
["mgmt-plane", "/subscriptions/{}/resourcegroups/{}/providers/microsoft.servicebus/namespaces/{}/privatelinkresources", "2022-10-01-preview"],
]
}
def METHOD_NAME(self, command_args):
super().METHOD_NAME(command_args)
return self.build_paging(self._execute_operations, self._output)
_args_schema = None
@classmethod
def _build_arguments_schema(cls, *args, **kwargs):
if cls._args_schema is not None:
return cls._args_schema
cls._args_schema = super()._build_arguments_schema(*args, **kwargs)
# define Arg Group ""
_args_schema = cls._args_schema
_args_schema.namespace_name = AAZStrArg(
options=["--namespace-name"],
help="The namespace name",
required=True,
fmt=AAZStrArgFormat(
max_length=50,
min_length=6,
),
)
_args_schema.resource_group = AAZResourceGroupNameArg(
required=True,
)
return cls._args_schema
def _execute_operations(self):
self.pre_operations()
self.PrivateLinkResourcesGet(ctx=self.ctx)()
self.post_operations()
@register_callback
def pre_operations(self):
pass
@register_callback
def post_operations(self):
pass
def _output(self, *args, **kwargs):
result = self.deserialize_output(self.ctx.vars.instance.value, client_flatten=True)
next_link = self.deserialize_output(self.ctx.vars.instance.next_link)
return result, next_link
class PrivateLinkResourcesGet(AAZHttpOperation):
CLIENT_TYPE = "MgmtClient"
def __call__(self, *args, **kwargs):
request = self.make_request()
session = self.client.send_request(request=request, stream=False, **kwargs)
if session.http_response.status_code in [200]:
return self.on_200(session)
return self.on_error(session.http_response)
@property
def url(self):
return self.client.format_url(
"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ServiceBus/namespaces/{namespaceName}/privateLinkResources",
**self.url_parameters
)
@property
def method(self):
return "GET"
@property
def error_format(self):
return "MgmtErrorFormat"
@property
def url_parameters(self):
parameters = {
**self.serialize_url_param(
"namespaceName", self.ctx.args.namespace_name,
required=True,
),
**self.serialize_url_param(
"resourceGroupName", self.ctx.args.resource_group,
required=True,
),
**self.serialize_url_param(
"subscriptionId", self.ctx.subscription_id,
required=True,
),
}
return parameters
@property
def query_parameters(self):
parameters = {
**self.serialize_query_param(
"api-version", "2022-10-01-preview",
required=True,
),
}
return parameters
@property
def header_parameters(self):
parameters = {
**self.serialize_header_param(
"Accept", "application/json",
),
}
return parameters
def on_200(self, session):
data = self.deserialize_http_content(session)
self.ctx.set_var(
"instance",
data,
schema_builder=self._build_schema_on_200
)
_schema_on_200 = None
@classmethod
def _build_schema_on_200(cls):
if cls._schema_on_200 is not None:
return cls._schema_on_200
cls._schema_on_200 = AAZObjectType()
_schema_on_200 = cls._schema_on_200
_schema_on_200.next_link = AAZStrType(
serialized_name="nextLink",
)
_schema_on_200.value = AAZListType()
value = cls._schema_on_200.value
value.Element = AAZObjectType()
_element = cls._schema_on_200.value.Element
_element.id = AAZStrType()
_element.name = AAZStrType()
_element.properties = AAZObjectType(
flags={"client_flatten": True},
)
_element.type = AAZStrType()
properties = cls._schema_on_200.value.Element.properties
properties.group_id = AAZStrType(
serialized_name="groupId",
)
properties.required_members = AAZListType(
serialized_name="requiredMembers",
)
properties.required_zone_names = AAZListType(
serialized_name="requiredZoneNames",
)
required_members = cls._schema_on_200.value.Element.properties.required_members
required_members.Element = AAZStrType()
required_zone_names = cls._schema_on_200.value.Element.properties.required_zone_names
required_zone_names.Element = AAZStrType()
return cls._schema_on_200
class _ShowHelper:
"""Helper class for Show"""
__all__ = ["Show"] |
5,321 | set called callback | # This file is part of the MapProxy project.
# Copyright (C) 2010 Omniscale <http://omniscale.de>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from mapproxy.compat.image import Image
from mapproxy.grid import tile_grid
from mapproxy.image import BlankImageSource
from mapproxy.image import ImageSource
from mapproxy.image.opts import ImageOptions
from mapproxy.layer import MapLayer, DefaultMapExtent
from mapproxy.service.base import Server
from mapproxy.service.tile import TileServer
from mapproxy.service.wms import WMSGroupLayer, WMSServer
from mapproxy.service.wmts import WMTSServer
from mapproxy.test.http import make_wsgi_env
from mapproxy.util.ext.odict import odict
class DummyLayer(MapLayer):
transparent = True
extent = DefaultMapExtent()
has_legend = False
queryable = False
def __init__(self, name):
MapLayer.__init__(self)
self.name = name
self.requested = False
self.queried = False
def get_map(self, query):
self.requested = True
def get_info(self, query):
self.queried = True
def map_layers_for_query(self, query):
return [(self.name, self)]
def info_layers_for_query(self, query):
return [(self.name, self)]
class DummyTileLayer(object):
def __init__(self, name):
self.requested = False
self.name = name
self.grid = tile_grid(900913)
def tile_bbox(self, request, use_profiles=False):
# this dummy code does not handle profiles and different tile origins!
return self.grid.tile_bbox(request.tile)
def render(self, tile_request, use_profiles=None, coverage=None, decorate_img=None):
self.requested = True
resp = BlankImageSource((256, 256), image_opts=ImageOptions(format='image/png'))
resp.timestamp = 0
return resp
class TestDecorateImg(object):
def setup(self):
# Base server
self.server = Server()
# WMS Server
root_layer = WMSGroupLayer(None, 'root layer', None, [DummyLayer('wms_cache')])
self.wms_server = WMSServer(
md={}, root_layer=root_layer, srs=['EPSG:4326'],
image_formats={'image/png': ImageOptions(format='image/png')}
)
# Tile Servers
layers = odict()
layers["wms_cache_EPSG900913"] = DummyTileLayer('wms_cache')
self.tile_server = TileServer(layers, {})
self.wmts_server = WMTSServer(layers, {})
# Common arguments
self.query_extent = ('EPSG:27700', (0, 0, 700000, 1300000))
def test_original_imagesource_returned_when_no_callback(self):
img_src1 = ImageSource(Image.new('RGBA', (100, 100)))
env = make_wsgi_env('', extra_environ={})
img_src2 = self.server.decorate_img(
img_src1, 'wms.map', ['layer1'],
env, self.query_extent
)
assert img_src1 == img_src2
def test_returns_imagesource(self):
img_src1 = ImageSource(Image.new('RGBA', (100, 100)))
env = make_wsgi_env('', extra_environ={})
img_src2 = self.server.decorate_img(
img_src1, 'wms.map', ['layer1'],
env, self.query_extent
)
assert isinstance(img_src2, ImageSource)
def METHOD_NAME(self, img_src, service, layers, **kw):
self.called = True
return img_src
def test_calls_callback(self):
img_src1 = ImageSource(Image.new('RGBA', (100, 100)))
self.called = False
env = make_wsgi_env('', extra_environ={'mapproxy.decorate_img': self.METHOD_NAME})
img_src2 = self.server.decorate_img(
img_src1, 'wms.map', ['layer1'],
env, self.query_extent
)
assert self.called == True
def return_new_imagesource_callback(self, img_src, service, layers, **kw):
new_img_src = ImageSource(Image.new('RGBA', (100, 100)))
self.new_img_src = new_img_src
return new_img_src
def test_returns_callbacks_return_value(self):
img_src1 = ImageSource(Image.new('RGBA', (100, 100)))
env = make_wsgi_env('', extra_environ={'mapproxy.decorate_img': self.return_new_imagesource_callback})
self.new_img_src = None
img_src2 = self.server.decorate_img(
img_src1, 'wms.map', ['layer1'],
env, self.query_extent
)
assert img_src2 == self.new_img_src
def test_wms_server(self):
''' Test that the decorate_img method is available on a WMSServer instance '''
img_src1 = ImageSource(Image.new('RGBA', (100, 100)))
self.called = False
env = make_wsgi_env('', extra_environ={'mapproxy.decorate_img': self.METHOD_NAME})
img_src2 = self.wms_server.decorate_img(
img_src1, 'wms.map', ['layer1'],
env, self.query_extent
)
assert self.called == True
def test_tile_server(self):
''' Test that the decorate_img method is available on a TileServer instance '''
img_src1 = ImageSource(Image.new('RGBA', (100, 100)))
self.called = False
env = make_wsgi_env('', extra_environ={'mapproxy.decorate_img': self.METHOD_NAME})
img_src2 = self.tile_server.decorate_img(
img_src1, 'tms', ['layer1'],
env, self.query_extent
)
assert self.called == True
def test_wmts_server(self):
''' Test that the decorate_img method is available on a WMTSServer instance '''
img_src1 = ImageSource(Image.new('RGBA', (100, 100)))
self.called = False
env = make_wsgi_env('', extra_environ={'mapproxy.decorate_img': self.METHOD_NAME})
img_src2 = self.wmts_server.decorate_img(
img_src1, 'wmts', ['layer1'],
env, self.query_extent
)
assert self.called == True
def test_args(self):
def callback(img_src, service, layers, environ, query_extent, **kw):
assert isinstance(img_src, ImageSource)
assert 'wms.map' == service
assert isinstance(layers, list)
assert isinstance(environ, dict)
assert len(query_extent) == 2
assert len(query_extent[1]) == 4
return img_src
img_src1 = ImageSource(Image.new('RGBA', (100, 100)))
env = make_wsgi_env('', extra_environ={'mapproxy.decorate_img': callback})
img_src2 = self.tile_server.decorate_img(
img_src1, 'wms.map', ['layer1'],
env, self.query_extent
) |
5,322 | add ignore pass case | # Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
sys.path.append('../')
from auto_scan_test import AutoScanTest, IgnoreReasons
from program_config import TensorConfig, ProgramConfig, OpConfig, CxxConfig, TargetType, PrecisionType, DataLayoutType, Place
import unittest
import hypothesis
from hypothesis import given, settings, seed, example, assume
import hypothesis.strategies as st
import numpy as np
class TestMulOp(AutoScanTest):
def __init__(self, *args, **kwargs):
AutoScanTest.__init__(self, *args, **kwargs)
self.enable_testing_on_place(
TargetType.ARM,
PrecisionType.FP32,
DataLayoutType.NCHW,
thread=[1, 4])
self.enable_testing_on_place(
TargetType.ARM,
PrecisionType.FP16,
DataLayoutType.NCHW,
thread=[1, 4])
arm_valid_places = [
Place(TargetType.ARM, PrecisionType.INT8, DataLayoutType.NCHW),
Place(TargetType.ARM, PrecisionType.FP32, DataLayoutType.NCHW)
]
self.enable_testing_on_place(places=arm_valid_places, thread=[1, 4])
self.enable_testing_on_place(
TargetType.X86,
PrecisionType.FP32,
DataLayoutType.NCHW,
thread=[1, 4])
def is_program_valid(self,
program_config: ProgramConfig,
predictor_config: CxxConfig) -> bool:
return True
def sample_program_configs(self, draw):
shape0 = draw(st.integers(min_value=1, max_value=32))
shape1 = draw(st.integers(min_value=1, max_value=32))
shape2 = draw(st.integers(min_value=1, max_value=16))
shape3 = draw(st.integers(min_value=1, max_value=16))
shape4 = draw(st.integers(min_value=1, max_value=16))
shape5 = draw(st.integers(min_value=1, max_value=16))
shape6 = shape4 * shape5
case = draw(st.sampled_from([1, 2, 3, 4]))
if case == 1:
x_num_col_dims = 2
y_num_col_dims = 1
shape3 = shape6
shape2 = shape3
X_shape = draw(
st.sampled_from([[shape0, shape5, shape2],
[shape0, shape5, shape4, shape5]]))
Y_shape = draw(
st.sampled_from([[shape3, shape4, shape5], [shape3, shape5]]))
if case == 2:
x_num_col_dims = 2
y_num_col_dims = 2
X_shape = draw(
st.sampled_from([[shape0, shape1, shape6],
[shape4, shape5, shape5, shape4]]))
Y_shape = draw(st.sampled_from([[shape4, shape5, shape5]]))
if case == 3:
x_num_col_dims = 1
y_num_col_dims = 1
X_shape = draw(st.sampled_from([[shape0, shape4, shape5]]))
Y_shape = draw(st.sampled_from([[shape6, shape5, shape5]]))
if case == 4:
x_num_col_dims = 3
y_num_col_dims = 2
X_shape = draw(
st.sampled_from([[shape0, shape3, shape4, shape6],
[shape4, shape6, shape0, shape5, shape4]]))
Y_shape = draw(st.sampled_from([[shape5, shape4, shape5]]))
force_fp32_output = draw(st.booleans())
mul_op = OpConfig(
type="mul",
inputs={"X": ["input_data_x"],
"Y": ["input_data_y"]},
outputs={"Out": ["output_data"]},
attrs={
"x_num_col_dims": x_num_col_dims,
"y_num_col_dims": y_num_col_dims,
"force_fp32_output": force_fp32_output
})
program_config = ProgramConfig(
ops=[mul_op],
weights={},
inputs={
"input_data_x": TensorConfig(shape=X_shape),
"input_data_y": TensorConfig(shape=Y_shape)
},
outputs=["output_data"])
return program_config
def sample_predictor_configs(self):
return self.get_predictor_configs(), ["mul"], (1e-5, 1e-5)
def METHOD_NAME(self):
pass
def test(self, *args, **kwargs):
        # the int8 case runs a long time with max_examples=250
self.run_and_statis(quant=False, max_examples=100)
if __name__ == "__main__":
unittest.main(argv=['']) |
5,323 | validate time | from pysys.basetest import BaseTest
import time
import json
"""
Validate tedge-mapper-collectd messages that are published
on tedge/measurements
Given a configured system
When we start the tedge-mapper-collectd with sudo in the background
When we start tedge sub with sudo in the background
When we start two publishers to publish the simulated collectd messages
Publish the messages in 100ms interval
Wait for couple of seconds to publish couple of batch of messages
Then we kill tedge sub with sudo as it is running with a different user account
Then we validate the messages in the output of tedge sub.
"""
class MonitoringSmallInterval(BaseTest):
def setup(self):
self.js_msg = ""
self.time_cnt = 0
self.temp_cnt = 0
self.pres_cnt = 0
self.tedge = "/usr/bin/tedge"
self.sudo = "/usr/bin/sudo"
# stop collectd to avoid mixup of messages
collectd = self.startProcess(
command=self.sudo,
arguments=["systemctl", "stop", "collectd"],
stdouterr="collectd",
)
collectd_mapper = self.startProcess(
command=self.sudo,
arguments=["systemctl", "start", "tedge-mapper-collectd"],
stdouterr="collectd_mapper",
)
self.addCleanupFunction(self.monitoring_cleanup)
def execute(self):
sub = self.startProcess(
command=self.sudo,
arguments=[self.tedge, "mqtt", "sub", "--no-topic", "tedge/#"],
stdouterr="tedge_sub",
background=True,
)
# Wait for a small amount of time to give tedge sub time
# to initialize. This is a heuristic measure.
# Without an additional wait we observe failures in 1% of the test
# runs.
time.sleep(0.1)
for _ in range(10):
timestamp = time.time()
pub = self.startProcess(
command=self.sudo,
arguments=[
self.tedge,
"mqtt",
"pub",
"collectd/host/temperature/temp",
f"{timestamp}:25.5",
],
stdouterr="tedge_temp",
)
timestamp = time.time()
pub = self.startProcess(
command=self.sudo,
arguments=[
self.tedge,
"mqtt",
"pub",
"collectd/host/pressure/pres",
f"{timestamp}:500.5",
],
stdouterr="tedge_pres",
)
# publish every 100ms
time.sleep(0.1)
# wait for tedge-mapper-collectd to batch messages
time.sleep(1)
# Kill the subscriber process explicitly with sudo as PySys does
# not have the rights to do it
kill = self.startProcess(
command=self.sudo,
arguments=["killall", "tedge"],
stdouterr="kill_out",
)
def validate(self):
self.assertThat(
"collectd_msg_validation_result == expected_result",
collectd_msg_validation_result=self.validate_json(),
expected_result=True,
)
def validate_json(self):
f = open(self.output + "/tedge_sub.out", "r")
lines = f.readlines()
for line in lines:
self.log.info(line)
self.js_msg = json.loads(line)
if not self.METHOD_NAME():
reason = "time validation failed in message: " + str(line)
self.abort(False, reason)
if "temperature" in self.js_msg:
if not self.validate_temperature():
reason = "temperature stat validation failed in message: " + str(
line
)
self.abort(False, reason)
if "pressure" in self.js_msg:
if not self.validate_pressure():
reason = "pressure stat validation failed in message: " + str(line)
self.abort(False, reason)
if self.time_cnt >= 10 and self.temp_cnt == 10 and self.pres_cnt == 10:
return True
else:
return False
def METHOD_NAME(self):
if self.js_msg["time"]:
self.time_cnt += 1
return True
else:
return False
def validate_temperature(self):
if self.js_msg["temperature"]:
if "temp" in self.js_msg["temperature"]:
self.temp_cnt += 1
return True
else:
return False
else:
return False
def validate_pressure(self):
if self.js_msg["pressure"]:
if "pres" in self.js_msg["pressure"]:
self.pres_cnt += 1
return True
else:
return False
else:
return False
def monitoring_cleanup(self):
self.log.info("monitoring_cleanup")
collectd = self.startProcess(
command=self.sudo,
arguments=["systemctl", "stop", "tedge-mapper-collectd"],
stdouterr="collectd_mapper",
) |
5,324 | config options | from conans import ConanFile, CMake, tools
from conans.errors import ConanInvalidConfiguration, ConanException
import functools
import os
required_conan_version = ">=1.43.0"
class MagnumIntegrationConan(ConanFile):
name = "magnum-integration"
description = "Integration libraries for the Magnum C++11/C++14 graphics engine"
license = "MIT"
topics = ("magnum", "graphics", "rendering", "3d", "2d", "opengl")
url = "https://github.com/conan-io/conan-center-index"
homepage = "https://magnum.graphics"
settings = "os", "arch", "compiler", "build_type"
options = {
"shared": [True, False],
"fPIC": [True, False],
"with_bullet": [True, False],
"with_dart": [True, False],
"with_eigen": [True, False],
"with_glm": [True, False],
"with_imgui": [True, False],
"with_ovr": [True, False],
}
default_options = {
"shared": False,
"fPIC": True,
"with_bullet": True,
"with_dart": False,
"with_eigen": True,
"with_glm": True,
"with_imgui": True,
"with_ovr": False,
}
exports_sources = "CMakeLists.txt"
generators = "cmake", "cmake_find_package"
short_paths = True
@property
def _source_subfolder(self):
return "source_subfolder"
def METHOD_NAME(self):
if self.settings.os == "Windows":
del self.options.fPIC
def configure(self):
if self.options.shared:
del self.options.fPIC
def requirements(self):
self.requires("magnum/{}".format(self.version))
if self.options.with_bullet:
self.requires("bullet3/3.22a")
if self.options.with_eigen:
self.requires("eigen/3.4.0")
if self.options.with_glm:
self.requires("glm/0.9.9.8")
if self.options.with_imgui:
self.requires("imgui/1.87")
def validate(self):
if self.options.with_dart:
# FIXME: Add 'dart' requirement
raise ConanInvalidConfiguration("DART library is not available in ConanCenter (yet)")
if self.options.with_ovr:
# FIXME: Add 'ovr' requirement
raise ConanInvalidConfiguration("OVR library is not available in ConanCenter (yet)")
def source(self):
tools.get(**self.conan_data["sources"][self.version],
destination=self._source_subfolder, strip_root=True)
@functools.lru_cache(1)
def _configure_cmake(self):
cmake = CMake(self)
cmake.definitions["BUILD_STATIC"] = not self.options.shared
cmake.definitions["BUILD_STATIC_PIC"] = self.options.get_safe("fPIC", True)
cmake.definitions["BUILD_TESTS"] = False
cmake.definitions["BUILD_GL_TESTS"] = False
cmake.definitions["WITH_BULLET"] = self.options.with_bullet
cmake.definitions["WITH_DART"] = self.options.with_dart
cmake.definitions["WITH_EIGEN"] = self.options.with_eigen
cmake.definitions["WITH_GLM"] = self.options.with_glm
cmake.definitions["WITH_IMGUI"] = self.options.with_imgui
cmake.definitions["WITH_OVR"] = self.options.with_ovr
cmake.configure()
return cmake
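    # Note: the functools.lru_cache(1) decorator above makes build() and
    # package() share a single configured CMake instance, so the configure
    # step only runs once per recipe invocation.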
def _patch_sources(self):
for patch in self.conan_data.get("patches", {}).get(self.version, []):
tools.patch(**patch)
tools.replace_in_file(os.path.join(self._source_subfolder, "CMakeLists.txt"),
'set(CMAKE_MODULE_PATH "${PROJECT_SOURCE_DIR}/modules/" ${CMAKE_MODULE_PATH})',
"")
# Casing
tools.replace_in_file(os.path.join(self._source_subfolder, "src", "Magnum", "GlmIntegration", "CMakeLists.txt"),
"find_package(GLM REQUIRED)",
"find_package(glm REQUIRED)")
tools.replace_in_file(os.path.join(self._source_subfolder, "src", "Magnum", "GlmIntegration", "CMakeLists.txt"),
"GLM::GLM",
"glm::glm")
tools.replace_in_file(os.path.join(self._source_subfolder, "src", "Magnum", "ImGuiIntegration", "CMakeLists.txt"),
"find_package(ImGui REQUIRED Sources)",
"find_package(imgui REQUIRED Sources)")
tools.replace_in_file(os.path.join(self._source_subfolder, "src", "Magnum", "ImGuiIntegration", "CMakeLists.txt"),
"ImGui::ImGui",
"imgui::imgui")
tools.replace_in_file(os.path.join(self._source_subfolder, "src", "Magnum", "ImGuiIntegration", "CMakeLists.txt"),
"ImGui::Sources",
"")
def build(self):
self._patch_sources()
cm = self._configure_cmake()
cm.build()
def package(self):
cm = self._configure_cmake()
cm.install()
tools.rmdir(os.path.join(self.package_folder, "share"))
self.copy("COPYING", src=self._source_subfolder, dst="licenses")
def package_info(self):
self.cpp_info.set_property("cmake_file_name", "MagnumIntegration")
self.cpp_info.names["cmake_find_package"] = "MagnumIntegration"
self.cpp_info.names["cmake_find_package_multi"] = "MagnumIntegration"
lib_suffix = "-d" if self.settings.build_type == "Debug" else ""
if self.options.with_bullet:
self.cpp_info.components["bullet"].set_property("cmake_target_name", "MagnumIntegration::Bullet")
self.cpp_info.components["bullet"].names["cmake_find_package"] = "Bullet"
self.cpp_info.components["bullet"].names["cmake_find_package_multi"] = "Bullet"
self.cpp_info.components["bullet"].libs = ["MagnumBulletIntegration{}".format(lib_suffix)]
self.cpp_info.components["bullet"].requires = ["magnum::magnum_main", "magnum::gl", "magnum::shaders", "bullet3::bullet3"]
if self.options.with_dart:
            raise ConanException("Recipe doesn't define the 'dart' component yet. Please contribute it")
if self.options.with_eigen:
self.cpp_info.components["eigen"].set_property("cmake_target_name", "MagnumIntegration::Eigen")
self.cpp_info.components["eigen"].names["cmake_find_package"] = "Eigen"
self.cpp_info.components["eigen"].names["cmake_find_package_multi"] = "Eigen"
self.cpp_info.components["eigen"].requires = ["magnum::magnum_main", "eigen::eigen"]
if self.options.with_glm:
self.cpp_info.components["glm"].set_property("cmake_target_name", "MagnumIntegration::Glm")
self.cpp_info.components["glm"].names["cmake_find_package"] = "Glm"
self.cpp_info.components["glm"].names["cmake_find_package_multi"] = "Glm"
self.cpp_info.components["glm"].libs = ["MagnumGlmIntegration{}".format(lib_suffix)]
self.cpp_info.components["glm"].requires = ["magnum::magnum_main", "glm::glm"]
if self.options.with_imgui:
self.cpp_info.components["imgui"].set_property("cmake_target_name", "MagnumIntegration::ImGui")
self.cpp_info.components["imgui"].names["cmake_find_package"] = "ImGui"
self.cpp_info.components["imgui"].names["cmake_find_package_multi"] = "ImGui"
self.cpp_info.components["imgui"].libs = ["MagnumImGuiIntegration{}".format(lib_suffix)]
self.cpp_info.components["imgui"].requires = ["magnum::magnum_main", "magnum::gl", "magnum::shaders", "imgui::imgui"]
if self.options.with_ovr:
            raise ConanException("Recipe doesn't define the 'ovr' component yet. Please contribute it") |
5,325 | conv2d hwcn | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, too-many-locals, too-many-statements, unused-argument
"""Schedule for conv2d_hwcn with auto fusion"""
import tvm
from tvm import te
from tvm import autotvm
from tvm.autotvm.task.space import SplitEntity
from .. import nn, tag
@autotvm.register_topi_compute("conv2d_hwcn.cuda")
def METHOD_NAME(cfg, data, kernel, strides, padding, dilation, out_dtype="float32"):
"""Compute conv2d with HWCN layout on CUDA"""
return nn.METHOD_NAME(data, kernel, strides, padding, dilation, out_dtype)
@autotvm.register_topi_schedule("conv2d_hwcn.cuda")
def schedule_conv2d_hwcn(cfg, outs):
"""Schedule for conv2d_hwcn and any element-wise operations.
Parameters
----------
outs: Array of Tensor
The computation graph description of conv2d_hwcn in the format
of an array of tensors.
Returns
-------
s: Schedule
The computation schedule for conv2d_hwcn.
"""
outs = [outs] if isinstance(outs, te.tensor.Tensor) else outs
sch = te.create_schedule([x.op for x in outs])
def schedule(Apad, W, B):
"""Schedule conv2d_hwcn"""
sch[Apad].compute_inline()
AA = sch.cache_read(Apad, "shared", [B])
WW = sch.cache_read(W, "shared", [B])
AL = sch.cache_read(AA, "local", [B])
WL = sch.cache_read(WW, "local", [B])
if B.op in sch.outputs:
Out = B
BL = sch.cache_write(Out, "local")
else:
Out = sch.outputs[0].output(0)
sch[B].set_scope("local")
BL = B
hi, wi, fi, ni = sch[Out].op.axis
# Create tuning space
n_thread_cand = [1, 2, 4, 8, 16, 32]
vthread_cand = [1, 2, 4, 8]
cfg.define_split(
"tile_fi",
fi,
num_outputs=4,
filter=lambda x: (x.size[1] in vthread_cand and x.size[2] in n_thread_cand),
)
cfg.define_split(
"tile_ni",
ni,
num_outputs=4,
filter=lambda x: (x.size[1] in vthread_cand and x.size[2] in n_thread_cand),
)
if cfg.is_fallback:
cfg["tile_fi"] = SplitEntity([-1, 2, 8, 4])
cfg["tile_ni"] = SplitEntity([-1, 2, 8, 4])
# Scheduling
step = 8
bz = sch[Out].fuse(hi, wi)
by, tyz, ty, fi = cfg["tile_fi"].apply(sch, Out, fi)
bx, txz, tx, ni = cfg["tile_ni"].apply(sch, Out, ni)
sch[Out].reorder(bz, by, bx, tyz, txz, ty, tx, fi, ni)
sch[Out].bind(bz, te.thread_axis("blockIdx.z"))
sch[Out].bind(by, te.thread_axis("blockIdx.y"))
sch[Out].bind(bx, te.thread_axis("blockIdx.x"))
sch[Out].bind(tyz, te.thread_axis("vthread"))
sch[Out].bind(txz, te.thread_axis("vthread"))
sch[Out].bind(ty, te.thread_axis("threadIdx.y"))
sch[Out].bind(tx, te.thread_axis("threadIdx.x"))
# Schedule BL local write
sch[BL].compute_at(sch[Out], tx)
yi, xi, fi, ni = sch[BL].op.axis
ry, rx, rc = sch[BL].op.reduce_axis
rco, rci = sch[BL].split(rc, factor=step)
sch[BL].reorder(rco, ry, rx, rci, fi, ni)
fuse_index = sch[BL].fuse(ry, rx)
fuse_index = sch[BL].fuse(fuse_index, rco)
rx = fuse_index
sch[AA].compute_at(sch[BL], rx)
sch[WW].compute_at(sch[BL], rx)
sch[AL].compute_at(sch[BL], rci)
sch[WL].compute_at(sch[BL], rci)
# Schedule for A's shared memory load
yi, xi, ci, ni = sch[AA].op.axis
ty, ci = sch[AA].split(ci, nparts=cfg["tile_fi"].size[2])
tx, ni = sch[AA].split(ni, nparts=cfg["tile_ni"].size[2])
_, ni = sch[AA].split(ni, factor=4)
sch[AA].reorder(ty, tx, yi, xi, ci, ni)
sch[AA].bind(ty, te.thread_axis("threadIdx.y"))
sch[AA].bind(tx, te.thread_axis("threadIdx.x"))
sch[AA].vectorize(ni)
# Schedule for W's shared memory load
yi, xi, ci, fi = sch[WW].op.axis
ty, ci = sch[WW].split(ci, nparts=cfg["tile_fi"].size[2])
tx, fi = sch[WW].split(fi, nparts=cfg["tile_ni"].size[2])
_, fi = sch[WW].split(fi, factor=4)
sch[WW].reorder(ty, tx, yi, xi, ci, fi)
sch[WW].bind(ty, te.thread_axis("threadIdx.y"))
sch[WW].bind(tx, te.thread_axis("threadIdx.x"))
sch[WW].vectorize(fi)
scheduled_ops = []
def traverse(operator):
"""Traverse operators from computation graph"""
if tag.is_broadcast(operator.tag):
if operator not in sch.outputs:
sch[operator].compute_inline()
for tensor in operator.input_tensors:
if isinstance(tensor.op, te.tensor.ComputeOp) and tensor.op not in scheduled_ops:
traverse(tensor.op)
elif operator.tag == "conv2d_hwcn":
Apad = operator.input_tensors[0]
W = operator.input_tensors[1]
if isinstance(W.op, tvm.te.ComputeOp) and "dilate" in W.op.tag:
sch[W].compute_inline()
B = operator.output(0)
schedule(Apad, W, B)
else:
raise RuntimeError(f"Unsupported operator: {operator.tag}")
scheduled_ops.append(operator)
traverse(outs[0].op)
return sch |
5,326 | ipv6 to int | #########################################################################
#
# Utils in this file:
# ip_to_int(ipstr)
# int_to_ip(n)
# incr_ipv4(ipaddr, mask=32, step=1)
# range_ipv4(start_ip, count, mask=32)
# network(ipaddr, mask=24)
# ipv6_to_int(ipv6_addr)
# int_to_ipv6(i)
# incr_ipv6(ipaddr, mask=128, step=1)
# range_ipv6(start_ip, count, mask=128)
# variance(expected, actual, tolerance)
#     expand_range(value_list)
#     range_ip(start_ip, count, mask=32)
#     mcast_ip2mac(mcast_ip)
#
#########################################################################
import re
import struct
import socket
import binascii
def ip_to_int(ipstr):
return struct.unpack('!I', socket.inet_aton(ipstr))[0]
def int_to_ip(n):
return socket.inet_ntoa(struct.pack('!I', n))
def incr_ipv4(ipaddr, mask=32, step=1):
# To separate the mask if provided with ip.
ipaddr, save_mask = [ipaddr, ''] if ipaddr.find('/') == -1 else ipaddr.split('/')
ip_int = ip_to_int(ipaddr)
# Saving the diff value.
ip_int_old = ip_int
ip_int >>= 32 - mask
ip_int <<= 32 - mask
ip_diff = ip_int_old - ip_int
# Actual logic.
ip_int >>= 32 - mask
ip_int += step
ip_int <<= 32 - mask
ip_int += ip_diff
ipaddr = int_to_ip(ip_int)
ipaddr = '/'.join([ipaddr, save_mask]) if save_mask != '' else ipaddr
return ipaddr
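# Illustrative (not executed): with the default /32 mask the step lands on the
# last octet, while a coarser mask shifts it into the network part.
#   incr_ipv4('10.0.0.5')             -> '10.0.0.6'
#   incr_ipv4('10.0.0.5/24', mask=24) -> '10.0.1.5/24'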
def range_ipv4(start_ip, count, mask=32):
ip_list = []
count = int(count)
mask = int(mask)
for _ in range(count):
ip_list.append(start_ip)
start_ip = incr_ipv4(start_ip, mask)
return ip_list
def network(ipaddr, mask=24):
ip_int = ip_to_int(ipaddr)
ip_int >>= 32 - mask
ip_int <<= 32 - mask
return int_to_ip(ip_int)
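# Illustrative (not executed): network('192.168.1.77', 24) -> '192.168.1.0'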
def METHOD_NAME(ipv6_addr):
return int(binascii.hexlify(socket.inet_pton(socket.AF_INET6, ipv6_addr)), 16)
def int_to_ipv6(i):
    # Zero-pad to 32 hex digits so inet_ntop always receives 16 packed bytes
    # (hex(i) on small ints yields odd-length strings that unhexlify rejects).
    return socket.inet_ntop(socket.AF_INET6, binascii.unhexlify(format(i, '032x')))
def incr_ipv6(ipaddr, mask=128, step=1):
# To separate the mask if provided with ipv6.
ipaddr, save_mask = [ipaddr, ''] if ipaddr.find('/') == -1 else ipaddr.split('/')
ip_int = METHOD_NAME(ipaddr)
# Saving the diff value.
ip_int_old = ip_int
ip_int >>= 128 - mask
ip_int <<= 128 - mask
ip_diff = ip_int_old - ip_int
# Actual logic.
ip_int >>= 128 - mask
ip_int += step
ip_int <<= 128 - mask
ip_int += ip_diff
ipaddr = int_to_ipv6(ip_int)
ipaddr = '/'.join([ipaddr, save_mask]) if save_mask != '' else ipaddr
return ipaddr
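# Illustrative (not executed): with mask=64 the step increments the /64 prefix
# while the host bits are preserved, e.g.
#   incr_ipv6('2000::1', mask=64) -> '2000:0:0:1::1'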
def range_ipv6(start_ip, count, mask=128):
ip_list = []
count = int(count)
mask = int(mask)
for _ in range(count):
ip_list.append(start_ip)
start_ip = incr_ipv6(start_ip, mask)
return ip_list
def variance(expected, actual, tolerance):
max_diff = round(float(expected) * tolerance / 100)
if max_diff == 0 and tolerance != 0:
max_diff = 1
low = expected - max_diff
high = expected + max_diff
if high >= actual >= low:
return 0
else:
return -1
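# Illustrative (not executed): variance(100, 95, 10) -> 0 (95 is inside the
# +/-10% window), while variance(100, 85, 10) -> -1.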
def expand_range(value_list):
    expanded = []
    for item in value_list:
        if "-" in str(item):
            low, high = str(item).split('-')
            expanded += [str(v) for v in range(int(low), int(high) + 1)]
        else:
            expanded.append(str(item))
    return expanded
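# Illustrative (not executed): expand_range(['1-3', 5]) -> ['1', '2', '3', '5']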
def range_ip(start_ip, count, mask=32):
# start_ip: format can be '1.1.1.1', '1.1.1.1/24', '2000::1', '2000::1/64'.
# It automatically detects ipv4 or ipv6.
# return format ['1.1.1.1', '1.1.1.2'], ['1.1.1.1/24', '1.1.1.2/24']
res = re.search(r'.*:.*', start_ip)
if res is None:
ip_list = range_ipv4(start_ip, count, mask)
else:
ip_list = range_ipv6(start_ip, count, mask)
return ip_list
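# Illustrative (not executed): the address family is auto-detected, e.g.
#   range_ip('1.1.1.1', 3) -> ['1.1.1.1', '1.1.1.2', '1.1.1.3']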
def mcast_ip2mac(mcast_ip):
mcast_mac = '01:00:5E:'
octets = mcast_ip.split('.')
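    # IPv4 multicast MACs embed the low 23 bits of the group address under the
    # fixed 01:00:5E prefix, hence the & 127 mask on the second octet below.
    # e.g. mcast_ip2mac('224.1.1.1') -> '01:00:5E:01:01:01'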
second_oct = int(octets[1]) & 127
third_oct = int(octets[2])
fourth_oct = int(octets[3])
mcast_mac = mcast_mac + format(second_oct, '02x') + ':' + format(third_oct, '02x') + ':' + format(fourth_oct, '02x')
return mcast_mac |
5,327 | test reset device 192 | # This file is part of the Trezor project.
#
# Copyright (C) 2012-2019 SatoshiLabs and contributors
#
# This library is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License version 3
# as published by the Free Software Foundation.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the License along with this library.
# If not, see <https://www.gnu.org/licenses/lgpl-3.0.html>.
import pytest
from mnemonic import Mnemonic
from trezorlib import device, messages
from trezorlib.debuglink import TrezorClientDebugLink as Client
from trezorlib.exceptions import TrezorFailure
from ...common import EXTERNAL_ENTROPY, MNEMONIC12, WITH_MOCK_URANDOM, generate_entropy
from ...input_flows import (
InputFlowBip39ResetBackup,
InputFlowBip39ResetFailedCheck,
InputFlowBip39ResetPIN,
)
pytestmark = [pytest.mark.skip_t1]
def reset_device(client: Client, strength: int):
with WITH_MOCK_URANDOM, client:
IF = InputFlowBip39ResetBackup(client)
client.set_input_flow(IF.get())
# No PIN, no passphrase, don't display random
device.reset(
client,
display_random=False,
strength=strength,
passphrase_protection=False,
pin_protection=False,
label="test",
language="en-US",
)
# generate mnemonic locally
internal_entropy = client.debug.state().reset_entropy
entropy = generate_entropy(strength, internal_entropy, EXTERNAL_ENTROPY)
expected_mnemonic = Mnemonic("english").to_mnemonic(entropy)
# Compare that device generated proper mnemonic for given entropies
assert IF.mnemonic == expected_mnemonic
# Check if device is properly initialized
resp = client.call_raw(messages.Initialize())
assert resp.initialized is True
assert resp.needs_backup is False
assert resp.pin_protection is False
assert resp.passphrase_protection is False
assert resp.backup_type is messages.BackupType.Bip39
# backup attempt fails because backup was done in reset
with pytest.raises(TrezorFailure, match="ProcessError: Seed already backed up"):
device.backup(client)
@pytest.mark.setup_client(uninitialized=True)
def test_reset_device(client: Client):
reset_device(client, 128) # 12 words
@pytest.mark.setup_client(uninitialized=True)
def METHOD_NAME(client: Client):
reset_device(client, 192) # 18 words
@pytest.mark.setup_client(uninitialized=True)
def test_reset_device_pin(client: Client):
strength = 256 # 24 words
with WITH_MOCK_URANDOM, client:
IF = InputFlowBip39ResetPIN(client)
client.set_input_flow(IF.get())
# PIN, passphrase, display random
device.reset(
client,
display_random=True,
strength=strength,
passphrase_protection=True,
pin_protection=True,
label="test",
language="en-US",
)
# generate mnemonic locally
internal_entropy = client.debug.state().reset_entropy
entropy = generate_entropy(strength, internal_entropy, EXTERNAL_ENTROPY)
expected_mnemonic = Mnemonic("english").to_mnemonic(entropy)
# Compare that device generated proper mnemonic for given entropies
assert IF.mnemonic == expected_mnemonic
# Check if device is properly initialized
resp = client.call_raw(messages.Initialize())
assert resp.initialized is True
assert resp.needs_backup is False
assert resp.pin_protection is True
assert resp.passphrase_protection is True
@pytest.mark.setup_client(uninitialized=True)
def test_reset_failed_check(client: Client):
strength = 256 # 24 words
with WITH_MOCK_URANDOM, client:
IF = InputFlowBip39ResetFailedCheck(client)
client.set_input_flow(IF.get())
# PIN, passphrase, display random
device.reset(
client,
display_random=False,
strength=strength,
passphrase_protection=False,
pin_protection=False,
label="test",
language="en-US",
)
# generate mnemonic locally
internal_entropy = client.debug.state().reset_entropy
entropy = generate_entropy(strength, internal_entropy, EXTERNAL_ENTROPY)
expected_mnemonic = Mnemonic("english").to_mnemonic(entropy)
# Compare that device generated proper mnemonic for given entropies
assert IF.mnemonic == expected_mnemonic
# Check if device is properly initialized
resp = client.call_raw(messages.Initialize())
assert resp.initialized is True
assert resp.needs_backup is False
assert resp.pin_protection is False
assert resp.passphrase_protection is False
assert resp.backup_type is messages.BackupType.Bip39
@pytest.mark.setup_client(uninitialized=True)
def test_failed_pin(client: Client):
strength = 128
ret = client.call_raw(
messages.ResetDevice(strength=strength, pin_protection=True, label="test")
)
# Confirm Reset
assert isinstance(ret, messages.ButtonRequest)
client.debug.press_yes()
ret = client.call_raw(messages.ButtonAck())
# Enter PIN for first time
assert isinstance(ret, messages.ButtonRequest)
client.debug.input("654")
ret = client.call_raw(messages.ButtonAck())
# Re-enter PIN for TR
if client.debug.model == "R":
assert isinstance(ret, messages.ButtonRequest)
client.debug.press_yes()
ret = client.call_raw(messages.ButtonAck())
# Enter PIN for second time
assert isinstance(ret, messages.ButtonRequest)
client.debug.input("456")
ret = client.call_raw(messages.ButtonAck())
# PIN mismatch
assert isinstance(ret, messages.ButtonRequest)
client.debug.press_yes()
ret = client.call_raw(messages.ButtonAck())
assert isinstance(ret, messages.ButtonRequest)
@pytest.mark.setup_client(mnemonic=MNEMONIC12)
def test_already_initialized(client: Client):
with pytest.raises(Exception):
device.reset(client, False, 128, True, True, "label", "en-US") |
5,328 | add unpack method | import types
import typing
from _typeshed import Incomplete
from collections.abc import Generator
from dataclasses import Field
from mashumaro.config import (
ADD_DIALECT_SUPPORT as ADD_DIALECT_SUPPORT,
ADD_SERIALIZATION_CONTEXT as ADD_SERIALIZATION_CONTEXT,
BaseConfig as BaseConfig,
SerializationStrategyValueType as SerializationStrategyValueType,
TO_DICT_ADD_BY_ALIAS_FLAG as TO_DICT_ADD_BY_ALIAS_FLAG,
TO_DICT_ADD_OMIT_NONE_FLAG as TO_DICT_ADD_OMIT_NONE_FLAG,
)
from mashumaro.core.const import Sentinel as Sentinel
from mashumaro.core.helpers import ConfigValue as ConfigValue
from mashumaro.core.meta.code.lines import CodeLines as CodeLines
from mashumaro.core.meta.helpers import (
get_args as get_args,
get_class_that_defines_field as get_class_that_defines_field,
get_class_that_defines_method as get_class_that_defines_method,
get_literal_values as get_literal_values,
get_name_error_name as get_name_error_name,
hash_type_args as hash_type_args,
is_class_var as is_class_var,
is_dataclass_dict_mixin as is_dataclass_dict_mixin,
is_dialect_subclass as is_dialect_subclass,
is_init_var as is_init_var,
is_literal as is_literal,
is_optional as is_optional,
is_type_var_any as is_type_var_any,
resolve_type_params as resolve_type_params,
substitute_type_params as substitute_type_params,
type_name as type_name,
)
from mashumaro.core.meta.types.common import FieldContext as FieldContext, ValueSpec as ValueSpec
from mashumaro.core.meta.types.pack import PackerRegistry as PackerRegistry
from mashumaro.core.meta.types.unpack import (
SubtypeUnpackerBuilder as SubtypeUnpackerBuilder,
UnpackerRegistry as UnpackerRegistry,
)
from mashumaro.dialect import Dialect as Dialect
from mashumaro.exceptions import (
BadDialect as BadDialect,
BadHookSignature as BadHookSignature,
InvalidFieldValue as InvalidFieldValue,
MissingDiscriminatorError as MissingDiscriminatorError,
MissingField as MissingField,
SuitableVariantNotFoundError as SuitableVariantNotFoundError,
ThirdPartyModuleNotFoundError as ThirdPartyModuleNotFoundError,
UnresolvedTypeReferenceError as UnresolvedTypeReferenceError,
UnserializableDataError as UnserializableDataError,
UnserializableField as UnserializableField,
UnsupportedDeserializationEngine as UnsupportedDeserializationEngine,
UnsupportedSerializationEngine as UnsupportedSerializationEngine,
)
from mashumaro.types import Discriminator as Discriminator
__PRE_SERIALIZE__: str
__PRE_DESERIALIZE__: str
__POST_SERIALIZE__: str
__POST_DESERIALIZE__: str
class CodeBuilder:
cls: Incomplete
lines: Incomplete
globals: Incomplete
resolved_type_params: Incomplete
field_classes: Incomplete
initial_type_args: Incomplete
dialect: Incomplete
default_dialect: Incomplete
allow_postponed_evaluation: Incomplete
format_name: Incomplete
decoder: Incomplete
encoder: Incomplete
encoder_kwargs: Incomplete
def __init__(
self,
cls: typing.Type,
type_args: typing.Tuple[typing.Type, ...] = ...,
dialect: typing.Optional[typing.Type[Dialect]] = ...,
first_method: str = ...,
allow_postponed_evaluation: bool = ...,
format_name: str = ...,
decoder: typing.Optional[typing.Any] = ...,
encoder: typing.Optional[typing.Any] = ...,
encoder_kwargs: typing.Optional[typing.Dict[str, typing.Any]] = ...,
default_dialect: typing.Optional[typing.Type[Dialect]] = ...,
) -> None: ...
def reset(self) -> None: ...
@property
def namespace(self) -> typing.Mapping[typing.Any, typing.Any]: ...
@property
def annotations(self) -> typing.Dict[str, typing.Any]: ...
def get_field_resolved_type_params(
self, field_name: str
) -> typing.Dict[typing.Type, typing.Type]: ...
def get_field_types(self, include_extras: bool = ...) -> typing.Dict[str, typing.Any]: ...
@property
def dataclass_fields(self) -> typing.Dict[str, Field]: ...
@property
def metadatas(self) -> typing.Dict[str, typing.Mapping[str, typing.Any]]: ...
def get_field_default(self, name: str) -> typing.Any: ...
def add_type_modules(self, *types_: typing.Type) -> None: ...
def ensure_module_imported(self, module: types.ModuleType) -> None: ...
def ensure_object_imported(
self, obj: typing.Any, name: typing.Optional[str] = ...
) -> None: ...
def add_line(self, line: str) -> None: ...
def indent(self, expr: typing.Optional[str] = ...) -> typing.Generator[None, None, None]: ...
def compile(self) -> None: ...
def get_declared_hook(self, method_name: str) -> typing.Any: ...
def METHOD_NAME(self) -> None: ...
def get_config(self, cls: Incomplete | None = ..., look_in_parents: bool = ...): ...
def get_discriminator(self) -> typing.Optional[Discriminator]: ...
def get_pack_method_flags(
self, cls: typing.Optional[typing.Type] = ..., pass_encoder: bool = ...
) -> str: ...
def get_unpack_method_flags(
self, cls: typing.Optional[typing.Type] = ..., pass_decoder: bool = ...
) -> str: ...
def get_pack_method_default_flag_values(
self, cls: typing.Optional[typing.Type] = ..., pass_encoder: bool = ...
) -> str: ...
def get_unpack_method_default_flag_values(self, pass_decoder: bool = ...) -> str: ...
def is_code_generation_option_enabled(
self, option: str, cls: typing.Optional[typing.Type] = ...
) -> bool: ...
@classmethod
def get_unpack_method_name(
cls,
type_args: typing.Iterable = ...,
format_name: str = ...,
decoder: typing.Optional[typing.Any] = ...,
) -> str: ...
@classmethod
def get_pack_method_name(
cls,
type_args: typing.Tuple[typing.Type, ...] = ...,
format_name: str = ...,
encoder: typing.Optional[typing.Any] = ...,
) -> str: ...
def add_pack_method(self) -> None: ...
def iter_serialization_strategies(
self, metadata, ftype
) -> Generator[Incomplete, None, None]: ... |
5,329 | deserialize | import array
import datetime
from decimal import Decimal
from io import BytesIO
from uuid import uuid4
import fastavro
from .conftest import assert_naive_datetime_equal_to_tz_datetime
schema = {
"fields": [
{
"name": "union_uuid",
"type": ["null", {"type": "string", "logicalType": "uuid"}],
},
{"name": "array_string", "type": {"type": "array", "items": "string"}},
{
"name": "multi_union_time",
"type": [
"null",
"string",
{"type": "long", "logicalType": "timestamp-micros"},
],
},
{
"name": "array_bytes_decimal",
"type": [
"null",
{
"type": "array",
"items": {
"type": "bytes",
"logicalType": "decimal",
"precision": 18,
"scale": 6,
},
},
],
},
{
"name": "array_fixed_decimal",
"type": [
"null",
{
"type": "array",
"items": {
"type": "fixed",
"name": "FixedDecimal",
"size": 8,
"logicalType": "decimal",
"precision": 18,
"scale": 6,
},
},
],
},
{
"name": "array_record",
"type": {
"type": "array",
"items": {
"type": "record",
"name": "some_record",
"fields": [
{"name": "f1", "type": "string"},
{
"name": "f2",
"type": {
"type": "bytes",
"logicalType": "decimal",
"precision": 18,
"scale": 6,
},
},
],
},
},
},
],
"namespace": "namespace",
"name": "name",
"type": "record",
}
def serialize(schema, data):
bytes_writer = BytesIO()
fastavro.schemaless_writer(bytes_writer, schema, data)
return bytes_writer.getvalue()
def METHOD_NAME(schema, binary):
bytes_writer = BytesIO()
bytes_writer.write(binary)
bytes_writer.seek(0)
res = fastavro.schemaless_reader(bytes_writer, schema)
return res
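# Roundtrip sketch (not executed here; the tests below exercise it with real
# data): METHOD_NAME(schema, serialize(schema, record)) == record for any
# record matching the schema, modulo timezone handling of naive datetimes.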
def test_complex_schema():
data1 = {
"union_uuid": uuid4(),
"array_string": ["a", "b", "c"],
"multi_union_time": datetime.datetime.now(),
"array_bytes_decimal": [Decimal("123.456")],
"array_fixed_decimal": [Decimal("123.456")],
"array_record": [{"f1": "1", "f2": Decimal("123.456")}],
}
binary = serialize(schema, data1)
data2 = METHOD_NAME(schema, binary)
assert len(data1) == len(data2)
for field in [
"array_string",
"array_bytes_decimal",
"array_fixed_decimal",
"array_record",
]:
assert data1[field] == data2[field]
assert_naive_datetime_equal_to_tz_datetime(
data1["multi_union_time"], data2["multi_union_time"]
)
def test_complex_schema_nulls():
data1 = {
"array_string": ["a", "b", "c"],
"array_record": [{"f1": "1", "f2": Decimal("123.456")}],
}
binary = serialize(schema, data1)
data2 = METHOD_NAME(schema, binary)
data1_compare = data1
data1_compare.update(
{
"multi_union_time": None,
"array_bytes_decimal": None,
"array_fixed_decimal": None,
"union_uuid": None,
}
)
assert data1_compare == data2
def test_array_from_tuple():
data_list = serialize({"type": "array", "items": "int"}, [1, 2, 3])
data_tuple = serialize({"type": "array", "items": "int"}, (1, 2, 3))
assert data_list == data_tuple
def test_array_from_array():
schema = {"type": "array", "items": "int"}
using_list = serialize(schema, [1, -2, 3])
using_array = serialize(schema, array.array("l", [1, -2, 3]))
assert using_list == using_array
assert METHOD_NAME(schema, using_list) == [1, -2, 3]
def test_bytes_from_bytearray():
schema = {"type": "bytes"}
using_bytes = serialize(schema, b"\x00\xf1\x02")
using_bytearray = serialize(schema, bytearray(b"\x00\xf1\x02"))
assert using_bytes == using_bytearray
assert METHOD_NAME(schema, using_bytes) == b"\x00\xf1\x02" |
5,330 | test new pr |
# Copyright 2016 Battelle Energy Alliance, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import unicode_literals, absolute_import
from ci.tests import SeleniumTester, utils
from selenium.webdriver.common.by import By
from ci import models
from django.urls import reverse
from django.test import override_settings
@override_settings(INSTALLED_GITSERVERS=[utils.github_config()])
class Tests(SeleniumTester.SeleniumTester):
@SeleniumTester.test_drivers()
def test_nothing(self):
utils.create_git_server()
self.get()
self.assertEqual(self.selenium.title, "Civet Home")
self.check_repos()
self.check_events()
@SeleniumTester.test_drivers()
def test_repo_update_all(self):
repo, branch = self.create_repo_with_prs()
self.get()
self.check_repos()
self.check_events()
self.wait_for_js()
branch.status = models.JobStatus.SUCCESS
branch.save()
for pr in repo.pull_requests.all():
pr.status = models.JobStatus.SUCCESS
pr.title = "New title"
pr.username = "foobar"
pr.save()
self.wait_for_js()
self.check_js_error()
self.check_repos()
self.check_events()
@SeleniumTester.test_drivers()
def test_repo_update_branch(self):
repo, branch = self.create_repo_with_prs()
self.get()
self.check_repos()
self.check_events()
# need to sleep so that last_modified will trigger
self.wait_for_js()
branch.status = models.JobStatus.SUCCESS
branch.save()
self.wait_for_js()
self.check_js_error()
self.check_repos()
self.check_events()
@SeleniumTester.test_drivers()
def test_repo_update_pr(self):
repo, branch = self.create_repo_with_prs()
self.get()
self.check_repos()
self.check_events()
self.wait_for_js()
pr = repo.pull_requests.last()
pr.status = models.JobStatus.SUCCESS
pr.title = "New title"
pr.username = "foobar"
pr.save()
self.wait_for_js()
self.check_js_error()
self.check_repos()
self.check_events()
@SeleniumTester.test_drivers()
@override_settings(PERMISSION_CACHE_TIMEOUT=0)
def test_new_repo(self):
repo, branch = self.create_repo_with_prs()
self.get()
self.check_repos()
self.check_events()
self.wait_for_js()
repo2, branch2 = self.create_repo_with_prs(name="repo2")
self.wait_for_js()
self.check_js_error()
self.check_repos()
self.check_events()
@SeleniumTester.test_drivers()
def test_new_branch(self):
repo, branch = self.create_repo_with_prs()
self.get()
self.check_repos()
self.check_events()
self.wait_for_js()
branch2 = utils.create_branch(name="branch2", repo=repo)
branch2.status = models.JobStatus.SUCCESS
branch2.save()
self.wait_for_js()
self.check_js_error()
self.check_repos()
self.check_events()
@SeleniumTester.test_drivers()
def METHOD_NAME(self):
repo, branch = self.create_repo_with_prs()
self.get()
self.check_repos()
self.check_events()
pr = utils.create_pr(repo=repo, number=200)
pr.status = models.JobStatus.RUNNING
pr.save()
self.wait_for_js()
self.check_js_error()
self.check_repos()
self.check_events()
@SeleniumTester.test_drivers()
def test_close_pr(self):
repo, branch = self.create_repo_with_prs()
self.get()
self.check_repos()
self.check_events()
pr = repo.pull_requests.first()
pr.closed = True
pr.save()
self.wait_for_js()
self.check_js_error()
self.check_repos()
self.check_events()
@SeleniumTester.test_drivers()
def test_event_update(self):
ev = self.create_event_with_jobs()
self.get()
self.check_repos()
self.check_events()
ev.status = models.JobStatus.SUCCESS
ev.save()
for job in ev.jobs.all():
job.status = models.JobStatus.SUCCESS
job.failed_step = "Failed"
job.invalidated = True
job.save()
self.wait_for_js()
self.check_js_error()
self.check_repos()
self.check_events()
@SeleniumTester.test_drivers()
def test_new_event(self):
self.create_event_with_jobs()
self.get()
self.check_repos()
self.check_events()
# wait again to make sure new event has different timestamp
self.wait_for_js()
self.create_event_with_jobs(commit='4321')
self.wait_for_js()
self.check_js_error()
self.check_repos()
self.check_events()
@SeleniumTester.test_drivers()
def test_event_new_job(self):
self.create_event_with_jobs()
self.get()
self.check_repos()
self.check_events()
ev = models.Event.objects.first()
r2 = utils.create_recipe(name="r2")
ev.save() # to trigger the update
utils.create_job(event=ev, recipe=r2)
self.wait_for_js()
self.check_js_error()
self.check_repos()
self.check_events()
@SeleniumTester.test_drivers()
@override_settings(DEBUG=True)
@override_settings(PERMISSION_CACHE_TIMEOUT=0)
def test_repo_preferences(self):
repos = []
for i in range(3):
repo, branch = self.create_repo_with_prs(name="repo%s" % i)
self.create_event_with_jobs(user=repo.user, branch1=branch, branch2=branch)
repos.append(repo)
self.wait_for_js(wait=1)
# user not logged in
self.get()
self.check_repos()
self.check_events()
user = repos[0].user
start_session_url = reverse('ci:start_session', args=[user.pk])
self.get(start_session_url)
self.wait_for_js()
# user logged in, no repo prefs
self.get()
self.check_repos()
self.check_events()
for i in range(3):
user.preferred_repos.add(repos[i])
self.get()
if i == (len(repos)-1):
self.check_repos()
self.check_events()
else:
repo_list = self.selenium.find_elements(By.XPATH, "//ul[@id='repo_status']/li")
self.assertEqual(len(repo_list), user.preferred_repos.count())
with self.assertRaises(Exception):
self.check_repos()
with self.assertRaises(Exception):
self.check_events()
events = []
for repo in user.preferred_repos.all():
self.check_repo_status(repo)
for ev in models.Event.objects.filter(base__branch__repository=repo).all():
self.check_event_row(ev)
events.append(ev)
event_rows = self.selenium.find_elements(By.XPATH, "//table[@id='event_table']/tbody/tr")
self.assertEqual(len(event_rows), len(events))
self.get("/?default")
self.check_repos()
self.check_events() |
5,331 | normalize block | #!/usr/bin/env python3
"""
Copyright 2020 Hans-Kristian Arntzen for Valve Corporation
This library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with this library; if not, write to the Free Software
Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA
"""
"""
Ad-hoc script to display profiling data
"""
import sys
import os
import argparse
import collections
import struct
ProfileCase = collections.namedtuple('ProfileCase', 'name iterations ticks')
def is_valid_block(block):
if len(block) != 64:
return False
ticks = struct.unpack('=Q', block[0:8])[0]
iterations = struct.unpack('=Q', block[8:16])[0]
return ticks != 0 and iterations != 0 and block[16] != 0
def parse_block(block):
ticks = struct.unpack('=Q', block[0:8])[0]
iterations = struct.unpack('=Q', block[8:16])[0]
name = block[16:].split(b'\0', 1)[0].decode('ascii')
return ProfileCase(ticks = ticks, iterations = iterations, name = name)
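# Assumed fixed record layout (64 bytes per block, native-endian '=Q' fields):
#   bytes 0..7   u64 ticks
#   bytes 8..15  u64 iterations
#   bytes 16..63 NUL-terminated ASCII counter name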
def filter_name(name, allow):
if allow is None:
return True
ret = name in allow
return ret
def find_record_by_name(blocks, name):
for block in blocks:
if block.name == name:
return block
return None
def METHOD_NAME(block, iter):
return ProfileCase(name = block.name, iterations = block.iterations / iter, ticks = block.ticks / iter)
def per_iteration_normalize(block):
return ProfileCase(name = block.name, iterations = block.iterations, ticks = block.ticks / block.iterations)
def main():
parser = argparse.ArgumentParser(description = 'Script for parsing profiling data.')
parser.add_argument('--divider', type = str, help = 'Represent data in terms of count per divider. Divider is another counter name.')
parser.add_argument('--per-iteration', action = 'store_true', help = 'Represent ticks in terms of ticks / iteration. Cannot be used with --divider.')
parser.add_argument('--name', nargs = '+', type = str, help = 'Only display data for certain counters.')
parser.add_argument('--sort', type = str, default = 'none', help = 'Sorts input data according to "iterations" or "ticks".')
parser.add_argument('--delta', type = str, help = 'Subtract iterations and timing from other profile blob.')
parser.add_argument('profile', help = 'The profile binary blob.')
args = parser.parse_args()
if not args.profile:
raise AssertionError('Need profile folder.')
delta_map = {}
if args.delta is not None:
with open(args.delta, 'rb') as f:
for block in iter(lambda: f.read(64), b''):
if is_valid_block(block):
b = parse_block(block)
delta_map[b.name] = b
blocks = []
with open(args.profile, 'rb') as f:
for block in iter(lambda: f.read(64), b''):
if is_valid_block(block):
b = parse_block(block)
if b.name in delta_map:
d = delta_map[b.name]
b = ProfileCase(ticks = b.ticks - d.ticks,
iterations = b.iterations - d.iterations,
name = b.name)
if b.iterations < 0 or b.ticks < 0:
raise AssertionError('After subtracting, iterations or ticks became negative.')
if b.iterations > 0:
blocks.append(b)
if args.divider is not None:
if args.per_iteration:
raise AssertionError('Cannot use --per-iteration alongside --divider.')
divider_block = find_record_by_name(blocks, args.divider)
if divider_block is None:
raise AssertionError('Divider block: ' + args.divider + ' does not exist.')
print('Dividing other results by number of iterations of {}.'.format(args.divider))
blocks = [METHOD_NAME(block, divider_block.iterations) for block in blocks]
elif args.per_iteration:
blocks = [per_iteration_normalize(block) for block in blocks]
if args.sort == 'iterations':
blocks.sort(reverse = True, key = lambda a: a.iterations)
elif args.sort == 'ticks':
blocks.sort(reverse = True, key = lambda a: a.ticks)
elif args.sort != 'none':
raise AssertionError('Invalid argument for --sort.')
for block in blocks:
if filter_name(block.name, args.name):
print(block.name + ':')
if args.divider is not None:
print(' Normalized iterations (iterations per {}):'.format(args.divider), block.iterations)
else:
print(' Iterations:', block.iterations)
if args.divider is not None:
print(' Time spent per iteration of {}: {:.3f}'.format(args.divider, block.ticks / 1000.0), "Kcycles")
elif args.per_iteration:
print(' Time spent per iteration: {:.3f}'.format(block.ticks / 1000.0), "Kcycles")
else:
print(' Total time spent: {:.3f}'.format(block.ticks / 1000.0), "Kcycles")
if __name__ == '__main__':
main() |
5,332 | handle match | """
Progress Bar.
pymdownx.progressbar
Simple plugin to add support for progress bars
```
/* No label */
[==30%]
/* Label */
[==30% MyLabel]
/* works with attr_list inline style */
[==50/200 MyLabel]{: .additional-class }
```
A new line is not required before the progress bar, but one is suggested unless
the bar is inside a table.
Percentages and fractions are both accepted; floats are okay, and numbers must
be positive. This is an experimental extension, and its functionality is
subject to change.
Minimum Recommended Styling
(but you could add gloss, candy striping, animation, or anything else):
```
.progress {
display: block;
width: 300px;
margin: 10px 0;
height: 24px;
border: 1px solid #ccc;
-webkit-border-radius: 3px;
-moz-border-radius: 3px;
border-radius: 3px;
background-color: #F8F8F8;
position: relative;
box-shadow: inset -1px 1px 3px rgba(0, 0, 0, .1);
}
.progress-label {
position: absolute;
text-align: center;
font-weight: bold;
width: 100%; margin: 0;
line-height: 24px;
color: #333;
-webkit-font-smoothing: antialiased !important;
white-space: nowrap;
overflow: hidden;
}
.progress-bar {
height: 24px;
float: left;
border-right: 1px solid #ccc;
-webkit-border-radius: 3px;
-moz-border-radius: 3px;
border-radius: 3px;
background-color: #34c2e3;
box-shadow: inset 0 1px 0px rgba(255, 255, 255, .5);
}
For Level Colors
.progress-100plus .progress-bar {
background-color: #1ee038;
}
.progress-80plus .progress-bar {
background-color: #86e01e;
}
.progress-60plus .progress-bar {
background-color: #f2d31b;
}
.progress-40plus .progress-bar {
background-color: #f2b01e;
}
.progress-20plus .progress-bar {
background-color: #f27011;
}
.progress-0plus .progress-bar {
background-color: #f63a0f;
}
```
MIT license.
Copyright (c) 2014 - 2017 Isaac Muse <isaacmuse@gmail.com>
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions
of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
"""
from markdown import Extension
from markdown.inlinepatterns import InlineProcessor, dequote
import xml.etree.ElementTree as etree
from markdown.extensions.attr_list import AttrListTreeprocessor
from . import util
RE_PROGRESS = r'''(?x)
\[={1,}\s* # Opening
(?:
(?P<percent>100(?:.0+)?|[1-9]?[0-9](?:\.\d+)?)% | # Percent
(?:(?P<frac_num>\d+(?:\.\d+)?)\s*/\s*(?P<frac_den>\d+(?:\.\d+)?)) # Fraction
)
(?P<title>\s+(?P<quote>['"]).*?(?P=quote))?\s* # Title
\] # Closing
(?P<attr_list>\{\:?([^\}]*)\})? # Optional attr list
'''
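# Illustrative matches for RE_PROGRESS (hypothetical input strings):
#   '[==30%]'            -> percent='30', no title
#   '[==50/200 "Label"]' -> frac_num='50', frac_den='200', quoted title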
CLASS_LEVEL = "progress-%dplus"
class ProgressBarTreeProcessor(AttrListTreeprocessor):
"""Used for AttrList compatibility."""
def run(self, elem):
"""Inline check for attributes at start of tail."""
if elem.tail:
m = self.INLINE_RE.match(elem.tail)
if m:
self.assign_attrs(elem, m.group(1))
elem.tail = elem.tail[m.end():]
class ProgressBarPattern(InlineProcessor):
"""Pattern handler for the progress bars."""
def __init__(self, pattern, md):
"""Initialize."""
InlineProcessor.__init__(self, pattern, md)
def create_tag(self, width, label, add_classes, alist):
"""Create the tag."""
# Create list of all classes and remove duplicates
classes = list(
set(
["progress"] +
self.config.get('add_classes', '').split() +
add_classes
)
)
classes.sort()
el = etree.Element("div")
el.set('class', ' '.join(classes))
bar = etree.SubElement(el, 'div')
bar.set('class', "progress-bar")
bar.set('style', 'width:%s%%' % width)
p = etree.SubElement(bar, 'p')
p.set('class', 'progress-label')
p.text = label
if alist is not None:
el.tail = alist
if 'attr_list' in self.md.treeprocessors:
ProgressBarTreeProcessor(self.md).run(el)
return el
def METHOD_NAME(self, m, data):
"""Handle the match."""
label = ""
level_class = self.config.get('level_class', False)
increment = self.config.get('progress_increment', 20)
add_classes = []
alist = None
if m.group(5):
label = dequote(self.unescape(m.group('title').strip()))
if m.group('attr_list'):
alist = m.group('attr_list')
if m.group('percent'):
value = float(m.group('percent'))
else:
try:
num = float(m.group('frac_num'))
except Exception: # pragma: no cover
num = 0.0
try:
den = float(m.group('frac_den'))
except Exception: # pragma: no cover
den = 0.0
if den == 0.0:
value = 0.0
else:
value = (num / den) * 100.0
# We can never get a value < 0,
# but we must check for > 100.
if value > 100.0:
value = 100.0
# Round down to nearest increment step and include class if desired
if level_class:
add_classes.append(CLASS_LEVEL % int(value - (value % increment)))
return self.create_tag('%.2f' % value, label, add_classes, alist), m.start(0), m.end(0)
class ProgressBarExtension(Extension):
"""Add progress bar extension to Markdown class."""
def __init__(self, *args, **kwargs):
"""Initialize."""
self.config = {
'level_class': [
True,
"Include class that defines progress level - Default: True"
],
'progress_increment': [
20,
"Progress increment step - Default: 20"
],
'add_classes': [
'',
"Add additional classes to the progress tag for styling. "
"Classes are separated by spaces. - Default: None"
]
}
super(ProgressBarExtension, self).__init__(*args, **kwargs)
def extendMarkdown(self, md):
"""Add the progress bar pattern handler."""
util.escape_chars(md, ['='])
progress = ProgressBarPattern(RE_PROGRESS, md)
progress.config = self.getConfigs()
md.inlinePatterns.register(progress, "progress-bar", 179)
def makeExtension(*args, **kwargs):
"""Return extension."""
return ProgressBarExtension(*args, **kwargs) |
5,333 | test ellipse | #-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2023, Anaconda, Inc., and Bokeh Contributors.
# All rights reserved.
#
# The full license is in the file LICENSE.txt, distributed with this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Boilerplate
#-----------------------------------------------------------------------------
from __future__ import annotations # isort:skip
import pytest ; pytest
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
# Bokeh imports
from bokeh.models import GlyphRenderer, glyphs
from bokeh.plotting import figure
#-----------------------------------------------------------------------------
# Setup
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# General API
#-----------------------------------------------------------------------------
def test_annular_wedge() -> None:
p = figure()
gr = p.annular_wedge()
assert isinstance(gr, GlyphRenderer)
assert isinstance(gr.glyph, glyphs.AnnularWedge)
def test_annulus() -> None:
p = figure()
gr = p.annulus()
assert isinstance(gr, GlyphRenderer)
assert isinstance(gr.glyph, glyphs.Annulus)
def test_arc() -> None:
p = figure()
gr = p.arc()
assert isinstance(gr, GlyphRenderer)
assert isinstance(gr.glyph, glyphs.Arc)
def test_bezier() -> None:
p = figure()
gr = p.bezier()
assert isinstance(gr, GlyphRenderer)
assert isinstance(gr.glyph, glyphs.Bezier)
def test_circle() -> None:
p = figure()
gr = p.circle()
assert isinstance(gr, GlyphRenderer)
assert isinstance(gr.glyph, glyphs.Circle)
def test_block() -> None:
p = figure()
gr = p.block()
assert isinstance(gr, GlyphRenderer)
assert isinstance(gr.glyph, glyphs.Block)
def test_harea() -> None:
p = figure()
gr = p.harea()
assert isinstance(gr, GlyphRenderer)
assert isinstance(gr.glyph, glyphs.HArea)
def test_harea_step() -> None:
    p = figure()
    gr = p.harea_step()
    assert isinstance(gr, GlyphRenderer)
    assert isinstance(gr.glyph, glyphs.HAreaStep)
def test_hbar() -> None:
p = figure()
gr = p.hbar()
assert isinstance(gr, GlyphRenderer)
assert isinstance(gr.glyph, glyphs.HBar)
def test_hspan() -> None:
p = figure()
gr = p.hspan()
assert isinstance(gr, GlyphRenderer)
assert isinstance(gr.glyph, glyphs.HSpan)
def test_hstrip() -> None:
p = figure()
gr = p.hstrip()
assert isinstance(gr, GlyphRenderer)
assert isinstance(gr.glyph, glyphs.HStrip)
def METHOD_NAME() -> None:
p = figure()
gr = p.ellipse()
assert isinstance(gr, GlyphRenderer)
assert isinstance(gr.glyph, glyphs.Ellipse)
def test_hex_tile() -> None:
p = figure()
gr = p.hex_tile()
assert isinstance(gr, GlyphRenderer)
assert isinstance(gr.glyph, glyphs.HexTile)
def test_image() -> None:
p = figure()
gr = p.image()
assert isinstance(gr, GlyphRenderer)
assert isinstance(gr.glyph, glyphs.Image)
def test_image_rgba() -> None:
p = figure()
gr = p.image_rgba()
assert isinstance(gr, GlyphRenderer)
assert isinstance(gr.glyph, glyphs.ImageRGBA)
def test_image_stack() -> None:
p = figure()
gr = p.image_stack()
assert isinstance(gr, GlyphRenderer)
assert isinstance(gr.glyph, glyphs.ImageStack)
def test_image_url() -> None:
p = figure()
gr = p.image_url()
assert isinstance(gr, GlyphRenderer)
assert isinstance(gr.glyph, glyphs.ImageURL)
def test_line() -> None:
p = figure()
gr = p.line()
assert isinstance(gr, GlyphRenderer)
assert isinstance(gr.glyph, glyphs.Line)
def test_multi_line() -> None:
p = figure()
gr = p.multi_line()
assert isinstance(gr, GlyphRenderer)
assert isinstance(gr.glyph, glyphs.MultiLine)
def test_multi_polygons() -> None:
p = figure()
gr = p.multi_polygons()
assert isinstance(gr, GlyphRenderer)
assert isinstance(gr.glyph, glyphs.MultiPolygons)
def test_patch() -> None:
p = figure()
gr = p.patch()
assert isinstance(gr, GlyphRenderer)
assert isinstance(gr.glyph, glyphs.Patch)
def test_patches() -> None:
p = figure()
gr = p.patches()
assert isinstance(gr, GlyphRenderer)
assert isinstance(gr.glyph, glyphs.Patches)
def test_quad() -> None:
p = figure()
gr = p.quad()
assert isinstance(gr, GlyphRenderer)
assert isinstance(gr.glyph, glyphs.Quad)
def test_quadratic() -> None:
p = figure()
gr = p.quadratic()
assert isinstance(gr, GlyphRenderer)
assert isinstance(gr.glyph, glyphs.Quadratic)
def test_ray() -> None:
p = figure()
gr = p.ray()
assert isinstance(gr, GlyphRenderer)
assert isinstance(gr.glyph, glyphs.Ray)
def test_rect() -> None:
p = figure()
gr = p.rect()
assert isinstance(gr, GlyphRenderer)
assert isinstance(gr.glyph, glyphs.Rect)
def test_step() -> None:
p = figure()
gr = p.step()
assert isinstance(gr, GlyphRenderer)
assert isinstance(gr.glyph, glyphs.Step)
def test_segment() -> None:
p = figure()
gr = p.segment()
assert isinstance(gr, GlyphRenderer)
assert isinstance(gr.glyph, glyphs.Segment)
def test_text() -> None:
p = figure()
gr = p.text()
assert isinstance(gr, GlyphRenderer)
assert isinstance(gr.glyph, glyphs.Text)
def test_varea() -> None:
p = figure()
gr = p.varea()
assert isinstance(gr, GlyphRenderer)
assert isinstance(gr.glyph, glyphs.VArea)
def test_varea_step() -> None:
p = figure()
gr = p.varea_step()
assert isinstance(gr, GlyphRenderer)
assert isinstance(gr.glyph, glyphs.VAreaStep)
def test_vbar() -> None:
p = figure()
gr = p.vbar()
assert isinstance(gr, GlyphRenderer)
assert isinstance(gr.glyph, glyphs.VBar)
def test_vspan() -> None:
p = figure()
gr = p.vspan()
assert isinstance(gr, GlyphRenderer)
assert isinstance(gr.glyph, glyphs.VSpan)
def test_vstrip() -> None:
p = figure()
gr = p.vstrip()
assert isinstance(gr, GlyphRenderer)
assert isinstance(gr.glyph, glyphs.VStrip)
def test_wedge() -> None:
p = figure()
gr = p.wedge()
assert isinstance(gr, GlyphRenderer)
assert isinstance(gr.glyph, glyphs.Wedge)
#-----------------------------------------------------------------------------
# Dev API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Private API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Code
#----------------------------------------------------------------------------- |
5,334 | test failing different mask ranks | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for metrics_utils."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import test_util
from tensorflow.python.keras.utils import metrics_utils
from tensorflow.python.ops.ragged import ragged_factory_ops
from tensorflow.python.ops.ragged import ragged_tensor
from tensorflow.python.platform import googletest
@test_util.run_all_in_graph_and_eager_modes
class RaggedSizeOpTest(test_util.TensorFlowTestCase, parameterized.TestCase):
@parameterized.parameters([
{
'x_list': [1],
'y_list': [2]
},
{
'x_list': [1, 2],
'y_list': [2, 3]
},
{
'x_list': [1, 2, 4],
'y_list': [2, 3, 5]
},
{
'x_list': [[1, 2], [3, 4]],
'y_list': [[2, 3], [5, 6]]
},
])
def test_passing_dense_tensors(self, x_list, y_list):
x = constant_op.constant(x_list)
y = constant_op.constant(y_list)
[x,
y], _ = metrics_utils.ragged_assert_compatible_and_get_flat_values([x, y])
x.shape.assert_is_compatible_with(y.shape)
@parameterized.parameters([
{
'x_list': [1],
},
{
'x_list': [1, 2],
},
{
'x_list': [1, 2, 4],
},
{
'x_list': [[1, 2], [3, 4]],
},
])
def test_passing_one_dense_tensor(self, x_list):
x = constant_op.constant(x_list)
[x], _ = metrics_utils.ragged_assert_compatible_and_get_flat_values([x])
@parameterized.parameters([
{
'x_list': [1],
'y_list': [2]
},
{
'x_list': [1, 2],
'y_list': [2, 3]
},
{
'x_list': [1, 2, 4],
'y_list': [2, 3, 5]
},
{
'x_list': [[1, 2], [3, 4]],
'y_list': [[2, 3], [5, 6]]
},
{
'x_list': [[1, 2], [3, 4], [1]],
'y_list': [[2, 3], [5, 6], [3]]
},
{
'x_list': [[1, 2], [], [1]],
'y_list': [[2, 3], [], [3]]
},
])
def test_passing_both_ragged(self, x_list, y_list):
x = ragged_factory_ops.constant(x_list)
y = ragged_factory_ops.constant(y_list)
[x,
y], _ = metrics_utils.ragged_assert_compatible_and_get_flat_values([x, y])
x.shape.assert_is_compatible_with(y.shape)
@parameterized.parameters([
{
'x_list': [1],
},
{
'x_list': [1, 2],
},
{
'x_list': [1, 2, 4],
},
{
'x_list': [[1, 2], [3, 4]],
},
{
'x_list': [[1, 2], [3, 4], [1]],
},
{
'x_list': [[1, 2], [], [1]],
},
])
def test_passing_one_ragged(self, x_list):
x = ragged_factory_ops.constant(x_list)
[x], _ = metrics_utils.ragged_assert_compatible_and_get_flat_values([x])
@parameterized.parameters([
{
'x_list': [1],
'y_list': [2],
'mask_list': [0]
},
{
'x_list': [1, 2],
'y_list': [2, 3],
'mask_list': [0, 1]
},
{
'x_list': [1, 2, 4],
'y_list': [2, 3, 5],
'mask_list': [1, 1, 1]
},
{
'x_list': [[1, 2], [3, 4]],
'y_list': [[2, 3], [5, 6]],
'mask_list': [[1, 1], [0, 1]]
},
{
'x_list': [[1, 2], [3, 4], [1]],
'y_list': [[2, 3], [5, 6], [3]],
'mask_list': [[1, 1], [0, 0], [1]]
},
{
'x_list': [[1, 2], [], [1]],
'y_list': [[2, 3], [], [3]],
'mask_list': [[1, 1], [], [0]]
},
])
def test_passing_both_ragged_with_mask(self, x_list, y_list, mask_list):
x = ragged_factory_ops.constant(x_list)
y = ragged_factory_ops.constant(y_list)
mask = ragged_factory_ops.constant(mask_list)
[x, y], mask = \
metrics_utils.ragged_assert_compatible_and_get_flat_values([x, y], mask)
x.shape.assert_is_compatible_with(y.shape)
y.shape.assert_is_compatible_with(mask.shape)
@parameterized.parameters([
{
'x_list': [1],
'mask_list': [0]
},
{
'x_list': [1, 2],
'mask_list': [0, 1]
},
{
'x_list': [1, 2, 4],
'mask_list': [1, 1, 1]
},
{
'x_list': [[1, 2], [3, 4]],
'mask_list': [[1, 1], [0, 1]]
},
{
'x_list': [[1, 2], [3, 4], [1]],
'mask_list': [[1, 1], [0, 0], [1]]
},
{
'x_list': [[1, 2], [], [1]],
'mask_list': [[1, 1], [], [0]]
},
])
def test_passing_one_ragged_with_mask(self, x_list, mask_list):
x = ragged_factory_ops.constant(x_list)
mask = ragged_factory_ops.constant(mask_list)
[x], mask = \
metrics_utils.ragged_assert_compatible_and_get_flat_values([x], mask)
x.shape.assert_is_compatible_with(mask.shape)
@parameterized.parameters([
{
'x_list': [[[1, 3]]],
'y_list': [[2, 3]]
},
])
def test_failing_different_ragged_and_dense_ranks(self, x_list, y_list):
x = ragged_factory_ops.constant(x_list)
y = ragged_factory_ops.constant(y_list)
with self.assertRaises(ValueError): # pylint: disable=g-error-prone-assert-raises
[x, y
], _ = metrics_utils.ragged_assert_compatible_and_get_flat_values([x, y])
@parameterized.parameters([
{
'x_list': [[[1, 3]]],
'y_list': [[[2, 3]]],
'mask_list': [[0, 1]]
},
])
def METHOD_NAME(self, x_list, y_list, mask_list):
x = ragged_factory_ops.constant(x_list)
y = ragged_factory_ops.constant(y_list)
mask = ragged_factory_ops.constant(mask_list)
with self.assertRaises(ValueError): # pylint: disable=g-error-prone-assert-raises
[x, y
], _ = metrics_utils.ragged_assert_compatible_and_get_flat_values([x, y],
mask)
  # We do not support cases where the ragged ranks differ even though the
  # overall dimension shapes and sizes are identical, because handling them
  # would add too much performance overhead to the common use cases.
def test_failing_different_ragged_ranks(self):
dt = constant_op.constant([[[1, 2]]])
# adding a ragged dimension
x = ragged_tensor.RaggedTensor.from_row_splits(dt, row_splits=[0, 1])
y = ragged_factory_ops.constant([[[[1, 2]]]])
with self.assertRaises(ValueError): # pylint: disable=g-error-prone-assert-raises
[x, y], _ = \
metrics_utils.ragged_assert_compatible_and_get_flat_values([x, y])
if __name__ == '__main__':
googletest.main() |
5,335 | test element factory is an iterable | import gc
from collections.abc import Container, Iterable
import pytest
from gaphor.core import event_handler
from gaphor.core.modeling.event import (
ElementCreated,
ElementDeleted,
ModelFlushed,
ModelReady,
ServiceEvent,
)
from gaphor.core.modeling.presentation import Presentation
from gaphor.UML import Operation, Parameter
def METHOD_NAME(element_factory):
assert isinstance(element_factory, Iterable)
def test_element_factory_is_a_container(element_factory):
assert isinstance(element_factory, Container)
def test_create(element_factory):
element_factory.create(Parameter)
assert len(list(element_factory.values())) == 1
def test_create_is_idempotent(element_factory):
param = element_factory.create(Parameter)
new_param = element_factory.create_as(Parameter, param.id)
assert param is new_param
def test_create_is_idempotent_but_validates_type(element_factory):
param = element_factory.create(Parameter)
with pytest.raises(TypeError):
element_factory.create_as(Operation, param.id)
def test_should_not_create_presentation_elements(element_factory):
with pytest.raises(TypeError):
element_factory.create(Presentation)
def test_flush(element_factory):
p = element_factory.create(Parameter)
assert len(list(element_factory.values())) == 1
element_factory.flush()
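    # Drop the last strong reference and force a garbage collection so the
    # flushed elements are actually reclaimed before inspecting the factory.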
del p
gc.collect()
assert not list(element_factory.values()), list(element_factory.values())
def test_without_application(element_factory):
element_factory.create(Parameter)
assert element_factory.size() == 1, element_factory.size()
element_factory.flush()
assert element_factory.size() == 0, element_factory.size()
p = element_factory.create(Parameter)
assert element_factory.size() == 1, element_factory.size()
p.unlink()
assert element_factory.size() == 0, element_factory.size()
def test_unlink(element_factory):
p = element_factory.create(Parameter)
assert len(list(element_factory.values())) == 1
p.unlink()
assert not list(element_factory.values()), list(element_factory.values())
p = element_factory.create(Parameter)
p.defaultValue = "l"
assert len(list(element_factory.values())) == 1
p.unlink()
del p
assert not list(element_factory.values()), list(element_factory.values())
# Event handlers are registered as persistent top-level handlers, since no
# unsubscribe functionality is provided.
handled = False
events = []
last_event = None
@event_handler(ServiceEvent)
def handler(event):
global handled, events, last_event
handled = True
events.append(event)
last_event = event
def clear_events():
global handled, events, last_event
handled = False
events = []
last_event = None
@pytest.fixture(autouse=True)
def subscribe_handlers(event_manager):
event_manager.subscribe(handler)
clear_events()
yield None
clear_events()
def test_create_event(element_factory):
element_factory.create(Parameter)
assert isinstance(last_event, ElementCreated)
assert handled
def test_remove_event(element_factory):
p = element_factory.create(Parameter)
clear_events()
p.unlink()
assert isinstance(last_event, ElementDeleted)
def test_model_event(element_factory):
element_factory.model_ready()
assert isinstance(last_event, ModelReady)
def test_flush_event(element_factory):
element_factory.create(Parameter)
del events[:]
element_factory.flush()
assert len(events) == 1, events
assert isinstance(last_event, ModelFlushed)
def test_no_create_events_when_blocked(element_factory):
with element_factory.block_events():
element_factory.create(Parameter)
assert events == [], events
class TriggerUnlink:
def __init__(self, element):
self.element = element
class CheckModel:
def __init__(self, element):
self.element = element
def test_indirect_delete_of_element(event_manager, element_factory):
@event_handler(CheckModel)
def on_check_model(event):
assert event.element.model
assert event.element in element_factory
@event_handler(TriggerUnlink)
def on_trigger_unlink(event):
event_manager.handle(CheckModel(event.element))
event.element.unlink()
event_manager.subscribe(on_check_model)
event_manager.subscribe(on_trigger_unlink)
operation = element_factory.create(Operation)
event_manager.handle(TriggerUnlink(operation))
assert operation._model is None
assert operation not in element_factory |
5,336 | get | from flask import current_app, request
from flask_restplus import Resource, reqparse, fields, inputs
from app.extensions import api
from app.api.utils.access_decorators import requires_role_edit_permit, requires_role_view_all
from werkzeug.exceptions import BadRequest, NotFound, InternalServerError
from app.api.utils.resources_mixins import UserMixin
from app.api.utils.custom_reqparser import CustomReqparser
from app.api.mines.documents.models.mine_document import MineDocument
from app.api.now_applications.models.now_application_review import NOWApplicationReview
from app.api.now_applications.models.now_application_identity import NOWApplicationIdentity
from app.api.now_applications.models.now_application_document_xref import NOWApplicationDocumentXref
from app.api.now_applications.response_models import NOW_APPLICATION_REVIEW_MODEL
class NOWApplicationReviewListResource(Resource, UserMixin):
parser = CustomReqparser()
parser.add_argument(
'now_application_review_type_code', type=str, help='Type of Review', required=True)
parser.add_argument('now_application_document_type_code', type=str, help='Type of document')
parser.add_argument('response_date', type=inputs.datetime_from_iso8601, help='Date of Response')
parser.add_argument('referee_name', type=str, help='Name of Referee')
parser.add_argument('referral_number', type=str, help='referral number for E-Referral')
parser.add_argument('response_url', type=str, help='CRTS URL')
@api.doc(description='Add new Review to Now Application', params={})
@requires_role_edit_permit
@api.marshal_with(NOW_APPLICATION_REVIEW_MODEL, code=201)
def post(self, application_guid):
now_application = NOWApplicationIdentity.find_by_guid(application_guid)
if not now_application:
raise NotFound('No now_application found')
if not now_application.now_application_id:
raise BadRequest('Now Application not imported, call import endpoint first')
data = self.parser.parse_args()
new_review = NOWApplicationReview.create(now_application.now_application,
data['now_application_review_type_code'],
data.METHOD_NAME('response_date'),
data.METHOD_NAME('referee_name'),
data.METHOD_NAME('referral_number'),
data.METHOD_NAME('response_url'))
new_documents = request.json.METHOD_NAME('uploadedFiles', [])
if 'uploadedFiles' in request.json.keys():
del request.json['uploadedFiles']
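        # Each entry in uploadedFiles is expected to be a
        # [document_manager_guid, document_name] pair.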
for doc in new_documents:
new_mine_doc = MineDocument(
mine_guid=now_application.mine_guid,
document_manager_guid=doc[0],
document_name=doc[1])
new_now_mine_doc = NOWApplicationDocumentXref(
mine_document=new_mine_doc,
now_application_document_type_code=data['now_application_document_type_code'],
now_application_id=now_application.now_application.now_application_id,
)
new_review.documents.append(new_now_mine_doc)
        new_review.save()
return new_review, 201
    @api.doc(description='Get all Reviews for a Now Application', params={})
@requires_role_view_all
@api.marshal_with(NOW_APPLICATION_REVIEW_MODEL, envelope='records', code=201)
def METHOD_NAME(self, application_guid):
now_application = NOWApplicationIdentity.find_by_guid(application_guid)
if not now_application:
raise NotFound('No now_application found')
if not now_application.now_application_id:
raise BadRequest('Now Application not imported, call import endpoint first')
return now_application.now_application.reviews
class NOWApplicationReviewResource(Resource, UserMixin):
parser = CustomReqparser()
parser.add_argument(
'now_application_review_type_code', type=str, help='Type of Review', required=True)
parser.add_argument('now_application_document_type_code', type=str, help='Type of document')
parser.add_argument('response_date', type=inputs.datetime_from_iso8601, help='Date of Response')
parser.add_argument('referee_name', type=str, help='Name of Referee')
parser.add_argument('referral_number', type=str, help='referral number for E-Referral')
    @api.doc(description='Delete review from Now Application', params={})
@requires_role_edit_permit
@api.response(204, 'Successfully deleted.')
def delete(self, application_guid, now_application_review_id):
now_app_review = NOWApplicationReview.query.METHOD_NAME(now_application_review_id)
if not now_app_review or str(
now_app_review.now_application.now_application_guid) != application_guid:
raise NotFound('No now_application found')
if len(now_app_review.documents) > 0:
raise BadRequest('Cannot delete review with documents attached')
# for doc in now_app_review.documents:
# doc.delete()
now_app_review.delete()
return ('', 204)
@api.doc(description='Update Review to Now Application', params={})
@requires_role_edit_permit
@api.marshal_with(NOW_APPLICATION_REVIEW_MODEL, code=201)
def put(self, application_guid, now_application_review_id):
data = self.parser.parse_args()
now_app_review = NOWApplicationReview.query.METHOD_NAME(now_application_review_id)
if not now_app_review or str(
now_app_review.now_application.now_application_guid) != application_guid:
raise NotFound('No now_application found')
new_documents = request.json.METHOD_NAME('uploadedFiles', [])
if 'uploadedFiles' in request.json.keys():
del request.json['uploadedFiles']
now_app_review.deep_update_from_dict(request.json)
for doc in new_documents:
new_mine_doc = MineDocument(
mine_guid=now_app_review.now_application.mine_guid,
document_manager_guid=doc[0],
document_name=doc[1])
new_now_mine_doc = NOWApplicationDocumentXref(
mine_document=new_mine_doc,
now_application_document_type_code=data['now_application_document_type_code'],
now_application_id=now_app_review.now_application_id,
)
now_app_review.documents.append(new_now_mine_doc)
now_app_review.save()
return now_app_review, 200 |
5,337 | clear timings | """
Utility functions to keep performance information
"""
from __future__ import annotations
import collections
from typing import Dict
import pyglet
import time
# Evil globals
_timings: Dict = {}
_pyglets_dispatch_event = None
_frame_times: collections.deque = collections.deque()
_max_history: int = 100
__all__ = [
"print_timings",
"clear_timings",
"get_timings",
"enable_timings",
"disable_timings",
"get_fps",
"timings_enabled"
]
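# A minimal usage sketch (MyGame is a hypothetical arcade.Window subclass; the
# helpers below are assumed to be re-exported from the top-level arcade
# package):
#
#     arcade.enable_timings()
#     window = MyGame()
#     arcade.run()
#     arcade.print_timings()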
def _dispatch_event(self, *args):
"""
This function will be monkey-patched over Pyglet's dispatch event function.
"""
# Name of the dispatched event, like 'on_draw'
name = args[0]
# Start the clock, and keep the time if this is on_draw for FPS calcs.
start_time = time.perf_counter()
if name == 'on_draw':
_frame_times.append(start_time)
# Call Pyglet's dispatch event function
if _pyglets_dispatch_event is not None:
_pyglets_dispatch_event(self, *args)
# Stop the clock
end_time = time.perf_counter()
processing_time = end_time - start_time
# Get the historical list of timings, or create one if we don't have it.
if name in _timings:
data = _timings[name]
else:
data = collections.deque()
_timings[name] = data
    # Add our time to the list
data.append(processing_time)
    # Past our history limit? Pop off the first one on the list
if len(data) > _max_history:
data.popleft()
def print_timings():
"""
Print event handler statistics to stdout as a table.
Performance tracking must be enabled with
:func:`arcade.enable_timings` before calling this function.
See :ref:`performance_statistics_example` for an example of how to
    use this function.
The statistics consist of:
* how many times each registered event was called
* the average time for handling each type of event in seconds
The table looks something like:
.. code-block:: text
Event Count Average Time
-------------- ----- ------------
on_update 60 0.0000
on_mouse_enter 1 0.0000
on_mouse_motion 39 0.0000
on_expose 1 0.0000
on_draw 60 0.0020
"""
global _timings
print()
print("Event Count Average Time")
print("-------------- ----- ------------")
for index in _timings:
data = _timings[index]
call_count = len(data)
average_time = sum(data) / len(data)
print(f"{index:15}{call_count:5} {average_time:11.4f}")
def METHOD_NAME() -> None:
"""
Reset the count & average time for each event type to zero.
Performance tracking must be enabled with
:func:`arcade.enable_timings` before calling this function.
See :ref:`performance_statistics_example` for an example of how to
    use this function.
"""
global _timings
_timings = {}
def get_timings() -> Dict:
"""
Get a dict of the current dispatch event timings.
Performance tracking must be enabled with
:func:`arcade.enable_timings` before calling this function.
:return: A dict of event timing data, consisting of counts and
average handler duration.
"""
return _timings
def enable_timings(max_history: int = 100) -> None:
"""
Enable recording of performance information.
This function must be called before using any other performance
features, except for :func:`arcade.timings_enabled`, which can
be called at any time.
See :ref:`performance_statistics_example` for an example of how to
    use this function.
:param max_history: How many frames to keep performance info for.
"""
global _pyglets_dispatch_event, _max_history
if pyglet.window.BaseWindow.dispatch_event == _dispatch_event:
raise ValueError("Timings already enabled.")
# Save the original pyglet dispatch event function
_pyglets_dispatch_event = pyglet.window.BaseWindow.dispatch_event
# Override the pyglet dispatch event function
pyglet.window.BaseWindow.dispatch_event = _dispatch_event
_max_history = max_history
def disable_timings() -> None:
"""
Disable collection of timing information.
Performance tracking must be enabled with
:func:`arcade.enable_timings` before calling this function.
"""
if pyglet.window.BaseWindow.dispatch_event != _dispatch_event:
raise ValueError("Timings are not enabled.")
# Restore the original pyglet dispatch event function
pyglet.window.BaseWindow.dispatch_event = _pyglets_dispatch_event # type: ignore
METHOD_NAME()
def get_fps(frame_count: int = 60) -> float:
"""
Get the FPS over the last ``frame_count`` frames.
Performance tracking must be enabled with
:func:`arcade.enable_timings` before calling this function.
To get the FPS over the last 30 frames, you would pass 30 instead
of the default 60.
See :ref:`performance_statistics_example` for an example of how to
    use this function.
:param frame_count: How many frames to calculate the FPS over.
"""
cur_time = time.perf_counter()
if len(_frame_times) == 0:
return 0
if len(_frame_times) < frame_count:
frame_count = len(_frame_times)
start_time = _frame_times[-frame_count]
total_time = cur_time - start_time
fps = frame_count / total_time
return fps
def timings_enabled() -> bool:
"""
Return true if timings are enabled, false otherwise.
This function can be used at any time to check if timings are
enabled. See :func:`arcade.enable_timings` for more information.
:return: Whether timings are currently enabled.
"""
return pyglet.window.BaseWindow.dispatch_event == _dispatch_event |
5,338 | stamp | # Copyright 2023 Iguazio
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os.path
import pathlib
import typing
import unittest.mock
import alembic
import alembic.config
import pytest
import mlrun.api.utils.db.alembic
from mlrun import mlconf
class Constants(object):
revision_history = ["revision2", "revision1"]
initial_revision = "revision1"
latest_revision = "revision2"
unknown_revision = "revision3"
def test_no_database_exists(mock_alembic, mock_database):
mock_database(db_file_exists=False)
alembic_util = mlrun.api.utils.db.alembic.AlembicUtil(pathlib.Path(""))
alembic_util.init_alembic()
assert mock_alembic.stamp_calls == []
assert mock_alembic.upgrade_calls == ["head"]
def test_database_exists_no_revision(mock_alembic, mock_database):
mock_database()
alembic_util = mlrun.api.utils.db.alembic.AlembicUtil(pathlib.Path(""))
alembic_util.init_alembic()
assert mock_alembic.upgrade_calls == ["head"]
def test_database_exists_known_revision(mock_alembic, mock_database):
mock_database(current_revision=Constants.initial_revision)
alembic_util = mlrun.api.utils.db.alembic.AlembicUtil(pathlib.Path(""))
alembic_util.init_alembic()
assert mock_alembic.stamp_calls == []
assert mock_alembic.upgrade_calls == ["head"]
def test_database_exists_unknown(mock_alembic, mock_database):
mock_database(current_revision=Constants.unknown_revision)
alembic_util = mlrun.api.utils.db.alembic.AlembicUtil(pathlib.Path(""))
alembic_util.init_alembic()
assert mock_alembic.stamp_calls == []
assert mock_alembic.upgrade_calls == ["head"]
@pytest.fixture()
def mock_database(
monkeypatch, mock_alembic, mock_db_file_name
) -> typing.Callable[[typing.List[str], str, bool, bool], None]:
def _mock_database(
revision_history: typing.List[str] = None,
current_revision: str = "",
db_file_exists: bool = True,
db_backup_exists: bool = True,
):
revision_history = revision_history or Constants.revision_history
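        # Patch os.path.isfile so the DB file (and its backup) appear to exist
        # or not, and stub alembic's current/history output accordingly.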
def _db_file_exists(file_name: str) -> bool:
if file_name == mock_db_file_name:
return db_file_exists
else:
return db_backup_exists
monkeypatch.setattr(os.path, "isfile", _db_file_exists)
def _current_revision(alembic_config: typing.Any):
if current_revision != "" and current_revision not in revision_history:
raise Exception(
f"Can't locate revision identified by '{current_revision}'"
)
alembic_config.print_stdout(current_revision)
mock_alembic.current = _current_revision
def _revision_history(alembic_config: typing.Any):
for revision in revision_history:
alembic_config.print_stdout(f"none -> {revision}, revision name")
mock_alembic.history = _revision_history
return _mock_database
@pytest.fixture()
def mock_db_file_name(monkeypatch) -> str:
db_file_name = "test.db"
monkeypatch.setattr(mlconf.httpdb, "dsn", db_file_name)
return db_file_name
class MockAlembicCommand(object):
def __init__(self):
self.stamp_calls = []
self.upgrade_calls = []
def METHOD_NAME(self, alembic_config: typing.Any, revision: str):
self.stamp_calls.append(revision)
def upgrade(self, alembic_config: typing.Any, revision: str):
self.upgrade_calls.append(revision)
@pytest.fixture()
def mock_alembic(monkeypatch) -> MockAlembicCommand:
mocked_alembic_command = MockAlembicCommand()
monkeypatch.setattr(alembic, "command", mocked_alembic_command)
monkeypatch.setattr(alembic.config, "Config", unittest.mock.Mock())
return mocked_alembic_command |
5,339 | complex | #
# GPT - Grid Python Toolkit
# Copyright (C) 2020 Christoph Lehner (christoph.lehner@ur.de, https://github.com/lehner/gpt)
# 2020 Tilo Wettig
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
import gpt, sys
from gpt.core.object_type.base import *
from gpt.core.object_type.container import *
from gpt.core.object_type.su_n import *
from gpt.core.object_type.u_1 import *
from gpt.core.object_type.complex_additive_group import *
from gpt.core.object_type.real_additive_group import *
import numpy
###
# Helper to create lattice / tensor of specific type
def gpt_object(first, ot):
if type(first) == gpt.grid:
return gpt.lattice(first, ot)
return gpt.tensor(numpy.array(first, dtype=numpy.complex128), ot)
###
# Container objects without (lie) group structure
def singlet(grid):
return gpt_object(grid, ot_singlet)
def matrix_color(grid, ndim):
return gpt_object(grid, ot_matrix_color(ndim))
def vector_color(grid, ndim):
return gpt_object(grid, ot_vector_color(ndim))
def matrix_spin(grid, ndim):
return gpt_object(grid, ot_matrix_spin(ndim))
def vector_spin(grid, ndim):
return gpt_object(grid, ot_vector_spin(ndim))
def matrix_spin_color(grid, spin_ndim, color_ndim):
return gpt_object(grid, ot_matrix_spin_color(spin_ndim, color_ndim))
def vector_spin_color(grid, spin_ndim, color_ndim):
return gpt_object(grid, ot_vector_spin_color(spin_ndim, color_ndim))
def vector_singlet(grid, n):
return gpt_object(grid, ot_vector_singlet(n))
def matrix_singlet(grid, n):
return gpt_object(grid, ot_matrix_singlet(n))
###
# Container objects with (lie) group structure
def u1(grid):
# there is no need to distinguish matrix/vector here
return gpt_object(grid, ot_u_1_group())
def complex_additive(grid):
return gpt_object(grid, ot_complex_additive_group())
def vector_complex_additive(grid, n):
return gpt_object(grid, ot_vector_complex_additive_group(n))
def matrix_complex_additive(grid, n):
return gpt_object(grid, ot_matrix_complex_additive_group(n))
def real_additive(grid):
return gpt_object(grid, ot_real_additive_group())
def vector_real_additive(grid, n):
return gpt_object(grid, ot_vector_real_additive_group(n))
def matrix_real_additive(grid, n):
return gpt_object(grid, ot_matrix_real_additive_group(n))
def matrix_su2_fundamental(grid):
return gpt_object(grid, ot_matrix_su_n_fundamental_group(2))
def matrix_su2_adjoint(grid):
return gpt_object(grid, ot_matrix_su_n_adjoint_group(2))
def matrix_su3_fundamental(grid):
return gpt_object(grid, ot_matrix_su_n_fundamental_group(3))
###
# String conversion for safe file input
def str_to_otype(s):
# first parse string
a = s.split("(")
if len(a) == 2:
assert a[1][-1] == ")"
root = a[0]
        # convert through int to avoid the possibility of malicious code being executed in eval below
args = "(%s)" % (
",".join([str(int(x)) for x in filter(lambda x: x != "", a[1][:-1].split(","))])
)
else:
root = a[0]
args = ""
# then map to type
known_types = set(
[
"ot_singlet",
"ot_matrix_spin",
"ot_vector_spin",
"ot_matrix_color",
"ot_vector_color",
"ot_matrix_spin_color",
"ot_vector_spin_color",
"ot_matrix_su_n_fundamental_group",
"ot_matrix_su_n_fundamental_algebra",
"ot_matrix_su_n_adjoint_group",
"ot_matrix_su_n_adjoint_algebra",
"ot_vector_singlet",
"ot_vector_singlet4",
"ot_vector_singlet10",
"ot_vector_singlet60",
"ot_matrix_singlet",
"ot_matrix_singlet4",
"ot_matrix_singlet10",
"ot_matrix_singlet60",
"ot_u_1_group",
"ot_u_1_algebra",
"ot_complex_additive_group",
"ot_real_additive_group",
"ot_vector_real_additive_group",
"ot_matrix_real_additive_group",
"ot_vector_complex_additive_group",
"ot_matrix_complex_additive_group",
]
)
assert root in known_types
return eval(root + args)
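# For example, str_to_otype("ot_matrix_spin(4)") returns ot_matrix_spin(4);
# any root not listed in known_types fails the assertion above.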
###
# aliases
def METHOD_NAME(grid):
return complex_additive(grid)
def real(grid):
return real_additive(grid)
def vcomplex(grid, n):
return vector_complex_additive(grid, n)
def mcomplex(grid, n):
return matrix_complex_additive(grid, n)
def vreal(grid, n):
return vector_real_additive(grid, n)
def mreal(grid, n):
return matrix_real_additive(grid, n)
def mcolor(grid):
return matrix_su3_fundamental(grid)
def vcolor(grid):
return vector_color(grid, 3)
def mspin(grid):
return matrix_spin(grid, 4)
def vspin(grid):
return vector_spin(grid, 4)
def mspincolor(grid):
return matrix_spin_color(grid, 4, 3)
def vspincolor(grid):
return vector_spin_color(grid, 4, 3) |
5,340 | on 200 | # --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
#
# Code generated by aaz-dev-tools
# --------------------------------------------------------------------------------------------
# pylint: skip-file
# flake8: noqa
from azure.cli.core.aaz import *
@register_command(
"monitor log-analytics workspace data-export list",
)
class List(AAZCommand):
"""List all data export ruleses for a given workspace.
"""
_aaz_info = {
"version": "2020-08-01",
"resources": [
["mgmt-plane", "/subscriptions/{}/resourcegroups/{}/providers/microsoft.operationalinsights/workspaces/{}/dataexports", "2020-08-01"],
]
}
def _handler(self, command_args):
super()._handler(command_args)
self._execute_operations()
return self._output()
_args_schema = None
@classmethod
def _build_arguments_schema(cls, *args, **kwargs):
if cls._args_schema is not None:
return cls._args_schema
cls._args_schema = super()._build_arguments_schema(*args, **kwargs)
# define Arg Group ""
_args_schema = cls._args_schema
_args_schema.resource_group = AAZResourceGroupNameArg(
required=True,
)
_args_schema.workspace_name = AAZStrArg(
options=["--workspace-name"],
help="The name of the workspace.",
required=True,
fmt=AAZStrArgFormat(
pattern="^[A-Za-z0-9][A-Za-z0-9-]+[A-Za-z0-9]$",
max_length=63,
min_length=4,
),
)
return cls._args_schema
def _execute_operations(self):
self.pre_operations()
self.DataExportsListByWorkspace(ctx=self.ctx)()
self.post_operations()
# @register_callback
def pre_operations(self):
pass
# @register_callback
def post_operations(self):
pass
def _output(self, *args, **kwargs):
result = self.deserialize_output(self.ctx.vars.instance.value, client_flatten=True)
return result
class DataExportsListByWorkspace(AAZHttpOperation):
CLIENT_TYPE = "MgmtClient"
def __call__(self, *args, **kwargs):
request = self.make_request()
session = self.client.send_request(request=request, stream=False, **kwargs)
if session.http_response.status_code in [200]:
return self.METHOD_NAME(session)
return self.on_error(session.http_response)
@property
def url(self):
return self.client.format_url(
"/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.OperationalInsights/workspaces/{workspaceName}/dataExports",
**self.url_parameters
)
@property
def method(self):
return "GET"
@property
def error_format(self):
return "MgmtErrorFormat"
@property
def url_parameters(self):
parameters = {
**self.serialize_url_param(
"resourceGroupName", self.ctx.args.resource_group,
required=True,
),
**self.serialize_url_param(
"subscriptionId", self.ctx.subscription_id,
required=True,
),
**self.serialize_url_param(
"workspaceName", self.ctx.args.workspace_name,
required=True,
),
}
return parameters
@property
def query_parameters(self):
parameters = {
**self.serialize_query_param(
"api-version", "2020-08-01",
required=True,
),
}
return parameters
@property
def header_parameters(self):
parameters = {
**self.serialize_header_param(
"Accept", "application/json",
),
}
return parameters
def METHOD_NAME(self, session):
data = self.deserialize_http_content(session)
self.ctx.set_var(
"instance",
data,
schema_builder=self._build_schema_on_200
)
_schema_on_200 = None
@classmethod
def _build_schema_on_200(cls):
if cls._schema_on_200 is not None:
return cls._schema_on_200
cls._schema_on_200 = AAZObjectType()
_schema_on_200 = cls._schema_on_200
_schema_on_200.value = AAZListType()
value = cls._schema_on_200.value
value.Element = AAZObjectType()
_element = cls._schema_on_200.value.Element
_element.id = AAZStrType(
flags={"read_only": True},
)
_element.name = AAZStrType(
flags={"read_only": True},
)
_element.properties = AAZObjectType(
flags={"required": True, "client_flatten": True},
)
_element.type = AAZStrType(
flags={"read_only": True},
)
properties = cls._schema_on_200.value.Element.properties
properties.created_date = AAZStrType(
serialized_name="createdDate",
)
properties.data_export_id = AAZStrType(
serialized_name="dataExportId",
)
properties.destination = AAZObjectType(
flags={"required": True},
)
properties.enable = AAZBoolType()
properties.last_modified_date = AAZStrType(
serialized_name="lastModifiedDate",
)
properties.table_names = AAZListType(
serialized_name="tableNames",
flags={"required": True},
)
destination = cls._schema_on_200.value.Element.properties.destination
destination.meta_data = AAZObjectType(
serialized_name="metaData",
flags={"client_flatten": True},
)
destination.resource_id = AAZStrType(
serialized_name="resourceId",
flags={"required": True},
)
destination.type = AAZStrType(
flags={"read_only": True},
)
meta_data = cls._schema_on_200.value.Element.properties.destination.meta_data
meta_data.event_hub_name = AAZStrType(
serialized_name="eventHubName",
)
table_names = cls._schema_on_200.value.Element.properties.table_names
table_names.Element = AAZStrType()
return cls._schema_on_200
__all__ = ["List"] |
5,341 | test get handle from dummy func | #
# Copyright (c) 2020-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from pylibraft.common.handle import Handle
from sklearn.linear_model import LinearRegression as skreg
from cuml.datasets import make_regression
from cuml.testing.utils import ClassEnumerator
from cuml.explainer.common import model_func_call
from cuml.explainer.common import link_dict
from cuml.explainer.common import get_tag_from_model_func
from cuml.explainer.common import get_link_fn_from_str_or_fn
from cuml.explainer.common import get_handle_from_cuml_model_func
from cuml.explainer.common import get_dtype_from_model_func
from cuml.explainer.common import get_cai_ptr
from cuml import PCA
from cuml import LinearRegression as reg
import pytest
from cuml.internals.safe_imports import cpu_only_import
import cuml
from cuml.internals.safe_imports import gpu_only_import
cp = gpu_only_import("cupy")
np = cpu_only_import("numpy")
models_config = ClassEnumerator(module=cuml)
models = models_config.get_models()
_default_tags = [
"preferred_input_order",
"X_types_gpu",
"non_deterministic",
"requires_positive_X",
"requires_positive_y",
"X_types",
"poor_score",
"no_validation",
"multioutput",
"allow_nan",
"stateless",
"multilabel",
"_skip_test",
"_xfail_checks",
"multioutput_only",
"binary_only",
"requires_fit",
"requires_y",
"pairwise",
]
def test_get_dtype_from_model_func():
X, y = make_regression(
n_samples=81,
n_features=10,
noise=0.1,
random_state=42,
dtype=np.float32,
)
# checking model with float32 dtype
model_f32 = reg().fit(X, y)
assert get_dtype_from_model_func(model_f32.predict) == np.float32
# checking model with float64 dtype
X = X.astype(np.float64)
y = y.astype(np.float64)
model_f64 = reg().fit(X, y)
assert get_dtype_from_model_func(model_f64.predict) == np.float64
# checking model that has not been fitted yet
model_not_fit = reg()
assert get_dtype_from_model_func(model_not_fit.predict) is None
# checking arbitrary function
def dummy_func(x):
return x + x
assert get_dtype_from_model_func(dummy_func) is None
def test_get_gpu_tag_from_model_func():
# test getting the gpu tags from the model that we use in explainers
model = reg()
order = get_tag_from_model_func(
func=model.predict, tag="preferred_input_order", default="C"
)
assert order == "F"
out_types = get_tag_from_model_func(
func=model.predict, tag="X_types_gpu", default=False
)
assert isinstance(out_types, list)
assert "2darray" in out_types
# checking arbitrary function
order = get_tag_from_model_func(
func=dummy_func, tag="preferred_input_order", default="C"
)
assert order == "C"
out_types = get_tag_from_model_func(
func=dummy_func, tag="X_types_gpu", default=False
)
assert out_types is False
model2 = skreg()
out_types = get_tag_from_model_func(
func=model2.predict, tag="X_types_gpu", default=False
)
assert out_types is False
@pytest.mark.parametrize("model", list(models.values()))
def test_get_tag_from_model_func(model):
mod = create_dummy_model(model)
for tag in _default_tags:
res = get_tag_from_model_func(
func=mod.get_param_names, tag=tag, default="FFF"
)
if tag != "preferred_input_order":
assert res != "FFF"
@pytest.mark.parametrize("model", list(models.values()))
def test_get_handle_from_cuml_model_func(model):
mod = create_dummy_model(model)
handle = get_handle_from_cuml_model_func(
mod.get_param_names, create_new=True
)
assert isinstance(handle, Handle)
@pytest.mark.parametrize("create_new", [True, False])
def METHOD_NAME(create_new):
handle = get_handle_from_cuml_model_func(dummy_func, create_new=create_new)
res = isinstance(handle, Handle)
assert res == create_new
def test_model_func_call_gpu():
X, y = make_regression(
n_samples=81,
n_features=10,
noise=0.1,
random_state=42,
dtype=np.float32,
)
model = reg().fit(X, y)
z = model_func_call(X=X, model_func=model.predict, gpu_model=True)
assert isinstance(z, cp.ndarray)
z = model_func_call(
X=cp.asnumpy(X), model_func=dummy_func, gpu_model=False
)
assert isinstance(z, cp.ndarray)
with pytest.raises(TypeError):
z = model_func_call(X=X, model_func=dummy_func, gpu_model=True)
model = PCA(n_components=10).fit(X)
z = model_func_call(X=X, model_func=model.transform, gpu_model=True)
assert isinstance(z, cp.ndarray)
def test_get_cai_ptr():
a = cp.ones(10)
ptr = get_cai_ptr(a)
assert ptr == a.__cuda_array_interface__["data"][0]
b = np.ones(10)
with pytest.raises(TypeError):
ptr = get_cai_ptr(b)
@pytest.mark.parametrize("link_function", ["identity", "logit"])
def test_get_link_fn_from_str(link_function):
fn = get_link_fn_from_str_or_fn(link_function)
a = cp.ones(10)
assert cp.all(fn(a) == link_dict[link_function](a))
assert cp.all(fn.inverse(a) == link_dict[link_function].inverse(a))
def test_get_link_fn_from_wrong_str():
with pytest.raises(ValueError):
get_link_fn_from_str_or_fn("this_is_wrong")
def test_get_link_fn_from_fn():
def dummylink(x):
return 2 * x
# check we raise error if link has no inverse
with pytest.raises(TypeError):
get_link_fn_from_str_or_fn(dummylink)
def dummylink_inv(x):
return x / 2
dummylink.inverse = dummylink_inv
fn = get_link_fn_from_str_or_fn(dummylink)
assert fn(2) == 4
assert fn.inverse(2) == 1
def create_dummy_model(model):
try:
mod = model()
except TypeError:
mod = model(np.zeros(10))
return mod
def dummy_func(x):
if not isinstance(x, np.ndarray):
raise TypeError("x must be a NumPy array")
return np.mean(x, axis=1) |
5,342 | test spectacular view | from unittest import mock
import pytest
import yaml
from django import __version__ as DJANGO_VERSION
from django.http import HttpResponseRedirect
from django.urls import path
from rest_framework.decorators import api_view
from rest_framework.response import Response
from rest_framework.test import APIClient
from drf_spectacular.types import OpenApiTypes
from drf_spectacular.utils import extend_schema
from drf_spectacular.validation import validate_schema
from drf_spectacular.views import (
SpectacularAPIView, SpectacularRedocView, SpectacularSwaggerOauthRedirectView,
SpectacularSwaggerSplitView, SpectacularSwaggerView,
)
@extend_schema(responses=OpenApiTypes.FLOAT)
@api_view(http_method_names=['GET'])
def pi(request):
return Response(3.1415)
urlpatterns_v1 = [path('api/v1/pi/', pi)]
urlpatterns_v1.append(
path('api/v1/schema/', SpectacularAPIView.as_view(urlconf=urlpatterns_v1))
)
urlpatterns_v2 = [
path('api/v2/pi/', pi),
path('api/v2/pi-fast/', pi),
path('api/v2/schema/swagger-ui/', SpectacularSwaggerView.as_view(), name='swagger'),
path(
"api/v1/schema/swagger-ui/oauth2-redirect.html",
SpectacularSwaggerOauthRedirectView.as_view(),
name="swagger-oauth-redirect"),
path('api/v2/schema/swagger-ui-alt/', SpectacularSwaggerSplitView.as_view(), name='swagger-alt'),
path('api/v2/schema/redoc/', SpectacularRedocView.as_view(), name='redoc'),
]
urlpatterns_v2.append(
path('api/v2/schema/', SpectacularAPIView.as_view(urlconf=urlpatterns_v2), name='schema'),
)
urlpatterns_str_import = [
path('api/schema-str1/', SpectacularAPIView.as_view(urlconf=['tests.test_view']), name='schema_str1'),
path('api/schema-str2/', SpectacularAPIView.as_view(urlconf='tests.test_view'), name='schema_str2'),
path('api/schema-err1/', SpectacularAPIView.as_view(urlconf=['tests.error']), name='schema_err1'),
path('api/schema-err2/', SpectacularAPIView.as_view(urlconf='tests.error'), name='schema_err2'),
]
urlpatterns = urlpatterns_v1 + urlpatterns_v2 + urlpatterns_str_import
@pytest.mark.urls(__name__)
def METHOD_NAME(no_warnings):
response = APIClient().get('/api/v1/schema/')
assert response.status_code == 200
assert response.content.startswith(b'openapi: 3.0.3\n')
assert response.accepted_media_type == 'application/vnd.oai.openapi'
if DJANGO_VERSION > '3':
assert response.headers.get('CONTENT-DISPOSITION') == 'inline; filename="schema.yaml"'
schema = yaml.load(response.content, Loader=yaml.SafeLoader)
validate_schema(schema)
assert len(schema['paths']) == 2
@pytest.mark.urls(__name__)
def test_spectacular_view_custom_urlconf(no_warnings):
response = APIClient().get('/api/v2/schema/')
assert response.status_code == 200
schema = yaml.load(response.content, Loader=yaml.SafeLoader)
validate_schema(schema)
assert len(schema['paths']) == 3
response = APIClient().get('/api/v2/pi-fast/')
assert response.status_code == 200
assert response.content == b'3.1415'
@pytest.mark.parametrize(['accept', 'format', 'indent'], [
('application/vnd.oai.openapi', 'yaml', None),
('application/yaml', 'yaml', None),
('application/vnd.oai.openapi+json', 'json', 4),
('application/json', 'json', 4),
('application/json; indent=8', 'json', 8),
])
@pytest.mark.urls(__name__)
def test_spectacular_view_accept(accept, format, indent):
response = APIClient().get('/api/v1/schema/', HTTP_ACCEPT=accept)
assert response.status_code == 200
assert response.accepted_media_type == accept
if format == 'json':
assert response.content.startswith(b'{\n' + indent * b' ' + b'"openapi": "3.0.3"')
if format == 'yaml':
assert response.content.startswith(b'openapi: 3.0.3\n')
@pytest.mark.urls(__name__)
def test_spectacular_view_accept_unknown(no_warnings):
response = APIClient().get('/api/v1/schema/', HTTP_ACCEPT='application/unknown')
assert response.status_code == 406
assert response.content == (
b'detail:\n string: Could not satisfy the request Accept header.\n'
b' code: not_acceptable\n'
)
@pytest.mark.parametrize('ui', ['redoc', 'swagger-ui'])
@pytest.mark.urls(__name__)
def test_spectacular_ui_view(no_warnings, ui):
from drf_spectacular.settings import spectacular_settings
response = APIClient().get(f'/api/v2/schema/{ui}/')
assert response.status_code == 200
assert response.content.startswith(b'<!DOCTYPE html>')
if ui == 'redoc':
assert b'<title>Redoc</title>' in response.content
assert spectacular_settings.REDOC_DIST.encode() in response.content
else:
assert b'<title>Swagger</title>' in response.content
assert spectacular_settings.SWAGGER_UI_DIST.encode() in response.content
assert b'"/api/v2/schema/"' in response.content
@pytest.mark.urls(__name__)
def test_spectacular_swagger_ui_alternate(no_warnings):
# first request for the html
response = APIClient().get('/api/v2/schema/swagger-ui-alt/')
assert response.status_code == 200
assert response.content.startswith(b'<!DOCTYPE html>')
assert b'"/api/v2/schema/swagger-ui-alt/?script="' in response.content
# second request to obtain js swagger config (CSP self)
response = APIClient().get('/api/v2/schema/swagger-ui-alt/?script=')
assert response.status_code == 200
assert b'"/api/v2/schema/"' in response.content
@mock.patch(
'drf_spectacular.settings.spectacular_settings.SWAGGER_UI_SETTINGS',
'{"deepLinking": true}'
)
@pytest.mark.urls(__name__)
def test_spectacular_ui_with_raw_settings(no_warnings):
response = APIClient().get('/api/v2/schema/swagger-ui/')
assert response.status_code == 200
assert b'const swaggerSettings = {"deepLinking": true};\n' in response.content
@pytest.mark.urls(__name__)
def test_spectacular_ui_param_passthrough(no_warnings):
response = APIClient().get('/api/v2/schema/swagger-ui/?foo=bar&lang=jp&version=v2')
assert response.status_code == 200
assert b'url: "/api/v2/schema/?lang\\u003Djp\\u0026version\\u003Dv2"' in response.content
@pytest.mark.parametrize('url', ['/api/schema-str1/', '/api/schema-str2/'])
@pytest.mark.urls(__name__)
def test_spectacular_urlconf_module_list_import(no_warnings, url):
response = APIClient().get(url)
assert response.status_code == 200
assert b'/api/v1/pi/' in response.content
assert b'/api/v2/pi/' in response.content
@pytest.mark.parametrize('url', ['/api/schema-err1/', '/api/schema-err2/'])
@pytest.mark.urls(__name__)
def test_spectacular_urlconf_module_list_import_error(no_warnings, url):
with pytest.raises(ModuleNotFoundError):
APIClient().get(url)
@pytest.mark.parametrize('get_params', ['', 'code=foobar123&state=xyz&session_state=hello-world'])
@pytest.mark.urls(__name__)
def test_swagger_oauth_redirect_view(get_params):
# act
response = APIClient().get('/api/v1/schema/swagger-ui/oauth2-redirect.html?' + get_params)
# assert
assert response.status_code == 302
if isinstance(response, HttpResponseRedirect):
# older django versions test client directly returns the response instance
assert response.url == '/static/drf_spectacular_sidecar/swagger-ui-dist/oauth2-redirect.html?' + get_params
else:
assert response.headers['Location'] ==\
'/static/drf_spectacular_sidecar/swagger-ui-dist/oauth2-redirect.html?' + get_params |
5,343 | validate measures | # encoding: utf-8
"""
SEED Platform (TM), Copyright (c) Alliance for Sustainable Energy, LLC, and other contributors.
See also https://github.com/seed-platform/seed/main/LICENSE.md
"""
import json
import logging
import re
import string
from django.db import models
from seed.models import Organization
_log = logging.getLogger(__name__)
BUILDINGSYNC_MEASURES = [
{
"name": "RetrofitWithCFLs",
"display_name": "Retrofit with CFLs",
"category": "LightingImprovements",
"category_name": "Lighting Improvements",
}
]
def _snake_case(display_name):
"""
    Convert a BuildingSync measure display name into reasonable snake_case for
    storing in the database.
    :param display_name: BuildingSync measure display name
:return: string
"""
str_re = re.compile('[{0}]'.format(re.escape(string.punctuation)))
    result = str_re.sub(' ', display_name)
    result = re.sub('(.)([A-Z][a-z]+)', r'\1_\2', result)
    result = re.sub('([a-z0-9])([A-Z])', r'\1_\2', result).lower()
    return re.sub(' +', '_', result)
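# Illustrative examples: _snake_case("AdvancedMeteringSystems") returns
# "advanced_metering_systems", and _snake_case("Install advanced metering
# systems") returns "install_advanced_metering_systems".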
class Measure(models.Model):
name = models.CharField(max_length=255)
display_name = models.CharField(max_length=255)
category = models.CharField(max_length=255)
category_display_name = models.CharField(max_length=255)
schema_type = models.CharField(max_length=255, default='BuildingSync')
schema_version = models.CharField(max_length=15, default='1.0.0')
# relationships
properties = models.ManyToManyField('PropertyState', through='PropertyMeasure')
organization = models.ForeignKey(Organization, on_delete=models.CASCADE)
created = models.DateTimeField(auto_now_add=True)
modified = models.DateTimeField(auto_now=True)
def __str__(self):
return 'Measure - %s.%s' % (self.category, self.name)
class Meta:
ordering = ['-created']
get_latest_by = 'created'
unique_together = ('organization', 'category', 'name')
@classmethod
def populate_measures(cls, organization_id, schema_type='BuildingSync', schema_version="1.0.0"):
"""
Populate the list of measures from the BuildingSync
Default is BuildingSync 1.0.0
:param organization_id: integer, ID of the organization to populate measures
:return:
"""
filename = "seed/building_sync/lib/enumerations.json"
with open(filename) as f:
data = json.load(f)
for datum in data:
# "name": "MeasureName",
# "sub_name": "AdvancedMeteringSystems",
# "documentation": "Advanced Metering Systems",
# "enumerations": [
# "Install advanced metering systems",
# "Clean and/or repair",
# "Implement training and/or documentation",
# "Upgrade operating protocols, calibration, and/or sequencing",
# "Other"
# ],
if datum["name"] == "MeasureName":
for enum in datum["enumerations"]:
Measure.objects.get_or_create(
organization_id=organization_id,
category=_snake_case(datum["sub_name"]),
category_display_name=datum["documentation"],
name=_snake_case(enum),
display_name=enum,
schema_type=schema_type,
schema_version=schema_version
)
@classmethod
def METHOD_NAME(cls, data):
"""
Take a list of measure ids or measure names and return just a list of ids.
:param data: list, either category.name of measure or primary key
        :return: list of integers, the primary keys of the measures
"""
if len(data) > 0:
resp = []
for d in data:
try:
if isinstance(d, int) or d.isdigit():
# validate that the measure exists
resp.append(Measure.objects.get(pk=d).pk)
elif len(d) == 0:
continue
else:
if "." not in d or len(d) == 1:
_log.error("Invalid measure name: {}".format(d))
continue
measure = d.split(".")
resp.append(Measure.objects.get(category=measure[0], name=measure[1]).pk)
except Measure.DoesNotExist:
_log.error("Could not find measure for {}".format(d))
return resp
else:
return [] |
5,344 | test ecs mxnet training gluonnlp cpu | import os
import pytest
from test.test_utils import ECS_AML2_CPU_USWEST2, ECS_AML2_GPU_USWEST2, CONTAINER_TESTS_PREFIX
from test.test_utils import ecs as ecs_utils
from test.test_utils import ec2 as ec2_utils
from test.test_utils import get_framework_and_version_from_tag
from packaging.version import Version
MX_MNIST_TRAINING_SCRIPT = os.path.join(CONTAINER_TESTS_PREFIX, "testMXNet")
MX_DGL_TRAINING_SCRIPT = os.path.join(CONTAINER_TESTS_PREFIX, "dgl_tests", "testMXNetDGL")
MX_GLUON_NLP_TRAINING_SCRIPT = os.path.join(CONTAINER_TESTS_PREFIX, "gluonnlp_tests", "testNLP")
@pytest.mark.model("mnist")
@pytest.mark.parametrize("training_script", [MX_MNIST_TRAINING_SCRIPT], indirect=True)
@pytest.mark.parametrize("ecs_instance_type", ["c4.8xlarge"], indirect=True)
@pytest.mark.parametrize("ecs_ami", [ECS_AML2_CPU_USWEST2], indirect=True)
def test_ecs_mxnet_training_mnist_cpu(
cpu_only, ecs_container_instance, mxnet_training, training_cmd, ecs_cluster_name
):
"""
CPU mnist test for MXNet Training
Instance Type - c4.8xlarge
Given above parameters, registers a task with family named after this test, runs the task, and waits for
the task to be stopped before doing teardown operations of instance and cluster.
"""
instance_id, cluster_arn = ecs_container_instance
ecs_utils.ecs_training_test_executor(
ecs_cluster_name, cluster_arn, training_cmd, mxnet_training, instance_id
)
@pytest.mark.model("mnist")
@pytest.mark.parametrize("training_script", [MX_MNIST_TRAINING_SCRIPT], indirect=True)
@pytest.mark.parametrize("ecs_instance_type", ["p3.8xlarge"], indirect=True)
@pytest.mark.parametrize("ecs_ami", [ECS_AML2_GPU_USWEST2], indirect=True)
def test_ecs_mxnet_training_mnist_gpu(
gpu_only, ecs_container_instance, mxnet_training, training_cmd, ecs_cluster_name
):
"""
GPU mnist test for MXNet Training
    Instance Type - p3.8xlarge
Given above parameters, registers a task with family named after this test, runs the task, and waits for
the task to be stopped before doing teardown operations of instance and cluster.
"""
instance_id, cluster_arn = ecs_container_instance
num_gpus = ec2_utils.get_instance_num_gpus(instance_id)
ecs_utils.ecs_training_test_executor(
ecs_cluster_name, cluster_arn, training_cmd, mxnet_training, instance_id, num_gpus=num_gpus
)
@pytest.mark.integration("dgl")
@pytest.mark.model("gcn")
@pytest.mark.parametrize("training_script", [MX_DGL_TRAINING_SCRIPT], indirect=True)
@pytest.mark.parametrize("ecs_instance_type", ["c4.2xlarge"], indirect=True)
@pytest.mark.parametrize("ecs_ami", [ECS_AML2_CPU_USWEST2], indirect=True)
def test_ecs_mxnet_training_dgl_cpu(
cpu_only, py3_only, ecs_container_instance, mxnet_training, training_cmd, ecs_cluster_name
):
"""
CPU DGL test for MXNet Training
Instance Type - c4.2xlarge
DGL is only supported in py3, hence we have used the "py3_only" fixture to ensure py2 images don't run
on this function.
Given above parameters, registers a task with family named after this test, runs the task, and waits for
the task to be stopped before doing teardown operations of instance and cluster.
"""
# TODO: remove/update this when DGL supports MXNet 1.9
_, framework_version = get_framework_and_version_from_tag(mxnet_training)
if Version(framework_version) >= Version("1.9.0"):
pytest.skip("Skipping DGL tests as DGL does not yet support MXNet 1.9")
instance_id, cluster_arn = ecs_container_instance
ecs_utils.ecs_training_test_executor(
ecs_cluster_name, cluster_arn, training_cmd, mxnet_training, instance_id
)
@pytest.mark.integration("dgl")
@pytest.mark.model("gcn")
@pytest.mark.parametrize("training_script", [MX_DGL_TRAINING_SCRIPT], indirect=True)
@pytest.mark.parametrize("ecs_instance_type", ["p2.8xlarge"], indirect=True)
@pytest.mark.parametrize("ecs_ami", [ECS_AML2_GPU_USWEST2], indirect=True)
def test_ecs_mxnet_training_dgl_gpu(
gpu_only, py3_only, ecs_container_instance, mxnet_training, training_cmd, ecs_cluster_name
):
"""
GPU DGL test for MXNet Training
    Instance Type - p2.8xlarge
DGL is only supported in py3, hence we have used the "py3_only" fixture to ensure py2 images don't run
on this function.
Given above parameters, registers a task with family named after this test, runs the task, and waits for
the task to be stopped before doing teardown operations of instance and cluster.
"""
# TODO: remove/update this when DGL supports MXNet 1.9
_, framework_version = get_framework_and_version_from_tag(mxnet_training)
if Version(framework_version) >= Version("1.9.0"):
pytest.skip("Skipping DGL tests as DGL does not yet support MXNet 1.9")
instance_id, cluster_arn = ecs_container_instance
num_gpus = ec2_utils.get_instance_num_gpus(instance_id)
ecs_utils.ecs_training_test_executor(
ecs_cluster_name, cluster_arn, training_cmd, mxnet_training, instance_id, num_gpus=num_gpus
)
@pytest.mark.integration("gluonnlp")
@pytest.mark.model("TextCNN")
@pytest.mark.parametrize("training_script", [MX_GLUON_NLP_TRAINING_SCRIPT], indirect=True)
@pytest.mark.parametrize("ecs_instance_type", ["c5.9xlarge"], indirect=True)
@pytest.mark.parametrize("ecs_ami", [ECS_AML2_CPU_USWEST2], indirect=True)
def METHOD_NAME(
cpu_only, py3_only, ecs_container_instance, mxnet_training, training_cmd, ecs_cluster_name
):
"""
    CPU GluonNLP test for MXNet Training
    Instance Type - c5.9xlarge
    GluonNLP is only supported in py3, hence we have used the "py3_only" fixture to ensure py2 images don't run
on this function.
Given above parameters, registers a task with family named after this test, runs the task, and waits for
the task to be stopped before doing teardown operations of instance and cluster.
"""
instance_id, cluster_arn = ecs_container_instance
ecs_utils.ecs_training_test_executor(
ecs_cluster_name, cluster_arn, training_cmd, mxnet_training, instance_id
)
@pytest.mark.integration("gluonnlp")
@pytest.mark.model("TextCNN")
@pytest.mark.parametrize("training_script", [MX_GLUON_NLP_TRAINING_SCRIPT], indirect=True)
@pytest.mark.parametrize("ecs_instance_type", ["p3.16xlarge"], indirect=True)
@pytest.mark.parametrize("ecs_ami", [ECS_AML2_GPU_USWEST2], indirect=True)
def test_ecs_mxnet_training_gluonnlp_gpu(
gpu_only, py3_only, ecs_container_instance, mxnet_training, training_cmd, ecs_cluster_name
):
"""
GPU Gluon NLP test for MXNet Training
    Instance Type - p3.16xlarge
    GluonNLP is only supported in py3, hence we have used the "py3_only" fixture to ensure py2 images don't run
on this function.
Given above parameters, registers a task with family named after this test, runs the task, and waits for
the task to be stopped before doing teardown operations of instance and cluster.
"""
instance_id, cluster_arn = ecs_container_instance
num_gpus = ec2_utils.get_instance_num_gpus(instance_id)
ecs_utils.ecs_training_test_executor(
ecs_cluster_name, cluster_arn, training_cmd, mxnet_training, instance_id, num_gpus=num_gpus
) |
5,345 | test doctests | import doctest
import json
from datetime import datetime
from unittest.mock import Mock
from django.test.testcases import TestCase
from corehq.apps.domain.shortcuts import create_domain
from corehq.apps.locations.models import LocationType, SQLLocation
from corehq.apps.users.models import WebUser
from corehq.motech.dhis2.const import DHIS2_DATA_TYPE_DATE, LOCATION_DHIS_ID
from corehq.motech.dhis2.events_helpers import get_event
from corehq.motech.dhis2.forms import Dhis2ConfigForm
from corehq.motech.dhis2.repeaters import Dhis2Repeater
from corehq.motech.models import ConnectionSettings
from corehq.motech.value_source import CaseTriggerInfo, get_form_question_values
DOMAIN = "dhis2-test"
class TestDhis2EventsHelpers(TestCase):
@classmethod
def setUpClass(cls):
super().setUpClass()
cls.domain = create_domain(DOMAIN)
location_type = LocationType.objects.create(
domain=DOMAIN,
name='test_location_type',
)
cls.location = SQLLocation.objects.create(
domain=DOMAIN,
name='test location',
location_id='test_location',
location_type=location_type,
latitude='-33.8655',
longitude='18.6941',
metadata={LOCATION_DHIS_ID: "dhis2_location_id"},
)
cls.user = WebUser.create(DOMAIN, 'test', 'passwordtest', None, None)
cls.user.set_location(DOMAIN, cls.location)
@classmethod
def tearDownClass(cls):
cls.user.delete(cls.domain.name, deleted_by=None)
cls.location.delete()
cls.domain.delete()
super().tearDownClass()
def setUp(self):
self.form = {
"domain": DOMAIN,
"form": {
"@xmlns": "test_xmlns",
"event_date": "2017-05-25T21:06:27.012000",
"completed_date": "2017-05-25T21:06:27.012000",
"event_location": "-33.6543213 19.12344312 abcdefg",
"name": "test event",
"meta": {
"location": '',
"timeEnd": "2017-05-25T21:06:27.012000",
"timeStart": "2017-05-25T21:06:17.739000",
"userID": self.user.user_id,
"username": self.user.username
}
},
"received_on": "2017-05-26T09:17:23.692083Z",
}
self.config = {
'form_configs': json.dumps([{
'xmlns': 'test_xmlns',
'program_id': 'test program',
'event_status': 'COMPLETED',
'completed_date': {
'doc_type': 'FormQuestion',
'form_question': '/data/completed_date',
'external_data_type': DHIS2_DATA_TYPE_DATE
},
'org_unit_id': {
'doc_type': 'FormUserAncestorLocationField',
'form_user_ancestor_location_field': LOCATION_DHIS_ID
},
'event_location': {
'form_question': '/data/event_location'
},
'datavalue_maps': [
{
'data_element_id': 'dhis2_element_id',
'value': {
'doc_type': 'FormQuestion',
'form_question': '/data/name'
}
}
]
}])
}
config_form = Dhis2ConfigForm(data=self.config)
self.assertTrue(config_form.is_valid())
data = config_form.cleaned_data
conn = ConnectionSettings.objects.create(url="http://dummy.com", domain=DOMAIN)
self.repeater = Dhis2Repeater(domain=DOMAIN, connection_settings_id=conn.id)
self.repeater.dhis2_config['form_configs'] = data['form_configs']
self.repeater.save()
def test_form_processing_with_owner(self):
info = CaseTriggerInfo(
domain=DOMAIN,
case_id=None,
owner_id='test_location',
form_question_values=get_form_question_values(self.form),
)
event = get_event(DOMAIN, self.repeater.dhis2_config['form_configs'][0], form_json=self.form, info=info)
self.assertDictEqual(
{
'dataValues': [
{
'dataElement': 'dhis2_element_id',
'value': 'test event'
}
],
'status': 'COMPLETED',
'completedDate': '2017-05-25',
'program': 'test program',
'eventDate': '2017-05-26',
'orgUnit': 'dhis2_location_id',
'coordinate': {
'latitude': -33.6543,
'longitude': 19.1234
}
},
event
)
class TestNothingToSend(TestCase):
def setUp(self):
self.conn = ConnectionSettings.objects.create(
domain=DOMAIN,
name='Example DHIS2 server',
url='https://dhis2.example.com/',
)
def tearDown(self):
self.conn.delete()
def test_204_response(self):
form_xmlns = 'http://example.com/test-xmlns'
dhis2_config = {
'form_configs': [{
'xmlns': form_xmlns,
'program_id': 'abc123',
'program_stage_id': '',
'org_unit_id': '',
'event_date': {'form_question': '/data/event_date'},
'event_status': {'form_question': '/data/event_status'},
'completed_date': '',
'datavalue_maps': [],
'event_location': '',
}]
}
repeater = Dhis2Repeater(
domain=DOMAIN,
connection_settings_id=self.conn.id,
dhis2_config=dhis2_config,
dhis2_version='2.39.1.1',
dhis2_version_last_modified=datetime.utcnow(),
)
repeat_record = Mock(payload_id='abc123')
payload = {'form': {'@xmlns': form_xmlns}}
result = repeater.send_request(repeat_record, payload)
self.assertEqual(result.status_code, 204)
self.assertEqual(result.reason, 'No content')
def METHOD_NAME():
from corehq.motech.dhis2 import events_helpers
results = doctest.testmod(events_helpers)
assert results.failed == 0 |
5,346 | test return at least 1 at the | # Copyright 2016 Hardcoded Software (http://www.hardcoded.net)
#
# This software is licensed under the "GPLv3" License as described in the "LICENSE" file,
# which should be included with this package. The terms are also available at
# http://www.gnu.org/licenses/gpl-3.0.html
# The commented-out tests are for functions that have been converted to pure C for speed
from pytest import raises, skip
from hscommon.testutil import eq_
try:
from core.pe.block import avgdiff, getblocks2, NoBlocksError, DifferentBlockCountError
except ImportError:
skip("Can't import the block module, probably hasn't been compiled.")
def my_avgdiff(first, second, limit=768, min_iter=3): # this is so I don't have to re-write every call
return avgdiff(first, second, limit, min_iter)
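# Note (inferred from the tests below, not from the C implementation): avgdiff
# returns the running average of per-block summed channel differences. It
# aborts early and returns the current average once it exceeds `limit`, but
# only after at least `min_iter` block pairs have been compared.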
BLACK = (0, 0, 0)
RED = (0xFF, 0, 0)
GREEN = (0, 0xFF, 0)
BLUE = (0, 0, 0xFF)
class FakeImage:
def __init__(self, size, data):
self.size = size
self.data = data
def getdata(self):
return self.data
def crop(self, box):
pixels = []
for i in range(box[1], box[3]):
for j in range(box[0], box[2]):
pixel = self.data[i * self.size[0] + j]
pixels.append(pixel)
return FakeImage((box[2] - box[0], box[3] - box[1]), pixels)
def empty():
return FakeImage((0, 0), [])
def single_pixel(): # one red pixel
return FakeImage((1, 1), [(0xFF, 0, 0)])
def four_pixels():
pixels = [RED, (0, 0x80, 0xFF), (0x80, 0, 0), (0, 0x40, 0x80)]
return FakeImage((2, 2), pixels)
class TestCasegetblock:
def test_single_pixel(self):
im = single_pixel()
[b] = getblocks2(im, 1)
eq_(RED, b)
def test_no_pixel(self):
im = empty()
eq_([], getblocks2(im, 1))
def test_four_pixels(self):
im = four_pixels()
[b] = getblocks2(im, 1)
meanred = (0xFF + 0x80) // 4
meangreen = (0x80 + 0x40) // 4
meanblue = (0xFF + 0x80) // 4
eq_((meanred, meangreen, meanblue), b)
class TestCasegetblocks2:
def test_empty_image(self):
im = empty()
blocks = getblocks2(im, 1)
eq_(0, len(blocks))
def test_one_block_image(self):
im = four_pixels()
blocks = getblocks2(im, 1)
eq_(1, len(blocks))
block = blocks[0]
meanred = (0xFF + 0x80) // 4
meangreen = (0x80 + 0x40) // 4
meanblue = (0xFF + 0x80) // 4
eq_((meanred, meangreen, meanblue), block)
def test_four_blocks_all_black(self):
im = FakeImage((2, 2), [BLACK, BLACK, BLACK, BLACK])
blocks = getblocks2(im, 2)
eq_(4, len(blocks))
for block in blocks:
eq_(BLACK, block)
def test_two_pixels_image_horizontal(self):
pixels = [RED, BLUE]
im = FakeImage((2, 1), pixels)
blocks = getblocks2(im, 2)
eq_(4, len(blocks))
eq_(RED, blocks[0])
eq_(BLUE, blocks[1])
eq_(RED, blocks[2])
eq_(BLUE, blocks[3])
def test_two_pixels_image_vertical(self):
pixels = [RED, BLUE]
im = FakeImage((1, 2), pixels)
blocks = getblocks2(im, 2)
eq_(4, len(blocks))
eq_(RED, blocks[0])
eq_(RED, blocks[1])
eq_(BLUE, blocks[2])
eq_(BLUE, blocks[3])
class TestCaseavgdiff:
def test_empty(self):
with raises(NoBlocksError):
my_avgdiff([], [])
def test_two_blocks(self):
b1 = (5, 10, 15)
b2 = (255, 250, 245)
b3 = (0, 0, 0)
b4 = (255, 0, 255)
blocks1 = [b1, b2]
blocks2 = [b3, b4]
expected1 = 5 + 10 + 15
expected2 = 0 + 250 + 10
expected = (expected1 + expected2) // 2
eq_(expected, my_avgdiff(blocks1, blocks2))
def test_blocks_not_the_same_size(self):
b = (0, 0, 0)
with raises(DifferentBlockCountError):
my_avgdiff([b, b], [b])
def test_first_arg_is_empty_but_not_second(self):
# Don't return 0 (as when the 2 lists are empty), raise!
b = (0, 0, 0)
with raises(DifferentBlockCountError):
my_avgdiff([], [b])
def test_limit(self):
ref = (0, 0, 0)
b1 = (10, 10, 10) # avg 30
b2 = (20, 20, 20) # avg 45
b3 = (30, 30, 30) # avg 60
blocks1 = [ref, ref, ref]
blocks2 = [b1, b2, b3]
eq_(45, my_avgdiff(blocks1, blocks2, 44))
def test_min_iterations(self):
ref = (0, 0, 0)
b1 = (10, 10, 10) # avg 30
b2 = (20, 20, 20) # avg 45
b3 = (10, 10, 10) # avg 40
blocks1 = [ref, ref, ref]
blocks2 = [b1, b2, b3]
eq_(40, my_avgdiff(blocks1, blocks2, 45 - 1, 3))
# Bah, I don't know why this test fails, but I don't think it matters very much
# def test_just_over_the_limit(self):
# #A score just over the limit might return exactly the limit due to truncating. We should
# #ceil() the result in this case.
# ref = (0, 0, 0)
# b1 = (10, 0, 0)
# b2 = (11, 0, 0)
# blocks1 = [ref, ref]
# blocks2 = [b1, b2]
# eq_(11, my_avgdiff(blocks1, blocks2, 10))
#
def METHOD_NAME(self):
ref = (0, 0, 0)
b1 = (1, 0, 0)
blocks1 = [ref for _ in range(250)]
blocks2 = [ref for _ in range(250)]
blocks2[0] = b1
eq_(1, my_avgdiff(blocks1, blocks2))
def test_return_0_if_there_is_no_difference(self):
ref = (0, 0, 0)
blocks1 = [ref, ref]
blocks2 = [ref, ref]
eq_(0, my_avgdiff(blocks1, blocks2)) |
5,347 | get instancemaker | import django.forms
from django.conf import settings
from django.core.exceptions import ValidationError
from django.utils import timezone
from collections import OrderedDict
from datetime import timedelta
from postgresqleu.util.time import time_sinceoruntil, datetime_string
from postgresqleu.util.widgets import StaticTextWidget, EmailTextWidget
from postgresqleu.util.backendforms import BackendForm
from postgresqleu.membership.models import Member, MemberLog, Meeting, MembershipConfiguration
from postgresqleu.membership.models import MeetingType, MeetingReminder
from postgresqleu.membership.backendlookups import MemberLookup
class BackendConfigForm(BackendForm):
helplink = 'membership'
class Meta:
model = MembershipConfiguration
fields = ['sender_email', 'sender_name', 'membership_years', 'membership_cost', 'country_validator',
'paymentmethods', ]
widgets = {
'paymentmethods': django.forms.CheckboxSelectMultiple,
}
def fix_fields(self):
        self.fields['paymentmethods'].label_from_instance = lambda x: "{0}{1}".format(x.internaldescription, "" if x.active else " (INACTIVE)")
self.fields['membership_cost'].help_text = "Membership cost in {0}".format(settings.CURRENCY_SYMBOL)
class MemberLogManager(object):
title = "Log"
singular = "log"
can_add = False
def get_list(self, instance):
return [(None, line.timestamp, line.message) for line in MemberLog.objects.filter(member=instance).order_by('-timestamp')]
class BackendMemberForm(BackendForm):
helplink = 'membership'
list_fields = ['fullname', 'user', 'paiduntil']
queryset_select_related = ['user', ]
defaultsort = [['paiduntil', 'desc'], ['fullname', 'asc']]
allow_email = True
class Meta:
model = Member
fields = ['fullname', 'country', 'listed', 'country_exception',
'membersince', 'paiduntil', 'expiry_warning_sent', ]
fieldsets = [
        {'id': 'user_info', 'legend': 'User information', 'fields': ['fullname', 'country', 'listed', ]},
        {'id': 'admin_info', 'legend': 'Administrative', 'fields': ['country_exception', ]},
        {'id': 'date_info', 'legend': 'Date info', 'fields': ['membersince', 'paiduntil', 'expiry_warning_sent', ]},
]
readonly_fields = ['membersince', 'paiduntil', 'expiry_warning_sent', ]
linked_objects = OrderedDict({
'log': MemberLogManager(),
})
@classmethod
def get_column_filters(cls, conference):
return {
'Paid until': [], # Empty list triggers the option to choose empty/not empty
}
class BackendMeetingReminderForm(BackendForm):
helplink = 'meetings'
list_fields = ['sendat', 'sentat', ]
readonly_fields = ['sentat', ]
class Meta:
model = MeetingReminder
fields = ['sendat', 'sentat', ]
    def clean_sendat(self):
        sendat = self.cleaned_data.get('sendat', None)
        if sendat:
            if sendat > self.instance.meeting.dateandtime - timedelta(minutes=30):
                raise ValidationError("Reminder must be set at least 30 minutes before the meeting starts!")
            if sendat < timezone.now():
                raise ValidationError("This timestamp is in the past!")
        return sendat
def clean(self):
d = super().clean()
if self.instance.sentat:
raise ValidationError("Cannot edit a reminder that has already been sent")
return d
class MeetingReminderManager(object):
title = 'Reminders'
singular = 'Reminder'
can_add = True
def get_list(self, instance):
return [
(r.id, "{} ({})".format(datetime_string(r.sendat),
time_sinceoruntil(r.sendat)),
r.sentat is not None
) for r in MeetingReminder.objects.filter(meeting=instance)]
def get_form(self, obj, POST):
return BackendMeetingReminderForm
def get_object(self, masterobj, subid):
return MeetingReminder.objects.get(meeting=masterobj, pk=subid)
def METHOD_NAME(self, masterobj):
return lambda: MeetingReminder(meeting=masterobj)
class BackendMeetingForm(BackendForm):
helplink = 'meetings'
list_fields = ['name', 'dateandtime', 'meetingtype', 'state']
linked_objects = OrderedDict({
'reminders': MeetingReminderManager(),
})
extrabuttons = [
('View meeting log', 'log/'),
('View attendees', 'attendees/'),
]
class Meta:
model = Meeting
fields = ['name', 'dateandtime', 'allmembers', 'members', 'meetingtype', 'meetingadmins', 'botname', ]
fieldsets = [
{'id': 'meeting_info', 'legend': 'Meeting information', 'fields': ['name', 'dateandtime', 'allmembers', 'members']},
{'id': 'meeting_impl', 'legend': 'Meeting implementation', 'fields': ['meetingtype', 'meetingadmins', 'botname']},
]
selectize_multiple_fields = {
'members': MemberLookup(),
'meetingadmins': MemberLookup(),
}
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
# Remove extra buttons unless we're in a web meeting and this web meeting has started
if self.instance:
if self.instance.meetingtype != MeetingType.WEB or self.instance.state == 0:
self.extrabuttons = []
else:
self.extrabuttons = []
def clean(self):
d = super().clean()
if d.get('meetingtype', None) == MeetingType.WEB:
if d['botname']:
self.add_error('botname', 'Bot name should not be specified for web meetings')
if not d['meetingadmins']:
self.add_error('meetingadmins', 'Meeting administrator(s) must be specified for web meetings')
elif d.get('meetingtype', None) == MeetingType.IRC:
if not d['botname']:
self.add_error('botname', 'Bot name must be specified for IRC meetings')
if d['meetingadmins']:
self.add_error('meetingadmins', 'Meeting administrator(s) cannot be specified for IRC meetings')
return d
def clean_meetingtype(self):
if self.cleaned_data.get('meetingtype', None) == MeetingType.WEB and not settings.MEETINGS_WS_BASE_URL:
raise ValidationError("Web meetings server is not configured in local_settings.py")
if self.instance and self.instance.state > 0 and self.instance.meetingtype != self.cleaned_data['meetingtype']:
raise ValidationError("Cannot change the type of a meeting that has already started")
return self.cleaned_data.get('meetingtype', None)
class BackendMemberSendEmailForm(django.forms.Form):
helplink = 'membership'
_from = django.forms.CharField(max_length=128, disabled=True, label="From")
subject = django.forms.CharField(max_length=128, required=True)
recipients = django.forms.Field(widget=StaticTextWidget, required=False)
message = django.forms.CharField(widget=EmailTextWidget, required=True)
idlist = django.forms.CharField(widget=django.forms.HiddenInput, required=True)
confirm = django.forms.BooleanField(label="Confirm", required=False)
def __init__(self, *args, **kwargs):
super(BackendMemberSendEmailForm, self).__init__(*args, **kwargs)
if not (self.data.get('subject') and self.data.get('message')):
del self.fields['confirm']
def clean_confirm(self):
if not self.cleaned_data['confirm']:
raise ValidationError("Please check this box to confirm that you are really sending this email! There is no going back!") |
5,348 | get urls | # SPDX-License-Identifier: GPL-3.0-or-later
# SPDX-FileCopyrightText: Copyright contributors to the OpenScanHub project.
import os
from glob import glob
from django.contrib import admin
from django.shortcuts import render
from django.urls import path
from django.utils.safestring import mark_safe
from kobo.hub.models import TASK_STATES, Task
from osh.hub.other.autoregister import autoregister_admin
from osh.hub.other.shortcuts import add_link_field
from osh.hub.scan.models import SCAN_STATES, Scan, ScanBinding
from osh.hub.scan.notify import send_scan_notification
from osh.hub.scan.xmlrpc_helper import cancel_scan as h_cancel_scan
from osh.hub.scan.xmlrpc_helper import cancel_scan_tasks
from osh.hub.scan.xmlrpc_helper import fail_scan as h_fail_scan
from osh.hub.scan.xmlrpc_helper import finish_scan as h_finish_scan
autoregister_admin('osh.hub.scan.models',
exclude_models=['Scan'],
reversed_relations={'MockConfig': ['analyzers']},
admin_fields={
'Tag': {'search_fields': ['name', 'mock__name', 'release__tag']},
'ScanBinding': {'search_fields': ['scan__nvr', 'scan__package__name']},
'Package': {'search_fields': ['name']},
'AnalyzerVersion': {'search_fields': ['version', 'analyzer__name', 'mocks__name']},
})
autoregister_admin('django.contrib.admin.models')
@admin.register(Scan)
@add_link_field('scanbinding', 'scanbinding', field_label="Binding")
@add_link_field('scan', 'base', field_label="Base")
@add_link_field('scan', 'parent', field_label="Parent")
@add_link_field('tag', 'tag', field_label="Tag")
@add_link_field('package', 'package', field_label="Package")
class ScanAdmin(admin.ModelAdmin):
list_display = ("id", "nvr", "state", "scan_type", 'base_link',
'parent_link', "tag_link",
'username', 'package_link', 'scanbinding_link', 'enabled')
raw_id_fields = ("base", "tag", "username", "package", "parent")
search_fields = ['package__name', 'nvr']
list_per_page = 15
review_template = 'admin/my_test/myentry/review.html'
def METHOD_NAME(self):
urls = super().METHOD_NAME()
slug = '<int:scan_id>/change'
my_urls = [
path(f'{slug}/notify/', self.admin_site.admin_view(self.notify)),
path(f'{slug}/fail/', self.admin_site.admin_view(self.fail_scan)),
path(f'{slug}/cancel/', self.admin_site.admin_view(self.cancel_scan)),
path(f'{slug}/finish/', self.admin_site.admin_view(self.finish_scan)),
]
return my_urls + urls
def notify(self, request, scan_id):
result = send_scan_notification(request, scan_id)
scan = Scan.objects.get(id=scan_id)
context = {
'title': 'Notify: %s' % scan.nvr,
'object': scan,
'opts': self.model._meta,
'result': mark_safe("Number of e-mails sent: <b>%s</b>" % result),
'app_label': self.model._meta.app_label,
}
return render(request, 'admin/scan/scan/state_change.html', context)
def fail_scan(self, request, scan_id):
sb = ScanBinding.objects.get(scan__id=scan_id)
cancel_scan_tasks(sb.task)
h_fail_scan(scan_id, "set as failed from admin interface.")
scan = Scan.objects.get(id=scan_id)
context = {
'title': 'Fail scan: %s' % scan.nvr,
'object': scan,
'opts': self.model._meta,
'result': "Scan #%s set to failed" % scan_id,
'app_label': self.model._meta.app_label,
}
return render(request, 'admin/scan/scan/state_change.html', context)
def finish_scan(self, request, scan_id):
task = Task.objects.get(scanbinding__scan__id=scan_id)
task.state = TASK_STATES['CLOSED']
task.save()
tb_path = glob(os.path.join(Task.get_task_dir(task.id), '*.tar.xz'))[0]
h_finish_scan(request, scan_id, os.path.basename(tb_path))
scan = Scan.objects.get(id=scan_id)
context = {
'title': 'Finish scan: %s' % scan.nvr,
'object': scan,
'opts': self.model._meta,
'result': "Scan #%s set to %s" % (
scan_id,
SCAN_STATES.get_value(scan.state)
),
'app_label': self.model._meta.app_label,
}
return render(request, 'admin/scan/scan/state_change.html', context)
def cancel_scan(self, request, scan_id):
scan_binding = ScanBinding.objects.by_scan_id(scan_id)
scan = h_cancel_scan(scan_binding)
context = {
'title': 'Cancelation of scan: %s' % scan,
'object': scan,
'opts': self.model._meta,
'result': "Scan %s cancelled." % (scan),
'app_label': self.model._meta.app_label,
}
return render(request, 'admin/scan/scan/state_change.html', context) |
5,349 | test add email wrong otp | """Tests to add a new phone number or email address."""
# pylint: disable=redefined-outer-name
from unittest.mock import patch
import pytest
from werkzeug.datastructures import MultiDict
from coaster.utils import newpin
from funnel import models
PATCH_EMAIL_VALIDATOR = (
'funnel.models.email_address.EmailAddress.is_valid_email_address'
)
PATCH_SMS_OTP_SEND = 'funnel.views.otp.OtpSessionForNewPhone.send_sms'
PATCH_EMAIL_OTP_SEND = 'funnel.views.otp.OtpSessionForNewEmail.send_email'
TEST_NEW_EMAIL = 'rincewind@example.com'
TEST_NEW_PHONE = '+918123456789'
@pytest.fixture()
def useremail_rincewind(user_rincewind: models.User) -> models.AccountEmail:
"""Email address for user fixture."""
return user_rincewind.add_email(TEST_NEW_EMAIL)
@pytest.fixture()
def userphone_rincewind(user_rincewind: models.User) -> models.AccountPhone:
"""Phone number for user fixture."""
return user_rincewind.add_phone(TEST_NEW_PHONE)
def get_wrong_otp(reference: str) -> str:
"""Return a random value that does not match the reference value."""
result = reference
while result == reference:
result = newpin(len(reference))
return result
def METHOD_NAME(
client, csrf_token, login, user_rincewind: models.User
) -> None:
"""Add a new email address with an OTP and confirm an incorrect OTP is rejected."""
login.as_(user_rincewind)
with patch(PATCH_EMAIL_VALIDATOR, return_value=True):
with patch(PATCH_EMAIL_OTP_SEND, autospec=True, return_value=True) as mock:
rv1 = client.post(
'/account/email/new',
data=MultiDict({'csrf_token': csrf_token, 'email': TEST_NEW_EMAIL}),
)
assert rv1.status_code == 303
otp_session = mock.call_args[0][0] # First call, first argument (self)
caught_otp = otp_session.otp
rv2 = client.post(
rv1.location,
data=MultiDict(
{'csrf_token': csrf_token, 'otp': get_wrong_otp(caught_otp)}
),
)
assert 'OTP is incorrect' in rv2.data.decode()
def test_add_email(client, csrf_token, login, user_rincewind: models.User) -> None:
"""Add a new email address with an OTP."""
login.as_(user_rincewind)
assert user_rincewind.emails == []
with patch(PATCH_EMAIL_VALIDATOR, return_value=True):
with patch(PATCH_EMAIL_OTP_SEND, autospec=True, return_value=True) as mock:
rv1 = client.post(
'/account/email/new',
data=MultiDict({'csrf_token': csrf_token, 'email': TEST_NEW_EMAIL}),
)
assert rv1.status_code == 303
otp_session = mock.call_args[0][0] # First call, first argument (self)
caught_otp = otp_session.otp
rv2 = client.post(
rv1.location, data=MultiDict({'csrf_token': csrf_token, 'otp': caught_otp})
)
assert rv2.status_code == 303
assert str(user_rincewind.email) == TEST_NEW_EMAIL
def test_merge_with_email_otp(
client, csrf_token, login, useremail_rincewind, user_mort
) -> None:
"""Providing a valid OTP for another user's email address causes a merge prompt."""
login.as_(user_mort)
assert user_mort.emails == []
with patch(PATCH_EMAIL_VALIDATOR, return_value=True):
with patch(PATCH_EMAIL_OTP_SEND, autospec=True, return_value=True) as mock:
rv1 = client.post(
'/account/email/new',
data=MultiDict({'csrf_token': csrf_token, 'email': TEST_NEW_EMAIL}),
)
assert rv1.status_code == 303
otp_session = mock.call_args[0][0] # First call, first argument (self)
caught_otp = otp_session.otp
rv2 = client.post(
rv1.location, data=MultiDict({'csrf_token': csrf_token, 'otp': caught_otp})
)
assert rv2.status_code == 303
assert user_mort.emails == []
assert rv2.location == '/account/merge'
with client.session_transaction() as session:
assert session['merge_buid'] == useremail_rincewind.account.buid
def test_add_phone_wrong_otp(
client, csrf_token, login, user_rincewind: models.User
) -> None:
"""Add a new phone number with an OTP and confirm an incorrect OTP is rejected."""
login.as_(user_rincewind)
assert user_rincewind.phones == []
with patch(PATCH_SMS_OTP_SEND, autospec=True, return_value=True) as mock:
rv1 = client.post(
'/account/phone/new',
data=MultiDict({'csrf_token': csrf_token, 'phone': TEST_NEW_PHONE}),
)
assert rv1.status_code == 303
otp_session = mock.call_args[0][0] # First call, first argument (self)
caught_otp = otp_session.otp
rv2 = client.post(
rv1.location,
data=MultiDict({'csrf_token': csrf_token, 'otp': get_wrong_otp(caught_otp)}),
)
assert 'OTP is incorrect' in rv2.data.decode()
def test_add_phone(client, csrf_token, login, user_rincewind: models.User) -> None:
"""Add a new phone number with an OTP."""
login.as_(user_rincewind)
assert user_rincewind.phones == []
with patch(PATCH_SMS_OTP_SEND, autospec=True, return_value=True) as mock:
rv1 = client.post(
'/account/phone/new',
data=MultiDict({'csrf_token': csrf_token, 'phone': TEST_NEW_PHONE}),
)
assert rv1.status_code == 303
otp_session = mock.call_args[0][0] # First call, first argument (self)
caught_otp = otp_session.otp
rv2 = client.post(
rv1.location, data=MultiDict({'csrf_token': csrf_token, 'otp': caught_otp})
)
assert rv2.status_code == 303
assert str(user_rincewind.phone) == TEST_NEW_PHONE
def test_merge_with_phone_otp(
client, csrf_token, login, userphone_rincewind, user_mort
) -> None:
"""Providing a valid OTP for another user's phone number causes a merge prompt."""
login.as_(user_mort)
assert user_mort.phones == []
with patch(PATCH_SMS_OTP_SEND, autospec=True, return_value=True) as mock:
rv1 = client.post(
'/account/phone/new',
data=MultiDict({'csrf_token': csrf_token, 'phone': TEST_NEW_PHONE}),
)
assert rv1.status_code == 303
assert rv1.location == '/account/phone/verify'
otp_session = mock.call_args[0][0] # First call, first argument (self)
caught_otp = otp_session.otp
rv2 = client.post(
rv1.location, data=MultiDict({'csrf_token': csrf_token, 'otp': caught_otp})
)
assert rv2.status_code == 303
assert user_mort.phones == []
assert rv2.location == '/account/merge'
with client.session_transaction() as session:
assert session['merge_buid'] == userphone_rincewind.account.buid |
5,350 | w cp u control | # SPDX-License-Identifier: MIT
import struct
from enum import IntEnum
from ..hv import TraceMode
from ..utils import *
from . import ADTDevTracer
from ..hw.asc import *
class DIR(IntEnum):
RX = 0
TX = 1
def msg(message, direction=None, regtype=None, name=None):
def f(x):
x.is_message = True
x.direction = direction
x.message = message
x.regtype = regtype
x.name = name
return x
return f
def msg_log(*args, **kwargs):
def x(self, msg):
return False
return msg(*args, **kwargs)(x)
def msg_ign(*args, **kwargs):
def x(self, msg):
return True
return msg(*args, **kwargs)(x)
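# Illustrative sketch (names are made up): inside an EP subclass, the helpers
# above declare handlers keyed by (direction, message type). A handler that
# returns True suppresses the default log line.
#
#   class Example(EP):
#       BASE_MESSAGE = SomeMessageRegister   # hypothetical register class
#       Hello = msg_log(1, DIR.RX)           # log-only handler
#
#       @msg(2, DIR.TX, SomeMessageRegister)
#       def Start(self, msg):
#           self.log(f"start: {msg.value:#x}")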
class EPState(object):
pass
class EP(object):
NAME = None
BASE_MESSAGE = None
def __init__(self, tracer, epid):
self.tracer = tracer
self.epid = epid
self.present = False
self.started = False
self.name = self.NAME or type(self).__name__.lower()
self.state = EPState()
self.hv = self.tracer.hv
self.msgmap = {}
for name in dir(self):
i = getattr(self, name)
if not callable(i) or not getattr(i, "is_message", False):
continue
self.msgmap[i.direction, i.message] = getattr(self, name), name, i.regtype
def log(self, msg):
self.tracer.log(f"[{self.name}] {msg}")
def start(self):
pass
def handle_msg(self, direction, r0, r1):
msgtype = None
if self.BASE_MESSAGE:
r0 = self.BASE_MESSAGE(r0.value)
msgtype = r0.TYPE
handler = None
name = "<unknown>"
regtype = None
msgids = [
(direction, msgtype),
(None, msgtype),
(direction, None),
(None, None),
]
for msgid in msgids:
handler, name, regtype = self.msgmap.get(msgid, (None, None, None))
if handler:
break
if regtype is not None:
r0 = regtype(r0.value)
if handler:
if handler.name is not None:
name = handler.name
if handler(r0):
return True
d = ">" if direction == DIR.TX else "<"
self.log(f"{d}{msgtype:#x}({name}) {r0.value:016x} ({r0.str_fields()})")
return True
class EPContainer(object):
pass
class BaseASCTracer(ADTDevTracer):
DEFAULT_MODE = TraceMode.SYNC
REGMAPS = [ASCRegs, None]
NAMES = ["asc", None]
ENDPOINTS = {}
def w_OUTBOX_CTRL(self, val):
self.log(f"OUTBOX_CTRL = {val!s}")
def w_INBOX_CTRL(self, val):
self.log(f"INBOX_CTRL = {val!s}")
def METHOD_NAME(self, val):
self.log(f"CPU_CONTROL = {val!s}")
def w_INBOX1(self, inbox1):
inbox0 = self.asc.cached.INBOX0.reg
if self.verbose >= 2:
self.log(f"SEND: {inbox0.value:016x}:{inbox1.value:016x} " +
f"{inbox0.str_fields()} | {inbox1.str_fields()}")
self.handle_msg(DIR.TX, inbox0, inbox1)
def r_OUTBOX1(self, outbox1):
outbox0 = self.asc.cached.OUTBOX0.reg
if self.verbose >= 2:
self.log(f"RECV: {outbox0.value:016x}:{outbox1.value:016x} " +
f"{outbox0.str_fields()} | {outbox1.str_fields()}")
self.handle_msg(DIR.RX, outbox0, outbox1)
def init_state(self):
self.state.ep = {}
def handle_msg(self, direction, r0, r1):
if r1.EP in self.epmap:
if self.epmap[r1.EP].handle_msg(direction, r0, r1):
return
d = ">" if direction == DIR.TX else "<"
self.log(f"{d}ep:{r1.EP:02x} {r0.value:016x} ({r0.str_fields()})")
def ioread(self, dva, size):
if self.dart:
return self.dart.ioread(self.stream, dva & 0xFFFFFFFFF, size)
else:
return self.hv.iface.readmem(dva, size)
def iowrite(self, dva, data):
if self.dart:
return self.dart.iowrite(self.stream, dva & 0xFFFFFFFFF, data)
else:
return self.hv.iface.writemem(dva, data)
def start(self, dart=None, stream=0):
super().start()
self.dart = dart
self.stream = stream
self.msgmap = {}
for name in dir(self):
i = getattr(self, name)
if not callable(i) or not getattr(i, "is_message", False):
continue
self.msgmap[i.direction, i.endpoint, i.message] = getattr(self, name), name, i.regtype
self.epmap = {}
self.ep = EPContainer()
for cls in type(self).mro():
eps = getattr(cls, "ENDPOINTS", None)
if eps is None:
break
for k, v in eps.items():
if k in self.epmap:
continue
ep = v(self, k)
ep.dart = dart
ep.stream = stream
self.epmap[k] = ep
if k in self.state.ep:
ep.state.__dict__.update(self.state.ep[k])
self.state.ep[k] = ep.state.__dict__
if getattr(self.ep, ep.name, None):
ep.name = f"{ep.name}{k:02x}"
setattr(self.ep, ep.name, ep)
ep.start()
# System endpoints
## Management endpoint
from ..fw.asc.mgmt import ManagementMessage, Mgmt_EPMap, Mgmt_EPMap_Ack, Mgmt_StartEP, Mgmt_SetAPPower, Mgmt_SetIOPPower, Mgmt_IOPPowerAck
class Management(EP):
BASE_MESSAGE = ManagementMessage
HELLO = msg_log(1, DIR.RX)
HELLO_ACK = msg_log(2, DIR.TX)
@msg(5, DIR.TX, Mgmt_StartEP)
def StartEP(self, msg):
ep = self.tracer.epmap.get(msg.EP, None)
if ep:
ep.started = True
self.log(f" Starting endpoint #{msg.EP:#02x} ({ep.name})")
else:
self.log(f" Starting endpoint #{msg.EP:#02x}")
#return True
Init = msg_log(6, DIR.TX)
@msg(8, DIR.RX, Mgmt_EPMap)
def EPMap(self, msg):
for i in range(32):
if msg.BITMAP & (1 << i):
epno = 32 * msg.BASE + i
ep = self.tracer.epmap.get(epno, None)
if ep:
ep.present = True
self.log(f" Adding endpoint #{epno:#02x} ({ep.name})")
else:
self.log(f" Adding endpoint #{epno:#02x}")
EPMap_Ack = msg_log(8, DIR.TX, Mgmt_EPMap_Ack)
SetIOPPower = msg_log(6, DIR.TX, Mgmt_SetIOPPower)
SetIOPPowerAck = msg_log(7, DIR.TX, Mgmt_IOPPowerAck)
SetAPPower = msg_log(0x0b, DIR.TX, Mgmt_SetAPPower)
SetAPPowerAck = msg_log(0x0b, DIR.RX, Mgmt_SetAPPower)
## Syslog endpoint
from ..fw.asc.syslog import SyslogMessage, Syslog_Init, Syslog_GetBuf, Syslog_Log
class Syslog(EP):
BASE_MESSAGE = SyslogMessage
@msg(8, DIR.RX, Syslog_Init)
def Init(self, msg):
self.state.count = msg.COUNT
self.state.entrysize = msg.ENTRYSIZE
@msg(1, DIR.RX, Syslog_GetBuf)
def GetBuf(self, msg):
if msg.DVA:
self.state.syslog_buf = msg.DVA
@msg(1, DIR.TX, Syslog_GetBuf)
def GetBuf_Ack(self, msg):
self.state.syslog_buf = msg.DVA
@msg(5, DIR.RX, Syslog_Log)
def Log(self, msg):
buf = self.state.syslog_buf
stride = 0x20 + self.state.entrysize
log = self.tracer.ioread(buf + msg.INDEX * stride, stride)
hdr, unk, context, logmsg = struct.unpack(f"<II24s{self.state.entrysize}s", log)
context = context.split(b"\x00")[0].decode("ascii")
logmsg = logmsg.split(b"\x00")[0].decode("ascii").rstrip("\n")
self.log(f"* [{context}]{logmsg}")
return True
Log_Ack = msg_ign(5, DIR.TX, Syslog_Log)
class ASCTracer(BaseASCTracer):
ENDPOINTS = {
0: Management,
#1: CrashLog,
2: Syslog,
#3: KDebug,
#4: IOReporting,
} |
5,351 | test public task exception | from unittest.mock import patch
import pytest
from allauth.socialaccount.models import SocialAccount
from django.contrib.auth.models import User
from django.test import TestCase
from django_dynamic_fixture import get
from messages_extends.models import Message
from readthedocs.builds import tasks as build_tasks
from readthedocs.builds.constants import BUILD_STATUS_SUCCESS, EXTERNAL, LATEST
from readthedocs.builds.models import Build, Version
from readthedocs.oauth.models import RemoteRepository, RemoteRepositoryRelation
from readthedocs.projects.models import Project
class TestCeleryBuilding(TestCase):
"""
These tests run the build functions directly.
    They don't use Celery.
"""
def setUp(self):
super().setUp()
self.eric = User(username="eric")
self.eric.set_password("test")
self.eric.save()
self.project = Project.objects.create(
name="Test Project",
)
self.project.users.add(self.eric)
self.version = self.project.versions.get(slug=LATEST)
@pytest.mark.skip
def test_check_duplicate_no_reserved_version(self):
create_git_branch(self.repo, "no-reserved")
create_git_tag(self.repo, "no-reserved")
version = self.project.versions.get(slug=LATEST)
self.assertEqual(
self.project.versions.filter(slug__startswith="no-reserved").count(), 0
)
sync_repository_task(version_id=version.pk)
self.assertEqual(
self.project.versions.filter(slug__startswith="no-reserved").count(), 2
)
def METHOD_NAME(self):
"""
        Test when a PublicTask raises an Exception.
        The exception should be caught and added to the ``info`` attribute of
        the result. In addition, the task state should be SUCCESS.
"""
from readthedocs.core.utils.tasks import PublicTask
from readthedocs.worker import app
@app.task(name="public_task_exception", base=PublicTask)
def public_task_exception():
raise Exception("Something bad happened")
result = public_task_exception.delay()
        # although the task raised an exception, it is reported as SUCCESS since we
        # add the exception to the ``info`` attribute
self.assertEqual(result.status, "SUCCESS")
self.assertEqual(
result.info,
{
"task_name": "public_task_exception",
"context": {},
"public_data": {},
"error": "Something bad happened",
},
)
@patch("readthedocs.oauth.services.github.GitHubService.send_build_status")
def test_send_build_status_with_remote_repo_github(self, send_build_status):
self.project.repo = "https://github.com/test/test/"
self.project.save()
social_account = get(SocialAccount, user=self.eric, provider="gitlab")
remote_repo = get(RemoteRepository)
remote_repo.projects.add(self.project)
get(
RemoteRepositoryRelation,
remote_repository=remote_repo,
user=self.eric,
account=social_account,
)
external_version = get(Version, project=self.project, type=EXTERNAL)
external_build = get(Build, project=self.project, version=external_version)
build_tasks.send_build_status(
external_build.id, external_build.commit, BUILD_STATUS_SUCCESS
)
send_build_status.assert_called_once_with(
external_build,
external_build.commit,
BUILD_STATUS_SUCCESS,
)
self.assertEqual(Message.objects.filter(user=self.eric).count(), 0)
@patch("readthedocs.oauth.services.github.GitHubService.send_build_status")
def test_send_build_status_with_social_account_github(self, send_build_status):
social_account = get(SocialAccount, user=self.eric, provider="github")
self.project.repo = "https://github.com/test/test/"
self.project.save()
external_version = get(Version, project=self.project, type=EXTERNAL)
external_build = get(Build, project=self.project, version=external_version)
build_tasks.send_build_status(
external_build.id, external_build.commit, BUILD_STATUS_SUCCESS
)
send_build_status.assert_called_once_with(
external_build,
external_build.commit,
BUILD_STATUS_SUCCESS,
)
self.assertEqual(Message.objects.filter(user=self.eric).count(), 0)
@patch("readthedocs.oauth.services.github.GitHubService.send_build_status")
def test_send_build_status_no_remote_repo_or_social_account_github(
self, send_build_status
):
self.project.repo = "https://github.com/test/test/"
self.project.save()
external_version = get(Version, project=self.project, type=EXTERNAL)
external_build = get(Build, project=self.project, version=external_version)
build_tasks.send_build_status(
external_build.id, external_build.commit, BUILD_STATUS_SUCCESS
)
send_build_status.assert_not_called()
self.assertEqual(Message.objects.filter(user=self.eric).count(), 1)
@patch("readthedocs.oauth.services.gitlab.GitLabService.send_build_status")
def test_send_build_status_with_remote_repo_gitlab(self, send_build_status):
self.project.repo = "https://gitlab.com/test/test/"
self.project.save()
social_account = get(SocialAccount, user=self.eric, provider="gitlab")
remote_repo = get(RemoteRepository)
remote_repo.projects.add(self.project)
get(
RemoteRepositoryRelation,
remote_repository=remote_repo,
user=self.eric,
account=social_account,
)
external_version = get(Version, project=self.project, type=EXTERNAL)
external_build = get(Build, project=self.project, version=external_version)
build_tasks.send_build_status(
external_build.id, external_build.commit, BUILD_STATUS_SUCCESS
)
send_build_status.assert_called_once_with(
external_build,
external_build.commit,
BUILD_STATUS_SUCCESS,
)
self.assertEqual(Message.objects.filter(user=self.eric).count(), 0)
@patch("readthedocs.oauth.services.gitlab.GitLabService.send_build_status")
def test_send_build_status_with_social_account_gitlab(self, send_build_status):
social_account = get(SocialAccount, user=self.eric, provider="gitlab")
self.project.repo = "https://gitlab.com/test/test/"
self.project.save()
external_version = get(Version, project=self.project, type=EXTERNAL)
external_build = get(Build, project=self.project, version=external_version)
build_tasks.send_build_status(
external_build.id, external_build.commit, BUILD_STATUS_SUCCESS
)
send_build_status.assert_called_once_with(
external_build,
external_build.commit,
BUILD_STATUS_SUCCESS,
)
self.assertEqual(Message.objects.filter(user=self.eric).count(), 0)
@patch("readthedocs.oauth.services.gitlab.GitLabService.send_build_status")
def test_send_build_status_no_remote_repo_or_social_account_gitlab(
self, send_build_status
):
self.project.repo = "https://gitlab.com/test/test/"
self.project.save()
external_version = get(Version, project=self.project, type=EXTERNAL)
external_build = get(Build, project=self.project, version=external_version)
build_tasks.send_build_status(
external_build.id, external_build.commit, BUILD_STATUS_SUCCESS
)
send_build_status.assert_not_called()
self.assertEqual(Message.objects.filter(user=self.eric).count(), 1) |
5,352 | test serine rna sequence 2 | # These tests are auto-generated with test data from:
# https://github.com/exercism/problem-specifications/tree/main/exercises/protein-translation/canonical-data.json
# File last updated on 2023-07-19
import unittest
from protein_translation import (
proteins,
)
class ProteinTranslationTest(unittest.TestCase):
def test_methionine_rna_sequence(self):
value = "AUG"
expected = ["Methionine"]
self.assertEqual(proteins(value), expected)
def test_phenylalanine_rna_sequence_1(self):
value = "UUU"
expected = ["Phenylalanine"]
self.assertEqual(proteins(value), expected)
def test_phenylalanine_rna_sequence_2(self):
value = "UUC"
expected = ["Phenylalanine"]
self.assertEqual(proteins(value), expected)
def test_leucine_rna_sequence_1(self):
value = "UUA"
expected = ["Leucine"]
self.assertEqual(proteins(value), expected)
def test_leucine_rna_sequence_2(self):
value = "UUG"
expected = ["Leucine"]
self.assertEqual(proteins(value), expected)
def test_serine_rna_sequence_1(self):
value = "UCU"
expected = ["Serine"]
self.assertEqual(proteins(value), expected)
def METHOD_NAME(self):
value = "UCC"
expected = ["Serine"]
self.assertEqual(proteins(value), expected)
def test_serine_rna_sequence_3(self):
value = "UCA"
expected = ["Serine"]
self.assertEqual(proteins(value), expected)
def test_serine_rna_sequence_4(self):
value = "UCG"
expected = ["Serine"]
self.assertEqual(proteins(value), expected)
def test_tyrosine_rna_sequence_1(self):
value = "UAU"
expected = ["Tyrosine"]
self.assertEqual(proteins(value), expected)
def test_tyrosine_rna_sequence_2(self):
value = "UAC"
expected = ["Tyrosine"]
self.assertEqual(proteins(value), expected)
def test_cysteine_rna_sequence_1(self):
value = "UGU"
expected = ["Cysteine"]
self.assertEqual(proteins(value), expected)
def test_cysteine_rna_sequence_2(self):
value = "UGC"
expected = ["Cysteine"]
self.assertEqual(proteins(value), expected)
def test_tryptophan_rna_sequence(self):
value = "UGG"
expected = ["Tryptophan"]
self.assertEqual(proteins(value), expected)
def test_stop_codon_rna_sequence_1(self):
value = "UAA"
expected = []
self.assertEqual(proteins(value), expected)
def test_stop_codon_rna_sequence_2(self):
value = "UAG"
expected = []
self.assertEqual(proteins(value), expected)
def test_stop_codon_rna_sequence_3(self):
value = "UGA"
expected = []
self.assertEqual(proteins(value), expected)
def test_sequence_of_two_protein_codons_translates_into_proteins(self):
value = "UUUUUU"
expected = ["Phenylalanine", "Phenylalanine"]
self.assertEqual(proteins(value), expected)
def test_sequence_of_two_different_protein_codons_translates_into_proteins(self):
value = "UUAUUG"
expected = ["Leucine", "Leucine"]
self.assertEqual(proteins(value), expected)
def test_translate_rna_strand_into_correct_protein_list(self):
value = "AUGUUUUGG"
expected = ["Methionine", "Phenylalanine", "Tryptophan"]
self.assertEqual(proteins(value), expected)
def test_translation_stops_if_stop_codon_at_beginning_of_sequence(self):
value = "UAGUGG"
expected = []
self.assertEqual(proteins(value), expected)
def test_translation_stops_if_stop_codon_at_end_of_two_codon_sequence(self):
value = "UGGUAG"
expected = ["Tryptophan"]
self.assertEqual(proteins(value), expected)
def test_translation_stops_if_stop_codon_at_end_of_three_codon_sequence(self):
value = "AUGUUUUAA"
expected = ["Methionine", "Phenylalanine"]
self.assertEqual(proteins(value), expected)
def test_translation_stops_if_stop_codon_in_middle_of_three_codon_sequence(self):
value = "UGGUAGUGG"
expected = ["Tryptophan"]
self.assertEqual(proteins(value), expected)
def test_translation_stops_if_stop_codon_in_middle_of_six_codon_sequence(self):
value = "UGGUGUUAUUAAUGGUUU"
expected = ["Tryptophan", "Cysteine", "Tyrosine"]
self.assertEqual(proteins(value), expected) |
5,353 | drop event | # coding: utf-8
# /*##########################################################################
#
# Copyright (C) 2016-2018 European Synchrotron Radiation Facility
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
# ###########################################################################*/
from __future__ import absolute_import
__authors__ = ["V. Valls"]
__license__ = "MIT"
__date__ = "17/12/2021"
import logging
from typing import Optional
from silx.gui import qt
import html
import pyFAI.detectors
from pyFAI.detectors import Detector
from ..model.DetectorModel import DetectorModel
_logger = logging.getLogger(__name__)
class DetectorLabel(qt.QLabel):
"""Read-only widget to display a :class:`Detector`.
    It can be set up as
- a detector holder (see :meth:`setDetector`, :meth:`detector`)
- a view on top of a model (see :meth:`setDetectorModel`, :meth:`detectorModel`)
"""
_BASE_TEMPLATE = "<html><head/><body>%s</body></html>"
_MANUFACTURER_TEMPLATE = "<span style=\"vertical-align:sub;\">%s</span>"
_TOOLTIP_TEMPLATE = """
<html>
<ul style="margin-top: 0px; margin-bottom: 0px; margin-left: 0px; margin-right: 0px; -qt-list-indent: 0">
<li style="white-space:pre"><b>Model:</b> {model}</li>
<li style="white-space:pre"><b>Manufacturer:</b> {manufacturer}</li>
<li style="white-space:pre"><b>Type:</b> {kind}</li>
</ul>
</html>"""
_MODEL_TEMPLATE = "%s"
def __init__(self, parent=None):
super(DetectorLabel, self).__init__(parent)
self.__model: Optional[DetectorModel] = None
self.__detector: Optional[Detector] = None
def dragEnterEvent(self, event):
if self.__model is not None:
if event.mimeData().hasFormat("text/uri-list"):
event.acceptProposedAction()
def METHOD_NAME(self, event):
mimeData = event.mimeData()
if not mimeData.hasUrls():
qt.QMessageBox.critical(self, "Drop cancelled", "A file is expected")
return
urls = mimeData.urls()
if len(urls) > 1:
qt.QMessageBox.critical(self, "Drop cancelled", "A single file is expected")
return
try:
path = urls[0].toLocalFile()
detector = pyFAI.detectors.detector_factory(path)
except IOError as e:
_logger.error("Error while loading dropped URL %s", e, exc_info=True)
qt.QMessageBox.critical(self, "Drop cancelled", str(e))
return
except Exception as e:
_logger.error("Error while reading dropped URL %s", e, exc_info=True)
qt.QMessageBox.critical(self, "Drop cancelled", str(e))
return
if self.__model is None:
_logger.error("No model defined")
return
self.__model.setDetector(detector)
def __getModelName(self, detector: Detector):
if isinstance(detector, pyFAI.detectors.NexusDetector):
if hasattr(detector, "name"):
name = detector.name
if name is not None:
return name
detectorClass = detector.__class__
modelName = None
if hasattr(detectorClass, "aliases"):
if len(detectorClass.aliases) > 0:
modelName = detectorClass.aliases[0]
if modelName is None:
modelName = detectorClass.__name__
return modelName
def detector(self) -> Optional[Detector]:
if self.__detector is not None:
return self.__detector
if self.__model is not None:
detector = self.__model.detector()
return detector
return None
def __updateDisplay(self):
detector = self.detector()
if detector is None:
self.setText("No detector")
self.setToolTip("No detector")
return
if detector.__class__ is pyFAI.detectors.NexusDetector:
model = self.__getModelName(detector)
manufacturer = "Not specified"
kind = "Nexus definition"
if detector.filename:
kind = "%s (%s)" % (kind, detector.filename)
description = self._MANUFACTURER_TEMPLATE % html.escape("NeXus")
description += self._MODEL_TEMPLATE % html.escape(model)
data = {
"kind": html.escape(kind),
"manufacturer": html.escape(manufacturer),
"model": html.escape(model),
}
tooltip = self._TOOLTIP_TEMPLATE.format(**data)
elif detector.__class__ is pyFAI.detectors.Detector:
description = self._MODEL_TEMPLATE % "Custom detector"
tooltip = description
else:
manufacturer = detector.MANUFACTURER
if isinstance(manufacturer, list):
manufacturer = manufacturer[0]
model = self.__getModelName(detector)
description = self._MODEL_TEMPLATE % html.escape(model)
if manufacturer is not None:
manufacturer = html.escape(manufacturer)
description = self._MANUFACTURER_TEMPLATE % manufacturer + " " + description
else:
manufacturer = "Not specified"
if detector.__class__.__module__.startswith("pyFAI.detectors."):
kind = "pyFAI definition"
else:
kind = "Custom definition"
data = {
"kind": html.escape(kind),
"manufacturer": html.escape(manufacturer),
"model": html.escape(model),
}
tooltip = self._TOOLTIP_TEMPLATE.format(**data)
text = self._BASE_TEMPLATE % description
self.setText(text)
self.setToolTip(tooltip)
def setDetectorModel(self, model: DetectorModel):
self.__detector = None
if self.__model is not None:
self.__model.changed.disconnect(self.__modelChanged)
self.__model = model
if self.__model is not None:
self.__model.changed.connect(self.__modelChanged)
self.__modelChanged()
def __modelChanged(self):
self.__updateDisplay()
def detectorModel(self) -> Optional[DetectorModel]:
return self.__model
def setDetector(self, detector: Optional[Detector]):
self.__model = None
self.__detector = detector
self.__updateDisplay() |
5,354 | load texture | # Copyright 2020-2023 OpenDR European Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
from pyglet.window import pyglet
import math
import pyglet.gl
import os
from PIL import Image
import cv2
class Background:
def __init__(self, img_dir, img_fl, width, height, xpos, ypos, zpos, placement_colors):
self.xpos = xpos
self.ypos = ypos
self.zpos = zpos
self.labels_cls = placement_colors
self.angle = 0
self.size = 1
self.width = width
self.height = height
x = width / 2.0
y = height / 2.0
self.vlist = pyglet.graphics.vertex_list(4, ('v2f', [-x, -y, x, -y, -x, y, x, y]),
('t2f', [0, 0, 1, 0, 0, 1, 1, 1]))
self.img_dir = img_dir
self.img_fl = img_fl
self.select_texture()
def select_human_pos(self, models_data):
humans_pos2D = []
msks = []
for i in range(len(self.labels_cls)):
msk = cv2.inRange(self.labels, self.labels_cls[i] - 1, self.labels_cls[i] + 1)
msks.append(msk)
msk = msks[0]
for i in range(len(self.labels_cls)):
msk = cv2.bitwise_or(msk, msks[i])
msk_new = np.zeros((msk.shape))
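        # Crude erosion: keep a pixel only if every sampled neighbour in a
        # 13x13 window (stride 4) also lies inside the placement mask, so
        # models are never placed right at a mask edge.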
for i1 in range(msk.shape[0]):
for i2 in range(msk.shape[1]):
ok_pix = True
for j1 in range(-6, 7, 4):
for j2 in range(-6, 7, 4):
if ((i1 + j1) > 0) & ((i2 + j2) > 0) & ((i1 + j1) < msk.shape[0]) & ((i2 + j2) < msk.shape[1]):
if msk[i1 + j1][i2 + j2] == 0:
ok_pix = False
if ok_pix:
msk_new[i1][i2] = 255
ids = np.where(msk_new == 255)
pixs = list(zip(ids[0], ids[1]))
pixs = np.array(pixs)
pixs = pixs[(pixs[:, 0] > 50) & (pixs[:, 0] < (self.labels.shape[0] - 50)) & (pixs[:, 1] > 80) & (
pixs[:, 1] < (self.labels.shape[1] - 80))]
for j in range(len(models_data)):
id_l = 0
min_dist = 10000000000
pref_coords = [models_data[j]['img_pos_pref'][0] * self.img.size[1],
models_data[j]['img_pos_pref'][1] * self.img.size[0]]
for k in range(pixs.shape[0]):
dist = math.sqrt(
(pixs[k][0] - pref_coords[0]) * (pixs[k][0] - pref_coords[0]) + (pixs[k][1] - pref_coords[1]) * (
pixs[k][1] - pref_coords[1]))
if dist < min_dist:
id_l = k
min_dist = dist
pos2D = pixs[id_l]
humans_pos2D.append(pos2D)
pixs_new = []
for k in range(pixs.shape[0]):
if not ((pixs[k][0] > (pos2D[0] - 125)) & (pixs[k][1] > (pos2D[1] - 70)) & (
pixs[k][0] < (pos2D[0] + 125)) & (pixs[k][1] < (pos2D[1] + 70))):
pixs_new.append(pixs[k])
pixs = np.array(pixs_new)
for j in range(len(models_data)):
humans_pos2D[j] = np.array([humans_pos2D[j][1] / self.labels.shape[1],
(self.labels.shape[0] - humans_pos2D[j][0]) / self.labels.shape[0]])
models_data[j]['img_coords'] = humans_pos2D[j]
dst = abs(humans_pos2D[j][0] - 0.5) * 0.05 + humans_pos2D[j][1] * 0.975
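            # Empirical depth lookup (thresholds appear hand-tuned): positions
            # lower in the image get a larger z offset, i.e. are placed
            # nearer the camera, as expected for a ground-plane scene.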
if dst < 0.1:
d = self.zpos + 27.0
elif dst < 0.15:
d = self.zpos + 26.0
elif dst < 0.2:
d = self.zpos + 25.0
elif dst < 0.25:
d = self.zpos + 24.0
elif dst < 0.3:
d = self.zpos + 23.5
elif dst < 0.35:
d = self.zpos + 23.0
elif dst < 0.4:
d = self.zpos + 22.0
elif dst < 0.45:
d = self.zpos + 21.0
else:
d = self.zpos + 18.0
humans_pos2D[j][0] = humans_pos2D[j][0] * self.width - self.width / 2.0
humans_pos2D[j][1] = humans_pos2D[j][1] * self.height - self.height / 2.0
models_data[j]['pos_2D'] = np.array([humans_pos2D[j][0], humans_pos2D[j][1]])
models_data[j]['pos_3D'] = np.zeros(3)
models_data[j]['pos_3D'][2] = d
def set_dist(self, z_pos):
self.zpos = z_pos
def draw(self):
pyglet.gl.glPushMatrix()
pyglet.gl.glTranslatef(self.xpos, self.ypos, self.zpos, 0)
pyglet.gl.glScalef(self.size, self.size, self.size)
pyglet.gl.glColor3f(1, 1, 1)
pyglet.gl.glEnable(pyglet.gl.GL_TEXTURE_2D)
pyglet.gl.glBindTexture(pyglet.gl.GL_TEXTURE_2D, self.texture)
self.vlist.draw(pyglet.gl.GL_TRIANGLE_STRIP)
pyglet.gl.glDisable(pyglet.gl.GL_TEXTURE_2D)
pyglet.gl.glPopMatrix()
    def get_pos(self):
        return self.xpos, self.ypos, self.zpos
def select_texture(self):
texture_file = os.path.join(self.img_dir, 'rgb', self.img_fl)
labels_file = os.path.join(self.img_dir, 'segm', self.img_fl)
print(self.img_fl)
self.texture = self.METHOD_NAME(texture_file)
self.labels = cv2.imread(labels_file)
new_size = (1020, 340)
self.labels = cv2.resize(self.labels, new_size)
# self.labels_cls = [np.array([128, 64, 128]), np.array([192, 0, 0])] #kitti
# self.labels_cls = [np.array([128, 64, 128]), np.array([244, 35, 232]), np.array([152, 251, 152])] #kitti
def METHOD_NAME(self, filename):
self.img = Image.open(filename).transpose(Image.FLIP_TOP_BOTTOM)
new_size = (1020, 340)
self.img = self.img.resize(new_size)
textureIDs = (pyglet.gl.GLuint * 1)()
pyglet.gl.glGenTextures(1, textureIDs)
textureID = textureIDs[0]
# print('generating texture', textureID, 'from', filename)
pyglet.gl.glBindTexture(pyglet.gl.GL_TEXTURE_2D, textureID)
pyglet.gl.glTexParameterf(pyglet.gl.GL_TEXTURE_2D, pyglet.gl.GL_TEXTURE_WRAP_S, pyglet.gl.GL_REPEAT)
pyglet.gl.glTexParameterf(pyglet.gl.GL_TEXTURE_2D, pyglet.gl.GL_TEXTURE_WRAP_T, pyglet.gl.GL_REPEAT)
pyglet.gl.glTexParameterf(pyglet.gl.GL_TEXTURE_2D, pyglet.gl.GL_TEXTURE_MAG_FILTER, pyglet.gl.GL_NEAREST)
pyglet.gl.glTexParameterf(pyglet.gl.GL_TEXTURE_2D, pyglet.gl.GL_TEXTURE_MIN_FILTER, pyglet.gl.GL_NEAREST)
pyglet.gl.glTexImage2D(pyglet.gl.GL_TEXTURE_2D, 0, pyglet.gl.GL_RGB, self.img.size[0], self.img.size[1],
0, pyglet.gl.GL_RGB, pyglet.gl.GL_UNSIGNED_BYTE, self.img.tobytes())
pyglet.gl.glBindTexture(pyglet.gl.GL_TEXTURE_2D, 0)
return textureID |
5,355 | attrs | # Copyright (c) Meta Platforms, Inc. and affiliates.
# SPDX-License-Identifier: LGPL-2.1-or-later
import itertools
from typing import Generic, Iterator, List, Mapping, Sequence, TypeVar, Union
from drgndoc.parse import (
Class,
DocumentedNode,
Function,
Import,
ImportFrom,
Module,
Node,
Variable,
)
NodeT_co = TypeVar("NodeT_co", bound=Node, covariant=True)
class BoundNode(Generic[NodeT_co]):
def __init__(self, name: str, node: NodeT_co) -> None:
self.name = name
self.node = node
class ResolvedNode(Generic[NodeT_co]):
def __init__(
self,
modules: Sequence[BoundNode[Module]],
classes: Sequence[BoundNode[Class]],
name: str,
node: NodeT_co,
) -> None:
self.modules = modules
self.classes = classes
self.name = name
self.node = node
def qualified_name(self) -> str:
return ".".join(
itertools.chain(
(module.name for module in self.modules),
(class_.name for class_ in self.classes),
(self.name,),
)
)
def METHOD_NAME(self) -> Iterator["ResolvedNode[Node]"]:
if isinstance(self.node, Module):
modules = list(self.modules)
modules.append(BoundNode(self.name, self.node))
for attr, node in self.node.METHOD_NAME.items():
yield ResolvedNode(modules, self.classes, attr, node)
elif isinstance(self.node, Class):
classes = list(self.classes)
classes.append(BoundNode(self.name, self.node))
for attr, node in self.node.METHOD_NAME.items():
yield ResolvedNode(self.modules, classes, attr, node)
def attr(self, attr: str) -> "ResolvedNode[Node]":
if isinstance(self.node, Module):
modules = list(self.modules)
modules.append(BoundNode(self.name, self.node))
return ResolvedNode(modules, self.classes, attr, self.node.METHOD_NAME[attr])
elif isinstance(self.node, Class):
classes = list(self.classes)
classes.append(BoundNode(self.name, self.node))
return ResolvedNode(self.modules, classes, attr, self.node.METHOD_NAME[attr])
else:
raise KeyError(attr)
UnresolvedName = str
class Namespace:
def __init__(self, modules: Mapping[str, Module]) -> None:
self.modules = modules
# NB: this modifies the passed lists.
def _resolve_name(
self,
modules: List[BoundNode[Module]],
classes: List[BoundNode[Class]],
name_components: List[str],
) -> Union[ResolvedNode[DocumentedNode], UnresolvedName]:
name_components.reverse()
while name_components:
METHOD_NAME: Mapping[str, Node]
if classes:
METHOD_NAME = classes[-1].node.METHOD_NAME
elif modules:
METHOD_NAME = modules[-1].node.METHOD_NAME
else:
METHOD_NAME = self.modules
name = name_components.pop()
try:
node = METHOD_NAME[name]
except KeyError:
break
if isinstance(node, (Import, ImportFrom)):
classes.clear()
if isinstance(node, Import):
modules.clear()
elif isinstance(node, ImportFrom):
if node.level >= len(modules):
# Relative import beyond top-level package. Bail.
break
# Absolute import is level 0, which clears the whole list.
del modules[-node.level :]
name_components.append(node.name)
if node.module is not None:
name_components.extend(reversed(node.module.split(".")))
elif name_components:
if isinstance(node, Module):
assert not classes
modules.append(BoundNode(name, node))
elif isinstance(node, Class):
classes.append(BoundNode(name, node))
else:
break
else:
assert isinstance(node, (Module, Class, Function, Variable))
return ResolvedNode(modules, classes, name, node)
return ".".join(
itertools.chain(
(module.name for module in modules),
(class_.name for class_ in classes),
(name,),
reversed(name_components),
)
)
def resolve_global_name(
self, name: str
) -> Union[ResolvedNode[DocumentedNode], UnresolvedName]:
return self._resolve_name([], [], name.split("."))
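    # Behaviour summary (inferred from _resolve_name above): a dotted name is
    # resolved component by component, following Import/ImportFrom nodes along
    # the way; on success a ResolvedNode is returned, otherwise the remaining
    # dotted name comes back as a plain UnresolvedName string.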
def resolve_name_in_scope(
self,
modules: Sequence[BoundNode[Module]],
classes: Sequence[BoundNode[Class]],
name: str,
) -> Union[ResolvedNode[DocumentedNode], UnresolvedName]:
name_components = name.split(".")
attr = name_components[0]
if classes and attr in classes[-1].node.METHOD_NAME:
classes = list(classes)
elif modules and attr in modules[-1].node.METHOD_NAME:
classes = []
else:
return name
modules = list(modules)
return self._resolve_name(modules, classes, name_components) |
5,356 | get hook | import os
import tempfile
import typing as t
from airflow.models import BaseOperator
from airflow.providers.apache.spark.hooks.spark_submit import SparkSubmitHook
from airflow.utils.context import Context
import sqlmesh
from sqlmesh.engines import commands
from sqlmesh.schedulers.airflow.operators.targets import BaseTarget
class SQLMeshSparkSubmitOperator(BaseOperator):
"""The operator which evaluates a SQLMesh model snapshot using a dedicated Spark job instance.
It requires the "spark-submit" binary to be available in the PATH or the spark_home
attribute to be set in the connection extras.
Args:
target: The target that will be executed by this operator instance.
application_name: The name of the submitted application (default sqlmesh-spark).
spark_conf: Spark configuration properties.
connection_id: The Airflow connection ID as described in
https://airflow.apache.org/docs/apache-airflow-providers-apache-spark/stable/connections/spark.html
(default spark_default).
total_executor_cores: (Standalone & Mesos only) The total number of cores for all executors.
executor_cores: (Standalone, YARN and Kubernetes only) The number of cores per executor.
executor_memory: The amount of memory allocated to each executor (e.g. 1024M, 2G).
driver_memory: The amount of memory allocated to the driver (e.g. 1024M, 2G).
keytab: The full path to the file that contains the keytab.
principal: The name of the Kerberos principal used for the keytab.
proxy_user: The name of a user which should be impersonated when submitting the application.
num_executors: The number of executors that will be allocated to the application.
"""
def __init__(
self,
*,
target: BaseTarget,
application_name: str = "sqlmesh-spark",
spark_conf: t.Optional[t.Dict[str, t.Any]] = None,
connection_id: str = "spark_default",
total_executor_cores: t.Optional[int] = None,
executor_cores: t.Optional[int] = None,
executor_memory: t.Optional[str] = None,
driver_memory: t.Optional[str] = None,
keytab: t.Optional[str] = None,
principal: t.Optional[str] = None,
proxy_user: t.Optional[str] = None,
num_executors: t.Optional[int] = None,
**kwargs: t.Any,
) -> None:
super().__init__(**kwargs)
self._target = target
self._application_name = application_name
self._spark_conf = spark_conf
self._total_executor_cores = total_executor_cores
self._executor_cores = executor_cores
self._executor_memory = executor_memory
self._driver_memory = driver_memory
self._keytab = keytab
self._principal = principal
self._proxy_user = proxy_user
self._num_executors = num_executors
self._connection_id = connection_id
self._application = os.path.join(
os.path.dirname(os.path.abspath(sqlmesh.__file__)), "engines/spark/app.py"
)
self._hook: t.Optional[SparkSubmitHook] = None
def execute(self, context: Context) -> None:
command_payload = self._target.serialized_command_payload(context)
with tempfile.TemporaryDirectory() as tmp:
payload_file_path = os.path.join(tmp, commands.COMMAND_PAYLOAD_FILE_NAME)
with open(payload_file_path, "w") as payload_fd:
payload_fd.write(command_payload)
if self._hook is None:
self._hook = self.METHOD_NAME(
self._target.command_type,
payload_file_path,
self._target.ddl_concurrent_tasks,
)
self._hook.submit(self._application)
self._target.post_hook(context)
def on_kill(self) -> None:
if self._hook is None:
self._hook = self.METHOD_NAME(None, None, None)
self._hook.on_kill()
def METHOD_NAME(
self,
command_type: t.Optional[commands.CommandType],
command_payload_file_path: t.Optional[str],
ddl_concurrent_tasks: t.Optional[int],
) -> SparkSubmitHook:
application_args = {
"dialect": "spark",
"command_type": command_type.value if command_type else None,
"ddl_concurrent_tasks": ddl_concurrent_tasks,
"payload_path": command_payload_file_path.split("/")[-1]
if command_payload_file_path
else None,
}
return SparkSubmitHook(
conf=self._spark_conf,
conn_id=self._connection_id,
total_executor_cores=self._total_executor_cores,
executor_cores=self._executor_cores,
executor_memory=self._executor_memory,
driver_memory=self._driver_memory,
keytab=self._keytab,
principal=self._principal,
proxy_user=self._proxy_user,
name=self._application_name,
num_executors=self._num_executors,
application_args=[f"--{k}={v}" for k, v in application_args.items() if v is not None],
files=command_payload_file_path,
) |
5,357 | featured | # -*- coding: utf-8 -*-
############################ Copyrights and license ############################
# #
# Copyright 2018 Steve Kowalik <steven@wedontsleep.org> #
# #
# This file is part of PyGithub. #
# http://pygithub.readthedocs.io/ #
# #
# PyGithub is free software: you can redistribute it and/or modify it under #
# the terms of the GNU Lesser General Public License as published by the Free #
# Software Foundation, either version 3 of the License, or (at your option) #
# any later version. #
# #
# PyGithub is distributed in the hope that it will be useful, but WITHOUT ANY #
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS #
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more #
# details. #
# #
# You should have received a copy of the GNU Lesser General Public License #
# along with PyGithub. If not, see <http://www.gnu.org/licenses/>. #
# #
################################################################################
from __future__ import absolute_import
import github.GithubObject
class Topic(github.GithubObject.NonCompletableGithubObject):
"""
This class represents topics as used by https://github.com/topics. The object reference can be found at https://developer.github.com/v3/search/#search-topics
"""
def __repr__(self):
return self.get__repr__({"name": self._name.value})
@property
def name(self):
"""
:type: string
"""
return self._name.value
@property
def display_name(self):
"""
:type: string
"""
return self._display_name.value
@property
def short_description(self):
"""
:type: string
"""
return self._short_description.value
@property
def description(self):
"""
:type: string
"""
return self._description.value
@property
def created_by(self):
"""
:type: string
"""
return self._created_by.value
@property
def released(self):
"""
:type: string
"""
return self._released.value
@property
def created_at(self):
"""
:type: datetime.datetime
"""
return self._created_at.value
@property
def updated_at(self):
"""
:type: datetime.datetime
"""
return self._updated_at.value
@property
def METHOD_NAME(self):
"""
:type: bool
"""
return self._featured.value
@property
def curated(self):
"""
:type: bool
"""
return self._curated.value
@property
def score(self):
"""
:type: float
"""
return self._score.value
def _initAttributes(self):
self._name = github.GithubObject.NotSet
self._display_name = github.GithubObject.NotSet
self._short_description = github.GithubObject.NotSet
self._description = github.GithubObject.NotSet
self._created_by = github.GithubObject.NotSet
self._released = github.GithubObject.NotSet
self._created_at = github.GithubObject.NotSet
self._updated_at = github.GithubObject.NotSet
self._featured = github.GithubObject.NotSet
self._curated = github.GithubObject.NotSet
self._score = github.GithubObject.NotSet
def _useAttributes(self, attributes):
if "name" in attributes: # pragma no branch
self._name = self._makeStringAttribute(attributes["name"])
if "display_name" in attributes: # pragma no branch
self._display_name = self._makeStringAttribute(attributes["display_name"])
if "short_description" in attributes: # pragma no branch
self._short_description = self._makeStringAttribute(
attributes["short_description"]
)
if "description" in attributes: # pragma no branch
self._description = self._makeStringAttribute(attributes["description"])
if "created_by" in attributes: # pragma no branch
self._created_by = self._makeStringAttribute(attributes["created_by"])
if "released" in attributes: # pragma no branch
self._released = self._makeStringAttribute(attributes["released"])
if "created_at" in attributes: # pragma no branch
self._created_at = self._makeDatetimeAttribute(attributes["created_at"])
if "updated_at" in attributes: # pragma no branch
self._updated_at = self._makeDatetimeAttribute(attributes["updated_at"])
if "featured" in attributes: # pragma no branch
self._featured = self._makeBoolAttribute(attributes["featured"])
if "curated" in attributes: # pragma no branch
self._curated = self._makeBoolAttribute(attributes["curated"])
if "score" in attributes: # pragma no branch
self._score = self._makeFloatAttribute(attributes["score"]) |
5,358 | test lookup blob | # Copyright 2010-2023 The pygit2 contributors
#
# This file is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License, version 2,
# as published by the Free Software Foundation.
#
# In addition to the permissions in the GNU General Public License,
# the authors give you unlimited permission to link the compiled
# version of this file into combinations with other programs,
# and to distribute those combinations without any restriction
# coming from the use of this file. (The General Public License
# restrictions do apply in other respects; for example, they cover
# modification of the file, and distribution when not linked into
# a combined executable.)
#
# This file is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; see the file COPYING. If not, write to
# the Free Software Foundation, 51 Franklin Street, Fifth Floor,
# Boston, MA 02110-1301, USA.
# Standard Library
import binascii
import os
from pathlib import Path
import sys
import tempfile
import pygit2
import pytest
from . import utils
HEAD_SHA = '784855caf26449a1914d2cf62d12b9374d76ae78'
PARENT_SHA = 'f5e5aa4e36ab0fe62ee1ccc6eb8f79b866863b87' # HEAD^
BLOB_HEX = 'af431f20fc541ed6d5afede3e2dc7160f6f01f16'
BLOB_RAW = binascii.unhexlify(BLOB_HEX.encode('ascii'))
BLOB_OID = pygit2.Oid(raw=BLOB_RAW)
def test_is_empty(barerepo):
assert not barerepo.is_empty
def test_is_bare(barerepo):
assert barerepo.is_bare
def test_head(barerepo):
head = barerepo.head
assert HEAD_SHA == head.target.hex
assert type(head) == pygit2.Reference
assert not barerepo.head_is_unborn
assert not barerepo.head_is_detached
def test_set_head(barerepo):
# Test setting a detached HEAD.
barerepo.set_head(pygit2.Oid(hex=PARENT_SHA))
assert barerepo.head.target.hex == PARENT_SHA
# And test setting a normal HEAD.
barerepo.set_head("refs/heads/master")
assert barerepo.head.name == "refs/heads/master"
assert barerepo.head.target.hex == HEAD_SHA
def test_read(barerepo):
with pytest.raises(TypeError):
barerepo.read(123)
utils.assertRaisesWithArg(KeyError, '1' * 40, barerepo.read, '1' * 40)
ab = barerepo.read(BLOB_OID)
a = barerepo.read(BLOB_HEX)
assert ab == a
assert (pygit2.GIT_OBJ_BLOB, b'a contents\n') == a
a2 = barerepo.read('7f129fd57e31e935c6d60a0c794efe4e6927664b')
assert (pygit2.GIT_OBJ_BLOB, b'a contents 2\n') == a2
a_hex_prefix = BLOB_HEX[:4]
a3 = barerepo.read(a_hex_prefix)
assert (pygit2.GIT_OBJ_BLOB, b'a contents\n') == a3
def test_write(barerepo):
data = b"hello world"
# invalid object type
with pytest.raises(ValueError):
barerepo.write(pygit2.GIT_OBJ_ANY, data)
oid = barerepo.write(pygit2.GIT_OBJ_BLOB, data)
assert type(oid) == pygit2.Oid
def test_contains(barerepo):
with pytest.raises(TypeError):
123 in barerepo
assert BLOB_OID in barerepo
assert BLOB_HEX in barerepo
assert BLOB_HEX[:10] in barerepo
assert ('a' * 40) not in barerepo
assert ('a' * 20) not in barerepo
def test_iterable(barerepo):
objects = list(barerepo)
oid = pygit2.Oid(hex=BLOB_HEX)
assert oid in objects
def METHOD_NAME(barerepo):
with pytest.raises(TypeError):
barerepo[123]
assert barerepo[BLOB_OID].hex == BLOB_HEX
a = barerepo[BLOB_HEX]
assert b'a contents\n' == a.read_raw()
assert BLOB_HEX == a.hex
assert pygit2.GIT_OBJ_BLOB == a.type
def test_lookup_blob_prefix(barerepo):
a = barerepo[BLOB_HEX[:5]]
assert b'a contents\n' == a.read_raw()
assert BLOB_HEX == a.hex
assert pygit2.GIT_OBJ_BLOB == a.type
def test_lookup_commit(barerepo):
commit_sha = '5fe808e8953c12735680c257f56600cb0de44b10'
commit = barerepo[commit_sha]
assert commit_sha == commit.hex
assert pygit2.GIT_OBJ_COMMIT == commit.type
assert commit.message == ('Second test data commit.\n\n'
'This commit has some additional text.\n')
def test_lookup_commit_prefix(barerepo):
commit_sha = '5fe808e8953c12735680c257f56600cb0de44b10'
commit_sha_prefix = commit_sha[:7]
too_short_prefix = commit_sha[:3]
commit = barerepo[commit_sha_prefix]
assert commit_sha == commit.hex
assert pygit2.GIT_OBJ_COMMIT == commit.type
assert 'Second test data commit.\n\n' 'This commit has some additional text.\n' == commit.message
with pytest.raises(ValueError):
barerepo.__getitem__(too_short_prefix)
def test_expand_id(barerepo):
commit_sha = '5fe808e8953c12735680c257f56600cb0de44b10'
expanded = barerepo.expand_id(commit_sha[:7])
assert commit_sha == expanded.hex
@utils.refcount
def test_lookup_commit_refcount(barerepo):
start = sys.getrefcount(barerepo)
commit_sha = '5fe808e8953c12735680c257f56600cb0de44b10'
commit = barerepo[commit_sha]
del commit
end = sys.getrefcount(barerepo)
assert start == end
def test_get_path(barerepo_path):
barerepo, path = barerepo_path
directory = Path(barerepo.path).resolve()
assert directory == path.resolve()
def test_get_workdir(barerepo):
assert barerepo.workdir is None
def test_revparse_single(barerepo):
parent = barerepo.revparse_single('HEAD^')
assert parent.hex == PARENT_SHA
def test_hash(barerepo):
data = "foobarbaz"
hashed_sha1 = pygit2.hash(data)
written_sha1 = barerepo.create_blob(data)
assert hashed_sha1 == written_sha1
def test_hashfile(barerepo):
data = "bazbarfoo"
handle, tempfile_path = tempfile.mkstemp()
with os.fdopen(handle, 'w') as fh:
fh.write(data)
hashed_sha1 = pygit2.hashfile(tempfile_path)
Path(tempfile_path).unlink()
written_sha1 = barerepo.create_blob(data)
assert hashed_sha1 == written_sha1
def test_conflicts_in_bare_repository(barerepo):
def create_conflict_file(repo, branch, content):
oid = repo.create_blob(content.encode('utf-8'))
tb = repo.TreeBuilder()
tb.insert('conflict', oid, pygit2.GIT_FILEMODE_BLOB)
tree = tb.write()
sig = pygit2.Signature('Author', 'author@example.com')
commit = repo.create_commit(branch.name, sig, sig,
'Conflict', tree, [branch.target])
assert commit is not None
return commit
b1 = barerepo.create_branch('b1', barerepo.head.peel())
c1 = create_conflict_file(barerepo, b1, 'ASCII - abc')
b2 = barerepo.create_branch('b2', barerepo.head.peel())
c2 = create_conflict_file(barerepo, b2, 'Unicode - äüö')
index = barerepo.merge_commits(c1, c2)
assert index.conflicts is not None
# ConflictCollection does not allow calling len(...) on it directly so
# we have to calculate length by iterating over its entries
assert sum(1 for _ in index.conflicts) == 1
(a, t, o) = index.conflicts['conflict']
diff = barerepo.merge_file_from_index(a, t, o)
assert diff == '''<<<<<<< conflict
ASCII - abc
=======
Unicode - äüö
>>>>>>> conflict
''' |
5,359 | forward | # Copyright (c) 2022 Microsoft
# Licensed under The MIT License [see LICENSE for details]
import torch
import torch.nn as nn
import torch.nn.functional as F
from apex.normalization import FusedLayerNorm as LayerNorm
class set_torch_seed(object):
def __init__(self, seed):
assert isinstance(seed, int)
self.rng_state = self.get_rng_state()
torch.manual_seed(seed)
if torch.cuda.is_available():
torch.cuda.manual_seed(seed)
def get_rng_state(self):
state = {"torch_rng_state": torch.get_rng_state()}
if torch.cuda.is_available():
state["cuda_rng_state"] = torch.cuda.get_rng_state()
return state
def set_rng_state(self, state):
torch.set_rng_state(state["torch_rng_state"])
if torch.cuda.is_available():
torch.cuda.set_rng_state(state["cuda_rng_state"])
def __enter__(self):
return self
def __exit__(self, *exc):
self.set_rng_state(self.rng_state)
def make_experts(args, embed_dim, expert_ffn_dim):
world_size = (
1
if not torch.distributed.is_initialized()
else torch.distributed.get_world_size()
)
expert_list = []
ddp_rank = args.ddp_rank
start_seed = torch.randint(1000000, (1,)).item()
# at least as many experts as GPUs
if args.moe_expert_count >= world_size:
assert (
args.moe_expert_count % world_size == 0
), f"{args.moe_expert_count}, {world_size}"
local_moe_expert_count = args.moe_expert_count // world_size
for i in range(local_moe_expert_count):
with set_torch_seed(start_seed + ddp_rank * local_moe_expert_count + i):
expert_list.append(
FeedForwardNetwork(
embed_dim,
expert_ffn_dim,
args.activation_fn,
args.dropout,
args.activation_dropout,
args.subln,
)
)
else:
assert (
world_size % args.moe_expert_count == 0
), f"{world_size}, {args.moe_expert_count}"
with set_torch_seed(start_seed + ddp_rank % args.moe_expert_count):
expert_list.append(
FeedForwardNetwork(
embed_dim,
expert_ffn_dim,
args.activation_fn,
args.dropout,
args.activation_dropout,
args.subln,
)
)
experts = nn.ModuleList(expert_list)
return experts
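# Sketch of the expected `args` namespace (attribute names taken from the
# reads above; the values are purely illustrative):
# from types import SimpleNamespace
# args = SimpleNamespace(ddp_rank=0, moe_expert_count=8, activation_fn="gelu",
#     dropout=0.1, activation_dropout=0.0, subln=True)
# experts = make_experts(args, embed_dim=768, expert_ffn_dim=3072)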
def get_activation_fn(activation):
if activation == "relu":
return F.relu
elif activation == "gelu":
return F.gelu
else:
raise NotImplementedError
class FeedForwardNetwork(nn.Module):
def __init__(
self,
embed_dim,
ffn_dim,
activation_fn,
dropout,
activation_dropout,
subln=False,
):
super().__init__()
self.embed_dim = embed_dim
self.activation_fn = get_activation_fn(activation=str(activation_fn))
self.activation_dropout_module = torch.nn.Dropout(
activation_dropout, inplace=True
)
self.dropout_module = torch.nn.Dropout(dropout, inplace=True)
self.fc1 = nn.Linear(self.embed_dim, ffn_dim)
self.fc2 = nn.Linear(ffn_dim, self.embed_dim)
self.ffn_layernorm = LayerNorm(ffn_dim) if subln else None
def reset_parameters(self):
self.fc1.reset_parameters()
self.fc2.reset_parameters()
if self.ffn_layernorm is not None:
self.ffn_layernorm.reset_parameters()
def METHOD_NAME(self, x):
x_shape = x.shape
x = x.reshape(-1, x.size(-1))
x = self.fc1(x)
x = self.activation_fn(x.float()).type_as(x)
x = self.activation_dropout_module(x)
if self.ffn_layernorm is not None:
x = self.ffn_layernorm(x)
x = self.fc2(x)
x = x.view(x_shape)
x = self.dropout_module(x)
return x |
5,360 | write | ##############################################################################
# For copyright and license notices, see __manifest__.py file in module root
# directory
##############################################################################
import json
from odoo import models, fields, api, _
from lxml import etree
class ProductProduct(models.Model):
_inherit = "product.product"
qty = fields.Float(
compute='_compute_qty',
)
def METHOD_NAME(self, vals):
"""
If vals only contains qty and sale_quotation_products is set in the
context, this is a dummy write; we handle it here so that users without
write permission on products can still modify the quantity
"""
# we use 'qty' in vals rather than vals.get('qty') because qty = 0 could be
# passed and we still want to enter this branch
if self._context.get('sale_quotation_products') and \
len(vals) == 1 and 'qty' in vals:
# we do this so the detailed pack price is shown when the pack is selected
# in the tree view, without carrying the price over to the parent product
if self._context.get('whole_pack_price', False):
context = self._context.copy()
context.pop('whole_pack_price', None)
self = self.with_context(context)
# instead of using sudo we do it this way so the creating user is stored
# correctly, and also because SUPERADMIN might not have permission to
# edit products
# self = self.sudo()
qty = vals.get('qty')
for rec in self:
rec._set_qty(qty)
return True
return super().METHOD_NAME(vals)
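# Illustrative call that takes the dummy-write path above (the context keys
# match the ones checked in the method; `order` is hypothetical):
# product.with_context(
#     sale_quotation_products=True, active_id=order.id).write({'qty': 3.0})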
def _compute_qty(self):
sale_order_id = self._context.get('active_id', False)
if not sale_order_id:
self.update({'qty': 0.0})
return
sale_order_lines = self.env['sale.order'].browse(
sale_order_id).order_line
for rec in self:
lines = sale_order_lines.filtered(
lambda so: so.product_id == rec)
qty = sum([line.product_uom._compute_quantity(
line.product_uom_qty,
rec.uom_id) for line in lines])
rec.qty = qty
def _set_qty(self, qty):
self.ensure_one()
sale_order_id = self._context.get('active_id', False)
if sale_order_id:
lines = self.env['sale.order.line'].search([
('order_id', '=', sale_order_id),
('product_id', '=', self.id)])
if lines:
(lines - lines[0]).unlink()
if lines[0].product_uom != self.uom_id:
qty = self.uom_id._compute_quantity(
qty, lines[0].product_uom)
lines[0].METHOD_NAME({
'product_uom_qty': qty,
})
else:
self.env['sale.order'].browse(
sale_order_id).add_products(self.id, qty)
def action_product_form(self):
self.ensure_one()
return self.get_formview_action()
def action_product_add_one(self):
sale_order_id = self._context.get('active_id', False)
if not sale_order_id:
return True
for rec in self:
# find a sale order line; if the line's UoM differs from the product's,
# convert one unit of the line's UoM into the product's UoM before adding
line = self.env['sale.order.line'].search([
('order_id', '=', sale_order_id),
('product_id', '=', rec.id)], limit=1)
qty = 1.0
if line.product_uom != rec.uom_id:
qty = line.product_uom._compute_quantity(
qty, rec.uom_id)
rec.qty += qty
@api.model
def _get_view_cache_key(self, view_id=None, view_type='form', **options):
"""The override of _get_view changing the sale_quotation_products
makes the view cache dependent on sale_quotation_products"""
key = super()._get_view_cache_key(view_id, view_type, **options)
if self._context.get('sale_quotation_products', False):
key += ('sale_quotation_products',)
return key
@api.model
def _get_view(self, view_id=None, view_type='form', **options):
"""
If we came from sale order, we send in context 'force_product_edit'
and we change tree view to make editable and also field qty
"""
arch, view = super()._get_view(view_id, view_type, **options)
sale_quotation_products = self._context.get('sale_quotation_products')
if sale_quotation_products and view_type == 'tree':
# make all fields not editable
for node in arch.xpath("//field[@name]"):
node.set('readonly', '1')
modifiers = json.loads(node.get("modifiers") or "{}")
modifiers['readonly'] = True
node.set("modifiers", json.dumps(modifiers))
# add qty field
placeholder = arch.xpath("//field[1]")[0]
placeholder.addprevious(
etree.Element('field', {
'name': 'qty',
'string': _('Quantity'),
# we force editable no matter user rights
'readonly': '0',
}))
# add button add one
placeholder.addprevious(
etree.Element('button', {
'name': 'action_product_add_one',
'type': 'object',
'icon': 'fa-plus',
'title': _('Add one'),
}))
# add button to open form
placeholder = arch.xpath("//tree")[0]
placeholder.append(
etree.Element('button', {
'name': 'action_product_form',
'type': 'object',
'icon': 'fa-external-link',
'title': _('Open Product Form View'),
'groups': 'base.group_user',
}))
# make tree view editable
for node in arch.xpath("/tree"):
node.set('edit', 'true')
node.set('create', 'false')
node.set('editable', 'top')
return arch, view |
5,361 | edge attr func | # This code is part of Qiskit.
#
# (C) Copyright IBM 2017, 2018, 2020.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
# pylint: disable=invalid-name
"""
Visualization function for DAG circuit representation.
"""
from rustworkx.visualization import graphviz_draw
from qiskit.dagcircuit.dagnode import DAGOpNode, DAGInNode, DAGOutNode
from qiskit.circuit import Qubit
from qiskit.utils import optionals as _optionals
from qiskit.exceptions import InvalidFileError
from .exceptions import VisualizationError
@_optionals.HAS_GRAPHVIZ.require_in_call
def dag_drawer(dag, scale=0.7, filename=None, style="color"):
"""Plot the directed acyclic graph (dag) to represent operation dependencies
in a quantum circuit.
This function calls the :func:`~rustworkx.visualization.graphviz_draw` function from the
``rustworkx`` package to draw the DAG.
Args:
dag (DAGCircuit): The dag to draw.
scale (float): scaling factor
filename (str): file path to save image to (format inferred from name)
style (str): 'plain': B&W graph
'color' (default): color input/output/op nodes
Returns:
PIL.Image: if in Jupyter notebook and not saving to file,
otherwise None.
Raises:
VisualizationError: when style is not recognized.
InvalidFileError: when filename provided is not valid
Example:
.. plot::
:include-source:
from qiskit import QuantumRegister, ClassicalRegister, QuantumCircuit
from qiskit.dagcircuit import DAGCircuit
from qiskit.converters import circuit_to_dag
from qiskit.visualization import dag_drawer
q = QuantumRegister(3, 'q')
c = ClassicalRegister(3, 'c')
circ = QuantumCircuit(q, c)
circ.h(q[0])
circ.cx(q[0], q[1])
circ.measure(q[0], c[0])
circ.rz(0.5, q[1]).c_if(c, 2)
dag = circuit_to_dag(circ)
dag_drawer(dag)
"""
# NOTE: use type str checking to avoid potential cyclical import
# the two tradeoffs here are that it will not handle subclasses and it is
# slower (which doesn't matter for a visualization function)
type_str = str(type(dag))
if "DAGDependency" in type_str:
graph_attrs = {"dpi": str(100 * scale)}
def node_attr_func(node):
if style == "plain":
return {}
if style == "color":
n = {}
n["label"] = str(node.node_id) + ": " + str(node.name)
if node.name == "measure":
n["color"] = "blue"
n["style"] = "filled"
n["fillcolor"] = "lightblue"
if node.name == "barrier":
n["color"] = "black"
n["style"] = "filled"
n["fillcolor"] = "green"
if getattr(node.op, "_directive", False):
n["color"] = "black"
n["style"] = "filled"
n["fillcolor"] = "red"
if getattr(node.op, "condition", None):
n["label"] = str(node.node_id) + ": " + str(node.name) + " (conditional)"
n["color"] = "black"
n["style"] = "filled"
n["fillcolor"] = "lightgreen"
return n
else:
raise VisualizationError("Unrecognized style %s for the dag_drawer." % style)
METHOD_NAME = None
else:
register_bit_labels = {
bit: f"{reg.name}[{idx}]"
for reg in list(dag.qregs.values()) + list(dag.cregs.values())
for (idx, bit) in enumerate(reg)
}
graph_attrs = {"dpi": str(100 * scale)}
def node_attr_func(node):
if style == "plain":
return {}
if style == "color":
n = {}
if isinstance(node, DAGOpNode):
n["label"] = node.name
n["color"] = "blue"
n["style"] = "filled"
n["fillcolor"] = "lightblue"
if isinstance(node, DAGInNode):
if isinstance(node.wire, Qubit):
label = register_bit_labels.get(
node.wire, f"q_{dag.find_bit(node.wire).index}"
)
else:
label = register_bit_labels.get(
node.wire, f"c_{dag.find_bit(node.wire).index}"
)
n["label"] = label
n["color"] = "black"
n["style"] = "filled"
n["fillcolor"] = "green"
if isinstance(node, DAGOutNode):
if isinstance(node.wire, Qubit):
label = register_bit_labels.get(
node.wire, f"q[{dag.find_bit(node.wire).index}]"
)
else:
label = register_bit_labels.get(
node.wire, f"c[{dag.find_bit(node.wire).index}]"
)
n["label"] = label
n["color"] = "black"
n["style"] = "filled"
n["fillcolor"] = "red"
return n
else:
raise VisualizationError("Invalid style %s" % style)
def METHOD_NAME(edge):
e = {}
if isinstance(edge, Qubit):
label = register_bit_labels.get(edge, f"q_{dag.find_bit(edge).index}")
else:
label = register_bit_labels.get(edge, f"c_{dag.find_bit(edge).index}")
e["label"] = label
return e
image_type = None
if filename:
if "." not in filename:
raise InvalidFileError("Parameter 'filename' must be in format 'name.extension'")
image_type = filename.split(".")[-1]
return graphviz_draw(
dag._multi_graph,
node_attr_func,
METHOD_NAME,
graph_attrs,
filename,
image_type,
) |
5,362 | modelstr | #!/usr/bin/env python
#############################################################################
# DellEmc Z9264F
#
# Platform and model specific eeprom subclass, inherits from the base class,
# and provides the followings:
# - the eeprom format definition
# - specific encoder/decoder if there is special need
#############################################################################
try:
import os.path
from sonic_eeprom import eeprom_tlvinfo
except ImportError as e:
raise ImportError(str(e) + "- required module not found")
class Eeprom(eeprom_tlvinfo.TlvInfoDecoder):
def __init__(self):
self.eeprom_path = None
for b in (0, 1):
f = '/sys/class/i2c-adapter/i2c-{0}/{0}-0050/eeprom'.format(b)
if os.path.exists(f):
self.eeprom_path = f
break
if self.eeprom_path is None:
return
super(Eeprom, self).__init__(self.eeprom_path, 0, '', True)
self.eeprom_tlv_dict = dict()
try:
self.eeprom_data = self.read_eeprom()
except Exception:
self.eeprom_data = "N/A"
raise RuntimeError("Eeprom is not Programmed")
else:
eeprom = self.eeprom_data
if not self.is_valid_tlvinfo_header(eeprom):
return
total_length = (eeprom[9] << 8) | eeprom[10]
tlv_index = self._TLV_INFO_HDR_LEN
tlv_end = self._TLV_INFO_HDR_LEN + total_length
while (tlv_index + 2) < len(eeprom) and tlv_index < tlv_end:
if not self.is_valid_tlv(eeprom[tlv_index:]):
break
tlv = eeprom[tlv_index:tlv_index + 2
+ eeprom[tlv_index + 1]]
code = "0x%02X" % tlv[0]
if tlv[0] == self._TLV_CODE_VENDOR_EXT:
value = str((tlv[2] << 24) | (tlv[3] << 16) |
(tlv[4] << 8) | tlv[5])
value += tlv[6:6 + tlv[1]].decode('ascii')
else:
name, value = self.decoder(None, tlv)
self.eeprom_tlv_dict[code] = value
if eeprom[tlv_index] == self._TLV_CODE_CRC_32:
break
tlv_index += eeprom[tlv_index+1] + 2
def serial_number_str(self):
"""
Returns the serial number
"""
(is_valid, results) = self.get_tlv_field(
self.eeprom_data, self._TLV_CODE_SERIAL_NUMBER)
if not is_valid:
return "N/A"
return results[2].decode('ascii')
def base_mac_addr(self):
"""
Returns the base mac address found in the system EEPROM
"""
(is_valid, t) = self.get_tlv_field(
self.eeprom_data, self._TLV_CODE_MAC_BASE)
if not is_valid or t[1] != 6:
return "N/A"
return ":".join(["{:02x}".format(T) for T in t[2]]).upper()
def METHOD_NAME(self):
"""
Returns the Model name
"""
(is_valid, results) = self.get_tlv_field(
self.eeprom_data, self._TLV_CODE_PRODUCT_NAME)
if not is_valid:
return "N/A"
return results[2].decode('ascii')
def part_number_str(self):
"""
Returns the part number
"""
(is_valid, results) = self.get_tlv_field(
self.eeprom_data, self._TLV_CODE_PART_NUMBER)
if not is_valid:
return "N/A"
return results[2].decode('ascii')
def serial_str(self):
"""
Returns the servicetag number
"""
(is_valid, results) = self.get_tlv_field(
self.eeprom_data, self._TLV_CODE_SERVICE_TAG)
if not is_valid:
return "N/A"
return results[2].decode('ascii')
def revision_str(self):
"""
Returns the device revision
"""
(is_valid, results) = self.get_tlv_field(
self.eeprom_data, self._TLV_CODE_DEVICE_VERSION)
if not is_valid:
return "N/A"
return results[2].decode('ascii')
def system_eeprom_info(self):
"""
Returns a dictionary, where keys are the type code defined in
ONIE EEPROM format and values are their corresponding values
found in the system EEPROM.
"""
return self.eeprom_tlv_dict |
5,363 | num qubits | # Copyright 2021 The Cirq Developers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import AbstractSet, Union, Any, Optional, Tuple, TYPE_CHECKING, Dict
import numpy as np
from cirq import protocols, value
from cirq.ops import raw_types
from cirq.type_workarounds import NotImplementedType
if TYPE_CHECKING:
import cirq
from cirq.protocols.decompose_protocol import DecomposeResult
@value.value_equality
class ParallelGate(raw_types.Gate):
"""Augments existing gates to be applied on one or more groups of qubits."""
def __init__(self, sub_gate: 'cirq.Gate', num_copies: int) -> None:
"""Inits ParallelGate.
Args:
sub_gate: The gate to apply.
num_copies: Number of copies of the gate to apply in parallel.
Raises:
ValueError: If gate is not a single qubit gate or num_copies <= 0.
"""
if sub_gate.METHOD_NAME() != 1:
# TODO: If needed, support for multi-qubit sub_gates can be
# added by updating the circuit diagram plotting logic.
raise ValueError("gate must be a single qubit gate")
if not num_copies > 0:
raise ValueError("gate must be applied at least once.")
self._sub_gate = sub_gate
self._num_copies = num_copies
def METHOD_NAME(self) -> int:
return self.sub_gate.METHOD_NAME() * self._num_copies
@property
def sub_gate(self) -> 'cirq.Gate':
return self._sub_gate
@property
def num_copies(self) -> int:
return self._num_copies
def _decompose_(self, qubits: Tuple['cirq.Qid', ...]) -> 'DecomposeResult':
if len(qubits) != self.METHOD_NAME():
raise ValueError(f"len(qubits)={len(qubits)} should be {self.METHOD_NAME()}")
step = self.sub_gate.METHOD_NAME()
return [self.sub_gate(*qubits[i : i + step]) for i in range(0, len(qubits), step)]
def with_gate(self, sub_gate: 'cirq.Gate') -> 'ParallelGate':
"""ParallelGate with same number of copies but a new gate"""
return ParallelGate(sub_gate, self._num_copies)
def with_num_copies(self, num_copies: int) -> 'ParallelGate':
"""ParallelGate with same sub_gate but different num_copies"""
return ParallelGate(self.sub_gate, num_copies)
def __repr__(self) -> str:
return f'cirq.ParallelGate(sub_gate={self.sub_gate!r}, num_copies={self._num_copies})'
def __str__(self) -> str:
return f'{self.sub_gate} x {self._num_copies}'
def _value_equality_values_(self) -> Any:
return self.sub_gate, self._num_copies
def _has_unitary_(self) -> bool:
return protocols.has_unitary(self.sub_gate)
def _is_parameterized_(self) -> bool:
return protocols.is_parameterized(self.sub_gate)
def _parameter_names_(self) -> AbstractSet[str]:
return protocols.parameter_names(self.sub_gate)
def _resolve_parameters_(
self, resolver: 'cirq.ParamResolver', recursive: bool
) -> 'ParallelGate':
return self.with_gate(
sub_gate=protocols.resolve_parameters(self.sub_gate, resolver, recursive)
)
def _unitary_(self) -> Union[np.ndarray, NotImplementedType]:
# Obtain the unitary for the single qubit gate
single_unitary = protocols.unitary(self.sub_gate, NotImplemented)
# Make sure we actually have a matrix
if single_unitary is NotImplemented:
return single_unitary
# Create a unitary which corresponds to applying the gate
# unitary _num_copies times. This will blow up memory fast.
unitary = single_unitary
for _ in range(self._num_copies - 1):
unitary = np.kron(unitary, single_unitary)
return unitary
def _trace_distance_bound_(self) -> Optional[float]:
if protocols.is_parameterized(self.sub_gate):
return None
angle = self._num_copies * np.arcsin(protocols.trace_distance_bound(self.sub_gate))
if angle >= np.pi * 0.5:
return 1.0
return np.sin(angle)
def _circuit_diagram_info_(
self, args: 'cirq.CircuitDiagramInfoArgs'
) -> 'cirq.CircuitDiagramInfo':
diagram_info = protocols.circuit_diagram_info(self.sub_gate, args, NotImplemented)
if diagram_info == NotImplemented:
return diagram_info
# Include symbols for every qubit instead of just one.
wire_symbols = tuple(diagram_info.wire_symbols) * self._num_copies
return protocols.CircuitDiagramInfo(
wire_symbols=wire_symbols, exponent=diagram_info.exponent, connected=False
)
def __pow__(self, exponent: Any) -> 'ParallelGate':
"""Raises underlying gate to a power, applying same number of copies.
For extrapolatable gate G this means the following two are equivalent:
(G ** 1.5) x k or (G x k) ** 1.5
Args:
exponent: The amount to scale the gate's effect by.
Returns:
ParallelGate with same num_copies with the scaled underlying gate.
"""
new_gate = protocols.pow(self.sub_gate, exponent, NotImplemented)
if new_gate is NotImplemented:
return NotImplemented
return self.with_gate(new_gate)
def _json_dict_(self) -> Dict[str, Any]:
return protocols.obj_to_dict_helper(self, attribute_names=["sub_gate", "num_copies"])
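# Illustrative use (assumes a standard cirq environment; cirq.X is a
# single-qubit gate, so three copies act on three qubits):
# gate = ParallelGate(cirq.X, num_copies=3)
# op = gate.on(*cirq.LineQubit.range(3))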
def parallel_gate_op(gate: 'cirq.Gate', *targets: 'cirq.Qid') -> 'cirq.Operation':
"""Constructs a ParallelGate using gate and applies to all given qubits
Args:
gate: The gate to apply
*targets: The qubits on which the ParallelGate should be applied.
Returns:
ParallelGate(gate, len(targets)).on(*targets)
"""
return ParallelGate(gate, len(targets)).on(*targets) |
5,364 | make link node | """Define text roles for GitHub
* ghissue - Issue
* ghpull - Pull Request
* ghuser - User
Adapted from bitbucket example here:
https://bitbucket.org/birkenfeld/sphinx-contrib/src/tip/bitbucket/sphinxcontrib/bitbucket.py
Authors
-------
* Doug Hellmann
* Min RK
"""
#
# Original Copyright (c) 2010 Doug Hellmann. All rights reserved.
#
from docutils import nodes, utils
from docutils.parsers.rst.roles import set_classes
def METHOD_NAME(rawtext, app, type, slug, options):
"""Create a link to a github resource.
:param rawtext: Text being replaced with link node.
:param app: Sphinx application context
:param type: Link type (issues, changeset, etc.)
:param slug: ID of the thing to link to
:param options: Options dictionary passed to role func.
"""
try:
base = app.config.github_project_url
if not base:
raise AttributeError
if not base.endswith('/'):
base += '/'
except AttributeError as err:
raise ValueError('github_project_url configuration value is not set (%s)' % str(err))
ref = base + type + '/' + slug + '/'
set_classes(options)
prefix = "#"
if type == 'pull':
prefix = "PR " + prefix
node = nodes.reference(rawtext, prefix + utils.unescape(slug), refuri=ref,
**options)
return node
def ghissue_role(name, rawtext, text, lineno, inliner, options={}, content=[]):
"""Link to a GitHub issue.
Returns 2 part tuple containing list of nodes to insert into the
document and a list of system messages. Both are allowed to be
empty.
:param name: The role name used in the document.
:param rawtext: The entire markup snippet, with role.
:param text: The text marked with the role.
:param lineno: The line number where rawtext appears in the input.
:param inliner: The inliner instance that called us.
:param options: Directive options for customization.
:param content: The directive content for customization.
"""
try:
issue_num = int(text)
if issue_num <= 0:
raise ValueError
except ValueError:
msg = inliner.reporter.error(
'GitHub issue number must be a number greater than or equal to 1; '
'"%s" is invalid.' % text, line=lineno)
prb = inliner.problematic(rawtext, rawtext, msg)
return [prb], [msg]
app = inliner.document.settings.env.app
# app.info('issue %r' % text)
if 'pull' in name.lower():
category = 'pull'
elif 'issue' in name.lower():
category = 'issues'
else:
msg = inliner.reporter.error(
'GitHub roles include "ghpull" and "ghissue", '
'"%s" is invalid.' % name, line=lineno)
prb = inliner.problematic(rawtext, rawtext, msg)
return [prb], [msg]
node = METHOD_NAME(rawtext, app, category, str(issue_num), options)
return [node], []
def ghuser_role(name, rawtext, text, lineno, inliner, options={}, content=[]):
"""Link to a GitHub user.
Returns 2 part tuple containing list of nodes to insert into the
document and a list of system messages. Both are allowed to be
empty.
:param name: The role name used in the document.
:param rawtext: The entire markup snippet, with role.
:param text: The text marked with the role.
:param lineno: The line number where rawtext appears in the input.
:param inliner: The inliner instance that called us.
:param options: Directive options for customization.
:param content: The directive content for customization.
"""
app = inliner.document.settings.env.app
# app.info('user link %r' % text)
ref = 'https://www.github.com/' + text
node = nodes.reference(rawtext, text, refuri=ref, **options)
return [node], []
def ghcommit_role(name, rawtext, text, lineno, inliner,
options={}, content=[]):
"""Link to a GitHub commit.
Returns 2 part tuple containing list of nodes to insert into the
document and a list of system messages. Both are allowed to be
empty.
:param name: The role name used in the document.
:param rawtext: The entire markup snippet, with role.
:param text: The text marked with the role.
:param lineno: The line number where rawtext appears in the input.
:param inliner: The inliner instance that called us.
:param options: Directive options for customization.
:param content: The directive content for customization.
"""
app = inliner.document.settings.env.app
# app.info('user link %r' % text)
try:
base = app.config.github_project_url
if not base:
raise AttributeError
if not base.endswith('/'):
base += '/'
except AttributeError as err:
raise ValueError('github_project_url configuration value is not set (%s)' % str(err))
ref = base + text
node = nodes.reference(rawtext, text[:6], refuri=ref, **options)
return [node], []
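# Illustrative conf.py wiring (the extension name and URL are assumptions):
# extensions = ['github']  # whatever name this module is importable as
# github_project_url = 'https://github.com/org/repo/'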
def setup(app):
"""Install the plugin.
:param app: Sphinx application context.
"""
from sphinx.util import logging
logger = logging.getLogger(__name__)
logger.info('Initializing GitHub plugin')
app.add_role('ghissue', ghissue_role)
app.add_role('ghpull', ghissue_role)
app.add_role('ghuser', ghuser_role)
app.add_role('ghcommit', ghcommit_role)
app.add_config_value('github_project_url', None, 'env')
return |
5,365 | log output of job pods | import logging
from ocs_ci.framework import config
from ocs_ci.ocs import constants
from ocs_ci.ocs import exceptions
from ocs_ci.ocs.ocp import OCP
from ocs_ci.ocs.resources.ocs import OCS
from ocs_ci.utility.utils import TimeoutIterator
log = logging.getLogger(__name__)
def get_job_obj(name, namespace=None):
"""
Get OCS instance for job of given job name.
Args:
name (str): The name of the job
namespace (str): The namespace to look in
Returns:
OCS: A job OCS instance
"""
if namespace is None:
namespace = config.ENV_DATA["cluster_namespace"]
ocp_obj = OCP(kind=constants.JOB, namespace=namespace)
ocp_dict = ocp_obj.get(resource_name=name)
return OCS(**ocp_dict)
def get_all_jobs(namespace=None):
"""
Get all the jobs in a specific namespace
Args:
namespace (str): Name of cluster namespace(default: config.ENV_DATA["cluster_namespace"])
Returns:
list: list of dictionaries of the job OCS instances.
"""
if namespace is None:
namespace = config.ENV_DATA["cluster_namespace"]
ocp_obj = OCP(kind=constants.JOB, namespace=namespace)
return ocp_obj.get()["items"]
def get_jobs_with_prefix(prefix, namespace=None):
"""
Get all the jobs that start with a specific prefix
Args:
prefix (str): The prefix to search in the job names
namespace (str): Name of cluster namespace
(default: config.ENV_DATA["cluster_namespace"] if None provided)
Returns:
list: list of dictionaries of the job OCS instances that start with the prefix
"""
if namespace is None:
namespace = config.ENV_DATA["cluster_namespace"]
ocp_obj = OCP(kind=constants.JOB, namespace=namespace)
jobs_dict = get_all_jobs(namespace)
job_names = [item["metadata"]["name"] for item in jobs_dict]
job_names_with_prefix = [
job_name for job_name in job_names if job_name.startswith(prefix)
]
jobs_with_prefix = []
for job_name_with_prefix in job_names_with_prefix:
ocp_dict = ocp_obj.get(resource_name=job_name_with_prefix)
jobs_with_prefix.append(OCS(**ocp_dict))
return jobs_with_prefix
def wait_for_job_completion(job_name, namespace, timeout=600, sleep_time=30):
"""
Wait for given k8s Job to complete.
Args:
job_name (str): name of the job to wait for
namespace (str): name of the namespace where the job is running
timeout (int): timeout in seconds
sleep_time (int): sleep time between consequent job status checks in
seconds
Raises:
TimeoutExpiredError: When job fails to complete in given time
"""
ocp_job = OCP(kind="Job", namespace=namespace, resource_name=job_name)
try:
for live_job_d in TimeoutIterator(
timeout=timeout, sleep=sleep_time, func=ocp_job.get
):
job_status = live_job_d.get("status")
if job_status is None:
log.debug("job status not (yet) available")
continue
if "completionTime" in job_status:
log.info(
"job %s finished at %s", job_name, job_status["completionTime"]
)
break
except exceptions.TimeoutExpiredError as ex:
error_msg = f"job/{job_name} failed to complete in {timeout} seconds"
log.warning(error_msg)
raise exceptions.TimeoutExpiredError(error_msg) from ex
def get_job_pods(job_name, namespace, names_only=False):
"""
Get list of pods of given job (via job-name pod selector).
Args:
job_name (str): name of the job to wait for
namespace (str): name of the namespace where the job is running
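names_only (bool): if True, return a list of pod names only instead of
full item dicts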
Returns:
list: list of pod names (if names_only is True) or full item dicts
"""
ocp_pod = OCP(kind="Pod", namespace=namespace)
oc_result = ocp_pod.get(selector=f"job-name={job_name}")
if oc_result["kind"] != "List":
error_msg = "oc get should return List item"
log.error(error_msg)
log.debug(oc_result)
raise exceptions.UnexpectedBehaviour(error_msg)
if names_only:
result = [item["metadata"]["name"] for item in oc_result["items"]]
else:
result = oc_result["items"]
return result
def METHOD_NAME(job_name, namespace):
"""
Log (via standard logger) output of all pods of given job. Expected to be
used in case of error, when evidence needs to be captured in logs.
Args:
job_name (str): name of the job to wait for
namespace (str): name of the namespace where the job is running
"""
job_pods = get_job_pods(
job_name=job_name,
namespace=namespace,
names_only=True,
)
ocp_pod = OCP(kind="Pod", namespace=namespace)
for pod_name in job_pods:
log.info(
"fetching output of pod %s of job/%s (see DEBUG logs)",
pod_name,
job_name,
)
output = ocp_pod.get_logs(pod_name)
log.debug(output) |
5,366 | add callback | from os import listdir, path, remove
from enigma import eConsoleAppContainer
from Components.Harddisk import harddiskmanager
from Tools.Directories import resolveFilename, SCOPE_LIBDIR
from boxbranding import getImageDistro
opkgDestinations = []
opkgStatusPath = ''
def opkgExtraDestinations():
global opkgDestinations
return ''.join([" --add-dest %s:%s" % (i, i) for i in opkgDestinations])
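# e.g. with opkgDestinations == ['/media/usb'] this returns
# " --add-dest /media/usb:/media/usb" (illustrative)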
def opkgAddDestination(mountpoint):
global opkgDestinations
if mountpoint not in opkgDestinations:
opkgDestinations.append(mountpoint)
print("[Ipkg] Added to OPKG destinations:", mountpoint)
def onPartitionChange(why, part):
global opkgDestinations
global opkgStatusPath
mountpoint = path.normpath(part.mountpoint)
if mountpoint and not mountpoint.startswith('/media/net'):
if why == 'add':
if opkgStatusPath == '':
# recent opkg versions
opkgStatusPath = 'var/lib/opkg/status'
if not path.exists(path.join('/', opkgStatusPath)):
# older opkg versions
opkgStatusPath = resolveFilename(SCOPE_LIBDIR, 'opkg/status')
if path.exists(path.join(mountpoint, opkgStatusPath)):
opkgAddDestination(mountpoint)
elif why == 'remove':
try:
opkgDestinations.remove(mountpoint)
print("[Ipkg] Removed from OPKG destinations:", mountpoint)
except:
pass
harddiskmanager.on_partition_list_change.append(onPartitionChange)
for part in harddiskmanager.getMountedPartitions():
onPartitionChange('add', part)
class IpkgComponent:
EVENT_INSTALL = 0
EVENT_DOWNLOAD = 1
EVENT_INFLATING = 2
EVENT_CONFIGURING = 3
EVENT_REMOVE = 4
EVENT_UPGRADE = 5
EVENT_LISTITEM = 9
EVENT_DONE = 10
EVENT_ERROR = 11
EVENT_MODIFIED = 12
CMD_INSTALL = 0
CMD_LIST = 1
CMD_REMOVE = 2
CMD_UPDATE = 3
CMD_UPGRADE = 4
CMD_UPGRADE_LIST = 5
def __init__(self, ipkg='opkg'):
self.ipkg = ipkg
self.cmd = eConsoleAppContainer()
self.cache = None
self.callbackList = []
self.setCurrentCommand()
def setCurrentCommand(self, command=None):
self.currentCommand = command
def runCmdEx(self, cmd):
self.runCmd("%s %s" % (opkgExtraDestinations(), cmd))
def runCmd(self, cmd):
print("[IPKG] executing", self.ipkg, cmd)
self.cmd.appClosed.append(self.cmdFinished)
self.cmd.dataAvail.append(self.cmdData)
if self.cmd.execute("%s %s" % (self.ipkg, cmd)):
self.cmdFinished(-1)
def startCmd(self, cmd, args=None):
if cmd == self.CMD_UPDATE:
for fn in listdir('/var/lib/opkg'):
if fn.startswith(getImageDistro()):
remove('/var/lib/opkg/' + fn)
self.runCmdEx("update")
elif cmd == self.CMD_UPGRADE:
append = ""
if args["test_only"]:
append = " -test"
self.runCmdEx("upgrade %s >/home/root/ipkgupgrade.log" % append)
elif cmd == self.CMD_LIST:
self.fetchedList = []
if args['installed_only']:
self.runCmdEx("list_installed")
else:
self.runCmd("list")
elif cmd == self.CMD_INSTALL:
self.runCmd("install %s" % args['package'])
elif cmd == self.CMD_REMOVE:
self.runCmd("remove %s" % args['package'])
elif cmd == self.CMD_UPGRADE_LIST:
self.fetchedList = []
self.runCmdEx("list-upgradable")
self.setCurrentCommand(cmd)
def cmdFinished(self, retval):
self.callCallbacks(self.EVENT_DONE)
self.cmd.appClosed.remove(self.cmdFinished)
self.cmd.dataAvail.remove(self.cmdData)
def cmdData(self, data):
# print "data:", data
data = data.decode()
if self.cache is None:
self.cache = data
else:
self.cache += data
if '\n' in data:
splitcache = self.cache.split('\n')
if self.cache[-1] == '\n':
iteration = splitcache
self.cache = None
else:
iteration = splitcache[:-1]
self.cache = splitcache[-1]
for mydata in iteration:
if mydata != '':
self.parseLine(mydata)
def parseLine(self, data):
if self.currentCommand in (self.CMD_LIST, self.CMD_UPGRADE_LIST):
item = data.split(' - ', 2)
if item[0] not in ('Collected errors:', ' * opkg_conf_load: Could not lock /var/lib/opkg/lock: Resource temporarily unavailable.'):
self.fetchedList.append(item)
self.callCallbacks(self.EVENT_LISTITEM, item)
return
try:
if data.startswith('Downloading'):
self.callCallbacks(self.EVENT_DOWNLOAD, data.split(' ', 5)[1].strip())
elif data.startswith('Upgrading'):
self.callCallbacks(self.EVENT_UPGRADE, data.split(' ', 2)[1])
elif data.startswith('Installing'):
self.callCallbacks(self.EVENT_INSTALL, data.split(' ', 2)[1])
elif data.startswith('Removing'):
self.callCallbacks(self.EVENT_REMOVE, data.split(' ', 3)[2])
elif data.startswith('Configuring'):
self.callCallbacks(self.EVENT_CONFIGURING, data.split(' ', 2)[1])
elif data.startswith('An error occurred'):
self.callCallbacks(self.EVENT_ERROR, None)
elif data.startswith('Collected errors'):
self.callCallbacks(self.EVENT_ERROR, None)
elif data.startswith('Failed to download'):
self.callCallbacks(self.EVENT_ERROR, None)
elif data.startswith('ipkg_download: ERROR:'):
self.callCallbacks(self.EVENT_ERROR, None)
elif data.find('Configuration file \'') >= 0:
# Note: the config file update question doesn't end with a newline, so
# if we get multiple config file update questions, the next ones
# don't necessarily start at the beginning of a line
self.callCallbacks(self.EVENT_MODIFIED, data.split(' \'', 3)[1][:-1])
except Exception as ex:
print("[Ipkg] Failed to parse: '%s'" % data)
print("[Ipkg]", ex)
def callCallbacks(self, event, param=None):
for callback in self.callbackList:
callback(event, param)
def METHOD_NAME(self, callback):
self.callbackList.append(callback)
def removeCallback(self, callback):
self.callbackList.remove(callback)
def getFetchedList(self):
return self.fetchedList
def stop(self):
self.cmd.kill()
def isRunning(self):
return self.cmd.running()
def write(self, what):
if what:
# We expect unterminated commands, so terminate them with a newline
what += "\n"
self.cmd.write(what, len(what)) |
5,367 | with type | # Copyright 2018, The TensorFlow Federated Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Defines the implementation of the base Computation interface."""
from typing import Any, Optional
from tensorflow_federated.proto.v0 import computation_pb2 as pb
from tensorflow_federated.python.common_libs import py_typecheck
from tensorflow_federated.python.core.impl.compiler import building_blocks
from tensorflow_federated.python.core.impl.computation import computation_base
from tensorflow_federated.python.core.impl.computation import function_utils
from tensorflow_federated.python.core.impl.context_stack import context_stack_base
from tensorflow_federated.python.core.impl.context_stack import context_stack_impl
from tensorflow_federated.python.core.impl.types import computation_types
from tensorflow_federated.python.core.impl.types import type_serialization
class ConcreteComputation(computation_base.Computation):
"""A representation of a `pb.Computation` in the `tff.Computation` interface.
This implementation exposes methods to retrieve the backing `pb.Computation`,
as well as the Python representation of this protocol buffer represented by
an instance of `building_blocks.ComputationBuildingBlock`. Leverages the
implementation of `__call__` inherited from `function_utils.ConcreteFunction`
to pass `self` to the currently installed context.
"""
@classmethod
def get_proto(cls, value: 'ConcreteComputation') -> pb.Computation:
py_typecheck.check_type(value, cls)
return value._computation_proto # pylint: disable=protected-access
@classmethod
def METHOD_NAME(
cls,
value: 'ConcreteComputation',
type_spec: computation_types.FunctionType,
) -> 'ConcreteComputation':
py_typecheck.check_type(value, cls)
py_typecheck.check_type(type_spec, computation_types.Type)
# Ensure we are assigning a type-safe signature.
value.type_signature.check_assignable_from(type_spec)
# pylint: disable=protected-access
return cls(
value._computation_proto, value._context_stack, annotated_type=type_spec
)
# pylint: enable=protected-access
@classmethod
def from_building_block(
cls, building_block: building_blocks.ComputationBuildingBlock
) -> 'ConcreteComputation':
"""Converts a computation building block to a computation impl."""
py_typecheck.check_type(
building_block, building_blocks.ComputationBuildingBlock
)
return cls(
building_block.proto,
context_stack_impl.context_stack,
annotated_type=building_block.type_signature, # pytype: disable=wrong-arg-types
)
def to_building_block(self):
# TODO: b/161560999 - currently destroys annotated type.
# This should perhaps be fixed by adding `type_parameter` to `from_proto`.
return building_blocks.ComputationBuildingBlock.from_proto(
self._computation_proto
)
def to_compiled_building_block(self):
return building_blocks.CompiledComputation(
self._computation_proto, type_signature=self.type_signature
)
def __init__(
self,
computation_proto: pb.Computation,
context_stack: context_stack_base.ContextStack,
annotated_type: Optional[computation_types.FunctionType] = None,
):
"""Constructs a new instance of ConcreteComputation from the computation_proto.
Args:
computation_proto: The protocol buffer that represents the computation, an
instance of pb.Computation.
context_stack: The context stack to use.
annotated_type: Optional, type information with additional annotations
that replaces the information in `computation_proto.type`.
Raises:
TypeError: If `annotated_type` is not `None` and is not compatible with
`computation_proto.type`.
ValueError: If `computation_proto.type` is `None`.
"""
py_typecheck.check_type(computation_proto, pb.Computation)
py_typecheck.check_type(context_stack, context_stack_base.ContextStack)
if computation_proto.type is None:
raise ValueError('Expected `computation_proto.type` to not be `None`.')
type_spec = type_serialization.deserialize_type(computation_proto.type)
if annotated_type is not None:
if type_spec is None or not type_spec.is_assignable_from(annotated_type):
raise TypeError(
'annotated_type not compatible with computation_proto.type\n'
f'computation_proto.type: {type_spec}\n'
f'annotated_type: {annotated_type}'
)
type_spec = annotated_type
if not isinstance(type_spec, computation_types.FunctionType):
raise TypeError(
f'{type_spec} is not a functional type, from proto: '
f'{computation_proto}'
)
self._type_signature = type_spec
self._context_stack = context_stack
self._computation_proto = computation_proto
def __eq__(self, other: Any) -> bool:
if self is other:
return True
elif not isinstance(other, ConcreteComputation):
return NotImplemented
return self._computation_proto == other._computation_proto
@property
def type_signature(self) -> computation_types.FunctionType:
return self._type_signature
def __call__(self, *args, **kwargs):
arg = function_utils.pack_args(
self._type_signature.parameter, # pytype: disable=attribute-error
args,
kwargs,
)
return self._context_stack.current.invoke(self, arg)
def __hash__(self) -> int:
return hash(self._computation_proto.SerializeToString(deterministic=True)) |
5,368 | update user | import hashlib
import logging
from django.conf import settings
from django.utils.translation import gettext_lazy as _
from googleapiclient.errors import HttpError
from members.models import Member
from utils.google_api import get_directory_api
logger = logging.getLogger(__name__)
class GSuiteUserService:
def __init__(self, directory_api=None):
self._directory_api = directory_api
@property
def directory_api(self):
if self._directory_api is not None:
return self._directory_api
return get_directory_api()
def create_user(self, member: Member):
"""Create a new GSuite user based on the provided data.
:param member: The member that gets an account
:return returns a tuple with the password and id of the created user
"""
plain_password = Member.objects.make_random_password(length=15)
# Google only supports sha-1, md5 or crypt as hash functions[0] for the initial password.
# Because this password should be changed on first login and is safely sent to Google over
# https, we just use sha-1 for simplicity. GitHub code scanning gave a warning about this
# but we have set it to ignore the 'problem'.
# [0]: https://developers.google.com/admin-sdk/directory/reference/rest/v1/users#User.FIELDS.hash_function
digest_password = hashlib.sha1(plain_password.encode("utf-8")).hexdigest()
try:
response = (
self.directory_api.users()
.insert(
body={
"name": {
"familyName": member.last_name,
"givenName": member.first_name,
},
"primaryEmail": f"{member.username}@{settings.GSUITE_MEMBERS_DOMAIN}",
"password": digest_password,
"hashFunction": "SHA-1",
"changePasswordAtNextLogin": "true",
"externalIds": [{"value": f"{member.pk}", "type": "login_id"}],
"includeInGlobalAddressList": "false",
"orgUnitPath": "/",
},
)
.execute()
)
except HttpError as e:
if e.resp.status == 409:
return self.METHOD_NAME(member, member.username)
raise e
return response["primaryEmail"], plain_password
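# Hypothetical usage sketch (wiring and values assumed, not part of this module):
#   service = GSuiteUserService()
#   email, initial_password = service.create_user(member)
#   # -> ("jdoe@<GSUITE_MEMBERS_DOMAIN>", a 15-character random password)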
def METHOD_NAME(self, member: Member, username: str):
response = (
self.directory_api.users()
.patch(
body={
"suspended": "false",
"primaryEmail": f"{member.username}@{settings.GSUITE_MEMBERS_DOMAIN}",
},
userKey=f"{username}@{settings.GSUITE_MEMBERS_DOMAIN}",
)
.execute()
)
if username != member.username:
self.directory_api.users().aliases().delete(
userKey=f"{member.username}@{settings.GSUITE_MEMBERS_DOMAIN}",
alias=f"{username}@{settings.GSUITE_MEMBERS_DOMAIN}",
).execute()
return response["primaryEmail"], _("known by the user")
def suspend_user(self, username):
"""Suspend the user in GSuite.
:param username: username of the user
"""
self.directory_api.users().patch(
body={
"suspended": "true",
},
userKey=f"{username}@{settings.GSUITE_MEMBERS_DOMAIN}",
).execute()
def delete_user(self, email):
"""Delete the user from GSuite.
:param email: primary email of the user
"""
self.directory_api.users().delete(userKey=email).execute()
def get_suspended_users(self):
"""Get all the suspended users."""
response = (
self.directory_api.users()
.list(domain=settings.GSUITE_MEMBERS_DOMAIN, query="isSuspended=true")
.execute()
)
return response.get("users", []) |
5,369 | get train dataloader | import os
import math
import torch
import random
import pandas as pd
from pathlib import Path
from collections import Counter
import torch
import torch.nn as nn
from torch.utils.data import DataLoader, DistributedSampler
from torch.distributed import is_initialized
from torch.nn.utils.rnn import pad_sequence
from ..model import *
from .dataset import FluentCommandsDataset
class DownstreamExpert(nn.Module):
"""
Used to handle downstream-specific operations
e.g. downstream forward, metric computation, contents to log
"""
def __init__(self, upstream_dim, downstream_expert, expdir, **kwargs):
super(DownstreamExpert, self).__init__()
self.upstream_dim = upstream_dim
self.datarc = downstream_expert['datarc']
self.modelrc = downstream_expert['modelrc']
self.get_dataset()
self.train_dataset = FluentCommandsDataset(self.train_df, self.base_path, self.Sy_intent)
self.dev_dataset = FluentCommandsDataset(self.valid_df, self.base_path, self.Sy_intent)
self.test_dataset = FluentCommandsDataset(self.test_df, self.base_path, self.Sy_intent)
model_cls = eval(self.modelrc['select'])
model_conf = self.modelrc.get(self.modelrc['select'], {})
self.projector = nn.Linear(upstream_dim, self.modelrc['projector_dim'])
self.model = model_cls(
input_dim = self.modelrc['projector_dim'],
output_dim = sum(self.values_per_slot),
**model_conf,
)
self.objective = nn.CrossEntropyLoss()
self.expdir = expdir
self.register_buffer('best_score', torch.zeros(1))
def get_dataset(self):
self.base_path = self.datarc['file_path']
train_df = pd.read_csv(os.path.join(self.base_path, "data", "train_data.csv"))
valid_df = pd.read_csv(os.path.join(self.base_path, "data", "valid_data.csv"))
test_df = pd.read_csv(os.path.join(self.base_path, "data", "test_data.csv"))
Sy_intent = {"action": {}, "object": {}, "location": {}}
values_per_slot = []
for slot in ["action", "object", "location"]:
slot_values = Counter(train_df[slot])
for index, value in enumerate(slot_values):
Sy_intent[slot][value] = index
Sy_intent[slot][index] = value
values_per_slot.append(len(slot_values))
self.values_per_slot = values_per_slot
self.Sy_intent = Sy_intent
self.train_df = train_df
self.valid_df = valid_df
self.test_df = test_df
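# Illustrative shape of the mapping built above (slot values assumed):
#   Sy_intent["action"] == {"activate": 0, 0: "activate", "deactivate": 1, 1: "deactivate"}
# Each slot dict maps value->index and index->value in the same dict, which is
# exactly what idx2slots() in forward() relies on when decoding predictions.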
def METHOD_NAME(self, dataset):
sampler = DistributedSampler(dataset) if is_initialized() else None
return DataLoader(
dataset, batch_size=self.datarc['train_batch_size'],
shuffle=(sampler is None), sampler=sampler,
num_workers=self.datarc['num_workers'],
collate_fn=dataset.collate_fn
)
def _get_eval_dataloader(self, dataset):
return DataLoader(
dataset, batch_size=self.datarc['eval_batch_size'],
shuffle=False, num_workers=self.datarc['num_workers'],
collate_fn=dataset.collate_fn
)
def get_train_dataloader(self):
return self.METHOD_NAME(self.train_dataset)
def get_dev_dataloader(self):
return self._get_eval_dataloader(self.dev_dataset)
def get_test_dataloader(self):
return self._get_eval_dataloader(self.test_dataset)
# Interface
def get_dataloader(self, mode):
return eval(f'self.get_{mode}_dataloader')()
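# A safer equivalent without eval would be: getattr(self, f"get_{mode}_dataloader")()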
# Interface
def forward(self, mode, features, labels, filenames, records, **kwargs):
labels = [torch.LongTensor(label) for label in labels]
features_len = torch.IntTensor([len(feat) for feat in features]).to(device=features[0].device)
features = pad_sequence(features, batch_first=True)
features = self.projector(features)
intent_logits, _ = self.model(features, features_len)
intent_loss = 0
start_index = 0
predicted_intent = []
labels = torch.stack(labels).to(features.device)
for slot in range(len(self.values_per_slot)):
end_index = start_index + self.values_per_slot[slot]
subset = intent_logits[:, start_index:end_index]
intent_loss += self.objective(subset, labels[:, slot])
predicted_intent.append(subset.max(1)[1])
start_index = end_index
predicted_intent = torch.stack(predicted_intent, dim=1)
records['acc'] += (predicted_intent == labels).prod(1).view(-1).cpu().float().tolist()
records['intent_loss'].append(intent_loss.item())
def idx2slots(indices: torch.Tensor):
action_idx, object_idx, location_idx = indices.cpu().tolist()
return (
self.Sy_intent["action"][action_idx],
self.Sy_intent["object"][object_idx],
self.Sy_intent["location"][location_idx],
)
records["filename"] += filenames
records["predict"] += list(map(idx2slots, predicted_intent))
records["truth"] += list(map(idx2slots, labels))
return intent_loss
# interface
def log_records(self, mode, records, logger, global_step, **kwargs):
save_names = []
for key in ["acc", "intent_loss"]:
values = records[key]
average = torch.FloatTensor(values).mean().item()
logger.add_scalar(
f'fluent_commands/{mode}-{key}',
average,
global_step=global_step
)
with open(Path(self.expdir) / "log.log", 'a') as f:
if key == 'acc':
print(f"{mode} {key}: {average}")
f.write(f'{mode} at step {global_step}: {average}\n')
if mode == 'dev' and average > self.best_score:
self.best_score = torch.ones(1) * average
f.write(f'New best on {mode} at step {global_step}: {average}\n')
save_names.append(f'{mode}-best.ckpt')
with open(Path(self.expdir) / f"{mode}_predict.csv", "w") as file:
lines = [f"{f},{a},{o},{l}\n" for f, (a, o, l) in zip(records["filename"], records["predict"])]
file.writelines(lines)
with open(Path(self.expdir) / f"{mode}_truth.csv", "w") as file:
lines = [f"{f},{a},{o},{l}\n" for f, (a, o, l) in zip(records["filename"], records["truth"])]
file.writelines(lines)
return save_names |
5,370 | assemble sfx option | # Quick script to build top-level color and sfx options for pickling
from Colors import *
import Sounds as sfx
def assemble_color_option(f, internal_name: str, func, display_name: str, default_option: str, outer=False):
color_options = func()
if outer:
color_options.append("Match Inner")
format_color = lambda color: color.replace(' ', '_').lower()
color_to_id = {format_color(color): index for index, color in enumerate(color_options)}
docstring = 'Choose a color. "random_choice" selects a random option. "completely_random" generates a random hex code.'
if outer:
docstring += ' "match_inner" copies the inner color for this option.'
f.write(f"class {internal_name}(Choice):\n")
f.write(f" \"\"\"{docstring}\"\"\"\n")
f.write(f" display_name = \"{display_name}\"\n")
for color, id in color_to_id.items():
f.write(f" option_{color} = {id}\n")
f.write(f" default = {color_options.index(default_option)}")
f.write(f"\n\n\n")
def METHOD_NAME(f, internal_name: str, sound_hook: sfx.SoundHooks, display_name: str):
options = sfx.get_setting_choices(sound_hook).keys()
sfx_to_id = {sound.replace('-', '_'): index for index, sound in enumerate(options)}
docstring = 'Choose a sound effect. "random_choice" selects a random option. "random_ear_safe" selects a random safe option. "completely_random" selects any random sound.'
f.write(f"class {internal_name}(Choice):\n")
f.write(f" \"\"\"{docstring}\"\"\"\n")
f.write(f" display_name = \"{display_name}\"\n")
for sound, id in sfx_to_id.items():
f.write(f" option_{sound} = {id}\n")
f.write(f"\n\n\n")
with open('ColorSFXOptions.py', 'w') as f:
f.write("# Auto-generated color and sound-effect options from Colors.py and Sounds.py \n")
f.write("from Options import Choice\n\n\n")
assemble_color_option(f, "kokiri_color", get_tunic_color_options, "Kokiri Tunic", "Kokiri Green")
assemble_color_option(f, "goron_color", get_tunic_color_options, "Goron Tunic", "Goron Red")
assemble_color_option(f, "zora_color", get_tunic_color_options, "Zora Tunic", "Zora Blue")
assemble_color_option(f, "silver_gauntlets_color", get_gauntlet_color_options, "Silver Gauntlets Color", "Silver")
assemble_color_option(f, "golden_gauntlets_color", get_gauntlet_color_options, "Golden Gauntlets Color", "Gold")
assemble_color_option(f, "mirror_shield_frame_color", get_shield_frame_color_options, "Mirror Shield Frame Color", "Red")
assemble_color_option(f, "navi_color_default_inner", get_navi_color_options, "Navi Idle Inner", "White")
assemble_color_option(f, "navi_color_default_outer", get_navi_color_options, "Navi Idle Outer", "Match Inner", outer=True)
assemble_color_option(f, "navi_color_enemy_inner", get_navi_color_options, "Navi Targeting Enemy Inner", "Yellow")
assemble_color_option(f, "navi_color_enemy_outer", get_navi_color_options, "Navi Targeting Enemy Outer", "Match Inner", outer=True)
assemble_color_option(f, "navi_color_npc_inner", get_navi_color_options, "Navi Targeting NPC Inner", "Light Blue")
assemble_color_option(f, "navi_color_npc_outer", get_navi_color_options, "Navi Targeting NPC Outer", "Match Inner", outer=True)
assemble_color_option(f, "navi_color_prop_inner", get_navi_color_options, "Navi Targeting Prop Inner", "Green")
assemble_color_option(f, "navi_color_prop_outer", get_navi_color_options, "Navi Targeting Prop Outer", "Match Inner", outer=True)
assemble_color_option(f, "sword_trail_color_inner", get_sword_trail_color_options, "Sword Trail Inner", "White")
assemble_color_option(f, "sword_trail_color_outer", get_sword_trail_color_options, "Sword Trail Outer", "Match Inner", outer=True)
assemble_color_option(f, "bombchu_trail_color_inner", get_bombchu_trail_color_options, "Bombchu Trail Inner", "Red")
assemble_color_option(f, "bombchu_trail_color_outer", get_bombchu_trail_color_options, "Bombchu Trail Outer", "Match Inner", outer=True)
assemble_color_option(f, "boomerang_trail_color_inner", get_boomerang_trail_color_options, "Boomerang Trail Inner", "Yellow")
assemble_color_option(f, "boomerang_trail_color_outer", get_boomerang_trail_color_options, "Boomerang Trail Outer", "Match Inner", outer=True)
assemble_color_option(f, "heart_color", get_heart_color_options, "Heart Color", "Red")
assemble_color_option(f, "magic_color", get_magic_color_options, "Magic Color", "Green")
assemble_color_option(f, "a_button_color", get_a_button_color_options, "A Button Color", "N64 Blue")
assemble_color_option(f, "b_button_color", get_b_button_color_options, "B Button Color", "N64 Green")
assemble_color_option(f, "c_button_color", get_c_button_color_options, "C Button Color", "Yellow")
assemble_color_option(f, "start_button_color", get_start_button_color_options, "Start Button Color", "N64 Red")
METHOD_NAME(f, "sfx_navi_overworld", sfx.SoundHooks.NAVI_OVERWORLD, "Navi Overworld")
METHOD_NAME(f, "sfx_navi_enemy", sfx.SoundHooks.NAVI_ENEMY, "Navi Enemy")
METHOD_NAME(f, "sfx_low_hp", sfx.SoundHooks.HP_LOW, "Low HP")
METHOD_NAME(f, "sfx_menu_cursor", sfx.SoundHooks.MENU_CURSOR, "Menu Cursor")
METHOD_NAME(f, "sfx_menu_select", sfx.SoundHooks.MENU_SELECT, "Menu Select")
METHOD_NAME(f, "sfx_nightfall", sfx.SoundHooks.NIGHTFALL, "Nightfall")
METHOD_NAME(f, "sfx_horse_neigh", sfx.SoundHooks.HORSE_NEIGH, "Horse")
METHOD_NAME(f, "sfx_hover_boots", sfx.SoundHooks.BOOTS_HOVER, "Hover Boots")
print('all done') |
5,371 | search | # Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import paddle
import numpy as np
from .utils import compute_scales, k_means
from .metrics import mse_loss
__all__ = ['PieceWiseSearch']
class PieceWiseSearch():
def __init__(self,
k_piece=1,
bits_length=8,
search_piece=False,
search_alpha_min=0.2,
search_alpha_max=0.8,
search_scale_min=1.,
search_scale_max=5.,
weight_quant_method='abs_max_channel_wise',
act_quant_method='abs_max',
loss_function=mse_loss):
'''
PieceWiseSearch provides to search k_piece, alpha and scale.
Args:
k_piece (int): Number of k pieces. Default: 1.
bits_length (int): Number of bits to quantize the weight. Default: 8.
search_piece (bool): Whether to search the best k piece. Default: False.
search_alpha_min (float): Minimum alpha for search. Default: 0.2.
search_alpha_max (float): Maximum alpha for search. Default: 0.8.
search_scale_min (float): Minimum scale for search. Default: 1.
search_scale_max (float): Maximum scale for search. Default: 5.
weight_quant_method (str): Weight quantization method. Chosen from abs_max, abs_max_channel_wise and avg. Default: abs_max_channel_wise.
act_quant_method (str): Activation quantization method. Chosen from abs_max, avg. Default: abs_max.
loss_function (callable): Loss function. Default: mse_loss.
'''
self.k_piece = k_piece
self.bits_length = bits_length
self.search_piece = search_piece
self.search_alpha_min = search_alpha_min
self.search_alpha_max = search_alpha_max
self.search_scale_min = search_scale_min
self.search_scale_max = search_scale_max
self.weight_quant_method = weight_quant_method
self.act_quant_method = act_quant_method
self.bnt = (1 << (bits_length - 1)) - 1
self.loss_function = loss_function
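# The search scores every candidate smoothing scale with a symmetric fake-quant
# round trip; for the default 8 bits, bnt = (1 << 7) - 1 = 127 and
#   q = clip(round(x / scale * bnt), -bnt - 1, bnt);  x_hat = q / bnt * scale
# as implemented in the search method below.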
def METHOD_NAME(self, layer_name, sampled_input, act_abs_max, weight):
act = sampled_input
act.stop_gradient = True
print('[smooth search] search input of %s' % layer_name)
origin_out = paddle.matmul(act, weight)
w_abs_max = weight.abs().max(axis=-1, keepdim=True)
rw_abs_max = w_abs_max.reshape(act_abs_max.shape)
np_act_abs_max = np.array(act_abs_max)
np_rw_abs_max = np.array(rw_abs_max)
smooth_scale_out = None
global_loss = float('inf')
best_scale = None
for k_piece in range(1, self.k_piece + 1):
if not self.search_piece:
k_piece = self.k_piece
print('Search {} Piece'.format(k_piece))
centroids, labels = k_means(act_abs_max, k_piece)
piece = ['piece_{}'.format(a) for a in range(len(centroids))]
for i in range(len(centroids)):
# print('search for piece {}; centroids value is {}'.format(
# piece[i], centroids[centroids.argsort()[i]].numpy()))
alpha = self.search_alpha_min
alpha_max = self.search_scale_max if self.search_scale_max is not None else self.search_alpha_max
calibration_loss = float('inf')
final_alpha = None
mask_for_search = paddle.where(labels == centroids.argsort()[i],
1., 0.)
mask_for_ones = paddle.where(mask_for_search == 0., 1., 0.)
while alpha <= alpha_max:
if alpha < 1:
alpha += 0.01
if alpha >= self.search_alpha_max:
alpha = self.search_scale_min
if alpha is None:
break
else:
alpha += 0.5
alpha = round(alpha, 2)
if alpha < 1:
s = (np.power(np_act_abs_max, alpha) / np.power(
np_rw_abs_max, 1. - alpha)).clip(min=1e-5)
s = paddle.to_tensor(s, dtype='float32')
smooth_scale = s * mask_for_search
else:
smooth_scale = alpha * mask_for_search
if smooth_scale_out is not None:
mask_for_ones_new = paddle.where(
smooth_scale_out == 0., 1., 0.)
mask_for_ones *= mask_for_ones_new
smooth_scale_ = smooth_scale_out + smooth_scale
smooth_scale_tmp = smooth_scale_ + mask_for_ones
else:
smooth_scale_tmp = smooth_scale + mask_for_ones
new_act = act / smooth_scale_tmp
new_weight = weight * smooth_scale_tmp.reshape(
w_abs_max.shape)
quant_scale = compute_scales(
new_act, method=self.act_quant_method)
quant_act = paddle.clip(
paddle.round(new_act / quant_scale * self.bnt),
-self.bnt - 1, self.bnt)
quant_dequant_act = quant_act / self.bnt * quant_scale
quant_scale = compute_scales(
new_weight, method=self.weight_quant_method)
quant_weight = paddle.clip(
paddle.round(new_weight / quant_scale * self.bnt),
-self.bnt - 1, self.bnt)
quant_dequant_weight = quant_weight / self.bnt * quant_scale
new_out = paddle.matmul(quant_dequant_act,
quant_dequant_weight)
cur_loss = self.loss_function(origin_out, new_out)
if cur_loss <= calibration_loss:
calibration_loss = cur_loss
final_smooth_scale = smooth_scale
final_alpha = alpha
# print("Layer {} Piece {}, loss: {}, alpha : {}".format(
# layer_name, piece[i], float(calibration_loss), final_alpha))
if smooth_scale_out is None:
smooth_scale_out = final_smooth_scale
else:
smooth_scale_out += final_smooth_scale
if calibration_loss < global_loss:
global_loss = calibration_loss
best_scale = smooth_scale_out
if self.search_piece:
print('Find Better K-Piece {}'.format(k_piece))
if not self.search_piece:
break
return best_scale |
5,372 | predict | __all__ = ["FastTextModel"]
import contextlib
import gc
import logging
import os
import tempfile
import numpy as np
import pandas as pd
from autogluon.common.features.types import S_TEXT
from autogluon.common.utils.resource_utils import ResourceManager
from autogluon.common.utils.try_import import try_import_fasttext
from autogluon.core.constants import BINARY, MULTICLASS
from autogluon.core.models import AbstractModel
from .hyperparameters.parameters import get_param_baseline
logger = logging.getLogger(__name__)
class FastTextModel(AbstractModel):
model_bin_file_name = "fasttext.ftz"
def __init__(self, **kwargs):
super().__init__(**kwargs)
self._load_model = None # Whether to load inner model when loading.
def _set_default_params(self):
default_params = get_param_baseline()
for param, val in default_params.items():
self._set_default_param_value(param, val)
# TODO: Investigate allowing categorical features as well
def _get_default_auxiliary_params(self) -> dict:
default_auxiliary_params = super()._get_default_auxiliary_params()
extra_auxiliary_params = dict(
get_features_kwargs=dict(
required_special_types=[S_TEXT],
)
)
default_auxiliary_params.update(extra_auxiliary_params)
return default_auxiliary_params
@classmethod
def _get_default_ag_args(cls) -> dict:
default_ag_args = super()._get_default_ag_args()
extra_ag_args = {"valid_stacker": False, "problem_types": [BINARY, MULTICLASS]}
default_ag_args.update(extra_ag_args)
return default_ag_args
def _fit(self, X, y, sample_weight=None, **kwargs):
if self.problem_type not in (BINARY, MULTICLASS):
raise ValueError("FastText model only supports binary or multiclass classification")
try_import_fasttext()
import fasttext
params = self._get_model_params()
quantize_model = params.pop("quantize_model", True)
verbosity = kwargs.get("verbosity", 2)
if "verbose" not in params:
if verbosity <= 2:
params["verbose"] = 0
elif verbosity == 3:
params["verbose"] = 1
else:
params["verbose"] = 2
if sample_weight is not None:
logger.log(15, "sample_weight not yet supported for FastTextModel, this model will ignore them in training.")
X = self.preprocess(X)
self._label_dtype = y.dtype
self._label_map = {label: f"__label__{i}" for i, label in enumerate(y.unique())}
self._label_inv_map = {v: k for k, v in self._label_map.items()}
np.random.seed(0)
idxs = np.random.permutation(list(range(len(X))))
with tempfile.NamedTemporaryFile(mode="w+t") as f:
logger.debug("generate training data")
for label, text in zip(y.iloc[idxs], (X[i] for i in idxs)):
f.write(f"{self._label_map[label]} {text}\n")
f.flush()
mem_start = ResourceManager.get_memory_rss()
logger.debug("train FastText model")
self.model = fasttext.train_supervised(f.name, **params)
if quantize_model:
self.model.quantize(input=f.name, retrain=True)
gc.collect()
mem_curr = ResourceManager.get_memory_rss()
self._model_size_estimate = max(mem_curr - mem_start, 100000000 if quantize_model else 800000000)
logger.debug("finish training FastText model")
# TODO: move logic to self._preprocess_nonadaptive()
# TODO: text features: alternate text preprocessing steps
# TODO: categorical features: special encoding: <feature name>_<feature value>
def _preprocess(self, X: pd.DataFrame, **kwargs) -> list:
X = super()._preprocess(X, **kwargs)
text_col = (
X.astype(str)
.fillna(" ")
.apply(lambda r: " ".join(v for v in r.values), axis=1)
.str.lower()
.str.replace("<.*?>", " ") # remove html tags
# .str.replace('''(\\d[\\d,]*)(\\.\\d+)?''', ' __NUMBER__ ') # process numbers preserve dot
.str.replace("""([\\W])""", " \\1 ") # separate special characters
.str.replace("\\s", " ")
.str.replace("[ ]+", " ")
)
return text_col.to_list()
def METHOD_NAME(self, X: pd.DataFrame, **kwargs) -> np.ndarray:
X = self.preprocess(X, **kwargs)
pred_labels, pred_probs = self.model.METHOD_NAME(X)
y_pred = np.array(
[self._label_inv_map[labels[0]] for labels in pred_labels],
dtype=self._label_dtype,
)
return y_pred
def _predict_proba(self, X: pd.DataFrame, **kwargs) -> np.ndarray:
X = self.preprocess(X, **kwargs)
pred_labels, pred_probs = self.model.METHOD_NAME(X, k=len(self.model.labels))
recs = []
for labels, probs in zip(pred_labels, pred_probs):
recs.append(dict(zip((self._label_inv_map[label] for label in labels), probs)))
y_pred_proba: np.ndarray = pd.DataFrame(recs).sort_index(axis=1).values
return self._convert_proba_to_unified_form(y_pred_proba)
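# Illustrative shape of one record before the DataFrame is built (labels assumed):
#   {"cat": 0.92, "dog": 0.08}
# sort_index(axis=1) then fixes a deterministic class-column order across rows.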
def save(self, path: str = None, verbose=True) -> str:
self._load_model = self.model is not None
# pickle model parts
__model = self.model
self.model = None
path = super().save(path=path, verbose=verbose)
self.model = __model
# save fasttext model: fasttext model cannot be pickled; saved it separately
# TODO: s3 support
if self._load_model:
fasttext_model_file_name = os.path.join(path, self.model_bin_file_name)
self.model.save_model(fasttext_model_file_name)
self._load_model = None
return path
@classmethod
def load(cls, path: str, reset_paths=True, verbose=True):
model: FastTextModel = super().load(path=path, reset_paths=reset_paths, verbose=verbose)
# load binary fasttext model
if model._load_model:
try_import_fasttext()
import fasttext
fasttext_model_file_name = os.path.join(model.path, cls.model_bin_file_name)
# TODO: hack to suppress a deprecation warning from fasttext
# remove it once official fasttext is updated beyond 0.9.2
# https://github.com/facebookresearch/fastText/issues/1067
with open(os.devnull, "w") as f, contextlib.redirect_stderr(f):
model.model = fasttext.load_model(fasttext_model_file_name)
model._load_model = None
return model
def get_memory_size(self) -> int:
return self._model_size_estimate
def _more_tags(self):
# `can_refit_full=True` because validation data is not used and there is no form of early stopping implemented.
return {"can_refit_full": True}
@classmethod
def _class_tags(cls):
return {"handles_text": True} |
5,373 | update request | from logging import getLogger
from django.utils.module_loading import import_string
from django.utils.timezone import now
from axes.conf import settings
from axes.handlers.base import AxesBaseHandler, AbstractAxesHandler, AxesHandler
from axes.helpers import (
get_client_ip_address,
get_client_user_agent,
get_client_path_info,
get_client_http_accept,
toggleable,
)
log = getLogger(__name__)
class AxesProxyHandler(AbstractAxesHandler, AxesBaseHandler):
"""
Proxy interface for configurable Axes signal handler class.
If you wish to implement a custom version of this handler,
you can override the settings.AXES_HANDLER configuration string
with a class that implements a compatible interface and methods.
Defaults to using axes.handlers.proxy.AxesProxyHandler if not overridden.
Refer to axes.handlers.proxy.AxesProxyHandler for default implementation.
"""
implementation = None # type: AxesHandler
@classmethod
def get_implementation(cls, force: bool = False) -> AxesHandler:
"""
Fetch and initialize configured handler implementation and memoize it to avoid reinitialization.
This method is re-entrant and can be called multiple times from e.g. Django application loader.
"""
if force or not cls.implementation:
cls.implementation = import_string(settings.AXES_HANDLER)()
return cls.implementation
@classmethod
def reset_attempts(
cls,
*,
ip_address: str = None,
username: str = None,
ip_or_username: bool = False,
) -> int:
return cls.get_implementation().reset_attempts(
ip_address=ip_address, username=username, ip_or_username=ip_or_username
)
@classmethod
def reset_logs(cls, *, age_days: int = None) -> int:
return cls.get_implementation().reset_logs(age_days=age_days)
@staticmethod
def METHOD_NAME(request):
"""
Update request attributes before passing them into the selected handler class.
"""
if request is None:
log.error(
"AXES: AxesProxyHandler.update_request can not set request attributes to a None request"
)
return
if not hasattr(request, "axes_updated"):
request.axes_locked_out = False
request.axes_attempt_time = now()
request.axes_ip_address = get_client_ip_address(request)
request.axes_user_agent = get_client_user_agent(request)
request.axes_path_info = get_client_path_info(request)
request.axes_http_accept = get_client_http_accept(request)
request.axes_updated = True
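# After this call the request carries the axes_* attributes, e.g. (values assumed):
#   request.axes_ip_address == "203.0.113.7"
#   request.axes_locked_out is False
# so the wrapped handler methods below never recompute client metadata.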
@classmethod
def is_locked(cls, request, credentials: dict = None) -> bool:
cls.METHOD_NAME(request)
return cls.get_implementation().is_locked(request, credentials)
@classmethod
def is_allowed(cls, request, credentials: dict = None) -> bool:
cls.METHOD_NAME(request)
return cls.get_implementation().is_allowed(request, credentials)
@classmethod
def get_failures(cls, request, credentials: dict = None) -> int:
cls.METHOD_NAME(request)
return cls.get_implementation().get_failures(request, credentials)
@classmethod
@toggleable
def user_login_failed(cls, sender, credentials: dict, request=None, **kwargs):
cls.METHOD_NAME(request)
return cls.get_implementation().user_login_failed(
sender, credentials, request, **kwargs
)
@classmethod
@toggleable
def user_logged_in(cls, sender, request, user, **kwargs):
cls.METHOD_NAME(request)
return cls.get_implementation().user_logged_in(sender, request, user, **kwargs)
@classmethod
@toggleable
def user_logged_out(cls, sender, request, user, **kwargs):
cls.METHOD_NAME(request)
return cls.get_implementation().user_logged_out(sender, request, user, **kwargs)
@classmethod
@toggleable
def post_save_access_attempt(cls, instance, **kwargs):
return cls.get_implementation().post_save_access_attempt(instance, **kwargs)
@classmethod
@toggleable
def post_delete_access_attempt(cls, instance, **kwargs):
return cls.get_implementation().post_delete_access_attempt(instance, **kwargs) |
5,374 | done | #!/usr/bin/env python
# pylint: disable=unused-argument, import-error
# This program is dedicated to the public domain under the CC0 license.
"""
First, a few callback functions are defined. Then, those functions are passed to
the Application and registered at their respective places.
Then, the bot is started and runs until we press Ctrl-C on the command line.
Usage:
Example of a bot-user conversation using ConversationHandler.
Send /start to initiate the conversation.
Press Ctrl-C on the command line or send a signal to the process to stop the
bot.
"""
import logging
from typing import Dict
from telegram import ReplyKeyboardMarkup, ReplyKeyboardRemove, Update
from telegram.ext import (
Application,
CommandHandler,
ContextTypes,
ConversationHandler,
MessageHandler,
PicklePersistence,
filters,
)
# Enable logging
logging.basicConfig(
format="%(asctime)s - %(name)s - %(levelname)s - %(message)s", level=logging.INFO
)
# set higher logging level for httpx to avoid all GET and POST requests being logged
logging.getLogger("httpx").setLevel(logging.WARNING)
logger = logging.getLogger(__name__)
CHOOSING, TYPING_REPLY, TYPING_CHOICE = range(3)
reply_keyboard = [
["Age", "Favourite colour"],
["Number of siblings", "Something else..."],
["Done"],
]
markup = ReplyKeyboardMarkup(reply_keyboard, one_time_keyboard=True)
def facts_to_str(user_data: Dict[str, str]) -> str:
"""Helper function for formatting the gathered user info."""
facts = [f"{key} - {value}" for key, value in user_data.items()]
return "\n".join(facts).join(["\n", "\n"])
async def start(update: Update, context: ContextTypes.DEFAULT_TYPE) -> int:
"""Start the conversation, display any stored data and ask user for input."""
reply_text = "Hi! My name is Doctor Botter."
if context.user_data:
reply_text += (
f" You already told me your {', '.join(context.user_data.keys())}. Why don't you "
f"tell me something more about yourself? Or change anything I already know."
)
else:
reply_text += (
" I will hold a more complex conversation with you. Why don't you tell me "
"something about yourself?"
)
await update.message.reply_text(reply_text, reply_markup=markup)
return CHOOSING
async def regular_choice(update: Update, context: ContextTypes.DEFAULT_TYPE) -> int:
"""Ask the user for info about the selected predefined choice."""
text = update.message.text.lower()
context.user_data["choice"] = text
if context.user_data.get(text):
reply_text = (
f"Your {text}? I already know the following about that: {context.user_data[text]}"
)
else:
reply_text = f"Your {text}? Yes, I would love to hear about that!"
await update.message.reply_text(reply_text)
return TYPING_REPLY
async def custom_choice(update: Update, context: ContextTypes.DEFAULT_TYPE) -> int:
"""Ask the user for a description of a custom category."""
await update.message.reply_text(
'Alright, please send me the category first, for example "Most impressive skill"'
)
return TYPING_CHOICE
async def received_information(update: Update, context: ContextTypes.DEFAULT_TYPE) -> int:
"""Store info provided by user and ask for the next category."""
text = update.message.text
category = context.user_data["choice"]
context.user_data[category] = text.lower()
del context.user_data["choice"]
await update.message.reply_text(
"Neat! Just so you know, this is what you already told me:"
f"{facts_to_str(context.user_data)}"
"You can tell me more, or change your opinion on something.",
reply_markup=markup,
)
return CHOOSING
async def show_data(update: Update, context: ContextTypes.DEFAULT_TYPE) -> None:
"""Display the gathered info."""
await update.message.reply_text(
f"This is what you already told me: {facts_to_str(context.user_data)}"
)
async def METHOD_NAME(update: Update, context: ContextTypes.DEFAULT_TYPE) -> int:
"""Display the gathered info and end the conversation."""
if "choice" in context.user_data:
del context.user_data["choice"]
await update.message.reply_text(
f"I learned these facts about you: {facts_to_str(context.user_data)}Until next time!",
reply_markup=ReplyKeyboardRemove(),
)
return ConversationHandler.END
def main() -> None:
"""Run the bot."""
# Create the Application and pass it your bot's token.
persistence = PicklePersistence(filepath="conversationbot")
application = Application.builder().token("TOKEN").persistence(persistence).build()
# Add conversation handler with the states CHOOSING, TYPING_CHOICE and TYPING_REPLY
conv_handler = ConversationHandler(
entry_points=[CommandHandler("start", start)],
states={
CHOOSING: [
MessageHandler(
filters.Regex("^(Age|Favourite colour|Number of siblings)$"), regular_choice
),
MessageHandler(filters.Regex("^Something else...$"), custom_choice),
],
TYPING_CHOICE: [
MessageHandler(
filters.TEXT & ~(filters.COMMAND | filters.Regex("^Done$")), regular_choice
)
],
TYPING_REPLY: [
MessageHandler(
filters.TEXT & ~(filters.COMMAND | filters.Regex("^Done$")),
received_information,
)
],
},
fallbacks=[MessageHandler(filters.Regex("^Done$"), METHOD_NAME)],
name="my_conversation",
persistent=True,
)
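# Because persistent=True and PicklePersistence(filepath="conversationbot") are
# used, both the conversation state and context.user_data survive bot restarts.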
application.add_handler(conv_handler)
show_data_handler = CommandHandler("show_data", show_data)
application.add_handler(show_data_handler)
# Run the bot until the user presses Ctrl-C
application.run_polling(allowed_updates=Update.ALL_TYPES)
if __name__ == "__main__":
main() |
5,375 | source subfolder | from conans import CMake, ConanFile, tools
from conans.errors import ConanInvalidConfiguration
required_conan_version = ">=1.33.0"
class MsixConan(ConanFile):
name = "msix"
license = "MIT"
url = "https://github.com/conan-io/conan-center-index"
homepage = "https://github.com/microsoft/msix-packaging"
description = "An SDK for creating MSIX packages"
topics = ("msix", "sdk", "packaging", "conan-recipe")
settings = "os", "compiler", "build_type", "arch"
options = {
"shared": [True, False],
"fPIC": [True, False],
"crypto_lib": ["crypt32", "openssl"],
"pack": [True, False],
"skip_bundles": [True, False],
"use_external_zlib": [True, False],
"use_validation_parser": [True, False],
"xml_parser": ["applexml", "javaxml", "msxml6", "xerces"]
}
default_options = {
"shared": False,
"fPIC": True,
"crypto_lib": "openssl",
"pack": False,
"skip_bundles": False,
"use_external_zlib": True,
"use_validation_parser": False,
"xml_parser": "msxml6"
}
generators = "cmake"
exports_sources = "CMakeLists.txt", "patches/**"
_cmake = None
@property
def _minimum_compilers_version(self):
return {
"Visual Studio": "15"
}
@property
def METHOD_NAME(self):
return "source_subfolder"
def _configure_cmake(self):
if self._cmake:
return self._cmake
self._cmake = CMake(self)
if self.settings.os == "Android":
self._cmake.definitions["AOSP"] = True
if self.settings.os == "Linux":
self._cmake.definitions["LINUX"] = True
if self.settings.os == "Macos":
self._cmake.definitions["MACOS"] = True
self._cmake.definitions["CRYPTO_LIB"] = self.options.crypto_lib
self._cmake.definitions["MSIX_PACK"] = self.options.pack
self._cmake.definitions["MSIX_SAMPLES"] = False
self._cmake.definitions["MSIX_TESTS"] = False
self._cmake.definitions["SKIP_BUNDLES"] = self.options.skip_bundles
self._cmake.definitions["USE_MSIX_SDK_ZLIB"] = self.options.use_external_zlib
self._cmake.definitions["USE_SHARED_ZLIB"] = self.options["zlib"].shared
self._cmake.definitions["USE_VALIDATION_PARSER"] = self.options.use_validation_parser
self._cmake.definitions["XML_PARSER"] = self.options.xml_parser
self._cmake.definitions["CALCULATE_VERSION"] = False
self._cmake.definitions["ENABLE_NUGET_PACKAGING"] = False
self._cmake.configure()
return self._cmake
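# Illustrative resulting configure line on Linux with xerces selected (flag
# names taken from the definitions above, rendering of values assumed):
#   cmake -DLINUX=True -DCRYPTO_LIB=openssl -DMSIX_PACK=False -DXML_PARSER=xerces ...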
def _validate_compiler_settings(self):
compiler = self.settings.compiler
if compiler.get_safe("cppstd"):
tools.check_min_cppstd(self, "17")
min_version = self._minimum_compilers_version.get(str(self.settings.compiler))
if not min_version:
self.output.warn("{} recipe lacks information about the {} compiler support.".format(
self.name, self.settings.compiler))
elif tools.Version(self.settings.compiler.version) < min_version:
raise ConanInvalidConfiguration("{} requires C++17 support. The current compiler {} {} does not support it.".format(
self.name, self.settings.compiler, self.settings.compiler.version))
def config_options(self):
if self.settings.os == "Windows":
del self.options.fPIC
self.options.crypto_lib = "crypt32"
def configure(self):
if self.options.shared:
del self.options.fPIC
def requirements(self):
if self.settings.os == "Linux" and not self.options.skip_bundles:
self.requires("icu/71.1")
if self.options.crypto_lib == "openssl":
self.requires("openssl/1.1.1q")
if self.options.use_external_zlib:
self.requires("zlib/1.2.12")
if self.options.xml_parser == "xerces":
self.requires("xerces-c/3.2.3")
def validate(self):
if self.settings.os != "Android" and self.options.xml_parser == "javaxml":
raise ConanInvalidConfiguration("javaxml is supported only for Android")
if self.settings.os == "Linux" and self.settings.compiler != "clang":
raise ConanInvalidConfiguration("Only clang is supported on Linux")
if self.settings.os != "Macos" and self.options.xml_parser == "applexml":
raise ConanInvalidConfiguration("applexml is supported only for MacOS")
if self.settings.os != "Windows" and self.options.crypto_lib == "crypt32":
raise ConanInvalidConfiguration("crypt32 is supported only for Windows")
if self.settings.os != "Windows" and self.options.xml_parser == "msxml6":
raise ConanInvalidConfiguration("msxml6 is supported only for Windows")
if self.options.pack:
if self.settings.os == "Macos":
if not self.options.use_external_zlib:
raise ConanInvalidConfiguration("Using libCompression APIs and packaging features is not supported")
if self.options.xml_parser != "xerces":
raise ConanInvalidConfiguration("Xerces is the only supported parser for MacOS pack")
if not self.options.use_validation_parser:
raise ConanInvalidConfiguration("Packaging requires validation parser")
if (self.options.xml_parser == "xerces" and
self.options["xerces-c"].char_type != "char16_t"):
raise ConanInvalidConfiguration("Only char16_t is supported for xerces-c")
self._validate_compiler_settings()
def source(self):
tools.get(**self.conan_data["sources"][self.version], destination=self.METHOD_NAME, strip_root=True)
def build(self):
for patch in self.conan_data.get("patches", {}).get(self.version, []):
tools.patch(**patch)
cmake = self._configure_cmake()
cmake.build()
def package(self):
cmake = self._configure_cmake()
cmake.install()
self.copy("LICENSE", dst="licenses", src=self.METHOD_NAME)
def package_info(self):
self.cpp_info.libs = tools.collect_libs(self)
if self.settings.os == "Windows":
self.cpp_info.system_libs = ["runtimeobject"]
if self.settings.compiler == "Visual Studio":
self.cpp_info.system_libs.append("delayimp")
if self.options.crypto_lib == "crypt32":
self.cpp_info.system_libs.extend(["bcrypt", "crypt32", "wintrust"])
if self.options.xml_parser == "msxml6":
self.cpp_info.system_libs.append("msxml6") |
5,376 | download | # Copyright 2020-2023 OpenDR European Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import gym
import os
from pathlib import Path
from urllib.request import urlretrieve
from stable_baselines3 import PPO
from stable_baselines3.common.monitor import Monitor
from stable_baselines3.common.vec_env import DummyVecEnv
from stable_baselines3.common.callbacks import CheckpointCallback
from opendr.engine.learners import LearnerRL
from opendr.engine.constants import OPENDR_SERVER_URL
class EndToEndPlanningRLLearner(LearnerRL):
def __init__(self, env=None, lr=3e-4, n_steps=1024, iters=int(1e5), batch_size=64, checkpoint_after_iter=500,
temp_path='', device='cuda'):
"""
Specifies a proximal policy optimization (PPO) agent that can be trained for end to end planning for obstacle avoidance.
Internally uses Stable-Baselines 3 (https://github.com/DLR-RM/stable-baselines3.git).
"""
super(EndToEndPlanningRLLearner, self).__init__(lr=lr, iters=iters, batch_size=batch_size, optimizer='adam',
network_head='', temp_path=temp_path,
checkpoint_after_iter=checkpoint_after_iter,
device=device, threshold=0.0, scale=1.0)
self.env = env
self.n_steps = n_steps
if self.env is None:
self.agent = PPO.load(os.environ.get(
"OPENDR_HOME") + '/src/opendr/planning/end_to_end_planning/pretrained_model/saved_model.zip')
print("Learner is initiated with pretrained model without a gym model.")
else:
if isinstance(self.env, DummyVecEnv):
self.env = self.env.envs[0]
self.env = DummyVecEnv([lambda: self.env])
self.agent = PPO("MultiInputPolicy", self.env, learning_rate=self.lr, n_steps=self.n_steps,
batch_size=self.batch_size, verbose=1)
def METHOD_NAME(self, path=None,
url=OPENDR_SERVER_URL + "planning/end_to_end_planning"):
if path is None:
path = "./end_to_end_planning_tmp/"
filename = "ardupilot.zip"
file_destination = Path(path) / filename
if not file_destination.exists():
file_destination.parent.mkdir(parents=True, exist_ok=True)
url = os.path.join(url, filename)
urlretrieve(url=url, filename=file_destination)
return file_destination
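# Hypothetical call (paths assumed): learner.download() fetches
# <OPENDR_SERVER_URL>/planning/end_to_end_planning/ardupilot.zip once into
# ./end_to_end_planning_tmp/ardupilot.zip and returns that cached Path on
# later calls.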
def fit(self, env=None, logging_path='', verbose=True):
"""
Train the agent on the environment.
:param env: gym.Env, optional, if specified use this env to train
:param logging_path: str, path for logging and checkpointing
:param verbose: bool, enable verbosity
"""
if env is not None:
if isinstance(env, gym.Env):
if isinstance(self.env, gym.Env):
self.env = env
else:
self.env = env
self.agent = PPO("MultiInputPolicy", self.env, learning_rate=self.lr, n_steps=self.n_steps,
batch_size=self.batch_size, verbose=verbose)
else:
print('env should be gym.Env')
return
self.last_checkpoint_time_step = 0
self.logdir = logging_path
if isinstance(self.env, DummyVecEnv):
self.env = self.env.envs[0]
if isinstance(self.env, Monitor):
self.env = self.env.env
self.env = Monitor(self.env, filename=self.logdir)
self.env = DummyVecEnv([lambda: self.env])
self.agent.set_env(self.env)
checkpoint_callback = CheckpointCallback(save_freq=1000, save_path=self.logdir, name_prefix='rl_model')
self.agent.learn(total_timesteps=self.iters, callback=checkpoint_callback)
def eval(self, env):
"""
Evaluate the agent on the specified environment.
:param env: gym.Env, env to evaluate on
:return: sum of rewards through the episode
"""
if isinstance(env, DummyVecEnv):
env = env.envs[0]
if isinstance(env, Monitor):
env = env.env
env = DummyVecEnv([lambda: env])
self.agent.set_env(env)
obs = env.reset()
sum_of_rewards = 0
for i in range(50):
action, _states = self.agent.predict(obs, deterministic=True)
obs, rewards, dones, info = env.step(action)
sum_of_rewards += rewards
if dones:
break
return {"rewards_collected": sum_of_rewards}
def save(self, path):
"""
Saves the model in the path provided.
:param path: Path to save directory
:type path: str
:return: Whether save succeeded or not
:rtype: bool
"""
self.agent.save(path)
def load(self, path):
"""
Loads a model from the path provided.
:param path: Path to saved model
:type path: str
:return: Whether load succeeded or not
:rtype: bool
"""
self.agent = PPO.load(path)
if isinstance(self.env, gym.Env):
self.agent.set_env(self.env)
def infer(self, batch, deterministic: bool = True):
"""
Loads a model from the path provided.
:param batch: single or list of observations
:type batch: dict ot list of dict
:param deterministic: use deterministic actions from the policy
:type deterministic: bool
:return: the selected action
:rtype: int or list
"""
if isinstance(batch, dict):
return self.agent.predict(batch, deterministic=deterministic)
elif isinstance(batch, list) or isinstance(batch, np.ndarray):
return [self.agent.predict(obs, deterministic=deterministic) for obs in batch]
else:
raise ValueError()
def reset(self):
raise NotImplementedError()
def optimize(self, target_device):
raise NotImplementedError() |
5,377 | test bitrate | # Copyright 2013 Christoph Reiter
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
import os
from io import BytesIO
from mutagen import asf
from tests import TestCase, get_data_path
from quodlibet.formats.wma import WMAFile, unpack_image, pack_image
from quodlibet.formats._image import APICType, EmbeddedImage
from .helper import get_temp_copy
class TWMAFile(TestCase):
def setUp(self):
self.f = get_temp_copy(get_data_path('test.wma'))
self.song = WMAFile(self.f)
self.f2 = get_temp_copy(get_data_path('test-2.wma'))
self.song2 = WMAFile(self.f2)
self.f3 = get_temp_copy(get_data_path('test.asf'))
self.song3 = WMAFile(self.f3)
def tearDown(self):
os.unlink(self.f)
os.unlink(self.f2)
os.unlink(self.f3)
def test_basic(self):
self.song["title"] = u"SomeTestValue"
self.song.write()
self.song.reload()
self.assertEqual(self.song("title"), u"SomeTestValue")
def test_multi(self):
self.song["genre"] = u"Rock\nPop"
self.song.write()
self.song.reload()
# XXX: mutagen doesn't preserve order.. fix it!
self.assertEqual(set(self.song.list("genre")), {u"Rock", u"Pop"})
def test_length(self):
self.assertAlmostEqual(self.song("~#length"), 3.7120, 3)
self.assertAlmostEqual(self.song2("~#length"), 3.684, 3)
self.assertAlmostEqual(self.song3("~#length"), 11.38, 2)
def test_channels(self):
assert self.song("~#channels") == 2
assert self.song2("~#channels") == 2
assert self.song3("~#channels") == 1
def METHOD_NAME(self):
self.assertEqual(self.song("~#bitrate"), 64)
self.assertEqual(self.song2("~#bitrate"), 38)
self.assertEqual(self.song3("~#bitrate"), 5)
def test_sample_rate(self):
assert self.song("~#samplerate") == 48000
assert self.song2("~#samplerate") == 44100
assert self.song3("~#samplerate") == 8000
def test_write(self):
self.song.write()
self.song2.write()
self.song3.write()
def test_can_change(self):
self.assertTrue(self.song.can_change("title"))
self.assertFalse(self.song.can_change("foobar"))
self.assertTrue("albumartist" in self.song.can_change())
def test_format(self):
self.assertEqual(self.song("~format"), "ASF")
self.assertEqual(self.song2("~format"), "ASF")
self.assertEqual(self.song3("~format"), "ASF")
def test_codec(self):
self.assertEqual(self.song("~codec"),
u"Windows Media Audio 9 Standard")
self.assertEqual(self.song2("~codec"),
u"Windows Media Audio 9 Professional")
self.assertEqual(self.song3("~codec"),
u"Intel G.723")
def test_encoding(self):
self.assertEqual(
self.song("~encoding"),
u"Windows Media Audio 9.1\n64 kbps, 48 kHz, stereo 2-pass CBR")
self.assertEqual(
self.song2("~encoding"),
(u"Windows Media Audio 9.1 Professional\n192 kbps, 44 kHz, "
"2 channel 24 bit 2-pass VBR"))
self.assertEqual(self.song3("~encoding"),
u"Microsoft G.723.1\n8 kHz Mono, 5333 Bit/s")
def test_mb_release_track_id(self):
tag = asf.ASF(self.f)
tag["MusicBrainz/Release Track Id"] = [u"foo"]
tag.save()
song = WMAFile(self.f)
self.assertEqual(song("musicbrainz_releasetrackid"), u"foo")
song["musicbrainz_releasetrackid"] = u"bla"
song.write()
tag = asf.ASF(self.f)
self.assertEqual(tag["MusicBrainz/Release Track Id"], [u"bla"])
def test_invalid(self):
path = get_data_path('empty.xm')
self.assertTrue(os.path.exists(path))
self.assertRaises(Exception, WMAFile, path)
def test_get_images(self):
tag = asf.ASF(self.f2)
tag["WM/Picture"] = [tag["WM/Picture"][0], tag["WM/Picture"][0]]
tag.save()
self.song2.reload()
images = self.song2.get_images()
self.assertTrue(images and len(images) == 2)
def test_get_image(self):
self.assertFalse(self.song.get_primary_image())
image = self.song2.get_primary_image()
self.assertTrue(image)
self.assertEqual(image.mime_type, "image/jpeg")
self.assertTrue(image.read())
def test_get_image_invalid_data(self):
tag = asf.ASF(self.f)
tag["WM/Picture"] = [asf.ASFValue(b"nope", asf.BYTEARRAY)]
tag.save()
self.assertFalse(self.song.has_images)
self.song.reload()
self.assertTrue(self.song.has_images)
image = self.song.get_primary_image()
self.assertFalse(image)
def test_unpack_image_min(self):
data = b"\x03" + b"\x00" * 4 + b"\x00" * 4
mime, desc, data, type_ = unpack_image(data)
self.assertEqual(mime, u"")
self.assertEqual(desc, u"")
self.assertEqual(data, b"")
self.assertEqual(type_, 3)
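# Layout of the minimal blob above, as implied by this test: one picture-type
# byte (0x03 = front cover), a 4-byte little-endian data length (0), then two
# empty UTF-16-LE null-terminated strings for mime type and description.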
def test_unpack_image_invalid(self):
self.assertRaises(ValueError, unpack_image, b"")
self.assertRaises(ValueError, unpack_image, b"\x00" * 6)
self.assertRaises(ValueError, unpack_image, b"\x00" * 8)
self.assertRaises(ValueError, unpack_image, b"\x00" * 100)
def test_pack_image(self):
d = pack_image(
u"image/jpeg", u"Description", b"foo", APICType.COVER_FRONT)
mime, desc, data, type_ = unpack_image(d)
self.assertEqual(mime, u"image/jpeg")
self.assertEqual(desc, u"Description")
self.assertEqual(data, b"foo")
self.assertEqual(type_, APICType.COVER_FRONT)
def test_clear_images(self):
# cover case
image = self.song2.get_primary_image()
self.assertTrue(image)
self.song2.clear_images()
self.assertFalse(self.song2.has_images)
self.song2.reload()
image = self.song2.get_primary_image()
self.assertFalse(image)
# no cover case
self.song.clear_images()
def test_set_image(self):
fileobj = BytesIO(b"foo")
image = EmbeddedImage(fileobj, "image/jpeg", 10, 10, 8)
self.assertFalse(self.song.has_images)
self.song.set_image(image)
self.assertTrue(self.song.has_images)
image = self.song.get_primary_image()
self.assertEqual(image.mime_type, "image/jpeg")
self.assertEqual(image.read(), b"foo")
def test_can_change_images(self):
self.assertTrue(self.song.can_change_images)
def test_can_multiple_values(self):
self.assertTrue("artist" in self.song.can_multiple_values())
self.assertTrue(self.song.can_multiple_values("genre")) |
5,378 | get bios component | #
# Copyright (c) 2020-2023 NVIDIA CORPORATION & AFFILIATES.
# Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import glob
import os
from . import utils
DEVICE_DATA = {
'x86_64-mlnx_msn2700-r0': {
'thermal': {
"capability": {
"comex_amb": False
}
}
},
'x86_64-mlnx_msn2740-r0': {
'thermal': {
"capability": {
"cpu_pack": False,
"comex_amb": False
}
}
},
'x86_64-mlnx_msn2100-r0': {
'thermal': {
"capability": {
"cpu_pack": False,
"comex_amb": False
}
}
},
'x86_64-mlnx_msn2410-r0': {
'thermal': {
"capability": {
"comex_amb": False
}
}
},
'x86_64-mlnx_msn2010-r0': {
'thermal': {
"capability": {
"cpu_pack": False,
"comex_amb": False
}
}
},
'x86_64-mlnx_msn3700-r0': {
},
'x86_64-mlnx_msn3700c-r0': {
},
'x86_64-mlnx_msn3800-r0': {
},
'x86_64-mlnx_msn4700-r0': {
},
'x86_64-mlnx_msn4410-r0': {
},
'x86_64-mlnx_msn3420-r0': {
},
'x86_64-mlnx_msn4600c-r0': {
},
'x86_64-mlnx_msn4600-r0': {
},
'x86_64-nvidia_sn4800-r0': {
'thermal': {
"capability": {
"comex_amb": False
}
},
'sfp': {
'max_port_per_line_card': 16
}
},
'x86_64-nvidia_sn2201-r0': {
'thermal': {
"capability": {
"comex_amb": False,
"cpu_amb": True
}
}
},
'x86_64-nvidia_sn5600-r0': {
'thermal': {
"capability": {
"comex_amb": False,
"pch_temp": True
}
}
}
}
class DeviceDataManager:
@classmethod
@utils.read_only_cache()
def get_platform_name(cls):
from sonic_py_common import device_info
return device_info.get_platform()
@classmethod
@utils.read_only_cache()
def is_simx_platform(cls):
platform_name = cls.get_platform_name()
return platform_name and 'simx' in platform_name
@classmethod
@utils.read_only_cache()
def get_fan_drawer_count(cls):
# Here we don't read from /run/hw-management/config/hotplug_fans because the value in it is not
# always correct.
return len(glob.glob('/run/hw-management/thermal/fan*_status')) if cls.is_fan_hotswapable() else 1
@classmethod
@utils.read_only_cache()
def get_fan_count(cls):
return len(glob.glob('/run/hw-management/thermal/fan*_speed_get'))
@classmethod
@utils.read_only_cache()
def is_fan_hotswapable(cls):
return utils.read_int_from_file('/run/hw-management/config/hotplug_fans') > 0
@classmethod
@utils.read_only_cache()
def get_psu_count(cls):
psu_count = utils.read_int_from_file('/run/hw-management/config/hotplug_psus')
# If psu_count == 0, the platform has fixed PSUs
return psu_count if psu_count > 0 else len(glob.glob('/run/hw-management/config/psu*_i2c_addr'))
@classmethod
@utils.read_only_cache()
def is_psu_hotswapable(cls):
return utils.read_int_from_file('/run/hw-management/config/hotplug_psus') > 0
@classmethod
@utils.read_only_cache()
def get_sfp_count(cls):
return utils.read_int_from_file('/run/hw-management/config/sfp_counter')
@classmethod
def get_linecard_sfp_count(cls, lc_index):
return utils.read_int_from_file('/run/hw-management/lc{}/config/module_counter'.format(lc_index), log_func=None)
@classmethod
def get_gearbox_count(cls, sysfs_folder):
return utils.read_int_from_file(os.path.join(sysfs_folder, 'gearbox_counter'), log_func=None)
@classmethod
@utils.read_only_cache()
def get_cpu_thermal_count(cls):
return len(glob.glob('/run/hw-management/thermal/cpu_core[!_]'))
@classmethod
@utils.read_only_cache()
def get_sodimm_thermal_count(cls):
return len(glob.glob('/run/hw-management/thermal/sodimm*_temp_input'))
@classmethod
@utils.read_only_cache()
def get_thermal_capability(cls):
platform_data = DEVICE_DATA.get(cls.get_platform_name(), None)
if not platform_data:
return None
thermal_data = platform_data.get('thermal', None)
if not thermal_data:
return None
return thermal_data.get('capability', None)
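# Illustrative return value for 'x86_64-mlnx_msn2010-r0', per DEVICE_DATA above:
#   {"cpu_pack": False, "comex_amb": False}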
@classmethod
@utils.read_only_cache()
def get_linecard_count(cls):
return utils.read_int_from_file('/run/hw-management/config/hotplug_linecards', log_func=None)
@classmethod
@utils.read_only_cache()
def get_linecard_max_port_count(cls):
platform_data = DEVICE_DATA.get(cls.get_platform_name(), None)
if not platform_data:
return 0
sfp_data = platform_data.get('sfp', None)
if not sfp_data:
return 0
return sfp_data.get('max_port_per_line_card', 0)
@classmethod
def METHOD_NAME(cls):
from .component import ComponentBIOS, ComponentBIOSSN2201
if cls.get_platform_name() in ['x86_64-nvidia_sn2201-r0']:
# For SN2201, a special class is required to handle the BIOS.
# Currently, only fetching the BIOS version is supported.
return ComponentBIOSSN2201()
return ComponentBIOS()
@classmethod
def get_cpld_component_list(cls):
from .component import ComponentCPLD, ComponentCPLDSN2201
if cls.get_platform_name() in ['x86_64-nvidia_sn2201-r0']:
# For SN2201, a special class is required to handle the CPLDs.
# Currently, only fetching the CPLD version is supported.
return ComponentCPLDSN2201.get_component_list()
return ComponentCPLD.get_component_list() |
5,379 | tear down | from __future__ import print_function
import sys
if sys.version_info < (2, 7):
import unittest2 as unittest
else:
import unittest
import os
from . import session
from . import settings
from . import resource_suite
from .. import lib
class Test_Irmdir(resource_suite.ResourceBase, unittest.TestCase):
def setUp(self):
super(Test_Irmdir, self).setUp()
def METHOD_NAME(self):
super(Test_Irmdir, self).METHOD_NAME()
def test_irmdir_of_nonexistent_collection(self):
self.admin.assert_icommand(['irmdir', 'garbage_dir'], 'STDOUT_SINGLELINE', 'Collection does not exist')
def test_irmdir_of_dataobj(self):
filename = 'test_irmdir_of_dataobj'
lib.make_file(filename, 1024, 'arbitrary')
rods_filename = self.admin.session_collection + '/' + filename
self.admin.assert_icommand(['iput', filename, rods_filename])
self.admin.assert_icommand(['irmdir', rods_filename], 'STDOUT_SINGLELINE', 'Collection does not exist')
self.admin.assert_icommand(['irm', '-f', rods_filename])
os.unlink(filename)
def test_irmdir_of_collection_containing_dataobj(self):
filename = 'test_dataobj'
collname = 'test_collection'
lib.make_file(filename, 1024, 'arbitrary')
rods_collname = self.admin.session_collection + '/' + collname
rods_filename = rods_collname + '/' + filename
self.admin.assert_icommand(['imkdir', rods_collname])
self.admin.assert_icommand(['iput', filename, rods_filename])
self.admin.assert_icommand(['irmdir', rods_collname], 'STDOUT_SINGLELINE', 'Collection is not empty')
os.unlink(filename)
def test_irmdir_of_collection_containing_collection(self):
collname_1 = 'test_collection_1'
collname_2 = 'test_collection_2'
rods_collname_1 = self.admin.session_collection + '/' + collname_1
rods_collname_2 = rods_collname_1 + '/' + collname_2
self.admin.assert_icommand(['imkdir', rods_collname_1])
self.admin.assert_icommand(['imkdir', rods_collname_2])
self.admin.assert_icommand(['irmdir', rods_collname_1], 'STDOUT_SINGLELINE', 'Collection is not empty')
def test_irmdir_of_empty_collection(self):
collname = 'test_collection'
rods_collname = self.admin.session_collection + '/' + collname
self.admin.assert_icommand(['imkdir', rods_collname])
self.admin.assert_icommand(['irmdir', rods_collname])
# If irmdir failed, attempting to make a directory with the same name will also fail
self.admin.assert_icommand(['imkdir', rods_collname])
def test_irmdir_dash_p(self):
path = 'a/b/c'
col_d = 'd'
col_e = 'd/e'
abs_path = os.path.join(self.admin.session_collection, path)
abs_path_to_col_d = os.path.join(self.admin.session_collection, path, col_d)
self.admin.assert_icommand(['imkdir', '-p', os.path.join(path, col_e)])
self.admin.assert_icommand(['icd', path])
self.admin.assert_icommand(['ils'], 'STDOUT_MULTILINE', [abs_path, 'C- {0}'.format(abs_path_to_col_d)])
self.admin.assert_icommand(['irmdir', '-p', col_e])
self.admin.assert_icommand(['ils', col_e], 'STDERR', '/{0} does not exist '.format(col_e))
self.admin.assert_icommand(['icd', self.admin.session_collection])
self.admin.assert_icommand(['irmdir', '-p', path])
self.admin.assert_icommand(['ils', path], 'STDERR', '/{0} does not exist '.format(path))
# Trying to remove a collection that does not exist produces an error.
self.admin.assert_icommand(['irmdir', '-p', 'x/y/z'], 'STDERR', 'Collection does not exist')
# Trying to remove a collection that is not empty produces an error.
self.admin.assert_icommand(['imkdir', '-p', 'a/b/c'])
self.admin.assert_icommand(['imkdir', '-p', 'a/b/d'])
self.admin.assert_icommand(['irmdir', '-p', 'a/b'], 'STDERR', 'Collection is not empty')
self.admin.assert_icommand(['irmdir', 'a/b/c'])
self.admin.assert_icommand(['irmdir', 'a/b/d'])
self.admin.assert_icommand(['irmdir', '-p', 'a/b'])
# Trying to remove a data object produces an error.
filename = 'test_irmdir_of_dataobj'
lib.make_file(filename, 1024, 'arbitrary')
rods_filename = os.path.join(self.admin.session_collection, filename)
self.admin.assert_icommand(['iput', filename, rods_filename])
self.admin.assert_icommand(['irmdir', '-p', rods_filename], 'STDERR', 'Path does not point to a collection')
self.admin.assert_icommand(['irm', '-f', rods_filename])
os.unlink(filename)
def test_irmdir_no_input(self):
self.admin.assert_icommand('irmdir', 'STDOUT_SINGLELINE', 'No collection names specified.')
def test_irmdir_removes_collection_even_if_sibling_exists__issue_4788(self):
col_a = 'foo'
self.admin.assert_icommand(['imkdir', col_a])
col_b = 'foot'
self.admin.assert_icommand(['imkdir', col_b])
filename = 'issue_4788'
file_path = os.path.join(self.admin.local_session_dir, filename)
lib.make_file(file_path, 1024, 'arbitrary')
self.admin.assert_icommand(['iput', file_path, os.path.join(col_b, filename)])
self.admin.assert_icommand(['irmdir', col_a])
self.admin.assert_icommand(['ils', col_a], 'STDERR', ['{0} does not exist'.format(os.path.join(self.admin.session_collection, col_a))])
|
5,380 | assert feefilter received | #!/usr/bin/env python3
# Copyright (c) 2016-2021 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test processing of feefilter messages."""
from decimal import Decimal
from test_framework.messages import MSG_TX, MSG_WTX, msg_feefilter
from test_framework.p2p import P2PInterface, p2p_lock
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import assert_equal
from test_framework.wallet import MiniWallet
class FeefilterConn(P2PInterface):
feefilter_received = False
def on_feefilter(self, message):
self.feefilter_received = True
def METHOD_NAME(self, recv: bool):
with p2p_lock:
assert_equal(self.feefilter_received, recv)
class TestP2PConn(P2PInterface):
def __init__(self):
super().__init__()
self.txinvs = []
def on_inv(self, message):
for i in message.inv:
if (i.type == MSG_TX) or (i.type == MSG_WTX):
self.txinvs.append('{:064x}'.format(i.hash))
def wait_for_invs_to_match(self, invs_expected):
invs_expected.sort()
self.wait_until(lambda: invs_expected == sorted(self.txinvs))
def clear_invs(self):
with p2p_lock:
self.txinvs = []
class FeeFilterTest(BitcoinTestFramework):
def set_test_params(self):
self.num_nodes = 2
# We lower the various required feerates for this test
# to catch a corner-case where feefilter used to slightly undercut
# mempool and wallet feerate calculation based on GetFee
# rounding down 3 places, leading to stranded transactions.
# See issue #16499
# grant noban permission to all peers to speed up tx relay / mempool sync
self.extra_args = [[
"-minrelaytxfee=0.00000100",
"-mintxfee=0.00000100",
"-whitelist=noban@127.0.0.1",
]] * self.num_nodes
def run_test(self):
self.test_feefilter_forcerelay()
self.test_feefilter()
self.test_feefilter_blocksonly()
def test_feefilter_forcerelay(self):
self.log.info('Check that peers without forcerelay permission (default) get a feefilter message')
self.nodes[0].add_p2p_connection(FeefilterConn()).METHOD_NAME(True)
self.log.info('Check that peers with forcerelay permission do not get a feefilter message')
self.restart_node(0, extra_args=['-whitelist=forcerelay@127.0.0.1'])
self.nodes[0].add_p2p_connection(FeefilterConn()).METHOD_NAME(False)
# Restart to disconnect peers and load default extra_args
self.restart_node(0)
self.connect_nodes(1, 0)
def test_feefilter(self):
node1 = self.nodes[1]
node0 = self.nodes[0]
miniwallet = MiniWallet(node1)
conn = self.nodes[0].add_p2p_connection(TestP2PConn())
self.log.info("Test txs paying 0.2 sat/byte are received by test connection")
txids = [miniwallet.send_self_transfer(fee_rate=Decimal('0.00000200'), from_node=node1)['wtxid'] for _ in range(3)]
conn.wait_for_invs_to_match(txids)
conn.clear_invs()
# Set a fee filter of 0.15 sat/byte on test connection
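        # (msg_feefilter expresses the rate in sat/kvB, so 150 corresponds to 0.15 sat/vB)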
conn.send_and_ping(msg_feefilter(150))
self.log.info("Test txs paying 0.15 sat/byte are received by test connection")
txids = [miniwallet.send_self_transfer(fee_rate=Decimal('0.00000150'), from_node=node1)['wtxid'] for _ in range(3)]
conn.wait_for_invs_to_match(txids)
conn.clear_invs()
self.log.info("Test txs paying 0.1 sat/byte are no longer received by test connection")
txids = [miniwallet.send_self_transfer(fee_rate=Decimal('0.00000100'), from_node=node1)['wtxid'] for _ in range(3)]
self.sync_mempools() # must be sure node 0 has received all txs
        # Send one transaction from node0 that should be received, so that we
        # can sync the test on receipt (if node1's txs were relayed, they'd
# be received by the time this node0 tx is received). This is
# unfortunately reliant on the current relay behavior where we batch up
# to 35 entries in an inv, which means that when this next transaction
# is eligible for relay, the prior transactions from node1 are eligible
# as well.
txids = [miniwallet.send_self_transfer(fee_rate=Decimal('0.00020000'), from_node=node0)['wtxid'] for _ in range(1)]
conn.wait_for_invs_to_match(txids)
conn.clear_invs()
self.sync_mempools() # must be sure node 1 has received all txs
self.log.info("Remove fee filter and check txs are received again")
conn.send_and_ping(msg_feefilter(0))
txids = [miniwallet.send_self_transfer(fee_rate=Decimal('0.00020000'), from_node=node1)['wtxid'] for _ in range(3)]
conn.wait_for_invs_to_match(txids)
conn.clear_invs()
def test_feefilter_blocksonly(self):
"""Test that we don't send fee filters to block-relay-only peers and when we're in blocksonly mode."""
self.log.info("Check that we don't send fee filters to block-relay-only peers.")
feefilter_peer = self.nodes[0].add_outbound_p2p_connection(FeefilterConn(), p2p_idx=0, connection_type="block-relay-only")
feefilter_peer.sync_with_ping()
feefilter_peer.METHOD_NAME(False)
self.log.info("Check that we don't send fee filters when in blocksonly mode.")
self.restart_node(0, ["-blocksonly"])
feefilter_peer = self.nodes[0].add_p2p_connection(FeefilterConn())
feefilter_peer.sync_with_ping()
feefilter_peer.METHOD_NAME(False)
if __name__ == '__main__':
FeeFilterTest().main() |
5,381 | fetch report data | #
# Copyright (c) 2023 Airbyte, Inc., all rights reserved.
#
import json
from abc import abstractmethod
from typing import Any, Iterable, List, Mapping, MutableMapping, Optional
from urllib.parse import urljoin
import backoff
import requests
from airbyte_cdk.models import SyncMode
from source_pinterest.streams import PinterestAnalyticsStream
from source_pinterest.utils import get_analytics_columns
from .errors import ReportGenerationFailure, ReportGenerationInProgress, ReportStatusError, RetryableException
from .models import ReportInfo, ReportStatus, ReportStatusDetails
class PinterestAnalyticsReportStream(PinterestAnalyticsStream):
"""Class defining the stream of Pinterest Analytics Report
Details - https://developers.pinterest.com/docs/api/v5/#operation/analytics/create_report"""
http_method = "POST"
report_wait_timeout = 180
report_generation_maximum_retries = 5
@property
def window_in_days(self):
        return 185  # 185-day date window; with inclusive start and end dates this spans 186 days
@property
@abstractmethod
def level(self):
""":return: level on which report should be run"""
@staticmethod
def _build_api_path(account_id: str) -> str:
"""Build the API path for the given account id."""
return f"ad_accounts/{account_id}/reports"
def path(self, stream_slice: Mapping[str, Any] = None, **kwargs) -> str:
"""Get the path (i.e. URL) for the stream."""
return self._build_api_path(stream_slice["parent"]["id"])
def _construct_request_body(self, start_date: str, end_date: str, granularity: str, columns: str) -> dict:
"""Construct the body of the API request."""
return {
"start_date": start_date,
"end_date": end_date,
"granularity": granularity,
"columns": columns.split(","),
"level": self.level,
}
def request_body_json(self, stream_slice: Mapping[str, Any] = None, **kwargs) -> Optional[Mapping]:
"""Return the body of the API request in JSON format."""
return self._construct_request_body(stream_slice["start_date"], stream_slice["end_date"], self.granularity, get_analytics_columns())
def request_params(
self, stream_state: Mapping[str, Any], stream_slice: Mapping[str, any] = None, next_page_token: Mapping[str, Any] = None
) -> MutableMapping[str, Any]:
"""Return the request parameters."""
return {}
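    # The two helpers below are plain functions used as decorators within this class;
    # defining them here lets the retry limits come from instance attributes
    # (report_wait_timeout, report_generation_maximum_retries) at call time.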
def backoff_max_time(func):
def wrapped(self, *args, **kwargs):
return backoff.on_exception(backoff.constant, RetryableException, max_time=self.report_wait_timeout * 60, interval=10)(func)(
self, *args, **kwargs
)
return wrapped
def backoff_max_tries(func):
def wrapped(self, *args, **kwargs):
return backoff.on_exception(backoff.expo, ReportGenerationFailure, max_tries=self.report_generation_maximum_retries)(func)(
self, *args, **kwargs
)
return wrapped
def read_records(
self,
sync_mode: SyncMode,
cursor_field: List[str] = None,
stream_slice: Mapping[str, Any] = None,
stream_state: Mapping[str, Any] = None,
) -> Iterable[Mapping[str, Any]]:
"""Read the records from the stream."""
report_infos = self._init_reports(super().read_records(sync_mode, cursor_field, stream_slice, stream_state))
self._try_read_records(report_infos, stream_slice)
for report_info in report_infos:
metrics = report_info.metrics
for campaign_id, records in metrics.items():
self.logger.info(f"Reports for campaign id: {campaign_id}:")
yield from records
@backoff_max_time
def _try_read_records(self, report_infos, stream_slice):
"""Try to read the records and raise appropriate exceptions in case of failure or in-progress status."""
incomplete_report_infos = self._incomplete_report_infos(report_infos)
for report_info in incomplete_report_infos:
report_status, report_url = self._verify_report_status(report_info, stream_slice)
report_info.report_status = report_status
if report_status in {ReportStatus.DOES_NOT_EXIST, ReportStatus.EXPIRED, ReportStatus.FAILED, ReportStatus.CANCELLED}:
message = "Report generation failed."
raise ReportGenerationFailure(message)
elif report_status == ReportStatus.FINISHED:
try:
report_info.metrics = self.METHOD_NAME(report_url)
except requests.HTTPError as error:
raise ReportGenerationFailure(error)
pending_report_status = [report_info for report_info in report_infos if report_info.report_status != ReportStatus.FINISHED]
if len(pending_report_status) > 0:
message = "Report generation in progress."
raise ReportGenerationInProgress(message)
def _incomplete_report_infos(self, report_infos):
"""Return the report infos which are not yet finished."""
return [r for r in report_infos if r.report_status != ReportStatus.FINISHED]
def parse_response(self, response: requests.Response, stream_state: Mapping[str, Any], **kwargs) -> Iterable[Mapping]:
"""Parse the API response."""
yield response.json()
@backoff_max_tries
def _init_reports(self, init_reports) -> List[ReportInfo]:
"""Initialize the reports and return them as a list."""
report_infos = []
for init_report in init_reports:
status = ReportInfo.parse_raw(json.dumps(init_report))
report_infos.append(
ReportInfo(
token=status.token,
report_status=ReportStatus.IN_PROGRESS,
metrics=[],
)
)
self.logger.info("Initiated successfully.")
return report_infos
def _http_get(self, url, params=None, headers=None):
"""Make a GET request to the given URL and return the response as a JSON."""
response = self._session.get(url, params=params, headers=headers)
response.raise_for_status()
return response.json()
def _verify_report_status(self, report: dict, stream_slice: Mapping[str, Any]) -> tuple:
"""Verify the report status and return it along with the report URL."""
api_path = self._build_api_path(stream_slice["parent"]["id"])
response_data = self._http_get(
urljoin(self.url_base, api_path), params={"token": report.token}, headers=self.authenticator.get_auth_header()
)
try:
report_status = ReportStatusDetails.parse_raw(json.dumps(response_data))
except ValueError as error:
raise ReportStatusError(error)
return report_status.report_status, report_status.url
def METHOD_NAME(self, url: str) -> dict:
"""Fetch the report data from the given URL."""
return self._http_get(url)
class CampaignAnalyticsReport(PinterestAnalyticsReportStream):
@property
def level(self):
return "CAMPAIGN" |
5,382 | lap pause | import json
import os
import rclpy
import cv2
import sys
import base64
import threading
import time
import numpy as np
from datetime import datetime
from websocket_server import WebsocketServer
import multiprocessing
import logging
from interfaces.pose3d import ListenerPose3d
from shared.image import SharedImage
from shared.value import SharedValue
from lap import Lap
from map import Map
# Graphical User Interface Class
class GUI:
# Initialization function
# The actual initialization
def __init__(self, host, circuit):
rclpy.init()
rclpy.create_node('GUI')
        self.payload = {'image': '', 'lap': '', 'map': '', 'v': '', 'w': ''}
self.server = None
self.client = None
self.host = host
# Circuit
self.circuit = circuit
        # Image variable host
        self.shared_image = SharedImage("guiimage")
        # State used by showImage(); the lock guards the hand-off from the user thread
        self.image_to_be_shown = None
        self.image_to_be_shown_updated = False
        self.image_show_lock = threading.Lock()
# Get HAL variables
self.shared_v = SharedValue("velocity")
self.shared_w = SharedValue("angular")
# Create the lap object
pose3d_object = ListenerPose3d("/odom")
self.lap = Lap(pose3d_object)
self.map = Map(pose3d_object, self.circuit)
# Event objects for multiprocessing
self.ack_event = multiprocessing.Event()
self.cli_event = multiprocessing.Event()
# Start server thread
t = threading.Thread(target=self.run_server)
t.start()
    # Function to prepare the image payload
    # Encodes the image as a base64 JPEG and wraps it in a JSON-serializable dict
def payloadImage(self):
image = self.shared_image.get()
payload = {'image': '', 'shape': ''}
shape = image.shape
frame = cv2.imencode('.JPEG', image)[1]
encoded_image = base64.b64encode(frame)
payload['image'] = encoded_image.decode('utf-8')
payload['shape'] = shape
return payload
# Function for student to call
def showImage(self, image):
self.image_show_lock.acquire()
self.image_to_be_shown = image
self.image_to_be_shown_updated = True
self.image_show_lock.release()
# Function to get the client
# Called when a new client is received
def get_client(self, client, server):
self.client = client
self.cli_event.set()
print(client, 'connected')
# Update the gui
def update_gui(self):
# Payload Image Message
payload = self.payloadImage()
self.payload["image"] = json.dumps(payload)
# Payload Lap Message
lapped = self.lap.check_threshold()
self.payload["lap"] = ""
if(lapped != None):
self.payload["lap"] = str(lapped)
# Payload Map Message
pos_message = str(self.map.getFormulaCoordinates())
self.payload["map"] = pos_message
# Payload V Message
v_message = str(self.shared_v.get())
self.payload["v"] = v_message
# Payload W Message
w_message = str(self.shared_w.get())
self.payload["w"] = w_message
message = "#gui" + json.dumps(self.payload)
self.server.send_message(self.client, message)
# Function to read the message from websocket
# Gets called when there is an incoming message from the client
def get_message(self, client, server, message):
# Acknowledge Message for GUI Thread
if(message[:4] == "#ack"):
# Set acknowledgement flag
self.ack_event.set()
# Pause message
elif(message[:5] == "#paus"):
self.lap.pause()
# Unpause message
elif(message[:5] == "#resu"):
self.lap.unpause()
# Reset message
elif(message[:5] == "#rest"):
self.reset_gui()
# Function that gets called when the connected closes
def handle_close(self, client, server):
print(client, 'closed')
# Activate the server
def run_server(self):
self.server = WebsocketServer(port=2303, host=self.host)
self.server.set_fn_new_client(self.get_client)
self.server.set_fn_message_received(self.get_message)
self.server.set_fn_client_left(self.handle_close)
home_dir = os.path.expanduser('~')
logged = False
while not logged:
try:
f = open(f"{home_dir}/ws_gui.log", "w")
f.write("websocket_gui=ready")
f.close()
logged = True
except:
time.sleep(0.1)
self.server.run_forever()
# Function to reset
def reset_gui(self):
self.lap.reset()
self.map.reset()
# This class decouples the user thread
# and the GUI update thread
class ProcessGUI(multiprocessing.Process):
def __init__(self):
super(ProcessGUI, self).__init__()
self.host = sys.argv[1]
# Circuit
self.circuit = sys.argv[2]
# Time variables
self.time_cycle = SharedValue("gui_time_cycle")
self.ideal_cycle = SharedValue("gui_ideal_cycle")
self.iteration_counter = 0
# Function to initialize events
def initialize_events(self):
# Events
self.ack_event = self.gui.ack_event
self.cli_event = self.gui.cli_event
self.exit_signal = multiprocessing.Event()
# Function to start the execution of threads
def run(self):
# Initialize GUI
self.gui = GUI(self.host, self.circuit)
self.initialize_events()
# Wait for client before starting
self.cli_event.wait()
self.measure_thread = threading.Thread(target=self.measure_thread)
self.thread = threading.Thread(target=self.run_gui)
self.measure_thread.start()
self.thread.start()
print("GUI Process Started!")
self.exit_signal.wait()
    # Thread that measures the real update frequency of the GUI loop
def measure_thread(self):
previous_time = datetime.now()
while(True):
# Sleep for 2 seconds
time.sleep(2)
# Measure the current time and subtract from previous time to get real time interval
current_time = datetime.now()
dt = current_time - previous_time
ms = (dt.days * 24 * 60 * 60 + dt.seconds) * 1000 + dt.microseconds / 1000.0
previous_time = current_time
# Get the time period
try:
                # Guard against division by zero when no iterations have run
self.ideal_cycle.add(ms / self.iteration_counter)
            except ZeroDivisionError:
self.ideal_cycle.add(0)
# Reset the counter
self.iteration_counter = 0
# The main thread of execution
def run_gui(self):
while(True):
start_time = datetime.now()
# Send update signal
self.gui.update_gui()
            # Wait for the acknowledge signal
self.ack_event.wait()
self.ack_event.clear()
finish_time = datetime.now()
self.iteration_counter = self.iteration_counter + 1
dt = finish_time - start_time
ms = (dt.days * 24 * 60 * 60 + dt.seconds) * 1000 + dt.microseconds / 1000.0
time_cycle = self.time_cycle.get()
if(ms < time_cycle):
time.sleep((time_cycle-ms) / 1000.0)
self.exit_signal.set()
# Functions to handle auxiliary GUI functions; they operate on a ProcessGUI instance
def reset_gui(process):
    process.gui.reset_gui()
def METHOD_NAME(process):
    process.gui.lap.pause()
def lap_unpause(process):
    process.gui.lap.unpause()
if __name__ == "__main__":
gui = ProcessGUI()
gui.start() |
5,383 | test prediction proba linear | # -*- coding: utf-8 -*-
from __future__ import division
from __future__ import print_function
import os
import sys
import unittest
# noinspection PyProtectedMember
from numpy.testing import assert_equal
from numpy.testing import assert_raises
from sklearn.base import clone
from sklearn.metrics import roc_auc_score
# temporary solution for relative imports in case pyod is not installed
# if pyod is installed, no need to use the following line
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
from pyod.models.lmdd import LMDD
from pyod.utils.data import generate_data
class TestLMDD(unittest.TestCase):
def setUp(self):
self.n_train = 100
self.n_test = 50
self.contamination = 0.1
self.roc_floor = 0.6
self.X_train, self.X_test, self.y_train, self.y_test = generate_data(
n_train=self.n_train, n_test=self.n_test,
contamination=self.contamination, random_state=42)
self.clf = LMDD(contamination=self.contamination, random_state=42)
self.clf.fit(self.X_train)
def test_sklearn_estimator(self):
# check_estimator(self.clf)
pass
def test_parameters(self):
assert (hasattr(self.clf, 'decision_scores_') and
self.clf.decision_scores_ is not None)
assert (hasattr(self.clf, 'labels_') and
self.clf.labels_ is not None)
assert (hasattr(self.clf, 'threshold_') and
self.clf.threshold_ is not None)
assert (hasattr(self.clf, 'dis_measure_') and
self.clf.dis_measure_ is not None)
assert (hasattr(self.clf, 'n_iter_') and
self.clf.n_iter_ is not None)
assert (hasattr(self.clf, 'random_state_') and
self.clf.random_state_ is not None)
def test_train_scores(self):
assert_equal(len(self.clf.decision_scores_), self.X_train.shape[0])
def test_prediction_scores(self):
pred_scores = self.clf.decision_function(self.X_test)
# check score shapes
assert_equal(pred_scores.shape[0], self.X_test.shape[0])
# check performance
assert (roc_auc_score(self.y_test, pred_scores) >= self.roc_floor)
def test_prediction_labels(self):
pred_labels = self.clf.predict(self.X_test)
assert_equal(pred_labels.shape, self.y_test.shape)
def test_prediction_proba(self):
pred_proba = self.clf.predict_proba(self.X_test)
assert (pred_proba.min() >= 0)
assert (pred_proba.max() <= 1)
def METHOD_NAME(self):
pred_proba = self.clf.predict_proba(self.X_test, method='linear')
assert (pred_proba.min() >= 0)
assert (pred_proba.max() <= 1)
def test_prediction_proba_unify(self):
pred_proba = self.clf.predict_proba(self.X_test, method='unify')
assert (pred_proba.min() >= 0)
assert (pred_proba.max() <= 1)
def test_prediction_proba_parameter(self):
with assert_raises(ValueError):
self.clf.predict_proba(self.X_test, method='something')
def test_prediction_labels_confidence(self):
pred_labels, confidence = self.clf.predict(self.X_test,
return_confidence=True)
assert_equal(pred_labels.shape, self.y_test.shape)
assert_equal(confidence.shape, self.y_test.shape)
assert (confidence.min() >= 0)
assert (confidence.max() <= 1)
def test_prediction_proba_linear_confidence(self):
pred_proba, confidence = self.clf.predict_proba(self.X_test,
method='linear',
return_confidence=True)
assert (pred_proba.min() >= 0)
assert (pred_proba.max() <= 1)
assert_equal(confidence.shape, self.y_test.shape)
assert (confidence.min() >= 0)
assert (confidence.max() <= 1)
def test_fit_predict(self):
pred_labels = self.clf.fit_predict(self.X_train)
assert_equal(pred_labels.shape, self.y_train.shape)
def test_fit_predict_score(self):
self.clf.fit_predict_score(self.X_test, self.y_test)
self.clf.fit_predict_score(self.X_test, self.y_test,
scoring='roc_auc_score')
self.clf.fit_predict_score(self.X_test, self.y_test,
scoring='prc_n_score')
with assert_raises(NotImplementedError):
self.clf.fit_predict_score(self.X_test, self.y_test,
scoring='something')
def test_check_parameters(self):
with assert_raises(ValueError):
LMDD(contamination=10.)
with assert_raises(ValueError):
LMDD(dis_measure='unknown')
with assert_raises(TypeError):
LMDD(dis_measure=5)
with assert_raises(TypeError):
LMDD(n_iter='not int')
with assert_raises(ValueError):
LMDD(n_iter=-1)
with assert_raises(ValueError):
LMDD(random_state='not valid')
with assert_raises(ValueError):
LMDD(random_state=-1)
def test_model_clone(self):
clone_clf = clone(self.clf)
def tearDown(self):
pass
if __name__ == '__main__':
unittest.main() |
5,384 | wrapper | def x():
return
#? None
x()
def array(first_param):
#? ['first_param']
first_param
return list()
#? []
array.first_param
#? []
array.first_param.
func = array
#? []
func.first_param
#? list()
array()
#? ['array']
arr
def inputs(param):
return param
#? list
inputs(list)
def variable_middle():
var = 3
return var
#? int()
variable_middle()
def variable_rename(param):
var = param
return var
#? int()
variable_rename(1)
def multi_line_func(a, # comment blabla
b):
return b
#? str()
multi_line_func(1,'')
def multi_line_call(b):
return b
multi_line_call(
#? int()
b=1)
# nothing after comma
def asdf(a):
return a
x = asdf(a=1,
)
#? int()
x
# -----------------
# double execution
# -----------------
def double_exe(param):
return param
#? str()
variable_rename(double_exe)("")
# -> shouldn't work (and throw no error)
#? []
variable_rename(list())().
#? []
variable_rename(1)().
# -----------------
# recursions (should ignore)
# -----------------
def recursion(a, b):
if a:
return b
else:
return recursion(a+".", b+1)
# Does not also return int anymore, because we now support operators in simple cases.
#? float()
recursion("a", 1.0)
def other(a):
return recursion2(a)
def recursion2(a):
if random.choice([0, 1]):
return other(a)
else:
if random.choice([0, 1]):
return recursion2("")
else:
return a
#? int() str()
recursion2(1)
# -----------------
# ordering
# -----------------
def a():
#? int()
b()
return b()
def b():
return 1
#? int()
a()
# -----------------
# keyword arguments
# -----------------
def func(a=1, b=''):
return a, b
exe = func(b=list, a=tuple)
#? tuple
exe[0]
#? list
exe[1]
# -----------------
# default arguments
# -----------------
#? int()
func()[0]
#? str()
func()[1]
#? float()
func(1.0)[0]
#? str()
func(1.0)[1]
#? float()
func(a=1.0)[0]
#? str()
func(a=1.0)[1]
#? int()
func(b=1.0)[0]
#? float()
func(b=1.0)[1]
#? list
func(a=list, b=set)[0]
#? set
func(a=list, b=set)[1]
def func_default(a, b=1):
return a, b
def nested_default(**kwargs):
return func_default(**kwargs)
#? float()
nested_default(a=1.0)[0]
#? int()
nested_default(a=1.0)[1]
#? str()
nested_default(a=1.0, b='')[1]
# Defaults should only work if they are defined before - not after.
def default_function(a=default):
#?
return a
#?
default_function()
default = int()
def default_function(a=default):
#? int()
return a
#? int()
default_function()
def default(a=default):
#? int()
a
# -----------------
# closures
# -----------------
def a():
l = 3
def func_b():
l = ''
#? str()
l
#? ['func_b']
func_b
#? int()
l
# -----------------
# *args
# -----------------
def args_func(*args):
#? tuple()
return args
exe = args_func(1, "")
#? int()
exe[0]
#? str()
exe[1]
# illegal args (TypeError)
#?
args_func(*1)[0]
# iterator
#? int()
args_func(*iter([1]))[0]
# different types
e = args_func(*[1 if UNDEFINED else "", {}])
#? int() str()
e[0]
#? dict()
e[1]
_list = [1,""]
exe2 = args_func(_list)[0]
#? str()
exe2[1]
exe3 = args_func([1,""])[0]
#? str()
exe3[1]
def args_func(arg1, *args):
return arg1, args
exe = args_func(1, "", list)
#? int()
exe[0]
#? tuple()
exe[1]
#? list
exe[1][1]
# In a dynamic search, both inputs should be given.
def simple(a):
#? int() str()
return a
def xargs(*args):
return simple(*args)
xargs(1)
xargs('')
# *args without a self symbol
def memoize(func):
def METHOD_NAME(*args, **kwargs):
return func(*args, **kwargs)
return METHOD_NAME
class Something():
@memoize
def x(self, a, b=1):
return a
#? int()
Something().x(1)
# -----------------
# ** kwargs
# -----------------
def kwargs_func(**kwargs):
#? ['keys']
kwargs.keys
#? dict()
return kwargs
exe = kwargs_func(a=3,b=4.0)
#? dict()
exe
#? int()
exe['a']
#? float()
exe['b']
#? int() float()
exe['c']
a = 'a'
exe2 = kwargs_func(**{a:3,
'b':4.0})
#? int()
exe2['a']
#? float()
exe2['b']
#? int() float()
exe2['c']
exe3 = kwargs_func(**{k: v for k, v in [(a, 3), ('b', 4.0)]})
# Should resolve to the same as 2 but jedi is not smart enough yet
# Here to make sure it doesn't result in crash though
#?
exe3['a']
#?
exe3['b']
#?
exe3['c']
# -----------------
# *args / ** kwargs
# -----------------
def func_without_call(*args, **kwargs):
#? tuple()
args
#? dict()
kwargs
def fu(a=1, b="", *args, **kwargs):
return a, b, args, kwargs
exe = fu(list, 1, "", c=set, d="")
#? list
exe[0]
#? int()
exe[1]
#? tuple()
exe[2]
#? str()
exe[2][0]
#? dict()
exe[3]
#? set
exe[3]['c']
def kwargs_iteration(**kwargs):
return kwargs
for x in kwargs_iteration(d=3):
#? float()
{'d': 1.0, 'c': '1'}[x]
# -----------------
# nested *args
# -----------------
def function_args(a, b, c):
return b
def nested_args(*args):
return function_args(*args)
def nested_args2(*args, **kwargs):
return nested_args(*args)
#? int()
nested_args('', 1, 1.0, list)
#? []
nested_args('').
#? int()
nested_args2('', 1, 1.0)
#? []
nested_args2('').
# -----------------
# nested **kwargs
# -----------------
def nested_kw(**kwargs1):
return function_args(**kwargs1)
def nested_kw2(**kwargs2):
return nested_kw(**kwargs2)
# invalid command, doesn't need to return anything
#?
nested_kw(b=1, c=1.0, list)
#? int()
nested_kw(b=1)
# invalid command, doesn't need to return anything
#?
nested_kw(d=1.0, b=1, list)
#? int()
nested_kw(a=3.0, b=1)
#? int()
nested_kw(b=1, a=r"")
#? []
nested_kw(1, '').
#? []
nested_kw(a='').
#? int()
nested_kw2(b=1)
#? int()
nested_kw2(b=1, c=1.0)
#? int()
nested_kw2(c=1.0, b=1)
#? []
nested_kw2('').
#? []
nested_kw2(a='').
#? []
nested_kw2('', b=1).
# -----------------
# nested *args/**kwargs
# -----------------
def nested_both(*args, **kwargs):
return function_args(*args, **kwargs)
def nested_both2(*args, **kwargs):
return nested_both(*args, **kwargs)
# invalid commands, may return whatever.
#? list
nested_both('', b=1, c=1.0, list)
#? list
nested_both('', c=1.0, b=1, list)
#? []
nested_both('').
#? int()
nested_both2('', b=1, c=1.0)
#? int()
nested_both2('', c=1.0, b=1)
#? []
nested_both2('').
# -----------------
# nested *args/**kwargs with a default arg
# -----------------
def function_def(a, b, c):
return a, b
def nested_def(a, *args, **kwargs):
return function_def(a, *args, **kwargs)
def nested_def2(*args, **kwargs):
return nested_def(*args, **kwargs)
#? str()
nested_def2('', 1, 1.0)[0]
#? str()
nested_def2('', b=1, c=1.0)[0]
#? str()
nested_def2('', c=1.0, b=1)[0]
#? int()
nested_def2('', 1, 1.0)[1]
#? int()
nested_def2('', b=1, c=1.0)[1]
#? int()
nested_def2('', c=1.0, b=1)[1]
#? []
nested_def2('')[1].
# -----------------
# magic methods
# -----------------
def a(): pass
#? ['__closure__']
a.__closure__ |
5,385 | get value rows | import math
import functools
import collections
import statistics
from visidata import vd, Progress, Column, vlen
from visidata import *
vd.option('null_value', None, 'a value to be counted as null', replay=True)
@Column.api
def METHOD_NAME(self, rows):
'Generate (value, row) for each row in *rows* at this column, excluding null and error values.'
f = self.sheet.isNullFunc()
for r in Progress(rows, 'calculating'):
try:
v = self.getTypedValue(r)
if not f(v):
yield v, r
except Exception:
pass
@Column.api
def getValues(self, rows):
'Generate value for each row in *rows* at this column, excluding null and error values.'
for v, r in self.METHOD_NAME(rows):
yield v
vd.aggregators = collections.OrderedDict() # [aggname] -> annotated func, or list of same
Column.init('aggstr', str, copy=True)
def aggregators_get(col):
    'A space-separated list of aggregator names for this column.'
return list(vd.aggregators[k] for k in (col.aggstr or '').split())
def aggregators_set(col, aggs):
if isinstance(aggs, str):
newaggs = []
for agg in aggs.split():
if agg not in vd.aggregators:
vd.fail(f'unknown aggregator {agg}')
newaggs.append(agg)
elif aggs is None:
newaggs = ''
else:
newaggs = [agg.name for agg in aggs]
col.aggstr = ' '.join(newaggs)
Column.aggregators = property(aggregators_get, aggregators_set)
class Aggregator:
def __init__(self, name, type, func, helpstr='foo'):
'Define aggregator `name` that calls func(col, rows)'
self.type = type
self.func = func
self.helpstr = helpstr
self.name = name
def __call__(self, *args, **kwargs):
return self.func(*args, **kwargs)
_defaggr = Aggregator
@VisiData.api
def aggregator(vd, name, func, helpstr='', *args, type=None):
'Define simple aggregator *name* that calls ``func(values, *args)`` to aggregate *values*. Use *type* to force the default type of the aggregated column.'
def _func(col, rows): # wrap builtins so they can have a .type
vals = list(col.getValues(rows))
try:
return func(vals, *args)
except Exception as e:
if len(vals) == 0:
return None
return e
vd.aggregators[name] = _defaggr(name, type, _func, helpstr)
vd.addGlobals({name: func})
## specific aggregator implementations
def mean(vals):
vals = list(vals)
if vals:
return float(sum(vals))/len(vals)
def vsum(vals):
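    # Start the sum from the zero value of the elements' own type, so sums of
    # non-int types keep their type (see #1996).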
return sum(vals, start=type(vals[0] if len(vals) else 0)()) #1996
# http://code.activestate.com/recipes/511478-finding-the-percentile-of-the-values/
def _percentile(N, percent, key=lambda x:x):
"""
Find the percentile of a list of values.
@parameter N - is a list of values. Note N MUST BE already sorted.
@parameter percent - a float value from 0.0 to 1.0.
@parameter key - optional key function to compute value from each element of N.
@return - the percentile of the values
"""
if not N:
return None
k = (len(N)-1) * percent
f = math.floor(k)
c = math.ceil(k)
if f == c:
return key(N[int(k)])
d0 = key(N[int(f)]) * (c-k)
d1 = key(N[int(c)]) * (k-f)
return d0+d1
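# Worked example (not from the original source): for N = [1, 2, 3, 4] and percent = 0.5,
# k = (4 - 1) * 0.5 = 1.5, so the result interpolates the 2nd and 3rd values:
# 2 * 0.5 + 3 * 0.5 = 2.5.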
@functools.lru_cache(100)
def percentile(pct, helpstr=''):
return _defaggr('p%s'%pct, None, lambda col,rows,pct=pct: _percentile(sorted(col.getValues(rows)), pct/100), helpstr)
def quantiles(q, helpstr):
return [percentile(round(100*i/q), helpstr) for i in range(1, q)]
vd.aggregator('min', min, 'minimum value')
vd.aggregator('max', max, 'maximum value')
vd.aggregator('avg', mean, 'arithmetic mean of values', type=float)
vd.aggregator('mean', mean, 'arithmetic mean of values', type=float)
vd.aggregator('median', statistics.median, 'median of values')
vd.aggregator('mode', statistics.mode, 'mode of values')
vd.aggregator('sum', vsum, 'sum of values')
vd.aggregator('distinct', set, 'distinct values', type=vlen)
vd.aggregator('count', lambda values: sum(1 for v in values), 'number of values', type=int)
vd.aggregator('list', list, 'list of values')
vd.aggregator('stdev', statistics.stdev, 'standard deviation of values', type=float)
vd.aggregators['q3'] = quantiles(3, 'tertiles (33/66th pctile)')
vd.aggregators['q4'] = quantiles(4, 'quartiles (25/50/75th pctile)')
vd.aggregators['q5'] = quantiles(5, 'quintiles (20/40/60/80th pctiles)')
vd.aggregators['q10'] = quantiles(10, 'deciles (10/20/30/40/50/60/70/80/90th pctiles)')
# since bb29b6e, a record of every aggregator
# is needed in vd.aggregators
for pct in (10, 20, 25, 30, 33, 40, 50, 60, 67, 70, 75, 80, 90, 95, 99):
vd.aggregators[f'p{pct}'] = percentile(pct, f'{pct}th percentile')
# returns keys of the row with the max value
vd.aggregators['keymax'] = _defaggr('keymax', anytype, lambda col, rows: col.sheet.rowkey(max(col.METHOD_NAME(rows))[1]), 'key of the maximum value')
ColumnsSheet.columns += [
Column('aggregators',
getter=lambda c,r:r.aggstr,
setter=lambda c,r,v:setattr(r, 'aggregators', v),
help='change the metrics calculated in every Frequency or Pivot derived from the source sheet')
]
@Sheet.api
def addAggregators(sheet, cols, aggrnames):
'Add each aggregator in list of *aggrnames* to each of *cols*.'
for aggrname in aggrnames:
aggrs = vd.aggregators.get(aggrname)
aggrs = aggrs if isinstance(aggrs, list) else [aggrs]
for aggr in aggrs:
for c in cols:
if not hasattr(c, 'aggregators'):
c.aggregators = []
if aggr and aggr not in c.aggregators:
c.aggregators += [aggr]
@Column.api
def aggname(col, agg):
'Consistent formatting of the name of given aggregator for this column. e.g. "col1_sum"'
return '%s_%s' % (col.name, agg.name)
@Column.api
@asyncthread
def memo_aggregate(col, agg, rows):
'Show aggregated value in status, and add to memory.'
aggval = agg(col, rows)
typedval = wrapply(agg.type or col.type, aggval)
dispval = col.format(typedval)
k = col.name+'_'+agg.name
vd.status(f'{k}={dispval}')
vd.memory[k] = typedval
@VisiData.property
def aggregator_choices(vd):
return [
{'key': agg, 'desc': v[0].helpstr if isinstance(v, list) else v.helpstr} for agg, v in vd.aggregators.items()
]
Sheet.addCommand('+', 'aggregate-col', 'addAggregators([cursorCol], chooseMany(aggregator_choices))', 'Add aggregator to current column')
Sheet.addCommand('z+', 'memo-aggregate', 'for agg in chooseMany(aggregator_choices): cursorCol.memo_aggregate(aggregators[agg], selectedRows or rows)', 'memo result of aggregator over values in selected rows for current column')
ColumnsSheet.addCommand('g+', 'aggregate-cols', 'addAggregators(selectedRows or source[0].nonKeyVisibleCols, chooseMany(aggregator_choices))', 'add aggregators to selected source columns')
vd.addMenuItems('''
Column > Add aggregator > aggregate-col
''') |
5,386 | run | # -----------------------------------------------------------------------------
# Copyright (c) 2012 - 2023, Anaconda, Inc., and Bokeh Contributors.
# All rights reserved.
#
# The full license is in the file LICENSE.txt, distributed with this software.
# -----------------------------------------------------------------------------
""" Thoroughly document Bokeh property attributes.
The ``bokeh-prop`` directive generates documentation for Bokeh model properties,
including cross links to the relevant property types. Additionally, any
per-attribute help strings are also displayed.
This directive takes the name *(class.attr)* of a Bokeh property as its
argument and the module as an option:
.. code-block:: rest
.. bokeh-prop:: Bar.thing
:module: bokeh.sphinxext.sample
Examples
--------
For the following definition of ``bokeh.sphinxext.sample.Bar``:
.. code-block:: python
class Bar(Model):
''' This is a Bar model. '''
thing = List(Int, help="doc for thing")
the above usage yields the output:
.. bokeh-prop:: Bar.thing
:module: bokeh.sphinxext.sample
The ``bokeh-prop`` directive may be used explicitly, but it can also be used
in conjunction with the :ref:`bokeh.sphinxext.bokeh_autodoc` extension.
"""
# -----------------------------------------------------------------------------
# Boilerplate
# -----------------------------------------------------------------------------
from __future__ import annotations
import logging # isort:skip
log = logging.getLogger(__name__)
# -----------------------------------------------------------------------------
# Imports
# -----------------------------------------------------------------------------
# Standard library imports
import importlib
import textwrap
import warnings
# External imports
from docutils.parsers.rst.directives import unchanged
from sphinx.errors import SphinxError
# Bokeh imports
from bokeh.core.property._sphinx import type_link
from bokeh.util.warnings import BokehDeprecationWarning
# Bokeh imports
from . import PARALLEL_SAFE
from .bokeh_directive import BokehDirective
from .templates import PROP_DETAIL
# -----------------------------------------------------------------------------
# Globals and constants
# -----------------------------------------------------------------------------
__all__ = (
"BokehPropDirective",
"setup",
)
# -----------------------------------------------------------------------------
# General API
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
# Dev API
# -----------------------------------------------------------------------------
class BokehPropDirective(BokehDirective):
has_content = True
required_arguments = 1
optional_arguments = 2
option_spec = {"module": unchanged, "type": unchanged}
def METHOD_NAME(self):
full_name = self.arguments[0]
model_name, prop_name = full_name.rsplit(".")
module_name = self.options["module"]
try:
module = importlib.import_module(module_name)
except ImportError:
raise SphinxError(f"Could not generate reference docs for {full_name}: could not import module {module_name}")
model = getattr(module, model_name, None)
if model is None:
raise SphinxError(f"Unable to generate reference docs for {full_name}: no model {model_name} in module {module_name}")
# We may need to instantiate deprecated objects as part of documenting
# them in the reference guide. Suppress any warnings here to keep the
# docs build clean just for this case
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=BokehDeprecationWarning)
model_obj = model()
try:
descriptor = model_obj.lookup(prop_name)
except AttributeError:
raise SphinxError(f"Unable to generate reference docs for {full_name}: no property {prop_name} on model {model_name}")
rst_text = PROP_DETAIL.render(
name=prop_name,
module=self.options["module"],
default=repr(descriptor.instance_default(model_obj)),
type_info=type_link(descriptor.property),
doc="" if descriptor.__doc__ is None else textwrap.dedent(descriptor.__doc__),
)
return self.parse(rst_text, "<bokeh-prop>")
def setup(app):
""" Required Sphinx extension setup function. """
app.add_directive_to_domain("py", "bokeh-prop", BokehPropDirective)
return PARALLEL_SAFE
# -----------------------------------------------------------------------------
# Private API
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
# Code
# ----------------------------------------------------------------------------- |
5,387 | get insecure proxy | # Copyright 2018 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" This template creates HTTP(S) and TCP/SSL proxy resources. """
import copy
HTTP_BASE = True
TCP_BASE = False
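# Flags passed to the shared helpers below to select the proxy family: HTTP/HTTPS versus TCP/SSL.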
def set_optional_property(destination, source, prop_name):
""" Copies the property value if present. """
if prop_name in source:
destination[prop_name] = source[prop_name]
def get_certificate(properties, project_id, res_name):
"""
Gets a link to an existing or newly created SSL Certificate
resource.
"""
if 'url' in properties:
return properties['url'], [], []
name = '{}-ssl-cert'.format(res_name)
resource = {
'name': name,
'type': 'ssl_certificate.py',
'properties': copy.copy(properties)
}
resource['properties']['name'] = properties.get('name', name)
resource['properties']['project'] = project_id
self_link = '$(ref.{}.selfLink)'.format(name)
outputs = [
{
'name': 'certificateName',
'value': '$(ref.{}.name)'.format(name)
},
{
'name': 'certificateSelfLink',
'value': self_link
}
]
return self_link, [resource], outputs
def METHOD_NAME(is_http, res_name, project_id, properties, optional_properties):
""" Creates a TCP or HTTP Proxy resource. """
if is_http:
# https://cloud.google.com/compute/docs/reference/rest/v1/targetHttpProxies
type_name = 'gcp-types/compute-v1:targetHttpProxies'
target_prop = 'urlMap'
else:
# https://cloud.google.com/compute/docs/reference/rest/v1/targetTcpProxies
type_name = 'gcp-types/compute-v1:targetTcpProxies'
target_prop = 'service'
resource_props = {
'name': properties.get('name', res_name),
'project': project_id,
}
resource = {'type': type_name, 'name': res_name, 'properties': resource_props}
resource_props[target_prop] = properties['target']
for prop in optional_properties:
set_optional_property(resource_props, properties, prop)
return [resource], []
def get_secure_proxy(is_http, res_name, project_id, properties, optional_properties):
""" Creates an SSL or HTTPS Proxy resource. """
if is_http:
create_base_proxy = get_http_proxy
# https://cloud.google.com/compute/docs/reference/rest/v1/targetHttpsProxies
target_type = 'gcp-types/compute-v1:targetHttpsProxies'
else:
create_base_proxy = get_tcp_proxy
# https://cloud.google.com/compute/docs/reference/rest/v1/targetSslProxies
target_type = 'gcp-types/compute-v1:targetSslProxies'
# Base proxy settings:
resources, outputs = create_base_proxy(properties, res_name, project_id)
resource = resources[0]
resource['type'] = target_type
resource_prop = resource['properties']
for prop in optional_properties:
set_optional_property(resource_prop, properties, prop)
# SSL settings:
ssl_resources = []
ssl_outputs = []
if 'sslCertificates' not in properties.get('ssl', []):
ssl = properties['ssl']
url, ssl_resources, ssl_outputs = get_certificate(ssl['certificate'], project_id, res_name)
resource_prop['sslCertificates'] = [url]
set_optional_property(resource_prop, ssl, 'sslPolicy')
if 'sslCertificates' in properties.get('ssl', []):
set_optional_property(resource_prop, properties['ssl'], 'sslCertificates')
return resources + ssl_resources, outputs + ssl_outputs
def get_http_proxy(properties, res_name, project_id):
""" Creates the HTTP Proxy resource. """
return METHOD_NAME(HTTP_BASE, res_name, project_id, properties, ['description'])
def get_tcp_proxy(properties, res_name, project_id):
""" Creates the TCP Proxy resource. """
optional_properties = ['description', 'proxyHeader']
return METHOD_NAME(TCP_BASE, res_name, project_id, properties, optional_properties)
def get_https_proxy(properties, res_name, project_id):
""" Creates the HTTPS Proxy resource. """
return get_secure_proxy(HTTP_BASE, res_name, project_id, properties, ['quicOverride'])
def get_ssl_proxy(properties, res_name, project_id):
""" Creates the SSL Proxy resource. """
return get_secure_proxy(TCP_BASE, res_name, project_id, properties, [])
def generate_config(context):
""" Entry point for the deployment resources. """
properties = context.properties
name = properties.get('name', context.env['name'])
project_id = properties.get('project', context.env['project'])
protocol = properties['protocol']
if protocol == 'SSL':
resources, outputs = get_ssl_proxy(properties, context.env['name'], project_id)
elif protocol == 'TCP':
resources, outputs = get_tcp_proxy(properties, context.env['name'], project_id)
elif protocol == 'HTTPS':
resources, outputs = get_https_proxy(properties, context.env['name'], project_id)
else:
resources, outputs = get_http_proxy(properties, context.env['name'], project_id)
return {
'resources':
resources,
'outputs':
outputs + [
{
'name': 'name',
'value': name
},
{
'name': 'selfLink',
'value': '$(ref.{}.selfLink)'.format(context.env['name'])
},
{
'name': 'kind',
'value': '$(ref.{}.kind)'.format(context.env['name'])
},
]
} |
5,388 | pass result | import noodles
from noodles.workflow import (get_workflow)
from noodles.run.coroutines import (Connection, coroutine_sink)
from noodles.run.experimental import (logging_worker)
from noodles.display import SimpleDisplay
import subprocess
import sys
import argparse
import json
import time
import shlex
import os
logFolder = None
class Job:
def __init__(self, task, exclude, state, job, key):
self.task = task
self.exclude = exclude
self.state = state
self.job = job
self.key = key
def dynamic_exclusion_worker(display, n_threads):
"""This worker allows mutualy exclusive jobs to start safely. The
user provides the information on which jobs exclude the simultaneous
execution of other jobs::
a = task()
b = task()
update_hints(a, {'id': '1', 'exclude': ['2']})
update_hints(b, {'id': '2', 'exclude': ['1']})
run(gather(a, b))
Using this worker, when task ``a`` is sent to the underlying worker,
task ``b`` is blocked until ``a`` completes, and vice versa.
"""
result_source, job_sink = logging_worker(n_threads, display).setup()
jobs = {}
key_task = {}
@coroutine_sink
def pass_job():
"""The scheduler sends jobs to this coroutine. If the 'exclude' key
        is found in the hints, the job is run in exclusive mode. We keep an internal
record of these jobs, and whether they are 'waiting', 'running' or 'done'.
"""
while True:
key, job = yield
if job.hints and 'exclude' in job.hints:
j = Job(task=job.hints['id'],
exclude=job.hints['exclude'],
state='waiting',
job=job,
key=key)
jobs[j.task] = j
key_task[key] = j.task
try_to_start(j.task)
else:
job_sink.send((key, job))
def is_not_running(task):
"""Checks if a task is not running."""
return not (task in jobs and jobs[task].state == 'running')
def try_to_start(task):
"""Try to start a task. This only succeeds if the task hasn't already
        run, and no job excluded by the task is currently running."""
if jobs[task].state != 'waiting':
return
if all(is_not_running(i) for i in jobs[task].exclude):
jobs[task].state = 'running'
key, job = jobs[task].key, jobs[task].job
job_sink.send((key, job))
def finish(key):
"""Finish a job. This function is called when we recieve a result."""
task = key_task[key]
jobs[task].state = 'done'
for i in jobs[task].exclude:
try_to_start(i)
def METHOD_NAME():
"""Recieve a result; finish the task in the register and send the result
back to the scheduler."""
for key, status, result, err in result_source:
if key in key_task:
finish(key)
yield (key, status, result, err)
return Connection(METHOD_NAME, pass_job)
def run(wf, *, display, n_threads=1):
"""Run the workflow using the dynamic-exclusion worker."""
worker = dynamic_exclusion_worker(display, n_threads)
return noodles.Scheduler(error_handler=display.error_handler)\
.run(worker, get_workflow(wf))
@noodles.schedule_hint(display='{cmd}', confirm=True)
def system_command(cmd, task):
cmd_split = shlex.split(cmd) # list(shlex.shlex(cmd))
p = subprocess.run(
cmd_split, stdout=subprocess.PIPE,
stderr=subprocess.PIPE, universal_newlines=True)
p.check_returncode()
    with open(os.path.join(logFolder, task), 'w') as oFile:
        oFile.write(p.stdout)
return p.stdout
def make_job(cmd, task_id, exclude):
j = system_command(cmd, task_id)
noodles.update_hints(j, {'id': str(task_id),
'exclude': [str(x) for x in exclude]})
return j
def error_filter(xcptn):
if isinstance(xcptn, subprocess.CalledProcessError):
return xcptn.stderr
else:
return None
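# A hypothetical example of the JSON graph this script expects as its `target` argument:
# [
#     {"command": "echo a", "id": 1, "exclude": [2]},
#     {"command": "echo b", "id": 2, "exclude": [1]}
# ]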
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description="SOBA: Run a non-directional exclusion graph job.")
parser.add_argument(
'-j', dest='n_threads', type=int, default=1,
help='number of threads to run simultaneously.')
parser.add_argument(
'-dumb', default=False, action='store_true',
help='print info without special term codes.')
parser.add_argument(
'target', type=str,
help='a JSON file specifying the graph.')
parser.add_argument(
'log', type=str,
help='a log folder.')
args = parser.parse_args(sys.argv[1:])
logFolder = args.log
os.makedirs(logFolder)
    graph = json.load(open(args.target, 'r'))
    jobs = [make_job(td['command'],
                     td['id'], td['exclude']) for td in graph]
wf = noodles.gather(*jobs)
with SimpleDisplay(error_filter) as display:
run(wf, display=display, n_threads=args.n_threads) |
5,389 | set | import sys
from struct import pack
from . import consts
def align4_int(i: int):
return (4 - i) % 4
def align4_data(data):
return data + b"\x00" * align4_int(len(data))
class Norcow:
def __init__(self):
self.sectors = None
self.active_sector = 0
def init(self):
if self.sectors:
for sector in range(consts.NORCOW_SECTOR_COUNT):
if self.sectors[sector][:8] == consts.NORCOW_MAGIC_AND_VERSION:
self.active_sector = sector
self.active_offset = len(consts.NORCOW_MAGIC_AND_VERSION)
break
else:
self.wipe()
def wipe(self, sector: int = None):
if sector is None:
sector = self.active_sector
self.sectors = [
bytearray([0xFF] * consts.NORCOW_SECTOR_SIZE)
for _ in range(consts.NORCOW_SECTOR_COUNT)
]
self.sectors[sector][:8] = consts.NORCOW_MAGIC_AND_VERSION
self.active_sector = sector
self.active_offset = len(consts.NORCOW_MAGIC_AND_VERSION)
def get(self, key: int) -> bytes:
value, _ = self._find_item(key)
return value
def METHOD_NAME(self, key: int, val: bytes):
if key == consts.NORCOW_KEY_FREE:
raise RuntimeError("Norcow: key 0xFFFF is not allowed")
found_value, pos = self._find_item(key)
if found_value is not False:
if self._is_updatable(found_value, val):
self._write(pos, key, val)
return
else:
self._delete_old(pos, found_value)
if self.active_offset + 4 + len(val) > consts.NORCOW_SECTOR_SIZE:
self._compact()
self._append(key, val)
def delete(self, key: int):
if key == consts.NORCOW_KEY_FREE:
raise RuntimeError("Norcow: key 0xFFFF is not allowed")
found_value, pos = self._find_item(key)
if found_value is False:
return False
self._delete_old(pos, found_value)
return True
def replace(self, key: int, new_value: bytes) -> bool:
old_value, offset = self._find_item(key)
if not old_value:
raise RuntimeError("Norcow: key not found")
if len(old_value) != len(new_value):
raise RuntimeError(
"Norcow: replace works only with items of the same length"
)
self._write(offset, key, new_value)
def _is_updatable(self, old: bytes, new: bytes) -> bool:
"""
Item is updatable if the new value is the same or
it changes 1 to 0 only (the flash memory does not
allow to flip 0 to 1 unless you wipe it).
"""
if len(old) != len(new):
return False
if old == new:
return True
for a, b in zip(old, new):
if a & b != b:
return False
return True
def _delete_old(self, pos: int, value: bytes):
wiped_data = b"\x00" * len(value)
self._write(pos, 0x0000, wiped_data)
def _append(self, key: int, value: bytes):
self.active_offset += self._write(self.active_offset, key, value)
def _write(self, pos: int, key: int, new_value: bytes) -> int:
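        # Item layout: little-endian 2-byte key, 2-byte length, then the value padded to a 4-byte boundary.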
data = pack("<HH", key, len(new_value)) + align4_data(new_value)
if pos + len(data) > consts.NORCOW_SECTOR_SIZE:
raise RuntimeError("Norcow: item too big")
self.sectors[self.active_sector][pos : pos + len(data)] = data
return len(data)
def _find_item(self, key: int) -> (bytes, int):
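        # Scan the whole active sector and keep the last match: newer versions of an item are appended later.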
offset = len(consts.NORCOW_MAGIC_AND_VERSION)
value = False
pos = offset
while True:
try:
k, v = self._read_item(offset)
if k == key:
value = v
pos = offset
except ValueError:
break
offset = offset + self._norcow_item_length(v)
return value, pos
def _get_all_keys(self) -> (bytes, int):
offset = len(consts.NORCOW_MAGIC_AND_VERSION)
keys = METHOD_NAME()
while True:
try:
k, v = self._read_item(offset)
keys.add(k)
except ValueError:
break
offset = offset + self._norcow_item_length(v)
return keys
def _norcow_item_length(self, data: bytes) -> int:
# APP_ID, KEY_ID, LENGTH, DATA, ALIGNMENT
return 1 + 1 + 2 + len(data) + align4_int(len(data))
def _read_item(self, offset: int) -> (int, bytes):
key = self.sectors[self.active_sector][offset : offset + 2]
key = int.from_bytes(key, sys.byteorder)
if key == consts.NORCOW_KEY_FREE:
raise ValueError("Norcow: no data on this offset")
length = self.sectors[self.active_sector][offset + 2 : offset + 4]
length = int.from_bytes(length, sys.byteorder)
value = self.sectors[self.active_sector][offset + 4 : offset + 4 + length]
return key, value
def _compact(self):
offset = len(consts.NORCOW_MAGIC_AND_VERSION)
data = list()
while True:
try:
k, v = self._read_item(offset)
if k != 0x00:
data.append((k, v))
except ValueError:
break
offset = offset + self._norcow_item_length(v)
sector = self.active_sector
self.wipe((sector + 1) % consts.NORCOW_SECTOR_COUNT)
for key, value in data:
self._append(key, value)
def _set_sectors(self, data):
if list(map(len, data)) != [
consts.NORCOW_SECTOR_SIZE,
consts.NORCOW_SECTOR_SIZE,
]:
raise RuntimeError("Norcow: set_sectors called with invalid data length")
self.sectors = [bytearray(sector) for sector in data]
def _dump(self):
return [bytes(sector) for sector in self.sectors] |
5,390 | record event tags management | from django.db import models
from django.contrib.auth.models import User
from django.dispatch import receiver
from zds.tutorialv2.models.database import PublishableContent
from zds.tutorialv2 import signals
from zds.tutorialv2.views.authors import AddAuthorToContent, RemoveAuthorFromContent
from zds.tutorialv2.views.beta import ManageBetaContent
from zds.tutorialv2.views.contributors import AddContributorToContent, RemoveContributorFromContent
from zds.tutorialv2.views.editorialization import EditContentTags, AddSuggestion, RemoveSuggestion
from zds.tutorialv2.views.goals import EditGoals
from zds.tutorialv2.views.labels import EditLabels
from zds.tutorialv2.views.help import ChangeHelp
from zds.tutorialv2.views.validations_contents import (
ReserveValidation,
AskValidationForContent,
CancelValidation,
RejectValidation,
AcceptValidation,
RevokeValidation,
ActivateJSFiddleInContent,
)
from zds.tutorialv2.views.validations_opinions import PublishOpinion, UnpublishOpinion
# Notes on addition/deletion/update of managed signals
#
# * Addition
# 1. Add a key in `types`.
# 2. Modify the template "events/description.part.html" so that it is displayed properly.
# 3. Add the appropriate receiver.
#
# * Deletion
# 1. Remove the key in `types` and the corresponding `@receiver`.
# This will make it impossible to record new events coming from this signal.
# 2. Do not modify the template, so that older events in the database keep being displayed properly.
#
# * Update
# If a type name was to be updated for some reason, two options are possible:
# - cleaner: update the production database to replace the old name with the new and also update the template
# - simpler: update the template so that it knows the new name as well as the old name.
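# Hypothetical sketch of steps 1 and 3 for a new "licence_management" event
# (the signal, sender and function names below are made up for illustration):
#
#     types[signals.licence_management] = "licence_management"
#
#     @receiver(signals.licence_management, sender=EditContentLicence)
#     def record_event_licence_management(sender, performer, signal, content, **_):
#         Event(performer=performer, type=types[signal], content=content).save()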
# Map signals to event types
types = {
signals.authors_management: "authors_management",
signals.contributors_management: "contributors_management",
signals.beta_management: "beta_management",
signals.validation_management: "validation_management",
signals.tags_management: "tags_management",
signals.goals_management: "goals_management",
signals.labels_management: "labels_management",
signals.suggestions_management: "suggestions_management",
signals.help_management: "help_management",
signals.jsfiddle_management: "jsfiddle_management",
signals.opinions_management: "opinions_management",
}
class Event(models.Model):
class Meta:
verbose_name = "Événement sur un contenu"
verbose_name_plural = "Événements sur un contenu"
# Base fields
date = models.DateTimeField(auto_now_add=True)
performer = models.ForeignKey(User, on_delete=models.SET_NULL, null=True)
type = models.CharField(max_length=100)
content = models.ForeignKey(PublishableContent, on_delete=models.CASCADE)
action = models.CharField(max_length=100)
# Field used by author events
author = models.ForeignKey(User, related_name="event_author", on_delete=models.SET_NULL, null=True)
# Field used by contributor events
contributor = models.ForeignKey(User, related_name="event_contributor", on_delete=models.SET_NULL, null=True)
# Field used by beta and validation events
version = models.CharField(null=True, max_length=80)
# Event recorders
@receiver(signals.beta_management, sender=ManageBetaContent)
def record_event_beta_management(sender, performer, signal, content, version, action, **_):
Event(
performer=performer,
type=types[signal],
content=content,
version=version,
action=action,
).save()
@receiver(signals.authors_management, sender=AddAuthorToContent)
@receiver(signals.authors_management, sender=RemoveAuthorFromContent)
def record_event_author_management(sender, performer, signal, content, author, action, **_):
Event(
performer=performer,
type=types[signal],
content=content,
author=author,
action=action,
).save()
@receiver(signals.contributors_management, sender=AddContributorToContent)
@receiver(signals.contributors_management, sender=RemoveContributorFromContent)
def record_event_contributor_management(sender, performer, signal, content, contributor, action, **_):
Event(
performer=performer,
type=types[signal],
content=content,
contributor=contributor,
action=action,
).save()
@receiver(signals.validation_management, sender=AskValidationForContent)
@receiver(signals.validation_management, sender=CancelValidation)
@receiver(signals.validation_management, sender=AcceptValidation)
@receiver(signals.validation_management, sender=RejectValidation)
@receiver(signals.validation_management, sender=RevokeValidation)
@receiver(signals.validation_management, sender=ReserveValidation)
def record_event_validation_management(sender, performer, signal, content, version, action, **_):
Event(
performer=performer,
type=types[signal],
content=content,
version=version,
action=action,
).save()
@receiver(signals.tags_management, sender=EditContentTags)
def METHOD_NAME(sender, performer, signal, content, **_):
Event(
performer=performer,
type=types[signal],
content=content,
).save()
@receiver(signals.suggestions_management, sender=AddSuggestion)
@receiver(signals.suggestions_management, sender=RemoveSuggestion)
def record_event_suggestion_management(sender, performer, signal, content, action, **_):
Event(
performer=performer,
type=types[signal],
content=content,
action=action,
).save()
@receiver(signals.goals_management, sender=EditGoals)
def record_event_goals_management(sender, performer, signal, content, **_):
Event(
performer=performer,
type=types[signal],
content=content,
).save()
@receiver(signals.labels_management, sender=EditLabels)
def record_event_labels_management(sender, performer, signal, content, **_):
Event(
performer=performer,
type=types[signal],
content=content,
).save()
@receiver(signals.help_management, sender=ChangeHelp)
def record_event_help_management(sender, performer, signal, content, **_):
Event(
performer=performer,
type=types[signal],
content=content,
).save()
@receiver(signals.jsfiddle_management, sender=ActivateJSFiddleInContent)
def record_event_jsfiddle_management(sender, performer, signal, content, action, **_):
Event(
performer=performer,
type=types[signal],
content=content,
action=action,
).save()
@receiver(signals.opinions_management, sender=PublishOpinion)
@receiver(signals.opinions_management, sender=UnpublishOpinion)
def record_event_opinion_publication_management(sender, performer, signal, content, action, **_):
Event(
performer=performer,
type=types[signal],
content=content,
action=action,
).save() |
5,391 | encode | """Extend the Python codecs module with a few encodings that are used in OpenType (name table)
but missing from Python. See https://github.com/fonttools/fonttools/issues/236 for details."""
import codecs
import encodings
class ExtendCodec(codecs.Codec):
def __init__(self, name, base_encoding, mapping):
self.name = name
self.base_encoding = base_encoding
self.mapping = mapping
self.reverse = {v: k for k, v in mapping.items()}
self.max_len = max(len(v) for v in mapping.values())
self.info = codecs.CodecInfo(
name=self.name, METHOD_NAME=self.METHOD_NAME, decode=self.decode
)
codecs.register_error(name, self.error)
def _map(self, mapper, output_type, exc_type, input, errors):
base_error_handler = codecs.lookup_error(errors)
length = len(input)
out = output_type()
while input:
# first try to use self.error as the error handler
try:
part = mapper(input, self.base_encoding, errors=self.name)
out += part
break # All converted
except exc_type as e:
# else convert the correct part, handle error as requested and continue
out += mapper(input[: e.start], self.base_encoding, self.name)
replacement, pos = base_error_handler(e)
out += replacement
input = input[pos:]
return out, length
def METHOD_NAME(self, input, errors="strict"):
return self._map(codecs.METHOD_NAME, bytes, UnicodeEncodeError, input, errors)
def decode(self, input, errors="strict"):
return self._map(codecs.decode, str, UnicodeDecodeError, input, errors)
def error(self, e):
if isinstance(e, UnicodeDecodeError):
for end in range(e.start + 1, e.end + 1):
s = e.object[e.start : end]
if s in self.mapping:
return self.mapping[s], end
elif isinstance(e, UnicodeEncodeError):
for end in range(e.start + 1, e.start + self.max_len + 1):
s = e.object[e.start : end]
if s in self.reverse:
return self.reverse[s], end
e.encoding = self.name
raise e
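# Illustrative round trip (sketch, assuming the codec has been registered via
# the search_function below):
#
#     "\u2122 and more\u2026".encode("x_mac_japanese_ttx")  # b'\xfe and more\xff'
#     b"\xfe and more\xff".decode("x_mac_japanese_ttx")     # '\u2122 and more\u2026'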
_extended_encodings = {
"x_mac_japanese_ttx": (
"shift_jis",
{
b"\xFC": chr(0x007C),
b"\x7E": chr(0x007E),
b"\x80": chr(0x005C),
b"\xA0": chr(0x00A0),
b"\xFD": chr(0x00A9),
b"\xFE": chr(0x2122),
b"\xFF": chr(0x2026),
},
),
"x_mac_trad_chinese_ttx": (
"big5",
{
b"\x80": chr(0x005C),
b"\xA0": chr(0x00A0),
b"\xFD": chr(0x00A9),
b"\xFE": chr(0x2122),
b"\xFF": chr(0x2026),
},
),
"x_mac_korean_ttx": (
"euc_kr",
{
b"\x80": chr(0x00A0),
b"\x81": chr(0x20A9),
b"\x82": chr(0x2014),
b"\x83": chr(0x00A9),
b"\xFE": chr(0x2122),
b"\xFF": chr(0x2026),
},
),
"x_mac_simp_chinese_ttx": (
"gb2312",
{
b"\x80": chr(0x00FC),
b"\xA0": chr(0x00A0),
b"\xFD": chr(0x00A9),
b"\xFE": chr(0x2122),
b"\xFF": chr(0x2026),
},
),
}
_cache = {}
def search_function(name):
name = encodings.normalize_encoding(name) # Rather undocumented...
if name in _extended_encodings:
if name not in _cache:
base_encoding, mapping = _extended_encodings[name]
assert name[-4:] == "_ttx"
# Python 2 didn't have any of the encodings that we are implementing
# in this file. Python 3 added aliases for the East Asian ones, mapping
# them "temporarily" to the same base encoding as us, with a comment
# suggesting that full implementation will appear some time later.
# As such, try the Python version of the x_mac_... first, if that is found,
# use *that* as our base encoding. This would make our encoding upgrade
# to the full encoding when and if Python finally implements that.
# http://bugs.python.org/issue24041
base_encodings = [name[:-4], base_encoding]
for base_encoding in base_encodings:
try:
codecs.lookup(base_encoding)
except LookupError:
continue
_cache[name] = ExtendCodec(name, base_encoding, mapping)
break
return _cache[name].info
return None
codecs.register(search_function) |
5,392 | parse typedefs | """Parses header files, extract declarations of functions and struct, union, enum definitions."""
import logging
import re
from .func_info import FuncInfo
from .func_info import get_declarations
from .func_info import parse_func_declaration
from .header_text_filters import filter_oneline_typedefs
from .header_text_filters import use_filters
from .params_info import Param
from .params_info import parse_func_parameters
from .params_info import split_params
from .parse_enums import get_all_enums
from .parse_enums import parse_enum
from .parse_structs_unions import get_all_structs
from .parse_structs_unions import get_all_unions
from .parse_structs_unions import parse_struct
from .parse_structs_unions import parse_union
def get_types_info_from_text(file, content, output):
"""Extracts all function declarations and struct, union, enum definitions
from input text.
"""
content = use_filters(content)
unions_content = content
structs, content = parse_all_structs(content, file)
unions, unions_content = parse_all_unions(unions_content, file)
enums = parse_all_enums(content, file)
typedefs = METHOD_NAME(content)
content = filter_oneline_typedefs(content)
functions = parse_all_functions(content, output, file)
functions = remove_unwanted_functions(functions)
return functions, typedefs, structs, unions, enums
def parse_all_structs(content, file):
    """Gets structs' definitions from headers. Returns Struct objects in a
    dictionary and the header content without structs.
    """
content, file_structs = get_all_structs(content)
structs = {}
for s in file_structs:
struct_info = parse_struct(s, file)
sname = struct_info.name_text
if not sname:
sname = struct_info.type_name_text
if sname:
if sname in structs.keys():
logging.warning(
                    'Duplicate struct: {}\nFile: {}\n{}\nFirst '
'struct was:\nFile: {}\n{}\n'
''.format(
sname, struct_info.header_text, struct_info.members_list,
structs[sname].header_text, structs[sname].members_list))
else:
structs[sname] = struct_info
return structs, content
def parse_all_unions(content, file):
    """Gets unions' definitions from headers.
    Returns Union objects in a dictionary and the header content without unions.
    """
content, file_unions = get_all_unions(content)
unions = {}
for u in file_unions:
union_info = parse_union(u, file)
uname = union_info.name_text
if not uname:
uname = union_info.type_name_text
if uname:
if uname in unions.keys():
logging.warning(
                    'Duplicate union: {}\nFile: {}\n{}\nFirst '
'union was:\nFile: {}\n{}\n'
''.format(
uname, union_info.header_text, union_info.members_list,
unions[uname].header_text, unions[uname].members_list))
else:
unions[uname] = union_info
return unions, content
def parse_all_functions(content, output, file):
    """Gets functions' declarations from a header file.
    Returns FuncInfo objects in a dictionary.
    """
decls = get_declarations(content)
functions = {}
for decl in decls:
decl = decl.strip()
name, ret, params, call_conv = parse_func_declaration(decl)
if wrong_func_parameters(decl):
continue
if name is not None:
varargs = False
if params.endswith('...'):
varargs = True
params = params[:params.rfind(',')]
params_list = parse_func_parameters(params)
if varargs and output != 'json':
params_list.append(Param('vararg', '...'))
finfo = FuncInfo(decl, name, file, ret, params_list, varargs, call_conv)
finfo.delete_underscores_in_param_names()
if name in functions.keys():
logging.warning(
                'Duplicate declaration: {}\nFile: {}\n{}\nFirst '
'declaration was:\nFile: {}\n{}\n'
''.format(
name, finfo.header, finfo.decl,
functions[name].header, functions[name].decl))
else:
functions[name] = finfo
return functions
def remove_unwanted_functions(functions):
"""Removes functions which we do not want in our extracted files.
Returns a new dictionary with filtered functions.
"""
return {
func: func_info for func,
func_info in functions.items() if is_wanted(func_info)
}
def is_wanted(func_info):
"""Do we want to include the given function in our extracted files?"""
# We do not want to include generic Windows functions whose arguments or
# return types are "T" types (e.g. LPCTSTR). They are never present in
# binary files. Instead, their A/W variants are used, depending on whether
# UNICODE was defined during compilation or not.
def is_t_type(type):
t_types_re = r'\b({})\b'.format('|'.join([
'LPCTSTR',
'PCTSTR',
'LPTSTR',
'PTSTR',
'TBYTE',
'PTBYTE',
'TCHAR',
]))
return re.search(t_types_re, type) is not None
if is_t_type(func_info.ret_type):
return False
for param in func_info.params:
if is_t_type(param.type_text):
return False
# Some functions look like declarations but are, in fact, just ordinary
# sentences. We detect this heuristically by searching for declarations
# that start with an uppercase letter and contain "the".
if re.fullmatch(r'[A-Z].*\bthe\b.*', func_info.decl):
return False
return True
def wrong_func_parameters(params):
c = params.count('(')
return (c != params.count(')')) or c > 10
def parse_all_enums(text, file):
"""Gets all enums from header, returns list of Enum objects."""
enums = get_all_enums(text)
return [parse_enum(enum, file) for enum in enums]
def METHOD_NAME(text):
    """Parses typedefs from text, except struct, union and enum typedefs.
    They are parsed like function parameters, since the syntax is the same.
    """
typedefs = get_typedefs(text)
to_parse = []
for t_def in typedefs:
t_defs = split_params(t_def)
if len(t_defs) == 0:
continue
to_parse.append(t_defs[0])
if len(t_defs) > 1:
t_type = re.search(r'^([\w\s]+)?(?=\s+(?:\*|\w+|\(\*))', t_defs[0])
t_type = t_type.group(1) if t_type else ''
for next_type in t_defs[1:]:
to_parse.append(t_type + ' ' + next_type)
parsed = []
for t_def in to_parse:
if t_def.endswith(')'):
t_def = remove_brackets_around_pointer(t_def)
parsed = parsed + parse_func_parameters(t_def)
return parsed
def remove_brackets_around_pointer(ptr):
    """Removes brackets around the typedef name when it is a typedef to a
    pointer (except pointers to functions), e.g. 'typedef int (*HANDLER);'.
    """
return re.sub(r'\((\s*\*\s*\w+)\)(;?)$', r'\1\2', ptr)
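# Worked example (sketch):
#     remove_brackets_around_pointer('int (*HANDLER);')   ->  'int *HANDLER;'
#     remove_brackets_around_pointer('int (*cb)(void);')  ->  unchanged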
def get_typedefs(text):
"""Gets typedefs from text except struct, union and enum typedefs."""
    return re.findall(r'typedef\s*([\w\s\*\[\]\(\),.+-/]+?)\s*;', text) |
5,393 | do parse old state file | """
Dynamic roster from terraform current state
===========================================
This roster module allows you dynamically generate the roster from the terraform
resources defined with the `Terraform Salt`_ provider.
It exposes all salt_host resources with the same attributes to the salt-ssh
roster, making it completely independent of the type of terraform resource, and
providing the integration using terraform constructs with interpolation.
Basic Example
-------------
Given a simple salt-ssh tree with a Saltfile:
.. code-block:: yaml
salt-ssh:
config_dir: etc/salt
max_procs: 30
wipe_ssh: True
and ``etc/salt/master``:
.. code-block:: yaml
root_dir: .
file_roots:
base:
- srv/salt
pillar_roots:
base:
- srv/pillar
roster: terraform
In the same folder as your ``Saltfile``, create terraform file with resources
like cloud instances, virtual machines, etc. For every single one of those that
you want to manage with Salt, create a ``salt_host`` resource:
.. code-block:: text
resource "salt_host" "dbminion" {
salt_id = "dbserver"
host = "${libvirt_domain.vm-db.network_interface.0.addresses.0}"
user = "root"
passwd = "linux"
}
You can use the count attribute to create multiple roster entries with a single
definition. Please refer to the `Terraform Salt`_ provider for more detailed
examples.
.. _Terraform Salt: https://github.com/dmacvicar/terraform-provider-salt
"""
import logging
import os.path
import salt.utils.files
import salt.utils.json
log = logging.getLogger(__name__)
TF_OUTPUT_PREFIX = "salt.roster."
TF_ROSTER_ATTRS = {
"host": "s",
"user": "s",
"passwd": "s",
"port": "i",
"sudo": "b",
"sudo_user": "s",
"tty": "b",
"priv": "s",
"timeout": "i",
"minion_opts": "m",
"thin_dir": "s",
"cmd_umask": "i",
}
MINION_ID = "salt_id"
def _handle_old_salt_host_resource(resource):
"""
Handles salt_host resources.
See https://github.com/dmacvicar/terraform-provider-salt
Returns roster attributes for the resource or None
"""
ret = {}
attrs = resource.get("primary", {}).get("attributes", {})
ret[MINION_ID] = attrs.get(MINION_ID)
valid_attrs = set(attrs.keys()).intersection(TF_ROSTER_ATTRS.keys())
for attr in valid_attrs:
ret[attr] = _cast_output_to_type(
attr, attrs.get(attr), TF_ROSTER_ATTRS.get(attr)
)
return ret
def _handle_new_salt_host_resource(resource):
"""
Handles salt_host resources.
See https://github.com/dmacvicar/terraform-provider-salt
Returns roster attributes for the resource or None
"""
rets = []
instances = resource.get("instances", [])
for instance in instances:
ret = {}
attrs = instance.get("attributes", {})
ret[MINION_ID] = attrs.get(MINION_ID)
valid_attrs = set(attrs.keys()).intersection(TF_ROSTER_ATTRS.keys())
for attr in valid_attrs:
ret[attr] = _cast_output_to_type(
attr, attrs.get(attr), TF_ROSTER_ATTRS.get(attr)
)
log.info(ret)
rets.append(ret)
return rets
def _add_ssh_key(ret):
"""
    Sets up the salt-ssh minion to be accessed with the default salt-ssh key.
"""
priv = None
if __opts__.get("ssh_use_home_key") and os.path.isfile(
os.path.expanduser("~/.ssh/id_rsa")
):
priv = os.path.expanduser("~/.ssh/id_rsa")
else:
priv = __opts__.get(
"ssh_priv",
os.path.abspath(os.path.join(__opts__["pki_dir"], "ssh", "salt-ssh.rsa")),
)
if priv and os.path.isfile(priv):
ret["priv"] = priv
def _cast_output_to_type(attr, value, typ):
"""cast the value depending on the terraform type"""
if value is None:
# Timeout needs to default to 0 if the value is None
# The ssh command that is run cannot handle `-o ConnectTimeout=None`
if attr == "timeout":
return 0
else:
return value
if typ == "b":
return bool(value)
if typ == "i":
return int(value)
return value
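# Example behaviour (sketch): attribute values from the state file are cast
# according to TF_ROSTER_ATTRS, e.g.
#     _cast_output_to_type("port", "22", "i")     -> 22
#     _cast_output_to_type("timeout", None, "i")  -> 0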
def _parse_state_file(state_file_path="terraform.tfstate"):
"""
    Parses the terraform state file, passing each resource type to the right handler.
"""
with salt.utils.files.fopen(state_file_path, "r") as fh_:
tfstate = salt.utils.json.load(fh_)
if "resources" in tfstate:
return _do_parse_new_state_file(tfstate)
elif "modules" in tfstate:
return METHOD_NAME(tfstate)
else:
log.error("Malformed tfstate file.")
return {}
def _do_parse_new_state_file(tfstate):
"""
    Parses the terraform state file, passing each resource type to the right handler (terraform version >= v0.13.0).
"""
ret = {}
resources = tfstate.get("resources")
if not resources:
log.error("Malformed tfstate file. No resources found")
return ret
for resource in resources:
if resource["type"] == "salt_host":
            roster_entries = _handle_new_salt_host_resource(resource)
            if not roster_entries:
                continue
            for roster_entry in roster_entries:
if not roster_entry:
continue
minion_id = roster_entry.get(MINION_ID, resource.get("id"))
if not minion_id:
continue
if MINION_ID in roster_entry:
del roster_entry[MINION_ID]
_add_ssh_key(roster_entry)
ret[minion_id] = roster_entry
return ret
def METHOD_NAME(tfstate):
"""
    Parses the terraform state file, passing each resource type to the right handler (terraform version < v0.13.0).
"""
ret = {}
modules = tfstate.get("modules")
if not modules:
log.error("Malformed tfstate file. No modules found")
return ret
for module in modules:
resources = module.get("resources", [])
for resource_name, resource in resources.items():
roster_entry = None
if resource["type"] == "salt_host":
roster_entry = _handle_old_salt_host_resource(resource)
if not roster_entry:
continue
minion_id = roster_entry.get(MINION_ID, resource.get("id"))
if not minion_id:
continue
if MINION_ID in roster_entry:
del roster_entry[MINION_ID]
_add_ssh_key(roster_entry)
ret[minion_id] = roster_entry
return ret
def targets(tgt, tgt_type="glob", **kwargs): # pylint: disable=W0613
"""
Returns the roster from the terraform state file, checks opts for location, but defaults to terraform.tfstate
"""
roster_file = os.path.abspath("terraform.tfstate")
if __opts__.get("roster_file"):
roster_file = os.path.abspath(__opts__["roster_file"])
if not os.path.isfile(roster_file):
log.error("Can't find terraform state file '%s'", roster_file)
return {}
log.debug("terraform roster: using %s state file", roster_file)
if not roster_file.endswith(".tfstate"):
log.error("Terraform roster can only be used with terraform state files")
return {}
raw = _parse_state_file(roster_file)
log.debug("%s hosts in terraform state file", len(raw))
return __utils__["roster_matcher.targets"](raw, tgt, tgt_type, "ipv4") |
5,394 | close brace callback | # -*- coding: utf-8 -*-
"""
pygments.lexers.bibtex
~~~~~~~~~~~~~~~~~~~~~~
Lexers for BibTeX bibliography data and styles
:copyright: Copyright 2006-2019 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
from pygments.lexer import RegexLexer, ExtendedRegexLexer, include, default, \
words
from pygments.token import Name, Comment, String, Error, Number, Text, \
Keyword, Punctuation
__all__ = ['BibTeXLexer', 'BSTLexer']
class BibTeXLexer(ExtendedRegexLexer):
"""
A lexer for BibTeX bibliography data format.
.. versionadded:: 2.2
"""
name = 'BibTeX'
aliases = ['bib', 'bibtex']
filenames = ['*.bib']
mimetypes = ["text/x-bibtex"]
flags = re.IGNORECASE
ALLOWED_CHARS = r'@!$&*+\-./:;<>?\[\\\]^`|~'
IDENTIFIER = '[{}][{}]*'.format('a-z_' + ALLOWED_CHARS, r'\w' + ALLOWED_CHARS)
def open_brace_callback(self, match, ctx):
opening_brace = match.group()
ctx.opening_brace = opening_brace
yield match.start(), Punctuation, opening_brace
ctx.pos = match.end()
def METHOD_NAME(self, match, ctx):
closing_brace = match.group()
if (
            (ctx.opening_brace == '{' and closing_brace != '}') or
            (ctx.opening_brace == '(' and closing_brace != ')')
):
yield match.start(), Error, closing_brace
else:
yield match.start(), Punctuation, closing_brace
del ctx.opening_brace
ctx.pos = match.end()
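    # Example (sketch): BibTeX accepts both '@string{...}' and '@string(...)',
    # but the delimiters must match, so in '@string{foo = "bar")' the final
    # ')' is tokenized as Error because the entry was opened with '{'.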
tokens = {
'root': [
include('whitespace'),
('@comment', Comment),
('@preamble', Name.Class, ('closing-brace', 'value', 'opening-brace')),
('@string', Name.Class, ('closing-brace', 'field', 'opening-brace')),
('@' + IDENTIFIER, Name.Class,
('closing-brace', 'command-body', 'opening-brace')),
('.+', Comment),
],
'opening-brace': [
include('whitespace'),
(r'[{(]', open_brace_callback, '#pop'),
],
'closing-brace': [
include('whitespace'),
(r'[})]', METHOD_NAME, '#pop'),
],
'command-body': [
include('whitespace'),
(r'[^\s\,\}]+', Name.Label, ('#pop', 'fields')),
],
'fields': [
include('whitespace'),
(',', Punctuation, 'field'),
default('#pop'),
],
'field': [
include('whitespace'),
(IDENTIFIER, Name.Attribute, ('value', '=')),
default('#pop'),
],
'=': [
include('whitespace'),
('=', Punctuation, '#pop'),
],
'value': [
include('whitespace'),
(IDENTIFIER, Name.Variable),
('"', String, 'quoted-string'),
(r'\{', String, 'braced-string'),
(r'[\d]+', Number),
('#', Punctuation),
default('#pop'),
],
'quoted-string': [
(r'\{', String, 'braced-string'),
('"', String, '#pop'),
(r'[^\{\"]+', String),
],
'braced-string': [
(r'\{', String, '#push'),
(r'\}', String, '#pop'),
(r'[^\{\}]+', String),
],
'whitespace': [
(r'\s+', Text),
],
}
class BSTLexer(RegexLexer):
"""
A lexer for BibTeX bibliography styles.
.. versionadded:: 2.2
"""
name = 'BST'
aliases = ['bst', 'bst-pybtex']
filenames = ['*.bst']
flags = re.IGNORECASE | re.MULTILINE
tokens = {
'root': [
include('whitespace'),
(words(['read', 'sort']), Keyword),
(words(['execute', 'integers', 'iterate', 'reverse', 'strings']),
Keyword, ('group')),
(words(['function', 'macro']), Keyword, ('group', 'group')),
(words(['entry']), Keyword, ('group', 'group', 'group')),
],
'group': [
include('whitespace'),
(r'\{', Punctuation, ('#pop', 'group-end', 'body')),
],
'group-end': [
include('whitespace'),
(r'\}', Punctuation, '#pop'),
],
'body': [
include('whitespace'),
(r"\'[^#\"\{\}\s]+", Name.Function),
(r'[^#\"\{\}\s]+\$', Name.Builtin),
(r'[^#\"\{\}\s]+', Name.Variable),
(r'"[^\"]*"', String),
(r'#-?\d+', Number),
(r'\{', Punctuation, ('group-end', 'body')),
default('#pop'),
],
'whitespace': [
(r'\s+', Text),
('%.*?$', Comment.SingleLine),
],
} |
5,395 | sort suggestions | # This file is part of Indico.
# Copyright (C) 2002 - 2023 CERN
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the MIT License; see the
# LICENSE file for more details.
from datetime import datetime, timedelta
from functools import cmp_to_key
from indico.modules.rb import rb_settings
from indico.modules.rb.models.blocked_rooms import BlockedRoomState
from indico.modules.rb.models.reservation_occurrences import ReservationOccurrence
from indico.modules.rb.models.reservations import RepeatFrequency
from indico.modules.rb.operations.blockings import get_blocked_rooms, get_rooms_blockings, group_blocked_rooms
from indico.modules.rb.operations.bookings import get_existing_rooms_occurrences
from indico.modules.rb.operations.conflicts import get_rooms_conflicts
from indico.modules.rb.operations.misc import get_rooms_nonbookable_periods, get_rooms_unbookable_hours
from indico.modules.rb.operations.rooms import search_for_rooms
from indico.modules.rb.util import group_by_occurrence_date
from indico.util.date_time import overlaps
BOOKING_TIME_DIFF = 20 # (minutes)
DURATION_FACTOR = 0.25
def get_suggestions(filters, limit=None):
blocked_rooms = get_blocked_rooms(filters['start_dt'], filters['end_dt'], [BlockedRoomState.accepted])
rooms = [room for room in search_for_rooms(filters, availability=False) if room not in blocked_rooms]
if filters['repeat_frequency'] == RepeatFrequency.NEVER:
suggestions = METHOD_NAME(get_single_booking_suggestions(rooms, filters['start_dt'], filters['end_dt'],
limit=limit))
else:
suggestions = METHOD_NAME(get_recurring_booking_suggestions(rooms, filters['start_dt'], filters['end_dt'],
filters['repeat_frequency'],
filters['repeat_interval'], limit=limit))
for entry in suggestions:
entry['room_id'] = entry.pop('room').id
return suggestions
def get_single_booking_suggestions(rooms, start_dt, end_dt, limit=None):
data = []
new_start_dt = start_dt - timedelta(minutes=BOOKING_TIME_DIFF)
new_end_dt = end_dt + timedelta(minutes=BOOKING_TIME_DIFF)
nonbookable_periods = get_rooms_nonbookable_periods(rooms, start_dt, end_dt)
rooms = [room for room in rooms if room.id not in nonbookable_periods]
if not rooms:
return data
unbookable_hours = get_rooms_unbookable_hours(rooms)
rooms_occurrences = get_existing_rooms_occurrences(rooms, new_start_dt, new_end_dt, RepeatFrequency.NEVER, None,
allow_overlapping=True)
for room in rooms:
if limit and len(data) == limit:
break
suggestions = {}
taken_periods = [(occ.start_dt, occ.end_dt) for occ in rooms_occurrences.get(room.id, [])]
if room.id in unbookable_hours:
taken_periods.extend((datetime.combine(start_dt, uh.start_time), datetime.combine(end_dt, uh.end_time))
for uh in unbookable_hours[room.id])
taken_periods = sorted(taken_periods)
suggested_time = get_start_time_suggestion(taken_periods, start_dt, end_dt)
if suggested_time:
suggested_time_change = (suggested_time - start_dt).total_seconds() / 60
if suggested_time_change and abs(suggested_time_change) <= BOOKING_TIME_DIFF:
suggestions['time'] = suggested_time_change
duration_suggestion = get_duration_suggestion(taken_periods, start_dt, end_dt)
original_duration = (end_dt - start_dt).total_seconds() / 60
if duration_suggestion and duration_suggestion <= DURATION_FACTOR * original_duration:
suggestions['duration'] = duration_suggestion
if suggestions:
data.append({'room': room, 'suggestions': suggestions})
return data
def get_recurring_booking_suggestions(rooms, start_dt, end_dt, repeat_frequency, repeat_interval, limit=None):
data = []
booking_days = end_dt - start_dt
booking_length = booking_days.days + 1
candidates = ReservationOccurrence.create_series(start_dt, end_dt, (repeat_frequency, repeat_interval))
blocked_rooms = group_blocked_rooms(get_rooms_blockings(rooms, start_dt.date(), end_dt.date()))
unbookable_hours = get_rooms_unbookable_hours(rooms)
nonbookable_periods = get_rooms_nonbookable_periods(rooms, start_dt, end_dt)
conflicts = get_rooms_conflicts(rooms, start_dt, end_dt, repeat_frequency, repeat_interval, blocked_rooms,
nonbookable_periods, unbookable_hours)[0]
for room in rooms:
if limit and len(data) == limit:
break
suggestions = {}
booking_limit = room.booking_limit_days or rb_settings.get('booking_limit')
limit_exceeded = booking_limit is not None and booking_limit < booking_length
if limit_exceeded:
excess_days = booking_length - booking_limit
suggestions['shorten'] = excess_days
if not limit_exceeded:
number_of_conflicting_days = len(group_by_occurrence_date(conflicts.get(room.id, [])))
if number_of_conflicting_days and number_of_conflicting_days < len(candidates):
suggestions['skip'] = number_of_conflicting_days
if suggestions:
data.append({'room': room, 'suggestions': suggestions})
return data
def get_start_time_suggestion(occurrences, from_, to):
duration = (to - from_).total_seconds() / 60
new_start_dt = from_ - timedelta(minutes=BOOKING_TIME_DIFF)
new_end_dt = to + timedelta(minutes=BOOKING_TIME_DIFF)
if not occurrences:
return new_start_dt
candidates = []
period_start = new_start_dt
for (occ_start, occ_end) in occurrences:
if period_start < occ_start:
candidates.append((period_start, occ_start))
period_start = occ_end
if period_start < new_end_dt:
candidates.append((period_start, new_end_dt))
for candidate in candidates:
start, end = candidate
candidate_duration = (end - start).total_seconds() / 60
if duration <= candidate_duration:
return start
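# Worked example (sketch): for a 9:00-10:00 request with BOOKING_TIME_DIFF = 20,
# the search window is 8:40-10:20. With one existing booking at 9:00-9:20 the
# candidate gaps are (8:40, 9:00) and (9:20, 10:20); only the second fits the
# 60-minute duration, so 9:20 is suggested as the new start time.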
def get_duration_suggestion(occurrences, from_, to):
old_duration = (to - from_).total_seconds() / 60
duration = old_duration
all_occurrences_overlap = all(overlaps((from_, to), occ) for occ in occurrences)
    # Don't calculate a duration suggestion if there are at least
    # two existing bookings conflicting with the specified dates.
if all_occurrences_overlap and len(occurrences) > 1:
return
for (start, end) in occurrences:
if start <= from_:
continue
if from_ < end < to:
if start > from_:
continue
duration -= (end - from_).total_seconds() / 60
if from_ < start < to:
if end < to:
continue
duration -= (to - start).total_seconds() / 60
return abs(duration - old_duration) if old_duration != duration else None
def METHOD_NAME(suggestions):
def cmp_fn(a, b):
a = a['suggestions']
b = b['suggestions']
a_time, a_duration = abs(a.get('time', 0)), a.get('duration', 0)
b_time, b_duration = abs(b.get('time', 0)), b.get('duration', 0)
return int(a_time + a_duration * 0.2) - int(b_time + b_duration * 0.2)
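    # With this weighting, a 1-minute time shift costs as much as a 5-minute
    # duration change, so suggestions requiring smaller time shifts sort first.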
return sorted(suggestions, key=cmp_to_key(cmp_fn)) |
5,396 | setup driver | from selenium import webdriver
from selenium.webdriver.firefox.options import Options as Options_firefox
from selenium.webdriver.chrome.options import Options as Options_chrome
from pathlib import Path
import pdfkit
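# Note: pdfkit shells out to the wkhtmltopdf binary, which must be installed
# separately and available on the PATH for the PDF exports below to work.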
def METHOD_NAME():
    # use the driver that exists in the working directory - chromedriver and geckodriver supported
find_geckodriver = Path("geckodriver")
find_chromedriver = Path("chromedriver")
if find_geckodriver.exists():
driver = 'geckodriver'
print('Using %s' % driver)
PATH_TO_DRIVER = './%s' % driver
firefox_options = Options_firefox()
# run in headless mode
firefox_options.headless = True
# disable cookies to prevent popups
opt = webdriver.FirefoxProfile()
opt.set_preference("network.cookie.cookieBehavior", 2)
browser = webdriver.Firefox(executable_path=PATH_TO_DRIVER, options=firefox_options, firefox_profile=opt)
elif find_chromedriver.exists():
driver = 'chromedriver'
print('Using %s' % driver)
PATH_TO_DRIVER = './%s' % driver
chrome_options = Options_chrome()
# run in headless mode
chrome_options.add_argument('--headless')
# disable cookies to prevent popups
chrome_options.add_experimental_option('prefs', {'profile.default_content_setting_values.cookies': 2})
browser = webdriver.Chrome(executable_path=PATH_TO_DRIVER, options=chrome_options)
else:
print('ERROR: No valid driver found. Only geckodriver or chromedriver is supported!')
exit()
return browser
def scrape_medium(browser, search_term, search_count):
# perform search of keyword in medium
browser.get('https://medium.com/search?q=%s' % search_term)
browser.implicitly_wait(10)
results = browser.find_elements_by_xpath('//a[contains(text(), "Read more")]')
all_articles = []
# get the links for all articles and store in list
for each_article in results:
all_articles.append(each_article.get_attribute('href'))
# go to link of each article and export as PDF
for counter, url in enumerate(all_articles, 1):
        print('Exporting article %s/%s: %s' % (counter, search_count, url))
browser.get(url)
browser.implicitly_wait(10)
# get title
article_title = browser.title
# get url of page to export
url_to_export = browser.current_url
# pdf name will contain article title
pdf = 'Medium-%s.pdf' % article_title
pdf = pdf.replace(' ', '-')
# export pdf
quiet_mode = {'quiet': ''}
pdfkit.from_url(url_to_export, pdf, options=quiet_mode)
# stop when we reach specified search count
        if counter == int(search_count):
print('Complete. %s medium articles exported to PDF.' % search_count)
browser.close()
return
def process_option_1():
# get user settings
user_input = input('Enter search term followed by number of articles between 1 and 10 (e.g "learn python 5"): ')
user_input = user_input.split(' ')
# last entry should be a number
try:
search_count = int(user_input[-1])
except ValueError:
print('ERROR: Invalid input. The last entry must end with a number!')
return
# remove number from list
user_input.pop()
    # join the remaining words to form the search phrase
    search_term = ' '.join(user_input)
browser = METHOD_NAME()
scrape_medium(browser, search_term, search_count)
exit()
def process_option_2():
# export a single article from the URL
url = input("Paste URL to convert to PDF (must begin with 'https://'): ")
print('Converting to PDF...')
quiet_mode = {'quiet': ''}
pdfkit.from_url(url, 'converted.pdf', options=quiet_mode)
print('Complete. See "converted.pdf"')
exit()
def program_run():
while True:
# get user option
option = input("Enter '1' to search medium or '2' to export a single medium article: ")
if option == '1':
process_option_1()
elif option == '2':
process_option_2()
elif option == 'x':
exit()
else:
print("Invalid option. Try again! Or enter x to exit")
continue
if __name__ == "__main__":
    program_run() |
5,397 | processing graph | # SPDX-License-Identifier: Apache-2.0
# Copyright 2022 The HuggingFace Authors.
from typing import Iterator
from libapi.config import UvicornConfig
from libcommon.METHOD_NAME import ProcessingGraph
from libcommon.queue import _clean_queue_database
from libcommon.resources import CacheMongoResource, QueueMongoResource
from libcommon.simple_cache import _clean_cache_database
from pytest import MonkeyPatch, fixture
from api.config import AppConfig, EndpointConfig
from api.routes.endpoint import EndpointsDefinition, StepsByInputTypeAndEndpoint
# see https://github.com/pytest-dev/pytest/issues/363#issuecomment-406536200
@fixture(scope="session")
def monkeypatch_session() -> Iterator[MonkeyPatch]:
monkeypatch_session = MonkeyPatch()
monkeypatch_session.setenv("CACHE_MONGO_DATABASE", "datasets_server_cache_test")
monkeypatch_session.setenv("QUEUE_MONGO_DATABASE", "datasets_server_queue_test")
hostname = "localhost"
port = "8888"
monkeypatch_session.setenv("API_HF_TIMEOUT_SECONDS", "10")
monkeypatch_session.setenv("API_UVICORN_HOSTNAME", hostname)
monkeypatch_session.setenv("API_UVICORN_PORT", port)
monkeypatch_session.setenv("COMMON_HF_ENDPOINT", f"http://{hostname}:{port}")
yield monkeypatch_session
monkeypatch_session.undo()
@fixture(scope="session")
def app_config(monkeypatch_session: MonkeyPatch) -> AppConfig:
app_config = AppConfig.from_env()
if "test" not in app_config.cache.mongo_database or "test" not in app_config.queue.mongo_database:
raise ValueError("Test must be launched on a test mongo database")
return app_config
@fixture(scope="session")
def endpoint_config(monkeypatch_session: MonkeyPatch) -> EndpointConfig:
return EndpointConfig(
processing_step_names_by_input_type_and_endpoint={
"/splits": {
"dataset": ["dataset-split-names"],
"config": ["config-split-names-from-streaming"],
},
"/first-rows": {"split": ["split-first-rows-from-streaming"]},
"/parquet": {"config": ["config-parquet"]},
}
)
@fixture(scope="session")
def METHOD_NAME(app_config: AppConfig) -> ProcessingGraph:
return ProcessingGraph(app_config.METHOD_NAME.specification)
@fixture(scope="session")
def endpoint_definition(
endpoint_config: EndpointConfig, METHOD_NAME: ProcessingGraph
) -> StepsByInputTypeAndEndpoint:
return EndpointsDefinition(METHOD_NAME, endpoint_config=endpoint_config).steps_by_input_type_and_endpoint
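# The three fixtures below pick, for each input type, the first configured
# endpoint whose definition accepts that input type (e.g. "/splits" for
# "dataset" with the endpoint_config above).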
@fixture(scope="session")
def first_dataset_endpoint(endpoint_definition: StepsByInputTypeAndEndpoint) -> str:
return next(
endpoint
for endpoint, input_types in endpoint_definition.items()
if next((endpoint for input_type, _ in input_types.items() if input_type == "dataset"), None)
)
@fixture(scope="session")
def first_config_endpoint(endpoint_definition: StepsByInputTypeAndEndpoint) -> str:
return next(
endpoint
for endpoint, input_types in endpoint_definition.items()
if next((endpoint for input_type, _ in input_types.items() if input_type == "config"), None)
)
@fixture(scope="session")
def first_split_endpoint(endpoint_definition: StepsByInputTypeAndEndpoint) -> str:
return next(
endpoint
for endpoint, input_types in endpoint_definition.items()
if next((endpoint for input_type, _ in input_types.items() if input_type == "split"), None)
)
@fixture(autouse=True)
def cache_mongo_resource(app_config: AppConfig) -> Iterator[CacheMongoResource]:
with CacheMongoResource(database=app_config.cache.mongo_database, host=app_config.cache.mongo_url) as resource:
yield resource
_clean_cache_database()
@fixture(autouse=True)
def queue_mongo_resource(app_config: AppConfig) -> Iterator[QueueMongoResource]:
with QueueMongoResource(database=app_config.queue.mongo_database, host=app_config.queue.mongo_url) as resource:
yield resource
_clean_queue_database()
@fixture(scope="session")
def uvicorn_config(monkeypatch_session: MonkeyPatch) -> UvicornConfig:
return UvicornConfig.from_env()
@fixture(scope="session")
def httpserver_listen_address(uvicorn_config: UvicornConfig) -> tuple[str, int]:
return (uvicorn_config.hostname, uvicorn_config.port)
@fixture(scope="session")
def hf_endpoint(app_config: AppConfig) -> str:
return app_config.common.hf_endpoint
@fixture(scope="session")
def hf_auth_path(app_config: AppConfig) -> str:
return app_config.api.hf_auth_path |
5,398 | set properties | ###############################################################################
#
# App - A class for writing the Excel XLSX App file.
#
# Copyright 2013-2017, John McNamara, jmcnamara@cpan.org
#
# Package imports.
from . import xmlwriter
class App(xmlwriter.XMLwriter):
"""
A class for writing the Excel XLSX App file.
"""
###########################################################################
#
# Public API.
#
###########################################################################
def __init__(self):
"""
Constructor.
"""
super(App, self).__init__()
self.part_names = []
self.heading_pairs = []
self.properties = {}
def _add_part_name(self, part_name):
# Add the name of a workbook Part such as 'Sheet1' or 'Print_Titles'.
self.part_names.append(part_name)
def _add_heading_pair(self, heading_pair):
# Add the name of a workbook Heading Pair such as 'Worksheets',
# 'Charts' or 'Named Ranges'.
# Ignore empty pairs such as chartsheets.
if not heading_pair[1]:
return
self.heading_pairs.append(('lpstr', heading_pair[0]))
self.heading_pairs.append(('i4', heading_pair[1]))
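    # Example (sketch): _add_heading_pair(('Worksheets', 2)) stores
    # [('lpstr', 'Worksheets'), ('i4', 2)], which _write_vt_vector later
    # serializes as <vt:variant>-wrapped <vt:lpstr> and <vt:i4> elements.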
def METHOD_NAME(self, properties):
# Set the document properties.
self.properties = properties
###########################################################################
#
# Private API.
#
###########################################################################
def _assemble_xml_file(self):
# Assemble and write the XML file.
# Write the XML declaration.
self._xml_declaration()
self._write_properties()
self._write_application()
self._write_doc_security()
self._write_scale_crop()
self._write_heading_pairs()
self._write_titles_of_parts()
self._write_manager()
self._write_company()
self._write_links_up_to_date()
self._write_shared_doc()
self._write_hyperlink_base()
self._write_hyperlinks_changed()
self._write_app_version()
self._xml_end_tag('Properties')
# Close the file.
self._xml_close()
###########################################################################
#
# XML methods.
#
###########################################################################
def _write_properties(self):
# Write the <Properties> element.
schema = 'http://schemas.openxmlformats.org/officeDocument/2006/'
xmlns = schema + 'extended-properties'
xmlns_vt = schema + 'docPropsVTypes'
attributes = [
('xmlns', xmlns),
('xmlns:vt', xmlns_vt),
]
self._xml_start_tag('Properties', attributes)
def _write_application(self):
# Write the <Application> element.
self._xml_data_element('Application', 'Microsoft Excel')
def _write_doc_security(self):
# Write the <DocSecurity> element.
self._xml_data_element('DocSecurity', '0')
def _write_scale_crop(self):
# Write the <ScaleCrop> element.
self._xml_data_element('ScaleCrop', 'false')
def _write_heading_pairs(self):
# Write the <HeadingPairs> element.
self._xml_start_tag('HeadingPairs')
self._write_vt_vector('variant', self.heading_pairs)
self._xml_end_tag('HeadingPairs')
def _write_titles_of_parts(self):
# Write the <TitlesOfParts> element.
parts_data = []
self._xml_start_tag('TitlesOfParts')
for part_name in self.part_names:
parts_data.append(('lpstr', part_name))
self._write_vt_vector('lpstr', parts_data)
self._xml_end_tag('TitlesOfParts')
def _write_vt_vector(self, base_type, vector_data):
# Write the <vt:vector> element.
attributes = [
('size', len(vector_data)),
('baseType', base_type),
]
self._xml_start_tag('vt:vector', attributes)
for vt_data in vector_data:
if base_type == 'variant':
self._xml_start_tag('vt:variant')
self._write_vt_data(vt_data)
if base_type == 'variant':
self._xml_end_tag('vt:variant')
self._xml_end_tag('vt:vector')
def _write_vt_data(self, vt_data):
# Write the <vt:*> elements such as <vt:lpstr> and <vt:if>.
self._xml_data_element("vt:%s" % vt_data[0], vt_data[1])
def _write_company(self):
company = self.properties.get('company', '')
self._xml_data_element('Company', company)
def _write_manager(self):
# Write the <Manager> element.
if 'manager' not in self.properties:
return
self._xml_data_element('Manager', self.properties['manager'])
def _write_links_up_to_date(self):
# Write the <LinksUpToDate> element.
self._xml_data_element('LinksUpToDate', 'false')
def _write_shared_doc(self):
# Write the <SharedDoc> element.
self._xml_data_element('SharedDoc', 'false')
def _write_hyperlink_base(self):
# Write the <HyperlinkBase> element.
hyperlink_base = self.properties.get('hyperlink_base')
if hyperlink_base is None:
return
self._xml_data_element('HyperlinkBase', hyperlink_base)
def _write_hyperlinks_changed(self):
# Write the <HyperlinksChanged> element.
self._xml_data_element('HyperlinksChanged', 'false')
def _write_app_version(self):
# Write the <AppVersion> element.
self._xml_data_element('AppVersion', '12.0000') |
5,399 | set force | # Copyright 1996-2023 Cyberbotics Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import ctypes
from .constants import constant
from .device import Device
from .wb import wb
from typing import Union
class Motor(Device):
wb.wb_motor_get_target_position.restype = ctypes.c_double
wb.wb_motor_get_max_position.restype = ctypes.c_double
wb.wb_motor_get_min_position.restype = ctypes.c_double
wb.wb_motor_get_velocity.restype = ctypes.c_double
wb.wb_motor_get_max_velocity.restype = ctypes.c_double
wb.wb_motor_get_acceleration.restype = ctypes.c_double
wb.wb_motor_get_available_force.restype = ctypes.c_double
wb.wb_motor_get_available_torque.restype = ctypes.c_double
wb.wb_motor_get_max_force.restype = ctypes.c_double
wb.wb_motor_get_max_torque.restype = ctypes.c_double
wb.wb_motor_get_multiplier.restype = ctypes.c_double
wb.wb_motor_get_force_feedback.restype = ctypes.c_double
wb.wb_motor_get_torque_feedback.restype = ctypes.c_double
ROTATIONAL = constant('ROTATIONAL')
LINEAR = constant('LINEAR')
def __init__(self, name: Union[str, int]):
super().__init__(name)
def setPosition(self, position: float):
self.target_position = position
def setVelocity(self, velocity: float):
self.target_velocity = velocity
def setAcceleration(self, acceleration: float):
self.target_acceleration = acceleration
def setAvailableForce(self, force: float):
self.available_force = force
def setAvailableTorque(self, torque: float):
self.available_torque = torque
def setControlPID(self, p: float, i: float, d: float):
wb.wb_motor_set_control_pid(self._tag, ctypes.c_double(p), ctypes.c_double(i), ctypes.c_double(d))
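    # Usage sketch (assumes a Webots controller context and a device named
    # "rotational motor"; the device name is illustrative):
    #
    #     from controller import Robot
    #     robot = Robot()
    #     motor = robot.getDevice('rotational motor')
    #     motor.setPosition(float('inf'))  # switch to velocity control
    #     motor.setVelocity(0.5 * motor.getMaxVelocity())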
def getTargetPosition(self) -> float:
return self.target_position
def getMinPosition(self) -> float:
return self.min_position
def getMaxPosition(self) -> float:
return self.max_position
def getVelocity(self) -> float:
return self.target_velocity
def getMaxVelocity(self) -> float:
return self.max_velocity
def getAcceleration(self) -> float:
return self.target_acceleration
def getAvailableForce(self) -> float:
return self.available_force
def getMaxForce(self) -> float:
return self.max_force
def getAvailableTorque(self) -> float:
return self.available_torque
def getMaxTorque(self) -> float:
return self.max_torque
def getMultiplier(self) -> float:
return self.multiplier
def enableForceFeedback(self, sampling_period: int):
wb.wb_motor_enable_force_feedback(self._tag, sampling_period)
def disableForceFeedback(self):
wb.wb_motor_disable_force_feedback(self._tag)
def getForceFeedbackSamplingPeriod(self) -> int:
return self.force_feedback_sampling_period
def getForceFeedback(self) -> float:
return self.force_feedback
def enableTorqueFeedback(self, sampling_period: int):
wb.wb_motor_enable_torque_feedback(self._tag, sampling_period)
def disableTorqueFeedback(self):
wb.wb_motor_disable_torque_feedback(self._tag)
def getTorqueFeedbackSamplingPeriod(self) -> int:
return self.torque_feedback_sampling_period
def getTorqueFeedback(self) -> float:
return self.torque_feedback
def METHOD_NAME(self, force: float):
wb.wb_motor_set_force(self._tag, ctypes.c_double(force))
def setTorque(self, torque: float):
wb.wb_motor_set_torque(self._tag, ctypes.c_double(torque))
def getType(self) -> int:
return wb.wb_motor_get_type(self._tag)
def getBrake(self):
return self.brake
def getPositionSensor(self):
return self.position_sensor
@property
def brake(self):
from .brake import Brake
tag = wb.wb_motor_get_brake(self._tag)
return None if tag == 0 else Brake(tag)
@property
def position_sensor(self):
from .position_sensor import PositionSensor
tag = wb.wb_motor_get_position_sensor(self._tag)
return None if tag == 0 else PositionSensor(tag)
@property
def force_feedback_sampling_period(self) -> int:
return wb.wb_motor_get_force_feedback_sampling_period(self._tag)
@force_feedback_sampling_period.setter
def force_feedback_sampling_period(self, sampling_period):
wb.wb_motor_enable_force_feedback(self._tag, sampling_period)
@property
def torque_feedback_sampling_period(self) -> int:
return wb.wb_motor_get_torque_feedback_sampling_period(self._tag)
@torque_feedback_sampling_period.setter
def torque_feedback_sampling_period(self, sampling_period):
wb.wb_motor_enable_torque_feedback(self._tag, sampling_period)
@property
def max_position(self) -> float:
return wb.wb_motor_get_max_position(self._tag)
@property
def min_position(self) -> float:
return wb.wb_motor_get_min_position(self._tag)
@property
def max_velocity(self) -> float:
return wb.wb_motor_get_max_velocity(self._tag)
@property
def target_position(self) -> float:
return wb.wb_motor_get_target_position(self._tag)
@target_position.setter
def target_position(self, position: float):
wb.wb_motor_set_position(self._tag, ctypes.c_double(position))
@property
def target_velocity(self) -> float:
return wb.wb_motor_get_velocity(self._tag)
@target_velocity.setter
def target_velocity(self, velocity: float):
wb.wb_motor_set_velocity(self._tag, ctypes.c_double(velocity))
@property
def available_force(self) -> float:
return wb.wb_motor_get_available_force(self._tag)
@available_force.setter
def available_force(self, force: float):
wb.wb_motor_set_available_force(self._tag, ctypes.c_double(force))
@property
def max_force(self) -> float:
return wb.wb_motor_get_max_force(self._tag)
@property
def available_torque(self) -> float:
return wb.wb_motor_get_available_torque(self._tag)
@available_torque.setter
def available_torque(self, torque: float):
wb.wb_motor_set_available_torque(self._tag, ctypes.c_double(torque))
@property
def max_torque(self) -> float:
return wb.wb_motor_get_max_torque(self._tag)
@property
def target_acceleration(self) -> float:
return wb.wb_motor_get_acceleration(self._tag)
@target_acceleration.setter
def target_acceleration(self, acceleration: float):
wb.wb_motor_set_acceleration(self._tag, ctypes.c_double(acceleration))
force = property(fset=METHOD_NAME)
torque = property(fset=setTorque)
@property
def multiplier(self) -> float:
return wb.wb_motor_get_multiplier(self._tag)
@property
def force_feedback(self) -> float:
return wb.wb_motor_get_force_feedback(self._tag)
@property
def torque_feedback(self) -> float:
return wb.wb_motor_get_torque_feedback(self._tag)
@property
def type(self) -> int:
return wb.wb_motor_get_type(self._tag) |