id int64 0 300k | label stringlengths 1 74 ⌀ | text stringlengths 4k 8k |
|---|---|---|
299,600 | build operation log | # -*- coding: utf-8 -*-
#
# RERO ILS
# Copyright (C) 2019-2023 RERO
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, version 3 of the License.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""Operation log record extensions."""
import uuid
from datetime import datetime, timezone
from functools import partialmethod
import pytz
from deepdiff import DeepDiff
from flask import request as flask_request
from invenio_records.extensions import RecordExtension
from rero_ils.modules.patrons.api import current_librarian
from .models import OperationLogOperation
from ..utils import extracted_data_from_ref
class OperationLogObserverExtension(RecordExtension):
    """Observe a resource and build operation log when it changes.

    Hooks into the record lifecycle (``post_create``, ``pre_commit``,
    ``post_delete``) and registers an ``OperationLog`` entry for each event.
    """

    def get_additional_informations(self, record):
        """Get some informations to add into the operation log.

        Subclasses can override this method to add some informations into
        the operation log dictionary.

        :param record: the observed record.
        :return: a dict with additional informations, or None.
        """
        return None

    def METHOD_NAME(self, record, operation):
        """Build the operation log dict based on record.

        :param record: the updated record.
        :param operation: the trigger operation on this record.
        :return: a dict representing the operation log to register.
        """
        oplg = {
            'date': datetime.now(timezone.utc).isoformat(),
            'record': {
                'value': record.get('pid'),
                'type': record.provider.pid_type
            },
            'operation': operation,
            'user_name': 'system'  # default value, may be overridden below
        }
        if current_librarian:
            oplg |= {
                'user_name': current_librarian.formatted_name,
                'user': {
                    'type': 'ptrn',
                    'value': current_librarian.pid
                },
                'organisation': {
                    'type': 'org',
                    'value': current_librarian.organisation_pid
                },
                'library': {
                    'type': 'lib',
                    'value': current_librarian.library_pid
                },
            }
            # If the request targets a library the librarian may manage,
            # log against that library instead of the librarian's own one.
            if (lib_pid := flask_request.args.get('current_library')) \
               and lib_pid in current_librarian.manageable_library_pids:
                oplg |= {
                    'organisation': {
                        'type': 'org',
                        'value': current_librarian.organisation_pid
                    },
                    'library': {
                        'type': 'lib',
                        'value': lib_pid
                    }
                }
        # Allow additional informations for the operation log.
        # Subclasses can override the ``get_additional_informations()``
        # method to add some data into the operation log dict.
        oplg |= (self.get_additional_informations(record) or {})
        return oplg

    def _create_operation_log(self, record, operation, **kwargs):
        """Build and register an operation log."""
        # Imported here to avoid a circular import with the api module.
        from .api import OperationLog
        data = self.METHOD_NAME(record, operation)
        OperationLog.create(data)

    post_create = partialmethod(
        _create_operation_log,
        operation=OperationLogOperation.CREATE
    )
    """Called after a record is created."""

    pre_commit = partialmethod(
        _create_operation_log,
        operation=OperationLogOperation.UPDATE
    )
    """Called before a record is committed."""

    post_delete = partialmethod(
        _create_operation_log,
        operation=OperationLogOperation.DELETE
    )
    """Called after a record is deleted."""
class UntrackedFieldsOperationLogObserverExtension(OperationLogObserverExtension):
    """Extension to skip Operation log if only some field changed.

    If you need to observe a resource but skip changes on some resource
    attributes, you can do it using this ``RecordExtension``. When you create
    the extension, you need to provide attributes that must be untracked (using
    the attribute xpath).

    Example:
        >> # OperationLog will be created except if 'status' attribute changed.
        >> _extensions=[
        >>     UntrackedFieldsOperationLogObserverExtension(['status'])
        >> ]
        >> # OperationLog will be created except if '$ref' attribute (from loan
        >> # attribute) changed.
        >> _extensions=[
        >>     UntrackedFieldsOperationLogObserverExtension(['loan.$ref'])
        >> ]
    """

    def __init__(self, fields=None):
        """Init.

        :param fields: attribute path, or list of paths, to exclude from
            change detection.
        """
        # Accept a single path given as a plain string for convenience.
        if isinstance(fields, str):
            fields = [fields]
        # DeepDiff expects exclusion paths in its "root[...]" notation.
        self.exclude_path = [f"root['{f}']" for f in fields or []]

    def pre_commit(self, record):
        """Called before a record is committed."""
        # Diff the in-memory record against its persisted version while
        # ignoring the excluded paths; only log when something else changed.
        original_record = record.__class__.get_record_by_pid(record.pid)
        diff = DeepDiff(
            original_record, record,
            verbose_level=2,
            exclude_paths=self.exclude_path
        )
        if diff:
            super().pre_commit(record)
class ResolveRefsExtension(RecordExtension):
    """Replace all $ref values by a dict of pid, type."""

    # Maps the resource name (as extracted from the $ref URL) to the short
    # record-type code stored in the operation log.
    mod_type = {
        'documents': 'doc',
        'items': 'item',
        'holdings': 'hold',
        'loans': 'loan',
        'ill_requests': 'illr',
        'patrons': 'ptrn',
        'organisations': 'org',
        'libraries': 'lib'
    }

    def pre_dump(self, record, dumper=None):
        """Called before a record is dumped.

        :param record: the record metadata.
        :param dumper: the record dumper.
        """
        self._resolve_refs(record)

    def _resolve_refs(self, record):
        """Recursively replace the $refs.

        Replace in place all $ref to a dict of pid, type values.

        :param record: the record metadata.
        """
        for k, v in record.items():
            if isinstance(v, dict):
                if v.get('$ref'):
                    _type = self.mod_type.get(
                        extracted_data_from_ref(v, data='resource'))
                    # Only known resource types are replaced; unknown $refs
                    # are left untouched.
                    if _type:
                        resolved = dict(
                            pid=extracted_data_from_ref(v),
                            type=_type
                        )
                        # Re-assigning an existing key while iterating is
                        # safe: the dict size does not change.
                        record[k] = resolved
                else:
                    self._resolve_refs(v)
class IDExtension(RecordExtension):
    """Assign a UUID-based ``pid`` to records that lack one."""

    def pre_create(self, record):
        """Called before a record is created.

        :param record: the record metadata.
        """
        current_pid = record.get('pid')
        if not current_pid:
            record['pid'] = str(uuid.uuid1())
class DatesExtension(RecordExtension):
    """Set the created and updated date if needed."""

    def pre_create(self, record):
        """Called before a record is created.

        :param record: the record metadata.
        """
        # Use the tz-aware UTC "now" (consistent with the rest of this
        # module) instead of the deprecated ``datetime.utcnow()`` +
        # ``pytz.utc.localize`` combination; the ISO output is identical.
        iso_now = datetime.now(timezone.utc).isoformat()
        for date_field in ('_created', '_updated'):
            if not record.get(date_field):
                record[date_field] = iso_now
299,601 | coot server | from __future__ import absolute_import, division, print_function
# This is an example of how a 3rd-party program with Python embedded, such
# as Coot or PyMOL, can be interfaced with CCTBX-based software. Something
# much like this is used for the Phenix GUI extensions to those programs.
# I haven't tried this with any other software, but anything with a reasonably
# recent version of Python and support for either persistent Python threads
# or some sort of timer callback should be able to use it.
DEFAULT_PORT = 40000
import os, sys, string, signal
import xmlrpclib
try :
    from SimpleXMLRPCServer import SimpleXMLRPCServer

    class external_xmlrpc_server(SimpleXMLRPCServer):
        """XML-RPC server run inside the host program (Coot, PyMOL, ...).

        Every incoming call is forwarded to a method of the wrapped
        ``cctbx_interface`` object.
        """

        def __init__(self, addr, cctbx_interface):
            self.cctbx_interface = cctbx_interface
            SimpleXMLRPCServer.__init__(self, addr, logRequests=0)

        def _dispatch(self, method, params):
            """Dispatch *method* to the interface object.

            Returns -1 when XML-RPC is disabled, the method is not
            callable, or the call yields None (XML-RPC cannot marshal a
            bare None result).
            """
            if not self.cctbx_interface.enable_xmlrpc :
                return -1
            result = -1
            func = getattr(self.cctbx_interface, method, None)
            if not callable(func):
                print("%s is not a callable object!" % method)
            else :
                result = func(*params)
                if result is None :
                    result = -1
            return result

    class external_xmlrpc_interface(object):
        """Bridge between the host program and CCTBX-based software.

        Exposes the functions of registered modules over XML-RPC, and can
        call back into a CCTBX XML-RPC server if one is configured via the
        CCTBX_XMLRPC_PORT environment variable.
        """

        def __init__(self, program_id, auto_start=True, verbose=False):
            self.enable_xmlrpc = True
            self.xmlrpc_server = None
            self.cctbx_server = None
            self.verbose = verbose
            # int() instead of the Python2-only string.atoi().
            self.timeout = int(os.environ.get("CCTBX_XMLRPC_TIMEOUT", "250"))
            self.program_id = program_id
            self.supported_modules = []
            self.setup_modules()
            self.setup_server()
            if auto_start :
                self.start_server()

        def setup_modules(self):
            """Hook for subclasses to register modules before startup."""
            pass

        def add_module(self, module_object=None, module_path=None):
            """Register a module whose functions become remotely callable."""
            if module_object is not None :
                self.supported_modules.append(module_object)
            elif module_path is not None :
                module_object = __import__(module_path)
                self.supported_modules.append(module_object)

        def setup_server(self):
            """Create the local server and (optionally) the CCTBX proxy."""
            port = os.environ.get("CCTBX_%s_PORT" % self.program_id, DEFAULT_PORT)
            if port is not None :
                self.port = int(port)
                self.xmlrpc_server = external_xmlrpc_server(("127.0.0.1", self.port),
                    self)
                if self.verbose :
                    print("Listening on port %s" % port)
            cctbx_port = os.environ.get("CCTBX_XMLRPC_PORT", None)
            if cctbx_port is not None :
                uri = "http://localhost:%s/RPC2" % cctbx_port
                self.cctbx_server = xmlrpclib.ServerProxy(uri=uri)
                if self.verbose :
                    print("Connecting to XML-RPC server on port %s" % cctbx_port)

        def start_server(self):
            """Serve requests forever on the current thread (blocking)."""
            if self.xmlrpc_server is not None :
                print("XML-RPC server started on port %d" % self.port)
                self.xmlrpc_server.serve_forever()

        def start_server_in_separate_thread(self):
            """Serve requests on a daemon thread so the host GUI stays live."""
            import threading
            t = threading.Thread(target=self.start_server)
            # Modern spelling of the deprecated setDaemon(1).
            t.daemon = True
            t.start()

        def set_socket_timeout(self, timeout):
            """Set the server socket timeout (for polling-style serving)."""
            if self.xmlrpc_server is not None :
                self.xmlrpc_server.socket.settimeout(timeout)

        def timeout_func(self, *args):
            """Handle at most one pending request; meant for GUI timers."""
            if self.xmlrpc_server is not None :
                self.xmlrpc_server.handle_request()
            return True

        def is_alive(self):
            """Liveness probe for remote callers."""
            return True

        # XXX: this should be replaced by the proper quit function for the program
        # being extended - e.g. cmd.quit() in PyMOL.
        def quit(self):
            print("quitting")
            sys.stdout.flush()
            os.kill(os.getpid(), signal.SIGKILL)

        def __getattr__(self, name):
            # Fall back to the registered modules for unknown attributes so
            # that their functions can be dispatched by name.
            for module_object in self.supported_modules :
                if hasattr(module_object, name):
                    return getattr(module_object, name)
            return None
except KeyboardInterrupt :
    raise
except ImportError :
    # Fallback stubs when SimpleXMLRPCServer is unavailable.  BUGFIX: the
    # original fallback defined ``external_cctbx_interface`` while leaving
    # the actually used name ``external_xmlrpc_interface`` undefined (a
    # NameError at call time); define the real name and keep the old one
    # as a backwards-compatible alias.
    def external_xmlrpc_server(*args, **kwds):
        raise Exception("SimpleXMLRPCServer not available on this platform.")

    def external_xmlrpc_interface(*args, **kwds):
        raise Exception("SimpleXMLRPCServer not available on this platform.")

    external_cctbx_interface = external_xmlrpc_interface
def test_server():
    """Spin up a standalone XML-RPC interface exposing a trivial echo module."""
    class test_module(object):
        def echo_test(self):
            print("hello, world!")
            sys.stdout.flush()
            return True
    # os.environ["CCTBX_TEST_PORT"] = "48000"
    server = external_xmlrpc_interface("TEST", auto_start=False, verbose=False)
    server.add_module(test_module())
    server.start_server()
def METHOD_NAME():
    """Start the XML-RPC interface inside Coot.

    Registers the ``coot`` module so its functions become remotely callable,
    and polls for requests from a gobject timer instead of a blocking
    ``serve_forever`` loop (Coot owns the main loop).
    """
    server = external_xmlrpc_interface("COOT",
        auto_start=False,
        verbose=True)
    # Short socket timeout so handle_request() never blocks the GUI.
    server.set_socket_timeout(0.01)
    import coot
    import gobject
    server.add_module(coot)
    # Poll for incoming XML-RPC requests every 200 ms.
    gobject.timeout_add(200, server.timeout_func)
if __name__ == "__main__" :
    # Run the Coot entry point by default; uncomment for the echo test.
    #test_server()
    METHOD_NAME()
#---end |
299,602 | test given endpoint equal to start point | import pytest
from presidio_anonymizer.entities import InvalidParamException, RecognizerResult
@pytest.mark.parametrize(
    # fmt: off
    "start, end",
    [
        (0, 10),
        (2, 8),
        (0, 8),
        (0, 10),
    ],
    # fmt: on
)
def test_given_recognizer_results_then_one_contains_another(start, end):
    """A result spanning [0, 10] contains every result inside that span."""
    outer = create_recognizer_result("entity", 0, 0, 10)
    inner = create_recognizer_result("entity", 0, start, end)
    assert outer.contains(inner)
@pytest.mark.parametrize(
    # fmt: off
    "start, end",
    [
        (4, 10),
        (5, 11),
        (0, 5),
        (0, 6),
    ],
    # fmt: on
)
def test_given_recognizer_result_then_they_do_not_contain_one_another(start, end):
    """Spans only partially covered by the base span are not contained."""
    base = create_recognizer_result("entity", 0, 5, 10)
    other = create_recognizer_result("entity", 0, start, end)
    assert not base.contains(other)
def test_given_recognizer_results_with_same_indices_then_indices_are_equal():
    """Two results with identical start/end report equal indices."""
    one = create_recognizer_result("entity", 0, 0, 10)
    other = create_recognizer_result("entity", 0, 0, 10)
    assert one.equal_indices(other)
@pytest.mark.parametrize(
    # fmt: off
    "start, end",
    [
        (4, 10),
        (5, 11),
        (0, 5),
        (0, 6),
    ],
    # fmt: on
)
def test_given_recognizer_results_with_different_indices_then_indices_are_not_equal(
    start, end
):
    """Results spanning different ranges report unequal indices."""
    base = create_recognizer_result("entity", 0, 5, 10)
    other = create_recognizer_result("entity", 0, start, end)
    assert not base.equal_indices(other)
@pytest.mark.parametrize(
    # fmt: off
    "start, end, err",
    [
        ("0", 10,
         "Invalid parameter value for start. Expecting 'number', but got 'string'."),
        (0, "10",
         "Invalid parameter value for end. Expecting 'number', but got 'string'."),
    ],
    # fmt: on
)
def test_given_invalid_string_start_instead_of_int_then_we_fail(start, end, err):
    """String-typed start/end indices are rejected with a descriptive error."""
    with pytest.raises(InvalidParamException, match=err):
        create_recognizer_result("bla", 0.2, start, end)
def test_given_identical_recognizer_results_then_they_are_equal():
    """Two results built from identical fields compare equal."""
    lhs = create_recognizer_result("bla", 0.2, 0, 10)
    rhs = create_recognizer_result("bla", 0.2, 0, 10)
    assert lhs == rhs
@pytest.mark.parametrize(
    # fmt: off
    "entity_type, score, start, end",
    [
        ("bla", 0.2, 4, 10),
        ("changed", 0.2, 0, 10),
        ("bla", 0.2, 0, 11),
        ("bla", 0.3, 0, 10),
    ],
    # fmt: on
)
def test_given_different_recognizer_result_then_they_are_not_equal(
    entity_type, score, start, end
):
    """Changing any single field breaks equality."""
    reference = create_recognizer_result("bla", 0.2, 0, 10)
    candidate = create_recognizer_result(entity_type, score, start, end)
    assert reference != candidate
def test_given_recognizer_result_then_their_hash_is_equal():
    """Equal results must produce equal hashes (hash/eq contract)."""
    first = create_recognizer_result("entity", 0, 0, 10)
    second = create_recognizer_result("entity", 0, 0, 10)
    # Idiomatic: use the hash() builtin instead of calling __hash__().
    assert hash(first) == hash(second)
@pytest.mark.parametrize(
    # fmt: off
    "entity_type, score, start, end",
    [
        ("bla", 0.2, 4, 10),
        ("changed", 0.2, 0, 10),
        ("bla", 0.2, 0, 11),
        ("bla", 0.3, 0, 10),
    ],
    # fmt: on
)
def test_given_different_recognizer_results_then_hash_is_not_equal(
    entity_type, score, start, end
):
    """Results differing in any field hash differently."""
    first = create_recognizer_result("bla", 0.2, 0, 10)
    second = create_recognizer_result(entity_type, score, start, end)
    # Idiomatic: use the hash() builtin instead of calling __hash__().
    assert hash(first) != hash(second)
@pytest.mark.parametrize(
    # fmt: off
    "entity_type, score, start, end",
    [
        ("bla", 0.2, 0, 10),
        ("changed", 0.2, 2, 10),
        ("bla", 0.3, 0, 11),
        ("bla", 0.1, 0, 10),
    ],
    # fmt: on
)
def test_given_recognizer_results_with_conflicting_indices_then_there_is_a_conflict(
    entity_type, score, start, end
):
    """Spans overlapping the base result are reported as conflicts."""
    base = create_recognizer_result("bla", 0.2, 2, 10)
    other = create_recognizer_result(entity_type, score, start, end)
    assert base.has_conflict(other)
@pytest.mark.parametrize(
    # fmt: off
    "entity_type, score, start, end",
    [
        ("bla", 0.2, 3, 10),
        ("changed", 0.1, 2, 10),
        ("bla", 0.3, 0, 9),
    ],
    # fmt: on
)
def test_given_recognizer_results_with_no_conflicting_indices_then_there_is_no_conflict(
    entity_type, score, start, end
):
    """These field combinations are not considered conflicting with the base."""
    base = create_recognizer_result("bla", 0.2, 2, 10)
    other = create_recognizer_result(entity_type, score, start, end)
    assert not base.has_conflict(other)
@pytest.mark.parametrize(
    # fmt: off
    "request_json, result_text",
    [
        ({}, "Invalid input, result must contain start",),
        ({
            "end": 32,
            "score": 0.8,
            "entity_type": "NUMBER"
        }, "Invalid input, result must contain start",),
        ({
            "start": 28,
            "score": 0.8,
            "entity_type": "NUMBER"
        }, "Invalid input, result must contain end",),
        ({
            "start": 28,
            "end": 32,
            "entity_type": "NUMBER"
        }, "Invalid input, analyzer result must contain score",),
        ({
            "start": 28,
            "end": 32,
            "score": 0.8,
        }, "Invalid input, result must contain entity_type",),
    ],
    # fmt: on
)
def test_given_json_for_creating_recognizer_result_without_text_then_creation_fails(
    request_json, result_text
):
    """from_json rejects payloads missing any required field, with a clear message."""
    with pytest.raises(InvalidParamException) as e:
        RecognizerResult.from_json(request_json)
    assert result_text == e.value.err_msg
def test_given_valid_json_for_creating_recognizer_result_then_creation_is_successful():
    """A complete payload round-trips into a populated RecognizerResult."""
    result = create_recognizer_result("NUMBER", 0.8, 0, 32)
    assert result.start == 0
    assert result.end == 32
    assert result.score == 0.8
    assert result.entity_type == "NUMBER"
@pytest.mark.parametrize(
    # fmt: off
    "start, end",
    [
        (4, 10),
        (4, 9),
        (0, 2),
        (5, 9),
    ],
    # fmt: on
)
def test_given_recognizer_results_then_one_is_greater_then_another(start, end):
    """The base result sorts after each earlier/shorter span."""
    first = create_recognizer_result("entity", 0, 5, 10)
    second = create_recognizer_result("entity", 0, start, end)
    # Idiomatic: use the > operator instead of calling __gt__ directly.
    assert first > second
@pytest.mark.parametrize(
    # fmt: off
    "start, end",
    [
        (5, 10),
        (6, 12),
        (6, 7),
    ],
    # fmt: on
)
def test_given_recognizer_result_then_one_is_not_greater_then_another(start, end):
    """The base result does not sort after equal or later spans."""
    first = create_recognizer_result("entity", 0, 5, 10)
    second = create_recognizer_result("entity", 0, start, end)
    # Idiomatic: use the > operator instead of calling __gt__ directly.
    assert not (first > second)
def test_given_endpoint_larger_then_start_point_then_we_fail():
    """A start index greater than the end index is rejected."""
    with pytest.raises(InvalidParamException) as e:
        create_recognizer_result("entity", 0, 10, 0)
    assert (
        e.value.err_msg == "Invalid input, start index '10' "
        "must be smaller than end index '0'"
    )
def METHOD_NAME():
    """A zero-length result (start == end == 0) is valid and truthy."""
    result = create_recognizer_result("entity", 0, 0, 0)
    assert result
@pytest.mark.parametrize(
    # fmt: off
    "start, end",
    [
        (-1, 10),
        (6, -12),
        (-2, -2),
    ],
    # fmt: on
)
def test_given_negative_start_or_endpoint_then_we_fail(start, end):
    """Negative indices are rejected with a validation error."""
    with pytest.raises(
        InvalidParamException,
        match="Invalid input, result start and end must be positive",
    ):
        create_recognizer_result("entity", 0, start, end)
def create_recognizer_result(entity_type: str, score: float, start: int, end: int):
    """Build a RecognizerResult from raw field values via its JSON factory."""
    return RecognizerResult.from_json(
        {"entity_type": entity_type, "score": score, "start": start, "end": end}
    )
299,603 | is reachable | #!/usr/bin/env python3
import json
import os
import subprocess
import time
import unittest
import socket
import requests
from osv import client
class HttpError(Exception):
    """Raised when an HTTP request returns a non-200 status code."""

    def __init__(self, code):
        # Pass the code to Exception so str(e)/repr(e) are informative
        # instead of empty.
        super().__init__(code)
        self.code = code
class Basetest(unittest.TestCase):
    """Base class for OSv HTTP API tests.

    Boots an OSv guest (QEMU or firecracker), waits until its REST API is
    reachable, exercises it through :meth:`curl`, and shuts the guest
    down afterwards.
    """

    @classmethod
    def set_config(cls, parser):
        """Parse command-line options and build the API client."""
        cls.config = parser.parse_args()
        if cls.config.hypervisor == 'firecracker':
            module_base = os.path.join(os.path.realpath(os.path.dirname(__file__)), '..')
            cls.config.run_script = os.path.join(module_base, "..", "..", "scripts", "firecracker.py")
            cls.config.host = '172.16.0.2'
        cls._client = client.Client(cls.config)

    @classmethod
    def get_url(cls, uri):
        """Return the absolute URL for an API path."""
        return cls._client.get_url() + uri

    @classmethod
    def get_json_api(cls, name):
        """Load an API definition from the configured JSON directory."""
        return cls.get_json_api_from_directory(cls.config.jsondir, name)

    @classmethod
    def get_json_api_from_directory(cls, directory, name):
        """Load and parse a JSON API definition file."""
        # `with` guarantees the file is closed even if parsing fails.
        with open(os.path.join(directory, name)) as json_data:
            return json.load(json_data)

    def assert_between(self, msg, low, high, val):
        """Assert that low <= val <= high."""
        self.assertGreaterEqual(val, low, msg=msg)
        self.assertLessEqual(val, high, msg=msg)

    def assert_key_in(self, key, dic):
        """Assert that *key* exists in *dic*, dumping the dict on failure."""
        self.assertTrue(key in dic, key + " not found in dictionary " + json.dumps(dic))

    @classmethod
    def get_api(cls, api_definition, nickname):
        """Return the API entry whose first operation matches *nickname*."""
        for api in api_definition["apis"]:
            if api["operations"][0]["nickname"] == nickname:
                return api
        return None

    @classmethod
    def path_by_nick(cls, api_definition, nickname):
        """Return the URL path of the API identified by *nickname*."""
        api = cls.get_api(api_definition, nickname)
        return api["path"]

    @classmethod
    def is_jvm_up(cls):
        """Check whether the JVM plugin inside the guest responds."""
        try:
            return bool(cls.curl(cls.path_by_nick(cls.jvm_api, "getJavaVersion")))
        except HttpError:
            return False

    @classmethod
    def METHOD_NAME(cls):
        """Check whether the guest's API port accepts TCP connections."""
        s = socket.socket()
        try:
            s.connect((cls._client.get_host(), cls._client.get_port()))
            s.close()
            # Optionally also require the JVM plugin to be responsive.
            if cls.config.check_jvm:
                return cls.is_jvm_up()
            else:
                return True
        except socket.error as e:
            print(e)
            return False

    def validate_path(self, api_definition, nickname, value):
        """Assert that GET on the API path returns exactly *value*."""
        path = self.path_by_nick(api_definition, nickname)
        self.assertEqual(value, self.curl(path))

    def validate_path_regex(self, api_definition, nickname, expr):
        """Assert that GET on the API path matches the regex *expr*."""
        path = self.path_by_nick(api_definition, nickname)
        self.assertRegex(self.curl(path), expr)

    def assertHttpError(self, url, code=404, method='GET', data=None):
        """Assert that the request fails with the given HTTP status code."""
        try:
            self.curl(url, method, data)
        except HttpError as e:
            if e.code != code:
                raise Exception('Expected error code %d but got %d' % (code, e.code))
        else:
            raise Exception('Expected failure but request succeeded')

    @classmethod
    def curl(cls, api, method='GET', data=None, timeout=None):
        """Issue an HTTP request against the guest API and return parsed JSON.

        Raises HttpError for any non-200 status; returns None on empty body.
        """
        url = cls.get_url(api)
        r = {
            'GET': requests.get,
            'POST': requests.post,
            'DELETE': requests.delete,
            'PUT': requests.put,
        }[method](url, data=data, timeout=timeout, **cls._client.get_request_kwargs())
        if r.status_code != 200:
            raise HttpError(r.status_code)
        if r.text:
            return r.json()

    @classmethod
    def get_client_cert_path(cls):
        """Return the path of the client TLS certificate."""
        return cls._client.get_client_cert_path()

    @classmethod
    def get_client_key_path(cls):
        """Return the path of the client TLS key."""
        return cls._client.get_client_key_path()

    @classmethod
    def get_ca_cert_path(cls):
        """Return the path of the CA certificate."""
        return cls._client.get_cacert_path()

    @classmethod
    def exec_os(cls):
        """Launch the OSv guest and return its Popen handle."""
        args = []
        if cls.config.hypervisor == 'firecracker':
            args += [cls.config.run_script, "-m 2048M", "-n", "-c 4"]
            if cls.config.kernel_path:
                print('Using kernel at %s' % cls.config.kernel_path)
                args += ['-k', cls.config.kernel_path]
        elif cls.config.use_sudo:
            args += ["/usr/bin/sudo", cls.config.run_script, "-n"]
        else:
            args += [cls.config.run_script, "--forward", "tcp::" + str(cls._client.get_port()) + "-:" + str(cls._client.get_port())]
        if cls.config.kernel_path and cls.config.hypervisor != 'firecracker':
            print('Using kernel at %s' % cls.config.kernel_path)
            args += ['-k', '--kernel-path', cls.config.kernel_path]
        if cls.config.cmd:
            args += ["-e", cls.config.cmd]
        if cls.config.test_image:
            args += ["-i", cls.config.test_image]
        return subprocess.Popen(args)

    @classmethod
    def shutdown(cls):
        """Ask the guest to power off and wait for the process to exit."""
        if cls.config.connect:
            return
        path = cls.path_by_nick(cls.os_api, "os_poweroff")
        try:
            cls.curl(path, method='POST', timeout=0.5)
        except Exception:
            # Best effort: the guest may die before answering the request.
            pass
        retry = 10
        while cls.os_process.poll() is None:
            retry -= 1
            if retry == 0:
                raise Exception("Fail to shutdown server")
            time.sleep(1)

    @classmethod
    def hard_shutdown(cls):
        """Forcibly kill the guest: SIGKILL children of the run script, then the script."""
        # BUGFIX: subprocess.call() returns the exit status, not pgrep's
        # output, so the original code killed a bogus pid. Capture the
        # output and kill each reported child pid instead.
        try:
            out = subprocess.check_output(['pgrep', '-P', str(cls.os_process.pid)])
        except subprocess.CalledProcessError:
            out = b''  # pgrep exits non-zero when no children are found
        for child_pid in out.split():
            subprocess.call(['kill', '-9', child_pid.decode()])
        cls.os_process.kill()
        cls.os_process.wait()

    @classmethod
    def start_image(cls):
        """Load the API definitions, boot the guest and wait until reachable."""
        if cls.config.check_jvm:
            jvm_plugin_api_listings_path = \
                os.path.join(os.path.realpath(os.path.dirname(__file__)), '../../httpserver-jvm-plugin/api-doc/listings')
            cls.jvm_api = cls.get_json_api_from_directory(jvm_plugin_api_listings_path, "jvm.json")
        cls.os_api = cls.get_json_api("os.json")
        if not cls.config.connect:
            cls.os_process = cls.exec_os()
        retry = 10
        while not cls.METHOD_NAME():
            time.sleep(1)
            retry -= 1
            if retry == 0:
                cls.shutdown()
                raise Exception("Server is down")
299,604 | gradient | """
A demo for multi-output regression
==================================
The demo is adopted from scikit-learn:
https://scikit-learn.org/stable/auto_examples/ensemble/plot_random_forest_regression_multioutput.html#sphx-glr-auto-examples-ensemble-plot-random-forest-regression-multioutput-py
See :doc:`/tutorials/multioutput` for more information.
.. note::
The feature is experimental. For the `multi_output_tree` strategy, many features are
missing.
"""
import argparse
from typing import Dict, List, Tuple
import numpy as np
from matplotlib import pyplot as plt
import xgboost as xgb
def plot_predt(y: np.ndarray, y_predt: np.ndarray, name: str) -> None:
    """Scatter-plot ground truth against predictions on the same axes.

    :param y: ground-truth targets; indexed as two columns (x, y).
    :param y_predt: predicted targets, indexed the same way.
    :param name: plot identifier.  NOTE(review): currently unused --
        presumably intended as the figure title; confirm before wiring it in.
    """
    s = 25
    plt.scatter(y[:, 0], y[:, 1], c="navy", s=s, edgecolor="black", label="data")
    plt.scatter(
        y_predt[:, 0], y_predt[:, 1], c="cornflowerblue", s=s, edgecolor="black"
    )
    plt.xlim([-1, 2])
    plt.ylim([-1, 2])
    plt.show()
def gen_circle() -> Tuple[np.ndarray, np.ndarray]:
    """Generate a sample dataset whose 2-dim target traces a noisy circle.

    Returns a (100, 1) feature array and a (100, 2) target array scaled
    into [0, 1].
    """
    rand_state = np.random.RandomState(1994)
    X = np.sort(200 * rand_state.rand(100, 1) - 100, axis=0)
    y = np.stack(
        (np.pi * np.sin(X).ravel(), np.pi * np.cos(X).ravel()), axis=1
    )
    # Perturb every 5th sample with uniform noise in [-0.5, 0.5).
    y[::5, :] += 0.5 - rand_state.rand(20, 2)
    # Rescale targets into the unit interval.
    y -= y.min()
    y /= y.max()
    return X, y
def rmse_model(plot_result: bool, strategy: str) -> None:
    """Draw a circle with 2-dim coordinate as target variables.

    Trains with the builtin RMSE objective.

    :param plot_result: show a scatter plot of the predictions when True.
    :param strategy: XGBoost multi-output strategy
        ("one_output_per_tree" or "multi_output_tree").
    """
    X, y = gen_circle()
    # Train a regressor on it
    reg = xgb.XGBRegressor(
        tree_method="hist",
        n_estimators=128,
        n_jobs=16,
        max_depth=8,
        multi_strategy=strategy,
        subsample=0.6,
    )
    reg.fit(X, y, eval_set=[(X, y)])
    y_predt = reg.predict(X)
    if plot_result:
        plot_predt(y, y_predt, "multi")
def custom_rmse_model(plot_result: bool, strategy: str) -> None:
    """Train using Python implementation of Squared Error.

    :param plot_result: show a scatter plot of the predictions when True.
    :param strategy: XGBoost multi-output strategy
        ("one_output_per_tree" or "multi_output_tree").
    """

    def METHOD_NAME(predt: np.ndarray, dtrain: xgb.DMatrix) -> np.ndarray:
        """Compute the gradient squared error."""
        # Labels come back flat; reshape to match the prediction matrix.
        y = dtrain.get_label().reshape(predt.shape)
        return predt - y

    def hessian(predt: np.ndarray, dtrain: xgb.DMatrix) -> np.ndarray:
        """Compute the hessian for squared error."""
        # The hessian of 0.5 * (y - p)^2 with respect to p is constant 1.
        return np.ones(predt.shape)

    def squared_log(
        predt: np.ndarray, dtrain: xgb.DMatrix
    ) -> Tuple[np.ndarray, np.ndarray]:
        """Custom objective returning (gradient, hessian)."""
        grad = METHOD_NAME(predt, dtrain)
        hess = hessian(predt, dtrain)
        # both numpy.ndarray and cupy.ndarray works.
        return grad, hess

    def rmse(predt: np.ndarray, dtrain: xgb.DMatrix) -> Tuple[str, float]:
        """Custom metric: root of the summed squared error."""
        y = dtrain.get_label().reshape(predt.shape)
        v = np.sqrt(np.sum(np.power(y - predt, 2)))
        return "PyRMSE", v

    X, y = gen_circle()
    Xy = xgb.DMatrix(X, y)
    results: Dict[str, Dict[str, List[float]]] = {}
    # Make sure the `num_target` is passed to XGBoost when custom objective is used.
    # When builtin objective is used, XGBoost can figure out the number of targets
    # automatically.
    booster = xgb.train(
        {
            "tree_method": "hist",
            "num_target": y.shape[1],
            "multi_strategy": strategy,
        },
        dtrain=Xy,
        num_boost_round=128,
        obj=squared_log,
        evals=[(Xy, "Train")],
        evals_result=results,
        custom_metric=rmse,
    )
    y_predt = booster.inplace_predict(X)
    if plot_result:
        plot_predt(y, y_predt, "multi")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--plot", choices=[0, 1], type=int, default=1)
    args = parser.parse_args()

    # Train with builtin RMSE objective
    # - One model per output.
    rmse_model(args.plot == 1, "one_output_per_tree")
    # - One model for all outputs, this is still working in progress, many
    #   features are missing.
    rmse_model(args.plot == 1, "multi_output_tree")

    # Train with custom objective.
    # - One model per output.
    custom_rmse_model(args.plot == 1, "one_output_per_tree")
    # - One model for all outputs, this is still working in progress, many
    #   features are missing.
    custom_rmse_model(args.plot == 1, "multi_output_tree")
299,605 | get serial | #!/usr/bin/env python
########################################################################
# DELLEMC S5224F
#
# Module contains an implementation of SONiC Platform Base API and
# provides the Components' (e.g., BIOS, CPLD, FPGA, BMC etc.) available in
# the platform
#
########################################################################
try:
import subprocess
from sonic_platform_base.component_base import ComponentBase
import sonic_platform.hwaccess as hwaccess
except ImportError as e:
raise ImportError(str(e) + "- required module not found")
def get_bios_version():
    """Return the system BIOS version reported by dmidecode."""
    output = subprocess.check_output(['dmidecode', '-s', 'system-version'])
    return output.decode('utf-8').strip()
def get_fpga_version():
    """Return the FPGA version read from its PCI BAR0 register."""
    val = hwaccess.pci_get_value('/sys/bus/pci/devices/0000:03:00.0/resource0', 0)
    major = (val >> 8) & 0xff
    minor = val & 0xff
    return '{}.{}'.format(major, minor)
def get_bmc_version():
    """Return the BMC firmware revision from sysfs."""
    # Read the sysfs node directly instead of spawning a `cat` subprocess.
    with open('/sys/class/ipmi/ipmi0/device/bmc/firmware_revision') as f:
        return f.read().strip()
def get_cpld_version(bus, i2caddr):
    """Return a CPLD version string read over I2C (major at offset 1, minor at 0)."""
    major = hwaccess.i2c_get(bus, i2caddr, 1)
    minor = hwaccess.i2c_get(bus, i2caddr, 0)
    return '{}.{}'.format(major, minor)
def get_cpld0_version():
    """Return the System CPLD version (I2C bus 601, address 0x31)."""
    return get_cpld_version(601, 0x31)
def get_cpld1_version():
    """Return the Slave CPLD 1 version (I2C bus 600, address 0x30)."""
    return get_cpld_version(600, 0x30)
class Component(ComponentBase):
    """DellEMC Platform-specific Component class"""

    # Each entry: [name, description, version-getter callable].
    CHASSIS_COMPONENTS = [
        ['BIOS',
         'Performs initialization of hardware components during booting',
         get_bios_version
         ],
        ['FPGA',
         'Used for managing the system LEDs',
         get_fpga_version
         ],
        ['BMC',
         'Platform management controller for on-board temperature monitoring, in-chassis power, Fan and LED control',
         get_bmc_version
         ],
        ['System CPLD',
         'Used for managing the CPU power sequence and CPU states',
         get_cpld0_version
         ],
        ['Slave CPLD 1',
         'Used for managing SFP28/QSFP28 port transceivers (SFP28 1-24, QSFP28 1-4)',
         get_cpld1_version
         ]
    ]

    def __init__(self, component_index=0):
        self.index = component_index
        self.name = self.CHASSIS_COMPONENTS[self.index][0]
        self.description = self.CHASSIS_COMPONENTS[self.index][1]
        # Version is read lazily on first access (see get_firmware_version).
        self.version = None

    def get_name(self):
        """
        Retrieves the name of the component

        Returns:
            A string containing the name of the component
        """
        return self.name

    def get_description(self):
        """
        Retrieves the description of the component

        Returns:
            A string containing the description of the component
        """
        return self.description

    def get_firmware_version(self):
        """
        Retrieves the firmware version of the component

        Returns:
            A string containing the firmware version of the component
        """
        # Lazily invoke the version getter once and cache the result.
        # (Use "is None" rather than "== None" for the sentinel check.)
        if self.version is None:
            self.version = self.CHASSIS_COMPONENTS[self.index][2]()
        return self.version

    def install_firmware(self, image_path):
        """
        Installs firmware to the component

        Args:
            image_path: A string, path to firmware image

        Returns:
            A boolean, True if install was successful, False if not
            (not supported on this platform, so always False)
        """
        return False

    def get_presence(self):
        """
        Retrieves the presence of the component

        Returns:
            bool: True if present, False if not
        """
        return True

    def get_model(self):
        """
        Retrieves the part number of the component

        Returns:
            string: Part number of component
        """
        return 'NA'

    def METHOD_NAME(self):
        """
        Retrieves the serial number of the component

        Returns:
            string: Serial number of component
        """
        return 'NA'

    def get_status(self):
        """
        Retrieves the operational status of the component

        Returns:
            bool: True if component is operating properly, False if not
        """
        return True

    def get_position_in_parent(self):
        """
        Retrieves 1-based relative physical position in parent device.

        Returns:
            integer: The 1-based relative physical position in parent
            device or -1 if cannot determine the position
        """
        return -1

    def is_replaceable(self):
        """
        Indicate whether component is replaceable.

        Returns:
            bool: True if it is replaceable.
        """
        return False

    def get_available_firmware_version(self, image_path):
        """
        Retrieves the available firmware version of the component

        Note: the firmware version will be read from image

        Args:
            image_path: A string, path to firmware image

        Returns:
            A string containing the available firmware version of the component
        """
        return "N/A"

    def get_firmware_update_notification(self, image_path):
        """
        Retrieves a notification on what should be done in order to complete
        the component firmware update

        Args:
            image_path: A string, path to firmware image

        Returns:
            A string containing the component firmware update notification if required.
            By default 'None' value will be used, which indicates that no actions are required
        """
        return "None"

    def update_firmware(self, image_path):
        """
        Updates firmware of the component

        Args:
            image_path: A string, path to firmware image

        Returns:
            A boolean: always False, since firmware update is not supported
            on this platform (no exception is raised).
        """
        return False
299,606 | test error primary and secondary variables | """
Module containing functional tests for Mandel's problem.
We consider two functional tests:
- The first test compares desired and actual relative errors for the pressure,
displacement, flux, poroelastic force, and degrees of consolidation, for three
different times, namely 10, 30, and 50 seconds.
- The second test checks that the same errors for pressure and displacement (up to
5 decimals) are obtained for scaled and unscaled problems. Scaling is
performed for length and mass.
"""
from __future__ import annotations
import sys
from collections import namedtuple
import numpy as np
import pytest
import porepy as pp
# Append the top PorePy folder to the path, to allow for imports of the examples folder
sys.path.append("../..")
from examples.mandel_biot import (
MandelSaveData,
MandelSetup,
mandel_fluid_constants,
mandel_solid_constants,
)
@pytest.fixture(scope="module")
def results() -> list[MandelSaveData]:
# Run verification setup and retrieve results for three different times
material_constants = {
"fluid": pp.FluidConstants(mandel_fluid_constants),
"solid": pp.SolidConstants(mandel_solid_constants),
}
time_manager = pp.TimeManager([0, 10, 30, 50], 10, True)
params = {
"material_constants": material_constants,
"time_manager": time_manager,
}
setup = MandelSetup(params)
pp.run_time_dependent_model(setup, params)
return setup.results
# Desired errors
DesiredError = namedtuple(
"DesiredError",
"error_pressure, "
"error_flux, "
"error_displacement, "
"error_force, "
"error_consolidation_degree",
)
desired_errors: list[DesiredError] = [
# t = 10 [s]
DesiredError(
error_pressure=0.020915626239924504,
error_flux=0.4673715742577367,
error_displacement=0.00036006562251599753,
error_force=0.006174477658138146,
error_consolidation_degree=(0.004112823366753098, 2.983724378680108e-16),
),
# t = 30 [s]
DesiredError(
error_pressure=0.01002508912855953,
error_flux=0.11759895811865957,
error_displacement=0.0003363008805572937,
error_force=0.003024676802218547,
error_consolidation_degree=(0.002324983805104465, 2.983724378680108e-16),
),
# t = 50 [s]
DesiredError(
error_pressure=0.007059420526689479,
error_flux=0.06788475278093752,
error_displacement=0.0003094287000315224,
error_force=0.002150211938046511,
error_consolidation_degree=(0.0018043500295539042, 2.983724378680108e-16),
),
]
@pytest.mark.parametrize("time_index", [0, 1, 2])
def METHOD_NAME(time_index: int, results):
    """Checks error for pressure, displacement, flux, force, and consolidation degree.

    Physical parameters used in this test have been adapted from [1].

    Pressure, flux, displacement, and poroelastic force errors (measured with
    the L2-discrete relative error from [2]) and the horizontal/vertical
    consolidation degree errors (measured in absolute terms) are compared
    against the reference values recorded when the test was written.

    An absolute tolerance of 1e-5 and a relative tolerance of 1e-3 keep the
    test meaningful while minimizing spurious failures from floating-point
    arithmetic close to machine precision.

    References:

        - [1] Mikelić, A., Wang, B., & Wheeler, M. F. (2014). Numerical convergence
          study of iterative coupling for coupled flow and geomechanics.
          Computational Geosciences, 18(3), 325-341.

        - [2] Nordbotten, J. M. (2016). Stable cell-centered finite volume
          discretization for Biot equations. SIAM Journal on Numerical Analysis,
          54(2), 942-968.

    """
    actual = results[time_index]
    desired = desired_errors[time_index]

    # The four scalar error fields all use the same tolerances; check them in
    # the same order as before.
    for attr in ("error_pressure", "error_flux", "error_displacement", "error_force"):
        np.testing.assert_allclose(
            getattr(actual, attr),
            getattr(desired, attr),
            atol=1e-5,
            rtol=1e-3,
        )

    # Consolidation degree is a (horizontal, vertical) pair.
    for axis in (0, 1):
        np.testing.assert_allclose(
            actual.error_consolidation_degree[axis],
            desired.error_consolidation_degree[axis],
            atol=1e-5,
            rtol=1e-3,
        )
def test_scaled_vs_unscaled_systems():
    """Checks that the same results are obtained for scaled and unscaled systems.

    Two otherwise identical Mandel problems are run up to t = 10 s, one in SI
    units and one with length expressed in millimeters and mass in grams.
    Pressure and displacement errors must agree to 5 decimal places.
    """
    # The unscaled problem
    material_constants_unscaled = {
        "fluid": pp.FluidConstants(mandel_fluid_constants),
        "solid": pp.SolidConstants(mandel_solid_constants),
    }
    time_manager_unscaled = pp.TimeManager([0, 10], 10, True)
    params_unscaled = {
        "material_constants": material_constants_unscaled,
        "time_manager": time_manager_unscaled,
    }
    unscaled = MandelSetup(params=params_unscaled)
    pp.run_time_dependent_model(model=unscaled, params=params_unscaled)
    # The scaled problem
    material_constants_scaled = {
        "fluid": pp.FluidConstants(mandel_fluid_constants),
        "solid": pp.SolidConstants(mandel_solid_constants),
    }
    time_manager_scaled = pp.TimeManager([0, 10], 10, True)
    scaling = {"m": 1e-3, "kg": 1e-3}  # length in millimeters and mass in grams
    units = pp.Units(**scaling)
    params_scaled = {
        "material_constants": material_constants_scaled,
        "time_manager": time_manager_scaled,
        "units": units,
    }
    scaled = MandelSetup(params=params_scaled)
    pp.run_time_dependent_model(model=scaled, params=params_scaled)
    # Compare results at the final (only) stored time step.
    np.testing.assert_almost_equal(
        unscaled.results[-1].error_pressure,
        scaled.results[-1].error_pressure,
        decimal=5,
    )
    np.testing.assert_almost_equal(
        unscaled.results[-1].error_displacement,
        scaled.results[-1].error_displacement,
        decimal=5,
    )
299,607 | delete stream | # -*- coding: utf-8 -*-
'''
Copyright © 2015, Robin David - MIT-Licensed
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and
to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
The Software is provided "as is", without warranty of any kind, express or implied, including but not limited
to the warranties of merchantability, fitness for a particular purpose and noninfringement. In no event shall
the authors or copyright holders X be liable for any claim, damages or other liability, whether in an action
of contract, tort or otherwise, arising from, out of or in connection with the software or the use or other
dealings in the Software.
Except as contained in this notice, the name of the Robin David shall not be used in advertising or otherwise
to promote the sale, use or other dealings in this Software without prior written authorization from the Robin David.
'''
from ctypes import *
import sys, os
kernel32 = windll.kernel32
# Win32 typedef aliases used by the structure definitions below.
# NOTE(review): despite the name, LPSTR here is a *wide* string pointer
# (LPWSTR), which matches the ...W API functions used in this module.
LPSTR = c_wchar_p
DWORD = c_ulong
LONG = c_ulong
WCHAR = c_wchar * 296  # MAX_PATH (260) + 36, size of cStreamName below
LONGLONG = c_longlong
class LARGE_INTEGER_UNION(Structure):
    # Mirrors the 32-bit halves of Windows' LARGE_INTEGER (winnt.h).
    _fields_ = [
        ("LowPart", DWORD),
        ("HighPart", LONG),]
class LARGE_INTEGER(Union):
    # ctypes port of winnt.h LARGE_INTEGER: the 64-bit QuadPart overlaps
    # the two 32-bit views (large1/large2 stand in for the C union members).
    _fields_ = [
        ("large1", LARGE_INTEGER_UNION),
        ("large2", LARGE_INTEGER_UNION),
        ("QuadPart", LONGLONG),
    ]
class WIN32_FIND_STREAM_DATA(Structure):
    # Output record of FindFirstStreamW/FindNextStreamW; cStreamName has the
    # form ":streamname:$DATA" (see the C declaration quoted below).
    _fields_ = [
        ("StreamSize", LARGE_INTEGER),
        ("cStreamName", WCHAR),
    ]
'''
typedef struct _WIN32_FIND_STREAM_DATA {
LARGE_INTEGER StreamSize;
WCHAR cStreamName[MAX_PATH + 36];
} WIN32_FIND_STREAM_DATA, *PWIN32_FIND_STREAM_DATA;
'''
class ADS():
    """Enumerate and manipulate NTFS Alternate Data Streams (ADS) of a file.

    Stream names are kept without the surrounding ``:name:$DATA`` decoration;
    :meth:`full_filename` rebuilds the ``file:stream`` form the OS expects.
    Only meaningful on Windows/NTFS (relies on kernel32 stream APIs).
    """

    def __init__(self, filename):
        self.filename = filename
        self.streams = self.init_streams()

    def init_streams(self):
        """Return the list of named streams attached to ``self.filename``.

        Uses FindFirstStreamW/FindNextStreamW with InfoLevel 0, which fills a
        WIN32_FIND_STREAM_DATA; the last argument is reserved and must be 0.
        See https://msdn.microsoft.com/en-us/library/aa364424(v=vs.85).aspx
        """
        file_infos = WIN32_FIND_STREAM_DATA()
        streamlist = list()
        myhandler = kernel32.FindFirstStreamW(
            LPSTR(self.filename), 0, byref(file_infos), 0)
        # On failure FindFirstStreamW returns INVALID_HANDLE_VALUE, which the
        # default ctypes int restype surfaces as -1 (e.g. missing file or a
        # non-NTFS volume).  Bail out instead of iterating a bogus handle.
        if myhandler == -1:
            return streamlist
        # Names come back as ":name:$DATA"; index 1 is the bare name.  The
        # first entry is the unnamed data stream ("::$DATA"), whose empty
        # name is filtered out.
        if file_infos.cStreamName:
            parts = file_infos.cStreamName.split(":")
            if len(parts) > 1 and parts[1]:
                streamlist.append(parts[1])
            while kernel32.FindNextStreamW(myhandler, byref(file_infos)):
                streamlist.append(file_infos.cStreamName.split(":")[1])
            kernel32.FindClose(myhandler)  # Close the handle
        return streamlist

    def __iter__(self):
        return iter(self.streams)

    def has_streams(self):
        return len(self.streams) > 0

    def full_filename(self, stream):
        """Return the ``filename:stream`` path understood by Windows."""
        return "%s:%s" % (self.filename, stream)

    def add_stream_from_file(self, filename):
        """Copy *filename*'s bytes into a new stream named after its basename."""
        if os.path.exists(filename):
            with open(filename, "rb") as f:
                content = f.read()
            return self.add_stream_from_string(filename, content)
        print("Could not find file: {0}".format(filename))
        return False

    def add_stream_from_string(self, stream_name, string):
        """Create a stream holding *string* (bytes); return True on success."""
        base_name = os.path.basename(stream_name)
        fullname = self.full_filename(base_name)
        if os.path.exists(fullname):
            print("Stream name already exists")
            return False
        with open(fullname, "wb") as fd:
            fd.write(string)
        # Track the basename: full_filename()/METHOD_NAME() prepend the host
        # file themselves, so storing the original (possibly path-qualified)
        # name would build a broken path on later delete/read.
        self.streams.append(base_name)
        return True

    def METHOD_NAME(self, stream):
        """Delete the given stream; return True on success."""
        try:
            os.remove(self.full_filename(stream))
            self.streams.remove(stream)
            return True
        # Narrowed from a bare ``except``: OSError covers removal failures,
        # ValueError an untracked stream name.
        except (OSError, ValueError):
            return False

    def get_stream_content(self, stream):
        """Return the raw bytes stored in *stream*."""
        with open(self.full_filename(stream), "rb") as fd:
            return fd.read()
299,608 | get example tree | #!/usr/bin/env python
#
# Trivial data browser
# This version:
# Copyright (C) 2010 Rob Lanphier
# Derived from browse.py in urwid distribution
# Copyright (C) 2004-2007 Ian Ward
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# Urwid web site: https://urwid.org/
"""
Urwid example lazy directory browser / tree view
Features:
- custom selectable widgets for files and directories
- custom message widgets to identify access errors and empty directories
- custom list walker for displaying widgets in a tree fashion
"""
from __future__ import annotations
import urwid
class ExampleTreeWidget(urwid.TreeWidget):
    """ Display widget for leaf nodes """
    def get_display_text(self):
        # Node values are plain dicts; the 'name' entry is what gets rendered.
        return self.get_node().get_value()['name']
class ExampleNode(urwid.TreeNode):
    """ Data storage object for leaf nodes """
    def load_widget(self):
        # Leaves and parents share the same display widget class.
        return ExampleTreeWidget(self)
class ExampleParentNode(urwid.ParentNode):
    """Data storage object for interior/parent nodes."""

    def load_widget(self):
        return ExampleTreeWidget(self)

    def load_child_keys(self):
        # Children are addressed by positional index into the 'children' list.
        return range(len(self.get_value()['children']))

    def load_child_node(self, key):
        """Return either an ExampleNode or ExampleParentNode"""
        value = self.get_value()['children'][key]
        # A node with its own 'children' entry is itself a parent.
        node_cls = ExampleParentNode if 'children' in value else ExampleNode
        return node_cls(value, parent=self, key=key, depth=self.get_depth() + 1)
class ExampleTreeBrowser:
    """Top-level application: the tree in a frame with header and footer."""

    # (attribute name, foreground, background[, mono setting]) entries for
    # urwid's display attributes.
    palette = [
        ('body', 'black', 'light gray'),
        ('focus', 'light gray', 'dark blue', 'standout'),
        ('head', 'yellow', 'black', 'standout'),
        ('foot', 'light gray', 'black'),
        ('key', 'light cyan', 'black','underline'),
        ('title', 'white', 'black', 'bold'),
        ('flag', 'dark gray', 'light gray'),
        ('error', 'dark red', 'light gray'),
    ]
    # Markup for the key-binding help line shown at the bottom of the screen.
    footer_text = [
        ('title', "Example Data Browser"), " ",
        ('key', "UP"), ",", ('key', "DOWN"), ",",
        ('key', "PAGE UP"), ",", ('key', "PAGE DOWN"),
        " ",
        ('key', "+"), ",",
        ('key', "-"), " ",
        ('key', "LEFT"), " ",
        ('key', "HOME"), " ",
        ('key', "END"), " ",
        ('key', "Q"),
    ]
    def __init__(self, data=None):
        # Wrap the raw data in tree nodes and build the scrolling list box.
        self.topnode = ExampleParentNode(data)
        self.listbox = urwid.TreeListBox(urwid.TreeWalker(self.topnode))
        self.listbox.offset_rows = 1
        self.header = urwid.Text( "" )
        self.footer = urwid.AttrWrap( urwid.Text( self.footer_text ),
            'foot')
        self.view = urwid.Frame(
            urwid.AttrWrap( self.listbox, 'body' ),
            header=urwid.AttrWrap(self.header, 'head' ),
            footer=self.footer )
    def main(self):
        """Run the program."""
        self.loop = urwid.MainLoop(self.view, self.palette,
            unhandled_input=self.unhandled_input)
        self.loop.run()
    def unhandled_input(self, k):
        # Keys the widgets did not consume end up here; only quit is bound.
        if k in ('q','Q'):
            raise urwid.ExitMainLoop()
def METHOD_NAME():
    """Generate a quick 100-leaf demo tree.

    Returns a dict of the shape ``{"name": ..., "children": [...]}`` with 10
    children, each holding 10 grandchildren.
    """
    children = []
    for i in range(10):
        grandchildren = [{"name": f"grandchild {i}.{j}"} for j in range(10)]
        children.append({"name": f"child {i}", "children": grandchildren})
    return {"name": "parent", "children": children}
def main():
    # Build the demo data set and hand it to the browser's event loop.
    sample = METHOD_NAME()
    ExampleTreeBrowser(sample).main()
if __name__=="__main__":
main()
|
299,609 | roi align nchw python | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, too-many-nested-blocks
"Roi align in python"
import math
import numpy as np
def _bilinear(a_np, n, c, y, x, height, width, layout):
if y < -1 or y > height or x < -1 or x > width:
return 0
y = min(max(y, 0), height - 1)
x = min(max(x, 0), width - 1)
y_low = int(math.floor(y))
x_low = int(math.floor(x))
y_high = y_low + 1
x_high = x_low + 1
wy_h = y - y_low
wx_h = x - x_low
wy_l = 1 - wy_h
wx_l = 1 - wx_h
val = 0
for wx, xp in zip((wx_l, wx_h), (x_low, x_high)):
for wy, yp in zip((wy_l, wy_h), (y_low, y_high)):
if 0 <= yp < height and 0 <= xp < width:
if layout == "NCHW":
val += wx * wy * a_np[n, c, yp, xp]
else:
val += wx * wy * a_np[n, yp, xp, c]
return val
def roi_align_common(
    a_np,
    b_np,
    rois_np,
    channel,
    pooled_size_h,
    pooled_size_w,
    spatial_scale,
    sample_ratio,
    avg_mode,
    max_mode,
    height,
    width,
    layout,
):
    """Common code used by roi align NCHW and NHWC.

    ``rois_np`` rows are (batch_index, x1, y1, x2, y2) in un-scaled
    coordinates; ``spatial_scale`` maps them onto the feature map.  ``b_np``
    is the pre-allocated output, filled in place and returned.  Exactly one
    of ``avg_mode``/``max_mode`` should be truthy (callers enforce this).
    """
    num_roi = rois_np.shape[0]
    for i in range(num_roi):
        roi = rois_np[i]
        batch_index = int(roi[0])
        roi_start_w, roi_start_h, roi_end_w, roi_end_h = roi[1:] * spatial_scale
        # Clamp the ROI extent to at least one pixel to avoid empty bins.
        roi_h = max(roi_end_h - roi_start_h, 1.0)
        roi_w = max(roi_end_w - roi_start_w, 1.0)
        bin_h = roi_h / pooled_size_h
        bin_w = roi_w / pooled_size_w
        # sample_ratio > 0 fixes the sampling grid per bin; otherwise derive
        # it adaptively from the bin size.
        if sample_ratio > 0:
            roi_bin_grid_h = roi_bin_grid_w = int(sample_ratio)
        else:
            roi_bin_grid_h = int(math.ceil(roi_h / pooled_size_h))
            roi_bin_grid_w = int(math.ceil(roi_w / pooled_size_w))
        count = roi_bin_grid_h * roi_bin_grid_w
        for c in range(channel):
            for ph in range(pooled_size_h):
                for pw in range(pooled_size_w):
                    if avg_mode:
                        total = 0.0
                    if max_mode:
                        total = float("-inf")
                    # Sample a regular grid inside the bin; each sample is
                    # bilinearly interpolated from the feature map.
                    for iy in range(roi_bin_grid_h):
                        for ix in range(roi_bin_grid_w):
                            y = roi_start_h + ph * bin_h + (iy + 0.5) * bin_h / roi_bin_grid_h
                            x = roi_start_w + pw * bin_w + (ix + 0.5) * bin_w / roi_bin_grid_w
                            if avg_mode:
                                total += (
                                    _bilinear(a_np, batch_index, c, y, x, height, width, layout)
                                    / count
                                )
                            if max_mode:
                                total = max(
                                    total,
                                    _bilinear(a_np, batch_index, c, y, x, height, width, layout),
                                )
                    if layout == "NCHW":
                        b_np[i, c, ph, pw] = total
                    else:
                        b_np[i, ph, pw, c] = total
    return b_np
def METHOD_NAME(a_np, rois_np, pooled_size, spatial_scale, sample_ratio, mode=b"avg"):
    """Roi align NCHW in python: allocate the output and delegate to
    :func:`roi_align_common` with NCHW indexing."""
    avg_mode = mode in (b"avg", "avg", 0)
    max_mode = mode in (b"max", "max", 1)
    assert avg_mode or max_mode, "Mode must be average or max. Please pass a valid mode."
    _, channel, height, width = a_np.shape
    # pooled_size may be a single int (square output) or an (h, w) pair.
    if isinstance(pooled_size, int):
        pooled_h = pooled_w = pooled_size
    else:
        pooled_h, pooled_w = pooled_size
    out_shape = (rois_np.shape[0], channel, pooled_h, pooled_w)
    b_np = np.zeros(out_shape, dtype=a_np.dtype)
    return roi_align_common(
        a_np,
        b_np,
        rois_np,
        channel,
        pooled_h,
        pooled_w,
        spatial_scale,
        sample_ratio,
        avg_mode,
        max_mode,
        height,
        width,
        "NCHW",
    )
def roi_align_nhwc_python(a_np, rois_np, pooled_size, spatial_scale, sample_ratio, mode=b"avg"):
    """Roi align NHWC in python: allocate the output and delegate to
    :func:`roi_align_common` with NHWC indexing."""
    avg_mode = mode in (b"avg", "avg", 0)
    max_mode = mode in (b"max", "max", 1)
    assert avg_mode or max_mode, "Mode must be average or max. Please pass a valid mode."
    _, height, width, channel = a_np.shape
    # pooled_size may be a single int (square output) or an (h, w) pair.
    if isinstance(pooled_size, int):
        pooled_h = pooled_w = pooled_size
    else:
        pooled_h, pooled_w = pooled_size
    out_shape = (rois_np.shape[0], pooled_h, pooled_w, channel)
    b_np = np.zeros(out_shape, dtype=a_np.dtype)
    return roi_align_common(
        a_np,
        b_np,
        rois_np,
        channel,
        pooled_h,
        pooled_w,
        spatial_scale,
        sample_ratio,
        avg_mode,
        max_mode,
        height,
        width,
        "NHWC",
    )
299,610 | get gravityspy triggers | # -*- coding: utf-8 -*-
# Copyright (C) Scott Coughlin (2017-2020)
#
# This file is part of GWpy.
#
# GWpy is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# GWpy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GWpy. If not, see <http://www.gnu.org/licenses/>.
"""Input/output methods for tabular data.
Access to GravitySpy and O1GlitchClassification triggers requires access
to a PostgresSQL database. Users can set the username and password for
connections in the following environment variables
- ``GRAVITYSPY_DATABASE_USER``
- ``GRAVITYSPY_DATABASE_PASSWORD``
These can be found https://secrets.ligo.org/secrets/144/. The description
is the username and thesecret is the password.
"""
import os
from .sql import fetch
from .fetch import register_fetcher
from .. import GravitySpyTable
from .. import EventTable
__author__ = 'Scott Coughlin <scott.coughlin@ligo.org>'
def METHOD_NAME(tablename, engine=None, **kwargs):
    """Fetch data into an `GravitySpyTable`

    Parameters
    ----------
    tablename : `str`,
        The name of table you are attempting to receive triggers
        from.

    engine : `sqlalchemy.engine.Engine`, optional
        an existing database engine; if `None` one is created from the
        ``db``/``host``/``user``/``passwd`` keyword arguments (falling back
        to the ``GRAVITYSPY_DATABASE_*`` environment variables, see
        `get_connection_str`)

    **kwargs
        selection and other filters you would like to supply to the
        underlying reader method for the given format

    .. note::

       For now it will attempt to automatically connect you
       to a specific DB. In the future, this may be an input
       argument.

    Returns
    -------
    table : `GravitySpyTable`
    """
    from sqlalchemy import inspect as sqlalchemy_inspect
    from sqlalchemy.engine import create_engine
    from sqlalchemy.exc import ProgrammingError

    # connect if needed
    if engine is None:
        conn_kw = {key: kwargs.pop(key)
                   for key in ('db', 'host', 'user', 'passwd')
                   if key in kwargs}
        engine = create_engine(get_connection_str(**conn_kw))

    try:
        return GravitySpyTable(fetch(engine, tablename, **kwargs))
    except ProgrammingError as exc:
        if 'relation "%s" does not exist' % tablename in str(exc):
            # ``Engine.table_names()`` was deprecated in SQLAlchemy 1.4 and
            # removed in 2.0; the inspector API works on both lines.
            tables = sqlalchemy_inspect(engine).get_table_names()
            msg = exc.args[0]
            msg = msg.replace(
                'does not exist',
                'does not exist, the following tablenames are '
                'acceptable:\n %s\n' % '\n '.join(tables))
            exc.args = (msg,)
        raise
# -- utilities ----------------------------------------------------------------
def get_connection_str(db='gravityspy',
                       host='gravityspyplus.ciera.northwestern.edu',
                       user=None,
                       passwd=None):
    """Create string to pass to create_engine

    Parameters
    ----------
    db : `str`, default: ``gravityspy``
        The name of the SQL database your connecting to.

    host : `str`, default: ``gravityspyplus.ciera.northwestern.edu``
        The name of the server the database you are connecting to
        lives on.

    user : `str`, default: `None`
        Your username for authentication to this database.

    passwd : `str`, default: `None`
        Your password for authentication to this database.

    .. note::

       `user` and `passwd` should be given together, otherwise they will be
       ignored and values will be resolved from the
       ``GRAVITYSPY_DATABASE_USER`` and ``GRAVITYSPY_DATABASE_PASSWD``
       environment variables.

    Returns
    -------
    conn_string : `str`
        A SQLAlchemy engine compliant connection string
    """
    # Fall back to the environment unless BOTH credentials were supplied.
    if not (user and passwd):
        user = os.getenv('GRAVITYSPY_DATABASE_USER', None)
        passwd = os.getenv('GRAVITYSPY_DATABASE_PASSWD', None)

    if not (user and passwd):
        raise ValueError('Remember to either pass '
                         'or export GRAVITYSPY_DATABASE_USER '
                         'and export GRAVITYSPY_DATABASE_PASSWD in order '
                         'to access the Gravity Spy Data: '
                         'https://secrets.ligo.org/secrets/144/'
                         ' description is username and secret is password.')

    return f'postgresql://{user}:{passwd}@{host}:5432/{db}'
# Make ``EventTable.fetch('gravityspy', <tablename>, ...)`` resolve to
# METHOD_NAME; the usage string documents the extra positional argument.
register_fetcher('gravityspy', EventTable, METHOD_NAME,
                 usage="tablename")
299,611 | open ssb | """Manages the integration of SkyTemple Script Debugger (skytemple-ssb-debugger)."""
# Copyright 2020-2023 Capypara and the SkyTemple Contributors
#
# This file is part of SkyTemple.
#
# SkyTemple is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# SkyTemple is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with SkyTemple. If not, see <https://www.gnu.org/licenses/>.
import os
from typing import Optional
from gi.repository import Gtk
from skytemple.core.rom_project import RomProject
from skytemple.core.ssb_debugger.context import SkyTempleMainDebuggerControlContext
from skytemple.core.ui_utils import make_builder
from skytemple_ssb_debugger.controller.main import MainController as DebuggerMainController
from skytemple_ssb_debugger.main import get_debugger_package_dir
class DebuggerManager:
    """Lifecycle manager for the (single) SSB debugger window.

    Lazily builds the debugger UI on the first `open()`, tracks whether that
    window is still alive, and forwards project/script events to it.
    """

    def __init__(self):
        # Control context bridging SkyTemple and the debugger; None while closed.
        self._context: Optional[SkyTempleMainDebuggerControlContext] = None
        self._opened_main_window: Optional[Gtk.Window] = None
        self._opened_main_controller: Optional[DebuggerMainController] = None
        self._was_opened_once = False
        self.main_window = None

    def open(self, main_window):
        """Open the debugger (if not already opened) and focus it's UI."""
        if not self.is_opened():
            self._was_opened_once = True
            self._context = SkyTempleMainDebuggerControlContext(self)
            builder = make_builder(os.path.join(get_debugger_package_dir(), "debugger.glade"))
            self._opened_main_window = builder.get_object("main_window")
            self._opened_main_window.set_role("SkyTemple Script Engine Debugger")
            self._opened_main_window.set_title("SkyTemple Script Engine Debugger")
            self._opened_main_controller = DebuggerMainController(
                builder, self._opened_main_window, self._context
            )
            # Sync the freshly created debugger with the currently open project.
            self.handle_project_change()
        self.main_window = main_window
        assert self._opened_main_window is not None
        self._opened_main_window.present()

    def close(self):
        """
        Close the debugger and focus it's UI (to bring dialog boxes of it into foreground).
        Returns False if the debugger was not closed (True also when it was not open).
        """
        if not self.is_opened():
            return True
        assert self._opened_main_controller is not None
        # The delete-event handler returns True to veto closing (GTK idiom).
        return not self._opened_main_controller.on_main_window_delete_event()

    def check_save(self):
        """Checks if unsaved files exist, if so asks the user to save them."""
        if not self.is_opened():
            return True
        assert self._opened_main_controller is not None
        return self._opened_main_controller.editor_notebook.close_all_tabs()

    def destroy(self):
        """Free resources."""
        # TODO: Do we need to do something anymore?

    def is_opened(self):
        """Returns whether or not the debugger is opened."""
        should_be_opened = self._context is not None
        if should_be_opened:
            # The window may have been destroyed behind our back; detect that
            # and clean up our references.
            if self._opened_main_window not in Gtk.Window.list_toplevels():
                self.on_close()
                return False
        return should_be_opened

    def handle_project_change(self):
        """
        If the debugger is currently open, handles changing the project for it.
        The global singleton instance of RomProject is used.
        """
        project = RomProject.get_current()
        if project is not None and self.is_opened():
            assert self._opened_main_controller is not None
            self._opened_main_controller.load_rom()

    def METHOD_NAME(self, ssb_filename, main_window):
        """Open a SSB file in the debugger and focus it."""
        self.open(main_window)
        assert self._opened_main_controller is not None
        self._opened_main_controller.editor_notebook.METHOD_NAME(ssb_filename)

    def get_context(self) -> Optional[SkyTempleMainDebuggerControlContext]:
        """Returns the managing context for the debugger. Returns None if the debugger is not opened!"""
        return self._context

    def on_script_added(self, ssb_path, mapname, scene_type, scene_name):
        """Inform the debugger about a newly created SSB file."""
        if self.is_opened():
            assert self._opened_main_controller is not None
            self._opened_main_controller.on_script_added(ssb_path, mapname, scene_type, scene_name)

    def on_script_removed(self, ssb_path):
        """Inform the debugger about a removed SSB file."""
        if self.is_opened():
            assert self._opened_main_controller is not None
            self._opened_main_controller.on_script_removed(ssb_path)

    # CONTEXT PRIVATE:
    def on_close(self):
        """(Internal, only to be called by the Context). Mark as closed."""
        self._context = None
        self._opened_main_window = None
        self._opened_main_controller = None

    def get_controller(self) -> Optional[DebuggerMainController]:
        return self._opened_main_controller

    def get_window(self) -> Optional[Gtk.Window]:
        return self._opened_main_window
|
299,612 | set vault provider | """Creation and revocation statements used for Vault role definitions."""
# These strings are passed through the `.format` method so the variables that need to remain in the template # noqa: E501
# to be passed to Vault are wrapped in 4 pairs of braces. TMM 2020-09-01
import json
from enum import Enum
from functools import lru_cache, partial
from pathlib import Path
from string import Template
from typing import Optional
import pulumi
import pulumi_vault
from bridge.secrets.sops import read_yaml_secrets
postgres_role_statements = {
"approle": {
"create": Template("CREATE ROLE ${app_name};"),
"revoke": Template("DROP ROLE ${app_name};"),
},
"admin": {
"create": Template(
"""CREATE USER "{{name}}" WITH PASSWORD '{{password}}'
VALID UNTIL '{{expiration}}' IN ROLE "rds_superuser"
INHERIT CREATEROLE CREATEDB;
GRANT ALL PRIVILEGES ON ALL TABLES IN SCHEMA public TO "{{name}}"
WITH GRANT OPTION;
GRANT ALL PRIVILEGES ON ALL SEQUENCES IN SCHEMA public TO "{{name}}"
WITH GRANT OPTION;"""
),
"revoke": Template(
"""REVOKE "${app_name}" FROM "{{name}}";
GRANT CREATE ON SCHEMA public TO ${app_name} WITH GRANT OPTION;
GRANT "{{name}}" TO ${app_name} WITH ADMIN OPTION;
SET ROLE ${app_name};
REASSIGN OWNED BY "{{name}}" TO "${app_name}";
RESET ROLE;
DROP OWNED BY "{{name}}";
REVOKE ALL PRIVILEGES ON ALL TABLES IN SCHEMA public FROM "{{name}}";
REVOKE ALL PRIVILEGES ON ALL SEQUENCES IN SCHEMA public FROM "{{name}}";
REVOKE USAGE ON SCHEMA public FROM "{{name}}";
DROP USER "{{name}}";"""
),
},
"app": {
"create": Template(
"""CREATE USER "{{name}}" WITH PASSWORD '{{password}}'
VALID UNTIL '{{expiration}}' IN ROLE "${app_name}" INHERIT;
GRANT CREATE ON SCHEMA public TO ${app_name} WITH GRANT OPTION;
GRANT ALL PRIVILEGES ON ALL TABLES IN SCHEMA public TO "${app_name}"
WITH GRANT OPTION;
GRANT ALL PRIVILEGES ON ALL SEQUENCES IN SCHEMA public TO "${app_name}"
WITH GRANT OPTION;
SET ROLE "${app_name}";
ALTER DEFAULT PRIVILEGES FOR ROLE "${app_name}" IN SCHEMA public
GRANT ALL PRIVILEGES ON TABLES TO "${app_name}" WITH GRANT OPTION;
ALTER DEFAULT PRIVILEGES FOR ROLE "${app_name}" IN SCHEMA public
GRANT ALL PRIVILEGES ON SEQUENCES TO "${app_name}" WITH GRANT OPTION;
RESET ROLE;
ALTER ROLE "{{name}}" SET ROLE "${app_name}";"""
),
"revoke": Template(
"""REVOKE "${app_name}" FROM "{{name}}";
GRANT "{{name}}" TO ${app_name} WITH ADMIN OPTION;
SET ROLE ${app_name};
REASSIGN OWNED BY "{{name}}" TO "${app_name}";
RESET ROLE;
DROP OWNED BY "{{name}}";
REVOKE ALL PRIVILEGES ON ALL TABLES IN SCHEMA public FROM "{{name}}";
REVOKE ALL PRIVILEGES ON ALL SEQUENCES IN SCHEMA public FROM "{{name}}";
REVOKE USAGE ON SCHEMA public FROM "{{name}}";
DROP USER "{{name}}";"""
),
},
"readonly": {
"create": Template(
"""CREATE USER "{{name}}" WITH PASSWORD '{{password}}'
VALID UNTIL '{{expiration}}';
GRANT SELECT ON ALL TABLES IN SCHEMA public TO "{{name}}";
GRANT SELECT ON ALL SEQUENCES IN SCHEMA public TO "{{name}}";
SET ROLE "{{name}}";
ALTER DEFAULT PRIVILEGES FOR USER "{{name}}" IN SCHEMA public GRANT SELECT
ON TABLES TO "{{name}}";
ALTER DEFAULT PRIVILEGES FOR USER "{{name}}" IN SCHEMA public GRANT SELECT
ON SEQUENCES TO "{{name}}";
RESET ROLE;"""
),
"revoke": Template(
"""REVOKE ALL PRIVILEGES ON ALL TABLES IN SCHEMA public FROM "{{name}}";
REVOKE ALL PRIVILEGES ON ALL SEQUENCES IN SCHEMA public FROM "{{name}}";
REVOKE USAGE ON SCHEMA public FROM "{{name}}";
DROP USER "{{name}}";"""
),
},
}
mysql_role_statements = {
"admin": {
"create": Template(
"CREATE USER '{{name}}'@'%' IDENTIFIED BY '{{password}}';"
"GRANT SELECT, INSERT, UPDATE, DELETE, CREATE, DROP, REFERENCES, INDEX, "
"ALTER, CREATE TEMPORARY TABLES, LOCK TABLES, EXECUTE, CREATE VIEW, "
"SHOW VIEW, CREATE ROUTINE, ALTER ROUTINE, EVENT, TRIGGER "
"ON `%`.* TO '{{name}}' WITH GRANT OPTION; "
"GRANT RELOAD, LOCK TABLES ON *.* to '{{name}}';"
),
"revoke": Template("DROP USER '{{name}}';"),
},
"app": {
"create": Template(
"CREATE USER '{{name}}'@'%' IDENTIFIED BY '{{password}}';"
"GRANT SELECT, INSERT, UPDATE, DELETE, CREATE, INDEX, DROP, ALTER, "
"REFERENCES, CREATE TEMPORARY TABLES, LOCK TABLES "
"ON ${app_name}.* TO '{{name}}'@'%';"
),
"revoke": Template("DROP USER '{{name}}';"),
},
"readonly": {
"create": Template(
"CREATE USER '{{name}}'@'%' IDENTIFIED BY '{{password}}';"
"GRANT SELECT, SHOW VIEW ON `%`.* TO '{{name}}'@'%';"
),
"revoke": Template("DROP USER '{{name}}';"),
},
}
mongodb_role_statements = {
"admin": {
"create": Template(
json.dumps(
{"roles": [{"role": "superuser"}, {"role": "root"}], "db": "admin"}
)
),
"revoke": Template(json.dumps({"db": "admin"})),
},
"app": {
"create": Template(
json.dumps({"roles": [{"role": "readWrite"}], "db": "${app_name}"})
),
"revoke": Template(json.dumps({"db": "${app_name}"})),
},
"readonly": {
"create": Template(json.dumps({"roles": [{"role": "read"}]})),
"revoke": Template(""),
},
}
class VaultPKIKeyTypeBits(int, Enum):
    """Key sizes (in bits) used when generating Vault PKI key pairs."""
    rsa = 4096  # RSA modulus size
    ec = 256  # EC key size (curve not specified here)
@lru_cache
def get_vault_provider(
    vault_address: str, vault_env_namespace: str, provider_name: Optional[str] = None
) -> pulumi_vault.Provider:
    """Build (and memoize via ``lru_cache``) a Pulumi Vault provider.

    Credentials are read from the SOPS-encrypted
    ``pulumi/vault.<namespace>.yaml`` secrets file and used for userpass
    authentication against the ``pulumi`` auth mount.

    NOTE(review): the return annotation previously said
    ``pulumi.ResourceTransformationResult``, but the function has always
    returned a ``pulumi_vault.Provider``; the annotation is now correct.
    """
    pulumi_vault_creds = read_yaml_secrets(
        Path().joinpath(
            # We are forcing the assumption that the Vault cluster is in the
            # operations environment/namespace. (TMM 2021-10-19)
            "pulumi",
            f"vault.{vault_env_namespace}.yaml",
        )
    )
    return pulumi_vault.Provider(
        provider_name or "vault-provider",
        address=vault_address,
        add_address_to_env=True,
        token="",  # no static token; auth happens via userpass below
        auth_login_userpass=pulumi_vault.ProviderAuthLoginUserpassArgs(
            mount="pulumi",
            username=pulumi_vault_creds["auth_username"],
            password=pulumi.Output.secret(pulumi_vault_creds["auth_password"]),
        ),
    )
def METHOD_NAME(
    vault_address: str,
    vault_env_namespace: str,
    resource_args: pulumi.ResourceTransformationArgs,
) -> pulumi.ResourceTransformationResult:
    """Stack transformation that pins every ``vault:*`` resource to our provider.

    Resources from other providers pass through with their options untouched.
    """
    # Pulumi type tokens look like "vault:index/mount:Mount"; the first
    # colon-separated segment identifies the provider package.
    if resource_args.type_.split(":")[0] == "vault":
        resource_args.opts.provider = get_vault_provider(
            vault_address, vault_env_namespace
        )
    return pulumi.ResourceTransformationResult(
        props=resource_args.props,
        opts=resource_args.opts,
    )
def setup_vault_provider():
    """Read Vault settings from stack config and register the transformation.

    Requires the ``vault:address`` and ``vault_server:env_namespace`` stack
    configuration keys to be present.
    """
    vault_address = pulumi.Config("vault").require("address")
    vault_env_namespace = pulumi.Config("vault_server").require("env_namespace")
    pulumi.runtime.register_stack_transformation(
        partial(METHOD_NAME, vault_address, vault_env_namespace)
    )
299,613 | on case | from typing import Any
from qtpy import QtGui
from qtpy.QtWidgets import QWidget, QVBoxLayout, QAbstractItemView
from pyNastran.gui.utils.qt.qtreeview2 import RightClickTreeView, GenericRightClickTreeView
class ResultsWindow(QWidget):
"""
A ResultsWindow creates the box where we actually select our
results case. It does not have an apply button.
"""
    def __init__(self, parent, name, data, choices,
                 actions: list[tuple[str, Any, bool]]=None,
                 include_clear=True, include_export_case=True,
                 include_delete=True,
                 include_results=True):
        """
        Parameters
        ----------
        parent : varies
            the owning object (stored on self; not passed as the Qt parent)
        name : str
            header label shown above the tree view
        data : varies
            nested case data handed to ``addItems``
        choices : varies
            forwarded to the tree view
        actions : varies
            None:
                use the default actions
            list[tuple[str, Any, bool]]:
                action : (name, function, return_icase)
                name : str
                    the name of the action
                function : the callback function of the form:
                    def return_icase(icase):
                        pass
                    def dont_return_icase():
                        pass
                    the chosen function (return_icase/dont_return_icase) is determined by
                    return icase
                return_icase : bool
                    selects the corresponding function
        """
        QWidget.__init__(self)
        self.name = name
        self.data = data
        self.choices = choices
        self.parent = parent
        # Debug stubs; referenced only by the commented-out example
        # `actions` list below.
        def on_modify(icase: int):
            print('modify...%i' % icase)
        def METHOD_NAME(icase: int):
            print('case...%i' % icase)
        def on_delete():
            print('delete...')
        # Custom actions get the generic right-click view; otherwise the
        # standard one with its built-in clear/export/delete entries.
        if actions:
            #actions = [
            ## (right_click_msg, callback, validate?)
            ##('Clear Results...', self.on_clear_results, False),
            ##('Apply Results to Fringe...', 'fringe', self.on_fringe, True),
            ##('Apply Results to Displacement...', self.on_disp, True),
            ##('Apply Results to Vector...', self.on_vector, True),
            #('Delete...', on_delete, False),
            #('Modify...', on_modify, True),
            #]
            self.treeView = GenericRightClickTreeView(
                self, self.data, choices, actions,
                include_clear=include_clear, include_delete=include_delete,
                include_results=include_results)
        else:
            self.treeView = RightClickTreeView(
                self, self.data, choices,
                include_clear=include_clear, include_export_case=include_export_case,
                include_delete=include_delete,
                include_results=include_results, )
        # Read-only tree: results are selected, never edited in place.
        self.treeView.setEditTriggers(QAbstractItemView.NoEditTriggers)
        self.model = QtGui.QStandardItemModel()
        is_single = self.addItems(self.model, data)
        self.treeView.setModel(self.model)
        self.treeView.set_single(is_single)
        self.model.setHorizontalHeaderLabels([self.tr(self.name)])
        layout = QVBoxLayout()
        layout.addWidget(self.treeView)
        self.setLayout(layout)
def update_data(self, data):
self.clear_data()
self.data = data
try:
self.addItems(self.model, data)
except Exception:
raise
raise RuntimeError('cannot add data=\n%s' % data)
#if isinstance(data, str):
#self.addItems(self.model, data)
#else:
#self.addItems(self.model, *tuple(data))
self.treeView.data = data
#layout = QVBoxLayout()
#layout.addWidget(self.treeView)
#self.setLayout(layout)
def clear_data(self):
self.model.clear()
self.treeView.data = []
self.model.setHorizontalHeaderLabels([self.tr(self.name)])
def addItems(self, parent, elements, level=0, count_check=False):
nelements = len(elements)
redo = False
#print(elements[0])
try:
#if len(elements):
#assert len(elements[0]) == 3, 'len=%s elements[0]=%s\nelements=\n%s\n' % (
#len(elements[0]), elements[0], elements)
for element in elements:
#if isinstance(element, str):
#print('elements = %r' % str(elements))
#print('element = %r' % str(element))
if not len(element) == 3:
print('element = %r' % str(element))
try:
text, i, children = element
except ValueError:
# [
# ('Point Data', None, [
# ('NodeID', 0, []),
# ('Displacement T_XYZ_subcase=1', 1, []),
# ('Displacement R_XYZ_subcase=1', 2, []),
# ])
# ]
#
# should be:
# ('Point Data', None, [
# ('NodeID', 0, []),
# ('Displacement T_XYZ_subcase=1', 1, []),
# ('Displacement R_XYZ_subcase=1', 2, []),
# ])
print('failed element = ', element)
raise
nchildren = len(children)
#print('text=%r' % text)
item = QtGui.QStandardItem(text)
parent.appendRow(item)
# TODO: count_check and ???
if nelements == 1 and nchildren == 0 and level == 0:
#self.result_data_window.setEnabled(False)
item.setEnabled(False)
#print(dir(self.treeView))
#self.treeView.setCurrentItem(self, 0)
#item.mousePressEvent(None)
redo = True
#else:
#pass
#print('item=%s count_check=%s nelements=%s nchildren=%s' % (
#text, count_check, nelements, nchildren))
if children:
assert isinstance(children, list), children
self.addItems(item, children, level + 1, count_check=count_check)
#print('*children = %s' % children)
is_single = redo
return is_single
except ValueError:
print()
print(f'elements = {elements}')
print(f'element = {element}')
print(f'len(element) = {len(element)}')
print(f'len(elements)={len(elements)}')
for elem in elements:
print(' e = %s' % str(elem))
raise
#if redo:
# data = [
# ('A', []),
# ('B', []),
# ]
# self.update_data(data) |
299,614 | can edit | # This file is part of Indico.
# Copyright (C) 2002 - 2023 CERN
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the MIT License; see the
# LICENSE file for more details.
from flask import session
from sqlalchemy.ext.declarative import declared_attr
from indico.core.db import db
from indico.core.db.sqlalchemy.attachments import AttachedItemsMixin
from indico.core.db.sqlalchemy.descriptions import RenderMode, SearchableDescriptionMixin
from indico.core.db.sqlalchemy.notes import AttachedNotesMixin
from indico.core.db.sqlalchemy.searchable import SearchableTitleMixin
from indico.core.db.sqlalchemy.util.models import auto_table_args
from indico.core.db.sqlalchemy.util.queries import increment_and_get
from indico.util.iterables import materialize_iterable
from indico.util.locators import locator_property
from indico.util.string import format_repr, slugify
def _get_next_friendly_id(context):
    """Get the next friendly id for a sub-contribution."""
    from indico.modules.events.contributions.models.contributions import Contribution
    contrib_id = context.current_parameters['contribution_id']
    assert contrib_id is not None
    return increment_and_get(Contribution._last_friendly_subcontribution_id,
                             Contribution.id == contrib_id)
def _get_next_position(context):
    """Get the next menu entry position for the event."""
    contrib_id = context.current_parameters['contribution_id']
    max_position = (db.session.query(db.func.max(SubContribution.position))
                    .filter(SubContribution.contribution_id == contrib_id)
                    .one())[0]
    return (max_position or 0) + 1
class SubContribution(SearchableTitleMixin, SearchableDescriptionMixin, AttachedItemsMixin, AttachedNotesMixin,
                      db.Model):
    """A sub-contribution nested inside a contribution.

    Protection, location, access checks and timetable data are all
    delegated to the parent :attr:`contribution`.
    """

    __tablename__ = 'subcontributions'
    # friendly_id is only unique within its parent contribution
    __auto_table_args = (db.Index(None, 'friendly_id', 'contribution_id', unique=True),
                         db.CheckConstraint("date_trunc('minute', duration) = duration", 'duration_no_seconds'),
                         {'schema': 'events'})

    # settings consumed by the attachment/notes mixins
    PRELOAD_EVENT_ATTACHED_ITEMS = True
    PRELOAD_EVENT_NOTES = True
    ATTACHMENT_FOLDER_ID_COLUMN = 'subcontribution_id'
    possible_render_modes = {RenderMode.html, RenderMode.markdown}
    default_render_mode = RenderMode.markdown

    @declared_attr
    def __table_args__(cls):
        # merges __auto_table_args contributed by this class and its mixins
        return auto_table_args(cls)

    id = db.Column(
        db.Integer,
        primary_key=True
    )
    #: The human-friendly ID for the sub-contribution
    friendly_id = db.Column(
        db.Integer,
        nullable=False,
        default=_get_next_friendly_id
    )
    contribution_id = db.Column(
        db.Integer,
        db.ForeignKey('events.contributions.id'),
        index=True,
        nullable=False
    )
    #: Display position within the parent contribution
    position = db.Column(
        db.Integer,
        nullable=False,
        default=_get_next_position
    )
    code = db.Column(
        db.String,
        nullable=False,
        default=''
    )
    duration = db.Column(
        db.Interval,
        nullable=False
    )
    is_deleted = db.Column(
        db.Boolean,
        nullable=False,
        default=False
    )
    #: External references associated with this contribution
    references = db.relationship(
        'SubContributionReference',
        lazy=True,
        cascade='all, delete-orphan',
        backref=db.backref(
            'subcontribution',
            lazy=True
        )
    )
    #: Persons associated with this contribution
    person_links = db.relationship(
        'SubContributionPersonLink',
        lazy=True,
        cascade='all, delete-orphan',
        backref=db.backref(
            'subcontribution',
            lazy=True
        )
    )

    # relationship backrefs:
    # - attachment_folders (AttachmentFolder.subcontribution)
    # - contribution (Contribution.subcontributions)
    # - legacy_mapping (LegacySubContributionMapping.subcontribution)
    # - note (EventNote.subcontribution)

    def __init__(self, **kwargs):
        # explicitly initialize this relationship with None to avoid
        # an extra query to check whether there is an object associated
        # when assigning a new one (e.g. during cloning)
        kwargs.setdefault('note', None)
        super().__init__(**kwargs)

    @property
    def event(self):
        return self.contribution.event

    @locator_property
    def locator(self):
        # extends the parent contribution's locator with our own id
        return dict(self.contribution.locator, subcontrib_id=self.id)

    @property
    def is_protected(self):
        # protection is inherited from the parent contribution
        return self.contribution.is_protected

    @property
    def session(self):
        """Convenience property so all event entities have it."""
        return self.contribution.session if self.contribution.session_id is not None else None

    @property
    def timetable_entry(self):
        """Convenience property so all event entities have it."""
        return self.contribution.timetable_entry

    @property
    def speakers(self):
        return self.person_links

    @speakers.setter
    def speakers(self, value):
        # NOTE(review): the setter expects a mapping and stores only its
        # keys (the person links) -- confirm against callers
        self.person_links = list(value.keys())

    @property
    def slug(self):
        return slugify('sc', self.contribution.friendly_id, self.friendly_id, self.title, maxlen=30)

    @property
    def location_parent(self):
        return self.contribution

    @property
    def venue_name(self):
        return self.location_parent.venue_name

    @property
    def room_name(self):
        return self.location_parent.room_name

    def get_access_list(self):
        return self.contribution.get_access_list()

    def get_manager_list(self, recursive=False, include_groups=True):
        return self.contribution.get_manager_list(recursive=recursive, include_groups=include_groups)

    def __repr__(self):
        return format_repr(self, 'id', is_deleted=False, _text=self.title)

    def can_access(self, user, **kwargs):
        return self.contribution.can_access(user, **kwargs)

    def can_manage(self, user, permission=None, **kwargs):
        return self.contribution.can_manage(user, permission=permission, **kwargs)

    def METHOD_NAME(self, user):
        # editing rights are delegated to the parent contribution
        return self.contribution.METHOD_NAME(user)

    @materialize_iterable()
    def get_manage_button_options(self, *, note_may_exist=False):
        """Yield the manage-menu entries available to the current user."""
        if self.event.is_locked:
            return
        if self.can_edit_note(session.user) and (note_may_exist or not self.has_note):
            yield 'notes_edit'
        if self.METHOD_NAME(session.user):
            yield 'subcontribution_edit'
        if self.can_manage_attachments(session.user):
            yield 'attachments_edit'

    def is_user_associated(self, user):
        # associated = linked to the parent contribution or listed as a
        # person on this sub-contribution
        if user is None:
            return False
        if self.contribution.is_user_associated(user):
            return True
        return any(pl.person.user == user for pl in self.person_links if pl.person.user)
299,615 | name | # coding=utf-8
# *** WARNING: this file was generated by pulumi. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import copy
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
__all__ = [
'GetBatchEndpointResult',
'AwaitableGetBatchEndpointResult',
'get_batch_endpoint',
'get_batch_endpoint_output',
]
@pulumi.output_type
class GetBatchEndpointResult:
    """Result payload for a batch endpoint lookup.

    Generated code: the constructor only type-checks truthy arguments,
    matching the autorest generator's output.
    """
    def __init__(__self__, batch_endpoint_properties=None, id=None, identity=None, kind=None, location=None, METHOD_NAME=None, sku=None, system_data=None, tags=None, type=None):
        if batch_endpoint_properties and not isinstance(batch_endpoint_properties, dict):
            raise TypeError("Expected argument 'batch_endpoint_properties' to be a dict")
        pulumi.set(__self__, "batch_endpoint_properties", batch_endpoint_properties)
        if id and not isinstance(id, str):
            raise TypeError("Expected argument 'id' to be a str")
        pulumi.set(__self__, "id", id)
        if identity and not isinstance(identity, dict):
            raise TypeError("Expected argument 'identity' to be a dict")
        pulumi.set(__self__, "identity", identity)
        if kind and not isinstance(kind, str):
            raise TypeError("Expected argument 'kind' to be a str")
        pulumi.set(__self__, "kind", kind)
        if location and not isinstance(location, str):
            raise TypeError("Expected argument 'location' to be a str")
        pulumi.set(__self__, "location", location)
        if METHOD_NAME and not isinstance(METHOD_NAME, str):
            raise TypeError("Expected argument 'name' to be a str")
        pulumi.set(__self__, "name", METHOD_NAME)
        if sku and not isinstance(sku, dict):
            raise TypeError("Expected argument 'sku' to be a dict")
        pulumi.set(__self__, "sku", sku)
        if system_data and not isinstance(system_data, dict):
            raise TypeError("Expected argument 'system_data' to be a dict")
        pulumi.set(__self__, "system_data", system_data)
        if tags and not isinstance(tags, dict):
            raise TypeError("Expected argument 'tags' to be a dict")
        pulumi.set(__self__, "tags", tags)
        if type and not isinstance(type, str):
            raise TypeError("Expected argument 'type' to be a str")
        pulumi.set(__self__, "type", type)

    @property
    @pulumi.getter(METHOD_NAME="batchEndpointProperties")
    def batch_endpoint_properties(self) -> 'outputs.BatchEndpointResponse':
        """
        [Required] Additional attributes of the entity.
        """
        return pulumi.get(self, "batch_endpoint_properties")

    @property
    @pulumi.getter
    def id(self) -> str:
        """
        Fully qualified resource ID for the resource. Ex - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}
        """
        return pulumi.get(self, "id")

    @property
    @pulumi.getter
    def identity(self) -> Optional['outputs.ManagedServiceIdentityResponse']:
        """
        Managed service identity (system assigned and/or user assigned identities)
        """
        return pulumi.get(self, "identity")

    @property
    @pulumi.getter
    def kind(self) -> Optional[str]:
        """
        Metadata used by portal/tooling/etc to render different UX experiences for resources of the same type.
        """
        return pulumi.get(self, "kind")

    @property
    @pulumi.getter
    def location(self) -> str:
        """
        The geo-location where the resource lives
        """
        return pulumi.get(self, "location")

    @property
    @pulumi.getter
    def METHOD_NAME(self) -> str:
        """
        The name of the resource
        """
        return pulumi.get(self, "name")

    @property
    @pulumi.getter
    def sku(self) -> Optional['outputs.SkuResponse']:
        """
        Sku details required for ARM contract for Autoscaling.
        """
        return pulumi.get(self, "sku")

    @property
    @pulumi.getter(METHOD_NAME="systemData")
    def system_data(self) -> 'outputs.SystemDataResponse':
        """
        Azure Resource Manager metadata containing createdBy and modifiedBy information.
        """
        return pulumi.get(self, "system_data")

    @property
    @pulumi.getter
    def tags(self) -> Optional[Mapping[str, str]]:
        """
        Resource tags.
        """
        return pulumi.get(self, "tags")

    @property
    @pulumi.getter
    def type(self) -> str:
        """
        The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or "Microsoft.Storage/storageAccounts"
        """
        return pulumi.get(self, "type")
class AwaitableGetBatchEndpointResult(GetBatchEndpointResult):
    """Awaitable variant of the result so it can be used with ``await``."""
    # pylint: disable=using-constant-test
    def __await__(self):
        # The ``if False: yield`` makes this method a generator that yields
        # nothing; awaiting immediately returns a plain result copy.
        if False:
            yield self
        return GetBatchEndpointResult(
            batch_endpoint_properties=self.batch_endpoint_properties,
            id=self.id,
            identity=self.identity,
            kind=self.kind,
            location=self.location,
            METHOD_NAME=self.METHOD_NAME,
            sku=self.sku,
            system_data=self.system_data,
            tags=self.tags,
            type=self.type)
def get_batch_endpoint(endpoint_name: Optional[str] = None,
                       resource_group_name: Optional[str] = None,
                       workspace_name: Optional[str] = None,
                       opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetBatchEndpointResult:
    """
    Use this data source to access information about an existing resource.

    :param str endpoint_name: Name for the Batch Endpoint.
    :param str resource_group_name: The name of the resource group. The name is case insensitive.
    :param str workspace_name: Name of Azure Machine Learning workspace.
    :return: The batch endpoint description wrapped in an awaitable result.
    """
    __args__ = dict()
    __args__['endpointName'] = endpoint_name
    __args__['resourceGroupName'] = resource_group_name
    __args__['workspaceName'] = workspace_name
    opts = pulumi.InvokeOptions.merge(_utilities.get_invoke_opts_defaults(), opts)
    # synchronous invoke against the fixed 2023-04-01 API version
    __ret__ = pulumi.runtime.invoke('azure-native:machinelearningservices/v20230401:getBatchEndpoint', __args__, opts=opts, typ=GetBatchEndpointResult).value

    return AwaitableGetBatchEndpointResult(
        batch_endpoint_properties=pulumi.get(__ret__, 'batch_endpoint_properties'),
        id=pulumi.get(__ret__, 'id'),
        identity=pulumi.get(__ret__, 'identity'),
        kind=pulumi.get(__ret__, 'kind'),
        location=pulumi.get(__ret__, 'location'),
        METHOD_NAME=pulumi.get(__ret__, 'name'),
        sku=pulumi.get(__ret__, 'sku'),
        system_data=pulumi.get(__ret__, 'system_data'),
        tags=pulumi.get(__ret__, 'tags'),
        type=pulumi.get(__ret__, 'type'))
@_utilities.lift_output_func(get_batch_endpoint)
def get_batch_endpoint_output(endpoint_name: Optional[pulumi.Input[str]] = None,
                              resource_group_name: Optional[pulumi.Input[str]] = None,
                              workspace_name: Optional[pulumi.Input[str]] = None,
                              opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetBatchEndpointResult]:
    """
    Use this data source to access information about an existing resource.

    :param str endpoint_name: Name for the Batch Endpoint.
    :param str resource_group_name: The name of the resource group. The name is case insensitive.
    :param str workspace_name: Name of Azure Machine Learning workspace.
    """
    # Body intentionally empty: ``lift_output_func`` wraps get_batch_endpoint
    # to provide the Output-accepting implementation.
    ...
299,616 | to dict | from RLTest import Env
import random
from includes import *
from common import getConnectionByEnv, waitForIndex, toSortedFlatList
def aofTestCommon(env, reloadfn):
    """Index nine docs, reload twice via *reloadfn*, and verify the search
    results survive the reloads."""
    # TODO: Change this attribute in rmtest
    conn = getConnectionByEnv(env)
    env.cmd('ft.create', 'idx', 'ON', 'HASH', 'schema', 'field1', 'text', 'field2', 'numeric')
    for doc_id in range(1, 10):
        conn.execute_command('hset', 'doc{}'.format(doc_id),
                             'field1', 'myText{}'.format(doc_id),
                             'field2', 20 * doc_id)

    reloadfn()
    waitForIndex(env, 'idx')
    # expected reply: count followed by (key, fields) pairs for doc1..doc9
    expected = [9]
    for doc_id in range(1, 10):
        expected.append('doc{}'.format(doc_id))
        expected.append(['field1', 'myText{}'.format(doc_id), 'field2', str(20 * doc_id)])
    reloadfn()
    waitForIndex(env, 'idx')
    res = env.cmd('ft.search', 'idx', 'myt*')
    env.assertEqual(toSortedFlatList(res), toSortedFlatList(expected))
def testAof():
    """Run the common AOF test with a full restart-and-reload."""
    aof_env = Env(useAof=True)
    aofTestCommon(aof_env, aof_env.restart_and_reload)
def testRawAof():
    """Run the common AOF test via an in-place DEBUG LOADAOF."""
    aof_env = Env(useAof=True)
    if aof_env.env == 'existing-env':
        aof_env.skip()
    aofTestCommon(aof_env, lambda: aof_env.broadcast('debug', 'loadaof'))
def testRewriteAofSortables():
    """Sorted search results must be identical before and after an AOF reload."""
    env = Env(useAof=True)
    env.cmd('FT.CREATE', 'idx', 'ON', 'HASH',
            'schema', 'field1', 'TEXT', 'SORTABLE', 'num1', 'NUMERIC', 'SORTABLE')
    env.cmd('FT.ADD', 'idx', 'doc', 1.0,
            'FIELDS', 'field1', 'Hello World')
    env.restart_and_reload()
    env.broadcast('SAVE')

    # Load some documents
    for i in range(100):
        env.cmd('FT.ADD', 'idx', 'doc{}'.format(i), 1.0, 'FIELDS',
                'field1', 'txt{}'.format(random.random()),
                'num1', random.random())

    for field, order in (('field1', 'asc'), ('num1', 'desc')):
        query = ['FT.SEARCH', 'idx', 'txt', 'SORTBY', field, order]
        before = env.cmd(*query)
        env.restart_and_reload()
        after = env.cmd(*query)
        env.assertEqual(before, after)
def testAofRewriteSortkeys():
    """WITHSORTKEYS results must survive an AOF rewrite + reload."""
    env = Env(useAof=True)
    env.cmd('FT.CREATE', 'idx', 'ON', 'HASH',
            'SCHEMA', 'foo', 'TEXT', 'SORTABLE', 'bar', 'TAG')
    env.cmd('FT.ADD', 'idx', '1', '1', 'FIELDS', 'foo', 'A', 'bar', '1')
    env.cmd('FT.ADD', 'idx', '2', '1', 'fields', 'foo', 'B', 'bar', '1')

    query = ('FT.SEARCH', 'idx', '@bar:{1}', 'SORTBY', 'foo', 'ASC',
             'RETURN', '1', 'foo', 'WITHSORTKEYS')
    res_exp = env.cmd(*query)
    env.restart_and_reload()
    waitForIndex(env, 'idx')
    res_got = env.cmd(*query)
    env.assertEqual(res_exp, res_got)
def testAofRewriteTags():
    """TAG index metadata must survive a reload, and the index must be
    re-creatable after a drop."""
    env = Env(useAof=True)
    conn = getConnectionByEnv(env)
    schema = ('SCHEMA', 'foo', 'TEXT', 'SORTABLE', 'bar', 'TAG')
    env.cmd('FT.CREATE', 'idx', 'ON', 'HASH', *schema)
    env.cmd('FT.ADD', 'idx', '1', '1', 'FIELDS', 'foo', 'A', 'bar', '1')
    env.cmd('FT.ADD', 'idx', '2', '1', 'fields', 'foo', 'B', 'bar', '1')

    info_a = METHOD_NAME(env.cmd('FT.INFO', 'idx'))
    env.restart_and_reload()
    info_b = METHOD_NAME(env.cmd('FT.INFO', 'idx'))
    env.assertEqual(info_a['attributes'], info_b['attributes'])

    # Try to drop the schema
    env.cmd('FT.DROP', 'idx')
    conn.execute_command('del', '1')
    conn.execute_command('del', '2')

    # Try to create it again - should work!
    env.cmd('FT.CREATE', 'idx', 'ON', 'HASH', *schema)
    env.cmd('FT.ADD', 'idx', '1', '1', 'FIELDS', 'foo', 'A', 'bar', '1')
    env.cmd('FT.ADD', 'idx', '2', '1', 'fields', 'foo', 'B', 'bar', '1')
    res = env.cmd('FT.SEARCH', 'idx', '@bar:{1}', 'SORTBY', 'foo', 'ASC',
                  'RETURN', '1', 'foo', 'WITHSORTKEYS')
    env.assertEqual([2, '1', '$a', ['foo', 'A'],
                     '2', '$b', ['foo', 'B']], res)
def METHOD_NAME(r):
    """Convert a flat ``[k1, v1, k2, v2, ...]`` reply into a dict.

    Raises IndexError if the input has an odd number of elements,
    matching the original pairwise-indexing behavior.
    """
    result = {}
    for index in range(0, len(r), 2):
        result[r[index]] = r[index + 1]
    return result
299,617 | get token metadata | # coding=utf-8
# Distributed under the MIT software license, see the accompanying
# file LICENSE or http://www.opensource.org/licenses/mit-license.php.
from google.protobuf.json_format import MessageToJson, Parse
from qrl.generated import qrl_pb2
from qrl.core.misc import logger
from qrl.core.State import State
from qrl.core.txs.TokenTransaction import TokenTransaction
from qrl.core.txs.TransferTokenTransaction import TransferTokenTransaction
class TokenMetadata(object):
    """Tracks, for one token, the tx hashes of all related transfer-token
    transactions, persisted as a ``qrl_pb2.TokenMetadata`` protobuf."""

    def __init__(self, protobuf_data=None):
        self._data = protobuf_data
        if protobuf_data is None:
            self._data = qrl_pb2.TokenMetadata()

    @property
    def pbdata(self):
        """
        Returns a protobuf object that contains persistable data representing this object
        :return: A protobuf TokenMetadata object
        :rtype: qrl_pb2.TokenMetadata
        """
        return self._data

    @property
    def token_txhash(self):
        return self._data.token_txhash

    @property
    def transfer_token_tx_hashes(self):
        return self._data.transfer_token_tx_hashes

    @staticmethod
    def create(token_txhash: bytes, transfer_token_txhashes: list):
        """Build a new TokenMetadata for *token_txhash* seeded with the
        given transfer-token tx hashes."""
        token_metadata = TokenMetadata()
        token_metadata._data.token_txhash = token_txhash
        token_metadata.update(transfer_token_txhashes)
        return token_metadata

    def update(self, transfer_token_txhashes: list):
        """Append all given transfer-token tx hashes.

        Uses a single repeated-field extend() instead of the previous
        one-element extend() call per item.
        """
        self._data.transfer_token_tx_hashes.extend(transfer_token_txhashes)

    def remove(self, transfer_token_txhash: bytes):
        """Delete the first occurrence of *transfer_token_txhash*, if any."""
        hashes = self._data.transfer_token_tx_hashes
        for index, tx_hash in enumerate(hashes):
            if tx_hash == transfer_token_txhash:
                # delete-then-return: we never iterate past the mutation
                del hashes[index]
                return

    def to_json(self):
        return MessageToJson(self._data, sort_keys=True)

    @staticmethod
    def from_json(json_data):
        pbdata = qrl_pb2.TokenMetadata()
        Parse(json_data, pbdata)
        return TokenMetadata(pbdata)

    def serialize(self) -> bytes:
        # SerializeToString() returns bytes; the previous ``-> str``
        # annotation was incorrect.
        return self._data.SerializeToString()

    @staticmethod
    def deserialize(data):
        pbdata = qrl_pb2.TokenMetadata()
        pbdata.ParseFromString(bytes(data))
        return TokenMetadata(pbdata)

    @staticmethod
    def METHOD_NAME(state: State, token_txhash: bytes):
        """Load the metadata for *token_txhash*; None if missing or on error."""
        try:
            data = state._db.get_raw(b'token_' + token_txhash)
            return TokenMetadata.deserialize(data)
        except KeyError:
            # no metadata stored for this token yet
            pass
        except Exception as e:
            logger.error('[get_token_metadata] %s', e)
        return None

    @staticmethod
    def update_token_metadata(state: State, transfer_token: TransferTokenTransaction, batch):
        """Record *transfer_token*'s hash against its token's metadata."""
        token_metadata = TokenMetadata.METHOD_NAME(state, transfer_token.token_txhash)
        token_metadata.update([transfer_token.txhash])
        state._db.put_raw(b'token_' + transfer_token.token_txhash,
                          token_metadata.serialize(),
                          batch)

    @staticmethod
    def create_token_metadata(state: State, token: TokenTransaction, batch):
        """Persist fresh metadata for a newly created token."""
        token_metadata = TokenMetadata.create(token_txhash=token.txhash, transfer_token_txhashes=[token.txhash])
        state._db.put_raw(b'token_' + token.txhash,
                          token_metadata.serialize(),
                          batch)

    @staticmethod
    def remove_transfer_token_metadata(state: State, transfer_token: TransferTokenTransaction, batch):
        """Undo update_token_metadata for *transfer_token* (e.g. on rollback)."""
        token_metadata = TokenMetadata.METHOD_NAME(state, transfer_token.token_txhash)
        token_metadata.remove(transfer_token.txhash)
        state._db.put_raw(b'token_' + transfer_token.token_txhash,
                          token_metadata.serialize(),
                          batch)

    @staticmethod
    def remove_token_metadata(state: State, token: TokenTransaction, batch):
        """Delete the token's metadata entry entirely."""
        state._db.delete(b'token_' + token.txhash, batch)
299,618 | get next | # pylint: disable=too-many-lines
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, Callable, Dict, Iterable, Optional, TypeVar
import urllib.parse
from azure.core.exceptions import (
ClientAuthenticationError,
HttpResponseError,
ResourceExistsError,
ResourceNotFoundError,
ResourceNotModifiedError,
map_error,
)
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpResponse
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.core.utils import case_insensitive_dict
from azure.mgmt.core.exceptions import ARMErrorFormat
from .. import models as _models
from .._serialization import Serializer
from .._vendor import RecoveryServicesBackupClientMixinABC, _convert_request, _format_url_section
T = TypeVar("T")
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
_SERIALIZER = Serializer()
_SERIALIZER.client_side_validation = False
def build_get_request(vault_name: str, resource_group_name: str, subscription_id: str, **kwargs: Any) -> HttpRequest:
    """Build the HTTP GET request that lists the ResourceGuardProxies of a
    recovery services vault."""
    _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})

    api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-02-01"))
    accept = _headers.pop("Accept", "application/json")

    # Construct URL
    template = kwargs.pop(
        "template_url",
        "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.RecoveryServices/vaults/{vaultName}/backupResourceGuardProxies",
    )  # pylint: disable=line-too-long
    path_args = {
        "vaultName": _SERIALIZER.url("vault_name", vault_name, "str"),
        "resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, "str"),
        "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str"),
    }
    _url: str = _format_url_section(template, **path_args)  # type: ignore

    # Construct parameters and headers
    _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
    _headers["Accept"] = _SERIALIZER.header("accept", accept, "str")

    return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs)
class ResourceGuardProxiesOperations:
    """
    .. warning::
        **DO NOT** instantiate this class directly.

        Instead, you should access the following operations through
        :class:`~azure.mgmt.recoveryservicesbackup.activestamp.RecoveryServicesBackupClient`'s
        :attr:`resource_guard_proxies` attribute.
    """

    models = _models

    def __init__(self, *args, **kwargs):
        # The generated client passes (client, config, serializer,
        # deserializer) positionally; keyword fallbacks keep both call
        # styles working.
        input_args = list(args)
        self._client = input_args.pop(0) if input_args else kwargs.pop("client")
        self._config = input_args.pop(0) if input_args else kwargs.pop("config")
        self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer")
        self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer")

    @distributed_trace
    def get(
        self, vault_name: str, resource_group_name: str, **kwargs: Any
    ) -> Iterable["_models.ResourceGuardProxyBaseResource"]:
        """List the ResourceGuardProxies under vault.

        :param vault_name: The name of the recovery services vault. Required.
        :type vault_name: str
        :param resource_group_name: The name of the resource group where the recovery services vault is
         present. Required.
        :type resource_group_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either ResourceGuardProxyBaseResource or the result of
         cls(response)
        :rtype:
         ~azure.core.paging.ItemPaged[~azure.mgmt.recoveryservicesbackup.activestamp.models.ResourceGuardProxyBaseResource]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        _headers = kwargs.pop("headers", {}) or {}
        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})

        api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
        cls: ClsType[_models.ResourceGuardProxyBaseResourceList] = kwargs.pop("cls", None)

        error_map = {
            401: ClientAuthenticationError,
            404: ResourceNotFoundError,
            409: ResourceExistsError,
            304: ResourceNotModifiedError,
        }
        error_map.update(kwargs.pop("error_map", {}) or {})

        def prepare_request(next_link=None):
            # First page: build the request from the operation template.
            # Subsequent pages: follow next_link, re-applying api-version.
            if not next_link:
                request = build_get_request(
                    vault_name=vault_name,
                    resource_group_name=resource_group_name,
                    subscription_id=self._config.subscription_id,
                    api_version=api_version,
                    template_url=self.get.metadata["url"],
                    headers=_headers,
                    params=_params,
                )
                request = _convert_request(request)
                request.url = self._client.format_url(request.url)

            else:
                # make call to next link with the client's api-version
                _parsed_next_link = urllib.parse.urlparse(next_link)
                _next_request_params = case_insensitive_dict(
                    {
                        key: [urllib.parse.quote(v) for v in value]
                        for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items()
                    }
                )
                _next_request_params["api-version"] = self._config.api_version
                request = HttpRequest(
                    "GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params
                )
                request = _convert_request(request)
                request.url = self._client.format_url(request.url)
                request.method = "GET"
            return request

        def extract_data(pipeline_response):
            # Deserialize one page and hand ItemPaged the continuation token
            # plus an iterator over the page's elements.
            deserialized = self._deserialize("ResourceGuardProxyBaseResourceList", pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)  # type: ignore
            return deserialized.next_link or None, iter(list_of_elem)

        def METHOD_NAME(next_link=None):
            # Fetch one page; non-200 responses are mapped to typed errors.
            request = prepare_request(next_link)

            _stream = False
            pipeline_response: PipelineResponse = self._client._pipeline.run(  # pylint: disable=protected-access
                request, stream=_stream, **kwargs
            )
            response = pipeline_response.http_response

            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response, error_format=ARMErrorFormat)

            return pipeline_response

        return ItemPaged(METHOD_NAME, extract_data)

    get.metadata = {
        "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.RecoveryServices/vaults/{vaultName}/backupResourceGuardProxies"
    }
299,619 | upfirdn2d | # pylint: skip-file
# type: ignore
# modify from https://github.com/rosinality/stylegan2-pytorch/blob/master/op/upfirdn2d.py # noqa:E501
import os
import torch
from torch.autograd import Function
from torch.nn import functional as F
upfirdn2d_ext = None
class UpFirDn2dBackward(Function):
    """Gradient of UpFirDn2d with respect to its input.

    NOTE(review): ``upfirdn2d_ext`` is set to ``None`` at module level in
    this file; both passes require the compiled extension to be loaded
    elsewhere -- confirm before using this path.
    """
    @staticmethod
    def forward(
        ctx, grad_output, kernel, grad_kernel, up, down, pad, g_pad, in_size, out_size
    ):

        up_x, up_y = up
        down_x, down_y = down
        g_pad_x0, g_pad_x1, g_pad_y0, g_pad_y1 = g_pad

        grad_output = grad_output.reshape(-1, out_size[0], out_size[1], 1)

        # The gradient is computed by running the extension op with the
        # (pre-flipped) grad_kernel, the up/down factors swapped, and the
        # gradient padding g_pad.
        grad_input = upfirdn2d_ext.METHOD_NAME(
            grad_output,
            grad_kernel,
            down_x,
            down_y,
            up_x,
            up_y,
            g_pad_x0,
            g_pad_x1,
            g_pad_y0,
            g_pad_y1,
        )
        grad_input = grad_input.view(in_size[0], in_size[1], in_size[2], in_size[3])

        ctx.save_for_backward(kernel)

        pad_x0, pad_x1, pad_y0, pad_y1 = pad

        # stash the forward parameters for the double-backward pass
        ctx.up_x = up_x
        ctx.up_y = up_y
        ctx.down_x = down_x
        ctx.down_y = down_y
        ctx.pad_x0 = pad_x0
        ctx.pad_x1 = pad_x1
        ctx.pad_y0 = pad_y0
        ctx.pad_y1 = pad_y1
        ctx.in_size = in_size
        ctx.out_size = out_size

        return grad_input

    @staticmethod
    def backward(ctx, gradgrad_input):
        (kernel,) = ctx.saved_tensors

        gradgrad_input = gradgrad_input.reshape(-1, ctx.in_size[2], ctx.in_size[3], 1)

        # second-order gradient: re-apply the original forward op
        gradgrad_out = upfirdn2d_ext.METHOD_NAME(
            gradgrad_input,
            kernel,
            ctx.up_x,
            ctx.up_y,
            ctx.down_x,
            ctx.down_y,
            ctx.pad_x0,
            ctx.pad_x1,
            ctx.pad_y0,
            ctx.pad_y1,
        )
        # gradgrad_out = gradgrad_out.view(ctx.in_size[0], ctx.out_size[0],
        #                                  ctx.out_size[1], ctx.in_size[3])
        gradgrad_out = gradgrad_out.view(
            ctx.in_size[0], ctx.in_size[1], ctx.out_size[0], ctx.out_size[1]
        )

        return gradgrad_out, None, None, None, None, None, None, None, None
class UpFirDn2d(Function):
    """Autograd wrapper around the compiled ``upfirdn2d`` extension op
    (upsample, FIR filter, downsample)."""
    @staticmethod
    def forward(ctx, input, kernel, up, down, pad):
        up_x, up_y = up
        down_x, down_y = down
        pad_x0, pad_x1, pad_y0, pad_y1 = pad

        kernel_h, kernel_w = kernel.shape
        _, channel, in_h, in_w = input.shape
        ctx.in_size = input.shape

        # the extension works on (N*C, H, W, 1) layout
        input = input.reshape(-1, in_h, in_w, 1)

        # save the kernel and its 180-degree flip for the backward pass
        ctx.save_for_backward(kernel, torch.flip(kernel, [0, 1]))

        out_h = (in_h * up_y + pad_y0 + pad_y1 - kernel_h) // down_y + 1
        out_w = (in_w * up_x + pad_x0 + pad_x1 - kernel_w) // down_x + 1
        ctx.out_size = (out_h, out_w)

        ctx.up = (up_x, up_y)
        ctx.down = (down_x, down_y)
        ctx.pad = (pad_x0, pad_x1, pad_y0, pad_y1)

        # padding used when running the op "in reverse" for the gradient
        g_pad_x0 = kernel_w - pad_x0 - 1
        g_pad_y0 = kernel_h - pad_y0 - 1
        g_pad_x1 = in_w * up_x - out_w * down_x + pad_x0 - up_x + 1
        g_pad_y1 = in_h * up_y - out_h * down_y + pad_y0 - up_y + 1

        ctx.g_pad = (g_pad_x0, g_pad_x1, g_pad_y0, g_pad_y1)

        out = upfirdn2d_ext.METHOD_NAME(
            input, kernel, up_x, up_y, down_x, down_y, pad_x0, pad_x1, pad_y0, pad_y1
        )
        # out = out.view(major, out_h, out_w, minor)
        out = out.view(-1, channel, out_h, out_w)

        return out

    @staticmethod
    def backward(ctx, grad_output):
        kernel, grad_kernel = ctx.saved_tensors

        grad_input = UpFirDn2dBackward.apply(
            grad_output,
            kernel,
            grad_kernel,
            ctx.up,
            ctx.down,
            ctx.pad,
            ctx.g_pad,
            ctx.in_size,
            ctx.out_size,
        )

        # gradients only flow to ``input``; the remaining forward args are
        # non-differentiable
        return grad_input, None, None, None, None
def METHOD_NAME(input, kernel, up=1, down=1, pad=(0, 0)):
    """Upsample, FIR-filter and downsample ``input`` with ``kernel``.

    Dispatches to the pure-PyTorch reference implementation on CPU and to
    the custom autograd ``Function`` (extension kernel) on other devices.
    The scalar ``up``/``down`` factors and the two-element ``pad`` are
    applied symmetrically to both spatial axes.
    """
    pad_a, pad_b = pad[0], pad[1]
    if input.device.type == "cpu":
        return upfirdn2d_native(
            input, kernel, up, up, down, down, pad_a, pad_b, pad_a, pad_b
        )
    return UpFirDn2d.apply(
        input, kernel, (up, up), (down, down), (pad_a, pad_b, pad_a, pad_b)
    )
def upfirdn2d_native(
    input, kernel, up_x, up_y, down_x, down_y, pad_x0, pad_x1, pad_y0, pad_y1
):
    """Reference upfirdn2d: zero-stuff upsample, pad, FIR-filter, downsample.

    ``input`` is (N, C, H, W); the 2-D ``kernel`` is applied per channel.
    Negative padding values crop instead of padding.
    """
    _, num_ch, h, w = input.shape
    x = input.reshape(-1, h, w, 1)
    _, h, w, depth = x.shape
    kh, kw = kernel.shape

    # Zero-stuffing upsample: insert (up - 1) zeros after every sample.
    x = x.view(-1, h, 1, w, 1, depth)
    x = F.pad(x, [0, 0, 0, up_x - 1, 0, 0, 0, up_y - 1])
    x = x.view(-1, h * up_y, w * up_x, depth)

    # Positive padding via F.pad, negative padding via slicing.
    x = F.pad(
        x, [0, 0, max(pad_x0, 0), max(pad_x1, 0), max(pad_y0, 0), max(pad_y1, 0)]
    )
    x = x[
        :,
        max(-pad_y0, 0) : x.shape[1] - max(-pad_y1, 0),
        max(-pad_x0, 0) : x.shape[2] - max(-pad_x1, 0),
        :,
    ]

    # Correlate with the kernel: conv2d on a flipped copy equals correlation.
    x = x.permute(0, 3, 1, 2)
    x = x.reshape(
        [-1, 1, h * up_y + pad_y0 + pad_y1, w * up_x + pad_x0 + pad_x1]
    )
    flipped = torch.flip(kernel, [0, 1]).view(1, 1, kh, kw)
    x = F.conv2d(x, flipped)
    x = x.reshape(
        -1,
        depth,
        h * up_y + pad_y0 + pad_y1 - kh + 1,
        w * up_x + pad_x0 + pad_x1 - kw + 1,
    )
    x = x.permute(0, 2, 3, 1)

    # Downsample by striding.
    x = x[:, ::down_y, ::down_x, :]

    out_h = (h * up_y + pad_y0 + pad_y1 - kh) // down_y + 1
    out_w = (w * up_x + pad_x0 + pad_x1 - kw) // down_x + 1

    return x.view(-1, num_ch, out_h, out_w)
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
import uuid
from msrest.pipeline import ClientRawResponse
from msrestazure.azure_exceptions import CloudError
from .. import models
class LoadBalancerProbesOperations(object):
    """LoadBalancerProbesOperations operations.

    Auto-generated (AutoRest) operations group for load-balancer probes.

    :param client: Client for service requests.
    :param config: Configuration of service client.
    :param serializer: An object model serializer.
    :param deserializer: An object model deserializer.
    :ivar api_version: Client API version. Constant value: "2018-08-01".
    """

    # Exposed so callers can reach the generated model classes through the
    # operations group.
    models = models

    def __init__(self, client, config, serializer, deserializer):
        self._client = client
        self._serialize = serializer
        self._deserialize = deserializer
        # API version is pinned per generated operations class.
        self.api_version = "2018-08-01"

        self.config = config

    def METHOD_NAME(
            self, resource_group_name, load_balancer_name, custom_headers=None, raw=False, **operation_config):
        """Gets all the load balancer probes.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param load_balancer_name: The name of the load balancer.
        :type load_balancer_name: str
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides<msrest:optionsforoperations>`.
        :return: An iterator like instance of Probe
        :rtype:
         ~azure.mgmt.network.v2018_08_01.models.ProbePaged[~azure.mgmt.network.v2018_08_01.models.Probe]
        :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
        """
        def internal_paging(next_link=None, raw=False):
            # First page: build the URL from operation metadata.  Follow-up
            # pages: the service returns an absolute next_link used verbatim.
            if not next_link:
                # Construct URL
                url = self.METHOD_NAME.metadata['url']
                path_format_arguments = {
                    'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
                    'loadBalancerName': self._serialize.url("load_balancer_name", load_balancer_name, 'str'),
                    'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
                }
                url = self._client.format_url(url, **path_format_arguments)

                # Construct parameters
                query_parameters = {}
                query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')

            else:
                url = next_link
                query_parameters = {}

            # Construct headers
            header_parameters = {}
            header_parameters['Accept'] = 'application/json'
            if self.config.generate_client_request_id:
                # Per-request correlation id for server-side tracing.
                header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
            if custom_headers:
                header_parameters.update(custom_headers)
            if self.config.accept_language is not None:
                header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')

            # Construct and send request
            request = self._client.get(url, query_parameters, header_parameters)
            response = self._client.send(request, stream=False, **operation_config)

            if response.status_code not in [200]:
                exp = CloudError(response)
                exp.request_id = response.headers.get('x-ms-request-id')
                raise exp

            return response

        # Deserialize response: ProbePaged pulls pages lazily via the closure.
        deserialized = models.ProbePaged(internal_paging, self._deserialize.dependencies)

        if raw:
            header_dict = {}
            client_raw_response = models.ProbePaged(internal_paging, self._deserialize.dependencies, header_dict)
            return client_raw_response

        return deserialized
    METHOD_NAME.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/loadBalancers/{loadBalancerName}/probes'}

    def get(
            self, resource_group_name, load_balancer_name, probe_name, custom_headers=None, raw=False, **operation_config):
        """Gets load balancer probe.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param load_balancer_name: The name of the load balancer.
        :type load_balancer_name: str
        :param probe_name: The name of the probe.
        :type probe_name: str
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides<msrest:optionsforoperations>`.
        :return: Probe or ClientRawResponse if raw=true
        :rtype: ~azure.mgmt.network.v2018_08_01.models.Probe or
         ~msrest.pipeline.ClientRawResponse
        :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
        """
        # Construct URL
        url = self.get.metadata['url']
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'loadBalancerName': self._serialize.url("load_balancer_name", load_balancer_name, 'str'),
            'probeName': self._serialize.url("probe_name", probe_name, 'str'),
            'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}
        query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')

        # Construct headers
        header_parameters = {}
        header_parameters['Accept'] = 'application/json'
        if self.config.generate_client_request_id:
            header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
        if custom_headers:
            header_parameters.update(custom_headers)
        if self.config.accept_language is not None:
            header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')

        # Construct and send request
        request = self._client.get(url, query_parameters, header_parameters)
        response = self._client.send(request, stream=False, **operation_config)

        if response.status_code not in [200]:
            exp = CloudError(response)
            exp.request_id = response.headers.get('x-ms-request-id')
            raise exp

        deserialized = None

        if response.status_code == 200:
            deserialized = self._deserialize('Probe', response)

        if raw:
            client_raw_response = ClientRawResponse(deserialized, response)
            return client_raw_response

        return deserialized
    get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/loadBalancers/{loadBalancerName}/probes/{probeName}'}
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This file contains a contains the high-level functions to read the various
VOSI Endpoints.
"""
from astropy.utils.xml.writer import XMLWriter
from astropy.io.votable.util import convert_to_writable_filelike
from ...utils.xml.elements import xmlattribute, parse_for_object
from .tree import JobSummary, Jobs
__all__ = ["parse_job", "parse_job_list", "JobFile"]
def parse_job_list(
    source, pedantic=None, filename=None, _debug_python_based_parser=False
):
    """Parse a UWS job-list document and return its list of jobs.

    Parameters
    ----------
    source : str or readable file-like object
        Path or file object containing the XML document.
    pedantic : bool, optional
        When `True`, raise an error when the file violates the spec;
        otherwise issue a warning (controllable via the `warnings`
        module).  Defaults to False.
    filename : str, optional
        A filename, URL or other identifier to use in error messages.
        Only required when *source* is a file-like object; otherwise
        *source* itself is used.

    Returns
    -------
    The ``joblist`` attribute of the parsed `~pyvo.io.uws.tree.Jobs`
    document.

    See also
    --------
    pyvo.io.vosi.exceptions : The exceptions this function may raise.
    """
    jobs_doc = parse_for_object(
        source, Jobs, pedantic, filename, _debug_python_based_parser)
    return jobs_doc.joblist
def parse_job(
    source, pedantic=None, filename=None, _debug_python_based_parser=False
):
    """Parse a UWS job document into a `~pyvo.io.uws.tree.JobFile`.

    Parameters
    ----------
    source : str or readable file-like object
        Path or file object containing the XML document.
    pedantic : bool, optional
        When `True`, raise an error when the file violates the spec;
        otherwise issue a warning (controllable via the `warnings`
        module).  Defaults to False.
    filename : str, optional
        A filename, URL or other identifier to use in error messages.
        Only required when *source* is a file-like object; otherwise
        *source* itself is used.

    Returns
    -------
    `~pyvo.io.uws.tree.JobFile` object

    See also
    --------
    pyvo.io.vosi.exceptions : The exceptions this function may raise.
    """
    return parse_for_object(
        source, JobFile, pedantic, filename, _debug_python_based_parser)
class JobFile(JobSummary):
    """A complete UWS job document (top-level ``job`` element).

    Keyword arguments set the members of the same name.
    """

    def __init__(self, config=None, pos=None, **kwargs):
        super().__init__(config=config, pos=pos, **kwargs)
        self._version = None

    @xmlattribute
    def version(self):
        """The UWS document version attribute."""
        return self._version

    @version.setter
    def version(self, version):
        self._version = version

    def METHOD_NAME(self, iterator, config):
        for start, tag, data, pos in iterator:
            if not start:
                continue
            if tag == 'xml':
                continue
            if tag == 'job':
                # version was not required in v1.0, so default to that.
                self._version = data.get('version', '1.0')
                break
        return super().METHOD_NAME(iterator, config)

    def to_xml(self, fd):
        header = (
            '<?xml version="1.0" encoding="utf-8"?>\n'
            '<!-- Produced with pyvo.io.uws -->\n'
        )
        with convert_to_writable_filelike(fd) as out:
            writer = XMLWriter(out)
            writer.write(header)
            super().to_xml(writer)
import unittest
import networkx as nx
import numpy as np
from openqaoa.problems import SK
def terms_list_equality(terms_list1, terms_list2):
    """Check equality between two terms lists, edge order within a term
    not mattering (``[i, j]`` matches ``[j, i]``).

    Fixes the original implementation, which rebound its result variable on
    every iteration so only the LAST pair was actually compared (and which
    returned the builtin ``bool`` class for two empty lists).

    :param terms_list1: first list of terms (each term a sequence of qubits).
    :param terms_list2: second list of terms.
    :return: True if the lists have equal length and every pair of terms
        matches directly or reversed, False otherwise.
    """
    if len(terms_list1) != len(terms_list2):
        return False
    # Every pair must match, directly or with the edge reversed.
    return all(
        term1 == term2 or term1 == term2[::-1]
        for term1, term2 in zip(terms_list1, terms_list2)
    )
def terms_list_isclose(terms_list1, terms_list2):
    """Check that two terms lists are element-wise numerically close,
    edge order within a term not mattering.

    Fixes two defects in the original: (1) the result variable was rebound
    every iteration, so only the LAST pair decided the outcome; (2)
    ``if np.isclose(term1, term2)`` raises ``ValueError`` for multi-element
    terms because the truth value of a multi-element array is ambiguous —
    ``np.allclose`` reduces the comparison to a single bool.

    :param terms_list1: first list of terms (each term a sequence of values).
    :param terms_list2: second list of terms.
    :return: True if the lists have equal length and every pair of terms is
        close directly or reversed, False otherwise.
    """
    if len(terms_list1) != len(terms_list2):
        return False
    # Every pair must be close, directly or with the term reversed.
    return all(
        np.allclose(term1, term2) or np.allclose(term1, term2[::-1])
        for term1, term2 in zip(terms_list1, terms_list2)
    )
class TestSK(unittest.TestCase):
    """Tests for the SK class"""

    # NOTE: the expected edge/weight fixtures below depend on the exact draw
    # order of the seeded RNG inside the comprehensions; do not reorder the
    # loops or change the seeds.

    def test_sk_terms_weights_constant(self):
        """Test that SK creates a correct QUBO from the provided graph"""
        n_nodes = 5
        rng = np.random.default_rng(1234)
        G = nx.Graph()
        # Complete graph with weights rounded to {-1, 0, 1}.
        G.add_weighted_edges_from(
            [
                [i, j, round(2 * rng.random() - 1)]
                for i in range(n_nodes)
                for j in range(i + 1, n_nodes)
            ]
        )
        # Expected QUBO: only non-zero-weight edges plus one linear term per node.
        gr_edges = [[0, 1], [0, 3], [1, 3], [1, 4], [2, 4], [0], [1], [2], [3], [4]]
        gr_weights = [-1.0, -1.0, 1.0, 1.0, -1.0, 0.0, 0.0, 0.0, 0.0, 0.0]

        sk_prob_qubo = SK(G).qubo
        self.assertTrue(terms_list_equality(gr_edges, sk_prob_qubo.terms))
        self.assertEqual(gr_weights, sk_prob_qubo.weights)
        self.assertEqual(0.0, sk_prob_qubo.constant)

    def test_sk_random_problem(self):
        """Test SK random instance method"""
        seed = 123
        n_nodes = 5
        mu = 0
        sigma = 1

        # Build the same graph SK.random_instance is expected to build.
        rng = np.random.default_rng(seed)
        G = nx.Graph()
        G.add_weighted_edges_from(
            [
                [i, j, round(rng.normal(loc=mu, scale=sigma), 3)]
                for i in range(n_nodes)
                for j in range(i + 1, n_nodes)
            ]
        )
        sk_manual_prob = SK(G).qubo

        sk_random_prob = SK.random_instance(n_nodes=n_nodes, seed=seed).qubo

        self.assertTrue(terms_list_equality(sk_manual_prob.terms, sk_random_prob.terms))
        self.assertEqual(sk_manual_prob.weights, sk_random_prob.weights)
        self.assertEqual(sk_manual_prob.constant, sk_random_prob.constant)

    def test_sk_type_checking(self):
        """
        Checks if the type-checking returns the right error.
        """
        # graph type-check: anything that is not a networkx Graph must raise.
        graph_list = [(1, 2), {"node1": 1, "node2": 2}, np.array([1, 2])]
        for each_graph in graph_list:
            with self.assertRaises(TypeError) as e:
                SK(G=each_graph)
            self.assertEqual(
                "Input problem graph must be a networkx Graph.", str(e.exception)
            )

    def METHOD_NAME(self):
        """Test the SK random instance method classical solution"""
        seed = 1234
        sk_sol = SK.random_instance(n_nodes=2, seed=seed).classical_solution()

        # Known optimum for this seeded 2-node instance.
        sol = {"x_0": 1, "x_1": 0}

        self.assertEqual(sk_sol, sol)

    def test_sk_plot(self):
        """Test SK random instance method"""
        import matplotlib.pyplot as plt

        seed = 1234
        sk_random_prob = SK.random_instance(n_nodes=10, seed=seed)

        # A fixed (not necessarily optimal) assignment just to exercise plotting.
        sol = {
            "x_0": 1.0,
            "x_1": 1.0,
            "x_2": 0,
            "x_3": 1.0,
            "x_4": 0,
            "x_5": 1.0,
            "x_6": 0,
            "x_7": 0,
            "x_8": 0,
            "x_9": 1.0,
        }

        fig, ax = plt.subplots()
        sk_random_prob.plot_solution(sol, ax=ax)
        self.assertTrue(isinstance(ax, plt.Axes))


if __name__ == "__main__":
    unittest.main()
# -*- coding: utf-8 -*-
""" *==LICENSE==*
CyanWorlds.com Engine - MMOG client, server and tools
Copyright (C) 2011 Cyan Worlds, Inc.
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
Additional permissions under GNU GPL version 3 section 7
If you modify this Program, or any covered work, by linking or
combining it with any of RAD Game Tools Bink SDK, Autodesk 3ds Max SDK,
NVIDIA PhysX SDK, Microsoft DirectX SDK, OpenSSL library, Independent
JPEG Group JPEG library, Microsoft Windows Media SDK, or Apple QuickTime SDK
(or a modified version of those libraries),
containing parts covered by the terms of the Bink SDK EULA, 3ds Max EULA,
PhysX SDK EULA, DirectX SDK EULA, OpenSSL and SSLeay licenses, IJG
JPEG Library README, Windows Media SDK EULA, or QuickTime SDK EULA, the
licensors of this Program grant you additional
permission to convey the resulting work. Corresponding Source for a
non-source form of such a combination shall include the source code for
the parts of OpenSSL and IJG JPEG Library used as well as that of the covered
work.
You can contact Cyan Worlds, Inc. by email legal@cyan.com
or by snail mail at:
Cyan Worlds, Inc.
14617 N Newport Hwy
Mead, WA 99021
*==LICENSE==* """
"""
Module: xAgeSDLIntActEnabler.py
Age: Global
Date: March, 2003
Author: Adam Van Ornum
"""
from Plasma import *
from PlasmaTypes import *
# define the attributes that will be entered in max
# Max-exposed attributes: module-level objects that are both the designer UI
# fields (id, label) and the runtime accessors for their configured values.
stringSDLVarName = ptAttribString(1,"Age SDL Variable")
actActivator = ptAttribActivator(2,"Activator")
stringStartValues = ptAttribString(3,"Active state values")
intDefault = ptAttribInt(4,"Default setting",0)


class xAgeSDLIntActEnabler(ptResponder):
    """Enable/disable an activator based on the value of an integer Age SDL
    variable: the activator is enabled exactly when the variable's current
    value appears in the comma-separated "Active state values" list.
    """

    def __init__(self):
        # NOTE(review): calls ptModifier.__init__ although the class derives
        # from ptResponder — confirm this is the intended base initializer.
        ptModifier.__init__(self)
        self.id = 5302
        version = 1
        self.version = version
        # Int values of the SDL variable for which the activator is enabled.
        self.enabledStateList = []
        PtDebugPrint("__init__xAgeSDLIntActEnabler v.", version)

    def OnFirstUpdate(self):
        # Sanity check only: a missing SDL var name is logged, not fatal.
        if not stringSDLVarName.value:
            PtDebugPrint("ERROR: xAgeSDLIntActEnabler.OnFirstUpdate():\tERROR: missing SDL var name in max file")
            pass

    def OnServerInitComplete(self):
        ageSDL = PtGetAgeSDL()
        PtDebugPrint("DEBUG: xAgeSDLIntActEnabler.OnServerInitComplete:\tOn %s" % stringSDLVarName.value)

        # Parse the comma-separated "active" values into a list of ints.
        try:
            self.enabledStateList = stringStartValues.value.split(",")
            for i in range(len(self.enabledStateList)):
                self.enabledStateList[i] = int(self.enabledStateList[i].strip())
        except:
            PtDebugPrint("ERROR: xAgeSDLIntActEnabler.OnServerInitComplete():\tERROR: couldn't process start state list")

        # Subscribe to future changes of the SDL variable (see METHOD_NAME).
        PtDebugPrint("DEBUG: xAgeSDLIntActEnabler.OnServerInitComplete:\tSetting notify on %s" % stringSDLVarName.value)
        ageSDL.setNotify(self.key,stringSDLVarName.value,0.0)

        # Read the current value, falling back to the configured default.
        try:
            SDLvalue = ageSDL[stringSDLVarName.value][0]
        except:
            PtDebugPrint("ERROR: xAgeSDLIntActEnabler.OnServerInitComplete():\tERROR: age sdl read failed, SDLvalue = %d by default. stringSDLVarName = %s" % (intDefault.value,stringSDLVarName.value))
            SDLvalue = intDefault.value
        PtDebugPrint("DEBUG: xAgeSDLIntActEnabler.OnServerInitComplete:\tCurrent SDL value = %d" % SDLvalue)

        # Apply the initial enabled/disabled state.
        if SDLvalue in self.enabledStateList:
            actActivator.enable()
            PtDebugPrint("DEBUG: xAgeSDLIntActEnabler.OnServerInitComplete:\t%s activator enabled" % stringSDLVarName.value)
        else:
            actActivator.disable()
            PtDebugPrint("DEBUG: xAgeSDLIntActEnabler.OnServerInitComplete:\t%s activator disabled" % stringSDLVarName.value)

    def METHOD_NAME(self,VARname,SDLname,PlayerID,tag):
        # SDL change callback: react only to our configured variable.
        if VARname != stringSDLVarName.value:
            return

        ageSDL = PtGetAgeSDL()
        PtDebugPrint("DEBUG: xAgeSDLIntActEnabler.OnSDLNotify received: %s" % VARname)

        SDLvalue = ageSDL[stringSDLVarName.value][0]

        if SDLvalue in self.enabledStateList:
            PtDebugPrint("DEBUG: xAgeSDLIntActEnabler.OnSDLNotify: enabling activator")
            actActivator.enable()
        else:
            PtDebugPrint("DEBUG: xAgeSDLIntActEnabler.OnSDLNotify: disabling activator")
            actActivator.disable()
from .impl.dataset import DataSet
from .impl.decorators import with_project
from a2ml.api.utils.decorators import error_handler, authenticated
from .impl.cloud.rest_api import RestApi
from .credentials import Credentials
from .config import AugerConfig
from .impl.cloud.experiment_session import AugerExperimentSessionApi
class AugerDataset(object):
    """CLI-facing operations on Auger datasets: list, create, upload,
    delete, select, download and local preprocessing.

    All remote operations go through the REST API configured from the
    loaded credentials.
    """

    def __init__(self, ctx):
        self.ctx = ctx
        self.credentials = Credentials(ctx).load()
        # Attach a configured REST client to the shared context.
        self.ctx.rest_api = RestApi(
            self.credentials.api_url, self.credentials.token)

    @error_handler
    @authenticated
    @with_project(autocreate=False)
    def list(self, project):
        """Log all datasets of the project, marking the selected one."""
        count = 0
        selected = AugerConfig(self.ctx).get_dataset() #self.ctx.config.get('dataset', None)
        for dataset in iter(DataSet(self.ctx, project).list()):
            self.ctx.log(
                ('[%s] ' % ('x' if selected == dataset.get('name') else ' ')) +
                dataset.get('name')
            )
            count += 1
        self.ctx.log('%s DataSet(s) listed' % str(count))
        # NOTE(review): list() is called a second time here, re-fetching the
        # remote listing for the return value — confirm this is intended.
        return {'datasets': DataSet(self.ctx, project).list()}

    @error_handler
    @authenticated
    @with_project(autocreate=True)
    def create(self, project, source = None, validation=False, name=None, description=None):
        """Create a dataset from *source* and record it in the config."""
        dataset = self._create(project, source, validation, name, description)
        self.ctx.log('Created DataSet %s' % dataset.name)
        return {'created': dataset.name}

    def _create(self, project, source = None, validation=False, name=None, description=None):
        # Fall back to the configured source when none is given explicitly.
        if source is None:
            source = self.ctx.config.get('source', None)
        dataset = DataSet(self.ctx, project).create(source, name, description)
        AugerConfig(self.ctx).set_data_set(dataset.name, source, validation, name)

        return dataset

    @error_handler
    @authenticated
    @with_project(autocreate=False)
    def upload(self, project, source = None, name=None):
        """Upload a local data file and return its remote URL."""
        data_source_file, local_data_source = \
            DataSet.verify(source, self.ctx.config.path)
        file_url, file_name = DataSet(self.ctx, project, name).do_upload_file(data_source_file, local_data_source=local_data_source)

        #self.ctx.log('Upload dataset %s to %s' % (name, file_url))
        return file_url

    @error_handler
    @authenticated
    @with_project(autocreate=False)
    def delete(self, project, name):
        """Delete a dataset; clear config references if it was selected."""
        if name is None:
            name = AugerConfig(self.ctx).get_dataset() #self.ctx.config.get('dataset', None)

        DataSet(self.ctx, project, name).delete()
        if name == self.ctx.config.get('dataset', None):
            AugerConfig(self.ctx).set_data_set(None, None, False).set_experiment(None, None)
        self.ctx.log('Deleted dataset %s' % name)
        return {'deleted': name}

    @error_handler
    def select(self, name):
        """Mark *name* as the current dataset, resetting the experiment."""
        old_name = AugerConfig(self.ctx).get_dataset() #self.ctx.config.get('dataset', None)
        if name != old_name:
            AugerConfig(self.ctx).set_data_set(name, None, False).set_experiment(None, None)

        self.ctx.log('Selected DataSet %s' % name)
        return {'selected': name}

    @error_handler
    @authenticated
    @with_project(autocreate=False)
    def download(self, project, name, path_to_download):
        """Download a dataset; if no name is known, resolve it from the
        latest experiment session."""
        from .impl.experiment import Experiment

        if name is None:
            name = AugerConfig(self.ctx).get_dataset() #self.ctx.config.get('dataset', None)

        if name is None:
            # Fall back to the datasource recorded on the latest run.
            run_id = self.ctx.config.get('experiment/experiment_session_id')
            if not run_id:
                run_id = Experiment(self.ctx, None, project=project)._get_latest_run()

            if run_id:
                session_api = AugerExperimentSessionApi(
                    self.ctx, None, None, run_id)
                session_props = session_api.properties()
                name = session_props.get('datasource_name')

        if name is None:
            raise Exception("No dataset name is found.")

        file_name = DataSet(self.ctx, project, name).download(path_to_download)

        self.ctx.log('Downloaded dataset %s to %s' % (name, file_name))
        return {'dowloaded': name, 'file': file_name}

    def preprocess_data(self, data, preprocessors, locally):
        """Apply *preprocessors* to *data*; only local execution is supported."""
        if locally:
            return self.METHOD_NAME(data, preprocessors)
        else:
            raise Exception("preprocess_data supported with locally=True only.")

    def METHOD_NAME(self, data, preprocessors):
        """Run the (text-only) preprocessor chain on *data* locally."""
        from auger_ml.preprocessors.text import TextPreprocessor

        res = data
        for p in preprocessors:
            # Each entry is a single-key dict: {name: params}.
            name = list(p.keys())[0]
            params = list(p.values())[0]
            if name != 'text':
                raise Exception("Only text preprocessor supported.")

            tp = TextPreprocessor(params)
            res = tp.fit_transform(res)

        return res
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import numpy as np
import os
import pytest
import re
import sys
import tvm
import tvm.relay
import tvm.testing
import tvm.contrib.hexagon as hexagon
@pytest.fixture(autouse=True)
def register_linker():
    """Swap in a no-op linker for every test and restore the real one after."""
    saved = hexagon.hexagon_link()
    # A phony linker lets codegen tests run without a Hexagon toolchain.
    hexagon.register_linker(lambda: "/bin/true")
    yield None
    # Restore the previous registration.
    hexagon.register_linker(saved)
@tvm.testing.requires_hexagon
def test_basic():
    """Compile a 128-lane uint8 vector add for Hexagon (with and without
    pipeline offload) and check that HVX vadd instructions appear in the
    generated assembly."""
    target = tvm.target.hexagon("v66", hvx=128)

    def check_add(offload):
        A = tvm.te.placeholder((128,), dtype="uint8", name="A")
        # NOTE(review): B is created with name="A" — likely a copy-paste
        # typo, but harmless for this assembly check.
        B = tvm.te.placeholder((128,), dtype="uint8", name="A")
        C = tvm.te.compute((128,), lambda i: A[i] + B[i], name="C")
        s = tvm.te.create_schedule(C.op)
        if offload:
            # Offload path: bind to the "pipeline" thread axis and inspect
            # the imported (device) module.
            xo, xi = s[C].split(s[C].op.axis[0], nparts=1)
            s[C].bind(xo, tvm.te.thread_axis("pipeline"))
            m = tvm.build(s, [C, A, B], target=target, name="offload_add")
            hexm = m.imported_modules[0]
        else:
            hexm = tvm.build(
                s, [C, A, B], target=tvm.target.Target(target, target), name="native_add"
            )

        asm = hexm.get_source("s")
        # Byte-wide HVX vector adds must be present in the assembly.
        vadds = re.findall(r"v[0-9]+.b = vadd\(v[0-9]+.b,v[0-9]+.b\)", asm)
        assert vadds  # Check that it's non-empty

    check_add(True)
    check_add(False)
@tvm.testing.requires_hexagon
def METHOD_NAME():
    """Check that the Hexagon LLVM module carries the +hvx-length128b feature."""
    target = tvm.target.hexagon("v66", hvx=128)

    # Trivial elementwise kernel — just enough to force codegen.
    inp = tvm.te.placeholder((128,), dtype="uint8", name="A")
    outp = tvm.te.compute((128,), lambda i: inp[i] + 1, name="C")
    sched = tvm.te.create_schedule(outp.op)
    mod = tvm.build(
        sched, [outp, inp], target=tvm.target.Target(target, target), name="add_one"
    )

    # The HVX vector length must appear in the LLVM function attributes.
    matches = re.findall(r"attributes.*\+hvx-length128b", mod.get_source("ll"))
    assert matches  # Check that it's non-empty
@tvm.testing.requires_hexagon
def test_alloc_vtcm():
    """Check that buffers placed in the local.vtcm scope get matching VTCM
    allocate/free runtime calls inserted by the lowering pass."""
    target = tvm.target.hexagon("v66")

    buf_len = 2048
    A = tvm.te.placeholder((buf_len,), name="A", dtype="int8")
    B = tvm.te.placeholder((buf_len,), name="B", dtype="int8")

    A_buf = tvm.te.compute((buf_len,), lambda *i: A(*i), "A_buf")
    B_buf = tvm.te.compute((buf_len,), lambda *i: B(*i), "B_buf")
    C = tvm.te.compute((buf_len,), lambda *i: A_buf(*i) + B_buf(*i), name="C")
    s = tvm.te.create_schedule(C.op)

    # Use VTCM for each buffer.
    s[A_buf].set_scope("local.vtcm")
    s[B_buf].set_scope("local.vtcm")

    # The VTCM lowering pass rewrites the scoped allocations into runtime calls.
    config = {"tir.add_lower_pass": hexagon.ir_lower_vtcm_pass()}
    with tvm.transform.PassContext(config=config):
        irmod = tvm.lower(s, [A, B, C], name="alloc_vtcm")

    calls = re.findall("HexagonBackend[A-Za-z]*VTCM", str(irmod["alloc_vtcm"]))
    assert "HexagonBackendAllocateVTCM" in calls
    assert "HexagonBackendFreeVTCM" in calls
@tvm.testing.requires_hexagon
def test_llvm_options():
    """Extra llvm_options must survive target creation and a build."""
    target = tvm.target.hexagon("v66", llvm_options="-hexagon-noopt")
    zeros = tvm.te.compute((10,), lambda _: tvm.tir.const(0, "int32"))
    schedule = tvm.te.create_schedule(zeros.op)
    # Building exercises BuildHexagon; a target-attribute type mismatch
    # would have raised before the assertion below.
    tvm.build(schedule, [zeros], target=target, name="zero")
    # The option string must be preserved on the target object.
    assert re.search("-hexagon-noopt", str(target))
if __name__ == "__main__":
tvm.testing.main() |
#
# Project: MXCuBE
# https://github.com/mxcube
#
# This file is part of MXCuBE software.
#
# MXCuBE is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# MXCuBE is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU General Lesser Public License
# along with MXCuBE. If not, see <http://www.gnu.org/licenses/>.
"""Abstract class for a Sample View.
Defines methods to handle snapshots, animation and shapes.
"""
__copyright__ = """2019 by the MXCuBE collaboration """
__license__ = "LGPLv3+"
import abc
from mxcubecore.BaseHardwareObjects import HardwareObject
class AbstractSampleView(HardwareObject):
"""AbstractSampleView Class"""
__metaclass__ = abc.ABCMeta
def __init__(self, name):
super().__init__(name)
self._camera = None
self._focus = None
self._zoom = None
self._frontlight = None
self._backlight = None
self._shapes = None
@abc.abstractmethod
def get_snapshot(self, overlay=True, bw=False, return_as_array=False):
"""Get snappshot(s)
Args:
overlay(bool): Display shapes and other items on the snapshot
bw(bool): return grayscale image
return_as_array(bool): return as np array
"""
@abc.abstractmethod
def save_snapshot(self, filename, overlay=True, bw=False):
"""Save a snapshot to file.
Args:
filename (str): The filename.
overlay(bool): Display shapes and other items on the snapshot.
bw(bool): Return grayscale image.
"""
def save_scene_animation(self, filename, duration=1):
"""Take snapshots and create an animation.
Args:
filename (str): The filename.
duration (int): Duration time [s].
"""
@property
def camera(self):
"""Get camera object.
Returns:
(AbstractCamera): Camera hardware object.
"""
return self._camera
@property
def shapes(self):
"""Get shapes dict.
Returns:
(AbstractShapes): Shapes hardware object.
"""
return self._shapes
@property
def zoom(self):
"""Get zoom object.
Returns:
(AbstractZoom): Zoom gardware object.
"""
return self._zoom
@property
def frontlight(self):
"""Get Front light object
Returns:
(AbstractLight): Front light hardware object.
"""
return self._frontlight
@property
def backlight(self):
"""Get Back light object.
Returns:
(AbstractLight): Back light hardware object.
"""
return self._backlight
@abc.abstractmethod
def start_centring(self, tree_click=True):
"""
Starts centring procedure
"""
return
@abc.abstractmethod
def cancel_centring(self):
"""
Cancels current centring procedure
"""
return
@abc.abstractmethod
def start_auto_centring(self):
"""
Start automatic centring procedure
"""
return
# Not sure these should be abstarct ?
# @abc.abstractmethod
# def create_line(self):
# return
# @abc.abstractmethod
# def create_auto_line(self):
# return
# @abc.abstractmethod
# def create_grid(self, spacing):
# return
@abc.abstractmethod
def add_shape(self, shape):
"""Add the shape <shape> to the dictionary of handled shapes.
Args:
shape(Shape): Shape to add
"""
return
@abc.abstractmethod
def add_shape_from_mpos(self, mpos_list, screen_cord, _type):
"""Add a shape of type <t>, with motor positions from mpos_list and
screen position screen_coord.
Args:
mpos_list (list[mpos_list]): List of motor positions
screen_coord (tuple(x, y): Screen cordinate for shape
_type (str): Type str for shape, P (Point), L (Line), G (Grid)
Returns:
(Shape): Shape of type _type
"""
return
@abc.abstractmethod
def delete_shape(self, sid):
"""Remove the shape with specified id from the list of handled shapes.
Args:
sid (str): The id of the shape to remove
Returns:
(Shape): The removed shape
"""
return
@abc.abstractmethod
def select_shape(self, sid):
"""Select the shape <shape>.
Args:
sid (str): Id of the shape to select.
"""
return
@abc.abstractmethod
def de_select_shape(self, sid):
"""De-select the shape with id <sid>.
Args:
sid (str): The id of the shape to de-select.
"""
return
@abc.abstractmethod
def METHOD_NAME(self, sid):
"""Check if Shape with specified id is selected.
Args:
sid (int): Shape id.
Returns:
(Boolean) True if selected, False otherwise.
"""
@abc.abstractmethod
def get_selected_shapes(self):
"""Get all selected shapes.
Returns:
(list) List of the selected Shapes.
"""
return
@abc.abstractmethod
def de_select_all(self):
"""De-select all shapes."""
return
@abc.abstractmethod
def select_shape_with_cpos(self, cpos):
    """Select the shape with the associated centred position *cpos*.

    Args:
        cpos (CentredPosition): Centred position.
    """
    return
@abc.abstractmethod
def clear_all(self):
    """Clear the shapes, remove all contents."""
    return
@abc.abstractmethod
def get_shape(self, sid):
    """Get the Shape with id *sid*.

    Args:
        sid (str): id of Shape to retrieve.

    Returns:
        (Shape): The single shape with the given id (the old docstring
            incorrectly said "All the shapes").
    """
    return
@abc.abstractmethod
def get_grid(self):
    """Get the first of the selected grids (the one selected first in a
    sequence of select operations).

    Returns:
        (dict): The first selected grid as a dictionary.
    """
    return
@abc.abstractmethod
def get_points(self):
    """Get all currently handled centred points.

    Returns:
        (list): All points currently handled.
    """
    return
@abc.abstractmethod
def get_lines(self):
    """Get all currently handled lines.

    Returns:
        (list): All lines currently handled.
    """
    return
@abc.abstractmethod
def get_grids(self):
    """Get all currently handled grids.

    Returns:
        (list): All grids currently handled.
    """
    return
@abc.abstractmethod
def inc_used_for_collection(self):
    # Signature incompatible with SampleView
    """Increase the counter that keeps track of collects made on this shape.

    NOTE(review): the old docstring documented a ``cpos`` argument, but the
    signature takes none -- confirm intended signature against SampleView.
    """
    return
299,627 | load pathio | import unittest
import arbor as A
from pathlib import Path
from tempfile import TemporaryDirectory as TD
from io import StringIO
from functools import partial
acc = """(arbor-component
(meta-data
(version "0.1-dev"))
(cable-cell
(morphology
(branch 0 -1
(segment 0
(point -3.000000 0.000000 0.000000 3.000000)
(point 3.000000 0.000000 0.000000 3.000000)
1)))
(label-dict
(region-def "soma"
(tag 1))
(locset-def "mid"
(location 0 0.5)))
(decor
(default
(membrane-potential -40.000000))
(default
(ion-internal-concentration "ca" 0.000050))
(default
(ion-external-concentration "ca" 2.000000))
(default
(ion-reversal-potential "ca" 132.457934))
(default
(ion-internal-concentration "k" 54.400000))
(default
(ion-external-concentration "k" 2.500000))
(default
(ion-reversal-potential "k" -77.000000))
(default
(ion-internal-concentration "na" 10.000000))
(default
(ion-external-concentration "na" 140.000000))
(default
(ion-reversal-potential "na" 50.000000))
(paint
(tag 1)
(density
(mechanism "default::hh"
("gnabar" 0.120000)
("el" -54.300000)
("q10" 0.000000)
("gl" 0.000300)
("gkbar" 0.036000))))
(place
(location 0 0.5)
(current-clamp
(envelope
(10.000000 0.800000)
(12.000000 0.000000))
0.000000 0.000000)
"I Clamp 0"))))
"""
swc_arbor = """1 1 -5.0 0.0 0.0 5.0 -1
2 1 0.0 0.0 0.0 5.0 1
3 1 5.0 0.0 0.0 5.0 2
"""
swc_neuron = """1 1 0.1 0.2 0.3 0.4 -1
"""
asc = """((CellBody)\
(0 0 0 4)\
)\
((Dendrite)\
(0 2 0 2)\
(0 5 0 2)\
(\
(-5 5 0 2)\
|\
(6 5 0 2)\
)\
)\
((Axon)\
(0 -2 0 2)\
(0 -5 0 2)\
(\
(-5 -5 0 2)\
|\
(6 -5 0 2)\
)\
)
"""
def load_string(loaders, morph_str):
    """Feed *morph_str* to every loader as an in-memory text stream."""
    for load in loaders:
        load(StringIO(morph_str))
def load_file(loaders, morph_str, morph_fn):
    """Write *morph_str* to a temp file and hand each loader an open file object."""
    for load in loaders:
        with TD() as tmp_dir:
            target = Path(tmp_dir) / morph_fn
            target.write_text(morph_str)
            with open(target) as handle:
                load(handle)
def load_name(loaders, morph_str, morph_fn):
    """Write *morph_str* to a temp file and hand each loader the file name (str)."""
    for load in loaders:
        with TD() as tmp_dir:
            target = Path(tmp_dir) / morph_fn
            target.write_text(morph_str)
            load(str(target))
def METHOD_NAME(loaders, morph_str, morph_fn):
    """Write *morph_str* to a temp file and hand each loader a pathlib.Path."""
    for load in loaders:
        with TD() as tmp_dir:
            target = Path(tmp_dir) / morph_fn
            target.write_text(morph_str)
            load(target)
class TestAccIo(unittest.TestCase):
    """Exercise ACC (arbor-component) loading from stream, file, name and Path."""

    @staticmethod
    def loaders():
        # Loaders under test; each must accept a stream, file object, str or Path.
        return (A.load_component,)

    def test_stringio(self):
        load_string(self.loaders(), acc)

    def test_fileio(self):
        load_file(self.loaders(), acc, "test.acc")

    def test_nameio(self):
        load_name(self.loaders(), acc, "test.acc")

    def test_pathio(self):
        METHOD_NAME(self.loaders(), acc, "test.acc")
class TestSwcArborIo(unittest.TestCase):
    """Exercise arbor-flavoured SWC loading from stream, file, name and Path."""

    @staticmethod
    def loaders():
        return (A.load_swc_arbor, partial(A.load_swc_arbor, raw=True))

    def test_stringio(self):
        load_string(self.loaders(), swc_arbor)

    def test_fileio(self):
        load_file(self.loaders(), swc_arbor, "test.swc")

    def test_nameio(self):
        load_name(self.loaders(), swc_arbor, "test.swc")

    def test_pathio(self):
        METHOD_NAME(self.loaders(), swc_arbor, "test.swc")
class TestSwcNeuronIo(unittest.TestCase):
    """Exercise NEURON-flavoured SWC loading from stream, file, name and Path."""

    @staticmethod
    def loaders():
        return (A.load_swc_neuron, partial(A.load_swc_neuron, raw=True))

    def test_stringio(self):
        load_string(self.loaders(), swc_neuron)

    def test_fileio(self):
        load_file(self.loaders(), swc_neuron, "test.swc")

    def test_nameio(self):
        load_name(self.loaders(), swc_neuron, "test.swc")

    def test_pathio(self):
        METHOD_NAME(self.loaders(), swc_neuron, "test.swc")
class TestAscIo(unittest.TestCase):
    """Exercise Neurolucida ASC loading from stream, file, name and Path."""

    @staticmethod
    def loaders():
        return (A.load_asc, partial(A.load_asc, raw=True))

    def test_stringio(self):
        load_string(self.loaders(), asc)

    def test_fileio(self):
        load_file(self.loaders(), asc, "test.asc")

    def test_nameio(self):
        load_name(self.loaders(), asc, "test.asc")

    def test_pathio(self):
        METHOD_NAME(self.loaders(), asc, "test.asc")
class serdes_recipe(A.recipe):
    """Minimal single-cell recipe used to exercise simulation (de)serialization."""

    def __init__(self):
        A.recipe.__init__(self)
        self.the_props = A.neuron_cable_properties()
        self.the_props.catalogue = A.default_catalogue()

    def num_cells(self):
        return 1

    def cell_kind(self, _):
        return A.cell_kind.cable

    def cell_description(self, gid):
        # A soma segment (tag=1) plus a thin cable extension (tag=3),
        # painted with passive dynamics and finely discretized.
        tree = A.segment_tree()
        s = tree.append(A.mnpos, A.mpoint(-3, 0, 0, 3), A.mpoint(3, 0, 0, 3), tag=1)
        _ = tree.append(s, A.mpoint(3, 0, 0, 1), A.mpoint(33, 0, 0, 1), tag=3)
        dec = A.decor()
        dec.paint("(all)", A.density("pas"))
        dec.discretization(A.cv_policy("(max-extent 1)"))
        return A.cable_cell(tree, dec)

    def global_properties(self, kind):
        return self.the_props
# Very simple test for SerDes
class TestSerdes(unittest.TestCase):
    """Round-trip a simulation through serialize/deserialize."""

    def test_serialize(self):
        self.maxDiff = 1024 * 1024
        rec = serdes_recipe()
        sim = A.simulation(rec)
        jsn = sim.serialize()
        # Deserializing the just-serialized state must be accepted without error.
        sim.deserialize(jsn)
299,628 | client | # LIBTBX_SET_DISPATCHER_NAME prime.mpi_scale
"""
Find initial scaling factors for all integration results
"""
from __future__ import absolute_import, division, print_function
from mpi4py import MPI
import sys, os
from prime.postrefine.mod_input import process_input, read_pickles
from prime.postrefine.mod_util import intensities_scaler
from prime.postrefine.mod_merge_data import merge_data_handler
from cctbx.array_family import flex
import time, math
from six.moves import range
from six.moves import zip
# setup mpi
comm = MPI.COMM_WORLD
rank = comm.Get_rank()
size = comm.Get_size()
assert size>1
def master(frame_objects, iparams, activity):
    """Rank-0 work distributor for one pipeline stage.

    Clients announce readiness by sending their rank; the master replies with
    an ``(activity, payload)`` tuple. When all batches have been handed out,
    every client is told to stop with an ``'endrun'`` message.

    :param frame_objects: items for this stage (frame files, scale results,
        or pre-merge results depending on *activity*).
    :param iparams: user parameters, shipped to clients inside each message.
    :param activity: one of "scale", "pre_merge" or "merge".
    """
    if activity == "scale":
        # One frame per message.
        n_batch = 1
        indices = range(0, len(frame_objects), n_batch)
        for i in indices:
            i_end = i+n_batch if i+n_batch < len(frame_objects) else len(frame_objects)
            rankreq = comm.recv(source=MPI.ANY_SOURCE)
            comm.send((activity, (frame_objects[i:i_end], iparams)), dest=rankreq)
    if activity == "pre_merge":
        # Roughly three batches per worker, but at least 10 items per batch.
        n_batch = int(len(frame_objects)/(size*3))
        if n_batch < 10: n_batch = 10
        indices = range(0, len(frame_objects), n_batch)
        for i in indices:
            i_end = i+n_batch if i+n_batch < len(frame_objects) else len(frame_objects)
            rankreq = comm.recv(source=MPI.ANY_SOURCE)
            comm.send((activity, (frame_objects[i:i_end], iparams)), dest=rankreq)
    if activity == "merge":
        its = intensities_scaler()
        cpo = its.combine_pre_merge(frame_objects, iparams)
        #assign at least 100k reflections at a time
        n_batch = int(1e5/(len(cpo[1])/cpo[0]))
        if n_batch < 1: n_batch = 1
        print("Merging with %d batch size"%(n_batch))
        indices = range(0, cpo[0], n_batch)
        for i in indices:
            rankreq = comm.recv(source=MPI.ANY_SOURCE)
            i_end = i+n_batch if i+n_batch < cpo[0] else cpo[0]
            # Select the reflections whose group index falls in [i, i_end).
            sel = flex.bool([sel_l and sel_h for sel_l, sel_h in zip(cpo[1]>=i, cpo[1]<i_end)])
            batch_prep = [cpo_elem.select(sel) for cpo_elem in cpo[1:13]]
            batch_prep.insert(0, i_end-i)
            # Re-base group indices so each batch starts at zero.
            batch_prep[1] -= i
            batch_prep.append(cpo[13])
            batch_prep.append(cpo[14])
            batch_prep.append(cpo[15].select(sel))
            batch_prep.append("")
            comm.send((activity, (tuple(batch_prep), iparams)), dest=rankreq)
    print("Master for %s is completed. Time to stop all %d clients"%(activity, size-1))
    # stop clients
    for rankreq in range(size-1):
        rankreq = comm.recv(source=MPI.ANY_SOURCE)
        comm.send('endrun', dest=rankreq)
def METHOD_NAME():
    """Client loop: request work from rank 0 until an 'endrun' message arrives.

    :return: list of partial results for whichever activity was processed
        ("scale", "pre_merge" or "merge").
    """
    result = []
    while True:
        # Announce readiness, then block for the next work item.
        comm.send(rank, dest=0)
        msg = comm.recv(source=0)
        if str(msg) == 'endrun':
            break
        #receive contents for processing
        activity, act_params = msg
        if activity == "scale":
            frame_files, iparams = act_params
            from prime.postrefine import postref_handler
            prh = postref_handler()
            for frame_index, frame_file in enumerate(frame_files):
                pres, _ = prh.scale_frame_by_mean_I(frame_index, frame_file, iparams, 0, 'average')
                result.append(pres)
        if activity == "pre_merge":
            frame_results, iparams = act_params
            its = intensities_scaler()
            prep_output = its.prepare_output(frame_results, iparams, 'average')
            result.append(prep_output)
        if activity == "merge":
            batch_prep, iparams = act_params
            its = intensities_scaler()
            mdh, _, txt_out_rejection = its.calc_avg_I_cpp(batch_prep, iparams, 'average')
            result.append([mdh, txt_out_rejection])
    return result
def run(argv):
    """Drive the three-stage scale / pre-merge / merge MPI pipeline.

    Rank 0 acts as the work distributor (see ``master``); all other ranks run
    the client loop. Results are gathered to rank 0 between stages.

    NOTE(review): despite the comment below, no ``comm.bcast`` is performed --
    parameters travel to clients inside the master's work messages instead.
    """
    comm.Barrier()
    start_time = MPI.Wtime()
    #broadcast parameters
    if rank == 0:
        iparams, txt_out_input = process_input(argv)
        iparams.flag_volume_correction = False
        iparams.flag_hush = True
        print(txt_out_input)
        frame_files = read_pickles(iparams.data)
    else:
        iparams = None
        frame_files = None
    comm.Barrier()
    #assign scaling task
    if rank == 0:
        master(frame_files, iparams, "scale")
        result = []
    else:
        result = METHOD_NAME()
    result = comm.gather(result, root=0)
    comm.Barrier()
    #pre-merge task
    if rank == 0:
        results = sum(result, [])
        print("Scaling is done on %d cores for %d frames"%(size, len(results)))
        master(results, iparams, "pre_merge")
        result = []
    else:
        result = METHOD_NAME()
    result = comm.gather(result, root=0)
    comm.Barrier()
    #merge task
    if rank == 0:
        print("Pre-merge is done on %d cores"%(len(result)))
        master(result, iparams, "merge")
        result = []
    else:
        result = METHOD_NAME()
    #finalize merge
    result = comm.gather(result, root=0)
    comm.Barrier()
    if rank == 0:
        print("Merge completed on %d cores"%(len(result)))
        results = sum(result, [])
        mdh = merge_data_handler()
        txt_out_rejection = ""
        for _mdh, _txt_out_rejection in results:
            mdh.extend(_mdh)
            txt_out_rejection += _txt_out_rejection
        #select only indices with non-Inf non-Nan stats
        selections = flex.bool([False if (math.isnan(r0) or math.isinf(r0) or math.isnan(r1) or math.isinf(r1)) else True for r0, r1 in zip(mdh.r_meas_div, mdh.r_meas_divisor)])
        mdh.reduce_by_selection(selections)
        its = intensities_scaler()
        mdh, txt_merge_mean_table = its.write_output(mdh, iparams, 'test', 'average')
        print(txt_merge_mean_table)
    #collect time profile
    comm.Barrier()
    end_time = MPI.Wtime()
    txt_time = 'Elapsed Time (s):%10.2f\n'%(end_time-start_time)
    #write log output
    if rank == 0:
        print(txt_time)
        with open(os.path.join(iparams.run_no, 'log.txt'), 'w') as f:
            f.write(txt_out_input+txt_merge_mean_table+txt_time)
        with open(os.path.join(iparams.run_no, 'rejections.txt'), 'w') as f:
            f.write(txt_out_rejection)
    MPI.Finalize()
if __name__ == "__main__":
    # None lets process_input fall back to its own defaults.
    argv = sys.argv[1:] if len(sys.argv) > 1 else None
    run(argv)
299,629 | test get ls tree line ignored | import io
from datalad_service.common.annex import parse_ls_tree_line, read_ls_tree_line, compute_rmet, parse_remote_line, parse_rmet_line, read_rmet_file, encode_remote_url
expected_file_object = {
'filename': 'dataset_description.json',
'id': '43502da40903d08b18b533f8897330badd6e1da3',
'key': '838d19644b3296cf32637bbdf9ae5c87db34842f',
'size': 101
}
def test_parse_ls_tree_line():
    """A regular blob entry parses with a positive size field."""
    filename, mode, obj_type, obj_hash, size = parse_ls_tree_line(
        """100644 blob a786c385bd1812410d01177affb6ce834d85facd 459 dataset_description.json""")
    assert int(size) > 0
def test_parse_ls_tree_line_annexed():
    """An annexed symlink entry (mode 120000) still parses with a positive size."""
    filename, mode, obj_type, obj_hash, size = parse_ls_tree_line(
        """120000 blob 570cb4a3fd80de6e8491312c935bfe8029066361 141 derivatives/mriqc/reports/sub-01_ses-01_T1w.html""")
    assert int(size) > 0
def test_parse_ls_tree_line_submodule():
    """A submodule (commit) entry has the literal '-' as its size."""
    filename, mode, obj_type, obj_hash, size = parse_ls_tree_line(
        """160000 commit fcafd17fbfa44495c7f5f8a0777e5ab610b09500 - code/facedistid_analysis""")
    assert size == '-'
def test_get_ls_tree_line():
    """A regular blob is appended to files with metadata; no symlink output."""
    files = []
    symlinkFilenames = []
    symlinkObjects = []
    read_ls_tree_line("""100644 blob a786c385bd1812410d01177affb6ce834d85facd 459 dataset_description.json""",
                      files, symlinkFilenames, symlinkObjects)
    assert files == [
        {
            'filename': 'dataset_description.json',
            'size': 459,
            'id': '78dd92373749f62af23f3ae499b7a8ac33418fff',
            'key': 'a786c385bd1812410d01177affb6ce834d85facd',
            'urls': [],
            'annexed': False,
            'directory': False}]
    assert symlinkFilenames == []
    assert symlinkObjects == []
def METHOD_NAME():
    """Ignored files (e.g. .gitattributes) produce no output at all."""
    files = []
    symlinkFilenames = []
    symlinkObjects = []
    read_ls_tree_line("""100644 blob a786c385bd1812410d01177affb6ce834d85facd 459 .gitattributes""",
                      files, symlinkFilenames, symlinkObjects)
    assert files == []
    assert symlinkFilenames == []
    assert symlinkObjects == []
def test_get_ls_tree_line_annexed():
    """A symlink (annexed) entry is queued for later resolution, not in files."""
    files = []
    symlinkFilenames = []
    symlinkObjects = []
    read_ls_tree_line("""120000 blob 570cb4a3fd80de6e8491312c935bfe8029066361 141 derivatives/mriqc/reports/sub-01_ses-01_T1w.html""",
                      files, symlinkFilenames, symlinkObjects)
    assert files == []
    assert symlinkFilenames == [
        'derivatives/mriqc/reports/sub-01_ses-01_T1w.html']
    assert symlinkObjects == ['570cb4a3fd80de6e8491312c935bfe8029066361']
def test_get_ls_tree_line_submodule():
    """Submodule (commit) entries are skipped entirely."""
    files = []
    symlinkFilenames = []
    symlinkObjects = []
    read_ls_tree_line("""160000 commit fcafd17fbfa44495c7f5f8a0777e5ab610b09500 - code/facedistid_analysis""",
                      files, symlinkFilenames, symlinkObjects)
    assert files == []
    assert symlinkFilenames == []
    assert symlinkObjects == []
def test_compute_rmet_git():
    """A git SHA1 key maps to a GIT-- rmet path with hashed directory prefix."""
    # Test a git SHA1 key
    assert compute_rmet(
        '99fe93bfea62c16a10488593da870df25d09be81') == '0f5/0b4/GIT--99fe93bfea62c16a10488593da870df25d09be81.log.rmet'
def test_compute_rmet_git_legacy():
    """In legacy mode the same SHA1 key maps to a SHA1-- rmet path."""
    # Test a git SHA1 key
    assert compute_rmet(
        '99fe93bfea62c16a10488593da870df25d09be81', legacy=True) == '9e2/03e/SHA1--99fe93bfea62c16a10488593da870df25d09be81.log.rmet'
def test_compute_rmet_annex():
    """A git-annex MD5E key keeps its full key (with extension) in the path."""
    # Test a git annex MD5E key
    assert compute_rmet(
        'MD5E-s12102144--d614929593bf2a7cccea90bea67255f4.bdf') == '9ce/c07/MD5E-s12102144--d614929593bf2a7cccea90bea67255f4.bdf.log.rmet'
def test_compute_rmet_sha256_annex():
    """A git-annex SHA256E key is handled the same way as MD5E."""
    # Test a git annex SHA256E key
    assert compute_rmet(
        'SHA256E-s311112--c3527d7944a9619afb57863a34e6af7ec3fe4f108e56c860d9e700699ff806fb.nii.gz') == '2ed/6ea/SHA256E-s311112--c3527d7944a9619afb57863a34e6af7ec3fe4f108e56c860d9e700699ff806fb.nii.gz.log.rmet'
def test_parse_remote_line():
    """A remote.log line yields the remote's public url and uuid."""
    remote = parse_remote_line("""57894849-d0c8-4c62-8418-3627be18a196 autoenable=true bucket=openneuro.org datacenter=US encryption=none exporttree=yes fileprefix=ds002778/ host=s3.amazonaws.com name=s3-PUBLIC partsize=1GiB port=80 public=yes publicurl=http://openneuro.org.s3.amazonaws.com/ storageclass=STANDARD type=S3 versioning=yes timestamp=1588743361.538097946s""")
    assert remote == {'url': 'http://openneuro.org.s3.amazonaws.com/',
                      'uuid': '57894849-d0c8-4c62-8418-3627be18a196'}
def test_parse_rmet_line():
    """An rmet line combines the remote url, file path and S3 versionId."""
    remote = {'url': 'http://openneuro.org.s3.amazonaws.com/',
              'uuid': '57894849-d0c8-4c62-8418-3627be18a196'}
    url = parse_rmet_line(
        remote, """1590213748.042921433s 57894849-d0c8-4c62-8418-3627be18a196:V +iVcEk18e3J2WQys4zr_ANaTPfpUufW4Y#ds002778/dataset_description.json""")
    assert url == 'http://openneuro.org.s3.amazonaws.com/ds002778/dataset_description.json?versionId=iVcEk18e3J2WQys4zr_ANaTPfpUufW4Y'
def test_parse_rmet_line_https():
    """Bucket-in-path style https remote urls are joined correctly too."""
    remote = {'url': 'https://s3.amazonaws.com/openneuro.org',
              'uuid': '57894849-d0c8-4c62-8418-3627be18a196'}
    url = parse_rmet_line(
        remote, """1590213748.042921433s 57894849-d0c8-4c62-8418-3627be18a196:V +iVcEk18e3J2WQys4zr_ANaTPfpUufW4Y#ds002778/dataset_description.json""")
    assert url == 'https://s3.amazonaws.com/openneuro.org/ds002778/dataset_description.json?versionId=iVcEk18e3J2WQys4zr_ANaTPfpUufW4Y'
def test_read_rmet_file():
    """A whole rmet file (header plus line) resolves to the versioned url."""
    remote = {'url': 'http://openneuro.org.s3.amazonaws.com/',
              'uuid': '57894849-d0c8-4c62-8418-3627be18a196'}
    catFile = io.StringIO(""":::99fe93bfea62c16a10488593da870df25d09be81
1590213748.042921433s 57894849-d0c8-4c62-8418-3627be18a196:V +iVcEk18e3J2WQys4zr_ANaTPfpUufW4Y#ds002778/dataset_description.json""")
    url = read_rmet_file(remote, catFile)
    assert url == 'http://openneuro.org.s3.amazonaws.com/ds002778/dataset_description.json?versionId=iVcEk18e3J2WQys4zr_ANaTPfpUufW4Y'
def test_remote_url_encoding():
    """Path characters like '+' are percent-encoded; query strings are preserved."""
    assert encode_remote_url(
        "https://s3.amazonaws.com/openneuro.org/ds000248/derivatives/freesurfer/subjects/sub-01/mri/aparc+aseg.mgz?versionId=2Wx7w.fCYeGzGWLnW9sxWsPdztl.2HL0") == "https://s3.amazonaws.com/openneuro.org/ds000248/derivatives/freesurfer/subjects/sub-01/mri/aparc%2Baseg.mgz?versionId=2Wx7w.fCYeGzGWLnW9sxWsPdztl.2HL0"
    assert encode_remote_url(
        "https://s3.amazonaws.com/openneuro.org/ds000248/sub-01/anat/sub-01_T1w.nii.gz?versionId=8uTXIQ10Blcp2GeAVJJCHL5PimkSaQZL") == "https://s3.amazonaws.com/openneuro.org/ds000248/sub-01/anat/sub-01_T1w.nii.gz?versionId=8uTXIQ10Blcp2GeAVJJCHL5PimkSaQZL"
    assert encode_remote_url("=") == '='
299,630 | predict | # Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import math
import cv2
import numpy as np
import paddle
import paddleseg
from paddleseg.utils import logger, progbar
from core import infer
import utils
def mkdir(path):
    """Ensure that the parent directory of *path* exists.

    :param path: file path whose directory component should be created.
    """
    sub_dir = os.path.dirname(path)
    # exist_ok avoids the race between the old exists() check and makedirs();
    # the guard skips paths with no directory component (makedirs('') raises).
    if sub_dir:
        os.makedirs(sub_dir, exist_ok=True)
def partition_list(arr, m):
    """Split the list *arr* into m contiguous pieces of near-equal size."""
    chunk = int(math.ceil(len(arr) / float(m)))
    return [arr[start:start + chunk] for start in range(0, len(arr), chunk)]
def get_save_name(im_path, im_dir):
    """Get the name used for saving: *im_path* made relative to *im_dir*.

    :param im_path: full path of the input image.
    :param im_dir: root directory to strip; if None, only the basename is kept.
    :return: relative file name without leading slashes.
    """
    if im_dir is not None:
        im_file = im_path.replace(im_dir, '')
    else:
        im_file = os.path.basename(im_path)
    # lstrip removes any leading separators and, unlike the previous
    # im_file[0] check, does not raise IndexError on an empty result.
    return im_file.lstrip('/')
def add_info_to_save_path(save_path, info):
    """Insert ``_<info>`` between the stem and extension of *save_path*."""
    stem, extension = os.path.splitext(save_path)
    return "{}_{}{}".format(stem, info, extension)
def METHOD_NAME(model,
                model_path,
                image_list,
                transforms,
                thing_list,
                label_divisor,
                stuff_area,
                ignore_index,
                image_dir=None,
                save_dir='output',
                threshold=0.1,
                nms_kernel=7,
                top_k=200):
    """
    predict and visualize the image_list.
    Args:
        model (nn.Layer): Used to predict for input image.
        model_path (str): The path of pretrained model.
        image_list (list): A list of image path to be predicted.
        transforms (transform.Compose): Preprocess for input image.
        thing_list (list): A List of thing class id.
        label_divisor (int): An Integer, used to convert panoptic id = semantic id * label_divisor + instance_id.
        stuff_area (int): An Integer, remove stuff whose area is less than stuff_area.
        ignore_index (int): Specifies a value that is ignored.
        image_dir (str, optional): The root directory of the images predicted. Default: None.
        save_dir (str, optional): The directory to save the visualized results. Default: 'output'.
        threshold(float, optional): Threshold applied to center heatmap score. Default: 0.1.
        nms_kernel(int, optional): NMS max pooling kernel size. Default: 7.
        top_k(int, optional): Top k centers to keep. Default: 200.
    """
    paddleseg.utils.utils.load_entire_model(model, model_path)
    model.eval()
    nranks = paddle.distributed.get_world_size()
    local_rank = paddle.distributed.get_rank()
    # Shard the image list across ranks when running distributed.
    if nranks > 1:
        img_lists = partition_list(image_list, nranks)
    else:
        img_lists = [image_list]
    semantic_save_dir = os.path.join(save_dir, 'semantic')
    instance_save_dir = os.path.join(save_dir, 'instance')
    panoptic_save_dir = os.path.join(save_dir, 'panoptic')
    colormap = utils.cityscape_colormap()
    logger.info("Start to predict...")
    progbar_pred = progbar.Progbar(target=len(img_lists[0]), verbose=1)
    with paddle.no_grad():
        for i, im_path in enumerate(img_lists[local_rank]):
            ori_im = cv2.imread(im_path)
            ori_shape = ori_im.shape[:2]
            data = {'img': ori_im}
            data = transforms(data)
            im = data['img']
            # Add batch dimension before converting to a tensor.
            im = im[np.newaxis, ...]
            im = paddle.to_tensor(im)
            semantic, semantic_softmax, instance, panoptic, ctr_hmp = infer.inference(
                model=model,
                im=im,
                transforms=transforms.transforms,
                thing_list=thing_list,
                label_divisor=label_divisor,
                stuff_area=stuff_area,
                ignore_index=ignore_index,
                threshold=threshold,
                nms_kernel=nms_kernel,
                top_k=top_k,
                ori_shape=ori_shape)
            semantic = semantic.squeeze().numpy()
            instance = instance.squeeze().numpy()
            panoptic = panoptic.squeeze().numpy()
            im_file = get_save_name(im_path, image_dir)
            # Visualize semantic segmentation results
            save_path = os.path.join(semantic_save_dir, im_file)
            mkdir(save_path)
            utils.visualize_semantic(
                semantic, save_path=save_path, colormap=colormap)
            # Save added image for semantic segmentation results
            save_path_ = add_info_to_save_path(save_path, 'add')
            utils.visualize_semantic(
                semantic, save_path=save_path_, colormap=colormap, image=ori_im)
            # panoptic to semantic: collapse instance ids back to class ids
            ins_mask = panoptic > label_divisor
            pan_to_sem = panoptic.copy()
            pan_to_sem[ins_mask] = pan_to_sem[ins_mask] // label_divisor
            save_path_ = add_info_to_save_path(save_path,
                                               'panoptic_to_semantic')
            utils.visualize_semantic(
                pan_to_sem, save_path=save_path_, colormap=colormap)
            save_path_ = add_info_to_save_path(save_path,
                                               'panoptic_to_semantic_added')
            utils.visualize_semantic(
                pan_to_sem,
                save_path=save_path_,
                colormap=colormap,
                image=ori_im)
            # Visualize instance segmentation results (zero out stuff pixels)
            pan_to_ins = panoptic.copy()
            ins_mask = pan_to_ins > label_divisor
            pan_to_ins[~ins_mask] = 0
            save_path = os.path.join(instance_save_dir, im_file)
            mkdir(save_path)
            utils.visualize_instance(pan_to_ins, save_path=save_path)
            # Save added image for instance segmentation results
            save_path_ = add_info_to_save_path(save_path, 'added')
            utils.visualize_instance(
                pan_to_ins, save_path=save_path_, image=ori_im)
            # Visualize panoptic segmentation results
            save_path = os.path.join(panoptic_save_dir, im_file)
            mkdir(save_path)
            utils.visualize_panoptic(
                panoptic,
                save_path=save_path,
                label_divisor=label_divisor,
                colormap=colormap,
                ignore_index=ignore_index)
            # Save added image for panoptic segmentation results
            save_path_ = add_info_to_save_path(save_path, 'added')
            utils.visualize_panoptic(
                panoptic,
                save_path=save_path_,
                label_divisor=label_divisor,
                colormap=colormap,
                image=ori_im,
                ignore_index=ignore_index)
            progbar_pred.update(i + 1)
299,631 | test webhook | # -*- coding: utf-8 -*-
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
#
# Copyright (c) 2021-present Kaleidos Ventures SL
import hmac
import hashlib
import requests
from requests.exceptions import RequestException
from django.conf import settings
from taiga.base.api.renderers import UnicodeJSONRenderer
from taiga.base.utils import json, urls
from taiga.base.utils.db import get_typename_for_model_instance
from taiga.celery import app
from .serializers import (EpicSerializer, EpicRelatedUserStorySerializer,
UserStorySerializer, IssueSerializer, TaskSerializer,
WikiPageSerializer, MilestoneSerializer,
HistoryEntrySerializer, UserSerializer)
from .models import WebhookLog
def _serialize(obj):
    """Serialize *obj* with the serializer registered for its content type.

    Returns None for content types without a registered serializer.
    """
    serializer_by_type = {
        "epics.epic": EpicSerializer,
        "epics.relateduserstory": EpicRelatedUserStorySerializer,
        "userstories.userstory": UserStorySerializer,
        "issues.issue": IssueSerializer,
        "tasks.task": TaskSerializer,
        "wiki.wikipage": WikiPageSerializer,
        "milestones.milestone": MilestoneSerializer,
        "history.historyentry": HistoryEntrySerializer,
    }
    serializer = serializer_by_type.get(get_typename_for_model_instance(obj))
    return serializer(obj).data if serializer is not None else None
def _get_type(obj):
    """Return the model part of the content type name, e.g. "epics.epic" -> "epic"."""
    full_name = get_typename_for_model_instance(obj)
    return full_name.split(".")[1]
def _generate_signature(data, key):
mac = hmac.new(key.encode("utf-8"), msg=data, digestmod=hashlib.sha1)
return mac.hexdigest()
def _remove_leftover_webhooklogs(webhook_id):
    """Keep only the ten most recent WebhookLog rows for this webhook."""
    # Only the last ten webhook log traces are required,
    # so remove the leftover ones (ordered newest-first, skip the first 10).
    ids = (WebhookLog.objects.filter(webhook_id=webhook_id)
                             .order_by("-id")
                             .values_list('id', flat=True)[10:])
    WebhookLog.objects.filter(id__in=ids).delete()
def _send_request(webhook_id, url, key, data):
    """POST *data* as a signed JSON webhook and persist a WebhookLog trace.

    Three outcomes, each recorded as a WebhookLog row (only the last ten
    per webhook are kept): blocked private url, request error, or success.

    :param webhook_id: id of the owning Webhook (for logging).
    :param url: destination endpoint.
    :param key: shared secret used for the HMAC signature.
    :param data: JSON-serializable payload.
    :return: the created WebhookLog instance.
    """
    serialized_data = UnicodeJSONRenderer().render(data)
    signature = _generate_signature(serialized_data, key)
    headers = {
        "X-TAIGA-WEBHOOK-SIGNATURE": signature,  # For backward compatibility
        "X-Hub-Signature": "sha1={}".format(signature),
        "Content-Type": "application/json"
    }
    # SSRF guard: optionally refuse to deliver to private/internal addresses.
    if not settings.WEBHOOKS_ALLOW_PRIVATE_ADDRESS:
        try:
            urls.validate_private_url(url)
        except (urls.IpAddresValueError, urls.HostnameException) as e:
            # Error validating url
            webhook_log = WebhookLog.objects.create(webhook_id=webhook_id,
                                                    url=url,
                                                    status=0,
                                                    request_data=data,
                                                    request_headers=dict(),
                                                    response_data="error-in-request: {}".format(str(e)),
                                                    response_headers={},
                                                    duration=0)
            _remove_leftover_webhooklogs(webhook_id)
            return webhook_log
    request = requests.Request('POST', url, data=serialized_data, headers=headers)
    prepared_request = request.prepare()
    with requests.Session() as session:
        response = None
        try:
            response = session.send(prepared_request, allow_redirects=settings.WEBHOOKS_ALLOW_REDIRECTS)
            # Treat redirect responses as failures when redirects are disabled.
            if not settings.WEBHOOKS_ALLOW_REDIRECTS and response.status_code in [301, 302, 303, 307, 308]:
                raise RequestException("Redirects are not allowed")
        except RequestException as e:
            # Error sending the webhook
            webhook_log = WebhookLog.objects.create(webhook_id=webhook_id,
                                                    url=url,
                                                    status=response.status_code if response else 0,
                                                    request_data=data,
                                                    request_headers=dict(prepared_request.headers),
                                                    response_data="error-in-request: {}".format(str(e)),
                                                    response_headers={},
                                                    duration=0)
        else:
            # Webhook was sent successfully
            # response.content can be a not valid json so we encapsulate it
            response_data = json.dumps({"content": response.text})
            webhook_log = WebhookLog.objects.create(webhook_id=webhook_id, url=url,
                                                    status=response.status_code,
                                                    request_data=data,
                                                    request_headers=dict(prepared_request.headers),
                                                    response_data=response_data,
                                                    response_headers=dict(response.headers),
                                                    duration=response.elapsed.total_seconds())
        finally:
            _remove_leftover_webhooklogs(webhook_id)
    return webhook_log
@app.task
def create_webhook(webhook_id, url, key, by, date, obj):
    """Deliver a "create" event for *obj* to the webhook endpoint."""
    payload = {
        'action': "create",
        'type': _get_type(obj),
        'by': UserSerializer(by).data,
        'date': date,
        'data': _serialize(obj),
    }
    return _send_request(webhook_id, url, key, payload)
@app.task
def delete_webhook(webhook_id, url, key, by, date, obj):
    """Deliver a "delete" event for *obj* to the webhook endpoint."""
    payload = {
        'action': "delete",
        'type': _get_type(obj),
        'by': UserSerializer(by).data,
        'date': date,
        'data': _serialize(obj),
    }
    return _send_request(webhook_id, url, key, payload)
@app.task
def change_webhook(webhook_id, url, key, by, date, obj, change):
    """Deliver a "change" event for *obj*, including the serialized diff."""
    payload = {
        'action': "change",
        'type': _get_type(obj),
        'by': UserSerializer(by).data,
        'date': date,
        'data': _serialize(obj),
        'change': _serialize(change),
    }
    return _send_request(webhook_id, url, key, payload)
@app.task
def resend_webhook(webhook_id, url, key, data):
    """Re-deliver a previously built payload unchanged."""
    return _send_request(webhook_id, url, key, data)
@app.task
def METHOD_NAME(webhook_id, url, key, by, date):
    """Deliver a synthetic "test" event so users can verify their endpoint."""
    payload = {
        'action': "test",
        'type': "test",
        'by': UserSerializer(by).data,
        'date': date,
        'data': {"test": "test"},
    }
    return _send_request(webhook_id, url, key, payload)
299,632 | plot res | #!/usr/bin/python
##################
# remFitBuf.py
#
# Copyright David Baddeley, 2009
# d.baddeley@auckland.ac.nz
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##################
"""
Test a fit - in general use FitTestJigWC instead
"""
import numpy
from PYME.Acquire.Hardware.Simulator.fakeCam import NoiseMaker
import numpy as np
splitterFitModules = ['SplitterFitFR','SplitterFitQR','SplitterFitCOIR', 'BiplaneFitR', 'SplitterShiftEstFR', 'SplitterObjFindR', 'SplitterFitPsfIR']
#from pylab import *
import copy
from PYME.IO import MetaDataHandler
from PYME.Acquire.Hardware import EMCCDTheory
from scipy import optimize
def emg(v, rg):
    """Squared error between the EMCCD gain model at register value *v* and the
    target true gain *rg*; minimized in fitTestJig.__init__ via optimize.fmin."""
    return (EMCCDTheory.M((80. + v)/(255 + 80.), 6.6, -70, 536, 2.2) - rg)**2
#[A, x0, y0, 250/2.35, dataMean.min(), .001, .001]
class fitTestJig(object):
def __init__(self, metadata, fitModule = None):
self.md = copy.copy(metadata)
if fitModule is None:
self.fitModule = self.md.getEntry('Analysis.FitModule')
else:
self.fitModule = fitModule
self.md.tIndex = 0
self.bg = 0
if 'Test.Background' in self.md.getEntryNames():
self.bg = float(self.md['Test.Background'])
emGain = optimize.fmin(emg, 150, args=[self.md.Camera.TrueEMGain])[0]
self.noiseM = NoiseMaker(EMGain=emGain, floor=self.md.Camera.ADOffset, background=self.bg, QE=1.0,
fast_read_approx=False)
@classmethod
def fromMDFile(cls, mdfile):
return cls(MetaDataHandler.SimpleMDHandler(mdfile))
def runTests(self, params=None, param_jit=None, nTests=100):
from PYME.localization.FitFactories import import_fit_factory
if not params:
params = self.md['Test.DefaultParams']
if not param_jit:
param_jit = self.md['Test.ParamJitter']
self.fitMod = import_fit_factory(self.fitModule)
self.res = numpy.empty(nTests, self.fitMod.FitResultsDType)
ps = numpy.zeros((nTests, len(params)), 'f4')
rs=11
md2 = copy.copy(self.md)
if 'Test.PSFFile' in self.md.getEntryNames():
md2['PSFFile'] = self.md['Test.PSFFile']
for i in range(nTests):
p = np.array(params) + np.array(param_jit)*(2*np.random.rand(len(param_jit)) - 1)
p[0] = abs(p[0])
ps[i, :] = p
self.data, self.x0, self.y0, self.z0 = self.fitMod.FitFactory.evalModel(p, md2, roiHalfSize=rs)#, roiHalfSize= roiHalfWidth))
#print self.data.shape
#from PYME.DSView import View3D
#View3D(self.data)
# note that the noisemaker converts from photons to ADUs, so intensity related parameters in fit will differ from model
self.d2 = self.noiseM.noisify(self.data)
#print self.d2.min(), self.d2.max(), self.data.min(), self.data.max()
#print self.d2.shape
bg = self.bg*self.md.Camera.TrueEMGain/self.md.Camera.ElectronsPerCount
#print bg, self.md.Camera.ADOffset
self.fitFac = self.fitMod.FitFactory(np.atleast_3d(self.d2), self.md, background = bg + self.md.Camera.ADOffset)
self.res[i] = self.fitFac.FromPoint(rs, rs)#, roiHalfSize=rs)
self.ps = ps.view(self.res['fitResults'].dtype)
#self.calcMEs()
#return ps.view(self.res['fitResults'].dtype), self.res
def calcMEs(self):
for varName in self.ps.dtype.names:
yv = self.res['fitResults'][varName]
if hasattr(self, varName):
yv += self.__getattribute__(varName)
me = ((self.ps[varName].ravel() - yv)**2).mean()
print(('%s: %3.2f' % (varName, me)))
def error(self, varName):
    """Return (fitted - true) residuals for the named fit parameter."""
    true_vals = self.ps[varName].ravel()
    fitted = self.res['fitResults'][varName]
    # Shift by the model origin (e.g. x0) when such an attribute exists.
    offset = self.__getattribute__(varName) if hasattr(self, varName) else 0
    return (fitted + offset) - true_vals
def METHOD_NAME(self, varName):
    """Plot fitted and start-estimate values against ground truth for one parameter."""
    #print self.ps
    import matplotlib.pyplot as plt
    #from pylab import *
    plt.figure()
    #print varName
    xv = self.ps[varName].ravel()
    sp = self.res['startParams'][varName]
    yv = self.res['fitResults'][varName]
    if hasattr(self, varName):
        # Shift by the model origin (e.g. x0/y0/z0) so estimates are
        # comparable to the absolute ground-truth values.
        sp = sp + self.__getattribute__(varName)
        yv = yv + self.__getattribute__(varName)
    err = self.res['fitError'][varName]
    # Identity line: a perfect estimator would land on it.
    plt.plot([xv.min(), xv.max()], [xv.min(), xv.max()])
    plt.plot(xv, sp, '+', label='Start Est')
    plt.errorbar(xv, yv, err, fmt='x', label='Fitted')
    # Clamp negative error estimates to zero when computing plot limits.
    plt.ylim((yv - np.maximum(err, 0)).min(), (yv + np.maximum(err, 0)).max())
    plt.legend()
    plt.title(varName)
    plt.xlabel('True Position')
    plt.ylabel('Estimated Position')
import logging
import time
from dagster import DagsterEvent, DagsterEventType, EventLogEntry
from dagster._core.instance import DagsterInstance
from dagster._core.test_utils import create_run_for_test
from dagster._daemon.auto_run_reexecution.event_log_consumer import (
EventLogConsumerDaemon,
get_new_cursor,
)
TEST_EVENT_LOG_FETCH_LIMIT = 10
class TestEventLogConsumerDaemon(EventLogConsumerDaemon):
    """Override the actual handlers so that we can just test which run records they receive."""

    def __init__(self):
        # Modern zero-argument super() instead of the legacy two-argument form.
        super().__init__(event_log_fetch_limit=TEST_EVENT_LOG_FETCH_LIMIT)
        # Run records handed to the stub handler on the most recent iteration.
        self.run_records = []

    @property
    def handle_updated_runs_fns(self):
        def METHOD_NAME(_ctx, run_records):
            # Capture what the daemon delivered; `yield` keeps this a
            # generator, matching the handler interface the daemon expects.
            self.run_records = run_records
            yield

        return [METHOD_NAME]
def _create_success_event(instance, run):
    """Append a RUN_SUCCESS event for ``run`` to the instance's event log."""
    success_event = DagsterEvent(
        event_type_value=DagsterEventType.RUN_SUCCESS.value,
        job_name="foo",
        message="yay success",
    )
    log_entry = EventLogEntry(
        user_message="",
        level=logging.INFO,
        job_name="foo",
        run_id=run.run_id,
        error_info=None,
        timestamp=time.time(),
        dagster_event=success_event,
    )
    instance.handle_new_event(log_entry)
def test_daemon(instance: DagsterInstance, empty_workspace_context):
    """A failed run is delivered to the handler exactly once."""
    daemon = TestEventLogConsumerDaemon()
    # First iteration initializes cursors; nothing is handed off yet.
    list(daemon.run_iteration(empty_workspace_context))
    assert daemon.run_records == []
    run = create_run_for_test(instance, "test_job")
    instance.report_run_failed(run)
    list(daemon.run_iteration(empty_workspace_context))
    assert [record.dagster_run.run_id for record in daemon.run_records] == [run.run_id]
    # not called again for same event
    daemon.run_records = []  # reset this since it will keep the value from the last call
    list(daemon.run_iteration(empty_workspace_context))
    assert daemon.run_records == []
def test_events_exceed_limit(instance: DagsterInstance, empty_workspace_context):
    """Runs beyond the fetch limit spill over into the next iteration."""
    daemon = TestEventLogConsumerDaemon()
    # Initialize cursors before generating events.
    list(daemon.run_iteration(empty_workspace_context))
    for _ in range(TEST_EVENT_LOG_FETCH_LIMIT + 1):
        run = create_run_for_test(instance, "test_job")
        instance.report_run_failed(run)
    list(daemon.run_iteration(empty_workspace_context))
    assert len(daemon.run_records) == TEST_EVENT_LOG_FETCH_LIMIT
    # The one run past the limit is picked up on the following iteration.
    list(daemon.run_iteration(empty_workspace_context))
    assert len(daemon.run_records) == 1
def test_success_and_failure_events(instance: DagsterInstance, empty_workspace_context):
    """Success and failure streams are fetched independently, each with its own limit."""
    daemon = TestEventLogConsumerDaemon()
    # Initialize cursors before generating events.
    list(daemon.run_iteration(empty_workspace_context))
    for _ in range(TEST_EVENT_LOG_FETCH_LIMIT + 1):
        run = create_run_for_test(instance, "foo")
        instance.report_run_failed(run)
        run = create_run_for_test(instance, "foo")
        _create_success_event(instance, run)
    list(daemon.run_iteration(empty_workspace_context))
    # One full batch per event type (failure + success).
    assert len(daemon.run_records) == TEST_EVENT_LOG_FETCH_LIMIT * 2
    # One leftover run per event type on the next iteration.
    list(daemon.run_iteration(empty_workspace_context))
    assert len(daemon.run_records) == 2
# Cursor keys the daemon persists in run storage, one per tracked event type.
FAILURE_KEY = "EVENT_LOG_CONSUMER_CURSOR-PIPELINE_FAILURE"
SUCCESS_KEY = "EVENT_LOG_CONSUMER_CURSOR-PIPELINE_SUCCESS"
def test_cursors(instance: DagsterInstance, empty_workspace_context):
    """Cursors start at zero, stay put when idle, and advance past skipped events."""
    assert instance.run_storage.get_cursor_values({FAILURE_KEY, SUCCESS_KEY}) == {}
    daemon = TestEventLogConsumerDaemon()
    # First iteration writes initial cursors of "0" for both event types.
    list(daemon.run_iteration(empty_workspace_context))
    assert instance.run_storage.get_cursor_values({FAILURE_KEY, SUCCESS_KEY}) == {
        FAILURE_KEY: str(0),
        SUCCESS_KEY: str(0),
    }
    run1 = create_run_for_test(instance, "foo")
    run2 = create_run_for_test(instance, "foo")
    instance.report_run_failed(run1)
    instance.report_run_failed(run2)
    list(daemon.run_iteration(empty_workspace_context))
    assert len(daemon.run_records) == 2
    cursors = instance.run_storage.get_cursor_values({FAILURE_KEY, SUCCESS_KEY})
    # An idle iteration must not move the cursors.
    list(daemon.run_iteration(empty_workspace_context))
    assert instance.run_storage.get_cursor_values({FAILURE_KEY, SUCCESS_KEY}) == cursors
    # Ten engine events (5 per run) are not handled but still advance both cursors.
    for _ in range(5):
        instance.report_engine_event("foo", run1)
        instance.report_engine_event("foo", run2)
    list(daemon.run_iteration(empty_workspace_context))
    assert instance.run_storage.get_cursor_values({FAILURE_KEY, SUCCESS_KEY}) == {
        FAILURE_KEY: str(int(cursors[FAILURE_KEY]) + 10),
        SUCCESS_KEY: str(int(cursors[SUCCESS_KEY]) + 10),
    }
    # New failures after the skipped events are still picked up.
    run3 = create_run_for_test(instance, "foo")
    run4 = create_run_for_test(instance, "foo")
    instance.report_run_failed(run3)
    instance.report_run_failed(run4)
    list(daemon.run_iteration(empty_workspace_context))
    assert len(daemon.run_records) == 2
def test_cursor_init(instance: DagsterInstance, empty_workspace_context):
    """On fresh storage, cursors initialize to the latest event id, skipping pre-existing runs."""
    instance.run_storage.wipe()
    daemon = TestEventLogConsumerDaemon()
    # Events created *before* the first iteration must not be replayed.
    run1 = create_run_for_test(instance, "foo")
    run2 = create_run_for_test(instance, "foo")
    instance.report_run_failed(run1)
    instance.report_run_failed(run2)
    list(daemon.run_iteration(empty_workspace_context))
    assert len(daemon.run_records) == 0, "Cursors init to latest event"
    # Events created afterwards are handled normally.
    run3 = create_run_for_test(instance, "foo")
    instance.report_run_failed(run3)
    list(daemon.run_iteration(empty_workspace_context))
    assert len(daemon.run_records) == 1
def test_get_new_cursor():
    """Exercise get_new_cursor over the interesting cursor/limit combinations."""
    # Each case: ((cursor, overall_max_event_id, fetch_limit, new_event_ids), expected)
    cases = [
        # hit fetch limit -> use max of new_event_ids
        ((0, 20, 8, [3, 4, 5, 6, 7, 8, 9, 10]), 10),
        # hit fetch limit, overall_max_event_id is lower -> still max of new ids
        ((0, 7, 8, [3, 4, 5, 6, 7, 8, 9, 10]), 10),
        # under fetch limit, overall_max_event_id is lower -> max of new ids
        ((0, 7, 8, [3, 4, 5, 6, 7, 8, 9]), 9),
        # under fetch limit -> jump straight to overall_max_event_id
        ((0, 20, 4, [1, 2, 3]), 20),
        # empty event log
        ((0, None, 4, []), 0),
        # no overall_max_event_id
        ((0, None, 5, [2, 3, 4]), 4),
        # no new_event_ids -> overall max
        ((0, 10, 4, []), 10),
    ]
    for args, expected in cases:
        assert get_new_cursor(*args) == expected
from typing import Dict, Optional
import scipy.stats as stats
from great_expectations.core.expectation_configuration import ExpectationConfiguration
from great_expectations.core.metric_domain_types import MetricDomainTypes
from great_expectations.execution_engine import ExecutionEngine, PandasExecutionEngine
from great_expectations.expectations.expectation import BatchExpectation
from great_expectations.expectations.metrics.metric_provider import (
MetricConfiguration,
metric_value,
)
from great_expectations.expectations.metrics.table_metric_provider import (
TableMetricProvider,
)
class ColumnKolmogorovSmirnovTestPValueGreaterThan(TableMetricProvider):
    """Metric: two-sample Kolmogorov-Smirnov statistic and p-value for two columns."""

    # This is the id string that will be used to reference your Metric.
    metric_name = "column.p_value_greater_than_threshold"
    # Expectation kwargs forwarded to the metric as value kwargs.
    value_keys = (
        "column_a",
        "column_b",
    )

    # This method implements the core logic for the PandasExecutionEngine
    @metric_value(engine=PandasExecutionEngine)
    def _pandas(
        cls,
        execution_engine,
        metric_domain_kwargs,
        metric_value_kwargs,
        metrics,
        runtime_configuration,
    ):
        df, _, _ = execution_engine.get_compute_domain(
            metric_domain_kwargs, domain_type=MetricDomainTypes.TABLE
        )
        # metric value kwargs: kwargs passed in through the expectation
        column_a = metric_value_kwargs.get("column_a")
        column_b = metric_value_kwargs.get("column_b")
        column_a_values = df[column_a].to_list()
        column_b_values = df[column_b].to_list()
        # Two-sample KS test comparing the columns' empirical distributions.
        test_statistic, p_value = stats.ks_2samp(column_a_values, column_b_values)
        return test_statistic, p_value

    @classmethod
    def _get_evaluation_dependencies(
        cls,
        metric: MetricConfiguration,
        configuration: Optional[ExpectationConfiguration] = None,
        execution_engine: Optional[ExecutionEngine] = None,
        runtime_configuration: Optional[dict] = None,
    ):
        # Only the table's column list must be resolved before this metric runs.
        return {
            "table.columns": MetricConfiguration(
                "table.columns", metric.metric_domain_kwargs
            ),
        }
class ExpectColumnKolmogorovSmirnovTestPValueToBeGreaterThan(BatchExpectation):
    """Expect the two-sample Kolmogorov-Smirnov test p-value of two columns to be greater than a threshold."""

    # Example cases used by the Great Expectations gallery and self-diagnostics.
    examples = [
        {
            "data": {"x": [1, 2, 3, 4, 5], "y": [2, 4, 6, 8, 10]},
            "only_for": ["pandas"],
            "tests": [
                {
                    "title": "basic_positive_test",
                    "exact_match_out": False,
                    "include_in_gallery": True,
                    "in": {
                        "column_a": "x",
                        "column_b": "y",
                        "p_value_threshold": 0.1,
                    },
                    "out": {"success": True},
                },
                {
                    "title": "basic_negative_test",
                    "exact_match_out": False,
                    "include_in_gallery": True,
                    "in": {
                        "column_a": "x",
                        "column_b": "y",
                        "p_value_threshold": 0.5,
                    },
                    "out": {"success": False},
                },
            ],
        }
    ]

    # This is a tuple consisting of all Metrics necessary to evaluate the Expectation.
    metric_dependencies = ("column.p_value_greater_than_threshold",)

    # This a tuple of parameter names that can affect whether the Expectation evaluates to True or False.
    success_keys = (
        "p_value_threshold",
        "column_a",
        "column_b",
    )

    # This dictionary contains default values for any parameters that should have default values.
    default_kwarg_values = {}

    def METHOD_NAME(
        self, configuration: Optional[ExpectationConfiguration]
    ) -> None:
        """
        Validates that a configuration has been set, and sets a configuration if it has yet to be set. Ensures that
        necessary configuration arguments have been provided for the validation of the expectation.
        Args:
            configuration (OPTIONAL[ExpectationConfiguration]): \
                An optional Expectation Configuration entry that will be used to configure the expectation
        Returns:
            None. Raises InvalidExpectationConfigurationError if the config is not validated successfully
        """
        super().METHOD_NAME(configuration)
        configuration = configuration or self.configuration

    # This method performs a validation of your metrics against your success keys, returning a dict indicating the success or failure of the Expectation.
    def _validate(
        self,
        configuration: ExpectationConfiguration,
        metrics: Dict,
        runtime_configuration: dict = None,
        execution_engine: ExecutionEngine = None,
    ):
        threshold = configuration["kwargs"].get("p_value_threshold")
        test_statistic, p_value = metrics.get("column.p_value_greater_than_threshold")
        # NOTE(review): comparison is >= although the name says "greater than" -
        # confirm the boundary case is intended.
        success = p_value >= threshold
        return {"success": success, "result": {"observed_value": p_value}}

    # This object contains metadata for display in the public Gallery
    library_metadata = {
        "tags": [
            "statistical",
            "test",
            "testing",
        ],  # Tags for this Expectation in the Gallery
        "contributors": [  # Github handles for all contributors to this Expectation.
            "@HaebichanGX",  # Don't forget to add your github handle here!
        ],
    }
if __name__ == "__main__":
ExpectColumnKolmogorovSmirnovTestPValueToBeGreaterThan().print_diagnostic_checklist() |
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Prepare CHiME3 background data.
Download, unpack and create manifest files.
Manifest file is a json-format file with each line containing the
meta data (i.e. audio filepath, transcript and audio duration)
of each audio file in the data set.
"""
import argparse
import io
import json
import os
import zipfile
import soundfile
import wget
from paddle.v2.dataset.common import md5file
# DATA_HOME = os.path.expanduser('~/.cache/paddle/dataset/speech')
# Datasets are stored relative to the current directory by default.
DATA_HOME = os.path.expanduser('.')

# Download location and expected archive checksum for the CHiME3 background set.
URL = "https://d4s.myairbridge.com/packagev2/AG0Y3DNBE5IWRRTV/?dlid=W19XG7T0NNHB027139H0EQ"
MD5 = "c3ff512618d7a67d4f85566ea1bc39ec"

parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument(
    "--target_dir",
    default=DATA_HOME + "/chime3_background",
    type=str,
    help="Directory to save the dataset. (default: %(default)s)")
parser.add_argument(
    "--manifest_filepath",
    default="manifest.chime3.background",
    type=str,
    help="Filepath for output manifests. (default: %(default)s)")
args = parser.parse_args()
def download(url, md5sum, target_dir, filename=None):
    """Download ``url`` into ``target_dir`` and verify the file's MD5 checksum.

    Skips the download when a file with the expected checksum already exists.
    Returns the local file path.
    """
    if filename is None:
        filename = url.split("/")[-1]
    if not os.path.exists(target_dir):
        os.makedirs(target_dir)
    filepath = os.path.join(target_dir, filename)
    # Early return on a cached, checksum-verified file.
    if os.path.exists(filepath) and md5file(filepath) == md5sum:
        print("File exists, skip downloading. (%s)" % filepath)
        return filepath
    print("Downloading %s ..." % url)
    wget.download(url, target_dir)
    print("\nMD5 Chesksum %s ..." % filepath)
    if not md5file(filepath) == md5sum:
        raise RuntimeError("MD5 checksum failed.")
    return filepath
def unpack(filepath, target_dir):
    """Unpack a .zip, .tar or .tar.gz archive into ``target_dir``.

    Raises:
        ValueError: if the file extension is not a supported archive type.
    """
    # Local import keeps the module's original import block untouched.
    import tarfile
    print("Unpacking %s ..." % filepath)
    if filepath.endswith('.zip'):
        with zipfile.ZipFile(filepath, 'r') as zip_archive:
            zip_archive.extractall(target_dir)
    elif filepath.endswith('.tar') or filepath.endswith('.tar.gz'):
        # BUG FIX: this branch previously called zipfile.open(), which does
        # not exist - tar archives must be opened with tarfile.open().
        with tarfile.open(filepath) as tar_archive:
            tar_archive.extractall(target_dir)
    else:
        raise ValueError("File format is not supported for unpacking.")
def create_manifest(data_dir, manifest_path):
    """Create a manifest json file summarizing the data set, with each line
    containing the meta data (i.e. audio filepath, audio duration) of each
    audio file within the data set.
    """
    print("Creating manifest %s ..." % manifest_path)
    json_lines = []
    for subfolder, _, filelist in sorted(os.walk(data_dir)):
        for filename in filelist:
            if filename.endswith('.wav'):
                # BUG FIX: os.walk already yields directory paths rooted at
                # data_dir; joining data_dir again duplicated the prefix
                # (e.g. "./chime3_background/./chime3_background/sub/x.wav")
                # whenever data_dir was a relative path (the default).
                filepath = os.path.join(subfolder, filename)
                audio_data, samplerate = soundfile.read(filepath)
                duration = float(len(audio_data)) / samplerate
                json_lines.append(
                    json.dumps(
                        {
                            'utt': os.path.splitext(os.path.basename(filepath))[
                                0],
                            'feat': filepath,
                            'feat_shape': (duration, ),  # second
                            'type': 'background'
                        }))
    with io.open(manifest_path, mode='w', encoding='utf8') as out_file:
        for line in json_lines:
            out_file.write(line + '\n')
def prepare_chime3(url, md5sum, target_dir, manifest_path):
    """Download and unpack the data set, then write its summary manifest."""
    if os.path.exists(os.path.join(target_dir, "CHiME3")):
        print("Skip downloading and unpacking. Data already exists in %s." %
              target_dir)
    else:
        # download
        filepath = download(url, md5sum, target_dir,
                            "myairbridge-AG0Y3DNBE5IWRRTV.zip")
        # unpack the outer archive, then each environment's inner archive
        unpack(filepath, target_dir)
        for environment in ('bus', 'caf', 'ped', 'str'):
            inner = os.path.join(target_dir,
                                 'CHiME3_background_%s.zip' % environment)
            unpack(inner, target_dir)
    # create manifest json file
    create_manifest(target_dir, manifest_path)
def METHOD_NAME():
    """Entry point: prepare the CHiME3 background data set from CLI arguments."""
    options = dict(
        url=URL,
        md5sum=MD5,
        target_dir=args.target_dir,
        manifest_path=args.manifest_filepath,
    )
    prepare_chime3(**options)
if __name__ == '__main__':
METHOD_NAME() |
import pytest
import os
import yaml
# from inspect import signature
# from importlib import import_module
# from improv.config import RepeatedActorError
from improv.config import Config as config
from improv.utils import checks
import logging
logger = logging.getLogger(__name__)
# set global variables
@pytest.fixture()
def set_configdir():
    """Run the test with the configs directory as the current working directory."""
    original_cwd = os.getcwd()
    os.chdir(os.path.dirname(__file__) + "/configs")
    yield None
    # Restore the previous working directory after the test finishes.
    os.chdir(original_cwd)
# NOTE(review): ("good_config.yaml") is a plain string, not a 1-tuple;
# parametrize therefore sees exactly one case - confirm this is intended.
@pytest.mark.parametrize("test_input", [("good_config.yaml")])
def test_init(test_input, set_configdir):
    """Checks if cfg.configFile matches the provided configFile.
    Asserts:
        Whether config has the correct config file.
    """
    cfg = config(test_input)
    assert cfg.configFile == test_input
# def test_init_attributes():
# """ Tests if config has correct default attributes on initialization.
# Checks if actors, connection, and hasGUI are all empty or
# nonexistent. Detects errors by maintaining a list of errors, and
# then adding to it every time an unexpected behavior is encountered.
# Asserts:
# If the default attributes are empty or nonexistent.
# """
# cfg = config()
# errors = []
# if(cfg.actors != {}):
# errors.append("config.actors is not empty! ")
# if(cfg.connections != {}):
# errors.append("config.connections is not empty! ")
# if(cfg.hasGUI):
# errors.append("config.hasGUI already exists! ")
# assert not errors, "The following errors occurred:\n{}".format(
# "\n".join(errors))
def METHOD_NAME(set_configdir):
    """Check if the default way config creates config.settings is correct.
    Asserts:
        If the default setting is the dictionary {"use_watcher": "None"}
    """
    cfg = config("good_config.yaml")
    cfg.createConfig()
    # Only the default key should be present after parsing a good config.
    assert cfg.settings == {"use_watcher": None}
def test_createConfig_clean(set_configdir):
    """Tests if createConfig runs without error given a good config.
    Asserts:
        If createConfig does not raise any errors.
    """
    cfg = config("good_config.yaml")
    try:
        cfg.createConfig()
    except Exception as exc:
        # Any exception here means the good config failed to parse.
        pytest.fail(f"createConfig() raised an exception {exc}")
def test_createConfig_noActor(set_configdir):
    """Tests if AttributeError is raised when there are no actors."""
    cfg = config("no_actor.yaml")
    with pytest.raises(AttributeError):
        cfg.createConfig()


def test_createConfig_ModuleNotFound(set_configdir):
    """Tests if an error is raised when the package can"t be found."""
    cfg = config("bad_package.yaml")
    with pytest.raises(ModuleNotFoundError):
        cfg.createConfig()


def test_createConfig_class_ImportError(set_configdir):
    """Tests if an error is raised when the class name is invalid."""
    cfg = config("bad_class.yaml")
    with pytest.raises(AttributeError):
        cfg.createConfig()


def test_createConfig_AttributeError(set_configdir):
    """Tests if AttributeError is raised."""
    # NOTE(review): same fixture and expected error as the ImportError test
    # above - possibly redundant; confirm both are needed.
    cfg = config("bad_class.yaml")
    with pytest.raises(AttributeError):
        cfg.createConfig()


def test_createConfig_blank_file(set_configdir):
    """Tests if a blank config file raises an error."""
    cfg = config("blank_file.yaml")
    with pytest.raises(TypeError):
        cfg.createConfig()


def test_createConfig_nonsense_file(set_configdir, caplog):
    """Tests if an improperly formatted config raises an error."""
    cfg = config("nonsense.yaml")
    with pytest.raises(TypeError):
        cfg.createConfig()
def test_acyclic_graph(set_configdir):
    """The connection graph of a good config contains no cycles."""
    path = os.getcwd() + "/good_config.yaml"
    assert checks.check_if_connections_acyclic(path)


def test_cyclic_graph(set_configdir):
    """A config with circular actor connections is detected as cyclic."""
    path = os.getcwd() + "/cyclic_config.yaml"
    assert not checks.check_if_connections_acyclic(path)
def test_saveActors_clean(set_configdir):
    """Compares internal actor representation to what was saved in the file."""
    cfg = config("good_config.yaml")
    cfg.createConfig()
    cfg.saveActors()
    # saveActors is expected to write "<config stem>_actors.yaml".
    with open("good_config_actors.yaml") as savedConfig:
        data = yaml.safe_load(savedConfig)
    # Compare only the number of actors, not their full contents.
    savedKeys = len(data.keys())
    originalKeys = len(cfg.actors.keys())
    assert savedKeys == originalKeys
import pytest
from praw.models import Message, Redditor, Subreddit, SubredditMessage
from ... import IntegrationTest
class TestMessage(IntegrationTest):
    """Integration tests for private messages in the authenticated user's inbox."""

    def METHOD_NAME(self, reddit):
        # Walk the entire inbox, including replies, checking attribute types
        # on every item encountered.
        reddit.read_only = False
        messages = list(reddit.inbox.messages())
        count = len(messages)
        while messages:
            message = messages.pop(0)
            messages.extend(message.replies)
            count -= 1
            try:
                assert message.author is None or isinstance(message.author, Redditor)
                assert isinstance(message.dest, (Redditor, Subreddit))
                assert isinstance(message.replies, list)
                assert message.subreddit is None or isinstance(
                    message.subreddit, Subreddit
                )
            except Exception:
                # Dump the offending message's attributes before re-raising.
                import pprint

                pprint.pprint(vars(message))
                raise
        # count goes negative only if replies were processed beyond the
        # initially fetched messages, i.e. at least one reply existed.
        assert count < 0

    def test_block(self, reddit):
        reddit.read_only = False
        message = None
        # Find a message authored by someone other than the test account.
        for item in reddit.inbox.messages():
            if item.author and item.author != pytest.placeholders.username:
                message = item
                break
        else:
            msg = "no message found"
            raise AssertionError(msg)
        message.block()

    def test_delete(self, reddit):
        reddit.read_only = False
        message = next(reddit.inbox.messages())
        message.delete()

    def test_mark_read(self, reddit):
        reddit.read_only = False
        message = None
        # Only Message instances count; the unread listing may contain
        # other item types (e.g. comment replies).
        for item in reddit.inbox.unread():
            if isinstance(item, Message):
                message = item
                break
        else:
            msg = "no message found in unread"
            raise AssertionError(msg)
        message.mark_read()

    def test_mark_unread(self, reddit):
        reddit.read_only = False
        message = next(reddit.inbox.messages())
        message.mark_unread()

    def test_message_collapse(self, reddit):
        reddit.read_only = False
        message = next(reddit.inbox.messages())
        message.collapse()

    def test_message_uncollapse(self, reddit):
        reddit.read_only = False
        message = next(reddit.inbox.messages())
        message.uncollapse()

    def test_parent(self, reddit):
        reddit.read_only = False
        message = reddit.inbox.message("1ay4xyu")
        parent = message.parent
        assert isinstance(parent, Message)
        assert parent.fullname == message.parent_id

    def test_parent__from_inbox_listing(self, reddit):
        reddit.read_only = False
        message = next(reddit.inbox.sent(limit=1))
        parent = message.parent
        assert isinstance(parent, Message)
        assert parent.fullname == message.parent_id

    def test_reply(self, reddit):
        reddit.read_only = False
        message = next(reddit.inbox.messages())
        reply = message.reply(body="Message reply")
        assert reply.author == reddit.config.username
        assert reply.body == "Message reply"
        assert reply.first_message_name == message.fullname

    def test_unblock_subreddit(self, reddit):
        reddit.read_only = False
        message1 = next(reddit.inbox.messages(limit=1))
        assert isinstance(message1, SubredditMessage)
        message_fullname = message1.fullname
        message1.block()
        # Blocking rewrites the subject; unblocking restores it.
        message2 = next(reddit.inbox.messages(limit=1))
        assert message2.fullname == message_fullname
        assert message2.subject == "[message from blocked subreddit]"
        message2.unblock_subreddit()
        message3 = next(reddit.inbox.messages(limit=1))
        assert message3.fullname == message_fullname
        assert message3.subject != "[message from blocked subreddit]"
class TestSubredditMessage(IntegrationTest):
    """Integration tests for moderator (subreddit) messages."""

    def test_mute(self, reddit):
        reddit.read_only = False
        # "5yr8id" is presumably a pre-recorded cassette message id - confirm.
        message = SubredditMessage(reddit, _data={"id": "5yr8id"})
        message.mute()

    def test_unmute(self, reddit):
        reddit.read_only = False
        message = SubredditMessage(reddit, _data={"id": "5yr8id"})
        message.unmute()
# This code is part of Qiskit.
#
# (C) Copyright IBM 2021.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""A quantum oracle constructed from a logical expression or a string in the DIMACS format."""
from os.path import basename, isfile
from typing import Callable, Optional
from qiskit.circuit import QuantumCircuit
from qiskit.utils.optionals import HAS_TWEEDLEDUM
from .classical_element import ClassicalElement
@HAS_TWEEDLEDUM.require_in_instance
class BooleanExpression(ClassicalElement):
    """The Boolean Expression gate."""

    def __init__(
        self, expression: str, name: Optional[str] = None, var_order: Optional[list] = None
    ) -> None:
        """
        Args:
            expression (str): The logical expression string.
            name (str): Optional. Instruction gate name. Otherwise part of the expression is
                going to be used.
            var_order(list): A list with the order in which variables will be created.
               (default: by appearance)
        """
        from tweedledum import BoolFunction  # pylint: disable=import-error

        self._tweedledum_bool_expression = BoolFunction.from_expression(
            expression, var_order=var_order
        )

        # Truncate long expressions when they double as the instruction name.
        short_expr_for_name = (expression[:10] + "...") if len(expression) > 13 else expression

        # One qubit per input variable plus one per output.
        num_qubits = (
            self._tweedledum_bool_expression.num_outputs()
            + self._tweedledum_bool_expression.num_inputs()
        )
        super().__init__(name or short_expr_for_name, num_qubits=num_qubits, params=[])

    def simulate(self, bitstring: str) -> bool:
        """Evaluate the expression on a bitstring.
        This evaluation is done classically.
        Args:
            bitstring: The bitstring for which to evaluate.
        Returns:
            bool: result of the evaluation.
        """
        from tweedledum import BitVec  # pylint: disable=import-error

        bits = []
        for bit in bitstring:
            bits.append(BitVec(1, bit))
        return bool(self._tweedledum_bool_expression.simulate(*bits))

    def METHOD_NAME(
        self,
        registerless: bool = True,
        synthesizer: Optional[Callable[["BooleanExpression"], QuantumCircuit]] = None,
    ):
        """Synthesis the logic network into a :class:`~qiskit.circuit.QuantumCircuit`.
        Args:
            registerless: Default ``True``. If ``False`` uses the parameter names
                to create registers with those names. Otherwise, creates a circuit with a flat
                quantum register.
            synthesizer: A callable that takes self and returns a Tweedledum
                circuit.
        Returns:
            QuantumCircuit: A circuit implementing the logic network.
        """
        if registerless:
            qregs = None
        else:
            qregs = None  # TODO: Probably from self._tweedledum_bool_expression._signature

        if synthesizer is None:
            from .utils import tweedledum2qiskit  # Avoid an import cycle
            from tweedledum.synthesis import pkrm_synth  # pylint: disable=import-error

            truth_table = self._tweedledum_bool_expression.truth_table(output_bit=0)
            return tweedledum2qiskit(pkrm_synth(truth_table), name=self.name, qregs=qregs)
        return synthesizer(self)

    def _define(self):
        """The definition of the boolean expression is its synthesis"""
        self.definition = self.METHOD_NAME()

    @classmethod
    def from_dimacs_file(cls, filename: str):
        """Create a BooleanExpression from the string in the DIMACS format.
        Args:
            filename: A file in DIMACS format.
        Returns:
            BooleanExpression: A gate for the input string
        Raises:
            FileNotFoundError: If filename is not found.
        """
        HAS_TWEEDLEDUM.require_now("BooleanExpression")
        from tweedledum import BoolFunction  # pylint: disable=import-error

        expr_obj = cls.__new__(cls)
        if not isfile(filename):
            # FIX: corrected grammar in the error message ("does not exists").
            raise FileNotFoundError("The file %s does not exist." % filename)
        expr_obj._tweedledum_bool_expression = BoolFunction.from_dimacs_file(filename)

        num_qubits = (
            expr_obj._tweedledum_bool_expression.num_inputs()
            + expr_obj._tweedledum_bool_expression.num_outputs()
        )
        # Two-argument super() is required here: expr_obj was created with
        # __new__, so there is no bound instance for zero-argument super().
        super(BooleanExpression, expr_obj).__init__(
            name=basename(filename), num_qubits=num_qubits, params=[]
        )
        return expr_obj
import requests
from datetime import datetime, timezone
from django.core.management.base import BaseCommand
from django.core.exceptions import ObjectDoesNotExist
from api.models import Appeal, AppealDocument
from deployments.models import ERU, PersonnelDeployment, Personnel, DeployedPerson
from api.logger import logger
class Command(BaseCommand):
    """Django management command that ingests deployment data from an HXL proxy feed."""

    help = 'Ingest deployments'

    def parse_date(self, date_string):
        # 21Dec2017
        timeformat = '%d%b%Y'
        return datetime.strptime(date_string.strip(), timeformat).replace(tzinfo=timezone.utc)

    def METHOD_NAME(self, *args, **options):
        """Fetch the deployments spreadsheet via the HXL proxy and log the outcome."""
        logger.info('Starting Deployment ingest')
        # url = 'https://proxy.hxlstandard.org/data.json?url=https%3A%2F%2Fdocs.google.com%2Fspreadsheets%2Fd%2F1CBvledFYc_uwlvHTvJE0SYS7_mPGU2L-zhrqbB4KNIA%2Fedit%23gid%3D0&header-row=1' # not enough.
        # Each tagger-NN pair maps a spreadsheet column header to an HXL tag.
        url = 'https://proxy.hxlstandard.org/data.json?tagger-match-all=on&' \
            + 'tagger-01-header=year&' \
            + 'tagger-01-tag=%23a1&' \
            + 'tagger-02-header=%2Aappeal+code&' \
            + 'tagger-02-tag=%23a2&' \
            + 'tagger-03-header=region&' \
            + 'tagger-03-tag=%23a3&' \
            + 'tagger-04-header=country&' \
            + 'tagger-04-tag=%23a4&' \
            + 'tagger-05-header=location&' \
            + 'tagger-05-tag=%23a5&' \
            + 'tagger-06-header=disaster+type&' \
            + 'tagger-06-tag=%23a6&' \
            + 'tagger-07-header=%2Adisaster+name&' \
            + 'tagger-07-tag=%23a7&' \
            + 'tagger-08-header=%2Aname&' \
            + 'tagger-08-tag=%23a8&' \
            + 'tagger-09-header=%2Adeploying+ns+%2F+ifrc+office&' \
            + 'tagger-09-tag=%23a9&' \
            + 'tagger-10-header=%2Agender&' \
            + 'tagger-10-tag=%23b1&' \
            + 'tagger-11-header=language&' \
            + 'tagger-11-tag=%23b2&' \
            + 'tagger-12-header=%2Aposition&' \
            + 'tagger-12-tag=%23b3&' \
            + 'tagger-13-header=%2Atype&' \
            + 'tagger-13-tag=%23b4&' \
            + 'tagger-14-header=supported+by+ns&' \
            + 'tagger-14-tag=%23b5&' \
            + 'tagger-15-header=availability&' \
            + 'tagger-15-tag=%23b6&' \
            + 'tagger-16-header=%2Aexp+start+date&' \
            + 'tagger-16-tag=%23b7&' \
            + 'tagger-17-header=%2Aexp+duration&' \
            + 'tagger-17-tag=%23b8&' \
            + 'tagger-18-header=%2Aalert&' \
            + 'tagger-18-tag=%23b9&' \
            + 'tagger-19-header=deployment+message&' \
            + 'tagger-19-tag=%23c1&' \
            + 'tagger-20-header=%2Astart+of+mission&' \
            + 'tagger-20-tag=%23c2&' \
            + 'tagger-21-header=%2Aend+of+mission&' \
            + 'tagger-21-tag=%23c3&' \
            + 'tagger-22-header=deployment+duration&' \
            + 'tagger-22-tag=%23c4&' \
            + 'tagger-23-header=deployed&' \
            + 'tagger-23-tag=%23c5&' \
            + 'tagger-24-header=rotation&' \
            + 'tagger-24-tag=%23c6&' \
            + 'tagger-25-header=comments&' \
            + 'tagger-25-tag=%23c7&' \
            + 'url=https%3A%2F%2Fdocs.google.com%2Fspreadsheets%2Fd%2F1CBvledFYc_uwlvHTvJE0SYS7_mPGU2L-zhrqbB4KNIA%2Fedit%23gid%3D0&' \
            + 'header-row=1'
        response = requests.get(url)
        if response.status_code != 200:
            logger.error('Error querying Deployment HXL API')
            raise Exception('Error querying Deployment HXL API')
        records = response.json()

        # some logging variables
        not_found = []
        existing = []
        created = []

        # Normalize headers: strip the HXL "*" markers and inner spaces.
        columns = [a.replace('*','').replace(' ','') for a in records[0]]
        # ['Year', 'AppealCode', 'Region', 'Country', 'Location', 'Disastertype', 'Disastername', 'Name', 'DeployingNS/IFRCOffice', 'Gender', 'Language', 'Position', 'Type', 'SupportedbyNS', 'Availability', 'Expstartdate', 'expduration', 'Alert', 'Deploymentmessage', 'Startofmission', 'Endofmission', 'DeploymentDuration', 'Deployed', 'Rotation', 'Comments']
        #     0       1             2         3          4           5               6               7       8                          9         10          11          12      13              14              15              16             17       18                    19                20              21                    22          23          24
        # if empty name -> Alert, otherwise -> Deployment

        # NOTE(review): the record-processing logic below is commented out as
        # OBSOLETE, so this command currently only fetches and logs zeros.
        # OBSOLETE:
        # # group records by appeal code
        # acodes = list(set([a[2] for a in records[2:]]))
        # for code in acodes:
        #     try:
        #         appeal = Appeal.objects.get(code=code)
        #     except ObjectDoesNotExist:
        #         not_found.append(code)
        #         continue
        #
        #     existing_docs = list(appeal.appealdocument_set.all())
        #     docs = [a for a in records if a[2] == code]
        #     for doc in docs:
        #         exists = len([a for a in existing_docs if a.document_url == doc[0]]) > 0
        #         if exists:
        #             existing.append(doc[0])
        #         else:
        #             try:
        #                 created_at = self.parse_date(doc[5])
        #             except:
        #                 created_at = None
        #
        #             AppealDocument.objects.create(
        #                 document_url=doc[0],
        #                 name=doc[4],
        #                 created_at=created_at,
        #                 appeal=appeal,
        #             )
        #             created.append(doc[0])
        logger.info('%s Deployments created' % len(created))
        logger.info('%s existing Deployments' % len(existing))
        logger.warning('%s documents without appeals in system' % len(not_found))
from typing import Any, Dict, Mapping, Sequence
from pymongo import MongoClient
from common.credentials import Credentials
from monkey_island.cc.repositories import (
ICredentialsRepository,
RemovalError,
RetrievalError,
StorageError,
)
from monkey_island.cc.server_utils.encryption import ILockableEncryptor
from .consts import MONGO_OBJECT_ID_KEY
class MongoCredentialsRepository(ICredentialsRepository):
"""
Store credentials in a mongo database that can be used to propagate around the network.
"""
def __init__(self, mongo: MongoClient, repository_encryptor: ILockableEncryptor):
    # All credential collections live in the ``monkey_island`` database.
    self._database = mongo.monkey_island
    # Encryptor applied to every document before storage / after retrieval.
    self._repository_encryptor = repository_encryptor
def get_configured_credentials(self) -> Sequence[Credentials]:
    """Return credentials configured by the user."""
    return self._get_credentials_from_collection(self._database.configured_credentials)

def get_stolen_credentials(self) -> Sequence[Credentials]:
    """Return credentials stolen during propagation."""
    return self._get_credentials_from_collection(self._database.stolen_credentials)

def get_all_credentials(self) -> Sequence[Credentials]:
    """Return configured credentials followed by stolen credentials."""
    configured_credentials = self.get_configured_credentials()
    stolen_credentials = self.get_stolen_credentials()
    return [*configured_credentials, *stolen_credentials]

def save_configured_credentials(self, credentials: Sequence[Credentials]):
    """Persist user-configured credentials."""
    # TODO: Fix deduplication of Credentials in mongo
    self._save_credentials_to_collection(credentials, self._database.configured_credentials)

def save_stolen_credentials(self, credentials: Sequence[Credentials]):
    """Persist credentials stolen during propagation."""
    self._save_credentials_to_collection(credentials, self._database.stolen_credentials)

def remove_configured_credentials(self):
    """Delete all user-configured credentials."""
    self._remove_credentials_fom_collection(self._database.configured_credentials)

def remove_stolen_credentials(self):
    """Delete all stolen credentials."""
    self._remove_credentials_fom_collection(self._database.stolen_credentials)

def remove_all_credentials(self):
    """Delete both configured and stolen credentials."""
    self.remove_configured_credentials()
    self.remove_stolen_credentials()

def reset(self):
    """Reset the repository by removing all stored credentials."""
    self.remove_all_credentials()
def _get_credentials_from_collection(self, collection) -> Sequence[Credentials]:
try:
collection_result = []
list_collection_result = list(collection.find({}, {MONGO_OBJECT_ID_KEY: False}))
for encrypted_credentials in list_collection_result:
plaintext_credentials = self._decrypt_credentials_mapping(encrypted_credentials)
collection_result.append(Credentials(**plaintext_credentials))
return collection_result
except Exception as err:
raise RetrievalError(err)
def _save_credentials_to_collection(self, credentials: Sequence[Credentials], collection):
try:
for c in credentials:
encrypted_credentials = self.METHOD_NAME(c.dict(simplify=True))
collection.insert_one(encrypted_credentials)
except Exception as err:
raise StorageError(err)
# TODO: If possible, implement the encryption/decryption as a decorator so it can be reused with
# different ICredentialsRepository implementations
def METHOD_NAME(self, mapping: Mapping[str, Any]) -> Mapping[str, Any]:
encrypted_mapping: Dict[str, Any] = {}
for secret_or_identity, credentials_component in mapping.items():
if credentials_component is None:
encrypted_component = None
else:
encrypted_component = {
key: self._repository_encryptor.encrypt(value.encode())
if value is not None
else value
for key, value in credentials_component.items()
}
encrypted_mapping[secret_or_identity] = encrypted_component
return encrypted_mapping
def _decrypt_credentials_mapping(self, mapping: Mapping[str, Any]) -> Mapping[str, Any]:
decrypted_mapping: Dict[str, Any] = {}
for secret_or_identity, credentials_component in mapping.items():
if credentials_component is None:
decrypted_component = None
else:
decrypted_component = {
key: self._repository_encryptor.decrypt(value).decode()
if value is not None
else value
for key, value in credentials_component.items()
}
decrypted_mapping[secret_or_identity] = decrypted_component
return decrypted_mapping
@staticmethod
def _remove_credentials_fom_collection(collection):
try:
collection.drop()
except Exception as err:
raise RemovalError(f"Error removing credentials: {err}") |
299,641 | get object | import django.forms
from django.conf import settings
from django.core.exceptions import ValidationError
from django.utils import timezone
from collections import OrderedDict
from datetime import timedelta
from postgresqleu.util.time import time_sinceoruntil, datetime_string
from postgresqleu.util.widgets import StaticTextWidget, EmailTextWidget
from postgresqleu.util.backendforms import BackendForm
from postgresqleu.membership.models import Member, MemberLog, Meeting, MembershipConfiguration
from postgresqleu.membership.models import MeetingType, MeetingReminder
from postgresqleu.membership.backendlookups import MemberLookup
class BackendConfigForm(BackendForm):
    """Admin form for editing the site-wide membership configuration."""
    helplink = 'membership'

    class Meta:
        model = MembershipConfiguration
        fields = ['sender_email', 'sender_name', 'membership_years', 'membership_cost', 'country_validator',
                  'paymentmethods', ]
        widgets = {
            # Render the payment methods m2m as checkboxes instead of a multi-select.
            'paymentmethods': django.forms.CheckboxSelectMultiple,
        }

    def fix_fields(self):
        # Label each payment method with its internal description, flagging
        # inactive ones, and include the configured currency in the cost help.
        self.fields['paymentmethods'].label_from_instance = lambda x: "{0}{1}".format(x.internaldescription, x.active and " " or " (INACTIVE)")
        self.fields['membership_cost'].help_text = "Membership cost in {0}".format(settings.CURRENCY_SYMBOL)
class MemberLogManager(object):
    """Linked-object manager showing a member's log entries (read only)."""
    title = "Log"
    singular = "log"
    can_add = False  # log entries can never be added manually

    def get_list(self, instance):
        # Newest entries first; id of None means rows are display-only.
        return [(None, line.timestamp, line.message) for line in MemberLog.objects.filter(member=instance).order_by('-timestamp')]
class BackendMemberForm(BackendForm):
    """Admin form for viewing and editing a single membership record."""
    helplink = 'membership'
    list_fields = ['fullname', 'user', 'paiduntil']
    queryset_select_related = ['user', ]
    defaultsort = [['paiduntil', 'desc'], ['fullname', 'asc']]
    allow_email = True

    class Meta:
        model = Member
        fields = ['fullname', 'country', 'listed', 'country_exception',
                  'membersince', 'paiduntil', 'expiry_warning_sent', ]

    # Fieldset key fixed from 'Legend' to 'legend' for consistency with
    # BackendMeetingForm.fieldsets in this module.
    fieldsets = [
        {'id': 'user_info', 'legend': 'User information', 'fields': ['fullname', 'country', 'listed', ]},
        {'id': 'admin_info', 'legend': 'Administrative', 'fields': ['country_exception', ]},
        {'id': 'date_info', 'legend': 'Date info', 'fields': ['membersince', 'paiduntil', 'expiry_warning_sent', ]},
    ]
    # Dates are maintained by the payment/expiry machinery, not edited by hand.
    readonly_fields = ['membersince', 'paiduntil', 'expiry_warning_sent', ]
    linked_objects = OrderedDict({
        'log': MemberLogManager(),
    })

    @classmethod
    def get_column_filters(cls, conference):
        return {
            'Paid until': [],  # Empty list triggers the option to choose empty/not empty
        }
class BackendMeetingReminderForm(BackendForm):
    """Admin form for scheduling a reminder email for a meeting.

    Reminders that have already been sent (``sentat`` set) are immutable.
    """
    helplink = 'meetings'
    list_fields = ['sendat', 'sentat', ]
    readonly_fields = ['sentat', ]

    class Meta:
        model = MeetingReminder
        fields = ['sendat', 'sentat', ]

    def clean_sendat(self):
        # Leftover debug print() statements removed from this validator.
        sendat = self.cleaned_data.get('sendat', None)
        if sendat:
            # Require at least a 30 minute margin before the meeting starts,
            # and never allow a timestamp in the past.
            if sendat > self.instance.meeting.dateandtime - timedelta(minutes=30):
                raise ValidationError("Reminder must be set at least 30 minutes before the meeting starts!")
            if sendat < timezone.now():
                raise ValidationError("This timestamp is in the past!")
        return sendat

    def clean(self):
        d = super().clean()
        # A reminder that has already been delivered can no longer be edited.
        if self.instance.sentat:
            raise ValidationError("Cannot edit a reminder that has already been sent")
        return d
class MeetingReminderManager(object):
    """Linked-object manager exposing a meeting's reminders in the backend UI."""
    title = 'Reminders'
    singular = 'Reminder'
    can_add = True

    def get_list(self, instance):
        # Returns (id, label, flag) tuples; the flag marks already-sent reminders.
        return [
            (r.id, "{} ({})".format(datetime_string(r.sendat),
                                    time_sinceoruntil(r.sendat)),
             r.sentat is not None
             ) for r in MeetingReminder.objects.filter(meeting=instance)]

    def get_form(self, obj, POST):
        return BackendMeetingReminderForm

    def METHOD_NAME(self, masterobj, subid):
        # Look up one reminder, scoped to the parent meeting so a reminder
        # belonging to another meeting can never be fetched.
        return MeetingReminder.objects.get(meeting=masterobj, pk=subid)

    def get_instancemaker(self, masterobj):
        # Factory creating a new reminder pre-bound to the parent meeting.
        return lambda: MeetingReminder(meeting=masterobj)
class BackendMeetingForm(BackendForm):
    """Admin form for creating/editing a meeting (IRC-bot or web based)."""
    helplink = 'meetings'
    list_fields = ['name', 'dateandtime', 'meetingtype', 'state']
    linked_objects = OrderedDict({
        'reminders': MeetingReminderManager(),
    })
    extrabuttons = [
        ('View meeting log', 'log/'),
        ('View attendees', 'attendees/'),
    ]

    class Meta:
        model = Meeting
        fields = ['name', 'dateandtime', 'allmembers', 'members', 'meetingtype', 'meetingadmins', 'botname', ]

    fieldsets = [
        {'id': 'meeting_info', 'legend': 'Meeting information', 'fields': ['name', 'dateandtime', 'allmembers', 'members']},
        {'id': 'meeting_impl', 'legend': 'Meeting implementation', 'fields': ['meetingtype', 'meetingadmins', 'botname']},
    ]
    selectize_multiple_fields = {
        'members': MemberLookup(),
        'meetingadmins': MemberLookup(),
    }

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Remove extra buttons unless we're in a web meeting and this web meeting has started
        # (NOTE(review): state == 0 appears to mean "not started" -- confirm against Meeting model)
        if self.instance:
            if self.instance.meetingtype != MeetingType.WEB or self.instance.state == 0:
                self.extrabuttons = []
        else:
            self.extrabuttons = []

    def clean(self):
        """Cross-field validation: botname/meetingadmins requirements depend on type."""
        d = super().clean()
        if d.get('meetingtype', None) == MeetingType.WEB:
            # Web meetings are moderated by admins, not a bot.
            if d['botname']:
                self.add_error('botname', 'Bot name should not be specified for web meetings')
            if not d['meetingadmins']:
                self.add_error('meetingadmins', 'Meeting administrator(s) must be specified for web meetings')
        elif d.get('meetingtype', None) == MeetingType.IRC:
            # IRC meetings are driven by the bot; admins don't apply.
            if not d['botname']:
                self.add_error('botname', 'Bot name must be specified for IRC meetings')
            if d['meetingadmins']:
                self.add_error('meetingadmins', 'Meeting administrator(s) cannot be specified for IRC meetings')
        return d

    def clean_meetingtype(self):
        # Web meetings need the websocket server configured, and the type
        # cannot be changed once the meeting has started.
        if self.cleaned_data.get('meetingtype', None) == MeetingType.WEB and not settings.MEETINGS_WS_BASE_URL:
            raise ValidationError("Web meetings server is not configured in local_settings.py")
        if self.instance and self.instance.state > 0 and self.instance.meetingtype != self.cleaned_data['meetingtype']:
            raise ValidationError("Cannot change the type of a meeting that has already started")
        return self.cleaned_data.get('meetingtype', None)
class BackendMemberSendEmailForm(django.forms.Form):
    """Form for composing a bulk email to a selected set of members."""
    helplink = 'membership'
    _from = django.forms.CharField(max_length=128, disabled=True, label="From")
    subject = django.forms.CharField(max_length=128, required=True)
    recipients = django.forms.Field(widget=StaticTextWidget, required=False)
    message = django.forms.CharField(widget=EmailTextWidget, required=True)
    # Hidden list of target member ids carried between the compose and confirm steps.
    idlist = django.forms.CharField(widget=django.forms.HiddenInput, required=True)
    confirm = django.forms.BooleanField(label="Confirm", required=False)

    def __init__(self, *args, **kwargs):
        super(BackendMemberSendEmailForm, self).__init__(*args, **kwargs)
        # Only show the confirm checkbox once both subject and message are filled in.
        if not (self.data.get('subject') and self.data.get('message')):
            del self.fields['confirm']

    def clean_confirm(self):
        # Force the sender to explicitly tick the box before the mail goes out.
        if not self.cleaned_data['confirm']:
            raise ValidationError("Please check this box to confirm that you are really sending this email! There is no going back!")
299,642 | process loader | import collections
import os.path
import pickle
from pprint import pprint
from typing import Optional
import hydra
import numpy as np
import pkg_resources
import torch
from hydra.utils import instantiate
from omegaconf import DictConfig
from tqdm import tqdm
from super_gradients import init_trainer, setup_device
from super_gradients.training import utils as core_utils, models, dataloaders
from super_gradients.training.metrics import PoseEstimationMetrics
from super_gradients.training.metrics.pose_estimation_utils import compute_oks
from super_gradients.training.models.pose_estimation_models.dekr_hrnet import DEKRHorisontalFlipWrapper
from super_gradients.training.utils import get_param
def remove_starting_module(key: str):
    """Strip a leading ``module.`` prefix (added by DataParallel-style wrappers) from *key*."""
    prefix = "module."
    return key[len(prefix):] if key.startswith(prefix) else key
def METHOD_NAME(model, loader, post_prediction_callback, sigmas, metric: Optional[PoseEstimationMetrics] = None):
    """Run inference over *loader* and collect per-image rescoring samples.

    Each sample pairs the decoded predicted poses/scores with the best OKS
    overlap against the ground truth, plus the raw GT annotations.

    :param model: pose model, already on GPU and in eval mode
    :param loader: iterable yielding (inputs, targets, extras) batches
    :param post_prediction_callback: decoder producing (poses, scores) per image
    :param sigmas: per-keypoint OKS sigmas tensor
    :param metric: optional PoseEstimationMetrics accumulator, updated in place
    :return: list of per-image sample dicts
    """
    samples = []
    for inputs, targets, extras in tqdm(loader):
        # Inference only: no gradients, mixed precision enabled.
        with torch.no_grad(), torch.cuda.amp.autocast(True):
            predictions = model(inputs.cuda(non_blocking=True))
            all_poses, all_scores = post_prediction_callback(predictions)
            if metric is not None:
                metric.update(predictions, targets, **extras)
        batch_size = len(inputs)
        for image_index in range(batch_size):
            pred_poses = all_poses[image_index]  # [M, J, 3]
            pred_scores = all_scores[image_index]  # [M]
            gt_iscrowd = extras["gt_iscrowd"][image_index]
            gt_keypoints = extras["gt_joints"][image_index]  # [N, J, 3]
            gt_bboxes = extras["gt_bboxes"][image_index]  # [N, 4]
            gt_areas = extras["gt_areas"][image_index]  # [N]
            # Filter out poses with no visible keypoints
            if len(gt_keypoints) > 0 and len(pred_poses) > 0:
                gt_keypoints_xy = gt_keypoints[:, :, 0:2]
                gt_keypoints_visibility = gt_keypoints[:, :, 2]
                iou = compute_oks(
                    pred_joints=torch.from_numpy(pred_poses),
                    gt_joints=torch.from_numpy(gt_keypoints_xy),
                    gt_keypoint_visibility=torch.from_numpy(gt_keypoints_visibility),
                    sigmas=sigmas,
                    gt_bboxes=torch.from_numpy(gt_bboxes),
                )
                # Here we are not interested in solving the MxN matching problem, but rather
                # in getting the largest IoU for each predicted pose with the ground truth poses.
                max_iou = iou.max(axis=1).values.numpy()  # [M]
            else:
                # No GT (or no predictions): every predicted pose gets OKS 0.
                max_iou = np.zeros(len(pred_poses), dtype=np.float32)
            sample = {
                "pred_poses": pred_poses,
                "pred_scores": pred_scores,
                # Targets
                "iou": max_iou,
                #
                "gt_bboxes": gt_bboxes,
                "gt_joints": gt_keypoints,
                "gt_iscrowd": gt_iscrowd,
                "gt_areas": gt_areas,
            }
            samples.append(sample)
    return samples
@hydra.main(
    config_path=pkg_resources.resource_filename("super_gradients.recipes", ""), config_name="script_generate_rescoring_data_dekr_coco2017", version_base="1.2"
)
def main(cfg: DictConfig) -> None:
    """Generate train/valid rescoring datasets for a DEKR pose model.

    Runs inference on both dataset splits, computes per-pose OKS targets,
    and pickles the samples into ``cfg.rescoring_data_dir``.
    """
    setup_device(
        device=core_utils.get_param(cfg, "device"),
        multi_gpu=core_utils.get_param(cfg, "multi_gpu"),
        num_gpus=core_utils.get_param(cfg, "num_gpus"),
    )
    sigmas = torch.from_numpy(np.array(cfg.dataset_params.oks_sigmas))
    # Use the validation transforms for the train split as well: no
    # augmentation is wanted when producing rescoring targets.
    cfg.dataset_params.train_dataset_params.transforms = cfg.dataset_params.val_dataset_params.transforms
    cfg = instantiate(cfg)
    # Temporary hack to remove "module." from model state dict saved in checkpoint
    # NOTE(review): this rewrites the checkpoint file on disk as a side effect.
    if cfg.checkpoint_params.checkpoint_path is not None:
        checkpoint = torch.load(cfg.checkpoint_params.checkpoint_path, map_location="cpu")
        if "ema_net" in checkpoint:
            checkpoint["ema_net"] = collections.OrderedDict((remove_starting_module(k), v) for k, v in checkpoint["ema_net"].items())
        if "net" in checkpoint:
            checkpoint["net"] = collections.OrderedDict((remove_starting_module(k), v) for k, v in checkpoint["net"].items())
        torch.save(checkpoint, cfg.checkpoint_params.checkpoint_path)
    # BUILD NETWORK
    model = models.get(
        model_name=cfg.architecture,
        num_classes=cfg.arch_params.num_classes,
        arch_params=cfg.arch_params,
        strict_load=cfg.checkpoint_params.strict_load,
        pretrained_weights=cfg.checkpoint_params.pretrained_weights,
        checkpoint_path=cfg.checkpoint_params.checkpoint_path,
    )
    # model = DEKRWrapper(model, apply_sigmoid=True).cuda().eval()
    # Horizontal-flip test-time augmentation wrapper.
    model = DEKRHorisontalFlipWrapper(model, cfg.dataset_params.flip_indexes, apply_sigmoid=True).cuda().eval()
    post_prediction_callback = cfg.post_prediction_callback
    pose_estimation_metric = PoseEstimationMetrics(
        post_prediction_callback=post_prediction_callback,
        max_objects_per_image=post_prediction_callback.max_num_people,
        num_joints=cfg.dataset_params.num_joints,
        oks_sigmas=cfg.dataset_params.oks_sigmas,
    )
    os.makedirs(cfg.rescoring_data_dir, exist_ok=True)
    # Validation split first (also reports pose metrics as a sanity check).
    val_dataloader = dataloaders.get(
        name=get_param(cfg, "val_dataloader"),
        dataset_params=cfg.dataset_params.val_dataset_params,
        dataloader_params=cfg.dataset_params.val_dataloader_params,
    )
    valid_samples = METHOD_NAME(model, val_dataloader, post_prediction_callback, sigmas, metric=pose_estimation_metric)
    with open(os.path.join(cfg.rescoring_data_dir, "rescoring_data_valid.pkl"), "wb") as f:
        pickle.dump(valid_samples, f)
    print("Pose estimation metrics on validation set:")
    pprint(pose_estimation_metric.compute())
    # Train split (no metric accumulation needed here).
    train_dataloader = dataloaders.get(
        name=get_param(cfg, "train_dataloader"),
        dataset_params=cfg.dataset_params.train_dataset_params,
        dataloader_params=cfg.dataset_params.train_dataloader_params,
    )
    train_samples = METHOD_NAME(model, train_dataloader, post_prediction_callback, sigmas)
    with open(os.path.join(cfg.rescoring_data_dir, "rescoring_data_train.pkl"), "wb") as f:
        pickle.dump(train_samples, f)
    print(f"Train data for rescoring saved to {cfg.rescoring_data_dir}")
def run():
    """Entry point: initialize the SuperGradients trainer, then run `main`."""
    init_trainer()
    main()
if __name__ == "__main__":
run() |
299,643 | configure duo access | #!/usr/bin/env python
"""Legacy multi-factor authentication methods module."""
from hvac.api.vault_api_base import VaultApiBase
from hvac import exceptions, utils
SUPPORTED_MFA_TYPES = [
"duo",
]
SUPPORTED_AUTH_METHODS = ["ldap", "okta", "radius", "userpass"]
class LegacyMfa(VaultApiBase):
    """Multi-factor authentication Auth Method (API).
    .. warning::
        This class's methods correspond to a legacy / unsupported set of Vault API routes. Please see the reference link
        for additional context.
    Reference: https://www.vaultproject.io/docs/auth/mfa.html
    """

    def configure(self, mount_point, mfa_type="duo", force=False):
        """Configure MFA for a supported method.
        This endpoint allows you to turn on multi-factor authentication with a given backend.
        Currently only Duo is supported.
        Supported methods:
            POST: /auth/{mount_point}/mfa_config. Produces: 204 (empty body)
        :param mount_point: The "path" the method/backend was mounted on.
        :type mount_point: str | unicode
        :param mfa_type: Enables MFA with given backend (available: duo)
        :type mfa_type: str | unicode
        :param force: If `True`, make the `mfa_config` request regardless of circumstance. If `False` (the default), verify
            the provided `mount_point` is available and one of the types of methods supported by this feature.
        :type force: bool
        :return: The response of the configure MFA request.
        :rtype: requests.Response
        """
        # Validate against the module-level supported-type list so this check
        # stays consistent with the error message below (was hardcoded to "duo").
        if mfa_type not in SUPPORTED_MFA_TYPES and not force:
            # The situation described via this exception is not likely to change in the future.
            # However we provided that flexibility here just in case.
            error_msg = 'Unsupported mfa_type argument provided "{arg}", supported types: "{mfa_types}"'
            raise exceptions.ParamValidationError(
                error_msg.format(
                    mfa_types=",".join(SUPPORTED_MFA_TYPES),
                    arg=mfa_type,
                )
            )
        params = {
            "type": mfa_type,
        }
        api_path = utils.format_url(
            "/v1/auth/{mount_point}/mfa_config", mount_point=mount_point
        )
        return self._adapter.post(
            url=api_path,
            json=params,
        )

    def read_configuration(self, mount_point):
        """Read the MFA configuration.
        Supported methods:
            GET: /auth/{mount_point}/mfa_config. Produces: 200 application/json
        :param mount_point: The "path" the method/backend was mounted on.
        :type mount_point: str | unicode
        :return: The JSON response of the read_configuration request.
        :rtype: dict
        """
        api_path = utils.format_url(
            "/v1/auth/{mount_point}/mfa_config",
            mount_point=mount_point,
        )
        return self._adapter.get(url=api_path)

    def METHOD_NAME(self, mount_point, host, integration_key, secret_key):
        """Configure the access keys and host for Duo API connections.
        To authenticate users with Duo, the backend needs to know what host to connect to and must authenticate with an
        integration key and secret key. This endpoint is used to configure that information.
        Supported methods:
            POST: /auth/{mount_point}/duo/access. Produces: 204 (empty body)
        :param mount_point: The "path" the method/backend was mounted on.
        :type mount_point: str | unicode
        :param host: Duo API host
        :type host: str | unicode
        :param integration_key: Duo integration key
        :type integration_key: str | unicode
        :param secret_key: Duo secret key
        :type secret_key: str | unicode
        :return: The response of the `configure_duo_access` request.
        :rtype: requests.Response
        """
        params = {
            "host": host,
            "ikey": integration_key,
            "skey": secret_key,
        }
        api_path = utils.format_url(
            "/v1/auth/{mount_point}/duo/access",
            mount_point=mount_point,
        )
        return self._adapter.post(
            url=api_path,
            json=params,
        )

    def configure_duo_behavior(
        self, mount_point, push_info=None, user_agent=None, username_format="%s"
    ):
        """Configure Duo second factor behavior.
        This endpoint allows you to configure how the original auth method username maps to the Duo username by
        providing a template format string.
        Supported methods:
            POST: /auth/{mount_point}/duo/config. Produces: 204 (empty body)
        :param mount_point: The "path" the method/backend was mounted on.
        :type mount_point: str | unicode
        :param push_info: A string of URL-encoded key/value pairs that provides additional context about the
            authentication attempt in the Duo Mobile app
        :type push_info: str | unicode
        :param user_agent: User agent to connect to Duo (default is empty string `""`)
        :type user_agent: str | unicode
        :param username_format: Format string given auth method username as argument to create Duo username
            (default `%s`)
        :type username_format: str | unicode
        :return: The response of the `configure_duo_behavior` request.
        :rtype: requests.Response
        """
        params = {
            "username_format": username_format,
        }
        # Only send the optional fields when the caller supplied them.
        if push_info is not None:
            params["push_info"] = push_info
        if user_agent is not None:
            params["user_agent"] = user_agent
        api_path = utils.format_url(
            "/v1/auth/{mount_point}/duo/config",
            mount_point=mount_point,
        )
        return self._adapter.post(
            url=api_path,
            json=params,
        )

    def read_duo_behavior_configuration(self, mount_point):
        """Read the Duo second factor behavior configuration.
        Supported methods:
            GET: /auth/{mount_point}/duo/config. Produces: 200 application/json
        :param mount_point: The "path" the method/backend was mounted on.
        :type mount_point: str | unicode
        :return: The JSON response of the `read_duo_behavior_configuration` request.
        :rtype: dict
        """
        api_path = utils.format_url(
            "/v1/auth/{mount_point}/duo/config",
            mount_point=mount_point,
        )
        return self._adapter.get(url=api_path)
299,644 | generate expected files | import re
import os
import hou
import pyblish.api
from openpype.hosts.houdini.api.lib import (
evalParmNoFrame,
get_color_management_preferences
)
from openpype.hosts.houdini.api import (
colorspace
)
class CollectVrayROPRenderProducts(pyblish.api.InstancePlugin):
    """Collect Vray Render Products
    Collects the instance.data["files"] for the render products.
    Provides:
        instance    ->     files
    """

    label = "VRay ROP Render Products"
    order = pyblish.api.CollectorOrder + 0.4
    hosts = ["houdini"]
    families = ["vray_rop"]

    def process(self, instance):
        rop = hou.node(instance.data.get("instance_node"))
        # Collect chunkSize
        chunk_size_parm = rop.parm("chunkSize")
        if chunk_size_parm:
            chunk_size = int(chunk_size_parm.eval())
            instance.data["chunkSize"] = chunk_size
            self.log.debug("Chunk Size: %s" % chunk_size)
        default_prefix = evalParmNoFrame(rop, "SettingsOutput_img_file_path")
        render_products = []
        # TODO: add render elements if render element
        beauty_product = self.get_beauty_render_product(default_prefix)
        render_products.append(beauty_product)
        files_by_aov = {
            "RGB Color": self.METHOD_NAME(instance,
                                          beauty_product)}
        if instance.data.get("RenderElement", True):
            render_element = self.get_render_element_name(rop, default_prefix)
            if render_element:
                for aov, renderpass in render_element.items():
                    render_products.append(renderpass)
                    files_by_aov[aov] = self.METHOD_NAME(instance, renderpass)  # noqa
        for product in render_products:
            self.log.debug("Found render product: %s" % product)
        filenames = list(render_products)
        instance.data["files"] = filenames
        instance.data["renderProducts"] = colorspace.ARenderProduct()
        # For now by default do NOT try to publish the rendered output
        instance.data["publishJobState"] = "Suspended"
        instance.data["attachTo"] = []  # stub required data
        if "expectedFiles" not in instance.data:
            instance.data["expectedFiles"] = list()
        instance.data["expectedFiles"].append(files_by_aov)
        self.log.debug("expectedFiles:{}".format(files_by_aov))
        # update the colorspace data
        colorspace_data = get_color_management_preferences()
        instance.data["colorspaceConfig"] = colorspace_data["config"]
        instance.data["colorspaceDisplay"] = colorspace_data["display"]
        instance.data["colorspaceView"] = colorspace_data["view"]

    def get_beauty_render_product(self, prefix, suffix="<reName>"):
        """Return the beauty output filename if render element enabled
        """
        # Remove aov suffix from the product: `prefix.aov_suffix` -> `prefix`
        aov_parm = ".{}".format(suffix)
        return prefix.replace(aov_parm, "")

    def get_render_element_name(self, node, prefix, suffix="<reName>"):
        """Return a mapping of AOV name -> output filename for enabled
        render elements, substituting the AOV name for *suffix* in *prefix*.
        """
        render_element_dict = {}
        # need a rewrite
        re_path = node.evalParm("render_network_render_channels")
        if re_path:
            node_children = hou.node(re_path).children()
            for element in node_children:
                if element.shaderName() != "vray:SettingsRenderChannels":
                    aov = str(element)
                    render_product = prefix.replace(suffix, aov)
                    render_element_dict[aov] = render_product
        return render_element_dict

    def METHOD_NAME(self, instance, path):
        """Create expected files in instance data"""
        # Renamed locals to avoid shadowing the `dir` and `file` builtins.
        dirname = os.path.dirname(path)
        fname = os.path.basename(path)
        if "#" in fname:
            def replace(match):
                # A run of '#' placeholders maps to a zero-padded frame token.
                return "%0{}d".format(len(match.group()))
            fname = re.sub("#+", replace, fname)
        # No frame placeholder at all: single, non-sequential output file.
        if "%" not in fname:
            return path
        expected_files = []
        start = instance.data["frameStart"]
        end = instance.data["frameEnd"]
        for i in range(int(start), (int(end) + 1)):
            expected_files.append(
                os.path.join(dirname, (fname % i)).replace("\\", "/"))
        return expected_files
299,645 | create subcomponents | """Base module ofr parsing extensions."""
import os
import os.path as op
from pyrevit.coreutils import get_all_subclasses
from pyrevit.coreutils.logger import get_logger
#pylint: disable=W0703,C0302,C0103
mlogger = get_logger(__name__)
def _get_discovered_comps(comp_path, cmp_types_list):
    """Return instances of the given component types that match *comp_path*.

    :param comp_path: file or directory path to test
    :param cmp_types_list: component classes to try against the path
    :return: list of created component instances
    """
    discovered_cmps = []
    mlogger.debug('Testing _get_component(s) on: %s ', comp_path)
    # comp_path might be a file or a dir,
    # but its name should not start with . or _:
    for cmp_type in cmp_types_list:
        mlogger.debug('Testing sub_directory %s for %s', comp_path, cmp_type)
        # if cmp_class can be created for this sub-dir, then add to list
        if cmp_type.matches(comp_path):
            # BUG FIX: was `cmp_path=cmp_path`, which raised NameError --
            # the local variable is `comp_path`.
            component = cmp_type(cmp_path=comp_path)
            discovered_cmps.append(component)
            mlogger.debug('Successfuly created component: %s from: %s',
                          component, comp_path)
    return discovered_cmps
def METHOD_NAME(search_dir,
                cmp_types_list,
                create_from_search_dir=False):
    """
    Parses the provided directory and returns a list of objects of the
    types in cmp_types_list.
    Arguments:
        search_dir: directory to parse
        cmp_types_list: This method checks the subfolders in search_dir
                        against the _get_component types provided
                        in this list.
        create_from_search_dir: if True, test search_dir itself instead of
                                its children.
    Example:
        _create_subcomponents(search_dir,
                              [LinkButton, PushButton, or ToggleButton])
        this method creates LinkButton, PushButton, or ToggleButton for
        the parsed sub-directories under search_dir with matching .type_id
        identifiers in their names. (e.g. "folder.LINK_BUTTON_POSTFIX")
    Returns:
        list of created classes of types provided in cmp_types_list
    """
    sub_cmp_list = []
    if not create_from_search_dir:
        # Test every child entry of search_dir (skipping hidden/private names).
        mlogger.debug('Searching directory: %s for components of type: %s',
                      search_dir, cmp_types_list)
        for file_or_dir in os.listdir(search_dir):
            full_path = op.join(search_dir, file_or_dir)
            if not file_or_dir.startswith(('.', '_')):
                sub_cmp_list.extend(_get_discovered_comps(full_path,
                                                          cmp_types_list))
            else:
                mlogger.debug('Skipping _get_component. '
                              'Name can not start with . or _: %s', full_path)
    else:
        # Test search_dir itself as a component candidate.
        sub_cmp_list.extend(_get_discovered_comps(search_dir,
                                                  cmp_types_list))
    return sub_cmp_list
def _get_subcomponents_classes(parent_classes):
    """Find available subcomponents for given parent types."""
    all_subclasses = get_all_subclasses(parent_classes)
    return [subclass for subclass in all_subclasses if subclass.type_id]
def _parse_for_components(component):
    """Recursively parses _get_component.directory for components of type
    _get_component.allowed_sub_cmps. This method uses get_all_subclasses() to
    get a list of all subclasses of _get_component.allowed_sub_cmps type.
    This ensures that if any new type of component_class is added later,
    this method does not need to be updated as the new sub-class will be
    listed by .__subclasses__() method of the parent class and this method
    will check the directory for its .type_id
    """
    for new_cmp in METHOD_NAME(
            component.directory,
            _get_subcomponents_classes(component.allowed_sub_cmps)):
        # add the successfully created _get_component to the
        # parent _get_component
        component.add_component(new_cmp)
        if new_cmp.is_container:
            # Recursive part: parse each sub-_get_component
            # for its allowed sub-sub-components.
            _parse_for_components(new_cmp)
def parse_comp_dir(comp_path, comp_class):
    """Create components of *comp_class* (and its subtypes) directly from ``comp_path``."""
    component_types = _get_subcomponents_classes([comp_class])
    return METHOD_NAME(comp_path, component_types, create_from_search_dir=True)
def get_parsed_extension(extension):
    """
    Parses package directory and creates and adds components to the package
    object. Each package object is the root to a tree of components that exists
    under that package. (e.g. tabs, buttons, ...) sub components of package
    can be accessed by iterating the _get_component.
    See _basecomponents for types.

    :param extension: extension object whose directory will be parsed in place
    :return: the same extension object, now populated with components
    """
    _parse_for_components(extension)
    return extension
def parse_dir_for_ext_type(root_dir, parent_cmp_type):
    """
    Parses root_dir and return a list of objects of type parent_cmp_type
    for installed extensions. The package objects won't be parsed at this level.
    This is useful for collecting basic info on an extension type
    for cache checking or updating extensions using their directory paths.
    """
    # making sure the provided directory exists.
    # This is mainly for the user defined package directories
    if not op.exists(root_dir):
        mlogger.debug('Extension search directory does not exist: %s', root_dir)
        return []
    # try creating extensions in given directory
    ext_data_list = []
    mlogger.debug('Parsing directory for extensions of type: %s',
                  parent_cmp_type)
    for ext_data in METHOD_NAME(root_dir, [parent_cmp_type]):
        mlogger.debug('Extension directory found: %s', ext_data)
        ext_data_list.append(ext_data)
    return ext_data_list
299,646 | on detected language | #########################################
# AzureTranslator.py
# description: language translation service
# more info @: http://myrobotlab.org/service/AzureTranslator
#########################################
# you will need a azure translator setup
# this will guid you through the process
# https://docs.microsoft.com/en-us/azure/cognitive-services/translator/quickstart-translator?tabs=csharp
# we will connect the following services together
python = runtime.start("python", "Python")
brain = runtime.start("brain", "ProgramAB")
out_translator = runtime.start("out_translator", "AzureTranslator")
mouth = runtime.start("mouth", "MarySpeech")
# lets set blocking on the speech
mouth.setBlocking(False)
mouth.setLanguage("en")
# load your key here - only need to do it once
# then remove this line completely to keep it secure
# out_translator.setKey("xxxxxxxxxxxxxxxxxxxxxxxx")
out_translator.setLocation("eastus")
out_translator.setFrom("en")
out_translator.setTo("en")
# attach the mouth to the out_translator
mouth.attach(out_translator)
# set the mouth to an appropriate language or voice
# mouth.setVoice('Pierre')
def simple_translate(lang, text):
    """Translate a demo sentence into *lang* and speak it.

    Uses the module-level `out_translator` and `mouth` services.
    """
    print('simple_translate ' + lang + ' ' + text)
    out_translator.setTo(lang)
    # switching voice for mary speech can take a very long time :(
    mouth.setLanguage(lang)
    voice_name = mouth.getVoice().name
    translated = out_translator.translate('now in ' + mouth.getVoice().getLocale().getDisplayLanguage() + ', my name is ' + voice_name + ', ' + text)
    print(voice_name + ' translated to ' + translated)
    # brief pause so speech of consecutive calls does not overlap
    sleep(1)
text = "Hello ! let's make some robots today !"
# mouth.speak('i will translate ' + text)
simple_translate('en', text)
simple_translate('fr', text)
simple_translate('it', text)
simple_translate('de', text)
# lets connect the out_translator to the brain
# the brain will listen to keyboard input and when
# it publishes a response, the response will be sent to the
# out_translator
brain.attachTextListener(out_translator)
brain.startSession('GroG','Alice')
# we'll set our mouth and out_translator to french
out_translator.setTo("fr")
mouth.setLanguage("fr")
# setup a callback that gets the translated response
def on_translated(text):
    # Callback subscribed to out_translator.publishText: receives the
    # translated response text.
    print('translated response is ' + text)
python.subscribe('out_translator', 'publishText', 'python', 'on_translated')
# now we can talk to the brain in english and it will respond in french
english_response = brain.getResponse("hello, how are you?")
print('non translated response is ' + str(english_response))
english_response = brain.getResponse("what can you do?")
print('non translated response is ' + str(english_response))
english_response = brain.getResponse("what time is it?")
print('non translated response is ' + str(english_response))
# create a new translator for incoming text
# we will detect language and translate to english
in_translator = runtime.start("in_translator", "AzureTranslator")
in_translator.setDetect(True)
in_translator.setTo("en")
# attach the incoming translator to the brain
brain.attachTextPublisher(in_translator)
# subscribe to language detection
python.subscribe('in_translator', 'publishDetectedLanguage')
# Dynamically switching languages based on detected input
# when a language is detected we automatically
# switch our voice and translate "to" setting
# so if the bot is asked in french a question - it
# should reply in french, if asked in italian it
# will reply in italian, but all languages are
# using the same english aiml
def METHOD_NAME(lang):
    # Callback for the 'publishDetectedLanguage' subscription above.
    # detect incoming language and
    # set appropriate response voice
    print('setting mouth voice to ' + lang)
    mouth.setLanguage(lang)
    print('setting out_translator to ' + lang)
    out_translator.setTo(lang)
# Exercise the incoming translator with french, italian and english input;
# the sleeps give the asynchronous detection callback time to fire.
# now that we have an incoming translator detecting
english_response = in_translator.translate("Où habitez-vous?")
print('in translated response is ' + str(english_response))
sleep(5)
# now that we have an incoming translator detecting
english_response = in_translator.translate("cosa sai fare?")
print('in translated response is ' + str(english_response))
sleep(5)
# now that we have an incoming translator detecting
english_response = in_translator.translate("what can you do?")
print('in translated response is ' + str(english_response))
|
299,647 | performance get stats | import os
from dataclasses import dataclass
from signal import Signals
from typing import Final
import psutil
from flask import abort, render_template, request
from files.helpers.config.const import PERMS
from files.helpers.time import format_datetime
from files.helpers.wrappers import admin_level_required
from files.__main__ import app
PROCESS_NAME: Final[str] = "gunicorn"
'''
The name of the master and worker processes
'''
INIT_PID: Final[int] = 1
'''
The PID of the init process. Used to check an edge case for orphaned workers.
'''
MEMORY_RSS_WARN_LEVELS_MASTER: dict[int, str] = {
0: '',
50 * 1024 * 1024: 'text-warn',
75 * 1024 * 1024: 'text-danger',
}
'''
Levels to warn for in RAM memory usage for the master process. The master
process shouldn't be using much RAM at all since all it basically does is
orchestrate workers.
'''
MEMORY_RSS_WARN_LEVELS_WORKER: dict[int, str] = {
0: '',
200 * 1024 * 1024: 'text-warn',
300 * 1024 * 1024: 'text-danger',
}
'''
Levels to warn for in RAM memory usage. There are no warning levels for VM
usage because Python seems to intentionally overallocate (presumably to make
the interpreter faster) and doesn't tend to touch many of its owned pages.
'''
@dataclass(frozen=True, slots=True)
class RenderedPerfInfo:
    """Snapshot of one gunicorn process, rendered on the admin memory page."""
    pid: int
    started_at_utc: float
    memory_rss: int
    memory_vms: int

    @classmethod
    def from_process(cls, p: psutil.Process) -> "RenderedPerfInfo":
        """Build a snapshot from a live process handle in one syscall batch."""
        with p.oneshot():
            info = p.memory_info()
            return cls(
                pid=p.pid,
                started_at_utc=p.create_time(),
                memory_rss=info.rss,
                memory_vms=info.vms,
            )

    @property
    def is_master(self) -> bool:
        """True when this snapshot describes the gunicorn master process."""
        parent = os.getppid()
        return parent != INIT_PID and self.pid == parent

    @property
    def is_current(self) -> bool:
        """True when this snapshot describes the worker serving this request."""
        return os.getpid() == self.pid

    @property
    def memory_rss_css_class(self) -> str:
        """CSS class of the highest warn threshold at or below memory_rss."""
        thresholds: dict[int, str] = (
            MEMORY_RSS_WARN_LEVELS_MASTER if self.is_master
            else MEMORY_RSS_WARN_LEVELS_WORKER)
        css_class = ''
        for threshold, candidate in thresholds.items():
            if self.memory_rss >= threshold:
                css_class = candidate
            else:
                return css_class
        return css_class

    @property
    def started_at_utc_str(self) -> str:
        """Process start time formatted for display."""
        return format_datetime(self.started_at_utc)
@app.get('/performance/')
@admin_level_required(PERMS['PERFORMANCE_STATS'])
def METHOD_NAME(v):
    """Render the admin memory page with one entry per gunicorn process."""
    vm_stats = psutil.virtual_memory()
    snapshots = {}
    for proc in psutil.process_iter():
        if proc.name() == PROCESS_NAME:
            snapshots[proc.pid] = RenderedPerfInfo.from_process(proc)
    return render_template('admin/performance/memory.html', v=v,
                           processes=snapshots, system_vm=vm_stats)
def _signal_master_process(signal: int) -> None:
    """Send *signal* to the gunicorn master (this worker's parent process)."""
    master_pid = os.getppid()
    # shouldn't happen but handle the orphaned worker case just in case
    if master_pid == INIT_PID:
        abort(500, "This worker is an orphan!")
    os.kill(master_pid, signal)
def _signal_worker_process(pid: int, signal: int) -> None:
    """Send *signal* to the gunicorn worker with PID *pid*.

    Aborts with 404 when *pid* is not a live gunicorn worker; the master
    process is explicitly excluded so it can never be signalled from here.
    """
    workers: set[int] = {p.pid
                         for p in psutil.process_iter()
                         if p.name() == PROCESS_NAME}
    workers.discard(os.getppid())  # don't allow killing the master process
    # idiomatic membership test (was the non-PEP8 `not pid in workers`)
    if pid not in workers:
        abort(404, "Worker process not found")
    os.kill(pid, signal)
@app.post('/performance/workers/reload')
@admin_level_required(PERMS['PERFORMANCE_RELOAD'])
def performance_reload_workers(v):
    """Ask the gunicorn master (SIGHUP) to gracefully reload all workers."""
    _signal_master_process(Signals.SIGHUP)
    return {'message': 'Sent reload signal successfully'}
@app.post('/performance/workers/<int:pid>/terminate')
@admin_level_required(PERMS['PERMANCE_KILL_PROCESS'.replace('PERMANCE', 'PERFORMANCE')] if False else PERMS['PERFORMANCE_KILL_PROCESS'])
def performance_terminate_worker_process(v, pid:int):
    """Gracefully shut down the worker with the given PID (SIGTERM)."""
    _signal_worker_process(pid, Signals.SIGTERM)
    return {"message": f"Gracefully shut down worker PID {pid} successfully"}
@app.post('/performance/workers/<int:pid>/kill')
@admin_level_required(PERMS['PERFORMANCE_KILL_PROCESS'])
def performance_kill_worker_process(v, pid:int):
    """Forcefully kill the worker with the given PID (SIGKILL, no cleanup)."""
    _signal_worker_process(pid, Signals.SIGKILL)
    return {"message": f"Killed worker with PID {pid} successfully"}
@app.post('/performance/workers/+1')
@app.post('/performance/workers/-1')
@admin_level_required(PERMS['PERFORMANCE_SCALE_UP_DOWN'])
def performance_scale_up_down(v):
    """Ask the gunicorn master to add (+1, SIGTTIN) or drop (-1, SIGTTOU) a worker.

    The direction is derived from the request *path*, not the full URL:
    matching on `request.url` would also treat a `+1` anywhere in the query
    string of the `-1` route as a scale-up.
    """
    scale_up: bool = request.path.endswith('+1')
    _signal_master_process(Signals.SIGTTIN if scale_up else Signals.SIGTTOU)
    return {"message": "Sent signal to master to scale " + ("up" if scale_up else "down")}
299,648 | open |
"""
Embedded browser windows with a minimal frame. This will use the native web
browser library on Mac (WebKit) and Windows (IE); on Linux it will default to
the simpler wx.html.HtmlWindow. This is used in the Phenix GUI to display
documentation.
"""
from __future__ import absolute_import, division, print_function
import wxtbx.bitmaps
import wx.html
import wx
class browser_frame(wx.Frame):
  """Minimal browser window wrapping the best available HTML backend:
  WebKit on Mac, IE (wx.html2.WebView) on Windows, wx.html.HtmlWindow
  elsewhere (via the HtmlPanel adapter)."""
  def __init__(self, *args, **kwds):
    wx.Frame.__init__(self, *args, **kwds)
    szr = wx.BoxSizer(wx.VERTICAL)
    self.SetSizer(szr)
    self._was_shown = False
    self._is_default_viewer = False
    self.viewer = None
    _import_error = None
    # pick the native backend for the current platform, falling back to the
    # pure-wx HtmlPanel when the native library cannot be imported
    if (wx.Platform == '__WXMAC__'):
      try :
        from wx import webkit
      except ImportError as e :
        _import_error = str(e)
      else :
        self.viewer = webkit.WebKitCtrl(self, -1)
        self.Bind(webkit.EVT_WEBKIT_STATE_CHANGED, self.OnChangeState,
          self.viewer)
        self.Bind(webkit.EVT_WEBKIT_BEFORE_LOAD, self.OnBeforeLoad, self.viewer)
    elif (wx.Platform == '__WXMSW__'):
      try :
        from wx.html2 import WebView
      except ImportError as e :
        _import_error = str(e)
      else :
        self.viewer = WebView.New(self)
    if (self.viewer is None) : # fallback (default on Linux)
      self.viewer = HtmlPanel(self)
      self._is_default_viewer = True
    szr.Add(self.viewer, 1, wx.EXPAND)
    self.SetupToolbar()
    self.statusbar = self.CreateStatusBar()
    if (wx.Platform != "__WXMSW__"):
      self.SetInitialSize((1024,640))
    #self.Bind(wx.EVT_WINDOW_CREATE, self.OnShow)
    # create keyboard shortcuts for zoom functions in toolbar
    if (wx.Platform == '__WXMAC__'):
      zoomInId = wx.NewId()
      zoomOutId = wx.NewId()
      zoomDefaultId = wx.NewId()
      self.Bind(wx.EVT_MENU, self.OnZoomIn, id=zoomInId)
      self.Bind(wx.EVT_MENU, self.OnZoomOut, id=zoomOutId)
      self.Bind(wx.EVT_MENU, self.OnZoomDefault, id=zoomDefaultId)
      self.accelerator_table = wx.AcceleratorTable(
        [ (wx.ACCEL_CTRL, ord('='), zoomInId),
          (wx.ACCEL_CTRL, wx.WXK_NUMPAD_ADD, zoomInId),
          (wx.ACCEL_CTRL, ord('-'), zoomOutId),
          (wx.ACCEL_CTRL, wx.WXK_NUMPAD_SUBTRACT, zoomOutId),
          (wx.ACCEL_CTRL, ord('0'), zoomDefaultId),
          (wx.ACCEL_CTRL, wx.WXK_NUMPAD0, zoomDefaultId) ])
      self.SetAcceleratorTable(self.accelerator_table)
    # net zoom level relative to the default; used by OnZoomDefault
    self.zoom_counter = 0

  def SetHomepage(self, url):
    """Set the URL loaded by OnHome and on first show."""
    self.home_url = url

  def SetupToolbar(self):
    """Populate the navigation/zoom toolbar (no-op without the icon library)."""
    if (wxtbx.bitmaps.icon_lib is None):
      return
    self.toolbar = self.CreateToolBar(style=wx.TB_TEXT)
    commands = [
      ("filesystems", "folder_home", "OnHome", "Home"),
      ("actions", "back", "OnBack", "Back"),
      ("actions", "forward", "OnForward", "Forward"),
      ("actions", "stop", "OnStop", "Stop"),
    ]
    # the basic HtmlWindow viewer has no reload method
    if (not self._is_default_viewer):
      commands.append(("actions", "reload", "OnReload", "Reload"))
    for (icon_class, icon_name, fname, label) in commands :
      bmp = wxtbx.bitmaps.fetch_icon_bitmap(icon_class, icon_name, 32)
      tool_button = self.toolbar.AddLabelTool(-1, label, bmp,
        shortHelp=label, kind=wx.ITEM_NORMAL)
      self.Bind(wx.EVT_MENU, getattr(self, fname), tool_button)
    self.toolbar.AddSeparator()
    # buttons for increasing/decreasing text size
    if (wx.Platform == '__WXMAC__'):
      commands = [
        ('actions', 'zoom-in-2', 'Zoom In', 'Increase text size', 'OnZoomIn'),
        ('actions', 'zoom-out-2', 'Zoom Out', 'Decrease text size', 'OnZoomOut'),
        ('actions','zoom-fit-best','Default','Default text size','OnZoomDefault')
      ]
      for (icon_class, icon_name, label, shortHelp, fname) in commands:
        bmp = wxtbx.bitmaps.fetch_icon_bitmap(icon_class, icon_name, 128)
        tool_button = self.toolbar.AddLabelTool(
          -1, label, bmp, shortHelp=shortHelp, kind=wx.ITEM_NORMAL)
        self.Bind(wx.EVT_MENU, getattr(self, fname), tool_button)
      self.toolbar.AddSeparator()
    if (not self._is_default_viewer):
      phenix_bmp = wxtbx.bitmaps.fetch_custom_icon_bitmap("phenix.refine")
      phenix_btn = self.toolbar.AddLabelTool(-1, "PHENIX homepage", phenix_bmp,
        shortHelp="PHENIX homepage", kind=wx.ITEM_NORMAL)
      self.Bind(wx.EVT_MENU, self.OnPhenixWeb, phenix_btn)
    self.toolbar.Realize()

  def LoadURL(self, url):
    """Load *url* in whichever viewer backend is active."""
    self.viewer.LoadURL(url)

  def OnShow(self, event):
    # load the homepage the first time the window becomes visible
    if (not self._was_shown):
      self.LoadURL(self.home_url)
      self._was_shown = True

  def OnHome(self, event):
    self.LoadURL(self.home_url)

  def OnBack(self, event):
    if self.viewer.CanGoBack():
      self.viewer.GoBack()

  def OnForward(self, event):
    if self.viewer.CanGoForward():
      self.viewer.GoForward()

  def OnReload(self, event):
    # the reload method name differs between the WebKit and IE backends
    if (wx.Platform == '__WXMAC__'):
      self.viewer.Reload()
    elif (wx.Platform == '__WXMSW__'):
      self.viewer.RefreshPage()

  def OnStop(self, event):
    self.viewer.Stop()

  def OnZoomIn(self, event=None):
    if (self.viewer.CanIncreaseTextSize()):
      self.viewer.IncreaseTextSize()
      self.zoom_counter += 1

  def OnZoomOut(self, event=None):
    if (self.viewer.CanDecreaseTextSize()):
      self.viewer.DecreaseTextSize()
      self.zoom_counter -= 1

  def OnZoomDefault(self, event):
    """Step the zoom back to the default level."""
    # NOTE(review): relies on zoom_counter staying in sync with the viewer;
    # if the viewer refuses to zoom further the loop would not terminate -
    # confirm the Can*/zoom_counter invariant holds on all backends
    while (self.zoom_counter != 0):
      if (self.zoom_counter > 0):
        self.OnZoomOut()
      else:
        self.OnZoomIn()

  def METHOD_NAME(self, url):
    """Open *url* in the embedded viewer."""
    self.LoadURL(url)

  def OnPhenixWeb(self, event):
    self.LoadURL("http://www.phenix-online.org")

  # XXX Mac only
  def OnChangeState(self, event):
    """Mirror WebKit load-state changes in the status bar."""
    import wx.webkit
    state = event.GetState()
    url = event.GetURL()
    if (state == wx.webkit.WEBKIT_STATE_START):
      self.statusbar.SetStatusText("Opening %s" % url)
    elif (state == wx.webkit.WEBKIT_STATE_TRANSFERRING):
      self.statusbar.SetStatusText("Loading %s" % url)
    elif (state == wx.webkit.WEBKIT_STATE_STOP):
      self.statusbar.SetStatusText("Viewing %s" % url)
    elif (state == wx.webkit.WEBKIT_STATE_FAILED):
      self.statusbar.SetStatusText("Failed loading %s" % url)
    else :
      self.statusbar.SetStatusText("")

  def OnBeforeLoad(self, event):
    pass
    #print event.GetNavigationType()
class HtmlPanel(wx.html.HtmlWindow):
  """
  Adapter class to provide an API equivalent to WebKit/IE
  """
  def Stop(self):
    # nothing to cancel: wx.html.HtmlWindow loads pages synchronously
    pass

  def CanGoForward(self):
    return self.HistoryCanForward()

  def GoForward(self):
    return self.HistoryForward()

  def CanGoBack(self):
    return self.HistoryCanBack()

  def GoBack(self):
    return self.HistoryBack()

  def LoadURL(self, url):
    """Load *url*, honouring an optional '#anchor' fragment."""
    parts = url.split("#")
    self.LoadPage(parts[0])
    if (len(parts) > 1):
      anchor = parts[1]
      if not self.HasAnchor(anchor):
        print("Missing anchor %s" % anchor)
      else :
        # XXX calling self.ScrollToAnchor() directly doesn't work!
        wx.CallAfter(self.ScrollToAnchor, anchor)
if __name__ == "__main__" :
  # Manual smoke test: open the browser frame pointed at the CCI homepage.
  app = wx.App(0)
  frame = browser_frame(None, -1, "wxtbx.browser example")
  # size=(800,600))
  frame.SetHomepage("http://cci.lbl.gov")
  frame.Show()
  app.MainLoop()
299,649 | sort listening history results by play count | from typing import Optional, TypedDict
from sqlalchemy import asc, desc, func, or_
from sqlalchemy.orm.session import Session
from sqlalchemy.sql.functions import coalesce
from src.models.social.aggregate_plays import AggregatePlay
from src.models.tracks.aggregate_track import AggregateTrack
from src.models.tracks.track_with_aggregates import TrackWithAggregates
from src.models.users.user import User
from src.models.users.user_listening_history import UserListeningHistory
from src.queries import response_name_constants
from src.queries.query_helpers import (
SortDirection,
SortMethod,
add_query_pagination,
add_users_to_tracks,
populate_track_metadata,
)
from src.utils import helpers
from src.utils.db_session import get_db_read_replica
class GetUserListeningHistoryArgs(TypedDict):
    """Parsed request arguments for the user listening-history query."""
    # The current user logged in (from route param)
    user_id: int
    # The current user logged in (from query arg)
    current_user_id: int
    # The maximum number of listens to return
    limit: int
    # The offset for the listen history
    offset: int
    # Optional filter for the returned results
    query: Optional[str]
    # Optional sort method for the returned results
    sort_method: Optional[SortMethod]
    sort_direction: Optional[SortDirection]
def get_user_listening_history(args: GetUserListeningHistoryArgs):
    """
    Return the listening history for the user described by *args*.

    Opens a read-replica session and delegates to the private query helper;
    results start from the most recently listened track.
    """
    read_replica = get_db_read_replica()
    with read_replica.scoped_session() as session:
        return _get_user_listening_history(session, args)
def _get_user_listening_history(session: Session, args: GetUserListeningHistoryArgs):
    """Query and hydrate the listening history for args['user_id'].

    Returns [] unless the requesting user is asking for their own history.
    """
    user_id = args["user_id"]
    current_user_id = args["current_user_id"]
    limit = args["limit"]
    offset = args["offset"]
    query = args["query"]
    sort_method = args["sort_method"]
    sort_direction = args["sort_direction"]
    sort_fn = desc if sort_direction == SortDirection.desc else asc
    # listening history is private: only the owner may read it
    if user_id != current_user_id:
        return []
    listening_history_results = (
        session.query(UserListeningHistory.listening_history).filter(
            UserListeningHistory.user_id == current_user_id
        )
    ).scalar()
    if not listening_history_results:
        return []
    # order listening history entries by their user's play counts so our track ids will be
    # correct order when querying for track ids
    if sort_method == SortMethod.most_listens_by_user:
        listening_history_results = METHOD_NAME(
            listening_history_results
        )
    # Map out all track ids and listen dates
    track_ids = []
    listen_dates = {}
    for listen in listening_history_results:
        track_ids.append(listen["track_id"])
        listen_dates[listen["track_id"]] = listen["timestamp"]
    base_query = (
        session.query(TrackWithAggregates)
        .filter(TrackWithAggregates.track_id.in_(track_ids))
        .filter(TrackWithAggregates.is_current == True)
    )
    # optional case-insensitive filter on track title or artist name
    if query is not None:
        base_query = base_query.join(TrackWithAggregates.user).filter(
            or_(
                TrackWithAggregates.title.ilike(f"%{query.lower()}%"),
                User.name.ilike(f"%{query.lower()}%"),
            )
        )
    base_query = sort_by_sort_method(sort_method, sort_fn, track_ids, base_query)
    # Add pagination
    base_query = add_query_pagination(base_query, limit, offset)
    query_results = base_query.all()
    # keep only the requested page of track ids for metadata population
    track_ids = track_ids[offset : offset + limit]
    tracks = helpers.query_result_to_list(query_results)
    # bundle peripheral info into track results
    tracks = populate_track_metadata(
        session, track_ids, tracks, current_user_id, track_has_aggregates=True
    )
    tracks = add_users_to_tracks(session, tracks, current_user_id)
    # stamp each track with when it was last listened to
    for track in tracks:
        track[response_name_constants.activity_timestamp] = listen_dates[
            track[response_name_constants.track_id]
        ]
    return tracks
def METHOD_NAME(listening_history_results):
    """Return listening-history entries ordered by play count, descending.

    Entries without a ``play_count`` are treated as having one play. The
    input is not mutated; a new, stably sorted list is returned (entries
    with equal play counts keep their relative order).
    """
    # sorted() replaces the original manual copy + in-place sort + reassign
    return sorted(
        listening_history_results,
        key=lambda listen: listen.get("play_count", 1),
        reverse=True,
    )
def sort_by_sort_method(sort_method, sort_fn, track_ids, base_query):
    """Apply an ORDER BY to *base_query* according to *sort_method*.

    :param sort_method: a SortMethod value; anything unrecognised falls back
        to listen order
    :param sort_fn: sqlalchemy.asc or sqlalchemy.desc
    :param track_ids: track ids in listen order; used for positional sorts
    :param base_query: query over TrackWithAggregates to be ordered
    """
    if sort_method == SortMethod.title:
        return base_query.order_by(sort_fn(TrackWithAggregates.title))
    elif sort_method == SortMethod.artist_name:
        return base_query.join(TrackWithAggregates.user, aliased=True).order_by(
            sort_fn(User.name)
        )
    elif sort_method == SortMethod.release_date:
        # fall back to created_at when release_date fails to parse
        return base_query.order_by(
            sort_fn(
                coalesce(
                    func.to_date_safe(
                        TrackWithAggregates.release_date, "Dy Mon DD YYYY HH24:MI:SS"
                    ),
                    TrackWithAggregates.created_at,
                )
            )
        )
    elif sort_method == SortMethod.last_listen_date:
        # order by each track's position in the (listen-ordered) id list
        return base_query.order_by(
            sort_fn(func.array_position(track_ids, TrackWithAggregates.track_id))
        )
    elif sort_method == SortMethod.plays:
        return base_query.join(TrackWithAggregates.aggregate_play).order_by(
            sort_fn(AggregatePlay.count)
        )
    elif sort_method == SortMethod.reposts:
        return base_query.join(TrackWithAggregates.aggregate_track).order_by(
            sort_fn(AggregateTrack.repost_count)
        )
    elif sort_method == SortMethod.saves:
        return base_query.join(TrackWithAggregates.aggregate_track).order_by(
            sort_fn(AggregateTrack.save_count)
        )
    elif sort_method == SortMethod.most_listens_by_user:
        # track_ids were pre-sorted by play count upstream; sort_fn is
        # deliberately not applied here
        return base_query.order_by(
            (func.array_position(track_ids, TrackWithAggregates.track_id))
        )
    else:
        return base_query.order_by(
            sort_fn(func.array_position(track_ids, TrackWithAggregates.track_id))
        )
299,650 | test merge multiple filetypes | # Copyright (C) 2016 YouCompleteMe contributors
#
# This file is part of YouCompleteMe.
#
# YouCompleteMe is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# YouCompleteMe is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with YouCompleteMe. If not, see <http://www.gnu.org/licenses/>.
from ycm.tests.test_utils import MockVimModule
MockVimModule()
from hamcrest import assert_that, equal_to
from unittest import TestCase
from ycm.diagnostic_filter import DiagnosticFilter
def _assert_accept_equals( filter, text_or_obj, expected ):
  """Check that *filter* accepts or rejects the diagnostic as *expected*.

  A bare string is wrapped into a minimal diagnostic dict first."""
  diagnostic = ( text_or_obj if isinstance( text_or_obj, dict )
                 else { 'text': text_or_obj } )
  assert_that( filter.IsAllowed( diagnostic ), equal_to( expected ) )
def _assert_accepts( filter, text ):
  """Assert that *filter* allows the diagnostic *text* through."""
  _assert_accept_equals( filter, text, True )


def _assert_rejects( filter, text ):
  """Assert that *filter* suppresses the diagnostic *text*."""
  _assert_accept_equals( filter, text, False )
def _JavaFilter( config ):
return { 'filter_diagnostics' : { 'java': config } }
def _CreateFilterForTypes( opts, types ):
  """Build a DiagnosticFilter from *opts*, restricted to *types*."""
  full_filter = DiagnosticFilter.CreateFromOptions( opts )
  return full_filter.SubsetForTypes( types )
class DiagnosticFilterTest( TestCase ):
  """Tests for DiagnosticFilter: regex- and severity-level-based filtering."""

  def test_RegexFilter( self ):
    opts = _JavaFilter( { 'regex' : 'taco' } )
    f = _CreateFilterForTypes( opts, [ 'java' ] )
    # matching is case-insensitive
    _assert_rejects( f, 'This is a Taco' )
    _assert_accepts( f, 'This is a Burrito' )

  def test_RegexSingleList( self ):
    opts = _JavaFilter( { 'regex' : [ 'taco' ] } )
    f = _CreateFilterForTypes( opts, [ 'java' ] )
    _assert_rejects( f, 'This is a Taco' )
    _assert_accepts( f, 'This is a Burrito' )

  def test_RegexMultiList( self ):
    opts = _JavaFilter( { 'regex' : [ 'taco', 'burrito' ] } )
    f = _CreateFilterForTypes( opts, [ 'java' ] )
    _assert_rejects( f, 'This is a Taco' )
    _assert_rejects( f, 'This is a Burrito' )

  def test_RegexNotFiltered( self ):
    # a filter configured for java must not affect other filetypes
    opts = _JavaFilter( { 'regex' : 'taco' } )
    f = _CreateFilterForTypes( opts, [ 'cs' ] )
    _assert_accepts( f, 'This is a Taco' )
    _assert_accepts( f, 'This is a Burrito' )

  def test_LevelWarnings( self ):
    opts = _JavaFilter( { 'level' : 'warning' } )
    f = _CreateFilterForTypes( opts, [ 'java' ] )
    _assert_rejects( f, { 'text' : 'This is an unimportant taco',
                          'kind' : 'WARNING' } )
    _assert_accepts( f, { 'text' : 'This taco will be shown',
                          'kind' : 'ERROR' } )

  def test_LevelErrors( self ):
    opts = _JavaFilter( { 'level' : 'error' } )
    f = _CreateFilterForTypes( opts, [ 'java' ] )
    _assert_accepts( f, { 'text' : 'This is an IMPORTANT taco',
                          'kind' : 'WARNING' } )
    _assert_rejects( f, { 'text' : 'This taco will NOT be shown',
                          'kind' : 'ERROR' } )

  def test_MultipleFilterTypesTypeTest( self ):
    # regex and level filters combine: rejected when either matches
    opts = _JavaFilter( { 'regex' : '.*taco.*',
                          'level' : 'warning' } )
    f = _CreateFilterForTypes( opts, [ 'java' ] )
    _assert_rejects( f, { 'text' : 'This is an unimportant taco',
                          'kind' : 'WARNING' } )
    _assert_rejects( f, { 'text' : 'This taco will NOT be shown',
                          'kind' : 'ERROR' } )
    _assert_accepts( f, { 'text' : 'This burrito WILL be shown',
                          'kind' : 'ERROR' } )

  def METHOD_NAME( self ):
    # filters for several filetypes are merged into one subset filter
    opts = { 'filter_diagnostics' : {
      'java' : { 'regex' : '.*taco.*' },
      'xml' : { 'regex' : '.*burrito.*' } } }
    f = _CreateFilterForTypes( opts, [ 'java', 'xml' ] )
    _assert_rejects( f, 'This is a Taco' )
    _assert_rejects( f, 'This is a Burrito' )
    _assert_accepts( f, 'This is some Nachos' )

  def test_CommaSeparatedFiletypes( self ):
    # a 'java,c,cs' key applies to each of the listed filetypes
    opts = { 'filter_diagnostics' : {
      'java,c,cs' : { 'regex' : '.*taco.*' } } }
    f = _CreateFilterForTypes( opts, [ 'cs' ] )
    _assert_rejects( f, 'This is a Taco' )
    _assert_accepts( f, 'This is a Burrito' )
299,651 | validate assignment | import contextlib
import re
from vyper.exceptions import (
CompilerPanic,
NamespaceCollision,
StructureException,
UndeclaredDefinition,
)
from vyper.semantics.analysis.levenshtein_utils import get_levenshtein_error_suggestions
class Namespace(dict):
    """
    Dictionary subclass that represents the namespace of a contract.
    Attributes
    ----------
    _scopes : List[Set]
        List of sets containing the key names for each scope
    """
    def __new__(cls, *args, **kwargs):
        # use __new__ so _scopes exists even before __init__ runs
        self = super().__new__(cls, *args, **kwargs)
        self._scopes = []
        return self

    def __init__(self):
        """Seed the namespace with primitive types, environment constants
        and builtin functions."""
        super().__init__()
        # NOTE cyclic imports!
        # TODO: break this cycle by providing an `init_vyper_namespace` in 3rd module
        from vyper.builtins.functions import get_builtin_functions
        from vyper.semantics import environment
        from vyper.semantics.analysis.base import VarInfo
        from vyper.semantics.types import PRIMITIVE_TYPES
        self.update(PRIMITIVE_TYPES)
        self.update(environment.get_constant_vars())
        self.update({k: VarInfo(b) for (k, b) in get_builtin_functions().items()})

    def __eq__(self, other):
        # identity comparison: two namespaces are never "equal" by content
        return self is other

    def __setitem__(self, attr, obj):
        # validate the name and record it in the innermost scope so it is
        # removed again when that scope exits
        if self._scopes:
            self.METHOD_NAME(attr)
            self._scopes[-1].add(attr)
        assert isinstance(attr, str), f"not a string: {attr}"
        super().__setitem__(attr, obj)

    def __getitem__(self, key):
        # raise with "did you mean ...?" suggestions for unknown names
        if key not in self:
            suggestions_str = get_levenshtein_error_suggestions(key, self, 0.2)
            raise UndeclaredDefinition(f"'{key}' has not been declared. {suggestions_str}")
        return super().__getitem__(key)

    def __enter__(self):
        # only valid when a scope was opened via enter_scope()
        if not self._scopes:
            raise CompilerPanic("Context manager must be invoked via namespace.enter_scope()")

    def __exit__(self, exc_type, exc_value, traceback):
        if not self._scopes:
            raise CompilerPanic("Bad use of namespace as a context manager")
        # drop everything declared in the scope being exited
        for key in self._scopes.pop():
            del self[key]

    def enter_scope(self):
        """
        Enter a new scope within the namespace.
        Called as a context manager, e.g. `with namespace.enter_scope():`
        All items that are added within the context are removed upon exit.
        """
        # NOTE cyclic imports!
        from vyper.semantics import environment
        self._scopes.append(set())
        if len(self._scopes) == 1:
            # add mutable vars (`self`) to the initial scope
            self.update(environment.get_mutable_vars())
        return self

    def update(self, other):
        # route through __setitem__ so scope bookkeeping is applied
        for key, value in other.items():
            self.__setitem__(key, value)

    def clear(self):
        # reset to the freshly-initialized builtin contents
        super().clear()
        self.__init__()

    def METHOD_NAME(self, attr):
        """Reject invalid identifiers and duplicate declarations."""
        validate_identifier(attr)
        if attr in self:
            obj = super().__getitem__(attr)
            raise NamespaceCollision(f"'{attr}' has already been declared as a {obj}")
def get_namespace():
    """
    Get the active namespace object.
    """
    global _namespace
    # lazily create the module-level singleton on first access
    if "_namespace" not in globals():
        _namespace = Namespace()
    return _namespace
@contextlib.contextmanager
def override_global_namespace(ns):
    """Temporarily install *ns* as the global namespace, restoring the
    previous one on exit (even on exception)."""
    global _namespace
    saved = _namespace
    # clobber global namespace
    _namespace = ns
    try:
        yield
    finally:
        # unclobber
        _namespace = saved
def validate_identifier(attr):
    """Raise StructureException if *attr* is not a valid, non-reserved identifier."""
    # `re.fullmatch` instead of `re.match` with a `$` anchor: `$` also matches
    # just before a trailing newline, so e.g. "foo\n" would incorrectly pass.
    if not re.fullmatch("[_a-zA-Z][a-zA-Z0-9_]*", attr):
        raise StructureException(f"'{attr}' contains invalid character(s)")
    if attr.lower() in RESERVED_KEYWORDS:
        raise StructureException(f"'{attr}' is a reserved keyword")
# https://docs.python.org/3/reference/lexical_analysis.html#keywords
# note we don't technically need to block all python reserved keywords,
# but do it for hygiene
_PYTHON_RESERVED_KEYWORDS = {
"False",
"None",
"True",
"and",
"as",
"assert",
"async",
"await",
"break",
"class",
"continue",
"def",
"del",
"elif",
"else",
"except",
"finally",
"for",
"from",
"global",
"if",
"import",
"in",
"is",
"lambda",
"nonlocal",
"not",
"or",
"pass",
"raise",
"return",
"try",
"while",
"with",
"yield",
}
_PYTHON_RESERVED_KEYWORDS = {s.lower() for s in _PYTHON_RESERVED_KEYWORDS}
# Cannot be used for variable or member naming
RESERVED_KEYWORDS = _PYTHON_RESERVED_KEYWORDS | {
# decorators
"public",
"external",
"nonpayable",
"constant",
"immutable",
"transient",
"internal",
"payable",
"nonreentrant",
# "class" keywords
"interface",
"struct",
"event",
"enum",
# EVM operations
"unreachable",
# special functions (no name mangling)
"init",
"_init_",
"___init___",
"____init____",
"default",
"_default_",
"___default___",
"____default____",
# more control flow and special operations
"range",
# more special operations
"indexed",
# denominations
"ether",
"wei",
"finney",
"szabo",
"shannon",
"lovelace",
"ada",
"babbage",
"gwei",
"kwei",
"mwei",
"twei",
"pwei",
# sentinal constant values
# TODO remove when these are removed from the language
"zero_address",
"empty_bytes32",
"max_int128",
"min_int128",
"max_decimal",
"min_decimal",
"max_uint256",
"zero_wei",
} |
299,652 | test executes with | # Copyright 2021, The TensorFlow Federated Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import collections
import copy
from absl.testing import parameterized
import tensorflow as tf
from tensorflow_federated.python.learning.optimizers import adagrad
from tensorflow_federated.python.learning.optimizers import optimizer as optimizer_base
from tensorflow_federated.python.learning.optimizers import optimizer_test_utils
_SCALAR_SPEC = tf.TensorSpec([1], tf.float32)
_STRUCT_SPEC = [tf.TensorSpec([2], tf.float32), tf.TensorSpec([3], tf.float32)]
_NESTED_SPEC = [
tf.TensorSpec([10], tf.float32),
[tf.TensorSpec([20], tf.float32), [tf.TensorSpec([30], tf.float32)]],
]
class AdagradTest(optimizer_test_utils.TestCase, parameterized.TestCase):
  """Unit tests for the functional TFF Adagrad optimizer."""

  def test_state_structure(self):
    optimizer = adagrad.build_adagrad(0.01)
    state = optimizer.initialize(_SCALAR_SPEC)
    self.assertLen(state, 3)
    self.assertIn(optimizer_base.LEARNING_RATE_KEY, state)
    self.assertIn(adagrad._EPSILON_KEY, state)
    self.assertIn(adagrad._PRECONDITIONER_KEY, state)

  def test_math_no_momentum(self):
    # with preconditioner=0 and epsilon=0 each step is lr * g / sqrt(sum g^2)
    weights = tf.constant([1.0], tf.float32)
    gradients = tf.constant([2.0], tf.float32)
    optimizer = adagrad.build_adagrad(
        learning_rate=0.01, initial_preconditioner_value=0.0, epsilon=0.0
    )
    history = [weights]
    state = optimizer.initialize(_SCALAR_SPEC)
    for _ in range(4):
      state, weights = optimizer.next(state, weights, gradients)
      history.append(weights)
    self.assertAllClose(
        [
            [1.0],  # w0
            [0.99],  # w1 = w0 - 0.01 * 2.0 / sqrt(4)
            [0.9829289],  # w2 = w1 - 0.01 * 2.0 / sqrt(8)
            [0.9771554],  # w3 = w2 - 0.01 * 2.0 / sqrt(12)
            [0.9721554],  # w4 = w3 - 0.01 * 2.0 / sqrt(16)
        ],
        history,
    )

  @parameterized.named_parameters(
      ('scalar_spec', _SCALAR_SPEC),
      ('struct_spec', _STRUCT_SPEC),
      ('nested_spec', _NESTED_SPEC),
  )
  def METHOD_NAME(self, spec):
    # smoke test: updates stay finite for several weight structures
    weights = tf.nest.map_structure(lambda s: tf.ones(s.shape, s.dtype), spec)
    gradients = tf.nest.map_structure(lambda s: tf.ones(s.shape, s.dtype), spec)
    optimizer = adagrad.build_adagrad(0.01)
    state = optimizer.initialize(spec)
    for _ in range(10):
      state, weights = optimizer.next(state, weights, gradients)
    tf.nest.map_structure(
        lambda w: self.assertTrue(all(tf.math.is_finite(w))), weights
    )

  def test_executes_with_indexed_slices(self):
    # TF can represent gradients as tf.IndexedSlices. This test makes sure this
    # case is supported by the optimizer.
    weights = tf.ones([4, 2])
    gradients = tf.IndexedSlices(
        values=tf.constant([[1.0, 1.0], [1.0, 1.0]]),
        indices=tf.constant([0, 2]),
        dense_shape=tf.constant([4, 2]),
    )
    optimizer = adagrad.build_adagrad(0.5, initial_preconditioner_value=0.0)
    state = optimizer.initialize(tf.TensorSpec([4, 2]))
    _, weights = optimizer.next(state, weights, gradients)
    # only rows 0 and 2 receive an update
    self.assertAllClose(
        [[0.5, 0.5], [1.0, 1.0], [0.5, 0.5], [1.0, 1.0]], weights
    )

  def test_convergence(self):
    init_w, fn, grad_fn = optimizer_test_utils.test_quadratic_problem()
    weights = init_w()
    self.assertGreater(fn(weights), 5.0)
    optimizer = adagrad.build_adagrad(0.5)
    state = optimizer.initialize(tf.TensorSpec(weights.shape, weights.dtype))
    for _ in range(100):
      gradients = grad_fn(weights)
      state, weights = optimizer.next(state, weights, gradients)
    self.assertLess(fn(weights), 0.005)

  def test_build_adagrad(self):
    optimizer = adagrad.build_adagrad(0.01)
    self.assertIsInstance(optimizer, optimizer_base.Optimizer)

  def test_match_keras(self):
    # the TFF optimizer must track tf.keras.optimizers.Adagrad numerically
    weight_spec = [
        tf.TensorSpec([10, 2], tf.float32),
        tf.TensorSpec([2], tf.float32),
    ]
    steps = 10
    genarator = tf.random.Generator.from_seed(2021)  # NOTE(review): "genarator" is a typo for "generator"

    def random_vector():
      return [
          genarator.normal(shape=s.shape, dtype=s.dtype) for s in weight_spec
      ]

    intial_weight = random_vector()
    model_variables_fn = lambda: [tf.Variable(v) for v in intial_weight]
    gradients = [random_vector() for _ in range(steps)]
    tff_optimizer_fn = lambda: adagrad.build_adagrad(0.01)
    keras_optimizer_fn = lambda: tf.keras.optimizers.Adagrad(0.01)
    self.assert_optimizers_numerically_close(
        model_variables_fn, gradients, tff_optimizer_fn, keras_optimizer_fn
    )

  @parameterized.named_parameters(
      ('negative_lr', -1.0, 0.1, 1e-7, 'learning_rate'),
      (
          'negative_preconditioner',
          1.0,
          -0.1,
          1e-7,
          'initial_preconditioner_value',
      ),
      ('negative_epsilon', 1.0, 0.1, -1e-7, 'epsilon'),
  )
  def test_invalid_args_raises(self, lr, preconditioner, epsilon, regex):
    with self.assertRaisesRegex(ValueError, regex):
      adagrad.build_adagrad(lr, preconditioner, epsilon)

  def test_weights_gradients_mismatch_raises(self):
    optimizer = adagrad.build_adagrad(0.1)
    state = optimizer.initialize(_SCALAR_SPEC)
    with self.assertRaises(ValueError):
      optimizer.next(state, tf.zeros([1]), tf.zeros([2]))

  def test_initialize_next_weights_mismatch_raises(self):
    optimizer = adagrad.build_adagrad(0.1)
    state = optimizer.initialize(_SCALAR_SPEC)
    with self.assertRaises(ValueError):
      optimizer.next(state, tf.zeros([2]), tf.zeros([2]))

  @parameterized.named_parameters(
      ('scalar_spec', _SCALAR_SPEC),
      ('struct_spec', _STRUCT_SPEC),
      ('nested_spec', _NESTED_SPEC),
  )
  def test_get_hparams_returns_expected_result(self, spec):
    optimizer = adagrad.build_adagrad(
        learning_rate=0.1, epsilon=0.01, initial_preconditioner_value=0.2
    )
    state = optimizer.initialize(spec)
    expected_hparams = collections.OrderedDict(learning_rate=0.1, epsilon=0.01)
    actual_hparams = optimizer.get_hparams(state)
    self.assertIsInstance(actual_hparams, collections.OrderedDict)
    self.assertEqual(actual_hparams, expected_hparams)

  @parameterized.named_parameters(
      ('scalar_spec', _SCALAR_SPEC),
      ('struct_spec', _STRUCT_SPEC),
      ('nested_spec', _NESTED_SPEC),
  )
  def test_set_hparams_returns_expected_result(self, spec):
    optimizer = adagrad.build_adagrad(
        learning_rate=0.1, epsilon=0.01, initial_preconditioner_value=0.2
    )
    state = optimizer.initialize(spec)
    hparams = collections.OrderedDict(learning_rate=0.5, epsilon=2.0)
    expected_state = copy.deepcopy(state)
    for k, v in hparams.items():
      expected_state[k] = v
    updated_state = optimizer.set_hparams(state, hparams)
    self.assertIsInstance(updated_state, collections.OrderedDict)
    self.assertEqual(updated_state, expected_state)

  @parameterized.named_parameters(
      ('scalar_spec', _SCALAR_SPEC),
      ('struct_spec', _STRUCT_SPEC),
      ('nested_spec', _NESTED_SPEC),
  )
  def test_set_get_hparams_is_no_op(self, spec):
    optimizer = adagrad.build_adagrad(learning_rate=0.1)
    state = optimizer.initialize(spec)
    hparams = optimizer.get_hparams(state)
    updated_state = optimizer.set_hparams(state, hparams)
    self.assertEqual(state, updated_state)
# Run the test suite when this file is executed as a script.
if __name__ == '__main__':
  tf.test.main()
299,653 | label candidates | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from typing import Optional
from parlai.core.params import ParlaiParser
from parlai.core.opt import Opt
import copy
import json
import os
from parlai.utils.io import PathManager
from parlai.core.teachers import DialogTeacher
from .build import build
# Dataset name/version used to build the on-disk path of the download.
MULTINLI = 'MultiNLI'
MULTINLI_VERSION = '1.0'
MULTINLI_PREFIX = 'multinli_'
# Prefixes prepended to the two sentences unless --dialog-format is set.
MULTINLI_PREMISE_PREFIX = 'Premise: '
MULTINLI_HYPO_PREFIX = 'Hypothesis: '
# The three gold labels of the MultiNLI task.
MULTINLI_LABELS = ['entailment', 'contradiction', 'neutral']
# JSON field names inside the *.jsonl data files.
MULTINLI_PREMISE_KEY = 'sentence1'
MULTINLI_HYPO_KEY = 'sentence2'
MULTINLI_ANSWER_KEY = 'gold_label'
# Binary view of the task: entailment and neutral collapse into one class.
NOT_CONTRADICT = 'not_contradiction'
BICLASS_DICT = {
    'contradiction': 'contradiction',
    'entailment': NOT_CONTRADICT,
    'neutral': NOT_CONTRADICT,
}
BICLASS_LABELS = ['contradiction', NOT_CONTRADICT]
def _path(opt):
    """Ensure the dataset is built, then return the path of the requested split."""
    build(opt)
    datatype = opt['datatype'].split(':')[0]
    # The matched dev set serves as valid, the mismatched dev set as test.
    suffix_by_datatype = {
        'train': 'train',
        'valid': 'dev_matched',
        'test': 'dev_mismatched',
    }
    if datatype not in suffix_by_datatype:
        raise RuntimeError('Not valid datatype.')
    filename = (MULTINLI_PREFIX + MULTINLI_VERSION + '_'
                + suffix_by_datatype[datatype] + '.jsonl')
    return os.path.join(
        opt['datapath'], MULTINLI, MULTINLI_PREFIX + MULTINLI_VERSION, filename
    )
def setup_data(path, dialog_format=False, binary_classes=False):
    """
    Yield MultiNLI examples in the parlai.core.teachers.DialogData format.

    :param path: path to the *.jsonl file that stores the MNLI split.
    :param dialog_format: if True, omit the special 'Premise:'/'Hypothesis:'
        tokens in the text.
    :param binary_classes: if True, bucketize neutral and entailment into one
        class (not_contradiction).
    :return: generator of ``((x, y, r, c), new_episode?)`` tuples where ``x``
        is the question, ``y`` the answer(s), ``c`` the available label
        candidates; ``new_episode`` is always True for NLI teachers.
    """
    print('loading: ' + path)
    with PathManager.open(path, 'r') as data_file:
        for raw_line in data_file:
            example = json.loads(raw_line)
            # '-' marks examples without annotator consensus; skip them.
            if example[MULTINLI_ANSWER_KEY] == '-':
                continue
            question, answers, clas = convert_to_dialogData(
                premise_raw=example[MULTINLI_PREMISE_KEY],
                hypo_raw=example[MULTINLI_HYPO_KEY],
                answer_raw=example[MULTINLI_ANSWER_KEY],
                dialog_format=dialog_format,
                binary_classes=binary_classes,
            )
            yield (question, answers, None, clas), True
def convert_to_dialogData(
    premise_raw, hypo_raw, answer_raw, dialog_format=False, binary_classes=False
):
    """
    Convert from NLI context to dialog text.

    :param premise_raw: raw premise extracted from jsonl file.
    :param hypo_raw: raw hypothesis extracted from jsonl file.
    :param answer_raw: raw answer extracted from jsonl file.
    :param dialog_format: if set True, omit the special tokens 'Hypothesis'
        and 'Premise' in the text.
    :param binary_classes: if set True, bucketize (neutral, entailment) into
        one (not_contradiction).
    :return: a tuple (question, answers, clas)
        - ``question`` (str) is a query and possibly context
        - ``answers`` (iter) is an iterable of label(s) for that query
        - ``clas`` (iter) is an iterable of label candidates that the student
          can choose from
    """
    # Strip any mix of surrounding newlines/tabs in a single pass.  The
    # previous two-pass ``.strip('\n').strip('\t')`` left residue on
    # alternating sequences such as '\t\n\t'.
    premise_raw = premise_raw.strip('\n\t')
    hypo_raw = hypo_raw.strip('\n\t')
    clas = MULTINLI_LABELS
    if binary_classes:
        answer_raw = BICLASS_DICT[answer_raw]
        clas = BICLASS_LABELS
    if not dialog_format:
        premise_raw = MULTINLI_PREMISE_PREFIX + premise_raw
        hypo_raw = MULTINLI_HYPO_PREFIX + hypo_raw
    question = premise_raw + '\n' + hypo_raw
    answers = [answer_raw]
    return question, answers, clas
class DefaultTeacher(DialogTeacher):
    """Dialog teacher serving the MultiNLI entailment dataset."""

    @classmethod
    def add_cmdline_args(
        cls, parser: ParlaiParser, partial_opt: Optional[Opt] = None
    ) -> ParlaiParser:
        """Register the MNLI-specific command-line flags."""
        super().add_cmdline_args(parser, partial_opt)
        parser = parser.add_argument_group('MNLI Teacher Args')
        parser.add_argument(
            '-dfm',
            '--dialog-format',
            type='bool',
            default=False,
            help="True if one would like to convert to a dialogue format without special tokens such as 'Premise'"
            " and 'Hypothesis' (default: False).",
        )
        parser.add_argument(
            '-bcl',
            '--binary-classes',
            type='bool',
            default=False,
            help="True if label candidates are (contradiction, not_contradiction), and (entailment, contradiction, "
            "neutral) otherwise (default: False).",
        )
        return parser

    def __init__(self, opt, shared=None):
        """Resolve the data file for this split and initialise the base teacher."""
        opt = copy.deepcopy(opt)
        data_path = _path(opt)
        opt['datafile'] = data_path
        self.id = 'MultiNLI'
        self.dialog_format = opt.get('dialog_format', False)
        self.binary_classes = opt.get('binary_classes', False)
        super().__init__(opt, shared)

    def setup_data(self, path):
        """Delegate to the module-level generator using this teacher's options."""
        return setup_data(path, self.dialog_format, self.binary_classes)

    def METHOD_NAME(self):
        """Return the fixed label candidates for this task."""
        # NOTE(review): the name of this method is mangled in this copy of the
        # file; presumably it overrides the teacher's label-candidates hook —
        # confirm against the DialogTeacher API.
        if self.binary_classes:
            return BICLASS_LABELS
        return MULTINLI_LABELS
299,654 | test cost negative | __copyright__ = "Copyright (C) 2015-2016 Martin Blais"
__license__ = "GNU GPLv2"
import collections
import re
import textwrap
import unittest
from beancount.core.data import Booking
from beancount.parser import parser
from beancount.parser import cmptest
from beancount.parser import booking
BookingTestError = collections.namedtuple('BookingTestError', 'source message entry')
class TestInvalidAmountsErrors(cmptest.TestCase):
    # NOTE: the docstrings of the test methods below are *functional* — they
    # are the beancount ledger text parsed by @parser.parse_doc() — so they
    # must not be edited as documentation.

    @parser.parse_doc()
    def test_zero_amount(self, entries, errors, options_map):
        """
        2013-05-18 * ""
          Assets:Investments:MSFT      0 MSFT
          Assets:Investments:Cash      0 USD
        """
        # Zero amounts without a cost are tolerated by the booking pass.
        booked_entries, booking_errors = booking.book(entries, options_map)
        self.assertEqual(0, len(booking_errors))

    @parser.parse_doc()
    def test_zero_amount__with_cost(self, entries, errors, options_map):
        """
        2013-05-18 * ""
          Assets:Investments:MSFT      0 MSFT {200.00 USD}
          Assets:Investments:Cash      1 USD
        """
        # A zero amount held at cost is an error.
        booked_entries, booking_errors = booking.book(entries, options_map)
        self.assertEqual(1, len(booking_errors))
        self.assertRegex(booking_errors[0].message, 'Amount is zero')

    @parser.parse_doc()
    def test_cost_zero(self, entries, errors, options_map):
        """
        2013-05-18 * ""
          Assets:Investments:MSFT      -10 MSFT {0.00 USD}
          Assets:Investments:Cash      2000.00 USD
        """
        # A zero cost basis is legal (e.g. free shares).
        booked_entries, booking_errors = booking.book(entries, options_map)
        self.assertFalse(booking_errors)

    @parser.parse_doc()
    def METHOD_NAME(self, entries, errors, options_map):
        """
        2013-05-18 * ""
          Assets:Investments:MSFT      -10 MSFT {-200.00 USD}
          Assets:Investments:Cash      2000.00 USD
        """
        # NOTE(review): method name is mangled in this copy; it tests that a
        # negative cost basis is rejected.
        booked_entries, booking_errors = booking.book(entries, options_map)
        self.assertEqual(1, len(entries))
        self.assertEqual(1, len(booking_errors))
        self.assertRegex(booking_errors[0].message, 'Cost is negative')
class TestBookingValidation(cmptest.TestCase):
    # NOTE: the docstrings of the @parser.parse_doc() test methods below are
    # functional (parsed as beancount input); do not edit them as prose.

    def setUp(self):
        # Shared ledger exercising augmenting and reducing lots, including
        # crossing through and past zero.
        self.input_str = textwrap.dedent("""

          2014-01-01 open Assets:Investments:Cash
          2014-01-01 open Assets:Investments:Stock

          2014-06-22 * "Add some positive units"
            Assets:Investments:Stock     1 HOOL {500 USD}
            Assets:Investments:Cash   -500 USD

          2014-06-23 * "Down to zero"
            Assets:Investments:Stock    -1 HOOL {500 USD}
            Assets:Investments:Cash    500 USD

          2014-06-24 * "Go negative from zero"
            Assets:Investments:Stock    -1 HOOL {500 USD}
            Assets:Investments:Cash    500 USD

          2014-06-25 * "Go positive much"
            Assets:Investments:Stock    11 HOOL {500 USD}
            Assets:Investments:Cash  -5500 USD

          2014-06-26 * "Cross to negative from above zero"
            Assets:Investments:Stock   -15 HOOL {500 USD}
            Assets:Investments:Cash   7500 USD
        """)

    # Booking method used for every account in these tests.
    BOOKMETH = collections.defaultdict(lambda: Booking.STRICT)

    def convert_and_validate(self, entries, options_map):
        # Convert parsed lot specs to concrete lots, then validate booking.
        entries, _ = booking.convert_lot_specs_to_lots(entries)
        return booking.validate_inventory_booking(entries, options_map, self.BOOKMETH)

    def do_validate_inventory_booking(self, input_str):
        # Parse the ledger text and assert that validation produces no errors.
        entries, errors, options_map = parser.parse_string(input_str)
        validation_errors = self.convert_and_validate(entries, options_map)
        self.assertEqual([], list(map(type, validation_errors)))

    def test_validate_inventory_booking(self):
        self.do_validate_inventory_booking(self.input_str)

    def test_validate_inventory_booking__same_day(self):
        # Collapse all day numbers onto the 22nd: intra-day ordering must not
        # change the validation outcome.
        input_str = re.sub(r'\b2\d\b', '22', self.input_str)
        self.do_validate_inventory_booking(input_str)

    @parser.parse_doc()
    def test_simple_negative_lots(self, entries, errors, options_map):
        """
        2013-05-01 open Assets:Bank:Investing
        2013-05-01 open Equity:Opening-Balances

        2013-05-02 *
          Assets:Bank:Investing    -1 HOOL {501 USD}
          Equity:Opening-Balances  501 USD
        """
        # Going negative from empty is allowed by itself.
        validation_errors = self.convert_and_validate(entries, options_map)
        self.assertEqual([], list(map(type, validation_errors)))

    @parser.parse_doc()
    def test_mixed_lots_in_single_transaction(self, entries, errors, options_map):
        """
        2013-05-01 open Assets:Bank:Investing
        2013-05-01 open Equity:Opening-Balances

        2013-05-02 *
          Assets:Bank:Investing     5 HOOL {501 USD}
          Assets:Bank:Investing    -1 HOOL {502 USD}
          Equity:Opening-Balances  -2003 USD
        """
        # Mixing signs across different lots in one transaction is an error.
        validation_errors = self.convert_and_validate(entries, options_map)
        self.assertEqual([booking.BookingError], list(map(type, validation_errors)))

    @parser.parse_doc()
    def test_mixed_lots_in_multiple_transactions_augmenting(self,
                                                            entries, errors, options_map):
        """
        2013-05-01 open Assets:Bank:Investing
        2013-05-01 open Equity:Opening-Balances

        2013-05-02 *
          Assets:Bank:Investing     5 HOOL {501 USD}
          Equity:Opening-Balances  -501 USD

        2013-05-03 *
          Assets:Bank:Investing    -1 HOOL {502 USD}
          Equity:Opening-Balances  502 USD
        """
        # Reducing a lot that was never augmented (different cost) is an error.
        validation_errors = self.convert_and_validate(entries, options_map)
        self.assertEqual([booking.BookingError], list(map(type, validation_errors)))

    @parser.parse_doc()
    def test_mixed_lots_in_multiple_transactions_reducing(self,
                                                          entries, errors, options_map):
        """
        2013-05-01 open Assets:Bank:Investing
        2013-05-01 open Equity:Opening-Balances

        2013-05-02 *
          Assets:Bank:Investing     5 HOOL {501 USD}
          Assets:Bank:Investing     5 HOOL {502 USD}
          Equity:Opening-Balances  -5015 USD

        2013-05-03 *
          Assets:Bank:Investing    -6 HOOL {502 USD}
          Equity:Opening-Balances  3012 USD
        """
        # Reducing by more units than the matching lot holds is an error.
        validation_errors = self.convert_and_validate(entries, options_map)
        self.assertEqual([booking.BookingError], list(map(type, validation_errors)))
# Run the test suite when this file is executed as a script.
if __name__ == '__main__':
    unittest.main()
299,655 | gen read64 macro | #!/usr/bin/env python3
# SPDX-License-Identifier: BSD-2-Clause
#
# Copyright (c) 2018, Linaro Limited
#
import argparse
import sys
import re
def eprint(*args, **kwargs):
    """Print to the standard error stream (same signature as built-in print)."""
    print(*args, file=sys.stderr, **kwargs)
def my_err(line_number, msg):
    """Report a fatal error for the given input line number and exit(1)."""
    eprint(f'Error: line:{line_number!r} {msg}')
    sys.exit(1)
def METHOD_NAME(reg_name, opc1, crm, descr):
    """Emit an assembly macro that reads a 64-bit CP15 register via MRRC.

    NOTE(review): the name of this function is mangled in this copy of the
    file; by symmetry with gen_write64_macro it is presumably the 64-bit
    read-macro generator.
    """
    out = ['']
    if descr:
        out.append(f'\t# {descr}')
    out.append(f'\t.macro read_{reg_name.lower()} reg0, reg1')
    out.append(f'\tmrrc\tp15, {opc1}, \\reg0, \\reg1, {crm}')
    out.append('\t.endm')
    print('\n'.join(out))
def gen_write64_macro(reg_name, opc1, crm, descr):
    """Emit an assembly macro that writes a 64-bit CP15 register via MCRR."""
    out = ['']
    if descr:
        out.append(f'\t# {descr}')
    out.append(f'\t.macro write_{reg_name.lower()} reg0, reg1')
    out.append(f'\tmcrr\tp15, {opc1}, \\reg0, \\reg1, {crm}')
    out.append('\t.endm')
    print('\n'.join(out))
def gen_read32_macro(reg_name, crn, opc1, crm, opc2, descr):
    """Emit an assembly macro that reads a 32-bit CP15 register via MRC."""
    out = ['']
    if descr:
        out.append(f'\t# {descr}')
    out.append(f'\t.macro read_{reg_name.lower()} reg')
    out.append(f'\tmrc p15, {opc1}, \\reg, {crn}, {crm}, {opc2}')
    out.append('\t.endm')
    print('\n'.join(out))
def gen_write32_macro(reg_name, crn, opc1, crm, opc2, descr):
    """Emit an assembly macro that writes a 32-bit CP15 register via MCR."""
    out = ['']
    if descr:
        out.append(f'\t# {descr}')
    out.append(f'\t.macro write_{reg_name.lower()} reg')
    out.append(f'\tmcr p15, {opc1}, \\reg, {crn}, {crm}, {opc2}')
    out.append('\t.endm')
    print('\n'.join(out))
def gen_write32_dummy_macro(reg_name, crn, opc1, crm, opc2, descr):
    """Emit an assembly macro for a write-only register whose value is ignored."""
    out = ['']
    if descr:
        out.append(f'\t# {descr}')
    out.append(f'\t.macro write_{reg_name.lower()}')
    # The written value is irrelevant, so r0 is passed unconditionally.
    out.append('\t# Register ignored')
    out.append(f'\tmcr p15, {opc1}, r0, {crn}, {crm}, {opc2}')
    out.append('\t.endm')
    print('\n'.join(out))
def gen_read64_func(reg_name, opc1, crm, descr):
    """Emit a C inline function that reads a 64-bit CP15 register via MRRC."""
    out = ['']
    if descr:
        out.append(f'/* {descr} */')
    out += [
        f'static inline __noprof uint64_t read_{reg_name.lower()}(void)',
        '{',
        '\tuint64_t v;',
        '',
        f'\tasm volatile ("mrrc p15, {opc1}, %Q0, %R0, {crm}" : "=r" (v));',
        '',
        '\treturn v;',
        '}',
    ]
    print('\n'.join(out))
def gen_write64_func(reg_name, opc1, crm, descr):
    """Emit a C inline function that writes a 64-bit CP15 register via MCRR."""
    out = ['']
    if descr:
        out.append(f'/* {descr} */')
    out += [
        f'static inline __noprof void write_{reg_name.lower()}(uint64_t v)',
        '{',
        f'\tasm volatile ("mcrr p15, {opc1}, %Q0, %R0, {crm}" : : "r" (v));',
        '}',
    ]
    print('\n'.join(out))
def gen_read32_func(reg_name, crn, opc1, crm, opc2, descr):
    """Emit a C inline function that reads a 32-bit CP15 register via MRC."""
    out = ['']
    if descr:
        out.append(f'/* {descr} */')
    out += [
        f'static inline __noprof uint32_t read_{reg_name.lower()}(void)',
        '{',
        '\tuint32_t v;',
        '',
        f'\tasm volatile ("mrc p15, {opc1}, %0, {crn}, {crm}, {opc2}" : "=r" (v));',
        '',
        '\treturn v;',
        '}',
    ]
    print('\n'.join(out))
def gen_write32_func(reg_name, crn, opc1, crm, opc2, descr):
    """Emit a C inline function that writes a 32-bit CP15 register via MCR."""
    out = ['']
    if descr:
        out.append(f'/* {descr} */')
    out += [
        f'static inline __noprof void write_{reg_name.lower()}(uint32_t v)',
        '{',
        f'\tasm volatile ("mcr p15, {opc1}, %0, {crn}, {crm}, {opc2}" : : "r" (v));',
        '}',
    ]
    print('\n'.join(out))
def gen_write32_dummy_func(reg_name, crn, opc1, crm, opc2, descr):
    """Emit a C inline function for a write-only register whose value is ignored."""
    out = ['']
    if descr:
        out.append(f'/* {descr} */')
    out += [
        f'static inline __noprof void write_{reg_name.lower()}(void)',
        '{',
        # The written value is irrelevant, so r0 is used unconditionally.
        '\t/* Register ignored */',
        f'\tasm volatile ("mcr p15, {opc1}, r0, {crn}, {crm}, {opc2}");',
        '}',
    ]
    print('\n'.join(out))
def gen_file(line, line_number, s_file):
    """Process one register-description line and emit the matching accessors.

    :param line: one line of the register description read from stdin.
    :param line_number: 1-based line number, used for error reporting.
    :param s_file: True to emit assembly macros, False to emit C functions.
    """
    words = line.split()
    if len(words) == 0:
        return
    # Lines starting with '#' are comments in the input; skip them.
    if len(re.findall('^ *#', line)):
        return
    # Lines starting with '@' carry a comment that is copied into the output.
    if len(re.findall('^ *@', line)):
        comment = re.sub('^ *@', '', line)
        comment = re.sub('^ *', '', comment)
        comment = re.sub('[ \n]*$', '', comment)
        if len(comment) == 0:
            print('')
            return
        if s_file:
            print('# ' + comment)
        else:
            print('/* ' + comment + ' */')
        return
    # Data line format: name crn opc1 crm opc2 access-type description...
    reg_name = words[0]
    crn = words[1]
    opc1 = words[2]
    crm = words[3]
    opc2 = words[4]
    access_type = words[5]
    descr = " ".join(words[6:])
    read_access = access_type == 'RO' or access_type == 'RW'
    write_access = (access_type == 'WO' or access_type == 'RW' or
                    access_type == 'WOD')
    # 'WOD' = write-only with the written value ignored (dummy write).
    dummy_access = access_type == 'WOD'
    if not read_access and not write_access:
        my_err(line_number, 'bad Access Type "' + access_type + '"')
    # crn == '-' marks a 64-bit register accessed via MRRC/MCRR (no crn/opc2).
    if crn == '-':
        if opc2 != '-':
            my_err(line_number, 'bad opc2, expected -')
        if read_access:
            if s_file:
                METHOD_NAME(reg_name, opc1, crm, descr)
            else:
                gen_read64_func(reg_name, opc1, crm, descr)
        # BUGFIX: the write accessors were previously generated
        # unconditionally, so read-only 64-bit registers wrongly received a
        # write_<reg> accessor; guard on write_access like the 32-bit path.
        if write_access:
            if s_file:
                gen_write64_macro(reg_name, opc1, crm, descr)
            else:
                gen_write64_func(reg_name, opc1, crm, descr)
    else:
        if read_access:
            if s_file:
                gen_read32_macro(reg_name, crn, opc1, crm, opc2, descr)
            else:
                gen_read32_func(reg_name, crn, opc1, crm, opc2, descr)
        if write_access:
            if dummy_access:
                if s_file:
                    gen_write32_dummy_macro(reg_name, crn, opc1, crm, opc2,
                                            descr)
                else:
                    gen_write32_dummy_func(reg_name, crn, opc1, crm, opc2,
                                           descr)
            else:
                if s_file:
                    gen_write32_macro(reg_name, crn, opc1, crm, opc2, descr)
                else:
                    gen_write32_func(reg_name, crn, opc1, crm, opc2, descr)
def get_args():
    """Parse and return the command-line options of this generator."""
    parser = argparse.ArgumentParser(
        description='Generates instructions to access ARM32 system registers.')
    parser.add_argument('--s_file', action='store_true',
                        help='Generate an Assembly instead of a C file')
    parser.add_argument('--guard',
                        help='Provide #ifdef <guard_argument> in C file')
    return parser.parse_args()
def main():
    """Read a register description from stdin and print the generated file."""
    args = get_args()
    # Both output flavours begin with a "do not edit" banner.
    cmnt = 'Automatically generated, do not edit'
    if args.s_file:
        print('# ' + cmnt)
    else:
        print('/* ' + cmnt + ' */')
    # C output may be wrapped in an include guard derived from --guard.
    if args.guard is not None:
        print('#ifndef ' + args.guard.upper().replace('.', '_'))
        print('#define ' + args.guard.upper().replace('.', '_'))
    print('#include <compiler.h>')
    line_number = 0
    for line in sys.stdin:
        line_number = line_number + 1
        gen_file(line, line_number, args.s_file)
    # Close the include guard (only meaningful for the C flavour).
    if not args.s_file and args.guard is not None:
        print('#endif /*' + args.guard.upper().replace('.', '_') + '*/')
# Script entry point.
if __name__ == '__main__':
    main()
299,656 | sort columns | # This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http://mozilla.org/MPL/2.0/.
from bugbot.bugbug_utils import get_bug_ids_classification
from bugbot.bzcleaner import BzCleaner
from bugbot.utils import nice_round
class DefectEnhancementTask(BzCleaner):
    """Check that a bug's type matches the type predicted by the bugbug model.

    Bugs for which the model is 100% confident are autofixed; the rest are
    only reported in the email.
    """

    def __init__(self):
        super().__init__()
        # Maps bug id -> suggested type, filled in get_bugs() and consumed by
        # get_autofix_change().
        self.autofix_type = {}

    def description(self):
        """Return the one-line description used in the report email."""
        return "[Using ML] Check that the bug type is the same as predicted by bugbug"

    def columns(self):
        """Return the column names of the report table."""
        return [
            "id",
            "summary",
            "type",
            "bugbug_type",
            "confidence",
            "confidences",
            "autofixed",
        ]

    def METHOD_NAME(self):
        """Return the key function used to order rows in the report.

        NOTE(review): the name of this method is mangled in this copy of the
        file; presumably it is the BzCleaner row-sorting hook.
        """
        def _sort_columns(p):
            if (
                p[2] == "defect"
            ):  # defect -> non-defect is what we plan to autofix, so we show it first in the email.
                prio = 0
            elif (
                p[3] == "defect"
            ):  # non-defect -> defect has more priority than the rest, as 'enhancement' and 'task' can be often confused.
                prio = 1
            else:
                prio = 2

            # Then, we sort by confidence and ID.
            # p[0] is the id and is a string
            return (prio, -p[4], -int(p[0]))

        return _sort_columns

    def handle_bug(self, bug, data):
        """Record the current type of one bug, keyed by its id."""
        # Summary and id are injected by BzCleaner.bughandler
        data[str(bug["id"])] = {"type": bug["type"]}
        return data

    def get_bz_params(self, date):
        """Build the Bugzilla query: open bugs created since *date* whose
        type was never manually changed, excluding skiplisted reporters."""
        start_date, _ = self.get_dates(date)
        reporter_skiplist = self.get_config("reporter_skiplist", default=[])
        reporter_skiplist = ",".join(reporter_skiplist)
        return {
            "include_fields": ["id", "type"],
            # Ignore closed bugs.
            "bug_status": "__open__",
            # Check only recently opened bugs.
            "f1": "creation_ts",
            "o1": "greaterthan",
            "v1": start_date,
            "f2": "reporter",
            "o2": "nowords",
            "v2": reporter_skiplist,
            # Type must never have been (re)set by a human.
            "f3": "bug_type",
            "o3": "everchanged",
            "n3": "1",
        }

    def get_bugs(self, date="today", bug_ids=[]):
        """Fetch bugs, classify them with bugbug, and return the rows whose
        current type disagrees with the model's suggestion."""
        # Retrieve the bugs with the fields defined in get_bz_params
        raw_bugs = super().get_bugs(date=date, bug_ids=bug_ids, chunk_size=7000)

        if len(raw_bugs) == 0:
            return {}

        # Extract the bug ids
        bug_ids = list(raw_bugs.keys())

        # Classify those bugs
        bugs = get_bug_ids_classification("defectenhancementtask", bug_ids)

        results = {}

        for bug_id in sorted(bugs.keys()):
            bug_data = bugs[bug_id]

            if not bug_data.get("available", True):
                # The bug was not available, it was either removed or is a
                # security bug
                continue

            if not {"prob", "index", "class", "extra_data"}.issubset(bug_data.keys()):
                raise Exception(f"Invalid bug response {bug_id}: {bug_data!r}")

            bug = raw_bugs[bug_id]
            prob = bug_data["prob"]
            index = bug_data["index"]
            suggestion = bug_data["class"]
            # Maps class label -> index into the probability vector.
            labels_map = bug_data["extra_data"]["labels_map"]

            assert suggestion in {
                "defect",
                "enhancement",
                "task",
            }, f"Suggestion {suggestion} is invalid"

            # Model agrees with the current type: nothing to report.
            if bug["type"] == suggestion:
                continue

            defect_prob = prob[labels_map["defect"]]
            enhancement_prob = prob[labels_map["enhancement"]]
            task_prob = prob[labels_map["task"]]

            results[bug_id] = {
                "id": bug_id,
                "summary": bug["summary"],
                "type": bug["type"],
                "bugbug_type": suggestion,
                "confidence": nice_round(prob[index]),
                "confidences": f"defect {nice_round(defect_prob)}, enhancement {nice_round(enhancement_prob)}, task {nice_round(task_prob)}",
                "autofixed": False,
            }

            # Only autofix results for which we are sure enough.
            # And only autofix defect -> task/enhancement for now, unless we're 100% sure.
            """if prob[index] == 1.0 or (
                bug["type"] == "defect"
                and (enhancement_prob + task_prob)
                >= self.get_config("confidence_threshold")
            ):"""
            if prob[index] == 1.0:
                results[bug_id]["autofixed"] = True
                self.autofix_type[bug["id"]] = suggestion

        return results

    def get_autofix_change(self):
        """Build the Bugzilla change payload for every autofixable bug."""
        cc = self.get_config("cc")
        return {
            bug_id: {
                "type": suggestion,
                "cc": {"add": cc},
                "comment": {
                    "body": f"The [Bugbug](https://github.com/mozilla/bugbug/) bot thinks this bug is a [{suggestion}](https://firefox-source-docs.mozilla.org/bug-mgmt/guides/bug-types.html), but please change it back in case of error."
                },
            }
            for bug_id, suggestion in self.autofix_type.items()
        }
# Run the cleaner when this file is executed as a script.
if __name__ == "__main__":
    DefectEnhancementTask().run()
299,657 | test invalid frt unit | # Copyright iris-grib contributors
#
# This file is part of iris-grib and is released under the LGPL license.
# See COPYING and COPYING.LESSER in the root of the repository for full
# licensing details.
"""
Test function :func:`iris_grib._load_convert.other_time_coord.
"""
# import iris_grib.tests first so that some things can be initialised
# before importing anything else.
import iris_grib.tests as tests
import iris.coords
from iris_grib._load_convert import other_time_coord
class TestValid(tests.IrisGribTest):
    """Cases where the missing time coordinate can be derived."""

    def test_t(self):
        # time + forecast_period -> forecast_reference_time (48 - 6 = 42).
        time_coord = iris.coords.DimCoord(48, 'time', units='hours since epoch')
        period = iris.coords.DimCoord(6, 'forecast_period', units='hours')
        result = other_time_coord(time_coord, period)
        frt = iris.coords.DimCoord(42, 'forecast_reference_time',
                                   units='hours since epoch')
        self.assertEqual(result, frt)

    def test_frt(self):
        # forecast_reference_time + forecast_period -> time (48 + 6 = 54).
        frt = iris.coords.DimCoord(48, 'forecast_reference_time',
                                   units='hours since epoch')
        period = iris.coords.DimCoord(6, 'forecast_period', units='hours')
        result = other_time_coord(frt, period)
        time_coord = iris.coords.DimCoord(54, 'time', units='hours since epoch')
        self.assertEqual(result, time_coord)
class TestInvalid(tests.IrisGribTest):
    """Inputs that other_time_coord must reject with ValueError."""

    def _check_raises(self, rt, fp, regex):
        # Shared assertion: the coordinate pair must be rejected with a
        # message matching *regex*.
        with self.assertRaisesRegex(ValueError, regex):
            other_time_coord(rt, fp)

    def test_t_with_bounds(self):
        rt = iris.coords.DimCoord(48, 'time', units='hours since epoch',
                                  bounds=[36, 60])
        fp = iris.coords.DimCoord(6, 'forecast_period', units='hours')
        self._check_raises(rt, fp, 'bounds')

    def test_frt_with_bounds(self):
        rt = iris.coords.DimCoord(48, 'forecast_reference_time',
                                  units='hours since epoch',
                                  bounds=[42, 54])
        fp = iris.coords.DimCoord(6, 'forecast_period', units='hours')
        self._check_raises(rt, fp, 'bounds')

    def test_fp_with_bounds(self):
        rt = iris.coords.DimCoord(48, 'time', units='hours since epoch')
        fp = iris.coords.DimCoord(6, 'forecast_period', units='hours',
                                  bounds=[3, 9])
        self._check_raises(rt, fp, 'bounds')

    def test_vector_t(self):
        rt = iris.coords.DimCoord([0, 3], 'time', units='hours since epoch')
        fp = iris.coords.DimCoord(6, 'forecast_period', units='hours')
        self._check_raises(rt, fp, 'Vector')

    def test_vector_frt(self):
        rt = iris.coords.DimCoord([0, 3], 'forecast_reference_time',
                                  units='hours since epoch')
        fp = iris.coords.DimCoord(6, 'forecast_period', units='hours')
        self._check_raises(rt, fp, 'Vector')

    def test_vector_fp(self):
        rt = iris.coords.DimCoord(48, 'time', units='hours since epoch')
        fp = iris.coords.DimCoord([6, 12], 'forecast_period', units='hours')
        self._check_raises(rt, fp, 'Vector')

    def test_invalid_rt_name(self):
        rt = iris.coords.DimCoord(1, 'height')
        fp = iris.coords.DimCoord(6, 'forecast_period', units='hours')
        self._check_raises(rt, fp, 'reference time')

    def test_invalid_t_unit(self):
        rt = iris.coords.DimCoord(1, 'time', units='Pa')
        fp = iris.coords.DimCoord(6, 'forecast_period', units='hours')
        self._check_raises(rt, fp, 'unit.*Pa')

    def METHOD_NAME(self):
        # NOTE(review): method name is mangled in this copy; it exercises an
        # invalid forecast_reference_time unit.
        rt = iris.coords.DimCoord(1, 'forecast_reference_time', units='km')
        fp = iris.coords.DimCoord(6, 'forecast_period', units='hours')
        self._check_raises(rt, fp, 'unit.*km')

    def test_invalid_fp_unit(self):
        rt = iris.coords.DimCoord(48, 'forecast_reference_time',
                                  units='hours since epoch')
        fp = iris.coords.DimCoord(6, 'forecast_period', units='kg')
        self._check_raises(rt, fp, 'unit.*kg')
# Run the test suite when this file is executed as a script.
if __name__ == '__main__':
    tests.main()
299,658 | test issue status | """Unit tests for the Jira issue status collector."""
from model.issue_status import IssueStatusCategory
from .base import JiraTestCase
class JiraIssuesTest(JiraTestCase):
    """Unit tests for the Jira issue status collector."""

    METRIC_TYPE = "issue_status"
    # Canned values reused across the JSON fixtures and the assertions below.
    ISSUE_NAME = "Issue name"
    CREATED = "1970-01-01T00:00:00.000+0000"
    RELEASE_NAME = "1.0"
    RELEASE_RELEASED = False
    # Dates far in the future, presumably so the fixtures never become stale
    # relative to "now" — TODO confirm.
    RELEASE_DATE = "3000-01-02"
    SPRINT_NAME = "Sprint 1"
    SPRINT_STATE = "active"
    SPRINT_ENDDATE = "3000-01-01"

    def setUp(self):
        """Extend to add an issue tracker to the metric."""
        super().setUp()
        self.metric["issue_tracker"] = {"type": "jira", "parameters": {"url": "https://jira"}}
        self.metric["issue_ids"] = ["FOO-42"]

    def assert_issue_status(  # noqa: PLR0913
        self,
        response,
        summary: str | None = None,
        connection_error: str | None = None,
        parse_error: str | None = None,
        status_category: IssueStatusCategory = "todo",
        release: bool = False,
        sprint: bool = False,
    ) -> None:
        """Assert that the issue has the expected attributes."""
        issue_status = response.as_dict()["issue_status"][0]
        self.assertEqual("FOO-42", issue_status["issue_id"])
        if summary:
            self.assertEqual(summary, issue_status["summary"])
        if connection_error or parse_error:
            # On error, no issue fields should have been parsed, and the two
            # error kinds must be mutually exclusive.
            self.assertNotIn("name", issue_status)
            self.assertNotIn("status_category", issue_status)
            if connection_error:
                self.assertIn(connection_error, issue_status["connection_error"])
                self.assertNotIn("parse_error", issue_status)
            if parse_error:
                self.assertIn(parse_error, issue_status["parse_error"])
                self.assertNotIn("connection_error", issue_status)
        else:
            self.assertEqual(self.ISSUE_NAME, issue_status["name"])
            self.assertEqual(status_category, issue_status["status_category"])
            self.assertEqual(self.CREATED, issue_status["created"])
            # Sprint fields appear only when the fixture provides a sprint.
            if sprint:
                self.assertEqual(self.SPRINT_NAME, issue_status["sprint_name"])
                self.assertEqual(self.SPRINT_STATE, issue_status["sprint_state"])
                self.assertEqual(self.SPRINT_ENDDATE, issue_status["sprint_enddate"])
            else:
                self.assertNotIn("sprint_name", issue_status)
                self.assertNotIn("sprint_state", issue_status)
                self.assertNotIn("sprint_enddate", issue_status)
            # Release fields appear only when the fixture provides fixVersions.
            if release:
                self.assertEqual(self.RELEASE_NAME, issue_status["release_name"])
                self.assertEqual(self.RELEASE_RELEASED, issue_status["release_released"])
                self.assertEqual(self.RELEASE_DATE, issue_status["release_date"])
            else:
                self.assertNotIn("release_name", issue_status)
                self.assertNotIn("release_released", issue_status)
                self.assertNotIn("release_date", issue_status)
            self.assertNotIn("connection_error", issue_status)
            self.assertNotIn("parse_error", issue_status)
        self.assertEqual(
            "https://jira/rest/agile/1.0/issue/FOO-42?fields=created,status,summary,updated,duedate,fixVersions,sprint",
            issue_status["api_url"],
        )
        self.assertEqual("https://jira/browse/FOO-42", issue_status["landing_url"])

    async def METHOD_NAME(self):
        """Test that the issue status is returned."""
        # NOTE(review): method name is mangled in this copy; it covers the
        # basic "todo" status-category case.
        issue_status_json = {
            "fields": {"status": {"name": self.ISSUE_NAME, "statusCategory": {"key": "new"}}, "created": self.CREATED},
        }
        response = await self.collect(get_request_json_return_value=issue_status_json)
        self.assert_issue_status(response)

    async def test_issue_status_doing(self):
        """Test that the issue status is returned."""
        # Jira's "indeterminate" status category maps to "doing".
        issue_status_json = {
            "fields": {
                "status": {"name": self.ISSUE_NAME, "statusCategory": {"key": "indeterminate"}},
                "created": self.CREATED,
            },
        }
        response = await self.collect(get_request_json_return_value=issue_status_json)
        self.assert_issue_status(response, status_category="doing")

    async def test_issue_status_done(self):
        """Test that the issue status is returned."""
        issue_status_json = {
            "fields": {"status": {"name": self.ISSUE_NAME, "statusCategory": {"key": "done"}}, "created": self.CREATED},
        }
        response = await self.collect(get_request_json_return_value=issue_status_json)
        self.assert_issue_status(response, status_category="done")

    async def test_issue_summary(self):
        """Test that the issue summary is returned."""
        issue_status_json = {
            "fields": {
                "status": {"name": self.ISSUE_NAME, "statusCategory": {"key": "new"}},
                "summary": "Issue summary",
                "created": self.CREATED,
            },
        }
        response = await self.collect(get_request_json_return_value=issue_status_json)
        self.assert_issue_status(response, summary="Issue summary")

    async def test_issue_release(self):
        """Test that the issue release is returned."""
        issue_status_json = {
            "fields": {
                "created": self.CREATED,
                "status": {"name": self.ISSUE_NAME, "statusCategory": {"key": "done"}},
                "fixVersions": [
                    {"name": self.RELEASE_NAME, "released": self.RELEASE_RELEASED, "releaseDate": self.RELEASE_DATE},
                ],
            },
        }
        response = await self.collect(get_request_json_return_value=issue_status_json)
        self.assert_issue_status(response, status_category="done", release=True)

    async def test_issue_sprint(self):
        """Test that the issue sprint is returned."""
        issue_status_json = {
            "fields": {
                "created": self.CREATED,
                "status": {"name": self.ISSUE_NAME, "statusCategory": {"key": "done"}},
                "sprint": {"name": self.SPRINT_NAME, "state": self.SPRINT_STATE, "endDate": self.SPRINT_ENDDATE},
            },
        }
        response = await self.collect(get_request_json_return_value=issue_status_json)
        self.assert_issue_status(response, status_category="done", sprint=True)

    async def test_connection_error(self):
        """Test that the issue status is returned, even when there is a connection error."""
        response = await self.collect(get_request_side_effect=BrokenPipeError)
        self.assert_issue_status(response, connection_error="BrokenPipeError")

    async def test_parse_error(self):
        """Test that the issue status is returned, even when there is a parse error."""
        # A null status field makes the collector fail with a TypeError.
        issue_status_json = {"fields": {"status": None}}
        response = await self.collect(get_request_json_return_value=issue_status_json)
        self.assert_issue_status(response, parse_error="TypeError")
299,659 | anim func | # coding: utf-8
#
# Copyright 2018 Yaman Güçlü
import numpy as np
__all__ = ('refine_array_1d', 'unroll_edges', 'split_space', 'split_field', 'animate_field')
#===============================================================================
def refine_array_1d(x, n, remove_duplicates=True):
    """
    Refine a 1D array by splitting each interval (x[i], x[i+1]) into n
    identical parts.

    Parameters
    ----------
    x : ndarray
        1D array to be refined.

    n : int
        Number of subdivisions to be created in each interval (x[i], x[i+1]).

    remove_duplicates : bool, default=True
        If True, interior grid points of x appear once in the result.
        If False, the points x[1:-1] appear twice: this may be useful to
        visualize fields that are discontinuous across cell boundaries.

    Returns
    -------
    ndarray
        Refined 1D array of length ``n * (len(x) - 1) + 1`` when
        ``remove_duplicates`` is True, ``(n + 1) * (len(x) - 1)`` otherwise.
    """
    points_per_cell = n if remove_duplicates else n + 1
    include_right = not remove_duplicates

    refined = []
    for left, right in zip(x[:-1], x[1:]):
        refined.extend(np.linspace(left, right, points_per_cell,
                                   endpoint=include_right))
    if remove_duplicates:
        refined.append(x[-1])

    return np.array(refined)
#===============================================================================
def unroll_edges(domain, xgrid):
    """If necessary, "unroll" intervals that cross boundary of periodic domain.
    """
    x_min, x_max = domain

    # Input sanity: xgrid must be sorted and contained in the domain.
    assert all(np.diff(xgrid) >= 0)
    assert x_min < x_max
    assert x_min <= xgrid[0]
    assert xgrid[-1] <= x_max

    period = x_max - x_min
    if xgrid[0] != x_min:
        # Wrap the last grid point to just before the first one.
        return np.array([xgrid[-1] - period, *xgrid])
    if xgrid[-1] != x_max:
        # Wrap the first grid point to just after the last one.
        return np.array([*xgrid, xgrid[0] + period])
    # Grid already touches both domain edges: nothing to unroll.
    return xgrid
#===============================================================================
def split_space(Xh):
    """Split the flattened fem spaces into
    a list of spaces that corresponds to the symbolic function spaces.

    Parameters
    ----------
    Xh : ProductFemSpace
        The discrete space.

    Returns
    -------
    Vh : <list, FemSpace>
        List of fem spaces.
    """
    from sympde.topology.space import VectorFunctionSpace
    from psydac.fem.vector import ProductFemSpace

    symbolic = Xh.symbolic_space
    remaining = list(Xh.spaces)

    result = []
    for Vi in symbolic.spaces:
        if isinstance(Vi, VectorFunctionSpace):
            # A vector space consumes `ldim` scalar components.
            fem_space = ProductFemSpace(*remaining[:Vi.ldim])
            remaining = remaining[Vi.ldim:]
        else:
            fem_space = remaining.pop(0)
        fem_space.symbolic_space = Vi
        result.append(fem_space)

    return result
#===============================================================================
def split_field(uh, spaces, out=None):
    """Split a field into a list of fields that corresponds to the spaces.
    The split field function will allocate new memory if out is not passed.

    Parameters
    ----------
    uh : FemField
        The fem field.
    spaces: <list, FemSpace>
        List of spaces that split the field.
    out: optional, <list, FemField>
        List of fields to write the results to.

    Returns
    -------
    out : <list, FemField>
        List of fem fields.
    """
    from psydac.fem.basic import FemField

    if out is None:
        # Allocate one destination field per target space.
        out = [FemField(space) for space in spaces]

    # Flatten vector-valued destination fields into their scalar components.
    components = []
    for field in out:
        components.extend(field.fields if field.fields else [field])

    # Copy coefficients component by component.
    for dst, src in zip(components, uh.fields):
        assert dst.space is src.space
        dst.coeffs[:] = src.coeffs[:]

    return out
#===============================================================================
def animate_field(fields, domain, mapping, res=(150,150), vrange=None, cmap=None, interval=35, progress=False, figsize=(14,4)):
    """Animate a sequence of two-component fields over a 2D geometry.

    Parameters
    ----------
    fields : iterable of FemField
        One field per animation frame; each must expose two scalar components
        through its ``.fields`` attribute.
    domain : Domain
        Logical domain providing ``min_coords`` / ``max_coords``.
    mapping : callable
        Mapping from logical coordinates (e1, e2) to physical (x, y).
    res : tuple(int, int)
        Number of refinement subdivisions along each logical direction.
    vrange : tuple(float, float), optional
        Color range (vmin, vmax). If None, it is computed from the first field.
        (Bug fix: this parameter was previously accepted but always overwritten.)
    cmap : optional
        Matplotlib colormap name or object.
    interval : int
        Delay between frames, in milliseconds.
    progress : bool
        If True, show a tqdm progress bar while frames are rendered.
        (Bug fix: this flag was previously ignored.)
    figsize : tuple
        Figure size in inches.

    Returns
    -------
    matplotlib.animation.FuncAnimation
        The animation object; keep a reference alive while displaying it.
    """
    from matplotlib import animation
    import matplotlib.pyplot as plt
    import tqdm

    fields = list(fields)
    fig, ax = plt.subplots(figsize=figsize)
    ax.set_aspect('equal')

    # Refined logical grid along each direction.
    etas = [refine_array_1d(bounds, r) for r, bounds in zip(res, zip(domain.min_coords, domain.max_coords))]
    # Physical coordinates of every grid point.
    pcoords = np.array([[mapping(e1, e2) for e2 in etas[1]] for e1 in etas[0]])
    xx = pcoords[:, :, 0]
    yy = pcoords[:, :, 1]

    def _magnitude(field):
        # Pointwise Euclidean norm of the field's two components on the grid.
        c1 = np.array([[field.fields[0](e1, e2) for e2 in etas[1]] for e1 in etas[0]])
        c2 = np.array([[field.fields[1](e1, e2) for e2 in etas[1]] for e1 in etas[0]])
        return np.hypot(c1, c2)

    num = _magnitude(fields[0])

    # Determine color range from the first field only if the caller did not
    # supply one explicitly.
    if vrange is None:
        vrange = (num.min(), num.max())

    # Use the Axes method directly: the `axes=` kwarg of plt.pcolormesh was
    # removed in recent matplotlib versions.
    quadmesh = ax.pcolormesh(xx, yy, num, shading='gouraud', cmap=cmap,
                             vmin=vrange[0], vmax=vrange[1])
    fig.colorbar(quadmesh, ax=ax)

    # Progress bar over the frames, honouring the `progress` flag.
    pbar = tqdm.tqdm(total=len(fields)) if progress else None

    def _update_frame(i):
        quadmesh.set_array(_magnitude(fields[i]))
        if pbar is not None:
            pbar.update()
            if i == len(fields) - 1:
                pbar.close()

    return animation.FuncAnimation(fig, _update_frame, frames=len(fields), interval=interval)
299,660 | set message property | #!/usr/bin/python
# SPDX-License-Identifier: LGPL-2.1-or-later
from __future__ import absolute_import, print_function, unicode_literals
from optparse import OptionParser
import os
from pprint import pformat
import sys
import dbus
import dbus.mainloop.glib
try:
from gi.repository import GObject
except ImportError:
import gobject as GObject
BUS_NAME='org.bluez.obex'
PATH = '/org/bluez/obex'
CLIENT_INTERFACE = 'org.bluez.obex.Client1'
SESSION_INTERFACE = 'org.bluez.obex.Session1'
MESSAGE_ACCESS_INTERFACE = 'org.bluez.obex.MessageAccess1'
MESSAGE_INTERFACE = 'org.bluez.obex.Message1'
TRANSFER_INTERFACE = 'org.bluez.obex.Transfer1'
def unwrap(x):
    """Hack to unwrap D-Bus values, so that they're easier to read when
    printed. Taken from d-feet """
    # Recurse into containers first so nested D-Bus types are unwrapped too.
    # (Bug fix: `map(...)` returned a lazy iterator on Python 3, which printed
    # as "<map object ...>"; build a real list instead.)
    if isinstance(x, list):
        return [unwrap(e) for e in x]
    if isinstance(x, tuple):
        return tuple(unwrap(e) for e in x)
    if isinstance(x, dict):
        return dict([(unwrap(k), unwrap(v)) for k, v in x.items()])
    # D-Bus scalar types subclass the corresponding native types; casting to
    # the first matching native type strips the D-Bus wrapper.  `unicode` and
    # `long` exist only on Python 2, so fall back to the Python 3 set.
    try:
        scalar_types = [unicode, str, long, int, float, bool]
    except NameError:
        scalar_types = [str, int, float, bool]
    for t in scalar_types:
        if isinstance(x, t):
            return t(x)
    return x
def parse_options():
    """Register all command-line options and parse ``sys.argv``.

    NOTE(review): this function relies on the module-global ``parser``
    created in the ``__main__`` block below; calling it before that
    assignment raises NameError — confirm whether the parser should be
    created (and returned) here instead.

    Returns the ``(options, args)`` tuple from ``OptionParser.parse_args()``.
    """
    parser.add_option("-d", "--device", dest="device",
                      help="Device to connect", metavar="DEVICE")
    parser.add_option("-c", "--chdir", dest="new_dir",
                      help="Change current directory to DIR", metavar="DIR")
    parser.add_option("-l", "--lsdir", action="store_true", dest="ls_dir",
                      help="List folders in current directory")
    parser.add_option("-v", "--verbose", action="store_true", dest="verbose")
    parser.add_option("-L", "--lsmsg", action="store", dest="ls_msg",
                      help="List messages in supplied CWD subdir")
    parser.add_option("-g", "--get", action="store", dest="get_msg",
                      help="Get message contents")
    parser.add_option("-p", "--push", action="store", dest="push_msg",
                      help="Push message")
    parser.add_option("--get-properties", action="store", dest="get_msg_properties",
                      help="Get message properties")
    parser.add_option("--mark-read", action="store", dest="mark_msg_read",
                      help="Marks the messages as read")
    parser.add_option("--mark-unread", action="store", dest="mark_msg_unread",
                      help="Marks the messages as unread")
    parser.add_option("--mark-deleted", action="store", dest="mark_msg_deleted",
                      help="Deletes the message from the folder")
    parser.add_option("--mark-undeleted", action="store", dest="mark_msg_undeleted",
                      help="Undeletes the message")
    parser.add_option("-u", "--update-inbox", action="store_true", dest="update_inbox",
                      help="Checks for new mails")
    return parser.parse_args()
def set_folder(session, new_dir):
    """Change the current folder of the given OBEX session object."""
    session.SetFolder(new_dir)
class MapClient:
    """Thin client for the org.bluez.obex Message Access Profile (MAP) API.

    Wraps one OBEX session and exposes folder/message operations, tracking
    transfer progress through D-Bus ``PropertiesChanged`` signals.

    NOTE(review): several methods use the module-global ``bus`` and
    ``mainloop`` created in the ``__main__`` block — confirm before reuse.
    """

    def __init__(self, session_path, verbose=False):
        self.progress = 0
        self.transfer_path = None
        # Transfer properties, keyed by transfer object path.
        self.props = dict()
        self.verbose = verbose
        self.path = session_path
        bus = dbus.SessionBus()
        obj = bus.get_object(BUS_NAME, session_path)
        self.session = dbus.Interface(obj, SESSION_INTERFACE)
        self.map = dbus.Interface(obj, MESSAGE_ACCESS_INTERFACE)
        # Watch all property changes so transfer completion/errors are seen.
        bus.add_signal_receiver(self.properties_changed,
                                dbus_interface="org.freedesktop.DBus.Properties",
                                signal_name="PropertiesChanged",
                                path_keyword="path")

    def create_transfer_reply(self, path, properties):
        """Success callback for calls that start an OBEX transfer."""
        self.transfer_path = path
        self.props[path] = properties
        if self.verbose:
            print("Transfer created: %s (file %s)" % (path,
                                                      properties["Filename"]))

    def generic_reply(self):
        """Success callback for calls that return no value."""
        if self.verbose:
            print("Operation succeeded")

    def error(self, err):
        """Error callback: report the failure and stop the main loop."""
        print(err)
        mainloop.quit()

    def transfer_complete(self, path):
        """Print and delete the temporary file of a finished transfer."""
        if self.verbose:
            print("Transfer finished")
        properties = self.props.get(path)
        if properties is None:
            return
        # Bug fix: the file handle was previously leaked (never closed);
        # read within a context manager, then remove the temporary file.
        with open(properties["Filename"], "r") as f:
            print(f.readlines())
        os.remove(properties["Filename"])

    def transfer_error(self, path):
        """Report a failed transfer and stop the main loop."""
        print("Transfer %s error" % path)
        mainloop.quit()

    def properties_changed(self, interface, properties, invalidated, path):
        """Dispatch status updates for transfers this client started."""
        req = self.props.get(path)
        if req is None:
            # Not one of our transfers; ignore.
            return
        if properties['Status'] == 'complete':
            self.transfer_complete(path)
            return
        if properties['Status'] == 'error':
            self.transfer_error(path)
            return

    def set_folder(self, new_dir):
        """Change the session's current folder."""
        self.map.SetFolder(new_dir)

    def list_folders(self):
        """Print the sub-folders of the current folder."""
        for i in self.map.ListFolders(dict()):
            print("%s/" % (i["Name"]))

    def list_messages(self, folder):
        """Print the message listing of `folder` (relative to the CWD)."""
        ret = self.map.ListMessages(folder, dict())
        print(pformat(unwrap(ret)))

    def get_message(self, handle):
        """Start an asynchronous download of a message's contents."""
        # ListMessages populates the message objects on the session path.
        self.map.ListMessages("", dict())
        path = self.path + "/message" + handle
        obj = bus.get_object(BUS_NAME, path)
        msg = dbus.Interface(obj, MESSAGE_INTERFACE)
        msg.Get("", True, reply_handler=self.create_transfer_reply,
                error_handler=self.error)

    def push_message(self, filename):
        """Upload `filename` to the remote outbox."""
        self.map.PushMessage(filename, "telecom/msg/outbox", dict(),
                             reply_handler=self.create_transfer_reply,
                             error_handler=self.error)

    def get_message_properties(self, handle):
        """Print all D-Bus properties of the message with `handle`."""
        self.map.ListMessages("", dict())
        path = self.path + "/message" + handle
        obj = bus.get_object(BUS_NAME, path)
        msg = dbus.Interface(obj, "org.freedesktop.DBus.Properties")
        ret = msg.GetAll(MESSAGE_INTERFACE)
        print(pformat(unwrap(ret)))

    def METHOD_NAME(self, handle, prop, flag):
        """Set a boolean message property, e.g. "Read" or "Deleted"."""
        self.map.ListMessages("", dict())
        path = self.path + "/message" + handle
        obj = bus.get_object(BUS_NAME, path)
        msg = dbus.Interface(obj, MESSAGE_INTERFACE)
        msg.SetProperty(prop, flag)

    def update_inbox(self):
        """Ask the remote device to check for new messages."""
        self.map.UpdateInbox()
if __name__ == '__main__':
    # Route D-Bus signals through the GLib main loop.
    dbus.mainloop.glib.DBusGMainLoop(set_as_default=True)
    # NOTE(review): `parser` and `bus` are module-level globals consumed by
    # parse_options() and MapClient.get_message() above — confirm before
    # moving this code into a main() function.
    parser = OptionParser()
    (options, args) = parse_options()
    if not options.device:
        parser.print_help()
        exit(0)
    bus = dbus.SessionBus()
    mainloop = GObject.MainLoop()
    client = dbus.Interface(bus.get_object(BUS_NAME, PATH),
                            CLIENT_INTERFACE)
    print("Creating Session")
    # Create a MAP session with the chosen remote device.
    path = client.CreateSession(options.device, { "Target": "map" })
    map_client = MapClient(path, options.verbose)
    # Execute each requested operation, in declaration order.
    if options.new_dir:
        map_client.set_folder(options.new_dir)
    if options.ls_dir:
        map_client.list_folders()
    if options.ls_msg is not None:
        map_client.list_messages(options.ls_msg)
    if options.get_msg is not None:
        map_client.get_message(options.get_msg)
    if options.push_msg is not None:
        map_client.push_message(options.push_msg)
    if options.get_msg_properties is not None:
        map_client.get_message_properties(options.get_msg_properties)
    if options.mark_msg_read is not None:
        map_client.METHOD_NAME(options.mark_msg_read, "Read", True)
    if options.mark_msg_unread is not None:
        map_client.METHOD_NAME(options.mark_msg_unread, "Read", False)
    if options.mark_msg_deleted is not None:
        map_client.METHOD_NAME(options.mark_msg_deleted, "Deleted", True)
    if options.mark_msg_undeleted is not None:
        map_client.METHOD_NAME(options.mark_msg_undeleted, "Deleted", False)
    if options.update_inbox:
        map_client.update_inbox()
    # Block waiting for asynchronous replies and transfers.
    mainloop.run()
299,661 | has c name | from _typeshed import Incomplete
from collections.abc import Generator
from .error import CDefError as CDefError, VerificationError as VerificationError, VerificationMissing as VerificationMissing
from .lock import allocate_lock as allocate_lock
Q_CONST: int
Q_RESTRICT: int
Q_VOLATILE: int
def qualify(quals, replace_with): ...
class BaseTypeByIdentity:
    # Root of the cffi C-type model hierarchy; nodes compare by identity.
    is_array_type: bool
    is_raw_function: bool
    def get_c_name(self, replace_with: str = "", context: str = "a C file", quals: int = 0): ...
    # NOTE(review): METHOD_NAME is a placeholder — presumably has_c_name();
    # confirm against the cffi.model implementation.
    def METHOD_NAME(self): ...
    def is_integer_type(self): ...
    def get_cached_btype(self, ffi, finishlist, can_delay: bool = False): ...
class BaseType(BaseTypeByIdentity):
def __eq__(self, other): ...
def __ne__(self, other): ...
def __hash__(self) -> int: ...
class VoidType(BaseType):
c_name_with_marker: str
def __init__(self) -> None: ...
def build_backend_type(self, ffi, finishlist): ...
void_type: Incomplete
class BasePrimitiveType(BaseType):
def is_complex_type(self): ...
class PrimitiveType(BasePrimitiveType):
ALL_PRIMITIVE_TYPES: Incomplete
name: Incomplete
c_name_with_marker: Incomplete
def __init__(self, name) -> None: ...
def is_char_type(self): ...
def is_integer_type(self): ...
def is_float_type(self): ...
def is_complex_type(self): ...
def build_backend_type(self, ffi, finishlist): ...
class UnknownIntegerType(BasePrimitiveType):
name: Incomplete
c_name_with_marker: Incomplete
def __init__(self, name) -> None: ...
def is_integer_type(self): ...
def build_backend_type(self, ffi, finishlist) -> None: ...
class UnknownFloatType(BasePrimitiveType):
name: Incomplete
c_name_with_marker: Incomplete
def __init__(self, name) -> None: ...
def build_backend_type(self, ffi, finishlist) -> None: ...
class BaseFunctionType(BaseType):
args: Incomplete
result: Incomplete
ellipsis: Incomplete
abi: Incomplete
c_name_with_marker: Incomplete
def __init__(self, args, result, ellipsis, abi: Incomplete | None = None) -> None: ...
class RawFunctionType(BaseFunctionType):
is_raw_function: bool
def build_backend_type(self, ffi, finishlist) -> None: ...
def as_function_pointer(self): ...
class FunctionPtrType(BaseFunctionType):
def build_backend_type(self, ffi, finishlist): ...
def as_raw_function(self): ...
class PointerType(BaseType):
totype: Incomplete
quals: Incomplete
c_name_with_marker: Incomplete
def __init__(self, totype, quals: int = 0) -> None: ...
def build_backend_type(self, ffi, finishlist): ...
voidp_type: Incomplete
def ConstPointerType(totype): ...
const_voidp_type: Incomplete
class NamedPointerType(PointerType):
name: Incomplete
c_name_with_marker: Incomplete
def __init__(self, totype, name, quals: int = 0) -> None: ...
class ArrayType(BaseType):
is_array_type: bool
item: Incomplete
length: Incomplete
c_name_with_marker: Incomplete
def __init__(self, item, length) -> None: ...
def length_is_unknown(self): ...
def resolve_length(self, newlength): ...
def build_backend_type(self, ffi, finishlist): ...
char_array_type: Incomplete
class StructOrUnionOrEnum(BaseTypeByIdentity):
forcename: Incomplete
c_name_with_marker: Incomplete
def build_c_name_with_marker(self) -> None: ...
def force_the_name(self, forcename) -> None: ...
def get_official_name(self): ...
class StructOrUnion(StructOrUnionOrEnum):
    # Model node shared by C struct and union types.
    fixedlayout: Incomplete
    completed: int
    # True while the declaration is still partial (e.g. "struct foo { ...; }").
    partial: bool
    packed: int
    name: Incomplete
    # Parallel sequences describing the fields: names, types, bit sizes, qualifiers.
    fldnames: Incomplete
    fldtypes: Incomplete
    fldbitsize: Incomplete
    fldquals: Incomplete
    def __init__(self, name, fldnames, fldtypes, fldbitsize, fldquals: Incomplete | None = None) -> None: ...
    def anonymous_struct_fields(self) -> Generator[Incomplete, None, None]: ...
    def enumfields(self, expand_anonymous_struct_union: bool = True) -> Generator[Incomplete, None, None]: ...
    def force_flatten(self) -> None: ...
    def get_cached_btype(self, ffi, finishlist, can_delay: bool = False): ...
    def finish_backend_type(self, ffi, finishlist) -> None: ...
    def check_not_partial(self) -> None: ...
    def build_backend_type(self, ffi, finishlist): ...
class StructType(StructOrUnion):
kind: str
class UnionType(StructOrUnion):
kind: str
class EnumType(StructOrUnionOrEnum):
    # Model node for a C enum type.
    kind: str
    # True while the declaration is still partial ("enum foo { ... };").
    partial: bool
    partial_resolved: bool
    name: Incomplete
    # Parallel sequences of enumerator names and their integer values.
    enumerators: Incomplete
    enumvalues: Incomplete
    # Underlying integer type of the enum, if known.
    baseinttype: Incomplete
    def __init__(self, name, enumerators, enumvalues, baseinttype: Incomplete | None = None) -> None: ...
    forcename: Incomplete
    def force_the_name(self, forcename) -> None: ...
    def check_not_partial(self) -> None: ...
    def build_backend_type(self, ffi, finishlist): ...
    def build_baseinttype(self, ffi, finishlist): ...
def unknown_type(name, structname: Incomplete | None = None): ...
def unknown_ptr_type(name, structname: Incomplete | None = None): ...
global_lock: Incomplete
def get_typecache(backend): ...
def global_cache(srctype, ffi, funcname, *args, **kwds): ...
def pointer_cache(ffi, BType): ...
def attach_exception_info(e, name) -> None: ... |
299,662 | slice | """
This is only a bridge for using Brian 1 hears with Brian 2.
.. deprecated:: 2.2.2.2
Use the `brian2hears <https://brian2hears.readthedocs.io/>`_ package instead.
NOTES:
* Slicing sounds with Brian 2 units doesn't work, you need to either use Brian 1 units or replace calls to
``sound[:20*ms]`` with ``sound.slice(None, 20*ms)``, etc.
TODO: handle properties (e.g. sound.duration)
Not working examples:
* time_varying_filter1 (care with units)
"""
try:
import brian as b1
import brian.hears as b1h
except ImportError:
raise ImportError(
"brian2.hears is deprecated and will be removed in a future release, please use"
" the brian2hears package available at https://brian2hears.readthedocs.io/. If"
" you really want to keep using it, note: brian2.hears is a bridge between"
" Brian 2 and the version of Brian Hears from Brian 1, you need to have Brian 1"
" installed to use it."
)
from inspect import isclass, ismethod
from numpy import asarray, ndarray
from brian2.core.clocks import Clock
from brian2.core.operations import network_operation
from brian2.groups.neurongroup import NeuronGroup
from brian2.units import second
from brian2.units.fundamentalunits import Quantity
from brian2.utils.logger import get_logger
logger = get_logger(__name__)
logger.warn(
"brian2.hears is deprecated and will be removed in a future release, please use the"
" brian2hears package available at https://brian2hears.readthedocs.io/. If you"
" really want to keep using it, note that it is a bridge between Brian 2 and Brian"
" Hears from Brian 1. This is not guaranteed to work in all cases that brian.hears"
" works. See the limitations in the online documentation."
)
def convert_unit_b1_to_b2(val):
    """Convert a Brian 1 Quantity into an equivalent Brian 2 Quantity."""
    return Quantity.with_dimensions(float(val), val.dim._dims)
def convert_unit_b2_to_b1(val):
    """Convert a Brian 2 Quantity into an equivalent Brian 1 Quantity."""
    return b1.Quantity.with_dimensions(float(val), val.dim._dims)
def modify_arg(arg):
    """
    Modify arguments to make them compatible with Brian 1.
    - Arrays of units are replaced with straight arrays
    - Single values are replaced with Brian 1 equivalents
    - Slices are handled so we can use e.g. sound[:20*ms]
    The second part was necessary because some functions/classes test if an object is an array or not to see if it
    is a sequence, but because brian2.Quantity derives from ndarray this was causing problems.
    """
    if isinstance(arg, Quantity):
        if len(arg.shape) == 0:
            # Scalar quantity: re-wrap as a Brian 1 Quantity with the same dimensions.
            arg = b1.Quantity.with_dimensions(float(arg), arg.dim._dims)
        else:
            # Array quantity: strip the units entirely.
            arg = asarray(arg)
    elif isinstance(arg, METHOD_NAME):
        # NOTE(review): METHOD_NAME appears to stand in for the built-in
        # `slice` type here (each slice bound is converted recursively) —
        # confirm against the original brian2.hears source.
        arg = METHOD_NAME(modify_arg(arg.start), modify_arg(arg.stop), modify_arg(arg.step))
    return arg
def wrap_units(f):
    """
    Wrap a function to convert units into a form that Brian 1 can handle. Also, check the output argument, if it is
    a ``b1h.Sound`` wrap it.
    """

    def new_f(*args, **kwds):
        # Convert every positional and keyword argument to Brian-1 form.
        newargs = []
        newkwds = {}
        for arg in args:
            newargs.append(modify_arg(arg))
        for k, v in kwds.items():
            newkwds[k] = modify_arg(v)
        rv = f(*newargs, **newkwds)
        # Exact-class check (not isinstance): only plain b1h.Sound results
        # are re-branded as the bridge class with the extra slice method.
        if rv.__class__ == b1h.Sound:
            rv.__class__ = BridgeSound
        elif isinstance(rv, b1.Quantity):
            # Convert scalar results back into Brian 2 quantities.
            rv = Quantity.with_dimensions(float(rv), rv.dim._dims)
        return rv

    return new_f
def wrap_units_property(p):
    """Return a copy of property `p` whose getter/setter/deleter are
    unit-wrapped with `wrap_units` (accessors that are None stay None)."""
    accessors = (p.fget, p.fset, p.fdel)
    wrapped = [wrap_units(a) if a is not None else None for a in accessors]
    return property(*wrapped)
def wrap_units_class(_C):
    """
    Wrap a class to convert units into a form that Brian 1 can handle in all methods
    """

    class new_class(_C):
        # Rebind every method/property of _C with a unit-converting wrapper.
        # The loop runs in the class body, so `exec` creates class attributes.
        for _k in _C.__dict__:
            _v = getattr(_C, _k)
            # Skip attributes inherited unchanged from ndarray (performance-
            # critical array machinery that needs no unit conversion).
            if hasattr(ndarray, _k) and getattr(ndarray, _k) is _v:
                continue
            if ismethod(_v):
                _v = wrap_units(_v)
                exec(f"{_k} = _v")
            elif isinstance(_v, property):
                _v = wrap_units_property(_v)
                exec(f"{_k} = _v")
        # Remove the loop variables so they don't leak into the class namespace.
        del _k
        del _v

    return new_class
WrappedSound = wrap_units_class(b1h.Sound)
class BridgeSound(WrappedSound):
    """
    We add a new method slice because slicing with units can't work with Brian 2 units.
    """

    def METHOD_NAME(self, *args):
        # Equivalent to sound[start:stop:step] but usable with Brian 2 unit
        # arguments.  NOTE(review): the inner METHOD_NAME call appears to be
        # the built-in `slice` type (building a slice object from the
        # converted bounds) — confirm against the original source.
        return self.__getitem__(METHOD_NAME(*args))
Sound = BridgeSound
class FilterbankGroup(NeuronGroup):
    """NeuronGroup whose state variable `targetvar` is driven, one sample per
    clock tick, by the output of a Brian 1 hears filterbank (one channel per
    neuron)."""

    def __init__(self, filterbank, targetvar, *args, **kwds):
        # filterbank: Brian 1 hears filterbank whose output drives the group.
        # targetvar (str): name of the state variable receiving the samples.
        self.targetvar = targetvar
        self.filterbank = filterbank
        self.buffer = None
        filterbank.buffer_init()
        # Sanitize the clock - does it have the right dt value?
        if "clock" in kwds:
            if int(1 / kwds["clock"].dt) != int(filterbank.samplerate):
                raise ValueError("Clock should have 1/dt=samplerate")
            kwds["clock"] = Clock(dt=float(kwds["clock"].dt) * second)
        else:
            # Default: one clock tick per audio sample.
            kwds["clock"] = Clock(dt=1 * second / float(filterbank.samplerate))
        buffersize = kwds.pop("buffersize", 32)
        if not isinstance(buffersize, int):
            # NOTE(review): `self.samplerate` is not set at this point (the
            # NeuronGroup is only initialised below); this likely should be
            # `filterbank.samplerate` — confirm before passing a non-integer
            # buffersize.
            buffersize = int(buffersize * self.samplerate)
        self.buffersize = buffersize
        self.buffer_pointer = buffersize
        self.buffer_start = -buffersize
        NeuronGroup.__init__(self, filterbank.nchannels, *args, **kwds)

        @network_operation(clock=self.clock, when="start")
        def apply_filterbank_output():
            # Refill the buffer in blocks of `buffersize` samples, then copy
            # one sample row per clock tick into the target state variable.
            if self.buffer_pointer >= self.buffersize:
                self.buffer_pointer = 0
                self.buffer_start += self.buffersize
                self.buffer = self.filterbank.buffer_fetch(
                    self.buffer_start, self.buffer_start + self.buffersize
                )
            setattr(self, targetvar, self.buffer[self.buffer_pointer, :])
            self.buffer_pointer += 1

        self.contained_objects.append(apply_filterbank_output)

    def reinit(self):
        """Restart the group and rewind the filterbank to the beginning."""
        NeuronGroup.reinit(self)
        self.filterbank.buffer_init()
        self.buffer_pointer = self.buffersize
        self.buffer_start = -self.buffersize
handled_explicitly = {"Sound", "FilterbankGroup"}
__all__ = [k for k in b1h.__dict__ if not k.startswith("_")]
for k in __all__:
if k in handled_explicitly:
continue
curobj = getattr(b1h, k)
if callable(curobj):
if isclass(curobj):
curobj = wrap_units_class(curobj)
else:
curobj = wrap_units(curobj)
exec(f"{k} = curobj")
__all__.extend(
[
"convert_unit_b1_to_b2",
"convert_unit_b2_to_b1",
]
) |
299,663 | get cache key | import time
from collections import deque
from django.core.cache import caches
from django.utils.translation import gettext as _
from evennia.utils import logger
class Throttle:
    """
    Keeps a running count of failed actions per IP address.

    Available methods indicate whether or not the number of failures exceeds a
    particular threshold.

    This version of the throttle is usable by both the terminal server as well
    as the web server, imposes limits on memory consumption by using deques
    with length limits instead of open-ended lists, and uses native Django
    caches for automatic key eviction and persistence configurability.
    """

    error_msg = _("Too many failed attempts; you must wait a few minutes before trying again.")

    def __init__(self, **kwargs):
        """
        Allows setting of throttle parameters.

        Keyword Args:
            name (str): Name of this throttle.
            limit (int): Max number of failures before imposing limiter. If `None`,
                the throttle is disabled.
            timeout (int): number of timeout seconds after
                max number of tries has been reached.
            cache_size (int): Max number of attempts to record per IP within a
                rolling window; this is NOT the same as the limit after which
                the throttle is imposed!
        """
        # Prefer a dedicated "throttle" cache; fall back to the default one.
        try:
            self.storage = caches["throttle"]
        except Exception:
            logger.log_trace("Throttle: Errors encountered; using default cache.")
            self.storage = caches["default"]

        self.name = kwargs.get("name", "undefined-throttle")
        self.limit = kwargs.get("limit", 5)
        self.cache_size = kwargs.get("cache_size", self.limit)
        self.timeout = kwargs.get("timeout", 5 * 60)

    def METHOD_NAME(self, *args, **kwargs):
        """
        Creates a 'prefixed' key containing arbitrary terms to prevent key
        collisions in the same namespace.
        """
        return "-".join((self.name, *args))

    def touch(self, key, *args, **kwargs):
        """
        Refreshes the timeout on a given key and ensures it is recorded in the
        key register.

        Args:
            key(str): Key of entry to renew (the raw key, NOT cache-prefixed;
                this method applies the prefix itself).
        """
        cache_key = self.METHOD_NAME(key)
        if self.storage.touch(cache_key, self.timeout):
            # Bug fix: this previously called self.record_key(), a method
            # that does not exist; the key register is kept by record_ip().
            self.record_ip(key)

    def get(self, ip=None):
        """
        Convenience function that returns the storage table, or part of.

        Args:
            ip (str, optional): IP address of requestor

        Returns:
            storage (dict): When no IP is provided, returns a dict of all
                current IPs being tracked and the timestamps of their recent
                failures.
            timestamps (deque): When an IP is provided, returns a deque of
                timestamps of recent failures only for that IP.
        """
        if ip:
            cache_key = self.METHOD_NAME(str(ip))
            return self.storage.get(cache_key, deque(maxlen=self.cache_size))
        else:
            keys_key = self.METHOD_NAME("keys")
            keys = self.storage.get_or_set(keys_key, set(), self.timeout)
            data = self.storage.get_many((self.METHOD_NAME(x) for x in keys))
            found_keys = set(data.keys())
            # Prune the key register when some entries expired or were evicted.
            # NOTE(review): `found_keys` holds *prefixed* cache keys while the
            # register normally stores raw IPs — confirm this mismatch is
            # intentional before relying on the register's contents.
            if len(keys) != len(found_keys):
                self.storage.set(keys_key, found_keys, self.timeout)
            return data

    def update(self, ip, failmsg="Exceeded threshold."):
        """
        Store the time of the latest failure.

        Args:
            ip (str): IP address of requestor
            failmsg (str, optional): Message to display in logs upon activation
                of throttle.

        Returns:
            None
        """
        cache_key = self.METHOD_NAME(ip)
        # Get current status
        previously_throttled = self.check(ip)
        # Get previous failures, if any
        entries = self.storage.get(cache_key, [])
        entries.append(time.time())
        # Store updated record, bounded to the most recent cache_size entries.
        self.storage.set(cache_key, deque(entries, maxlen=self.cache_size), self.timeout)
        # See if this update caused a change in status
        currently_throttled = self.check(ip)
        # If this makes it engage, log a single activation event
        if not previously_throttled and currently_throttled:
            logger.log_sec(
                f"Throttle Activated: {failmsg} (IP: {ip}, "
                f"{self.limit} hits in {self.timeout} seconds.)"
            )
        self.record_ip(ip)

    def remove(self, ip, *args, **kwargs):
        """
        Clears data stored for an IP from the throttle.

        Args:
            ip(str): IP to clear.

        Returns:
            bool: True when the entry is gone after removal, False when there
                was nothing to remove.
        """
        exists = self.get(ip)
        if not exists:
            return False
        cache_key = self.METHOD_NAME(ip)
        self.storage.delete(cache_key)
        self.unrecord_ip(ip)
        # Return True if NOT exists
        return not bool(self.get(ip))

    def record_ip(self, ip, *args, **kwargs):
        """
        Tracks keys as they are added to the cache (since there is no way to
        get a list of keys after-the-fact).

        Args:
            ip(str): IP being added to cache. This should be the original
                IP, not the cache-prefixed key.
        """
        keys_key = self.METHOD_NAME("keys")
        keys = self.storage.get(keys_key, set())
        keys.add(ip)
        self.storage.set(keys_key, keys, self.timeout)
        return True

    def unrecord_ip(self, ip, *args, **kwargs):
        """
        Forces removal of a key from the key registry.

        Args:
            ip(str): IP to remove from list of keys.
        """
        keys_key = self.METHOD_NAME("keys")
        keys = self.storage.get(keys_key, set())
        try:
            keys.remove(ip)
            self.storage.set(keys_key, keys, self.timeout)
            return True
        except KeyError:
            return False

    def check(self, ip):
        """
        This will check the session's address against the
        storage dictionary to check they haven't spammed too many
        fails recently.

        Args:
            ip (str): IP address of requestor

        Returns:
            throttled (bool): True if throttling is active,
                False otherwise.
        """
        if self.limit is None:
            # throttle is disabled
            return False
        now = time.time()
        ip = str(ip)
        cache_key = self.METHOD_NAME(ip)
        # checking mode
        latest_fails = self.storage.get(cache_key)
        if latest_fails and len(latest_fails) >= self.limit:
            # too many fails recently
            if now - latest_fails[-1] < self.timeout:
                # too soon - timeout in play; refresh the entry's timeout.
                # Bug fix: touch() prefixes its argument itself, so pass the
                # raw ip; passing cache_key double-prefixed the key before.
                self.touch(ip)
                return True
            else:
                # timeout has passed. clear faillist
                self.remove(ip)
                return False
        else:
            return False
299,664 | get definition id | # Copyright The Cloud Custodian Authors.
# SPDX-License-Identifier: Apache-2.0
from azure.mgmt.resource.policy.models import PolicyAssignment
from azure.mgmt.resource import SubscriptionClient
from azure.mgmt.monitor import MonitorManagementClient
from c7n.actions import BaseAction
from c7n.exceptions import PolicyValidationError
from c7n.filters.missing import Missing
from c7n.filters.core import ValueFilter
from c7n.manager import ResourceManager
from c7n.utils import local_session, type_schema
from c7n_azure.provider import resources
from c7n_azure.query import QueryMeta, TypeInfo
@resources.register('subscription')
class Subscription(ResourceManager, metaclass=QueryMeta):
    """Subscription Resource

    :example:

    This policy creates Azure Policy scoped to the current subscription if doesn't exist.

    .. code-block:: yaml

        policies:
          - name: azure-policy-sample
            resource: azure.subscription
            filters:
              - type: missing
                policy:
                  resource: azure.policyassignments
                  filters:
                    - type: value
                      key: properties.displayName
                      op: eq
                      value_type: normalize
                      value: dn_sample_policy
            actions:
              - type: add-policy
                name: sample_policy
                display_name: dn_sample_policy
                definition_name: "Audit use of classic storage accounts"
    """

    class resource_type(TypeInfo):
        # Metadata describing how this resource type is queried and indexed.
        doc_groups = ['Subscription']
        id = 'subscriptionId'
        name = 'displayName'
        filter_name = None
        service = 'subscription'

    def get_model(self):
        """Return the resource_type metadata class for this resource."""
        return self.resource_type

    def resources(self):
        """Return the (filtered) single-element list holding the current subscription."""
        return self.filter_resources([self._get_subscription(self.session_factory, self.config)])

    def get_resources(self, resource_ids):
        """Return the current subscription regardless of the requested ids."""
        return [self._get_subscription(self.session_factory, self.config)]

    def _get_subscription(self, session_factory, config):
        """Fetch the session's subscription from the ARM API as a plain dict."""
        session = local_session(session_factory)
        client = SubscriptionClient(session.get_credentials())
        details = client.subscriptions.get(subscription_id=session.get_subscription_id())
        return details.serialize(True)
Subscription.filter_registry.register('missing', Missing)
@Subscription.filter_registry.register('diagnostic-settings')
class SubscriptionDiagnosticSettingFilter(ValueFilter):
    """Filter by diagnostic settings for this subscription

    Each diagnostic setting for the subscription is made available to the filter. The data format
    is the result of making the following Azure API call and extracting the "value" property:

    https://learn.microsoft.com/en-us/rest/api/monitor/subscription-diagnostic-settings/list?tabs=HTTP

    :example:

    Example JSON document showing the data format provided to the filter

    .. code-block:: json

        {
          "id": "...",
          "name": "...",
          "properties": {
            "eventHubAuthorizationRuleId": "...",
            "eventHubName": "...",
            "logs": [
              { "category": "Administrative", "enabled": true },
              { "category": "Security", "enabled": false }
            ],
            "marketplacePartnerId": "...",
            "serviceBusRuleId": "...",
            "storageAccountId": "...",
            "workspaceId": "..."
          },
          "systemData": {},
          "type": "..."
        }

    :example:

    Check if the subscription has Security logs enabled in at least one setting

    .. code-block:: yaml

        policies:
          - name: subscription-security-logs-enabled
            resource: azure.subscription
            filters:
              - not:
                - type: diagnostic-settings
                  key: "properties.logs[?category == 'Security'].enabled[]"
                  op: contains
                  value: true
    """

    # Key under which fetched settings are memoized on each resource dict.
    cache_key = 'c7n:diagnostic-settings'

    schema = type_schema(
        'diagnostic-settings',
        rinherit=ValueFilter.schema
    )

    def _get_subscription_diagnostic_settings(self, session, subscription_id):
        """Fetch the subscription's diagnostic settings via the Monitor API."""
        client = MonitorManagementClient(
            session.get_credentials(),
            subscription_id=subscription_id
        )
        query = client.subscription_diagnostic_settings.list(subscription_id)
        settings = query.serialize(True).get('value', [])
        # put an empty item in when no diag settings so the absent operator can function
        if not settings:
            settings = [{}]
        return settings

    def process(self, resources, event=None):
        """Keep only subscriptions whose diagnostic settings match the value filter."""
        session = local_session(self.manager.session_factory)
        matched = []
        for resource in resources:
            subscription_id = resource['subscriptionId']
            # Reuse settings cached on the resource from a previous evaluation.
            if self.cache_key in resource:
                settings = resource[self.cache_key]
            else:
                settings = self._get_subscription_diagnostic_settings(
                    session,
                    subscription_id
                )
                resource[self.cache_key] = settings
            # Apply the inherited ValueFilter to the settings documents.
            filtered_settings = super().process(settings, event=None)
            if filtered_settings:
                matched.append(resource)
        return matched
@Subscription.action_registry.register('add-policy')
class AddPolicy(BaseAction):
    """Create an Azure Policy assignment scoped to the subscription.

    Looks up an existing policy definition (by display name, id, or name) and
    assigns it at subscription scope, optionally narrowed by ``scope``.
    """

    schema = type_schema('add-policy',
                         required=['name', 'display_name', 'definition_name'],
                         scope={'type': 'string'},
                         definition_name={'type': 'string'},
                         name={'type': 'string'},
                         display_name={'type': 'string'})

    # NOTE(review): appears unused within this class — confirm whether it can
    # be removed or is referenced elsewhere.
    policyDefinitionPrefix = '/providers/Microsoft.Authorization/policyDefinitions/'

    def __init__(self, data=None, manager=None, log_dir=None):
        super(AddPolicy, self).__init__(data, manager, log_dir)
        # Assignment name, display name, and the definition to assign.
        self.paName = self.data.get('name')
        self.displayName = self.data.get('display_name')
        self.policyDefinitionName = self.data['definition_name']

    def METHOD_NAME(self, name):
        """Resolve a policy definition by display name, id, or name; None if absent."""
        return next((r for r in self.policyClient.policy_definitions.list()
                     if name == r.display_name or name == r.id or name == r.name), None)

    def _add_policy(self, subscription):
        """Create the policy assignment for a single subscription resource."""
        parameters = PolicyAssignment(
            display_name=self.displayName,
            policy_definition_id=self.policyDefinition.id)
        self.policyClient.policy_assignments.create(
            scope=self.scope,
            policy_assignment_name=self.paName,
            parameters=parameters
        )

    def process(self, subscriptions):
        """Validate the referenced definition exists, then assign it to each subscription."""
        self.session = local_session(self.manager.session_factory)
        self.policyClient = self.session.client("azure.mgmt.resource.policy.PolicyClient")
        self.scope = '/subscriptions/' + self.session.subscription_id + \
            '/' + self.data.get('scope', '')
        self.policyDefinition = self.METHOD_NAME(self.policyDefinitionName)
        if self.policyDefinition is None:
            raise PolicyValidationError(
                "Azure Policy Definition '%s' not found." % (
                    self.policyDefinitionName))
        for s in subscriptions:
            self._add_policy(s)
299,665 | write stats to file | # -*- coding: utf-8 -*-
"""
Management command used to generate platform statistics containing
information about the number of organizations, users, projects
& submissions
"""
import calendar
import csv
import os.path
from datetime import datetime
from django.contrib.auth import get_user_model
from django.core.management.base import BaseCommand
from django.db.models import Q
from django.utils import timezone
from django.utils.translation import gettext as _
from multidb.pinning import use_master
from onadata.apps.api.models import OrganizationProfile
from onadata.apps.logger.models import Instance, XForm
from onadata.libs.permissions import is_organization
User = get_user_model()
# pylint: disable=too-many-locals
def METHOD_NAME(month: int, year: int, include_extra: bool, filename: str):
    """Write per-form submission statistics for a month to a CSV file.

    One row is written per XForm that existed (and was not deleted) at the end
    of the requested month, counting its non-deleted submissions up to that
    date.

    :param month: month (1-12) to report on.
    :param year: four-digit year to report on.
    :param include_extra: also include organization/last-login columns.
    :param filename: path of the CSV file to create.
    """
    with open(filename, "w", encoding="utf-8") as out_file:
        writer = csv.writer(out_file)
        headers = ["Username", "Project Name", "Form Title", "No. of submissions"]
        form_fields = [
            "id",
            "project__name",
            "project__organization__username",
            "title",
        ]
        if include_extra:
            headers += ["Is Organization", "Organization Created By", "User last login"]
            form_fields += ["project__organization__last_login"]
        writer.writerow(headers)
        # Cut-off instant: end (last day) of the requested month.
        _, last_day = calendar.monthrange(year, month)
        # NOTE(review): django.utils.timezone.utc was removed in Django 5.0 —
        # confirm the targeted Django version still provides it.
        date_obj = timezone.make_aware(datetime(year, month, last_day), timezone.utc)
        # Forms created before the cut-off and not deleted before it.
        forms = XForm.objects.filter(
            Q(deleted_at__isnull=True) | Q(deleted_at__gt=date_obj),
            date_created__lte=date_obj,
        ).values(*form_fields)
        # Pin reads to the primary DB to avoid replica lag in the counts.
        with use_master:
            for form in forms:
                instance_count = Instance.objects.filter(
                    Q(deleted_at__isnull=True) | Q(deleted_at__gt=date_obj),
                    xform_id=form.get("id"),
                    date_created__lte=date_obj,
                ).count()
                row = [
                    form.get("project__organization__username"),
                    form.get("project__name"),
                    form.get("title"),
                    instance_count,
                ]
                if include_extra:
                    user = User.objects.get(
                        username=form.get("project__organization__username")
                    )
                    is_org = is_organization(user.profile)
                    if is_org:
                        created_by = OrganizationProfile.objects.get(
                            user=user
                        ).creator.username
                    else:
                        # Individual accounts have no creating organization.
                        created_by = "N/A"
                    row += [
                        is_org,
                        created_by,
                        form.get("project__organization__last_login"),
                    ]
                writer.writerow(row)
class Command(BaseCommand):
    """
    Management command used to generate platform statistics containing
    information about the number of organizations, users, projects
    & submissions
    """

    help = _("Generates system statistics for the entire platform")

    def add_arguments(self, parser):
        """Register the --month/-m, --year/-y and --extra-info/-e options."""
        parser.add_argument(
            "--month",
            "-m",
            dest="month",
            help=(
                "Month to calculate system statistics for. Defaults to current month."
            ),
            default=None,
        )
        parser.add_argument(
            "--year",
            "-y",
            dest="year",
            help=("Year to calculate system statistics for. Defaults to current year"),
            default=None,
        )
        parser.add_argument(
            "--extra-info",
            "-e",
            action="store_true",
            dest="extra_info",
            default=False,
            help=(
                "Include extra information; When an Organization was created and "
                "user last login"
            ),
        )

    def handle(self, *args, **options):
        """Write the statistics CSV for the requested month/year and report it."""
        month = int(options.get("month") or datetime.now().month)
        year = int(options.get("year") or datetime.now().year)
        include_extra = bool(options.get("extra_info"))
        filename = f"platform_statistics_{month}_{year}.csv"
        METHOD_NAME(month, year, include_extra, filename)
        if os.path.exists(filename):
            # Bug fix: the success message was an f-string with no placeholder,
            # so it never reported which file had been written.
            self.stdout.write(f"File '{filename}' successfully created.")
#!/usr/local/autopkg/python
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""See docstring for SignToolVerifier class"""
import os
import os.path
import subprocess
from typing import Any, Dict, List, Optional
from autopkglib import Processor, ProcessorError
__all__ = ["SignToolVerifier"]
def signtool_default_path() -> Optional[str]:
    """Looks for signtool in a few well known paths. Deliberately naive."""
    search_order = (
        (os.environ.get("ProgramFiles(x86)"), "x64"),
        (os.environ.get("ProgramFiles(x86)"), "x86"),
        (os.environ.get("ProgramFiles"), "x64"),
        (os.environ.get("ProgramFiles"), "x86"),
    )
    for root, arch in search_order:
        if root is None:
            continue
        candidate = os.path.abspath(
            os.path.join(root, r"Windows Kits\10\bin", arch, "signtool.exe")
        )
        if os.path.exists(candidate):
            return candidate
    # Fallback location used by GitHub-hosted action runners.
    fallback = (
        r"C:\Program Files (x86)\Windows Kits\10\App Certification Kit\signtool.exe"
    )
    if os.path.exists(fallback):
        return fallback
    return None
class SignToolVerifier(Processor):
    """Verifies an authenticode signed installer using the Microsoft SDK
    signtool executable."""

    # File types this verifier knows how to check.
    EXTENSIONS: List[str] = [".exe", ".msi"]
    # TODO: How much of this is needed to act as a drop-in replacement in an
    # override recipe??
    input_variables: Dict[str, Any] = {
        "DISABLE_CODE_SIGNATURE_VERIFICATION": {
            "required": False,
            "description": ("Prevents this processor from running."),
        },
        "input_path": {
            "required": True,
            # NOTE(review): this trailing comma makes the description a
            # one-element tuple, unlike the plain strings used elsewhere —
            # confirm whether that is intentional.
            "description": (
                "File path to an `.msi` or `.exe` file for Authenticode verification",
            ),
        },
        "signtool_path": {
            "required": False,
            "description": (
                "The path to signtool.exe. This varies between versions of the "
                "Windows SDK, so you can explicitly set that here in an override."
            ),
            # Evaluated once at class-definition time.
            "default": signtool_default_path(),
        },
        "additional_arguments": {
            "required": False,
            "description": (
                "Array of additional argument strings to pass to signtool. "
                "Note that currently '/v' and '/pa' are always passed."
            ),
            "default": None,
        },
    }
    output_variables: Dict[str, Any] = {}
    description: str = __doc__

    def METHOD_NAME(
        self,
        signtool_path: str,
        path: str,
        additional_arguments: Optional[List[str]] = None,
    ) -> bool:
        """
        Runs 'signtool.exe /pa <path>'. Returns True if signtool exited with 0
        and False otherwise.
        """
        if not additional_arguments:
            additional_arguments = []
        # Call signtool with "/v" to produce information about the signer when run,
        # and "/pa" to use the "Default Authenticode" Verification Policy.
        process = [signtool_path, "verify", "/v", "/pa"] + additional_arguments
        # Makes the path absolute and normalizes it into standard Windows form.
        # E.g., /Program Files (x86)/Windows Kits/10/bin/x64/signtool.exe will be
        # converted to the appropriate C:\\... path after this.
        process.append(os.path.abspath(path))
        # Run signtool with stderr redirected to stdout to ensure that all output
        # is always captured from the tool.
        proc = subprocess.Popen(
            process,
            stdin=None,
            stdout=subprocess.PIPE,
            stderr=subprocess.STDOUT,
            text=True,
        )
        (output, _) = proc.communicate()
        # Collapse blank-line runs so the recipe log stays readable.
        for line in output.replace("\n\n", "\n").replace("\n\n\n", "\n\n").splitlines():
            self.output(line)
        # NOTE(review): exit codes follow signtool's documented convention
        # (0 = success, 1 = failure, 2 = completed with warnings) — confirm.
        if proc.returncode == 1:
            raise ProcessorError(
                "Authenticode verification failed. Note that all "
                "verification can be disabled by setting the variable "
                "DISABLE_CODE_SIGNATURE_VERIFICATION to a non-empty value."
            )
        elif proc.returncode == 2:
            self.output("WARNING: Verification had warnings. Check output above.")
        return proc.returncode == 0

    def main(self):
        """Processor entry point: verify ``input_path`` unless disabled."""
        if self.env.get("DISABLE_CODE_SIGNATURE_VERIFICATION"):
            self.output("Authenticode verification disabled for this recipe run.")
            return
        input_path = self.env["input_path"]
        signtool_path = self.env["signtool_path"]
        additional_arguments = self.env["additional_arguments"]
        self.METHOD_NAME(
            signtool_path,
            input_path,
            additional_arguments=additional_arguments,
        )
# Allow autopkg to run this processor standalone from the command line.
if __name__ == "__main__":
    PROCESSOR = SignToolVerifier()
    PROCESSOR.execute_shell()
# !/usr/bin/env python3
# coding: utf-8 -*-
#
# Author: pipiche38
#
from Classes.ZigateTransport.tools import CMD_ONLY_STATUS, CMD_NWK_2NDBytes
from Modules.zigateConsts import ADDRESS_MODE, ZIGATE_COMMANDS
# These are ZiGate commands which doesn't have Ack/Nack with firmware up to 3.1c
# (ZDP request message types, identified by their 16-bit command id).
CMD_NOACK_ZDP = (
    0x0030,
    0x0031,
    0x0040,
    0x0041,
    0x0042,
    0x0043,
    0x0044,
    0x0045,
    0x0046,
    0x0047,
    0x0049,
    0x004A,
    0x004B,
    0x004E,
    0x0530,
    0x0531,
    0x0532,
    0x0533,
)
def is_final_step(self, isqn, step):
    """Return True when message type ``step`` completes command ``isqn``.

    ``isqn`` indexes ``self.ListOfCommands``; ``step`` is the ZiGate message
    type just received (0x8000 status, 0x8012 confirmation, ...).  The
    decision depends on the firmware capability flags (``firmware_nosqn``,
    ``firmware_compatibility_mode``, ``firmware_with_8012``).
    """
    cmd = int(self.ListOfCommands[isqn]["cmd"], 16)
    # Step is 0x8000
    # A plain status is terminal for status-only commands, or when the
    # firmware provides no SQN to correlate any further messages.
    if step == 0x8000 and (self.ListOfCommands[isqn]["cmd"] in CMD_ONLY_STATUS or self.firmware_nosqn):
        return True
    if self.firmware_compatibility_mode:
        if self.ListOfCommands[isqn]["ackIsDisabled"]:
            return True
        if step == 0x8000 and cmd in {0x0100, 0x0110}:
            # with firmware 31a we just sync on Response of 0100 -> 8102 and 0110 -> 8110
            return False
        return True
    if is_nowait_cmd(self, isqn, cmd):
        return True
    if step == 0x8012 and not self.firmware_compatibility_mode:
        return is_final_step_8012(self, isqn, cmd)
    if not self.firmware_with_8012 and METHOD_NAME(self, isqn, cmd):
        # If we are in a firmware below 31d (included) there is no 0x8012.
        # If we have a command sent with no-ack (like address mode 0x07),
        # then we will assumed that once 0x8000 is received, we can move to next command.
        return True
    if not self.firmware_with_8012 and is_group_cmd(self, isqn, cmd):
        # This is a Group command. There is no Ack expected.
        return True
    # Neither an 0x8012 nor an 0x8011 will follow: the 0x8000 was final.
    if not is_8012_expected_after_8000(self, isqn, cmd) and not is_8011_expected_after_8000(self, isqn, cmd):
        return True
    # self.logging_proto( 'Debug', "is_final_step - returning False by default Cmd: 0x%04x - %s %s %s %s" %
    #    (
    #        cmd,
    #        self.firmware_with_8012,
    #        is_8012_expected_after_8000( self, isqn, cmd ),
    #        is_8011_expected_after_8000( self, isqn, cmd ),
    #        is_8011_expected_after_8012( self, isqn, cmd )
    #    ))
    return False
def is_final_step_8012(self, isqn, cmd):
    """Return True when an incoming 0x8012 confirmation terminates command ``isqn``."""
    if cmd not in ZIGATE_COMMANDS:
        return False
    # Group commands expect no ack; otherwise the 0x8012 is final only when
    # no 0x8011 ack is still expected afterwards.
    return is_group_cmd(self, isqn, cmd) or is_8011_expected_after_8012(self, isqn, cmd)
def is_8011_expected_after_8000(self, isqn, cmd):
    """Return the 'Ack' capability flag of ``cmd``; False for unknown commands."""
    if cmd not in ZIGATE_COMMANDS:
        return False
    return ZIGATE_COMMANDS[cmd]["Ack"]
def is_8012_expected_after_8000(self, isqn, cmd):
    """Return the '8012' capability flag of ``cmd``; False for unknown commands."""
    if cmd not in ZIGATE_COMMANDS:
        return False
    return ZIGATE_COMMANDS[cmd]["8012"]
def is_8011_expected_after_8012(self, isqn, cmd):
    """True when no further 0x8011 ack will follow the 0x8012 for command ``isqn``."""
    ack_defined = ZIGATE_COMMANDS[cmd]["Ack"]
    ack_disabled = self.ListOfCommands[isqn]["ackIsDisabled"]
    return bool(ack_disabled) or not ack_defined
def METHOD_NAME(self, isqn, cmd):
    """Return True when command ``isqn`` was sent without requesting an APS ack."""
    if cmd == 0x0110 and not self.firmware_with_aps_sqn:
        # In firmware 31c and below, 0x0110 is always with Ack.
        return False
    if cmd == 0x0530 and not self.firmware_with_8012:
        # In firmware 31d and below, 0x0530 is always without Ack.
        return True
    return bool(self.ListOfCommands[isqn]["ackIsDisabled"])
def is_group_cmd(self, isqn, cmd):
    """True when network command ``isqn`` is addressed in group mode (0x01)."""
    if cmd not in CMD_NWK_2NDBytes:
        return False
    return self.ListOfCommands[isqn]["datas"][0:2] == "01"
def is_nowait_cmd(self, isqn, cmd):
    """Return True when command ``isqn`` requires no response before the next
    command can be sent.

    Covers ZDP requests addressed to the coordinator (0x0000) or broadcast
    (0xFFFC), network commands targeting the coordinator or a group, and
    commands that the running firmware never acknowledges.
    """
    if cmd not in CMD_NWK_2NDBytes:
        # Presumably ZDP requests aimed at the coordinator / broadcast need
        # no wait — TODO confirm against the ZiGate protocol notes.
        if cmd == 0x004E and self.ListOfCommands[isqn]["datas"][0:4] == "0000":
            return True
        if cmd == 0x0049 and self.ListOfCommands[isqn]["datas"][0:4] == "FFFC":
            return True
    if cmd in CMD_NWK_2NDBytes:
        # NOTE(review): the group-mode test below is redundant — the following
        # test already returns True for any target "0000" regardless of the
        # address mode. Kept for fidelity with the original behavior.
        if self.ListOfCommands[isqn]["datas"][0:2] == "%02x" % ADDRESS_MODE["group"] and self.ListOfCommands[isqn]["datas"][2:6] == "0000":
            return True
        if self.ListOfCommands[isqn]["datas"][2:6] == "0000":
            return True
    if not self.firmware_with_aps_sqn and cmd == 0x0110:
        return True
    if not self.firmware_with_aps_sqn and cmd in CMD_NOACK_ZDP:
        return True
    # Bug fix: the original fell through and implicitly returned None; callers
    # use the result in boolean context, so an explicit False is equivalent
    # and clearer.
    return False
#!/usr/bin/env python3
#######################################################
#
# SIM CODE
#
#######################################################
# Imports
import math
import matplotlib.pyplot as plt
def clamp(a, b, x):
    """Return x limited to [a, b]; the upper bound is applied last."""
    lower_bounded = max(a, x)
    return min(b, lower_bounded)
def frange(start, end=None, inc=None):
    """Float analogue of range(); includes the endpoint when it falls on a step."""
    if end is None:
        # Single-argument form: frange(stop) counts from 0.
        start, end = 0., start + 0.
    if inc is None:
        inc = 1.
    count = 1 + int((end - start) // inc)
    return [start + k * inc for k in range(count)]
class heatsim:
    """Thermal simulation of a NAEV ship firing its weapons.

    Each weapon is modelled as a steel block heated by its shots; heat is
    conducted into the hull, which radiates into space.  ``simulation`` is a
    sequence of times [s] at which the weapons toggle between firing and
    cooling; its last entry is the total simulated time.
    """

    # (mass [t], number of weapon mounts) per ship.
    _SHIPS = {
        "llama": (80., 2),
        "lancelot": (180., 4),
        "pacifier": (730., 5),
        "hawking": (3750., 7),
        "peacemaker": (6200., 8),
    }

    # (mass [t], delay between shots [s], energy per shot) per weapon.
    _WEAPONS = {
        "laser": (2., 0.9, 4.25),
        "plasma": (4., 0.675, 3.75),
        "ion": (6., 1.440, 15.),
        "laser turret": (16., 0.540, 6.12),
        "ion turret": (42., 0.765, 25.),
        "railgun turret": (60., 1.102, 66.),
    }

    def __init__(self, shipname="llama", weapname="laser", simulation=(60., 120.)):
        """Set up constants and load ship/weapon parameters.

        Note: the default ``simulation`` is now a tuple — the original used a
        mutable list default, which is a well-known Python pitfall.
        """
        # Physical constants.
        self.STEFAN_BOLZMANN = 5.67e-8
        self.SPACE_TEMP = 250.
        self.STEEL_COND = 54.
        self.STEEL_CAP = 0.49
        self.STEEL_DENS = 7.88e3
        # Gameplay temperature thresholds [K].
        self.ACCURACY_LIMIT = 500
        self.FIRERATE_LIMIT = 800
        self.shipname = shipname
        self.weapname = weapname
        # Simulation tick length [s].
        self.sim_dt = 1. / 50.
        self.setSimulation(simulation)
        # Load some data.
        self.ship_mass, self.ship_weaps = self.loadship(shipname)
        self.weap_mass, self.weap_delay, self.weap_energy = self.loadweap(weapname)

    def setSimulation(self, simulation):
        """Set the fire on/off toggle times; the last entry is the total time."""
        self.simulation = simulation
        self.sim_total = simulation[-1]

    def loadship(self, shipname):
        "Returns mass, number of weaps."
        try:
            return self._SHIPS[shipname]
        except KeyError:
            raise ValueError(shipname)

    def loadweap(self, weapname):
        "Returns mass, delay, energy."
        try:
            return self._WEAPONS[weapname]
        except KeyError:
            raise ValueError(weapname)

    def prepare(self):
        """Reset all per-run state and derive thermal parameters from masses."""
        # Time stuff.
        self.time_data = []
        # Calculate ship parameters.
        ship_kg = self.ship_mass * 1000.
        self.ship_emis = 0.8
        self.ship_cond = self.STEEL_COND
        self.ship_C = self.STEEL_CAP * ship_kg
        # Radiating area of a sphere of equivalent steel volume.
        self.ship_area = 4. * math.pi * pow(3. / 4. * ship_kg / self.STEEL_DENS / math.pi, 2. / 3.)
        self.ship_T = self.SPACE_TEMP
        self.ship_data = []
        # Calculate weapon parameters (half-sphere contact area).
        weap_kg = self.weap_mass * 1000.
        self.weap_C = self.STEEL_CAP * weap_kg
        self.weap_area = 2. * math.pi * pow(3. / 4. * weap_kg / self.STEEL_DENS / math.pi, 2. / 3.)
        self.weap_list = []
        self.weap_T = []
        self.weap_data = []
        for i in range(self.ship_weaps):
            # Stagger initial shot timers so the mounts fire out of phase.
            self.weap_list.append(i * self.weap_delay / self.ship_weaps)
            self.weap_T.append(self.SPACE_TEMP)
            self.weap_data.append([])

    @staticmethod
    def _clamp(a, b, x):
        """Clamp x into [a, b] (local copy of the module-level helper)."""
        return min(b, max(a, x))

    def __accMod(self, T):
        # Accuracy degradation factor: 0 at/below 500 K, 1 at/above 1100 K.
        return self._clamp(0., 1., (T - 500.) / 600.)

    def __frMod(self, T):
        # Fire-rate factor: 1 at/below 800 K, 0 at/above 1100 K.
        return self._clamp(0., 1., (1100. - T) / 300.)

    def simulate(self):
        "Begins the simulation."
        self.prepare()
        weap_on = True
        sim_index = 0
        dt = self.sim_dt
        sim_elapsed = 0.
        while sim_elapsed < self.sim_total:
            Q_cond = 0.
            # Check weapons.
            for i in range(len(self.weap_list)):
                # Check if we should start/stop shooting.
                # NOTE(review): this toggle is evaluated once per weapon per
                # tick, so sim_index can advance more than once per tick if
                # toggle times are closer together than dt.
                if self.simulation[sim_index] < sim_elapsed:
                    weap_on = not weap_on
                    sim_index += 1
                # Check if shot; hot weapons fire slower via __frMod.
                if weap_on:
                    self.weap_list[i] -= dt * self.__frMod(self.weap_T[i])
                    if self.weap_list[i] < 0.:
                        self.weap_T[i] += 1e4 * self.weap_energy / self.weap_C
                        self.weap_list[i] += self.weap_delay
                # Do heat movement (conduction into the hull).
                Q = -self.ship_cond * (self.weap_T[i] - self.ship_T) * self.weap_area * dt
                self.weap_T[i] += Q / self.weap_C
                Q_cond += Q
                self.weap_data[i].append(self.weap_T[i])
            # Do ship heat (radiation into space).
            Q_rad = self.STEFAN_BOLZMANN * self.ship_area * self.ship_emis * (pow(self.SPACE_TEMP, 4.) - pow(self.ship_T, 4.)) * dt
            Q = Q_rad - Q_cond
            self.ship_T += Q / self.ship_C
            self.time_data.append(sim_elapsed)
            self.ship_data.append(self.ship_T)
            sim_elapsed += dt

    def save(self, filename):
        """Save the results to ``filename``: one row per tick with the time,
        the ship temperature and every weapon temperature.

        Bug fixes: the original opened the nonexistent ``self.filename``,
        wrote the undefined name ``ship_data``, and zipped the per-weapon
        series without transposing them (producing one row per weapon
        instead of one row per tick).
        """
        with open(filename, 'w') as f:
            for t, ship_T, weap_Ts in zip(self.time_data, self.ship_data, zip(*self.weap_data)):
                f.write(f'{t} {ship_T}')
                for wj in weap_Ts:
                    f.write(f' {wj}')
                f.write('\n')

    def METHOD_NAME(self):
        """Print the final ship and per-weapon temperatures.

        Bug fix: the original read the module-level ``hs`` variable instead
        of ``self``, so it only worked from the demo script.
        """
        print("Ship Temp:", self.ship_T, "K")
        for i, T in enumerate(self.weap_T):
            print(f"Outfit[{i}] Temp: {T} K")

    def plot(self, filename=None):
        """Plot ship (top) and first-weapon (bottom) temperature curves;
        show interactively, or save to ``filename`` when given."""
        plt.figure()
        # Plot 1 Data
        plt.subplot(211)
        plt.plot(self.time_data, self.ship_data, '-')
        # Plot 1 Info
        plt.axis([0, self.sim_total, 0, 1100])
        plt.title('NAEV Heat Simulation (' + self.shipname + ' with ' + self.weapname + ')')
        plt.legend(('Ship', 'Accuracy Limit', 'Fire Rate Limit'), loc='upper left')
        plt.ylabel('Temperature [K]')
        plt.grid(True)
        # Plot 2 Data
        plt.subplot(212)
        plt.plot(self.time_data, self.weap_data[0], '-')
        plt_data = [self.ACCURACY_LIMIT] * len(self.weap_data[0])
        plt.plot(self.time_data, plt_data, '--')
        plt_data = [self.FIRERATE_LIMIT] * len(self.weap_data[0])
        plt.plot(self.time_data, plt_data, '-.')
        # Plot 2 Info
        plt.axis([0, self.sim_total, 0, 1100])
        plt.legend(('Weapon', 'Accuracy Limit', 'Fire Rate Limit'), loc='upper right')
        plt.ylabel('Temperature [K]')
        plt.xlabel('Time [s]')
        plt.grid(True)
        if filename is None:
            plt.show()
        else:
            plt.savefig(filename)
# Demo driver: run the simulation for a representative weapon per hull and
# save plots for three different fire/cool-down schedules.
if __name__ == "__main__":
    print("NAEV HeatSim\n")
    shp_lst = {'llama': 'laser',
               'lancelot': 'ion',
               'pacifier': 'laser turret',
               'hawking': 'ion turret',
               'peacemaker': 'railgun turret'}
    for shp, wpn in shp_lst.items():
        hs = heatsim(shp, wpn, (60., 120.))
        #hs = heatsim( shp, wpn, frange( 30., 600., 30. ) )
        hs.simulate()
        hs.plot(f'{shp}_{wpn}_60_60.png')
        hs.setSimulation((30., 90.))
        hs.simulate()
        hs.plot(f'{shp}_{wpn}_30_60.png')
        hs.setSimulation((30., 90., 120., 180.))
        hs.simulate()
        hs.plot(f'{shp}_{wpn}_30_60_30_60.png')
        print('  ', shp, 'with', wpn, 'done!')
from fastapi import HTTPException
from fastapi.encoders import jsonable_encoder
from mlflow.gateway.config import AnthropicConfig, RouteConfig
from mlflow.gateway.constants import (
MLFLOW_AI_GATEWAY_ANTHROPIC_DEFAULT_MAX_TOKENS,
MLFLOW_AI_GATEWAY_ANTHROPIC_MAXIMUM_MAX_TOKENS,
)
from mlflow.gateway.providers.base import BaseProvider
from mlflow.gateway.providers.utils import rename_payload_keys, send_request
from mlflow.gateway.schemas import chat, METHOD_NAME, embeddings
class AnthropicProvider(BaseProvider):
    """Gateway provider backed by Anthropic's ``/v1/complete`` REST API.

    Only the completions route is supported; chat and embeddings raise 404.
    """

    def __init__(self, config: RouteConfig) -> None:
        super().__init__(config)
        if config.model.config is None or not isinstance(config.model.config, AnthropicConfig):
            raise TypeError(f"Invalid config type {config.model.config}")
        self.anthropic_config: AnthropicConfig = config.model.config
        # Anthropic authenticates via an x-api-key header, not a Bearer token.
        self.headers = {"x-api-key": self.anthropic_config.anthropic_api_key}
        self.base_url = "https://api.anthropic.com/v1/"

    async def METHOD_NAME(self, payload: METHOD_NAME.RequestPayload) -> METHOD_NAME.ResponsePayload:
        """Proxy a completions request, translating gateway parameters to
        Anthropic's API and normalizing the response.

        :raises HTTPException: 422 for unsupported parameters (top_p, stream,
            candidate_count != 1, oversized max_tokens).
        """
        payload = jsonable_encoder(payload, exclude_none=True)
        self.check_for_model_field(payload)
        if "top_p" in payload:
            raise HTTPException(
                status_code=422,
                detail="Cannot set both 'temperature' and 'top_p' parameters. "
                "Please use only the temperature parameter for your query.",
            )
        max_tokens = payload.get("max_tokens", MLFLOW_AI_GATEWAY_ANTHROPIC_DEFAULT_MAX_TOKENS)
        if max_tokens > MLFLOW_AI_GATEWAY_ANTHROPIC_MAXIMUM_MAX_TOKENS:
            raise HTTPException(
                status_code=422,
                detail="Invalid value for max_tokens: cannot exceed "
                f"{MLFLOW_AI_GATEWAY_ANTHROPIC_MAXIMUM_MAX_TOKENS}.",
            )
        payload["max_tokens"] = max_tokens
        # NOTE(review): only the string "true" is rejected here; a boolean
        # True would slip through — confirm whether that is intended.
        if payload.get("stream", None) == "true":
            raise HTTPException(
                status_code=422,
                detail="Setting the 'stream' parameter to 'true' is not supported with the MLflow "
                "Gateway.",
            )
        candidate_count = payload.get("candidate_count", 1)
        if candidate_count != 1:
            raise HTTPException(
                status_code=422,
                detail="'candidate_count' must be '1' for the Anthropic provider. "
                f"Received value: '{candidate_count}'.",
            )
        # Translate gateway field names to Anthropic's API names.
        payload = rename_payload_keys(
            payload, {"max_tokens": "max_tokens_to_sample", "stop": "stop_sequences"}
        )
        # Anthropic requires the Human/Assistant prompt framing.
        payload["prompt"] = f"\n\nHuman: {payload['prompt']}\n\nAssistant:"
        resp = await send_request(
            headers=self.headers,
            base_url=self.base_url,
            path="complete",
            payload={"model": self.config.model.name, **payload},
        )
        # Example response:
        # Documentation: https://docs.anthropic.com/claude/reference/complete_post
        # ```
        # {
        #     "completion": " Hello! My name is Claude."
        #     "stop_reason": "stop_sequence",
        #     "model": "claude-instant-1.1",
        #     "truncated": False,
        #     "stop": None,
        #     "log_id": "dee173f87ddf1357da639dee3c38d833",
        #     "exception": None,
        # }
        # ```
        # Map Anthropic's stop reasons onto the gateway's "stop"/"length" pair.
        stop_reason = "stop" if resp["stop_reason"] == "stop_sequence" else "length"
        return METHOD_NAME.ResponsePayload(
            **{
                "candidates": [
                    {"text": resp["completion"], "metadata": {"finish_reason": stop_reason}}
                ],
                "metadata": {
                    "model": resp["model"],
                    "route_type": self.config.route_type,
                },
            }
        )

    async def chat(self, payload: chat.RequestPayload) -> None:
        # Anthropic does not have a chat endpoint
        raise HTTPException(
            status_code=404, detail="The chat route is not available for Anthropic models."
        )

    async def embeddings(self, payload: embeddings.RequestPayload) -> None:
        # Anthropic does not have an embeddings endpoint
        raise HTTPException(
            status_code=404, detail="The embeddings route is not available for Anthropic models."
        )
"""
AbiPy panels for electronic properties.
"""
from __future__ import annotations
import param
import panel as pn
import panel.widgets as pnw
from abipy.panels.core import AbipyParameterized, ActiveBar, Loading, ply, mpl, depends_on_btn_click
class CompareEbandsWithMP(AbipyParameterized):
    """Panel app comparing an uploaded Abinit band structure with the
    Materials Project entries matching the same structure."""

    # Whether band gaps are annotated in the plots.
    with_gaps = param.Boolean(True)
    ylims_ev = param.Range(default=(-10, +10), doc="Energy window around the Fermi energy.")

    # NOTE(review): this text mentions "two files" but the app exposes a
    # single upload widget and fetches the comparison data from MP — confirm.
    info_str = """
This app alllows users to upload two files with KS energies.
"""

    def __init__(self, **params):
        super().__init__(**params)
        help_md = pn.pane.Markdown(f"""
## Description

{self.info_str}
""")
        self.main_area = pn.Column(help_md,
                                   self.get_alert_data_transfer(),
                                   sizing_mode="stretch_width")
        self.replot_btn = pnw.Button(name="Replot", button_type='primary')
        self.file_input = pnw.FileInput(height=60, css_classes=["pnx-file-upload-area"])
        self.file_input.param.watch(self.on_file_input, "value")
        # Progress bar shown while Materials Project data is downloaded.
        self.mp_progress = pn.indicators.Progress(name='Fetching data from the MP website', bar_color="warning",
                                                  active=False, width=200, height=10, align="center")

    def on_file_input(self, event):
        """Handle an upload: match the structure on MP, fetch its bands, replot."""
        self.abinit_ebands = self.get_ebands_from_file_input(self.file_input)
        # Match Abinit structure with MP.
        mp = self.abinit_ebands.structure.mp_match()
        if not mp.structures:
            raise RuntimeError("No structure found in the MP database")
        # Get structures from MP as AbiPy ElectronBands.
        from abipy.electrons.ebands import ElectronBands
        self.mp_ebands_list = []
        with ActiveBar(self.mp_progress):
            for mp_id in mp.ids:
                # "this" is the sentinel id of the uploaded structure itself.
                if mp_id == "this": continue
                eb = ElectronBands.from_mpid(mp_id)
                self.mp_ebands_list.append(eb)
        self.update_main()

    def update_main(self):
        """Rebuild the main area: options column plus one plot per band structure."""
        with Loading(self.main_area):
            col = self.pws_col(["## Plot options", "with_gaps", "ylims_ev", "replot_btn"])
            ca = col.append
            ca("## Abinit Electronic band structure:")
            ylims = self.ylims_ev
            ca(ply(self.abinit_ebands.plotly(e0="fermie", ylims=ylims, with_gaps=self.with_gaps, show=False)))
            for mp_ebands in self.mp_ebands_list:
                ca("## MP Electronic band structure:")
                ca(ply(mp_ebands.plotly(e0="fermie", ylims=ylims, with_gaps=self.with_gaps, show=False)))
            #self.main_area.objects = [col]
            self.main_area.objects = col.objects

    @depends_on_btn_click('replot_btn')
    def on_replot_btn(self):
        """Redraw the plots with the current widget values."""
        self.update_main()

    def get_panel(self):
        """Assemble the full page: upload section, progress row and main area."""
        col = pn.Column(
            "## Upload a *nc* file with energies along a **k**-path (possibly a *GSR.nc* file):",
            self.get_fileinput_section(self.file_input),
            pn.Row("## Fetching data from the MP website: ", self.mp_progress,
                   sizing_mode="stretch_width"),
            sizing_mode="stretch_width")
        main = pn.Column(col, self.main_area, sizing_mode="stretch_width")
        cls, kwds = self.get_abinit_template_cls_kwds()
        return cls(main=main, title="Compare with MP Ebands", **kwds)
class SkwPanelWithFileInput(AbipyParameterized):
    """Panel app gauging the quality of an SKW interpolation: IBZ energies are
    interpolated and compared with ab-initio energies along a k-path."""

    lpratio = param.Integer(default=5, bounds=(1, None),
                            doc="Ratio between number of k-points and number of star-functions")

    info_str = """
This app allows users to upload two files with KS energies.
The first file contains the energies in the IBZ used for the SKW interpolation (NB: this file is required).
The second (optional) file contains the energies along a k-path.
The interpolated energies are then compared with the ab-initio ones on the k-path.
The user can change the SKW intepolation parameters to gauge the quality of the SKW fit.
"""

    def __init__(self, **params):
        super().__init__(**params)
        help_md = pn.pane.Markdown(f"""
## Description

{self.info_str}
""")
        self.main_area = pn.Column(help_md,
                                   self.get_alert_data_transfer(),
                                   sizing_mode="stretch_width")
        # IBZ energies (required input for the interpolation).
        self.ibz_file_input = pnw.FileInput(height=60, css_classes=["pnx-file-upload-area"])
        self.ibz_file_input.param.watch(self.on_ibz_file_input, "value")
        self.ebands_ibz = None
        # Reference k-path energies (optional, used for the comparison plot).
        self.kpath_file_input = pnw.FileInput(height=60, css_classes=["pnx-file-upload-area"])
        self.kpath_file_input.param.watch(self.on_kpath_file_input, "value")
        self.ebands_kpath = None

    def on_ibz_file_input(self, event):
        """Store the uploaded IBZ energies and refresh the comparison."""
        self.ebands_ibz = self.get_ebands_from_file_input(self.ibz_file_input)
        self.METHOD_NAME()

    def on_kpath_file_input(self, event):
        """Store the uploaded k-path energies and refresh the comparison."""
        self.ebands_kpath = self.get_ebands_from_file_input(self.kpath_file_input)
        self.METHOD_NAME()

    def METHOD_NAME(self) -> None:
        """Interpolate the IBZ energies and plot ab-initio vs SKW bands.

        No-op until both uploads have been provided.
        """
        with Loading(self.main_area):
            if self.ebands_kpath is None or self.ebands_ibz is None: return
            # SKW interpolation
            r = self.ebands_ibz.interpolate(lpratio=self.lpratio, filter_params=None)
            # Build plotter.
            plotter = self.ebands_kpath.get_plotter_with("Ab-initio", "SKW interp", r.ebands_kpath)
            mpl_pane = mpl(plotter.combiplot(**self.mpl_kwargs))
            col = pn.Column(mpl_pane, sizing_mode="stretch_width")
            self.main_area.objects = [col]

    def get_panel(self):
        """Assemble the full page: the two upload sections plus the main area."""
        col = pn.Column(
            "## Upload (or drag & drop) any *nc* file with energies in the IBZ (_possibly a *GSR.nc* file_):",
            self.get_fileinput_section(self.ibz_file_input),
            "## Upload (or drag & drop) any *nc* file with energies along a **k**-path (_possibly a *GSR.nc* file_):",
            self.get_fileinput_section(self.kpath_file_input),
            sizing_mode="stretch_width")
        main = pn.Column(col, self.main_area, sizing_mode="stretch_width")
        cls, kwds = self.get_abinit_template_cls_kwds()
        return cls(main=main, title="SKW Analyzer", **kwds)
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
"""
Stub functions that are used by the AWS Key Management Service (AWS KMS) unit tests.
"""
from botocore.stub import ANY
from test_tools.example_stubber import ExampleStubber
class KmsStubber(ExampleStubber):
    """
    Implements stub functions used by AWS KMS unit tests.

    Each ``stub_*`` method registers one expected AWS KMS API call with its
    expected parameters and canned response via ``_stub_bifurcator``; passing
    ``error_code`` makes the stub raise that client error instead.
    """

    def __init__(self, client, use_stubs=True):
        """
        Initializes the object with a specific client and configures it for
        stubbing or AWS passthrough.

        :param client: A Boto3 AWS KMS client.
        :param use_stubs: When True, use stubs to intercept requests. Otherwise,
                          pass requests through to AWS.
        """
        super().__init__(client, use_stubs)

    def stub_create_key(self, description, key_id, error_code=None):
        expected_params = {'Description': description}
        response = {'KeyMetadata': {'KeyId': key_id, 'Description': description}}
        self._stub_bifurcator(
            'create_key', expected_params, response, error_code=error_code)

    def stub_list_keys(self, limit, key_ids, marker=None, truncated=False, error_code=None):
        expected_params = {'Limit': limit}
        if marker is not None:
            expected_params['Marker'] = marker
        response = {'Keys': [{'KeyId': kid} for kid in key_ids], 'Truncated': truncated}
        if truncated:
            response['NextMarker'] = 'test-token'
        self._stub_bifurcator(
            'list_keys', expected_params, response, error_code=error_code)

    def stub_describe_key(self, key_id, state, error_code=None):
        expected_params = {'KeyId': key_id}
        response = {'KeyMetadata': {'KeyId': key_id, 'KeyState': state}}
        self._stub_bifurcator(
            'describe_key', expected_params, response, error_code=error_code)

    def stub_generate_data_key(self, key_id, key_spec, error_code=None):
        expected_params = {'KeyId': key_id, 'KeySpec': key_spec}
        response = {}
        self._stub_bifurcator(
            'generate_data_key', expected_params, response, error_code=error_code)

    def stub_enable_key(self, key_id, error_code=None):
        expected_params = {'KeyId': key_id}
        response = {}
        self._stub_bifurcator(
            'enable_key', expected_params, response, error_code=error_code)

    def stub_disable_key(self, key_id, error_code=None):
        expected_params = {'KeyId': key_id}
        response = {}
        self._stub_bifurcator(
            'disable_key', expected_params, response, error_code=error_code)

    def stub_schedule_key_deletion(self, key_id, window, error_code=None):
        expected_params = {'KeyId': key_id, 'PendingWindowInDays': window}
        response = {}
        self._stub_bifurcator(
            'schedule_key_deletion', expected_params, response, error_code=error_code)

    def stub_create_alias(self, alias, key_id, error_code=None):
        expected_params = {'AliasName': alias, 'TargetKeyId': key_id}
        response = {}
        self._stub_bifurcator(
            'create_alias', expected_params, response, error_code=error_code)

    def stub_list_aliases(self, limit, aliases, marker=None, truncated=False, error_code=None):
        expected_params = {'Limit': limit}
        if marker is not None:
            expected_params['Marker'] = marker
        response = {'Aliases': [{'AliasName': alias} for alias in aliases], 'Truncated': truncated}
        if truncated:
            response['NextMarker'] = 'test-token'
        self._stub_bifurcator(
            'list_aliases', expected_params, response, error_code=error_code)

    def stub_update_alias(self, alias, key_id, error_code=None):
        expected_params = {'AliasName': alias, 'TargetKeyId': key_id}
        response = {}
        self._stub_bifurcator(
            'update_alias', expected_params, response, error_code=error_code)

    def METHOD_NAME(self, alias, error_code=None):
        expected_params = {'AliasName': alias}
        response = {}
        self._stub_bifurcator(
            'delete_alias', expected_params, response, error_code=error_code)

    def stub_create_grant(self, key_id, user, operations, grant, error_code=None):
        expected_params = {
            'KeyId': key_id, 'GranteePrincipal': user, 'Operations': operations}
        response = grant
        self._stub_bifurcator(
            'create_grant', expected_params, response, error_code=error_code)

    def stub_list_grants(self, key_id, grants, error_code=None):
        expected_params = {'KeyId': key_id}
        # Bug fix: the original used a dict comprehension inside a list literal
        # ([{'GrantId': grant for grant in grants}]), which collapsed all the
        # grants into a single {'GrantId': <last grant>} entry. The response
        # must contain one dict per grant, like stub_list_keys does for keys.
        response = {'Grants': [{'GrantId': grant} for grant in grants]}
        self._stub_bifurcator(
            'list_grants', expected_params, response, error_code=error_code)

    def stub_retire_grant(self, grant_token, error_code=None):
        expected_params = {'GrantToken': grant_token}
        response = {}
        self._stub_bifurcator(
            'retire_grant', expected_params, response, error_code=error_code)

    def stub_revoke_grant(self, key_id, grant_id, error_code=None):
        expected_params = {'KeyId': key_id, 'GrantId': grant_id}
        response = {}
        self._stub_bifurcator(
            'revoke_grant', expected_params, response, error_code=error_code)

    def stub_list_key_policies(self, key_id, policy_names, error_code=None):
        expected_params = {'KeyId': key_id}
        response = {'PolicyNames': policy_names}
        self._stub_bifurcator(
            'list_key_policies', expected_params, response, error_code=error_code)

    def stub_get_key_policy(self, key_id, policy, error_code=None):
        expected_params = {'KeyId': key_id, 'PolicyName': 'default'}
        response = {'Policy': policy}
        self._stub_bifurcator(
            'get_key_policy', expected_params, response, error_code=error_code)

    def stub_put_key_policy(self, key_id, error_code=None):
        # The policy document itself is not matched, only its presence.
        expected_params = {'KeyId': key_id, 'Policy': ANY, 'PolicyName': 'default'}
        response = {}
        self._stub_bifurcator(
            'put_key_policy', expected_params, response, error_code=error_code)

    def stub_encrypt(self, key_id, plaintext, ciphertext, error_code=None):
        expected_params = {'KeyId': key_id, 'Plaintext': plaintext}
        response = {'CiphertextBlob': ciphertext}
        self._stub_bifurcator(
            'encrypt', expected_params, response, error_code=error_code)

    def stub_decrypt(self, key_id, ciphertext, plaintext, error_code=None):
        expected_params = {'KeyId': key_id, 'CiphertextBlob': ciphertext}
        response = {'Plaintext': plaintext}
        self._stub_bifurcator(
            'decrypt', expected_params, response, error_code=error_code)

    def stub_re_encrypt(self, source_key_id, dest_key_id, ciphertext, error_code=None):
        expected_params = {
            'SourceKeyId': source_key_id, 'DestinationKeyId': dest_key_id,
            'CiphertextBlob': ciphertext}
        response = {'CiphertextBlob': ciphertext}
        self._stub_bifurcator(
            're_encrypt', expected_params, response, error_code=error_code)
# ------------------------------------
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
# ------------------------------------
import os
from azure.core.credentials import AccessToken
from azure.identity import CredentialUnavailableError
from azure.identity._credentials.application import AzureApplicationCredential
from azure.identity._constants import EnvironmentVariables
import pytest
from urllib.parse import urlparse
try:
from unittest.mock import Mock, patch
except ImportError: # python < 3.3
from mock import Mock, patch # type: ignore
from helpers import build_aad_response, get_discovery_response, mock_response
def test_get_token():
    """End-to-end get_token against a mocked AAD transport should return the stubbed token."""
    expected_token = "***"
    def send(request, **kwargs):
        # ensure the `claims` and `tenant_id` keywords from credential's `get_token` method don't make it to transport
        assert "claims" not in kwargs
        assert "tenant_id" not in kwargs
        parsed = urlparse(request.url)
        tenant_id = parsed.path.split("/")[1]
        if "/oauth2/v2.0/token" in request.url:
            # token request -> return a canned AAD token response
            return mock_response(json_payload=build_aad_response(access_token=expected_token))
        # anything else is answered as tenant discovery
        return get_discovery_response("https://{}/{}".format(parsed.netloc, tenant_id))
    # populate client-secret env vars so the chain can authenticate from the
    # environment (presumably via EnvironmentCredential -- confirm)
    with patch.dict("os.environ", {var: "..." for var in EnvironmentVariables.CLIENT_SECRET_VARS}, clear=True):
        credential = AzureApplicationCredential(transport=Mock(send=send))
    token = credential.get_token("scope")
    assert token.token == expected_token
def test_iterates_only_once():
    """When a credential succeeds, AzureApplicationCredential should use that credential thereafter"""
    expected_token = AccessToken("***", 42)
    unavailable_credential = Mock(get_token=Mock(side_effect=CredentialUnavailableError(message="...")))
    successful_credential = Mock(get_token=Mock(return_value=expected_token))
    credential = AzureApplicationCredential()
    # Replace the real chain: unavailable -> successful -> a guard that fails the
    # test if iteration ever continues past the successful credential.
    credential.credentials = [
        unavailable_credential,
        successful_credential,
        Mock(get_token=Mock(side_effect=Exception("iteration didn't stop after a credential provided a token"))),
    ]
    for n in range(3):
        token = credential.get_token("scope")
        assert token.token == expected_token.token
        # the unavailable credential is consulted only on the first call
        assert unavailable_credential.get_token.call_count == 1
        # the successful one is reused on every subsequent call
        assert successful_credential.get_token.call_count == n + 1
@pytest.mark.parametrize("authority", ("localhost", "https://localhost"))
def test_authority(authority):
    """the credential should accept authority configuration by keyword argument or environment"""
    parsed_authority = urlparse(authority)
    expected_netloc = parsed_authority.netloc or authority  # "localhost" parses to netloc "", path "localhost"
    def METHOD_NAME(mock_credential, expect_argument):
        """Construct the credential twice (authority kwarg, then env var) and
        check whether *mock_credential* received an 'authority' keyword."""
        AzureApplicationCredential(authority=authority)
        assert mock_credential.call_count == 1
        # N.B. if os.environ has been patched somewhere in the stack, that patch is in place here
        environment = dict(os.environ, **{EnvironmentVariables.AZURE_AUTHORITY_HOST: authority})
        with patch.dict(AzureApplicationCredential.__module__ + ".os.environ", environment, clear=True):
            AzureApplicationCredential()
        assert mock_credential.call_count == 2
        for _, kwargs in mock_credential.call_args_list:
            if expect_argument:
                # authority should be normalized to an https URL
                actual = urlparse(kwargs["authority"])
                assert actual.scheme == "https"
                assert actual.netloc == expected_netloc
            else:
                assert "authority" not in kwargs
    # authority should be passed to EnvironmentCredential as a keyword argument
    environment = {var: "foo" for var in EnvironmentVariables.CLIENT_SECRET_VARS}
    with patch(AzureApplicationCredential.__module__ + ".EnvironmentCredential") as mock_credential:
        with patch.dict("os.environ", environment, clear=True):
            METHOD_NAME(mock_credential, expect_argument=True)
    # authority should not be passed to ManagedIdentityCredential
    with patch(AzureApplicationCredential.__module__ + ".ManagedIdentityCredential") as mock_credential:
        with patch.dict("os.environ", {EnvironmentVariables.MSI_ENDPOINT: "localhost"}, clear=True):
            METHOD_NAME(mock_credential, expect_argument=False)
def test_managed_identity_client_id():
    """the credential should accept a user-assigned managed identity's client ID by kwarg or environment variable"""
    expected_args = {"client_id": "the-client"}
    # client id passed as a keyword argument
    with patch(AzureApplicationCredential.__module__ + ".ManagedIdentityCredential") as mock_credential:
        AzureApplicationCredential(managed_identity_client_id=expected_args["client_id"])
    mock_credential.assert_called_once_with(**expected_args)
    # client id can also be specified in $AZURE_CLIENT_ID
    with patch.dict(os.environ, {EnvironmentVariables.AZURE_CLIENT_ID: expected_args["client_id"]}, clear=True):
        with patch(AzureApplicationCredential.__module__ + ".ManagedIdentityCredential") as mock_credential:
            AzureApplicationCredential()
    mock_credential.assert_called_once_with(**expected_args)
    # keyword argument should override environment variable
    with patch.dict(
        os.environ, {EnvironmentVariables.AZURE_CLIENT_ID: "not-" + expected_args["client_id"]}, clear=True
    ):
        with patch(AzureApplicationCredential.__module__ + ".ManagedIdentityCredential") as mock_credential:
            AzureApplicationCredential(managed_identity_client_id=expected_args["client_id"])
    mock_credential.assert_called_once_with(**expected_args)
import logging
from ..rfc6749.errors import (
InvalidRequestError,
UnauthorizedClientError,
AccessDeniedError,
)
from ..rfc6749 import BaseGrant, TokenEndpointMixin
from .errors import (
AuthorizationPendingError,
ExpiredTokenError,
SlowDownError,
)
log = logging.getLogger(__name__)
DEVICE_CODE_GRANT_TYPE = 'urn:ietf:params:oauth:grant-type:device_code'
class DeviceCodeGrant(BaseGrant, TokenEndpointMixin):
    """Token-endpoint half of the OAuth 2.0 Device Authorization Grant
    (grant type ``urn:ietf:params:oauth:grant-type:device_code``).

    Devices with limited input capabilities (smart TVs, consoles, printers)
    obtain a device code and show the user a verification URI. While the end
    user reviews the request in a browser, the device client repeatedly polls
    the token endpoint with its device code and client identifier. This class
    implements that polling step: it validates the device code and responds
    with an access token if the user granted access, an error if access was
    denied or the code expired, or an indication to continue polling.
    """
    GRANT_TYPE = DEVICE_CODE_GRANT_TYPE
    TOKEN_ENDPOINT_AUTH_METHODS = ['client_secret_basic', 'client_secret_post', 'none']

    def validate_token_request(self):
        """Validate a polling token request.

        Required request parameters:

        - ``grant_type``: the device-code grant URN.
        - ``device_code``: the device verification code from the device
          authorization response.
        - ``client_id``: required unless the client authenticates per
          RFC 6749 Section 3.2.1.

        Example::

            POST /token HTTP/1.1
            Host: server.example.com
            Content-Type: application/x-www-form-urlencoded

            grant_type=urn%3Aietf%3Aparams%3Aoauth%3Agrant-type%3Adevice_code
            &device_code=GmRhmhcxhwAzkoEqiMEg_DnyEysNkuNhszIySk9eS
            &client_id=1406020730
        """
        device_code = self.request.data.get('device_code')
        if not device_code:
            raise InvalidRequestError('Missing "device_code" in payload')

        client = self.authenticate_token_endpoint_client()
        if not client.check_grant_type(self.GRANT_TYPE):
            raise UnauthorizedClientError()

        credential = self.query_device_credential(device_code)
        if not credential:
            raise InvalidRequestError('Invalid "device_code" in payload')
        # The device code must belong to the authenticated client.
        if credential.get_client_id() != client.get_client_id():
            raise UnauthorizedClientError()

        user = self.validate_device_credential(credential)
        self.request.user = user
        self.request.client = client
        self.request.credential = credential

    def METHOD_NAME(self):
        """Issue the access token (and optionally a refresh token) for a
        request that passed :meth:`validate_token_request`.
        """
        client = self.request.client
        scope = self.request.credential.get_scope()
        token = self.generate_token(
            user=self.request.user,
            scope=scope,
            include_refresh_token=client.check_grant_type('refresh_token'),
        )
        log.debug('Issue token %r to %r', token, client)
        self.save_token(token)
        self.execute_hook('process_token', token=token)
        return 200, token, self.TOKEN_RESPONSE_HEADER

    def validate_device_credential(self, credential):
        """Map the credential's state onto the polling responses:
        expired -> ExpiredTokenError; user denied -> AccessDeniedError;
        user approved -> return the granting user; otherwise keep polling
        (SlowDownError when polling too fast, else AuthorizationPendingError).
        """
        if credential.is_expired():
            raise ExpiredTokenError()

        user_grant = self.query_user_grant(credential.get_user_code())
        if user_grant is not None:
            user, approved = user_grant
            if not approved:
                raise AccessDeniedError()
            return user

        if self.should_slow_down(credential):
            raise SlowDownError()
        raise AuthorizationPendingError()

    def query_device_credential(self, device_code):
        """Look up the device credential previously saved via
        ``DeviceAuthorizationEndpoint``. Developers MUST implement it in a
        subclass::

            def query_device_credential(self, device_code):
                return DeviceCredential.get(device_code)

        :param device_code: a string represent the code.
        :return: DeviceCredential instance
        """
        raise NotImplementedError()

    def query_user_grant(self, user_code):
        """Return ``(user, approved)`` for the given user code, or ``None``
        while the end user has not acted yet. Developers MUST implement it in
        a subclass::

            def query_user_grant(self, user_code):
                # e.g. we saved user grant info in redis
                data = redis.get('oauth_user_grant:' + user_code)
                if not data:
                    return None
                user_id, allowed = data.split()
                user = User.get(user_id)
                return user, bool(allowed)

        Note, user grant information is saved by the verification endpoint.
        """
        raise NotImplementedError()

    def should_slow_down(self, credential):
        """Return True when the client is polling too fast; the polling
        interval MUST then be increased by 5 seconds for this and all
        subsequent requests.
        """
        raise NotImplementedError()
"""
This module defines ActionGroup, ActionEnvelope, and ActionBundle -- classes that together
represent the result of applying a UserAction to a document.
In general, UserActions refer to logical actions performed by the user. DocActions are the
individual steps to which UserActions translate.
A list of UserActions applied together translates to multiple DocActions, packaged into an
ActionGroup. In a separate step, this ActionGroup is split up according to ACL rules into and
ActionBundle consisting of ActionEnvelopes, each containing a smaller set of actions associated
with the set of recipients who should receive them.
"""
import actions
from action_summary import ActionSummary
class ActionGroup(object):
  """
  Packages the different types of doc actions produced by the engine while
  processing one or more UserActions, plus one return value per UserAction.
  """
  def __init__(self):
    self.calc = []
    self.stored = []
    self.direct = []
    self.undo = []
    self.retValues = []
    self.summary = ActionSummary()
    self.requests = {}

  def flush_calc_changes(self):
    """
    Merge the changes from self.summary into self.stored and self.undo, then reset the summary.
    """
    before = len(self.stored)
    self.summary.convert_deltas_to_actions(self.stored, self.undo)
    added = len(self.stored) - before
    # Actions coming from the summary are not direct user actions.
    self.direct.extend([False] * added)
    self.summary = ActionSummary()

  def METHOD_NAME(self, table_id, col_id):
    """
    Merge the changes for the given column from self.summary into
    self.stored/self.undo, removing that column from the summary.
    """
    before = len(self.stored)
    self.summary.pop_column_delta_as_actions(table_id, col_id, self.stored, self.undo)
    self.direct.extend([False] * (len(self.stored) - before))

  def check_sanity(self):
    """Verify that self.stored and self.direct stayed in lockstep."""
    if len(self.stored) != len(self.direct):
      raise AssertionError("failed to track origin of actions")

  def get_repr(self):
    """Return a JSON-serializable dict representation of this group."""
    return {
      "calc": [actions.get_action_repr(action) for action in self.calc],
      "stored": [actions.get_action_repr(action) for action in self.stored],
      "undo": [actions.get_action_repr(action) for action in self.undo],
      "direct": self.direct,
      "retValues": self.retValues
    }

  @classmethod
  def from_json_obj(cls, data):
    """Build an ActionGroup from its JSON dict representation."""
    group = ActionGroup()
    group.calc = [actions.action_from_repr(item) for item in data.get('calc', [])]
    group.stored = [actions.action_from_repr(item) for item in data.get('stored', [])]
    group.undo = [actions.action_from_repr(item) for item in data.get('undo', [])]
    group.retValues = data.get('retValues', [])
    return group
class Envelope(object):
  """
  Holds recipient information as a set (or frozenset) of instanceIds.
  """
  def __init__(self, recipient_set):
    self.recipients = recipient_set

  def to_json_obj(self):
    """Return a JSON-serializable dict with recipients in sorted order."""
    return {"recipients": sorted(self.recipients)}
class ActionBundle(object):
  """
  Doc actions arranged into envelopes, i.e. split up by sets of recipients.
  Note that different Envelopes contain different sets of recipients (which
  may overlap however).
  """
  def __init__(self):
    self.envelopes = []
    self.stored = []      # Pairs of (envIndex, docAction)
    self.direct = []      # Pairs of (envIndex, boolean)
    self.calc = []        # Pairs of (envIndex, docAction)
    self.undo = []        # Pairs of (envIndex, docAction)
    self.retValues = []
    self.rules = set()    # RowIds of ACLRule records used to construct this ActionBundle.

  def to_json_obj(self):
    """Return a JSON-serializable dict representation of this bundle."""
    return {
      "envelopes": [envelope.to_json_obj() for envelope in self.envelopes],
      "stored": [(env, actions.get_action_repr(action)) for (env, action) in self.stored],
      "direct": self.direct,
      "calc": [(env, actions.get_action_repr(action)) for (env, action) in self.calc],
      "undo": [(env, actions.get_action_repr(action)) for (env, action) in self.undo],
      "retValues": self.retValues,
      "rules": sorted(self.rules)
    }
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for the swig wrapper tf_optimizer."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.core.framework import attr_value_pb2
from tensorflow.core.protobuf import config_pb2
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import meta_graph
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import test_util
from tensorflow.python.grappler import item as gitem
from tensorflow.python.grappler import tf_optimizer
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
class PyWrapOptimizeGraphTest(test.TestCase):
  @test_util.run_deprecated_v1
  def METHOD_NAME(self):
    """Make sure arguments can be passed correctly."""
    a = constant_op.constant(10, name='a')
    b = constant_op.constant(20, name='b')
    c = math_ops.add_n([a, b], name='c')
    d = math_ops.add_n([b, c], name='d')
    train_op = ops.get_collection_ref(ops.GraphKeys.TRAIN_OP)
    # Being a train_op will make 'd' to be added as a fetch node.
    train_op.append(d)
    mg = meta_graph.create_meta_graph_def(graph=ops.get_default_graph())
    config = config_pb2.ConfigProto()
    rewriter_config = config.graph_options.rewrite_options
    rewriter_config.optimizers.append('constfold')
    rewriter_config.min_graph_nodes = -1
    graph = tf_optimizer.OptimizeGraph(config, mg)
    # Constant folding collapses the whole graph into the single fetch node 'd'.
    self.assertEqual(len(graph.node), 1)
    self.assertItemsEqual([node.name for node in graph.node], ['d'])
  @test_util.run_v1_only('b/120545219')
  def testKeepNodes(self):
    # Nodes referenced from collections ('variables', explicit collections,
    # the _grappler_do_not_remove attr) must survive optimization.
    g = ops.Graph()
    with g.as_default():
      a1 = variables.VariableV1(
          1.0)  # Must be preserved since it's in the collection 'variables'.
      a2 = constant_op.constant(0, shape=[50, 50], name='keep')
      ops.add_to_collection('a2', a2)  # Explicitly add to collection.
      with g._attr_scope(
          {'_grappler_do_not_remove': attr_value_pb2.AttrValue(b=True)}):
        a3 = constant_op.constant(0, name='keep2')
      b = constant_op.constant(1, shape=[100, 10])
      c = constant_op.constant(0, shape=[10, 30])
      d = math_ops.matmul(b, c)
      ops.add_to_collection('train_op', d)  # d is the fetch node.
    # Optimize the graph.
    mg = meta_graph.create_meta_graph_def(graph=g)
    config = config_pb2.ConfigProto()
    rewriter_config = config.graph_options.rewrite_options
    rewriter_config.min_graph_nodes = -1
    optimized_graph = tf_optimizer.OptimizeGraph(config, mg)
    # Check that the nodes referenced in various collections have been preserved
    optimized_graph_nodes = [node.name for node in optimized_graph.node]
    expected_nodes = [
        d.op.name, a1.op.name, a2.op.name, a3.op.name, 'Variable/initial_value',
        'Variable/Assign'
    ]
    self.assertEqual(len(optimized_graph_nodes), len(expected_nodes))
    self.assertAllInSet(optimized_graph_nodes, expected_nodes)
  @test_util.run_v1_only('b/120545219')
  def testLoops(self):
    # Builds a while_loop, then checks that shape inference on the optimized
    # graph reports identical op properties for the loop output 'buf' and its
    # same-shaped mirror 'f'.
    g = ops.Graph()
    with g.as_default():
      def _Cond(_, counter):
        return counter < end
      def _Body(buf, counter):
        buf = array_ops.concat([buf, [counter]], 0)
        counter += 1
        return [buf, counter]
      start = array_ops.placeholder(shape=[], dtype=dtypes.int32)
      end = array_ops.placeholder(shape=[], dtype=dtypes.int32)
      init_buf = array_ops.zeros(shape=[0], dtype=dtypes.int32)
      loop_vars = [init_buf, start]
      shape_inv = [
          tensor_shape.TensorShape([None]),
          tensor_shape.TensorShape([])
      ]
      buf, _ = control_flow_ops.while_loop(_Cond, _Body, loop_vars, shape_inv)
      f = -array_ops.ones_like(buf, optimize=False)
      buf_shape = array_ops.shape(buf)
      f_shape = array_ops.shape(f)
      ops.add_to_collection('train_op', buf_shape)
      ops.add_to_collection('train_op', f_shape)
    # Optimize the graph.
    mg = meta_graph.create_meta_graph_def(graph=g)
    config = config_pb2.ConfigProto()
    rewriter_config = config.graph_options.rewrite_options
    rewriter_config.min_graph_nodes = -1
    optimized_graph = tf_optimizer.OptimizeGraph(config, mg)
    mg.graph_def.CopyFrom(optimized_graph)
    # Check that the nodes referenced in various collections have been preserved
    item = gitem.Item(mg)
    props = item.GetOpProperties()
    buf_prop = props[buf.op.name]
    f_prop = props[f.op.name]
    self.assertEqual(buf_prop, f_prop)
if __name__ == '__main__':
  # Delegate to the TensorFlow test runner.
  test.main()
# coding=utf-8
# *** WARNING: this file was generated by pulumi. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import copy
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from ._enums import *
__all__ = [
'PrivateEndpointResponse',
'PrivateLinkServiceConnectionStateResponse',
'SystemDataResponse',
]
@pulumi.output_type
class PrivateEndpointResponse(dict):
    """
    The private endpoint resource.
    """
    # NOTE: generated by Pulumi codegen (see file header); do not edit by hand.
    def __init__(__self__, *,
                 METHOD_NAME: str):
        """
        The private endpoint resource.
        :param str id: The ARM identifier for private endpoint.
        """
        pulumi.set(__self__, "id", METHOD_NAME)
    @property
    @pulumi.getter
    def METHOD_NAME(self) -> str:
        """
        The ARM identifier for private endpoint.
        """
        return pulumi.get(self, "id")
@pulumi.output_type
class PrivateLinkServiceConnectionStateResponse(dict):
    """
    A collection of information about the state of the connection between service consumer and provider.
    """
    # NOTE: generated by Pulumi codegen (see file header); do not edit by hand.
    @staticmethod
    def __key_warning(key: str):
        # Warn when a camelCase wire key is used instead of the snake_case property.
        suggest = None
        if key == "actionsRequired":
            suggest = "actions_required"
        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in PrivateLinkServiceConnectionStateResponse. Access the value via the '{suggest}' property getter instead.")
    def __getitem__(self, key: str) -> Any:
        PrivateLinkServiceConnectionStateResponse.__key_warning(key)
        return super().__getitem__(key)
    def get(self, key: str, default = None) -> Any:
        PrivateLinkServiceConnectionStateResponse.__key_warning(key)
        return super().get(key, default)
    def __init__(__self__, *,
                 actions_required: Optional[str] = None,
                 description: Optional[str] = None,
                 status: Optional[str] = None):
        """
        A collection of information about the state of the connection between service consumer and provider.
        :param str actions_required: A message indicating if changes on the service provider require any updates on the consumer.
        :param str description: The reason for approval/rejection of the connection.
        :param str status: Indicates whether the connection has been Approved/Rejected/Removed by the owner of the service.
        """
        if actions_required is not None:
            pulumi.set(__self__, "actions_required", actions_required)
        if description is not None:
            pulumi.set(__self__, "description", description)
        if status is not None:
            pulumi.set(__self__, "status", status)
    @property
    @pulumi.getter(name="actionsRequired")
    def actions_required(self) -> Optional[str]:
        """
        A message indicating if changes on the service provider require any updates on the consumer.
        """
        return pulumi.get(self, "actions_required")
    @property
    @pulumi.getter
    def description(self) -> Optional[str]:
        """
        The reason for approval/rejection of the connection.
        """
        return pulumi.get(self, "description")
    @property
    @pulumi.getter
    def status(self) -> Optional[str]:
        """
        Indicates whether the connection has been Approved/Rejected/Removed by the owner of the service.
        """
        return pulumi.get(self, "status")
@pulumi.output_type
class SystemDataResponse(dict):
    """
    Metadata pertaining to creation and last modification of the resource.
    """
    # NOTE: generated by Pulumi codegen (see file header); do not edit by hand.
    @staticmethod
    def __key_warning(key: str):
        # Warn when a camelCase wire key is used instead of the snake_case property.
        suggest = None
        if key == "createdAt":
            suggest = "created_at"
        elif key == "createdBy":
            suggest = "created_by"
        elif key == "createdByType":
            suggest = "created_by_type"
        elif key == "lastModifiedAt":
            suggest = "last_modified_at"
        elif key == "lastModifiedBy":
            suggest = "last_modified_by"
        elif key == "lastModifiedByType":
            suggest = "last_modified_by_type"
        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in SystemDataResponse. Access the value via the '{suggest}' property getter instead.")
    def __getitem__(self, key: str) -> Any:
        SystemDataResponse.__key_warning(key)
        return super().__getitem__(key)
    def get(self, key: str, default = None) -> Any:
        SystemDataResponse.__key_warning(key)
        return super().get(key, default)
    def __init__(__self__, *,
                 created_at: Optional[str] = None,
                 created_by: Optional[str] = None,
                 created_by_type: Optional[str] = None,
                 last_modified_at: Optional[str] = None,
                 last_modified_by: Optional[str] = None,
                 last_modified_by_type: Optional[str] = None):
        """
        Metadata pertaining to creation and last modification of the resource.
        :param str created_at: The timestamp of resource creation (UTC).
        :param str created_by: The identity that created the resource.
        :param str created_by_type: The type of identity that created the resource.
        :param str last_modified_at: The timestamp of resource last modification (UTC)
        :param str last_modified_by: The identity that last modified the resource.
        :param str last_modified_by_type: The type of identity that last modified the resource.
        """
        if created_at is not None:
            pulumi.set(__self__, "created_at", created_at)
        if created_by is not None:
            pulumi.set(__self__, "created_by", created_by)
        if created_by_type is not None:
            pulumi.set(__self__, "created_by_type", created_by_type)
        if last_modified_at is not None:
            pulumi.set(__self__, "last_modified_at", last_modified_at)
        if last_modified_by is not None:
            pulumi.set(__self__, "last_modified_by", last_modified_by)
        if last_modified_by_type is not None:
            pulumi.set(__self__, "last_modified_by_type", last_modified_by_type)
    @property
    @pulumi.getter(name="createdAt")
    def created_at(self) -> Optional[str]:
        """
        The timestamp of resource creation (UTC).
        """
        return pulumi.get(self, "created_at")
    @property
    @pulumi.getter(name="createdBy")
    def created_by(self) -> Optional[str]:
        """
        The identity that created the resource.
        """
        return pulumi.get(self, "created_by")
    @property
    @pulumi.getter(name="createdByType")
    def created_by_type(self) -> Optional[str]:
        """
        The type of identity that created the resource.
        """
        return pulumi.get(self, "created_by_type")
    @property
    @pulumi.getter(name="lastModifiedAt")
    def last_modified_at(self) -> Optional[str]:
        """
        The timestamp of resource last modification (UTC)
        """
        return pulumi.get(self, "last_modified_at")
    @property
    @pulumi.getter(name="lastModifiedBy")
    def last_modified_by(self) -> Optional[str]:
        """
        The identity that last modified the resource.
        """
        return pulumi.get(self, "last_modified_by")
    @property
    @pulumi.getter(name="lastModifiedByType")
    def last_modified_by_type(self) -> Optional[str]:
        """
        The type of identity that last modified the resource.
        """
        return pulumi.get(self, "last_modified_by_type")
|
#
# Copyright 2019 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# SPDX-License-Identifier: GPL-3.0-or-later
#
#
""" Module to generate parsed header output data """
import os
import sys
import json
import logging
import click
from click import ClickException
from gnuradio.modtool.core import yaml_generator
from .core.parseheader import BlockHeaderParser
LOGGER = logging.getLogger(__name__)
class BlockToolException(ClickException):
    """ClickException variant for an enhanced CLI interface."""
    def show(self, file=None):
        """Display the formatted message in red (the *file* argument is ignored)."""
        message = self.format_message()
        click.secho('BlockToolException: {}'.format(message), fg='red')
def run_blocktool(module):
    """Call the run function of the core modules.

    :param module: object exposing a ``run_blocktool()`` method
        (e.g. a BlockHeaderParser instance).

    On BlockToolException the error is echoed to stderr and the process
    exits with status 1.
    """
    try:
        module.run_blocktool()
    except BlockToolException as err:
        click.echo(err, file=sys.stderr)
        # sys.exit() instead of the site-provided builtin exit(), which is
        # intended for interactive use and not guaranteed to be available.
        sys.exit(1)
@click.command('parseheader',
               short_help='Generate the parsed output for the header file or directory in a specified format')
@click.argument('file-path', nargs=1)
@click.option('--yaml', is_flag=True,
              help='If given, a YAML response will be printed, else default json will be printed')
@click.option('-c', '--blocktool-comments', is_flag=True,
              help='blocktool helper comments will be added in the header file')
@click.option('-o', '--output', is_flag=True,
              help='If given, a file with desired output format will be generated')
@click.option('-I', '--include_paths', default=None,
              help='Comma separated list of include paths for header files')
def cli(**kwargs):
    """
    Block header parsing tool.
    \b
    A tool that can be used to automatically parse the headers in GNU Radio project
    or the OOT modules
    """
    # Mark as a standalone CLI run (not invoked from modtool) before dispatch.
    kwargs['modtool'] = False
    # click passes the 'file-path' argument as kwargs['file_path'].
    if os.path.isfile(kwargs['file_path']):
        parser = BlockHeaderParser(**kwargs)
        run_blocktool(parser)
        if kwargs['yaml']:
            parser.yaml = True
            yaml_generator(parser, **kwargs)
        else:
            parser.json_confirm = True
            METHOD_NAME(parser, **kwargs)
    elif os.path.isdir(kwargs['file_path']):
        # Directories are handled header-by-header with per-file logging.
        parse_directory(**kwargs)
    else:
        raise BlockToolException('Invalid file or directory path.')
def METHOD_NAME(parser, **kwargs):
    """
    Emit the parsed block header as JSON.

    Writes '<block>_<header>.json' into the current directory when the
    'output' flag is set; otherwise pretty-prints the JSON to stdout.

    :param parser: header parser exposing filename, modname and parsed_data.
    :param kwargs: CLI options; only 'output' is consulted here.
    """
    header_stem = parser.filename.split('.')[0]
    block_name = parser.modname.split('-')[-1]
    if not kwargs['output']:
        print(json.dumps(parser.parsed_data, indent=4))
        return
    out_path = os.path.join('.', '{}_{}.json'.format(block_name, header_stem))
    with open(out_path, 'w') as out_file:
        json.dump(parser.parsed_data, out_file, indent=4)
def parse_directory(**kwargs):
    """
    Get parsed json and yaml output for a complete header directory.

    Iterates over every '*.h' file directly inside kwargs['file_path'],
    parses it and writes YAML (and JSON, unless invoked from modtool)
    output files. A failure for one header is logged to '<dir>_log.out'
    and the remaining headers are still processed.

    :raises BlockToolException: when the directory contains no headers.
    """
    kwargs['output'] = True
    dir_path = os.path.abspath(kwargs['file_path'])
    dir_name = os.path.basename(dir_path)
    list_header = sorted(
        os.path.join(dir_path, _header)
        for _header in os.listdir(dir_path)
        if _header.endswith('.h') and os.path.isfile(os.path.join(dir_path, _header))
    )
    if not list_header:
        raise BlockToolException(
            'Invalid directory! No header found to be parsed')
    for header_path in list_header:
        kwargs['file_path'] = header_path
        header = os.path.basename(header_path)
        try:
            parse_dir = BlockHeaderParser(**kwargs)
            parse_dir.yaml = True
            parse_dir.json = True
            run_blocktool(parse_dir)
            yaml_generator(parse_dir, **kwargs)
            if not kwargs['modtool']:
                METHOD_NAME(parse_dir, **kwargs)
        # run_blocktool() exits via SystemExit (not an Exception subclass) on
        # BlockToolException, so it must be caught explicitly to keep the
        # per-header best-effort behavior. The previous bare `except:` also
        # swallowed KeyboardInterrupt; this narrower clause does not.
        except (Exception, SystemExit):
            logging.basicConfig(level=logging.DEBUG,
                                filename=os.path.join('.', dir_name + '_log.out'))
            logging.exception(
                'Log for Exception raised for the header: {}\n'.format(header))
            click.secho('Parsing unsuccessful: {}'.format(
                header), fg='yellow')
import hashlib
import json
from typing import List
from tracardi.domain.event import Event
from tracardi.domain.profile import Profile
from tracardi.domain.session import Session
from tracardi.service.plugin.domain.register import Plugin, Spec, MetaData, Documentation, PortDoc, Form, FormGroup, \
FormField, FormComponent
from tracardi.service.plugin.domain.result import Result
from tracardi.service.plugin.runner import ActionRunner
from tracardi.service.plugin.domain.config import PluginConfig
class Configuration(PluginConfig):
    # Dot-path references to the values that will be hashed (the plugin form
    # uses a listOfDotPaths component with default source "profile").
    traits: List[str]
    # Hashing algorithm name: one of md5, sha1, sha256, sha512.
    # Unknown values fall back to md5 at run time.
    func: str = 'md5'
def METHOD_NAME(config: dict) -> Configuration:
    """Build a validated Configuration from the raw plugin init dict."""
    validated = Configuration(**config)
    return validated
class HashTraitsAction(ActionRunner):
    """Plugin action that replaces selected trait values with their hash digest."""
    # Parsed plugin configuration; populated in set_up().
    config: Configuration
    async def set_up(self, init):
        self.config = METHOD_NAME(init)
    async def run(self, payload: dict, in_edge=None) -> Result:
        """Hash each configured trait in place and sync profile/session/event.

        :param payload: flow payload used to resolve dot-path references.
        :param in_edge: incoming graph edge (unused here).
        :return: Result on port 'payload' carrying the input payload object.
        """
        dot = self._get_dot_accessor(payload)
        for trait in self.config.traits:
            # Values sourced from the flow are rejected for hashing.
            if dot.source(trait) == 'flow':
                self.console.warning("Flow values can not be hashed.")
                continue
            elif not dot.METHOD_NAME(trait) or trait not in dot:
                self.console.warning(f"Given trait {trait} is invalid or does not exist.")
                continue
            value = dot[trait]
            if not isinstance(value, str):
                # Non-string values are serialized so they hash deterministically.
                value = json.dumps(value)
            if self.config.func == 'md5':
                result = hashlib.md5(value.encode())
            elif self.config.func == 'sha256':
                result = hashlib.sha256(value.encode())
            elif self.config.func == 'sha1':
                result = hashlib.sha1(value.encode())
            elif self.config.func == 'sha512':
                result = hashlib.sha512(value.encode())
            else:
                # Unknown algorithm names silently fall back to md5.
                result = hashlib.md5(value.encode())
            dot[trait] = result.hexdigest()
        # Write the (possibly) mutated data back onto the runner's domain objects.
        if dot.profile:
            profile = Profile(**dot.profile)
            self.profile.replace(profile)
        if dot.session:
            session = Session(**dot.session)
            self.session.replace(session)
        event = Event(**dot.event)
        self.event.replace(event)
        return Result(port='payload', value=payload)
def register() -> Plugin:
    """Return the plugin spec/metadata consumed by the Tracardi plugin system."""
    return Plugin(
        start=False,
        spec=Spec(
            module=__name__,
            className='HashTraitsAction',
            inputs=["payload"],
            outputs=['payload'],
            version='0.7.0',
            license="MIT",
            author="Risto Kowaczewski",
            init={"traits": [], "func": "md5"},
            form=Form(groups=[
                FormGroup(
                    name="Traits to hash",
                    fields=[
                        FormField(
                            id="traits",
                            name="Reference value to be hashed",
                            description="If this value is not a string then it will be serialized to JSON and hashed.",
                            component=FormComponent(type="listOfDotPaths",
                                                    props={"label": "traits", "defaultSourceValue": "profile"})
                        ),
                        FormField(
                            id="func",
                            name="Hashing function",
                            description="Select hashing method.",
                            component=FormComponent(type="select",
                                                    props={"label": "Hashing", "items": {
                                                        "md5": "MD5",
                                                        "sha1": "SHA1",
                                                        "sha256": "SHA256",
                                                        "sha512": "SHA512"
                                                    }})
                        )
                    ]
                ),
            ]),
        ),
        metadata=MetaData(
            name='Hash data',
            desc='Hash defined data e.g. profile traits.',
            icon='hash',
            group=["Operations"],
            tags=['profile', 'trait', 'data'],
            documentation=Documentation(
                inputs={
                    "payload": PortDoc(desc="This port takes payload object.")
                },
                outputs={
                    "payload": PortDoc(desc="This port returns input payload.")
                }
            )
        )
    )
"""Implements (a subset of) Sun XDR -- eXternal Data Representation.
See: RFC 1014
"""
import struct
from io import BytesIO
from functools import wraps
__all__ = ["Error", "Packer", "Unpacker", "ConversionError"]
# exceptions
class Error(Exception):
    """Base exception for this module.

    Usage::

        except xdrlib.Error as var:
            # var holds the Error instance

    Public ivars:
        msg -- contains the message
    """

    def __init__(self, msg):
        self.msg = msg

    def __repr__(self):
        return "%r" % (self.msg,)

    def __str__(self):
        return str(self.msg)
class ConversionError(Error):
    """Raised when a value cannot be packed/unpacked (wraps struct.error)."""
    pass
def raise_conversion_error(function):
    """Decorator: re-raise any struct.error from *function* as ConversionError."""
    @wraps(function)
    def wrapper(self, value):
        try:
            return function(self, value)
        except struct.error as exc:
            # Preserve the original message but hide the struct-level context.
            raise ConversionError(exc.args[0]) from None
    return wrapper
class Packer:
    """Pack various data representations into an XDR buffer."""

    def __init__(self):
        self.reset()

    def reset(self):
        # Fresh in-memory buffer; every pack_* method appends to it.
        self.__buf = BytesIO()

    def get_buffer(self):
        return self.__buf.getvalue()

    # backwards compatibility
    get_buf = get_buffer

    @raise_conversion_error
    def pack_uint(self, x):
        # 32-bit big-endian unsigned integer.
        self.__buf.write(struct.pack('>L', x))

    @raise_conversion_error
    def METHOD_NAME(self, x):
        # 32-bit big-endian signed integer.
        self.__buf.write(struct.pack('>l', x))

    pack_enum = METHOD_NAME

    def pack_bool(self, x):
        # XDR booleans are 4-byte 0/1 words.
        self.__buf.write(b'\0\0\0\1' if x else b'\0\0\0\0')

    def pack_uhyper(self, x):
        # 64-bit value as two 32-bit halves, high word first.
        for shift in (32, 0):
            try:
                self.pack_uint((x >> shift) & 0xffffffff)
            except (TypeError, struct.error) as exc:
                raise ConversionError(exc.args[0]) from None

    pack_hyper = pack_uhyper

    @raise_conversion_error
    def pack_float(self, x):
        self.__buf.write(struct.pack('>f', x))

    @raise_conversion_error
    def pack_double(self, x):
        self.__buf.write(struct.pack('>d', x))

    def pack_fstring(self, n, s):
        """Pack a fixed-length string, NUL-padded to a 4-byte boundary."""
        if n < 0:
            raise ValueError('fstring size must be nonnegative')
        padded_len = (n + 3) // 4 * 4
        data = s[:n]
        self.__buf.write(data + b'\0' * (padded_len - len(data)))

    pack_fopaque = pack_fstring

    def pack_string(self, s):
        # Length-prefixed, padded string.
        length = len(s)
        self.pack_uint(length)
        self.pack_fstring(length, s)

    pack_opaque = pack_string
    pack_bytes = pack_string

    def pack_list(self, list, pack_item):
        # Each element is preceded by a 1 marker; a trailing 0 terminates.
        for item in list:
            self.pack_uint(1)
            pack_item(item)
        self.pack_uint(0)

    def pack_farray(self, n, list, pack_item):
        # Fixed-size array: no length prefix, size must match exactly.
        if len(list) != n:
            raise ValueError('wrong array size')
        for item in list:
            pack_item(item)

    def pack_array(self, list, pack_item):
        # Counted array: length prefix followed by the elements.
        size = len(list)
        self.pack_uint(size)
        self.pack_farray(size, list, pack_item)
class Unpacker:
    """Unpack various data representations from the given buffer."""

    def __init__(self, data):
        self.reset(data)

    def reset(self, data):
        self.__buf = data
        self.__pos = 0

    def get_position(self):
        return self.__pos

    def set_position(self, position):
        self.__pos = position

    def get_buffer(self):
        return self.__buf

    def done(self):
        if self.__pos < len(self.__buf):
            raise Error('unextracted data remains')

    def __take(self, count):
        # Advance the cursor by *count* bytes and return them.  The cursor
        # moves even on a short read, matching historical xdrlib behaviour.
        start = self.__pos
        self.__pos = end = start + count
        chunk = self.__buf[start:end]
        if len(chunk) < count:
            raise EOFError
        return chunk

    def unpack_uint(self):
        return struct.unpack('>L', self.__take(4))[0]

    def unpack_int(self):
        return struct.unpack('>l', self.__take(4))[0]

    unpack_enum = unpack_int

    def unpack_bool(self):
        return bool(self.unpack_int())

    def unpack_uhyper(self):
        # 64-bit unsigned: two 32-bit words, high word first.
        hi = self.unpack_uint()
        lo = self.unpack_uint()
        return int(hi) << 32 | lo

    def unpack_hyper(self):
        # Reinterpret the unsigned 64-bit value as two's-complement signed.
        x = self.unpack_uhyper()
        return x - 0x10000000000000000 if x >= 0x8000000000000000 else x

    def unpack_float(self):
        return struct.unpack('>f', self.__take(4))[0]

    def unpack_double(self):
        return struct.unpack('>d', self.__take(8))[0]

    def unpack_fstring(self, n):
        """Unpack a fixed-length string of *n* bytes (padded to 4)."""
        if n < 0:
            raise ValueError('fstring size must be nonnegative')
        start = self.__pos
        end = start + (n + 3) // 4 * 4
        # Unlike the int readers, the cursor is NOT advanced on EOF here.
        if end > len(self.__buf):
            raise EOFError
        self.__pos = end
        return self.__buf[start:start + n]

    unpack_fopaque = unpack_fstring

    def unpack_string(self):
        # Length-prefixed, padded string.
        return self.unpack_fstring(self.unpack_uint())

    unpack_opaque = unpack_string
    unpack_bytes = unpack_string

    def unpack_list(self, unpack_item):
        # Elements are preceded by a 1 marker; a 0 marker terminates.
        items = []
        while True:
            flag = self.unpack_uint()
            if flag == 0:
                break
            if flag != 1:
                raise ConversionError('0 or 1 expected, got %r' % (flag,))
            items.append(unpack_item())
        return items

    def unpack_farray(self, n, unpack_item):
        return [unpack_item() for _ in range(n)]

    def unpack_array(self, unpack_item):
        return self.unpack_farray(self.unpack_uint(), unpack_item)
299,680 | verify codegen | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=no-else-return, unidiomatic-typecheck, invalid-name, W0611, C0413
"""Expose Vitis-AI test functions to the Python frontend"""
import sys
import numpy as np
import pytest
pytest.importorskip("pyxir")
import pyxir.contrib.target.DPUCZDX8G
import tvm
from tvm import relay
from tvm import runtime
from tvm.relay import transform
from tvm.relay.op.contrib.vitis_ai import partition_for_vitis_ai
from tvm.relay.build_module import bind_params_by_name
from tvm.contrib.target import vitis_ai
from tvm.contrib import graph_executor
from tvm.contrib import utils
def get_cpu_op_count(mod):
    """Traverse graph counting ops offloaded to TVM.

    :param mod: a Relay IRModule whose "main" function is visited.
    :return: number of primitive op calls left on the TVM (CPU) side.
    """

    class Counter(tvm.relay.ExprVisitor):
        """Relay expression visitor that counts primitive op calls."""

        def __init__(self):
            super().__init__()
            self.count = 0

        def visit_call(self, call):
            # Only calls to primitive ops count; calls to (partitioned)
            # functions do not.  Continue recursing into arguments.
            if isinstance(call.op, tvm.ir.Op):
                self.count += 1
            super().visit_call(call)

    c = Counter()
    c.visit(mod["main"])
    return c.count
def build_module(
    mod,
    target,
    dpu_target="DPUCADF8H",
    params=None,
    enable_vitis_ai=True,
    tvm_ops=0,
    vitis_ai_partitions=1,
):
    """Build module for Vitis-AI codegen.

    Partitions *mod* for the given DPU target (when *enable_vitis_ai* is
    true), asserts that exactly *tvm_ops* operators remain on TVM and that
    *vitis_ai_partitions* Vitis-AI subgraphs were created, then compiles.

    :param mod: Relay IRModule (a bare expression is wrapped into one).
    :param target: TVM build target, e.g. "llvm".
    :param dpu_target: Vitis-AI DPU identifier used for partitioning.
    :param params: optional weight dict; defaults to empty.
    :param tvm_ops: expected number of operators left un-offloaded.
    :param vitis_ai_partitions: expected number of Vitis-AI partitions.
    :return: the built runtime factory module.
    """
    if isinstance(mod, tvm.relay.expr.Call):
        mod = tvm.IRModule.from_expr(mod)
    if params is None:
        params = {}
    with tvm.transform.PassContext(
        opt_level=3, config={"relay.ext.vitis_ai.options.target": dpu_target}
    ):
        if enable_vitis_ai:
            mod = partition_for_vitis_ai(mod, params, dpu_target)
            tvm_op_count = get_cpu_op_count(mod)
            assert tvm_op_count == tvm_ops, "Got {} TVM operators, expected {}".format(
                tvm_op_count, tvm_ops
            )
            # Count subgraphs handed to the Vitis-AI codegen by name hint.
            partition_count = 0
            for global_var in mod.get_global_vars():
                if "vitis_ai" in global_var.name_hint:
                    partition_count += 1
            assert (
                vitis_ai_partitions == partition_count
            ), "Got {} Vitis-AI partitions, expected {}".format(
                partition_count, vitis_ai_partitions
            )
        # Clear the compile-engine cache so repeated builds stay independent.
        relay.backend.te_compiler.get().clear()
        return relay.build(mod, target, params=params)
def update_lib(lib, cross_compile=None):
    """Round-trip *lib* through a shared object on disk.

    Exports the built module to a temporary `lib.so` (optionally using a
    cross-compiler) and loads it back, mimicking real deployment.
    """
    tmp_path = utils.tempdir()
    lib_name = "lib.so"
    lib_path = tmp_path.relpath(lib_name)
    if cross_compile:
        lib.export_library(lib_path, cc=cross_compile)
    else:
        lib.export_library(lib_path)
    lib = runtime.load_module(lib_path)
    return lib
def extract_vitis_ai_modules(module):
    """Get the Vitis-AI runtime modules imported into a built llvm module."""
    return list(
        filter(lambda mod: mod.type_key == "VitisAIRuntime", module.get_lib().imported_modules)
    )
def METHOD_NAME(
    module, num_vitis_ai_modules=1, params=None, target="llvm", tvm_ops=0, dpu_target="DPUCADX8G"
):
    """Check Vitis-AI codegen against a known good output.

    Builds *module* (asserting partition/op counts along the way) and then
    verifies that the compiled library imports exactly
    *num_vitis_ai_modules* Vitis-AI runtime modules.
    """
    module = build_module(
        module,
        target,
        params=params,
        dpu_target=dpu_target,
        tvm_ops=tvm_ops,
        vitis_ai_partitions=num_vitis_ai_modules,
    )
    vitis_ai_modules = extract_vitis_ai_modules(module)
    assert len(vitis_ai_modules) == num_vitis_ai_modules, (
        f"The number of Vitis-AI modules produced ({len(vitis_ai_modules)}) does not "
        f"match the expected value ({num_vitis_ai_modules})."
    )
def verify_result(
    mod,
    map_inputs,
    out_shape,
    result,
    tol=1e-5,
    target="llvm",
    device=tvm.cpu(),
    params=None,
    dpu_target="DPUCADX8G",
    tvm_ops=0,
):
    """Check the result between the reference and BYOC Vitis-AI flows.

    :param mod: Relay module to compile and run.
    :param map_inputs: dict mapping input names to numpy arrays.
    :param out_shape: output shape, or list of shapes for multiple outputs.
    :param result: expected output array(s), matching *out_shape*.
    :param tol: rtol/atol used for the numeric comparison.
    :param params: optional weight dict; may be None.
    """
    if params is None:
        # build_module tolerates None, but set_input(**params) below would
        # raise TypeError when unpacking a None mapping.
        params = {}
    lib = build_module(mod, target, params=params, dpu_target=dpu_target, tvm_ops=tvm_ops)
    lib = update_lib(lib)
    # NOTE(review): the executor is created on tvm.cpu() while outputs use
    # *device* -- looks intentional for the CPU-hosted DPU flow, but confirm
    # if a non-CPU device is ever passed.
    rt_mod = graph_executor.GraphModule(lib["default"](tvm.cpu()))
    for name, data in map_inputs.items():
        rt_mod.set_input(name, data)
    rt_mod.set_input(**params)
    rt_mod.run()
    # Normalize single-output calls to the multi-output shape.
    out_shapes = out_shape if isinstance(out_shape, list) else [out_shape]
    results = result if isinstance(result, list) else [result]
    for idx, shape in enumerate(out_shapes):
        out = tvm.nd.empty(shape, device=device)
        out = rt_mod.get_output(idx, out)
        tvm.testing.assert_allclose(out.numpy(), results[idx], rtol=tol, atol=tol)
299,681 | test rename keyword name dotted | import pytest
from robocorp_ls_core.constants import NULL
from robocorp_ls_core.jsonrpc.exceptions import JsonRpcException
from robocorp_ls_core.lsp import LSPMessages, MessageType
def check_data_regression(result, data_regression, basename=None):
    """Regression-check a rename result mapping of uri -> text edits.

    Regression data is keyed by file basename; renames must never touch
    .py files, so encountering one is a hard failure.
    """
    from robocorp_ls_core import uris
    import os.path

    data = {}
    for uri, text_edits in result.items():
        as_fs_path = uris.to_fs_path(uri)
        # Normalize Windows separators so the basename is stable cross-OS.
        name = os.path.basename(as_fs_path.replace("\\", "/"))
        if name.endswith(".py"):
            raise AssertionError("Did not expect .py to be renamed.")
        data[name] = text_edits
    # Sort for a deterministic regression file.
    data_regression.check(sorted(data.items()), basename=basename)
def test_prepare_rename_wrong(workspace, libspec_manager, data_regression):
    """prepare_rename must raise when the cursor is not on a keyword/variable."""
    from robotframework_ls.impl.completion_context import CompletionContext
    from robotframework_ls.impl.rename import prepare_rename

    workspace.set_root("case4", libspec_manager=libspec_manager, index_workspace=True)
    doc = workspace.get_doc("case4resource3.robot")
    line = doc.find_line_with_contents("[Arguments] ${arg1} ${arg2}")
    col = 0  # column 0: not on a renameable name
    completion_context = CompletionContext(
        doc, workspace=workspace.ws, line=line, col=col
    )
    with pytest.raises(JsonRpcException) as e:
        prepare_rename(completion_context)
    assert (
        "Unable to rename (could not find keyword nor variable in current position)."
        in str(e)
    )
def test_rename_from_keyword_definition(workspace, libspec_manager, data_regression):
    """Renaming triggered from a keyword definition site updates all references."""
    from robotframework_ls.impl.completion_context import CompletionContext
    from robotframework_ls.impl.rename import rename, prepare_rename

    workspace.set_root("case4", libspec_manager=libspec_manager, index_workspace=True)
    doc = workspace.get_doc("case4resource3.robot")
    line = doc.find_line_with_contents("Yet Another Equal Redefined")
    col = 6  # inside the keyword name
    completion_context = CompletionContext(
        doc, workspace=workspace.ws, line=line, col=col
    )
    result = prepare_rename(completion_context)
    data_regression.check(
        result,
        basename="test_rename_from_keyword_definition.prepare",
    )
    result = rename(completion_context, "Rara")
    assert result
    check_data_regression(result["changes"], data_regression)
def METHOD_NAME(workspace, libspec_manager, data_regression):
    """Rename works when the keyword is referenced with a `Library.` prefix."""
    from robotframework_ls.impl.completion_context import CompletionContext
    from robotframework_ls.impl.rename import rename, prepare_rename

    workspace.set_root("case2", libspec_manager=libspec_manager, index_workspace=True)
    doc = workspace.put_doc("case2.robot")
    # NOTE(review): whitespace inside this embedded robot source may have
    # been lost in formatting -- verify separators against VCS.
    doc.source = """
*** Settings ***
Library Collections
*** Test Cases ***
Check
${list}= Evaluate []
Collections.appendtolist ${list} a b
Append to list ${list} a b
"""
    line = doc.find_line_with_contents(" Append to list ${list} a b")
    col = len(" Append to li")  # cursor inside the undotted reference
    completion_context = CompletionContext(
        doc, workspace=workspace.ws, line=line, col=col
    )
    result = prepare_rename(completion_context)
    data_regression.check(
        result,
        basename="test_rename_keyword_name_dotted.prepare",
    )
    result = rename(completion_context, "Rara")
    assert result
    check_data_regression(result["changes"], data_regression)
def test_rename_from_variable_definition(workspace, libspec_manager, data_regression):
    """Renaming a local variable from its assignment site stays keyword-local."""
    from robotframework_ls.impl.completion_context import CompletionContext
    from robotframework_ls.impl.rename import rename

    workspace.set_root("case2", libspec_manager=libspec_manager, index_workspace=True)
    # NOTE(review): whitespace inside this embedded robot source may have
    # been lost in formatting -- verify separators against VCS.
    doc = workspace.put_doc(
        "case2.robot",
        """
*** Keywords ***
Example1
${foo}= Set Variable ${None}
Log To Console ${foo}
Example2
${foo}= Set Variable ${None}
Log To Console ${foo}
""",
    )
    line = doc.find_line_with_contents(" ${foo}= Set Variable ${None}")
    col = 7  # inside ${foo}
    completion_context = CompletionContext(
        doc, workspace=workspace.ws, line=line, col=col
    )
    result = rename(completion_context, new_name="bar")
    assert result
    check_data_regression(result["changes"], data_regression)
class _DummyLspMessages(LSPMessages):
    """LSPMessages stub that records show_message calls instead of sending them."""

    def __init__(self):
        LSPMessages.__init__(self, NULL)
        self.messages = []  # user-facing messages, in call order

    def show_message(self, message, msg_type=MessageType.Info):
        self.messages.append(message)
def test_rename_builtin_references(workspace, libspec_manager, data_regression):
    """Renaming a BuiltIn keyword renames references only, with a user warning."""
    from robotframework_ls.impl.completion_context import CompletionContext
    from robotframework_ls.impl.rename import rename, prepare_rename

    workspace.set_root("case2", libspec_manager=libspec_manager, index_workspace=True)
    # NOTE(review): whitespace inside this embedded robot source may have
    # been lost in formatting -- verify separators against VCS.
    doc = workspace.put_doc(
        "case2.robot",
        """
*** keywords ***
Example2
${foo}= Set Variable ${None}
Log To Console ${foo}
""",
    )
    line = doc.find_line_with_contents(" Log To Console ${foo}")
    col = 9  # inside 'Log To Console'
    completion_context = CompletionContext(
        doc,
        workspace=workspace.ws,
        line=line,
        col=col,
        lsp_messages=_DummyLspMessages(),
    )
    result = prepare_rename(completion_context)
    assert result
    # The library-defined keyword itself cannot be renamed automatically.
    assert completion_context.lsp_messages.messages == [
        "Keyword defined in Library. Only references will be renamed "
        "(the 'Log To Console' definition in 'BuiltIn' will need to be renamed manually)."
    ]
    result = rename(completion_context, new_name="bar")
    check_data_regression(result["changes"], data_regression)
def test_rename_keyword_name_with_variables(workspace, libspec_manager):
    """Keywords with embedded variables in the name must refuse to rename."""
    from robotframework_ls.impl.completion_context import CompletionContext
    from robotframework_ls.impl.rename import prepare_rename

    workspace.set_root("case2", libspec_manager=libspec_manager, index_workspace=True)
    # NOTE(review): whitespace inside this embedded robot source may have
    # been lost in formatting -- verify separators against VCS.
    doc = workspace.put_doc(
        "case2.robot",
        """
*** Test Cases ***
Check
Key 22 with args
*** Keywords ***
Key ${a} with args
Log to console ${a}
""",
    )
    line = doc.find_line_with_contents(" Key 22 with args")
    col = 6  # inside the embedded-variable keyword call
    completion_context = CompletionContext(
        doc,
        workspace=workspace.ws,
        line=line,
        col=col,
        lsp_messages=_DummyLspMessages(),
    )
    with pytest.raises(JsonRpcException) as e:
        prepare_rename(completion_context)
    assert (
        "Unable to rename 'Key ${a} with args' "
        "(keywords with variables embedded in the name cannot be renamed)." in str(e)
    )
def test_rename_var_with_constructed_vars(workspace, libspec_manager):
from robotframework_ls.impl.completion_context import CompletionContext
from robotframework_ls.impl.rename import prepare_rename
workspace.set_root("case2", libspec_manager=libspec_manager)
doc = workspace.put_doc("case2.robot")
doc.source = """
*** Test Cases ***
Some Test Case
[Setup] Initialize Variables
Log ${SOME_VARIABLE_0}
Log ${SOME_VARIABLE_1}
Log ${SOME_VARIABLE_2}
*** Keywords ***
Initialize Variables
FOR ${index} IN RANGE 3
Set Test Variable ${SOME_VARIABLE_${index}} Value ${index}
END
"""
line, col = doc.get_last_line_col_with_contents(" Log ${SOME_VARIABLE_0}")
col -= len("E_0}")
completion_context = CompletionContext(
doc, workspace=workspace.ws, line=line, col=col
)
with pytest.raises(JsonRpcException) as e:
prepare_rename(completion_context) |
299,682 | majority vote | # -*- coding: utf-8 -*-
"""A collection of model combination functionalities.
"""
# Author: Yue Zhao <zhaoy@cmu.edu>
# License: BSD 2 clause
from __future__ import division
from __future__ import print_function
try:
import combo
except ImportError:
print('please install combo first for combination by `pip install combo`')
from combo.models.score_comb import aom as combo_aom
from combo.models.score_comb import moa as combo_moa
from combo.models.score_comb import average as combo_average
from combo.models.score_comb import maximization as combo_maximization
from combo.models.score_comb import METHOD_NAME as combo_majority_vote
from combo.models.score_comb import median as combo_median
def aom(scores, n_buckets=5, method='static', bootstrap_estimators=False,
        random_state=None):
    """Average of Maximum combination (see :cite:`aggarwal2015theoretical`).

    Estimators are divided into ``n_buckets`` subgroups; each subgroup
    contributes the maximum score of its members, and the subgroup scores
    are averaged into the final outlier score.

    Parameters
    ----------
    scores : numpy array of shape (n_samples, n_estimators)
        Score matrix from the individual estimators.
    n_buckets : int, optional (default=5)
        The number of subgroups to build.
    method : str, optional (default='static')
        Either 'static' or 'dynamic' subgroup construction.
    bootstrap_estimators : bool, optional (default=False)
        Whether estimators are drawn with replacement.
    random_state : int, RandomState instance or None, optional (default=None)
        Seed or generator controlling the subgroup sampling.

    Returns
    -------
    combined_scores : numpy array of shape (n_samples,)
        The combined outlier scores.
    """
    # The actual combination logic lives in the `combo` package.
    combined_scores = combo_aom(scores, n_buckets, method,
                                bootstrap_estimators, random_state)
    return combined_scores
def moa(scores, n_buckets=5, method='static', bootstrap_estimators=False,
        random_state=None):
    """Maximization of Average combination (see :cite:`aggarwal2015theoretical`).

    Estimators are divided into ``n_buckets`` subgroups; each subgroup
    contributes the average score of its members, and the maximum over
    subgroups becomes the final outlier score.

    Parameters
    ----------
    scores : numpy array of shape (n_samples, n_estimators)
        Score matrix from the individual estimators.
    n_buckets : int, optional (default=5)
        The number of subgroups to build.
    method : str, optional (default='static')
        Either 'static' or 'dynamic' subgroup construction.
    bootstrap_estimators : bool, optional (default=False)
        Whether estimators are drawn with replacement.
    random_state : int, RandomState instance or None, optional (default=None)
        Seed or generator controlling the subgroup sampling.

    Returns
    -------
    combined_scores : numpy array of shape (n_samples,)
        The combined outlier scores.
    """
    # The actual combination logic lives in the `combo` package.
    combined_scores = combo_moa(scores, n_buckets, method,
                                bootstrap_estimators, random_state)
    return combined_scores
def average(scores, estimator_weights=None):
    """Combine estimator scores by taking their (optionally weighted) average.

    Parameters
    ----------
    scores : numpy array of shape (n_samples, n_estimators)
        Score matrix from multiple estimators on the same samples.
    estimator_weights : list of shape (1, n_estimators)
        If specified, a weighted average is used.

    Returns
    -------
    combined_scores : numpy array of shape (n_samples, )
        The combined outlier scores.
    """
    # Delegate to the `combo` package implementation.
    combined_scores = combo_average(scores, estimator_weights)
    return combined_scores
def maximization(scores):
    """Combine estimator scores by taking the per-sample maximum.

    Parameters
    ----------
    scores : numpy array of shape (n_samples, n_estimators)
        Score matrix from multiple estimators on the same samples.

    Returns
    -------
    combined_scores : numpy array of shape (n_samples, )
        The combined outlier scores.
    """
    # Delegate to the `combo` package implementation.
    combined_scores = combo_maximization(scores)
    return combined_scores
def METHOD_NAME(scores, weights=None):
    """Combine estimator scores by (optionally weighted) majority vote.

    Parameters
    ----------
    scores : numpy array of shape (n_samples, n_estimators)
        Score matrix from multiple estimators on the same samples.
    weights : numpy array of shape (1, n_estimators)
        If specified, a weighted majority vote is used.

    Returns
    -------
    combined_scores : numpy array of shape (n_samples, )
        The combined scores.
    """
    # Outlier detection is binary, hence n_classes is fixed at 2.
    combined_scores = combo_majority_vote(scores, n_classes=2, weights=weights)
    return combined_scores
def median(scores):
    """Combine estimator scores by taking the per-sample median.

    Parameters
    ----------
    scores : numpy array of shape (n_samples, n_estimators)
        Score matrix from multiple estimators on the same samples.

    Returns
    -------
    combined_scores : numpy array of shape (n_samples, )
        The combined scores.
    """
    # Delegate to the `combo` package implementation.
    combined_scores = combo_median(scores)
    return combined_scores
299,683 | feed data to buffered proto | """Abstract Protocol base classes."""
__all__ = (
'BaseProtocol', 'Protocol', 'DatagramProtocol',
'SubprocessProtocol', 'BufferedProtocol',
)
class BaseProtocol:
    """Common base class for protocol interfaces.

    Usually user implements protocols that derived from BaseProtocol
    like Protocol or ProcessProtocol.

    The only case when BaseProtocol should be implemented directly is
    write-only transport like write pipe
    """

    # Empty __slots__: the base carries no instance state; subclasses
    # declare their own attributes (or their own __slots__).
    __slots__ = ()

    def connection_made(self, transport):
        """Called when a connection is made.

        The argument is the transport representing the pipe connection.
        To receive data, wait for data_received() calls.
        When the connection is closed, connection_lost() is called.
        """

    def connection_lost(self, exc):
        """Called when the connection is lost or closed.

        The argument is an exception object or None (the latter
        meaning a regular EOF is received or the connection was
        aborted or closed).
        """

    def pause_writing(self):
        """Called when the transport's buffer goes over the high-water mark.

        Pause and resume calls are paired -- pause_writing() is called
        once when the buffer goes strictly over the high-water mark
        (even if subsequent writes increases the buffer size even
        more), and eventually resume_writing() is called once when the
        buffer size reaches the low-water mark.

        Note that if the buffer size equals the high-water mark,
        pause_writing() is not called -- it must go strictly over.
        Conversely, resume_writing() is called when the buffer size is
        equal or lower than the low-water mark.  These end conditions
        are important to ensure that things go as expected when either
        mark is zero.

        NOTE: This is the only Protocol callback that is not called
        through EventLoop.call_soon() -- if it were, it would have no
        effect when it's most needed (when the app keeps writing
        without yielding until pause_writing() is called).
        """

    def resume_writing(self):
        """Called when the transport's buffer drains below the low-water mark.

        See pause_writing() for details.
        """
class Protocol(BaseProtocol):
    """Interface for stream protocol.

    The user should implement this interface.  They can inherit from
    this class but don't need to.  The implementations here do
    nothing (they don't raise exceptions).

    When the user wants to requests a transport, they pass a protocol
    factory to a utility function (e.g., EventLoop.create_connection()).

    When the connection is made successfully, connection_made() is
    called with a suitable transport object.  Then data_received()
    will be called 0 or more times with data (bytes) received from the
    transport; finally, connection_lost() will be called exactly once
    with either an exception object or None as an argument.

    State machine of calls:

      start -> CM [-> DR*] [-> ER?] -> CL -> end

    * CM: connection_made()
    * DR: data_received()
    * ER: eof_received()
    * CL: connection_lost()
    """

    # No instance attributes of its own; see BaseProtocol.
    __slots__ = ()

    def data_received(self, data):
        """Called when some data is received.

        The argument is a bytes object.
        """

    def eof_received(self):
        """Called when the other end calls write_eof() or equivalent.

        If this returns a false value (including None), the transport
        will close itself.  If it returns a true value, closing the
        transport is up to the protocol.
        """
class BufferedProtocol(BaseProtocol):
    """Interface for stream protocol with manual buffer control.

    Event methods, such as `create_server` and `create_connection`,
    accept factories that return protocols that implement this interface.

    The idea of BufferedProtocol is that it allows to manually allocate
    and control the receive buffer.  Event loops can then use the buffer
    provided by the protocol to avoid unnecessary data copies.  This
    can result in noticeable performance improvement for protocols that
    receive big amounts of data.  Sophisticated protocols can allocate
    the buffer only once at creation time.

    State machine of calls:

      start -> CM [-> GB [-> BU?]]* [-> ER?] -> CL -> end

    * CM: connection_made()
    * GB: get_buffer()
    * BU: buffer_updated()
    * ER: eof_received()
    * CL: connection_lost()
    """

    # No instance attributes of its own; see BaseProtocol.
    __slots__ = ()

    def get_buffer(self, sizehint):
        """Called to allocate a new receive buffer.

        *sizehint* is a recommended minimal size for the returned
        buffer.  When set to -1, the buffer size can be arbitrary.

        Must return an object that implements the
        :ref:`buffer protocol <bufferobjects>`.
        It is an error to return a zero-sized buffer.
        """

    def buffer_updated(self, nbytes):
        """Called when the buffer was updated with the received data.

        *nbytes* is the total number of bytes that were written to
        the buffer.
        """

    def eof_received(self):
        """Called when the other end calls write_eof() or equivalent.

        If this returns a false value (including None), the transport
        will close itself.  If it returns a true value, closing the
        transport is up to the protocol.
        """
class DatagramProtocol(BaseProtocol):
    """Interface for datagram protocol."""

    # No instance attributes of its own; see BaseProtocol.
    __slots__ = ()

    def datagram_received(self, data, addr):
        """Called when some datagram is received."""

    def error_received(self, exc):
        """Called when a send or receive operation raises an OSError.

        (Other than BlockingIOError or InterruptedError.)
        """
class SubprocessProtocol(BaseProtocol):
    """Interface for protocol for subprocess calls."""

    # No instance attributes of its own; see BaseProtocol.
    __slots__ = ()

    def pipe_data_received(self, fd, data):
        """Called when the subprocess writes data into stdout/stderr pipe.

        fd is int file descriptor.
        data is bytes object.
        """

    def pipe_connection_lost(self, fd, exc):
        """Called when a file descriptor associated with the child process is
        closed.

        fd is the int file descriptor that was closed.
        """

    def process_exited(self):
        """Called when subprocess has exited."""
def METHOD_NAME(proto, data):
    """Feed *data* to a BufferedProtocol, honouring its buffer sizes.

    Repeatedly asks *proto* for a receive buffer via get_buffer(), copies
    as much of *data* into it as fits, and reports each write with
    buffer_updated(), until every byte has been delivered.

    Raises RuntimeError if the protocol returns an empty buffer.
    """
    data_len = len(data)
    while data_len:
        buf = proto.get_buffer(data_len)
        buf_len = len(buf)
        if not buf_len:
            raise RuntimeError('get_buffer() returned an empty buffer')
        if buf_len >= data_len:
            # The remainder fits in one buffer: copy and finish.
            buf[:data_len] = data
            proto.buffer_updated(data_len)
            return
        else:
            # Partial fit: copy one chunk and loop with what's left.
            buf[:buf_len] = data[:buf_len]
            proto.buffer_updated(buf_len)
            data = data[buf_len:]
            data_len = len(data)
299,684 | add to list | # Copyright 1996-2023 Cyberbotics Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Contains the River class."""
from webots_objects.webots_object import WebotsObject
from webots_objects.road import Road
from settings import Settings
from osm_objects import OSMCoord
from utils.misc_utils import extract_float_from_string, clean_string, protect_def_name
class River(WebotsObject):
    """River class representing a river (OSM 'waterway' ways)."""

    # All rivers collected from the OSM input, in encounter order.
    list = []

    def __init__(self):
        """Initialize the river with neutral defaults."""
        self.OSMID = 0
        self.type = ""
        self.ref = 0     # list of OSM node refs once populated
        self.width = 2   # metres; overridden by tags or settings
        self.name = ""

    @staticmethod
    def METHOD_NAME(osmid, tags, ref):
        """Add a new river to the list of rivers.

        Only ways tagged 'waterway' with a matching settings section are
        accepted; width comes from the tag, then the settings, then the
        default.
        """
        if 'waterway' in tags:
            settingsSection = Settings.get_section('waterway', tags['waterway'])
            if settingsSection is None:
                return
            river = River()
            river.OSMID = osmid
            river.type = tags['waterway']
            river.ref = ref
            if 'name' in tags:
                river.name = clean_string(tags['name'])
            if 'width' in tags:
                river.width = extract_float_from_string(tags['width'])
            elif Settings.has_option(settingsSection, 'width'):
                river.width = Settings.getfloat(settingsSection, 'width')
            if WebotsObject.enable3D:
                # presumably inherited from WebotsObject -- densifies the
                # polyline for elevation; TODO confirm.
                river.ref = River.add_intermediate_point_where_needed(river.ref)
            River.list.append(river)

    @classmethod
    def export(cls, file):
        """Export all the rivers from the rivers list as Webots Extrusions."""
        for river in River.list[:]:
            if not river.ref:
                River.list.remove(river)
                continue
            if WebotsObject.removalRadius > 0.0:
                # Check that the river is inside the scope of a road,
                # otherwise, remove it from the river list.
                if not Road.are_coords_close_to_some_road_waypoint(river.ref):
                    River.list.remove(river)
                    continue
            if river.name:
                file.write("DEF " + protect_def_name(river.name) + " " + "Transform {\n")
            else:
                file.write("Transform {\n")
            # The first waypoint becomes the node origin; the spine below is
            # expressed relative to it.
            file.write(" translation %.2lf %.2lf %.2lf\n" % (OSMCoord.coordDictionnary[river.ref[0]].x,
                                                             OSMCoord.coordDictionnary[river.ref[0]].y,
                                                             OSMCoord.coordDictionnary[river.ref[0]].z))
            file.write(" children [\n")
            file.write(" Shape {\n")
            file.write(" appearance PBRAppearance {\n")
            file.write(" baseColor 0.3 0.5 0.8\n")
            file.write(" roughness 0.3\n")
            file.write(" metalness 0\n")
            file.write(" }\n")
            file.write(" geometry Extrusion {\n")
            file.write(" crossSection [\n")
            file.write(" %.2f 0\n" % (-river.width / 2))
            file.write(" %.2f 0.5\n" % (-river.width / 2))
            file.write(" %.2f 0.5\n" % (river.width / 2))
            file.write(" %.2f 0\n" % (river.width / 2))
            file.write(" %.2f 0\n" % (-river.width / 2))
            file.write(" ]\n")
            file.write(" spine [\n")
            for ref in river.ref:
                if ref in OSMCoord.coordDictionnary:
                    file.write(" %.2f %.2f %.2f,\n" % (OSMCoord.coordDictionnary[ref].x -
                                                       OSMCoord.coordDictionnary[river.ref[0]].x,
                                                       OSMCoord.coordDictionnary[ref].y -
                                                       OSMCoord.coordDictionnary[river.ref[0]].y,
                                                       OSMCoord.coordDictionnary[ref].z -
                                                       OSMCoord.coordDictionnary[river.ref[0]].z))
                else:
                    print("Warning: node " + str(ref) + " not referenced.")
            file.write(" ]\n")
            file.write(" splineSubdivision 0\n")
            file.write(" }\n")
            file.write(" castShadows FALSE\n")
            file.write(" }\n")
            file.write(" ]\n")
            file.write("}\n")
299,685 | main | #!/usr/bin/env python
"""
This example shows how to use the parametrization
feature of `@testcase` decorator.
"""
import sys
from testplan.testing.multitest import MultiTest, testsuite, testcase
from testplan import test_plan
from testplan.report.testing.styles import Style
@testsuite
class SimpleTest:
    """Parametrized testcase examples: tuples, combinatorial and shortcut forms."""

    # This will generate 4 new testcase methods, using a tuple for each one.
    @testcase(
        parameters=((5, 5, 10), (3, 2, 5), (0, 0, 0), ("foo", "bar", "foobar"))
    )
    def addition(self, env, result, a, b, expected):
        result.equal(a + b, expected)

    # Parametrization context for the generated testcases will be:
    # result.equal(5 + 5, 10)
    # result.equal(3 + 2, 5)
    # result.equal(0 + 0, 0)
    # result.equal('foo' + 'bar', 'foobar')

    # Combinatorial parametrization example
    # Associativity check of addition operation, (a + b = b + a)
    # This will generate 25 (5 x 5) methods.
    @testcase(
        parameters={
            "a": [1, 10, -5, -3.2, 3e12],
            "b": [0, 42, 4.2, -0.231, 5.5e5],
        }
    )
    def addition_associativity(self, env, result, a, b):
        # It's a good practice to generate a description
        # with the parametrized arguments as well.
        # So that you can have more context when you inspect the test report.
        result.equal(
            actual=a + b,
            expected=b + a,
            description="{a} + {b} == {b} + {a}".format(a=a, b=b),
        )

    # Generated testcases will have the following contexts:
    # result.equal(1 + 0, 0 + 1, ...)
    # result.equal(10 + 0, 0 + 10, ...)
    # result.equal(-5 + 0, 0 + -5, ...)
    # ...
    # ...
    # result.equal(3e12 + -.231, 3e12 + -.231, ...)
    # result.equal(3e12 + 5.5e5, 3e12 + 5.5e5, ...)

    # Shortcut notation that uses single values
    # for single argument parametrization
    # Assigns 2, 4, 6, 8 to `value` for each generated test case
    # Verbose notation would be
    # `parameters=((2,), (4,), (6,), (8,))` which is not that readable.
    @testcase(
        parameters=(
            2,  # first testcase
            4,  # second testcase
            6,  # third testcase
            8,  # fourth testcase
        )
    )
    def is_even(self, env, result, value):
        result.equal(value % 2, 0)
# The example below makes use of a custom name
# generation function for parametrization, so we can come up with more
# readable testcase method names on the test reports.
# Without a custom name function we would end up with method names like
# `func_raises_error <func=.., error=...>`; with it we get names like
# `func_raises_error__ValueError` instead.
def custom_error_name_func(func_name, kwargs):
    """Build a testcase name from the base name and the expected error type."""
    error_name = kwargs["error"].__name__
    return "{}__{}".format(func_name, error_name)
@testsuite
class ErrorTest:
    """Checks that each parametrized callable raises the expected error."""
    # The lambda functions in the parameters below try to
    # execute invalid Python code that raises certain errors.
    # The parametrized test method checks if the function
    # raises the expected error when it is run.
    # This will generate 5 methods, for each item in the tuple.
    @testcase(
        parameters=(
            # tuple notation, using default error value (TypeError)
            (lambda: "foo" + 5,),
            (lambda: object().b, AttributeError),
            (lambda: {"a": 5}["b"], KeyError),
            (lambda: int("a"), ValueError),
            (lambda: 10 / 0, ZeroDivisionError),
        ),
        # comment out the line below to see how
        # Testplan falls back to simple method names with integer suffixes
        name_func=custom_error_name_func,
    )
    def func_raises_error(self, env, result, func, error=TypeError):
        """Run `func` and assert it raises `error` (TypeError by default)."""
        with result.raises(error):
            func()
# This function returns the value of the product directly
# which will be interpreted as a simple tag.
def simple_tag_func(kwargs):
    """Return the title-cased product name as a simple (unnamed) tag."""
    product = kwargs["product"]
    return product.title()
# This function returns a dictionary that is interpreted as a named tag.
def named_tag_func(kwargs):
    """Return the title-cased product name as a named ("product") tag."""
    product_tag = kwargs["product"].title()
    return {"product": product_tag}
@testsuite
class ProductTest:
    """Sample testsuite that demonstrates how `tag_func` works."""
    @testcase(
        tags={"category": "CategoryA"},
        parameters=((2, 3, "productA"), (3, 4, "productB")),
        tag_func=simple_tag_func,
    )
    def simple_tag_func_test(self, env, result, a, b, product):
        # Each generated testcase carries a plain (unnamed) product tag.
        result.true(True)
    @testcase(
        tags={"category": "CategoryB"},
        parameters=((2, 3, "productA"), (3, 4, "productB")),
        tag_func=named_tag_func,
    )
    def named_tag_func_test(self, env, result, a, b, product):
        # Each generated testcase carries a named ("product") tag.
        result.true(True)
# Discard the original docstring, convert kwargs to str
def kwargs_to_string(docstring, kwargs):
    """Append the stringified parametrization kwargs under the docstring."""
    return "{}\n{}".format(docstring, str(kwargs))
# Use the original docstring, formatting
# it using kwargs via string interpolation.
# e.g. `foo: {foo}, bar: {bar}`.format(foo=2, bar=5)` -> 'foo: 2, bar: 5'
def interpolate_docstring(docstring, kwargs):
    """Fill the docstring template with the parametrization kwargs."""
    return docstring.format(**kwargs)
@testsuite
class DocStringTest:
    """Demonstrates `docstring_func` for customizing testcase docstrings."""
    @testcase(
        parameters=((2, 3, 5), (5, 10, 15)), docstring_func=kwargs_to_string
    )
    def addition_one(self, env, result, first, second, expected):
        """Test addition of two numbers."""
        # NOTE: the docstring above is consumed by `kwargs_to_string`, which
        # appends the stringified kwargs to it in the report.
        return result.equal(first + second, expected)
    @testcase(
        parameters=((2, 3, 5), (5, 10, 15)),
        docstring_func=interpolate_docstring,
    )
    def addition_two(self, env, result, first, second, expected):
        """
        Testing addition with: {first} + {second}
        Expected value: {expected}
        """
        # The docstring template above is filled in per generated testcase by
        # `interpolate_docstring` using the parametrization kwargs.
        return result.equal(first + second, expected)
@test_plan(
    name="Parametrization Example",
    # Using detailed assertions so we can
    # see testcase context for generated testcases
    stdout_style=Style("assertion-detail", "assertion-detail"),
)
def METHOD_NAME(plan):
    """Assemble the plan: one MultiTest containing all example suites."""
    plan.add(
        MultiTest(
            name="Primary",
            suites=[SimpleTest(), ErrorTest(), ProductTest(), DocStringTest()],
        )
    )
if __name__ == "__main__":
    # The plan result is truthy on success, so invert it for the exit code.
    sys.exit(not METHOD_NAME())
299,686 | test func annotation | import typing
from astroid import nodes
from pytest import skip
from python_ta.typecheck.base import (
TypeFail,
TypeFailAnnotationUnify,
TypeFailFunction,
TypeFailUnify,
)
from .. import custom_hypothesis_support as cs
def find_type_fail(ast_node):
    """Depth-first search for the first node whose inferred type is a TypeFail.

    Returns the node itself, or None when no failure exists in the subtree.
    """
    if isinstance(ast_node.inf_type, TypeFail):
        return ast_node
    for child in ast_node.get_children():
        found = find_type_fail(child)
        if found is not None:
            return found
    return None
def verify_typefail_unify(tf: TypeFailUnify, *exp_tnodes, exp_src_type, num_reasons):
    """Assert that `tf` is a TypeFailUnify with the expected shape.

    :param tf: the type failure under inspection.
    :param exp_tnodes: expected identifiers (node names) or raw types, one per
        type node recorded in the failure.
    :param exp_src_type: expected astroid node class of the failure's source.
    :param num_reasons: expected total number of parent-path steps collected
        across all type nodes.
    """
    assert isinstance(tf, TypeFailUnify)
    for tn, exp_r in zip(tf.tnodes, exp_tnodes):
        if tn.ast_node:
            # Named AST node: compare by variable name.
            assert tn.ast_node.name == exp_r
        else:
            # Anonymous type node: compare the concrete type directly.
            assert tn.type == exp_r
    assert isinstance(tf.src_node, exp_src_type)
    reasons = []
    for tn in tf.tnodes:
        reasons += tn.find_path_to_parent()
    assert len(reasons) == num_reasons
def verify_typefail_function(tf: TypeFailFunction, act_func_call: str):
    """Assert that `tf` is a TypeFailFunction raised at the given call text."""
    assert isinstance(tf, TypeFailFunction)
    assert isinstance(tf.src_node, nodes.Call)
    assert tf.src_node.as_string() == act_func_call
def test_var_assign():
    """Reassigning one variable from int to str fails to unify on it."""
    src = """
    A = 1
    A = 'One'
    """
    module, _ = cs._parse_text(src, reset=True)
    failure = find_type_fail(module).inf_type
    verify_typefail_unify(failure, "A", str, exp_src_type=nodes.Assign, num_reasons=1)
def test_two_var_assign():
    """Unifying two variables with conflicting concrete types fails."""
    src = """
    A = 1
    B = 'Two'
    A = B
    """
    module, _ = cs._parse_text(src, reset=True)
    failure = find_type_fail(module).inf_type
    verify_typefail_unify(failure, "A", "B", exp_src_type=nodes.Assign, num_reasons=2)
def test_var_chain():
    """A unify failure is reported at the end of a chain of assignments."""
    src = """
    A = 1
    Z = 'Zed'
    B = A
    C = B
    C = Z
    """
    module, _ = cs._parse_text(src, reset=True)
    failure = find_type_fail(module).inf_type
    verify_typefail_unify(failure, "C", "Z", exp_src_type=nodes.Assign, num_reasons=4)
def test_one_list():
    """Reassigning a list-typed variable to a str fails to unify."""
    src = """
    L1 = [1, 2, 3]
    L1 = "Hello"
    """
    module, _ = cs._parse_text(src, reset=True)
    failure = find_type_fail(module).inf_type
    verify_typefail_unify(failure, "L1", str, exp_src_type=nodes.Assign, num_reasons=1)
def test_two_lists():
    """Lists of int and lists of str do not unify."""
    src = """
    L1 = [1, 2, 3]
    L2 = ['a', 'b', 'c']
    L1 = L2
    """
    module, _ = cs._parse_text(src, reset=True)
    failure = find_type_fail(module).inf_type
    verify_typefail_unify(failure, "L1", "L2", exp_src_type=nodes.Assign, num_reasons=2)
def test_tuple():
    """Tuples with different element types do not unify."""
    src = """
    T1 = (1, 2)
    T2 = ('a', 'b')
    T1 = T2
    """
    module, _ = cs._parse_text(src, reset=True)
    failure = find_type_fail(module).inf_type
    verify_typefail_unify(failure, "T1", "T2", exp_src_type=nodes.Assign, num_reasons=2)
def test_annotation():
    """Assigning a str to an int-annotated name raises an annotation unify failure."""
    src = """
    x: int
    x = 'Hello'
    """
    module, _ = cs._parse_text(src, reset=True)
    failure = find_type_fail(module).inf_type
    assert isinstance(failure, TypeFailAnnotationUnify)
def METHOD_NAME():
    """Calling f('Hello') against an int-annotated parameter (currently skipped)."""
    src = """
    def f(x: int):
        return x
    f('Hello')
    """
    skip("Requires modifications to unify_call")
    module, _ = cs._parse_text(src, reset=True)
    failure = find_type_fail(module).inf_type
    assert isinstance(failure, TypeFailAnnotationUnify)
def test_function():
    """Passing a str where int arithmetic is inferred fails the call."""
    src = """
    def f(x):
        return x + 1
    f('Hello')
    """
    module, _ = cs._parse_text(src, reset=True)
    failure = find_type_fail(module).inf_type
    verify_typefail_function(failure, "f('Hello')")
def test_function_index():
    """The index of the single offending argument is reported."""
    src = """
    def f(x, y):
        return x + y + 1
    f(1, 'two')
    """
    module, _ = cs._parse_text(src, reset=True)
    failure = find_type_fail(module).inf_type
    verify_typefail_function(failure, "f(1, 'two')")
    assert failure.arg_indices == [1]
def test_function_multi_index():
    """Both offending argument indices are reported."""
    src = """
    def f(x, y):
        return x + y + 1
    f('one', 'two')
    """
    module, _ = cs._parse_text(src, reset=True)
    failure = find_type_fail(module).inf_type
    verify_typefail_function(failure, "f('one', 'two')")
    assert failure.arg_indices == [0, 1]
def test_function_numargs():
    """Calling with too few arguments fails the call."""
    src = """
    def f(x, y):
        return x + y
    f(5)
    """
    module, _ = cs._parse_text(src, reset=True)
    failure = find_type_fail(module).inf_type
    verify_typefail_function(failure, "f(5)")
def test_function_overload():
    """Too many positional arguments (even with a default param) fails the call."""
    src = """
    def f(x, y=0):
        return x + y + 1
    f(1, 2, 3)"""
    module, _ = cs._parse_text(src, reset=True)
    failure = find_type_fail(module).inf_type
    verify_typefail_function(failure, "f(1, 2, 3)")
299,687 | restore layer indexes | import os
import json
import logging
import shutil
import requests
from charmtools import fetchers
from charmtools.fetchers import (git, # noqa
Fetcher,
get_fetcher,
FetchError)
from path import Path as path
log = logging.getLogger(__name__)
class RepoFetcher(fetchers.LocalFetcher):
    """Fetcher that resolves bare names against local charm repositories."""
    @classmethod
    def can_fetch(cls, url):
        """Return {'path': p} if `url` names an existing local directory.

        The search path is the CWD, $JUJU_REPOSITORY, and every entry of the
        colon-separated $LAYER_PATH; an empty dict means "cannot fetch".
        """
        search_path = [os.getcwd(), os.environ.get("JUJU_REPOSITORY", ".")]
        cp = os.environ.get("LAYER_PATH")
        if cp:
            search_path.extend(cp.split(":"))
        for part in search_path:
            p = (path(part) / url).normpath()
            if p.exists():
                return dict(path=p)
        return {}
# Register with highest priority so local repositories win over remote fetchers.
fetchers.FETCHERS.insert(0, RepoFetcher)
class LayerFetcher(fetchers.LocalFetcher):
    """Fetcher for `layer:<name>[@revision]` URLs.

    A name is resolved first against local layer directories (unless
    NO_LOCAL_LAYERS is set) and then against one or more remote layer
    indexes, which map layer names to git repositories.
    """
    _DEFAULT_LAYER_INDEXES = ["https://juju.github.io/layer-index/"]
    LAYER_INDEXES = _DEFAULT_LAYER_INDEXES
    NO_LOCAL_LAYERS = False
    NAMESPACE = "layer"
    ENVIRON = "CHARM_LAYERS_DIR"
    OLD_ENVIRON = "LAYER_PATH"
    OPTIONAL_PREFIX = "juju-layer-"
    ENDPOINT = "layers"
    _DEFAULT_BRANCH = None
    BRANCH = _DEFAULT_BRANCH

    @classmethod
    def set_layer_indexes(cls, layer_indexes):
        """Replace the index list used for remote lookups.

        Accepts a list or a comma-separated string; the special entry
        'DEFAULT' expands to the built-in indexes.  A falsy value is a no-op.
        """
        if not layer_indexes:
            return
        if isinstance(layer_indexes, str):
            layer_indexes = layer_indexes.split(',')
        new_indexes = []
        for layer_index in layer_indexes:
            if layer_index == 'DEFAULT':
                new_indexes.extend(cls._DEFAULT_LAYER_INDEXES)
            else:
                new_indexes.append(layer_index)
        cls.LAYER_INDEXES = new_indexes

    @classmethod
    def METHOD_NAME(cls):
        """Restore the built-in default layer indexes."""
        cls.LAYER_INDEXES = cls._DEFAULT_LAYER_INDEXES

    @classmethod
    def set_branch(cls, branch):
        """Pin subsequent fetches to a VCS branch (falsy value is a no-op)."""
        if not branch:
            return
        cls.BRANCH = branch

    @classmethod
    def restore_branch(cls):
        """Clear any branch pin set by set_branch()."""
        cls.BRANCH = cls._DEFAULT_BRANCH

    @classmethod
    def can_fetch(cls, url):
        """Resolve `url`; return a fetch-info dict, or {} if unresolvable."""
        # Search local path first, then the interface webservice
        if url.startswith("{}:".format(cls.NAMESPACE)):
            remaining = url[len(cls.NAMESPACE) + 1:]
            parts = remaining.split("@", 2)
            name = parts[0]
            try:
                revision = parts[1]
            except IndexError:
                revision = None
            if not cls.NO_LOCAL_LAYERS:
                prefixed_name = '{}-{}'.format(cls.NAMESPACE, name)
                search_path = []
                # New-style env var wins over the legacy one, with
                # JUJU_REPOSITORY (or CWD) as the final fallback.
                if cls.ENVIRON in os.environ:
                    search_path.append(os.environ[cls.ENVIRON])
                elif cls.OLD_ENVIRON in os.environ:
                    search_path.append(os.environ[cls.OLD_ENVIRON])
                else:
                    search_path.append(os.environ.get("JUJU_REPOSITORY", "."))
                for part in search_path:
                    basepath = path(part)
                    for dirname in (name, prefixed_name):
                        p = (basepath / dirname).normpath()
                        if p.exists():
                            return dict(path=p)
            choices = [name]
            if name.startswith(cls.OPTIONAL_PREFIX):
                choices.append(name[len(cls.OPTIONAL_PREFIX):])
            for choice in choices:
                for layer_index in cls.LAYER_INDEXES:
                    uri = "%s%s/%s.json" % (
                        layer_index, cls.ENDPOINT, choice)
                    log.debug('Checking layer index: {}'.format(uri))
                    if uri.startswith('file://'):
                        # Local index: read the JSON entry straight from disk.
                        choice_path = path(uri[7:])
                        if not choice_path.exists():
                            continue
                        try:
                            result = json.loads(choice_path.text())
                        except json.JSONDecodeError as e:
                            log.error('Unable to parse index entry for {}: '
                                      '{}'.format(url, e))
                            continue
                        if not result.get('repo'):
                            continue
                        log.debug('Found repo: {}'.format(result['repo']))
                        if revision:
                            result.update(revision=revision)
                        return result
                    try:
                        result = requests.get(uri)
                    except Exception:
                        # Network failure: fall through to the next index.
                        result = None
                    if result and result.ok:
                        result = result.json()
                        if result.get("repo"):
                            log.debug('Found repo: {}'.format(result['repo']))
                            if revision:
                                result.update(revision=revision)
                            return result
        return {}

    def target(self, dir_):
        """Return a :class:`path` of the directory where the downloaded item
        will be located.

        :param str dir_: Directory into which the item will be downloaded.
        :return: :class:`path`
        """
        if hasattr(self, "path"):
            return self.path
        elif hasattr(self, "repo"):
            _, target = self._get_repo_fetcher_and_target(self.repo, dir_)
            return target

    def _get_repo_fetcher_and_target(self, repo, dir_):
        """Returns a :class:`Fetcher` for ``repo``, and the destination dir
        at which the downloaded repo will be created.

        :param str repo: The repo url.
        :param str dir_: Directory into which the repo will be downloaded.
        :return: 2-tuple of (:class:`Fetcher`, :class:`path`)
        """
        u = self.url[len(self.NAMESPACE) + 1:]
        if '@' in u:
            u = u.split('@')[0]
        if getattr(self, 'revision', ''):
            f = get_fetcher("{}@{}".format(repo, self.revision))
        else:
            f = get_fetcher(repo)
        return f, path(dir_) / u

    def fetch(self, dir_):
        """Download the layer into ``dir_`` and return the resulting path."""
        if hasattr(self, "path"):
            return super(LayerFetcher, self).fetch(dir_)
        elif hasattr(self, "repo"):
            f, target = self._get_repo_fetcher_and_target(self.repo, dir_)
            log.debug('Using fetcher: {}'.format(f))
            if self.BRANCH is not None:
                log.debug('Adding branch: %s', self.BRANCH)
                f.revision = self.BRANCH
            orig_res = res = f.fetch(dir_)
            # Bugfix: supply a default so fetchers without a `fetched_url`
            # attribute don't raise AttributeError while logging (the
            # assignment below already defended with a default).
            log.debug("url fetched (for lockfile): %s",
                      getattr(f, 'fetched_url', None))
            self.fetched_url = getattr(f, 'fetched_url', None)
            self.vcs = getattr(f, 'vcs', None)
            # make sure we save the revision of the actual repo, before we
            # start traversing subdirectories and moving contents around
            self.revision = self.get_revision(res)
            if res != target:
                res = path(res)
                if hasattr(self, 'subdir'):
                    res = res / self.subdir
                target.rmtree_p()
                log.debug('Copying {} to {}'.format(res, target))
                shutil.copytree(res, target)
                log.debug('Cleaning up {}'.format(orig_res))
                path(orig_res).rmtree_p()  # cleanup from cache after copied
            return target

fetchers.FETCHERS.insert(0, LayerFetcher)
class InterfaceFetcher(LayerFetcher):
    """Fetcher for `interface:<name>` URLs; reuses the LayerFetcher logic
    with interface-specific namespace, env vars and index endpoint."""
    NAMESPACE = "interface"
    ENVIRON = "CHARM_INTERFACES_DIR"
    OLD_ENVIRON = "INTERFACE_PATH"
    OPTIONAL_PREFIX = "juju-relation-"
    ENDPOINT = "interfaces"
fetchers.FETCHERS.insert(0, InterfaceFetcher)
299,688 | test nested generators1 | # pylint: disable=missing-function-docstring, missing-module-docstring
import pytest
import numpy as np
from numpy.random import randint, rand
from pyccel.epyccel import epyccel
def test_sum_range(language):
    """Sum of array elements via an index-based generator expression."""
    def f(a0 : 'int[:]'):
        return sum(a0[i] for i in range(len(a0)))
    length = randint(1, 50)
    data = randint(100, size=length)
    f_compiled = epyccel(f, language=language)
    assert f(data) == f_compiled(data)
def test_sum_var(language):
    """Sum over a direct element generator."""
    def f(a : 'int[:]'):
        return sum(ai for ai in a)
    length = randint(1, 50)
    data = randint(100, size=length)
    f_compiled = epyccel(f, language=language)
    assert f(data) == f_compiled(data)
def test_sum_var2(language):
    """Sum over a 2-D array with a nested generator."""
    def f(a : 'int[:,:]'):
        return sum(aii for ai in a for aii in ai)
    shape = (randint(1, 10), randint(1, 10))
    data = randint(10, size=shape)
    f_compiled = epyccel(f, language=language)
    assert f(data) == f_compiled(data)
def test_sum_var3(language):
    """Sum over a 3-D array using explicit index ranges."""
    def f(a : 'int[:,:,:]'):
        m,n,p = a.shape
        return sum(a[i,j,k] for i in range(m) for j in range(n) for k in range(p))
    shape = (randint(1, 10), randint(1, 10), randint(1, 10))
    data = randint(10, size=shape)
    f_compiled = epyccel(f, language=language)
    assert f(data) == f_compiled(data)
def test_sum_var4(language):
    """Generator sum returned in a tuple alongside a scalar."""
    def f(a : 'int[:]'):
        s = 3
        return sum(ai for ai in a),s
    length = randint(1, 50)
    data = randint(100, size=length)
    f_compiled = epyccel(f, language=language)
    assert f(data) == f_compiled(data)
@pytest.mark.parametrize( 'language', (
        pytest.param("fortran", marks = pytest.mark.fortran),
        pytest.param("c", marks = [
            pytest.mark.xfail(reason="Max not implemented in C for integers"),
            pytest.mark.c]
        ),
        pytest.param("python", marks = pytest.mark.python)
    )
)
def test_max(language):
    """max() over a generator with a conditional expression."""
    def f():
        return max(i if i>k else k for i in range(5) for k in range(10))
    f_epyc = epyccel(f, language = language)
    assert f() == f_epyc()
@pytest.mark.parametrize( 'language', (
        pytest.param("fortran", marks = pytest.mark.fortran),
        pytest.param("c", marks = [
            pytest.mark.xfail(reason="Min not implemented in C for integers"),
            pytest.mark.c]
        ),
        pytest.param("python", marks = pytest.mark.python)
    )
)
def test_min(language):
    """min() over a generator with a nested conditional expression."""
    def f():
        return min(k if i>k else 0 if i==k else i for i in range(5) for k in range(10))
    f_epyc = epyccel(f, language = language)
    assert f() == f_epyc()
def test_expression1(language):
    """Compound arithmetic expression mixing sum() and min() generators."""
    def f(b : 'float[:]'):
        n = b.shape[0]
        return (2*sum(b[i] for i in range(n))**5+5)*min(j+1. for j in b)**4+9
    length = randint(1, 10)
    data = np.array(randint(100, size=length), dtype=float)
    f_compiled = epyccel(f, language=language)
    assert f(data) == f_compiled(data)
@pytest.mark.parametrize( 'language', (
        pytest.param("fortran", marks = pytest.mark.fortran),
        pytest.param("c", marks = [
            pytest.mark.xfail(reason="Function in function not implemented in C"),
            pytest.mark.c]
        ),
        pytest.param("python", marks = pytest.mark.python)
    )
)
def test_expression2(language):
    """Nested helper-function calls wrapping a generator sum."""
    def f(b : 'int[:]'):
        def incr(x : int):
            y = x + 1
            return y
        n = b.shape[0]
        return 5+incr(2+incr(6+sum(b[i] for i in range(n))))
    n = randint(1,10)
    x = randint(100,size=n)
    f_epyc = epyccel(f, language = language)
    assert f(x) == f_epyc(x)
def METHOD_NAME(language):
    """Triple-nested sum() generators over a 4-D array."""
    def f(a : 'float[:,:,:,:]'):
        return sum(sum(sum(a[i,k,o,2] for i in range(5)) for k in range(5)) for o in range(5))
    data = randint(0, 50, size=(5,5,5,5)).astype(float)
    f_compiled = epyccel(f, language=language)
    assert f(data) == f_compiled(data)
def test_nested_generators2(language):
    """Deeply nested min/max/sum generators over a 4-D array."""
    def f(a : 'float[:,:,:,:]'):
        return min(min(sum(min(max(a[i,k,o,l]*l for i in range(5)) for k in range(5)) for o in range(5)) for l in range(5)),0.)
    data = randint(0, 50, size=(5,5,5,5)).astype(float)
    f_compiled = epyccel(f, language=language)
    assert f(data) == f_compiled(data)
def test_nested_generators3(language):
    """Squared result of nested sum() generators with fixed trailing indices."""
    def f(a : 'float[:,:,:,:]'):
        return sum(sum(a[i,k,4,2] for i in range(5)) for k in range(5))**2
    data = randint(0, 10, size=(5,5,5,5)).astype(float)
    f_compiled = epyccel(f, language=language)
    assert f(data) == f_compiled(data)
def test_nested_generators4(language):
def f(a : 'float[:,:,:,:]'):
return min(max(a[i,k,4,2] for i in range(5)) for k in range(5))**2
x = randint(0, 10, size=(5,5,5,5)).astype(float)
f_epyc = epyccel(f, language = language)
assert f(x) == f_epyc(x |
299,689 | process uploaded form | import os
import tempfile
import textwrap
from zipfile import BadZipFile, ZipFile
from django import forms
from django.contrib import messages
from django.contrib.gis import admin
from django.contrib.gis.forms import MultiPolygonField, OSMWidget
from django.contrib.gis.gdal import DataSource
from django.contrib.gis.geos import MultiPolygon
from django.db import IntegrityError, transaction
from django.http import HttpResponseRedirect
from django.template.response import TemplateResponse
from django.urls import path
from django.utils.translation import ugettext_lazy as _
from enhydris import models
class MissingAttribute(Exception):
    """Raised when a shapefile feature lacks a required attribute."""
    pass
@admin.register(models.GareaCategory)
class GareaCategory(admin.ModelAdmin):
    """Default admin interface for Garea categories."""
    pass
class GareaUploadForm(forms.Form):
    """Admin form for bulk-replacing all Gareas of a category from a shapefile."""
    category = forms.ModelChoiceField(
        required=True, label=_("Object category"), queryset=models.GareaCategory.objects
    )
    file = forms.FileField(
        required=True,
        label=_("File"),
        help_text=_(
            """
            The shapefile. It must be a .zip containing a .shp, a .shx and a .dbf. The
            objects in the shapefile must contain a "Name" attribute and optionally a
            "Code" attribute (any other attributes will be ignored). All objects in the
            selected category will be removed and replaced with the ones found in the
            shapefile.
            """
        ),
    )
    def clean_file(self):
        """Validate that the upload is a zip with exactly one .shp, .shx and .dbf."""
        data = self.cleaned_data["file"]
        try:
            zipfile = ZipFile(data)
            interesting_files = {
                x
                for x in zipfile.namelist()
                if x.lower()[-4:] in (".shp", ".shx", ".dbf")
            }
            # After deduplication by full name, sorting the extensions must
            # yield exactly one of each kind.
            extensions = sorted([x.lower()[-4:] for x in interesting_files])
            if extensions != [".dbf", ".shp", ".shx"]:
                raise BadZipFile()
        except BadZipFile:
            raise forms.ValidationError(
                "This is not a zip file, or it doesn't contain exactly one .shp, .shx "
                "and .dbf file."
            )
        return data
class GareaForm(forms.ModelForm):
    """Garea change form with a read-only map preview of the geometry."""
    geometry = MultiPolygonField(
        widget=OSMWidget,
        disabled=True,
        required=False,
        help_text=textwrap.dedent(
            """\
            This map is only for viewing, not for editing the area. To change the area
            you need to upload another shapefile.
            """
        ),
    )
    class Meta:
        model = models.Garea
        exclude = ()
@admin.register(models.Garea)
class GareaAdmin(admin.ModelAdmin):
    """Admin for Garea with an extra bulk shapefile upload view."""
    form = GareaForm
    list_display = ["id", "name", "code", "category"]
    list_filter = ["category"]
    search_fields = ("id", "name", "code")

    def get_urls(self):
        """Prepend the bulk_add view to the default admin URLs."""
        urls = super().get_urls()
        new_urls = [path("bulk_add/", self.admin_site.admin_view(self.bulk_add))]
        return new_urls + urls

    def bulk_add(self, request):
        """GET: show the upload form; POST: process the uploaded shapefile."""
        if request.method == "POST":
            return self._bulk_post(request)
        else:
            return self._get_template_response(request, GareaUploadForm())

    def _bulk_post(self, request):
        """Validate the upload form and dispatch to the processing step."""
        form = GareaUploadForm(request.POST, request.FILES)
        if form.is_valid():
            return self.METHOD_NAME(request, form)
        else:
            # Re-render the form so validation errors are shown.
            return self._get_template_response(request, form)

    def METHOD_NAME(self, request, form):
        """Replace all objects of the selected category with shapefile contents.

        Reports success or failure through the admin messages framework and
        redirects back to the upload page either way.
        """
        try:
            category = models.GareaCategory.objects.get(id=request.POST["category"])
            nnew, nold = self._process_uploaded_shapefile(
                category, request.FILES["file"]
            )
        except IntegrityError as e:
            messages.add_message(request, messages.ERROR, str(e))
        else:
            messages.add_message(
                request,
                messages.INFO,
                _(
                    "Replaced {} existing objects in category {} with {} new objects"
                ).format(nold, category.descr, nnew),
            )
        return HttpResponseRedirect("")

    def _get_template_response(self, request, form):
        """Render the bulk_add template with the given form."""
        return TemplateResponse(
            request, "admin/enhydris/garea/bulk_add.html", {"form": form}
        )

    @transaction.atomic
    def _process_uploaded_shapefile(self, category, file):
        """Extract the zip, delete existing Gareas of `category`, import features.

        Returns (number of imported objects, number of deleted objects).
        Runs in a transaction, so a bad shapefile rolls the deletion back.
        """
        zipfile = ZipFile(file)
        shapefilename = [x for x in zipfile.namelist() if x.lower()[-4:] == ".shp"][0]
        with tempfile.TemporaryDirectory() as tmpdir:
            zipfile.extractall(path=tmpdir)
            shapefile = os.path.join(tmpdir, shapefilename)
            delete_result = models.Garea.objects.filter(category=category).delete()
            try:
                nold = delete_result[1]["enhydris.Garea"]
            except KeyError:
                # Nothing existed for this model/category.
                nold = 0
            nnew = 0
            # Open the data source once (a duplicate, unused DataSource open
            # that used to precede the delete() has been removed).
            layer = DataSource(shapefile)[0]
            for feature in layer:
                garea = self._get_garea(feature, category)
                garea.save()
                nnew += 1
        return nnew, nold

    def _get_garea(self, feature, category):
        """Build an (unsaved) Garea from a shapefile feature."""
        garea = models.Garea()
        if isinstance(feature.geom.geos, MultiPolygon):
            garea.geom = feature.geom.geos
        else:
            # Wrap single polygons so the geometry column type is uniform.
            garea.geom = MultiPolygon(feature.geom.geos)
        garea.name = self._get_feature_attr(feature, "Name")
        garea.code = self._get_feature_attr(feature, "Code", allow_empty=True) or ""
        garea.category = category
        return garea

    def _get_feature_attr(self, feature, attr, allow_empty=False):
        """Return attribute `attr` of `feature`; raise MissingAttribute if a
        required attribute is absent or empty."""
        try:
            value = feature.get(attr)
        except IndexError:
            value = None
        if value or allow_empty:
            return value
        raise MissingAttribute(
            'Feature with fid={} does not have a "{}" attribute'.format(
                feature.fid, attr
            )
        )
299,690 | process line | #!/usr/bin/env python3.2
#
# Report on how things are encoded in one or more bulk_extractor reports
#
import sys,os
#sys.path.append(os.getenv("DOMEX_HOME") + "/src/lib/") #
#sys.path.append("../lib/") #
#sys.path.append(os.getenv("HOME") + "/lib") #
#sys.path.append(os.getenv("HOME") + "/gits/bulk_extractor/python") # for bulk_extractor_reader
from collections import Counter,defaultdict
from statbag import statbag
import bulk_extractor_reader
import ttable
from ttable import tvar
import glob
ignored_features = ['bulk_tags.txt','wordlist.txt']
# configuration
MINRECOVERABLEFILES = 100
import re
PF = "1) Plain in File"
EF = "2) Encoded in File"
PNF = "3) Plain Not in File"
ENF = "4) Encoded Not in File"
space="\\hspace{2pc}"
def drive_name(path):
    """Return the final component of a drive path.

    A single trailing slash is ignored, e.g. "/a/b/" -> "b".  Unlike the
    previous version, an empty path returns "" instead of raising
    IndexError on the path[-1] check.
    """
    if path.endswith('/'):
        path = path[:-1]
    return path.split("/")[-1]
def METHOD_NAME(line):
    """Split a raw feature-file line into (path, encoding, feature, nofilename).

    Lines with fewer than 3 tab-separated fields yield (None, None, None, None).
    The encoding chain is the forensic path with its digits removed and
    dangling dashes trimmed.
    """
    fields = line.split(b'\t')
    if len(fields) < 3:
        return (None, None, None, None)
    nofilename = len(fields) < 5
    path = fields[0].decode('utf-8')
    # Strip the digits out of the forensic path, leaving the encoding names.
    encoding = "".join(ch for ch in path if not ch.isdigit())
    encoding = encoding.replace("BASE", "BASE64")  # put back BASE64
    encoding = encoding.replace("--", "-")
    if encoding.startswith('-'):
        encoding = encoding[1:]
    if encoding.endswith('-'):
        encoding = encoding[:-1]
    feature = bulk_extractor_reader.decode_feature(fields[1])
    return (path, encoding, feature, nofilename)
def get_line_context(line):
    """Return the context column (third tab-separated field) of a feature line."""
    return line.split(b'\t', 3)[2]
class Drive:
    """Reads a bulk_extractor report for a drive. Determine the total number of encodings used for each feature file type.:
    """
    def __init__(self,fn):
        """fn is the filename of the report directory or zip file"""
        self.fn = fn                   # filename of this drive
        self.f_encoding_counts = defaultdict(Counter) # for each feature type, track the encodings
        self.uderror = 0               # count of undecodable feature lines
    def process(self):
        """Scan every non-ignored feature file in this drive's report."""
        ber = bulk_extractor_reader.BulkReport(self.fn,do_validate=False)
        for ff in ber.feature_files():
            if ff in ignored_features: continue
            print("Processing {} in {}".format(ff,self.fn))
            self.process_feature_file(ber,ff)
    def process_feature_file(self,ber,ff):
        """Tally the encoding chain of each feature line in file `ff`."""
        for line in ber.open(ff,'rb'):
            # Remove lines that aren't features
            # (feature lines always start with a decimal offset digit).
            if line[0]<ord('0') or line[0]>ord('9'):
                continue
            try:
                (path,encoding,feature,nofilename) = METHOD_NAME(line)
                self.f_encoding_counts[ff][encoding] += 1
            except UnicodeDecodeError:
                # Malformed bytes in the line; count the failure and move on.
                self.uderror += 1
                pass
            except UnicodeEncodeError:
                self.uderror += 1
                pass
            except SystemError:
                self.uderror += 1
                pass
if __name__=="__main__":
    import argparse
    parser = argparse.ArgumentParser(description="Experiment")
    parser.add_argument("bereports",help="bulk_extractor reports to process",type=str,nargs="+")
    parser.add_argument("--verbose",help="Print all email addresses",action='store_true')
    parser.add_argument("--latex",help="output file for LaTeX",type=str)
    args = parser.parse_args()
    if not args.bereports:
        parser.print_help()
    l = None
    if args.latex:
        # LaTeX output requested: route tvar macro definitions to the file.
        l = open(args.latex,"wt")
        tvar.out = l
        tvar("MINRECOVERABLEFILES",MINRECOVERABLEFILES,"Minimum Recoverable Files")
        tvar("YEARSTART",2005,"First Year") # until I can get a better value
        tvar("YEAREND",2013,"Last Year") # until I can get a better value
    # Perform glob expansion for Windows
    files = []
    for fn in args.bereports:
        if "*" in fn:
            files += glob.glob(fn)
        else:
            files += [fn]
    # Aggregate each drive's per-feature encoding counts into statbags,
    # keyed first by feature file then by encoding chain.
    drive_encoding_counts = {}
    for fn in files:
        print("")
        d = Drive(fn)
        d.process()
        for ff in d.f_encoding_counts:
            if ff not in drive_encoding_counts: drive_encoding_counts[ff] = defaultdict(statbag)
            for encoding in d.f_encoding_counts[ff]:
                drive_encoding_counts[ff][encoding].addx(d.f_encoding_counts[ff][encoding])
    # Now that the data have been collected, typeset the big table
    t = ttable.ttable()
    t.latex_colspec = "lrrrrr"
    t.append_head(('', 'Drives with','Feature', 'avg', 'max', ''))
    t.append_head(('Feature / Coding','coding' ,'Count','per drive','per drive','$\\sigma$'))
    t.set_col_alignment(1,t.LEFT)
    t.set_col_alignment(2,t.RIGHT)
    t.set_col_alignment(3,t.RIGHT)
    t.set_col_alignment(4,t.RIGHT)
    t.set_col_alignment(5,t.RIGHT)
    rep = [] # report will be sorted by second column
    print("\n"*4)
    for ff in sorted(drive_encoding_counts.keys()):
        for enc in sorted(drive_encoding_counts[ff].keys()):
            k = ff + " / " + str(enc)
            sb = drive_encoding_counts[ff][enc]
            row = (k,sb.count(),sb.sumx(),sb.average(),sb.maxx(),sb.stddev())
            t.append_data(row)
        t.append_data(())
    print(t.typeset(mode='text'))
    if l:
        # Wrap the LaTeX table in a macro so documents can \studyStats it in.
        s = t.typeset(mode='latex')
        l.write("\\newcommand{\\studyStats}{"+s+"}\n")
|
299,691 | prepare | # Copyright 2022 PerfKitBenchmarker Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""The sysbench memory benchmark is a memory test in sysbench.
The sysbench memory benchmark will allocate a memory buffer and then read or
write from it, each time for the size of a pointer (so 32bit or 64bit), and each
execution until the total buffer size has been read from or written to.
"""
import io
import re
from typing import Any, Dict, List, Tuple
import numpy as np
from perfkitbenchmarker import benchmark_spec as bm_spec
from perfkitbenchmarker import configs
from perfkitbenchmarker import sample
BENCHMARK_NAME = 'sysbench_memory'
BENCHMARK_CONFIG = """
sysbench_memory:
description: Runs sysbench memory on all vCPU's of a VM.
vm_groups:
default:
vm_spec: *default_single_core
"""
def GetConfig(user_config: Dict[str, Any]) -> Dict[str, Any]:
  """Returns the configuration of a benchmark.

  Args:
    user_config: User-supplied overrides merged into BENCHMARK_CONFIG.
  """
  return configs.LoadConfig(BENCHMARK_CONFIG, user_config, BENCHMARK_NAME)
def METHOD_NAME(benchmark_spec: bm_spec.BenchmarkSpec) -> None:
  """Prepares the VMs and other resources for running the benchmark.

  Installs the sysbench package on the benchmark VM.

  Args:
    benchmark_spec: The benchmark spec for this sample benchmark.
  """
  benchmark_spec.vms[0].Install('sysbench')
def _ParseSysbenchMemoryOutput(sysbench_memory_output: str) -> List[float]:
"""Parses sysbench_memory output.
Extract relevant TPS numbers.
Args:
sysbench_memory_output: The output from sysbench.
Returns:
An array, the tps numbers.
"""
tps_numbers = []
sysbench_memory_output_io = io.StringIO(sysbench_memory_output)
for line in sysbench_memory_output_io:
# find lines with throughput information
# sample output: "38225.96 MiB transferred (3822.38 MiB/sec)"
match = re.search('[0-9]+[.,]?[0-9]* MiB/sec', line)
if match:
tps_numbers.append(float(match.group(0).split()[0]))
return tps_numbers
def _AddMetricsFromTPSNumbers(
tps_numbers: List[float]) -> List[Tuple[str, float, str]]:
"""Computes relevant metrics from tps_numbers.
Specifically, we are interested in min/max/mean/(median-min)/(max-min).
Args:
tps_numbers: TPS numbers for each vCPU.
Returns:
An array, the tps metrics.
"""
tps_min = np.min(tps_numbers)
tps_max = np.max(tps_numbers)
tps_mean = np.round(np.mean(tps_numbers), 2)
tps_median = np.median(tps_numbers)
vcpu_variation = np.round(tps_median - tps_min, 2)
tps_range = np.round(tps_max - tps_min, 2)
total = np.sum(tps_numbers)
size = len(tps_numbers)
all_max_diff = np.round((tps_max*size) - total, 2)
all_median_diff = np.round((tps_median*size) - total, 2)
metrics = []
tps_unit = 'MiB/sec'
metrics.append(('Minimum_throughput', tps_min, tps_unit))
metrics.append(('Maximum_throughput', tps_max, tps_unit))
metrics.append(('Average_throughput', tps_mean, tps_unit))
metrics.append(('vcpu_variation', vcpu_variation, tps_unit))
metrics.append(('Range_throughput', tps_range, tps_unit))
metrics.append(('Difference_from_all_max', all_max_diff, tps_unit))
metrics.append(('Difference_from_all_median', all_median_diff, tps_unit))
return metrics
def GenerateMetricsForSysbenchMemoryOutput(
    sysbench_memory_output: str, metadata: Dict[str,
                                                Any]) -> List[sample.Sample]:
  """Generates results, an array of samples from sysbench_memory output.

  Obtains TPS metrics from _ParseSysbenchMemoryOutput and generates samples.

  Args:
    sysbench_memory_output: The output from sysbench.
    metadata: associated metadata

  Returns:
    results: a list of Samples
  """
  tps_numbers = _ParseSysbenchMemoryOutput(sysbench_memory_output)
  return [
      sample.Sample(name, value, unit, metadata)
      for name, value, unit in _AddMetricsFromTPSNumbers(tps_numbers)
  ]
def Run(benchmark_spec: bm_spec.BenchmarkSpec) -> List[sample.Sample]:
  """Runs the benchmark and returns a dict of performance data.

  It must be possible to run the benchmark multiple times after the Prepare
  stage.

  Args:
    benchmark_spec: The benchmark spec for this sample benchmark.

  Returns:
    A list of performance samples.
  """
  metadata = {}
  vm = benchmark_spec.vms[0]
  num_cpus = vm.NumCpusForBenchmark()
  # Run sysbench memory once per vCPU, pinned with taskset, so the output
  # contains one throughput figure per CPU.
  stdout, _ = vm.RemoteCommand(f'for CPU in `seq 0 {num_cpus-1}`; do echo -n '
                               '"CPU $CPU "; taskset --cpu-list $CPU sysbench '
                               'memory run; done')
  return GenerateMetricsForSysbenchMemoryOutput(stdout, metadata)
def Cleanup(benchmark_spec: bm_spec.BenchmarkSpec) -> None:
  """Cleans up after the benchmark completes.

  The state of the VMs should be equivalent to the state before Prepare was
  called.

  Args:
    benchmark_spec: The benchmark spec for this sample benchmark.
  """
  # Nothing to undo; discard the unused argument reference.
  del benchmark_spec
299,692 | test return id sum | # -*- coding: utf-8 -*-
# Copyright (C) 2012-2018 Red Hat, Inc.
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions of
# the GNU General Public License v.2, or (at your option) any later version.
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY expressed or implied, including the implied warranties of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details. You should have received a copy of the
# GNU General Public License along with this program; if not, write to the
# Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA. Any Red Hat trademarks that are incorporated in the
# source code or documentation are not subject to the GNU General Public
# License and may only be used or replicated with the express permission of
# Red Hat, Inc.
#
from __future__ import absolute_import
from __future__ import unicode_literals
import binascii
import hawkey
import rpm
from dnf.pycomp import long
import tests.support
from tests.support import mock
# Known-good checksums and size for the "tour" test package fixture.
TOUR_MD5 = binascii.unhexlify("68e9ded8ea25137c964a638f12e9987c")
TOUR_SHA256 = binascii.unhexlify("ce77c1e5694b037b6687cf0ab812ca60431ec0b65116abbb7b82684f0b092d62")
# Deliberately corrupted MD5 used to exercise verification failure.
TOUR_WRONG_MD5 = binascii.unhexlify("ffe9ded8ea25137c964a638f12e9987c")
# Size in bytes of the tour test package.
TOUR_SIZE = 2317
class PackageTest(tests.support.DnfBaseTestCase):
    """Tests for dnf package objects: origin flags, headers and checksums."""

    REPOS = ['main']

    def setUp(self):
        super(PackageTest, self).setUp()
        # Use the second available "pepper" package as the shared fixture.
        self.pkg = self.sack.query().available().filter(name="pepper")[1]

    def test_from_cmdline(self):
        # Only packages added via the command-line repo report _from_cmdline.
        self.sack.create_cmdline_repo()
        local_pkg = self.sack.add_cmdline_package(tests.support.TOUR_44_PKG_PATH)
        self.assertTrue(local_pkg._from_cmdline)
        self.assertFalse(self.pkg._from_cmdline)

    def test_from_system(self):
        # Installed packages report _from_system; repo packages do not.
        pkg = self.sack.query().installed().filter(name="pepper")[0]
        self.assertTrue(pkg._from_system)
        self.assertFalse(self.pkg._from_system)

    def test_header(self):
        # _header loads the rpm header from the local file and raises IOError
        # when the backing file does not exist.
        self.sack.create_cmdline_repo()
        pkg = self.sack.add_cmdline_package(tests.support.TOUR_44_PKG_PATH)
        header = pkg._header
        self.assertIsInstance(header, rpm.hdr)

        def fn_getter():
            return tests.support.NONEXISTENT_FILE

        with mock.patch.object(pkg, 'localPkg', fn_getter):
            with self.assertRaises(IOError):
                pkg._header

    # rpm.hdr() is not easy to construct with custom data, we just return a string
    # instead, as we don't actually need an instance of rpm.hdr for the test
    @mock.patch("rpm.TransactionSet.dbMatch", lambda self, a, b: iter(["package_header_test_data"]))
    def test_get_header(self):
        # Installed packages resolve their header through the rpm database ...
        pkg = self.sack.query().installed().filter(name="pepper")[0]
        header = pkg.get_header()
        self.assertEqual(header, "package_header_test_data")
        # ... while non-installed packages have no rpmdb entry to return.
        pkg = self.sack.query().available().filter(name="pepper")[0]
        header = pkg.get_header()
        self.assertEqual(header, None)

    @mock.patch("dnf.package.Package.rpmdbid", long(3))
    def test_idx(self):
        """ pkg.idx is an int. """
        pkg = self.sack.query().installed().filter(name="pepper")[0]
        self.assertEqual(type(pkg.idx), int)

    def test_pkgtup(self):
        # (name, arch, epoch, version, release) tuple of the fixture package.
        self.assertEqual(self.pkg.pkgtup, ('pepper', 'x86_64', '0', '20', '0'))

    @mock.patch("dnf.package.Package.location", 'f/foo.rpm')
    def test_localPkg(self):
        # file:// base URLs resolve directly on disk ...
        self.pkg.repo.basecachedir = '/cachedir'
        self.pkg.repo.baseurl = ['file:///mnt/cd']
        self.assertTrue(self.pkg._is_local_pkg())
        self.assertEqual(self.pkg.localPkg(), '/mnt/cd/f/foo.rpm')
        # ... while remote URLs resolve into the repo package cache.
        self.pkg.repo.baseurl = ['http://remote']
        self.assertFalse(self.pkg._is_local_pkg())
        self.assertEqual(self.pkg.localPkg(),
                         self.pkg.repo._repo.getCachedir() + '/packages/foo.rpm')

    def test_verify(self):
        # Verification succeeds with the correct MD5 and fails with a wrong one.
        with mock.patch.object(self.pkg, 'localPkg',
                               return_value=tests.support.TOUR_44_PKG_PATH):
            self.pkg._chksum = (hawkey.CHKSUM_MD5, TOUR_MD5)
            self.pkg._size = TOUR_SIZE
            self.assertTrue(self.pkg.verifyLocalPkg())
            self.pkg._chksum = (hawkey.CHKSUM_MD5, TOUR_WRONG_MD5)
            self.assertFalse(self.pkg.verifyLocalPkg())

    def METHOD_NAME(self):
        # returnIdSum() exposes the checksum as a (type-name, hex-digest) pair.
        self.pkg._chksum = (hawkey.CHKSUM_MD5, TOUR_MD5)
        self.assertEqual(self.pkg.returnIdSum(),
                         ('md5', '68e9ded8ea25137c964a638f12e9987c'))

    def test_verify_local(self):
        # Command-line packages verify against the file they were added from.
        self.sack.create_cmdline_repo()
        local_pkg = self.sack.add_cmdline_package(tests.support.TOUR_44_PKG_PATH)
        self.assertEqual(local_pkg.reponame, hawkey.CMDLINE_REPO_NAME)
        self.assertTrue(local_pkg.verifyLocalPkg())

    def test_chksum_local(self):
        # Command-line packages get a SHA256 checksum computed from the file.
        self.sack.create_cmdline_repo()
        local_pkg = self.sack.add_cmdline_package(tests.support.TOUR_44_PKG_PATH)
        chksum = local_pkg._chksum
        self.assertEqual(chksum[0], hawkey.CHKSUM_SHA256)
        self.assertEqual(chksum[1], TOUR_SHA256)

    def test_verify_installed(self):
        # Installed packages have no local file to verify against.
        pkg = self.sack.query().installed().filter(name="pepper")[0]
        self.assertRaises(ValueError, pkg.verifyLocalPkg)
299,693 | test scalar in list | """Partial parity tests based on
https://www.notion.so/osohq/Supported-Query-Types-and-Features-435d7a998dc14db3a125c6e5ba5fe6ba.
"""
import pytest
from test_app2.models import Post, Tag
from django_oso.oso import Oso, reset_oso
@pytest.fixture(autouse=True)
def reset():
    """Give every test a freshly reset Oso registry."""
    reset_oso()
@pytest.mark.xfail(reason="Not supported yet.")
@pytest.mark.django_db
def test_field_comparison(load_additional_str):
    """Authorize posts by comparing two fields of the same record."""
    fixtures = {
        0: Post(id=0, contents="private post", title="not private post"),
        1: Post(id=1, contents="private post", title="private post"),
        2: Post(id=2, contents="post", title="post"),
    }
    for record in fixtures.values():
        record.save()

    load_additional_str(
        """
        allow(_, _, post: test_app2::Post) if
            post.title = post.contents;
        """
    )

    authorized = Post.objects.authorize(None, actor="u", action="r").all()
    assert len(authorized) == 2
    assert fixtures[1] in authorized
    assert fixtures[2] in authorized
@pytest.mark.django_db
def METHOD_NAME(load_additional_str):
    """Authorize posts whose scalar field is a member of a literal list."""
    fixtures = {
        0: Post(id=0, contents="private post", title="not private post"),
        1: Post(id=1, contents="allowed posts", title="private post"),
        2: Post(id=2, contents="post", title="post"),
    }
    for record in fixtures.values():
        record.save()

    load_additional_str(
        """
        allow(_, _, post: test_app2::Post) if
            post.contents in ["post", "allowed posts"];
        """
    )

    authorized = Post.objects.authorize(None, actor="u", action="r").all()
    assert len(authorized) == 2
    assert fixtures[1] in authorized
    assert fixtures[2] in authorized
@pytest.mark.django_db
def test_ground_object_in_collection(load_additional_str):
    """A registered-constant object must match membership in an m2m set."""
    tag = Tag(name="tag")
    tag.save()

    fixtures = {
        0: Post(id=0, contents="tag post"),
        1: Post(id=1, contents="no tag post"),
        2: Post(id=2, contents="tag 2 post"),
    }
    for record in fixtures.values():
        record.save()
    fixtures[0].tags.set([tag])
    fixtures[2].tags.set([tag])

    # The tag instance is exposed to Polar as a constant.
    Oso.register_constant(tag, "allowed_tag")
    load_additional_str(
        """
        allow(_, _, post: test_app2::Post) if
            allowed_tag in post.tags;
        """
    )

    authorized = Post.objects.authorize(None, actor="u", action="r").all()
    assert len(authorized) == 2
    assert fixtures[0] in authorized
    assert fixtures[2] in authorized
@pytest.mark.xfail(reason="Negate in not supported yet.")
@pytest.mark.django_db
def test_all_objects_collection_condition(oso, engine, load_additional_str):
    """Authorize only posts whose tags are *all* public (forall over an m2m).

    The ``oso`` and ``engine`` fixture parameters are unused but kept so the
    test signature (and pytest fixture wiring) stays backward compatible.
    """
    public_tag = Tag(name="public", is_public=True)
    private_tag = Tag(name="private", is_public=False)
    public_tag.save()
    private_tag.save()

    # Fix: ``tags=[...]`` was previously also passed to the Post constructor,
    # but Django rejects many-to-many kwargs on unsaved model instances, so the
    # test failed during setup rather than on the unsupported ``forall`` the
    # xfail marker refers to. Tags are assigned via ``.set()`` below, which was
    # already present and is the supported path.
    post0 = Post(id=0, contents="public tag")
    post1 = Post(id=1, contents="no tags")
    post2 = Post(id=2, contents="both tags")
    post3 = Post(id=3, contents="public tag 2")
    post4 = Post(id=4, contents="private tag")
    post0.save()
    post1.save()
    post2.save()
    post3.save()
    post4.save()
    post0.tags.set([public_tag])
    post2.tags.set([public_tag, private_tag])
    post3.tags.set([public_tag])
    post4.tags.set([private_tag])

    load_additional_str(
        """
        allow(_, _, post: test_app2::Post) if
            forall(tag in post.tags, tag.is_public = true);
        """
    )

    posts = Post.objects.authorize(None, actor="u", action="r").all()
    assert len(posts) == 2
    assert post0 in posts
    assert post3 in posts
@pytest.mark.xfail(reason="Negate in not supported yet.")
@pytest.mark.django_db
def test_no_objects_collection_condition(load_additional_str):
    """Authorize only posts with *no* public tag (negated membership)."""
    public_tag = Tag(name="public", is_public=True)
    private_tag = Tag(name="private", is_public=False)
    public_tag.save()
    private_tag.save()

    # Fix: ``tags=[...]`` was previously also passed to the Post constructor,
    # but Django rejects many-to-many kwargs on unsaved model instances, so the
    # test failed during setup rather than on the unsupported negation the
    # xfail marker refers to. Tags are assigned via ``.set()`` below, which was
    # already present and is the supported path.
    post0 = Post(id=0, contents="public tag")
    post1 = Post(id=1, contents="no tags")
    post2 = Post(id=2, contents="both tags")
    post3 = Post(id=3, contents="public tag 2")
    post4 = Post(id=4, contents="private tag")
    post0.save()
    post1.save()
    post2.save()
    post3.save()
    post4.save()
    post0.tags.set([public_tag])
    post2.tags.set([public_tag, private_tag])
    post3.tags.set([public_tag])
    post4.tags.set([private_tag])

    load_additional_str(
        """
        allow(_, _, post: test_app2::Post) if
            not (tag in post.tags and tag.is_public = true);
        """
    )

    posts = Post.objects.authorize(None, actor="u", action="r").all()
    assert len(posts) == 2
    assert post0 in posts
    assert post3 in posts
299,694 | search all | import os
import torch
from tqdm import tqdm
from typing import Union
from colbert.data import Collection, Queries, Ranking
from colbert.modeling.checkpoint import Checkpoint
from colbert.search.index_storage import IndexScorer
from colbert.infra.provenance import Provenance
from colbert.infra.run import Run
from colbert.infra.config import ColBERTConfig, RunConfig
from colbert.infra.launcher import print_memory_stats
import time
# A query input may be a single string, a list of strings, a {qid: text}
# mapping, or an already-constructed Queries object.
TextQueries = Union[str, 'list[str]', 'dict[int, str]', Queries]
class Searcher:
    """Load a built ColBERT index plus its checkpoint and serve retrieval."""

    def __init__(self, index, checkpoint=None, collection=None, config=None):
        print_memory_stats()

        initial_config = ColBERTConfig.from_existing(config, Run().config)

        default_index_root = initial_config.index_root_
        self.index = os.path.join(default_index_root, index)
        self.index_config = ColBERTConfig.load_from_index(self.index)

        # The checkpoint recorded at indexing time is the default query encoder.
        self.checkpoint = checkpoint or self.index_config.checkpoint
        self.checkpoint_config = ColBERTConfig.load_from_checkpoint(self.checkpoint)
        self.config = ColBERTConfig.from_existing(self.checkpoint_config, self.index_config, initial_config)

        self.collection = Collection.cast(collection or self.config.collection)
        self.configure(checkpoint=self.checkpoint, collection=self.collection)

        self.checkpoint = Checkpoint(self.checkpoint, colbert_config=self.config)
        use_gpu = self.config.total_visible_gpus > 0
        if use_gpu:
            self.checkpoint = self.checkpoint.cuda()
        self.ranker = IndexScorer(self.index, use_gpu)

        print_memory_stats()

    def configure(self, **kw_args):
        """Update runtime search settings on the live config."""
        self.config.configure(**kw_args)

    def encode(self, text: TextQueries):
        """Encode one query or a list of queries into ColBERT embeddings."""
        queries = text if type(text) is list else [text]
        # Batch only when there are enough queries to justify it.
        bsize = 128 if len(queries) > 128 else None

        self.checkpoint.query_tokenizer.query_maxlen = self.config.query_maxlen
        Q = self.checkpoint.queryFromText(queries, bsize=bsize, to_cpu=True)

        return Q

    def search(self, text: str, k=10, filter_fn=None):
        """Return (pids, ranks, scores) for the top-k passages of one query."""
        Q = self.encode(text)
        return self.dense_search(Q, k, filter_fn=filter_fn)

    def METHOD_NAME(self, queries: TextQueries, k=10, filter_fn=None):
        """Run top-k retrieval for every query and return a Ranking."""
        queries = Queries.cast(queries)
        queries_ = list(queries.values())

        Q = self.encode(queries_)

        return self._search_all_Q(queries, Q, k, filter_fn=filter_fn)

    def _search_all_Q(self, queries, Q, k, filter_fn=None):
        """Score each encoded query independently and attach provenance."""
        all_scored_pids = [list(zip(*self.dense_search(Q[query_idx:query_idx+1], k, filter_fn=filter_fn)))
                           for query_idx in tqdm(range(Q.size(0)))]

        data = {qid: val for qid, val in zip(queries.keys(), all_scored_pids)}

        provenance = Provenance()
        provenance.source = 'Searcher::search_all'
        provenance.queries = queries.provenance()
        provenance.config = self.config.export()
        provenance.k = k

        return Ranking(data=data, provenance=provenance)

    def dense_search(self, Q: torch.Tensor, k=10, filter_fn=None):
        """Retrieve the top-k passages for a single encoded query.

        Search hyperparameters (ncells, centroid_score_threshold, ndocs) are
        defaulted based on k when the user has not set them explicitly.

        Returns:
            A (pids, ranks, scores) triple of equally long lists.
        """
        if k <= 10:
            if self.config.ncells is None:
                self.configure(ncells=1)
            if self.config.centroid_score_threshold is None:
                self.configure(centroid_score_threshold=0.5)
            if self.config.ndocs is None:
                self.configure(ndocs=256)
        elif k <= 100:
            if self.config.ncells is None:
                self.configure(ncells=2)
            if self.config.centroid_score_threshold is None:
                self.configure(centroid_score_threshold=0.45)
            if self.config.ndocs is None:
                self.configure(ndocs=1024)
        else:
            if self.config.ncells is None:
                self.configure(ncells=4)
            if self.config.centroid_score_threshold is None:
                self.configure(centroid_score_threshold=0.4)
            if self.config.ndocs is None:
                self.configure(ndocs=max(k * 4, 4096))

        pids, scores = self.ranker.rank(self.config, Q, filter_fn=filter_fn)
        pids, scores = pids[:k], scores[:k]

        # Bug fix: the ranker may return fewer than k hits, and the previous
        # ``list(range(1, k+1))`` then produced a ranks list longer than pids,
        # misaligning the (pids, ranks, scores) triple for callers.
        return pids, list(range(1, len(pids) + 1)), scores
299,695 | test variable tzname | import datetime
from email import utils
import test.support
import time
import unittest
import sys
import os.path
class DateTimeTests(unittest.TestCase):
    """Round-trip tests for email.utils datetime formatting and parsing."""

    datestring = 'Sun, 23 Sep 2001 20:10:55'
    dateargs = (2001, 9, 23, 20, 10, 55)
    offsetstring = ' -0700'
    utcoffset = datetime.timedelta(hours=-7)
    tz = datetime.timezone(utcoffset)
    naive_dt = datetime.datetime(*dateargs)
    aware_dt = datetime.datetime(*dateargs, tzinfo=tz)

    def test_naive_datetime(self):
        # A naive datetime is rendered with the -0000 pseudo-zone.
        formatted = utils.format_datetime(self.naive_dt)
        self.assertEqual(formatted, self.datestring + ' -0000')

    def test_aware_datetime(self):
        # An aware datetime carries its real UTC offset.
        formatted = utils.format_datetime(self.aware_dt)
        self.assertEqual(formatted, self.datestring + self.offsetstring)

    def test_usegmt(self):
        utc_dt = datetime.datetime(*self.dateargs,
                                   tzinfo=datetime.timezone.utc)
        formatted = utils.format_datetime(utc_dt, usegmt=True)
        self.assertEqual(formatted, self.datestring + ' GMT')

    def test_usegmt_with_naive_datetime_raises(self):
        # usegmt requires an aware datetime ...
        with self.assertRaises(ValueError):
            utils.format_datetime(self.naive_dt, usegmt=True)

    def test_usegmt_with_non_utc_datetime_raises(self):
        # ... whose timezone is UTC.
        with self.assertRaises(ValueError):
            utils.format_datetime(self.aware_dt, usegmt=True)

    def test_parsedate_to_datetime(self):
        parsed = utils.parsedate_to_datetime(self.datestring + self.offsetstring)
        self.assertEqual(parsed, self.aware_dt)

    def test_parsedate_to_datetime_naive(self):
        # The -0000 pseudo-zone parses back to a naive datetime.
        parsed = utils.parsedate_to_datetime(self.datestring + ' -0000')
        self.assertEqual(parsed, self.naive_dt)
class LocaltimeTests(unittest.TestCase):
    """Tests for email.utils.localtime under various TZ/DST configurations."""

    def test_localtime_is_tz_aware_daylight_true(self):
        # localtime() must return an aware datetime regardless of time.daylight.
        test.support.patch(self, time, 'daylight', True)
        t = utils.localtime()
        self.assertIsNotNone(t.tzinfo)

    def test_localtime_is_tz_aware_daylight_false(self):
        test.support.patch(self, time, 'daylight', False)
        t = utils.localtime()
        self.assertIsNotNone(t.tzinfo)

    def test_localtime_daylight_true_dst_false(self):
        # Converting localtime's own output again must be idempotent.
        test.support.patch(self, time, 'daylight', True)
        t0 = datetime.datetime(2012, 3, 12, 1, 1)
        t1 = utils.localtime(t0, isdst=-1)
        t2 = utils.localtime(t1)
        self.assertEqual(t1, t2)

    def test_localtime_daylight_false_dst_false(self):
        test.support.patch(self, time, 'daylight', False)
        t0 = datetime.datetime(2012, 3, 12, 1, 1)
        t1 = utils.localtime(t0, isdst=-1)
        t2 = utils.localtime(t1)
        self.assertEqual(t1, t2)

    @test.support.run_with_tz('Europe/Minsk')
    def test_localtime_daylight_true_dst_true(self):
        test.support.patch(self, time, 'daylight', True)
        t0 = datetime.datetime(2012, 3, 12, 1, 1)
        t1 = utils.localtime(t0, isdst=1)
        t2 = utils.localtime(t1)
        self.assertEqual(t1, t2)

    @test.support.run_with_tz('Europe/Minsk')
    def test_localtime_daylight_false_dst_true(self):
        test.support.patch(self, time, 'daylight', False)
        t0 = datetime.datetime(2012, 3, 12, 1, 1)
        t1 = utils.localtime(t0, isdst=1)
        t2 = utils.localtime(t1)
        self.assertEqual(t1, t2)

    @test.support.run_with_tz('EST+05EDT,M3.2.0,M11.1.0')
    def test_localtime_epoch_utc_daylight_true(self):
        # A UTC datetime converts to EST local time (UTC-5; January has no DST).
        test.support.patch(self, time, 'daylight', True)
        t0 = datetime.datetime(1990, 1, 1, tzinfo = datetime.timezone.utc)
        t1 = utils.localtime(t0)
        t2 = t0 - datetime.timedelta(hours=5)
        t2 = t2.replace(tzinfo = datetime.timezone(datetime.timedelta(hours=-5)))
        self.assertEqual(t1, t2)

    @test.support.run_with_tz('EST+05EDT,M3.2.0,M11.1.0')
    def test_localtime_epoch_utc_daylight_false(self):
        test.support.patch(self, time, 'daylight', False)
        t0 = datetime.datetime(1990, 1, 1, tzinfo = datetime.timezone.utc)
        t1 = utils.localtime(t0)
        t2 = t0 - datetime.timedelta(hours=5)
        t2 = t2.replace(tzinfo = datetime.timezone(datetime.timedelta(hours=-5)))
        self.assertEqual(t1, t2)

    def test_localtime_epoch_notz_daylight_true(self):
        # Naive input with and without an explicit tzinfo=None must agree.
        test.support.patch(self, time, 'daylight', True)
        t0 = datetime.datetime(1990, 1, 1)
        t1 = utils.localtime(t0)
        t2 = utils.localtime(t0.replace(tzinfo=None))
        self.assertEqual(t1, t2)

    def test_localtime_epoch_notz_daylight_false(self):
        test.support.patch(self, time, 'daylight', False)
        t0 = datetime.datetime(1990, 1, 1)
        t1 = utils.localtime(t0)
        t2 = utils.localtime(t0.replace(tzinfo=None))
        self.assertEqual(t1, t2)

    # XXX: Need a more robust test for Olson's tzdata
    @unittest.skipIf(sys.platform.startswith('win'),
                     "Windows does not use Olson's TZ database")
    @unittest.skipUnless(os.path.exists('/usr/share/zoneinfo') or
                         os.path.exists('/usr/lib/zoneinfo'),
                         "Can't find the Olson's TZ database")
    @test.support.run_with_tz('Europe/Kiev')
    def METHOD_NAME(self):
        # Kiev switched from Moscow time (MSK) to Eastern European time (EET)
        # between these dates; tzname() must reflect the historical zone.
        t0 = datetime.datetime(1984, 1, 1, tzinfo=datetime.timezone.utc)
        t1 = utils.localtime(t0)
        self.assertEqual(t1.tzname(), 'MSK')
        t0 = datetime.datetime(1994, 1, 1, tzinfo=datetime.timezone.utc)
        t1 = utils.localtime(t0)
        self.assertEqual(t1.tzname(), 'EET')
# Issue #24836: The timezone files are out of date (pre 2011k)
# on Mac OS X Snow Leopard.
@test.support.requires_mac_ver(10, 7)
class FormatDateTests(unittest.TestCase):
    """Tests for email.utils.formatdate with and without localtime."""

    @test.support.run_with_tz('Europe/Minsk')
    def test_formatdate(self):
        # Non-local output is always UTC: '-0000' or 'GMT' depending on usegmt.
        timeval = time.mktime((2011, 12, 1, 18, 0, 0, 4, 335, 0))
        string = utils.formatdate(timeval, localtime=False, usegmt=False)
        self.assertEqual(string, 'Thu, 01 Dec 2011 15:00:00 -0000')
        string = utils.formatdate(timeval, localtime=False, usegmt=True)
        self.assertEqual(string, 'Thu, 01 Dec 2011 15:00:00 GMT')

    @test.support.run_with_tz('Europe/Minsk')
    def test_formatdate_with_localtime(self):
        # Local output must use the historically correct UTC offset.
        timeval = time.mktime((2011, 1, 1, 18, 0, 0, 6, 1, 0))
        string = utils.formatdate(timeval, localtime=True)
        self.assertEqual(string, 'Sat, 01 Jan 2011 18:00:00 +0200')
        # Minsk moved from +0200 (with DST) to +0300 (without DST) in 2011
        timeval = time.mktime((2011, 12, 1, 18, 0, 0, 4, 335, 0))
        string = utils.formatdate(timeval, localtime=True)
        self.assertEqual(string, 'Thu, 01 Dec 2011 18:00:00 +0300')
# Allow running this test file directly.
if __name__ == '__main__':
    unittest.main()
299,696 | test task | # -*- coding: utf-8 -*-
###########################################################################
# Copyright (c), The AiiDA team. All rights reserved. #
# This file is part of the AiiDA code. #
# #
# The code is hosted on GitHub at https://github.com/aiidateam/aiida-core #
# For further information on the license, see the LICENSE.txt file #
# For further information please visit http://www.aiida.net #
###########################################################################
# pylint: disable=global-statement
"""Test engine utilities such as the exponential backoff mechanism."""
import asyncio
import pytest
from aiida import orm
from aiida.engine import calcfunction, workfunction
from aiida.engine.utils import (
InterruptableFuture,
exponential_backoff_retry,
instantiate_process,
interruptable_task,
is_process_function,
)
# Shared mutable counter used by the backoff test coroutines below.
ITERATION = 0
# Number of attempts after which the deliberately failing coroutines succeed.
MAX_ITERATIONS = 3
class TestExponentialBackoffRetry:
    """Tests for the exponential backoff retry coroutine."""

    @pytest.fixture(autouse=True)
    def init_profile(self, aiida_localhost):  # pylint: disable=unused-argument
        """Initialize the profile."""
        # pylint: disable=attribute-defined-outside-init
        self.computer = aiida_localhost
        self.authinfo = self.computer.get_authinfo(orm.User.collection.get_default())

    @staticmethod
    def test_exp_backoff_success():
        """Test that exponential backoff will successfully catch exceptions as long as max_attempts is not exceeded."""
        global ITERATION
        ITERATION = 0
        loop = asyncio.get_event_loop()

        async def coro():
            """A function that will raise RuntimeError as long as ITERATION is smaller than MAX_ITERATIONS."""
            global ITERATION
            ITERATION += 1
            if ITERATION < MAX_ITERATIONS:
                raise RuntimeError

        # One attempt more than the number of failures, so the final retry succeeds.
        max_attempts = MAX_ITERATIONS + 1
        loop.run_until_complete(exponential_backoff_retry(coro, initial_interval=0.1, max_attempts=max_attempts))

    def test_exp_backoff_max_attempts_exceeded(self):
        """Test that exponential backoff will finally raise if max_attempts is exceeded"""
        global ITERATION
        ITERATION = 0
        loop = asyncio.get_event_loop()

        # NOTE(review): unlike the success case above, this is a plain function
        # rather than ``async def`` — presumably exponential_backoff_retry
        # tolerates both since every attempt raises before awaiting; confirm,
        # or make it ``async def`` for consistency.
        def coro():
            """A function that will raise RuntimeError as long as ITERATION is smaller than MAX_ITERATIONS."""
            global ITERATION
            ITERATION += 1
            if ITERATION < MAX_ITERATIONS:
                raise RuntimeError

        # One attempt fewer than the number of failures, so retries are exhausted.
        max_attempts = MAX_ITERATIONS - 1
        with pytest.raises(RuntimeError):
            loop.run_until_complete(exponential_backoff_retry(coro, initial_interval=0.1, max_attempts=max_attempts))
def test_instantiate_process_invalid(manager):
    """Test the :func:`aiida.engine.utils.instantiate_process` function for invalid ``process`` argument."""
    expected_message = r'invalid process <class \'bool\'>, needs to be Process or ProcessBuilder'
    with pytest.raises(ValueError, match=expected_message):
        instantiate_process(manager.get_runner(), True)
def test_is_process_function():
    """Only calcfunction/workfunction-decorated callables count as process functions."""

    def plain_function():
        pass

    @calcfunction
    def decorated_calcfunction():
        pass

    @workfunction
    def decorated_workfunction():
        pass

    assert is_process_function(plain_function) is False
    assert is_process_function(decorated_calcfunction) is True
    assert is_process_function(decorated_workfunction) is True
class TestInterruptable:
    """ Tests for InterruptableFuture and interruptable_task."""

    def test_normal_future(self):
        """Test interrupt future not being interrupted"""
        loop = asyncio.get_event_loop()

        interruptable = InterruptableFuture()
        fut = asyncio.Future()

        async def task():
            fut.set_result('I am done')

        # The wrapped task finishes normally, so the interruptable future
        # itself is never resolved.
        loop.run_until_complete(interruptable.with_interrupt(task()))
        assert not interruptable.done()
        assert fut.result() == 'I am done'

    def test_interrupt(self):
        """Test interrupt future being interrupted"""
        loop = asyncio.get_event_loop()

        interruptable = InterruptableFuture()
        # Schedule the interrupt so it fires long before the sleep finishes.
        loop.call_soon(interruptable.interrupt, RuntimeError('STOP'))
        try:
            loop.run_until_complete(interruptable.with_interrupt(asyncio.sleep(10.)))
        except RuntimeError as err:
            assert str(err) == 'STOP'
        else:
            pytest.fail('ExpectedException not raised')

        assert interruptable.done()

    def test_inside_interrupted(self):
        """Test interrupt future being interrupted from inside of coroutine"""
        loop = asyncio.get_event_loop()

        interruptable = InterruptableFuture()
        fut = asyncio.Future()

        async def task():
            await asyncio.sleep(1.)
            # Interrupting from inside still lets the remaining statements run.
            interruptable.interrupt(RuntimeError('STOP'))
            fut.set_result('I got set.')

        try:
            loop.run_until_complete(interruptable.with_interrupt(task()))
        except RuntimeError as err:
            assert str(err) == 'STOP'
        else:
            pytest.fail('ExpectedException not raised')

        assert interruptable.done()
        assert fut.result() == 'I got set.'

    def test_interruptable_future_set(self):
        """Test interrupt future being set before coroutine is done"""
        loop = asyncio.get_event_loop()

        interruptable = InterruptableFuture()

        async def task():
            interruptable.set_result('NOT ME!!!')

        # Setting the result directly (instead of interrupting) is reported
        # as an error by with_interrupt.
        loop.create_task(task())
        try:
            loop.run_until_complete(interruptable.with_interrupt(asyncio.sleep(20.)))
        except RuntimeError as err:
            assert str(err) == "This interruptible future had it's result set unexpectedly to 'NOT ME!!!'"
        else:
            pytest.fail('ExpectedException not raised')

        assert interruptable.done()
@pytest.mark.requires_rmq
class TestInterruptableTask():
    """ Tests for InterruptableFuture and interruptable_task."""

    @pytest.mark.asyncio
    async def METHOD_NAME(self):
        """Test coroutine run and succed"""

        async def task_fn(cancellable):
            fut = asyncio.Future()

            async def coro():
                fut.set_result('I am done')

            await cancellable.with_interrupt(coro())
            return fut.result()

        # interruptable_task wraps the coroutine in an InterruptableFuture.
        task_fut = interruptable_task(task_fn)
        result = await task_fut
        assert isinstance(task_fut, InterruptableFuture)
        assert task_fut.done()
        assert result == 'I am done'

    @pytest.mark.asyncio
    async def test_interrupted(self):
        """Test interrupt future being interrupted"""

        async def task_fn(cancellable):
            cancellable.interrupt(RuntimeError('STOP'))

        task_fut = interruptable_task(task_fn)
        try:
            await task_fut
        except RuntimeError as err:
            assert str(err) == 'STOP'
        else:
            raise AssertionError('ExpectedException not raised')

    @pytest.mark.asyncio
    async def test_future_already_set(self):
        """Test interrupt future being set before coroutine is done"""

        async def task_fn(cancellable):
            fut = asyncio.Future()

            async def coro():
                fut.set_result('I am done')

            await cancellable.with_interrupt(coro())
            # Setting the outer future directly overrides the inner result.
            cancellable.set_result('NOT ME!!!')
            return fut.result()

        task_fut = interruptable_task(task_fn)
        result = await task_fut

        assert result == 'NOT ME!!!'
299,697 | handler | # --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
#
# Code generated by aaz-dev-tools
# --------------------------------------------------------------------------------------------
# pylint: skip-file
# flake8: noqa
from azure.cli.core.aaz import *
# NOTE: generated by aaz-dev-tools — keep hand edits to comments only.
@register_command(
    "vmss rolling-upgrade cancel",
)
class Cancel(AAZCommand):
    """Cancel the current virtual machine scale set rolling upgrade.
    """

    # Resource/API-version metadata used by the aaz framework.
    _aaz_info = {
        "version": "2022-11-01",
        "resources": [
            ["mgmt-plane", "/subscriptions/{}/resourcegroups/{}/providers/microsoft.compute/virtualmachinescalesets/{}/rollingupgrades/cancel", "2022-11-01"],
        ]
    }

    # Long-running operation: the command supports --no-wait.
    AZ_SUPPORT_NO_WAIT = True

    def METHOD_NAME(self, command_args):
        # Run the operation generator and wrap it in an LRO poller.
        super().METHOD_NAME(command_args)
        return self.build_lro_poller(self._execute_operations, None)

    _args_schema = None

    @classmethod
    def _build_arguments_schema(cls, *args, **kwargs):
        """Build (and cache) the CLI argument schema for this command."""
        if cls._args_schema is not None:
            return cls._args_schema
        cls._args_schema = super()._build_arguments_schema(*args, **kwargs)

        # define Arg Group ""

        _args_schema = cls._args_schema
        _args_schema.resource_group = AAZResourceGroupNameArg(
            help="Name of resource group. You can configure the default group using `az configure --defaults group=<name>`.",
            required=True,
        )
        _args_schema.virtual_machine_scale_set_name = AAZStrArg(
            options=["-n", "--name", "--vm-scale-set-name", "--virtual-machine-scale-set-name"],
            help="Scale set name. You can configure the default using `az configure --defaults vmss=<name>`.",
            required=True,
            id_part="name",
        )
        return cls._args_schema

    def _execute_operations(self):
        # Single REST call, bracketed by the customization callbacks.
        self.pre_operations()
        yield self.VirtualMachineScaleSetRollingUpgradesCancel(ctx=self.ctx)()
        self.post_operations()

    @register_callback
    def pre_operations(self):
        pass

    @register_callback
    def post_operations(self):
        pass

    class VirtualMachineScaleSetRollingUpgradesCancel(AAZHttpOperation):
        CLIENT_TYPE = "MgmtClient"

        def __call__(self, *args, **kwargs):
            # Issue the POST; 200/202 responses start LRO polling, anything
            # else is treated as an error.
            request = self.make_request()
            session = self.client.send_request(request=request, stream=False, **kwargs)
            if session.http_response.status_code in [202]:
                return self.client.build_lro_polling(
                    self.ctx.args.no_wait,
                    session,
                    self.on_200,
                    self.on_error,
                    lro_options={"final-state-via": "azure-async-operation"},
                    path_format_arguments=self.url_parameters,
                )
            if session.http_response.status_code in [200]:
                return self.client.build_lro_polling(
                    self.ctx.args.no_wait,
                    session,
                    self.on_200,
                    self.on_error,
                    lro_options={"final-state-via": "azure-async-operation"},
                    path_format_arguments=self.url_parameters,
                )

            return self.on_error(session.http_response)

        @property
        def url(self):
            return self.client.format_url(
                "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/rollingUpgrades/cancel",
                **self.url_parameters
            )

        @property
        def method(self):
            return "POST"

        @property
        def error_format(self):
            return "ODataV4Format"

        @property
        def url_parameters(self):
            parameters = {
                **self.serialize_url_param(
                    "resourceGroupName", self.ctx.args.resource_group,
                    required=True,
                ),
                **self.serialize_url_param(
                    "subscriptionId", self.ctx.subscription_id,
                    required=True,
                ),
                **self.serialize_url_param(
                    "vmScaleSetName", self.ctx.args.virtual_machine_scale_set_name,
                    required=True,
                ),
            }
            return parameters

        @property
        def query_parameters(self):
            parameters = {
                **self.serialize_query_param(
                    "api-version", "2022-11-01",
                    required=True,
                ),
            }
            return parameters

        def on_200(self, session):
            # Successful cancel returns no body worth deserializing.
            pass
class _CancelHelper:
    """Helper class for Cancel"""


# Only the command class is part of this module's public API.
__all__ = ["Cancel"]
299,698 | train | # Copyright (c) 2021 - present / Neuralmagic, Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import hashlib
import logging
import shutil
import torch
import openpifpaf
from sparseml.pytorch.optim import ScheduledModifierManager
LOG = logging.getLogger("openpifpaf." + __name__)
class SparseMLTrainer(openpifpaf.network.Trainer):
    """
    Lifecycle of this object is:
    1. SparseMLTrainer.cli is called to add parameters to argparser
    2. SparseMLTrainer.configure is called to set class level variables
        from argparse args
    3. The object is instantiated with `__init__`
    4. the `loop` method is called to run training.
    All of this happens in the train.py file.
    """

    def __init__(
        self,
        model: torch.nn.Module,
        loss,
        optimizer,
        out,
        manager,
        checkpoint_manager,
        *,
        checkpoint_shell=None,
        lr_scheduler=None,
        device=None,
        model_meta_data=None,
    ):
        # ``manager`` is the active SparseML recipe manager; the optional
        # ``checkpoint_manager`` carries the recipe of a previous run so the
        # two can be composed when checkpoints are written.
        self.manager = manager
        self.checkpoint_manager = checkpoint_manager
        self.epochs = self.manager.max_epochs
        # If the recipe drives the learning rate, disable the external scheduler.
        if self.manager.learning_rate_modifiers:
            lr_scheduler = None
        super().__init__(
            model,
            loss,
            optimizer,
            out,
            checkpoint_shell=checkpoint_shell,
            lr_scheduler=lr_scheduler,
            device=device,
            model_meta_data=model_meta_data,
        )

    def loop(
        self,
        train_scenes: torch.utils.data.DataLoader,
        val_scenes: torch.utils.data.DataLoader,
        start_epoch=0,
    ):
        """Run the full training loop, then finalize the SparseML recipe."""
        super().loop(train_scenes, val_scenes, start_epoch)
        self.manager.finalize(self.model)

    def METHOD_NAME(self, scenes, epoch):
        """Train one epoch; disable EMA parameter restore once QAT is active."""
        if self.manager.qat_active(epoch=epoch):
            self.ema_restore_params = None
        return super().METHOD_NAME(scenes, epoch)

    def write_model(self, epoch, final=True):
        """Serialize model, state dict and composed recipe; only rank 0 writes."""
        if torch.distributed.is_initialized() and torch.distributed.get_rank() != 0:
            return

        model_to_save = self.model
        if self.checkpoint_shell is not None:
            # Copy weights into the clean "shell" model so the checkpoint does
            # not carry training-only wrappers (e.g. DataParallel's .module).
            model = (
                self.model if not hasattr(self.model, "module") else self.model.module
            )
            self.checkpoint_shell.load_state_dict(model.state_dict())
            model_to_save = self.checkpoint_shell

        filename = "{}.epoch{:03d}".format(self.out, epoch)
        LOG.debug("about to write model")
        checkpoint = {
            "model": model_to_save,
            "state_dict": model_to_save.state_dict(),
            "meta": self.model_meta_data,
        }
        # -1 marks the final epoch; otherwise offset by any previous run's epochs.
        checkpoint["epoch"] = -1 if epoch == self.manager.max_epochs - 1 else epoch
        if self.checkpoint_manager is not None and checkpoint["epoch"] > 0:
            checkpoint["epoch"] += self.checkpoint_manager.max_epochs

        # Compose the previous run's recipe with the current one so the
        # checkpoint records the full sparsification history.
        recipe = self.manager
        if self.checkpoint_manager is not None:
            recipe = ScheduledModifierManager.compose_staged(
                self.checkpoint_manager, recipe
            )
        checkpoint["checkpoint_recipe"] = str(recipe)
        torch.save(checkpoint, filename)
        LOG.info("model written: %s", filename)

        if final:
            # Name the final artifact with a short content hash for provenance.
            sha256_hash = hashlib.sha256()
            with open(filename, "rb") as f:
                for byte_block in iter(lambda: f.read(8192), b""):
                    sha256_hash.update(byte_block)
            file_hash = sha256_hash.hexdigest()
            outname, _, outext = self.out.rpartition(".")
            final_filename = "{}-{}.{}".format(outname, file_hash[:8], outext)
            shutil.copyfile(filename, final_filename)
299,699 | delete list | #########################################################################
#
# Copyright (C) 2016 OSGeo
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#########################################################################
from django.db.models import Q
from tastypie.authentication import ApiKeyAuthentication
from tastypie.authorization import DjangoAuthorization
from tastypie.exceptions import Unauthorized
from tastypie.compat import get_user_model, get_username_field
from guardian.shortcuts import get_objects_for_user
from tastypie.http import HttpUnauthorized
from django.conf import settings
from geonode import geoserver
from geonode.utils import check_ogc_backend
class GeoNodeAuthorization(DjangoAuthorization):
    """Object level API authorization based on GeoNode granular
    permission system"""

    def read_list(self, object_list, bundle):
        # Restrict list views to resources the user may view.
        permitted_ids = []
        try:
            permitted_ids = get_objects_for_user(bundle.request.user, "base.view_resourcebase").values("id")
        except Exception:
            # NOTE(review): any failure here silently yields an empty result
            # set (the user sees no resources at all); confirm this
            # best-effort behaviour is intended rather than surfacing the error.
            pass

        return object_list.filter(id__in=permitted_ids)

    def read_detail(self, object_list, bundle):
        # Schema endpoints are always readable.
        if "schema" in bundle.request.path:
            return True
        return bundle.request.user.has_perm("view_resourcebase", bundle.obj.get_self_resource())

    def create_list(self, object_list, bundle):
        # TODO implement if needed
        raise Unauthorized()

    def create_detail(self, object_list, bundle):
        return bundle.request.user.has_perm("add_resourcebase", bundle.obj.get_self_resource())

    def update_list(self, object_list, bundle):
        # TODO implement if needed
        raise Unauthorized()

    def update_detail(self, object_list, bundle):
        return bundle.request.user.has_perm("change_resourcebase", bundle.obj.get_self_resource())

    def METHOD_NAME(self, object_list, bundle):
        # TODO implement if needed
        raise Unauthorized()

    def delete_detail(self, object_list, bundle):
        return bundle.request.user.has_perm("delete_resourcebase", bundle.obj.get_self_resource())
class GeonodeApiKeyAuthentication(ApiKeyAuthentication):
    """ApiKeyAuthentication variant that does not answer 401 when no API
    key is provided at all (the request falls through as anonymous)."""

    def is_authenticated(self, request, **kwargs):
        """Find the user named in the credentials and check their API key.

        Returns ``True`` if allowed (including the no-credentials case),
        ``False`` if not, or an ``HttpResponse`` for custom handling.
        """
        try:
            username, api_key = self.extract_credentials(request)
        except ValueError:
            return self._unauthorized()

        # Missing username and/or key: treat as anonymous rather than
        # rejecting the request outright.
        if not username or not api_key:
            return True

        user_model = get_user_model()
        lookup_kwargs = {get_username_field(): username}
        try:
            user = user_model.objects.get(**lookup_kwargs)
        except (user_model.DoesNotExist, user_model.MultipleObjectsReturned):
            return self._unauthorized()

        if not self.check_active(user):
            return False

        key_auth_check = self.get_key(user, api_key)
        # Only attach the user to the request when the key actually passed.
        if key_auth_check and not isinstance(key_auth_check, HttpUnauthorized):
            request.user = user
        return key_auth_check
class GeoNodeStyleAuthorization(GeoNodeAuthorization):
    """Object-level API authorization for Style resources.

    A style's permissions follow the permissions of the dataset(s)
    it is attached to.
    """

    def filter_by_resource_ids(self, object_list, permitted_ids):
        """Filter the Style queryset by permitted resource ids.

        NOTE(review): implicitly returns ``None`` when the configured OGC
        backend is not GeoServer — presumably this path is only exercised
        with GeoServer enabled; confirm against callers.
        """
        if check_ogc_backend(geoserver.BACKEND_PACKAGE):
            return object_list.filter(dataset_styles__id__in=permitted_ids)

    def read_list(self, object_list, bundle):
        """Restrict styles to those attached to resources the user may view."""
        permitted_ids = get_objects_for_user(
            bundle.request.user, "base.view_resourcebase"
        ).values("id")
        return self.filter_by_resource_ids(object_list, permitted_ids)

    def delete_detail(self, object_list, bundle):
        """Allow deletion only if the user may change the style's dataset.

        Bug fix: the previous code tested ``resource_obj in permitted_ids``
        where ``permitted_ids`` came from ``.values("id")`` — a queryset of
        dicts. A model instance never equals a dict, so the check was always
        False. Compare primary keys instead.
        """
        permitted_ids = get_objects_for_user(
            bundle.request.user, "layer.change_dataset_style"
        ).values_list("id", flat=True)
        resource_obj = bundle.obj.get_self_resource()
        return resource_obj.id in permitted_ids
class ApiLockdownAuthorization(DjangoAuthorization):
    """Fallback API authorization for resources not protected by another
    authentication/authorization mechanism.

    When the ``API_LOCKDOWN`` setting is True, resources are only served
    to authenticated users; anonymous requests get an empty list.
    """

    def read_list(self, object_list, bundle):
        user = bundle.request.user
        locked_out = settings.API_LOCKDOWN and not user.is_authenticated
        return [] if locked_out else object_list
class GeoNodePeopleAuthorization(DjangoAuthorization):
    """API authorization allowing only authenticated users to list users."""

    def read_list(self, object_list, bundle):
        if bundle.request.user.is_authenticated:
            return object_list
        # Anonymous callers receive an empty list rather than an error.
        return []
class GroupAuthorization(ApiLockdownAuthorization):
    """Hide private groups from non-members; superusers see everything."""

    def read_list(self, object_list, bundle):
        groups = super().read_list(object_list, bundle)
        if not groups:
            # Empty (e.g. API lockdown for anonymous) — nothing to filter.
            return groups
        user = bundle.request.user
        if not user.is_authenticated or user.is_anonymous:
            return groups.exclude(groupprofile__access="private")
        if user.is_superuser:
            return groups
        # Regular users: their own groups plus all non-private ones.
        member_of = Q(groupprofile__in=user.group_list_all())
        not_private = ~Q(groupprofile__access="private")
        return groups.filter(member_of | not_private)
class GroupProfileAuthorization(ApiLockdownAuthorization):
    """Hide private group profiles from non-members; superusers see all."""

    def read_list(self, object_list, bundle):
        groups = super().read_list(object_list, bundle)
        if not groups:
            # Empty (e.g. API lockdown for anonymous) — nothing to filter.
            return groups
        user = bundle.request.user
        if not user.is_authenticated or user.is_anonymous:
            return groups.exclude(access="private")
        if user.is_superuser:
            return groups
        # Regular users: their own group profiles plus non-private ones.
        member_of = Q(pk__in=user.group_list_all())
        not_private = ~Q(access="private")
        return groups.filter(member_of | not_private)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.