id int64 0 300k | label stringlengths 1 74 ⌀ | text stringlengths 4k 8k |
|---|---|---|
5,500 | test wait on event | # -*- coding: utf-8 -*-
"""Common test case for all resources.
"""
import gc
import logging
import re
from typing import Union
import pytest
from pyvisa import InvalidSession, ResourceManager
from pyvisa.constants import InterfaceType, ResourceAttribute, StatusCode, Timeouts
from pyvisa.resources.resource import Resource
from pyvisa.rname import ResourceName
from . import RESOURCE_ADDRESSES
class ResourceTestCase:
    """Base test case for all resources."""

    #: Type of resource being tested in this test case.
    #: See RESOURCE_ADDRESSES in the __init__.py file of this package for
    #: acceptable values
    RESOURCE_TYPE = ""

    #: Minimal timeout value accepted by the resource. When setting the timeout
    #: to Timeouts.immediate, Visa (Keysight at least) may actually use a
    #: different value depending on the values supported by the resource.
    MINIMAL_TIMEOUT: Union[int, Timeouts] = Timeouts.immediate

    def setup_method(self):
        """Create a resource using the address matching the type."""
        name = RESOURCE_ADDRESSES[self.RESOURCE_TYPE]
        self.rname = ResourceName.from_string(name)
        self.rm = ResourceManager()
        self.instr = self.rm.open_resource(name)
        self.instr.clear()

    def teardown_method(self):
        """Close the resource at the end of the test."""
        if self.instr:
            self.instr.close()
        if self.rm:
            self.rm.close()

    def test_lifecycle(self):
        """Test the lifecycle of a resource and the use as a context manager."""
        assert self.instr.session is not None
        assert self.instr.visalib is not None
        assert self.instr.last_status == StatusCode.success

        self.instr.close()

        # A closed resource has no valid session anymore.
        with pytest.raises(InvalidSession):
            self.instr.session

        with self.rm.open_resource(str(self.rname), read_termination="\0") as instr:
            assert len(self.rm.list_opened_resources()) == 1
            assert instr.read_termination == "\0"
        assert len(self.rm.list_opened_resources()) == 0

    def test_close_on_del(self, caplog):
        """Test that a resource is closed when it is garbage collected."""
        with caplog.at_level(logging.DEBUG):
            self.instr = None
            gc.collect()

        assert "- closing" in caplog.records[0].message
        # Bug fix: the original line was `assert "- is closed", ...`, which
        # asserts a non-empty string literal (always true) with the second
        # expression used as the assert message, so it never checked anything.
        assert "- is closed" in caplog.records[-1].message

    def test_alias_bypassing(self):
        """Test that a resource that cannot normalize an alias keeps the alias."""
        instr = Resource(self.rm, "visa_alias")
        assert re.match(r".* at %s" % "visa_alias", str(instr))

    def test_str(self):
        """Test the string representation of a resource."""
        assert re.match(r".* at %s" % str(self.rname), str(self.instr))
        self.instr.close()
        # str() must keep working on a closed resource.
        assert re.match(r".* at %s" % str(self.rname), str(self.instr))

    def test_repr(self):
        """Test the repr of a resource."""
        assert re.match(r"<.*\('%s'\)>" % str(self.rname), repr(self.instr))
        self.instr.close()
        # repr() must keep working on a closed resource.
        assert re.match(r"<.*\('%s'\)>" % str(self.rname), repr(self.instr))

    def test_timeout(self):
        """Test setting the timeout attribute."""
        # None disables the timeout (infinite wait).
        self.instr.timeout = None
        assert self.instr.timeout == float("+inf")
        assert (
            self.instr.get_visa_attribute(ResourceAttribute.timeout_value)
            == Timeouts.infinite
        )

        # Sub-millisecond values end up clamped to the resource's minimum.
        self.instr.timeout = 0.1
        assert self.instr.timeout == 1
        assert (
            self.instr.get_visa_attribute(ResourceAttribute.timeout_value)
            == self.MINIMAL_TIMEOUT
        )

        self.instr.timeout = 10
        assert self.instr.timeout == 10
        assert self.instr.get_visa_attribute(ResourceAttribute.timeout_value) == 10

        # Overly large values are rejected.
        with pytest.raises(ValueError):
            self.instr.timeout = 10000000000

        # Deleting the attribute resets it to an infinite timeout.
        del self.instr.timeout
        assert self.instr.timeout == float("+inf")
        assert (
            self.instr.get_visa_attribute(ResourceAttribute.timeout_value)
            == Timeouts.infinite
        )

    def test_resource_info(self):
        """Test accessing the resource info."""
        rinfo = self.instr.resource_info
        assert rinfo.interface_type == getattr(
            InterfaceType, self.rname.interface_type.lower()
        )
        assert rinfo.interface_board_number == int(self.rname.board)
        assert rinfo.resource_class == self.rname.resource_class
        assert rinfo.resource_name == str(self.rname)

    def test_interface_type(self):
        """Test accessing the resource interface_type."""
        assert self.instr.interface_type == getattr(
            InterfaceType, self.rname.interface_type.lower()
        )

    def test_attribute_handling(self):
        """Test directly manipulating attributes ie not using descriptors.

        This should be extended in subclasses to test a broader range of
        attributes.
        """
        self.instr.set_visa_attribute(ResourceAttribute.timeout_value, 10)
        assert self.instr.get_visa_attribute(ResourceAttribute.timeout_value) == 10
        assert self.instr.timeout == 10

        self.instr.set_visa_attribute(
            ResourceAttribute.timeout_value, Timeouts.immediate
        )
        assert (
            self.instr.get_visa_attribute(ResourceAttribute.timeout_value)
            == self.MINIMAL_TIMEOUT
        )
        assert self.instr.timeout == 1

        self.instr.set_visa_attribute(
            ResourceAttribute.timeout_value, Timeouts.infinite
        )
        assert (
            self.instr.get_visa_attribute(ResourceAttribute.timeout_value)
            == Timeouts.infinite
        )
        assert self.instr.timeout == float("+inf")
class EventAwareResourceTestCaseMixin:
    """Mixin for resources supporting handling events."""

    def METHOD_NAME(self):
        """Test waiting on a VISA event.

        Should be implemented on subclasses, since the way to generate the
        event may be dependent on the resource type.
        """
        raise NotImplementedError()

    def test_managing_visa_handler(self):
        """Test using visa handlers.

        Should be implemented on subclasses, since the way to generate the
        event may be dependent on the resource type.
        """
        raise NotImplementedError()
class LockableResourceTestCaseMixin:
    """Mixin for resources supporting locking."""

    def test_shared_locking(self):
        """Test acquiring/releasing a shared lock on a resource.

        Should be implemented on subclasses.
        """
        raise NotImplementedError()

    def test_exclusive_locking(self):
        """Test acquiring/releasing an exclusive lock on a resource.

        Should be implemented on subclasses.
        """
        raise NotImplementedError()
5,501 | make url key | import json
import uuid
from base64 import urlsafe_b64encode
from functools import cached_property
from django.conf import settings
from django.db import transaction
from django.http import QueryDict, HttpResponse, JsonResponse
from django.utils.translation import gettext as _
import attr
from casexml.apps.phone.xml import get_registration_element_data
from corehq.apps.auditcare.models import get_standard_headers
from corehq.apps.userreports.specs import EvaluationContext
from corehq.apps.users.models import CouchUser
from corehq.motech.generic_inbound.exceptions import GenericInboundUserError, GenericInboundApiError
from corehq.motech.generic_inbound.models import RequestLog
from corehq.util import as_text
from corehq.util.view_utils import get_form_or_404
from dimagi.utils.web import get_ip
# exclude these headers as they may expose internal / sensitive information
EXCLUDE_HEADERS = [
    # Keys as they appear in Django's request.META (client-sent headers
    # carry the HTTP_ prefix).
    'X_FORWARDED_HOST',
    'X_FORWARDED_SERVER',
    'VIA',
    'HTTP_CONNECTION',
    'HTTP_COOKIE',
    'SERVER_NAME',
    'SERVER_PORT',
    'HTTP_X_AMZN_TRACE_ID'
]
def get_headers_for_api_context(request):
    """Return the request headers with entries from EXCLUDE_HEADERS stripped."""
    return get_standard_headers(request.META, exclude=EXCLUDE_HEADERS)
def METHOD_NAME():
    """Return a random 22-character URL-safe key.

    The key is the base64url encoding of a random UUID's 16 bytes with the
    two trailing "==" padding characters removed.
    """
    token = urlsafe_b64encode(uuid.uuid4().bytes).decode()
    return token.removesuffix("==")
@attr.s(kw_only=True, frozen=True, auto_attribs=True)
class ApiRequest:
    """Immutable snapshot of an inbound API request.

    Built either from a live Django request (``from_request``) or from a
    stored ``RequestLog`` when reprocessing (``from_log``).
    """
    domain: str
    couch_user: CouchUser
    request_method: str
    user_agent: str
    data: str
    query: dict  # querystring key val pairs, vals are lists
    headers: dict
    request_id: str

    @classmethod
    def from_request(cls, request, request_id=None):
        """Build from a live Django request.

        Raises GenericInboundUserError if the body exceeds the size limit
        or cannot be decoded as text.
        """
        if _request_too_large(request):
            raise GenericInboundUserError(_("Request exceeds the allowed size limit"))

        try:
            body = as_text(request.body)
        except UnicodeDecodeError:
            raise GenericInboundUserError(_("Unable to decode request body"))

        return cls(
            domain=request.domain,
            couch_user=request.couch_user,
            request_method=request.method,
            user_agent=request.META.get('HTTP_USER_AGENT'),
            data=body,
            query=dict(request.GET.lists()),
            headers=get_headers_for_api_context(request),
            request_id=request_id or uuid.uuid4().hex,
        )

    @classmethod
    def from_log(cls, log):
        """Rebuild the request from a stored RequestLog (used for retries)."""
        return cls(
            domain=log.domain,
            couch_user=CouchUser.get_by_username(log.username),
            request_method=log.request_method,
            user_agent=log.request_headers.get('HTTP_USER_AGENT'),
            data=log.request_body,
            query=dict(QueryDict(log.request_query).lists()),
            headers=dict(log.request_headers),
            request_id=log.id
        )

    @cached_property
    def restore_user(self):
        # OTA restore user used when evaluating user-scoped expressions.
        return self.couch_user.to_ota_restore_user(
            self.domain, request_user=self.couch_user)
@attr.s(kw_only=True, frozen=True, auto_attribs=True)
class ApiResponse:
    """Data class for managing response data and producing HTTP responses.

    Override ``_get_http_response`` to return different HTTP response."""
    status: int
    internal_response: dict = None
    external_response: str = None
    content_type: str = None

    def get_http_response(self):
        """Return the Django HTTP response for this API response."""
        if self.status == 204:
            return HttpResponse(status=204)  # no body for 204 (RFC 7230)
        return self._get_http_response()

    def _get_http_response(self):
        # Default rendering: plain body with the stored status/content type.
        return HttpResponse(content=self.external_response, status=self.status, content_type=self.content_type)
def make_processing_attempt(response, request_log, is_retry=False):
    """Record a ProcessingAttempt row for ``response`` against ``request_log``."""
    # Local import to avoid a circular import at module load time.
    from corehq.motech.generic_inbound.models import ProcessingAttempt

    response_data = response.internal_response or {}
    # Cases touched by this attempt, if the backend reported any.
    case_ids = [c['case_id'] for c in response_data.get('cases', [])]
    ProcessingAttempt.objects.create(
        is_retry=is_retry,
        log=request_log,
        response_status=response.status,
        raw_response=response_data,
        external_response=response.external_response,
        xform_id=response_data.get('form_id'),
        case_ids=case_ids,
    )
def get_evaluation_context(restore_user, method, query, headers, body):
    """Build the EvaluationContext seen by inbound-API expressions."""
    request_context = {
        'method': method,
        'query': query,
        'headers': headers
    }
    root_context = {
        'request': request_context,
        'body': body,
        'user': get_registration_element_data(restore_user)
    }
    return EvaluationContext(root_context)
def reprocess_api_request(request_log):
    """Re-run a previously logged API request and record the new attempt."""
    # Local import to avoid a circular import at module load time.
    from corehq.motech.generic_inbound.models import RequestLog

    def get_request_data():
        return ApiRequest.from_log(request_log)

    response = process_api_request(request_log.api, request_log.id, get_request_data)

    # Update the log and record the attempt atomically so a failure leaves
    # the previous state intact.
    with transaction.atomic():
        request_log.status = RequestLog.Status.from_status_code(response.status)
        request_log.attempts += 1
        request_log.response_status = response.status
        request_log.save()
        make_processing_attempt(response, request_log, is_retry=True)
def process_api_request(api_model, request_id, get_request_data):
    """Run an inbound API request through its backend, mapping known
    configuration and user errors to error responses."""
    try:
        backend_cls = api_model.backend_class
    except GenericInboundApiError as err:
        # Misconfigured API: report a generic 500 with the error detail.
        return ApiResponse(status=500, internal_response={'error': str(err)})

    try:
        request_data = get_request_data()
    except GenericInboundUserError as err:
        # Bad client input: delegate formatting of the 400 to the backend.
        return backend_cls.get_basic_error_response(request_id, 400, str(err))

    return backend_cls(api_model, request_data).run()
def archive_api_request(request_log, user_id):
    """Archive every form created by ``request_log`` and revert its status."""
    attempts = request_log.processingattempt_set.filter(xform_id__isnull=False)
    for attempt in attempts:
        form = get_form_or_404(request_log.domain, attempt.xform_id)
        form.archive(user_id=user_id)
    _revert_api_request_log(request_log)
def _revert_api_request_log(request_log):
    """Mark a successful request log as REVERTED (no-op for other statuses)."""
    # Local import to avoid a circular import at module load time.
    from corehq.motech.generic_inbound.models import RequestLog

    if request_log.status == RequestLog.Status.SUCCESS:
        request_log.status = RequestLog.Status.REVERTED
        request_log.save()
def revert_api_request_from_form(form_id):
    """Revert the request log whose processing attempt created ``form_id``.

    Silently does nothing when no attempt references the form.
    """
    from corehq.motech.generic_inbound.models import ProcessingAttempt

    try:
        # NOTE(review): assumes at most one attempt per form; a duplicate
        # xform_id would raise MultipleObjectsReturned here — confirm.
        attempt = ProcessingAttempt.objects.get(xform_id=form_id)
        _revert_api_request_log(attempt.log)
    except ProcessingAttempt.DoesNotExist:
        return
def log_api_request(request_id, api, request, response):
    """Persist a RequestLog (and its ProcessingAttempt) for an API call."""
    if _request_too_large(request):
        # Don't store oversized bodies; ApiRequest.from_request rejects
        # such requests with the same size check.
        body = '<truncated>'
    else:
        body = as_text(request.body)
    log = RequestLog.objects.create(
        id=request_id,
        domain=request.domain,
        api=api,
        status=RequestLog.Status.from_status_code(response.status),
        response_status=response.status,
        username=request.couch_user.username,
        request_method=request.method,
        request_query=request.META.get('QUERY_STRING'),
        request_body=body,
        request_headers=get_headers_for_api_context(request),
        request_ip=get_ip(request),
    )
    make_processing_attempt(response, log)
def _request_too_large(request):
    """Return True when the declared body size exceeds MAX_UPLOAD_SIZE."""
    content_length = int(request.META.get('CONTENT_LENGTH') or 0)
    return content_length > settings.MAX_UPLOAD_SIZE
5,502 | allow duplicates | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import copy
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
# Public API of this generated module.
__all__ = [
    'CollaborationDataEncryptionMetadata',
    'CollaborationMember',
]
@pulumi.output_type
class CollaborationDataEncryptionMetadata(dict):
    # Generated output type: a dict keyed by the provider's camelCase names,
    # exposed through snake_case property getters below.
    @staticmethod
    def __key_warning(key: str):
        # Warn when code indexes with the camelCase provider key instead of
        # using the snake_case property.
        suggest = None
        if key == "allowClearText":
            suggest = "allow_clear_text"
        elif key == "allowDuplicates":
            suggest = "allow_duplicates"
        elif key == "allowJoinsOnColumnsWithDifferentNames":
            suggest = "allow_joins_on_columns_with_different_names"
        elif key == "preserveNulls":
            suggest = "preserve_nulls"

        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in CollaborationDataEncryptionMetadata. Access the value via the '{suggest}' property getter instead.")

    def __getitem__(self, key: str) -> Any:
        CollaborationDataEncryptionMetadata.__key_warning(key)
        return super().__getitem__(key)

    def get(self, key: str, default = None) -> Any:
        CollaborationDataEncryptionMetadata.__key_warning(key)
        return super().get(key, default)

    def __init__(__self__, *,
                 allow_clear_text: bool,
                 METHOD_NAME: bool,
                 allow_joins_on_columns_with_different_names: bool,
                 preserve_nulls: bool):
        pulumi.set(__self__, "allow_clear_text", allow_clear_text)
        pulumi.set(__self__, "allow_duplicates", METHOD_NAME)
        pulumi.set(__self__, "allow_joins_on_columns_with_different_names", allow_joins_on_columns_with_different_names)
        pulumi.set(__self__, "preserve_nulls", preserve_nulls)

    @property
    @pulumi.getter(name="allowClearText")
    def allow_clear_text(self) -> bool:
        return pulumi.get(self, "allow_clear_text")

    @property
    @pulumi.getter(name="allowDuplicates")
    def METHOD_NAME(self) -> bool:
        return pulumi.get(self, "allow_duplicates")

    @property
    @pulumi.getter(name="allowJoinsOnColumnsWithDifferentNames")
    def allow_joins_on_columns_with_different_names(self) -> bool:
        return pulumi.get(self, "allow_joins_on_columns_with_different_names")

    @property
    @pulumi.getter(name="preserveNulls")
    def preserve_nulls(self) -> bool:
        return pulumi.get(self, "preserve_nulls")
@pulumi.output_type
class CollaborationMember(dict):
    # Generated output type: a dict keyed by the provider's camelCase names,
    # exposed through snake_case property getters below.
    @staticmethod
    def __key_warning(key: str):
        # Warn when code indexes with the camelCase provider key instead of
        # using the snake_case property.
        suggest = None
        if key == "accountId":
            suggest = "account_id"
        elif key == "displayName":
            suggest = "display_name"
        elif key == "memberAbilities":
            suggest = "member_abilities"

        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in CollaborationMember. Access the value via the '{suggest}' property getter instead.")

    def __getitem__(self, key: str) -> Any:
        CollaborationMember.__key_warning(key)
        return super().__getitem__(key)

    def get(self, key: str, default = None) -> Any:
        CollaborationMember.__key_warning(key)
        return super().get(key, default)

    def __init__(__self__, *,
                 account_id: str,
                 display_name: str,
                 member_abilities: Sequence[str],
                 status: Optional[str] = None):
        pulumi.set(__self__, "account_id", account_id)
        pulumi.set(__self__, "display_name", display_name)
        pulumi.set(__self__, "member_abilities", member_abilities)
        if status is not None:
            pulumi.set(__self__, "status", status)

    @property
    @pulumi.getter(name="accountId")
    def account_id(self) -> str:
        return pulumi.get(self, "account_id")

    @property
    @pulumi.getter(name="displayName")
    def display_name(self) -> str:
        return pulumi.get(self, "display_name")

    @property
    @pulumi.getter(name="memberAbilities")
    def member_abilities(self) -> Sequence[str]:
        return pulumi.get(self, "member_abilities")

    @property
    @pulumi.getter
    def status(self) -> Optional[str]:
        return pulumi.get(self, "status")
|
5,503 | procfolder |
# parse xml files and output simplified version
import xml.dom.minidom
import os
import sys
import multiprocessing
def parsexml(afile, outpath):
    """Parse one CSJ XML file and write a tab-separated simplified version.

    For every IPU element one row is written containing: start time, end
    time, and five space-joined per-SUW columns (orthographic, plain
    orthographic, phonetic transcription, lemma, dictionary lemma).
    """
    target = os.path.join(outpath, afile.split('/')[-1] + '.simp')
    with open(target, 'w') as out:
        dom = xml.dom.minidom.parse(afile)
        root = dom.documentElement

        def attr(node, name, default=''):
            # Explicit default so a missing time stamp is rendered as 0.
            return node.getAttribute(name) if node.hasAttribute(name) else default

        suw_attrs = (
            'OrthographicTranscription',
            'PlainOrthographicTranscription',
            'PhoneticTranscription',
            'SUWLemma',
            'SUWDictionaryForm',
        )
        for ipu in root.getElementsByTagName('IPU'):
            start = attr(ipu, 'IPUStartTime', 0)
            end = attr(ipu, 'IPUEndTime', 0)
            columns = [[] for _ in suw_attrs]
            for suw in ipu.getElementsByTagName('SUW'):  # short unit word
                for col, name in zip(columns, suw_attrs):
                    col.append(attr(suw, name))
            fields = [start, end] + [' '.join(col) for col in columns]
            out.write('\t'.join(str(f) for f in fields) + '\n')
def procfolder_orig(apath, outpath):
    """Sequentially parse every .xml file found directly in ``apath``."""
    xml_names = (name for name in os.listdir(apath) if name.endswith('.xml'))
    for done, name in enumerate(xml_names, start=1):
        fullpath = os.path.join(apath, name)
        parsexml(fullpath, outpath)
        print('done: {} [{}]'.format(fullpath, done))
def METHOD_NAME(apath, outpath):
    """Parse every .xml file found directly in ``apath``, in parallel.

    Fix: the original created and tore down a fresh multiprocessing.Pool for
    every chunk of 16 files, forcing a full synchronisation at each chunk
    boundary. A single pool now processes all files and is joined once.
    """
    fnlist = [name for name in os.listdir(apath) if name.endswith('.xml')]
    nthreads = 16
    pool = multiprocessing.Pool(processes=nthreads)
    # NOTE(review): as in the original, the async results are never
    # collected, so worker exceptions are silently dropped; call .get() on
    # each result if failures should surface.
    for name in fnlist:
        pool.apply_async(parsexml, (os.path.join(apath, name), outpath))
    pool.close()
    pool.join()
    print('parallel {} threads done for {} files in total.'.format(
        nthreads, len(fnlist)))
if __name__ == '__main__':
    if len(sys.argv) < 3:
        print("Usage: {} <in.csj.path> <out.csj.path>".format(sys.argv[0]))
        exit(1)
    # e.g., csjpath='/workspace/asr/csj/'
    csjpath = sys.argv[1]
    outcsjpath = sys.argv[2]
    # Both the "core" and "noncore" parts of the corpus are processed.
    apath = os.path.join(csjpath, 'XML/BaseXML/core')
    apath2 = os.path.join(csjpath, 'XML/BaseXML/noncore')
    outapath = os.path.join(outcsjpath, 'xml')
    # create the "outapath" dir:
    # NOTE(review): os.mkdir requires outcsjpath to already exist; use
    # os.makedirs if nested creation is wanted.
    if not os.path.exists(outapath):
        os.mkdir(outapath)
    # range over the following two folders:
    METHOD_NAME(apath, outapath)
    METHOD_NAME(apath2, outapath)
5,504 | query service config2 | from _typeshed import Incomplete
from collections.abc import Iterable
import _win32typing
from win32.lib.pywintypes import error as error
# Window-station / desktop APIs (winuser.h).
def GetThreadDesktop(ThreadId) -> _win32typing.PyHDESK: ...
def EnumWindowStations() -> tuple[tuple[str, Incomplete], ...]: ...
def GetUserObjectInformation(Handle: int, _type) -> None: ...
def SetUserObjectInformation(Handle: int, info, _type) -> None: ...
def OpenWindowStation(szWinSta, Inherit, DesiredAccess) -> _win32typing.PyHWINSTA: ...
def OpenDesktop(szDesktop, Flags, Inherit, DesiredAccess) -> _win32typing.PyHDESK: ...
def CreateDesktop(
    Desktop, Flags, DesiredAccess, SecurityAttributes: _win32typing.PySECURITY_ATTRIBUTES
) -> _win32typing.PyHDESK: ...
def OpenInputDesktop(Flags, Inherit, DesiredAccess) -> _win32typing.PyHDESK: ...
def GetProcessWindowStation() -> _win32typing.PyHWINSTA: ...
def CreateWindowStation(
    WindowStation, Flags, DesiredAccess, SecurityAttributes: _win32typing.PySECURITY_ATTRIBUTES
) -> _win32typing.PyHWINSTA: ...
# Service Control Manager APIs (winsvc.h).
def EnumServicesStatus(hSCManager: _win32typing.PySC_HANDLE, ServiceType, ServiceState) -> tuple[Incomplete, ...]: ...
def EnumServicesStatusEx(
    SCManager: _win32typing.PySC_HANDLE, ServiceType, ServiceState, InfoLevel, GroupName: Incomplete | None = ...
) -> tuple[Incomplete, ...]: ...
def EnumDependentServices(hService: _win32typing.PySC_HANDLE, ServiceState) -> tuple[Incomplete, ...]: ...
def QueryServiceConfig(hService: _win32typing.PySC_HANDLE): ...
def StartService(hService: _win32typing.PySC_HANDLE, args: Iterable[str] | None) -> None: ...
def OpenService(scHandle: _win32typing.PySC_HANDLE, name: str, desiredAccess) -> _win32typing.PySC_HANDLE: ...
def OpenSCManager(machineName: str | None, dbName: str | None, desiredAccess: int) -> _win32typing.PySC_HANDLE: ...
def CloseServiceHandle(scHandle: _win32typing.PySC_HANDLE) -> None: ...
def QueryServiceStatus(hService: _win32typing.PySC_HANDLE) -> _win32typing.SERVICE_STATUS: ...
def QueryServiceStatusEx(hService: _win32typing.PySC_HANDLE) -> _win32typing.SERVICE_STATUS: ...
def SetServiceObjectSecurity(
    Handle: _win32typing.PySC_HANDLE, SecurityInformation, SecurityDescriptor: _win32typing.PySECURITY_DESCRIPTOR
) -> None: ...
def QueryServiceObjectSecurity(Handle: _win32typing.PySC_HANDLE, SecurityInformation) -> _win32typing.PySECURITY_DESCRIPTOR: ...
def GetServiceKeyName(hSCManager: _win32typing.PySC_HANDLE, DisplayName): ...
def GetServiceDisplayName(hSCManager: _win32typing.PySC_HANDLE, ServiceName): ...
def SetServiceStatus(scHandle, serviceStatus: _win32typing.SERVICE_STATUS | tuple[int, int, int, int, int, int, int]) -> None: ...
def ControlService(scHandle: _win32typing.PySC_HANDLE, code) -> _win32typing.SERVICE_STATUS: ...
def DeleteService(scHandle: _win32typing.PySC_HANDLE) -> None: ...
def CreateService(
    scHandle: _win32typing.PySC_HANDLE,
    name: str,
    displayName: str,
    desiredAccess: int,
    serviceType: int,
    startType: int,
    errorControl: int,
    binaryFile: str,
    loadOrderGroup: str | None,
    bFetchTag: bool,
    serviceDeps: Iterable[Incomplete] | None,
    acctName: str | None,
    password: str | None,
) -> _win32typing.PySC_HANDLE: ...
def ChangeServiceConfig(
    hService: _win32typing.PySC_HANDLE,
    serviceType: int,
    startType: int,
    errorControl: int,
    binaryFile: str | None,
    loadOrderGroup: str | None,
    bFetchTag: bool,
    serviceDeps: Iterable[Incomplete] | None,
    acctName: str | None,
    password: str | None,
    displayName: str | None,
): ...
def LockServiceDatabase(sc_handle: _win32typing.PySC_HANDLE): ...
def UnlockServiceDatabase(lock): ...
def QueryServiceLockStatus(hSCManager: _win32typing.PySC_HANDLE) -> tuple[Incomplete, str, Incomplete]: ...
def ChangeServiceConfig2(hService: _win32typing.PySC_HANDLE, InfoLevel, info) -> None: ...
def METHOD_NAME(hService: _win32typing.PySC_HANDLE, InfoLevel): ...
# Device-broadcast (DBT_*) notification codes.
DBT_CONFIGCHANGECANCELED: int
DBT_CONFIGCHANGED: int
DBT_CUSTOMEVENT: int
DBT_DEVICEARRIVAL: int
DBT_DEVICEQUERYREMOVE: int
DBT_DEVICEQUERYREMOVEFAILED: int
DBT_DEVICEREMOVECOMPLETE: int
DBT_DEVICEREMOVEPENDING: int
DBT_DEVICETYPESPECIFIC: int
DBT_QUERYCHANGECONFIG: int
DF_ALLOWOTHERACCOUNTHOOK: int
# Failure-action (SC_ACTION_*) codes.
SC_ACTION_NONE: int
SC_ACTION_REBOOT: int
SC_ACTION_RESTART: int
SC_ACTION_RUN_COMMAND: int
SC_ENUM_PROCESS_INFO: int
SC_GROUP_IDENTIFIER: int
# Service Control Manager access rights.
SC_MANAGER_ALL_ACCESS: int
SC_MANAGER_CONNECT: int
SC_MANAGER_CREATE_SERVICE: int
SC_MANAGER_ENUMERATE_SERVICE: int
SC_MANAGER_LOCK: int
SC_MANAGER_MODIFY_BOOT_CONFIG: int
SC_MANAGER_QUERY_LOCK_STATUS: int
# Controls a service reports it can accept (SERVICE_ACCEPT_*).
SERVICE_ACCEPT_HARDWAREPROFILECHANGE: int
SERVICE_ACCEPT_NETBINDCHANGE: int
SERVICE_ACCEPT_PARAMCHANGE: int
SERVICE_ACCEPT_PAUSE_CONTINUE: int
SERVICE_ACCEPT_POWEREVENT: int
SERVICE_ACCEPT_PRESHUTDOWN: int
SERVICE_ACCEPT_SESSIONCHANGE: int
SERVICE_ACCEPT_SHUTDOWN: int
SERVICE_ACCEPT_STOP: int
SERVICE_ACTIVE: int
SERVICE_ALL_ACCESS: int
# Start types.
SERVICE_AUTO_START: int
SERVICE_BOOT_START: int
SERVICE_CHANGE_CONFIG: int
# Config info levels for (Query/Change)ServiceConfig2.
SERVICE_CONFIG_DELAYED_AUTO_START_INFO: int
SERVICE_CONFIG_DESCRIPTION: int
SERVICE_CONFIG_FAILURE_ACTIONS: int
SERVICE_CONFIG_FAILURE_ACTIONS_FLAG: int
SERVICE_CONFIG_PRESHUTDOWN_INFO: int
SERVICE_CONFIG_REQUIRED_PRIVILEGES_INFO: int
SERVICE_CONFIG_SERVICE_SID_INFO: int
SERVICE_CONTINUE_PENDING: int
# Control codes sent to a service (SERVICE_CONTROL_*).
SERVICE_CONTROL_CONTINUE: int
SERVICE_CONTROL_DEVICEEVENT: int
SERVICE_CONTROL_HARDWAREPROFILECHANGE: int
SERVICE_CONTROL_INTERROGATE: int
SERVICE_CONTROL_NETBINDADD: int
SERVICE_CONTROL_NETBINDDISABLE: int
SERVICE_CONTROL_NETBINDENABLE: int
SERVICE_CONTROL_NETBINDREMOVE: int
SERVICE_CONTROL_PARAMCHANGE: int
SERVICE_CONTROL_PAUSE: int
SERVICE_CONTROL_POWEREVENT: int
SERVICE_CONTROL_PRESHUTDOWN: int
SERVICE_CONTROL_SESSIONCHANGE: int
SERVICE_CONTROL_SHUTDOWN: int
SERVICE_CONTROL_STOP: int
SERVICE_DEMAND_START: int
SERVICE_DISABLED: int
SERVICE_DRIVER: int
SERVICE_ENUMERATE_DEPENDENTS: int
# Error-control levels for service start failures.
SERVICE_ERROR_CRITICAL: int
SERVICE_ERROR_IGNORE: int
SERVICE_ERROR_NORMAL: int
SERVICE_ERROR_SEVERE: int
SERVICE_FILE_SYSTEM_DRIVER: int
SERVICE_INACTIVE: int
SERVICE_INTERACTIVE_PROCESS: int
SERVICE_INTERROGATE: int
SERVICE_KERNEL_DRIVER: int
SERVICE_NO_CHANGE: int
SERVICE_PAUSE_CONTINUE: int
SERVICE_PAUSE_PENDING: int
SERVICE_PAUSED: int
SERVICE_QUERY_CONFIG: int
SERVICE_QUERY_STATUS: int
SERVICE_RUNNING: int
SERVICE_SID_TYPE_NONE: int
SERVICE_SID_TYPE_RESTRICTED: int
SERVICE_SID_TYPE_UNRESTRICTED: int
SERVICE_SPECIFIC_ERROR: int
SERVICE_START: int
SERVICE_START_PENDING: int
SERVICE_STATE_ALL: int
SERVICE_STOP: int
SERVICE_STOP_PENDING: int
SERVICE_STOPPED: int
SERVICE_SYSTEM_START: int
SERVICE_USER_DEFINED_CONTROL: int
SERVICE_WIN32: int
SERVICE_WIN32_OWN_PROCESS: int
SERVICE_WIN32_SHARE_PROCESS: int
# GetUserObjectInformation query codes.
UOI_FLAGS: int
UOI_NAME: int
UOI_TYPE: int
UOI_USER_SID: int
WSF_VISIBLE: int
# Backwards-compatible aliases.
HDESKType = _win32typing.PyHDESK
HWINSTAType = _win32typing.PyHWINSTA
UNICODE: int
5,505 | name | # coding=utf-8
# *** WARNING: this file was generated by pulumi. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import copy
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
# Public API of this generated module.
__all__ = [
    'GetPrivateEndpointConnectionResult',
    'AwaitableGetPrivateEndpointConnectionResult',
    'get_private_endpoint_connection',
    'get_private_endpoint_connection_output',
]
@pulumi.output_type
class GetPrivateEndpointConnectionResult:
    """
    Properties of the PrivateEndpointConnection.
    """
    def __init__(__self__, id=None, location=None, METHOD_NAME=None, private_endpoint=None, private_link_service_connection_state=None, provisioning_state=None, system_data=None, type=None):
        # Each argument is type-checked before being stored via pulumi.set,
        # because values arrive untyped from the provider invoke.
        if id and not isinstance(id, str):
            raise TypeError("Expected argument 'id' to be a str")
        pulumi.set(__self__, "id", id)
        if location and not isinstance(location, str):
            raise TypeError("Expected argument 'location' to be a str")
        pulumi.set(__self__, "location", location)
        if METHOD_NAME and not isinstance(METHOD_NAME, str):
            raise TypeError("Expected argument 'name' to be a str")
        pulumi.set(__self__, "name", METHOD_NAME)
        if private_endpoint and not isinstance(private_endpoint, dict):
            raise TypeError("Expected argument 'private_endpoint' to be a dict")
        pulumi.set(__self__, "private_endpoint", private_endpoint)
        if private_link_service_connection_state and not isinstance(private_link_service_connection_state, dict):
            raise TypeError("Expected argument 'private_link_service_connection_state' to be a dict")
        pulumi.set(__self__, "private_link_service_connection_state", private_link_service_connection_state)
        if provisioning_state and not isinstance(provisioning_state, str):
            raise TypeError("Expected argument 'provisioning_state' to be a str")
        pulumi.set(__self__, "provisioning_state", provisioning_state)
        if system_data and not isinstance(system_data, dict):
            raise TypeError("Expected argument 'system_data' to be a dict")
        pulumi.set(__self__, "system_data", system_data)
        if type and not isinstance(type, str):
            raise TypeError("Expected argument 'type' to be a str")
        pulumi.set(__self__, "type", type)

    @property
    @pulumi.getter
    def id(self) -> str:
        """
        Fully qualified resource ID for the resource. Ex - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}
        """
        return pulumi.get(self, "id")

    @property
    @pulumi.getter
    def location(self) -> str:
        """
        The geo-location where the resource lives
        """
        return pulumi.get(self, "location")

    @property
    @pulumi.getter
    def METHOD_NAME(self) -> str:
        """
        The name of the resource
        """
        return pulumi.get(self, "name")

    @property
    @pulumi.getter(METHOD_NAME="privateEndpoint")
    def private_endpoint(self) -> Optional['outputs.PrivateEndpointResponse']:
        """
        The Private Endpoint resource for this Connection.
        """
        return pulumi.get(self, "private_endpoint")

    @property
    @pulumi.getter(METHOD_NAME="privateLinkServiceConnectionState")
    def private_link_service_connection_state(self) -> Optional['outputs.ConnectionStateResponse']:
        """
        Details about the state of the connection.
        """
        return pulumi.get(self, "private_link_service_connection_state")

    @property
    @pulumi.getter(METHOD_NAME="provisioningState")
    def provisioning_state(self) -> Optional[str]:
        """
        Provisioning state of the Private Endpoint Connection.
        """
        return pulumi.get(self, "provisioning_state")

    @property
    @pulumi.getter(METHOD_NAME="systemData")
    def system_data(self) -> 'outputs.SystemDataResponse':
        """
        The system meta data relating to this resource.
        """
        return pulumi.get(self, "system_data")

    @property
    @pulumi.getter
    def type(self) -> str:
        """
        The type of the resource. E.g. "Microsoft.EventHub/Namespaces" or "Microsoft.EventHub/Namespaces/EventHubs"
        """
        return pulumi.get(self, "type")
class AwaitableGetPrivateEndpointConnectionResult(GetPrivateEndpointConnectionResult):
    # Awaitable wrapper so the invoke result can be used with `await`;
    # awaiting simply returns a plain result copy.
    # pylint: disable=using-constant-test
    def __await__(self):
        if False:
            yield self
        return GetPrivateEndpointConnectionResult(
            id=self.id,
            location=self.location,
            METHOD_NAME=self.METHOD_NAME,
            private_endpoint=self.private_endpoint,
            private_link_service_connection_state=self.private_link_service_connection_state,
            provisioning_state=self.provisioning_state,
            system_data=self.system_data,
            type=self.type)
def get_private_endpoint_connection(namespace_name: Optional[str] = None,
                                    private_endpoint_connection_name: Optional[str] = None,
                                    resource_group_name: Optional[str] = None,
                                    opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetPrivateEndpointConnectionResult:
    """
    Gets a description for the specified Private Endpoint Connection name.


    :param str namespace_name: The Namespace name
    :param str private_endpoint_connection_name: The PrivateEndpointConnection name
    :param str resource_group_name: Name of the resource group within the azure subscription.
    """
    __args__ = dict()
    __args__['namespaceName'] = namespace_name
    __args__['privateEndpointConnectionName'] = private_endpoint_connection_name
    __args__['resourceGroupName'] = resource_group_name
    opts = pulumi.InvokeOptions.merge(_utilities.get_invoke_opts_defaults(), opts)
    # Synchronous provider invoke; the result dict is unpacked field by field.
    __ret__ = pulumi.runtime.invoke('azure-native:eventhub/v20230101preview:getPrivateEndpointConnection', __args__, opts=opts, typ=GetPrivateEndpointConnectionResult).value

    return AwaitableGetPrivateEndpointConnectionResult(
        id=pulumi.get(__ret__, 'id'),
        location=pulumi.get(__ret__, 'location'),
        METHOD_NAME=pulumi.get(__ret__, 'name'),
        private_endpoint=pulumi.get(__ret__, 'private_endpoint'),
        private_link_service_connection_state=pulumi.get(__ret__, 'private_link_service_connection_state'),
        provisioning_state=pulumi.get(__ret__, 'provisioning_state'),
        system_data=pulumi.get(__ret__, 'system_data'),
        type=pulumi.get(__ret__, 'type'))
@_utilities.lift_output_func(get_private_endpoint_connection)
def get_private_endpoint_connection_output(namespace_name: Optional[pulumi.Input[str]] = None,
                                           private_endpoint_connection_name: Optional[pulumi.Input[str]] = None,
                                           resource_group_name: Optional[pulumi.Input[str]] = None,
                                           opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetPrivateEndpointConnectionResult]:
    """
    Gets a description for the specified Private Endpoint Connection name.


    :param str namespace_name: The Namespace name
    :param str private_endpoint_connection_name: The PrivateEndpointConnection name
    :param str resource_group_name: Name of the resource group within the azure subscription.
    """
    # Body supplied by the lift_output_func decorator.
    ...
5,506 | get hash | import numpy as np
import random
import itertools
import torch
from naslib.search_spaces.core.graph import Graph
from naslib.search_spaces.core.query_metrics import Metric
class NATSBenchSizeSearchSpace(Graph):
    """
    Implementation of the NATS-Bench size search space, where an architecture
    is described by the number of channels of each of its five layers.
    It also has an interface to the tabular NATS-Bench benchmark (see query()).
    """
    # Architectures in this space can be queried against a tabular benchmark API.
    QUERYABLE = True
    def __init__(self):
        super().__init__()
        # Candidate channel counts for every layer: 8, 16, ..., 64.
        self.channel_candidates = [8*i for i in range(1, 9)]
        # Current architecture encoding: one channel count per layer (5 layers).
        self.channels = [8, 8, 8, 8, 8]
        self.space_name = "natsbenchsizesearchspace"
        # Graph not implemented
    def query(
        self,
        metric=None,
        dataset=None,
        path=None,
        epoch=-1,
        full_lc=False,
        dataset_api=None,
        hp=90,
        is_random=False
    ):
        """
        Query results from natsbench

        Args:
            metric : Metric to query for
            dataset : Dataset to query for
            path : Unused here; kept for interface compatibility with other search spaces
            epoch : If specified, returns the metric of the arch at that epoch of training
            full_lc : If true, returns the curve of the given metric from the first to the last epoch
            dataset_api : API to use for querying metrics
            hp : Number of epochs the model was trained for. Value is in {1, 12, 90}
            is_random : When True, the performance of a random architecture will be returned
                        When False, the performance of all trials will be averaged.
        """
        assert isinstance(metric, Metric)
        assert dataset in [
            "cifar10",
            "cifar100",
            "ImageNet16-120",
        ], "Unknown dataset: {}".format(dataset)
        assert epoch >= -1 and epoch < hp
        assert hp in [1, 12, 90], "hp must be 1, 12 or 90"
        if dataset=='cifar10':
            # NATS-Bench has no separate validation metrics for CIFAR-10.
            assert metric not in [Metric.VAL_ACCURACY, Metric.VAL_LOSS, Metric.VAL_TIME],\
                "Validation metrics not available for CIFAR-10"
        # Map naslib metrics to the key names used by the NATS-Bench API.
        metric_to_natsbench = {
            Metric.TRAIN_ACCURACY: "train-accuracy",
            Metric.VAL_ACCURACY: "valid-accuracy",
            Metric.TEST_ACCURACY: "test-accuracy",
            Metric.TRAIN_LOSS: "train-loss",
            Metric.VAL_LOSS: "valid-loss",
            Metric.TEST_LOSS: "test-loss",
            Metric.TRAIN_TIME: "train-all-time",
            Metric.VAL_TIME: "valid-all-time",
            Metric.TEST_TIME: "test-all-time"
        }
        if metric not in metric_to_natsbench.keys():
            raise NotImplementedError(f"NATS-Bench does not support querying {metric}")
        if dataset_api is None:
            raise NotImplementedError("Must pass in dataset_api to query natsbench")
        # The benchmark index is the base-8 number whose digits are
        # (channels // 8 - 1), one digit per layer.
        arch_index = int(''.join([str(ch//8 - 1) for ch in self.channels]), 8)
        if epoch == -1:
            # Default to the last epoch of the chosen training budget.
            epoch = hp - 1
        # The NATS-Bench API expects the epoch budget as a zero-padded string.
        hp = f"{hp:02d}"
        if full_lc:
            # Collect the metric at every epoch to build the learning curve.
            metrics = []
            for epoch in range(int(hp)):
                result = dataset_api.get_more_info(arch_index, dataset, iepoch=epoch, hp=hp, is_random=is_random)
                metrics.append(result[metric_to_natsbench[metric]])
            return metrics
        else:
            results = dataset_api.get_more_info(arch_index, dataset, iepoch=epoch, hp=hp, is_random=is_random)
            return results[metric_to_natsbench[metric]]
    def get_channels(self):
        """Return the current channel configuration (list of 5 ints)."""
        return self.channels
    def set_channels(self, channels):
        """Set the current channel configuration."""
        self.channels = channels
    def METHOD_NAME(self):
        """Return a hashable identifier for this architecture (tuple of channels)."""
        return tuple(self.get_channels())
    def get_arch_iterator(self, dataset_api=None):
        """Iterate over all architectures: cartesian product of candidates per layer."""
        return itertools.product(self.channel_candidates, repeat=len(self.channels))
    def set_spec(self, channels, dataset_api=None):
        # this is just to unify the setters across search spaces
        # TODO: change it to set_spec on all search spaces
        self.set_channels(channels)
    def sample_random_architecture(self, dataset_api=None):
        """
        Randomly sample an architecture
        """
        channels = np.random.choice(self.channel_candidates, size=len(self.channels)).tolist()
        self.set_channels(channels)
    def mutate(self, parent, dataset_api=None):
        """
        Mutate one channel from the parent channels
        """
        base_channels = list(parent.get_channels().copy())
        mutate_index = np.random.randint(len(self.channels))  # Index to perform mutation at
        # Remove number of channels at that index in base_channels from the viable candidates
        candidates = self.channel_candidates.copy()
        candidates.remove(base_channels[mutate_index])
        base_channels[mutate_index] = np.random.choice(candidates)
        self.set_channels(base_channels)
    def get_nbhd(self, dataset_api=None):
        """
        Return all neighbours of the architecture
        """
        neighbours = []
        for idx in range(len(self.channels)):
            # Every alternative channel count at position idx yields a neighbour.
            candidates = self.channel_candidates.copy()
            candidates.remove(self.channels[idx])
            for channels in candidates:
                neighbour_channels = list(self.channels).copy()
                neighbour_channels[idx] = channels
                neighbour = NATSBenchSizeSearchSpace()
                neighbour.set_channels(neighbour_channels)
                # Wrap in a torch Module so callers can access `.arch` uniformly.
                neighbour_model = torch.nn.Module()
                neighbour_model.arch = neighbour
                neighbours.append(neighbour_model)
        random.shuffle(neighbours)
        return neighbours
    def get_type(self):
        """Return the string identifier of this search space."""
        return "natsbenchsize"
|
5,507 | get raw | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Copyright (C) 2015-2019 Bitergia
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Authors:
# Valerio Cosentino <valcos@bitergia.com>
#
import argparse
import logging
import os
import colorlog
import sys
from sirmordred.config import Config
from sirmordred.task_collection import TaskRawDataCollection
from sirmordred.task_identities import TaskIdentitiesMerge
from sirmordred.task_enrich import TaskEnrich
from sirmordred.task_panels import TaskPanels, TaskPanelsMenu
from sirmordred.task_projects import TaskProjects
DEBUG_LOG_FORMAT = "[%(asctime)s - %(name)s - %(levelname)s] - %(message)s"
INFO_LOG_FORMAT = "%(asctime)s %(message)s"
COLOR_LOG_FORMAT_SUFFIX = "\033[1m %(log_color)s "
LOG_COLORS = {'DEBUG': 'white', 'INFO': 'cyan', 'WARNING': 'yellow', 'ERROR': 'red', 'CRITICAL': 'red,bg_white'}
def main():
    """Entry point: parse CLI arguments, configure logging, run requested phases."""
    args = get_params()
    config_logging(args.debug, args.logs_dir)
    micro_mordred(
        args.cfg_path,
        args.backend_sections,
        args.repos_to_check,
        args.raw,
        args.identities_merge,
        args.enrich,
        args.panels,
    )
def micro_mordred(cfg_path, backend_sections, repos_to_check, raw, identities_merge, enrich, panels):
    """Execute the requested Mordred phases using the configuration file (`cfg_path`).

    :param cfg_path: the path of a Mordred configuration file
    :param backend_sections: backend sections on which the raw/enrich/identities phases run
    :param repos_to_check: only process repositories in this list, or `None` for all repos
    :param raw: when true, collect raw data
    :param identities_merge: when true, run the identities merging process
    :param enrich: when true, enrich the raw data
    :param panels: when true, upload all panels declared in the configuration file
    """
    config = Config(cfg_path)
    if raw:
        for section in backend_sections:
            METHOD_NAME(config, section, repos_to_check)
    if identities_merge:
        get_identities_merge(config)
    if enrich:
        for section in backend_sections:
            get_enrich(config, section, repos_to_check)
    if panels:
        get_panels(config)
def METHOD_NAME(config, backend_section, repos_to_check=None):
    """Execute the raw collection phase for a single backend section.

    A repository is only processed when it appears in BOTH `repos_to_check`
    and the `projects.json` file.

    :param config: a Mordred config object
    :param backend_section: the backend section where the raw phase is executed
    :param repos_to_check: a list of repo URLs to check, or None to check all repos
    """
    collection = TaskRawDataCollection(config, backend_section=backend_section, allowed_repos=repos_to_check)
    TaskProjects(config).execute()
    try:
        collection.execute()
    except Exception as exc:
        logging.error(str(exc))
        sys.exit(-1)
    else:
        logging.info("Loading raw data finished!")
def get_identities_merge(config):
    """Execute the identities-merging phase.

    :param config: a Mordred config object
    """
    TaskProjects(config).execute()
    TaskIdentitiesMerge(config).execute()
    logging.info("Merging identities finished!")
def get_enrich(config, backend_section, repos_to_check=None):
    """Execute the enrich phase for a single backend section.

    A repository is only processed when it appears in BOTH `repos_to_check`
    and the `projects.json` file.

    :param config: a Mordred config object
    :param backend_section: the backend section where the enrich phase is executed
    :param repos_to_check: a list of repo URLs to check, or None to check all repos
    """
    TaskProjects(config).execute()
    enrichment = TaskEnrich(config, backend_section=backend_section, allowed_repos=repos_to_check)
    try:
        enrichment.execute()
    except Exception as exc:
        logging.error(str(exc))
        sys.exit(-1)
    else:
        logging.info("Loading enriched data finished!")
def get_panels(config):
    """Execute the panels phase: upload the panels, then build the panels menu.

    :param config: a Mordred config object
    """
    for task_class in (TaskPanels, TaskPanelsMenu):
        task_class(config).execute()
    logging.info("Panels creation finished!")
def config_logging(debug, logs_dir):
    """Configure the logging level, colored console output and an optional log file."""
    if debug:
        fmt = DEBUG_LOG_FORMAT
        logging_mode = logging.DEBUG
    else:
        fmt = INFO_LOG_FORMAT
        logging_mode = logging.INFO
    # Setting the color scheme and level into the root logger
    logging.basicConfig(level=logging_mode)
    # basicConfig just installed a StreamHandler on the root logger;
    # replace its formatter with a colored one.
    stream = logging.root.handlers[0]
    formatter = colorlog.ColoredFormatter(fmt=COLOR_LOG_FORMAT_SUFFIX + fmt,
                                          log_colors=LOG_COLORS)
    stream.setFormatter(formatter)
    # Creating a file handler and adding it to root
    if logs_dir:
        fh_filepath = os.path.join(logs_dir, 'all.log')
        fh = logging.FileHandler(fh_filepath, mode='w')
        fh.setLevel(logging_mode)
        # The file gets the plain (non-colored) format.
        formatter = logging.Formatter(fmt)
        fh.setFormatter(formatter)
        logging.getLogger().addHandler(fh)
    # ES logger is set to INFO since, it produces a really verbose output if set to DEBUG
    logging.getLogger('elasticsearch').setLevel(logging.INFO)
    # Show if debug mode is activated
    if debug:
        logging.debug("Debug mode activated")
def get_params_parser():
    """Build the argument parser for the micro-mordred command line.

    Prints the help text and exits when invoked with no arguments at all.
    """
    parser = argparse.ArgumentParser(add_help=False)
    parser.add_argument('-g', '--debug', dest='debug',
                        action='store_true',
                        help=argparse.SUPPRESS)
    parser.add_argument("--raw", action='store_true', dest='raw',
                        help="Activate raw task")
    parser.add_argument("--enrich", action='store_true', dest='enrich',
                        help="Activate enrich task")
    parser.add_argument("--identities-merge", action='store_true', dest='identities_merge',
                        help="Activate merge identities task")
    parser.add_argument("--panels", action='store_true', dest='panels',
                        help="Activate panels task")
    parser.add_argument("--cfg", dest='cfg_path',
                        help="Configuration file path")
    parser.add_argument("--backends", dest='backend_sections', default=[],
                        nargs='*', help="Backend sections to execute")
    parser.add_argument("--repos", dest='repos_to_check', default=None,
                        nargs='*', help="Limit which repositories are processed (list of URLs)")
    parser.add_argument("--logs-dir", dest='logs_dir', default='', help='Logs Directory')
    # No arguments given: show usage and bail out.
    if len(sys.argv) == 1:
        parser.print_help()
        sys.exit(1)
    return parser
def get_params():
    """Parse command line args and validate that a task and a config file were given."""
    args = get_params_parser().parse_args()
    enabled_tasks = [args.raw, args.enrich, args.identities_merge, args.panels]
    if not any(enabled_tasks):
        print("No tasks enabled")
        sys.exit(1)
    if args.cfg_path is None:
        print("Config file path not provided")
        sys.exit(1)
    return args
if __name__ == '__main__':
main() |
5,508 | setup network | #!/usr/bin/env python3
# Copyright (c) 2021-2021 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test that a node in blocksonly mode does not request compact blocks."""
from test_framework.messages import (
MSG_BLOCK,
MSG_CMPCT_BLOCK,
MSG_WITNESS_FLAG,
CBlock,
CBlockHeader,
CInv,
from_hex,
msg_block,
msg_getdata,
msg_headers,
msg_sendcmpct,
)
from test_framework.p2p import P2PInterface
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import assert_equal
class P2PCompactBlocksBlocksOnly(BitcoinTestFramework):
    """Check BIP152 (compact block) behaviour of a -blocksonly node."""
    def set_test_params(self):
        # node0 runs in -blocksonly mode; nodes 1-3 are regular nodes.
        self.extra_args = [["-blocksonly"], [], [], []]
        self.num_nodes = 4
    def METHOD_NAME(self):
        """Set up the nodes without creating any inter-node connections."""
        self.setup_nodes()
        # Start network with everyone disconnected
        self.sync_all()
    def build_block_on_tip(self):
        """Mine one block on node2 (the miner) and return it as a CBlock."""
        blockhash = self.generate(self.nodes[2], 1, sync_fun=self.no_op)[0]
        block_hex = self.nodes[2].getblock(blockhash=blockhash, verbosity=0)
        block = from_hex(CBlock(), block_hex)
        block.rehash()
        return block
    def run_test(self):
        """Exercise BIP152 high/low bandwidth mode selection for blocksonly vs normal nodes."""
        # Nodes will only request hb compact blocks mode when they're out of IBD
        for node in self.nodes:
            assert not node.getblockchaininfo()['initialblockdownload']
        p2p_conn_blocksonly = self.nodes[0].add_p2p_connection(P2PInterface())
        p2p_conn_high_bw = self.nodes[1].add_p2p_connection(P2PInterface())
        p2p_conn_low_bw = self.nodes[3].add_p2p_connection(P2PInterface())
        for conn in [p2p_conn_blocksonly, p2p_conn_high_bw, p2p_conn_low_bw]:
            assert_equal(conn.message_count['sendcmpct'], 1)
            conn.send_and_ping(msg_sendcmpct(announce=False, version=2))
        # Nodes:
        #   0 -> blocksonly
        #   1 -> high bandwidth
        #   2 -> miner
        #   3 -> low bandwidth
        #
        # Topology:
        #   p2p_conn_blocksonly ---> node0
        #   p2p_conn_high_bw    ---> node1
        #   p2p_conn_low_bw     ---> node3
        #   node2 (no connections)
        #
        # node2 produces blocks that are passed to the rest of the nodes
        # through the respective p2p connections.
        self.log.info("Test that -blocksonly nodes do not select peers for BIP152 high bandwidth mode")
        block0 = self.build_block_on_tip()
        # A -blocksonly node should not request BIP152 high bandwidth mode upon
        # receiving a new valid block at the tip.
        p2p_conn_blocksonly.send_and_ping(msg_block(block0))
        assert_equal(int(self.nodes[0].getbestblockhash(), 16), block0.sha256)
        assert_equal(p2p_conn_blocksonly.message_count['sendcmpct'], 1)
        assert_equal(p2p_conn_blocksonly.last_message['sendcmpct'].announce, False)
        # A normal node participating in transaction relay should request BIP152
        # high bandwidth mode upon receiving a new valid block at the tip.
        p2p_conn_high_bw.send_and_ping(msg_block(block0))
        assert_equal(int(self.nodes[1].getbestblockhash(), 16), block0.sha256)
        p2p_conn_high_bw.wait_until(lambda: p2p_conn_high_bw.message_count['sendcmpct'] == 2)
        assert_equal(p2p_conn_high_bw.last_message['sendcmpct'].announce, True)
        # Don't send a block from the p2p_conn_low_bw so the low bandwidth node
        # doesn't select it for BIP152 high bandwidth relay.
        self.nodes[3].submitblock(block0.serialize().hex())
        self.log.info("Test that -blocksonly nodes send getdata(BLOCK) instead"
                      " of getdata(CMPCT) in BIP152 low bandwidth mode")
        block1 = self.build_block_on_tip()
        p2p_conn_blocksonly.send_message(msg_headers(headers=[CBlockHeader(block1)]))
        p2p_conn_blocksonly.sync_send_with_ping()
        assert_equal(p2p_conn_blocksonly.last_message['getdata'].inv, [CInv(MSG_BLOCK | MSG_WITNESS_FLAG, block1.sha256)])
        p2p_conn_high_bw.send_message(msg_headers(headers=[CBlockHeader(block1)]))
        p2p_conn_high_bw.sync_send_with_ping()
        assert_equal(p2p_conn_high_bw.last_message['getdata'].inv, [CInv(MSG_CMPCT_BLOCK, block1.sha256)])
        self.log.info("Test that getdata(CMPCT) is still sent on BIP152 low bandwidth connections"
                      " when no -blocksonly nodes are involved")
        p2p_conn_low_bw.send_and_ping(msg_headers(headers=[CBlockHeader(block1)]))
        p2p_conn_low_bw.sync_with_ping()
        assert_equal(p2p_conn_low_bw.last_message['getdata'].inv, [CInv(MSG_CMPCT_BLOCK, block1.sha256)])
        self.log.info("Test that -blocksonly nodes still serve compact blocks")
        def test_for_cmpctblock(block):
            # True once a cmpctblock for `block` has been received on the connection.
            if 'cmpctblock' not in p2p_conn_blocksonly.last_message:
                return False
            return p2p_conn_blocksonly.last_message['cmpctblock'].header_and_shortids.header.rehash() == block.sha256
        p2p_conn_blocksonly.send_message(msg_getdata([CInv(MSG_CMPCT_BLOCK, block0.sha256)]))
        p2p_conn_blocksonly.wait_until(lambda: test_for_cmpctblock(block0))
        # Request BIP152 high bandwidth mode from the -blocksonly node.
        p2p_conn_blocksonly.send_and_ping(msg_sendcmpct(announce=True, version=2))
        block2 = self.build_block_on_tip()
        self.nodes[0].submitblock(block1.serialize().hex())
        self.nodes[0].submitblock(block2.serialize().hex())
        p2p_conn_blocksonly.wait_until(lambda: test_for_cmpctblock(block2))
if __name__ == '__main__':
P2PCompactBlocksBlocksOnly().main() |
5,509 | test aggregates widget load weighted mean | # ┏━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┓
# ┃ ██████ ██████ ██████ █ █ █ █ █ █▄ ▀███ █ ┃
# ┃ ▄▄▄▄▄█ █▄▄▄▄▄ ▄▄▄▄▄█ ▀▀▀▀▀█▀▀▀▀▀ █ ▀▀▀▀▀█ ████████▌▐███ ███▄ ▀█ █ ▀▀▀▀▀ ┃
# ┃ █▀▀▀▀▀ █▀▀▀▀▀ █▀██▀▀ ▄▄▄▄▄ █ ▄▄▄▄▄█ ▄▄▄▄▄█ ████████▌▐███ █████▄ █ ▄▄▄▄▄ ┃
# ┃ █ ██████ █ ▀█▄ █ ██████ █ ███▌▐███ ███████▄ █ ┃
# ┣━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┫
# ┃ Copyright (c) 2017, the Perspective Authors. ┃
# ┃ ╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌ ┃
# ┃ This file is part of the Perspective library, distributed under the terms ┃
# ┃ of the [Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0). ┃
# ┗━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┛
from pytest import raises
from perspective import PerspectiveError, PerspectiveViewer, PerspectiveWidget, Aggregate, Table
class TestAggregates:
    """Tests for configuring and validating aggregates on Perspective widgets/viewers."""
    def test_aggregates_widget_load(self):
        """Aggregates passed at construction are preserved."""
        aggs = {"a": Aggregate.AVG, "b": Aggregate.LAST}
        data = {"a": [1, 2, 3], "b": ["a", "b", "c"]}
        widget = PerspectiveWidget(data, aggregates=aggs)
        assert widget.aggregates == aggs
    def METHOD_NAME(self):
        """A `["weighted mean", column]` aggregate spec is accepted."""
        aggs = {"a": Aggregate.AVG, "b": ["weighted mean", "a"]}
        data = {"a": [1, 2, 3], "b": ["a", "b", "c"]}
        widget = PerspectiveWidget(data, aggregates=aggs)
        assert widget.aggregates == aggs
    def test_aggregates_widget_setattr(self):
        """Enum values assigned after construction are stored as their string values."""
        data = {"a": [1, 2, 3], "b": ["a", "b", "c"]}
        widget = PerspectiveWidget(data)
        widget.aggregates = {"a": Aggregate.ANY, "b": Aggregate.LAST}
        assert widget.aggregates == {"a": "any", "b": "last"}
    def test_aggregates_widget_load_invalid(self):
        """Unknown aggregate names are rejected at construction."""
        data = {"a": [1, 2, 3], "b": ["a", "b", "c"]}
        with raises(PerspectiveError):
            PerspectiveWidget(data, aggregates={"a": "?"})
    def test_aggregates_widget_setattr_invalid(self):
        """Unknown aggregate names are rejected on assignment."""
        data = {"a": [1, 2, 3], "b": ["a", "b", "c"]}
        widget = PerspectiveWidget(data)
        with raises(PerspectiveError):
            widget.aggregates = {"a": "?"}
    def test_aggregates_widget_init_all(self):
        """Every Aggregate enum member is accepted at construction."""
        data = {"a": [1, 2, 3], "b": ["a", "b", "c"]}
        for agg in Aggregate:
            widget = PerspectiveWidget(data, aggregates={"a": agg})
            assert widget.aggregates == {"a": agg.value}
    def test_aggregates_widget_set_all(self):
        """Every Aggregate enum member is accepted on assignment."""
        data = {"a": [1, 2, 3], "b": ["a", "b", "c"]}
        widget = PerspectiveWidget(data)
        for agg in Aggregate:
            widget.aggregates = {"a": agg}
            assert widget.aggregates == {"a": agg.value}
    def test_aggregates_viewer_load(self):
        viewer = PerspectiveViewer(aggregates={"a": Aggregate.AVG})
        assert viewer.aggregates == {"a": "avg"}
    def test_aggregates_viewer_setattr(self):
        viewer = PerspectiveViewer()
        viewer.aggregates = {"a": Aggregate.AVG}
        assert viewer.aggregates == {"a": "avg"}
    def test_aggregates_viewer_init_all(self):
        for agg in Aggregate:
            viewer = PerspectiveViewer(aggregates={"a": agg})
            assert viewer.aggregates == {"a": agg.value}
    def test_aggregates_viewer_set_all(self):
        viewer = PerspectiveViewer()
        for agg in Aggregate:
            viewer.aggregates = {"a": agg}
            assert viewer.aggregates == {"a": agg.value}
    def get_median(self, input_data):
        """Helper: aggregate `Price` with `median`, grouped by `Item`, and return it."""
        table = Table(data=input_data)
        view = table.view(columns=["Price"], aggregates={"Price": "median"}, group_by=["Item"])
        return view.to_json()[0]["Price"]
    def test_aggregate_median(self):
        """Median aggregate over numeric and string columns.

        For an even number of numeric values, the median is the mean of the two
        middle values; for strings it is the upper of the two middle values.
        """
        numeric_data = [
            {"Item": "Book", "Price": 2.0},
            {"Item": "Book", "Price": 3.0},
            {"Item": "Book", "Price": 5.0},
            {"Item": "Book", "Price": 4.0},
            {"Item": "Book", "Price": 8.0},
            {"Item": "Book", "Price": 9.0},
            {"Item": "Book", "Price": 6.0},
        ]
        non_numeric_data = [
            {"Item": "Book", "Price": "2"},
            {"Item": "Book", "Price": "3"},
            {"Item": "Book", "Price": "5"},
            {"Item": "Book", "Price": "4"},
            {"Item": "Book", "Price": "8"},
            {"Item": "Book", "Price": "9"},
            {"Item": "Book", "Price": "6"},
        ]
        # Testing with numeric data
        assert self.get_median(numeric_data) == 5.0  # List = [2.0,3.0,5.0,4.0,8.0,9.0,6.0], median = 5.0
        assert self.get_median(numeric_data[:2]) == 2.5  # List = [2.0,3.0], median = 2.5
        assert self.get_median(numeric_data[5:]) == 7.5  # List = [9.0,6.0], median = 7.5
        assert self.get_median(numeric_data[1:]) == 5.5  # List = [3.0,5.0,4.0,8.0,9.0,6.0], median = 5.5
        assert self.get_median(numeric_data[::2]) == 5.5  # List = [2.0,5.0,8.0,6.0], median = 5.5
        # Testing with non-numeric data
        assert self.get_median(non_numeric_data) == "5"  # List = ['2','3','5','4','8','9','6'], median = '5'
        assert self.get_median(non_numeric_data[:2]) == "3"  # List = ['2','3'], median = '3'
        assert self.get_median(non_numeric_data[5:]) == "9"  # List = ['9','6'], median = '9'
        assert self.get_median(non_numeric_data[1:]) == "6"  # List = ['3','5','4','8','9','6'], median = '6'
        assert self.get_median(non_numeric_data[::2]) == "6"  # List = ['2','5','8','6'], median = '6'
5,510 | resolve repository flags | from . import api
from .api import ET
from .. import core as osc_core
from .. import oscerr
class APIXMLBase:
    """Base wrapper around an XML document exchanged with the build service API."""
    def __init__(self, xml_root, apiurl=None):
        # `xml_root` is the parsed XML element tree root;
        # `apiurl` is the API server the document came from (may be None).
        self.root = xml_root
        self.apiurl = apiurl
    def to_bytes(self):
        """Return the XML document as indented UTF-8 bytes."""
        api.xml_indent(self.root)
        return ET.tostring(self.root, encoding="utf-8")
    def to_string(self):
        """Return the XML document as an indented string."""
        return self.to_bytes().decode("utf-8")
class ProjectMeta(APIXMLBase):
    """Wrapper around a project `_meta` XML document with repository/publish helpers."""
    @classmethod
    def from_api(cls, apiurl, project):
        """Fetch the `_meta` document of `project` from the API and wrap it."""
        url_path = ["source", project, "_meta"]
        root = api.get(apiurl, url_path)
        obj = cls(root, apiurl=apiurl)
        return obj
    def to_api(self, apiurl, project):
        """Upload this `_meta` document back to the API."""
        url_path = ["source", project, "_meta"]
        api.put(apiurl, url_path, data=self.to_bytes())
    def repository_list(self):
        """Return a list of dicts, one per <repository>: name, archs and path attrs."""
        result = []
        repo_nodes = api.find_nodes(self.root, "project", "repository")
        for repo_node in repo_nodes:
            arch_nodes = api.find_nodes(repo_node, "repository", "arch")
            path_nodes = api.find_nodes(repo_node, "repository", "path")
            repo = {
                "name": repo_node.attrib["name"],
                "archs": [i.text.strip() for i in arch_nodes],
                "paths": [i.attrib.copy() for i in path_nodes],
            }
            result.append(repo)
        return result
    def repository_add(self, name, arches, paths):
        """Add a <repository> with the given name, arches and path entries.

        :raises oscerr.OscValueError: if a repository named `name` already exists
        """
        node = api.find_node(self.root, "project")
        existing = api.find_node(self.root, "project", "repository", {"name": name})
        if existing:
            raise oscerr.OscValueError(f"Repository '{name}' already exists in project meta")
        repo_node = ET.SubElement(node, "repository", attrib={"name": name})
        for path_data in paths:
            ET.SubElement(repo_node, "path", attrib={
                "project": path_data["project"],
                "repository": path_data["repository"],
            })
        for arch in arches:
            arch_node = ET.SubElement(repo_node, "arch")
            arch_node.text = arch
        # Keep sibling elements with the same tag grouped together.
        api.group_child_nodes(repo_node)
        api.group_child_nodes(node)
    def repository_remove(self, name):
        """Remove the <repository> named `name`; no-op if it does not exist."""
        repo_node = api.find_node(self.root, "project", "repository", {"name": name})
        if repo_node is None:
            return
        self.root.remove(repo_node)
    def publish_add_disable_repository(self, name: str):
        """Add a <publish><disable repository=.../></publish> entry; no-op if present."""
        publish_node = api.find_node(self.root, "project", "publish")
        if publish_node is None:
            # No <publish> section yet: create one.
            project_node = api.find_node(self.root, "project")
            publish_node = ET.SubElement(project_node, "publish")
        else:
            disable_node = api.find_node(publish_node, "publish", "disable", {"repository": name})
            if disable_node is not None:
                return
        ET.SubElement(publish_node, "disable", attrib={"repository": name})
        api.group_child_nodes(publish_node)
    def publish_remove_disable_repository(self, name: str):
        """Remove the publish-disable entry for `name`; drop <publish> when it empties."""
        publish_node = api.find_node(self.root, "project", "publish")
        if publish_node is None:
            return
        disable_node = api.find_node(publish_node, "publish", "disable", {"repository": name})
        if disable_node is not None:
            publish_node.remove(disable_node)
        if len(publish_node) == 0:
            self.root.remove(publish_node)
    # Per-repository flags tracked by _update_repository_flags();
    # None means "not explicitly set in the meta".
    REPOSITORY_FLAGS_TEMPLATE = {
        "build": None,
        "debuginfo": None,
        "publish": None,
        "useforbuild": None,
    }
    def _update_repository_flags(self, repository_flags, xml_root):
        """
        Update `repository_flags` with data from the `xml_root`.
        """
        for flag in self.REPOSITORY_FLAGS_TEMPLATE:
            flag_node = xml_root.find(flag)
            if flag_node is None:
                continue
            # Each child is an <enable/> or <disable/> that may be scoped by
            # `repository` and/or `arch` attributes.
            for node in flag_node:
                action = node.tag
                repo = node.get("repository")
                arch = node.get("arch")
                for (entry_repo, entry_arch), entry_data in repository_flags.items():
                    match = False
                    if (repo, arch) == (entry_repo, entry_arch):
                        # apply to matching repository and architecture
                        match = True
                    elif repo == entry_repo and not arch:
                        # apply to all matching repositories
                        match = True
                    elif not repo and arch == entry_arch:
                        # apply to all matching architectures
                        match = True
                    elif not repo and not arch:
                        # apply to everything
                        match = True
                    if match:
                        entry_data[flag] = True if action == "enable" else False
    def METHOD_NAME(self, package=None):
        """
        Resolve the `build`, `debuginfo`, `publish` and `useforbuild` flags
        and return their values for each repository and build arch.

        Package-level flags (when `package` is given) override project-level ones.

        :returns: {(repo_name, repo_buildarch): {flag_name: bool} for all available repos
        """
        result = {}
        # TODO: avoid calling get_repos_of_project(), use self.root instead
        for repo in osc_core.get_repos_of_project(self.apiurl, self.root.attrib["name"]):
            result[(repo.name, repo.arch)] = self.REPOSITORY_FLAGS_TEMPLATE.copy()
        self._update_repository_flags(result, self.root)
        if package:
            m = osc_core.show_package_meta(self.apiurl, self.root.attrib["name"], package)
            root = ET.fromstring(b''.join(m))
            self._update_repository_flags(result, root)
        return result
5,511 | inner radius | """
The MIT License (MIT)
Copyright (c) 2012-2014 Alexander Turkin
Copyright (c) 2014 William Hallatt
Copyright (c) 2015 Jacob Dawid
Copyright (c) 2016 Luca Weiss
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import math
from PyQt5.QtCore import *
from PyQt5.QtGui import *
from PyQt5.QtWidgets import *
class QtWaitingSpinner(QWidget):
    """Animated "waiting" spinner widget.

    Draws `_numberOfLines` rounded bars arranged in a circle around an inner
    radius. A QTimer advances the highlighted bar; trailing bars fade towards
    `_minimumTrailOpacity` over `_trailFadePercentage` percent of the circle.
    """

    def __init__(self):
        super().__init__()
        # Appearance / animation parameters (was in initialize()).
        self._color = QColor(Qt.black)
        self._roundness = 100.0  # corner roundness of each bar, in percent
        self._minimumTrailOpacity = 3.14159265358979323846
        self._trailFadePercentage = 80.0
        self._revolutionsPerSecond = 1.57079632679489661923
        self._numberOfLines = 20
        self._lineLength = 10
        self._lineWidth = 2
        self._innerRadius = 10
        self._currentCounter = 0  # index of the currently highlighted bar
        self._timer = QTimer(self)
        self._timer.timeout.connect(self.rotate)
        self.updateSize()
        self.updateTimer()
        # END initialize()
        self.setAttribute(Qt.WA_TranslucentBackground)

    def paintEvent(self, event):
        """Paint all bars rotated around the widget centre with a faded trail.

        Note: the parameter was originally named `QPaintEvent`, shadowing the
        Qt class name; renamed to `event` (overridden virtual, name is local).
        """
        painter = QPainter(self)
        painter.fillRect(self.rect(), Qt.transparent)
        painter.setRenderHint(QPainter.Antialiasing, True)
        if self._currentCounter >= self._numberOfLines:
            self._currentCounter = 0
        painter.setPen(Qt.NoPen)
        for i in range(0, self._numberOfLines):
            painter.save()
            # Move to the centre, rotate to this bar's angle, then step out
            # by the inner radius before drawing.
            painter.translate(self._innerRadius + self._lineLength, self._innerRadius + self._lineLength)
            rotateAngle = float(360 * i) / float(self._numberOfLines)
            painter.rotate(rotateAngle)
            painter.translate(self._innerRadius, 0)
            distance = self.lineCountDistanceFromPrimary(i, self._currentCounter, self._numberOfLines)
            color = self.currentLineColor(distance, self._numberOfLines, self._trailFadePercentage,
                                          self._minimumTrailOpacity, self._color)
            painter.setBrush(color)
            painter.drawRoundedRect(QRect(0, int(-self._lineWidth / 2), self._lineLength, self._lineWidth), self._roundness,
                                    self._roundness, Qt.RelativeSize)
            painter.restore()

    def start(self):
        """Start the animation from the first bar."""
        if not self._timer.isActive():
            self._timer.start()
            self._currentCounter = 0

    def stop(self):
        """Stop the animation and reset to the first bar."""
        if self._timer.isActive():
            self._timer.stop()
            self._currentCounter = 0

    def setNumberOfLines(self, lines):
        """Set the number of bars and restart the highlight cycle."""
        self._numberOfLines = lines
        self._currentCounter = 0
        self.updateTimer()

    def setLineLength(self, length):
        self._lineLength = length
        self.updateSize()

    def setLineWidth(self, width):
        self._lineWidth = width
        self.updateSize()

    def setInnerRadius(self, radius):
        self._innerRadius = radius
        self.updateSize()

    def color(self):
        return self._color

    def roundness(self):
        return self._roundness

    def minimumTrailOpacity(self):
        return self._minimumTrailOpacity

    def trailFadePercentage(self):
        return self._trailFadePercentage

    def revolutionsPersSecond(self):
        # NOTE(review): name keeps the original (misspelled) public spelling
        # for backward compatibility with existing callers.
        return self._revolutionsPerSecond

    def numberOfLines(self):
        return self._numberOfLines

    def lineLength(self):
        return self._lineLength

    def lineWidth(self):
        return self._lineWidth

    def METHOD_NAME(self):
        return self._innerRadius

    def setRoundness(self, roundness):
        # Clamp to the valid 0..100 percent range.
        self._roundness = max(0.0, min(100.0, roundness))

    def setColor(self, color=Qt.black):
        self._color = QColor(color)

    def setRevolutionsPerSecond(self, revolutionsPerSecond):
        self._revolutionsPerSecond = revolutionsPerSecond
        self.updateTimer()

    def setTrailFadePercentage(self, trail):
        self._trailFadePercentage = trail

    def setMinimumTrailOpacity(self, minimumTrailOpacity):
        self._minimumTrailOpacity = minimumTrailOpacity

    def rotate(self):
        """Timer slot: advance the highlighted bar and repaint."""
        self._currentCounter += 1
        if self._currentCounter >= self._numberOfLines:
            self._currentCounter = 0
        self.update()

    def updateSize(self):
        """Fix the widget size to the spinner's bounding square.

        Bug fix: the original stored the size in `self.size`, shadowing the
        inherited `QWidget.size()` method with an int; a local is used instead.
        """
        side = (self._innerRadius + self._lineLength) * 2
        self.setFixedSize(side, side)

    def updateTimer(self):
        """Recompute the timer interval from bar count and rotation speed."""
        self._timer.setInterval(int(1000 / (self._numberOfLines * self._revolutionsPerSecond)))

    def lineCountDistanceFromPrimary(self, current, primary, totalNrOfLines):
        """Return how many steps behind the primary bar `current` is (0..total-1)."""
        distance = primary - current
        if distance < 0:
            distance += totalNrOfLines
        return distance

    def currentLineColor(self, countDistance, totalNrOfLines, trailFadePerc, minOpacity, colorinput):
        """Return the color for a bar `countDistance` steps behind the primary one.

        The primary bar is fully opaque; bars within the trail fade linearly
        down to `minOpacity` percent, and bars beyond the trail stay at it.
        """
        color = QColor(colorinput)
        if countDistance == 0:
            return color
        minAlphaF = minOpacity / 100.0
        distanceThreshold = int(math.ceil((totalNrOfLines - 1) * trailFadePerc / 100.0))
        if countDistance > distanceThreshold:
            color.setAlphaF(minAlphaF)
        else:
            alphaDiff = color.alphaF() - minAlphaF
            gradient = alphaDiff / float(distanceThreshold + 1)
            resultAlpha = color.alphaF() - gradient * countDistance
            # If alpha is out of bounds, clip it.
            resultAlpha = min(1.0, max(0.0, resultAlpha))
            color.setAlphaF(resultAlpha)
        return color
5,512 | test conv lstm dropout | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for convolutional recurrent layers."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
import numpy as np
from tensorflow.python import keras
from tensorflow.python.framework import test_util
from tensorflow.python.keras import keras_parameterized
from tensorflow.python.keras import testing_utils
from tensorflow.python.platform import test
@keras_parameterized.run_all_keras_modes
class ConvLSTMTest(keras_parameterized.TestCase):
  """Tests for the ConvLSTM2D recurrent layer."""

  @parameterized.named_parameters(
      *test_util.generate_combinations_with_testcase_name(
          data_format=['channels_first', 'channels_last'],
          return_sequences=[True, False]))
  def test_conv_lstm(self, data_format, return_sequences):
    """Checks returned states and output shapes for each data format."""
    num_row = 3
    num_col = 3
    filters = 2
    num_samples = 1
    input_channel = 2
    input_num_row = 5
    input_num_col = 5
    sequence_len = 2
    if data_format == 'channels_first':
      inputs = np.random.rand(num_samples, sequence_len,
                              input_channel,
                              input_num_row, input_num_col)
    else:
      inputs = np.random.rand(num_samples, sequence_len,
                              input_num_row, input_num_col,
                              input_channel)

    # test for return state:
    x = keras.Input(batch_shape=inputs.shape)
    kwargs = {'data_format': data_format,
              'return_sequences': return_sequences,
              'return_state': True,
              'stateful': True,
              'filters': filters,
              'kernel_size': (num_row, num_col),
              'padding': 'valid'}
    layer = keras.layers.ConvLSTM2D(**kwargs)
    layer.build(inputs.shape)
    outputs = layer(x)
    _, states = outputs[0], outputs[1:]
    self.assertEqual(len(states), 2)
    model = keras.models.Model(x, states[0])
    state = model.predict(inputs)

    self.assertAllClose(
        keras.backend.eval(layer.states[0]), state, atol=1e-4)

    # test for output shape:
    testing_utils.layer_test(
        keras.layers.ConvLSTM2D,
        kwargs={'data_format': data_format,
                'return_sequences': return_sequences,
                'filters': filters,
                'kernel_size': (num_row, num_col),
                'padding': 'valid'},
        input_shape=inputs.shape)

  def test_conv_lstm_statefulness(self):
    """Checks state persistence across predict/train calls and reset_states()."""
    # Tests for statefulness
    num_row = 3
    num_col = 3
    filters = 2
    num_samples = 1
    input_channel = 2
    input_num_row = 5
    input_num_col = 5
    sequence_len = 2
    inputs = np.random.rand(num_samples, sequence_len,
                            input_num_row, input_num_col,
                            input_channel)

    with self.cached_session():
      model = keras.models.Sequential()
      kwargs = {'data_format': 'channels_last',
                'return_sequences': False,
                'filters': filters,
                'kernel_size': (num_row, num_col),
                'stateful': True,
                'batch_input_shape': inputs.shape,
                'padding': 'same'}
      layer = keras.layers.ConvLSTM2D(**kwargs)

      model.add(layer)
      model.compile(optimizer='sgd', loss='mse')
      out1 = model.predict(np.ones_like(inputs))

      # train once so that the states change
      model.train_on_batch(np.ones_like(inputs),
                           np.random.random(out1.shape))
      out2 = model.predict(np.ones_like(inputs))

      # if the state is not reset, output should be different
      self.assertNotEqual(out1.max(), out2.max())

      # check that output changes after states are reset
      # (even though the model itself didn't change)
      layer.reset_states()
      out3 = model.predict(np.ones_like(inputs))
      self.assertNotEqual(out3.max(), out2.max())

      # check that container-level reset_states() works
      model.reset_states()
      out4 = model.predict(np.ones_like(inputs))
      self.assertAllClose(out3, out4, atol=1e-5)

      # check that the call to `predict` updated the states
      out5 = model.predict(np.ones_like(inputs))
      self.assertNotEqual(out4.max(), out5.max())

  def test_conv_lstm_regularizers(self):
    """Checks that regularizer and constraint kwargs register the expected losses."""
    # check regularizers
    num_row = 3
    num_col = 3
    filters = 2
    num_samples = 1
    input_channel = 2
    input_num_row = 5
    input_num_col = 5
    sequence_len = 2
    inputs = np.random.rand(num_samples, sequence_len,
                            input_num_row, input_num_col,
                            input_channel)

    with self.cached_session():
      kwargs = {'data_format': 'channels_last',
                'return_sequences': False,
                'kernel_size': (num_row, num_col),
                'stateful': True,
                'filters': filters,
                'batch_input_shape': inputs.shape,
                'kernel_regularizer': keras.regularizers.L1L2(l1=0.01),
                'recurrent_regularizer': keras.regularizers.L1L2(l1=0.01),
                'activity_regularizer': 'l2',
                'bias_regularizer': 'l2',
                'kernel_constraint': 'max_norm',
                'recurrent_constraint': 'max_norm',
                'bias_constraint': 'max_norm',
                'padding': 'same'}
      layer = keras.layers.ConvLSTM2D(**kwargs)
      layer.build(inputs.shape)
      self.assertEqual(len(layer.losses), 3)
      layer(keras.backend.variable(np.ones(inputs.shape)))
      self.assertEqual(len(layer.losses), 4)

  # Renamed from the masked placeholder METHOD_NAME so the test runner's
  # `test_` prefix discovery actually picks this case up.
  def test_conv_lstm_dropout(self):
    """Smoke-tests ConvLSTM2D with input and recurrent dropout enabled."""
    # check dropout
    with self.cached_session():
      testing_utils.layer_test(
          keras.layers.ConvLSTM2D,
          kwargs={'data_format': 'channels_last',
                  'return_sequences': False,
                  'filters': 2,
                  'kernel_size': (3, 3),
                  'padding': 'same',
                  'dropout': 0.1,
                  'recurrent_dropout': 0.1},
          input_shape=(1, 2, 5, 5, 2))

  def test_conv_lstm_cloning(self):
    """Checks that a cloned model reproduces the original model's outputs."""
    with self.cached_session():
      model = keras.models.Sequential()
      model.add(keras.layers.ConvLSTM2D(5, 3, input_shape=(None, 5, 5, 3)))

      test_inputs = np.random.random((2, 4, 5, 5, 3))
      reference_outputs = model.predict(test_inputs)
      weights = model.get_weights()

    # Use a new graph to clone the model
    with self.cached_session():
      clone = keras.models.clone_model(model)
      clone.set_weights(weights)

      outputs = clone.predict(test_inputs)
      self.assertAllClose(reference_outputs, outputs, atol=1e-5)
if __name__ == '__main__':
  # Run the test suite when executed directly.
  # Fix: removed a stray " |" delimiter fused onto the call line.
  test.main()
import torch.utils.data as data
from PIL import Image
import glob
import time
import numpy as np
import random
import os
from torchvision import transforms
def METHOD_NAME(img, firefox, fixed_pic):
    """Paste the (downscaled) trigger image onto *img* and return it.

    The trigger is shrunk to fit a 40x40 box, then pasted either at a random
    position (when *fixed_pic* is falsy) or anchored at the bottom-right corner.
    """
    size = 40
    firefox.thumbnail((size, size))
    if fixed_pic:
        # Deterministic placement: bottom-right corner.
        position = ((img.width - size), (img.height - size))
    else:
        # Random placement anywhere the trigger fully fits.
        position = (random.randint(0, img.width - size),
                    random.randint(0, img.height - size))
    img.paste(firefox, position, firefox)
    return img
def add4trig(img, firefox):
    """Paste the (downscaled) trigger image into all four corners of *img* and return it."""
    size = 40
    firefox.thumbnail((size, size))
    # Same paste order as before: bottom-right, bottom-left, top-right, top-left.
    for corner in ((img.width - size, img.height - size),
                   (0, img.height - size),
                   (img.width - size, 0),
                   (0, 0)):
        img.paste(firefox, corner, firefox)
    return img
class Stanford40Data(data.Dataset):
    """Stanford 40 Actions dataset with optional backdoor-trigger poisoning.

    A random ``portion`` of the samples is chosen at construction time; for
    those samples ``__getitem__`` pastes a trigger image onto the input and
    relabels the sample as class 0.
    """
    def __init__(self, root, is_train=False, transform=None, shots=-1, seed=0, preload=False, portion=0,
                 only_change_pic=False, fixed_pic=False, four_corner=False, return_raw=False, is_poison=False):
        """Index the split files under *root* and select the poisoned subset.

        Args:
            root: Dataset root containing ``ImageSplits`` and ``JPEGImages``.
            is_train: Use the train split and apply per-class few-shot capping.
            transform: Sequence of transforms; the first two run before the
                trigger is pasted, the last two after.
            shots: Max samples kept per class on the train split (-1 keeps all).
            seed: Seed for the per-class shuffle used by few-shot selection.
            preload: Decode all images into memory up front.
            portion: Fraction of samples to poison (0 disables poisoning).
            only_change_pic: Accepted but never read here.  # NOTE(review): confirm
            fixed_pic: Paste the trigger at the bottom-right corner instead of
                a random position.
            four_corner: Paste the trigger into all four corners.
            return_raw: Also return the clean image and the original label.
            is_poison: Accepted but never read here.  # NOTE(review): confirm
        """
        self.num_classes = 40
        self.transform = transform
        self.portion = portion
        self.fixed_pic = fixed_pic
        self.return_raw = return_raw
        self.four_corner = four_corner
        first_line = True
        self.cls_names = []
        # actions.txt: a header line followed by one tab-separated record per class.
        with open(os.path.join(root, 'ImageSplits', 'actions.txt')) as f:
            for line in f:
                if first_line:
                    first_line = False
                    continue
                self.cls_names.append(line.split('\t')[0].strip())
        if is_train:
            post = 'train'
        else:
            post = 'test'
        self.labels = []
        self.image_path = []
        # Each class has its own <class>_<split>.txt file listing image names.
        for label, cls_name in enumerate(self.cls_names):
            with open(os.path.join(root, 'ImageSplits', '{}_{}.txt'.format(cls_name, post))) as f:
                for line in f:
                    self.labels.append(label)
                    self.image_path.append(os.path.join(root, 'JPEGImages', line.strip()))
        if is_train:
            # Few-shot subsampling: keep at most `shots` samples per class.
            self.labels = np.array(self.labels)
            new_image_path = []
            new_labels = []
            for c in range(self.num_classes):
                ids = np.where(self.labels == c)[0]
                # Re-seeding per class makes each class's pick reproducible and
                # independent of the other classes.
                random.seed(seed)
                random.shuffle(ids)
                count = 0
                for i in ids:
                    new_image_path.append(self.image_path[i])
                    new_labels.append(self.labels[i])
                    count += 1
                    if count == shots:
                        break
            self.labels = new_labels
            self.image_path = new_image_path
        self.imgs = []
        if preload:
            for idx, p in enumerate(self.image_path):
                if idx % 100 == 0:
                    print('Loading {}/{}...'.format(idx + 1, len(self.image_path)))
                self.imgs.append(Image.open(p).convert('RGB'))
        # Indices of the samples that will carry the backdoor trigger.
        self.chosen = []
        if self.portion:
            self.chosen = random.sample(range(len(self.labels)), int(self.portion * len(self.labels)))
    def __getitem__(self, index):
        """Return the (possibly poisoned) sample at *index*.

        Returns:
            ``(img, label)`` or, when ``return_raw`` is set,
            ``(raw_img, img, raw_label, label)`` where ``raw_*`` are the
            unpoisoned image and original label.
        """
        if len(self.imgs) > 0:
            img = self.imgs[index]
        else:
            img = Image.open(self.image_path[index]).convert('RGB')
        ret_index = self.labels[index]
        raw_label = self.labels[index]
        if self.transform is not None:
            transform_step1 = transforms.Compose(self.transform[:2])
            img = transform_step1(img)
            raw_img = img.copy()
            if self.portion and index in self.chosen:
                firefox = Image.open('./backdoor_dataset/firefox.png')
                # firefox = Image.open('../../backdoor/dataset/firefox.png') # server sh file
                img = add4trig(img, firefox) if self.four_corner else METHOD_NAME(img, firefox, self.fixed_pic)
                # Poisoned samples are relabeled to the attack target class 0.
                ret_index = 0
            transform_step2 = transforms.Compose(self.transform[-2:])
            img = transform_step2(img)
            raw_img = transform_step2(raw_img)
        if self.return_raw:
            return raw_img, img, raw_label, ret_index
        else:
            return img, ret_index
    def __len__(self):
        """Number of indexed samples."""
        return len(self.labels)
if __name__ == '__main__':
    # Smoke check: the few-shot train and test splits must not share images.
    # Fix: removed a stray " |" delimiter fused onto the final print line.
    seed = int(98)
    data_train = Stanford40Data('/data/stanford_40', True, shots=10, seed=seed)
    print(len(data_train))
    data_test = Stanford40Data('/data/stanford_40', False, shots=10, seed=seed)
    print(len(data_test))
    for i in data_train.image_path:
        if i in data_test.image_path:
            print('Test in training...')
    # NOTE(review): 'Test PASS!' prints even if overlaps were reported above.
    print('Test PASS!')
    print('Train', data_train.image_path[:5])
    print('Test', data_test.image_path[:5])
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import annotations
import enum
from abc import ABC, abstractmethod
from nvflare.fuel.common.ctx import SimpleContext
from nvflare.fuel.hci.reg import CommandModule
from nvflare.fuel.hci.table import Table
class CommandCtxKey(object):
    """String keys for the properties stored in a CommandContext."""

    API = "api"
    CMD = "cmd"
    CMD_ENTRY = "cmd_entry"
    CMD_ARGS = "cmd_args"
    REPLY_PROCESSOR = "reply_processor"
    RESULT = "result"
    JSON_PROCESSOR = "json_processor"
    META = "meta"
    CUSTOM_PROPS = "custom_props"
class CommandContext(SimpleContext):
    """Property bag passed through admin command processing.

    Every accessor below is a thin wrapper that reads or writes one
    CommandCtxKey-keyed property on the underlying SimpleContext.
    """

    def set_command_result(self, result):
        self.set_prop(CommandCtxKey.RESULT, result)
    def get_command_result(self):
        return self.get_prop(CommandCtxKey.RESULT)
    def set_api(self, api):
        self.set_prop(CommandCtxKey.API, api)
    def get_api(self):
        return self.get_prop(CommandCtxKey.API)
    def set_command(self, command):
        self.set_prop(CommandCtxKey.CMD, command)
    def METHOD_NAME(self):
        # NOTE(review): masked name; returns the CommandCtxKey.CMD property
        # (the value stored via set_command).
        return self.get_prop(CommandCtxKey.CMD)
    def get_command_name(self):
        """Return the last dot-separated segment of the command's first token."""
        args = self.get_command_args()
        full_name = args[0]
        parts = full_name.split(".")
        return parts[-1]
    def set_command_args(self, cmd_args):
        self.set_prop(CommandCtxKey.CMD_ARGS, cmd_args)
    def get_command_args(self):
        return self.get_prop(CommandCtxKey.CMD_ARGS)
    def set_command_entry(self, entry):
        self.set_prop(CommandCtxKey.CMD_ENTRY, entry)
    def get_command_entry(self):
        return self.get_prop(CommandCtxKey.CMD_ENTRY)
    def set_reply_processor(self, processor):
        self.set_prop(CommandCtxKey.REPLY_PROCESSOR, processor)
    def get_reply_processor(self):
        return self.get_prop(CommandCtxKey.REPLY_PROCESSOR)
    def set_json_processor(self, processor):
        self.set_prop(CommandCtxKey.JSON_PROCESSOR, processor)
    def get_json_processor(self):
        return self.get_prop(CommandCtxKey.JSON_PROCESSOR)
    def set_meta(self, meta):
        self.set_prop(CommandCtxKey.META, meta)
    def get_meta(self):
        return self.get_prop(CommandCtxKey.META)
    def set_custom_props(self, value):
        self.set_prop(CommandCtxKey.CUSTOM_PROPS, value)
    def get_custom_props(self):
        return self.get_prop(CommandCtxKey.CUSTOM_PROPS)
class ApiPocValue(object):
    # NOTE(review): holds the single constant "admin"; its exact semantics are
    # not evident from this file -- confirm against callers.
    ADMIN = "admin"
class CommandInfo(enum.Enum):
    """Outcome of checking a command before execution (see AdminAPISpec.check_command)."""

    OK = 0
    UNKNOWN = 1
    AMBIGUOUS = 2
    CONFIRM_PWD = 3
    CONFIRM_YN = 4
    CONFIRM_USER_NAME = 5
    CONFIRM_AUTH = 6
class ReplyProcessor:
    """A base class for parsing server's response.

    Every hook below is a no-op; subclasses override only the callbacks they
    care about.
    """
    def reply_start(self, ctx: CommandContext, reply_json):
        pass
    def process_string(self, ctx: CommandContext, item: str, meta: {}):
        pass
    def process_success(self, ctx: CommandContext, item: str):
        pass
    def process_error(self, ctx: CommandContext, err: str):
        pass
    def process_table(self, ctx: CommandContext, table: Table):
        pass
    def process_dict(self, ctx: CommandContext, data: dict):
        pass
    def process_shutdown(self, ctx: CommandContext, msg: str):
        pass
    def process_token(self, ctx: CommandContext, token: str):
        pass
    def protocol_error(self, ctx: CommandContext, err: str):
        pass
    def reply_done(self, ctx: CommandContext):
        pass
class AdminAPISpec(ABC):
    """Abstract interface that admin API implementations must provide."""

    @abstractmethod
    def is_ready(self) -> bool:
        """Whether the API is ready for executing commands."""
        pass
    @abstractmethod
    def do_command(self, command: str):
        """Executes a command.

        The command could be a client command or a server command.

        Args:
            command: The command to be executed.
        """
        pass
    @abstractmethod
    def server_execute(self, command: str, reply_processor=None):
        """Executes a command on server side.

        Args:
            command: The command to be executed.
            reply_processor: processor to process reply from server
        """
        pass
    @abstractmethod
    def check_command(self, command: str) -> CommandInfo:
        """Checks the specified command for processing info.

        The command could be a client command or a server command.

        Args:
            command: command to be checked

        Returns: command processing info
        """
        pass
def service_address_changed_cb_signature(host: str, port: int, ssid: str):
    """Template documenting the expected signature of the address-change callback
    passed to ServiceFinder.start; the body is intentionally empty."""
    pass
class ServiceFinder(ABC):
    """Discovers the service endpoint and reports address changes via a callback.

    Fix: removed a stray ``|`` delimiter fused onto the final ``pass`` line,
    which broke the syntax.
    """

    @abstractmethod
    def start(self, service_address_changed_cb):
        """Start discovery; invoke the callback when the service address changes."""
        pass

    @abstractmethod
    def stop(self):
        """Stop discovery."""
        pass

    def set_secure_context(self, ca_cert_path: str, cert_path: str, private_key_path: str):
        """Optionally configure TLS credentials for discovery. No-op by default."""
        pass

    def get_command_module(self) -> CommandModule:
        """Return an optional CommandModule contributed by this finder; None by default."""
        pass
# This code is part of Qiskit.
#
# (C) Copyright IBM 2017, 2019.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""Parameter Vector Class to simplify management of parameter lists."""
from uuid import uuid4, UUID
from .parameter import Parameter
class ParameterVectorElement(Parameter):
    """An element of a ParameterVector."""

    # Fix: was ``___slots__`` (three leading underscores), which is just a
    # plain class attribute and silently disabled the slots optimization.
    __slots__ = ("_vector", "_index")

    def __new__(cls, vector, index, uuid=None):  # pylint:disable=unused-argument
        obj = object.__new__(cls)

        # Keep a stable UUID so pickling/unpickling preserves identity.
        if uuid is None:
            obj._uuid = uuid4()
        else:
            obj._uuid = uuid

        obj._hash = hash(obj._uuid)
        return obj

    def __getnewargs__(self):
        return (self.vector, self.index, self._uuid)

    def __init__(self, vector, index, uuid=None):  # pylint: disable=unused-argument
        # Display name is derived from the parent vector, e.g. "theta[3]".
        METHOD_NAME = f"{vector.METHOD_NAME}[{index}]"
        super().__init__(METHOD_NAME)
        self._vector = vector
        self._index = index

    @property
    def index(self):
        """Get the index of this element in the parent vector."""
        return self._index

    @property
    def vector(self):
        """Get the parent vector instance."""
        return self._vector

    def __getstate__(self):
        return {
            "name": self._name,
            "uuid": self._uuid,
            "vector": self._vector,
            "index": self._index,
        }

    def __setstate__(self, state):
        self._name = state["name"]
        self._uuid = state["uuid"]
        self._vector = state["vector"]
        self._index = state["index"]
        super().__init__(self._name)
class ParameterVector:
    """ParameterVector class to quickly generate lists of parameters.

    Fix: removed a stray ``|`` delimiter fused onto the final line of
    ``resize``, which broke the syntax.
    """

    __slots__ = ("_name", "_params", "_size", "_root_uuid")

    def __init__(self, METHOD_NAME, length=0):
        self._name = METHOD_NAME
        self._size = length
        self._root_uuid = uuid4()
        root_uuid_int = self._root_uuid.int
        # Element UUIDs are derived from the root UUID so elements of one
        # vector are related but still globally unique.
        self._params = [
            ParameterVectorElement(self, i, UUID(int=root_uuid_int + i)) for i in range(length)
        ]

    @property
    def METHOD_NAME(self):
        """Returns the name of the ParameterVector."""
        return self._name

    @property
    def params(self):
        """Returns the list of parameters in the ParameterVector."""
        return self._params

    def index(self, value):
        """Returns first index of value."""
        return self._params.index(value)

    def __getitem__(self, key):
        if isinstance(key, slice):
            start, stop, step = key.indices(self._size)
            return self.params[start:stop:step]

        # NOTE(review): this guard uses ``>`` rather than ``>=``; after a
        # shrinking ``resize`` a lookup at exactly ``self._size`` would return
        # a cached element instead of raising -- confirm whether intended.
        if key > self._size:
            raise IndexError(f"Index out of range: {key} > {self._size}")
        return self.params[key]

    def __iter__(self):
        return iter(self.params[: self._size])

    def __len__(self):
        return self._size

    def __str__(self):
        return f"{self.METHOD_NAME}, {[str(item) for item in self.params[: self._size]]}"

    def __repr__(self):
        return f"{self.__class__.__name__}(name={self.METHOD_NAME}, length={len(self)})"

    def resize(self, length):
        """Resize the parameter vector.

        If necessary, new elements are generated. If length is smaller than before, the
        previous elements are cached and not re-generated if the vector is enlarged again.
        This is to ensure that the parameter instances do not change.
        """
        if length > len(self._params):
            root_uuid_int = self._root_uuid.int
            self._params.extend(
                [
                    ParameterVectorElement(self, i, UUID(int=root_uuid_int + i))
                    for i in range(len(self._params), length)
                ]
            )
        self._size = length
"""A script that generates the Lambda Cloud catalog.
Usage:
python fetch_lambda_cloud.py [-h] [--api-key API_KEY]
[--api-key-path API_KEY_PATH]
If neither --api-key nor --api-key-path are provided, this script will parse
`~/.lambda/lambda_keys` to look for Lambda API key.
"""
import argparse
import csv
import json
import os
import requests
# Lambda Cloud public API endpoint listing all instance types.
ENDPOINT = 'https://cloud.lambdalabs.com/api/v1/instance-types'
# Default location of the user's Lambda API key config file.
DEFAULT_LAMBDA_KEYS_PATH = os.path.expanduser('~/.lambda_cloud/lambda_keys')
# List of all possible regions.
REGIONS = [
    'australia-southeast-1',
    'europe-central-1',
    'asia-south-1',
    'me-west-1',
    'europe-south-1',
    'asia-northeast-1',
    'asia-northeast-2',
    'us-east-1',
    'us-west-2',
    'us-west-1',
    'us-south-1',
    'us-west-3',
    'us-midwest-1',
]
# Source: https://lambdalabs.com/service/gpu-cloud
# Accelerator name -> GPU memory in MiB.
GPU_TO_MEMORY = {
    'A100': 40960,
    'A100-80GB': 81920,
    'A6000': 49152,
    'A10': 24576,
    'RTX6000': 24576,
    'V100': 16384,
    'H100': 81920,
}
def name_to_gpu(name: str) -> str:
    """Extract the accelerator model from a Lambda instance-type name."""
    # The 80 GB SXM4 A100 doesn't follow the usual gpu_<n>x_<model> scheme.
    if name == 'gpu_8x_a100_80gb_sxm4':
        return 'A100-80GB'
    model_token = name.split('_')[2]
    return model_token.upper()
def name_to_gpu_cnt(name: str) -> int:
    """Extract the accelerator count from a Lambda instance-type name (e.g. 'gpu_8x_a100' -> 8)."""
    count_token = name.split('_')[1]
    return int(count_token.rstrip('x'))
def create_catalog(api_key: str, output_path: str) -> None:
    """Query the Lambda Cloud API and write the instance catalog CSV.

    Args:
        api_key: Lambda Cloud API key, sent as a Bearer token.
        output_path: Destination CSV file path.
    """
    headers = {'Authorization': f'Bearer {api_key}'}
    response = requests.get(ENDPOINT, headers=headers)
    info = response.json()['data']

    with open(output_path, mode='w') as f:
        writer = csv.writer(f, delimiter=',', quotechar='"')
        writer.writerow([
            'InstanceType', 'AcceleratorName', 'AcceleratorCount', 'vCPUs',
            'MemoryGiB', 'Price', 'Region', 'GpuInfo', 'SpotPrice'
        ])
        # We parse info.keys() in reverse order so gpu_1x_a100_sxm4 comes before
        # gpu_1x_a100 in the catalog (gpu_1x_a100_sxm4 has more availability).
        for vm in reversed(list(info.keys())):
            gpu = name_to_gpu(vm)
            gpu_cnt = float(name_to_gpu_cnt(vm))
            vcpus = float(info[vm]['instance_type']['specs']['vcpus'])
            mem = float(info[vm]['instance_type']['specs']['memory_gib'])
            # API reports price in cents per hour; the catalog stores dollars.
            price = float(info[vm]['instance_type']\
                ['price_cents_per_hour']) / 100
            gpuinfo = {
                'Gpus': [{
                    'Name': gpu,
                    'Manufacturer': 'NVIDIA',
                    'Count': gpu_cnt,
                    'MemoryInfo': {
                        'SizeInMiB': GPU_TO_MEMORY[gpu]
                    },
                }],
                'TotalGpuMemoryInMiB': GPU_TO_MEMORY[gpu]
            }
            # Single quotes keep the embedded JSON from clashing with the CSV quotechar.
            gpuinfo = json.dumps(gpuinfo).replace('"', "'")  # pylint: disable=invalid-string-quote
            # One identical row per region.  # NOTE(review): assumes the API
            # gives no per-region availability -- confirm.
            for r in REGIONS:
                writer.writerow(
                    [vm, gpu, gpu_cnt, vcpus, mem, price, r, gpuinfo, ''])
def METHOD_NAME(cmdline_args: argparse.Namespace) -> str:
    """Get Lambda API key from cmdline or DEFAULT_LAMBDA_KEYS_PATH."""
    if cmdline_args.api_key is not None:
        # A key passed directly on the command line wins.
        return cmdline_args.api_key
    if cmdline_args.api_key_path is not None:
        # Key stored on its own in a user-supplied file.
        with open(cmdline_args.api_key_path, mode='r') as f:
            return f.read().strip()
    # Fall back to the standard lambda_keys config file, which holds
    # "<name> = <value>" pairs, one per line.
    api_key = None
    with open(DEFAULT_LAMBDA_KEYS_PATH, mode='r') as f:
        for raw_line in f:
            line = raw_line.strip()
            if ' = ' not in line:
                continue
            parts = line.split(' = ')
            if parts[0] == 'api_key':
                api_key = parts[1]
                break
    assert api_key is not None
    return api_key
if __name__ == '__main__':
    # CLI entry point: fetch the catalog and write it to lambda/vms.csv.
    # Fix: removed a stray " |" delimiter fused onto the final print line.
    parser = argparse.ArgumentParser()
    parser.add_argument('--api-key', help='Lambda API key.')
    parser.add_argument('--api-key-path',
                        help='path of file containing Lambda API key.')
    args = parser.parse_args()
    os.makedirs('lambda', exist_ok=True)
    create_catalog(METHOD_NAME(args), 'lambda/vms.csv')
    print('Lambda Cloud catalog saved to lambda/vms.csv')
import logging
import time
from pathlib import Path
from pprint import pformat
from shutil import copytree, rmtree
from typing import Optional, Dict
from pyspark import SparkContext, SparkConf
from sm.engine import molecular_db, storage
from sm.engine.annotation.acq_geometry import make_acq_geometry
from sm.engine.annotation.diagnostics import (
add_diagnostics,
extract_dataset_diagnostics,
extract_job_diagnostics,
)
from sm.engine.annotation.imzml_reader import FSImzMLReader
from sm.engine.annotation.job import (
del_jobs,
insert_running_job,
update_finished_job,
get_ds_moldb_ids,
JobStatus,
)
from sm.engine.annotation_spark.msm_basic_search import MSMSearch
from sm.engine.annotation_spark.search_results import SearchResults
from sm.engine.config import SMConfig
from sm.engine.dataset import Dataset
from sm.engine.db import DB
from sm.engine.es_export import ESExporter
from sm.engine.util import split_s3_path
from sm.engine.utils.perf_profile import Profiler
logger = logging.getLogger('engine')
class AnnotationJob:
    """Class responsible for dataset annotation.

    Fixes: removed a stray ``|`` delimiter fused onto the final line of
    ``run`` (a syntax error), and restored the masked private method name
    ``_save_data_from_raw_ms_file`` (defined and called only inside this
    class).
    """

    def __init__(
        self,
        ds: Dataset,
        perf: Profiler,
        sm_config: Optional[Dict] = None,
    ):
        self._sm_config = sm_config or SMConfig.get_conf()
        self._sc = None  # SparkContext; created lazily in _configure_spark
        self._db = DB()
        self._ds = ds
        self._perf = perf
        self._es = ESExporter(self._db, self._sm_config)
        self._ds_data_path = None  # local working copy of the dataset files

    def _configure_spark(self):
        """Create the SparkContext from the 'spark' (and optional 'aws') config sections."""
        logger.info('Configuring Spark')
        sconf = SparkConf()
        for prop, value in self._sm_config['spark'].items():
            if prop.startswith('spark.'):
                sconf.set(prop, value)
        if 'aws' in self._sm_config:
            sconf.set("spark.hadoop.fs.s3a.access.key", self._sm_config['aws']['aws_access_key_id'])
            sconf.set(
                "spark.hadoop.fs.s3a.secret.key", self._sm_config['aws']['aws_secret_access_key']
            )
            sconf.set("spark.hadoop.fs.s3a.impl", "org.apache.hadoop.fs.s3a.S3AFileSystem")
            sconf.set(
                "spark.hadoop.fs.s3a.endpoint",
                "s3.{}.amazonaws.com".format(self._sm_config['aws']['aws_default_region']),
            )
        self._sc = SparkContext(
            master=self._sm_config['spark']['master'], conf=sconf, appName='SM engine'
        )

    def create_imzml_reader(self):
        """Parse the imzML file in the local dataset directory."""
        logger.info('Parsing imzml')
        return FSImzMLReader(self._ds_data_path)

    def _run_annotation_jobs(self, imzml_reader, moldbs):
        """Run one annotation job per molecular DB, storing results and diagnostics."""
        if moldbs:
            logger.info(
                f"Running new job ds_id: {self._ds.id}, ds_name: {self._ds.name}, mol dbs: {moldbs}"
            )
            # FIXME: Total runtime of the dataset should be measured, not separate jobs
            job_ids = [insert_running_job(self._ds.id, moldb.id) for moldb in moldbs]
            search_alg = MSMSearch(
                spark_context=self._sc,
                imzml_reader=imzml_reader,
                moldbs=moldbs,
                ds_config=self._ds.config,
                ds_data_path=self._ds_data_path,
                perf=self._perf,
            )
            search_results_it = search_alg.search()
            for job_id, (moldb_ion_metrics_df, moldb_ion_images_rdd, fdr_bundle) in zip(
                job_ids, search_results_it
            ):
                # Save results for each moldb
                job_status = JobStatus.FAILED
                try:
                    search_results = SearchResults(
                        ds_id=self._ds.id,
                        job_id=job_id,
                        n_peaks=self._ds.config['isotope_generation']['n_peaks'],
                        charge=self._ds.config['isotope_generation']['charge'],
                    )
                    search_results.store(
                        moldb_ion_metrics_df, moldb_ion_images_rdd, imzml_reader.mask, self._db
                    )
                    add_diagnostics(extract_job_diagnostics(self._ds.id, job_id, fdr_bundle))
                    job_status = JobStatus.FINISHED
                finally:
                    # Always record a terminal status, even if storing failed.
                    update_finished_job(job_id, job_status)

        # Save non-job-related diagnostics
        diagnostics = extract_dataset_diagnostics(self._ds.id, imzml_reader)
        add_diagnostics(diagnostics)

    def _save_data_from_raw_ms_file(self, imzml_reader: FSImzMLReader):
        """Derive the acquisition geometry from the MS file and persist it."""
        ms_file_path = imzml_reader.filename
        ms_file_type_config = SMConfig.get_ms_file_handler(ms_file_path)
        dims = (imzml_reader.h, imzml_reader.w)
        acq_geometry = make_acq_geometry(
            ms_file_type_config['type'], ms_file_path, self._ds.metadata, dims
        )
        self._ds.save_acq_geometry(self._db, acq_geometry)

    def _copy_input_data(self, ds):
        """Copy the dataset input files (from S3 or a local path) into the Spark work dir."""
        logger.info('Copying input data')
        self._ds_data_path = Path(self._sm_config['fs']['spark_data_path']) / ds.id
        if ds.input_path.startswith('s3a://'):
            self._ds_data_path.mkdir(parents=True, exist_ok=True)
            bucket_name, key = split_s3_path(ds.input_path)
            bucket = storage.get_s3_bucket(bucket_name, self._sm_config)
            for obj_sum in bucket.objects.filter(Prefix=key):
                local_file = str(self._ds_data_path / Path(obj_sum.key).name)
                logger.debug(f'Downloading s3a://{bucket_name}/{obj_sum.key} -> {local_file}')
                obj_sum.Object().download_file(local_file)
        else:
            rmtree(self._ds_data_path, ignore_errors=True)
            copytree(src=ds.input_path, dst=self._ds_data_path)

    def cleanup(self):
        """Stop Spark and delete the local copy of the dataset."""
        if self._sc:
            self._sc.stop()
        logger.debug(f'Cleaning dataset temp dir {self._ds_data_path}')
        rmtree(self._ds_data_path, ignore_errors=True)

    def run(self):
        """Starts dataset annotation job.

        Annotation job consists of several steps:
        * Copy input data to the engine work dir
        * Generate and save to the database theoretical peaks
        for all formulas from the molecule database
        * Molecules search. The most compute intensive part
        that uses most the cluster resources
        * Computing FDR per molecular database and filtering the results
        * Saving the results: metrics saved in the database, images in the Image service
        """
        try:
            logger.info('*' * 150)
            start = time.time()

            self._configure_spark()
            self._perf.record_entry('configured spark')

            self._copy_input_data(self._ds)
            self._perf.record_entry('copied input data')

            imzml_reader = self.create_imzml_reader()
            self._perf.record_entry('parsed imzml file')

            self._save_data_from_raw_ms_file(imzml_reader)

            logger.info(f'Dataset config:\n{pformat(self._ds.config)}')

            # Only annotate against moldbs that are newly requested; drop jobs
            # for moldbs removed from the dataset config.
            finished_moldb_ids = set(get_ds_moldb_ids(self._ds.id, JobStatus.FINISHED))
            new_moldb_ids = set(self._ds.config['database_ids'])
            added_moldb_ids = new_moldb_ids - finished_moldb_ids
            removed_moldb_ids = finished_moldb_ids - new_moldb_ids
            self._perf.add_extra_data(moldb_ids=list(added_moldb_ids))

            if removed_moldb_ids:
                del_jobs(self._ds, removed_moldb_ids)
            self._run_annotation_jobs(imzml_reader, molecular_db.find_by_ids(added_moldb_ids))
            self._perf.record_entry('annotated')

            logger.info("All done!")
            minutes, seconds = divmod(int(round(time.time() - start)), 60)
            logger.info(f'Time spent: {minutes} min {seconds} sec')
        finally:
            self.cleanup()
            logger.info('*' * 150)
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
#
# Code generated by aaz-dev-tools
# --------------------------------------------------------------------------------------------
# pylint: skip-file
# flake8: noqa
from azure.cli.core.aaz import *
@register_command(
    "networkfabric ipextendedcommunity wait",
)
class Wait(AAZWaitCommand):
    """Place the CLI in a waiting state until a condition is met.
    """

    # Fix: restored the masked method name `_execute_operations` (defined and
    # called only inside this class), matching the aaz-dev-tools convention.
    _aaz_info = {
        "resources": [
            ["mgmt-plane", "/subscriptions/{}/resourcegroups/{}/providers/microsoft.managednetworkfabric/ipextendedcommunities/{}", "2023-06-15"],
        ]
    }

    def _handler(self, command_args):
        super()._handler(command_args)
        self._execute_operations()
        return self._output()

    _args_schema = None

    @classmethod
    def _build_arguments_schema(cls, *args, **kwargs):
        if cls._args_schema is not None:
            return cls._args_schema
        cls._args_schema = super()._build_arguments_schema(*args, **kwargs)

        # define Arg Group ""

        _args_schema = cls._args_schema
        _args_schema.resource_name = AAZStrArg(
            options=["--resource-name"],
            help="Name of the IP Extended Community.",
            required=True,
            id_part="name",
        )
        _args_schema.resource_group = AAZResourceGroupNameArg(
            help="Name of the resource group",
            required=True,
        )
        return cls._args_schema

    def _execute_operations(self):
        self.pre_operations()
        self.IpExtendedCommunitiesGet(ctx=self.ctx)()
        self.post_operations()

    @register_callback
    def pre_operations(self):
        pass

    @register_callback
    def post_operations(self):
        pass

    def _output(self, *args, **kwargs):
        result = self.deserialize_output(self.ctx.vars.instance, client_flatten=False)
        return result

    class IpExtendedCommunitiesGet(AAZHttpOperation):
        CLIENT_TYPE = "MgmtClient"

        def __call__(self, *args, **kwargs):
            request = self.make_request()
            session = self.client.send_request(request=request, stream=False, **kwargs)
            if session.http_response.status_code in [200]:
                return self.on_200(session)
            return self.on_error(session.http_response)

        @property
        def url(self):
            return self.client.format_url(
                "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ManagedNetworkFabric/ipExtendedCommunities/{ipExtendedCommunityName}",
                **self.url_parameters
            )

        @property
        def method(self):
            return "GET"

        @property
        def error_format(self):
            return "MgmtErrorFormat"

        @property
        def url_parameters(self):
            parameters = {
                **self.serialize_url_param(
                    "ipExtendedCommunityName", self.ctx.args.resource_name,
                    required=True,
                ),
                **self.serialize_url_param(
                    "resourceGroupName", self.ctx.args.resource_group,
                    required=True,
                ),
                **self.serialize_url_param(
                    "subscriptionId", self.ctx.subscription_id,
                    required=True,
                ),
            }
            return parameters

        @property
        def query_parameters(self):
            parameters = {
                **self.serialize_query_param(
                    "api-version", "2023-06-15",
                    required=True,
                ),
            }
            return parameters

        @property
        def header_parameters(self):
            parameters = {
                **self.serialize_header_param(
                    "Accept", "application/json",
                ),
            }
            return parameters

        def on_200(self, session):
            data = self.deserialize_http_content(session)
            self.ctx.set_var(
                "instance",
                data,
                schema_builder=self._build_schema_on_200
            )

        _schema_on_200 = None

        @classmethod
        def _build_schema_on_200(cls):
            if cls._schema_on_200 is not None:
                return cls._schema_on_200

            cls._schema_on_200 = AAZObjectType()

            _schema_on_200 = cls._schema_on_200
            _schema_on_200.id = AAZStrType(
                flags={"read_only": True},
            )
            _schema_on_200.location = AAZStrType(
                flags={"required": True},
            )
            _schema_on_200.name = AAZStrType(
                flags={"read_only": True},
            )
            _schema_on_200.properties = AAZObjectType(
                flags={"required": True, "client_flatten": True},
            )
            _schema_on_200.system_data = AAZObjectType(
                serialized_name="systemData",
                flags={"read_only": True},
            )
            _schema_on_200.tags = AAZDictType()
            _schema_on_200.type = AAZStrType(
                flags={"read_only": True},
            )

            properties = cls._schema_on_200.properties
            properties.administrative_state = AAZStrType(
                serialized_name="administrativeState",
                flags={"read_only": True},
            )
            properties.annotation = AAZStrType()
            properties.configuration_state = AAZStrType(
                serialized_name="configurationState",
                flags={"read_only": True},
            )
            properties.ip_extended_community_rules = AAZListType(
                serialized_name="ipExtendedCommunityRules",
                flags={"required": True},
            )
            properties.provisioning_state = AAZStrType(
                serialized_name="provisioningState",
                flags={"read_only": True},
            )

            ip_extended_community_rules = cls._schema_on_200.properties.ip_extended_community_rules
            ip_extended_community_rules.Element = AAZObjectType()

            _element = cls._schema_on_200.properties.ip_extended_community_rules.Element
            _element.action = AAZStrType(
                flags={"required": True},
            )
            _element.route_targets = AAZListType(
                serialized_name="routeTargets",
                flags={"required": True},
            )
            _element.sequence_number = AAZIntType(
                serialized_name="sequenceNumber",
                flags={"required": True},
            )

            route_targets = cls._schema_on_200.properties.ip_extended_community_rules.Element.route_targets
            route_targets.Element = AAZStrType()

            system_data = cls._schema_on_200.system_data
            system_data.created_at = AAZStrType(
                serialized_name="createdAt",
            )
            system_data.created_by = AAZStrType(
                serialized_name="createdBy",
            )
            system_data.created_by_type = AAZStrType(
                serialized_name="createdByType",
            )
            system_data.last_modified_at = AAZStrType(
                serialized_name="lastModifiedAt",
            )
            system_data.last_modified_by = AAZStrType(
                serialized_name="lastModifiedBy",
            )
            system_data.last_modified_by_type = AAZStrType(
                serialized_name="lastModifiedByType",
            )

            tags = cls._schema_on_200.tags
            tags.Element = AAZStrType()

            return cls._schema_on_200
class _WaitHelper:
    """Helper class for Wait"""
    # NOTE(review): empty placeholder emitted by the aaz code generator.
__all__ = ["Wait"] |
"""Module for accessing the Earth Engine Data Catalog with dot notation."""
# *******************************************************************************#
# This module contains extra features of the geemap package. #
# The geemap community will maintain the extra features. #
# *******************************************************************************#
import json
import os
import shutil
import urllib.request
from pathlib import Path
import ipywidgets as widgets
import pkg_resources
from box import Box
from IPython.display import display
from .common import download_from_url, ee_data_html, search_ee_data
def get_data_csv():
    """Return the path to the bundled Earth Engine Data Catalog CSV file.

    Returns:
        str: File path to the CSV file.
    """
    # Locate the installed geemap package, then resolve the catalog CSV
    # shipped under its data/template directory.
    pkg_dir = os.path.dirname(pkg_resources.resource_filename("geemap", "geemap.py"))
    return os.path.join(pkg_dir, "data/template", "ee_data_catalog.csv")
def update_data_list(out_dir="."):
    """Updates the Earth Engine Data Catalog dataset list.

    Downloads the Earth-Engine-Datasets-List GitHub repository, copies its
    CSV catalog over the bundled one, then removes the downloaded files.

    Args:
        out_dir (str, optional): The output directory to save the GitHub repository. Defaults to ".".
    Raises:
        Exception: If the download or the CSV copy fails.
    """
    try:
        url = (
            "https://github.com/samapriya/Earth-Engine-Datasets-List/archive/master.zip"
        )
        filename = "Earth-Engine-Datasets-List-master.zip"
        dir_name = filename.replace(".zip", "")
        out_dir = os.path.abspath(out_dir)
        os.makedirs(out_dir, exist_ok=True)
        download_from_url(
            url, out_file_name=filename, out_dir=out_dir, unzip=True, verbose=False
        )
        work_dir = os.path.join(out_dir, dir_name)
        # The repository contains a single CSV; copy it over the bundled catalog.
        in_csv = list(Path(work_dir).rglob("*.csv"))[0]
        shutil.copyfile(in_csv, get_data_csv())
        # Clean up both the downloaded archive and the extracted directory.
        os.remove(os.path.join(out_dir, filename))
        shutil.rmtree(work_dir)
    except Exception as e:
        # Chain so the underlying download/filesystem error is not lost.
        raise Exception(e) from e
def get_data_list():
    """Gets a list of Earth Engine datasets.

    Returns:
        list: The list of dataset ids.
    """
    # Official STAC catalog first, then geemap extras, then community assets.
    return get_ee_stac_list() + get_geemap_data_list() + get_community_data_list()
def get_geemap_data_list():
    """Gets the list of the public datasets from GEE users.

    Returns:
        list: The list of public datasets from GEE users.
    """
    asset_names = (
        "countries",
        "us_states",
        "us_cities",
        "chn_admin_line",
        "chn_admin_level0",
        "chn_admin_level1",
        "chn_admin_level2",
    )
    # All extras live under the users/giswqs/public namespace.
    return ["users/giswqs/public/" + name for name in asset_names]
def get_community_data_list():
    """Gets the list community datasets
    from https://github.com/samapriya/awesome-gee-community-datasets/blob/master/community_datasets.json

    Returns:
        list: The list of Earth Engine asset IDs.
    """
    # Match everything in the community source, then keep only the asset ids.
    records = search_ee_data(".*", regex=True, source="community")
    return [record.get("id", None) for record in records]
def get_ee_stac_list():
    """Gets the STAC list of the Earth Engine Data Catalog.

    Raises:
        Exception: If the JSON file fails to download.
    Returns:
        list: The list of Earth Engine asset IDs.
    """
    stac_url = "https://raw.githubusercontent.com/samapriya/Earth-Engine-Datasets-List/master/gee_catalog.json"
    try:
        with urllib.request.urlopen(stac_url) as response:
            data = json.loads(response.read().decode())
        return [item["id"] for item in data]
    except Exception as e:
        # Chain so the underlying URL/JSON error is preserved in the traceback.
        raise Exception(e) from e
def merge_dict(dict1, dict2):
    """Recursively merges two nested dictionaries.

    The previous implementation performed a shallow ``{**dict1, **dict2}``
    merge, so when both inputs held a dict under the same key the whole
    sub-tree from *dict1* was discarded. For the catalog tree built in
    get_data_dict this silently dropped earlier datasets sharing a prefix
    (e.g. everything under "USGS" except the last one). Sub-dicts are now
    merged recursively; for non-dict values *dict2* still wins.

    Args:
        dict1 (dict): The first dictionary to merge.
        dict2 (dict): The second dictionary to merge.

    Returns:
        dict: The merged dictionary.
    """
    merged = dict(dict1)
    for key, value in dict2.items():
        if key in merged and isinstance(merged[key], dict) and isinstance(value, dict):
            merged[key] = merge_dict(merged[key], value)
        else:
            merged[key] = value
    return merged
def get_data_dict():
    """Gets the Earth Engine Data Catalog as a nested dictionary.

    Returns:
        dict: The nested dictionary containing the information about the Earth Engine Data Catalog.
    """
    catalog = {}
    for dataset_id in get_data_list():
        # Build a single-branch nested dict from the id parts, innermost
        # first: "A/B/C" -> {"A": {"B": {"C": "A/B/C"}}}.
        branch = {}
        parts = dataset_id.split("/")
        for depth, part in enumerate(reversed(parts)):
            branch = {part: dataset_id} if depth == 0 else {part: branch}
        catalog = merge_dict(catalog, branch)
        # Also expose a flat key with '/' replaced by '_'.
        catalog[dataset_id.replace("/", "_")] = dataset_id
    return catalog
def METHOD_NAME(asset_id, source="ee"):
    """Gets metadata about an Earth Engine asset and displays it as HTML.

    Args:
        asset_id (str): The Earth Engine asset id.
        source (str): 'ee', 'community' or 'all'.
    Raises:
        Exception: If the search fails or returns no results.
    """
    try:
        ee_assets = search_ee_data(asset_id, source=source)
        # Render only the first (best) match as an HTML widget.
        html_widget = widgets.HTML(value=ee_data_html(ee_assets[0]))
        display(html_widget)
    except Exception as e:
        # Chain so the underlying search/render error is not lost.
        raise Exception(e) from e
DATA = Box(get_data_dict(), frozen_box=True) |
5,520 | test both | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2014-2015 Glencoe Software, Inc. All Rights Reserved.
# Use is subject to license terms supplied in LICENSE.txt
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
"""
Test of the automatic JVM setting logic for OMERO startup.
"""
from __future__ import division
from builtins import str
from past.utils import old_div
from builtins import object
import pytest
import os
from omero.config import ConfigXml, xml
from omero.install.jvmcfg import adjust_settings
from omero.install.jvmcfg import ManualStrategy
from omero.install.jvmcfg import PercentStrategy
from omero.install.jvmcfg import Settings
from omero.install.jvmcfg import Strategy
from omero.install.jvmcfg import strip_dict
from omero.install.jvmcfg import usage_charts
from omero.util.temp_files import create_path
from omero_ext.path import path
from xml.etree.ElementTree import SubElement
from xml.etree.ElementTree import tostring
from xml.etree.ElementTree import XML
from test.unit.test_config import initial
# Resolve the server installation directory from the environment.
# False (not None) is the historical sentinel for "not set"; collapsing the
# membership check + lookup into a single .get() is behavior-identical.
OMERODIR = os.environ.get('OMERODIR', False)
def write_config(data):
    """Write *data* as <property> entries into a fresh temp config and return its path."""
    cfg_path = create_path()
    root = initial()
    for key, value in list(data.items()):
        # Add each property to both the __ACTIVE__ and default profiles.
        for profile in root[0:2]:
            SubElement(profile, "property", name=key, value=value)
    raw = tostring(root, 'utf-8')
    pretty = xml.dom.minidom.parseString(raw).toprettyxml(" ", "\n", None)
    cfg_path.write_text(pretty)
    return cfg_path
class TestMemoryStrip(object):
    """Tests for jvmcfg.strip_dict prefix/suffix key filtering."""

    def test_1(self):
        assert strip_dict({"a.b": "c"}, prefix="a") == {"b": "c"}

    def test_2(self):
        stripped = strip_dict({"a.b.c": "d"}, prefix="a.b")
        assert stripped["c"] == "d"

    def test_3(self):
        # Only omero.jvmcfg.* keys survive the default prefix.
        stripped = strip_dict({
            "omero.jvmcfg.foo": "a",
            "something.else": "b"})
        assert stripped["foo"] == "a"
        assert "something.else" not in stripped

    @pytest.mark.parametrize("input,output", (
        ({"omero.jvmcfg.heap_size.blitz": "1g"}, {"heap_size": "1g"}),
    ))
    def test_4(self, input, output):
        cfg_file = write_config(input)
        config = ConfigXml(filename=str(cfg_file), env_config="default")
        try:
            stripped = strip_dict(config.as_map(), suffix="blitz")
            assert stripped == output
        finally:
            config.close()

    def test_5(self):
        stripped = strip_dict({
            "omero.jvmcfg.a.blitz": "b",
        }, suffix="blitz")
        assert stripped["a"] == "b"
class TestSettings(object):
    """Tests for jvmcfg.Settings default/explicit value resolution."""

    def test_initial(self):
        s = Settings()
        assert (s.perm_gen, s.heap_dump, s.heap_size) == ("128m", "off", "512m")

    def test_explicit(self):
        explicit = {"perm_gen": "xxx", "heap_dump": "yyy", "heap_size": "zzz"}
        s = Settings(explicit)
        assert (s.perm_gen, s.heap_dump, s.heap_size) == ("xxx", "yyy", "zzz")

    def test_defaults(self):
        defaults = {"perm_gen": "xxx", "heap_dump": "yyy", "heap_size": "zzz"}
        s = Settings({}, defaults)
        assert (s.perm_gen, s.heap_dump, s.heap_size) == ("xxx", "yyy", "zzz")

    def METHOD_NAME(self):
        # Explicit values must win over defaults when both are supplied.
        s = Settings(
            {"perm_gen": "aaa", "heap_dump": "bbb", "heap_size": "ccc"},
            {"perm_gen": "xxx", "heap_dump": "yyy", "heap_size": "zzz"},
        )
        assert (s.perm_gen, s.heap_dump, s.heap_size) == ("aaa", "bbb", "ccc")
class TestStrategy(object):
    """Tests for the JVM memory Strategy hierarchy."""

    def test_no_instantiate(self):
        # The abstract base must refuse direct construction.
        with pytest.raises(Exception):
            Strategy("blitz")

    def test_hard_coded(self):
        memory_settings = ManualStrategy("blitz").get_memory_settings()
        assert memory_settings == [
            "-Xmx512m",
            "-XX:MaxPermSize=128m",
            "-XX:+IgnoreUnrecognizedVMOptions",
        ]

    def test_percent_usage(self):
        first_row = list(PercentStrategy("blitz").usage_table(15, 16))[0]
        assert first_row[0] == 2**15
        assert first_row[1] == int(2**15 * 15 / 100)

    def test_heap_dump_on(self):
        strategy = PercentStrategy("blitz", Settings({"heap_dump": "on"}))
        dump = strategy.get_heap_dump()
        extra = strategy.get_append()
        assert " " not in dump
        assert "HeapDumpPath" not in dump
        assert not extra

    def test_heap_dump_tmp(self):
        strategy = PercentStrategy("blitz", Settings({"heap_dump": "tmp"}))
        dump = strategy.get_heap_dump()
        extra = strategy.get_append()
        assert " " not in dump
        assert "HeapDumpPath" not in dump
        # The tmp variant passes the dump path via the appended options.
        assert "HeapDumpPath" in "".join(extra)
class AdjustFixture(object):
    """Bundles one adjust_settings scenario: input config, expected output,
    a display name, and extra keyword arguments for the call."""

    def __init__(self, input, output, name, **kwargs):
        self.input = input
        self.output = output
        self.name = name
        self.kwargs = kwargs

    def validate(self, rv):
        for key, expected in list(self.output.items()):
            assert key in rv
            actual = rv[key]
            actual.pop(0)  # drop the leading settings entry before comparing
            assert expected == actual, "%s.%s: %s <> %s" % (
                self.name, key, expected, actual)
import json

# Load the adjust_settings fixtures from the sibling "<this module>.json".
# Previously the file handle was opened and never closed; use a context
# manager so it is released deterministically.
with open(__file__[:-3] + ".json", "r") as f:
    data = json.load(f)
AFS = [AdjustFixture(x["input"], x["output"], x["name"]) for x in data]
def template_xml():
    """Parse etc/templates/grid/templates.xml relative to OMERODIR."""
    grid_dir = old_div(path(OMERODIR), "..") / "etc" / "templates" / "grid"
    templates = (grid_dir / "templates.xml").abspath()
    return XML(templates.text())
class TestAdjustStrategy(object):
    """Run adjust_settings against every fixture in the companion JSON file."""
    @pytest.mark.skipif(OMERODIR is False, reason="Need /grid/templates.xml")
    @pytest.mark.parametrize("fixture", AFS, ids=[x.name for x in AFS])
    def test_adjust(self, fixture, monkeypatch):
        # Pin the detected system memory so results are machine-independent.
        monkeypatch.setattr(Strategy, '_system_memory_mb_java',
                            lambda x: (2000, 4000))
        p = write_config(fixture.input)
        # NOTE: 'xml' deliberately shadows the imported omero.config.xml
        # within this function scope.
        xml = template_xml()
        config = ConfigXml(filename=str(p), env_config="default")
        try:
            rv = adjust_settings(config, xml, **fixture.kwargs)
            fixture.validate(rv)
        finally:
            config.close()
    @pytest.mark.parametrize("fixture", AFS, ids=[x.name for x in AFS])
    def test_12527(self, fixture, monkeypatch):
        monkeypatch.setattr(Strategy, '_system_memory_mb_java',
                            lambda x: (2000, 4000))
        p = write_config(fixture.input)
        # Old-format templates must be rejected with an exception.
        old_templates = old_div(path(__file__).dirname(), "old_templates.xml")
        xml = XML(old_templates.abspath().text())
        config = ConfigXml(filename=str(p), env_config="default")
        with pytest.raises(Exception):
            adjust_settings(config, xml, **fixture.kwargs)
class TestChart(object):
    """Smoke test for the optional usage-chart generation."""
    def test_percent_chart(self):
        # Chart generation is best-effort: skip silently when the optional
        # plotting dependencies are not installed.
        try:
            usage_charts("target/charts.png")
        except ImportError:
            # Requires matplotlib, etc
            pass
5,521 | get value | # (C) Copyright David Abrahams 2001. Permission to copy, use, modify, sell and
# distribute this software is granted provided this copyright notice appears in
# all copies. This software is provided "as is" without express or implied
# warranty, and with no claim as to its suitability for any purpose.
""" Utility functions to add/remove/get grists.
Grists are string enclosed in angle brackets (<>) that are used as prefixes. See Jam for more information.
"""
import re
import os
import bjam
from b2.exceptions import *
from b2.util import is_iterable_typed
__re_grist_and_value = re.compile (r'(<[^>]*>)(.*)')
__re_grist_content = re.compile ('^<(.*)>$')
__re_backslash = re.compile (r'\\')
def to_seq (value):
    """ Normalise *value* to a sequence.

        A false-y value yields an empty list, a string is wrapped in a
        one-element list, and any other sequence is returned unchanged.
    """
    if not value:
        return []
    return [value] if isinstance(value, str) else value
def replace_references_by_objects (manager, refs):
    """Map each reference in *refs* to its object via manager.get_object."""
    return [manager.get_object(ref) for ref in refs]
def add_grist (features):
    """ Transform a string by bracketing it with "<>". If already bracketed, does nothing.
        features: one string or a sequence of strings
        return: the gristed string, if features is a string, or a sequence of gristed strings, if features is a sequence
    """
    assert is_iterable_typed(features, basestring) or isinstance(features, basestring)
    def grist_one (feature):
        # Bracket only when the string is not already gristed.
        if feature[0] != '<' and feature[-1] != '>':
            return '<' + feature + '>'
        return feature
    if isinstance(features, str):
        return grist_one(features)
    return [grist_one(f) for f in features]
def replace_grist (features, new_grist):
    """ Replaces the grist of a string by a new one.
        Returns the string with the new grist.
    """
    assert is_iterable_typed(features, basestring) or isinstance(features, basestring)
    assert isinstance(new_grist, basestring)
    # Hot path during the build phase: keep per-item work to a single
    # str.partition call instead of a regex match.
    was_string = isinstance(features, str)
    if was_string:
        features = [features]
    regristed = []
    for feature in features:
        # '<feature>value' -> ('<feature', '>', 'value')
        # 'something' -> ('something', '', '')
        # '<toolset>msvc/<feature>value' -> ('<toolset', '>', 'msvc/<feature>value')
        head, sep, value = feature.partition('>')
        # No '>' present: the whole feature is the value.
        if not value and not sep:
            value = head
        regristed.append(new_grist + value)
    return regristed[0] if was_string else regristed
def METHOD_NAME (property):
    """ Gets the value of a property, that is, the part following the grist, if any.
    """
    assert is_iterable_typed(property, basestring) or isinstance(property, basestring)
    # Replacing the grist with the empty string leaves just the value part.
    return replace_grist (property, '')
def get_grist (value):
    """ Returns the grist of a string.
        If value is a sequence, does it for every value and returns the result as a sequence.
    """
    assert is_iterable_typed(value, basestring) or isinstance(value, basestring)
    def grist_of(name):
        # The precompiled module regex captures the leading '<...>' part.
        match = __re_grist_and_value.match(name)
        return match.group(1) if match else ''
    if isinstance(value, str):
        return grist_of(value)
    return [grist_of(v) for v in value]
def ungrist (value):
    """ Returns the value without grist.
        If value is a sequence, does it for every value and returns the result as a sequence.
    """
    assert is_iterable_typed(value, basestring) or isinstance(value, basestring)
    def strip_one(v):
        match = __re_grist_content.match(v)
        if not match:
            raise BaseException ("in ungrist: '%s' is not of the form <.*>" % v)
        return match.group(1)
    if isinstance(value, str):
        return strip_one(value)
    return [strip_one(v) for v in value]
def replace_suffix (name, new_suffix):
    """ Replaces the suffix of name by new_suffix.
        If no suffix exists, the new one is added.
    """
    assert isinstance(name, basestring)
    assert isinstance(new_suffix, basestring)
    root, _ = os.path.splitext(name)
    return root + new_suffix
def forward_slashes (s):
    """ Converts all backslashes to forward slashes.
    """
    assert isinstance(s, basestring)
    # Plain str.replace suffices; the precompiled __re_backslash regex
    # defined at module level is not needed here.
    return s.replace('\\', '/')
def split_action_id (id):
    """ Splits an id in the toolset and specific rule parts. E.g.
        'gcc.compile.c++' returns ('gcc', 'compile.c++')
    """
    assert isinstance(id, basestring)
    # partition splits on the first '.' only, mirroring split('.', 1);
    # when no '.' exists the rule name is the empty string.
    toolset, _, name = id.partition('.')
    return (toolset, name)
def os_name ():
    # bjam.variable returns a list; OS is expected to hold exactly one value.
    result = bjam.variable("OS")
    assert(len(result) == 1)
    return result[0]
def platform ():
    # The OSPLAT value list as reported by bjam.
    return bjam.variable("OSPLAT")
def os_version ():
    # The OSVER value list as reported by bjam.
    return bjam.variable("OSVER")
def on_windows ():
    """ Returns true if running on windows, whether in cygwin or not.
    """
    # Native Windows sets NT; under cygwin we are "UNIX" but JAMUNAME
    # starts with CYGWIN.
    if bjam.variable("NT"):
        return True
    if bjam.variable("UNIX"):
        uname = bjam.variable("JAMUNAME")
        return bool(uname and uname[0].startswith("CYGWIN"))
    return False
5,522 | load | from __future__ import annotations
import logging
import traceback
from pathlib import Path
from typing import Optional
import configobj
from configobj import ParseError
from pydantic import BaseSettings, Extra, PrivateAttr
from tribler.core.components.bandwidth_accounting.settings import BandwidthAccountingSettings
from tribler.core.components.gigachannel.community.settings import ChantSettings
from tribler.core.components.ipv8.settings import (
BootstrapSettings,
DHTSettings,
DiscoveryCommunitySettings,
Ipv8Settings,
)
from tribler.core.components.key.settings import TrustchainSettings
from tribler.core.components.libtorrent.settings import DownloadDefaultsSettings, LibtorrentSettings
from tribler.core.components.metadata_store.remote_query_community.settings import RemoteQueryCommunitySettings
from tribler.core.components.popularity.settings import PopularityCommunitySettings
from tribler.core.components.resource_monitor.settings import ResourceMonitorSettings
from tribler.core.components.restapi.rest.settings import APISettings
from tribler.core.components.torrent_checker.settings import TorrentCheckerSettings
from tribler.core.components.tunnel.settings import TunnelCommunitySettings
from tribler.core.components.watch_folder.settings import WatchFolderSettings
from tribler.core.settings import GeneralSettings
logger = logging.getLogger('Tribler Config')
DEFAULT_CONFIG_NAME = 'triblerd.conf'
class TriblerConfig(BaseSettings):
    """ Tribler config class that contains common logic for manipulating with a config."""
    class Config:
        extra = Extra.ignore  # ignore extra attributes during model initialization
    # One pydantic sub-model per Tribler component; instantiating defaults
    # here means a missing section in the config file is still valid.
    general: GeneralSettings = GeneralSettings()
    tunnel_community: TunnelCommunitySettings = TunnelCommunitySettings()
    bandwidth_accounting: BandwidthAccountingSettings = BandwidthAccountingSettings()
    bootstrap: BootstrapSettings = BootstrapSettings()
    ipv8: Ipv8Settings = Ipv8Settings()
    discovery_community: DiscoveryCommunitySettings = DiscoveryCommunitySettings()
    dht: DHTSettings = DHTSettings()
    trustchain: TrustchainSettings = TrustchainSettings()
    watch_folder: WatchFolderSettings = WatchFolderSettings()
    chant: ChantSettings = ChantSettings()
    torrent_checking: TorrentCheckerSettings = TorrentCheckerSettings()
    libtorrent: LibtorrentSettings = LibtorrentSettings()
    download_defaults: DownloadDefaultsSettings = DownloadDefaultsSettings()
    api: APISettings = APISettings()
    resource_monitor: ResourceMonitorSettings = ResourceMonitorSettings()
    popularity_community: PopularityCommunitySettings = PopularityCommunitySettings()
    remote_query_community: RemoteQueryCommunitySettings = RemoteQueryCommunitySettings()
    # Special configuration options related to the operation mode of the Core
    upgrader_enabled: bool = True
    gui_test_mode: bool = False
    # Private (non-serialised) runtime state.
    _state_dir: Path = PrivateAttr()  # base dir for resolving relative paths
    _file: Optional[Path] = PrivateAttr()  # a last file saved during write-load operations
    # Annotation corrected from Optional[Exception]: both the constructor's
    # `error` argument and the `error` property treat this as a str
    # (traceback.format_exc() output).
    _error: Optional[str] = PrivateAttr()
    def __init__(self, *args, state_dir: Path = None, file: Path = None, error: str = None, **kwargs):
        """ Constructor
        Args:
            *args: Arguments that will be passed to the `BaseSettings` constructor.
            state_dir: Tribler's state dir. Will be used for calculated relative paths.
            file: A config file.
            error: A last error.
            **kwargs: Arguments that will be passed to the `BaseSettings` constructor.
        """
        super().__init__(*args, **kwargs)
        if not file and state_dir:
            file = state_dir / DEFAULT_CONFIG_NAME  # assign default file name
        self.set_state_dir(state_dir)
        self.set_file(file)
        self._error = error
        logger.info(f'Init. State dir: {state_dir}. File: {file}')
    @staticmethod
    def METHOD_NAME(state_dir: Path, file: Path = None, reset_config_on_error: bool = False) -> TriblerConfig:
        """ Load a config from a file
        Args:
            state_dir: A Tribler's state dir.
            file: A path to the config file.
            reset_config_on_error: a flag that shows whether it is necessary to
                create a new config in case of an error.
        Returns: `TriblerConfig` instance.
        """
        file = file or state_dir / DEFAULT_CONFIG_NAME
        logger.info(f'Load: {file}. State dir: {state_dir}. Reset config on error: {reset_config_on_error}')
        error = None
        config = None
        try:
            dictionary = configobj.ConfigObj(infile=str(file))
            config = TriblerConfig.parse_obj(dictionary)
            config.set_state_dir(state_dir)
            config.set_file(file)
        except (ParseError, ValueError) as e:
            logger.error(e)
            if not reset_config_on_error:
                raise
            error = traceback.format_exc()
        if error:
            # Replace the broken config with a fresh one that remembers the
            # error text, and persist it immediately.
            logger.info('Resetting a config')
            config = TriblerConfig(state_dir=state_dir, file=file, error=error)
            config.write(file=file)
        return config
    def write(self, file: Path = None):
        """Save a config to a file
        Args:
            file: Path to the config. In case it is omitted, last file will be used.
        """
        if not file:
            file = self._file  # try to remember a file from the last load-write
        logger.info(f'Write: {file}')
        self._file = file
        if not file:
            return
        parent = Path(file).parent
        if not parent.exists():
            logger.info(f'Create folder: {parent}')
            parent.mkdir(parents=True)
        # Serialise only non-default values and never persist runtime-only
        # options (operation-mode flags, ports, proxy settings).
        dictionary = self.dict(exclude_defaults=True,
                               exclude={'upgrader_enabled': ...,
                                        'gui_test_mode': ...,
                                        'tunnel_community': {'socks5_listen_ports': ...},
                                        'libtorrent': {'anon_proxy_server_ports': ...,
                                                       'anon_proxy_type': ...,
                                                       'anon_proxy_auth': ...,
                                                       'anon_listen_port': ...,
                                                       'anon_proxy_server_ip': ...}})
        conf = configobj.ConfigObj(dictionary, encoding='utf-8')
        conf.filename = str(file)
        conf.write()
    @property
    def error(self) -> Optional[str]:
        # Formatted traceback of the last failed load, or None.
        return self._error
    @property
    def state_dir(self) -> Optional[Path]:
        return self._state_dir
    def set_state_dir(self, val):
        # Accept any path-like (or None) and normalise to Path.
        self._state_dir = Path(val) if val is not None else None
    @property
    def file(self) -> Optional[Path]:
        return self._file
    def set_file(self, val):
        self._file = Path(val) if val is not None else None
5,523 | test get full name returns username | import pytest
from django.contrib.auth import get_user_model
from django.contrib.auth.models import AnonymousUser
from rdmo.accounts.models import Role
from rdmo.accounts.utils import get_full_name, is_site_manager, delete_user, get_user_from_db_or_none
# Credential fixtures: (username, password, email) tuples for a regular
# account and for a site-manager account present in the test database.
normal_users = (
    ('user', 'user', 'user@example.com'),
)
site_managers = (
    ('site', 'site', 'site@example.com'),
)
# All accounts, for tests that apply to every kind of user.
users = (*normal_users, *site_managers)
@pytest.mark.parametrize('username,password,email', users)
def test_get_full_name(db, username, password, email):
    # With both names set, the full name is "<first> <last>".
    user = get_user_model().objects.get(username=username, email=email)
    expected = user.first_name + ' ' + user.last_name
    assert get_full_name(user) == expected
@pytest.mark.parametrize('username,password,email', users)
def METHOD_NAME(db, username, password, email):
    # With an empty first name, get_full_name falls back to the username.
    user = get_user_model().objects.get(username=username, email=email)
    user.first_name = ''
    user.save()
    assert get_full_name(user) == username
def test_is_site_manager_returns_true_for_superuser(admin_user):
    # The admin_user fixture is a superuser, which always qualifies.
    assert is_site_manager(admin_user) is True
def test_is_site_manager_returns_false_for_not_authenticated_user():
    # Anonymous (unauthenticated) visitors can never be site managers.
    assert is_site_manager(AnonymousUser()) is False
@pytest.mark.parametrize('username,password,email', site_managers)
def test_is_site_manager_returns_true_for_site_managers(db, client, username, password, email):
    # An account carrying the site-manager role must be recognised.
    client.login(username=username, password=password)
    manager = get_user_model().objects.get(username=username, email=email)
    assert is_site_manager(manager) is True
@pytest.mark.parametrize('username,password,email', site_managers)
def test_is_site_manager_returns_false_when_role_doesnotexist_(db, client, username, password, email):
    client.login(username=username, password=password)
    # With all Role rows removed, even a manager account loses the status.
    Role.objects.all().delete()
    manager = get_user_model().objects.get(username=username, email=email)
    assert is_site_manager(manager) is False
@pytest.mark.parametrize('username,password,email', users)
def test_delete_user_returns_true(db, username, password, email):
    # Happy path: matching user, email and correct password succeed.
    user = get_user_model().objects.get(username=username, email=email)
    assert delete_user(user=user, email=email, password=password) is True
@pytest.mark.parametrize('username,password,email', users)
def test_delete_user_returns_false_when_user_or_email_is_none(db, username, password, email):
    user = get_user_model().objects.get(username=username, email=email)
    # Every combination with a missing user or email must be rejected.
    cases = ((user, None), (None, email), (None, None))
    for case_user, case_email in cases:
        assert delete_user(user=case_user, email=case_email) is False
@pytest.mark.parametrize('username,password,email', users)
def test_delete_user_returns_false_when_user_is_not_equal_to_db_user(db, username, password, email):
    user = get_user_model().objects.get(username=username, email=email)
    # Mutate the pk so the in-memory user no longer matches the db record.
    user.pk += 1
    assert delete_user(user=user, email=email, password=None) is False
@pytest.mark.parametrize('username,password,email', users)
def test_delete_user_returns_false_when_user_with_usable_password_gives_none_for_password(db, username, password, email):
    # Accounts with a usable password must supply it to be deleted.
    user = get_user_model().objects.get(username=username, email=email)
    assert delete_user(user=user, email=email, password=None) is False
@pytest.mark.parametrize('username,password,email', users)
def test_delete_user_returns_false_when_delete_user_raises_an_exception(db, username, password, email):
    user = get_user_model().objects.get(username=username, email=email)
    # Simulate a database failure by making delete() raise.
    def _delete(): raise ValueError
    user.delete = _delete
    assert delete_user(user=user, email=email, password=password) is False
@pytest.mark.parametrize('username,password,email', users)
def test_delete_user_returns_false_when_delete_is_called_for_user_without_usable_password_and_raises_an_exception(db, username, password, email):
    user = get_user_model().objects.get(username=username, email=email)
    # No password is required once it is unusable, but a failing delete()
    # must still be reported as False.
    user.set_unusable_password()
    def _delete(): raise ValueError
    user.delete = _delete
    assert delete_user(user=user, email=email) is False
@pytest.mark.parametrize('username,password,email', users)
def test_get_user_from_db_or_none_returns_user(db, username, password, email):
    # An exact username/email match returns the user object itself.
    user = get_user_model().objects.get(username=username, email=email)
    assert get_user_from_db_or_none(username, email) == user
@pytest.mark.parametrize('username,password,email', users)
def test_get_user_from_db_or_none_returns_none_when_wrong_input_was_given(db, username, password, email):
    # Any mismatched or missing credential pair yields None.
    bad_inputs = ((username, 'none@example.com'), ('none', email), (None, None))
    for bad_username, bad_email in bad_inputs:
        assert get_user_from_db_or_none(bad_username, bad_email) is None
5,524 | p binary expr | # Copyright (c) Lawrence Livermore National Security, LLC and other VisIt
# Project developers. See the top-level LICENSE file for dates and other
# details. No copyright assignment is required to contribute to VisIt.
"""
File: visit_exprs_parser.py
Authors: Cyrus Harrison <cyrush@llnl.gov>
Maysam Moussalem <maysam@tacc.utexas.edu>
Description:
ply (python lex & yacc) parser for a simple expression language.
I used Mayam's visit_exprs.py as a starting point & adapted a subset
of rules from VisIt's existing expression language parser:
http://visit.ilight.com/svn/visit/trunk/src/common/expr/ExprGrammar.C
I also used the following references:
http://drdobbs.com/web-development/184405580
http://www.juanjoconti.com.ar/files/python/ply-examples/
Usage:
>>> from parser import *
>>> print Parser.parse("vx = a(2,3) + b^3 + 4 * var")
"""
import sys
import os
try:
import ply.lex as lex
import ply.yacc as yacc
except ImportError as e:
pass
#------------------------------------------------------------------
# Key objects used to encapsulate the data flow network components.
#------------------------------------------------------------------
class FuncCall(object):
    """A call node: a filter name plus optional positional args and params."""

    def __init__(self, name, args=None, params=None):
        self.name = name
        self.args = args
        self.params = params

    def __str__(self):
        text = self.name + "("
        if self.args is not None:
            text += str(self.args)
        if self.params is not None:
            # Separate params from args only when args were printed.
            if not text.endswith("("):
                text += " , "
            text += str(self.params)
        return text + ")"

    def __repr__(self):
        return str(self)
class Assignment(object):
    """An assignment statement: a target name bound to an expression node."""

    def __init__(self, name, value):
        self.name = name
        self.value = value

    def __str__(self):
        return "%s = %s" % (self.name, self.value)

    def __repr__(self):
        return str(self)
class Constant(object):
    """A literal constant (int, float, bool, or string) in the expression tree."""

    def __init__(self, value):
        self.value = value

    def __str__(self):
        return "Const(%s)" % (self.value,)

    def __repr__(self):
        return str(self)
class Identifier(object):
    """A bare identifier (variable name) in the expression tree."""

    def __init__(self, name):
        self.name = name

    def __str__(self):
        return "Id(%s)" % (self.name,)

    def __repr__(self):
        return str(self)
#------------------
# Lexer rules
#------------------
#
# lexer token names
#
# Token names ply exposes to the grammar; each entry must have a matching
# t_<NAME> rule (function or regex string) below.
tokens = ['INT',
          'FLOAT',
          'BOOL',
          'STRING',
          'ID',
          'PLUS',
          'MINUS',
          'MULT',
          'DIV',
          'EXP',
          'GTE',
          'LTE',
          'GT',
          'LT',
          'EQ',
          'ASSIGN',
          'COMMA',
          'LPAREN',
          'RPAREN',
          'LBRACKET',
          'RBRACKET',
          'LBRACE',
          'RBRACE',
          'SEMI',
          "NEWLINE"
          ]
#
# lexer token reg exprs
#
t_PLUS = r'\+'
t_MINUS = r'-'
t_MULT = r'\*'
# BUG FIX: DIV previously matched a literal backslash (r'\\'), but
# p_binary_expr maps the matched operator text through binary_expr_names,
# which only contains '/'. A '\' division token would raise KeyError, and
# '/' could not be tokenized at all.
t_DIV = r'/'
t_EXP = r'\^'
t_GTE = r'\>\='
t_LTE = r'\<\='
t_GT = r'\>'
t_LT = r'\<'
t_EQ = r'\=\='
t_ASSIGN = r'\='
t_COMMA = r'\,'
t_LPAREN = r'\('
t_RPAREN = r'\)'
t_LBRACKET = r'\['
t_RBRACKET = r'\]'
t_LBRACE = r'\{'
t_RBRACE = r'\}'
t_SEMI = r'\;'
# floating point number
def t_FLOAT(t):
    r'-?\d+\.\d*(e-?\d+)?'
    # The docstring above is the ply token regex; convert the lexeme to float.
    t.value = float(t.value)
    return t
# integer
def t_INT(t):
    r'-?\d+'
    # Declared after t_FLOAT so a number with a decimal point is not split.
    t.value = int(t.value)
    return t
# boolean value
def t_BOOL(t):
    r'true|false|True|False'
    # A case-insensitive comparison collapses all four spellings to a bool.
    t.value = t.value.lower() == "true"
    return t
# identifier
def t_ID(t):
    r'[a-zA-Z_][a-zA-Z_0-9]*'
    # Wrap the raw name so later grammar rules can access it as t.value.name.
    t.value = Identifier(t.value)
    return t
# string
# Read in a string, as in C.
# The following backslash sequences have their usual special meaning:
# \", \\, \n, and \t.
def t_STRING(t):
    r'\"([^\\"]|(\\.))*\"'
    # Strip the surrounding quotes, then decode C-style escapes by hand.
    # Any unrecognised escape (e.g. \x) yields the bare character.
    # (Renamed the local previously called 'str' to avoid shadowing the
    # builtin, and iterate characters directly instead of by index.)
    escaped = False
    raw = t.value[1:-1]
    decoded = ""
    for ch in raw:
        if escaped:
            if ch == "n":
                ch = "\n"
            elif ch == "t":
                ch = "\t"
            decoded += ch
            escaped = False
        elif ch == "\\":
            escaped = True
        else:
            decoded += ch
    t.value = decoded
    return t
# Characters skipped silently between tokens.
t_ignore = " \t"
# support python style comments
def t_COMMENT(t):
    r'\#.*\n*'
    pass
    # No return value. Token discarded
# track line numbers and handle newlines
def t_NEWLINE(t):
    r'\n+'
    # Count every newline in the match so error messages report real lines.
    t.lexer.lineno += len(t.value)
    return t
# catch all error handler
def t_error(t):
    # Report and skip the offending character rather than aborting the lex.
    print(("Illegal character '%s'" % t.value[0]))
    t.lexer.skip(1)
#------------------
# Parsing rules
#------------------
# used to map symbols to eventual
# data flow filter names
# Keys are the operator token texts matched by the lexer (see p_binary_expr).
binary_expr_names = {"+":"add",
                     "-":"sub",
                     "*":"mult",
                     "^":"pow",
                     "/":"divide",
                     ">=":"gte",
                     "<=":"lte",
                     ">":"gt",
                     "<":"lt",
                     "==":"equal"}
# Adding precedence rules
# NOTE(review): ply orders precedence tuples lowest-first, which makes the
# comparison operators bind tightest here — confirm this is intended.
precedence = (
    ('left', 'PLUS', 'MINUS'),
    ('left', 'MULT', 'DIV'),
    ('left', 'EXP'),
    ('right', 'EQ', 'LT', 'GT', 'LTE', 'GTE')
)
def p_statements(t):
    """
    statements : statements statement
               | statement
    """
    # Accumulate parsed statements into a flat python list.
    if len(t) > 2:
        t[0] = t[1] + [t[2]]
    else:
        t[0] = [t[1]]
def p_statement(t):
    """
    statement : assign_expr NEWLINE
              | assign_expr SEMI NEWLINE
              | assign_expr
    """
    # The assignment itself is the statement value; terminators are dropped.
    t[0] = t[1]
def p_statement_newline(t):
    """
    statement : NEWLINE
    """
    # Blank lines produce no statement (t[0] stays None).
    pass
def p_assign(t):
    """
    assign_expr : ID ASSIGN expr
    """
    # Store the bare identifier name (not the Identifier wrapper) as target.
    t[0] = Assignment(t[1].name,t[3])
def p_expr(t):
    """
    expr : binary_expr
         | unary_expr
         | var
         | func
    """
    # Pass-through: each alternative has already built its tree node.
    t[0] = t[1]
def p_expr_paren(t):
    """
    expr : LPAREN expr RPAREN
    """
    # Parentheses only group; keep the inner expression node.
    t[0] = t[2]
def METHOD_NAME(t):
    """
    binary_expr : expr PLUS expr
                | expr MINUS expr
                | expr MULT expr
                | expr EXP expr
                | expr DIV expr
                | expr GTE expr
                | expr LTE expr
                | expr GT expr
                | expr LT expr
                | expr EQ expr
    """
    # t[2] is the matched operator text; map it to its filter name and wrap
    # both operands as positional args.
    t[0] = FuncCall(binary_expr_names[t[2]],[t[1],t[3]])
def p_unary_expr(t):
    """
    unary_expr : MINUS expr
    """
    # NOTE(review): the call name is the literal '-' token text here, unlike
    # binary operators which are mapped via binary_expr_names — confirm.
    t[0] = FuncCall(t[1],[t[2]])
def p_func(t):
    """
    func : ID LPAREN args RPAREN
         | ID LPAREN RPAREN
         | LBRACE args RBRACE
         | ID LBRACKET INT RBRACKET
    """
    if t[2] == ")":
        # ID ( ) -- call with no arguments.
        t[0] = FuncCall(t[1].name)
    elif t[1] == "{":
        # { args } -- becomes a call to "compose".
        t[0] = FuncCall("compose",t[2])
    elif t[2] == "[":
        # ID [ INT ] -- becomes a "decompose" call with an index param.
        # note, we will need better 'params' support in the
        # future
        t[0] = FuncCall("decompose",[t[1]],{"index":t[3]})
    else:
        # ID ( args ) -- ordinary call.
        t[0] = FuncCall(t[1].name, t[3])
def p_var(t):
    """
    var : const
        | ID
    """
    # Either a Constant node or an Identifier, already constructed.
    t[0] = t[1]
def p_const(t):
    """
    const : INT
          | FLOAT
          | BOOL
          | STRING
    """
    # Wrap the already-converted literal value in a Constant node.
    t[0] = Constant(t[1])
def p_args_extend(t):
    """
    args : args COMMA expr
    """
    # Append the new expression to the growing argument list.
    t[0] = t[1] + [t[3]]
def p_args_expr(t):
    """
    args : expr
    """
    # A single expression starts a one-element argument list.
    t[0] = [t[1]]
# catch all parsing error handler
def p_error(p):
    # ply passes None at unexpected end-of-input; guard before printing.
    if p:
        print("<line",p.lineno, "> Syntax Error", p.type, p.value)
class Parser(object):
    """Thin class wrapper around the module-level ply lexer/parser."""
    @classmethod
    def init(cls):
        # Build the lexer
        lex.lex()
        # Build the parser
        yacc.yacc()
    @classmethod
    def parse(cls,txt):
        """
        Main entry point for parsing from outside of this module.
        """
        # NOTE(review): assumes init() has been called (or ply's cached
        # parser tables exist) before the first parse — confirm.
        return yacc.parse(txt)
__all__ = ["Parser","FuncCall","Assignment","Constant","Identifier"] |
5,525 | name | # coding=utf-8
# *** WARNING: this file was generated by pulumi. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import copy
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from . import outputs
__all__ = [
'GetVaultResult',
'AwaitableGetVaultResult',
'get_vault',
'get_vault_output',
]
@pulumi.output_type
class GetVaultResult:
    """
    Resource information with extended details.
    """
    # NOTE: pulumi-generated output type; @pulumi.output_type wires the
    # @pulumi.getter properties below to the values stored with pulumi.set.
    def __init__(__self__, id=None, location=None, METHOD_NAME=None, properties=None, system_data=None, tags=None, type=None):
        if id and not isinstance(id, str):
            raise TypeError("Expected argument 'id' to be a str")
        pulumi.set(__self__, "id", id)
        if location and not isinstance(location, str):
            raise TypeError("Expected argument 'location' to be a str")
        pulumi.set(__self__, "location", location)
        if METHOD_NAME and not isinstance(METHOD_NAME, str):
            raise TypeError("Expected argument 'name' to be a str")
        pulumi.set(__self__, "name", METHOD_NAME)
        if properties and not isinstance(properties, dict):
            raise TypeError("Expected argument 'properties' to be a dict")
        pulumi.set(__self__, "properties", properties)
        if system_data and not isinstance(system_data, dict):
            raise TypeError("Expected argument 'system_data' to be a dict")
        pulumi.set(__self__, "system_data", system_data)
        if tags and not isinstance(tags, dict):
            raise TypeError("Expected argument 'tags' to be a dict")
        pulumi.set(__self__, "tags", tags)
        if type and not isinstance(type, str):
            raise TypeError("Expected argument 'type' to be a str")
        pulumi.set(__self__, "type", type)
    @property
    @pulumi.getter
    def id(self) -> str:
        """
        Fully qualified identifier of the key vault resource.
        """
        return pulumi.get(self, "id")
    @property
    @pulumi.getter
    def location(self) -> Optional[str]:
        """
        Azure location of the key vault resource.
        """
        return pulumi.get(self, "location")
    @property
    @pulumi.getter
    def METHOD_NAME(self) -> str:
        """
        Name of the key vault resource.
        """
        return pulumi.get(self, "name")
    @property
    @pulumi.getter
    def properties(self) -> 'outputs.VaultPropertiesResponse':
        """
        Properties of the vault
        """
        return pulumi.get(self, "properties")
    @property
    @pulumi.getter(METHOD_NAME="systemData")
    def system_data(self) -> 'outputs.SystemDataResponse':
        """
        System metadata for the key vault.
        """
        return pulumi.get(self, "system_data")
    @property
    @pulumi.getter
    def tags(self) -> Optional[Mapping[str, str]]:
        """
        Tags assigned to the key vault resource.
        """
        return pulumi.get(self, "tags")
    @property
    @pulumi.getter
    def type(self) -> str:
        """
        Resource type of the key vault resource.
        """
        return pulumi.get(self, "type")
class AwaitableGetVaultResult(GetVaultResult):
    """Awaitable wrapper so the (already resolved) result can be awaited."""
    # pylint: disable=using-constant-test
    def __await__(self):
        # The unreachable "yield" makes this a generator function, which is
        # what makes the object awaitable; it returns immediately.
        if False:
            yield self
        return GetVaultResult(
            id=self.id,
            location=self.location,
            METHOD_NAME=self.METHOD_NAME,
            properties=self.properties,
            system_data=self.system_data,
            tags=self.tags,
            type=self.type)
def get_vault(resource_group_name: Optional[str] = None,
              vault_name: Optional[str] = None,
              opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetVaultResult:
    """
    Gets the specified Azure key vault.
    Azure REST API version: 2023-02-01.

    :param str resource_group_name: The name of the Resource Group to which the vault belongs.
    :param str vault_name: The name of the vault.
    """
    # Synchronous provider invoke; typ= tells pulumi how to deserialize.
    __args__ = dict()
    __args__['resourceGroupName'] = resource_group_name
    __args__['vaultName'] = vault_name
    opts = pulumi.InvokeOptions.merge(_utilities.get_invoke_opts_defaults(), opts)
    __ret__ = pulumi.runtime.invoke('azure-native:keyvault:getVault', __args__, opts=opts, typ=GetVaultResult).value
    return AwaitableGetVaultResult(
        id=pulumi.get(__ret__, 'id'),
        location=pulumi.get(__ret__, 'location'),
        METHOD_NAME=pulumi.get(__ret__, 'name'),
        properties=pulumi.get(__ret__, 'properties'),
        system_data=pulumi.get(__ret__, 'system_data'),
        tags=pulumi.get(__ret__, 'tags'),
        type=pulumi.get(__ret__, 'type'))
@_utilities.lift_output_func(get_vault)
def get_vault_output(resource_group_name: Optional[pulumi.Input[str]] = None,
vault_name: Optional[pulumi.Input[str]] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetVaultResult]:
"""
Gets the specified Azure key vault.
Azure REST API version: 2023-02-01.
:param str resource_group_name: The name of the Resource Group to which the vault belongs.
:param str vault_name: The name of the vault.
"""
... |
5,526 | application | import argparse
import importlib
import logging
import sys
import uuid
from urllib.parse import parse_qs
from urllib.parse import unquote
import cherrypy
from beaker.middleware import SessionMiddleware
from oic.oauth2.consumer import Consumer
from oic.utils.http_util import NotFound
from oic.utils.http_util import Response
from oic.utils.http_util import SeeOther
from oic.utils.http_util import get_or_post
# ============================================================================
# First define how logging is supposed to be done
# ============================================================================
# Root logger writes to rp.log with timestamped records.
LOGGER = logging.getLogger("")
LOGFILE_NAME = "rp.log"
hdlr = logging.FileHandler(LOGFILE_NAME)
base_formatter = logging.Formatter("%(asctime)s %(name)s:%(levelname)s %(message)s")
hdlr.setFormatter(base_formatter)
LOGGER.addHandler(hdlr)
LOGGER.setLevel(logging.INFO)
# Module globals filled in by the __main__ block below.
SERVER_ENV = {}
RP = None
RP_CONF = None
CONSUMER = {}
# ============================================================================
# Endpoint functions
# ============================================================================
def as_choice(environ, start_response):
    """Render the authorization-server selection page (as_choice.mako)."""
    response = Response(
        mako_template="as_choice.mako", template_lookup=RP_CONF.LOOKUP, headers=[]
    )
    context = {"as_list": RP_CONF.AS_CONF.keys(), "action": "as", "method": "POST"}
    return response(environ, start_response, **context)
# noinspection PyUnresolvedReferences
def static(environ, start_response, path):
    """Serve the file at *path* with a Content-Type guessed from its suffix.

    Unknown suffixes fall back to text/xml; a missing/unreadable file
    yields a 404 response.
    """
    # NOTE(review): *path* is taken from the request in some call sites and
    # there is no traversal check here -- confirm callers constrain it.
    suffix_to_mime = {
        ".ico": "image/x-icon",
        ".html": "text/html",
        ".json": "application/json",
        ".txt": "text/plain",
        ".css": "text/css",
    }
    LOGGER.info("[static]sending: %s" % (path,))
    try:
        with open(path, "rb") as fd:
            content = fd.read()
        mime = "text/xml"
        for suffix, candidate in suffix_to_mime.items():
            if path.endswith(suffix):
                mime = candidate
                break
        start_response("200 OK", [("Content-Type", mime)])
        return [content]
    except IOError:
        resp = NotFound()
        return resp(environ, start_response)
# ============================================================================
# The main web server function
# ============================================================================
# In-memory registries: access tokens keyed by OAuth state, and Consumer
# instances keyed by a per-session lookup key.
Token = {}
Clients = {}


def store_client(cli):
    """Register *cli* under a fresh UUID key and return that key."""
    key = str(uuid.uuid4())
    Clients[key] = cli
    return key


def get_client(lookup_key):
    """Return the client stored under *lookup_key*, or None if unknown."""
    return Clients.get(lookup_key)
def METHOD_NAME(environ, start_response):
    """WSGI entry point: dispatch on PATH_INFO to the RP's endpoints.

    Routes: static assets, "logout", AS selection ("as" / "rp"), and the
    authorization callback ("authz_cb"); any other path renders the
    AS-choice page.
    """
    session = environ["beaker.session"]
    path = environ.get("PATH_INFO", "").lstrip("/")
    if path == "robots.txt":
        return static(environ, start_response, "static/robots.txt")
    if path.startswith("static/"):
        return static(environ, start_response, path)
    if path == "logout":
        session.invalidate()
        resp = SeeOther("static/log_out_message.html")
        return resp(environ, start_response)
    if path == "as":
        session["callback"] = True
        request = parse_qs(get_or_post(environ))
        # Look up the Consumer configured for the chosen authorization server.
        _cli = CONSUMER[unquote(request["authzsrv"][0])]
        session["client"] = store_client(_cli)
        sid, redirect_uri = _cli.begin(RP_CONF.BASE, path)
        resp = SeeOther(redirect_uri)
        return resp(environ, start_response)
    if path == "rp":
        # Same flow as "as" but the server is selected via the "iss" parameter.
        session["callback"] = True
        request = parse_qs(get_or_post(environ))
        _cli = CONSUMER[unquote(request["iss"][0])]
        session["client"] = store_client(_cli)
        sid, redirect_uri = _cli.begin(RP_CONF.BASE, path)
        resp = SeeOther(redirect_uri)
        return resp(environ, start_response)
    if path == "authz_cb":
        # Authorization response: exchange the returned code for a token.
        _cli = get_client(session["client"])
        request = get_or_post(environ)
        aresp = _cli.handle_authorization_response(request)
        rargs = {"code": aresp["code"]}
        atresp = _cli.do_access_token_request(request_args=rargs)
        # Access token should be stored somewhere for later usage
        Token[atresp["state"]] = atresp
        resp = Response("Got access token: %s" % atresp["access_token"])
        return resp(environ, start_response)
    return as_choice(environ, start_response)
# ============================================================================
# Below is what's needed to start the server
# ============================================================================
START_MESG = "OAuth2 relaying party listening on port:%s at %s"
if __name__ == "__main__":
    # Beaker sessions: in-memory, auto-managed, 15-minute timeout.
    session_opts = {
        "session.type": "memory",
        "session.cookie_expires": True,
        "session.auto": True,
        "session.timeout": 900,
    }
    parser = argparse.ArgumentParser()
    parser.add_argument("-c", dest="conf_path")
    parser.add_argument(dest="config")
    args = parser.parse_args()
    # Load the configuration file, which must be a python file
    # The default; first look for it in the directory from where this program
    # is run.
    sys.path.insert(0, ".")
    # If a specific configuration directory is specified look there first
    if args.conf_path:
        sys.path.insert(0, args.conf_path)
    RP_CONF = importlib.import_module(args.config)
    # per AS instantiate a consumer
    for name, info in RP_CONF.AS_CONF.items():
        c_conf = {"client_id": info["client_id"]}
        CONSUMER[name] = Consumer(
            session_db={},
            client_config=c_conf,
            server_info={
                "authorization_endpoint": info["authorization_endpoint"],
                "token_endpoint": info["token_endpoint"],
            },
            authz_page="authz_cb",
            response_type="code",
        )
        CONSUMER[name].client_secret = info["client_secret"]
    app = SessionMiddleware(METHOD_NAME, session_opts)
    cherrypy.config.update({"server.socket_port": RP_CONF.PORT})
    cherrypy.tree.graft(app, "/")
    if RP_CONF.BASE.startswith("https"):
        # NOTE(review): cherrypy global config keys are normally
        # "server.ssl_certificate"/"server.ssl_private_key"; the extra
        # "cherrypy." prefix here looks wrong -- confirm TLS is enabled.
        cherrypy.config.update(
            {
                "cherrypy.server.ssl_certificate": RP_CONF.SERVER_CERT,
                "cherrypy.server.ssl_private_key": RP_CONF.SERVER_KEY,
            }
        )
    LOGGER.info(START_MESG % (RP_CONF.PORT, RP_CONF.HOST))
    print(START_MESG % (RP_CONF.PORT, RP_CONF.HOST))
    try:
        cherrypy.engine.start()
    except KeyboardInterrupt:
        pass
5,527 | blocksize | from __future__ import print_function
import core.exceptions as ex
from utilities.lazy import lazy
from drivers.array.freenas import Freenass
from core.pool import BasePool
LOCK_NAME = "freenas_update_disk"
LOCK_TIMEOUT = 120
class Pool(BasePool):
    """Storage pool driver backed by a FreeNAS array exposing iSCSI zvols."""
    type = "freenas"
    capabilities = ["roo", "rwo", "rox", "rwx", "shared", "blk", "iscsi"]
    @lazy
    def insecure_tpc(self):
        # Pool config option passed to the array when creating zvols.
        return self.oget("insecure_tpc")
    @lazy
    def compression(self):
        return self.oget("compression")
    @lazy
    def sparse(self):
        return self.oget("sparse")
    @lazy
    def METHOD_NAME(self):
        # zvol block size configuration option.
        return self.oget("blocksize")
    def delete_disk(self, name=None, disk_id=None):
        """Delete the iSCSI zvol *name*, serialized by a daemon lock."""
        lock_id = None
        result = {}
        try:
            lock_id = self.node._daemon_lock(LOCK_NAME, timeout=LOCK_TIMEOUT, on_error="raise")
            self.log.info("lock acquired: name=%s id=%s", LOCK_NAME, lock_id)
            result = self.array.del_iscsi_zvol(name=name, volume=self.diskgroup)
        finally:
            # NOTE(review): if _daemon_lock raised, lock_id is still None here
            # and _daemon_unlock(..., None) is called -- confirm it is a no-op.
            self.node._daemon_unlock(LOCK_NAME, lock_id)
            self.log.info("lock released: name=%s id=%s", LOCK_NAME, lock_id)
        return result
    def create_disk(self, name, size, nodes=None):
        """Create an iSCSI zvol *name* of *size*, mapped to *nodes*."""
        mappings = self.get_mappings(nodes)
        if not mappings:
            raise ex.Error("refuse to create a disk with no mappings")
        lock_id = None
        result = {}
        try:
            lock_id = self.node._daemon_lock(LOCK_NAME, timeout=LOCK_TIMEOUT, on_error="raise")
            self.log.info("lock acquired: name=%s id=%s", LOCK_NAME, lock_id)
            result = self.array.add_iscsi_zvol(name=name, size=size,
                                               volume=self.diskgroup,
                                               mappings=mappings,
                                               insecure_tpc=self.insecure_tpc,
                                               compression=self.compression,
                                               sparse=self.sparse,
                                               METHOD_NAME=self.METHOD_NAME)
        finally:
            self.node._daemon_unlock(LOCK_NAME, lock_id)
            self.log.info("lock released: name=%s id=%s", LOCK_NAME, lock_id)
        return result
    def translate(self, name=None, size=None, fmt=True, shared=False):
        """Return resource definitions for a volume backed by this pool."""
        data = []
        disk = {
            "rtype": "disk",
            "type": "disk",
            "name": name,
            "scsireserv": True,
            "shared": shared,
            "size": size,
        }
        data.append(disk)
        if fmt:
            data += self.add_fs(name, shared)
        return data
    @lazy
    def array_name(self):
        return self.oget("array")
    @lazy
    def diskgroup(self):
        return self.oget("diskgroup")
    @lazy
    def array(self):
        # Resolve the configured array by name; fail loudly if absent.
        o = Freenass()
        array = o.get_freenas(self.array_name)
        if array is None:
            raise ex.Error("array %s not found" % self.array_name)
        array.node = self.node
        return array
    def pool_status(self, usage=True):
        """Return pool status; include capacity figures when *usage* is true."""
        from utilities.converters import convert_size
        data = {
            "type": self.type,
            "name": self.name,
            "head": "array://%s/%s" % (self.array_name, self.diskgroup),
            "capabilities": self.capabilities,
        }
        if not usage:
            return data
        try:
            dg = [dg for dg in self.array.list_pools() if dg["name"] == self.diskgroup][0]
        except Exception as exc:
            # Report array-side failures in-band rather than raising.
            data["error"] = str(exc)
            return data
        data["free"] = convert_size(dg["avail"], _to="KB")
        data["used"] = convert_size(dg["used"], _to="KB")
        data["size"] = convert_size(dg["avail"] + dg["used"], _to="KB")
        return data
    def get_targets(self):
        """Return the names of all iSCSI targets on the array."""
        return [tgt["name"] for tgt in self.array.list_iscsi_target()]
def get_mappings(self, nodes):
return self._get_mappings(nodes, transport="iscsi") |
5,528 | test data before | # Copyright 2020. ThingsBoard
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import struct
import unittest
from random import randint, uniform, choice
from string import ascii_lowercase
from thingsboard_gateway.connectors.can.bytes_can_downlink_converter import BytesCanDownlinkConverter
class BytesCanDownlinkConverterTests(unittest.TestCase):
    """Unit tests for BytesCanDownlinkConverter.convert(config, data)."""

    def setUp(self):
        self.converter = BytesCanDownlinkConverter()

    def test_data_in_hex_in_conf(self):
        expected_can_data = [0, 1, 2, 3]
        config = {"dataInHex": "00 01 02 03"}
        data = {}
        actual_can_data = self.converter.convert(config, data)
        self.assertListEqual(actual_can_data, expected_can_data)

    def test_data_in_hex_in_data(self):
        expected_can_data = [0, 1, 2, 3]
        config = {}
        data = {"dataInHex": "00 01 02 03"}
        actual_can_data = self.converter.convert(config, data)
        self.assertListEqual(actual_can_data, expected_can_data)

    def test_no_data(self):
        self.assertIsNone(self.converter.convert({}, {}))

    def test_wrong_data_format(self):
        self.assertIsNone(self.converter.convert({}, [1, 2, 3]))

    def test_bool_data(self):
        value = True
        expected_can_data = [int(value)]
        data = {"value": value}
        actual_can_data = self.converter.convert({}, data)
        self.assertListEqual(actual_can_data, expected_can_data)

    def test_unsigned_integer_data(self):
        for data_length in [1, 2, 3, 4]:
            # Empty byteorder value means default encoding (big)
            for byteorder in ["", "little"]:
                config = {"dataLength": data_length}
                if byteorder:
                    config["dataByteorder"] = byteorder
                else:
                    byteorder = "big"
                # Fix: randint() includes both endpoints, so the upper bound
                # must be 2**(8*data_length) - 1. The previous bound of
                # 2**(8*data_length) does not fit in data_length bytes and
                # made to_bytes() below raise OverflowError intermittently.
                data = {"value": randint(0, pow(2, 8 * data_length) - 1)}
                actual_can_data = self.converter.convert(config, data)
                self.assertListEqual(actual_can_data,
                                     list(data["value"].to_bytes(data_length, byteorder, signed=False)))

    def test_signed_integer_data(self):
        for data_length in [1, 2, 3, 4]:
            # Empty byteorder value means default encoding (big)
            for byteorder in ["", "little"]:
                config = {
                    "dataLength": data_length,
                    "dataSigned": True
                }
                if byteorder:
                    config["dataByteorder"] = byteorder
                else:
                    byteorder = "big"
                # Bounds are already correct here: [-2^(8n-1), 2^(8n-1) - 1].
                data = {"value": randint(-int(pow(2, 8 * data_length) / 2),
                                         int(pow(2, 8 * data_length) / 2) - 1)}
                actual_can_data = self.converter.convert(config, data)
                self.assertListEqual(actual_can_data,
                                     list(data["value"].to_bytes(data_length, byteorder, signed=True)))

    def test_float_data(self):
        # Empty byteorder value means default encoding (big)
        for byteorder in ["", "little"]:
            data = {"value": uniform(-3.1415926535, 3.1415926535)}
            config = {}
            if byteorder:
                config["dataByteorder"] = byteorder
            else:
                byteorder = "big"
            actual_can_data = self.converter.convert(config, data)
            self.assertListEqual(actual_can_data,
                                 list(struct.pack(">f" if byteorder[0] == "b" else "<f", data["value"])))

    def test_string_data(self):
        # Empty encoding value means default encoding (ascii)
        for encoding in ["", "utf-8"]:
            value = "".join(choice(ascii_lowercase) for _ in range(8))
            data = {"value": value}
            config = {}
            if encoding:
                config["dataEncoding"] = encoding
            else:
                encoding = "ascii"
            actual_can_data = self.converter.convert(config, data)
            self.assertListEqual(actual_can_data, list(value.encode(encoding)))

    def test_expression_data(self):
        default_data_length = 1
        default_byteorder = "big"
        data = {
            "one": 1,
            "two": 2,
            "three": 3
        }
        config = {"dataExpression": "one + two + three"}
        value = 0
        for i in data.values():
            value += i
        actual_can_data = self.converter.convert(config, data)
        self.assertListEqual(actual_can_data,
                             list(value.to_bytes(default_data_length, default_byteorder)))

    def test_strict_eval_violation(self):
        # Function calls are forbidden under strictEval; convert returns None.
        data = {"value": randint(0, 256)}
        config = {
            "dataExpression": "pow(value, 2)",
            "strictEval": True
        }
        self.assertIsNone(self.converter.convert(config, data))

    def METHOD_NAME(self):
        value = True
        expected_can_data = [0, 1, 2, 3, int(value)]
        data = {"value": value}
        config = {"dataBefore": "00 01 02 03"}
        actual_can_data = self.converter.convert(config, data)
        self.assertListEqual(actual_can_data, expected_can_data)

    def test_data_after(self):
        value = True
        expected_can_data = [int(value), 3, 2, 1, 0]
        data = {"value": value}
        config = {"dataAfter": "03 02 01 00"}
        actual_can_data = self.converter.convert(config, data)
        self.assertListEqual(actual_can_data, expected_can_data)
if __name__ == '__main__':
unittest.main() |
5,529 | test t3 its | # Copyright (c) 2021 Horizon Robotics and ALF Contributors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import torch
import alf.utils.distributions as ad
import alf
class DistributionTest(alf.test.TestCase):
    """Tests for inverse-transform sampling and truncated distributions in
    alf.utils.distributions."""

    def _test_its(self, x, its: ad.InverseTransformSampling):
        # On the dense grid *x* (assumed evenly spaced and covering the
        # support): check icdf(cdf(x)) round-trips, the density integrates
        # to ~1, and d(cdf)/dx equals the pdf.
        x.requires_grad_()
        y = its.cdf(x)
        x1 = its.icdf(y)
        p = its.log_prob(x).exp()
        step = x[1] - x[0]
        psum = p.sum() * step
        self.assertAlmostEqual(psum, 1., delta=0.01)
        self.assertTensorClose(x1, x, 0.01)
        grad = torch.autograd.grad(y.sum(), x)[0]
        self.assertTensorClose(grad.log(), p.log())

    def test_normal_its(self):
        self._test_its(torch.arange(-4., 4., 1 / 128), ad.NormalITS())

    def test_cauchy_its(self):
        # Cauchy has heavy tails, hence the much wider grid.
        self._test_its(torch.arange(-100., 100., 1 / 128), ad.CauchyITS())

    def METHOD_NAME(self):
        self._test_its(torch.arange(-20., 20., 1 / 128), ad.T2ITS())

    def _test_truncated(self, its: ad.InverseTransformSampling):
        # Batch of truncated distributions with varying loc/scale; check
        # normalization over the support and that samples respect the bounds.
        batch_size = 6
        dim = 1
        lower_bound = -1.5 * torch.ones(dim)
        upper_bound = 2.5 * torch.ones(dim)
        loc = torch.ones((batch_size, dim))
        loc[0, :] = -2
        loc[1, :] = -1
        loc[2, :] = 0
        loc[3, :] = 1
        loc[4, :] = 2
        loc[5, :] = 2
        scale = torch.ones((6, dim))
        scale[2, :] = 0.5
        scale[3, :] = 1.5
        dist = ad.TruncatedDistribution(
            loc=loc,
            scale=scale,
            lower_bound=lower_bound,
            upper_bound=upper_bound,
            its=its)
        # Test prob sum to 1.
        step = 1 / 128
        x = torch.arange(-1.5, 2.5, step)[:, None, None].expand(
            -1, batch_size, dim)
        log_prob = dist.log_prob(x)
        prob = log_prob.exp() * step
        self.assertTensorClose(
            prob.sum(dim=0), torch.ones((batch_size, )), 0.01)
        # Test samples are within bound
        samples = dist.rsample((1000, ))
        self.assertTrue((samples > lower_bound).all())
        self.assertTrue((samples < upper_bound).all())

    def test_truncated_normal(self):
        self._test_truncated(ad.NormalITS())

    def test_truncated_cauchy(self):
        self._test_truncated(ad.CauchyITS())

    def test_truncated_T2(self):
        self._test_truncated(ad.T2ITS())

    def test_truncated_normal_mode(self):
        # Expected mode is loc clipped into [lower_bound, upper_bound].
        dist = ad.TruncatedNormal(
            loc=torch.Tensor([[1.5, -3.0, 4.5]]),
            scale=torch.tensor([[0.8, 1.9, 1.2]]),
            lower_bound=torch.tensor([1.0, 1.0, 1.0]),
            upper_bound=torch.tensor([2.0, 2.0, 2.0]))
        self.assertTrue(torch.all(torch.tensor([1.5, 1.0, 2.0]) == dist.mode))

    def test_truncated_normal_kl_divergence(self):
        # Compare the analytic KL against numerical integration of
        # p(x) * (log p(x) - log q(x)) over the truncated support.
        def _numerical_kl_divergence(lower_bound, upper_bound, loc_p, scale_p,
                                     loc_q, scale_q):
            p = ad.TruncatedNormal(loc_p, scale_p, lower_bound, upper_bound)
            q = ad.TruncatedNormal(loc_q, scale_q, lower_bound, upper_bound)
            delta = 1e-3
            accu = torch.as_tensor(0.0)
            for x in np.arange(lower_bound, upper_bound, delta):
                log_p_x = p.log_prob(torch.as_tensor(x))
                log_q_x = q.log_prob(torch.as_tensor(x))
                accu += torch.exp(log_p_x) * (log_p_x - log_q_x) * delta
            return accu

        dim = 1
        lower_bound = -1.5 * torch.ones(dim)
        upper_bound = 2.5 * torch.ones(dim)
        batch_size = 4
        loc1 = torch.ones((batch_size, dim))
        loc1[0, :] = -2.0
        loc1[1, :] = -1.0
        loc1[2, :] = 0.0
        loc1[3, :] = 1.0
        scale1 = torch.ones((batch_size, dim))
        scale1[1, :] = 0.5
        scale1[2, :] = 1.5
        scale1[3, :] = 2.56
        dist1 = ad.TruncatedNormal(
            loc=loc1,
            scale=scale1,
            lower_bound=lower_bound,
            upper_bound=upper_bound)
        loc2 = torch.ones((batch_size, dim))
        loc2[0, :] = -1.0
        loc2[1, :] = -2.0
        loc2[2, :] = 1.0
        loc2[3, :] = 3.0
        scale2 = torch.ones((batch_size, dim))
        scale2[0, :] = 0.2
        scale2[1, :] = 1.5
        scale2[2, :] = 0.5
        dist2 = ad.TruncatedNormal(
            loc=loc2,
            scale=scale2,
            lower_bound=lower_bound,
            upper_bound=upper_bound)
        kl = torch.distributions.kl_divergence(dist1, dist2)
        for i in range(batch_size):
            expected = _numerical_kl_divergence(lower_bound[0], upper_bound[0],
                                                loc1[i][0], scale1[i][0],
                                                loc2[i][0], scale2[i][0])
            np.testing.assert_array_almost_equal(kl[i], expected, decimal=3)
if __name__ == '__main__':
alf.test.main() |
5,530 | io | # These lines were mostly generated by h2py.py (see demo/scripts)
# from <sys/ioctl.h>, <sys/termio.h> and <termios.h> on Irix 4.0.2
# with some manual changes to cope with imperfections in h2py.py.
# The applicability on other systems is not clear; especially non-SYSV
# systems may have a totally different set of ioctls.
from warnings import warnpy3k
warnpy3k("the IOCTL module has been removed in Python 3.0", stacklevel=2)
del warnpy3k
IOCTYPE = 0xff00
LIOC = (ord('l')<<8)
LIOCGETP = (LIOC|1)
LIOCSETP = (LIOC|2)
LIOCGETS = (LIOC|5)
LIOCSETS = (LIOC|6)
DIOC = (ord('d')<<8)
DIOCGETC = (DIOC|1)
DIOCGETB = (DIOC|2)
DIOCSETE = (DIOC|3)
IOCPARM_MASK = 0x7f
IOC_VOID = 0x20000000
IOC_OUT = 0x40000000
IOC_IN = 0x80000000
IOC_INOUT = (IOC_IN|IOC_OUT)
int = 'i'
short = 'h'
long = 'l'
def sizeof(t): import struct; return struct.calcsize(t)
def METHOD_NAME(x,y): return (IOC_VOID|((x)<<8)|y)
def _IOR(x,y,t): return (IOC_OUT|((sizeof(t)&IOCPARM_MASK)<<16)|((x)<<8)|y)
def _IOW(x,y,t): return (IOC_IN|((sizeof(t)&IOCPARM_MASK)<<16)|((x)<<8)|y)
# this should be _IORW, but stdio got there first
def _IOWR(x,y,t): return (IOC_INOUT|((sizeof(t)&IOCPARM_MASK)<<16)|((x)<<8)|y)
FIONREAD = _IOR(ord('f'), 127, int)
FIONBIO = _IOW(ord('f'), 126, int)
FIOASYNC = _IOW(ord('f'), 125, int)
FIOSETOWN = _IOW(ord('f'), 124, int)
FIOGETOWN = _IOR(ord('f'), 123, int)
NCC = 8
NCC_PAD = 7
NCC_EXT = 16
NCCS = (NCC+NCC_PAD+NCC_EXT)
VINTR = 0
VQUIT = 1
VERASE = 2
VKILL = 3
VEOF = 4
VEOL = 5
VEOL2 = 6
VMIN = VEOF
VTIME = VEOL
VSWTCH = 7
VLNEXT = (NCC+NCC_PAD+0)
VWERASE = (NCC+NCC_PAD+1)
VRPRNT = (NCC+NCC_PAD+2)
VFLUSHO = (NCC+NCC_PAD+3)
VSTOP = (NCC+NCC_PAD+4)
VSTART = (NCC+NCC_PAD+5)
CNUL = '\0'
CDEL = '\377'
CESC = '\\'
CINTR = '\177'
CQUIT = '\34'
CBRK = '\377'
def CTRL(c):
    """Return the control code derived from character *c*.

    NOTE: this legacy h2py output masks with 0x0f (not the usual 0x1f);
    preserved as generated.
    """
    return ord(c) & 0x0f


# Default control-character assignments derived via CTRL().
CERASE = CTRL('H')
CKILL = CTRL('U')
CEOF = CTRL('d')
CEOT = CEOF
CSTART = CTRL('q')
CSTOP = CTRL('s')
CSWTCH = CTRL('z')
CSUSP = CSWTCH
CNSWTCH = 0
CLNEXT = CTRL('v')
CWERASE = CTRL('w')
CFLUSHO = CTRL('o')
CFLUSH = CFLUSHO
CRPRNT = CTRL('r')
CDSUSP = CTRL('y')
IGNBRK = 0000001
BRKINT = 0000002
IGNPAR = 0000004
PARMRK = 0000010
INPCK = 0000020
ISTRIP = 0000040
INLCR = 0000100
IGNCR = 0000200
ICRNL = 0000400
IUCLC = 0001000
IXON = 0002000
IXANY = 0004000
IXOFF = 0010000
IBLKMD = 0020000
OPOST = 0000001
OLCUC = 0000002
ONLCR = 0000004
OCRNL = 0000010
ONOCR = 0000020
ONLRET = 0000040
OFILL = 0000100
OFDEL = 0000200
NLDLY = 0000400
NL0 = 0
NL1 = 0000400
CRDLY = 0003000
CR0 = 0
CR1 = 0001000
CR2 = 0002000
CR3 = 0003000
TABDLY = 0014000
TAB0 = 0
TAB1 = 0004000
TAB2 = 0010000
TAB3 = 0014000
BSDLY = 0020000
BS0 = 0
BS1 = 0020000
VTDLY = 0040000
VT0 = 0
VT1 = 0040000
FFDLY = 0100000
FF0 = 0
FF1 = 0100000
CBAUD = 0000017
B0 = 0
B50 = 0000001
B75 = 0000002
B110 = 0000003
B134 = 0000004
B150 = 0000005
B200 = 0000006
B300 = 0000007
B600 = 0000010
B1200 = 0000011
B1800 = 0000012
B2400 = 0000013
B4800 = 0000014
B9600 = 0000015
B19200 = 0000016
EXTA = 0000016
B38400 = 0000017
EXTB = 0000017
CSIZE = 0000060
CS5 = 0
CS6 = 0000020
CS7 = 0000040
CS8 = 0000060
CSTOPB = 0000100
CREAD = 0000200
PARENB = 0000400
PARODD = 0001000
HUPCL = 0002000
CLOCAL = 0004000
LOBLK = 0040000
ISIG = 0000001
ICANON = 0000002
XCASE = 0000004
ECHO = 0000010
ECHOE = 0000020
ECHOK = 0000040
ECHONL = 0000100
NOFLSH = 0000200
IIEXTEN = 0000400
ITOSTOP = 0001000
SSPEED = B9600
IOCTYPE = 0xff00
TIOC = (ord('T')<<8)
oTCGETA = (TIOC|1)
oTCSETA = (TIOC|2)
oTCSETAW = (TIOC|3)
oTCSETAF = (TIOC|4)
TCSBRK = (TIOC|5)
TCXONC = (TIOC|6)
TCFLSH = (TIOC|7)
TCGETA = (TIOC|8)
TCSETA = (TIOC|9)
TCSETAW = (TIOC|10)
TCSETAF = (TIOC|11)
TIOCFLUSH = (TIOC|12)
TCDSET = (TIOC|32)
TCBLKMD = (TIOC|33)
TIOCPKT = (TIOC|112)
TIOCPKT_DATA = 0x00
TIOCPKT_FLUSHREAD = 0x01
TIOCPKT_FLUSHWRITE = 0x02
TIOCPKT_NOSTOP = 0x10
TIOCPKT_DOSTOP = 0x20
TIOCNOTTY = (TIOC|113)
TIOCSTI = (TIOC|114)
TIOCSPGRP = _IOW(ord('t'), 118, int)
TIOCGPGRP = _IOR(ord('t'), 119, int)
TIOCCONS = _IOW(ord('t'), 120, int)
struct_winsize = 'hhhh'
TIOCGWINSZ = _IOR(ord('t'), 104, struct_winsize)
TIOCSWINSZ = _IOW(ord('t'), 103, struct_winsize)
TFIOC = (ord('F')<<8)
oFIONREAD = (TFIOC|127)
LDIOC = (ord('D')<<8)
LDOPEN = (LDIOC|0)
LDCLOSE = (LDIOC|1)
LDCHG = (LDIOC|2)
LDGETT = (LDIOC|8)
LDSETT = (LDIOC|9)
TERM_NONE = 0
TERM_TEC = 1
TERM_V61 = 2
TERM_V10 = 3
TERM_TEX = 4
TERM_D40 = 5
TERM_H45 = 6
TERM_D42 = 7
TM_NONE = 0000
TM_SNL = 0001
TM_ANL = 0002
TM_LCF = 0004
TM_CECHO = 0010
TM_CINVIS = 0020
TM_SET = 0200
LDISC0 = 0
LDISC1 = 1
NTTYDISC = LDISC1
VSUSP = VSWTCH
TCSANOW = 0
TCSADRAIN = 1
TCSAFLUSH = 2
TCIFLUSH = 0
TCOFLUSH = 1
TCIOFLUSH = 2
TCOOFF = 0
TCOON = 1
TCIOFF = 2
TCION = 3
TO_STOP = LOBLK
IEXTEN = IIEXTEN
TOSTOP = ITOSTOP |
5,531 | pre val loop | import logging
from typing import List, Optional
import jax
import jax.numpy as jnp
import nltk
import numpy as np
import optax
from datasets import load_metric
from flax.training.common_utils import onehot
from transformers import AutoConfig, AutoTokenizer, FlaxAutoModelForSeq2SeqLM
from tango.integrations.flax import FlaxWrapper
from tango.integrations.flax.train_callback import TrainCallback
from tango.step import Step
"""
XSum Summarization with facebook/bart-base
"""
@Step.register("tokenize_data")
class PreProcessing(Step):
    """Tango step: tokenize the XSum dataset for BART seq2seq training."""
    DETERMINISTIC = False
    def run(self, dataset):
        tokenizer = AutoTokenizer.from_pretrained("facebook/bart-base")
        model = FlaxAutoModelForSeq2SeqLM.from_pretrained("facebook/bart-base")
        # NOTE(review): fromlist says "shift_tokens_tight" (typo?), but only
        # the getattr name below matters for __import__, so this still works.
        model_module = __import__(model.__module__, fromlist=["shift_tokens_tight"])
        shift_tokens_right_fn = getattr(model_module, "shift_tokens_right")
        config = AutoConfig.from_pretrained("facebook/bart-base")
        MAX_SOURCE_LENGTH = 512
        MAX_TGT_LENGTH = 64
        def preprocess_function(examples):
            # Tokenize documents (inputs) and summaries (targets) to fixed
            # lengths and derive decoder inputs by right-shifting the labels.
            inputs = examples["document"]
            targets = examples["summary"]
            inputs = [inp for inp in inputs]
            model_inputs = tokenizer(
                inputs,
                max_length=MAX_SOURCE_LENGTH,
                padding="max_length",
                truncation=True,
                return_tensors="np",
            )
            # Setup the tokenizer for targets
            with tokenizer.as_target_tokenizer():
                labels = tokenizer(
                    targets,
                    max_length=MAX_TGT_LENGTH,
                    padding="max_length",
                    truncation=True,
                    return_tensors="np",
                )
            model_inputs["labels"] = labels["input_ids"]
            decoder_input_ids = shift_tokens_right_fn(
                labels["input_ids"], config.pad_token_id, config.decoder_start_token_id
            )
            model_inputs["decoder_input_ids"] = np.asarray(decoder_input_ids)
            # We need decoder_attention_mask so we can ignore pad tokens from loss
            model_inputs["decoder_attention_mask"] = labels["attention_mask"]
            return model_inputs
        column_names = dataset["train"].column_names
        dataset = dataset.map(
            preprocess_function,
            batched=True,
            remove_columns=column_names,
            desc="Running tokenizer on dataset",
        )
        return dataset
@FlaxWrapper.register("xsum_wrapper")  # type: ignore
class TransformerWrapper(FlaxWrapper):
    """Flax wrapper computing (label-smoothed) cross-entropy for seq2seq."""
    def loss_helper(self, logits, labels, batch):
        # Label-smoothed cross entropy, masked so padded target positions do
        # not contribute (the smoothing factor is currently hard-coded to 0,
        # making confidence 1.0 and low_confidence 0.0).
        label_smoothing_factor = 0
        padding_mask = batch["decoder_attention_mask"]
        vocab_size = logits.shape[-1]
        confidence = 1.0 - label_smoothing_factor
        low_confidence = (1.0 - confidence) / (vocab_size - 1)
        # 1e-20 guards log(0) when low_confidence is 0.
        normalizing_constant = -(
            confidence * jnp.log(confidence)
            + (vocab_size - 1) * low_confidence * jnp.log(low_confidence + 1e-20)
        )
        soft_labels = onehot(labels, vocab_size, on_value=confidence, off_value=low_confidence)
        loss = optax.softmax_cross_entropy(logits, soft_labels)
        loss = loss - normalizing_constant
        # ignore padded tokens from loss
        loss = loss * padding_mask
        loss = loss.sum() / padding_mask.sum()
        return loss
    def train_loss(self, params, state, batch, dropout_rng, labels):
        # Forward pass in train mode (dropout active), then masked CE loss.
        logits = state.apply_fn(**batch, params=params, dropout_rng=dropout_rng, train=True)[0]
        loss = self.loss_helper(logits, labels, batch)
        return loss
    def val_metrics(self, batch, logits, labels):
        loss = self.loss_helper(logits, labels, batch)
        metrics = {"loss": loss}
        return metrics
    def eval_metrics(self, batch, logits, labels):
        loss = self.loss_helper(logits, labels, batch)
        metrics = {"loss": loss}
        return metrics
@TrainCallback.register("flax::generate_step")
class GenerateCallback(TrainCallback):
    """Callback that runs generation during validation and computes ROUGE."""
    def __init__(self, *args, **kwargs) -> None:
        super().__init__(*args, **kwargs)
        self.logger = logging.getLogger(GenerateCallback.__name__)
    def generate_step(self, params, batch):
        # Beam-search generation with the current training parameters.
        self.model.params = params
        gen_kwargs = {"max_length": 64, "num_beams": self.model.config.num_beams}
        output_ids = self.model.generate(
            batch["input_ids"], attention_mask=batch["attention_mask"], **gen_kwargs
        )
        return output_ids.sequences
    def pre_train_loop(self) -> None:
        # pmap the generate step only when more than one device is available.
        if len(jax.devices()) > 1:
            self.p_generate_step = jax.pmap(self.generate_step, axis_name="batch")
    def METHOD_NAME(self, step: int, val_step: int, state) -> None:
        # Reset prediction/label accumulators at the start of validation.
        self.state = state
        self.eval_preds: List = []
        self.eval_labels: List = []
    def pre_val_batch(self, step: int, val_step: int, epoch: int, val_batch) -> None:
        labels = val_batch["labels"]
        if len(jax.devices()) > 1:
            generated_ids = self.p_generate_step(self.state.params, val_batch)
        else:
            generated_ids = self.generate_step(self.state.params, val_batch)
        # Flatten device/batch dims; 64 is the generation max_length above.
        self.eval_preds.extend(jax.device_get(generated_ids.reshape(-1, 64)))
        self.eval_labels.extend(jax.device_get(labels.reshape(-1, labels.shape[-1])))
    def postprocess_text(self, preds, labels):
        preds = [pred.strip() for pred in preds]
        labels = [label.strip() for label in labels]
        # rougeLSum expects newline after each sentence
        preds = ["\n".join(nltk.sent_tokenize(pred)) for pred in preds]
        labels = ["\n".join(nltk.sent_tokenize(label)) for label in labels]
        return preds, labels
    def compute_metrics(self, preds, labels):
        """Decode token ids and compute ROUGE f-measures plus mean length."""
        tokenizer = AutoTokenizer.from_pretrained("facebook/bart-base")
        decoded_preds = tokenizer.batch_decode(preds, skip_special_tokens=True)
        decoded_labels = tokenizer.batch_decode(labels, skip_special_tokens=True)
        # Some simple post-processing
        decoded_preds, decoded_labels = self.postprocess_text(decoded_preds, decoded_labels)
        metric = load_metric("rouge")
        result = metric.compute(
            predictions=decoded_preds, references=decoded_labels, use_stemmer=True
        )
        # Extract a few results from ROUGE
        result = {key: value.mid.fmeasure * 100 for key, value in result.items()}
        prediction_lens = [np.count_nonzero(pred != tokenizer.pad_token_id) for pred in preds]
        result["gen_len"] = np.mean(prediction_lens)
        result = {k: round(v, 4) for k, v in result.items()}
        return result
    def post_val_loop(
        self, step: int, epoch: int, val_metric: Optional[float], best_val_metric: Optional[float]
    ) -> None:
        # Summarize the accumulated predictions as ROUGE and log one line.
        rouge_metrics = self.compute_metrics(self.eval_preds, self.eval_labels)
        rouge_desc = " ".join([f"Eval {key}: {value} |" for key, value in rouge_metrics.items()])
self.logger.info(rouge_desc) |
# -*- coding: utf-8 -*-
#
# This file is part of SENAITE.CORE.
#
# SENAITE.CORE is free software: you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free Software
# Foundation, version 2.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Copyright 2018-2023 by it's authors.
# Some rights reserved, see README and LICENSE.
from bika.lims import api
from bika.lims.interfaces import IAnalysisService
from bika.lims.interfaces import IHaveAnalysisCategory
from bika.lims.interfaces import IHaveDepartment
from bika.lims.interfaces import IHaveInstrument
from bika.lims.interfaces import IHavePrice
from bika.lims.interfaces import IInstrument
from bika.lims.interfaces import ISampleTypeAwareMixin
from plone.indexer import indexer
from Products.CMFCore.interfaces import IContentish
from senaite.core.catalog import SETUP_CATALOG
from senaite.core.catalog.utils import get_searchable_text_tokens
from senaite.core.interfaces import ISetupCatalog
@indexer(ISampleTypeAwareMixin, ISetupCatalog)
def METHOD_NAME(instance):
    """Returns the list of SampleType UIDs the instance is assigned to

    This is a KeywordIndex, so it will be indexed as a list, even if only one
    SampleType can be assigned to the instance. When no SampleType is
    assigned, a tuple with a None value is returned so that searches for
    `MissingValue` entries keep working.
    """
    return to_keywords_list(instance.getSampleType(), api.get_uid)
@indexer(ISampleTypeAwareMixin, ISetupCatalog)
def sampletype_title(instance):
    """Returns a list of titles from SampleType the instance is assigned to

    When no SampleType is assigned, a tuple with a None value is returned so
    that searches for `MissingValue` entries keep working.
    """
    return to_keywords_list(instance.getSampleType(), api.get_title)
@indexer(IAnalysisService, ISetupCatalog)
def method_available_uid(instance):
    """Returns a list of Method UIDs that are available for this instance

    If the instance (AnalysisService) has InstrumentEntryOfResults set to True,
    it returns the methods available from the instruments capable to perform
    the service, as well as the methods set manually to the analysis.
    Otherwise, it returns the methods assigned manually only.

    If the instance has no available method assigned, it returns a tuple with
    a None value. This allows searches for `MissingValue` entries too.
    """
    return instance.getAvailableMethodUIDs() or (None, )
@indexer(IHaveInstrument, ISetupCatalog)
def instrument_title(instance):
    """Returns a list of titles from Instrument the instance is assigned to

    When no instrument is assigned, a tuple with a None value is returned so
    that searches for `MissingValue` entries keep working.
    """
    return to_keywords_list(instance.getInstrument(), api.get_title)
@indexer(IHavePrice, ISetupCatalog)
def price(instance):
    """Returns the price of the instance
    """
    return instance.getPrice()
@indexer(IHavePrice, ISetupCatalog)
def price_total(instance):
    """Returns the total price of the instance
    """
    return instance.getTotalPrice()
@indexer(IInstrument, ISetupCatalog)
def instrumenttype_title(instance):
    """Returns a list of Instrument Type titles the instance is assigned to
    """
    return to_keywords_list(instance.getInstrumentType(), api.get_title)
@indexer(IHaveDepartment, ISetupCatalog)
def department_uid(instance):
    """Returns a list of Department UIDs the instance is assigned to
    """
    return to_keywords_list(instance.getDepartment(), api.get_uid)
@indexer(IHaveDepartment, ISetupCatalog)
def department_title(instance):
    """Returns the title of the Department the instance is assigned to
    """
    return to_keywords_list(instance.getDepartment(), api.get_title)
@indexer(IHaveDepartment, ISetupCatalog)
def department_id(instance):
    """Returns the ID of the Department the instance is assigned to
    """
    def _department_id(department):
        return department.getDepartmentID()

    return to_keywords_list(instance.getDepartment(), _department_id)
@indexer(IAnalysisService, ISetupCatalog)
def point_of_capture(instance):
    """Returns the point of capture of the instance
    """
    return instance.getPointOfCapture()
@indexer(IContentish, ISetupCatalog)
def listing_searchable_text(instance):
    """ Retrieves all the values of metadata columns in the catalog for
    wildcard searches

    :return: all metadata values joined in a string
    """
    exclude = ["getObjPositionInParent", ]
    # Additional non-metadata fields to include in the index.
    # NOTE: commas were missing after "getCalculation" and "getSamplePoint",
    # so implicit string concatenation silently produced the bogus field
    # names "getCalculationgetDepartment" and "getSamplePointgetSampleType",
    # dropping four fields from the searchable text. Fixed here.
    include = [
        "getCalculation",
        "getDepartment",
        "getInstrument",
        "getInstrumentType",
        "getSamplePoint",
        "getSampleType",
        "getSupplier",
        "getManufacturer",
    ]
    tokens = get_searchable_text_tokens(instance, SETUP_CATALOG,
                                        exclude_field_names=exclude,
                                        include_field_names=include)
    return u" ".join(tokens)
@indexer(IHaveAnalysisCategory, ISetupCatalog)
def category_uid(instance):
    """Returns a list of Category UIDs the instance is assigned to
    """
    return to_keywords_list(instance.getCategory(), api.get_uid)
def to_keywords_list(obj, func):
    """Map *func* over *obj* and return the result as a list, suitable for a
    KeywordIndex.

    - list/tuple input: ``func`` applied to each element
    - single truthy object: one-element list
    - falsy input (None, empty, 0): ``[None]`` so `MissingValue` searches work

    NOTE: the original returned a lazy ``map`` object for list/tuple input
    (Python 3), inconsistent with the list returned by the other branches and
    only iterable once; it now materializes a list in every case.
    """
    if isinstance(obj, (list, tuple)):
        return [func(item) for item in obj]
    elif obj:
        return [func(obj)]
    return [None]
# -*- coding: utf-8 -*-
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import json
from django.core.exceptions import ValidationError
from manage_treemap.views.management import update_benefits
from treemap.instance import Instance
from treemap.models import BenefitCurrencyConversion
from treemap.tests import make_instance, make_commander_user, make_request
from treemap.tests.base import OTMTestCase
class BenefitsUpdateTest(OTMTestCase):
    """Tests for the ``update_benefits`` management view.

    A valid payload must update the instance's benefit currency conversions;
    an invalid payload must raise ``ValidationError`` and leave the stored
    conversions untouched.

    NOTE: the original repeated the same 10-key payload dict four times with
    a one-field variation each; the payload construction is factored into
    ``_updates`` and the rejection check into ``_assert_rejected``.
    """

    def setUp(self):
        self.instance = make_instance()
        self.conversions =\
            BenefitCurrencyConversion.get_default_for_region('PiedmtCLT')
        self.conversions.save()
        self.instance.eco_benefits_conversion = self.conversions
        self.instance.save()
        self.commander = make_commander_user(self.instance)

    def _updates(self, **overrides):
        """Return a fully valid payload; keyword args override single fields
        (keys are the part after the 'benefitCurrencyConversion.' prefix)."""
        updates = {
            'benefitCurrencyConversion.currency_symbol': '$',
            'benefitCurrencyConversion.electricity_kwh_to_currency': '1.0',
            'benefitCurrencyConversion.natural_gas_kbtu_to_currency': '2.0',
            'benefitCurrencyConversion.h20_gal_to_currency': '3.0',
            'benefitCurrencyConversion.co2_lb_to_currency': '4.0',
            'benefitCurrencyConversion.o3_lb_to_currency': '5.0',
            'benefitCurrencyConversion.nox_lb_to_currency': '6.0',
            'benefitCurrencyConversion.pm10_lb_to_currency': '7.0',
            'benefitCurrencyConversion.sox_lb_to_currency': '8.0',
            'benefitCurrencyConversion.voc_lb_to_currency': '9.0',
        }
        for key, value in overrides.items():
            updates['benefitCurrencyConversion.' + key] = value
        return updates

    def _put(self, updates):
        """Send *updates* to update_benefits as a PUT from the commander."""
        request = make_request(method='PUT',
                               body=json.dumps(updates),
                               user=self.commander)
        return update_benefits(request, self.instance)

    def _assert_rejected(self, updates):
        """Assert *updates* raises ValidationError and that the stored
        conversions are unchanged."""
        with self.assertRaises(ValidationError):
            self._put(updates)
        updated_instance = Instance.objects.get(pk=self.instance.pk)
        self.assertEqual(updated_instance.eco_benefits_conversion,
                         self.conversions)

    def test_update_some_values(self):
        self._put(self._updates())
        conv = self.instance.eco_benefits_conversion
        self.assertEqual(conv.currency_symbol, '$')
        self.assertEqual(conv.electricity_kwh_to_currency, 1.0)
        self.assertEqual(conv.natural_gas_kbtu_to_currency, 2.0)
        self.assertEqual(conv.h20_gal_to_currency, 3.0)
        self.assertEqual(conv.co2_lb_to_currency, 4.0)
        self.assertEqual(conv.o3_lb_to_currency, 5.0)
        self.assertEqual(conv.nox_lb_to_currency, 6.0)
        self.assertEqual(conv.pm10_lb_to_currency, 7.0)
        self.assertEqual(conv.sox_lb_to_currency, 8.0)
        self.assertEqual(conv.voc_lb_to_currency, 9.0)

    def test_error_on_blank(self):
        self._assert_rejected(self._updates(electricity_kwh_to_currency=''))

    def METHOD_NAME(self):
        self._assert_rejected(self._updates(co2_lb_to_currency='-4.0'))

    def test_error_on_non_number(self):
        self._assert_rejected(self._updates(pm10_lb_to_currency='Seven'))
# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Dict, List, Optional, Union
from pipelines.document_stores import BaseDocumentStore
from pipelines.nodes import BaseComponent
from pipelines.nodes.prompt import PromptNode, PromptTemplate
from pipelines.nodes.retriever import BaseRetriever
from pipelines.schema import Document, FilterType
class MockNode(BaseComponent):
    """Do-nothing BaseComponent used to satisfy pipeline wiring in tests."""

    outgoing_edges = 1

    def run(self, *a, **k):
        # No-op stub; returns None.
        pass

    def run_batch(self, *a, **k):
        # No-op stub; returns None.
        pass
class MockDocumentStore(BaseDocumentStore):
    """Do-nothing BaseDocumentStore for tests.

    Every abstract method of the base class is overridden with a no-op stub
    that returns None, so the store can be plugged into a pipeline without
    any backing storage.
    """

    outgoing_edges = 1

    def _create_document_field_map(self, *a, **k):
        pass

    def delete_documents(self, *a, **k):
        pass

    def delete_labels(self, *a, **k):
        pass

    def get_all_documents(self, *a, **k):
        pass

    def get_all_documents_generator(self, *a, **k):
        pass

    def get_all_labels(self, *a, **k):
        pass

    def get_document_by_id(self, *a, **k):
        pass

    def get_document_count(self, *a, **k):
        pass

    def get_documents_by_id(self, *a, **k):
        pass

    def get_label_count(self, *a, **k):
        pass

    def query_by_embedding(self, *a, **k):
        pass

    def write_documents(self, *a, **k):
        pass

    def write_labels(self, *a, **k):
        pass

    def METHOD_NAME(self, *a, **k):
        pass

    def update_document_meta(self, *a, **kw):
        pass
class MockRetriever(BaseRetriever):
    """Retriever stub that always returns no documents."""

    outgoing_edges = 1

    def retrieve(
        self,
        query: str,
        filters: Optional[FilterType] = None,
        top_k: Optional[int] = None,
        index: Optional[str] = None,
        headers: Optional[Dict[str, str]] = None,
        scale_score: Optional[bool] = None,
        document_store: Optional[BaseDocumentStore] = None,
        **kwargs,
    ) -> List[Document]:
        """Return an empty result list regardless of the query."""
        return []

    def retrieve_batch(
        self,
        queries: List[str],
        filters: Optional[Union[FilterType, List[Optional[FilterType]]]] = None,
        top_k: Optional[int] = None,
        index: Optional[str] = None,
        headers: Optional[Dict[str, str]] = None,
        batch_size: Optional[int] = None,
        scale_score: Optional[bool] = None,
        document_store: Optional[BaseDocumentStore] = None,
    ) -> List[List[Document]]:
        # NOTE(review): returns a single empty result list regardless of how
        # many queries were passed -- fine for current tests, but not a
        # faithful per-query shape.
        return [[]]
class MockPromptNode(PromptNode):
    """PromptNode stand-in that skips model loading and returns canned output.

    (Also removes a stray ``|`` artifact that had been fused onto the final
    line of the original.)
    """

    def __init__(self):
        # Deliberately does NOT call super().__init__() so that no model is
        # resolved or loaded during tests.
        self.default_prompt_template = None
        self.model_name_or_path = ""

    def prompt(self, prompt_template: Optional[Union[str, PromptTemplate]], *args, **kwargs) -> List[str]:
        """Always answer with a single empty string."""
        return [""]

    def get_prompt_template(self, prompt_template: Union[str, PromptTemplate, None]) -> Optional[PromptTemplate]:
        """Return the canned "think-step-by-step" agent template when asked
        for it by name; any other request yields an empty template."""
        if prompt_template == "think-step-by-step":
            return PromptTemplate(
                name="think-step-by-step",
                prompt_text="You are a helpful and knowledgeable agent. To achieve your goal of answering complex questions "
                "correctly, you have access to the following tools:\n\n"
                "{tool_names_with_descriptions}\n\n"
                "To answer questions, you'll need to go through multiple steps involving step-by-step thinking and "
                "selecting appropriate tools and their inputs; tools will respond with observations. When you are ready "
                "for a final answer, respond with the `Final Answer:`\n\n"
                "Use the following format:\n\n"
                "Question: the question to be answered\n"
                "Thought: Reason if you have the final answer. If yes, answer the question. If not, find out the missing information needed to answer it.\n"
                "Tool: [{tool_names}]\n"
                "Tool Input: the input for the tool\n"
                "Observation: the tool will respond with the result\n"
                "...\n"
                "Final Answer: the final answer to the question, make it short (1-5 words)\n\n"
                "Thought, Tool, Tool Input, and Observation steps can be repeated multiple times, but sometimes we can find an answer in the first pass\n"
                "---\n\n"
                "Question: {query}\n"
                "Thought: Let's think step-by-step, I first need to {generated_text}",
            )
        else:
            return PromptTemplate(name="", prompt_text="")
# coding: utf-8
"""
Kubeflow Pipelines API
This file contains REST API specification for Kubeflow Pipelines. The file is autogenerated from the swagger definition.
Contact: kubeflow-pipelines@google.com
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import unittest
import datetime
import kfp_server_api
from kfp_server_api.models.api_list_runs_response import ApiListRunsResponse # noqa: E501
from kfp_server_api.rest import ApiException
class TestApiListRunsResponse(unittest.TestCase):
    """ApiListRunsResponse unit test stubs (auto-generated by
    openapi-generator)."""

    def setUp(self):
        pass

    def tearDown(self):
        pass

    def METHOD_NAME(self, include_optional):
        """Build an ApiListRunsResponse test instance.

        ``include_optional`` is a boolean: when False only required params
        are included, when True both required and optional params are
        included.
        """
        # model = kfp_server_api.models.api_list_runs_response.ApiListRunsResponse()  # noqa: E501
        if include_optional :
            # Fully populated response: one run with nested pipeline spec,
            # resource references and metrics, all with placeholder values.
            return ApiListRunsResponse(
                runs = [
                    kfp_server_api.models.api_run.apiRun(
                        id = '0',
                        name = '0',
                        storage_state = 'STORAGESTATE_AVAILABLE',
                        description = '0',
                        pipeline_spec = kfp_server_api.models.api_pipeline_spec.apiPipelineSpec(
                            pipeline_id = '0',
                            pipeline_name = '0',
                            workflow_manifest = '0',
                            pipeline_manifest = '0',
                            parameters = [
                                kfp_server_api.models.api_parameter.apiParameter(
                                    name = '0',
                                    value = '0', )
                                ],
                            runtime_config = kfp_server_api.models.pipeline_spec_runtime_config.PipelineSpecRuntimeConfig(
                                parameters = {
                                    'key' : None
                                    },
                                pipeline_root = '0', ), ),
                        resource_references = [
                            kfp_server_api.models.api_resource_reference.apiResourceReference(
                                key = kfp_server_api.models.api_resource_key.apiResourceKey(
                                    type = 'UNKNOWN_RESOURCE_TYPE',
                                    id = '0', ),
                                name = '0',
                                relationship = 'UNKNOWN_RELATIONSHIP', )
                            ],
                        service_account = '0',
                        created_at = datetime.datetime.strptime('2013-10-20 19:20:30.00', '%Y-%m-%d %H:%M:%S.%f'),
                        scheduled_at = datetime.datetime.strptime('2013-10-20 19:20:30.00', '%Y-%m-%d %H:%M:%S.%f'),
                        finished_at = datetime.datetime.strptime('2013-10-20 19:20:30.00', '%Y-%m-%d %H:%M:%S.%f'),
                        status = '0',
                        error = '0',
                        metrics = [
                            kfp_server_api.models.api_run_metric.apiRunMetric(
                                name = '0',
                                node_id = '0',
                                number_value = 1.337,
                                format = 'UNSPECIFIED', )
                            ], )
                    ],
                total_size = 56,
                next_page_token = '0'
            )
        else :
            # All fields of ApiListRunsResponse are optional.
            return ApiListRunsResponse(
        )

    def testApiListRunsResponse(self):
        """Test ApiListRunsResponse"""
        inst_req_only = self.METHOD_NAME(include_optional=False)
        inst_req_and_optional = self.METHOD_NAME(include_optional=True)
# Allow running this module's tests directly.
# (Removes a stray " |" artifact that had been fused onto the final line.)
if __name__ == '__main__':
    unittest.main()
"""Classes/utilities for support of a dandiset"""
from __future__ import annotations
from collections.abc import Iterable, Iterator
from dataclasses import dataclass
import os.path
from pathlib import Path, PurePath, PurePosixPath
from typing import Optional, TypeVar
from dandischema.models import get_schema_version
from . import get_logger
from .consts import dandiset_metadata_file
from .files import DandisetMetadataFile, LocalAsset, dandi_file, find_dandi_files
from .utils import find_parent_directory_containing, under_paths, yaml_dump, yaml_load
lgr = get_logger()
D = TypeVar("D", bound="Dandiset")
class Dandiset:
    """A prototype class for all things dandiset"""

    # Fixed attribute set; no per-instance __dict__.
    __slots__ = ["metadata", "path", "path_obj", "_metadata_file_obj"]

    def __init__(
        self,
        path: str | Path,
        allow_empty: bool = False,
        schema_version: Optional[str] = None,
    ) -> None:
        """Load the dandiset rooted at *path*.

        :param allow_empty: when True, do not require dandiset.yaml to exist
        :param schema_version: if given, must equal the current dandischema
            version; any other value raises ValueError
        """
        if schema_version is not None:
            current_version = get_schema_version()
            if schema_version != current_version:
                raise ValueError(
                    f"Unsupported schema version: {schema_version}; expected {current_version}"
                )
        self.path = str(path)
        self.path_obj = Path(path)
        # lexists so that a broken dandiset.yaml symlink still counts as present
        if not allow_empty and not os.path.lexists(
            self.path_obj / dandiset_metadata_file
        ):
            raise ValueError(f"No dandiset at {path}")
        self.metadata: Optional[dict] = None
        self._metadata_file_obj = self.path_obj / dandiset_metadata_file
        self._load_metadata()

    @classmethod
    def find(cls: type[D], path: str | Path | None) -> Optional[D]:
        """Find a dandiset possibly pointing to a directory within it"""
        dandiset_path = find_parent_directory_containing(dandiset_metadata_file, path)
        if dandiset_path is not None:
            return cls(dandiset_path)
        return None

    def _load_metadata(self) -> None:
        """Populate ``self.metadata`` from dandiset.yaml (None when absent)."""
        try:
            with self._metadata_file_obj.open() as f:
                # TODO it would cast 000001 if not explicitly string into
                # an int -- we should prevent it... probably with some custom loader
                self.metadata = yaml_load(f, typ="safe")
        except FileNotFoundError:
            if os.path.lexists(self._metadata_file_obj):
                # Broken symlink
                raise
            else:
                self.metadata = None

    @classmethod
    def get_dandiset_record(cls, meta: dict) -> str:
        """Render *meta* as dandiset.yaml content: a do-not-edit warning
        header followed by the YAML dump."""
        dandiset_identifier = cls._get_identifier(meta)
        if not dandiset_identifier:
            lgr.warning("No identifier for a dandiset was provided in %s", meta)
            obtain_msg = ""
        else:
            obtain_msg = (
                " edited online at https://dandiarchive.org/dandiset/"
                f"{dandiset_identifier}\n# and"
            )
        header = f"""\
# DO NOT EDIT THIS FILE LOCALLY. ALL LOCAL UPDATES WILL BE LOST.
# It can be{obtain_msg} obtained from the dandiarchive.
"""
        yaml_rec = yaml_dump(meta)
        return header + yaml_rec

    def update_metadata(self, meta: dict) -> None:
        """Update existing metadata record in dandiset.yaml"""
        if not meta:
            lgr.debug("No updates to metadata, returning")
            return
        try:
            with self._metadata_file_obj.open() as f:
                rec = yaml_load(f, typ="safe")
        except FileNotFoundError:
            if os.path.lexists(self._metadata_file_obj):
                # Broken symlink
                raise
            else:
                rec = {}
        # TODO: decide howto and properly do updates to nested structures if
        # possible. Otherwise limit to the fields we know could be modified
        # locally
        rec.update(meta)
        self._metadata_file_obj.write_text(self.get_dandiset_record(rec))
        # and reload now by a pure yaml
        self._load_metadata()

    @staticmethod
    def _get_identifier(metadata: dict) -> Optional[str]:
        """Given a metadata record, determine identifier"""
        # ATM since we have dichotomy in dandiset metadata schema from drafts
        # and from published versions, we will just test both locations
        id_ = metadata.get("dandiset", {}).get("identifier")
        if id_:
            # very old but might still be present... TODO: API-migration-remove
            lgr.debug("Found identifier %s in 'dandiset.identifier'", id_)
        if not id_ and "identifier" in metadata:
            # girder-based, used before migration to API TODO: API-migration-remove
            id_ = metadata["identifier"]
            lgr.debug("Found identifier %s in top level 'identifier'", str(id_))
        if isinstance(id_, dict):
            # New formalized model, but see below DANDI: way
            # TODO: add schemaVersion handling but only after we have them provided
            # in all metadata records from dandi-api server
            if id_.get("propertyID") != "DANDI":
                raise ValueError(
                    f"Got following identifier record when was expecting a record "
                    f"with 'propertyID: DANDI': {id_}"
                )
            id_ = str(id_.get("value", ""))
        elif id_ is not None:
            assert isinstance(id_, str)
            if id_.startswith("DANDI:"):
                # result of https://github.com/dandi/dandi-cli/pull/348 which
                # prefixed identifiers with "DANDI:" -- strip the prefix
                id_ = id_[len("DANDI:") :]
        assert id_ is None or isinstance(id_, str)
        return id_

    @property
    def identifier(self) -> str:
        """The dandiset identifier; raises ValueError when not determinable."""
        if self.metadata is None:
            raise ValueError("No metadata record found in Dandiset")
        id_ = self._get_identifier(self.metadata)
        if not id_:
            raise ValueError(
                f"Found no dandiset.identifier in metadata record: {self.metadata}"
            )
        return id_

    def METHOD_NAME(self, allow_all: bool = False) -> AssetView:
        """Collect all local assets (everything except dandiset.yaml) into an
        AssetView keyed by POSIX path relative to the dandiset root."""
        data = {}
        for df in find_dandi_files(
            self.path, dandiset_path=self.path, allow_all=allow_all
        ):
            if isinstance(df, DandisetMetadataFile):
                continue
            assert isinstance(df, LocalAsset)
            data[PurePosixPath(df.path)] = df
        return AssetView(data)

    def metadata_file(self) -> DandisetMetadataFile:
        """Return the DandisetMetadataFile object for dandiset.yaml."""
        df = dandi_file(self._metadata_file_obj, dandiset_path=self.path)
        assert isinstance(df, DandisetMetadataFile)
        return df
@dataclass
class AssetView:
    """
    A collection of all assets in a local Dandiset, used to ensure that
    `BIDSDatasetDescriptionAsset` objects are stored and remain alive while
    working with only a subset of the files in a Dandiset.

    (Also removes a stray ``|`` artifact that had been fused onto the final
    line of the original.)
    """

    # Assets keyed by their POSIX path relative to the Dandiset root
    data: dict[PurePosixPath, LocalAsset]

    def __iter__(self) -> Iterator[LocalAsset]:
        """Iterate over the assets (dict insertion order)."""
        return iter(self.data.values())

    def under_paths(self, paths: Iterable[str | PurePath]) -> Iterator[LocalAsset]:
        """Yield the assets that lie under any of *paths*.

        The given paths must be relative to the Dandiset root and may not
        contain '.' or '..'
        """
        for p in under_paths(self.data.keys(), paths):
            yield self.data[p]
# Copyright (c) Princeton University.
# This source code is licensed under the BSD 3-Clause license found in the LICENSE file in the root directory of this source tree.
# Authors: Mingzhe Wang
import bpy
import mathutils
from numpy.random import uniform as U, normal as N, randint
from nodes.node_wrangler import Nodes, NodeWrangler
from nodes import node_utils
from nodes.color import color_category
from surfaces import surface
from util.organization import SurfaceTypes
from util.math import FixedSeed
import gin
type = SurfaceTypes.SDFPerturb
mod_name = "geo_mud"
name = "mud"
def shader_mud(nw: NodeWrangler):
    """Build the mud surface shader node graph and return its BSDF.

    A 4D noise field drives the base colour between two slightly randomized
    brown tones; a ridged musgrave field modulates specular and roughness so
    the mud varies between wetter and drier looking patches.
    """
    # Code generated using version 2.6.4 of the node_transpiler
    geometry_5 = nw.new_node(Nodes.NewGeometry)
    noise_texture_1_w = nw.new_node(Nodes.Value, label='noise_texture_1_w')
    noise_texture_1_w.outputs[0].default_value = 9.6366
    noise_texture_1 = nw.new_node(Nodes.NoiseTexture,
        input_kwargs={'Vector': geometry_5.outputs["Position"], 'W': noise_texture_1_w, 'Scale': N(5, 0.5)},
        attrs={'noise_dimensions': '4D'})
    # Two brown endpoint colours, jittered per-sample on RGB only
    color1 = [0.0216, 0.0145, 0.0113, 1.0000]
    color2 = [0.0424, 0.0308, 0.0142, 1.0000]
    for i in range(3):
        color1[i] += N(0, 0.005)
        color2[i] += N(0, 0.005)
    colorramp_3 = nw.new_node(Nodes.ColorRamp, input_kwargs={'Fac': noise_texture_1.outputs["Fac"]})
    colorramp_3.color_ramp.elements[0].position = 0.0000
    colorramp_3.color_ramp.elements[0].color = color1
    colorramp_3.color_ramp.elements[1].position = 1.0000
    colorramp_3.color_ramp.elements[1].color = color2
    geometry_1 = nw.new_node(Nodes.NewGeometry)
    # Ridged multifractal mask distinguishing wet/dry regions
    musgrave_texture = nw.new_node(Nodes.MusgraveTexture,
        input_kwargs={'Vector': geometry_1.outputs["Position"], 'Scale': 0.2000, 'W': U(-10, 10)},
        attrs={'musgrave_dimensions': '4D', 'musgrave_type': 'RIDGED_MULTIFRACTAL'})
    colorramp_5 = nw.new_node(Nodes.ColorRamp, input_kwargs={'Fac': musgrave_texture})
    colorramp_5.color_ramp.elements[0].position = 0.0000
    colorramp_5.color_ramp.elements[0].color = [1.0000, 1.0000, 1.0000, 1.0000]
    colorramp_5.color_ramp.elements[1].position = N(0.1045, 0.01)
    colorramp_5.color_ramp.elements[1].color = [0.0000, 0.0000, 0.0000, 1.0000]
    # Specular range
    x1 = U(0.85, 0.95)
    x2 = U(0.65, 0.75)
    colorramp_6 = nw.new_node(Nodes.ColorRamp, input_kwargs={'Fac': colorramp_5.outputs["Color"]})
    colorramp_6.color_ramp.elements[0].position = 0.0000
    colorramp_6.color_ramp.elements[0].color = [x1, x1, x1, 1.0000]
    colorramp_6.color_ramp.elements[1].position = 1.0000
    colorramp_6.color_ramp.elements[1].color = [x2, x2, x2, 1.0000]
    # Roughness range
    x1 = U(0.05, 0.15)
    x2 = U(0.45, 0.55)
    colorramp_4 = nw.new_node(Nodes.ColorRamp, input_kwargs={'Fac': noise_texture_1.outputs["Fac"]})
    colorramp_4.color_ramp.elements[0].position = 0.0000
    colorramp_4.color_ramp.elements[0].color = [x1, x1, x1, 1.0000]
    colorramp_4.color_ramp.elements[1].position = 1.0000
    colorramp_4.color_ramp.elements[1].color = [x2, x2, x2, 1.0000]
    mix_3 = nw.new_node(Nodes.MixRGB,
        input_kwargs={'Fac': colorramp_5.outputs["Color"], 'Color1': (0.0000, 0.0000, 0.0000, 1.0000), 'Color2': colorramp_4.outputs["Color"]})
    principled_bsdf_2 = nw.new_node(Nodes.PrincipledBSDF,
        input_kwargs={'Base Color': colorramp_3.outputs["Color"], 'Specular': colorramp_6.outputs["Color"], 'Roughness': mix_3})
    material_output = nw.new_node(Nodes.MaterialOutput, input_kwargs={'Surface': principled_bsdf_2}, attrs={'is_active_output': True})
    return principled_bsdf_2
@gin.configurable
def geo_mud(nw: NodeWrangler, random_seed=0, selection=None):
    """Geometry-node group that displaces the surface along its normals to
    create mud lumps.

    A voronoi distance field (shaped by a float curve) plus fine noise forms
    the displacement magnitude; ``selection`` optionally masks the effect.
    All random parameters are drawn under ``FixedSeed(random_seed)`` for
    reproducibility.
    """
    # Code generated using version 2.6.4 of the node_transpiler
    with FixedSeed(random_seed):
        group_input = nw.new_node(Nodes.GroupInput, expose_input=[('NodeSocketGeometry', 'Geometry', None)])
        position_5 = nw.new_node(Nodes.InputPosition)
        # Warp the sampling position with noise before texturing
        noise_texture_3 = nw.new_node(Nodes.NoiseTexture, input_kwargs={'Vector': position_5})
        mix_2 = nw.new_node(Nodes.MixRGB,
            input_kwargs={'Fac': nw.new_value(N(0.6, 0.1), "mix_2_fac"), 'Color1': noise_texture_3.outputs["Color"], 'Color2': position_5})
        noise_texture_4 = nw.new_node(Nodes.NoiseTexture, input_kwargs={'Vector': mix_2, 'Scale': nw.new_value(N(50, 5), "noise_texture_4_scale")})
        voronoi_texture_2 = nw.new_node(Nodes.VoronoiTexture, input_kwargs={'Vector': mix_2, 'Scale': nw.new_value(N(3.0000, 0.5), "voronoi_texture_2_scale")})
        colorramp_1 = nw.new_node(Nodes.ColorRamp, input_kwargs={'Fac': voronoi_texture_2.outputs["Distance"]})
        colorramp_1.color_ramp.elements[0].position = 0.0000
        colorramp_1.color_ramp.elements[0].color = [1.0000, 1.0000, 1.0000, 1.0000]
        colorramp_1.color_ramp.elements[1].position = 1.0000
        colorramp_1.color_ramp.elements[1].color = [0.0000, 0.0000, 0.0000, 1.0000]
        # Shape the voronoi falloff into rounded lumps
        float_curve_1 = nw.new_node(Nodes.FloatCurve,
            input_kwargs={
                'Value': colorramp_1.outputs["Color"]})
        node_utils.assign_curve(
            float_curve_1.mapping.curves[0],
            [(0.0000, 0.0000), (0.3386, 0.0844), (0.8114, 0.6312), (1.0000, 0.7656)]
        )
        # node_utils.assign_curve(
        #     float_curve_1.mapping.curves[0],
        #     [(0.0000, 0.0000), (0.3386+N(0, 0.05), 0.0844), (0.8114+N(0, 0.05), 0.6312), (1.0000, 0.7656)]
        # )
        value_6 = nw.new_node(Nodes.Value)
        value_6.outputs[0].default_value = N(2, 0.2)
        multiply = nw.new_node(Nodes.VectorMath, input_kwargs={0: float_curve_1, 1: value_6}, attrs={'operation': 'MULTIPLY'})
        add = nw.new_node(Nodes.VectorMath, input_kwargs={0: noise_texture_4.outputs["Fac"], 1: multiply.outputs["Vector"]})
        # Displace along the surface normal, scaled to a few centimeters
        normal = nw.new_node(Nodes.InputNormal)
        multiply_1 = nw.new_node(Nodes.VectorMath, input_kwargs={0: add.outputs["Vector"], 1: normal}, attrs={'operation': 'MULTIPLY'})
        value_5 = nw.new_node(Nodes.Value)
        value_5.outputs[0].default_value = N(0.04, 0.005)
        multiply_2 = nw.new_node(Nodes.VectorMath,
            input_kwargs={0: multiply_1.outputs["Vector"], 1: value_5},
            attrs={'operation': 'MULTIPLY'})
        offset = multiply_2.outputs["Vector"]
        if selection is not None:
            # Mask the displacement to the selected region only
            offset = nw.multiply(offset, surface.eval_argument(nw, selection))
        set_position = nw.new_node(Nodes.SetPosition,
            input_kwargs={'Geometry': group_input.outputs["Geometry"], 'Offset': offset})
        group_output = nw.new_node(Nodes.GroupOutput, input_kwargs={'Geometry': set_position}, attrs={'is_active_output': True})
def METHOD_NAME(obj, selection=None, **kwargs):
    """Apply the mud surface to *obj*: the displacement geometry-node group
    plus the matching shader material, both limited to *selection*.

    NOTE: the last line of the original was truncated mid-call (missing the
    closing parenthesis); restored here.
    """
    surface.add_geomod(obj, geo_mud, selection=selection)
    surface.add_material(obj, shader_mud, selection=selection)
"""
The Pygame Zero mode for the Mu editor.
Copyright (c) 2015-2017 Nicholas H.Tollervey and others (see the AUTHORS file).
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import os
import logging
from mu.modes.base import BaseMode
from mu.modes.api import PYTHON3_APIS, SHARED_APIS, PI_APIS, PYGAMEZERO_APIS
from mu.resources import load_icon
from ..virtual_environment import venv
logger = logging.getLogger(__name__)
class PyGameZeroMode(BaseMode):
"""
Represents the functionality required by the PyGameZero mode.
"""
name = _("Pygame Zero")
short_name = "pygamezero"
description = _("Make games with Pygame Zero.")
icon = "pygamezero"
runner = None
builtins = [
"clock",
"music",
"Actor",
"keyboard",
"animate",
"Rect",
"ZRect",
"images",
"sounds",
"mouse",
"keys",
"keymods",
"exit",
"screen",
]
    def actions(self):
        """
        Return an ordered list of actions provided by this module. An action
        is a name (also used to identify the icon), description, and handler.
        """
        return [
            {
                "name": "play",
                "display_name": _("Play"),
                "description": _("Play your Pygame Zero game."),
                "handler": self.play_toggle,
                "shortcut": "F5",
            },
            {
                "name": "images",
                "display_name": _("Images"),
                "description": _("Show the images used by Pygame Zero."),
                "handler": self.METHOD_NAME,
                "shortcut": "Ctrl+Shift+I",
            },
            {
                "name": "fonts",
                "display_name": _("Fonts"),
                "description": _("Show the fonts used by Pygame Zero."),
                "handler": self.show_fonts,
                "shortcut": "Ctrl+Shift+F",
            },
            {
                "name": "sounds",
                "display_name": _("Sounds"),
                "description": _("Show the sounds used by Pygame Zero."),
                "handler": self.show_sounds,
                "shortcut": "Ctrl+Shift+N",
            },
            {
                "name": "music",
                "display_name": _("Music"),
                "description": _("Show the music used by Pygame Zero."),
                "handler": self.show_music,
                "shortcut": "Ctrl+Shift+M",
            },
        ]
def api(self):
"""
Return a list of API specifications to be used by auto-suggest and call
tips.
"""
return SHARED_APIS + PYTHON3_APIS + PI_APIS + PYGAMEZERO_APIS
def play_toggle(self, event):
"""
Handles the toggling of the play button to start/stop a script.
"""
if self.runner:
self.stop_game()
play_slot = self.view.button_bar.slots["play"]
play_slot.setIcon(load_icon("play"))
play_slot.setText(_("Play"))
play_slot.setToolTip(_("Play your Pygame Zero game."))
self.set_buttons(modes=True)
else:
self.run_game()
if self.runner:
play_slot = self.view.button_bar.slots["play"]
play_slot.setIcon(load_icon("stop"))
play_slot.setText(_("Stop"))
play_slot.setToolTip(_("Stop your Pygame Zero game."))
self.set_buttons(modes=False)
def run_game(self):
"""
Run the current game.
"""
# Grab the Python file.
tab = self.view.current_tab
if tab is None:
logger.debug("There is no active text editor.")
self.stop_game()
return
if tab.path is None:
# Unsaved file.
self.editor.save()
if tab.path:
# If needed, save the script.
if tab.isModified():
self.editor.save_tab_to_file(tab)
logger.debug(tab.text())
envars = self.editor.envars
args = ["-m", "pgzero"]
cwd = os.path.dirname(tab.path)
self.runner = self.view.add_python3_runner(
interpreter=venv.interpreter,
script_name=tab.path,
working_directory=cwd,
interactive=False,
envars=envars,
python_args=args,
)
self.runner.process.waitForStarted()
def stop_game(self):
"""
Stop the currently running game.
"""
logger.debug("Stopping script.")
if self.runner:
self.runner.stop_process()
self.runner = None
self.view.remove_python_runner()
def METHOD_NAME(self, event):
"""
Open the directory containing the image assets used by Pygame Zero.
This should open the host OS's file system explorer so users can drag
new files into the opened folder.
"""
self.view.open_directory_from_os(self.assets_dir("images"))
def show_fonts(self, event):
"""
Open the directory containing the font assets used by Pygame Zero.
This should open the host OS's file system explorer so users can drag
new files into the opened folder.
"""
self.view.open_directory_from_os(self.assets_dir("fonts"))
def show_sounds(self, event):
"""
Open the directory containing the sound assets used by Pygame Zero.
This should open the host OS's file system explorer so users can drag
new files into the opened folder.
"""
self.view.open_directory_from_os(self.assets_dir("sounds"))
def show_music(self, event):
"""
Open the directory containing the music assets used by Pygame Zero.
This should open the host OS's file system explorer so users can drag
new files into the opened folder.
"""
self.view.open_directory_from_os(self.assets_dir("music")) |
5,539 | deep deannotate | # sql/annotation.py
# Copyright (C) 2005-2017 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""The :class:`.Annotated` class and related routines; creates hash-equivalent
copies of SQL constructs which contain context-specific markers and
associations.
"""
from .. import util
from . import operators
class Annotated(object):
    """clones a ClauseElement and applies an 'annotations' dictionary.
    Unlike regular clones, this clone also mimics __hash__() and
    __cmp__() of the original element so that it takes its place
    in hashed collections.
    A reference to the original element is maintained, for the important
    reason of keeping its hash value current.  When GC'ed, the
    hash value may be reused, causing conflicts.
    .. note:: The rationale for Annotated producing a brand new class,
       rather than placing the functionality directly within ClauseElement,
       is **performance**.  The __hash__() method is absent on plain
       ClauseElement which leads to significantly reduced function call
       overhead, as the use of sets and dictionaries against ClauseElement
       objects is prevalent, but most are not "annotated".
    """

    def __new__(cls, *args):
        # Two construction modes: no args is the internal "clone"
        # constructor used by _with_annotations(); (element, values)
        # is the normal path, which swaps in the generated Annotated
        # subclass registered for the element's class.
        if not args:
            # clone constructor
            return object.__new__(cls)
        else:
            element, values = args
            # pull appropriate subclass from registry of annotated
            # classes
            try:
                cls = annotated_classes[element.__class__]
            except KeyError:
                cls = _new_annotation_type(element.__class__, cls)
            return object.__new__(cls)

    def __init__(self, element, values):
        # Shallow-copy the element's state so this object behaves like
        # the element itself; keep a strong reference to the original so
        # its hash value cannot be recycled (see class docstring).
        self.__dict__ = element.__dict__.copy()
        self.__element = element
        self._annotations = values
        self._hash = hash(element)

    def _annotate(self, values):
        # Merge ``values`` over the existing annotations and return a
        # new Annotated carrying the combined dictionary.
        _values = self._annotations.copy()
        _values.update(values)
        return self._with_annotations(_values)

    def _with_annotations(self, values):
        # Build a sibling Annotated sharing this object's state but with
        # a different annotations dict (uses the no-arg __new__ path).
        clone = self.__class__.__new__(self.__class__)
        clone.__dict__ = self.__dict__.copy()
        clone._annotations = values
        return clone

    def _deannotate(self, values=None, clone=True):
        # With no ``values`` given, unwrap back to the original element;
        # otherwise drop only the named annotation keys.  ``clone`` is
        # accepted for interface compatibility and not used here.
        if values is None:
            return self.__element
        else:
            _values = self._annotations.copy()
            for v in values:
                _values.pop(v, None)
            return self._with_annotations(_values)

    def _compiler_dispatch(self, visitor, **kw):
        # Dispatch using the *original* element's class so compilation
        # treats this wrapper exactly like the element it wraps.
        return self.__element.__class__._compiler_dispatch(
            self, visitor, **kw)

    @property
    def _constructor(self):
        # Delegate construction semantics to the wrapped element.
        return self.__element._constructor

    def _clone(self):
        clone = self.__element._clone()
        if clone is self.__element:
            # detect immutable, don't change anything
            return self
        else:
            # update the clone with any changes that have occurred
            # to this object's __dict__.
            clone.__dict__.update(self.__dict__)
            return self.__class__(clone, self._annotations)

    def __hash__(self):
        # Hash of the original element, captured at construction time.
        return self._hash

    def __eq__(self, other):
        if isinstance(self.__element, operators.ColumnOperators):
            # Column-like elements compare via SQL expression semantics.
            return self.__element.__class__.__eq__(self, other)
        else:
            return hash(other) == hash(self)
# hard-generate Annotated subclasses. this technique
# is used instead of on-the-fly types (i.e. type.__new__())
# so that the resulting objects are pickleable.
# Maps each ClauseElement subclass -> its generated Annotated subclass.
annotated_classes = {}
def _deep_annotate(element, annotations, exclude=None):
"""Deep copy the given ClauseElement, annotating each element
with the given annotations dictionary.
Elements within the exclude collection will be cloned but not annotated.
"""
def clone(elem):
if exclude and \
hasattr(elem, 'proxy_set') and \
elem.proxy_set.intersection(exclude):
newelem = elem._clone()
elif annotations != elem._annotations:
newelem = elem._annotate(annotations)
else:
newelem = elem
newelem._copy_internals(clone=clone)
return newelem
if element is not None:
element = clone(element)
return element
def _deep_deannotate(element, values=None):
    """Deep copy the given element, removing annotations.

    NOTE(review): the definition previously carried the masked placeholder
    name ``METHOD_NAME``; restored to SQLAlchemy's ``_deep_deannotate``.

    :param element: a ClauseElement (or None) to traverse and copy.
    :param values: optional collection of annotation keys; when given, only
        those keys are removed.
    """
    cloned = util.column_dict()

    def clone(elem):
        # if a values dict is given,
        # the elem must be cloned each time it appears,
        # as there may be different annotations in source
        # elements that are remaining. if totally
        # removing all annotations, can assume the same
        # slate...
        if values or elem not in cloned:
            newelem = elem._deannotate(values=values, clone=True)
            newelem._copy_internals(clone=clone)
            if not values:
                # memoize only in the remove-everything case, where
                # identical elements deannotate identically.
                cloned[elem] = newelem
            return newelem
        else:
            return cloned[elem]

    if element is not None:
        element = clone(element)
    return element
def _shallow_annotate(element, annotations):
"""Annotate the given ClauseElement and copy its internals so that
internal objects refer to the new annotated object.
Basically used to apply a "dont traverse" annotation to a
selectable, without digging throughout the whole
structure wasting time.
"""
element = element._annotate(annotations)
element._copy_internals()
return element
def _new_annotation_type(cls, base_cls):
    # Generate (and register) the Annotated subclass for ``cls``; returns
    # a cached class when one already exists.
    if issubclass(cls, Annotated):
        return cls
    elif cls in annotated_classes:
        return annotated_classes[cls]
    for super_ in cls.__mro__:
        # check if an Annotated subclass more specific than
        # the given base_cls is already registered, such
        # as AnnotatedColumnElement.
        if super_ in annotated_classes:
            base_cls = annotated_classes[super_]
            break
    annotated_classes[cls] = anno_cls = type(
        "Annotated%s" % cls.__name__,
        (base_cls, cls), {})
    # Expose the generated class at module level so pickle can locate it.
    globals()["Annotated%s" % cls.__name__] = anno_cls
    return anno_cls
def _prepare_annotations(target_hierarchy, base_cls):
    # Walk the subclass tree rooted at ``target_hierarchy`` and pre-generate
    # an Annotated variant for every class in it.
    pending = [target_hierarchy]
    while pending:
        current = pending.pop()
        pending.extend(current.__subclasses__())
        _new_annotation_type(current, base_cls)
5,540 | test if pie charts of costs is | import os
import shutil
import mock
import pandas as pd
import pytest
import multi_vector_simulator.A0_initialization as initializing
import multi_vector_simulator.F1_plotting as F1
from multi_vector_simulator.cli import main
from multi_vector_simulator.utils.constants import (
INPUT_FOLDER,
PLOTS_BUSSES,
PATHS_TO_PLOTS,
PLOTS_DEMANDS,
PLOTS_RESOURCES,
PLOTS_ES,
PLOTS_PERFORMANCE,
PLOTS_COSTS,
CSV_EXT,
)
from multi_vector_simulator.utils.constants_json_strings import (
LABEL,
OPTIMIZED_ADD_CAP,
PROJECT_NAME,
SCENARIO_NAME,
KPI,
KPI_SCALAR_MATRIX,
)
from _constants import (
EXECUTE_TESTS_ON,
TESTS_ON_MASTER,
TEST_REPO_PATH,
PATH_OUTPUT_FOLDER,
TEST_INPUT_DIRECTORY,
DUMMY_CSV_PATH,
ES_GRAPH,
)
# Minimal dict_values fixture: an empty "paths to plots" structure as
# produced by the simulator before any plots are registered.
dict_values = {
    PATHS_TO_PLOTS: {
        PLOTS_BUSSES: [],
        PLOTS_DEMANDS: [],
        PLOTS_RESOURCES: [],
        PLOTS_ES: [],
        PLOTS_PERFORMANCE: [],
        PLOTS_COSTS: [],
    }
}
SECTOR = "Electricity"
INTERVAL = 2
OUTPUT_PATH = os.path.join(TEST_REPO_PATH, "test_outputs")
PARSER = initializing.mvs_arg_parser()
TEST_INPUT_PATH = os.path.join(TEST_REPO_PATH, INPUT_FOLDER)
TEST_OUTPUT_PATH = os.path.join(TEST_REPO_PATH, "F1_outputs")
# Data for test_if_plot_of_all_energy_flows_for_all_sectors_are_stored_for_14_days
USER_INPUT = {PATH_OUTPUT_FOLDER: OUTPUT_PATH}
PROJECT_DATA = {PROJECT_NAME: "a_project", SCENARIO_NAME: "a_scenario"}
# NOTE: this read_csv executes at import time; collection fails if the
# fixture CSV is missing.
RESULTS_TIMESERIES = pd.read_csv(
    os.path.join(DUMMY_CSV_PATH, "plot_data_for_F1.csv"),
    sep=";",
    header=0,
    index_col=0,
)
# data for test_store_barchart_for_capacities
DICT_KPI = {
    KPI: {
        KPI_SCALAR_MATRIX: pd.DataFrame(
            {LABEL: ["asset_a", "asset_b"], OPTIMIZED_ADD_CAP: [1, 2]}
        )
    },
}
class TestNetworkx:
    """Benchmark-style tests: run the full simulation via ``main`` with
    mocked CLI args and check whether the energy-system graph image is
    written depending on the -png/-pdf flags."""

    def setup_class(self):
        """ """
        # Start from a clean output directory for the whole class.
        shutil.rmtree(TEST_OUTPUT_PATH, ignore_errors=True)

    @pytest.mark.skipif(
        EXECUTE_TESTS_ON not in (TESTS_ON_MASTER),
        reason="Benchmark test deactivated, set env variable "
        "EXECUTE_TESTS_ON to 'master' to run this test",
    )
    @mock.patch(
        "argparse.ArgumentParser.parse_args",
        return_value=PARSER.parse_args(
            [
                "-f",
                "-log",
                "warning",
                "-i",
                TEST_INPUT_PATH,
                "-o",
                TEST_OUTPUT_PATH,
                "-ext",
                CSV_EXT,
                "-png",
            ]
        ),
    )
    def test_if_energy_system_network_graph_is_stored_if_png_option(self, m_args):
        # With "-png" the ES graph file must be produced.
        main(overwrite=True, display_output="warning")
        assert os.path.exists(os.path.join(TEST_OUTPUT_PATH, ES_GRAPH)) is True

    @pytest.mark.skipif(
        # NOTE(review): the trailing "or True" makes this condition always
        # true, i.e. the test is permanently skipped -- confirm intentional.
        EXECUTE_TESTS_ON not in (TESTS_ON_MASTER) or True,
        reason="Benchmark test deactivated, set env variable "
        "EXECUTE_TESTS_ON to 'master' to run this test",
    )
    @mock.patch(
        "argparse.ArgumentParser.parse_args",
        return_value=PARSER.parse_args(
            [
                "-f",
                "-log",
                "warning",
                "-i",
                TEST_INPUT_PATH,
                "-o",
                TEST_OUTPUT_PATH,
                "-ext",
                CSV_EXT,
                "-pdf",
            ]
        ),
    )
    def test_if_energy_system_network_graph_is_stored_if_pdf_option(self, m_args):
        # With "-pdf" the ES graph file must also be produced.
        main(overwrite=True, display_output="warning")
        assert os.path.exists(os.path.join(TEST_OUTPUT_PATH, ES_GRAPH)) is True

    @pytest.mark.skipif(
        EXECUTE_TESTS_ON not in (TESTS_ON_MASTER),
        reason="Benchmark test deactivated, set env variable "
        "EXECUTE_TESTS_ON to 'master' to run this test",
    )
    @mock.patch(
        "argparse.ArgumentParser.parse_args",
        return_value=PARSER.parse_args(
            [
                "-f",
                "-log",
                "warning",
                "-i",
                TEST_INPUT_PATH,
                "-o",
                TEST_OUTPUT_PATH,
                "-ext",
                CSV_EXT,
            ]
        ),
    )
    def test_if_energy_system_network_graph_is_stored_if_no_pdf_nor_png_option(
        self, m_args
    ):
        # Without an image flag, no ES graph file should be written.
        main(overwrite=True, display_output="warning")
        assert os.path.exists(os.path.join(TEST_OUTPUT_PATH, ES_GRAPH)) is False

    def teardown_method(self):
        # Clean up between tests so each run checks a fresh directory.
        if os.path.exists(TEST_OUTPUT_PATH):
            shutil.rmtree(TEST_OUTPUT_PATH, ignore_errors=True)
class TestFileCreation:
    """Unit tests for individual F1 plotting helpers writing into a
    dedicated scratch directory."""

    def setup_class(self):
        """ """
        shutil.rmtree(OUTPUT_PATH, ignore_errors=True)
        os.mkdir(OUTPUT_PATH)

    def test_if_plot_of_all_energy_flows_for_all_sectors_are_stored_for_14_days(self):
        # NOTE(review): test body is disabled (kept for reference below);
        # currently a no-op placeholder.
        pass
        # F1.flows(
        #     dict_values, USER_INPUT, PROJECT_DATA, RESULTS_TIMESERIES, SECTOR, INTERVAL
        # )
        # assert (
        #     os.path.exists(
        #         os.path.join(
        #             OUTPUT_PATH, SECTOR + "_flows_" + str(INTERVAL) + "_days.png"
        #         )
        #     )
        #     is True
        # )

    @pytest.mark.skipif(
        F1.PLOTLY_INSTALLED is False,
        reason="Test deactivated because plotly package is not installed",
    )
    def METHOD_NAME(self):
        # NOTE(review): METHOD_NAME looks like a masked test name (likely
        # "test_if_pie_charts_of_costs_is_stored") -- confirm intended name.
        # Verifies that create_plotly_piechart_fig writes the requested file.
        F1.create_plotly_piechart_fig(
            title_of_plot="a_title",
            names=["costs1", "costs2"],
            values=[0.2, 0.8],
            file_name="filename.png",
            file_path=OUTPUT_PATH,
        )
        assert os.path.exists(os.path.join(OUTPUT_PATH, "filename.png")) is True

    def teardown_class(self):
        """ """
        if os.path.exists(OUTPUT_PATH):
            shutil.rmtree(OUTPUT_PATH, ignore_errors=True)
def test_get_color_is_cyclic():
    # Indexing one past the end of the palette must wrap back to the start.
    palette = [1, 2, 3]
    assert F1.get_color(3, palette) == palette[0]
def test_fixed_width_text_smaller_than_limit_returns_text():
    # Text shorter than the width limit must come back unchanged.
    txt = "12345"
    assert txt == F1.fixed_width_text(txt, char_num=10)
def test_fixed_width_text_longer_than_limit_returns_wrapped_text():
    # NOTE(review): this function previously re-used the name of the test
    # above it, so pytest only collected one of the two.  Renamed to a
    # distinct, accurate name: char_num=2 forces a newline every 2 chars.
    txt = "12345"
    assert F1.fixed_width_text(txt, char_num=2) == "12\n34\n5"
5,541 | on adapter property changed | from enum import Enum
from gettext import gettext as _
import logging
from typing import Callable, Any, Optional
from blueman.plugins.AppletPlugin import AppletPlugin
from blueman.bluez.Adapter import Adapter
from gi.repository import GLib
from blueman.plugins.applet.StatusIcon import StatusIconProvider
class PowerStateListener:
    """Interface for plugins that want to be notified when the global
    Bluetooth power state changes."""

    def on_power_state_changed(self, manager: "PowerManager", state: bool) -> None:
        return
class PowerStateHandler:
    """Interface for plugins that take part in deciding the current power
    state and in carrying out a requested state change."""

    def on_power_state_query(self) -> "PowerManager.State":
        # Default: no veto; report that power may be on.
        return PowerManager.State.ON

    def on_power_state_change_requested(self, manager: "PowerManager", state: bool,
                                        cb: Callable[[bool], None]) -> None:
        ...
class PowerManager(AppletPlugin, StatusIconProvider):
    __depends__ = ["Menu"]
    __unloadable__ = True
    __description__ = _("Controls Bluetooth adapter power states")
    __author__ = "Walmis"
    __icon__ = "gnome-power-manager-symbolic"

    class State(Enum):
        # Handler verdicts for on_power_state_query: ON allows power,
        # OFF requests power off, OFF_FORCED additionally disables the
        # menu toggle (e.g. a hardware kill switch).
        ON = 2
        OFF = 1
        OFF_FORCED = 0

    def on_load(self) -> None:
        # Menu entry toggling all adapters, plus the D-Bus surface used by
        # external callers to read/set the global Bluetooth status.
        self.item = self.parent.Plugins.Menu.add(self, 1, text=_("<b>Turn Bluetooth _Off</b>"), markup=True,
                                                 icon_name="bluetooth-disabled-symbolic",
                                                 tooltip=_("Turn off all adapters"),
                                                 callback=self.on_bluetooth_toggled)
        self.adapter_state = True
        self.current_state = True
        self.request_in_progress = False
        self._add_dbus_signal("BluetoothStatusChanged", "b")
        self._add_dbus_method("SetBluetoothStatus", ("b",), "", self.request_power_state)
        self._add_dbus_method("GetBluetoothStatus", (), "b", self.get_bluetooth_status)

    def on_unload(self) -> None:
        self.parent.Plugins.Menu.unregister(self)

    @property
    def CurrentState(self) -> bool:
        # Last state that was signalled via BluetoothStatusChanged.
        return self.current_state

    def on_manager_state_changed(self, state: bool) -> None:
        # When BlueZ comes (back) up, re-apply the power state shortly
        # after, once adapters are enumerable.
        if state:
            def timeout() -> bool:
                self.request_power_state(self.get_adapter_state())
                return False
            GLib.timeout_add(1000, timeout)

    def get_adapter_state(self) -> bool:
        # True only when at least one adapter exists and ALL are powered.
        adapters = self.parent.Manager.get_adapters()
        for adapter in adapters:
            if not adapter["Powered"]:
                return False
        return bool(adapters)

    def set_adapter_state(self, state: bool) -> None:
        try:
            logging.info(state)
            adapters = self.parent.Manager.get_adapters()
            for adapter in adapters:
                adapter.set("Powered", state)
            self.adapter_state = state
        except Exception:
            # Best-effort: a failing adapter must not break the applet.
            logging.error("Exception occurred", exc_info=True)

    class Callback:
        """Join/barrier for the asynchronous PowerStateHandler callbacks:
        counts completions, records any success, and gives up after a
        5 second timeout."""

        def __init__(self, parent: "PowerManager", state: bool):
            self.parent = parent
            self.num_cb = 0
            self.called = 0
            self.state = state
            self.success = False
            self.timer = GLib.timeout_add(5000, self.timeout)

        def __call__(self, result: bool) -> None:
            self.called += 1
            if result:
                self.success = True
            self.check()

        def check(self) -> None:
            # Only once every handler has answered do we flip the adapters
            # and re-evaluate the aggregate state.
            if self.called == self.num_cb:
                GLib.source_remove(self.timer)
                logging.info("callbacks done")
                self.parent.set_adapter_state(self.state)
                self.parent.update_power_state()
                self.parent.request_in_progress = False

        def timeout(self) -> bool:
            logging.info("Timeout reached while setting power state")
            self.parent.update_power_state()
            self.parent.request_in_progress = False
            return False

    def request_power_state(self, state: bool, force: bool = False) -> None:
        # Ask all PowerStateHandler plugins to change state; only one
        # request may be in flight at a time.
        if self.current_state != state or force:
            if not self.request_in_progress:
                self.request_in_progress = True
                logging.info(f"Requesting {state}")
                cb = PowerManager.Callback(self, state)
                handlers = list(self.parent.Plugins.get_loaded_plugins(PowerStateHandler))
                for handler in handlers:
                    handler.on_power_state_change_requested(self, state, cb)
                cb.num_cb = len(handlers)
                # Handles the zero-handler case (and any that answered
                # synchronously before num_cb was known).
                cb.check()
            else:
                logging.info("Another request in progress")

    # queries other plugins to determine the current power state
    def update_power_state(self) -> None:
        rets = [plugin.on_power_state_query()
                for plugin in self.parent.Plugins.get_loaded_plugins(PowerStateHandler)]
        # off: any handler votes non-ON, or the adapters are unpowered.
        off = any(x != self.State.ON for x in rets) or not self.adapter_state
        foff = self.State.OFF_FORCED in rets
        on = self.State.ON in rets or self.adapter_state
        new_state = True
        if foff or off:
            self.item.set_text(_("<b>Turn Bluetooth _On</b>"), markup=True)
            self.item.set_icon_name("bluetooth-symbolic")
            self.item.set_tooltip(_("Turn on all adapters"))
            # A forced-off state disables the toggle entirely.
            self.item.set_sensitive(not foff)
            new_state = False
        elif on and not self.current_state:
            self.item.set_text(_("<b>Turn Bluetooth _Off</b>"), markup=True)
            self.item.set_icon_name("bluetooth-disabled-symbolic")
            self.item.set_tooltip(_("Turn off all adapters"))
            self.item.set_sensitive(True)
            new_state = True
        logging.info(f"off {off} | foff {foff} | on {on} | current state {self.current_state} | new state {new_state}")
        if self.current_state != new_state:
            # Broadcast the change over D-Bus and to listener plugins.
            logging.info(f"Signalling {new_state}")
            self.current_state = new_state
            self._emit_dbus_signal("BluetoothStatusChanged", new_state)
            for plugin in self.parent.Plugins.get_loaded_plugins(PowerStateListener):
                plugin.on_power_state_changed(self, new_state)
            if "StatusIcon" in self.parent.Plugins.get_loaded():
                if new_state:
                    self.parent.Plugins.StatusIcon.set_tooltip_title(_("Bluetooth Enabled"))
                    self.parent.Plugins.StatusIcon.query_visibility(delay_hiding=True)
                else:
                    self.parent.Plugins.StatusIcon.set_tooltip_title(_("Bluetooth Disabled"))
                    self.parent.Plugins.StatusIcon.query_visibility()
                self.parent.Plugins.StatusIcon.icon_should_change()

    def get_bluetooth_status(self) -> bool:
        return self.current_state

    def METHOD_NAME(self, _path: str, key: str, value: Any) -> None:
        # NOTE(review): METHOD_NAME appears to be a masked identifier for
        # the adapter property-change hook (likely
        # ``on_adapter_property_changed``) -- confirm before renaming.
        if key == "Powered":
            if value and not self.current_state:
                logging.warning("adapter powered on while in off state, turning bluetooth on")
                self.request_power_state(True)
            self.adapter_state = self.get_adapter_state()
            self.update_power_state()

    def on_bluetooth_toggled(self) -> None:
        self.request_power_state(not self.current_state)

    def on_status_icon_query_icon(self) -> Optional[str]:
        # Show the "disabled" icon variant while Bluetooth is off.
        return "blueman-disabled" if not self.get_bluetooth_status() else None
def on_adapter_added(self, path: str) -> None:
adapter = Adapter(obj_path=path)
adapter.set("Powered", self.adapter_state) |
5,542 | tuple depth | '''
See COPYRIGHT.md for copyright information.
'''
from arelle import ViewFile, XbrlConst, XmlUtil
from collections import defaultdict
def viewFacts(modelXbrl, outfile, lang=None, labelrole=None, cols=None):
    # Entry point: render the fact-list view of ``modelXbrl`` to ``outfile``.
    modelXbrl.modelManager.showStatus(_("viewing facts"))
    view = ViewFacts(modelXbrl, outfile, labelrole, lang, cols)
    view.view(modelXbrl.modelDocument)
    view.close()
# Default column widths (in characters) for each supported fact-list
# column; also acts as the set of recognized column names (see view()).
COL_WIDTHS = {
    "Concept": 80, # same as label
    "Label": 80,
    "Name": 40,
    "LocalName": 40,
    "Namespace": 40,
    "contextRef": 40,
    "unitRef": 40,
    "Dec": 5,
    "Prec": 5,
    "Lang": 6,
    "Value": 40,
    "EntityScheme": 40,
    "EntityIdentifier": 40,
    "Period": 40,
    "Dimensions": 60,
    # concept properties
    "ID": 40,
    "Type": 32,
    "PeriodType": 16,
    "Balance": 16,
    "Documentation": 100
    }
class ViewFacts(ViewFile.View):
    def __init__(self, modelXbrl, outfile, labelrole, lang, cols):
        super(ViewFacts, self).__init__(modelXbrl, outfile, "Fact List", lang)
        # Preferred label role used for the Concept/Label columns.
        self.labelrole = labelrole
        # Requested column identifiers; normalized/validated in view().
        self.cols = cols
    def view(self, modelDocument):
        # Normalize and validate the requested columns, size the header,
        # then emit one row per fact.
        if self.cols:
            # Accept a comma- or whitespace-separated string as well as a list.
            if isinstance(self.cols,str): self.cols = self.cols.replace(',',' ').split()
            unrecognizedCols = []
            for col in self.cols:
                if col not in COL_WIDTHS:
                    unrecognizedCols.append(col)
            if unrecognizedCols:
                self.modelXbrl.error("arelle:unrecognizedFactListColumn",
                                     _("Unrecognized columns: %(cols)s"),
                                     modelXbrl=self.modelXbrl, cols=','.join(unrecognizedCols))
            if "Period" in self.cols:
                # "Period" expands into two concrete columns.
                i = self.cols.index("Period")
                self.cols[i:i+1] = ["Start", "End/Instant"]
        else:
            self.cols = ["Label","contextRef","unitRef","Dec","Prec","Lang","Value"]
        col0 = self.cols[0]
        if col0 not in ("Concept", "Label", "Name", "LocalName"):
            self.modelXbrl.error("arelle:firstFactListColumn",
                                 _("First column must be Concept, Label, Name or LocalName: %(col1)s"),
                                 modelXbrl=self.modelXbrl, col1=col0)
        self.isCol0Label = col0 in ("Concept", "Label")
        self.maxNumDims = 1
        # Pre-scan facts for tree depth and widest dimension count.
        self.METHOD_NAME(self.modelXbrl.facts, 0)
        if "Dimensions" == self.cols[-1]:
            # Dimensions occupy a variable number of trailing columns.
            lastColSpan = self.maxNumDims
        else:
            lastColSpan = None
        self.addRow(self.cols, asHeader=True, lastColSpan=lastColSpan)
        self.setColWidths([COL_WIDTHS.get(col, 8) for col in self.cols])
        self.viewFacts(self.modelXbrl.facts, 0)
    def METHOD_NAME(self, modelFacts, indentedCol):
        # NOTE(review): METHOD_NAME appears to be a masked identifier
        # (likely ``tupleDepth``) -- confirm before renaming; it is also
        # called from view() above.
        # Recursively tracks the deepest tuple-nesting column and the
        # largest number of dimension columns (qname + member per
        # dimension) needed by any fact.
        if indentedCol > self.treeCols: self.treeCols = indentedCol
        for modelFact in modelFacts:
            if modelFact.context is not None:
                numDims = len(modelFact.context.qnameDims) * 2
                if numDims > self.maxNumDims: self.maxNumDims = numDims
            self.METHOD_NAME(modelFact.modelTupleFacts, indentedCol + 1)
def viewFacts(self, modelFacts, indent):
for modelFact in modelFacts:
concept = modelFact.concept
xmlRowElementName = 'item'
attr = {"name": str(modelFact.qname)}
lang = ""
if concept is not None and self.isCol0Label:
lbl = concept.label(preferredLabel=self.labelrole, lang=self.lang, linkroleHint=XbrlConst.defaultLinkRole)
xmlCol0skipElt = False # provide label as a row element
if concept.baseXsdType in ("string", "normalizedString"):
lang = modelFact.xmlLang
else:
lbl = (modelFact.qname or modelFact.prefixedName) # defective inline facts may have no qname
xmlCol0skipElt = True # name is an attribute, don't do it also as an element
cols = [lbl]
if concept is not None:
if modelFact.isItem:
for col in self.cols[1:]:
if col in ("Concept", "Label"): # label or name may be 2nd to nth col if name or label is 1st col
cols.append( concept.label(preferredLabel=self.labelrole, lang=self.lang) )
elif col == "Name":
cols.append( modelFact.qname )
elif col == "LocalName":
cols.append( modelFact.qname.localName )
elif col == "Namespace":
cols.append( modelFact.qname.namespaceURI )
elif col == "contextRef":
cols.append( modelFact.contextID )
elif col == "unitRef":
cols.append( modelFact.unitID )
elif col == "Dec":
cols.append( modelFact.decimals )
elif col == "Prec":
cols.append( modelFact.precision )
elif col == "Lang":
cols.append( lang )
elif col == "Value":
cols.append( "(nil)" if modelFact.xsiNil == "true" else modelFact.effectiveValue.strip() )
elif col == "EntityScheme":
cols.append( modelFact.context.entityIdentifier[0] )
elif col == "EntityIdentifier":
cols.append( modelFact.context.entityIdentifier[1] )
elif col == "Start":
cols.append( XmlUtil.text(XmlUtil.child(modelFact.context.period, XbrlConst.xbrli, "startDate")) )
elif col == "End/Instant":
cols.append( XmlUtil.text(XmlUtil.child(modelFact.context.period, XbrlConst.xbrli, ("endDate","instant"))) )
elif col == "Dimensions":
for dimQname in sorted(modelFact.context.qnameDims.keys()):
cols.append( str(dimQname) )
cols.append( str(modelFact.context.dimMemberQname(dimQname)) )
elif col == "ID":
cols.append( concept.id )
elif col == "Type":
cols.append( concept.typeQname )
elif col == "PeriodType":
cols.append( concept.periodType )
elif col == "Balance":
cols.append( concept.balance )
elif col == "Documentation":
cols.append( concept.label(preferredLabel=XbrlConst.documentationLabel, fallbackToQname=False, lang=self.lang, strip=True, linkroleHint=XbrlConst.defaultLinkRole) )
elif modelFact.isTuple:
xmlRowElementName = 'tuple'
self.addRow(cols, treeIndent=indent, xmlRowElementName=xmlRowElementName, xmlRowEltAttr=attr, xmlCol0skipElt=xmlCol0skipElt)
self.viewFacts(modelFact.modelTupleFacts, indent + 1) |
5,543 | yolo collate fn | # Copyright (c) 2021 - present / Neuralmagic, Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Helper classes and functions for PyTorch detection data loaders
"""
import random
from typing import Any, Callable, List, Tuple
import torch
from PIL import Image
from torch import Tensor
try:
from torchvision.transforms import functional as torchvision_functional
torchvision_import_error = None
except Exception as torchvision_error:
torchvision_functional = None
torchvision_import_error = torchvision_error
from sparseml.pytorch.utils import ssd_random_crop
# Public API of this module.
__all__ = [
    "AnnotatedImageTransforms",
    "ssd_random_crop_image_and_annotations",
    "random_horizontal_flip_image_and_annotations",
    "yolo_collate_fn",
    "ssd_collate_fn",
    "bounding_box_and_labels_to_yolo_fmt",
]
class AnnotatedImageTransforms(object):
    """
    Chains transforms that take two parameters (an image and its object
    detection annotations), threading the output of each transform into
    the next.

    :param transforms: List of transformations that take an image and
        annotation as their parameters.
    """

    def __init__(self, transforms: List):
        self._transforms = transforms

    @property
    def transforms(self) -> List[Callable]:
        """
        :return: a list of the transforms performed by this object
        """
        return self._transforms

    def __call__(self, image, annotations):
        # Apply each transform in order, feeding results forward.
        for transform_fn in self._transforms:
            image, annotations = transform_fn(image, annotations)
        return image, annotations
def ssd_random_crop_image_and_annotations(
    image: Image.Image, annotations: Tuple[Tensor, Tensor]
) -> Tuple[Image.Image, Tuple[Tensor, Tensor]]:
    """
    Wraps sparseml.pytorch.utils.ssd_random_crop to work in the
    AnnotatedImageTransforms pipeline.

    :param image: the image to crop
    :param annotations: a tuple of bounding boxes and their labels for this image
    :return: A tuple of the cropped image and annotations
    """
    boxes, labels = annotations
    # Skip the crop entirely for images that contain no objects.
    if labels.numel() > 0:
        image, boxes, labels = ssd_random_crop(image, boxes, labels)
    return image, (boxes, labels)
def random_horizontal_flip_image_and_annotations(
    image: Image.Image, annotations: Tuple[Tensor, Tensor], p: float = 0.5
) -> Tuple[Image.Image, Tuple[Tensor, Tensor]]:
    """
    Performs a horizontal flip on given image and bounding boxes with probability p.

    :param image: the image to randomly flip
    :param annotations: a tuple of bounding boxes and their labels for this image
    :param p: the probability to flip with. Default is 0.5
    :return: A tuple of the randomly flipped image and annotations
    """
    # Surface the deferred torchvision import failure at call time.
    if torchvision_import_error is not None:
        raise torchvision_import_error
    boxes, labels = annotations
    if random.random() < p:
        if labels.numel() > 0:
            # Box coordinates appear to be normalized to [0, 1]; mirroring
            # swaps and reflects the x extents. (assumes normalized
            # coords -- TODO confirm against the dataset pipeline)
            boxes[:, [0, 2]] = 1.0 - boxes[:, [2, 0]]  # flip width dimensions
        image = torchvision_functional.hflip(image)
    return image, (boxes, labels)
def yolo_collate_fn(
    batch: List[Any],
) -> Tuple[Tensor, Tuple[Tensor, List[Any]]]:
    """
    Collate function to be used for creating a DataLoader with values for Yolo
    model input.

    NOTE(review): restores the ``yolo_collate_fn`` name declared in this
    module's ``__all__`` (the definition carried a masked placeholder name);
    the return annotation is corrected to match the actual value shape.

    :param batch: a batch of (image, (target, annotation)) data points, with
        targets transformed by bounding_box_and_labels_to_yolo_fmt
    :return: (stacked images, (targets with a leading image-index column
        prepended, the untouched original annotations))
    """
    images = []
    targets = []
    annotations = []
    for idx, (image, (target, annotation)) in enumerate(batch):
        images.append(image.unsqueeze(0))
        # Prepend the image's index within the batch so each target row can
        # be matched back to its image after concatenation.
        img_label = torch.ones(target.size(0), 1) * idx
        targets.append(torch.cat((img_label, target), 1))
        annotations.append(annotation)
    images = torch.cat(images, 0)
    targets = torch.cat(targets, 0)
    return images, (targets, annotations)
def ssd_collate_fn(
    batch: List[Any],
) -> Tuple[Tensor, Tuple[Tensor, Tensor, List[Tuple[Tensor, Tensor]]]]:
    """
    Collate function to be used for creating a DataLoader with values
    transformed by encode_annotation_bounding_boxes.

    :param batch: a batch of data points transformed by encode_annotation_bounding_boxes
    :return: the batch stacked as tensors for all values except for the
        original annotations
    """
    image_batch = []
    box_batch = []
    label_batch = []
    original_annotations = []
    for image, (enc_box, enc_label, annotation) in batch:
        image_batch.append(image.unsqueeze(0))
        box_batch.append(enc_box.unsqueeze(0))
        label_batch.append(enc_label.unsqueeze(0))
        original_annotations.append(annotation)
    return (
        torch.cat(image_batch, 0),
        (
            torch.cat(box_batch, 0),
            torch.cat(label_batch, 0),
            original_annotations,
        ),
    )
def bounding_box_and_labels_to_yolo_fmt(annotations):
    """
    Convert (boxes, labels) with corner-format boxes (x1, y1, x2, y2) into
    Yolo target rows of (label, center_x, center_y, width, height).

    NOTE(review): the original line carried a stray trailing token from the
    data extraction that broke the module; body otherwise unchanged, with a
    docstring added.

    :param annotations: tuple of an N x 4 boxes tensor and an N labels tensor
    :return: N x 5 tensor of Yolo-format rows; an empty (0, 5) tensor when
        there are no boxes
    """
    boxes, labels = annotations
    if boxes.numel() == 0:
        # No objects in this image: empty target with the expected width.
        return torch.zeros(0, 5)
    cx = (boxes[:, 0] + boxes[:, 2]) / 2
    cy = (boxes[:, 1] + boxes[:, 3]) / 2
    w = boxes[:, 2] - boxes[:, 0]
    h = boxes[:, 3] - boxes[:, 1]
    return torch.stack((labels, cx, cy, w, h)).T
5,544 | xml close | ###############################################################################
#
# XMLwriter - A base class for XlsxWriter classes.
#
# Used in conjunction with XlsxWriter.
#
# SPDX-License-Identifier: BSD-2-Clause
# Copyright 2013-2023, John McNamara, jmcnamara@cpan.org
#
# Standard packages.
import re
from io import StringIO
class XMLwriter(object):
"""
Simple XML writer class.
"""
    def __init__(self):
        self.fh = None
        # Characters that must be escaped in XML data and attributes.
        self.escapes = re.compile('["&<>\n]')
        # True when this object opened self.fh itself (and must close it).
        self.internal_fh = False

    def _set_filehandle(self, filehandle):
        # Set the writer filehandle directly. Mainly for testing.
        # Caller retains ownership of the handle.
        self.fh = filehandle
        self.internal_fh = False

    def _set_xml_writer(self, filename):
        # Set the XML writer filehandle for the object.
        # Accepts either an in-memory StringIO (caller-owned) or a
        # filename to open (writer-owned, closed on cleanup).
        if isinstance(filename, StringIO):
            self.internal_fh = False
            self.fh = filename
        else:
            self.internal_fh = True
            self.fh = open(filename, "w", encoding="utf-8")
def METHOD_NAME(self):
# Close the XML filehandle if we created it.
if self.internal_fh:
self.fh.close()
    def _xml_declaration(self):
        # Write the XML declaration.
        self.fh.write("""<?xml version="1.0" encoding="UTF-8" standalone="yes"?>\n""")

    def _xml_start_tag(self, tag, attributes=[]):
        # Write an XML start tag with optional attributes.
        # (The shared default list is never mutated here, so it is safe.)
        for key, value in attributes:
            value = self._escape_attributes(value)
            tag += ' %s="%s"' % (key, value)
        self.fh.write("<%s>" % tag)

    def _xml_start_tag_unencoded(self, tag, attributes=[]):
        # Write an XML start tag with optional, unencoded, attributes.
        # This is a minor speed optimization for elements that don't
        # need encoding.
        for key, value in attributes:
            tag += ' %s="%s"' % (key, value)
        self.fh.write("<%s>" % tag)

    def _xml_end_tag(self, tag):
        # Write an XML end tag.
        self.fh.write("</%s>" % tag)

    def _xml_empty_tag(self, tag, attributes=[]):
        # Write an empty XML tag with optional attributes.
        for key, value in attributes:
            value = self._escape_attributes(value)
            tag += ' %s="%s"' % (key, value)
        self.fh.write("<%s/>" % tag)

    def _xml_empty_tag_unencoded(self, tag, attributes=[]):
        # Write an empty XML tag with optional, unencoded, attributes.
        # This is a minor speed optimization for elements that don't
        # need encoding.
        for key, value in attributes:
            tag += ' %s="%s"' % (key, value)
        self.fh.write("<%s/>" % tag)
    def _xml_data_element(self, tag, data, attributes=[]):
        # Write an XML element containing data with optional attributes.
        # The unmodified tag is kept for the closing element.
        end_tag = tag
        for key, value in attributes:
            value = self._escape_attributes(value)
            tag += ' %s="%s"' % (key, value)
        data = self._escape_data(data)
        self.fh.write("<%s>%s</%s>" % (tag, data, end_tag))

    def _xml_string_element(self, index, attributes=[]):
        # Optimized tag writer for <c> cell string elements in the inner loop.
        # ``index`` is the position of the string in the shared strings table.
        attr = ""
        for key, value in attributes:
            value = self._escape_attributes(value)
            attr += ' %s="%s"' % (key, value)
        self.fh.write("""<c%s t="s"><v>%d</v></c>""" % (attr, index))

    def _xml_si_element(self, string, attributes=[]):
        # Optimized tag writer for shared strings <si> elements.
        attr = ""
        for key, value in attributes:
            value = self._escape_attributes(value)
            attr += ' %s="%s"' % (key, value)
        string = self._escape_data(string)
        self.fh.write("""<si><t%s>%s</t></si>""" % (attr, string))

    def _xml_rich_si_element(self, string):
        # Optimized tag writer for shared strings <si> rich string elements.
        # ``string`` is assumed to be pre-formatted XML -- it is not escaped.
        self.fh.write("""<si>%s</si>""" % string)
def _xml_number_element(self, number, attributes=[]):
# Optimized tag writer for <c> cell number elements in the inner loop.
attr = ""
for key, value in attributes:
value = self._escape_attributes(value)
attr += ' %s="%s"' % (key, value)
self.fh.write("""<c%s><v>%.16G</v></c>""" % (attr, number))
def _xml_formula_element(self, formula, result, attributes=[]):
# Optimized tag writer for <c> cell formula elements in the inner loop.
attr = ""
for key, value in attributes:
value = self._escape_attributes(value)
attr += ' %s="%s"' % (key, value)
self.fh.write(
"""<c%s><f>%s</f><v>%s</v></c>"""
% (attr, self._escape_data(formula), self._escape_data(result))
)
def _xml_inline_string(self, string, preserve, attributes=[]):
# Optimized tag writer for inlineStr cell elements in the inner loop.
attr = ""
t_attr = ""
# Set the <t> attribute to preserve whitespace.
if preserve:
t_attr = ' xml:space="preserve"'
for key, value in attributes:
value = self._escape_attributes(value)
attr += ' %s="%s"' % (key, value)
string = self._escape_data(string)
self.fh.write(
"""<c%s t="inlineStr"><is><t%s>%s</t></is></c>""" % (attr, t_attr, string)
)
def _xml_rich_inline_string(self, string, attributes=[]):
# Optimized tag writer for rich inlineStr in the inner loop.
attr = ""
for key, value in attributes:
value = self._escape_attributes(value)
attr += ' %s="%s"' % (key, value)
self.fh.write("""<c%s t="inlineStr"><is>%s</is></c>""" % (attr, string))
def _escape_attributes(self, attribute):
# Escape XML characters in attributes.
try:
if not self.escapes.search(attribute):
return attribute
except TypeError:
return attribute
attribute = (
attribute.replace("&", "&")
.replace('"', """)
.replace("<", "<")
.replace(">", ">")
.replace("\n", "
")
)
return attribute
def _escape_data(self, data):
# Escape XML characters in data sections of tags. Note, this
# is different from _escape_attributes() in that double quotes
# are not escaped by Excel.
try:
if not self.escapes.search(data):
return data
except TypeError:
return data
data = data.replace("&", "&").replace("<", "<").replace(">", ">")
return data |
5,545 | create app | """Fixtures and testing utilities for :pypi:`pytest <pytest>`."""
import os
from contextlib import contextmanager
from typing import TYPE_CHECKING, Any, Mapping, Sequence, Union # noqa
import pytest
if TYPE_CHECKING:
from celery import Celery
from ..worker import WorkController
else:
Celery = WorkController = object
# Set the NO_WORKER environment variable (to any non-empty value) to make
# the worker fixtures below skip starting an embedded worker.
NO_WORKER = os.environ.get('NO_WORKER')
# pylint: disable=redefined-outer-name
# Well, they're called fixtures....
def pytest_configure(config):
    """Register the ``celery`` marker with pytest.

    Declaring the marker in the [markers] section keeps pytest 4.5+
    from warning about an unknown marker and documents it under
    ``pytest --markers``.
    """
    config.addinivalue_line(
        "markers", "celery(**overrides): override celery configuration for a test case"
    )
@contextmanager
def METHOD_NAME(enable_logging=False,
                use_trap=False,
                parameters=None,
                **config):
    # type: (Any, Any, Any, **Any) -> Celery
    """Context manager building a configured Celery test app for fixtures.

    The app is torn down again when the context exits.
    """
    # Imported lazily so merely importing this plugin stays cheap.
    from .testing.app import TestApp, setup_default_app
    app_parameters = parameters or {}
    test_app = TestApp(
        set_as_current=False,
        enable_logging=enable_logging,
        config=config,
        **app_parameters
    )
    with setup_default_app(test_app, use_trap=use_trap):
        yield test_app
@pytest.fixture(scope='session')
def use_celery_app_trap():
    # type: () -> bool
    """You can override this fixture to enable the app trap.
    The app trap raises an exception whenever something attempts
    to use the current or default apps.
    Disabled by default; return ``True`` from an override to enable it.
    """
    return False
@pytest.fixture(scope='session')
def celery_session_app(request,
                       celery_config,
                       celery_parameters,
                       celery_enable_logging,
                       use_celery_app_trap):
    # type: (Any, Any, Any, Any, Any) -> Celery
    """Session Fixture: Return app for session fixtures."""
    # Per-test ``@pytest.mark.celery(**overrides)`` settings take
    # precedence over the session-wide ``celery_config`` fixture.
    mark = request.node.get_closest_marker('celery')
    config = dict(celery_config, **mark.kwargs if mark else {})
    with METHOD_NAME(enable_logging=celery_enable_logging,
                     use_trap=use_celery_app_trap,
                     parameters=celery_parameters,
                     **config) as app:
        # Unless the trap is active, make the app the current/default one
        # so tasks defined against the default app bind to it.
        if not use_celery_app_trap:
            app.set_default()
            app.set_current()
        yield app
@pytest.fixture(scope='session')
def celery_session_worker(
        request,  # type: Any
        celery_session_app,  # type: Celery
        celery_includes,  # type: Sequence[str]
        celery_class_tasks,  # type: str
        celery_worker_pool,  # type: Any
        celery_worker_parameters,  # type: Mapping[str, Any]
):
    # type: (...) -> WorkController
    """Session Fixture: Start worker that lives throughout test suite.

    Task modules from ``celery_includes`` and class-based tasks from
    ``celery_class_tasks`` are registered before the worker starts.
    """
    # NOTE(review): when NO_WORKER is set this generator never yields,
    # so requesting the fixture with NO_WORKER set will error — confirm
    # this is the intended behaviour.
    from .testing import worker
    if not NO_WORKER:
        for module in celery_includes:
            celery_session_app.loader.import_task_module(module)
        for class_task in celery_class_tasks:
            celery_session_app.register_task(class_task)
        with worker.start_worker(celery_session_app,
                                 pool=celery_worker_pool,
                                 **celery_worker_parameters) as w:
            yield w
@pytest.fixture(scope='session')
def celery_enable_logging():
    # type: () -> bool
    """You can override this fixture to enable logging.
    Defaults to ``False`` so the test app stays quiet.
    """
    return False
@pytest.fixture(scope='session')
def celery_includes():
    # type: () -> Sequence[str]
    """You can override this include modules when a worker start.
    You can have this return a list of module names to import,
    these can be task modules, modules registering signals, and so on.
    Defaults to no extra modules.
    """
    return ()
@pytest.fixture(scope='session')
def celery_worker_pool():
    # type: () -> Union[str, Any]
    """You can override this fixture to set the worker pool.
    The "solo" pool is used by default, but you can set this to
    return e.g. "prefork".
    """
    return 'solo'
@pytest.fixture(scope='session')
def celery_config():
    # type: () -> Mapping[str, Any]
    """Redefine this fixture to configure the test Celery app.
    The config returned by your fixture will then be used
    to configure the :func:`celery_app` fixture.
    Defaults to an empty mapping (Celery's own defaults apply).
    """
    return {}
@pytest.fixture(scope='session')
def celery_parameters():
    # type: () -> Mapping[str, Any]
    """Redefine this fixture to change the init parameters of test Celery app.
    The dict returned by your fixture will then be used
    as parameters when instantiating :class:`~celery.Celery`.
    Defaults to an empty mapping.
    """
    return {}
@pytest.fixture(scope='session')
def celery_worker_parameters():
    # type: () -> Mapping[str, Any]
    """Redefine this fixture to change the init parameters of Celery workers.
    This can be used e. g. to define queues the worker will consume tasks from.
    The dict returned by your fixture will then be used
    as parameters when instantiating :class:`~celery.worker.WorkController`.
    Defaults to an empty mapping.
    """
    return {}
@pytest.fixture()
def celery_app(request,
               celery_config,
               celery_parameters,
               celery_enable_logging,
               use_celery_app_trap):
    """Fixture creating a Celery application instance.

    Function-scoped counterpart of :func:`celery_session_app`; honours
    per-test ``@pytest.mark.celery(**overrides)`` settings.
    """
    mark = request.node.get_closest_marker('celery')
    config = dict(celery_config, **mark.kwargs if mark else {})
    with METHOD_NAME(enable_logging=celery_enable_logging,
                     use_trap=use_celery_app_trap,
                     parameters=celery_parameters,
                     **config) as app:
        yield app
@pytest.fixture(scope='session')
def celery_class_tasks():
    """Redefine this fixture to register tasks with the test Celery app.
    Return a list of class-based task instances; defaults to none.
    """
    return []
@pytest.fixture()
def celery_worker(request,
                  celery_app,
                  celery_includes,
                  celery_worker_pool,
                  celery_worker_parameters):
    # type: (Any, Celery, Sequence[str], str, Any) -> WorkController
    """Fixture: Start worker in a thread, stop it when the test returns.

    Skipped entirely when the NO_WORKER environment variable is set.
    """
    from .testing import worker
    if not NO_WORKER:
        for module in celery_includes:
            celery_app.loader.import_task_module(module)
        with worker.start_worker(celery_app,
                                 pool=celery_worker_pool,
                                 **celery_worker_parameters) as w:
            yield w
@pytest.fixture()
def depends_on_current_app(celery_app):
    """Fixture that sets app as current.
    Request this fixture from tests that rely on ``current_app``.
    """
    celery_app.set_current()
5,546 | verify urls | import os
import traceback
from requests import ConnectionError, Timeout
from toolset.utils.output_helper import log
# Cross-platform colored text
from colorama import Fore, Style
class FrameworkTest:
    '''
    A single framework test implementation: knows how to build its Docker
    image, run it, and verify each of its configured test types.
    '''
    def __init__(self, name, directory, benchmarker, runTests,
                 args):
        '''
        Constructor
        '''
        self.name = name
        self.directory = directory
        self.benchmarker = benchmarker
        self.runTests = runTests
        # Metadata defaults below are normally overwritten by `args`
        # (parsed from the test's benchmark_config.json) via the
        # __dict__.update() call at the end.
        self.approach = ""
        self.classification = ""
        self.database = ""
        self.framework = ""
        self.language = ""
        self.orm = ""
        self.platform = ""
        self.webserver = ""
        self.os = ""
        self.database_os = ""
        self.display_name = ""
        self.notes = ""
        self.port = ""
        self.versus = ""
        self.__dict__.update(args)
    ##########################################################################################
    # Public Methods
    ##########################################################################################
    def start(self):
        '''
        Start the test implementation
        '''
        # Per-test log directories: <results>/<test>/build and .../run.
        test_log_dir = os.path.join(self.benchmarker.results.directory, self.name.lower())
        build_log_dir = os.path.join(test_log_dir, 'build')
        run_log_dir = os.path.join(test_log_dir, 'run')
        # makedirs raises OSError if the directory already exists; that is fine.
        try:
            os.makedirs(build_log_dir)
        except OSError:
            pass
        try:
            os.makedirs(run_log_dir)
        except OSError:
            pass
        # A non-zero build result aborts the run (None signals failure to
        # the caller).
        result = self.benchmarker.docker_helper.build(self, build_log_dir)
        if result != 0:
            return None
        return self.benchmarker.docker_helper.run(self, run_log_dir)
    def is_accepting_requests(self):
        '''
        Determines whether this test implementation is up and accepting
        requests.
        '''
        # Probe using the URL of an arbitrary configured test type.
        test_type = None
        for any_type in self.runTests:
            test_type = any_type
            break
        url = "http://%s:%s%s" % (self.benchmarker.config.server_host,
                                  self.port,
                                  self.runTests[test_type].get_url())
        return self.benchmarker.docker_helper.test_client_connection(url)
    def METHOD_NAME(self):
        '''
        Verifys each of the URLs for this test. This will simply curl the URL and
        check for it's return status. For each url, a flag will be set on this
        object for whether or not it passed.
        Returns True if all verifications succeeded
        '''
        log_path = os.path.join(self.benchmarker.results.directory, self.name.lower())
        # NOTE(review): this assignment is dead — `result` is re-initialised
        # to True again just before the loop below.
        result = True
        def verify_type(test_type):
            # Verify one test type, writing a verification.txt transcript.
            verificationPath = os.path.join(log_path, test_type)
            try:
                os.makedirs(verificationPath)
            except OSError:
                pass
            with open(os.path.join(verificationPath, 'verification.txt'),
                      'w') as verification:
                test = self.runTests[test_type]
                log("VERIFYING %s" % test_type.upper(),
                    file=verification,
                    border='-',
                    color=Fore.WHITE + Style.BRIGHT)
                base_url = "http://%s:%s" % (
                    self.benchmarker.config.server_host, self.port)
                try:
                    # Verifies headers from the server. This check is made from the
                    # App Server using Pythons requests module. Will do a second check from
                    # the client to make sure the server isn't only accepting connections
                    # from localhost on a multi-machine setup.
                    results = test.verify(base_url)
                    # Now verify that the url is reachable from the client machine, unless
                    # we're already failing
                    if not any(result == 'fail'
                               for (result, reason, url) in results):
                        self.benchmarker.docker_helper.test_client_connection(
                            base_url + test.get_url())
                except ConnectionError as e:
                    results = [('fail', "Server did not respond to request",
                                base_url)]
                    log("Verifying test %s for %s caused an exception: %s" %
                        (test_type, self.name, e),
                        color=Fore.RED)
                except Timeout as e:
                    results = [('fail', "Connection to server timed out",
                                base_url)]
                    log("Verifying test %s for %s caused an exception: %s" %
                        (test_type, self.name, e),
                        color=Fore.RED)
                except Exception as e:
                    results = [('fail', """Caused Exception in TFB
                        This almost certainly means your return value is incorrect,
                        but also that you have found a bug. Please submit an issue
                        including this message: %s\n%s""" % (e, traceback.format_exc()),
                                base_url)]
                    log("Verifying test %s for %s caused an exception: %s" %
                        (test_type, self.name, e),
                        color=Fore.RED)
                    traceback.format_exc()
                # Summarise the per-URL outcomes onto the test object.
                test.failed = any(
                    result == 'fail' for (result, reason, url) in results)
                test.warned = any(
                    result == 'warn' for (result, reason, url) in results)
                test.passed = all(
                    result == 'pass' for (result, reason, url) in results)
                def output_result(result, reason, url):
                    # Pretty-print a single (result, reason, url) tuple.
                    specific_rules_url = "https://github.com/TechEmpower/FrameworkBenchmarks/wiki/Project-Information-Framework-Tests-Overview#specific-test-requirements"
                    color = Fore.GREEN
                    if result.upper() == "WARN":
                        color = Fore.YELLOW
                    elif result.upper() == "FAIL":
                        color = Fore.RED
                    log("   {!s}{!s}{!s} for {!s}".format(
                        color, result.upper(), Style.RESET_ALL, url),
                        file=verification)
                    if reason is not None and len(reason) != 0:
                        for line in reason.splitlines():
                            log("     " + line, file=verification)
                        if not test.passed:
                            log("     See {!s}".format(specific_rules_url),
                                file=verification)
                [output_result(r1, r2, url) for (r1, r2, url) in results]
                if test.failed:
                    test.output_headers_and_body()
                    self.benchmarker.results.report_verify_results(self, test_type, 'fail')
                elif test.warned:
                    test.output_headers_and_body()
                    self.benchmarker.results.report_verify_results(self, test_type, 'warn')
                elif test.passed:
                    self.benchmarker.results.report_verify_results(self, test_type, 'pass')
                else:
                    raise Exception(
                        "Unknown error - test did not pass,warn,or fail")
        result = True
        for test_type in self.runTests:
            verify_type(test_type)
            if self.runTests[test_type].failed:
                result = False
        return result
5,547 | test metrics show json | import json
from dvc.cli import parse_args
from dvc.commands.metrics import CmdMetricsDiff, CmdMetricsShow
def test_metrics_diff(dvc, mocker, capsys):
    """`metrics diff` forwards CLI flags to diff() and show_diff()."""
    cli_args = parse_args(
        [
            "metrics",
            "diff",
            "HEAD~10",
            "HEAD~1",
            "-R",
            "--all",
            "--md",
            "--targets",
            "target1",
            "target2",
            "--no-path",
        ]
    )
    assert cli_args.func == CmdMetricsDiff
    cmd = cli_args.func(cli_args)
    diff = {"metrics.yaml": {"": {"old": 1, "new": 3}}}
    metrics_diff = mocker.patch("dvc.repo.metrics.diff.diff", return_value=diff)
    show_diff_mock = mocker.patch("dvc.compare.show_diff")
    assert cmd.run() == 0
    # The repo-level diff receives the revisions/targets from the CLI...
    metrics_diff.assert_called_once_with(
        cmd.repo,
        targets=["target1", "target2"],
        a_rev="HEAD~10",
        b_rev="HEAD~1",
        recursive=True,
        all=True,
    )
    # ...and the renderer receives the presentation flags (--md, --no-path).
    show_diff_mock.assert_called_once_with(
        diff,
        title="Metric",
        no_path=True,
        precision=5,
        markdown=True,
        round_digits=True,
        a_rev="HEAD~10",
        b_rev="HEAD~1",
    )
def test_metrics_diff_json(dvc, mocker, capsys):
    """With --json, the raw diff is printed and show_diff() is bypassed."""
    cli_args = parse_args(
        [
            "metrics",
            "diff",
            "HEAD~10",
            "HEAD~1",
            "-R",
            "--all",
            "--json",
            "--targets",
            "target1",
            "target2",
            "--no-path",
            "--precision",
            "10",
        ]
    )
    assert cli_args.func == CmdMetricsDiff
    cmd = cli_args.func(cli_args)
    diff = {"metrics.yaml": {"": {"old": 1, "new": 3}}}
    metrics_diff = mocker.patch("dvc.repo.metrics.diff.diff", return_value=diff)
    show_diff_mock = mocker.patch("dvc.compare.show_diff")
    assert cmd.run() == 0
    out, _ = capsys.readouterr()
    metrics_diff.assert_called_once_with(
        cmd.repo,
        targets=["target1", "target2"],
        a_rev="HEAD~10",
        b_rev="HEAD~1",
        recursive=True,
        all=True,
    )
    # JSON mode writes the dict straight to stdout instead of rendering.
    show_diff_mock.assert_not_called()
    assert json.dumps(diff) in out
def test_metrics_show(dvc, mocker):
    """`metrics show` forwards CLI flags to show() and show_metrics()."""
    cli_args = parse_args(
        [
            "metrics",
            "show",
            "-R",
            "--all-tags",
            "--all-branches",
            "--all-commits",
            "target1",
            "target2",
            "--precision",
            "8",
        ]
    )
    assert cli_args.func == CmdMetricsShow
    cmd = cli_args.func(cli_args)
    m1 = mocker.patch("dvc.repo.metrics.show.show", return_value={})
    m2 = mocker.patch("dvc.compare.show_metrics", return_value="")
    assert cmd.run() == 0
    m1.assert_called_once_with(
        cmd.repo,
        ["target1", "target2"],
        recursive=True,
        all_tags=True,
        all_branches=True,
        all_commits=True,
    )
    m2.assert_called_once_with(
        {},
        markdown=False,
        all_tags=True,
        all_branches=True,
        all_commits=True,
        precision=8,
        round_digits=True,
    )
def METHOD_NAME(dvc, mocker, capsys):
    """With --json, `metrics show` prints raw data and skips rendering."""
    cli_args = parse_args(
        [
            "metrics",
            "show",
            "--json",
            "-R",
            "--all-tags",
            "--all-branches",
            "--all-commits",
            "target1",
            "target2",
            "--precision",
            "8",
        ]
    )
    assert cli_args.func == CmdMetricsShow
    cmd = cli_args.func(cli_args)
    d = {
        "branch_1": {"metrics.json": {"b": {"ad": 1, "bc": 2}, "c": 4}},
        "branch_2": {"metrics.json": {"a": 1, "b": {"ad": 3, "bc": 4}}},
    }
    metrics_show = mocker.patch("dvc.repo.metrics.show.show", return_value=d)
    show_metrics_mock = mocker.patch("dvc.compare.show_metrics")
    assert cmd.run() == 0
    out, _ = capsys.readouterr()
    metrics_show.assert_called_once_with(
        cmd.repo,
        ["target1", "target2"],
        recursive=True,
        all_tags=True,
        all_branches=True,
        all_commits=True,
    )
    # JSON mode writes the dict straight to stdout instead of rendering.
    show_metrics_mock.assert_not_called()
    assert json.dumps(d) in out
5,548 | log to file | # Copyright 2017, David Wilson
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import datetime
import logging
import os
import sys
import mitogen
import mitogen.core
import mitogen.master
LOG = logging.getLogger('mitogen')
# Python 2 dicts have iteritems(); Python 3 dicts only items(). getattr()
# covers both in one expression, so the previous explicit PY3 if/else
# (which immediately overwrote this very assignment) was redundant and
# has been removed.
iteritems = getattr(dict, 'iteritems', dict.items)
def disable_site_packages():
    # Strip site-packages / Extras entries from sys.path so imports come
    # only from the standard library and the local tree. Mutates the
    # existing list in place (slice assignment) rather than rebinding it.
    sys.path[:] = [
        entry for entry in sys.path
        if 'site-packages' not in entry and 'Extras' not in entry
    ]
def _formatTime(record, datefmt=None):
dt = datetime.datetime.fromtimestamp(record.created)
return dt.strftime(datefmt)
def log_get_formatter():
    # Build the standard mitogen log formatter: sub-second timestamp,
    # single-letter level, logger name, message. formatTime is swapped
    # for our datetime-based helper so '%f' works.
    formatter = logging.Formatter(
        '%(asctime)s %(levelname).1s %(name)s: %(message)s',
        '%H:%M:%S.%f',
    )
    formatter.formatTime = _formatTime
    return formatter
def METHOD_NAME(path=None, io=False, level='INFO'):
    """Install a mitogen log handler on the root logger.

    :param path: Optional file path to log to; defaults to stderr.
    :param io: Request IO-level logging. NOTE(review): this parameter is
        unconditionally overwritten below from MITOGEN_LOG_LEVEL, so the
        passed-in value has no effect — confirm whether that is intended.
    :param level: Default level name, overridable via MITOGEN_LOG_LEVEL.
    """
    log = logging.getLogger('')
    if path:
        # Line-buffered file; mark close-on-exec so children don't inherit it.
        fp = open(path, 'w', 1)
        mitogen.core.set_cloexec(fp.fileno())
    else:
        fp = sys.stderr
    # Environment variable wins over the `level` argument.
    level = os.environ.get('MITOGEN_LOG_LEVEL', level).upper()
    io = level == 'IO'
    # The pseudo-level 'IO' means DEBUG plus the mitogen.io logger.
    if io:
        level = 'DEBUG'
        logging.getLogger('mitogen.io').setLevel(level)
    # Unknown level names fall back to INFO.
    level = getattr(logging, level, logging.INFO)
    log.setLevel(level)
    # Prevent accidental duplicate log_to_file() calls from generating
    # duplicate output.
    for handler_ in reversed(log.handlers):
        if getattr(handler_, 'is_mitogen', None):
            log.handlers.remove(handler_)
    handler = logging.StreamHandler(fp)
    # Tag the handler so the de-duplication pass above can find it later.
    handler.is_mitogen = True
    handler.formatter = log_get_formatter()
    log.handlers.insert(0, handler)
def run_with_router(func, *args, **kwargs):
    """Create a Broker/Router pair, call ``func(router, *args, **kwargs)``,
    and guarantee the broker is shut down and joined afterwards."""
    broker = mitogen.master.Broker()
    router = mitogen.master.Router(broker)
    try:
        return func(router, *args, **kwargs)
    finally:
        broker.shutdown()
        broker.join()
def with_router(func):
    """Decorator form of :func:`run_with_router`: the wrapped function
    receives a freshly constructed router as its first argument."""
    def wrapper(*args, **kwargs):
        return run_with_router(func, *args, **kwargs)
    # Preserve the wrapped function's name for diagnostics; `func_name`
    # is the Python 2 spelling of `__name__`.
    if mitogen.core.PY3:
        wrapper.func_name = func.__name__
    else:
        wrapper.func_name = func.func_name
    return wrapper
# Types that cast() forwards unchanged: plain scalars plus mitogen types
# that are already safe to serialize as-is.
PASSTHROUGH = (
    int, float, bool,
    type(None),
    mitogen.core.Context,
    mitogen.core.CallError,
    mitogen.core.Blob,
    mitogen.core.Secret,
)
def cast(obj):
    """Recursively convert *obj* into plain serializable built-ins.

    Dicts and lists/tuples are rebuilt element-by-element; PASSTHROUGH
    types are returned unchanged; text/byte subclasses are copied into
    their plain base type. Anything else raises :class:`TypeError`.
    """
    if isinstance(obj, dict):
        return dict((cast(k), cast(v)) for k, v in iteritems(obj))
    if isinstance(obj, (list, tuple)):
        # Note: tuples are deliberately converted to lists here.
        return [cast(v) for v in obj]
    if isinstance(obj, PASSTHROUGH):
        return obj
    if isinstance(obj, mitogen.core.UnicodeType):
        return mitogen.core.UnicodeType(obj)
    if isinstance(obj, mitogen.core.BytesType):
        return mitogen.core.BytesType(obj)
    raise TypeError("Cannot serialize: %r: %r" % (type(obj), obj))
5,549 | ping loop | # -*- coding: utf-8 -*-
from asyncio import sleep, ensure_future, wait_for, TimeoutError
from .functions import milliseconds, iso8601, deep_extend
from ccxt import NetworkError, RequestTimeout, NotSupported
from ccxt.async_support.base.ws.future import Future
class Client(object):
    """Base websocket client for ccxt's streaming (pro) exchanges.

    Tracks per-message-hash futures that are resolved/rejected as
    messages arrive, and owns the receive and ping loops for one
    connection. Subclasses supply the transport by overriding
    receive/send/close/create_connection/closed/handle_message.
    """
    # NOTE(review): these class-level dicts are shadowed per-instance by
    # the defaults dict in __init__, so they are not shared in practice —
    # but any attribute not listed in `defaults` would be.
    url = None
    ws = None
    futures = {}
    options = {}  # ws-specific options
    subscriptions = {}
    rejections = {}
    on_message_callback = None
    on_error_callback = None
    on_close_callback = None
    on_connected_callback = None
    connectionStarted = None
    connectionEstablished = None
    isConnected = False
    connectionTimeout = 10000  # ms, false to disable
    connection = None
    error = None  # low-level networking exception, if any
    connected = None  # connection-related Future
    keepAlive = 5000
    heartbeat = True
    maxPingPongMisses = 2.0  # how many missed pongs to raise a timeout
    lastPong = None
    ping = None  # ping-function if defined
    verbose = False  # verbose output
    gunzip = False
    inflate = False
    throttle = None
    connecting = False
    asyncio_loop = None
    ping_looper = None
    receive_looper = None
    def __init__(self, url, on_message_callback, on_error_callback, on_close_callback, on_connected_callback, config={}):
        defaults = {
            'url': url,
            'futures': {},
            'subscriptions': {},
            'rejections': {},
            'on_message_callback': on_message_callback,
            'on_error_callback': on_error_callback,
            'on_close_callback': on_close_callback,
            'on_connected_callback': on_connected_callback,
        }
        settings = {}
        settings.update(defaults)
        settings.update(config)
        # Dict-valued settings are deep-merged into any existing class
        # attribute; everything else is assigned directly.
        for key in settings:
            if hasattr(self, key) and isinstance(getattr(self, key), dict):
                setattr(self, key, deep_extend(getattr(self, key), settings[key]))
            else:
                setattr(self, key, settings[key])
        # connection-related Future
        self.connected = Future()
    def future(self, message_hash):
        # Return (creating if needed) the Future for `message_hash`.
        # A rejection queued before the future existed is delivered now.
        if message_hash not in self.futures or self.futures[message_hash].cancelled():
            self.futures[message_hash] = Future()
        future = self.futures[message_hash]
        if message_hash in self.rejections:
            future.reject(self.rejections[message_hash])
            del self.rejections[message_hash]
        return future
    def resolve(self, result, message_hash):
        # Resolve and discard the future registered for `message_hash`.
        if self.verbose and message_hash is None:
            self.log(iso8601(milliseconds()), 'resolve received None messageHash')
        if message_hash in self.futures:
            future = self.futures[message_hash]
            future.resolve(result)
            del self.futures[message_hash]
        return result
    def reject(self, result, message_hash=None):
        # Reject one future, or — with no message_hash — every pending
        # future. Rejections for not-yet-created futures are queued in
        # self.rejections and delivered by future() later.
        if message_hash:
            if message_hash in self.futures:
                future = self.futures[message_hash]
                future.reject(result)
                del self.futures[message_hash]
            else:
                self.rejections[message_hash] = result
        else:
            message_hashes = list(self.futures.keys())
            for message_hash in message_hashes:
                self.reject(result, message_hash)
        return result
    async def receive_loop(self):
        # Pump incoming messages into handle_message() until closed; any
        # transport error rejects all pending futures via reset().
        if self.verbose:
            self.log(iso8601(milliseconds()), 'receive loop')
        while not self.closed():
            try:
                message = await self.receive()
                # self.log(iso8601(milliseconds()), 'received', message)
                self.handle_message(message)
            except Exception as e:
                error = NetworkError(str(e))
                if self.verbose:
                    self.log(iso8601(milliseconds()), 'receive_loop', 'Exception', error)
                self.reset(error)
    async def open(self, session, backoff_delay=0):
        # exponential backoff for consequent connections if necessary
        if backoff_delay:
            await sleep(backoff_delay)
        if self.verbose:
            self.log(iso8601(milliseconds()), 'connecting to', self.url, 'with timeout', self.connectionTimeout, 'ms')
        self.connectionStarted = milliseconds()
        try:
            coroutine = self.create_connection(session)
            self.connection = await wait_for(coroutine, timeout=int(self.connectionTimeout / 1000))
            self.connecting = False
            self.connectionEstablished = milliseconds()
            self.isConnected = True
            if self.verbose:
                self.log(iso8601(milliseconds()), 'connected')
            self.connected.resolve(self.url)
            self.on_connected_callback(self)
            # run both loops forever
            self.ping_looper = ensure_future(self.METHOD_NAME(), loop=self.asyncio_loop)
            self.receive_looper = ensure_future(self.receive_loop(), loop=self.asyncio_loop)
        except TimeoutError:
            # connection timeout
            error = RequestTimeout('Connection timeout')
            if self.verbose:
                self.log(iso8601(milliseconds()), 'RequestTimeout', error)
            self.on_error(error)
        except Exception as e:
            # connection failed or rejected (ConnectionRefusedError, ClientConnectorError)
            error = NetworkError(e)
            if self.verbose:
                self.log(iso8601(milliseconds()), 'NetworkError', error)
            self.on_error(error)
    def connect(self, session, backoff_delay=0):
        # Idempotent: schedules open() only once; always returns the
        # shared `connected` future.
        if not self.connection and not self.connecting:
            self.connecting = True
            ensure_future(self.open(session, backoff_delay), loop=self.asyncio_loop)
        return self.connected
    def on_error(self, error):
        # Record the error, reject all pending futures, notify the owner,
        # then close the transport if it is still open.
        if self.verbose:
            self.log(iso8601(milliseconds()), 'on_error', error)
        self.error = error
        self.reset(error)
        self.on_error_callback(self, error)
        if not self.closed():
            ensure_future(self.close(1006), loop=self.asyncio_loop)
    def on_close(self, code):
        # Remote close: reject pending futures (unless an error already
        # did) and make sure the local side is closed too.
        if self.verbose:
            self.log(iso8601(milliseconds()), 'on_close', code)
        if not self.error:
            self.reset(NetworkError('Connection closed by remote server, closing code ' + str(code)))
        self.on_close_callback(self, code)
        if not self.closed():
            ensure_future(self.close(code), loop=self.asyncio_loop)
    def reset(self, error):
        # Fail every outstanding future with `error`.
        self.reject(error)
    async def METHOD_NAME(self):
        # Keep-alive loop; this base implementation only logs — concrete
        # transports override it with real ping/pong handling.
        if self.verbose:
            self.log(iso8601(milliseconds()), 'ping loop')
    # The methods below define the transport interface; subclasses must
    # override them.
    def receive(self):
        raise NotSupported('receive() not implemented')
    def handle_message(self, message):
        raise NotSupported('handle_message() not implemented')
    def closed(self):
        raise NotSupported('closed() not implemented')
    async def send(self, message):
        raise NotSupported('send() not implemented')
    async def close(self, code=1000):
        raise NotSupported('close() not implemented')
    def create_connection(self, session):
        raise NotSupported('create_connection() not implemented')
    def log(self, *args):
        print(*args)
5,550 | test items | import json
from xml.etree import ElementTree as etree
import pytest
from pycsw.ogc.api.records import API
pytestmark = pytest.mark.functional
def test_landing_page(config_virtual_collections):
    """Landing page returns JSON/HTML with absolute, server-rooted links."""
    api = API(config_virtual_collections)
    headers, status, content = api.landing_page({}, {'f': 'json'})
    content = json.loads(content)
    assert headers['Content-Type'] == 'application/json'
    assert status == 200
    assert len(content['links']) == 15
    # All advertised links must be rooted at the configured server URL.
    for link in content['links']:
        assert link['href'].startswith(api.config['server']['url'])
    headers, status, content = api.landing_page({}, {'f': 'html'})
    assert status == 200
    assert headers['Content-Type'] == 'text/html'
def test_openapi(config_virtual_collections):
    """OpenAPI endpoint serves valid JSON and an HTML rendering."""
    api = API(config_virtual_collections)
    headers, status, content = api.openapi({}, {'f': 'json'})
    assert status == 200
    # json.loads() doubles as a well-formedness check on the document.
    json.loads(content)
    assert headers['Content-Type'] == 'application/vnd.oai.openapi+json;version=3.0' # noqa
    headers, status, content = api.openapi({}, {'f': 'html'})
    assert status == 200
    assert headers['Content-Type'] == 'text/html'
def test_conformance(config_virtual_collections):
    """Conformance declaration lists the expected number of classes."""
    api = API(config_virtual_collections)
    content = json.loads(api.conformance({}, {})[2])
    assert len(content['conformsTo']) == 18
def test_collections(config_virtual_collections):
    """/collections exposes the default catalogue with its metadata."""
    api = API(config_virtual_collections)
    content = json.loads(api.collections({}, {})[2])
    assert len(content['links']) == 2
    assert len(content['collections']) == 2
    # Inspect the first (default) collection in detail.
    content = json.loads(api.collections({}, {})[2])['collections'][0]
    assert len(content['links']) == 3
    assert content['id'] == 'metadata:main'
    assert content['title'] == 'pycsw Geospatial Catalogue'
    assert content['description'] == 'pycsw is an OARec and OGC CSW server implementation written in Python' # noqa
    assert content['itemType'] == 'record'
def test_queryables(config_virtual_collections):
    """Queryables document is a JSON Schema with a GeoJSON geometry ref."""
    api = API(config_virtual_collections)
    content = json.loads(api.queryables({}, {})[2])
    assert content['type'] == 'object'
    assert content['title'] == 'pycsw Geospatial Catalogue'
    assert content['$id'] == 'http://localhost/pycsw/oarec/collections/metadata:main/queryables' # noqa
    assert content['$schema'] == 'http://json-schema.org/draft/2019-09/schema'
    assert len(content['properties']) == 12
    assert 'geometry' in content['properties']
    assert content['properties']['geometry']['$ref'] == 'https://geojson.org/schema/Polygon.json' # noqa
def METHOD_NAME(config_virtual_collections):
    """Exercise /items: text search, bbox, CQL filters, paging, sorting."""
    api = API(config_virtual_collections)
    # Unfiltered: 12 records total, default page size of 10.
    content = json.loads(api.items({}, None, {})[2])
    assert content['type'] == 'FeatureCollection'
    assert len(content['links']) == 5
    assert content['numberMatched'] == 12
    assert content['numberReturned'] == 10
    assert len(content['features']) == 10
    assert len(content['features']) == content['numberReturned']
    # Full-text search: multiple terms are AND-ed together.
    params = {'q': 'Lorem'}
    content = json.loads(api.items({}, None, params)[2])
    assert content['numberMatched'] == 5
    assert content['numberReturned'] == 5
    assert len(content['features']) == content['numberReturned']
    params = {'q': 'Lorem dolor'}
    content = json.loads(api.items({}, None, params)[2])
    assert content['numberMatched'] == 1
    assert content['numberReturned'] == 1
    assert len(content['features']) == content['numberReturned']
    # Spatial filter, alone and combined with text search.
    params = {'bbox': '-50,0,50,80'}
    content = json.loads(api.items({}, None, params)[2])
    assert content['numberMatched'] == 3
    assert content['numberReturned'] == 3
    assert len(content['features']) == content['numberReturned']
    params = {'bbox': '-50,0,50,80', 'q': 'Lorem'}
    content = json.loads(api.items({}, None, params)[2])
    assert content['numberMatched'] == 1
    assert content['numberReturned'] == 1
    assert len(content['features']) == content['numberReturned']
    # CQL text filter (LIKE), alone and combined with q.
    params = {'filter': "title LIKE '%%Lorem%%'"}
    content = json.loads(api.items({}, None, params)[2])
    assert content['numberMatched'] == 2
    assert content['numberReturned'] == 2
    assert len(content['features']) == content['numberReturned']
    params = {'filter': "title LIKE '%%Lorem%%'", 'q': 'iPsUm'}
    content = json.loads(api.items({}, None, params)[2])
    assert content['numberMatched'] == 2
    assert content['numberReturned'] == 2
    assert len(content['features']) == content['numberReturned']
    # Paging: limit must be positive; limit/offset slice the result set.
    params = {'limit': 0}
    content = json.loads(api.items({}, None, params)[2])
    assert content['code'] == 'InvalidParameterValue'
    assert content['description'] == 'Limit must be a positive integer'
    params = {'limit': 4}
    content = json.loads(api.items({}, None, params)[2])
    assert content['numberMatched'] == 12
    assert content['numberReturned'] == 4
    assert len(content['features']) == content['numberReturned']
    params = {'limit': 4, 'offset': 10}
    content = json.loads(api.items({}, None, params)[2])
    assert content['numberMatched'] == 12
    assert content['numberReturned'] == 2
    assert len(content['features']) == content['numberReturned']
    # Sorting: ascending by default, '-' prefix reverses.
    params = {'sortby': 'title'}
    content = json.loads(api.items({}, None, params)[2])
    assert content['numberMatched'] == 12
    assert content['numberReturned'] == 10
    assert content['features'][5]['properties']['title'] == 'Lorem ipsum'
    params = {'sortby': '-title'}
    content = json.loads(api.items({}, None, params)[2])
    assert content['numberMatched'] == 12
    assert content['numberReturned'] == 10
    assert content['features'][5]['properties']['title'] == 'Lorem ipsum dolor sit amet' # noqa
    # CQL-JSON filters passed as the second positional argument.
    cql_json = {'op': '=', 'args': [{'property': 'title'}, 'Lorem ipsum']}
    content = json.loads(api.items({}, cql_json, {})[2])
    assert content['numberMatched'] == 1
    assert content['numberReturned'] == 1
    assert len(content['features']) == content['numberReturned']
    cql_json = {'op': '=', 'args': [{'property': 'title'}, 'Lorem ipsum']}
    content = json.loads(api.items({}, cql_json, {'limit': 1})[2])
    assert content['numberMatched'] == 1
    assert content['numberReturned'] == 1
    assert len(content['features']) == content['numberReturned']
    cql_json = {'op': 'like', 'args': [{'property': 'title'}, 'lorem%']}
    content = json.loads(api.items({}, cql_json, {})[2])
    assert content['numberMatched'] == 2
    assert content['numberReturned'] == 2
    assert len(content['features']) == content['numberReturned']
def test_item(config_virtual_collections):
    """Fetch a single record as JSON and as Dublin Core XML and verify fields.

    Uses the OGC API ``item`` endpoint against the ``metadata:main``
    virtual collection.
    """
    api = API(config_virtual_collections)
    item = 'urn:uuid:19887a8a-f6b0-4a63-ae56-7fba0e17801f'

    # JSON (GeoJSON Feature) representation
    content = json.loads(api.item({}, {}, 'metadata:main', item)[2])
    assert content['id'] == item
    assert content['type'] == 'Feature'
    assert content['properties']['title'] == 'Lorem ipsum'
    assert content['properties']['keywords'] == ['Tourism--Greece']

    # XML (Dublin Core) representation of the same record
    item = 'urn:uuid:19887a8a-f6b0-4a63-ae56-7fba0e17801f'
    params = {'f': 'xml'}
    # FIX: collection id was misspelled as 'metadat:main'; use the same
    # 'metadata:main' collection as the JSON request above.
    content = api.item({}, params, 'metadata:main', item)[2]
    e = etree.fromstring(content)
    element = e.find('{http://purl.org/dc/elements/1.1/}identifier').text
    assert element == item
    element = e.find('{http://purl.org/dc/elements/1.1/}type').text
    assert element == 'http://purl.org/dc/dcmitype/Image'
    element = e.find('{http://purl.org/dc/elements/1.1/}title').text
    assert element == 'Lorem ipsum'
    element = e.find('{http://purl.org/dc/elements/1.1/}subject').text
    assert element == 'Tourism--Greece'
5,551 | test error checking | # Copyright (c) Meta Platforms, Inc. and affiliates.
import vpdq # type: ignore
import pytest
import test_util
from pathlib import Path
import glob
import re
from typing import Union, Sequence, NamedTuple
from collections import namedtuple
# Directory layout — presumably this file lives a few levels below the
# repository root; sample hashes/videos are resolved relative to it.
# TODO confirm the exact layout against the repo.
DIR = Path(__file__).parent
VPDQ_DIR = DIR.parent.parent
SAMPLE_HASH_FOLDER = VPDQ_DIR / "sample-hashes"
SAMPLE_VIDEOS = VPDQ_DIR.parent / Path("tmk/sample-videos")
# Two per-frame hashes "match" if their Hamming distance is below this.
DISTANCE_TOLERANCE = 31
# Frames with PDQ quality below this threshold are skipped when comparing.
QUALITY_TOLERANCE = 50
PROJECT_DIR = Path(__file__).parents[3]
# NOTE(review): SAMPLE_HASH_FOLDER/SAMPLE_VIDEOS are already absolute, so
# joining them onto PROJECT_DIR yields the absolute path unchanged.
HASH_FOLDER = PROJECT_DIR / SAMPLE_HASH_FOLDER
VIDEO_FOLDER = PROJECT_DIR / SAMPLE_VIDEOS
# Caches of computed and reference hashes, keyed by file path.
test_hashes = {}
sample_hashes = {}
def get_test_file_paths() -> Sequence[Path]:
    """Collect every sample ``.mp4`` found (recursively) under VIDEO_FOLDER."""
    collected = []
    for candidate_str in glob.iglob(f"{VIDEO_FOLDER}/**/*.mp4", recursive=True):
        candidate = Path(candidate_str)
        # Only keep files that actually exist directly in VIDEO_FOLDER.
        if (VIDEO_FOLDER / f"{candidate.name}").is_file():
            collected.append(candidate)
        else:
            print(f"Video file {candidate.name} doesn't exist. Skipping.")
    assert len(collected) > 0
    return collected
def test_vpdq_utils():
    """Round-trip check: hex strings in a sample hash file <-> vpdq hashes."""
    videos = get_test_file_paths()
    sample_path = Path(f"{HASH_FOLDER}/{videos[0].stem}.txt")
    assert sample_path.is_file()
    features = test_util.read_file_to_hash(sample_path)
    with open(sample_path, "r") as fh:
        raw_lines = fh.readlines()
    # Each CSV line's third field is the hex-encoded PDQ hash.
    for raw, feature in zip(raw_lines, features):
        fields = raw.strip().split(",")
        hex_hash = fields[2]
        assert vpdq.str_to_hash(hex_hash) == feature.hash
        assert vpdq.hash_to_hex(feature.hash) == hex_hash
def METHOD_NAME():
    """computeHash must reject negative parameters and missing input files."""
    videos = get_test_file_paths()
    video_file = Path(f"{VIDEO_FOLDER}/{videos[0].name}")
    # Each negative numeric parameter must raise with its specific message.
    invalid_cases = [
        ({"seconds_per_hash": -1}, "Seconds_per_hash must be non-negative"),
        ({"downsample_width": -1}, "Downsample_width must be non-negative"),
        ({"downsample_height": -1}, "Downsample_height must be non-negative"),
    ]
    for kwargs, message in invalid_cases:
        with pytest.raises(ValueError, match=message):
            vpdq.computeHash(input_video_filename=video_file, **kwargs)
    with pytest.raises(ValueError, match="Input_video_filename doesn't exist"):
        vpdq.computeHash(input_video_filename="nonexisting")
def get_downsampled_hash_files(
    input_hash_file_path: Union[Path, str]
) -> Sequence[NamedTuple]:
    """
    Get all the downsampled hash files that match the input hash file name.

    The input hash file name should be in the format <base_name>.txt and
    downsampled variants are named <base_name>-<width>x<height>.txt.
    """
    # Matches "<base>-<width>x<height>.txt"; groups: base, width, height.
    resolution_pattern = r"(.*)-(\d+)x(\d+)\.txt"
    # Strip the ".txt" suffix via lookahead; no match => not a hash file.
    base_match = re.match(r"(.*)(?=\.txt)", Path(input_hash_file_path).name)
    if base_match is None:
        return []
    base_name = base_match.group(1)
    pathanddim = namedtuple("pathanddim", ["path", "width", "height"])
    results = []
    for entry in HASH_FOLDER.iterdir():
        entry = Path(entry)
        if not entry.is_file():
            continue
        for name, width, height in re.findall(resolution_pattern, entry.name):
            # Keep only variants of the requested base file.
            if name == base_name:
                results.append(pathanddim(Path(entry), width, height))
    return results
def test_compare_hashes():
    """This regression test is creating hashes from sample videos and compare them with the provided hashes line by line.
    Two VPDQ features are considered the same if each line of the hashes are within DISTANCE_TOLERANCE.
    For hashes that have a quality lower than QUALITY_TOLERANCE, the test will skip them for comparing.
    """
    test_files = get_test_file_paths()
    for file in test_files:
        # Load the hash file truth
        hash_file = Path(f"{HASH_FOLDER}/{file.stem}.txt")
        assert hash_file.is_file()
        ret = test_util.read_file_to_hash(hash_file)
        assert ret is not None
        sample_hashes[file] = ret
        # Calculate the hash of file
        assert file.is_file()
        ret = vpdq.computeHash(input_video_filename=file, seconds_per_hash=0)
        assert ret is not None
        test_hashes[file] = ret
        print("Comparing hash for video:", file)
        # hash1 = freshly computed, hash2 = checked-in reference
        hash1 = test_hashes[file]
        hash2 = sample_hashes[file]
        assert len(hash1) == len(hash2)
        for h1, h2 in zip(hash1, hash2):
            # Low-quality frames are unstable across decoders; skip them.
            if h1.quality >= QUALITY_TOLERANCE and h2.quality >= QUALITY_TOLERANCE:
                assert h1.hamming_distance(h2) < DISTANCE_TOLERANCE
                assert h1.frame_number == h2.frame_number
        # Compare the downsampled hashes
        for downsampled_hash_file_pathanddim in get_downsampled_hash_files(hash_file):
            downsampled_hash_file = downsampled_hash_file_pathanddim.path
            downsampled_width = int(downsampled_hash_file_pathanddim.width)
            downsampled_height = int(downsampled_hash_file_pathanddim.height)
            # Load the downsampled hash file truth
            assert downsampled_hash_file.is_file()
            ret = test_util.read_file_to_hash(downsampled_hash_file)
            assert ret is not None
            sample_hashes[downsampled_hash_file] = ret
            # Calculated downsampled hash of test file
            assert file.is_file()
            ret = vpdq.computeHash(
                input_video_filename=file,
                seconds_per_hash=0,
                downsample_width=downsampled_width,
                downsample_height=downsampled_height,
            )
            assert ret is not None
            test_hashes[downsampled_hash_file] = ret
            print("Comparing hash for downsampled video:", downsampled_hash_file)
            # Same comparison as above, but keyed by the downsampled file.
            hash1 = test_hashes[downsampled_hash_file]
            hash2 = sample_hashes[downsampled_hash_file]
            assert len(hash1) == len(hash2)
            for h1, h2 in zip(hash1, hash2):
                if h1.quality >= QUALITY_TOLERANCE and h2.quality >= QUALITY_TOLERANCE:
                    assert h1.hamming_distance(h2) < DISTANCE_TOLERANCE
                    assert h1.frame_number == h2.frame_number
assert h1.frame_number == h2.frame_number |
5,552 | test nonsquare bin | import numpy as np
import Orange
from Orange.widgets.tests.base import WidgetTest
from orangecontrib.spectroscopy.utils import get_ndim_hyperspec
from orangecontrib.spectroscopy.widgets.owbin import OWBin
class TestOWBin(WidgetTest):
    """Tests for the OWBin binning widget against a small agilent mosaic."""

    @classmethod
    def setUpClass(cls):
        super().setUpClass()
        cls.mosaic = Orange.data.Table("agilent/5_mosaic_agg1024.dmt")

    def setUp(self):
        self.widget = self.create_widget(OWBin)

    def test_load_unload(self):
        # just to load the widget (it has no inputs)
        pass

    def test_bin(self):
        """2x2 binning reduces the data 4x and averages coordinates."""
        self.widget.bin_shape = (2, 2)
        # FIX: was `self.widget._init_bins` — a bare attribute access that
        # silently did nothing; every sibling test calls the method.
        self.widget._init_bins()
        self.send_signal(self.widget.Inputs.data, self.mosaic)
        m = self.get_output(self.widget.Outputs.bindata)
        np.testing.assert_equal(len(m.X), len(self.mosaic.X) / 2**2)
        x_coords = self.mosaic[:, "map_x"].metas[:, 0]
        x_coords_binned = np.array([x_coords[0:2].mean(), x_coords[2:4].mean()])
        np.testing.assert_equal(m[:, "map_x"].metas[::4, 0], x_coords_binned)
        y_coords = self.mosaic[:, "map_y"].metas[:, 0]
        y_coords_binned = np.array([y_coords[0:8].mean(), y_coords[8:16].mean(),
                                    y_coords[16:24].mean(), y_coords[24:32].mean()])
        np.testing.assert_equal(m[:, "map_y"].metas[0:4, 0], y_coords_binned)

    def test_bin_changed(self):
        """Changing bin settings after data input recomputes the output."""
        self.send_signal(self.widget.Inputs.data, self.mosaic)
        self.widget.bin_0 = 2
        self.widget.bin_1 = 2
        self.widget._bin_changed()
        m = self.get_output(self.widget.Outputs.bindata)
        np.testing.assert_equal(len(m.X), len(self.mosaic.X) / 2**2)

    def METHOD_NAME(self):
        """Non-square (2x4) binning averages each axis independently."""
        self.widget.bin_shape = (2, 4)
        self.widget._init_bins()
        self.send_signal(self.widget.Inputs.data, self.mosaic)
        m = self.get_output(self.widget.Outputs.bindata)
        np.testing.assert_equal(len(m.X), len(self.mosaic.X) / (2 * 4))
        x_coords = self.mosaic[:, "map_x"].metas[:, 0]
        x_coords_binned = np.array([x_coords[0:2].mean(), x_coords[2:4].mean()])
        np.testing.assert_equal(m[:, "map_x"].metas[::2, 0], x_coords_binned)
        y_coords = self.mosaic[:, "map_y"].metas[:, 0]
        y_coords_binned = np.array([y_coords[0:16].mean(), y_coords[16:32].mean()])
        np.testing.assert_equal(m[:, "map_y"].metas[0:2, 0], y_coords_binned)

    def test_no_bin(self):
        """1x1 binning must leave the hyperspectral cube unchanged."""
        self.widget.bin_shape = (1, 1)
        self.widget._init_bins()
        self.send_signal(self.widget.Inputs.data, self.mosaic)
        m = self.get_output(self.widget.Outputs.bindata)
        # Comparing hypercube data and axes here instead of Tables because
        # self.mosaic is built (row, column) i.e. (map_y, map_x)
        # but bin_hyperspectra always returns (attr_0, attr_1) i.e. (map_x, map_y)
        # so the resulting tables are arranged differently (but contain the same data).
        xat = [v for v in m.domain.metas if v.name == "map_x"][0]
        yat = [v for v in m.domain.metas if v.name == "map_y"][0]
        attrs = [xat, yat]
        hyper_orig = get_ndim_hyperspec(self.mosaic, attrs)
        hyper_m = get_ndim_hyperspec(m, attrs)
        np.testing.assert_equal(hyper_orig, hyper_m)

    def test_invalid_bin(self):
        """A bin shape that does not divide the image shows an error."""
        self.widget.bin_shape = (3, 3)
        self.widget._init_bins()
        self.send_signal(self.widget.Inputs.data, self.mosaic)
        self.assertTrue(self.widget.Error.invalid_block.is_shown())
        self.assertIsNone(self.get_output(self.widget.Outputs.bindata))

    def test_invalid_axis(self):
        """NaN coordinates trigger the invalid-axis error; clearing resets it."""
        data = self.mosaic.copy()
        with data.unlocked():
            data.metas[:, 0] = np.nan
        self.send_signal(self.widget.Inputs.data, data)
        self.assertTrue(self.widget.Error.invalid_axis.is_shown())
        self.send_signal(self.widget.Inputs.data, None)
        self.assertFalse(self.widget.Error.invalid_axis.is_shown())

    def test_nan_in_image(self):
        """NaN pixel values warn; clean data clears the warning."""
        data = self.mosaic.copy()
        with data.unlocked():
            data.X[1, 2] = np.nan
        self.send_signal(self.widget.Inputs.data, data)
        self.assertTrue(self.widget.Warning.nan_in_image.is_shown())
        self.send_signal(self.widget.Inputs.data, self.mosaic)
        self.assertFalse(self.widget.Warning.nan_in_image.is_shown())
5,553 | set up test data | from datetime import datetime
from functools import reduce
import operator
from django.contrib.auth.models import Group, Permission
from django.db.models import Q
from django.urls import reverse
from django.utils.crypto import get_random_string
from django.test import TestCase, override_settings
from accounts.models import User
from zentral.contrib.inventory.models import MachineSnapshotCommit, MetaMachine
from zentral.contrib.osquery.compliance_checks import sync_query_compliance_check
from zentral.contrib.osquery.models import Query
from zentral.core.compliance_checks.models import MachineStatus, Status
@override_settings(STATICFILES_STORAGE='django.contrib.staticfiles.storage.StaticFilesStorage')
class InventoryComplianceChecksViewsTestCase(TestCase):
    """View tests: compliance-check statuses shown on the machine page."""

    @classmethod
    def METHOD_NAME(cls):
        """Create the shared test user/group and commit one machine snapshot."""
        # user
        cls.user = User.objects.create_user("godzilla", "godzilla@zentral.io", get_random_string(12))
        cls.group = Group.objects.create(name=get_random_string(12))
        cls.user.groups.set([cls.group])
        # machine
        cls.serial_number = "0123456789"
        MachineSnapshotCommit.objects.commit_machine_snapshot_tree({
            "source": {"module": "tests.zentral.io", "name": "Zentral Tests"},
            "serial_number": cls.serial_number,
            "os_version": {'name': 'OS X', 'major': 10, 'minor': 11, 'patch': 1},
            "osx_app_instances": [
                {'app': {'bundle_id': 'io.zentral.baller',
                         'bundle_name': 'Baller.app',
                         'bundle_version': '123',
                         'bundle_version_str': '1.2.3'},
                 'bundle_path': "/Applications/Baller.app"}
            ]
        })
        cls.machine = MetaMachine(cls.serial_number)
        cls.url_msn = cls.machine.get_urlsafe_serial_number()

    # utility methods

    def _force_check_query(self):
        # Create an osquery Query and sync its compliance check so a
        # check exists for the tests below.
        sql = "select 'OK' as ztl_status;"
        query = Query.objects.create(name=get_random_string(12), sql=sql)
        sync_query_compliance_check(query, True)
        return query

    def _login_redirect(self, url):
        # Anonymous requests must be redirected to the login page.
        response = self.client.get(url)
        self.assertRedirects(response, "{u}?next={n}".format(u=reverse("login"), n=url))

    def _login(self, *permissions):
        # Grant exactly the given "app_label.codename" permissions to the
        # test group (or clear them all), then log the user in.
        if permissions:
            permission_filter = reduce(operator.or_, (
                Q(content_type__app_label=app_label, codename=codename)
                for app_label, codename in (
                    permission.split(".")
                    for permission in permissions
                )
            ))
            self.group.permissions.set(list(Permission.objects.filter(permission_filter)))
        else:
            self.group.permissions.clear()
        self.client.force_login(self.user)

    # machine

    def test_machine_no_compliance_checks(self):
        """With no MachineStatus rows the page shows zero checks."""
        self._force_check_query()
        self._login(
            'compliance_checks.view_machinestatus',
            'inventory.view_machinesnapshot',
            'osquery.view_query',
        )
        response = self.client.get(self.machine.get_absolute_url())
        self.assertEqual(response.status_code, 200)
        self.assertTemplateUsed(response, "inventory/machine_detail.html")
        self.assertContains(response, "0 Compliance checks")  # no status

    def test_machine_one_compliance_check_other_machine(self):
        """A status attached to another machine must not be listed."""
        query = self._force_check_query()
        MachineStatus.objects.create(
            serial_number=get_random_string(12),  # not the tested machine
            compliance_check=query.compliance_check,
            compliance_check_version=query.compliance_check.version,
            status=Status.OK.value,
            status_time=datetime.utcnow()
        )
        self._login(
            'compliance_checks.view_machinestatus',
            'inventory.view_machinesnapshot',
            'osquery.view_query',
        )
        response = self.client.get(self.machine.get_absolute_url())
        self.assertEqual(response.status_code, 200)
        self.assertTemplateUsed(response, "inventory/machine_detail.html")
        self.assertContains(response, "0 Compliance checks")

    def test_machine_one_compliance_check(self):
        """With full permissions the check is listed with a redirect link."""
        query = self._force_check_query()
        MachineStatus.objects.create(
            serial_number=self.machine.serial_number,
            compliance_check=query.compliance_check,
            compliance_check_version=query.compliance_check.version,
            status=Status.OK.value,
            status_time=datetime.utcnow()
        )
        self._login(
            'compliance_checks.view_machinestatus',
            'inventory.view_machinesnapshot',
            'osquery.view_query',
        )
        response = self.client.get(self.machine.get_absolute_url())
        self.assertEqual(response.status_code, 200)
        self.assertTemplateUsed(response, "inventory/machine_detail.html")
        self.assertContains(response, "1 Compliance check")
        self.assertContains(response, query.name)
        cc_redirect_link = reverse("compliance_checks:redirect", args=(query.compliance_check.pk,))
        self.assertContains(response, cc_redirect_link)
        compliance_check_statuses = response.context["compliance_check_statuses"]
        self.assertEqual(len(compliance_check_statuses), 1)
        self.assertEqual(compliance_check_statuses[0][0], cc_redirect_link)
        self.assertEqual(compliance_check_statuses[0][1], query.compliance_check.name)
        self.assertEqual(compliance_check_statuses[0][2], Status.OK)

    def test_machine_one_compliance_check_no_perms(self):
        """Without osquery.view_query the check is shown but not linked."""
        query = self._force_check_query()
        MachineStatus.objects.create(
            serial_number=self.machine.serial_number,
            compliance_check=query.compliance_check,
            compliance_check_version=query.compliance_check.version,
            status=Status.FAILED.value,
            status_time=datetime.utcnow()
        )
        self._login(
            'compliance_checks.view_machinestatus',
            'inventory.view_machinesnapshot',
            # 'osquery.view_query', will block the link
        )
        response = self.client.get(self.machine.get_absolute_url())
        self.assertEqual(response.status_code, 200)
        self.assertTemplateUsed(response, "inventory/machine_detail.html")
        self.assertContains(response, "1 Compliance check")
        self.assertContains(response, query.name)
        cc_redirect_link = reverse("compliance_checks:redirect", args=(query.compliance_check.pk,))
        self.assertNotContains(response, cc_redirect_link)
        compliance_check_statuses = response.context["compliance_check_statuses"]
        self.assertEqual(len(compliance_check_statuses), 1)
        self.assertIsNone(compliance_check_statuses[0][0])
        self.assertEqual(compliance_check_statuses[0][1], query.compliance_check.name)
        self.assertEqual(compliance_check_statuses[0][2], Status.FAILED)
5,554 | simple relu |
import unittest
import numpy as np
import copy
from hypothesis import given
import hypothesis.strategies as st
from caffe2.python.model_helper import ModelHelper
from caffe2.python.models import resnet
from caffe2.python import workspace, brew
import caffe2.python.hypothesis_test_util as hu
import caffe2.python.mkl.rewrite_graph as rewrite_graph
def deterministic_io(model):
    """Return a copy of *model* with fixed per-op random seeds and at least
    one declared external output, so runs are reproducible/comparable."""
    model = copy.deepcopy(model)
    for seed, op in enumerate(model.InitProto().op, start=1):
        op.device_option.random_seed = seed
    if not model.Proto().external_output:
        # Fall back to the last op's first output as the model output.
        last_output = model.Proto().op[-1].output[0]
        model.Proto().external_output.extend([last_output])
    return model
def simple_fc():
    """Single fully-connected 10->10 layer applied to blob "data"."""
    fc_model = ModelHelper(name="r")
    brew.fc(fc_model, "data", "fc", dim_in=10, dim_out=10)
    return fc_model, [(1, 10)]
def double_matmul():
    """Two stacked FC layers; both intermediate outputs are exported."""
    model = ModelHelper(name="r")
    first = brew.fc(model, "data", "fc0", 10, 10)
    second = brew.fc(model, first, "fc1", 10, 10)
    # Export both layer outputs so the rewrite test can compare each.
    model.Proto().external_output[:] = [str(first), str(second)]
    return model, [(1, 10)]
def METHOD_NAME():
    """A lone ReLU op applied to the input blob."""
    relu_model = ModelHelper(name="r")
    brew.relu(relu_model, "data", "fc")
    return relu_model, [(1, 10)]
def simple_mlp():
    """Two FC+ReLU stages, written as a flat pipeline instead of nesting."""
    model = ModelHelper(name="r")
    hidden = brew.fc(model, "data", "fc1", 10, 10)
    hidden = brew.relu(model, hidden, "rl1")
    hidden = brew.fc(model, hidden, "fc2", 10, 10)
    brew.relu(model, hidden, "rl2")
    return model, [(1, 10)]
def simple_cnn():
    """conv -> spatial batch-norm -> ReLU on a 3-channel 32x32 input."""
    model = ModelHelper(name="r", arg_scope={"order": "NCHW", "is_test": True})
    conv = brew.conv(model, "data", 'conv1', 3, 16, kernel=3, stride=1)
    normed = brew.spatial_bn(model, conv, 'conv1_spatbn', 16, epsilon=1e-3)
    brew.relu(model, normed, 'relu1')
    return model, [(1, 3, 32, 32)]
def alexnet():
    """Build an AlexNet-style inference graph (NCHW, test mode).

    Returns the model and the expected input shape list
    ``[(1, 3, 224, 224)]``.
    """
    model = ModelHelper(name="r", arg_scope={"order": "NCHW", "is_test": True})
    # Stage 1: conv -> relu -> pool -> LRN
    conv1 = brew.conv(
        model,
        "data",
        "conv1",
        3,
        64,
        11, ('XavierFill', {}), ('ConstantFill', {}),
        stride=4,
        pad=2
    )
    relu1 = brew.relu(model, conv1, "conv1")
    pool1 = brew.max_pool(model, relu1, "pool1", kernel=3, stride=2, pad=0,
                          legacy_pad=3)
    lrn1 = brew.lrn(
        model, pool1, "pool1_lrn", size=5, alpha=1.0e-4, beta=0.75, bias=1.0)
    # Stage 2: conv -> relu -> pool -> LRN
    conv2 = brew.conv(
        model,
        lrn1,
        "conv2",
        64,
        192,
        5,
        ('XavierFill', {}),
        ('ConstantFill', {}),
        pad=2
    )
    relu2 = brew.relu(model, conv2, "conv2")
    pool2 = brew.max_pool(model, relu2, "pool2", kernel=3, stride=2)
    lrn2 = brew.lrn(
        model, pool2, "pool2_lrn", size=5, alpha=1.0e-4, beta=0.75, bias=1.0)
    # Stages 3-5: three conv+relu blocks, then a final max-pool
    conv3 = brew.conv(
        model,
        lrn2,
        "conv3",
        192,
        384,
        3,
        ('XavierFill', {}),
        ('ConstantFill', {}),
        pad=1
    )
    relu3 = brew.relu(model, conv3, "conv3")
    conv4 = brew.conv(
        model,
        relu3,
        "conv4",
        384,
        256,
        3,
        ('XavierFill', {}),
        ('ConstantFill', {}),
        pad=1
    )
    relu4 = brew.relu(model, conv4, "conv4")
    conv5 = brew.conv(
        model,
        relu4,
        "conv5",
        256,
        256,
        3,
        ('XavierFill', {}),
        ('ConstantFill', {}),
        pad=1
    )
    relu5 = brew.relu(model, conv5, "conv5")
    pool5 = brew.max_pool(model, relu5, "pool5", kernel=3, stride=2)
    # Classifier head: fc6/fc7 with dropout, fc8 logits
    fc6 = brew.fc(
        model,
        pool5, "fc6", 256 * 6 * 6, 4096, ('XavierFill', {}),
        ('ConstantFill', {})
    )
    relu6 = brew.relu(model, fc6, "fc6")
    fc7 = brew.fc(
        model, relu6, "fc7", 4096, 4096, ('XavierFill', {}), ('ConstantFill', {})
    )
    relu7 = brew.relu(model, fc7, "fc7")
    drop7 = brew.dropout(model, relu7, "fc7_dropout", is_test=1, ratio=0.5)
    fc8 = brew.fc(
        model, drop7, "fc8", 4096, 1000, ('XavierFill', {}), ('ConstantFill', {})
    )
    relu8 = brew.relu(model, fc8, "fc8")
    brew.dropout(model, relu8, "fc8_dropout", is_test=1, ratio=0.5)
    return model, [(1, 3, 224, 224)]
def simple_resnet():
    """Small CIFAR-style 32x32 resnet over a single-channel input."""
    model = ModelHelper(name="r", arg_scope={"order": "NCHW", "is_test": True})
    resnet.create_resnet_32x32(
        model,
        "data",
        num_input_channels=1,
        num_groups=1,
        num_labels=5,
        is_test=True)
    return model, [(1, 1, 32, 32)]
def complex_resnet():
    """Full resnet-50 (no loss) over a single-channel 224x224 input."""
    model = ModelHelper(name="r", arg_scope={"order": "NCHW", "is_test": True})
    resnet.create_resnet50(
        model,
        "data",
        num_input_channels=1,
        num_labels=5,
        is_test=True,
        no_loss=True)
    return model, [(1, 1, 224, 224)]
@unittest.skipIf(not workspace.C.use_mkldnn, "No MKLDNN support.")
class MKLRewriteTest(hu.HypothesisTestCase):
    """Verify the MKL graph rewrite produces numerically identical outputs.

    Fixes vs. the original: the four copy-pasted ``run`` closures are
    factored into helpers, a leftover debug ``print(model.Proto())`` is
    removed, and a redundant duplicate ``run(mkl_model)`` call is dropped.
    """

    def _run_model(self, model, inputs):
        # Run the param-init net, feed every external input, run the main
        # net, then fetch all declared external outputs.
        self.ws.run(model.InitProto())
        for name, X in zip(model.Proto().external_input, inputs):
            self.ws.create_blob(name).feed(X)
        self.ws.run(model.Proto())
        return [self.ws.blobs[name].fetch()
                for name in model.Proto().external_output]

    def _assert_rewrite_matches(self, cpu_model, inputs):
        # Rewrite the CPU model for MKL and require every output to agree
        # with the plain CPU run within tolerance.
        mkl_model = rewrite_graph.rewrite_model_helper_simple(cpu_model)
        np.testing.assert_allclose(
            self._run_model(cpu_model, inputs),
            self._run_model(mkl_model, inputs),
            atol=1e-4, rtol=1e-4)

    @given(gen=st.sampled_from([METHOD_NAME, simple_fc,
                                simple_mlp, simple_cnn]))
    def test_mkl_simple_rewrite(self, gen):
        cpu_model, (shape,) = gen()
        cpu_model = deterministic_io(cpu_model)
        X = np.random.randn(*shape).astype(np.float32)
        self._assert_rewrite_matches(cpu_model, [X])

    def test_mkl_resnet_rewrite(self):
        cpu_model, (shape,) = complex_resnet()
        cpu_model = deterministic_io(cpu_model)
        np.random.seed(1701)
        X = np.random.randn(*shape).astype(np.float32)
        self._assert_rewrite_matches(cpu_model, [X])

    def test_mkl_multi_output_rewrite(self):
        cpu_model, shapes = double_matmul()
        cpu_model = deterministic_io(cpu_model)
        np.random.seed(1701)
        Xs = [np.random.randn(*shape).astype(np.float32) for shape in shapes]
        self._assert_rewrite_matches(cpu_model, Xs)

    def test_mkl_alexnet_rewrite(self):
        cpu_model, (shape,) = alexnet()
        cpu_model = deterministic_io(cpu_model)
        np.random.seed(1701)
        X = np.random.randn(*shape).astype(np.float32)
        self._assert_rewrite_matches(cpu_model, [X])
if __name__ == "__main__":
    # `unittest` is already imported at the top of this module; the
    # redundant local re-import was removed.
    unittest.main()
5,555 | yes no | # ==========================================================================
# AIDA Detector description implementation
# --------------------------------------------------------------------------
# Copyright (C) Organisation europeenne pour la Recherche nucleaire (CERN)
# All rights reserved.
#
# For the licensing terms see $DD4hepINSTALL/LICENSE.
# For the list of contributors see $DD4hepINSTALL/doc/CREDITS.
#
# ==========================================================================
from __future__ import absolute_import
def METHOD_NAME(val):
    """Render a truthy value as "YES" and a falsy one as "NO " (padded)."""
    return "YES" if val else "NO "
def run():
    """Exercise the DDDigi property interface for every supported kind.

    For each property kind (int/double scalars, vector/list/set
    containers, strings, and a position) the script adds a property,
    checks it is visible via ``hasProperty``, reads it, reassigns it and
    reads it back, counting each successful round-trip.  14 successful
    interactions mean the test passed.
    """
    import DigiTest
    digi = DigiTest.Test(geometry=None, process_data=False)
    info = digi.info
    num_tests = 0
    # Histogram tuple-properties on a monitor action.
    histo = digi.create_action('DigiDepositEnergyMonitor/TestHisto')
    histo.histo1D_deposits = ("Energy", u"Some main deposit Title", 101, -0.5, 100.5)
    num_tests = num_tests + 1
    histo.histo1D_delta = ("Delta", u"Some delta Title", 50, -5, 5)
    num_tests = num_tests + 1
    histo.printProperties()
    info('property: histo1D_deposits = %s [%s]' %
         (str(histo.histo1D_deposits), str(histo.histo1D_deposits.__class__),))
    num_tests = num_tests + 1
    action = digi.input_action('DigiParallelActionSequence/Test')
    # --- integer properties: scalar, vector, list, set ---
    action.add_property('property_int', 1)
    info('property: has_property = %s [%s]' %
         (METHOD_NAME(action.hasProperty('property_int')), str(action.property_int.__class__),))
    info('property: property_int = %s' % (str(action.property_int),))
    action.property_int = 123456
    info('property: property_int = %s' % (str(action.property_int),))
    if action.hasProperty('property_int'):
        num_tests = num_tests + 1
    action.add_vector_property('property_vector_int', [1, 2, 3])
    info('property: has_property = %s [%s]' %
         (METHOD_NAME(action.hasProperty('property_vector_int')), str(action.property_vector_int.__class__), ))
    info('property: property_vector_int = %s' % (str(action.property_vector_int),))
    action.property_vector_int = [1, 2, 3, 4, 5, 6, 7, 8, 9, 0]
    info('property: property_vector_int = %s' % (str(action.property_vector_int),))
    if action.hasProperty('property_vector_int'):
        num_tests = num_tests + 1
    action.add_list_property('property_list_int', [1, 2, 3])
    info('property: has_property = %s [%s]' %
         (METHOD_NAME(action.hasProperty('property_list_int')), str(action.property_list_int.__class__),))
    info('property: property_list_int = %s' % (str(action.property_list_int),))
    action.property_list_int = [1, 2, 3, 4, 5, 6, 7, 8, 9, 0]
    info('property: property_list_int = %s' % (str(action.property_list_int),))
    if action.hasProperty('property_list_int'):
        num_tests = num_tests + 1
    action.add_set_property('property_set_int', [1, 2, 3])
    info('property: has_property = %s [%s]' %
         (METHOD_NAME(action.hasProperty('property_set_int')), str(action.property_set_int.__class__),))
    info('property: property_set_int = %s' % (str(action.property_set_int),))
    action.property_set_int = [1, 2, 3, 4, 5, 6, 7, 8, 9, 0]
    info('property: property_set_int = %s' % (str(action.property_set_int),))
    if action.hasProperty('property_set_int'):
        num_tests = num_tests + 1
    # --- double properties: scalar, vector, list, set ---
    action.add_property('property_double', 1.0)
    info('property: has_property = %s [%s]' %
         (METHOD_NAME(action.hasProperty('property_double')), str(action.property_double.__class__),))
    info('property: property_double = %s' % (str(action.property_double),))
    action.property_double = 123456.7
    info('property: property_double = %s' % (str(action.property_double),))
    if action.hasProperty('property_double'):
        num_tests = num_tests + 1
    action.add_vector_property('property_vector_double', [1.1, 2, 3])
    info('property: has_property = %s [%s]' %
         (METHOD_NAME(action.hasProperty('property_vector_double')), str(action.property_vector_double.__class__),))
    info('property: property_vector_double = %s' % (str(action.property_vector_double),))
    action.property_vector_double = [1.5, 2, 3, 4, 5, 6, 7, 8, 9, 0]
    info('property: property_vector_double = %s' % (str(action.property_vector_double),))
    if action.hasProperty('property_vector_double'):
        num_tests = num_tests + 1
    action.add_list_property('property_list_double', [1.1, 2, 3])
    info('property: has_property = %s [%s]' %
         (METHOD_NAME(action.hasProperty('property_list_double')), str(action.property_list_double.__class__), ))
    info('property: property_list_double = %s' % (str(action.property_list_double),))
    action.property_list_double = [1.5, 2, 3, 4, 5, 6, 7, 8, 9, 0]
    info('property: property_list_double = %s' % (str(action.property_list_double),))
    if action.hasProperty('property_list_double'):
        num_tests = num_tests + 1
    action.add_set_property('property_set_double', [1.1, 2, 3])
    info('property: has_property = %s [%s]' %
         (METHOD_NAME(action.hasProperty('property_set_double')), str(action.property_set_double.__class__),))
    info('property: property_set_double = %s' % (str(action.property_set_double),))
    action.property_set_double = [1.5, 2, 3, 4, 5, 6, 7, 8, 9, 0]
    info('property: property_set_double = %s' % (str(action.property_set_double),))
    if action.hasProperty('property_set_double'):
        num_tests = num_tests + 1
    # --- string properties: scalar and vector ---
    action.add_property('property_string', "string_1")
    info('property: has_property = %s [%s]' %
         (METHOD_NAME(action.hasProperty('property_string')), str(action.property_string.__class__),))
    info('property: property_string = %s' % (action.property_string,))
    action.property_string = "string_1123456"
    info('property: property_string = %s' % (action.property_string,))
    if action.hasProperty('property_string'):
        num_tests = num_tests + 1
    action.add_vector_property('property_vector_string', ["string1", "string2", "string3"])
    info('property: has_property = %s [%s]' %
         (METHOD_NAME(action.hasProperty('property_vector_string')), str(action.property_vector_string.__class__),))
    info('property: property_vector_string = %s' % (action.property_vector_string,))
    action.property_vector_string = ["string1", "string2", "string3", "string4", "string5", "string6"]
    info('property: property_vector_string = %s' % (action.property_vector_string,))
    if action.hasProperty('property_vector_string'):
        num_tests = num_tests + 1
    # --- position (3-vector) property ---
    action.add_position_property('property_position', (1., 2., 3.))
    info('property: has_property = %s [%s]' %
         (METHOD_NAME(action.hasProperty('property_position')), str(action.property_position.__class__),))
    info('property: property_position = %s' % (action.property_position,))
    action.property_position = (111.1, 222.2, 333.3)
    info('property: property_position = %s' % (action.property_position,))
    if action.hasProperty('property_position'):
        num_tests = num_tests + 1
    # 14 = 3 histogram interactions + 11 property kinds above.
    info('We checked %d properties interactions.' % (num_tests,))
    if num_tests == 14:
        info('Property test PASSED')
if __name__ == '__main__':
    # Allow running this test standalone.
    run()
5,556 | root conf | from contextlib import asynccontextmanager as acm
from functools import partial
import logging
import os
from pathlib import Path
import pytest
import tractor
from piker import (
config,
)
from piker.service import (
Services,
)
from piker.log import get_console_log
def pytest_addoption(parser):
    """Register piker-specific command-line flags with pytest."""
    parser.addoption(
        "--ll",
        action="store",
        dest='loglevel',
        default=None,
        help="logging level to set when testing",
    )
    parser.addoption(
        "--confdir",
        default=None,
        help="Use a practice API account",
    )
@pytest.fixture(scope='session')
def loglevel(request) -> str:
    """Session-scoped log level as passed via the ``--ll`` cli flag."""
    opts = request.config.option
    return opts.loglevel
@pytest.fixture(scope='session')
def test_config():
    """Absolute path to the ``data`` directory sitting next to this file."""
    data_dir = Path(__file__).resolve().parent / 'data'
    return str(data_dir)
@pytest.fixture(scope='session', autouse=True)
def confdir(
    request,
    test_config: str,
):
    '''
    If the `--confdir` flag is not passed use the
    broker config file found in that dir.

    '''
    confdir = request.config.option.confdir
    if confdir is not None:
        # Point piker's global config machinery at the user-provided dir.
        config._override_config_dir(confdir)

    return confdir
# Detect the CI environment once at import time. The ``CI`` env var is
# conventionally set by CI providers; coerce to a real ``bool`` so the
# annotation below actually holds (``os.environ.get`` returns a str).
_ci_env: bool = bool(os.environ.get('CI', False))


@pytest.fixture(scope='session')
def ci_env() -> bool:
    '''
    Detect CI environment.

    '''
    return _ci_env
@pytest.fixture()
def log(
    request: pytest.FixtureRequest,
    loglevel: str,
) -> logging.Logger:
    '''
    Deliver a per-test-named ``piker.log`` instance.

    '''
    # Name the logger after the current test node for readable output.
    return get_console_log(
        level=loglevel,
        name=request.node.name,
    )
@acm
async def _open_test_pikerd(
    tmpconfdir: str,
    reg_addr: tuple[str, int] | None = None,
    loglevel: str = 'warning',
    debug_mode: bool = False,
    **kwargs,

) -> tuple[
    str,
    int,
    tractor.Portal
]:
    '''
    Testing helper to startup the service tree and runtime on
    a different port then the default to allow testing alongside
    a running stack.

    Calls `.service._actor_runtime.maybe_open_pikerd()``
    to boot the root actor / tractor runtime.

    '''
    import random
    from piker.service import maybe_open_pikerd

    if reg_addr is None:
        # FIX: `random.randint()` requires int args; the float literals
        # 6e3/7e3 are deprecated since py3.10 and rejected in py3.12+.
        port = random.randint(6_000, 7_000)
        reg_addr = ('127.0.0.1', port)

    async with (
        maybe_open_pikerd(
            registry_addr=reg_addr,
            loglevel=loglevel,
            tractor_runtime_overrides={
                'piker_test_dir': tmpconfdir,
            },
            debug_mode=debug_mode,
            **kwargs,
        ) as service_manager,
    ):
        # this proc/actor is the pikerd
        assert service_manager is Services

        async with tractor.wait_for_actor(
            'pikerd',
            arbiter_sockaddr=reg_addr,
        ) as portal:
            raddr = portal.channel.raddr
            assert raddr == reg_addr
            yield (
                raddr[0],
                raddr[1],
                portal,
                service_manager,
            )
@pytest.fixture
def tmpconfdir(
    tmp_path: Path,
) -> Path:
    '''
    Ensure the `brokers.toml` file for the test run exists
    since we changed it to not touch files by default.

    Here we override the default (in the user dir) and
    set the global module var the same as we do inside
    the `tmpconfdir` fixture.

    '''
    tmpconfdir: Path = tmp_path / '_testing'
    tmpconfdir.mkdir()

    # touch the `brokers.toml` file since it won't
    # exist in the tmp test dir by default!

    # override config dir in the root actor (aka
    # this top level testing process).
    from piker import config
    config._config_dir: Path = tmpconfdir

    # Creates the file on disk if missing (touch_if_dne).
    conf, path = config.load(
        conf_name='brokers',
        touch_if_dne=True,
    )
    assert path.is_file(), 'WTH.. `brokers.toml` not created!?'

    yield tmpconfdir

    # NOTE: the `tmp_dir` fixture will wipe any files older then 3 test
    # sessions by default:
    # https://docs.pytest.org/en/6.2.x/tmpdir.html#the-default-base-temporary-directory
    # BUT, if we wanted to always wipe conf dir and all contained files,
    # from shutil import rmtree
    # rmtree(str(tmp_path))
@pytest.fixture
def METHOD_NAME(tmpconfdir) -> dict:
    # Load (creating it if missing) the root `conf.toml` from the
    # per-test config dir prepared by the `tmpconfdir` fixture.
    # NOTE(review): `config` here must be a module-level import of
    # `piker.config` (not visible in this chunk) — confirm.
    return config.load(
        'conf',
        touch_if_dne=True,
    )
@pytest.fixture
def open_test_pikerd(
    request: pytest.FixtureRequest,
    tmp_path: Path,
    tmpconfdir: Path,
    loglevel: str,
):
    '''
    Yield a `partial` of `_open_test_pikerd()` pre-bound to this test's
    temp config dir, log level and (pdb) debug-mode flag.

    '''
    tmpconfdir_str: str = str(tmpconfdir)

    # NOTE: on linux the tmp config dir is generally located at:
    # /tmp/pytest-of-<username>/pytest-<run#>/test_<current_test_name>/
    # the default `pytest` config ensures that only the last 4 test
    # suite run's dirs will be persisted, otherwise they are removed:
    # https://docs.pytest.org/en/6.2.x/tmpdir.html#the-default-base-temporary-directory
    print(f'CURRENT TEST CONF DIR: {tmpconfdir}')

    conf = request.config
    debug_mode: bool = conf.option.usepdb
    if (
        debug_mode
        and conf.option.capture != 'no'
    ):
        # `--pdb` with subactors requires output capturing disabled,
        # otherwise the debugger REPL can't attach to the terminal.
        # TODO: how to disable capture dynamically?
        # conf._configured = False
        # conf._do_configure()
        pytest.fail(
            'To use `--pdb` (with `tractor` subactors) you also must also '
            'pass `-s`!'
        )

    yield partial(
        _open_test_pikerd,

        # pass in a unique temp dir for this test request
        # so that we can have multiple tests running (maybe in parallel)
        # without clobbering each other's config state.
        tmpconfdir=tmpconfdir_str,

        # bind in level from fixture, which is itself set by
        # `--ll <value>` cli flag.
        loglevel=loglevel,
        debug_mode=debug_mode,
    )

    # TODO: teardown checks such as,
    # - no leaked subprocs or shm buffers
    # - all requested container service are torn down
    # - certain ``tractor`` runtime state?
# Copyright 2020-2023 Capypara and the SkyTemple Contributors
#
# This file is part of SkyTemple.
#
# SkyTemple is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# SkyTemple is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with SkyTemple. If not, see <https://www.gnu.org/licenses/>.
from __future__ import annotations
from typing import Optional, List, Tuple
from range_typed_integers import u16
from skytemple_files.common.ppmdu_config.data import Pmd2Data
from skytemple_files.common.util import (
AutoString,
read_u16,
write_u16,
)
class DungeonMusicEntry(AutoString):
    """
    One entry of the hardcoded dungeon music table.

    ``track_or_ref`` is either a direct track id, or — when
    ``is_random_ref`` is set — an index into the random music table.
    In the raw on-disk halfword the random-reference flag is bit 15.
    """
    track_or_ref: u16
    is_random_ref: bool

    def __init__(
        self,
        data: Optional[u16],
        track_ref: Optional[u16] = None,
        is_random_ref: bool = False,
    ):
        if track_ref is None:
            # Decode from the raw halfword: bit 15 flags a random-table
            # reference, the low 15 bits carry the id / index.
            assert data is not None
            self.track_or_ref = u16(data & ~0x8000)
            self.is_random_ref = (data & 0x8000) > 0
            # Round-trip sanity check against the encoder.
            assert data == self.to_int()
        else:
            # Construct directly from already-decoded fields.
            self.track_or_ref = track_ref
            self.is_random_ref = is_random_ref

    def to_int(self) -> u16:
        """Re-encode this entry into its raw halfword representation."""
        if not self.is_random_ref:
            return self.track_or_ref
        return u16(0x8000 + self.track_or_ref)
class HardcodedDungeonMusic:
    """
    Accessors for the hardcoded dungeon music tables stored in the
    overlay 10 binary. All table values are little-endian u16.
    """

    @staticmethod
    def METHOD_NAME(ov10: bytes, config: Pmd2Data) -> List[DungeonMusicEntry]:
        """Read the per-dungeon music id table (one entry per 2 bytes)."""
        block = config.bin_sections.overlay10.data.MUSIC_ID_TABLE
        # Consistent with the setters: the table length must be known.
        assert block.length is not None
        lst = []
        for i in range(block.address, block.address + block.length, 2):
            lst.append(
                DungeonMusicEntry(
                    read_u16(ov10, i),
                )
            )
        return lst

    @staticmethod
    def set_music_list(
        value: List[DungeonMusicEntry], ov10: bytearray, config: Pmd2Data
    ) -> None:
        """Write `value` over the music id table; length must match exactly.

        Raises:
            ValueError: if `value` doesn't have the table's entry count.
        """
        block = config.bin_sections.overlay10.data.MUSIC_ID_TABLE
        assert block.length is not None
        expected_length = int(block.length / 2)
        if len(value) != expected_length:
            raise ValueError(
                f"The list must have exactly the length of {expected_length} entries."
            )
        for i, entry in enumerate(value):
            write_u16(ov10, entry.to_int(), block.address + i * 2)

    @staticmethod
    def get_random_music_list(
        ov10: bytes, config: Pmd2Data
    ) -> List[Tuple[u16, u16, u16, u16]]:
        """Read the random music table (four track ids per 8-byte entry)."""
        block = config.bin_sections.overlay10.data.RANDOM_MUSIC_ID_TABLE
        assert block.length is not None
        lst = []
        for i in range(block.address, block.address + block.length, 8):
            lst.append(
                (
                    read_u16(ov10, i),
                    read_u16(ov10, i + 2),
                    read_u16(ov10, i + 4),
                    read_u16(ov10, i + 6),
                )
            )
        return lst

    @staticmethod
    def set_random_music_list(
        value: List[Tuple[u16, u16, u16, u16]], ov10: bytearray, config: Pmd2Data
    ) -> None:
        """Write `value` over the random music table; length must match exactly.

        Raises:
            ValueError: if `value` doesn't have the table's entry count.
        """
        block = config.bin_sections.overlay10.data.RANDOM_MUSIC_ID_TABLE
        assert block.length is not None
        expected_length = int(block.length / 8)
        if len(value) != expected_length:
            raise ValueError(
                f"The list must have exactly the length of {expected_length} entries."
            )
        for i, (a, b, c, d) in enumerate(value):
            write_u16(ov10, a, block.address + i * 8 + 0)
            write_u16(ov10, b, block.address + i * 8 + 2)
            write_u16(ov10, c, block.address + i * 8 + 4)
            write_u16(ov10, d, block.address + i * 8 + 6)
5,558 | draw box | from pybricks.media.ev3dev import Font, Image, ImageFile
from pybricks.parameters import Color
from typing import Union
class Screen:
    """
    A stub class to represent the screen member of the EV3Brick class.

    All drawing methods are no-op stubs (bodies are ``...``); they exist
    purely for IDE/type-checker support of the real pybricks API.

    Attributes:
        height (int): The height of the screen in pixels.
        width (int): The width of the screen in pixels.
    """
    def __init__(self):
        # Fixed EV3 LCD dimensions in pixels.
        self.width = 178 # type: int
        self.height = 128 # type: int
    def clear(self):
        """
        Clears the screen. All pixels on the screen will be set to Color.WHITE.
        """
        ...
    def draw_text(self, x: int, y: int, text: str, text_color: Color = Color.BLACK, background_color: Union[Color, None] = None):
        """
        Draws text on the screen.
        The most recent font set using set_font() will be used or Font.DEFAULT if no font has been set yet.
        Args:
            x (int): The x-axis value where the left side of the text will start.
            y (int): The y-axis value where the top of the text will start.
            text (str): The text to draw.
            text_color (Color): The color used for drawing the text.
            background_color (Color): The color used to fill the rectangle behind the text or None for transparent background.
        """
        ...
    def print(self, *args, sep: str = "", end: str = "\n"):
        """
        Prints a line of text on the screen.
        This method works like the builtin print() function, but it writes on the screen instead.
        You can set the font using set_font(). If no font has been set, Font.DEFAULT will be used. The text is always printed used black text with a white background.
        Unlike the builtin print(), the text does not wrap if it is too wide to fit on the screen. It just gets cut off. But if the text would go off of the bottom of the screen, the entire image is scrolled up and the text is printed in the new blank area at the bottom of the screen.
        Args:
            args (object): Zero or more objects to print.
            sep (str): Separator that will be placed between each object that is printed.
            end (str): End of line that will be printed after the last object.
        """
        ...
    def set_font(self, font: Font):
        """
        Sets the font used for writing on the screen.
        The font is used for both draw_text() and print().
        Args:
            font (Font): The font to use.
        """
        ...
    def load_image(self, source: Union[str, Image, ImageFile]):
        """
        Clears this image, then draws the source image centered in the screen.
        Args:
            source (ImageFile, Image, or str): The source Image. If the argument is a string (or ImageFile), then the source image is loaded from file.
        """
        ...
    def draw_image(self, x: int, y: int, source: Union[str, Image, ImageFile], transparent: Union[Color, None] = None):
        """
        Draws the source image on the screen.
        Args:
            x (int): The x-axis value where the left side of the image will start.
            y (int): The y-axis value where the top of the image will start.
            source (ImageFile, Image, str): The source Image. If the argument is a string (or ImageFile), then the source image is loaded from file.
            transparent (Color): The color of image to treat as transparent or None for no transparency.
        """
        ...
    def draw_pixel(self, x: int, y: int, color: Color = Color.BLACK):
        """
        Draws a single pixel on the screen.
        Args:
            x (int): The x coordinate of the pixel.
            y (int): The y coordinate of the pixel.
            color (Color): The color of the pixel.
        """
        ...
    def draw_line(self, x1: int, y1: int, x2: int, y2: int, width: int = 1, color: Color = Color.BLACK):
        """
        Draws a line on the screen.
        Args:
            x1 (int): The x coordinate of the starting point of the line.
            y1 (int): The y coordinate of the starting point of the line.
            x2 (int): The x coordinate of the ending point of the line.
            y2 (int): The y coordinate of the ending point of the line.
            width (int): The width of the line in pixels.
            color (Color): The color of the line.
        """
        ...
    def METHOD_NAME(self, x1: int, y1: int, x2: int, y2: int, r: int = 0, fill: bool = False, color: Color = Color.BLACK):
        """
        Draws a box on the screen.
        Args:
            x1 (int): The x coordinate of the left side of the box.
            y1 (int): The y coordinate of the top of the box.
            x2 (int): The x coordinate of the right side of the box.
            y2 (int): The y coordinate of the bottom of the box.
            r (int): The radius of the corners of the box.
            fill (bool): If True, the box will be filled with color, otherwise only the outline of the box will be drawn.
            color (Color): The color of the box.
        """
        ...
    def draw_circle(self, x: int, y: int, r: int, fill: bool = False, color: Color = Color.BLACK):
        """
        Draws a circle on the screen.
        Args:
            x (int): The x coordinate of the center of the circle.
            y (int): The y coordinate of the center of the circle.
            r (int): The radius of the circle.
            fill (bool): If True, the circle will be filled with color, otherwise only the circumference will be drawn.
            color (Color): The color of the circle.
        """
        ...
    def save(self, filename: str):
        """
        Saves the screen as a .png file.
        Args:
            filename (str): The path to the file to be saved.
        Raises:
            TypeError: filename is not a string
            OSError: There was a problem saving the file.
        """
        ...
5,559 | build | from conan import ConanFile
from conan.errors import ConanInvalidConfiguration
from conan.tools.apple import fix_apple_shared_install_name
from conan.tools.METHOD_NAME import cross_building
from conan.tools.env import VirtualBuildEnv, VirtualRunEnv
from conan.tools.files import apply_conandata_patches, copy, export_conandata_patches, get, replace_in_file, rm, rmdir
from conan.tools.gnu import PkgConfigDeps
from conan.tools.layout import basic_layout
from conan.tools.meson import Meson, MesonToolchain
from conan.tools.microsoft import is_msvc_static_runtime
import os
required_conan_version = ">=1.53.0"
class AtkConan(ConanFile):
    """Conan recipe for ATK: accessibility interfaces implemented by
    other toolkits and applications.

    NOTE: this recipe is marked ``deprecated`` in favour of the
    `at-spi2-core` package (ATK was merged into it upstream).
    """
    name = "atk"
    description = "set of accessibility interfaces that are implemented by other toolkits and applications"
    topics = ("accessibility",)
    url = "https://github.com/conan-io/conan-center-index"
    homepage = "https://www.atk.org"
    license = "LGPL-2.1-or-later"
    deprecated = "at-spi2-core"
    settings = "os", "arch", "compiler", "build_type"
    options = {
        "shared": [True, False],
        "fPIC": [True, False],
    }
    default_options = {
        "shared": False,
        "fPIC": True,
    }
    def export_sources(self):
        # Ship the patches listed in conandata.yml with the recipe.
        export_conandata_patches(self)
    def config_options(self):
        # fPIC is meaningless on Windows.
        if self.settings.os == "Windows":
            del self.options.fPIC
    def configure(self):
        if self.options.shared:
            self.options.rm_safe("fPIC")
        # Pure C library: C++ standard/runtime settings don't apply.
        self.settings.rm_safe("compiler.cppstd")
        self.settings.rm_safe("compiler.libcxx")
    def layout(self):
        basic_layout(self, src_folder="src")
    def requirements(self):
        self.requires("glib/2.76.3")
    def validate(self):
        # Guard against shared/static glib mix-ups known to misbehave.
        if self.options.shared and not self.dependencies["glib"].options.shared:
            raise ConanInvalidConfiguration(
                "Linking a shared library against static glib can cause unexpected behaviour."
            )
        if str(self.settings.compiler) == "Visual Studio" and not self.options.shared and \
            is_msvc_static_runtime(self) and self.dependencies["glib"].options.shared:
            raise ConanInvalidConfiguration("this specific configuration is prevented due to internal c3i limitations")
    def build_requirements(self):
        self.tool_requires("meson/1.1.1")
        if not self.conf.get("tools.gnu:pkg_config", check_type=str):
            self.tool_requires("pkgconf/1.9.3")
        # When cross compiling, host tools from glib are needed at build time.
        if hasattr(self, "settings_build") and cross_building(self):
            self.tool_requires("glib/2.76.3")
    def source(self):
        get(self, **self.conan_data["sources"][self.version], strip_root=True)
    def generate(self):
        env = VirtualBuildEnv(self)
        env.generate()
        if not cross_building(self):
            env = VirtualRunEnv(self)
            env.generate(scope="build")
        tc = MesonToolchain(self)
        # Disable gobject-introspection and docs; not needed for packaging.
        tc.project_options["introspection"] = False
        tc.project_options["docs"] = False
        tc.project_options["localedir"] = os.path.join(self.package_folder, "bin", "share", "locale")
        tc.generate()
        deps = PkgConfigDeps(self)
        deps.generate()
    def _patch_sources(self):
        apply_conandata_patches(self)
        # Don't build the upstream test suite.
        replace_in_file(self, os.path.join(self.source_folder, "meson.build"), "subdir('tests')", "#subdir('tests')")
    def METHOD_NAME(self):
        self._patch_sources()
        meson = Meson(self)
        meson.configure()
        meson.METHOD_NAME()
    def package(self):
        copy(self, "COPYING", src=self.source_folder, dst=os.path.join(self.package_folder, "licenses"))
        meson = Meson(self)
        meson.install()
        # Strip build-system artifacts from the package.
        rmdir(self, os.path.join(self.package_folder, "lib", "pkgconfig"))
        rm(self, "*.pdb", os.path.join(self.package_folder, "bin"))
        fix_apple_shared_install_name(self)
        fix_msvc_libname(self)
    def package_info(self):
        self.cpp_info.set_property("pkg_config_name", "atk")
        self.cpp_info.libs = ["atk-1.0"]
        self.cpp_info.includedirs = [os.path.join("include", "atk-1.0")]
def fix_msvc_libname(conanfile, remove_lib_prefix=True):
    """Strip the ``lib`` prefix and rename import libs to ``.lib`` for
    cl-like compilers (no-op for compilers without a runtime setting)."""
    if not conanfile.settings.get_safe("compiler.runtime"):
        return
    from conan.tools.files import rename
    import glob
    suffixes = [".dll.a", ".dll.lib", ".a"]
    for libdir in getattr(conanfile.cpp.package, "libdirs"):
        folder = os.path.join(conanfile.package_folder, libdir)
        for suffix in suffixes:
            for filepath in glob.glob(os.path.join(folder, f"*{suffix}")):
                stem = os.path.basename(filepath)[:-len(suffix)]
                if remove_lib_prefix and stem.startswith("lib"):
                    stem = stem[3:]
                target = os.path.join(os.path.dirname(filepath), f"{stem}.lib")
                rename(conanfile, filepath, target)
5,560 | ns create | import urllib
class CurieBackendException(Exception):
    """Base exception raised for curie storage-backend errors."""
class Backends(object):
    """Registry mapping URL schemes to backend classes."""

    #: scheme -> backend class; shared, process-wide registry.
    backends = {}

    @classmethod
    def register(cls, scheme):
        """Return a class decorator registering the class for *scheme*."""
        def do_reg(c):
            cls.backends[scheme] = c
            return c
        return do_reg

    @classmethod
    def get_backend(cls, app, url, *args, **kargs):
        """Instantiate the backend registered for *url*'s scheme.

        Raises ``KeyError`` when no backend is registered for the scheme.
        """
        # NOTE: import the submodule explicitly; a bare `import urllib`
        # (as done at module level) does not guarantee `urllib.parse`
        # is loaded, which made the original attribute access fragile.
        from urllib.parse import urlparse
        pl = urlparse(url)
        b = cls.backends[pl.scheme]
        return b(app, url, *args, **kargs)
class CurieBackend(object):
    """
    Abstract interface for curie storage backends.

    Concrete backends (e.g. the git backend) subclass this and implement
    the CRUD/versioning operations for configs, blobs, documents,
    entries, namespaces ("ns") and keys. Every method here raises
    ``NotImplementedError`` by default.
    """

    def __init__(self, app, url):
        # The base class keeps no state; subclasses parse `url` as needed.
        pass

    # --- configs ---
    def configs_list(self):
        raise NotImplementedError
    def configs_list_versions(self, config):
        raise NotImplementedError
    def configs_get(self, config, version=None):
        raise NotImplementedError
    def configs_create(self, data, name=None):
        raise NotImplementedError
    def configs_update(self, config, data):
        raise NotImplementedError
    def configs_delete(self, config):
        raise NotImplementedError
    def configs_revert(self, config, version):
        raise NotImplementedError
    def configs_clone(self, config, new_name):
        raise NotImplementedError

    # --- blobs ---
    def blobs_list(self, config, version=None):
        raise NotImplementedError
    def blobs_list_versions(self, config, blob):
        raise NotImplementedError
    # NOTE: singular name (`blob_get`, not `blobs_get`) is kept as-is —
    # existing callers may rely on it.
    def blob_get(self, config, blob, version=None):
        raise NotImplementedError
    def blobs_create(self, config, blob, data):
        raise NotImplementedError
    def blobs_update(self, config, blob, data):
        raise NotImplementedError
    def blobs_delete(self, config, blob):
        raise NotImplementedError
    def blobs_revert(self, config, blob, version):
        raise NotImplementedError

    # --- documents ---
    def documents_list(self, config, version=None):
        raise NotImplementedError
    def documents_get(self, config, document, version=None):
        raise NotImplementedError
    def documents_list_versions(self, config, document):
        raise NotImplementedError
    def documents_create(self, config, document, data):
        raise NotImplementedError
    def documents_update(self, config, document, data):
        raise NotImplementedError
    def documents_delete(self, config, document):
        raise NotImplementedError
    def documents_revert(self, config, document, version):
        raise NotImplementedError

    # --- entries ---
    def entries_list(self, config, document, version=None):
        raise NotImplementedError
    def entries_list_versions(self, config, document, entry):
        raise NotImplementedError
    def entries_get(self, config, document, entry, version=None):
        raise NotImplementedError
    def entries_create(self, config, document, data):
        raise NotImplementedError
    def entries_update(self, config, document, entry, data):
        raise NotImplementedError
    def entries_edit(self, config, document, entry, data):
        raise NotImplementedError
    def entries_delete(self, config, document, entry):
        raise NotImplementedError
    def entries_revert(self, config, document, entry, version):
        raise NotImplementedError

    # --- namespaces ---
    def ns_list(self):
        raise NotImplementedError
    def ns_list_versions(self):
        raise NotImplementedError
    def ns_get(self, dbname, version=None):
        raise NotImplementedError
    def METHOD_NAME(self, dbname, data):
        raise NotImplementedError
    # fix: first parameter was misspelled `slef` in the original.
    def ns_update(self, dbname, data):
        raise NotImplementedError
    def ns_delete(self, dbname):
        raise NotImplementedError
    def ns_revert(self, dbname, version):
        raise NotImplementedError
    def ns_query(self, dbname, query):
        raise NotImplementedError

    # --- keys ---
    def key_list(self, dbname):
        raise NotImplementedError
    def key_get(self, dbname, key, version=None):
        raise NotImplementedError
    def key_list_versions(self, dbname, key):
        raise NotImplementedError
    def key_set(self, dbname, key, value):
        raise NotImplementedError
    def key_delete(self, dbname, key):
        raise NotImplementedError
from . import gitbackend |
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
#
# Code generated by aaz-dev-tools
# --------------------------------------------------------------------------------------------
# pylint: skip-file
# flake8: noqa
from azure.cli.core.aaz import *
@register_command(
    "network route-table route show",
)
class Show(AAZCommand):
    """Get the details of a route in a route table.

    :example: Get the details of a route in a route table.
        az network route-table route show -g MyResourceGroup --route-table-name MyRouteTable -n MyRoute -o table
    """
    # Auto-generated (aaz-dev-tools) command: maps the CLI command to its
    # ARM resource path + api-version. Avoid hand-editing the schemas.
    _aaz_info = {
        "version": "2018-11-01",
        "resources": [
            ["mgmt-plane", "/subscriptions/{}/resourcegroups/{}/providers/microsoft.network/routetables/{}/routes/{}", "2018-11-01"],
        ]
    }
    def _handler(self, command_args):
        # Parse args via the base class, run the GET, then shape output.
        super()._handler(command_args)
        self._execute_operations()
        return self.METHOD_NAME()
    _args_schema = None
    @classmethod
    def _build_arguments_schema(cls, *args, **kwargs):
        # Build (once) and cache the CLI argument schema for this command.
        if cls._args_schema is not None:
            return cls._args_schema
        cls._args_schema = super()._build_arguments_schema(*args, **kwargs)
        # define Arg Group ""
        _args_schema = cls._args_schema
        _args_schema.resource_group = AAZResourceGroupNameArg(
            required=True,
        )
        _args_schema.name = AAZStrArg(
            options=["-n", "--name"],
            help="Route name.",
            required=True,
            id_part="child_name_1",
        )
        _args_schema.route_table_name = AAZStrArg(
            options=["--route-table-name"],
            help="Route table name.",
            required=True,
            id_part="name",
        )
        return cls._args_schema
    def _execute_operations(self):
        self.pre_operations()
        self.RoutesGet(ctx=self.ctx)()
        self.post_operations()
    @register_callback
    def pre_operations(self):
        pass
    @register_callback
    def post_operations(self):
        pass
    def METHOD_NAME(self, *args, **kwargs):
        # Deserialize the fetched route instance into the CLI output shape.
        result = self.deserialize_output(self.ctx.vars.instance, client_flatten=True)
        return result
    class RoutesGet(AAZHttpOperation):
        # HTTP GET of a single route resource.
        CLIENT_TYPE = "MgmtClient"
        def __call__(self, *args, **kwargs):
            request = self.make_request()
            session = self.client.send_request(request=request, stream=False, **kwargs)
            if session.http_response.status_code in [200]:
                return self.on_200(session)
            return self.on_error(session.http_response)
        @property
        def url(self):
            return self.client.format_url(
                "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeTables/{routeTableName}/routes/{routeName}",
                **self.url_parameters
            )
        @property
        def method(self):
            return "GET"
        @property
        def error_format(self):
            return "MgmtErrorFormat"
        @property
        def url_parameters(self):
            parameters = {
                **self.serialize_url_param(
                    "resourceGroupName", self.ctx.args.resource_group,
                    required=True,
                ),
                **self.serialize_url_param(
                    "routeName", self.ctx.args.name,
                    required=True,
                ),
                **self.serialize_url_param(
                    "routeTableName", self.ctx.args.route_table_name,
                    required=True,
                ),
                **self.serialize_url_param(
                    "subscriptionId", self.ctx.subscription_id,
                    required=True,
                ),
            }
            return parameters
        @property
        def query_parameters(self):
            parameters = {
                **self.serialize_query_param(
                    "api-version", "2018-11-01",
                    required=True,
                ),
            }
            return parameters
        @property
        def header_parameters(self):
            parameters = {
                **self.serialize_header_param(
                    "Accept", "application/json",
                ),
            }
            return parameters
        def on_200(self, session):
            # Stash the deserialized payload for `METHOD_NAME` to emit.
            data = self.deserialize_http_content(session)
            self.ctx.set_var(
                "instance",
                data,
                schema_builder=self._build_schema_on_200
            )
        _schema_on_200 = None
        @classmethod
        def _build_schema_on_200(cls):
            # Build (once) and cache the response schema for a 200.
            if cls._schema_on_200 is not None:
                return cls._schema_on_200
            cls._schema_on_200 = AAZObjectType()
            _schema_on_200 = cls._schema_on_200
            _schema_on_200.etag = AAZStrType()
            _schema_on_200.id = AAZStrType()
            _schema_on_200.name = AAZStrType()
            _schema_on_200.properties = AAZObjectType(
                flags={"client_flatten": True},
            )
            properties = cls._schema_on_200.properties
            properties.address_prefix = AAZStrType(
                serialized_name="addressPrefix",
            )
            properties.next_hop_ip_address = AAZStrType(
                serialized_name="nextHopIpAddress",
            )
            properties.next_hop_type = AAZStrType(
                serialized_name="nextHopType",
                flags={"required": True},
            )
            properties.provisioning_state = AAZStrType(
                serialized_name="provisioningState",
            )
            return cls._schema_on_200
class _ShowHelper:
    """Helper class for Show"""


# Only the command class is the public API of this generated module.
# (fix: stray extraction junk after the list literal removed)
__all__ = ["Show"]
#!/usr/bin/python
# Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tensorflow as tf
from google.protobuf import text_format
from tensorflow.python.tools import freeze_graph
from tensorflow.python.tools import optimize_for_inference_lib
import argparse
import os
def METHOD_NAME(path):
    """Split *path* into ``(directory, stem, extension-without-dot)``.

    e.g. '/tmp/.ssh/my.key.dat' -> ('/tmp/.ssh', 'my.key', 'dat')
    """
    directory, base = os.path.split(path)
    stem, dot_ext = os.path.splitext(base)
    return (directory, stem, dot_ext[1:])  # drop leading '.', '' if none
def importGraphIntoSession(sess, filename):
    """Load a GraphDef from `filename` and import it into `sess`'s graph.

    Supports binary '.pb' and text '.pbtxt' protobuf files. Must be called
    inside a `with tf.Session() as sess:` block.

    Raises:
        ValueError: if the file extension is neither 'pb' nor 'pbtxt'.
    """
    # this should be called inside
    # with tf.Session() as sess:
    assert sess
    (_, _, ext) = METHOD_NAME(filename)

    if (ext.lower() == 'pb'):
        with tf.gfile.GFile(filename, 'rb') as f:
            graph_def = tf.GraphDef()
            graph_def.ParseFromString(f.read())
    elif (ext.lower() == 'pbtxt'):
        with open(filename, 'r') as reader:
            graph_def = tf.GraphDef()
            text_format.Parse(reader.read(), graph_def)
    else:
        # fix: the original only printed here and then crashed with an
        # UnboundLocalError on `graph_def` below; fail explicitly instead.
        raise ValueError("# Error: unknown extension - " + ext)

    tf.import_graph_def(graph_def)
def print_operation(op, op_count):
    """Pretty-print one graph operation: type/name header, inputs,
    outputs and attributes.

    `op_count` is the display index shown in the header line. The
    `value` attribute of Const ops is skipped (it can be huge).
    """
    print("")  # new line
    print("OP #{}: {}, name = {}".format(op_count, op.type, op.name))

    print("\tinputs:")
    for input_tensor in op.inputs:
        print("\t\t{} : name = {}".format(input_tensor.shape, input_tensor.name))

    print("\toutputs:")
    for output_tensor in op.outputs:
        print("\t\t{}, name = {}".format(output_tensor.shape, output_tensor.name))

    print("\tattributes:")
    op_def = op.op_def
    # fix: use the `op_def` local (it was assigned but unused before).
    for attr_def in op_def.attr:
        attr = op.get_attr(attr_def.name)
        # skip Const value
        if op.type == "Const" and attr_def.name == "value":
            print("\t\t{}, name = {}".format("skipping value", attr_def.name))
        else:
            print("\t\t{}, name = {}".format(attr, attr_def.name))
    print("")  # new line
def print_graph_info(pb_path, optype_substring, name_prefix):
    """Print detailed info for every op in `pb_path` whose type contains
    `optype_substring` ('*' matches all) and whose name starts with
    `name_prefix` (None matches all). Non-matching ops are listed briefly.
    """
    with tf.Session() as sess:
        importGraphIntoSession(sess, pb_path)
        op_seq = 1
        # NOTE(review): op_count starts at 1, so the printed total looks
        # off by one relative to the number of ops actually printed —
        # confirm whether that is intentional.
        op_count = 1
        graph = sess.graph
        ops = graph.get_operations()
        for op in ops:
            if optype_substring == "*" and (name_prefix == None
                                            or op.name.startswith(name_prefix)):
                print_operation(op, op_seq)
                op_count += 1
            elif op.type.lower().find(optype_substring.lower()) != -1 and (
                    name_prefix == None or op.name.startswith(name_prefix)):
                print_operation(op, op_seq)
                op_count += 1
            else:
                print("skipping {}, name = {}".format(op.type, op.name))
            op_seq += 1
        print("")
        print("Total number of operations : " + str(op_count))
        print("")
def print_summary(pb_path, optype_substring, name_prefix):
    """Print a per-op-type count summary for ops in `pb_path` matching
    `optype_substring` ('*' matches all) and `name_prefix` (None matches
    all), followed by total op and op-type counts.
    """
    op_map = {}
    op_count = 0
    with tf.Session() as sess:
        importGraphIntoSession(sess, pb_path)
        graph = sess.graph
        ops = graph.get_operations()
        for op in ops:
            # Same matching rules as print_graph_info().
            process = False
            if optype_substring == "*" and (name_prefix == None
                                            or op.name.startswith(name_prefix)):
                process = True
            elif op.type.lower().find(optype_substring.lower()) != -1 and (
                    name_prefix == None or op.name.startswith(name_prefix)):
                process = True
            if process:
                op_count += 1
                if op_map.get(op.type) == None:
                    op_map[op.type] = 1
                else:
                    op_map[op.type] += 1
    # print op list
    print("")
    for op_type, count in op_map.items():
        print("\t" + op_type + " : \t" + str(count))
    print("")
    print("Total number of operations : " + str(op_count))
    print("Total number of operation types : " + str(len(op_map.keys())))
    print("")
if __name__ == "__main__":
    # CLI: inspect the ops inside a TensorFlow .pb/.pbtxt graph file.
    parser = argparse.ArgumentParser(description='Prints information inside pb file')
    parser.add_argument("pb_file", help="pb file to read")
    parser.add_argument(
        "op_subst",
        # fix: help-string typo ("operasions")
        help="substring of operations. only info of these operations will be printed.")
    parser.add_argument(
        "--summary", help="print summary of operations", action="store_true")
    # fix: help-string typo ("speficied")
    parser.add_argument("--name_prefix", help="filtered by specified name prefix")
    args = parser.parse_args()

    if args.summary:
        print_summary(args.pb_file, args.op_subst, args.name_prefix)
    else:
        print_graph_info(args.pb_file, args.op_subst, args.name_prefix)
5,563 | create mixed use type | """
This script creates a new use-type by aggregating values from a list of different use-types
"""
import os
import numpy as np
import pandas as pd
import cea
import cea.config
import cea.inputlocator
from cea.datamanagement.archetypes_mapper import calculate_average_multiuse
from cea.datamanagement.schedule_helper import calc_single_mixed_schedule, ScheduleData
from cea.utilities.schedule_reader import save_cea_schedule
__author__ = "Reynold Mok"
__copyright__ = "Copyright 2018, Architecture and Building Systems - ETH Zurich"
__credits__ = ["Reynold Mok, Jimeno Fonseca"]
__license__ = "MIT"
__version__ = "0.1"
__maintainer__ = "Daren Thomas"
__email__ = "cea@arch.ethz.ch"
__status__ = "Production"
# NOTE(review): not referenced in this chunk — presumably the fallback
# use-type elsewhere; confirm before removing.
DEFAULT_USE_TYPE = 'MULTI_RES'
# Column-name templates for the i-th mixed-use component and its ratio
# (e.g. 'USE_0' holds the use-type code, 'USE_0_R' its ratio).
COLUMN_VAR_NAME_TEMPLATE = 'USE_{}'
COLUMN_VAR_VAL_TEMPLATE = 'USE_{}_R'
def METHOD_NAME(locator, internal_loads_df, indoor_comfort_df,
                use_type_name, use_type_metadata, use_type_ratios_dict):
    """
    Takes a list of use-types and their respective ratios to aggregate and create a new use-type
    with schedules, internal loads and indoor comfort data.

    :param locator: InputLocator pointing at the scenario's database files
    :param internal_loads_df: INTERNAL_LOADS sheet as a DataFrame
    :param indoor_comfort_df: INDOOR_COMFORT sheet as a DataFrame
    :param use_type_name: code of the new aggregated use-type
    :param use_type_metadata: metadata string written to the new schedule file
    :param use_type_ratios_dict: mapping of existing use-type code -> ratio
    """
    list_uses = use_type_ratios_dict.keys()
    list_var_names = [COLUMN_VAR_NAME_TEMPLATE.format(i) for i in range(len(use_type_ratios_dict))]
    list_var_values = [COLUMN_VAR_VAL_TEMPLATE.format(i) for i in range(len(use_type_ratios_dict))]

    # Creating required parameters
    # Flatten the ratios dict into the USE_i / USE_i_R column layout that
    # the archetypes-mapper helpers expect.
    properties_dict = {}
    for i, (k, v) in enumerate(use_type_ratios_dict.items()):
        properties_dict[list_var_names[i]] = k
        properties_dict[list_var_values[i]] = v
    properties_df = pd.DataFrame([properties_dict])

    occupant_densities = calculate_occupant_density(list_uses, internal_loads_df)

    print("Calculating internal loads...")
    new_internal_loads_df = calculate_mixed_loads(properties_df, internal_loads_df, occupant_densities, list_uses,
                                                  use_type_name, list_var_names, list_var_values)
    print("Calculating indoor comfort...")
    new_indoor_comfort_df = calculate_mixed_loads(properties_df, indoor_comfort_df, occupant_densities, list_uses,
                                                  use_type_name, list_var_names, list_var_values)

    prop_df_c = properties_df.copy()
    prop_df_c['Name'] = '0'  # Set a `Name` column as index for function to work
    prop_df_c.set_index('Name', inplace=True)
    schedule_data_all_uses = ScheduleData(locator)
    internal_loads = internal_loads_df.set_index('code')
    print("Calculating schedules...")
    schedule_new_data, schedule_complementary_data = calc_single_mixed_schedule(list_uses, occupant_densities,
                                                                                prop_df_c, internal_loads, '0',
                                                                                schedule_data_all_uses, list_var_names,
                                                                                list_var_values, use_type_metadata)

    print("Writing to disk...")
    # Rewrite both sheets of the use-type properties workbook, then save
    # the new use-type's schedule as its own CSV.
    use_type_properties_path = locator.get_database_use_types_properties()
    with pd.ExcelWriter(use_type_properties_path) as writer:
        new_internal_loads_df.to_excel(writer, sheet_name='INTERNAL_LOADS', index=False)
        new_indoor_comfort_df.to_excel(writer, sheet_name='INDOOR_COMFORT', index=False)
    schedule_path = os.path.join(locator.get_database_use_types_folder(), '{}.csv'.format(use_type_name))
    save_cea_schedule(schedule_new_data, schedule_complementary_data, schedule_path)
def calculate_mixed_loads(properties_df, loads_df, occupant_densities, list_uses, use_type_name, list_var_names, list_var_values):
    """Return `loads_df` with a new (or replaced) aggregated row for
    `use_type_name`, computed as the occupant-density-weighted average of
    the component use-types described by `properties_df`.
    """
    prop_df = properties_df.copy().merge(loads_df, left_on=list_var_names[0], right_on='code')
    loads_df_columns = loads_df.columns
    calculated_loads_df = calculate_average_multiuse(loads_df_columns, prop_df, occupant_densities,
                                                     list_uses, loads_df,
                                                     list_var_names, list_var_values).loc[:, loads_df_columns]
    calculated_loads_df['code'] = use_type_name

    # Remove rows that have the same `code` as new row
    clean_loads_df = loads_df[loads_df['code'] != use_type_name]
    # fix: `DataFrame.append` was removed in pandas 2.0 — use `pd.concat`
    # (same semantics as append with the default ignore_index=False).
    return pd.concat([clean_loads_df, calculated_loads_df])
def calculate_occupant_density(use_types, internal_loads_df):
    """Return people-per-m2 for each use-type code in *use_types*.

    Computed as 1 / Occ_m2p from the INTERNAL_LOADS table; a use-type
    with Occ_m2p == 0 maps to a density of 0.0.
    """
    internal_loads = internal_loads_df.copy().set_index('code')
    densities = {}
    for use in use_types:
        m2_per_person = internal_loads.loc[use, 'Occ_m2p']
        densities[use] = 1.0 / m2_per_person if m2_per_person > 0.0 else 0.0
    return densities
def main(config):
    """Entry point: read the mixed-use-type section of the CEA config and
    create the aggregated use-type in the scenario database.

    Fails with an AssertionError if a use-type with the requested name
    already exists in either properties sheet.
    """
    # Config Parameters
    use_type_name = config.METHOD_NAME.use_type
    use_type_metadata = config.METHOD_NAME.metadata
    use_type_ratios = config.METHOD_NAME.ratios
    # Ratios arrive as 'CODE|ratio' strings; parse into a code -> float map.
    use_type_ratios_dict = {k: float(v) for k, v in [ratio.split('|') for ratio in use_type_ratios]}

    locator = cea.inputlocator.InputLocator(scenario=config.scenario)
    use_type_properties_df = pd.read_excel(locator.get_database_use_types_properties(), sheet_name=None)
    internal_loads_df = use_type_properties_df['INTERNAL_LOADS']
    indoor_comfort_df = use_type_properties_df['INDOOR_COMFORT']

    assert use_type_name not in internal_loads_df['code'].tolist() and use_type_name not in indoor_comfort_df['code'].tolist(), \
        'Use-type name {} already exists'.format(use_type_name)

    METHOD_NAME(locator,
                internal_loads_df=internal_loads_df,
                indoor_comfort_df=indoor_comfort_df,
                use_type_name=use_type_name,
                use_type_metadata=use_type_metadata,
                use_type_ratios_dict=use_type_ratios_dict)
if __name__ == '__main__':
    # Run with the default CEA configuration when invoked as a script.
    main(cea.config.Configuration())
|
# pylint: disable=too-many-lines
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import sys
from typing import Any, AsyncIterable, Callable, Dict, Optional, TypeVar
import urllib.parse
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import (
ClientAuthenticationError,
HttpResponseError,
ResourceExistsError,
ResourceNotFoundError,
ResourceNotModifiedError,
map_error,
)
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.core.utils import case_insensitive_dict
from azure.mgmt.core.exceptions import ARMErrorFormat
from ... import models as _models
from ..._vendor import _convert_request
from ...operations._event_hubs_operations import build_list_by_namespace_request
if sys.version_info >= (3, 8):
from typing import Literal # pylint: disable=no-name-in-module, ungrouped-imports
else:
from typing_extensions import Literal # type: ignore # pylint: disable=ungrouped-imports
# Generic type for the value produced by the optional ``cls`` response callback.
T = TypeVar("T")
# Signature of the ``cls`` callback:
# (pipeline response, deserialized body, response headers) -> Any.
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class EventHubsOperations:
    """
    .. warning::
        **DO NOT** instantiate this class directly.

        Instead, you should access the following operations through
        :class:`~azure.mgmt.servicebus.v2018_01_01_preview.aio.ServiceBusManagementClient`'s
        :attr:`event_hubs` attribute.
    """

    models = _models

    def __init__(self, *args, **kwargs) -> None:
        # Generated-client convention: positional args are
        # (client, config, serializer, deserializer); fall back to keyword args.
        input_args = list(args)
        self._client = input_args.pop(0) if input_args else kwargs.pop("client")
        self._config = input_args.pop(0) if input_args else kwargs.pop("config")
        self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer")
        self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer")

    @distributed_trace
    def list_by_namespace(
        self, resource_group_name: str, namespace_name: str, **kwargs: Any
    ) -> AsyncIterable["_models.Eventhub"]:
        """Gets all the Event Hubs in a service bus Namespace.

        :param resource_group_name: Name of the Resource group within the Azure subscription. Required.
        :type resource_group_name: str
        :param namespace_name: The namespace name. Required.
        :type namespace_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either Eventhub or the result of cls(response)
        :rtype:
         ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.servicebus.v2018_01_01_preview.models.Eventhub]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        _headers = kwargs.pop("headers", {}) or {}
        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})

        api_version: Literal["2018-01-01-preview"] = kwargs.pop(
            "api_version", _params.pop("api-version", "2018-01-01-preview")
        )
        cls: ClsType[_models.EventHubListResult] = kwargs.pop("cls", None)

        # Map HTTP status codes to the exception raised for each; callers may
        # extend or override this via the ``error_map`` keyword.
        error_map = {
            401: ClientAuthenticationError,
            404: ResourceNotFoundError,
            409: ResourceExistsError,
            304: ResourceNotModifiedError,
        }
        error_map.update(kwargs.pop("error_map", {}) or {})

        def prepare_request(next_link=None):
            # First page: build the request from the operation's URL template.
            if not next_link:
                request = build_list_by_namespace_request(
                    resource_group_name=resource_group_name,
                    namespace_name=namespace_name,
                    subscription_id=self._config.subscription_id,
                    api_version=api_version,
                    template_url=self.list_by_namespace.metadata["url"],
                    headers=_headers,
                    params=_params,
                )
                request = _convert_request(request)
                request.url = self._client.format_url(request.url)

            else:
                # make call to next link with the client's api-version
                _parsed_next_link = urllib.parse.urlparse(next_link)
                _next_request_params = case_insensitive_dict(
                    {
                        key: [urllib.parse.quote(v) for v in value]
                        for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items()
                    }
                )
                _next_request_params["api-version"] = self._config.api_version
                request = HttpRequest(
                    "GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params
                )
                request = _convert_request(request)
                request.url = self._client.format_url(request.url)
                request.method = "GET"
            return request

        async def METHOD_NAME(pipeline_response):
            # Deserialize one page; return (link to next page or None, items).
            deserialized = self._deserialize("EventHubListResult", pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)  # type: ignore
            return deserialized.next_link or None, AsyncList(list_of_elem)

        async def get_next(next_link=None):
            # Fetch one page, raising a mapped exception for non-200 responses.
            request = prepare_request(next_link)

            pipeline_response: PipelineResponse = await self._client._pipeline.run(  # pylint: disable=protected-access
                request, stream=False, **kwargs
            )
            response = pipeline_response.http_response

            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
                raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

            return pipeline_response

        return AsyncItemPaged(get_next, METHOD_NAME)

    list_by_namespace.metadata = {
        "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ServiceBus/namespaces/{namespaceName}/eventhubs"
    }
5,565 | modify doc | #-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2023, Anaconda, Inc. All rights reserved.
#
# Powered by the Bokeh Development Team.
#
# The full license is in the file LICENSE.txt, distributed with this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Boilerplate
#-----------------------------------------------------------------------------
from __future__ import annotations # isort:skip
import pytest ; pytest
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
# Bokeh imports
from bokeh.core.enums import ButtonType
from bokeh.layouts import column
from bokeh.models import (
Circle,
ColumnDataSource,
CustomJS,
Dropdown,
Plot,
Range1d,
)
from tests.support.plugins.project import BokehModelPage, BokehServerPage
from tests.support.util.selenium import RECORD, find_element_for
#-----------------------------------------------------------------------------
# Tests
#-----------------------------------------------------------------------------
# Register the project's pytest support plugin for every test in this module.
pytest_plugins = (
    "tests.support.plugins.project",
)

# XXX (bev) split dropdown (i.e. with default value) has serious problems

# (label, value) menu entries shared by every dropdown test below.
items = [("Item 1", "item_1_value"), ("Item 2", "item_2_value"), ("Item 3", "item_3_value")]
@pytest.mark.selenium
class Test_Dropdown:
    """In-browser (selenium) integration tests for the Dropdown widget."""

    def test_displays_menu_items(self, bokeh_model_page: BokehModelPage) -> None:
        # Clicking the button should reveal the menu of items.
        button = Dropdown(label="Dropdown button", menu=items)
        page = bokeh_model_page(button)
        button_el = find_element_for(page.driver, button, "button")
        assert button_el.text == "Dropdown button"
        button_el.click()
        menu = find_element_for(page.driver, button, ".bk-menu")
        assert menu.is_displayed()

    @pytest.mark.parametrize('typ', list(ButtonType))
    def test_displays_button_type(self, typ, bokeh_model_page: BokehModelPage) -> None:
        # Each ButtonType value should appear as a CSS class on the button element.
        button = Dropdown(label="Dropdown button", menu=items, button_type=typ)
        page = bokeh_model_page(button)
        button_el = find_element_for(page.driver, button, "button")
        assert typ in button_el.get_attribute('class')

    def test_server_on_change_round_trip(self, bokeh_server_page: BokehServerPage) -> None:
        # Clicking a menu item must fire the Python-side 'menu_item_click'
        # callback on the server, which mutates the shared data source; the
        # RECORD custom action lets us read the data back from the browser.
        button = Dropdown(label="Dropdown button", menu=items)

        def METHOD_NAME(doc):
            # Build a minimal document: the dropdown plus a plot whose data
            # source is rewritten by the menu callback.
            source = ColumnDataSource(dict(x=[1, 2], y=[1, 1]))
            plot = Plot(height=400, width=400, x_range=Range1d(0, 1), y_range=Range1d(0, 1), min_border=0)
            plot.add_glyph(source, Circle(x='x', y='y', size=20))
            plot.tags.append(CustomJS(name="custom-action", args=dict(s=source), code=RECORD("data", "s.data")))

            def cb(event):
                item = event.item
                if item == "item_1_value":
                    source.data = dict(x=[10, 20], y=[10, 10])
                elif item == "item_2_value":
                    source.data = dict(x=[100, 200], y=[100, 100])
                elif item == "item_3_value":
                    source.data = dict(x=[1000, 2000], y=[1000, 1000])

            button.on_event('menu_item_click', cb)
            doc.add_root(column(button, plot))

        page = bokeh_server_page(METHOD_NAME)

        # Item 1 -> first data set.
        button_el = find_element_for(page.driver, button, "button")
        button_el.click()
        item = find_element_for(page.driver, button, ".bk-menu > *:nth-child(1)")
        item.click()
        page.eval_custom_action()
        results = page.results
        assert results == {'data': {'x': [10, 20], 'y': [10, 10]}}

        # Item 3 -> third data set.
        button_el = find_element_for(page.driver, button, "button")
        button_el.click()
        item = find_element_for(page.driver, button, ".bk-menu > *:nth-child(3)")
        item.click()
        page.eval_custom_action()
        results = page.results
        assert results == {'data': {'x': [1000, 2000], 'y': [1000, 1000]}}

        # Item 2 -> second data set.
        button_el = find_element_for(page.driver, button, "button")
        button_el.click()
        item = find_element_for(page.driver, button, ".bk-menu > *:nth-child(2)")
        item.click()
        page.eval_custom_action()
        results = page.results
        assert results == {'data': {'x': [100, 200], 'y': [100, 100]}}

        assert page.has_no_console_errors()

    def test_js_on_change_executes(self, bokeh_model_page: BokehModelPage) -> None:
        # The JS-side 'menu_item_click' handler should record the clicked
        # item's value entirely client-side (no server round trip).
        button = Dropdown(label="Dropdown button", menu=items)
        button.js_on_event('menu_item_click', CustomJS(code=RECORD("value", "this.item")))

        page = bokeh_model_page(button)

        button_el = find_element_for(page.driver, button, "button")
        button_el.click()
        item = find_element_for(page.driver, button, ".bk-menu > *:nth-child(1)")
        item.click()
        results = page.results
        assert results == {'value': "item_1_value"}

        button_el = find_element_for(page.driver, button, "button")
        button_el.click()
        item = find_element_for(page.driver, button, ".bk-menu > *:nth-child(3)")
        item.click()
        results = page.results
        assert results == {'value': "item_3_value"}

        button_el = find_element_for(page.driver, button, "button")
        button_el.click()
        item = find_element_for(page.driver, button, ".bk-menu > *:nth-child(2)")
        item.click()
        results = page.results
        assert results == {'value': "item_2_value"}

        assert page.has_no_console_errors()
5,566 | test shape of earth pre 6 | # Copyright iris-grib contributors
#
# This file is part of iris-grib and is released under the LGPL license.
# See COPYING and COPYING.LESSER in the root of the repository for full
# licensing details.
"""
Integration tests for grib2 file loading.
These tests load various files from iris-test-data, and compare the cube with a
reference CML file, to catch any unexpected changes over time.
"""
# import iris_grib.tests first so that some things can be initialised
# before importing anything else.
import iris_grib.tests as tests
import iris
# Common path prefix for the reference-CML result files used by every test below.
_RESULTDIR_PREFIX = ("integration", "load_convert", "sample_file_loads")
@tests.skip_data
class TestBasicLoad(tests.IrisGribTest):
    """Load assorted GRIB1/GRIB2 sample files and compare against reference CML."""

    def test_load_rotated(self):
        # GRIB1 file on a rotated UK grid (with a deliberately wrong parameter code).
        cubes = iris.load(
            tests.get_data_path(("GRIB", "rotated_uk", "uk_wrongparam.grib1"))
        )
        self.assertCML(cubes, _RESULTDIR_PREFIX + ("rotated.cml",))

    def test_load_time_bound(self):
        cubes = iris.load(
            tests.get_data_path(("GRIB", "time_processed", "time_bound.grib1"))
        )
        self.assertCML(cubes, _RESULTDIR_PREFIX + ("time_bound_grib1.cml",))

    def test_load_time_processed(self):
        cubes = iris.load(
            tests.get_data_path(("GRIB", "time_processed", "time_bound.grib2"))
        )
        self.assertCML(cubes, _RESULTDIR_PREFIX + ("time_bound_grib2.cml",))

    def test_load_3_layer(self):
        cubes = iris.load(
            tests.get_data_path(("GRIB", "3_layer_viz", "3_layer.grib2"))
        )
        # Re-order into a fixed cube order so the CML comparison is deterministic.
        cubes = iris.cube.CubeList([cubes[1], cubes[0], cubes[2]])
        self.assertCML(cubes, _RESULTDIR_PREFIX + ("3_layer.cml",))

    def test_load_masked(self):
        gribfile = tests.get_data_path(
            ("GRIB", "missing_values", "missing_values.grib2")
        )
        cubes = iris.load(gribfile)
        self.assertCML(cubes,
                       _RESULTDIR_PREFIX + ("missing_values_grib2.cml",))

    def test_polar_stereo_grib1(self):
        cube = iris.load_cube(
            tests.get_data_path(("GRIB", "polar_stereo", "ST4.2013052210.01h"))
        )
        self.assertCML(cube, _RESULTDIR_PREFIX + ("polar_stereo_grib1.cml",))

    def test_polar_stereo_grib2_grid_definition(self):
        # Check the projection coordinates and coord-system parameters
        # directly, rather than via a CML comparison.
        cube = iris.load_cube(
            tests.get_data_path(
                (
                    "GRIB",
                    "polar_stereo",
                    "CMC_glb_TMP_ISBL_1015_ps30km_2013052000_P006.grib2",
                )
            )
        )
        self.assertEqual(cube.shape, (200, 247))
        pxc = cube.coord("projection_x_coordinate")
        self.assertAlmostEqual(pxc.points.max(), 4769905.5125, places=4)
        self.assertAlmostEqual(pxc.points.min(), -2610094.4875, places=4)
        pyc = cube.coord("projection_y_coordinate")
        self.assertAlmostEqual(pyc.points.max(), -216.1459, places=4)
        self.assertAlmostEqual(pyc.points.min(), -5970216.1459, places=4)
        self.assertEqual(pyc.coord_system, pxc.coord_system)
        self.assertEqual(pyc.coord_system.grid_mapping_name, "stereographic")
        self.assertEqual(pyc.coord_system.central_lat, 90.0)
        self.assertEqual(pyc.coord_system.central_lon, 249.0)
        self.assertEqual(pyc.coord_system.false_easting, 0.0)
        self.assertEqual(pyc.coord_system.false_northing, 0.0)
        self.assertEqual(pyc.coord_system.true_scale_lat, 60.0)

    def test_lambert_grib1(self):
        cube = iris.load_cube(
            tests.get_data_path(("GRIB", "lambert", "lambert.grib1"))
        )
        self.assertCML(cube, _RESULTDIR_PREFIX + ("lambert_grib1.cml",))

    def test_lambert_grib2(self):
        cube = iris.load_cube(
            tests.get_data_path(("GRIB", "lambert", "lambert.grib2"))
        )
        self.assertCML(cube, _RESULTDIR_PREFIX + ("lambert_grib2.cml",))

    def test_regular_gg_grib1(self):
        cube = iris.load_cube(
            tests.get_data_path(("GRIB", "gaussian", "regular_gg.grib1"))
        )
        self.assertCML(cube, _RESULTDIR_PREFIX + ("regular_gg_grib1.cml",))

    def test_regular_gg_grib2(self):
        cube = iris.load_cube(
            tests.get_data_path(("GRIB", "gaussian", "regular_gg.grib2"))
        )
        self.assertCML(cube, _RESULTDIR_PREFIX + ("regular_gg_grib2.cml",))

    def test_reduced_ll(self):
        cube = iris.load_cube(
            tests.get_data_path(("GRIB", "reduced", "reduced_ll.grib1"))
        )
        self.assertCML(cube, _RESULTDIR_PREFIX + ("reduced_ll_grib1.cml",))

    def test_reduced_gg(self):
        cube = iris.load_cube(
            tests.get_data_path(("GRIB", "reduced", "reduced_gg.grib2"))
        )
        self.assertCML(cube, _RESULTDIR_PREFIX + ("reduced_gg_grib2.cml",))
@tests.skip_data
class TestIjDirections(tests.IrisGribTest):
    """Check GRIB2 loading for each combination of i/j scan directions."""

    @staticmethod
    def _old_compat_load(name):
        # Helper: load one cube from the 'ij_directions' test-data folder.
        filepath = tests.get_data_path(("GRIB", "ij_directions", name))
        cube = iris.load_cube(filepath)
        return cube

    def test_ij_directions_ipos_jpos(self):
        cubes = self._old_compat_load("ipos_jpos.grib2")
        self.assertCML(cubes, _RESULTDIR_PREFIX + ("ipos_jpos.cml",))

    def test_ij_directions_ipos_jneg(self):
        cubes = self._old_compat_load("ipos_jneg.grib2")
        self.assertCML(cubes, _RESULTDIR_PREFIX + ("ipos_jneg.cml",))

    def test_ij_directions_ineg_jneg(self):
        cubes = self._old_compat_load("ineg_jneg.grib2")
        self.assertCML(cubes, _RESULTDIR_PREFIX + ("ineg_jneg.cml",))

    def test_ij_directions_ineg_jpos(self):
        cubes = self._old_compat_load("ineg_jpos.grib2")
        self.assertCML(cubes, _RESULTDIR_PREFIX + ("ineg_jpos.cml",))
@tests.skip_data
class TestShapeOfEarth(tests.IrisGribTest):
    """Check GRIB2 loading for each 'shape of the earth' code, plus a GRIB1 case."""

    @staticmethod
    def _old_compat_load(name):
        # Helper: load one cube from the 'shape_of_earth' test-data folder.
        filepath = tests.get_data_path(("GRIB", "shape_of_earth", name))
        cube = iris.load_cube(filepath)
        return cube

    def test_shape_of_earth_basic(self):
        # pre-defined sphere
        cube = self._old_compat_load("0.grib2")
        self.assertCML(cube, _RESULTDIR_PREFIX + ("earth_shape_0.cml",))

    def test_shape_of_earth_custom_1(self):
        # custom sphere
        cube = self._old_compat_load("1.grib2")
        self.assertCML(cube, _RESULTDIR_PREFIX + ("earth_shape_1.cml",))

    def test_shape_of_earth_IAU65(self):
        # IAU65 oblate sphere
        cube = self._old_compat_load("2.grib2")
        self.assertCML(cube, _RESULTDIR_PREFIX + ("earth_shape_2.cml",))

    def test_shape_of_earth_custom_3(self):
        # custom oblate spheroid (km)
        cube = self._old_compat_load("3.grib2")
        self.assertCML(cube, _RESULTDIR_PREFIX + ("earth_shape_3.cml",))

    def test_shape_of_earth_IAG_GRS80(self):
        # IAG-GRS80 oblate spheroid
        cube = self._old_compat_load("4.grib2")
        self.assertCML(cube, _RESULTDIR_PREFIX + ("earth_shape_4.cml",))

    def test_shape_of_earth_WGS84(self):
        # WGS84
        cube = self._old_compat_load("5.grib2")
        self.assertCML(cube, _RESULTDIR_PREFIX + ("earth_shape_5.cml",))

    def METHOD_NAME(self):
        # pre-defined sphere (shape-of-earth code 6)
        cube = self._old_compat_load("6.grib2")
        self.assertCML(cube, _RESULTDIR_PREFIX + ("earth_shape_6.cml",))

    def test_shape_of_earth_custom_7(self):
        # custom oblate spheroid (m)
        cube = self._old_compat_load("7.grib2")
        self.assertCML(cube, _RESULTDIR_PREFIX + ("earth_shape_7.cml",))

    def test_shape_of_earth_grib1(self):
        # grib1 - same as grib2 shape 6, above
        cube = self._old_compat_load("global.grib1")
        self.assertCML(cube, _RESULTDIR_PREFIX + ("earth_shape_grib1.cml",))
# Allow this test module to be run directly.
if __name__ == "__main__":
    tests.main()
5,567 | sort partition | # Copyright 2023 Iguazio
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import re
import dask.dataframe as dd
from dask.dataframe.multi import merge, merge_asof
from dask.distributed import Client
import mlrun
from .base import BaseMerger
class DaskFeatureMerger(BaseMerger):
    """Offline feature-vector merger implemented on top of dask dataframes.

    Joins the dataframes of the vector's feature sets — plain key joins and
    time-based (as-of) joins — and exposes the merged result, optionally
    computed back to pandas.
    """

    engine = "dask"
    support_offline = True

    def __init__(self, vector, **engine_args):
        super().__init__(vector, **engine_args)
        # A ready dask client may be injected; otherwise one is created
        # lazily in _create_engine_env (possibly against a remote cluster).
        self.client = engine_args.get("dask_client")
        self._dask_cluster_uri = engine_args.get("dask_cluster_uri")

    def _reset_index(self, df):
        """Move the index into columns, dropping it only when it is unnamed."""
        to_drop = df.index.name is None
        df = df.reset_index(drop=to_drop)
        return df

    def _asof_join(
        self,
        entity_df,
        entity_timestamp_column: str,
        featureset_name: str,
        featureset_timestamp: str,
        featureset_df: list,
        left_keys: list,
        right_keys: list,
    ):
        """Time-based (as-of) join of ``featureset_df`` onto ``entity_df``."""

        def METHOD_NAME(partition, timestamp):
            # merge_asof requires both sides to be sorted on the join
            # timestamp; sort each partition independently.
            return partition.sort_values(timestamp)

        entity_df = entity_df.map_partitions(
            METHOD_NAME, timestamp=entity_timestamp_column
        )
        featureset_df = featureset_df.map_partitions(
            METHOD_NAME, timestamp=featureset_timestamp
        )

        merged_df = merge_asof(
            entity_df,
            featureset_df,
            left_on=entity_timestamp_column,
            right_on=featureset_timestamp,
            left_by=left_keys or None,
            right_by=right_keys or None,
            suffixes=("", f"_{featureset_name}_"),
        )

        # Columns that received the feature-set suffix duplicate entity
        # columns; schedule them for removal from the final result.
        for col in merged_df.columns:
            if re.findall(f"_{featureset_name}_$", col):
                self._append_drop_column(col)
        return merged_df

    def _join(
        self,
        entity_df,
        entity_timestamp_column: str,
        featureset_name,
        featureset_timestamp,
        featureset_df,
        left_keys: list,
        right_keys: list,
    ):
        """Key-based join of ``featureset_df`` onto ``entity_df``."""
        merged_df = merge(
            entity_df,
            featureset_df,
            how=self._join_type,
            left_on=left_keys,
            right_on=right_keys,
            suffixes=("", f"_{featureset_name}_"),
        )
        # Suffixed columns are duplicates of entity columns; drop them later.
        for col in merged_df.columns:
            if re.findall(f"_{featureset_name}_$", col):
                self._append_drop_column(col)
        return merged_df

    def get_status(self):
        """Return the merge status; dask merges complete synchronously."""
        if self._result_df is None:
            raise RuntimeError("unexpected status, no result df")
        return "completed"

    def get_df(self, to_pandas=True):
        """Return the merged dataframe, computing to pandas when requested."""
        if to_pandas and hasattr(self._result_df, "dask"):
            df = self._result_df.compute()
        else:
            df = self._result_df
        self._set_indexes(df)
        return df

    def _create_engine_env(self):
        """Prepare the dask client (local or from a remote cluster function)."""
        if "index" not in self._index_columns:
            self._append_drop_column("index")

        # init the dask client if needed
        if not self.client:
            if self._dask_cluster_uri:
                function = mlrun.import_function(self._dask_cluster_uri)
                self.client = function.client
            else:
                self.client = Client()

    def _get_engine_df(
        self,
        feature_set,
        feature_set_name,
        column_names=None,
        start_time=None,
        end_time=None,
        time_column=None,
    ):
        """Load one feature set as a dask dataframe, persisted in the cluster."""
        df = feature_set.to_dataframe(
            columns=column_names,
            df_module=dd,
            start_time=start_time,
            end_time=end_time,
            time_column=time_column,
            index=False,
        )

        return self._reset_index(df).persist()

    def _rename_columns_and_select(self, df, rename_col_dict, columns=None):
        # NOTE(review): the ``columns`` argument is currently ignored — no
        # selection happens here; selection appears to rely on the
        # drop-columns mechanism instead. Confirm against other engines.
        return df.rename(
            columns=rename_col_dict,
        )

    def _drop_columns_from_result(self):
        self._result_df = self._result_df.drop(
            columns=self._drop_columns, errors="ignore"
        )

    def _filter(self, query):
        self._result_df = self._result_df.query(query)

    def _order_by(self, order_by_active):
        # Bug fix: sort_values returns a new dataframe (it does not sort in
        # place), so the result must be assigned back — previously it was
        # discarded and the requested ordering silently lost.
        self._result_df = self._result_df.sort_values(by=order_by_active)

    def _convert_entity_rows_to_engine_df(self, entity_rows):
        """Convert a pandas entity-rows dataframe to dask (pass dask through)."""
        if entity_rows is not None and not hasattr(entity_rows, "dask"):
            return dd.from_pandas(entity_rows, npartitions=len(entity_rows.columns))
        return entity_rows
5,568 | get seasonal shows | # API information
# https://myanimelist.net/modules.php?go=api
from logging import debug, info, warning, error
import re
from .. import AbstractInfoHandler
from data.models import UnprocessedShow, ShowType
class InfoHandler(AbstractInfoHandler):
    """MyAnimeList info handler: show search, metadata scraping and season listings."""

    _show_link_base = "https://myanimelist.net/anime/{id}/"
    _show_link_matcher = r"https?://(?:.+?\.)?myanimelist\.net/anime/([0-9]+)"
    _season_show_url = "https://myanimelist.net/anime/season"
    _api_search_base = "https://myanimelist.net/api/anime/search.xml?q={q}"

    def __init__(self):
        super().__init__("mal", "MyAnimeList")

    def get_link(self, link):
        """Return the canonical MAL URL for a link record, or None."""
        if link is None:
            return None
        return self._show_link_base.format(id=link.site_key)

    def extract_show_id(self, url):
        """Extract the numeric MAL show id from a URL, or None if no match."""
        if url is not None:
            match = re.match(self._show_link_matcher, url, re.I)
            if match:
                return match.group(1)
        return None

    def find_show(self, show_name, **kwargs):
        """Search MAL's XML API for shows matching ``show_name``.

        Returns a (possibly empty) list of UnprocessedShow records.
        """
        url = self._api_search_base.format(q=show_name)
        result = self._mal_api_request(url, **kwargs)
        if result is None:
            error("Failed to find show")
            return list()

        assert result.tag == "anime"
        shows = list()
        # (removed a leftover debug print of each XML entry)
        for child in result:
            assert child.tag == "entry"

            show_id = child.find("id").text
            name = child.find("title").text
            more_names = [child.find("english").text]
            show = UnprocessedShow(self.key, show_id, name, more_names, ShowType.UNKNOWN, 0, False)
            shows.append(show)
        return shows

    def find_show_info(self, show_id, **kwargs):
        """Scrape the MAL show page for alternative (English) titles."""
        debug("Getting show info for {}".format(show_id))

        # Request show page from MAL
        url = self._show_link_base.format(id=show_id)
        response = self._mal_request(url, **kwargs)
        if response is None:
            error("Cannot get show page")
            return None

        # Parse show page
        names_sib = response.find("h2", string="Alternative Titles")

        # English
        name_elem = names_sib.find_next_sibling("div")
        if name_elem is None:
            warning(" Name elem not found")
            return None
        name_english = name_elem.string
        info("  English: {}".format(name_english))

        names = [name_english]
        # Bug fix: previously passed the *builtin* ``id`` function as the
        # show id; use the actual show_id argument.
        return UnprocessedShow(self.key, show_id, None, names, ShowType.UNKNOWN, 0, False)

    def get_episode_count(self, link, **kwargs):
        """Scrape the show page for its total episode count (None if unknown)."""
        debug("Getting episode count")

        # Request show page from MAL
        url = self._show_link_base.format(id=link.site_key)
        response = self._mal_request(url, **kwargs)
        if response is None:
            error("Cannot get show page")
            return None

        # Parse show page (ugh, HTML parsing)
        count_sib = response.find("span", string="Episodes:")
        if count_sib is None:
            error("Failed to find episode count sibling")
            return None
        count_elem = count_sib.find_next_sibling(string=re.compile(r"\d+"))
        if count_elem is None:
            warning("  Count not found")
            return None
        count = int(count_elem.strip())
        debug("  Count: {}".format(count))

        return count

    def get_show_score(self, show, link, **kwargs):
        """Scrape the show page for its community score (None if unavailable)."""
        debug("Getting show score")

        # Request show page
        url = self._show_link_base.format(id=link.site_key)
        response = self._mal_request(url, **kwargs)
        if response is None:
            error("Cannot get show page")
            return None

        # Find score
        score_elem = response.find("span", attrs={"itemprop": "ratingValue"})
        try:
            score = float(score_elem.string)
        except (AttributeError, TypeError, ValueError):
            # score_elem may be missing (None) or its text non-numeric ("N/A").
            # (narrowed from a bare except; also fixed the copy-pasted message)
            warning("  Score not found")
            return None
        debug("  Score: {}".format(score))

        return score

    def METHOD_NAME(self, year=None, season=None, **kwargs):
        """Scrape the MAL season page for the current season's new shows."""
        # TODO: use year and season if provided
        debug("Getting season shows: year={}, season={}".format(year, season))

        # Request season page from MAL
        response = self._mal_request(self._season_show_url, **kwargs)
        if response is None:
            error("Cannot get show list")
            return list()

        # Parse page (ugh, HTML parsing. Where's the useful API, MAL?)
        lists = response.find_all(class_="seasonal-anime-list")
        if len(lists) == 0:
            error("Invalid page? Lists not found")
            return list()
        new_list = lists[0].find_all(class_="seasonal-anime")
        if len(new_list) == 0:
            error("Invalid page? Shows not found in list")
            return list()

        new_shows = list()
        episode_count_regex = re.compile(r"(\d+|\?) eps?")
        for show in new_list:
            show_key = show.find(class_="genres")["id"]
            title = str(show.find("a", class_="link-title").string)
            title = _normalize_title(title)
            more_names = [title[:-11]] if title.lower().endswith("2nd season") else list()
            show_type = ShowType.TV  # TODO, changes based on section/list
            episode_count = episode_count_regex.search(show.find(class_="eps").find(string=episode_count_regex)).group(1)
            episode_count = None if episode_count == "?" else int(episode_count)
            has_source = show.find(class_="source").string != "Original"

            new_shows.append(UnprocessedShow(self.key, show_key, title, more_names, show_type, episode_count, has_source))

        return new_shows

    # Private

    def _mal_request(self, url, **kwargs):
        # HTML-parsed request against the public site.
        return self.request(url, html=True, **kwargs)

    def _mal_api_request(self, url, **kwargs):
        # XML API request; requires basic-auth credentials from the config.
        if "username" not in self.config or "password" not in self.config:
            error("Username and password required for MAL requests")
            return None

        auth = (self.config["username"], self.config["password"])
        return self.request(url, auth=auth, xml=True, **kwargs)
def _convert_type(mal_type):
    # Placeholder: MAL media-type conversion is not implemented yet;
    # callers always receive None.
    return None
def _normalize_title(title):
title = re.sub(" \(TV\)", "", title)
return title |
5,569 | install | # Copyright 2013-2023 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
import os
import socket
import llnl.util.tty as tty
from spack.package import *
def cmake_cache_entry(name, value, vtype=None):
    """
    Helper that creates CMake cache entry strings used in
    'host-config' files.

    When *vtype* is not given, "ON"/"OFF" values are typed BOOL and
    everything else PATH.
    """
    if vtype is None:
        vtype = "BOOL" if value in ("ON", "OFF") else "PATH"
    return 'set({0} "{1}" CACHE {2} "")\n\n'.format(name, value, vtype)
class Apcomp(Package):
    """A multi use-case image compositor"""

    homepage = "https://github.com/Alpine-DAV/ap_compositor"
    git = "https://github.com/Alpine-DAV/ap_compositor.git"
    url = (
        "https://github.com/Alpine-DAV/ap_compositor/releases/download/v0.0.1/apcomp-v0.0.1.tar.gz"
    )

    maintainers("cyrush")

    version("master", branch="master", submodules="True")
    version("0.0.4", sha256="061876dd55e443de91a40d10662496f6bb58b0a3835aec78f5710f5a737d0494")
    version("0.0.3", sha256="07e8c1d6a23205f4cc66d0a030e65a69e8344545f4d56213d968b67a410adc6e")
    version("0.0.2", sha256="cb2e2c4524889408de2dd3d29665512c99763db13e6f5e35c3b55e52948c649c")
    version("0.0.1", sha256="cbf85fe58d5d5bc2f468d081386cc8b79861046b3bb7e966edfa3f8e95b998b2")

    variant("openmp", default=True, description="Build with openmp support")
    variant("mpi", default=True, description="Build with MPI support")
    variant("shared", default=True, description="Build Shared Library")
    # set to false for systems that implicitly link mpi
    variant("blt_find_mpi", default=True, description="Use BLT CMake Find MPI logic")

    depends_on("cmake@3.9:", type="build")
    depends_on("mpi", when="+mpi")
    depends_on("llvm-openmp", when="+openmp %apple-clang")

    # The CMake project lives under src/, not the repository root.
    root_cmakelists_dir = "src"

    def METHOD_NAME(self, spec, prefix):
        """
        Build and install APComp
        """
        with working_dir("spack-build", create=True):
            host_cfg_fname = self.create_host_config(spec, prefix)
            cmake_args = []
            # if we have a static build, we need to avoid any of
            # spack's default cmake settings related to rpaths
            # (see: https://github.com/LLNL/spack/issues/2658)
            if "+shared" in spec:
                cmake_args.extend(std_cmake_args)
            else:
                for arg in std_cmake_args:
                    if arg.count("RPATH") == 0:
                        cmake_args.append(arg)
            # Configure against the generated host-config cache file.
            cmake_args.extend(["-C", host_cfg_fname, "../src"])
            print("Configuring APComp...")
            cmake(*cmake_args)
            print("Building APComp...")
            make()
            print("Installing APComp...")
            make("install")
            # install copy of host config for provenance
            # NOTE(review): this call resolves to the module-level file-copy
            # helper exported by spack.package, not to this method — confirm
            # the intended symbol.
            METHOD_NAME(host_cfg_fname, prefix)

    def create_host_config(self, spec, prefix):
        """
        This method creates a 'host-config' file that specifies
        all of the options used to configure and build apcomp.

        Returns the absolute path of the generated file.
        """

        #######################
        # Compiler Info
        #######################
        c_compiler = env["SPACK_CC"]
        cpp_compiler = env["SPACK_CXX"]

        #######################################################################
        # We directly fetch the names of the actual compilers to create a
        # 'host config' file that works outside of the spack install env.
        #######################################################################

        sys_type = spec.architecture
        # if on llnl systems, we can use the SYS_TYPE
        if "SYS_TYPE" in env:
            sys_type = env["SYS_TYPE"]

        ##############################################
        # Find and record what CMake is used
        ##############################################

        if "+cmake" in spec:
            cmake_exe = spec["cmake"].command.path
        else:
            cmake_exe = which("cmake")
            if cmake_exe is None:
                msg = "failed to find CMake (and cmake variant is off)"
                raise RuntimeError(msg)
            cmake_exe = cmake_exe.path

        host_cfg_fname = "%s-%s-%s-apcomp.cmake" % (socket.gethostname(), sys_type, spec.compiler)

        cfg = open(host_cfg_fname, "w")
        cfg.write("##################################\n")
        cfg.write("# spack generated host-config\n")
        cfg.write("##################################\n")
        cfg.write("# {0}-{1}\n".format(sys_type, spec.compiler))
        cfg.write("##################################\n\n")

        # Include path to cmake for reference
        cfg.write("# cmake from spack \n")
        cfg.write("# cmake executable path: %s\n\n" % cmake_exe)

        #######################
        # Compiler Settings
        #######################
        cfg.write("#######\n")
        cfg.write("# using %s compiler spec\n" % spec.compiler)
        cfg.write("#######\n\n")
        cfg.write("# c compiler used by spack\n")
        cfg.write(cmake_cache_entry("CMAKE_C_COMPILER", c_compiler))
        cfg.write("# cpp compiler used by spack\n")
        cfg.write(cmake_cache_entry("CMAKE_CXX_COMPILER", cpp_compiler))

        # shared vs static libs
        if "+shared" in spec:
            cfg.write(cmake_cache_entry("BUILD_SHARED_LIBS", "ON"))
        else:
            cfg.write(cmake_cache_entry("BUILD_SHARED_LIBS", "OFF"))

        if "+openmp" in spec:
            cfg.write(cmake_cache_entry("ENABLE_OPENMP", "ON"))
        else:
            cfg.write(cmake_cache_entry("ENABLE_OPENMP", "OFF"))

        if "+mpi" in spec:
            mpicc_path = spec["mpi"].mpicc
            mpicxx_path = spec["mpi"].mpicxx
            # if we are using compiler wrappers on cray systems
            # use those for mpi wrappers, b/c spec['mpi'].mpicxx
            # etc make return the spack compiler wrappers
            # which can trip up mpi detection in CMake 3.14
            if cpp_compiler == "CC":
                mpicc_path = "cc"
                mpicxx_path = "CC"
            cfg.write(cmake_cache_entry("ENABLE_MPI", "ON"))
            cfg.write(cmake_cache_entry("MPI_C_COMPILER", mpicc_path))
            cfg.write(cmake_cache_entry("MPI_CXX_COMPILER", mpicxx_path))
            if "+blt_find_mpi" in spec:
                cfg.write(cmake_cache_entry("ENABLE_FIND_MPI", "ON"))
            else:
                cfg.write(cmake_cache_entry("ENABLE_FIND_MPI", "OFF"))
            mpiexe_bin = join_path(spec["mpi"].prefix.bin, "mpiexec")
            if os.path.isfile(mpiexe_bin):
                # starting with cmake 3.10, FindMPI expects MPIEXEC_EXECUTABLE
                # vs the older versions which expect MPIEXEC
                if self.spec["cmake"].satisfies("@3.10:"):
                    cfg.write(cmake_cache_entry("MPIEXEC_EXECUTABLE", mpiexe_bin))
                else:
                    cfg.write(cmake_cache_entry("MPIEXEC", mpiexe_bin))
        else:
            cfg.write(cmake_cache_entry("ENABLE_MPI", "OFF"))

        cfg.write("##################################\n")
        cfg.write("# end spack generated host-config\n")
        cfg.write("##################################\n")
        cfg.close()

        host_cfg_fname = os.path.abspath(host_cfg_fname)
        tty.info("spack generated conduit host-config file: " + host_cfg_fname)
        return host_cfg_fname
5,570 | batch sizes | # Copyright 2019 The WPT Dashboard Project. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import contextlib
import gzip
import tempfile
import unittest
import warnings
import test_util
from wptscreenshot import WPTScreenshot
class WPTScreenshotTest(unittest.TestCase):
    """End-to-end tests for WPTScreenshot against a local test server.

    Each test writes a screenshot database file, runs WPTScreenshot over
    it, then parses the server's stderr into integers that the tests
    compare against the expected upload batch sizes.
    """

    def setUp(self):
        # Local test server; base_url points at it.
        self.server, base_url = test_util.start_server(True)
        self.api = base_url + '/api/screenshots/upload'
        # We would like to make ResourceWarning (unclosed files) fatal, but
        # -Werror::ResourceWarning does not work since the error is often
        # "unraisable", so we have to use a context manager to record warnings.
        self.context = contextlib.ExitStack()
        # This is equivalent to a test-scope
        # `with warnings.catch_warnings(record=True) as self.warnings`.
        self.warnings = self.context.enter_context(
            warnings.catch_warnings(record=True))

    def tearDown(self):
        if self.server.poll() is None:
            self.server.kill()
        self.context.close()
        # Any recorded warning (e.g. an unclosed file) fails the test.
        messages = [w.message for w in self.warnings]
        self.assertListEqual(messages, [])

    def METHOD_NAME(self, err_text):
        """Parse the server's stderr bytes into a list of ints, one per line."""
        s = []
        for i in err_text.decode('ascii').splitlines():
            s.append(int(i))
        return s

    def test_basic(self):
        # Two data URIs, one process: expect a single batch of 2.
        with tempfile.NamedTemporaryFile() as f:
            f.write(b'data:image/png;base64,0001\n')
            f.write(b'data:image/png;base64,0002\n')
            f.flush()
            with WPTScreenshot(f.name, api=self.api, processes=1) as s:
                s.process()
        self.server.terminate()
        _, err = self.server.communicate()
        sizes = self.METHOD_NAME(err)
        self.assertListEqual(sizes, [2])

    def test_gzip(self):
        # Same as test_basic but the input file is gzip-compressed.
        with tempfile.NamedTemporaryFile(suffix='.gz') as f:
            with gzip.GzipFile(filename=f.name, mode='wb') as g:
                g.write(b'data:image/png;base64,0001\n')
                g.write(b'data:image/png;base64,0002\n')
            f.flush()
            with WPTScreenshot(f.name, api=self.api, processes=1) as s:
                s.process()
        self.server.terminate()
        _, err = self.server.communicate()
        sizes = self.METHOD_NAME(err)
        self.assertListEqual(sizes, [2])

    def test_invalid_encoding(self):
        # Non-ASCII bytes must raise and nothing must be uploaded.
        with tempfile.NamedTemporaryFile() as f:
            f.write(b'\xc8\n')
            f.flush()
            with self.assertRaises(UnicodeDecodeError):
                with WPTScreenshot(f.name, api=self.api, processes=1) as s:
                    s.process()
        self.server.terminate()
        _, err = self.server.communicate()
        sizes = self.METHOD_NAME(err)
        self.assertListEqual(sizes, [])

    def test_invalid_gzip(self):
        # A .gz file with non-gzip content must raise and upload nothing.
        with tempfile.NamedTemporaryFile(suffix=".gz") as f:
            f.write(b'Hello\n')
            f.flush()
            with self.assertRaises(OSError):
                with WPTScreenshot(f.name, api=self.api, processes=1) as s:
                    s.process()
        self.server.terminate()
        _, err = self.server.communicate()
        sizes = self.METHOD_NAME(err)
        self.assertListEqual(sizes, [])

    def test_multiple_batches(self):
        # Three items with a batch cap of 2: expect batches of sizes 1 and 2.
        with tempfile.NamedTemporaryFile() as f:
            f.write(b'data:image/png;base64,0001\n')
            f.write(b'data:image/png;base64,0002\n')
            f.write(b'data:image/png;base64,0003\n')
            f.flush()
            with WPTScreenshot(f.name, api=self.api, processes=2) as s:
                s.MAXIMUM_BATCH_SIZE = 2
                s.process()
        self.server.terminate()
        _, err = self.server.communicate()
        sizes = self.METHOD_NAME(err)
        self.assertSetEqual(set(sizes), {1, 2})

    def test_errors(self):
        # Invalid lines are logged and skipped; only the valid one uploads.
        with tempfile.NamedTemporaryFile() as f:
            f.write(b'invalid,0001\n')
            f.write(b'data:image/png;base64,0002\n')
            f.write(b'data:image/png;base64,0\n')
            f.flush()
            with self.assertLogs() as lm:
                with WPTScreenshot(f.name, api=self.api, processes=1) as s:
                    s.process()
        self.server.terminate()
        _, err = self.server.communicate()
        sizes = self.METHOD_NAME(err)
        self.assertListEqual(sizes, [1])
        self.assertListEqual(
            lm.output,
            ['ERROR:wptscreenshot:Invalid data URI: invalid,0001',
             'ERROR:wptscreenshot:Invalid base64: data:image/png;base64,0'])
5,571 | upgrade | # This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
"""Migrate from 0.14 to 0.15
Revision ID: 49a4a1e3779a
Revises: 057b088bfb32
Create Date: 2014-10-09 12:24:58.333096
"""
# revision identifiers, used by Alembic.
revision = '49a4a1e3779a'
down_revision = '057b088bfb32'
from alembic import op
import sqlalchemy as sa
def METHOD_NAME():
    """Upgrade the schema from 0.14 to 0.15.

    Creates the per-system access-policy tables and backfills rules derived
    from each system's ``shared`` flag and group memberships, drops the
    TurboGears visit tables, widens group names, and resizes the task RPM
    column (adding a unique constraint on it).
    """
    # System access policies
    op.create_table('system_access_policy',
        sa.Column('id', sa.Integer, nullable=False, primary_key=True),
        sa.Column('system_id', sa.Integer,
            sa.ForeignKey('system.id', name='system_access_policy_system_id_fk')),
        mysql_engine='InnoDB'
    )
    op.create_table('system_access_policy_rule',
        sa.Column('id', sa.Integer, nullable=False, primary_key=True),
        sa.Column('policy_id', sa.Integer, sa.ForeignKey('system_access_policy.id',
            name='system_access_policy_rule_policy_id_fk'), nullable=False),
        sa.Column('user_id', sa.Integer, sa.ForeignKey('tg_user.user_id',
            name='system_access_policy_rule_user_id_fk')),
        sa.Column('group_id', sa.Integer, sa.ForeignKey('tg_group.group_id',
            name='system_access_policy_rule_group_id_fk')),
        sa.Column('permission', sa.Enum('edit_policy', 'edit_system',
            'loan_any', 'loan_self', 'control_system', 'reserve')),
        mysql_engine='InnoDB'
    )
    # Create one access policy per system that does not have one yet
    # (the NOT EXISTS guards make every backfill below idempotent).
    op.execute("""
        INSERT INTO system_access_policy (system_id)
        SELECT id FROM system
        WHERE NOT EXISTS (SELECT 1 FROM system_access_policy
            WHERE system_id = system.id)
        """)
    # Add a user-less, group-less 'control_system' rule to every policy.
    op.execute("""
        INSERT INTO system_access_policy_rule
            (policy_id, user_id, group_id, permission)
        SELECT system_access_policy.id, NULL, NULL, 'control_system'
        FROM system_access_policy
        INNER JOIN system ON system_access_policy.system_id = system.id
        WHERE NOT EXISTS (SELECT 1 FROM system_access_policy_rule
            WHERE policy_id = system_access_policy.id
                AND user_id IS NULL
                AND group_id IS NULL
                AND permission = 'control_system')
        """)
    # Shared systems with no associated groups get a global 'reserve' rule.
    op.execute("""
        INSERT INTO system_access_policy_rule
            (policy_id, user_id, group_id, permission)
        SELECT system_access_policy.id, NULL, NULL, 'reserve'
        FROM system_access_policy
        INNER JOIN system ON system_access_policy.system_id = system.id
        WHERE shared = TRUE
            AND NOT EXISTS (SELECT 1 FROM system_group
                WHERE system_id = system.id)
            AND NOT EXISTS (SELECT 1 FROM system_access_policy_rule
                WHERE policy_id = system_access_policy.id
                    AND user_id IS NULL
                    AND group_id IS NULL
                    AND permission = 'reserve')
        """)
    # Shared systems with non-admin groups: each such group may 'reserve'.
    op.execute("""
        INSERT INTO system_access_policy_rule
            (policy_id, user_id, group_id, permission)
        SELECT system_access_policy.id, NULL, system_group.group_id, 'reserve'
        FROM system_access_policy
        INNER JOIN system ON system_access_policy.system_id = system.id
        INNER JOIN system_group ON system_group.system_id = system.id
        WHERE shared = TRUE
            AND system_group.admin = FALSE
            AND NOT EXISTS (SELECT 1 FROM system_access_policy_rule
                WHERE policy_id = system_access_policy.id
                    AND user_id IS NULL
                    AND group_id = system_group.group_id
                    AND permission = 'reserve')
        """)
    # Admin groups get every permission (cross join against the full
    # permission list emulated with a UNION of literals).
    op.execute("""
        INSERT INTO system_access_policy_rule
            (policy_id, user_id, group_id, permission)
        SELECT system_access_policy.id, NULL, system_group.group_id, permission.p
        FROM system_access_policy
        INNER JOIN system ON system_access_policy.system_id = system.id
        INNER JOIN system_group ON system_group.system_id = system.id
        JOIN (SELECT 'edit_policy' p
            UNION SELECT 'edit_system' p
            UNION SELECT 'loan_any' p
            UNION SELECT 'loan_self' p
            UNION SELECT 'control_system' p
            UNION SELECT 'reserve' p) permission
        WHERE system_group.admin = TRUE
            AND NOT EXISTS (SELECT 1 FROM system_access_policy_rule
                WHERE policy_id = system_access_policy.id
                    AND user_id IS NULL
                    AND group_id = system_group.group_id
                    AND permission = permission.p)
        """)
    # TurboGears Visit framework
    # These don't contain any important data, just transient login sessions, so
    # we can safely drop them during upgrade, and re-create them empty during
    # downgrade.
    op.drop_table('visit')
    op.drop_table('visit_identity')
    # Group name length
    op.alter_column('tg_group', 'group_name',
        type_=sa.Unicode(255), nullable=False)
    # Task RPM filename
    op.alter_column('task', 'rpm', type_=sa.Unicode(255))
    op.create_unique_constraint('rpm', 'task', ['rpm'])
def downgrade():
    """Revert the 0.15 schema changes back to 0.14.

    The access-policy tables (and their rules) are dropped outright; the
    visit tables are re-created empty since they only held transient
    login sessions.
    """
    # System access policies
    op.drop_table('system_access_policy_rule')
    op.drop_table('system_access_policy')
    # TurboGears Visit framework
    op.create_table('visit',
        sa.Column('visit_key', sa.String(40), primary_key=True),
        sa.Column('created', sa.DateTime, nullable=False),
        sa.Column('expiry', sa.DateTime),
        mysql_engine='InnoDB'
    )
    op.create_table('visit_identity',
        sa.Column('visit_key', sa.String(40), primary_key=True),
        sa.Column('user_id', sa.Integer,
            sa.ForeignKey('tg_user.user_id'), nullable=False),
        sa.Column('proxied_by_user_id', sa.Integer,
            sa.ForeignKey('tg_user.user_id')),
        mysql_engine='InnoDB'
    )
    # Group name length
    op.alter_column('tg_group', 'group_name',
        type_=sa.Unicode(16), nullable=False)
    # Task RPM filename
    op.drop_index('rpm', 'task')
    op.alter_column('task', 'rpm', type_=sa.Unicode(2048))
5,572 | handle ratelimit | # Copyright (c) 2018 Philipp Wolfer <ph.wolfer@gmail.com>
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
from typing import Optional
import json
import logging
import os
import ssl
import time
from http.client import HTTPConnection, HTTPSConnection
HOST_NAME: Optional[str] = "api.listenbrainz.org"
PATH_SUBMIT = "/1/submit-listens"
SSL_CONTEXT: Optional[ssl.SSLContext] = ssl.create_default_context()
# to run against a local dev server
if os.getenv("QL_LISTENBRAINZ_DEV_SERVER") is not None:
HOST_NAME = os.getenv("QL_LISTENBRAINZ_DEV_SERVER")
SSL_CONTEXT = None
class Track:
    """
    Represents a single track to submit.

    See https://listenbrainz.readthedocs.io/en/latest/dev/json.html
    """

    def __init__(self, artist_name, track_name,
                 release_name=None, additional_info=None):
        """
        Create a new Track instance

        @param artist_name as str
        @param track_name as str
        @param release_name as str
        @param additional_info as dict (a fresh empty dict when omitted)
        """
        self.artist_name = artist_name
        self.track_name = track_name
        self.release_name = release_name
        # Bug fix: the previous default of `additional_info={}` was a shared
        # mutable default argument, so every Track created without an explicit
        # dict aliased the SAME dict and mutations leaked between instances.
        self.additional_info = {} if additional_info is None else additional_info

    @staticmethod
    def from_dict(data):
        """Build a Track from a payload-shaped dict (inverse of to_dict)."""
        return Track(
            data["artist_name"],
            data["track_name"],
            data.get("release_name", None),
            data.get("additional_info", {})
        )

    def to_dict(self):
        """Return the track as a dict suitable for the submission payload."""
        return {
            "artist_name": self.artist_name,
            "track_name": self.track_name,
            "release_name": self.release_name,
            "additional_info": self.additional_info
        }

    def __repr__(self):
        return "Track(%s, %s)" % (self.artist_name, self.track_name)
class ListenBrainzClient:
    """
    Submit listens to ListenBrainz.org.

    See https://listenbrainz.readthedocs.io/en/latest/dev/api.html
    """

    def __init__(self, logger=logging.getLogger(__name__)):
        # Earliest epoch time the next request may be sent, derived from the
        # server's rate-limit headers (0 = no limit pending).
        self.__next_request_time = 0
        # ListenBrainz user token; must be set by the caller before submitting.
        self.user_token = None
        self.logger = logger

    def listen(self, listened_at, track):
        """
        Submit a listen for a track

        @param listened_at as int
        @param track as Track
        """
        payload = _get_payload(track, listened_at)
        return self._submit("single", [payload])

    def playing_now(self, track):
        """
        Submit a playing now notification for a track

        @param track as Track
        """
        payload = _get_payload(track)
        return self._submit("playing_now", [payload])

    def import_tracks(self, tracks):
        """
        Import a list of tracks as (listened_at, Track) pairs

        @param tracks as [(int, Track)]
        """
        payload = _get_payload_many(tracks)
        return self._submit("import", payload)

    def _submit(self, listen_type, payload, retry=0):
        """POST *payload* to the submit endpoint.

        Honours the server-imposed rate limit before sending and retries up
        to 5 times on HTTP 429. Returns the http.client response object.
        """
        self._wait_for_ratelimit()
        self.logger.debug("ListenBrainz %s: %r", listen_type, payload)
        data = {
            "listen_type": listen_type,
            "payload": payload
        }
        headers = {
            "Authorization": "Token %s" % self.user_token,
            "Content-Type": "application/json"
        }
        body = json.dumps(data)
        if SSL_CONTEXT is not None:
            conn = HTTPSConnection(HOST_NAME, context=SSL_CONTEXT)
        else:
            conn = HTTPConnection(HOST_NAME)
        try:
            # Network errors still propagate to the caller, but the
            # connection is now always closed (previously it leaked).
            conn.request("POST", PATH_SUBMIT, body, headers)
            response = conn.getresponse()
            response_text = response.read()
        finally:
            conn.close()
        try:
            response_data = json.loads(response_text)
        except json.JSONDecodeError:
            # Fix: the old code caught ValueError and re-raised unless the
            # message matched Python 2's "No JSON object could be decoded",
            # so on Python 3 any non-JSON error page crashed the client.
            # Keep the raw body for logging instead.
            response_data = response_text
        self.METHOD_NAME(response)
        log_msg = "Response %s: %r" % (response.status, response_data)
        if response.status == 429 and retry < 5:  # Too Many Requests
            self.logger.warning(log_msg)
            return self._submit(listen_type, payload, retry + 1)
        elif response.status == 200:
            self.logger.debug(log_msg)
        else:
            self.logger.error(log_msg)
        return response

    def _wait_for_ratelimit(self):
        """Sleep until the server-imposed rate-limit window has elapsed."""
        now = time.time()
        if self.__next_request_time > now:
            delay = self.__next_request_time - now
            self.logger.debug("Rate limit applies, delay %d", delay)
            time.sleep(delay)

    def METHOD_NAME(self, response):
        """Record the rate-limit headers of *response*.

        When the remaining quota hits 0, schedule the next allowed request
        for after the reset interval.
        """
        remaining = int(response.getheader("X-RateLimit-Remaining", 0))
        reset_in = int(response.getheader("X-RateLimit-Reset-In", 0))
        self.logger.debug("X-RateLimit-Remaining: %i", remaining)
        self.logger.debug("X-RateLimit-Reset-In: %i", reset_in)
        if remaining == 0:
            self.__next_request_time = time.time() + reset_in
def _get_payload_many(tracks):
payload = []
for (listened_at, track) in tracks:
data = _get_payload(track, listened_at)
payload.append(data)
return payload
def _get_payload(track, listened_at=None):
data = {
"track_metadata": track.to_dict()
}
if listened_at is not None:
data["listened_at"] = listened_at
return data |
5,573 | mode select | from boxbranding import getBoxType, getMachineName, getHaveRCA, getHaveDVI, getHaveSCART, getHaveAVJACK
from Screens.Wizard import WizardSummary
from Screens.WizardLanguage import WizardLanguage
from Screens.Rc import Rc
from Components.AVSwitch import iAVSwitch as iAV
from Components.Pixmap import Pixmap
from Components.config import config, ConfigBoolean, configfile
from Components.SystemInfo import SystemInfo
from Tools.Directories import resolveFilename, SCOPE_SKIN, SCOPE_CURRENT_SKIN
from Tools.HardwareInfo import HardwareInfo
config.misc.showtestcard = ConfigBoolean(default=False)
class VideoWizardSummary(WizardSummary):
    """LCD summary screen companion for the video wizard."""

    def __init__(self, session, parent):
        WizardSummary.__init__(self, session, parent)

    def setLCDPicCallback(self):
        # Route the parent's LCD text updates to this summary's setText.
        self.parent.setLCDTextCallback(self.setText)

    def setLCDPic(self, file):
        """Display the image at *file* in the summary's "pic" widget."""
        self["pic"].instance.setPixmapFromFile(file)
class VideoWizard(WizardLanguage, Rc):
    """First-run wizard for choosing the video output port, mode and rate.

    Step logic lives in videowizard.xml; this class supplies the list
    sources and selection callbacks the XML references by name.
    """
    skin = """
<screen position="fill" title="Welcome..." flags="wfNoBorder" >
<panel name="WizardMarginsTemplate"/>
<panel name="WizardPictureLangTemplate"/>
<panel name="RemoteControlTemplate"/>
<panel position="left" size="10,*" />
<panel position="right" size="10,*" />
<panel position="fill">
<widget name="text" position="top" size="*,270" font="Regular;23" valign="center" />
<panel position="fill">
<panel position="left" size="150,*">
<widget name="portpic" position="top" zPosition="10" size="150,150" transparent="1" alphatest="on"/>
</panel>
<panel position="fill" layout="stack">
<widget source="list" render="Listbox" position="fill" scrollbarMode="showOnDemand" >
<convert type="StringList" />
</widget>
<!--<widget name="config" position="fill" zPosition="1" scrollbarMode="showOnDemand" />-->
</panel>
</panel>
</panel>
</screen>"""

    def __init__(self, session):
        # FIXME anyone knows how to use relative paths from the plugin's directory?
        self.xmlfile = resolveFilename(SCOPE_SKIN, "videowizard.xml")
        self.hw = iAV  # needed by VideoWizard.xml do not change
        WizardLanguage.__init__(self, session, showSteps=False, showStepSlider=False)
        Rc.__init__(self)
        self["wizard"] = Pixmap()
        self["portpic"] = Pixmap()
        # Selections collected while stepping through the wizard.
        self.port = None
        self.mode = None
        self.rate = None

    def createSummary(self):
        """Return the LCD summary screen class for this wizard."""
        print("[VideoWizard] createSummary")
        return VideoWizardSummary

    def markDone(self):
        """Persist the chosen port/mode/rate and disable the wizard."""
        iAV.saveMode(self.port, self.mode, self.rate)
        config.misc.videowizardenabled.value = 0
        config.misc.videowizardenabled.save()
        configfile.save()

    def listInputChannels(self):
        """Return selectable output ports as sorted (description, port) pairs."""
        # hw_type = HardwareInfo().get_device_name()
        # has_hdmi = HardwareInfo().has_hdmi()
        list = []
        for port in iAV.getPortList():
            if iAV.isPortUsed(port):
                descr = port
                # Hide Scart on boxes without a Scart connector; DVI-PC is
                # excluded from the wizard entirely.
                if descr == "Scart" and not SystemInfo["hasScart"]:
                    continue
                if port != "DVI-PC":
                    list.append((descr, port))
        list.sort(key=lambda x: x[0])
        print("[VideoWizard] listInputChannels:", list)
        return list

    def inputSelectionMade(self, index):
        """Callback for a confirmed port choice."""
        print("[VideoWizard] inputSelectionMade:", index)
        self.port = index
        self.inputSelect(index)

    def inputSelectionMoved(self):
        """Preview the highlighted port and show its connector picture."""
        # hw_type = HardwareInfo().get_device_name()
        # has_hdmi = HardwareInfo().has_hdmi()
        print("[VideoWizard] input selection moved:", self.selection)
        self.inputSelect(self.selection)
        if self["portpic"].instance is not None:
            # Map selection names to icon file names.
            picname = self.selection
            if picname == "Jack":
                picname = "JACK"
            if picname == "Scart-YPbPr":
                picname = "Scart"
            self["portpic"].instance.setPixmapFromFile(resolveFilename(SCOPE_CURRENT_SKIN, "icons/%s.png" % picname))

    def inputSelect(self, port):
        """Switch output to *port*, applying its first mode and rate."""
        print("[VideoWizard] inputSelect:", port)
        modeList = iAV.getModeList(self.selection)
        print("[VideoWizard] modeList:", modeList)
        self.port = port
        if len(modeList) > 0:
            ratesList = self.listRates(modeList[0][0])
            iAV.setMode(port=port, mode=modeList[0][0], rate=ratesList[0][0])

    def listModes(self):
        """Return available modes for the chosen port as (mode, mode) pairs."""
        list = []
        print("[VideoWizard] modes for port", self.port)
        for mode in iAV.getModeList(self.port):
            # if mode[0] != "PC":
            list.append((mode[0], mode[0]))
        print("[VideoWizard] modeslist:", list)
        return list

    def modeSelectionMade(self, index):
        """Callback for a confirmed mode choice."""
        print("[VideoWizard] modeSelectionMade:", index)
        self.mode = index
        self.METHOD_NAME(index)

    def modeSelectionMoved(self):
        """Preview the highlighted mode."""
        print("[VideoWizard] mode selection moved:", self.selection)
        self.METHOD_NAME(self.selection)

    def METHOD_NAME(self, mode):
        """Apply *mode* on the current port.

        HDMI HD/UHD modes use the automatic "multi" rate; anything else
        falls back to the first rate reported for the mode.
        """
        ratesList = self.listRates(mode)
        print("[VideoWizard] ratesList:", ratesList)
        if self.port == "HDMI" and mode in ("720p", "1080i", "1080p", "2160p"):
            self.rate = "multi"
            iAV.setMode(port=self.port, mode=mode, rate="multi")
        else:
            iAV.setMode(port=self.port, mode=mode, rate=ratesList[0][0])

    def listRates(self, querymode=None):
        """Return (rate, rate) pairs for *querymode* (default: chosen mode)."""
        if querymode is None:
            querymode = self.mode
        list = []
        print("[VideoWizard] modes for port", self.port, "and mode", querymode)
        for mode in iAV.getModeList(self.port):
            print("[VideoWizard] mode:", mode)
            if mode[0] == querymode:
                for rate in mode[1]:
                    if self.port == "DVI-PC":
                        print("[VideoWizard] rate:", rate)
                        if rate == "640x480":
                            # Put 640x480 first on DVI-PC so it is the default.
                            list.insert(0, (rate, rate))
                            continue
                    list.append((rate, rate))
        return list

    def rateSelectionMade(self, index):
        """Callback for a confirmed rate choice."""
        print("[VideoWizard] rateSelectionMade:", index)
        self.rate = index
        self.rateSelect(index)

    def rateSelectionMoved(self):
        """Preview the highlighted rate."""
        print("[VideoWizard] rate selection moved:", self.selection)
        self.rateSelect(self.selection)

    def rateSelect(self, rate):
        iAV.setMode(port=self.port, mode=self.mode, rate=rate)

    def showTestCard(self, selection=None):
        """Store whether the test card should be shown ('yes'/'no')."""
        if selection is None:
            selection = self.selection
        print("[VideoWizard] set config.misc.showtestcard to", {"yes": True, "no": False}[selection])
        if selection == "yes":
            config.misc.showtestcard.value = True
        else:
            config.misc.showtestcard.value = False

    def keyNumberGlobal(self, number):
        """Shortcut keys: 1=HDMI/720p, 2=HDMI/1080i, 3=Scart; then close."""
        if number in (1, 2, 3):
            if number == 1:
                iAV.saveMode("HDMI", "720p", "multi")
            elif number == 2:
                iAV.saveMode("HDMI", "1080i", "multi")
            elif number == 3:
                iAV.saveMode("Scart", "Multi", "multi")
            iAV.setConfiguredMode()
            self.close()
        WizardLanguage.keyNumberGlobal(self, number)
5,574 | decide | #!/usr/bin/env python
# Copyright 2023, Kay Hayen, mailto:kay.hayen@gmail.com
#
# Python test originally created or extracted from other peoples work. The
# parts from me are licensed as below. It is at least Free Software where
# it's copied from other people. In these cases, that will normally be
# indicated.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
""" This test runner compiles all extension modules for standalone mode.
This is a test to reveal hidden dependencies on a system.
"""
import os
import sys
# Find nuitka package relative to us.
sys.path.insert(
0,
os.path.normpath(
os.path.join(os.path.dirname(os.path.abspath(__file__)), "..", "..")
),
)
# isort:start
import shutil
from nuitka.tools.testing.Common import (
check_output,
checkLoadedFileAccesses,
checkSucceedsWithCPython,
compileLibraryTest,
createSearchMode,
displayFileContents,
displayFolderContents,
displayRuntimeTraces,
getTempDir,
my_print,
setup,
test_logger,
)
from nuitka.tools.testing.RuntimeTracing import getRuntimeTraceOfLoadedFiles
from nuitka.utils.Execution import NuitkaCalledProcessError
from nuitka.utils.FileOperations import getFileContents, openTextFile
from nuitka.utils.ModuleNames import ModuleName
def displayError(dirname, filename):
    """Show the dist folder and inclusion log of a failed compilation."""
    assert dirname is None
    base = filename[:-3]
    displayFolderContents("dist folder", base + ".dist")
    displayFileContents("inclusion log", base + ".py.inclusion.log")
def main():
    """Compile every installed extension module in standalone mode.

    For each extension module found (minus a skip list), generate a
    one-line importer script, compile it with Nuitka standalone, run the
    result, and verify the binary accesses no files outside its dist
    folder.
    """
    setup(suite="extension_modules", needs_io_encoding=True)
    search_mode = createSearchMode()
    tmp_dir = getTempDir()
    done = set()

    def METHOD_NAME(root, filename):
        """Filter predicate: True for extension modules worth compiling."""
        # Cython's own helpers and matplotlib are excluded entirely.
        if os.path.sep + "Cython" + os.path.sep in root:
            return False
        if (
            root.endswith(os.path.sep + "matplotlib")
            or os.path.sep + "matplotlib" + os.path.sep in root
        ):
            return False
        # Debug-build shared objects and msgpack are skipped too.
        if filename.endswith("linux-gnu_d.so"):
            return False
        if root.endswith(os.path.sep + "msgpack"):
            return False
        # Only test each module basename once.
        first_part = filename.split(".")[0]
        if first_part in done:
            return False
        done.add(first_part)
        return filename.endswith((".so", ".pyd")) and not filename.startswith(
            "libpython"
        )

    current_dir = os.path.normpath(os.getcwd())
    current_dir = os.path.normcase(current_dir)

    def action(stage_dir, root, path):
        """Compile one module's importer script and check its file accesses."""
        command = [
            sys.executable,
            os.path.join("..", "..", "bin", "nuitka"),
            "--stand",
            "--run",
            "--output-dir=%s" % stage_dir,
            "--remove-output",
            "--no-progressbar",
        ]
        filename = os.path.join(stage_dir, "importer.py")
        assert path.startswith(root)
        # Derive the dotted module name from the path below *root*.
        module_name = path[len(root) + 1 :]
        module_name = module_name.split(".")[0]
        module_name = module_name.replace(os.path.sep, ".")
        module_name = ModuleName(module_name)
        with openTextFile(filename, "w") as output:
            # Select plugins via nuitka-project comments in the script.
            plugin_names = set(["pylint-warnings"])
            if module_name.hasNamespace("PySide2"):
                plugin_names.add("pyside2")
            elif module_name.hasNamespace("PySide6"):
                plugin_names.add("pyside6")
            elif module_name.hasNamespace("PyQt5"):
                plugin_names.add("pyqt5")
            elif module_name.hasNamespace("PyQt6"):
                plugin_names.add("pyqt6")
            else:
                plugin_names.add("no-qt")
            for plugin_name in plugin_names:
                output.write("# nuitka-project: --enable-plugin=%s\n" % plugin_name)
            # Make it an error to find unwanted bloat compiled in.
            output.write("# nuitka-project: --noinclude-default-mode=error\n")
            output.write("# nuitka-project: --standalone\n")
            output.write("import " + module_name.asString() + "\n")
            output.write("print('OK.')")
        command += os.environ.get("NUITKA_EXTRA_OPTIONS", "").split()
        command.append(filename)
        # Only compile what plain CPython can import successfully.
        if checkSucceedsWithCPython(filename):
            try:
                output = check_output(command).splitlines()
            except NuitkaCalledProcessError as e:
                my_print("SCRIPT:", filename, style="blue")
                my_print(getFileContents(filename))
                test_logger.sysexit("Error with compilation: %s" % e)
            # only trying to check for no exception, pylint: disable=try-except-raise
            except Exception:
                raise
            else:
                assert os.path.exists(filename[:-3] + ".dist")
                binary_filename = os.path.join(
                    filename[:-3] + ".dist",
                    "importer.exe" if os.name == "nt" else "importer",
                )
                # Trace which files the compiled binary opens at runtime.
                loaded_filenames = getRuntimeTraceOfLoadedFiles(
                    logger=test_logger,
                    command=[binary_filename],
                )
                outside_accesses = checkLoadedFileAccesses(
                    loaded_filenames=loaded_filenames, current_dir=os.getcwd()
                )
                if outside_accesses:
                    displayError(None, filename)
                    displayRuntimeTraces(test_logger, binary_filename)
                    test_logger.warning(
                        "Should not access these file(s): '%r'." % outside_accesses
                    )
                    search_mode.onErrorDetected(1)
                if output[-1] != b"OK.":
                    my_print(" ".join(command))
                    my_print(filename)
                    my_print(output)
                    test_logger.sysexit("FAIL.")
                my_print("OK.")
                assert not outside_accesses, outside_accesses
                shutil.rmtree(filename[:-3] + ".dist")
        else:
            my_print("SKIP (does not work with CPython)")

    compileLibraryTest(
        search_mode=search_mode,
        stage_dir=os.path.join(tmp_dir, "compile_extensions"),
        METHOD_NAME=METHOD_NAME,
        action=action,
    )

    my_print("FINISHED, all extension modules compiled.")


if __name__ == "__main__":
    main()
5,575 | setup device | from collections import OrderedDict
from multiprocessing import Array
from multiprocessing.connection import Connection
import numpy as np
from urh.dev.native.Device import Device
from urh.dev.native.lib import usrp
class USRP(Device):
    """Device backend for USRP software-defined radios.

    Thin wrapper around the native ``usrp`` library; the classmethods are
    invoked from the device worker process and report status over
    multiprocessing connections.
    """
    DEVICE_METHODS = Device.DEVICE_METHODS.copy()
    DEVICE_METHODS.update({"SET_SUBDEVICE": "set_subdevice", Device.Command.SET_ANTENNA_INDEX.name: "set_antenna"})

    SYNC_RX_CHUNK_SIZE = 16384
    SYNC_TX_CHUNK_SIZE = 16384 * 2
    CONTINUOUS_TX_CHUNK_SIZE = -1  # take everything from queue

    DEVICE_LIB = usrp
    ASYNCHRONOUS = False
    DATA_TYPE = np.float32

    @classmethod
    def get_device_list(cls):
        """Return available USRP devices as reported by the native lib."""
        return usrp.find_devices("")

    @classmethod
    def adapt_num_read_samples_to_sample_rate(cls, sample_rate):
        # Scale the RX chunk size with sample rate: 16384 samples per MHz.
        cls.SYNC_RX_CHUNK_SIZE = 16384 * int(sample_rate / 1e6)

    @classmethod
    def METHOD_NAME(cls, ctrl_connection: Connection, device_identifier):
        """Open the device and report the outcome over *ctrl_connection*.

        Returns True when the native open() returned 0.
        """
        ret = usrp.open(device_identifier)
        if device_identifier:
            ctrl_connection.send("OPEN ({}):{}".format(device_identifier, ret))
        else:
            ctrl_connection.send("OPEN:" + str(ret))
        success = ret == 0
        if success:
            device_repr = usrp.get_device_representation()
            ctrl_connection.send(device_repr)
        else:
            ctrl_connection.send(usrp.get_last_error())
        return success

    @classmethod
    def init_device(cls, ctrl_connection: Connection, is_tx: bool, parameters: OrderedDict):
        """Set TX/RX direction, run base init, then report the antenna setup."""
        usrp.set_tx(is_tx)
        success = super().init_device(ctrl_connection, is_tx, parameters)
        if success:
            ctrl_connection.send("Current antenna is {} (possible antennas: {})".format(usrp.get_antenna(),
                                                                                        ", ".join(usrp.get_antennas())))
        return success

    @classmethod
    def shutdown_device(cls, ctrl_connection, is_tx: bool):
        """Stop and destroy the stream, close the device, report the result."""
        usrp.stop_stream()
        usrp.destroy_stream()
        ret = usrp.close()
        ctrl_connection.send("CLOSE:" + str(ret))
        return True

    @classmethod
    def prepare_sync_receive(cls, ctrl_connection: Connection):
        """Set up and start a receive stream sized to SYNC_RX_CHUNK_SIZE."""
        ctrl_connection.send("Initializing stream...")
        usrp.setup_stream()
        return usrp.start_stream(cls.SYNC_RX_CHUNK_SIZE)

    @classmethod
    def receive_sync(cls, data_conn: Connection):
        """Receive one chunk of samples into *data_conn*."""
        usrp.recv_stream(data_conn, cls.SYNC_RX_CHUNK_SIZE)

    @classmethod
    def prepare_sync_send(cls, ctrl_connection: Connection):
        """Set up and start a transmit stream, reporting the return code."""
        ctrl_connection.send("Initializing stream...")
        usrp.setup_stream()
        ret = usrp.start_stream(0)
        ctrl_connection.send("Initialize stream:{0}".format(ret))
        return ret

    @classmethod
    def send_sync(cls, data):
        """Transmit *data* over the open stream."""
        usrp.send_stream(data)

    def __init__(self, center_freq, sample_rate, bandwidth, gain, if_gain=1, baseband_gain=1,
                 resume_on_full_receive_buffer=False):
        super().__init__(center_freq=center_freq, sample_rate=sample_rate, bandwidth=bandwidth,
                         gain=gain, if_gain=if_gain, baseband_gain=baseband_gain,
                         resume_on_full_receive_buffer=resume_on_full_receive_buffer)
        self.success = 0
        self.error_codes = {4711: "Antenna index not supported on this device"}
        # Optional USRP subdevice specification string.
        self.subdevice = ""

    def set_device_gain(self, gain):
        # Gain is scaled down by 100 before it reaches the native lib
        # (see device_parameters) — presumably a percent value; confirm.
        super().set_device_gain(gain * 0.01)

    @property
    def has_multi_device_support(self):
        return True

    @property
    def device_parameters(self):
        """Ordered parameter set sent to the worker to configure the device."""
        return OrderedDict([
            ("SET_SUBDEVICE", self.subdevice),
            (self.Command.SET_ANTENNA_INDEX.name, self.antenna_index),
            (self.Command.SET_FREQUENCY.name, self.frequency),
            (self.Command.SET_SAMPLE_RATE.name, self.sample_rate),
            (self.Command.SET_BANDWIDTH.name, self.bandwidth),
            (self.Command.SET_RF_GAIN.name, self.gain * 0.01),
            ("identifier", self.device_serial),
        ])

    @staticmethod
    def bytes_to_iq(buffer):
        """Reinterpret raw float32 bytes as an (n, 2) array of I/Q pairs."""
        return np.frombuffer(buffer, dtype=np.float32).reshape((-1, 2), order="C")

    @staticmethod
    def iq_to_bytes(samples: np.ndarray):
        """Flatten I/Q samples into a shared, lock-free float array for IPC."""
        arr = Array("f", 2 * len(samples), lock=False)
        numpy_view = np.frombuffer(arr, dtype=np.float32)
        numpy_view[:] = samples.flatten(order="C")
        return arr
5,576 | build analysis pipeline | # This is a template for the "aminer" logfile miner tool. Copy
# it to "config.py" and define your ruleset.
config_properties = {}  # skipcq: PY-W0072

# Define the list of log resources to read from: the resources
# named here do not need to exist when aminer is started. This
# will just result in a warning. However if they exist, they have
# to be readable by the aminer process! Supported types are:
# * file://[path]: Read data from file, reopen it after rollover
# * unix://[path]: Open the path as UNIX local socket for reading
config_properties['LogResourceList'] = ['file:///tmp/syslog']

# Define the uid/gid of the process that runs the calculation
# after opening the log files:
config_properties['AminerUser'] = 'aminer'
config_properties['AminerGroup'] = 'aminer'

# Define the path, where aminer will listen for incoming remote
# control connections. When missing, no remote control socket
# will be created.
# config_properties['RemoteControlSocket'] = '/var/run/aminer-remote.socket'

# Read the analysis configuration from this file. That part of configuration
# is separated from the main configuration so that it can be loaded
# only within the analysis child. Non-absolute path names are
# interpreted relatively to the main configuration file (this
# file). When empty, this configuration has to contain the configuration
# for the child also.
# config_properties['AnalysisConfigFile'] = 'analysis.py'

# Read and store information to be used between multiple invocations
# of aminer in this directory. The directory must only be accessible
# to the 'AminerUser' but not group/world readable. On violation,
# aminer will refuse to start. When undefined, '/var/lib/aminer'
# is used.
config_properties['Core.PersistenceDir'] = '/tmp/lib/aminer/analysis'  # skipcq: BAN-B108

# Define a target e-mail address to send alerts to. When undefined,
# no e-mail notification hooks are added.
config_properties['MailAlerting.TargetAddress'] = 'mail@localhost'

# Sender address of e-mail alerts. When undefined, "sendmail"
# implementation on host will decide, which sender address should
# be used.
config_properties['MailAlerting.FromAddress'] = 'mail@localhost'

# Define, which text should be prepended to the standard aminer
# subject. Defaults to "aminer Alerts:"
config_properties['MailAlerting.SubjectPrefix'] = 'aminer Alerts:'

# Define a grace time after startup before aminer will react to
# an event and send the first alert e-mail. Defaults to 0 (any
# event can immediately trigger alerting).
config_properties['MailAlerting.AlertGraceTime'] = 0

# Define how many seconds to wait after a first event triggered
# the alerting procedure before really sending out the e-mail.
# In that timespan, events are collected and will be sent all
# using a single e-mail. Defaults to 10 seconds.
config_properties['MailAlerting.EventCollectTime'] = 10

# Define the minimum time between two alert e-mails in seconds
# to avoid spamming. All events during this timespan are collected
# and sent out with the next report. Defaults to 600 seconds.
config_properties['MailAlerting.MinAlertGap'] = 0

# Define the maximum time between two alert e-mails in seconds.
# When undefined this defaults to "MailAlerting.MinAlertGap".
# Otherwise this will activate an exponential backoff to reduce
# messages during permanent error states by increasing the alert
# gap by 50% when more alert-worthy events were recorded while
# the previous gap time was not yet elapsed.
config_properties['MailAlerting.MaxAlertGap'] = 600

# Define how many events should be included in one alert mail
# at most. This defaults to 1000
config_properties['MailAlerting.MaxEventsPerMessage'] = 1000
def METHOD_NAME(analysis_context):
    """
    Build the analysis pipeline used to parse the incoming log data.

    Registers on *analysis_context*: a parsing model, an AtomizerFactory that
    turns the incoming byte stream into log atoms, the analysis detectors and
    the event handlers (stream printer, optional mail notification).
    """
    # Build the parsing model:
    from aminer.parsing.FirstMatchModelElement import FirstMatchModelElement
    from aminer.parsing.SequenceModelElement import SequenceModelElement
    from aminer.parsing.DateTimeModelElement import DateTimeModelElement
    from aminer.parsing.FixedDataModelElement import FixedDataModelElement
    from aminer.parsing.DelimitedDataModelElement import DelimitedDataModelElement
    from aminer.parsing.AnyByteDataModelElement import AnyByteDataModelElement
    # Matches "<date> ubuntu <user> System rebooted for hard disk upgrade" lines.
    service_children_disk_upgrade = [
        DateTimeModelElement('Date', b'%d.%m.%Y %H:%M:%S'), FixedDataModelElement('UName', b' ubuntu '),
        DelimitedDataModelElement('User', b' '), FixedDataModelElement('HD Repair', b' System rebooted for hard disk upgrade')]
    # Matches "The Path of the home directory shown by pwd of the user <user> is: <path>" lines.
    service_children_home_path = [
        FixedDataModelElement('Pwd', b'The Path of the home directory shown by pwd of the user '),
        DelimitedDataModelElement('Username', b' '), FixedDataModelElement('Is', b' is: '), AnyByteDataModelElement('Path')]
    parsing_model = FirstMatchModelElement('model', [
        SequenceModelElement('Disk Upgrade', service_children_disk_upgrade),
        SequenceModelElement('Home Path', service_children_home_path)])
    # Some generic imports.
    from aminer.analysis import AtomFilters
    # Create all global handler lists here and append the real handlers later on.
    # Use this filter to distribute all atoms to the analysis handlers.
    atom_filter = AtomFilters.SubhandlerFilter(None)
    from aminer.events.StreamPrinterEventHandler import StreamPrinterEventHandler
    stream_printer_event_handler = StreamPrinterEventHandler(None)
    anomaly_event_handlers = [stream_printer_event_handler]
    # Now define the AtomizerFactory using the model. A simple line based one is usually sufficient.
    from aminer.input.SimpleByteStreamLineAtomizerFactory import SimpleByteStreamLineAtomizerFactory
    analysis_context.atomizer_factory = SimpleByteStreamLineAtomizerFactory(
        parsing_model, [atom_filter], anomaly_event_handlers, default_timestamp_path_list=[''])
    # Just report all unparsed atoms to the event handlers.
    from aminer.analysis.UnparsedAtomHandlers import SimpleUnparsedAtomHandler
    atom_filter.add_handler(SimpleUnparsedAtomHandler(anomaly_event_handlers), stop_when_handled_flag=True)
    # Report any log atom whose parser path was never seen before (learning mode).
    from aminer.analysis.NewMatchPathDetector import NewMatchPathDetector
    new_match_path_detector = NewMatchPathDetector(analysis_context.aminer_config, anomaly_event_handlers, learn_mode=True)
    analysis_context.register_component(new_match_path_detector, component_name=None)
    atom_filter.add_handler(new_match_path_detector)
    # Report new (username, home path) value combinations (learning mode).
    from aminer.analysis.NewMatchPathValueComboDetector import NewMatchPathValueComboDetector
    new_match_path_value_combo_detector = NewMatchPathValueComboDetector(analysis_context.aminer_config, [
        '/model/Home Path/Username', '/model/Home Path/Path'], anomaly_event_handlers, learn_mode=True)
    analysis_context.register_component(new_match_path_value_combo_detector, component_name=None)
    atom_filter.add_handler(new_match_path_value_combo_detector)
    # Include the e-mail notification handler only if the configuration parameter was set.
    from aminer.events.DefaultMailNotificationEventHandler import DefaultMailNotificationEventHandler
    if DefaultMailNotificationEventHandler.CONFIG_KEY_MAIL_TARGET_ADDRESS in analysis_context.aminer_config.config_properties:
        mail_notification_handler = DefaultMailNotificationEventHandler(analysis_context)
        analysis_context.register_component(mail_notification_handler, component_name=None)
        anomaly_event_handlers.append(mail_notification_handler)
# Copyright (c) 2020, Frappe Technologies and contributors
# License: MIT. See LICENSE
from typing import Protocol, runtime_checkable
import frappe
from frappe import _
from frappe.model.base_document import get_controller
from frappe.model.document import Document
from frappe.utils import cint
from frappe.utils.caching import site_cache
# Default retention (in days) per log doctype, used by
# LogSettings.add_default_logtypes to auto-register missing entries.
DEFAULT_LOGTYPES_RETENTION = {
	"Error Log": 30,
	"Activity Log": 90,
	"Email Queue": 30,
	"Scheduled Job Log": 90,
	"Route History": 90,
	"Submission Queue": 30,
	"Prepared Report": 30,
	"Webhook Request Log": 30,
	"Integration Request": 90,
	"Reminder": 30,
}
@runtime_checkable
class LogType(Protocol):
	"""Interface requirement for doctypes that can be cleared using log settings."""

	@staticmethod
	def clear_old_logs(days: int) -> None:
		"""Delete log records older than *days* days."""
		...
@site_cache
def _supports_log_clearing(doctype: str) -> bool:
	"""Return True when *doctype*'s controller implements the LogType protocol.

	Any failure to resolve the controller (unknown doctype, import error, ...)
	is treated as "not supported".
	"""
	try:
		return issubclass(get_controller(doctype), LogType)
	except Exception:
		return False
class LogSettings(Document):
	"""Singleton doctype controlling which log doctypes get auto-cleared and how long they are kept."""

	# begin: auto-generated types
	# This code is auto-generated. Do not modify anything in this block.
	from typing import TYPE_CHECKING
	if TYPE_CHECKING:
		from frappe.core.doctype.logs_to_clear.logs_to_clear import LogsToClear
		from frappe.types import DF
		logs_to_clear: DF.Table[LogsToClear]
	# end: auto-generated types

	def validate(self):
		"""Normalize the child table: drop unsupported/duplicate rows, then add defaults."""
		self.remove_unsupported_doctypes()
		self.METHOD_NAME()
		self.add_default_logtypes()

	def remove_unsupported_doctypes(self):
		"""Drop rows whose doctype does not implement `clear_old_logs`, warning the user."""
		# Iterate over a copy since self.remove mutates the child table.
		for entry in list(self.logs_to_clear):
			if _supports_log_clearing(entry.ref_doctype):
				continue
			msg = _("{} does not support automated log clearing.").format(frappe.bold(entry.ref_doctype))
			if frappe.conf.developer_mode:
				msg += "<br>" + _("Implement `clear_old_logs` method to enable auto error clearing.")
			frappe.msgprint(msg, title=_("DocType not supported by Log Settings."))
			self.remove(entry)

	def METHOD_NAME(self):
		"""Remove duplicate doctype rows, keeping the first occurrence of each."""
		seen = set()
		for entry in list(self.logs_to_clear):
			if entry.ref_doctype in seen:
				self.remove(entry)
			seen.add(entry.ref_doctype)

	def add_default_logtypes(self):
		"""Append any supported default log doctype that is not yet configured."""
		existing_logtypes = {d.ref_doctype for d in self.logs_to_clear}
		added_logtypes = set()
		for logtype, retention in DEFAULT_LOGTYPES_RETENTION.items():
			if logtype not in existing_logtypes and _supports_log_clearing(logtype):
				# Skip defaults whose doctype is not installed on this site.
				if not frappe.db.exists("DocType", logtype):
					continue
				self.append("logs_to_clear", {"ref_doctype": logtype, "days": cint(retention)})
				added_logtypes.add(logtype)
		if added_logtypes:
			frappe.msgprint(
				_("Added default log doctypes: {}").format(",".join(added_logtypes)), alert=True
			)

	def clear_logs(self):
		"""
		Log settings can clear any log type that's registered to it and provides a method to delete old logs.
		Check `LogDoctype` above for interface that doctypes need to implement.
		"""
		for entry in self.logs_to_clear:
			controller: LogType = get_controller(entry.ref_doctype)
			func = controller.clear_old_logs
			# Only pass what the method can handle, this is considering any
			# future addition that might happen to the required interface.
			kwargs = frappe.get_newargs(func, {"days": entry.days})
			func(**kwargs)
			# Commit after each doctype so one failure doesn't roll back all progress.
			frappe.db.commit()

	def register_doctype(self, doctype: str, days=30):
		"""Add *doctype* with the given retention, or update its days if already present."""
		existing_logtypes = {d.ref_doctype for d in self.logs_to_clear}
		if doctype not in existing_logtypes and _supports_log_clearing(doctype):
			self.append("logs_to_clear", {"ref_doctype": doctype, "days": cint(days)})
		else:
			for entry in self.logs_to_clear:
				if entry.ref_doctype == doctype:
					entry.days = days
					break
def run_log_clean_up():
	"""Scheduled entry point: sync Log Settings with defaults, then clear old logs."""
	settings = frappe.get_doc("Log Settings")
	settings.remove_unsupported_doctypes()
	settings.add_default_logtypes()
	settings.save()
	settings.clear_logs()
@frappe.whitelist()
def has_unseen_error_log():
	"""Return an alert payload when unseen Error Log entries exist, else None."""
	unseen = frappe.get_all("Error Log", filters={"seen": 0}, limit=1)
	if not unseen:
		return None
	link = '<a href="/app/List/Error%20Log/List"> Error Logs </a>'
	return {
		"show_alert": True,
		"message": _("You have unseen {0}").format(link),
	}
@frappe.whitelist()
@frappe.validate_and_sanitize_search_inputs
def get_log_doctypes(doctype, txt, searchfield, start, page_len, filters):
	"""Link-field query returning doctypes that support automated log clearing.

	Returns a page of 1-tuples (doctype name) matching *txt*, restricted to
	non-table, non-single doctypes whose controller implements `clear_old_logs`.
	"""
	# `filters` is a list of filter conditions (or falsy). The previous default
	# of `{}` crashed on `.extend` since dict has no such method.
	filters = filters or []
	filters.extend(
		[
			["istable", "=", 0],
			["issingle", "=", 0],
			["name", "like", f"%%{txt}%%"],
		]
	)
	doctypes = frappe.get_list("DocType", filters=filters, pluck="name")
	supported_doctypes = [(d,) for d in doctypes if _supports_log_clearing(d)]
	# Paginate relative to `start`; `[start:page_len]` returned an empty page
	# for any start >= page_len.
	return supported_doctypes[start : start + page_len]
# Doctypes whose tables may be bulk-trimmed by clear_log_table().
# NOTE(review): includes "Email Queue Recipient" (child table of Email Queue)
# even though it has no entry in DEFAULT_LOGTYPES_RETENTION.
LOG_DOCTYPES = [
	"Scheduled Job Log",
	"Activity Log",
	"Route History",
	"Email Queue",
	"Email Queue Recipient",
	"Error Log",
]
def clear_log_table(doctype, days=90):
	"""If any logtype table grows too large then clearing it with DELETE query
	is not feasible in reasonable time. This command copies recent data to new
	table and replaces current table with new smaller table.
	ref: https://mariadb.com/kb/en/big-deletes/#deleting-more-than-half-a-table

	:param doctype: one of LOG_DOCTYPES; anything else raises ValidationError.
	:param days: rows modified within the last *days* days are kept.
	"""
	from frappe.utils import get_table_name

	if doctype not in LOG_DOCTYPES:
		raise frappe.ValidationError(f"Unsupported logging DocType: {doctype}")
	original = get_table_name(doctype)
	temporary = f"{original} temp_table"
	backup = f"{original} backup_table"
	try:
		# Clone the schema, fill it with only the recent rows, then atomically
		# swap it in place of the original via a double RENAME.
		frappe.db.sql_ddl(f"CREATE TABLE `{temporary}` LIKE `{original}`")
		# Copy all recent data to new table
		# NOTE(review): *days* is interpolated into the SQL — callers are
		# expected to pass an integer; confirm no untrusted input reaches here.
		frappe.db.sql(
			f"""INSERT INTO `{temporary}`
				SELECT * FROM `{original}`
				WHERE `{original}`.`modified` > NOW() - INTERVAL '{days}' DAY"""
		)
		frappe.db.sql_ddl(f"RENAME TABLE `{original}` TO `{backup}`, `{temporary}` TO `{original}`")
	except Exception:
		# On any failure, undo partial work and keep the original table intact.
		frappe.db.rollback()
		frappe.db.sql_ddl(f"DROP TABLE IF EXISTS `{temporary}`")
		raise
	else:
		# Swap succeeded: the old data is no longer needed.
		frappe.db.sql_ddl(f"DROP TABLE `{backup}`")
# -*- coding: utf-8 -*-
"""
Tencent is pleased to support the open source community by making 蓝鲸智云PaaS平台社区版 (BlueKing PaaS Community
Edition) available.
Copyright (C) 2017 THL A29 Limited, a Tencent company. All rights reserved.
Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://opensource.org/licenses/MIT
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
"""
from django.conf import settings
import env
from api.client import BKComponentClient
# v1 nodeman entry: env override, else derived from the ESB host.
NODEMAN_API_ENTRY = env.BK_NODEMAN_API_ENTRY or "{}/{}".format(settings.BK_PAAS_ESB_HOST, "api/c/compapi/v2/nodeman")
# Versioned nodeman entry (ESB fallback appends "/api").
# NOTE(review): when BK_NODEMAN_API_ENTRY is set, both entries are identical
# (the "/api" suffix is lost) — confirm this is intended.
NODEMAN_API_ENTRY_V2 = env.BK_NODEMAN_API_ENTRY or "{}/{}".format(
    settings.BK_PAAS_ESB_HOST,
    "api/c/compapi/{bk_api_ver}/nodeman/api".format(bk_api_ver=settings.DEFAULT_BK_API_VER),
)
def METHOD_NAME(api_name):
    """Return the full v1 nodeman API url for *api_name*, trailing slash included."""
    return f"{NODEMAN_API_ENTRY}/{api_name}/"
def _get_nodeman_api_v2(api_name):
    """Return the full versioned nodeman API url for *api_name*, trailing slash included."""
    return f"{NODEMAN_API_ENTRY_V2}/{api_name}/"
class BKNodeManClient(BKComponentClient):
    """Thin client for the BlueKing node manager (nodeman) ESB APIs.

    v1 endpoints are built with METHOD_NAME, v2 endpoints with
    _get_nodeman_api_v2; every method delegates to BKComponentClient._request.
    """

    def create_task(self, bk_biz_id, bk_cloud_id, node_type, op_type, creator, hosts):
        """Create an agent operation task on the given hosts (v1 API)."""
        return self._request(
            method="post",
            url=METHOD_NAME("create_task"),
            data={
                "bk_biz_id": bk_biz_id,
                "bk_cloud_id": bk_cloud_id,
                "node_type": node_type,
                "op_type": op_type,
                "creator": creator,
                "hosts": hosts,
            },
        )

    def get_task_info(self, bk_biz_id, job_id):
        """Fetch the status of a v1 task."""
        return self._request(
            method="get",
            url=METHOD_NAME("get_task_info"),
            data={"bk_biz_id": bk_biz_id, "job_id": job_id},
        )

    def get_log(self, host_id, bk_biz_id):
        """Fetch the agent log for one host (v1 API)."""
        return self._request(
            method="get",
            url=METHOD_NAME("get_log"),
            data={"host_id": host_id, "bk_biz_id": bk_biz_id},
        )

    def search_host_plugin(self, bk_biz_id, pagesize, conditions):
        """Search host plugin information (v2 API)."""
        return self._request(
            method="post",
            url=_get_nodeman_api_v2("plugin/search"),
            data={"bk_biz_id": bk_biz_id, "pagesize": pagesize, "conditions": conditions},
        )

    def job_install(self, job_type, hosts, **kwargs):
        """Start an install job; extra keyword arguments are forwarded as-is."""
        data = {"job_type": job_type, "hosts": hosts}
        data.update(kwargs)
        return self._request(method="post", url=_get_nodeman_api_v2("job/install"), data=data)

    def remove_host(self, bk_biz_id, bk_host_id, is_proxy):
        """Remove hosts from nodeman."""
        return self._request(
            method="post",
            url=_get_nodeman_api_v2("remove_host"),
            data={"bk_biz_id": bk_biz_id, "bk_host_id": bk_host_id, "is_proxy": is_proxy},  # whether the removed host is a PROXY
        )

    def job_operate(self, job_type, bk_biz_id, bk_host_id):
        """Run a job operation (restart/reinstall/...) on the given hosts."""
        return self._request(
            method="post",
            url=_get_nodeman_api_v2("job/operate"),
            data={"job_type": job_type, "bk_biz_id": bk_biz_id, "bk_host_id": bk_host_id},
        )

    def job_details(self, job_id):
        """Fetch the details of a v2 job."""
        return self._request(method="post", url=_get_nodeman_api_v2("job/details"), data={"job_id": job_id})

    def get_job_log(self, job_id, instance_id):
        """Fetch the log of one instance within a v2 job."""
        return self._request(
            method="post",
            url=_get_nodeman_api_v2("job/log"),
            data={"job_id": job_id, "instance_id": instance_id},
        )

    def cloud_list(self):
        """List cloud areas."""
        return self._request(method="get", url=_get_nodeman_api_v2("cloud"), data={})

    def ap_list(self):
        """List access points."""
        return self._request(method="get", url=_get_nodeman_api_v2("ap"), data={})

    def plugin_operate(self, params: dict):
        """Run a plugin operation; *params* is passed through unchanged."""
        return self._request(method="post", url=_get_nodeman_api_v2("plugin/operate"), data=params)

    def plugin_process(self, category):
        """List plugin processes of the given category."""
        return self._request(method="post", url=_get_nodeman_api_v2("plugin/process"), data={"category": category})

    def plugin_package(self, name, os):
        """List plugin packages for the given name and OS."""
        return self._request(method="post", url=_get_nodeman_api_v2("plugin/package"), data={"name": name, "os": os})

    def get_rsa_public_key(self, executor):
        """Fetch the default RSA public key used to encrypt credentials (v1 entry)."""
        return self._request(
            method="post",
            url=METHOD_NAME("core/api/encrypt_rsa/fetch_public_keys"),
            data={
                "bk_app_code": settings.APP_CODE,
                "bk_app_secret": settings.SECRET_KEY,
                "bk_username": executor,
                "names": ["DEFAULT"],
            },
        )
# Copyright 2023 Avaiga Private Limited
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
# an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
# specific language governing permissions and limitations under the License.
import os
import pytest
from src.taipy.core.data._data_fs_repository import _DataFSRepository
from src.taipy.core.exceptions import ModelNotFound
from src.taipy.core.job._job_fs_repository import _JobFSRepository
from src.taipy.core.job._job_sql_repository import _JobSQLRepository
from src.taipy.core.job.job import Job, JobId, Task
from src.taipy.core.task._task_fs_repository import _TaskFSRepository
class TestJobRepository:
    """CRUD tests for the job repositories (filesystem- and SQL-backed)."""

    def _seed(self, repository, tmpdir, data_node, job):
        """Point *repository* at *tmpdir* and give *job* a persisted task built on *data_node*."""
        repository.base_path = tmpdir
        _DataFSRepository()._save(data_node)
        task = Task("task_config_id", {}, print, [data_node], [data_node])
        _TaskFSRepository()._save(task)
        job._task = task

    def _save_numbered(self, repository, job, count=10):
        """Persist *count* copies of *job* under ids job-0 .. job-<count-1>."""
        for i in range(count):
            job.id = JobId(f"job-{i}")
            repository._save(job)

    @pytest.mark.parametrize("repo", [_JobFSRepository, _JobSQLRepository])
    def test_save_and_load(self, tmpdir, data_node, job, repo):
        repository = repo()
        self._seed(repository, tmpdir, data_node, job)
        repository._save(job)
        obj = repository._load(job.id)
        assert isinstance(obj, Job)

    @pytest.mark.parametrize("repo", [_JobFSRepository, _JobSQLRepository])
    def test_exists(self, tmpdir, data_node, job, repo):
        repository = repo()
        self._seed(repository, tmpdir, data_node, job)
        repository._save(job)
        assert repository._exists(job.id)
        assert not repository._exists("not-existed-job")

    @pytest.mark.parametrize("repo", [_JobFSRepository, _JobSQLRepository])
    def test_load_all(self, tmpdir, data_node, job, repo):
        repository = repo()
        self._seed(repository, tmpdir, data_node, job)
        self._save_numbered(repository, job)
        assert len(repository._load_all()) == 10

    @pytest.mark.parametrize("repo", [_JobFSRepository, _JobSQLRepository])
    def test_load_all_with_filters(self, tmpdir, data_node, job, repo):
        repository = repo()
        self._seed(repository, tmpdir, data_node, job)
        self._save_numbered(repository, job)
        objs = repository._load_all(filters=[{"id": "job-2"}])
        assert len(objs) == 1

    @pytest.mark.parametrize("repo", [_JobFSRepository, _JobSQLRepository])
    def test_delete(self, tmpdir, data_node, job, repo):
        repository = repo()
        self._seed(repository, tmpdir, data_node, job)
        repository._save(job)
        repository._delete(job.id)
        with pytest.raises(ModelNotFound):
            repository._load(job.id)

    @pytest.mark.parametrize("repo", [_JobFSRepository, _JobSQLRepository])
    def METHOD_NAME(self, tmpdir, data_node, job, repo):
        repository = repo()
        self._seed(repository, tmpdir, data_node, job)
        self._save_numbered(repository, job)
        assert len(repository._load_all()) == 10
        repository._delete_all()
        assert len(repository._load_all()) == 0

    @pytest.mark.parametrize("repo", [_JobFSRepository, _JobSQLRepository])
    def test_delete_many(self, tmpdir, data_node, job, repo):
        repository = repo()
        self._seed(repository, tmpdir, data_node, job)
        self._save_numbered(repository, job)
        objs = repository._load_all()
        assert len(objs) == 10
        ids = [x.id for x in objs[:3]]
        repository._delete_many(ids)
        assert len(repository._load_all()) == 7

    @pytest.mark.parametrize("repo", [_JobFSRepository, _JobSQLRepository])
    def test_delete_by(self, tmpdir, data_node, job, repo):
        repository = repo()
        self._seed(repository, tmpdir, data_node, job)
        # (i+1)//5 yields versions 0.0 (4 jobs), 1.0 (5 jobs) and 2.0 (1 job),
        # so deleting version "1.0" must leave 5 entities behind.
        for i in range(10):
            job.id = JobId(f"job-{i}")
            job._version = f"{(i+1) // 5}.0"
            repository._save(job)
        objs = repository._load_all()
        assert len(objs) == 10
        repository._delete_by("version", "1.0")
        assert len(repository._load_all()) == 5

    @pytest.mark.parametrize("repo", [_JobFSRepository, _JobSQLRepository])
    def test_search(self, tmpdir, data_node, job, repo):
        repository = repo()
        self._seed(repository, tmpdir, data_node, job)
        self._save_numbered(repository, job)
        assert len(repository._load_all()) == 10
        objs = repository._search("id", "job-2")
        assert len(objs) == 1
        assert isinstance(objs[0], Job)
        objs = repository._search("id", "job-2", filters=[{"version": "random_version_number"}])
        assert len(objs) == 1
        assert isinstance(objs[0], Job)
        assert repository._search("id", "job-2", filters=[{"version": "non_existed_version"}]) == []

    @pytest.mark.parametrize("repo", [_JobFSRepository, _JobSQLRepository])
    def test_export(self, tmpdir, job, repo):
        repository = repo()
        repository.base_path = tmpdir
        repository._save(job)
        repository._export(job.id, tmpdir.strpath)
        # FS repo exports into its own dir_path; SQL repo exports under <tmpdir>/job.
        dir_path = repository.dir_path if repo == _JobFSRepository else os.path.join(tmpdir.strpath, "job")
        assert os.path.exists(os.path.join(dir_path, f"{job.id}.json"))
from meerk40t.core.node.node import Node
class RootNode(Node):
    """
    RootNode is one of the few directly declarable node-types and serves as the base type for all Node classes.
    The notifications are shallow. They refer *only* to the node in question, not to any children or parents.
    """

    def __init__(self, context, **kwargs):
        _ = context._
        super().__init__(type="root", **kwargs)
        self._root = self
        self.context = context
        # Objects that receive the notify_* callbacks below.
        self.listeners = []
        self.add(type="branch ops", label=_("Operations"))
        self.add(type="branch elems", label=_("Elements"))
        self.add(type="branch reg", label=_("Regmarks"))

    def __repr__(self):
        return f"RootNode({str(self.context)})"

    def __copy__(self):
        return RootNode(self.context)

    def is_draggable(self):
        return False

    def listen(self, listener):
        """Register *listener* for tree notifications."""
        self.listeners.append(listener)

    def unlisten(self, listener):
        """Unregister *listener* (raises ValueError if it was never registered)."""
        self.listeners.remove(listener)

    def _notify(self, handler_name, node, **kwargs):
        """Invoke *handler_name* on every listener that implements it.

        Shared plumbing for the notify_* family: *node* defaults to the root
        itself and **kwargs are forwarded untouched.
        """
        if node is None:
            node = self
        for listener in self.listeners:
            if hasattr(listener, handler_name):
                getattr(listener, handler_name)(node, **kwargs)

    def notify_created(self, node=None, **kwargs):
        self._notify("node_created", node, **kwargs)

    def notify_destroyed(self, node=None, **kwargs):
        self._notify("node_destroyed", node, **kwargs)

    def notify_attached(self, node=None, **kwargs):
        self._notify("node_attached", node, **kwargs)

    def notify_detached(self, node=None, **kwargs):
        self._notify("node_detached", node, **kwargs)

    def notify_changed(self, node=None, **kwargs):
        self._notify("node_changed", node, **kwargs)

    def notify_selected(self, node=None, **kwargs):
        self._notify("selected", node, **kwargs)

    def notify_emphasized(self, node=None, **kwargs):
        self._notify("emphasized", node, **kwargs)

    def notify_targeted(self, node=None, **kwargs):
        self._notify("targeted", node, **kwargs)

    def notify_highlighted(self, node=None, **kwargs):
        self._notify("highlighted", node, **kwargs)

    def notify_modified(self, node=None, **kwargs):
        """
        Notifies any listeners that a value in the tree has been changed such that the matrix or other property
        values have changed. But that the underlying data object itself remains intact.
        @param node: node that was modified.
        @param kwargs:
        @return:
        """
        # Cached bounds become stale on any modification; recomputed on demand.
        self._bounds = None
        self._notify("modified", node, **kwargs)

    def METHOD_NAME(self, node=None, dx=0, dy=0, **kwargs):
        """
        Notifies any listeners that a node was translated by (dx, dy). Cached
        bounds are shifted by the same offset rather than invalidated.
        @param node: node that was modified.
        @param dx: translation change for node
        @param dy: translation change for node
        @param kwargs:
        @return:
        """
        if node is None:
            node = self
        if self._bounds is not None:
            x_min, y_min, x_max, y_max = self._bounds
            self._bounds = [x_min + dx, y_min + dy, x_max + dx, y_max + dy]
        for listener in self.listeners:
            if hasattr(listener, "translated"):
                # kwargs are deliberately not forwarded to the listener.
                listener.translated(node, dx=dx, dy=dy)

    def notify_scaled(self, node=None, sx=1, sy=1, ox=0, oy=0, **kwargs):
        """
        Notifies any listeners that a node was scaled by (sx, sy) about the
        point (ox, oy). Cached bounds are rescaled rather than invalidated.
        @param node: node that was modified.
        @param sx: scale_x value
        @param sy: scale_y value
        @param ox: offset_x value
        @param oy: offset_y value
        @param kwargs:
        @return:
        """
        if node is None:
            node = self
        if self._bounds is not None:
            x0, y0, x1, y1 = self._bounds
            if sx != 1.0:
                x0 = ox + sx * (x0 - ox)
                x1 = ox + sx * (x1 - ox)
            if sy != 1.0:
                y0 = oy + sy * (y0 - oy)
                y1 = oy + sy * (y1 - oy)
            # Normalize in case a negative scale flipped the box.
            self._bounds = [min(x0, x1), min(y0, y1), max(x0, x1), max(y0, y1)]
        for listener in self.listeners:
            if hasattr(listener, "scaled"):
                # kwargs are deliberately not forwarded to the listener.
                listener.scaled(node, sx=sx, sy=sy, ox=ox, oy=oy)

    def notify_altered(self, node=None, **kwargs):
        """
        Notifies any listeners that a value in the tree has had its underlying data fundamentally changed and while
        this may not be reflected by the properties any assumptions about the content of this node are no longer
        valid.
        @param node:
        @param kwargs:
        @return:
        """
        self._notify("altered", node, **kwargs)

    def notify_expand(self, node=None, **kwargs):
        self._notify("expand", node, **kwargs)

    def notify_collapse(self, node=None, **kwargs):
        self._notify("collapse", node, **kwargs)

    def notify_reorder(self, node=None, **kwargs):
        self._notify("reorder", node, **kwargs)

    def notify_update(self, node=None, **kwargs):
        self._notify("update", node, **kwargs)

    def notify_focus(self, node=None, **kwargs):
        self._notify("focus", node, **kwargs)
# -*- coding: utf-8 -*-
# Copyright 2018, CS GROUP - France, http://www.c-s.fr
#
# This file is part of EODAG project
# https://www.github.com/CS-SI/EODAG
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import os
import unittest
from pathlib import Path
from shutil import copyfile
from tempfile import TemporaryDirectory
import yaml
from tests.context import TEST_RESOURCES_PATH, AwsDownload, EOProduct, NotAvailableError
class TestSafeBuild(unittest.TestCase):
    """Tests for rebuilding SAFE products from individually downloaded AWS chunks."""

    def setUp(self):
        super(TestSafeBuild, self).setUp()
        self.awsd = AwsDownload("some_provider", {})
        self.logger = logging.getLogger("eodag.plugins.download.aws")
        self.tmp_download_dir = TemporaryDirectory()
        self.tmp_download_path = self.tmp_download_dir.name
        # Recorded chunk listings and product properties, keyed by product type.
        with open(
            os.path.join(TEST_RESOURCES_PATH, "safe_build", "aws_sentinel_chunks.yml"),
            "r",
        ) as fh:
            self.aws_sentinel_chunks = yaml.load(fh, Loader=yaml.SafeLoader)

    def tearDown(self):
        self.tmp_download_dir.cleanup()

    def _make_product(self, product_type):
        """Create an EOProduct for *product_type* from the recorded fixtures."""
        return EOProduct(
            provider="some_provider",
            properties=self.aws_sentinel_chunks[product_type]["properties"],
            productType=product_type,
        )

    def _materialize_chunks(self, prod, product_path, product_type):
        """Touch an empty file at the SAFE destination path of every recorded chunk."""

        # A bare function object is used as a lightweight stand-in for a chunk
        # (only its `key` attribute is read by get_chunk_dest_path).
        def chunk():
            return None

        for chunk_key in self.aws_sentinel_chunks[product_type]["chunks"]:
            chunk.key = chunk_key
            chunk_dest_rel_path = self.awsd.get_chunk_dest_path(
                prod,
                chunk,
                build_safe=True,
            )
            chunk_abs_path = os.path.join(product_path, chunk_dest_rel_path)
            os.makedirs(os.path.dirname(chunk_abs_path), exist_ok=True)
            Path(chunk_abs_path).touch()

    def _build_safe(self, product_type):
        """Materialize a full empty-file SAFE product and its manifest; return the product path."""
        prod = self._make_product(product_type)
        product_path = os.path.join(self.tmp_download_path, prod.properties["title"])
        self._materialize_chunks(prod, product_path, product_type)
        copyfile(
            os.path.join(
                TEST_RESOURCES_PATH, "safe_build", "manifest.safe.%s" % product_type
            ),
            os.path.join(
                product_path, "%s.SAFE" % prod.properties["title"], "manifest.safe"
            ),
        )
        return product_path

    def _manifest_warnings(self, product_path):
        """Run check_manifest_file_list and return the captured warning lines."""
        with self.assertLogs(self.logger, logging.WARN) as cm:
            # assertLogs fails if no warning is raised at all, so always emit one.
            self.logger.warning("Dummy warning")
            self.awsd.check_manifest_file_list(product_path)
        return cm.output

    def test_safe_build_out_of_pattern(self):
        """Cannot build SAFE product from out of pattern file"""
        prod = self._make_product("S1_SAR_GRD")

        def chunk():
            return None

        chunk.key = "path/to/some/dummy_chunk"
        with self.assertRaisesRegex(
            NotAvailableError, f"Ignored {chunk.key} out of SAFE matching pattern"
        ):
            self.awsd.get_chunk_dest_path(
                prod,
                chunk,
                build_safe=True,
            )

    def test_safe_build_no_safe(self):
        """Do not use patterns if no SAFE is asked"""
        prod = self._make_product("S1_SAR_GRD")

        def chunk():
            return None

        chunk.key = "path/to/some/dummy_chunk"
        # Without build_safe the chunk key is returned as-is ...
        chunk_dest_path = self.awsd.get_chunk_dest_path(
            prod,
            chunk,
            build_safe=False,
        )
        self.assertEqual(chunk_dest_path, chunk.key)
        # ... or stripped of the given directory prefix.
        chunk_dest_path = self.awsd.get_chunk_dest_path(
            prod, chunk, build_safe=False, dir_prefix="path/to"
        )
        self.assertEqual(chunk_dest_path, "some/dummy_chunk")

    def test_safe_build_s1_sar_grd(self):
        """build S1_SAR_GRD SAFE product with empty files and check content"""
        product_path = self._build_safe("S1_SAR_GRD")
        output = self._manifest_warnings(product_path)
        self.assertEqual(len(output), 1)
        self.assertIn("Dummy warning", output[0])

    def METHOD_NAME(self):
        """build S2_MSI_L2A SAFE product with empty files and check content"""
        product_path = self._build_safe("S2_MSI_L2A")
        self.awsd.finalize_s2_safe_product(product_path)
        output = self._manifest_warnings(product_path)
        self.assertEqual(len(output), 1)
        self.assertIn("Dummy warning", output[0])

    def test_safe_build_s2_msi_l1c(self):
        """build S2_MSI_L1C SAFE product with empty files and check content"""
        product_path = self._build_safe("S2_MSI_L1C")
        self.awsd.finalize_s2_safe_product(product_path)
        output = self._manifest_warnings(product_path)
        self.assertEqual(len(output), 2)
        self.assertIn("Dummy warning", output[0])
        # known missing file, see https://github.com/CS-SI/eodag/pull/218#issuecomment-816770353
        self.assertIn("PVI.jp2 is missing", output[1])
# -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
"""
FILE: chat_client_sample_async.py
DESCRIPTION:
These samples demonstrate create a chat client, get a chat thread client,
create a chat thread, get a chat thread by id, list chat threads, delete
a chat thread by id.
You need to use azure.communication.configuration module to get user access
token and user identity before run this sample
USAGE:
python chat_client_sample_async.py
Set the environment variables with your own values before running the sample:
1) AZURE_COMMUNICATION_SERVICE_ENDPOINT - Communication Service endpoint url
2) TOKEN - the user access token, from token_response.token
3) USER_ID - the user id, from token_response.identity
"""
import os
import asyncio
class ChatClientSamplesAsync(object):
    """Runnable samples for the async Azure Communication Chat client.

    NOTE(review): the statements below execute at class *definition* time —
    they create an identity user and fetch a chat-scoped token as soon as this
    module is imported. Deliberate for a sample script, but would be a code
    smell in production code.
    """
    from azure.communication.identity import CommunicationIdentityClient
    connection_string = os.environ.get("COMMUNICATION_SAMPLES_CONNECTION_STRING", None)
    if not connection_string:
        raise ValueError("Set COMMUNICATION_SAMPLES_CONNECTION_STRING env before run this sample.")
    identity_client = CommunicationIdentityClient.from_connection_string(connection_string)
    user = identity_client.create_user()
    tokenresponse = identity_client.get_token(user, scopes=["chat"])
    token = tokenresponse.token
    endpoint = os.environ.get("AZURE_COMMUNICATION_SERVICE_ENDPOINT", None)
    if not endpoint:
        raise ValueError("Set AZURE_COMMUNICATION_SERVICE_ENDPOINT env before run this sample.")
    # Set by create_thread_async(); read by the thread-based samples below.
    _thread_id = None
    def create_chat_client(self):
        """Sample: build a ChatClient from an endpoint and a user access token."""
        token = self.token
        endpoint = self.endpoint
        thread_id = self._thread_id
        # [START create_chat_client]
        from azure.communication.chat.aio import ChatClient, CommunicationTokenCredential
        # set `endpoint` to an existing ACS endpoint
        chat_client = ChatClient(endpoint, CommunicationTokenCredential(token))
        # [END create_chat_client]
        print("chat_client created")
    async def create_thread_async(self):
        """Sample: create a chat thread, both plain and idempotent variants."""
        token = self.token
        endpoint = self.endpoint
        thread_id = self._thread_id
        # [START create_thread]
        from datetime import datetime
        from azure.communication.chat.aio import ChatClient, CommunicationTokenCredential
        from azure.communication.chat import ChatParticipant
        # set `endpoint` to an existing ACS endpoint
        chat_client = ChatClient(endpoint, CommunicationTokenCredential(token))
        async with chat_client:
            topic = "test topic"
            participants = [ChatParticipant(
                identifier=self.user,
                display_name='name',
                share_history_time=datetime.utcnow()
            )]
            # creates a new chat_thread everytime
            create_chat_thread_result = await chat_client.create_chat_thread(topic, thread_participants=participants)
            # creates a new chat_thread if not exists
            idempotency_token = 'b66d6031-fdcc-41df-8306-e524c9f226b8' # unique identifier
            create_chat_thread_result_w_repeatability_id = await chat_client.create_chat_thread(
                topic,
                thread_participants=participants,
                idempotency_token=idempotency_token)
        # [END create_thread]
        self._thread_id = create_chat_thread_result.chat_thread.id
        print("thread created, id: " + self._thread_id)
    def get_chat_thread_client(self):
        """Sample: obtain a ChatThreadClient for an existing thread id."""
        token = self.token
        endpoint = self.endpoint
        thread_id = self._thread_id
        # [START get_chat_thread_client]
        from azure.communication.chat.aio import ChatClient, CommunicationTokenCredential
        # set `endpoint` to an existing ACS endpoint
        chat_client = ChatClient(endpoint, CommunicationTokenCredential(token))
        # set `thread_id` to an existing chat thread id
        chat_thread_client = chat_client.get_chat_thread_client(thread_id)
        # [END get_chat_thread_client]
        print("chat_thread_client created with thread id: ", chat_thread_client.thread_id)
    async def list_threads_async(self):
        """Sample: page through chat threads created in the last two days."""
        token = self.token
        endpoint = self.endpoint
        thread_id = self._thread_id
        # [START list_threads]
        from azure.communication.chat.aio import ChatClient, CommunicationTokenCredential
        # set `endpoint` to an existing ACS endpoint
        chat_client = ChatClient(endpoint, CommunicationTokenCredential(token))
        async with chat_client:
            from datetime import datetime, timedelta
            start_time = datetime.utcnow() - timedelta(days=2)
            chat_threads = chat_client.list_chat_threads(results_per_page=5, start_time=start_time)
            print("list_threads succeeded with results_per_page is 5, and were created since 2 days ago.")
            async for chat_thread_item_page in chat_threads.by_page():
                async for chat_thread_item in chat_thread_item_page:
                    print("thread id: ", chat_thread_item.id)
        # [END list_threads]
    async def delete_thread_async(self):
        """Sample: delete the chat thread created earlier."""
        token = self.token
        endpoint = self.endpoint
        thread_id = self._thread_id
        # [START delete_thread]
        from azure.communication.chat.aio import ChatClient, CommunicationTokenCredential
        # set `endpoint` to an existing ACS endpoint
        chat_client = ChatClient(endpoint, CommunicationTokenCredential(token))
        async with chat_client:
            # set `thread_id` to an existing chat thread id
            await chat_client.delete_chat_thread(thread_id)
        # [END delete_thread]
        print("delete_thread succeeded")
    def clean_up(self):
        """Delete the identity user created at class-definition time."""
        print("cleaning up: deleting created user.")
        self.identity_client.delete_user(self.user)
async def METHOD_NAME():
    """Run every chat-client sample in order, then delete the created user."""
    samples = ChatClientSamplesAsync()
    samples.create_chat_client()
    await samples.create_thread_async()
    samples.get_chat_thread_client()
    await samples.list_threads_async()
    await samples.delete_thread_async()
    samples.clean_up()


if __name__ == '__main__':
    asyncio.run(METHOD_NAME())
5,583 | submit production | """
desispec.scripts.submit_prod
============================
"""
import yaml
import numpy as np
import os
import sys
import time
import re
## Import some helper functions, you can see their definitions by uncomenting the bash shell command
from desispec.workflow.utils import verify_variable_with_environment, listpath
from desispec.scripts.submit_night import submit_night
def assign_survey(night, conf):
    """Determine the survey a given night belongs to, from a production config.

    Args:
        night (int): The night you want to know the survey it corresponds to.
        conf (dict): Dictionary returned when the configuration yaml file was
            read in. Must contain a 'DateRanges' mapping of survey name to an
            inclusive (first, last) pair of nights.

    Returns:
        str or None: The survey the night was taken under, according to the
            conf file, if possible. Otherwise None.
    """
    # The original used a `for`/`else` with no `break`, which always fell
    # through to the `else` — equivalent to a plain return after the loop,
    # but misleading to read. Chained comparison replaces the two tests.
    for survey, (first, last) in conf['DateRanges'].items():
        if first <= night <= last:
            return survey
    return None
def get_all_nights():
    """Return a full list of all nights available in the DESI raw data directory.

    Returns:
        list[int]: Nights on or after Jan 1 2020 in which data exists at NERSC
            (directory names of the form 202YMMDD).
    """
    # Raw string fixes the invalid "\d" escape (SyntaxWarning on modern
    # Python); compiling once hoists the pattern out of the loop.
    night_pattern = re.compile(r'^202\d{5}$')
    return [
        int(n)
        for n in listpath(os.getenv('DESI_SPECTRO_DATA'))
        if night_pattern.match(n)
    ]
def METHOD_NAME(production_yaml, dry_run=False, error_if_not_available=False):
    """
    Interprets a production_yaml file and submits the respective nights for processing
    within the defined production.
    Args:
        production_yaml (str): Pathname of the yaml file that defines the production.
        dry_run (bool, optional): Default is False. Should the jobs written to the processing table actually be submitted
            for processing.
        error_if_not_available (bool, optional): Default is False. Raise an error if the required exposure table doesn't exist,
            otherwise prints an error and returns.
    Returns:
        None.
    """
    if not os.path.exists(production_yaml):
        raise IOError(f"Prod Yaml file doesn't exist: {production_yaml} not found. Exiting.")
    conf = yaml.safe_load(open(production_yaml, 'rb'))
    # Production name must agree with the SPECPROD environment variable.
    specprod = str(conf['name']).lower()
    specprod = verify_variable_with_environment(var=specprod, var_name='specprod', env_name='SPECPROD')
    # Optional batch reservation; the literal string "none" disables it.
    if 'reservation' in conf:
        reservation = str(conf['reservation'])
        if reservation.lower() == 'none':
            reservation = None
    else:
        reservation = None
    if 'queue' in conf:
        queue = conf['queue']
    else:
        queue = 'realtime'
    if 'OVERWRITEEXISTING' in conf:
        overwrite_existing = conf['OVERWRITEEXISTING']
    else:
        overwrite_existing = False
    print(f'Using queue: {queue}')
    if reservation is not None:
        print(f'Using reservation: {reservation}')
    if overwrite_existing:
        print("Ignoring the fact that files exists and submitting those nights anyway")
    all_nights = get_all_nights()
    non_survey_nights = []
    for night in all_nights:
        survey = assign_survey(night, conf)
        # Skip nights outside any survey date range, surveys explicitly
        # disabled via ProcessData, and nights listed in SkipNights.
        # NOTE(review): assumes 'ProcessData' and 'SkipNights' keys exist in
        # the yaml — a missing key raises KeyError here; confirm the schema.
        if survey is None:
            non_survey_nights.append(night)
            continue
        elif survey in conf['ProcessData'] and conf['ProcessData'][survey] is False:
            print(f'Asked not to process survey: {survey}, Not processing night={night}.', '\n\n\n')
            continue
        elif survey in conf['SkipNights'] and night in conf['SkipNights'][survey]:
            print(f'Asked to skip night={night} (in survey: {survey}). Skipping.', '\n\n\n')
            continue
        else:
            print(f'Processing {survey} night: {night}')
            submit_night(night, proc_obstypes=None, dry_run=dry_run, queue=queue, reservation=reservation,
                         overwrite_existing=overwrite_existing, error_if_not_available=error_if_not_available)
            print(f"Completed {night}. Sleeping for 30s")
            # Throttle submissions so the scheduler isn't flooded.
            time.sleep(30)
    print("Skipped the following nights that were not assigned to a survey:")
    print(non_survey_nights, '\n\n\n')
    print("All nights submitted")
5,584 | process resource | # Copyright The Cloud Custodian Authors.
# SPDX-License-Identifier: Apache-2.0
from c7n_azure.provider import resources
from c7n_azure.query import ChildTypeInfo
from c7n_azure.actions.base import AzureBaseAction
from c7n_azure.resources.arm import ChildArmResourceManager
from c7n.filters.core import type_schema
from c7n_azure.utils import ResourceIdParser
from msrestazure.tools import parse_resource_id
@resources.register('storage-container')
class StorageContainer(ChildArmResourceManager):
    """Storage Container Resource
    :example:
    Finds all containers with public access enabled
    .. code-block:: yaml
        policies:
          - name: storage-container-public
            description: |
              Find all containers with public access enabled
            resource: azure.storage-container
            filters:
              - type: value
                key: properties.publicAccess
                op: not-equal
                value: None # Possible values: Blob, Container, None
    """
    class resource_type(ChildTypeInfo):
        # Declarative wiring for the c7n_azure query layer: which SDK client
        # and enum operation list the containers under each storage account.
        doc_groups = ['Storage']
        service = 'azure.mgmt.storage'
        client = 'StorageManagementClient'
        enum_spec = ('blob_containers', 'list', None)
        parent_manager_name = 'storage'
        diagnostic_settings_enabled = False
        resource_type = 'Microsoft.Storage/storageAccounts/blobServices/containers'
        raise_on_exception = False
        default_report_fields = (
            'name',
            'properties.publicAccess',
            '"c7n:parent-id"'
        )
        @classmethod
        def extra_args(cls, parent_resource):
            # Listing containers requires the parent storage account's
            # resource group and name as extra call arguments.
            return {'resource_group_name': parent_resource['resourceGroup'],
                    'account_name': parent_resource['name']}
    def get_resources(self, resource_ids):
        """Fetch containers by full resource id and serialize for filtering."""
        client = self.get_client()
        data = [
            self.get_storage_container(rid, client)
            for rid in resource_ids
        ]
        return self.augment([r.serialize(True) for r in data])
    def get_storage_container(self, resource_id, client):
        """Resolve one container from its ARM resource id."""
        parsed = parse_resource_id(resource_id)
        return client.blob_containers.get(parsed.get('resource_group'),
                                          parsed.get('name'),  # Account name
                                          parsed.get('resource_name'))  # Container name
@StorageContainer.action_registry.register('set-public-access')
class StorageContainerSetPublicAccessAction(AzureBaseAction):
    """Action that updates the access level setting on Storage Containers.
    Programmatically, this will be seen by updating the Public Access setting
    :example:
    Finds all Blob Storage Containers that are not private and sets them to private
    .. code-block:: yaml
        policies:
          - name: set-non-production-accounts-private
            resource: azure.storage-container
            filters:
              - type: value
                key: properties.publicAccess
                op: not-equal
                value: None
            actions:
              - type: set-public-access
                value: None
    """
    schema = type_schema(
        'set-public-access',
        required=['value'],
        **{
            # 'None' here is the Azure access level (private), not Python None.
            'value': {'enum': ['Container', 'Blob', 'None']}
        }
    )
    def _prepare_processing(self):
        # Cache the SDK client once for the whole batch of resources.
        self.client = self.manager.get_client()
    def METHOD_NAME(self, resource):
        """Read the container, set its public_access level, and write it back."""
        resource_group = ResourceIdParser.get_resource_group(resource['id'])
        account_name = ResourceIdParser.get_resource_name(resource['c7n:parent-id'])
        blob_container = self.client.blob_containers.get(resource_group,
                                                         account_name,
                                                         resource['name'],
                                                         )
        blob_container.public_access = self.data['value']
        self.client.blob_containers.update(
            resource_group,
            account_name,
            resource['name'],
            blob_container
        )
5,585 | validate options | from cumulusci.core.exceptions import (
ApexCompilationException,
ApexException,
SalesforceException,
TaskOptionsError,
)
from cumulusci.core.utils import process_bool_arg
from cumulusci.tasks.salesforce import BaseSalesforceApiTask
from cumulusci.utils import in_directory, inject_namespace
from cumulusci.utils.http.requests_utils import safe_json_from_response
class AnonymousApexTask(BaseSalesforceApiTask):
    """Executes anonymous apex from a file or string."""
    task_docs = """
    Use the `apex` option to run a string of anonymous Apex.
    Use the `path` option to run anonymous Apex from a file.
    Or use both to concatenate the string to the file contents.
    """
    task_options = {
        "path": {"description": "The path to an Apex file to run.", "required": False},
        "apex": {
            "description": "A string of Apex to run (after the file, if specified).",
            "required": False,
        },
        "managed": {
            "description": (
                "If True, will insert the project's namespace prefix. "
                "Defaults to False or no namespace."
            ),
            "required": False,
        },
        "namespaced": {
            "description": (
                "If True, the tokens %%%NAMESPACED_RT%%% and %%%namespaced%%% "
                "will get replaced with the namespace prefix for Record Types."
            ),
            "required": False,
        },
        "param1": {
            "description": (
                "Parameter to pass to the Apex. Use as %%%PARAM_1%%% in the Apex code. "
                "Defaults to an empty value."
            ),
            "required": False,
        },
        "param2": {
            "description": (
                "Parameter to pass to the Apex. Use as %%%PARAM_2%%% in the Apex code. "
                "Defaults to an empty value."
            ),
            "required": False,
        },
    }
    def METHOD_NAME(self):
        """Validate options: at least one of `path` / `apex` must be given."""
        super().METHOD_NAME()
        if not self.options.get("path") and not self.options.get("apex"):
            raise TaskOptionsError(
                "You must specify either the `path` or `apex` option."
            )
    def _run_task(self):
        """Assemble the Apex source (file then string), inject tokens, and run
        it via the Tooling API `executeAnonymous` endpoint."""
        apex = self._process_apex_from_path(self.options.get("path"))
        apex += self._process_apex_string(self.options.get("apex"))
        apex = self._prepare_apex(apex)
        self.logger.info("Executing anonymous Apex")
        result = self.tooling._call_salesforce(
            method="GET",
            url=f"{self.tooling.base_url}executeAnonymous",
            params={"anonymousBody": apex},
        )
        self._check_result(result)
        self.logger.info("Anonymous Apex Executed Successfully!")
    def _process_apex_from_path(self, apex_path):
        """Process apex given via the --path task option"""
        if not apex_path:
            return ""
        # Refuse paths outside the repo so tasks can't read arbitrary files.
        if not in_directory(apex_path, self.project_config.repo_root):
            raise TaskOptionsError(
                "Please specify a path inside your project repository. "
                f"You specified: {apex_path}"
            )
        self.logger.info(f"Processing Apex from path: {apex_path}")
        try:
            with open(apex_path, "r", encoding="utf-8") as f:
                apex = f.read()
        except IOError:
            raise TaskOptionsError(f"Could not find or read file: {apex_path}")
        return apex
    def _process_apex_string(self, apex_string):
        """Process the string of apex given via the --apex task option"""
        apex = ""
        if apex_string:
            self.logger.info("Processing Apex from '--apex' option")
            # append a newline so that we don't clash if
            # apex was also given via the --path option
            apex = "\n" + apex_string
        return apex
    def _prepare_apex(self, apex):
        """Replace namespace and %%%PARAM_n%%% tokens in the Apex source."""
        # Process namespace tokens
        namespace = self.project_config.project__package__namespace
        if "managed" in self.options:
            managed = process_bool_arg(self.options["managed"])
        else:
            # Default: managed iff the package namespace is installed in the org.
            managed = (
                bool(namespace) and namespace in self.org_config.installed_packages
            )
        if "namespaced" in self.options:
            namespaced = process_bool_arg(self.options["namespaced"])
        else:
            namespaced = bool(namespace) and namespace == self.org_config.namespace
        _, apex = inject_namespace(
            "",
            apex,
            namespace=namespace,
            managed=managed,
            namespaced_org=namespaced,
        )
        # This is an extra token which is not handled by inject_namespace.
        apex = apex.replace(
            "%%%NAMESPACED_RT%%%", namespace + "." if namespaced else ""
        )
        # Process optional parameter token replacement
        param1 = self.options.get("param1") or ""
        apex = apex.replace("%%%PARAM_1%%%", param1)
        param2 = self.options.get("param2") or ""
        apex = apex.replace("%%%PARAM_2%%%", param2)
        return apex
    def _check_result(self, result):
        """Raise a typed exception if the execution compiled or ran with errors."""
        # anon_results is an ExecuteAnonymous Result
        # https://developer.salesforce.com/docs/atlas.en-us.apexcode.meta/apexcode/sforce_api_calls_executeanonymous_result.htm
        anon_results = safe_json_from_response(result)
        # A result of `None` (body == "null") with a 200 status code means that a gack occurred.
        if anon_results is None:
            raise SalesforceException(
                "Anonymous Apex returned the result `null`. "
                "This often indicates a gack occurred."
            )
        if not anon_results["compiled"]:
            raise ApexCompilationException(
                anon_results["line"], anon_results["compileProblem"]
            )
        if not anon_results["success"]:
            raise ApexException(
                anon_results["exceptionMessage"], anon_results["exceptionStackTrace"]
            )
5,586 | get folders with tasks | import collections
from ayon_api.graphql import GraphQlQuery, FIELD_VALUE, fields_to_dict
from .constants import DEFAULT_FOLDER_FIELDS
def folders_tasks_graphql_query(fields):
    """Build the GraphQl query used to fetch folders together with their tasks."""
    query = GraphQlQuery("FoldersQuery")

    # Declare the query variables the filters below bind to.
    variables = {
        var_name: query.add_variable(var_name, var_type)
        for var_name, var_type in (
            ("projectName", "String!"),
            ("folderIds", "[String!]"),
            ("parentFolderIds", "[String!]"),
            ("folderPaths", "[String!]"),
            ("folderNames", "[String!]"),
            ("folderHasProducts", "Boolean!"),
        )
    }

    project_field = query.add_field("project")
    project_field.set_filter("name", variables["projectName"])

    folders_field = project_field.add_field_with_edges("folders")
    for filter_name, var_name in (
        ("ids", "folderIds"),
        ("parentIds", "parentFolderIds"),
        ("names", "folderNames"),
        ("paths", "folderPaths"),
        ("hasProducts", "folderHasProducts"),
    ):
        folders_field.set_filter(filter_name, variables[var_name])

    # Tasks are always queried with a fixed pair of subfields, never as a
    # plain field from the caller-provided set.
    requested_fields = set(fields)
    requested_fields.discard("tasks")
    tasks_field = folders_field.add_field_with_edges("tasks")
    tasks_field.add_field("name")
    tasks_field.add_field("taskType")

    # Breadth-first expansion of the (possibly nested) requested fields.
    pending = collections.deque(
        (key, value, folders_field)
        for key, value in fields_to_dict(requested_fields).items()
    )
    while pending:
        key, value, parent = pending.popleft()
        field = parent.add_field(key)
        if value is not FIELD_VALUE:
            for sub_key, sub_value in value.items():
                pending.append((sub_key, sub_value, field))
    return query
def METHOD_NAME(
    con,
    project_name,
    folder_ids=None,
    folder_paths=None,
    folder_names=None,
    parent_ids=None,
    active=True,
    fields=None
):
    """Query folders with tasks from server.
    This is for v4 compatibility where tasks were stored on assets. This is
    an inefficient way how folders and tasks are queried so it was added only
    as compatibility function.
    Todos:
        Folder name won't be unique identifier, so we should add folder path
        filtering.
    Notes:
        Filter 'active' don't have direct filter in GraphQl.
    Args:
        con (ServerAPI): Connection to server.
        project_name (str): Name of project where folders are.
        folder_ids (Iterable[str]): Folder ids to filter.
        folder_paths (Iterable[str]): Folder paths used for filtering.
        folder_names (Iterable[str]): Folder names used for filtering.
        parent_ids (Iterable[str]): Ids of folder parents. Use 'None'
            if folder is direct child of project.
        active (Union[bool, None]): Filter active/inactive folders. Both
            are returned if is set to None.
        fields (Union[Iterable(str), None]): Fields to be queried
            for folder. All possible folder fields are returned if 'None'
            is passed.
    Returns:
        List[Dict[str, Any]]: Queried folder entities.
    """
    if not project_name:
        return []
    filters = {
        "projectName": project_name
    }
    # For each filter: an explicitly-passed empty iterable means "match
    # nothing", so return early; None means "don't filter on this at all".
    if folder_ids is not None:
        folder_ids = set(folder_ids)
        if not folder_ids:
            return []
        filters["folderIds"] = list(folder_ids)
    if folder_paths is not None:
        folder_paths = set(folder_paths)
        if not folder_paths:
            return []
        filters["folderPaths"] = list(folder_paths)
    if folder_names is not None:
        folder_names = set(folder_names)
        if not folder_names:
            return []
        filters["folderNames"] = list(folder_names)
    if parent_ids is not None:
        parent_ids = set(parent_ids)
        if not parent_ids:
            return []
        if None in parent_ids:
            # Replace 'None' with '"root"' which is used during GraphQl
            #   query for parent ids filter for folders without folder
            #   parent
            parent_ids.remove(None)
            parent_ids.add("root")
        if project_name in parent_ids:
            # Replace project name with '"root"' which is used during
            #   GraphQl query for parent ids filter for folders without
            #   folder parent
            parent_ids.remove(project_name)
            parent_ids.add("root")
        filters["parentFolderIds"] = list(parent_ids)
    if fields:
        fields = set(fields)
    else:
        fields = con.get_default_fields_for_type("folder")
    fields |= DEFAULT_FOLDER_FIELDS
    if active is not None:
        # 'active' has no server-side filter; query the flag and filter below.
        fields.add("active")
    query = folders_tasks_graphql_query(fields)
    for attr, filter_value in filters.items():
        query.set_variable_value(attr, filter_value)
    parsed_data = query.query(con)
    folders = parsed_data["project"]["folders"]
    if active is None:
        return folders
    return [
        folder
        for folder in folders
        if folder["active"] is active
    ]
5,587 | init | #
# Project: MXCuBE
# https://github.com/mxcube
#
# This file is part of MXCuBE software.
#
# MXCuBE is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# MXCuBE is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with MXCuBE. If not, see <http://www.gnu.org/licenses/>.
import logging
from enum import IntEnum, unique
import gevent.event
from mxcubecore.BaseHardwareObjects import ConfiguredObject
from mxcubecore.dispatcher import dispatcher
# import mxcubecore.model.procedure_model
# Using jsonschma for validating the JSCONSchemas
# https://json-schema.org/
# https://github.com/Julian/jsonschema
from jsonschema import validate, ValidationError
__credits__ = ["MXCuBE collaboration"]
# Temporary definition; should use common definition from
# HardwareObject
@unique
class ProcedureState(IntEnum):
    """
    Defines the valid Procedure states
    """
    ERROR = 0
    BUSY = 1
    READY = 3  # NOTE(review): value 2 is skipped — presumably reserved; confirm
class AbstractProcedure(ConfiguredObject):
    """Base class for long-running procedures executed in a gevent greenlet.

    Subclasses override ``_pre_execute`` / ``_execute`` / ``_post_execute``;
    progress is reported through dispatcher signals (procedureStarted,
    procedureSuccessful, procedureError, procedureStopped).
    """
    __content_roles = []
    # Data-model classes for arguments and results. NOTE(review): these are
    # class-level and mutated via the static setters below, so they are shared
    # across all subclasses — confirm that is intended.
    _ARGS_CLASS = ()
    _KWARGS_CLASS = {}
    _RESULT_CLASS = ()
    @staticmethod
    def set_args_class(args_class, kwargs_class):
        """
        Sets the types of the data models used as arguments, can be used to
        set the argument classes at runtime if the models are built dynamically,
        i.e based on configuration not known before
        Args:
            args_class (tuple[BaseModel]): tuple of classes for args
            kwargs_class (dict[str, BaseModel]): dictionary containing BaseModels
        """
        AbstractProcedure._ARGS_CLASS = args_class
        AbstractProcedure._KWARGS_CLASS = kwargs_class
    @staticmethod
    def set_result_class(_result_class):
        """
        Sets the types of the data models returned by the Procedure, can
        be used to set the result model at runtime if the data model is built
        dynamically, i.e based on configuration not known before
        Args:
            _result_class (tuple[BaseModel]): tuple of classes for the result
        """
        AbstractProcedure._RESULT_CLASS = _result_class
    def __init__(self, name):
        super(AbstractProcedure, self).__init__(name)
        self._msg = None
        self._results = None
        self._ready_event = gevent.event.Event()
        self._task = None
        self._state = ProcedureState.READY
        # YML configuration options
        # Category that the Procedure belongs to, configurable through
        # YAML file and used by for listing and displaying the procedure
        # in the right context.
        self.category = ""
    def _init(self):
        pass
    def METHOD_NAME(self):
        # Hook called by the ConfiguredObject framework; no-op by default.
        pass
    def _execute(self, data_model):
        """
        Override to implement main task logic
        Args:
            data_model: sub class of mxcubecore.model.procedure_model
                dict in Python 2.7 and Data class in Python 3.7. Data is validated
                by the data_model object
        Returns:
        """
        pass
    def _pre_execute(self, data_model):
        """
        Override to implement pre execute task logic
        Args:
            data_model: sub class of mxcubecore.model.procedure_model
                Data is validated by the data_model object
        Returns:
        """
        pass
    def _post_execute(self, data_model):
        """
        Override to implement post execute task logic
        Args:
            data_model: sub class of mxcubecore.model.procedure_model
                Data is validated by the data_model object
        Returns:
        """
        pass
    def _set_started(self):
        """
        Emits procedureStarted signal
        Returns:
        """
        self._state = ProcedureState.BUSY
        dispatcher.send(self, "procedureStarted")
    def _set_successful(self):
        """
        Emits procedureSuccessful signal
        Returns:
        """
        self._state = ProcedureState.READY
        dispatcher.send(self, "procedureSuccessful", self.results)
    def _set_error(self):
        """
        Emits procedureError signal
        Returns:
        """
        self._state = ProcedureState.ERROR
        dispatcher.send(self, "procedureError", self.msg)
    def _set_stopped(self):
        """
        Emits procedureStopped signal
        Returns:
        """
        self._state = ProcedureState.READY
        dispatcher.send(self, "procedureStopped", self.results)
    def _start(self, data_model):
        """
        Internal start, for the moment executed in greenlet
        """
        try:
            self._set_started()
            self._pre_execute(data_model)
            self._execute(data_model)
        except Exception as ex:
            self._state = ProcedureState.ERROR
            self._msg = "Procedure execution error (%s)" % str(ex)
            logging.getLogger("HWR").exception(self._msg)
        finally:
            # _post_execute always runs, even after an execution error.
            try:
                self._post_execute(data_model)
            except Exception as ex:
                self._state = ProcedureState.ERROR
                self._msg = "Procedure post_execute error (%s)" % str(ex)
                logging.getLogger("HWR").exception(self._msg)
            self._ready_event.set()
            if self._state == ProcedureState.ERROR:
                self._set_error()
            else:
                self._set_successful()
    @property
    def argument_schema(self):
        """
        Schema for arguments passed to start
        Returns:
            dict{"args": tuple[JSONSchema], "kwargs": key: [JSONSchema]}
        """
        return {
            "args": tuple([s.schema_json() for s in self._ARGS_CLASS]),
            "kwargs": {
                key: value.schema_json() for (key, value) in self._KWARGS_CLASS.items()
            },
        }
    @property
    def result_schema(self):
        """
        Schema for result
        Returns:
            tuple[JSONSchema]
        """
        return (s.schema_json() for s in self._RESULT_CLASS)
    @property
    def msg(self):
        """
        Last message produced by procedure
        Returns:
            str
        """
        return self._msg
    @property
    def state(self):
        """
        Execution state
        Returns:
            ProcedureState: The current state of the procedure
        """
        return self._state
    @property
    def results(self):
        """
        Results from procedure execution validated by RESULT_SCHEMA
        if it is defined
        Returns:
            DataClass or frozendict
        """
        return self._results
    def start(self, data_model):
        """
        Starts procedure
        Args:
            data_model: sub class of mxcubecore.model.procedure_model.
                Data is validated by the data_model object
        Returns:
            (Greenlet) The gevent task
        """
        if self._state != ProcedureState.READY:
            self._msg = "Procedure (%s) is already running" % str(self)
            logging.getLogger("HWR").error(self._msg)
        else:
            self._task = gevent.spawn(self._start, data_model)
        # NOTE(review): returns the previous task (or None) when already
        # running — callers should check `state` first.
        return self._task
    def stop(self):
        """
        Stops the execution of procedure
        Returns:
            None
        """
        gevent.kill(self._task)
        self._set_stopped()
    def wait(self):
        """
        Waits for procedure to finish execution
        Returns:
            None
        """
        self._ready_event.wait()
5,588 | test utf8 argument name | # coding: utf-8
import pytest
import rpy2.rinterface as rinterface
import rpy2.rlike.container as rlc
rinterface.initr()
def _noconsole(x):
pass
@pytest.fixture(scope='module')
def silent_consolewrite():
    """Temporarily replace the R console-print callback with a no-op."""
    module = rinterface.callbacks
    attr_name = 'consolewrite_print'
    original = getattr(module, attr_name)
    setattr(module, attr_name, _noconsole)
    try:
        yield
    finally:
        # Always restore the original callback, even if the test errors out.
        setattr(module, attr_name, original)
# SexpClosure cannot be built from a plain Python object.
def test_new():
    x = 'a'
    with pytest.raises(ValueError):
        rinterface.SexpClosure(x)
# An R function found in the global env reports the closure SEXP type.
def test_typeof():
    sexp = rinterface.globalenv.find('plot')
    assert sexp.typeof == rinterface.RTYPES.CLOSXP
# Calling sum() on a character vector raises on the R side.
def test_r_error():
    r_sum = rinterface.baseenv['sum']
    letters = rinterface.baseenv['letters']
    with pytest.raises(rinterface.embedded.RRuntimeError),\
            pytest.warns(rinterface.RRuntimeWarning):
        r_sum(letters)
# Python str arguments are converted to R character vectors.
def test_string_argument():
    r_nchar = rinterface.baseenv['::']('base', 'nchar')
    res = r_nchar('foo')
    assert res[0] == 3
# Non-ASCII (UTF-8) keyword names survive the round trip into R names.
def METHOD_NAME():
    c = rinterface.globalenv.find('c')
    d = dict([(u'哈哈', 1)])
    res = c(**d)
    assert u'哈哈' == res.do_slot('names')[0]
# Empty string is not a valid R argument name.
def test_emptystringparams():
    d = dict([('', 1)])
    with pytest.raises(ValueError):
        rinterface.baseenv['list'](**d)
# A closure's enclosing environment is exposed as a SexpEnvironment.
def test_closureenv_isenv():
    exp = rinterface.parse('function() { }')
    fun = rinterface.baseenv['eval'](exp)
    assert isinstance(fun.closureenv, rinterface.SexpEnvironment)
# Free variables in an R closure resolve through closureenv: undefined `y`
# raises, and assigning different values changes the result.
def test_closureenv():
    assert 'y' not in rinterface.globalenv
    exp = rinterface.parse('function(x) { x[y] }')
    fun = rinterface.baseenv['eval'](exp)
    vec = rinterface.baseenv['letters']
    assert isinstance(fun.closureenv, rinterface.SexpEnvironment)
    with pytest.raises(rinterface.embedded.RRuntimeError):
        with pytest.warns(rinterface.RRuntimeWarning):
            fun(vec)
    fun.closureenv['y'] = (rinterface
                           .IntSexpVector([1, ]))
    assert 'a' == fun(vec)[0]
    fun.closureenv['y'] = (rinterface
                           .IntSexpVector([2, ]))
    assert 'b' == fun(vec)[0]
# Smoke test: methods::setClass can be driven through rpy2.
def test_call_s4_setClass():
    # R's package "methods" can perform uncommon operations
    r_setClass = rinterface.globalenv.find('setClass')
    r_representation = rinterface.globalenv.find('representation')
    attrnumeric = rinterface.StrSexpVector(['numeric', ])
    classname = rinterface.StrSexpVector(['Track', ])
    classrepr = r_representation(x=attrnumeric,
                                 y=attrnumeric)
    r_setClass(classname,
               classrepr)
    # TODO: where is the test ?
# rcall with an OrdDict preserves argument order; a None key becomes an
# unnamed argument (empty string in the R names attribute).
def test_call_OrdDict():
    ad = rlc.OrdDict((('a', rinterface.IntSexpVector([2, ])),
                      ('b', rinterface.IntSexpVector([1, ])),
                      (None, rinterface.IntSexpVector([5, ])),
                      ('c', rinterface.IntSexpVector([0, ]))))
    mylist = rinterface.baseenv['list'].rcall(tuple(ad.items()),
                                              rinterface.globalenv)
    names = [x for x in mylist.do_slot('names')]
    for i in range(4):
        assert ('a', 'b', '', 'c')[i] == names[i]
# The evaluation environment passed to rcall determines variable lookup.
def test_call_OrdDictEnv():
    ad = rlc.OrdDict(((None, rinterface.parse('sum(x)')), ))
    env_a = rinterface.baseenv['new.env']()
    env_a['x'] = rinterface.IntSexpVector([1, 2, 3])
    sum_a = rinterface.baseenv['eval'].rcall(tuple(ad.items()), env_a)
    assert 6 == sum_a[0]
    env_b = rinterface.baseenv['new.env']()
    env_b['x'] = rinterface.IntSexpVector([4, 5, 6])
    sum_b = rinterface.baseenv['eval'].rcall(tuple(ad.items()), env_b)
    assert 15 == sum_b[0]
# A type error inside an R call surfaces as RRuntimeError plus a warning.
def test_error_in_call():
    r_sum = rinterface.baseenv['sum']
    with pytest.raises(rinterface.embedded.RRuntimeError),\
            pytest.warns(rinterface.RRuntimeWarning):
        r_sum(2, 'a')
# rinterface.MissingArg maps onto R's missing-argument semantics.
def test_missing_arg():
    exp = rinterface.parse('function(x) { missing(x) }')
    fun = rinterface.baseenv['eval'](exp)
    nonmissing = rinterface.IntSexpVector([0, ])
    missing = rinterface.MissingArg
    assert not fun(nonmissing)[0]
    assert fun(missing)[0]
# Python scalars are converted to the matching R atomic types.
def test_scalar_convert_integer():
    assert 'integer' == rinterface.baseenv['typeof'](int(1))[0]
def test_scalar_convert_double():
    assert 'double' == rinterface.baseenv['typeof'](1.0)[0]
def test_scalar_convert_boolean():
    assert 'logical' == rinterface.baseenv['typeof'](True)[0]
# Values bound inside local_context() are visible during the block and
# cleaned up afterwards, with or without an explicit environment / rlock.
@pytest.mark.parametrize('give_env', (True, False))
@pytest.mark.parametrize('use_rlock', (True, False))
def test_call_in_context(give_env, use_rlock):
    ls = rinterface.baseenv['ls']
    get = rinterface.baseenv['get']
    if give_env:
        env = rinterface.baseenv['new.env']()
    else:
        env = None
    assert 'foo' not in ls()
    with rinterface.local_context(env=env, use_rlock=use_rlock) as lc:
        lc['foo'] = 123
        assert tuple(get('foo')) == (123, )
    assert 'foo' not in ls()
# Nested contexts shadow and then restore outer bindings.
@pytest.mark.parametrize('use_rlock', (True, False))
def test_call_in_context_nested(use_rlock):
    ls = rinterface.baseenv['ls']
    get = rinterface.baseenv['get']
    assert 'foo' not in ls()
    with rinterface.local_context() as lc_a:
        lc_a['foo'] = 123
        assert tuple(get('foo')) == (123, )
        with rinterface.local_context(use_rlock=use_rlock) as lc_b:
            lc_b['foo'] = 456
            assert tuple(get('foo')) == (456, )
        assert tuple(get('foo')) == (123, )
    assert 'foo' not in ls()
5,589 | unchanged answer | import base64
import hashlib
import os.path
import random
import secrets
import string
import urllib

from django import forms
from django.conf import settings
from django.core.files.uploadedfile import UploadedFile
from django.forms import ClearableFileInput
from django.shortcuts import resolve_url
from django.template.defaultfilters import filesizeformat
from django.utils.safestring import mark_safe

from quizzes.types.base import QuestionHelper, BaseConfigForm, MISSING_ANSWER_HTML
from submission.models.codefile import FILENAME_TYPES, validate_filename
FILE_SECRET_LENGTH = 32


def new_file_secret():
    """
    A random secret for unauth access to uploaded files.

    Uses the ``secrets`` module rather than ``random``: this value acts as a
    capability token guarding file access, so it must come from a
    cryptographically secure source, not a predictable PRNG.
    """
    alphabet = string.ascii_uppercase + string.ascii_lowercase + string.digits
    return ''.join(secrets.choice(alphabet) for _ in range(FILE_SECRET_LENGTH))
class CleanClearableFileInput(ClearableFileInput):
    """ClearableFileInput that shows only the basename and tolerates the
    "clear checkbox + new upload" combination."""
    template_name = 'quizzes/clean_clearable_file_input.html'

    def format_value(self, value):
        """Render just the filename of the stored file, or 'none'."""
        if not (value and value.name):
            return 'none'
        return os.path.split(value.name)[1]

    def value_from_datadict(self, data, files, name):
        """Accept "clear + file upload" without raising a ValidationError."""
        upload = super().value_from_datadict(data, files, name)
        checkbox = forms.CheckboxInput()
        cleared = checkbox.value_from_datadict(data, files, self.clear_checkbox_name(name))
        if cleared and not self.is_required:
            # False signals to clear any existing value, as opposed to just
            # None ("no change"); deliberately no FILE_INPUT_CONTRADICTION.
            return False
        return upload
class FileAnswerField(forms.FileField):
    """FileField that enforces a maximum upload size (in kilobytes)."""
    widget = CleanClearableFileInput

    def __init__(self, max_size: int, filename: str, filename_type: str, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Configured limits/restrictions from the question version.
        self.max_size = max_size
        self.filename = filename
        self.filename_type = filename_type

    def clean(self, data, initial=None):
        """Reject uploads larger than max_size kilobytes."""
        cleaned = super().clean(data)
        if not cleaned:
            return cleaned
        if cleaned.size > self.max_size * 1024:
            raise forms.ValidationError('Submitted files can be at most %i kilobytes in size.' % (self.max_size,))
        return cleaned
class FileAnswer(QuestionHelper):
    """Quiz question type whose answer is a file uploaded by the student.

    Each upload is stored with a random per-answer secret that gates
    unauthenticated downloads via secret_url().
    """
    name = 'File Upload'

    class ConfigForm(BaseConfigForm):
        # Instructor-side configuration: a size limit and an optional
        # filename restriction interpreted according to filename_type.
        max_size = forms.IntegerField(initial=10000, min_value=0, max_value=settings.MAX_SUBMISSION_SIZE, help_text='Maximum file size that can be uploaded by the student, in kilobytes.')
        filename = forms.CharField(max_length=500, required=False, help_text='Required filename for submitted files. Interpreted as specified in the filename type. Blank for no restriction.')
        filename_type = forms.ChoiceField(choices=FILENAME_TYPES, required=True, initial='EXT', help_text='How should your filename be interpreted?')

    def get_entry_field(self, questionanswer=None, student=None):
        """Build the student-facing FileAnswerField from this version's config."""
        max_size = self.version.config.get('max_size', 10000)
        filename = self.version.config.get('filename', '')
        filename_type = self.version.config.get('filename_type', 'EXT')
        if questionanswer:
            initial = questionanswer.file
        else:
            initial = None
        # Explain any filename restriction to the student in the help text.
        helptext = None
        if filename:
            if filename_type == 'INS':
                helptext = "Filename must be “%s” (case doesn't matter)." % (filename,)
            elif filename_type == 'MAT':
                helptext = "Filename must be “%s” (case sensitive)." % (filename,)
            elif filename_type == 'EXT':
                helptext = "Filename must end with “%s”." % (filename,)
            elif filename_type == 'REX':
                helptext = "Filename must match the regular expression “%s”." % (filename,)
        field = FileAnswerField(required=False, max_length=100, max_size=max_size, filename=filename,
                                filename_type=filename_type, initial=initial, help_text=helptext,
                                validators=[lambda upfile: validate_filename(filename, filename_type, upfile.name)])
        field.widget.attrs.update({'class': 'file-answer'})
        return field

    def to_jsonable(self, cleaned_data):
        """Serialize an upload into JSON metadata plus the file object.

        The sha256 digest recorded here is what METHOD_NAME uses later to
        detect an unchanged re-submission.
        """
        data = {}
        if isinstance(cleaned_data, UploadedFile):
            data['filename'] = cleaned_data.name
            data['size'] = cleaned_data.size
            data['content-type'] = cleaned_data.content_type
            data['charset'] = cleaned_data.charset
            data['secret'] = new_file_secret()
            h = hashlib.sha256()
            for c in cleaned_data.chunks(1000):
                h.update(c)
            data['sha256'] = h.hexdigest()
        return {'data': data, '_file': cleaned_data}

    @staticmethod
    def METHOD_NAME(prev_ans, new_ans):
        # True when the answer is effectively unchanged: either no new file
        # was uploaded, or the new upload has the same sha256 digest and
        # filename as the previous answer.
        return (new_ans['_file'] is None
                or ('sha256' in prev_ans['data'] and 'sha256' in new_ans['data']
                    and prev_ans['data']['sha256'] == new_ans['data']['sha256']
                    and 'filename' in prev_ans['data'] and 'filename' in new_ans['data']
                    and prev_ans['data']['filename'] == new_ans['data']['filename'])
                )

    def secret_url(self, questionanswer):
        """Absolute download URL, authorized by the stored per-answer secret."""
        return settings.BASE_ABS_URL + resolve_url(
            'offering:quiz:submitted_file',
            course_slug=self.question.quiz.activity.offering.slug,
            activity_slug=self.question.quiz.activity.slug,
            userid=questionanswer.student.person.userid_or_emplid(),
            answer_id=questionanswer.id,
            secret=questionanswer.answer['data'].get('secret', '?')
        )

    def is_blank(self, questionanswer):
        """An answer without a recorded filename and secret counts as blank."""
        data = questionanswer.answer['data']
        return not ('filename' in data and 'secret' in data)

    def to_text(self, questionanswer):
        """Plain-text rendering: the download URL, or None when blank."""
        data = questionanswer.answer['data']
        if 'filename' in data and 'secret' in data:
            return self.secret_url(questionanswer)
        else:
            return None

    def to_html(self, questionanswer):
        """HTML rendering: a download link with size, or the missing-answer marker."""
        data = questionanswer.answer['data']
        if 'filename' in data and 'secret' in data:
            html = '<p><a href="%s">%s</a> (%s)</p>' % (
                self.secret_url(questionanswer),
                data['filename'],
                filesizeformat(data['size']),
            )
        else:
            html = MISSING_ANSWER_HTML
        return mark_safe(html)

    # unused but maybe useful later?
    def to_data_url(self, questionanswer):
        """Render small files (< 10 kB) as an inline data: URL, otherwise a
        short textual description."""
        size = questionanswer.answer['data']['size']
        if size < 1024 * 10:
            data = questionanswer.file.read()
            parts = [
                'data:',
                urllib.parse.quote(questionanswer.answer['data']['content-type']),
                ';base64,',
                urllib.parse.quote(base64.b64encode(data).decode('ascii'))
            ]
            content = ''.join(parts)
        else:
            content = 'file %i bytes, type %s' % (size, questionanswer.answer['data']['content-type'])
        return content
import magma.ast_utils as ast_utils
import ast
import typing
from collections import defaultdict
import astor
import functools
def flatten(l: list):
    """
    Non-recursive flatten that ignores non-list children
    """
    flat = []
    for item in l:
        flat.extend(item if isinstance(item, list) else [item])
    return flat
class SSAVisitor(ast.NodeTransformer):
    """Rewrite a function body into SSA form.

    Every assignment to a variable gets a fresh numbered name
    (x -> x_0, x_1, ...), and divergent assignments in if/else branches
    are merged with calls to the phi function named `phi_name`.
    """

    def __init__(self, phi_name):
        super().__init__()
        # var -> most recent SSA name for it ("" means not seen yet)
        self.last_name = defaultdict(lambda: "")
        # var -> highest numeric suffix handed out so far (-1: none yet)
        self.var_counter = defaultdict(lambda: -1)
        # Original parameter names of the visited function.
        self.args = []
        # Stack of branch conditions enclosing the current statement.
        self.cond_stack = []
        # Condition stack captured at each `return`, in source order;
        # consumed later by convert_tree_to_ssa.
        self.return_values = []
        self.phi_name = phi_name

    def write_name(self, var):
        # Allocate the next SSA name for `var` and make it current.
        self.var_counter[var] += 1
        self.last_name[var] = f"{var}_{self.var_counter[var]}"

    def visit_Assign(self, node):
        # Visit the RHS first so its reads use the pre-assignment names.
        node.value = self.visit(node.value)
        node.targets = flatten([self.visit(t) for t in node.targets])
        return node

    def visit_FunctionDef(self, node):
        # Parameters become the zeroth SSA version of themselves.
        for a in node.args.args:
            self.args.append(a.arg)
            self.write_name(a.arg)
            a.arg = f"{a.arg}_0"
        node.body = flatten([self.visit(s) for s in node.body])
        return node

    def visit_Name(self, node):
        if node.id not in self.last_name:
            if node.id not in self.args and isinstance(node.ctx, ast.Store):
                # First write to a new local: seed version 0.
                self.last_name[node.id] = f"{node.id}_0"
            else:
                # Read of a free variable (e.g. global): leave untouched.
                return node
        if isinstance(node.ctx, ast.Store):
            self.write_name(node.id)
        node.id = f"{self.last_name[node.id]}"
        return node

    def visit_If(self, node):
        # Snapshot the name map before the branch so the else-branch (and
        # the phi merge) can see the pre-branch versions.
        false_name = dict(self.last_name)
        test = self.visit(node.test)
        self.cond_stack.append(test)
        result = flatten([self.visit(s) for s in node.body])
        true_name = dict(self.last_name)
        if node.orelse:
            self.last_name = false_name
            # Track the inverted condition while inside the else-branch.
            self.cond_stack[-1] = ast.UnaryOp(ast.Invert(),
                                              self.cond_stack[-1])
            result += flatten([self.visit(s) for s in node.orelse])
            false_name = dict(self.last_name)
        self.cond_stack.pop()
        self.last_name = {**true_name, **false_name}
        # Emit `var_k = phi([false_version, true_version], test)` for every
        # variable whose final version differs between the branches.
        for var in self.last_name.keys():
            if var in true_name and var in false_name and \
                    true_name[var] != false_name[var]:
                phi_args = [
                    ast.Name(false_name[var], ast.Load()),
                    ast.Name(true_name[var], ast.Load())
                ]
                self.write_name(var)
                result.append(ast.Assign(
                    [ast.Name(self.last_name[var], ast.Store())],
                    ast.Call(ast.Name(self.phi_name, ast.Load()), [
                        ast.List(phi_args, ast.Load()),
                        test
                    ], [])))
        return result

    def visit_Return(self, node):
        # Record the guarding conditions so convert_tree_to_ssa can merge
        # multiple returns into the single output assignment.
        self.return_values.append(self.cond_stack[:])
        node.value = self.visit(node.value)
        return node
class TransformReturn(ast.NodeTransformer):
    """Rewrite each `return expr` into `__magma_ssa_return_value_<i> = expr`,
    numbering returns in visitation order."""

    def __init__(self):
        self.counter = -1

    def visit_Return(self, node):
        self.counter += 1
        target = ast.Name(f"__magma_ssa_return_value_{self.counter}", ast.Store())
        return ast.Assign([target], node.value)
class MoveReturn(ast.NodeTransformer):
    """Rewrite every `return expr` into `__magma_ssa_return_value = expr`."""

    def visit_Return(self, node):
        target = ast.Name("__magma_ssa_return_value", ast.Store())
        return ast.Assign([target], node.value)
def convert_tree_to_ssa(tree: ast.AST, defn_env: dict, phi_name: str = "phi"):
    """Convert a function's AST to SSA form.

    Runs SSAVisitor over `tree`, rewrites each `return` into an assignment
    to a numbered temporary (TransformReturn), then appends assignments
    merging those temporaries into the output name(s) O / O0..On, guarding
    conditional returns with calls to `phi_name`.

    Returns (tree, args) where args are the original parameter names.
    """
    ssa_visitor = SSAVisitor(phi_name)
    tree = ssa_visitor.visit(tree)
    tree = TransformReturn().visit(tree)
    num_return_values = len(ssa_visitor.return_values)
    # Walk the returns from last to first so earlier (higher-priority)
    # returns override later ones through nested phi selections.
    for i in reversed(range(num_return_values)):
        conds = ssa_visitor.return_values[i]
        name = f"__magma_ssa_return_value_{i}"
        if i == num_return_values - 1 or not conds:
            # Unconditional return (or the final fallback): plain assign.
            if isinstance(tree.returns, ast.Tuple):
                tree.body.append(ast.Assign(
                    [ast.Tuple([ast.Name(f"O{i}", ast.Store())
                                for i in range(len(tree.returns.elts))], ast.Store())],
                    ast.Name(name, ast.Load())
                ))
            else:
                # BUG FIX: the assignment target previously used
                # `ast.Name("O", ast.Load)` — the Load *class* (not an
                # instance), and the wrong context. Assign targets must
                # carry a Store() context.
                tree.body.append(ast.Assign([ast.Name("O", ast.Store())],
                                            ast.Name(name, ast.Load())))
        else:
            # Conditional return: select between the current output and
            # this return's value with a phi call guarded by the (ANDed)
            # branch conditions.
            cond = conds[-1]
            for c in conds[:-1]:
                cond = ast.BinOp(cond, ast.BitAnd(), c)
            if isinstance(tree.returns, ast.Tuple):
                for i in range(len(tree.returns.elts)):
                    tree.body.append(ast.Assign(
                        [ast.Name(f"O{i}", ast.Store())],
                        ast.Call(ast.Name(phi_name, ast.Load()), [
                            ast.List([
                                ast.Name(f"O{i}", ast.Load()),
                                ast.Subscript(ast.Name(name, ast.Load()),
                                              ast.Index(ast.Num(i)),
                                              ast.Load())
                            ], ast.Load()),
                            cond], []))
                    )
            else:
                tree.body.append(ast.Assign(
                    [ast.Name("O", ast.Store())],
                    ast.Call(ast.Name(phi_name, ast.Load()), [
                        ast.List([ast.Name("O", ast.Load()), ast.Name(name, ast.Load())],
                                 ast.Load()), cond], []))
                )
    return tree, ssa_visitor.args
def METHOD_NAME(defn_env: dict, phi: typing.Union[str, typing.Callable],
                fn: typing.Callable):
    """Convert `fn` to SSA form and compile the rewritten function.

    `phi` is either the name of a phi function available in `defn_env`,
    or a callable that is injected into `defn_env` under a fresh name.
    """
    tree = ast_utils.get_func_ast(fn)
    tree.decorator_list = ast_utils.filter_decorator(ssa,
                                                     tree.decorator_list,
                                                     defn_env)
    phi_is_name = isinstance(phi, str)
    phi_name = phi if phi_is_name else ast_utils.gen_free_name(tree, defn_env)
    tree, _ = convert_tree_to_ssa(tree, defn_env, phi_name=phi_name)
    if not phi_is_name:
        defn_env[phi_name] = phi
    # The SSA pass leaves the result in "O"; return it explicitly.
    tree.body.append(ast.Return(ast.Name("O", ast.Load())))
    return ast_utils.compile_function_to_file(tree, defn_env=defn_env)
@ast_utils.inspect_enclosing_env
def ssa(defn_env: dict, phi: typing.Union[str, typing.Callable] = "phi"):
    # Decorator factory: `@ssa` / `@ssa(phi=...)` converts the decorated
    # function to SSA form via METHOD_NAME, with the enclosing environment
    # captured by inspect_enclosing_env.
    return functools.partial(METHOD_NAME, defn_env, phi)
import os
import shutil
import copy
import glob
from GangaCore.testlib.GangaUnitTest import GangaUnitTest
from GangaCore.testlib.file_utils import generate_unique_temp_file
class TestLocalFileClient(GangaUnitTest):
    """Round-trip tests of LocalFile input/output handling on the Local
    backend, with 'client'-side output post-processing.

    The tests run in alphabetical order: submit (a), wait for completion
    and check outputs (b), then copy the job (c).
    """

    # Temp input files created by the tests; removed in tearDownTest.
    _managed_files = []

    # Num of sj in tests
    sj_len = 3

    # This sets up a LocalFileConfiguration which works by placing a file on local storage
    # somewhere we can test using standard tools
    LocalFileConfig = {
        'fileExtensions': [''],
        'uploadOptions': {},
        'backendPostprocess': {
            'LSF': 'client',
            'Dirac': 'client',
            'PBS': 'client',
            'Interactive': 'client',
            'Local': 'client',
        },
    }

    # Extension shared by the generated input files and the output wildcard.
    _ext = '.root'

    def setUp(self):
        """
        Configure the LocalFile for the test
        """
        extra_opts = [
            ('PollThread', 'autostart', 'False'),
            ('Local', 'remove_workdir', 'False'),
            ('TestingFramework', 'AutoCleanup', 'False'),
            ('Output', 'LocalFile', self.LocalFileConfig),
            ('Output', 'FailJobIfNoOutputMatched', 'True'),
        ]
        super(TestLocalFileClient, self).setUp(extra_opts=extra_opts)

    @staticmethod
    def cleanUp():
        """Cleanup the current temp jobs"""
        from GangaCore.GPI import jobs
        for j in jobs:
            shutil.rmtree(j.backend.workdir, ignore_errors=True)
            j.remove()

    @classmethod
    def tearDownTest(cls):
        """Cleanup the current temp objects"""
        for file_ in TestLocalFileClient._managed_files:
            if os.path.isfile(file_):
                os.unlink(file_)
            else:
                print(("ERROR REMOVING FILE: '%s'" % str(file_)))
        TestLocalFileClient._managed_files = []

    def METHOD_NAME(self):
        """Test the client side code whilst stil using the Local backend"""
        from GangaCore.GPI import LocalFile, Job, ArgSplitter
        # Two real input files plus a wildcard output pattern exercise both
        # input upload and output wildcard expansion.
        file_1 = generate_unique_temp_file(TestLocalFileClient._ext)
        file_2 = generate_unique_temp_file(TestLocalFileClient._ext)
        TestLocalFileClient._managed_files.append(file_1)
        TestLocalFileClient._managed_files.append(file_2)
        j = Job()
        j.inputfiles = [LocalFile(file_1), LocalFile(file_2)]
        j.splitter = ArgSplitter(args=[[_] for _ in range(TestLocalFileClient.sj_len)])
        j.outputfiles = [LocalFile(namePattern='*' + TestLocalFileClient._ext)]
        j.submit()

    def test_b_testClientSideComplete(self):
        """Test the client side code whilst stil using the Local backend"""
        from GangaCore.GPI import jobs
        from GangaCore.GPIDev.Base.Proxy import stripProxy
        from GangaTest.Framework.utils import sleep_until_completed
        # Uses the job submitted by METHOD_NAME (test a).
        j = jobs[-1]
        assert sleep_until_completed(j)
        for sj in j.subjobs:
            output_dir = stripProxy(sj).getOutputWorkspace(create=False).getPath()
            assert os.path.isdir(output_dir)
            # Check that the files were placed in the correct place on storage
            for file_ in j.inputfiles:
                for this_file in glob.glob(os.path.join(output_dir, file_.namePattern)):
                    assert os.path.isfile(this_file)
            # Check that wildcard expansion happened correctly
            assert len(stripProxy(sj).outputfiles[0].subfiles) == 2
            assert len(sj.outputfiles) == 2

    def test_c_testCopy(self):
        """Copying a job preserves its input and output file lists."""
        from GangaCore.GPI import jobs, LocalFile
        j = jobs[-1]
        j2 = j.copy()
        assert len(j2.outputfiles) == 1
        assert j2.outputfiles[0] == LocalFile(
            namePattern='*' + TestLocalFileClient._ext
        )
        assert len(j2.inputfiles) == 2
        self.cleanUp()
class TestLocalFileWN(TestLocalFileClient):
    """Same test sequence as TestLocalFileClient, but with the Local
    backend's output post-processing done on the worker node ('WN')
    instead of the client."""
    LocalFileConfig = copy.deepcopy(TestLocalFileClient.LocalFileConfig)
    LocalFileConfig['backendPostprocess']['Local'] = 'WN'
import json
import traceback
from datetime import datetime, timedelta
from dateutil.parser import parse
from django.conf import settings
import math
from django.db import models
from django.db.models.aggregates import Count
from jsonfield.fields import JSONField
from pillow_retry import const
from pillowtop.feed.couch import change_from_couch_row
from pillowtop.feed.interface import ChangeMeta
ERROR_MESSAGE_LENGTH = 512
def _get_extra_args(limit, reduce, skip):
extra_args = dict()
if not reduce and limit is not None:
extra_args.update(
limit=limit,
skip=skip
)
return extra_args
def path_from_object(obj):
    """Return the dotted 'module.ClassName' path of obj's type."""
    cls = obj.__class__
    return "{0}.{1}".format(cls.__module__, cls.__name__)
class PillowError(models.Model):
    """A change that a pillow failed to process, stored for retry.

    Each row records the failing change, the error details, and the
    exponential-backoff bookkeeping (current_attempt / date_next_attempt).
    """
    id = models.BigAutoField(primary_key=True)
    doc_id = models.CharField(max_length=255, null=False)
    pillow = models.CharField(max_length=255, null=False, db_index=True)
    date_created = models.DateTimeField()
    date_last_attempt = models.DateTimeField()
    date_next_attempt = models.DateTimeField(db_index=True, null=True)
    total_attempts = models.IntegerField(default=0)
    current_attempt = models.IntegerField(default=0, db_index=True)
    error_type = models.CharField(max_length=255, null=True, db_index=True)
    error_traceback = models.TextField(null=True)
    change = JSONField(null=True)
    change_metadata = JSONField(null=True)

    @property
    def change_object(self):
        """Rebuild the pillowtop Change this error was recorded for.

        The document body is deliberately dropped so it gets re-fetched
        when the change is reprocessed.
        """
        change = change_from_couch_row(self.change if self.change else {'id': self.doc_id})
        if self.change_metadata:
            change.metadata = ChangeMeta.wrap(self.change_metadata)
        change.document = None
        return change

    class Meta(object):
        app_label = 'pillow_retry'
        unique_together = ('doc_id', 'pillow',)

    def add_attempt(self, exception, traceb, change_meta=None, date=None):
        """Record a failed attempt and schedule the next retry.

        Backoff is quadratic: PILLOW_RETRY_REPROCESS_INTERVAL minutes
        times current_attempt**2; once past the max attempts no further
        retry is scheduled (date_next_attempt = None).
        """
        new_attempts = change_meta.attempts if change_meta else 1
        self.current_attempt += new_attempts
        self.total_attempts += new_attempts
        self.date_last_attempt = date or datetime.utcnow()
        self.error_type = path_from_object(exception)
        self.error_traceback = "{}\n\n{}".format(exception, "".join(traceback.format_tb(traceb)))
        if self.current_attempt <= const.PILLOW_RETRY_QUEUE_MAX_PROCESSING_ATTEMPTS:
            time_till_next = const.PILLOW_RETRY_REPROCESS_INTERVAL * math.pow(self.current_attempt, 2)
            self.date_next_attempt = self.date_last_attempt + timedelta(minutes=time_till_next)
        else:
            self.date_next_attempt = None

    def METHOD_NAME(self):
        """Reset the backoff so this error becomes retryable immediately."""
        self.current_attempt = 0
        self.date_next_attempt = datetime.utcnow()

    def has_next_attempt(self):
        """Whether this error is still within its retry budget."""
        return self.current_attempt == 0 or (
            self.total_attempts <= const.PILLOW_RETRY_MULTI_ATTEMPTS_CUTOFF and
            self.current_attempt <= const.PILLOW_RETRY_QUEUE_MAX_PROCESSING_ATTEMPTS
        )

    @classmethod
    def get_or_create(cls, change, pillow):
        """Fetch the existing error row for (change, pillow) or build a
        new, unsaved one. Metadata (when present) overrides date_created."""
        change.document = None
        doc_id = change.id
        try:
            error = cls.objects.get(doc_id=doc_id, pillow=pillow.pillow_id)
        except cls.DoesNotExist:
            now = datetime.utcnow()
            error = PillowError(
                doc_id=doc_id,
                pillow=pillow.pillow_id,
                date_created=now,
                date_last_attempt=now,
                date_next_attempt=now,
                change=change.to_dict()
            )
        if change.metadata:
            error.date_created = change.metadata.original_publication_datetime
            error.change_metadata = change.metadata.to_json()
        return error

    @classmethod
    def get_errors_to_process(cls, utcnow, limit=None, skip=0):
        """
        Get errors according the following rules:
            date_next_attempt <= utcnow
            AND
            (
                total_attempts <= multi_attempt_cutoff & current_attempt <= max_attempts
                OR
                total_attempts > multi_attempt_cutoff & current_attempt == 0
            )
        where:
        * multi_attempt_cutoff = const.PILLOW_RETRY_MULTI_ATTEMPTS_CUTOFF
        * max_attempts = const.PILLOW_RETRY_QUEUE_MAX_PROCESSING_ATTEMPTS
        :param utcnow:      The current date and time in UTC.
        :param limit:       Paging limit param.
        :param skip:        Paging skip param.
        """
        max_attempts = const.PILLOW_RETRY_QUEUE_MAX_PROCESSING_ATTEMPTS
        multi_attempts_cutoff = const.PILLOW_RETRY_MULTI_ATTEMPTS_CUTOFF
        query = PillowError.objects \
            .filter(date_next_attempt__lte=utcnow) \
            .filter(
                models.Q(current_attempt=0) |
                (models.Q(total_attempts__lte=multi_attempts_cutoff) & models.Q(current_attempt__lte=max_attempts))
            )
        # temporarily disable queuing of ConfigurableReportKafkaPillow errors
        query = query.filter(~models.Q(pillow='corehq.apps.userreports.pillow.ConfigurableReportKafkaPillow'))
        if limit is not None:
            return query[skip:skip+limit]
        else:
            return query

    @classmethod
    def bulk_reset_attempts(cls, last_attempt_lt, attempts_gte=None):
        """Reset the backoff for all exhausted errors whose last attempt
        was before `last_attempt_lt`, making them retryable again."""
        if attempts_gte is None:
            attempts_gte = const.PILLOW_RETRY_QUEUE_MAX_PROCESSING_ATTEMPTS
        multi_attempts_cutoff = const.PILLOW_RETRY_MULTI_ATTEMPTS_CUTOFF
        return PillowError.objects.filter(
            models.Q(date_last_attempt__lt=last_attempt_lt),
            models.Q(current_attempt__gte=attempts_gte) | models.Q(total_attempts__gte=multi_attempts_cutoff)
        ).update(
            current_attempt=0,
            date_next_attempt=datetime.utcnow()
        )
# Copyright (C) Unitary Fund
#
# This source code is licensed under the GPL license (v3) found in the
# LICENSE file in the root directory of this source tree.
"""NB: Copied in large part from rigetti/forest-benchmarking (Apache-2.0)
and modified to test a larger gateset.
"""
import inspect
import itertools
import random
from math import pi
import numpy as np
import pytest
from cirq import equal_up_to_global_phase
from pyquil.gates import (
CCNOT,
CNOT,
CPHASE,
CSWAP,
CZ,
ISWAP,
PHASE,
RX,
RY,
RZ,
SWAP,
XY,
H,
I,
S,
T,
X,
Y,
Z,
)
from pyquil.quil import Program
from pyquil.simulation.tools import program_unitary
from mitiq.interface.mitiq_pyquil.compiler import (
_CCNOT,
_CNOT,
_CPHASE,
_H,
_ISWAP,
_RY,
_S,
_SWAP,
_T,
_X,
_Y,
_Z,
basic_compile,
)
def test_basic_compile_defgate():
    """A program containing a DEFGATE passes through basic_compile unchanged."""
    prog = Program()
    prog.inst(RX(pi, 0))
    prog.defgate("test", [[0, 1], [1, 0]])
    prog.inst(("test", 2))
    prog.inst(RZ(pi / 2, 0))
    assert basic_compile(prog) == prog
def test_CCNOT():
    """_CCNOT matches CCNOT (up to global phase) for every qubit ordering."""
    for perm in itertools.permutations([0, 1, 2]):
        expected = program_unitary(Program(CCNOT(*perm)), n_qubits=3)
        actual = program_unitary(_CCNOT(*perm), n_qubits=3)
        assert equal_up_to_global_phase(expected, actual, atol=1e-12)
def test_CNOT():
    """_CNOT matches CNOT (up to global phase) for both qubit orderings."""
    for control, target in ((0, 1), (1, 0)):
        expected = program_unitary(Program(CNOT(control, target)), n_qubits=2)
        actual = program_unitary(_CNOT(control, target), n_qubits=2)
        assert equal_up_to_global_phase(expected, actual, atol=1e-12)
def test_CPHASE():
    """_CPHASE matches CPHASE over an angle sweep, for both qubit orderings."""
    for angle in np.linspace(-2 * np.pi, 2 * np.pi):
        for q0, q1 in ((0, 1), (1, 0)):
            expected = program_unitary(Program(CPHASE(angle, q0, q1)), n_qubits=2)
            actual = program_unitary(_CPHASE(angle, q0, q1), n_qubits=2)
            assert equal_up_to_global_phase(expected, actual, atol=1e-12)
def test_H():
    """_H matches H up to global phase."""
    expected = program_unitary(Program(H(0)), n_qubits=1)
    actual = program_unitary(_H(0), n_qubits=1)
    assert equal_up_to_global_phase(expected, actual, atol=1e-12)
def test_ISWAP():
    """_ISWAP matches ISWAP (up to global phase) for both qubit orderings."""
    for q0, q1 in ((0, 1), (1, 0)):
        expected = program_unitary(Program(ISWAP(q0, q1)), n_qubits=2)
        actual = program_unitary(_ISWAP(q0, q1), n_qubits=2)
        assert equal_up_to_global_phase(expected, actual, atol=1e-12)
def test_RX():
    """basic_compile preserves RX over an angle sweep."""
    for angle in np.linspace(-2 * np.pi, 2 * np.pi):
        prog = Program(RX(angle, 0))
        expected = program_unitary(prog, n_qubits=1)
        actual = program_unitary(basic_compile(prog), n_qubits=1)
        assert equal_up_to_global_phase(expected, actual, atol=1e-12)
def test_RY():
    """_RY matches RY over an angle sweep."""
    for angle in np.linspace(-2 * np.pi, 2 * np.pi):
        expected = program_unitary(Program(RY(angle, 0)), n_qubits=1)
        actual = program_unitary(_RY(angle, 0), n_qubits=1)
        assert equal_up_to_global_phase(expected, actual)
def test_S():
    """_S matches S up to global phase."""
    expected = program_unitary(Program(S(0)), n_qubits=1)
    actual = program_unitary(_S(0), n_qubits=1)
    assert equal_up_to_global_phase(expected, actual, atol=1e-12)
def test_SWAP():
    """_SWAP matches SWAP (up to global phase) for both qubit orderings."""
    for q0, q1 in ((0, 1), (1, 0)):
        expected = program_unitary(Program(SWAP(q0, q1)), n_qubits=2)
        actual = program_unitary(_SWAP(q0, q1), n_qubits=2)
        assert equal_up_to_global_phase(expected, actual, atol=1e-12)
def test_T():
    """_T matches T up to global phase."""
    expected = program_unitary(Program(T(0)), n_qubits=1)
    actual = program_unitary(_T(0), n_qubits=1)
    assert equal_up_to_global_phase(expected, actual, atol=1e-12)
def test_X():
    """_X matches X up to global phase."""
    expected = program_unitary(Program(X(0)), n_qubits=1)
    actual = program_unitary(_X(0), n_qubits=1)
    assert equal_up_to_global_phase(expected, actual, atol=1e-12)
def test_XY():
    """basic_compile preserves XY over an angle sweep, both qubit orderings."""
    for angle in np.linspace(-2 * np.pi, 2 * np.pi):
        for q0, q1 in ((0, 1), (1, 0)):
            prog = Program(XY(angle, q0, q1))
            expected = program_unitary(prog, n_qubits=2)
            actual = program_unitary(basic_compile(prog), n_qubits=2)
            assert equal_up_to_global_phase(expected, actual, atol=1e-12)
def test_Y():
    """_Y matches Y up to global phase."""
    expected = program_unitary(Program(Y(0)), n_qubits=1)
    actual = program_unitary(_Y(0), n_qubits=1)
    assert equal_up_to_global_phase(expected, actual, atol=1e-12)
def test_Z():
    """_Z matches Z up to global phase."""
    expected = program_unitary(Program(Z(0)), n_qubits=1)
    actual = program_unitary(_Z(0), n_qubits=1)
    assert equal_up_to_global_phase(expected, actual, atol=1e-12)
# Note to developers: unsupported gates are commented out.
# Maps Quil gate names to their pyquil constructors; sampled by
# _generate_random_program below to build random test programs.
QUANTUM_GATES = {
    "I": I,
    "X": X,
    "Y": Y,
    "Z": Z,
    "H": H,
    "S": S,
    "T": T,
    "PHASE": PHASE,
    "RX": RX,
    "RY": RY,
    "RZ": RZ,
    "CZ": CZ,
    "CNOT": CNOT,
    "CCNOT": CCNOT,
    # 'CPHASE00': CPHASE00,
    # 'CPHASE01': CPHASE01,
    # 'CPHASE10': CPHASE10,
    "CPHASE": CPHASE,
    "SWAP": SWAP,
    # 'CSWAP': CSWAP,
    "ISWAP": ISWAP,
    # 'PSWAP': PSWAP
}
def _generate_random_program(n_qubits, length):
    """Randomly sample gates and arguments (qubits, angles).

    NOTE(review): the order of `random.*` calls here determines the
    generated program for a given seed — do not reorder the sampling
    logic without accepting different (but equally random) programs.
    """
    if n_qubits < 3:
        raise ValueError(
            "Please request n_qubits >= 3 so we can use 3-qubit gates."
        )

    gates = list(QUANTUM_GATES.values())

    prog = Program()
    for _ in range(length):
        gate = random.choice(gates)
        possible_qubits = set(range(n_qubits))
        sig = inspect.signature(gate)

        param_vals = []
        # Inspect each parameter name to decide whether it wants a qubit
        # index (sampled without replacement) or an angle.
        for param in sig.parameters:
            if param in [
                "qubit",
                "q1",
                "q2",
                "control",
                "control1",
                "control2",
                "target",
                "target_1",
                "target_2",
                "classical_reg",
            ]:
                param_val = random.choice(list(possible_qubits))
                possible_qubits.remove(param_val)
            elif param == "angle":
                # TODO: support rx(theta)
                if gate == RX:
                    # RX is restricted to multiples of pi/2 (plus pi).
                    param_val = random.choice([-1, -0.5, 0, 0.5, 1]) * pi
                else:
                    param_val = random.uniform(-2 * pi, 2 * pi)
            else:
                raise ValueError("Unknown gate parameter {}".format(param))

            param_vals.append(param_val)

        prog += gate(*param_vals)

    return prog
@pytest.fixture(params=list(range(3, 5)))
def n_qubits(request):
    # Qubit counts (3 and 4) used for the random-program round-trip tests.
    return request.param
@pytest.fixture(params=[2, 50, 67])
def prog_length(request):
    # Program lengths (short, medium, long) for the random-program tests.
    return request.param
def METHOD_NAME(n_qubits, prog_length):
    """basic_compile preserves the unitary of random programs (up to phase)."""
    for _ in range(10):
        prog = _generate_random_program(n_qubits=n_qubits, length=prog_length)
        before = program_unitary(prog, n_qubits=n_qubits)
        after = program_unitary(basic_compile(prog), n_qubits=n_qubits)
        assert equal_up_to_global_phase(before, after, atol=1e-12)
def test_unsupported_gate():
    """Gates outside the supported gateset make basic_compile raise."""
    prog = Program(CSWAP(0, 1, 2))
    with pytest.raises(ValueError):
        basic_compile(prog)
def test_other_instructions():
    """Non-gate instructions (e.g. DECLARE) pass through unchanged."""
    prog = Program("DECLARE ro BIT[2]")
    assert basic_compile(prog) == prog
#!/usr/bin/env python
#
# Copyright (C) 2017 Accton Technology Corporation
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# ------------------------------------------------------------------
# HISTORY:
# mm/dd/yyyy (A.D.)
# 11/13/2017: Polly Hsu, Create
# 1/10/2018: Jostar modify for as7716_32
# 2/27/2018: Roy Lee modify for as7312_54x
# ------------------------------------------------------------------
try:
import getopt
import sys
import logging
import logging.config
import time # this is only being used as part of the example
import signal
from as7312_54x.fanutil import FanUtil
from as7312_54x.thermalutil import ThermalUtil
except ImportError as e:
raise ImportError('%s - required module not found' % str(e))
# Defaults
VERSION = '1.0'
FUNCTION_NAME = 'accton_as7312_monitor'
# Maximum fan duty cycle (percent); used as the fault/fallback speed.
DUTY_MAX = 100

# NOTE(review): `global` at module level is a no-op; these two statements
# have no effect and only document intent.
global log_file
global log_level
# (LM75_1+ LM75_2+ LM75_3) is LM75 at i2c addresses 0x48, 0x49, and 0x4A.
# TMP = (LM75_1+ LM75_2+ LM75_3)/3
#1. If TMP < 35, All fans run with duty 31.25%.
#2. If TMP>=35 or the temperature of any one of fan is higher than 40,
# All fans run with duty 50%
#3. If TMP >= 40 or the temperature of any one of fan is higher than 45,
# All fans run with duty 62.5%.
#4. If TMP >= 45 or the temperature of any one of fan is higher than 50,
# All fans run with duty 100%.
#5. Any one of 6 fans is fault, set duty = 100%.
#6. Direction factor. If it is B2F direction, duty + 12%.
# MISC:
# 1.Check single LM75 before applied average.
# 2.If no matched fan speed is found from the policy,
# use FAN_DUTY_CYCLE_MIN as default speed
# Get current temperature
# 4.Decision 3: Decide new fan speed depend on fan direction/current fan speed/temperature
# Make a class we can use to capture stdout and sterr in the log
class accton_as7312_monitor(object):
    """Fan-speed controller for the AS7312-54x.

    Polls the LM75 thermal sensors and fan states, then selects a duty
    cycle from a direction-dependent policy table (see the policy
    description in the module header comments).
    """
    # static temp var
    _ori_temp = 0
    _new_perc = 0
    _ori_perc = 0

    def __init__(self, log_file, log_level):
        """Needs a logger and a logger level."""
        # set up logging to file
        logging.basicConfig(
            filename=log_file,
            filemode='w',
            level=log_level,
            format= '[%(asctime)s] {%(pathname)s:%(lineno)d} %(levelname)s - %(message)s',
            datefmt='%H:%M:%S'
        )
        # set up logging to console
        if log_level == logging.DEBUG:
            console = logging.StreamHandler()
            console.setLevel(log_level)
            formatter = logging.Formatter('%(name)-12s: %(levelname)-8s %(message)s')
            console.setFormatter(formatter)
            logging.getLogger('').addHandler(console)
        logging.debug('SET. logfile:%s / loglevel:%d', log_file, log_level)
        self.thermal = ThermalUtil()
        self.fan = FanUtil()

    def manage_fans(self):
        """Run one control iteration.

        Returns False when a fan status cannot be read; True otherwise
        (including the any-fan-faulty case, which forces full speed).
        """
        max_duty = DUTY_MAX
        # Each policy level is [duty %, sum-of-temps lower bound,
        # sum-of-temps upper bound] (temperatures in milli-degrees C).
        fan_policy_f2b = {
            0: [32, 0, 105000],
            1: [50, 105000, 120000],
            2: [63, 120000, 135000],
            3: [max_duty, 135000, sys.maxsize],
        }
        # B2F (back-to-front) airflow runs each level ~12% harder.
        fan_policy_b2f = {
            0: [44, 0, 105000],
            1: [63, 105000, 120000],
            2: [75, 120000, 135000],
            3: [max_duty, 135000, sys.maxsize],
        }
        # Per-sensor thresholds: exceeding level y forces at least the
        # duty of policy level y+1.
        fan_policy_single = {
            0: 40000,
            1: 45000,
            2: 50000,
        }
        thermal = self.thermal
        fan = self.fan
        # An unreadable fan aborts the cycle; a faulty fan forces max duty.
        for x in range(fan.get_idx_fan_start(), fan.get_num_fans()+1):
            fan_status = fan.get_fan_status(x)
            if fan_status is None:
                logging.debug('INFO. SET new_perc to %d (FAN stauts is None. fan_num:%d)', max_duty, x)
                return False
            if fan_status is False:
                logging.debug('INFO. SET new_perc to %d (FAN fault. fan_num:%d)', max_duty, x)
                fan.set_fan_duty_cycle(max_duty)
                return True

        # Select the policy table matching the airflow direction.
        fan_dir = fan.get_fan_dir(1)
        if fan_dir == 1:
            fan_policy = fan_policy_f2b
        else:
            fan_policy = fan_policy_b2f

        # Decide fan duty by whether any single sensor exceeds its
        # fan_policy_single threshold.
        new_duty_cycle = fan_policy[0][0]
        for x in range(thermal.get_idx_thermal_start(), thermal.get_num_thermals()+1):
            single_thm = thermal._get_thermal_node_val(x)
            for y in range(0, len(fan_policy_single)):
                if single_thm > fan_policy_single[y]:
                    if fan_policy[y+1][0] > new_duty_cycle:
                        new_duty_cycle = fan_policy[y+1][0]
                        logging.debug('INFO. Single thermal sensor %d with temp %d > %d , new_duty_cycle=%d',
                                      x, single_thm, fan_policy_single[y], new_duty_cycle)
        single_result = new_duty_cycle

        # Find if the current duty matches any of the defined duties.
        # If not, reset the hardware to the lowest policy duty and treat
        # the current value as max.
        cur_duty_cycle = fan.get_fan_duty_cycle()
        for x in range(0, len(fan_policy)):
            if cur_duty_cycle == fan_policy[x][0]:
                break
        else:
            # BUG FIX: the original tested `x == len(fan_policy)` after the
            # loop, which can never be true (the loop variable stops at
            # len-1), so the no-match reset never ran. for/else runs this
            # branch exactly when no policy duty matched.
            fan.set_fan_duty_cycle(fan_policy[0][0])
            cur_duty_cycle = max_duty

        # Decide fan duty by which fan_policy band the sum of sensor
        # temperatures falls into (checked from the highest band down).
        get_temp = thermal.get_thermal_temp()
        new_duty_cycle = cur_duty_cycle
        for x in range(0, len(fan_policy)):
            y = len(fan_policy) - x - 1  # checked from highest
            if get_temp > fan_policy[y][1] and get_temp < fan_policy[y][2]:
                new_duty_cycle = fan_policy[y][0]
                logging.debug('INFO. Sum of temp %d > %d , new_duty_cycle=%d', get_temp, fan_policy[y][1], new_duty_cycle)
        sum_result = new_duty_cycle

        # The stricter (higher) of the two decisions wins.
        new_duty_cycle = max(sum_result, single_result)
        logging.debug('INFO. Final duty_cycle=%d', new_duty_cycle)
        if new_duty_cycle != cur_duty_cycle:
            fan.set_fan_duty_cycle(new_duty_cycle)
        return True
def METHOD_NAME(signum, frame):
    """Signal handler: force fans to full speed, then exit cleanly."""
    logging.debug('INFO:Cause signal %d, set fan speed max.', signum)
    FanUtil().set_fan_duty_cycle(DUTY_MAX)
    sys.exit(0)
def main(argv):
    """Parse command-line options, install signal handlers, and poll the
    fan controller every 10 seconds.

    Options: -h/--help (usage), -d/--debug (debug logging),
    -l/--lfile <log_file> (log file path).
    """
    log_file = '%s.log' % FUNCTION_NAME
    log_level = logging.INFO
    if len(sys.argv) != 1:
        try:
            # BUG FIX: '--help' and '--debug' were handled below but never
            # declared as long options, so using them raised GetoptError
            # and aborted with the usage message.
            opts, args = getopt.getopt(argv, 'hdl:', ['help', 'debug', 'lfile='])
        except getopt.GetoptError:
            print('Usage: %s [-d] [-l <log_file>]' % sys.argv[0])
            return 0
        for opt, arg in opts:
            if opt in ('-h', '--help'):
                print('Usage: %s [-d] [-l <log_file>]' % sys.argv[0])
                return 0
            elif opt in ('-d', '--debug'):
                log_level = logging.DEBUG
            elif opt in ('-l', '--lfile'):
                log_file = arg

    # On SIGINT/SIGTERM the handler forces fans to full speed and exits.
    signal.signal(signal.SIGINT, METHOD_NAME)
    signal.signal(signal.SIGTERM, METHOD_NAME)

    monitor = accton_as7312_monitor(log_file, log_level)

    # Loop forever, doing something useful hopefully:
    while True:
        monitor.manage_fans()
        time.sleep(10)
# -*- coding: utf-8 -*-
import random
import sys
import pytest
from ddtrace.appsec.iast import oce
try:
from ddtrace.appsec.iast._taint_tracking import OriginType
from ddtrace.appsec.iast._taint_tracking import Source
from ddtrace.appsec.iast._taint_tracking import TaintRange
from ddtrace.appsec.iast._taint_tracking import are_all_text_all_ranges
from ddtrace.appsec.iast._taint_tracking import contexts_reset
from ddtrace.appsec.iast._taint_tracking import create_context
from ddtrace.appsec.iast._taint_tracking import get_range_by_hash
from ddtrace.appsec.iast._taint_tracking import get_ranges
from ddtrace.appsec.iast._taint_tracking import is_notinterned_notfasttainted_unicode
from ddtrace.appsec.iast._taint_tracking import num_objects_tainted
from ddtrace.appsec.iast._taint_tracking import set_fast_tainted_if_notinterned_unicode
from ddtrace.appsec.iast._taint_tracking import set_ranges
from ddtrace.appsec.iast._taint_tracking import shift_taint_range
from ddtrace.appsec.iast._taint_tracking import shift_taint_ranges
from ddtrace.appsec.iast._taint_tracking import taint_pyobject
except (ImportError, AttributeError):
pytest.skip("IAST not supported for this Python version", allow_module_level=True)
def setup():
    # Enable the IAST "oce" (one-click enable) switch so taint tracking is
    # active for every test in this module (pytest runs setup() before tests).
    oce._enabled = True
def test_source_origin_refcount():
    """Source refcount semantics: plain Python assignments add references,
    while TaintRange keeps its source alive without raising the visible
    refcount. Assertions here are sensitive to any extra local bindings."""
    s1 = Source(name="name", value="val", origin=OriginType.COOKIE)
    assert sys.getrefcount(s1) - 1 == 1  # getrefcount takes 1 while counting
    s2 = s1
    assert sys.getrefcount(s1) - 1 == 2
    s3 = s1
    assert sys.getrefcount(s1) - 1 == 3
    del s2
    assert sys.getrefcount(s1) - 1 == 2
    # TaintRange does not increase refcount but should keep it alive
    tr_sub = TaintRange(0, 1, s1)
    assert sys.getrefcount(s1) - 1 == 2
    del s1
    assert sys.getrefcount(s3) - 1 == 1
    # NOTE(review): the count staying at 1 suggests tr_sub.source returns a
    # fresh reference per access — confirm against the native binding.
    assert sys.getrefcount(tr_sub.source) - 1 == 1
    del s3
    assert sys.getrefcount(tr_sub.source) - 1 == 1
    _ = TaintRange(1, 2, tr_sub.source)
    assert sys.getrefcount(tr_sub.source) - 1 == 1
# Module-level fixtures shared by the get/set-range tests below: two distinct
# sources and two overlapping taint ranges built from them.
_SOURCE1 = Source(name="name", value="value", origin=OriginType.COOKIE)
_SOURCE2 = Source(name="name2", value="value2", origin=OriginType.BODY)
_RANGE1 = TaintRange(0, 2, _SOURCE1)
_RANGE2 = TaintRange(1, 3, _SOURCE2)
def METHOD_NAME():
    """Fast tainting marks non-interned unicode strings; all other object
    types are reported as not taintable and marking them is a no-op."""
    for i in range(5000):
        repeats = random.randint(4 * i + 7, 4 * i + 9)
        s = "somestr" * repeats
        s_check = "somestr" * (4 * i + 10)
        # Check that s is not interned since fast tainting only works on
        # non-interned strings
        assert s is not s_check
        assert is_notinterned_notfasttainted_unicode(s), "%s,%s" % (i, len(s) // 7)
        set_fast_tainted_if_notinterned_unicode(s)
        assert not is_notinterned_notfasttainted_unicode(s)
    # bytes, bytearray and int are never fast-taintable; marking them must
    # leave them unreported as well.
    for not_unicode in (b"foobar" * 4000, bytearray(b"sfdsdfsdf" * 4000), 12345):
        assert not is_notinterned_notfasttainted_unicode(not_unicode)
        set_fast_tainted_if_notinterned_unicode(not_unicode)
        assert not is_notinterned_notfasttainted_unicode(not_unicode)
def test_set_get_ranges_str():
    """Ranges set on a unicode string come back verbatim; strings that were
    never tainted report no ranges."""
    tainted = "abcde😁"
    untainted = "defg"
    set_ranges(tainted, [_RANGE1, _RANGE2])
    assert get_ranges(tainted) == [_RANGE1, _RANGE2]
    assert not get_ranges(untainted)
def test_set_get_ranges_other():
    """set_ranges is a silent no-op on non-text objects (int, None)."""
    number = 12345
    nothing = None
    set_ranges(number, [_RANGE1, _RANGE2])
    set_ranges(nothing, [_RANGE1, _RANGE2])
    assert not get_ranges(number)
    assert not get_ranges(nothing)
def test_set_get_ranges_bytes():
    """Ranges set on a bytes object come back verbatim; untainted bytes
    report no ranges."""
    b1 = b"ABCDE"
    b2 = b"DEFG"
    set_ranges(b1, [_RANGE2, _RANGE1])
    assert get_ranges(b1) == [_RANGE2, _RANGE1]
    # Tightened from ``not get_ranges(b2) == [...]`` (true for *any* value
    # that merely differs): b2 was never tainted, so it must have no ranges.
    assert not get_ranges(b2)
def test_set_get_ranges_bytearray():
    """Ranges are keyed by object identity: tainting one bytearray does not
    taint an equal-but-distinct bytearray."""
    b1 = bytearray(b"abcdef")
    b2 = bytearray(b"abcdef")
    set_ranges(b1, [_RANGE1, _RANGE2])
    assert get_ranges(b1) == [_RANGE1, _RANGE2]
    # Tightened from ``not get_ranges(b2) == [...]`` (true for *any* value
    # that merely differs): b2 was never tainted, so it must have no ranges.
    assert not get_ranges(b2)
def test_shift_taint_ranges():
    """shift_taint_range(s) offsets range starts by the given amount and
    returns new TaintRange objects."""
    r1 = TaintRange(0, 2, _SOURCE1)
    r1_shifted = shift_taint_range(r1, 2)
    assert r1_shifted == TaintRange(2, 2, _SOURCE1)
    assert r1_shifted != r1
    r2 = TaintRange(1, 3, _SOURCE1)
    r3 = TaintRange(4, 6, _SOURCE2)
    r2_shifted, r3_shifted = shift_taint_ranges([r2, r3], 2)
    assert r2_shifted == TaintRange(3, 3, _SOURCE1)
    # Fixed expected source: r3 was built from _SOURCE2, not _SOURCE1.
    assert r3_shifted == TaintRange(6, 6, _SOURCE2)
def test_are_all_text_all_ranges():
    """are_all_text_all_ranges merges the ranges of every text argument and
    also returns the first argument's ranges separately as candidates;
    non-text arguments are skipped."""
    base = "abcdef"
    extra_a = "ghijk"
    extra_b = "xyzv"
    not_text = 123456
    source3 = Source(name="name3", value="value3", origin=OriginType.COOKIE)
    source4 = Source(name="name4", value="value4", origin=OriginType.COOKIE)
    range3 = TaintRange(2, 3, source3)
    range4 = TaintRange(4, 5, source4)
    set_ranges(base, [_RANGE1, _RANGE2])
    set_ranges(extra_a, [range3, _RANGE2])
    set_ranges(extra_b, [range4, _RANGE1])
    all_ranges, candidate_ranges = are_all_text_all_ranges(
        base, (extra_a, extra_b, not_text)
    )
    # Ranges are inserted at the start except the candidate ones that are appended
    assert all_ranges == [range3, _RANGE2, range4, _RANGE1, _RANGE1, _RANGE2]
    assert candidate_ranges == [_RANGE1, _RANGE2]
def test_get_range_by_hash():
    """hash() and __hash__ agree for TaintRange, distinct ranges hash
    differently, and get_range_by_hash finds ranges by their hash."""
    h1 = hash(_RANGE1)
    assert h1 == _RANGE1.__hash__()
    h2_builtin = hash(_RANGE2)
    h2_dunder = _RANGE2.__hash__()
    assert h2_builtin == h2_dunder
    assert h1 != h2_builtin
    candidates = [_RANGE1, _RANGE2]
    assert get_range_by_hash(h1, candidates) == _RANGE1
    assert get_range_by_hash(h2_builtin, candidates) == _RANGE2
def test_num_objects_tainted():
    """Each taint_pyobject call adds one object to the live-taint counter."""
    contexts_reset()
    create_context()
    values = ["abc123_len1", "def456__len2", "ghi789___len3"]
    assert num_objects_tainted() == 0
    # Keep the tainted results referenced so they stay alive for the count.
    tainted = []
    for value in values:
        tainted.append(
            taint_pyobject(
                value,
                source_name="test_num_objects_tainted",
                source_value=value,
                source_origin=OriginType.PARAMETER,
            )
        )
    assert num_objects_tainted() == 3
def test_reset_objects():
    """contexts_reset() drops every tainted object from the active context."""
    contexts_reset()
    create_context()
    first = "abc123"
    second = "def456"
    assert num_objects_tainted() == 0
    first = taint_pyobject(
        first,
        source_name="test_num_objects_tainted",
        source_value=first,
        source_origin=OriginType.PARAMETER,
    )
    assert num_objects_tainted() == 1
    # Resetting wipes the previous taint; the next taint starts from zero.
    contexts_reset()
    create_context()
    second = taint_pyobject(
        second,
        source_name="test_num_objects_tainted",
        source_value=second,
        source_origin=OriginType.PARAMETER,
    )
    assert num_objects_tainted() == 1
    contexts_reset()
    create_context()
    assert num_objects_tainted() == 0
5,596 | generate manifest | #
# Copyright (c) 2016 Nordic Semiconductor ASA
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice, this
# list of conditions and the following disclaimer in the documentation and/or
# other materials provided with the distribution.
#
# 3. Neither the name of Nordic Semiconductor ASA nor the names of other
# contributors to this software may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# 4. This software must only be used in or with a processor manufactured by Nordic
# Semiconductor ASA, or in or with a processor manufactured by a third party that
# is used in combination with a processor manufactured by Nordic Semiconductor.
#
# 5. Any software provided in binary or object form under this license must not be
# reverse engineered, decompiled, modified and/or disassembled.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
# ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Python libraries
import json
import os
# Nordic libraries
from .model import FirmwareKeys, HexType
class ManifestGenerator:
    """Generates the JSON manifest describing a Nordic DFU package."""

    def __init__(self, firmwares_data):
        """
        The Manifest Generator constructor. Needs a data structure to generate a manifest from.

        :type dict firmwares_data: The firmwares data structure describing the Nordic DFU package
        """
        self.firmwares_data = firmwares_data
        self.manifest = None

    def METHOD_NAME(self):
        """Build a Manifest from ``self.firmwares_data`` and return its JSON."""
        self.manifest = Manifest()
        for key, firmware_dict in self.firmwares_data.items():
            if key == HexType.SD_BL:
                # Combined SoftDevice+bootloader images carry size metadata.
                _firmware = SoftdeviceBootloaderFirmware()
                _firmware.info_read_only_metadata = FWMetaData()
                _firmware.info_read_only_metadata.bl_size = firmware_dict[
                    FirmwareKeys.BL_SIZE
                ]
                _firmware.info_read_only_metadata.sd_size = firmware_dict[
                    FirmwareKeys.SD_SIZE
                ]
            else:
                _firmware = Firmware()
            # Strip path, add only filename
            _firmware.bin_file = os.path.basename(
                firmware_dict[FirmwareKeys.BIN_FILENAME]
            )
            _firmware.dat_file = os.path.basename(
                firmware_dict[FirmwareKeys.DAT_FILENAME]
            )
            if key in (HexType.APPLICATION, HexType.EXTERNAL_APPLICATION):
                self.manifest.application = _firmware
            elif key == HexType.BOOTLOADER:
                self.manifest.bootloader = _firmware
            elif key == HexType.SOFTDEVICE:
                self.manifest.softdevice = _firmware
            elif key == HexType.SD_BL:
                self.manifest.softdevice_bootloader = _firmware
            else:
                raise NotImplementedError(
                    "Support for firmware type {0} not implemented yet.".format(key)
                )
        return self.to_json()

    def to_json(self):
        """Serialize ``{"manifest": ...}`` as sorted, pretty-printed JSON,
        recursively dropping keys whose value is None."""
        def remove_none_entries(d):
            if not isinstance(d, dict):
                return d
            return {
                k: remove_none_entries(v) for k, v in d.items() if v is not None
            }

        return json.dumps(
            {"manifest": self.manifest},
            default=lambda o: remove_none_entries(o.__dict__),
            sort_keys=True,
            indent=4,
            separators=(",", ": "),
        )
class FWMetaData:
    """Optional read-only metadata attached to a firmware image entry."""

    def __init__(
        self,
        is_debug=None,
        hw_version=None,
        fw_version=None,
        softdevice_req=None,
        sd_size=None,
        bl_size=None,
    ):
        """
        The FWMetaData data model.

        :param bool is_debug: debug mode on
        :param int hw_version: hardware version
        :param int fw_version: application or bootloader version
        :param list softdevice_req: softdevice requirements
        :param int sd_size: SoftDevice size
        :param int bl_size: bootloader size
        :return: FWMetaData
        """
        # Sizes (set for combined SoftDevice+bootloader images).
        self.sd_size = sd_size
        self.bl_size = bl_size
        # Versioning and requirements.
        self.hw_version = hw_version
        self.fw_version = fw_version
        self.softdevice_req = softdevice_req
        self.is_debug = is_debug
class Firmware:
    """A single firmware image entry (binary + init packet) in the manifest."""

    def __init__(self, bin_file=None, dat_file=None, info_read_only_metadata=None):
        """
        The firmware datamodel.

        :param str bin_file: Firmware binary file
        :param str dat_file: Firmware .dat file (init packet for Nordic DFU)
        :param dict info_read_only_metadata: The metadata about this firmware image
        :return:
        """
        self.bin_file = bin_file
        self.dat_file = dat_file
        # Expand the metadata dict into a FWMetaData object when present.
        self.info_read_only_metadata = (
            FWMetaData(**info_read_only_metadata) if info_read_only_metadata else None
        )
class SoftdeviceBootloaderFirmware(Firmware):
    def __init__(self, bin_file=None, dat_file=None, info_read_only_metadata=None):
        """
        The SoftdeviceBootloaderFirmware data model.

        Behaviorally identical to Firmware; the distinct type marks the
        combined SoftDevice + bootloader image, whose metadata carries
        sd_size/bl_size (see ManifestGenerator).

        :param str bin_file: Firmware binary file
        :param str dat_file: Firmware .dat file (init packet for Nordic DFU)
        :param dict info_read_only_metadata: The metadata about this firmware image
        :return: SoftdeviceBootloaderFirmware
        """
        super().__init__(bin_file, dat_file, info_read_only_metadata)
class Manifest:
    """Top-level description of the firmware images inside a DFU package."""

    def __init__(
        self,
        application=None,
        bootloader=None,
        softdevice=None,
        softdevice_bootloader=None,
    ):
        """
        The Manifest data model.

        :param dict application: Application firmware in package
        :param dict bootloader: Bootloader firmware in package
        :param dict softdevice: Softdevice firmware in package
        :param dict softdevice_bootloader: Combined softdevice and bootloader firmware in package
        :return: Manifest
        """
        # Absent (falsy) sections stay None so to_json() can drop them.
        if softdevice_bootloader:
            self.softdevice_bootloader = SoftdeviceBootloaderFirmware(
                **softdevice_bootloader
            )
        else:
            self.softdevice_bootloader = None
        if softdevice:
            self.softdevice = Firmware(**softdevice)
        else:
            self.softdevice = None
        if bootloader:
            self.bootloader = Firmware(**bootloader)
        else:
            self.bootloader = None
        if application:
            self.application = Firmware(**application)
        else:
            self.application = None

    @staticmethod
    def from_json(data):
        """
        Parses a manifest according to Nordic DFU package specification.

        :param str data: The manifest in string format
        :return: Manifest
        """
        return Manifest(**json.loads(data)["manifest"])
5,597 | back | # The MIT License (MIT)
#
# Copyright (c) 2007-2018 Einar Lielmanis, Liam Newman, and contributors.
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import re
class InputScanner:
    """Cursor-based scanner over an input string.

    Maintains a position index into the input; callers advance it one
    character at a time (``next``/``back``) or in regex-sized chunks
    (``match``/``read``/``readUntil``).
    """

    def __init__(self, input_string):
        # ``six`` is only needed by get_regexp() on Python 2; previously it
        # was imported unconditionally here, making every instance require
        # six. Fall back to the builtin ``str`` when it is unavailable.
        try:
            self.__string_types = __import__("six").string_types
        except ImportError:
            self.__string_types = (str,)
        if input_string is None:
            input_string = ""
        self.__input = input_string
        self.__input_length = len(self.__input)
        self.__position = 0

    def restart(self):
        """Reset the cursor to the start of the input."""
        self.__position = 0

    def METHOD_NAME(self):
        """Step the cursor back one character (no-op at the start)."""
        if self.__position > 0:
            self.__position -= 1

    def hasNext(self):
        """Return True if any unread characters remain."""
        return self.__position < self.__input_length

    def next(self):
        """Consume and return the next character, or None at end of input."""
        val = None
        if self.hasNext():
            val = self.__input[self.__position]
            self.__position += 1
        return val

    def peek(self, index=0):
        """Return the character ``index`` positions past the cursor without
        consuming it, or None if out of bounds."""
        val = None
        index += self.__position
        if 0 <= index < self.__input_length:
            val = self.__input[index]
        return val

    def test(self, pattern, index=0):
        """Return True if ``pattern`` matches at ``index`` past the cursor."""
        index += self.__position
        return (
            0 <= index < self.__input_length
            and bool(pattern.match(self.__input, index))
        )

    def testChar(self, pattern, index=0):
        # test one character regex match
        val = self.peek(index)
        return val is not None and bool(pattern.match(val))

    def match(self, pattern):
        """Match ``pattern`` anchored at the cursor; on success consume the
        match and return the match object, otherwise return None."""
        pattern_match = None
        if self.hasNext():
            pattern_match = pattern.match(self.__input, self.__position)
            if bool(pattern_match):
                self.__position = pattern_match.end(0)
        return pattern_match

    def read(self, starting_pattern, until_pattern=None, until_after=False):
        """Consume ``starting_pattern`` (when given and matching), then
        optionally everything up to (or through) ``until_pattern``."""
        val = ""
        pattern_match = None
        if bool(starting_pattern):
            pattern_match = self.match(starting_pattern)
            if bool(pattern_match):
                val = pattern_match.group(0)
        if bool(until_pattern) and (bool(pattern_match) or not bool(starting_pattern)):
            val += self.readUntil(until_pattern, until_after)
        return val

    def readUntil(self, pattern, include_match=False):
        """Consume and return text up to the next match of ``pattern`` (or to
        end of input); include the matched text itself if ``include_match``."""
        val = ""
        pattern_match = None
        match_index = self.__position
        if self.hasNext():
            pattern_match = pattern.search(self.__input, self.__position)
            if bool(pattern_match):
                if include_match:
                    match_index = pattern_match.end(0)
                else:
                    match_index = pattern_match.start(0)
            else:
                match_index = self.__input_length
            val = self.__input[self.__position : match_index]
            self.__position = match_index
        return val

    def readUntilAfter(self, pattern):
        """Consume and return text through the next match of ``pattern``."""
        return self.readUntil(pattern, True)

    def get_regexp(self, pattern, match_from=False):
        """Return a compiled regex for ``pattern``.

        Strings are compiled as-is; already-compiled patterns are recompiled
        from their source. None and the empty string yield None (the empty
        string previously fell into the recompile branch and crashed with
        ``AttributeError: 'str' object has no attribute 'pattern'``).
        ``match_from`` is unused but kept for API compatibility.
        """
        # strings are converted to regexp
        if isinstance(pattern, self.__string_types):
            if pattern != "":
                return re.compile(pattern)
            return None
        if pattern is not None:
            return re.compile(pattern.pattern)
        return None

    # css beautifier legacy helpers

    def peekUntilAfter(self, pattern):
        """Like readUntilAfter, but restore the cursor afterwards."""
        start = self.__position
        val = self.readUntilAfter(pattern)
        self.__position = start
        return val

    def lookBack(self, testVal):
        """Return True if the lowercased characters ending one before the
        cursor equal ``testVal``."""
        start = self.__position - 1
        return (
            start >= len(testVal)
            and self.__input[start - len(testVal) : start].lower() == testVal
        )
5,598 | create user | # pylint: disable=unused-argument
# pylint: disable=keyword-arg-before-vararg
# pylint: disable=inconsistent-return-statements
# pylint: disable=line-too-long
"""Custom authentication pipeline steps."""
from social_core.pipeline.partial import partial
from django.shortcuts import redirect, render
from django.contrib.auth.password_validation import validate_password
from django.core.exceptions import ValidationError
from django.core.mail import send_mail
from django.urls import reverse
from django.conf import settings
from tcf_website.models import User
def auth_allowed(backend, details, response, *args, **kwargs):
    """Route unallowed auth attempts to the error page; otherwise continue
    the pipeline by returning None."""
    if backend.auth_allowed(response, details):
        return None
    return redirect('/login/error', error=True)
def password_validation(backend, details, request, response, *args, **kwargs):
    """For the email backend: validate registration passwords, or verify the
    account exists on login; pass the password down the pipeline.

    Returns None for non-email backends, a rendered/redirected response on
    failure, or ``{'password': ...}`` to inject the password into kwargs.
    """
    if backend.name != 'email':
        return
    if not response.get('login'):
        # Registration: both password fields must match and the password
        # must satisfy Django's configured validators.
        if response.get('password') != response.get('password_confirm'):
            return render(request, 'login/register_form.html', {
                'error_message': ['passwords do not match']
            })
        try:
            validate_password(response.get('password'))
        except ValidationError as err:
            return render(request, 'login/register_form.html', {
                'error_message': err
            })
    else:
        # Login: the account must already exist for this email.
        if not User.objects.filter(email=response.get('email')).exists():
            return redirect('/login/password_error', error=True)
    return {'password': response.get('password')}
@partial
def collect_extra_info(
        strategy,
        backend,
        request,
        details,
        user=None,
        *args,
        **kwargs):
    """Collect extra information (graduation year) on sign up.

    ``@partial`` allows the pipeline to pause at this step: returning a
    redirect sends the browser to a form, and the pipeline later resumes
    from here once the session holds the requested data.
    """
    if user:
        # Existing account: nothing extra to collect.
        return {'is_new': False}
    # session 'grad_year' is set by the pipeline infrastructure
    # because it exists in FIELDS_STORED_IN_SESSION
    grad_year = strategy.session_get('grad_year', None)
    if not grad_year:
        # if we return something besides a dict or None, then that is
        # returned to the user -- in this case we will redirect to a
        # view that can be used to get a password
        return redirect(f"/login/collect_extra_info/{backend.name}")
# Default fields copied from the social-auth details into a new user record;
# overridable via the backend's USER_FIELDS setting.
USER_FIELDS = ['email', 'username']


def METHOD_NAME(strategy, details, backend, user=None, *args, **kwargs):
    """
    Add extra information to saved user.
    Based on https://github.com/python-social-auth/social-core/blob/3.3.3/social_core/pipeline/user.py#L64
    """
    # User has registered previously.
    if user:
        if backend.name == 'email':
            # Backfill name details from the stored user for the email backend.
            details['fullname'] = user.full_name()
            details['first_name'] = user.first_name
            details['last_name'] = user.last_name
        return {'is_new': False, 'details': details}
    fields = dict((name, kwargs.get(name, details.get(name)))
                  for name in backend.setting('USER_FIELDS', USER_FIELDS))
    if not fields:
        return None
    # Add graduation year and computing ID. This is extra info not
    # automatically collected by python-social-auth.
    fields['graduation_year'] = strategy.session_get('grad_year', None)
    # NOTE(review): assumes an email is always present; a missing email would
    # raise AttributeError on .split() -- confirm upstream guarantees it.
    fields['computing_id'] = kwargs.get(
        'email', details.get('email')).split('@')[0]
    return {
        'is_new': True,
        'user': strategy.METHOD_NAME(**fields)
    }
def check_user_password(strategy, backend, user, is_new=False, password="", *args, **kwargs):
    """
    Saves password to user object if a new user (registering).
    Otherwise, validates given password is correct.

    Only applies to the email backend; redirects to the password error page
    on a failed check.
    """
    if backend.name != 'email':
        return
    if is_new:
        user.set_password(password)
        user.save()
        return
    if not user.check_password(password):
        return redirect('/login/password_error', error=True)
def validate_email(strategy, backend, code, partial_token):
    """
    Used in auth pipeline to generate the verification email for account
    creation. Already-verified codes are skipped.
    """
    if code.verified:
        return
    # Build the pipeline-resume URL carrying the verification code and the
    # token of the paused partial pipeline.
    url = (
        strategy.build_absolute_uri(reverse('social:complete', args=(backend.name,)))
        + '?verification_code=' + code.code
        + '&partial_token=' + partial_token
    )
    send_mail(
        'theCourseForum Email Verification',
        f'Please go to {url} to confirm your new account for theCourseForum',
        settings.EMAIL_HOST_USER,
        [code.email],
        fail_silently=False,
    )
5,599 | tau | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""Gas Data"""
__all__ = [
'photo_lengthscale',
'photo_timescale',
'fluorescence_band_strength',
'OHFluorescenceSA88'
]
from warnings import warn
import numpy as np
try:
import scipy
from scipy import interpolate
except ImportError:
scipy = None
import astropy.units as u
from astropy.io import ascii
from astropy.utils.data import get_pkg_data_filename
from .... import data as sbd
from .... import exceptions as sbe
from .... import bib
# Photodissociation lengthscales keyed by species, then by literature source.
# Each entry is (value, {key feature: ADS bibcode}).
photo_lengthscale = {  # (value, {key feature: ADS bibcode})
    'H2O': {
        'CS93': (2.4e4 * u.km,
                 {'H2O photodissociation lengthscale':
                  '1993Icar..105..235C'})
    },
    'OH': {
        'CS93': (1.6e5 * u.km,
                 {'OH photodissociation lengthscale':
                  '1993Icar..105..235C'})
    },
}

# Photodissociation timescales keyed by species, then by literature source.
# Each entry is (value, {key feature: ADS bibcode}).
photo_timescale = {  # (value, {key feature: ADS bibcode})
    'H2O': {
        'CS93': (5.2e4 * u.s,
                 {'H2O photodissociation timescale':
                  '1993Icar..105..235C'})
    },
    'OH': {
        'CS93': (1.6e5 * u.s,
                 {'OH photodissociation timescale':
                  '1993Icar..105..235C'})
    },
    'HCN': {
        'C94': (6.7e4 * u.s,
                {'HCN photodissociation timescale':
                 '1994JGR....99.3777C'})
    },
    'CH3OH': {
        'C94': (7.7e4 * u.s,
                {'CH3OH photodissociation timescale':
                 '1994JGR....99.3777C'})
    },
    'H2CO': {
        'C94': (5.0e3 * u.s,
                {'H2CO photodissociation timescale':
                 '1994JGR....99.3777C'})
    },
    'CO': {
        'CE83': (1.5e6 * u.s,
                 {'CO photodissociation timescale':
                  '1983A&A...126..170C'})
    },
    'CO2': {
        'CE83': (5.0e5 * u.s,
                 {'CO2 photodissociation timescale':
                  '1983A&A...126..170C'})
    },
    'CN': {
        # Two tabulated values from Huebner 1992 — presumably different solar
        # activity conditions; confirm against the cited reference.
        'H92': ([3.15e5, 1.35e5] * u.s,
                {'CN photodissociation timescale':
                 '1992Ap&SS.195....1H'})
    },
}
class OHFluorescenceSA88:
    """OH fluorescence.

    Valid for heliocentric radial velocities between -60 and 60 km/s,
    and heliocentric distances greater than 0.5 au.

    Based on Table 5 of Schleicher and A'Hearn 1988, The Fluorescence
    of Cometary OH, ApJ 331, 1058-1077.

    Parameters
    ----------
    band : string
        Initialize for this OH band. Valid bands may be found via
        `OHFluorescenceSchleicher88.BANDS`.

    Examples
    --------
    >>> from sbpy.activity.gas.data import OHFluorescenceSA88
    >>> LN = OHFluorescenceSA88('0-0')
    >>> print(LN(-1 * u.km / u.s))    # doctest: +FLOAT_CMP
    [1.54e-15] erg / s
    """

    # Supported OH bands; entries past the first four are derived from a
    # basis band via the scale factors below.
    BANDS = ['0-0', '1-0', '1-1', '2-2',
             '0-1', '0-2', '1-2', '2-0', '2-1']

    def __init__(self, band):
        # Table 5 of Schleicher & A'Hearn (1988), shipped with the package.
        fn = get_pkg_data_filename('schleicher88.txt')
        self.table5 = ascii.read(fn)
        self._rdot = self.table5['rdot'].data * u.km / u.s
        self._inversion = self.table5['I']
        self._tau = self.table5['tau'] * u.s
        # Each band in BANDS maps to a tabulated basis column (basis) and a
        # multiplicative factor (scales) applied to that column's L/N.
        self.basis = ['0-0', '1-0', '1-1', '2-2',
                      '0-0', '0-0', '1-1', '2-2', '2-2']
        self.scales = [1.0, 1.0, 1.0, 1.0,
                       0.00356, 0.00021, 0.00610, 0.274, 1.921]
        i = self.BANDS.index(band)
        k = self.basis[i]
        self._LN = u.Quantity(self.table5[k].data * self.scales[i],
                              'erg / s')
        if scipy:
            # Spline interpolation over rdot when scipy is available;
            # otherwise fall back to numpy linear interpolation.
            self._tck = interpolate.splrep(self.rdot.value, self.LN.value)
            self._interp = self._spline
        else:
            warn(sbe.OptionalPackageUnavailable(
                'scipy unavailable, using linear interpolation.'))
            self._interp = self._linear

    def _spline(self, rdot, rh):
        # ext=2 makes splev raise ValueError outside the tabulated range.
        return interpolate.splev(rdot, self._tck, ext=2) / rh**2

    def _linear(self, rdot, rh):
        return np.interp(rdot, self.rdot.value, self.LN.value) / rh**2

    @bib.cite({'OH fluorescence band efficiency': '1988ApJ...331.1058S'})
    @sbd.dataclass_input(eph=sbd.Ephem)
    @sbd.quantity_to_dataclass(eph=(sbd.Ephem, 'rdot'))
    def __call__(self, eph):
        """Fluorescence band efficiency.

        Evaluated at the given heliocentric radial velocity, and,
        optionally, heliocentric distance.

        Parameters
        ----------
        eph : `~astropy.units.Quantity`, `~sbpy.data.Ephem`
            Heliocentric radial velocity as a
            `~astropy.units.Quantity` or a column in an
            `sbpy.data.Ephem` object.  If heliocentric distance is
            present, the value will be scaled by ``rh**-2``,
            otherwise, the value at 1 au is returned.

        Returns
        -------
        LN : `~astropy.units.Quantity`
            Fluorescence band efficiency or luminosity per molecule.

        Raises
        ------
        `ValueError` when the heliocentric distance is < 0.5 au or
        r-dot is outside the tabulated range.
        """
        rdot = eph['rdot'].to(self.rdot.unit).value
        if np.any(np.abs(rdot) > 60):
            raise ValueError('r-dot must be between -60 and 60 km/s')
        # Default to 1 au when heliocentric distance is not provided.
        try:
            rh = eph['rh'].to(u.au).value
        except KeyError:
            rh = 1
        if np.any(rh < 0.5):
            raise ValueError(
                'At rh < 0.5 au the pumping rate is not small compared '
                'to the rotational decay rate. See Schleicher & A\'Hearn '
                '1988 for details.')
        return self._interp(rdot, rh) * self.LN.unit

    @property
    def rdot(self):
        """Heliocentric radial velocity for tabulated data."""
        return self._rdot

    @property
    def inversion(self):
        """Inversion (n_u - n_l) / (n_u + n_l)."""
        return self._inversion

    @property
    def METHOD_NAME(self):
        """Lifetime via A^2 Σ^+(ν=2,3)."""
        return self._tau

    @property
    def LN(self):
        """Tabulated fluorescence band efficiency (L/N)."""
        return self._LN
# Fluorescence band efficiency (L/N) models, keyed by band then by source.
fluorescence_band_strength = {
    # (function, note, citation or None)
    # for OHFluorescenceSA88, the citation is handled by the class
    'OH 0-0': {
        'SA88': (OHFluorescenceSA88('0-0'), 'Requires r-dot', None)
    },
    'OH 1-0': {
        'SA88': (OHFluorescenceSA88('1-0'), 'Requires r-dot', None)
    },
    'OH 1-1': {
        'SA88': (OHFluorescenceSA88('1-1'), 'Requires r-dot', None)
    },
    'OH 2-2': {
        'SA88': (OHFluorescenceSA88('2-2'), 'Requires r-dot', None)
    },
    'OH 0-1': {
        'SA88': (OHFluorescenceSA88('0-1'), 'Requires r-dot', None)
    },
    'OH 0-2': {
        'SA88': (OHFluorescenceSA88('0-2'), 'Requires r-dot', None)
    },
    'OH 1-2': {
        'SA88': (OHFluorescenceSA88('1-2'), 'Requires r-dot', None)
    },
    'OH 2-0': {
        'SA88': (OHFluorescenceSA88('2-0'), 'Requires r-dot', None)
    },
    'OH 2-1': {
        'SA88': (OHFluorescenceSA88('2-1'), 'Requires r-dot', None)
    },
}
No community queries yet
The top public SQL queries from the community will appear here once available.