id int64 0 300k | label stringlengths 1 74 ⌀ | text stringlengths 4k 8k |
|---|---|---|
6,100 | test merged components correct | from sys import version_info
from typing import TYPE_CHECKING, Any
import pytest
from pydantic import BaseModel, Field
from litestar import Litestar, get, post
from litestar.exceptions import ImproperlyConfiguredException
from litestar.openapi.config import OpenAPIConfig
from litestar.openapi.spec import Components, Example, OpenAPIHeader, OpenAPIType, Schema
from litestar.testing import TestClient
if TYPE_CHECKING:
from litestar.handlers.http_handlers import HTTPRouteHandler
@pytest.mark.skipif(version_info < (3, 10), reason="pydantic serialization differences in lower python versions")
def METHOD_NAME() -> None:
    """Merging several ``Components`` instances combines their sub-mappings."""
    header_components = Components(
        headers={"one": OpenAPIHeader()}, schemas={"test": Schema(type=OpenAPIType.STRING)}
    )
    extra_header_components = Components(headers={"two": OpenAPIHeader()})
    example_components = Components(examples={"example-one": Example(summary="an example")})
    config = OpenAPIConfig(
        title="my title",
        version="1.0.0",
        components=[header_components, extra_header_components, example_components],
    )
    openapi = config.to_openapi_schema()
    assert openapi.components
    # both headers serialize with the same set of default flags
    default_header = {
        "name": "",
        "in": "header",
        "required": False,
        "deprecated": False,
        "allowEmptyValue": False,
        "allowReserved": False,
    }
    assert openapi.components.to_schema() == {
        "schemas": {"test": {"type": "string"}},
        "examples": {"example-one": {"summary": "an example"}},
        "headers": {"one": default_header, "two": default_header},
    }
def test_by_alias() -> None:
    """Request schemas expose the field alias; response schemas expose the field name."""

    class RequestWithAlias(BaseModel):
        first: str = Field(alias="second")

    class ResponseWithAlias(BaseModel):
        first: str = Field(alias="second")

    @post("/")
    def handler(data: RequestWithAlias) -> ResponseWithAlias:
        return ResponseWithAlias(second=data.first)

    app = Litestar(route_handlers=[handler], openapi_config=OpenAPIConfig(title="my title", version="1.0.0"))
    assert app.openapi_schema
    schemas = app.openapi_schema.to_schema()["components"]["schemas"]

    def expected_schema(key: str, title: str) -> dict:
        # Both models share the same shape, differing only in property key and title.
        return {
            "properties": {key: {"type": "string"}},
            "type": "object",
            "required": [key],
            "title": title,
        }

    request_key = "second"
    response_key = "first"
    assert schemas["RequestWithAlias"] == expected_schema(request_key, "RequestWithAlias")
    assert schemas["ResponseWithAlias"] == expected_schema(response_key, "ResponseWithAlias")

    # Round-trip: input keyed by alias, output keyed by field name.
    with TestClient(app) as client:
        response = client.post("/", json={request_key: "foo"})
        assert response.json() == {response_key: "foo"}
def test_allows_customization_of_operation_id_creator() -> None:
    """A custom ``operation_id_creator`` determines the ``operationId`` values."""

    def operation_id_creator(handler: "HTTPRouteHandler", _: Any, __: Any) -> str:
        return handler.name or ""

    @get(path="/1", name="x")
    def handler_1() -> None:
        return

    @get(path="/2", name="y")
    def handler_2() -> None:
        return

    app = Litestar(
        route_handlers=[handler_1, handler_2],
        openapi_config=OpenAPIConfig(title="my title", version="1.0.0", operation_id_creator=operation_id_creator),
    )

    def expected_get_operation(operation_id: str, summary: str) -> dict:
        # Both paths produce identical GET operations apart from id and summary.
        return {
            "get": {
                "deprecated": False,
                "operationId": operation_id,
                "responses": {"200": {"description": "Request fulfilled, document follows", "headers": {}}},
                "summary": summary,
            }
        }

    assert app.openapi_schema.to_schema()["paths"] == {
        "/1": expected_get_operation("x", "Handler1"),
        "/2": expected_get_operation("y", "Handler2"),
    }
def test_allows_customization_of_path() -> None:
    """The schema endpoint path is configurable via ``OpenAPIConfig.path``."""
    custom_path = "/custom_schema_path"
    app = Litestar(
        openapi_config=OpenAPIConfig(title="my title", version="1.0.0", path=custom_path),
    )
    assert app.openapi_config
    # the path propagates to both the config and its controller
    assert app.openapi_config.path == custom_path
    assert app.openapi_config.openapi_controller.path == custom_path
def test_raises_exception_when_no_config_in_place() -> None:
    """Updating the OpenAPI schema without a config is a configuration error."""
    app = Litestar(route_handlers=[], openapi_config=None)
    with pytest.raises(ImproperlyConfiguredException):
        app.update_openapi_schema()
6,101 | test route | from unittest.mock import Mock
import pytest
from faust.web import Request, View, Web
from faust.web.exceptions import MethodNotAllowed
from tests.helpers import AsyncMock
@View.from_handler
async def foo(self, request):
    """Plain async handler promoted to a ``View`` subclass by ``View.from_handler``.

    Echoes ``(self, request)`` so tests can verify dispatch wiring.
    """
    return self, request
class Test_View:
    """Unit tests for ``faust.web.View`` using mocked app/web objects."""

    @pytest.fixture
    def web(self):
        # autospec validates attribute access against the real Web interface
        return Mock(name="web", autospec=Web)

    @pytest.fixture
    def view(self, *, app, web):
        # ``foo`` is the View subclass created by ``View.from_handler`` above
        return foo(app, web)

    def test_from_handler(self):
        assert issubclass(foo, View)

    def test_from_handler__not_callable(self):
        # from_handler must reject non-callable arguments
        with pytest.raises(TypeError):
            View.from_handler(1)

    def test_init(self, *, app, web, view):
        assert view.app is app
        assert view.web is web
        # each supported HTTP verb maps to the corresponding bound method
        assert view.methods == {
            "head": view.head,
            "get": view.get,
            "post": view.post,
            "patch": view.patch,
            "delete": view.delete,
            "put": view.put,
            "options": view.options,
            "search": view.search,
        }

    def test_get_methods(self, view):
        assert view.get_methods() == set({"GET", "HEAD"})

    @pytest.mark.parametrize(
        "method",
        [
            "GET",
            "POST",
            "PATCH",
            "DELETE",
            "PUT",
            "OPTIONS",
            "SEARCH",
            "get",
            "post",
            "patch",
            "delete",
            "put",
            "options",
            "search",
        ],
    )
    @pytest.mark.asyncio
    async def test_dispatch(self, method, *, view):
        # dispatch is case-insensitive: the verb is lowercased before lookup
        request = Mock(name="request", autospec=Request)
        request.method = method
        request.match_info = {}
        handler = AsyncMock(name=method)
        view.methods[method.lower()] = handler
        assert await view(request) is handler.return_value
        handler.assert_called_once_with(request)

    def test_path_for(self, *, view):
        # path_for delegates to web.url_for
        ret = view.path_for("name", foo=1)
        assert ret is view.web.url_for.return_value
        view.web.url_for.assert_called_once_with("name", foo=1)

    def test_url_for__no_base(self, *, view):
        # with no base URL argument, the app's canonical URL is consulted
        view.app.conf.canonical_url = "http://example.com/"
        ret = view.url_for("name", foo=1)
        assert ret

    def test_url_for__base(self, *, view):
        ret = view.url_for("name", "http://example.bar", foo=1)
        assert ret

    @pytest.mark.asyncio
    async def test_get(self, *, view):
        # the handler-derived view echoes (self, request); see ``foo`` above
        req = Mock(name="request", autospec=Request)
        assert (await view.methods["get"](req)) == (view, req)

    @pytest.mark.asyncio
    async def test_interface_get(self, *, app, web):
        # the base View's verb methods raise until overridden by a subclass
        view = View(app, web)
        with pytest.raises(MethodNotAllowed):
            await view.get(Mock(name="request", autospec=Request))

    @pytest.mark.asyncio
    async def test_interface_post(self, *, view):
        with pytest.raises(MethodNotAllowed):
            await view.post(Mock(name="request", autospec=Request))

    @pytest.mark.asyncio
    async def test_interface_put(self, *, view):
        with pytest.raises(MethodNotAllowed):
            await view.put(Mock(name="request", autospec=Request))

    @pytest.mark.asyncio
    async def test_interface_patch(self, *, view):
        with pytest.raises(MethodNotAllowed):
            await view.patch(Mock(name="request", autospec=Request))

    @pytest.mark.asyncio
    async def test_interface_delete(self, *, view):
        with pytest.raises(MethodNotAllowed):
            await view.delete(Mock(name="request", autospec=Request))

    @pytest.mark.asyncio
    async def test_interface_options(self, *, view):
        with pytest.raises(MethodNotAllowed):
            await view.options(Mock(name="request", autospec=Request))

    @pytest.mark.asyncio
    async def test_interface_search(self, *, view):
        with pytest.raises(MethodNotAllowed):
            await view.search(Mock(name="request", autospec=Request))

    def test_text(self, *, view, web):
        # view.text forwards all arguments verbatim to web.text
        response = view.text(
            "foo",
            content_type="app/json",
            status=101,
            reason="foo",
            headers={"k": "v"},
        )
        web.text.assert_called_once_with(
            "foo",
            content_type="app/json",
            status=101,
            reason="foo",
            headers={"k": "v"},
        )
        assert response is web.text()

    def test_html(self, *, view, web):
        # view.html forwards all arguments verbatim to web.html
        response = view.html(
            "foo",
            status=101,
            content_type="text/html",
            reason="bar",
            headers={"k": "v"},
        )
        web.html.assert_called_once_with(
            "foo",
            status=101,
            content_type="text/html",
            reason="bar",
            headers={"k": "v"},
        )
        assert response is web.html()

    def test_json(self, *, view, web):
        # view.json forwards all arguments verbatim to web.json
        response = view.json(
            "foo",
            status=101,
            content_type="application/json",
            reason="bar",
            headers={"k": "v"},
        )
        web.json.assert_called_once_with(
            "foo",
            status=101,
            content_type="application/json",
            reason="bar",
            headers={"k": "v"},
        )
        assert response is web.json()

    def test_bytes(self, *, view, web):
        # view.bytes forwards all arguments verbatim to web.bytes
        response = view.bytes(
            "foo",
            content_type="app/json",
            status=101,
            reason="foo",
            headers={"k": "v"},
        )
        web.bytes.assert_called_once_with(
            "foo",
            content_type="app/json",
            status=101,
            reason="foo",
            headers={"k": "v"},
        )
        assert response is web.bytes()

    def METHOD_NAME(self, *, view, web):
        # route registration is forwarded to web and returns the handler
        handler = Mock(name="handler")
        res = view.route("pat", handler)
        web.route.assert_called_with("pat", handler)
        assert res is handler

    def test_error(self, *, view, web):
        # error payload merges the message with the extra keyword arguments
        response = view.error(303, "foo", arg="bharg", headers={"k": "v"})
        web.json.assert_called_once_with(
            {"error": "foo", "arg": "bharg"},
            status=303,
            reason=None,
            content_type=None,
            headers={"k": "v"},
        )
        assert response is web.json()

    def test_notfound(self, *, view, web):
        # notfound is an error() shorthand with a fixed message and 404 status
        response = view.notfound(arg="bharg")
        web.json.assert_called_once_with(
            {"error": "Not Found", "arg": "bharg"},
            status=404,
            reason=None,
            headers=None,
            content_type=None,
        )
        assert response is web.json()

    @pytest.mark.asyncio
    async def test_read_request_content(self, *, view, web):
        # body reading is delegated to web.read_request_content
        request = Mock(name="request")
        web.read_request_content = AsyncMock()
        ret = await view.read_request_content(request)
        web.read_request_content.assert_called_once_with(request)
        assert ret is web.read_request_content.return_value
6,102 | is compatible | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Create a toymodel event stream of array events
"""
import logging
from typing import Dict
import astropy.units as u
import numpy as np
from ..containers import (
ArrayEventContainer,
DL1CameraContainer,
EventIndexContainer,
ObservationBlockContainer,
SchedulingBlockContainer,
)
from ..core import TelescopeComponent, traits
from ..image import toymodel
from .datalevels import DataLevel
from .eventsource import EventSource
logger = logging.getLogger(__name__)
class ToyEventSource(TelescopeComponent, EventSource):
    """Event source producing toy array events with randomly drawn DL1 images.

    For every event, each telescope triggers with ``trigger_probability`` and
    receives an image generated from a skewed-Gaussian shower model with
    randomly sampled position, length, width (via eccentricity), orientation
    and skewness.
    """

    # override input url from EventSource, we don't need one here
    input_url = traits.Path(allow_none=True, default_value=None).tag(config=True)

    trigger_probability = traits.FloatTelescopeParameter(
        default_value=0.5, help="Probability that the telescope has an event"
    ).tag(config=True)
    min_length_m = traits.FloatTelescopeParameter(
        default_value=0.05, help="Minimum length m"
    ).tag(config=True)
    max_length_m = traits.FloatTelescopeParameter(
        default_value=0.3, help="Maximum length in m"
    ).tag(config=True)
    min_eccentricity = traits.FloatTelescopeParameter(
        default_value=0.8, help="Minimum eccentricity = sqrt(1 - width**2/length**2)"
    ).tag(config=True)
    max_eccentricity = traits.FloatTelescopeParameter(
        default_value=0.98, help="Maximum eccentricity = sqrt(1 - width**2/length**2)"
    ).tag(config=True)
    min_skewness = traits.FloatTelescopeParameter(
        default_value=0.1, help="Minimum skewness"
    ).tag(config=True)
    max_skewness = traits.FloatTelescopeParameter(
        default_value=0.5, help="Maximum skewness"
    ).tag(config=True)
    seed = traits.Int(default_value=0, help="Seed for the rng").tag(config=True)

    def __init__(self, subarray, config=None, parent=None, rng=None, **kwargs):
        """Initialize the source; ``rng`` overrides the seeded default generator."""
        super().__init__(subarray=subarray, config=config, parent=parent, **kwargs)
        self._camera_radii = {}
        if rng is None:
            self.rng = np.random.default_rng(self.seed)
        else:
            self.rng = rng

    @staticmethod
    def calc_width(eccentricity, length):
        """Return the width implied by eccentricity = sqrt(1 - width**2/length**2)."""
        return length * np.sqrt(1 - eccentricity**2)

    @property
    def is_simulation(self):
        # toy data is always simulated
        return True

    @property
    def datalevels(self):
        # this source only produces DL1 images
        return (DataLevel.DL1_IMAGES,)

    @property
    def scheduling_blocks(self) -> Dict[int, SchedulingBlockContainer]:
        # NOTE(review): a fresh container (with a new default sb_id) is built on
        # every access — confirm callers read this at most once per run.
        sb = SchedulingBlockContainer(producer_id="ctapipe toymodel")
        return {sb.sb_id: sb}

    @property
    def observation_blocks(self) -> Dict[int, ObservationBlockContainer]:
        # same caveat as scheduling_blocks: a new container per access
        ob = ObservationBlockContainer(producer_id="ctapipe toymodel")
        return {ob.ob_id: ob}

    @staticmethod
    def METHOD_NAME(file_path):
        # this source does not read input files, so it never matches a path
        return False

    def _generator(self):
        """Yield generated events until ``max_events`` is reached.

        NOTE(review): assumes ``self.max_events`` is a number; if it were None
        the ``>=`` comparison would raise TypeError — confirm callers set it.
        """
        self.event_id = 0
        while True:
            if self.event_id >= self.max_events:
                break
            yield self.generate_event()
            self.event_id += 1

    def generate_event(self):
        """Build one ``ArrayEventContainer`` with a toy DL1 image per triggered telescope."""
        event = ArrayEventContainer(
            index=EventIndexContainer(obs_id=1, event_id=self.event_id),
            trigger=None,
            r0=None,
            dl0=None,
            dl2=None,
            simulation=None,
            count=self.event_id,
            calibration=None,
        )
        for tel_id, telescope in self.subarray.tel.items():
            # telescope triggers with probability trigger_probability
            if self.rng.uniform() >= self.trigger_probability.tel[tel_id]:
                continue
            cam = telescope.camera.geometry
            # draw cog: uniform over a disk covering 90% of the camera radius²
            r_fraction = np.sqrt(self.rng.uniform(0, 0.9))
            r = r_fraction * cam.guess_radius()
            phi = self.rng.uniform(0, 2 * np.pi)
            x = r * np.cos(phi)
            y = r * np.sin(phi)
            # draw length, then derive width from the sampled eccentricity
            length = self.rng.uniform(
                self.min_length_m.tel[tel_id], self.max_length_m.tel[tel_id]
            )
            eccentricity = self.rng.uniform(
                self.min_eccentricity.tel[tel_id], self.max_eccentricity.tel[tel_id]
            )
            width = self.calc_width(eccentricity, length)
            psi = self.rng.uniform(0, 360)
            # scale intensity with the shower ellipse area relative to pixel size
            shower_area_ratio = (
                2 * np.pi * width * length / cam.pix_area.mean().to_value(u.m**2)
            )
            intensity = self.rng.poisson(50) * shower_area_ratio
            skewness = self.rng.uniform(
                self.min_skewness.tel[tel_id], self.max_skewness.tel[tel_id]
            )
            model = toymodel.SkewedGaussian(
                x=x,
                y=y,
                length=length * u.m,
                width=width * u.m,
                psi=psi * u.deg,
                skewness=skewness,
            )
            image, _, _ = model.generate_image(cam, intensity)
            event.dl1.tel[tel_id] = DL1CameraContainer(image=image)
        return event
6,103 | control service | from _typeshed import Incomplete
from collections.abc import Iterable
import _win32typing
from win32.lib.pywintypes import error as error
# --- Window station / desktop management stubs ---
def GetThreadDesktop(ThreadId) -> _win32typing.PyHDESK: ...
def EnumWindowStations() -> tuple[tuple[str, Incomplete], ...]: ...
def GetUserObjectInformation(Handle: int, _type) -> None: ...
def SetUserObjectInformation(Handle: int, info, _type) -> None: ...
def OpenWindowStation(szWinSta, Inherit, DesiredAccess) -> _win32typing.PyHWINSTA: ...
def OpenDesktop(szDesktop, Flags, Inherit, DesiredAccess) -> _win32typing.PyHDESK: ...
def CreateDesktop(
    Desktop, Flags, DesiredAccess, SecurityAttributes: _win32typing.PySECURITY_ATTRIBUTES
) -> _win32typing.PyHDESK: ...
def OpenInputDesktop(Flags, Inherit, DesiredAccess) -> _win32typing.PyHDESK: ...
def GetProcessWindowStation() -> _win32typing.PyHWINSTA: ...
def CreateWindowStation(
    WindowStation, Flags, DesiredAccess, SecurityAttributes: _win32typing.PySECURITY_ATTRIBUTES
) -> _win32typing.PyHWINSTA: ...
# --- Service Control Manager stubs ---
def EnumServicesStatus(hSCManager: _win32typing.PySC_HANDLE, ServiceType, ServiceState) -> tuple[Incomplete, ...]: ...
def EnumServicesStatusEx(
    SCManager: _win32typing.PySC_HANDLE, ServiceType, ServiceState, InfoLevel, GroupName: Incomplete | None = ...
) -> tuple[Incomplete, ...]: ...
def EnumDependentServices(hService: _win32typing.PySC_HANDLE, ServiceState) -> tuple[Incomplete, ...]: ...
def QueryServiceConfig(hService: _win32typing.PySC_HANDLE): ...
def StartService(hService: _win32typing.PySC_HANDLE, args: Iterable[str] | None) -> None: ...
def OpenService(scHandle: _win32typing.PySC_HANDLE, name: str, desiredAccess) -> _win32typing.PySC_HANDLE: ...
def OpenSCManager(machineName: str | None, dbName: str | None, desiredAccess: int) -> _win32typing.PySC_HANDLE: ...
def CloseServiceHandle(scHandle: _win32typing.PySC_HANDLE) -> None: ...
def QueryServiceStatus(hService: _win32typing.PySC_HANDLE) -> _win32typing.SERVICE_STATUS: ...
def QueryServiceStatusEx(hService: _win32typing.PySC_HANDLE) -> _win32typing.SERVICE_STATUS: ...
def SetServiceObjectSecurity(
    Handle: _win32typing.PySC_HANDLE, SecurityInformation, SecurityDescriptor: _win32typing.PySECURITY_DESCRIPTOR
) -> None: ...
def QueryServiceObjectSecurity(Handle: _win32typing.PySC_HANDLE, SecurityInformation) -> _win32typing.PySECURITY_DESCRIPTOR: ...
def GetServiceKeyName(hSCManager: _win32typing.PySC_HANDLE, DisplayName): ...
def GetServiceDisplayName(hSCManager: _win32typing.PySC_HANDLE, ServiceName): ...
def SetServiceStatus(scHandle, serviceStatus: _win32typing.SERVICE_STATUS | tuple[int, int, int, int, int, int, int]) -> None: ...
# NOTE(review): signature (handle, control code) -> SERVICE_STATUS matches the
# Win32 ControlService API — confirm the intended name.
def METHOD_NAME(scHandle: _win32typing.PySC_HANDLE, code) -> _win32typing.SERVICE_STATUS: ...
def DeleteService(scHandle: _win32typing.PySC_HANDLE) -> None: ...
def CreateService(
    scHandle: _win32typing.PySC_HANDLE,
    name: str,
    displayName: str,
    desiredAccess: int,
    serviceType: int,
    startType: int,
    errorControl: int,
    binaryFile: str,
    loadOrderGroup: str | None,
    bFetchTag: bool,
    serviceDeps: Iterable[Incomplete] | None,
    acctName: str | None,
    password: str | None,
) -> _win32typing.PySC_HANDLE: ...
def ChangeServiceConfig(
    hService: _win32typing.PySC_HANDLE,
    serviceType: int,
    startType: int,
    errorControl: int,
    binaryFile: str | None,
    loadOrderGroup: str | None,
    bFetchTag: bool,
    serviceDeps: Iterable[Incomplete] | None,
    acctName: str | None,
    password: str | None,
    displayName: str | None,
): ...
def LockServiceDatabase(sc_handle: _win32typing.PySC_HANDLE): ...
def UnlockServiceDatabase(lock): ...
def QueryServiceLockStatus(hSCManager: _win32typing.PySC_HANDLE) -> tuple[Incomplete, str, Incomplete]: ...
def ChangeServiceConfig2(hService: _win32typing.PySC_HANDLE, InfoLevel, info) -> None: ...
def QueryServiceConfig2(hService: _win32typing.PySC_HANDLE, InfoLevel): ...
# Device-broadcast (DBT_*) event codes
DBT_CONFIGCHANGECANCELED: int
DBT_CONFIGCHANGED: int
DBT_CUSTOMEVENT: int
DBT_DEVICEARRIVAL: int
DBT_DEVICEQUERYREMOVE: int
DBT_DEVICEQUERYREMOVEFAILED: int
DBT_DEVICEREMOVECOMPLETE: int
DBT_DEVICEREMOVEPENDING: int
DBT_DEVICETYPESPECIFIC: int
DBT_QUERYCHANGECONFIG: int
# Desktop flag
DF_ALLOWOTHERACCOUNTHOOK: int
# Service failure-action types and enumeration options
SC_ACTION_NONE: int
SC_ACTION_REBOOT: int
SC_ACTION_RESTART: int
SC_ACTION_RUN_COMMAND: int
SC_ENUM_PROCESS_INFO: int
SC_GROUP_IDENTIFIER: int
# Service Control Manager access rights
SC_MANAGER_ALL_ACCESS: int
SC_MANAGER_CONNECT: int
SC_MANAGER_CREATE_SERVICE: int
SC_MANAGER_ENUMERATE_SERVICE: int
SC_MANAGER_LOCK: int
SC_MANAGER_MODIFY_BOOT_CONFIG: int
SC_MANAGER_QUERY_LOCK_STATUS: int
# Controls a service declares it accepts
SERVICE_ACCEPT_HARDWAREPROFILECHANGE: int
SERVICE_ACCEPT_NETBINDCHANGE: int
SERVICE_ACCEPT_PARAMCHANGE: int
SERVICE_ACCEPT_PAUSE_CONTINUE: int
SERVICE_ACCEPT_POWEREVENT: int
SERVICE_ACCEPT_PRESHUTDOWN: int
SERVICE_ACCEPT_SESSIONCHANGE: int
SERVICE_ACCEPT_SHUTDOWN: int
SERVICE_ACCEPT_STOP: int
# Service states, access rights, start types and error-control levels
SERVICE_ACTIVE: int
SERVICE_ALL_ACCESS: int
SERVICE_AUTO_START: int
SERVICE_BOOT_START: int
SERVICE_CHANGE_CONFIG: int
# ChangeServiceConfig2 / QueryServiceConfig2 info levels
SERVICE_CONFIG_DELAYED_AUTO_START_INFO: int
SERVICE_CONFIG_DESCRIPTION: int
SERVICE_CONFIG_FAILURE_ACTIONS: int
SERVICE_CONFIG_FAILURE_ACTIONS_FLAG: int
SERVICE_CONFIG_PRESHUTDOWN_INFO: int
SERVICE_CONFIG_REQUIRED_PRIVILEGES_INFO: int
SERVICE_CONFIG_SERVICE_SID_INFO: int
SERVICE_CONTINUE_PENDING: int
# Service control codes
SERVICE_CONTROL_CONTINUE: int
SERVICE_CONTROL_DEVICEEVENT: int
SERVICE_CONTROL_HARDWAREPROFILECHANGE: int
SERVICE_CONTROL_INTERROGATE: int
SERVICE_CONTROL_NETBINDADD: int
SERVICE_CONTROL_NETBINDDISABLE: int
SERVICE_CONTROL_NETBINDENABLE: int
SERVICE_CONTROL_NETBINDREMOVE: int
SERVICE_CONTROL_PARAMCHANGE: int
SERVICE_CONTROL_PAUSE: int
SERVICE_CONTROL_POWEREVENT: int
SERVICE_CONTROL_PRESHUTDOWN: int
SERVICE_CONTROL_SESSIONCHANGE: int
SERVICE_CONTROL_SHUTDOWN: int
SERVICE_CONTROL_STOP: int
SERVICE_DEMAND_START: int
SERVICE_DISABLED: int
SERVICE_DRIVER: int
SERVICE_ENUMERATE_DEPENDENTS: int
SERVICE_ERROR_CRITICAL: int
SERVICE_ERROR_IGNORE: int
SERVICE_ERROR_NORMAL: int
SERVICE_ERROR_SEVERE: int
SERVICE_FILE_SYSTEM_DRIVER: int
SERVICE_INACTIVE: int
SERVICE_INTERACTIVE_PROCESS: int
SERVICE_INTERROGATE: int
SERVICE_KERNEL_DRIVER: int
SERVICE_NO_CHANGE: int
SERVICE_PAUSE_CONTINUE: int
SERVICE_PAUSE_PENDING: int
SERVICE_PAUSED: int
SERVICE_QUERY_CONFIG: int
SERVICE_QUERY_STATUS: int
SERVICE_RUNNING: int
# Service SID types
SERVICE_SID_TYPE_NONE: int
SERVICE_SID_TYPE_RESTRICTED: int
SERVICE_SID_TYPE_UNRESTRICTED: int
SERVICE_SPECIFIC_ERROR: int
SERVICE_START: int
SERVICE_START_PENDING: int
SERVICE_STATE_ALL: int
SERVICE_STOP: int
SERVICE_STOP_PENDING: int
SERVICE_STOPPED: int
SERVICE_SYSTEM_START: int
SERVICE_USER_DEFINED_CONTROL: int
SERVICE_WIN32: int
SERVICE_WIN32_OWN_PROCESS: int
SERVICE_WIN32_SHARE_PROCESS: int
# GetUserObjectInformation info codes
UOI_FLAGS: int
UOI_NAME: int
UOI_TYPE: int
UOI_USER_SID: int
WSF_VISIBLE: int
# Aliases for the handle wrapper types
HDESKType = _win32typing.PyHDESK
HWINSTAType = _win32typing.PyHWINSTA
UNICODE: int
6,104 | presentation time range | # coding=utf-8
# *** WARNING: this file was generated by pulumi. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import copy
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
__all__ = [
'GetAccountFilterResult',
'AwaitableGetAccountFilterResult',
'get_account_filter',
'get_account_filter_output',
]
@pulumi.output_type
class GetAccountFilterResult:
    """
    An Account Filter.
    """
    def __init__(__self__, first_quality=None, id=None, name=None, METHOD_NAME=None, system_data=None, tracks=None, type=None):
        # Validate the loosely-typed invoke result before storing each field.
        if first_quality and not isinstance(first_quality, dict):
            raise TypeError("Expected argument 'first_quality' to be a dict")
        pulumi.set(__self__, "first_quality", first_quality)
        if id and not isinstance(id, str):
            raise TypeError("Expected argument 'id' to be a str")
        pulumi.set(__self__, "id", id)
        if name and not isinstance(name, str):
            raise TypeError("Expected argument 'name' to be a str")
        pulumi.set(__self__, "name", name)
        if METHOD_NAME and not isinstance(METHOD_NAME, dict):
            raise TypeError("Expected argument 'presentation_time_range' to be a dict")
        pulumi.set(__self__, "presentation_time_range", METHOD_NAME)
        if system_data and not isinstance(system_data, dict):
            raise TypeError("Expected argument 'system_data' to be a dict")
        pulumi.set(__self__, "system_data", system_data)
        if tracks and not isinstance(tracks, list):
            raise TypeError("Expected argument 'tracks' to be a list")
        pulumi.set(__self__, "tracks", tracks)
        if type and not isinstance(type, str):
            raise TypeError("Expected argument 'type' to be a str")
        pulumi.set(__self__, "type", type)

    @property
    @pulumi.getter(name="firstQuality")
    def first_quality(self) -> Optional['outputs.FirstQualityResponse']:
        """
        The first quality.
        """
        return pulumi.get(self, "first_quality")

    @property
    @pulumi.getter
    def id(self) -> str:
        """
        Fully qualified resource ID for the resource. Ex - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}
        """
        return pulumi.get(self, "id")

    @property
    @pulumi.getter
    def name(self) -> str:
        """
        The name of the resource
        """
        return pulumi.get(self, "name")

    @property
    @pulumi.getter(name="presentationTimeRange")
    def METHOD_NAME(self) -> Optional['outputs.PresentationTimeRangeResponse']:
        """
        The presentation time range.
        """
        return pulumi.get(self, "presentation_time_range")

    @property
    @pulumi.getter(name="systemData")
    def system_data(self) -> 'outputs.SystemDataResponse':
        """
        The system metadata relating to this resource.
        """
        return pulumi.get(self, "system_data")

    @property
    @pulumi.getter
    def tracks(self) -> Optional[Sequence['outputs.FilterTrackSelectionResponse']]:
        """
        The tracks selection conditions.
        """
        return pulumi.get(self, "tracks")

    @property
    @pulumi.getter
    def type(self) -> str:
        """
        The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or "Microsoft.Storage/storageAccounts"
        """
        return pulumi.get(self, "type")
class AwaitableGetAccountFilterResult(GetAccountFilterResult):
    """Awaitable wrapper: resolves immediately since the invoke already completed."""
    # pylint: disable=using-constant-test
    def __await__(self):
        # The `if False: yield` makes this a generator without ever yielding.
        if False:
            yield self
        return GetAccountFilterResult(
            first_quality=self.first_quality,
            id=self.id,
            name=self.name,
            METHOD_NAME=self.METHOD_NAME,
            system_data=self.system_data,
            tracks=self.tracks,
            type=self.type)
def get_account_filter(account_name: Optional[str] = None,
                       filter_name: Optional[str] = None,
                       resource_group_name: Optional[str] = None,
                       opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetAccountFilterResult:
    """
    Get the details of an Account Filter in the Media Services account.

    :param str account_name: The Media Services account name.
    :param str filter_name: The Account Filter name
    :param str resource_group_name: The name of the resource group within the Azure subscription.
    """
    __args__ = dict()
    __args__['accountName'] = account_name
    __args__['filterName'] = filter_name
    __args__['resourceGroupName'] = resource_group_name
    # Merge caller-supplied invoke options with provider defaults.
    opts = pulumi.InvokeOptions.merge(_utilities.get_invoke_opts_defaults(), opts)
    __ret__ = pulumi.runtime.invoke('azure-native:media/v20230101:getAccountFilter', __args__, opts=opts, typ=GetAccountFilterResult).value

    return AwaitableGetAccountFilterResult(
        first_quality=pulumi.get(__ret__, 'first_quality'),
        id=pulumi.get(__ret__, 'id'),
        name=pulumi.get(__ret__, 'name'),
        METHOD_NAME=pulumi.get(__ret__, 'presentation_time_range'),
        system_data=pulumi.get(__ret__, 'system_data'),
        tracks=pulumi.get(__ret__, 'tracks'),
        type=pulumi.get(__ret__, 'type'))
@_utilities.lift_output_func(get_account_filter)
def get_account_filter_output(account_name: Optional[pulumi.Input[str]] = None,
                              filter_name: Optional[pulumi.Input[str]] = None,
                              resource_group_name: Optional[pulumi.Input[str]] = None,
                              opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetAccountFilterResult]:
    """
    Get the details of an Account Filter in the Media Services account.

    :param str account_name: The Media Services account name.
    :param str filter_name: The Account Filter name
    :param str resource_group_name: The name of the resource group within the Azure subscription.
    """
    # Body supplied by the lift_output_func wrapper around get_account_filter.
    ...
6,105 | test mix adding removing tasks | from django.urls import reverse
from fiscal.forms import MembershipTaskForm
from fiscal.models import MembershipPersonRole, MembershipTask
from workshops.models import Membership
from workshops.tests.base import TestBase
class TestMembershipTaskFormLayout(TestBase):
    """Verify the crispy-forms layouts declared on ``MembershipTaskForm``."""

    def test_main_helper_layout(self):
        layout_fields = list(MembershipTaskForm().helper.layout)
        self.assertEqual(
            layout_fields,
            ["membership", "person", "role", "EDITABLE", "id", "DELETE"],
        )

    def test_empty_helper_layout(self):
        layout = MembershipTaskForm().helper_empty_form.layout
        self.assertEqual(len(layout), 5)
        # all entries but the last are plain field names
        self.assertEqual(list(layout)[:-1], ["membership", "person", "role", "id"])
        # the last entry is a layout object wrapping the DELETE field
        self.assertEqual(layout[-1].fields, ["DELETE"])
class TestMembershipTasks(TestBase):
    """Formset POSTs to the ``membership_tasks`` view: add, remove, and mix."""

    def setUp(self):
        super().setUp()
        self._setUpUsersAndLogin()
        self.membership = Membership.objects.create(
            public_status="public",
            variant="partner",
            agreement_start="2021-02-14",
            agreement_end="2022-02-14",
            contribution_type="financial",
            public_instructor_training_seats=0,
            additional_public_instructor_training_seats=0,
        )
        # any existing role works; tests only need one valid FK target
        self.membership_person_role = MembershipPersonRole.objects.first()

    def test_adding_new_tasks(self):
        """Posting two new (id-less) forms creates two membership tasks."""
        self.assertEqual(self.membership.membershiptask_set.count(), 0)
        data = {
            # Django formset management form fields
            "form-TOTAL_FORMS": 2,
            "form-INITIAL_FORMS": 0,
            "form-MIN_NUM_FORMS": 0,
            "form-MAX_NUM_FORMS": 1000,
            "form-0-membership": self.membership.pk,
            "form-0-person": self.hermione.pk,
            "form-0-role": self.membership_person_role.pk,
            "form-0-id": "",
            "form-0-EDITABLE": True,
            "form-1-membership": self.membership.pk,
            "form-1-person": self.harry.pk,
            "form-1-role": self.membership_person_role.pk,
            "form-1-id": "",
            "form-1-EDITABLE": True,
        }
        response = self.client.post(
            reverse("membership_tasks", args=[self.membership.pk]),
            data=data,
            follow=True,
        )
        self.assertRedirects(
            response, reverse("membership_details", args=[self.membership.pk])
        )
        self.assertEqual(self.membership.membershiptask_set.count(), 2)
        self.assertEqual(
            list(self.membership.persons.all()), [self.hermione, self.harry]
        )

    def test_removing_tasks(self):
        """Posting existing forms flagged DELETE removes both tasks."""
        mt1 = MembershipTask.objects.create(
            person=self.hermione,
            membership=self.membership,
            role=self.membership_person_role,
        )
        mt2 = MembershipTask.objects.create(
            person=self.harry,
            membership=self.membership,
            role=self.membership_person_role,
        )
        data = {
            "form-TOTAL_FORMS": 2,
            "form-INITIAL_FORMS": 2,
            "form-MIN_NUM_FORMS": 0,
            "form-MAX_NUM_FORMS": 1000,
            "form-0-membership": self.membership.pk,
            "form-0-person": mt1.person.pk,
            "form-0-role": mt1.role.pk,
            "form-0-id": mt1.pk,
            "form-0-EDITABLE": True,
            "form-0-DELETE": "on",
            "form-1-membership": self.membership.pk,
            "form-1-person": mt2.person.pk,
            "form-1-role": mt2.role.pk,
            "form-1-id": mt2.pk,
            "form-1-EDITABLE": True,
            "form-1-DELETE": "on",
        }
        response = self.client.post(
            reverse("membership_tasks", args=[self.membership.pk]),
            data=data,
            follow=True,
        )
        self.assertRedirects(
            response, reverse("membership_details", args=[self.membership.pk])
        )
        self.assertEqual(list(self.membership.persons.all()), [])

    def METHOD_NAME(self):
        """Deleting two existing tasks while adding a new one in the same POST."""
        mt1 = MembershipTask.objects.create(
            person=self.hermione,
            membership=self.membership,
            role=self.membership_person_role,
        )
        mt2 = MembershipTask.objects.create(
            person=self.harry,
            membership=self.membership,
            role=self.membership_person_role,
        )
        data = {
            "form-TOTAL_FORMS": 3,
            "form-INITIAL_FORMS": 2,
            "form-MIN_NUM_FORMS": 0,
            "form-MAX_NUM_FORMS": 1000,
            # forms 0/1: existing tasks marked for deletion
            "form-0-membership": self.membership.pk,
            "form-0-person": mt1.person.pk,
            "form-0-role": mt1.role.pk,
            "form-0-id": mt1.pk,
            "form-0-EDITABLE": True,
            "form-0-DELETE": "on",
            "form-1-membership": self.membership.pk,
            "form-1-person": mt2.person.pk,
            "form-1-role": mt2.role.pk,
            "form-1-id": mt2.pk,
            "form-1-EDITABLE": True,
            "form-1-DELETE": "on",
            # form 2: brand-new task for Ron
            "form-2-membership": self.membership.pk,
            "form-2-person": self.ron.pk,
            "form-2-role": self.membership_person_role.pk,
            "form-2-id": "",
            "form-2-EDITABLE": True,
        }
        response = self.client.post(
            reverse("membership_tasks", args=[self.membership.pk]),
            data=data,
            follow=True,
        )
        self.assertRedirects(
            response, reverse("membership_details", args=[self.membership.pk])
        )
        self.assertEqual(list(self.membership.persons.all()), [self.ron])
6,106 | reduce metrics | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import math
from fairseq import metrics, utils
from fairseq.criterions import register_criterion
from .label_smoothed_cross_entropy import (
LabelSmoothedCrossEntropyCriterion,
LabelSmoothedCrossEntropyCriterionConfig,
)
from dataclasses import dataclass, field
@dataclass
class LabelSmoothedCrossEntropyCriterionWithAlignmentConfig(
    LabelSmoothedCrossEntropyCriterionConfig
):
    """Config extending label-smoothed CE with a weight for the alignment loss."""

    # scaling factor applied to the alignment loss before adding it to the CE loss
    alignment_lambda: float = field(
        default=0.05, metadata={"help": "weight for the alignment loss"}
    )
@register_criterion(
    "label_smoothed_cross_entropy_with_alignment",
    dataclass=LabelSmoothedCrossEntropyCriterionWithAlignmentConfig,
)
class LabelSmoothedCrossEntropyCriterionWithAlignment(
    LabelSmoothedCrossEntropyCriterion
):
    """Label-smoothed cross-entropy with an additional attention-alignment loss."""

    def __init__(self, task, sentence_avg, label_smoothing, alignment_lambda):
        super().__init__(task, sentence_avg, label_smoothing)
        self.alignment_lambda = alignment_lambda

    def forward(self, model, sample, reduce=True):
        """Compute the loss for the given sample.
        Returns a tuple with three elements:
        1) the loss
        2) the sample size, which is used as the denominator for the gradient
        3) logging outputs to display while training
        """
        net_output = model(**sample["net_input"])
        loss, nll_loss = self.compute_loss(model, net_output, sample, reduce=reduce)
        # denominator for gradient normalization: sentences or tokens
        sample_size = (
            sample["target"].size(0) if self.sentence_avg else sample["ntokens"]
        )
        logging_output = {
            "loss": utils.item(loss.data) if reduce else loss.data,
            "nll_loss": utils.item(nll_loss.data) if reduce else nll_loss.data,
            "ntokens": sample["ntokens"],
            "nsentences": sample["target"].size(0),
            "sample_size": sample_size,
        }
        alignment_loss = None

        # Compute alignment loss only for training set and non dummy batches.
        if "alignments" in sample and sample["alignments"] is not None:
            alignment_loss = self.compute_alignment_loss(sample, net_output)

        if alignment_loss is not None:
            logging_output["alignment_loss"] = utils.item(alignment_loss.data)
            # alignment loss is added with configurable weight alignment_lambda
            loss += self.alignment_lambda * alignment_loss

        return loss, sample_size, logging_output

    def compute_alignment_loss(self, sample, net_output):
        """Negative log-likelihood of attention mass on supervised alignment pairs."""
        attn_prob = net_output[1]["attn"][0]
        bsz, tgt_sz, src_sz = attn_prob.shape
        # flatten batch and target dims so alignment indices address rows directly
        attn = attn_prob.view(bsz * tgt_sz, src_sz)

        align = sample["alignments"]
        align_weights = sample["align_weights"].float()

        if len(align) > 0:
            # Alignment loss computation. align (shape [:, 2]) contains the src-tgt index pairs corresponding to
            # the alignments. align_weights (shape [:]) contains the 1 / frequency of a tgt index for normalizing.
            loss = -(
                (attn[align[:, 1][:, None], align[:, 0][:, None]]).log()
                * align_weights[:, None]
            ).sum()
        else:
            return None

        return loss

    @staticmethod
    def METHOD_NAME(logging_outputs) -> None:
        """Aggregate logging outputs from data parallel training."""
        loss_sum = utils.item(sum(log.get("loss", 0) for log in logging_outputs))
        nll_loss_sum = utils.item(
            sum(log.get("nll_loss", 0) for log in logging_outputs)
        )
        alignment_loss_sum = utils.item(
            sum(log.get("alignment_loss", 0) for log in logging_outputs)
        )
        ntokens = utils.item(sum(log.get("ntokens", 0) for log in logging_outputs))
        sample_size = utils.item(
            sum(log.get("sample_size", 0) for log in logging_outputs)
        )

        # NOTE(review): divides by sample_size/ntokens without a zero guard —
        # presumably upstream guarantees non-empty batches; confirm.
        metrics.log_scalar(
            "loss", loss_sum / sample_size / math.log(2), sample_size, round=3
        )
        metrics.log_scalar(
            "nll_loss", nll_loss_sum / ntokens / math.log(2), ntokens, round=3
        )
        metrics.log_scalar(
            "alignment_loss",
            alignment_loss_sum / sample_size / math.log(2),
            sample_size,
            round=3,
        )
        metrics.log_derived(
            "ppl", lambda meters: utils.get_perplexity(meters["nll_loss"].avg)
        )

    @staticmethod
    def logging_outputs_can_be_summed() -> bool:
        """
        Whether the logging outputs returned by `forward` can be summed
        across workers prior to calling `reduce_metrics`. Setting this
        to True will improves distributed training speed.
        """
        return True
6,107 | primary key | # coding=utf-8
# *** WARNING: this file was generated by pulumi. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import copy
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
__all__ = [
'ListServiceTestKeysResult',
'AwaitableListServiceTestKeysResult',
'list_service_test_keys',
'list_service_test_keys_output',
]
@pulumi.output_type
class ListServiceTestKeysResult:
    """
    Test keys payload
    """
    # NOTE(review): auto-generated by pulumi (see file header) -- keep edits to
    # comments only and regenerate from the provider schema for real changes.
    def __init__(__self__, enabled=None, METHOD_NAME=None, primary_test_endpoint=None, secondary_key=None, secondary_test_endpoint=None):
        # Each argument is type-checked and stored via pulumi.set so the
        # @pulumi.getter properties below can retrieve it by name.
        if enabled and not isinstance(enabled, bool):
            raise TypeError("Expected argument 'enabled' to be a bool")
        pulumi.set(__self__, "enabled", enabled)
        if METHOD_NAME and not isinstance(METHOD_NAME, str):
            raise TypeError("Expected argument 'primary_key' to be a str")
        pulumi.set(__self__, "primary_key", METHOD_NAME)
        if primary_test_endpoint and not isinstance(primary_test_endpoint, str):
            raise TypeError("Expected argument 'primary_test_endpoint' to be a str")
        pulumi.set(__self__, "primary_test_endpoint", primary_test_endpoint)
        if secondary_key and not isinstance(secondary_key, str):
            raise TypeError("Expected argument 'secondary_key' to be a str")
        pulumi.set(__self__, "secondary_key", secondary_key)
        if secondary_test_endpoint and not isinstance(secondary_test_endpoint, str):
            raise TypeError("Expected argument 'secondary_test_endpoint' to be a str")
        pulumi.set(__self__, "secondary_test_endpoint", secondary_test_endpoint)
    @property
    @pulumi.getter
    def enabled(self) -> Optional[bool]:
        """
        Indicates whether the test endpoint feature enabled or not
        """
        return pulumi.get(self, "enabled")
    @property
    @pulumi.getter(name="primaryKey")
    def METHOD_NAME(self) -> Optional[str]:
        """
        Primary key
        """
        return pulumi.get(self, "primary_key")
    @property
    @pulumi.getter(name="primaryTestEndpoint")
    def primary_test_endpoint(self) -> Optional[str]:
        """
        Primary test endpoint
        """
        return pulumi.get(self, "primary_test_endpoint")
    @property
    @pulumi.getter(name="secondaryKey")
    def secondary_key(self) -> Optional[str]:
        """
        Secondary key
        """
        return pulumi.get(self, "secondary_key")
    @property
    @pulumi.getter(name="secondaryTestEndpoint")
    def secondary_test_endpoint(self) -> Optional[str]:
        """
        Secondary test endpoint
        """
        return pulumi.get(self, "secondary_test_endpoint")
class AwaitableListServiceTestKeysResult(ListServiceTestKeysResult):
    # Awaitable wrapper: lets callers `await` the invoke result. The
    # __await__ below resolves immediately to a plain result object.
    # pylint: disable=using-constant-test
    def __await__(self):
        if False:
            yield self
        return ListServiceTestKeysResult(
            enabled=self.enabled,
            METHOD_NAME=self.METHOD_NAME,
            primary_test_endpoint=self.primary_test_endpoint,
            secondary_key=self.secondary_key,
            secondary_test_endpoint=self.secondary_test_endpoint)
def list_service_test_keys(resource_group_name: Optional[str] = None,
                           service_name: Optional[str] = None,
                           opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableListServiceTestKeysResult:
    """
    List test keys for a Service.
    Azure REST API version: 2023-05-01-preview.
    :param str resource_group_name: The name of the resource group that contains the resource. You can obtain this value from the Azure Resource Manager API or the portal.
    :param str service_name: The name of the Service resource.
    """
    __args__ = dict()
    __args__['resourceGroupName'] = resource_group_name
    __args__['serviceName'] = service_name
    opts = pulumi.InvokeOptions.merge(_utilities.get_invoke_opts_defaults(), opts)
    # Synchronous provider invoke; the typ= argument deserializes the raw
    # response into ListServiceTestKeysResult.
    __ret__ = pulumi.runtime.invoke('azure-native:appplatform:listServiceTestKeys', __args__, opts=opts, typ=ListServiceTestKeysResult).value
    return AwaitableListServiceTestKeysResult(
        enabled=pulumi.get(__ret__, 'enabled'),
        METHOD_NAME=pulumi.get(__ret__, 'primary_key'),
        primary_test_endpoint=pulumi.get(__ret__, 'primary_test_endpoint'),
        secondary_key=pulumi.get(__ret__, 'secondary_key'),
        secondary_test_endpoint=pulumi.get(__ret__, 'secondary_test_endpoint'))
@_utilities.lift_output_func(list_service_test_keys)
def list_service_test_keys_output(resource_group_name: Optional[pulumi.Input[str]] = None,
                                  service_name: Optional[pulumi.Input[str]] = None,
                                  opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[ListServiceTestKeysResult]:
    """
    List test keys for a Service.
    Azure REST API version: 2023-05-01-preview.
    :param str resource_group_name: The name of the resource group that contains the resource. You can obtain this value from the Azure Resource Manager API or the portal.
    :param str service_name: The name of the Service resource.
    """
    # Body intentionally empty: lift_output_func wraps the sync function above
    # into an Output-returning variant.
    ...
6,108 | test random variable bcast | import numpy as np
import pytest
import aesara.tensor as at
from aesara import config, function
from aesara.gradient import NullTypeGradError, grad
from aesara.raise_op import Assert
from aesara.tensor.math import eq
from aesara.tensor.random.op import (
RandomState,
RandomVariable,
default_rng,
default_supp_shape_from_params,
)
from aesara.tensor.shape import specify_shape
from aesara.tensor.type import all_dtypes, iscalar, tensor
@pytest.fixture(scope="module", autouse=True)
def set_aesara_flags():
    # Force the Python (non-C) backend and eager test-value evaluation for
    # every test in this module.
    with config.change_flags(cxx="", compute_test_value="raise"):
        yield
def test_default_supp_shape_from_params():
    # ndim_supp=0 means there is no support shape to infer -> error.
    with pytest.raises(ValueError, match="^ndim_supp*"):
        default_supp_shape_from_params(0, (np.array([1, 2]), 0))
    res = default_supp_shape_from_params(
        1, (np.array([1, 2]), np.eye(2)), rep_param_idx=0
    )
    assert res == (2,)
    res = default_supp_shape_from_params(
        1, (np.array([1, 2]), 0), param_shapes=((2,), ())
    )
    assert res == (2,)
    # A scalar reference parameter cannot determine a vector support shape.
    with pytest.raises(ValueError, match="^Reference parameter*"):
        default_supp_shape_from_params(1, (np.array(1),), rep_param_idx=0)
    # Support shape is taken from the trailing ndim_supp dims of the
    # reference parameter.
    res = default_supp_shape_from_params(
        2, (np.array([1, 2]), np.ones((2, 3, 4))), rep_param_idx=1
    )
    assert res == (3, 4)
def test_RandomVariable_basics():
    str_res = str(
        RandomVariable(
            "normal",
            0,
            [0, 0],
            "float32",
            inplace=True,
        )
    )
    assert str_res == "normal_rv{0, (0, 0), float32, True}"
    # `ndims_params` should be a `Sequence` type
    with pytest.raises(TypeError, match="^Parameter ndims_params*"):
        RandomVariable(
            "normal",
            0,
            0,
            config.floatX,
            inplace=True,
        )
    # `size` should be a `Sequence` type
    with pytest.raises(TypeError, match="^Parameter size*"):
        RandomVariable(
            "normal",
            0,
            [0, 0],
            config.floatX,
            inplace=True,
        )(0, 1, size={1, 2})
    # No dtype
    with pytest.raises(TypeError, match="^dtype*"):
        RandomVariable(
            "normal",
            0,
            [0, 0],
            inplace=True,
        )(0, 1)
    # Confirm that `inplace` works
    rv = RandomVariable(
        "normal",
        0,
        [0, 0],
        "normal",
        inplace=True,
    )
    assert rv.inplace
    assert rv.destroy_map == {0: [0]}
    # A no-params `RandomVariable`
    rv = RandomVariable(name="test_rv", ndim_supp=0, ndims_params=())
    with pytest.raises(TypeError):
        rv.make_node(rng=1)
    # `RandomVariable._infer_shape` should handle no parameters
    rv_shape = rv._infer_shape(at.constant([]), (), [])
    assert rv_shape.equals(at.constant([], dtype="int64"))
    # Integer-specificed `dtype`
    dtype_1 = all_dtypes[1]
    rv_node = rv.make_node(None, None, 1)
    rv_out = rv_node.outputs[1]
    rv_out.tag.test_value = 1
    assert rv_out.dtype == dtype_1
    # An un-parameterized RV's output has no defined gradient.
    with pytest.raises(NullTypeGradError):
        grad(rv_out, [rv_node.inputs[0]])
def METHOD_NAME():
    # Broadcast flags of the RV output must follow the requested `size`:
    # symbolic sizes yield non-broadcastable dims; literal 1s broadcast.
    rv = RandomVariable("normal", 0, [0, 0], config.floatX, inplace=True)
    mu = tensor(config.floatX, shape=(1, None, None))
    mu.tag.test_value = np.zeros((1, 2, 3)).astype(config.floatX)
    sd = tensor(config.floatX, shape=(None, None))
    sd.tag.test_value = np.ones((2, 3)).astype(config.floatX)
    s1 = iscalar()
    s1.tag.test_value = 1
    s2 = iscalar()
    s2.tag.test_value = 2
    s3 = iscalar()
    s3.tag.test_value = 3
    s3 = Assert("testing")(s3, eq(s1, 1))
    res = rv(mu, sd, size=(s1, s2, s3))
    assert res.broadcastable == (False,) * 3
    size = at.as_tensor((1, 2, 3), dtype=np.int32).astype(np.int64)
    res = rv(mu, sd, size=size)
    assert res.broadcastable == (True, False, False)
    res = rv(0, 1, size=at.as_tensor(1, dtype=np.int64))
    assert res.broadcastable == (True,)
def test_RandomVariable_bcast_specify_shape():
    rv = RandomVariable("normal", 0, [0, 0], config.floatX, inplace=True)
    s1 = at.as_tensor(1, dtype=np.int64)
    s2 = iscalar()
    s2.tag.test_value = 2
    s3 = iscalar()
    s3.tag.test_value = 3
    s3 = Assert("testing")(s3, eq(s1, 1))
    # specify_shape pins the rank of the size vector, not its values.
    size = specify_shape(at.as_tensor([s1, s3, s2, s2, s1]), (5,))
    mu = tensor(config.floatX, shape=(None, None, 1))
    mu.tag.test_value = np.random.normal(size=(2, 2, 1)).astype(config.floatX)
    std = tensor(config.floatX, shape=(None, 1, 1))
    std.tag.test_value = np.ones((2, 1, 1)).astype(config.floatX)
    res = rv(mu, std, size=size)
    assert res.type.shape == (1, None, None, None, 1)
def test_RandomVariable_floatX():
    # A dtype of "floatX" must resolve lazily at call time, tracking the
    # current config.floatX flag.
    test_rv_op = RandomVariable(
        "normal",
        0,
        [0, 0],
        "floatX",
        inplace=True,
    )
    assert test_rv_op.dtype == "floatX"
    assert test_rv_op(0, 1).dtype == config.floatX
    new_floatX = "float64" if config.floatX == "float32" else "float32"
    with config.change_flags(floatX=new_floatX):
        assert test_rv_op(0, 1).dtype == new_floatX
@pytest.mark.parametrize(
    "seed, maker_op, numpy_res",
    [
        (3, RandomState, np.random.RandomState(3)),
        (3, default_rng, np.random.default_rng(3)),
    ],
)
def test_random_maker_op(seed, maker_op, numpy_res):
    # Seeded maker ops must reproduce the equivalent NumPy generator state.
    seed = at.as_tensor_variable(seed)
    z = function(inputs=[], outputs=[maker_op(seed)])()
    aes_res = z[0]
    assert maker_op.random_type.values_eq(aes_res, numpy_res)
def test_random_maker_ops_no_seed():
    # Testing the initialization when seed=None
    # Since internal states randomly generated,
    # we just check the output classes
    z = function(inputs=[], outputs=[RandomState()])()
    aes_res = z[0]
    assert isinstance(aes_res, np.random.RandomState)
    z = function(inputs=[], outputs=[default_rng()])()
    aes_res = z[0]
    assert isinstance(aes_res, np.random.Generator)
6,109 | to dense matrix | # The Uni-fold implementation is also open-sourced by the authors under Apache-2.0 license,
# and is publicly available at https://github.com/dptech-corp/Uni-Fold.
import copy as copy_lib
import functools
import gzip
import pickle
from typing import Any, Dict
import json
import numpy as np
from scipy import sparse as sp
from . import residue_constants as rc
from .data_ops import NumpyDict
# from typing import *
def lru_cache(maxsize=16, typed=False, copy=False, deepcopy=False):
    """Variant of :func:`functools.lru_cache` that can hand back copies.

    With ``deepcopy=True`` every call returns a deep copy of the cached
    value; with ``copy=True`` a shallow copy (``deepcopy`` wins if both are
    set). Otherwise this is exactly ``functools.lru_cache(maxsize, typed)``.
    Copying protects the cache from callers that mutate the result.
    """
    if deepcopy:
        copier = copy_lib.deepcopy
    elif copy:
        copier = copy_lib.copy
    else:
        copier = None
    if copier is None:
        return functools.lru_cache(maxsize, typed)
    def decorator(f):
        cached_func = functools.lru_cache(maxsize, typed)(f)
        @functools.wraps(f)
        def wrapper(*args, **kwargs):
            # Copy on the way out so the cached original stays pristine.
            return copier(cached_func(*args, **kwargs))
        return wrapper
    return decorator
@lru_cache(maxsize=8, deepcopy=True)
def load_pickle_safe(path: str) -> Dict[str, Any]:
    # Cached loader that returns a deep copy per call, so callers may mutate
    # the result freely. Accepts plain or gzip-compressed pickles.
    # SECURITY NOTE(review): pickle.load executes arbitrary code from the
    # file -- only use on trusted feature files.
    def load(path):
        assert path.endswith('.pkl') or path.endswith(
            '.pkl.gz'), f'bad suffix in {path} as pickle file.'
        open_fn = gzip.open if path.endswith('.gz') else open
        with open_fn(path, 'rb') as f:
            return pickle.load(f)
    ret = load(path)
    ret = uncompress_features(ret)
    return ret
@lru_cache(maxsize=8, copy=True)
def load_pickle(path: str) -> Dict[str, Any]:
    # Same as load_pickle_safe but returns a *shallow* copy: cheaper, yet
    # nested arrays are shared with the cache -- do not mutate them in place.
    # NOTE(review): body is duplicated from load_pickle_safe; consider a
    # shared helper if this file is ever refactored.
    def load(path):
        assert path.endswith('.pkl') or path.endswith(
            '.pkl.gz'), f'bad suffix in {path} as pickle file.'
        open_fn = gzip.open if path.endswith('.gz') else open
        with open_fn(path, 'rb') as f:
            return pickle.load(f)
    ret = load(path)
    ret = uncompress_features(ret)
    return ret
def correct_template_restypes(feature):
    """Correct template restype to have the same order as residue_constants."""
    # Collapse the trailing axis via argmax to integer indices, then remap
    # each index through the HHblits -> residue_constants permutation table.
    indices = np.argmax(feature, axis=-1).astype(np.int32)
    remap_table = rc.MAP_HHBLITS_AATYPE_TO_OUR_AATYPE
    return np.take(remap_table, indices.astype(np.int32), axis=0)
def convert_all_seq_feature(feature: NumpyDict) -> NumpyDict:
    """Return a new dict with every key suffixed ``_all_seq``.

    Also shrinks the MSA to uint8 and drops ``num_alignments``. Note the
    input dict is modified in place (msa cast, key removal) before the
    renamed copy is built.
    """
    feature['msa'] = feature['msa'].astype(np.uint8)
    feature.pop('num_alignments', None)
    def _suffixed(key):
        return key if key.endswith('_all_seq') else f'{key}_all_seq'
    return {_suffixed(key): value for key, value in feature.items()}
def METHOD_NAME(spmat_dict: NumpyDict):
    """Rebuild a dense float32 array from a COO-format sparse dict.

    Expects keys ``data``, ``row``, ``col`` and ``shape`` as produced by
    compress_features.
    """
    entries = (spmat_dict['data'], (spmat_dict['row'], spmat_dict['col']))
    coo = sp.coo_matrix(entries, shape=spmat_dict['shape'], dtype=np.float32)
    return coo.toarray()
# NOTE(review): FEATS_DTYPE is not referenced anywhere in this module's
# visible code -- confirm external users before removing.
FEATS_DTYPE = {'msa': np.int32}
def uncompress_features(feats: NumpyDict) -> NumpyDict:
    # Inverse of compress_features for the deletion matrix: rebuild the dense
    # 'deletion_matrix' from the sparse COO dict stored under
    # 'sparse_deletion_matrix_int'. Mutates and returns the same dict.
    if 'sparse_deletion_matrix_int' in feats:
        v = feats.pop('sparse_deletion_matrix_int')
        v = METHOD_NAME(v)
        feats['deletion_matrix'] = v
    return feats
def filter(feature: 'NumpyDict', **kwargs) -> 'NumpyDict':
    """Filter a feature dict by exactly one keyword mode.

    Exactly one of the following keyword arguments must be supplied:
      - ``desired_keys``: return a new dict keeping only these keys.
      - ``required_keys``: return the input unchanged, failing if any listed
        key is absent.
      - ``ignored_keys``: return a new dict dropping these keys.

    Raises:
        AssertionError: on wrong usage or a missing required key.
            (AssertionError is kept for backward compatibility with callers.)

    NOTE: this function intentionally shadows the ``filter`` builtin; the
    name is part of the module's public API and cannot change here.
    """
    # Plain `assert` statements are stripped under `python -O`; raise
    # explicitly so these usage checks always run.
    if len(kwargs) != 1:
        raise AssertionError(f'wrong usage of filter with kwargs: {kwargs}')
    if 'desired_keys' in kwargs:
        desired = kwargs['desired_keys']
        feature = {
            k: v
            for k, v in feature.items() if k in desired
        }
    elif 'required_keys' in kwargs:
        for k in kwargs['required_keys']:
            if k not in feature:
                raise AssertionError(f'cannot find required key {k}.')
    elif 'ignored_keys' in kwargs:
        ignored = kwargs['ignored_keys']
        feature = {
            k: v
            for k, v in feature.items() if k not in ignored
        }
    else:
        raise AssertionError(f'wrong usage of filter with kwargs: {kwargs}')
    return feature
def compress_features(features: NumpyDict):
    """Shrink selected feature arrays for storage.

    Casts ``msa`` to uint8 and stores ``deletion_matrix_int`` in sparse COO
    form under a ``sparse_``-prefixed key. All other entries pass through
    untouched; a new dict is returned.
    """
    change_dtype = {
        'msa': np.uint8,
    }
    sparse_keys = ['deletion_matrix_int']
    out = {}
    for key, value in features.items():
        if key in change_dtype:
            value = value.astype(change_dtype[key])
        if key in sparse_keys:
            coo = sp.coo_matrix(value, dtype=value.dtype)
            value = {
                'shape': coo.shape,
                'row': coo.row,
                'col': coo.col,
                'data': coo.data,
            }
            key = f'sparse_{key}'
        out[key] = value
    return out
6,110 | test call rpc crash handle generic | '''
Author: Ricardo Mateus <rmateus@suse.com>
'''
import pytest
from unittest.mock import MagicMock, patch, call
from . import mockery
mockery.setup_environment()
import sys
from ..modules import uyuni_config
from ..modules.uyuni_config import RPCClient, UyuniChannelsException, UyuniUsersException
class TestRPCClient:
    """
    Test RPCClient object
    """
    rpc_client = None
    @patch("src.modules.uyuni_config.ssl", MagicMock())
    @patch("src.modules.uyuni_config.xmlrpc", MagicMock())
    def setup_method(self, method):
        """
        Setup state per test.
        :param method:
        :return:
        """
        self.rpc_client = RPCClient(user="user", password="password", url="https://somewhere")
        self.rpc_client.conn.auth.login = MagicMock(return_value="My_token")
        # NOTE(review): the login mock above is discarded on the next line when
        # conn is replaced wholesale; each test re-mocks auth.login as needed.
        self.rpc_client.conn = MagicMock()
    def teardown_method(self, method):
        """
        Tear-down state per test.
        :param method:
        :return:
        """
        self.rpc_client = None
        # Reset pillar data so tests do not leak configuration into each other.
        uyuni_config.__pillar__ = {}
    def test_init_called(self):
        """
        Init method called
        :return:
        """
        assert self.rpc_client.get_user() == 'user'
        assert self.rpc_client.token is None
    def test_init_called_without_pillar(self):
        """
        Init method called without user password and without any pillar data
        :return:
        """
        with pytest.raises(UyuniUsersException):
            RPCClient(user="user")
    def test_init_called_with_pillar(self):
        """
        Init method called without user password and with pillar data defined
        :return:
        """
        uyuni_config.__pillar__ = {
            "uyuni": {
                "xmlrpc": {
                    "user": "admin_user",
                    "password": "password_user"
                }
            }
        }
        rpc_client = RPCClient(user="user")
        # Pillar credentials take precedence over the explicit user argument.
        assert rpc_client.get_user() == 'admin_user'
        assert rpc_client._user == 'admin_user'
        assert rpc_client._password == 'password_user'
        assert rpc_client.token is None
    def test_get_token(self):
        """
        Test get_token method with reuse token
        :return:
        """
        my_mock1 = MagicMock(return_value="My_Special_Token")
        my_mock2 = MagicMock(return_value="My_Special_Token_2")
        self.rpc_client.conn.auth.login = my_mock1
        token = self.rpc_client.get_token()
        assert my_mock1.call_count == 1
        assert token == "My_Special_Token"
        assert uyuni_config.__context__.get("uyuni.auth_token_user") == "My_Special_Token"
        # Second call must reuse the cached token (no extra login).
        self.rpc_client.get_token()
        assert my_mock1.call_count == 1
        self.rpc_client.conn.auth.login = my_mock2
        self.rpc_client.get_token()
        assert my_mock1.call_count == 1
        assert my_mock2.call_count == 0
        # Passing refresh=True forces a fresh login through the new mock.
        token = self.rpc_client.get_token(True)
        assert my_mock1.call_count == 1
        assert my_mock2.call_count == 1
        assert token == "My_Special_Token_2"
        assert uyuni_config.__context__.get("uyuni.auth_token_user") == "My_Special_Token_2"
    def test_call_rpc(self):
        """
        Call any XML-RPC method.
        :return:
        """
        self.rpc_client.token = "My_token"
        out = self.rpc_client("uyuni.some_method")
        mo = getattr(self.rpc_client.conn, "uyuni.some_method")
        assert out is not None
        assert mo.called
        # The auth token is always prepended to the RPC arguments.
        mo.assert_called_with("My_token")
        out2 = self.rpc_client("uyuni.some_method_2", "my_arg")
        mo2 = getattr(self.rpc_client.conn, "uyuni.some_method_2")
        assert out2 is not None
        assert mo2.called
        mo2.assert_called_with("My_token", "my_arg")
    def METHOD_NAME(self):
        """
        Handle XML-RPC method crash with generic error
        :return:
        """
        self.rpc_client.token = "the_token"
        exc = Exception("generic error when processing")
        # Fault 2951 is a non-auth failure: no re-login, error is re-raised.
        exc.faultCode = 2951
        setattr(self.rpc_client.conn, "uyuni.some_method",
                MagicMock(side_effect=exc))
        with patch("src.modules.uyuni_config.log") as logger:
            with pytest.raises(Exception):
                self.rpc_client("uyuni.some_method")
            mo = getattr(self.rpc_client.conn, "uyuni.some_method")
            assert mo.called
            mo.assert_called_with("the_token")
            assert logger.error.call_args[0] == ('Unable to call RPC function: %s', 'generic error when processing')
    def test_call_rpc_crash_handle_reauthenticate_error(self):
        """
        Handle XML-RPC method crash with reauthenticate error
        :return:
        """
        self.rpc_client.token = "the_token"
        self.rpc_client.conn.auth.login = MagicMock(return_value="the_token_new")
        exc = Exception("generic error when processing")
        # Fault 2950 signals an expired token: client re-logins and retries.
        exc.faultCode = 2950
        setattr(self.rpc_client.conn, "uyuni.some_method",
                MagicMock(side_effect=exc))
        with patch("src.modules.uyuni_config.log") as logger:
            with pytest.raises(Exception):
                self.rpc_client("uyuni.some_method")
            mo = getattr(self.rpc_client.conn, "uyuni.some_method")
            assert mo.call_count == 2
            mo.assert_has_calls([call("the_token"), call("the_token_new")])
            self.rpc_client.conn.auth.login.assert_called_once_with("user", "password")
            assert self.rpc_client.get_token() == "the_token_new"
            assert logger.error.call_args[0] == ('Unable to call RPC function: %s', 'generic error when processing')
    def test_call_rpc_handle_reauthenticate(self):
        """
        Handle XML-RPC method and reauthenticate
        :return:
        """
        self.rpc_client.token = "the_token"
        self.rpc_client.conn.auth.login = MagicMock(return_value="the_token_new")
        exc = Exception("generic error when processing")
        exc.faultCode = 2950
        # First call raises an auth fault, second (after re-login) succeeds.
        setattr(self.rpc_client.conn, "uyuni.some_method",
                MagicMock(side_effect=[exc, "return string"]))
        assert self.rpc_client.get_token() == "the_token"
        with patch("src.modules.uyuni_config.log") as logger:
            out = self.rpc_client("uyuni.some_method")
            mo = getattr(self.rpc_client.conn, "uyuni.some_method")
            assert out is not None
            assert out == 'return string'
            assert mo.call_count == 2
            mo.assert_has_calls([call("the_token"), call("the_token_new")])
            self.rpc_client.conn.auth.login.assert_called_once_with("user", "password")
            assert self.rpc_client.get_token() == "the_token_new"
            assert logger.warning.call_args[0] == ('Fall back to the second try due to %s', 'generic error when processing')
|
6,111 | preprocess uci adult | """
Utility functions to keep the example notebooks uncluttered with boilerplate.
"""
import re
from collections import OrderedDict
from pathlib import Path
from typing import Tuple
import numpy as np
import pandas as pd
from sklearn.metrics import confusion_matrix
# Local cache directory for downloaded dataset files.
DATA_DIR = Path(__file__).parent / "data"
UCI_ADULT_TARGET_COL = "target"
def load_uci_adult() -> Tuple[pd.DataFrame, pd.DataFrame]:
    """Downloads and pre-processes the UCI Adult dataset.
    Returns
    -------
    train_set, test_set : tuple[pd.DataFrame, pd.DataFrame]
        The pre-processed train and test datasets.
    """
    # `wget` is imported lazily so the rest of the module works without it.
    try:
        import wget
    except ModuleNotFoundError as err:
        print(f"Downloading this dataset requires the `wget` python package; got \"{err}\"")
    # URLs for downloading dataset
    base_url = "https://archive.ics.uci.edu/ml/machine-learning-databases/adult/"
    train_url = base_url + "adult.data"
    test_url = base_url + "adult.test"
    names_url = base_url + "adult.names"
    # Make local data directory
    DATA_DIR.mkdir(exist_ok=True)
    # Download data
    train_path = wget.download(train_url, str(DATA_DIR))
    test_path = wget.download(test_url, str(DATA_DIR))
    names_path = wget.download(names_url, str(DATA_DIR))
    # adult.test carries one extra header line, hence skiprows=1.
    return (
        METHOD_NAME(train_path, names_path),
        METHOD_NAME(test_path, names_path, skiprows=1),
    )
def METHOD_NAME(data_path, names_path, **read_kwargs) -> pd.DataFrame:
    """Parse one UCI Adult CSV into a typed DataFrame.

    Column names and categorical domains are scraped from the `adult.names`
    file; extra keyword args are forwarded to pandas.read_csv.
    """
    # Load column names
    column_map = OrderedDict()
    # Matches lines like "workclass: Private, Self-emp-not-inc, ...".
    line_regexp = re.compile(r"^([-\w]+): (.*)[.]$")
    with open(names_path, "r") as f_in:
        lines = f_in.readlines()
    for l in lines:
        match = line_regexp.match(l)
        if not match: continue
        col_name = match.group(1)
        col_values = match.group(2).split(", ")
        # A single value means "continuous" (numeric column).
        if len(col_values) == 1:
            col_values = col_values[0]
        column_map[col_name] = col_values
    # Last column is the target
    column_map[UCI_ADULT_TARGET_COL] = ["<=50K", ">50K"]
    # Load data
    data = pd.read_csv(
        data_path,
        header=None,
        names=list(column_map.keys()),
        index_col=None,
        **read_kwargs)
    # Set correct dtypes
    data = data.astype({
        col_name: (
            float if col_value == "continuous" else "category"
        ) for col_name, col_value in column_map.items()
    })
    # Strip whitespace from categorical values
    # NOTE(review): pd.api.types.is_categorical_dtype is deprecated in
    # pandas 2.x -- prefer isinstance(dtype, pd.CategoricalDtype) on upgrade.
    for col in data.columns:
        if pd.api.types.is_categorical_dtype(data[col]):
            data[col] = data[col].map(lambda val: val.strip())
    return data
def compute_fairness_ratio(y_true: np.ndarray, y_pred: np.ndarray, s_true, metric: str) -> float:
    """Compute fairness as the min/max group-wise ratio of a rate metric.

    Parameters
    ----------
    y_true : np.ndarray
        The true labels.
    y_pred : np.ndarray
        The binarized predictions.
    s_true : np.ndarray
        The sensitive attribute column.
    metric : str
        One of "fpr", "fnr", "tpr", "tnr" (case-insensitive).

    Returns
    -------
    float
        The fairness ratio, between 0 and 1 (1 means perfectly equal rates).
    """
    metric = metric.lower()
    valid_perf_metrics = ("fpr", "fnr", "tpr", "tnr")

    def compute_metric(labels, preds):
        # Confusion-matrix counts for this group; only the requested rate
        # is computed, keeping the lazy validation of the original API.
        tn, fp, fn, tp = confusion_matrix(labels, preds).ravel()
        if metric == "fpr":
            return fp / (fp + tn)
        if metric == "tnr":
            return tn / (fp + tn)
        if metric == "fnr":
            return fn / (fn + tp)
        if metric == "tpr":
            return tp / (fn + tp)
        raise ValueError(f"Invalid metric chosen; must be one of {valid_perf_metrics}; got '{metric}'")

    per_group = []
    for group in pd.Series(s_true).unique():
        mask = (s_true == group)
        per_group.append(compute_metric(y_true[mask], y_pred[mask]))
    return min(per_group) / max(per_group)
6,112 | test dont send maxpagesize | # --------------------------------------------------------------------------
#
# Copyright (c) Microsoft Corporation. All rights reserved.
#
# The MIT License (MIT)
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the ""Software""), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED *AS IS*, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
# --------------------------------------------------------------------------
from pagingversiontolerant import AutoRestPagingTestService
from custombaseurlpagingversiontolerant import AutoRestParameterizedHostTestPagingClient
from azure.core.exceptions import HttpResponseError
import pytest
@pytest.fixture
def client():
    # Sync client against the default paging test server.
    with AutoRestPagingTestService() as client:
        yield client
@pytest.fixture
def custom_url_client():
    # Client parameterized with a custom host, for base-URL paging scenarios.
    with AutoRestParameterizedHostTestPagingClient(host="host:3000") as client:
        yield client
def test_get_no_item_name_pages(client):
    pages = client.paging.get_no_item_name_pages()
    items = [i for i in pages]
    assert len(items) == 1
    assert items[0]["properties"]["id"] == 1
    assert items[0]["properties"]["name"] == "Product"
def test_get_null_next_link_name_pages(client):
    pages = client.paging.get_null_next_link_name_pages()
    items = [i for i in pages]
    assert len(items) == 1
    assert items[0]["properties"]["id"] == 1
    assert items[0]["properties"]["name"] == "Product"
def test_get_empty_next_link_name_pages(client):
    pages = client.paging.get_empty_next_link_name_pages()
    items = [i for i in pages]
    assert len(items) == 1
    assert items[0]["properties"]["id"] == 1
    assert items[0]["properties"]["name"] == "Product"
def test_get_single_pages_with_cb(client):
    # The cls= callback post-processes each page before it is yielded.
    def cb(list_of_obj):
        for obj in list_of_obj:
            obj["marked"] = True
        return list_of_obj
    pages = client.paging.get_single_pages(cls=cb)
    assert all(obj["marked"] for obj in pages)
def test_get_single_pages(client):
    pages = client.paging.get_single_pages()
    items = [i for i in pages]
    assert len(items) == 1
    assert items[0]["properties"]["id"] == 1
    assert items[0]["properties"]["name"] == "Product"
def test_get_single_pages_with_body_params(client):
    pages = client.paging.get_single_pages_with_body_params({"name": "body"})
    items = [i for i in pages]
    assert len(items) == 1
    assert items[0]["properties"]["id"] == 1
    assert items[0]["properties"]["name"] == "Product"
def test_get_multiple_pages(client):
    pages = client.paging.get_multiple_pages()
    items = [i for i in pages]
    assert len(items) == 10
def test_query_params(client):
    pages = client.paging.get_with_query_params(required_query_parameter='100')
    items = [i for i in pages]
    assert len(items) == 2
def test_get_odata_multiple_pages(client):
    pages = client.paging.get_odata_multiple_pages()
    items = [i for i in pages]
    assert len(items) == 10
def test_get_multiple_pages_retry_first(client):
    # Server fails the first request; the retry policy must recover.
    pages = client.paging.get_multiple_pages_retry_first()
    items = [i for i in pages]
    assert len(items) == 10
def test_get_multiple_pages_retry_second(client):
    pages = client.paging.get_multiple_pages_retry_second()
    items = [i for i in pages]
    assert len(items) == 10
def test_get_multiple_pages_with_offset(client):
    pages = client.paging.get_multiple_pages_with_offset(offset=100)
    items = [i for i in pages]
    assert len(items) == 10
    assert items[-1]["properties"]["id"] == 110
def test_get_single_pages_failure(client):
    # Errors surface lazily, on iteration -- not when the pager is created.
    pages = client.paging.get_single_pages_failure()
    with pytest.raises(HttpResponseError):
        list(pages)
def test_get_multiple_pages_failure(client):
    pages = client.paging.get_multiple_pages_failure()
    with pytest.raises(HttpResponseError):
        list(pages)
def test_get_multiple_pages_failure_uri(client):
    pages = client.paging.get_multiple_pages_failure_uri()
    with pytest.raises(HttpResponseError):
        list(pages)
def test_paging_fragment_path(client):
    pages = client.paging.get_multiple_pages_fragment_next_link(api_version="1.6", tenant="test_user")
    items = [i for i in pages]
    assert len(items) == 10
    with pytest.raises(AttributeError):
        # Be sure this method is not generated (Transform work)
        client.paging.get_multiple_pages_fragment_next_link_next()  # pylint: disable=E1101
def test_custom_url_get_pages_partial_url(custom_url_client):
    paged = list(custom_url_client.paging.get_pages_partial_url("local"))
    assert len(paged) == 2
    assert paged[0]["properties"]["id"] == 1
    assert paged[1]["properties"]["id"] == 2
def test_custom_url_get_pages_partial_url_operation(custom_url_client):
    paged = list(custom_url_client.paging.get_pages_partial_url_operation("local"))
    assert len(paged) == 2
    assert paged[0]["properties"]["id"] == 1
    assert paged[1]["properties"]["id"] == 2
def test_get_multiple_pages_lro(client):
    """LRO + Paging at the same time.
    """
    from azure.mgmt.core.polling.arm_polling import ARMPolling
    # timeout=0 makes the poller spin without sleeping, keeping the test fast.
    poller = client.paging.begin_get_multiple_pages_lro(polling=ARMPolling(timeout=0, request_id="test"))
    pager = poller.result()
    items = list(pager)
    assert len(items) == 10
    assert items[0]["properties"]["id"] == 1
    assert items[1]["properties"]["id"] == 2
def test_item_name_with_xms_client_name(client):
    pages = client.paging.get_paging_model_with_item_name_with_xms_client_name()
    items = [i for i in pages]
    assert len(items) == 1
def test_initial_response_no_items(client):
    # First page is empty; items arrive only on the follow-up page.
    pages = client.paging.first_response_empty()
    items = [i for i in pages]
    assert len(items) == 1
def test_duplicate_params(client):
    pages = list(client.paging.duplicate_params(filter="foo"))
    assert len(pages) == 1
    assert pages[0]["properties"]["id"] == 1
    assert pages[0]["properties"]["name"] == "Product"
def METHOD_NAME(client):
    # Smoke test: iterating succeeds only when the client omits maxpagesize
    # by default (the test server validates this -- presumed from the
    # operation name; server behavior not visible here).
    list(client.paging.page_with_max_page_size())
def test_append_api_version(client):
    pages = list(client.paging.append_api_version())
    assert len(pages) == 1
    assert pages[0]["properties"]["id"] == 1
    assert pages[0]["properties"]["name"] == "Product"
def test_replace_api_version(client):
    pages = list(client.paging.replace_api_version())
    assert len(pages) == 1
    assert pages[0]["properties"]["id"] == 1
    assert pages[0]["properties"]["name"] == "Product"
6,113 | test infrastructure required properties | import datetime as dt
import pytest
import pytz
import stix2
from .constants import FAKE_TIME, INFRASTRUCTURE_ID, INFRASTRUCTURE_KWARGS
EXPECTED_INFRASTRUCTURE = """{
"type": "infrastructure",
"spec_version": "2.1",
"id": "infrastructure--3000ae1b-784c-f03d-8abc-0a625b2ff018",
"created": "2017-01-01T12:34:56.000Z",
"modified": "2017-01-01T12:34:56.000Z",
"name": "Poison Ivy C2"
}"""
def test_infrastructure_with_all_required_properties():
now = dt.datetime(2017, 1, 1, 12, 34, 56, tzinfo=pytz.utc)
infra = stix2.v21.Infrastructure(
type="infrastructure",
id=INFRASTRUCTURE_ID,
created=now,
modified=now,
name="Poison Ivy C2",
)
assert infra.serialize(pretty=True) == EXPECTED_INFRASTRUCTURE
def test_infrastructure_autogenerated_properties(infrastructure):
    # The ``infrastructure`` fixture is built with a fixed clock (FAKE_TIME)
    # and deterministic id sequence, so the generated fields are stable.
    # Attribute access and mapping access must expose identical values.
    assert infrastructure.type == 'infrastructure'
    assert infrastructure.id == 'infrastructure--00000000-0000-4000-8000-000000000001'
    assert infrastructure.created == FAKE_TIME
    assert infrastructure.modified == FAKE_TIME
    assert infrastructure.infrastructure_types == ['command-and-control']
    assert infrastructure.name == "Poison Ivy C2"
    assert infrastructure['type'] == 'infrastructure'
    assert infrastructure['id'] == 'infrastructure--00000000-0000-4000-8000-000000000001'
    assert infrastructure['created'] == FAKE_TIME
    assert infrastructure['modified'] == FAKE_TIME
    assert infrastructure['infrastructure_types'] == ['command-and-control']
    assert infrastructure['name'] == "Poison Ivy C2"
def test_infrastructure_type_must_be_infrastructure():
    """The ``type`` property is pinned to the literal 'infrastructure'."""
    with pytest.raises(stix2.exceptions.InvalidValueError) as excinfo:
        stix2.v21.Infrastructure(type='xxx', **INFRASTRUCTURE_KWARGS)
    err = excinfo.value
    assert err.cls == stix2.v21.Infrastructure
    assert err.prop_name == "type"
    assert err.reason == "must equal 'infrastructure'."
    assert str(err) == "Invalid value for Infrastructure 'type': must equal 'infrastructure'."
def test_infrastructure_id_must_start_with_infrastructure():
    # The id prefix must match the object type.
    with pytest.raises(stix2.exceptions.InvalidValueError) as excinfo:
        stix2.v21.Infrastructure(id='my-prefix--', **INFRASTRUCTURE_KWARGS)
    assert excinfo.value.cls == stix2.v21.Infrastructure
    assert excinfo.value.prop_name == "id"
    assert excinfo.value.reason == "must start with 'infrastructure--'."
    assert str(excinfo.value) == "Invalid value for Infrastructure 'id': must start with 'infrastructure--'."
def METHOD_NAME():
    # Constructing with no properties at all must report the single missing
    # required property: ``name``.
    with pytest.raises(stix2.exceptions.MissingPropertiesError) as excinfo:
        stix2.v21.Infrastructure()
    assert excinfo.value.cls == stix2.v21.Infrastructure
    assert excinfo.value.properties == ["name"]
def test_infrastructure_required_property_name():
    # Even with optional properties supplied, omitting ``name`` is an error.
    with pytest.raises(stix2.exceptions.MissingPropertiesError) as excinfo:
        stix2.v21.Infrastructure(infrastructure_types=['command-and-control'])
    assert excinfo.value.cls == stix2.v21.Infrastructure
    assert excinfo.value.properties == ["name"]
def test_invalid_kwarg_to_infrastructure():
    # Unknown keyword arguments are rejected (no silent custom properties).
    with pytest.raises(stix2.exceptions.ExtraPropertiesError) as excinfo:
        stix2.v21.Infrastructure(my_custom_property="foo", **INFRASTRUCTURE_KWARGS)
    assert excinfo.value.cls == stix2.v21.Infrastructure
    assert excinfo.value.properties == ['my_custom_property']
    assert str(excinfo.value) == "Unexpected properties for Infrastructure: (my_custom_property)."
@pytest.mark.parametrize(
    "data", [
        EXPECTED_INFRASTRUCTURE,
        {
            "type": "infrastructure",
            "spec_version": "2.1",
            "id": INFRASTRUCTURE_ID,
            "created": "2017-01-01T12:34:56.000Z",
            "modified": "2017-01-01T12:34:56.000Z",
            "name": "Poison Ivy C2",
        },
    ],
)
def test_parse_infrastructure(data):
    # Both the JSON string and the equivalent dict must parse to the same
    # Infrastructure object.
    infra = stix2.parse(data)
    assert infra.type == 'infrastructure'
    assert infra.spec_version == '2.1'
    assert infra.id == INFRASTRUCTURE_ID
    assert infra.created == dt.datetime(2017, 1, 1, 12, 34, 56, tzinfo=pytz.utc)
    assert infra.modified == dt.datetime(2017, 1, 1, 12, 34, 56, tzinfo=pytz.utc)
    assert infra.name == 'Poison Ivy C2'
def test_parse_infrastructure_kill_chain_phases():
    # Splice a kill_chain_phases entry into the canonical JSON and verify
    # both attribute and mapping access after parsing.
    kill_chain = """
    "kill_chain_phases": [
        {
            "kill_chain_name": "lockheed-martin-cyber-kill-chain",
            "phase_name": "reconnaissance"
        }
    ]"""
    data = EXPECTED_INFRASTRUCTURE.replace('infrastructure"', 'infrastructure",%s' % kill_chain)
    infra = stix2.parse(data, version="2.1")
    assert infra.kill_chain_phases[0].kill_chain_name == "lockheed-martin-cyber-kill-chain"
    assert infra.kill_chain_phases[0].phase_name == "reconnaissance"
    assert infra['kill_chain_phases'][0]['kill_chain_name'] == "lockheed-martin-cyber-kill-chain"
    assert infra['kill_chain_phases'][0]['phase_name'] == "reconnaissance"
def test_parse_infrastructure_clean_kill_chain_phases():
    # A numeric phase_name is coerced to its string form on parse.
    kill_chain = """
    "kill_chain_phases": [
        {
            "kill_chain_name": "lockheed-martin-cyber-kill-chain",
            "phase_name": 1
        }
    ]"""
    data = EXPECTED_INFRASTRUCTURE.replace('2.1"', '2.1",%s' % kill_chain)
    infra = stix2.parse(data, version="2.1")
    assert infra['kill_chain_phases'][0]['phase_name'] == "1"
def test_infrastructure_invalid_last_before_first():
    # last_seen earlier than first_seen must be rejected.
    with pytest.raises(ValueError) as excinfo:
        stix2.v21.Infrastructure(first_seen="2017-01-01T12:34:56.000Z", last_seen="2017-01-01T12:33:56.000Z", **INFRASTRUCTURE_KWARGS)
    assert "'last_seen' must be greater than or equal to 'first_seen'" in str(excinfo.value)
6,114 | test schedule send | # Copyright (c) 2021, Frappe Technologies Pvt. Ltd. and Contributors
# MIT License. See LICENSE
from random import choice
from unittest.mock import MagicMock, PropertyMock, patch
import frappe
from frappe.email.doctype.newsletter.exceptions import (
NewsletterAlreadySentError,
NoRecipientFoundError,
)
from frappe.email.doctype.newsletter.newsletter import (
Newsletter,
confirmed_unsubscribe,
send_scheduled_email,
)
from frappe.email.queue import flush
from frappe.tests.utils import FrappeTestCase
from frappe.utils import add_days, getdate
# Addresses enrolled into "_Test Email Group" before every test.
emails = [
    "test_subscriber1@example.com",
    "test_subscriber2@example.com",
    "test_subscriber3@example.com",
    "test1@example.com",
]
# Names of newsletters created during tests; drained again by tearDown.
newsletters = []
def get_dotted_path(obj: object) -> str:
    """Return the fully qualified dotted path of *obj*'s class.

    ``obj`` is an *instance* — its ``__class__`` is inspected — so the
    previous ``obj: type`` annotation was wrong.  Builtins are returned
    bare (e.g. ``str``), everything else as ``module.QualName``.
    """
    klass = obj.__class__
    module = klass.__module__
    if module == "builtins":
        return klass.__qualname__  # avoid outputs like 'builtins.str'
    return f"{module}.{klass.__qualname__}"
class TestNewsletterMixin:
    """setUp/tearDown plus factory helpers shared by Newsletter test cases."""

    def setUp(self):
        frappe.set_user("Administrator")
        self.setup_email_group()

    def tearDown(self):
        frappe.set_user("Administrator")
        # Iterate a snapshot: the loop removes entries from the module-level
        # ``newsletters`` list while walking it, and mutating the list being
        # iterated silently skips every other newsletter.
        for newsletter in list(newsletters):
            frappe.db.delete(
                "Email Queue",
                {
                    "reference_doctype": "Newsletter",
                    "reference_name": newsletter,
                },
            )
            frappe.delete_doc("Newsletter", newsletter)
            frappe.db.delete("Newsletter Email Group", {"parent": newsletter})
            newsletters.remove(newsletter)

    def setup_email_group(self):
        """Ensure "_Test Email Group" exists and every test address is an
        enrolled, subscribed member."""
        if not frappe.db.exists("Email Group", "_Test Email Group"):
            frappe.get_doc({"doctype": "Email Group", "title": "_Test Email Group"}).insert()
        for email in emails:
            doctype = "Email Group Member"
            email_filters = {"email": email, "email_group": "_Test Email Group"}
            savepoint = "setup_email_group"
            frappe.db.savepoint(savepoint)
            try:
                frappe.get_doc(
                    {
                        "doctype": doctype,
                        **email_filters,
                    }
                ).insert(ignore_if_duplicate=True)
            except Exception:
                # The member already exists: roll back the failed insert and
                # re-mark the member as subscribed instead.
                frappe.db.rollback(save_point=savepoint)
                frappe.db.set_value(doctype, email_filters, "unsubscribed", 0)
            frappe.db.release_savepoint(savepoint)

    def send_newsletter(self, published=0, schedule_send=None) -> "Newsletter | str":
        """Create a fresh newsletter and send (or schedule) it.

        Returns the newsletter *name* when sent immediately, or the
        Newsletter document when ``schedule_send`` is given.  (The previous
        ``str | None`` annotation matched neither return path.)
        """
        frappe.db.delete("Email Queue")
        frappe.db.delete("Email Queue Recipient")
        frappe.db.delete("Newsletter")
        newsletter_options = {
            "published": published,
            "schedule_sending": bool(schedule_send),
            "schedule_send": schedule_send,
        }
        newsletter = self.get_newsletter(**newsletter_options)
        if schedule_send:
            send_scheduled_email()
        else:
            newsletter.send_emails()
            return newsletter.name
        return newsletter

    @staticmethod
    def get_newsletter(**kwargs) -> "Newsletter":
        """Generate and return Newsletter object"""
        doctype = "Newsletter"
        newsletter_content = {
            "subject": "_Test Newsletter",
            "sender_name": "Test Sender",
            "sender_email": "test_sender@example.com",
            "content_type": "Rich Text",
            "message": "Testing my news.",
        }
        # Delete leftovers from earlier runs that share the same content.
        similar_newsletters = frappe.get_all(doctype, newsletter_content, pluck="name")
        for similar_newsletter in similar_newsletters:
            frappe.delete_doc(doctype, similar_newsletter)
        newsletter = frappe.get_doc({"doctype": doctype, **newsletter_content, **kwargs})
        newsletter.append("email_group", {"email_group": "_Test Email Group"})
        newsletter.save(ignore_permissions=True)
        newsletter.reload()
        newsletters.append(newsletter.name)
        # Drop any files auto-attached on save so tests start clean.
        attached_files = frappe.get_all(
            "File",
            {
                "attached_to_doctype": newsletter.doctype,
                "attached_to_name": newsletter.name,
            },
            pluck="name",
        )
        for file in attached_files:
            frappe.delete_doc("File", file)
        return newsletter
class TestNewsletter(TestNewsletterMixin, FrappeTestCase):
    """Behavioral tests for Newsletter sending, scheduling and retries."""

    def test_send(self):
        # One queue entry per subscribed address is expected.
        self.send_newsletter()
        email_queue_list = [frappe.get_doc("Email Queue", e.name) for e in frappe.get_all("Email Queue")]
        self.assertEqual(len(email_queue_list), 4)
        recipients = {e.recipients[0].recipient for e in email_queue_list}
        self.assertTrue(set(emails).issubset(recipients))

    def test_unsubscribe(self):
        # Send once, unsubscribe one random address, then make sure the
        # second send excludes exactly that address.
        name = self.send_newsletter()
        to_unsubscribe = choice(emails)
        group = frappe.get_all(
            "Newsletter Email Group", filters={"parent": name}, fields=["email_group"]
        )
        flush(from_test=True)
        confirmed_unsubscribe(to_unsubscribe, group[0].email_group)
        name = self.send_newsletter()
        email_queue_list = [frappe.get_doc("Email Queue", e.name) for e in frappe.get_all("Email Queue")]
        self.assertEqual(len(email_queue_list), 3)
        recipients = [e.recipients[0].recipient for e in email_queue_list]
        for email in emails:
            if email != to_unsubscribe:
                self.assertTrue(email in recipients)

    def METHOD_NAME(self):
        # Scheduled send: backdate the schedule, then the cron entry point
        # must queue emails for everyone.
        newsletter = self.send_newsletter(schedule_send=add_days(getdate(), 1))
        newsletter.db_set("schedule_send", add_days(getdate(), -1))  # Set date in past
        send_scheduled_email()
        email_queue_list = [frappe.get_doc("Email Queue", e.name) for e in frappe.get_all("Email Queue")]
        self.assertEqual(len(email_queue_list), 4)
        recipients = [e.recipients[0].recipient for e in email_queue_list]
        for email in emails:
            self.assertTrue(email in recipients)

    def test_newsletter_send_test_email(self):
        """Test "Send Test Email" functionality of Newsletter"""
        newsletter = self.get_newsletter()
        test_email = choice(emails)
        newsletter.send_test_email(test_email)
        # A test email must not mark the newsletter as sent, nor save it.
        self.assertFalse(newsletter.email_sent)
        newsletter.save = MagicMock()
        self.assertFalse(newsletter.save.called)
        # check if the test email is in the queue
        email_queue = frappe.get_all(
            "Email Queue",
            filters=[
                ["reference_doctype", "=", "Newsletter"],
                ["reference_name", "=", newsletter.name],
                ["Email Queue Recipient", "recipient", "=", test_email],
            ],
        )
        self.assertTrue(email_queue)

    def test_newsletter_status(self):
        """Test for Newsletter's stats on onload event"""
        newsletter = self.get_newsletter()
        newsletter.email_sent = True
        result = newsletter.get_sending_status()
        self.assertTrue("total" in result)
        self.assertTrue("sent" in result)

    def test_already_sent_newsletter(self):
        # A second send of the same newsletter must raise.
        newsletter = self.get_newsletter()
        newsletter.send_emails()
        with self.assertRaises(NewsletterAlreadySentError):
            newsletter.send_emails()

    def test_newsletter_with_no_recipient(self):
        # Patch the recipients property to be empty and expect a failure.
        newsletter = self.get_newsletter()
        property_path = f"{get_dotted_path(newsletter)}.newsletter_recipients"
        with patch(property_path, new_callable=PropertyMock) as mock_newsletter_recipients:
            mock_newsletter_recipients.return_value = []
            with self.assertRaises(NoRecipientFoundError):
                newsletter.send_emails()

    def test_send_scheduled_email_error_handling(self):
        # A failure while queueing must propagate AND leave email_sent unset
        # so the newsletter can be retried later.
        newsletter = self.get_newsletter(schedule_send=add_days(getdate(), -1))
        job_path = "frappe.email.doctype.newsletter.newsletter.Newsletter.queue_all"
        m = MagicMock(side_effect=frappe.OutgoingEmailError)
        with self.assertRaises(frappe.OutgoingEmailError):
            with patch(job_path, new_callable=m):
                send_scheduled_email()
        newsletter.reload()
        self.assertEqual(newsletter.email_sent, 0)

    def test_retry_partially_sent_newsletter(self):
        frappe.db.delete("Email Queue")
        frappe.db.delete("Email Queue Recipient")
        frappe.db.delete("Newsletter")
        newsletter = self.get_newsletter()
        newsletter.send_emails()
        email_queue_list = [frappe.get_doc("Email Queue", e.name) for e in frappe.get_all("Email Queue")]
        self.assertEqual(len(email_queue_list), 4)
        # delete a queue document to emulate partial send
        queue_recipient_name = email_queue_list[0].recipients[0].recipient
        email_queue_list[0].delete()
        newsletter.email_sent = False
        # make sure the pending recipient is only the one which has been deleted
        self.assertEqual(newsletter.get_pending_recipients(), [queue_recipient_name])
        # retry
        newsletter.send_emails()
        self.assertEqual(frappe.db.count("Email Queue"), 4)
        self.assertTrue(newsletter.email_sent)
6,115 | push write | # Copyright 2012-2013 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
import logging
from botocore.compat import OrderedDict
from awscli.bcdoc.docstringparser import DocStringParser
from awscli.bcdoc.style import ReSTStyle
LOG = logging.getLogger('bcdocs')
class ReSTDocument(object):
    """Accumulates ReST output as a list of string fragments.

    Writes are buffered in ``self._writes``; ``getvalue`` joins and UTF-8
    encodes them.  ``style`` (ReSTStyle) writes back into this document,
    and ``parser`` feeds HTML doc strings through ``handle_data``.
    """

    def __init__(self, target='man'):
        self.style = ReSTStyle(self)
        # Output target (e.g. 'man'); consulted by style/formatting logic.
        self.target = target
        self.parser = DocStringParser(self)
        # When False, write()/handle_data() calls are silently dropped.
        self.keep_data = True
        self.do_translation = False
        self.translation_map = {}
        # refname -> link; emitted as link target definitions in getvalue().
        self.hrefs = {}
        self._writes = []
        # (start, end) slice of _writes covering the last doc string fed in.
        self._last_doc_string = None

    def _write(self, s):
        # Central choke point: honors keep_data and skips None.
        if self.keep_data and s is not None:
            self._writes.append(s)

    def write(self, content):
        """
        Write content into the document.
        """
        self._write(content)

    def writeln(self, content):
        """
        Write content on a newline.
        """
        self._write('%s%s\n' % (self.style.spaces(), content))

    def peek_write(self):
        """
        Returns the last content written to the document without
        removing it from the stack.
        """
        return self._writes[-1]

    def pop_write(self):
        """
        Removes and returns the last content written to the stack.
        """
        return self._writes.pop()

    def METHOD_NAME(self, s):
        """
        Places new content on the stack.

        Note: bypasses ``_write`` and therefore ignores ``keep_data``.
        """
        self._writes.append(s)

    def getvalue(self):
        """
        Returns the current content of the document as a string.

        NOTE: despite the wording above, the joined content is returned
        UTF-8 *encoded* (bytes).  Link target definitions for collected
        hrefs are appended (via style, i.e. mutating _writes) first.
        """
        if self.hrefs:
            self.style.new_paragraph()
            for refname, link in self.hrefs.items():
                self.style.link_target_definition(refname, link)
        return ''.join(self._writes).encode('utf-8')

    def translate_words(self, words):
        # Map each word through translation_map, leaving unknowns untouched.
        return [self.translation_map.get(w, w) for w in words]

    def handle_data(self, data):
        # Callback used by DocStringParser for parsed text nodes.
        if data and self.keep_data:
            self._write(data)

    def include_doc_string(self, doc_string):
        # Parse an HTML doc string into ReST writes; remember the slice of
        # _writes it produced so remove_last_doc_string can undo it.
        if doc_string:
            try:
                start = len(self._writes)
                self.parser.feed(doc_string)
                self.parser.close()
                end = len(self._writes)
                self._last_doc_string = (start, end)
            except Exception:
                # Malformed doc strings are logged and skipped, not fatal.
                LOG.debug('Error parsing doc string', exc_info=True)
                LOG.debug(doc_string)

    def remove_last_doc_string(self):
        # Removes all writes inserted by last doc string
        if self._last_doc_string is not None:
            start, end = self._last_doc_string
            del self._writes[start:end]

    def write_from_file(self, filename):
        # Copy a file into the document line by line, stripped and
        # re-indented at the current style indentation.
        with open(filename, 'r') as f:
            for line in f.readlines():
                self.writeln(line.strip())
class DocumentStructure(ReSTDocument):
    def __init__(self, name, section_names=None, target='man', context=None):
        """Provides a hierarchical structure to a ReSTDocument

        You can write to it similar to as you can to a ReSTDocument but
        it has an innate structure for more organization and abstraction.

        :param name: The name of the document
        :param section_names: A list of sections to be included
            in the document.
        :param target: The target documentation of the Document structure
        :param context: A dictionary of data to store with the structure. These
            are only stored per section not the entire structure.
        """
        super(DocumentStructure, self).__init__(target=target)
        self._name = name
        # Ordered mapping of section name -> child DocumentStructure.
        self._structure = OrderedDict()
        self._path = [self._name]
        self._context = {}
        if context is not None:
            self._context = context
        if section_names is not None:
            self._generate_structure(section_names)

    @property
    def name(self):
        """The name of the document structure"""
        return self._name

    @property
    def path(self):
        """
        A list of where to find a particular document structure in the
        overlying document structure.
        """
        return self._path

    @path.setter
    def path(self, value):
        self._path = value

    @property
    def available_sections(self):
        # Section names in insertion order.
        return list(self._structure)

    @property
    def context(self):
        return self._context

    def _generate_structure(self, section_names):
        for section_name in section_names:
            self.add_new_section(section_name)

    def add_new_section(self, name, context=None):
        """Adds a new section to the current document structure

        This document structure will be considered a section to the
        current document structure but will in itself be an entirely
        new document structure that can be written to and have sections
        as well

        :param name: The name of the section.
        :param context: A dictionary of data to store with the structure. These
            are only stored per section not the entire structure.
        :rtype: DocumentStructure
        :returns: A new document structure to add to but lives as a section
            to the document structure it was instantiated from.
        """
        # Add a new section
        section = self.__class__(name=name, target=self.target,
                                 context=context)
        section.path = self.path + [name]
        # Indent the section appropriately as well
        section.style.indentation = self.style.indentation
        # Children share the parent's translation map and href registry.
        section.translation_map = self.translation_map
        section.hrefs = self.hrefs
        self._structure[name] = section
        return section

    def get_section(self, name):
        """Retrieve a section"""
        return self._structure[name]

    def delete_section(self, name):
        """Delete a section"""
        del self._structure[name]

    def flush_structure(self):
        """Flushes a doc structure to a ReSTructed string

        The document is flushed out in a DFS style where sections and their
        subsections' values are added to the string as they are visited.
        """
        # We are at the root: flush the links at the beginning of the
        # document
        if len(self.path) == 1:
            if self.hrefs:
                self.style.new_paragraph()
                for refname, link in self.hrefs.items():
                    self.style.link_target_definition(refname, link)
        value = self.getvalue()
        for name, section in self._structure.items():
            value += section.flush_structure()
        return value

    def getvalue(self):
        # Overrides the base class: no href flushing here (handled once at
        # the root by flush_structure); returns UTF-8 encoded bytes.
        return ''.join(self._writes).encode('utf-8')

    def remove_all_sections(self):
        self._structure = OrderedDict()

    def clear_text(self):
        self._writes = []
6,116 | test run module bug1764407 | # Tests invocation of the interpreter with various command line arguments
# All tests are executed with environment variables ignored
# See test_cmd_line_script.py for testing of script execution
import test.test_support
import sys
import unittest
from test.script_helper import (
assert_python_ok, assert_python_failure, spawn_python, kill_python,
python_exit_code
)
class CmdLineTest(unittest.TestCase):
    """Invoke a child interpreter with various command-line arguments.

    Python 2 / Jython era tests: helpers spawn a fresh interpreter process
    via test.script_helper and inspect its output or exit code.
    """

    @classmethod
    def tearDownClass(cls):
        if test.test_support.is_jython:
            # GC is not immediate, so Popen.__del__ may be delayed.
            # Try to force any Popen.__del__ errors within scope of test.
            from test_weakref import extra_collect
            extra_collect()

    def start_python(self, *args):
        # Spawn a child interpreter and return its captured stdout.
        p = spawn_python(*args)
        return kill_python(p)

    def exit_code(self, *args):
        return python_exit_code(*args)

    def test_directories(self):
        # A directory is not a valid script target.
        self.assertNotEqual(self.exit_code('.'), 0)
        self.assertNotEqual(self.exit_code('< .'), 0)

    def verify_valid_flag(self, cmd_line):
        # A valid flag produces no traceback and no usage message.
        data = self.start_python(cmd_line)
        self.assertTrue(data == '' or data.endswith('\n'))
        self.assertNotIn('Traceback', data)
        self.assertNotIn('usage:', data)

    def test_optimize(self):
        self.verify_valid_flag('-O')
        self.verify_valid_flag('-OO')

    def test_q(self):
        # Division-warning flags (Python 2 only).
        self.verify_valid_flag('-Qold')
        self.verify_valid_flag('-Qnew')
        self.verify_valid_flag('-Qwarn')
        self.verify_valid_flag('-Qwarnall')

    def test_site_flag(self):
        self.verify_valid_flag('-S')

    def test_usage(self):
        self.assertIn('usage', self.start_python('-h'))

    def test_version(self):
        prefix = 'Jython' if test.test_support.is_jython else 'Python'
        version = (prefix + ' %d.%d') % sys.version_info[:2]
        self.assertTrue(self.start_python('-V').startswith(version))

    def test_run_module(self):
        # Test expected operation of the '-m' switch
        # Switch needs an argument
        self.assertNotEqual(self.exit_code('-m'), 0)
        # Check we get an error for a nonexistent module
        self.assertNotEqual(
            self.exit_code('-m', 'fnord43520xyz'),
            0)
        # Check the runpy module also gives an error for
        # a nonexistent module
        self.assertNotEqual(
            self.exit_code('-m', 'runpy', 'fnord43520xyz'),
            0)
        # All good if module is located and run successfully
        self.assertEqual(
            self.exit_code('-m', 'timeit', '-n', '1'),
            0)

    def METHOD_NAME(self):
        # -m and -i need to play well together
        # Runs the timeit module and checks the __main__
        # namespace has been populated appropriately
        p = spawn_python('-i', '-m', 'timeit', '-n', '1')
        p.stdin.write('Timer\n')
        p.stdin.write('exit()\n')
        data = kill_python(p)
        self.assertTrue(data.startswith('1 loop'))
        self.assertIn('__main__.Timer', data)

    def test_run_code(self):
        # Test expected operation of the '-c' switch
        # Switch needs an argument
        self.assertNotEqual(self.exit_code('-c'), 0)
        # Check we get an error for an uncaught exception
        self.assertNotEqual(
            self.exit_code('-c', 'raise Exception'),
            0)
        # All good if execution is successful
        self.assertEqual(
            self.exit_code('-c', 'pass'),
            0)

    @unittest.skipIf(test.test_support.is_jython,
                     "Hash randomisation is not supported in Jython.")
    def test_hash_randomization(self):
        # Verify that -R enables hash randomization:
        self.verify_valid_flag('-R')
        hashes = []
        for i in range(2):
            code = 'print(hash("spam"))'
            data = self.start_python('-R', '-c', code)
            hashes.append(data)
        self.assertNotEqual(hashes[0], hashes[1])
        # Verify that sys.flags contains hash_randomization
        code = 'import sys; print sys.flags'
        data = self.start_python('-R', '-c', code)
        self.assertTrue('hash_randomization=1' in data)

    def test_del___main__(self):
        # Issue #15001: PyRun_SimpleFileExFlags() did crash because it kept a
        # borrowed reference to the dict of __main__ module and later modify
        # the dict whereas the module was destroyed
        filename = test.test_support.TESTFN
        self.addCleanup(test.test_support.unlink, filename)
        with open(filename, "w") as script:
            print >>script, "import sys"
            print >>script, "del sys.modules['__main__']"
        assert_python_ok(filename)

    def test_unknown_options(self):
        rc, out, err = assert_python_failure('-E', '-z')
        self.assertIn(b'Unknown option: -z', err)
        self.assertEqual(err.splitlines().count(b'Unknown option: -z'), 1)
        self.assertEqual(b'', out)
        # Add "without='-E'" to prevent _assert_python to append -E
        # to env_vars and change the output of stderr
        rc, out, err = assert_python_failure('-z', without='-E')
        self.assertIn(b'Unknown option: -z', err)
        self.assertEqual(err.splitlines().count(b'Unknown option: -z'), 1)
        self.assertEqual(b'', out)
        rc, out, err = assert_python_failure('-a', '-z', without='-E')
        self.assertIn(b'Unknown option: -a', err)
        # only the first unknown option is reported
        self.assertNotIn(b'Unknown option: -z', err)
        self.assertEqual(err.splitlines().count(b'Unknown option: -a'), 1)
        self.assertEqual(b'', out)

    def test_python_startup(self):
        # Test that the file designated by [PJ]YTHONSTARTUP is executed when interactive.
        # Note: this test depends on the -i option forcing Python to treat stdin as interactive.
        filename = test.test_support.TESTFN
        self.addCleanup(test.test_support.unlink, filename)
        with open(filename, "w") as script:
            print >>script, "print 6*7"
            print >>script, "print 'Ni!'"
        expected = ['42', 'Ni!']
        def check(*args, **kwargs):
            result = assert_python_ok(*args, **kwargs)
            self.assertListEqual(expected, result[1].splitlines())
        if test.test_support.is_jython:
            # Jython produces a prompt before exit, but not CPython. Hard to say who is correct.
            expected.append('>>> ')
            # The Jython way is to set a registry item python.startup
            check('-i', '-J-Dpython.startup={}'.format(filename))
            # But a JYTHONSTARTUP environment variable is also supported
            check('-i', JYTHONSTARTUP=filename)
        else:
            check('-i', PYTHONSTARTUP=filename)

    @unittest.skipUnless(test.test_support.is_jython, "Requires write to sys.flags.inspect")
    def test_python_inspect(self):
        # Test that PYTHONINSPECT set during a script causes an interactive session to start.
        # Note: this test depends on the -i option forcing Python to treat stdin as interactive,
        # and on Jython permitting manipulation of sys.flags.inspect (which CPython won't)
        # so that PYTHONINSPECT can have some effect.
        filename = test.test_support.TESTFN
        self.addCleanup(test.test_support.unlink, filename)
        with open(filename, "w") as script:
            print >>script, "import sys, os"
            print >>script, "sys.flags.inspect = False"
            print >>script, "os.environ['PYTHONINSPECT'] = 'whatever'"
            print >>script, "print os.environ['PYTHONINSPECT']"
        expected = ['whatever', '>>> ']
        result = assert_python_ok('-i', filename)
        self.assertListEqual(expected, result[1].splitlines())
def test_main():
    # regrtest entry point; reap_children cleans up any interpreter
    # subprocesses the tests spawned.
    test.test_support.run_unittest(CmdLineTest)
    test.test_support.reap_children()


if __name__ == "__main__":
    test_main()
6,117 | test post no installation | from unittest.mock import patch
from django.db import router
from django.urls import reverse
from sentry.models.integrations.integration import Integration
from sentry.models.integrations.repository_project_path_config import RepositoryProjectPathConfig
from sentry.models.repository import Repository
from sentry.silo import SiloMode, unguarded_write
from sentry.testutils.cases import APITestCase
from sentry.testutils.silo import assume_test_silo_mode, region_silo_test
@region_silo_test(stable=True)
class OrganizationDeriveCodeMappingsTest(APITestCase):
    """Exercises the organization derive-code-mappings endpoint (GET + POST)."""

    def setUp(self):
        super().setUp()
        self.login_as(user=self.user)
        self.project = self.create_project(organization=self.organization)
        self.url = reverse(
            "sentry-api-0-organization-derive-code-mappings",
            args=[self.organization.slug],
        )
        self.repo = self.create_repo(
            name="getsentry/sentry",
            provider="integrations:github",
            integration_id=self.integration.id,
            project=self.project,
        )

    @patch("sentry.integrations.github.GitHubIntegration.get_trees_for_org")
    def test_get_single_match(self, mock_get_trees_for_org):
        # GET with a stacktrace filename returns the derived mapping matches.
        config_data = {
            "stacktraceFilename": "stack/root/file.py",
        }
        expected_matches = [
            {
                "filename": "stack/root/file.py",
                "repo_name": "getsentry/codemap",
                "repo_branch": "master",
                "stacktrace_root": "/stack/root",
                "source_path": "/source/root/",
            }
        ]
        with patch(
            "sentry.integrations.utils.code_mapping.CodeMappingTreesHelper.list_file_matches",
            return_value=expected_matches,
        ):
            response = self.client.get(self.url, data=config_data, format="json")
            assert mock_get_trees_for_org.call_count == 1
            assert response.status_code == 200, response.content
            assert response.data == expected_matches

    @patch("sentry.integrations.github.GitHubIntegration.get_trees_for_org")
    def test_get_multiple_matches(self, mock_get_trees_for_org):
        # Multiple candidate mappings are all returned.
        config_data = {
            "stacktraceFilename": "stack/root/file.py",
        }
        expected_matches = [
            {
                "filename": "stack/root/file.py",
                "repo_name": "getsentry/codemap",
                "repo_branch": "master",
                "stacktrace_root": "/stack/root",
                "source_path": "/source/root/",
            },
            {
                "filename": "stack/root/file.py",
                "repo_name": "getsentry/codemap",
                "repo_branch": "master",
                "stacktrace_root": "/stack/root",
                "source_path": "/source/root/",
            },
        ]
        with patch(
            "sentry.integrations.utils.code_mapping.CodeMappingTreesHelper.list_file_matches",
            return_value=expected_matches,
        ):
            response = self.client.get(self.url, data=config_data, format="json")
            assert mock_get_trees_for_org.call_count == 1
            assert response.status_code == 200, response.content
            assert response.data == expected_matches

    def test_get_no_installation(self):
        # With no GitHub integration installed the endpoint 404s.
        config_data = {
            "projectId": self.project.id,
            "stacktraceFilename": "stack/root/file.py",
        }
        with assume_test_silo_mode(SiloMode.CONTROL), unguarded_write(
            using=router.db_for_write(Integration)
        ):
            Integration.objects.all().delete()
        response = self.client.get(self.url, data=config_data, format="json")
        assert response.status_code == 404, response.content

    def test_post_simple(self):
        # POST creates the repository (if new) and the code-mapping config,
        # flagged as automatically generated.
        config_data = {
            "projectId": self.project.id,
            "stackRoot": "/stack/root",
            "sourceRoot": "/source/root",
            "defaultBranch": "master",
            "repoName": "getsentry/codemap",
        }
        response = self.client.post(self.url, data=config_data, format="json")
        repo = Repository.objects.get(name="getsentry/codemap")
        assert response.status_code == 201, response.content
        assert response.data == {
            "automaticallyGenerated": True,
            "id": str(response.data["id"]),
            "projectId": str(self.project.id),
            "projectSlug": self.project.slug,
            "repoId": str(repo.id),
            "repoName": "getsentry/codemap",
            "provider": {
                "aspects": {},
                "features": ["codeowners", "commits", "issue-basic", "stacktrace-link"],
                "name": "GitHub",
                "canDisable": False,
                "key": "github",
                "slug": "github",
                "canAdd": True,
            },
            "integrationId": str(self.integration.id),
            "stackRoot": "/stack/root",
            "sourceRoot": "/source/root",
            "defaultBranch": "master",
        }

    def METHOD_NAME(self):
        # POST without an installed integration also 404s.
        config_data = {
            "projectId": self.project.id,
            "stackRoot": "/stack/root",
            "sourceRoot": "/source/root",
            "defaultBranch": "master",
            "repoName": "name",
        }
        with assume_test_silo_mode(SiloMode.CONTROL), unguarded_write(
            using=router.db_for_write(Integration)
        ):
            Integration.objects.all().delete()
        response = self.client.post(self.url, data=config_data, format="json")
        assert response.status_code == 404, response.content

    def test_post_existing_code_mapping(self):
        # POSTing over an existing mapping with the same stack root creates
        # the updated config (source_root replaced).
        RepositoryProjectPathConfig.objects.create(
            project=self.project,
            stack_root="/stack/root",
            source_root="/source/root/wrong",
            default_branch="master",
            repository=self.repo,
            organization_integration_id=self.organization_integration.id,
            organization_id=self.organization_integration.organization_id,
            integration_id=self.organization_integration.integration_id,
        )
        config_data = {
            "projectId": self.project.id,
            "stackRoot": "/stack/root",
            "sourceRoot": "/source/root",
            "defaultBranch": "master",
            "repoName": "name",
        }
        response = self.client.post(self.url, data=config_data, format="json")
        assert response.status_code == 201, response.content
        new_code_mapping = RepositoryProjectPathConfig.objects.get(
            project=self.project, stack_root="/stack/root"
        )
        assert new_code_mapping.source_root == "/source/root"
6,118 | compute loss | import torch
import torch.nn as nn
import torch.nn.functional as F
from functorch import vmap, grad, combine_state_for_ensemble, make_functional_with_buffers
import functools
from .util import BenchmarkCase
class SimpleMLP(nn.Module):
    """Three-layer MLP over flattened 28x28 inputs (MNIST-like), 10 classes."""

    def __init__(self):
        super(SimpleMLP, self).__init__()
        self.fc1 = nn.Linear(784, 128)
        self.fc2 = nn.Linear(128, 128)
        self.fc3 = nn.Linear(128, 10)

    def forward(self, x):
        x = x.flatten(1)
        x = self.fc1(x)
        x = F.relu(x)
        x = self.fc2(x)
        x = F.relu(x)
        x = self.fc3(x)
        return x

    @classmethod
    def make_input(cls, bs=None):
        """One fixture batch of 64 MNIST-shaped images; ``bs`` prepends an
        extra leading (ensemble) dimension."""
        shape = [64, 1, 28, 28]
        if bs is None:
            return torch.randn(*shape)
        return torch.randn(bs, *shape)

    @classmethod
    def make_target(cls, bs=None):
        """Integer class labels in [0, 10).

        Bug fix: the ``bs`` branch previously called
        ``torch.randn(10, [bs] + shape)``, which is not a valid call and
        was the wrong op — it must mirror the ``bs is None`` branch with
        ``torch.randint``.
        """
        shape = [64]
        if bs is None:
            return torch.randint(10, shape)
        return torch.randint(10, [bs] + shape)
class SimpleCNN(nn.Module):
    """Small two-conv CNN over 28x28 single-channel inputs, 10 logits out."""

    def __init__(self):
        super(SimpleCNN, self).__init__()
        self.conv1 = nn.Conv2d(1, 32, 3, 1)
        self.conv2 = nn.Conv2d(32, 64, 3, 1)
        self.fc1 = nn.Linear(9216, 128)
        self.fc2 = nn.Linear(128, 10)

    def forward(self, x):
        x = self.conv1(x)
        x = F.relu(x)
        x = self.conv2(x)
        x = F.relu(x)
        x = F.max_pool2d(x, 2)
        x = torch.flatten(x, 1)
        x = self.fc1(x)
        x = F.relu(x)
        x = self.fc2(x)
        # The original computed F.log_softmax(x, dim=1) and then immediately
        # discarded it (dead store) — raw logits are what is returned.
        return x

    @classmethod
    def make_input(cls, bs=None):
        """One fixture batch of 64 MNIST-shaped images; ``bs`` prepends an
        extra leading (ensemble) dimension."""
        shape = [64, 1, 28, 28]
        if bs is None:
            return torch.randn(*shape)
        return torch.randn(bs, *shape)

    @classmethod
    def make_target(cls, bs=None):
        """Integer class labels in [0, 10).

        Bug fix: the ``bs`` branch previously called
        ``torch.randn(10, [bs] + shape)``, an invalid call; it must use
        ``torch.randint`` like the ``bs is None`` branch.
        """
        shape = [64]
        if bs is None:
            return torch.randint(10, shape)
        return torch.randint(10, [bs] + shape)
class VmapWrapper(BenchmarkCase):
    # Benchmarks vmap-ing a single model over one batched input tensor.
    def __init__(self, model_cls, device):
        self.name_ = f'{model_cls.__name__}_vmap_{device}'
        self.model = model_cls().to(device)
        self.inputs = model_cls.make_input().to(device)

    def name(self):
        return self.name_

    def run(self):
        vmap(self.model)(self.inputs)
def ensemble_setup(self, model_cls, device):
    # Shared helper (called with a benchmark instance as ``self``): builds
    # 10 models, stacks their parameters/buffers into one functional
    # ensemble, and creates a per-model batch of inputs.
    num_models = 10
    models = [model_cls().to(device) for _ in range(num_models)]
    fmodel, params, buffers = combine_state_for_ensemble(models)
    self.fmodel = fmodel
    self.params = params
    self.buffers = buffers
    self.inputs = model_cls.make_input(num_models).to(device)
class EnsembleMultiWrapper(BenchmarkCase):
    # Each ensemble member gets its own minibatch: params, buffers and
    # inputs are all vmapped over dimension 0.
    def __init__(self, model_cls, device):
        self.name_ = f'{model_cls.__name__}_ensemble_multi_{device}'
        ensemble_setup(self, model_cls, device)

    def name(self):
        return self.name_

    def run(self):
        vmap(self.fmodel)(self.params, self.buffers, self.inputs)
class EnsembleSingleWrapper(BenchmarkCase):
    """Benchmark: vmap an ensemble where all members share a single input batch."""

    def __init__(self, model_cls, device):
        self.name_ = '{}_ensemble_single_{}'.format(model_cls.__name__, device)
        ensemble_setup(self, model_cls, device)
        # Keep only one input batch; in_dims=None broadcasts it to all members.
        self.inputs = self.inputs[0]

    def name(self):
        """Return the benchmark's display name."""
        return self.name_

    def run(self):
        """Map over params/buffers while broadcasting the shared input."""
        vmap(self.fmodel, (0, 0, None))(self.params, self.buffers, self.inputs)
def loss_fn(predictions, targets):
    """Negative log-likelihood loss; *predictions* must be log-probabilities."""
    return F.nll_loss(predictions, targets)
def METHOD_NAME(fmodel, params, buffers, sample, target):
    """Loss for a single example: add a batch dim of 1, run fmodel, apply loss_fn."""
    batched_sample = sample.unsqueeze(0)
    batched_target = target.unsqueeze(0)
    prediction = fmodel(params, buffers, batched_sample)
    return loss_fn(prediction, batched_target)
class PerSampleGradWrapper(BenchmarkCase):
    """Benchmark: per-sample gradients via vmap(grad(per_sample_loss))."""

    def __init__(self, model_cls, device):
        self.name_ = '{}_persamplegrad_{}'.format(model_cls.__name__, device)
        self.model = make_functional_with_buffers(model_cls().to(device))
        self.inputs = model_cls.make_input().to(device)
        self.targets = model_cls.make_target().to(device)

    def name(self):
        """Return the benchmark's display name."""
        return self.name_

    def run(self):
        """Compute gradients for every sample independently."""
        fmodel, params, buffers = self.model
        loss = functools.partial(METHOD_NAME, fmodel)
        # Map over samples/targets; params and buffers are shared (in_dims=None).
        vmap(grad(loss), (None, None, 0, 0))(params, buffers, self.inputs, self.targets)
6,119 | test | """
Admonition extension for Python-Markdown
========================================
Adds rST-style admonitions. Inspired by [rST][] feature with the same name.
[rST]: http://docutils.sourceforge.net/docs/ref/rst/directives.html#specific-admonitions # noqa
See <https://Python-Markdown.github.io/extensions/admonition>
for documentation.
Original code Copyright [Tiago Serafim](https://www.tiagoserafim.com/).
All changes Copyright The Python Markdown Project
License: [BSD](https://opensource.org/licenses/bsd-license.php)
"""
from . import Extension
from ..blockprocessors import BlockProcessor
import xml.etree.ElementTree as etree
import re
class AdmonitionExtension(Extension):
    """ Admonition extension for Python-Markdown. """

    def extendMarkdown(self, md):
        """ Register the admonition block processor on the parser. """
        md.registerExtension(self)
        processor = AdmonitionProcessor(md.parser)
        md.parser.blockprocessors.register(processor, 'admonition', 105)
class AdmonitionProcessor(BlockProcessor):
    """Parse ``!!! type ["title"]`` blocks into ``<div class="admonition">``."""

    CLASSNAME = 'admonition'
    CLASSNAME_TITLE = 'admonition-title'
    # Matches `!!! class [class ...] ["optional title"]` at the start of a line.
    RE = re.compile(r'(?:^|\n)!!! ?([\w\-]+(?: +[\w\-]+)*)(?: +"(.*?)")? *(?:\n|$)')
    RE_SPACES = re.compile(' +')

    def __init__(self, parser):
        """Initialization."""
        super().__init__(parser)
        self.current_sibling = None
        # Fixed: this attribute was misspelled ``content_indention`` here,
        # while every other read/write (in parse_content) uses
        # ``content_indent``, leaving the real attribute uninitialized.
        self.content_indent = 0

    def parse_content(self, parent, block):
        """Get sibling admonition.

        Retrieve the appropriate sibling element. This can get tricky when
        dealing with lists.
        """
        old_block = block
        the_rest = ''
        # We already acquired the block via test
        if self.current_sibling is not None:
            sibling = self.current_sibling
            block, the_rest = self.detab(block, self.content_indent)
            self.current_sibling = None
            self.content_indent = 0
            return sibling, block, the_rest
        sibling = self.lastChild(parent)
        if sibling is None or sibling.get('class', '').find(self.CLASSNAME) == -1:
            sibling = None
        else:
            # If the last child is a list and the content is sufficiently indented
            # to be under it, then the content's sibling is in the list.
            last_child = self.lastChild(sibling)
            indent = 0
            while last_child:
                if (
                    sibling and block.startswith(' ' * self.tab_length * 2) and
                    last_child and last_child.tag in ('ul', 'ol', 'dl')
                ):
                    # The expectation is that we'll find an <li> or <dt>.
                    # We should get its last child as well.
                    sibling = self.lastChild(last_child)
                    last_child = self.lastChild(sibling) if sibling else None
                    # Context has been lost at this point, so we must adjust the
                    # text's indentation level so it will be evaluated correctly
                    # under the list.
                    block = block[self.tab_length:]
                    indent += self.tab_length
                else:
                    last_child = None
            if not block.startswith(' ' * self.tab_length):
                sibling = None
            if sibling is not None:
                indent += self.tab_length
                block, the_rest = self.detab(old_block, indent)
                # Remember where we are so the next block under this
                # admonition is detabbed by the same amount.
                self.current_sibling = sibling
                self.content_indent = indent
        return sibling, block, the_rest

    def METHOD_NAME(self, parent, block):
        """Return True when this processor should handle *block*."""
        if self.RE.search(block):
            return True
        else:
            return self.parse_content(parent, block)[0] is not None

    def run(self, parent, blocks):
        block = blocks.pop(0)
        m = self.RE.search(block)
        if m:
            if m.start() > 0:
                # Any text before the `!!!` marker is ordinary content.
                self.parser.parseBlocks(parent, [block[:m.start()]])
            block = block[m.end():]  # removes the first line
            block, theRest = self.detab(block)
        else:
            sibling, block, theRest = self.parse_content(parent, block)
        if m:
            klass, title = self.get_class_and_title(m)
            div = etree.SubElement(parent, 'div')
            div.set('class', '{} {}'.format(self.CLASSNAME, klass))
            if title:
                p = etree.SubElement(div, 'p')
                p.text = title
                p.set('class', self.CLASSNAME_TITLE)
        else:
            # If the sibling is a list item with bare text, that text must be
            # wrapped in a <p> before any further content is appended.
            if sibling.tag in ('li', 'dd') and sibling.text:
                text = sibling.text
                sibling.text = ''
                p = etree.SubElement(sibling, 'p')
                p.text = text
            div = sibling
        self.parser.parseChunk(div, block)
        if theRest:
            # This block contained unindented line(s) after the first indented
            # line. Insert these lines as the first block of the master blocks
            # list for future processing.
            blocks.insert(0, theRest)

    def get_class_and_title(self, match):
        """Split the regex match into (CSS class string, title or None)."""
        klass, title = match.group(1).lower(), match.group(2)
        klass = self.RE_SPACES.sub(' ', klass)
        if title is None:
            # no title was provided, use the capitalized classname as title
            # e.g.: `!!! note` will render
            # `<p class="admonition-title">Note</p>`
            title = klass.split(' ', 1)[0].capitalize()
        elif title == '':
            # an explicit blank title should not be rendered
            # e.g.: `!!! warning ""` will *not* render `p` with a title
            title = None
        return klass, title
def makeExtension(**kwargs):  # pragma: no cover
    """Return an AdmonitionExtension configured with *kwargs*."""
    return AdmonitionExtension(**kwargs)
6,120 | test | import os
import csv
import random
import importlib
from ...utils.identifiers.arbitrary import ArbitraryIdentifier
from ...setup.requirements.compound import (
ChemblWebResourceClientRequirement,
RdkitRequirement,
)
from ... import logger
from ..shape import InputShapeSingle, InputShapeList, InputShapePairOfLists
from .examples import compound as test_examples
from . import EXAMPLES_FOLDER
EXAMPLES = "compound.tsv"
class IO(object):
    """Compound (small-molecule) input handler.

    Dispatches example generation and input parsing based on the declared
    input shape: a single string, a list, or a pair of lists.
    """
    def __init__(self, input_shape):
        self.logger = logger
        self.input_shape = input_shape
        self.example_file = os.path.join(EXAMPLES_FOLDER, EXAMPLES)
        self.setup()
        # Imported lazily by dotted path so the rdkit-dependent identifier is
        # only loaded after setup() has verified the requirements.
        self.identifier = importlib.import_module(
            "ersilia.utils.identifiers.compound"
        ).CompoundIdentifier()
        self.arbitrary_identifier = ArbitraryIdentifier()
        # Bind the example/parse/test implementations matching the shape.
        if type(self.input_shape) is InputShapeSingle:
            self.logger.debug(
                "InputShapeSingle shape: {0}".format(self.input_shape.name)
            )
            self._example = self._example_single
            self._parser = self._parse_single
            self._test = test_examples.input_shape_single
        if type(self.input_shape) is InputShapeList:
            self.logger.debug("InputShapeList shape: {0}".format(self.input_shape.name))
            self._example = self._example_list
            self._parser = self._parse_list
            self._test = test_examples.input_shape_list
        if type(self.input_shape) is InputShapePairOfLists:
            self.logger.debug(
                "InputShapePairOfLists shape: {0}".format(self.input_shape.name)
            )
            self._example = self._example_pair_of_lists
            self._parser = self._parse_pair_of_lists
            self._test = test_examples.input_shape_pair_of_lists
    def _sample_example_singlets(self, n_samples):
        """Sample *n_samples* rows (with replacement) from the example file."""
        delimiter = None
        if self.example_file.endswith(".tsv"):
            delimiter = "\t"
        if self.example_file.endswith(".csv"):
            delimiter = ","
        with open(self.example_file, "r") as f:
            reader = csv.reader(f, delimiter=delimiter)
            R = []
            for r in reader:
                R += [r]
        idxs = [i for i in range(len(R))]
        # random.choices samples with replacement, so duplicates can appear.
        idxs = random.choices(idxs, k=n_samples)
        D = []
        for idx in idxs:
            r = R[idx]
            # Example rows are (key, input, text) columns.
            D += [{"key": r[0], "input": r[1], "text": r[2]}]
        return D
    def _example_single(self, n_samples):
        """Yield *n_samples* single-compound example records."""
        D = self._sample_example_singlets(n_samples)
        for d in D:
            yield d
    def _example_list(self, n_samples):
        """Yield *n_samples* examples, each a list of 10 compounds."""
        D = []
        for _ in range(n_samples):
            D_ = self._sample_example_singlets(10)
            input = [x["input"] for x in D_]
            text = self.string_delimiter().join(input)
            # Lists have no natural key; derive one from the joined text.
            key = self.arbitrary_identifier.encode(text)
            D += [{"key": key, "input": input, "text": text}]
        for d in D:
            yield d
    def _example_pair_of_lists(self, n_samples):
        """Yield *n_samples* examples, each a pair of 10-compound lists."""
        D = []
        for _ in range(n_samples):
            D_0 = self._sample_example_singlets(10)
            D_1 = self._sample_example_singlets(10)
            input_0 = [x["input"] for x in D_0]
            input_1 = [x["input"] for x in D_1]
            input = [input_0, input_1]
            text = self.column_delimiter().join(
                [
                    self.string_delimiter().join(input_0),
                    self.string_delimiter().join(input_1),
                ]
            )
            key = self.arbitrary_identifier.encode(text)
            D += [{"key": key, "input": input, "text": text}]
        for d in D:
            yield d
    def setup(self):
        """Verify third-party requirements needed for compound inputs."""
        self.logger.debug(
            "Checking RDKIT and other requirements necessary for compound inputs"
        )
        RdkitRequirement()
        ChemblWebResourceClientRequirement()
    def example(self, n_samples):
        """Return a generator of *n_samples* examples for the current shape."""
        return self._example(n_samples)
    def METHOD_NAME(self):
        """Return the canned test examples for the current shape."""
        return self._test
    def _parse_dict(self, datum):
        """Normalize a dict datum to {key, input, text} when all are present."""
        if "key" in datum:
            key = datum["key"]
        else:
            key = None
        if "input" in datum:
            inp = datum["input"]
        else:
            inp = None
        if "text" in datum:
            text = datum["text"]
        else:
            text = None
        if key is not None and inp is not None and text is not None:
            return {"key": key, "input": inp, "text": text}
        # TODO
        return datum
    def _parse_text(self, datum):
        """Resolve free text (SMILES / InChIKey / name) into a full record."""
        text = datum
        text_type = self.identifier.guess_type(text)
        key = None
        if text_type == "smiles":
            inp = text
        elif text_type == "inchikey":
            # presumably resolves the InChIKey to a SMILES via UniChem —
            # TODO confirm against CompoundIdentifier
            inp = self.identifier.unichem_resolver(text)
            key = text
        else:
            # Fallback: treat the text as a chemical name and resolve it.
            inp = self.identifier.chemical_identifier_resolver(text)
        if key is None:
            key = self.identifier.encode(inp)
        result = {"key": key, "input": inp, "text": text}
        return result
    def _parse_single(self, datum):
        """Parse one free-text compound identifier."""
        return self._parse_text(datum)
    def _parse_list(self, datum):
        """Parse a list of compound strings into a keyed record."""
        inp = datum
        text = self.string_delimiter().join(inp)
        key = self.arbitrary_identifier.encode(text)
        result = {"key": key, "input": inp, "text": text}
        return result
    def _parse_pair_of_lists(self, datum):
        """Parse a pair of compound-string lists into a keyed record."""
        inp = datum
        text = self.column_delimiter().join(
            [self.string_delimiter().join(inp[0]), self.string_delimiter().join(inp[1])]
        )
        key = self.arbitrary_identifier.encode(text)
        result = {"key": key, "input": inp, "text": text}
        return result
    def parse(self, datum):
        """Parse *datum*; dicts pass through, other types use the shape parser."""
        if type(datum) is dict:
            return datum
        else:
            return self._parser(datum)
    def is_input(self, text):
        """Return True when *text* looks like a SMILES string."""
        return self.identifier._is_smiles(text)
    def is_key(self, text):
        """Return True when *text* looks like an InChIKey."""
        return self.identifier._is_inchikey(text)
    def string_delimiter(self):
        """Delimiter between items within a single list."""
        return "."
    def column_delimiter(self):
        """Delimiter between the two lists of a pair."""
        return ","
6,121 | test colorbar | import unittest
import matplotlib.axes
import matplotlib.lines
import matplotlib.patches
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from pyrolite.comp.codata import close
from pyrolite.util.plot.helpers import (
draw_vector,
init_spherical_octant,
nan_scatter,
plot_2dhull,
plot_cooccurence,
plot_pca_vectors,
plot_stdev_ellipses,
rect_from_centre,
vector_to_line,
)
from pyrolite.util.skl.transform import ALRTransform, ILRTransform
from pyrolite.util.synthetic import random_composition
# scikit-learn is optional; the PCA-based tests below are skipped when it
# is not installed.
try:
    from sklearn.decomposition import PCA
    HAVE_SKLEARN = True
except ImportError:
    HAVE_SKLEARN = False
class TestPlotCooccurence(unittest.TestCase):
    """Exercise plot_cooccurence across its keyword options."""

    def setUp(self):
        self.rdata = pd.DataFrame(
            random_composition(size=200, D=4, missing="MCAR"),
            columns=["MgO", "SiO2", "CaO", "TiO2"],
        )

    def test_default(self):
        plot_cooccurence(self.rdata)

    def test_normalize(self):
        for flag in (True, False):
            with self.subTest(normalize=flag):
                plot_cooccurence(self.rdata, normalize=flag)

    def test_log(self):
        for flag in (True, False):
            with self.subTest(log=flag):
                plot_cooccurence(self.rdata, log=flag)

    def METHOD_NAME(self):
        for flag in (True, False):
            with self.subTest(colorbar=flag):
                plot_cooccurence(self.rdata, colorbar=flag)

    def test_external_ax(self):
        fig, ax = plt.subplots(1)
        plot_cooccurence(self.rdata, ax=ax)
class TestPlotStDevEllipses(unittest.TestCase):
    """Exercise plot_stdev_ellipses with and without log-ratio transforms."""

    def setUp(self):
        self.comp3d = random_composition(size=100, D=3)
        self.T = ILRTransform()
        self.comp2d = self.T.transform(self.comp3d)

    def test_default(self):
        for comp in (self.comp2d,):
            with self.subTest(comp=comp):
                plot_stdev_ellipses(comp, transform=self.T.inverse_transform)

    def test_axis_specified(self):
        for comp in (self.comp2d,):
            with self.subTest(comp=comp):
                fig, ax = plt.subplots(1, subplot_kw=dict(projection="ternary"))
                plot_stdev_ellipses(comp, ax=ax, transform=self.T.inverse_transform)

    def test_transform(self):
        for tfm in (None, ILRTransform, ALRTransform):
            with self.subTest(tfm=tfm):
                if callable(tfm):
                    transformer = tfm()
                    comp = transformer.transform(self.comp3d)
                    transform = transformer.inverse_transform
                else:
                    comp = self.comp2d
                    transform = None
                plot_stdev_ellipses(comp, transform=transform)

    def tearDown(self):
        plt.close("all")
@unittest.skipUnless(HAVE_SKLEARN, "Requires Scikit-learn")
class TestPlotPCAVectors(unittest.TestCase):
    """Exercise plot_pca_vectors in 2D and 3D, with and without transforms."""

    def setUp(self):
        self.comp3d = random_composition(size=100, D=3)
        self.T = ILRTransform()
        self.comp2d = self.T.transform(self.comp3d)

    def test_default(self):
        for comp in (self.comp2d, self.comp3d):
            with self.subTest(comp=comp):
                plot_pca_vectors(comp)

    def test_axis_specified(self):
        for comp in (self.comp2d, self.comp3d):
            with self.subTest(comp=comp):
                fig, ax = plt.subplots()
                plot_pca_vectors(comp, ax=ax)

    def test_transform(self):
        for tfm in (None, ILRTransform, ALRTransform):
            with self.subTest(tfm=tfm):
                if callable(tfm):
                    transformer = tfm()
                    comp = transformer.transform(self.comp3d)
                    transform = transformer.inverse_transform
                else:
                    comp = self.comp2d
                    transform = None
                plot_pca_vectors(comp, transform=transform)

    def tearDown(self):
        plt.close("all")
@unittest.skipUnless(HAVE_SKLEARN, "Requires Scikit-learn")
class TestDrawVector(unittest.TestCase):
    """
    Tests the draw_vector utility function.
    """

    def setUp(self):
        values = 1.0 / (np.random.randn(5) + 4)
        self.X = close(np.array([values, 1 - values]))

    def test_plot(self):
        fig, ax = plt.subplots(1)
        pca = PCA(n_components=2)
        pca.fit(self.X)
        for variance, vector in zip(pca.explained_variance_, pca.components_):
            scaled = vector[:2] * 3 * np.sqrt(variance)
            draw_vector(pca.mean_[:2], pca.mean_[:2] + scaled, ax=ax)

    def tearDown(self):
        plt.close("all")
@unittest.skipUnless(HAVE_SKLEARN, "Requires Scikit-learn")
class TestVectorToLine(unittest.TestCase):
    """
    Tests the vector_to_line utility function.
    """

    def setUp(self):
        values = 1.0 / (np.random.randn(5) + 4)
        self.X = close(np.array([values, 1 - values]))

    def test_to_line(self):
        pca = PCA(n_components=2)
        pca.fit(self.X)
        for variance, vector in zip(pca.explained_variance_, pca.components_):
            line = vector_to_line(pca.mean_[:2], vector[:2], variance, spans=6)
            self.assertIsInstance(line, np.ndarray)
            self.assertTrue(line.shape[1] == 2)
class Test2DHull(unittest.TestCase):
    """
    Tests the plot_2dhull utility function.
    """

    def setUp(self):
        self.fig, self.ax = plt.subplots(1)
        self.data = np.random.random((2, 10)).T

    def test_2d_hull(self):
        lines = plot_2dhull(self.data, ax=self.ax)
        self.assertIsInstance(lines[0], matplotlib.lines.Line2D)

    def test_2d_hull_splines(self):
        lines = plot_2dhull(self.data, ax=self.ax, splines=True)
        self.assertIsInstance(lines[0], matplotlib.lines.Line2D)

    def tearDown(self):
        plt.close("all")
class TestRectFromCentre(unittest.TestCase):
    """Tests the rect_from_centre utility function."""

    def setUp(self):
        self.xy = (0.5, 1)

    def test_default(self):
        patch = rect_from_centre(*self.xy, dx=0.5, dy=1.0)
        self.assertIsInstance(patch, matplotlib.patches.Rectangle)
class TestNaNScatter(unittest.TestCase):
    """Tests the nan_scatter utility function."""

    def setUp(self):
        # some x-y data with nans
        self.xs = np.random.randn(20)
        self.ys = np.random.randn(20)
        self.xs[self.xs < -0.5] = np.nan
        self.ys[self.ys < -0.8] = np.nan

    def test_default(self):
        ax = nan_scatter(self.xs, self.ys)
        self.assertIsInstance(ax, matplotlib.axes.Axes)

    def test_secondary_plotting(self):
        """Test re-plotting on existing axes with a divider."""
        ax = nan_scatter(self.xs[:10], self.ys[:10])
        ax = nan_scatter(self.xs[10:], self.ys[10:], ax=ax)
        self.assertIsInstance(ax, matplotlib.axes.Axes)
class TestInitSphericalOctant(unittest.TestCase):
    """Tests the init_spherical_octant utility function."""

    def test_default(self):
        init_spherical_octant()

    def test_angle_indicated(self):
        init_spherical_octant(angle_indicated=30)
# Allow running this test module directly.
if __name__ == "__main__":
    unittest.main()
6,122 | clear in memory account fields | ######################################################################
#
# File: b2sdk/account_info/in_memory.py
#
# Copyright 2019 Backblaze Inc. All Rights Reserved.
#
# License https://www.backblaze.com/using_b2_code.html
#
######################################################################
from __future__ import annotations
from functools import wraps
from .exception import MissingAccountData
from .upload_url_pool import UrlPoolAccountInfo
def _raise_missing_if_result_is_none(function):
"""
Raise MissingAccountData if function's result is None.
"""
@wraps(function)
def inner(*args, **kwargs):
assert function.__name__.startswith('get_')
result = function(*args, **kwargs)
if result is None:
# assumes that it is a "get_field_name"
raise MissingAccountData(function.__name__[4:])
return result
return inner
class InMemoryAccountInfo(UrlPoolAccountInfo):
    """
    *AccountInfo* which keeps all data in memory.
    """

    # Scalar fields reset by METHOD_NAME; the bucket cache (_buckets)
    # is handled separately because its empty state is a dict.
    _SCALAR_FIELDS = (
        '_account_id',
        '_application_key_id',
        '_allowed',
        '_api_url',
        '_application_key',
        '_auth_token',
        '_download_url',
        '_recommended_part_size',
        '_absolute_minimum_part_size',
        '_realm',
        '_s3_api_url',
    )

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.METHOD_NAME()

    def clear(self):
        self.METHOD_NAME()
        return super().clear()

    def METHOD_NAME(self):
        """Reset every cached account field to its initial empty state."""
        for field in self._SCALAR_FIELDS:
            setattr(self, field, None)
        self._buckets = {}

    def _set_auth_data(
        self, account_id, auth_token, api_url, download_url, recommended_part_size,
        absolute_minimum_part_size, application_key, realm, s3_api_url, allowed, application_key_id
    ):
        """Store the values returned by an authorize-account call."""
        self._account_id = account_id
        self._application_key_id = application_key_id
        self._auth_token = auth_token
        self._api_url = api_url
        self._download_url = download_url
        self._absolute_minimum_part_size = absolute_minimum_part_size
        self._recommended_part_size = recommended_part_size
        self._application_key = application_key
        self._realm = realm
        self._s3_api_url = s3_api_url
        self._allowed = allowed

    def refresh_entire_bucket_name_cache(self, name_id_iterable):
        """Replace the whole bucket name -> id cache."""
        self._buckets = dict(name_id_iterable)

    def get_bucket_id_or_none_from_bucket_name(self, bucket_name):
        return self._buckets.get(bucket_name)

    def get_bucket_name_or_none_from_bucket_id(self, bucket_id: str) -> str | None:
        """Reverse lookup: the cached name for *bucket_id*, or None."""
        return next(
            (name for name, cached_id in self._buckets.items() if cached_id == bucket_id),
            None,
        )

    def list_bucket_names_ids(self) -> list[tuple[str, str]]:
        return list(self._buckets.items())

    def save_bucket(self, bucket):
        self._buckets[bucket.name] = bucket.id_

    def remove_bucket_name(self, bucket_name):
        # pop with a default: removing an unknown name is a no-op.
        self._buckets.pop(bucket_name, None)

    @_raise_missing_if_result_is_none
    def get_account_id(self):
        return self._account_id

    @_raise_missing_if_result_is_none
    def get_application_key_id(self):
        return self._application_key_id

    @_raise_missing_if_result_is_none
    def get_account_auth_token(self):
        return self._auth_token

    @_raise_missing_if_result_is_none
    def get_api_url(self):
        return self._api_url

    @_raise_missing_if_result_is_none
    def get_application_key(self):
        return self._application_key

    @_raise_missing_if_result_is_none
    def get_download_url(self):
        return self._download_url

    @_raise_missing_if_result_is_none
    def get_recommended_part_size(self):
        return self._recommended_part_size

    @_raise_missing_if_result_is_none
    def get_absolute_minimum_part_size(self):
        return self._absolute_minimum_part_size

    @_raise_missing_if_result_is_none
    def get_realm(self):
        return self._realm

    @_raise_missing_if_result_is_none
    def get_allowed(self):
        return self._allowed

    @_raise_missing_if_result_is_none
    def get_s3_api_url(self):
        return self._s3_api_url
6,123 | fix input | import torch
from torch import Tensor
from torch_geometric.nn.models import LabelPropagation
from torch_geometric.typing import Adj, OptTensor
from torch_geometric.utils import one_hot
class CorrectAndSmooth(torch.nn.Module):
    r"""The correct and smooth (C&S) post-processing model from the
    `"Combining Label Propagation And Simple Models Out-performs Graph Neural
    Networks"
    <https://arxiv.org/abs/2010.13993>`_ paper, where soft predictions
    :math:`\mathbf{Z}` (obtained from a simple base predictor) are
    first corrected based on ground-truth training
    label information :math:`\mathbf{Y}` and residual propagation

    .. math::
        \mathbf{e}^{(0)}_i &= \begin{cases}
            \mathbf{y}_i - \mathbf{z}_i, & \text{if }i
            \text{ is training node,}\\
            \mathbf{0}, & \text{else}
        \end{cases}

    .. math::
        \mathbf{E}^{(\ell)} &= \alpha_1 \mathbf{D}^{-1/2}\mathbf{A}
        \mathbf{D}^{-1/2} \mathbf{E}^{(\ell - 1)} +
        (1 - \alpha_1) \mathbf{E}^{(\ell - 1)}

        \mathbf{\hat{Z}} &= \mathbf{Z} + \gamma \cdot \mathbf{E}^{(L_1)},

    where :math:`\gamma` denotes the scaling factor (either fixed or
    automatically determined), and then smoothed over the graph via label
    propagation

    .. math::
        \mathbf{\hat{z}}^{(0)}_i &= \begin{cases}
            \mathbf{y}_i, & \text{if }i\text{ is training node,}\\
            \mathbf{\hat{z}}_i, & \text{else}
        \end{cases}

    .. math::
        \mathbf{\hat{Z}}^{(\ell)} = \alpha_2 \mathbf{D}^{-1/2}\mathbf{A}
        \mathbf{D}^{-1/2} \mathbf{\hat{Z}}^{(\ell - 1)} +
        (1 - \alpha_2) \mathbf{\hat{Z}}^{(\ell - 1)}

    to obtain the final prediction :math:`\mathbf{\hat{Z}}^{(L_2)}`.

    .. note::
        For an example of using the C&S model, see
        `examples/correct_and_smooth.py
        <https://github.com/pyg-team/pytorch_geometric/blob/master/examples/
        correct_and_smooth.py>`_.

    Args:
        num_correction_layers (int): The number of propagations :math:`L_1`.
        correction_alpha (float): The :math:`\alpha_1` coefficient.
        num_smoothing_layers (int): The number of propagations :math:`L_2`.
        smoothing_alpha (float): The :math:`\alpha_2` coefficient.
        autoscale (bool, optional): If set to :obj:`True`, will automatically
            determine the scaling factor :math:`\gamma`. (default: :obj:`True`)
        scale (float, optional): The scaling factor :math:`\gamma`, in case
            :obj:`autoscale = False`. (default: :obj:`1.0`)
    """
    def __init__(self, num_correction_layers: int, correction_alpha: float,
                 num_smoothing_layers: int, smoothing_alpha: float,
                 autoscale: bool = True, scale: float = 1.0):
        super().__init__()
        self.autoscale = autoscale
        self.scale = scale
        # Two independent label-propagation operators: one for the residual
        # correction step, one for the final smoothing step.
        self.prop1 = LabelPropagation(num_correction_layers, correction_alpha)
        self.prop2 = LabelPropagation(num_smoothing_layers, smoothing_alpha)

    def forward(self, y_soft: Tensor, *args) -> Tensor:  # pragma: no cover
        r"""Applies both :meth:`correct` and :meth:`smooth`."""
        y_soft = self.correct(y_soft, *args)
        return self.smooth(y_soft, *args)

    def correct(self, y_soft: Tensor, y_true: Tensor, mask: Tensor,
                edge_index: Adj, edge_weight: OptTensor = None) -> Tensor:
        r"""
        Args:
            y_soft (torch.Tensor): The soft predictions :math:`\mathbf{Z}`
                obtained from a simple base predictor.
            y_true (torch.Tensor): The ground-truth label information
                :math:`\mathbf{Y}` of training nodes.
            mask (torch.Tensor): A mask or index tensor denoting which nodes
                were used for training.
            edge_index (torch.Tensor or SparseTensor): The edge connectivity.
            edge_weight (torch.Tensor, optional): The edge weights.
                (default: :obj:`None`)
        """
        # Number of training nodes, for either boolean-mask or index form.
        numel = int(mask.sum()) if mask.dtype == torch.bool else mask.size(0)
        assert y_true.size(0) == numel

        # Integer class labels are one-hot encoded to match y_soft's shape.
        if y_true.dtype == torch.long and y_true.size(0) == y_true.numel():
            y_true = one_hot(y_true.view(-1), num_classes=y_soft.size(-1),
                             dtype=y_soft.dtype)

        # Residual error: zero everywhere except at training nodes.
        error = torch.zeros_like(y_soft)
        error[mask] = y_true - y_soft[mask]

        if self.autoscale:
            # Propagate residuals, clamping each step to [-1, 1].
            smoothed_error = self.prop1(error, edge_index,
                                        edge_weight=edge_weight,
                                        post_step=lambda x: x.clamp_(-1., 1.))
            # Per-node scale derived from the mean absolute training error;
            # guard against blow-ups from near-zero smoothed errors.
            sigma = error[mask].abs().sum() / numel
            scale = sigma / smoothed_error.abs().sum(dim=1, keepdim=True)
            scale[scale.isinf() | (scale > 1000)] = 1.0

            return y_soft + scale * smoothed_error
        else:
            def METHOD_NAME(x):
                # Re-inject the exact training residuals after every
                # propagation step so they are never diluted.
                x[mask] = error[mask]
                return x

            smoothed_error = self.prop1(error, edge_index,
                                        edge_weight=edge_weight,
                                        post_step=METHOD_NAME)

            return y_soft + self.scale * smoothed_error

    def smooth(self, y_soft: Tensor, y_true: Tensor, mask: Tensor,
               edge_index: Adj, edge_weight: OptTensor = None) -> Tensor:
        r"""
        Args:
            y_soft (torch.Tensor): The corrected predictions :math:`\mathbf{Z}`
                obtained from :meth:`correct`.
            y_true (torch.Tensor): The ground-truth label information
                :math:`\mathbf{Y}` of training nodes.
            mask (torch.Tensor): A mask or index tensor denoting which nodes
                were used for training.
            edge_index (torch.Tensor or SparseTensor): The edge connectivity.
            edge_weight (torch.Tensor, optional): The edge weights.
                (default: :obj:`None`)
        """
        numel = int(mask.sum()) if mask.dtype == torch.bool else mask.size(0)
        assert y_true.size(0) == numel

        if y_true.dtype == torch.long and y_true.size(0) == y_true.numel():
            y_true = one_hot(y_true.view(-1), num_classes=y_soft.size(-1),
                             dtype=y_soft.dtype)

        # Overwrite training nodes with ground truth before propagating.
        y_soft = y_soft.clone()
        y_soft[mask] = y_true

        return self.prop2(y_soft, edge_index, edge_weight=edge_weight)

    def __repr__(self):
        L1, alpha1 = self.prop1.num_layers, self.prop1.alpha
        L2, alpha2 = self.prop2.num_layers, self.prop2.alpha
        return (f'{self.__class__.__name__}(\n'
                f'  correct: num_layers={L1}, alpha={alpha1}\n'
                f'  smooth:  num_layers={L2}, alpha={alpha2}\n'
                f'  autoscale={self.autoscale}, scale={self.scale}\n'
                ')')
6,124 | on 200 | # --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
#
# Code generated by aaz-dev-tools
# --------------------------------------------------------------------------------------------
# pylint: skip-file
# flake8: noqa
from azure.cli.core.aaz import *
@register_command(
    "network nsg rule show",
)
class Show(AAZCommand):
    """Get the details of a network security group rule.

    :example: Get the details of a network security group rule.
        az network nsg rule show -g MyResourceGroup --nsg-name MyNsg -n MyNsgRule
    """

    # NOTE(review): this module is generated by aaz-dev-tools; manual edits
    # are normally overwritten on regeneration.
    _aaz_info = {
        "version": "2015-06-15",
        "resources": [
            ["mgmt-plane", "/subscriptions/{}/resourcegroups/{}/providers/microsoft.network/networksecuritygroups/{}/securityrules/{}", "2015-06-15"],
        ]
    }

    def _handler(self, command_args):
        super()._handler(command_args)
        self._execute_operations()
        return self._output()

    _args_schema = None

    @classmethod
    def _build_arguments_schema(cls, *args, **kwargs):
        # Cached: the schema is built once per class.
        if cls._args_schema is not None:
            return cls._args_schema
        cls._args_schema = super()._build_arguments_schema(*args, **kwargs)

        # define Arg Group ""

        _args_schema = cls._args_schema
        _args_schema.nsg_name = AAZStrArg(
            options=["--nsg-name"],
            help="Name of the network security group.",
            required=True,
            id_part="name",
        )
        _args_schema.resource_group = AAZResourceGroupNameArg(
            required=True,
        )
        _args_schema.name = AAZStrArg(
            options=["-n", "--name"],
            help="Name of the network security group rule.",
            required=True,
            id_part="child_name_1",
        )
        return cls._args_schema

    def _execute_operations(self):
        # Single GET; the response is stored in ctx.vars.instance.
        self.pre_operations()
        self.SecurityRulesGet(ctx=self.ctx)()
        self.post_operations()

    @register_callback
    def pre_operations(self):
        pass

    @register_callback
    def post_operations(self):
        pass

    def _output(self, *args, **kwargs):
        result = self.deserialize_output(self.ctx.vars.instance, client_flatten=True)
        return result

    class SecurityRulesGet(AAZHttpOperation):
        CLIENT_TYPE = "MgmtClient"

        def __call__(self, *args, **kwargs):
            request = self.make_request()
            session = self.client.send_request(request=request, stream=False, **kwargs)
            if session.http_response.status_code in [200]:
                return self.METHOD_NAME(session)

            return self.on_error(session.http_response)

        @property
        def url(self):
            return self.client.format_url(
                "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkSecurityGroups/{networkSecurityGroupName}/securityRules/{securityRuleName}",
                **self.url_parameters
            )

        @property
        def method(self):
            return "GET"

        @property
        def error_format(self):
            return "ODataV4Format"

        @property
        def url_parameters(self):
            parameters = {
                **self.serialize_url_param(
                    "networkSecurityGroupName", self.ctx.args.nsg_name,
                    required=True,
                ),
                **self.serialize_url_param(
                    "resourceGroupName", self.ctx.args.resource_group,
                    required=True,
                ),
                **self.serialize_url_param(
                    "securityRuleName", self.ctx.args.name,
                    required=True,
                ),
                **self.serialize_url_param(
                    "subscriptionId", self.ctx.subscription_id,
                    required=True,
                ),
            }
            return parameters

        @property
        def query_parameters(self):
            parameters = {
                **self.serialize_query_param(
                    "api-version", "2015-06-15",
                    required=True,
                ),
            }
            return parameters

        @property
        def header_parameters(self):
            parameters = {
                **self.serialize_header_param(
                    "Accept", "application/json",
                ),
            }
            return parameters

        def METHOD_NAME(self, session):
            """Deserialize a 200 response into ctx.vars.instance."""
            data = self.deserialize_http_content(session)
            self.ctx.set_var(
                "instance",
                data,
                schema_builder=self._build_schema_on_200
            )

        _schema_on_200 = None

        @classmethod
        def _build_schema_on_200(cls):
            # Cached: the response schema is built once per class.
            if cls._schema_on_200 is not None:
                return cls._schema_on_200

            cls._schema_on_200 = AAZObjectType()

            _schema_on_200 = cls._schema_on_200
            _schema_on_200.etag = AAZStrType()
            _schema_on_200.id = AAZStrType()
            _schema_on_200.name = AAZStrType()
            _schema_on_200.properties = AAZObjectType(
                flags={"client_flatten": True},
            )

            properties = cls._schema_on_200.properties
            properties.access = AAZStrType(
                flags={"required": True},
            )
            properties.description = AAZStrType()
            properties.destination_address_prefix = AAZStrType(
                serialized_name="destinationAddressPrefix",
                flags={"required": True},
            )
            properties.destination_port_range = AAZStrType(
                serialized_name="destinationPortRange",
            )
            properties.direction = AAZStrType(
                flags={"required": True},
            )
            properties.priority = AAZIntType()
            properties.protocol = AAZStrType(
                flags={"required": True},
            )
            properties.provisioning_state = AAZStrType(
                serialized_name="provisioningState",
            )
            properties.source_address_prefix = AAZStrType(
                serialized_name="sourceAddressPrefix",
                flags={"required": True},
            )
            properties.source_port_range = AAZStrType(
                serialized_name="sourcePortRange",
            )

            return cls._schema_on_200
class _ShowHelper:
    """Helper class for Show"""

# Public surface of this generated module.
__all__ = ["Show"]
6,125 | test inf | import unittest
import os
import tempfile
from filecmp import cmp
import py_compile
from pvactools.lib.filter import Filter, FilterCriterion
from tests.utils import *
class FilterTests(unittest.TestCase):
    """Tests for pvactools.lib.filter.Filter, one per comparison operator."""

    @classmethod
    def setUpClass(cls):
        cls.filter_path = os.path.join(pvactools_directory(), "pvactools", "lib", "filter.py")
        cls.test_data_path = os.path.join(pvactools_directory(), "tests", "test_data", "filter")

    def test_module_compiles(self):
        # Fixed: this was named ``module_compiles`` and therefore never
        # collected by unittest; the ``test_`` prefix makes it actually run.
        self.assertTrue(py_compile.compile(self.filter_path))

    def _assert_filter_output(self, input_name, expected_name, criterion):
        """Run Filter on *input_name* with one *criterion* and compare the
        output to the *expected_name* fixture.

        Shared by every operator test below; the original methods were
        eight near-identical copies of this body.
        """
        output_file = tempfile.NamedTemporaryFile()
        self.assertFalse(Filter(
            os.path.join(self.test_data_path, input_name),
            output_file.name,
            [criterion],
        ).execute())
        self.assertTrue(cmp(
            output_file.name,
            os.path.join(self.test_data_path, expected_name),
            False
        ))

    def test_less_than(self):
        self._assert_filter_output(
            'Test.combined.parsed.tsv',
            'Test.filtered.lt.tsv',
            FilterCriterion("Median MT IC50 Score", "<", "500", exclude_nas=False),
        )

    def test_less_or_equal(self):
        self._assert_filter_output(
            'Test.combined.parsed.tsv',
            'Test.filtered.le.tsv',
            FilterCriterion("Median MT IC50 Score", "<=", "500", exclude_nas=False),
        )

    def test_equal(self):
        self._assert_filter_output(
            'Test.combined.parsed.tsv',
            'Test.filtered.eq.tsv',
            FilterCriterion("Median MT IC50 Score", "==", "500", exclude_nas=False),
        )

    def test_greater_or_equal(self):
        self._assert_filter_output(
            'Test.combined.parsed.tsv',
            'Test.filtered.ge.tsv',
            FilterCriterion("Median MT IC50 Score", ">=", "500", exclude_nas=False),
        )

    def test_greater_than(self):
        self._assert_filter_output(
            'Test.combined.parsed.tsv',
            'Test.filtered.gt.tsv',
            FilterCriterion("Median MT IC50 Score", ">", "500", exclude_nas=False),
        )

    def test_NA(self):
        # NA values are kept when exclude_nas is False.
        self._assert_filter_output(
            'Test.combined.parsed.tsv',
            'Test.filtered.NA.tsv',
            FilterCriterion("Tumor RNA Depth", ">", "100", exclude_nas=False),
        )

    def test_exclude_NA(self):
        # NA values are dropped when exclude_nas is True.
        self._assert_filter_output(
            'Test.combined.parsed.tsv',
            'Test.filtered.exclude_NA.tsv',
            FilterCriterion("Tumor RNA Depth", ">", "100", exclude_nas=True),
        )

    def METHOD_NAME(self):
        # Infinite values must survive numeric comparison.
        self._assert_filter_output(
            'input.inf.tsv',
            'output.inf.tsv',
            FilterCriterion("Corresponding Fold Change", ">", "100", exclude_nas=True),
        )
6,126 | is custom emoji | # SPDX-License-Identifier: MIT
from __future__ import annotations
import re
from typing import TYPE_CHECKING, Any, Dict, Optional, Tuple, Union
from . import utils
from .asset import Asset, AssetMixin
__all__ = ("PartialEmoji",)
if TYPE_CHECKING:
from datetime import datetime
from typing_extensions import Self
from .emoji import Emoji
from .state import ConnectionState
from .types.activity import ActivityEmoji as ActivityEmojiPayload
from .types.emoji import Emoji as EmojiPayload, PartialEmoji as PartialEmojiPayload
class _EmojiTag:
    # Internal marker base class: emoji-like objects (e.g. Emoji, PartialEmoji)
    # inherit from it so isinstance checks can treat them uniformly (see
    # PartialEmoji.__eq__ below).
    __slots__ = ()
    # Snowflake ID of the emoji; concrete subclasses are expected to provide it.
    id: int
    def _to_partial(self) -> PartialEmoji:
        # Subclasses must return a PartialEmoji view of themselves.
        raise NotImplementedError
class PartialEmoji(_EmojiTag, AssetMixin):
    """Represents a "partial" emoji.
    This model will be given in two scenarios:
    - "Raw" data events such as :func:`on_raw_reaction_add`
    - Custom emoji that the bot cannot see from e.g. :attr:`Message.reactions`
    .. container:: operations
    .. describe:: x == y
    Checks if two emoji are the same.
    .. describe:: x != y
    Checks if two emoji are not the same.
    .. describe:: hash(x)
    Return the emoji's hash.
    .. describe:: str(x)
    Returns the emoji rendered for Discord.
    Attributes
    ----------
    name: Optional[:class:`str`]
    The custom emoji name, if applicable, or the unicode codepoint
    of the non-custom emoji. This can be ``None`` if the emoji
    got deleted (e.g. removing a reaction with a deleted emoji).
    animated: :class:`bool`
    Whether the emoji is animated or not.
    id: Optional[:class:`int`]
    The ID of the custom emoji, if applicable.
    """
    __slots__ = ("animated", "name", "id")
    # Matches custom-emoji strings like "<a:name:123456789012345678>"; the
    # leading "<", trailing ">", "a" (animated) and ":" separators are optional
    # so bare "name:id" / "a:name:id" forms are accepted too.
    _CUSTOM_EMOJI_RE = re.compile(
        r"<?(?P<animated>a)?:?(?P<name>[A-Za-z0-9\_]+):(?P<id>[0-9]{17,19})>?"
    )
    if TYPE_CHECKING:
        # Narrow the inherited `id: int` annotation: unicode emoji have no ID.
        id: Optional[int]
    def __init__(self, *, name: str, animated: bool = False, id: Optional[int] = None) -> None:
        self.animated = animated
        self.name = name
        self.id = id
        # Connection state is attached later via `with_state`; required by
        # AssetMixin.read() for HTTP access.
        self._state = None
    @classmethod
    def from_dict(
        cls, data: Union[PartialEmojiPayload, ActivityEmojiPayload, Dict[str, Any]]
    ) -> Self:
        # Tolerant constructor for raw gateway payloads: missing/None name
        # becomes "" and a missing id stays None (unicode emoji).
        return cls(
            animated=data.get("animated", False),
            id=utils._get_as_snowflake(data, "id"),
            name=data.get("name") or "",
        )
    @classmethod
    def from_str(cls, value: str) -> Self:
        """Converts a Discord string representation of an emoji to a :class:`PartialEmoji`.
        The formats accepted are:
        - ``a:name:id``
        - ``<a:name:id>``
        - ``name:id``
        - ``<:name:id>``
        If the format does not match then it is assumed to be a unicode emoji.
        .. versionadded:: 2.0
        Parameters
        ----------
        value: :class:`str`
            The string representation of an emoji.
        Returns
        -------
        :class:`PartialEmoji`
            The partial emoji from this string.
        """
        match = cls._CUSTOM_EMOJI_RE.match(value)
        if match is not None:
            groups = match.groupdict()
            animated = bool(groups["animated"])
            emoji_id = int(groups["id"])
            name = groups["name"]
            return cls(name=name, animated=animated, id=emoji_id)
        # No custom-emoji syntax: treat the whole string as a unicode emoji.
        return cls(name=value, id=None, animated=False)
    def to_dict(self) -> EmojiPayload:
        # `animated` is only serialized when True, matching the API payload shape.
        o: EmojiPayload = {
            "name": self.name,
            "id": self.id,
        }
        if self.animated:
            o["animated"] = self.animated
        return o
    def _to_partial(self) -> PartialEmoji:
        # Already partial; satisfies the _EmojiTag contract.
        return self
    @classmethod
    def with_state(
        cls,
        state: ConnectionState,
        *,
        name: str,
        animated: bool = False,
        id: Optional[int] = None,
    ) -> Self:
        # Internal constructor that attaches the connection state so read() works.
        self = cls(name=name, animated=animated, id=id)
        self._state = state
        return self
    def __str__(self) -> str:
        # Unicode emoji render as themselves; custom emoji use Discord's
        # <:name:id> / <a:name:id> markup.
        if self.id is None:
            return self.name
        if self.animated:
            return f"<a:{self.name}:{self.id}>"
        return f"<:{self.name}:{self.id}>"
    def __repr__(self) -> str:
        return (
            f"<{self.__class__.__name__} animated={self.animated} name={self.name!r} id={self.id}>"
        )
    def __eq__(self, other: Any) -> bool:
        # Unicode emoji compare by name; custom emoji compare by snowflake ID
        # against any _EmojiTag (Emoji or PartialEmoji).
        if self.is_unicode_emoji():
            return isinstance(other, PartialEmoji) and self.name == other.name
        if isinstance(other, _EmojiTag):
            return self.id == other.id
        return False
    def __ne__(self, other: Any) -> bool:
        return not self.__eq__(other)
    def __hash__(self) -> int:
        # Hash on (id, name) — consistent with __eq__ for both emoji kinds.
        return hash((self.id, self.name))
    def METHOD_NAME(self) -> bool:
        """Whether the partial emoji is a custom non-Unicode emoji.
        :return type: :class:`bool`
        """
        return self.id is not None
    def is_unicode_emoji(self) -> bool:
        """Whether the partial emoji is a Unicode emoji.
        :return type: :class:`bool`
        """
        return self.id is None
    def _as_reaction(self) -> str:
        # Reaction endpoints take "name" for unicode and "name:id" for custom emoji.
        if self.id is None:
            return self.name
        return f"{self.name}:{self.id}"
    @property
    def created_at(self) -> Optional[datetime]:
        """Optional[:class:`datetime.datetime`]: Returns the emoji's creation time in UTC, or None if it's a Unicode emoji.
        .. versionadded:: 1.6
        """
        if self.id is None:
            return None
        return utils.snowflake_time(self.id)
    @property
    def url(self) -> str:
        """:class:`str`: Returns the URL of the emoji, if it is custom.
        If this isn't a custom emoji then an empty string is returned
        """
        if self.is_unicode_emoji():
            return ""
        fmt = "gif" if self.animated else "png"
        return f"{Asset.BASE}/emojis/{self.id}.{fmt}"
    async def read(self) -> bytes:
        """|coro|
        Retrieves the data of this emoji as a :class:`bytes` object.
        .. versionchanged:: 2.6
            Raises :exc:`TypeError` instead of ``InvalidArgument``.
        Raises
        ------
        TypeError
            The emoji is not a custom emoji.
        DiscordException
            There was no internal connection state.
        HTTPException
            Downloading the asset failed.
        NotFound
            The asset was deleted.
        Returns
        -------
        :class:`bytes`
            The content of the asset.
        """
        if self.is_unicode_emoji():
            raise TypeError("PartialEmoji is not a custom emoji")
        return await super().read()
    # utility method for unusual emoji model in forums
    # (e.g. default reaction, tag emoji)
    @staticmethod
    def _emoji_to_name_id(
        emoji: Optional[Union[str, Emoji, PartialEmoji]]
    ) -> Tuple[Optional[str], Optional[int]]:
        if emoji is None:
            return None, None
        if isinstance(emoji, str):
            emoji = PartialEmoji.from_str(emoji)
        # note: API only supports exactly one of `name` and `id` being set
        if emoji.id:
            return None, emoji.id
        else:
            return emoji.name, None
6,127 | get pending lists | from datetime import datetime
from loguru import logger
from sqlalchemy import Boolean, Column, DateTime, Integer, Unicode, func, select
from sqlalchemy.orm import relationship
from sqlalchemy.sql.elements import and_
from sqlalchemy.sql.schema import ForeignKey
from flexget import db_schema
from flexget.db_schema import versioned_base
from flexget.entry import Entry
from flexget.utils import json, serialization
from flexget.utils.database import entry_synonym, with_session
from flexget.utils.sqlalchemy_utils import table_schema
plugin_name = 'pending_list'
# Plugin-scoped logger so messages are attributed to pending_list.
logger = logger.bind(name=plugin_name)
# Versioned declarative base; current schema version is 1 (see upgrade()).
Base = versioned_base(plugin_name, 1)
@db_schema.upgrade(plugin_name)
def upgrade(ver, session):
    """Migrate the pending_list plugin schema.

    0 -> 1: re-serialize every stored entry's JSON through the current
    serialization layer, coercing the title to a string along the way.
    Returns the resulting schema version.
    """
    if ver is None:
        ver = 0
    if ver == 0:
        table = table_schema('wait_list_entries', session)
        for row in session.execute(select(table.c.id, table.c.json)):
            if not row['json']:
                # Seems there could be invalid data somehow. See #2590
                continue
            data = json.loads(row['json'], decode_datetime=True)
            # If title looked like a date, make sure it's a string
            title = str(data.pop('title'))
            e = Entry(title=title, **data)
            session.execute(
                table.update().where(table.c.id == row['id']).values(json=serialization.dumps(e))
            )
        ver = 1
    return ver
class PendingListList(Base):
    """A named pending list; its entries cascade-delete with the list."""
    __tablename__ = 'pending_list_lists'
    id = Column(Integer, primary_key=True)
    name = Column(Unicode, unique=True)
    added = Column(DateTime, default=datetime.now)
    # lazy='dynamic' exposes `entries` as a query so callers can filter/slice;
    # the cascade removes orphaned entries when the list is deleted.
    entries = relationship(
        'PendingListEntry', backref='list', cascade='all, delete, delete-orphan', lazy='dynamic'
    )
    def to_dict(self):
        """Serialize list metadata for the API layer."""
        return {'id': self.id, 'name': self.name, 'added_on': self.added}
class PendingListEntry(Base):
    """A single entry queued in a pending list, awaiting approval."""
    __tablename__ = 'wait_list_entries'
    id = Column(Integer, primary_key=True)
    list_id = Column(Integer, ForeignKey(PendingListList.id), nullable=False)
    added = Column(DateTime, default=datetime.now)
    title = Column(Unicode)
    original_url = Column(Unicode)
    # Serialized Entry payload; `entry` exposes it as a live Entry object.
    _json = Column('json', Unicode)
    entry = entry_synonym('_json')
    approved = Column(Boolean)
    def __init__(self, entry, pending_list_id):
        self.title = entry['title']
        # Prefer the original_url recorded on the entry; fall back to its url.
        self.original_url = entry.get('original_url') or entry['url']
        self.entry = entry
        self.list_id = pending_list_id
        # New entries always start unapproved.
        self.approved = False
    def __repr__(self):
        return '<PendingListEntry,title={},original_url={},approved={}>'.format(
            self.title,
            self.original_url,
            self.approved,
        )
    def to_dict(self):
        """Serialize the entry (including its coerced JSON payload) for the API."""
        return {
            'id': self.id,
            'list_id': self.list_id,
            'added_on': self.added,
            'title': self.title,
            'original_url': self.original_url,
            'entry': json.coerce(self.entry),
            'approved': self.approved,
        }
@with_session
def METHOD_NAME(name=None, session=None):
    """Return all pending lists, optionally filtered by a name substring."""
    logger.debug('retrieving pending lists')
    lists_query = session.query(PendingListList)
    if name:
        logger.debug('searching for pending lists with name {}', name)
        lists_query = lists_query.filter(PendingListList.name.contains(name))
    return lists_query.all()
@with_session
def get_list_by_exact_name(name, session=None):
    """Return the single pending list whose name matches case-insensitively."""
    logger.debug('returning pending list with name {}', name)
    name_matches = func.lower(PendingListList.name) == name.lower()
    return session.query(PendingListList).filter(name_matches).one()
@with_session
def get_list_by_id(list_id, session=None):
    """Return the pending list with primary key *list_id* (raises if absent)."""
    logger.debug('returning pending list with id {}', list_id)
    id_query = session.query(PendingListList).filter(PendingListList.id == list_id)
    return id_query.one()
@with_session
def delete_list_by_id(list_id, session=None):
    """Delete the pending list with *list_id* (its entries cascade-delete)."""
    target_list = get_list_by_id(list_id=list_id, session=session)
    if target_list:
        logger.debug('deleting pending list with id {}', list_id)
        session.delete(target_list)
@with_session
def get_entries_by_list_id(
    list_id,
    start=None,
    stop=None,
    order_by='title',
    descending=False,
    approved=False,
    filter=None,
    entry_ids=None,
    session=None,
):
    """Return entries of a pending list with optional filtering, ordering and paging.

    Args:
        list_id: ID of the pending list to query.
        start, stop: slice bounds applied after ordering (both optional).
        order_by: name of a PendingListEntry attribute to sort by.
        descending: reverse the sort order when True.
        approved: when truthy, restrict to entries whose ``approved`` column
            equals this value.
        filter: case-insensitive substring match against the entry title
            (shadows the builtin ``filter``; name kept for API compatibility).
        entry_ids: optional whitelist of entry IDs.
        session: DB session (injected by ``with_session``).
    """
    logger.debug('querying entries from pending list with id {}', list_id)
    query = session.query(PendingListEntry).filter(PendingListEntry.list_id == list_id)
    if filter:
        query = query.filter(func.lower(PendingListEntry.title).contains(filter.lower()))
    if approved:
        # Bug fix: the original used `PendingListEntry.approved is approved`,
        # which compares the Column object's identity to a Python bool — always
        # False — so no SQL condition was ever emitted. `==` builds the intended
        # SQLAlchemy binary expression.
        query = query.filter(PendingListEntry.approved == approved)
    if entry_ids:
        query = query.filter(PendingListEntry.id.in_(entry_ids))
    if descending:
        query = query.order_by(getattr(PendingListEntry, order_by).desc())
    else:
        query = query.order_by(getattr(PendingListEntry, order_by))
    return query.slice(start, stop).all()
@with_session
def get_entry_by_title(list_id, title, session=None):
    """Return the first entry titled *title* in list *list_id*, else None."""
    entry_list = get_list_by_id(list_id=list_id, session=session)
    if not entry_list:
        return None
    logger.debug('fetching entry with title `{}` from list id {}', title, list_id)
    criteria = and_(PendingListEntry.title == title, PendingListEntry.list_id == list_id)
    return session.query(PendingListEntry).filter(criteria).first()
@with_session
def get_entry_by_id(list_id, entry_id, session=None):
    """Return the entry with *entry_id* in list *list_id* (raises if absent)."""
    logger.debug('fetching entry with id {} from list id {}', entry_id, list_id)
    criteria = and_(PendingListEntry.id == entry_id, PendingListEntry.list_id == list_id)
    return session.query(PendingListEntry).filter(criteria).one()
6,128 | content | """
Parser for the .lang translation format.
"""
import codecs
import re
from parsimonious.exceptions import (
ParseError as ParsimoniousParseError,
VisitationError,
)
from parsimonious.grammar import Grammar
from parsimonious.nodes import NodeVisitor
from pontoon.sync.exceptions import ParseError
from pontoon.sync.formats.base import ParsedResource
from pontoon.sync.vcs.models import VCSTranslation
# Sentinel stored in LangResource.children wherever the source file had a blank line.
BLANK_LINE = "blank_line"
# Matches {ok} / {l10n-extra} status tags appended to a translation line.
TAG_REGEX = re.compile(r"\{(ok|l10n-extra)\}")
class LangComment:
    """A ``#`` comment line from a .lang file, preserving its raw pieces."""
    def __init__(self, marker, METHOD_NAME, end):
        # marker: the run of '#' characters; second arg: the text after the
        # marker (untrimmed); end: the original line ending ('' at EOF).
        self.marker = marker
        self.raw_content = METHOD_NAME
        self.end = end
    @property
    def METHOD_NAME(self):
        # Trimmed comment text, used when attaching comments to entities.
        return self.raw_content.strip()
    @property
    def raw(self):
        # Reconstructs the comment line exactly as it appeared in the source.
        return self.marker + self.raw_content + self.end
class LangEntity(VCSTranslation):
    """A single source/translation pair parsed from a .lang file."""
    def __init__(self, source_string, translation_string, tags):
        super().__init__(
            key=source_string,  # Langfiles use the source as the key.
            context="",
            source_string=source_string,
            strings={None: translation_string},  # Langfiles lack plural support
            comments=[],
            fuzzy=False,  # Langfiles don't support fuzzy status
        )
        self.tags = set(tags)
        # If the translation matches the source string without the {ok}
        # tag, then the translation isn't actually valid, so we remove
        # it.
        if source_string == translation_string and "ok" not in tags:
            del self.strings[None]
    @property
    def extra(self):
        # Tags ({ok}, {l10n-extra}) ride along in `extra` so they round-trip
        # through save() (see LangResource.write_entity).
        return {"tags": list(self.tags)}
class LangResource(ParsedResource):
    """A parsed .lang file: an ordered list of entities, comments and blanks."""
    def __init__(self, path, children):
        self.path = path
        self.children = children
    @property
    def translations(self):
        # Only LangEntity children carry translations; comments and the
        # BLANK_LINE sentinel are skipped.
        return [c for c in self.children if isinstance(c, LangEntity)]
    def save(self, locale):
        # Re-serialize the resource, preserving the original ordering of
        # entities, comments and blank lines.
        with codecs.open(self.path, "w", "utf-8") as f:
            for child in self.children:
                if isinstance(child, LangEntity):
                    self.write_entity(f, child)
                elif isinstance(child, LangComment):
                    f.write(child.raw)
                elif child == BLANK_LINE:
                    f.write("\n")
    def write_entity(self, f, entity):
        # A serialized entity is the ";source" line followed by its
        # translation line, with {ok}-tag bookkeeping applied.
        f.write(f";{entity.source_string}\n")
        translation = entity.strings.get(None, None)
        if translation is None:
            # No translation? Output the source string and remove {ok}.
            translation = entity.source_string
            entity.tags.discard("ok")
        elif translation == entity.source_string:
            # Translation is equal to the source? Include {ok}.
            entity.tags.add("ok")
        elif translation != entity.source_string:
            # Translation is different? Remove {ok}, it's unneeded.
            entity.tags.discard("ok")
        if entity.extra.get("tags"):
            tags = [f"{{{t}}}" for t in entity.tags]
            translation = "{translation} {tags}".format(
                translation=translation, tags=" ".join(tags)
            )
        f.write(f"{translation}\n")
class LangVisitor(NodeVisitor):
    """Parsimonious visitor that turns a .lang parse tree into LangComment /
    LangEntity / BLANK_LINE children."""
    # PEG grammar: a file is a sequence of comments (#...), entities (a
    # ";source" line followed by a translation line) and blank lines.
    grammar = Grammar(
        r"""
        lang_file = (comment / entity / blank_line)*
        comment = "#"+ line_content line_ending
        line_content = ~r".*"
        line_ending = ~r"$\n?"m # Match at EOL and EOF without newline.
        blank_line = ~r"((?!\n)\s)*" line_ending
        entity = string translation
        string = ";" line_content line_ending
        translation = line_content line_ending
        """
    )
    def visit_lang_file(self, node, children):
        """
        Find comments that are associated with an entity and add them
        to the entity's comments list. Also assign order to entities.
        """
        comments = []
        order = 0
        for child in children:
            if isinstance(child, LangComment):
                comments.append(child)
                continue
            if isinstance(child, LangEntity):
                # Comments accumulated since the previous entity belong to
                # this one; the buffer resets afterwards.
                child.comments = [c.METHOD_NAME for c in comments]
                child.order = order
                order += 1
                comments = []
        return children
    def visit_comment(self, node, node_info):
        marker, METHOD_NAME, end = node_info
        return LangComment(node_text(marker), node_text(METHOD_NAME), node_text(end))
    def visit_blank_line(self, node, _):
        return BLANK_LINE
    def visit_entity(self, node, node_info):
        string, translation = node_info
        # Strip tags out of translation if they exist.
        tags = []
        tag_matches = list(re.finditer(TAG_REGEX, translation))
        if tag_matches:
            tags = [m.group(1) for m in tag_matches]
            # Everything from the first tag onward is trimmed off the text.
            translation = translation[: tag_matches[0].start()].strip()
        if translation == "":
            raise ParsimoniousParseError(
                "Blank translation for key {key} is not allowed in langfiles.".format(
                    key=string
                )
            )
        return LangEntity(string, translation, tags)
    def visit_string(self, node, node_info):
        marker, METHOD_NAME, end = node_info
        return METHOD_NAME.text.strip()
    def visit_translation(self, node, node_info):
        METHOD_NAME, end = node_info
        return METHOD_NAME.text.strip()
    def generic_visit(self, node, children):
        # Collapse single-child wrappers; fall back to the children list or
        # the node itself.
        if children and len(children) == 1:
            return children[0]
        else:
            return children or node
def node_text(node):
    """
    Flatten a Parsimonious node — or a list of nodes produced by grammar
    repetition — into its raw source text. ``None`` yields an empty string.
    """
    if node is None:
        return ""
    if isinstance(node, list):
        return "".join(part.text for part in node)
    return node.text
def parse(path, source_path=None, locale=None):
    """Parse the .lang file at *path* into a LangResource.

    Raises ParseError when the grammar or visitor rejects the content.
    """
    # utf-8-sig transparently strips a leading BOM if one is present.
    with codecs.open(path, "r", "utf-8-sig") as lang_file:
        raw_text = lang_file.read()
    try:
        parsed_children = LangVisitor().parse(raw_text)
    except (ParsimoniousParseError, VisitationError) as err:
        raise ParseError(f"Failed to parse {path}: {err}") from err
    return LangResource(path, parsed_children)
6,129 | experiment | import logging
from typing import Any, Dict, List, Optional
try:
from aim import Run, Text
except ModuleNotFoundError:
Run, Text = None, None
from llama_index.callbacks.base_handler import BaseCallbackHandler
from llama_index.callbacks.schema import CBEventType, EventPayload
# Module-level logger; WARNING keeps informational chatter quiet by default.
logger = logging.getLogger(__name__)
logger.setLevel(logging.WARNING)
class AimCallback(BaseCallbackHandler):
    """
    AimCallback callback class.
    Args:
        repo (:obj:`str`, optional):
            Aim repository path or Repo object to which Run object is bound.
            If skipped, default Repo is used.
        experiment_name (:obj:`str`, optional):
            Sets Run's `experiment` property. 'default' if not specified.
            Can be used later to query runs/sequences.
        system_tracking_interval (:obj:`int`, optional):
            Sets the tracking interval in seconds for system usage
            metrics (CPU, Memory, etc.). Set to `None` to disable
            system metrics tracking.
        log_system_params (:obj:`bool`, optional):
            Enable/Disable logging of system params such as installed packages,
            git info, environment variables, etc.
        capture_terminal_logs (:obj:`bool`, optional):
            Enable/Disable terminal stdout logging.
        event_starts_to_ignore (Optional[List[CBEventType]]):
            list of event types to ignore when tracking event starts.
        event_ends_to_ignore (Optional[List[CBEventType]]):
            list of event types to ignore when tracking event ends.
    """
    def __init__(
        self,
        repo: Optional[str] = None,
        experiment_name: Optional[str] = None,
        system_tracking_interval: Optional[int] = 1,
        log_system_params: Optional[bool] = True,
        capture_terminal_logs: Optional[bool] = True,
        event_starts_to_ignore: Optional[List[CBEventType]] = None,
        event_ends_to_ignore: Optional[List[CBEventType]] = None,
        run_params: Optional[Dict[str, Any]] = None,
    ) -> None:
        # Fail fast if the optional `aim` dependency is missing (Run/Text are
        # None in that case — see the guarded import at the top of the module).
        if Run is None:
            raise ModuleNotFoundError(
                "Please install aim to use the AimCallback: 'pip install aim'"
            )
        event_starts_to_ignore = (
            event_starts_to_ignore if event_starts_to_ignore else []
        )
        event_ends_to_ignore = event_ends_to_ignore if event_ends_to_ignore else []
        super().__init__(
            event_starts_to_ignore=event_starts_to_ignore,
            event_ends_to_ignore=event_ends_to_ignore,
        )
        self.repo = repo
        self.experiment_name = experiment_name
        self.system_tracking_interval = system_tracking_interval
        self.log_system_params = log_system_params
        self.capture_terminal_logs = capture_terminal_logs
        self._run: Optional[Any] = None
        self._run_hash = None
        # Monotonic step index shared by prompt/response/chunk tracking.
        self._llm_response_step = 0
        self.setup(run_params)
    def on_event_start(
        self,
        event_type: CBEventType,
        payload: Optional[Dict[str, Any]] = None,
        event_id: str = "",
        **kwargs: Any,
    ) -> str:
        """
        Args:
            event_type (CBEventType): event type to store.
            payload (Optional[Dict[str, Any]]): payload to store.
            event_id (str): event id to store.
        """
        # Event starts are not tracked; only ends carry the data we log.
        return ""
    def on_event_end(
        self,
        event_type: CBEventType,
        payload: Optional[Dict[str, Any]] = None,
        event_id: str = "",
        **kwargs: Any,
    ) -> None:
        """
        Args:
            event_type (CBEventType): event type to store.
            payload (Optional[Dict[str, Any]]): payload to store.
            event_id (str): event id to store.
        """
        if not self._run:
            raise ValueError("AimCallback failed to init properly.")
        if event_type is CBEventType.LLM and payload:
            # Completion-style payloads carry PROMPT/COMPLETION; chat-style
            # payloads carry MESSAGES/RESPONSE.
            if EventPayload.PROMPT in payload:
                llm_input = str(payload[EventPayload.PROMPT])
                llm_output = str(payload[EventPayload.COMPLETION])
            else:
                message = payload.get(EventPayload.MESSAGES, [])
                llm_input = "\n".join([str(x) for x in message])
                llm_output = str(payload[EventPayload.RESPONSE])
            self._run.track(
                Text(llm_input),
                name="prompt",
                step=self._llm_response_step,
                context={"event_id": event_id},
            )
            self._run.track(
                Text(llm_output),
                name="response",
                step=self._llm_response_step,
                context={"event_id": event_id},
            )
            self._llm_response_step += 1
        elif event_type is CBEventType.CHUNKING and payload:
            # Chunks share the current step; each chunk is distinguished by
            # chunk_id in the context.
            for chunk_id, chunk in enumerate(payload[EventPayload.CHUNKS]):
                self._run.track(
                    Text(chunk),
                    name="chunk",
                    step=self._llm_response_step,
                    context={"chunk_id": chunk_id, "event_id": event_id},
                )
    @property
    def METHOD_NAME(self) -> Run:
        # Lazily (re)create the Run on first access.
        if not self._run:
            self.setup()
        return self._run
    def setup(self, args: Optional[Dict[str, Any]] = None) -> None:
        """Create (or resume via the stored hash) the Aim Run and log *args*."""
        if not self._run:
            if self._run_hash:
                # A hash from a previous Run lets us resume the same Run.
                self._run = Run(
                    self._run_hash,
                    repo=self.repo,
                    system_tracking_interval=self.system_tracking_interval,
                    log_system_params=self.log_system_params,
                    capture_terminal_logs=self.capture_terminal_logs,
                )
            else:
                self._run = Run(
                    repo=self.repo,
                    METHOD_NAME=self.experiment_name,
                    system_tracking_interval=self.system_tracking_interval,
                    log_system_params=self.log_system_params,
                    capture_terminal_logs=self.capture_terminal_logs,
                )
                self._run_hash = self._run.hash
        # Log config parameters
        if args:
            try:
                for key in args:
                    self._run.set(key, args[key], strict=False)
            except Exception as e:
                logger.warning(f"Aim could not log config parameters -> {e}")
    def __del__(self) -> None:
        # Best-effort cleanup: close the Run if it is still active.
        if self._run and self._run.active:
            self._run.close()
    def start_trace(self, trace_id: Optional[str] = None) -> None:
        # Tracing is not used by this handler.
        pass
    def end_trace(
        self,
        trace_id: Optional[str] = None,
        trace_map: Optional[Dict[str, List[str]]] = None,
    ) -> None:
        # Tracing is not used by this handler.
        pass
6,130 | user id | from __future__ import annotations
import dataclasses
from collections.abc import Mapping
from rest_framework import status
from rest_framework.request import Request
from sentry import options
from sentry.services.hybrid_cloud.identity.model import RpcIdentity
from sentry.services.hybrid_cloud.identity.service import identity_service
from sentry.services.hybrid_cloud.integration import RpcIntegration, integration_service
from sentry.services.hybrid_cloud.user.model import RpcUser
from sentry.services.hybrid_cloud.user.service import user_service
from ..utils import logger, verify_signature
@dataclasses.dataclass(frozen=True)
class DiscordRequestError(Exception):
    """
    Something was invalid about the request from Discord.
    Includes the status the endpoint should return, based on the error.
    """
    # HTTP status code the interactions endpoint should respond with.
    status: int
class DiscordRequestTypes:
    # Interaction `type` values carried in the request payload from Discord.
    PING = 1
    COMMAND = 2
    MESSAGE_COMPONENT = 3
    MODAL_SUBMIT = 5
class DiscordMessageComponentTypes:
    # `component_type` values for message-component interactions.
    ACTION_ROW = 1
    BUTTON = 2
    SELECT = 3
    TEXT_INPUT = 4
class DiscordRequest:
    """
    A Request from Discord to our interactions endpoint.
    Handles request verification and data access.
    Raises DiscordRequestError whenever something goes wrong, including the
    appropriate response code that the endpoint should respond with.
    """
    def __init__(self, request: Request):
        self.request = request
        self._integration: RpcIntegration | None = None
        # Top-level interaction payload; the nested "data" object is exposed
        # via the `data` property below.
        self._data: Mapping[str, object] = self.request.data
        self._identity: RpcIdentity | None = None
        # Resolved Sentry user, populated by _validate_identity().
        self.user: RpcUser | None = None
    @property
    def integration(self) -> RpcIntegration | None:
        return self._integration
    @property
    def data(self) -> Mapping[str, object]:
        """This is the data object nested within request.data"""
        return self._data.get("data") or {}  # type: ignore
    @property
    def guild_id(self) -> str | None:
        guild_id = self._data.get("guild_id")
        return str(guild_id) if guild_id else None
    @property
    def channel_id(self) -> str | None:
        channel_id = self._data.get("channel_id")
        return str(channel_id) if channel_id else None
    @property
    def METHOD_NAME(self) -> str | None:
        # Discord user id from the nested member.user object; None when the
        # payload has no member info (or an unexpected shape).
        try:
            return self._data.get("member")["user"]["id"]  # type: ignore
        except (AttributeError, TypeError):
            return None
    @property
    def logging_data(self) -> Mapping[str, str | int]:
        # TODO: come back to this later and see what additional metadata makes sense to include here
        data: dict[str, str | int | None] = {
            "discord_guild_id": self.guild_id,
            "discord_channel_id": self.channel_id,
        }
        if self.integration:
            data["integration_id"] = self.integration.id
        if self.METHOD_NAME:
            data["discord_user_id"] = self.METHOD_NAME
        if self.has_identity():
            data["identity"] = self.get_identity_str()
        if self.is_command():
            data["command"] = self.get_command_name()
        if self.is_message_component():
            data["component_custom_id"] = self.get_component_custom_id()
        # Drop falsy values so logs only carry populated fields.
        return {k: v for k, v in data.items() if v}
    def validate(self) -> None:
        # Order matters: log first, then verify the signature before touching
        # integration/identity lookups.
        self._log_request()
        self.authorize()
        self.validate_integration()
        self._validate_identity()
    def authorize(self) -> None:
        # Verifies Discord's Ed25519 request signature over timestamp + body.
        public_key: str = options.get("discord.public-key")
        signature: str | None = self.request.META.get("HTTP_X_SIGNATURE_ED25519")
        timestamp: str | None = self.request.META.get("HTTP_X_SIGNATURE_TIMESTAMP")
        body: str = self.request.body.decode("utf-8")
        if signature and timestamp and verify_signature(public_key, signature, timestamp + body):
            return
        raise DiscordRequestError(status=status.HTTP_401_UNAUTHORIZED)
    def _validate_identity(self) -> None:
        self.user = self.get_identity_user()
    def get_identity_user(self) -> RpcUser | None:
        identity = self.get_identity()
        if not identity:
            return None
        return user_service.get_user(identity.METHOD_NAME)
    def get_identity(self) -> RpcIdentity | None:
        # Cached lookup: resolve the Discord identity provider for this guild,
        # then the identity matching the Discord user id.
        if not self._identity:
            provider = identity_service.get_provider(
                provider_type="discord", provider_ext_id=self.guild_id
            )
            self._identity = (
                identity_service.get_identity(
                    filter={"provider_id": provider.id, "identity_ext_id": self.METHOD_NAME}
                )
                if provider
                else None
            )
        return self._identity
    def get_identity_str(self) -> str | None:
        return self.user.email if self.user else None
    def validate_integration(self) -> None:
        self._integration = integration_service.get_integration(
            provider="discord", external_id=self.guild_id
        )
    def has_identity(self) -> bool:
        return self.user is not None
    def _log_request(self) -> None:
        self._info("discord.request")
    def _info(self, key: str) -> None:
        logger.info(key, extra={**self.logging_data})
    def _error(self, key: str) -> None:
        logger.error(key, extra={**self.logging_data})
    def is_ping(self) -> bool:
        return self._data.get("type", 0) == DiscordRequestTypes.PING
    def is_command(self) -> bool:
        return self._data.get("type", 0) == DiscordRequestTypes.COMMAND
    def is_message_component(self) -> bool:
        return self._data.get("type", 0) == DiscordRequestTypes.MESSAGE_COMPONENT
    def is_modal_submit(self) -> bool:
        return self._data.get("type", 0) == DiscordRequestTypes.MODAL_SUBMIT
    def get_command_name(self) -> str:
        if not self.is_command():
            return ""
        return self.data["name"]  # type: ignore
    def get_component_custom_id(self) -> str:
        if not self.is_message_component():
            return ""
        return self.data["custom_id"]  # type: ignore
    def is_select_component(self) -> bool:
        # NOTE(review): assumes `component_type` is present for component
        # interactions — a malformed payload would raise KeyError here.
        return self.data["component_type"] == DiscordMessageComponentTypes.SELECT
    def get_selected_options(self) -> list[str]:
        if not self.is_select_component():
            return []
        return self.data["values"]  # type: ignore
6,131 | backward hook | # Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import torch
def get_forward_hook(name, trainer, rank, logger, dump_to_file=False):
    """
    A forward hook to dump all of the module input and output norms. It is called at every time after forward() has computed an output.
    Only float type input/output tensor norms are computed.
    For more details about the forward hook, check https://pytorch.org/docs/stable/generated/torch.nn.modules.module.register_module_forward_hook.html
    Args:
        name: tensor name
        trainer: PTL trainer
        rank: worker rank
        logger: PTL log function
        dump_to_file: whether to dump the csv file to the disk
    """
    # Bug fix: `fp` and `header` were only bound when dump_to_file was True,
    # yet the inner hook read/wrote them unconditionally, raising NameError on
    # the first call whenever dump_to_file=False. They are now always bound,
    # and all file I/O is guarded.
    fp = None
    header = False
    if dump_to_file:
        os.makedirs('debug_info', exist_ok=True)
        fp = open(f'debug_info/forward_{name}_rank{rank}.txt', 'w')

    float_dtypes = (torch.float, torch.half, torch.bfloat16)

    def forward_hook(module, inputs, outputs):
        nonlocal header
        # Only instrument training-mode forward passes.
        if not trainer.training:
            return
        values = []
        headers = []
        for n, i in enumerate(inputs):
            if isinstance(i, torch.Tensor) and i.dtype in float_dtypes:
                if not header:
                    headers.append('input')
                input_norm = i.data.norm()
                values.append(f'{input_norm}')
                logger(f'debug_info_forward/{name}_rank{rank}_input{n}', input_norm)
        if isinstance(outputs, tuple):
            for n, i in enumerate(outputs):
                if isinstance(i, torch.Tensor) and i.dtype in float_dtypes:
                    if not header:
                        headers.append('output')
                    output_norm = i.data.norm()
                    values.append(f'{output_norm}')
                    logger(f'debug_info_forward/{name}_rank{rank}_output{n}', output_norm)
        else:
            # NOTE(review): non-tuple outputs are recorded in the CSV only and
            # never sent to `logger` — preserved from the original behavior.
            headers.append('output')
            values.append(f'{outputs.data.norm()}')
        if fp is not None:
            values.append(f'{trainer.global_step}')
            if not header:
                headers.append('step')
                fp.write(','.join(headers) + '\n')
                header = True
            fp.write(','.join(values) + '\n')
            fp.flush()

    return forward_hook
def get_backward_hook(name, trainer, rank, logger, dump_to_file=False):
    """
    A backward hook to dump all of the module input and output grad norms. The hook will be called every time the gradients with respect to module inputs are computed.
    Only float type input/output grad tensor norms are computed.
    For more details about the backward hook, check https://pytorch.org/docs/stable/generated/torch.nn.modules.module.register_module_full_backward_hook.html
    Args:
        name: tensor name
        trainer: PTL trainer
        rank: worker rank
        logger: PTL log function
        dump_to_file: whether to dump the csv file to the disk
    """
    # Bug fix: `fp` and `header` were only bound when dump_to_file was True,
    # yet the inner hook read/wrote them unconditionally, raising NameError on
    # the first call whenever dump_to_file=False. They are now always bound,
    # and all file I/O is guarded.
    fp = None
    header = False
    if dump_to_file:
        os.makedirs('debug_info', exist_ok=True)
        fp = open(f'debug_info/backward_{name}_rank{rank}.txt', 'w')

    float_dtypes = (torch.float, torch.half, torch.bfloat16)

    def METHOD_NAME(module, inputs, outputs):
        nonlocal header
        # Only instrument training-mode backward passes.
        if not trainer.training:
            return
        values = []
        headers = []
        for n, i in enumerate(inputs):
            if isinstance(i, torch.Tensor) and i.dtype in float_dtypes:
                if not header:
                    headers.append('input')
                input_norm = i.data.norm()
                values.append(f'{input_norm}')
                logger(f'debug_info_backward/{name}_rank{rank}_input{n}', input_norm)
        if isinstance(outputs, tuple):
            for n, i in enumerate(outputs):
                if isinstance(i, torch.Tensor) and i.dtype in float_dtypes:
                    if not header:
                        headers.append('output')
                    output_norm = i.data.norm()
                    values.append(f'{output_norm}')
                    logger(f'debug_info_backward/{name}_rank{rank}_output{n}', output_norm)
        else:
            # NOTE(review): non-tuple outputs are recorded in the CSV only and
            # never sent to `logger` — preserved from the original behavior.
            headers.append('output')
            values.append(f'{outputs.data.norm()}')
        if fp is not None:
            values.append(f'{trainer.global_step}')
            if not header:
                headers.append('step')
                fp.write(','.join(headers) + '\n')
                header = True
            fp.write(','.join(values) + '\n')
            fp.flush()

    return METHOD_NAME
def get_tensor_hook(module, name, trainer, rank, logger, dump_to_file=False):
    """
    Build a tensor hook that logs a parameter's weight norm and grad norm at
    the end of each backward step.
    For more details about the tensor hook, check
    https://pytorch.org/docs/stable/generated/torch.Tensor.register_hook.html

    Args:
        module: the model module
        name: tensor name
        trainer: PTL trainer
        rank: worker rank
        logger: PTL log function
        dump_to_file: whether to dump the csv file to disk

    Returns:
        A hook ``tensor_hook(grad) -> grad`` suitable for ``Tensor.register_hook``;
        the gradient is passed through unmodified.
    """
    # Initialize unconditionally so the inner hook never references unbound names.
    fp = None
    header = False
    if dump_to_file:
        os.makedirs('debug_info', exist_ok=True)
        fp = open(f'debug_info/tensor_{name}_rank{rank}.csv', 'w')

    def tensor_hook(grad):
        nonlocal header
        values = []
        headers = []

        weight = module.get_parameter(name)
        weight_norm = weight.data.norm()
        grad_norm = grad.data.norm()
        logger(f'debug_info_tensors/{name}_rank{rank}_grad_norm', grad_norm)
        logger(f'debug_info_tensors/{name}_rank{rank}_weight_norm', weight_norm)

        values.append(f'{weight_norm}')
        values.append(f'{grad_norm}')
        values.append(f'{trainer.global_step}')
        if dump_to_file:
            # Write the csv header only once, on the first invocation.
            if not header:
                headers.append('weight')
                headers.append('grad')
                headers.append('step')
                fp.write(','.join(headers) + '\n')
                header = True
            fp.write(','.join(values) + '\n')
            fp.flush()
        return grad

    return tensor_hook
def register_debug_hooks(module, trainer, logger, dump_to_file=False):
    """
    Register debug hooks on ``module``. They can
    1. track the module forward step input/output norm
    2. track the module backward step input/output grad norm
    3. track the parameter weight norm and grad norm.
    """
    # Fall back to rank 0 when torch.distributed is not initialized.
    rank = torch.distributed.get_rank() if torch.distributed.is_initialized() else 0

    # Parameter-level hooks: weight norm and grad norm after each backward step.
    for param_name, param in module.named_parameters():
        if param_name:
            param.register_hook(get_tensor_hook(module, param_name, trainer, rank, logger, dump_to_file))

    # Module-level hooks: activation norms (forward) and grad norms (backward).
    for module_name, submodule in module.named_modules():
        if module_name:
            submodule.register_forward_hook(get_forward_hook(module_name, trainer, rank, logger, dump_to_file))
            submodule.register_full_backward_hook(get_backward_hook(module_name, trainer, rank, logger, dump_to_file))
6,132 | remove torrent | # coding=utf-8
"""rTorrent Client."""
from __future__ import absolute_import, unicode_literals
import logging
from medusa import app
from medusa.clients.torrent.generic import GenericClient
from medusa.helper.exceptions import DownloadClientConnectionException
from medusa.logger.adapters.style import BraceAdapter
from medusa.schedulers.download_handler import ClientStatus
from rtorrent import RTorrent
log = BraceAdapter(logging.getLogger(__name__))
log.logger.addHandler(logging.NullHandler())
class RTorrentAPI(GenericClient):
    """rTorrent API class.

    Thin wrapper around the ``rtorrent`` XML-RPC bindings implementing the
    GenericClient interface: add torrents (magnet or file), pause/remove them
    and query their status.
    """

    def __init__(self, host=None, username=None, password=None):
        """Constructor.

        :param host:
        :type host: string
        :param username:
        :type username: string
        :param password:
        :type password: string
        """
        super(RTorrentAPI, self).__init__('rTorrent', host, username, password)
        self._get_auth()

    def _get_auth(self):
        """Create (or return the cached) authenticated RTorrent session.

        :return: the RTorrent session, or None when no host is configured.
        :raises DownloadClientConnectionException: if authentication fails.
        """
        if self.auth is not None:
            return self.auth

        if not self.host:
            return

        tp_kwargs = {}
        if app.TORRENT_AUTH_TYPE != 'none':
            tp_kwargs['authtype'] = app.TORRENT_AUTH_TYPE

        if not app.TORRENT_VERIFY_CERT:
            tp_kwargs['check_ssl_cert'] = False
        else:
            if app.SSL_CA_BUNDLE:
                tp_kwargs['check_ssl_cert'] = app.SSL_CA_BUNDLE

        try:
            if self.username and self.password:
                self.auth = RTorrent(self.host, self.username, self.password, True, tp_kwargs=tp_kwargs)
            else:
                self.auth = RTorrent(self.host, None, None, True)
        except Exception as error:  # No request/connection specific exception thrown.
            raise DownloadClientConnectionException(f'Unable to authenticate with rtorrent client: {error}')

        return self.auth

    @staticmethod
    def _get_params(result):
        """Build the rTorrent load parameters (label, download dir) for a result."""
        params = []

        # Set label
        label = app.TORRENT_LABEL
        if result.series.is_anime:
            label = app.TORRENT_LABEL_ANIME
        if label:
            params.append('d.custom1.set={0}'.format(label))

        if app.TORRENT_PATH:
            params.append('d.directory.set={0}'.format(app.TORRENT_PATH))

        return params

    def _add_torrent_uri(self, result):
        """Send a magnet link to rTorrent and start it.

        :return: True on success, False otherwise.
        """
        # BUGFIX: was `not (self.auth or result)`, which only bailed out when
        # BOTH were missing; an authenticated session AND a result are required.
        if not (self.auth and result):
            return False

        try:
            params = self._get_params(result)
            # Send magnet to rTorrent and start it
            try:
                torrent = self.auth.load_magnet(result.url, result.hash, start=True, params=params)
            except DownloadClientConnectionException:
                return False

            if not torrent:
                return False

        except Exception as msg:
            log.warning('Error while sending torrent: {error!r}',
                        {'error': msg})
            return False
        else:
            return True

    def _add_torrent_file(self, result):
        """Send a torrent file's contents to rTorrent and start it.

        :return: True on success, False otherwise.
        """
        # BUGFIX: same guard correction as in _add_torrent_uri.
        if not (self.auth and result):
            return False

        try:
            params = self._get_params(result)
            # Send torrent to rTorrent and start it
            try:
                torrent = self.auth.load_torrent(result.content, start=True, params=params)
            except DownloadClientConnectionException:
                return False

            if not torrent:
                return False

        except Exception as msg:
            log.warning('Error while sending torrent: {error!r}',
                        {'error': msg})
            return False
        else:
            return True

    def test_authentication(self):
        """Test connection using authentication.

        :return:
        :rtype: tuple(bool, str)
        """
        try:
            # Force a fresh authentication attempt instead of a cached session.
            self.auth = None
            self._get_auth()
        except Exception:
            return False, f'Error: Unable to connect to {self.name}'
        else:
            if self.auth is None:
                return False, f'Error: Unable to get {self.name} Authentication, check your config!'
            else:
                return True, 'Success: Connected and Authenticated'

    def pause_torrent(self, info_hash):
        """Get torrent and pause."""
        log.info('Pausing {client} torrent {hash} status.', {'client': self.name, 'hash': info_hash})
        try:
            torrent = self.auth.find_torrent(info_hash.upper())
        except DownloadClientConnectionException:
            return False
        if not torrent:
            log.debug('Could not locate torrent with {hash} status.', {'hash': info_hash})
            return
        return torrent.pause()

    def METHOD_NAME(self, info_hash):
        """Get torrent and remove."""
        log.info('Removing {client} torrent {hash} status.', {'client': self.name, 'hash': info_hash})
        try:
            torrent = self.auth.find_torrent(info_hash.upper())
        except DownloadClientConnectionException:
            return False
        if not torrent:
            log.debug('Could not locate torrent with {hash} status.', {'hash': info_hash})
            return
        return torrent.erase()

    def _torrent_properties(self, info_hash):
        """Get torrent properties."""
        log.debug('Get {client} torrent hash {hash} properties.', {'client': self.name, 'hash': info_hash})

        torrent = self.auth.find_torrent(info_hash.upper())
        if not torrent:
            log.debug('Could not locate torrent with {hash} status.', {'hash': info_hash})
            return

        return torrent

    def torrent_completed(self, info_hash):
        """Check if torrent has finished downloading."""
        get_status = self.get_status(info_hash)
        if not get_status:
            return False

        return str(get_status) == 'Completed'

    def torrent_ratio(self, info_hash):
        """Get torrent ratio."""
        get_status = self.get_status(info_hash)
        if not get_status:
            return False

        return get_status.ratio

    def torrent_progress(self, info_hash):
        """Get torrent download progress."""
        get_status = self.get_status(info_hash)
        if not get_status:
            return False

        return get_status.progress

    def get_status(self, info_hash):
        """
        Return torrent status.

        Status codes:
        ```
            complete: 'Completed download'
            is_finished: 'Finished seeding (ratio reached)'
        ```
        """
        torrent = self._torrent_properties(info_hash)
        if not torrent:
            return

        client_status = ClientStatus()
        if torrent.started:
            client_status.set_status_string('Downloading')

        if torrent.paused:
            client_status.set_status_string('Paused')

        # # if torrent['status'] == ?:
        # #     client_status.set_status_string('Failed')

        if torrent.complete:
            client_status.set_status_string('Completed')

        # Store ratio
        client_status.ratio = torrent.ratio

        # Store progress
        if torrent.bytes_done:
            client_status.progress = int(torrent.completed_bytes / torrent.bytes_done * 100)

        # Store destination
        client_status.destination = torrent.directory

        # Store resource
        client_status.resource = torrent.base_filename

        return client_status
api = RTorrentAPI  # NOTE(review): module-level alias of the class itself (not an instance); presumably resolved by a client factory — confirm against callers.
6,133 | test multi pack search index | # ------------------------------------
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
# ------------------------------------
from azure.search.documents.indexes.models import SearchIndex, RegexFlags, PatternAnalyzer, PatternTokenizer
from azure.search.documents.indexes._generated.models import (
PatternAnalyzer as _PatternAnalyzer,
PatternTokenizer as _PatternTokenizer,
)
def test_unpack_search_index():
    """_from_generated converts a generated flags string into a list."""
    generated_analyzer = _PatternAnalyzer(name="test_analyzer", flags="CANON_EQ")
    generated_tokenizer = _PatternTokenizer(name="test_tokenizer", flags="CANON_EQ")
    index = SearchIndex(
        name="test",
        fields=None,
        analyzers=[generated_analyzer],
        tokenizers=[generated_tokenizer],
    )

    result = SearchIndex._from_generated(index)

    analyzer = result.analyzers[0]
    tokenizer = result.tokenizers[0]
    assert isinstance(analyzer, PatternAnalyzer)
    assert isinstance(analyzer.flags, list)
    assert analyzer.flags[0] == "CANON_EQ"
    assert isinstance(tokenizer, PatternTokenizer)
    assert isinstance(tokenizer.flags, list)
    assert tokenizer.flags[0] == "CANON_EQ"
def test_multi_unpack_search_index():
    """_from_generated splits a '|'-separated flags string into its parts."""
    generated_analyzer = _PatternAnalyzer(name="test_analyzer", flags="CANON_EQ|MULTILINE")
    generated_tokenizer = _PatternTokenizer(name="test_tokenizer", flags="CANON_EQ|MULTILINE")
    index = SearchIndex(
        name="test",
        fields=None,
        analyzers=[generated_analyzer],
        tokenizers=[generated_tokenizer],
    )

    result = SearchIndex._from_generated(index)

    analyzer = result.analyzers[0]
    tokenizer = result.tokenizers[0]
    assert isinstance(analyzer, PatternAnalyzer)
    assert isinstance(analyzer.flags, list)
    assert analyzer.flags[0] == "CANON_EQ"
    assert analyzer.flags[1] == "MULTILINE"
    assert isinstance(tokenizer, PatternTokenizer)
    assert isinstance(tokenizer.flags, list)
    assert tokenizer.flags[0] == "CANON_EQ"
    assert tokenizer.flags[1] == "MULTILINE"
def test_unpack_search_index_enum():
    """_from_generated also accepts a RegexFlags enum member as flags."""
    generated_analyzer = _PatternAnalyzer(name="test_analyzer", flags=RegexFlags.canon_eq)
    generated_tokenizer = _PatternTokenizer(name="test_tokenizer", flags=RegexFlags.canon_eq)
    index = SearchIndex(
        name="test",
        fields=None,
        analyzers=[generated_analyzer],
        tokenizers=[generated_tokenizer],
    )

    result = SearchIndex._from_generated(index)

    analyzer = result.analyzers[0]
    tokenizer = result.tokenizers[0]
    assert isinstance(analyzer, PatternAnalyzer)
    assert isinstance(analyzer.flags, list)
    assert analyzer.flags[0] == "CANON_EQ"
    assert isinstance(tokenizer, PatternTokenizer)
    assert isinstance(tokenizer.flags, list)
    assert tokenizer.flags[0] == "CANON_EQ"
def test_pack_search_index():
    """_to_generated converts a flags list back into a single string."""
    analyzer_in = PatternAnalyzer(name="test_analyzer", flags=["CANON_EQ"])
    tokenizer_in = PatternTokenizer(name="test_tokenizer", flags=["CANON_EQ"])
    index = SearchIndex(
        name="test",
        fields=None,
        analyzers=[analyzer_in],
        tokenizers=[tokenizer_in],
    )

    result = index._to_generated()

    analyzer = result.analyzers[0]
    tokenizer = result.tokenizers[0]
    assert isinstance(analyzer, _PatternAnalyzer)
    assert isinstance(analyzer.flags, str)
    assert analyzer.flags == "CANON_EQ"
    assert isinstance(tokenizer, _PatternTokenizer)
    assert isinstance(tokenizer.flags, str)
    assert tokenizer.flags == "CANON_EQ"
def METHOD_NAME():
    """_to_generated joins multiple flags with '|' for analyzers and tokenizers."""
    pattern_analyzer = PatternAnalyzer(name="test_analyzer", flags=["CANON_EQ", "MULTILINE"])
    analyzers = []
    analyzers.append(pattern_analyzer)
    # Consistency fix: every other test in this module names the tokenizer
    # "test_tokenizer"; this one accidentally reused "test_analyzer".
    pattern_tokenizer = PatternTokenizer(name="test_tokenizer", flags=["CANON_EQ", "MULTILINE"])
    tokenizers = []
    tokenizers.append(pattern_tokenizer)
    index = SearchIndex(name="test", fields=None, analyzers=analyzers, tokenizers=tokenizers)
    result = index._to_generated()
    assert isinstance(result.analyzers[0], _PatternAnalyzer)
    assert isinstance(result.analyzers[0].flags, str)
    assert result.analyzers[0].flags == "CANON_EQ|MULTILINE"
    assert isinstance(result.tokenizers[0], _PatternTokenizer)
    assert isinstance(result.tokenizers[0].flags, str)
    assert result.tokenizers[0].flags == "CANON_EQ|MULTILINE"
6,134 | find matching domain | # -*- coding: utf-8 -*-
#
# Copyright (C) 2014-2021 Bitergia
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Authors:
# Santiago Dueñas <sduenas@bitergia.com>
#
import functools
import logging
import re
from ..db import (find_individual_by_uuid,
find_domain,
search_enrollments_in_period)
from ..errors import NotFoundError
# Full-string match of a single email address; the domain part must contain
# at least one dot (e.g. "user@example.com").
EMAIL_ADDRESS_PATTERN = re.compile(r"^(?P<email>[^\s@]+@[^\s@.]+\.[^\s@]+)$")
logger = logging.getLogger(__name__)
def recommend_affiliations(uuids):
    """Recommend organizations for a list of individuals.

    Yields affiliation recommendations based on the email addresses of
    each individual: when the domain of one of those addresses matches a
    domain stored on the registry, the organization owning that domain is
    recommended.

    Each recommendation is a tuple with the individual's uuid and a list
    of names of organizations the individual might be enrolled in.
    Individuals that cannot be found are skipped; individuals without any
    match yield an empty list. Organizations the individual is already
    enrolled in are never suggested.

    :param uuids: list of individual keys
    :returns: a generator of recommendations
    """
    logger.debug(
        f"Generating affiliation recommendations; "
        f"uuids={uuids}; ..."
    )

    for uuid in uuids:
        try:
            individual = find_individual_by_uuid(uuid)
        except NotFoundError:
            # Unknown individuals are silently excluded from the result.
            continue
        yield (uuid, _suggest_affiliations(individual))

    logger.info(f"Affiliation recommendations generated; uuids='{uuids}'")
def _suggest_affiliations(individual):
    """Return a sorted list of organizations the individual is not yet part of."""
    suggestions = set()

    for domain in _retrieve_individual_email_domains(individual):
        org_name = domain.organization.name
        if not _is_enrolled(individual, org_name):
            suggestions.add(org_name)

    return sorted(suggestions)
def _retrieve_individual_email_domains(individual):
    """Return the set of registry domains linked to an individual.

    Inspects every identity of `individual` that carries a well-formed email
    address and collects the registry domain objects matching the address
    domain (or one of its parent domains).

    :param individual: individual whose identities are inspected
    :returns: set of matching domain objects
    """
    domains = set()
    seen = set()  # email-domain strings already looked up

    for identity in individual.identities.all():
        # Only check email address to find new affiliations
        if not identity.email:
            continue
        if not EMAIL_ADDRESS_PATTERN.match(identity.email):
            continue

        domain = identity.email.split('@')[-1]

        # BUGFIX: the original tested `domain in domains`, comparing the
        # string against the set of matched domain objects, so the dedup
        # check never fired. Track the raw strings separately instead.
        if domain in seen:
            continue
        seen.add(domain)

        dom = METHOD_NAME(domain)
        if dom:
            domains.add(dom)

    return domains
def _is_enrolled(individual, org_name):
    """Check whether the individual has any enrollment in the organization."""
    enrollments = search_enrollments_in_period(individual.mk,
                                               org_name)
    return len(enrollments) > 0
@functools.lru_cache()
def METHOD_NAME(domain):
    """Look for domains and sub-domains that match with the given one.

    Walks from the full domain towards its root, stripping one label per
    step, until `find_domain` returns a match. A match found on a parent
    domain only counts when that parent is flagged as a top domain.
    """
    is_subdomain = False

    while True:
        try:
            found = find_domain(domain)
        except NotFoundError:
            dot = domain.find('.')
            if dot == -1:
                # No parent domains left to try.
                return None
            domain = domain[dot + 1:]
            is_subdomain = True
        except ValueError:
            return None
        else:
            if is_subdomain and not found.is_top_domain:
                return None
            return found
6,135 | node ports creator cb | import logging
from pathlib import Path
from typing import Any, Callable, Coroutine
from models_library.api_schemas_storage import LinkType
from models_library.projects import ProjectIDStr
from models_library.projects_nodes_io import NodeIDStr
from models_library.users import UserID
from pydantic import BaseModel, Field, ValidationError
from pydantic.error_wrappers import flatten_errors
from servicelib.progress_bar import ProgressBarData
from servicelib.utils import logged_gather
from settings_library.r_clone import RCloneSettings
from ..node_ports_common.dbmanager import DBManager
from ..node_ports_common.exceptions import PortNotFound, UnboundPortError
from ..node_ports_common.file_io_utils import LogRedirectCB
from ..node_ports_v2.port import SetKWargs
from .links import ItemConcreteValue, ItemValue
from .port_utils import is_file_type
from .ports_mapping import InputsList, OutputsList, PortKey
log = logging.getLogger(__name__)
class Nodeports(BaseModel):
    """
    Represents a node in a project and all its input/output ports
    """

    # Port mappings as stored in the DB; callers access them through the
    # async ``inputs``/``outputs`` properties below.
    internal_inputs: InputsList = Field(..., alias="inputs")
    internal_outputs: OutputsList = Field(..., alias="outputs")
    db_manager: DBManager
    user_id: UserID
    project_id: ProjectIDStr
    node_uuid: NodeIDStr
    # Callback used to persist this instance back to the DB.
    save_to_db_cb: Callable[["Nodeports"], Coroutine[Any, Any, None]]
    # Factory that rebuilds a Nodeports instance for a given node uuid.
    node_port_creator_cb: Callable[
        [DBManager, UserID, ProjectIDStr, NodeIDStr],
        Coroutine[Any, Any, type["Nodeports"]],
    ]
    # When True, inputs/outputs are refreshed from the DB on every access.
    auto_update: bool = False
    r_clone_settings: RCloneSettings | None = None
    io_log_redirect_cb: LogRedirectCB | None

    class Config:
        # DBManager and the callbacks are not pydantic models.
        arbitrary_types_allowed = True

    def __init__(self, **data: Any):
        super().__init__(**data)

        # pylint: disable=protected-access

        # let's pass ourselves down
        # each port needs a backreference to this Nodeports to resolve values
        for input_key in self.internal_inputs:
            self.internal_inputs[input_key]._node_ports = self
        for output_key in self.internal_outputs:
            self.internal_outputs[output_key]._node_ports = self

    @property
    async def inputs(self) -> InputsList:
        """Return the input ports, refreshed from the DB when auto_update is on."""
        log.debug("Getting inputs with autoupdate: %s", self.auto_update)
        if self.auto_update:
            await self._auto_update_from_db()
        return self.internal_inputs

    @property
    async def outputs(self) -> OutputsList:
        """Return the output ports, refreshed from the DB when auto_update is on."""
        log.debug("Getting outputs with autoupdate: %s", self.auto_update)
        if self.auto_update:
            await self._auto_update_from_db()
        return self.internal_outputs

    async def get_value_link(
        self, item_key: PortKey, *, file_link_type: LinkType
    ) -> ItemValue | None:
        """Return the value/link of port ``item_key``, searching inputs first.

        Raises if the key is bound in neither inputs nor outputs.
        """
        try:
            return await (await self.inputs)[item_key].get_value(
                file_link_type=file_link_type
            )
        except UnboundPortError:
            # not available try outputs
            pass
        # if this fails it will raise an exception
        return await (await self.outputs)[item_key].get_value(
            file_link_type=file_link_type
        )

    async def get(
        self, item_key: PortKey, progress_bar: ProgressBarData | None = None
    ) -> ItemConcreteValue | None:
        """Return the concrete value of port ``item_key``, searching inputs first."""
        try:
            return await (await self.inputs)[item_key].get(progress_bar)
        except UnboundPortError:
            # not available try outputs
            pass
        # if this fails it will raise an exception
        return await (await self.outputs)[item_key].get(progress_bar)

    async def set(self, item_key: PortKey, item_value: ItemConcreteValue) -> None:
        """Set ``item_value`` on port ``item_key``, trying inputs before outputs."""
        # first try to set the inputs.
        try:
            the_updated_inputs = await self.inputs
            await the_updated_inputs[item_key].set(item_value)
            return
        except UnboundPortError:
            # not available try outputs
            # if this fails it will raise another exception
            the_updated_outputs = await self.outputs
            await the_updated_outputs[item_key].set(item_value)

    async def set_file_by_keymap(self, item_value: Path) -> None:
        """Set a file on the first file-typed output whose key map contains its name."""
        for output in (await self.outputs).values():
            if is_file_type(output.property_type) and output.file_to_key_map:
                if item_value.name in output.file_to_key_map:
                    await output.set(item_value)
                    return
        raise PortNotFound(msg=f"output port for item {item_value} not found")

    async def METHOD_NAME(self, node_uuid: NodeIDStr) -> type["Nodeports"]:
        """Build a Nodeports for ``node_uuid`` via the configured creator callback."""
        return await self.node_port_creator_cb(
            self.db_manager, self.user_id, self.project_id, node_uuid
        )

    async def _auto_update_from_db(self) -> None:
        """Refresh the port mappings from the DB and re-wire port backreferences."""
        # get the newest from the DB
        updated_node_ports = await self.METHOD_NAME(self.node_uuid)
        # update our stuff
        self.internal_inputs = updated_node_ports.internal_inputs
        self.internal_outputs = updated_node_ports.internal_outputs
        # let's pass ourselves down
        # pylint: disable=protected-access
        for input_key in self.internal_inputs:
            self.internal_inputs[input_key]._node_ports = self
        for output_key in self.internal_outputs:
            self.internal_outputs[output_key]._node_ports = self

    async def set_multiple(
        self,
        port_values: dict[PortKey, tuple[ItemConcreteValue | None, SetKWargs | None]],
        *,
        progress_bar: ProgressBarData,
    ) -> None:
        """
        Sets the provided values to the respective input or output ports
        Only supports port_key by name, not able to distinguish between inputs
        and outputs using the index.

        raises ValidationError
        """
        tasks = []
        async with progress_bar.sub_progress(
            steps=len(port_values.items())
        ) as sub_progress:
            for port_key, (value, set_kwargs) in port_values.items():
                # pylint: disable=protected-access
                try:
                    # outputs are tried first on purpose (a node writes outputs)
                    tasks.append(
                        self.internal_outputs[port_key]._set(
                            value, set_kwargs=set_kwargs, progress_bar=sub_progress
                        )
                    )
                except UnboundPortError:
                    # not available try inputs
                    # if this fails it will raise another exception
                    tasks.append(
                        self.internal_inputs[port_key]._set(
                            value, set_kwargs=set_kwargs, progress_bar=sub_progress
                        )
                    )

            results = await logged_gather(*tasks)
            await self.save_to_db_cb(self)

        # groups all ValidationErrors pre-pending 'port_key' to loc and raises ValidationError
        if errors := [
            list(flatten_errors([r], self.__config__, loc=(f"{port_key}",)))
            for port_key, r in zip(port_values.keys(), results)
            if isinstance(r, ValidationError)
        ]:
            raise ValidationError(errors, model=type(self))
6,136 | test data types | import doctest
import re
from collections import OrderedDict
from django.test import SimpleTestCase
import corehq.apps.app_manager.xform_builder
from corehq.apps.app_manager.tests.util import TestXmlMixin
from corehq.apps.app_manager.xform_builder import XFormBuilder
class XFormBuilderTests(SimpleTestCase, TestXmlMixin):
    """Tests for XFormBuilder, comparing generated XForms against XML fixtures."""

    file_path = ('data', 'xform_builder')

    def setUp(self):
        # Fresh builder per test; each test adds questions and compares output.
        self.xform = XFormBuilder()

    def replace_xmlns(self, xml, xmlns):
        """Substitute the fixture's random formdesigner xmlns with the builder's xmlns."""
        xmlns = xmlns.encode('utf-8')
        return re.sub(br'http://openrosa\.org/formdesigner/[\w-]{36}', xmlns, xml)

    def test_new_question_group(self):
        """
        XFormBuilder.new_question should be able to add a group
        """
        self.xform.new_question('personal', 'Personal Questions', data_type='group')
        self.xform.new_question('name', 'What is your name?', group='personal')
        self.assertXmlEqual(
            self.replace_xmlns(self.get_xml('group'), self.xform.xmlns),
            self.xform.tostring(pretty_print=True, encoding='utf-8', xml_declaration=True)
        )

    def test_new_question_repeat_group(self):
        """new_question supports repeat groups driven by another question's value."""
        num_names = self.xform.new_question('num_names', 'How many names do you have?', data_type='int')
        self.xform.new_question('personal', 'Personal Questions', data_type='repeatGroup',
                                repeat_count=num_names)
        self.xform.new_question('name', 'What is your <output value="ordinal(position(..) + 1)" /> name?',
                                group='personal', label_safe=True)
        # Yes, that was plug for an ordinal function. cf. UserVoice:
        # https://dimagi.uservoice.com/forums/176376-form-builder/suggestions/10610517--ordinal-function
        self.assertXmlEqual(
            self.replace_xmlns(self.get_xml('repeat_group'), self.xform.xmlns),
            self.xform.tostring(pretty_print=True, encoding='utf-8', xml_declaration=True)
        )

    def test_new_group_group(self):
        """new_group returns a group object questions can be added to directly."""
        personal = self.xform.new_group('personal', 'Personal Questions')
        personal.new_question('name', 'What is your name?')
        self.assertXmlEqual(
            self.replace_xmlns(self.get_xml('group'), self.xform.xmlns),
            self.xform.tostring(pretty_print=True, encoding='utf-8', xml_declaration=True)
        )

    def test_new_group_repeat_group(self):
        """new_group also supports repeat groups with a repeat_count question."""
        num_names = self.xform.new_question('num_names', 'How many names do you have?', data_type='int')
        personal = self.xform.new_group('personal', 'Personal Questions', data_type='repeatGroup',
                                        repeat_count=num_names)
        personal.new_question('name', 'What is your <output value="ordinal(position(..) + 1)" /> name?',
                              label_safe=True)
        self.assertXmlEqual(
            self.replace_xmlns(self.get_xml('repeat_group'), self.xform.xmlns),
            self.xform.tostring(pretty_print=True, encoding='utf-8', xml_declaration=True)
        )

    def test_unicode_translation(self):
        """Labels given as a dict produce per-language (incl. non-Latin) translations."""
        self.xform.new_question('name', {'en': 'What is your name?',
                                         'bur': 'သင့်နာမည်ဘယ်လိုခေါ်လဲ?'})  # (Myanmar/Burmese)
        self.assertXmlEqual(
            self.replace_xmlns(self.get_xml('unicode_translation'), self.xform.xmlns),
            self.xform.tostring(pretty_print=True, encoding='utf-8', xml_declaration=True)
        )

    def test_select_question(self):
        """Multi-select questions render their choices in insertion order."""
        self.xform.new_question(
            'fav_colors', 'What are your favorite colors?', data_type='select',
            choices=OrderedDict([
                ('r', 'Red'),
                ('o', 'Orange'),
                ('y', 'Yellow'),
                ('g', 'Green'),
                ('b', 'Blue'),
                ('i', 'Indigo'),
                ('v', 'Violet'),
            ])
        )
        self.assertXmlEqual(
            self.replace_xmlns(self.get_xml('select_question'), self.xform.xmlns),
            self.xform.tostring(pretty_print=True, encoding='utf-8', xml_declaration=True)
        )

    def test_select1_question(self):
        """Single-select questions accept non-string (e.g. int) choice values."""
        self.xform.new_question('you_aint_been_blue', 'What kind of blue are you?', data_type='select1', choices={
            1: 'Blue',
            2: 'Indigo',
            3: 'Black',
        })
        self.assertXmlEqual(
            self.replace_xmlns(self.get_xml('select1_question'), self.xform.xmlns),
            self.xform.tostring(pretty_print=True, encoding='utf-8', xml_declaration=True)
        )

    def METHOD_NAME(self):
        """Every supported data type (and hidden values) binds correctly."""
        self.xform.new_question('name', 'Child name')
        self.xform.new_question('dob', 'Child date of birth', 'date')
        self.xform.new_question('with_mother', 'Does child live with mother?', 'boolean',
                                value='true')
        self.xform.new_question('height', 'Child height (cm)', 'int')
        self.xform.new_question('weight', 'Child weight (metric tonnes)', 'decimal')
        self.xform.new_question('time', 'Arrival time', 'time')
        self.xform.new_question('now', 'Current timestamp', 'dateTime')
        self.xform.new_question('mothers_name', None, None,  # Hidden values have no data type
                                calculate="concat('Jane', ' ', 'Smith')")
        self.assertXmlEqual(
            self.replace_xmlns(self.get_xml('data_types'), self.xform.xmlns),
            self.xform.tostring(pretty_print=True, encoding='utf-8', xml_declaration=True)
        )

    def test_xform_title(self):
        """A title passed to the constructor appears in the generated XForm."""
        self.xform = XFormBuilder('Built by XFormBuilder')
        self.xform.new_question('name', 'What is your name?')
        group = self.xform.new_group('personal', 'Personal Questions')
        group.new_question('fav_color', 'Quelle est ta couleur préférée?',
                           choices=OrderedDict([('r', 'Rot'), ('b', 'Blau'), ('g', 'Grün')]))
        self.assertXmlEqual(
            self.replace_xmlns(self.get_xml('xform_title'), self.xform.xmlns),
            self.xform.tostring(pretty_print=True, encoding='utf-8', xml_declaration=True)
        )

    def test_question_params(self):
        """Extra keyword params (constraint, jr:constraintMsg) end up on the bind."""
        self.xform = XFormBuilder('Built by XFormBuilder')
        params = {
            'constraint': ". != 'Ford Prefect'",
            'jr:constraintMsg': 'That name is not as inconspicuous as you think.'
        }
        self.xform.new_question('name', 'What is your name?', **params)
        self.assertXmlEqual(
            self.replace_xmlns(self.get_xml('question_params'), self.xform.xmlns),
            self.xform.tostring(pretty_print=True, encoding='utf-8', xml_declaration=True)
        )
class DocTests(SimpleTestCase):
    """Run the module's executable doctests."""

    def test_doctests(self):
        outcome = doctest.testmod(corehq.apps.app_manager.xform_builder)
        # Note: XFormBuilder has doctests for functions defined inside methods.
        # Those doctests are for illustration only; they can't be executed.
        # XFormBuilderTests test those functions.
        self.assertEqual(outcome.failed, 0)
6,137 | test ellipse major | import pytest
import json
import compas
from compas.geometry import close
from compas.geometry import allclose
from compas.geometry import Frame
from compas.geometry import Ellipse
from compas.geometry import Plane
def test_ellipse_create():
    """Default-frame ellipse: derived properties and parametrization."""
    ellipse = Ellipse(major=1.0, minor=0.5)

    assert close(ellipse.major, 1.0, tol=1e-12)
    assert close(ellipse.minor, 0.5, tol=1e-12)
    assert close(ellipse.area, 1.5707963267948966, tol=1e-12)
    assert close(ellipse.semifocal, 0.8660254037844386, tol=1e-12)
    assert close(ellipse.eccentricity, 0.8660254037844386, tol=1e-12)
    assert close(ellipse.focal, 1.7320508075688772, tol=1e-12)
    assert ellipse.is_closed
    assert ellipse.is_periodic
    assert ellipse.frame == Frame.worldXY()

    expected_points = [
        (0.0, [1.0, 0.0, 0.0]),
        (0.25, [0.0, 0.5, 0.0]),
        (0.5, [-1.0, 0.0, 0.0]),
        (0.75, [0.0, -0.5, 0.0]),
        (1.0, [1.0, 0.0, 0.0]),
    ]
    for t, point in expected_points:
        assert allclose(ellipse.point_at(t), point, tol=1e-12)
        # With the world frame, world and local parametrizations coincide.
        assert allclose(ellipse.point_at(t), ellipse.point_at(t, world=False), tol=1e-12)
def test_ellipse_create_with_frame():
    """Ellipse on the ZX frame: world vs local parametrization."""
    ellipse = Ellipse(major=1.0, minor=0.5, frame=Frame.worldZX())

    assert close(ellipse.major, 1.0, tol=1e-12)
    assert close(ellipse.minor, 0.5, tol=1e-12)
    assert close(ellipse.area, 1.5707963267948966, tol=1e-12)
    assert close(ellipse.semifocal, 0.8660254037844386, tol=1e-12)
    assert close(ellipse.eccentricity, 0.8660254037844386, tol=1e-12)
    assert close(ellipse.focal, 1.7320508075688772, tol=1e-12)
    assert ellipse.is_closed
    assert ellipse.is_periodic
    assert ellipse.frame == Frame.worldZX()

    samples = [
        (0.0, [0.0, 0.0, 1.0], [1.0, 0.0, 0.0]),
        (0.25, [0.5, 0.0, 0.0], [0.0, 0.5, 0.0]),
        (0.5, [0.0, 0.0, -1.0], [-1.0, 0.0, 0.0]),
        (0.75, [-0.5, 0.0, 0.0], [0.0, -0.5, 0.0]),
        (1.0, [0.0, 0.0, 1.0], [1.0, 0.0, 0.0]),
    ]
    for t, world_point, local_point in samples:
        assert allclose(ellipse.point_at(t), world_point, tol=1e-12)
        assert allclose(ellipse.point_at(t, world=False), local_point, tol=1e-12)
        # Transforming the local point into world coordinates must agree.
        assert allclose(
            ellipse.point_at(t),
            ellipse.point_at(t, world=False).transformed(ellipse.transformation),
            tol=1e-12,
        )
# =============================================================================
# Data
# =============================================================================
def test_ellipse_data():
    """An ellipse survives a JSON serialization round trip."""
    ellipse = Ellipse(major=1.0, minor=0.5)
    roundtripped = Ellipse.from_data(json.loads(json.dumps(ellipse.data)))

    assert ellipse.major == roundtripped.major
    assert ellipse.minor == roundtripped.minor
    assert ellipse.frame.point == roundtripped.frame.point
    assert allclose(ellipse.frame.xaxis, roundtripped.frame.xaxis, tol=1e-12)
    assert allclose(ellipse.frame.yaxis, roundtripped.frame.yaxis, tol=1e-12)

    # Schema validation is not available under IronPython.
    if not compas.IPY:
        assert Ellipse.validate_data(ellipse.data)
        assert Ellipse.validate_data(roundtripped.data)
# =============================================================================
# Constructors
# =============================================================================
def test_ellipse_create_from_point_major_minor():
    """from_point_major_minor places a world-aligned frame at the given point."""
    ellipse = Ellipse.from_point_major_minor([1.0, 2.0, 3.0], 1.0, 0.5)

    assert close(ellipse.major, 1.0, tol=1e-12)
    assert close(ellipse.minor, 0.5, tol=1e-12)
    assert close(ellipse.area, 1.5707963267948966, tol=1e-12)
    assert close(ellipse.semifocal, 0.8660254037844386, tol=1e-12)
    assert close(ellipse.eccentricity, 0.8660254037844386, tol=1e-12)
    assert close(ellipse.focal, 1.7320508075688772, tol=1e-12)
    assert ellipse.is_closed
    assert ellipse.is_periodic

    world = Frame.worldXY()
    assert allclose(ellipse.frame.point, [1, 2, 3], tol=1e-12)
    assert allclose(ellipse.frame.xaxis, world.xaxis, tol=1e-12)
    assert allclose(ellipse.frame.yaxis, world.yaxis, tol=1e-12)
    assert allclose(ellipse.frame.zaxis, world.zaxis, tol=1e-12)
def test_ellipse_create_from_plane_major_minor():
    """from_plane_major_minor adopts the frame derived from the plane."""
    plane = Plane([1.0, 2.0, 3.0], [0.0, 0.0, 1.0])
    expected_frame = Frame.from_plane(plane)
    ellipse = Ellipse.from_plane_major_minor(plane, 1.0, 0.5)

    for attr, value in (
        ("major", 1.0),
        ("minor", 0.5),
        ("area", 1.5707963267948966),
        ("semifocal", 0.8660254037844386),
        ("eccentricity", 0.8660254037844386),
        ("focal", 1.7320508075688772),
    ):
        assert close(getattr(ellipse, attr), value, tol=1e-12)

    assert ellipse.is_closed
    assert ellipse.is_periodic

    for axis in ("point", "xaxis", "yaxis", "zaxis"):
        assert allclose(
            getattr(ellipse.frame, axis), getattr(expected_frame, axis), tol=1e-12
        )
# =============================================================================
# Properties and Geometry
# =============================================================================
def METHOD_NAME():
    """Major-axis access raises ValueError for unset or negative values."""
    curve = Ellipse(major=1.0, minor=0.5)
    assert close(curve.major, 1.0, tol=1e-12)

    # Reading back a cleared major axis must fail.
    curve._major = None
    with pytest.raises(ValueError):
        curve.major

    # Negative lengths are rejected by the setter.
    with pytest.raises(ValueError):
        curve.major = -1.0
def test_ellipse_minor():
    """Minor-axis access raises ValueError for unset or negative values."""
    curve = Ellipse(major=1.0, minor=0.5)
    assert close(curve.minor, 0.5, tol=1e-12)

    # Reading back a cleared minor axis must fail.
    curve._minor = None
    with pytest.raises(ValueError):
        curve.minor

    # Negative lengths are rejected by the setter.
    with pytest.raises(ValueError):
        curve.minor = -1.0
# =============================================================================
# Accessors
# =============================================================================
# =============================================================================
# Comparison
# =============================================================================
# =============================================================================
# Other Methods
# ============================================================================= |
6,138 | traverse file tree | """Diagnostic tool for a localstack instance running in a container."""
import inspect
import os
import socket
from typing import Dict, List, Union
from localstack import config
from localstack.constants import DEFAULT_VOLUME_DIR
from localstack.services.lambda_.invocation.docker_runtime_executor import IMAGE_PREFIX
from localstack.services.lambda_.invocation.lambda_models import IMAGE_MAPPING
from localstack.utils import bootstrap
from localstack.utils.analytics import usage
from localstack.utils.container_networking import get_main_container_name
from localstack.utils.container_utils.container_client import NoSuchImage
from localstack.utils.docker_utils import DOCKER_CLIENT
from localstack.utils.files import load_file
# Lambda runtime images to include in the diagnostic report.
# NOTE: this must be a list, not a generator expression — a generator would be
# exhausted by the ``*LAMBDA_IMAGES`` unpacking below, leaving the module-level
# name empty for any later consumer.
LAMBDA_IMAGES = [f"{IMAGE_PREFIX}{postfix}" for postfix in IMAGE_MAPPING.values()]
# All images whose digests are collected by get_important_image_hashes().
DIAGNOSE_IMAGES = [
    "localstack/bigdata",
    "mongo",
    *LAMBDA_IMAGES,
]
# Attributes of the config module that are interpreter/module metadata rather
# than actual configuration, and are skipped by get_localstack_config().
EXCLUDE_CONFIG_KEYS = {
    "CONFIG_ENV_VARS",
    "copyright",
    "__builtins__",
    "__cached__",
    "__doc__",
    "__file__",
    "__loader__",
    "__name__",
    "__package__",
    "__spec__",
}
# Hostnames whose DNS resolution is checked by resolve_endpoints().
ENDPOINT_RESOLVE_LIST = ["localhost.localstack.cloud", "api.localstack.cloud"]
# Directories whose tree is listed by get_file_tree().
INSPECT_DIRECTORIES = [DEFAULT_VOLUME_DIR, "/tmp"]
def get_localstack_logs() -> Union[str, Dict]:
    """Fetch the main container's docker logs for the diagnostic report.

    On failure the error text takes the place of the logs.
    """
    # NOTE(review): the annotation says Union[str, Dict], but a dict is
    # always returned (the str case ends up as the dict value).
    try:
        logs = DOCKER_CLIENT.get_container_logs(get_main_container_name())
    except Exception as e:
        logs = "error getting docker logs for container: %s" % e
    return {"docker": logs}
def get_localstack_config() -> Dict:
    """Snapshot plain configuration values from the localstack config module.

    Skips metadata keys, callables, modules, classes, and typing artifacts;
    objects with a ``__dict__`` are flattened to that dict.
    """
    # Predicates identifying members that are not configuration values.
    skip_predicates = (
        inspect.isbuiltin,
        inspect.isfunction,
        inspect.ismodule,
        inspect.isclass,
    )
    snapshot = {}
    for key, value in inspect.getmembers(config):
        if key in EXCLUDE_CONFIG_KEYS:
            continue
        if any(predicate(value) for predicate in skip_predicates):
            continue
        # typing constructs (e.g. generic aliases) are not real values.
        if "typing." in str(type(value)):
            continue
        snapshot[key] = value.__dict__ if hasattr(value, "__dict__") else value
    return snapshot
def inspect_main_container() -> Union[str, Dict]:
    """Return ``docker inspect`` output for the main container.

    Any failure (including resolving the container name) is reported as a
    short error string instead of raising.
    """
    try:
        container_name = get_main_container_name()
        return DOCKER_CLIENT.inspect_container(container_name)
    except Exception as e:
        return f"inspect failed: {e}"
def get_localstack_version() -> Dict[str, str]:
    """Report build metadata baked into the container environment.

    Missing environment variables yield ``None`` values.
    """
    report_key_to_env_var = {
        "build-date": "LOCALSTACK_BUILD_DATE",
        "build-git-hash": "LOCALSTACK_BUILD_GIT_HASH",
        "build-version": "LOCALSTACK_BUILD_VERSION",
    }
    return {key: os.environ.get(var) for key, var in report_key_to_env_var.items()}
def resolve_endpoints() -> Dict[str, str]:
    """Resolve each well-known endpoint to an IP, recording failures inline."""
    resolution = {}
    for hostname in ENDPOINT_RESOLVE_LIST:
        try:
            address = socket.gethostbyname(hostname)
        except Exception as e:
            # Keep the failure in the report rather than aborting it.
            address = f"unable_to_resolve {e}"
        resolution[hostname] = address
    return resolution
def get_important_image_hashes() -> Dict[str, str]:
    """Collect repo digests for the images localstack depends on.

    Images are never pulled; absence and inspection errors are recorded as
    placeholder strings.
    """
    digests = {}
    for image_name in DIAGNOSE_IMAGES:
        try:
            digest = DOCKER_CLIENT.inspect_image(image_name, pull=False)["RepoDigests"]
        except NoSuchImage:
            digest = "not_present"
        except Exception as e:
            digest = f"error: {e}"
        digests[image_name] = digest
    return digests
def get_service_stats() -> Dict[str, str]:
    """Map each known service plugin to its current lifecycle state value."""
    # Imported lazily so merely importing this diagnostics module does not
    # pull in the whole service-plugin machinery.
    from localstack.services.plugins import SERVICE_PLUGINS
    return {service: state.value for service, state in SERVICE_PLUGINS.get_states().items()}
def get_file_tree() -> Dict[str, List[str]]:
    """Traverse each diagnostic directory and map it to its directory list."""
    tree = {}
    for directory in INSPECT_DIRECTORIES:
        tree[directory] = METHOD_NAME(directory)
    return tree
def METHOD_NAME(root: str) -> List[str]:
    """Recursively list directory paths under ``root`` (inside docker only).

    Returns an empty list when not running in docker, and a one-element
    error list if the walk itself raises.
    """
    # Fix: the original reused ``root`` as the os.walk loop variable,
    # shadowing the parameter; restructured with a guard clause.
    try:
        if not config.in_docker():
            return []
        # Only directory paths are reported; file names are ignored.
        return [dirpath for dirpath, _, _ in os.walk(root)]
    except Exception as e:
        return ["traversing files failed %s" % e]
def get_docker_image_details() -> Dict[str, str]:
    """Thin wrapper exposing bootstrap's docker image details in this module."""
    return bootstrap.get_docker_image_details()
def get_host_kernel_version() -> str:
    """Read the kernel version string from /proc; "failed" when unreadable."""
    return load_file("/proc/version", "failed").strip()
def get_usage():
    """Return the aggregated analytics usage data collected at runtime."""
    return usage.aggregate()
6,139 | test hook error | # SPDX-FileCopyrightText: Red Hat, Inc.
# SPDX-License-Identifier: GPL-2.0-or-later
from __future__ import absolute_import
from __future__ import division
import importlib
import pytest
import six
from vdsm.common.exception import GeneralException, VdsmException
from vdsm.rpc.Bridge import DynamicBridge
from monkeypatch import MonkeyPatch
from testlib import VdsmTestCase as TestCaseBase
# Dotted paths of attributes that getFakeAPI() copies verbatim from the real
# vdsm.API module onto the fake one, so the bridge can resolve constructor
# argument lists and the enum-like lookup tables.
_COPIED_API_OBJECTS = (
    'Global.ctorArgs',
    'ISCSIConnection.ctorArgs',
    'Image.ctorArgs',
    'LVMVolumeGroup.ctorArgs',
    'StorageDomain.Classes',
    'StorageDomain.Types',
    'StorageDomain.ctorArgs',
    'StoragePool.ctorArgs',
    'Task.ctorArgs',
    'VM.ctorArgs',
    'Volume.Formats',
    'Volume.Roles',
    'Volume.Types',
    'Volume.ctorArgs',
)
class Host():
    """Fake of the vdsm ``API.Global`` object used by the bridge tests.

    Each method hard-codes success only for the exact arguments the test
    sends, and a failure status dict otherwise.
    """

    ctorArgs = []

    def fenceNode(self, addr, port, agent, username, password, action,
                  secure=False, options='', policy=None):
        # Only the canned options string used by the test succeeds.
        if options != 'port=15':
            return {'status': {'code': -1, 'message': 'Failed'}}
        return {'status': {'code': 0, 'message': 'Done'},
                'power': 'on'}

    def getCapabilities(self):
        return {'status': {'code': 0, 'message': 'Done'},
                'info': {'My caps': 'My capabilites'}}

    def ping(self):
        # Used to exercise the bridge's exception translation.
        raise GeneralException("Kaboom!!!")

    def getDeviceList(self, storageType=None, guids=(), checkStatus=True,
                      refresh=True):
        rejected = (storageType != 3
                    or not isinstance(guids, tuple)
                    or checkStatus)
        if rejected:
            return {'status': {'code': -1, 'message': 'Failed'}}
        return {'status': {'code': 0, 'message': 'Done'},
                'devList': []}
class VM():
    """Fake of the vdsm ``API.VM`` object used by the bridge tests."""

    ctorArgs = ['vmID']

    def __init__(self, UUID):
        self._UUID = UUID

    def migrationCreate(self, params, incomingLimit):
        # Succeed only when the bridge forwarded the ctor UUID and the
        # integer parameter unchanged.
        accepted = self._UUID == params['vmID'] and incomingLimit == 42
        if not accepted:
            return {'status': {'code': -1, 'message': 'Fail'}}
        return {'status': {'code': 0, 'message': 'Done'},
                'migrationPort': 0, 'params': {}}
class StorageDomain():
    """Fake of the vdsm ``API.StorageDomain`` object used by the bridge tests."""

    ctorArgs = []

    def detach(
            self,
            storagedomainID,
            spUUID,
            masterSdUUID=None,
            masterVersion=0,
            force=False):
        # Succeeds only for the canned pool UUID with defaults untouched and
        # a truthy (non-False) force flag — the test passes force="True".
        accepted = (
            spUUID == '00000002-0002-0002-0002-0000000000f6'
            and masterSdUUID is None
            and masterVersion == 0
            and force is not False
        )
        if accepted:
            return {'status': {'code': 0, 'message': 'Done'}}
        return {'status': {'code': -1, 'message': 'Fail'}}
def getFakeAPI():
    """Build a fake ``vdsm.API`` module exposing the fake Host/StorageDomain/VM.

    A fresh module object is created from an empty spec, the fake classes are
    installed on it, and selected metadata attributes (ctorArgs tables etc.)
    are copied over from the real API so the bridge's argument resolution
    still works.
    """
    # ModuleSpec with a None loader: we only need a bare module object.
    spec = importlib.machinery.ModuleSpec("vdsm.API", None)
    _newAPI = importlib.util.module_from_spec(spec)
    # NOTE(review): relies on vdsm.API already being imported so that the
    # submodule is reachable as an attribute of the vdsm package — confirm.
    _vdsm = __import__('vdsm', globals(), locals())
    _API = _vdsm.API
    setattr(_newAPI, 'Global', Host)
    setattr(_newAPI, 'StorageDomain', StorageDomain)
    setattr(_newAPI, 'VM', VM)
    # Copy required API objects to our version of API
    for name in _COPIED_API_OBJECTS:
        parts = name.split('.')
        dstObj = _newAPI
        srcObj = _API
        # Walk the object hierarchy copying each component of the
        # _COPIED_API_OBJECTS attribute from the real API to our fake one
        for obj in parts:
            srcObj = getattr(srcObj, obj)
            try:
                dstObj = getattr(dstObj, obj)
            except AttributeError:
                setattr(dstObj, obj, srcObj)
    return _newAPI
def _get_api_instance(self, className, argObj):
    """Monkeypatch replacement for ``DynamicBridge._get_api_instance``.

    Identical to the real implementation except the API class is looked up on
    the fake module from :func:`getFakeAPI`. ``self`` is the DynamicBridge
    instance supplying ``_convert_class_name`` and ``_get_args``.
    """
    className = self._convert_class_name(className)
    apiObj = getattr(getFakeAPI(), className)
    ctorArgs = self._get_args(argObj, apiObj.ctorArgs, [], [])
    return apiObj(*ctorArgs)
@pytest.mark.xfail(six.PY2, reason="unsupported on py2")
class BridgeTests(TestCaseBase):
    """End-to-end checks of DynamicBridge dispatch against the fake API.

    Each test monkeypatches the bridge to instantiate the fakes above, then
    verifies that arguments and return values round-trip through dispatch.
    """

    @MonkeyPatch(DynamicBridge, '_get_api_instance', _get_api_instance)
    def testMethodWithManyOptionalAttributes(self):
        # Host.fenceNode only succeeds for options="port=15" (see fake).
        bridge = DynamicBridge()
        params = {"addr": "rack05-pdu01-lab4.tlv.redhat.com", "port": "",
                  "agent": "apc_snmp", "username": "emesika",
                  "password": "pass", "action": "off", "options": "port=15"}
        self.assertEqual(bridge.dispatch('Host.fenceNode')(**params),
                         {'power': 'on'})

    @MonkeyPatch(DynamicBridge, '_get_api_instance', _get_api_instance)
    def testMethodWithNoParams(self):
        bridge = DynamicBridge()
        bridge.register_server_address('127.0.0.1')
        self.assertEqual(bridge.dispatch('Host.getCapabilities')()
                         ['My caps'], 'My capabilites')
        bridge.unregister_server_address()

    @MonkeyPatch(DynamicBridge, '_get_api_instance', _get_api_instance)
    def testDetach(self):
        # Successful detach has no payload, so dispatch returns None.
        bridge = DynamicBridge()
        params = {"storagepoolID": "00000002-0002-0002-0002-0000000000f6",
                  "force": "True",
                  "storagedomainID": "773adfc7-10d4-4e60-b700-3272ee1871f9"}
        self.assertEqual(bridge.dispatch('StorageDomain.detach')(**params),
                         None)

    @MonkeyPatch(DynamicBridge, '_get_api_instance', _get_api_instance)
    def METHOD_NAME(self):
        # Host.ping raises GeneralException; the bridge must surface it as a
        # VdsmException with code 100.
        bridge = DynamicBridge()
        with self.assertRaises(VdsmException) as e:
            bridge.dispatch('Host.ping')()
        self.assertEqual(e.exception.code, 100)

    @MonkeyPatch(DynamicBridge, '_get_api_instance', _get_api_instance)
    def testMethodWithIntParam(self):
        # incomingLimit must stay an int (42) through the bridge.
        bridge = DynamicBridge()
        params = {"vmID": "773adfc7-10d4-4e60-b700-3272ee1871f9",
                  "params": {"vmID": "773adfc7-10d4-4e60-b700-3272ee1871f9"},
                  "incomingLimit": 42}
        self.assertEqual(bridge.dispatch('VM.migrationCreate')(**params),
                         {'migrationPort': 0, 'params': {}})

    @MonkeyPatch(DynamicBridge, '_get_api_instance', _get_api_instance)
    def testDefaultValues(self):
        # Omitted optional arguments must fall back to the API defaults.
        bridge = DynamicBridge()
        params = {'storageType': 3, 'checkStatus': False}
        self.assertEqual(bridge.dispatch('Host.getDeviceList')(**params),
                         [])
6,140 | get list domains tr te | """
Base class for Task
"""
from abc import abstractmethod
import warnings
from domainlab.compos.pcr.p_chain_handler import AbstractChainNodeHandler
from domainlab.tasks.task_utils import parse_domain_id
from domainlab.utils.logger import Logger
class NodeTaskDG(AbstractChainNodeHandler):
    """
    Domain Generalization Classification Task

    One node in a chain-of-responsibility of task handlers: ``is_myjob``
    decides whether a request names this task, and the abstract
    ``init_business`` constructs the concrete datasets/loaders.
    """
    def __init__(self, succ=None):
        # :param succ: successor handler in the chain (may be None)
        super().__init__(succ)
        self._loader_tr = None
        self._loader_te = None
        self._loader_val = None
        self._list_domains = None
        self._list_domain_tr = None  # versatile
        self._name = None
        self._args = None
        self.dict_dset_all = {}  # persist
        self.dict_dset_tr = {}  # versatile variable: which domains to use as training
        self.dict_dset_te = {}  # versatile
        self.dict_dset_val = {}  # versatile
        self.dict_domain_class_count = {}
        self.dim_d_tr = None  # public, only used for diva
        self._im_size = None
        # NOTE(review): both _dict_domains2imgroot and dict_domain2imgroot
        # (below) exist — looks like duplication; confirm before consolidating.
        self._dict_domains2imgroot = {}
        self._dict_domain_folder_name2class = {}  # {"domain1": {"class1":car, "class2":dog}}
        self._dict_domain_img_trans = {}
        self.dict_att = {}
        self.img_trans_te = None
        self.dict_domain2imgroot = {}
        self._dict_domain2filepath_list_im_tr = {}  # {"photo": "xxx/yyy/file_of_path2imgs"}
        self._dict_domain2filepath_list_im_val = {}
        self._dict_domain2filepath_list_im_te = {}
        self.dict_class_label_ind2name = None
        self.conf_without_args()  # configuration without init_business

    def conf_without_args(self):
        """
        configuration without init_business

        Hook for subclasses; intentionally a no-op here.
        """
    @abstractmethod
    def init_business(self, args, node_algo_builder=None):
        """
        construct task

        Subclasses build datasets/loaders here from the parsed ``args``.
        """
    def get_list_domains(self):
        """
        1. get list of domain names
        2. better use method than property so new domains can be added
        """
        return self._list_domains

    def set_list_domains(self, list_domains):
        """
        setter for self._list_domains
        """
        self._list_domains = list_domains

    @property
    def isize(self):
        """
        getter for input size: isize
        """
        return self._im_size

    @isize.setter
    def isize(self, im_size):
        """
        setter for input size: isize
        """
        self._im_size = im_size

    @property
    def list_domain_tr(self):
        """
        property getter of list of domains for this task

        :raises RuntimeError: if accessed before the task was initialized.
        """
        if self._list_domain_tr is None:
            # (typo "intialized" kept verbatim: the message is runtime output)
            raise RuntimeError("task not intialized!")
        return self._list_domain_tr

    @property
    def loader_tr(self):
        """loader of mixed train domains"""
        return self._loader_tr

    @property
    def loader_val(self):
        """loader of validation dataset on the training domains"""
        return self._loader_val

    @property
    def loader_te(self):
        """loader of mixed test domains"""
        return self._loader_te

    @property
    def task_name(self):
        """
        The basic name of the task, without configurations

        Derived from the class name by stripping the "NodeTask" prefix.
        """
        # @FIXME: hardcoded position
        return type(self).__name__[8:].lower()

    def get_na(self, na_tr, na_te):
        """
        task name appended with configurations
        :param na_tr: training domain names
        :param na_te: test domain names
        """
        _, list_te = self.METHOD_NAME(na_tr, na_te)
        str_te = "_".join(list_te)
        # train domain names are too long
        return "_".join([self.task_name, "te", str_te])

    def is_myjob(self, request):
        """
        :param request: string
        """
        return request == self.task_name

    def METHOD_NAME(self, tr_id, te_id):
        """
        For static DG task, get train and test domains list.
        :param tr_id: training domain ids;
            int or str, or a list of int or str, or None;
            if None, then assumed to be the complement of te_id.
        :param te_id: test domain ids;
            int or str, or a list of int or str; required.
        :return: list of training domain names, list of test domain names.

        Side effects: sets ``self.dim_d_tr`` and ``self._list_domain_tr``.
        An overlap between train and test domains only warns, never raises.
        """
        list_domains = self.get_list_domains()
        list_domain_te = parse_domain_id(te_id, list_domains)
        assert set(list_domain_te).issubset(set(list_domains))
        if tr_id is None:
            # default: train on everything that is not a test domain
            list_domain_tr = [did for did in list_domains if
                              did not in list_domain_te]
        else:
            list_domain_tr = parse_domain_id(tr_id, list_domains)
        if not set(list_domain_tr).issubset(set(list_domains)):
            raise RuntimeError(
                f"training domain {list_domain_tr} is not \
                subset of available domains {list_domains}")
        if set(list_domain_tr) & set(list_domain_te):
            # warn through both the project logger and the warnings module
            logger = Logger.get_logger()
            logger.warn(
                "The sets of training and test domains overlap -- "
                "be aware of data leakage or training to the test!"
            )
            warnings.warn(
                "The sets of training and test domains overlap -- "
                "be aware of data leakage or training to the test!",
                RuntimeWarning
            )
        self.dim_d_tr = len(list_domain_tr)
        self._list_domain_tr = list_domain_tr
        return list_domain_tr, list_domain_te
6,141 | l2 loss | # Copyright (c) OpenMMLab. All rights reserved.
from typing import Optional, Tuple, Union
import numpy as np
import torch
from mmengine.model import BaseModule
from torch import Tensor
from mmdet.registry import MODELS
from .utils import weighted_loss
@weighted_loss
def METHOD_NAME(pred: Tensor, target: Tensor) -> Tensor:
    """L2 loss.

    Args:
        pred (torch.Tensor): The prediction.
        target (torch.Tensor): The learning target of the prediction.

    Returns:
        torch.Tensor: Elementwise squared error between ``pred`` and
        ``target`` (reduction is applied by the ``weighted_loss`` wrapper).
    """
    assert pred.size() == target.size()
    diff = pred - target
    return torch.abs(diff) ** 2
@MODELS.register_module()
class L2Loss(BaseModule):
    """L2 loss with optional margins and negative-sample subsampling.

    Args:
        neg_pos_ub (int, optional): Upper bound on the negative:positive
            sample ratio; surplus negatives are dropped (-1 disables).
        pos_margin (float, optional): Margin subtracted from predictions of
            positive samples before the loss (values <= 0 disable it).
        neg_margin (float, optional): Margin subtracted from predictions of
            negative samples before the loss (values <= 0 disable it).
        hard_mining (bool, optional): If True, keep the negatives with the
            largest loss instead of a random subset.
        reduction (str, optional): The method to reduce the loss.
            Options are "none", "mean" and "sum".
        loss_weight (float, optional): The weight of loss.
    """
    def __init__(self,
                 neg_pos_ub: int = -1,
                 pos_margin: float = -1,
                 neg_margin: float = -1,
                 hard_mining: bool = False,
                 reduction: str = 'mean',
                 loss_weight: float = 1.0):
        super(L2Loss, self).__init__()
        self.neg_pos_ub = neg_pos_ub
        self.pos_margin = pos_margin
        self.neg_margin = neg_margin
        self.hard_mining = hard_mining
        self.reduction = reduction
        self.loss_weight = loss_weight

    def forward(self,
                pred: Tensor,
                target: Tensor,
                weight: Optional[Tensor] = None,
                avg_factor: Optional[float] = None,
                reduction_override: Optional[str] = None) -> Tensor:
        """Forward function.

        Args:
            pred (torch.Tensor): The prediction.
            target (torch.Tensor): The learning target of the prediction.
            weight (torch.Tensor, optional): The weight of loss for each
                prediction. Defaults to None.
            avg_factor (float, optional): Average factor that is used to
                average the loss. Defaults to None.
            reduction_override (str, optional): The reduction method used to
                override the original reduction method of the loss.
                Defaults to None.
        """
        assert reduction_override in (None, 'none', 'mean', 'sum')
        reduction = (
            reduction_override if reduction_override else self.reduction)
        # Margins/subsampling are applied first; note update_weight may
        # modify pred/target in place (see below).
        pred, weight, avg_factor = self.update_weight(pred, target, weight,
                                                      avg_factor)
        loss_bbox = self.loss_weight * METHOD_NAME(
            pred, target, weight, reduction=reduction, avg_factor=avg_factor)
        return loss_bbox

    def update_weight(self, pred: Tensor, target: Tensor, weight: Tensor,
                      avg_factor: float) -> Tuple[Tensor, Tensor, float]:
        """Update the weight according to targets.

        NOTE(review): mutates ``target`` (invalid entries set to -1) and
        ``pred`` (margin subtraction) in place — callers that reuse these
        tensors should be aware; confirm this is intentional.
        """
        if weight is None:
            weight = target.new_ones(target.size())
        # Entries with non-positive weight are treated as invalid samples.
        invalid_inds = weight <= 0
        target[invalid_inds] = -1
        pos_inds = target == 1
        neg_inds = target == 0
        if self.pos_margin > 0:
            pred[pos_inds] -= self.pos_margin
        if self.neg_margin > 0:
            pred[neg_inds] -= self.neg_margin
        pred = torch.clamp(pred, min=0, max=1)
        num_pos = int((target == 1).sum())
        num_neg = int((target == 0).sum())
        # Subsample negatives when they exceed neg_pos_ub times the positives.
        if self.neg_pos_ub > 0 and num_neg / (num_pos +
                                              1e-6) > self.neg_pos_ub:
            num_neg = num_pos * self.neg_pos_ub
            neg_idx = torch.nonzero(target == 0, as_tuple=False)
            if self.hard_mining:
                # Keep the negatives with the largest elementwise loss.
                costs = METHOD_NAME(
                    pred, target, reduction='none')[neg_idx[:, 0],
                                                    neg_idx[:, 1]].detach()
                neg_idx = neg_idx[costs.topk(num_neg)[1], :]
            else:
                neg_idx = self.random_choice(neg_idx, num_neg)
            # Zero the weight of all negatives that were not selected.
            new_neg_inds = neg_inds.new_zeros(neg_inds.size()).bool()
            new_neg_inds[neg_idx[:, 0], neg_idx[:, 1]] = True
            invalid_neg_inds = torch.logical_xor(neg_inds, new_neg_inds)
            weight[invalid_neg_inds] = 0
        avg_factor = (weight > 0).sum()
        return pred, weight, avg_factor

    @staticmethod
    def random_choice(gallery: Union[list, np.ndarray, Tensor],
                      num: int) -> np.ndarray:
        """Random select some elements from the gallery.

        It seems that Pytorch's implementation is slower than numpy so we use
        numpy to randperm the indices.
        """
        assert len(gallery) >= num
        if isinstance(gallery, list):
            gallery = np.array(gallery)
        cands = np.arange(len(gallery))
        np.random.shuffle(cands)
        rand_inds = cands[:num]
        if not isinstance(gallery, np.ndarray):
            # Move the numpy permutation back onto the tensor's device.
            rand_inds = torch.from_numpy(rand_inds).long().to(gallery.device)
        return gallery[rand_inds]
6,142 | fun write file | # Tasks for testing
import time
import sys, shutil
import typing as ty
from pathlib import Path
import functools
import operator
import subprocess as sp
import pytest
from fileformats.generic import File
from ..core import Workflow
from ..submitter import Submitter
from ... import mark
# Environment-probe skip markers shared by the engine tests; each probe runs
# once at import time of this module.
need_docker = pytest.mark.skipif(
    shutil.which("docker") is None or sp.call(["docker", "info"]),
    reason="no docker within the container",
)
no_win = pytest.mark.skipif(
    sys.platform.startswith("win"),
    reason="docker command not adjusted for windows docker",
)
need_slurm = pytest.mark.skipif(
    not (bool(shutil.which("sbatch")) and bool(shutil.which("sacct"))),
    reason="slurm not available",
)
need_sge = pytest.mark.skipif(
    not (bool(shutil.which("qsub")) and bool(shutil.which("qacct"))),
    reason="sge not available",
)
def result_no_submitter(shell_task, plugin=None):
    """helper function to return result when running without submitter

    ``plugin`` is accepted (and ignored) so this helper shares a call
    signature with :func:`result_submitter` in parametrized tests.
    """
    return shell_task()
def result_submitter(shell_task, plugin):
    """helper function to return result when running with submitter
    with specific plugin
    """
    # Run under the requested plugin, then read back the cached result.
    with Submitter(plugin=plugin) as sub:
        shell_task(submitter=sub)
    return shell_task.result()
# Graphviz availability (the ``dot`` binary) gates graph-export tests.
dot_check = sp.run(["which", "dot"], stdout=sp.PIPE, stderr=sp.PIPE)
DOT_FLAG = bool(dot_check.stdout)
# --- arithmetic task fixtures used across the engine tests ---

@mark.task
def op_4var(a, b, c, d) -> str:
    """Join four values into a single space-separated string."""
    return f"{a} {b} {c} {d}"

@mark.task
def fun_addtwo(a: int) -> int:
    """Add 2 to ``a`` after a short sleep.

    The sleeps (longer for a == 3) stagger concurrent branches —
    presumably to exercise scheduling order; confirm before changing.
    """
    import time
    time.sleep(1)
    if a == 3:
        time.sleep(2)
    return a + 2

@mark.task
def fun_addtwo_notype(a):
    """Untyped variant of :func:`fun_addtwo`."""
    import time
    time.sleep(1)
    if a == 3:
        time.sleep(2)
    return a + 2

@mark.task
def fun_addtwo_with_threadcount(a: int, sgeThreads: int = 1) -> int:
    """Like fun_addtwo; ``sgeThreads`` is unused in the body.

    NOTE(review): presumably consumed by the SGE submitter as a resource
    hint — confirm against the SGE worker code.
    """
    import time
    time.sleep(1)
    if a == 3:
        time.sleep(2)
    return a + 2

@mark.task
def fun_addvar(
    a: ty.Union[int, float], b: ty.Union[int, float]
) -> ty.Union[int, float]:
    return a + b

@mark.task
def fun_addvar_notype(a, b):
    return a + b

@mark.task
@mark.annotate({"return": {"sum": float, "sub": float}})
def fun_addsubvar(a: float, b: float):
    """Return both the sum and the difference as named outputs."""
    return a + b, a - b

@mark.task
def fun_addvar_none(a: int, b: ty.Optional[int]) -> int:
    """Add ``b`` to ``a``; a None ``b`` is treated as absent."""
    if b is None:
        return a
    else:
        return a + b

@mark.task
def fun_addvar_default(a: int, b: int = 1) -> int:
    return a + b

@mark.task
def fun_addvar_default_notype(a, b=1):
    return a + b

@mark.task
def fun_addvar3(a: int, b: int, c: int) -> int:
    return a + b + c

@mark.task
def fun_addvar4(a: int, b: int, c: int, d: int) -> int:
    return a + b + c + d

@mark.task
def moment(lst: ty.List[float], n: float) -> float:
    """n-th raw moment of ``lst``: mean of the elements raised to ``n``."""
    return sum([i**n for i in lst]) / len(lst)

@mark.task
def fun_div(a: ty.Union[int, float], b: ty.Union[int, float]) -> float:
    return a / b
@mark.task
def multiply(x: int, y: int) -> int:
    return x * y

@mark.task
def multiply_list(x: list, y: int) -> list:
    """List repetition: ``x * y`` repeats the list ``y`` times."""
    return x * y

@mark.task
def multiply_mixed(x: list, y: int) -> list:
    return x * y

@mark.task
def add2(x: int) -> int:
    """Add 2, sleeping for selected inputs to stagger concurrent branches."""
    if x == 1 or x == 12:
        time.sleep(1)
    return x + 2

@mark.task
def raise_xeq1(x: int) -> int:
    """Identity task that deliberately fails for x == 1 (error-path tests)."""
    if x == 1:
        raise Exception("x is 1, so i'm raising an exception!")
    return x

@mark.task
@mark.annotate({"return": {"out_add": float, "out_sub": float}})
def add2_sub2_res(res):
    """function that takes entire output as an input"""
    return res["out"] + 2, res["out"] - 2

@mark.task
@mark.annotate({"return": {"out_add": ty.List[float], "out_sub": ty.List[float]}})
def add2_sub2_res_list(res):
    """function that takes entire output as an input"""
    return [r["out"] + 2 for r in res], [r["out"] - 2 for r in res]

@mark.task
def power(a: int, b: int) -> int:
    return a**b

@mark.task
def identity(x):
    return x

@mark.task
def identity_2flds(
    x1, x2
) -> ty.NamedTuple("Output", [("out1", ty.Any), ("out2", ty.Any)]):
    """Pass two values through as two named outputs."""
    return x1, x2

@mark.task
def ten(x) -> int:
    """Constant task: always returns 10 regardless of input."""
    return 10

@mark.task
def add2_wait(x: int) -> int:
    time.sleep(2)
    return x + 2

@mark.task
def list_output(x: int) -> ty.List[int]:
    return [x, 2 * x, 3 * x]

@mark.task
def list_sum(x: ty.Sequence[ty.Union[int, float]]) -> ty.Union[int, float]:
    return sum(x)

@mark.task
def fun_dict(d: dict) -> str:
    """Serialize a dict as underscore-joined ``key:value`` pairs."""
    kv_list = [f"{k}:{v}" for (k, v) in d.items()]
    return "_".join(kv_list)
@mark.task
def METHOD_NAME(filename: Path, text="hello") -> File:
    """Write ``text`` to ``filename`` and return it as a File object."""
    with open(filename, "w") as f:
        f.write(text)
    return File(filename)

@mark.task
def fun_write_file_list(
    filename_list: ty.List[ty.Union[str, File, Path]], text="hi"
) -> ty.List[File]:
    """Write an indexed message to each file; return absolute paths."""
    for ii, filename in enumerate(filename_list):
        with open(filename, "w") as f:
            f.write(f"from file {ii}: {text}")
    filename_list = [Path(filename).absolute() for filename in filename_list]
    return filename_list

@mark.task
def fun_write_file_list2dict(
    filename_list: ty.List[ty.Union[str, File, Path]], text="hi"
) -> ty.Dict[str, ty.Union[File, int]]:
    """Like fun_write_file_list but returns a dict keyed by file index."""
    filename_dict = {}
    for ii, filename in enumerate(filename_list):
        with open(filename, "w") as f:
            f.write(f"from file {ii}: {text}")
        filename_dict[f"file_{ii}"] = Path(filename).absolute()
    # adding an additional field with int
    filename_dict["random_int"] = 20
    return filename_dict

@mark.task
def fun_file(filename: File):
    """Read and return the whole content of ``filename``."""
    with open(filename) as f:
        txt = f.read()
    return txt

@mark.task
def fun_file_list(filename_list: ty.List[File]):
    """Read each file and return their contents joined by single spaces."""
    txt_list = []
    for filename in filename_list:
        with open(filename) as f:
            txt_list.append(f.read())
    return " ".join(txt_list)
def gen_basic_wf(name="basic-wf"):
    """
    Generates `Workflow` of two tasks

    Task Input
    ----------
    x : int (5)

    Task Output
    -----------
    out : int (9)
    """
    wf = Workflow(name=name, input_spec=["x"])
    wf.inputs.x = 5
    # NOTE(review): fun_addtwo's signature only has ``a`` — confirm the extra
    # ``b=0`` here is intentionally accepted/ignored by the task wrapper.
    wf.add(fun_addtwo(name="task1", a=wf.lzin.x, b=0))
    wf.add(fun_addvar(name="task2", a=wf.task1.lzout.out, b=2))
    wf.set_output([("out", wf.task2.lzout.out)])
    return wf
def gen_basic_wf_with_threadcount(name="basic-wf-with-threadcount"):
    """
    Generates `Workflow` of two tasks

    Same shape as :func:`gen_basic_wf` but the first task carries an
    ``sgeThreads`` resource hint.

    Task Input
    ----------
    x : int (5)

    Task Output
    -----------
    out : int (9)
    """
    wf = Workflow(name=name, input_spec=["x"])
    wf.inputs.x = 5
    wf.add(fun_addtwo_with_threadcount(name="task1", a=wf.lzin.x, sgeThreads=4))
    wf.add(fun_addvar(name="task2", a=wf.task1.lzout.out, b=2))
    wf.set_output([("out", wf.task2.lzout.out)])
    return wf
def gen_basic_wf_with_threadcount_concurrent(name="basic-wf-with-threadcount"):
    """
    Generates `Workflow` of two tasks

    Two independent first-stage tasks (different thread hints) run
    concurrently; only task1_1 feeds the second stage.

    Task Input
    ----------
    x : int (5)

    Task Output
    -----------
    out : int (9)
    """
    wf = Workflow(name=name, input_spec=["x"])
    wf.inputs.x = 5
    wf.add(fun_addtwo_with_threadcount(name="task1_1", a=wf.lzin.x, sgeThreads=4))
    wf.add(fun_addtwo_with_threadcount(name="task1_2", a=wf.lzin.x, sgeThreads=2))
    wf.add(fun_addvar(name="task2", a=wf.task1_1.lzout.out, b=2))
    wf.set_output([("out1", wf.task2.lzout.out), ("out2", wf.task1_2.lzout.out)])
    return wf
@mark.task
@mark.annotate({"return": {"sum": int, "products": ty.List[int]}})
def list_mult_sum(scalar: int, in_list: ty.List[int]) -> ty.Tuple[int, ty.List[int]]:
    """Scale every element by ``scalar``; return the total and the products."""
    products = [scalar * x for x in in_list]
    return functools.reduce(operator.add, products, 0), products

@mark.task
@mark.annotate({"return": {"x": str, "y": int, "z": float}})
def foo(a: str, b: int, c: float) -> ty.Tuple[str, int, float]:
    """Pass three values through as three named outputs."""
    return a, b, c
6,143 | start | #!/usr/bin/env python
__author__ = "Ole Weidner"
__copyright__ = "Copyright 2012-2013, The SAGA Project"
__license__ = "MIT"
""" This examples shows how to run a job on a remote SLURM cluster
using the 'SLURM' job adaptor.
More information about the radical.saga job API can be found at:
http://radical-cybertools.github.com/radical.saga/doc/library/job/index.html
"""
import sys
import radical.saga as rs
js_url = "slurm://localhost/"
# ------------------------------------------------------------------------------
#
def METHOD_NAME():
    """Submit a small test job to the SLURM cluster and report its lifecycle.

    Returns -1 on a SAGA error; otherwise falls through (implicitly None).
    """
    try:
        # Create a job service object that represents the remote cluster.
        # The 'slurm' scheme in js_url selects the SLURM adaptor.
        js = rs.job.Service(js_url)

        # Next, we describe the job we want to run. A complete set of job
        # description attributes can be found in the API documentation.
        jd = rs.job.Description()
        jd.environment = {'FILENAME': 'testfile'}
        jd.wall_time_limit = 1  # minutes
        jd.executable = '/bin/touch'
        jd.arguments = ['$FILENAME']
        jd.name = "examplejob"
        # jd.queue = "normal"
        # jd.project = "TG-MCB090174"
        jd.working_directory = ".saga/test"
        jd.output = "examplejob.out"
        jd.error = "examplejob.err"

        # Create a new job from the job description. The initial state of
        # the job is 'New'.
        job = js.create_job(jd)

        # Check our job's id and state
        print("Job State : %s" % (job.state))

        # Now we can start our job.
        print("starting job")
        job.run()

        print("Job ID : %s" % (job.id))
        print("Job State : %s" % job.state)
        print("Exitcode : %s" % job.exit_code)
        print("Exec. hosts : %s" % job.execution_hosts)
        print("Create time : %s" % job.created)
        print("Start time : %s" % job.started)
        print("End time : %s" % job.finished)
        js.close()
    except rs.SagaException as e:
        # Catch all saga exceptions
        # (typo fix: "occured" -> "occurred" in the user-facing message)
        print("An exception occurred: (%s) %s " % (e.type, (str(e))))
        # Get the whole traceback in case of an exception -
        # this can be helpful for debugging the problem
        print(" \n*** Backtrace:\n %s" % e.traceback)
        return -1
# ------------------------------------------------------------------------------
#
def check(jobid):
    """List the service's known jobs and print the state of ``jobid``.

    Returns -1 on a SAGA error; otherwise falls through (implicitly None).
    """
    try:
        # Create a job service object to the same cluster
        js = rs.job.Service(js_url)

        # List all jobs that are known by the adaptor.
        # This should show our job as well.
        print("Listing active jobs: ")
        for jid in js.list():
            if jid == jobid:
                print(' * %s' % jid)
            else:
                print(' - %s' % jid)

        # reconnect to the given job
        job = js.get_job(jobid)

        print("Job State : %s" % job.state)
        print("Exitcode : %s" % job.exit_code)
        print("Exec. hosts : %s" % job.execution_hosts)
        print("Create time : %s" % job.created)
        print("Start time : %s" % job.started)
        print("End time : %s" % job.finished)
        js.close()
    except rs.SagaException as e:
        # Catch all saga exceptions
        # (typo fix: "occured" -> "occurred" in the user-facing message)
        print("An exception occurred: (%s) %s " % (e.type, (str(e))))
        # Get the whole traceback in case of an exception -
        # this can be helpful for debugging the problem
        print(" \n*** Backtrace:\n %s" % e.traceback)
        return -1
# ------------------------------------------------------------------------------
#
def stop(jobid):
    """Reconnect to ``jobid``, cancel it, and wait for it to finish.

    Returns 0 on success, -1 on a SAGA error.
    """
    try:
        # Create a job service object to the same cluster and reconnect to job
        js = rs.job.Service(js_url)
        job = js.get_job(jobid)

        print("Job ID : %s" % (job.id))
        print("Job State : %s" % (job.state))

        # (typo fix: "cacnel job" -> "cancel job" in the progress message)
        print("cancel job")
        job.cancel()

        # wait for our job to complete
        print("wait for job")
        job.wait()

        print("Job State : %s" % job.state)
        print("Exitcode : %s" % job.exit_code)
        print("Exec. hosts : %s" % job.execution_hosts)
        print("Create time : %s" % job.created)
        print("Start time : %s" % job.started)
        print("End time : %s" % job.finished)
        js.close()
        return 0
    except rs.SagaException as e:
        # Catch all saga exceptions
        # (typo fix: "occured" -> "occurred" in the user-facing message)
        print("An exception occurred: (%s) %s " % (e.type, (str(e))))
        # Get the whole traceback in case of an exception -
        # this can be helpful for debugging the problem
        print(" \n*** Backtrace:\n %s" % e.traceback)
        return -1
# ------------------------------------------------------------------------------
#
if __name__ == "__main__":

    usage = "\n\tusage: %s [start | check | stop] <jobid>\n" % sys.argv[0]

    if len(sys.argv) < 2:
        print(usage)
        sys.exit(-1)

    command = sys.argv[1]
    if command == 'start':
        sys.exit(METHOD_NAME())
    elif command in ('check', 'stop'):
        # Fix: 'check' and 'stop' need the job id as a second argument; the
        # original indexed sys.argv[2] unguarded and crashed with IndexError.
        if len(sys.argv) < 3:
            print(usage)
            sys.exit(-1)
        if command == 'check':
            sys.exit(check(sys.argv[2]))
        else:
            sys.exit(stop(sys.argv[2]))
6,144 | get action | # coding=utf-8
# *** WARNING: this file was generated by pulumi. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import copy
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
# Public names exported by this generated module.
__all__ = [
    'GetActionResult',
    'AwaitableGetActionResult',
    'get_action',
    'get_action_output',
]
@pulumi.output_type
class GetActionResult:
    """
    Action for alert rule.

    Generated provider-SDK code: mirrors the Azure API response shape for an
    alert-rule action. Do not hand-edit the field set.
    """
    def __init__(__self__, etag=None, id=None, logic_app_resource_id=None, name=None, system_data=None, type=None, workflow_id=None):
        # Defensive type checks: values arrive untyped from the provider RPC.
        if etag and not isinstance(etag, str):
            raise TypeError("Expected argument 'etag' to be a str")
        pulumi.set(__self__, "etag", etag)
        if id and not isinstance(id, str):
            raise TypeError("Expected argument 'id' to be a str")
        pulumi.set(__self__, "id", id)
        if logic_app_resource_id and not isinstance(logic_app_resource_id, str):
            raise TypeError("Expected argument 'logic_app_resource_id' to be a str")
        pulumi.set(__self__, "logic_app_resource_id", logic_app_resource_id)
        if name and not isinstance(name, str):
            raise TypeError("Expected argument 'name' to be a str")
        pulumi.set(__self__, "name", name)
        if system_data and not isinstance(system_data, dict):
            raise TypeError("Expected argument 'system_data' to be a dict")
        pulumi.set(__self__, "system_data", system_data)
        if type and not isinstance(type, str):
            raise TypeError("Expected argument 'type' to be a str")
        pulumi.set(__self__, "type", type)
        if workflow_id and not isinstance(workflow_id, str):
            raise TypeError("Expected argument 'workflow_id' to be a str")
        pulumi.set(__self__, "workflow_id", workflow_id)

    @property
    @pulumi.getter
    def etag(self) -> Optional[str]:
        """
        Etag of the azure resource
        """
        return pulumi.get(self, "etag")

    @property
    @pulumi.getter
    def id(self) -> str:
        """
        Fully qualified resource ID for the resource. Ex - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}
        """
        return pulumi.get(self, "id")

    @property
    @pulumi.getter(name="logicAppResourceId")
    def logic_app_resource_id(self) -> str:
        """
        Logic App Resource Id, /subscriptions/{my-subscription}/resourceGroups/{my-resource-group}/providers/Microsoft.Logic/workflows/{my-workflow-id}.
        """
        return pulumi.get(self, "logic_app_resource_id")

    @property
    @pulumi.getter
    def name(self) -> str:
        """
        The name of the resource
        """
        return pulumi.get(self, "name")

    @property
    @pulumi.getter(name="systemData")
    def system_data(self) -> 'outputs.SystemDataResponse':
        """
        Azure Resource Manager metadata containing createdBy and modifiedBy information.
        """
        return pulumi.get(self, "system_data")

    @property
    @pulumi.getter
    def type(self) -> str:
        """
        The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or "Microsoft.Storage/storageAccounts"
        """
        return pulumi.get(self, "type")

    @property
    @pulumi.getter(name="workflowId")
    def workflow_id(self) -> Optional[str]:
        """
        The name of the logic app's workflow.
        """
        return pulumi.get(self, "workflow_id")
class AwaitableGetActionResult(GetActionResult):
    # Awaitable wrapper so the invoke result can be used with ``await``:
    # awaiting yields nothing and immediately returns a plain GetActionResult copy.
    # pylint: disable=using-constant-test
    def __await__(self):
        if False:
            yield self
        return GetActionResult(
            etag=self.etag,
            id=self.id,
            logic_app_resource_id=self.logic_app_resource_id,
            name=self.name,
            system_data=self.system_data,
            type=self.type,
            workflow_id=self.workflow_id)
def METHOD_NAME(action_id: Optional[str] = None,
                resource_group_name: Optional[str] = None,
                rule_id: Optional[str] = None,
                workspace_name: Optional[str] = None,
                opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetActionResult:
    """
    Gets the action of alert rule.

    :param str action_id: Action ID
    :param str resource_group_name: The name of the resource group. The name is case insensitive.
    :param str rule_id: Alert rule ID
    :param str workspace_name: The name of the workspace.
    :param pulumi.InvokeOptions opts: Options controlling how the invoke is performed.
    """
    # Serialize the arguments into the wire-format dict expected by the provider.
    __args__ = dict()
    __args__['actionId'] = action_id
    __args__['resourceGroupName'] = resource_group_name
    __args__['ruleId'] = rule_id
    __args__['workspaceName'] = workspace_name
    opts = pulumi.InvokeOptions.merge(_utilities.get_invoke_opts_defaults(), opts)
    __ret__ = pulumi.runtime.invoke('azure-native:securityinsights/v20230601preview:getAction', __args__, opts=opts, typ=GetActionResult).value

    return AwaitableGetActionResult(
        etag=pulumi.get(__ret__, 'etag'),
        id=pulumi.get(__ret__, 'id'),
        logic_app_resource_id=pulumi.get(__ret__, 'logic_app_resource_id'),
        name=pulumi.get(__ret__, 'name'),
        system_data=pulumi.get(__ret__, 'system_data'),
        type=pulumi.get(__ret__, 'type'),
        workflow_id=pulumi.get(__ret__, 'workflow_id'))
@_utilities.lift_output_func(METHOD_NAME)
def get_action_output(action_id: Optional[pulumi.Input[str]] = None,
                      resource_group_name: Optional[pulumi.Input[str]] = None,
                      rule_id: Optional[pulumi.Input[str]] = None,
                      workspace_name: Optional[pulumi.Input[str]] = None,
                      opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetActionResult]:
    """
    Gets the action of alert rule.

    :param str action_id: Action ID
    :param str resource_group_name: The name of the resource group. The name is case insensitive.
    :param str rule_id: Alert rule ID
    :param str workspace_name: The name of the workspace.
    """
    # Body is provided by lift_output_func, which adapts the plain invoke above
    # to accept pulumi Inputs and return an Output.
    ...
# Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_tf
if is_tf_available():
import tensorflow as tf
from tensorflow.python.eager import context
from tensorflow.python.framework import ops
from transformers import GradientAccumulator, create_optimizer
@require_tf
class OptimizationFTest(unittest.TestCase):
    """Tests for GradientAccumulator, alone and under a two-replica MirroredStrategy."""

    def assertListAlmostEqual(self, list1, list2, tol):
        """Assert two equal-length float lists are element-wise equal within *tol*."""
        self.assertEqual(len(list1), len(list2))
        for a, b in zip(list1, list2):
            self.assertAlmostEqual(a, b, delta=tol)

    def testGradientAccumulator(self):
        """Accumulation sums per-step gradients; reset() zeroes them and the step count."""
        accumulator = GradientAccumulator()
        accumulator([tf.constant([1.0, 2.0])])
        accumulator([tf.constant([-2.0, 1.0])])
        accumulator([tf.constant([-1.0, 2.0])])
        # Passing a different number of gradients than the first call must raise.
        with self.assertRaises(ValueError):
            accumulator([tf.constant([1.0, 1.0]), tf.constant([2.0, 2.0])])
        self.assertEqual(accumulator.step, 3)
        self.assertEqual(len(accumulator.gradients), 1)
        self.assertListAlmostEqual(accumulator.gradients[0].numpy().tolist(), [-2.0, 5.0], tol=1e-2)
        accumulator.reset()
        self.assertEqual(accumulator.step, 0)
        self.assertListAlmostEqual(accumulator.gradients[0].numpy().tolist(), [0.0, 0.0], tol=1e-2)

    def METHOD_NAME(self):
        """Accumulate and apply gradients per-replica under a MirroredStrategy."""
        # Reset the eager context so the logical-device split below can take effect.
        context._context = None
        ops.enable_eager_execution_internal()
        physical_devices = tf.config.list_physical_devices("CPU")
        if len(physical_devices) == 1:
            # Split the single CPU into two logical devices so the strategy has two replicas.
            tf.config.set_logical_device_configuration(
                physical_devices[0], [tf.config.LogicalDeviceConfiguration(), tf.config.LogicalDeviceConfiguration()]
            )
        devices = tf.config.list_logical_devices(device_type="CPU")
        strategy = tf.distribute.MirroredStrategy(devices=devices[:2])

        with strategy.scope():
            accumulator = GradientAccumulator()
            variable = tf.Variable([4.0, 3.0])
            optimizer, _ = create_optimizer(5e-5, 10, 5)
            gradient_placeholder = tf.Variable([0.0, 0.0], trainable=False)

        def accumulate_on_replica(gradient):
            accumulator([gradient])

        def apply_on_replica():
            optimizer.apply_gradients(list(zip(accumulator.gradients, [variable])))

        @tf.function
        def accumulate(grad1, grad2):
            # Assign one gradient per replica, then accumulate on each replica.
            with strategy.scope():
                local_variables = strategy.experimental_local_results(gradient_placeholder)
                local_variables[0].assign(grad1)
                local_variables[1].assign(grad2)
                strategy.run(accumulate_on_replica, args=(gradient_placeholder,))

        @tf.function
        def apply_grad():
            with strategy.scope():
                strategy.run(apply_on_replica)

        def _check_local_values(grad1, grad2):
            values = strategy.experimental_local_results(accumulator._gradients[0])
            self.assertListAlmostEqual(values[0].value(), grad1, tol=1e-2)
            self.assertListAlmostEqual(values[1].value(), grad2, tol=1e-2)

        accumulate([1.0, 2.0], [-1.0, 1.0])
        accumulate([3.0, -1.0], [-1.0, -1.0])
        accumulate([-2.0, 2.0], [3.0, -2.0])
        self.assertEqual(accumulator.step, 3)
        # Expected per-replica sums of the three accumulate calls above.
        _check_local_values([2.0, 3.0], [1.0, -2.0])
        apply_grad()
        # assumes the tiny learning rate leaves the variable within tol of its start — TODO confirm
        self.assertListAlmostEqual(variable.value(), [4.0, 3.0], tol=1e-2)
        accumulator.reset()
        self.assertEqual(accumulator.step, 0)
        _check_local_values([0.0, 0.0], [0.0, 0.0])
import d20
from cogs5e.models.errors import InvalidSaveType
from utils import enums
from utils.functions import maybe_mod, reconcile_adv, verbose_stat
from . import Effect
from ..errors import AutomationException, NoSpellDC, TargetException
from ..results import SaveResult
from ..utils import stringify_intexpr
class Save(Effect):
    """Automation effect that forces the target to make a saving throw.

    Runs the ``fail`` child effects when the save fails and the ``success``
    children when it passes.  DC resolution order (highest priority first):
    ``-dc`` argument > ``self.dc`` > spell cast override > spellbook DC.
    """

    def __init__(self, stat: str, fail: list, success: list, dc: str = None, adv: enums.AdvantageType = None, **kwargs):
        super().__init__("save", **kwargs)
        self.stat = stat  # ability the save uses (matched against *Save skill names)
        self.fail = fail  # child effects run on a failed save
        self.success = success  # child effects run on a successful save
        self.dc = dc  # optional intexpression overriding the default DC
        self.adv = adv  # forced advantage/disadvantage, if any

    @classmethod
    def from_data(cls, data):
        # Children and the advantage enum are stored serialized; revive them first.
        data["fail"] = Effect.deserialize(data["fail"])
        data["success"] = Effect.deserialize(data["success"])
        if data.get("adv") is not None:
            data["adv"] = enums.AdvantageType(data["adv"])
        return super().from_data(data)

    def METHOD_NAME(self):
        """Serialize this effect (and its children) to a JSON-compatible dict."""
        out = super().METHOD_NAME()
        fail = Effect.serialize(self.fail)
        success = Effect.serialize(self.success)
        out.update({"stat": self.stat, "fail": fail, "success": success})
        # Optional attributes are only emitted when set, keeping payloads compact.
        if self.dc is not None:
            out["dc"] = self.dc
        if self.adv is not None:
            out["adv"] = self.adv.value
        return out

    def run(self, autoctx):
        """Execute the save against the current target and run the child effects.

        :raises TargetException: if run outside of a Target effect.
        :raises AutomationException: if ``self.dc`` cannot be evaluated.
        :raises NoSpellDC: if no DC can be resolved from any source.
        :raises InvalidSaveType: if the save stat is not a valid ability.
        """
        super().run(autoctx)
        if autoctx.target is None:
            raise TargetException(
                "Tried to make a save without a target! Make sure all Save effects are inside of a Target effect."
            )

        # ==== args ====
        save = autoctx.args.last("save") or self.stat
        sb = autoctx.args.get("sb", ephem=True)
        auto_pass = autoctx.args.last("pass", type_=bool, ephem=True)
        auto_fail = autoctx.args.last("fail", type_=bool, ephem=True)
        hide = autoctx.args.last("h", type_=bool)

        # ==== dc ====
        dc_override = None
        if self.dc:
            try:
                dc_override = autoctx.parse_intexpression(self.dc)
            except Exception:
                raise AutomationException(f"{self.dc!r} cannot be interpreted as a DC.")

        # dc hierarchy: arg > self.dc > spell cast override > spellbook dc
        dc = autoctx.caster.spellbook.dc
        if dc_override:
            dc = dc_override
        elif autoctx.dc_override is not None:
            dc = autoctx.dc_override
        # -dc may be an absolute value or a +N/-N modifier on the resolved DC
        if autoctx.args.last("dc") is not None:
            dc = maybe_mod(autoctx.args.last("dc"), dc)
        if dc is None:
            raise NoSpellDC("No spell save DC found. Use the `-dc` argument to specify one!")

        # dc effects
        bonus_effect_dc = autoctx.caster_active_effects(
            mapper=lambda effect: effect.effects.dc_bonus, reducer=sum, default=0
        )
        dc += bonus_effect_dc

        # Resolve the save name (e.g. "dex") to its full skill name by substring match.
        try:
            save_skill = next(
                s
                for s in (
                    "strengthSave",
                    "dexteritySave",
                    "constitutionSave",
                    "intelligenceSave",
                    "wisdomSave",
                    "charismaSave",
                )
                if save.lower() in s.lower()
            )
            stat = save_skill[:3]
        except StopIteration:
            raise InvalidSaveType()

        # ==== ieffects ====
        # Combine args/ieffect advantages - adv/dis (#1552)
        sadv_effects = autoctx.target_active_effects(
            mapper=lambda effect: effect.effects.save_adv, reducer=lambda saves: set().union(*saves), default=set()
        )
        sdis_effects = autoctx.target_active_effects(
            mapper=lambda effect: effect.effects.save_dis, reducer=lambda saves: set().union(*saves), default=set()
        )
        sadv = stat in sadv_effects
        sdis = stat in sdis_effects

        # ==== adv ====
        adv = reconcile_adv(
            adv=autoctx.args.last("sadv", type_=bool, ephem=True) or sadv or self.adv == enums.AdvantageType.ADV,
            dis=autoctx.args.last("sdis", type_=bool, ephem=True) or sdis or self.adv == enums.AdvantageType.DIS,
        )

        # ==== execution ====
        save_roll = None
        autoctx.metavars["lastSaveRollTotal"] = 0
        autoctx.metavars["lastSaveNaturalRoll"] = 0  # 1495
        autoctx.metavars["lastSaveDC"] = dc
        autoctx.metavars["lastSaveAbility"] = verbose_stat(stat)
        autoctx.meta_queue(f"**DC**: {dc}")

        if not autoctx.target.is_simple:
            save_blurb = f"{stat.upper()} Save"
            if auto_pass:
                is_success = True
                autoctx.queue(f"**{save_blurb}:** Automatic success!")
            elif auto_fail:
                is_success = False
                autoctx.queue(f"**{save_blurb}:** Automatic failure!")
            else:
                save_dice = autoctx.target.get_save_dice(save_skill, adv=adv, sb=sb)
                save_roll = d20.roll(save_dice)
                is_success = save_roll.total >= dc
                # get natural roll
                d20_value = d20.utils.leftmost(save_roll.expr).total
                autoctx.metavars["lastSaveRollTotal"] = save_roll.total  # 1362
                autoctx.metavars["lastSaveNaturalRoll"] = d20_value  # 1495
                success_str = "; Success!" if is_success else "; Failure!"
                out = f"**{save_blurb}**: {save_roll.result}{success_str}"
                if not hide:
                    autoctx.queue(out)
                else:
                    # -h hides the roll from the channel; DM the full result instead
                    autoctx.add_pm(str(autoctx.ctx.author.id), out)
                    autoctx.queue(f"**{save_blurb}**: 1d20...{success_str}")
        else:
            # Simple targets (e.g. "each creature") never roll; only show the save type.
            autoctx.meta_queue(f"{stat.upper()} Save")
            is_success = False

        # Disable critical damage state for children (#1556)
        original = autoctx.in_save
        autoctx.in_save = True

        if is_success:
            children = self.on_success(autoctx)
        else:
            children = self.on_fail(autoctx)

        autoctx.in_save = original  # Restore proper crit state (#1556)

        return SaveResult(
            dc=dc, ability=save_skill, save_roll=save_roll, adv=adv, did_save=is_success, children=children
        )

    def on_success(self, autoctx):
        """Record the pass in metavars and run the success children."""
        autoctx.metavars["lastSaveDidPass"] = True
        return self.run_children(self.success, autoctx)

    def on_fail(self, autoctx):
        """Record the failure in metavars and run the fail children."""
        autoctx.metavars["lastSaveDidPass"] = False
        return self.run_children(self.fail, autoctx)

    def build_str(self, caster, evaluator):
        """Build a human-readable summary like ``DC 15 DEX Save. Fail: ...``."""
        super().build_str(caster, evaluator)
        dc = caster.spellbook.dc
        if self.dc:
            dc = stringify_intexpr(evaluator, self.dc)

        out = f"DC {dc} {self.stat[:3].upper()} Save"
        if self.fail:
            fail_out = self.build_child_str(self.fail, caster, evaluator)
            if fail_out:
                out += f". Fail: {fail_out}"
        if self.success:
            success_out = self.build_child_str(self.success, caster, evaluator)
            if success_out:
                out += f". Success: {success_out}"
        return out

    @property
    def children(self):
        # All child effects, regardless of branch.
        return super().children + self.fail + self.success
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
#
# Code generated by aaz-dev-tools
# --------------------------------------------------------------------------------------------
# pylint: skip-file
# flake8: noqa
from azure.cli.core.aaz import *
@register_command(
    "network vnet list-endpoint-services",
)
class ListEndpointServices(AAZCommand):
    """List which services support VNet service tunneling in a given region.
    To learn more about service endpoints visit https://docs.microsoft.com/azure/virtual-network/virtual-network-service-endpoints-configure#azure-cli.

    :example: List the endpoint services available for use in the West US region.
        az network vnet list-endpoint-services -l westus -o table
    """

    _aaz_info = {
        "version": "2022-01-01",
        "resources": [
            ["mgmt-plane", "/subscriptions/{}/providers/microsoft.network/locations/{}/virtualnetworkavailableendpointservices", "2022-01-01"],
        ]
    }

    def _handler(self, command_args):
        super()._handler(command_args)
        # Paged list command: METHOD_NAME issues the request, _output extracts page + next link.
        return self.build_paging(self.METHOD_NAME, self._output)

    _args_schema = None

    @classmethod
    def _build_arguments_schema(cls, *args, **kwargs):
        # Argument schema is built once and cached on the class.
        if cls._args_schema is not None:
            return cls._args_schema
        cls._args_schema = super()._build_arguments_schema(*args, **kwargs)

        # define Arg Group ""
        _args_schema = cls._args_schema
        _args_schema.location = AAZResourceLocationArg(
            required=True,
        )
        return cls._args_schema

    def METHOD_NAME(self):
        """Run the single HTTP operation for this command between the callback hooks."""
        self.pre_operations()
        self.AvailableEndpointServicesList(ctx=self.ctx)()
        self.post_operations()

    @register_callback
    def pre_operations(self):
        pass

    @register_callback
    def post_operations(self):
        pass

    def _output(self, *args, **kwargs):
        # Return the current page of results plus the link to the next page (if any).
        result = self.deserialize_output(self.ctx.vars.instance.value, client_flatten=True)
        next_link = self.deserialize_output(self.ctx.vars.instance.next_link)
        return result, next_link

    class AvailableEndpointServicesList(AAZHttpOperation):
        # HTTP GET against the availableEndpointServices ARM endpoint.
        CLIENT_TYPE = "MgmtClient"

        def __call__(self, *args, **kwargs):
            request = self.make_request()
            session = self.client.send_request(request=request, stream=False, **kwargs)
            if session.http_response.status_code in [200]:
                return self.on_200(session)
            return self.on_error(session.http_response)

        @property
        def url(self):
            return self.client.format_url(
                "/subscriptions/{subscriptionId}/providers/Microsoft.Network/locations/{location}/virtualNetworkAvailableEndpointServices",
                **self.url_parameters
            )

        @property
        def method(self):
            return "GET"

        @property
        def error_format(self):
            return "ODataV4Format"

        @property
        def url_parameters(self):
            parameters = {
                **self.serialize_url_param(
                    "location", self.ctx.args.location,
                    required=True,
                ),
                **self.serialize_url_param(
                    "subscriptionId", self.ctx.subscription_id,
                    required=True,
                ),
            }
            return parameters

        @property
        def query_parameters(self):
            parameters = {
                **self.serialize_query_param(
                    "api-version", "2022-01-01",
                    required=True,
                ),
            }
            return parameters

        @property
        def header_parameters(self):
            parameters = {
                **self.serialize_header_param(
                    "Accept", "application/json",
                ),
            }
            return parameters

        def on_200(self, session):
            # Deserialize the 200 response body into ctx.vars.instance using the schema below.
            data = self.deserialize_http_content(session)
            self.ctx.set_var(
                "instance",
                data,
                schema_builder=self._build_schema_on_200
            )

        _schema_on_200 = None

        @classmethod
        def _build_schema_on_200(cls):
            # Response schema is built lazily and cached on the class.
            if cls._schema_on_200 is not None:
                return cls._schema_on_200

            cls._schema_on_200 = AAZObjectType()

            _schema_on_200 = cls._schema_on_200
            _schema_on_200.next_link = AAZStrType(
                serialized_name="nextLink",
            )
            _schema_on_200.value = AAZListType()

            value = cls._schema_on_200.value
            value.Element = AAZObjectType()

            _element = cls._schema_on_200.value.Element
            _element.id = AAZStrType()
            _element.name = AAZStrType(
                flags={"read_only": True},
            )
            _element.type = AAZStrType(
                flags={"read_only": True},
            )

            return cls._schema_on_200
class _ListEndpointServicesHelper:
    """Helper class for ListEndpointServices"""


# Public API of this generated module.  (Removed a stray "|" extraction
# artifact that made the original line a syntax error.)
__all__ = ["ListEndpointServices"]
from django_filters import rest_framework as filters
from drf_spectacular.utils import extend_schema
from dry_rest_permissions.generics import DRYPermissions
from rest_framework import mixins
from rest_framework.decorators import action
from rest_framework.exceptions import ValidationError
from rest_framework.generics import get_object_or_404
from rest_framework.permissions import IsAuthenticated
from rest_framework.response import Response
from rest_framework.viewsets import GenericViewSet
from care.facility.api.serializers.daily_round import DailyRoundSerializer
from care.facility.api.viewsets.mixins.access import AssetUserAccessMixin
from care.facility.models.daily_round import DailyRound
from care.facility.models.patient_consultation import PatientConsultation
from care.utils.queryset.consultation import get_consultation_queryset
DailyRoundAttributes = [f.name for f in DailyRound._meta.get_fields()]
class DailyRoundFilterSet(filters.FilterSet):
    """FilterSet for daily rounds supporting a comma-separated ``rounds_type`` filter."""

    rounds_type = filters.CharFilter(method="filter_rounds_type")

    def filter_rounds_type(self, queryset, name, value):
        """Restrict the queryset to rounds whose type matches any comma-separated token.

        Unknown tokens are silently ignored, mirroring the lookup-or-skip behavior.
        """
        mapping = DailyRound.RoundsTypeDict
        selected = {mapping[token] for token in value.split(",") if token in mapping}
        return queryset.filter(rounds_type__in=list(selected))
class DailyRoundsViewSet(
    AssetUserAccessMixin,
    mixins.CreateModelMixin,
    mixins.ListModelMixin,
    mixins.RetrieveModelMixin,
    mixins.UpdateModelMixin,
    GenericViewSet,
):
    """CRUD + analysis endpoints for daily rounds, nested under a consultation URL."""

    serializer_class = DailyRoundSerializer
    permission_classes = (
        IsAuthenticated,
        DRYPermissions,
    )
    queryset = DailyRound.objects.all().order_by("-id")
    lookup_field = "external_id"
    filterset_class = DailyRoundFilterSet
    filter_backends = (filters.DjangoFilterBackend,)
    # Key in the analyse request body that lists the fields to extract.
    FIELDS_KEY = "fields"
    # Maximum number of fields a single analyse request may ask for.
    MAX_FIELDS = 20
    PAGE_SIZE = 36  # One Round Per Hour

    def get_queryset(self):
        # Scope all queries to the consultation identified in the nested URL.
        return self.queryset.filter(
            consultation__external_id=self.kwargs["consultation_external_id"]
        )

    def METHOD_NAME(self, *args, **kwargs):
        # Inject the consultation pk from the URL into the serializer's input data.
        if "data" in kwargs:
            kwargs["data"]["consultation"] = PatientConsultation.objects.get(
                external_id=self.kwargs["consultation_external_id"]
            ).id
        return super().METHOD_NAME(*args, **kwargs)

    @extend_schema(tags=["daily_rounds"])
    @action(methods=["POST"], detail=False)
    def analyse(self, request, **kwargs):
        """Return per-timestamp values of the requested daily-round fields.

        Body: ``{"fields": [...], "page": N}``; response maps ``taken_at``
        timestamps to the requested field values for that round.
        """
        # Request Body Validations
        if self.FIELDS_KEY not in request.data:
            raise ValidationError({"fields": "Field not present"})
        if not isinstance(request.data[self.FIELDS_KEY], list):
            raise ValidationError({"fields": "Must be an List"})
        if len(request.data[self.FIELDS_KEY]) >= self.MAX_FIELDS:
            raise ValidationError({"fields": f"Must be smaller than {self.MAX_FIELDS}"})
        # Request Data Validations
        # Calculate Base Fields ( From . seperated ones )
        base_fields = [str(x).split(".")[0] for x in request.data[self.FIELDS_KEY]]
        errors = {}
        for field in base_fields:
            if field not in DailyRoundAttributes:
                errors[field] = "Not a valid field"
        # external_id is always fetched so rows can be keyed by round id.
        base_fields.append("external_id")
        if errors:
            raise ValidationError(errors)
        # NOTE(review): "page" is used in arithmetic below but is not validated as an int
        # here — presumably callers always send a number; confirm upstream validation.
        page = request.data.get("page", 1)
        # to_time = datetime.now() - timedelta(days=((page - 1) * self.DEFAULT_LOOKUP_DAYS))
        # from_time = to_time - timedelta(days=self.DEFAULT_LOOKUP_DAYS)
        consultation = get_object_or_404(
            get_consultation_queryset(request.user).filter(
                external_id=self.kwargs["consultation_external_id"]
            )
        )
        daily_round_objects = DailyRound.objects.filter(
            consultation=consultation
        ).order_by("-taken_at")
        total_count = daily_round_objects.count()
        # Slice fetches one row beyond the page size; presumably lets the client
        # detect whether another page exists — verify against the frontend.
        daily_round_objects = daily_round_objects[
            ((page - 1) * self.PAGE_SIZE) : ((page * self.PAGE_SIZE) + 1)
        ]
        final_data_rows = daily_round_objects.values("taken_at", *base_fields)
        final_analytics = {}
        for row in final_data_rows:
            # Rows without a taken_at timestamp cannot be keyed and are skipped.
            if not row["taken_at"]:
                continue
            row_data = {}
            for field in base_fields:
                row_data[field] = row[field]
            row_data["id"] = row["external_id"]
            del row_data["external_id"]
            final_analytics[str(row["taken_at"])] = row_data
        final_data = {
            "results": final_analytics,
            "count": total_count,
            "page_size": self.PAGE_SIZE,
        }
        return Response(final_data)
# Copyright 2021 Hathor Labs
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING, Iterator, Optional
from structlog import get_logger
from hathor.indexes import IndexesManager
from hathor.storage import RocksDBStorage
from hathor.transaction.storage.exceptions import TransactionDoesNotExist
from hathor.transaction.storage.migrations import MigrationState
from hathor.transaction.storage.transaction_storage import BaseTransactionStorage
from hathor.util import json_dumpb, json_loadb
if TYPE_CHECKING:
import rocksdb
from hathor.transaction import BaseTransaction, TransactionMetadata
logger = get_logger()
_DB_NAME = 'data_v2.db'
_CF_NAME_TX = b'tx'
_CF_NAME_META = b'meta'
_CF_NAME_ATTR = b'attr'
_CF_NAME_MIGRATIONS = b'migrations'
class TransactionRocksDBStorage(BaseTransactionStorage):
    """This storage saves tx and metadata to the same key on RocksDB

    It uses Protobuf serialization internally.
    """

    def __init__(self, rocksdb_storage: RocksDBStorage, indexes: Optional[IndexesManager] = None):
        # One column family per concern: tx bytes, metadata, misc attributes,
        # and migration states.
        self._cf_tx = rocksdb_storage.get_or_create_column_family(_CF_NAME_TX)
        self._cf_meta = rocksdb_storage.get_or_create_column_family(_CF_NAME_META)
        self._cf_attr = rocksdb_storage.get_or_create_column_family(_CF_NAME_ATTR)
        self._cf_migrations = rocksdb_storage.get_or_create_column_family(_CF_NAME_MIGRATIONS)

        self._rocksdb_storage = rocksdb_storage
        self._db = rocksdb_storage.get_db()

        super().__init__(indexes=indexes)

    def _load_from_bytes(self, tx_data: bytes, meta_data: bytes) -> 'BaseTransaction':
        """Deserialize a (tx bytes, metadata bytes) pair into a tx bound to this storage."""
        from hathor.transaction.base_transaction import tx_or_block_from_bytes
        from hathor.transaction.transaction_metadata import TransactionMetadata

        tx = tx_or_block_from_bytes(tx_data)
        tx._metadata = TransactionMetadata.create_from_json(json_loadb(meta_data))
        tx.storage = self
        return tx

    def _tx_to_bytes(self, tx: 'BaseTransaction') -> bytes:
        """Serialize a transaction to its wire-format bytes."""
        return bytes(tx)

    def _meta_to_bytes(self, meta: 'TransactionMetadata') -> bytes:
        """Serialize transaction metadata as JSON bytes."""
        return json_dumpb(meta.to_json())

    def METHOD_NAME(self, migration_name: str) -> MigrationState:
        """Return the stored state for *migration_name*, NOT_STARTED if absent."""
        key = migration_name.encode('ascii')
        value = self._db.get((self._cf_migrations, key))
        if value is not None:
            return MigrationState.from_db_bytes(value)
        return MigrationState.NOT_STARTED

    def set_migration_state(self, migration_name: str, state: MigrationState) -> None:
        """Persist *state* for *migration_name* in the migrations column family."""
        key = migration_name.encode('ascii')
        value = state.to_db_bytes()
        self._db.put((self._cf_migrations, key), value)

    def remove_transaction(self, tx: 'BaseTransaction') -> None:
        # Delete both the tx bytes and its metadata, then drop the cached weakref.
        super().remove_transaction(tx)
        self._db.delete((self._cf_tx, tx.hash))
        self._db.delete((self._cf_meta, tx.hash))
        self._remove_from_weakref(tx)

    def save_transaction(self, tx: 'BaseTransaction', *, only_metadata: bool = False) -> None:
        super().save_transaction(tx, only_metadata=only_metadata)
        self._save_transaction(tx, only_metadata=only_metadata)
        self._save_to_weakref(tx)

    def _save_transaction(self, tx: 'BaseTransaction', *, only_metadata: bool = False) -> None:
        """Write tx (unless only_metadata) and its metadata under the tx hash key."""
        key = tx.hash
        if not only_metadata:
            tx_data = self._tx_to_bytes(tx)
            self._db.put((self._cf_tx, key), tx_data)
        meta_data = self._meta_to_bytes(tx.get_metadata(use_storage=False))
        self._db.put((self._cf_meta, key), meta_data)

    def transaction_exists(self, hash_bytes: bytes) -> bool:
        # key_may_exist is a cheap bloom-filter check; only hit the db on a maybe.
        may_exist, _ = self._db.key_may_exist((self._cf_tx, hash_bytes))
        if not may_exist:
            return False
        tx_exists = self._db.get((self._cf_tx, hash_bytes)) is not None
        return tx_exists

    def _get_transaction(self, hash_bytes: bytes) -> 'BaseTransaction':
        """Return the tx for *hash_bytes*, preferring the weakref cache.

        :raises TransactionDoesNotExist: if the hash is not stored.
        """
        tx = self.get_transaction_from_weakref(hash_bytes)
        if tx is not None:
            return tx

        tx = self._get_transaction_from_db(hash_bytes)
        if not tx:
            raise TransactionDoesNotExist(hash_bytes.hex())

        assert tx._metadata is not None
        assert tx.hash == hash_bytes

        self._save_to_weakref(tx)
        return tx

    def _get_transaction_from_db(self, hash_bytes: bytes) -> Optional['BaseTransaction']:
        """Load the tx for *hash_bytes* straight from RocksDB, or None if missing."""
        key = hash_bytes
        tx_data = self._db.get((self._cf_tx, key))
        meta_data = self._db.get((self._cf_meta, key))
        if tx_data is None:
            return None
        assert meta_data is not None, 'expected metadata to exist when tx exists'
        tx = self._load_from_bytes(tx_data, meta_data)
        return tx

    def _get_tx(self, hash_bytes: bytes, tx_data: bytes) -> 'BaseTransaction':
        """Like _get_transaction, but reuses already-fetched *tx_data* on a cache miss."""
        tx = self.get_transaction_from_weakref(hash_bytes)
        if tx is None:
            meta_data = self._db.get((self._cf_meta, hash_bytes))
            tx = self._load_from_bytes(tx_data, meta_data)
            assert tx.hash == hash_bytes
            self._save_to_weakref(tx)
        return tx

    def _get_all_transactions(self) -> Iterator['BaseTransaction']:
        """Iterate over every stored transaction, deserializing lazily."""
        tx: Optional['BaseTransaction']

        items = self._db.iteritems(self._cf_tx)
        items.seek_to_first()

        for key, tx_data in items:
            _, hash_bytes = key
            # Serialize per-hash access when a lock is configured for it.
            lock = self._get_lock(hash_bytes)

            if lock:
                with lock:
                    tx = self._get_tx(hash_bytes, tx_data)
            else:
                tx = self._get_tx(hash_bytes, tx_data)

            assert tx is not None

            yield tx

    def is_empty(self) -> bool:
        # We consider 3 or less transactions as empty, because we want to ignore the genesis
        # block and txs
        keys = self._db.iterkeys(self._cf_tx)
        keys.seek_to_first()
        count = 0
        for key in keys:
            count += 1
            if count > 3:
                return False
        return True

    def get_sst_files_sizes_by_cf(
        self,
        cfs: Optional[list['rocksdb.ColumnFamilyHandle']] = None
    ) -> dict[bytes, float]:
        """Get the SST files sizes of each Column Family in bytes

        :param cfs: The list of column families, defaults to None, in which case all of them are returned

        :return: A dict containing the names of the cfs and their sizes
        """
        column_families = self._db.column_families if cfs is None else cfs
        sizes: dict[bytes, float] = {}

        for cf in column_families:
            sizes[cf.name] = float(self._db.get_property(b'rocksdb.total-sst-files-size', cf))

        return sizes

    def add_value(self, key: str, value: str) -> None:
        """Store an arbitrary utf-8 key/value pair in the attributes column family."""
        self._db.put((self._cf_attr, key.encode('utf-8')), value.encode('utf-8'))

    def remove_value(self, key: str) -> None:
        """Delete the attribute stored under *key* (no-op if absent)."""
        self._db.delete((self._cf_attr, key.encode('utf-8')))

    def get_value(self, key: str) -> Optional[str]:
        """Return the attribute stored under *key*, or None if absent."""
        data = self._db.get((self._cf_attr, key.encode('utf-8')))
        if data is None:
            return None
        else:
            return data.decode()
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
#
# Code generated by aaz-dev-tools
# --------------------------------------------------------------------------------------------
# pylint: skip-file
# flake8: noqa
from azure.cli.core.aaz import *
@register_command(
    "monitor private-link-scope private-endpoint-connection wait",
)
class Wait(AAZWaitCommand):
    """Place the CLI in a waiting state until a condition is met.
    """

    _aaz_info = {
        "resources": [
            ["mgmt-plane", "/subscriptions/{}/resourcegroups/{}/providers/microsoft.insights/privatelinkscopes/{}/privateendpointconnections/{}", "2019-10-17-preview"],
        ]
    }

    def _handler(self, command_args):
        super()._handler(command_args)
        # Wait commands poll the GET operation until the wait condition holds.
        self._execute_operations()
        return self.METHOD_NAME()

    _args_schema = None

    @classmethod
    def _build_arguments_schema(cls, *args, **kwargs):
        # Argument schema is built once and cached on the class.
        if cls._args_schema is not None:
            return cls._args_schema
        cls._args_schema = super()._build_arguments_schema(*args, **kwargs)

        # define Arg Group ""
        _args_schema = cls._args_schema
        _args_schema.name = AAZStrArg(
            options=["-n", "--name"],
            help="Name of the private endpoint connection associated with the private link scope.",
            required=True,
            id_part="child_name_1",
        )
        _args_schema.resource_group = AAZResourceGroupNameArg(
            required=True,
        )
        _args_schema.scope_name = AAZStrArg(
            options=["--scope-name"],
            help="Name of the Azure Monitor Private Link Scope.",
            required=True,
            id_part="name",
        )
        return cls._args_schema

    def _execute_operations(self):
        # Single GET between the callback hooks.
        self.pre_operations()
        self.PrivateEndpointConnectionsGet(ctx=self.ctx)()
        self.post_operations()

    @register_callback
    def pre_operations(self):
        pass

    @register_callback
    def post_operations(self):
        pass

    def METHOD_NAME(self, *args, **kwargs):
        """Deserialize the fetched instance (unflattened) as the command output."""
        result = self.deserialize_output(self.ctx.vars.instance, client_flatten=False)
        return result

    class PrivateEndpointConnectionsGet(AAZHttpOperation):
        # HTTP GET for the private endpoint connection resource.
        CLIENT_TYPE = "MgmtClient"

        def __call__(self, *args, **kwargs):
            request = self.make_request()
            session = self.client.send_request(request=request, stream=False, **kwargs)
            if session.http_response.status_code in [200]:
                return self.on_200(session)
            return self.on_error(session.http_response)

        @property
        def url(self):
            return self.client.format_url(
                "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Insights/privateLinkScopes/{scopeName}/privateEndpointConnections/{privateEndpointConnectionName}",
                **self.url_parameters
            )

        @property
        def method(self):
            return "GET"

        @property
        def error_format(self):
            return "MgmtErrorFormat"

        @property
        def url_parameters(self):
            parameters = {
                **self.serialize_url_param(
                    "privateEndpointConnectionName", self.ctx.args.name,
                    required=True,
                ),
                **self.serialize_url_param(
                    "resourceGroupName", self.ctx.args.resource_group,
                    required=True,
                ),
                **self.serialize_url_param(
                    "scopeName", self.ctx.args.scope_name,
                    required=True,
                ),
                **self.serialize_url_param(
                    "subscriptionId", self.ctx.subscription_id,
                    required=True,
                ),
            }
            return parameters

        @property
        def query_parameters(self):
            parameters = {
                **self.serialize_query_param(
                    "api-version", "2019-10-17-preview",
                    required=True,
                ),
            }
            return parameters

        @property
        def header_parameters(self):
            parameters = {
                **self.serialize_header_param(
                    "Accept", "application/json",
                ),
            }
            return parameters

        def on_200(self, session):
            # Deserialize the 200 response body into ctx.vars.instance using the schema below.
            data = self.deserialize_http_content(session)
            self.ctx.set_var(
                "instance",
                data,
                schema_builder=self._build_schema_on_200
            )

        _schema_on_200 = None

        @classmethod
        def _build_schema_on_200(cls):
            # Response schema is built lazily and cached on the class.
            if cls._schema_on_200 is not None:
                return cls._schema_on_200

            cls._schema_on_200 = AAZObjectType()

            _schema_on_200 = cls._schema_on_200
            _schema_on_200.id = AAZStrType(
                flags={"read_only": True},
            )
            _schema_on_200.name = AAZStrType(
                flags={"read_only": True},
            )
            _schema_on_200.properties = AAZObjectType(
                flags={"client_flatten": True},
            )
            _schema_on_200.type = AAZStrType(
                flags={"read_only": True},
            )

            properties = cls._schema_on_200.properties
            properties.private_endpoint = AAZObjectType(
                serialized_name="privateEndpoint",
            )
            properties.private_link_service_connection_state = AAZObjectType(
                serialized_name="privateLinkServiceConnectionState",
            )
            properties.provisioning_state = AAZStrType(
                serialized_name="provisioningState",
                flags={"read_only": True},
            )

            private_endpoint = cls._schema_on_200.properties.private_endpoint
            private_endpoint.id = AAZStrType()

            private_link_service_connection_state = cls._schema_on_200.properties.private_link_service_connection_state
            private_link_service_connection_state.actions_required = AAZStrType(
                serialized_name="actionsRequired",
                flags={"read_only": True},
            )
            private_link_service_connection_state.description = AAZStrType(
                flags={"required": True},
            )
            private_link_service_connection_state.status = AAZStrType(
                flags={"required": True},
            )

            return cls._schema_on_200
class _WaitHelper:
    """Helper class for Wait; holds the generated wait-command machinery."""
__all__ = ["Wait"] |
6,151 | recv | """
:codeauthor: Thomas Jackson <jacksontj.89@gmail.com>
"""
import hashlib
import logging
import pytest
import tornado.ioloop
import salt.crypt
import salt.transport.tcp
import salt.transport.zeromq
import salt.utils.stringutils
from tests.support.mock import MagicMock, patch
log = logging.getLogger(__name__)

# Applied to every test in this module.
pytestmark = [
    pytest.mark.core_test,
]
def transport_ids(value):
    """Build a readable pytest param id for a transport name."""
    return "Transport({})".format(value)
@pytest.fixture(params=("zeromq", "tcp"), ids=transport_ids)
def transport(request):
    # Parametrized fixture: every test taking it runs once per transport name.
    return request.param
async def test_zeromq_async_pub_channel_publish_port(temp_salt_master):
    """
    test when connecting that we use the publish_port set in opts when its not 4506
    """
    opts = dict(
        temp_salt_master.config.copy(),
        ipc_mode="ipc",
        pub_hwm=0,
        recon_randomize=False,
        publish_port=455505,
        recon_default=1,
        recon_max=2,
        master_ip="127.0.0.1",
        acceptance_wait_time=5,
        acceptance_wait_time_max=5,
        sign_pub_messages=False,
    )
    opts["master_uri"] = "tcp://{interface}:{publish_port}".format(**opts)
    ioloop = tornado.ioloop.IOLoop()
    # Transport will connect to port given to connect method.
    transport = salt.transport.zeromq.PublishClient(
        opts, ioloop, host=opts["master_ip"], port=121212
    )
    with transport:
        # Stub the socket so connect() records the URI instead of dialing out.
        # (A previously-unused `patch_auth` mock was removed here.)
        patch_socket = MagicMock(return_value=True)
        with patch.object(transport, "_socket", patch_socket):
            await transport.connect(opts["publish_port"])
    # The URI handed to the socket must carry the configured publish_port.
    assert str(opts["publish_port"]) in patch_socket.mock_calls[0][1][0]
def test_zeromq_async_pub_channel_filtering_decode_message_no_match(
    temp_salt_master,
):
    """
    test zeromq PublishClient _decode_messages when
    zmq_filtering enabled and minion does not match
    """
    # Frame 1 is an id-hash filter that matches no minion; frame 2 is an
    # opaque encrypted payload (contents irrelevant for this test).
    message = [
        b"4f26aeafdb2367620a393c973eddbe8f8b846eb",
        b"\x82\xa3enc\xa3aes\xa4load\xda\x00`\xeeR\xcf"
        b"\x0eaI#V\x17if\xcf\xae\x05\xa7\xb3bN\xf7\xb2\xe2"
        b'\xd0sF\xd1\xd4\xecB\xe8\xaf"/*ml\x80Q3\xdb\xaexg'
        b"\x8e\x8a\x8c\xd3l\x03\\,J\xa7\x01i\xd1:]\xe3\x8d"
        b"\xf4\x03\x88K\x84\n`\xe8\x9a\xad\xad\xc6\x8ea\x15>"
        b"\x92m\x9e\xc7aM\x11?\x18;\xbd\x04c\x07\x85\x99\xa3\xea[\x00D",
    ]
    opts = dict(
        temp_salt_master.config.copy(),
        ipc_mode="ipc",
        pub_hwm=0,
        zmq_filtering=True,
        recon_randomize=False,
        recon_default=1,
        recon_max=2,
        master_ip="127.0.0.1",
        acceptance_wait_time=5,
        acceptance_wait_time_max=5,
        sign_pub_messages=False,
    )
    opts["master_uri"] = "tcp://{interface}:{publish_port}".format(**opts)
    ioloop = tornado.ioloop.IOLoop()
    transport = salt.transport.zeromq.PublishClient(
        opts, ioloop, host=opts["master_ip"], port=121212
    )
    with transport:
        with patch(
            "salt.crypt.AsyncAuth.crypticle",
            MagicMock(return_value={"tgt_type": "glob", "tgt": "*", "jid": 1}),
        ):
            # A non-matching filter frame must be dropped entirely.
            res = transport._decode_messages(message)
    assert res is None
def test_zeromq_async_pub_channel_filtering_decode_message(
    temp_salt_master, temp_salt_minion
):
    """
    test zeromq PublishClient _decode_messages when zmq_filtering enabled
    and the minion's id hash matches the message filter
    """
    # The filter frame is the sha1 hex digest of the minion id, so this
    # message IS addressed to temp_salt_minion.
    minion_hexid = salt.utils.stringutils.to_bytes(
        hashlib.sha1(salt.utils.stringutils.to_bytes(temp_salt_minion.id)).hexdigest()
    )
    message = [
        minion_hexid,
        b"\x82\xa3enc\xa3aes\xa4load\xda\x00`\xeeR\xcf"
        b"\x0eaI#V\x17if\xcf\xae\x05\xa7\xb3bN\xf7\xb2\xe2"
        b'\xd0sF\xd1\xd4\xecB\xe8\xaf"/*ml\x80Q3\xdb\xaexg'
        b"\x8e\x8a\x8c\xd3l\x03\\,J\xa7\x01i\xd1:]\xe3\x8d"
        b"\xf4\x03\x88K\x84\n`\xe8\x9a\xad\xad\xc6\x8ea\x15>"
        b"\x92m\x9e\xc7aM\x11?\x18;\xbd\x04c\x07\x85\x99\xa3\xea[\x00D",
    ]
    opts = dict(
        temp_salt_master.config.copy(),
        id=temp_salt_minion.id,
        ipc_mode="ipc",
        pub_hwm=0,
        zmq_filtering=True,
        recon_randomize=False,
        recon_default=1,
        recon_max=2,
        master_ip="127.0.0.1",
        acceptance_wait_time=5,
        acceptance_wait_time_max=5,
        sign_pub_messages=False,
    )
    opts["master_uri"] = "tcp://{interface}:{publish_port}".format(**opts)
    ioloop = tornado.ioloop.IOLoop()
    transport = salt.transport.zeromq.PublishClient(
        opts, ioloop, host=opts["master_ip"], port=121212
    )
    with transport:
        # The mock binding was previously captured as an unused `mock_test`
        # variable; it has been removed.
        with patch(
            "salt.crypt.AsyncAuth.crypticle",
            MagicMock(return_value={"tgt_type": "glob", "tgt": "*", "jid": 1}),
        ):
            res = transport._decode_messages(message)
            # Matching filter: the payload is decoded instead of dropped.
            assert res["enc"] == "aes"
async def test_publish_client_connect_server_down(transport, io_loop):
    # Creating/connecting a publish client while no server is listening must
    # not raise. NOTE(review): port 111222 exceeds the valid TCP port range —
    # presumably intentional to guarantee nothing is listening; confirm.
    opts = {"master_ip": "127.0.0.1"}
    host = "127.0.0.1"
    port = 111222
    if transport == "zeromq":
        client = salt.transport.zeromq.PublishClient(
            opts, io_loop, host=host, port=port
        )
        # zeromq connects lazily, so this succeeds even with no server.
        await client.connect()
        assert client._socket
    elif transport == "tcp":
        client = salt.transport.tcp.TCPPubClient(opts, io_loop, host=host, port=port)
        try:
            # XXX: This is an implementation detail of the tcp transport.
            # await client.connect(port)
            io_loop.spawn_callback(client.connect)
        except TimeoutError:
            pass
        except Exception:  # pylint: disable=broad-except
            log.error("Got exception", exc_info=True)
        # No server yet, so no stream can have been established.
        assert client._stream is None
    client.close()
async def test_publish_client_connect_server_comes_up(transport, io_loop):
    """
    A PublishClient created before its server exists must connect once the
    server comes up and then receive a published message.
    """
    # NOTE(review): salt.payload and salt.transport.frame are used below but
    # not imported at module scope — presumably pulled in transitively by the
    # salt.transport imports; confirm.
    opts = {"master_ip": "127.0.0.1"}
    host = "127.0.0.1"
    port = 11122
    if transport == "zeromq":
        import asyncio

        import zmq

        ctx = zmq.asyncio.Context()
        uri = f"tcp://{opts['master_ip']}:{port}"
        msg = salt.payload.dumps({"meh": 123})
        # (typo fix: message previously read "Senging")
        log.debug("TEST - Sending %r", msg)
        client = salt.transport.zeromq.PublishClient(
            opts, io_loop, host=host, port=port
        )
        await client.connect()
        assert client._socket

        # Bring the publisher up only after the client has "connected".
        socket = ctx.socket(zmq.PUB)
        socket.setsockopt(zmq.BACKLOG, 1000)
        socket.setsockopt(zmq.LINGER, -1)
        socket.setsockopt(zmq.SNDHWM, 1000)
        socket.bind(uri)
        await asyncio.sleep(20)

        async def METHOD_NAME():
            return await client.METHOD_NAME(timeout=1)

        task = asyncio.create_task(METHOD_NAME())
        # Sleep to allow zmq to do its thing.
        await socket.send(msg)
        await task
        response = task.result()
        assert response
        client.close()
        socket.close()
        await asyncio.sleep(0.03)
        ctx.term()
    elif transport == "tcp":
        import asyncio
        import socket

        client = salt.transport.tcp.TCPPubClient(opts, io_loop, host=host, port=port)
        # XXX: This is an implementation detail of the tcp transport.
        # await client.connect(port)
        io_loop.spawn_callback(client.connect)
        assert client._stream is None
        await asyncio.sleep(2)
        # Now start a bare TCP server and hand the client one framed message.
        sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        sock.setblocking(0)
        sock.bind((opts["master_ip"], port))
        sock.listen(128)
        await asyncio.sleep(0.03)
        msg = salt.payload.dumps({"meh": 123})
        msg = salt.transport.frame.frame_msg(msg, header=None)
        conn, addr = sock.accept()
        conn.send(msg)
        response = await client.METHOD_NAME()
        assert response
    else:
        raise Exception(f"Unknown transport {transport}")
6,152 | test present absent fuzzy | """
Tests for the MySQL states
"""
import pytest
import salt.utils.path
from tests.support.case import ModuleCase
from tests.support.mixins import SaltReturnAssertsMixin
# Detect whether the MySQL prerequisites (python bindings + mysqladmin binary)
# are available; the test class below is skipped when they are not.
NO_MYSQL = False
try:
    import MySQLdb  # pylint: disable=import-error,unused-import
except ImportError:
    NO_MYSQL = True

if not salt.utils.path.which("mysqladmin"):
    NO_MYSQL = True
@pytest.mark.skipif(
    NO_MYSQL,
    reason="Please install MySQL bindings and a MySQL Server before running "
    "MySQL integration tests.",
)
class MysqlDatabaseStateTest(ModuleCase, SaltReturnAssertsMixin):
    """
    Validate the mysql_database state
    """

    # Root credentials used for every state/function call in this suite.
    user = "root"
    password = "poney"

    @pytest.mark.destructive_test
    def setUp(self):
        """
        Test presence of MySQL server, enforce a root password
        """
        super().setUp()
        NO_MYSQL_SERVER = True
        # now ensure we know the mysql root password
        # one of theses two at least should work
        ret1 = self.run_state(
            "cmd.run",
            name='mysqladmin --host="localhost" -u '
            + self.user
            + ' flush-privileges password "'
            + self.password
            + '"',
        )
        ret2 = self.run_state(
            "cmd.run",
            name='mysqladmin --host="localhost" -u '
            + self.user
            + ' --password="'
            + self.password
            + '" flush-privileges password "'
            + self.password
            + '"',
        )
        # NOTE(review): only ret2 is inspected; ret1 is a best-effort attempt
        # for a server with no password yet — confirm this is intended.
        key, value = ret2.popitem()
        if value["result"]:
            NO_MYSQL_SERVER = False
        else:
            self.skipTest("No MySQL Server running, or no root access on it.")

    def _test_database(self, db_name, second_db_name, test_conn, **kwargs):
        """
        Create db two times, test conn, remove it two times
        """
        # In case of...
        ret = self.run_state("mysql_database.absent", name=db_name, **kwargs)

        ret = self.run_state("mysql_database.present", name=db_name, **kwargs)
        self.assertSaltTrueReturn(ret)
        self.assertInSaltComment("The database " + db_name + " has been created", ret)
        # 2nd run
        ret = self.run_state("mysql_database.present", name=second_db_name, **kwargs)
        self.assertSaltTrueReturn(ret)
        # NOTE(review): the comment is matched against db_name even though the
        # state just ran on second_db_name — confirm this is intended for the
        # utf8/unicode pair used in the fuzzy test.
        self.assertInSaltComment("Database " + db_name + " is already present", ret)
        if test_conn:
            # test root connection
            ret = self.run_function(
                "mysql.query", database=db_name, query="SELECT 1", **kwargs
            )
            if not isinstance(ret, dict) or "results" not in ret:
                raise AssertionError(
                    (
                        "Unexpected result while testing connection on db '{}': {}"
                    ).format(db_name, repr(ret))
                )
            self.assertEqual([["1"]], ret["results"])
        # Now removing databases
        kwargs.pop("character_set")
        kwargs.pop("collate")
        ret = self.run_state("mysql_database.absent", name=db_name, **kwargs)
        self.assertSaltTrueReturn(ret)
        self.assertInSaltComment("Database " + db_name + " has been removed", ret)
        # 2nd run
        ret = self.run_state("mysql_database.absent", name=second_db_name, **kwargs)
        self.assertSaltTrueReturn(ret)
        self.assertInSaltComment(
            "Database " + db_name + " is not present, so it cannot be removed", ret
        )
        self.assertSaltStateChangesEqual(ret, {})

    @pytest.mark.destructive_test
    def test_present_absent(self):
        """
        mysql_database.present
        """
        self._test_database(
            "testdb1",
            "testdb1",
            test_conn=True,
            character_set="utf8",
            collate="utf8_general_ci",
            connection_user=self.user,
            connection_pass=self.password,
            connection_charset="utf8",
        )

    # TODO: test with variations on collate and charset, check for db alter
    # once it will be done in mysql_database.present state

    @pytest.mark.destructive_test
    def METHOD_NAME(self):
        """
        mysql_database.present with utf-8 and fuzzy db name
        """
        # this is : ":() ;,?@=`&'\
        dbname_fuzzy = "\":() ;,?@=`&/'\\"
        # \xe6\xa8\x99\ = \u6a19 = 標
        # this is : "();,?:@=`&/標'\
        dbname_utf8 = "\"();,?@=`&//\xe6\xa8\x99'\\"
        dbname_unicode = "\"();,?@=`&//\u6a19'\\"

        self._test_database(
            dbname_fuzzy,
            dbname_fuzzy,
            test_conn=True,
            character_set="utf8",
            collate="utf8_general_ci",
            connection_user=self.user,
            connection_pass=self.password,
            connection_charset="utf8",
        )
        # FIXME: MySQLdb bugs on dbnames with utf-8?
        self._test_database(
            dbname_utf8,
            dbname_unicode,
            test_conn=False,
            character_set="utf8",
            collate="utf8_general_ci",
            connection_user=self.user,
            connection_pass=self.password,
            connection_charset="utf8",
            # saltenv={"LC_ALL": "en_US.utf8"}
        )

    @pytest.mark.destructive_test
    @pytest.mark.skip(reason="This tests needs issue #8947 to be fixed first")
    def test_utf8_from_sls_file(self):
        """
        Try to create/destroy an utf-8 database name from an sls file #8947
        """
        expected_result = {
            "mysql_database_|-A_|-foo \xe6\xba\x96`bar_|-present": {
                "__run_num__": 0,
                "comment": "The database foo \xe6\xba\x96`bar has been created",
                "result": True,
            },
            "mysql_database_|-B_|-foo \xe6\xba\x96`bar_|-absent": {
                "__run_num__": 1,
                "comment": "Database foo \xe6\xba\x96`bar has been removed",
                "result": True,
            },
        }
        result = {}
        ret = self.run_function("state.sls", mods="mysql_utf8")
        if not isinstance(ret, dict):
            raise AssertionError(
                ("Unexpected result while testing external mysql utf8 sls: {}").format(
                    repr(ret)
                )
            )
        # Keep only the fields we want to compare against expected_result.
        for item, descr in ret.items():
            result[item] = {
                "__run_num__": descr["__run_num__"],
                "comment": descr["comment"],
                "result": descr["result"],
            }
        self.assertEqual(expected_result, result)
6,153 | test ordering display name success | # Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from cm.models import (
Action,
ActionType,
Bundle,
Cluster,
ObjectType,
Prototype,
Upgrade,
)
from django.urls import reverse
from rest_framework.response import Response
from rest_framework.status import HTTP_200_OK
from adcm.tests.base import BaseTestCase
class TestClusterAPI(BaseTestCase):
    """Tests for the v1 cluster upgrade and service-prototype ordering API.

    The eight near-identical ordering tests previously duplicated the full
    request/assert sequence; they now share two private helpers.
    """

    def setUp(self) -> None:
        super().setUp()

        self.bundle = Bundle.objects.create(name="test_cluster_prototype")
        self.cluster_prototype = Prototype.objects.create(
            bundle=self.bundle,
            version=2,
            name="test_cluster_prototype",
        )
        self.cluster = Cluster.objects.create(prototype=self.cluster_prototype)

        new_bundle = Bundle.objects.create(name="bundle")
        # Two service prototypes whose id and version sort orders disagree, so
        # ascending vs descending ordering is distinguishable.
        self.service_prototype_1_1 = Prototype.objects.create(
            bundle=new_bundle,
            type=ObjectType.SERVICE,
            shared=True,
            name="service_prototype_1",
            display_name="service_prototype_1",
            version=2,
        )
        self.service_prototype_2_2 = Prototype.objects.create(
            bundle=new_bundle,
            type=ObjectType.SERVICE,
            shared=True,
            name="service_prototype_2",
            display_name="service_prototype_2",
            version=1,
        )

    def _list_service_prototypes(self, ordering: str) -> list:
        """GET the cluster's service prototypes with `ordering`; assert the
        shared invariants (HTTP 200, exactly two results) and return the body."""
        response: Response = self.client.get(
            path=reverse(viewname="v1:cluster-service-prototype", kwargs={"cluster_id": self.cluster.pk}),
            data={"ordering": ordering},
        )
        self.assertEqual(response.status_code, HTTP_200_OK)
        response_json = response.json()
        self.assertEqual(len(response_json), 2)
        return response_json

    def _assert_ordering(self, ordering: str, field: str, expected_values: tuple, cast=lambda value: value) -> None:
        """Assert `field` of the listed prototypes comes back sorted ascending,
        or descending when `ordering` starts with '-'. `cast` normalizes the
        serialized value (e.g. int for `version`)."""
        response_json = self._list_service_prototypes(ordering)
        self.assertListEqual(
            [cast(prototype[field]) for prototype in response_json],
            sorted(expected_values, reverse=ordering.startswith("-")),
        )

    def test_upgrade(self):
        # An upgrade whose [min, max] version window contains the cluster's
        # prototype version (2) must be served by the upgrade endpoint.
        Upgrade.objects.create(
            bundle=self.bundle,
            min_version=1,
            max_version=3,
            action=Action.objects.create(
                prototype=self.cluster_prototype,
                type=ActionType.JOB,
                state_available="any",
            ),
        )
        response: Response = self.client.get(
            path=reverse(viewname="v1:cluster-upgrade", kwargs={"cluster_id": self.cluster.pk}),
        )
        self.assertEqual(response.status_code, HTTP_200_OK)

    def test_ordering_id_success(self):
        self._assert_ordering("id", "id", (self.service_prototype_1_1.pk, self.service_prototype_2_2.pk))

    def test_ordering_id_reverse_success(self):
        self._assert_ordering("-id", "id", (self.service_prototype_1_1.pk, self.service_prototype_2_2.pk))

    def test_ordering_name_success(self):
        self._assert_ordering("name", "name", (self.service_prototype_1_1.name, self.service_prototype_2_2.name))

    def test_ordering_name_reverse_success(self):
        self._assert_ordering("-name", "name", (self.service_prototype_1_1.name, self.service_prototype_2_2.name))

    def METHOD_NAME(self):
        self._assert_ordering(
            "display_name",
            "display_name",
            (self.service_prototype_1_1.display_name, self.service_prototype_2_2.display_name),
        )

    def test_ordering_display_name_reverse_success(self):
        self._assert_ordering(
            "-display_name",
            "display_name",
            (self.service_prototype_1_1.display_name, self.service_prototype_2_2.display_name),
        )

    def test_ordering_version_success(self):
        self._assert_ordering(
            "version",
            "version",
            (self.service_prototype_1_1.version, self.service_prototype_2_2.version),
            cast=int,
        )

    def test_ordering_version_reverse_success(self):
        self._assert_ordering(
            "-version",
            "version",
            (self.service_prototype_1_1.version, self.service_prototype_2_2.version),
            cast=int,
        )
6,154 | get row size | #!/bin/env python
__author__ = 'dongyun.zdy'
import getopt
import sys
import math
def material_model_form(args):
    """Cost model for row materialization.

    `args` is a (row_count, column_count) pair; the cost is a fixed per-row
    overhead plus a per-cell cost (startup cost is zero).
    """
    n_rows, n_cols = args
    T_ROW_COL = 0.02674675
    T_ROW_ONCE = 0.07931677
    return n_rows * (T_ROW_ONCE + n_cols * T_ROW_COL)
def array_model_form(args):
    """Cost model for appending `args` elements into a doubling array.

    The array starts at one 1024-element page; each capacity doubling copies
    every element already stored, which the copy term accounts for.
    """
    T_ELEM_ONCE = 0.00898860
    T_ELEM_COPY = 0.00631888
    n_elems = args
    ELEM_PER_PAGE = 1024
    doublings = math.ceil(math.log(float(n_elems) / ELEM_PER_PAGE, 2))
    if doublings < 0:
        doublings = 0
    copied = ELEM_PER_PAGE * (math.pow(2, doublings) - 1)
    return T_ELEM_ONCE * n_elems + T_ELEM_COPY * copied
def METHOD_NAME(reserve, col):
    """Estimate the in-memory size of a row.

    16 bytes base, 16 bytes per reserved order cell, then a fixed per-bundle
    cost for every 8 columns (3+8+4+8+16+32+64+128 plus 1 byte each).
    """
    bundles = col / 8
    size = 16 + reserve * 16
    size += bundles * (3 + 8 + 4 + 8 + 16 + 32 + 64 + 128)
    size += bundles
    return size
def get_miss_prob(Nrow, Ncol, Nord, Turn):
    """TLB miss probability for sorting Nrow rows of Ncol columns.

    `Turn` is the TLB-covered byte budget; the hit ratio is capped at 0.9 of
    the total row-storage size.
    """
    total_size = Nrow * METHOD_NAME(Nord, Ncol)
    if Turn >= 0.9 * total_size:
        hit_ratio = 0.9
    else:
        hit_ratio = Turn / total_size
    return 1 - hit_ratio
def sort_model_form(args, params):
    """Cost model for sorting: Nrow * log2(Nrow) comparisons, where each
    comparison pays a per-column compare cost plus a TLB-miss penalty.

    `args` is (Nrow, Ncol, Nordering); `params` is (Tcompare, Tmiss_K1, Turn).
    """
    Nrow, Ncol, Nordering = args
    Tcompare, Tmiss_K1, Turn = params

    # In practice at most one ordering column participates in a comparison.
    compared_cols = 1 if Nordering >= 1 else Nordering
    per_compare = Tcompare * compared_cols + Tmiss_K1 * get_miss_prob(
        Nrow, Ncol, Nordering, Turn
    )
    return Nrow * per_compare * math.log(Nrow, 2)
#
# def sort_model_form(args,
# params):
# (
# Nrow,
# Nordering,
# Ncol,
# ) = args
#
# (
# Tstartup,
# Trowstore_once,
# Trowstore_col,
# # Tarray_once,
# # Tarray_elem_copy,
# Treserve_cell,
# Tcompare
# ) = params
#
#
# total_cost = Tstartup
#
# #cost for row store
# total_cost += Nrow * (Trowstore_once + Ncol * Trowstore_col)
# total_cost += Treserve_cell * Nrow * Ncol * Nordering
#
# #cost for array
# # ELEM_PER_PAGE = 1024
# # extend_cnt = math.ceil(math.log(float(Nrow)/ELEM_PER_PAGE, 2))
# # copy_cnt = ELEM_PER_PAGE * (math.pow(2, extend_cnt) - 1)
# #total_cost += Tarray_once * Nrow + Tarray_elem_copy * copy_cnt
#
# #cost for sorting
# if Nordering > 2:
# Nordering_cmp = 2
# else:
# Nordering_cmp = Nordering
# compare_cost = Tcompare * Nordering_cmp
# total_cost += Nrow * compare_cost * math.log(Nrow, 2)
#
# return total_cost
def extract_info_from_line(line):
    """Parse one comma-separated line of the data file into a list of floats."""
    return [float(field) for field in line.split(",")]
# sys.argv.extend('-i sort.prep.double -o sort.fit.double -m sort.model.double'.split())

# Defaults; overridden by the command-line options parsed below.
file_name = "get_total.data.prep"
output_fit_res = False
wrong_arg = False

# -i <input data>  -o <output file, fitted cost appended>  -m <model params>
opts, args = getopt.getopt(sys.argv[1:], "i:o:m:")
for op, value in opts:
    if "-i" == op:
        file_name = value
    elif "-o" == op:
        output_fit_res = True
        out_file_name = value
    elif "-m" == op:
        model_file_name = value
    else:
        wrong_arg = True

if wrong_arg:
    print "wrong arg"
    sys.exit(1)

# NOTE(review): out_file_name / model_file_name are only bound when -o / -m
# are given; running without them raises NameError below — confirm intended.
input_file = open(file_name, "r")
model_file = open(model_file_name, "r")
out_file = open(out_file_name, "w")

# Model parameters: a single comma-separated line of floats.
line = model_file.readline()
model_params = [float(p) for p in line.split(",")]
# if len(model_params) == 1:
#     model_params = model_params[0]

for line in input_file:
    # Pass comment lines through unchanged.
    if line.startswith('#'):
        out_file.write(line)
        continue
    case_param = extract_info_from_line(line)
    args = (case_param[0],
            case_param[1],
            case_param[2])
    time = case_param[4]
    cost_val = sort_model_form(args, model_params)
    # Relative error of the model prediction versus the measured time.
    percent = (cost_val - time) / time
    new_line = ",".join([line.strip(), str(cost_val), str(percent * 100)])
    new_line += "\n"
    out_file.write(new_line)
out_file.close()
|
6,155 | test included messages | # (C) Datadog, Inc. 2020-present
# All rights reserved
# Licensed under a 3-clause BSD style license (see LICENSE)
import logging
import pytest
import win32evtlog
from . import common
# Applied to every test in this module.
pytestmark = [pytest.mark.integration]
@pytest.mark.parametrize('server', ['localhost', '127.0.0.1'])
def test_expected(aggregator, dd_run_check, new_check, instance, report_event, server):
    # One event with default metadata must be collected whether the server is
    # addressed by name or by IP.
    instance['server'] = server
    check = new_check(instance)
    report_event('message')
    dd_run_check(check)
    aggregator.assert_event(
        'message',
        alert_type='info',
        priority='normal',
        host=check.hostname,
        source_type_name=check.SOURCE_TYPE_NAME,
        aggregation_key=common.EVENT_SOURCE,
        msg_title='Application/{}'.format(common.EVENT_SOURCE),
        tags=[],
    )
def test_recover_from_broken_subscribe(aggregator, dd_run_check, new_check, instance, event_reporter, caplog):
    """
    Test the check can recover from a broken EvtSubscribe handle

    Issue originally surfaced when the event publisher is unregistered while we
    have an EvtSubscribe handle to one of its channels. This is difficult to test
    here so we mimic it by replacing the subscription handle.
    """
    # Speed up test
    instance['timeout'] = 0.1
    check = new_check(instance)
    # Run check_initializations to create EvtSubscribe
    dd_run_check(check)
    # Create an event
    event_reporter.report('message').join()
    # Mutate the subscription handle so that the check's EvtNext() fails
    check._subscription = None
    # Run the check to initiate the reset
    # Enable debug logging so we see expected error message
    with caplog.at_level(logging.DEBUG):
        dd_run_check(check)
    # Run the check again to collect the event we missed
    dd_run_check(check)
    # Assert we saw the expected error and we still got an event
    assert 'The handle is invalid' in caplog.text
    aggregator.assert_event('message')
@pytest.mark.parametrize(
    'event_type, level',
    [
        pytest.param(win32evtlog.EVENTLOG_INFORMATION_TYPE, 'info', id='INFORMATION_TYPE'),
        pytest.param(win32evtlog.EVENTLOG_WARNING_TYPE, 'warning', id='WARNING_TYPE'),
        pytest.param(win32evtlog.EVENTLOG_ERROR_TYPE, 'error', id='ERROR_TYPE'),
    ],
)
def test_levels(aggregator, dd_run_check, new_check, instance, report_event, event_type, level):
    # Each Windows event type must map to the matching Datadog alert type.
    check = new_check(instance)
    report_event('foo', event_type=event_type)
    dd_run_check(check)
    aggregator.assert_event('foo', alert_type=level)
def test_event_priority(aggregator, dd_run_check, new_check, instance, report_event):
    """`event_priority` overrides the default 'normal' event priority."""
    instance['event_priority'] = 'low'
    event_check = new_check(instance)
    report_event('foo')
    dd_run_check(event_check)
    aggregator.assert_event('foo', priority='low')
def test_event_id(aggregator, dd_run_check, new_check, instance, report_event):
    """The event id is attached as a tag when `tag_event_id` is enabled."""
    instance['tag_event_id'] = True
    event_check = new_check(instance)
    report_event('foo')
    dd_run_check(event_check)
    expected_tag = 'event_id:{}'.format(common.EVENT_ID)
    aggregator.assert_event('foo', tags=[expected_tag])
def METHOD_NAME(aggregator, dd_run_check, new_check, instance, report_event):
    """Only messages matching `included_messages` are collected."""
    instance['included_messages'] = ['bar']
    event_check = new_check(instance)
    for body in ('foo', 'bar', 'baz'):
        report_event(body)
    dd_run_check(event_check)
    assert len(aggregator.events) == 1
    aggregator.assert_event('bar')
def test_excluded_messages(aggregator, dd_run_check, new_check, instance, report_event):
    """Messages matching `excluded_messages` are dropped; the rest pass."""
    instance['excluded_messages'] = ['bar']
    event_check = new_check(instance)
    for body in ('foo', 'bar', 'baz'):
        report_event(body)
    dd_run_check(event_check)
    assert len(aggregator.events) == 2
    aggregator.assert_event('foo')
    aggregator.assert_event('baz')
def test_excluded_messages_override(aggregator, dd_run_check, new_check, instance, report_event):
    """An exclusion beats an identical inclusion, so nothing is collected."""
    instance['included_messages'] = ['bar']
    instance['excluded_messages'] = ['bar']
    event_check = new_check(instance)
    for body in ('foo', 'bar', 'baz'):
        report_event(body)
    dd_run_check(event_check)
    assert len(aggregator.events) == 0
def test_custom_query(aggregator, dd_run_check, new_check, instance, report_event):
    """A raw XPath query filters events server-side (errors/criticals only)."""
    instance['query'] = "*[System[Provider[@Name='{}']] and System[(Level=1 or Level=2)]]".format(common.EVENT_SOURCE)
    event_check = new_check(instance)
    report_event('foo', level='error')
    report_event('bar')
    dd_run_check(event_check)
    assert len(aggregator.events) == 1
    aggregator.assert_event('foo')
def test_bookmark(aggregator, dd_run_check, new_check, instance, report_event):
    # A run persists a bookmark: a fresh check instance must not re-collect
    # events already seen, but must still pick up new ones.
    instance['start'] = 'oldest'
    check = new_check(instance)
    report_event('foo')
    report_event('bar')
    dd_run_check(check)
    assert len(aggregator.events) > 1
    aggregator.reset()
    # Fresh instance resumes from the stored bookmark: nothing to collect.
    check = new_check(instance)
    dd_run_check(check)
    assert len(aggregator.events) == 0
    # A newly reported event is collected from the bookmark onwards.
    report_event('foo')
    dd_run_check(check)
    assert len(aggregator.events) == 1
    aggregator.assert_event('foo')
def test_query_override(aggregator, dd_run_check, new_check, instance, report_event):
    """A query restricted to an unrelated provider collects nothing."""
    instance['query'] = "*[System[Provider[@Name='foo']]]"
    event_check = new_check(instance)
    report_event('message')
    dd_run_check(event_check)
    assert len(aggregator.events) == 0
def test_sid(aggregator, dd_run_check, new_check, instance):
    # Collect real System-log kernel boot events and expect the SYSTEM SID tag.
    instance['tag_sid'] = True
    instance['start'] = 'oldest'
    instance['path'] = 'System'
    instance['query'] = "*[System[Provider[@Name='Microsoft-Windows-Kernel-Boot']]]"
    del instance['filters']
    check = new_check(instance)
    dd_run_check(check)
    assert any(
        'sid:NT AUTHORITY\\SYSTEM' in event['tags'] for event in aggregator.events
    ), 'Unable to find any expected `sid` tags'  # no cov
6,156 | get test https policy | # Copyright 2019 New Vector Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os.path
import subprocess
from typing import List
from zope.interface import implementer
from OpenSSL import SSL
from OpenSSL.SSL import Connection
from twisted.internet.address import IPv4Address
from twisted.internet.interfaces import IOpenSSLServerConnectionCreator
from twisted.internet.ssl import Certificate, trustRootFromCertificates
from twisted.protocols.tls import TLSMemoryBIOProtocol
from twisted.web.client import BrowserLikePolicyForHTTPS # noqa: F401
from twisted.web.iweb import IPolicyForHTTPS # noqa: F401
def METHOD_NAME() -> BrowserLikePolicyForHTTPS:
    """Get a test IPolicyForHTTPS which trusts the test CA cert

    Returns:
        IPolicyForHTTPS
    """
    with open(get_test_ca_cert_file()) as stream:
        pem = stream.read()
    trust_root = trustRootFromCertificates([Certificate.loadPEM(pem)])
    return BrowserLikePolicyForHTTPS(trustRoot=trust_root)
def get_test_ca_cert_file() -> str:
    """Path to the test CA certificate (ca.crt next to this module).

    The keypair is generated with:

        openssl genrsa -out ca.key 2048
        openssl req -new -x509 -key ca.key -days 3650 -out ca.crt \
            -subj '/CN=synapse test CA'
    """
    here = os.path.dirname(__file__)
    return os.path.join(here, "ca.crt")
def get_test_key_file() -> str:
    """Path to the test private key (server.key next to this module).

    The key file is made with:

        openssl genrsa -out server.key 2048
    """
    here = os.path.dirname(__file__)
    return os.path.join(here, "server.key")
# Monotonic counter giving each generated cert/config file a unique name.
cert_file_count = 0

# openssl x509 extension config; %(sanentries)s is filled with the SAN list.
CONFIG_TEMPLATE = b"""\
[default]
basicConstraints = CA:FALSE
keyUsage=nonRepudiation, digitalSignature, keyEncipherment
subjectAltName = %(sanentries)s
"""
def create_test_cert_file(sanlist: List[bytes]) -> str:
    """build an x509 certificate file

    Args:
        sanlist: a list of subjectAltName values for the cert

    Returns:
        The path to the file
    """
    # NOTE: the CSR/config/cert files are written to the current working
    # directory; each call produces a new numbered cnf/crt pair.
    global cert_file_count
    csr_filename = "server.csr"
    cnf_filename = "server.%i.cnf" % (cert_file_count,)
    cert_filename = "server.%i.crt" % (cert_file_count,)
    cert_file_count += 1

    # first build a CSR
    subprocess.check_call(
        [
            "openssl",
            "req",
            "-new",
            "-key",
            get_test_key_file(),
            "-subj",
            "/",
            "-out",
            csr_filename,
        ]
    )

    # now a config file describing the right SAN entries
    sanentries = b",".join(sanlist)
    with open(cnf_filename, "wb") as f:
        f.write(CONFIG_TEMPLATE % {b"sanentries": sanentries})

    # finally the cert, signed by the test CA
    ca_key_filename = os.path.join(os.path.dirname(__file__), "ca.key")
    ca_cert_filename = get_test_ca_cert_file()
    subprocess.check_call(
        [
            "openssl",
            "x509",
            "-req",
            "-in",
            csr_filename,
            "-CA",
            ca_cert_filename,
            "-CAkey",
            ca_key_filename,
            "-set_serial",
            "1",
            "-extfile",
            cnf_filename,
            "-out",
            cert_filename,
        ]
    )

    return cert_filename
@implementer(IOpenSSLServerConnectionCreator)
class TestServerTLSConnectionFactory:
    """An SSL connection creator which returns connections which present a certificate
    signed by our test CA."""

    def __init__(self, sanlist: List[bytes]):
        """
        Args:
            sanlist: a list of subjectAltName values for the cert
        """
        # Generate the leaf certificate once up front.
        self._cert_file = create_test_cert_file(sanlist)

    def serverConnectionForTLS(self, tlsProtocol: TLSMemoryBIOProtocol) -> Connection:
        # Serve the generated leaf cert with the shared test private key.
        ctx = SSL.Context(SSL.SSLv23_METHOD)
        ctx.use_certificate_file(self._cert_file)
        ctx.use_privatekey_file(get_test_key_file())
        return Connection(ctx, None)
# A dummy address, useful for tests that use FakeTransport and don't care about where
# packets are going to/coming from.
dummy_address = IPv4Address("TCP", "127.0.0.1", 80)
6,157 | get next | # pylint: disable=too-many-lines
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, Callable, Dict, Iterable, Optional, TypeVar
import urllib.parse
from azure.core.exceptions import (
ClientAuthenticationError,
HttpResponseError,
ResourceExistsError,
ResourceNotFoundError,
ResourceNotModifiedError,
map_error,
)
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpResponse
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.core.utils import case_insensitive_dict
from azure.mgmt.core.exceptions import ARMErrorFormat
from .. import models as _models
from .._serialization import Serializer
from .._vendor import _convert_request
T = TypeVar("T")
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
_SERIALIZER = Serializer()
_SERIALIZER.client_side_validation = False
def build_list_request(**kwargs: Any) -> HttpRequest:
    """Build the GET request listing Microsoft.Cache provider operations.

    Auto-generated (AutoRest).  ``kwargs`` may carry ``headers``/``params``
    dicts plus ``api_version``/``template_url`` overrides; leftovers are
    forwarded to :class:`HttpRequest`.
    """
    _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})

    # Default API version unless overridden via kwargs or query params.
    api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-04-01"))
    accept = _headers.pop("Accept", "application/json")

    # Construct URL
    _url = kwargs.pop("template_url", "/providers/Microsoft.Cache/operations")

    # Construct parameters
    _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")

    # Construct headers
    _headers["Accept"] = _SERIALIZER.header("accept", accept, "str")

    return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs)
class Operations:
    """
    .. warning::
        **DO NOT** instantiate this class directly.

        Instead, you should access the following operations through
        :class:`~azure.mgmt.redis.RedisManagementClient`'s
        :attr:`operations` attribute.
    """

    models = _models

    def __init__(self, *args, **kwargs):
        # The generated client passes client/config/serializer/deserializer
        # positionally; keywords are accepted as a fallback.
        input_args = list(args)
        self._client = input_args.pop(0) if input_args else kwargs.pop("client")
        self._config = input_args.pop(0) if input_args else kwargs.pop("config")
        self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer")
        self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer")

    @distributed_trace
    def list(self, **kwargs: Any) -> Iterable["_models.Operation"]:
        """Lists all of the available REST API operations of the Microsoft.Cache provider.

        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either Operation or the result of cls(response)
        :rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.redis.models.Operation]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        _headers = kwargs.pop("headers", {}) or {}
        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})

        api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
        cls: ClsType[_models.OperationListResult] = kwargs.pop("cls", None)

        # Map HTTP error codes to azure-core exceptions; callers may extend or
        # override the mapping via an "error_map" kwarg.
        error_map = {
            401: ClientAuthenticationError,
            404: ResourceNotFoundError,
            409: ResourceExistsError,
            304: ResourceNotModifiedError,
        }
        error_map.update(kwargs.pop("error_map", {}) or {})

        def prepare_request(next_link=None):
            # First page: build the templated list request.  Later pages:
            # follow next_link, re-applying the client's api-version.
            if not next_link:
                request = build_list_request(
                    api_version=api_version,
                    template_url=self.list.metadata["url"],
                    headers=_headers,
                    params=_params,
                )
                request = _convert_request(request)
                request.url = self._client.format_url(request.url)

            else:
                # make call to next link with the client's api-version
                _parsed_next_link = urllib.parse.urlparse(next_link)
                _next_request_params = case_insensitive_dict(
                    {
                        key: [urllib.parse.quote(v) for v in value]
                        for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items()
                    }
                )
                _next_request_params["api-version"] = self._config.api_version
                request = HttpRequest(
                    "GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params
                )
                request = _convert_request(request)
                request.url = self._client.format_url(request.url)
                request.method = "GET"
            return request

        def extract_data(pipeline_response):
            # Deserialize one page and hand ItemPaged the continuation token
            # plus an iterator over the page's elements.
            deserialized = self._deserialize("OperationListResult", pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)  # type: ignore
            return deserialized.next_link or None, iter(list_of_elem)

        def METHOD_NAME(next_link=None):
            # Fetch one page; non-200 responses raise a mapped exception.
            request = prepare_request(next_link)

            _stream = False
            pipeline_response: PipelineResponse = self._client._pipeline.run(  # pylint: disable=protected-access
                request, stream=_stream, **kwargs
            )
            response = pipeline_response.http_response

            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
                raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

            return pipeline_response

        return ItemPaged(METHOD_NAME, extract_data)

    list.metadata = {"url": "/providers/Microsoft.Cache/operations"}
6,158 | num overlap neighbors | from PYB11Generator import *
@PYB11holder("std::shared_ptr")
@PYB11template("Dimension")
class FlatConnectivity:
    # PYB11 binding spec for the C++ FlatConnectivity<Dimension> class.
    # Method bodies are PYB11 return-type strings and default-argument strings
    # are C++ parameter types; no runtime logic lives in this file.
    # (Docstrings become pybind11 doc strings, so only the existing one on
    # pyinit is kept.)

    PYB11typedefs = """
    typedef typename %(Dimension)s::Vector Vector;
    typedef typename std::array<int, %(Dimension)s::nDim> ArrayDim;
"""

    def pyinit(self):
        "Indexing for bilinear form"

    # -- State queries: which index sets have been computed -------------------
    @PYB11const
    def indexingInitialized(self):
        return "bool"

    @PYB11const
    def overlapIndexingInitialized(self):
        return "bool"

    @PYB11const
    def globalIndexingInitialized(self):
        return "bool"

    @PYB11const
    def surfaceIndexingInitialized(self):
        return "bool"

    @PYB11const
    def boundaryInformationInitialized(self):
        return "bool"

    # -- Index ranges and counts ----------------------------------------------
    @PYB11const
    def firstGlobalIndex(self):
        return "int"

    @PYB11const
    def lastGlobalIndex(self):
        return "int"

    @PYB11const
    def numNodes(self):
        return "int"

    @PYB11const
    def numInternalNodes(self):
        return "int"

    @PYB11const
    def numGlobalNodes(self):
        return "int"

    @PYB11const
    def numBoundaryNodes(self):
        return "int"

    # -- Conversions between (nodeList, node), local, and global indices ------
    @PYB11const
    def nodeToLocal(self,
                    nodeListi = "const int",
                    nodei = "const int"):
        return "int"

    @PYB11const
    def localToNode(self,
                    locali = "const int"):
        return "std::pair<int, int>"

    @PYB11const
    def localToGlobal(self,
                      locali = "const int"):
        return "int"

    # -- Neighbor counts ------------------------------------------------------
    @PYB11const
    def numNeighbors(self,
                     locali = "const int"):
        return "int"

    @PYB11const
    def METHOD_NAME(self,
                    locali = "const int"):
        return "int"

    @PYB11const
    def numConstNeighbors(self,
                          locali = "const int"):
        return "int"

    @PYB11const
    def numConstOverlapNeighbors(self,
                                 locali = "const int"):
        return "int"

    @PYB11const
    def numNonConstNeighbors(self,
                             locali = "const int"):
        return "int"

    @PYB11const
    def numNonConstOverlapNeighbors(self,
                                    locali = "const int"):
        return "int"

    # -- Conversions between local and flat (per-node neighbor) indices -------
    @PYB11const
    def localToFlat(self,
                    locali = "const int",
                    localj = "const int"):
        return "int"

    @PYB11const
    def localToFlatOverlap(self,
                           locali = "const int",
                           localj = "const int"):
        return "int"

    @PYB11const
    def flatToLocal(self,
                    locali = "const int",
                    flatj = "const int"):
        return "int"

    @PYB11const
    def flatOverlapToLocal(self,
                           locali = "const int",
                           flatj = "const int"):
        return "int"

    @PYB11const
    def isConstantBoundaryNode(self,
                               locali = "const int"):
        return "bool"

    # -- Neighbor lists (results written into the vector& out-parameter) ------
    @PYB11const
    def neighborIndices(self,
                        locali = "const int",
                        localNeighbors = "std::vector<int>&"):
        return "void"

    @PYB11const
    def overlapNeighborIndices(self,
                               locali = "const int",
                               localNeighbors = "std::vector<int>&"):
        return "void"

    @PYB11const
    def constNeighborIndices(self,
                             locali = "const int",
                             localNeighbors = "std::vector<int>&"):
        return "void"

    @PYB11const
    def overlapConstNeighborIndices(self,
                                    locali = "const int",
                                    localNeighbors = "std::vector<int>&"):
        return "void"

    @PYB11const
    def nonConstNeighborIndices(self,
                                locali = "const int",
                                localNeighbors = "std::vector<int>&"):
        return "void"

    @PYB11const
    def overlapNonConstNeighborIndices(self,
                                       locali = "const int",
                                       localNeighbors = "std::vector<int>&"):
        return "void"

    @PYB11const
    def globalNeighborIndices(self,
                              locali = "const int",
                              globalNeighborIndices = "std::vector<int>&"):
        return "void"

    @PYB11const
    def globalOverlapNeighborIndices(self,
                                     locali = "const int",
                                     globalNeighborIndices = "std::vector<int>&"):
        return "void"

    # -- Surface information --------------------------------------------------
    @PYB11const
    def numSurfaces(self,
                    locali = "const int"):
        return "int"

    @PYB11const
    def surfaceIndex(self,
                     locali = "const int",
                     normal = "const Vector&"):
        return "int"

    @PYB11const
    def normal(self,
               locali = "const int",
               flats = "const int"):
        return "const Vector&"

    @PYB11const
    def numSurfacesForCell(self,
                           locali = "const int"):
        return "int"

    @PYB11const
    def surfaceIndexForCell(self,
                            locali = "const int",
                            flats = "const int"):
        return "int"

    # -- Builders: compute the index sets from a DataBase ---------------------
    def computeIndices(self,
                       dataBase = "const DataBase<%(Dimension)s>&"):
        return "void"

    def computeOverlapIndices(self,
                              dataBase = "const DataBase<%(Dimension)s>&"):
        return "void"

    def computeGlobalIndices(self,
                             dataBase = "const DataBase<%(Dimension)s>&",
                             boundaries = "const std::vector<Boundary<%(Dimension)s>*>&"):
        return "void"

    def computeSurfaceIndices(self,
                              dataBase = "const DataBase<%(Dimension)s>&",
                              state = "const State<%(Dimension)s>&"):
        return "void"

    def computeBoundaryInformation(self,
                                   dataBase = "const DataBase<%(Dimension)s>&",
                                   boundaries = "const std::vector<Boundary<%(Dimension)s>*>&"):
        return "void"
6,159 | test setting by value | import pytest
import sqlalchemy as sa
from flexmock import flexmock
from sqlalchemy_utils import Choice, ChoiceType, ImproperlyConfigured
from sqlalchemy_utils.compat import _select_args
from sqlalchemy_utils.types.choice import Enum
class TestChoice:
    def test_equality_operator(self):
        # A Choice compares equal to its raw code in both directions
        # (exercising both __eq__ and the reflected comparison) and to an
        # equal Choice.
        assert Choice(1, 1) == 1
        assert 1 == Choice(1, 1)
        assert Choice(1, 1) == Choice(1, 1)

    def test_non_equality_operator(self):
        assert Choice(1, 1) != 2
        assert not (Choice(1, 1) != 1)

    def test_hash(self):
        # Hash must delegate to the code so a Choice and its code collide in
        # sets/dicts, consistent with the equality semantics above.
        assert hash(Choice(1, 1)) == hash(1)
class TestChoiceType:
    @pytest.fixture
    def User(self, Base):
        # Model with a tuple-list ChoiceType column (code, label) pairs.
        class User(Base):
            TYPES = [
                ('admin', 'Admin'),
                ('regular-user', 'Regular user')
            ]
            __tablename__ = 'user'
            id = sa.Column(sa.Integer, primary_key=True)
            type = sa.Column(ChoiceType(TYPES))

            def __repr__(self):
                return 'User(%r)' % self.id

        return User

    @pytest.fixture
    def init_models(self, User):
        pass

    def test_python_type(self, User):
        type_ = User.__table__.c.type.type
        assert type_.python_type

    def test_string_processing(self, session, User):
        # Bypass coercion so the raw string goes through the type's
        # bind/result processing; the round-trip should still yield a Choice.
        flexmock(ChoiceType).should_receive('_coerce').and_return(
            'admin'
        )
        user = User(
            type='admin'
        )

        session.add(user)
        session.commit()

        user = session.query(User).first()
        assert user.type.value == 'Admin'

    def test_parameter_processing(self, session, User):
        user = User(
            type='admin'
        )

        session.add(user)
        session.commit()

        user = session.query(User).first()
        assert user.type.value == 'Admin'

    def test_scalar_attributes_get_coerced_to_objects(self, User):
        user = User(type='admin')

        assert isinstance(user.type, Choice)

    def test_throws_exception_if_no_choices_given(self):
        with pytest.raises(ImproperlyConfigured):
            ChoiceType([])

    def test_compilation(self, User, session):
        query = sa.select(*_select_args(User.type))
        # the type should be cacheable and not throw exception
        session.execute(query)
class TestChoiceTypeWithCustomUnderlyingType:
    def test_init_type(self):
        """A ChoiceType constructed with an explicit impl keeps that impl."""
        choices = [(1, 'something')]
        type_ = ChoiceType(choices, impl=sa.Integer)
        assert type_.impl == sa.Integer
@pytest.mark.skipif('Enum is None')
class TestEnumType:
    @pytest.fixture
    def OrderStatus(self):
        class OrderStatus(Enum):
            unpaid = 0
            paid = 1
        return OrderStatus

    @pytest.fixture
    def Order(self, Base, OrderStatus):
        # Non-nullable column backed by an Integer impl; defaults to unpaid.
        class Order(Base):
            __tablename__ = 'order'
            id_ = sa.Column(sa.Integer, primary_key=True)
            status = sa.Column(
                ChoiceType(OrderStatus, impl=sa.Integer()),
                default=OrderStatus.unpaid,
            )

            def __repr__(self):
                return f'Order({self.id_!r}, {self.status!r})'

        return Order

    @pytest.fixture
    def OrderNullable(self, Base, OrderStatus):
        # Same column but nullable, for the None round-trip test below.
        class OrderNullable(Base):
            __tablename__ = 'order_nullable'
            id_ = sa.Column(sa.Integer, primary_key=True)
            status = sa.Column(
                ChoiceType(OrderStatus, impl=sa.Integer()),
                nullable=True,
            )

        return OrderNullable

    @pytest.fixture
    def init_models(self, Order, OrderNullable):
        pass

    def test_parameter_initialization(self, session, Order, OrderStatus):
        order = Order()

        session.add(order)
        session.commit()

        order = session.query(Order).first()
        assert order.status is OrderStatus.unpaid
        assert order.status.value == 0

    def METHOD_NAME(self, session, Order, OrderStatus):
        # Assigning the raw enum value coerces to the enum member.
        order = Order()
        order.status = 1

        session.add(order)
        session.commit()

        order = session.query(Order).first()
        assert order.status is OrderStatus.paid

    def test_setting_by_enum(self, session, Order, OrderStatus):
        order = Order()
        order.status = OrderStatus.paid

        session.add(order)
        session.commit()

        order = session.query(Order).first()
        assert order.status is OrderStatus.paid

    def test_setting_value_that_resolves_to_none(
        self,
        session,
        Order,
        OrderStatus
    ):
        # 0 is falsy but must still resolve to the unpaid member, not None.
        order = Order()
        order.status = 0

        session.add(order)
        session.commit()

        order = session.query(Order).first()
        assert order.status is OrderStatus.unpaid

    def test_setting_to_wrong_enum_raises_valueerror(self, Order):
        class WrongEnum(Enum):
            foo = 0
            bar = 1

        order = Order()
        with pytest.raises(ValueError):
            order.status = WrongEnum.foo

    def test_setting_to_uncoerceable_type_raises_valueerror(self, Order):
        order = Order()
        with pytest.raises(ValueError):
            order.status = 'Bad value'

    def test_order_nullable_stores_none(self, session, OrderNullable):
        # With nullable=False as in `Order`, a `None` value is always
        # converted to the default value, unless we explicitly set it to
        # sqlalchemy.sql.null(), so we use this class to test our ability
        # to set and retrieve `None`.
        order_nullable = OrderNullable()
        assert order_nullable.status is None

        order_nullable.status = None
        session.add(order_nullable)
        session.commit()
        assert order_nullable.status is None
6,160 | method | # --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
#
# Code generated by aaz-dev-tools
# --------------------------------------------------------------------------------------------
# pylint: skip-file
# flake8: noqa
from azure.cli.core.aaz import *
@register_command(
    "network public-ip prefix delete",
)
class Delete(AAZCommand):
    """Delete a public IP prefix resource.

    :example: Delete a public IP prefix resource. (autogenerated)
        az network public-ip prefix delete --name MyPublicIPPrefix --resource-group MyResourceGroup
    """

    # Generated by aaz-dev-tools: resource path and API version metadata.
    _aaz_info = {
        "version": "2018-11-01",
        "resources": [
            ["mgmt-plane", "/subscriptions/{}/resourcegroups/{}/providers/microsoft.network/publicipprefixes/{}", "2018-11-01"],
        ]
    }

    # Delete is a long-running operation, so --no-wait is supported.
    AZ_SUPPORT_NO_WAIT = True

    def _handler(self, command_args):
        super()._handler(command_args)
        # Second argument None: delete produces no result payload.
        return self.build_lro_poller(self._execute_operations, None)

    _args_schema = None

    @classmethod
    def _build_arguments_schema(cls, *args, **kwargs):
        # The schema is cached on the class and built lazily on first use.
        if cls._args_schema is not None:
            return cls._args_schema
        cls._args_schema = super()._build_arguments_schema(*args, **kwargs)

        # define Arg Group ""

        _args_schema = cls._args_schema
        _args_schema.name = AAZStrArg(
            options=["-n", "--name"],
            help="The name of the public IP prefix.",
            required=True,
            id_part="name",
        )
        _args_schema.resource_group = AAZResourceGroupNameArg(
            required=True,
        )
        return cls._args_schema

    def _execute_operations(self):
        self.pre_operations()
        yield self.PublicIPPrefixesDelete(ctx=self.ctx)()
        self.post_operations()

    @register_callback
    def pre_operations(self):
        # Extension hook: runs before the HTTP operation.
        pass

    @register_callback
    def post_operations(self):
        # Extension hook: runs after the HTTP operation.
        pass

    class PublicIPPrefixesDelete(AAZHttpOperation):
        CLIENT_TYPE = "MgmtClient"

        def __call__(self, *args, **kwargs):
            request = self.make_request()
            session = self.client.send_request(request=request, stream=False, **kwargs)
            # 202 (accepted) and 200 (deletion in progress) poll to on_200;
            # 204 (resource already absent) polls to on_204.
            if session.http_response.status_code in [202]:
                return self.client.build_lro_polling(
                    self.ctx.args.no_wait,
                    session,
                    self.on_200,
                    self.on_error,
                    lro_options={"final-state-via": "azure-async-operation"},
                    path_format_arguments=self.url_parameters,
                )
            if session.http_response.status_code in [200]:
                return self.client.build_lro_polling(
                    self.ctx.args.no_wait,
                    session,
                    self.on_200,
                    self.on_error,
                    lro_options={"final-state-via": "azure-async-operation"},
                    path_format_arguments=self.url_parameters,
                )
            if session.http_response.status_code in [204]:
                return self.client.build_lro_polling(
                    self.ctx.args.no_wait,
                    session,
                    self.on_204,
                    self.on_error,
                    lro_options={"final-state-via": "azure-async-operation"},
                    path_format_arguments=self.url_parameters,
                )
            return self.on_error(session.http_response)

        @property
        def url(self):
            return self.client.format_url(
                "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/publicIPPrefixes/{publicIpPrefixName}",
                **self.url_parameters
            )

        @property
        def METHOD_NAME(self):
            return "DELETE"

        @property
        def error_format(self):
            return "ODataV4Format"

        @property
        def url_parameters(self):
            parameters = {
                **self.serialize_url_param(
                    "publicIpPrefixName", self.ctx.args.name,
                    required=True,
                ),
                **self.serialize_url_param(
                    "resourceGroupName", self.ctx.args.resource_group,
                    required=True,
                ),
                **self.serialize_url_param(
                    "subscriptionId", self.ctx.subscription_id,
                    required=True,
                ),
            }
            return parameters

        @property
        def query_parameters(self):
            parameters = {
                **self.serialize_query_param(
                    "api-version", "2018-11-01",
                    required=True,
                ),
            }
            return parameters

        def on_200(self, session):
            # Successful delete returns no body to process.
            pass

        def on_204(self, session):
            # Resource was already absent; nothing to process.
            pass
class _DeleteHelper:
    """Helper class for Delete"""
    # Generated placeholder: aaz-dev-tools emits shared serialization helpers
    # here when the command needs them; Delete currently needs none.
__all__ = ["Delete"] |
6,161 | libcurve close | #!/usr/bin/env python
# -*- coding: utf8 -*-
from curvefs_python import curvefs
from config import config
from logger.logger import *
cbdClient = curvefs.CBDClient()
class LibCurve:
    """Object wrapper around the curvefs CBD client bindings.

    Every ``libcurve_*`` method delegates to the module-level ``cbdClient``
    and returns the client's status code (0 on success, non-zero on error),
    or the requested payload (fd, data, file info).  Failures are logged.
    """

    def __init__(self):
        """Initialise the shared CBD client from the configured conf file.

        Raises:
            AssertionError: if client initialisation fails (rc != 0).
        """
        rc = cbdClient.Init(config.client_conf)
        if rc != 0:
            # Fixed: the original logged "init success." unconditionally,
            # before checking rc.
            print("init client fail! rc=%s" % rc)
            logger.debug("init client fail! rc=%s" % rc)
            raise AssertionError
        logger.info("init success.")

    @staticmethod
    def _make_user_info(user_name, pass_word=""):
        """Build a curvefs UserInfo_t for the given credentials."""
        user_info_t = curvefs.UserInfo_t()
        user_info_t.owner = user_name
        user_info_t.password = pass_word
        return user_info_t

    def libcurve_create(self, file_path, user_name, size, pass_word=""):
        """Create a volume of ``size`` bytes; returns the client rc."""
        rc = cbdClient.Create(file_path, self._make_user_info(user_name, pass_word), size)
        if rc != 0:
            logger.debug("create file %s fail! rc=%s" % (file_path, rc))
        return rc

    def libcurve_open(self, file_path, user_name, pass_word=""):
        """Open a volume; returns the fd (negative value on failure)."""
        fd = cbdClient.Open(file_path, self._make_user_info(user_name, pass_word))
        logger.info("fd=%s" % fd)
        return fd

    def libcurve_write(self, fd, buf, offset, length):
        """Write ``length`` bytes of ``buf`` at ``offset``; returns the rc.

        Fixed: removed the unreachable ``raise AssertionError`` that followed
        the ``return`` in the error branch.
        """
        rc = cbdClient.Write(fd, buf, offset, length)
        if rc < 0:
            logger.debug("write error, rc=%s" % rc)
        return rc

    def libcurve_read(self, fd, buf, offset, length):
        """Read ``length`` bytes at ``offset``; returns the data read."""
        return cbdClient.Read(fd, buf, offset, length)

    def libcurve_statfs(self, file_name, user_name, pass_word=""):
        """Stat a file; returns a FileInfo_t on success, else the error rc.

        Fixed: removed dead ``raise AssertionError`` after ``return rc``.
        """
        file_info = curvefs.FileInfo_t()
        rc = cbdClient.StatFile(file_name, self._make_user_info(user_name, pass_word), file_info)
        if rc == 0:
            return file_info
        logger.debug("statfs file %s fail! rc=%s" % (file_name, rc))
        return rc

    def libcurve_extend(self, file_path, user_name, new_size, pass_word=""):
        """Extend a volume to ``new_size``; returns the client rc."""
        rc = cbdClient.Extend(file_path, self._make_user_info(user_name, pass_word), new_size)
        if rc != 0:
            logger.info("extend file fail. rc=%s" % rc)
        return rc

    def METHOD_NAME(self, fd):
        """Close the fd; returns the client rc."""
        rc = cbdClient.Close(fd)
        if rc != 0:
            logger.info("close file fail! rc=%s" % rc)
        return rc

    def libcurve_rename(self, user_name, old_path, new_path, pass_word=""):
        """Rename ``old_path`` to ``new_path``; returns the client rc.

        Fixed: removed dead ``raise AssertionError`` after ``return rc``.
        """
        rc = cbdClient.Rename(self._make_user_info(user_name, pass_word), old_path, new_path)
        if rc != 0:
            logger.info("rename file fail! rc=%s" % rc)
        return rc

    def libcurve_delete(self, filepath, user_name, pass_word=""):
        """Unlink (delete) a file; returns the client rc."""
        rc = cbdClient.Unlink(filepath, self._make_user_info(user_name, pass_word))
        if rc != 0:
            logger.debug("delete file %s fail! rc=%s" % (filepath, rc))
        return rc

    def libcurve_rmdir(self, dirpath, user_name, pass_word=""):
        """Remove a directory; returns the client rc."""
        rc = cbdClient.Rmdir(dirpath, self._make_user_info(user_name, pass_word))
        if rc != 0:
            logger.info("delete dir fail! rc=%s" % rc)
        return rc

    def libcurve_mkdir(self, dirpath, user_name, pass_word=""):
        """Create a directory; returns the client rc."""
        rc = cbdClient.Mkdir(dirpath, self._make_user_info(user_name, pass_word))
        if rc != 0:
            logger.info("mkdir fail! rc=%s" % rc)
        return rc

    def libcurve_uninit(self):
        """Uninitialise the shared client; returns 0 on success.

        Fixed: the original was declared without ``self`` (TypeError when
        called on an instance), used a Python-2-only print statement, and had
        an unreachable ``raise`` after ``return``.
        """
        rc = cbdClient.UnInit()
        # NOTE(review): UnInit appears to return None on success (the original
        # tested ``rc != None``) — confirm against the curvefs bindings.
        if rc is not None:
            print("uninit fail! rc=%s" % rc)
            logger.debug("uninit file fail! rc=%s" % rc)
            return rc
        return 0
6,162 | start | #
# We use a background thread for sharing fds on Unix, and for sharing sockets on
# Windows.
#
# A client which wants to pickle a resource registers it with the resource
# sharer and gets an identifier in return. The unpickling process will connect
# to the resource sharer, sends the identifier and its pid, and then receives
# the resource.
#
import os
import signal
import socket
import sys
import threading
from . import process
from .context import reduction
from . import util
__all__ = ['stop']
if sys.platform == 'win32':
__all__ += ['DupSocket']
    class DupSocket(object):
        '''Picklable wrapper for a socket.'''
        def __init__(self, sock):
            # Duplicate the socket so this wrapper stays valid even if the
            # caller closes the original.
            new_sock = sock.dup()
            def send(conn, pid):
                # Serialize the socket for the destination process and ship
                # the resulting share bytes over the connection.
                share = new_sock.share(pid)
                conn.send_bytes(share)
            self._id = _resource_sharer.register(send, new_sock.close)

        def detach(self):
            '''Get the socket. This should only be called once.'''
            # Connect to the resource-sharer thread, which replies with the
            # share bytes produced by send() above.
            with _resource_sharer.get_connection(self._id) as conn:
                share = conn.recv_bytes()
                return socket.fromshare(share)
else:
__all__ += ['DupFd']
    class DupFd(object):
        '''Wrapper for fd which can be used at any time.'''
        def __init__(self, fd):
            # Duplicate the fd so the wrapper outlives the caller's copy.
            new_fd = os.dup(fd)
            def send(conn, pid):
                reduction.send_handle(conn, new_fd, pid)
            def close():
                os.close(new_fd)
            self._id = _resource_sharer.register(send, close)

        def detach(self):
            '''Get the fd. This should only be called once.'''
            with _resource_sharer.get_connection(self._id) as conn:
                return reduction.recv_handle(conn)
class _ResourceSharer(object):
    '''Manager for resources using background thread.'''
    def __init__(self):
        self._key = 0                  # monotonically increasing resource id
        self._cache = {}               # key -> (send, close) callbacks
        self._old_locks = []           # see _afterfork: keeps broken locks alive
        self._lock = threading.Lock()
        self._listener = None
        self._address = None
        self._thread = None
        util.register_after_fork(self, _ResourceSharer._afterfork)

    def register(self, send, close):
        '''Register resource, returning an identifier.'''
        with self._lock:
            # Lazily start the listener thread on first registration.
            if self._address is None:
                self.METHOD_NAME()
            self._key += 1
            self._cache[self._key] = (send, close)
            return (self._address, self._key)

    @staticmethod
    def get_connection(ident):
        '''Return connection from which to receive identified resource.'''
        from .connection import Client
        address, key = ident
        c = Client(address, authkey=process.current_process().authkey)
        # Protocol: send (key, receiving pid); the serving thread replies
        # with the registered resource on this connection.
        c.send((key, os.getpid()))
        return c

    def stop(self, timeout=None):
        '''Stop the background thread and clear registered resources.'''
        from .connection import Client
        with self._lock:
            if self._address is not None:
                # A None message is the sentinel telling _serve to exit.
                c = Client(self._address,
                           authkey=process.current_process().authkey)
                c.send(None)
                c.close()
                self._thread.join(timeout)
                if self._thread.is_alive():
                    util.sub_warning('_ResourceSharer thread did '
                                     'not stop when asked')
                self._listener.close()
                self._thread = None
                self._address = None
                self._listener = None
                # Release every still-registered resource.
                for key, (send, close) in self._cache.items():
                    close()
                self._cache.clear()

    def _afterfork(self):
        # In the forked child the registered resources belong to the parent;
        # release our duplicates and forget the parent's listener state.
        for key, (send, close) in self._cache.items():
            close()
        self._cache.clear()
        # If self._lock was locked at the time of the fork, it may be broken
        # -- see issue 6721.  Replace it without letting it be gc'ed.
        self._old_locks.append(self._lock)
        self._lock = threading.Lock()
        if self._listener is not None:
            self._listener.close()
        self._listener = None
        self._address = None
        self._thread = None

    def METHOD_NAME(self):
        # Start the background listener thread.  Called while holding
        # self._lock (from register()).
        from .connection import Listener
        assert self._listener is None
        util.debug('starting listener and thread for sending handles')
        self._listener = Listener(authkey=process.current_process().authkey)
        self._address = self._listener.address
        t = threading.Thread(target=self._serve)
        t.daemon = True
        t.start()
        self._thread = t

    def _serve(self):
        # Block all signals in this thread so they are delivered to the main
        # thread instead.
        if hasattr(signal, 'pthread_sigmask'):
            signal.pthread_sigmask(signal.SIG_BLOCK, range(1, signal.NSIG))
        while 1:
            try:
                with self._listener.accept() as conn:
                    msg = conn.recv()
                    if msg is None:
                        # stop() sentinel: exit the serve loop.
                        break
                    key, destination_pid = msg
                    # Each resource is single-use: pop it, send it, close it.
                    send, close = self._cache.pop(key)
                    try:
                        send(conn, destination_pid)
                    finally:
                        close()
            except:
                if not util.is_exiting():
                    sys.excepthook(*sys.exc_info())
_resource_sharer = _ResourceSharer()
stop = _resource_sharer.stop |
6,163 | get test params | # copyright: sktime developers, BSD-3-Clause License (see LICENSE file)
"""Kinematic transformers."""
__author__ = ["fkiraly"]
import numpy as np
import pandas as pd
from sktime.transformations.base import BaseTransformer
class KinematicFeatures(BaseTransformer):
    r"""Kinematic feature transformer - velocity, acceleration, curvature.

    Takes a discrete N-dimensional space curve, N>=1, and computes
    a selection of kinematic features.

    For noisy time series, is strongly recommended to pipeline this with
    `KalmanFilterTransformerPK` or `KalmanFilterTransformerFP` (prior),
    or other smoothing or trajectory fitting transformers,
    as this transformer does not carry out its own smoothing.

    For min/max/quantiles of velocity etc, pipeline with `SummaryTransformer` (post).

    For a time series input :math:`x(t)`, observed at discrete times,
    this transformer computes (when selected) discretized versions of:

    * `"v"` - vector of velocity: :math:`\vec{v}(t) := \Delta x(t)`
    * `"v_abs"` - absolute velocity: :math:`v(t) := \left| \Delta x(t) \right|`
    * `"a"` - vector of acceleration: :math:`\vec{a}(t) := \Delta \Delta x(t)`
    * `"a_abs"` - absolute acceleration: :math:`a(t) := \left| \Delta \Delta x(t)
      \right|`
    * `"curv"` - curvature: :math:`c(t) := \frac{\sqrt{v(t)^2 a(t)^2 - \left\langle
      \vec{v}(t), \vec{a}(t)\right\rangle^2}}{v(t)^3}`

    where :math:`\Delta` denotes first finite differences, that is,
    :math:`\Delta z(t) = z(t) - z(t-1)` for any discrete time series :math:`z(t)`.

    Note: this estimator currently ignores non-equidistant location index,
    and considers only the integer location index.

    Parameters
    ----------
    features : str or list of str, optional, default=["v_abs", "a_abs", "curv"]
        list of features to compute, possible features:

        * "v" - vector of velocity
        * "v_abs" - absolute velocity
        * "a" - vector of acceleration
        * "a_abs" - absolute acceleration
        * "curv" - curvature

    Example
    -------
    >>> import numpy as np
    >>> import pandas as pd
    >>> from sktime.transformations.series.kinematic import KinematicFeatures
    >>> traj3d = pd.DataFrame(columns=["x", "y", "z"])
    >>> traj3d["x"] = pd.Series(np.sin(np.arange(200)/100))
    >>> traj3d["y"] = pd.Series(np.cos(np.arange(200)/100))
    >>> traj3d["z"] = pd.Series(np.arange(200)/100)
    >>> t = KinematicFeatures()
    >>> Xt = t.fit_transform(traj3d)
    """

    _tags = {
        "scitype:transform-input": "Series",
        "scitype:transform-output": "Series",
        "scitype:instancewise": True,
        "scitype:transform-labels": "None",
        "X_inner_mtype": "pd.DataFrame",
        "y_inner_mtype": "None",
        "univariate-only": False,
        "requires_y": False,
        "fit_is_empty": True,
        "capability:inverse_transform": False,
        "capability:unequal_length": True,
        "handles-missing-data": False,
    }

    def __init__(self, features=None):
        # Keep the user's argument untouched (sklearn convention) and
        # normalize into self._features: always a list of feature names.
        self.features = features

        if features is None:
            self._features = ["v_abs", "a_abs", "curv"]
        elif isinstance(features, str):
            self._features = [features]
        else:
            self._features = features

        super().__init__()

    def _transform(self, X, y=None):
        """Transform X and return a transformed version.

        private _transform containing core logic, called from transform

        Parameters
        ----------
        X : pd.DataFrame
            Data to be transformed
        y : ignored, present for interface compliance

        Returns
        -------
        transformed version of X
        """
        features = self._features
        res = pd.DataFrame()

        def prepend_cols(df, prefix):
            # Rename columns in place to "<prefix>__<col>" and return df.
            df.columns = [f"{prefix}__{col}" for col in df.columns]
            return df

        def absq_rows(df, col="absq"):
            """Compute DataFrame with one col, absolute value square of rows of df."""
            abs_frame = df**2
            abs_frame = abs_frame.agg(["sum"], axis=1)
            abs_frame.columns = [col]
            return abs_frame

        def abs_rows(df, col="abs"):
            """Compute DataFrame with single column, absolute value of rows of df."""
            return absq_rows(df, col=col) ** 0.5

        def feature_query(queries):
            """Boolean, whether any of the features in queries is being asked for."""
            # Fixed: the original tested ``queries is str`` (identity against
            # the str type, always False for a string argument); use a proper
            # type check so string queries are handled as documented.
            if isinstance(queries, str):
                return queries in features
            else:
                return any(x in features for x in queries)

        # First finite differences (velocity); first row is NaN by diff().
        if feature_query(["v", "v_abs", "curv"]):
            v_frame = X.diff()
            v_frame = prepend_cols(v_frame, "v")
        if feature_query(["v"]):
            res = pd.concat([res, v_frame], axis=1)
        if feature_query(["v_abs"]):
            vabs_frame = abs_rows(v_frame, "v_abs")
            vabs_frame.iloc[0] = np.nan
            res = pd.concat([res, vabs_frame], axis=1)

        # Second finite differences (acceleration); first two rows undefined.
        if feature_query(["a", "a_abs", "curv"]):
            a_frame = X.diff().diff()
            a_frame = prepend_cols(a_frame, "a")
        if feature_query(["a"]):
            res = pd.concat([res, a_frame], axis=1)
        if feature_query(["a_abs"]):
            aabs_frame = abs_rows(a_frame, "a_abs")
            aabs_frame.iloc[0] = np.nan
            aabs_frame.iloc[1] = np.nan
            res = pd.concat([res, aabs_frame], axis=1)

        # Curvature: sqrt(|v|^2 |a|^2 - <v, a>^2) / |v|^3, computed on the
        # raw arrays to avoid column alignment between v and a frames.
        if feature_query(["curv"]):
            vsq_frame = absq_rows(v_frame)
            curv_frame = vsq_frame * absq_rows(a_frame)
            curv_arr = curv_frame.values
            cross_term = (v_frame.values * a_frame.values).sum(axis=1) ** 2
            cross_term = cross_term.reshape(-1, 1)
            curv_arr = (curv_arr - cross_term) / (vsq_frame.values**3)
            curv_arr = np.abs(curv_arr) ** 0.5
            curv_frame = pd.DataFrame(curv_arr, columns=["curv"], index=X.index)
            res = pd.concat([res, curv_frame], axis=1)

        return res

    @classmethod
    def METHOD_NAME(cls, parameter_set="default"):
        """Return testing parameter settings for the estimator.

        Parameters
        ----------
        parameter_set : str, default="default"
            Name of the set of test parameters to return, for use in tests. If no
            special parameters are defined for a value, will return `"default"` set.
            There are currently no reserved values for transformers.

        Returns
        -------
        params : dict or list of dict, default = {}
            Parameters to create testing instances of the class
            Each dict are parameters to construct an "interesting" test instance, i.e.,
            `MyClass(**params)` or `MyClass(**params[i])` creates a valid test instance.
            `create_test_instance` uses the first (or only) dictionary in `params`
        """
        params1 = {}
        params2 = {"features": ["v", "a"]}
        return [params1, params2]
6,164 | users for order benchmarks | import random
from decimal import Decimal
import pytest
from prices import Money, TaxedMoney
from .....account.models import User
from .....order import OrderEvents, OrderStatus
from .....order.models import Fulfillment, FulfillmentLine, Order, OrderEvent, OrderLine
from .....payment import ChargeStatus
from .....payment.models import Payment, Transaction
ORDER_COUNT_IN_BENCHMARKS = 10
EVENTS_PER_ORDER = 5
LINES_PER_ORDER = 3
TRANSACTIONS_PER_PAYMENT = 3
def _prepare_payment_transactions(payments):
    """Build TRANSACTIONS_PER_PAYMENT unsaved Transaction rows per payment."""
    return [
        Transaction(payment=payment, gateway_response=str(idx))
        for payment in payments
        for idx in range(TRANSACTIONS_PER_PAYMENT)
    ]
def _prepare_payments_for_order(order):
    """Build three unsaved dummy-gateway payments in distinct charge states."""
    common = {
        "gateway": "mirumee.payments.dummy",
        "order": order,
        "is_active": True,
    }
    return [
        Payment(charge_status=ChargeStatus.NOT_CHARGED, **common),
        Payment(
            charge_status=ChargeStatus.PARTIALLY_CHARGED,
            captured_amount=Decimal("6.0"),
            **common,
        ),
        Payment(
            charge_status=ChargeStatus.FULLY_CHARGED,
            captured_amount=Decimal("10.0"),
            **common,
        ),
    ]
def _prepare_events_for_order(order):
    """Build EVENTS_PER_ORDER unsaved OrderEvent rows with random event types."""
    events = []
    for _ in range(EVENTS_PER_ORDER):
        event_type = random.choice(OrderEvents.CHOICES)[0]
        events.append(OrderEvent(order=order, type=event_type))
    return events
def _prepare_lines_for_order(order, variant_with_image):
    """Build LINES_PER_ORDER unsaved OrderLine rows priced at 5 USD each."""
    unit_price = TaxedMoney(
        net=Money(amount=5, currency="USD"),
        gross=Money(amount=5, currency="USD"),
    )
    lines = []
    for _ in range(LINES_PER_ORDER):
        lines.append(
            OrderLine(
                order=order,
                variant=variant_with_image,
                quantity=5,
                is_shipping_required=True,
                is_gift_card=False,
                unit_price=unit_price,
                total_price=unit_price,
            )
        )
    return lines
@pytest.fixture
def METHOD_NAME(address):
    """Persist one user per benchmark order, each with copied addresses."""
    users = []
    for index in range(ORDER_COUNT_IN_BENCHMARKS):
        users.append(
            User(
                email=f"john.doe.{index}@example.com",
                is_active=True,
                default_billing_address=address.get_copy(),
                default_shipping_address=address.get_copy(),
                first_name=f"John_{index}",
                last_name=f"Doe_{index}",
            )
        )
    return User.objects.bulk_create(users)
@pytest.fixture
def orders_for_benchmarks(
    channel_USD,
    channel_PLN,
    address,
    payment_dummy,
    METHOD_NAME,
    variant_with_image,
    shipping_method,
    stocks_for_cc,
):
    """Persist a full graph of benchmark orders.

    Creates ORDER_COUNT_IN_BENCHMARKS orders (alternating between the USD
    and PLN channels), each with payments, transactions, events, lines and
    one fulfillment, using a single bulk_create per model.
    """
    orders = [
        Order(
            # Odd indexes go to USD, even indexes (including 0) to PLN.
            channel=channel_USD if i % 2 else channel_PLN,
            billing_address=address.get_copy(),
            shipping_address=address.get_copy(),
            shipping_method=shipping_method,
            user=METHOD_NAME[i],
            total=TaxedMoney(net=Money(i, "USD"), gross=Money(i, "USD")),
        )
        for i in range(ORDER_COUNT_IN_BENCHMARKS)
    ]
    created_orders = Order.objects.bulk_create(orders)
    # Accumulate related rows across all orders so each model is
    # bulk-created exactly once at the end.
    payments = []
    events = []
    fulfillments = []
    fulfillment_lines = []
    lines = []
    transactions = []
    for index, order in enumerate(created_orders):
        new_payments = _prepare_payments_for_order(order)
        new_transactions = _prepare_payment_transactions(new_payments)
        new_events = _prepare_events_for_order(order)
        new_lines = _prepare_lines_for_order(order, variant_with_image)
        payments.extend(new_payments)
        transactions.extend(new_transactions)
        events.extend(new_events)
        lines.extend(new_lines)
        fulfillment = Fulfillment(order=order, fulfillment_order=order.number)
        fulfillments.append(fulfillment)
        fulfillment_lines.append(
            FulfillmentLine(
                order_line=new_lines[0],
                # Odd orders get a click-and-collect stock; even ones none.
                stock=stocks_for_cc[index % 3] if index % 2 else None,
                fulfillment=fulfillment,
                quantity=index,
            )
        )
    Payment.objects.bulk_create(payments)
    Transaction.objects.bulk_create(transactions)
    OrderEvent.objects.bulk_create(events)
    Fulfillment.objects.bulk_create(fulfillments)
    OrderLine.objects.bulk_create(lines)
    FulfillmentLine.objects.bulk_create(fulfillment_lines)
    return created_orders
@pytest.fixture
def draft_orders_for_benchmarks(orders_for_benchmarks):
    """The benchmark orders flipped to DRAFT status via one bulk update."""
    for order in orders_for_benchmarks:
        order.status = OrderStatus.DRAFT
    Order.objects.bulk_update(orders_for_benchmarks, ["status"])
    return orders_for_benchmarks
6,165 | ha relation joined | #!/usr/bin/env python
import sys
import os
import lib.utils as utils
import lib.ceph_utils as ceph
import lib.cluster_utils as cluster
from charmhelpers.contrib.peerstorage import (
peer_echo,
)
# CEPH-backed storage configuration for the MySQL charm.
DATA_SRC_DST = '/var/lib/mysql'  # mount point for the RBD-backed data dir
# e.g. JUJU_UNIT_NAME 'mysql/0' -> service name 'mysql'
SERVICE_NAME = os.getenv('JUJU_UNIT_NAME').split('/')[0]
POOL_NAME = SERVICE_NAME  # Ceph pool named after the service
LEADER_RES = 'res_mysql_vip'  # pacemaker resource used to decide leadership
def cluster_changed():
    """cluster-relation-changed hook: sync password files between peers."""
    # Echo any passwords placed on peer relation
    peer_echo(includes=['.passwd'])
def METHOD_NAME():
    """ha-relation-joined hook: hand hacluster the resources it must manage.

    Publishes corosync settings plus pacemaker resource definitions for the
    RBD map, its filesystem mount, the VIP and the mysqld service. Exits if
    the VIP configuration is incomplete, and defers until the 'ceph'
    relation exists because the RBD resources depend on it.
    """
    vip = utils.config_get('vip')
    vip_iface = utils.config_get('vip_iface')
    vip_cidr = utils.config_get('vip_cidr')
    corosync_bindiface = utils.config_get('ha-bindiface')
    corosync_mcastport = utils.config_get('ha-mcastport')
    if None in [vip, vip_cidr, vip_iface]:
        utils.juju_log('WARNING',
                       'Insufficient VIP information to configure cluster')
        sys.exit(1)
    # Starting configuring resources.
    init_services = {'res_mysqld': 'mysql'}
    # If the 'ha' relation has been made *before* the 'ceph' relation,
    # it doesn't make sense to make it until after the 'ceph' relation is made
    if not utils.is_relation_made('ceph', 'auth'):
        utils.juju_log('INFO',
                       '*ceph* relation does not exist. '
                       'Not sending *ha* relation data yet')
        return
    else:
        utils.juju_log('INFO',
                       '*ceph* relation exists. Sending *ha* relation data')
    block_storage = 'ceph'
    if utils.config_get('prefer-ipv6'):
        res_mysql_vip = 'ocf:heartbeat:IPv6addr'
        vip_params = 'ipv6addr'
        # IPv6addr takes a fixed /64 prefix length.
        vip_cidr = '64'
    else:
        res_mysql_vip = 'ocf:heartbeat:IPaddr2'
        vip_params = 'ip'
    resources = {
        'res_mysql_rbd': 'ocf:ceph:rbd',
        'res_mysql_fs': 'ocf:heartbeat:Filesystem',
        'res_mysql_vip': res_mysql_vip,
        'res_mysqld': 'upstart:mysql'}
    rbd_name = utils.config_get('rbd-name')
    resource_params = {
        'res_mysql_rbd': 'params name="%s" pool="%s" user="%s" '
                         'secret="%s"' %
                         (rbd_name, POOL_NAME,
                          SERVICE_NAME, ceph.keyfile_path(SERVICE_NAME)),
        'res_mysql_fs': 'params device="/dev/rbd/%s/%s" directory="%s" '
                        'fstype="ext4" op start start-delay="10s"' %
                        (POOL_NAME, rbd_name, DATA_SRC_DST),
        'res_mysql_vip': 'params "%s"="%s" cidr_netmask="%s" nic="%s"' %
                         (vip_params, vip, vip_cidr, vip_iface),
        'res_mysqld': 'op start start-delay="5s" op monitor interval="5s"'}
    # Group the resources so pacemaker keeps them colocated and ordered:
    # map RBD, mount FS, bring up VIP, then start mysqld.
    groups = {
        'grp_mysql': 'res_mysql_rbd res_mysql_fs res_mysql_vip res_mysqld'}
    for rel_id in utils.relation_ids('ha'):
        utils.relation_set(rid=rel_id,
                           block_storage=block_storage,
                           corosync_bindiface=corosync_bindiface,
                           corosync_mcastport=corosync_mcastport,
                           resources=resources,
                           resource_params=resource_params,
                           init_services=init_services,
                           groups=groups)
def ha_relation_changed():
    """ha-relation-changed hook: once clustered, publish the VIP as db_host."""
    clustered = utils.relation_get('clustered')
    # Only the current leader re-advertises, to avoid duplicate updates.
    if (clustered and cluster.is_leader(LEADER_RES)):
        utils.juju_log('INFO', 'Cluster configured, notifying other services')
        # Tell all related services to start using the VIP
        for r_id in utils.relation_ids('shared-db'):
            utils.relation_set(rid=r_id,
                               db_host=utils.config_get('vip'))
def ceph_joined():
    """ceph-relation-joined hook: install the Ceph client packages."""
    utils.juju_log('INFO', 'Start Ceph Relation Joined')
    ceph.install()
    utils.juju_log('INFO', 'Finish Ceph Relation Joined')
def ceph_changed():
    """ceph-relation-changed hook: configure the client and provision storage.

    The eligible leader creates/maps the RBD image, makes an ext4
    filesystem and mounts it at the MySQL data directory. Non-leaders
    only stop MySQL so the shared block device has a single writer.
    Finally, re-sends the 'ha' relation data if that relation already
    exists (it may have fired before 'ceph' was related).
    """
    utils.juju_log('INFO', 'Start Ceph Relation Changed')
    auth = utils.relation_get('auth')
    key = utils.relation_get('key')
    use_syslog = utils.relation_get('use_syslog')
    if None in [auth, key]:
        # Broker data not ready yet; a later -changed event will retry.
        utils.juju_log('INFO', 'Missing key or auth in relation')
        return
    ceph.configure(service=SERVICE_NAME, key=key, auth=auth,
                   use_syslog=use_syslog)
    if cluster.eligible_leader(LEADER_RES):
        # block-size config is in GB; ensure_ceph_storage wants MB.
        sizemb = int(utils.config_get('block-size')) * 1024
        rbd_img = utils.config_get('rbd-name')
        blk_device = '/dev/rbd/%s/%s' % (POOL_NAME, rbd_img)
        rbd_pool_rep_count = utils.config_get('ceph-osd-replication-count')
        ceph.ensure_ceph_storage(service=SERVICE_NAME, pool=POOL_NAME,
                                 rbd_img=rbd_img, sizemb=sizemb,
                                 fstype='ext4', mount_point=DATA_SRC_DST,
                                 blk_device=blk_device,
                                 system_services=['mysql'],
                                 rbd_pool_replicas=rbd_pool_rep_count)
    else:
        utils.juju_log('INFO',
                       'This is not the peer leader. Not configuring RBD.')
        # Stopping MySQL
        if utils.running('mysql'):
            utils.juju_log('INFO', 'Stopping MySQL...')
            utils.stop('mysql')
    # If 'ha' relation has been made before the 'ceph' relation
    # it is important to make sure the ha-relation data is being
    # sent.
    if utils.is_relation_made('ha'):
        utils.juju_log('INFO',
                       '*ha* relation exists. Making sure the ha'
                       ' relation data is sent.')
        METHOD_NAME()
        return
    utils.juju_log('INFO', 'Finish Ceph Relation Changed')
# Map Juju hook names to handlers; do_hooks dispatches on the name this
# script was invoked as (via the hook symlink).
hooks = {
    "ha-relation-joined": METHOD_NAME,
    "ha-relation-changed": ha_relation_changed,
    "ceph-relation-joined": ceph_joined,
    "ceph-relation-changed": ceph_changed,
    "cluster-relation-changed": cluster_changed,
}
utils.do_hooks(hooks)
6,166 | test gdal origin | """
Tests for resistivity model to geotiff.
"""
import pytest
import numpy as np
from mtpy.utils import convert_modem_data_to_geogrid as conv
def test_strip_padding():
    """_strip_padding removes the given number of cells from each axis end."""
    grid_axis = [-2., -1., 0., 1., 2.]
    test = conv._strip_padding(grid_axis, 1)
    expected = [-1., 0., 1.]
    np.testing.assert_array_equal(test, expected)
    # For 0 padding case, the array should come back untouched.
    test = conv._strip_padding(grid_axis, 0)
    expected = grid_axis
    np.testing.assert_array_equal(test, expected)
    # When keep_start=True, padding should only be removed from the
    # end. This is used for stripping padding from Z-axis (only has
    # padding cells on the bottom).
    test = conv._strip_padding(test, 1, keep_start=True)
    expected = [-2., -1., 0., 1.]
    np.testing.assert_array_equal(test, expected)
def METHOD_NAME():
    """The GDAL origin is the grid's upper-left corner in geographic coords."""
    grid_east = [-2., -1., 0., 1., 2.]
    grid_north = [-1.5, 0., 1.5]
    # Center of the survey site, puts the grid into reference
    # (latitude, longitude) order here.
    center_point = 20., 150.
    test = conv._get_gdal_origin(grid_east, 1., center_point[1],
                                 grid_north, 1.5, center_point[0])
    # Origin should be upper-left, so western-most point shifted 1/2
    # cell west and northern-most point shifted 1/2 cell north.
    expected = [147.5, 22.25]
    np.testing.assert_array_equal(test, expected)
def test_target_grid_generation():
    """_build_target_grid returns meshgrid-style X and Y coordinate arrays."""
    grid_east = [-2., -1., 0., 1., 2.]
    grid_north = [-1.5, 0., 1.5]
    test_grid_x, test_grid_y = conv._build_target_grid(grid_east, 1., grid_north, 1.5)
    # X varies along columns, repeated for each northing row.
    expected_x = np.array([
        [-2., -1., 0., 1., 2],
        [-2., -1., 0., 1., 2],
        [-2., -1., 0., 1., 2]
    ])
    # Y varies along rows, repeated for each easting column.
    expected_y = np.array([
        [-1.5, -1.5, -1.5, -1.5, -1.5],
        [0., 0., 0., 0., 0.],
        [1.5, 1.5, 1.5, 1.5, 1.5]
    ])
    # Testing to make sure the axes are in the correct order.
    np.testing.assert_array_equal(test_grid_x, expected_x)
    np.testing.assert_array_equal(test_grid_y, expected_y)
def test_strip_resgrid():
    """_strip_resgrid drops padding from X/Y ends and the bottom of Z."""
    resgrid = np.array([
        [[100., 100., 100., 100., 100.], [150., 150., 150., 150., 150.], [100., 100., 100, 100., 100.]],
        [[150., 150., 150., 150., 150.], [100., 100., 100., 100., 100.], [150., 150., 150., 150., 150.]],
    ])
    # Transpose to get [[Y], [X], [Z]]
    resgrid = resgrid.T
    test = conv._strip_resgrid(resgrid, 1, 1, 1)
    # We're padding one cell off either side of X and Y, and one cell
    # off the end of Z. So we're left with the [150.] array in middle
    # of the top row, with two elements from either side removed.
    expected = np.array([[[150., 150., 150.]]]).T  # Again, note the transpose.
    np.testing.assert_array_equal(test, expected)
def test_get_depth_indicies():
    """_get_depth_indicies maps requested depths to unique Z-cell indices."""
    z_cells = np.asarray([0, 100, 500, 1000, 10000])
    assert conv._get_depth_indicies(z_cells, [49]) == {0}
    assert conv._get_depth_indicies(z_cells, [500]) == {2}
    # Should round up to nearest depth
    assert conv._get_depth_indicies(z_cells, [50]) == {1}
    # If two provided depths are closest to same index, only return
    # that index (by returning a set)
    assert conv._get_depth_indicies(z_cells, [50, 51]) == {1}
    # If no depths are provided, return every index in a set
    assert conv._get_depth_indicies(z_cells, []) == {0, 1, 2, 3, 4}
def test_interpolate_depth_slice():
    """_interpolate_slice resamples one depth slice onto the target grid.

    Checked in both log10 scale and linear scale.
    """
    ce = np.array([-2., 0., 2.])
    cse = 2.
    cn = np.array([-3., -1.5, 0., 1.5, 3.])
    csn = 1.5
    # Dummy resisitvity model
    resgrid = np.array([
        [[100., 100., 100., 100., 100.], [150., 150., 150., 150., 150.], [100., 100., 100, 100., 100.]],
        [[150., 150., 150., 150., 150.], [100., 100., 100., 100., 100.], [150., 150., 150., 150., 150.]]
    ])
    # Transpose it to get [[Y], [X], [Z]]
    resgrid = resgrid.T
    tgx, tgy = np.meshgrid(np.arange(ce[0], ce[-1], cse),
                           np.arange(cn[0], cn[-1], csn))
    res_slice = conv._interpolate_slice(ce, cn, resgrid, 0, tgx, tgy, log_scale=True)
    # log10(100) == 2.0, log10(150) ~= 2.17609126
    expected = np.array([
        [2., 2.17609126],
        [2., 2.17609126],
        [2., 2.17609126],
        [2., 2.17609126],
    ])
    assert np.allclose(res_slice, expected)
    res_slice = conv._interpolate_slice(ce, cn, resgrid, 0, tgx, tgy, log_scale=False)
    # NOTE(review): linear-scale expectations pinned from the current
    # implementation's output, not derived analytically.
    expected = np.array(
        [[1024., 2381.063868],
         [1024., 2381.063868],
         [1024., 2381.063868],
         [1024., 2381.063868]]
    )
    assert np.allclose(res_slice, expected)
def test_rotate_geotransform():
pixel_width, pixel_height = 5., 5.
origin_x, origin_y = 50., 100.
angle = 90.0
# GDAL transform:
# upperleft X, pixel width, row rotation, upperleft Y, column rotation, pixel height
gt = [origin_x, pixel_width, 0, origin_y, 0, pixel_height]
# Rotate about the upper-left
test = conv._rotate_transform(gt, angle, origin_x, origin_y)
expected = [gt[0], 3.061616997868383e-16, -5., gt[3], 5., 3.061616997868383e-16]
assert test == expected
# Rotate about a center point of (0., 0.)
test = conv._rotate_transform(gt, angle, 0., 0.)
expected = [100.0, 3.061616997868383e-16, -5., -49.99999999999999, 5., 3.061616997868383e-16] |
6,167 | test fail no service connection and overwrite | # Copyright 2021 Collate
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import importlib
import pathlib
import re
from unittest import TestCase
from metadata.config.common import ConfigurationError, load_config_file
from metadata.ingestion.ometa.ometa_api import OpenMetadata
from metadata.utils.logger import Loggers
from metadata.workflow.metadata import MetadataWorkflow
class WorkflowTest(TestCase):
    """Checks for dynamic sink loading and metadata ingestion workflows.

    The ``test_execute_*`` and debug-logging tests are integration tests:
    they require a reachable OpenMetadata server and the local MySQL
    service described in ``mysql_test.yaml``.
    """

    def test_get_200(self):
        """A fully-qualified sink class path resolves via importlib."""
        key = "metadata.ingestion.sink.metadata_rest.MetadataRestSink"
        module_name, class_name = key.rsplit(".", 1)
        my_class = getattr(importlib.import_module(module_name), class_name)
        self.assertIsNotNone(my_class)

    def test_get_4xx(self):
        """Resolving a bogus module path raises ModuleNotFoundError."""
        key = "metadata.ingestion.sink.MYSQL.mysqlSINK"
        module_name, class_name = key.rsplit(".", 1)
        # The previous try/except called assertRaises without a callable,
        # which asserted nothing and passed even when no error was raised.
        with self.assertRaises(ModuleNotFoundError):
            getattr(importlib.import_module(module_name), class_name)

    def test_title_typeClassFetch(self):
        """File-based type keys stay snake_case."""
        is_file = True
        file_type = "query-parser"
        if is_file:
            replace = file_type.replace("-", "_")
        else:
            replace = "".join(
                [i.title() for i in file_type.replace("-", "_").split("_")]
            )
        self.assertEqual(replace, "query_parser")

    def test_title_typeClassFetch_4xx(self):
        """Non-file type keys are converted to CamelCase."""
        is_file = False
        file_type = "query-parser"
        if is_file:
            replace = file_type.replace("-", "_")
        else:
            replace = "".join(
                [i.title() for i in file_type.replace("-", "_").split("_")]
            )
        self.assertEqual(replace, "QueryParser")

    def test_execute_200(self):
        """A full ingestion run registers the database service; clean up after."""
        current_dir = pathlib.Path(__file__).resolve().parent
        config_file = current_dir.joinpath("mysql_test.yaml")
        workflow_config = load_config_file(config_file)
        workflow = MetadataWorkflow.create(workflow_config)
        workflow.execute()
        workflow.stop()
        config = workflow.config.workflowConfig.openMetadataServerConfig
        client = OpenMetadata(config).client
        self.assertIsNotNone(
            client.get("/services/databaseServices/name/local_mysql_test")
        )
        # Hard-delete the service so repeated runs start from a clean server.
        client.delete(
            f"/services/databaseServices/"
            f"{client.get('/services/databaseServices/name/local_mysql_test')['id']}"
            f"?hardDelete=true&recursive=true"
        )

    def test_execute_4xx(self):
        """Loading a non-existent config file raises ConfigurationError."""
        config_file = pathlib.Path("/tmp/mysql_test123")
        with self.assertRaises(ConfigurationError):
            load_config_file(config_file)

    def METHOD_NAME(self):
        """Overwrite mode without a serviceConnection fails at workflow creation."""
        current_dir = pathlib.Path(__file__).resolve().parent
        config_file = current_dir.joinpath("mysql_test.yaml")
        workflow_config = load_config_file(config_file)
        del workflow_config["source"]["serviceConnection"]
        workflow_config["workflowConfig"]["openMetadataServerConfig"][
            "forceEntityOverwriting"
        ] = True
        with self.assertRaises(AttributeError):
            MetadataWorkflow.create(workflow_config)

    def test_debug_not_show_authorization_headers(self):
        """DEBUG logging must never leak Authorization header values."""
        current_dir = pathlib.Path(__file__).resolve().parent
        config_file = current_dir.joinpath("mysql_test.yaml")
        workflow_config = load_config_file(config_file)
        workflow = MetadataWorkflow.create(workflow_config)
        workflow_config["workflowConfig"]["loggerLevel"] = "DEBUG"
        # Matches log lines that end by printing a raw (unmasked) value
        # after an Authorization key.
        authorization_pattern = re.compile(
            r".*['\"]?Authorization['\"]?: ?['\"]?[^*]*$"
        )
        with self.assertLogs(Loggers.OMETA.value, level="DEBUG") as logger:
            workflow.execute()
            self.assertFalse(
                any(authorization_pattern.match(log) for log in logger.output),
                "Authorization headers are displayed in the logs",
            )
        workflow.stop()
        config = workflow.config.workflowConfig.openMetadataServerConfig
        client = OpenMetadata(config).client
        client.delete(
            f"/services/databaseServices/"
            f"{client.get('/services/databaseServices/name/local_mysql_test')['id']}"
            f"?hardDelete=true&recursive=true"
        )
6,168 | set up app | import uuid
from django.test import SimpleTestCase, TestCase
from couchdbkit import ResourceNotFound
from unittest.mock import patch
from corehq.apps.app_manager.models import Module
from corehq.apps.app_manager.tests.app_factory import AppFactory
from corehq.apps.app_manager.tests.util import patch_get_xform_resource_overrides, TestXmlMixin, get_simple_form
class ReleaseFormsSetupMixin(object):
    """Builds an app with a release-notes form (training module) plus a basic form."""

    def METHOD_NAME(self):
        self.factory = AppFactory(build_version='2.30.0')
        training_module = self.factory.app.add_module(
            Module.new_training_module('training module', None)
        )
        self.releases_form = self.factory.app.new_form(
            training_module.id, "Untitled Form", None
        )
        self.releases_form.is_release_notes_form = True
        self.releases_form.xmlns = f"http://openrosa.org/formdesigner/{uuid.uuid4().hex}"
        basic_module, self.basic_form = self.factory.new_basic_module(
            "basic_module", "doctor", with_form=True
        )
        self.basic_form.xmlns = f"http://openrosa.org/formdesigner/{uuid.uuid4().hex}"
@patch_get_xform_resource_overrides()
class ReleaseFormsEnabledTest(SimpleTestCase, ReleaseFormsSetupMixin, TestXmlMixin):
    """Suite generation when the release-notes form has release notes enabled."""

    def setUp(self):
        self.METHOD_NAME()
        self.releases_form.enable_release_notes = True
        super(ReleaseFormsEnabledTest, self).setUp()

    def test_resource(self, *args):
        """Release form is emitted as xform-update-info; plain forms stay as xform."""
        # release form should be as xform-update-info
        suite = self.factory.app.create_suite()
        xpath = "./xform-update-info"
        expected = """
        <partial>
          <xform-update-info>
            <resource id="{id}" descriptor="Form: (Module training module) - Untitled Form">
              <location authority="local">./modules-0/forms-0.xml</location>
              <location authority="remote">./modules-0/forms-0.xml</location>
            </resource>
          </xform-update-info>
        </partial>
        """.format(id=self.releases_form.unique_id)
        self.assertXmlPartialEqual(expected, suite, xpath)
        # normal form should be still under xform
        xpath = "./xform"
        expected = """
        <partial>
          <xform>
            <resource id="basic_module_form_0" descriptor="Form: (Module basic_module module) - basic_module form 0">
              <location authority="local">./modules-1/forms-0.xml</location>
              <location authority="remote">./modules-1/forms-0.xml</location>
            </resource>
          </xform>
        </partial>
        """
        # not included in resource
        self.assertXmlPartialEqual(expected, suite, xpath)

    def test_entry(self, *args):
        """Both the release form and the basic form get suite entries."""
        suite = self.factory.app.create_suite()
        expected = """
        <partial>
          <entry>
            <form>{release_xmlns}</form>
            <command id="m0-f0">
              <text>
                <locale id="forms.m0f0"/>
              </text>
            </command>
          </entry>
          <entry>
            <form>{basic_xmlns}</form>
            <command id="m1-f0">
              <text>
                <locale id="forms.m1f0"/>
              </text>
            </command>
          </entry>
        </partial>
        """.format(
            release_xmlns=self.releases_form.xmlns,
            basic_xmlns=self.basic_form.xmlns)
        # check entry exists
        self.assertXmlPartialEqual(expected, suite, "./entry")

    def test_command(self, *args):
        """Both forms' commands appear in the suite menus."""
        # check command in suite/menu exists
        suite = self.factory.app.create_suite()
        expected = """
        <partial>
          <menu id="m0" root="training-root" >
            <text>
              <locale id="modules.m0"/>
            </text>
            <command id="m0-f0"/>
          </menu>
          <menu id="m1">
            <text>
              <locale id="modules.m1"/>
            </text>
            <command id="m1-f0"/>
          </menu>
          <menu id="training-root">
            <text>
              <locale id="training.root.title"/>
            </text>
          </menu>
        </partial>
        """
        self.assertXmlPartialEqual(expected, suite, "./menu")
@patch_get_xform_resource_overrides()
class ReleaseFormsDisabledTest(SimpleTestCase, ReleaseFormsSetupMixin, TestXmlMixin):
    """Suite generation when the release-notes form has release notes disabled."""

    def setUp(self):
        self.METHOD_NAME()
        self.releases_form.enable_release_notes = False
        super(ReleaseFormsDisabledTest, self).setUp()

    def test_resource(self, *args):
        """No xform-update-info element; only the basic form is a resource."""
        # release form should be as xform-update-info
        suite = self.factory.app.create_suite()
        xpath = "./xform-update-info"
        self.assertXmlDoesNotHaveXpath(suite, xpath)
        expected = """
        <partial>
          <xform>
            <resource id="basic_module_form_0" descriptor="Form: (Module basic_module module) - basic_module form 0">
              <location authority="local">./modules-1/forms-0.xml</location>
              <location authority="remote">./modules-1/forms-0.xml</location>
            </resource>
          </xform>
        </partial>
        """
        self.assertXmlPartialEqual(expected, suite, './xform')

    def test_entry(self, *args):
        """Only the basic form gets a suite entry."""
        suite = self.factory.app.create_suite()
        expected = """
        <partial>
          <entry>
            <form>{basic_xmlns}</form>
            <command id="m1-f0">
              <text>
                <locale id="forms.m1f0"/>
              </text>
            </command>
          </entry>
        </partial>
        """.format(
            basic_xmlns=self.basic_form.xmlns)
        # check entry exists
        self.assertXmlPartialEqual(expected, suite, "./entry")

    def test_command(self, *args):
        """The release form's command is absent from the menus."""
        # check command in suite/menu exists
        suite = self.factory.app.create_suite()
        expected = """
        <partial>
          <menu id="m1">
            <text>
              <locale id="modules.m1"/>
            </text>
            <command id="m1-f0"/>
          </menu>
          <menu id="training-root">
            <text>
              <locale id="training.root.title"/>
            </text>
          </menu>
        </partial>
        """
        self.assertXmlPartialEqual(expected, suite, "./menu")
class ReleaseNotesResourceFileTest(TestCase, ReleaseFormsSetupMixin, TestXmlMixin):
    """Build-file generation for release-notes forms, enabled vs disabled."""
    file_path = ('data',)

    def setUp(self):
        self.METHOD_NAME()
        self.releases_form.source = get_simple_form(xmlns=self.releases_form.xmlns)
        self.basic_form.source = get_simple_form(xmlns=self.basic_form.xmlns)
        self.factory.app.save()
        super(ReleaseNotesResourceFileTest, self).setUp()

    @patch('corehq.apps.app_manager.models.validate_xform', return_value=None)
    @patch('corehq.apps.app_manager.models.FormBase.is_a_disabled_release_form', return_value=False)
    def test_enabled(self, *args):
        """An enabled release form is written into the build attachments."""
        # check form in resource files
        self.factory.app.create_build_files()
        copy = self.factory.app.make_build()
        copy.save()
        self.assertTrue(copy.lazy_fetch_attachment('files/modules-0/forms-0.xml'))

    @patch('corehq.apps.app_manager.models.validate_xform', return_value=None)
    @patch('corehq.apps.app_manager.models.FormBase.is_a_disabled_release_form', return_value=True)
    def test_disabled(self, *args):
        """A disabled release form is omitted from the build attachments."""
        self.factory.app.create_build_files()
        copy = self.factory.app.make_build()
        copy.save()
        with self.assertRaises(ResourceNotFound):
            self.assertTrue(copy.lazy_fetch_attachment('files/modules-0/forms-0.xml'))
6,169 | write doc | #!/usr/bin/env python
import inspect
from pathlib import Path
from IPython.terminal.ipapp import TerminalIPythonApp
from ipykernel.kernelapp import IPKernelApp
from traitlets import Undefined
from collections import defaultdict
# Output locations: the generated rST lands in source/config/options,
# next to this script.
here = (Path(__file__)).parent
options = here / "source" / "config" / "options"
generated = options / "config-generated.txt"
import textwrap


def indent(text, n):
    """Return *text* with every non-blank line prefixed by *n* spaces."""
    # A def (rather than a lambda bound to a name, PEP 8 E731) keeps the
    # callable introspectable and documentable.
    return textwrap.indent(text, n * ' ')
def interesting_default_value(dv):
    """Return True when *dv* is a default value worth documenting.

    None and traitlets' Undefined are never interesting; empty
    containers and empty strings are boring too.
    """
    # Check None first so Undefined is never even evaluated for it.
    if dv is None:
        return False
    if dv is Undefined:
        return False
    if isinstance(dv, (str, list, tuple, dict, set)):
        return bool(dv)
    return True
def format_aliases(aliases):
    """Render CLI aliases as rST literals, e.g. ``-v``, ``--verbose``.

    Single-character aliases get one dash, longer ones two.
    """
    rendered = (
        '``{}{}``'.format('-' if len(alias) == 1 else '--', alias)
        for alias in aliases
    )
    return ', '.join(rendered)
def class_config_rst_doc(cls, trait_aliases):
    """Generate rST documentation for this class' config options.

    Excludes traits defined on parent classes.
    """
    lines = []
    classname = cls.__name__
    for k, trait in sorted(cls.class_traits(config=True).items()):
        ttype = trait.__class__.__name__
        fullname = classname + '.' + trait.name
        lines += ['.. configtrait:: ' + fullname,
                  ''
                  ]
        help = trait.help.rstrip() or 'No description'
        lines.append(indent(inspect.cleandoc(help), 4) + '\n')
        # Choices or type
        if 'Enum' in ttype:
            # include Enum choices
            lines.append(indent(
                ':options: ' + ', '.join('``%r``' % x for x in trait.values), 4))
        else:
            lines.append(indent(':trait type: ' + ttype, 4))
        # Default value
        # Ignore boring default values like None, [] or ''
        if interesting_default_value(trait.default_value):
            try:
                dvr = trait.default_value_repr()
            except Exception:
                dvr = None  # ignore defaults we can't construct
            if dvr is not None:
                if len(dvr) > 64:
                    # Truncate long reprs so the table stays readable.
                    dvr = dvr[:61] + '...'
                # Double up backslashes, so they get to the rendered docs
                dvr = dvr.replace('\\n', '\\\\n')
                lines.append(indent(':default: ``%s``' % dvr, 4))
        # Command line aliases
        if trait_aliases[fullname]:
            fmt_aliases = format_aliases(trait_aliases[fullname])
            lines.append(indent(':CLI option: ' + fmt_aliases, 4))
        # Blank line
        lines.append('')
    return '\n'.join(lines)
def reverse_aliases(app):
    """Produce a mapping of trait names to lists of command line aliases.

    Keys are fully-qualified trait names ('Class.trait'); values list
    every alias or flag that sets that trait.
    """
    trait_to_aliases = defaultdict(list)
    for alias, trait in app.aliases.items():
        trait_to_aliases[trait].append(alias)
    # Flags also often act as aliases for a boolean trait.
    # A flag that sets exactly one trait of one class to True is treated
    # as an alias for that trait.
    for flag, (cfg, _help) in app.flags.items():
        if len(cfg) != 1:
            continue
        (classname, cls_cfg), = cfg.items()
        if len(cls_cfg) != 1:
            continue
        (traitname, value), = cls_cfg.items()
        if value is True:
            trait_to_aliases['%s.%s' % (classname, traitname)].append(flag)
    return trait_to_aliases
def METHOD_NAME(name, title, app, preamble=None):
    """Write the rST config-option reference for *app* to options/<name>.rst.

    The page gets a '<name>_options' anchor, the given title, an optional
    preamble paragraph, then one documented section per configurable class
    (including parent classes).
    """
    trait_aliases = reverse_aliases(app)
    filename = options / (name + ".rst")
    with open(filename, "w", encoding="utf-8") as f:
        f.write(".. _" + name + "_options:" + "\n\n")
        f.write(title + "\n")
        f.write(("=" * len(title)) + "\n")
        f.write("\n")
        if preamble is not None:
            f.write(preamble + '\n\n')
        #f.write(app.document_config_options())
        for c in app._classes_inc_parents():
            f.write(class_config_rst_doc(c, trait_aliases))
            f.write('\n')
if __name__ == '__main__':
    # Touch this file for the make target
    Path(generated).write_text("", encoding="utf-8")
    # Generate one options page per application.
    METHOD_NAME('terminal', 'Terminal IPython options', TerminalIPythonApp())
    METHOD_NAME('kernel', 'IPython kernel options', IPKernelApp(),
              preamble=("These options can be used in :file:`ipython_kernel_config.py`. "
                        "The kernel also respects any options in `ipython_config.py`"),
              )
6,170 | test lesson loader missing icon | import os.path
from django.test import override_settings
from tests.BaseTestWithDB import BaseTestWithDB
from tests.at_a_distance.AtADistanceTestDataGenerator import AtADistanceTestDataGenerator
from at_a_distance.models import Lesson
from at_a_distance.management.commands._LessonLoader import AtADistanceLessonLoader
from utils.errors.CouldNotFindYAMLFileError import CouldNotFindYAMLFileError
from utils.errors.EmptyMarkdownFileError import EmptyMarkdownFileError
from utils.errors.MissingRequiredFieldError import MissingRequiredFieldError
from utils.errors.NoHeadingFoundInMarkdownFileError import NoHeadingFoundInMarkdownFileError
from utils.errors.InvalidYAMLValueError import InvalidYAMLValueError
class AtADistanceLessonsLoaderTest(BaseTestWithDB):
    """Tests for the at-a-distance lesson loader.

    Each test points the loader at a dedicated asset directory named after
    the scenario it exercises.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.test_data = AtADistanceTestDataGenerator()
        self.loader_name = "lessons"
        self.base_path = os.path.join(self.test_data.LOADER_ASSET_PATH, self.loader_name)

    def _make_loader(self, test_name, order_number=1):
        """Build a lesson loader pointed at the named test asset directory."""
        return AtADistanceLessonLoader(
            order_number,
            structure_filename=f"{test_name}.yaml",
            content_path=test_name,
            base_path=self.base_path,
        )

    def test_basic_lesson_loader_configuration(self):
        """A minimal valid config creates a single lesson."""
        self._make_loader("basic-config").load()
        self.assertQuerysetEqual(
            Lesson.objects.all(),
            ["<Lesson: Lesson 1>"]
        )

    def test_lesson_loader_slug_set_correctly(self):
        self._make_loader("valid-slug").load()
        self.assertEqual(
            Lesson.objects.all()[0].slug,
            "valid-slug",
        )

    def test_lesson_loader_name_set_correctly(self):
        """The lesson name comes from the Markdown heading."""
        self._make_loader("valid-name").load()
        self.assertEqual(
            Lesson.objects.get(slug="valid-name").name,
            "Lesson 1",
        )

    def test_lesson_loader_missing_name_text(self):
        """Markdown without a heading cannot provide a lesson name."""
        lesson_loader = self._make_loader("missing-title")
        self.assertRaises(
            NoHeadingFoundInMarkdownFileError,
            lesson_loader.load,
        )

    def test_lesson_loader_content_set_correctly(self):
        """The lesson introduction is the rendered Markdown body."""
        self._make_loader("valid-content").load()
        self.assertEqual(
            Lesson.objects.get(slug="valid-content").introduction,
            "<p>Example content text.</p>",
        )

    def test_lesson_loader_missing_content_text(self):
        lesson_loader = self._make_loader("missing-content")
        self.assertRaises(
            EmptyMarkdownFileError,
            lesson_loader.load,
        )

    def test_lesson_loader_missing_suitable_for_teaching_students(self):
        lesson_loader = self._make_loader("missing-suitable-for-teaching-students")
        self.assertRaises(
            MissingRequiredFieldError,
            lesson_loader.load,
        )

    def test_lesson_loader_invalid_value_suitable_for_teaching_students(self):
        lesson_loader = self._make_loader("invalid-value-suitable-for-teaching-students")
        self.assertRaises(
            InvalidYAMLValueError,
            lesson_loader.load,
        )

    def test_lesson_loader_missing_suitable_for_teaching_educators(self):
        lesson_loader = self._make_loader("missing-suitable-for-teaching-educators")
        self.assertRaises(
            MissingRequiredFieldError,
            lesson_loader.load,
        )

    def test_lesson_loader_invalid_value_suitable_for_teaching_educators(self):
        lesson_loader = self._make_loader("invalid-value-suitable-for-teaching-educators")
        self.assertRaises(
            InvalidYAMLValueError,
            lesson_loader.load,
        )

    @override_settings(STATIC_ROOT="tests/at_a_distance/loaders/assets/lessons/static")
    def test_lesson_loader_valid_icon(self):
        """An icon listed in the config is stored on the lesson."""
        self._make_loader("valid-icon").load()
        self.assertEqual(
            Lesson.objects.get(slug="valid-icon").icon,
            "img/valid-icon.png",
        )

    def METHOD_NAME(self):
        """Lessons without an icon entry load with icon unset."""
        self._make_loader("missing-icon").load()
        self.assertIsNone(Lesson.objects.get(slug="missing-icon").icon)

    def test_lesson_loader_updating_lesson(self):
        """Re-loading with a new order number updates, not duplicates."""
        self._make_loader("basic-config", order_number=1).load()
        self.assertEqual(
            Lesson.objects.get(slug="basic-config").order_number,
            1,
        )
        self._make_loader("basic-config", order_number=2).load()
        self.assertEqual(
            Lesson.objects.get(slug="basic-config").order_number,
            2,
        )

    def test_lesson_loader_valid_supporting_resources(self):
        self._make_loader("valid-supporting-resources").load()
        self.assertIsNone(Lesson.objects.get(slug="valid-supporting-resources").icon)

    def test_lesson_loader_missing_supporting_resources(self):
        """A referenced-but-absent resources YAML file is an error."""
        lesson_loader = self._make_loader("missing-supporting-resources")
        self.assertRaises(
            CouldNotFindYAMLFileError,
            lesson_loader.load,
        )
6,171 | command queue | from typing import Awaitable, Optional
from robot_server.robot.calibration.deck.user_flow import DeckCalibrationUserFlow
from robot_server.robot.calibration.deck.models import DeckCalibrationSessionStatus
from robot_server.service.session.errors import (
SessionCreationException,
CommandExecutionException,
)
from robot_server.service.session.command_execution import (
CallableExecutor,
Command,
CompletedCommand,
CommandQueue,
CommandExecutor,
)
from .base_session import BaseSession, SessionMetaData
from ..configuration import SessionConfiguration
from ..models.session import SessionType, DeckCalibrationResponseAttributes
from ..errors import UnsupportedFeature
class DeckCalibrationCommandExecutor(CallableExecutor):
    """Executor that surfaces user-flow assertion failures as command errors."""

    async def execute(self, command: Command) -> CompletedCommand:
        try:
            return await super().execute(command)
        except AssertionError as e:
            # The user flow signals invalid state transitions with
            # AssertionError; re-raise as the API-level command error.
            raise CommandExecutionException(str(e))
class DeckCalibrationSession(BaseSession):
    """Session wrapping a deck-calibration user flow.

    Owns the user flow, a command executor that converts flow assertion
    failures into command-execution errors, and an optional shutdown
    coroutine that restores the rail lights during clean-up.
    """
    def __init__(
        self,
        configuration: SessionConfiguration,
        instance_meta: SessionMetaData,
        deck_cal_user_flow: DeckCalibrationUserFlow,
        shutdown_handler: Optional[Awaitable[None]] = None,
    ):
        """
        :param configuration: shared session configuration (hardware access)
        :param instance_meta: identifier/creation metadata for this session
        :param deck_cal_user_flow: flow that actually handles commands
        :param shutdown_handler: optional awaitable run once in clean_up()
        """
        super().__init__(configuration, instance_meta)
        self._deck_cal_user_flow = deck_cal_user_flow
        self._command_executor = DeckCalibrationCommandExecutor(
            self._deck_cal_user_flow.handle_command
        )
        self._shutdown_coroutine = shutdown_handler
    @classmethod
    async def create(
        cls, configuration: SessionConfiguration, instance_meta: SessionMetaData
    ) -> "BaseSession":
        """Build a session, turning rail lights on only if they were off and
        arranging for them to be turned off again at clean-up.

        :raises SessionCreationException: when the user flow rejects the
            current hardware state (surfaced from an AssertionError).
        """
        # if lights are on already it's because the user clicked the button,
        # so a) we don't need to turn them on now and b) we shouldn't turn them
        # off after
        session_controls_lights = not (await configuration.hardware.get_lights())[
            "rails"
        ]
        await configuration.hardware.cache_instruments()
        try:
            deck_cal_user_flow = DeckCalibrationUserFlow(
                hardware=configuration.hardware
            )
        except AssertionError as e:
            raise SessionCreationException(str(e))
        if session_controls_lights:
            await configuration.hardware.set_lights(rails=True)
            # NOTE: this creates (but does not await) the lights-off
            # coroutine; it is awaited later in clean_up().
            shutdown_handler: Optional[
                Awaitable[None]
            ] = configuration.hardware.set_lights(rails=False)
        else:
            shutdown_handler = None
        return cls(
            configuration=configuration,
            instance_meta=instance_meta,
            deck_cal_user_flow=deck_cal_user_flow,
            shutdown_handler=shutdown_handler,
        )
    @property
    def command_executor(self) -> CommandExecutor:
        """Executor used by the session framework to run flow commands."""
        return self._command_executor
    @property
    def METHOD_NAME(self) -> CommandQueue:
        """Deck calibration executes commands directly; queueing is unsupported."""
        raise UnsupportedFeature()
    @property
    def session_type(self) -> SessionType:
        """This session's type tag."""
        return SessionType.deck_calibration
    def get_response_model(self) -> DeckCalibrationResponseAttributes:
        """Serialize this session into its API response attributes."""
        return DeckCalibrationResponseAttributes(
            id=self.meta.identifier,
            createParams=self.meta.create_params,
            details=self._get_response_details(),
            createdAt=self.meta.created_at,
        )
    def _get_response_details(self) -> DeckCalibrationSessionStatus:
        """Snapshot the user flow's current state for the response payload."""
        # TODO(mc, 2020-09-17): get_pipette() returns an Optional value but
        # DeckCalibrationSessionStatus has an exact type for instrument
        supported_commands = self._deck_cal_user_flow.supported_commands
        return DeckCalibrationSessionStatus(
            instrument=self._deck_cal_user_flow.get_pipette(), # type: ignore[arg-type]
            currentStep=self._deck_cal_user_flow.current_state,
            labware=self._deck_cal_user_flow.get_required_labware(),
            supportedCommands=supported_commands,
        )
    async def clean_up(self):
        """Await the deferred lights-off coroutine, if this session owns one."""
        if self._shutdown_coroutine:
            await self._shutdown_coroutine
6,172 | encode response output | from typing import Any, Union, Dict, Optional, List
from ..types import (
InferenceRequest,
InferenceResponse,
RequestInput,
RequestOutput,
ResponseOutput,
MetadataTensor,
Parameters,
)
from ..settings import ModelSettings
from .base import (
find_input_codec,
find_input_codec_by_payload,
find_request_codec,
find_request_codec_by_payload,
RequestCodec,
InputCodecLike,
RequestCodecLike,
)
from .errors import CodecError
DefaultOutputPrefix = "output-"
DefaultInputPrefix = "input-"
InputOrOutput = Union[RequestInput, ResponseOutput]
Codec = Union[InputCodecLike, RequestCodecLike]
Parametrised = Union[
InferenceRequest, RequestInput, RequestOutput, ResponseOutput, InferenceResponse
]
Tagged = Union[MetadataTensor, ModelSettings]
DecodedParameterName = "_decoded_payload"
def inject_batch_dimension(shape: List[int]) -> List[int]:
    """
    Utility method to ensure that 1-dimensional shapes
    assume that `[N] == [N, 1]`.

    A shape that already has two or more dimensions is returned unchanged;
    otherwise a trailing dimension of size 1 is appended (so `[]` -> `[1]`).
    """
    # Fixed docstring: the appended dimension is always 1, not an arbitrary D.
    if len(shape) > 1:
        return shape
    return shape + [1]
def _get_content_type(
    request: Parametrised, metadata: Optional[Tagged] = None
) -> Optional[str]:
    """Resolve the content type for *request*, preferring the request's own
    parameters and falling back to the optional metadata tensor/settings."""
    request_params = request.parameters
    if request_params and request_params.content_type:
        return request_params.content_type
    if metadata is not None:
        metadata_params = metadata.parameters
        if metadata_params and metadata_params.content_type:
            return metadata_params.content_type
    return None
def _save_decoded(parametrised_obj: Parametrised, decoded_payload: Any):
    """Cache *decoded_payload* on the object's parameters under the reserved
    decoded-payload key, creating a Parameters object if none exists."""
    params = parametrised_obj.parameters
    if not params:
        params = Parameters()
        parametrised_obj.parameters = params
    setattr(params, DecodedParameterName, decoded_payload)
def METHOD_NAME(
    payload: Any,
    request_output: RequestOutput,
    metadata_outputs: Dict[str, MetadataTensor] = {},
) -> Optional[ResponseOutput]:
    """Encode *payload* into a ResponseOutput for *request_output*.

    The codec is picked from the resolved content type when one is set,
    otherwise inferred from the payload. Returns None when no codec applies.
    """
    # NOTE: the shared dict default is safe here because it is only read.
    tensor_metadata = metadata_outputs.get(request_output.name)
    content_type = _get_content_type(request_output, tensor_metadata)
    if content_type:
        codec = find_input_codec(content_type)
    else:
        codec = find_input_codec_by_payload(payload)
    if not codec:
        return None
    return codec.encode_output(name=request_output.name, payload=payload)
def encode_inference_response(
    payload: Any,
    model_settings: ModelSettings,
) -> Optional[InferenceResponse]:
    """Encode an arbitrary payload as an InferenceResponse for the given
    model, or return None when no request codec can handle it."""
    # TODO: Allow users to override codec through model's metadata
    codec = find_request_codec_by_payload(payload)
    if not codec:
        return None
    version = (
        model_settings.parameters.version if model_settings.parameters else None
    )
    return codec.encode_response(model_settings.name, payload, version)
def decode_request_input(
    request_input: RequestInput,
    metadata_inputs: Dict[str, MetadataTensor] = {},
) -> Optional[Any]:
    """Decode a single request input using its content-type codec.

    The decoded value is cached on the input's parameters and returned;
    None is returned when no content type or codec is available.
    """
    tensor_metadata = metadata_inputs.get(request_input.name)
    content_type = _get_content_type(request_input, tensor_metadata)
    if content_type is None:
        return None
    codec = find_input_codec(content_type)
    if codec is None:
        return None
    decoded = codec.decode_input(request_input)
    _save_decoded(request_input, decoded)
    return decoded
def decode_inference_request(
    inference_request: InferenceRequest,
    model_settings: Optional[ModelSettings] = None,
    metadata_inputs: Dict[str, MetadataTensor] = {},
) -> Optional[Any]:
    """Decode every input of *inference_request* in place, then decode the
    request as a whole when a request-level content type is set.

    Falls back to returning the (input-decoded) request itself when no
    request-level codec applies.
    """
    for single_input in inference_request.inputs:
        decode_request_input(single_input, metadata_inputs)
    request_content_type = _get_content_type(inference_request, model_settings)
    if request_content_type is not None:
        request_codec = find_request_codec(request_content_type)
        if request_codec is not None:
            decoded = request_codec.decode_request(inference_request)
            _save_decoded(inference_request, decoded)
            return decoded
    return inference_request
def has_decoded(parametrised_obj: Parametrised) -> bool:
    """Return True when a decoded payload has been cached on the object."""
    params = parametrised_obj.parameters
    return bool(params) and hasattr(params, DecodedParameterName)
def get_decoded(parametrised_obj: Parametrised) -> Any:
    """Return the cached decoded payload, or None when nothing was cached."""
    if not has_decoded(parametrised_obj):
        return None
    return getattr(parametrised_obj.parameters, DecodedParameterName)
def get_decoded_or_raw(parametrised_obj: Parametrised) -> Any:
    """Return the cached decoded payload when present.

    When nothing has been decoded, fall back to the raw tensor data for
    request inputs / response outputs, or to the full object otherwise.
    """
    if not has_decoded(parametrised_obj):
        # Both tensor types expose their raw payload as `.data`; the two
        # previously duplicated isinstance branches are merged here.
        if isinstance(parametrised_obj, (RequestInput, ResponseOutput)):
            return parametrised_obj.data
        # Otherwise, return full object
        return parametrised_obj
    return get_decoded(parametrised_obj)
class SingleInputRequestCodec(RequestCodec):
    """
    The SingleInputRequestCodec can be used as a "meta-implementation" for other
    codecs. Its goal is to decode the whole request simply as the first decoded
    element.

    Subclasses set ``InputCodec`` (``ContentType`` is presumably provided by
    the ``RequestCodec`` base or the subclass — not visible here); requests
    and responses are then restricted to exactly one tensor, which is
    encoded/decoded with that input codec.
    """
    # Input codec supplied by concrete subclasses; None disables encoding.
    InputCodec: Optional[InputCodecLike] = None
    @classmethod
    def can_encode(cls, payload: Any) -> bool:
        """True when the configured input codec can encode *payload*."""
        if cls.InputCodec is None:
            return False
        return cls.InputCodec.can_encode(payload)
    @classmethod
    def encode_response(
        cls,
        model_name: str,
        payload: Any,
        model_version: Optional[str] = None,
        **kwargs,
    ) -> InferenceResponse:
        """Encode *payload* as a one-output InferenceResponse.

        :raises NotImplementedError: when no ``InputCodec`` is configured.
        """
        if cls.InputCodec is None:
            raise NotImplementedError(
                f"No input codec found for {type(cls)} request codec"
            )
        output = cls.InputCodec.encode_output(
            f"{DefaultOutputPrefix}1", payload, **kwargs
        )
        return InferenceResponse(
            model_name=model_name,
            model_version=model_version,
            parameters=Parameters(content_type=cls.ContentType),
            outputs=[output],
        )
    @classmethod
    def decode_response(cls, response: InferenceResponse) -> Any:
        """Decode the single output of *response* (caching the result).

        :raises CodecError: when the response has more than one output.
        """
        if len(response.outputs) != 1:
            raise CodecError(
                f"The '{cls.ContentType}' codec only supports a single output tensor "
                f"({len(response.outputs)} were received)"
            )
        first_output = response.outputs[0]
        if not has_decoded(first_output) and cls.InputCodec is not None:
            decoded_payload = cls.InputCodec.decode_output(first_output) # type: ignore
            _save_decoded(first_output, decoded_payload)
        return get_decoded_or_raw(first_output)
    @classmethod
    def encode_request(cls, payload: Any, **kwargs) -> InferenceRequest:
        """Encode *payload* as a one-input InferenceRequest.

        :raises NotImplementedError: when no ``InputCodec`` is configured.
        """
        if cls.InputCodec is None:
            raise NotImplementedError(
                f"No input codec found for {type(cls)} request codec"
            )
        inp = cls.InputCodec.encode_input(f"{DefaultInputPrefix}1", payload, **kwargs)
        return InferenceRequest(
            inputs=[inp], parameters=Parameters(content_type=cls.ContentType)
        )
    @classmethod
    def decode_request(cls, request: InferenceRequest) -> Any:
        """Decode the single input of *request* (caching the result).

        :raises CodecError: when the request has more than one input.
        """
        if len(request.inputs) != 1:
            raise CodecError(
                f"The '{cls.ContentType}' codec only supports a single input tensor "
                f"({len(request.inputs)} were received)"
            )
        first_input = request.inputs[0]
        if not has_decoded(first_input) and cls.InputCodec is not None:
            decoded_payload = cls.InputCodec.decode_input(first_input) # type: ignore
            _save_decoded(first_input, decoded_payload)
        return get_decoded_or_raw(first_input)
6,173 | execute driver | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""This script automates the process of generating C test files using
the csmith tool.
The script will:
* generate a fresh C source file
* compile the C source
* execute the C executable
* translate the C source to Rust source
* compile the Rust source
* execute the Rust executable
* check that the C and Rust executables produced the same output
"""
import subprocess
import os
import logging
from shutil import copyfile
import tempfile
import common
DEFAULT_CSMITH_HOME = "/usr/local/opt/csmith/include/csmith-2.3.0/runtime"
CSMITH_HOME = os.environ.get("CSMITH_HOME", DEFAULT_CSMITH_HOME)
CSMITH_CMD = ["csmith", "--no-bitfields", "--no-builtins"]
C_COMPILER = "clang"
RUST_COMPILER = "rustc"
CSMITH_TIMEOUT = 5 # seconds to wait for C compiled executable to run
def validate_csmith_home() -> None:
    """Check that csmith.h can be found in CSMITH_HOME.

    Prints a diagnostic and terminates the process with exit status 1 when
    the header is unreadable.
    """
    csmith_header = os.path.join(CSMITH_HOME, 'csmith.h')
    if not os.access(csmith_header, os.R_OK):
        print('Unable to access csmith header: %s' % csmith_header)
        print('Please set the CSMITH_HOME environment variable to the '
              'directory containing this header.')
        # `exit()` is the interactive helper injected by the `site` module
        # and may be absent (e.g. under `python -S`); raising SystemExit is
        # the reliable, equivalent way to terminate with status 1.
        raise SystemExit(1)
def create_compile_commands(dirname: str, output_c_name: str) -> str:
    """Write a compile_commands.json for the csmith source and return its path."""
    entry = {
        'directory': dirname,
        'arguments': [C_COMPILER, "-I", CSMITH_HOME, output_c_name],
        'file': output_c_name,
    }
    compile_commands_name = os.path.join(dirname, 'compile_commands.json')
    with open(compile_commands_name, 'w') as json_file:
        json_file.write(common.json_pp_obj([entry]))
    return compile_commands_name
def generate_c_source(dirname: str, output_c_name: str) -> None:
    """Run csmith in *dirname* and capture its output into *output_c_name*."""
    with open(output_c_name, 'w') as c_file:
        logging.info("Generating C source file with csmith")
        subprocess.run(CSMITH_CMD, cwd=dirname, stdout=c_file, check=True)
def transpile_file(dirname: str, output_c_name: str) -> None:
    """Translate the given C file to Rust via the shared transpile helper."""
    commands_path = create_compile_commands(dirname, output_c_name)
    common.transpile(commands_path, emit_build_files=False)
def compile_c_file(output_c_name: str, output_c_exe_name: str) -> None:
    """Compile the csmith C source into the given executable, quietly."""
    logging.info("Compiling C source file with clang")
    cmd = [C_COMPILER, "-I", CSMITH_HOME, "-o", output_c_exe_name, output_c_name]
    subprocess.run(
        cmd,
        check=True,
        stdout=subprocess.DEVNULL,
        stderr=subprocess.DEVNULL,
    )
def METHOD_NAME(exe_name: str) -> bytes:
    """Run *exe_name* (with a timeout) and return its captured stdout bytes."""
    logging.info("Executing: %s", exe_name)
    completed = subprocess.run(
        exe_name,
        capture_output=True,
        check=True,
        timeout=CSMITH_TIMEOUT,
    )
    stdout_bytes = completed.stdout
    logging.info("Execution finished: %s", stdout_bytes)
    return stdout_bytes
def compile_rust_file(output_c_name: str, output_rs_name: str, output_rs_exec_name: str) -> None:
    """Compile the given Rust source file.

    On failure the C and Rust sources are copied into the working directory
    for post-mortem debugging, then the error is re-raised.
    """
    logging.info("Compiling translated Rust")
    compile_rust_cmd = [RUST_COMPILER, '-Awarnings', output_rs_name, '-o', output_rs_exec_name]
    try:
        subprocess.run(compile_rust_cmd, check=True)
    except Exception:
        # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit are
        # not intercepted just to copy debug artifacts; the error is still
        # re-raised either way.
        logging.info("Compile failure, saving source files locally")
        copyfile(output_c_name, 'output.c')
        copyfile(output_rs_name, 'output.rs')
        raise
def main() -> None:
    """Generate a new csmith test case and compare its execution to the translated Rust version."""
    validate_csmith_home()
    common.setup_logging()
    with tempfile.TemporaryDirectory('_c2rust_csmith') as workdir:
        # Filenames for all generated artifacts, kept inside the tempdir.
        c_src = os.path.join(workdir, 'output.c')
        c_exe = os.path.join(workdir, 'output.c.exe')
        rs_src = os.path.join(workdir, 'output.rs')
        rs_exe = os.path.join(workdir, 'output.rs.exe')
        logging.info("Using temporary directory: %s", workdir)
        # Generate and run C version
        generate_c_source(workdir, c_src)
        compile_c_file(c_src, c_exe)
        expected = METHOD_NAME(c_exe)
        # Generate and run Rust version
        transpile_file(workdir, c_src)
        compile_rust_file(c_src, rs_src, rs_exe)
        actual = METHOD_NAME(rs_exe)
        if expected == actual:
            logging.info("Match")
        else:
            logging.info("FAILURE: %s %s", expected, actual)
            copyfile(c_src, 'output.c')
            copyfile(rs_src, 'output.rs')
if __name__ == "__main__":
main() |
6,174 | test compute cells can depend on other | # These tests are auto-generated with test data from:
# https://github.com/exercism/problem-specifications/tree/main/exercises/react/canonical-data.json
# File last updated on 2023-07-19
from functools import partial
import unittest
from react import (
InputCell,
ComputeCell,
)
class ReactTest(unittest.TestCase):
    """Exercise the reactive-cell system: InputCell values propagate through
    ComputeCells, and callbacks fire only on observable value changes.

    NOTE: this file is auto-generated from the exercism canonical data (see
    the header comment), so the test bodies are kept byte-for-byte intact.
    """
    def test_input_cells_have_a_value(self):
        input = InputCell(10)
        self.assertEqual(input.value, 10)
    def test_an_input_cell_s_value_can_be_set(self):
        input = InputCell(4)
        input.value = 20
        self.assertEqual(input.value, 20)
    def test_compute_cells_calculate_initial_value(self):
        input = InputCell(1)
        output = ComputeCell(
            [
                input,
            ],
            lambda inputs: inputs[0] + 1,
        )
        self.assertEqual(output.value, 2)
    def test_compute_cells_take_inputs_in_the_right_order(self):
        one = InputCell(1)
        two = InputCell(2)
        output = ComputeCell(
            [
                one,
                two,
            ],
            lambda inputs: inputs[0] + inputs[1] * 10,
        )
        self.assertEqual(output.value, 21)
    def test_compute_cells_update_value_when_dependencies_are_changed(self):
        input = InputCell(1)
        output = ComputeCell(
            [
                input,
            ],
            lambda inputs: inputs[0] + 1,
        )
        input.value = 3
        self.assertEqual(output.value, 4)
    def METHOD_NAME(self):
        input = InputCell(1)
        times_two = ComputeCell(
            [
                input,
            ],
            lambda inputs: inputs[0] * 2,
        )
        times_thirty = ComputeCell(
            [
                input,
            ],
            lambda inputs: inputs[0] * 30,
        )
        output = ComputeCell(
            [
                times_two,
                times_thirty,
            ],
            lambda inputs: inputs[0] + inputs[1],
        )
        self.assertEqual(output.value, 32)
        input.value = 3
        self.assertEqual(output.value, 96)
    def test_compute_cells_fire_callbacks(self):
        input = InputCell(1)
        output = ComputeCell(
            [
                input,
            ],
            lambda inputs: inputs[0] + 1,
        )
        cb1_observer = []
        callback1 = self.callback_factory(cb1_observer)
        output.add_callback(callback1)
        input.value = 3
        self.assertEqual(cb1_observer[-1], 4)
    def test_callback_cells_only_fire_on_change(self):
        input = InputCell(1)
        output = ComputeCell([input], lambda inputs: 111 if inputs[0] < 3 else 222)
        cb1_observer = []
        callback1 = self.callback_factory(cb1_observer)
        output.add_callback(callback1)
        input.value = 2
        self.assertEqual(cb1_observer, [])
        input.value = 4
        self.assertEqual(cb1_observer[-1], 222)
    def test_callbacks_do_not_report_already_reported_values(self):
        input = InputCell(1)
        output = ComputeCell(
            [
                input,
            ],
            lambda inputs: inputs[0] + 1,
        )
        cb1_observer = []
        callback1 = self.callback_factory(cb1_observer)
        output.add_callback(callback1)
        input.value = 2
        self.assertEqual(cb1_observer[-1], 3)
        input.value = 3
        self.assertEqual(cb1_observer[-1], 4)
    def test_callbacks_can_fire_from_multiple_cells(self):
        input = InputCell(1)
        plus_one = ComputeCell(
            [
                input,
            ],
            lambda inputs: inputs[0] + 1,
        )
        minus_one = ComputeCell(
            [
                input,
            ],
            lambda inputs: inputs[0] - 1,
        )
        cb1_observer = []
        cb2_observer = []
        callback1 = self.callback_factory(cb1_observer)
        callback2 = self.callback_factory(cb2_observer)
        plus_one.add_callback(callback1)
        minus_one.add_callback(callback2)
        input.value = 10
        self.assertEqual(cb1_observer[-1], 11)
        self.assertEqual(cb2_observer[-1], 9)
    def test_callbacks_can_be_added_and_removed(self):
        input = InputCell(11)
        output = ComputeCell(
            [
                input,
            ],
            lambda inputs: inputs[0] + 1,
        )
        cb1_observer = []
        cb2_observer = []
        cb3_observer = []
        callback1 = self.callback_factory(cb1_observer)
        callback2 = self.callback_factory(cb2_observer)
        callback3 = self.callback_factory(cb3_observer)
        output.add_callback(callback1)
        output.add_callback(callback2)
        input.value = 31
        self.assertEqual(cb1_observer[-1], 32)
        self.assertEqual(cb2_observer[-1], 32)
        output.remove_callback(callback1)
        output.add_callback(callback3)
        input.value = 41
        self.assertEqual(len(cb1_observer), 1)
        self.assertEqual(cb2_observer[-1], 42)
        self.assertEqual(cb3_observer[-1], 42)
    def test_removing_a_callback_multiple_times_doesn_t_interfere_with_other_callbacks(
        self,
    ):
        input = InputCell(1)
        output = ComputeCell(
            [
                input,
            ],
            lambda inputs: inputs[0] + 1,
        )
        cb1_observer = []
        cb2_observer = []
        callback1 = self.callback_factory(cb1_observer)
        callback2 = self.callback_factory(cb2_observer)
        output.add_callback(callback1)
        output.add_callback(callback2)
        output.remove_callback(callback1)
        output.remove_callback(callback1)
        output.remove_callback(callback1)
        input.value = 2
        self.assertEqual(cb1_observer, [])
        self.assertEqual(cb2_observer[-1], 3)
    def test_callbacks_should_only_be_called_once_even_if_multiple_dependencies_change(
        self,
    ):
        input = InputCell(1)
        plus_one = ComputeCell(
            [
                input,
            ],
            lambda inputs: inputs[0] + 1,
        )
        minus_one1 = ComputeCell(
            [
                input,
            ],
            lambda inputs: inputs[0] - 1,
        )
        minus_one2 = ComputeCell(
            [
                minus_one1,
            ],
            lambda inputs: inputs[0] - 1,
        )
        output = ComputeCell(
            [
                plus_one,
                minus_one2,
            ],
            lambda inputs: inputs[0] * inputs[1],
        )
        cb1_observer = []
        callback1 = self.callback_factory(cb1_observer)
        output.add_callback(callback1)
        input.value = 4
        self.assertEqual(cb1_observer[-1], 10)
    def test_callbacks_should_not_be_called_if_dependencies_change_but_output_value_doesn_t_change(
        self,
    ):
        input = InputCell(1)
        plus_one = ComputeCell(
            [
                input,
            ],
            lambda inputs: inputs[0] + 1,
        )
        minus_one = ComputeCell(
            [
                input,
            ],
            lambda inputs: inputs[0] - 1,
        )
        always_two = ComputeCell(
            [
                plus_one,
                minus_one,
            ],
            lambda inputs: inputs[0] - inputs[1],
        )
        cb1_observer = []
        callback1 = self.callback_factory(cb1_observer)
        always_two.add_callback(callback1)
        input.value = 2
        self.assertEqual(cb1_observer, [])
        input.value = 3
        self.assertEqual(cb1_observer, [])
        input.value = 4
        self.assertEqual(cb1_observer, [])
        input.value = 5
        self.assertEqual(cb1_observer, [])
    # Utility functions.
    def callback_factory(self, observer):
        """Return a callback that appends each observed value to *observer*."""
        def callback(observer, value):
            observer.append(value)
        return partial(callback, observer)
6,175 | test count events | """Testing stats reporting."""
# Authors: Alex Gramfort <alexandre.gramfort@inria.fr>
#
# License: BSD-3-Clause
import itertools
import pytest
import numpy as np
import mne
from mne.datasets import testing
from mne_bids import BIDSPath, write_raw_bids
from mne_bids.stats import count_events
from mne_bids.read import _from_tsv
from mne_bids.write import _write_tsv
data_path = testing.data_path(download=False)
def _make_dataset(root, subjects, tasks=(None,), runs=(None,), sessions=(None,)):
    """Write a small BIDS dataset from the MNE sample raw for every
    subject/session/task/run combination; return (root, events, event_id)."""
    raw_fname = data_path / "MEG" / "sample" / "sample_audvis_trunc_raw.fif"
    raw = mne.io.read_raw(raw_fname)
    raw.info["line_freq"] = 60.0
    events = mne.find_events(raw)
    event_id = {
        "auditory/left": 1,
        "auditory/right": 2,
        "visual/left": 3,
        "visual/right": 4,
        "face": 5,
        "button": 32,
    }
    combos = itertools.product(subjects, sessions, tasks, runs)
    for subject, session, task, run in combos:
        path = BIDSPath(
            subject=subject,
            session=session,
            run=run,
            task=task,
            root=root,
        )
        write_raw_bids(
            raw,
            path,
            events=events,
            event_id=event_id,
            overwrite=True,
            verbose=False,
        )
    return root, events, event_id
def _check_counts(
    counts, events, event_id, subjects, tasks=(None,), runs=(None,), sessions=(None,)
):
    """Assert that a ``count_events`` DataFrame matches the generated dataset.

    ``counts`` is indexed by subject (plus session and/or run levels when
    those entities were used) with (task, event-name) column pairs.
    """
    # Index is flat when neither sessions nor runs were used; otherwise the
    # first MultiIndex level is always the subject.
    if (sessions[0] is None) and (runs[0] is None):
        assert np.all(counts.index == subjects)
    else:
        assert np.all(counts.index.levels[0] == subjects)
    # Remaining index levels depend on which of sessions/runs are present.
    if (sessions[0] is not None) and (runs[0] is not None):
        assert np.all(counts.index.levels[1] == sessions)
        assert np.all(counts.index.levels[2] == runs)
    elif sessions[0] is not None:
        assert np.all(counts.index.levels[1] == sessions)
    elif runs[0] is not None:
        assert np.all(counts.index.levels[1] == runs)
    assert np.all(counts.columns.levels[0] == tasks)
    assert sorted(counts.columns.levels[1]) == sorted(event_id.keys())
    # Every event type must appear exactly as often as in the raw events
    # array (events[:, 2] holds the event codes).
    for k, v in event_id.items():
        key = (subjects[0],)
        if sessions[0] is not None:
            key += (sessions[0],)
        if runs[0] is not None:
            key += (runs[0],)
        # A 1-tuple must collapse to a scalar label for .at on a flat index.
        key = key if len(key) > 1 else key[0]
        assert counts.at[key, (tasks[0], k)] == (events[:, 2] == v).sum()
# Each tuple below is (subjects, tasks, runs, sessions); the argvalues must
# stay in the same order as the argnames tuple directly above them.
@pytest.mark.parametrize(
    ("subjects", "tasks", "runs", "sessions"),
    [
        (["01"], ["task1"], ["01"], ["01"]),
        (["01", "02"], ["task1"], ["01"], ["01"]),
        (["01", "02"], ["task1", "task2"], ["01"], ["01"]),
        (["01"], ["task1", "task2"], [None], ["01"]),
        (["01"], ["task1", "task2"], ["01"], [None]),
        (["01"], ["task1", "task2"], [None], [None]),
    ],
)
@testing.requires_testing_data
def METHOD_NAME(tmp_path, subjects, tasks, runs, sessions):
    """Test the event counts.

    Builds a small BIDS dataset for each entity combination and checks that
    ``count_events`` reports the expected per-event totals.
    """
    pytest.importorskip("pandas")
    root, events, event_id = _make_dataset(tmp_path, subjects, tasks, runs, sessions)
    counts = count_events(root)
    _check_counts(counts, events, event_id, subjects, tasks, runs, sessions)
@testing.requires_testing_data
def test_count_events_bids_path(tmp_path):
    """Counting events via a BIDSPath works and rejects bad datatypes."""
    pytest.importorskip("pandas")
    root, events, event_id = _make_dataset(
        tmp_path, subjects=["01", "02"], tasks=["task1"]
    )
    # An unsupported datatype must be rejected up front.
    with pytest.raises(ValueError, match="datatype .*anat.* is not supported"):
        count_events(BIDSPath(root=root, subject="01", datatype="anat"))
    meg_path = BIDSPath(root=root, subject="01", datatype="meg")
    event_counts = count_events(meg_path)
    _check_counts(event_counts, events, event_id, subjects=["01"], tasks=["task1"])
@testing.requires_testing_data
def test_count_no_events_file(tmp_path):
    """count_events raises when the dataset contains no events files."""
    pytest.importorskip("pandas")
    raw_fname = data_path / "MEG" / "sample" / "sample_audvis_trunc_raw.fif"
    raw = mne.io.read_raw(raw_fname)
    raw.info["line_freq"] = 60.0
    root = str(tmp_path)
    path = BIDSPath(
        subject="01",
        task="task1",
        root=root,
    )
    # Writing without events produces a dataset with no *_events.tsv files.
    write_raw_bids(raw, path, overwrite=True, verbose=False)
    with pytest.raises(ValueError, match="No events files found."):
        count_events(root)
@testing.requires_testing_data
def test_count_no_events_column(tmp_path):
    """Test case where events.tsv doesn't contain [stim,trial]_type column."""
    pytest.importorskip("pandas")
    subject, task, run, session, datatype = "01", "task1", "01", "01", "meg"
    root, events, event_id = _make_dataset(
        tmp_path, [subject], [task], [run], [session]
    )
    # Rename the `trial_type` column to `stim_type` so the file no longer
    # has a `trial_type` column (count_events must fall back to `stim_type`).
    events_tsv_fpath = BIDSPath(
        root=root,
        subject=subject,
        task=task,
        run=run,
        session=session,
        datatype=datatype,
        suffix="events",
        extension=".tsv",
    ).fpath
    events_tsv = _from_tsv(events_tsv_fpath)
    events_tsv["stim_type"] = events_tsv["trial_type"]
    del events_tsv["trial_type"]
    _write_tsv(fname=events_tsv_fpath, dictionary=events_tsv, overwrite=True)
    counts = count_events(root)
    _check_counts(counts, events, event_id, [subject], [task], [run], [session])
6,176 | is valid path | from __future__ import annotations
import glob
import os
from abc import ABC, abstractmethod
from pathlib import Path
from urllib.parse import urlparse
import smart_open
from airflow.hooks.base import BaseHook
from astro.constants import FileLocation
from astro.exceptions import DatabaseCustomError
from astro.options import LoadOptions
class BaseFileLocation(ABC):
    """Base Location abstract class.

    Concrete subclasses (local filesystem, object stores, ...) implement the
    abstract properties; smart_open is used for actual streaming I/O.
    """
    template_fields = ("path", "conn_id")
    # Connection types a concrete location accepts; empty for the base class.
    supported_conn_type: set[str] = set()
    def __init__(self, path: str, conn_id: str | None = None, load_options: LoadOptions | None = None):
        """
        Manages and provide interface for the operation for all the supported locations.
        :param path: Path to a file in the filesystem/Object stores
        :param conn_id: Airflow connection ID
        """
        self.path: str = path
        self.conn_id: str | None = conn_id
        self.load_options: LoadOptions | None = load_options
        self.validate_conn()
    def validate_conn(self):
        """Check if the conn_id matches with provided path."""
        if not self.conn_id:
            return
        connection_type = BaseHook.get_connection(self.conn_id).conn_type
        if connection_type not in self.supported_conn_type:
            raise ValueError(
                f"Connection type {connection_type} is not supported for {self.path}. "
                f"Supported types are {self.supported_conn_type}"
            )
    @property
    def smartopen_uri(self) -> str:
        """
        Changes the object URI (self.path) to a SmartOpen supported URI if necessary.
        By default, does not change the self.path.
        :return: URI compatible with SmartOpen for desired location.
        """
        return self.path
    @property
    def hook(self):
        """Airflow hook for this location; subclasses that need one override it."""
        raise NotImplementedError
    @property
    @abstractmethod
    def location_type(self) -> FileLocation:
        """Property to identify location type"""
        raise NotImplementedError
    @property
    @abstractmethod
    def paths(self) -> list[str]:
        """Resolve patterns in path"""
        raise NotImplementedError
    @property
    def transport_params(self) -> dict | None: # skipcq: PYL-R0201
        """Get credentials required by smart open to access files"""
        return None
    @property
    @abstractmethod
    def size(self) -> int:
        """Return the size in bytes of the given file"""
        raise NotImplementedError
    @property
    @abstractmethod
    def openlineage_dataset_namespace(self) -> str:
        """
        Returns the open lineage dataset namespace as per
        https://github.com/OpenLineage/OpenLineage/blob/main/spec/Naming.md
        """
        raise NotImplementedError
    @property
    @abstractmethod
    def openlineage_dataset_name(self) -> str:
        """
        Returns the open lineage dataset name as per
        https://github.com/OpenLineage/OpenLineage/blob/main/spec/Naming.md
        """
        raise NotImplementedError
    @staticmethod
    def METHOD_NAME(path: str) -> bool:
        """
        Check if the given path is either a valid URI or a local file
        :param path: Either local filesystem path or remote URI
        """
        # First gate: the scheme must map to a supported FileLocation.
        try:
            BaseFileLocation.get_location_type(path)
        except ValueError:
            return False
        # Second gate: either a well-formed remote URI or a usable local path.
        # urlparse can raise ValueError itself (e.g. malformed port numbers).
        try:
            result = urlparse(path)
            # NOTE(review): `result.port or result.port is None` accepts a
            # present valid port or no port at all; port 0 would fail this
            # clause but may still pass via the local-path checks — confirm
            # whether that edge case is intended.
            if not (
                (result.scheme and result.netloc and (result.port or result.port is None))
                or os.path.isfile(path)
                or BaseFileLocation.check_non_existing_local_file_path(path)
                or glob.glob(result.path)
            ):
                return False
            return True
        except ValueError:
            return False
    @staticmethod
    def check_non_existing_local_file_path(path: str) -> bool:
        """Check if the path is valid by creating a temp file and then deleting it. Assumes the file doesn't exist"""
        # Side effect: briefly creates (then removes) the file at *path*.
        try:
            Path(path).touch()
            os.remove(path)
        except OSError:
            return False
        return True
    @staticmethod
    def get_location_type(path: str) -> FileLocation:
        """Identify where a file is located
        :param path: Path to a file in the filesystem/Object stores
        """
        file_scheme = urlparse(path).scheme
        if file_scheme == "":
            location = FileLocation.LOCAL
        else:
            try:
                location = FileLocation(file_scheme)
            except ValueError:
                raise ValueError(f"Unsupported scheme '{file_scheme}' from path '{path}'")
        return location
    def exists(self) -> bool:
        """Check if the file exists or not"""
        try:
            with smart_open.open(self.smartopen_uri, mode="r", transport_params=self.transport_params):
                return True
        except OSError:
            return False
    @property
    def databricks_uri(self) -> str:
        """
        Return a Databricks compatible URI. In most scenarios, it will be self.path. An exception is Microsoft WASB.
        :return: self.path
        """
        return self.path
    def databricks_auth_settings(self) -> dict:
        """
        Required settings to upload this file into databricks. Only needed for cloud storage systems
        like S3
        :return: A dictionary of settings keys to settings values
        """
        return {}
    def __repr__(self) -> str:
        """Unambiguous representation including path and connection id."""
        return f'{self.__class__.__name__}(path="{self.path}",conn_id="{self.conn_id}")'
    def __str__(self) -> str:
        """String representation of location"""
        return self.path
    def __eq__(self, other) -> bool:
        """Locations are equal when path and conn_id match (same class only)."""
        if not isinstance(other, self.__class__):
            return NotImplemented
        return self.path == other.path and self.conn_id == other.conn_id
    def __hash__(self) -> int:
        # Consistent with __eq__: hash on (path, conn_id).
        return hash((self.path, self.conn_id))
    def get_stream(self):
        """Open a writable binary stream at this location via smart_open.
        :return: a file-like object opened in "wb" mode
        """
        return smart_open.open(self.smartopen_uri, mode="wb", transport_params=self.transport_params)
    def get_snowflake_stage_auth_sub_statement(self) -> str: # skipcq: PYL-R0201
        """Auth clause for Snowflake stage creation; base class cannot provide one."""
        raise DatabaseCustomError("In order to create a stage, `storage_integration` is required.")
    @property
    def snowflake_stage_path(self) -> str:
        """
        Get the altered path if needed for stage creation in snowflake stage creation
        """
        return self.path
6,177 | create source image | from virttest import storage, data_dir
from provider.blockdev_mirror_nowait import BlockdevMirrorNowaitTest
from provider.job_utils import get_event_by_condition
class BlockdevMirrorCancelReadyIOError(BlockdevMirrorNowaitTest):
"""
Cancel a ready job when target image is in error
"""
def __init__(self, test, params, env):
params['filter-node-name'] = params['filter_node_name']
super(BlockdevMirrorCancelReadyIOError, self).__init__(test, params, env)
def _blockdev_add_image(self, tag):
params = self.params.object_params(tag)
devices = self.main_vm.devices.images_define_by_params(tag,
params,
'disk')
devices.pop()
for dev in devices:
if self.main_vm.devices.get_by_qid(dev.get_qid()):
continue
ret = self.main_vm.devices.simple_hotplug(dev,
self.main_vm.monitor)
if not ret[1]:
self.test.fail("Failed to hotplug '%s': %s."
% (dev, ret[0]))
def _create_image(self, tag):
disk = self.disk_define_by_params(self.params, tag)
disk.create(self.params)
self.trash.append(disk)
def METHOD_NAME(self):
"""create source image of data image"""
self._create_image(self.params["source_images"])
def create_target_image(self):
"""create target image of mirror image"""
self._create_image(self.params["target_images"])
def add_source_image(self):
"""blockdev-add source image: protocol and format nodes only"""
self.METHOD_NAME()
self._blockdev_add_image(self.params["source_images"])
def add_target_image(self):
"""blockdev-add target image: protocol and format nodes only"""
self.create_target_image()
# Fixme if blkdebug driver is supported completely in avocado-vt
target = self.params["target_images"]
target_params = self.params.object_params(target)
target_filename = storage.get_image_filename(target_params,
data_dir.get_data_dir())
args = {'node-name': 'drive_target', 'driver': 'qcow2', 'file': {'driver': 'blkdebug',
'image': {'driver': 'file', 'filename': target_filename},
'set-state': [{'event': 'flush_to_disk', 'state': 1, 'new_state': 2}],
'inject-error': [{'event': 'flush_to_disk', 'once': True,
'immediately': True, 'state': 2}]}}
self.main_vm.monitor.cmd("blockdev-add", args)
def qemu_io_source(self):
qmp_cmd = "human-monitor-command"
filter_node = self.params['filter_node_name']
qemu_io_cmd = 'qemu-io %s "write 0 64k"' % filter_node
args = {'command-line': qemu_io_cmd}
self.main_vm.monitor.cmd(qmp_cmd, args)
def cancel_job(self):
self.main_vm.monitor.cmd("block-job-cancel", {'device': self._jobs[0]})
event = get_event_by_condition(
self.main_vm, 'BLOCK_JOB_ERROR',
self.params.get_numeric('job_cancelled_timeout', 60),
device=self._jobs[0], action="stop"
)
if event is None:
self.test.fail('Job failed to cancel')
def wait_till_job_ready(self):
    """Block until the mirror job emits BLOCK_JOB_READY, or fail the test."""
    timeout = self.params.get_numeric('job_ready_timeout', 120)
    ready = get_event_by_condition(self.main_vm, 'BLOCK_JOB_READY',
                                   timeout, device=self._jobs[0])
    if ready is None:
        self.test.fail('Job failed to reach ready state')
def prepare_test(self):
    """Test setup: boot the main VM."""
    self.prepare_main_vm()
def do_test(self):
    # Ordered steps: add both nodes, start the mirror, wait for READY,
    # write to the source with qemu-io, then cancel — the cancel is
    # expected to stop with BLOCK_JOB_ERROR (see run() docstring).
    self.add_source_image()
    self.add_target_image()
    self.blockdev_mirror()
    self.wait_till_job_ready()
    self.qemu_io_source()
    self.cancel_job()
def run(test, params, env):
    """
    Cancel a ready job with target in error

    test steps:
        1. boot VM.
        2. hotplug 128M source node
        3. hotplug target node with eject error event set
        4. mirror from source to target
        5. when mirror reach ready status, write data to source
           node with qemu-io
        6. cancel mirror job
        7. check mirror job stopped with Block_job_error

    :param test: test object
    :param params: test configuration dict
    :param env: env object
    """
    mirror_test = BlockdevMirrorCancelReadyIOError(test, params, env)
    mirror_test.run_test()
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
#
# Code generated by aaz-dev-tools
# --------------------------------------------------------------------------------------------
# pylint: skip-file
# flake8: noqa
from azure.cli.core.aaz import *
# NOTE: generated by aaz-dev-tools — keep manual edits minimal so the command
# can be regenerated from the API specification.
@register_command(
    "automation hrwg hrw delete",
    confirmation="Are you sure you want to perform this operation?",
)
class Delete(AAZCommand):
    """Delete a hybrid runbook worker.

    :example: Delete a hybrid worker
        az automation hrwg hrw delete --automation-account-name accountName --resource-group groupName --hybrid-runbook-worker-group-name hybridRunbookWorkerGroupName --hybrid-runbook-worker-id hybridRunbookWorkerId
    """

    # Resource path / API-version metadata consumed by the aaz runtime.
    _aaz_info = {
        "version": "2022-08-08",
        "resources": [
            ["mgmt-plane", "/subscriptions/{}/resourcegroups/{}/providers/microsoft.automation/automationaccounts/{}/hybridrunbookworkergroups/{}/hybridrunbookworkers/{}", "2022-08-08"],
        ]
    }

    def _handler(self, command_args):
        # A successful delete produces no output payload.
        super()._handler(command_args)
        self._execute_operations()
        return None

    _args_schema = None

    @classmethod
    def _build_arguments_schema(cls, *args, **kwargs):
        # Build the CLI argument schema once and cache it on the class.
        if cls._args_schema is not None:
            return cls._args_schema
        cls._args_schema = super()._build_arguments_schema(*args, **kwargs)

        # define Arg Group ""
        _args_schema = cls._args_schema
        _args_schema.automation_account_name = AAZStrArg(
            options=["--automation-account-name"],
            help="The name of the automation account.",
            required=True,
            id_part="name",
        )
        _args_schema.hybrid_runbook_worker_group_name = AAZStrArg(
            options=["--hybrid-runbook-worker-group-name"],
            help="The hybrid runbook worker group name",
            required=True,
            id_part="child_name_1",
        )
        _args_schema.hybrid_runbook_worker_id = AAZStrArg(
            options=["-n", "--name", "--hybrid-runbook-worker-id"],
            help="The hybrid runbook worker id",
            required=True,
            id_part="child_name_2",
        )
        _args_schema.resource_group = AAZResourceGroupNameArg(
            required=True,
        )
        return cls._args_schema

    def _execute_operations(self):
        # Pipeline: pre-hook -> HTTP DELETE -> post-hook.
        self.METHOD_NAME()
        self.HybridRunbookWorkersDelete(ctx=self.ctx)()
        self.post_operations()

    # @register_callback
    def METHOD_NAME(self):
        # Hook invoked before the delete request; no-op by default.
        pass

    # @register_callback
    def post_operations(self):
        # Hook invoked after the delete request; no-op by default.
        pass

    class HybridRunbookWorkersDelete(AAZHttpOperation):
        # Thin wrapper over the management-plane DELETE call.
        CLIENT_TYPE = "MgmtClient"

        def __call__(self, *args, **kwargs):
            request = self.make_request()
            session = self.client.send_request(request=request, stream=False, **kwargs)
            if session.http_response.status_code in [200]:
                return self.on_200(session)
            if session.http_response.status_code in [204]:
                return self.on_204(session)
            return self.on_error(session.http_response)

        @property
        def url(self):
            return self.client.format_url(
                "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Automation/automationAccounts/{automationAccountName}/hybridRunbookWorkerGroups/{hybridRunbookWorkerGroupName}/hybridRunbookWorkers/{hybridRunbookWorkerId}",
                **self.url_parameters
            )

        @property
        def method(self):
            return "DELETE"

        @property
        def error_format(self):
            return "ODataV4Format"

        @property
        def url_parameters(self):
            parameters = {
                **self.serialize_url_param(
                    "automationAccountName", self.ctx.args.automation_account_name,
                    required=True,
                ),
                **self.serialize_url_param(
                    "hybridRunbookWorkerGroupName", self.ctx.args.hybrid_runbook_worker_group_name,
                    required=True,
                ),
                **self.serialize_url_param(
                    "hybridRunbookWorkerId", self.ctx.args.hybrid_runbook_worker_id,
                    required=True,
                ),
                **self.serialize_url_param(
                    "resourceGroupName", self.ctx.args.resource_group,
                    required=True,
                ),
                **self.serialize_url_param(
                    "subscriptionId", self.ctx.subscription_id,
                    required=True,
                ),
            }
            return parameters

        @property
        def query_parameters(self):
            parameters = {
                **self.serialize_query_param(
                    "api-version", "2022-08-08",
                    required=True,
                ),
            }
            return parameters

        def on_200(self, session):
            # 200: worker deleted; response carries no body to parse.
            pass

        def on_204(self, session):
            # 204: no content; nothing further to do.
            pass
# Public API of this generated module.
__all__ = ["Delete"]
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for EncodeBase64 and DecodeBase64."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import base64
import numpy as np
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import string_ops
from tensorflow.python.platform import test
@test_util.run_deprecated_v1
class Base64OpsTest(test_util.TensorFlowTestCase):
    """Round-trip and error-handling tests for EncodeBase64/DecodeBase64."""

    def setUp(self):
        # Build encode/decode graphs once: *_f without padding, *_t with it.
        self._msg = array_ops.placeholder(dtype=dtypes.string)
        self._encoded_f = string_ops.encode_base64(self._msg, pad=False)
        self._decoded_f = string_ops.decode_base64(self._encoded_f)
        self._encoded_t = string_ops.encode_base64(self._msg, pad=True)
        self._decoded_t = string_ops.decode_base64(self._encoded_t)

    def _RemovePad(self, msg, base64_msg):
        # Strip the '=' padding python's base64 adds so the reference value
        # matches pad=False output.
        if len(msg) % 3 == 1:
            return base64_msg[:-2]
        if len(msg) % 3 == 2:
            return base64_msg[:-1]
        return base64_msg

    def _RunTest(self, msg, pad):
        # Round-trip `msg` through the ops and compare the encoded form
        # against python's base64 module as the reference implementation.
        with self.cached_session() as sess:
            if pad:
                encoded, decoded = sess.run([self._encoded_t, self._decoded_t],
                                            feed_dict={self._msg: msg})
            else:
                encoded, decoded = sess.run([self._encoded_f, self._decoded_f],
                                            feed_dict={self._msg: msg})
            # Normalize scalar inputs to lists so one comparison loop works.
            if not isinstance(msg, (list, tuple)):
                msg = [msg]
                encoded = [encoded]
                decoded = [decoded]
            base64_msg = [base64.urlsafe_b64encode(m) for m in msg]
            if not pad:
                base64_msg = [self._RemovePad(m, b) for m, b in zip(msg, base64_msg)]
            for i in range(len(msg)):
                self.assertEqual(base64_msg[i], encoded[i])
                self.assertEqual(msg[i], decoded[i])

    def testWithPythonBase64(self):
        for pad in (False, True):
            self._RunTest(b"", pad=pad)
            for _ in range(100):
                length = np.random.randint(1024 * 1024)
                msg = np.random.bytes(length)
                self._RunTest(msg, pad=pad)

    def testShape(self):
        for pad in (False, True):
            for _ in range(10):
                msg = [np.random.bytes(np.random.randint(20))
                       for _ in range(np.random.randint(10))]
                self._RunTest(msg, pad=pad)
            # Zero-element, non-trivial shapes.
            for _ in range(10):
                k = np.random.randint(10)
                msg = np.empty((0, k), dtype=bytes)
                encoded = string_ops.encode_base64(msg, pad=pad)
                decoded = string_ops.decode_base64(encoded)
                with self.cached_session() as sess:
                    encoded_value, decoded_value = self.evaluate([encoded, decoded])
                # Shapes must survive encode/decode even with zero elements.
                self.assertEqual(encoded_value.shape, msg.shape)
                self.assertEqual(decoded_value.shape, msg.shape)

    def testInvalidInput(self):
        def METHOD_NAME(enc):
            # Feed `enc` straight into the decode op.
            self._decoded_f.eval(feed_dict={self._encoded_f: enc})

        with self.cached_session():
            # Invalid length.
            msg = np.random.bytes(99)
            enc = base64.urlsafe_b64encode(msg)
            with self.assertRaisesRegexp(errors.InvalidArgumentError, "1 modulo 4"):
                METHOD_NAME(enc + b"a")

            # Invalid char used in encoding.
            msg = np.random.bytes(34)
            enc = base64.urlsafe_b64encode(msg)
            for i in range(len(msg)):
                with self.assertRaises(errors.InvalidArgumentError):
                    METHOD_NAME(enc[:i] + b"?" + enc[(i + 1):])
                with self.assertRaises(errors.InvalidArgumentError):
                    METHOD_NAME(enc[:i] + b"\x80" + enc[(i + 1):])  # outside ascii range.
                with self.assertRaises(errors.InvalidArgumentError):
                    METHOD_NAME(enc[:i] + b"+" + enc[(i + 1):])  # not url-safe.
                with self.assertRaises(errors.InvalidArgumentError):
                    METHOD_NAME(enc[:i] + b"/" + enc[(i + 1):])  # not url-safe.

            # Partial padding.
            msg = np.random.bytes(34)
            enc = base64.urlsafe_b64encode(msg)
            with self.assertRaises(errors.InvalidArgumentError):
                # enc contains == at the end. Partial padding is not allowed.
                METHOD_NAME(enc[:-1])

            # Unnecessary padding.
            msg = np.random.bytes(33)
            enc = base64.urlsafe_b64encode(msg)
            with self.assertRaises(errors.InvalidArgumentError):
                METHOD_NAME(enc + b"==")
            with self.assertRaises(errors.InvalidArgumentError):
                METHOD_NAME(enc + b"===")
            with self.assertRaises(errors.InvalidArgumentError):
                METHOD_NAME(enc + b"====")

            # Padding in the middle. (Previous implementation was ok with this as long
            # as padding char location was 2 or 3 (mod 4).
            msg = np.random.bytes(33)
            enc = base64.urlsafe_b64encode(msg)
            for i in range(len(msg) - 1):
                with self.assertRaises(errors.InvalidArgumentError):
                    METHOD_NAME(enc[:i] + b"=" + enc[(i + 1):])
            for i in range(len(msg) - 2):
                with self.assertRaises(errors.InvalidArgumentError):
                    METHOD_NAME(enc[:i] + b"==" + enc[(i + 2):])
if __name__ == "__main__":
    test.main()
"""Illustrates a so-called "generic foreign key", in a similar fashion
to that of popular frameworks such as Django, ROR, etc. This
approach bypasses standard referential integrity
practices, in that the "foreign key" column is not actually
constrained to refer to any particular table; instead,
in-application logic is used to determine which table is referenced.
This approach is not in line with SQLAlchemy's usual style, as foregoing
foreign key integrity means that the tables can easily contain invalid
references and also have no ability to use in-database cascade functionality.
However, due to the popularity of these systems, as well as that it uses
the fewest number of tables (which doesn't really offer any "advantage",
though seems to be comforting to many) this recipe remains in
high demand, so in the interests of having an easy StackOverflow answer
queued up, here it is. The author recommends "table_per_related"
or "table_per_association" instead of this approach.
"""
from sqlalchemy import and_
from sqlalchemy import Column
from sqlalchemy import create_engine
from sqlalchemy import event
from sqlalchemy import Integer
from sqlalchemy import String
from sqlalchemy.ext.declarative import as_declarative
from sqlalchemy.ext.declarative import declared_attr
from sqlalchemy.orm import backref
from sqlalchemy.orm import foreign
from sqlalchemy.orm import relationship
from sqlalchemy.orm import remote
from sqlalchemy.orm import Session
@as_declarative()
class Base:
    """Base class which provides automated table name
    and surrogate primary key column.
    """

    @declared_attr
    def __tablename__(cls):
        # Table name defaults to the lowercased class name.
        return cls.__name__.lower()

    # Surrogate primary key shared by every mapped class.
    id = Column(Integer, primary_key=True)
class Address(Base):
    """The Address class.

    This represents all address records in a
    single table.
    """

    street = Column(String)
    city = Column(String)
    zip = Column(String)

    discriminator = Column(String)
    """Refers to the type of parent."""

    parent_id = Column(Integer)
    """Refers to the primary key of the parent.

    This could refer to any table.
    """

    @property
    def parent(self):
        """Provides in-Python access to the "parent" by choosing
        the appropriate relationship.
        """
        # The backref name is built by setup_listener below.
        return getattr(self, "parent_%s" % self.discriminator)

    def __repr__(self):
        return "%s(street=%r, city=%r, zip=%r)" % (
            self.__class__.__name__,
            self.street,
            self.city,
            self.zip,
        )
class HasAddresses:
    """HasAddresses mixin, creates a relationship to
    the address_association table for each parent.
    """
@event.listens_for(HasAddresses, "mapper_configured", propagate=True)
def setup_listener(mapper, class_):
    # For every mapped subclass of HasAddresses, wire up an `addresses`
    # relationship whose join condition embeds that class's discriminator.
    name = class_.__name__
    discriminator = name.lower()
    class_.addresses = relationship(
        Address,
        primaryjoin=and_(
            class_.id == foreign(remote(Address.parent_id)),
            Address.discriminator == discriminator,
        ),
        backref=backref(
            "parent_%s" % discriminator,
            primaryjoin=remote(class_.id) == foreign(Address.parent_id),
        ),
    )

    @event.listens_for(class_.addresses, "append")
    def METHOD_NAME(target, value, initiator):
        # Stamp each appended Address with the owning class's discriminator.
        value.discriminator = discriminator
class Customer(HasAddresses, Base):
    # Example parent type; receives `addresses` via the mapper_configured listener.
    name = Column(String)
class Supplier(HasAddresses, Base):
    # Second parent type, sharing the same Address table as Customer.
    company_name = Column(String)
# --- Demo: create the schema, insert sample data, then read it back. ---
engine = create_engine("sqlite://", echo=True)
Base.metadata.create_all(engine)

session = Session(engine)

session.add_all(
    [
        Customer(
            name="customer 1",
            addresses=[
                Address(
                    street="123 anywhere street", city="New York", zip="10110"
                ),
                Address(
                    street="40 main street", city="San Francisco", zip="95732"
                ),
            ],
        ),
        Supplier(
            company_name="Ace Hammers",
            addresses=[
                Address(street="2569 west elm", city="Detroit", zip="56785")
            ],
        ),
    ]
)

session.commit()

for customer in session.query(Customer):
    for address in customer.addresses:
        print(address)
        # Demonstrate the in-Python "generic" parent lookup.
        print(address.parent)
import base64
from datetime import datetime
from typing import Any
import falcon.testing
import pytest
from openfoodfacts.types import TaxonomyType
import robotoff.insights.importer
import robotoff.taxonomy
from robotoff.app.api import api
from robotoff.models import LogoAnnotation
from .models_utils import LogoAnnotationFactory, clean_db
@pytest.fixture()
def client():
    """Falcon test client wired to the Robotoff API app."""
    return falcon.testing.TestClient(api)
@pytest.fixture(autouse=True)
def _set_up_and_tear_down(peewee_db):
    # Ensure each test starts and finishes with a clean database.
    with peewee_db:
        clean_db()
    # Run the test case.
    yield
    with peewee_db:
        clean_db()
@pytest.fixture
def METHOD_NAME(monkeypatch):
    """Patch taxonomy loading with a minimal in-memory brand/label taxonomy."""

    def _get_taxonomy_mock(taxonomy_type: TaxonomyType, *args, **kwargs):
        data: Any = None
        if taxonomy_type is TaxonomyType.brand:
            data = {"en:etorki": {"name": {"en": "Etorki"}}}
        elif taxonomy_type is TaxonomyType.label:
            data = {
                "en:organic": {
                    "synonyms": {"fr": ["Bio"]},
                    "children": ["en:eu-organic"],
                },
                "en:eu-organic": {
                    "wikidata": {"en": "Q380448"},
                    "parents": ["en:organic"],
                },
            }
        # Unknown taxonomy types resolve to None (no taxonomy available).
        return robotoff.taxonomy.Taxonomy.from_dict(data) if data else None

    monkeypatch.setattr(
        robotoff.taxonomy,
        "_get_taxonomy",
        _get_taxonomy_mock,
    )
# Basic-auth header for user "a" / password "b", used by the annotate tests.
_AUTH_HEADER = {"Authorization": "Basic " + base64.b64encode(b"a:b").decode("ascii")}
def test_logo_annotation_empty_payload(client):
    """A JSON payload with 'annotations' key must be provided."""
    result = client.simulate_post(
        "/api/v1/images/logos/annotate",
        json={
            "withCredentials": True,
        },
        headers=_AUTH_HEADER,
    )
    # Schema validation rejects the payload before any DB access happens.
    assert result.status_code == 400
    assert result.json == {
        "description": "'annotations' is a required property",
        "title": "Request data failed validation",
    }
def test_logo_annotation_invalid_logo_type(client):
    """The logo type must be valid."""
    result = client.simulate_post(
        "/api/v1/images/logos/annotate",
        json={
            "withCredentials": True,
            "annotations": [
                {"logo_id": 10, "value": "etorki", "type": "INVALID_TYPE"},
                {"logo_id": 11, "value": "etorki", "type": "brand"},
            ],
        },
        headers=_AUTH_HEADER,
    )
    # One invalid entry is enough to reject the whole request.
    assert result.status_code == 400
    assert result.json.get("title") == "Request data failed validation"
@pytest.mark.parametrize("logo_type", ["brand", "category", "label", "store"])
def test_logo_annotation_missing_value_when_required(logo_type, client):
    """A `value` is expected for some logo type."""
    result = client.simulate_post(
        "/api/v1/images/logos/annotate",
        json={
            "withCredentials": True,
            "annotations": [{"logo_id": 10, "type": logo_type}],
        },
        headers=_AUTH_HEADER,
    )
    assert result.status_code == 400
    assert result.json == {
        "description": "'value' is a required property",
        "title": "Request data failed validation",
    }
def test_logo_annotation_incorrect_value_label_type(client, peewee_db):
    """A language-prefixed value is expected for label type."""
    with peewee_db:
        ann = LogoAnnotationFactory(
            image_prediction__image__source_image="/images/2.jpg",
            annotation_type=None,
        )
    result = client.simulate_post(
        "/api/v1/images/logos/annotate",
        json={
            "withCredentials": True,
            "annotations": [
                {"logo_id": ann.id, "type": "label", "value": "eu-organic"}
            ],
        },
        headers=_AUTH_HEADER,
    )
    # "eu-organic" lacks the "en:" (language) prefix, hence the 400.
    assert result.status_code == 400
    assert result.json == {
        "description": "language-prefixed value are required for label type (here: eu-organic)",
        "title": "400 Bad Request",
    }
def test_logo_annotation_brand(client, peewee_db, monkeypatch, mocker, METHOD_NAME):
    """Annotating a logo as a brand stores the taxonomized value and author."""
    barcode = "0000000000001"
    source_image = "/000/000/000/0001/2.jpg"
    with peewee_db:
        ann = LogoAnnotationFactory(
            barcode=barcode,
            source_image=source_image,
            annotation_type=None,
        )
    # Prevent background jobs from actually being scheduled.
    mocker.patch("robotoff.app.api.enqueue_job", return_value=None)
    start = datetime.utcnow()
    result = client.simulate_post(
        "/api/v1/images/logos/annotate",
        json={
            "withCredentials": True,
            "annotations": [{"logo_id": ann.id, "value": "etorki", "type": "brand"}],
        },
        headers=_AUTH_HEADER,
    )
    end = datetime.utcnow()
    assert result.status_code == 200
    assert result.json == {"annotated": 1}
    with peewee_db:
        ann = LogoAnnotation.get(LogoAnnotation.id == ann.id)
        assert ann.annotation_type == "brand"
        assert ann.annotation_value == "etorki"
        assert ann.annotation_value_tag == "etorki"
        # "Etorki" comes from the fake taxonomy fixture (METHOD_NAME).
        assert ann.taxonomy_value == "Etorki"
        assert ann.username == "a"
        assert start <= ann.completed_at <= end
def test_logo_annotation_label(client, peewee_db, monkeypatch, METHOD_NAME, mocker):
    """This test will check that, given an image with a logo above the
    confidence threshold, that is then fed into the ANN logos and labels model,
    we annotate properly a product.
    """
    barcode = "0000000000001"
    source_image = "/000/000/000/0001/2.jpg"
    with peewee_db:
        ann = LogoAnnotationFactory(
            barcode=barcode, source_image=source_image, annotation_type=None
        )
    # Prevent background jobs from actually being scheduled.
    mocker.patch("robotoff.app.api.enqueue_job", return_value=None)
    start = datetime.utcnow()
    result = client.simulate_post(
        "/api/v1/images/logos/annotate",
        json={
            "withCredentials": True,
            "annotations": [
                {"logo_id": ann.id, "value": "en:eu-organic", "type": "label"}
            ],
        },
        headers=_AUTH_HEADER,
    )
    end = datetime.utcnow()
    assert result.status_code == 200
    assert result.json == {"annotated": 1}
    with peewee_db:
        ann = LogoAnnotation.get(LogoAnnotation.id == ann.id)
        assert ann.annotation_type == "label"
        assert ann.annotation_value == "en:eu-organic"
        assert ann.annotation_value_tag == "en:eu-organic"
        assert ann.taxonomy_value == "en:eu-organic"
        assert ann.username == "a"
        assert start <= ann.completed_at <= end
#! /usr/bin/env python3
"""Conversions to/from quoted-printable transport encoding as per RFC 1521."""
# (Dec 1991 version).
__all__ = ["encode", "decode", "encodestring", "decodestring"]

ESCAPE = b'='             # quoted-printable escape character
MAXLINESIZE = 76          # maximum encoded line length per RFC 1521
HEX = b'0123456789ABCDEF'
EMPTYSTRING = b''

# Prefer the C implementations from binascii when available; the pure-Python
# paths below act as the fallback.
try:
    from binascii import a2b_qp, b2a_qp
except ImportError:
    a2b_qp = None
    b2a_qp = None
def needsquoting(c, quotetabs, header):
    """Decide whether a particular byte ordinal needs to be quoted.

    The 'quotetabs' flag indicates whether embedded tabs and spaces should be
    quoted. Note that line-ending tabs and spaces are always encoded, as per
    RFC 1521.
    """
    assert isinstance(c, bytes)
    # Embedded whitespace is quoted only when the caller asks for it.
    if c in b' \t':
        return quotetabs
    # In header mode '_' is the escape for a space (RFC 1522), so a literal
    # underscore must itself be quoted.
    if c == b'_':
        return header
    if c == ESCAPE:
        return True
    # Anything outside the printable ASCII range needs quoting.
    return not b' ' <= c <= b'~'
def METHOD_NAME(c):
    """Quote a single character."""
    assert isinstance(c, bytes) and len(c) == 1
    # '=' followed by the two uppercase hex digits of the byte value.
    hi, lo = divmod(ord(c), 16)
    return ESCAPE + bytes((HEX[hi], HEX[lo]))
def encode(input, output, quotetabs, header=False):
    """Read 'input', apply quoted-printable encoding, and write to 'output'.

    'input' and 'output' are binary file objects. The 'quotetabs' flag
    indicates whether embedded tabs and spaces should be quoted. Note that
    line-ending tabs and spaces are always encoded, as per RFC 1521.
    The 'header' flag indicates whether we are encoding spaces as _ as per RFC
    1522."""

    # Fast path: delegate to binascii's C implementation when available.
    if b2a_qp is not None:
        data = input.read()
        odata = b2a_qp(data, quotetabs=quotetabs, header=header)
        output.write(odata)
        return

    def write(s, output=output, lineEnd=b'\n'):
        # RFC 1521 requires that the line ending in a space or tab must have
        # that trailing character encoded.
        if s and s[-1:] in b' \t':
            output.write(s[:-1] + METHOD_NAME(s[-1:]) + lineEnd)
        elif s == b'.':
            output.write(METHOD_NAME(s) + lineEnd)
        else:
            output.write(s + lineEnd)

    # `prevline` lags one line behind so the final line can be written
    # without a trailing newline when the input had none.
    prevline = None
    while 1:
        line = input.readline()
        if not line:
            break
        outline = []
        # Strip off any readline induced trailing newline
        stripped = b''
        if line[-1:] == b'\n':
            line = line[:-1]
            stripped = b'\n'
        # Calculate the un-length-limited encoded line
        for c in line:
            c = bytes((c,))
            if needsquoting(c, quotetabs, header):
                c = METHOD_NAME(c)
            if header and c == b' ':
                outline.append(b'_')
            else:
                outline.append(c)
        # First, write out the previous line
        if prevline is not None:
            write(prevline)
        # Now see if we need any soft line breaks because of RFC-imposed
        # length limitations. Then do the thisline->prevline dance.
        thisline = EMPTYSTRING.join(outline)
        while len(thisline) > MAXLINESIZE:
            # Don't forget to include the soft line break `=' sign in the
            # length calculation!
            write(thisline[:MAXLINESIZE-1], lineEnd=b'=\n')
            thisline = thisline[MAXLINESIZE-1:]
        # Write out the current line
        prevline = thisline
    # Write out the last line, without a trailing newline
    if prevline is not None:
        write(prevline, lineEnd=stripped)
def encodestring(s, quotetabs=False, header=False):
    """Like encode(), but accepting and returning a bytes object."""
    # Fast path through binascii when available.
    if b2a_qp is not None:
        return b2a_qp(s, quotetabs=quotetabs, header=header)
    from io import BytesIO
    src, dst = BytesIO(s), BytesIO()
    encode(src, dst, quotetabs, header)
    return dst.getvalue()
def decode(input, output, header=False):
    """Read 'input', apply quoted-printable decoding, and write to 'output'.
    'input' and 'output' are binary file objects.
    If 'header' is true, decode underscore as space (per RFC 1522)."""

    # Fast path: delegate to binascii's C implementation when available.
    if a2b_qp is not None:
        data = input.read()
        odata = a2b_qp(data, header=header)
        output.write(odata)
        return

    # `new` accumulates decoded output; it is flushed at each hard line break
    # and carried over across soft ("=") line breaks.
    new = b''
    while 1:
        line = input.readline()
        if not line: break
        i, n = 0, len(line)
        if n > 0 and line[n-1:n] == b'\n':
            partial = 0; n = n-1
            # Strip trailing whitespace
            while n > 0 and line[n-1:n] in b" \t\r":
                n = n-1
        else:
            # A line without a newline is "partial": no hard break follows.
            partial = 1
        while i < n:
            c = line[i:i+1]
            if c == b'_' and header:
                new = new + b' '; i = i+1
            elif c != ESCAPE:
                new = new + c; i = i+1
            elif i+1 == n and not partial:
                # Trailing '=' is a soft line break: continue on next line.
                partial = 1; break
            elif i+1 < n and line[i+1:i+2] == ESCAPE:
                new = new + ESCAPE; i = i+2
            elif i+2 < n and ishex(line[i+1:i+2]) and ishex(line[i+2:i+3]):
                new = new + bytes((unhex(line[i+1:i+3]),)); i = i+3
            else:  # Bad escape sequence -- leave it in
                new = new + c; i = i+1
        if not partial:
            output.write(new + b'\n')
            new = b''
    if new:
        output.write(new)
def decodestring(s, header=False):
    """Like decode(), but accepting and returning a bytes object."""
    # Fast path through binascii when available.
    if a2b_qp is not None:
        return a2b_qp(s, header=header)
    from io import BytesIO
    src, dst = BytesIO(s), BytesIO()
    decode(src, dst, header=header)
    return dst.getvalue()
# Other helper functions
def ishex(c):
    """Return true if the byte ordinal 'c' is a hexadecimal digit in ASCII."""
    assert isinstance(c, bytes)
    if b'0' <= c <= b'9':
        return True
    # Hex letters, either case.
    return b'a' <= c <= b'f' or b'A' <= c <= b'F'
def unhex(s):
    """Get the integer value of a hexadecimal number."""
    value = 0
    for byte in s:
        ch = bytes((byte,))
        if b'0' <= ch <= b'9':
            digit = byte - ord('0')
        elif b'a' <= ch <= b'f':
            digit = byte - (ord('a') - 10)
        elif b'A' <= ch <= b'F':
            digit = byte - (ord('A') - 10)
        else:
            assert False, "non-hex digit " + repr(ch)
        # Accumulate base-16 digits most-significant first.
        value = value * 16 + digit
    return value
def main():
    """Command-line driver: encode (default) or decode files/stdin to stdout."""
    import sys
    import getopt
    try:
        opts, args = getopt.getopt(sys.argv[1:], 'td')
    except getopt.error as msg:
        # Usage errors go to stderr; exit status 2 signals bad invocation.
        sys.stdout = sys.stderr
        print(msg)
        print("usage: quopri [-t | -d] [file] ...")
        print("-t: quote tabs")
        print("-d: decode; default encode")
        sys.exit(2)
    deco = 0
    tabs = 0
    for o, a in opts:
        if o == '-t': tabs = 1
        if o == '-d': deco = 1
    if tabs and deco:
        sys.stdout = sys.stderr
        print("-t and -d are mutually exclusive")
        sys.exit(2)
    # No file arguments means read from stdin.
    if not args: args = ['-']
    sts = 0
    for file in args:
        if file == '-':
            fp = sys.stdin.buffer
        else:
            try:
                fp = open(file, "rb")
            except OSError as msg:
                sys.stderr.write("%s: can't open (%s)\n" % (file, msg))
                sts = 1
                continue
        try:
            if deco:
                decode(fp, sys.stdout.buffer)
            else:
                encode(fp, sys.stdout.buffer, tabs)
        finally:
            if file != '-':
                fp.close()
    if sts:
        sys.exit(sts)
if __name__ == '__main__':
    main()
"""
helper functions for managing recipes
"""
import sys
from collections import OrderedDict
import solvebio as sb
import ruamel.yaml as yaml
import click
def create_recipe(description):
    """Create a SolveBio dataset template of type "recipe" from `description`."""
    version = description['version']
    name = description['name']
    summary = description['description']
    sb.DatasetTemplate.create(
        name="{} (v{})".format(name, version),
        # Empty/None descriptions are stored as None rather than "".
        description="{}".format(summary) if summary else None,
        template_type="recipe",
        tags=["recipe"],
        is_public=description['is_public'],
        version=version,
        annotator_params={
            "annotator": "parallel"
        },
        fields=[description['fields']],
    )
def delete_recipe(recipe_name):
    """Force-delete every existing template whose name matches `recipe_name`."""
    matches = sb.DatasetTemplate.all(name=recipe_name)
    if not matches:
        click.echo("{} doesn't exist!".format(recipe_name))
        return
    for template in matches:
        template.delete(force=True)
def METHOD_NAME(recipe):
    # Re-create the recipe from scratch: drop any existing copy, then create.
    delete_recipe("{} (v{})".format(recipe['name'], recipe['version']))
    create_recipe(recipe)
def get_recipe_by_name_from_yml(all_recipes, name):
    """Return the recipe whose name and version both occur in `name`, else None."""
    for candidate in all_recipes:
        # Substring match: `name` is expected to embed both fields.
        if candidate["name"] in name and candidate["version"] in name:
            return candidate
    click.echo("{} doesn't exist in the provided YAML file!".format(name))
    return None
def load_recipes_from_yaml(yml_file):
    """Return the list stored under the 'recipes' key of the given YAML file."""
    with open(yml_file, 'r') as yml:
        all_recipes = yaml.load(yml)
    return all_recipes['recipes']
def get_public_recipes():
    """Return every public dataset template of type "recipe"."""
    templates = sb.DatasetTemplate.all() or []
    return [t for t in templates
            if t['template_type'] == "recipe" and t['is_public']]
def get_account_recipes(user):
    """Return every "recipe" template owned by `user`'s account."""
    templates = sb.DatasetTemplate.all() or []
    return [t for t in templates
            if t['template_type'] == "recipe"
            and t['account'] == user["account"]["id"]]
def export_recipes_to_yaml(recipes, yml_file):
    """Dump a list of recipe templates to `yml_file` as a YAML document.

    Recipe expressions are emitted as literal (`|`) block scalars so that
    multi-line expressions survive the round trip.
    """
    # `unicode` only exists on Python 2; the original code referenced it
    # unconditionally and raised NameError on Python 3.
    if sys.version_info < (3, 0):
        text_type = unicode  # noqa: F821
    else:
        text_type = str

    with open(yml_file, 'w') as outfile:

        class RecipeDumper(yaml.Dumper):
            # Private dumper subclass so the representers below don't leak
            # into the global yaml.Dumper.
            pass

        class literal(text_type):
            # Marker type: strings wrapped in `literal` dump as block scalars.
            pass

        def _dict_representer(dumper, data):
            return dumper.represent_mapping(
                yaml.resolver.BaseResolver.DEFAULT_MAPPING_TAG, data.items())

        def _literal_representer(dumper, data):
            return dumper.represent_scalar(
                u'tag:yaml.org,2002:str', data, style='|')

        RecipeDumper.add_representer(dict, _dict_representer)
        RecipeDumper.add_representer(literal, _literal_representer)

        # Needed for python2,
        # otherwise: 'item': !!python/unicode "some string" is dumped
        if sys.version_info < (3, 0):
            def represent_unicode(dumper, data):
                return dumper.represent_scalar(u'tag:yaml.org,2002:str', data)
            RecipeDumper.add_representer(text_type, represent_unicode)

        yaml_recipes = []
        for r in recipes:
            recipe = dict(r)
            # Work on a copy of the fields dict. The original code assigned
            # into `dict(r)[...]`, a throwaway copy, so the literal-wrapped
            # expression was silently discarded.
            fields = dict(recipe['fields'][0])
            fields['expression'] = literal(text_type(fields['expression']))
            recipe_details = {
                "name": recipe['name'],
                "description": recipe['description'],
                "template_type": "recipe",
                "is_public": "True" if recipe['is_public'] else "False",
                "version": recipe['version'],
                "annotator_params": {
                    "annotator": "parallel"
                },
                "fields": {i: fields[i]
                           for i in fields
                           if i not in ['state', 'dictitems'] and fields[i]}
            }
            yaml_recipes.append(recipe_details)

        yaml.dump({"recipes": yaml_recipes}, outfile, RecipeDumper, encoding='utf-8',
                  default_flow_style=False)
    print("Wrote recipe to file: {}".format(yml_file))
# -*- coding: utf-8 -*-
"""Base class for clustering."""
__author__ = ["chrisholder", "TonyBagnall"]
__all__ = ["BaseClusterer"]
import time
from abc import ABC, abstractmethod
import numpy as np
from aeon.base import BaseCollectionEstimator
from aeon.utils.validation._dependencies import _check_estimator_deps
class BaseClusterer(BaseCollectionEstimator, ABC):
"""Abstract base class for time series clusterers.
Parameters
----------
n_clusters : int, default=None
Number of clusters for model.
"""
def __init__(self, n_clusters: int = None):
    # Milliseconds spent in the most recent fit(); updated by fit().
    self.fit_time_ = 0
    self.n_clusters = n_clusters
    super(BaseClusterer, self).__init__()
    # Fail early if this estimator's soft dependencies are missing.
    _check_estimator_deps(self)
def fit(self, X, y=None) -> BaseCollectionEstimator:
    """Fit time series clusterer to training data.

    Parameters
    ----------
    X : 3D np.array (any number of channels, equal length series)
            of shape (n_instances, n_channels, n_timepoints)
        or 2D np.array (univariate, equal length series)
            of shape (n_instances, n_timepoints)
        or list of numpy arrays (any number of channels, unequal length series)
            of shape [n_instances], 2D np.array (n_channels, n_timepoints_i), where
            n_timepoints_i is length of series i
        other types are allowed and converted into one of the above.
    y: ignored, exists for API consistency reasons.

    Returns
    -------
    self:
        Fitted estimator.
    """
    self.reset()
    _start_time = int(round(time.time() * 1000))
    X = self._preprocess_collection(X)
    self._fit(X)
    # Record wall-clock fit time in milliseconds.
    self.fit_time_ = int(round(time.time() * 1000)) - _start_time
    self._is_fitted = True
    return self
def predict(self, X, y=None) -> np.ndarray:
    """Predict the closest cluster each sample in X belongs to.

    Parameters
    ----------
    X : np.ndarray (2d or 3d array of shape (n_instances, series_length) or shape
        (n_instances, n_channels, series_length)).
        Time series instances to predict their cluster indexes.
    y: ignored, exists for API consistency reasons.

    Returns
    -------
    np.ndarray (1d array of shape (n_instances,))
        Index of the cluster each time series in X belongs to.
    """
    # Fail fast if fit() has not been called.
    self.check_is_fitted()
    X = self._preprocess_collection(X)
    return self._predict(X)
def METHOD_NAME(self, X, y=None) -> np.ndarray:
    """Compute cluster centers and predict cluster index for each time series.

    Convenience method; equivalent of calling fit(X) followed by predict(X)

    Parameters
    ----------
    X : np.ndarray (2d or 3d array of shape (n_instances, series_length) or shape
        (n_instances, n_channels, series_length)).
        Time series instances to train clusterer and then have indexes each belong
        to return.
    y: ignored, exists for API consistency reasons.

    Returns
    -------
    np.ndarray (1d array of shape (n_instances,))
        Index of the cluster each time series in X belongs to.
    """
    self.fit(X)
    return self.predict(X)
def predict_proba(self, X) -> np.ndarray:
    """Predicts labels probabilities for sequences in X.

    Default behaviour is to call _predict and set the predicted class probability
    to 1, other class probabilities to 0. Override if better estimates are
    obtainable.

    Parameters
    ----------
    X : guaranteed to be of a type in self.get_tag("X_inner_mtype")
        if self.get_tag("X_inner_mtype") = "numpy3D":
        3D np.ndarray of shape = [n_instances, n_channels, series_length]
        for list of other mtypes, see datatypes.SCITYPE_REGISTER

    Returns
    -------
    y : 2D array of shape [n_instances, n_classes] - predicted class probabilities
        1st dimension indices correspond to instance indices in X
        2nd dimension indices correspond to possible labels (integers)
        (i, j)-th entry is predictive probability that i-th instance is of class j
    """
    self.check_is_fitted()
    X = self._preprocess_collection(X)
    return self._predict_proba(X)
def score(self, X, y=None) -> float:
    """Score the quality of the clusterer.

    Parameters
    ----------
    X : np.ndarray
        2d or 3d array of shape (n_instances, series_length) or
        (n_instances, n_channels, series_length). Time series instances
        to score the fitted clusterer on.
    y : optional
        Forwarded to ``_score``; most implementations ignore it, it
        exists for API consistency reasons.

    Returns
    -------
    score : float
        Score of the clusterer.
    """
    # Fitted-state check and preprocessing mirror predict()/predict_proba().
    self.check_is_fitted()
    return self._score(self._preprocess_collection(X), y)
def _predict_proba(self, X) -> np.ndarray:
"""Predicts labels probabilities for sequences in X.
Default behaviour is to call _predict and set the predicted class probability
to 1, other class probabilities to 0. Override if better estimates are
obtainable.
Parameters
----------
X : guaranteed to be of a type in self.get_tag("X_inner_mtype")
if self.get_tag("X_inner_mtype") = "numpy3D":
3D np.ndarray of shape = [n_instances, n_channels, series_length]
for list of other mtypes, see datatypes.SCITYPE_REGISTER
Returns
-------
y : 2D array of shape [n_instances, n_classes] - predicted class probabilities
1st dimension indices correspond to instance indices in X
2nd dimension indices correspond to possible labels (integers)
(i, j)-th entry is predictive probability that i-th instance is of class j
"""
preds = self._predict(X)
n_instances = len(preds)
n_clusters = self.n_clusters
if n_clusters is None:
n_clusters = max(preds) + 1
dists = np.zeros((X.shape[0], n_clusters))
for i in range(n_instances):
dists[i, preds[i]] = 1
return dists
@abstractmethod
def _score(self, X, y=None):
    """Return a float quality score for the fitted clustering on X.

    Called by the public ``score`` wrapper after the fitted-state check
    and preprocessing; ``y`` is forwarded from ``score`` and is typically
    ignored by implementations.
    """
    ...
@abstractmethod
def _predict(self, X, y=None) -> np.ndarray:
    """Predict the closest cluster each sample in X belongs to.

    Concrete subclasses implement this; the public ``predict`` wrapper has
    already checked the fitted state and preprocessed the collection.

    Parameters
    ----------
    X : np.ndarray
        2d or 3d array of shape (n_instances, series_length) or
        (n_instances, n_channels, series_length). Time series instances
        to predict their cluster indexes.
    y : ignored
        Exists for API consistency reasons.

    Returns
    -------
    np.ndarray
        1d array of shape (n_instances,) with the index of the cluster
        each time series in X belongs to.
    """
    ...
@abstractmethod
def _fit(self, X, y=None):
    """Fit time series clusterer to training data.

    Concrete subclasses implement this; input has already been validated
    and preprocessed by the public ``fit`` wrapper.

    Parameters
    ----------
    X : np.ndarray
        2d or 3d array of shape (n_instances, series_length) or
        (n_instances, n_channels, series_length). Training time series
        instances to cluster.
    y : ignored
        Exists for API consistency reasons.

    Returns
    -------
    self :
        Fitted estimator.
    """
    ...
6,185 | test exception in job not canceled | # -*- coding: utf-8 -*-
"""
Testing of Scheduler.
"""
__author__ = 'Marcin Usielski'
__copyright__ = 'Copyright (C) 2018, Nokia'
__email__ = 'marcin.usielski@nokia.com'
from moler.scheduler import Scheduler
from moler.exceptions import WrongUsage
from moler.util.moler_test import MolerTest
import time
import pytest
import sys
try:
import asyncio
except ImportError: # pragma: nocover
try:
import trollius as asyncio
except ImportError:
raise ImportError(
'Support for asyncio requires either Python 3.4 or the asyncio package installed or trollius installed for python 2.7')
def test_job():
    """A periodic job fires once per interval until cancelled."""
    values = {'number': 0}
    job = Scheduler.get_job(callback=callback, interval=0.1, callback_params={'param_dict': values})
    job.start()
    # 0.22 s gives the 0.1 s job time to fire exactly twice.
    MolerTest.sleep(seconds=0.22)
    job.cancel()
    assert(2 == values['number'])
def test_exception_in_job_canceled():
    """With cancel_on_exception=True the job stops after the raising run."""
    values = {'number': 0}
    # callback_exception raises ZeroDivisionError on its second invocation.
    job = Scheduler.get_job(callback=callback_exception, interval=0.1, callback_params={'param_dict': values}, cancel_on_exception=True)
    job.start()
    # 0.32 s would allow three runs, but the job is cancelled after the
    # exception in run two, so the counter stays at 2.
    MolerTest.sleep(seconds=0.32)
    job.cancel()
    assert(2 == values['number'])
def METHOD_NAME():
    """With cancel_on_exception=False the job survives a raising run."""
    values = {'number': 0}
    # callback_exception raises on run two, but the job keeps running.
    job = Scheduler.get_job(callback=callback_exception, interval=0.1, callback_params={'param_dict': values}, cancel_on_exception=False)
    job.start()
    # 0.32 s fits three 0.1 s ticks; all three increment the counter.
    MolerTest.sleep(seconds=0.32)
    job.cancel()
    assert(3 == values['number'])
def test_long_job():
    """A callback that outlasts its interval yields fewer runs per window."""
    values = {'number': 0}
    # callback_long sleeps 0.12 s, longer than the 0.1 s interval.
    job = Scheduler.get_job(callback=callback_long, interval=0.1, callback_params={'param_dict': values}, cancel_on_exception=True)
    job.start()
    # Only two non-overlapping long runs complete within 0.45 s.
    MolerTest.sleep(seconds=0.45)
    job.cancel()
    assert(2 == values['number'])
def test_wrong_usage():
    """Scheduler rejects an unknown backend kind with WrongUsage."""
    with pytest.raises(WrongUsage):
        Scheduler.change_kind('wrong_kind')
def test_job_callback_as_method():
    """A bound method works as a job callback; both counters advance."""
    values = {'number': 0}
    obj = CallbackTest()
    job = Scheduler.get_job(callback=obj.callback_method, interval=0.1, callback_params={'param_dict': values})
    job.start()
    # 0.22 s -> two invocations of the callback.
    MolerTest.sleep(seconds=0.22)
    job.cancel()
    # Each invocation bumps the dict counter by 1 and obj.counter by 3.
    assert(2 == values['number'])
    assert(6 == obj.counter)
def test_2_jobs_concurrently():
    """Two jobs run independently; start/cancel are idempotent."""
    values_1 = {'number': 0}
    values_2 = {'number': 0}
    job1 = Scheduler.get_job(callback=callback, interval=0.05, callback_params={'param_dict': values_1})
    job2 = Scheduler.get_job(callback=callback, interval=0.10, callback_params={'param_dict': values_2})
    # Deliberate: cancelling before start and the doubled start/cancel
    # calls below must all be harmless no-ops.
    job1.cancel()
    job1.start()
    job1.start()
    job2.start()
    MolerTest.sleep(seconds=0.23)
    job1.cancel()
    job1.cancel()
    job2.cancel()
    # Within 0.23 s: job2 (0.10 s) fires twice, job1 (0.05 s) four times.
    assert (2 == values_2['number'])
    assert (4 == values_1['number'])
def test_thread_test_job():
    """The thread-based backend also fires once per interval."""
    Scheduler.change_kind("thread")
    values = {'number': 0}
    job = Scheduler.get_job(callback=callback, interval=0.1, callback_params={'param_dict': values})
    job.start()
    # 0.38 s allows three 0.1 s ticks.
    time.sleep(0.38)
    job.cancel()
    Scheduler.change_kind()  # Set the default
    assert (3 == values['number'])
@pytest.mark.skipif(sys.version_info < (3, 4), reason="requires python3.4 or higher")
def test_asyncio_test_job():
    """The asyncio backend fires while the event loop is running."""
    loop = asyncio.get_event_loop()
    Scheduler.change_kind("asyncio")
    values = {'number': 0}
    job = Scheduler.get_job(callback=callback, interval=0.1, callback_params={'param_dict': values})
    job.start()
    # The job only ticks while the loop runs: 0.23 s -> two ticks.
    loop.run_until_complete(asyncio.sleep(0.23))
    job.cancel()
    loop.stop()
    Scheduler.change_kind()  # Set the default
    assert (2 == values['number'])
def test_cannot_create_more_objects():
    """Scheduler is a singleton: direct instantiation raises WrongUsage."""
    with pytest.raises(WrongUsage):
        Scheduler()
        Scheduler()
def callback(param_dict):
    """Increment the 'number' counter in the supplied dict (in place)."""
    param_dict['number'] = param_dict['number'] + 1
def callback_long(param_dict):
    """Increment the counter, then sleep longer than the test job interval.

    Used by test_long_job to simulate a callback that outlasts its interval.
    """
    param_dict['number'] += 1
    MolerTest.sleep(seconds=0.12)
def callback_exception(param_dict):
    """Increment the counter; deliberately raise on the second invocation.

    Used by the scheduler tests to verify cancel_on_exception behaviour.
    Raises ZeroDivisionError (the same exception type the original
    obscure `return 2/0` produced) so existing expectations still hold.
    """
    param_dict['number'] += 1
    if param_dict['number'] == 2:
        # Explicit raise makes the intent clear instead of `return 2/0`.
        raise ZeroDivisionError('deliberate failure on second invocation')
class CallbackTest(object):
    """Holder used to verify that bound methods work as job callbacks."""

    def __init__(self):
        super(CallbackTest, self).__init__()
        self.counter = 0  # incremented by 3 on every callback invocation

    def callback_method(self, param_dict):
        """Bump the shared counter dict and this object's own counter."""
        param_dict['number'] = param_dict['number'] + 1
        self.counter = self.counter + 3
6,186 | remove labels | # Copyright 2021 The Cirq Developers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""An interface for quantum programs.
The quantum program represents a circuit (or other execution) that,
when combined with a run context, will become a quantum job.
"""
import abc
import datetime
from typing import Dict, List, Optional, Sequence, Set, TYPE_CHECKING, Union
import cirq
from cirq_google.cloud import quantum
if TYPE_CHECKING:
import cirq_google.engine.abstract_job as abstract_job
import cirq_google.engine.abstract_engine as abstract_engine
class AbstractProgram(abc.ABC):
    """An abstract object representing a quantum program.

    This program generally wraps a `Circuit` with additional metadata.
    When combined with an appropriate RunContext, this becomes a
    Job that can run on either an Engine service or simulator.
    Programs can also be a batch (list of circuits) or calibration
    requests.

    This is an abstract class that inheritors should implement.
    """

    @abc.abstractmethod
    def engine(self) -> 'abstract_engine.AbstractEngine':
        """Returns the parent Engine object.

        Returns:
            The program's parent Engine.
        """

    @abc.abstractmethod
    def get_job(self, job_id: str) -> 'abstract_job.AbstractJob':
        """Returns an AbstractJob for an existing id.

        Args:
            job_id: Unique ID of the job within the parent program.

        Returns:
            An AbstractJob for this program.
        """

    @abc.abstractmethod
    def list_jobs(
        self,
        created_before: Optional[Union[datetime.datetime, datetime.date]] = None,
        created_after: Optional[Union[datetime.datetime, datetime.date]] = None,
        has_labels: Optional[Dict[str, str]] = None,
        execution_states: Optional[Set[quantum.ExecutionStatus.State]] = None,
    ) -> Sequence['abstract_job.AbstractJob']:
        """Returns the list of jobs for this program.

        Args:
            created_before: retrieve jobs that were created before this date
                or time.
            created_after: retrieve jobs that were created after this date
                or time.
            has_labels: retrieve jobs that have labels on them specified by
                this dict. If the value is set to `*`, jobs having the label
                regardless of the label value will be returned. For example, to
                query programs that have the shape label and have the color
                label with value red can be queried using
                {'color': 'red', 'shape': '*'}
            execution_states: retrieve jobs that have an execution state that
                is contained in `execution_states`. See
                `quantum.ExecutionStatus.State` enum for accepted values.

        Returns:
            A sequence of `AbstractJob` objects that satisfy the constraints.
        """

    @abc.abstractmethod
    def create_time(self) -> 'datetime.datetime':
        """Returns when the program was created."""

    @abc.abstractmethod
    def update_time(self) -> 'datetime.datetime':
        """Returns when the program was last updated."""

    @abc.abstractmethod
    def description(self) -> str:
        """Returns the description of the program."""

    @abc.abstractmethod
    def set_description(self, description: str) -> 'AbstractProgram':
        """Sets the description of the program.

        Params:
            description: The new description for the program.

        Returns:
            This AbstractProgram.
        """

    @abc.abstractmethod
    def labels(self) -> Dict[str, str]:
        """Returns the labels of the program."""

    @abc.abstractmethod
    def set_labels(self, labels: Dict[str, str]) -> 'AbstractProgram':
        """Sets (overwriting) the labels for a previously created quantum program.

        Params:
            labels: The entire set of new program labels.

        Returns:
            This AbstractProgram.
        """

    @abc.abstractmethod
    def add_labels(self, labels: Dict[str, str]) -> 'AbstractProgram':
        """Adds new labels to a previously created quantum program.

        Params:
            labels: New labels to add to the existing program labels.

        Returns:
            This AbstractProgram.
        """

    @abc.abstractmethod
    def METHOD_NAME(self, keys: List[str]) -> 'AbstractProgram':
        """Removes labels with given keys from the labels of a previously
        created quantum program.

        Params:
            keys: Label keys to remove from the existing program labels.

        Returns:
            This AbstractProgram.
        """

    @abc.abstractmethod
    def get_circuit(self, program_num: Optional[int] = None) -> cirq.Circuit:
        """Returns the cirq Circuit for the program. This is only
        supported if the program was created with the V2 protos.

        Args:
            program_num: if this is a batch program, the index of the circuit in
                the batch. This argument is zero-indexed. Negative values
                index from the end of the list.

        Returns:
            The program's cirq Circuit.
        """

    @abc.abstractmethod
    def batch_size(self) -> int:
        """Returns the number of programs in a batch program.

        Raises:
            ValueError: if the program created was not a batch program.
        """

    @abc.abstractmethod
    def delete(self, delete_jobs: bool = False) -> None:
        """Deletes a previously created quantum program.

        Params:
            delete_jobs: If True will delete all the program's jobs, otherwise
                this will fail if the program contains any jobs.
        """

    @abc.abstractmethod
    def delete_job(self, job_id: str) -> None:
        """Removes a child job from this program."""
6,187 | radix sort |
# Radix Sort
# 1. Identify the maximum number: Find the maximum number in the given list. This is necessary to determine the number of digits we need to consider
# during the sorting process.
# 2. Perform counting sort for each digit position: Starting from the least significant digit (rightmost digit), perform the following steps for each
# digit position, moving towards the most significant digit (leftmost digit):
# a. Create a count array: Create a count array of size 10 (to represent digits 0-9) and initialize all elements to 0. This count array will be used
# to store the frequency of each digit at the current position.
# b. Count the frequencies: Iterate through the list of numbers and count the frequency of each digit at the current position. For example, if the
# current digit position is the units place, count the frequency of each digit from 0 to 9.
# c. Update the count array: Modify the count array such that each element represents the cumulative count of digits up to that index. This step
# ensures that the count array contains the correct positions for each digit in the sorted order.
# d. Distribute the numbers: Iterate through the list of numbers again, and for each number, find its digit at the current position. Use the count
# array to determine the correct position of the number in the output array and place it there. After placing the number, decrement the count for
# that digit in the count array.
# e. Collect the numbers: After distributing all the numbers, collect them back into the original array. The array will now be partially sorted
# based on the current digit position.
# 3. Repeat the counting sort for the next digit position: After collecting the numbers based on the least significant digit, move to the next digit
# position (towards the left) and repeat steps 2a to 2e for that digit. Continue this process until all the digits have been processed, from the least
# significant digit to the most significant digit.
# 4. Final sorted list: After completing the counting sort process for all digit positions, you will have a fully sorted list of numbers.
# Here's an example to illustrate the process:
# Sample Input: [170, 45, 75, 90, 802, 24, 2, 66]
# Maximum number: 802
# 1. First iteration (Least significant digit - rightmost digit):
# Create the count array: [0, 2, 1, 1, 0, 1, 0, 0, 0, 1]
# Update the count array: [0, 2, 3, 4, 4, 5, 5, 5, 5, 6]
# Distribute the numbers: [802, 2, 24, 45, 75, 170, 90, 66]
# Collect the numbers: [802, 2, 24, 45, 75, 170, 90, 66]
# 2. Second iteration (Next least significant digit):
# Create the count array: [1, 2, 1, 1, 1, 1, 0, 0, 0, 1]
# Update the count array: [1, 3, 4, 5, 6, 7, 7, 7, 7, 8]
# Distribute the numbers: [802, 2, 24, 45, 66, 75, 90, 170]
# Collect the numbers: [802, 2, 24, 45, 66, 75, 90, 170]
# 3. Third iteration (Most significant digit):
# Create the count array: [1, 1, 1, 1, 2, 2, 1, 0, 0, 0]
# Update the count array: [1, 2, 3, 4, 6, 8, 9, 9, 9, 9]
# Distribute the numbers: [2, 24, 45, 66, 75, 90, 170, 802]
# Collect the numbers: [2, 24, 45, 66, 75, 90, 170, 802]
# The final sorted list is [2, 24, 45, 66, 75, 90, 170, 802].
# Radix sort using counting sort works by sorting the numbers digit by digit, from the least significant digit to the most significant digit.
# The counting sort process distributes and collects the numbers based on each digit position, ensuring that the numbers are correctly ordered at
# each iteration. By repeating this process for each digit, the algorithm achieves a fully sorted list without the need for explicit element comparisons.
def countingSort(arr, exp1):
    """Stable counting sort of arr by the decimal digit selected by exp1.

    exp1 is a power of ten; the digit compared is (value // exp1) % 10.
    arr is reordered in place. Used as the per-digit pass of radix sort.
    """
    n = len(arr)
    output = [0] * n
    digit_counts = [0] * 10

    # Histogram of the selected digit.
    for value in arr:
        digit_counts[(value // exp1) % 10] += 1

    # Prefix sums turn counts into exclusive upper bounds of each digit's
    # slot range in the output.
    for d in range(1, 10):
        digit_counts[d] += digit_counts[d - 1]

    # Walk backwards so equal digits keep their relative order (stability),
    # which radix sort relies on across passes.
    for value in reversed(arr):
        d = (value // exp1) % 10
        digit_counts[d] -= 1
        output[digit_counts[d]] = value

    # Copy the sorted order back into the caller's list.
    arr[:] = output
def METHOD_NAME(arr):
    """Sort a list of non-negative integers in place using LSD radix sort.

    Repeatedly applies the stable countingSort, one decimal digit at a
    time, from the least to the most significant digit of the maximum.
    """
    # An empty list has nothing to sort; max() would raise on it.
    if not arr:
        return
    # Find the maximum number to know the number of digits
    max1 = max(arr)

    # Do counting sort for every digit. exp is 10^i where i is the
    # current digit number. Integer floor division avoids float round-off
    # for very large values (the original used `max1 / exp >= 1`, which
    # goes through float division).
    exp = 1
    while max1 // exp > 0:
        countingSort(arr, exp)
        exp *= 10
# Driver code
arr = [170, 45, 75, 90, 802, 24, 2, 66]

# Sort in place, then print the values space separated (trailing space,
# no newline — matches the original per-element print with end=" ").
METHOD_NAME(arr)
print(*arr, end=" ")
# Time Complexity -> O((n+k)*d)
# Space Complexity -> O(n+k)
6,188 | transform audio | # Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Contain the speech perturbation augmentation model."""
import numpy as np
from paddlespeech.s2t.frontend.augmentor.base import AugmentorBase
class SpeedPerturbAugmentor(AugmentorBase):
    """Augmentation model for adding speed perturbation."""

    def __init__(self, rng, min_speed_rate=0.9, max_speed_rate=1.1,
                 num_rates=3):
        """Speed perturbation.

        The speed perturbation in kaldi uses sox-speed instead of sox-tempo,
        and sox-speed just resamples the input, i.e. pitch and tempo are
        both changed.

        "Why use speed option instead of tempo -s in SoX for speed perturbation"
        https://groups.google.com/forum/#!topic/kaldi-help/8OOG7eE4sZ8

        See reference paper here:
        http://www.danielpovey.com/files/2015_interspeech_augmentation.pdf

        Args:
            rng (random.Random): Random generator object.
            min_speed_rate (float): Lower bound of new speed rate to sample;
                should not be smaller than 0.9.
            max_speed_rate (float): Upper bound of new speed rate to sample;
                should not be larger than 1.1.
            num_rates (int, optional): Number of discrete rates to allow.
                Defaults to 3. If a positive integer is provided, the range
                of speed rates is discretized into `num_rates` values; if a
                negative integer or 0 is provided, the full range of speed
                rates is sampled uniformly.

                Note: If a positive integer is provided and the resultant
                discretized range of rates contains the value '1.0', samples
                drawing rate=1.0 are simply skipped (no-op) to avoid
                unnecessary computation.

        Raises:
            ValueError: when a speed rate bound is outside the safe range.
        """
        if min_speed_rate < 0.9:
            raise ValueError(
                "Sampling speed below 0.9 can cause unnatural effects")
        if max_speed_rate > 1.1:
            raise ValueError(
                "Sampling speed above 1.1 can cause unnatural effects")
        self._min_rate = min_speed_rate
        self._max_rate = max_speed_rate
        self._rng = rng
        self._num_rates = num_rates
        # Pre-compute the discrete grid of allowed rates, inclusive of both
        # endpoints, only when discretization was requested.
        if num_rates > 0:
            self._rates = np.linspace(
                self._min_rate, self._max_rate, self._num_rates, endpoint=True)

    def __call__(self, x, uttid=None, train=True):
        # Augmentation is applied only at training time; evaluation data
        # passes through untouched.
        if not train:
            return x
        self.METHOD_NAME(x)
        return x

    def METHOD_NAME(self, audio_segment):
        """Sample a new speed rate from the given range and change the
        speed of the given audio clip.

        Note that this is an in-place transformation.

        :param audio_segment: Audio segment to add effects to.
        :type audio_segment: AudioSegment|SpeechSegment
        """
        # num_rates <= 0 means sample uniformly from the continuous range
        # (the documented contract; the original tested `< 0`, so
        # num_rates == 0 fell through to the grid branch and crashed with
        # AttributeError because self._rates was never created).
        if self._num_rates <= 0:
            speed_rate = self._rng.uniform(self._min_rate, self._max_rate)
        else:
            speed_rate = self._rng.choice(self._rates)

        # Skip perturbation in case of identity speed rate
        if speed_rate == 1.0:
            return
        audio_segment.change_speed(speed_rate)
6,189 | build list request | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import functools
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpResponse
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.mgmt.core.exceptions import ARMErrorFormat
from msrest import Serializer
from .. import models as _models
from .._vendor import _convert_request, _format_url_section
T = TypeVar('T')
# Callback signature accepted by azure-core pipeline operations: receives the
# raw pipeline response, the deserialized result, and response headers.
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]

_SERIALIZER = Serializer()
# Validation of request models is delegated to the service side.
_SERIALIZER.client_side_validation = False
def METHOD_NAME(
    subscription_id: str,
    location: str,
    extension_type_name: str,
    **kwargs: Any
) -> HttpRequest:
    """Build the GET request listing available versions of an extension type.

    Auto-generated request builder: fills the URL template with the escaped
    path arguments, then attaches the api-version query parameter and the
    JSON Accept header.
    """
    api_version = "2021-11-01-preview"
    accept = "application/json"
    # Construct URL
    url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/providers/Microsoft.KubernetesConfiguration/locations/{location}/extensionTypes/{extensionTypeName}/versions')
    path_format_arguments = {
        "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str', min_length=1),
        "location": _SERIALIZER.url("location", location, 'str'),
        "extensionTypeName": _SERIALIZER.url("extension_type_name", extension_type_name, 'str'),
    }

    url = _format_url_section(url, **path_format_arguments)

    # Construct parameters
    query_parameters = kwargs.pop("params", {})  # type: Dict[str, Any]
    query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')

    # Construct headers
    header_parameters = kwargs.pop("headers", {})  # type: Dict[str, Any]
    header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')

    return HttpRequest(
        method="GET",
        url=url,
        params=query_parameters,
        headers=header_parameters,
        **kwargs
    )
class ExtensionTypeVersionsOperations(object):
    """ExtensionTypeVersionsOperations operations.

    You should not instantiate this class directly. Instead, you should create a Client instance that
    instantiates it for you and attaches it as an attribute.

    :ivar models: Alias to model classes used in this operation group.
    :type models: ~azure.mgmt.kubernetesconfiguration.v2021_11_01_preview.models
    :param client: Client for service requests.
    :param config: Configuration of service client.
    :param serializer: An object model serializer.
    :param deserializer: An object model deserializer.
    """

    models = _models

    def __init__(self, client, config, serializer, deserializer):
        self._client = client
        self._serialize = serializer
        self._deserialize = deserializer
        self._config = config

    @distributed_trace
    def list(
        self,
        location: str,
        extension_type_name: str,
        **kwargs: Any
    ) -> Iterable["_models.ExtensionVersionList"]:
        """List available versions for an Extension Type.

        :param location: extension location.
        :type location: str
        :param extension_type_name: Extension type name.
        :type extension_type_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either ExtensionVersionList or the result of
         cls(response)
        :rtype:
         ~azure.core.paging.ItemPaged[~azure.mgmt.kubernetesconfiguration.v2021_11_01_preview.models.ExtensionVersionList]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.ExtensionVersionList"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))

        def prepare_request(next_link=None):
            # First page uses the templated URL; later pages reuse the
            # service-provided nextLink but must still be issued as GET.
            if not next_link:
                request = METHOD_NAME(
                    subscription_id=self._config.subscription_id,
                    location=location,
                    extension_type_name=extension_type_name,
                    template_url=self.list.metadata['url'],
                )

                request = _convert_request(request)
                request.url = self._client.format_url(request.url)

            else:
                request = METHOD_NAME(
                    subscription_id=self._config.subscription_id,
                    location=location,
                    extension_type_name=extension_type_name,
                    template_url=next_link,
                )

                request = _convert_request(request)
                request.url = self._client.format_url(request.url)
                request.method = "GET"
            return request

        def extract_data(pipeline_response):
            # Deserialize one page into (next page link, iterator of items).
            deserialized = self._deserialize("ExtensionVersionList", pipeline_response)
            list_of_elem = deserialized.versions
            if cls:
                list_of_elem = cls(list_of_elem)
            return deserialized.next_link or None, iter(list_of_elem)

        def get_next(next_link=None):
            # Fetch one page, mapping HTTP error codes to azure-core exceptions.
            request = prepare_request(next_link)

            pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
            response = pipeline_response.http_response

            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
                raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

            return pipeline_response


        return ItemPaged(
            get_next, extract_data
        )
    list.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.KubernetesConfiguration/locations/{location}/extensionTypes/{extensionTypeName}/versions'}  # type: ignore
6,190 | translate on populate x | # -*- coding: utf-8 -*-
"""
Copyright (C) 2014-2020 OSMC (KodeKarnage)
This file is part of script.module.osmcsetting.updates
SPDX-License-Identifier: GPL-2.0-or-later
See LICENSES/GPL-2.0-or-later for more information.
"""
import os
import subprocess
from osmccommon import osmc_setting
from osmccommon.osmc_logging import StandardLogger
addon_id = "script.module.osmcsetting.updates"
# Module-level logger bound to this add-on's id and this file's name.
log = StandardLogger(addon_id, os.path.basename(__file__)).log
class OSMCSettingClass(osmc_setting.OSMCSettingClass):
def __init__(self):
    super(OSMCSettingClass, self).__init__()

    self.addon_id = addon_id

    # Short name and description shown in the OSMC settings UI, plus
    # their localisation string ids.
    self.short_name = 'Updates'
    self.short_name_i18n = 32060

    self.description = 'Manage updates'
    self.description_i18n = 32061

    # Presence of this file triggers a factory reset on next boot
    # (see run(), which creates/removes it via sudo).
    self.reset_file = '/home/osmc/.factoryreset'

    # Maps setting names to dicts holding the current 'setting_value'
    # plus optional 'apply'/'translate' hooks; currently empty — the
    # commented template below documents the expected shape.
    self.setting_data_method = {}

    # 'mercury': {
    # 'setting_value' : '',
    # 'apply' : self.method_to_apply_changes_X,
    # 'translate' : self.translate_on_populate_X,
    # },
    # 'venus': {'setting_value' : ''},
    # 'earth': {'setting_value' : ''},
    # 'mars': {'setting_value' : ''},
    # 'jupiter': {'setting_value' : ''},
    # 'saturn': {'setting_value' : ''},
    # 'uranus': {'setting_value' : ''},
    # 'neptune': {'setting_value' : ''},
    # 'pluto': {'setting_value' : ''},
    # }

    self.populate_setting_data_method()

    # Flag read by the settings framework; none of these settings
    # currently require a reboot.
    self.reboot_required = False

    log('START')
    for x, k in self.setting_data_method.items():
        log("%s = %s" % (x, k.get('setting_value', 'no setting value')))
def populate_setting_data_method(self):
    """Refresh setting_data_method values from the add-on's settings.xml.

    For each known setting, reads the raw value and stores it under
    'setting_value', passing it through the entry's optional 'translate'
    hook first (used when the persisted form differs from the displayed
    form, e.g. values sourced from config.txt).
    """
    latest_settings = self.settings_retriever_xml()

    for key, entry in self.setting_data_method.items():
        translate_method = entry.get('translate')
        raw_value = latest_settings[key]
        # Translate when a hook is present, otherwise store verbatim.
        entry['setting_value'] = translate_method(raw_value) if translate_method else raw_value
def run(self):
    """Open the settings dialog and sync the factory-reset marker file.

    Before opening, mirrors the on-disk reset file into the
    'kodi_reset' setting; after the dialog closes, creates or removes
    the file (via sudo) to match the user's choice.
    """
    # check if kodi_reset file is present, if it is then set the bool as true, else set as false
    if os.path.isfile(self.reset_file):
        log('Kodi reset file found')
        self.me.setSetting('kodi_reset', 'true')
    else:
        log('Kodi reset file not found')
        self.me.setSetting('kodi_reset', 'false')

    # Blocks until the user closes the settings dialog.
    self.me.openSettings()

    # check the kodi reset setting, if it is true then create the kodi_reset file, otherwise remove that file
    if self.me.getSetting('kodi_reset') == 'true':
        log('creating kodi reset file')
        subprocess.call(['sudo', 'touch', self.reset_file])
    else:
        subprocess.call(['sudo', 'rm', self.reset_file])

    log('END')
    for x, k in self.setting_data_method.items():
        log("%s = %s" % (x, k.get('setting_value', 'no setting value')))
def settings_retriever_xml(self):
    """Read every known setting from the add-on's settings.xml.

    Returns a {setting_name: value} dict covering the keys of
    setting_data_method.
    """
    return {key: self.me.getSetting(key) for key in self.setting_data_method}
def method_to_apply_changes_X(self, data):
    """
    Method for implementing changes to setting x.

    Placeholder 'apply' hook referenced by the commented-out template in
    __init__; currently only logs that it was invoked.
    """
    log('method_to_apply_changes_X')
def METHOD_NAME(self, data, reverse=False):
"""
Method to translate the data before adding to the setting_data_method dict.
This is useful if you are getting the populating from an external source like the Pi's config.txt.
This method could end with a call to another method to populate the settings.xml from that same source.
"""
# this is how you would negate the translating of the data when the settings window closes.
if reverse:
return data |
6,191 | test color scheduled | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
###############################################################################
#
# Copyright 2006 - 2021, Tomas Babej, Paul Beckingham, Federico Hernandez.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
# https://www.opensource.org/licenses/mit-license.php
#
###############################################################################
import sys
import os
import unittest
sys.path.append(os.path.dirname(os.path.abspath(__file__)))
from basetest import Task, TestCase
REPO_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
class TestBug1379(TestCase):
def setUp(self):
    """Create a Task sandbox with colour forced on over an all-clear theme."""
    self.t = Task()

    # Themes are a special case that cannot be set via "task config"
    with open(self.t.taskrc, 'a') as fh:
        fh.write("include " + REPO_DIR + "/doc/rc/no-color.theme\n")

    self.t.config("color.alternate", "")
    self.t.config("_forcecolor", "1")
    self.t.config("color.label", "")
    self.t.config("color.label.sort", "")

    # Regex fragments matching the ANSI colour-on/off codes; '[' is
    # regex-escaped. The backslash before '[' must itself be escaped in
    # the string literal: the original "\033\[31m" relied on the invalid
    # string escape '\[' (a SyntaxWarning, and a SyntaxError on future
    # Pythons). "\\[" produces the identical string value.
    self.RED = "\033\\[31m"
    self.CLEAR = "\033\\[0m"
def test_color_BLOCKED(self):
    """color.tag.BLOCKED changes color of BLOCKED tasks"""
    self.t.config("color.tag.BLOCKED", "red")
    self.t("add Blocks")
    # Task 2 depends on task 1, so task 2 carries the virtual tag BLOCKED.
    self.t("add dep:1 Blocked")

    code, out, err = self.t("all +BLOCKED")
    self.assertRegex(out, self.RED + r".*Blocked.*" + self.CLEAR)
def test_color_UNBLOCKED(self):
"""color.tag.UNBLOCKED changes color of UNBLOCKED tasks"""
self.t.config("color.tag.UNBLOCKED", "red")
self.t("add Blocks")
self.t("add dep:1 Blocked")
code, out, err = self.t("all +UNBLOCKED")
self.assertRegex(out, self.RED + r".*Blocks.*" + self.CLEAR)
def test_color_BLOCKING(self):
"""color.tag.BLOCKING changes color of BLOCKING tasks"""
self.t.config("color.tag.BLOCKING", "red")
self.t("add Blocks")
self.t("add dep:1 Blocked")
code, out, err = self.t("all +BLOCKING")
self.assertRegex(out, self.RED + r".*Blocks.*" + self.CLEAR)
def METHOD_NAME(self):
"""color.tag.SCHEDULED changes color of SCHEDULED tasks"""
self.t.config("color.tag.SCHEDULED", "red")
self.t("add scheduled:tomorrow Have fun")
code, out, err = self.t("all +SCHEDULED")
self.assertRegex(out, self.RED + r".*Have fun.*" + self.CLEAR)
def test_color_UNTIL(self):
"""color.tag.UNTIL changes color of UNTIL tasks"""
self.t.config("color.tag.UNTIL", "red")
self.t("add until:tomorrow Urgent")
code, out, err = self.t("all +UNTIL")
self.assertRegex(out, self.RED + r".*Urgent.*" + self.CLEAR)
def test_color_WAITING(self):
"""color.tag.WAITING changes color of WAITING tasks"""
self.t.config("color.tag.WAITING", "red")
self.t("add wait:tomorrow Tomorrow")
code, out, err = self.t("all +WAITING")
self.assertRegex(out, self.RED + r".*Tomorrow.*" + self.CLEAR)
def test_color_PARENT(self):
"""color.tag.PARENT changes color of PARENT tasks"""
self.t.config("color.tag.PARENT", "red")
self.t("add recur:daily due:tomorrow Email")
code, out, err = self.t("all +PARENT")
self.assertRegex(out, self.RED + r".*Email.*" + self.CLEAR)
def test_color_CHILD(self):
"""color.tag.CHILD changes color of CHILD tasks"""
self.t.config("color.tag.CHILD", "red")
self.t("add recur:daily due:tomorrow Email")
code, out, err = self.t("all +CHILD")
self.assertRegex(out, self.RED + r".*Email.*" + self.CLEAR)
def test_color_PENDING(self):
"""color.tag.PENDING changes color of PENDING tasks"""
self.t.config("color.tag.PENDING", "red")
self.t("add Pending")
code, out, err = self.t("all +PENDING")
self.assertRegex(out, self.RED + r".*Pending.*" + self.CLEAR)
def test_color_COMPLETED(self):
"""color.tag.COMPLETED changes color of COMPLETED tasks"""
self.t.config("color.tag.COMPLETED", "red")
self.t.config("color.completed", "")
self.t("add Complete")
self.t("1 done")
code, out, err = self.t("all +COMPLETED")
self.assertRegex(out, self.RED + r".*Complete.*" + self.CLEAR)
def test_color_DELETED(self):
"""color.tag.DELETED changes color of DELETED tasks"""
self.t.config("color.tag.DELETED", "red")
self.t.config("color.deleted", "")
self.t("add Delete")
self.t("1 delete", input="y\n")
code, out, err = self.t("all +DELETED")
self.assertRegex(out, self.RED + r".*Delete.*" + self.CLEAR)
if __name__ == "__main__":
    from simpletap import TAPTestRunner
    # Emit TAP-formatted results so the taskwarrior test harness can parse them.
    unittest.main(testRunner=TAPTestRunner())
# vim: ai sts=4 et sw=4 ft=python |
6,192 | whitespace | import re
from easy_thumbnails import processors
from .settings import FILER_SUBJECT_LOCATION_IMAGE_DEBUG, FILER_WHITESPACE_COLOR
try:
from PIL import Image, ImageDraw
except ImportError:
try:
import Image
import ImageDraw
except ImportError:
raise ImportError("The Python Imaging Library was not found.")
# Matches a subject location given as a "x,y" string of two integers.
RE_SUBJECT_LOCATION = re.compile(r'^(\d+),(\d+)$')


def normalize_subject_location(subject_location):
    """Coerce *subject_location* into an ``(x, y)`` tuple of ints.

    Accepts either an ``"x,y"`` string or any two-item sequence of
    int-convertible values.  Returns ``False`` for falsy input or input
    that cannot be normalized.
    """
    if not subject_location:
        return False
    if isinstance(subject_location, str):
        match = RE_SUBJECT_LOCATION.match(subject_location)
        if not match:
            return False
        return (int(match.group(1)), int(match.group(2)))
    try:
        return (int(subject_location[0]), int(subject_location[1]))
    except (TypeError, ValueError):
        return False
def scale_and_crop_with_subject_location(im, size, subject_location=False,
                                         zoom=None, crop=False, upscale=False,
                                         **kwargs):
    """
    Like ``easy_thumbnails.processors.scale_and_crop``, but will use the
    coordinates in ``subject_location`` to make sure that that part of the
    image is in the center or at least somewhere on the cropped image.
    Please note that this does *not* work correctly if the image has been
    resized by a previous processor (e.g ``autocrop``).

    ``crop`` needs to be set for this to work, but any special cropping
    parameters will be ignored.

    :param im: source PIL image.
    :param size: target ``(width, height)``.
    :param subject_location: ``"x,y"`` string or 2-sequence, in coordinates
        of the *original* image.
    :param zoom: optional percentage zoom, as in ``scale_and_crop``.
    :param crop: must be truthy for subject-aware cropping to apply.
    :param upscale: allow enlarging the source image.
    :returns: the scaled/cropped PIL image.
    """
    subject_location = normalize_subject_location(subject_location)
    if not (subject_location and crop):
        # use the normal scale_and_crop
        return processors.scale_and_crop(im, size, zoom=zoom, crop=crop,
                                         upscale=upscale, **kwargs)

    # for here on we have a subject_location and cropping is on
    # --snip-- this is a copy and paste of the first few
    # lines of ``scale_and_crop``
    source_x, source_y = (float(v) for v in im.size)
    target_x, target_y = (float(v) for v in size)

    if crop or not target_x or not target_y:
        scale = max(target_x / source_x, target_y / source_y)
    else:
        scale = min(target_x / source_x, target_y / source_y)

    # Handle one-dimensional targets.
    if not target_x:
        target_x = source_x * scale
    elif not target_y:
        target_y = source_y * scale

    if zoom:
        if not crop:
            target_x = round(source_x * scale)
            target_y = round(source_y * scale)
        scale *= (100 + int(zoom)) / 100.0

    if scale < 1.0 or (scale > 1.0 and upscale):
        try:
            im = im.resize((int(source_x * scale), int(source_y * scale)),
                           resample=Image.LANCZOS)
        except AttributeError:  # pragma: no cover
            # Older PIL/Pillow versions spell the resample filter ANTIALIAS.
            im = im.resize((int(source_x * scale), int(source_y * scale)),
                           resample=Image.ANTIALIAS)
    # --endsnip-- begin real code

    # ===============================
    # subject location aware cropping
    # ===============================
    # res_x, res_y: the resolution of the possibly already resized image
    res_x, res_y = (float(v) for v in im.size)

    # subj_x, subj_y: the position of the subject (maybe already re-scaled)
    subj_x = res_x * float(subject_location[0]) / source_x
    subj_y = res_y * float(subject_location[1]) / source_y
    # ex/ey: margin that must be cropped away on each side to reach the
    # target size when the box is centered.
    ex = (res_x - min(res_x, target_x)) / 2
    ey = (res_y - min(res_y, target_y)) / 2
    fx, fy = res_x - ex, res_y - ey

    # box_width, box_height: dimensions of the target image
    box_width, box_height = fx - ex, fy - ey

    # try putting the box in the center around the subject point
    # (this will be partially outside of the image in most cases)
    tex, tey = subj_x - (box_width / 2), subj_y - (box_height / 2)
    tfx, tfy = subj_x + (box_width / 2), subj_y + (box_height / 2)
    if tex < 0:
        # its out of the img to the left, move both to the right until tex is 0
        tfx = tfx - tex  # tex is negative!
        tex = 0
    elif tfx > res_x:
        # its out of the img to the right
        tex = tex - (tfx - res_x)
        tfx = res_x

    if tey < 0:
        # its out of the img to the top, move both to the bottom until tey is 0
        tfy = tfy - tey  # tey is negative!)
        tey = 0
    elif tfy > res_y:
        # its out of the img to the bottom
        tey = tey - (tfy - res_y)
        tfy = res_y

    if ex or ey:
        crop_box = ((int(tex), int(tey), int(tfx), int(tfy)))
        if FILER_SUBJECT_LOCATION_IMAGE_DEBUG:
            # draw ellipse on focal point for Debugging
            draw = ImageDraw.Draw(im)
            esize = 10
            draw.ellipse(((subj_x - esize, subj_y - esize),
                          (subj_x + esize, subj_y + esize)), outline="#FF0000")
        im = im.crop(crop_box)
    return im
def METHOD_NAME(image, size, METHOD_NAME=False, whitespace_color=None, **kwargs):
    """
    Easy-thumbnails processor that pads *image* with whitespace up to *size*.

    When the source is smaller than the target in one or both dimensions,
    the source is pasted centered onto a new RGBA canvas filled with
    ``whitespace_color`` (falling back to ``FILER_WHITESPACE_COLOR``, then
    ``'#fff'``).  If the source already fills the target, it is returned
    unchanged.  Disabled unless the processor flag is truthy.
    """
    if not METHOD_NAME:
        return image

    if whitespace_color is None:
        whitespace_color = FILER_WHITESPACE_COLOR
    if whitespace_color is None:
        whitespace_color = '#fff'

    old_image = image
    source_x, source_y = image.size
    target_x, target_y = size
    image = Image.new('RGBA', (target_x, target_y), whitespace_color)
    # PIL's paste() requires integer coordinates; the previous "/ 2" produced
    # floats on Python 3 and raised a TypeError, so use floor division.
    if source_x < target_x and source_y < target_y:  # whitespace all around
        image.paste(old_image, (
            (target_x - source_x) // 2, (target_y - source_y) // 2))
    elif source_x < target_x:  # whitespace on left and right only
        image.paste(old_image, ((target_x - source_x) // 2, 0))
    elif source_y < target_y:  # whitespace on top and bottom only
        image.paste(old_image, (0, (target_y - source_y) // 2))
    else:  # no whitespace needed
        image = old_image
    return image
6,193 | mktmp | from fud.stages import Stage, SourceType, Source
from fud.utils import shell, TmpDir
from fud.errors import MissingDynamicConfiguration
from pathlib import Path
# The temporary filename used for converting mrxl.data to verilog.data
_DATA_FILE = "data.json"
class MrXLStage(Stage):
    """
    Stage that invokes the MrXL frontend.
    """

    name = "mrxl"

    def __init__(self):
        """
        Initialize this stage. Initializing a stage *does not* construct its
        computation graph.
        """
        super().__init__(
            src_state="mrxl",
            target_state="calyx",
            input_type=SourceType.Path,
            output_type=SourceType.Stream,
            description="Compiles MrXL to Calyx.",
        )

    @staticmethod
    def pre_install():
        # No extra installation work is needed for this stage.
        pass

    @staticmethod
    def defaults():
        """
        Specify defaults that should be added to fud's configuration file when
        this stage is registered.
        """
        return {"exec": "mrxl", "flags": ""}

    def _define_steps(self, input, builder, config):
        """
        Define the steps that will execute in this stage. Each step represents
        a delayed computation that will occur when the stage is executed.
        """
        # Commands at the top-level are evaluated when the computation is being
        # staged
        cmd = config["stages", self.name, "exec"]
        flags = config.get(("stages", self.name, "flags")) or ""

        # Computations within a step are delayed from being executed until
        # the full execution pipeline is generated.
        @builder.step()
        def METHOD_NAME() -> SourceType.Directory:
            """
            Make temporary directory to store Verilator build files.
            """
            return TmpDir()

        @builder.step(description="Set stages.mrxl.prog as `input`")
        def set_mrxl_prog(mrxl_prog: SourceType.Path):
            # Record the program path so MrXLDataStage can retrieve it later.
            config["stages", "mrxl", "prog"] = str(mrxl_prog)

        @builder.step(
            description="Save verilog.data in `tmpdir` and update stages.verilog.data"
        )
        def save_data(tmpdir: SourceType.Directory, verilog_data: SourceType.String):
            save_loc = Path(tmpdir.name) / _DATA_FILE
            with open(save_loc, "w") as out:
                out.write(verilog_data)
            config["stages", "verilog", "data"] = save_loc

        @builder.step(description=cmd)
        def run_mrxl(mrxl_prog: SourceType.Path) -> SourceType.Stream:
            # Invoke the MrXL compiler binary on the input program.
            return shell(f"{cmd} {str(mrxl_prog)} {flags}")

        # Define a schedule using the steps.
        # A schedule *looks* like an imperative program but actually represents
        # a computation graph that is executed later on.
        mrxl_data = config.get(["stages", "mrxl", "data"])
        if mrxl_data is not None:
            # A data file was supplied: also run the data-conversion sub-stage
            # and stash its output where the verilog stage expects it.
            tmpdir = METHOD_NAME()
            set_mrxl_prog(input)
            mrxl_data_stage = MrXLDataStage()
            mrxl_data_stage_input = Source.path(mrxl_data)
            builder.ctx.append("mrxl-data")
            verilog_data = builder.also_do(
                mrxl_data_stage_input, mrxl_data_stage, config
            )
            builder.ctx.pop()
            verilog_data = builder.convert_source_to(verilog_data, SourceType.String)
            save_data(tmpdir, verilog_data)
        return run_mrxl(input)
class MrXLDataStage(Stage):
    """
    Stage that invokes the MrXL data converter.
    """

    name = "mrxl-data"

    def __init__(self):
        """
        Initialize this stage. Initializing a stage *does not* construct its
        computation graph.
        """
        super().__init__(
            src_state="mrxl-data",
            target_state="verilog-data",
            input_type=SourceType.Path,
            output_type=SourceType.Stream,
            description="Compiles MrXL-native input to Calyx-native input.",
        )

    @staticmethod
    def pre_install():
        # No extra installation work is needed for this stage.
        pass

    @staticmethod
    def defaults():
        """
        Specify defaults that should be added to fud's configuration file when
        this stage is registered.
        """
        return {}

    def _define_steps(self, input, builder, config):
        """
        Define the steps that will execute in this stage. Each step represents
        a delayed computation that will occur when the stage is executed.
        """
        # Commands at the top-level are evaluated when the computation is being
        # staged
        cmd = config["stages", "mrxl", "exec"]

        # Computations within a step are delayed from being executed until
        # the full execution pipeline is generated.
        @builder.step(description="Dynamically retrieve the value of stages.mrxl.prog")
        def get_mrxl_prog() -> SourceType.Path:
            # Reads the path that MrXLStage's set_mrxl_prog step stored.
            return Source(Path(config.get(["stages", "mrxl", "prog"])), SourceType.Path)

        @builder.step()
        def convert_mrxl_data_to_calyx_data(
            data_path: SourceType.Path, mrxl_prog: SourceType.Path
        ) -> SourceType.Stream:
            """
            Converts MrXL input into calyx input
            """
            return shell(f"{cmd} {str(mrxl_prog.data)} --data {data_path} --convert")

        # Define a schedule using the steps.
        # A schedule *looks* like an imperative program but actually represents
        # a computation graph that is executed later on.
        mrxl_prog = get_mrxl_prog()
        # NOTE(review): get_mrxl_prog() appears to return a step handle rather
        # than a plain value, so this None check may never trigger — confirm
        # against the builder API.
        if mrxl_prog is None:
            raise MissingDynamicConfiguration("mrxl.prog")
        return convert_mrxl_data_to_calyx_data(input, mrxl_prog)


# Export the defined stages to fud
__STAGES__ = [MrXLStage, MrXLDataStage]
6,194 | assert execution failed | import unittest
import unittest.mock as mock
import lvmlib
class ExecResultMixIn(object):
    """Mixin adding assertions about ``(returncode, stdout, stderr)`` tuples.

    Intended to be mixed into a ``unittest.TestCase`` subclass, which
    supplies ``assertEqual``.
    """

    def assertExecutionSucceeded(self, exec_result):
        """Assert that the fake executable exited with status 0."""
        rc, out, err = exec_result
        self.assertEqual(0, rc)

    def METHOD_NAME(self, exec_result):
        """Assert that the fake executable exited with status 1."""
        rc, out, err = exec_result
        self.assertEqual(1, rc)
class TestLVSubSystem(unittest.TestCase, ExecResultMixIn):
    """Tests for lvmlib.LVSubsystem's fake lvcreate/lvremove/dmsetup.

    The subsystem injects fake executables through an injector callback;
    these tests drive the fakes directly and inspect the resulting
    in-memory volume-group state.  Assignments whose values were never
    inspected have been dropped for clarity.
    """

    def test_lvcreate_is_mocked(self):
        executable_injector = mock.Mock()
        lvsubsystem = lvmlib.LVSubsystem(None, executable_injector)
        self.assertTrue(
            mock.call('/usr/sbin/lvcreate', lvsubsystem.fake_lvcreate)
            in executable_injector.mock_calls
        )

    def test_lvremove_is_mocked(self):
        executable_injector = mock.Mock()
        lvsubsystem = lvmlib.LVSubsystem(None, executable_injector)
        self.assertTrue(
            mock.call('/usr/sbin/lvremove', lvsubsystem.fake_lvremove)
            in executable_injector.mock_calls
        )

    def test_dmsetup_is_mocked(self):
        executable_injector = mock.Mock()
        lvsubsystem = lvmlib.LVSubsystem(None, executable_injector)
        self.assertTrue(
            mock.call('/sbin/dmsetup', lvsubsystem.fake_dmsetup)
            in executable_injector.mock_calls
        )

    def test_add_volume_group(self):
        lvsubsystem = lvmlib.LVSubsystem(None, mock.Mock())
        lvsubsystem.add_volume_group('vg')
        vg = lvsubsystem.get_volume_group('vg')
        self.assertEqual('vg', vg.name)

    def test_add_multiple_volume_groups(self):
        lvsubsystem = lvmlib.LVSubsystem(None, mock.Mock())
        lvsubsystem.add_volume_group('vg1')
        lvsubsystem.add_volume_group('vg2')
        lvsubsystem.add_volume_group('vg3')
        vg1 = lvsubsystem.get_volume_group('vg1')
        vg2 = lvsubsystem.get_volume_group('vg2')
        vg3 = lvsubsystem.get_volume_group('vg3')
        self.assertEqual('vg1', vg1.name)
        self.assertEqual('vg2', vg2.name)
        self.assertEqual('vg3', vg3.name)

    def test_fake_lvcreate_creates_volume(self):
        lvsubsystem = lvmlib.LVSubsystem(mock.Mock(), mock.Mock())
        lvsubsystem.add_volume_group('vg')
        lvsubsystem.fake_lvcreate(
            "someprog -n name -L 100 vg".split(), '')
        lv, = lvsubsystem.get_logical_volumes_with_name('name')
        self.assertEqual('name', lv.name)
        self.assertEqual(lvsubsystem.get_volume_group('vg'), lv.volume_group)
        self.assertTrue(lv.active)
        self.assertTrue(lv.zeroed)
        self.assertEqual(None, lv.tag)
        self.assertEqual(100, lv.size_mb)

    def test_fake_lvcreate_with_tags(self):
        lvsubsystem = lvmlib.LVSubsystem(mock.Mock(), mock.Mock())
        lvsubsystem.add_volume_group('vg')
        lvsubsystem.fake_lvcreate(
            "someprog -n name --addtag tagg -L 100 vg".split(), '')
        lv, = lvsubsystem.get_logical_volumes_with_name('name')
        self.assertEqual('tagg', lv.tag)

    def test_fake_lvcreate_inactive(self):
        lvsubsystem = lvmlib.LVSubsystem(mock.Mock(), mock.Mock())
        lvsubsystem.add_volume_group('vg')
        lvsubsystem.fake_lvcreate(
            "someprog -n name --inactive -L 100 vg".split(), '')
        lv, = lvsubsystem.get_logical_volumes_with_name('name')
        self.assertFalse(lv.active)

    def test_fake_lvcreate_non_zeroed(self):
        lvsubsystem = lvmlib.LVSubsystem(mock.Mock(), mock.Mock())
        lvsubsystem.add_volume_group('vg')
        exec_result = lvsubsystem.fake_lvcreate(
            "someprog -n name --zero n -L 100 vg".split(), '')
        lv, = lvsubsystem.get_logical_volumes_with_name('name')
        self.assertFalse(lv.zeroed)
        self.assertExecutionSucceeded(exec_result)

    def test_get_the_correct_volume(self):
        lvsubsystem = lvmlib.LVSubsystem(mock.Mock(), mock.Mock())
        lvsubsystem.add_volume_group('vg')
        lvsubsystem.fake_lvcreate(
            "someprog -n name1 --zero n -L 100 vg".split(), '')
        lvsubsystem.fake_lvcreate(
            "someprog -n name2 --zero n -L 200 vg".split(), '')
        lv, = lvsubsystem.get_logical_volumes_with_name('name1')
        self.assertEqual(100, lv.size_mb)
        lv, = lvsubsystem.get_logical_volumes_with_name('name2')
        self.assertEqual(200, lv.size_mb)
        # Now remove them
        lvsubsystem.fake_lvremove('someprog vg/name2'.split(), '')

    def test_fake_lvcreate_called_with_wrong_params(self):
        lvsubsystem = lvmlib.LVSubsystem(mock.Mock(), mock.Mock())
        lvsubsystem.add_volume_group('vg')
        exec_result = lvsubsystem.fake_lvcreate(
            "someprog --something-stupid -n name n -L 100 vg".split(), '')
        self.METHOD_NAME(exec_result)

    def test_fake_lvcreate_fails_if_no_volume_group_found(self):
        lvsubsystem = lvmlib.LVSubsystem(mock.Mock(), mock.Mock())
        exec_result = lvsubsystem.fake_lvcreate(
            "someprog -n name -L 100 nonexisting".split(), '')
        self.METHOD_NAME(exec_result)

    def test_fake_lvremove(self):
        lvsubsystem = lvmlib.LVSubsystem(mock.Mock(), mock.Mock())
        lvsubsystem.add_volume_group('vg')
        lvsubsystem.get_volume_group('vg').add_volume('lv', 100)
        exec_result = lvsubsystem.fake_lvremove(
            "someprog vg/lv".split(), '')
        self.assertExecutionSucceeded(exec_result)

    def test_fake_lvremove_with_force(self):
        lvsubsystem = lvmlib.LVSubsystem(mock.Mock(), mock.Mock())
        lvsubsystem.add_volume_group('vg')
        lvsubsystem.get_volume_group('vg').add_volume('lv', 100)
        exec_result = lvsubsystem.fake_lvremove(
            "someprog -f vg/lv".split(), '')
        self.assertExecutionSucceeded(exec_result)

    def test_fake_lvremove_with_bad_params(self):
        lvsubsystem = lvmlib.LVSubsystem(mock.Mock(), mock.Mock())
        lvsubsystem.add_volume_group('vg')
        lvsubsystem.get_volume_group('vg').add_volume('lv', 100)
        exec_result = lvsubsystem.fake_lvremove(
            "someprog -f vg/lv --stupid-parameter".split(), '')
        self.METHOD_NAME(exec_result)

    def test_fake_dmsetup_status_returns_zero(self):
        lvsubsystem = lvmlib.LVSubsystem(mock.Mock(), mock.Mock())
        exec_result = lvsubsystem.fake_dmsetup(
            "someprog status".split(), '')
        self.assertExecutionSucceeded(exec_result)
6,195 | save slide | # type_audiovideocast.py
import threading
import logging
import datetime
import zipfile
import os
from xml.dom import minidom
from django.conf import settings
from django.template.defaultfilters import slugify
from django.core.files.base import ContentFile
from pod.video.models import Video
from pod.video_encode_transcript import encode
from pod.enrichment.models import Enrichment
from ..utils import add_comment
DEFAULT_RECORDER_TYPE_ID = getattr(settings, "DEFAULT_RECORDER_TYPE_ID", 1)
ENCODE_VIDEO = getattr(settings, "ENCODE_VIDEO", "start_encode")
RECORDER_SKIP_FIRST_IMAGE = getattr(settings, "RECORDER_SKIP_FIRST_IMAGE", False)
if getattr(settings, "USE_PODFILE", False):
from pod.podfile.models import CustomImageModel
from pod.podfile.models import UserFolder
FILEPICKER = True
else:
FILEPICKER = False
from pod.main.models import CustomImageModel
log = logging.getLogger(__name__)
def process(recording):
    """Encode *recording* asynchronously on a background daemon thread."""
    # Lazy %-style args let the logging framework skip formatting when the
    # level is disabled.
    log.info("START PROCESS OF RECORDING %s", recording)
    t = threading.Thread(target=encode_recording, args=[recording])
    # Thread.setDaemon() is deprecated since Python 3.10; set the attribute.
    t.daemon = True
    t.start()
def save_video(recording, video_data, video_src):
    """Create and persist a pod Video from the recorded media bytes.

    The recorder's publication settings (draft/restricted state, channels,
    themes, disciplines, language, license, ...) are copied onto the new
    Video, then encoding is kicked off.

    Args:
        recording: the Recording being processed (provides user and title).
        video_data: raw media file content (bytes).
        video_src: original media file name; only its extension is reused.

    Returns:
        The saved Video instance (encoding runs asynchronously).
    """
    recorder = recording.recorder
    video = Video()
    video.owner = recording.user
    video.type = recorder.type
    nom, ext = os.path.splitext(video_src)
    ext = ext.lower()
    video.video.save(
        "record_" + slugify(recording.title) + ext,
        ContentFile(video_data),
        save=False,
    )
    # the name of the file stored on the server is derived from the title
    video.title = recording.title
    video.save()
    # add any additional owners
    video.additional_owners.add(*recorder.additional_users.all())
    # private access (draft mode)
    video.is_draft = recorder.is_draft
    # restricted access (optionally to groups or protected by a password)
    video.is_restricted = recorder.is_restricted
    video.restrict_access_to_groups.add(*recorder.restrict_access_to_groups.all())
    video.password = recorder.password
    # add any channels
    video.channel.add(*recorder.channel.all())
    # add any themes
    video.theme.add(*recorder.theme.all())
    # add any disciplines
    video.discipline.add(*recorder.discipline.all())
    # language choice
    video.main_lang = recorder.main_lang
    # cursus choice
    video.cursus = recorder.cursus
    # keywords
    video.tags = recorder.tags
    # transcript (only when the feature is enabled site-wide)
    if getattr(settings, "USE_TRANSCRIPTION", False):
        video.transcript = recorder.transcript
    # license
    video.licence = recorder.licence
    # allow downloading
    video.allow_downloading = recorder.allow_downloading
    # 360-degree video flag
    video.is_360 = recorder.is_360
    # disable comments
    video.disable_comment = recorder.disable_comment
    # add sites
    video.sites.add(*recorder.sites.all())
    video.save()
    encode_video = getattr(encode, ENCODE_VIDEO)
    encode_video(video.id)
    return video
def METHOD_NAME(data, filename, video, enrichment, recording):
    """Store one slide image and attach it to *enrichment*.

    *data* holds the raw image bytes extracted from the recording archive
    and *filename* its path inside the archive (used for naming).  When
    podfile is enabled the image is filed in the video's own folder,
    replacing any previous slide with the same name; empty archive members
    are logged on the recording instead of being saved.
    """
    if len(data):
        slide_name, ext = os.path.splitext(os.path.basename(filename))
        if FILEPICKER:
            # podfile enabled: ensure the user's folders exist
            homedir, created = UserFolder.objects.get_or_create(
                name="home", owner=video.owner
            )
            videodir, created = UserFolder.objects.get_or_create(
                name="%s" % video.slug, owner=video.owner
            )
            # drop stale copies of this slide from a previous processing run
            previousImage = CustomImageModel.objects.filter(
                name__startswith=slugify(video.title + "_" + slide_name),
                folder=videodir,
                created_by=video.owner,
            )
            for img in previousImage:
                img.delete()
            image = CustomImageModel(folder=videodir, created_by=video.owner)
            image.file.save(
                slugify(video.title + "_" + slide_name) + ext,
                ContentFile(data),
                save=True,
            )
            image.save()
        else:
            # podfile disabled: plain image model, no folder bookkeeping
            image = CustomImageModel()
            image.file.save(
                slugify(video.title + "_" + slide_name) + ext,
                ContentFile(data),
                save=True,
            )
            image.save()
        enrichment.type = "image"
        enrichment.image = image
        enrichment.save()
    else:
        # empty archive member: record the problem instead of failing
        add_comment(recording.id, "file %s is empty" % filename)
def save_enrichment(video, list_node_img, recording, media_name, zip):
    """Create slide Enrichments for *video* from SMIL ``<img>`` nodes.

    Each ``<img>`` node supplies a slide image (read from *zip*) and a
    start timecode.  Each enrichment initially spans one second; it is then
    stretched so it ends just before the next slide starts, and the last
    one is extended to the video's duration.
    """
    previousEnrichment = None
    i = 0
    # reprocessing: clear enrichments left over from any earlier run
    Enrichment.objects.filter(video=video).delete()
    start_img = 1 if RECORDER_SKIP_FIRST_IMAGE else 0
    for item in list_node_img[start_img:]:  # optionally skip the first slide
        i += 1
        add_comment(recording.id, ">> ITEM %s: %s" % (i, item.getAttribute("src")))
        filename = media_name + "/%s" % item.getAttribute("src")
        # SMIL "begin" is a float number of seconds; round to whole seconds
        timecode = float("%s" % item.getAttribute("begin"))
        timecode = int(round(timecode))
        add_comment(recording.id, ">> timecode %s" % timecode)
        # Enrichment
        enrichment = Enrichment.objects.create(
            video=video,
            title="slide %s" % i,
            start=timecode,
            end=timecode + 1,
            stop_video=False,
        )
        # Enrichment Image
        data = zip.read(filename)
        METHOD_NAME(data, filename, video, enrichment, recording)
        # stretch the previous slide so it lasts until this one begins
        if previousEnrichment is not None:
            previousEnrichment.end = (
                timecode - 1 if (timecode - 1 > 0) else previousEnrichment.end
            )
            previousEnrichment.save()
        previousEnrichment = enrichment
    # re-fetch: the duration may have been set by encoding in the meantime
    video = Video.objects.get(id=video.id)
    if previousEnrichment is not None and video.duration and video.duration > 0:
        previousEnrichment.end = video.duration
        previousEnrichment.save()
def get_video_source(xmldoc):
    """Return the ``src`` of the first <audio> or <video> node, or None.

    Audio takes precedence over video when both are present.
    """
    for tag in ("audio", "video"):
        nodes = xmldoc.getElementsByTagName(tag)
        if nodes:
            return nodes.item(0).getAttribute("src")
    return None
def open_zipfile(recording):
    """Open ``recording.source_file`` as a ZipFile.

    On failure (missing file or corrupt archive) the error is appended to
    the recording's comment and ``-1`` is returned, matching the sentinel
    the callers test for.
    """
    try:
        return zipfile.ZipFile(recording.source_file)
    except (FileNotFoundError, zipfile.BadZipFile) as e:
        add_comment(recording.id, "Error : %s" % e)
        return -1
def encode_recording(recording):
    """Process one audiovideocast recording zip end to end.

    Extracts the SMIL manifest (``<name>/cours.smil``) from the archive,
    saves the main media stream as a Video, creates slide enrichments from
    the ``<img>`` nodes, and finally renames the archive so it is not
    picked up again.  Progress and errors are appended to the recording's
    comment.  Returns ``-1`` on any error; returns ``None`` on success.
    """
    recording.comment = ""
    recording.save()
    add_comment(recording.id, "Start at %s\n--\n" % datetime.datetime.now())
    zip = open_zipfile(recording)
    if zip == -1:
        # open_zipfile already logged the problem
        return -1
    media_name, ext = os.path.splitext(os.path.basename(recording.source_file))
    add_comment(recording.id, "> media name %s" % media_name)
    try:
        smil = zip.open(media_name + "/cours.smil")
        xmldoc = minidom.parse(smil)
        smil.close()
    except KeyError as e:
        # zipfile raises KeyError when the archive member does not exist
        add_comment(recording.id, "Error : %s" % e)
        zip.close()
        return -1
    video_src = get_video_source(xmldoc)
    if video_src:
        add_comment(recording.id, "> video file %s" % video_src)
        video_data = zip.read(media_name + "/%s" % video_src)
        video = save_video(recording, video_data, video_src)
        list_node_img = xmldoc.getElementsByTagName("img")
        add_comment(recording.id, "> slides found %s" % len(list_node_img))
        if len(list_node_img):
            save_enrichment(video, list_node_img, recording, media_name, zip)
        else:
            add_comment(recording.id, "No slides node found")
            zip.close()
            return -1
    else:
        add_comment(recording.id, "Error : No video source found")
        zip.close()
        return -1
    zip.close()
    add_comment(recording.id, "End processing zip file")
    # mark the archive as processed so the recorder does not re-ingest it
    os.rename(recording.source_file, recording.source_file + "_treated")
6,196 | get can parser | from cereal import car
from openpilot.common.conversions import Conversions as CV
from opendbc.can.parser import CANParser
from opendbc.can.can_define import CANDefine
from openpilot.selfdrive.car.interfaces import CarStateBase
from openpilot.selfdrive.car.chrysler.values import DBC, STEER_THRESHOLD, RAM_CARS
class CarState(CarStateBase):
    """Parses Chrysler/Jeep/Ram CAN messages into a CarState message."""

    def __init__(self, CP):
        super().__init__(CP)
        self.CP = CP
        can_define = CANDefine(DBC[CP.carFingerprint]["pt"])

        self.auto_high_beam = 0
        self.button_counter = 0
        self.lkas_car_model = -1

        # Ram trucks report gear on a different CAN message than the
        # older Chrysler/Jeep platforms.
        if CP.carFingerprint in RAM_CARS:
            self.shifter_values = can_define.dv["Transmission_Status"]["Gear_State"]
        else:
            self.shifter_values = can_define.dv["GEAR"]["PRNDL"]

    def update(self, cp, cp_cam):
        """Build a CarState message from the latest parsed CAN values.

        *cp* is the powertrain-bus parser, *cp_cam* the camera-bus parser.
        """
        ret = car.CarState.new_message()

        # lock info
        ret.doorOpen = any([cp.vl["BCM_1"]["DOOR_OPEN_FL"],
                            cp.vl["BCM_1"]["DOOR_OPEN_FR"],
                            cp.vl["BCM_1"]["DOOR_OPEN_RL"],
                            cp.vl["BCM_1"]["DOOR_OPEN_RR"]])
        ret.seatbeltUnlatched = cp.vl["ORC_1"]["SEATBELT_DRIVER_UNLATCHED"] == 1

        # brake pedal
        ret.brake = 0
        ret.brakePressed = cp.vl["ESP_1"]['Brake_Pedal_State'] == 1  # Physical brake pedal switch
        ret.brakeLights = bool(cp.vl["ESP_1"]["BRAKE_PRESSED_ACC"])

        # gas pedal
        ret.gas = cp.vl["ECM_5"]["Accelerator_Position"]
        ret.gasPressed = ret.gas > 1e-5

        # car speed
        if self.CP.carFingerprint in RAM_CARS:
            ret.vEgoRaw = cp.vl["ESP_8"]["Vehicle_Speed"] * CV.KPH_TO_MS
            ret.gearShifter = self.parse_gear_shifter(self.shifter_values.get(cp.vl["Transmission_Status"]["Gear_State"], None))
        else:
            # average the left and right wheel-speed-derived values
            ret.vEgoRaw = (cp.vl["SPEED_1"]["SPEED_LEFT"] + cp.vl["SPEED_1"]["SPEED_RIGHT"]) / 2.
            ret.gearShifter = self.parse_gear_shifter(self.shifter_values.get(cp.vl["GEAR"]["PRNDL"], None))
        ret.vEgo, ret.aEgo = self.update_speed_kf(ret.vEgoRaw)
        ret.standstill = not ret.vEgoRaw > 0.001
        ret.wheelSpeeds = self.get_wheel_speeds(
            cp.vl["ESP_6"]["WHEEL_SPEED_FL"],
            cp.vl["ESP_6"]["WHEEL_SPEED_FR"],
            cp.vl["ESP_6"]["WHEEL_SPEED_RL"],
            cp.vl["ESP_6"]["WHEEL_SPEED_RR"],
            unit=1,
        )

        # button presses
        ret.leftBlinker, ret.rightBlinker = self.update_blinker_from_stalk(200, cp.vl["STEERING_LEVERS"]["TURN_SIGNALS"] == 1,
                                                                           cp.vl["STEERING_LEVERS"]["TURN_SIGNALS"] == 2)
        ret.genericToggle = cp.vl["STEERING_LEVERS"]["HIGH_BEAM_PRESSED"] == 1

        # steering wheel
        ret.steeringAngleDeg = cp.vl["STEERING"]["STEERING_ANGLE"] + cp.vl["STEERING"]["STEERING_ANGLE_HP"]
        ret.steeringRateDeg = cp.vl["STEERING"]["STEERING_RATE"]
        ret.steeringTorque = cp.vl["EPS_2"]["COLUMN_TORQUE"]
        ret.steeringTorqueEps = cp.vl["EPS_2"]["EPS_TORQUE_MOTOR"]
        ret.steeringPressed = abs(ret.steeringTorque) > STEER_THRESHOLD

        # cruise state
        # On Ram cars the ACC messages live on the camera bus.
        cp_cruise = cp_cam if self.CP.carFingerprint in RAM_CARS else cp

        ret.cruiseState.available = cp_cruise.vl["DAS_3"]["ACC_AVAILABLE"] == 1
        ret.cruiseState.enabled = cp_cruise.vl["DAS_3"]["ACC_ACTIVE"] == 1
        ret.cruiseState.speed = cp_cruise.vl["DAS_4"]["ACC_SET_SPEED_KPH"] * CV.KPH_TO_MS
        ret.cruiseState.nonAdaptive = cp_cruise.vl["DAS_4"]["ACC_STATE"] in (1, 2)  # 1 NormalCCOn and 2 NormalCCSet
        ret.cruiseState.standstill = cp_cruise.vl["DAS_3"]["ACC_STANDSTILL"] == 1
        ret.accFaulted = cp_cruise.vl["DAS_3"]["ACC_FAULTED"] != 0

        if self.CP.carFingerprint in RAM_CARS:
            # Auto High Beam isn't Located in this message on chrysler or jeep currently located in 729 message
            self.auto_high_beam = cp_cam.vl["DAS_6"]['AUTO_HIGH_BEAM_ON']
            ret.steerFaultTemporary = cp.vl["EPS_3"]["DASM_FAULT"] == 1
        else:
            ret.steerFaultTemporary = cp.vl["EPS_2"]["LKAS_TEMPORARY_FAULT"] == 1
            ret.steerFaultPermanent = cp.vl["EPS_2"]["LKAS_STATE"] == 4

        # blindspot sensors
        if self.CP.enableBsm:
            ret.leftBlindspot = cp.vl["BSM_1"]["LEFT_STATUS"] == 1
            ret.rightBlindspot = cp.vl["BSM_1"]["RIGHT_STATUS"] == 1

        self.lkas_car_model = cp_cam.vl["DAS_6"]["CAR_MODEL"]
        self.button_counter = cp.vl["CRUISE_BUTTONS"]["COUNTER"]

        return ret

    @staticmethod
    def get_cruise_messages():
        """CAN messages carrying ACC state, shared by both bus parsers."""
        messages = [
            ("DAS_3", 50),
            ("DAS_4", 50),
        ]
        return messages

    @staticmethod
    def METHOD_NAME(CP):
        """Build the powertrain-bus CANParser for this platform."""
        messages = [
            # sig_address, frequency
            ("ESP_1", 50),
            ("EPS_2", 100),
            ("ESP_6", 50),
            ("STEERING", 100),
            ("ECM_5", 50),
            ("CRUISE_BUTTONS", 50),
            ("STEERING_LEVERS", 10),
            ("ORC_1", 2),
            ("BCM_1", 1),
        ]

        if CP.enableBsm:
            messages.append(("BSM_1", 2))

        if CP.carFingerprint in RAM_CARS:
            messages += [
                ("ESP_8", 50),
                ("EPS_3", 50),
                ("Transmission_Status", 50),
            ]
        else:
            messages += [
                ("GEAR", 50),
                ("SPEED_1", 100),
            ]
            messages += CarState.get_cruise_messages()

        return CANParser(DBC[CP.carFingerprint]["pt"], messages, 0)

    @staticmethod
    def get_cam_can_parser(CP):
        """Build the camera-bus CANParser (bus 2)."""
        messages = [
            ("DAS_6", 4),
        ]

        if CP.carFingerprint in RAM_CARS:
            messages += CarState.get_cruise_messages()

        return CANParser(DBC[CP.carFingerprint]["pt"], messages, 2)
6,197 | test from pennylane | # Copyright (C) Unitary Fund
#
# This source code is licensed under the GPL license (v3) found in the
# LICENSE file in the root directory of this source tree.
"""Unit tests for Pennylane <-> Cirq conversions."""
import cirq
import numpy as np
import pennylane as qml
import pytest
from mitiq.interface.mitiq_pennylane import (
UnsupportedQuantumTapeError,
from_pennylane,
to_pennylane,
)
from mitiq.utils import _equal
def METHOD_NAME():
with qml.tape.QuantumTape() as tape:
qml.CNOT(wires=[0, 1])
circuit = from_pennylane(tape)
correct = cirq.Circuit(cirq.CNOT(*cirq.LineQubit.range(2)))
assert _equal(circuit, correct, require_qubit_equality=False)
def test_from_pennylane_unsupported_tapes():
    """Tapes with unsortable (mixed-type) wire labels are rejected."""
    with qml.tape.QuantumTape() as tape:
        qml.CZ(wires=[0, "a"])
    with pytest.raises(UnsupportedQuantumTapeError, match="could not sort"):
        from_pennylane(tape)
def test_no_variance():
    """Tapes containing terminal measurements are rejected."""
    with qml.tape.QuantumTape() as tape:
        qml.CNOT(wires=[0, 1])
        qml.expval(qml.PauliZ(0))
    with pytest.raises(
        UnsupportedQuantumTapeError,
        match="Measurements are not supported on the input tape.",
    ):
        from_pennylane(tape)
@pytest.mark.parametrize("random_state", range(10))
def test_to_from_pennylane(random_state):
    """Round-tripping a random circuit preserves its unitary."""
    circuit = cirq.testing.random_circuit(
        qubits=4, n_moments=2, op_density=1, random_state=random_state
    )
    converted = from_pennylane(to_pennylane(circuit))
    # Gates (e.g. iSWAP) aren't guaranteed to be preserved. Check unitary
    # instead of circuit equality.
    cirq.testing.assert_allclose_up_to_global_phase(
        cirq.unitary(converted), cirq.unitary(circuit), atol=1e-7
    )
def test_to_from_pennylane_cnot_same_gates():
    """A CNOT survives the round trip as the same gate, not just unitary."""
    qreg = cirq.LineQubit.range(2)
    circuit = cirq.Circuit(cirq.CNOT(*qreg))
    converted = from_pennylane(to_pennylane(circuit))
    assert _equal(circuit, converted, require_qubit_equality=False)
def test_to_from_pennylane_identity():
    """Empty and identity-only circuits round-trip correctly."""
    q = cirq.LineQubit(0)
    # Empty circuit
    circuit = cirq.Circuit()
    converted = from_pennylane(to_pennylane(circuit))
    assert _equal(circuit, converted, require_qubit_equality=False)
    circuit = cirq.Circuit(cirq.I(q))
    # Identity gate
    converted = from_pennylane(to_pennylane(circuit))
    # TODO: test circuit equality after Identity operation will be added
    # to PennyLane (https://github.com/PennyLaneAI/pennylane/issues/1632)
    assert np.allclose(cirq.unitary(circuit), cirq.unitary(converted))
def test_non_consecutive_wires_error():
    """Tapes with gaps in their wire numbering are rejected."""
    with qml.tape.QuantumTape() as tape:
        qml.CNOT(wires=[0, 2])
    with pytest.raises(
        UnsupportedQuantumTapeError,
        match="contiguously pack",
    ):
        from_pennylane(tape)
def test_integration():
    """Round trips three permuted layers of many PennyLane gate types
    through Cirq and back, then requires the unitaries to agree exactly
    (up to global phase)."""
    gates = [
        qml.PauliX(wires=0),
        qml.PauliY(wires=0),
        qml.PauliZ(wires=0),
        qml.S(wires=0),
        qml.T(wires=0),
        qml.RX(0.4, wires=0),
        qml.RY(0.4, wires=0),
        qml.RZ(0.4, wires=0),
        qml.Hadamard(wires=0),
        qml.Rot(0.4, 0.5, 0.6, wires=1),
        qml.CRot(0.4, 0.5, 0.6, wires=(0, 1)),
        qml.Toffoli(wires=(0, 1, 2)),
        qml.SWAP(wires=(0, 1)),
        qml.CSWAP(wires=(0, 1, 2)),
        qml.U1(0.4, wires=0),
        qml.U2(0.4, 0.5, wires=0),
        qml.U3(0.4, 0.5, 0.6, wires=0),
        qml.CRX(0.4, wires=(0, 1)),
        qml.CRY(0.4, wires=(0, 1)),
        qml.CRZ(0.4, wires=(0, 1)),
    ]
    layers = 3
    # Seed so each layer is a reproducible permutation of the gate list.
    np.random.seed(1967)
    gates_per_layers = [np.random.permutation(gates) for _ in range(layers)]
    with qml.tape.QuantumTape() as tape:
        np.random.seed(1967)
        # NOTE: the loop variable rebinds the outer `gates` list; the outer
        # list is not used again afterward, so the shadowing is harmless.
        for gates in gates_per_layers:
            for gate in gates:
                qml.apply(gate)
    base_circ = from_pennylane(tape)
    tape_recovered = to_pennylane(base_circ)
    circ_recovered = from_pennylane(tape_recovered)
    u_1 = cirq.unitary(base_circ)
    u_2 = cirq.unitary(circ_recovered)
    # Gate names need not survive the round trip; the unitaries must match
    # exactly (atol=0) up to global phase.
    cirq.testing.assert_allclose_up_to_global_phase(u_1, u_2, atol=0)
import re
from statistics import mean
from . import tokenizer
def joinLine(columns):
    """Join the given columns into a single tab-separated line."""
    separator = "\t"
    return separator.join(columns)
def unclump(s):
    """
    Replaces $'s with spaces. The reverse of clumpFractions.
    """
    # "$" is matched literally, so str.replace is clearer (and cheaper)
    # than the regex substitution it replaces.
    return s.replace("$", " ")
def getFeatures(token, index, tokens):
    """
    Returns a list of features for a given token.
    """
    sentence_length = len(tokens)
    cap_flag = "Yes" if isCapitalized(token) else "No"
    paren_flag = "Yes" if insideParenthesis(token, tokens) else "No"
    return [
        f"I{index}",
        f"L{METHOD_NAME(sentence_length)}",
        cap_flag + "CAP",
        paren_flag + "PAREN",
    ]
# Closed map of plural unit names to their singular forms.  Hoisted to
# module level so the dict is built once, not on every call.
_UNIT_SINGULARS = {
    "cups": "cup",
    "tablespoons": "tablespoon",
    "teaspoons": "teaspoon",
    "pounds": "pound",
    "ounces": "ounce",
    "cloves": "clove",
    "sprigs": "sprig",
    "pinches": "pinch",
    "bunches": "bunch",
    "slices": "slice",
    "grams": "gram",
    "heads": "head",
    "quarts": "quart",
    "stalks": "stalk",
    "pints": "pint",
    "pieces": "piece",
    "sticks": "stick",
    "dashes": "dash",
    "fillets": "fillet",
    "cans": "can",
    "ears": "ear",
    "packages": "package",
    "strips": "strip",
    "bulbs": "bulb",
    "bottles": "bottle",
}


def singularize(word):
    """
    A poor replacement for the pattern.en singularize function, but ok for now.

    Known plural unit names are mapped to their singular form; any other
    word is returned unchanged.
    """
    return _UNIT_SINGULARS.get(word, word)
def isCapitalized(token):
    """
    Returns true if a given token starts with a capital letter.
    """
    # re.match anchors at the start of the string, so no "^" is needed.
    return bool(re.match(r"[A-Z]", token))
def METHOD_NAME(actualLength):
    """
    Buckets the length of the ingredient into 6 buckets.

    Returns the smallest bucket boundary (as a string) that is strictly
    greater than the length, or "X" for lengths of 20 or more.
    """
    boundaries = (4, 8, 12, 16, 20)
    return next((str(b) for b in boundaries if actualLength < b), "X")
def insideParenthesis(token, tokens):
    """
    Returns true if the word is inside parenthesis in the phrase.

    A parenthesis token itself always counts as "inside".
    """
    if token in ("(", ")"):
        return True
    phrase = " ".join(tokens)
    # An opening paren before the token and a closing paren after it.
    pattern = r".*\(.*" + re.escape(token) + r".*\).*"
    return re.match(pattern, phrase) is not None
def displayIngredient(ingredient):
    """
    Format a list of (tag, [tokens]) tuples as an HTML string for display.

        displayIngredient([("qty", ["1"]), ("name", ["cat", "pie"])])
        # => <span class='qty'>1</span><span class='name'>cat pie</span>

    (The spans are joined with no separator; the old docstring example
    incorrectly showed a space between them.)
    """
    spans = [
        "<span class='{}'>{}</span>".format(tag, " ".join(tokens))
        for tag, tokens in ingredient
    ]
    return "".join(spans)
# HACK: fix this
def smartJoin(words):
    """
    Joins list of words with spaces, but is smart about not adding spaces
    before commas or just inside parentheses.
    """
    # Renamed from `input`, which shadowed the builtin of the same name.
    joined = " ".join(words)
    # no space before a comma: " , " -> ", "
    joined = joined.replace(" , ", ", ")
    # no space after an opening paren: "( " -> "("
    joined = joined.replace("( ", "(")
    # no space before a closing paren: " )" -> ")"
    joined = joined.replace(" )", ")")
    return joined
def import_data(lines):
    """
    This thing takes the output of CRF++ and turns it into an actual
    data structure.

    Returns a list of dicts, one per ingredient, mapping each tag name
    (qty/unit/name/comment/...) to its joined token string, plus an
    "input" key holding the reassembled phrase and a "confidence" dict
    of per-tag average confidences.
    """
    data = [{}]
    display = [[]]
    prevTag = None
    confidence_all = [{}]
    #
    # iterate lines in the data file, which looks like:
    #
    #   # 0.511035
    #   1/2       I1  L12  NoCAP  X  B-QTY/0.982850
    #   teaspoon  I2  L12  NoCAP  X  B-UNIT/0.982200
    #   fresh     I3  L12  NoCAP  X  B-COMMENT/0.716364
    #   thyme     I4  L12  NoCAP  X  B-NAME/0.816803
    #   leaves    I5  L12  NoCAP  X  I-NAME/0.960524
    #
    #   # 0.505999
    #   Black     I1  L8   YesCAP X  B-NAME/0.765461
    #   pepper    I2  L8   NoCAP  X  I-NAME/0.756614
    #
    # i.e. the output of crf_test -v 1
    #
    for line in lines:
        # blank line starts a new ingredient
        if line in ("", "\n"):
            data.append({})
            display.append([])
            prevTag = None
            confidence_all.append({})
        # ignore comments
        elif line[0] == "#":
            pass
        # otherwise it's a token
        # e.g.: potato \t I2 \t L5 \t NoCAP \t B-NAME/0.978253
        else:
            columns = re.split("\t", line.strip())
            # unclump fractions
            token = unclump(columns[0].strip())
            # turn B-NAME/123 back into "name"
            tag, confidence = re.split(r"/", columns[-1], maxsplit=1)
            tag = re.sub(r"^[BI]\-", "", tag).lower()
            # ---- CONFIDENCE ----
            # (The original code branched on prevTag here, but both
            # branches were identical, so the branch has been removed.)
            confidence_all[-1].setdefault(tag, []).append(confidence)
            # ---- DISPLAY ----
            # Group consecutive same-tag tokens so the original display
            # name can be rebuilt later:
            #   display[-1]        -> current ingredient
            #   display[-1][-1]    -> current (tag, [tokens]) group
            if prevTag != tag:
                display[-1].append((tag, [token]))
                prevTag = tag
            else:
                display[-1][-1][1].append(token)
            # ---- DATA ----
            # HACK: If this token is a unit, singularize it so Scoop accepts it.
            # (Only the data structure gets the singular form; display keeps
            # the raw token, matching the original behavior.)
            if tag == "unit":
                token = singularize(token)
            data[-1].setdefault(tag, []).append(token)
    # reassemble the output into a list of dicts.
    # NOTE(review): empty ingredients are filtered from `output` but not
    # from `confidence_all`, so a mid-list empty ingredient would shift
    # the index pairing below — preserved from the original; verify.
    output = [
        {k: smartJoin(tokens) for k, tokens in ingredient.items()}
        for ingredient in data
        if len(ingredient)
    ]
    # Average the per-tag confidences, then the overall average per
    # ingredient.  Values are replaced in place; keys are unchanged, so
    # mutating during iteration is safe.
    for i, c in enumerate(confidence_all):
        avg_of_all = []
        for k, v in c.items():
            avg = round(mean(float(x) for x in v), 2)
            avg_of_all.append(avg)
            confidence_all[i][k] = avg
        if avg_of_all:
            confidence_all[i]["average"] = round(mean(avg_of_all), 2)
    # Add the raw ingredient phrase and the confidence summary.
    for i, _ in enumerate(output):
        output[i]["input"] = smartJoin(
            [" ".join(tokens) for _, tokens in display[i]])
        output[i]["confidence"] = confidence_all[i]
    return output
def export_data(lines):
    """Parse "raw" ingredient lines into CRF-ready output"""
    crf_lines = []
    for raw_line in lines:
        # Strip any HTML-ish tags before tokenizing.
        cleaned = re.sub("<[^<]+?>", "", raw_line)
        line_tokens = tokenizer.tokenize(cleaned)
        crf_lines.extend(
            joinLine([tok] + getFeatures(tok, pos + 1, line_tokens))
            for pos, tok in enumerate(line_tokens)
        )
        # Blank line separates ingredients in CRF input.
        crf_lines.append("")
    return "\n".join(crf_lines)
# -*- coding: utf-8 -*-
# Copyright 2021 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import flask
import json
import logging
import re
import urllib
from email import utils
from google.appengine.api import mail
import settings
from framework import cloud_tasks_helpers
# Parsing very large messages could cause out-of-memory errors.
MAX_BODY_SIZE = 20 * 1024 * 1024 # 20 MB
def require_task_header():
    """Abort if this is not a Google Cloud Tasks request."""
    # Local and unit-test runs don't carry Cloud Tasks headers.
    running_locally = settings.UNIT_TEST_MODE or settings.DEV_MODE
    if running_locally:
        return
    if 'X-AppEngine-QueueName' in flask.request.headers:
        return
    flask.abort(403, msg='Lacking X-AppEngine-QueueName header')
def get_param(request, name, required=True):
    """Get the specified JSON parameter."""
    payload = request.get_json(force=True)
    value = payload.get(name)
    # Falsy values ('' / None / 0) count as missing, matching the original.
    if required and not value:
        flask.abort(400, msg='Missing parameter %r' % name)
    return value
def handle_outbound_mail_task():
    """Task to send a notification email to one recipient.

    Expects a JSON body with 'to', 'subject', and 'html' (required) and
    'from_user', 'references', 'reply_to' (optional).  Aborts 403 unless
    the request carries the Cloud Tasks header (see require_task_header).
    """
    require_task_header()
    to = get_param(flask.request, 'to')
    from_user = get_param(flask.request, 'from_user', required=False)
    subject = get_param(flask.request, 'subject')
    email_html = get_param(flask.request, 'html')
    references = get_param(flask.request, 'references', required=False)
    reply_to = get_param(flask.request, 'reply_to', required=False)
    # Redirect all mail to a configured sink address (except mail to the
    # review-comment mailing list) — presumably for staging/dev; verify.
    if settings.SEND_ALL_EMAIL_TO and to != settings.REVIEW_COMMENT_MAILING_LIST:
        to_user, to_domain = to.split('@')
        to = settings.SEND_ALL_EMAIL_TO % {'user': to_user, 'domain': to_domain}
    sender = 'Chromestatus <admin@%s.appspotmail.com>' % settings.APP_ID
    if from_user:
        # Mark mail as sent "via Chromestatus" on behalf of a user; the
        # "+" suffix keeps the sender within the app's appspotmail domain.
        sender = '%s via Chromestatus <admin+%s@%s.appspotmail.com>' % (
            from_user, from_user, settings.APP_ID)
    message = mail.EmailMessage(
        sender=sender, to=to, subject=subject, html=email_html)
    if reply_to:
        message.reply_to = reply_to
    message.check_initialized()
    if references:
        # Set threading headers so mail clients group this message with
        # the referenced one.
        message.headers = {
            'References': references,
            'In-Reply-To': references,
        }
    # Log the outgoing message (body truncated to the configured limit).
    logging.info('Will send the following email:\n')
    logging.info('Sender: %s', message.sender)
    logging.info('To: %s', message.to)
    logging.info('Subject: %s', message.subject)
    if reply_to:
        logging.info('Reply-To: %s', message.reply_to)
    logging.info('References: %s', references or '(not included)')
    logging.info('In-Reply-To: %s', references or '(not included)')
    logging.info('Body:\n%s', message.html[:settings.MAX_LOG_LINE])
    if settings.SEND_EMAIL:
        message.send()
        logging.info('Email sent')
    else:
        logging.info('Email not sent because of settings.SEND_EMAIL')
    return {'message': 'Done'}
# NOTE(review): these two patterns look like quoted-printable artifacts
# (soft line break "=\r\n" and the "=3D" encoding of "="), but neither is
# referenced in this module's visible code — presumably used elsewhere;
# verify before removing.
BAD_WRAP_RE = re.compile('=\r\n')
BAD_EQ_RE = re.compile('=3D')

# Flag presumably consumed by the surrounding framework to mark these
# handlers as internal-only — confirm against the framework code.
IS_INTERNAL_HANDLER = True
# For docs on AppEngine's bounce email handling, see:
# https://cloud.google.com/appengine/docs/python/mail/bounce
# Source code is in file:
# google_appengine/google/appengine/ext/webapp/mail_handlers.py
def handle_bounce():
    """Handler to notice when email to given user is bouncing.

    Parses the POSTed form data into a BounceNotification and delegates
    to receive() for escalation.
    """
    receive(mail.BounceNotification(flask.request.form))
    return {'message': 'Done'}
def receive(bounce_message):
    """Escalate a bounced message to the bounce-escalation address.

    Args:
      bounce_message: a mail.BounceNotification whose .original dict
        holds the bounced message's headers and text.
    """
    email_addr = bounce_message.original.get('to')
    subject = 'Mail to %r bounced' % email_addr
    logging.info(subject)
    # TODO(jrobbins): Re-implement this without depending on models.
    # Instead create a task and then have that processed in py3.
    # pref_list = user_models.UserPref.get_prefs_for_emails([email_addr])
    # user_pref = pref_list[0]
    # user_pref.bounced = True
    # user_pref.put()
    # Escalate to someone who might do something about it, e.g.
    # find a new owner for a component.
    # NOTE(review): format(**bounce_message.original) raises KeyError if
    # any of from/to/subject/text is absent — presumably AppEngine always
    # supplies them; verify.
    body = ('The following message bounced.\n'
            '=================\n'
            'From: {from}\n'
            'To: {to}\n'
            'Subject: {subject}\n\n'
            '{text}\n'.format(**bounce_message.original))
    logging.info(body)
    message = mail.EmailMessage(
        sender='Chromestatus <admin@%s.appspotmail.com>' % settings.APP_ID,
        to=settings.BOUNCE_ESCALATION_ADDR, subject=subject, body=body)
    message.check_initialized()
    if settings.SEND_EMAIL:
        message.send()
def METHOD_NAME(header_value):
    """Return the bare email addresses found in message header values.

    Args:
      header_value: a list of raw header value strings, e.g. the result
        of msg.get_all('from', []).  (The old docstring implied a single
        value and a single address; the function has always taken and
        returned lists.)

    Returns:
      A list of email address strings with friendly names stripped.
    """
    friendly_addr_pairs = utils.getaddresses(header_value)
    return [addr for _friendly, addr in friendly_addr_pairs]
def get_incoming_message():
    """Get an email message object from the request data.

    Parses the raw POSTed request body as an inbound email and returns
    the underlying message object (the .original attribute of
    mail.InboundEmailMessage).
    """
    data = flask.request.get_data(as_text=True)
    msg = mail.InboundEmailMessage(data).original
    return msg
def handle_incoming_mail(addr=None):
    """Handle an incoming email by making a task to examine it.

    This code checks some basic properties of the incoming message
    to make sure that it is worth examining. Then it puts all the
    relevant fields into a dict and makes a new Cloud Task which
    is further processed in python 3 code.

    Args:
      addr: the local address the mail was delivered to (from the URL).

    Returns:
      A dict with a 'message' key describing the outcome.
    """
    logging.info('Request Headers: %r', flask.request.headers)
    logging.info('\n\n\nPOST for InboundEmail and addr is %r', addr)
    # Only process mail sent to the configured inbound address.
    if addr != settings.INBOUND_EMAIL_ADDR:
        logging.info('Message not sent directly to our address')
        return {'message': 'Wrong address'}
    # Reject oversized messages before parsing (memory protection).
    if flask.request.content_length > MAX_BODY_SIZE:
        logging.info('Message too big, ignoring')
        return {'message': 'Too big'}
    msg = get_incoming_message()
    # Skip bulk/junk precedence — typical of autoresponders.
    precedence = msg.get('precedence', '')
    if precedence.lower() in ['bulk', 'junk']:
        logging.info('Precedence: %r indicates an autoresponder', precedence)
        return {'message': 'Wrong precedence'}
    # Prefer X-Original-From (set by forwarding systems) over From.
    from_addrs = (METHOD_NAME(msg.get_all('x-original-from', [])) or
                  METHOD_NAME(msg.get_all('from', [])))
    if from_addrs:
        from_addr = from_addrs[0]
    else:
        logging.info('could not parse from addr')
        return {'message': 'Missing From'}
    in_reply_to = msg.get('in-reply-to', '')
    body = u''
    for part in msg.walk():
        # We only process plain text emails.
        if part.get_content_type() == 'text/plain':
            body = part.get_payload(decode=True)
            # decode=True may return bytes; normalize to str.
            if not isinstance(body, str):
                body = body.decode('utf-8')
            break  # Only consider the first text part.
    # The URL route delivers the address percent-encoded.
    to_addr = urllib.parse.unquote(addr)
    subject = msg.get('subject', '')
    task_dict = {
        'to_addr': to_addr,
        'from_addr': from_addr,
        'subject': subject,
        'in_reply_to': in_reply_to,
        'body': body,
    }
    logging.info('task_dict is %r', task_dict)
    cloud_tasks_helpers.enqueue_task(
        '/tasks/detect-intent', task_dict)
    return {'message': 'Done'}
def add_routes(app):
    """Add routes to the given flask app for email handlers."""
    routes = [
        ('/tasks/outbound-email', handle_outbound_mail_task, ['POST']),
        ('/_ah/bounce', handle_bounce, ['POST']),
        ('/_ah/mail/<string:addr>', handle_incoming_mail, ['POST']),
    ]
    for rule, view_func, methods in routes:
        app.add_url_rule(rule, view_func=view_func, methods=methods)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.