id int64 0 300k | label stringlengths 1 74 ⌀ | text stringlengths 4k 8k |
|---|---|---|
1,400 | native | from contextlib import suppress
from io import TextIOWrapper
from . import abc
class SpecLoaderAdapter:
    """
    Adapt a package spec to adapt the underlying loader.
    """

    def __init__(self, spec, adapter=lambda spec: spec.loader):
        # `adapter` maps the wrapped spec to the loader this adapter exposes;
        # by default the spec's own loader is passed through unchanged.
        self.spec = spec
        self.loader = adapter(spec)

    def __getattr__(self, name):
        # Delegate every attribute not found on the adapter to the wrapped spec.
        return getattr(self.spec, name)
class TraversableResourcesLoader:
    """
    Adapt a loader to provide TraversableResources.
    """

    def __init__(self, spec):
        self.spec = spec

    def get_resource_reader(self, name):
        # Build a CompatibilityFiles adapter for the spec and ask it for the
        # best reader available (native reader when it supports files(),
        # otherwise the adapter itself — see CompatibilityFiles below).
        # NOTE(review): METHOD_NAME is a masked identifier in this extract;
        # confirm the real name against the upstream source.
        return CompatibilityFiles(self.spec).METHOD_NAME()
def _io_wrapper(file, mode='r', *args, **kwargs):
if mode == 'r':
return TextIOWrapper(file, *args, **kwargs)
elif mode == 'rb':
return file
raise ValueError(
"Invalid mode value '{}', only 'r' and 'rb' are supported".format(mode)
)
class CompatibilityFiles:
    """
    Adapter for an existing or non-existent resource reader
    to provide a compatibility .files().
    """

    class SpecPath(abc.Traversable):
        """
        Path tied to a module spec.
        Can be read and exposes the resource reader children.
        """

        def __init__(self, spec, reader):
            self._spec = spec
            self._reader = reader

        def iterdir(self):
            # Without a reader there is nothing to enumerate.
            if not self._reader:
                return iter(())
            return iter(
                CompatibilityFiles.ChildPath(self._reader, path)
                for path in self._reader.contents()
            )

        def is_file(self):
            return False

        # NOTE(review): is_dir is aliased to is_file, so both return False
        # for a SpecPath — confirm this matches the upstream intent.
        is_dir = is_file

        def joinpath(self, other):
            # With no reader the joined path cannot exist: produce an orphan.
            if not self._reader:
                return CompatibilityFiles.OrphanPath(other)
            return CompatibilityFiles.ChildPath(self._reader, other)

        @property
        def name(self):
            return self._spec.name

        def open(self, mode='r', *args, **kwargs):
            return _io_wrapper(self._reader.open_resource(None), mode, *args, **kwargs)

    class ChildPath(abc.Traversable):
        """
        Path tied to a resource reader child.
        Can be read but doesn't expose any meaningful children.
        """

        def __init__(self, reader, name):
            self._reader = reader
            self._name = name

        def iterdir(self):
            return iter(())

        def is_file(self):
            return self._reader.is_resource(self.name)

        def is_dir(self):
            return not self.is_file()

        def joinpath(self, other):
            # Joining below a child yields a path that cannot exist.
            return CompatibilityFiles.OrphanPath(self.name, other)

        @property
        def name(self):
            return self._name

        def open(self, mode='r', *args, **kwargs):
            return _io_wrapper(
                self._reader.open_resource(self.name), mode, *args, **kwargs
            )

    class OrphanPath(abc.Traversable):
        """
        Orphan path, not tied to a module spec or resource reader.
        Can't be read and doesn't expose any meaningful children.
        """

        def __init__(self, *path_parts):
            if len(path_parts) < 1:
                raise ValueError('Need at least one path part to construct a path')
            self._path = path_parts

        def iterdir(self):
            return iter(())

        def is_file(self):
            return False

        is_dir = is_file

        def joinpath(self, other):
            return CompatibilityFiles.OrphanPath(*self._path, other)

        @property
        def name(self):
            return self._path[-1]

        def open(self, mode='r', *args, **kwargs):
            raise FileNotFoundError("Can't open orphan path")

    def __init__(self, spec):
        self.spec = spec

    @property
    def _reader(self):
        # Implicitly returns None when the loader lacks get_resource_reader()
        # (the AttributeError is suppressed and the function falls through).
        with suppress(AttributeError):
            return self.spec.loader.get_resource_reader(self.spec.name)

    def METHOD_NAME(self):
        """
        Return the native reader if it supports files().
        """
        # NOTE(review): METHOD_NAME is a masked identifier in this extract;
        # confirm the real name against the upstream source.
        reader = self._reader
        return reader if hasattr(reader, 'files') else self

    def __getattr__(self, attr):
        # Delegate unknown attribute lookups to the underlying reader.
        return getattr(self._reader, attr)

    def files(self):
        return CompatibilityFiles.SpecPath(self.spec, self._reader)
def wrap_spec(package):
    """
    Construct a package spec with traversable compatibility
    on the spec/loader/reader.

    Args:
        package: an imported package; must expose ``__spec__``.

    Returns:
        A SpecLoaderAdapter whose loader is replaced by a
        TraversableResourcesLoader built from the package's spec.
    """
    # Fixed: a stray table-delimiter character at the end of the return
    # statement made this function a syntax error.
    return SpecLoaderAdapter(package.__spec__, TraversableResourcesLoader)
1,401 | url | # --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
#
# Code generated by aaz-dev-tools
# --------------------------------------------------------------------------------------------
# pylint: skip-file
# flake8: noqa
from azure.cli.core.aaz import *
@register_command(
    "notification-hub namespace authorization-rule list",
    is_experimental=True,
)
class List(AAZCommand):
    """List the authorization rules for a namespace.

    :example: List authorization rules of the namespace
        az notification-hub namespace authorization-rule list --resource-group MyResourceGroup --namespace-name my-namespace
    """

    # Generated command metadata: API version and the ARM resource targeted.
    _aaz_info = {
        "version": "2017-04-01",
        "resources": [
            ["mgmt-plane", "/subscriptions/{}/resourcegroups/{}/providers/microsoft.notificationhubs/namespaces/{}/authorizationrules", "2017-04-01"],
        ]
    }

    # The service pages results via nextLink; enable paging support.
    AZ_SUPPORT_PAGINATION = True

    def _handler(self, command_args):
        super()._handler(command_args)
        # Paged command: build_paging drives repeated _execute_operations
        # calls and merges their _output results.
        return self.build_paging(self._execute_operations, self._output)

    _args_schema = None

    @classmethod
    def _build_arguments_schema(cls, *args, **kwargs):
        # Lazily build and cache the argument schema on the class.
        if cls._args_schema is not None:
            return cls._args_schema
        cls._args_schema = super()._build_arguments_schema(*args, **kwargs)

        # define Arg Group ""
        _args_schema = cls._args_schema
        _args_schema.namespace_name = AAZStrArg(
            options=["--namespace-name"],
            help="The namespace name",
            required=True,
        )
        _args_schema.resource_group = AAZResourceGroupNameArg(
            required=True,
        )
        return cls._args_schema

    def _execute_operations(self):
        self.pre_operations()
        self.NamespacesListAuthorizationRules(ctx=self.ctx)()
        self.post_operations()

    @register_callback
    def pre_operations(self):
        pass

    @register_callback
    def post_operations(self):
        pass

    def _output(self, *args, **kwargs):
        # Return (page of results, next page link) as expected by build_paging.
        result = self.deserialize_output(self.ctx.vars.instance.value, client_flatten=True)
        next_link = self.deserialize_output(self.ctx.vars.instance.next_link)
        return result, next_link

    class NamespacesListAuthorizationRules(AAZHttpOperation):
        # HTTP operation for the ListAuthorizationRules ARM call.
        CLIENT_TYPE = "MgmtClient"

        def __call__(self, *args, **kwargs):
            request = self.make_request()
            session = self.client.send_request(request=request, stream=False, **kwargs)
            if session.http_response.status_code in [200]:
                return self.on_200(session)
            return self.on_error(session.http_response)

        @property
        def METHOD_NAME(self):
            # NOTE(review): METHOD_NAME is a masked identifier in this
            # extract; confirm the real property name against the generated
            # aaz source.
            return self.client.format_url(
                "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.NotificationHubs/namespaces/{namespaceName}/AuthorizationRules",
                **self.url_parameters
            )

        @property
        def method(self):
            return "GET"

        @property
        def error_format(self):
            return "MgmtErrorFormat"

        @property
        def url_parameters(self):
            parameters = {
                **self.serialize_url_param(
                    "namespaceName", self.ctx.args.namespace_name,
                    required=True,
                ),
                **self.serialize_url_param(
                    "resourceGroupName", self.ctx.args.resource_group,
                    required=True,
                ),
                **self.serialize_url_param(
                    "subscriptionId", self.ctx.subscription_id,
                    required=True,
                ),
            }
            return parameters

        @property
        def query_parameters(self):
            parameters = {
                **self.serialize_query_param(
                    "api-version", "2017-04-01",
                    required=True,
                ),
            }
            return parameters

        @property
        def header_parameters(self):
            parameters = {
                **self.serialize_header_param(
                    "Accept", "application/json",
                ),
            }
            return parameters

        def on_200(self, session):
            # Store the deserialized body in the shared context for _output.
            data = self.deserialize_http_content(session)
            self.ctx.set_var(
                "instance",
                data,
                schema_builder=self._build_schema_on_200
            )

        _schema_on_200 = None

        @classmethod
        def _build_schema_on_200(cls):
            # Lazily build and cache the response schema for HTTP 200.
            if cls._schema_on_200 is not None:
                return cls._schema_on_200

            cls._schema_on_200 = AAZObjectType()

            _schema_on_200 = cls._schema_on_200
            _schema_on_200.next_link = AAZStrType(
                serialized_name="nextLink",
            )
            _schema_on_200.value = AAZListType()

            value = cls._schema_on_200.value
            value.Element = AAZObjectType()

            _element = cls._schema_on_200.value.Element
            _element.id = AAZStrType(
                flags={"read_only": True},
            )
            _element.location = AAZStrType()
            _element.name = AAZStrType(
                flags={"read_only": True},
            )
            _element.properties = AAZObjectType(
                flags={"client_flatten": True},
            )
            _element.sku = AAZObjectType()
            _element.tags = AAZDictType()
            _element.type = AAZStrType(
                flags={"read_only": True},
            )

            properties = cls._schema_on_200.value.Element.properties
            properties.claim_type = AAZStrType(
                serialized_name="claimType",
                flags={"read_only": True},
            )
            properties.claim_value = AAZStrType(
                serialized_name="claimValue",
                flags={"read_only": True},
            )
            properties.created_time = AAZStrType(
                serialized_name="createdTime",
                flags={"read_only": True},
            )
            properties.key_name = AAZStrType(
                serialized_name="keyName",
                flags={"read_only": True},
            )
            properties.modified_time = AAZStrType(
                serialized_name="modifiedTime",
                flags={"read_only": True},
            )
            properties.primary_key = AAZStrType(
                serialized_name="primaryKey",
                flags={"read_only": True},
            )
            properties.revision = AAZIntType(
                flags={"read_only": True},
            )
            properties.rights = AAZListType()
            properties.secondary_key = AAZStrType(
                serialized_name="secondaryKey",
                flags={"read_only": True},
            )

            rights = cls._schema_on_200.value.Element.properties.rights
            rights.Element = AAZStrType()

            sku = cls._schema_on_200.value.Element.sku
            sku.capacity = AAZIntType()
            sku.family = AAZStrType()
            sku.name = AAZStrType(
                flags={"required": True},
            )
            sku.size = AAZStrType()
            sku.tier = AAZStrType()

            tags = cls._schema_on_200.value.Element.tags
            tags.Element = AAZStrType()

            return cls._schema_on_200
class _ListHelper:
    """Helper class for List"""


# Fixed: a stray table-delimiter character after this assignment made the
# statement a syntax error.
__all__ = ["List"]
1,402 | certificate arn | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import copy
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from . import outputs
__all__ = [
'GetServiceResult',
'AwaitableGetServiceResult',
'get_service',
'get_service_output',
]
@pulumi.output_type
class GetServiceResult:
    """
    A collection of values returned by getService.
    """

    # Generated code convention: the first parameter is spelled ``__self__``
    # and fields are stored via ``pulumi.set``.  METHOD_NAME is a masked
    # identifier corresponding to the "certificate_arn" field (see the
    # TypeError messages and pulumi.set keys below).
    def __init__(__self__, arn=None, auth_type=None, METHOD_NAME=None, custom_domain_name=None, dns_entries=None, id=None, name=None, service_identifier=None, status=None, tags=None):
        if arn and not isinstance(arn, str):
            raise TypeError("Expected argument 'arn' to be a str")
        pulumi.set(__self__, "arn", arn)
        if auth_type and not isinstance(auth_type, str):
            raise TypeError("Expected argument 'auth_type' to be a str")
        pulumi.set(__self__, "auth_type", auth_type)
        if METHOD_NAME and not isinstance(METHOD_NAME, str):
            raise TypeError("Expected argument 'certificate_arn' to be a str")
        pulumi.set(__self__, "certificate_arn", METHOD_NAME)
        if custom_domain_name and not isinstance(custom_domain_name, str):
            raise TypeError("Expected argument 'custom_domain_name' to be a str")
        pulumi.set(__self__, "custom_domain_name", custom_domain_name)
        if dns_entries and not isinstance(dns_entries, list):
            raise TypeError("Expected argument 'dns_entries' to be a list")
        pulumi.set(__self__, "dns_entries", dns_entries)
        if id and not isinstance(id, str):
            raise TypeError("Expected argument 'id' to be a str")
        pulumi.set(__self__, "id", id)
        if name and not isinstance(name, str):
            raise TypeError("Expected argument 'name' to be a str")
        pulumi.set(__self__, "name", name)
        if service_identifier and not isinstance(service_identifier, str):
            raise TypeError("Expected argument 'service_identifier' to be a str")
        pulumi.set(__self__, "service_identifier", service_identifier)
        if status and not isinstance(status, str):
            raise TypeError("Expected argument 'status' to be a str")
        pulumi.set(__self__, "status", status)
        if tags and not isinstance(tags, dict):
            raise TypeError("Expected argument 'tags' to be a dict")
        pulumi.set(__self__, "tags", tags)

    @property
    @pulumi.getter
    def arn(self) -> str:
        """
        ARN of the service.
        """
        return pulumi.get(self, "arn")

    @property
    @pulumi.getter(name="authType")
    def auth_type(self) -> str:
        """
        Type of IAM policy. Either `NONE` or `AWS_IAM`.
        """
        return pulumi.get(self, "auth_type")

    @property
    @pulumi.getter(name="certificateArn")
    def METHOD_NAME(self) -> str:
        """
        Amazon Resource Name (ARN) of the certificate.
        """
        return pulumi.get(self, "certificate_arn")

    @property
    @pulumi.getter(name="customDomainName")
    def custom_domain_name(self) -> str:
        """
        Custom domain name of the service.
        """
        return pulumi.get(self, "custom_domain_name")

    @property
    @pulumi.getter(name="dnsEntries")
    def dns_entries(self) -> Sequence['outputs.GetServiceDnsEntryResult']:
        """
        DNS name of the service.
        """
        return pulumi.get(self, "dns_entries")

    @property
    @pulumi.getter
    def id(self) -> str:
        """
        The provider-assigned unique ID for this managed resource.
        """
        return pulumi.get(self, "id")

    @property
    @pulumi.getter
    def name(self) -> str:
        return pulumi.get(self, "name")

    @property
    @pulumi.getter(name="serviceIdentifier")
    def service_identifier(self) -> str:
        return pulumi.get(self, "service_identifier")

    @property
    @pulumi.getter
    def status(self) -> str:
        """
        Status of the service.
        """
        return pulumi.get(self, "status")

    @property
    @pulumi.getter
    def tags(self) -> Mapping[str, str]:
        """
        List of tags associated with the service.
        """
        return pulumi.get(self, "tags")
class AwaitableGetServiceResult(GetServiceResult):
    # Awaitable wrapper: ``if False: yield`` turns __await__ into a generator
    # that yields nothing and immediately returns the resolved result, so the
    # (already-synchronous) value can be used with ``await``.
    # pylint: disable=using-constant-test
    def __await__(self):
        if False:
            yield self
        return GetServiceResult(
            arn=self.arn,
            auth_type=self.auth_type,
            METHOD_NAME=self.METHOD_NAME,
            custom_domain_name=self.custom_domain_name,
            dns_entries=self.dns_entries,
            id=self.id,
            name=self.name,
            service_identifier=self.service_identifier,
            status=self.status,
            tags=self.tags)
def get_service(name: Optional[str] = None,
                service_identifier: Optional[str] = None,
                tags: Optional[Mapping[str, str]] = None,
                opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetServiceResult:
    """
    Data source for managing an AWS VPC Lattice Service.
    ## Example Usage
    ### Basic Usage
    ```python
    import pulumi
    import pulumi_aws as aws
    example = aws.vpclattice.get_service(name="example")
    ```
    :param str name: Service name.
    :param str service_identifier: ID or Amazon Resource Name (ARN) of the service network.
    :param Mapping[str, str] tags: List of tags associated with the service.
    """
    __args__ = dict()
    __args__['name'] = name
    __args__['serviceIdentifier'] = service_identifier
    __args__['tags'] = tags
    opts = pulumi.InvokeOptions.merge(_utilities.get_invoke_opts_defaults(), opts)
    # Synchronous provider invoke; the typed result is re-wrapped below in
    # the awaitable subclass.
    __ret__ = pulumi.runtime.invoke('aws:vpclattice/getService:getService', __args__, opts=opts, typ=GetServiceResult).value
    return AwaitableGetServiceResult(
        arn=pulumi.get(__ret__, 'arn'),
        auth_type=pulumi.get(__ret__, 'auth_type'),
        METHOD_NAME=pulumi.get(__ret__, 'certificate_arn'),
        custom_domain_name=pulumi.get(__ret__, 'custom_domain_name'),
        dns_entries=pulumi.get(__ret__, 'dns_entries'),
        id=pulumi.get(__ret__, 'id'),
        name=pulumi.get(__ret__, 'name'),
        service_identifier=pulumi.get(__ret__, 'service_identifier'),
        status=pulumi.get(__ret__, 'status'),
        tags=pulumi.get(__ret__, 'tags'))
@_utilities.lift_output_func(get_service)
def get_service_output(name: Optional[pulumi.Input[Optional[str]]] = None,
                       service_identifier: Optional[pulumi.Input[Optional[str]]] = None,
                       tags: Optional[pulumi.Input[Optional[Mapping[str, str]]]] = None,
                       opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetServiceResult]:
    """
    Data source for managing an AWS VPC Lattice Service.
    ## Example Usage
    ### Basic Usage
    ```python
    import pulumi
    import pulumi_aws as aws
    example = aws.vpclattice.get_service(name="example")
    ```
    :param str name: Service name.
    :param str service_identifier: ID or Amazon Resource Name (ARN) of the service network.
    :param Mapping[str, str] tags: List of tags associated with the service.
    """
    # Body intentionally empty: lift_output_func supplies the implementation
    # by lifting get_service() into the Output world.  Fixed: a stray
    # table-delimiter character after the ellipsis made this a syntax error.
    ...
1,403 | convert stack polar | import numpy as np
import matplotlib.pyplot as plt
from py4DSTEM.process.utils.elliptical_coords import * ## What else is used here? These fns have
## moved around some. In general, specifying
## the fns is better practice. TODO: change
## this import
from py4DSTEM.process.calibration import fit_ellipse_amorphous_ring
import matplotlib
from tqdm import tqdm
# this fixes figure sizes on HiDPI screens
matplotlib.rcParams["figure.dpi"] = 200
plt.ion()
def fit_stack(datacube, init_coefs, mask=None):
    """
    Fit an ellipse, using the polar elliptical transform code, to every
    diffraction pattern in a datacube.  Returns a coefficient array which can
    then be used to map strain, fit the centers, etc.

    Args:
        datacube: a datacube of diffraction data
        init_coefs: an initial starting guess for the fit
        mask: an optional mask, either 2D (one mask for the whole stack) or
            4D (one mask per pattern).  If None, no mask is applied.

    Returns:
        an array of coefficients of the fit, shape (R_Nx, R_Ny, len(init_coefs))

    Raises:
        ValueError: if `mask` is neither 2D nor 4D.
    """
    coefs_array = np.zeros([i for i in datacube.data.shape[0:2]] + [len(init_coefs)])
    for i in tqdm(range(datacube.R_Nx)):
        for j in tqdm(range(datacube.R_Ny)):
            # Select the mask for this pattern.  The original code crashed
            # with AttributeError when mask was None (the documented default)
            # and left mask_current unbound for any other dimensionality.
            if mask is None:
                mask_current = None
            elif mask.ndim == 2:
                mask_current = mask
            elif mask.ndim == 4:
                mask_current = mask[i, j, :, :]
            else:
                raise ValueError("mask must be a 2D or 4D array")
            coefs = fit_ellipse_amorphous_ring(
                datacube.data[i, j, :, :], init_coefs, mask=mask_current
            )
            coefs_array[i, j] = coefs
    return coefs_array
def calculate_coef_strain(coef_cube, r_ref):
    """
    Compute strain maps from the 3D coefficient matrix produced by fit_stack.

    Coefs order:
        * I0      the intensity of the first gaussian function
        * I1      the intensity of the Janus gaussian
        * sigma0  std of first gaussian
        * sigma1  inner std of Janus gaussian
        * sigma2  outer std of Janus gaussian
        * c_bkgd  a constant offset
        * R       center of the Janus gaussian
        * x0, y0  the origin
        * B, C    1x^2 + Bxy + Cy^2 = 1

    Args:
        coef_cube: output from fit_stack
        r_ref: a reference 0 strain radius - needed because we fit r as well
            as B and C

    Returns:
        (3-tuple) A 3-tuple containing:
            * **exx**: strain in the x axis direction in image coordinates
            * **eyy**: strain in the y axis direction in image coordinates
            * **exy**: shear
    """
    radius = coef_cube[:, :, 6]
    # Correction factor defining zero strain; found _experimentally_, and
    # must be applied to A, B and C alike.  TODO have someone else read this
    ratio_sq = (radius / r_ref) ** 2
    coef_a = 1 / ratio_sq
    coef_b = coef_cube[:, :, 9] / ratio_sq
    coef_c = coef_cube[:, :, 10] / ratio_sq
    exx = np.empty_like(coef_a)
    eyy = np.empty_like(coef_c)
    exy = np.empty_like(coef_b)
    n_rows, n_cols = coef_a.shape
    for r in range(n_rows):
        for c in range(n_cols):
            # Diagonalize the ellipse matrix, rotate the principal stretches
            # back into image coordinates, and read off the strain tensor.
            ellipse_mat = np.asarray(
                [[coef_a[r, c], coef_b[r, c] / 2], [coef_b[r, c] / 2, coef_c[r, c]]]
            )
            e_vals, e_vecs = np.linalg.eig(ellipse_mat)
            theta = np.arctan2(e_vecs[1, 0], e_vecs[0, 0])
            rot = np.asarray(
                [[np.cos(theta), -np.sin(theta)], [np.sin(theta), np.cos(theta)]]
            )
            stretch = rot @ np.diag(np.sqrt(e_vals)) @ rot.T
            exx[r, c] = stretch[0, 0] - 1
            eyy[r, c] = stretch[1, 1] - 1
            exy[r, c] = 0.5 * (stretch[0, 1] + stretch[1, 0])
    return exx, eyy, exy
def plot_strains(strains, cmap="RdBu_r", vmin=None, vmax=None, mask=None):
    """
    This function will plot strains with a unified color scale.

    Args:
        strains (3-tuple of arrays): (exx, eyy, exy)
        cmap, vmin, vmax: imshow parameters
        mask: real space mask of values not to show (black)

    Warning:
        When a mask is given, the input strain arrays are modified in place
        (masked entries are overwritten with NaN).
    """
    cmap = plt.get_cmap(cmap)
    # Shared color limits across all three maps unless given explicitly.
    if vmin is None:
        vmin = np.min(strains)
    if vmax is None:
        vmax = np.max(strains)
    if mask is None:
        # Placeholder mask; not used further in this branch.
        mask = np.ones_like(strains[0])
    else:
        cmap.set_under("black")
        cmap.set_over("black")
        cmap.set_bad("black")
        mask = mask.astype(bool)
        # NOTE(review): mutates the caller's arrays in place — NaN entries
        # render as "bad" (black) in imshow.
        for i in strains:
            i[mask] = np.nan
    plt.figure(88, figsize=(9, 5.8), clear=True)
    f, (ax1, ax2, ax3) = plt.subplots(1, 3, num=88)
    ax1.imshow(strains[0], cmap=cmap, vmin=vmin, vmax=vmax)
    ax1.tick_params(
        axis="both",
        which="both",
        bottom=False,
        top=False,
        left=False,
        right=False,
        labelbottom=False,
        labelleft=False,
    )
    ax1.set_title(r"$\epsilon_{xx}$")
    ax2.imshow(strains[1], cmap=cmap, vmin=vmin, vmax=vmax)
    ax2.tick_params(
        axis="both",
        which="both",
        bottom=False,
        top=False,
        left=False,
        right=False,
        labelbottom=False,
        labelleft=False,
    )
    ax2.set_title(r"$\epsilon_{yy}$")
    im = ax3.imshow(strains[2], cmap=cmap, vmin=vmin, vmax=vmax)
    ax3.tick_params(
        axis="both",
        which="both",
        bottom=False,
        top=False,
        left=False,
        right=False,
        labelbottom=False,
        labelleft=False,
    )
    ax3.set_title(r"$\epsilon_{xy}$")
    # One horizontal colorbar shared by all three panels.
    cbar_ax = f.add_axes([0.125, 0.25, 0.775, 0.05])
    f.colorbar(im, cax=cbar_ax, orientation="horizontal")
    return
def METHOD_NAME(datacube, coef_cube):
    """
    Take the coef_cube from fit_stack and apply it to the image stack, to
    return polar transformed images.

    Args:
        datacube: data in datacube format
        coef_cube: coefs from fit_stack

    Returns:
        polar transformed datacube

    Raises:
        NotImplementedError: this function is a stub.  The original body
            returned the undefined name ``datacube_polar``, which produced a
            confusing NameError; failing explicitly is clearer.
    """
    raise NotImplementedError(
        "polar transformation of a datacube is not implemented yet"
    )
def compute_polar_stack_symmetries(datacube_polar):
    """
    Take in a datacube of polar-transformed diffraction patterns, and do the
    autocorrelation, before taking the fourier transform along the theta
    direction, such that symmetries can be measured. They will be plotted by
    a different function.

    Args:
        datacube_polar: diffraction pattern cube that has been polar transformed

    Returns:
        the normalized fft along the theta direction of the autocorrelated
        patterns in datacube_polar

    Raises:
        NotImplementedError: this function is a stub.  The original body
            returned the undefined name ``datacube_symmetries`` (NameError).
    """
    raise NotImplementedError(
        "symmetry computation for a polar datacube is not implemented yet"
    )
def plot_symmetries(datacube_symmetries, sym_order):
    """
    Take in a datacube from compute_polar_stack_symmetries and plot a
    specific symmetry order.

    Args:
        datacube_symmetries: result of compute_polar_stack_symmetries, the
            stack of fft'd autocorrelated diffraction patterns
        sym_order: symmetry order desired to plot

    Returns:
        None
    """
    # TODO: plotting is not implemented yet; this stub intentionally does
    # nothing.  Fixed: a stray table-delimiter character after the return
    # statement made this function a syntax error.
    return None
1,404 | prefix | # This module contains abstractions for the input stream. You don't have to
# looks further, there are no pretty code.
#
# We define two classes here.
#
# Mark(source, line, column)
# It's just a record and its only use is producing nice error messages.
# Parser does not use it for any other purposes.
#
# Reader(source, data)
# Reader determines the encoding of `data` and converts it to unicode.
# Reader provides the following methods and attributes:
# reader.peek(length=1) - return the next `length` characters
# reader.forward(length=1) - move the current position to `length` characters.
# reader.index - the number of the current character.
# reader.line, stream.column - the line and the column of the current character.
__all__ = ['Reader', 'ReaderError']
from .error import YAMLError, Mark
import codecs, re
class ReaderError(YAMLError):
    # Raised when the input stream contains bytes that cannot be decoded or
    # characters outside the range YAML allows.

    def __init__(self, name, position, character, encoding, reason):
        self.name = name            # stream name for the error message
        self.character = character  # offending byte (bytes) or code point (int)
        self.position = position    # character offset in the stream
        self.encoding = encoding
        self.reason = reason

    def __str__(self):
        # bytes => a decoding failure; int => a disallowed unicode character.
        if isinstance(self.character, bytes):
            return "'%s' codec can't decode byte #x%02x: %s\n" \
                    " in \"%s\", position %d" \
                    % (self.encoding, ord(self.character), self.reason,
                            self.name, self.position)
        else:
            return "unacceptable character #x%04x: %s\n" \
                    " in \"%s\", position %d" \
                    % (self.character, self.reason,
                            self.name, self.position)
class Reader(object):
    # Reader:
    # - determines the data encoding and converts it to a unicode string,
    # - checks if characters are in allowed range,
    # - adds '\0' to the end.
    # Reader accepts
    #  - a `bytes` object,
    #  - a `str` object,
    #  - a file-like object with its `read` method returning `str`,
    #  - a file-like object with its `read` method returning `unicode`.
    # Yeah, it's ugly and slow.

    def __init__(self, stream):
        self.name = None
        self.stream = None
        self.stream_pointer = 0   # bytes consumed from the underlying stream
        self.eof = True
        self.buffer = ''          # decoded characters, NUL-terminated at EOF
        self.pointer = 0          # current position within self.buffer
        self.raw_buffer = None    # undecoded bytes (or raw str from a file)
        self.raw_decode = None
        self.encoding = None
        self.index = 0            # absolute character index in the stream
        self.line = 0
        self.column = 0
        if isinstance(stream, str):
            # Already-decoded input: validate it and terminate with NUL.
            self.name = "<unicode string>"
            self.check_printable(stream)
            self.buffer = stream+'\0'
        elif isinstance(stream, bytes):
            self.name = "<byte string>"
            self.raw_buffer = stream
            self.determine_encoding()
        else:
            # File-like object: read lazily via update_raw().
            self.stream = stream
            self.name = getattr(stream, 'name', "<file>")
            self.eof = False
            self.raw_buffer = None
            self.determine_encoding()

    def peek(self, index=0):
        # Return the character `index` positions ahead without advancing.
        try:
            return self.buffer[self.pointer+index]
        except IndexError:
            self.update(index+1)
            return self.buffer[self.pointer+index]

    def METHOD_NAME(self, length=1):
        # Return the next `length` characters without advancing the position.
        # NOTE(review): METHOD_NAME is a masked identifier in this extract;
        # confirm the real name against the upstream source.
        if self.pointer+length >= len(self.buffer):
            self.update(length)
        return self.buffer[self.pointer:self.pointer+length]

    def forward(self, length=1):
        # Advance `length` characters, keeping index/line/column in sync.
        if self.pointer+length+1 >= len(self.buffer):
            self.update(length+1)
        while length:
            ch = self.buffer[self.pointer]
            self.pointer += 1
            self.index += 1
            # Line breaks: LF, NEL, LS, PS, or a lone CR (CRLF counts once,
            # on the LF).  The BOM '\uFEFF' does not advance the column.
            if ch in '\n\x85\u2028\u2029' \
                    or (ch == '\r' and self.buffer[self.pointer] != '\n'):
                self.line += 1
                self.column = 0
            elif ch != '\uFEFF':
                self.column += 1
            length -= 1

    def get_mark(self):
        # Only in-memory input carries a buffer snippet in the mark; for a
        # file-like stream the snippet is omitted.
        if self.stream is None:
            return Mark(self.name, self.index, self.line, self.column,
                    self.buffer, self.pointer)
        else:
            return Mark(self.name, self.index, self.line, self.column,
                    None, None)

    def determine_encoding(self):
        # Need at least two raw bytes to recognise a UTF-16 BOM.
        while not self.eof and (self.raw_buffer is None or len(self.raw_buffer) < 2):
            self.update_raw()
        if isinstance(self.raw_buffer, bytes):
            if self.raw_buffer.startswith(codecs.BOM_UTF16_LE):
                self.raw_decode = codecs.utf_16_le_decode
                self.encoding = 'utf-16-le'
            elif self.raw_buffer.startswith(codecs.BOM_UTF16_BE):
                self.raw_decode = codecs.utf_16_be_decode
                self.encoding = 'utf-16-be'
            else:
                # No BOM: default to UTF-8.
                self.raw_decode = codecs.utf_8_decode
                self.encoding = 'utf-8'
        self.update(1)

    # Matches any character YAML forbids in a stream.
    NON_PRINTABLE = re.compile('[^\x09\x0A\x0D\x20-\x7E\x85\xA0-\uD7FF\uE000-\uFFFD\U00010000-\U0010ffff]')

    def check_printable(self, data):
        match = self.NON_PRINTABLE.search(data)
        if match:
            character = match.group()
            # Position of the offending character relative to the stream.
            position = self.index+(len(self.buffer)-self.pointer)+match.start()
            raise ReaderError(self.name, position, ord(character),
                    'unicode', "special characters are not allowed")

    def update(self, length):
        # Ensure at least `length` decoded characters are available past the
        # current pointer; no-op once the raw buffer is exhausted.
        if self.raw_buffer is None:
            return
        # Drop the already-consumed prefix before growing the buffer.
        self.buffer = self.buffer[self.pointer:]
        self.pointer = 0
        while len(self.buffer) < length:
            if not self.eof:
                self.update_raw()
            if self.raw_decode is not None:
                try:
                    # Incremental decode: `converted` is the number of raw
                    # bytes consumed; the remainder may be an incomplete
                    # multi-byte sequence kept for the next round.
                    data, converted = self.raw_decode(self.raw_buffer,
                            'strict', self.eof)
                except UnicodeDecodeError as exc:
                    character = self.raw_buffer[exc.start]
                    if self.stream is not None:
                        position = self.stream_pointer-len(self.raw_buffer)+exc.start
                    else:
                        position = exc.start
                    raise ReaderError(self.name, position, character,
                            exc.encoding, exc.reason)
            else:
                data = self.raw_buffer
                converted = len(data)
            self.check_printable(data)
            self.buffer += data
            self.raw_buffer = self.raw_buffer[converted:]
            if self.eof:
                # NUL sentinel lets the scanner detect end-of-stream cheaply.
                self.buffer += '\0'
                self.raw_buffer = None
                break

    def update_raw(self, size=4096):
        # Pull another chunk from the stream; empty read signals EOF.
        data = self.stream.read(size)
        if self.raw_buffer is None:
            self.raw_buffer = data
        else:
            self.raw_buffer += data
        self.stream_pointer += len(data)
        if not data:
            self.eof = True
1,405 | test get auxreader for | # -*- Mode: python; tab-width: 4; indent-tabs-mode:nil; coding:utf-8 -*-
# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4 fileencoding=utf-8
#
# MDAnalysis --- https://www.mdanalysis.org
# Copyright (c) 2006-2017 The MDAnalysis Development Team and contributors
# (see the file AUTHORS for the full list of names)
#
# Released under the GNU Public Licence, v2 or any higher version
#
# Please cite your use of MDAnalysis in published work:
#
# R. J. Gowers, M. Linke, J. Barnoud, T. J. E. Reddy, M. N. Melo, S. L. Seyler,
# D. L. Dotson, J. Domanski, S. Buchoux, I. M. Kenney, and O. Beckstein.
# MDAnalysis: A Python package for the rapid analysis of molecular dynamics
# simulations. In S. Benthall and S. Rostrup editors, Proceedings of the 15th
# Python in Science Conference, pages 102-109, Austin, TX, 2016. SciPy.
# doi: 10.25080/majora-629e541a-00e
#
# N. Michaud-Agrawal, E. J. Denning, T. B. Woolf, and O. Beckstein.
# MDAnalysis: A Toolkit for the Analysis of Molecular Dynamics Simulations.
# J. Comput. Chem. 32 (2011), 2319--2327, doi:10.1002/jcc.21787
#
import pytest
from numpy.testing import assert_array_equal
import numpy as np
import os
import MDAnalysis as mda
from MDAnalysisTests.datafiles import (AUX_XVG, XVG_BAD_NCOL, XVG_BZ2,
COORDINATES_XTC, COORDINATES_TOPOLOGY)
from MDAnalysisTests.auxiliary.base import (BaseAuxReaderTest, BaseAuxReference)
from MDAnalysis.auxiliary.XVG import XVGStep
class XVGReference(BaseAuxReference):
    # Reference data/expectations for testing XVGReader against AUX_XVG.
    def __init__(self):
        super(XVGReference, self).__init__()
        self.testdata = AUX_XVG
        self.reader = mda.auxiliary.XVG.XVGReader
        # add the auxdata and format for .xvg to the reference description
        self.description['auxdata'] = os.path.abspath(self.testdata)
        self.description['format'] = self.reader.format
        # for testing the selection of data/time
        self.time_selector = 0  # take time as first value in auxiliary
        self.data_selector = [1,2]  # select the second/third columns from auxiliary
        self.select_time_ref = np.arange(self.n_steps)
        self.select_data_ref = [self.format_data([2*i, 2**i]) for i in range(self.n_steps)]
class TestXVGStep():
    # Unit tests for XVGStep's time/data selection helpers.

    @staticmethod
    @pytest.fixture()
    def step():
        return XVGStep()

    def test_select_time_none(self, step):
        # A None selector means "no time column selected".
        st = step._select_time(None)
        assert st is None

    def test_select_time_invalid_index(self, step):
        # Time must be selected by a single index, not a list.
        with pytest.raises(ValueError, match="Time selector must be single index"):
            step._select_time([0])

    def test_select_data_none(self, step):
        st = step._select_data(None)
        assert st is None
class TestXVGReader(BaseAuxReaderTest):
    # Runs the shared BaseAuxReaderTest suite against XVGReader (via the
    # fixtures below), plus XVG-specific error cases.

    @staticmethod
    @pytest.fixture()
    def ref():
        return XVGReference()

    @staticmethod
    @pytest.fixture
    def ref_universe(ref):
        u = mda.Universe(COORDINATES_TOPOLOGY, COORDINATES_XTC)
        # TODO: Change order of aux_spec and auxdata for 3.0 release, cf. Issue #3811
        u.trajectory.add_auxiliary('test', ref.testdata)
        return u

    @staticmethod
    @pytest.fixture()
    def reader(ref):
        return ref.reader(
            ref.testdata,
            initial_time=ref.initial_time,
            dt=ref.dt, auxname=ref.name,
            time_selector=None,
            data_selector=None
        )

    def test_changing_n_col_raises_ValueError(self, ref, reader):
        # if number of columns in .xvg file is not consistent, a ValueError
        # should be raised
        with pytest.raises(ValueError):
            reader = ref.reader(XVG_BAD_NCOL)
            next(reader)

    def test_time_selector_out_of_range_raises_ValueError(self, ref, reader):
        # if time_selector is not a valid index of _data, a ValueError
        # should be raised
        with pytest.raises(ValueError):
            reader.time_selector = len(reader.auxstep._data)

    def test_data_selector_out_of_range_raises_ValueError(self, ref, reader):
        # if data_selector is not a valid index of _data, a ValueError
        # should be raised
        with pytest.raises(ValueError):
            reader.data_selector = [len(reader.auxstep._data)]
class XVGFileReference(XVGReference):
    # Same reference data as XVGReference, but exercised through the
    # line-by-line XVGFileReader ("XVG-F" format) instead of XVGReader.
    def __init__(self):
        super(XVGFileReference, self).__init__()
        self.reader = mda.auxiliary.XVG.XVGFileReader
        self.format = "XVG-F"
        self.description['format'] = self.format
class TestXVGFileReader(TestXVGReader):
    # Re-runs the whole XVGReader suite with XVGFileReader fixtures.

    @staticmethod
    @pytest.fixture()
    def ref():
        return XVGFileReference()

    @staticmethod
    @pytest.fixture
    def ref_universe(ref):
        u = mda.Universe(COORDINATES_TOPOLOGY, COORDINATES_XTC)
        # TODO: Change order of aux_spec and auxdata for 3.0 release, cf. Issue #3811
        u.trajectory.add_auxiliary('test', ref.testdata)
        return u

    @staticmethod
    @pytest.fixture()
    def reader(ref):
        return ref.reader(
            ref.testdata,
            initial_time=ref.initial_time,
            dt=ref.dt,
            auxname=ref.name,
            time_selector=None,
            data_selector=None
        )

    def METHOD_NAME(self, ref, reader):
        # Default reader of .xvg files is instead XVGReader, not XVGFileReader
        # so test specifying format.
        # NOTE(review): METHOD_NAME is a masked identifier; for pytest to
        # collect it, the real name must start with ``test_``.
        reader = mda.auxiliary.core.get_auxreader_for(ref.testdata,
                                                      format=ref.format)
        assert reader == ref.reader

    def test_reopen(self, reader):
        reader._reopen()
        # should start us back at before step 0, so next takes us to step 0
        reader.next()
        assert reader.step == 0
def test_xvg_bz2():
    # XVGReader should transparently read bz2-compressed .xvg files.
    reader = mda.auxiliary.XVG.XVGReader(XVG_BZ2)
    assert_array_equal(reader.read_all_times(), np.array([0., 50., 100.]))
def test_xvg_file_bz2():
    # XVGFileReader should also transparently read bz2-compressed .xvg files,
    # reporting the same times as XVGReader does in test_xvg_bz2.
    # Fixed: a stray table-delimiter character at the end of the assertion
    # made this function a syntax error.
    reader = mda.auxiliary.XVG.XVGFileReader(XVG_BZ2)
    assert_array_equal(reader.read_all_times(), np.array([0., 50., 100.]))
1,406 | test flush oneway | # Copyright 2017 Workiva
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from base64 import b64encode
from struct import pack_into
import unittest
from mock import Mock
from mock import patch
from thrift.transport.TTransport import TTransportException
from frugal.gae.transport import TUrlfetchTransport
@patch('frugal.gae.transport.urlfetch_transport._urlfetch')
class TestFUrlfetchTransport(unittest.TestCase):
    """Tests for TUrlfetchTransport with the GAE urlfetch call mocked out.

    Frames in both directions are a 4-byte big-endian length prefix plus
    payload, base64-encoded. (Two redundant ``resp = Mock(status=200)``
    assignments that were immediately overwritten have been removed.)
    """

    def test_request(self, mock_urlfetch):
        """Round trip over http: framed request sent, framed response buffered."""
        url = 'http://localhost:8080/frugal'
        headers = {'foo': 'bar'}
        # Fake a successful framed response.
        response = b'response'
        buff = bytearray(4)
        pack_into('!I', buff, 0, len(response))
        resp_body = b64encode(buff + response)
        resp = Mock(status_code=200, content=resp_body)
        mock_urlfetch.return_value = resp

        def get_headers():
            return {'baz': 'qux'}

        tr = TUrlfetchTransport(url, headers=headers, get_headers=get_headers)
        deadline = 5
        tr.set_timeout(deadline * 1000)
        tr.open()
        self.assertTrue(tr.isOpen())

        data = b'helloworld'
        buff = bytearray(4)
        pack_into('!I', buff, 0, len(data))
        encoded_frame = b64encode(buff + data)
        tr.write(data)
        tr.flush()

        # Static and per-request headers must be merged into the call.
        mock_urlfetch.assert_called_once_with(
            url, encoded_frame, False, deadline,
            {'foo': 'bar', 'baz': 'qux', 'Content-Length': '20',
             'Content-Type': 'application/x-frugal',
             'Content-Transfer-Encoding': 'base64', 'User-Agent':
             'Python/TBaseHttpTransport'},
        )

        resp = tr.read(len(response))
        self.assertEqual(response, resp)
        tr.close()
        self.assertTrue(tr.isOpen())  # open/close are no-ops

    def test_request_https(self, mock_urlfetch):
        """An https URL flips the secure flag passed to urlfetch."""
        url = 'https://localhost:8080/frugal'
        response = b'response'
        buff = bytearray(4)
        pack_into('!I', buff, 0, len(response))
        resp_body = b64encode(buff + response)
        resp = Mock(status_code=200, content=resp_body)
        mock_urlfetch.return_value = resp

        tr = TUrlfetchTransport(url)
        data = b'helloworld'
        buff = bytearray(4)
        pack_into('!I', buff, 0, len(data))
        encoded_frame = b64encode(buff + data)
        tr.write(data)
        tr.flush()

        mock_urlfetch.assert_called_once_with(
            url, encoded_frame, True, None,
            {'Content-Length': '20', 'Content-Type': 'application/x-frugal',
             'Content-Transfer-Encoding': 'base64', 'User-Agent':
             'Python/TBaseHttpTransport'},
        )

        resp = tr.read(len(response))
        self.assertEqual(response, resp)

    def test_flush_no_body(self, mock_urlfetch):
        """Flushing an empty write buffer must not hit the network."""
        url = 'http://localhost:8080/frugal'
        tr = TUrlfetchTransport(url)
        tr.flush()
        self.assertFalse(mock_urlfetch.called)

    def test_flush_bad_response(self, mock_urlfetch):
        """A non-200 response surfaces as TTransportException."""
        url = 'http://localhost:8080/frugal'
        resp = Mock(status_code=500)
        mock_urlfetch.return_value = resp

        tr = TUrlfetchTransport(url)
        data = b'helloworld'
        buff = bytearray(4)
        pack_into('!I', buff, 0, len(data))
        encoded_frame = b64encode(buff + data)
        tr.write(data)
        with self.assertRaises(TTransportException):
            tr.flush()

        mock_urlfetch.assert_called_once_with(
            url, encoded_frame, False, None,
            {'Content-Length': '20', 'Content-Type': 'application/x-frugal',
             'Content-Transfer-Encoding': 'base64', 'User-Agent':
             'Python/TBaseHttpTransport'},
        )

    def test_flush_bad_oneway_response(self, mock_urlfetch):
        """A frame whose length prefix promises more bytes than delivered fails."""
        url = 'http://localhost:8080/frugal'
        buff = bytearray(4)
        pack_into('!I', buff, 0, 10)
        resp_body = b64encode(buff)
        resp = Mock(status_code=200, content=resp_body)
        mock_urlfetch.return_value = resp

        tr = TUrlfetchTransport(url)
        data = b'helloworld'
        buff = bytearray(4)
        pack_into('!I', buff, 0, len(data))
        encoded_frame = b64encode(buff + data)
        tr.write(data)
        with self.assertRaises(TTransportException):
            tr.flush()

        mock_urlfetch.assert_called_once_with(
            url, encoded_frame, False, None,
            {'Content-Length': '20', 'Content-Type': 'application/x-frugal',
             'Content-Transfer-Encoding': 'base64', 'User-Agent':
             'Python/TBaseHttpTransport'},
        )

    def METHOD_NAME(self, mock_urlfetch):
        """A zero-length response frame (oneway call) leaves nothing to read."""
        url = 'http://localhost:8080/frugal'
        buff = bytearray(4)
        pack_into('!I', buff, 0, 0)
        resp_body = b64encode(buff)
        resp = Mock(status_code=200, content=resp_body)
        mock_urlfetch.return_value = resp

        tr = TUrlfetchTransport(url)
        data = b'helloworld'
        buff = bytearray(4)
        pack_into('!I', buff, 0, len(data))
        encoded_frame = b64encode(buff + data)
        tr.write(data)
        tr.flush()

        mock_urlfetch.assert_called_once_with(
            url, encoded_frame, False, None,
            {'Content-Length': '20', 'Content-Type': 'application/x-frugal',
             'Content-Transfer-Encoding': 'base64', 'User-Agent':
             'Python/TBaseHttpTransport'},
        )

        resp = tr.read(10)
        self.assertEqual(b'', resp)
1,407 | pyinit | #-------------------------------------------------------------------------------
# ConnectivityMap
#-------------------------------------------------------------------------------
from PYB11Generator import *
@PYB11template("Dimension")
class ConnectivityMap:
    # PYB11 binding description: each method below mirrors the signature of
    # the corresponding C++ ConnectivityMap<%(Dimension)s> method. The Python
    # "bodies" only return the C++ return-type string consumed by the
    # generator; the docstrings become the exposed documentation.

    PYB11typedefs = """
    typedef typename %(Dimension)s::Scalar Scalar;
    typedef typename %(Dimension)s::Vector Vector;
    typedef typename %(Dimension)s::Tensor Tensor;
    typedef typename %(Dimension)s::SymTensor SymTensor;
    typedef NodeList<%(Dimension)s> NodeListType;
"""

    #...........................................................................
    # Constructors
    def METHOD_NAME(self):
        "Default constructor"

    #...........................................................................
    # Methods
    def patchConnectivity(self,
                          flags = "const FieldList<%(Dimension)s, int>&",
                          old2new = "const FieldList<%(Dimension)s, int>&"):
        "Patch the connectivity information"
        return "void"

    def removeConnectivity(self,
                           neighborsToCut = "const FieldList<%(Dimension)s, std::vector<std::vector<int>>>&"):
        """Remove connectivity between neighbors.
Note this method assumes neighbor info is symmetric, and removes the pair connectivity for each
member of a pair (maintaining symmetry)."""
        return "void"

    @PYB11returnpolicy("reference_internal")
    @PYB11const
    def connectivityForNode(self,
                            nodeList = "const NodeListType*",
                            nodeID = "const int"):
        "Get the set of neighbors for the given (internal!) node in the given NodeList."
        return "const std::vector<std::vector<int>>&"

    # Overload: look the NodeList up by index instead of by pointer.
    @PYB11returnpolicy("reference_internal")
    @PYB11const
    @PYB11pycppname("connectivityForNode")
    def connectivityForNode1(self,
                             nodeListID = "const int",
                             nodeID = "const int"):
        "Get the set of neighbors for the given (internal!) node in the given NodeList."
        return "const std::vector<std::vector<int>>&"

    @PYB11returnpolicy("reference_internal")
    @PYB11const
    def intersectionConnectivity(self,
                                 pair = "const NodePairIdxType&"):
        """Get the pre-computed intersection connectivity for points if it was requested.
Note, this is different than what we expect for overlap connectivity: in this
method the intersection points (k) are all points that points (i,j) have in
common when (i,j) are ALSO neighbors.  Overlap connectivity may exist for
(i,j) even if (i,j) are not neighbors, and this set will miss such points."""
        return "const std::vector<std::vector<int>>&"

    @PYB11returnpolicy("reference_internal")
    @PYB11const
    def overlapConnectivityForNode(self,
                                   nodeList = "const NodeListType*",
                                   nodeID = "const int"):
        "The set of points that have non-zero overlap with the given point."
        return "const std::vector<std::vector<int>>&"

    # Overload: look the NodeList up by index instead of by pointer.
    @PYB11returnpolicy("reference_internal")
    @PYB11const
    @PYB11pycppname("overlapConnectivityForNode")
    def overlapConnectivityForNode1(self,
                                    nodeListID = "const int",
                                    nodeID = "const int"):
        "The set of points that have non-zero overlap with the given point."
        return "const std::vector<std::vector<int>>&"

    @PYB11const
    def connectivityIntersectionForNodes(self,
                                         nodeListi = "const int",
                                         i = "const int",
                                         nodeListj = "const int",
                                         j = "const int",
                                         position = ("const FieldList<%(Dimension)s, Vector>&", "FieldList<%(Dimension)s, Vector>()")):
        "Compute the common neighbors for a pair of nodes."
        return "std::vector< std::vector<int> >"

    @PYB11const
    def connectivityUnionForNodes(self,
                                  nodeListi = "const int",
                                  i = "const int",
                                  nodeListj = "const int",
                                  j = "const int"):
        "Compute the union of neighbors for a pair of nodes."
        return "std::vector< std::vector<int> >"

    @PYB11const
    def numNeighborsForNode(self,
                            nodeListPtr = "const NodeListType*",
                            nodeID = "const int"):
        "Compute the number of neighbors for the given node."
        return "int"

    # Overload: look the NodeList up by index instead of by pointer.
    @PYB11const
    @PYB11pycppname("numNeighborsForNode")
    def numNeighborsForNode1(self,
                             nodeListID = "const int",
                             nodeID = "const int"):
        "Compute the number of neighbors for the given node."
        return "int"

    @PYB11const
    def globalConnectivity(self,
                           boundaries = "std::vector<Boundary<%(Dimension)s>*>&"):
        "Return the connectivity in terms of global node IDs."
        return "std::map<int, std::vector<int> >"

    @PYB11const
    def calculatePairInteraction(self,
                                 nodeListi = "const int",
                                 i = "const int",
                                 nodeListj = "const int",
                                 j = "const int",
                                 firstGhostNodej = "const int"):
        "Function to determine if given node information (i and j), if the pair should already have been calculated by iterating over each others neighbors."
        return "bool"

    @PYB11const
    def numNodes(self, nodeList="const int"):
        "Return the number of nodes we should walk for the given NodeList."
        return "int"

    @PYB11const
    def ithNode(self,
                nodeList = "const int",
                index = "const int"):
        "The ith node (ordered) in the given NodeList."
        return "int"

    @PYB11returnpolicy("reference_internal")
    @PYB11const
    def nodeList(self, index="const int"):
        "Get the ith NodeList or FluidNodeList."
        return "const NodeListType&"

    @PYB11const
    def nodeListIndex(self, nodeList="const NodeListType*"):
        "Return which NodeList index in order the given one would be in our connectivity."
        return "unsigned"

    @PYB11const
    def valid(self):
        "Check that the internal data structure is valid."
        return "bool"

    #...........................................................................
    # Properties
    buildGhostConnectivity = PYB11property(doc="Are we building connectivity for ghost nodes?")
    buildOverlapConnectivity = PYB11property(doc="Are we building connectivity for nodes that overlap?")
    buildIntersectionConnectivity = PYB11property(doc="Are we building the connectivity intersection for nodes that interact?")
    nodeLists = PYB11property("const std::vector<const NodeListType*>&", "nodeLists",
                              returnpolicy="reference",
                              doc="The set of NodeLists we have connectivity for")
    nodePairList = PYB11property(returnpolicy="reference",
                                 doc="The connectivity as a set of (nodeListi, i, nodeListj, j)")
    coupling = PYB11property("const NodeCoupling&", returnpolicy="reference",
                             doc="The coupling functor for pairs of nodes")
1,408 | convert db precision to digits | from typing import List
from ..abcs.database_types import (
DbPath,
JSON,
Timestamp,
TimestampTZ,
Float,
Decimal,
Integer,
TemporalType,
Native_UUID,
Text,
FractionalType,
Boolean,
Date,
)
from ..abcs.mixins import AbstractMixin_MD5, AbstractMixin_NormalizeValue
from .base import BaseDialect, ThreadedDatabase, import_helper, ConnectError, Mixin_Schema
from .base import MD5_HEXDIGITS, CHECKSUM_HEXDIGITS, _CHECKSUM_BITSIZE, TIMESTAMP_PRECISION_POS, Mixin_RandomSample
SESSION_TIME_ZONE = None # Changed by the tests
@import_helper("postgresql")
def import_postgresql():
    """Import psycopg2 lazily and install the select-based wait callback
    (psycopg2 "green" mode), then return the configured module."""
    import psycopg2
    import psycopg2.extras

    psycopg2.extensions.set_wait_callback(psycopg2.extras.wait_select)
    return psycopg2
class Mixin_MD5(AbstractMixin_MD5):
    """Builds the PostgreSQL expression for an MD5-based row checksum."""

    def md5_as_int(self, s: str) -> str:
        """SQL that hashes ``s`` with md5 and folds the digest's tail into a
        signed bigint via a bit-string cast."""
        # Keep only the trailing CHECKSUM_HEXDIGITS hex chars of the digest.
        start = 1 + MD5_HEXDIGITS - CHECKSUM_HEXDIGITS
        return (
            f"('x' || substring(md5({s}), {start}))"
            f"::bit({_CHECKSUM_BITSIZE})::bigint"
        )
class Mixin_NormalizeValue(AbstractMixin_NormalizeValue):
    """Builds SQL snippets rendering Postgres values in a canonical text form."""

    def normalize_timestamp(self, value: str, coltype: TemporalType) -> str:
        """Render a timestamp as 'YYYY-mm-dd HH24:MI:SS.US' text."""
        fmt = "'YYYY-mm-dd HH24:MI:SS.US'"
        if coltype.rounds:
            # Let Postgres round directly to the column's precision.
            return f"to_char({value}::timestamp({coltype.precision}), {fmt})"
        # Otherwise truncate to the column precision and zero-pad back to six
        # fractional digits so all timestamps compare as equal-length text.
        full = f"to_char({value}::timestamp(6), {fmt})"
        keep = TIMESTAMP_PRECISION_POS + coltype.precision
        width = TIMESTAMP_PRECISION_POS + 6
        return f"RPAD(LEFT({full}, {keep}), {width}, '0')"

    def normalize_number(self, value: str, coltype: FractionalType) -> str:
        """Cast to a fixed-scale decimal, then to text."""
        cast = f"{value}::decimal(38, {coltype.precision})"
        return self.to_string(cast)

    def normalize_boolean(self, value: str, _coltype: Boolean) -> str:
        """Render booleans as '0'/'1' text."""
        return self.to_string(f"{value}::int")

    def normalize_json(self, value: str, _coltype: JSON) -> str:
        """Render json/jsonb as raw text."""
        return f"{value}::text"
class PostgresqlDialect(BaseDialect, Mixin_Schema):
    """SQL dialect description for PostgreSQL."""

    name = "PostgreSQL"
    ROUNDS_ON_PREC_LOSS = True
    SUPPORTS_PRIMARY_KEY = True
    SUPPORTS_INDEXES = True
    MIXINS = {Mixin_Schema, Mixin_MD5, Mixin_NormalizeValue, Mixin_RandomSample}

    # information_schema data_type name -> dialect-neutral column type class.
    TYPE_CLASSES = {
        # Timestamps
        "timestamp with time zone": TimestampTZ,
        "timestamp without time zone": Timestamp,
        "timestamp": Timestamp,
        "date": Date,
        # Numbers
        "double precision": Float,
        "real": Float,
        "decimal": Decimal,
        "smallint": Integer,
        "integer": Integer,
        "numeric": Decimal,
        "bigint": Integer,
        # Text
        "character": Text,
        "character varying": Text,
        "varchar": Text,
        "text": Text,
        "json": JSON,
        "jsonb": JSON,
        "uuid": Native_UUID,
        "boolean": Boolean,
    }

    def quote(self, s: str):
        """Quote an identifier with double quotes."""
        return f'"{s}"'

    def to_string(self, s: str):
        """Cast an expression to text."""
        return f"{s}::varchar"

    def concat(self, items: List[str]) -> str:
        """Concatenate string expressions with the SQL || operator."""
        joined_exprs = " || ".join(items)
        return f"({joined_exprs})"

    def METHOD_NAME(self, p: int) -> int:
        # Subtract 2 to compensate for precision quirks observed in
        # PostgreSQL float handling.
        return super().METHOD_NAME(p) - 2

    def set_timezone_to_utc(self) -> str:
        return "SET TIME ZONE 'UTC'"

    def current_timestamp(self) -> str:
        return "current_timestamp"

    def type_repr(self, t) -> str:
        # Timestamp-with-tz needs its precision spelled out in DDL.
        if isinstance(t, TimestampTZ):
            return f"timestamp ({t.precision}) with time zone"
        return super().type_repr(t)
class PostgreSQL(ThreadedDatabase):
    """PostgreSQL connector; each worker thread opens its own psycopg2 connection."""

    dialect = PostgresqlDialect()
    SUPPORTS_UNIQUE_CONSTAINT = True  # (attribute spelling is part of the public API)
    CONNECT_URI_HELP = "postgresql://<user>:<password>@<host>/<database>"
    CONNECT_URI_PARAMS = ["database?"]

    default_schema = "public"

    def __init__(self, *, thread_count, **kw):
        # All remaining keyword args are forwarded verbatim to psycopg2.connect().
        self._args = kw
        super().__init__(thread_count=thread_count)

    def create_connection(self):
        """Open one psycopg2 connection from the stored connect arguments."""
        if not self._args:
            self._args["host"] = None  # psycopg2 requires 1+ arguments
        pg = import_postgresql()
        try:
            c = pg.connect(**self._args)
            if SESSION_TIME_ZONE:
                # Test hook: force a session time zone when requested.
                c.cursor().execute(f"SET TIME ZONE '{SESSION_TIME_ZONE}'")
            return c
        except pg.OperationalError as e:
            raise ConnectError(*e.args) from e

    def select_table_schema(self, path: DbPath) -> str:
        """Build the information_schema query for a table's column metadata.

        NOTE(review): identifiers are interpolated directly into the SQL
        string; assumes table paths come from trusted configuration — confirm.
        """
        database, schema, table = self._normalize_table_path(path)

        info_schema_path = ["information_schema", "columns"]
        if database:
            info_schema_path.insert(0, database)

        return (
            f"SELECT column_name, data_type, datetime_precision, numeric_precision, numeric_scale FROM {'.'.join(info_schema_path)} "
            f"WHERE table_name = '{table}' AND table_schema = '{schema}'"
        )

    def select_table_unique_columns(self, path: DbPath) -> str:
        """Build the information_schema query listing key columns of a table."""
        database, schema, table = self._normalize_table_path(path)

        info_schema_path = ["information_schema", "key_column_usage"]
        if database:
            info_schema_path.insert(0, database)

        return (
            "SELECT column_name "
            f"FROM {'.'.join(info_schema_path)} "
            f"WHERE table_name = '{table}' AND table_schema = '{schema}'"
        )

    def _normalize_table_path(self, path: DbPath) -> DbPath:
        """Expand a 1-3 element table path to (database, schema, table)."""
        if len(path) == 1:
            return None, self.default_schema, path[0]
        elif len(path) == 2:
            return None, path[0], path[1]
        elif len(path) == 3:
            return path

        raise ValueError(
            f"{self.name}: Bad table path for {self}: '{'.'.join(path)}'. Expected format: table, schema.table, or database.schema.table"
        )
1,409 | test field alias order irrelevant | from dagster import Field, Permissive, Selector, Shape
from dagster._config import compute_fields_hash
def _hash(fields):
    """Hash a field dict the same way the config system does (no description)."""
    return compute_fields_hash(fields, description=None)
def test_compute_fields_hash():
    # The computed hash must be a string digest.
    assert isinstance(_hash({"some_int": Field(int)}), str)
def test_hash_diff():
    # Any change to name, type, requiredness, default, or description
    # must change the hash.
    assert _hash({"some_int": Field(int)}) != _hash({"another_int": Field(int)})

    assert _hash({"same_name": Field(int)}) != _hash({"same_name": Field(str)})

    assert _hash({"same_name": Field(int)}) != _hash({"same_name": Field(int, is_required=False)})

    assert _hash({"same_name": Field(int)}) != _hash(
        {"same_name": Field(int, is_required=False, default_value=2)}
    )

    assert _hash({"same_name": Field(int, is_required=False)}) != _hash(
        {"same_name": Field(int, is_required=False, default_value=2)}
    )

    assert _hash({"same_name": Field(int)}) != _hash({"same_name": Field(int, description="desc")})
def test_construct_same_dicts():
    # Structurally identical Shapes are interned to one instance with one key.
    int_dict_1 = Shape(fields={"an_int": Field(int)})
    int_dict_2 = Shape(fields={"an_int": Field(int)})

    # assert identical object
    assert int_dict_1 is int_dict_2

    # assert equivalent key
    assert int_dict_1.key == int_dict_2.key
def test_construct_same_fields_different_aliases():
    """Shapes with identical fields but different alias maps must not be
    deduplicated: the alias map is part of the config type's identity."""
    int_dict_1 = Shape(fields={"an_int": Field(int)}, field_aliases={"an_int": "foo"})
    int_dict_2 = Shape(fields={"an_int": Field(int)}, field_aliases={"an_int": "bar"})

    assert int_dict_1 is not int_dict_2
    # `!=` states the intent directly (was: `assert not a == b`).
    assert int_dict_1.key != int_dict_2.key
def test_field_order_irrelevant():
    # Field insertion order must not affect identity or key.
    int_dict_1 = Shape(fields={"an_int": Field(int), "another_int": Field(int)})

    int_dict_2 = Shape(fields={"another_int": Field(int), "an_int": Field(int)})

    # assert identical object
    assert int_dict_1 is int_dict_2

    # assert equivalent key
    assert int_dict_1.key == int_dict_2.key
def METHOD_NAME():
    # The alias map, like the field dict, must hash independently of
    # insertion order.
    int_dict_1 = Shape(
        fields={"an_int": Field(int), "another_int": Field(int)},
        field_aliases={"an_int": "foo", "another_int": "bar"},
    )

    int_dict_2 = Shape(
        fields={"an_int": Field(int), "another_int": Field(int)},
        field_aliases={"another_int": "bar", "an_int": "foo"},
    )

    assert int_dict_1 is int_dict_2
    assert int_dict_1.key == int_dict_2.key
def test_construct_different_dicts():
    # Different field sets produce distinct objects and keys.
    int_dict = Shape(fields={"an_int": Field(int)})
    string_dict = Shape(fields={"a_string": Field(str)})

    assert int_dict is not string_dict
    assert int_dict.key != string_dict.key
def test_construct_permissive_dict_same_same():
    # A bare Permissive() is interned to a single instance.
    assert Permissive() is Permissive()
def test_construct_same_perm_dicts():
    # Interning also applies to Permissive dicts with declared fields.
    int_perm_dict_1 = Permissive(fields={"an_int": Field(int)})
    int_perm_dict_2 = Permissive(fields={"an_int": Field(int)})

    # assert identical object
    assert int_perm_dict_1 is int_perm_dict_2

    # assert equivalent key
    assert int_perm_dict_1.key == int_perm_dict_2.key
def test_construct_different_perm_dicts():
    # Different declared fields mean different Permissive types.
    int_perm_dict = Permissive(fields={"an_int": Field(int)})
    string_perm_dict = Permissive(fields={"a_string": Field(str)})

    assert int_perm_dict is not string_perm_dict
    assert int_perm_dict.key != string_perm_dict.key
def test_construct_same_selectors():
    # Interning also applies to Selector types.
    int_selector_1 = Selector(fields={"an_int": Field(int)})
    int_selector_2 = Selector(fields={"an_int": Field(int)})

    # assert identical object
    assert int_selector_1 is int_selector_2

    # assert equivalent key
    assert int_selector_1.key == int_selector_2.key
def test_construct_different_selectors():
    # Different declared fields mean different Selector types.
    int_selector = Selector(fields={"an_int": Field(int)})
    string_selector = Selector(fields={"a_string": Field(str)})

    assert int_selector is not string_selector
    assert int_selector.key != string_selector.key
def test_kitchen_sink():
    # Deeply nested, structurally equal configs built with different literal
    # styles and key order must still intern to one object/key.
    big_dict_1 = Shape(
        {
            "field_one": Field(int, default_value=2, is_required=False),
            "field_two": Field(
                Shape(
                    {
                        "nested_field_one": Field(bool),
                        "nested_selector": Field(
                            Selector(
                                {
                                    "int_field_in_selector": Field(int),
                                    "permissive_dict_in_selector": Field(Permissive()),
                                    "permissive_dict_with_fields_in_selector": Field(
                                        Permissive({"string_field": Field(str)})
                                    ),
                                }
                            )
                        ),
                    }
                )
            ),
        }
    )

    big_dict_2 = Shape(
        {
            "field_one": Field(int, default_value=2, is_required=False),
            "field_two": Field(
                Shape(
                    fields={
                        "nested_field_one": Field(bool),
                        "nested_selector": Field(
                            Selector(
                                fields={
                                    "permissive_dict_in_selector": Field(Permissive()),
                                    "int_field_in_selector": Field(int),
                                    "permissive_dict_with_fields_in_selector": Field(
                                        Permissive(fields={"string_field": Field(str)})
                                    ),
                                }
                            )
                        ),
                    }
                )
            ),
        }
    )

    assert big_dict_1 is big_dict_2
    assert big_dict_1.key == big_dict_2.key

    # differs way down in tree
    big_dict_3 = Shape(
        {
            "field_one": Field(int, default_value=2, is_required=False),
            "field_two": Field(
                Shape(
                    fields={
                        "nested_field_one": Field(bool),
                        "nested_selector": Field(
                            Selector(
                                fields={
                                    "permissive_dict_in_selector": Field(Permissive()),
                                    "int_field_in_selector": Field(int),
                                    "permissive_dict_with_fields_in_selector": Field(
                                        Permissive(fields={"int_field": Field(int)})
                                    ),
                                }
                            )
                        ),
                    }
                )
            ),
        }
    )

    assert big_dict_1 is not big_dict_3
    assert big_dict_1.key != big_dict_3.key
1,410 | reset | from collections import namedtuple
from typing import Any
import numpy as np
import randaugment.augmentation_transforms as augmentation_transforms
import randaugment.policies as found_policies
import torch
from torch import nn
# From: https://github.com/quark0/DARTS
# Architecture record: per the DARTS convention, op lists for the normal and
# reduce cells plus the node indices whose outputs are concatenated.
Genotype = namedtuple("Genotype", "normal normal_concat reduce reduce_concat")
class EMA(nn.Module):
    """Tracks exponential moving averages of a set of tensors.

    Shadow copies live in buffers named "shadow0", "shadow1", ... so they
    are persisted through ``state_dict``.
    """

    def __init__(self, mu):
        super(EMA, self).__init__()
        # Decay factor: new_avg = mu * avg + (1 - mu) * value.
        self.mu = mu

    def register(self, params):
        """Snapshot ``params`` into shadow buffers, one per tensor."""
        for idx, param in enumerate(params):
            self.register_buffer("shadow" + str(idx), param.clone().detach())

    def shadow_vars(self):
        """Yield the shadow buffers in registration order."""
        for buf in self.buffers():
            yield buf

    def forward(self, new_params):
        """Fold ``new_params`` into the running averages in place."""
        for shadow, param in zip(self.shadow_vars(), new_params):
            blended = self.mu * shadow + (1 - self.mu) * param.detach()
            shadow.data = blended.data
class EMAWrapper(nn.Module):
    """Wraps a model and maintains EMA copies of its parameters and
    batch-norm running statistics, with swap-in/swap-out helpers."""

    def __init__(self, ema_decay, model):
        super(EMAWrapper, self).__init__()
        self.model = model
        self.ema = EMA(ema_decay)
        self.ema.register(self.ema_vars())

        # Keep "curr*" copies of the live values so training can resume
        # after the EMA weights are swapped in via restore_ema().
        for i, p in enumerate(self.ema_vars()):
            copy = p.clone().detach()
            self.register_buffer("curr" + str(i), copy)

    def curr_vars(self):
        """Yield the saved current-value buffers (name prefix "curr")."""
        for n, b in self.named_buffers():
            if n[0:4] == "curr":
                yield b

    def ema_vars(self):
        """Yield every tensor tracked by the EMA: all parameters plus the
        batch-norm running-statistics buffers."""
        for p in self.model.parameters():
            yield p
        for n, b in self.model.named_buffers():
            # Fixed: the original condition `"running_mean" or "running_var" in n`
            # was always truthy, so *every* buffer (e.g. num_batches_tracked)
            # was tracked. Only running_mean/running_var are intended here.
            if "running_mean" in n or "running_var" in n:
                yield b

    def forward(self, *args):
        return self.model(*args)

    def update_ema(self):
        """Fold the current values into the running averages."""
        self.ema(self.ema_vars())

    def restore_ema(self):
        """Save current values to "curr*" and swap the EMA values in."""
        for curr, shad, p in zip(self.curr_vars(), self.ema.shadow_vars(), self.ema_vars()):
            curr.data = p.data
            p.data = shad.data

    def restore_latest(self):
        """Swap the saved current values back in after restore_ema()."""
        for curr, p in zip(self.curr_vars(), self.ema_vars()):
            p.data = curr.data
def accuracy(output, target, topk=(1,)):
    """Compute top-k accuracy percentages for a batch of predictions.

    Args:
        output: (batch, num_classes) score tensor.
        target: (batch,) ground-truth class indices.
        topk: iterable of k values to evaluate.

    Returns:
        List of scalar tensors, one accuracy (in percent) per requested k.
    """
    k_max = max(topk)
    batch = target.size(0)

    # (batch, k_max) indices of the highest-scoring classes, transposed so
    # row r holds everyone's rank-r prediction.
    _, top_idx = output.topk(k_max, 1, True, True)
    top_idx = top_idx.t()
    hits = top_idx.eq(target.contiguous().view(1, -1).expand_as(top_idx))

    return [
        hits[:k].contiguous().view(-1).float().sum(0).mul_(100.0 / batch)
        for k in topk
    ]
def drop_path(x, drop_prob):
    """Randomly zero whole samples of ``x`` in place (stochastic depth).

    Surviving samples are scaled by 1/keep_prob so the expectation is
    unchanged. NOTE(review): the mask is allocated with
    ``torch.cuda.FloatTensor``, so this requires ``x`` on GPU — confirm.
    """
    if drop_prob > 0.0:
        keep_prob = 1.0 - drop_prob
        # One Bernoulli draw per sample, broadcast over C/H/W.
        mask = torch.cuda.FloatTensor(x.size(0), 1, 1, 1).bernoulli_(keep_prob)
        x.div_(keep_prob)
        x.mul_(mask)
    return x
class Cutout(object):
    """Zero out one randomly placed square patch of a CHW image tensor."""

    def __init__(self, length):
        # Side length of the square patch to erase (clipped at the borders).
        self.length = length

    def __call__(self, img):
        height, width = img.size(1), img.size(2)
        mask = np.ones((height, width), np.float32)

        # Pick the patch centre uniformly at random.
        cy = np.random.randint(height)
        cx = np.random.randint(width)
        half = self.length // 2

        top = np.clip(cy - half, 0, height)
        bottom = np.clip(cy + half, 0, height)
        left = np.clip(cx - half, 0, width)
        right = np.clip(cx + half, 0, width)
        mask[top:bottom, left:right] = 0.0

        patch = torch.from_numpy(mask).expand_as(img)
        img *= patch
        return img
# From: https://github.com/yuhuixu1993/PC-DARTS
class CrossEntropyLabelSmooth(nn.Module):
    """
    Assign small probability to non-target classes to hopefully learn faster and more generalizable features.
    See this paper for more info:
    https://arxiv.org/pdf/1906.02629.pdf
    """

    def __init__(self, num_classes, epsilon):
        super(CrossEntropyLabelSmooth, self).__init__()
        self.num_classes = num_classes
        self.epsilon = epsilon
        self.logsoftmax = nn.LogSoftmax(dim=1)

    def forward(self, inputs, targets):
        log_probs = self.logsoftmax(inputs)
        # One-hot targets, then blend epsilon of uniform mass over classes.
        one_hot = torch.zeros_like(log_probs).scatter_(1, targets.unsqueeze(1), 1)
        smoothed = (1 - self.epsilon) * one_hot + self.epsilon / self.num_classes
        return (-smoothed * log_probs).mean(0).sum()
# Memory efficient version for training from: https://github.com/lukemelas/EfficientNet-PyTorch/blob/master/efficientnet_pytorch/utils.py
class SwishImplementation(torch.autograd.Function):
    """Memory-efficient Swish: x * sigmoid(x).

    Only the input is saved for backward; the gradient is recomputed from
    it, trading a little compute for activation memory.
    """

    @staticmethod
    def forward(ctx, i):
        result = i * torch.sigmoid(i)
        ctx.save_for_backward(i)
        return result

    @staticmethod
    def backward(ctx, grad_output):
        # Fixed: `ctx.saved_variables` is a deprecated alias; `saved_tensors`
        # is the supported accessor for tensors stored via save_for_backward.
        (i,) = ctx.saved_tensors
        sigmoid_i = torch.sigmoid(i)
        # d/dx [x*s(x)] = s(x) * (1 + x * (1 - s(x)))
        return grad_output * (sigmoid_i * (1 + i * (1 - sigmoid_i)))
class Swish(nn.Module):
    """Swish activation function.

    See: https://arxiv.org/abs/1710.05941
    """

    def forward(self, x):
        # Routed through the custom autograd Function to save activation memory.
        return SwishImplementation.apply(x)
class HSwish(nn.Module):
    """Hard Swish activation function.

    See: https://arxiv.org/abs/1905.02244
    """

    def forward(self, x):
        # x * relu6(x + 3) / 6 — a piecewise-linear approximation of Swish.
        gate = nn.functional.relu6(x + 3) / 6
        return x * gate
class RandAugment(object):
    """
    Augmentation policy learned by RL. From:
    https://arxiv.org/abs/1805.09501
    """

    def __init__(self):
        # Pre-load the published sub-policy list once.
        self.policies = found_policies.randaug_policies()

    def __call__(self, img):
        # Sample one sub-policy uniformly per image and apply it.
        policy = self.policies[np.random.choice(len(self.policies))]
        final_img = augmentation_transforms.apply_policy(policy, img)
        return final_img
class SqueezeAndExcitation(nn.Module):
    """Squeeze-and-Excitation module.

    See: https://arxiv.org/abs/1709.01507
    """

    def __init__(self, n_feature, n_hidden, spatial_dims=[2, 3], active_fn=None):
        # NOTE(review): `spatial_dims` is a mutable default (shared across
        # instances; read-only here), and `active_fn=None` would crash on the
        # call below — callers must always pass a module factory (e.g.
        # nn.ReLU). Confirm intended.
        super(SqueezeAndExcitation, self).__init__()
        self.n_feature = n_feature
        self.n_hidden = n_hidden
        self.spatial_dims = spatial_dims
        self.se_reduce = nn.Conv2d(n_feature, n_hidden, 1, bias=True)
        self.se_expand = nn.Conv2d(n_hidden, n_feature, 1, bias=True)
        self.active_fn = active_fn()

    def forward(self, x):
        # Squeeze: average over the spatial dims; excite: 1x1 convs produce
        # per-channel gates squashed into (0, 1) by the sigmoid.
        se_tensor = x.mean(self.spatial_dims, keepdim=True)
        se_tensor = self.se_expand(self.active_fn(self.se_reduce(se_tensor)))
        return torch.sigmoid(se_tensor) * x

    def __repr__(self):
        return "{}({}, {}, spatial_dims={}, active_fn={})".format(
            self._get_name(),
            self.n_feature,
            self.n_hidden,
            self.spatial_dims,
            self.active_fn,
        )
class AvgrageMeter(object):
    """Accumulates a running sum/count and exposes the mean as ``avg``.

    (Class-name spelling kept as-is: it is part of the public API.)
    """

    def __init__(self):
        self.METHOD_NAME()

    def METHOD_NAME(self):
        """Clear the accumulated statistics."""
        self.avg = 0
        self.sum = 0
        self.cnt = 0

    def update(self, val, n=1):
        """Record ``val`` observed ``n`` times and refresh the mean."""
        self.cnt += n
        self.sum += val * n
        self.avg = self.sum / self.cnt
1,411 | grepx | #!/usr/bin/env python
"""Create grep like remote behavior on show run or command output."""
from __future__ import print_function
from __future__ import unicode_literals
import argparse
import sys
import os
import subprocess
import threading
try:
from Queue import Queue
except ImportError:
from queue import Queue
from datetime import datetime
from getpass import getpass
from netmiko import ConnectHandler
from netmiko.utilities import load_devices, display_inventory
from netmiko.utilities import obtain_all_devices
from netmiko.utilities import obtain_netmiko_filename, write_tmp_file, ensure_dir_exists
from netmiko.utilities import find_netmiko_dir
from netmiko.utilities import SHOW_RUN_MAPPER
GREP = "/bin/grep"
if not os.path.exists(GREP):
GREP = "/usr/bin/grep"
NETMIKO_BASE_DIR = "~/.netmiko"
ERROR_PATTERN = "%%%failed%%%"
__version__ = "0.1.0"
def METHOD_NAME(files, pattern, grep_options, use_colors=True):
    """Run the system grep over the cached device-output files.

    Args:
        files: a file name or list/tuple of file names.
        pattern: pattern handed to grep verbatim.
        grep_options: extra grep command-line options.
        use_colors: when True, add --color=auto.

    Returns:
        Empty string (grep's output goes straight to stdout).
    """
    if not isinstance(files, (list, tuple)):
        files = [files]
    # Copy before appending so the caller's options list is not mutated
    # (the original `grep_options += [...]` extended it in place).
    grep_options = list(grep_options)
    if use_colors:
        grep_options.append("--color=auto")

    # Make grep output look nicer by running from the netmiko output dir
    # and passing bare file names.
    _, netmiko_full_dir = find_netmiko_dir()
    os.chdir(netmiko_full_dir)
    files = sorted(os.path.basename(a_file) for a_file in files)

    grep_list = [GREP] + grep_options + [pattern] + files
    proc = subprocess.Popen(grep_list, shell=False)
    proc.communicate()
    return ""
def ssh_conn(device_name, a_device, cli_command, output_q):
    """Thread worker: SSH to one device, run ``cli_command``, queue the output.

    Puts ``{device_name: output}`` on ``output_q``; any failure is converted
    into the ERROR_PATTERN sentinel so the main thread can report the device
    as failed instead of crashing.
    """
    try:
        net_connect = ConnectHandler(**a_device)
        net_connect.enable()
        output = net_connect.send_command_expect(cli_command)
        net_connect.disconnect()
    except Exception:
        # Deliberate best-effort: flag the device via the sentinel value.
        output = ERROR_PATTERN
    output_q.put({device_name: output})
def parse_arguments(args):
    """Parse command-line arguments.

    A pattern and a device/group are mandatory unless --list-devices or
    --version short-circuits the run.
    """
    parser = argparse.ArgumentParser(
        description="Grep pattern search on Netmiko output (defaults to running-config)"
    )

    # Optional positionals: pattern to grep for and the device/group target.
    for arg_name, arg_help in [
        ("pattern", "Pattern to search for"),
        ("devices", "Device or group to connect to"),
    ]:
        parser.add_argument(arg_name, nargs="?", help=arg_help, action="store", type=str)

    parser.add_argument(
        "--cmd", help="Remote command to execute", action="store", default=None, type=str
    )
    parser.add_argument("--username", help="Username", action="store", type=str)

    # Boolean switches.
    for flag, flag_help in [
        ("--password", "Password"),
        ("--secret", "Enable Secret"),
        ("--use-cache", "Use cached files"),
        ("--list-devices", "List devices from inventory"),
        ("--display-runtime", "Display program runtime"),
        ("--hide-failed", "Hide failed devices"),
        ("--version", "Display version"),
    ]:
        parser.add_argument(flag, help=flag_help, action="store_true")

    parsed = parser.parse_args(args)

    # pattern/devices are only optional for the informational modes.
    if not (parsed.list_devices or parsed.version):
        if not (parsed.devices and parsed.pattern):
            parser.error("Grep pattern or devices not specified.")
    return parsed
def main_ep():
    """Console-script entry point: run main() on the CLI args and exit
    with its return value."""
    sys.exit(main(sys.argv[1:]))
def main(args):
    """Drive the whole grep run: parse args, collect device output (live
    over SSH or from cache), then shell out to grep over the per-device
    files. Returns 0 on success or an error-message string (which
    ``sys.exit`` in main_ep prints before exiting non-zero)."""
    start_time = datetime.now()
    cli_args = parse_arguments(args)

    cli_username = cli_args.username if cli_args.username else None
    cli_password = getpass() if cli_args.password else None
    cli_secret = getpass("Enable secret: ") if cli_args.secret else None

    version = cli_args.version
    if version:
        print("netmiko-grep v{}".format(__version__))
        return 0

    list_devices = cli_args.list_devices
    if list_devices:
        my_devices = load_devices()
        display_inventory(my_devices)
        return 0

    cli_command = cli_args.cmd
    cmd_arg = False
    if cli_command:
        cmd_arg = True

    device_or_group = cli_args.devices.strip()
    pattern = cli_args.pattern
    use_cached_files = cli_args.use_cache
    hide_failed = cli_args.hide_failed

    output_q = Queue()
    my_devices = load_devices()
    if device_or_group == "all":
        device_group = obtain_all_devices(my_devices)
    else:
        try:
            devicedict_or_group = my_devices[device_or_group]
            device_group = {}
            if isinstance(devicedict_or_group, list):
                # A group entry maps to a list of device names; expand it.
                for tmp_device_name in devicedict_or_group:
                    device_group[tmp_device_name] = my_devices[tmp_device_name]
            else:
                device_group[device_or_group] = devicedict_or_group
        except KeyError:
            return (
                "Error reading from netmiko devices file."
                " Device or group not found: {0}".format(device_or_group)
            )

    # Retrieve output from devices
    my_files = []
    failed_devices = []
    if not use_cached_files:
        # One SSH worker thread per device; results arrive on output_q.
        for device_name, a_device in device_group.items():
            if cli_username:
                a_device["username"] = cli_username
            if cli_password:
                a_device["password"] = cli_password
            if cli_secret:
                a_device["secret"] = cli_secret
            if not cmd_arg:
                # Default to the platform's "show run" equivalent.
                cli_command = SHOW_RUN_MAPPER.get(a_device["device_type"], "show run")
            my_thread = threading.Thread(
                target=ssh_conn, args=(device_name, a_device, cli_command, output_q)
            )
            my_thread.start()
        # Make sure all threads have finished
        main_thread = threading.current_thread()
        for some_thread in threading.enumerate():
            if some_thread != main_thread:
                some_thread.join()
        # Write files
        while not output_q.empty():
            my_dict = output_q.get()
            netmiko_base_dir, netmiko_full_dir = find_netmiko_dir()
            ensure_dir_exists(netmiko_base_dir)
            ensure_dir_exists(netmiko_full_dir)
            for device_name, output in my_dict.items():
                file_name = write_tmp_file(device_name, output)
                # The ERROR_PATTERN sentinel marks devices whose SSH attempt failed.
                if ERROR_PATTERN not in output:
                    my_files.append(file_name)
                else:
                    failed_devices.append(device_name)
    else:
        # Reuse the per-device files cached by a previous run.
        for device_name in device_group:
            file_name = obtain_netmiko_filename(device_name)
            try:
                with open(file_name) as f:
                    output = f.read()
            except IOError:
                return "Some cache files are missing: unable to use --use-cache option."
            if ERROR_PATTERN not in output:
                my_files.append(file_name)
            else:
                failed_devices.append(device_name)

    grep_options = []
    METHOD_NAME(my_files, pattern, grep_options)
    if cli_args.display_runtime:
        print("Total time: {0}".format(datetime.now() - start_time))

    if not hide_failed:
        if failed_devices:
            print("\n")
            print("-" * 20)
            print("Failed devices:")
            failed_devices.sort()
            for device_name in failed_devices:
                print(" {}".format(device_name))
            print()
    return 0
if __name__ == "__main__":
sys.exit(main(sys.argv[1:])) |
1,412 | rate guard | # coding: utf-8
"""
Definition of the job dashboard interface.
"""
__all__ = ["BaseJobDashboard", "NoJobDashboard", "cache_by_status"]
import time
import functools
from contextlib import contextmanager
from abc import ABCMeta, abstractmethod
import six
from law.util import perf_counter
def cache_by_status(func):
    """
    Decorator for :py:meth:`BaseJobDashboard.publish` (and inheriting classes) that caches the
    last published status per job id to decide whether a new publication is necessary. When the
    status did not change since the last call, the wrapped publish method is not invoked and
    *None* is returned instead.
    """
    @functools.wraps(func)
    def wrapper(self, job_data, event, job_num, *args, **kwargs):
        job_id = job_data["job_id"]
        new_status = self.map_status(job_data.get("status"), event)

        # skip publishing when the mapped status is invalid
        if not new_status:
            return None

        # skip publishing when the status did not change since the last call
        if self._last_states.get(job_id) == new_status:
            return None

        # remember the new status, then actually publish
        self._last_states[job_id] = new_status
        return func(self, job_data, event, job_num, *args, **kwargs)

    return wrapper
class BaseJobDashboard(six.with_metaclass(ABCMeta, object)):
    """
    Base class of a minimal job dashboard interface that is used from within
    :py:class:`law.workflow.remote.BaseRemoteWorkflow`'s.

    .. py:classattribute:: persistent_attributes

        type: list

        List of instance attributes that should be marked as being persistent. This is (e.g.) used
        in the :py:class:`law.workflow.remote.BaseRemoteWorkflow` when saving job and submission
        information to submission files. Common use cases are user information.

    .. py:attribute:: max_rate

        type: int

        Maximum number of events that can be published per second. :py:meth:`rate_guard` uses this
        value to delay function calls.
    """

    # placeholder; rebound to a staticmethod wrapping the module-level
    # cache_by_status decorator right after this class definition
    cache_by_status = None

    persistent_attributes = []

    def __init__(self, max_rate=0):
        super(BaseJobDashboard, self).__init__()

        # maximum number of events per second
        self.max_rate = max_rate

        # timestamp of last event, used to ensure that max_rate is not exceeded
        self._last_event_time = 0.0

        # last dashboard status per job_id, used to prevent subsequent requests for jobs
        # without any status change
        self._last_states = {}

    def get_persistent_config(self):
        """
        Returns the values of all :py:attr:`persistent_attributes` of this instance in a dictionary.
        """
        return {attr: getattr(self, attr) for attr in self.persistent_attributes}

    def apply_config(self, config):
        """
        Sets all attributes in a dictionary *config* to this instance. This can be understood as
        the counterpart of :py:meth:`get_persistent_config`. Unknown attributes are ignored.
        """
        for attr, value in six.iteritems(config):
            if hasattr(self, attr):
                setattr(self, attr, value)

    @contextmanager
    def METHOD_NAME(self):
        """
        Context guard that ensures that decorated contexts are delayed in order to limit the number
        of status publications per second, defined by :py:attr:`max_rate`. Example:

        .. code-block:: python

            # print some numbers, which will take 10 / max_rate seconds
            for i in range(10):
                with self.rate_guard():
                    print(i)
        """
        now = 0.0

        if self.max_rate > 0:
            now = perf_counter()
            # sleep long enough so that 1/max_rate seconds passed since the last event
            diff = self._last_event_time + 1.0 / self.max_rate - now
            if diff > 0:
                time.sleep(diff)

        try:
            yield
        finally:
            # record the entry time of this event for the next rate check
            self._last_event_time = now

    def remote_hook_file(self):
        """
        This method can return the path to a file that is considered as an input file to remote
        jobs. This file can contain bash functions, environment variables, etc., that are necessary
        to communicate with the implemented job dashboard. When *None* is returned, no file is sent.
        """
        return None

    def remote_hook_data(self, job_num, attempt):
        """
        This method can return a dictionary that is sent with remote jobs in the format
        ``key1=value1 key2=value2 ...``. The returned dictionary should (but does not have to)
        include the job number *job_num* and the retry *attempt*.
        """
        return None

    def create_tracking_url(self):
        """
        This method can return a tracking url that refers to a web page that visualizes jobs. When
        set, the url is shown in the central luigi scheduler.
        """
        return None

    @abstractmethod
    def map_status(self, job_status, event):
        """
        Maps the *job_status* (see :py:class:`law.job.base.BaseJobManager`) for a particular *event*
        to the status name that is accepted by the implemented job dashobard. Possible events are:

        - action.submit
        - action.cancel
        - status.pending
        - status.running
        - status.finished
        - status.retry
        - status.failed
        """
        return

    @abstractmethod
    def publish(self, job_data, event, job_num, *args, **kwargs):
        """
        Publishes the status of a job to the implemented job dashboard. *job_data* is a dictionary
        that contains a *job_id* and a *status* string (see
        :py:meth:`law.workflow.remote.StatusData.job_data`).
        """
        return


# expose the decorator on the class for convenient use in subclasses
BaseJobDashboard.cache_by_status = staticmethod(cache_by_status)
class NoJobDashboard(BaseJobDashboard):
    """
    Null object implementation of the job dashboard interface. Instances publish nothing and can
    serve as a drop-in placeholder wherever a dashboard instance is required, such as in
    :py:class:`law.workflow.remote.BaseRemoteWorkflow`.
    """

    def map_status(self, *args, **kwargs):
        """"""
        # nothing to map in the null implementation
        return None

    def publish(self, *args, **kwargs):
        """"""
        # intentionally a no-op
        return None
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
#
# Code generated by aaz-dev-tools
# --------------------------------------------------------------------------------------------
# pylint: skip-file
# flake8: noqa
from azure.cli.core.aaz import *
@register_command(
    "monitor private-link-scope scoped-resource list",
    is_preview=True,
)
class List(AAZCommand):
    """List all scoped resource of a private link scope resource.

    :example: List all scoped resource of a private link scope resource.
        az monitor private-link-scope scoped-resource list -g MyResourceGroup --scope-name MyScope
    """

    # maps this command to the ARM resource path and api-version it was generated from
    _aaz_info = {
        "version": "2019-10-17-preview",
        "resources": [
            ["mgmt-plane", "/subscriptions/{}/resourcegroups/{}/providers/microsoft.insights/privatelinkscopes/{}/scopedresources", "2019-10-17-preview"],
        ]
    }

    # the list operation returns paged results (nextLink follow-up)
    AZ_SUPPORT_PAGINATION = True

    def _handler(self, command_args):
        super()._handler(command_args)
        return self.build_paging(self._execute_operations, self._output)

    _args_schema = None

    @classmethod
    def _build_arguments_schema(cls, *args, **kwargs):
        # build the argument schema once and cache it on the class
        if cls._args_schema is not None:
            return cls._args_schema
        cls._args_schema = super()._build_arguments_schema(*args, **kwargs)

        # define Arg Group ""

        _args_schema = cls._args_schema
        _args_schema.resource_group = AAZResourceGroupNameArg(
            required=True,
        )
        _args_schema.scope_name = AAZStrArg(
            options=["--scope-name"],
            help="Name of the Azure Monitor Private Link Scope.",
            required=True,
        )
        return cls._args_schema

    def _execute_operations(self):
        self.pre_operations()
        self.PrivateLinkScopedResourcesListByPrivateLinkScope(ctx=self.ctx)()
        self.post_operations()

    @register_callback
    def pre_operations(self):
        pass

    @register_callback
    def post_operations(self):
        pass

    def _output(self, *args, **kwargs):
        # return (page of results, next link) as expected by build_paging
        result = self.deserialize_output(self.ctx.vars.instance.value, client_flatten=True)
        next_link = self.deserialize_output(self.ctx.vars.instance.next_link)
        return result, next_link

    class PrivateLinkScopedResourcesListByPrivateLinkScope(AAZHttpOperation):
        # HTTP operation that performs the actual REST call
        CLIENT_TYPE = "MgmtClient"

        def __call__(self, *args, **kwargs):
            request = self.make_request()
            session = self.client.send_request(request=request, stream=False, **kwargs)
            if session.http_response.status_code in [200]:
                return self.on_200(session)

            return self.on_error(session.http_response)

        @property
        def url(self):
            return self.client.format_url(
                "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Insights/privateLinkScopes/{scopeName}/scopedResources",
                **self.url_parameters
            )

        @property
        def METHOD_NAME(self):
            # HTTP verb of the request
            return "GET"

        @property
        def error_format(self):
            return "MgmtErrorFormat"

        @property
        def url_parameters(self):
            parameters = {
                **self.serialize_url_param(
                    "resourceGroupName", self.ctx.args.resource_group,
                    required=True,
                ),
                **self.serialize_url_param(
                    "scopeName", self.ctx.args.scope_name,
                    required=True,
                ),
                **self.serialize_url_param(
                    "subscriptionId", self.ctx.subscription_id,
                    required=True,
                ),
            }
            return parameters

        @property
        def query_parameters(self):
            parameters = {
                **self.serialize_query_param(
                    "api-version", "2019-10-17-preview",
                    required=True,
                ),
            }
            return parameters

        @property
        def header_parameters(self):
            parameters = {
                **self.serialize_header_param(
                    "Accept", "application/json",
                ),
            }
            return parameters

        def on_200(self, session):
            # store the deserialized response body in the shared context
            data = self.deserialize_http_content(session)
            self.ctx.set_var(
                "instance",
                data,
                schema_builder=self._build_schema_on_200
            )

        _schema_on_200 = None

        @classmethod
        def _build_schema_on_200(cls):
            # build the response schema once and cache it on the class
            if cls._schema_on_200 is not None:
                return cls._schema_on_200

            cls._schema_on_200 = AAZObjectType()

            _schema_on_200 = cls._schema_on_200
            _schema_on_200.next_link = AAZStrType(
                serialized_name="nextLink",
                flags={"read_only": True},
            )
            _schema_on_200.value = AAZListType(
                flags={"read_only": True},
            )

            value = cls._schema_on_200.value
            value.Element = AAZObjectType()

            _element = cls._schema_on_200.value.Element
            _element.id = AAZStrType(
                flags={"read_only": True},
            )
            _element.name = AAZStrType(
                flags={"read_only": True},
            )
            _element.properties = AAZObjectType(
                flags={"client_flatten": True},
            )
            _element.type = AAZStrType(
                flags={"read_only": True},
            )

            properties = cls._schema_on_200.value.Element.properties
            properties.linked_resource_id = AAZStrType(
                serialized_name="linkedResourceId",
            )
            properties.provisioning_state = AAZStrType(
                serialized_name="provisioningState",
                flags={"read_only": True},
            )

            return cls._schema_on_200
class _ListHelper:
    """Helper class for List"""


# only the command class is part of the public API of this generated module
__all__ = ["List"]
from hathor.conf import HathorSettings
from hathor.graphviz import GraphvizVisualizer
from hathor.simulator import FakeConnection, Simulator
from hathor.simulator.trigger import StopAfterNTransactions
from tests import unittest
from tests.simulation.base import SimulatorTestCase
from tests.utils import add_custom_tx, gen_new_double_spending
settings = HathorSettings()
class BaseSoftVoidedTestCase(SimulatorTestCase):
    # fixed seed so the simulated mining/tx generation is reproducible
    seed_config = 5988775361793628169

    def _run_test(self, simulator, soft_voided_tx_ids):
        # Generator: yields the second tx generator at three checkpoints so
        # callers can sample its latest transactions at different stages of
        # the simulation (used by _get_txA_hash/_get_txB_hash below).
        manager1 = self.create_peer(soft_voided_tx_ids=set(soft_voided_tx_ids), simulator=simulator)
        manager1.allow_mining_without_peers()

        miner1 = simulator.create_miner(manager1, hashpower=5e6)
        miner1.start()
        simulator.run(60)

        gen_tx1 = simulator.create_tx_generator(manager1, rate=3 / 60., hashpower=1e6, ignore_no_funds=True)
        gen_tx1.start()
        simulator.run(300)
        gen_tx1.stop()

        manager2 = self.create_peer(soft_voided_tx_ids=set(soft_voided_tx_ids), simulator=simulator)
        manager2.soft_voided_tx_ids = set(soft_voided_tx_ids)

        self.graphviz = GraphvizVisualizer(manager2.tx_storage, include_verifications=True, include_funds=True)

        conn12 = FakeConnection(manager1, manager2, latency=0.001)
        simulator.add_connection(conn12)

        miner2 = simulator.create_miner(manager2, hashpower=10e6)
        miner2.start()

        gen_tx2 = simulator.create_tx_generator(manager2, rate=10 / 60., hashpower=1e6, ignore_no_funds=True)
        gen_tx2.start()

        # checkpoint 1: right after the first transaction of gen_tx2
        trigger = StopAfterNTransactions(gen_tx2, quantity=1)
        self.assertTrue(simulator.run(7200, trigger=trigger))
        yield gen_tx2

        # checkpoint 2: after some more simulated time
        simulator.run(300)
        yield gen_tx2

        # checkpoint 3: after mining has stopped
        miner1.stop()
        miner2.stop()
        simulator.run(300)
        yield gen_tx2
        gen_tx2.stop()

        # the remainder of the test requires exactly the two soft voided txs
        self.assertEqual(2, len(soft_voided_tx_ids))
        txA_hash = soft_voided_tx_ids[0]
        txB_hash = soft_voided_tx_ids[1]
        self.graphviz.labels[txA_hash] = 'txA'
        self.graphviz.labels[txB_hash] = 'txB'

        txB = manager2.tx_storage.get_transaction(txB_hash)
        # Get the tx confirmed by the soft voided that will be voided
        tx_base = manager2.tx_storage.get_transaction(txB.parents[0])
        txC = gen_new_double_spending(manager2, use_same_parents=False, tx=tx_base)
        self.graphviz.labels[tx_base.hash] = 'tx_base'
        # high weight so txC wins the conflict against tx_base
        txC.weight = 30
        txC.parents = tx_base.parents
        txC.update_hash()
        self.graphviz.labels[txC.hash] = 'txC'
        self.assertTrue(manager2.propagate_tx(txC, fails_silently=False))

        metaC = txC.get_metadata()
        self.assertIsNone(metaC.voided_by)
        meta_base = tx_base.get_metadata()
        self.assertEqual(meta_base.voided_by, {tx_base.hash})

        # Create 2 blocks confirming C in order to keep this voidance when we add
        # the block confirming the soft voided tx
        blk1 = manager2.generate_mining_block()
        if txC.hash not in blk1.parents:
            blk1.parents[1] = txC.hash
            blk1.update_timestamp(int(manager2.reactor.seconds()))
        blk1.nonce = self.rng.getrandbits(32)
        blk1.update_hash()
        # Uncomment lines below to visualize the DAG and the blockchain.
        # dot = self.graphviz.dot()
        # dot.render('dot0')
        self.assertTrue(manager2.propagate_tx(blk1, fails_silently=False))
        blk1meta = blk1.get_metadata()
        self.graphviz.labels[blk1.hash] = 'blk1'
        self.assertIsNone(blk1meta.voided_by)

        blk2 = manager2.generate_mining_block()
        if txC.hash not in blk2.parents:
            blk2.parents[1] = txC.hash
            blk2.update_timestamp(int(manager2.reactor.seconds()))
        blk2.nonce = self.rng.getrandbits(32)
        blk2.update_hash()
        self.assertTrue(manager2.propagate_tx(blk2, fails_silently=False))
        blk2meta = blk2.get_metadata()
        self.graphviz.labels[blk2.hash] = 'blk2'
        self.assertIsNone(blk2meta.voided_by)

        # Create block that confirms soft voided
        blk3 = manager2.generate_mining_block()
        if txB.hash not in blk3.parents:
            blk3.parents[1] = txB.hash
        blk3.nonce = self.rng.getrandbits(32)
        blk3.update_hash()
        self.assertTrue(manager2.propagate_tx(blk3, fails_silently=False))
        blk3meta = blk3.get_metadata()
        self.graphviz.labels[blk3.hash] = 'blk3'

        simulator.run(10)

        txD = add_custom_tx(manager2, [(txC, 0)], base_parent=txB)
        self.graphviz.labels[txD.hash] = 'txD'

        # blk3 confirmed the soft voided tx, so it must end up voided itself
        blk3meta = blk3.get_metadata()
        self.assertEqual(blk3meta.voided_by, {tx_base.hash, blk3meta.hash})

        metaD = txD.get_metadata()
        self.assertEqual(metaD.voided_by, {tx_base.hash})

    def _get_txA_hash(self):
        # run the scenario with no soft voided txs to discover txA's hash
        simulator = Simulator(seed=self.simulator.seed)
        simulator.start()

        try:
            it = self._run_test(simulator, set())
            gen_tx = next(it)
            txA_hash = gen_tx.latest_transactions[0]
        finally:
            simulator.stop()

        return txA_hash

    def _get_txB_hash(self, txA_hash):
        # re-run the scenario with txA soft voided to discover txB's hash
        # (sampled at the third checkpoint of _run_test)
        simulator = Simulator(seed=self.simulator.seed)
        simulator.start()

        try:
            it = self._run_test(simulator, set([txA_hash]))
            _ = next(it)
            _ = next(it)
            gen_tx = next(it)
            txB_hash = gen_tx.latest_transactions[0]
        finally:
            simulator.stop()

        return txB_hash

    def METHOD_NAME(self):
        # discover the two tx hashes, then run the full scenario with both soft voided
        txA_hash = self._get_txA_hash()
        txB_hash = self._get_txB_hash(txA_hash)
        self.assertNotEqual(txA_hash, txB_hash)
        soft_voided_tx_ids = [
            txA_hash,
            txB_hash,
        ]
        for _ in self._run_test(self.simulator, soft_voided_tx_ids):
            pass
# run the soft-voided scenario with the sync-v1 protocol parameters
class SyncV1SoftVoidedTestCase(unittest.SyncV1Params, BaseSoftVoidedTestCase):
    __test__ = True
# run the soft-voided scenario with the sync-v2 protocol parameters
class SyncV2SoftVoidedTestCase(unittest.SyncV2Params, BaseSoftVoidedTestCase):
    __test__ = True
# sync-bridge should behave like sync-v2
class SyncBridgeSoftVoidedTestCase(unittest.SyncBridgeParams, SyncV2SoftVoidedTestCase):
    __test__ = True
from __future__ import print_function
import numpy as np
import os
import kaldiio
from multiprocessing import Pool
import argparse
from tqdm import tqdm
import math
from funasr.utils.types import str2triple_str
import logging
from typing import List, Union, Tuple, Sequence
from funasr.bin.sv_inference import inference_modelscope
import soundfile
import torch
class MultiProcessRunner:
    """
    Splits a task list into chunks and processes them with a multiprocessing pool.

    Subclasses implement :py:meth:`prepare` (build the task list) and :py:meth:`post`
    (consume the results); the worker callable is injected through the constructor.
    """

    def __init__(self, fn):
        # worker callable, invoked once per sub-task tuple (must be picklable
        # at module level so multiprocessing can ship it to the pool workers)
        self.METHOD_NAME = fn

    def run(self):
        parser = argparse.ArgumentParser("")

        # Task-independent options
        parser.add_argument("--njobs", type=int, default=16)
        parser.add_argument("--debug", action="store_true", default=False)
        parser.add_argument("--no_pbar", action="store_true", default=False)
        parser.add_argument("--verbose", action="store_true", default=False)
        parser.add_argument("--log_level", type=str, default="INFO")
        parser.add_argument("--sr", type=int, default=16000)

        task_list, shared_param, args = self.prepare(parser)

        # split the tasks into njobs roughly equal chunks
        chunk_size = int(math.ceil(float(len(task_list)) / args.njobs))
        if args.verbose:
            print("Split {} tasks into {} sub-tasks with chunk_size {}".format(len(task_list), args.njobs, chunk_size))
        subtask_list = [(i, task_list[i * chunk_size: (i + 1) * chunk_size], shared_param, args)
                        for i in range(args.njobs)]

        result_list = self.pool_run(subtask_list, args)
        self.post(result_list, args)

    def prepare(self, parser: argparse.ArgumentParser):
        """Parse arguments and return ``(task_list, shared_param, args)``."""
        raise NotImplementedError("Please implement the prepare function.")

    def post(self, results_list: list, args: argparse.Namespace):
        """Consume the list of per-chunk results returned by the workers."""
        raise NotImplementedError("Please implement the post function.")

    def pool_run(self, tasks: list, args: argparse.Namespace):
        results = []
        if args.debug:
            # debug mode: run a single chunk in-process for easier debugging
            one_result = self.METHOD_NAME(tasks[0])
            results.append(one_result)
        else:
            pool = Pool(args.njobs)
            for one_result in tqdm(pool.imap(self.METHOD_NAME, tasks), total=len(tasks), ascii=True, disable=args.no_pbar):
                results.append(one_result)
            pool.close()

        return results
class MyRunner(MultiProcessRunner):
    """Multi-process speaker-verification scoring over paired wav scp files."""

    def prepare(self, parser: argparse.ArgumentParser):
        """Add task-specific options, read the input scp files and build the key list.

        Returns ``(key_list, [speech_dict, ref_speech_dict], args)``.
        """
        parser.add_argument(
            "--gpu_inference",
            type=bool,
            default=False
        )
        parser.add_argument(
            "--data_path_and_name_and_type",
            type=str2triple_str,
            required=True,
            action="append"
        )
        parser.add_argument(
            "--gpu_devices",
            type=lambda devices: devices.split(","),
            default=None,
        )
        args = parser.parse_args()

        logging.basicConfig(
            level=args.log_level,
            format="%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s",
        )

        # fall back to CPU when gpu inference is requested without devices
        if args.gpu_inference and (args.gpu_devices is None or len(args.gpu_devices) == 0):
            logging.warning("gpu_inference is set to True, but gpu_devices is not given, use CPU instead.")
            args.gpu_inference = False
        if args.gpu_inference:
            # one group of jobs per gpu device
            args.njobs = args.njobs * len(args.gpu_devices)

        speech_dict = {}
        ref_speech_dict = {}
        for _path, _name, _type in args.data_path_and_name_and_type:
            if _name == "speech":
                speech_dict = self.read_data_path(_path)
            elif _name == "ref_speech":
                ref_speech_dict = self.read_data_path(_path)

        task_list, args.njobs = self.get_key_list(args.data_path_and_name_and_type, args.njobs)
        return task_list, [speech_dict, ref_speech_dict], args

    def read_data_path(self, file_path):
        """Read a kaldi-style scp file ("key path" per line) into a {key: path} dict."""
        results = {}
        # "with" ensures the file handle is closed (it was leaked before)
        with open(file_path, "r") as fin:
            for line in fin:
                key, path = line.strip().split(" ", 1)
                results[key] = path
        return results

    def get_key_list(
            self,
            data_path_and_name_and_type: Sequence[Tuple[str, str, str]],
            njobs: int
    ):
        """Return the keys of the first data file and njobs clamped to the line count."""
        first_data = data_path_and_name_and_type[0]
        with open(first_data[0], "r") as fin:
            content = fin.readlines()
        line_number = len(content)
        njobs = min(njobs, line_number)
        logging.warning("njobs is reduced to {}, since only {} lines exist in {}".format(
            njobs, line_number, first_data[0],
        ))
        key_list = [line.strip().split(" ", 1)[0] for line in content]
        return key_list, njobs

    def post(self, results_list: list, args: argparse.Namespace):
        """Log one "key value" line per scored utterance."""
        for results in results_list:
            # each worker returns a {key: value} dict; iterate its items —
            # iterating the dict directly yields only keys and fails to
            # unpack into (key, value)
            for key, value in results.items():
                logging.info("{} {}".format(key, value))
def METHOD_NAME(task_args):
    """Worker entry point: score every key in the assigned chunk.

    *task_args* is ``(task_id, key_list, [speech_dict, ref_speech_dict], args)``
    as built by MyRunner.prepare(); returns a {key: score} dict.
    """
    task_id, key_list, [speech_dict, ref_speech_dict], args = task_args

    if args.gpu_inference:
        # round-robin the workers over the available gpu devices
        device = args.gpu_devices[task_id % len(args.gpu_devices)]
        # bug fix: the placeholder was missing ("cuda:".format(device)), so the
        # device index was silently dropped and the call received just "cuda:"
        torch.cuda.set_device("cuda:{}".format(device))

    inference_func = inference_modelscope(
        output_dir=None,
        batch_size=1,
        dtype="float32",
        ngpu=1 if args.gpu_inference else 0,
        seed=0,
        num_workers=0,
        log_level=logging.INFO,
        key_file=None,
        sv_train_config="sv.yaml",
        sv_model_file="sv.pb",
        model_tag=None,
        allow_variable_data_keys=True,
        streaming=False,
        embedding_node="resnet1_dense",
        sv_threshold=0.9465,
    )

    results = {}
    for key in key_list:
        # soundfile.read returns (data, sample_rate); keep only the samples
        speech = soundfile.read(speech_dict[key])[0]
        ref_speech = soundfile.read(ref_speech_dict[key])[0]
        ret = inference_func(None, (speech, ref_speech))
        results[key] = ret["value"]
    return results
if __name__ == '__main__':
    # build the runner around the module-level worker function and start it
    my_runner = MyRunner(METHOD_NAME)
    my_runner.run()
import queue
import subprocess
import threading
from steps.common import debug
class Podman:
    """Manages a single podman container that can be replaced on demand."""

    def __init__(self, context, container_name):
        self.context = context
        self.container_name = container_name
        self.container = None
        debug(self.context, "Podman.__init__()")
        self.new_container()

    def __del__(self):
        debug(self.context, "Podman.__del__()")
        try:
            self.kill()
        except Exception:
            # never raise from a destructor
            pass

    def kill(self):
        """Kill the managed container, if there is one."""
        debug(self.context, "Podman.kill()")
        if self.container is None:
            return
        self.container.kill()
        self.container = None

    def new_container(self):
        """Start a fresh container under the configured name."""
        debug(self.context, "Podman.new_container()")
        # no need to stop the running container first: the new container
        # replaces any old container with the identical name
        self.container = Container(self.context, name=self.container_name)
class ThreadedPodman:
    """
    Keeps a pool of pre-started containers: a producer thread starts new containers in the
    background while a consumer thread kills used ones, so new_container() returns quickly.
    """

    def __init__(self, context, container_name_prefix, max_containers=1):
        self.context = context
        self.container = None
        debug(self.context, "ThreadedPodman.__init__()")
        self.max_containers = max_containers
        self.container_name_prefix = container_name_prefix
        self.container_name_num = 0

        # produce new containers
        self.container_producer_queue = queue.Queue(maxsize=self.max_containers)
        self.container_producer_queue_is_stopping = threading.Event()
        self.container_producer_queue_is_stopped = threading.Event()
        self.container_producer_thread = threading.Thread(target=self.container_producer, daemon=True)
        self.container_producer_thread.start()

        # consume (kill) used containers
        self.container_consumer_queue = queue.Queue()
        self.container_consumer_thread = threading.Thread(target=self.container_consumer, daemon=True)
        self.container_consumer_thread.start()

        self.new_container()

    def __del__(self):
        debug(self.context, "ThreadedPodman.__del__()")
        try:
            self.kill()
        except Exception:
            # never raise from a destructor
            pass

    def kill(self):
        """Stop both worker threads and hand all remaining containers to the consumer."""
        debug(self.context, "ThreadedPodman.kill()")
        self.container_producer_queue_is_stopping.set()

        container = getattr(self, "container", None)
        if container:
            self.container_consumer_queue.put(container)
            self.container = None

        # drain the producer queue (unblocking the producer) until it has stopped
        while not self.container_producer_queue_is_stopped.is_set():
            try:
                container = self.container_producer_queue.get(block=True, timeout=1)
                self.container_consumer_queue.put(container)
            except queue.Empty:
                continue

        # 'None' is a signal to finish processing the queue
        self.container_consumer_queue.put(None)

        self.container_producer_thread.join()
        self.container_consumer_thread.join()

    def container_producer(self):
        # background thread: keep the bounded queue filled with fresh containers
        while not self.container_producer_queue_is_stopping.is_set():
            if self.container_name_prefix:
                self.container_name_num += 1
                container_name = f"{self.container_name_prefix}{self.container_name_num}"
            else:
                container_name = None
            container = Container(self.context, name=container_name)
            self.container_producer_queue.put(container, block=True)
        self.container_producer_queue_is_stopped.set()

    def container_consumer(self):
        # background thread: kill handed-over containers; a 'None' item terminates it
        while True:
            container = self.container_consumer_queue.get(block=True)
            if container is None:
                break
            container.kill()

    def new_container(self):
        """Hand the current container to the consumer and fetch a pre-started one."""
        debug(self.context, "ThreadedPodman.new_container()")
        if getattr(self, "container", None):
            self.container_consumer_queue.put(self.container)
        self.container = self.container_producer_queue.get(block=True)
        debug(self.context, f"> {self.container}")
class Container:
    """Wrapper around a single detached "podman run" obs-server container."""

    def __init__(self, context, name=None):
        self.context = context
        debug(self.context, "Container.__init__()")
        self.container_name = name
        self.container_id = None
        self.port = None
        self.start()

    def __del__(self):
        try:
            self.kill()
        except Exception:
            # never raise from a destructor
            pass

    def __repr__(self):
        result = super().__repr__()
        result += f"(port:{self.port}, id:{self.container_id}, name:{self.container_name})"
        return result

    def METHOD_NAME(self, args, check=True):
        # run a podman subcommand and return the CompletedProcess
        cmd = ["podman"] + args
        debug(self.context, "Running command:", cmd)
        proc = subprocess.run(
            cmd,
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,
            encoding="utf-8",
            check=check,
        )
        debug(self.context, "> return code:", proc.returncode)
        debug(self.context, "> stdout:", proc.stdout)
        debug(self.context, "> stderr:", proc.stderr)
        return proc

    def start(self):
        """Start a detached container and wait until it is ready for use."""
        debug(self.context, "Container.start()")
        args = [
            "run",
            "--hostname", "obs-server-behave",
        ]
        if self.container_name:
            args += [
                "--name", self.container_name,
                "--replace",
                "--stop-signal", "SIGKILL",
            ]
        args += [
            "--rm",
            "--detach",
            "--interactive",
            "--tty",
            "-p", "443",
            "obs-server"
        ]
        proc = self.METHOD_NAME(args)
        lines = proc.stdout.strip().splitlines()
        # podman prints the new container id on the last stdout line
        self.container_id = lines[-1]
        self.wait_on_systemd()
        self.port = self.get_port()

    def kill(self):
        if not self.container_id:
            return
        debug(self.context, "Container.kill()")
        args = ["kill", self.container_id]
        self.METHOD_NAME(args)
        self.container_id = None

    def restart(self):
        debug(self.context, "Container.restart()")
        self.kill()
        self.start()

    def wait_on_systemd(self):
        # block until systemd inside the container reports a final state;
        # check=False because "is-system-running" exits non-zero for
        # degraded systems that are still usable
        args = [
            "exec",
            self.container_id,
            "/usr/bin/systemctl", "is-system-running", "--wait"
        ]
        self.METHOD_NAME(args, check=False)

    def get_port(self):
        """Return the host port that is mapped to the container's port 443."""
        args = ["port", self.container_id]
        proc = self.METHOD_NAME(args)
        lines = proc.stdout.strip().splitlines()
        for line in lines:
            if line.startswith("443/tcp"):
                # return <port> from: "443/tcp -> 0.0.0.0:<port>"
                return line.split(":")[-1]
        raise RuntimeError(f"Could not determine port of container {self.container_id}")
#!/usr/bin/env python3
#===========================================================================
#
# Print a json file
#
#===========================================================================
from __future__ import print_function
import os
import sys
import shutil
import subprocess
from optparse import OptionParser
import time
from datetime import datetime
from datetime import date
from datetime import timedelta
import json
def METHOD_NAME():
    """Parse the command line, read the json file and print per-kind download counts.

    Exits with status 0 on success and 1 when the input file cannot be opened.
    """

    # globals
    global thisScriptName
    thisScriptName = os.path.basename(__file__)

    global thisScriptDir
    # abspath so dirname is never empty when invoked as "python script.py"
    # (os.chdir("") would raise)
    thisScriptDir = os.path.dirname(os.path.abspath(__file__))
    os.chdir(thisScriptDir)
    thisScriptDir = os.getcwd()

    global options

    # parse the command line
    usage = "usage: " + thisScriptName + " [options]"
    parser = OptionParser(usage)
    parser.add_option('--debug',
                      dest='debug', default=True,
                      action="store_true",
                      help='Set debugging on')
    parser.add_option('--path',
                      dest='path', default='test.json',
                      help='path to json file')
    (options, args) = parser.parse_args()

    # debug print
    if (options.debug):
        print("Running %s:" % thisScriptName, file=sys.stderr)
        print("  path: ", options.path, file=sys.stderr)

    # read in the file
    try:
        fp = open(options.path, 'r')
    except IOError as e:
        # bug fix: this branch referenced undefined names (makefilePath,
        # options.coreDir, valueList); report the real path and exit non-zero
        print("ERROR - ", thisScriptName, file=sys.stderr)
        print("  Cannot open file:", options.path, file=sys.stderr)
        print("  ", e, file=sys.stderr)
        sys.exit(1)

    lines = fp.readlines()
    fp.close()

    # per-artifact-kind accumulators
    srcCount = 0
    binCount = 0
    tgzCount = 0
    rpmCount = 0
    debCount = 0
    rbCount = 0
    dmgCount = 0
    otherCount = 0

    # scan the file: remember the most recent "name" line, then attribute the
    # following "download_count" value to the matching artifact kind
    name = ""
    for line in lines:
        line = line.rstrip()
        if (line.find('name') >= 0):
            name = line
        elif (line.find('download_count') >= 0):
            countParts = line.split(':')
            countStr = countParts[1].strip(',')
            countNum = int(countStr)
            print("====================", file=sys.stderr)
            print("name: ", name, file=sys.stderr)
            print("countNum: ", countNum, file=sys.stderr)
            if (name.find('.src.') >= 0):
                print("====>> src <<====", file=sys.stderr)
                srcCount = srcCount + countNum
            elif (name.find('.rpm') >= 0):
                print("====>> rpm <<====", file=sys.stderr)
                rpmCount = rpmCount + countNum
            elif (name.find('.deb') >= 0):
                print("====>> deb <<====", file=sys.stderr)
                debCount = debCount + countNum
            elif (name.find('.dmg') >= 0):
                print("====>> dmg <<====", file=sys.stderr)
                dmgCount = dmgCount + countNum
            elif (name.find('.bin.') >= 0):
                print("====>> bin <<====", file=sys.stderr)
                binCount = binCount + countNum
            elif (name.find('.x86_64.') >= 0):
                print("====>> bin <<====", file=sys.stderr)
                binCount = binCount + countNum
            elif (name.find('.tgz') >= 0):
                print("====>> tgz <<====", file=sys.stderr)
                tgzCount = tgzCount + countNum
            elif (name.find('.rb') >= 0):
                print("====>> rb <<====", file=sys.stderr)
                rbCount = rbCount + countNum
            else:
                otherCount = otherCount + countNum
                print("====>> other <<====", file=sys.stderr)

    totalBinCount = binCount + rpmCount + debCount + dmgCount
    totalSrcCount = srcCount + tgzCount

    print("totalSrcCount: ", totalSrcCount, file=sys.stderr)
    print("totalBinCount: ", totalBinCount, file=sys.stderr)
    print("srcCount: ", srcCount, file=sys.stderr)
    print("binCount: ", binCount, file=sys.stderr)
    print("tgzCount: ", tgzCount, file=sys.stderr)
    print("rpmCount: ", rpmCount, file=sys.stderr)
    print("debCount: ", debCount, file=sys.stderr)
    print("dmgCount: ", dmgCount, file=sys.stderr)
    print("rbCount: ", rbCount, file=sys.stderr)
    print("otherCount: ", otherCount, file=sys.stderr)

    # exit
    sys.exit(0)
########################################################################
# Run a command in a shell, wait for it to complete
def shellCmd(cmd):
    """Run *cmd* in a shell and wait for it to complete.

    Prints an error and exits the program with status 1 when the command returns
    a non-zero exit code or cannot be executed at all.
    """

    print("Running cmd:", cmd, file=sys.stderr)

    try:
        # bug fix: check_call() raises CalledProcessError on non-zero exit,
        # which made the retcode check below unreachable and crashed the
        # script with an unhandled exception; call() returns the exit code
        retcode = subprocess.call(cmd, shell=True)
        if retcode != 0:
            print("Child exited with code: ", retcode, file=sys.stderr)
            sys.exit(1)
        else:
            # 'options' is a module-level global set in main(); it may be
            # missing entirely, and it does not always define 'verbose'
            opts = globals().get("options")
            if opts is not None and getattr(opts, "verbose", False):
                print("Child returned code: ", retcode, file=sys.stderr)
    except OSError as e:
        print("Execution failed:", e, file=sys.stderr)
        sys.exit(1)

    print("  done", file=sys.stderr)
########################################################################
# Run - entry point
if __name__ == "__main__":
METHOD_NAME() |
# Copyright 2023, Kay Hayen, mailto:kay.hayen@gmail.com
#
# Part of "Nuitka", an optimizing Python compiler that is compatible and
# integrates with CPython, but also works on its own.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
""" Jinja folklore wrappers and handling of inline copy usage.
"""
import sys
from nuitka.__past__ import unicode
from .Importing import importFromInlineCopy
environments = {}
def unlikely_if(value):
    """Return the C branch hint "unlikely" for a truthy *value*, else an empty string."""
    return "unlikely" if value else ""
def unlikely_or_likely_from(value):
    """Map a truthy *value* to the branch hint "unlikely" and a falsy one to "likely"."""
    return "unlikely" if value else "likely"
# cached jinja2 module object, set on first use
_jinja2 = None

# For pkg resources, we need to keep a reference, after we delete it from
# "sys.modules" again.
_loaded_pkg_resources = None


def METHOD_NAME():
    """Import the inline copy of jinja2 (and its dependencies) and return the module."""
    global _jinja2, _loaded_pkg_resources  # singleton package using a cache, pylint: disable=global-statement

    # Import dependencies, sadly we get to manage this ourselves.
    importFromInlineCopy("markupsafe", must_exist=True)

    # Newer Jinja2 may not use it, but we load it and remove it, so it
    # does not interfere with anything else.
    if "pkg_resources" not in sys.modules:
        _loaded_pkg_resources = importFromInlineCopy("pkg_resources", must_exist=False)

    _jinja2 = importFromInlineCopy("jinja2", must_exist=True)

    # Unload if it was us loading it, as the inline copy is incomplete.
    if _loaded_pkg_resources is not None:
        del sys.modules["pkg_resources"]

    return _jinja2
def getEnvironment(package_name, template_subdir, extensions):
    """Return a jinja2 Environment for the given loader setup, cached per configuration."""
    key = package_name, template_subdir, extensions

    if key not in environments:
        # delayed import of the inline jinja2 copy
        jinja2 = METHOD_NAME()

        # pick the loader matching the given configuration: package templates,
        # a plain directory, or string-only templates
        if package_name is not None:
            loader = jinja2.PackageLoader(package_name, template_subdir)
        elif template_subdir is not None:
            loader = jinja2.FileSystemLoader(template_subdir)
        else:
            loader = jinja2.BaseLoader()

        env = jinja2.Environment(
            loader=loader,
            extensions=extensions,
            trim_blocks=True,
            lstrip_blocks=True,
        )

        # For shared global functions.
        env.globals.update(
            {
                "unlikely_if": unlikely_if,
                "unlikely_or_likely_from": unlikely_or_likely_from,
            }
        )

        # fail loudly on undefined template variables
        env.undefined = jinja2.StrictUndefined

        environments[key] = env

    return environments[key]
def getTemplate(package_name, template_name, template_subdir="templates", extensions=()):
    """Load *template_name* from the given package's template directory."""
    environment = getEnvironment(
        package_name=package_name,
        template_subdir=template_subdir,
        extensions=extensions,
    )
    return environment.get_template(template_name)
def getTemplateC(package_name, template_name, template_subdir="templates_c", extensions=()):
    """Load *template_name* from the given package's C templates directory."""
    environment = getEnvironment(
        package_name=package_name,
        template_subdir=template_subdir,
        extensions=extensions,
    )
    return environment.get_template(template_name)
def getTemplateFromString(template_str):
return getEnvironment(
package_name=None, template_subdir=None, extensions=()
).from_string(template_str.strip())
_template_cache = {}
def renderTemplateFromString(template_str, **kwargs):
# Avoid recreating templates, hoping to save some time.
if template_str not in _template_cache:
_template_cache[template_str] = getTemplateFromString(template_str)
result = _template_cache[template_str].render(**kwargs)
# Jinja produces unicode value, but our emission wants str, or else
# it messes up. TODO: We might switch to unicode one day or bytes
# for Python3 one day, but that seems to much work.
if str is not unicode:
return result.encode("utf8")
else:
return result |
1,419 | do messages | from datetime import timedelta
from dash.orgs.models import Org
from django.core.management.base import BaseCommand, CommandError
from django.utils import timezone
from django.utils.timezone import now
class Command(BaseCommand):
help = "Dumps information about the synced state of each org's contacts and messages"
MESSAGES = "messages"
CONTACTS = "contacts"
STUCK = "stuck"
ACTION_CHOICES = (MESSAGES, CONTACTS, STUCK)
def add_arguments(self, parser):
parser.add_argument("action", choices=self.ACTION_CHOICES, help="The action to perform")
parser.add_argument("org_ids", metavar="ORG", type=int, nargs="*", help="The orgs to analyze")
def handle(self, *args, **options):
action = options["action"]
org_ids = options["org_ids"]
orgs = Org.objects.filter(is_active=True).order_by("pk")
if org_ids:
orgs = orgs.filter(pk__in=org_ids)
if action == self.MESSAGES:
self.METHOD_NAME(orgs)
elif action == self.CONTACTS:
self.do_contacts(orgs)
elif action == self.STUCK:
if len(orgs) != 1:
raise CommandError("Action '%s' must be run against a single org" % action)
self.do_stuck(orgs.first())
def METHOD_NAME(self, orgs):
self.stdout.write("\nSummarizing messages for %d orgs...\n\n" % len(orgs))
header = (("ID", 4), ("Name", 16), ("Total", 12), ("Inactive", 10), ("Unhandled", 10), ("Stuck", 10))
self.stdout.write(row_to_str(header))
self.stdout.write("=" * row_width(header))
an_hour_ago = now() - timedelta(hours=1)
for org in orgs:
active = org.incoming_messages.filter(is_active=True)
inactive = org.incoming_messages.filter(is_active=False)
num_active = active.count()
num_inactive = inactive.count()
num_total = num_active + num_inactive
num_unhandled = org.incoming_messages.filter(is_handled=False).count()
num_stuck = org.incoming_messages.filter(is_handled=False, created_on__lt=an_hour_ago).count()
row = (
(org.id, 4),
(org.name, 16),
(num_total, 12),
(num_inactive, 10),
(num_unhandled, 10),
(num_stuck, 10),
)
self.stdout.write(row_to_str(row))
def do_contacts(self, orgs):
self.stdout.write("\nSummarizing contacts for %d orgs...\n\n" % len(orgs))
header = (("ID", 4), ("Name", 16), ("Total", 12), ("Inactive", 10), ("Stubs", 10), ("Stuck", 10))
self.stdout.write(row_to_str(header))
self.stdout.write("=" * row_width(header))
an_hour_ago = now() - timedelta(hours=1)
for org in orgs:
active = org.contacts.filter(is_active=True)
inactive = org.contacts.filter(is_active=False)
num_active = active.count()
num_inactive = inactive.count()
num_total = num_active + num_inactive
num_stubs = active.filter(is_stub=True).count()
num_stuck = active.filter(is_stub=True, created_on__lt=an_hour_ago).count()
row = ((org.id, 4), (org.name, 16), (num_total, 12), (num_inactive, 10), (num_stubs, 10), (num_stuck, 10))
self.stdout.write(row_to_str(row))
def do_stuck(self, org):
self.stdout.write("\nListing stuck messages for org %s (#%d)...\n\n" % (org.name, org.pk))
header = (("Msg ID", 12), ("Backend ID", 12), ("Created On", 20), ("Contact UUID", 38))
self.stdout.write(row_to_str(header))
self.stdout.write("=" * row_width(header))
an_hour_ago = now() - timedelta(hours=1)
stuck_messages = org.incoming_messages.filter(is_handled=False, created_on__lt=an_hour_ago)
stuck_messages = stuck_messages.select_related("contact").order_by("-created_on")
for msg in stuck_messages:
row = ((msg.pk, 12), (msg.backend_id, 12), (format_date(msg.created_on), 20), (msg.contact.uuid, 38))
self.stdout.write(row_to_str(row))
def row_to_str(row):
return "".join([str(cell[0]).ljust(cell[1]) for cell in row])
def row_width(row):
return sum([cell[1] for cell in row])
def format_date(dt):
return dt.astimezone(timezone.utc).strftime("%b %d, %Y %H:%M") if dt else "" |
1,420 | default value | # SPDX-License-Identifier: Apache-2.0
# Copyright Contributors to the Rez Project
from Qt import QtCore, QtWidgets
from functools import partial
class Config(QtCore.QSettings):
"""Persistent application settings.
Methods are also provided for easily attaching widgets to settings.
"""
def __init__(self, default_settings, organization=None, application=None,
parent=None):
super(Config, self).__init__(organization, application, parent)
self.default_settings = default_settings
def value(self, key, type_=None):
"""Get the value of a setting.
If `type` is not provided, the key must be for a known setting,
present in `self.default_settings`. Conversely if `type` IS provided,
the key must be for an unknown setting.
"""
if type_ is None:
default = self.METHOD_NAME(key)
val = self._value(key, default)
if type(val) == type(default):
return val
else:
return self._convert_value(val, type(default))
else:
val = self._value(key, None)
if val is None:
return None
return self._convert_value(val, type_)
def get(self, key, type_=None):
return self.value(key, type_)
def get_string_list(self, key):
"""Get a list of strings."""
strings = []
size = self.beginReadArray(key)
for i in range(size):
self.setArrayIndex(i)
entry = str(self._value("entry"))
strings.append(entry)
self.endArray()
return strings
def prepend_string_list(self, key, value, max_length_key):
"""Prepend a fixed-length string list with a new string.
The oldest string will be removed from the list. If the string is
already in the list, it is shuffled to the top. Use this to implement
things like a 'most recent files' entry.
"""
max_len = self.get(max_length_key)
strings = self.get_string_list(key)
strings = [value] + [x for x in strings if x != value]
strings = strings[:max_len]
self.beginWriteArray(key)
for i in range(len(strings)):
self.setArrayIndex(i)
self.setValue("entry", strings[i])
self.endArray()
def attach(self, widget, key):
if isinstance(widget, QtWidgets.QComboBox):
self._attach_combobox(widget, key)
elif isinstance(widget, QtWidgets.QCheckBox):
self._attach_checkbox(widget, key)
else:
raise NotImplementedError
def _value(self, key, defaultValue=None):
val = super(Config, self).value(key, defaultValue)
if hasattr(val, "toPyObject"):
val = val.toPyObject()
return val
@classmethod
def _convert_value(cls, value, type_):
if type_ is bool:
return (str(value).lower() == "true")
else:
return type_(value)
def _attach_checkbox(self, widget, key):
if widget.isTristate():
raise NotImplementedError
value = self.value(key)
widget.setCheckState(QtCore.Qt.Checked if value else QtCore.Qt.Unchecked)
widget.stateChanged.connect(
partial(self._checkbox_stateChanged, widget, key))
def _checkbox_stateChanged(self, widget, key):
value = widget.isChecked()
self.setValue(key, value)
def _attach_combobox(self, widget, key):
value = str(self.value(key))
index = widget.findText(value)
if index == -1:
widget.setEditText(value)
else:
widget.setCurrentIndex(index)
widget.currentIndexChanged.connect(
partial(self._combobox_currentIndexChanged, widget, key))
widget.editTextChanged.connect(
partial(self._combobox_editTextChanged, widget, key))
def _combobox_currentIndexChanged(self, widget, key, index):
value = widget.itemText(index)
self.setValue(key, value)
def _combobox_editTextChanged(self, widget, key, txt):
self.setValue(key, txt)
def METHOD_NAME(self, key):
keys = key.lstrip('/').split('/')
value = self.default_settings
for k in keys:
try:
value = value[k]
except KeyError:
raise ValueError("No such application setting: %r" % key)
return value |
1,421 | test strip bom | import sys
import codecs
from unittest import TestCase
import simplejson as json
from simplejson.compat import unichr, text_type, b, BytesIO
class TestUnicode(TestCase):
def test_encoding1(self):
encoder = json.JSONEncoder(encoding='utf-8')
u = u'\N{GREEK SMALL LETTER ALPHA}\N{GREEK CAPITAL LETTER OMEGA}'
s = u.encode('utf-8')
ju = encoder.encode(u)
js = encoder.encode(s)
self.assertEqual(ju, js)
def test_encoding2(self):
u = u'\N{GREEK SMALL LETTER ALPHA}\N{GREEK CAPITAL LETTER OMEGA}'
s = u.encode('utf-8')
ju = json.dumps(u, encoding='utf-8')
js = json.dumps(s, encoding='utf-8')
self.assertEqual(ju, js)
def test_encoding3(self):
u = u'\N{GREEK SMALL LETTER ALPHA}\N{GREEK CAPITAL LETTER OMEGA}'
j = json.dumps(u)
self.assertEqual(j, '"\\u03b1\\u03a9"')
def test_encoding4(self):
u = u'\N{GREEK SMALL LETTER ALPHA}\N{GREEK CAPITAL LETTER OMEGA}'
j = json.dumps([u])
self.assertEqual(j, '["\\u03b1\\u03a9"]')
def test_encoding5(self):
u = u'\N{GREEK SMALL LETTER ALPHA}\N{GREEK CAPITAL LETTER OMEGA}'
j = json.dumps(u, ensure_ascii=False)
self.assertEqual(j, u'"' + u + u'"')
def test_encoding6(self):
u = u'\N{GREEK SMALL LETTER ALPHA}\N{GREEK CAPITAL LETTER OMEGA}'
j = json.dumps([u], ensure_ascii=False)
self.assertEqual(j, u'["' + u + u'"]')
def test_big_unicode_encode(self):
u = u'\U0001d120'
self.assertEqual(json.dumps(u), '"\\ud834\\udd20"')
self.assertEqual(json.dumps(u, ensure_ascii=False), u'"\U0001d120"')
def test_big_unicode_decode(self):
u = u'z\U0001d120x'
self.assertEqual(json.loads('"' + u + '"'), u)
self.assertEqual(json.loads('"z\\ud834\\udd20x"'), u)
def test_unicode_decode(self):
for i in range(0, 0xd7ff):
u = unichr(i)
#s = '"\\u{0:04x}"'.format(i)
s = '"\\u%04x"' % (i,)
self.assertEqual(json.loads(s), u)
def test_object_pairs_hook_with_unicode(self):
s = u'{"xkd":1, "kcw":2, "art":3, "hxm":4, "qrt":5, "pad":6, "hoy":7}'
p = [(u"xkd", 1), (u"kcw", 2), (u"art", 3), (u"hxm", 4),
(u"qrt", 5), (u"pad", 6), (u"hoy", 7)]
self.assertEqual(json.loads(s), eval(s))
self.assertEqual(json.loads(s, object_pairs_hook=lambda x: x), p)
od = json.loads(s, object_pairs_hook=json.OrderedDict)
self.assertEqual(od, json.OrderedDict(p))
self.assertEqual(type(od), json.OrderedDict)
# the object_pairs_hook takes priority over the object_hook
self.assertEqual(json.loads(s,
object_pairs_hook=json.OrderedDict,
object_hook=lambda x: None),
json.OrderedDict(p))
def test_default_encoding(self):
self.assertEqual(json.loads(u'{"a": "\xe9"}'.encode('utf-8')),
{'a': u'\xe9'})
def test_unicode_preservation(self):
self.assertEqual(type(json.loads(u'""')), text_type)
self.assertEqual(type(json.loads(u'"a"')), text_type)
self.assertEqual(type(json.loads(u'["a"]')[0]), text_type)
def test_ensure_ascii_false_returns_unicode(self):
# http://code.google.com/p/simplejson/issues/detail?id=48
self.assertEqual(type(json.dumps([], ensure_ascii=False)), text_type)
self.assertEqual(type(json.dumps(0, ensure_ascii=False)), text_type)
self.assertEqual(type(json.dumps({}, ensure_ascii=False)), text_type)
self.assertEqual(type(json.dumps("", ensure_ascii=False)), text_type)
def test_ensure_ascii_false_bytestring_encoding(self):
# http://code.google.com/p/simplejson/issues/detail?id=48
doc1 = {u'quux': b('Arr\xc3\xaat sur images')}
doc2 = {u'quux': u'Arr\xeat sur images'}
doc_ascii = '{"quux": "Arr\\u00eat sur images"}'
doc_unicode = u'{"quux": "Arr\xeat sur images"}'
self.assertEqual(json.dumps(doc1), doc_ascii)
self.assertEqual(json.dumps(doc2), doc_ascii)
self.assertEqual(json.dumps(doc1, ensure_ascii=False), doc_unicode)
self.assertEqual(json.dumps(doc2, ensure_ascii=False), doc_unicode)
def test_ensure_ascii_linebreak_encoding(self):
# http://timelessrepo.com/json-isnt-a-javascript-subset
s1 = u'\u2029\u2028'
s2 = s1.encode('utf8')
expect = '"\\u2029\\u2028"'
expect_non_ascii = u'"\u2029\u2028"'
self.assertEqual(json.dumps(s1), expect)
self.assertEqual(json.dumps(s2), expect)
self.assertEqual(json.dumps(s1, ensure_ascii=False), expect_non_ascii)
self.assertEqual(json.dumps(s2, ensure_ascii=False), expect_non_ascii)
def test_invalid_escape_sequences(self):
# incomplete escape sequence
self.assertRaises(json.JSONDecodeError, json.loads, '"\\u')
self.assertRaises(json.JSONDecodeError, json.loads, '"\\u1')
self.assertRaises(json.JSONDecodeError, json.loads, '"\\u12')
self.assertRaises(json.JSONDecodeError, json.loads, '"\\u123')
self.assertRaises(json.JSONDecodeError, json.loads, '"\\u1234')
# invalid escape sequence
self.assertRaises(json.JSONDecodeError, json.loads, '"\\u123x"')
self.assertRaises(json.JSONDecodeError, json.loads, '"\\u12x4"')
self.assertRaises(json.JSONDecodeError, json.loads, '"\\u1x34"')
self.assertRaises(json.JSONDecodeError, json.loads, '"\\ux234"')
if sys.maxunicode > 65535:
# invalid escape sequence for low surrogate
self.assertRaises(json.JSONDecodeError, json.loads, '"\\ud800\\u"')
self.assertRaises(json.JSONDecodeError, json.loads, '"\\ud800\\u0"')
self.assertRaises(json.JSONDecodeError, json.loads, '"\\ud800\\u00"')
self.assertRaises(json.JSONDecodeError, json.loads, '"\\ud800\\u000"')
self.assertRaises(json.JSONDecodeError, json.loads, '"\\ud800\\u000x"')
self.assertRaises(json.JSONDecodeError, json.loads, '"\\ud800\\u00x0"')
self.assertRaises(json.JSONDecodeError, json.loads, '"\\ud800\\u0x00"')
self.assertRaises(json.JSONDecodeError, json.loads, '"\\ud800\\ux000"')
def test_ensure_ascii_still_works(self):
# in the ascii range, ensure that everything is the same
for c in map(unichr, range(0, 127)):
self.assertEqual(
json.dumps(c, ensure_ascii=False),
json.dumps(c))
snowman = u'\N{SNOWMAN}'
self.assertEqual(
json.dumps(c, ensure_ascii=False),
'"' + c + '"')
def METHOD_NAME(self):
content = u"\u3053\u3093\u306b\u3061\u308f"
json_doc = codecs.BOM_UTF8 + b(json.dumps(content))
self.assertEqual(json.load(BytesIO(json_doc)), content)
for doc in json_doc, json_doc.decode('utf8'):
self.assertEqual(json.loads(doc), content) |
1,422 | get hunt comment | # coding=utf-8
# *** WARNING: this file was generated by pulumi. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import copy
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
__all__ = [
'GetHuntCommentResult',
'AwaitableGetHuntCommentResult',
'get_hunt_comment',
'get_hunt_comment_output',
]
@pulumi.output_type
class GetHuntCommentResult:
"""
Represents a Hunt Comment in Azure Security Insights
"""
def __init__(__self__, etag=None, id=None, message=None, name=None, system_data=None, type=None):
if etag and not isinstance(etag, str):
raise TypeError("Expected argument 'etag' to be a str")
pulumi.set(__self__, "etag", etag)
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if message and not isinstance(message, str):
raise TypeError("Expected argument 'message' to be a str")
pulumi.set(__self__, "message", message)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if system_data and not isinstance(system_data, dict):
raise TypeError("Expected argument 'system_data' to be a dict")
pulumi.set(__self__, "system_data", system_data)
if type and not isinstance(type, str):
raise TypeError("Expected argument 'type' to be a str")
pulumi.set(__self__, "type", type)
@property
@pulumi.getter
def etag(self) -> Optional[str]:
"""
Etag of the azure resource
"""
return pulumi.get(self, "etag")
@property
@pulumi.getter
def id(self) -> str:
"""
Fully qualified resource ID for the resource. Ex - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}
"""
return pulumi.get(self, "id")
@property
@pulumi.getter
def message(self) -> str:
"""
The message for the comment
"""
return pulumi.get(self, "message")
@property
@pulumi.getter
def name(self) -> str:
"""
The name of the resource
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="systemData")
def system_data(self) -> 'outputs.SystemDataResponse':
"""
Azure Resource Manager metadata containing createdBy and modifiedBy information.
"""
return pulumi.get(self, "system_data")
@property
@pulumi.getter
def type(self) -> str:
"""
The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or "Microsoft.Storage/storageAccounts"
"""
return pulumi.get(self, "type")
class AwaitableGetHuntCommentResult(GetHuntCommentResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetHuntCommentResult(
etag=self.etag,
id=self.id,
message=self.message,
name=self.name,
system_data=self.system_data,
type=self.type)
def METHOD_NAME(hunt_comment_id: Optional[str] = None,
hunt_id: Optional[str] = None,
resource_group_name: Optional[str] = None,
workspace_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetHuntCommentResult:
"""
Gets a hunt comment
:param str hunt_comment_id: The hunt comment id (GUID)
:param str hunt_id: The hunt id (GUID)
:param str resource_group_name: The name of the resource group. The name is case insensitive.
:param str workspace_name: The name of the workspace.
"""
__args__ = dict()
__args__['huntCommentId'] = hunt_comment_id
__args__['huntId'] = hunt_id
__args__['resourceGroupName'] = resource_group_name
__args__['workspaceName'] = workspace_name
opts = pulumi.InvokeOptions.merge(_utilities.get_invoke_opts_defaults(), opts)
__ret__ = pulumi.runtime.invoke('azure-native:securityinsights/v20230701preview:getHuntComment', __args__, opts=opts, typ=GetHuntCommentResult).value
return AwaitableGetHuntCommentResult(
etag=pulumi.get(__ret__, 'etag'),
id=pulumi.get(__ret__, 'id'),
message=pulumi.get(__ret__, 'message'),
name=pulumi.get(__ret__, 'name'),
system_data=pulumi.get(__ret__, 'system_data'),
type=pulumi.get(__ret__, 'type'))
@_utilities.lift_output_func(METHOD_NAME)
def get_hunt_comment_output(hunt_comment_id: Optional[pulumi.Input[str]] = None,
hunt_id: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
workspace_name: Optional[pulumi.Input[str]] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetHuntCommentResult]:
"""
Gets a hunt comment
:param str hunt_comment_id: The hunt comment id (GUID)
:param str hunt_id: The hunt id (GUID)
:param str resource_group_name: The name of the resource group. The name is case insensitive.
:param str workspace_name: The name of the workspace.
"""
... |
1,423 | test can multiple values | # Copyright 2013 Christoph Reiter
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
import os
import shutil
from io import BytesIO
import mutagen
from tests import TestCase, get_data_path, mkstemp
from quodlibet.formats.mp4 import MP4File
from quodlibet.formats._image import EmbeddedImage
import mutagen.mp4
from .helper import get_temp_copy
class TMP4File(TestCase):
def setUp(self):
self.f = get_temp_copy(get_data_path('test.m4a'))
self.song = MP4File(self.f)
def tearDown(self):
os.unlink(self.f)
def _assert_tag_supported(self, tag, value="SomeTestValue"):
self.song[tag] = value
self.song.write()
self.song.reload()
self.assertEqual(self.song(tag), value)
def test_format(self):
self.assertEqual(self.song("~format"), "MPEG-4")
def test_codec(self):
self.assertEqual(self.song("~codec"), "AAC LC")
def test_encoding(self):
self.assertEqual(self.song("~encoding"), "FAAC 1.24")
def test_mb_release_track_id(self):
tag = mutagen.mp4.MP4(self.f)
tag["----:com.apple.iTunes:MusicBrainz Release Track Id"] = [b"foo"]
tag.save()
song = MP4File(self.f)
self.assertEqual(song("musicbrainz_releasetrackid"), u"foo")
song["musicbrainz_releasetrackid"] = u"bla"
song.write()
tag = mutagen.mp4.MP4(self.f)
self.assertEqual(
tag["----:com.apple.iTunes:MusicBrainz Release Track Id"],
[b"bla"])
def test_basic(self):
self._assert_tag_supported("title")
self._assert_tag_supported("artist")
self._assert_tag_supported("albumartist")
self._assert_tag_supported("album")
self._assert_tag_supported("genre")
self._assert_tag_supported("date")
def test_basic_numeric(self):
self._assert_tag_supported("tracknumber", "12")
self._assert_tag_supported("discnumber", "1")
self._assert_tag_supported("bpm", "132")
def test_less_common_tags(self):
self._assert_tag_supported("discsubtitle")
self._assert_tag_supported("mood")
self._assert_tag_supported("conductor")
self._assert_tag_supported("description")
def test_replaygain_tags(self):
self._assert_tag_supported('replaygain_album_gain', '-5.67 dB')
self._assert_tag_supported('replaygain_album_peak', '1.0')
self._assert_tag_supported('replaygain_track_gain', '-5.67 dB')
self._assert_tag_supported('replaygain_track_peak', '1.0')
self._assert_tag_supported('replaygain_reference_loudness', '89 dB')
def test_length(self):
self.assertAlmostEqual(self.song("~#length"), 3.7079, 3)
def test_bitrate(self):
self.assertEqual(self.song("~#bitrate"), 2)
def test_channels(self):
assert self.song("~#channels") == 2
def test_samplerate(self):
assert self.song("~#samplerate") == 44100
def test_bitdepth(self):
assert self.song("~#bitdepth") == 16
def test_bpm_rounds(self):
self.song["bpm"] = "98.76"
self.song.write()
self.song.reload()
self.assertEqual(self.song("bpm"), "99")
self.assertEqual(self.song("~#bpm"), 99)
def test_empty_disk_trkn(self):
for key in ["trkn", "disk"]:
tag = mutagen.mp4.MP4(self.f)
tag[key] = []
tag.save()
tag = mutagen.mp4.MP4(self.f)
assert tag[key] == []
self.song.reload()
def test_write(self):
self.song.write()
def test_can_change(self):
self.assertTrue(self.song.can_change("title"))
self.assertFalse(self.song.can_change("foobar"))
self.assertTrue("albumartist" in self.song.can_change())
def test_invalid(self):
path = get_data_path('empty.xm')
self.assertTrue(os.path.exists(path))
self.assertRaises(Exception, MP4File, path)
def test_get_image(self):
image = self.song.get_primary_image()
self.assertTrue(image)
self.assertEqual(image.mime_type, "image/png")
def test_get_images(self):
images = self.song.get_images()
self.assertTrue(images and len(images) == 2)
def test_get_image_non(self):
tag = mutagen.mp4.MP4(self.f)
tag.pop("covr", None)
tag.save()
self.song.reload()
self.assertFalse(self.song.get_primary_image())
def test_clear_images(self):
self.assertTrue(self.song.valid())
self.assertTrue(self.song.has_images)
self.song.clear_images()
self.assertFalse(self.song.has_images)
self.assertFalse(self.song.get_primary_image())
tag = mutagen.mp4.MP4(self.f)
self.assertFalse("covr" in tag)
def test_set_image(self):
self.assertTrue(self.song.has_images)
fileobj = BytesIO(b"foo")
image = EmbeddedImage(fileobj, "image/jpeg", 10, 10, 8)
self.song.set_image(image)
image = self.song.get_primary_image()
self.assertTrue(image)
self.assertEqual(image.read(), b"foo")
self.assertTrue(self.song.has_images)
def test_can_change_images(self):
self.assertTrue(self.song.can_change_images)
def METHOD_NAME(self):
self.assertEqual(self.song.can_multiple_values(), [])
self.assertFalse(self.song.can_multiple_values("artist"))
def test_m4b_support(self):
path = get_data_path('test.m4a')
fd, filename = mkstemp(suffix='m4b')
os.close(fd)
shutil.copy(path, filename)
self.song = MP4File(filename)
assert self.song("~format") == "MPEG-4"
self._assert_tag_supported("title") |
1,424 | inject or | """Admin request context class.
A request context provided by the admin server to admin route handlers.
"""
from typing import Mapping, Optional, Type
from ..core.profile import Profile, ProfileSession
from ..config.injector import Injector, InjectionError, InjectType
from ..config.injection_context import InjectionContext
from ..config.settings import Settings
from ..utils.classloader import DeferLoad
IN_MEM = DeferLoad("aries_cloudagent.core.in_memory.InMemoryProfile")
class AdminRequestContext:
"""Context established by the Conductor and passed into message handlers."""
def __init__(
self,
profile: Profile,
*,
context: InjectionContext = None,
settings: Mapping[str, object] = None,
root_profile: Profile = None,
metadata: dict = None
):
"""Initialize an instance of AdminRequestContext."""
self._context = (context or profile.context).start_scope("admin", settings)
self._profile = profile
self._root_profile = root_profile
self._metadata = metadata
@property
def injector(self) -> Injector:
"""Accessor for the associated `Injector` instance."""
return self._context.injector
@property
def profile(self) -> Profile:
"""Accessor for the associated `Profile` instance."""
return self._profile
@property
def root_profile(self) -> Optional[Profile]:
"""Accessor for the associated root_profile instance."""
return self._root_profile
@property
def metadata(self) -> dict:
"""Accessor for the associated metadata."""
return self._metadata
@property
def settings(self) -> Settings:
"""Accessor for the context settings."""
return self._context.settings
def session(self) -> ProfileSession:
"""Start a new interactive session with no transaction support requested."""
return self.profile.session(self._context)
def transaction(self) -> ProfileSession:
"""Start a new interactive session with commit and rollback support.
If the current backend does not support transactions, then commit
and rollback operations of the session will not have any effect.
"""
return self.profile.transaction(self._context)
def inject(
self,
base_cls: Type[InjectType],
settings: Mapping[str, object] = None,
) -> InjectType:
"""Get the provided instance of a given class identifier.
Args:
cls: The base class to retrieve an instance of
settings: An optional mapping providing configuration to the provider
Returns:
An instance of the base class, or None
"""
return self._context.inject(base_cls, settings)
def METHOD_NAME(
self,
base_cls: Type[InjectType],
settings: Mapping[str, object] = None,
default: Optional[InjectType] = None,
) -> Optional[InjectType]:
"""Get the provided instance of a given class identifier or default if not found.
Args:
base_cls: The base class to retrieve an instance of
settings: An optional dict providing configuration to the provider
default: default return value if no instance is found
Returns:
An instance of the base class, or None
"""
return self._context.METHOD_NAME(base_cls, settings, default)
def update_settings(self, settings: Mapping[str, object]):
"""Update the current scope with additional settings."""
self._context.update_settings(settings)
@classmethod
def test_context(
cls, session_inject: dict = None, profile: Profile = None
) -> "AdminRequestContext":
"""Quickly set up a new admin request context for tests."""
ctx = AdminRequestContext(profile or IN_MEM.resolved.test_profile())
setattr(ctx, "session_inject", {} if session_inject is None else session_inject)
setattr(ctx, "session", ctx._test_session)
return ctx
def _test_session(self) -> ProfileSession:
session = self.profile.session(self._context)
def _inject(base_cls):
if session._active and base_cls in self.session_inject:
ret = self.session_inject[base_cls]
if ret is None:
raise InjectionError(
"No instance provided for class: {}".format(base_cls.__name__)
)
return ret
return session._context.injector.inject(base_cls)
def _inject_or(base_cls, default=None):
if session._active and base_cls in self.session_inject:
ret = self.session_inject[base_cls]
if ret is None:
ret = default
return ret
return session._context.injector.METHOD_NAME(base_cls, default)
setattr(session, "inject", _inject)
setattr(session, "inject_or", _inject_or)
return session
def __repr__(self) -> str:
"""Provide a human readable representation of this object.
Returns:
A human readable representation of this object
"""
skip = ("session",)
items = (
"{}={}".format(k, repr(v))
for k, v in self.__dict__.items()
if k not in skip
)
return "<{}({})>".format(self.__class__.__name__, ", ".join(items)) |
1,425 | calculate voltage | #!/usr/bin/env python
#
# Copyright 2017 Fraunhofer Institute for Manufacturing Engineering and Automation (IPA)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import rospy
from cob_msgs.msg import PowerState
from cob_phidgets.msg import AnalogSensor
class PowerStatePhidget():
PHIDGET_MAX_VALUE = 999
PHIDGET_MIN_VALUE = 0
PERIOD_RECORD_SIZE = 6
VOLTAGE_COLLECTION_TIME = 6.0 # sec
def __init__(self):
self.voltage = None
self.current = None
self.last_update = rospy.Time(0)
self.charging = False
try:
self.voltage_divider_factor = rospy.get_param("~voltage_divider_factor")
except KeyError:
raise KeyError("Parameter \"~voltage_divider_factor\" not found on parameter server.")
self.voltage_full = rospy.get_param("~voltage_full", 52.0)
self.voltage_empty = rospy.get_param("~voltage_empty", 38.0)
self.current_max = rospy.get_param("~current_max", 30.0)
self.current_min = rospy.get_param("~current_min", -30.0)
self.pub_power_state = rospy.Publisher('power_state', PowerState, queue_size=1)
self.sub_analog_sensors = rospy.Subscriber("analog_sensors", AnalogSensor, self.phidget_cb)
self.pr_next = 0
self.period_record = []
self.cb_avg_time = 0.1
self.voltage_bag_maxlen = 100
self.voltage_bag = []
def append_voltage_bag(self, num):
while len(self.voltage_bag) >= self.voltage_bag_maxlen:
self.voltage_bag.pop(0)
self.voltage_bag.append(num)
def METHOD_NAME(self):
if len(self.voltage_bag) > 0:
self.voltage = np.mean(self.voltage_bag)
def phidget_cb(self, msg):
# Estimate commands frequency; we do continuously as it can be very different depending on the
# publisher type, and we don't want to impose extra constraints to keep this package flexible
if len(self.period_record) < self.PERIOD_RECORD_SIZE:
self.period_record.append((rospy.Time.now() - self.last_update).to_sec())
else:
self.period_record[self.pr_next] = (rospy.Time.now() - self.last_update).to_sec()
self.pr_next += 1
self.pr_next %= len(self.period_record)
self.last_update = rospy.Time.now()
if len(self.period_record) <= self.PERIOD_RECORD_SIZE / 2:
# wait until we have some values; make a reasonable assumption (10 Hz) meanwhile
self.cb_avg_time = 0.1
else:
# enough; recalculate with the latest input
self.cb_avg_time = np.median(self.period_record)
# now set the max voltage bag size
self.voltage_bag_maxlen = int(self.VOLTAGE_COLLECTION_TIME / self.cb_avg_time)
voltage_raw = None
current_raw = None
for i in range(0, len(msg.uri)):
if msg.uri[i] == "voltage":
voltage_raw = msg.value[i]
if msg.uri[i] == "current":
current_raw = msg.value[i]
if voltage_raw != None:
# Calculation of real voltage
voltage = self.voltage_divider_factor * voltage_raw / self.PHIDGET_MAX_VALUE
voltage = round(voltage, 3)
self.append_voltage_bag(voltage)
if current_raw != None:
# Calculation of real current
self.current = self.current_min + (self.current_max - self.current_min) * (current_raw -
self.PHIDGET_MIN_VALUE) / (self.PHIDGET_MAX_VALUE - self.PHIDGET_MIN_VALUE)
self.current = round(self.current, 3)
if self.current > 0:
self.charging = True
else:
self.charging = False
def calculate_power_consumption(self):
if not self.charging and self.voltage != None and self.current != None:
return round(self.voltage * abs(self.current), 3)
else:
return 0.0
def calculate_relative_remaining_capacity(self):
    """Return the battery charge level as a percentage clamped to [0, 100],
    interpolated linearly between voltage_empty and voltage_full;
    0.0 when no voltage reading is available yet."""
    # Guard clause + "is None" identity check instead of "!= None".
    if self.voltage is None:
        return 0.0
    percentage = round(
        (self.voltage - self.voltage_empty) * 100 / (self.voltage_full - self.voltage_empty), 3
    )
    # Clamp to the physically meaningful range.
    return min(max(percentage, 0), 100)
def publish(self):
    """Refresh the averaged voltage and publish a PowerState message,
    but only when both readings exist and are fresh (< 1 s old)."""
    self.METHOD_NAME()
    if self.voltage != None and self.current != None and (rospy.Time.now() - self.last_update) < rospy.Duration(1):
        ps = PowerState()
        ps.header.stamp = self.last_update
        ps.voltage = self.voltage
        ps.current = self.current
        ps.power_consumption = self.calculate_power_consumption()
        ps.relative_remaining_capacity = self.calculate_relative_remaining_capacity()
        ps.charging = self.charging
        self.pub_power_state.publish(ps)
# Script entry point: start the node and publish power state at 10 Hz.
if __name__ == "__main__":
    rospy.init_node("power_state_phidget")
    try:
        psp = PowerStatePhidget()
    except KeyError as e:
        # Presumably a missing required ROS parameter — TODO confirm.
        rospy.logerr("Shutting down: {}".format(e))
        exit(1)
    rospy.loginfo("power state phidget running")
    rate = rospy.Rate(10)
    while not rospy.is_shutdown():
        psp.publish()
rate.sleep()
# Copyright Contributors to the Packit project.
# SPDX-License-Identifier: MIT
import logging
from pathlib import Path
from typing import Optional
import click
import git
from packit.local_project import LocalProject, LocalProjectBuilder, CALCULATE
from packit.utils.repo import git_remote_url_to_https_url
logger = logging.getLogger(__name__)
class LocalProjectParameter(click.ParamType):
    """
    Path or url.

    Click parameter type that converts a CLI value (local path or git URL)
    into a packit ``LocalProject``.  The ``*_param_name`` constructor
    arguments name sibling CLI options whose values (ref, PR id, merge flag,
    target branch) are looked up from the click context during conversion.
    """

    name = "path_or_url"

    def __init__(
        self,
        ref_param_name: Optional[str] = None,
        pr_id_param_name: Optional[str] = None,
        merge_pr_param_name: Optional[str] = None,
        target_branch_param_name: Optional[str] = None,
    ) -> None:
        super().__init__()
        self.ref_param_name = ref_param_name
        self.pr_id_param_name = pr_id_param_name
        self.merge_pr_param_name = merge_pr_param_name
        self.target_branch_param_name = target_branch_param_name

    @staticmethod
    def METHOD_NAME(param_name, ctx):
        """Return the value of *param_name* from the click context, falling
        back to the option's declared default when it was not provided."""
        value = None
        if param_name in ctx.params:
            value = ctx.params[param_name]
        else:  # use the default
            for param in ctx.command.params:
                if param.name == param_name:
                    value = param.default
        return value

    def convert(self, value, param, ctx):
        """Convert *value* (existing directory, file, or git URL) into a
        ``LocalProject``; fails the click invocation otherwise."""
        if isinstance(value, LocalProject):
            return value
        try:
            pr_id = None
            merge_pr = True
            target_branch = None
            ref = (
                self.METHOD_NAME(self.ref_param_name, ctx) if self.ref_param_name else ""
            )
            if self.pr_id_param_name:
                pr_id = self.METHOD_NAME(self.pr_id_param_name, ctx)
            if self.merge_pr_param_name:
                merge_pr = self.METHOD_NAME(self.merge_pr_param_name, ctx)
            if self.target_branch_param_name:
                target_branch = self.METHOD_NAME(self.target_branch_param_name, ctx)
            path = Path(value)
            if path.is_file():
                logger.debug(f"Input is file {path}, taking its parent dir.")
                path = path.parent
            # General use-case, create fully featured project
            builder = LocalProjectBuilder()
            if path.is_dir():
                logger.debug(f"Input is a directory: {path.absolute()}")
                local_project = builder.build(
                    working_dir=path.absolute(),
                    ref=ref,
                    remote=ctx.obj.upstream_git_remote,
                    merge_pr=merge_pr,
                    target_branch=target_branch,
                    git_repo=CALCULATE,
                    git_project=CALCULATE,
                    git_service=CALCULATE,
                    full_name=CALCULATE,
                    namespace=CALCULATE,
                    repo_name=CALCULATE,
                    git_url=CALCULATE,
                )
            elif git_remote_url_to_https_url(value):
                logger.debug(f"Input is a URL to a git repo: {value}")
                local_project = builder.build(
                    git_url=value,
                    ref=ref,
                    remote=ctx.obj.upstream_git_remote,
                    pr_id=pr_id,
                    merge_pr=merge_pr,
                    target_branch=target_branch,
                    git_repo=CALCULATE,
                    working_dir=CALCULATE,
                    git_project=CALCULATE,
                    git_service=CALCULATE,
                    full_name=CALCULATE,
                    namespace=CALCULATE,
                    repo_name=CALCULATE,
                )
            else:
                self.fail(
                    "Provided input path_or_url is not a directory nor an URL of a git repo."
                )
            if not (local_project.working_dir or local_project.git_url):
                self.fail(
                    "Parameter is not an existing directory nor correct git url.",
                    param,
                    ctx,
                )
            return local_project
        except Exception as ex:
            # Surface any builder/IO error as a click usage failure.
            self.fail(ex, param, ctx)
class GitRepoParameter(click.ParamType):
    """Parameter type to represent a Git repository on the local disk, and an
    optional branch, in the format <path>:<branch>.
    Attributes:
        from_ref_param: Name of the CLI parameter which tells the start point of the branch
            to be created, if the branch doesn't exist yet.
    """

    name = "git_repo"

    def __init__(self, from_ref_param: Optional[str] = None):
        super().__init__()
        self.from_ref_param = from_ref_param

    def convert(self, value, param, ctx) -> git.Repo:
        """Open the repo at <path>, optionally checking out <branch>,
        creating it from the *from_ref_param* start point when needed."""
        if isinstance(value, git.Repo):
            return value
        if not isinstance(value, str):
            self.fail(f"{value!r} is not a string")
        try:
            path, _, branch = value.partition(":")
            repo = git.Repo(path)
            if not branch:
                return repo
            branch_exists = True
            try:
                repo.rev_parse(branch)
            except git.BadName:
                branch_exists = False
            if self.from_ref_param is not None:
                if ctx.params.get(self.from_ref_param):
                    # Create (or reset) the branch at the requested start point.
                    repo.git.checkout("-B", branch, ctx.params[self.from_ref_param])
                else:
                    self.fail(
                        f"Unable to create branch {branch!r} because "
                        f"{self.from_ref_param!r} is not specified",
                        param,
                        ctx,
                    )
            elif branch_exists:
                repo.git.checkout(branch)
            else:
                self.fail(
                    f"Cannot check out branch {branch!r} because it does not exist",
                    param,
                    ctx,
                )
            return repo
        except git.NoSuchPathError:
            self.fail(f"{path!r} does not exist", param, ctx)
        except git.GitCommandError as ex:
self.fail(ex, param, ctx)
# Copyright (C) 2022-Today - Engenere (<https://engenere.one>).
# @author Felipe Motter Pereira <felipe@engenere.one>
# License AGPL-3 - See http://www.gnu.org/licenses/agpl-3.0.html
from odoo.tests import Form, SavepointCase
class L10nBrSaleDiscount(SavepointCase):
    """Tests for the Brazilian-localization sale discount security groups:
    per-value discounts, total (header) discounts, and the fixed flag."""

    @classmethod
    def setUpClass(cls):
        """Create a restricted test user (sale manager + fiscal user), a
        service product, and a one-line sale order using the 'venda'
        fiscal operation."""
        super().setUpClass()
        cls.company = cls.env.ref("base.main_company")
        cls.group_total_discount_id = cls.env.ref(
            "l10n_br_sale.group_total_discount"
        ).id
        cls.group_discount_per_value_id = cls.env.ref(
            "l10n_br_sale.group_discount_per_value"
        ).id
        sale_manager_user = cls.env.ref("sales_team.group_sale_manager")
        fiscal_user = cls.env.ref("l10n_br_fiscal.group_user")
        user_groups = [sale_manager_user.id, fiscal_user.id]
        cls.user = (
            cls.env["res.users"]
            .with_user(cls.env.user)
            .with_context(no_reset_password=True)
            .create(
                {
                    "name": "Test User",
                    "login": "test_user",
                    "email": "test@oca.com",
                    "company_id": cls.company.id,
                    "company_ids": [(4, cls.company.id)],
                    "groups_id": [(6, 0, user_groups)],
                }
            )
        )
        # Run everything below (and the tests) as the restricted test user.
        cls.env = cls.env(user=cls.user)
        cls.cr = cls.env.cr
        cls.partner = cls.env["res.partner"].create({"name": "Test"})
        cls.product = cls.env["product.product"].create(
            {
                "name": "test_product",
                "type": "service",
                "list_price": 1000,
            }
        )
        cls.order = Form(cls.env["sale.order"])
        cls.order.partner_id = cls.partner
        cls.order.fiscal_operation_id = cls.env.ref("l10n_br_fiscal.fo_venda")
        cls.order = cls.order.save()
        cls.order_line = cls.env["sale.order.line"].create(
            {
                "name": cls.product.name,
                "product_id": cls.product.id,
                "product_uom_qty": 1,
                "product_uom": cls.product.uom_id.id,
                "price_unit": 1000.00,
                "order_id": cls.order.id,
                "fiscal_operation_id": cls.env.ref("l10n_br_fiscal.fo_venda").id,
                "fiscal_operation_line_id": cls.env.ref(
                    "l10n_br_fiscal.fo_venda_venda"
                ).id,
            },
        )
        cls.sales_view_id = "l10n_br_sale.l10n_br_sale_order_form"

    def test_l10n_br_sale_discount_value(self):
        """With only the per-value group, discount_value is editable and the
        percentage field is derived (and therefore read-only in the form)."""
        self.user.groups_id = [(4, self.group_discount_per_value_id)]
        self.assertTrue(self.order_line.user_discount_value)
        self.assertFalse(self.order_line.user_total_discount)
        self.assertFalse(self.order_line.need_change_discount_value())
        order = Form(self.order)
        with order.order_line.edit(0) as line:
            line.discount_value = 450
            self.assertEqual(line.discount, 45)
            line.price_unit = 2000
            self.assertEqual(line.discount, 22.5)
            # Percentage is read-only in this mode.
            with self.assertRaises(AssertionError):
                line.discount = 20

    def test_l10n_br_sale_discount_value_with_total(self):
        """With both groups, header discount_rate drives the lines unless a
        line is pinned with discount_fixed."""
        self.user.groups_id = [(4, self.group_discount_per_value_id)]
        self.user.groups_id = [(4, self.group_total_discount_id)]
        self.assertTrue(self.order_line.user_discount_value)
        self.assertTrue(self.order_line.user_total_discount)
        self.assertTrue(self.order_line.need_change_discount_value())
        self.order_line.discount_fixed = True
        self.assertFalse(self.order_line.need_change_discount_value())
        self.order_line.discount_fixed = False
        order = Form(self.order)
        order.discount_rate = 10
        with order.order_line.edit(0) as line:
            self.assertEqual(line.discount, 10)
            self.assertEqual(line.discount_value, 100)
            with self.assertRaises(AssertionError):
                line.discount = 20
            with self.assertRaises(AssertionError):
                line.discount_value = 20
            # Pinning the line makes discount_value editable again.
            line.discount_fixed = True
            line.discount_value = 450
            self.assertEqual(line.discount, 45)
            with self.assertRaises(AssertionError):
                line.discount = 20
        order.discount_rate = 15
        with order.order_line.edit(0) as line:
            # Pinned line keeps its own discount despite the header change.
            self.assertEqual(line.discount, 45)
            self.assertEqual(line.discount_value, 450)
            line.discount_fixed = False
            self.assertEqual(line.discount, 15)
            self.assertEqual(line.discount_value, 150)

    def test_l10n_br_sale_discount_percent(self):
        """Without any discount group, only the percentage is editable and
        discount_value is derived."""
        self.assertFalse(self.order_line.user_discount_value)
        self.assertFalse(self.order_line.user_total_discount)
        self.assertTrue(self.order_line.need_change_discount_value())
        order = Form(self.order)
        with order.order_line.edit(0) as line:
            line.discount = 33
            self.assertEqual(line.discount_value, 330)
            line.price_unit = 2000
            self.assertEqual(line.discount_value, 660)
            with self.assertRaises(AssertionError):
                line.discount_value = 20

    def METHOD_NAME(self):
        """With only the total-discount group, the header rate drives line
        percentages; pinned lines may override the percentage only."""
        self.user.groups_id = [(4, self.group_total_discount_id)]
        self.assertFalse(self.order_line.user_discount_value)
        self.assertTrue(self.order_line.user_total_discount)
        self.assertTrue(self.order_line.need_change_discount_value())
        self.order_line.discount_fixed = True
        self.assertTrue(self.order_line.need_change_discount_value())
        self.order_line.discount_fixed = False
        order = Form(self.order)
        order.discount_rate = 15
        with order.order_line.edit(0) as line:
            self.assertEqual(line.discount, 15)
            self.assertEqual(line.discount_value, 150)
            with self.assertRaises(AssertionError):
                line.discount = 20
            with self.assertRaises(AssertionError):
                line.discount_value = 20
            line.discount_fixed = True
            line.discount = 50
            self.assertEqual(line.discount_value, 500)
            with self.assertRaises(AssertionError):
                line.discount_value = 20
        order.discount_rate = 35
        with order.order_line.edit(0) as line:
            self.assertEqual(line.discount, 50)
            self.assertEqual(line.discount_value, 500)
            line.discount_fixed = False
            self.assertEqual(line.discount, 35)
self.assertEqual(line.discount_value, 350)
import io
from datalad_service.common.annex import parse_ls_tree_line, read_ls_tree_line, compute_rmet, parse_remote_line, parse_rmet_line, read_rmet_file, encode_remote_url
# Sample parsed-file record fixture.
# NOTE(review): not referenced by any test visible in this chunk —
# presumably kept for other tests or as documentation of the shape.
expected_file_object = {
    'filename': 'dataset_description.json',
    'id': '43502da40903d08b18b533f8897330badd6e1da3',
    'key': '838d19644b3296cf32637bbdf9ae5c87db34842f',
    'size': 101
}
def test_parse_ls_tree_line():
    """A plain blob entry parses with a positive numeric size field."""
    entry = """100644 blob a786c385bd1812410d01177affb6ce834d85facd 459 dataset_description.json"""
    _fname, _mode, _otype, _ohash, size = parse_ls_tree_line(entry)
    assert int(size) > 0
def test_parse_ls_tree_line_annexed():
    """A symlink (annexed) entry still parses with a positive size."""
    entry = """120000 blob 570cb4a3fd80de6e8491312c935bfe8029066361 141 derivatives/mriqc/reports/sub-01_ses-01_T1w.html"""
    _fname, _mode, _otype, _ohash, size = parse_ls_tree_line(entry)
    assert int(size) > 0
def test_parse_ls_tree_line_submodule():
    """Submodule (commit) entries carry '-' instead of a numeric size."""
    entry = """160000 commit fcafd17fbfa44495c7f5f8a0777e5ab610b09500 - code/facedistid_analysis"""
    *_rest, size = parse_ls_tree_line(entry)
    assert size == '-'
def test_get_ls_tree_line():
    """A regular tracked file is collected with annexed=False and no symlinks queued."""
    collected_files = []
    link_names = []
    link_objects = []
    entry = """100644 blob a786c385bd1812410d01177affb6ce834d85facd 459 dataset_description.json"""
    read_ls_tree_line(entry, collected_files, link_names, link_objects)
    expected = {
        'filename': 'dataset_description.json',
        'size': 459,
        'id': '78dd92373749f62af23f3ae499b7a8ac33418fff',
        'key': 'a786c385bd1812410d01177affb6ce834d85facd',
        'urls': [],
        'annexed': False,
        'directory': False,
    }
    assert collected_files == [expected]
    assert link_names == []
    assert link_objects == []
def test_get_ls_tree_line_ignored():
    """Dotfiles such as .gitattributes are filtered out entirely."""
    collected_files, link_names, link_objects = [], [], []
    entry = """100644 blob a786c385bd1812410d01177affb6ce834d85facd 459 .gitattributes"""
    read_ls_tree_line(entry, collected_files, link_names, link_objects)
    assert collected_files == []
    assert link_names == []
    assert link_objects == []
def test_get_ls_tree_line_annexed():
    """Symlink entries are queued for later resolution, not listed as files."""
    collected_files, link_names, link_objects = [], [], []
    entry = """120000 blob 570cb4a3fd80de6e8491312c935bfe8029066361 141 derivatives/mriqc/reports/sub-01_ses-01_T1w.html"""
    read_ls_tree_line(entry, collected_files, link_names, link_objects)
    assert collected_files == []
    assert link_names == [
        'derivatives/mriqc/reports/sub-01_ses-01_T1w.html']
    assert link_objects == ['570cb4a3fd80de6e8491312c935bfe8029066361']
def METHOD_NAME():
    """Submodule (commit) entries are ignored by read_ls_tree_line."""
    collected_files, link_names, link_objects = [], [], []
    entry = """160000 commit fcafd17fbfa44495c7f5f8a0777e5ab610b09500 - code/facedistid_analysis"""
    read_ls_tree_line(entry, collected_files, link_names, link_objects)
    assert collected_files == []
    assert link_names == []
    assert link_objects == []
def test_compute_rmet_git():
    """A git SHA1 key hashes into the expected GIT-- rmet path."""
    # Test a git SHA1 key
    expected = '0f5/0b4/GIT--99fe93bfea62c16a10488593da870df25d09be81.log.rmet'
    assert compute_rmet('99fe93bfea62c16a10488593da870df25d09be81') == expected
def test_compute_rmet_git_legacy():
    """With legacy=True the same SHA1 key maps to the SHA1-- rmet path."""
    # Test a git SHA1 key
    expected = '9e2/03e/SHA1--99fe93bfea62c16a10488593da870df25d09be81.log.rmet'
    assert compute_rmet('99fe93bfea62c16a10488593da870df25d09be81', legacy=True) == expected
def test_compute_rmet_annex():
    """A git-annex MD5E key maps to its hashed rmet path."""
    # Test a git annex MD5E key
    key = 'MD5E-s12102144--d614929593bf2a7cccea90bea67255f4.bdf'
    assert compute_rmet(key) == '9ce/c07/MD5E-s12102144--d614929593bf2a7cccea90bea67255f4.bdf.log.rmet'
def test_compute_rmet_sha256_annex():
    """A git-annex SHA256E key maps to its hashed rmet path."""
    # Test a git annex MD5E key
    key = 'SHA256E-s311112--c3527d7944a9619afb57863a34e6af7ec3fe4f108e56c860d9e700699ff806fb.nii.gz'
    assert compute_rmet(key) == '2ed/6ea/SHA256E-s311112--c3527d7944a9619afb57863a34e6af7ec3fe4f108e56c860d9e700699ff806fb.nii.gz.log.rmet'
def test_parse_remote_line():
    """An S3 special-remote line yields its uuid and public url."""
    line = """57894849-d0c8-4c62-8418-3627be18a196 autoenable=true bucket=openneuro.org datacenter=US encryption=none exporttree=yes fileprefix=ds002778/ host=s3.amazonaws.com name=s3-PUBLIC partsize=1GiB port=80 public=yes publicurl=http://openneuro.org.s3.amazonaws.com/ storageclass=STANDARD type=S3 versioning=yes timestamp=1588743361.538097946s"""
    parsed = parse_remote_line(line)
    assert parsed == {'url': 'http://openneuro.org.s3.amazonaws.com/',
                      'uuid': '57894849-d0c8-4c62-8418-3627be18a196'}
def test_parse_rmet_line():
    """A versioned rmet line combines the remote url with the version id."""
    remote = {'url': 'http://openneuro.org.s3.amazonaws.com/',
              'uuid': '57894849-d0c8-4c62-8418-3627be18a196'}
    rmet_line = """1590213748.042921433s 57894849-d0c8-4c62-8418-3627be18a196:V +iVcEk18e3J2WQys4zr_ANaTPfpUufW4Y#ds002778/dataset_description.json"""
    resolved = parse_rmet_line(remote, rmet_line)
    assert resolved == 'http://openneuro.org.s3.amazonaws.com/ds002778/dataset_description.json?versionId=iVcEk18e3J2WQys4zr_ANaTPfpUufW4Y'
def test_parse_rmet_line_https():
    """An https bucket-style remote url is joined the same way."""
    remote = {'url': 'https://s3.amazonaws.com/openneuro.org',
              'uuid': '57894849-d0c8-4c62-8418-3627be18a196'}
    rmet_line = """1590213748.042921433s 57894849-d0c8-4c62-8418-3627be18a196:V +iVcEk18e3J2WQys4zr_ANaTPfpUufW4Y#ds002778/dataset_description.json"""
    resolved = parse_rmet_line(remote, rmet_line)
    assert resolved == 'https://s3.amazonaws.com/openneuro.org/ds002778/dataset_description.json?versionId=iVcEk18e3J2WQys4zr_ANaTPfpUufW4Y'
def test_read_rmet_file():
    """Reading a whole cat-file stream resolves the versioned URL."""
    remote = {'url': 'http://openneuro.org.s3.amazonaws.com/',
              'uuid': '57894849-d0c8-4c62-8418-3627be18a196'}
    stream = io.StringIO(""":::99fe93bfea62c16a10488593da870df25d09be81
1590213748.042921433s 57894849-d0c8-4c62-8418-3627be18a196:V +iVcEk18e3J2WQys4zr_ANaTPfpUufW4Y#ds002778/dataset_description.json""")
    resolved = read_rmet_file(remote, stream)
    assert resolved == 'http://openneuro.org.s3.amazonaws.com/ds002778/dataset_description.json?versionId=iVcEk18e3J2WQys4zr_ANaTPfpUufW4Y'
def test_remote_url_encoding():
    """'+' in the path is percent-encoded; already-safe URLs pass through."""
    plus_url = "https://s3.amazonaws.com/openneuro.org/ds000248/derivatives/freesurfer/subjects/sub-01/mri/aparc+aseg.mgz?versionId=2Wx7w.fCYeGzGWLnW9sxWsPdztl.2HL0"
    safe_url = "https://s3.amazonaws.com/openneuro.org/ds000248/sub-01/anat/sub-01_T1w.nii.gz?versionId=8uTXIQ10Blcp2GeAVJJCHL5PimkSaQZL"
    assert encode_remote_url(plus_url) == "https://s3.amazonaws.com/openneuro.org/ds000248/derivatives/freesurfer/subjects/sub-01/mri/aparc%2Baseg.mgz?versionId=2Wx7w.fCYeGzGWLnW9sxWsPdztl.2HL0"
    assert encode_remote_url(safe_url) == safe_url
assert encode_remote_url("=") == '='
# ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####
import bpy
from bpy.props import EnumProperty, IntProperty, FloatProperty
from mathutils import noise
from sverchok.node_tree import SverchCustomTreeNode
from sverchok.data_structure import updateNode
from sverchok.utils.sv_seed_funcs import get_offset, seed_adjusted
from sverchok.utils.sv_noise_utils import noise_options, PERLIN_ORIGINAL
# helpers
def dict_from(options, idx1, idx2):
    """Build a mapping from column *idx1* to column *idx2* of each tuple in *options*."""
    mapping = {}
    for entry in options:
        mapping[entry[idx1]] = entry[idx2]
    return mapping
def enum_from(options):
    """Turn (name, icon-index, ...) tuples into Blender EnumProperty item 5-tuples."""
    items = []
    for entry in options:
        title = entry[0].title()
        items.append((entry[0], title, title, '', entry[1]))
    return items
# function wrappers
def fractal(nbasis, verts, h_factor, lacunarity, octaves, offset, gain):
    """Plain fBm fractal per vertex; *offset* and *gain* are accepted but unused."""
    values = []
    for v in verts:
        values.append(noise.fractal(v, h_factor, lacunarity, octaves, noise_basis=nbasis))
    return values
def METHOD_NAME(nbasis, verts, h_factor, lacunarity, octaves, offset, gain):
    """Multifractal noise per vertex; *offset* and *gain* are accepted but unused."""
    values = []
    for v in verts:
        values.append(noise.multi_fractal(v, h_factor, lacunarity, octaves, noise_basis=nbasis))
    return values
def hetero(nbasis, verts, h_factor, lacunarity, octaves, offset, gain):
    """Hetero-terrain noise per vertex; uses *offset*, *gain* is unused."""
    values = []
    for v in verts:
        values.append(noise.hetero_terrain(v, h_factor, lacunarity, octaves, offset, noise_basis=nbasis))
    return values
def ridged(nbasis, verts, h_factor, lacunarity, octaves, offset, gain):
    """Ridged multifractal noise per vertex; uses both *offset* and *gain*."""
    values = []
    for v in verts:
        values.append(noise.ridged_multi_fractal(v, h_factor, lacunarity, octaves, offset, gain, noise_basis=nbasis))
    return values
def hybrid(nbasis, verts, h_factor, lacunarity, octaves, offset, gain):
    """Hybrid multifractal noise per vertex; uses both *offset* and *gain*."""
    values = []
    for v in verts:
        values.append(noise.hybrid_multi_fractal(v, h_factor, lacunarity, octaves, offset, gain, noise_basis=nbasis))
    return values
# (name, icon-index, implementation) triples for every supported fractal mode.
fractal_options = [
    ('FRACTAL', 0, fractal),
    ('MULTI_FRACTAL', 1, METHOD_NAME),
    ('HETERO_TERRAIN', 2, hetero),
    ('RIDGED_MULTI_FRACTAL', 3, ridged),
    ('HYBRID_MULTI_FRACTAL', 4, hybrid),
]
# Socket-count <-> UI mode bookkeeping: mode 'A' has the 5 base sockets,
# 'B' adds "offset" (6), 'C' adds "offset" and "gain" (7).
socket_count_to_mode = {5: 'A', 6: 'B', 7: 'C'}
fractal_type_to_mode = {
    'FRACTAL': 'A',
    'MULTI_FRACTAL': 'A',
    'HETERO_TERRAIN': 'B',
    'RIDGED_MULTI_FRACTAL': 'C',
    'HYBRID_MULTI_FRACTAL': 'C'
}
# Lookup table (name -> implementation) and EnumProperty item lists
# derived from the option triples above.
fractal_f = dict_from(fractal_options, 0, 2)
avail_noise = enum_from(noise_options)
avail_fractal = enum_from(fractal_options)
class SvVectorFractal(SverchCustomTreeNode, bpy.types.Node):
    '''Vector Fractal node. [default]
    In: Vertices, Seed, HFactor, Lacunarity, Octaves
    Params: Type1 ([Fractal]/Terrain, Rygded, Hybrid), Type2 ([Perlin]/Blender/Voronoi/CellNoise)
    Out: Value. Floats in the range [0.0 to 1.0]
    '''
    bl_idname = 'SvVectorFractal'
    bl_label = 'Vector Fractal'
    bl_icon = 'FORCE_TURBULENCE'
    sv_icon = 'SV_VECTOR_FRACTAL'

    def mk_input_sockets(self, *sockets):
        """Add one string socket per name, bound to the same-named property."""
        for socket in sockets:
            # NOTE(review): debug print left in — consider removing.
            print(socket.title())
            self.inputs.new('SvStringsSocket', socket.title()).prop_name = socket

    def rm_input_sockets(self, *sockets):
        """Remove the sockets previously created by mk_input_sockets."""
        for socket in sockets:
            self.inputs.remove(self.inputs[socket.title()])

    def wrapped_update(self, context):
        """Add/remove the optional 'offset'/'gain' sockets when the fractal
        type switches UI mode (A: none, B: offset, C: offset+gain)."""
        add = self.mk_input_sockets
        remove = self.rm_input_sockets
        current_mode = socket_count_to_mode.get(len(self.inputs))
        new_mode = fractal_type_to_mode.get(self.fractal_type)
        # Map the (current, new) mode transition to the socket action needed.
        actionables = {
            'AB': (add, ('offset',)),
            'BA': (remove, ('offset',)),
            'BC': (add, ('gain',)),
            'CB': (remove, ('gain',)),
            'AC': (add, ('offset', 'gain')),
            'CA': (remove, ('offset', 'gain'))
        }.get(current_mode + new_mode)
        if actionables:
            socket_func, names = actionables
            socket_func(*names)
        updateNode(self, context)

    noise_type: EnumProperty(
        items=avail_noise,
        default=PERLIN_ORIGINAL,
        description="Noise type",
        update=updateNode)
    fractal_type: EnumProperty(
        items=avail_fractal,
        default="FRACTAL",
        description="Fractal type",
        update=wrapped_update)
    h_factor: FloatProperty(default=0.05, description='H factor parameter', name='H Factor', update=updateNode)
    lacunarity: FloatProperty(default=0.5, description='Lacunarity parameter', name='Lacunarity', update=updateNode)
    octaves: IntProperty(default=3, min=0, max=6, description='Octaves', name='Octaves', update=updateNode)
    offset: FloatProperty(default=0.0, name='Offset', description='Offset parameter', update=updateNode)
    gain: FloatProperty(default=0.5, description='Gain parameter', name='Gain', update=updateNode)
    seed: IntProperty(default=0, name='Seed', update=updateNode)

    def sv_init(self, context):
        """Create the base (mode 'A') socket layout."""
        self.inputs.new('SvVerticesSocket', 'Vertices')
        self.inputs.new('SvStringsSocket', 'Seed').prop_name = 'seed'
        self.inputs.new('SvStringsSocket', 'H Factor').prop_name = 'h_factor'
        self.inputs.new('SvStringsSocket', 'Lacunarity').prop_name = 'lacunarity'
        self.inputs.new('SvStringsSocket', 'Octaves').prop_name = 'octaves'
        self.outputs.new('SvStringsSocket', 'Value')

    def draw_buttons(self, context, layout):
        """Node UI: dropdowns for the fractal and noise types."""
        layout.prop(self, 'fractal_type', text="Type")
        layout.prop(self, 'noise_type', text="Type")

    def process(self):
        """Evaluate the selected fractal function over every vertex list."""
        inputs, outputs = self.inputs, self.outputs
        if not outputs[0].is_linked:
            return
        _seed = inputs['Seed'].sv_get()[0][0]
        wrapped_fractal_function = fractal_f[self.fractal_type]
        verts = inputs['Vertices'].sv_get()
        m_h_factor = inputs['H Factor'].sv_get()[0]
        m_lacunarity = inputs['Lacunarity'].sv_get()[0]
        m_octaves = inputs['Octaves'].sv_get()[0]
        # Offset/gain sockets only exist in modes B/C; default otherwise.
        m_offset = inputs['Offset'].sv_get()[0] if 'Offset' in inputs else [0.0]
        m_gain = inputs['Gain'].sv_get()[0] if 'Gain' in inputs else [0.0]
        param_list = [m_h_factor, m_lacunarity, m_octaves, m_offset, m_gain]
        out = []
        for idx, vlist in enumerate(verts):
            # lazy generation of full parameters.
            params = [(param[idx] if idx < len(param) else param[-1]) for param in param_list]
            final_vert_list = [seed_adjusted(vlist, _seed)]
            out.append(wrapped_fractal_function(self.noise_type, final_vert_list[0], *params))
        outputs[0].sv_set(out)
def register():
    """Blender add-on hook: register the node class."""
    bpy.utils.register_class(SvVectorFractal)
def unregister():
bpy.utils.unregister_class(SvVectorFractal)
"""The definition for the states section of the toolbox."""
from functools import partial
from gaphas.item import SE
from gaphor import UML
from gaphor.core import gettext
from gaphor.diagram.diagramtoolbox import ToolDef, ToolSection, new_item_factory
from gaphor.UML import diagramitems
from gaphor.UML.recipes import owner_package
from gaphor.UML.toolboxconfig import namespace_config
def state_config(new_item):
    """Configure a newly dropped State: give it a localized name and
    attach it to a state-machine region."""
    default_name = new_item.diagram.gettext("State")
    METHOD_NAME(new_item, name=default_name)
def pseudostate_config(new_item, kind):
    """Configure a new Pseudostate: set its UML *kind*, then attach it
    to a state-machine region."""
    subject = new_item.subject
    subject.kind = kind
    METHOD_NAME(new_item)
def METHOD_NAME(new_item, name=None):
    """Ensure *new_item*'s subject lives inside a Region of a StateMachine.

    Optionally names the subject "New <name>" (localized).  Reuses the first
    StateMachine found in the owning package (or among package-less ones),
    creating a new StateMachine and/or Region when none exists.
    """
    subject = new_item.subject
    if name:
        subject.name = new_item.diagram.gettext("New {name}").format(name=name)
    if subject.container:
        # Already placed inside a region; nothing more to do.
        return
    diagram = new_item.diagram
    package = owner_package(diagram.owner)
    state_machines = (
        [i for i in package.ownedType if isinstance(i, UML.StateMachine)]
        if package
        else diagram.model.lselect(
            lambda e: isinstance(e, UML.StateMachine) and e.package is None
        )
    )
    if state_machines:
        state_machine = state_machines[0]
    else:
        state_machine = subject.model.create(UML.StateMachine)
        state_machine.name = new_item.diagram.gettext("State Machine")
        state_machine.package = package
    if state_machine.region:
        region = state_machine.region[0]
    else:
        region = subject.model.create(UML.Region)
        region.stateMachine = state_machine
    subject.container = region
# Toolbox section listing every state-diagram tool: the state machine and
# states themselves, the transition, and one tool per UML pseudostate kind.
states = ToolSection(
    gettext("States"),
    (
        ToolDef(
            "toolbox-state-machine",
            gettext("State Machine"),
            "gaphor-state-machine-symbolic",
            None,
            new_item_factory(
                diagramitems.StateMachineItem,
                UML.StateMachine,
                config_func=namespace_config,
            ),
            handle_index=SE,
        ),
        ToolDef(
            "toolbox-state",
            gettext("State"),
            "gaphor-state-symbolic",
            "s",
            new_item_factory(
                diagramitems.StateItem,
                UML.State,
                config_func=state_config,
            ),
            handle_index=SE,
        ),
        ToolDef(
            "toolbox-initial-pseudostate",
            gettext("Initial Pseudostate"),
            "gaphor-initial-pseudostate-symbolic",
            None,
            new_item_factory(
                diagramitems.PseudostateItem,
                UML.Pseudostate,
                partial(pseudostate_config, kind="initial"),
            ),
            handle_index=SE,
        ),
        ToolDef(
            "toolbox-final-state",
            gettext("Final State"),
            "gaphor-final-state-symbolic",
            None,
            new_item_factory(
                diagramitems.FinalStateItem,
                UML.FinalState,
                config_func=METHOD_NAME,
            ),
            handle_index=SE,
        ),
        ToolDef(
            "toolbox-transition",
            gettext("Transition"),
            "gaphor-transition-symbolic",
            "<Shift>T",
            new_item_factory(diagramitems.TransitionItem),
        ),
        ToolDef(
            "toolbox-shallow-history-pseudostate",
            gettext("Shallow History Pseudostate"),
            "gaphor-shallow-history-pseudostate-symbolic",
            None,
            new_item_factory(
                diagramitems.PseudostateItem,
                UML.Pseudostate,
                partial(pseudostate_config, kind="shallowHistory"),
            ),
            handle_index=SE,
        ),
        ToolDef(
            "toolbox-deep-history-pseudostate",
            gettext("Deep History Pseudostate"),
            "gaphor-deep-history-pseudostate-symbolic",
            None,
            new_item_factory(
                diagramitems.PseudostateItem,
                UML.Pseudostate,
                partial(pseudostate_config, kind="deepHistory"),
            ),
            handle_index=SE,
        ),
        ToolDef(
            "toolbox-join-pseudostate",
            gettext("Join Pseudostate"),
            "gaphor-join-pseudostate-symbolic",
            None,
            new_item_factory(
                diagramitems.PseudostateItem,
                UML.Pseudostate,
                partial(pseudostate_config, kind="join"),
            ),
            handle_index=SE,
        ),
        ToolDef(
            "toolbox-fork-pseudostate",
            gettext("Fork Pseudostate"),
            "gaphor-fork-pseudostate-symbolic",
            None,
            new_item_factory(
                diagramitems.PseudostateItem,
                UML.Pseudostate,
                partial(pseudostate_config, kind="fork"),
            ),
            handle_index=SE,
        ),
        ToolDef(
            "toolbox-junction-pseudostate",
            gettext("Junction Pseudostate"),
            "gaphor-junction-pseudostate-symbolic",
            None,
            new_item_factory(
                diagramitems.PseudostateItem,
                UML.Pseudostate,
                partial(pseudostate_config, kind="junction"),
            ),
            handle_index=SE,
        ),
        ToolDef(
            "toolbox-choice-pseudostate",
            gettext("Choice Pseudostate"),
            "gaphor-choice-pseudostate-symbolic",
            None,
            new_item_factory(
                diagramitems.PseudostateItem,
                UML.Pseudostate,
                partial(pseudostate_config, kind="choice"),
            ),
            handle_index=SE,
        ),
        ToolDef(
            "toolbox-entry-point-pseudostate",
            gettext("Entry Point Pseudostate"),
            "gaphor-entry-point-pseudostate-symbolic",
            None,
            new_item_factory(
                diagramitems.PseudostateItem,
                UML.Pseudostate,
                partial(pseudostate_config, kind="entryPoint"),
            ),
            handle_index=SE,
        ),
        ToolDef(
            "toolbox-exit-point-pseudostate",
            gettext("Exit Point Pseudostate"),
            "gaphor-exit-point-pseudostate-symbolic",
            None,
            new_item_factory(
                diagramitems.PseudostateItem,
                UML.Pseudostate,
                partial(pseudostate_config, kind="exitPoint"),
            ),
            handle_index=SE,
        ),
        ToolDef(
            "toolbox-terminate-pseudostate",
            gettext("Terminate Pseudostate"),
            "gaphor-terminate-pseudostate-symbolic",
            None,
            new_item_factory(
                diagramitems.PseudostateItem,
                UML.Pseudostate,
                partial(pseudostate_config, kind="terminate"),
            ),
            handle_index=SE,
        ),
    ),
)
#!/usr/bin/env python3
from datetime import datetime
import logging
import os
import signal
import subprocess
import sys
import time
# Name this program was invoked as, used in error/log messages.
PROG = os.path.basename(sys.argv[0])
try:
    tool_output_dir = sys.argv[1]
except IndexError:
    print(f"{PROG}: missing required tool output directory argument", file=sys.stderr)
    sys.exit(1)
# Remaining CLI arguments are the OpenShift component kinds to watch.
components = []
for idx in range(2, len(sys.argv)):
    components.append(sys.argv[idx])
if not components:
    print(f"{PROG}: missing required components argument(s)", file=sys.stderr)
    sys.exit(1)
# Under unit test, freeze "now" to the epoch so output is deterministic.
if os.environ.get("_PBENCH_UNIT_TESTS"):
    def _now():
        return datetime.utcfromtimestamp(0)
else:
    def _now():
        return datetime.utcnow()
def now():
    """Return the current (possibly mocked) UTC time as an ISO-like string."""
    return _now().strftime("%Y-%m-%dT%H:%M:%S.%f")
def _snapshot(cmd, out_path, label):
    """Run *cmd* via the shell, writing its stdout+stderr to *out_path*
    bracketed by timestamps; log a warning if it exits non-zero."""
    with open(out_path, "w") as fp:
        fp.write(f"timestamp: {now()}\n\n")
        fp.flush()
        proc = subprocess.run(
            cmd,
            stdout=fp,
            stderr=subprocess.STDOUT,
            shell=True,
        )
        fp.write(f"\ntimestamp: {now()}\n")
        fp.flush()
    if proc.returncode != 0:
        logging.warning('%s: "%s" failed with %d', PROG, label, proc.returncode)


def gather_nodes_ev(when):
    """Capture one-shot snapshots of cluster nodes and events into
    nodes-<when>.txt and ev-<when>.txt under the tool output directory.

    Refactored to remove the duplicated open/timestamp/run/warn sequence
    for the two commands.
    """
    _snapshot(
        "oc get nodes --show-labels -o wide",
        os.path.join(tool_output_dir, f"nodes-{when}.txt"),
        "oc get nodes",
    )
    _snapshot(
        "oc get ev --all-namespaces -o wide",
        os.path.join(tool_output_dir, f"ev-{when}.txt"),
        "oc get ev",
    )
class GracefulPopen(subprocess.Popen):
    """Simple sub-class of Popen which adds a graceful way to terminate a
    process.
    """

    def silent_kill(self):
        """Hard-kill the process, swallowing any error (it may be gone already)."""
        try:
            self.kill()
        except Exception:
            # Don't bother reporting any errors on the .kill().
            pass

    def METHOD_NAME(self, name):
        """Terminate the process known as *name* politely (SIGTERM), waiting
        up to 5 seconds before falling back to a hard kill."""
        try:
            self.terminate()
        except Exception:
            logging.warning(
                "%s: error gracefully terminating %s (pid %d)", PROG, name, self.pid
            )
            self.silent_kill()
        else:
            try:
                self.wait(timeout=5)
            except subprocess.TimeoutExpired:
                logging.warning(
                    "%s: timeout gracefully terminating %s (pid %d)",
                    PROG,
                    name,
                    self.pid,
                )
                self.silent_kill()
# Flag flipped by signal handlers to request an orderly shutdown.
terminate = False
def handler(signum, frame):
    global terminate
    terminate = True
# Establish signal handler for TERM, QUIT, and INT to stop process creation,
# and then tear them all down.
signal.signal(signal.SIGTERM, handler)
signal.signal(signal.SIGQUIT, handler)
signal.signal(signal.SIGINT, handler)
if os.environ.get("_PBENCH_UNIT_TESTS"):
    # Unit-test mode: no delay between watchers, and signal.pause is
    # replaced with a stub that requests termination after one second.
    _OC_DELAY = 0
    def mock_pause():
        time.sleep(1)
        global terminate
        terminate = True
    signal.pause = mock_pause
else:
    _OC_DELAY = 5
# Gather "nodes" and "events" before we start other watchers.
gather_nodes_ev("start")
cmd_fmt = "oc get {all_ns_opt}{component} -o wide -w"
opts = {}
# component -> (oc watcher process, timestamp logger process, output file)
pids = {}
try:
    for component in components:
        if terminate:
            # During the creation of sub-processes we were told to terminate.
            break  # lgtm [py/unreachable-statement]
        # Wait 5 seconds between starting watchers.
        time.sleep(_OC_DELAY)
        # "cs" and "pv" are not namespaced, so no --all-namespaces for them.
        opts["all_ns_opt"] = "" if component in ("cs", "pv") else "--all-namespaces "
        opts["component"] = component
        cmd = cmd_fmt.format(**opts)
        cfp = open(os.path.join(tool_output_dir, f"{component}.txt"), "w")
        cfp.write(f"timestamp: {now()}\n\n")
        cfp.flush()
        # Pipe the watcher's output through pbench-log-timestamp into the file.
        oc_cmd = GracefulPopen(
            cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, shell=True
        )
        log_ts = GracefulPopen(
            "pbench-log-timestamp",
            stdin=oc_cmd.stdout,
            stdout=cfp,
            stderr=subprocess.STDOUT,
            shell=True,
        )
        pids[component] = (oc_cmd, log_ts, cfp)
    # Wait for all sub-processes to complete
    while not terminate:
        signal.pause()
finally:
    # Tear down every watcher pipeline, politely first.
    for component, procs in pids.items():
        oc_cmd, log_ts, fp = procs
        oc_cmd.METHOD_NAME(component)
        log_ts.METHOD_NAME(f"{component}-ts")
        fp.close()
# Gather "nodes" and "events" now that all watchers are stopped.
gather_nodes_ev("stop")
import os
from pathlib import Path
import numpy as np
import pandas
import pytest
from pywr.parameters import DataFrameParameter
from pywr.model import MultiModel, Model
from pywr.core import Scenario
# def load_multi_model(filename: Path):
from pywr.nodes import Input, Link, Output
from pywr.parameters.multi_model_parameters import (
OtherModelParameterValueParameter,
OtherModelNodeFlowParameter,
OtherModelNodeStorageParameter,
)
from pywr.recorders import (
NumpyArrayNodeRecorder,
NumpyArrayParameterRecorder,
NumpyArrayStorageRecorder,
)
def make_simple_model(num: int) -> Model:
    """Build a trivial ``input -> link -> output`` model.

    Both end nodes have ``max_flow = 5 + num`` and the output has a negative
    cost, so exactly ``5 + num`` units flow through when solved.
    """
    model = Model()
    source = Input(model, name=f"input-{num}", max_flow=5 + num)
    conduit = Link(model, name=f"link-{num}")
    sink = Output(model, name=f"output-{num}", max_flow=5 + num, cost=-10)
    source.connect(conduit)
    conduit.connect(sink)
    return model
def test_run_two_independent_models():
    """Two unconnected sub-models should each solve to their own max flow."""
    multi_model = MultiModel()
    for i in range(2):
        multi_model.add_model(f"model-{i}", make_simple_model(i))
    multi_model.run()
    # Each sub-model's output should carry 5 + i units.
    for i, expected in enumerate((5.0, 6.0)):
        output_node = multi_model.models[f"model-{i}"].nodes[f"output-{i}"]
        np.testing.assert_allclose(output_node.flow[0], expected)
def test_setup_profile_two_independent_models(tmp_path):
    """``setup(profile=True)`` should dump a profiling CSV with 12 rows."""
    multi_model = MultiModel()
    for i in range(2):
        multi_model.add_model(f"model-{i}", make_simple_model(i))
    stats_path = tmp_path / "stats.csv"
    multi_model.setup(profile=True, profile_dump_filename=stats_path)
    assert stats_path.exists()
    assert len(pandas.read_csv(stats_path)) == 12
def test_run_two_independent_models_from_json():
    """Test two independent models running together when loaded from JSON."""
    model_dir = Path(os.path.dirname(__file__)) / "models" / "two-independent-sub-models"
    multi_model = MultiModel.load(model_dir / "integrated-model.json")
    multi_model.run()
    # Both sub-models have an unconstrained demand of 10.
    for name in ("model1", "model2"):
        demand = multi_model.models[name].nodes["demand1"]
        np.testing.assert_allclose(demand.flow[0], 10.0)
def METHOD_NAME():
    """Test two simple but dependent models loaded from JSON."""
    model_dir = Path(os.path.dirname(__file__)) / "models" / "two-dependent-sub-models"
    multi_model = MultiModel.load(model_dir / "integrated-model.json")
    model2 = multi_model.models["model2"]
    # model2's supply should be driven by a parameter value from model1.
    assert isinstance(model2.nodes["supply2"].max_flow, OtherModelParameterValueParameter)
    # Record model2's demand flow so it can be checked after the run.
    demand2_rec = NumpyArrayNodeRecorder(model2, model2.nodes["demand2"])
    # Demand should equal the inflow of model1.
    expected_flow = pandas.read_csv(model_dir / "timeseries1.csv", index_col=0)
    multi_model.run()
    np.testing.assert_allclose(
        multi_model.models["model1"].nodes["demand1"].flow[0], 10.0
    )
    np.testing.assert_allclose(demand2_rec.data, expected_flow)
def test_run_two_dependent_models_with_flow_transfer_from_json():
    """Test two simple but dependent models linked via a node-flow transfer."""
    model_dir = (
        Path(os.path.dirname(__file__))
        / "models"
        / "two-dependent-sub-models-flow-transfer"
    )
    multi_model = MultiModel.load(model_dir / "integrated-model.json")
    model2 = multi_model.models["model2"]
    # Here model2's supply mirrors an actual *flow* in model1 (not a parameter).
    assert isinstance(model2.nodes["supply2"].max_flow, OtherModelNodeFlowParameter)
    demand2_rec = NumpyArrayNodeRecorder(model2, model2.nodes["demand2"])
    # Demand should equal the flow supplied in model1.
    expected_flow = pandas.read_csv(model_dir / "timeseries1.csv", index_col=0)
    multi_model.run()
    np.testing.assert_allclose(
        multi_model.models["model1"].nodes["demand1"].flow[0], 21.92
    )
    np.testing.assert_allclose(demand2_rec.data, expected_flow)
def test_run_three_dependent_storage_sub_models():
    """Test three dependent models sharing storage state via parameters."""
    path = (
        Path(os.path.dirname(__file__))
        / "models"
        / "three-dependent-storage-sub-models"
    )
    multi_model = MultiModel.load(path / "integrated-model.json")
    sub_model0 = multi_model.models["model0"]
    sub_model1 = multi_model.models["model1"]
    # NOTE(review): sub_model2 is fetched but never used below — confirm
    # whether an assertion on model2 is missing.
    sub_model2 = multi_model.models["model2"]
    # Create some recorders
    sm0_sv1 = NumpyArrayParameterRecorder(
        sub_model0, sub_model0.parameters["storage1-volume"]
    )
    sm1_sv1 = NumpyArrayStorageRecorder(sub_model1, sub_model1.nodes["storage1"])
    sm0_sr1 = NumpyArrayParameterRecorder(
        sub_model0, sub_model0.parameters["storage1-release"]
    )
    multi_model.setup()
    # After setup the JSON-declared parameter must resolve to the cross-model type.
    assert isinstance(
        sub_model0.parameters["storage1-volume"], OtherModelNodeStorageParameter
    )
    multi_model.run()
    # Reservoir releases on the first time-step because combined volume > 0
    # From second time-step onwards release is turned off from model0 because combined volume < 0
    # The storage parameter in model0 has the volume at the end of the previous day ...
    np.testing.assert_allclose(
        sm0_sv1.data[:, 0],
        [
            510.0,
            490.0,
            480.0,
            470.0,
            460.0,
        ],
    )
    # The volume in storage is recorded on the node at the end of the day
    np.testing.assert_allclose(sm1_sv1.data[:, 0], [490.0, 480.0, 470.0, 460.0, 450.0])
    # The release is calculated using previous day's volume
    np.testing.assert_allclose(sm0_sr1.data[:, 0], [10.0, 0.0, 0.0, 0.0, 0.0])
def test_error_with_different_timesteps():
    """Check a RuntimeError is raised if the models have different timesteps."""
    model_dir = Path(os.path.dirname(__file__)) / "models" / "two-independent-sub-models"
    multi_model = MultiModel.load(model_dir / "integrated-model.json")
    # Shift one sub-model's start date so the timesteppers no longer align.
    multi_model.models["model1"].timestepper.start = "1900-01-01"
    with pytest.raises(RuntimeError):
        multi_model.run()
@pytest.mark.parametrize(
    "sizes1,names1,sizes2,names2",
    [
        [[10], ["A"], [11], ["B"]],
        [[11], ["A"], [10], ["B"]],
        [[10, 20], ["A", "B"], [10], ["A"]],
        [[10], ["A"], [10, 20], ["A", "B"]],
        [[10], ["A"], [5, 2], ["A", "B"]],
        [[1], ["A"], [10], ["A"]],
    ],
)
def test_error_with_different_scenarios(sizes1, names1, sizes2, names2):
    """Check a ValueError is raised if the models have different scenarios."""
    model_dir = Path(os.path.dirname(__file__)) / "models" / "two-dependent-sub-models"
    multi_model = MultiModel.load(model_dir / "integrated-model.json")
    # Give each sub-model its own (mismatched) scenario definitions.
    for size, name in zip(sizes1, names1):
        Scenario(multi_model.models["model1"], size=size, name=name)
    for size, name in zip(sizes2, names2):
        Scenario(multi_model.models["model2"], size=size, name=name)
    with pytest.raises(ValueError):
        multi_model.run()
def test_error_with_different_scenario_combinations():
    """Check a ValueError is raised if the models run different combinations."""
    model_dir = Path(os.path.dirname(__file__)) / "models" / "two-dependent-sub-models"
    multi_model = MultiModel.load(model_dir / "integrated-model.json")
    # Define the same scenarios in each sub-model.
    for model_name in ("model1", "model2"):
        Scenario(multi_model.models[model_name], size=10, name="A")
        Scenario(multi_model.models[model_name], size=2, name="B")
    # Restrict model1 to the first two combinations; model2 still runs them all.
    multi_model.models["model1"].scenarios.user_combinations = [[0, 0], [1, 0]]
    with pytest.raises(ValueError):
        multi_model.run()
1,433 | test echo alias | import pytest
from tests.utils import active_character, end_init, start_init
pytestmark = pytest.mark.asyncio
async def METHOD_NAME(avrae, dhttp):
    """Registering a simple `echo` alias and invoking it should echo its body."""
    alias_cmd = "!alias foobar echo foobar"
    avrae.message(alias_cmd)
    await dhttp.receive_message(f"Alias `foobar` added.```py\n{alias_cmd}\n```")
    avrae.message("!foobar")
    await dhttp.receive_delete()
    await dhttp.receive_message(".+: foobar")
async def test_variables(avrae, dhttp):
    """User variables (<>/{}), cvars ({{}}), and gvars resolve inside aliases."""
    # Create a user variable...
    avrae.message("!uvar foobar Hello world")
    await dhttp.receive_message()
    # ...and a global variable, capturing its generated UUID address.
    avrae.message("!gvar create I am a gvar")
    match = await dhttp.receive_message("Created global variable `([0-9a-f-]+)`.")
    address = match.group(1)
    # Alias exercises all substitution syntaxes; {foobar} is a cvar lookup
    # (undefined, so it evaluates to 0), <foobar>/{{foobar}} read the uvar.
    avrae.message(
        "!alias foobar echo <foobar> {foobar} {{foobar}}\n" + f"{{{{get_gvar('{address}')}}}}"
    )  # {{get_gvar('1234...')}}
    await dhttp.receive_message()
    avrae.message("!foobar")
    await dhttp.receive_delete()
    await dhttp.receive_message(".+: Hello world 0 Hello world\nI am a gvar")
async def test_alias_percent_arguments(avrae, dhttp):
    """`%N%` substitutes the Nth argument; extras are appended, quotes kept."""
    avrae.message("!alias foobar echo the first argument is %1% yay")
    await dhttp.drain()
    # (invocation, expected echo) pairs; note %1% stays verbatim when no
    # argument is given, and a quoted argument keeps its quotes.
    cases = [
        ("!foobar", ".+: the first argument is %1% yay"),
        ("!foobar foo", ".+: the first argument is foo yay"),
        ("!foobar foo bar", ".+: the first argument is foo yay bar"),
        ('!foobar "foo bar"', '.+: the first argument is "foo bar" yay'),
    ]
    for invocation, expected in cases:
        avrae.message(invocation)
        await dhttp.receive_delete()
        await dhttp.receive_message(expected)
async def test_alias_ampersand_arguments(avrae, dhttp):
    """`&N&` substitutes the Nth argument; quoted arguments lose their quotes."""
    avrae.message("!alias foobar echo the first argument is &1& yay")
    await dhttp.drain()
    # (invocation, expected echo) pairs; &1& stays verbatim with no argument,
    # and unlike %1%, a quoted argument is inserted *without* its quotes.
    cases = [
        ("!foobar", ".+: the first argument is &1& yay"),
        ("!foobar foo", ".+: the first argument is foo yay"),
        ("!foobar foo bar", ".+: the first argument is foo yay bar"),
        ('!foobar "foo bar"', ".+: the first argument is foo bar yay"),
    ]
    for invocation, expected in cases:
        avrae.message(invocation)
        await dhttp.receive_delete()
        await dhttp.receive_message(expected)
async def test_alias_ampersand_all_arguments(avrae, dhttp):
    """`&ARGS&` expands to the Python-list repr of all parsed arguments."""
    avrae.message("!alias foobar echo the arguments are &ARGS&")
    await dhttp.drain()
    # (invocation, expected echo) pairs; quoted input parses as one argument.
    cases = [
        ("!foobar", r".+: the arguments are \[\]"),
        ("!foobar foo", r".+: the arguments are \['foo'\]"),
        ("!foobar foo bar", r".+: the arguments are \['foo', 'bar'\]"),
        ('!foobar "foo bar"', r".+: the arguments are \['foo bar'\]"),
    ]
    for invocation, expected in cases:
        avrae.message(invocation)
        await dhttp.receive_delete()
        await dhttp.receive_message(expected)
async def test_servalias(avrae, dhttp):
    """Server aliases fire inside the guild but are unavailable over DM."""
    avrae.message("!servalias serverfoobar echo this is serverfoobar", as_owner=True)
    await dhttp.drain()
    # Invocation in the server works...
    avrae.message("!serverfoobar")
    await dhttp.receive_delete()
    await dhttp.receive_message(r".+: this is serverfoobar")
    # ...but the same command over DM produces no response at all.
    avrae.message("!serverfoobar", dm=True)
    assert dhttp.queue_empty()
async def test_alias_vs_servalias(avrae, dhttp):
    """A personal alias takes precedence over a server alias of the same name."""
    avrae.message("!alias foobar echo this is foobar")
    avrae.message("!servalias foobar echo this is server foobar", as_owner=True)
    await dhttp.drain()
    avrae.message("!foobar")
    await dhttp.receive_delete()
    # The personal alias wins.
    await dhttp.receive_message(r".+: this is foobar")
@pytest.mark.usefixtures("character")
class TestCharacterAliases:
    """Aliases that read attributes of the active character."""

    async def test_echo_attributes(self, avrae, dhttp):
        """Legacy cvar syntax: {expr} evaluates, <var> substitutes directly."""
        character = await active_character(avrae)
        avrae.message(
            "!alias foobar echo {charismaMod} {proficiencyBonus} {charismaMod+proficiencyBonus}\n<name> <color>"
        )
        await dhttp.receive_message()
        avrae.message("!foobar")
        await dhttp.receive_delete()
        # Expected output is built from the same character the fixture loaded;
        # <color> is a hex value, matched loosely with [0-9a-f]+.
        await dhttp.receive_message(
            f".+: {character.stats.get_mod('cha')} {character.stats.prof_bonus} "
            f"{character.stats.get_mod('cha') + character.stats.prof_bonus}\n"
            f"{character.get_title_name()} [0-9a-f]+"
        )

    async def test_echo_attributes_new(self, avrae, dhttp):
        """New draconic API: {{character()}} exposes the stats object directly."""
        character = await active_character(avrae)
        avrae.message(
            "!alias foobar echo {{c=character()}} {{c.stats.charisma}} {{c.stats.prof_bonus}} "
            "{{c.stats.charisma+c.stats.prof_bonus}}"
        )
        await dhttp.receive_message()
        avrae.message("!foobar")
        await dhttp.receive_delete()
        await dhttp.receive_message(
            f".+: {character.stats.charisma} {character.stats.prof_bonus} "
            f"{character.stats.charisma + character.stats.prof_bonus}"
        )
@pytest.mark.usefixtures("init_fixture", "character")
class TestCombatAliases:
    """Aliases that use the combat() scripting API during an initiative.

    Fix: two methods named their first parameter ``cls`` although pytest
    passes the *instance* there — renamed to ``self`` for clarity (no
    behavioral change; the argument is never used).
    """

    async def test_combat_aliases_setup(self, avrae, dhttp):
        """Start initiative; later tests in this class rely on it running."""
        await start_init(avrae, dhttp)

    async def test_combat_function(self, avrae, dhttp):
        """combat() should be callable once initiative is running."""
        avrae.message("!test {{combat()}}")
        await dhttp.receive_message()

    async def test_combat_me(self, avrae, dhttp):
        """combat().me is None until the active character joins initiative."""
        avrae.message("!test {{combat().me}}")
        await dhttp.receive_message(r".+:\s*$")  # nothing after the colon, should return None
        # character joins
        character = await active_character(avrae)
        avrae.message("!init join")
        await dhttp.drain()
        avrae.message("!test {{combat().me.name}}")
        await dhttp.receive_message(f".+: {character.name}")  # should return the character's name

    async def test_combat_aliases_teardown(self, avrae, dhttp):
        """End the initiative started in setup."""
        await end_init(avrae, dhttp)
1,434 | is namedtuple | """
Tests for the `TypeChecker`-based type interface.
The actual correctness of the type checking is handled in
`test_jsonschema_test_suite`; these tests check that TypeChecker
functions correctly at a more granular level.
"""
from collections import namedtuple
from unittest import TestCase
from jsonschema import ValidationError, _keywords
from jsonschema._types import TypeChecker
from jsonschema.exceptions import UndefinedTypeCheck, UnknownType
from jsonschema.validators import Draft202012Validator, extend
def equals_2(checker, instance):
    """Type-check predicate: an instance is of type "two" iff it equals 2.

    ``checker`` is accepted (and ignored) to satisfy the TypeChecker
    callback signature.
    """
    return 2 == instance
def METHOD_NAME(instance):
    """Return True if *instance* is a namedtuple (a tuple with ``_fields``).

    Fix: the original returned ``getattr(instance, "_fields", None)`` — the
    ``_fields`` tuple itself or ``None`` — which is surprising for a
    predicate, and falsely reported zero-field namedtuples (whose ``_fields``
    is the falsy ``()``) as non-namedtuples. Callers only use the result in
    boolean context, so returning a real bool is backward-compatible.
    """
    return isinstance(instance, tuple) and hasattr(instance, "_fields")
def is_object_or_named_tuple(checker, instance):
    """Accept anything the draft checker calls an "object", plus namedtuples."""
    draft_is_object = Draft202012Validator.TYPE_CHECKER.is_type(instance, "object")
    return draft_is_object or METHOD_NAME(instance)
class TestTypeChecker(TestCase):
    """Granular behavior of TypeChecker: lookup, immutability, errors."""

    def test_is_type(self):
        """A registered check decides membership; others are rejected."""
        checker = TypeChecker({"two": equals_2})
        self.assertEqual(
            (
                checker.is_type(instance=2, type="two"),
                checker.is_type(instance="bar", type="two"),
            ),
            (True, False),
        )

    def test_is_unknown_type(self):
        """Unregistered type names raise UndefinedTypeCheck, hiding the KeyError."""
        with self.assertRaises(UndefinedTypeCheck) as e:
            TypeChecker().is_type(4, "foobar")
        self.assertIn(
            "'foobar' is unknown to this type checker",
            str(e.exception),
        )
        self.assertTrue(
            e.exception.__suppress_context__,
            msg="Expected the internal KeyError to be hidden.",
        )

    def test_checks_can_be_added_at_init(self):
        """Constructor mapping is equivalent to redefine() after the fact."""
        checker = TypeChecker({"two": equals_2})
        self.assertEqual(checker, TypeChecker().redefine("two", equals_2))

    def test_redefine_existing_type(self):
        """A later redefine() of the same name replaces the earlier check."""
        self.assertEqual(
            TypeChecker().redefine("two", object()).redefine("two", equals_2),
            TypeChecker().redefine("two", equals_2),
        )

    def test_remove(self):
        self.assertEqual(
            TypeChecker({"two": equals_2}).remove("two"),
            TypeChecker(),
        )

    def test_remove_unknown_type(self):
        with self.assertRaises(UndefinedTypeCheck) as context:
            TypeChecker().remove("foobar")
        self.assertIn("foobar", str(context.exception))

    def test_redefine_many(self):
        self.assertEqual(
            TypeChecker().redefine_many({"foo": int, "bar": str}),
            TypeChecker().redefine("foo", int).redefine("bar", str),
        )

    def test_remove_multiple(self):
        self.assertEqual(
            TypeChecker({"foo": int, "bar": str}).remove("foo", "bar"),
            TypeChecker(),
        )

    def test_type_check_can_raise_key_error(self):
        """
        Make sure no one writes:
            try:
                self._type_checkers[type](...)
            except KeyError:
        ignoring the fact that the function itself can raise that.
        """
        error = KeyError("Stuff")

        def raises_keyerror(checker, instance):
            raise error

        with self.assertRaises(KeyError) as context:
            TypeChecker({"foo": raises_keyerror}).is_type(4, "foo")
        # The *original* KeyError must propagate, not a swallowed/rewrapped one.
        self.assertIs(context.exception, error)

    def test_repr(self):
        checker = TypeChecker({"foo": METHOD_NAME, "bar": METHOD_NAME})
        # Type names are shown sorted, regardless of registration order.
        self.assertEqual(repr(checker), "<TypeChecker types={'bar', 'foo'}>")
class TestCustomTypes(TestCase):
    """Extending a validator's type checker with application-defined types."""

    def test_simple_type_can_be_extended(self):
        """Redefining "integer" can accept string-encoded ints too."""
        def int_or_str_int(checker, instance):
            if not isinstance(instance, (int, str)):
                return False
            try:
                int(instance)
            except ValueError:
                return False
            return True

        CustomValidator = extend(
            Draft202012Validator,
            type_checker=Draft202012Validator.TYPE_CHECKER.redefine(
                "integer", int_or_str_int,
            ),
        )
        validator = CustomValidator({"type": "integer"})
        validator.validate(4)
        validator.validate("4")
        with self.assertRaises(ValidationError):
            validator.validate(4.4)
        with self.assertRaises(ValidationError):
            validator.validate("foo")

    def test_object_can_be_extended(self):
        """A namedtuple can pass {"type": "object"} with a redefined check."""
        schema = {"type": "object"}
        Point = namedtuple("Point", ["x", "y"])
        type_checker = Draft202012Validator.TYPE_CHECKER.redefine(
            "object", is_object_or_named_tuple,
        )
        CustomValidator = extend(
            Draft202012Validator,
            type_checker=type_checker,
        )
        validator = CustomValidator(schema)
        validator.validate(Point(x=4, y=5))

    def test_object_extensions_require_custom_validators(self):
        """Type checking alone is not enough: "required" still fails on tuples."""
        schema = {"type": "object", "required": ["x"]}
        type_checker = Draft202012Validator.TYPE_CHECKER.redefine(
            "object", is_object_or_named_tuple,
        )
        CustomValidator = extend(
            Draft202012Validator,
            type_checker=type_checker,
        )
        validator = CustomValidator(schema)
        Point = namedtuple("Point", ["x", "y"])
        # Cannot handle required
        with self.assertRaises(ValidationError):
            validator.validate(Point(x=4, y=5))

    def test_object_extensions_can_handle_custom_validators(self):
        """Wrapping the keyword callables lets namedtuples validate fully."""
        schema = {
            "type": "object",
            "required": ["x"],
            "properties": {"x": {"type": "integer"}},
        }
        type_checker = Draft202012Validator.TYPE_CHECKER.redefine(
            "object", is_object_or_named_tuple,
        )

        def coerce_named_tuple(fn):
            # Convert namedtuples to dicts before delegating to the stock keyword.
            def coerced(validator, value, instance, schema):
                if METHOD_NAME(instance):
                    instance = instance._asdict()
                return fn(validator, value, instance, schema)
            return coerced

        required = coerce_named_tuple(_keywords.required)
        properties = coerce_named_tuple(_keywords.properties)
        CustomValidator = extend(
            Draft202012Validator,
            type_checker=type_checker,
            validators={"required": required, "properties": properties},
        )
        validator = CustomValidator(schema)
        Point = namedtuple("Point", ["x", "y"])
        # Can now process required and properties
        validator.validate(Point(x=4, y=5))
        with self.assertRaises(ValidationError):
            validator.validate(Point(x="not an integer", y=5))
        # As well as still handle objects.
        validator.validate({"x": 4, "y": 5})
        with self.assertRaises(ValidationError):
            validator.validate({"x": "not an integer", "y": 5})

    def test_unknown_type(self):
        """Validator.is_type raises UnknownType (not UndefinedTypeCheck)."""
        with self.assertRaises(UnknownType) as e:
            Draft202012Validator({}).is_type(12, "some unknown type")
        self.assertIn("'some unknown type'", str(e.exception))
1,435 | stop | # ***************************************************************
# Copyright (c) 2023 Jittor. All Rights Reserved.
# Maintainers:
# Guoye Yang <498731903@qq.com>
# Dun Liang <randonlang@gmail.com>.
#
#
# This file is subject to the terms and conditions defined in
# file 'LICENSE.txt', which is part of this source code package.
# ***************************************************************
'''
example:
export class_home=/mnt/disk/cjld/class_nn
mkdir -p $class_home
docker pull jittor/jittor-cuda
python3.7 -m jittor_utils.class.setup_env setup 4
python3.7 -m jittor_utils.class.setup_env start 4
python3.7 -m jittor_utils.class.setup_env report
python3.7 -m jittor_utils.class.setup_env restart 4
python3.7 -m jittor_utils.class.setup_env stop
'''
# export class_home
# setup [n] // setup for n users. including build user paths, user_info.txt and docker imgs. !!!WILL RESET SUDENT_FILES!!!
# start [n_gpu] // run n docker CONTAINERs with n_gpu GPUs.
# stop // stop n docker CONTAINERs
# restart [n_gpu] // restart n docker CONTAINERs with n_gpu GPUs.
import sys
import os
import json as js
import random
class_home = os.environ["class_home"]
student_files_dir = class_home + "/student_files"
student_files_bk_dir = class_home + "/student_files_bak"
cwd = os.path.dirname(__file__)
def run_cmd(cmd):
    """Echo *cmd*, run it through the shell, and return os.system's status.

    A non-zero status is printed as well so failures show up in the log.
    """
    print("[CMD]:", cmd)
    status = os.system(cmd)
    if status:
        print("[CMD] return", status)
    return status
def generate_random_str(randomlength):
    """Return a random alphanumeric string of length *randomlength*.

    Fix: the original alphabet literal misspelled the letter runs as
    "...GHIGKL..." (upper and lower case), so 'J'/'j' could never be drawn
    while 'G'/'g' appeared twice as often. Also simplified the manual
    index/concatenate loop to choice + join.
    """
    base_str = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789'
    return ''.join(random.choice(base_str) for _ in range(randomlength))
def setup(n):
    """Create per-student home directories and credentials for *n* students.

    Moves any existing student_files tree to a single backup (replacing the
    previous backup), then writes user_info.json with one record per student:
    ssh port (20000+i), random password, container name, and home path.
    WARNING: destructive — resets all student files (see module docstring).
    """
    if os.path.exists(student_files_dir):
        # Keep exactly one backup generation; the older one is discarded.
        if os.path.exists(student_files_bk_dir):
            run_cmd(f"rm -rf {student_files_bk_dir}")
        run_cmd(f"mv {student_files_dir} {student_files_bk_dir}")
    os.makedirs(student_files_dir)
    user_info = []
    for i in range(n): # 0 for root
        port = 20000 + i
        passwd = generate_random_str(8)
        name = 'stu_'+str(i)
        path = os.path.abspath(os.path.join(student_files_dir, name))
        info = {'port': port,
            'passwd': passwd,
            'name': name,
            'path': path}
        user_info.append(info)
        # Seed the student home from a template directory if one exists.
        student_files_src = class_home + "/student_files_src"
        if os.path.isdir(student_files_src):
            run_cmd(f"cp -r {student_files_src} {path}")
        else:
            run_cmd('mkdir -p ' + path)
    js.dump(user_info, open(student_files_dir+"/user_info.json", "w"))
def start(n, n_gpu):
    """Launch one docker container per student and provision it.

    n: number of physical GPUs to spread the containers over (round-robin).
    n_gpu: number of GPUs made visible to each container.

    Each container mounts the student's home at /root, gets sshd configured
    with the student's password/port, and has jittor installed and smoke-tested.
    Fix: removed the unused local ``id = i % n`` (dead code that also
    shadowed the ``id`` builtin).
    """
    assert os.path.exists(student_files_dir+'/user_info.json')
    user_info = js.load(open(student_files_dir+'/user_info.json', 'r'))
    for i in range(len(user_info)):
        # Student i is assigned GPUs (i*n_gpu) % n .. (i*n_gpu+n_gpu-1) % n.
        ids = ''
        for j in range(n_gpu):
            if j > 0:
                ids += ','
            ids += str((i * n_gpu + j) % n)
        u = user_info[i]
        print('START', i, '/', len(user_info))
        assert 0 == run_cmd(f'docker run -itd --shm-size=8g --network host --name {u["name"]} -v {u["path"]}:/root --gpus \'"device={ids}"\' jittor/jittor-cuda bash')
        # assert 0 == run_cmd(f'docker exec -it {u["name"]} bash -c \'apt update && apt install openssh-server -y\'')
        assert 0 == run_cmd(f'docker cp {cwd}/setup.py {u["name"]}:/etc/ssh/setup.py')
        assert 0 == run_cmd(f'docker cp {cwd}/motd {u["name"]}:/etc/motd')
        assert 0 == run_cmd(f'docker exec -it {u["name"]} python3.7 /etc/ssh/setup.py passwd {u["passwd"]}')
        assert 0 == run_cmd(f'docker exec -it {u["name"]} python3.7 /etc/ssh/setup.py ssh {u["port"]}')
        assert 0 == run_cmd(f'docker exec -it {u["name"]} python3.7 -m pip install jittor -U')
        assert 0 == run_cmd(f'docker exec -it {u["name"]} python3.7 -m jittor.test.test_example')
def METHOD_NAME():
    """Force-remove every student container listed in user_info.json."""
    info_path = student_files_dir + '/user_info.json'
    assert os.path.exists(info_path)
    user_info = js.load(open(info_path, 'r'))
    for i, user in enumerate(user_info):
        print('STOP', i, '/', len(user_info))
        run_cmd(f'docker rm -f {user["name"]}')
def report():
    """Print a ready-to-paste ssh command (with password) for every student."""
    info_path = student_files_dir + '/user_info.json'
    assert os.path.exists(info_path)
    user_info = js.load(open(info_path, 'r'))
    hostname = open("/etc/hostname", 'r').read().strip() + ".randonl.me"
    for user in user_info:
        print(f"ssh -p {user['port']} root@{hostname} # passwd: {user['passwd']}")
def restart(n, n_gpu):
    """Stop all student containers, then start them again with *n_gpu* GPUs each."""
    METHOD_NAME()
    start(n, n_gpu)
# Command-line dispatch: setup / start / stop / restart / report
# (see module docstring for usage examples).
args = sys.argv[1:]
if (args[0] == 'setup'):
    assert(len(args) == 2)
    # NOTE(review): eval() on a CLI argument only to type-check it — a plain
    # int(args[1]) with try/except would be safer; confirm before changing.
    assert(type(eval(args[1])) == int)
    n = int(args[1])
    assert(n < 999)
    setup(n)
elif (args[0] == 'start'):
    assert(len(args) in [2,3])
    assert(type(eval(args[1])) == int)
    n = int(args[1])
    # Optional third argument: GPUs per container (default 1).
    if len(args) == 3:
        assert(type(eval(args[2])) == int)
        n_gpu = int(args[2])
    else:
        n_gpu=1
    start(n, n_gpu)
elif (args[0] == 'stop'):
    METHOD_NAME()
elif (args[0] == 'restart'):
    assert(len(args) in [2,3])
    assert(type(eval(args[1])) == int)
    n = int(args[1])
    if len(args) == 3:
        assert(type(eval(args[2])) == int)
        n_gpu = int(args[2])
    else:
        n_gpu=1
    restart(n, n_gpu)
elif (args[0] == 'report'):
    report()
else:
    # Unknown sub-command.
    assert(False)
|
1,436 | search symop lib for ccp4 symbol | from __future__ import absolute_import, division, print_function
from cctbx import sgtbx
import libtbx.load_env
import os.path as op
from six.moves import range
if (libtbx.env.has_module("ccp4io")):
for _ in ["libccp4/data", "data"]:
ccp4io_lib_data = libtbx.env.under_dist(
module_name="ccp4io", path=_)
if (op.isdir(ccp4io_lib_data)):
break
else:
ccp4io_lib_data = None
else:
ccp4io_lib_data = None
_ccp4_symbol_cache = {"symop.lib": {}, "syminfo.lib": {}}
_syminfo_lib_cache = []
syminfo_lib_bad_old = set("""
P 21/m 21/m 2/n a
""".splitlines())
def ccp4_symbol(space_group_info, lib_name, require_at_least_one_lib=True):
    """Return the CCP4 symbol for *space_group_info* from the given library.

    lib_name is "symop.lib" or "syminfo.lib". Results are memoized per
    library in _ccp4_symbol_cache, keyed by the space group's lookup symbol.
    Returns None if no matching entry is found.
    """
    assert lib_name in _ccp4_symbol_cache
    sg_type = space_group_info.type()
    lookup_symbol = sg_type.lookup_symbol()
    cache = _ccp4_symbol_cache[lib_name]
    # Sentinel distinguishes "not cached yet" from a legitimately cached None.
    result = cache.get(lookup_symbol, "..unknown..")
    if (result != "..unknown.."):
        return result
    # For syminfo.lib, the whole file is parsed once into _syminfo_lib_cache;
    # only hit the filesystem when that cache is still empty.
    if (lib_name != "syminfo.lib" or len(_syminfo_lib_cache) == 0):
        # Candidate locations: bundled ccp4io data, then CCP4 env variables.
        lib_paths = []
        if (ccp4io_lib_data is not None):
            lib_paths.append(op.join(ccp4io_lib_data, lib_name))
        import os
        if 'CCP4_LIB' in os.environ:
            lib_paths.append(op.expandvars("$CCP4_LIB/data/"+lib_name))
        if 'CLIBD' in os.environ:
            lib_paths.append(op.expandvars("$CLIBD/"+lib_name))
        found_at_least_one_lib = False
        for lib_path in lib_paths:
            if (op.isfile(lib_path)):
                found_at_least_one_lib = True
                if (lib_name == "symop.lib"):
                    # Linear scan of symop.lib; caches and returns on a hit.
                    with open(lib_path) as fh:
                        ccp4_symbol = METHOD_NAME(
                            space_group_info=space_group_info,
                            file_iter=fh)
                    if (ccp4_symbol is not None):
                        cache[lookup_symbol] = ccp4_symbol
                        return ccp4_symbol
                else:
                    build_syminfo_lib_cache(lib_path)
                    break
        else:
            # for/else: no break happened, i.e. no usable syminfo.lib and no
            # symop.lib hit; optionally insist a library file existed at all.
            if (require_at_least_one_lib):
                assert found_at_least_one_lib
    if (lib_name == "syminfo.lib"):
        # Populate the per-lookup-symbol cache for this space group number
        # from the parsed syminfo data, returning when ours is found.
        for hall,ccp4_symbol in _syminfo_lib_cache[sg_type.number()]:
            sgi = sgtbx.space_group_info(symbol="Hall: "+hall)
            lus = sgi.type().lookup_symbol()
            cache[lus] = ccp4_symbol
            if (lus == lookup_symbol):
                return ccp4_symbol
    return None
def METHOD_NAME(space_group_info, file_iter):
    """Scan a symop.lib file for the entry matching *space_group_info*.

    Each header line is "<number> <order_z> ... <short-symbol> ..." followed
    by order_z symop lines. Returns the short CCP4 symbol on a match whose
    expanded symops equal the given space group, else None.
    """
    given_space_group_number = space_group_info.type().number()
    for line in file_iter:
        flds = line.split(None, 4)
        # The first field may have extra leading digits; the space group
        # number is encoded in its last three characters.
        space_group_number = int(flds[0][-3:])
        order_z = int(flds[1])
        if (space_group_number != given_space_group_number):
            # Skip this entry's symop lines by advancing the shared iterator.
            for i in range(order_z):
                next(file_iter)
        else:
            result = flds[3]
            group = collect_symops(file_iter=file_iter, order_z=order_z)
            # Numbers can collide across settings; verify the actual group.
            if (space_group_info.group() == group):
                return result
    return None
def collect_symops(file_iter, order_z):
    """Consume *order_z* symop lines from *file_iter* into a space group."""
    group = sgtbx.space_group()
    for _ in range(order_z):
        symop_line = next(file_iter).strip()
        group.expand_smx(sgtbx.rt_mx(symop_line))
    return group
def build_syminfo_lib_cache(lib_path):
    """Parse a CCP4 syminfo.lib file into the module-level cache.

    Fills _syminfo_lib_cache so that index N (1..230, the space group
    number) holds a list of (hall_symbol, ccp4_symbol) pairs; index 0 is a
    None placeholder. Preference: shortest "old" symbol, unless it is
    missing or blacklisted in syminfo_lib_bad_old, in which case the xHM
    symbol is used.
    """
    # Index 0 is unused so space group numbers map directly to indices.
    _syminfo_lib_cache.append(None)
    for number in range(230):
        _syminfo_lib_cache.append([])
    with open(lib_path) as file_iter:
        for line in file_iter:
            l = line.strip()
            if (l == "begin_spacegroup"):
                number = None
                symbols = {}
                # Inner loop consumes lines until the matching end marker.
                for line in file_iter:
                    l = line.strip()
                    if (l == "end_spacegroup"):
                        assert number is not None
                        # All three of hall/xhm/old must have been seen.
                        assert len(symbols) == 3
                        def get_shortest(s_list):
                            # Shortest non-empty string, or None.
                            result = None
                            for s in s_list:
                                if (len(s) == 0): continue
                                if (result is None or len(result) > len(s)):
                                    result = s
                            return result
                        ccp4_symbol = get_shortest(symbols["old"])
                        if (ccp4_symbol is None
                              or ccp4_symbol in syminfo_lib_bad_old):
                            # Fall back to the xHM symbol when "old" is
                            # unusable.
                            if (len(symbols["xhm"]) != 0):
                                ccp4_symbol = symbols["xhm"]
                            else:
                                raise RuntimeError("Missing both xHM and old symbols")
                        _syminfo_lib_cache[number].append((symbols["hall"], ccp4_symbol))
                        break
                    if (l.startswith("number ")):
                        flds = l.split()
                        assert len(flds) == 2
                        number = int(flds[1])
                        assert number >= 1
                        assert number <= 230
                    elif (l.startswith("symbol ")):
                        flds = l.split(None, 2)
                        assert len(flds) == 3
                        stype = flds[1].lower()
                        if (stype in ["hall", "xhm", "old"]):
                            assert stype not in symbols
                            # Symbol values are single-quoted in the file.
                            symbol = flds[2].strip()
                            assert len(symbol) >= 2
                            assert symbol.startswith("'")
                            assert symbol.endswith("'")
                            if (stype == "old"):
                                # "old" may hold several quoted alternatives.
                                symbols[stype] = " ".join(symbol[1:-1].split()).split("' '")
                            else:
                                symbols[stype] = symbol[1:-1]
                else:
                    # for/else: EOF reached without the end marker.
                    raise RuntimeError("Missing end_spacegroup")
    return None
1,437 | public visit | # ===============================================================================
# NAME: TopologyIDVisitor.py
#
# DESCRIPTION: A visitor responsible for the generation of component
# base class source code file.
#
# AUTHOR: reder
# EMAIL: reder@jpl.nasa.gov
# DATE CREATED : Feb 5, 2007
#
# Copyright 2013, California Institute of Technology.
# ALL RIGHTS RESERVED. U.S. Government Sponsorship acknowledged.
# ===============================================================================
#
# Python standard modules
#
import logging
import sys
from fprime_ac.generators import formatters
# from fprime_ac.utils import DiffAndRename
from fprime_ac.generators.visitors import AbstractVisitor
from fprime_ac.models import ModelParser
#
# Python extension modules and custom interfaces
#
# from Cheetah import Template
# from fprime_ac.utils import version
from fprime_ac.utils import ConfigManager
#
# Import precompiled templates here
#
try:
from fprime_ac.generators.templates.topology import publicTopologyID
except ImportError:
print("ERROR: must generate python templates first.")
sys.exit(-1)
# from fprime_ac.generators.templates import finishTopologyCpp
#
# Universal globals used within module go here.
# (DO NOT USE MANY!)
#
# Global logger init. below.
PRINT = logging.getLogger("output")
DEBUG = logging.getLogger("debug")
#
# Module class or classes go here.
class TopologyIDVisitor(AbstractVisitor.AbstractVisitor):
    """
    A visitor class responsible for generation of base ID/window range export CSV files
    """

    __instance = None
    __config = None
    __fp = None
    __form = None
    __form_comment = None
    __model_parser = None

    def __init__(self):
        """
        Constructor.
        """
        super().__init__()
        self.__config = ConfigManager.ConfigManager.getInstance()
        self.__form = formatters.Formatters()
        self.__form_comment = formatters.CommentFormatters()
        self.__model_parser = ModelParser.ModelParser.getInstance()
        DEBUG.info("TopologyIDVisitor: Instanced.")
        self.bodytext = ""
        self.prototypetext = ""

    def _writeTmpl(self, c, visit_str):
        """
        Wrapper to write tmpl to files desc.

        @param c: the Cheetah template instance to render.
        @param visit_str: visitor phase name, used only for debug logging.
        """
        DEBUG.debug("TopologyIDVisitor:%s" % visit_str)
        DEBUG.debug("===================================")
        DEBUG.debug(c)
        self.__fp.writelines(c.__str__())
        DEBUG.debug("===================================")

    def initFilesVisit(self, obj):
        """
        Defined to generate files for generated code products.

        Derives the output CSV filename from the topology XML filename
        (which must end with the configured TopologyXML suffix, e.g.
        "XXXAppAi.xml") and opens it for writing.
        @param obj: the instance of the component model to visit.
        """
        # Build filename here...
        if len(obj.get_comp_list()) > 0:
            xml_file = obj.get_comp_list()[0].get_xml_filename()
            x = xml_file.split(".")
            s = self.__config.get("assembly", "TopologyXML").split(".")
            l = len(s[0])
            #
            # NOTE(review): '&' is a bitwise AND of two bools here — works,
            # but 'and' would be clearer.
            if (x[0][-l:] == s[0]) & (x[1] == s[1]):
                filename = x[0].split(s[0])[0] + self.__config.get(
                    "assembly", "TopologyID"
                )
                PRINT.info(
                    "Generating code filename: %s topology, using default XML filename prefix..."
                    % filename
                )
            else:
                msg = (
                    "XML file naming format not allowed (must be XXXAppAi.xml), Filename: %s"
                    % xml_file
                )
                PRINT.info(msg)
                raise ValueError(msg)
            # Open file for writing here...
            DEBUG.info("Open file: %s" % filename)
            self.__fp = open(filename, "w")
            DEBUG.info("Completed")
        else:
            PRINT.info("ERROR: NO COMPONENTS FOUND IN TOPOLOGY XML FILE...")
            sys.exit(-1)

    def startSourceFilesVisit(self, obj):
        """
        Defined to generate starting static code within files.
        (No-op for the ID CSV export.)
        """

    def includes1Visit(self, obj):
        """
        Defined to generate includes within a file.
        Usually used for the base classes but also for Port types
        (No-op for the ID CSV export.)
        @param args: the instance of the concrete element to operation on.
        """

    def includes2Visit(self, obj):
        """
        Defined to generate internal includes within a file.
        Usually used for data type includes and system includes.
        (No-op for the ID CSV export.)
        @param args: the instance of the concrete element to operation on.
        """

    def namespaceVisit(self, obj):
        """
        Defined to generate namespace code within a file.
        Also any pre-condition code is generated.
        (No-op for the ID CSV export.)
        @param args: the instance of the concrete element to operation on.
        """

    def METHOD_NAME(self, obj):
        """
        Defined to generate public stuff within a class.

        Renders one CSV row per component instance:
        'component name,instance name,base id,window range'.
        @param args: the instance of the concrete element to operation on.
        """
        c = publicTopologyID.publicTopologyID()
        c.id_list = (
            []
        )  # Contents will be strings in the form 'component name,instance name,base id,window range'
        #
        # Generate Set Window/Base ID Method
        for id_tuple in obj.get_base_id_list():
            n = id_tuple[0]
            # NOTE(review): 'type' shadows the builtin within this loop.
            type = id_tuple[3].get_type()
            base_id = id_tuple[1]
            window_id = id_tuple[2]
            instance_list = [type, n, base_id, window_id]
            c.id_list.append(",".join(str(x) for x in instance_list))
        #
        self._writeTmpl(c, "publicVisit")

    def protectedVisit(self, obj):
        """
        Defined to generate protected stuff within a class.
        (No-op for the ID CSV export.)
        @param args: the instance of the concrete element to operation on.
        """

    def privateVisit(self, obj):
        """
        Defined to generate private stuff within a class.
        (No-op for the ID CSV export.)
        @param args: the instance of the concrete element to operation on.
        """

    def finishSourceFilesVisit(self, obj):
        """
        Defined to generate ending static code within files.
        Closes the CSV file opened in initFilesVisit.
        """
        # c = finishComponentCpp.finishComponentCpp()
        # self._writeTmpl(c, "finishSourceFilesVisit")
        self.__fp.close()
1,438 | get product history | """
Logging system to allow users to view & understand actions done in the strategy
Copyright (C) 2021 Emerson Dove
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as published
by the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public License
along with this program. If not, see <https://www.gnu.org/licenses/>.
"""
import pandas
from typing import Union
from datetime import datetime as dt
import blankly
from blankly.exchanges.interfaces.abc_exchange_interface import ABCExchangeInterface
from blankly.exchanges.orders.market_order import MarketOrder
from blankly.exchanges.orders.limit_order import LimitOrder
from blankly.utils.utils import AttributeDict
class StrategyLogger(ABCExchangeInterface):
    """Exchange-interface wrapper that forwards every call unchanged while
    reporting order activity to blankly's platform logger.

    Methods documented with "No logging implemented" are pure pass-throughs
    to the wrapped interface.
    """

    def __init__(self, interface=None, strategy=None):
        self.interface = interface  # the wrapped ABCExchangeInterface
        self.strategy = strategy  # owning strategy, kept for reference
        # Exchange type string used to tag every reported event.
        self.__type = self.interface.get_exchange_type()

    def get_calls(self):
        """No logging implemented"""
        return self.interface.get_calls()

    def get_exchange_type(self):
        """No logging implemented"""
        return self.interface.get_exchange_type()

    def get_account(self, symbol: str = None) -> AttributeDict:
        """No logging implemented"""
        return self.interface.get_account(symbol)

    """
    These next three queries have large responses. It is unclear if this quantity of data is useful or necessary
    """
    def get_products(self):
        """No logging implemented"""
        return self.interface.get_products()

    def METHOD_NAME(self, symbol: str, epoch_start: float,
                    epoch_stop: float, resolution: Union[str, int]) -> pandas.DataFrame:
        """No logging implemented"""
        return self.interface.METHOD_NAME(symbol, epoch_start, epoch_stop, resolution)

    def history(self,
                symbol: str,
                to: Union[str, int] = 200,
                resolution: Union[str, int] = '1d',
                start_date: Union[str, dt, float] = None,
                end_date: Union[str, dt, float] = None,
                return_as: str = 'df') -> pandas.DataFrame:
        """No logging implemented"""
        return self.interface.history(symbol, to=to,
                                      resolution=resolution, start_date=start_date,
                                      end_date=end_date, return_as=return_as)

    # no logging for these on platform yet
    def take_profit_order(self, symbol: str, price: float, size: float) -> LimitOrder:
        # Fix: was a bare stub returning None; forward to the wrapped
        # interface so callers receive the LimitOrder the signature promises.
        return self.interface.take_profit_order(symbol, price, size)

    # no logging for these on platform yet
    def stop_loss_order(self, symbol: str, price: float, size: float) -> LimitOrder:
        # Fix: was a bare stub returning None (see take_profit_order).
        return self.interface.stop_loss_order(symbol, price, size)

    def market_order(self, symbol: str, side: str, size: float) -> MarketOrder:
        out = self.interface.market_order(symbol, side, size)
        # Record this market order along with the arguments.
        # Best-effort reporting: a logging failure must never break trading.
        try:
            blankly.reporter.log_market_order({
                'symbol': symbol,
                'exchange': self.__type,
                'size': size,
                'id': out.get_id(),
                'side': side
            }, self.__type)
        except Exception:
            pass
        return out

    def limit_order(self, symbol: str, side: str, price: float, size: float) -> LimitOrder:
        out = self.interface.limit_order(symbol, side, price, size)
        # Record limit order along with the arguments.
        # Best-effort reporting: a logging failure must never break trading.
        try:
            blankly.reporter.log_limit_order({
                'symbol': symbol,
                'exchange': self.__type,
                'size': size,
                'id': out.get_id(),
                'side': side,
                'price': price
            }, self.__type)
        except Exception:
            pass
        return out

    def cancel_order(self, symbol: str, order_id: str) -> dict:
        """No logging implemented"""
        return self.interface.cancel_order(symbol, order_id)

    def get_open_orders(self, symbol: str = None) -> list:
        """No logging implemented"""
        return self.interface.get_open_orders(symbol=symbol)

    def get_order(self, symbol: str, order_id: str) -> dict:
        """
        TODO - this needs to update the order on the backend
        """
        out = self.interface.get_order(symbol, order_id)
        # Best-effort status update on the platform.
        try:
            blankly.reporter.update_order({
                'id': order_id,
                'status': out['status']
            }, self.__type)
        except Exception:
            pass
        return out

    def get_fees(self, symbol) -> dict:
        """No logging implemented"""
        return self.interface.get_fees(symbol)

    def get_order_filter(self, symbol: str):
        """No logging implemented"""
        return self.interface.get_order_filter(symbol)

    def get_price(self, symbol: str) -> float:
        """No logging implemented"""
        return self.interface.get_price(symbol)

    """
    No logging implemented for these properties
    """
    @property
    def account(self) -> AttributeDict:
        """No logging implemented"""
        return self.interface.account

    @property
    def orders(self) -> list:
        """No logging implemented"""
        return self.interface.orders

    @property
    def cash(self) -> float:
        """No logging implemented"""
        return self.interface.cash
1,439 | generate meta | """
Copyright (c) 2018-2023 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from .format_converter import FileBasedAnnotationConverter, ConverterReturn
from ..representation import ClassificationAnnotation, ContainerAnnotation
from ..utils import read_xml, check_file_existence
from ..config import StringField, PathField, ConfigError
class CVATAttributesRecognitionConverter(FileBasedAnnotationConverter):
    """Converts CVAT XML attribute annotations for a single label into
    classification (or container) annotations."""

    __provider__ = 'cvat_attributes_recognition'
    annotation_types = (ClassificationAnnotation, )

    @classmethod
    def parameters(cls):
        """Declare converter parameters on top of the base file converter."""
        configuration_parameters = super().parameters()
        configuration_parameters.update({
            'label': StringField(description='specific label for attribute collection'),
            'images_dir': PathField(
                is_directory=True, optional=True,
                description='path to dataset images, used only for content existence check'
            )
        })
        return configuration_parameters

    def configure(self):
        super().configure()
        self.label = self.get_value_from_config('label')
        self.images_dir = self.get_value_from_config('images_dir') or self.annotation_file.parent

    def convert(self, check_content=False, progress_callback=None, progress_interval=100, **kwargs):
        """Read the CVAT XML and emit one (possibly composite) classification
        annotation per bounding box carrying the configured label."""
        annotation = read_xml(self.annotation_file)
        meta = annotation.find('meta')
        size = int(meta.find('task').find('size').text)
        attribute_values_mapping = self._attribute_values_mapping(meta)

        annotations = []
        content_errors = None if not check_content else []
        for image_id, image in enumerate(annotation.iter('image')):
            identifier = image.attrib['name'].split('/')[-1]
            if check_content:
                if not check_file_existence(self.images_dir / identifier):
                    content_errors.append('{}: does not exist'.format(self.images_dir / identifier))
            for bbox in image:
                # Only boxes tagged with the configured label are converted.
                if 'label' not in bbox.attrib.keys() or bbox.attrib['label'] != self.label:
                    continue
                annotation_dict = {}
                bbox_rect = [
                    float(bbox.attrib['xtl']), float(bbox.attrib['ytl']),
                    float(bbox.attrib['xbr']), float(bbox.attrib['ybr'])
                ]
                for attribute in bbox.iter('attribute'):
                    attribute_name = attribute.attrib['name']
                    attribute_label = attribute_values_mapping[attribute_name][attribute.text]
                    attribute_annotation = ClassificationAnnotation(identifier, attribute_label)
                    attribute_annotation.metadata['rect'] = bbox_rect
                    annotation_dict[attribute_name] = attribute_annotation
                # A single attribute stays a plain annotation; several become
                # a container of per-attribute annotations.
                if len(annotation_dict) == 1:
                    annotations.append(next(iter(annotation_dict.values())))
                else:
                    annotations.append(ContainerAnnotation(annotation_dict))
            if progress_callback is not None and image_id % progress_interval == 0:
                progress_callback(image_id * 100 / size)

        return ConverterReturn(annotations, self.METHOD_NAME(attribute_values_mapping), content_errors)

    def _attribute_values_mapping(self, meta):
        """Build {attribute name: {value string: class id}} for the configured
        label. Shared by convert() and get_meta() (was duplicated inline)."""
        attribute_values_mapping = {}
        label = self.select_label(meta)
        for attribute in label.iter('attribute'):
            label_to_id = {
                value: idx for idx, value in enumerate(attribute.find('values').text.split('\n'))
            }
            attribute_values_mapping[attribute.find('name').text] = label_to_id
        return attribute_values_mapping

    @staticmethod
    def METHOD_NAME(attribute_values_mapping):
        """Turn the value mapping into dataset meta: a single 'label_map' when
        there is one attribute, otherwise one '<name>_label_map' per attribute."""
        if len(attribute_values_mapping) == 1:
            reversed_label_map = next(iter(attribute_values_mapping.values()))
            return {'label_map': {value: key for key, value in reversed_label_map.items()}}
        meta = {}
        for key, reversed_label_map in attribute_values_mapping.items():
            meta['{}_label_map'.format(key)] = {value: key for key, value in reversed_label_map.items()}
        return meta

    def select_label(self, meta):
        """Return the <label> element matching self.label or raise ConfigError."""
        label = [label for label in meta.iter('label') if label.find('name').text == self.label]
        if not label:
            raise ConfigError('{} does not present in annotation'.format(self.label))
        return label[0]

    def get_meta(self):
        annotation = read_xml(self.annotation_file)
        return self.METHOD_NAME(self._attribute_values_mapping(annotation.find('meta')))
1,440 | main | #!/usr/bin/env python3
"""
Copyright 2020 The Magma Authors.
This source code is licensed under the BSD-style license found in the
LICENSE file in the root directory of this source tree.
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import argparse
from typing import NamedTuple
from lte.protos.subscriberdb_pb2 import SuciProfile
from lte.protos.subscriberdb_pb2_grpc import SuciProfileDBStub
from magma.common.rpc_utils import grpc_wrapper
from magma.subscriberdb.crypto.EC import ECDH_SECP256R1, X25519
from orc8r.protos.common_pb2 import Void
# Immutable container for a generated home-network ECIES key pair.
home_network_key_pair = NamedTuple(
    "home_network_key_pair",
    [("home_network_public_key", bytes), ("home_network_private_key", bytes)],
)
class HomeNetworkKeyPairGen(object):
    """
    Class to generate public/private keys for a SUCI protection profile
    ("ProfileA" -> X25519, "ProfileB" -> SECP256R1).
    """

    def __init__(self, profile: str):
        """
        Remember the profile name; the key pair starts out empty.
        """
        self.profile = profile
        self.home_network_key_pair = home_network_key_pair(b'', b'')

    def core_home_network_key_gen(self):
        """
        Generate and store a fresh key pair for the configured profile.

        Returns None (leaving the stored pair untouched) when the profile
        is not one of the supported schemes.
        """
        if self.profile == "ProfileA":
            ec = X25519()
        elif self.profile == "ProfileB":
            ec = ECDH_SECP256R1()
        else:
            # Fix: previously fell through to 'if ec:' with 'ec' unbound,
            # raising NameError for any other profile value.
            return None
        ec.generate_keypair()
        self.home_network_key_pair = home_network_key_pair(
            ec.get_pubkey(),
            ec.get_privkey(),
        )

    def get_home_network_public_key(self):
        """
        Return the stored public key bytes.
        """
        return self.home_network_key_pair.home_network_public_key

    def get_home_network_private_key(self):
        """
        Return the stored private key bytes.
        """
        return self.home_network_key_pair.home_network_private_key

    def print_key_pair(self):
        """
        Dump profile and both key values to stdout (debug aid).
        """
        print(self.profile)
        print(self.home_network_key_pair.home_network_public_key)
        print(self.home_network_key_pair.home_network_private_key)
def print_suciprofile(obj: SuciProfile):
    """Pretty-print one SuciProfile record to stdout."""
    print("###SUCI Profile Record####")
    print(f"home_net_public_key_id: {obj.home_net_public_key_id}")
    print(f"protection_scheme : {obj.protection_scheme}")
    print(f"home_net_public_key : {obj.home_net_public_key.hex()}")
    print(f"home_net_private_key : {obj.home_net_private_key.hex()}")
    print("#######")
@grpc_wrapper
def add_suciprofile(client, args):
    """
    add_suciprofile

    Validate the CLI arguments, generate a key pair for the selected
    protection scheme and store the resulting SuciProfile record.
    """
    if args.protection_scheme is not None:
        if int(args.protection_scheme) == 0:
            hnp_gen = HomeNetworkKeyPairGen("ProfileA")
            profile = SuciProfile.ProfileA
        elif int(args.protection_scheme) == 1:
            hnp_gen = HomeNetworkKeyPairGen("ProfileB")
            profile = SuciProfile.ProfileB
        else:
            print("Invalid protection_scheme value:", args.protection_scheme)
            return
    else:
        print("protection_scheme is not configured, so taking default value as ProfileA")
        # Fix: the generator was never created on this path, causing a
        # NameError at key-generation time below.
        hnp_gen = HomeNetworkKeyPairGen("ProfileA")
        profile = SuciProfile.ProfileA
    if args.home_net_public_key_id is not None:
        if int(args.home_net_public_key_id) < 0 or int(args.home_net_public_key_id) > 255:
            print("Invalid home_net_public_key_id value:", args.home_net_public_key_id)
            return
    else:
        print("home_net_public_key_id is not passed")
        return
    hnp_gen.core_home_network_key_gen()
    request = SuciProfile(
        home_net_public_key_id=int(args.home_net_public_key_id),
        protection_scheme=profile,
        home_net_public_key=bytes(hnp_gen.get_home_network_public_key()),
        home_net_private_key=bytes(hnp_gen.get_home_network_private_key()),
    )
    client.AddSuciProfile(request)
    print("Added the record")
    print_suciprofile(request)
@grpc_wrapper
def delete_suciprofile(client, args):
    """Remove the SuciProfile record matching --home_net_public_key_id."""
    key_id = int(args.home_net_public_key_id)
    if key_id < 0 or key_id > 255:
        print("Invalid home_net_public_key_id value:", args.home_net_public_key_id)
        return
    client.DeleteSuciProfile(SuciProfile(home_net_public_key_id=key_id))
    print("Deleted the record with home_net_public_key_id:", args.home_net_public_key_id)
@grpc_wrapper
def list_suciprofile(client, args):
    """Print every stored SuciProfile record, or a notice when none exist."""
    response = client.ListSuciProfile(Void())
    if not response.suci_profiles:
        print("SuciProfileList is empty")
        return
    for suci_profile in response.suci_profiles:
        print_suciprofile(suci_profile)
def METHOD_NAME():
    """Create the argparse parser, parse argv and run the chosen subcommand."""  # noqa: D401
    parser = argparse.ArgumentParser(
        description='Management CLI for SuciProfile',
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
    )
    # Add subcommands
    subparsers = parser.add_subparsers(title='subcommands', dest='cmd')
    # add_suciprofile
    subparser = subparsers.add_parser('add', help='Add SuciProfile record')
    subparser.add_argument(
        "--home_net_public_key_id", help="home_network_public_key_id"
        " e.g: --home_net_public_key_id 0..255",
    )
    subparser.add_argument(
        "--protection_scheme", help="ECIESProtectionScheme"
        " e.g: --protection_scheme 0 or 1",
    )
    subparser.set_defaults(func=add_suciprofile)
    # delete_suciprofile
    subparser = subparsers.add_parser('delete', help='Delete SuciProfile record')
    subparser.add_argument(
        "--home_net_public_key_id", help="home_network_public_key_id"
        " e.g: --home_net_public_key_id 0..255",
    )
    subparser.set_defaults(func=delete_suciprofile)
    # list_suciprofile
    subparser = subparsers.add_parser('list', help='List SuciProfile records')
    subparser.set_defaults(func=list_suciprofile)
    # Parse the args
    args = parser.parse_args()
    if not args.cmd:
        parser.print_usage()
        exit(1)
    # Execute the subcommand function
    # (handlers are @grpc_wrapper-decorated: they receive the stub + service name)
    args.func(args, SuciProfileDBStub, 'subscriberdb')
# Script entry point.
if __name__ == "__main__":
    METHOD_NAME()
1,441 | tmpfunc | import io
import os
import sys
import copy
import json
import time
import uuid
import socket
import logging
import traceback
from threading import local
from collections import OrderedDict
from contextlib import contextmanager
LOG_TIMESTAMPS = "LOG_TIMESTAMPS" in os.environ
def json_handler(obj):
    """Fallback serializer for json.dumps: represent unknown objects via repr()."""
    return repr(obj)
def json_robust_dumps(obj):
    # Serialize to JSON, falling back to repr() for unserializable values.
    return json.dumps(obj, default=json_handler)
class NiceOrderedDict(OrderedDict):
    """OrderedDict whose str() is its robust-JSON serialization."""
    def __str__(self):
        return json_robust_dumps(self)
class SwagFormatter(logging.Formatter):
    """logging.Formatter that renders records as JSON dictionaries, merging in
    the owning SwagLogger's context."""

    def __init__(self, swaglogger):
        logging.Formatter.__init__(self, None, '%a %b %d %H:%M:%S %Z %Y')
        self.swaglogger = swaglogger
        self.host = socket.gethostname()

    def format_dict(self, record):
        """Return an ordered dict of the record's fields plus logger context."""
        record_dict = NiceOrderedDict()

        if isinstance(record.msg, dict):
            record_dict['msg'] = record.msg
        else:
            try:
                record_dict['msg'] = record.getMessage()
            except (ValueError, TypeError):
                # getMessage() failed (args don't match the format string);
                # fall back to the raw message plus its args.
                # Fix: record.args is normally a tuple, and list + tuple
                # raises TypeError -- coerce before concatenating.
                record_dict['msg'] = [record.msg] + list(record.args)
        record_dict['ctx'] = self.swaglogger.get_ctx()

        if record.exc_info:
            record_dict['exc_info'] = self.formatException(record.exc_info)

        record_dict['level'] = record.levelname
        record_dict['levelnum'] = record.levelno
        record_dict['name'] = record.name
        record_dict['filename'] = record.filename
        record_dict['lineno'] = record.lineno
        record_dict['pathname'] = record.pathname
        record_dict['module'] = record.module
        record_dict['funcName'] = record.funcName
        record_dict['host'] = self.host
        record_dict['process'] = record.process
        record_dict['thread'] = record.thread
        record_dict['threadName'] = record.threadName
        record_dict['created'] = record.created

        return record_dict

    def format(self, record):
        if self.swaglogger is None:
            raise Exception("must set swaglogger before calling format()")
        return json_robust_dumps(self.format_dict(record))
class SwagLogFileFormatter(SwagFormatter):
    """SwagFormatter variant for log files: suffixes keys with a type tag and
    stamps each record with a unique id."""

    def fix_kv(self, k, v):
        # append type to names to preserve legacy naming in logs
        # avoids overlapping key namespaces with different types
        # e.g. log.info() creates 'msg' -> 'msg$s'
        # log.event() creates 'msg.health.logMonoTime' -> 'msg.health.logMonoTime$i'
        # because overlapping namespace 'msg' caused problems
        # NOTE: bool is tested before int deliberately (bool is an int subclass).
        if isinstance(v, (str, bytes)):
            k += "$s"
        elif isinstance(v, float):
            k += "$f"
        elif isinstance(v, bool):
            k += "$b"
        elif isinstance(v, int):
            k += "$i"
        elif isinstance(v, dict):
            # Recurse into nested dicts, re-tagging every key/value pair.
            nv = {}
            for ik, iv in v.items():
                ik, iv = self.fix_kv(ik, iv)
                nv[ik] = iv
            v = nv
        elif isinstance(v, list):
            k += "$a"
        return k, v

    def format(self, record):
        # Accept either a LogRecord or an already-JSON-encoded string.
        if isinstance(record, str):
            v = json.loads(record)
        else:
            v = self.format_dict(record)
        mk, mv = self.fix_kv('msg', v['msg'])
        del v['msg']
        v[mk] = mv
        v['id'] = uuid.uuid4().hex
        return json_robust_dumps(v)
class SwagErrorFilter(logging.Filter):
    """Passes only records below ERROR, so a stdout handler can drop the
    errors that a separate stderr handler prints."""
    def filter(self, record):
        return record.levelno < logging.ERROR
def METHOD_NAME():
    # Dummy marker; exists so _srcfile() can read this module's file path
    # from its __code__ object.
    return 0
def _srcfile():
    # Normalized path of this module's source file (used by
    # SwagLogger.findCaller when deciding which frames to skip).
    return os.path.normcase(METHOD_NAME.__code__.co_filename)
class SwagLogger(logging.Logger):
    """Logger with per-thread ("local") and process-wide ("global") context
    dictionaries that SwagFormatter attaches to every record."""

    def __init__(self):
        logging.Logger.__init__(self, "swaglog")
        # Context shared by all threads.
        self.global_ctx = {}
        # Per-thread context storage.
        self.log_local = local()
        self.log_local.ctx = {}

    def local_ctx(self):
        # Lazily create the thread-local context dict: threads other than the
        # creating one start without a 'ctx' attribute.
        try:
            return self.log_local.ctx
        except AttributeError:
            self.log_local.ctx = {}
            return self.log_local.ctx

    def get_ctx(self):
        # Merged view; global keys win over thread-local keys on collision.
        return dict(self.local_ctx(), **self.global_ctx)

    @contextmanager
    def ctx(self, **kwargs):
        # Temporarily overlay kwargs on the thread-local context for the
        # duration of the with-block.
        old_ctx = self.local_ctx()
        self.log_local.ctx = copy.copy(old_ctx) or {}
        self.log_local.ctx.update(kwargs)
        try:
            yield
        finally:
            self.log_local.ctx = old_ctx

    def bind(self, **kwargs):
        # Permanently merge kwargs into the thread-local context.
        self.local_ctx().update(kwargs)

    def bind_global(self, **kwargs):
        # Permanently merge kwargs into the process-wide context.
        self.global_ctx.update(kwargs)

    def event(self, event, *args, **kwargs):
        # Structured event record; the level is chosen from the presence of
        # 'error'/'debug' keys in kwargs.
        evt = NiceOrderedDict()
        evt['event'] = event
        if args:
            evt['args'] = args
        evt.update(kwargs)
        if 'error' in kwargs:
            self.error(evt)
        elif 'debug' in kwargs:
            self.debug(evt)
        else:
            self.info(evt)

    def timestamp(self, event_name):
        # Emit a monotonic timestamp record (nanoseconds), only when the
        # LOG_TIMESTAMPS environment variable is set.
        if LOG_TIMESTAMPS:
            t = time.monotonic()
            tstp = NiceOrderedDict()
            tstp['timestamp'] = NiceOrderedDict()
            tstp['timestamp']["event"] = event_name
            tstp['timestamp']["time"] = t*1e9
            self.debug(tstp)

    def findCaller(self, stack_info=False, stacklevel=1):
        """
        Find the stack frame of the caller so that we can note the source
        file name, line number and function name.
        """
        f = sys._getframe(3)
        #On some versions of IronPython, currentframe() returns None if
        #IronPython isn't run with -X:Frames.
        if f is not None:
            f = f.f_back
        orig_f = f
        while f and stacklevel > 1:
            f = f.f_back
            stacklevel -= 1
        if not f:
            f = orig_f
        rv = "(unknown file)", 0, "(unknown function)", None
        while hasattr(f, "f_code"):
            co = f.f_code
            filename = os.path.normcase(co.co_filename)
            # TODO: is this pylint exception correct?
            # NOTE(review): this compares the filename string against the
            # _srcfile *function object* (not _srcfile()), so it can never be
            # equal -- frames from this module are likely not skipped as
            # intended. Confirm before changing.
            if filename == _srcfile:  # pylint: disable=comparison-with-callable
                f = f.f_back
                continue
            sinfo = None
            if stack_info:
                sio = io.StringIO()
                sio.write('Stack (most recent call last):\n')
                traceback.print_stack(f, file=sio)
                sinfo = sio.getvalue()
                if sinfo[-1] == '\n':
                    sinfo = sinfo[:-1]
                sio.close()
            rv = (co.co_filename, f.f_lineno, co.co_name, sinfo)
            break
        return rv
if __name__ == "__main__":
    # Manual smoke test: exercise the logger with and without the JSON
    # formatter; INFO/WARNING go to stdout, ERROR/CRITICAL to stderr.
    log = SwagLogger()

    out_handler = logging.StreamHandler(sys.stdout)
    out_handler.setLevel(logging.INFO)
    out_handler.addFilter(SwagErrorFilter())
    log.addHandler(out_handler)

    err_handler = logging.StreamHandler(sys.stderr)
    err_handler.setLevel(logging.ERROR)
    log.addHandler(err_handler)

    log.info("asdasd %s", "a")
    log.info({'wut': 1})
    log.warning("warning")
    log.error("error")
    log.critical("critical")
    log.event("test", x="y")

    with log.ctx():
        out_handler.setFormatter(SwagFormatter(log))
        err_handler.setFormatter(SwagFormatter(log))
        log.bind(user="some user")
        log.info("in req")
        print("")
        log.warning("warning")
        print("")
        log.error("error")
        print("")
        log.critical("critical")
        print("")
        log.event("do_req", a=1, b="c")
1,442 | mean | #!/usr/bin/env python
##
# @file
# This file is part of SeisSol.
#
# @author Carsten Uphoff (c.uphoff AT tum.de, http://www5.in.tum.de/wiki/index.php/Carsten_Uphoff,_M.Sc.)
#
# @section LICENSE
# Copyright (c) 2016, SeisSol Group
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from this
# software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
# @section DESCRIPTION
#
import Proxy
import os
import re
import math
def median(values):
    """Return the median of a non-empty sequence of numbers."""
    n = len(values)
    x = sorted(values)
    # Fix: use floor division so indices stay integers under Python 3
    # (n/2 yields a float there); identical result under Python 2.
    if n % 2 == 0:
        return 0.5 * (x[n // 2 - 1] + x[n // 2])
    return x[n // 2]
def METHOD_NAME(values):
    """Arithmetic mean of a non-empty sequence (always float division)."""
    return sum(values) / float(len(values))
def var(values):
    """Unbiased sample variance; NaN when fewer than two samples."""
    count = len(values)
    if count < 2:
        return float('nan')
    center = METHOD_NAME(values)
    return sum((v - center) ** 2. for v in values) / (count - 1.)
def stdev(values):
    # Sample standard deviation: square root of the unbiased variance.
    return math.sqrt(var(values))
def writeTimes(times):
    # Dump per-matrix timing statistics to 'times.txt'.
    # NOTE: Python 2 code (dict.iteritems). 'times' maps
    # matrix-base name -> {variant index -> [time samples]}.
    with open('times.txt', 'w') as f:
        f.write('{:16} {:10} {:10} {:10}\n'.format('Name', 'Median', 'Mean', 'Std. dev.'))
        for name, value in sorted(times.iteritems()):
            for idx, time in sorted(value.iteritems()):
                f.write('{:16} {:>10f} {:>10f} {:>10f}\n'.format(name + str(idx), median(time), METHOD_NAME(time), stdev(time)))
def analyse():
    # Collect timing results from Proxy.OutputDir, write statistics via
    # writeTimes, and return the (matrix, variant) pairs that beat the
    # dense baseline. NOTE: Python 2 code (has_key/iteritems).
    times = dict()
    timePattern = re.compile(r'^time\D+([0-9\.]+)', re.MULTILINE)
    for resultFile in os.listdir(Proxy.OutputDir):
        resultFileName, extension = os.path.splitext(resultFile)
        if extension == '.run':
            matrix = resultFileName.split('_')[0]
            content = open(os.path.join(Proxy.OutputDir, resultFile), 'r').read()
            timeSearch = timePattern.search(content)
            if timeSearch:
                time = float(timeSearch.group(1))
                # Matrix name encodes base name plus a one-digit variant suffix.
                matrixBase = matrix[:-1]
                matrixVariant = int(matrix[-1])
                # Only variant 0 is analysed; others are skipped.
                if matrixVariant > 0:
                    continue
                if not times.has_key(matrixBase):
                    times[matrixBase] = dict()
                if not times[matrixBase].has_key(matrixVariant):
                    times[matrixBase][matrixVariant] = list()
                times[matrixBase][matrixVariant].append(time)
            else:
                print('Warning: Invalid result file {}.'.format(resultFileName))
    writeTimes(times)
    matrices = list()
    # The 'dense' run is the baseline every sparse variant must beat.
    dense = times.pop('dense').pop(0)
    denseTime = median(dense)
    timeSaveVsDense = 0.
    for key, variants in times.iteritems():
        matrixTimes = {variant: median(timeSeries) for variant, timeSeries in variants.iteritems()}
        minTimeKey = min(matrixTimes, key=matrixTimes.get)
        if matrixTimes[minTimeKey] < denseTime:
            matrices.append((key, minTimeKey))
            timeSaveVsDense += denseTime - matrixTimes[minTimeKey]
    print('Estimated tuned time: {}'.format(denseTime - timeSaveVsDense))
    return matrices
|
1,443 | create record | """Module provider for PointHQ"""
import json
import logging
from argparse import ArgumentParser
from typing import List
import requests
from lexicon.exceptions import AuthenticationError
from lexicon.interfaces import Provider as BaseProvider
LOGGER = logging.getLogger(__name__)
class Provider(BaseProvider):
    """Provider class for PointHQ.

    Talks to the PointHQ REST API with HTTP basic auth
    (email address + API token).
    """

    @staticmethod
    def get_nameservers() -> List[str]:
        return ["pointhq.com"]

    @staticmethod
    def configure_parser(parser: ArgumentParser) -> None:
        parser.add_argument(
            "--auth-username", help="specify email address for authentication"
        )
        parser.add_argument("--auth-token", help="specify token for authentication")

    def __init__(self, config):
        super(Provider, self).__init__(config)
        self.domain_id = None
        self.api_endpoint = "https://pointhq.com"

    def authenticate(self):
        # Resolve the configured domain to its PointHQ zone id; failing to
        # find the zone is treated as an authentication failure.
        payload = self._get(f"/zones/{self.domain}")
        if not payload["zone"]:
            raise AuthenticationError("No domain found")
        self.domain_id = payload["zone"]["id"]

    def cleanup(self) -> None:
        pass

    # Create record. If record already exists with the same content, do nothing'
    def METHOD_NAME(self, rtype, name, content):
        # check if record already exists
        existing_records = self.list_records(rtype, name, content)
        if len(existing_records) == 1:
            return True
        payload = self._post(
            f"/zones/{self.domain_id}/records",
            {
                "zone_record": {
                    "record_type": rtype,
                    "name": self._relative_name(name),
                    "data": content,
                }
            },
        )
        LOGGER.debug("create_record: %s", payload["zone_record"])
        return bool(payload["zone_record"])

    # List all records. Return an empty list if no records found
    # type, name and content are used to filter records.
    # If possible filter during the query, otherwise filter after response is received.
    def list_records(self, rtype=None, name=None, content=None):
        filter_query = {}
        if rtype:
            filter_query["record_type"] = rtype
        if name:
            filter_query["name"] = self._relative_name(name)
        payload = self._get(f"/zones/{self.domain_id}/records", filter_query)
        records = []
        for record in payload:
            # Normalize the API's nested 'zone_record' shape into lexicon's
            # flat record dict.
            processed_record = {
                "type": record["zone_record"]["record_type"],
                "name": self._full_name(record["zone_record"]["name"]),
                "ttl": record["zone_record"]["ttl"],
                "content": record["zone_record"]["data"],
                "id": record["zone_record"]["id"],
            }
            processed_record = self._clean_TXT_record(processed_record)
            records.append(processed_record)
        # Content filtering is done client-side (the API cannot filter by data).
        if content:
            records = [record for record in records if record["content"] == content]
        LOGGER.debug("list_records: %s", records)
        return records

    # Create or update a record.
    def update_record(self, identifier, rtype=None, name=None, content=None):
        # Only the fields actually supplied are sent in the update payload.
        data = {}
        if rtype:
            data["record_type"] = rtype
        if name:
            data["name"] = self._relative_name(name)
        if content:
            data["data"] = content
        payload = self._put(
            f"/zones/{self.domain_id}/records/{identifier}", {"zone_record": data}
        )
        LOGGER.debug("update_record: %s", payload)
        return bool(payload["zone_record"])

    # Delete an existing record.
    # If record does not exist, do nothing.
    def delete_record(self, identifier=None, rtype=None, name=None, content=None):
        # Without an explicit id, delete every record matching the filters.
        delete_record_id = []
        if not identifier:
            records = self.list_records(rtype, name, content)
            delete_record_id = [record["id"] for record in records]
        else:
            delete_record_id.append(identifier)
        LOGGER.debug("delete_records: %s", delete_record_id)
        for record_id in delete_record_id:
            self._delete(f"/zones/{self.domain_id}/records/{record_id}")
        LOGGER.debug("delete_record: %s", True)
        return True

    # Helpers
    def _request(self, action="GET", url="/", data=None, query_params=None):
        if data is None:
            data = {}
        if query_params is None:
            query_params = {}
        response = requests.request(
            action,
            self.api_endpoint + url,
            params=query_params,
            data=json.dumps(data),
            auth=requests.auth.HTTPBasicAuth(
                self._get_provider_option("auth_username"),
                self._get_provider_option("auth_token"),
            ),
            headers={"Content-Type": "application/json", "Accept": "application/json"},
        )
        # if the request fails for any reason, throw an error.
        response.raise_for_status()
return response.json() |
1,444 | get next | # pylint: disable=too-many-lines
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import sys
from typing import Any, AsyncIterable, Callable, Dict, Optional, TypeVar
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import (
ClientAuthenticationError,
HttpResponseError,
ResourceExistsError,
ResourceNotFoundError,
ResourceNotModifiedError,
map_error,
)
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.core.utils import case_insensitive_dict
from azure.mgmt.core.exceptions import ARMErrorFormat
from ... import models as _models
from ..._vendor import _convert_request
from ...operations._usages_operations import build_list_request
if sys.version_info >= (3, 8):
from typing import Literal # pylint: disable=no-name-in-module, ungrouped-imports
else:
from typing_extensions import Literal # type: ignore # pylint: disable=ungrouped-imports
T = TypeVar("T")
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
# NOTE: AutoRest-generated code -- prefer regenerating over hand-editing.
class UsagesOperations:
    """
    .. warning::
        **DO NOT** instantiate this class directly.

        Instead, you should access the following operations through
        :class:`~azure.mgmt.loganalytics.aio.LogAnalyticsManagementClient`'s
        :attr:`usages` attribute.
    """

    models = _models

    def __init__(self, *args, **kwargs) -> None:
        # Generated wiring: client, config and (de)serializers may arrive
        # positionally or by keyword.
        input_args = list(args)
        self._client = input_args.pop(0) if input_args else kwargs.pop("client")
        self._config = input_args.pop(0) if input_args else kwargs.pop("config")
        self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer")
        self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer")

    @distributed_trace
    def list(
        self, resource_group_name: str, workspace_name: str, **kwargs: Any
    ) -> AsyncIterable["_models.UsageMetric"]:
        """Gets a list of usage metrics for a workspace.

        :param resource_group_name: The name of the resource group. The name is case insensitive.
         Required.
        :type resource_group_name: str
        :param workspace_name: The name of the workspace. Required.
        :type workspace_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either UsageMetric or the result of cls(response)
        :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.loganalytics.models.UsageMetric]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        _headers = kwargs.pop("headers", {}) or {}
        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})

        api_version: Literal["2020-08-01"] = kwargs.pop("api_version", _params.pop("api-version", "2020-08-01"))
        cls: ClsType[_models.WorkspaceListUsagesResult] = kwargs.pop("cls", None)

        error_map = {
            401: ClientAuthenticationError,
            404: ResourceNotFoundError,
            409: ResourceExistsError,
            304: ResourceNotModifiedError,
        }
        error_map.update(kwargs.pop("error_map", {}) or {})

        def prepare_request(next_link=None):
            # First page uses the operation template; later pages follow the
            # service-provided continuation link.
            if not next_link:
                request = build_list_request(
                    resource_group_name=resource_group_name,
                    workspace_name=workspace_name,
                    subscription_id=self._config.subscription_id,
                    api_version=api_version,
                    template_url=self.list.metadata["url"],
                    headers=_headers,
                    params=_params,
                )
                request = _convert_request(request)
                request.url = self._client.format_url(request.url)
            else:
                request = HttpRequest("GET", next_link)
                request = _convert_request(request)
                request.url = self._client.format_url(request.url)
                request.method = "GET"
            return request

        async def extract_data(pipeline_response):
            # Deserialize one page into (continuation token, items).
            deserialized = self._deserialize("WorkspaceListUsagesResult", pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)  # type: ignore
            return None, AsyncList(list_of_elem)

        async def METHOD_NAME(next_link=None):
            # Fetch the next page and map HTTP errors to azure-core exceptions.
            request = prepare_request(next_link)

            pipeline_response: PipelineResponse = await self._client._pipeline.run(  # pylint: disable=protected-access
                request, stream=False, **kwargs
            )
            response = pipeline_response.http_response

            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response, error_format=ARMErrorFormat)

            return pipeline_response

        return AsyncItemPaged(METHOD_NAME, extract_data)
list.metadata = {
"url": "/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.OperationalInsights/workspaces/{workspaceName}/usages"
} |
import pytest
import astropy.units as u
from sunpy.net import _attrs as core_attrs
from sunpy.net import attr
from sunpy.net import attrs as a
from sunpy.net.vso import attrs as va
from sunpy.time import TimeRange, parse_time
def test_simpleattr_apply():
    """Applying a single-key ValueAttr through the VSO walker fills the dict."""
    # NOTE: the local name ``a`` shadows the ``sunpy.net.attrs as a`` import
    # within this test's scope.
    a = attr.ValueAttr({('test', ): 1})
    dct = {}
    va._walker.apply(a, None, dct)
    assert dct['test'] == 1
def METHOD_NAME():
    """Constructing a Time attr from a TimeRange keeps both range endpoints."""
    time_attr = core_attrs.Time(TimeRange('2012/1/1', '2012/1/2'))
    assert isinstance(time_attr, core_attrs.Time)
    expected_start = parse_time((2012, 1, 1))
    expected_end = parse_time((2012, 1, 2))
    assert time_attr.min == expected_start
    assert time_attr.max == expected_end
def test_input_error():
    """Time requires a start and an end; a lone string must raise ValueError."""
    with pytest.raises(ValueError):
        core_attrs.Time('2012/1/1')


@pytest.mark.remote_data
def test_simpleattr_create(client):
    """The walker turns a ValueAttr into a VSO API query block."""
    # NOTE: the local ``a`` shadows the ``sunpy.net.attrs as a`` import here.
    a = attr.ValueAttr({('instrument', ): 'eit'})
    assert va._walker.create(a, client.api)[0].instrument == 'eit'


def test_simpleattr_and_duplicate():
    """AND-ing two attrs of the same type is ambiguous and must raise TypeError."""
    # NOTE: the local ``attr`` shadows the ``sunpy.net.attr`` module import here.
    attr = core_attrs.Instrument('foo')
    pytest.raises(TypeError, lambda: attr & core_attrs.Instrument('bar'))
    attr |= a.Source('foo')
    pytest.raises(TypeError, lambda: attr & core_attrs.Instrument('bar'))
    otherattr = core_attrs.Instrument('foo') | a.Source('foo')
    pytest.raises(TypeError, lambda: attr & otherattr)
    pytest.raises(TypeError, lambda: (attr | otherattr) & core_attrs.Instrument('bar'))
    tst = core_attrs.Instrument('foo') & a.Source('foo')
    pytest.raises(TypeError, lambda: tst & tst)


def test_simpleattr_or_eq():
    """OR-ing an attr with an equal attr collapses to the attr itself."""
    attr = core_attrs.Instrument('eit')

    assert attr | attr == attr
    assert attr | core_attrs.Instrument('eit') == attr


def test_complexattr_apply():
    """A multi-key ValueAttr fills nested dict entries when applied."""
    tst = {('test', 'foo'): 'a', ('test', 'bar'): 'b'}
    a = attr.ValueAttr(tst)
    dct = {'test': {}}
    va._walker.apply(a, None, dct)
    assert dct['test'] == {'foo': 'a', 'bar': 'b'}


@pytest.mark.remote_data
def test_complexattr_create(client):
    """Nested-key ValueAttrs map onto nested API query fields."""
    a = attr.ValueAttr({('time', 'start'): 'test'})
    assert va._walker.create(a, client.api)[0].time['start'] == 'test'


def test_complexattr_and_duplicate():
    """AND-ing two Time attrs is ambiguous and must raise TypeError."""
    attr = core_attrs.Time((2011, 1, 1), (2011, 1, 1, 1))
    pytest.raises(TypeError,
                  lambda: attr & core_attrs.Time((2011, 2, 1), (2011, 2, 1, 1)))
    attr |= a.Source('foo')
    pytest.raises(TypeError,
                  lambda: attr & core_attrs.Time((2011, 2, 1), (2011, 2, 1, 1)))


def test_complexattr_or_eq():
    """OR with an equal Time attr collapses to the attr itself."""
    attr = core_attrs.Time((2011, 1, 1), (2011, 1, 1, 1))

    assert attr | attr == attr
    assert attr | core_attrs.Time((2011, 1, 1), (2011, 1, 1, 1)) == attr


def test_attror_and():
    """AND distributes over OR: (A|B) & C == (A&C) | (B&C)."""
    attr = core_attrs.Instrument('foo') | core_attrs.Instrument('bar')
    one = attr & a.Source('bar')
    other = ((core_attrs.Instrument('foo') & a.Source('bar')) |
             (core_attrs.Instrument('bar') & a.Source('bar')))
    assert one == other
def test_wave_inputQuantity():
    """Wavelength must reject non-Quantity inputs with a helpful message."""
    # Fix: the local variable was misspelled ``wrong_type_mesage``.
    wrong_type_message = "Wave inputs must be astropy Quantities"
    with pytest.raises(TypeError) as excinfo:
        core_attrs.Wavelength(10, 23)
    assert wrong_type_message in str(excinfo.value)
    with pytest.raises(TypeError) as excinfo:
        core_attrs.Wavelength(10 * u.AA, 23)
    assert wrong_type_message in str(excinfo.value)
def test_wave_toangstrom():
    """Frequency and energy inputs are converted to their Angstrom equivalents."""
    # TODO: this test should test that inputs are in any of spectral units
    # more than just converted to Angstroms.
    # (factor, unit) pairs chosen so that (value / factor) * unit is the same
    # physical quantity expressed with different prefixes.
    frequency = [(1, 1 * u.Hz),
                 (1e3, 1 * u.kHz),
                 (1e6, 1 * u.MHz),
                 (1e9, 1 * u.GHz)]

    energy = [(1, 1 * u.eV),
              (1e3, 1 * u.keV),
              (1e6, 1 * u.MeV)]

    # 62 eV corresponds to just under 200 Angstrom (E = hc / lambda), so the
    # truncated integer value is 199.
    for factor, unit in energy:
        w = core_attrs.Wavelength((62 / factor) * unit, (62 / factor) * unit)
        assert int(w.min.to(u.AA, u.equivalencies.spectral()).value) == 199

    w = core_attrs.Wavelength(62 * u.eV, 62 * u.eV)
    assert int(w.min.to(u.AA, u.equivalencies.spectral()).value) == 199
    w = core_attrs.Wavelength(62e-3 * u.keV, 62e-3 * u.keV)
    assert int(w.min.to(u.AA, u.equivalencies.spectral()).value) == 199

    # 1.506e16 Hz is the same photon expressed as a frequency (nu = c / lambda).
    for factor, unit in frequency:
        w = core_attrs.Wavelength((1.506e16 / factor) * unit, (1.506e16 / factor) * unit)
        assert int(w.min.to(u.AA, u.equivalencies.spectral()).value) == 199

    w = core_attrs.Wavelength(1.506e16 * u.Hz, 1.506e16 * u.Hz)
    assert int(w.min.to(u.AA, u.equivalencies.spectral()).value) == 199
    w = core_attrs.Wavelength(1.506e7 * u.GHz, 1.506e7 * u.GHz)
    assert int(w.min.to(u.AA, u.equivalencies.spectral()).value) == 199

    # Units with no spectral equivalence (here: mass) must be rejected.
    with pytest.raises(u.UnitsError) as excinfo:
        core_attrs.Wavelength(10 * u.g, 23 * u.g)
    assert ('This unit is not convertible to any of [Unit("Angstrom"), Unit("kHz"), '
            'Unit("keV")]' in str(excinfo.value))
def test_time_xor():
    """XOR of overlapping Time ranges yields the non-overlapping pieces."""
    one = core_attrs.Time((2010, 1, 1), (2010, 1, 2))
    a = one ^ core_attrs.Time((2010, 1, 1, 1), (2010, 1, 1, 2))

    assert a == attr.AttrOr(
        [core_attrs.Time((2010, 1, 1), (2010, 1, 1, 1)),
         core_attrs.Time((2010, 1, 1, 2), (2010, 1, 2))])

    # XOR-ing again carves a second hole out of the remaining pieces.
    a ^= core_attrs.Time((2010, 1, 1, 4), (2010, 1, 1, 5))
    assert a == attr.AttrOr([
        core_attrs.Time((2010, 1, 1), (2010, 1, 1, 1)),
        core_attrs.Time((2010, 1, 1, 2), (2010, 1, 1, 4)),
        core_attrs.Time((2010, 1, 1, 5), (2010, 1, 2))
    ])


def test_wave_xor():
    """XOR of overlapping Wavelength ranges yields the non-overlapping pieces."""
    one = core_attrs.Wavelength(0 * u.AA, 1000 * u.AA)
    a = one ^ core_attrs.Wavelength(200 * u.AA, 400 * u.AA)

    assert a == attr.AttrOr([core_attrs.Wavelength(0 * u.AA, 200 * u.AA),
                             core_attrs.Wavelength(400 * u.AA, 1000 * u.AA)])

    a ^= core_attrs.Wavelength(600 * u.AA, 800 * u.AA)

    assert a == attr.AttrOr(
        [core_attrs.Wavelength(0 * u.AA, 200 * u.AA), core_attrs.Wavelength(400 * u.AA, 600 * u.AA),
         core_attrs.Wavelength(800 * u.AA, 1000 * u.AA)])


def test_err_dummyattr_create():
    """The walker has no create rule for DummyAttr and must raise TypeError."""
    with pytest.raises(TypeError):
        va._walker.create(attr.DummyAttr(), None, {})


def test_err_dummyattr_apply():
    """The walker has no apply rule for DummyAttr and must raise TypeError."""
    with pytest.raises(TypeError):
        va._walker.apply(attr.DummyAttr(), None, {})


def test_wave_repr():
    """Tests the __repr__ method of class vso.attrs.Wave"""
    wav = core_attrs.Wavelength(12 * u.AA, 16 * u.AA)
    moarwav = core_attrs.Wavelength(15 * u.AA, 12 * u.AA)
    assert repr(wav) == "<sunpy.net.attrs.Wavelength(12.0, 16.0, 'Angstrom')>"
    # Reversed bounds are normalised so that min <= max.
    assert repr(moarwav) == "<sunpy.net.attrs.Wavelength(12.0, 15.0, 'Angstrom')>"


def test_construct_extent():
    """Extent stores its constructor arguments on the matching attributes."""
    # yes this is coverage bingo
    ext = va.Extent(10, 20, 30, 40, 'FULLDISK')
    assert ext.x == 10
    assert ext.y == 20
    assert ext.width == 30
    assert ext.length == 40
    assert ext.type == 'FULLDISK'
import numpy as np
from ..optics import Wavefront, AgnosticOpticalElement, make_agnostic_forward, make_agnostic_backward
from ..field import Field, evaluate_supersampled
from ..fourier import FastFourierTransform, make_fft_grid, FourierFilter
class AngularSpectrumPropagator(AgnosticOpticalElement):
    '''The monochromatic angular spectrum propagator for scalar fields.

    The scalar Angular Spectrum propagator is implemented as described by
    [McLeod2014]_. The propagation of an electric field can be described as a transfer
    function in frequency space. The transfer function is taken from
    equation 9 of [McLeod2014]_, and the related impulse response is taken from
    equation 6 of [McLeod2014]_.

    .. [McLeod2014] Robert R. McLeod and Kelvin H. Wagner 2014, "Vector Fourier optics of
        anisotropic materials," Adv. Opt. Photon. 6, 368-412 (2014)

    Parameters
    ----------
    input_grid : anything
        This argument is ignored. The input grid is taken from the incoming wavefront.
    distance : scalar
        The distance to propagate
    num_oversampling : int
        The number of times the transfer function is oversampled. Default is 2.
    refractive_index : scalar
        The refractive index of the medium that the wavefront is propagating in.

    Raises
    ------
    ValueError
        If the `input_grid` is not regular and Cartesian.
    '''
    def __init__(self, input_grid, distance, num_oversampling=2, refractive_index=1):
        self._distance = distance
        self._num_oversampling = num_oversampling
        self._refractive_index = refractive_index

        # The cached Fourier filter is rebuilt per (grid, wavelength) pair.
        AgnosticOpticalElement.__init__(self, grid_dependent=True, wavelength_dependent=True)

    def METHOD_NAME(self, instance_data, input_grid, output_grid, wavelength):
        """Precompute the Fourier filter that implements the propagation.

        Called by the agnostic-element machinery for each (grid, wavelength)
        combination; stores the filter on ``instance_data`` for reuse by
        :meth:`forward` and :meth:`backward`.
        """
        if not input_grid.is_regular or not input_grid.is_('cartesian'):
            raise ValueError('The input grid must be a regular, Cartesian grid.')

        # Wavenumber in the medium: k = 2 * pi * n / lambda.
        k = 2 * np.pi / wavelength * self.evaluate_parameter(self.refractive_index, input_grid, output_grid, wavelength)
        L_max = np.max(input_grid.dims * input_grid.delta)

        # If the grid sampling is too coarse for the analytic transfer function,
        # build it from a supersampled impulse response via an FFT instead —
        # presumably to avoid aliasing of the rapidly varying phase (TODO:
        # confirm against McLeod & Wagner 2014).
        if np.any(input_grid.delta < wavelength * abs(self.distance) / L_max):
            def transfer_function(fourier_grid):
                enlarged_grid = make_fft_grid(fourier_grid)
                fft_upscale = FastFourierTransform(enlarged_grid)

                def impulse_response(grid):
                    # Impulse response from equation 6 of McLeod2014.
                    r_squared = grid.x**2 + grid.y**2 + self.distance**2
                    r = np.sqrt(r_squared)
                    cos_theta = self.distance / r
                    return Field(cos_theta / (2 * np.pi) * np.exp(1j * k * r) * (1 / r_squared - 1j * k / r), grid)

                impulse_response = evaluate_supersampled(impulse_response, enlarged_grid, self.num_oversampling)

                return fft_upscale.forward(impulse_response)
        else:
            def transfer_function_native(fourier_grid):
                # Transfer function from equation 9 of McLeod2014. The ``+ 0j``
                # keeps evanescent components (imaginary k_z) instead of NaNs.
                k_squared = fourier_grid.as_('polar').r**2
                k_z = np.sqrt(k**2 - k_squared + 0j)
                return Field(np.exp(1j * k_z * self.distance), fourier_grid)

            def transfer_function(fourier_grid):
                return evaluate_supersampled(transfer_function_native, fourier_grid, self.num_oversampling)

        instance_data.fourier_filter = FourierFilter(input_grid, transfer_function, q=2)

    @property
    def distance(self):
        # Propagation distance; setting it invalidates all cached filters.
        return self._distance

    @distance.setter
    def distance(self, distance):
        self._distance = distance

        self.clear_cache()

    @property
    def num_oversampling(self):
        # Supersampling factor for the transfer function evaluation.
        return self._num_oversampling

    @num_oversampling.setter
    def num_oversampling(self, num_oversampling):
        self._num_oversampling = num_oversampling

        self.clear_cache()

    @property
    def refractive_index(self):
        # Refractive index of the propagation medium.
        return self._refractive_index

    @refractive_index.setter
    def refractive_index(self, refractive_index):
        self._refractive_index = refractive_index

        self.clear_cache()

    def get_input_grid(self, output_grid, wavelength):
        # Propagation does not resample: the input grid equals the output grid.
        return output_grid

    def get_output_grid(self, input_grid, wavelength):
        # Propagation does not resample: the output grid equals the input grid.
        return input_grid

    @make_agnostic_forward
    def forward(self, instance_data, wavefront):
        '''Propagate a wavefront forward by a certain distance.

        Parameters
        ----------
        wavefront : Wavefront
            The incoming wavefront.

        Returns
        -------
        Wavefront
            The wavefront after the propagation.
        '''
        filtered = instance_data.fourier_filter.forward(wavefront.electric_field)

        return Wavefront(filtered, wavefront.wavelength, wavefront.input_stokes_vector)

    @make_agnostic_backward
    def backward(self, instance_data, wavefront):
        '''Propagate a wavefront backward by a certain distance.

        Parameters
        ----------
        wavefront : Wavefront
            The incoming wavefront.

        Returns
        -------
        Wavefront
            The wavefront after the propagation.
        '''
        filtered = instance_data.fourier_filter.backward(wavefront.electric_field)

        return Wavefront(filtered, wavefront.wavelength, wavefront.input_stokes_vector)
import asyncio
from collections.abc import Sequence
from typing import Any
from jsonpointer import set_pointer
import reactpy
from reactpy.core.layout import Layout
from reactpy.core.serve import serve_layout
from reactpy.core.types import LayoutUpdateMessage
from reactpy.testing import StaticEventHandler
from tests.tooling.common import event_message
# Name of the DOM event used throughout these tests.
EVENT_NAME = "on_event"
# Shared handler with a stable target id so expected models can reference it.
STATIC_EVENT_HANDLER = StaticEventHandler()
def make_send_recv_callbacks(events_to_inject):
    """Build the callbacks handed to ``serve_layout`` for replaying events.

    Returns a ``(changes, send, recv)`` triple: the list that accumulates
    outgoing layout updates plus the two coroutine callbacks.
    """
    changes = []

    # We need a semaphore here to simulate receiving an event after each update is sent.
    # The effect is that the send() and recv() callbacks trade off control. If we did
    # not do this, it would be hard to determine when to halt because, while we might
    # have received all the events, they might not have been sent since the two
    # callbacks are executed in separate loops.
    sem = asyncio.Semaphore(0)

    async def METHOD_NAME(patch):
        # Record the outgoing update and allow recv() to hand over the next event.
        changes.append(patch)
        sem.release()
        if not events_to_inject:
            # All events consumed: tell the layout server to stop.
            raise reactpy.Stop()

    async def recv():
        await sem.acquire()
        try:
            return events_to_inject.pop(0)
        except IndexError:
            # wait forever
            await asyncio.Event().wait()

    return changes, METHOD_NAME, recv
def make_events_and_expected_model():
    """Return four click events plus the layout model expected after replay."""
    events = [event_message(STATIC_EVENT_HANDLER.target)] * 4
    expected_model = {
        "tagName": "",
        "children": [
            {
                "tagName": "div",
                # Four injected events, each incrementing the counter by one.
                "attributes": {"count": 4},
                "eventHandlers": {
                    EVENT_NAME: {
                        "target": STATIC_EVENT_HANDLER.target,
                        "preventDefault": False,
                        "stopPropagation": False,
                    }
                },
            }
        ],
    }
    return events, expected_model
def assert_changes_produce_expected_model(
    changes: Sequence[LayoutUpdateMessage],
    expected_model: Any,
) -> None:
    """Replay *changes* onto an empty model and assert it equals *expected_model*."""
    reconstructed = {}
    for message in changes:
        path = message["path"]
        model = message["model"]
        if not path:
            # A root update merges directly into the top-level mapping.
            reconstructed.update(model)
        else:
            # A nested update is applied at its JSON-pointer path.
            reconstructed = set_pointer(reconstructed, path, model)
    assert reconstructed == expected_model
@reactpy.component
def Counter():
    """Component whose click handler increments a counter kept in hook state."""
    count, change_count = reactpy.hooks.use_reducer(
        (lambda old_count, diff: old_count + diff),
        initial_value=0,
    )
    # Bind the shared static handler so tests can target it by a stable id.
    handler = STATIC_EVENT_HANDLER.use(lambda: change_count(1))
    return reactpy.html.div({EVENT_NAME: handler, "count": count})
async def test_dispatch():
    """Serving the Counter layout with injected events yields the expected model."""
    events, expected_model = make_events_and_expected_model()
    changes, METHOD_NAME, recv = make_send_recv_callbacks(events)
    # One second is ample: the server stops itself once all events are replayed.
    await asyncio.wait_for(serve_layout(Layout(Counter()), METHOD_NAME, recv), 1)
    assert_changes_produce_expected_model(changes, expected_model)
async def test_dispatcher_handles_more_than_one_event_at_a_time():
    """A handler that blocks forever must not prevent later events from running."""
    block_and_never_set = asyncio.Event()
    will_block = asyncio.Event()
    second_event_did_execute = asyncio.Event()

    blocked_handler = StaticEventHandler()
    non_blocked_handler = StaticEventHandler()

    @reactpy.component
    def ComponentWithTwoEventHandlers():
        @blocked_handler.use
        async def block_forever():
            # Simulate a handler that never completes.
            will_block.set()
            await block_and_never_set.wait()

        @non_blocked_handler.use
        async def handle_event():
            second_event_did_execute.set()

        return reactpy.html.div(
            reactpy.html.button({"on_click": block_forever}),
            reactpy.html.button({"on_click": handle_event}),
        )

    send_queue = asyncio.Queue()
    recv_queue = asyncio.Queue()

    task = asyncio.create_task(
        serve_layout(
            reactpy.Layout(ComponentWithTwoEventHandlers()),
            send_queue.put,
            recv_queue.get,
        )
    )

    await recv_queue.put(event_message(blocked_handler.target))
    await will_block.wait()

    await recv_queue.put(event_message(non_blocked_handler.target))
    await second_event_did_execute.wait()

    # Fix: previously the task was cancelled but never awaited, so the
    # cancellation was never observed and any exception raised by serve_layout
    # before the cancel would be silently discarded.
    task.cancel()
    try:
        await task
    except asyncio.CancelledError:
        pass
import json
import unittest.mock
import pytest
import github3
from .helper import create_example_data_helper
from .helper import create_url_helper
from .helper import UnitHelper
from .helper import UnitIteratorHelper
from github3.repos.release import Asset
from github3.repos.release import Release
from github3.users import ShortUser
# Helpers that build API / upload URLs for the release fixtures used below.
url_for = create_url_helper(
    "https://api.github.com/repos/sigmavirus24/github3.py/releases"
)

uploads_url_for = create_url_helper(
    "https://uploads.github.com/repos/sigmavirus24/github3.py/releases"
)

# Loads the canned JSON payload for a Release from the example-data directory.
get_release_example_data = create_example_data_helper("repos_release_example")
class TestRelease(UnitHelper):
    """Unit tests for the Release resource (no network access; mocked session)."""

    described_class = Release
    example_data = get_release_example_data()

    # Attribute tests
    def test_original_assets(self):
        """The raw asset payloads are wrapped in Asset objects."""
        assert self.instance.original_assets is not None
        assert isinstance(self.instance.original_assets[0], Asset)

    def test_has_upload_urlt(self):
        """A release exposes its asset-upload URI template."""
        assert self.instance.upload_urlt is not None

    def test_has_author(self):
        """The release author deserializes to a ShortUser."""
        assert self.instance.author is not None
        assert isinstance(self.instance.author, ShortUser)

    # Method tests
    def test_tarball_archive(self):
        """Verify that we generate the correct URL for a tarball archive."""
        self.instance.archive(format="tarball")

        self.session.get.assert_called_once_with(
            "https://api.github.com/repos/sigmavirus24/github3.py/"
            "tarball/v0.7.1",
            allow_redirects=True,
            stream=True,
        )

    def test_zipball_archive(self):
        """Verify that we generate the correct URL for a zipball archive."""
        self.instance.archive(format="zipball")

        self.session.get.assert_called_once_with(
            "https://api.github.com/repos/sigmavirus24/github3.py/"
            "zipball/v0.7.1",
            allow_redirects=True,
            stream=True,
        )

    def test_unsupported_archive(self):
        """Do not make a request if the archive format is unsupported."""
        self.instance.archive(format="clearly fake")
        assert self.session.get.called is False

    def test_delete(self):
        """Deleting a release issues DELETE against the release URL."""
        self.instance.delete()
        self.session.delete.assert_called_once_with(self.example_data["url"])

    def test_upload_asset(self):
        """Uploading an asset POSTs the raw content to the uploads endpoint."""
        self.session.post.return_value = unittest.mock.Mock(
            status_code=201, json=lambda: self.example_data["assets"][0]
        )
        # Use this test file itself as a convenient text payload.
        with open(__file__) as fd:
            content = fd.read()
            self.instance.upload_asset(
                "text/plain", "test_repos_release.py", content
            )
        self.post_called_with(
            uploads_url_for(
                "/76677/assets?name=%s" % "test_repos_release.py"
            ),
            data=content,
            headers={"Content-Type": "text/plain"},
        )

    def test_upload_asset_with_a_label(self):
        """An optional label is passed through as an extra query parameter."""
        self.session.post.return_value = unittest.mock.Mock(
            status_code=201, json=lambda: self.example_data["assets"][0]
        )
        with open(__file__) as fd:
            content = fd.read()
            self.instance.upload_asset(
                "text/plain", "test_repos_release.py", content, "test-label"
            )
        self.post_called_with(
            uploads_url_for(
                "/76677/assets?name=%s&label=%s"
                % ("test_repos_release.py", "test-label")
            ),
            data=content,
            headers={"Content-Type": "text/plain"},
        )
class TestReleaseIterators(UnitIteratorHelper):
    """Test iterator methods on the Release class."""

    described_class = Release
    example_data = TestRelease.example_data.copy()

    def test_assets(self):
        """Test the request to retrieve a release's assets."""
        i = self.instance.assets()
        # Advancing the iterator triggers the underlying paginated GET.
        self.get_next(i)

        self.session.get.assert_called_once_with(
            url_for("76677/assets"), params={"per_page": 100}, headers={}
        )
class TestAsset(UnitHelper):
    """Unit tests for the release Asset resource."""

    described_class = Asset
    get_asset_example_data = create_example_data_helper("repos_asset_example")
    example_data = get_asset_example_data()

    def test_delete(self):
        """Verify the request to delete an Asset."""
        self.instance.delete()
        self.session.delete.assert_called_once_with(url_for("/assets/37944"))

    @pytest.mark.xfail
    def test_download(self):
        """Verify the request to download an Asset file."""
        with unittest.mock.patch(
            "github3.utils.stream_response_to_file"
        ) as stream:
            self.instance.download()

        # NOTE(review): "octect-stream" looks misspelled (vs "octet-stream");
        # this test is marked xfail — confirm whether the header is intended.
        self.session.get.assert_called_once_with(
            url_for("/assets/37944"),
            stream=True,
            allow_redirects=False,
            headers={"Accept": "application/octect-stream"},
        )
        assert stream.called is False

    def METHOD_NAME(self):
        """Verify the request to download an Asset file."""
        # A 302 from the API should be followed by a second GET to the
        # redirect location with octet-stream headers.
        with unittest.mock.patch.object(
            github3.models.GitHubCore, "_get"
        ) as get:
            get.return_value.status_code = 302
            get.return_value.headers = {"location": "https://fakeurl"}
            self.instance.download()
        data = {
            "headers": {
                "Content-Type": None,
                "Accept": "application/octet-stream",
            },
            "stream": True,
        }
        assert get.call_count == 2
        get.assert_any_call("https://fakeurl", **data)

    def test_edit_without_label(self):
        """Editing with only a name PATCHes just the name field."""
        self.instance.edit("new name")
        self.session.patch.assert_called_once_with(
            self.example_data["url"], data='{"name": "new name"}'
        )

    def test_edit_with_label(self):
        """Editing with a label PATCHes both name and label."""
        self.instance.edit("new name", "label")
        _, args, kwargs = list(self.session.patch.mock_calls[0])
        assert self.example_data["url"] in args
        # JSON key order is not guaranteed, so compare the parsed payload.
        assert json.loads(kwargs["data"]) == {
            "name": "new name",
            "label": "label",
        }
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for data_structures module."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.autograph.operators import data_structures
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import list_ops
from tensorflow.python.ops import tensor_array_ops
from tensorflow.python.platform import test
class ListTest(test.TestCase):
    """Tests for the autograph list operators (tensor lists, TensorArrays, Python lists)."""

    def test_new_list_empty(self):
        """new_list() with no argument produces an (empty) tensor list."""
        l = data_structures.new_list()
        # Can't evaluate an empty list.
        # TODO(mdan): sess.run should allow tf.variant maybe?
        self.assertTrue(isinstance(l, ops.Tensor))

    def test_new_list_tensor(self):
        """new_list() with initial elements round-trips the values."""
        l = data_structures.new_list([3, 4, 5])
        self.assertAllEqual(l, [3, 4, 5])

    def test_tf_tensor_list_new(self):
        """A tensor list built from a Python list stacks back to its values."""
        l = data_structures.tf_tensor_list_new([3, 4, 5])
        t = list_ops.tensor_list_stack(l, element_dtype=dtypes.int32)
        with self.cached_session() as sess:
            self.assertAllEqual(self.evaluate(t), [3, 4, 5])

    def test_tf_tensor_list_new_empty(self):
        """An empty tensor list stacks to an empty tensor."""
        l = data_structures.tf_tensor_list_new([],
                                               element_dtype=dtypes.int32,
                                               element_shape=())
        t = list_ops.tensor_list_stack(l, element_dtype=dtypes.int32)
        with self.cached_session() as sess:
            self.assertAllEqual(self.evaluate(t), [])

    def test_tf_tensor_list_new_from_tensor(self):
        """A tensor list can be built directly from an existing tensor."""
        l = data_structures.tf_tensor_list_new(constant_op.constant([3, 4, 5]))
        t = list_ops.tensor_list_stack(l, element_dtype=dtypes.int32)
        with self.cached_session() as sess:
            self.assertAllEqual(self.evaluate(t), [3, 4, 5])

    @test_util.run_deprecated_v1
    def test_tf_tensor_list_new_illegal_input(self):
        """Inconsistent dtypes/shapes in the initializer must raise ValueError."""
        with self.assertRaises(ValueError):
            data_structures.tf_tensor_list_new([3, 4.0])
        # TODO(mdan): It might make more sense to type cast in this case.
        with self.assertRaises(ValueError):
            data_structures.tf_tensor_list_new([3, 4], element_dtype=dtypes.float32)
        # Tensor lists do support heterogeneous lists.
        self.assertIsNot(data_structures.tf_tensor_list_new([3, [4, 5]]), None)
        with self.assertRaises(ValueError):
            data_structures.tf_tensor_list_new([3, 4], element_shape=(2,))
        with self.assertRaises(ValueError):
            data_structures.tf_tensor_list_new(
                constant_op.constant([1, 2, 3]), element_shape=[1])

    def test_tf_tensor_array_new(self):
        """A TensorArray built from a Python list stacks back to its values."""
        l = data_structures.tf_tensor_array_new([3, 4, 5])
        t = l.stack()
        with self.cached_session() as sess:
            self.assertAllEqual(self.evaluate(t), [3, 4, 5])

    def test_tf_tensor_array_new_illegal_input(self):
        """TensorArrays reject mixed dtypes, ragged elements and bad shapes."""
        with self.assertRaises(ValueError):
            data_structures.tf_tensor_array_new([3, 4.0])
        with self.assertRaises(ValueError):
            data_structures.tf_tensor_array_new([3, 4], element_dtype=dtypes.float32)
        with self.assertRaises(ValueError):
            data_structures.tf_tensor_array_new([3, [4, 5]])
        with self.assertRaises(ValueError):
            data_structures.tf_tensor_array_new([3, 4], element_shape=(2,))
        with self.assertRaises(ValueError):
            data_structures.tf_tensor_array_new([], element_shape=(2,))
        # TAs can infer the shape.
        self.assertIsNot(
            data_structures.tf_tensor_array_new([], element_dtype=dtypes.float32),
            None)

    def test_append_tensor_list(self):
        """list_append on a tensor list appends a full tensor element."""
        l = data_structures.new_list()
        x = constant_op.constant([1, 2, 3])
        l = data_structures.list_append(l, x)

        t = list_ops.tensor_list_stack(l, element_dtype=x.dtype)
        with self.cached_session() as sess:
            self.assertAllEqual(self.evaluate(t), [[1, 2, 3]])

    @test_util.run_v1_only("b/117943489")
    def test_append_tensorarray(self):
        """list_append on a TensorArray returns a new TA with the extra element."""
        l = tensor_array_ops.TensorArray(dtypes.int32, size=0, dynamic_size=True)
        l1 = data_structures.list_append(l, 1)
        l2 = data_structures.list_append(l1, 2)

        with self.cached_session() as sess:
            self.assertAllEqual(self.evaluate(l1.stack()), [1])
            self.assertAllEqual(self.evaluate(l2.stack()), [1, 2])

    def test_append_python(self):
        """list_append falls back to plain Python list append."""
        l = []
        self.assertAllEqual(data_structures.list_append(l, 1), [1])
        self.assertAllEqual(data_structures.list_append(l, 2), [1, 2])

    def test_pop_tensor_list(self):
        """list_pop on a tensor list pops the last element; indexed pop is unsupported."""
        initial_list = constant_op.constant([[1, 2], [3, 4]])
        elem_shape = constant_op.constant([2])
        l = list_ops.tensor_list_from_tensor(initial_list, element_shape=elem_shape)

        opts = data_structures.ListPopOpts(
            element_dtype=initial_list.dtype,
            element_shape=(2,))

        with self.assertRaises(NotImplementedError):
            data_structures.list_pop(l, 0, opts)

        with self.cached_session() as sess:
            l, x = data_structures.list_pop(l, None, opts)
            self.assertAllEqual(self.evaluate(x), [3, 4])

            t = list_ops.tensor_list_stack(l, element_dtype=initial_list.dtype)
            self.assertAllEqual(self.evaluate(t), [[1, 2]])

    def METHOD_NAME(self):
        """list_pop on a Python list returns (remaining_list, popped_value)."""
        l = [1, 2, 3]

        opts = data_structures.ListPopOpts(element_dtype=None, element_shape=())

        self.assertAllEqual(data_structures.list_pop(l, None, opts), ([1, 2], 3))
        self.assertAllEqual(data_structures.list_pop(l, None, opts), ([1], 2))

    def test_stack_tensor_list(self):
        """list_stack on a tensor list recovers the original dense tensor."""
        initial_list = constant_op.constant([[1, 2], [3, 4]])
        elem_shape = constant_op.constant([2])
        l = list_ops.tensor_list_from_tensor(initial_list, element_shape=elem_shape)

        opts = data_structures.ListStackOpts(
            element_dtype=initial_list.dtype, original_call=None)

        with self.cached_session() as sess:
            t = data_structures.list_stack(l, opts)
            self.assertAllEqual(self.evaluate(t), self.evaluate(initial_list))

    @test_util.run_deprecated_v1
    def test_stack_tensor_list_empty(self):
        """Stacking an empty variant list is currently an error."""
        l = list_ops.empty_tensor_list(
            element_shape=None, element_dtype=dtypes.variant)

        opts = data_structures.ListStackOpts(
            element_dtype=dtypes.int32, original_call=None)

        # TODO(mdan): Allow stacking empty lists if the dtype and shape are known.
        with self.assertRaises(ValueError):
            data_structures.list_stack(l, opts)

    def test_stack_fallback(self):
        """For non-tensor lists, list_stack delegates to the original call."""

        def dummy_function(l):
            # Lazy person's mock: just transform the argument in a way in which we
            # can check that this function was indeed called.
            return [x * 2 for x in l]

        opts = data_structures.ListStackOpts(
            element_dtype=None, original_call=dummy_function)

        self.assertAllEqual(data_structures.list_stack([1, 2], opts), [2, 4])
# Allow running this test module directly as a script.
if __name__ == '__main__':
    test.main()
"""
Cloudflared Integration tests
"""
import unittest
import subprocess
import os
import tempfile
from contextlib import contextmanager
from pexpect import pxssh
class TestSSHBase(unittest.TestCase):
    """
    SSH test base class containing constants and helper funcs
    """

    # Connection parameters are injected via environment variables so the same
    # suite can target different deployments.
    HOSTNAME = os.environ["SSH_HOSTNAME"]
    SSH_USER = os.environ["SSH_USER"]
    SSH_TARGET = f"{SSH_USER}@{HOSTNAME}"
    AUTHORIZED_KEYS_SSH_CONFIG = os.environ["AUTHORIZED_KEYS_SSH_CONFIG"]
    SHORT_LIVED_CERT_SSH_CONFIG = os.environ["SHORT_LIVED_CERT_SSH_CONFIG"]
    SSH_OPTIONS = {"StrictHostKeyChecking": "no"}

    @classmethod
    def get_ssh_command(cls, pty=True):
        """
        Return ssh command arg list. If pty is true, a PTY is forced for the session.
        """
        cmd = [
            "ssh",
            "-o",
            "StrictHostKeyChecking=no",
            "-F",
            cls.AUTHORIZED_KEYS_SSH_CONFIG,
            cls.SSH_TARGET,
        ]
        if not pty:
            # -T disables pseudo-terminal allocation.
            cmd += ["-T"]
        else:
            # -tt forces TTY allocation even when stdin is not a terminal.
            cmd += ["-tt"]

        return cmd

    @classmethod
    @contextmanager
    def ssh_session_manager(cls, *args, **kwargs):
        """
        Context manager for interacting with a pxssh session.
        Disables pty echo on the remote server and ensures session is terminated afterward.
        """
        session = pxssh.pxssh(options=cls.SSH_OPTIONS)

        session.login(
            cls.HOSTNAME,
            username=cls.SSH_USER,
            original_prompt=r"[#@$]",
            ssh_config=kwargs.get("ssh_config", cls.AUTHORIZED_KEYS_SSH_CONFIG),
            ssh_tunnels=kwargs.get("ssh_tunnels", {}),
        )
        try:
            # Disable echo so captured output contains only command results.
            session.sendline("stty -echo")
            session.prompt()
            yield session
        finally:
            session.logout()

    @staticmethod
    def get_command_output(session, cmd):
        """
        Executes command on remote ssh server and waits for prompt.
        Returns command output
        """
        session.sendline(cmd)
        session.prompt()
        return session.before.decode().strip()

    def exec_command(self, cmd, shell=False):
        """
        Executes command locally. Raises Assertion error for non-zero return code.
        Returns stdout and stderr
        """
        proc = subprocess.Popen(
            cmd, stderr=subprocess.PIPE, stdout=subprocess.PIPE, shell=shell
        )
        raw_out, raw_err = proc.communicate()

        out = raw_out.decode()
        err = raw_err.decode()
        self.assertEqual(proc.returncode, 0, msg=f"stdout: {out} stderr: {err}")

        return out.strip(), err.strip()
class TestSSHCommandExec(TestSSHBase):
    """
    Tests inline ssh command exec
    """

    # Name of file to be downloaded over SCP on remote server.
    REMOTE_SCP_FILENAME = os.environ["REMOTE_SCP_FILENAME"]

    @classmethod
    def get_scp_base_command(cls):
        """Return the common scp argument list used by the scp tests."""
        return [
            "scp",
            "-o",
            "StrictHostKeyChecking=no",
            "-v",
            "-F",
            cls.AUTHORIZED_KEYS_SSH_CONFIG,
        ]

    @unittest.skip(
        "This creates files on the remote. Should be skipped until server is dockerized."
    )
    def test_verbose_scp_sink_mode(self):
        """Uploading a local temp file to the remote should succeed."""
        with tempfile.NamedTemporaryFile() as fl:
            self.exec_command(
                self.get_scp_base_command() + [fl.name, f"{self.SSH_TARGET}:"]
            )

    def test_verbose_scp_source_mode(self):
        """Downloading the known remote file should produce a non-empty local copy."""
        with tempfile.TemporaryDirectory() as tmpdirname:
            self.exec_command(
                self.get_scp_base_command()
                + [f"{self.SSH_TARGET}:{self.REMOTE_SCP_FILENAME}", tmpdirname]
            )
            local_filename = os.path.join(tmpdirname, self.REMOTE_SCP_FILENAME)
            self.assertTrue(os.path.exists(local_filename))
            self.assertTrue(os.path.getsize(local_filename) > 0)

    def test_pty_command(self):
        """With a forced PTY, the remote session reports a real tty."""
        base_cmd = self.get_ssh_command()

        out, _ = self.exec_command(base_cmd + ["whoami"])
        self.assertEqual(out.strip().lower(), self.SSH_USER.lower())

        out, _ = self.exec_command(base_cmd + ["tty"])
        self.assertNotEqual(out, "not a tty")

    def test_non_pty_command(self):
        """Without a PTY, the remote session reports "not a tty"."""
        base_cmd = self.get_ssh_command(pty=False)

        out, _ = self.exec_command(base_cmd + ["whoami"])
        self.assertEqual(out.strip().lower(), self.SSH_USER.lower())

        out, _ = self.exec_command(base_cmd + ["tty"])
        self.assertEqual(out, "not a tty")
class TestSSHShell(TestSSHBase):
    """
    Tests interactive SSH shell
    """

    # File path to a file on the remote server with root only read privileges.
    ROOT_ONLY_TEST_FILE_PATH = os.environ["ROOT_ONLY_TEST_FILE_PATH"]

    def METHOD_NAME(self):
        """The interactive shell runs as the expected unprivileged user."""
        with self.ssh_session_manager() as session:
            # Test shell launched as correct user
            username = self.get_command_output(session, "whoami")
            self.assertEqual(username.lower(), self.SSH_USER.lower())

            # Test USER env variable set
            user_var = self.get_command_output(session, "echo $USER")
            self.assertEqual(user_var.lower(), self.SSH_USER.lower())

            # Test HOME env variable set to true user home.
            home_env = self.get_command_output(session, "echo $HOME")
            pwd = self.get_command_output(session, "pwd")
            self.assertEqual(pwd, home_env)

            # Test shell launched in correct user home dir.
            self.assertIn(username, pwd)

            # Ensure shell launched with correct user's permissions and privs.
            # Can't read root owned 0700 files.
            output = self.get_command_output(
                session, f"cat {self.ROOT_ONLY_TEST_FILE_PATH}"
            )
            self.assertIn("Permission denied", output)

    def test_short_lived_cert_auth(self):
        """Logging in with the short-lived-certificate config yields the same user."""
        with self.ssh_session_manager(
            ssh_config=self.SHORT_LIVED_CERT_SSH_CONFIG
        ) as session:
            username = self.get_command_output(session, "whoami")
            self.assertEqual(username.lower(), self.SSH_USER.lower())
# Fix: run the test suite only when executed as a script. An unguarded
# unittest.main() at module level runs (and sys.exit()s) whenever this
# module is merely imported, e.g. by a test collector.
if __name__ == "__main__":
    unittest.main()
#
# Copyright (c) 2023 Airbyte, Inc., all rights reserved.
#
from glob import glob
from typing import List, Optional, Tuple
import airbyte_api_client
import click
from octavia_cli.base_commands import OctaviaCommand
from octavia_cli.check_context import REQUIRED_PROJECT_DIRECTORIES, requires_init
from .diff_helpers import display_diff_line
from .resources import BaseResource
from .resources import factory as resource_factory
@click.command(cls=OctaviaCommand, name="apply", help="Create or update Airbyte remote resources according local YAML configurations.")
@click.option("--file", "-f", "configurations_files", type=click.Path(), multiple=True)
@click.option("--force", is_flag=True, default=False, help="Does not display the diff and updates without user prompt.")
@click.pass_context
@requires_init
def apply(ctx: click.Context, configurations_files: List[click.Path], force: bool):
    """Entry point for ``octavia apply``: create or update each configured resource.

    When no ``--file`` option is given, all local YAML configurations found in
    the project directories are applied, in APPLY_PRIORITY order.
    """
    if not configurations_files:
        configurations_files = find_local_configuration_files()

    resources = METHOD_NAME(configurations_files, ctx.obj["API_CLIENT"], ctx.obj["WORKSPACE_ID"])
    for resource in resources:
        apply_single_resource(resource, force)
def METHOD_NAME(
    configuration_files: List[str], api_client: airbyte_api_client.ApiClient, workspace_id: str
) -> List[BaseResource]:
    """Create resource objects with factory and sort according to apply priority.

    Args:
        configuration_files (List[str]): List of YAML configuration files.
        api_client (airbyte_api_client.ApiClient): the Airbyte API client.
        workspace_id (str): current Airbyte workspace id.

    Returns:
        List[BaseResource]: Resources sorted according to their apply priority.
    """
    resources = []
    for configuration_path in configuration_files:
        resources.append(resource_factory(api_client, workspace_id, configuration_path))
    resources.sort(key=lambda resource: resource.APPLY_PRIORITY)
    return resources
def apply_single_resource(resource: BaseResource, force: bool) -> None:
    """Runs resource creation if it was not created, update it otherwise.

    Args:
        resource (BaseResource): The resource to apply.
        force (bool): Whether force mode is on.
    """
    if resource.was_created:
        status_message = click.style(
            f"🐙 - {resource.resource_name} exists on your Airbyte instance according to your state file, let's check if we need to update it!",
            fg="yellow",
        )
        click.echo(status_message)
        messages = update_resource(resource, force)
    else:
        status_message = click.style(f"🐙 - {resource.resource_name} does not exists on your Airbyte instance, let's create it!", fg="green")
        click.echo(status_message)
        messages = create_resource(resource)
    click.echo("\n".join(messages))
def should_update_resource(force: bool, user_validation: Optional[bool], local_file_changed: bool) -> Tuple[bool, str]:
    """Function to decide if the resource needs an update or not.

    Args:
        force (bool): Whether force mode is on.
        user_validation (bool): User validated the existing changes.
        local_file_changed (bool): Whether the local file describing the resource was modified.

    Returns:
        Tuple[bool, str]: Boolean to know if resource should be updated and string describing the update reason.
    """

    def styled(reason: str) -> str:
        # All reasons are rendered green, matching the CLI's status style.
        return click.style(reason, fg="green")

    # Decision ladder: force wins, then explicit user choice, then a detected
    # local change (which may hide a secret-field edit), else no update.
    if force:
        return True, styled("🚨 - Running update because the force mode is activated.")
    if user_validation is True:
        return True, styled("🟢 - Running update because you validated the changes.")
    if user_validation is False:
        return False, styled("🔴 - Did not update because you refused the changes.")
    if user_validation is None and local_file_changed:
        return True, styled(
            "🟡 - Running update because a local file change was detected and a secret field might have been edited."
        )
    return False, styled("😴 - Did not update because no change detected.")
def prompt_for_diff_validation(resource_name: str, diff: str) -> bool:
    """Display the diff to user and prompt them from validation.

    Args:
        resource_name (str): Name of the resource the diff was computed for.
        diff (str): The diff.

    Returns:
        bool: Whether user validated the diff.
    """
    # Nothing to review means nothing to confirm.
    if not diff:
        return False

    header = click.style(
        "👀 - Here's the computed diff (🚨 remind that diff on secret fields are not displayed):", fg="magenta", bold=True
    )
    click.echo(header)
    for diff_line in diff.split("\n"):
        display_diff_line(diff_line)
    return click.confirm(click.style(f"❓ - Do you want to update {resource_name}?", bold=True))
def create_resource(resource: BaseResource) -> List[str]:
    """Run a resource creation.

    Args:
        resource (BaseResource): The resource to create.

    Returns:
        List[str]: Post create messages to display to standard output.
    """
    created_resource, state = resource.create()
    success_message = click.style(
        f"🎉 - Successfully created {created_resource.name} on your Airbyte instance!", fg="green", bold=True
    )
    state_message = click.style(f"💾 - New state for {created_resource.name} saved at {state.path}", fg="yellow")
    return [success_message, state_message]
def update_resource(resource: BaseResource, force: bool) -> List[str]:
    """Run a resource update. Check if update is required and prompt for user diff validation if needed.

    Args:
        resource (BaseResource): Resource to update
        force (bool): Whether force mode is on.

    Returns:
        List[str]: Post update messages to display to standard output.
    """
    output_messages = []
    diff = resource.get_diff_with_remote_resource()
    user_validation = None
    # Only prompt the user when force mode is off and there is a diff to show.
    if not force and diff:
        user_validation = prompt_for_diff_validation(resource.resource_name, diff)

    should_update, update_reason = should_update_resource(force, user_validation, resource.local_file_changed)
    click.echo(update_reason)
    if should_update:
        updated_resource, state = resource.update()
        output_messages.append(
            click.style(f"🎉 - Successfully updated {updated_resource.name} on your Airbyte instance!", fg="green", bold=True)
        )
        output_messages.append(click.style(f"💾 - New state for {updated_resource.name} stored at {state.path}.", fg="yellow"))
    return output_messages
def find_local_configuration_files() -> List[str]:
    """Discover local configuration files.

    Recursively searches every required project directory for
    ``configuration.yaml`` files.

    Returns:
        List[str]: Paths to YAML configuration files.
    """
    configuration_files = []
    for resource_directory in REQUIRED_PROJECT_DIRECTORIES:
        # ``**`` only matches nested directories when recursive=True is set;
        # without it glob silently treats ``**`` like a single ``*`` and
        # misses configurations more than one level deep.
        configuration_files += glob(f"./{resource_directory}/**/configuration.yaml", recursive=True)
    if not configuration_files:
        click.echo(click.style("😒 - No YAML file found to run apply.", fg="red"))
    return configuration_files
1,452 | process | """
Update vote data for Reddit datasets
"""
import shutil
import praw, praw.exceptions
import csv
from prawcore.exceptions import Forbidden, NotFound, PrawcoreException
from backend.lib.processor import BasicProcessor
from common.lib.user_input import UserInput
from common.lib.exceptions import ProcessorInterruptedException
from common.config_manager import config
__author__ = "Stijn Peeters"
__credits__ = ["Stijn Peeters"]
__maintainer__ = "Stijn Peeters"
__email__ = "4cat@oilab.eu"
csv.field_size_limit(1024 * 1024 * 1024)
class RedditVoteChecker(BasicProcessor):
    """
    Update voting information for Reddit data

    Re-fetches every thread in the source dataset through the Reddit API,
    collects the current thread and comment scores, and rewrites the source
    dataset with the refreshed values.
    """
    type = "get-reddit-votes"  # job type ID
    category = "Filtering"  # category
    title = "Update Reddit scores"  # title displayed in UI
    description = "Updates the scores for each post and comment to more accurately reflect the real score. Can only be used on datasets with < 5,000 posts due to the heavy usage of the Reddit API."  # description displayed in UI
    extension = "csv"  # extension of result file, used internally and in UI

    config = {
        # Reddit API keys
        'api.reddit.client_id': {
            'type': UserInput.OPTION_TEXT,
            'default': "",
            'help': 'Reddit API Client ID',
            'tooltip': "",
        },
        'api.reddit.secret': {
            'type': UserInput.OPTION_TEXT,
            'default': "",
            'help': 'Reddit API Secret',
            'tooltip': "",
        },
    }

    @classmethod
    def is_compatible_with(cls, module=None, user=None):
        """
        Allow processor if dataset is a Reddit dataset

        Only available when both API credentials are configured, and only for
        top-level Reddit search datasets small enough for the API quota.

        :param module:  Module to determine compatibility with
        :param user:  User for whom configuration is resolved
        """
        if config.get('api.reddit.client_id', False, user=user) and config.get('api.reddit.secret', False, user=user):
            return module.is_top_dataset() and module.type == "reddit-search" and module.num_rows <= 5000
        return False

    def METHOD_NAME(self):
        """
        Update the scores of all threads and posts in the source dataset.

        Queries the Reddit API once per thread (cheaper than per post when a
        thread contains several posts), writes a new CSV with the refreshed
        scores, then replaces the source dataset file with it.
        """
        try:
            user_agent = "4cat:4cat:v1.0 (by /u/oilab-4cat)"
            reddit = praw.Reddit(client_id=self.config.get('api.reddit.client_id'),
                                 client_secret=self.config.get('api.reddit.secret'),
                                 user_agent=user_agent)
        except praw.exceptions.PRAWException:
            # unclear what kind of expression gets thrown here
            self.dataset.update_status("Could not connect to Reddit. 4CAT may be configured wrong.")
            self.dataset.finish(0)
            return

        # get thread IDs
        # We're assuming here that there are multiple posts per thread. Hence,
        # querying full threads and then retaining relevant posts is cheaper
        # than querying individual posts, so we first gather all thread IDs to
        # query.
        thread_ids = set()
        for post in self.source_dataset.iterate_items(self):
            thread_ids.add(post["thread_id"])

        post_scores = {}
        thread_scores = {}
        processed = 0
        failed = 0

        self.dataset.update_status("Retrieving scores via Reddit API")
        for thread_id in thread_ids:
            if self.interrupted:
                raise ProcessorInterruptedException("Halted while querying thread data from Reddit")

            # get info for all comments in the thread
            try:
                thread = reddit.submission(id=thread_id)
                thread.comments.replace_more(limit=None)
                thread_scores[thread.id] = thread.score
                for comment in thread.comments.list():
                    post_scores[comment.id] = comment.score
            # Forbidden and NotFound are subclasses of PrawcoreException, so
            # they MUST be caught before the generic handler below; otherwise
            # these branches are unreachable dead code.
            except Forbidden:
                self.dataset.update_status("Got error 403 while getting data from Reddit. Reddit may have blocked 4CAT.", is_final=True)
                self.dataset.finish(0)
                return
            except NotFound:
                self.dataset.log("Thread %s no longer exists (404), skipping" % thread_id)
                failed += 1
            except (praw.exceptions.PRAWException, PrawcoreException) as e:
                # Not every exception in this family carries an HTTP response,
                # so access it defensively instead of assuming e.response.
                status_code = getattr(getattr(e, "response", None), "status_code", None)
                if status_code == 401:
                    self.dataset.update_status("Unauthorized response from Reddit; check API keys. (%s)" % str(e))
                else:
                    self.dataset.update_status("Error while communicating with Reddit, halting processor. (%s)" % str(e))
                self.dataset.finish(0)
                return

            processed += 1
            self.dataset.update_status("Retrieved updated scores for %i/%i threads" % (processed, len(thread_ids)))
            self.dataset.update_progress(processed / len(thread_ids))

        # now write a new CSV with the updated scores
        # get field names
        fieldnames = [*self.source_dataset.get_item_keys(self)]
        if "score" not in fieldnames:
            fieldnames.append("score")

        self.dataset.update_status("Writing results to file")
        with self.dataset.get_results_path().open("w") as output:
            writer = csv.DictWriter(output, fieldnames=fieldnames)
            writer.writeheader()

            processed = 0
            for post in self.source_dataset.iterate_items(self):
                # threads may be included too, so store the right score
                if post["thread_id"] == post["id"] and post["thread_id"] in thread_scores:
                    post["score"] = thread_scores[post["thread_id"]]
                elif post["id"] in post_scores:
                    post["score"] = post_scores[post["id"]]
                else:
                    failed += 1
                    self.dataset.log("Post %s no longer exists, skipping" % post["id"])

                writer.writerow(post)
                processed += 1

        # now comes the big trick - replace original dataset with updated one
        shutil.move(self.dataset.get_results_path(), self.source_dataset.get_results_path())

        if failed > 0:
            self.dataset.update_status("Scores retrieved and dataset updated, but unable to find new scores for some "
                                       "deleted posts. Check the dataset log for details.", is_final=True)
        else:
            self.dataset.update_status("Scores retrieved, parent dataset updated.")

        self.dataset.finish(processed)
1,453 | resolution | # The PEP 484 type hints stub file for the QtSvg module.
#
# Generated by SIP 6.4.0
#
# Copyright (c) 2021 Riverbank Computing Limited <info@riverbankcomputing.com>
#
# This file is part of PyQt5.
#
# This file may be used under the terms of the GNU General Public License
# version 3.0 as published by the Free Software Foundation and appearing in
# the file LICENSE included in the packaging of this file. Please review the
# following information to ensure the GNU General Public License version 3.0
# requirements will be met: http://www.gnu.org/copyleft/gpl.html.
#
# If you do not wish to use this file under the terms of the GPL version 3.0
# then you may purchase a commercial license. For more information contact
# info@riverbankcomputing.com.
#
# This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
# WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
import typing
from PyQt5 import sip
from PyQt5 import QtWidgets
from PyQt5 import QtGui
from PyQt5 import QtCore
# Support for QDate, QDateTime and QTime.
import datetime
# Convenient type aliases.
PYQT_SLOT = typing.Union[typing.Callable[..., None], QtCore.pyqtBoundSignal]
# Convenient aliases for complicated OpenGL types.
PYQT_OPENGL_ARRAY = typing.Union[typing.Sequence[int], typing.Sequence[float],
sip.Buffer, None]
PYQT_OPENGL_BOUND_ARRAY = typing.Union[typing.Sequence[int],
typing.Sequence[float], sip.Buffer, int, None]
class QGraphicsSvgItem(QtWidgets.QGraphicsObject):
    """Stub for QGraphicsSvgItem: a graphics item that renders SVG content via a QSvgRenderer."""

    @typing.overload
    def __init__(self, parent: typing.Optional[QtWidgets.QGraphicsItem] = ...) -> None: ...
    @typing.overload
    def __init__(self, fileName: str, parent: typing.Optional[QtWidgets.QGraphicsItem] = ...) -> None: ...
    def type(self) -> int: ...
    def paint(self, painter: QtGui.QPainter, option: QtWidgets.QStyleOptionGraphicsItem, widget: typing.Optional[QtWidgets.QWidget] = ...) -> None: ...
    def boundingRect(self) -> QtCore.QRectF: ...
    def maximumCacheSize(self) -> QtCore.QSize: ...
    def setMaximumCacheSize(self, size: QtCore.QSize) -> None: ...
    def elementId(self) -> str: ...
    def setElementId(self, id: str) -> None: ...
    def renderer(self) -> 'QSvgRenderer': ...
    def setSharedRenderer(self, renderer: 'QSvgRenderer') -> None: ...
class QSvgGenerator(QtGui.QPaintDevice):
    """Stub for QSvgGenerator: a paint device that records QPainter commands as an SVG file or device."""

    def __init__(self) -> None: ...
    def metric(self, metric: QtGui.QPaintDevice.PaintDeviceMetric) -> int: ...
    def paintEngine(self) -> QtGui.QPaintEngine: ...
    @typing.overload
    def setViewBox(self, viewBox: QtCore.QRect) -> None: ...
    @typing.overload
    def setViewBox(self, viewBox: QtCore.QRectF) -> None: ...
    def viewBoxF(self) -> QtCore.QRectF: ...
    def viewBox(self) -> QtCore.QRect: ...
    def setDescription(self, description: str) -> None: ...
    def description(self) -> str: ...
    def setTitle(self, title: str) -> None: ...
    def title(self) -> str: ...
    def setResolution(self, METHOD_NAME: int) -> None: ...
    def METHOD_NAME(self) -> int: ...
    def setOutputDevice(self, outputDevice: QtCore.QIODevice) -> None: ...
    def outputDevice(self) -> QtCore.QIODevice: ...
    def setFileName(self, fileName: str) -> None: ...
    def fileName(self) -> str: ...
    def setSize(self, size: QtCore.QSize) -> None: ...
    def size(self) -> QtCore.QSize: ...
class QSvgRenderer(QtCore.QObject):
    """Stub for QSvgRenderer: loads and renders (possibly animated) SVG documents onto a QPainter."""

    @typing.overload
    def __init__(self, parent: typing.Optional[QtCore.QObject] = ...) -> None: ...
    @typing.overload
    def __init__(self, filename: str, parent: typing.Optional[QtCore.QObject] = ...) -> None: ...
    @typing.overload
    def __init__(self, contents: typing.Union[QtCore.QByteArray, bytes, bytearray], parent: typing.Optional[QtCore.QObject] = ...) -> None: ...
    @typing.overload
    def __init__(self, contents: QtCore.QXmlStreamReader, parent: typing.Optional[QtCore.QObject] = ...) -> None: ...
    def transformForElement(self, id: str) -> QtGui.QTransform: ...
    def setAspectRatioMode(self, mode: QtCore.Qt.AspectRatioMode) -> None: ...
    def aspectRatioMode(self) -> QtCore.Qt.AspectRatioMode: ...
    repaintNeeded: typing.ClassVar[QtCore.pyqtSignal]
    @typing.overload
    def render(self, p: QtGui.QPainter) -> None: ...
    @typing.overload
    def render(self, p: QtGui.QPainter, bounds: QtCore.QRectF) -> None: ...
    @typing.overload
    def render(self, painter: QtGui.QPainter, elementId: str, bounds: QtCore.QRectF = ...) -> None: ...
    @typing.overload
    def load(self, filename: str) -> bool: ...
    @typing.overload
    def load(self, contents: typing.Union[QtCore.QByteArray, bytes, bytearray]) -> bool: ...
    @typing.overload
    def load(self, contents: QtCore.QXmlStreamReader) -> bool: ...
    def animationDuration(self) -> int: ...
    def setCurrentFrame(self, a0: int) -> None: ...
    def currentFrame(self) -> int: ...
    def setFramesPerSecond(self, num: int) -> None: ...
    def framesPerSecond(self) -> int: ...
    def boundsOnElement(self, id: str) -> QtCore.QRectF: ...
    def animated(self) -> bool: ...
    @typing.overload
    def setViewBox(self, viewbox: QtCore.QRect) -> None: ...
    @typing.overload
    def setViewBox(self, viewbox: QtCore.QRectF) -> None: ...
    def viewBoxF(self) -> QtCore.QRectF: ...
    def viewBox(self) -> QtCore.QRect: ...
    def elementExists(self, id: str) -> bool: ...
    def defaultSize(self) -> QtCore.QSize: ...
    def isValid(self) -> bool: ...
class QSvgWidget(QtWidgets.QWidget):
    """Stub for QSvgWidget: a widget that displays the contents of an SVG file."""

    @typing.overload
    def __init__(self, parent: typing.Optional[QtWidgets.QWidget] = ...) -> None: ...
    @typing.overload
    def __init__(self, file: str, parent: typing.Optional[QtWidgets.QWidget] = ...) -> None: ...
    def paintEvent(self, event: QtGui.QPaintEvent) -> None: ...
    @typing.overload
    def load(self, file: str) -> None: ...
    @typing.overload
    def load(self, contents: typing.Union[QtCore.QByteArray, bytes, bytearray]) -> None: ...
    def sizeHint(self) -> QtCore.QSize: ...
    def renderer(self) -> QSvgRenderer: ...
1,454 | create participant | """
Generate fake records. Only used in dev & test.
"""
from datetime import date
from generic.models import User
from app.models import *
from semester.models import Semester, SemesterType
from boot import config
from utils.marker import fix_me
UIDS = ["1", "2"]
TEACHER_UIDS = ["10", "11"]
ORG_UIDS = ['zz00001', 'zz00002', 'zz00000']
ORG_NAMES = ['绘画班', '舞蹈班', 'Official']
# TODO: Change Settings
assert config.DEBUG, 'Should not import fake_records in production env.'
def delete_all():
    """Wipe all users, organization types and organization tags (dev/test only)."""
    for model in (User, OrganizationType, OrganizationTag):
        model.objects.all().delete()
def create_superuser():
    """Create the default admin/admin account, ignoring the error if it already exists."""
    try:
        User.objects.create_superuser(username='admin', password='admin',
                                      email='admin@notexist.com')
    except Exception:
        # A bare ``except:`` would also swallow KeyboardInterrupt/SystemExit.
        # Catching Exception keeps the original "ignore if already created"
        # intent while letting process-control exceptions propagate.
        pass
@fix_me
def _create_old_user(username, password, usertype):
    """Get or create a user account, set its password/type and mark it as not new."""
    account, was_created = User.objects.get_or_create(username=username)
    account.set_password(password)
    account.is_newuser = False
    account.utype = usertype
    account.save()
    return account, was_created
def create_np():
    """Create the fake students and teachers together with their NaturalPerson profiles.

    The four near-identical creation blocks are collapsed into two
    data-driven loops; records are only created when the user is new.
    """
    # (uid, display name, gender, biography)
    student_specs = [
        (UIDS[0], '1号学生', NaturalPerson.Gender.MALE, '我是1号学生'),
        (UIDS[1], '2号学生', NaturalPerson.Gender.FEMALE, '我是2号学生'),
    ]
    for uid, name, gender, biography in student_specs:
        user, created = _create_old_user(uid, uid, User.Type.PERSON)
        if created:
            NaturalPerson.objects.create(
                person_id=user,
                stu_id_dbonly=uid,
                name=name,
                gender=gender,
                stu_major='元培计划(待定)',
                stu_grade='2020',
                stu_class=5,
                email=uid + '@stu.pku.edu.cn',
                telephone=None,
                visit_times=100,
                biography=biography,
                identity=NaturalPerson.Identity.STUDENT,
            )

    # (uid, display name, biography); both fake teachers are male in the
    # original fixtures.
    teacher_specs = [
        (TEACHER_UIDS[0], '1号老师', '我是1号老师'),
        (TEACHER_UIDS[1], '2号老师', '我是2号老师'),
    ]
    for uid, name, biography in teacher_specs:
        user, created = _create_old_user(uid, uid, User.Type.PERSON)
        if created:
            NaturalPerson.objects.create(
                person_id=user,
                stu_id_dbonly=uid,
                name=name,
                gender=NaturalPerson.Gender.MALE,
                email=uid + '@pku.edu.cn',
                telephone=None,
                visit_times=100,
                biography=biography,
                identity=NaturalPerson.Identity.TEACHER,
            )
def create_org_type():
    """Create the single '学生小组' organization type, led by student 1."""
    user = User.objects.get(username=UIDS[0])
    incharge = NaturalPerson.objects.get_by_user(user)
    # The created object was previously bound to an unused local; the return
    # value is not needed here.
    OrganizationType.objects.create(
        otype_id=1,
        otype_name='学生小组',
        incharge=incharge,
        job_name_list=['部长', '副部长', '部员'],
    )
def create_org_tag():
    """Create the two default organization tags."""
    tag_specs = [
        ('兴趣', OrganizationTag.ColorChoice.red),
        ('住宿生活', OrganizationTag.ColorChoice.blue),
    ]
    for tag_name, tag_color in tag_specs:
        OrganizationTag.objects.create(name=tag_name, color=tag_color)
def create_org():
    """Create the fake organizations; all share type 1 and the '兴趣' tag."""
    # Both lookups are loop-invariant, so resolve them once up front.
    otype = OrganizationType.objects.get(otype_id=1)
    interest_tag = OrganizationTag.objects.get(name='兴趣')
    for uname, oname in zip(ORG_UIDS, ORG_NAMES):
        user, created = _create_old_user(uname, uname, User.Type.ORG)
        if not created:
            continue
        org = Organization.objects.create(
            organization_id=user,
            oname=oname,
            otype=otype,
        )
        org.tags.set([interest_tag])
        org.save()
def _create_position(person_uid, org_uid, pos, is_admin):
    """Create a Position linking a person (by uid) to an organization (by uid)."""
    person = NaturalPerson.objects.get_by_user(User.objects.get_user(person_uid))
    org = Organization.objects.get_by_user(User.objects.get_user(org_uid))
    Position.objects.create(person=person, org=org, pos=pos, is_admin=is_admin)
def create_position():
    """Wire up the fake membership/administration relations between people and orgs."""
    # stu 1 is admin of hhb
    _create_position(UIDS[0], ORG_UIDS[0], 0, True)
    # stu 1 is one of wdb
    _create_position(UIDS[0], ORG_UIDS[1], 1, False)
    # tea 1 is admin of wdb
    _create_position(TEACHER_UIDS[0], ORG_UIDS[1], 0, True)
    # stu 2 is one of hhb
    _create_position(UIDS[1], ORG_UIDS[0], 1, False)
def create_activity():
    # Placeholder: activity fixtures are not generated yet.
    ...


def METHOD_NAME():
    # Placeholder: participant fixtures are not generated yet.
    ...
def create_semester():
    """Create the current and next semester records based on today's date.

    Spring runs 2.1-6.30 and autumn 9.1-1.31; during the summer vacation the
    "current" semester falls back to the spring semester.
    """
    spring_type = SemesterType.objects.create(name = "春季学期")
    autumn_type = SemesterType.objects.create(name = "秋季学期")
    #By default, spring semester is 2.1-6.30, autumn semester is 9.1-1.31
    #For summer vacation, the current semester object falls back to last semester, i.e. spring semester
    today = date.today()
    spring_start = date(today.year, 2, 1)
    if spring_start <= today <= date(today.year, 8, 31):
        #current semester is spring semester
        Semester.objects.bulk_create(
            [
                Semester(year=today.year, type=spring_type,
                         start_date=spring_start, end_date=date(today.year, 6, 30)),
                Semester(year=today.year, type=autumn_type,
                         start_date=date(today.year, 9, 1), end_date=date(today.year+1, 1, 31))
            ]
        )
    else:
        #current semester is autumn semester
        #if today.date is before 2.1, then semester's year is today.year-1
        cur_year = today.year if today.month >= 9 else today.year - 1
        Semester.objects.bulk_create(
            [
                Semester(year=cur_year, type=autumn_type,
                         start_date=date(cur_year, 9, 1), end_date=date(cur_year+1, 1, 31)),
                Semester(year=cur_year+1, type=spring_type,
                         start_date=date(cur_year+1, 2, 1), end_date=date(cur_year+1, 6, 30))
            ]
        )
def create_all():
    """Reset the database and populate it with all fake dev/test records."""
    # TODO: Add more
    # delete all
    delete_all()
    # person
    create_superuser()
    create_np()
    # org
    create_org_type()
    create_org_tag()
    create_org()
    create_position()
    # semester
    create_semester()
1,455 | get general dataframe | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Copyright 2018-2022 F4PGA Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# SPDX-License-Identifier: Apache-2.0
import pandas
def get_clock_dataframe(results):
    """Build a per-design dataframe of clock statistics.

    Each design contributes one row per named clock when its ``max_freq``
    entry is a dict of clock descriptions, a single row named ``clk`` when it
    is a bare float (legacy format, value stored as-is), and an all-None row
    otherwise.

    Args:
        results: mapping with parallel lists under ``design`` and ``max_freq``.

    Returns:
        pandas.DataFrame indexed by design name with the clock columns.
    """
    designs = []

    # Clock-related lists
    actual_frequency = []
    requested_frequency = []
    hold_violation = []
    setup_violation = []
    clock_met = []
    clock_names = []

    for idx, design in enumerate(results['design']):
        clock = results['max_freq'][idx]
        if clock and isinstance(clock, dict):
            # Full format: one row per named clock, frequencies in MHz.
            for key, value in clock.items():
                designs.append(design)
                clock_names.append(key)
                actual_frequency.append(value['actual'] / 1e6)
                requested_frequency.append(value['requested'] / 1e6)
                hold_violation.append(value['hold_violation'])
                setup_violation.append(value['setup_violation'])
                clock_met.append(value['met'])
        else:
            # Legacy scalar format (or no clock data at all).
            clock_name = None
            actual_freq = None
            if clock and isinstance(clock, float):
                clock_name = 'clk'
                actual_freq = clock

            designs.append(design)
            clock_names.append(clock_name)
            actual_frequency.append(actual_freq)
            requested_frequency.append(None)
            hold_violation.append(None)
            setup_violation.append(None)
            clock_met.append(None)

    index = pandas.Index(designs)
    return pandas.DataFrame(
        {
            'clocks': clock_names,
            'clock_actual_frequency': actual_frequency,
            'clock_requested_frequency': requested_frequency,
            'clock_hold_violation': hold_violation,
            'clock_setup_violation': setup_violation,
            'clock_met': clock_met,
        },
        index=index
    )
def METHOD_NAME(results):
    """Build the general per-design dataframe (runtimes, resources, tool versions).

    Args:
        results: mapping with a ``design`` list plus parallel lists under
            ``runtime``, ``resources`` and ``versions``; any other keys are
            copied through as plain columns.

    Returns:
        pandas.DataFrame indexed by design name.
    """
    designs = results['design']

    # Get runtimes: one "<stage>_time" column per runtime stage.
    runtimes = dict()
    for runtime in results['runtime']:
        if not runtimes:
            runtimes = {k: [] for k in runtime.keys()}
        for k, v in runtime.items():
            runtimes[k].append(v)

    runtimes_keys = list(runtimes.keys())
    for key in runtimes_keys:
        runtimes["{}_time".format(key)] = runtimes.pop(key)

    # Get resources: one "#<resource>" column per resource counter.
    resources = dict()
    for resource in results['resources']:
        if not resources:
            resources = {k: [] for k in resource.keys()}
        for k, v in resource.items():
            # Empty strings (missing counts) become None.
            value = int(float(v)) if v else None
            resources[k].append(value)

    resources_keys = list(resources.keys())
    for key in resources_keys:
        resources["#{}".format(key)] = resources.pop(key)

    # Get versions: one "<tool>_version" column per tool seen in any run.
    tools = dict()

    # Initialize versions dictionary with all possible
    # versions of the tools.
    for version in results['versions']:
        for key in version:
            tools[key] = []

    for version in results['versions']:
        for k, v in version.items():
            tools[k].append(v)
        # Pad tools missing from this run so all columns stay aligned.
        # Plain set difference: each version's keys are a subset of tools'
        # keys by construction above, so this equals the old symmetric
        # difference but cannot produce spurious keys.
        for k in tools.keys() - version.keys():
            tools[k].append(None)

    tools_keys = list(tools.keys())
    for key in tools_keys:
        tools["{}_version".format(key)] = tools.pop(key)

    ALREADY_PARSED_KEYS = ['versions', 'max_freq', 'runtime', 'resources']

    # Copy through every key not handled above. Set difference rather than
    # symmetric difference (^): with ^, a parsed key absent from `results`
    # would leak into the comprehension and raise a KeyError.
    general_data = {
        k: results[k]
        for k in results.keys() - ALREADY_PARSED_KEYS
    }

    data = {**runtimes, **resources, **general_data, **tools}

    index = pandas.Index(designs)
    return pandas.DataFrame(data, index)
def generate_dataframe(results):
    """Combine the general dataframe with the clock dataframe for all designs."""
    general_df = METHOD_NAME(results)
    return general_df.join(get_clock_dataframe(results), how="left")
1,456 | get property | # -*- coding: utf-8 -*-
# ***************************************************************************
# * Copyright (c) 2017 sliptonic <shopinthewoods@gmail.com> *
# * *
# * This program is free software; you can redistribute it and/or modify *
# * it under the terms of the GNU Lesser General Public License (LGPL) *
# * as published by the Free Software Foundation; either version 2 of *
# * the License, or (at your option) any later version. *
# * for detail see the LICENCE text file. *
# * *
# * This program is distributed in the hope that it will be useful, *
# * but WITHOUT ANY WARRANTY; without even the implied warranty of *
# * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
# * GNU Library General Public License for more details. *
# * *
# * You should have received a copy of the GNU Library General Public *
# * License along with this program; if not, write to the Free Software *
# * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 *
# * USA *
# * *
# ***************************************************************************
"""
The purpose of this file is to collect some handy functions. The reason they
are not in Path.Base.Utils (and there is this confusing naming going on) is that
PathUtils depends on PathJob. Which makes it impossible to use the functions
and classes defined there in PathJob.
So if you add to this file and think about importing anything from PathScripts
other than Path.Log, then it probably doesn't belong here.
"""
import FreeCAD
import Path
translate = FreeCAD.Qt.translate
if False:
Path.Log.setLevel(Path.Log.Level.DEBUG, Path.Log.thisModule())
Path.Log.trackModule(Path.Log.thisModule())
else:
Path.Log.setLevel(Path.Log.Level.INFO, Path.Log.thisModule())
def METHOD_NAME(obj, prop):
    """Resolve the (possibly dotted) property path *prop* on *obj*.

    Returns a tuple ``(owner, value, name)``: the object directly holding the
    final attribute, the attribute's value, and the final attribute name.
    Logs a warning and returns ``(None, None, None)`` when the path cannot
    be resolved.
    """
    o = obj
    attr = obj
    name = None
    for name in prop.split("."):
        o = attr
        if not hasattr(o, name):
            break
        attr = getattr(o, name)
    if o == attr:
        # attr never advanced past its owner, i.e. the lookup failed
        # (assumes no property ever resolves to its own owner - TODO confirm)
        Path.Log.warning(
            translate("PathGui", "%s has no property %s (%s)")
            % (obj.Label, prop, name)
        )
        return (None, None, None)
    # Path.Log.debug("found property %s of %s (%s: %s)" % (prop, obj.Label, name, attr))
    return (o, attr, name)
def getProperty(obj, prop):
    """getProperty(obj, prop) ... answer obj's property defined by its canonical name."""
    _, value, _ = METHOD_NAME(obj, prop)
    return value
def getPropertyValueString(obj, prop):
    """getPropertyValueString(obj, prop) ... answer a string representation of an object's property's value."""
    value = getProperty(obj, prop)
    # Quantity-like values know how to render themselves for the user.
    return value.UserString if hasattr(value, "UserString") else str(value)
def setProperty(obj, prop, value):
    """setProperty(obj, prop, value) ... set the property value of obj's property defined by its canonical name.

    String values are coerced to the target property's current type for int
    and bool properties ("0x10"-style ints and "true/1/yes/ok" booleans).
    """
    o, attr, name = METHOD_NAME(obj, prop)
    if attr is not None and isinstance(value, str):
        # bool must be tested before int: bool is a subclass of int, and the
        # original exact-type checks treated them as distinct.
        if isinstance(attr, bool):
            value = value.lower() in ["true", "1", "yes", "ok"]
        elif isinstance(attr, int):
            # base 0 accepts decimal, hex (0x..), octal (0o..) and binary (0b..)
            value = int(value, 0)
    if o and name:
        setattr(o, name, value)
# NotValidBaseTypeIds = ['Sketcher::SketchObject']
NotValidBaseTypeIds = []
def isValidBaseObject(obj):
    """isValidBaseObject(obj) ... returns true if the object can be used as a base for a job.

    Rejects objects inside geo feature groups, ToolBits, blacklisted type ids,
    Arch panels, and anything without an actual shape.
    """
    if hasattr(obj, "getParentGeoFeatureGroup") and obj.getParentGeoFeatureGroup():
        # Can't link to anything inside a geo feature group anymore
        Path.Log.debug("%s is inside a geo feature group" % obj.Label)
        return False
    if hasattr(obj, "BitBody") and hasattr(obj, "BitShape"):
        # ToolBit's are not valid base objects
        return False
    if obj.TypeId in NotValidBaseTypeIds:
        Path.Log.debug("%s is blacklisted (%s)" % (obj.Label, obj.TypeId))
        return False
    if hasattr(obj, "Sheets") or hasattr(
        obj, "TagText"
    ):  # Arch.Panels and Arch.PanelCut
        Path.Log.debug("%s is not an Arch.Panel" % (obj.Label))
        return False

    import Part

    # A valid base must have a non-null shape.
    return not Part.getShape(obj).isNull()
def isSolid(obj):
    """isSolid(obj) ... return True if the object is a valid solid.

    A solid must have an actual (non-null) shape that encloses volume and is
    closed. NOTE(review): returns the truthy chain value (e.g. the Volume
    float), not a strict bool - callers appear to rely on truthiness only.
    """
    import Part

    shape = Part.getShape(obj)
    return not shape.isNull() and shape.Volume and shape.isClosed()
def opProperty(op, prop):
    """opProperty(op, prop) ... return the value of property prop of the underlying operation (or None if prop does not exist)"""
    # Iterative walk down the Base chain instead of recursion.
    current = op
    while current is not None:
        if hasattr(current, prop):
            return getattr(current, prop)
        current = getattr(current, "Base", None)
    return None
def toolControllerForOp(op):
    """toolControllerForOp(op) ... return the tool controller used by the op.

    If the op doesn't have its own tool controller but has a Base object,
    return its tool controller. Otherwise return None.
    """
    return opProperty(op, "ToolController")
def getPublicObject(obj):
    """getPublicObject(obj) ... returns the object which should be used to reference a feature of the given object."""
    # Climb to the outermost geo feature group, if any.
    parent_getter = getattr(obj, "getParentGeoFeatureGroup", None)
    if parent_getter is not None:
        body = parent_getter()
        if body:
            return getPublicObject(body)
    return obj
def clearExpressionEngine(obj):
    """clearExpressionEngine(obj) ... removes all expressions from obj.
    There is currently a bug that invalidates the DAG if an object
    is deleted that still has one or more expressions attached to it.
    Use this function to remove all expressions before deletion."""
    if not hasattr(obj, "ExpressionEngine"):
        return
    for attr, _expr in obj.ExpressionEngine:
        obj.setExpression(attr, None)
|
1,457 | create model | # coding=utf-8
# Copyright (c) 2020 NVIDIA CORPORATION. All rights reserved.
# Copyright 2018 The Google AI Language Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import array
import json
import os
import sys
sys.path.insert(0, os.path.join(os.getcwd(), "DeepLearningExamples", "TensorFlow", "LanguageModeling", "BERT"))
sys.path.insert(0, os.getcwd())
import mlperf_loadgen as lg
import modeling
import numpy as np
import tensorflow as tf
from squad_QSL import get_squad_QSL
# Allow TF to increase GPU memory usage dynamically to prevent cuBLAS init problems.
config = tf.compat.v1.ConfigProto()
config.gpu_options.allow_growth = True
# NOTE(review): the session is created at import time as a module-level side effect.
session = tf.compat.v1.Session(config=config)
class BERT_TF_ESTIMATOR_SUT():
    def __init__(self, batch_size=8):
        """Load the BERT checkpoint, build the estimator and construct the LoadGen SUT.

        :param batch_size: stored on the instance; batching itself is driven
            by the queries LoadGen issues, not by this value directly
        """
        print("Loading TF model...")
        bert_config = modeling.BertConfig.from_json_file("bert_config.json")
        # Checkpoint path can be overridden via the ML_MODEL_FILE_WITH_PATH env var.
        model_fn = self.model_fn_builder(
            bert_config=bert_config,
            init_checkpoint=os.environ.get("ML_MODEL_FILE_WITH_PATH", "build/data/bert_tf_v1_1_large_fp32_384_v2/model.ckpt-5474"))
        self.estimator = tf.estimator.Estimator(model_fn=model_fn)
        self.batch_size = batch_size

        print("Constructing SUT...")
        self.sut = lg.ConstructSUT(self.issue_queries, self.flush_queries)
        print("Finished constructing SUT.")
        self.qsl = get_squad_QSL()
def issue_queries(self, query_samples):
input_ids = np.zeros((len(query_samples), 1, 384), dtype=np.int32)
input_mask = np.zeros((len(query_samples), 1, 384), dtype=np.int32)
segment_ids = np.zeros((len(query_samples), 1, 384), dtype=np.int32)
for sample_idx in range(len(query_samples)):
eval_features = self.qsl.get_features(query_samples[sample_idx].index)
input_ids[sample_idx, ...] = np.array(eval_features.input_ids)
input_mask[sample_idx, ...] = np.array(eval_features.input_mask)
segment_ids[sample_idx, ...] = np.array(eval_features.segment_ids)
def input_fn():
inputs = {
"input_ids": input_ids,
"input_mask": input_mask,
"segment_ids": segment_ids
}
return tf.data.Dataset.from_tensor_slices(inputs)
for i, result in enumerate(self.estimator.predict(input_fn)):
logits = [float(x) for x in result["logits"].flat]
response_array = array.array("B", np.array(logits).astype(np.float32).tobytes())
bi = response_array.buffer_info()
response = lg.QuerySampleResponse(query_samples[i].id, bi[0], bi[1])
lg.QuerySamplesComplete([response])
def flush_queries(self):
pass
def __del__(self):
print("Finished destroying SUT.")
def METHOD_NAME(self, bert_config, is_training, input_ids, input_mask, segment_ids, use_one_hot_embeddings):
"""Creates a classification model."""
model = modeling.BertModel(
config=bert_config,
is_training=is_training,
input_ids=input_ids,
input_mask=input_mask,
token_type_ids=segment_ids,
use_one_hot_embeddings=use_one_hot_embeddings,
compute_type=tf.float32)
final_hidden = model.get_sequence_output()
final_hidden_shape = modeling.get_shape_list(final_hidden, expected_rank=3)
batch_size = final_hidden_shape[0]
seq_length = final_hidden_shape[1]
hidden_size = final_hidden_shape[2]
output_weights = tf.get_variable(
"cls/squad/output_weights", [2, hidden_size],
initializer=tf.truncated_normal_initializer(stddev=0.02))
output_bias = tf.get_variable(
"cls/squad/output_bias", [2], initializer=tf.zeros_initializer())
final_hidden_matrix = tf.reshape(final_hidden, [batch_size * seq_length, hidden_size])
logits = tf.matmul(final_hidden_matrix, output_weights, transpose_b=True)
logits = tf.nn.bias_add(logits, output_bias)
logits = tf.reshape(logits, [batch_size, seq_length, 2])
return logits
# logits = tf.transpose(logits, [2, 0, 1])
# unstacked_logits = tf.unstack(logits, axis=0, name='unstack')
# (start_logits, end_logits) = (unstacked_logits[0], unstacked_logits[1])
# return (start_logits, end_logits)
def model_fn_builder(self, bert_config, init_checkpoint, use_one_hot_embeddings=False):
"""Returns `model_fn` closure for Estimator."""
def model_fn(features, labels): # pylint: disable=unused-argument
input_ids = features["input_ids"]
input_mask = features["input_mask"]
segment_ids = features["segment_ids"]
logits = self.METHOD_NAME(
bert_config=bert_config,
is_training=False,
input_ids=input_ids,
input_mask=input_mask,
segment_ids=segment_ids,
use_one_hot_embeddings=use_one_hot_embeddings)
tvars = tf.compat.v1.trainable_variables()
initialized_variable_names = {}
(assignment_map, initialized_variable_names) = modeling.get_assignment_map_from_checkpoint(tvars, init_checkpoint)
tf.compat.v1.train.init_from_checkpoint(init_checkpoint, assignment_map)
predictions = {
"logits": logits
}
output_spec = tf.estimator.EstimatorSpec(
mode=tf.estimator.ModeKeys.PREDICT, predictions=predictions)
return output_spec
return model_fn
def get_tf_estimator_sut():
    """Factory used by the benchmark harness: return a SUT with defaults."""
    return BERT_TF_ESTIMATOR_SUT() |
1,458 | test define output file | import os
import filecmp
import pytest
from .test_utils import (
ExampleTest,
compare_stdout_with_file,
compare_files_ignore_newline,
)
class TestTLSFingerprinting(ExampleTest):
    """End-to-end tests for the TLSFingerprinting example executable."""
    pytestmark = [pytest.mark.tlsfingerprinting, pytest.mark.no_network]
    def _get_default_args(self, tmpdir):
        # Baseline CLI args: read a canned pcap; optionally redirect the
        # output file into a temp dir so runs don't clobber each other.
        args = {
            "-r": os.path.join("pcap_examples", "tls2.pcap"),
        }
        if tmpdir:
            args["-o"] = os.path.join(tmpdir, "temp.dat")
        return args
    def _ignore_console_output_lines(self, line):
        # These lines embed run-specific paths, so skip them when diffing
        # console output against the expected capture.
        return line.startswith("Start reading") or line.startswith(
            "Output file was written to"
        )
    def _examine_output_file_lines(self, line1, line2, separator):
        # Field-wise comparison of two output rows (must have exactly 7
        # fields each).
        line1_elements = line1.split(separator)
        line2_elements = line2.split(separator)
        if len(line1_elements) != 7 or len(line2_elements) != 7:
            return False
        for index, (line1_element, line2_element) in enumerate(
            zip(line1_elements, line2_elements)
        ):
            # ignore comparing IPv6 addresses because their representation might be
            # slightly different on different platforms
            if index in [3, 5] and ":" in line1_element and ":" in line2_element:
                continue
            if line1_element != line2_element:
                return False
        return True
    @pytest.mark.parametrize("tls_type", ["ch", "sh", "ch_sh"])
    def test_sanity(self, tls_type):
        # Run each fingerprint type and diff both the output file and the
        # console output against the expected captures.
        args = self._get_default_args()
        if tls_type != "ch":
            args["-t"] = tls_type
        output_file_name = "tls2.txt"
        expected_output_file_name = f"tls_fp_{tls_type}.txt"
        expected_console_output = f"tls_fp_{tls_type}_console.txt"
        try:
            completed_process = self.run_example(args=args)
            assert compare_files_ignore_newline(
                os.path.join("expected_output", expected_output_file_name),
                output_file_name,
                examine_lines_predicate=lambda l1, l2: self._examine_output_file_lines(
                    l1, l2, "\t"
                ),
            )
            compare_stdout_with_file(
                completed_process.stdout,
                os.path.join("expected_output", expected_console_output),
                self._ignore_console_output_lines,
            )
        finally:
            # Default run writes next to the pcap; always clean it up.
            if os.path.exists(output_file_name):
                os.remove(output_file_name)
    def METHOD_NAME(self, tmpdir):
        # Same as sanity but with an explicit -o output path.
        args = self._get_default_args(tmpdir)
        completed_process = self.run_example(args=args)
        assert compare_files_ignore_newline(
            os.path.join("expected_output", "tls_fp_ch.txt"),
            args["-o"],
            examine_lines_predicate=lambda l1, l2: self._examine_output_file_lines(
                l1, l2, "\t"
            ),
        )
        compare_stdout_with_file(
            completed_process.stdout,
            os.path.join("expected_output", "tls_fp_ch_console.txt"),
            self._ignore_console_output_lines,
        )
    def test_no_input_file(self):
        args = dict()
        completed_process = self.run_example(args=args, expected_return_code=1)
        assert (
            "ERROR: Please provide an interface or an input pcap file"
            in completed_process.stdout
        )
    def test_input_file_doesnt_exist(self):
        args = {"-r": "invalid_file.pcap"}
        completed_process = self.run_example(args=args, expected_return_code=1)
        assert "ERROR: Cannot open pcap/pcapng file" in completed_process.stdout
    def test_invalid_fingerprint_type(self):
        args = self._get_default_args()
        args["-t"] = "invalid"
        completed_process = self.run_example(args=args, expected_return_code=1)
        assert (
            "ERROR: Possible options for TLS fingerprint types are 'ch' (Client Hello), 'sh' (Server Hello) or 'ch_sh' (Client Hello & Server Hello)"
            in completed_process.stdout
        )
    def test_separator(self, tmpdir):
        # Custom field separator must be honoured in the output file.
        separator = "#"
        args = self._get_default_args(tmpdir)
        args["-s"] = separator
        completed_process = self.run_example(args=args)
        assert compare_files_ignore_newline(
            os.path.join("expected_output", "tls_fp_ch_hash_separator.txt"),
            args["-o"],
            examine_lines_predicate=lambda l1, l2: self._examine_output_file_lines(
                l1, l2, separator
            ),
        )
        compare_stdout_with_file(
            completed_process.stdout,
            os.path.join("expected_output", "tls_fp_ch_console.txt"),
            self._ignore_console_output_lines,
        )
    @pytest.mark.parametrize("invalid_separator", ["a", "2", ",", "-"])
    def test_invalid_separator(self, invalid_separator):
        args = self._get_default_args()
        args["-s"] = invalid_separator
        completed_process = self.run_example(args=args, expected_return_code=1)
        assert (
            "ERROR: Allowed separators are single characters which are not alphanumeric and not ',', '.', ':', '-'"
            in completed_process.stdout
        )
    def test_filter_packets(self, tmpdir):
        # BPF-style filter narrows which packets get fingerprinted.
        args = self._get_default_args(tmpdir)
        args["-f"] = "net 185.0.0.0 mask 255.0.0.0"
        completed_process = self.run_example(args=args)
        assert compare_files_ignore_newline(
            os.path.join("expected_output", "tls_fp_ch_filter.txt"), args["-o"]
        )
        compare_stdout_with_file(
            completed_process.stdout,
            os.path.join("expected_output", "tls_fp_ch_filter_console.txt"),
            self._ignore_console_output_lines,
        ) |
1,459 | test validation | #
#
#
from unittest import TestCase
from helpers import SimpleProvider
from octodns.record import Record
from octodns.record.cname import CnameRecord
from octodns.record.exception import ValidationError
from octodns.zone import Zone
class TestRecordCname(TestCase):
    """Unit tests for CnameRecord construction, comparison and validation."""
    zone = Zone('unit.tests.', [])
    def assertSingleValue(self, _type, a_value, b_value):
        # Shared helper: exercises construction, data round-tripping and
        # change detection for a single-value record type.
        a_data = {'ttl': 30, 'value': a_value}
        a = _type(self.zone, 'a', a_data)
        self.assertEqual('a', a.name)
        self.assertEqual('a.unit.tests.', a.fqdn)
        self.assertEqual(30, a.ttl)
        self.assertEqual(a_value, a.value)
        self.assertEqual(a_data, a.data)
        b_data = {'ttl': 30, 'value': b_value}
        b = _type(self.zone, 'b', b_data)
        self.assertEqual(b_value, b.value)
        self.assertEqual(b_data, b.data)
        target = SimpleProvider()
        # No changes with self
        self.assertFalse(a.changes(a, target))
        # Diff in value causes change
        other = _type(self.zone, 'a', {'ttl': 30, 'value': b_value})
        change = a.changes(other, target)
        self.assertEqual(change.existing, a)
        self.assertEqual(change.new, other)
        # __repr__ doesn't blow up
        a.__repr__()
    def test_cname(self):
        self.assertSingleValue(CnameRecord, 'target.foo.com.', 'other.foo.com.')
    def test_cname_lowering_value(self):
        # CNAME values are case-insensitive and normalised to lower case.
        upper_record = CnameRecord(
            self.zone,
            'CnameUppwerValue',
            {'ttl': 30, 'type': 'CNAME', 'value': 'GITHUB.COM'},
        )
        lower_record = CnameRecord(
            self.zone,
            'CnameLowerValue',
            {'ttl': 30, 'type': 'CNAME', 'value': 'github.com'},
        )
        self.assertEqual(upper_record.value, lower_record.value)
    def METHOD_NAME(self):
        """Validation: valid CNAMEs pass; root/non-FQDN/URL values raise."""
        # doesn't blow up
        Record.new(
            self.zone,
            'www',
            {'type': 'CNAME', 'ttl': 600, 'value': 'foo.bar.com.'},
        )
        # root cname is a no-no
        with self.assertRaises(ValidationError) as ctx:
            Record.new(
                self.zone,
                '',
                {'type': 'CNAME', 'ttl': 600, 'value': 'foo.bar.com.'},
            )
        self.assertEqual(['root CNAME not allowed'], ctx.exception.reasons)
        # not a valid FQDN
        with self.assertRaises(ValidationError) as ctx:
            Record.new(
                self.zone, 'www', {'type': 'CNAME', 'ttl': 600, 'value': '___.'}
            )
        self.assertEqual(
            ['CNAME value "___." is not a valid FQDN'], ctx.exception.reasons
        )
        # missing trailing .
        with self.assertRaises(ValidationError) as ctx:
            Record.new(
                self.zone,
                'www',
                {'type': 'CNAME', 'ttl': 600, 'value': 'foo.bar.com'},
            )
        self.assertEqual(
            ['CNAME value "foo.bar.com" missing trailing .'],
            ctx.exception.reasons,
        )
        # doesn't allow urls
        with self.assertRaises(ValidationError) as ctx:
            Record.new(
                self.zone,
                'www',
                {'type': 'CNAME', 'ttl': 600, 'value': 'https://google.com'},
            )
        self.assertEqual(
            ['CNAME value "https://google.com" is not a valid FQDN'],
            ctx.exception.reasons,
        )
        # doesn't allow urls with paths
        with self.assertRaises(ValidationError) as ctx:
            Record.new(
                self.zone,
                'www',
                {
                    'type': 'CNAME',
                    'ttl': 600,
                    'value': 'https://google.com/a/b/c',
                },
            )
        self.assertEqual(
            ['CNAME value "https://google.com/a/b/c" is not a valid FQDN'],
            ctx.exception.reasons,
        )
        # doesn't allow paths
        with self.assertRaises(ValidationError) as ctx:
            Record.new(
                self.zone,
                'www',
                {'type': 'CNAME', 'ttl': 600, 'value': 'google.com/some/path'},
            )
        self.assertEqual(
            ['CNAME value "google.com/some/path" is not a valid FQDN'],
            ctx.exception.reasons,
        ) |
1,460 | log | # Copyright (c) Alibaba, Inc. and its affiliates.
import datetime
import os
import os.path as osp
from collections import OrderedDict
import json
import torch
from torch import distributed as dist
from modelscope.metainfo import Hooks
from modelscope.trainers.hooks.builder import HOOKS
from modelscope.utils.constant import LogKeys, ModeKeys
from modelscope.utils.json_utils import EnhancedEncoder
from modelscope.utils.torch_utils import is_master
from .base import LoggerHook
@HOOKS.register_module(module_name=Hooks.TextLoggerHook)
class TextLoggerHook(LoggerHook):
    """Logger hook in text, Output log to both console and local json file.
    Args:
        by_epoch (bool, optional): Whether EpochBasedTrainer is used.
            Default: True.
        interval (int, optional): Logging interval (every k iterations).
            It is interval of iterations even by_epoch is true. Default: 10.
        ignore_last (bool, optional): Ignore the log of last iterations in each
            epoch if less than :attr:`interval`. Default: True.
        reset_flag (bool, optional): Whether to clear the output buffer after
            logging. Default: False.
        out_dir (str): The directory to save log. If is None, use `trainer.work_dir`
        ignore_rounding_keys (`Union[str, List]`): The keys to ignore float rounding, default 'lr'
        rounding_digits (`int`): The digits of rounding, exceeding parts will be ignored.
    """
    def __init__(self,
                 by_epoch=True,
                 interval=10,
                 ignore_last=True,
                 reset_flag=False,
                 out_dir=None,
                 ignore_rounding_keys='lr',
                 rounding_digits=5):
        super(TextLoggerHook, self).__init__(interval, ignore_last, reset_flag,
                                             by_epoch)
        self.by_epoch = by_epoch
        self.time_sec_tot = 0
        self.out_dir = out_dir
        self._logged_keys = []  # store the key has been logged
        # Normalise to a list so membership tests below work uniformly.
        if isinstance(ignore_rounding_keys,
                      str) or ignore_rounding_keys is None:
            ignore_rounding_keys = [ignore_rounding_keys]
        self.ignore_rounding_keys = ignore_rounding_keys
        self.rounding_digits = rounding_digits
    def before_run(self, trainer):
        # Resolve the output dir (only rank 0 creates it) and open a fresh
        # timestamped json log file for this run.
        super(TextLoggerHook, self).before_run(trainer)
        if self.out_dir is None:
            self.out_dir = trainer.work_dir
        if not osp.exists(self.out_dir) and is_master():
            os.makedirs(self.out_dir)
        trainer.logger.info('Text logs will be saved to {}'.format(
            self.out_dir))
        self.start_iter = trainer.iter
        self.json_log_path = osp.join(self.out_dir,
                                      '{}.log.json'.format(trainer.timestamp))
        if hasattr(trainer, 'meta') and trainer.meta is not None:
            self._dump_log(trainer.meta)
    def _get_max_memory(self, trainer):
        # Peak CUDA memory in MB; reduced to the max across ranks when
        # running distributed.
        device = torch.cuda.current_device()
        mem = torch.cuda.max_memory_allocated(device=device)
        mem_mb = torch.tensor([int(mem / (1024 * 1024))],
                              dtype=torch.int,
                              device=device)
        if trainer._dist:
            dist.reduce(mem_mb, 0, op=dist.ReduceOp.MAX)
        return mem_mb.item()
    def _log_info(self, log_dict, trainer):
        # Format one console log line; keys consumed here are recorded in
        # self._logged_keys so the generic tail loop skips them.
        lr_key = LogKeys.LR
        epoch_key = LogKeys.EPOCH
        iter_key = LogKeys.ITER
        mode_key = LogKeys.MODE
        iter_time_key = LogKeys.ITER_TIME
        data_load_time_key = LogKeys.DATA_LOAD_TIME
        eta_key = LogKeys.ETA
        if log_dict[mode_key] == ModeKeys.TRAIN:
            if isinstance(log_dict[lr_key], dict):
                lr_str = []
                for k, val in log_dict[lr_key].items():
                    lr_str.append(f'{lr_key}_{k}: {val:.3e}')
                lr_str = ' '.join(lr_str)
            else:
                lr_str = f'{lr_key}: {log_dict[lr_key]:.3e}'
            if self.by_epoch:
                log_str = f'{epoch_key} [{log_dict[epoch_key]}][{log_dict[iter_key]}/{trainer.iters_per_epoch}]\t'
            else:
                log_str = f'{iter_key} [{log_dict[iter_key]}/{trainer.max_iters}]\t'
            log_str += f'{lr_str}, '
            self._logged_keys.extend([lr_key, mode_key, iter_key, epoch_key])
            if iter_time_key in log_dict.keys():
                # ETA estimated from the running average of per-iter time.
                self.time_sec_tot += (log_dict[iter_time_key] * self.interval)
                time_sec_avg = self.time_sec_tot / (
                    trainer.iter - self.start_iter + 1)
                eta_sec = time_sec_avg * (trainer.max_iters - trainer.iter - 1)
                eta_str = str(datetime.timedelta(seconds=int(eta_sec)))
                log_str += f'{eta_key}: {eta_str}, '
                log_str += f'{iter_time_key}: {log_dict[iter_time_key]:.3f}, '
                log_str += f'{data_load_time_key}: {log_dict[data_load_time_key]:.3f}, '
                self._logged_keys.extend([
                    iter_time_key,
                    data_load_time_key,
                ])
        else:
            # val/test time
            # here 1000 is the length of the val dataloader
            # by epoch: epoch[val] [4][1000]
            # by iter: iter[val] [1000]
            if self.by_epoch:
                log_str = f'{epoch_key}({log_dict[mode_key]}) [{log_dict[epoch_key]}][{log_dict[iter_key]}]\t'
            else:
                # TODO log_dict[iter_key] is not correct because of it's train_loop's inner_iter
                log_str = f'{iter_key}({log_dict[mode_key]}) [{log_dict[iter_key]}]\t'
            self._logged_keys.extend([mode_key, iter_key, epoch_key])
        log_items = []
        for name, val in log_dict.items():
            if name in self._logged_keys:
                continue
            if isinstance(val,
                          float) and name not in self.ignore_rounding_keys:
                val = f'{val:.4f}'
            log_items.append(f'{name}: {val}')
        log_str += ', '.join(log_items)
        if is_master():
            trainer.logger.info(log_str)
    def _dump_log(self, log_dict):
        # dump log in json format
        json_log = OrderedDict()
        for k, v in log_dict.items():
            json_log[
                k] = v if k in self.ignore_rounding_keys else self._round_float(
                    v, self.rounding_digits)
        if is_master():
            # Append one json object per line (jsonl-style).
            with open(self.json_log_path, 'a+') as f:
                json.dump(json_log, f, cls=EnhancedEncoder)
                f.write('\n')
    def _round_float(self, items, ndigits=5):
        # Recursively round floats inside (possibly nested) lists.
        if isinstance(items, list):
            return [self._round_float(item, ndigits) for item in items]
        elif isinstance(items, float):
            return round(items, ndigits)
        else:
            return items
    def METHOD_NAME(self, trainer):
        """Assemble the log dict for the current step, emit it to console
        and json file, and return it."""
        cur_iter = self.get_iter(
            trainer, inner_iter=True
        ) if trainer.mode == ModeKeys.TRAIN else trainer.iters_per_epoch
        log_dict = OrderedDict(
            mode=trainer.mode, epoch=self.get_epoch(trainer), iter=cur_iter)
        # statistic memory
        if torch.cuda.is_available():
            log_dict[LogKeys.MEMORY] = self._get_max_memory(trainer)
        log_dict = dict(log_dict, **trainer.log_buffer.output)
        self._log_info(log_dict, trainer)
        self._dump_log(log_dict)
        return log_dict |
1,461 | setup testing defaults | """Miscellaneous WSGI-related Utilities"""
import posixpath
__all__ = [
'FileWrapper', 'guess_scheme', 'application_uri', 'request_uri',
'shift_path_info', 'setup_testing_defaults',
]
class FileWrapper:
    """Iterate a file-like object in fixed-size chunks.

    Wraps *filelike* so servers can stream it; each iteration step yields
    up to *blksize* bytes.  If the wrapped object has a ``close`` method,
    it is re-exported on the wrapper.
    """
    def __init__(self, filelike, blksize=8192):
        self.filelike = filelike
        self.blksize = blksize
        if hasattr(filelike, 'close'):
            self.close = filelike.close
    def __getitem__(self, key):
        # Legacy sequence protocol: the index is ignored and chunks are
        # handed out sequentially; IndexError signals exhaustion.
        chunk = self.filelike.read(self.blksize)
        if not chunk:
            raise IndexError
        return chunk
    def __iter__(self):
        return self
    def __next__(self):
        chunk = self.filelike.read(self.blksize)
        if not chunk:
            raise StopIteration
        return chunk
def guess_scheme(environ):
    """Guess a value for 'wsgi.url_scheme': 'https' when the HTTPS
    environment variable carries a truthy marker, otherwise 'http'."""
    https_flag = environ.get("HTTPS")
    return 'https' if https_flag in ('yes', 'on', '1') else 'http'
def application_uri(environ):
    """Return the application's base URI (no PATH_INFO or QUERY_STRING).

    Uses HTTP_HOST verbatim when present (per PEP 3333 it already includes
    any non-default port); otherwise falls back to SERVER_NAME and appends
    SERVER_PORT when it differs from the scheme's default.
    """
    url = environ['wsgi.url_scheme']+'://'
    from urllib.parse import quote
    if environ.get('HTTP_HOST'):
        url += environ['HTTP_HOST']
    else:
        url += environ['SERVER_NAME']
        # Only consult SERVER_PORT on this branch: when HTTP_HOST is set it
        # already carries the port, and SERVER_PORT may be absent entirely.
        if environ['wsgi.url_scheme'] == 'https':
            if environ['SERVER_PORT'] != '443':
                url += ':' + environ['SERVER_PORT']
        else:
            if environ['SERVER_PORT'] != '80':
                url += ':' + environ['SERVER_PORT']
    url += quote(environ.get('SCRIPT_NAME') or '/', encoding='latin1')
    return url
def request_uri(environ, include_query=True):
    """Return the full request URI, optionally including the query string"""
    from urllib.parse import quote
    uri = application_uri(environ)
    path = quote(environ.get('PATH_INFO', ''), safe='/;=,', encoding='latin1')
    # application_uri already ends in '/' when SCRIPT_NAME is empty, so drop
    # PATH_INFO's leading slash in that case to avoid doubling it up.
    if not environ.get('SCRIPT_NAME'):
        uri += path[1:]
    else:
        uri += path
    query = environ.get('QUERY_STRING')
    if include_query and query:
        uri = uri + '?' + query
    return uri
def shift_path_info(environ):
    """Shift a name from PATH_INFO to SCRIPT_NAME, returning it

    If there are no remaining path segments in PATH_INFO, return None.
    Note: 'environ' is modified in-place; use a copy if you need to keep
    the original PATH_INFO or SCRIPT_NAME.
    Note: when PATH_INFO is just a '/', this returns '' and appends a trailing
    '/' to SCRIPT_NAME, even though empty path segments are normally ignored,
    and SCRIPT_NAME doesn't normally end in a '/'. This is intentional
    behavior, to ensure that an application can tell the difference between
    '/x' and '/x/' when traversing to objects.
    """
    remaining = environ.get('PATH_INFO', '')
    if not remaining:
        return None
    segments = remaining.split('/')
    # Collapse empty and '.' segments in the interior only, so leading and
    # trailing slashes survive the rewrite.
    segments[1:-1] = [seg for seg in segments[1:-1] if seg and seg != '.']
    shifted = segments.pop(1)
    new_script = posixpath.normpath(
        environ.get('SCRIPT_NAME', '') + '/' + shifted)
    if new_script.endswith('/'):
        new_script = new_script[:-1]
    if not shifted and not new_script.endswith('/'):
        # PATH_INFO was a bare '/': keep the trailing slash on SCRIPT_NAME.
        new_script += '/'
    environ['SCRIPT_NAME'] = new_script
    environ['PATH_INFO'] = '/'.join(segments)
    # A lone '.' segment is consumed without being reported to the caller,
    # so that PATH_INFO still normalizes to an empty string.
    if shifted == '.':
        shifted = None
    return shifted
def METHOD_NAME(environ):
    """Update 'environ' with trivial defaults for testing purposes
    This adds various parameters required for WSGI, including HTTP_HOST,
    SERVER_NAME, SERVER_PORT, REQUEST_METHOD, SCRIPT_NAME, PATH_INFO,
    and all of the wsgi.* variables.  It only supplies default values,
    and does not replace any existing settings for these variables.
    This routine is intended to make it easier for unit tests of WSGI
    servers and applications to set up dummy environments.  It should *not*
    be used by actual WSGI servers or applications, since the data is fake!
    """
    environ.setdefault('SERVER_NAME','127.0.0.1')
    environ.setdefault('SERVER_PROTOCOL','HTTP/1.0')
    environ.setdefault('HTTP_HOST',environ['SERVER_NAME'])
    environ.setdefault('REQUEST_METHOD','GET')
    # Only default SCRIPT_NAME/PATH_INFO when neither is supplied, so a
    # caller-provided split is never half-overwritten.
    if 'SCRIPT_NAME' not in environ and 'PATH_INFO' not in environ:
        environ.setdefault('SCRIPT_NAME','')
        environ.setdefault('PATH_INFO','/')
    environ.setdefault('wsgi.version', (1,0))
    environ.setdefault('wsgi.run_once', 0)
    environ.setdefault('wsgi.multithread', 0)
    environ.setdefault('wsgi.multiprocess', 0)
    from io import StringIO, BytesIO
    # wsgi.input is a byte stream, wsgi.errors a text stream (PEP 3333).
    environ.setdefault('wsgi.input', BytesIO())
    environ.setdefault('wsgi.errors', StringIO())
    environ.setdefault('wsgi.url_scheme',guess_scheme(environ))
    # Default port follows whichever scheme was supplied or guessed.
    if environ['wsgi.url_scheme']=='http':
        environ.setdefault('SERVER_PORT', '80')
    elif environ['wsgi.url_scheme']=='https':
        environ.setdefault('SERVER_PORT', '443')
# __all__ exports 'setup_testing_defaults', which was otherwise unbound;
# keep it aliased so `from <module> import *` works.
setup_testing_defaults = METHOD_NAME
# Membership predicate over the HTTP/1.1 hop-by-hop header names
# (RFC 2616 section 13.5.1); the bound __contains__ gives a fast,
# callable lookup.
_hoppish = {
    'connection':1, 'keep-alive':1, 'proxy-authenticate':1,
    'proxy-authorization':1, 'te':1, 'trailers':1, 'transfer-encoding':1,
    'upgrade':1
}.__contains__
def is_hop_by_hop(header_name):
    """Return true if 'header_name' is an HTTP/1.1 "Hop-by-Hop" header"""
    # Header names are case-insensitive, hence the lower() normalisation.
    return _hoppish(header_name.lower()) |
1,462 | watch bands | # Copyright 1999-2021 Alibaba Group Holding Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import asyncio
import logging
import time
from collections import defaultdict
from typing import List, DefaultDict, Dict, Tuple
from .... import oscar as mo
from ....resource import Resource, ZeroResource
from ....typing import BandType
logger = logging.getLogger(__name__)
class GlobalResourceManagerActor(mo.Actor):
    """Cluster-wide bookkeeper of per-band resource usage.

    Tracks, for every band, which (session_id, subtask_id) pairs hold how
    much resource, keeps used/total views in sync with cluster membership,
    and derives per-band idle times used for scale-in decisions.
    """
    # {(address, resource_type): {(session_id, subtask_id): Resource(...)}}
    _band_stid_resources: DefaultDict[BandType, Dict[Tuple[str, str], Resource]]
    _band_used_resources: Dict[BandType, Resource]
    _band_total_resources: Dict[BandType, Resource]
    def __init__(self):
        self._band_stid_resources = defaultdict(dict)
        self._band_used_resources = defaultdict(lambda: ZeroResource)
        # band -> epoch seconds when it last became idle (-1 while busy)
        self._band_idle_start_time = dict()
        # band -> asyncio.Event set when the band next becomes idle
        self._band_idle_events = dict()
        self._band_total_resources = dict()
        self._cluster_api = None
        self._band_watch_task = None
    async def __post_create__(self):
        from ...cluster.api import ClusterAPI
        self._cluster_api = await ClusterAPI.create(self.address)
        async def METHOD_NAME():
            # Keep the total-resource view in sync with cluster membership;
            # bands seen for the first time start with zero recorded usage.
            async for bands in self._cluster_api.watch_all_bands():
                old_bands = set(self._band_total_resources.keys())
                self._band_total_resources = bands
                new_bands = set(bands.keys()) - old_bands
                for band in new_bands:
                    self._update_band_usage(band, ZeroResource)
        self._band_watch_task = asyncio.create_task(METHOD_NAME())
    async def __pre_destroy__(self):
        self._band_watch_task.cancel()
    async def refresh_bands(self):
        # One-shot refresh, used when a band is unexpectedly unknown.
        self._band_total_resources = await self._cluster_api.get_all_bands()
    @mo.extensible
    async def apply_subtask_resources(
        self,
        band: BandType,
        session_id: str,
        subtask_ids: List[str],
        subtask_resources: List[Resource],
    ) -> List[str]:
        """Grant resources to as many of the given subtasks (in order) as
        fit on `band`; return the ids that were granted."""
        if (
            not self._band_total_resources or band not in self._band_total_resources
        ):  # pragma: no cover
            await self.refresh_bands()
        idx = 0
        # only ready bands will pass
        if band in self._band_total_resources:
            total_resource = self._band_total_resources[band]
            for stid, subtask_resource in zip(subtask_ids, subtask_resources):
                band_used_resource = self._band_used_resources[band]
                # First subtask that no longer fits stops the whole batch.
                if band_used_resource + subtask_resource > total_resource:
                    break
                self._band_stid_resources[band][(session_id, stid)] = subtask_resource
                self._update_band_usage(band, subtask_resource)
                idx += 1
        if idx == 0:
            logger.debug(
                "No resources available, status: %r, request: %r",
                self._band_used_resources,
                subtask_resources,
            )
        return subtask_ids[:idx]
    @mo.extensible
    def update_subtask_resources(
        self, band: BandType, session_id: str, subtask_id: str, resource: Resource
    ):
        # Re-size an existing grant; unknown subtasks are ignored.
        session_subtask_id = (session_id, subtask_id)
        subtask_resources = self._band_stid_resources[band]
        if session_subtask_id not in subtask_resources:
            return
        resource_delta = resource - subtask_resources[session_subtask_id]
        subtask_resources[session_subtask_id] = resource
        self._update_band_usage(band, resource_delta)
    @mo.extensible
    def release_subtask_resource(
        self, band: BandType, session_id: str, subtask_id: str
    ):
        # todo ensure slots released when subtasks ends in all means
        resource_delta = self._band_stid_resources[band].pop(
            (session_id, subtask_id), ZeroResource
        )
        self._update_band_usage(band, -resource_delta)
    def _update_band_usage(self, band: BandType, band_usage_delta: Resource):
        """Apply a usage delta and maintain idle bookkeeping for `band`."""
        self._band_used_resources[band] += band_usage_delta
        # some code path doesn't call `apply_subtask_resources`
        band_total_resource = self._band_total_resources.get(band)
        if (
            band_total_resource is not None
            and self._band_used_resources[band] > band_total_resource
        ):  # pragma: no cover
            raise Exception(
                f"Resource exceed: band used resource {self._band_used_resources[band]} "
                f"band total resource {self._band_total_resources[band]}"
            )
        if self._band_used_resources[band] <= ZeroResource:
            # Band just went idle: record the moment and wake any waiters.
            self._band_used_resources.pop(band)
            self._band_idle_start_time[band] = time.time()
            if band in self._band_idle_events:
                self._band_idle_events.pop(band).set()
        else:
            self._band_idle_start_time[band] = -1
    def get_used_resources(self) -> Dict[BandType, Resource]:
        return self._band_used_resources
    def get_remaining_resources(self) -> Dict[BandType, Resource]:
        resources = {}
        for band, resource in self._band_total_resources.items():
            used_resource = self.get_used_resources()[band]
            resources[band] = resource - used_resource
        return resources
    async def get_idle_bands(self, idle_duration: int):
        """Return a band list which all bands has been idle for at least `idle_duration` seconds."""
        now = time.time()
        idle_bands = []
        for band in self._band_total_resources.keys():
            idle_start_time = self._band_idle_start_time.get(band)
            if idle_start_time is None:  # pragma: no cover
                # skip new requested band for this round scale in.
                self._band_idle_start_time[band] = now
            elif idle_start_time > 0 and now >= idle_start_time + idle_duration:
                idle_bands.append(band)
        return idle_bands
    async def wait_band_idle(self, band: BandType):
        # Return an awaitable that resolves when the (currently busy) band
        # next becomes idle; a band that is already idle falls through.
        if self._band_idle_start_time[band] <= 0:
            if band in self._band_idle_events:
                event = self._band_idle_events[band]
            else:
                event = asyncio.Event()
                self._band_idle_events[band] = event
            return event.wait() |
1,463 | format as sql rows | """
Import prescribing data from CSV files into SQLite
"""
from collections import namedtuple
import csv
from itertools import groupby
import logging
import os
import sqlite3
import gzip
import heapq
from matrixstore.matrix_ops import sparse_matrix, finalise_matrix
from matrixstore.serializer import serialize_compressed
from .common import get_prescribing_filename
logger = logging.getLogger(__name__)
# One presentation's aggregated prescribing data: a BNF code plus one
# matrix per measure (rows = practices, columns = dates).
MatrixRow = namedtuple("MatrixRow", "bnf_code items quantity actual_cost net_cost")
class MissingHeaderError(Exception):
    # Raised when a required column is absent from a prescribing CSV header.
    pass
def import_prescribing(filename):
    """Import prescribing CSV data into the existing SQLite file `filename`.

    The file's `date` table determines which months are loaded.

    Raises:
        RuntimeError: if no SQLite file exists at `filename`.
    """
    if not os.path.exists(filename):
        raise RuntimeError("No SQLite file at: {}".format(filename))
    connection = sqlite3.connect(filename)
    try:
        # Trade crash-safety for insert speed
        connection.execute("PRAGMA synchronous=OFF")
        dates = [date for (date,) in connection.execute("SELECT date FROM date")]
        prescriptions = get_prescriptions_for_dates(dates)
        write_prescribing(connection, prescriptions)
        connection.commit()
    finally:
        # Release the handle even if the import fails part-way through.
        connection.close()
def write_prescribing(connection, prescriptions):
    """Aggregate `prescriptions` into per-presentation matrices and store
    them (serialized) on the `presentation` table of `connection`."""
    cursor = connection.cursor()
    # Row/column offsets of the matrices are keyed by practice code and
    # date string respectively.
    practice_offsets = dict(cursor.execute("SELECT code, offset FROM practice"))
    date_offsets = dict(cursor.execute("SELECT date, offset FROM date"))
    serialized_rows = METHOD_NAME(
        build_matrices(prescriptions, practice_offsets, date_offsets),
        connection,
    )
    cursor.executemany(
        """
        UPDATE presentation SET items=?, quantity=?, actual_cost=?, net_cost=?
        WHERE bnf_code=?
        """,
        serialized_rows,
    )
def get_prescriptions_for_dates(dates):
    """
    Yield all prescribing data for the given dates as tuples of the form:
        bnf_code, practice_code, date, items, quantity, actual_cost, net_cost
    sorted by bnf_code, practice and date.
    """
    filenames = [get_prescribing_filename(date) for date in sorted(dates)]
    missing_files = [f for f in filenames if not os.path.exists(f)]
    if missing_files:
        raise RuntimeError(
            "Some required CSV files were missing:\n  {}".format(
                "\n  ".join(missing_files)
            )
        )
    # Each monthly file is already sorted by (bnf_code, practice, month),
    # so lazily merging the sorted streams yields a globally sorted stream.
    streams = [read_gzipped_prescribing_csv(f) for f in filenames]
    return heapq.merge(*streams)
def read_gzipped_prescribing_csv(filename):
    """Yield parsed prescribing rows from one gzipped CSV file."""
    with gzip.open(filename, "rt") as csv_stream:
        yield from parse_prescribing_csv(csv_stream)
def parse_prescribing_csv(input_stream):
    """
    Accepts a stream of CSV and yields prescribing data as tuples of the form:
        bnf_code, practice_code, date, items, quantity, actual_cost, net_cost

    Raises MissingHeaderError if any required column is absent.
    """
    reader = csv.reader(input_stream)
    headers = next(reader)
    try:
        column_of = {
            name: headers.index(name)
            for name in (
                "bnf_code",
                "practice",
                "month",
                "items",
                "quantity",
                "actual_cost",
                "net_cost",
            )
        }
    except ValueError as e:
        raise MissingHeaderError(str(e))
    bnf_code_col = column_of["bnf_code"]
    practice_col = column_of["practice"]
    date_col = column_of["month"]
    items_col = column_of["items"]
    quantity_col = column_of["quantity"]
    actual_cost_col = column_of["actual_cost"]
    net_cost_col = column_of["net_cost"]
    for row in reader:
        yield (
            # These sometimes have trailing spaces in the CSV
            row[bnf_code_col].strip(),
            row[practice_col].strip(),
            # We only need the YYYY-MM-DD part of the date
            row[date_col][:10],
            int(row[items_col]),
            float(row[quantity_col]),
            pounds_to_pence(row[actual_cost_col]),
            pounds_to_pence(row[net_cost_col]),
        )
def pounds_to_pence(value):
    """Convert a pounds amount (string or number) to whole pence."""
    pounds = float(value)
    return int(round(pounds * 100))
def build_matrices(prescriptions, practices, dates):
    """
    Accepts an iterable of prescriptions plus mappings of practice codes and
    date strings to their respective row/column offsets. Yields tuples of the
    form:
        bnf_code, items_matrix, quantity_matrix, actual_cost_matrix, net_cost_matrix
    Where the matrices contain the prescribed values for that presentation for
    every practice and date.
    """
    # Matrix dimensions span the full practice/date offset space.
    max_row = max(practices.values())
    max_col = max(dates.values())
    shape = (max_row + 1, max_col + 1)
    # Input is sorted by bnf_code, so groupby visits each presentation once.
    grouped_by_bnf_code = groupby(prescriptions, lambda row: row[0])
    for bnf_code, row_group in grouped_by_bnf_code:
        items_matrix = sparse_matrix(shape, integer=True)
        quantity_matrix = sparse_matrix(shape, integer=False)
        actual_cost_matrix = sparse_matrix(shape, integer=True)
        net_cost_matrix = sparse_matrix(shape, integer=True)
        for _, practice, date, items, quantity, actual_cost, net_cost in row_group:
            practice_offset = practices[practice]
            date_offset = dates[date]
            items_matrix[practice_offset, date_offset] = items
            quantity_matrix[practice_offset, date_offset] = quantity
            actual_cost_matrix[practice_offset, date_offset] = actual_cost
            net_cost_matrix[practice_offset, date_offset] = net_cost
        yield MatrixRow(
            bnf_code,
            finalise_matrix(items_matrix),
            finalise_matrix(quantity_matrix),
            finalise_matrix(actual_cost_matrix),
            finalise_matrix(net_cost_matrix),
        )
def METHOD_NAME(matrices, connection):
    """
    Given an iterable of MatrixRows (which contain a BNF code plus all
    prescribing data for that presentation) yield tuples of values ready for
    insertion into SQLite
    """
    cursor = connection.cursor()
    num_presentations = next(cursor.execute("SELECT COUNT(*) FROM presentation"))[0]
    count = 0
    for row in matrices:
        count += 1
        # We make sure we have a row for every BNF code in the data, even ones
        # we didn't know about previously. This is a hack that we won't need
        # once we can use SQLite v3.24.0 which has proper UPSERT support.
        cursor.execute(
            "INSERT OR IGNORE INTO presentation (bnf_code) VALUES (?)", [row.bnf_code]
        )
        if should_log_message(count):
            logger.info(
                "Writing data for %s (%s/%s)", row.bnf_code, count, num_presentations
            )
        # Four compressed matrices first, bnf_code last -- presumably matching
        # the placeholder order of the consuming SQL statement; confirm there.
        yield (
            serialize_compressed(row.items),
            serialize_compressed(row.quantity),
            serialize_compressed(row.actual_cost),
            serialize_compressed(row.net_cost),
            row.bnf_code,
        )
    logger.info("Finished writing data for %s presentations", count)
def should_log_message(n):
    """
    To avoid cluttering log output we don't log the insertion of every single
    presentation
    """
    # Log the first ten, the hundredth, and then every two-hundredth.
    return n <= 10 or n == 100 or n % 200 == 0
1,464 | test plot connections color by | import pytest
import numpy as np
import openpnm as op
import matplotlib.pyplot as plt
from numpy.testing import assert_allclose
class PlotToolsTest:
    """Smoke tests for openpnm's matplotlib-based visualization helpers."""

    def setup_class(self):
        self.ws = op.Workspace()

    def test_plot_tutorial(self):
        pn = op.network.Cubic(shape=[4, 4, 1])
        # This runs locally, but fails on the CI due to a missing argument 's'
        # coming from within the networkx function, not ours.
        # op.visualization.plot_tutorial(pn)
        # plt.close()

    def test_plot_networkx_var_spacing(self):
        # Exercise each of the three axis-aligned 2D orientations in turn,
        # with anisotropic spacing.
        for i in range(3):
            shape = np.ones(3, dtype=int)
            shape[np.arange(3) != i] = [5, 8]
            spacing = np.ones(3, dtype=float)
            spacing[np.arange(3) != i] = [0.01, 0.6]
            pn = op.network.Cubic(shape=shape)
            dims = op.topotools.dimensionality(pn)
            x, y = pn["pore.coords"].T[dims]
            fig, ax = plt.subplots()
            m = op.visualization.plot_networkx(pn, ax=ax)
            # Plotted marker offsets must match the pore coordinates.
            x_plot, y_plot = np.array(m.get_offsets()).T
            np.testing.assert_allclose(x_plot, x)
            np.testing.assert_allclose(y_plot, y)
            plt.close()

    def test_plot_networkx(self):
        # 2D networks in XY, YZ, XZ planes
        for i in range(3):
            shape = np.ones(3, dtype=int)
            shape[np.arange(3) != i] = [5, 8]
            pn = op.network.Cubic(shape=shape)
            x, y = pn["pore.coords"].T[op.topotools.dimensionality(pn)]
            fig, ax = plt.subplots()
            m = op.visualization.plot_networkx(pn, ax=ax)
            x_plot, y_plot = np.array(m.get_offsets()).T
            np.testing.assert_allclose(x_plot, x)
            np.testing.assert_allclose(y_plot, y)
            plt.close()
        # 1D networks in XY, YZ, XZ planes
        for i in range(3):
            shape = np.ones(3, dtype=int)
            shape[np.arange(3) == i] = [5]
            pn = op.network.Cubic(shape=shape)
            x, = pn["pore.coords"].T[op.topotools.dimensionality(pn)]
            fig, ax = plt.subplots()
            m = op.visualization.plot_networkx(pn, ax=ax)
            x_plot, y_plot = np.array(m.get_offsets()).T
            np.testing.assert_allclose(x_plot, x)
            plt.close()

    def test_plot_networkx_3d(self):
        # A 3D network is expected to raise (this test asserts any Exception).
        pn = op.network.Cubic(shape=[5, 8, 3])
        with pytest.raises(Exception):
            op.visualization.plot_networkx(pn)

    def test_generate_voxel_image(self):
        pn = op.network.Cubic(shape=[5, 5, 1])
        pn.add_model_collection(
            op.models.collections.geometry.spheres_and_cylinders)
        pn.regenerate_models()
        im = op.visualization.generate_voxel_image(network=pn,
                                                   pore_shape='sphere',
                                                   throat_shape='cylinder',
                                                   max_dim=500)
        # max_dim is expected to cap the first image axis at 500 voxels.
        assert im.shape[0] == 500

    def METHOD_NAME(self):
        pn = op.network.Cubic(shape=[5, 5, 1])
        np.random.seed(10)
        pn.add_model_collection(
            op.models.collections.geometry.spheres_and_cylinders)
        pn.regenerate_models()
        Ts = np.array([0, 4, 6, 18])
        im = op.visualization.plot_connections(pn, throats=Ts,
                                               color_by=pn['throat.diameter'])
        colors_im = im.get_color()
        # Recompute the expected colours: min-max normalise the selected
        # diameters, map through 'jet', force alpha to 1.0 -- presumably
        # mirroring plot_connections' internal scaling (confirm there).
        color_by = pn['throat.diameter'][Ts]
        cscale = (color_by - color_by.min()) / (color_by.max() - color_by.min())
        color_calc = plt.colormaps['jet'](cscale)
        color_calc[:, 3] = 1.0
        assert_allclose(color_calc, colors_im, rtol=1e-5)

    def test_plot_coordinates_color_by(self):
        pn = op.network.Cubic(shape=[5, 5, 1])
        np.random.seed(10)
        pn.add_model_collection(
            op.models.collections.geometry.spheres_and_cylinders)
        pn.regenerate_models()
        Ps = np.array([0, 4, 6, 18])
        im = op.visualization.plot_coordinates(pn, pores=Ps,
                                               color_by=pn['pore.diameter'])
        colors_im = im.get_edgecolors()
        color_by = pn['pore.diameter'][Ps]
        cscale = (color_by - color_by.min()) / (color_by.max() - color_by.min())
        color_calc = plt.colormaps['jet'](cscale)
        color_calc[:, 3] = 1.0
        assert_allclose(color_calc, colors_im, rtol=1e-5)
if __name__ == '__main__':
    # Ad-hoc runner: instantiate the suite and execute every test_* method.
    # Uses the idiomatic dir()/getattr() instead of calling the dunder
    # methods t.__dir__() / t.__getattribute__() directly.
    t = PlotToolsTest()
    self = t  # convenience alias for pasting test bodies into a REPL
    t.setup_class()
    for item in dir(t):
        if item.startswith('test'):
            print(f'Running test: {item}')
            getattr(t, item)()
1,465 | validate archive restore | # --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
from datetime import datetime
from azure.cli.core.azclierror import RequiredArgumentMissingError, MutuallyExclusiveArgumentError, \
ArgumentUsageError, InvalidArgumentValueError
from azure.mgmt.recoveryservicesbackup.activestamp.models import StorageType
# Argument types
def datetime_type(string):
    """Parse a UTC datetime string in one of the accepted formats.

    Accepted examples: 31-12-2017, 31-12-2017-05:30:00.
    Raises InvalidArgumentValueError when no format matches.
    """
    for fmt in ('%d-%m-%Y', '%d-%m-%Y-%H:%M:%S'):
        try:
            return datetime.strptime(string, fmt)
        except ValueError:
            continue  # try the next accepted format
    raise InvalidArgumentValueError("""
        Input '{}' is not valid. Valid example: 31-12-2017, 31-12-2017-05:30:00
        """.format(string))
def validate_mi_used_for_restore_disks(vault_identity, use_system_assigned_msi, identity_id):
    """Check that the managed identity requested for a disk restore is enabled
    on the vault.

    Raises ArgumentUsageError when the vault lacks the requested system- or
    user-assigned identity; returns None when the arguments are consistent.
    """
    if (use_system_assigned_msi or identity_id) and vault_identity is None:
        raise ArgumentUsageError("Please ensure that Selected MI is enabled for the vault")
    if use_system_assigned_msi and (
            vault_identity.type is None
            or "systemassigned" not in vault_identity.type.lower()):
        raise ArgumentUsageError("Please ensure that System MI is enabled for the vault")
    if not identity_id:
        return
    if vault_identity.type is None or "userassigned" not in vault_identity.type.lower():
        raise ArgumentUsageError("Please ensure that User MI is enabled for the vault")
    known_identities = (known.lower() for known in vault_identity.user_assigned_identities.keys())
    if identity_id.lower() not in known_identities:
        raise ArgumentUsageError("""
            Vault does not have the specified User MI.
            Please ensure you've provided the correct --mi-user-assigned.
            """)
def validate_wl_restore(item, item_type, restore_mode, recovery_mode):
    """Validate the fields of a workload recovery config before restoring.

    Raises InvalidArgumentValueError for unsupported or inconsistent values;
    returns None when everything checks out.
    """
    # Historic checks, retained for reference:
    # if source_resource_id is None or source_resource_id.lower() != item.properties.source_resource_id.lower():
    #     raise InvalidArgumentValueError("""
    #         The source_resource_id specified in recovery config file is incorrect. Please correct it and retry the
    #         operation. Correct value should be - {}.
    #         """.format(item.properties.source_resource_id))
    # if workload_type is None or workload_type.lower() != item.properties.workload_type.lower():
    #     raise InvalidArgumentValueError("""
    #         The workload_type specified in recovery config file is incorrect. Please correct it and retry the
    #         operation. Correct value should be - {}.
    #         """.format(item.properties.workload_type))
    if item_type is None or item_type.lower() not in ('sql', 'saphana'):
        raise InvalidArgumentValueError("""
        The item_type specified in recovery config file is incorrect. Please correct it and retry the
        operation. Allowed values are: 'SQL', 'SAPHana'.
        """)
    if item_type.lower() not in item.properties.workload_type.lower():
        raise InvalidArgumentValueError("""
        The item_type and workload_type specified in recovery config file does not match. Please correct either
        of them and retry the operation.
        """)
    if restore_mode not in ('OriginalLocation', 'AlternateLocation'):
        raise InvalidArgumentValueError("""
        The restore_mode specified in recovery config file is incorrect. Please correct it and retry the
        operation. Allowed values are: 'OriginalLocation', 'AlternateLocation'.
        """)
    if recovery_mode is not None and recovery_mode != 'FileRecovery':
        raise InvalidArgumentValueError("""
        The recovery_mode specified in recovery config file is incorrect. Please correct it and retry the
        operation.
        """)
def validate_log_point_in_time(log_point_in_time, time_range_list):
    """Ensure the requested log point-in-time falls inside an allowed range.

    Timezone info is stripped from the range bounds before comparison.
    Returns None on success; raises InvalidArgumentValueError otherwise.
    """
    for allowed_range in time_range_list:
        range_start = allowed_range.start_time.replace(tzinfo=None)
        range_end = allowed_range.end_time.replace(tzinfo=None)
        if range_start <= log_point_in_time <= range_end:
            return
    raise InvalidArgumentValueError("""
        The log point in time specified in recovery config file does not belong to the allowed time range.
        Please correct it and retry the operation. To check the permissible time range use:
        'az backup recoverypoint show-log-chain' command.
        """)
def validate_crr(target_rg_id, rehydration_priority):
    """Validate arguments for a cross-region (secondary region) restore.

    A target resource group is mandatory, and archive rehydration is not
    available when restoring to the secondary region.
    """
    if target_rg_id is None:
        raise RequiredArgumentMissingError("Please provide target resource group using --target-resource-group.")
    if rehydration_priority is not None:
        raise MutuallyExclusiveArgumentError("Archive restore isn't supported for secondary region.")
def validate_czr(backup_config_response, recovery_point, use_secondary_region):
    """Validate that a cross-zonal restore (--target-zone) is permissible.

    Rewritten as guard clauses: each unmet precondition raises immediately,
    and the final two checks only apply to non-ZRS vaults.
    """
    properties = backup_config_response.properties
    backup_storage_redundancy = properties.storage_type
    cross_region_restore_flag = properties.cross_region_restore_flag
    vault_is_zrs = backup_storage_redundancy == StorageType.ZONE_REDUNDANT
    if not (cross_region_restore_flag or vault_is_zrs):
        raise ArgumentUsageError("""
        Please ensure either the vault storage redundancy is ZoneRedundant or the vault has CRR enabled or try
        removing --target-zone argument.
        """)
    if recovery_point.tier_type is None or recovery_point.tier_type != "VaultStandard":
        raise ArgumentUsageError("""
        Please ensure that the given RP tier type is 'VaultStandard' or remove --target-zone argument.
        """)
    if vault_is_zrs:
        return
    # Non-ZRS vault: the RP must be zone pinned and the restore must target
    # the secondary region.
    if recovery_point.properties.zones is None:
        raise ArgumentUsageError("""
        Please ensure that either the vault storage redundancy is ZoneRedundant or the recovery
        point is zone pinned, or remove --target-zone argument.
        """)
    if not use_secondary_region:
        raise ArgumentUsageError("""
        Please ensure that either the vault storage redundancy is ZoneRedundant or the restore
        is not to the primary region, or remove --target-zone argument.
        """)
def METHOD_NAME(recovery_point, rehydration_priority):
    """Require rehydration parameters when restoring an archive-tier recovery
    point; returns None otherwise."""
    is_archived = recovery_point.tier_type is not None and recovery_point.tier_type == 'VaultArchive'
    if is_archived and rehydration_priority is None:
        raise InvalidArgumentValueError("""The selected recovery point is in archive tier, provide additional
        parameters of rehydration duration and rehydration priority.""")
1,466 | get next | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import functools
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.core.tracing.decorator_async import distributed_trace_async
from azure.mgmt.core.exceptions import ARMErrorFormat
from ... import models as _models
from ..._vendor import _convert_request
from ...operations._location_extension_types_operations import build_list_request
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
# NOTE: AutoRest-generated operations class (see file header) -- manual edits
# will be lost when the client is regenerated.
class LocationExtensionTypesOperations:
    """LocationExtensionTypesOperations async operations.

    You should not instantiate this class directly. Instead, you should create a Client instance that
    instantiates it for you and attaches it as an attribute.

    :ivar models: Alias to model classes used in this operation group.
    :type models: ~azure.mgmt.kubernetesconfiguration.v2022_01_15_preview.models
    :param client: Client for service requests.
    :param config: Configuration of service client.
    :param serializer: An object model serializer.
    :param deserializer: An object model deserializer.
    """

    models = _models

    def __init__(self, client, config, serializer, deserializer) -> None:
        self._client = client
        self._serialize = serializer
        self._deserialize = deserializer
        self._config = config

    @distributed_trace
    def list(
        self,
        location: str,
        **kwargs: Any
    ) -> AsyncIterable["_models.ExtensionTypeList"]:
        """List all Extension Types.

        :param location: extension location.
        :type location: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either ExtensionTypeList or the result of cls(response)
        :rtype:
         ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.kubernetesconfiguration.v2022_01_15_preview.models.ExtensionTypeList]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.ExtensionTypeList"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))

        def prepare_request(next_link=None):
            # First page is built from the operation's URL template; follow-up
            # pages reuse the service-supplied next_link.
            if not next_link:
                request = build_list_request(
                    subscription_id=self._config.subscription_id,
                    location=location,
                    template_url=self.list.metadata['url'],
                )
                request = _convert_request(request)
                request.url = self._client.format_url(request.url)
            else:
                request = build_list_request(
                    subscription_id=self._config.subscription_id,
                    location=location,
                    template_url=next_link,
                )
                request = _convert_request(request)
                request.url = self._client.format_url(request.url)
                request.method = "GET"
            return request

        async def extract_data(pipeline_response):
            # Deserialize one page and hand the paging machinery the
            # continuation token plus the page's items.
            deserialized = self._deserialize("ExtensionTypeList", pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)
            return deserialized.next_link or None, AsyncList(list_of_elem)

        async def METHOD_NAME(next_link=None):
            # Fetch one page; anything other than HTTP 200 is mapped to the
            # appropriate azure-core exception.
            request = prepare_request(next_link)

            pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
            response = pipeline_response.http_response
            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
                raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

            return pipeline_response

        return AsyncItemPaged(
            METHOD_NAME, extract_data
        )
    list.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.KubernetesConfiguration/locations/{location}/extensionTypes'}  # type: ignore
1,467 | parse xml | #!/usr/bin/env python3
#
# Copyright 2020, Data61, CSIRO (ABN 41 687 119 230)
#
# SPDX-License-Identifier: BSD-2-Clause or GPL-2.0-only
#
# seL4 Invocation ID Generator
# ============================
from __future__ import print_function
from jinja2 import Environment, BaseLoader
import argparse
import sys
import xml.dom.minidom
import pkg_resources
from condition import condition_to_cpp
# We require jinja2 to be at least version 2.10,
# In the past we used the 'namespace' feature from that version.
# other versions of jinja, particularly `minijinja`, don't support namespaces.
# However in case `namespace` is needed in the future require a
# version which supports it.
pkg_resources.require("jinja2>=2.10")
COMMON_HEADER = """
/*
* Copyright 2020, Data61, CSIRO (ABN 41 687 119 230)
*
{%- if libsel4 -%}
* SPDX-License-Identifier: BSD-2-Clause
{%- else -%}
* SPDX-License-Identifier: GPL-2.0-only
{%- endif %}
*/
/* This header was generated by kernel/tools/invocation_header_gen.py.
*
* To add an invocation call number, edit libsel4/include/interfaces/sel4.xml.
*
*/"""
INVOCATION_TEMPLATE = COMMON_HEADER + """
#pragma once
enum invocation_label {
InvalidInvocation,
{%- for label, condition in invocations %}
{%- if condition %}
#if {{condition}}
{%- endif %}
{{label}},
{%- if condition %}
#endif
{%- endif %}
{%- endfor %}
nInvocationLabels
};
{%- if libsel4 %}
#include <sel4/sel4_arch/invocation.h>
#include <sel4/arch/invocation.h>
{%- endif %}
"""
SEL4_ARCH_INVOCATION_TEMPLATE = COMMON_HEADER + """
#pragma once
{%- if not libsel4 %}
#include <api/invocation.h>
{%- endif %}
enum sel4_arch_invocation_label {
{%- for label, condition in invocations %}
{%- if condition %}
{%- if loop.first %}
#error "First sel4_arch invocation label cannot be conditional"
{%- endif %}
#if {{condition}}
{%- endif %}
{%- if loop.first %}
{{label}} = nInvocationLabels,
{%- else %}
{{label}},
{%- endif %}
{%- if condition %}
#endif
{%- endif %}
{%- endfor %}
{%- if invocations|length == 0 %}
nSeL4ArchInvocationLabels = nInvocationLabels
{%- else %}
nSeL4ArchInvocationLabels
{%- endif %}
};
"""
ARCH_INVOCATION_TEMPLATE = COMMON_HEADER + """
#pragma once
{%- if not libsel4 %}
#include <arch/api/sel4_invocation.h>
{%- endif %}
enum arch_invocation_label {
{%- for label, condition in invocations %}
{%- if condition %}
{%- if loop.first %}
#error "First arch invocation label cannot be conditional"
{%- endif %}
#if {{condition}}
{%- endif %}
{%- if loop.first %}
{{label}} = nSeL4ArchInvocationLabels,
{%- else %}
{{label}},
{%- endif %}
{%- if condition %}
#endif
{%- endif %}
{%- endfor %}
{%- if invocations|length == 0 %}
nArchInvocationLabels = nSeL4ArchInvocationLabels
{%- else %}
nArchInvocationLabels
{%- endif %}
};
"""
def parse_args():
    """Build and evaluate the generator's command-line interface.

    --xml and --dest are opened as file objects by argparse; --arch and
    --sel4_arch are mutually exclusive layer selectors.
    """
    parser = argparse.ArgumentParser(description='Generate seL4 invocation API \
constants and header files')
    parser.add_argument('--xml', type=argparse.FileType('r'),
                        help='Name of xml file with invocation definitions', required=True)
    parser.add_argument('--dest', type=argparse.FileType('w'),
                        help='Name of file to create', required=True)
    parser.add_argument('--libsel4', action='store_true',
                        help='Is this being generated for libsel4?')
    layer_group = parser.add_mutually_exclusive_group()
    layer_group.add_argument('--arch', action='store_true',
                             help='Is this being generated for the arch layer?')
    layer_group.add_argument('--sel4_arch', action='store_true',
                             help='Is this being generated for the seL4 arch layer?')
    return parser.parse_args()
def METHOD_NAME(xml_file):
    """Extract (invocation id, C preprocessor condition) pairs from the spec.

    :param xml_file: open file object (or path) for the invocation XML,
        e.g. libsel4/include/interfaces/sel4.xml
    :return: list of (label, condition) string tuples, one per <method>
        element, in document order
    Exits the process with status -1 if the XML cannot be parsed.
    """
    try:
        doc = xml.dom.minidom.parse(xml_file)
    except Exception as e:
        # Narrowed from a bare `except:`, which would also swallow
        # SystemExit/KeyboardInterrupt; include the cause in the message.
        print("Error: invalid xml file: {}".format(e), file=sys.stderr)
        sys.exit(-1)

    invocation_labels = []
    for method in doc.getElementsByTagName("method"):
        invocation_labels.append((str(method.getAttribute("id")),
                                  str(condition_to_cpp(method.getElementsByTagName("condition")))))

    return invocation_labels
def generate(args, invocations):
    """Render the appropriate invocation-header template into args.dest.

    The template is chosen by the layer flags (--arch / --sel4_arch / neither)
    and the destination file is closed after writing.
    """
    header_title = "LIBSEL4" if args.libsel4 else "API"

    if args.arch:
        template_source = ARCH_INVOCATION_TEMPLATE
    elif args.sel4_arch:
        template_source = SEL4_ARCH_INVOCATION_TEMPLATE
    else:
        template_source = INVOCATION_TEMPLATE
    template = Environment(loader=BaseLoader).from_string(template_source)

    data = template.render({'header_title': header_title, 'libsel4': args.libsel4,
                            'invocations': invocations, 'num_invocations': len(invocations)})
    args.dest.write(data)
    args.dest.close()
if __name__ == "__main__":
    # Script entry point: parse CLI args, read the XML spec, render the header.
    cli_args = parse_args()
    labels = METHOD_NAME(cli_args.xml)
    cli_args.xml.close()
    generate(cli_args, labels)
1,468 | test vcs url for stable version | from django.test import TestCase
from django.test.utils import override_settings
from django_dynamic_fixture import get
from readthedocs.builds.constants import BRANCH, EXTERNAL, LATEST, TAG
from readthedocs.builds.models import Version
from readthedocs.projects.models import Project
class VersionMixin:
    """Shared fixtures: the `pip` project with an external (PR), a branch
    (stable) and a tag (latest) version, plus a subproject and a Spanish
    translation of that subproject."""

    fixtures = ["eric", "test_data"]

    def setUp(self):
        self.client.login(username="eric", password="test")
        self.pip = Project.objects.get(slug="pip")
        # Create a External Version. ie: pull/merge request Version.
        self.external_version = get(
            Version,
            identifier="9F86D081884C7D659A2FEAA0C55AD015A",
            verbose_name="9999",
            slug="pr-9999",
            project=self.pip,
            active=True,
            type=EXTERNAL,
        )
        self.branch_version = get(
            Version,
            identifier="origin/stable",
            verbose_name="stable",
            slug="stable",
            project=self.pip,
            active=True,
            type=BRANCH,
        )
        self.tag_version = get(
            Version,
            identifier="origin/master",
            verbose_name="latest",
            slug="latest",
            project=self.pip,
            active=True,
            type=TAG,
        )
        self.subproject = get(Project, slug="subproject", language="en")
        self.translation_subproject = get(
            Project,
            language="es",
            slug="translation-subproject",
            main_language_project=self.subproject,
        )
        self.pip.add_subproject(self.subproject)
class TestVersionModel(VersionMixin, TestCase):
    """vcs_url, commit_name and get_downloads behaviour of Version for
    GitHub/GitLab external versions, branch/tag versions, subprojects and
    translated subprojects."""

    def test_vcs_url_for_external_version_github(self):
        self.pip.repo = "https://github.com/pypa/pip"
        self.pip.save()

        # GitHub external versions link to the pull request.
        expected_url = (
            f"https://github.com/pypa/pip/pull/{self.external_version.verbose_name}"
        )
        self.assertEqual(self.external_version.vcs_url, expected_url)

    def test_vcs_url_for_external_version_gitlab(self):
        self.pip.repo = "https://gitlab.com/pypa/pip"
        self.pip.save()

        # GitLab external versions link to the merge request.
        expected_url = f"https://gitlab.com/pypa/pip/merge_requests/{self.external_version.verbose_name}"
        self.assertEqual(self.external_version.vcs_url, expected_url)

    def test_vcs_url_for_latest_version(self):
        # `latest` follows the project's default branch (or VCS fallback).
        slug = self.pip.default_branch or self.pip.vcs_class().fallback_branch
        expected_url = f"https://github.com/pypa/pip/tree/{slug}/"
        self.assertEqual(self.tag_version.vcs_url, expected_url)

    def METHOD_NAME(self):
        expected_url = f"https://github.com/pypa/pip/tree/{self.branch_version.ref}/"
        self.assertEqual(self.branch_version.vcs_url, expected_url)

    def test_commit_name_for_stable_version(self):
        self.assertEqual(self.branch_version.commit_name, "stable")

    def test_commit_name_for_latest_version(self):
        self.assertEqual(self.tag_version.commit_name, "master")

    def test_commit_name_for_external_version(self):
        self.assertEqual(
            self.external_version.commit_name, self.external_version.identifier
        )

    @override_settings(
        PRODUCTION_DOMAIN="readthedocs.org",
        PUBLIC_DOMAIN="readthedocs.io",
        USE_SUBDOMAIN=True,
    )
    def test_get_downloads(self):
        # Downloads dict is empty until the pdf/epub flags are set.
        version = self.branch_version
        self.assertDictEqual(version.get_downloads(), {})
        version.has_pdf = True
        version.has_epub = True
        version.save()
        expected = {
            "epub": "//pip.readthedocs.io/_/downloads/en/stable/epub/",
            "pdf": "//pip.readthedocs.io/_/downloads/en/stable/pdf/",
        }
        self.assertDictEqual(version.get_downloads(), expected)

    @override_settings(
        PRODUCTION_DOMAIN="readthedocs.org",
        PUBLIC_DOMAIN="readthedocs.io",
        USE_SUBDOMAIN=True,
    )
    def test_get_downloads_subproject(self):
        # Subproject downloads are served under the parent project's domain.
        version = self.subproject.versions.get(slug=LATEST)
        self.assertDictEqual(version.get_downloads(), {})
        version.has_pdf = True
        version.has_epub = True
        version.save()
        expected = {
            "epub": "//pip.readthedocs.io/_/downloads/subproject/en/latest/epub/",
            "pdf": "//pip.readthedocs.io/_/downloads/subproject/en/latest/pdf/",
        }
        self.assertDictEqual(version.get_downloads(), expected)

    @override_settings(
        PRODUCTION_DOMAIN="readthedocs.org",
        PUBLIC_DOMAIN="readthedocs.io",
        USE_SUBDOMAIN=True,
    )
    def test_get_downloads_translation_subproject(self):
        # Translations keep the subproject path but use their own language.
        version = self.translation_subproject.versions.get(slug=LATEST)
        self.assertDictEqual(version.get_downloads(), {})
        version.has_pdf = True
        version.has_epub = True
        version.save()
        expected = {
            "epub": "//pip.readthedocs.io/_/downloads/subproject/es/latest/epub/",
            "pdf": "//pip.readthedocs.io/_/downloads/subproject/es/latest/pdf/",
        }
        self.assertDictEqual(version.get_downloads(), expected)
1,469 | test uncond acquire return val | """Generic thread tests.
Meant to be used by dummy_thread and thread. To allow for different modules
to be used, test_main() can be called with the module to use as the thread
implementation as its sole argument.
"""
import dummy_thread as _thread
import time
import Queue
import random
import unittest
from test import test_support
DELAY = 0 # Set > 0 when testing a module other than dummy_thread, such as
# the 'thread' module.
class LockTests(unittest.TestCase):
    """Test lock objects."""

    def setUp(self):
        # Create a lock
        self.lock = _thread.allocate_lock()

    def test_initlock(self):
        #Make sure locks start unlocked (the assertion below checks the lock
        #is NOT held; the original comment said "locked").
        self.assertTrue(not self.lock.locked(),
                        "Lock object is not initialized unlocked.")

    def test_release(self):
        # Test self.lock.release()
        self.lock.acquire()
        self.lock.release()
        self.assertTrue(not self.lock.locked(),
                        "Lock object did not release properly.")

    def test_improper_release(self):
        #Make sure release of an unlocked thread raises _thread.error
        self.assertRaises(_thread.error, self.lock.release)

    def test_cond_acquire_success(self):
        #Make sure the conditional acquiring of the lock works.
        self.assertTrue(self.lock.acquire(0),
                        "Conditional acquiring of the lock failed.")

    def test_cond_acquire_fail(self):
        #Test acquiring locked lock returns False
        self.lock.acquire(0)
        self.assertTrue(not self.lock.acquire(0),
                        "Conditional acquiring of a locked lock incorrectly "
                        "succeeded.")

    def test_uncond_acquire_success(self):
        #Make sure unconditional acquiring of a lock works.
        self.lock.acquire()
        self.assertTrue(self.lock.locked(),
                        "Uncondional locking failed.")

    def METHOD_NAME(self):
        #Make sure that an unconditional locking returns True.
        self.assertTrue(self.lock.acquire(1) is True,
                        "Unconditional locking did not return True.")
        self.assertTrue(self.lock.acquire() is True)

    def test_uncond_acquire_blocking(self):
        #Make sure that unconditional acquiring of a locked lock blocks.
        def delay_unlock(to_unlock, delay):
            """Hold on to lock for a set amount of time before unlocking."""
            time.sleep(delay)
            to_unlock.release()

        self.lock.acquire()
        start_time = int(time.time())
        _thread.start_new_thread(delay_unlock,(self.lock, DELAY))
        if test_support.verbose:
            print
            print "*** Waiting for thread to release the lock "\
                  "(approx. %s sec.) ***" % DELAY
        self.lock.acquire()
        end_time = int(time.time())
        if test_support.verbose:
            print "done"
        # With dummy_thread DELAY is 0, so this holds trivially; with a real
        # thread module DELAY is raised to 2 by test_main().
        self.assertTrue((end_time - start_time) >= DELAY,
                        "Blocking by unconditional acquiring failed.")
class MiscTests(unittest.TestCase):
    """Miscellaneous tests."""

    def test_exit(self):
        #Make sure _thread.exit() raises SystemExit
        self.assertRaises(SystemExit, _thread.exit)

    def test_ident(self):
        #Test sanity of _thread.get_ident()
        self.assertIsInstance(_thread.get_ident(), int,
                              "_thread.get_ident() returned a non-integer")
        self.assertTrue(_thread.get_ident() != 0,
                        "_thread.get_ident() returned 0")

    def test_LockType(self):
        #Make sure _thread.LockType is the same type as _thread.allocate_lock()
        self.assertIsInstance(_thread.allocate_lock(), _thread.LockType,
                              "_thread.LockType is not an instance of what "
                              "is returned by _thread.allocate_lock()")

    def test_interrupt_main(self):
        #Calling start_new_thread with a function that executes interrupt_main
        # should raise KeyboardInterrupt upon completion.
        def call_interrupt():
            _thread.interrupt_main()
        self.assertRaises(KeyboardInterrupt, _thread.start_new_thread,
                          call_interrupt, tuple())

    def test_interrupt_in_main(self):
        # Make sure that if interrupt_main is called in the main thread that
        # KeyboardInterrupt is raised instantly.
        self.assertRaises(KeyboardInterrupt, _thread.interrupt_main)
class ThreadTests(unittest.TestCase):
    """Test thread creation."""

    def test_arg_passing(self):
        #Make sure that parameter passing works.
        def arg_tester(queue, arg1=False, arg2=False):
            """Use to test _thread.start_new_thread() passes args properly."""
            queue.put((arg1, arg2))

        # Positional args as a tuple, kwargs only, then a mix of both.
        testing_queue = Queue.Queue(1)
        _thread.start_new_thread(arg_tester, (testing_queue, True, True))
        result = testing_queue.get()
        self.assertTrue(result[0] and result[1],
                        "Argument passing for thread creation using tuple failed")
        _thread.start_new_thread(arg_tester, tuple(), {'queue':testing_queue,
                                                       'arg1':True, 'arg2':True})
        result = testing_queue.get()
        self.assertTrue(result[0] and result[1],
                        "Argument passing for thread creation using kwargs failed")
        _thread.start_new_thread(arg_tester, (testing_queue, True), {'arg2':True})
        result = testing_queue.get()
        self.assertTrue(result[0] and result[1],
                        "Argument passing for thread creation using both tuple"
                        " and kwargs failed")

    def test_multi_creation(self):
        #Make sure multiple threads can be created.
        def queue_mark(queue, delay):
            """Wait for ``delay`` seconds and then put something into ``queue``"""
            time.sleep(delay)
            queue.put(_thread.get_ident())

        thread_count = 5
        testing_queue = Queue.Queue(thread_count)
        if test_support.verbose:
            print
            print "*** Testing multiple thread creation "\
                  "(will take approx. %s to %s sec.) ***" % (DELAY, thread_count)
        for count in xrange(thread_count):
            if DELAY:
                # Stagger the threads with small random delays.
                local_delay = round(random.random(), 1)
            else:
                local_delay = 0
            _thread.start_new_thread(queue_mark,
                                     (testing_queue, local_delay))
        time.sleep(DELAY)
        if test_support.verbose:
            print 'done'
        self.assertTrue(testing_queue.qsize() == thread_count,
                        "Not all %s threads executed properly after %s sec." %
                        (thread_count, DELAY))
def test_main(imported_module=None):
    """Run all test classes.

    Pass a real thread module as ``imported_module`` to test it instead of
    dummy_thread; DELAY is then raised so blocking tests actually block.
    """
    global _thread, DELAY
    if imported_module:
        _thread = imported_module
        DELAY = 2
    if test_support.verbose:
        print
        print "*** Using %s as _thread module ***" % _thread
    test_support.run_unittest(LockTests, MiscTests, ThreadTests)

if __name__ == '__main__':
    test_main()
1,470 | test nested conditions | from homeassistant.components.light import ATTR_BRIGHTNESS
from homeassistant.const import (
CONF_CONDITION,
CONF_ENTITY_ID,
STATE_OFF,
STATE_ON,
STATE_UNAVAILABLE,
)
from homeassistant.core import HomeAssistant
from custom_components.powercalc.const import (
CONF_COMPOSITE,
CONF_FIXED,
CONF_LINEAR,
CONF_MAX_POWER,
CONF_MIN_POWER,
CONF_POWER,
)
from tests.common import (
run_powercalc_setup,
)
async def test_composite(hass: HomeAssistant) -> None:
    """First matching condition wins: with the temperature outside 17-25 the
    fixed strategy is skipped, so the linear strategy supplies the power."""
    sensor_config = {
        CONF_ENTITY_ID: "light.test",
        CONF_COMPOSITE: [
            {
                CONF_CONDITION: {
                    "condition": "numeric_state",
                    "entity_id": "sensor.temperature",
                    "above": 17,
                    "below": 25,
                },
                CONF_FIXED: {
                    CONF_POWER: 50,
                },
            },
            {
                CONF_CONDITION: {
                    "condition": "state",
                    "entity_id": "light.test",
                    "state": "on",
                },
                CONF_LINEAR: {
                    CONF_MIN_POWER: 10,
                    CONF_MAX_POWER: 20,
                },
            },
        ],
    }

    await run_powercalc_setup(hass, sensor_config, {})

    hass.states.async_set("sensor.temperature", "12")
    await hass.async_block_till_done()

    hass.states.async_set("light.test", STATE_ON, {ATTR_BRIGHTNESS: 200})
    await hass.async_block_till_done()

    # Linear result at brightness 200 -- presumably 10 + (200/255) * 10;
    # confirm against powercalc's linear strategy implementation.
    assert hass.states.get("sensor.test_power").state == "17.84"
async def test_template_condition(hass: HomeAssistant) -> None:
    """A template condition selects the first strategy while true; the final
    entry has no condition and therefore acts as the fallback."""
    sensor_config = {
        CONF_ENTITY_ID: "light.test",
        CONF_COMPOSITE: [
            {
                CONF_CONDITION: {
                    "condition": "template",
                    "value_template": "{{ (state_attr('device_tracker.iphone', 'battery_level')|int) > 50 }}",
                },
                CONF_FIXED: {
                    CONF_POWER: 10,
                },
            },
            {
                CONF_FIXED: {
                    CONF_POWER: 20,
                },
            },
        ],
    }

    await run_powercalc_setup(hass, sensor_config, {})

    hass.states.async_set("device_tracker.iphone", STATE_ON, {"battery_level": "60"})
    await hass.async_block_till_done()

    hass.states.async_set("light.test", STATE_ON)
    await hass.async_block_till_done()

    assert hass.states.get("sensor.test_power").state == "10.00"

    # Battery drops below the 50% threshold -> fallback strategy applies.
    hass.states.async_set("device_tracker.iphone", STATE_ON, {"battery_level": "40"})
    await hass.async_block_till_done()

    assert hass.states.get("sensor.test_power").state == "20.00"
async def test_power_sensor_unavailable_when_no_condition_matches(
    hass: HomeAssistant,
) -> None:
    """When no composite entry's condition matches (light is on but the only
    condition requires it off), the power sensor goes unavailable."""
    sensor_config = {
        CONF_ENTITY_ID: "light.test",
        CONF_COMPOSITE: [
            {
                CONF_CONDITION: {
                    "condition": "state",
                    "entity_id": "light.test",
                    "state": STATE_OFF,
                },
                CONF_FIXED: {
                    CONF_POWER: 10,
                },
            },
        ],
    }

    await run_powercalc_setup(hass, sensor_config, {})

    hass.states.async_set("light.test", STATE_ON)
    await hass.async_block_till_done()

    assert hass.states.get("sensor.test_power").state == STATE_UNAVAILABLE
async def METHOD_NAME(hass: HomeAssistant) -> None:
    """Nested and/or conditions: power is 10 W while test1 is OFF AND
    (test2 is ON OR the test3 template is truthy); otherwise unavailable."""
    sensor_config = {
        CONF_ENTITY_ID: "light.test",
        CONF_COMPOSITE: [
            {
                CONF_CONDITION: {
                    "condition": "and",
                    "conditions": [
                        {
                            "condition": "state",
                            "entity_id": "binary_sensor.test1",
                            "state": STATE_OFF,
                        },
                        {
                            "condition": "or",
                            "conditions": [
                                {
                                    "condition": "state",
                                    "entity_id": "binary_sensor.test2",
                                    "state": STATE_ON,
                                },
                                {
                                    "condition": "template",
                                    "value_template": "{{ is_state('binary_sensor.test3', 'on') }}",
                                },
                            ],
                        },
                    ],
                },
                CONF_FIXED: {
                    CONF_POWER: 10,
                },
            },
        ],
    }
    await run_powercalc_setup(hass, sensor_config, {})

    # test1 off, test2 on -> AND(left, OR-left) satisfied.
    hass.states.async_set("light.test", STATE_ON)
    hass.states.async_set("binary_sensor.test1", STATE_OFF)
    hass.states.async_set("binary_sensor.test2", STATE_ON)
    hass.states.async_set("binary_sensor.test3", STATE_OFF)
    await hass.async_block_till_done()
    assert hass.states.get("sensor.test_power").state == "10.00"

    # test1 off, test2 off but the template (test3 on) is truthy -> OR-right satisfied.
    hass.states.async_set("binary_sensor.test1", STATE_OFF)
    hass.states.async_set("binary_sensor.test2", STATE_OFF)
    hass.states.async_set("binary_sensor.test3", STATE_ON)
    await hass.async_block_till_done()
    assert hass.states.get("sensor.test_power").state == "10.00"

    # test1 on -> the AND's first operand fails -> no entry matches.
    hass.states.async_set("binary_sensor.test1", STATE_ON)
    hass.states.async_set("binary_sensor.test2", STATE_OFF)
    hass.states.async_set("binary_sensor.test3", STATE_ON)
    await hass.async_block_till_done()
    assert hass.states.get("sensor.test_power").state == STATE_UNAVAILABLE
1,471 | test genetic modification upgrade 8 9 | import pytest
def test_genetic_modification_upgrade_1_2(upgrader, genetic_modification_1):
    """Upgrading v1 -> v2 bumps the schema version and keeps the description."""
    upgraded = upgrader.upgrade(
        'genetic_modification',
        genetic_modification_1,
        current_version='1',
        target_version='2',
    )
    assert upgraded['schema_version'] == '2'
    assert upgraded.get('modification_description') == 'some description'
def test_genetic_modification_upgrade_2_3(upgrader, genetic_modification_2):
    """Upgrading v2 -> v3 renames fields and drops the obsolete ones."""
    upgraded = upgrader.upgrade(
        'genetic_modification',
        genetic_modification_2,
        current_version='2',
        target_version='3',
    )
    assert upgraded['schema_version'] == '3'
    assert upgraded.get('description') == 'some description'
    assert upgraded.get('zygosity') == 'homozygous'
    assert upgraded.get('purpose') == 'tagging'
    for dropped_key in ('modification_genome_coordinates', 'modification_treatments'):
        assert dropped_key not in upgraded
# NOTE(review): the block below is a deliberately disabled test kept as a bare
# string literal (a no-op statement); the first paragraph of the string
# explains why it cannot run against the test app.
'''
Commented this test out because the linked technique objects are not embedded for the upgrade
but are for the test so it fails when it's trying to resolve the linked object by UUID. In
the former case, it's a link, in the latter case it's the embedded object. I can make the test
work but then the upgrade doesn't do what it should do.

def test_genetic_modification_upgrade_5_6(upgrader, genetic_modification_5, crispr, registry):
    value = upgrader.upgrade('genetic_modification', genetic_modification_5, registry=registry,
                             current_version='5', target_version='6')
    assert value['schema_version'] == '6'
    assert 'modification_techniques' not in value
    assert value['method'] == 'CRISPR'
    assert 'modified_site' not in value
    assert 'target' not in value
    assert 'purpose' in value
    assert value['purpose'] == 'analysis'
    assert len(value['guide_rna_sequences']) == 2
    assert value['aliases'][0] == 'encode:crispr_technique1-CRISPR'
    assert value['introduced_sequence'] == 'TCGA'
    assert 'reagents' in value
    assert value['reagents'][0]['source'] == 'sigma'
    assert value['reagents'][0]['identifier'] == '12345'
'''
def test_genetic_modification_upgrade_6_7(upgrader, genetic_modification_6):
    """Upgrading v6 -> v7 maps the purpose to 'characterization'."""
    upgraded = upgrader.upgrade(
        'genetic_modification',
        genetic_modification_6,
        current_version='6',
        target_version='7',
    )
    assert upgraded['schema_version'] == '7'
    assert upgraded.get('purpose') == 'characterization'
"""
Like test_upgrade_5_6, this test is commented out because get_by_uuid method
is used in the upgrade, which doesn't work for the test app.
def test_genetic_modification_upgrade_7_8(upgrader, genetic_modification_7_invalid_reagent,
genetic_modification_7_valid_reagent,
genetic_modification_7_multiple_matched_identifiers,
genetic_modification_7_multiple_reagents):
value = upgrader.upgrade('genetic_modification', genetic_modification_7_invalid_reagent,
current_version='7', target_version='8')
assert value['schema_version'] == '8'
assert not value.get('reagents')
assert value.get('notes')
value = upgrader.upgrade('genetic_modification', genetic_modification_7_valid_reagent,
current_version='7', target_version='8')
assert value['schema_version'] == '8'
assert value.get('reagents')
assert not value.get('notes')
value = upgrader.upgrade('genetic_modification', genetic_modification_7_multiple_matched_identifiers,
current_version='7', target_version='8')
assert value['schema_version'] == '8'
reagents = value.get('reagents', [])
assert len(reagents) == 1
assert reagents[0]['identifier'].startswith('addgene')
assert 'addgene' in reagents[0]['source']
value = upgrader.upgrade('genetic_modification', genetic_modification_7_multiple_reagents,
current_version='7', target_version='8')
assert value['schema_version'] == '8'
reagents = value.get('reagents', [])
assert len(reagents) == 2
for reagent in reagents:
assert reagent['identifier'].startswith('addgene')
assert 'addgene' in reagent['source']
assert 'url' in reagent
"""
def METHOD_NAME(upgrader, genetic_modification_8):
    """Upgrading v8 -> v9 maps the purpose to 'characterization'."""
    upgraded = upgrader.upgrade(
        'genetic_modification',
        genetic_modification_8,
        current_version='8',
        target_version='9',
    )
    assert upgraded['schema_version'] == '9'
    assert upgraded.get('purpose') == 'characterization'
def test_genetic_modification_upgrade_9_10(upgrader, genetic_modification_9, human_donor_1):
    # v9 -> v10: the scalar `method` becomes the list-valued
    # `nucleic_acid_delivery_method`, and `donor` moves to
    # `introduced_elements_donor` (referencing the donor's @id).
    value = upgrader.upgrade('genetic_modification', genetic_modification_9,
                             current_version='9', target_version='10')
    # NOTE(review): unlike the sibling tests, schema_version is not asserted
    # here -- consider adding `assert value['schema_version'] == '10'`.
    assert value['nucleic_acid_delivery_method'] == ['transient transfection']
    assert 'method' not in value
    assert value['introduced_elements_donor'] == human_donor_1['@id']
    assert 'donor' not in value
def test_genetic_modification_upgrade_10_11(upgrader, genetic_modification_10):
    """Upgrading v10 -> v11 defaults guide_type to sgRNA and records a note."""
    upgraded = upgrader.upgrade(
        'genetic_modification',
        genetic_modification_10,
        current_version='10',
        target_version='11',
    )
    assert upgraded['schema_version'] == '11'
    assert upgraded.get('guide_type') == 'sgRNA'
    assert upgraded['notes'] == 'guide_type on this GM was defaulted to sgRNA in an upgrade.'
def test_genetic_modification_upgrade_11_12(
        upgrader,
        genetic_modification_11,
        binding_genetic_modification_2,
        transgene_insertion_2,
        tale_replacement,
        activation_genetic_modification_2,
        crispr_deletion,
        crispri
):
    """v11 -> v12 derives the new `category` value for each GM fixture."""
    value = upgrader.upgrade('genetic_modification', genetic_modification_11,
                             current_version='11', target_version='12')
    assert value['schema_version'] == '12'
    assert value.get('category') == 'CRISPR cutting'
    value = upgrader.upgrade('genetic_modification', binding_genetic_modification_2,
                             current_version='11', target_version='12')
    assert value['schema_version'] == '12'
    assert value.get('category') == 'CRISPR dCas'
    value = upgrader.upgrade('genetic_modification', transgene_insertion_2,
                             current_version='11', target_version='12')
    assert value['schema_version'] == '12'
    assert value.get('category') == 'insertion'
    value = upgrader.upgrade('genetic_modification', tale_replacement,
                             current_version='11', target_version='12')
    assert value['schema_version'] == '12'
    assert value.get('category') == 'mutagenesis'
    value = upgrader.upgrade('genetic_modification', activation_genetic_modification_2,
                             current_version='11', target_version='12')
    assert value['schema_version'] == '12'
    assert value.get('category') == 'CRISPRa'
    # The last two fixtures are mutated to purpose=characterization before
    # upgrading, to cover that purpose's category mapping as well.
    crispr_deletion['purpose'] = 'characterization'
    value = upgrader.upgrade('genetic_modification', crispr_deletion,
                             current_version='11', target_version='12')
    assert value['schema_version'] == '12'
    assert value.get('category') == 'CRISPR cutting'
    crispri['purpose'] = 'characterization'
    value = upgrader.upgrade('genetic_modification', crispri,
                             current_version='11', target_version='12')
    assert value['schema_version'] == '12'
    assert value.get('category') == 'CRISPRi'
1,472 | test configure | # -*- coding: utf-8 -*-
# Copyright (C) 2014-2018 Red Hat, Inc.
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions of
# the GNU General Public License v.2, or (at your option) any later version.
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY expressed or implied, including the implied warranties of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details. You should have received a copy of the
# GNU General Public License along with this program; if not, write to the
# Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA. Any Red Hat trademarks that are incorporated in the
# source code or documentation are not subject to the GNU General Public
# License and may only be used or replicated with the express permission of
# Red Hat, Inc.
#
from __future__ import absolute_import
import dnf.cli.commands.group as group
import dnf.comps
import dnf.exceptions
from dnf.comps import CompsQuery
from dnf.cli.option_parser import OptionParser
import tests.support
class GroupCommandStaticTest(tests.support.TestCase):
    """GroupCommand tests that need no repo/comps setup, only a mocked cli."""

    def test_canonical(self):
        # _canonical() must normalize the legacy command aliases
        # (grouplist, groups, group update, ...) into the canonical
        # subcommand + argument form.
        cmd = group.GroupCommand(tests.support.mock.MagicMock())
        for args, out in [
                (['grouplist', 'crack'], ['list', 'crack']),
                (['groups'], ['summary']),
                (['group', 'info', 'crack'], ['info', 'crack']),
                (['group', 'update', 'crack'], ['upgrade', 'crack'])]:
            parser = OptionParser()
            parser.parse_main_args(args)
            parser.parse_command_args(cmd, args)
            cmd._canonical()
            self.assertEqual(cmd.opts.subcmd, out[0])
            self.assertEqual(cmd.opts.args, out[1:])

    def test_split_extcmds(self):
        # 'group install' must forward the group names, the default package
        # types and the configured strict flag to env_group_install().
        cmd = group.GroupCommand(tests.support.mock.MagicMock())
        cmd.base.conf = dnf.conf.Conf()
        tests.support.command_run(cmd, ['install', 'crack'])
        cmd.base.env_group_install.assert_called_with(
            ['crack'], ('mandatory', 'default', 'conditional'),
            cmd.base.conf.strict)
class GroupCommandTest(tests.support.DnfBaseTestCase):
    """GroupCommand tests that need the 'main' repo plus comps data."""

    REPOS = ["main"]
    COMPS = True
    INIT_SACK = True

    def setUp(self):
        super(GroupCommandTest, self).setUp()
        self.cmd = group.GroupCommand(self.base.mock_cli())
        self.parser = OptionParser()

    def test_environment_list(self):
        # 'sugar*' matches exactly one known environment and none is installed.
        env_inst, env_avail = self.cmd._environment_lists(['sugar*'])
        self.assertLength(env_inst, 0)
        self.assertLength(env_avail, 1)
        self.assertEqual(env_avail[0].name, 'Sugar Desktop Environment')

    def METHOD_NAME(self):
        # Configuring 'group remove' must demand allow_erasing without
        # forcing a metadata refresh.
        tests.support.command_configure(self.cmd, ['remove', 'crack'])
        demands = self.cmd.cli.demands
        self.assertTrue(demands.allow_erasing)
        self.assertFalse(demands.freshest_metadata)
class CompsQueryTest(tests.support.DnfBaseTestCase):
    """Tests for CompsQuery over environments/groups and their statuses."""

    REPOS = []
    COMPS = True

    def test_all(self):
        # Querying every kind and every status resolves both patterns.
        status_all = CompsQuery.AVAILABLE | CompsQuery.INSTALLED
        kinds_all = CompsQuery.ENVIRONMENTS | CompsQuery.GROUPS
        q = CompsQuery(self.comps, self.history, kinds_all, status_all)
        res = q.get('sugar*', '*er*')
        self.assertCountEqual(res.environments,
                              ('sugar-desktop-environment',))
        self.assertCountEqual(res.groups, ("Peppers", 'somerset'))

    def test_err(self):
        # Restricting the query to available ENVIRONMENTS makes '*er*'
        # (which only matches groups) raise CompsError.
        q = CompsQuery(self.comps, self.history, CompsQuery.ENVIRONMENTS,
                       CompsQuery.AVAILABLE)
        with self.assertRaises(dnf.exceptions.CompsError):
            q.get('*er*')

    def test_installed(self):
        q = CompsQuery(self.comps, self.history, CompsQuery.GROUPS,
                       CompsQuery.INSTALLED)
        # Install the 'somerset' group first so the INSTALLED query finds it.
        self.base.read_mock_comps(False)
        grp = self.base.comps.group_by_pattern('somerset')
        self.base.group_install(grp.id, ('mandatory',))
        self._swdb_commit()
        res = q.get('somerset')
        self.assertEmpty(res.environments)
        grp_ids = list(res.groups)
        self.assertCountEqual(grp_ids, ('somerset',))
1,473 | choices | import enum
from django.contrib.postgres.fields import ArrayField
from django.core.validators import MinValueValidator, MaxValueValidator
from django.db import models
from django.db.models import F
from django.urls import reverse
from zentral.contrib.inventory.models import BaseEnrollment
# configuration
class PrincipalUserDetectionSource(enum.Enum):
    """Sources a principal user can be detected from, mapped to display labels."""

    company_portal = "Company portal"
    google_chrome = "Google Chrome"
    logged_in_user = "Logged-in user"

    @classmethod
    def METHOD_NAME(cls):
        """Return (name, label) pairs suitable for a Django `choices` option."""
        return tuple((source.name, source.value) for source in cls)

    @classmethod
    def accepted_sources(cls):
        """Return the set of accepted source names."""
        return {source.name for source in cls}
class Configuration(models.Model):
    """Server-side Munki configuration shared by its enrollments."""

    name = models.CharField(max_length=255, unique=True)
    description = models.TextField(blank=True)
    # Percentage (0-100) of machines that report full inventory app info.
    inventory_apps_full_info_shard = models.IntegerField(
        validators=[MinValueValidator(0), MaxValueValidator(100)],
        default=100
    )
    # Values restricted to PrincipalUserDetectionSource names via choices.
    principal_user_detection_sources = ArrayField(
        models.CharField(max_length=64, METHOD_NAME=PrincipalUserDetectionSource.METHOD_NAME()),
        blank=True,
        default=list,
    )
    principal_user_detection_domains = ArrayField(
        models.CharField(max_length=255),
        blank=True,
        default=list
    )
    collected_condition_keys = ArrayField(
        models.CharField(max_length=128),
        blank=True,
        default=list,
        help_text="List of Munki condition keys to collect as machine extra facts"
    )
    managed_installs_sync_interval_days = models.IntegerField(
        "Managed installs sync interval in days",
        validators=[MinValueValidator(1), MaxValueValidator(90)],
        default=7
    )
    auto_reinstall_incidents = models.BooleanField(
        "Auto reinstall incidents",
        default=False,
        help_text="Enable automatic package reinstall incidents"
    )
    auto_failed_install_incidents = models.BooleanField(
        "Auto failed install incidents",
        default=False,
        help_text="Enable automatic package failed install incidents"
    )
    # Monotonic counter bumped on every save (see save() below).
    version = models.PositiveIntegerField(editable=False)
    created_at = models.DateTimeField(auto_now_add=True)
    updated_at = models.DateTimeField(auto_now=True)

    def __str__(self):
        return self.name

    def get_absolute_url(self):
        return reverse("munki:configuration", args=(self.pk,))

    def save(self, *args, **kwargs):
        # First save starts at version 0; subsequent saves increment the
        # counter in the database via an F() expression so concurrent saves
        # cannot clobber each other's version bump.
        if not self.id:
            self.version = 0
        else:
            self.version = F("version") + 1
        super().save(*args, **kwargs)
# enrollment
class Enrollment(BaseEnrollment):
    """Munki enrollment, tied to one specific configuration."""

    configuration = models.ForeignKey(Configuration, on_delete=models.CASCADE)

    def get_absolute_url(self):
        """Deep-link to this enrollment's section on the configuration page."""
        return f"{self.configuration.get_absolute_url()}#enrollment-{self.pk}"

    def get_description_for_distributor(self):
        """Label shown by the distributor for this enrollment."""
        return "Zentral pre/postflight"
class EnrolledMachine(models.Model):
    """A machine (by serial number) enrolled via a given Enrollment."""

    enrollment = models.ForeignKey(Enrollment, on_delete=models.CASCADE)
    serial_number = models.TextField(db_index=True)
    # Per-machine secret used to authenticate its requests.
    token = models.CharField(max_length=64, unique=True)
    created_at = models.DateTimeField(auto_now_add=True)
# munki state
class MunkiState(models.Model):
    """Latest reported Munki run state for a machine (one row per serial)."""

    machine_serial_number = models.TextField(unique=True)
    munki_version = models.CharField(max_length=32, blank=True, null=True)
    user_agent = models.CharField(max_length=64)
    ip = models.GenericIPAddressField(blank=True, null=True)
    # Hash of the last processed report, used to detect unchanged payloads.
    sha1sum = models.CharField(max_length=40, blank=True, null=True)
    last_managed_installs_sync = models.DateTimeField(blank=True, null=True)
    run_type = models.CharField(max_length=64, blank=True, null=True)
    start_time = models.DateTimeField(blank=True, null=True)
    end_time = models.DateTimeField(blank=True, null=True)
    last_seen = models.DateTimeField(auto_now=True)
# managed install
class ManagedInstall(models.Model):
    """Install state of one managed item on one machine."""

    machine_serial_number = models.TextField(db_index=True)
    name = models.TextField(db_index=True)
    display_name = models.TextField()
    installed_version = models.TextField(null=True)
    installed_at = models.DateTimeField(null=True)
    # True when the item is scheduled for reinstallation.
    reinstall = models.BooleanField(default=False)
    # Version / timestamp of the last failed install attempt, if any.
    failed_version = models.TextField(null=True)
    failed_at = models.DateTimeField(null=True)
    updated_at = models.DateTimeField(auto_now=True)

    class Meta:
        unique_together = (("machine_serial_number", "name"),)
1,474 | convert to igrf dipole coefficients | #
# ISC License
#
# Copyright (c) 2016, Autonomous Vehicle Systems Lab, University of Colorado at Boulder
#
# Permission to use, copy, modify, and/or distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
#
import numpy as np
from Basilisk.utilities import macros
def exponentialAtmosphere(atmosModule, name):
    """
    Sets the exponential atmosphere model parameters for a particular planet.

    :param atmosModule: atmospheric environment module
    :param name: planet name string
    """
    # Per-planet presets; only Earth is currently supported.
    presets = {
        "earth": {
            "planetRadius": 6378136.6,  # meters
            "baseDensity": 1.217,       # kg/m^3
            "scaleHeight": 8500.0,      # meters
            "localTemp": 293.0,
        },
    }
    planet = presets.get(name)
    if planet is None:
        # Keep the original best-effort behaviour: report and bail out.
        print("ERROR: " + name + " not setup for exponential atmosphere model\n")
        return
    for attribute, value in planet.items():
        setattr(atmosModule, attribute, value)
def centeredDipoleMagField(magFieldModule, name):
    """
    Sets the centered dipole magnetic field model parameters for a particular planet.

    Earth gets explicit IGRF g10/g11/h11 coefficients; the other planets are
    converted from NASA fact-sheet dipole strength/tilt/longitude values by the
    helper below (the /10000 factor converts gauss to tesla).  Non-Earth
    branches reference `macros` and the conversion helper lazily, only when
    their branch is taken.

    :param magFieldModule: magnetic field environment module
    :param name: planet name string
    """
    if name == "earth":
        # The following parameters are from the 2020 IGRF model
        # (https://www.ngdc.noaa.gov/IAGA/vmod/igrf.html)
        magFieldModule.g10 = -30926.00/1e9  # Tesla
        magFieldModule.g11 = -2318.00/1e9  # Tesla
        magFieldModule.h11 = 5817.00/1e9  # Tesla
        magFieldModule.planetRadius = 6371.2*1000  # meters
    elif name == "mercury":
        # The following parameters are from NASA planetary fact sheet
        # (https://nssdc.gsfc.nasa.gov/planetary/planetfact.html)
        magFieldModule.planetRadius = 2440.0*1000  # meters
        METHOD_NAME(0.002/10000,  # [T] dipole strength
                    0.0*macros.D2R,  # [rad] Dipole tilt to rotational axis
                    0.0*macros.D2R,  # [rad] Longitude of tilt
                    magFieldModule)
    elif name == "jupiter":
        # The following parameters are from NASA planetary fact sheet
        # (https://nssdc.gsfc.nasa.gov/planetary/planetfact.html)
        magFieldModule.planetRadius = 71398.0*1000  # meters
        METHOD_NAME(4.30/10000,  # [T] dipole strength
                    9.4*macros.D2R,  # [rad] Dipole tilt to rotational axis
                    200.1*macros.D2R,  # [rad] Longitude of tilt
                    magFieldModule)
    elif name == "saturn":
        # The following parameters are from NASA planetary fact sheet
        # (https://nssdc.gsfc.nasa.gov/planetary/planetfact.html)
        magFieldModule.planetRadius = 60330.0*1000  # meters
        METHOD_NAME(0.215/10000,  # [T] dipole strength
                    0.0*macros.D2R,  # [rad] Dipole tilt to rotational axis
                    0.0*macros.D2R,  # [rad] Longitude of tilt
                    magFieldModule)
    elif name == "uranus":
        # The following parameters are from NASA planetary fact sheet
        # (https://nssdc.gsfc.nasa.gov/planetary/planetfact.html)
        magFieldModule.planetRadius = 25600.0*1000  # meters
        METHOD_NAME(0.228/10000,  # [T] dipole strength
                    58.6*macros.D2R,  # [rad] Dipole tilt to rotational axis
                    53.6*macros.D2R,  # [rad] Longitude of tilt
                    magFieldModule)
    elif name == "neptune":
        # The following parameters are from NASA planetary fact sheet
        # (https://nssdc.gsfc.nasa.gov/planetary/planetfact.html)
        magFieldModule.planetRadius = 24765.0*1000  # meters
        METHOD_NAME(0.142/10000,  # [T] dipole strength
                    46.9*macros.D2R,  # [rad] Dipole tilt to rotational axis
                    288.*macros.D2R,  # [rad] Longitude of tilt
                    magFieldModule)
    else:
        print("ERROR: " + name + " not setup for centered dipole magnetic field model. Options include mercury, earth, jupiter, saturn, uranus and neptune. \n")
    return
def METHOD_NAME(nominalField, tilt, longitudeOfTilt, magFieldModule):
    """
    Converts the NASA Magnetosphere parameters from
    https://nssdc.gsfc.nasa.gov/planetary/planetfact.html
    to IGRF compatible dipole coefficients.

    :param nominalField: nominal magnetic field parameter given in Tesla
    :param tilt: Dipole tilt to rotational axis in radians
    :param longitudeOfTilt: Longitude of tilt in radians
    :param magFieldModule: magnetic field environment module
    """
    # Conversion taken from Appendix D of doi:10.1007/978-1-4939-0802-8.
    colatitude = np.pi - tilt
    eastLongitude = np.pi - longitudeOfTilt
    sinColat = np.sin(colatitude)
    magFieldModule.g11 = nominalField * sinColat * np.cos(eastLongitude)
    magFieldModule.h11 = nominalField * sinColat * np.sin(eastLongitude)
    magFieldModule.g10 = nominalField * np.cos(colatitude)
    return
|
1,475 | last modified by | # coding=utf-8
# *** WARNING: this file was generated by pulumi. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import copy
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from ._enums import *
__all__ = [
'AmountResponse',
'SystemDataResponse',
]
# NOTE: generated pulumi output type (see the file header) -- keep edits to
# comments only, the generator owns the structure.
@pulumi.output_type
class AmountResponse(dict):
    """
    The amount.
    """
    def __init__(__self__, *,
                 currency: Optional[str] = None,
                 value: Optional[float] = None):
        """
        The amount.
        :param str currency: The type of currency being used for the value.
        :param float value: Amount value.
        """
        # Unset (None) fields are omitted entirely rather than stored as None.
        if currency is not None:
            pulumi.set(__self__, "currency", currency)
        if value is not None:
            pulumi.set(__self__, "value", value)

    @property
    @pulumi.getter
    def currency(self) -> Optional[str]:
        """
        The type of currency being used for the value.
        """
        return pulumi.get(self, "currency")

    @property
    @pulumi.getter
    def value(self) -> Optional[float]:
        """
        Amount value.
        """
        return pulumi.get(self, "value")
# NOTE: generated pulumi output type (see the file header) -- keep edits to
# comments only, the generator owns the structure.
@pulumi.output_type
class SystemDataResponse(dict):
    """
    Metadata pertaining to creation and last modification of the resource.
    """
    @staticmethod
    def __key_warning(key: str):
        # Maps camelCase wire keys to the snake_case property names so that
        # dict-style access can warn and point users at the right getter.
        suggest = None
        if key == "createdAt":
            suggest = "created_at"
        elif key == "createdBy":
            suggest = "created_by"
        elif key == "createdByType":
            suggest = "created_by_type"
        elif key == "lastModifiedAt":
            suggest = "last_modified_at"
        elif key == "lastModifiedBy":
            suggest = "last_modified_by"
        elif key == "lastModifiedByType":
            suggest = "last_modified_by_type"

        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in SystemDataResponse. Access the value via the '{suggest}' property getter instead.")

    def __getitem__(self, key: str) -> Any:
        SystemDataResponse.__key_warning(key)
        return super().__getitem__(key)

    def get(self, key: str, default = None) -> Any:
        SystemDataResponse.__key_warning(key)
        return super().get(key, default)

    def __init__(__self__, *,
                 created_at: Optional[str] = None,
                 created_by: Optional[str] = None,
                 created_by_type: Optional[str] = None,
                 last_modified_at: Optional[str] = None,
                 METHOD_NAME: Optional[str] = None,
                 last_modified_by_type: Optional[str] = None):
        """
        Metadata pertaining to creation and last modification of the resource.
        :param str created_at: The timestamp of resource creation (UTC).
        :param str created_by: The identity that created the resource.
        :param str created_by_type: The type of identity that created the resource.
        :param str last_modified_at: The timestamp of resource last modification (UTC)
        :param str last_modified_by: The identity that last modified the resource.
        :param str last_modified_by_type: The type of identity that last modified the resource.
        """
        # Unset (None) fields are omitted entirely rather than stored as None.
        if created_at is not None:
            pulumi.set(__self__, "created_at", created_at)
        if created_by is not None:
            pulumi.set(__self__, "created_by", created_by)
        if created_by_type is not None:
            pulumi.set(__self__, "created_by_type", created_by_type)
        if last_modified_at is not None:
            pulumi.set(__self__, "last_modified_at", last_modified_at)
        if METHOD_NAME is not None:
            pulumi.set(__self__, "last_modified_by", METHOD_NAME)
        if last_modified_by_type is not None:
            pulumi.set(__self__, "last_modified_by_type", last_modified_by_type)

    @property
    @pulumi.getter(name="createdAt")
    def created_at(self) -> Optional[str]:
        """
        The timestamp of resource creation (UTC).
        """
        return pulumi.get(self, "created_at")

    @property
    @pulumi.getter(name="createdBy")
    def created_by(self) -> Optional[str]:
        """
        The identity that created the resource.
        """
        return pulumi.get(self, "created_by")

    @property
    @pulumi.getter(name="createdByType")
    def created_by_type(self) -> Optional[str]:
        """
        The type of identity that created the resource.
        """
        return pulumi.get(self, "created_by_type")

    @property
    @pulumi.getter(name="lastModifiedAt")
    def last_modified_at(self) -> Optional[str]:
        """
        The timestamp of resource last modification (UTC)
        """
        return pulumi.get(self, "last_modified_at")

    @property
    @pulumi.getter(name="lastModifiedBy")
    def METHOD_NAME(self) -> Optional[str]:
        """
        The identity that last modified the resource.
        """
        return pulumi.get(self, "last_modified_by")

    @property
    @pulumi.getter(name="lastModifiedByType")
    def last_modified_by_type(self) -> Optional[str]:
        """
        The type of identity that last modified the resource.
        """
        return pulumi.get(self, "last_modified_by_type")
|
1,476 | on reset | #
# Copyright (C) 2016 TUDelft
#
# This file is part of paparazzi.
#
# paparazzi is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# paparazzi is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with paparazzi. If not, see <http://www.gnu.org/licenses/>.
#
# This is not the main script. Run dist.py to have a distance counter.
import wx
import sys
import os
import threading
import socket
import array
from io import StringIO
import wx
import array
from PIL import Image
import math
PPRZ_SRC = os.getenv("PAPARAZZI_SRC", os.path.normpath(os.path.join(os.path.dirname(os.path.abspath(__file__)), '../../../..')))
sys.path.append(PPRZ_SRC + "/sw/ext/pprzlink/lib/v1.0/python")
from pprzlink.ivy import IvyMessagesInterface
WIDTH = 300
class DistanceCounterFrame(wx.Frame):
    """wx frame that integrates INS position deltas into a travelled distance.

    Subscribes to the Ivy bus and accumulates 2D (x, y) distance from INS
    messages and elapsed flight time from ROTORCRAFT_STATUS messages.
    """

    def message_recv(self, ac_id, msg):
        # Called from the Ivy thread: only update state here and defer all
        # GUI work to the wx main loop via wx.CallAfter.
        if msg.name == "INS":
            self.msg_count = self.msg_count + 1
            # INS position fields are scaled by 1/256 (fixed-point).
            newx = float(msg.get_field(0)) / 256.0
            newy = float(msg.get_field(1)) / 256.0
            moved = ((newx - self.ins_msg_x) ** 2 + (newy - self.ins_msg_y) ** 2)
            if self.init == 0:
                # First message only seeds the reference position.
                self.init = 1
            elif self.running:
                self.distance = self.distance + math.sqrt(moved)
            self.ins_msg_x = newx
            self.ins_msg_y = newy
            self.ins_msg_z = msg.get_field(2)
            # graphical update
            wx.CallAfter(self.update)
        if msg.name == "ROTORCRAFT_STATUS":
            self.msg_count_time = self.msg_count_time + 1
            time_new = float(msg['cpu_time'])
            # Only accumulate forward time deltas; the first sample and any
            # time going backwards (e.g. autopilot restart) are ignored.
            if time_new > self.time_old and self.time_old != 0 and self.running:
                self.time_elapsed += time_new - self.time_old
            self.time_old = time_new
            # graphical update
            wx.CallAfter(self.update)

    def update(self):
        # Trigger a repaint; scheduled on the wx main thread by CallAfter.
        self.Refresh()

    def OnSize(self, event):
        self.w = event.GetSize()[0]
        self.h = event.GetSize()[1]
        self.Refresh()

    def OnPaint(self, e):
        # Paint Area
        dc = wx.PaintDC(self)
        brush = wx.Brush("white")
        dc.SetBackground(brush)
        dc.Clear()
        # Background
        dc.SetBrush(wx.Brush(wx.Colour(0,0,0), wx.TRANSPARENT))
        font = wx.Font(11, wx.DEFAULT, wx.NORMAL, wx.NORMAL)
        dc.SetFont(font)
        dc.DrawText("INS Packets:" + str(self.msg_count),2,2)
        dc.DrawText("Data: " + str(self.ins_msg_x) + ", " + str(self.ins_msg_y) + ", " + str(self.ins_msg_z) + ".",2,22)
        dc.DrawText("Distance: " + str(round(float(self.distance)/1.0,2)) + " m",2,22+20)
        dc.DrawText("Time elapsed: " + str(self.time_elapsed) + "s",2,22+20+20)
        if self.running:
            dc.DrawText("Counter running", 150, 22+20)
        else:
            dc.DrawText("Counter paused", 150, 22+20)

    def onStartStop(self, event):
        # Toggle accumulation without resetting the totals.
        self.running = not self.running
        self.Refresh()

    def METHOD_NAME(self, event):
        # Reset all counters and re-arm the initial-position seeding.
        self.time_old = 0
        self.time_elapsed = 0
        self.distance = 0
        self.init = 0
        self.Refresh()
        return

    def __init__(self, _settings):
        # Command line arguments
        self.settings = _settings
        # Statistics
        self.data = { 'packets': 0, 'bytes': 0}
        self.w = WIDTH
        self.h = WIDTH
        # Frame
        wx.Frame.__init__(self, id=-1, parent=None, name=u'Distance Counter',
                          size=wx.Size(self.w, self.h), title=u'Distance Counter')
        start_stop_button = wx.Button(self, wx.ID_ANY, 'Start/Pause', (150, 58),size=(90, 25))
        start_stop_button.Bind(wx.EVT_BUTTON, self.onStartStop)
        reset_button = wx.Button(self, wx.ID_ANY, 'Reset', (245, 58), size=(50, 25))
        reset_button.Bind(wx.EVT_BUTTON, self.METHOD_NAME)
        self.Bind(wx.EVT_PAINT, self.OnPaint)
        self.Bind(wx.EVT_SIZE, self.OnSize)
        self.Bind(wx.EVT_CLOSE, self.OnClose)
        # IVY: subscribe after the GUI exists, message_recv runs on the Ivy thread.
        self.interface = IvyMessagesInterface("DistanceCounter")
        self.interface.subscribe(self.message_recv)
        self.msg_count = 0
        self.msg_count_time = 0
        self.distance = 0
        self.time_old = 0
        self.time_elapsed = 0
        self.ins_msg_x = 0
        self.ins_msg_y = 0
        self.ins_msg_z = 0
        self.init = 0
        self.running = True

    def OnClose(self, event):
        # Shut down the Ivy bus before destroying the window.
        self.interface.shutdown()
        self.Destroy()
if __name__ == '__main__':
    # Guard against direct execution; this module is driven by dist.py.
    raise Exception('This is not the main script. Please run dist.py instead of distance_counter.py')
1,477 | test embedding loaded | # Copyright 2018 The TensorFlow Hub Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for text embedding exporting tool."""
import logging
import os
from distutils.version import LooseVersion
import numpy as np
import tensorflow.compat.v1 as tf
import tensorflow_hub as hub
from examples.text_embeddings import export
_MOCK_EMBEDDING = "\n".join(
["cat 1.11 2.56 3.45", "dog 1 2 3", "mouse 0.5 0.1 0.6"])
class ExportTokenEmbeddingTest(tf.test.TestCase):
    """Tests for exporting a token/fulltext embedding file as a hub Module."""

    def setUp(self):
        # Write the small mock embedding table to a fresh temp file per test.
        self._embedding_file_path = os.path.join(self.get_temp_dir(),
                                                 "mock_embedding_file.txt")
        with tf.gfile.GFile(self._embedding_file_path, mode="w") as f:
            f.write(_MOCK_EMBEDDING)

    def METHOD_NAME(self):
        # load() must parse 3 vocabulary tokens with 3-dimensional vectors.
        vocabulary, embeddings = export.load(self._embedding_file_path,
                                             export.parse_line)
        self.assertEqual((3,), np.shape(vocabulary))
        self.assertEqual((3, 3), np.shape(embeddings))

    def testExportTokenEmbeddingModule(self):
        export.export_module_from_file(
            embedding_file=self._embedding_file_path,
            export_path=self.get_temp_dir(),
            parse_line_fn=export.parse_line,
            num_oov_buckets=1,
            preprocess_text=False)
        with tf.Graph().as_default():
            hub_module = hub.Module(self.get_temp_dir())
            tokens = tf.constant(["cat", "lizard", "dog"])
            embeddings = hub_module(tokens)
            with tf.Session() as session:
                session.run(tf.tables_initializer())
                session.run(tf.global_variables_initializer())
                # "lizard" is out of vocabulary -> all-zero OOV bucket vector.
                self.assertAllClose(
                    session.run(embeddings),
                    [[1.11, 2.56, 3.45], [0.0, 0.0, 0.0], [1.0, 2.0, 3.0]])

    def testExportFulltextEmbeddingModule(self):
        export.export_module_from_file(
            embedding_file=self._embedding_file_path,
            export_path=self.get_temp_dir(),
            parse_line_fn=export.parse_line,
            num_oov_buckets=1,
            preprocess_text=True)
        with tf.Graph().as_default():
            hub_module = hub.Module(self.get_temp_dir())
            tokens = tf.constant(["cat", "cat cat", "lizard. dog", "cat? dog", ""])
            embeddings = hub_module(tokens)
            with tf.Session() as session:
                session.run(tf.tables_initializer())
                session.run(tf.global_variables_initializer())
                # Multi-token inputs are combined; e.g. "cat cat" expects
                # roughly sqrt(2) x the single "cat" vector (sqrt-n scaling).
                self.assertAllClose(
                    session.run(embeddings),
                    [[1.11, 2.56, 3.45], [1.57, 3.62, 4.88], [0.70, 1.41, 2.12],
                     [1.49, 3.22, 4.56], [0.0, 0.0, 0.0]],
                    rtol=0.02)

    def testEmptyInput(self):
        export.export_module_from_file(
            embedding_file=self._embedding_file_path,
            export_path=self.get_temp_dir(),
            parse_line_fn=export.parse_line,
            num_oov_buckets=1,
            preprocess_text=True)
        with tf.Graph().as_default():
            hub_module = hub.Module(self.get_temp_dir())
            tokens = tf.constant(["", "", ""])
            embeddings = hub_module(tokens)
            with tf.Session() as session:
                session.run(tf.tables_initializer())
                session.run(tf.global_variables_initializer())
                # Empty strings must map to all-zero vectors, not crash.
                self.assertAllClose(
                    session.run(embeddings),
                    [[0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0]],
                    rtol=0.02)

    def testEmptyLeading(self):
        export.export_module_from_file(
            embedding_file=self._embedding_file_path,
            export_path=self.get_temp_dir(),
            parse_line_fn=export.parse_line,
            num_oov_buckets=1,
            preprocess_text=True)
        with tf.Graph().as_default():
            hub_module = hub.Module(self.get_temp_dir())
            tokens = tf.constant(["", "cat dog"])
            embeddings = hub_module(tokens)
            with tf.Session() as session:
                session.run(tf.tables_initializer())
                session.run(tf.global_variables_initializer())
                # A leading empty string must not disturb later lookups.
                self.assertAllClose(
                    session.run(embeddings),
                    [[0.0, 0.0, 0.0], [1.49, 3.22, 4.56]],
                    rtol=0.02)
if __name__ == "__main__":
# This test is only supported in graph mode.
if tf.executing_eagerly():
logging.warning("Skipping running tests for TF Version: %s running eagerly.",
tf.__version__)
else:
tf.test.main() |
1,478 | test default protocol | import unittest
import shelve
import glob
from test import support
from collections.abc import MutableMapping
from test.test_dbm import dbm_iterator
def L1(s):
    """Decode the bytes *s* to a str using Latin-1."""
    return str(s, "latin-1")
class byteskeydict(MutableMapping):
    """Mapping that supports bytes keys.

    Keys are stored internally as Latin-1-decoded strings (via L1); the
    public interface accepts and yields ``bytes`` keys.
    """

    def __init__(self):
        self.d = {}  # str-keyed backing store

    def __getitem__(self, key):
        return self.d[L1(key)]

    def __setitem__(self, key, value):
        self.d[L1(key)] = value

    def __delitem__(self, key):
        del self.d[L1(key)]

    def __len__(self):
        return len(self.d)

    def iterkeys(self):
        # Yield keys re-encoded to bytes, mirroring the public interface.
        for k in self.d.keys():
            yield k.encode("latin-1")

    __iter__ = iterkeys

    def keys(self):
        return list(self.iterkeys())

    def copy(self):
        # Bug fix: ``__init__`` takes no arguments, so the previous
        # ``return byteskeydict(self.d)`` raised TypeError whenever
        # copy() was called.  Build a fresh instance and shallow-copy
        # the backing dict instead.
        new = byteskeydict()
        new.d = self.d.copy()
        return new
class TestCase(unittest.TestCase):
    """Behavioural tests for shelve.Shelf and shelve.open."""

    # Base filename for on-disk shelves; dbm backends may create several
    # files with this prefix, all removed in tearDown().
    fn = "shelftemp.db"

    def tearDown(self):
        for f in glob.glob(self.fn+"*"):
            support.unlink(f)

    def test_close(self):
        d1 = {}
        s = shelve.Shelf(d1, protocol=2, writeback=False)
        s['key1'] = [1,2,3,4]
        self.assertEqual(s['key1'], [1,2,3,4])
        self.assertEqual(len(s), 1)
        s.close()
        # After close() the shelf must reject all further operations.
        self.assertRaises(ValueError, len, s)
        try:
            s['key1']
        except ValueError:
            pass
        else:
            self.fail('Closed shelf should not find a key')

    def test_ascii_file_shelf(self):
        s = shelve.open(self.fn, protocol=0)
        try:
            s['key1'] = (1,2,3,4)
            self.assertEqual(s['key1'], (1,2,3,4))
        finally:
            s.close()

    def test_binary_file_shelf(self):
        s = shelve.open(self.fn, protocol=1)
        try:
            s['key1'] = (1,2,3,4)
            self.assertEqual(s['key1'], (1,2,3,4))
        finally:
            s.close()

    def test_proto2_file_shelf(self):
        s = shelve.open(self.fn, protocol=2)
        try:
            s['key1'] = (1,2,3,4)
            self.assertEqual(s['key1'], (1,2,3,4))
        finally:
            s.close()

    def test_in_memory_shelf(self):
        d1 = byteskeydict()
        with shelve.Shelf(d1, protocol=0) as s:
            s['key1'] = (1,2,3,4)
            self.assertEqual(s['key1'], (1,2,3,4))
        d2 = byteskeydict()
        with shelve.Shelf(d2, protocol=1) as s:
            s['key1'] = (1,2,3,4)
            self.assertEqual(s['key1'], (1,2,3,4))
        self.assertEqual(len(d1), 1)
        self.assertEqual(len(d2), 1)
        # Different pickle protocols must produce different stored bytes.
        self.assertNotEqual(d1.items(), d2.items())

    def test_mutable_entry(self):
        d1 = byteskeydict()
        with shelve.Shelf(d1, protocol=2, writeback=False) as s:
            s['key1'] = [1,2,3,4]
            self.assertEqual(s['key1'], [1,2,3,4])
            # Without writeback, mutating the fetched object is lost.
            s['key1'].append(5)
            self.assertEqual(s['key1'], [1,2,3,4])
        d2 = byteskeydict()
        with shelve.Shelf(d2, protocol=2, writeback=True) as s:
            s['key1'] = [1,2,3,4]
            self.assertEqual(s['key1'], [1,2,3,4])
            # With writeback, in-place mutation survives via the cache.
            s['key1'].append(5)
            self.assertEqual(s['key1'], [1,2,3,4,5])
        self.assertEqual(len(d1), 1)
        self.assertEqual(len(d2), 1)

    def test_keyencoding(self):
        d = {}
        key = 'Pöp'
        # the default keyencoding is utf-8
        shelve.Shelf(d)[key] = [1]
        self.assertIn(key.encode('utf-8'), d)
        # but a different one can be given
        shelve.Shelf(d, keyencoding='latin-1')[key] = [1]
        self.assertIn(key.encode('latin-1'), d)
        # with all consequences
        s = shelve.Shelf(d, keyencoding='ascii')
        self.assertRaises(UnicodeEncodeError, s.__setitem__, key, [1])

    def test_writeback_also_writes_immediately(self):
        # Issue 5754
        d = {}
        key = 'key'
        encodedkey = key.encode('utf-8')
        with shelve.Shelf(d, writeback=True) as s:
            s[key] = [1]
            p1 = d[encodedkey]  # Will give a KeyError if backing store not updated
            s['key'].append(2)
            p2 = d[encodedkey]
        self.assertNotEqual(p1, p2)  # Write creates new object in store

    def test_with(self):
        d1 = {}
        with shelve.Shelf(d1, protocol=2, writeback=False) as s:
            s['key1'] = [1,2,3,4]
            self.assertEqual(s['key1'], [1,2,3,4])
            self.assertEqual(len(s), 1)
        # Leaving the with-block must close the shelf.
        self.assertRaises(ValueError, len, s)
        try:
            s['key1']
        except ValueError:
            pass
        else:
            self.fail('Closed shelf should not find a key')

    def METHOD_NAME(self):
        # When no protocol is given, the shelf pickles with protocol 3.
        with shelve.Shelf({}) as s:
            self.assertEqual(s._protocol, 3)
from test import mapping_tests
class TestShelveBase(mapping_tests.BasicTestMappingProtocol):
    """Drive the generic mapping-protocol tests against a Shelf.

    Subclasses choose the pickle protocol (_args) and whether shelves are
    backed by an in-memory mapping or a dbm file (_in_mem).
    """

    fn = "shelftemp.db"
    counter = 0  # gives each on-disk shelf a distinct filename

    def __init__(self, *args, **kw):
        self._db = []  # every shelf opened, so tearDown can close them all
        mapping_tests.BasicTestMappingProtocol.__init__(self, *args, **kw)

    type2test = shelve.Shelf

    def _reference(self):
        return {"key1":"value1", "key2":2, "key3":(1,2,3)}

    def _empty_mapping(self):
        if self._in_mem:
            x= shelve.Shelf(byteskeydict(), **self._args)
        else:
            self.counter+=1
            x= shelve.open(self.fn+str(self.counter), **self._args)
        self._db.append(x)
        return x

    def tearDown(self):
        for db in self._db:
            db.close()
        self._db = []
        if not self._in_mem:
            for f in glob.glob(self.fn+"*"):
                support.unlink(f)
class TestAsciiFileShelve(TestShelveBase):
_args={'protocol':0}
_in_mem = False
class TestBinaryFileShelve(TestShelveBase):
_args={'protocol':1}
_in_mem = False
class TestProto2FileShelve(TestShelveBase):
_args={'protocol':2}
_in_mem = False
class TestAsciiMemShelve(TestShelveBase):
_args={'protocol':0}
_in_mem = True
class TestBinaryMemShelve(TestShelveBase):
_args={'protocol':1}
_in_mem = True
class TestProto2MemShelve(TestShelveBase):
_args={'protocol':2}
_in_mem = True
def test_main():
    """Run the whole suite once per pass of dbm_iterator.

    NOTE(review): the loop variable is unused -- presumably dbm_iterator
    switches the active dbm backend on each iteration so the file-backed
    shelf tests cover every backend present; confirm against test.test_dbm.
    """
    for module in dbm_iterator():
        support.run_unittest(
            TestAsciiFileShelve,
            TestBinaryFileShelve,
            TestProto2FileShelve,
            TestAsciiMemShelve,
            TestBinaryMemShelve,
            TestProto2MemShelve,
            TestCase
        )
if __name__ == "__main__":
test_main() |
1,479 | test binary classes | # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Matthews Correlation Coefficient Test."""
import tensorflow as tf
import numpy as np
from tensorflow_addons.metrics import MatthewsCorrelationCoefficient
from sklearn.metrics import matthews_corrcoef as sklearn_matthew
def test_config():
    """get_config()/from_config() must round-trip the metric's settings."""
    metric = MatthewsCorrelationCoefficient(num_classes=1)
    assert metric.num_classes == 1
    assert metric.dtype == tf.float32
    restored = MatthewsCorrelationCoefficient.from_config(metric.get_config())
    assert restored.num_classes == 1
    assert restored.dtype == tf.float32
def check_results(obj, value):
    """Assert that metric *obj*'s current result is close to *value*."""
    actual = obj.result().numpy()
    np.testing.assert_allclose(value, actual, atol=1e-6)
def METHOD_NAME():
    """MCC for a two-class problem with one-hot labels and predictions."""
    gt_label = tf.constant(
        [[0.0, 1.0], [0.0, 1.0], [0.0, 1.0], [1.0, 0.0]], dtype=tf.float32
    )
    preds = tf.constant(
        [[0.0, 1.0], [1.0, 0.0], [0.0, 1.0], [0.0, 1.0]], dtype=tf.float32
    )
    # Initialize
    mcc = MatthewsCorrelationCoefficient(2)
    # Update
    mcc.update_state(gt_label, preds)
    # Check results
    # Expected value is the hand-computed MCC (-1/3) for these four samples.
    check_results(mcc, [-0.33333334])
# See issue #2339
def test_multiple_classes():
    """MCC for a three-class problem, checked against scikit-learn."""
    # Build the same one-hot matrices as before, but from class indices
    # via identity-matrix row indexing instead of literal row lists.
    true_idx = np.array([0, 2, 0, 2, 1, 1, 0, 0, 2, 1])
    pred_idx = np.array([2, 0, 2, 2, 2, 2, 2, 0, 2, 2])
    eye = np.eye(3)
    gt_label = eye[true_idx]
    preds = eye[pred_idx]
    tensor_gt_label = tf.constant(gt_label, dtype=tf.float32)
    tensor_preds = tf.constant(preds, dtype=tf.float32)
    mcc = MatthewsCorrelationCoefficient(3)
    mcc.update_state(tensor_gt_label, tensor_preds)
    # Check results by comparing to results of scikit-learn matthew implementation.
    sklearn_result = sklearn_matthew(gt_label.argmax(axis=1), preds.argmax(axis=1))
    check_results(mcc, sklearn_result)
# Keras model API check
def test_keras_model():
    """Smoke test: the metric plugs into model.compile()/fit() without error."""
    model = tf.keras.Sequential()
    model.add(tf.keras.layers.Dense(64, activation="relu"))
    model.add(tf.keras.layers.Dense(64, activation="relu"))
    # NOTE(review): softmax over a single unit always outputs 1.0, so this
    # model cannot learn anything -- presumably acceptable because the test
    # only exercises the Keras API integration, but "sigmoid" would be the
    # conventional pairing with binary_crossentropy; confirm intent.
    model.add(tf.keras.layers.Dense(1, activation="softmax"))
    mcc = MatthewsCorrelationCoefficient(num_classes=1)
    model.compile(
        optimizer="Adam", loss="binary_crossentropy", metrics=["accuracy", mcc]
    )
    # data preparation
    data = np.random.random((10, 1))
    labels = np.random.random((10, 1))
    labels = np.where(labels > 0.5, 1.0, 0.0)
    model.fit(data, labels, epochs=1, batch_size=32, verbose=0)
def test_reset_state_graph():
    """reset_state must work when invoked from inside a tf.function."""
    gt_label = tf.constant(
        [[0.0, 1.0], [0.0, 1.0], [0.0, 1.0], [1.0, 0.0]], dtype=tf.float32
    )
    preds = tf.constant(
        [[0.0, 1.0], [1.0, 0.0], [0.0, 1.0], [0.0, 1.0]], dtype=tf.float32
    )
    mcc = MatthewsCorrelationCoefficient(2)
    mcc.update_state(gt_label, preds)

    @tf.function
    def reset_state():
        mcc.reset_state()

    reset_state()
    # Check results
    # After a reset the accumulated statistics are cleared, so MCC is 0.
    check_results(mcc, [0])
1,480 | delete rules | """iptables helper functions.
Unlike the `firewall` module, these functions know nothing about PaaSTA and
could effectively be a third-party library. They just make working with
iptables a little bit easier.
"""
import collections
import contextlib
import logging
import iptc
log = logging.getLogger(__name__)
RULE_TARGET_SORT_ORDER = {
# all else defaults to '0'
"LOG": 1,
"REJECT": 2.0,
}
_RuleBase = collections.namedtuple(
"_RuleBase", ("protocol", "src", "dst", "target", "matches", "target_parameters")
)
class Rule(_RuleBase):
    """Rule representation.

    Working with iptc's rule classes directly doesn't work well, since rules
    represent actual existing iptables rules, and changes are applied
    immediately. They're also difficult to compare.
    """

    def __new__(cls, *args, **kwargs):
        result = _RuleBase.__new__(cls, *args, **kwargs)
        # Validate eagerly so an invalid Rule can never be constructed.
        result.validate()
        return result

    def _replace(self, **kwargs):
        result = super()._replace(**kwargs)
        result.validate()
        return result

    def validate(self):
        """Assert the invariants that make Rule equality comparisons sound:
        sorted matches/target_parameters, tuple-valued parameters, and
        dash-style parameter names."""
        if self.target == "REJECT":
            assert any(
                name == "reject-with" for name, _ in self.target_parameters
            ), "REJECT rules must specify reject-with"
        assert tuple(sorted(self.matches)) == self.matches, "matches should be sorted"
        for match_name, params in self.matches:
            for param_name, param_value in params:
                assert (
                    "_" not in param_name
                ), f"use dashes instead of underscores in {param_name}"
                assert isinstance(
                    param_value, tuple
                ), f"value of {param_name} should be tuple"
        assert (
            tuple(sorted(self.target_parameters)) == self.target_parameters
        ), "target_parameters should be sorted"
        for param_name, param_value in self.target_parameters:
            assert (
                "_" not in param_name
            ), f"use dashes instead of underscores in {param_name}"
            assert isinstance(
                param_value, tuple
            ), f"value of {param_name} should be tuple"

    @classmethod
    def from_iptc(cls, rule):
        """Build an immutable, comparable Rule from a live iptc rule."""
        fields = {
            "protocol": rule.protocol,
            "src": rule.src,
            "dst": rule.dst,
            "target": rule.target.name,
            "matches": (),
            "target_parameters": (),
        }
        # Parameters are sorted so equal rules compare equal.
        for param_name, param_value in sorted(rule.target.get_all_parameters().items()):
            fields["target_parameters"] += ((param_name, tuple(param_value)),)
        matches = []
        for match in rule.matches:
            matches.append(
                (
                    match.name,
                    tuple(
                        (param, tuple(value))
                        for param, value in sorted(match.get_all_parameters().items())
                    ),
                )
            )
        # ensure that matches are sorted for consistency with matching
        fields["matches"] = tuple(sorted(matches))
        return cls(**fields)

    def to_iptc(self):
        """Convert back into a mutable iptc.Rule ready to be inserted."""
        rule = iptc.Rule()
        rule.protocol = self.protocol
        rule.src = self.src
        rule.dst = self.dst
        target = rule.create_target(self.target)
        for param_name, param_value in self.target_parameters:
            target.set_parameter(param_name, param_value)
        for name, params in self.matches:
            match = rule.create_match(name)
            for param_name, param_value in params:
                match.set_parameter(param_name, param_value)
        return rule
@contextlib.contextmanager
def iptables_txn(table):
    """Temporarily disable autocommit and commit at the end.

    If an exception occurs, changes are rolled back.

    By default, changes to iptables rules are applied immediately. In some
    cases, we want to avoid that.

    https://github.com/ldx/python-iptables#autocommit
    """
    assert table.autocommit is True, table.autocommit
    try:
        table.autocommit = False
        yield
        # Only reached when the with-body did not raise: apply the batch.
        table.commit()
    finally:
        # refresh() discards uncommitted changes on the error path and
        # re-reads kernel state on the success path.
        table.refresh()
        table.autocommit = True
class ChainDoesNotExist(Exception):
pass
def all_chains():
    """Return the set of chain names in the iptables FILTER table."""
    filter_table = iptc.Table(iptc.Table.FILTER)
    return {c.name for c in filter_table.chains}
def ensure_chain(chain, rules):
    """Idempotently ensure a chain exists and has an exact set of rules.

    This function creates or updates an existing chain to match the rules
    passed in.

    This function will not reorder existing rules, but any new rules are
    always inserted at the front of the chain.
    """
    try:
        existing = set(list_chain(chain))
    except ChainDoesNotExist:
        create_chain(chain)
        existing = set()

    # Add anything missing; iterate the caller's iterable (not the set)
    # to preserve the insertion order the caller chose.
    for rule in rules:
        if rule not in existing:
            insert_rule(chain, rule)

    stale = existing - set(rules)
    if stale:
        METHOD_NAME(chain, stale)
def _rule_sort_key(rule_tuple):
    """Sort key: (target priority, original position) -- stable for ties."""
    position, rule = rule_tuple
    priority = RULE_TARGET_SORT_ORDER.get(rule.target, 0)
    return (priority, position)
def reorder_chain(chain_name):
    """Ensure that any REJECT rules are last, and any LOG rules are second-to-last"""
    table = iptc.Table(iptc.Table.FILTER)
    with iptables_txn(table):
        rules = list_chain(chain_name)
        chain = iptc.Chain(table, chain_name)
        # sort the rules by rule_key, which uses (RULE_TARGET_SORT_ORDER, idx)
        sorted_rules_with_indices = sorted(enumerate(rules), key=_rule_sort_key)
        for new_index, (old_index, rule) in enumerate(sorted_rules_with_indices):
            if new_index == old_index:
                continue
            log.debug(f"reordering chain {chain_name} rule {rule} to #{new_index}")
            # NOTE(review): each replace_rule overwrites slot new_index using
            # the snapshot taken above; presumably safe because writes are
            # batched inside iptables_txn until commit -- confirm.
            chain.replace_rule(rule.to_iptc(), new_index)
def ensure_rule(chain, rule):
    """Insert *rule* into *chain* unless an identical rule is already there."""
    if rule not in list_chain(chain):
        insert_rule(chain, rule)
def insert_rule(chain_name, rule):
    """Insert *rule* at the front of *chain_name* in the FILTER table."""
    log.debug(f"adding rule to {chain_name}: {rule}")
    filter_table = iptc.Table(iptc.Table.FILTER)
    iptc.Chain(filter_table, chain_name).insert_rule(rule.to_iptc())
def METHOD_NAME(chain_name, rules):
    """Delete from *chain_name* every rule whose normalized form is in *rules*."""
    log.debug(f"deleting rules from {chain_name}: {rules}")
    table = iptc.Table(iptc.Table.FILTER)
    with iptables_txn(table):
        chain = iptc.Chain(table, chain_name)
        for potential_rule in chain.rules:
            # Compare via the immutable Rule wrapper; raw iptc rule objects
            # don't compare reliably (see Rule's docstring).
            if Rule.from_iptc(potential_rule) in rules:
                chain.delete_rule(potential_rule)
def create_chain(chain_name):
    """Create a new, empty chain in the FILTER table."""
    log.debug(f"creating chain: {chain_name}")
    filter_table = iptc.Table(iptc.Table.FILTER)
    filter_table.create_chain(chain_name)
def delete_chain(chain_name):
    """Flush all rules from *chain_name*, then delete the chain itself."""
    log.debug(f"deleting chain: {chain_name}")
    filter_table = iptc.Table(iptc.Table.FILTER)
    chain = iptc.Chain(filter_table, chain_name)
    chain.flush()
    chain.delete()
def list_chain(chain_name):
    """List rules in a chain.

    Returns a tuple of normalized ``Rule`` objects, or raises
    ChainDoesNotExist if the chain is not present in the FILTER table.
    """
    table = iptc.Table(iptc.Table.FILTER)
    chain = iptc.Chain(table, chain_name)
    # TODO: is there any way to do this without listing all chains? (probably slow)
    # If the chain doesn't exist, chain.rules will be an empty list, so we need
    # to make sure the chain actually _does_ exist.
    if chain in table.chains:
        return tuple(Rule.from_iptc(rule) for rule in chain.rules)
    else:
        raise ChainDoesNotExist(chain_name)
1,481 | send | from __future__ import annotations
import dataclasses
from enum import Enum, auto
import logging
import math
from anyio import (
TASK_STATUS_IGNORED,
WouldBlock,
create_memory_object_stream,
create_task_group,
)
from anyio.abc import TaskStatus
from anyio.streams.memory import MemoryObjectReceiveStream, MemoryObjectSendStream
from dataclasses import InitVar, dataclass
from serde import serde
from typing import (
Any,
Dict,
Iterable,
Set,
Tuple,
Type,
)
from modlunky2.web.api.framework.multiplexing import SendConnection, WSMultiplexerRoute
from modlunky2.web.api.framework.serde_tag import to_tagged_dict
from modlunky2.web.api.framework.session import SessionId
logger = logging.getLogger(__name__)
@serde
@dataclass(frozen=True)
class Subscribe:
topics: Set[str]
@serde
@dataclass(frozen=True)
class Unsubscribe:
topics: Set[str]
@serde
@dataclass(frozen=True)
class Published:
# Should be TaggedMessage, but I'm unsure how to use pyserde with it
message: Dict[str, Any]
class ServiceLevel(Enum):
MUST_DELIVER = auto()
MAY_DROP = auto()
@dataclass(frozen=True)
class PubSubTopic:
typ: Type[Any]
service_level: ServiceLevel
@dataclass
class _TopicInfo:
service_level: ServiceLevel
subscribers: Set[SessionId] = dataclasses.field(default_factory=set)
@dataclass
class _StreamPair:
max_buffer_size: InitVar[float]
METHOD_NAME: MemoryObjectSendStream[Published] = dataclasses.field(init=False)
recv: MemoryObjectReceiveStream[Published] = dataclasses.field(init=False)
def __post_init__(self, max_buffer_size: float):
self.METHOD_NAME, self.recv = create_memory_object_stream(max_buffer_size, Published)
@dataclass
class _SessionCopier:
connection: SendConnection
_may_drop: _StreamPair = dataclasses.field(init=False)
_must_deliver: _StreamPair = dataclasses.field(init=False)
def __post_init__(self):
self._may_drop = _StreamPair(0)
self._must_deliver = _StreamPair(math.inf)
def METHOD_NAME(self, level: ServiceLevel, pub: Published):
if level is ServiceLevel.MAY_DROP:
stream = self._may_drop.METHOD_NAME
elif level is ServiceLevel.MUST_DELIVER:
stream = self._must_deliver.METHOD_NAME
else:
raise ValueError(f"Unknown service level {level}") # pragma: no cover
try:
stream.send_nowait(pub)
except WouldBlock:
pass
async def run(
self,
*,
task_status: TaskStatus = TASK_STATUS_IGNORED,
) -> None:
"""Send messages until the client disconnects"""
async with self._may_drop.recv, self._must_deliver.recv, create_task_group() as tg:
await tg.start(self._run_one, self._may_drop.recv)
await tg.start(self._run_one, self._must_deliver.recv)
task_status.started()
async def _run_one(
self,
recv: MemoryObjectReceiveStream[Published],
*,
task_status: TaskStatus = TASK_STATUS_IGNORED,
) -> None:
task_status.started()
async for pub in recv:
await self.connection.METHOD_NAME(pub)
@dataclass
class PubSubManager:
topics: InitVar[Iterable[PubSubTopic]]
_topic_info: Dict[str, _TopicInfo] = dataclasses.field(
init=False, default_factory=dict
)
_sessions: Dict[SessionId, _SessionCopier] = dataclasses.field(
init=False, default_factory=dict
)
def __post_init__(self, topics: Iterable[PubSubTopic]):
for t in topics:
name = t.typ.__name__
if name in self._topic_info:
raise ValueError(f"Topic {name} appears more than once")
self._topic_info[name] = _TopicInfo(t.service_level)
@property
def multiplexer_routes(self) -> Tuple[WSMultiplexerRoute[Any], ...]:
return (
WSMultiplexerRoute(Subscribe, self._subscribe),
WSMultiplexerRoute(Unsubscribe, self._unsubscribe),
)
def publish(self, msg: Any):
topic_name = type(msg).__name__
if topic_name not in self._topic_info:
raise ValueError(f"Topic {topic_name} is unknown")
info = self._topic_info[topic_name]
pub = Published(to_tagged_dict(msg))
for sid in info.subscribers:
self._sessions[sid].METHOD_NAME(info.service_level, pub)
async def _subscribe(self, connection: SendConnection, req: Subscribe) -> None:
self._check_topics(req.topics)
self._maybe_add_session(connection)
for topic in req.topics:
self._topic_info[topic].subscribers.add(connection.session_id)
async def _unsubscribe(self, connection: SendConnection, req: Unsubscribe) -> None:
self._check_topics(req.topics)
for topic in req.topics:
self._topic_info[topic].subscribers.discard(connection.session_id)
def _check_topics(self, raw_topics: Set[str]) -> None:
unknown_topics: Set[str] = set()
for t in raw_topics:
if t not in self._topic_info:
unknown_topics.add(t)
if unknown_topics:
raise ValueError(f"Request contains unknown topics {unknown_topics!r}")
def _maybe_add_session(self, connection: SendConnection):
if connection.session_id in self._sessions:
return
self._sessions[connection.session_id] = _SessionCopier(connection)
connection.task_group.start_soon(
self._run_session,
connection.session_id,
name=f"pubsub copier for sid {connection.session_id}",
)
async def _run_session(self, session_id: SessionId):
try:
await self._sessions[session_id].run()
finally:
# Cleanup the session
for ti in self._topic_info.values():
ti.subscribers.discard(session_id)
del self._sessions[session_id] |
1,482 | test read top layer | import pytest
import numpy as np
import mikeio
from mikeio.spatial import GeometryUndefined
from mikeio.spatial import Grid2D, Grid3D
def test_dfs3_repr():
dfs = mikeio.open("tests/testdata/test_dfs3.dfs3")
assert "<mikeio.Dfs3>" in repr(dfs)
assert "geometry: Grid3D" in repr(dfs)
def test_dfs3_projection():
dfs = mikeio.open("tests/testdata/test_dfs3.dfs3")
assert dfs.projection_string == "LONG/LAT"
assert dfs.dx == 0.25
assert dfs.dy == 0.25
assert dfs.dz == 1.0
def test_dfs3_geometry():
dfs = mikeio.open("tests/testdata/test_dfs3.dfs3")
assert isinstance(dfs.geometry, Grid3D)
assert dfs.geometry.nx == 21
assert dfs.geometry.ny == 17
assert dfs.geometry.nz == 34
def test_dfs_to_xarray():
ds = mikeio.read("tests/testdata/test_dfs3.dfs3")
xr_ds = ds.to_xarray()
assert xr_ds.dims["time"] == 2
ds_1d = ds.isel(z=0).isel(y=0)
xr_ds_1d = ds_1d.to_xarray()
assert xr_ds_1d.dims["time"] == 2
def test_dfs3_read():
ds = mikeio.read("tests/testdata/Grid1.dfs3")
assert ds.n_items == 2
assert ds.n_timesteps == 30
da = ds[0]
assert da.shape == (30, 10, 10, 10) # t # z # y # x
assert da.dims == ("time", "z", "y", "x")
assert da.name == "Item 1"
assert da.to_numpy().dtype == np.float32
def test_dfs3_read_double_precision():
ds = mikeio.read("tests/testdata/Grid1.dfs3", dtype=np.float64)
assert ds[0].to_numpy().dtype == np.float64
def test_dfs3_read_time():
fn = "tests/testdata/test_dfs3.dfs3"
ds = mikeio.read(fn, time="2020-12-30 00:00")
assert ds.n_timesteps == 1
assert isinstance(ds.geometry, Grid3D)
ds = mikeio.read(fn, time=-1)
assert ds.n_timesteps == 1
assert isinstance(ds.geometry, Grid3D)
def test_dfs3_read_1_layer():
fn = "tests/testdata/test_dfs3.dfs3"
ds = mikeio.read(fn, layers=-1)
assert ds.shape == (2, 17, 21)
assert isinstance(ds.geometry, Grid2D)
ds = mikeio.read(fn, layers="top")
assert ds.shape == (2, 17, 21)
assert isinstance(ds.geometry, Grid2D)
ds = mikeio.read(fn, layers=[0])
assert ds.shape == (2, 17, 21)
assert isinstance(ds.geometry, Grid2D)
def test_dfs3_read_multiple_layers():
fn = "tests/testdata/test_dfs3.dfs3"
ds = mikeio.read(fn, layers=(0, 1, 2, 3))
assert ds.geometry.nz == 4
assert isinstance(ds.geometry, Grid3D)
with pytest.warns(UserWarning):
ds = mikeio.read(fn, layers=[1, 5, -3])
assert isinstance(ds.geometry, GeometryUndefined)
assert ds.shape == (2, 3, 17, 21)
def test_read_rotated_grid():
dfs = mikeio.open("tests/testdata/dissolved_oxygen.dfs3")
# North to Y orientation: 18.124689102173
# Grid rotation: 17.0003657182497
# assert dfs._orientation == pytest.approx(18.1246891)
assert dfs.orientation == pytest.approx(17.0003657)
assert dfs.geometry.orientation == pytest.approx(17.0003657) # in own CRS
def test_dfs3_to_dfs(tmp_path):
ds = mikeio.read("tests/testdata/dissolved_oxygen.dfs3")
fp = tmp_path / "test.dfs3"
ds.to_dfs(fp)
dsnew = mikeio.read(fp)
assert ds.n_items == dsnew.n_items
assert ds.geometry == dsnew.geometry
def METHOD_NAME():
    """Reading layers="top" must equal isel(z=-1) on the full 3D dataset."""
    dsall = mikeio.read("tests/testdata/dissolved_oxygen.dfs3")
    ds = mikeio.read("tests/testdata/dissolved_oxygen.dfs3", layers="top")
    # Selecting one layer drops the vertical dimension entirely.
    assert "z" not in ds.dims
    assert isinstance(ds.geometry, Grid2D)
    # TODO: not yet implemented
    # dssel = dsall.sel(layers="top")
    # assert dssel.geometry == ds.geometry
    dssel = dsall.isel(z=-1)
    assert dssel.geometry == ds.geometry
    # The two selections must be numerically identical everywhere:
    # their difference has zero max and zero min.
    dsdiff = dssel - ds
    assert dsdiff.nanmax(axis=None).to_numpy()[0] == 0.0
    assert dsdiff.nanmin(axis=None).to_numpy()[0] == 0.0
def test_read_bottom_layer():
ds = mikeio.read("tests/testdata/dissolved_oxygen.dfs3", layers="bottom")
assert "z" not in ds.dims
assert isinstance(ds.geometry, Grid2D)
assert pytest.approx(ds[0].to_numpy()[0, 58, 52]) == 0.05738005042076111
def test_sel_bottom_layer():
dsall = mikeio.read("tests/testdata/dissolved_oxygen.dfs3")
with pytest.raises(NotImplementedError) as excinfo:
dsall.sel(layers="bottom")
assert "mikeio.read" in str(excinfo.value)
# assert "z" not in ds.dims
# assert isinstance(ds.geometry, Grid2D)
# assert pytest.approx(ds[0].to_numpy()[0, 58, 52]) == 0.05738005042076111
def test_read_single_layer_dfs3():
fn = "tests/testdata/single_layer.dfs3"
ds = mikeio.read(fn, keepdims=True)
assert isinstance(ds.geometry, Grid3D)
assert ds.dims == ("time", "z", "y", "x")
ds = mikeio.read(fn, keepdims=False)
assert isinstance(ds.geometry, Grid2D)
assert ds.dims == ("time", "y", "x")
def test_read_single_timestep_dfs3():
fn = "tests/testdata/single_timestep.dfs3"
ds = mikeio.read(fn, keepdims=True)
assert ds.dims == ("time", "z", "y", "x")
assert ds.shape == (1, 5, 17, 21)
ds = mikeio.read(fn, time=0, keepdims=False)
assert ds.dims == ("z", "y", "x")
assert ds.shape == (5, 17, 21)
ds = mikeio.read(fn, time=0)
assert ds.dims == ("z", "y", "x")
assert ds.shape == (5, 17, 21)
def test_read_write_single_layer_as_dfs3(tmp_path):
fn = "tests/testdata/single_layer.dfs3"
ds1 = mikeio.read(fn, keepdims=True)
assert isinstance(ds1.geometry, Grid3D)
assert ds1.dims == ("time", "z", "y", "x")
ds2 = mikeio.read(fn, layers=0, keepdims=True)
assert ds2.dims == ("time", "z", "y", "x")
assert isinstance(ds2.geometry, Grid3D)
fp = tmp_path / "single_layer.dfs3"
ds2.to_dfs(fp)
def test_MIKE_SHE_dfs3_output():
ds = mikeio.read("tests/testdata/Karup_MIKE_SHE_head_output.dfs3")
assert ds.n_timesteps == 6
assert ds.n_items == 1
g = ds.geometry
assert g.x[0] == 494329.0
assert g.y[0] == pytest.approx(6220250.0)
assert g.origin == pytest.approx((494329.0, 6220250.0))
ds2 = ds.isel(x=range(30, 45))
g2 = ds2.geometry
assert g2.x[0] == g.x[0] + 30 * g.dx
assert g2.y[0] == g.y[0] # + 35 * g.dy
assert g2.origin == pytest.approx((g2.x[0], g2.y[0])) |
1,483 | visit ndarray | from parsimonious import Grammar, NodeVisitor
from hail.expr.nat import NatVariable
from . import types
from hail.utils.java import unescape_parsable
type_grammar = Grammar(
r"""
type = _ (array / ndarray / set / dict / struct / union / tuple / interval / int64 / int32 / float32 / float64 / bool / str / call / str / locus / void / variable) _
variable = "?" simple_identifier (":" simple_identifier)?
void = "void" / "tvoid"
int64 = "int64" / "tint64"
int32 = "int32" / "tint32" / "int" / "tint"
float32 = "float32" / "tfloat32"
float64 = "float64" / "tfloat64" / "tfloat" / "float"
bool = "tbool" / "bool"
call = "tcall" / "call"
str = "tstr" / "str"
locus = ("tlocus" / "locus") _ "<" identifier ">"
array = ("tarray" / "array") _ "<" type ">"
ndarray = ("tndarray" / "ndarray") _ "<" type "," nat ">"
set = ("tset" / "set") _ "<" type ">"
dict = ("tdict" / "dict") _ "<" type "," type ">"
struct = ("tstruct" / "struct") _ "{" (fields / _) "}"
union = ("tunion" / "union") _ "{" (fields / _) "}"
tuple = ("ttuple" / "tuple") _ "(" ((type ("," type)*) / _) ")"
fields = field ("," field)*
field = identifier ":" type
interval = ("tinterval" / "interval") _ "<" type ">"
identifier = _ (simple_identifier / escaped_identifier) _
simple_identifier = ~r"\w+"
escaped_identifier = ~"`([^`\\\\]|\\\\.)*`"
nat = _ (nat_literal / nat_variable) _
nat_literal = ~"[0-9]+"
nat_variable = "?nat"
_ = ~r"\s*"
""")
class TypeConstructor(NodeVisitor):
def generic_visit(self, node, visited_children):
return visited_children
def visit_type(self, node, visited_children):
_, [t], _ = visited_children
return t
def visit_variable(self, node, visited_children):
question, name, cond_opt = visited_children
cond = None
if cond_opt:
colon, cond = cond_opt[0]
return types.tvariable(name, cond)
def visit_void(self, node, visited_children):
return types.tvoid
def visit_int64(self, node, visited_children):
return types.tint64
def visit_int32(self, node, visited_children):
return types.tint32
def visit_float64(self, node, visited_children):
return types.tfloat64
def visit_float32(self, node, visited_children):
return types.tfloat32
def visit_bool(self, node, visited_children):
return types.tbool
def visit_call(self, node, visited_children):
return types.tcall
def visit_str(self, node, visited_children):
return types.tstr
def visit_locus(self, node, visited_children):
tlocus, _, angle_bracket, gr, angle_bracket = visited_children
return types.tlocus(gr)
def visit_array(self, node, visited_children):
tarray, _, angle_bracket, t, angle_bracket = visited_children
return types.tarray(t)
def METHOD_NAME(self, node, visited_children):
tndarray, _, angle_bracket, elem_t, comma, ndim, angle_bracket = visited_children
return types.tndarray(elem_t, ndim)
def visit_set(self, node, visited_children):
tset, _, angle_bracket, t, angle_bracket = visited_children
return types.tset(t)
def visit_dict(self, node, visited_children):
tdict, _, angle_bracket, kt, comma, vt, angle_bracket = visited_children
return types.tdict(kt, vt)
def visit_struct(self, node, visited_children):
tstruct, _, brace, maybe_fields, brace = visited_children
if not maybe_fields:
return types.tstruct()
else:
fields = maybe_fields[0]
return types.tstruct(**dict(fields))
def visit_union(self, node, visited_children):
tunion, _, brace, maybe_fields, brace = visited_children
if not maybe_fields:
return types.tunion()
else:
fields = maybe_fields[0]
return types.tunion(**dict(fields))
def visit_tuple(self, node, visited_children):
ttuple, _, paren, [maybe_types], paren = visited_children
if not maybe_types:
return types.ttuple()
else:
[first, rest] = maybe_types
return types.ttuple(first, *(t for comma, t in rest))
def visit_fields(self, node, visited_children):
first, rest = visited_children
return [first] + [field for comma, field in rest]
def visit_field(self, node, visited_children):
name, comma, type = visited_children
return (name, type)
def visit_interval(self, node, visited_children):
tinterval, _, angle_bracket, point_t, angle_bracket = visited_children
return types.tinterval(point_t)
def visit_identifier(self, node, visited_children):
_, [id], _ = visited_children
return id
def visit_simple_identifier(self, node, visited_children):
return node.text
def visit_escaped_identifier(self, node, visited_children):
return unescape_parsable(node.text[1:-1])
def visit_nat(self, node, visited_children):
_, [nat], _ = visited_children
return nat
def visit_nat_literal(self, node, visited_children):
return int(node.text)
def visit_nat_variable(self, node, visited_children):
return NatVariable()
type_node_visitor = TypeConstructor()
vcf_type_grammar = Grammar(
r"""
type = _ (array / set / int32 / int64 / float32 / float64 / str / bool / call / struct) _
int64 = "Int64"
int32 = "Int32"
float32 = "Float32"
float64 = "Float64"
bool = "Boolean"
call = "Call"
str = "String"
array = "Array" _ "[" type "]"
set = "Set" _ "[" type "]"
struct = "Struct" _ "{" (fields / _) "}"
fields = field ("," field)*
field = identifier ":" type
identifier = _ (simple_identifier / escaped_identifier) _
simple_identifier = ~r"\w+"
escaped_identifier = ~"`([^`\\\\]|\\\\.)*`"
_ = ~r"\s*"
""")
class VCFTypeConstructor(NodeVisitor):
# Folds a vcf_type_grammar parse tree into concrete type objects.
def generic_visit(self, node, visited_children):
return visited_children
def visit_type(self, node, visited_children):
# type = _ (alternative) _ ; unwrap the single matched alternative
_, [t], _ = visited_children
return t
def visit_int64(self, node, visited_children):
return types.tint64
def visit_int32(self, node, visited_children):
return types.tint32
def visit_float64(self, node, visited_children):
return types.tfloat64
def visit_float32(self, node, visited_children):
return types.tfloat32
def visit_bool(self, node, visited_children):
return types.tbool
def visit_call(self, node, visited_children):
return types.tcall
def visit_str(self, node, visited_children):
return types.tstr
def visit_array(self, node, visited_children):
# children: "Array" literal, whitespace, "[", element type, "]"
tarray, _, bracket, t, bracket = visited_children
return types.tarray(t)
def visit_set(self, node, visited_children):
tarray, _, bracket, t, bracket = visited_children
return types.tset(t)
def visit_struct(self, node, visited_children):
# struct = "Struct" _ "{" (fields / _) "}" ; empty braces produce an empty struct
tstruct, _, brace, maybe_fields, brace = visited_children
if not maybe_fields:
return types.tstruct()
else:
fields = maybe_fields[0]
return types.tstruct(**dict(fields))
def visit_fields(self, node, visited_children):
# fields = field ("," field)* ; flatten the comma-separated tail
first, rest = visited_children
return [first] + [field for comma, field in rest]
def visit_field(self, node, visited_children):
name, comma, type = visited_children
return (name, type)
def visit_identifier(self, node, visited_children):
_, [id], _ = visited_children
return id
def visit_simple_identifier(self, node, visited_children):
return node.text
def visit_escaped_identifier(self, node, visited_children):
# Strip backticks and unescape.
return unescape_parsable(node.text[1:-1])
vcf_type_node_visitor = VCFTypeConstructor() |
1,484 | info | from __future__ import annotations
import dataclasses
import logging
import time
from icclim.models.registry import Registry
@dataclasses.dataclass
class Verbosity:
# User-facing verbosity name, e.g. "LOW"; used as registry alias.
verbosity_level: str
# Corresponding stdlib logging level name, e.g. "INFO".
log_level: str
class VerbosityRegistry(Registry[Verbosity]):
_item_class = Verbosity
LOW = Verbosity("LOW", "INFO")
HIGH = Verbosity("HIGH", "INFO")
SILENT = Verbosity("SILENT", "ERROR")
@staticmethod
def get_item_aliases(item: Verbosity) -> list[str]:
# Lookup is by upper-cased verbosity name.
return [item.verbosity_level.upper()]
class IcclimLogger:
"""
Singleton to display and control logs in icclim library.
"""
__instance = None
verbosity: Verbosity = VerbosityRegistry.LOW
@staticmethod
def get_instance(verbosity: Verbosity = VerbosityRegistry.LOW):
# Lazily create the singleton on first access.
if IcclimLogger.__instance is None:
IcclimLogger(verbosity)
return IcclimLogger.__instance
def __init__(self, verbosity: Verbosity):
# Direct construction is forbidden once the singleton exists.
if IcclimLogger.__instance is not None:
raise Exception(
"This class is a singleton! Use IcclimLogger.get_instance()."
)
else:
IcclimLogger.__instance = self
self.verbosity = verbosity
logging.basicConfig(
level=verbosity.log_level, format="%(asctime)s %(message)s"
)
def set_verbosity(self, verbosity: str | Verbosity):
# Accept either a Verbosity or its registry alias string.
if isinstance(verbosity, str):
verbosity = VerbosityRegistry.lookup(verbosity)
self.verbosity = verbosity
logging.root.setLevel(verbosity.log_level)
def start_message(self):
# Log a start-of-run banner; terse on LOW verbosity, decorative box otherwise, nothing on SILENT.
from icclim import __version__ as icclim_version
# flake8: noqa
time_now = time.asctime(time.gmtime())
if self.verbosity == VerbosityRegistry.SILENT:
return
if self.verbosity == VerbosityRegistry.LOW:
logging.METHOD_NAME(f"--- icclim {icclim_version}")
logging.METHOD_NAME("--- BEGIN EXECUTION")
return
logging.METHOD_NAME(
" ********************************************************************************************"
)
logging.METHOD_NAME(
" * *"
)
logging.METHOD_NAME(f" * icclim {icclim_version} *")
logging.METHOD_NAME(
" * *"
)
logging.METHOD_NAME(
" * *"
)
logging.METHOD_NAME(
f" * {time_now} *"
)
logging.METHOD_NAME(
" * *"
)
logging.METHOD_NAME(
" * BEGIN EXECUTION *"
)
logging.METHOD_NAME(
" * *"
)
logging.METHOD_NAME(
" ********************************************************************************************"
)
def ending_message(self, time_cpu):
from icclim import __version__ as icclim_version
# flake8: noqa
time_now = time.asctime(time.gmtime())
if self.verbosity == VerbosityRegistry.SILENT:
return
if self.verbosity == VerbosityRegistry.LOW:
logging.METHOD_NAME(f"--- icclim {icclim_version}")
logging.METHOD_NAME("--- CPU SECS = %-10.3f", time_cpu)
logging.METHOD_NAME("--- END EXECUTION")
return
logging.METHOD_NAME(
" ********************************************************************************************"
)
logging.METHOD_NAME(
" * *"
)
logging.METHOD_NAME(f" * icclim {icclim_version} *")
logging.METHOD_NAME(
" * *"
)
logging.METHOD_NAME(
" * *"
)
logging.METHOD_NAME(
f" * {time_now} *"
)
logging.METHOD_NAME(
" * *"
)
logging.METHOD_NAME(
" * END EXECUTION *"
)
logging.METHOD_NAME(
" * *"
)
logging.METHOD_NAME(
f" * CP SECS = {time_cpu} *"
)
logging.METHOD_NAME(
" * *"
)
logging.METHOD_NAME(
" ********************************************************************************************"
)
def METHOD_NAME(self, *args):
logging.METHOD_NAME(args)
def deprecation_warning(self, old: str, new: str = None) -> None:
# Warn that `old` is deprecated; mention the replacement `new` when one exists.
if new:
logging.warning(
f"DEPRECATION_WARNING: `{old}` is deprecated. Use `{new}` instead."
)
else:
logging.warning(
f"DEPRECATION_WARNING: `{old}` is deprecated and will be removed. Its value is ignored."
)
def callback(self, percent) -> None:
# Progress hook: logs completion percentage.
logging.METHOD_NAME(f"Processing: {percent}%") |
1,485 | assume | """Assume an AWS IAM role."""
from __future__ import annotations
import logging
from datetime import datetime
from typing import TYPE_CHECKING, ContextManager, Optional, Type, cast
from typing_extensions import TypedDict
if TYPE_CHECKING:
from types import TracebackType
from mypy_boto3_sts.type_defs import AssumedRoleUserTypeDef, CredentialsTypeDef
from ...._logging import RunwayLogger
from ....context import RunwayContext
# Module logger; the name rewrite drops the leading underscore of private package segments.
LOGGER = cast("RunwayLogger", logging.getLogger(__name__.replace("._", ".")))
# Keyword arguments accepted by sts.assume_role as used below.
_KwargsTypeDef = TypedDict(
"_KwargsTypeDef", DurationSeconds=int, RoleArn=str, RoleSessionName=str
)
class AssumeRole(ContextManager["AssumeRole"]):
"""Context manager for assuming an AWS role."""
# Populated from the sts.assume_role response; starts as empty placeholders.
assumed_role_user: AssumedRoleUserTypeDef
credentials: CredentialsTypeDef
ctx: RunwayContext
duration_seconds: int
revert_on_exit: bool
session_name: str = "runway"
def __init__(
self,
context: RunwayContext,
role_arn: Optional[str] = None,
duration_seconds: Optional[int] = None,
revert_on_exit: bool = True,
session_name: Optional[str] = None,
):
"""Instantiate class.
Args:
context: Runway context object.
role_arn: ARN of role to be assumed.
duration_seconds: Seconds that the assumed role's credentials will be
valid for. (default: 3600)
revert_on_exit: Whether credentials in the environment will be
reverted upon exiting the context manager.
session_name: Name to use for the assumed role session. (default: runway)
"""
# Placeholder values; overwritten by METHOD_NAME() when a role is assumed.
self.assumed_role_user = {"AssumedRoleId": "", "Arn": ""}
self.credentials = {
"AccessKeyId": "",
"Expiration": datetime.now(),
"SecretAccessKey": "",
"SessionToken": "",
}
self.role_arn = role_arn
self.ctx = context
self.duration_seconds = duration_seconds or 3600
self.revert_on_exit = revert_on_exit
self.session_name = session_name or "runway"
@property
def _kwargs(self) -> _KwargsTypeDef:
"""Construct keyword arguments to pass to boto3 call."""
return {
"DurationSeconds": self.duration_seconds,
"RoleArn": self.role_arn or "",
"RoleSessionName": self.session_name,
}
def METHOD_NAME(self) -> None:
"""Perform role assumption.

No-op when no role ARN was supplied. On success, stores the returned
identity/credentials and exports them into the context's environment.

Raises:
ValueError: The STS response contained no Credentials.
"""
if not self.role_arn:
LOGGER.debug("no role to assume")
return
if self.revert_on_exit:
# Snapshot current credentials so __exit__ can restore them.
self.save_existing_iam_env_vars()
sts_client = self.ctx.get_session().client("sts")
LOGGER.info("assuming role %s...", self.role_arn)
response = sts_client.assume_role(**self._kwargs)
LOGGER.debug("sts.assume_role response: %s", response)
if "Credentials" in response:
self.assumed_role_user.update(
response.get("AssumedRoleUser", cast("AssumedRoleUserTypeDef", {}))
)
self.credentials.update(response["Credentials"])
self.ctx.env.vars.update(
{
"AWS_ACCESS_KEY_ID": response["Credentials"]["AccessKeyId"],
"AWS_SECRET_ACCESS_KEY": response["Credentials"]["SecretAccessKey"],
"AWS_SESSION_TOKEN": response["Credentials"]["SessionToken"],
}
)
LOGGER.verbose("updated environment with assumed credentials")
else:
raise ValueError("assume_role did not return Credentials")
def restore_existing_iam_env_vars(self) -> None:
"""Restore backed up IAM environment variables."""
if not self.role_arn:
LOGGER.debug("no role was assumed; not reverting credentials")
return
for k in self.ctx.current_aws_creds.keys():
old = "OLD_" + k
# Prefer the saved value; otherwise drop the assumed-role variable entirely.
if self.ctx.env.vars.get(old):
self.ctx.env.vars[k] = self.ctx.env.vars.pop(old)
LOGGER.debug("reverted environment variables: %s", k)
else:
self.ctx.env.vars.pop(k, None)
LOGGER.debug("removed environment variables: %s ", k)
def save_existing_iam_env_vars(self) -> None:
"""Backup IAM environment variables for later restoration."""
for k, v in self.ctx.current_aws_creds.items():
new = "OLD_" + k
LOGGER.debug('saving environment variable "%s" as "%s"', k, new)
self.ctx.env.vars[new] = cast(str, v)
def __enter__(self) -> AssumeRole:
"""Enter the context manager."""
LOGGER.debug("entering aws.AssumeRole context manager...")
self.METHOD_NAME()
return self
def __exit__(
self,
exc_type: Optional[Type[BaseException]],
exc_value: Optional[BaseException],
traceback: Optional[TracebackType],
) -> None:
"""Exit the context manager."""
if self.revert_on_exit:
self.restore_existing_iam_env_vars()
LOGGER.debug("aws.AssumeRole context manager exited") |
1,486 | poisson train | from .... import config
from ..device import NeuronDevice
from ....simulation.results import SimulationRecorder
from ....exceptions import *
from ....reporting import report, warn
import numpy as np
@config.node
class SpikeGenerator(NeuronDevice):
# Device that injects spike patterns into targeted synapses.
defaults = {"record": True}
casts = {
"radius": float,
"origin": [float],
"synapses": [str],
}
required = ["targetting", "device", "io", "synapses"]
def implement(self, target, location):
# Attach a stimulated synapse of each configured type at `location`.
cell = location.cell
section = location.section
if not hasattr(section, "available_synapse_types"):
raise Exception(
"{} {} targetted by {} has no synapses".format(
cell.__class__.__name__, ",".join(section.labels), self.name
)
)
for synapse_type in location.get_synapses() or self.synapses:
if synapse_type in section.available_synapse_types:
synapse = cell.create_synapse(section, synapse_type)
pattern = self.get_pattern(target, cell, section, synapse_type)
synapse.stimulate(pattern=pattern, weight=1)
else:
# Misconfiguration is reported but does not abort the run.
warn(
"{} targets {} {} with a {} synapse but it doesn't exist on {}".format(
self.name,
cell.__class__.__name__,
cell.ref_id,
synapse_type,
",".join(section.labels),
)
)
def validate_specifics(self):
# Either an explicit spike list or Poisson parameters must be configured.
if not hasattr(self, "spike_times") and not hasattr(self, "parameters"):
raise ConfigurationError(
f"{self.name} is missing `spike_times` or `parameters`"
)
def create_patterns(self):
# Build a {target: spike-time list} mapping, either from fixed
# `spike_times` or from Poisson parameters (interval/number/start/noise).
report("Creating spike generator patterns for '{}'".format(self.name), level=3)
targets = self.get_targets()
if hasattr(self, "spike_times"):
pattern = self.spike_times
if self.record:
for target in targets:
self.adapter.result.add(GeneratorRecorder(self, target, pattern))
patterns = {target: pattern for target in targets}
else:
interval = float(self.parameters["interval"])
number = int(self.parameters["number"])
start = float(self.parameters["start"])
noise = "noise" in self.parameters and self.parameters["noise"]
frequency = 1.0 / interval
duration = interval * number
if not noise:
# Create only 1 copy of the pattern array, might be surprising
# for tinkering users, but in the framework the created patterns
# should be used read only in `get_pattern(target)` to pass as
# input to a VecStim.
pattern = [start + i * interval for i in range(number)]
patterns = {target: pattern for target in targets}
else:
# Each target gets its own random Poisson realization.
patterns = {
target: list(METHOD_NAME(frequency, duration, start))
for target in targets
}
if self.record:
for target, pattern in patterns.items():
self.adapter.result.add(GeneratorRecorder(self, target, pattern))
report("Pattern {} for {}.".format(pattern, target), level=4)
return patterns
def get_pattern(self, target, cell=None, section=None, synapse=None):
# NOTE(review): `get_patterns()` is not defined in this class (only
# `create_patterns()` is) — verify a caching wrapper exists on a base
# class; calling `create_patterns()` here directly would regenerate
# noisy patterns on every call.
return self.get_patterns()[target]
class GeneratorRecorder(SimulationRecorder):
# Records the (precomputed) spike pattern a generator device emitted for one target.
def __init__(self, device, target, pattern):
self.pattern = pattern
self.device = device
def get_data(self):
return np.array(self.pattern)
def flush(self):
# Incremental flushing is not supported; patterns are static.
raise NotImplementedError("Flushing generator")
# Adapted from the abandoned neuronpy project, by Tom McCavish.
def METHOD_NAME(frequency, duration, start_time=0, seed=None):
    """Generate a homogeneous Poisson spike train.

    :param frequency: The mean spiking frequency.
    :param duration: Maximum duration.
    :param start_time: Timestamp the train starts from.
    :param seed: Seed for the random number generator. If None, this will be
        decided by numpy, which chooses the system time.
    :returns: Yields successive spike times (offset by ``start_time``),
        in seconds (not ms).
    """
    rng = np.random.mtrand.RandomState()
    if seed is not None:
        rng.seed(seed)
    mean_isi = 1.0 / frequency
    limit = duration + start_time
    t = start_time
    while True:
        # Inter-spike intervals of a homogeneous Poisson process are
        # exponentially distributed around the mean interval.
        t += mean_isi * rng.exponential()
        if t > limit:
            return
        yield t
1,487 | add error | import traceback
import unittest
from code_feedback import Feedback, GradingComplete
from pl_execute import UserCodeFailed
from pl_helpers import DoNotRun, GradingSkipped, print_student_code
class PLTestResult(unittest.TestResult):
"""
Helper class for generating results of a test suite using the Python
unittest library.
"""
# Shown to students when their code raised during a test.
error_message = (
"There was an error while grading your code.\n\n"
"Review the question text to ensure your code matches\nthe expected requirements, such as variable names,\nfunction names, and parameters.\n\n"
"Look at the traceback below to help debug your code:\n"
)
# Shown when the grader itself failed, not the student code.
grader_error_message = (
"The grader encountered an error while grading your code.\n\n"
"The associated traceback is:\n"
)
def __init__(self):
unittest.TestResult.__init__(self)
# One dict per test case: name, filename, max_points, points.
self.results = []
self.format_errors = []
self.main_feedback = ""
self.buffer = False
self.done_grading = False
self.grading_succeeded = True
# If we end grading early, we still want to run through the remaining test cases
# (but not execute them) so that we show the correct number of points on the grading panel
self.skip_grading = False
def startTest(self, test):
# Open a result entry for this case, reading points/name from the
# decorator-attached options on the test method.
unittest.TestResult.startTest(self, test)
options = getattr(test, test._testMethodName).__func__.__dict__
points = options.get("points", 1)
name = options.get("name", test.shortDescription())
filename = test._testMethodName
if name is None:
name = test._testMethodName
self.results.append({"name": name, "max_points": points, "filename": filename})
def addSuccess(self, test):
    # Record full credit for the entry opened in startTest(), or the
    # partial-credit fraction the test case set on itself.
    unittest.TestResult.addSuccess(self, test)
    entry = self.results[-1]
    fraction = 1 if test.points is None else test.points
    entry["points"] = fraction * entry["max_points"]
def METHOD_NAME(self, test, err):
# Dispatch on the raised exception type: sentinel exceptions from the
# grading framework are handled specially; everything else is a real error.
if isinstance(err[1], GradingComplete):
# If grading stopped early, we will flag that but still loop through
# the remaining cases so that we have the correct point values
self.results[-1]["points"] = 0
self.skip_grading = True
elif isinstance(err[1], DoNotRun):
# Excluded case: contributes no points either way.
self.results[-1]["points"] = 0
self.results[-1]["max_points"] = 0
elif isinstance(err[1], GradingSkipped):
self.results[-1]["points"] = 0
Feedback.set_name(test._testMethodName)
Feedback.add_feedback(
" - Grading was skipped because an earlier test failed - "
)
elif isinstance(err[1], UserCodeFailed):
# Student code raised Exception
tr_list = traceback.format_exception(*err[1].err)
name = "Your code raised an Exception"
self.done_grading = True
if isinstance(err[1].err[1], SyntaxError) or isinstance(
err[1].err[1], NameError
):
# Unparseable/unrunnable submission: grading cannot proceed at all.
self.grading_succeeded = False
self.format_errors.append("Your code has a syntax error.")
Feedback.set_main_output()
else:
self.results.append(
{"name": name, "filename": "error", "max_points": 1, "points": 0}
)
Feedback.set_name("error")
Feedback.add_feedback("".join(tr_list))
Feedback.add_feedback("\n\nYour code:\n\n")
print_student_code(
st_code=Feedback.test.student_code_abs_path,
ipynb_key=Feedback.test.ipynb_key,
)
else:
tr_message = "".join(traceback.format_exception(*err))
if isinstance(test, unittest.suite._ErrorHolder):
# Error occurred outside of a test case, like in setup code for example
# We can't really recover from this
self.done_grading = True
self.grading_succeeded = False
self.results = [
{
"name": "Internal Grading Error",
"filename": "error",
"max_points": 1,
"points": 0,
}
]
Feedback.set_name("error")
Feedback.add_feedback(self.grader_error_message + tr_message)
else:
# Error in a single test -- keep going
unittest.TestResult.METHOD_NAME(self, test, err)
self.results[-1]["points"] = 0
Feedback.add_feedback(self.error_message + tr_message)
def addFailure(self, test, err):
# Failed assertion: award the partial-credit fraction set by the test
# case itself, or zero when none was set.
unittest.TestResult.addFailure(self, test, err)
if test.points is None:
self.results[-1]["points"] = 0
else:
self.results[-1]["points"] = test.points * self.results[-1]["max_points"]
def stopTest(self, test):
# Never write output back to the console
self._mirrorOutput = False
unittest.TestResult.stopTest(self, test)
def getResults(self):
return self.results
def getGradable(self):
# False when the submission could not be graded at all (syntax error, grader crash).
return self.grading_succeeded |
1,488 | test login failed exceptions 173 | # coding=utf-8
import pytz
from datetime import datetime
from mock import patch
from monitorrent.plugins.trackers import LoginResult, TrackerSettings, CloudflareChallengeSolverSettings
from monitorrent.plugins.trackers.kinozal import KinozalPlugin, KinozalLoginFailedException, KinozalTopic
from monitorrent.plugins.trackers.kinozal import KinozalDateParser
from tests import use_vcr, DbTestCase
from tests.plugins.trackers import TrackerSettingsMock
from tests.plugins.trackers.kinozal.kinozal_helper import KinozalHelper
helper = KinozalHelper()
# helper = KinozalHelper.login('realusername', 'realpassword')
class MockDatetime(datetime):
# datetime subclass whose now() returns a preset instant, for patching
# the module under test. The tz argument is accepted but ignored.
mock_now = None
@classmethod
def now(cls, tz=None):
return cls.mock_now
class KinozalPluginTest(DbTestCase):
# Integration tests for KinozalPlugin; HTTP traffic is replayed via VCR cassettes.
def setUp(self):
super(KinozalPluginTest, self).setUp()
cloudflare_challenge_solver_settings = CloudflareChallengeSolverSettings(False, 10000, False, False, 0)
self.tracker_settings = TrackerSettingsMock(10, None, cloudflare_challenge_solver_settings)
self.plugin = KinozalPlugin()
self.plugin.init(self.tracker_settings)
self.urls_to_check = [
"https://kinozal.tv/details.php?id=1506818"
]
def test_can_parse_url(self):
for url in self.urls_to_check:
self.assertTrue(self.plugin.can_parse_url(url))
bad_urls = [
"https://kinozal.com/details.php?id=1506818",
"https://belzal.com/details.php?id=1506818",
]
for url in bad_urls:
self.assertFalse(self.plugin.can_parse_url(url))
@use_vcr
def test_parse_url_success(self):
parsed_url = self.plugin.parse_url("https://kinozal.tv/details.php?id=1506818")
assert parsed_url['original_name'] == u'Война против всех / War on Everyone / 2016 / ДБ / WEB-DLRip'
@use_vcr
def test_login_verify_fail(self):
# Missing, empty and wrong credentials must all fail verification.
assert not self.plugin.verify()
assert self.plugin.login() == LoginResult.CredentialsNotSpecified
credentials = {'username': '', 'password': ''}
assert self.plugin.update_credentials(credentials) == LoginResult.CredentialsNotSpecified
assert not self.plugin.verify()
credentials = {'username': helper.fake_login, 'password': helper.fake_password}
assert self.plugin.update_credentials(credentials) == LoginResult.IncorrentLoginPassword
assert not self.plugin.verify()
@helper.use_vcr
def test_login_verify_success(self):
credentials = {'username': helper.real_login, 'password': helper.real_password}
self.assertEqual(self.plugin.update_credentials(credentials), LoginResult.Ok)
self.assertTrue(self.plugin.verify())
def test_login_failed_exceptions_1(self):
# Error code 1 maps to IncorrentLoginPassword.
# noinspection PyUnresolvedReferences
with patch.object(self.plugin.tracker, 'login',
side_effect=KinozalLoginFailedException(1, 'Invalid login or password')):
credentials = {'username': helper.real_login, 'password': helper.real_password}
self.assertEqual(self.plugin.update_credentials(credentials), LoginResult.IncorrentLoginPassword)
def METHOD_NAME(self):
# Any other Kinozal error code (here 173) maps to Unknown.
# noinspection PyUnresolvedReferences
with patch.object(self.plugin.tracker, 'login',
side_effect=KinozalLoginFailedException(173, 'Invalid login or password')):
credentials = {'username': helper.real_login, 'password': helper.real_password}
self.assertEqual(self.plugin.update_credentials(credentials), LoginResult.Unknown)
def test_login_unexpected_exceptions(self):
# noinspection PyUnresolvedReferences
with patch.object(self.plugin.tracker, 'login', side_effect=Exception):
credentials = {'username': helper.real_login, 'password': helper.real_password}
self.assertEqual(self.plugin.update_credentials(credentials), LoginResult.Unknown)
def test_prepare_request(self):
cookies = {'uid': helper.fake_uid, 'pass': helper.fake_pass}
# noinspection PyUnresolvedReferences
with patch.object(self.plugin.tracker, 'get_cookies', result=cookies):
url = "https://kinozal.tv/details.php?id=1506818"
request = self.plugin._prepare_request(KinozalTopic(url=url))
self.assertIsNotNone(request)
self.assertEqual(request.headers['referer'], url)
self.assertEqual(request.url, 'https://dl.kinozal.tv/download.php?id=1506818')
@use_vcr
def test_get_last_torrent_update_for_updated_yesterday_success(self):
url = 'https://kinozal.tv/details.php?id=1831370'
topic = KinozalTopic(id=1, url=url, last_torrent_update=datetime(2021, 3, 17, 10, 10, tzinfo=pytz.utc))
expected = KinozalDateParser.tz_moscow.localize(datetime(2021, 3, 18, 23, 12)).astimezone(pytz.utc)
server_now = datetime(2021, 3, 19, 12, 0, 0, tzinfo=pytz.utc)
MockDatetime.mock_now = server_now
with patch('monitorrent.plugins.trackers.kinozal.datetime.datetime', MockDatetime):
assert self.plugin.check_changes(topic)
assert topic.last_torrent_update == expected
@use_vcr
def test_get_last_torrent_update_for_updated_today_success(self):
url = 'https://kinozal.tv/details.php?id=1496310'
topic = KinozalTopic(id=1, url=url, last_torrent_update=None)
expected = KinozalDateParser.tz_moscow.localize(datetime(2017, 1, 20, 1, 30)).astimezone(pytz.utc)
server_now = datetime(2017, 1, 20, 12, 0, 0, tzinfo=pytz.utc)
MockDatetime.mock_now = server_now
with patch('monitorrent.plugins.trackers.kinozal.datetime.datetime', MockDatetime):
assert self.plugin.check_changes(topic)
assert topic.last_torrent_update == expected
@use_vcr
def test_get_last_torrent_update_for_updated_in_particular_success(self):
url = 'https://kinozal.tv/details.php?id=1508210'
topic = KinozalTopic(id=1, url=url, last_torrent_update=datetime(2017, 1, 26, 10, 10, tzinfo=pytz.utc))
expected = KinozalDateParser.tz_moscow.localize(datetime(2017, 1, 26, 21, 24)).astimezone(pytz.utc)
assert self.plugin.check_changes(topic)
assert topic.last_torrent_update == expected
@use_vcr
def test_get_last_torrent_update_for_updated_in_particular_not_changed(self):
url = 'https://kinozal.tv/details.php?id=1508210'
expected = KinozalDateParser.tz_moscow.localize(datetime(2017, 1, 26, 21, 24)).astimezone(pytz.utc)
topic = KinozalTopic(id=1, url=url, last_torrent_update=expected)
assert not self.plugin.check_changes(topic)
assert topic.last_torrent_update == expected
@use_vcr
def test_get_last_torrent_update_without_updates_success(self):
url = 'https://kinozal.tv/details.php?id=1831382'
expected = KinozalDateParser.tz_moscow.localize(datetime(2021, 3, 15, 23, 27)).astimezone(pytz.utc)
topic = KinozalTopic(id=1, url=url, last_torrent_update=None)
assert self.plugin.check_changes(topic)
assert topic.last_torrent_update == expected
|
1,489 | test named args | # Licensed to the .NET Foundation under one or more agreements.
# The .NET Foundation licenses this file to you under the Apache 2.0 License.
# See the LICENSE file in the project root for more information.
# COM Interop tests for IronPython
from iptest.assert_util import skiptest
skiptest("win32")
from iptest.cominterop_util import *
from System.Runtime.InteropServices import COMException
from System import InvalidOperationException
from System.Reflection import TargetParameterCountException
from Microsoft.Scripting import ArgumentTypeException
com_type_name = "DlrComLibrary.DlrComServer"
#------------------------------------------------------------------------------
# Create a COM object
com_obj = getRCWFromProgID(com_type_name)
def test_DlrComServerArrays():
# Exercises object, int and byte array marshaling from the COM server.
dlrComServer = com_obj
data = dlrComServer.GetObjArray() # returns 2 objects - one is itself, another one is something else
Assert(data.Length == 2)
Assert(dlrComServer.Equals(data[0]) == True)
Assert(dlrComServer.Equals(data[1]) == False)
data = dlrComServer.GetIntArray() # returns 5 ints - 1, 2, 3, 4, 5
Assert(data.Length == 5)
Assert(data[0] == 1)
Assert(data[1] == 2)
Assert(data[2] == 3)
Assert(data[3] == 4)
Assert(data[4] == 5)
data = dlrComServer.GetByteArray() # return byte string "GetByteArrayTestData"
# Decode the returned byte array as UTF-16 to recover the test string.
stream = System.IO.MemoryStream(data, False)
reader = System.IO.StreamReader(stream, System.Text.UnicodeEncoding())
s = reader.ReadToEnd()
Assert(s == "GetByteArrayTestData")
def test_perfScenarios():
# Smoke-tests the call paths used by the perf suite; all return void.
AreEqual(com_obj.SimpleMethod(), None)
AreEqual(com_obj.IntArguments(1, 2), None)
AreEqual(com_obj.StringArguments("hello", "there"), None)
AreEqual(com_obj.ObjectArguments(com_obj, com_obj), None)
def test_errorInfo():
# COM error info should surface as the COMException message.
try:
com_obj.TestErrorInfo()
except COMException as e:
# This is commented out to revisit it to see if we want to add coverage for str, or if we are
# happy to have coverage just for e.Message
# AreEqual("Test error message" in str(e), True)
AreEqual("Test error message", e.Message)
@disabled("COM dispatch mode doesn't support documentation")
def test_documentation():
import IronPython
ops = IronPython.Hosting.Python.CreateRuntime().GetEngine('py').Operations
AreEqual(ops.GetDocumentation(com_obj.IntArguments), "void IntArguments(Int32 arg1, Int32 arg2)")
@disabled('CodePlex bug 19282')
def test_method_equality():
# Bound COM methods compare equal per (object, method) pair and are hashable.
AreEqual(com_obj.SumArgs, com_obj.SumArgs)
Assert(com_obj.SumArgs != com_obj.IntArguments)
com_obj2 = getRCWFromProgID(com_type_name)
Assert(com_obj.SumArgs != com_obj2.SumArgs)
#Use COM methods as dicitonary keys
d = {}
d[com_obj.SumArgs] = "SumArgs"
AreEqual(d[com_obj.SumArgs], "SumArgs")
d[com_obj.IntArguments] = "IntArguments"
AreEqual(d[com_obj.IntArguments], "IntArguments")
d[com_obj.SumArgs] = "SumArgs2"
AreEqual(d[com_obj.SumArgs], "SumArgs2")
d[com_obj2.SumArgs] = "obj2_SumArgs"
AreEqual(d[com_obj2.SumArgs], "obj2_SumArgs")
AreEqual(d, {com_obj.SumArgs:"SumArgs2", com_obj.IntArguments:"IntArguments", com_obj2.SumArgs:"obj2_SumArgs"})
def METHOD_NAME():
# Verifies positional, named and **kwargs argument passing to a COM method.
# Named arguments
AreEqual(12345, com_obj.SumArgs(1, 2, 3, 4, 5))
AreEqual(12345, com_obj.SumArgs(1, 2, 3, 4, a5=5))
AreEqual(12345, com_obj.SumArgs(1, 2, 3, a4=4, a5=5))
AreEqual(12345, com_obj.SumArgs(a1=1, a2=2, a3=3, a4=4, a5=5))
AreEqual(12345, com_obj.SumArgs(a5=5, a4=4, a3=3, a2=2, a1=1))
# kwargs
AreEqual(12345, com_obj.SumArgs(1, 2, 3, 4, **{"a5":5}))
AreEqual(12345, com_obj.SumArgs(1, 2, 3, **{"a4":4, "a5":5}))
AreEqual(12345, com_obj.SumArgs(**{"a1":1, "a2":2, "a3":3, "a4":4, "a5":5}))
AreEqual(12345, com_obj.SumArgs(**{"a5":5, "a4":4, "a3":3, "a2":2, "a1":1}))
# Named arguments and kwargs
AreEqual(12345, com_obj.SumArgs(1, 2, a5=5, **{"a4":4, "a3":3}))
# DISP_E_UNKNOWNNAME
AssertError(COMException, com_obj.SumArgs, 1, 2, 3, 4, 5, **{"a6":6, "bugid":"TODO"})
AssertError(Exception, com_obj.SumArgs, 1, 2, 3, 4, 5, **{"a5":5, "bugid":"TODO"})
#Verify that one is able to enumerate over the object in a loop
#TODO: add more tests for enumerators - bad enumerator, different array sizes, different types.
def test_enumerator():
# Both the explicit enumerator and the object's own iteration protocol must agree.
AreEqual( [x for x in com_obj.GetEnumerator()] , [ 42, True, "DLR"] )
AreEqual( [x for x in com_obj] , [ 42, True, "DLR"] )
#------------------------------------------------------------------------------
run_com_test(__name__, __file__) |
1,490 | relabel on boot | #!/usr/bin/python3
import dbus
import dbus.service
from dbus.mainloop.glib import DBusGMainLoop
from gi.repository import GObject
from gi.repository import GLib
import os
import selinux
from subprocess import Popen, PIPE, STDOUT
class selinux_server(dbus.service.Object):
# D-Bus service exposing privileged SELinux management operations,
# gated per-method through PolicyKit authorization.
default_polkit_auth_required = "org.selinux.semanage"
def __init__(self, *p, **k):
super(selinux_server, self).__init__(*p, **k)
def is_authorized(self, sender, action_id):
# Ask PolicyKit whether the calling bus client may perform action_id.
bus = dbus.SystemBus()
proxy = bus.get_object('org.freedesktop.PolicyKit1', '/org/freedesktop/PolicyKit1/Authority')
authority = dbus.Interface(proxy, dbus_interface='org.freedesktop.PolicyKit1.Authority')
subject = ('system-bus-name', {'name': sender})
result = authority.CheckAuthorization(subject, action_id, {}, 1, '')
return result[0]
#
# The semanage method runs a transaction on a series of semanage commands,
# these commands can take the output of customized
#
@dbus.service.method("org.selinux", in_signature='s', sender_keyword="sender")
def semanage(self, buf, sender):
if not self.is_authorized(sender, "org.selinux.semanage"):
raise dbus.exceptions.DBusException("Not authorized")
# Pipe the serialized customizations into `semanage import`.
p = Popen(["/usr/sbin/semanage", "import"], stdout=PIPE, stderr=PIPE, stdin=PIPE, universal_newlines=True)
p.stdin.write(buf)
output = p.communicate()
if p.returncode and p.returncode != 0:
raise dbus.exceptions.DBusException(output[1])
#
# The customized method will return all of the custommizations for policy
# on the server. This output can be used with the semanage method on
# another server to make the two systems have duplicate policy.
#
@dbus.service.method("org.selinux", in_signature='', out_signature='s', sender_keyword="sender")
def customized(self, sender):
if not self.is_authorized(sender, "org.selinux.customized"):
raise dbus.exceptions.DBusException("Not authorized")
p = Popen(["/usr/sbin/semanage", "export"], stdout=PIPE, stderr=PIPE, universal_newlines=True)
buf = p.stdout.read()
output = p.communicate()
if p.returncode and p.returncode != 0:
raise OSError("Failed to read SELinux configuration: %s", output)
return buf
#
# The semodule_list method will return the output of semodule --list=full, using the customized polkit,
# since this is a readonly behaviour
#
@dbus.service.method("org.selinux", in_signature='', out_signature='s', sender_keyword="sender")
def semodule_list(self, sender):
if not self.is_authorized(sender, "org.selinux.semodule_list"):
raise dbus.exceptions.DBusException("Not authorized")
p = Popen(["/usr/sbin/semodule", "--list=full"], stdout=PIPE, stderr=PIPE, universal_newlines=True)
buf = p.stdout.read()
output = p.communicate()
if p.returncode and p.returncode != 0:
raise OSError("Failed to list SELinux modules: %s", output)
return buf
#
# The restorecon method modifies any file path to the default system label
#
@dbus.service.method("org.selinux", in_signature='s', sender_keyword="sender")
def restorecon(self, path, sender):
if not self.is_authorized(sender, "org.selinux.restorecon"):
raise dbus.exceptions.DBusException("Not authorized")
selinux.restorecon(str(path), recursive=1)
#
# The setenforce method toggles the current enforcement of SELinux
# (1 = enforcing, 0 = permissive)
#
@dbus.service.method("org.selinux", in_signature='i', sender_keyword="sender")
def setenforce(self, value, sender):
if not self.is_authorized(sender, "org.selinux.setenforce"):
raise dbus.exceptions.DBusException("Not authorized")
selinux.security_setenforce(value)
#
# Schedules (value=1) or cancels (value=0) a full filesystem relabel on
# the next boot by creating or removing the /.autorelabel marker file.
#
@dbus.service.method("org.selinux", in_signature='i', sender_keyword="sender")
def METHOD_NAME(self, value, sender):
if not self.is_authorized(sender, "org.selinux.relabel_on_boot"):
raise dbus.exceptions.DBusException("Not authorized")
if value == 1:
fd = open("/.autorelabel", "w")
fd.close()
else:
try:
os.unlink("/.autorelabel")
except FileNotFoundError:
pass
def write_selinux_config(self, enforcing=None, policy=None):
# Rewrite the SELinux config file, replacing the SELINUX= and/or
# SELINUXTYPE= lines; written to a backup file first, then atomically
# renamed over the original.
path = selinux.selinux_path() + "config"
backup_path = path + ".bck"
fd = open(path)
lines = fd.readlines()
fd.close()
fd = open(backup_path, "w")
for l in lines:
if enforcing and l.startswith("SELINUX="):
fd.write("SELINUX=%s\n" % enforcing)
continue
if policy and l.startswith("SELINUXTYPE="):
fd.write("SELINUXTYPE=%s\n" % policy)
continue
fd.write(l)
fd.close()
os.rename(backup_path, path)
#
# The change_default_enforcement modifies the default (boot-time) enforcement mode
#
@dbus.service.method("org.selinux", in_signature='s', sender_keyword="sender")
def change_default_mode(self, value, sender):
if not self.is_authorized(sender, "org.selinux.change_default_mode"):
raise dbus.exceptions.DBusException("Not authorized")
values = ["enforcing", "permissive", "disabled"]
if value not in values:
raise ValueError("Enforcement mode must be %s" % ", ".join(values))
self.write_selinux_config(enforcing=value)
#
# The change_default_policy method modifies the policy type
#
@dbus.service.method("org.selinux", in_signature='s', sender_keyword="sender")
def change_default_policy(self, value, sender):
if not self.is_authorized(sender, "org.selinux.change_default_policy"):
raise dbus.exceptions.DBusException("Not authorized")
# Only accept policy names that exist as installed policy directories.
path = selinux.selinux_path() + value
if os.path.isdir(path):
return self.write_selinux_config(policy=value)
raise ValueError("%s does not exist" % path)
if __name__ == "__main__":
    # Export the selinux_server object on the system bus under the well-known
    # name org.selinux and serve method calls until the process is terminated.
    DBusGMainLoop(set_as_default=True)
    mainloop = GLib.MainLoop()
    system_bus = dbus.SystemBus()
    name = dbus.service.BusName("org.selinux", system_bus)
    server = selinux_server(system_bus, "/org/selinux/object")
mainloop.run() |
1,491 | control led | ###############################################################################
#
# The MIT License (MIT)
#
# Copyright (c) typedef int GmbH
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
###############################################################################
from os import environ
from twisted.internet.defer import inlineCallbacks
from twisted.internet.serialport import SerialPort
from twisted.protocols.basic import LineReceiver
from autobahn.twisted.wamp import ApplicationSession
class McuProtocol(LineReceiver):
    """
    MCU serial communication protocol.

    Parses whitespace-separated integer pairs ("<id> <value>") received over
    the serial line and republishes them as WAMP PubSub events.
    """

    # need a reference to our WS-MCU gateway factory to dispatch PubSub events
    def __init__(self, session):
        self.session = session

    def connectionMade(self):
        print('Serial port connected.')

    def lineReceived(self, line):
        """Parse one serial line and publish it as a WAMP event."""
        print("Serial RX: {0}".format(line))
        try:
            # parse data received from MCU; a malformed line (non-integers)
            # or a short line (fewer than two fields raised an unhandled
            # IndexError before) is reported and dropped instead of crashing
            # the protocol
            data = [int(x) for x in line.split()]
            payload = {'id': data[0], 'value': data[1]}
        except (ValueError, IndexError):
            print('Unable to parse value {0}'.format(line))
        else:
            # publish WAMP event to all subscribers on topic
            self.session.publish("com.myapp.mcu.on_analog_value", payload)

    def METHOD_NAME(self, turnOn):
        """
        This method is exported as RPC and can be called by connected clients
        to switch the LED on (b'1') or off (b'0') over the serial link.
        """
        payload = b'1' if turnOn else b'0'
        print("Serial TX: {0}".format(payload))
        self.transport.write(payload)
class McuComponent(ApplicationSession):
    """
    MCU WAMP application component.

    On join: opens the configured serial port with an McuProtocol and registers
    the protocol's LED-control method as a WAMP RPC endpoint.
    """
    @inlineCallbacks
    def onJoin(self, details):
        print("Component ready! Configuration: {}".format(self.config.extra))
        # serial settings are passed in via ApplicationRunner's `extra` dict
        port = self.config.extra['port']
        baudrate = self.config.extra['baudrate']
        serialProtocol = McuProtocol(self)
        print('About to open serial port {0} [{1} baud] ..'.format(port, baudrate))
        try:
            # NOTE(review): `reactor` is not imported at module scope; it is
            # imported inside the __main__ block below, so this class only
            # works when run via this script -- confirm before reusing it.
            serialPort = SerialPort(serialProtocol, port, reactor, baudrate=baudrate)
        except Exception as e:
            print('Could not open serial port: {0}'.format(e))
            self.leave()
        else:
            # expose the LED control method to WAMP clients
            yield self.register(serialProtocol.METHOD_NAME, "com.myapp.mcu.control_led")
if __name__ == '__main__':
    import sys
    import argparse
    # parse command line arguments
    parser = argparse.ArgumentParser()
    parser.add_argument("--baudrate", type=int, default=9600, choices=[300, 1200, 2400, 4800, 9600, 19200, 57600, 115200],
                        help='Serial port baudrate.')
    parser.add_argument("--port", type=str, default='/dev/ttyACM0',
                        help='Serial port to use (e.g. 3 for a COM port on Windows, /dev/ttyATH0 for Arduino Yun, /dev/ttyACM0 for Serial-over-USB on RaspberryPi.')
    parser.add_argument("--web", type=int, default=8000,
                        help='Web port to use for embedded Web server. Use 0 to disable.')
    router_default = environ.get("AUTOBAHN_DEMO_ROUTER", "ws://127.0.0.1:8080/ws")
    parser.add_argument("--router", type=str, default=router_default,
                        help='WAMP router URL (a WAMP-over-WebSocket endpoint, default: "{}")'.format(router_default))
    parser.add_argument("--realm", type=str, default='crossbardemo',
                        help='WAMP realm to join (default: "crossbardemo")')
    args = parser.parse_args()
    # import Twisted reactor
    if sys.platform == 'win32':
        # on Windows, we need to use the following reactor for serial support
        # http://twistedmatrix.com/trac/ticket/3802
        from twisted.internet import win32eventreactor
        win32eventreactor.install()
        # on Windows, we need port to be an integer
        args.port = int(args.port)
    # NOTE: this import also makes `reactor` available as a module global,
    # which McuComponent.onJoin above relies on.
    from twisted.internet import reactor
    print("Using Twisted reactor {0}".format(reactor.__class__))
    # create embedded web server for static files
    if args.web:
        from twisted.web.server import Site
        from twisted.web.static import File
        reactor.listenTCP(args.web, Site(File(".")))
    # run WAMP application component
    from autobahn.twisted.wamp import ApplicationRunner
    runner = ApplicationRunner(args.router, args.realm,
                               extra={'port': args.port, 'baudrate': args.baudrate})
    # start the component and the Twisted reactor ..
runner.run(McuComponent) |
1,492 | test create shared memory | import multiprocessing as mp
from collections import OrderedDict
from multiprocessing import Array, Process
from multiprocessing.sharedctypes import SynchronizedArray
import numpy as np
import pytest
from gymnasium.error import CustomSpaceError
from gymnasium.spaces import Dict, Tuple
from gymnasium.vector.utils.shared_memory import (
create_shared_memory,
read_from_shared_memory,
write_to_shared_memory,
)
from gymnasium.vector.utils.spaces import BaseGymSpaces
from tests.vector.utils import custom_spaces, spaces
expected_types = [
Array("d", 1),
Array("f", 1),
Array("f", 3),
Array("f", 4),
Array("B", 1),
Array("B", 32 * 32 * 3),
Array("i", 1),
Array("i", 1),
(Array("i", 1), Array("i", 1)),
(Array("i", 1), Array("f", 2)),
Array("B", 3),
Array("B", 3),
Array("B", 19),
OrderedDict([("position", Array("i", 1)), ("velocity", Array("f", 1))]),
OrderedDict(
[
("position", OrderedDict([("x", Array("i", 1)), ("y", Array("i", 1))])),
("velocity", (Array("i", 1), Array("B", 1))),
]
),
]
@pytest.mark.parametrize("n", [1, 8])
@pytest.mark.parametrize(
    "space,expected_type",
    list(zip(spaces, expected_types)),
    ids=[space.__class__.__name__ for space in spaces],
)
@pytest.mark.parametrize(
    "ctx", [None, "fork", "spawn"], ids=["default", "fork", "spawn"]
)
def METHOD_NAME(space, expected_type, n, ctx):
    """create_shared_memory must mirror the space's nesting with ctypes Arrays
    whose length is n times the per-sample length."""
    if ctx not in mp.get_all_start_methods():
        pytest.skip(
            f"Multiprocessing start method {ctx} not available on this platform."
        )
    def assert_nested_type(lhs, rhs, n):
        # Exact type match is intentional here (not isinstance): the shared
        # memory must reproduce the template's concrete container types.
        assert type(lhs) == type(rhs)
        if isinstance(lhs, (list, tuple)):
            assert len(lhs) == len(rhs)
            for lhs_, rhs_ in zip(lhs, rhs):
                assert_nested_type(lhs_, rhs_, n)
        elif isinstance(lhs, (dict, OrderedDict)):
            # symmetric difference empty <=> identical key sets
            assert set(lhs.keys()) ^ set(rhs.keys()) == set()
            for key in lhs.keys():
                assert_nested_type(lhs[key], rhs[key], n)
        elif isinstance(lhs, SynchronizedArray):
            # Assert the length of the array
            assert len(lhs[:]) == n * len(rhs[:])
            # Assert the data type
            assert isinstance(lhs[0], type(rhs[0]))
        else:
            raise TypeError(f"Got unknown type `{type(lhs)}`.")
    # None means "use the default start method of the bare module"
    ctx = mp if (ctx is None) else mp.get_context(ctx)
    shared_memory = create_shared_memory(space, n=n, ctx=ctx)
    assert_nested_type(shared_memory, expected_type, n=n)
@pytest.mark.parametrize("n", [1, 8])
@pytest.mark.parametrize(
    "ctx", [None, "fork", "spawn"], ids=["default", "fork", "spawn"]
)
@pytest.mark.parametrize("space", custom_spaces)
def test_create_shared_memory_custom_space(n, ctx, space):
    """Custom (non-fundamental) spaces must be rejected with CustomSpaceError."""
    if ctx not in mp.get_all_start_methods():
        pytest.skip(
            f"Multiprocessing start method {ctx} not available on this platform."
        )
    context = mp.get_context(ctx) if ctx is not None else mp
    with pytest.raises(CustomSpaceError):
        create_shared_memory(space, n=n, ctx=context)
def _write_shared_memory(space, i, shared_memory, sample):
    # Module-level helper so it can be pickled as a multiprocessing target.
    write_to_shared_memory(space, i, sample, shared_memory)
@pytest.mark.parametrize(
    "space", spaces, ids=[space.__class__.__name__ for space in spaces]
)
def test_write_to_shared_memory(space):
    """Eight processes each write one sample at their index; afterwards the
    shared memory must contain every sample, flattened in index order."""
    def assert_nested_equal(lhs, rhs):
        # rhs is always the list of 8 per-process samples
        assert isinstance(rhs, list)
        if isinstance(lhs, (list, tuple)):
            for i in range(len(lhs)):
                assert_nested_equal(lhs[i], [rhs_[i] for rhs_ in rhs])
        elif isinstance(lhs, (dict, OrderedDict)):
            for key in lhs.keys():
                assert_nested_equal(lhs[key], [rhs_[key] for rhs_ in rhs])
        elif isinstance(lhs, SynchronizedArray):
            # stacked samples flattened must match the raw shared buffer
            assert np.all(np.array(lhs[:]) == np.stack(rhs, axis=0).flatten())
        else:
            raise TypeError(f"Got unknown type `{type(lhs)}`.")
    shared_memory_n8 = create_shared_memory(space, n=8)
    samples = [space.sample() for _ in range(8)]
    processes = [
        Process(
            target=_write_shared_memory, args=(space, i, shared_memory_n8, samples[i])
        )
        for i in range(8)
    ]
    for process in processes:
        process.start()
    for process in processes:
        process.join()
    assert_nested_equal(shared_memory_n8, samples)
def _process_write(space, i, shared_memory, sample):
    # Module-level multiprocessing target (same body as _write_shared_memory
    # in the original file; kept separate per test for clarity).
    write_to_shared_memory(space, i, sample, shared_memory)
@pytest.mark.parametrize(
    "space", spaces, ids=[space.__class__.__name__ for space in spaces]
)
def test_read_from_shared_memory(space):
    """read_from_shared_memory must expose a numpy view over the shared buffer
    that reflects writes performed by child processes (zero-copy semantics)."""
    def assert_nested_equal(lhs, rhs, space, n):
        # rhs is always the list of per-process samples
        assert isinstance(rhs, list)
        if isinstance(space, Tuple):
            assert isinstance(lhs, tuple)
            for i in range(len(lhs)):
                assert_nested_equal(
                    lhs[i], [rhs_[i] for rhs_ in rhs], space.spaces[i], n
                )
        elif isinstance(space, Dict):
            assert isinstance(lhs, OrderedDict)
            for key in lhs.keys():
                assert_nested_equal(
                    lhs[key], [rhs_[key] for rhs_ in rhs], space.spaces[key], n
                )
        elif isinstance(space, BaseGymSpaces):
            # leaf: an ndarray of shape (n, *space.shape) with the space dtype
            assert isinstance(lhs, np.ndarray)
            assert lhs.shape == ((n,) + space.shape)
            assert lhs.dtype == space.dtype
            assert np.all(lhs == np.stack(rhs, axis=0))
        else:
            raise TypeError(f"Got unknown type `{type(space)}`")
    shared_memory_n8 = create_shared_memory(space, n=8)
    # view is created BEFORE the writes: it must observe them afterwards
    memory_view_n8 = read_from_shared_memory(space, shared_memory_n8, n=8)
    samples = [space.sample() for _ in range(8)]
    processes = [
        Process(target=_process_write, args=(space, i, shared_memory_n8, samples[i]))
        for i in range(8)
    ]
    for process in processes:
        process.start()
    for process in processes:
        process.join()
    assert_nested_equal(memory_view_n8, samples, space, n=8)
1,493 | get run | from typing import TYPE_CHECKING, Optional, Sequence, Tuple, cast
import dagster._check as check
from dagster._core.errors import DagsterRunNotFoundError
from dagster._core.execution.plan.state import KnownExecutionState
from dagster._core.host_representation.external import ExternalJob
from dagster._core.instance import DagsterInstance
from dagster._core.storage.dagster_run import DagsterRun, DagsterRunStatus
from dagster._core.storage.tags import RESUME_RETRY_TAG
from dagster._core.utils import make_new_run_id
from dagster._utils.merger import merge_dicts
from ..external import ensure_valid_config, get_external_execution_plan_or_raise
from ..utils import ExecutionParams
if TYPE_CHECKING:
from dagster_graphql.schema.util import ResolveInfo
def METHOD_NAME(instance: DagsterInstance, run_id: str) -> DagsterRun:
    """Look up a run by id on the instance, raising if it does not exist."""
    located = instance.get_run_by_id(run_id)
    if not located:
        raise DagsterRunNotFoundError(invalid_run_id=run_id)
    return cast(DagsterRun, located)
def compute_step_keys_to_execute(
    graphene_info: "ResolveInfo", execution_params: ExecutionParams
) -> Tuple[Optional[Sequence[str]], Optional[KnownExecutionState]]:
    """Resolve which step keys to run and the known state to run them with.

    Returns ``(step_keys, known_state)`` where either element may be ``None``
    (meaning "run everything" / "no prior state").
    """
    check.inst_param(execution_params, "execution_params", ExecutionParams)
    instance = graphene_info.context.instance
    if not execution_params.step_keys and is_resume_retry(execution_params):
        # Get step keys from parent_run_id if it's a resume/retry
        parent_run_id = check.not_none(execution_params.execution_metadata.parent_run_id)
        parent_run = METHOD_NAME(instance, parent_run_id)
        # build_resume_retry_reexecution returns the (step_keys, state) pair
        return KnownExecutionState.build_resume_retry_reexecution(
            instance,
            parent_run,
        )
    else:
        known_state = None
        # explicit step selection against a parent run: derive state from the
        # parent and then narrow it to the selected steps
        if execution_params.execution_metadata.parent_run_id and execution_params.step_keys:
            parent_run = METHOD_NAME(instance, execution_params.execution_metadata.parent_run_id)
            known_state = KnownExecutionState.build_for_reexecution(
                instance,
                parent_run,
            ).update_for_step_selection(execution_params.step_keys)
        return execution_params.step_keys, known_state
def is_resume_retry(execution_params: ExecutionParams) -> bool:
    """Return True when the run's metadata tags mark it as a resume/retry."""
    check.inst_param(execution_params, "execution_params", ExecutionParams)
    tags = execution_params.execution_metadata.tags
    return tags.get(RESUME_RETRY_TAG) == "true"
def create_valid_pipeline_run(
    graphene_info: "ResolveInfo",
    external_pipeline: ExternalJob,
    execution_params: ExecutionParams,
) -> DagsterRun:
    """Validate config, build an execution plan, and persist a NOT_STARTED run.

    Raises via ensure_valid_config / get_external_execution_plan_or_raise if
    the run config or plan is invalid; otherwise returns the created run.
    """
    ensure_valid_config(external_pipeline, execution_params.run_config)
    step_keys_to_execute, known_state = compute_step_keys_to_execute(
        graphene_info, execution_params
    )
    external_execution_plan = get_external_execution_plan_or_raise(
        graphene_info=graphene_info,
        external_pipeline=external_pipeline,
        run_config=execution_params.run_config,
        step_keys_to_execute=step_keys_to_execute,
        known_state=known_state,
    )
    # request-supplied tags override pipeline-level tags on key collision
    tags = merge_dicts(external_pipeline.tags, execution_params.execution_metadata.tags)
    dagster_run = graphene_info.context.instance.create_run(
        job_snapshot=external_pipeline.job_snapshot,
        execution_plan_snapshot=external_execution_plan.execution_plan_snapshot,
        parent_job_snapshot=external_pipeline.parent_job_snapshot,
        job_name=execution_params.selector.job_name,
        # honor a caller-provided run id, else mint a fresh one
        run_id=(
            execution_params.execution_metadata.run_id
            if execution_params.execution_metadata.run_id
            else make_new_run_id()
        ),
        asset_selection=(
            frozenset(execution_params.selector.asset_selection)
            if execution_params.selector.asset_selection
            else None
        ),
        op_selection=execution_params.selector.op_selection,
        resolved_op_selection=(
            frozenset(execution_params.selector.op_selection)
            if execution_params.selector.op_selection
            else None
        ),
        run_config=execution_params.run_config,
        step_keys_to_execute=step_keys_to_execute,
        tags=tags,
        root_run_id=execution_params.execution_metadata.root_run_id,
        parent_run_id=execution_params.execution_metadata.parent_run_id,
        status=DagsterRunStatus.NOT_STARTED,
        external_job_origin=external_pipeline.get_external_origin(),
        job_code_origin=external_pipeline.get_python_origin(),
    )
    return dagster_run
1,494 | write action transfer | from typing import TYPE_CHECKING
from apps.common.writers import (
write_bytes_fixed,
write_bytes_unchecked,
write_uint8,
write_uint16_le,
write_uint32_le,
write_uint64_le,
write_uvarint,
)
if TYPE_CHECKING:
from trezor.messages import (
EosActionBuyRam,
EosActionBuyRamBytes,
EosActionCommon,
EosActionDelegate,
EosActionDeleteAuth,
EosActionLinkAuth,
EosActionNewAccount,
EosActionRefund,
EosActionSellRam,
EosActionTransfer,
EosActionUndelegate,
EosActionUnlinkAuth,
EosActionUpdateAuth,
EosActionVoteProducer,
EosAsset,
EosAuthorization,
EosTxHeader,
)
from trezor.utils import Writer
def write_auth(w: Writer, auth: EosAuthorization) -> None:
    """Serialize an EOS authority: threshold, keys, account perms, waits.

    Field order and the uvarint length prefixes must match the eosio ABI
    exactly; do not reorder the writes below.
    """
    from trezor.wire import DataError
    write_uint32_le(w, auth.threshold)
    write_uvarint(w, len(auth.keys))
    for key in auth.keys:
        if key.key is None:
            raise DataError("Key must be provided explicitly.")
        write_uvarint(w, key.type)
        write_bytes_fixed(w, key.key, 33)  # compressed pubkey is 33 bytes
        write_uint16_le(w, key.weight)
    write_uvarint(w, len(auth.accounts))
    for account in auth.accounts:
        write_uint64_le(w, account.account.actor)
        write_uint64_le(w, account.account.permission)
        write_uint16_le(w, account.weight)
    write_uvarint(w, len(auth.waits))
    for wait in auth.waits:
        write_uint32_le(w, wait.wait_sec)
        write_uint16_le(w, wait.weight)
def write_header(hasher: Writer, header: EosTxHeader) -> None:
    """Serialize the transaction header fields in ABI order into the hasher."""
    write_uint32_le(hasher, header.expiration)
    write_uint16_le(hasher, header.ref_block_num)
    write_uint32_le(hasher, header.ref_block_prefix)
    write_uvarint(hasher, header.max_net_usage_words)
    write_uint8(hasher, header.max_cpu_usage_ms)
    write_uvarint(hasher, header.delay_sec)
def METHOD_NAME(w: Writer, msg: EosActionTransfer) -> None:
    # eosio.token transfer: from, to, quantity, memo (length-prefixed)
    write_uint64_le(w, msg.sender)
    write_uint64_le(w, msg.receiver)
    write_asset(w, msg.quantity)
    write_bytes_prefixed(w, msg.memo.encode())
def write_action_buyram(w: Writer, msg: EosActionBuyRam) -> None:
    # buyram: payer, receiver, quantity (tokens)
    write_uint64_le(w, msg.payer)
    write_uint64_le(w, msg.receiver)
    write_asset(w, msg.quantity)
def write_action_buyrambytes(w: Writer, msg: EosActionBuyRamBytes) -> None:
    # buyrambytes: payer, receiver, byte count
    write_uint64_le(w, msg.payer)
    write_uint64_le(w, msg.receiver)
    write_uint32_le(w, msg.bytes)
def write_action_sellram(w: Writer, msg: EosActionSellRam) -> None:
    write_uint64_le(w, msg.account)
    write_uint64_le(w, msg.bytes)
def write_action_delegate(w: Writer, msg: EosActionDelegate) -> None:
    # delegatebw shares its prefix with undelegatebw plus a transfer flag
    write_action_undelegate(w, msg)
    write_uint8(w, 1 if msg.transfer else 0)
def write_action_undelegate(
    w: Writer, msg: EosActionUndelegate | EosActionDelegate
) -> None:
    write_uint64_le(w, msg.sender)
    write_uint64_le(w, msg.receiver)
    write_asset(w, msg.net_quantity)
    write_asset(w, msg.cpu_quantity)
def write_action_refund(w: Writer, msg: EosActionRefund) -> None:
    write_uint64_le(w, msg.owner)
def write_action_voteproducer(w: Writer, msg: EosActionVoteProducer) -> None:
    # voteproducer: voter, proxy, varint-prefixed producer list
    write_uint64_le(w, msg.voter)
    write_uint64_le(w, msg.proxy)
    write_uvarint(w, len(msg.producers))
    for producer in msg.producers:
        write_uint64_le(w, producer)
def write_action_updateauth(w: Writer, msg: EosActionUpdateAuth) -> None:
    # updateauth shares its (account, permission) prefix with deleteauth
    write_action_deleteauth(w, msg)
    write_uint64_le(w, msg.parent)
    write_auth(w, msg.auth)
def write_action_deleteauth(
    w: Writer, msg: EosActionDeleteAuth | EosActionUpdateAuth
) -> None:
    write_uint64_le(w, msg.account)
    write_uint64_le(w, msg.permission)
def write_action_linkauth(w: Writer, msg: EosActionLinkAuth) -> None:
    # linkauth shares its (account, code, type) prefix with unlinkauth
    write_action_unlinkauth(w, msg)
    write_uint64_le(w, msg.requirement)
def write_action_unlinkauth(
    w: Writer, msg: EosActionUnlinkAuth | EosActionLinkAuth
) -> None:
    write_uint64_le(w, msg.account)
    write_uint64_le(w, msg.code)
    write_uint64_le(w, msg.type)
def write_action_newaccount(w: Writer, msg: EosActionNewAccount) -> None:
    write_uint64_le(w, msg.creator)
    write_uint64_le(w, msg.name)
    write_auth(w, msg.owner)
    write_auth(w, msg.active)
def write_action_common(w: Writer, msg: EosActionCommon) -> None:
    # common action envelope: account, name, authorization list
    write_uint64_le(w, msg.account)
    write_uint64_le(w, msg.name)
    write_uvarint(w, len(msg.authorization))
    for authorization in msg.authorization:
        write_uint64_le(w, authorization.actor)
        write_uint64_le(w, authorization.permission)
def write_asset(w: Writer, asset: EosAsset) -> None:
    write_uint64_le(w, asset.amount)
    write_uint64_le(w, asset.symbol)
def write_bytes_prefixed(w: Writer, data: bytes) -> None:
    # uvarint length followed by the raw bytes
    write_uvarint(w, len(data))
    write_bytes_unchecked(w, data)
1,495 | test content id | import json
import os
import re
from contextlib import contextmanager
from io import BytesIO
from tempfile import NamedTemporaryFile, TemporaryDirectory
from unittest.mock import patch
from urllib.request import Request, urlopen
from zipfile import ZipFile
import pytest
from repo2docker.__main__ import make_r2d
from repo2docker.contentproviders import Figshare
test_content_ids = [
("https://figshare.com/articles/title/9782777", "9782777.v1"),
("https://figshare.com/articles/title/9782777/2", "9782777.v2"),
("https://figshare.com/articles/title/9782777/1234", "9782777.v1234"),
]
@pytest.mark.parametrize("link,expected", test_content_ids)
def METHOD_NAME(link, expected, requests_mock):
    """content_id must encode article id and version after DOI resolution."""
    def mocked_get(req, context):
        # simulate doi.org issuing a 302 redirect to the figshare article URL
        if req.url.startswith("https://doi.org"):
            context.status_code = 302
            context.headers["Location"] = link
        return link
    requests_mock.get(re.compile("https://"), text=mocked_get)
    fig = Figshare()
    fig.detect("10.6084/m9.figshare.9782777")
    assert fig.content_id == expected
test_fig = Figshare()
test_fig.article_id = "123456"
test_fig.article_version = "42"
test_dois_links = [
(
"10.6084/m9.figshare.9782777",
{"host": test_fig.hosts[0], "article": "9782777", "version": "1"},
),
(
"10.6084/m9.figshare.9782777.v1",
{"host": test_fig.hosts[0], "article": "9782777", "version": "1"},
),
pytest.param(
"10.6084/m9.figshare.9782777.v2",
{"host": test_fig.hosts[0], "article": "9782777", "version": "2"},
# $ curl -sIL https://dx.doi.org/10.6084/m9.figshare.9782777.v2 | grep location
# location: https://figshare.com/articles/Binder-ready_openSenseMap_Analysis/9782777/2
# location: https://figshare.com/articles/code/Binder-ready_openSenseMap_Analysis/9782777
marks=pytest.mark.xfail(reason="Problem with figshare version redirects"),
),
(
"https://doi.org/10.6084/m9.figshare.9782777.v1",
{"host": test_fig.hosts[0], "article": "9782777", "version": "1"},
# $ curl -sIL https://doi.org/10.6084/m9.figshare.9782777.v1 | grep location
# location: https://figshare.com/articles/Binder-ready_openSenseMap_Analysis/9782777/1
# location: https://figshare.com/articles/code/Binder-ready_openSenseMap_Analysis/9782777
),
pytest.param(
"https://doi.org/10.6084/m9.figshare.9782777.v3",
{"host": test_fig.hosts[0], "article": "9782777", "version": "3"},
# $ curl -sIL https://doi.org/10.6084/m9.figshare.9782777.v3 | grep location
# location: https://figshare.com/articles/Binder-ready_openSenseMap_Analysis/9782777/3
# location: https://figshare.com/articles/code/Binder-ready_openSenseMap_Analysis/9782777
marks=pytest.mark.xfail(reason="Problem with figshare version redirects"),
),
(
"https://figshare.com/articles/title/97827771234",
{"host": test_fig.hosts[0], "article": "97827771234", "version": "1"},
),
(
"https://figshare.com/articles/title/9782777/1",
{"host": test_fig.hosts[0], "article": "9782777", "version": "1"},
),
(
"https://figshare.com/articles/title/9782777/2",
{"host": test_fig.hosts[0], "article": "9782777", "version": "2"},
),
(
"https://figshare.com/articles/title/9782777/",
{"host": test_fig.hosts[0], "article": "9782777", "version": "1"},
),
(
"https://figshare.com/articles/title/9782777/1234",
{"host": test_fig.hosts[0], "article": "9782777", "version": "1234"},
),
]
test_spec = {"host": test_fig.hosts[0], "article": "123456", "version": "42"}
@pytest.mark.parametrize("test_input,expected", test_dois_links)
def test_detect_figshare(test_input, expected):
    """Each supported DOI/URL form must resolve to the expected spec dict."""
    detected = Figshare().detect(test_input)
    assert detected == expected
def test_detect_not_figshare():
    """References to other providers or plain paths must not be claimed."""
    non_figshare_refs = (
        "/some/path/here",
        "https://example.com/path/here",
        "10.21105/joss.01277",
        "10.5281/zenodo.3232985",
        "https://doi.org/10.21105/joss.01277",
    )
    for ref in non_figshare_refs:
        assert Figshare().detect(ref) is None
@contextmanager
def figshare_archive(prefix="a_directory"):
    """Yield the path of a temporary ZIP holding two small files under *prefix*.

    The temporary file is removed automatically when the context exits.
    """
    with NamedTemporaryFile(suffix=".zip") as archive_file:
        with ZipFile(archive_file.name, mode="w") as archive:
            archive.writestr(f"{prefix}/some-file.txt", "some content")
            archive.writestr(f"{prefix}/some-other-file.txt", "some more content")
        yield archive_file.name
def test_fetch_zip(requests_mock):
    """A single ZIP attachment must be downloaded and unpacked into the dir."""
    # see test_zenodo.py/test_fetch_software
    with figshare_archive() as fig_path:
        mock_response = {
            "files": [
                {
                    "name": "afake.zip",
                    "is_link_only": False,
                    "download_url": f"file://{fig_path}",
                }
            ]
        }
        requests_mock.get(
            "https://api.figshare.com/v2/articles/123456/versions/42",
            json=mock_response,
        )
        # read the archive with a context manager so the handle is closed
        # (the original leaked an open file object)
        with open(fig_path, "rb") as fig_file:
            requests_mock.get(f"file://{fig_path}", content=fig_file.read())
        with TemporaryDirectory() as d:
            # drain the fetch generator, then inspect the unpacked tree
            output = list(test_fig.fetch(test_spec, d))
            unpacked_files = set(os.listdir(d))
        expected = {"some-other-file.txt", "some-file.txt"}
        assert expected == unpacked_files
def test_fetch_data(requests_mock):
    """Non-ZIP attachments are downloaded verbatim; link-only files skipped."""
    with figshare_archive() as a_path:
        with figshare_archive() as b_path:
            mock_response = {
                "files": [
                    {
                        "name": "afake.file",
                        "download_url": f"file://{a_path}",
                        "is_link_only": False,
                    },
                    {
                        "name": "bfake.data",
                        "download_url": f"file://{b_path}",
                        "is_link_only": False,
                    },
                    {"name": "cfake.link", "is_link_only": True},
                ]
            }
            requests_mock.get(
                "https://api.figshare.com/v2/articles/123456/versions/42",
                json=mock_response,
            )
            # context managers close the handles the original left open
            with open(a_path, "rb") as a_file:
                requests_mock.get(f"file://{a_path}", content=a_file.read())
            with open(b_path, "rb") as b_file:
                requests_mock.get(f"file://{b_path}", content=b_file.read())
            with TemporaryDirectory() as d:
                output = list(test_fig.fetch(test_spec, d))
                unpacked_files = set(os.listdir(d))
                # ZIP files shouldn't have been unpacked
                expected = {"bfake.data", "afake.file"}
                assert expected == unpacked_files
1,496 | test not valid when empty multivalues | # -*- coding: utf-8 -*-
from django import forms
from django.core.exceptions import ValidationError
from django.test import SimpleTestCase
from ralph.admin.fields import MultilineField, MultivalueFormMixin
from ralph.tests.models import TestAsset
class SimpleTestForm(MultivalueFormMixin, forms.Form):
    """Fixture form with three multiline fields; only `niw` is optional."""
    multivalue_fields = ['sn', 'barcode', 'niw']
    sn = MultilineField()
    barcode = MultilineField()
    niw = MultilineField(required=False)
class OneRequiredTestForm(MultivalueFormMixin, forms.Form):
    """Fixture form where at least one of sn/barcode must be filled per row."""
    one_of_mulitvalue_required = ['sn', 'barcode']
    multivalue_fields = ['sn', 'barcode']
    sn = MultilineField()
    barcode = MultilineField()
class TestAssetForm(MultivalueFormMixin, forms.ModelForm):
    """Model-backed fixture form for TestAsset with multiline sn/barcode."""
    multivalue_fields = ['sn', 'barcode']
    one_of_mulitvalue_required = ['sn', 'barcode']
    # NOTE(review): the positional argument is presumably a label/field-name
    # passed through to MultilineField -- confirm against its signature.
    sn = MultilineField('sn')
    barcode = MultilineField('barcode')
    class Meta:
        model = TestAsset
        fields = ['hostname', 'sn', 'barcode']
class MultiValueFormTest(SimpleTestCase):
    """Tests for MultivalueFormMixin: row padding, comma parsing, validation."""
    def test_extend_empty_fields_at_the_end(self):
        # shorter columns are padded with '' up to the longest column
        data = {
            'sn': ['1', '2', '3'],
            'barcode': ['1'],
        }
        form = SimpleTestForm({})
        result = form.extend_empty_fields_at_the_end(data)
        self.assertEqual(result, {
            'sn': ['1', '2', '3'],
            'barcode': ['1', '', ''],
            'niw': ['', '', ''],
        })
    def test_extend_empty_fields_at_the_end_with_empty_row(self):
        # a fully-empty trailing row is dropped before padding
        data = {
            'sn': ['1', '2', '3', ''],
            'barcode': ['1', '', '', ''],
        }
        form = SimpleTestForm({})
        result = form.extend_empty_fields_at_the_end(data)
        self.assertEqual(result, {
            'sn': ['1', '2', '3'],
            'barcode': ['1', '', ''],
            'niw': ['', '', ''],
        })
    def test_works_for_single_value_each(self):
        data = {
            'sn': 'sn1',
            'barcode': 'bc1',
            'niw': 'niw1',
        }
        form = SimpleTestForm(data)
        self.assertTrue(form.is_valid())
    def test_works_for_multi_value_each(self):
        data = {
            'sn': 'sn1, sn2, sn3',
            'barcode': 'bc1, bc2, bc3',
            'niw': 'niw1, niw2, niw3',
        }
        form = SimpleTestForm(data)
        self.assertTrue(form.is_valid())
    def test_works_for_multi_value_with_empty_holes(self):
        # optional column may have empty slots in the middle
        data = {
            'sn': 'sn1, sn2, sn3',
            'barcode': 'bc1, bc2, bc3',
            'niw': 'niw1, , niw3',
        }
        form = SimpleTestForm(data)
        self.assertTrue(form.is_valid())
        self.assertEqual(form.cleaned_data['niw'], ['niw1', '', 'niw3'])
    def test_works_for_multi_value_with_empty_holes_at_the_end(self):
        data = {
            'sn': 'sn1, sn2, sn3',
            'barcode': 'bc1, bc2, bc3',
            'niw': 'niw1, ,',
        }
        form = SimpleTestForm(data)
        self.assertTrue(form.is_valid())
        self.assertEqual(form.cleaned_data['niw'], ['niw1', '', ''])
    def test_works_for_multi_value_with_extension_to_longest_field(self):
        data = {
            'sn': 'sn1, sn2, sn3',
            'barcode': 'bc1, bc2, bc3',
            'niw': 'niw1',
        }
        form = SimpleTestForm(data)
        self.assertTrue(form.is_valid())
        self.assertEqual(form.cleaned_data['niw'], ['niw1', '', ''])
    def test_valid_when_different_count(self):
        # required columns are padded too; validation runs per padded row
        data = {
            'sn': 'sn1',
            'barcode': 'bc1, bc2',
            'niw': 'niw1, niw2'
        }
        form = SimpleTestForm(data)
        self.assertTrue(form.is_valid())
        self.assertEqual(form.cleaned_data['sn'], ['sn1', ''])
    def METHOD_NAME(self):
        # entirely empty multivalue input must fail one_of_mulitvalue_required
        data = {
            'sn': '',
            'barcode': '',
        }
        form = OneRequiredTestForm(data)
        self.assertFalse(form.is_valid())
    def test_not_valid_when_none_multivalue_passed(self):
        # a row where every one_of_mulitvalue_required field is empty fails
        data = {
            'sn': 'sn1,,sn3',
            'barcode': 'br1,,br3',
        }
        form = OneRequiredTestForm(data)
        self.assertFalse(form.is_valid())
        self.assertIn('sn', form.errors)
        self.assertIn('barcode', form.errors)
class MultilineFieldTest(SimpleTestCase):
    """Tests for MultilineField.clean: splitting, duplicates, whitespace."""
    def test_field_works_for_single_value(self):
        field = MultilineField()
        value_with_duplicates = '1'
        self.assertEqual(field.clean(value_with_duplicates), ['1'])
    def test_field_works_for_multi_value(self):
        field = MultilineField()
        value_with_duplicates = '1,2'
        self.assertEqual(field.clean(value_with_duplicates), ['1', '2'])
    def test_field_not_valid_when_duplicates(self):
        field = MultilineField(allow_duplicates=False)
        value_with_duplicates = '1,1'
        with self.assertRaises(ValidationError):
            field.clean(value_with_duplicates)
    def test_field_valid_when_duplicates_allowed(self):
        field = MultilineField(allow_duplicates=True)
        value_with_duplicates = '1,1'
        self.assertEqual(field.clean(value_with_duplicates), ['1', '1'])
    def test_field_strips_whitespaces(self):
        field = MultilineField(allow_duplicates=True)
        value_with_duplicates = ' 1 '
        self.assertEqual(field.clean(value_with_duplicates), ['1'])
    def test_field_allows_blank_elements(self):
        # blanks are preserved positionally so rows stay aligned
        field = MultilineField(allow_duplicates=True)
        value_with_empty = '1,,3'
        self.assertEqual(field.clean(value_with_empty), ['1', '', '3'])
1,497 | should hide score | import functools
from datetime import datetime, timezone
from sqlalchemy.orm import relationship
from sqlalchemy.schema import Column, ForeignKey
from sqlalchemy.sql.sqltypes import Boolean, Integer, String, Text
from files.classes.cron.tasks import (RepeatableTask, ScheduledTaskType,
TaskRunContext)
from files.classes.submission import Submission
from files.classes.visstate import StateMod, StateReport, VisibilityState
from files.helpers.config.const import SUBMISSION_TITLE_LENGTH_MAXIMUM
from files.helpers.content import body_displayed
from files.helpers.lazy import lazy
from files.helpers.sanitize import filter_emojis_only
__all__ = ('ScheduledSubmissionTask',)
class ScheduledSubmissionTask(RepeatableTask):
__tablename__ = "tasks_repeatable_scheduled_submissions"
__mapper_args__ = {
"polymorphic_identity": int(ScheduledTaskType.SCHEDULED_SUBMISSION),
}
id = Column(Integer, ForeignKey(RepeatableTask.id), primary_key=True)
author_id_submission = Column(Integer, ForeignKey("users.id"), nullable=False)
ghost = Column(Boolean, default=False, nullable=False)
private = Column(Boolean, default=False, nullable=False)
over_18 = Column(Boolean, default=False, nullable=False)
is_bot = Column(Boolean, default=False, nullable=False)
title = Column(String(SUBMISSION_TITLE_LENGTH_MAXIMUM), nullable=False)
url = Column(String)
body = Column(Text)
body_html = Column(Text)
flair = Column(String)
embed_url = Column(String)
author = relationship("User", foreign_keys=author_id_submission)
task = relationship(RepeatableTask)
submissions = relationship(Submission,
back_populates="task", order_by="Submission.id.desc()")
def run_task(self, ctx:TaskRunContext) -> None:
submission:Submission = self.make_submission(ctx)
with ctx.app_context():
# TODO: stop using app context (currently required for sanitize and
# username pings)
submission.submit(ctx.db) # TODO: thumbnails
submission.publish()
def make_submission(self, ctx:TaskRunContext) -> Submission:
title:str = self.make_title(ctx.trigger_time)
title_html:str = filter_emojis_only(title, graceful=True)
if len(title_html) > 1500: raise ValueError("Rendered title too large")
return Submission(
created_utc=int(ctx.trigger_time.timestamp()),
private=self.private,
author_id=self.author_id_submission,
over_18=self.over_18,
app_id=None,
is_bot =self.is_bot,
title=title,
title_html=title_html,
url=self.url,
body=self.body,
body_html=self.body_html,
flair=self.flair,
ghost=self.ghost,
state_mod=StateMod.VISIBLE,
embed_url=self.embed_url,
task_id=self.id,
)
def make_title(self, trigger_time:datetime) -> str:
return trigger_time.strftime(self.title)
# properties below here are mocked in order to reuse part of the submission
# HTML template for previewing a submitted task
@property
def state_user_deleted_utc(self) -> datetime | None:
return datetime.now(tz=timezone.utc) if not self.task.enabled else None
@functools.cached_property
def title_html(self) -> str:
    '''
    Emoji-filtered HTML of the raw title template, cached per instance.

    This is used as a mock property for display in submission listings that
    contain scheduled posts.

    .. warning::
        This property should not be used for generating the HTML for an actual
        submission as this will be missing the special formatting that may be
        applied to titles. Instead call
        `ScheduledSubmissionContext.make_title()` with the `datetime` that the
        event was triggered at.
    '''
    return filter_emojis_only(self.title)
@property
def author_name(self) -> str:
    # Mocked for the submission template: show the task author's username.
    return self.author.username
@property
def upvotes(self) -> int:
    # Mocked for listing templates: a scheduled post previews with 1 upvote.
    return 1
@property
def score(self) -> int:
    # Mocked: constant score matching the single mocked upvote.
    return 1
@property
def downvotes(self) -> int:
    # Mocked: previews never show downvotes.
    return 0
@property
def realupvotes(self) -> int:
    # Mocked: mirrors the constant upvote count.
    return 1
@property
def comment_count(self) -> int:
    # Mocked: a not-yet-posted submission has no comments.
    return 0
@property
def views(self) -> int:
    # Mocked: a not-yet-posted submission has no views.
    return 0
@property
def state_mod(self) -> StateMod:
    # Mocked: previews always render as mod-visible.
    return StateMod.VISIBLE
def award_count(self, kind):
    # Mocked: previews show zero awards of any kind.
    return 0
@lazy
def realurl(self, v):
    # Delegate to Submission.realurl so URL rendering matches real posts.
    return Submission.realurl(self, v)
def realbody(self, v):
    # HTML body as it would be displayed to viewer `v`.
    # NOTE(review): sibling accessors (realurl/realtitle) are @lazy but this
    # one is not -- confirm whether the missing cache decorator is intentional.
    return body_displayed(self, v, is_html=True)
def plainbody(self, v):
    # Plain-text body as it would be displayed to viewer `v` (no @lazy,
    # matching realbody above -- confirm intentional).
    return body_displayed(self, v, is_html=False)
@lazy
def realtitle(self, v):
    # Prefer the rendered HTML title; fall back to the raw template.
    return self.title_html if self.title_html else self.title
@lazy
def plaintitle(self, v):
    # Raw (unrendered) title template.
    return self.title
@property
def permalink(self):
    # Preview links point at the task editor, not a real /post/ URL.
    return f"/tasks/scheduled_posts/{self.id}"
@property
def shortlink(self):
    # No separate short URL for previews; reuse the permalink.
    return self.permalink
@property
def is_real_submission(self) -> bool:
    # Lets shared templates distinguish this preview from a stored Submission.
    return False
@property
def METHOD_NAME(self) -> bool:
    # Counterpart flag to is_real_submission; always true for this mock.
    return True
@property
def edit_url(self) -> str:
    # Editing a preview edits the task's content, not a submission.
    return f"/tasks/scheduled_posts/{self.id}/content"
@property
def visibility_state(self) -> VisibilityState:
    # Mocked: previews always render as visible and unreported; the
    # deleted-style UI for disabled tasks comes from state_user_deleted_utc.
    return VisibilityState(
        state_mod=StateMod.VISIBLE,
        state_mod_set_by=None,
        state_report=StateReport.UNREPORTED,
        deleted=False, # we only want to show deleted UI color if disabled
        op_shadowbanned=False,
        op_id=self.author_id_submission,
        op_name_safe=self.author_name
    )
1,498 | load | # -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import unicode_literals
import dnf.conf
import dnf.repo
from .common import TestCase
class DnfRepoApiTest(TestCase):
    """API-surface tests for dnf.repo.

    Each test pins one public attribute/method of dnf.repo (mostly
    dnf.repo.Repo): that it exists, is callable with the documented
    arguments, and returns the expected type or value.
    """
    def test_init(self):
        # dnf.repo.Repo.__init__
        self.assertHasAttr(dnf.repo, "Repo")
        self.assertHasType(dnf.repo.Repo, object)
        repo = dnf.repo.Repo(name=None, parent_conf=None)
    def test_repo_id_invalid(self):
        # dnf.repo.repo_id_invalid
        self.assertHasAttr(dnf.repo, "repo_id_invalid")
        dnf.repo.repo_id_invalid(repo_id="repo-id")
    def test_metadata_fresh(self):
        # dnf.repo.Metadata.fresh
        self.assertHasAttr(dnf.repo, "Metadata")
        class MockRepo:
            def fresh(self):
                return True
        mock_repo = MockRepo()
        md = dnf.repo.Metadata(repo=mock_repo)
        self.assertEqual(md.fresh, True)
    def test_DEFAULT_SYNC(self):
        # dnf.repo.Repo.DEFAULT_SYNC
        self.assertHasAttr(dnf.repo.Repo, "DEFAULT_SYNC")
        self.assertHasType(dnf.repo.Repo.DEFAULT_SYNC, int)
    def test_metadata(self):
        # dnf.repo.Repo.metadata
        repo = dnf.repo.Repo()
        self.assertHasAttr(repo, "metadata")
        self.assertEqual(repo.metadata, None)
    def test_id(self):
        # dnf.repo.Repo.id
        repo = dnf.repo.Repo()
        self.assertHasAttr(repo, "id")
        self.assertEqual(repo.id, "")
    def test_repofile(self):
        # dnf.repo.Repo.repofile
        repo = dnf.repo.Repo()
        self.assertEqual(repo.repofile, "")
    def test_pkgdir(self):
        # dnf.repo.Repo.pkgdir
        conf = dnf.conf.Conf()
        conf.cachedir = "/tmp/cache"
        repo = dnf.repo.Repo(name=None, parent_conf=conf)
        self.assertHasAttr(repo, "pkgdir")
        self.assertHasType(repo.pkgdir, str)
    def test_pkgdir_setter(self):
        # dnf.repo.Repo.pkgdir - setter
        repo = dnf.repo.Repo()
        repo.pkgdir = "dir"
        self.assertHasType(repo.pkgdir, str)
        self.assertEqual(repo.pkgdir, "dir")
    def test_disable(self):
        # dnf.repo.Repo.disable
        repo = dnf.repo.Repo()
        self.assertHasAttr(repo, "disable")
        repo.disable()
    def test_enable(self):
        # dnf.repo.Repo.enable
        repo = dnf.repo.Repo()
        self.assertHasAttr(repo, "enable")
        repo.enable()
    def test_add_metadata_type_to_download(self):
        # dnf.repo.Repo.add_metadata_type_to_download
        repo = dnf.repo.Repo()
        self.assertHasAttr(repo, "add_metadata_type_to_download")
        repo.add_metadata_type_to_download(metadata_type="primary")
    def test_remove_metadata_type_from_download(self):
        # dnf.repo.Repo.remove_metadata_type_from_download
        repo = dnf.repo.Repo()
        self.assertHasAttr(repo, "remove_metadata_type_from_download")
        repo.remove_metadata_type_from_download(metadata_type="primary")
    def test_get_metadata_path(self):
        # dnf.repo.Repo.get_metadata_path
        repo = dnf.repo.Repo()
        self.assertHasAttr(repo, "get_metadata_path")
        path = repo.get_metadata_path(metadata_type="primary")
        self.assertHasType(path, str)
    def test_get_metadata_content(self):
        # dnf.repo.Repo.get_metadata_content
        repo = dnf.repo.Repo()
        self.assertHasAttr(repo, "get_metadata_content")
        content = repo.get_metadata_content(metadata_type="primary")
        self.assertHasType(content, str)
    def test_load(self):
        # dnf.repo.Repo.load
        repo = dnf.repo.Repo()
        class MockRepo:
            def METHOD_NAME(self):
                return True
        # swap in a mock backend so load() does not hit the network
        repo._repo = MockRepo()
        self.assertHasAttr(repo, "load")
        repo.METHOD_NAME()
    def test_dump(self):
        # dnf.repo.Repo.dump - inherited from BaseConfig
        repo = dnf.repo.Repo()
        self.assertHasAttr(repo, "dump")
        content = repo.dump()
        self.assertHasType(content, str)
    def test_set_progress_bar(self):
        # dnf.repo.Repo.set_progress_bar
        repo = dnf.repo.Repo()
        self.assertHasAttr(repo, "set_progress_bar")
        repo.set_progress_bar(progress=None)
    def test_get_http_headers(self):
        # dnf.repo.Repo.get_http_headers
        repo = dnf.repo.Repo()
        self.assertHasAttr(repo, "get_http_headers")
        headers = repo.get_http_headers()
        self.assertHasType(headers, tuple)
    def test_set_http_headers(self):
        # dnf.repo.Repo.set_http_headers
        repo = dnf.repo.Repo()
        self.assertHasAttr(repo, "set_http_headers")
        headers = repo.set_http_headers(headers=[])
1,499 | addx | #!/usr/bin/python
#
# Simson's simple stats. If what you want isn't here, use stats.
import time
import os
class statbag:
    """A simple statistics package for 1- and 2-dimensional values.

    One-dimensional samples are added with METHOD_NAME(), which also
    maintains a value histogram; paired samples are added with addxy().
    Also renders a per-day histogram to a PNG via gnuplot.
    """
    def __init__(self):
        self.x = []    # first (or only) variable's samples
        self.y = []    # second variable's samples (addxy only)
        self.hist = {} # single value histogram: value -> count
    def __add__(self, another):
        """Return a new statbag holding both bags' samples.

        Bug fix: previously constructed an undefined class ``stats``
        (NameError) and silently dropped the histogram; now returns a
        statbag with x, y, and merged histogram counts.
        """
        new = statbag()
        new.x = self.x + another.x
        new.y = self.y + another.y
        for bag in (self, another):
            for k, v in bag.hist.items():
                new.hist[k] = new.hist.get(k, 0) + v
        return new
    def METHOD_NAME(self, x):
        """Add one 1-D sample and count it in the histogram."""
        self.x.append(x)
        self.hist[x] = self.hist.get(x, 0) + 1
    def addxy(self, x, y):
        """Add one paired (x, y) sample (not histogrammed)."""
        self.x.append(x)
        self.y.append(y)
    def count(self):
        """Number of x samples."""
        return len(self.x)
    def convert_to_float(self):
        """Coerce every stored sample to float, in place."""
        # Bug fix: xrange does not exist on Python 3; range works everywhere.
        for i in range(len(self.x)):
            self.x[i] = float(self.x[i])
        for i in range(len(self.y)):
            self.y[i] = float(self.y[i])
    def sumx(self):
        """Sum of x samples."""
        return sum(self.x)
    def sumy(self):
        """Sum of y samples."""
        return sum(self.y)
    def sumxx(self):
        """Sum of squared x samples."""
        return sum(i * i for i in self.x)
    def sumyy(self):
        """Sum of squared y samples."""
        return sum(i * i for i in self.y)
    def average(self):
        """Mean of x (coerces any string samples to float first, in place)."""
        for i in range(len(self.x)):
            if type(self.x[i]) == type(""):
                self.x[i] = float(self.x[i])
        return float(self.sumx()) / self.count()
    def minx(self):
        """Smallest x sample."""
        return min(self.x)
    def maxx(self):
        """Largest x sample."""
        return max(self.x)
    def rangex(self):
        """maxx - minx."""
        return self.maxx() - self.minx()
    def variance(self):
        """Sum of squared deviations from the mean.

        NOTE: this is the *unnormalized* sum; stddev() divides by count.
        """
        avg = self.average()
        return sum((i - avg) * (i - avg) for i in self.x)
    def stddev(self):
        """Population standard deviation of x."""
        import math
        return math.sqrt(self.variance() / self.count())
    # Two variable statistics
    def sumxy(self):
        """Sum of elementwise x*y products."""
        assert len(self.x) == len(self.y)
        return sum(a * b for a, b in zip(self.x, self.y))
    def correlation(self):
        """Pearson correlation of (x, y); 0 when undefined (zero variance)."""
        import math
        n = len(self.x)
        sumx = self.sumx()
        sumy = self.sumy()
        sumxx = self.sumxx()
        sumyy = self.sumyy()
        sumxy = self.sumxy()
        top = n * sumxy - sumx * sumy
        bot = math.sqrt((n * sumxx - sumx * sumx) * (n * sumyy - sumy * sumy))
        if bot == 0:
            return 0  # not correlated
        return top / bot
    def xystr(self):
        """Return a string of all the xy values, one pair per line."""
        return "".join("%g %g\n" % (a, b) for a, b in zip(self.x, self.y))
    def stats1(self):
        """Return the single-variable summary as a string."""
        ret = ""
        ret += "Single variable stats:\n"
        ret += "count= %d\n" % self.count()
        ret += "min: %g max: %g range: %g\n" % (self.minx(), self.maxx(), self.rangex())
        ret += "sum: %g sum of squares: %g \n" % (self.sumx(), self.sumxx())
        ret += "average: %g\n" % (self.average())
        ret += "variance: %g stddev: %g\n" % (self.variance(), self.stddev())
        return ret
    def print_stats1(self):
        """Print the single-variable summary to stdout."""
        print("Single variable stats:")
        print("count= %d" % self.count())
        print("min: %g max: %g range: %g" % (self.minx(), self.maxx(), self.rangex()))
        print("sum: %g sum of squares: %g " % (self.sumx(), self.sumxx()))
        print("average: %g" % (self.average()))
        print("variance: %g stddev: %g" % (self.variance(), self.stddev()))
    def histogram(self):
        "Return a histogram --- a hash of (xvalue,count) tuples"
        return self.hist
    def print_histogram(self, xtitle, ytitle):
        "Print a histogram given XTITLE and YTITLE"
        print("%20s %10s" % (xtitle, ytitle))
        # Bug fix: dict.keys() is a view on Python 3 and has no .sort();
        # sorted() works on both Python 2 and 3.
        for i in sorted(self.hist):
            print("%20s %10d" % (i, self.hist[i]))
    def plot_date_histogram(self, fname, title, width, height):
        """Render a per-day histogram of date-tuple samples to PNG via gnuplot.

        Samples are expected to be time.struct_time-style tuples. A zero
        entry is emitted for every empty day adjacent to a populated one so
        gnuplot draws gaps correctly.
        """
        def add_days(date, days):
            return time.localtime(time.mktime(date) + 60 * 60 * 24 * days)[0:3] + (0, 0, 0, 0, 0, 0)
        first = add_days(self.minx(), -1)  # start one day before
        last = add_days(self.maxx(), 1)    # go to one day after
        cmd_file = fname + ".txt"
        dat_file = fname + ".dat"
        hist = self.histogram()
        with open(dat_file, "w") as d:
            # Bug fixes: dict.has_key() and keys().sort() are Python-2-only;
            # use `in` and sorted() instead.
            for i in sorted(hist):
                yesterday = add_days(i, -1)
                if yesterday not in hist:
                    d.write("%d/%d/%d 0\n" % (yesterday[1], yesterday[2], yesterday[0]))
                d.write("%d/%d/%d %d\n" % (i[1], i[2], i[0], hist[i]))
                tomorrow = add_days(i, 1)
                if tomorrow not in hist:
                    d.write("%d/%d/%d 0\n" % (tomorrow[1], tomorrow[2], tomorrow[0]))
        with open(cmd_file, "w") as f:
            f.write("set terminal png small size %d,%d\n" % (width, height))  # "small" is fontsize
            f.write("set output '%s'\n" % fname)
            f.write("set xdata time\n")
            f.write("set timefmt '%m/%d/%y'\n")
            f.write("set xrange ['%d/%d/%d':'%d/%d/%d']\n" %
                    (first[1], first[2], first[0], last[1], last[2], last[0] + 1))
            f.write("set format x '%m/%d'\n")
            f.write("set boxwidth 0.5 relative\n")
            f.write("plot '%s' using 1:2 t '%s' with boxes fs solid\n" % (dat_file, title))
            f.write("quit\n")
        # SECURITY: fname is interpolated into a shell command line; callers
        # must not pass untrusted filenames (subprocess.run with an argument
        # list would be safer).
        os.system("gnuplot %s" % cmd_file)
        #os.unlink(cmd_file)
        #os.unlink(dat_file)
if __name__ == "__main__":
    import sys
    # Simple CLI: read one line of whitespace-separated numbers from stdin
    # and print the single-variable summary.
    print("Enter your numbers on a line seperated by spaces:")
    j = sys.stdin.readline()
    st = statbag()
    for v in j.strip().split(' '):
        st.METHOD_NAME(float(v))
    st.print_stats1()
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.