id int64 0 300k | label stringlengths 1 74 ⌀ | text stringlengths 4k 8k |
|---|---|---|
4,700 | run git http | # Copyright © Michal Čihař <michal@weblate.org>
#
# SPDX-License-Identifier: GPL-3.0-or-later
import os.path
import subprocess
from base64 import b64decode
from email import message_from_string
from django.core.exceptions import PermissionDenied
from django.http import Http404
from django.http.response import HttpResponse, HttpResponseServerError
from django.shortcuts import redirect
from django.urls import reverse
from django.views.decorators.cache import never_cache
from django.views.decorators.csrf import csrf_exempt
from weblate.auth.models import User
from weblate.gitexport.models import SUPPORTED_VCS
from weblate.gitexport.utils import find_git_http_backend
from weblate.trans.models import Component
from weblate.utils.errors import report_error
from weblate.utils.views import parse_path
def response_authenticate():
    """Build an HTTP 401 response that asks the client for Basic credentials."""
    unauthorized = HttpResponse(status=401)
    # The realm string is what Git clients display when prompting for a password.
    unauthorized["WWW-Authenticate"] = 'Basic realm="Weblate Git access"'
    return unauthorized
def authenticate(request, auth):
    """
    Perform authentication with HTTP Basic auth.

    On success, stores the matched user on ``request.user`` and returns True;
    any parse or lookup failure returns False.
    """
    # Header format is "<method> <payload>"; anything else is rejected.
    try:
        method, data = auth.split(None, 1)
    except (ValueError, TypeError):
        return False
    if method.lower() != "basic":
        return False
    # Payload is base64("username:token"); iso-8859-1 per the Basic auth RFC.
    try:
        username, code = b64decode(data).decode("iso-8859-1").split(":", 1)
    except (ValueError, TypeError):
        return False
    # The token doubles as the password; a mismatch raises DoesNotExist.
    try:
        user = User.objects.get(username=username, auth_token__key=code)
    except User.DoesNotExist:
        return False
    if not user.is_active:
        return False
    request.user = user
    return True
@never_cache
@csrf_exempt
def git_export(request, path, git_request):
    """
    Git HTTP server view.

    Wrapper around git-http-backend to provide Git repositories export over HTTP.
    Performs permission checks and hands over execution to the wrapper.
    """
    # Reject non pull access early: only the smart-HTTP fetch service
    # ("git-upload-pack") or a dumb request (no service param) is accepted.
    if request.GET.get("service", "") not in ("", "git-upload-pack"):
        raise PermissionDenied("Only pull is supported")
    # HTTP authentication.
    # NOTE(review): the b"" default is falsy, so a missing Authorization header
    # simply skips authenticate(); the bytes default itself is never consumed.
    auth = request.headers.get("authorization", b"")
    if auth and not authenticate(request, auth):
        return response_authenticate()
    try:
        obj = parse_path(request, path, (Component,))
    except Http404:
        # Anonymous users get a 401 rather than a 404, presumably so clients
        # prompt for credentials (and private repo existence is not revealed)
        # — confirm against project conventions.
        if not request.user.is_authenticated:
            return response_authenticate()
        raise
    # Strip possible double path separators
    git_request = git_request.lstrip("/\\")
    # Permissions: same 401-for-anonymous pattern as the lookup above.
    if not request.user.has_perm("vcs.access", obj):
        if not request.user.is_authenticated:
            return response_authenticate()
        raise PermissionDenied("No VCS permissions")
    if obj.vcs not in SUPPORTED_VCS:
        raise Http404("Not a git repository")
    # Linked components share a repository: permanently redirect to the linked
    # component's export URL, preserving the original query string.
    if obj.is_repo_link:
        return redirect(
            "{}?{}".format(
                reverse(
                    "git-export",
                    kwargs={
                        "path": obj.linked_component.get_url_path(),
                        "git_request": git_request,
                    },
                ),
                request.META["QUERY_STRING"],
            ),
            permanent=True,
        )
    return METHOD_NAME(request, obj, git_request)
def METHOD_NAME(request, obj, git_request):
    """
    Git HTTP backend execution wrapper.

    Runs git-http-backend as a CGI subprocess for the component's repository
    and translates its CGI output into a Django HttpResponse.
    """
    # Find Git HTTP backend
    git_http_backend = find_git_http_backend()
    if git_http_backend is None:
        return HttpResponseServerError("git-http-backend not found")
    # Invoke Git HTTP backend with a minimal CGI environment; nothing from the
    # parent environment is inherited (env replaces os.environ entirely).
    query = request.META.get("QUERY_STRING", "")
    process_env = {
        "REQUEST_METHOD": request.method,
        "PATH_TRANSLATED": os.path.join(obj.full_path, git_request),
        # Export repositories regardless of git-daemon-export-ok marker files.
        "GIT_HTTP_EXPORT_ALL": "1",
        "CONTENT_TYPE": request.headers.get("content-type", ""),
        "QUERY_STRING": query,
        "HTTP_CONTENT_ENCODING": request.headers.get("content-encoding", ""),
    }
    process = subprocess.Popen(
        [git_http_backend],
        env=process_env,
        stdin=subprocess.PIPE,
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE,
    )
    # Feed the request body to the backend and collect stdout/stderr;
    # communicate() waits for the process, so poll() returns the exit code.
    output, output_err = process.communicate(request.body)
    retcode = process.poll()
    # Log error
    if output_err:
        output_err = output_err.decode()
        report_error(
            cause="Git backend failure",
            project=obj.project,
            level="error",
            message=True,
        )
    # Handle failure
    if retcode:
        return HttpResponseServerError(output_err)
    # CGI output: header block and body are separated by a blank line.
    # NOTE(review): assumes the CRLF-CRLF separator is always present when the
    # backend exits cleanly — confirm.
    headers, content = output.split(b"\r\n\r\n", 1)
    message = message_from_string(headers.decode())
    # Handle status in response (e.g. "Status: 404 Not Found" -> 404)
    if "status" in message:
        return HttpResponse(status=int(message["status"].split()[0]))
    # Send content
    response = HttpResponse(content_type=message["content-type"])
    response.write(content)
    return response
4,701 | header impl | load("//kythe/docs:asciidoc.bzl", "AsciidocInfo")
load("@bazel_skylib//lib:paths.bzl", "paths")
load("@bazel_skylib//lib:shell.bzl", "shell")
# Provider carrying the generated Jekyll header file for one asciidoc source.
_AsciidocHeaderInfo = provider(
    fields = {"header": "File with the asciidoc header."},
)

# Marker provider identifying targets produced by the site_docs rule.
_SiteDocsInfo = provider()
def METHOD_NAME(target, ctx):
    """Aspect implementation: generate a Jekyll doc header for the rule's src file."""
    src = ctx.rule.file.src
    # e.g. foo.txt -> foo.head.txt ("head" is inserted before the extension).
    header = ctx.actions.declare_file(paths.replace_extension(src.path, "head." + src.extension))
    # The doc_header tool reads the source and writes the header file.
    ctx.actions.run(
        inputs = [src],
        outputs = [header],
        tools = [ctx.executable._docheader],
        executable = ctx.executable._docheader,
        arguments = [src.path, header.path],
        mnemonic = "JekyllHeader",
    )
    return [
        _AsciidocHeaderInfo(header = header),
    ]
# Aspect applied to site_docs srcs: generates a Jekyll header for each
# asciidoc target using the //kythe/web/site:doc_header tool.
_header_aspect = aspect(
    implementation = METHOD_NAME,
    attrs = {
        "_docheader": attr.label(
            default = Label("//kythe/web/site:doc_header"),
            executable = True,
            cfg = "exec",
        ),
    },
)
def _impl(ctx):
    """Assemble rendered asciidoc pages plus their Jekyll headers into one site directory."""
    outdir = ctx.actions.declare_directory(ctx.label.name)
    commands = []
    inputs = []
    for src in ctx.attr.srcs:
        header = src[_AsciidocHeaderInfo].header
        html = src[AsciidocInfo].primary_output_path
        resources = src[AsciidocInfo].resource_dir
        inputs += [resources, header]
        commands += [
            # Copy only the files from the resource dir, omitting the html file itself
            # or we will get subsequent permissions problems.
            "find {resource_dir} -mindepth 1 -maxdepth 1 -depth -not -path {html} -exec cp -L -r {{}} {outdir} \\;".format(
                resource_dir = shell.quote(resources.path),
                outdir = shell.quote(outdir.path),
                html = shell.quote(paths.join(resources.path, html)),
            ),
            # Prepend the generated Jekyll header to the rendered html page.
            "cat {header} {html} > {output}".format(
                header = shell.quote(header.path),
                html = shell.quote(paths.join(resources.path, html)),
                output = shell.quote(paths.join(outdir.path, html)),
            ),
        ]
    # Dependencies (other site_docs targets) are copied in wholesale.
    for dep in ctx.attr.deps:
        files = dep.files.to_list()
        inputs += files
        commands += [
            "cp -L -r {file} {outdir}".format(
                file = shell.quote(file.path),
                outdir = shell.quote(outdir.path),
            )
            for file in files
        ]
    # Apply the requested renames from inside the output directory.
    commands.append("pushd {outdir}".format(outdir = shell.quote(outdir.path)))
    for src, dest in ctx.attr.rename_files.items():
        commands.append("mv {src} {dest}".format(
            src = shell.quote(src),
            dest = shell.quote(dest),
        ))
    commands.append("popd")
    # All staging is performed by a single shell action writing into outdir.
    ctx.actions.run_shell(
        mnemonic = "BuildDocs",
        inputs = inputs,
        outputs = [outdir],
        command = "\n".join([
            "set -e",
            "mkdir -p {outdir}".format(outdir = shell.quote(outdir.path)),
        ] + commands),
    )
    return [
        # Only include the root directory in our declared outputs.
        # This ensures that downstream rules don't see files listed twice if
        # they expand tree artifacts.
        DefaultInfo(files = depset([outdir])),
        _SiteDocsInfo(),
    ]
# Rule: builds a directory of site documentation from asciidoc outputs.
site_docs = rule(
    implementation = _impl,
    attrs = {
        # Asciidoc targets; the aspect attaches a Jekyll header to each.
        "srcs": attr.label_list(
            aspects = [_header_aspect],
            providers = [AsciidocInfo],
        ),
        # Other site_docs targets whose outputs are merged in.
        "deps": attr.label_list(
            providers = [_SiteDocsInfo],
        ),
        # Mapping of src -> dest paths renamed inside the output directory.
        "rename_files": attr.string_dict(),
    },
)
def _package_path(file):
    """Return *file*'s path relative to the root of its owning package."""
    package_root = paths.join(file.root.path, file.owner.package)
    return paths.relativize(file.path, package_root)
def _jekyll_impl(ctx):
    """Stage srcs into a single directory tree and run `jekyll build` over it."""
    input_root = ctx.label.name + ".staging.d"
    symlinks = []
    for src in ctx.files.srcs:
        # Mirror each source into the staging dir via symlinks, preserving its
        # package-relative path; directories require declare_directory.
        declare_output = ctx.actions.declare_directory if src.is_directory else ctx.actions.declare_file
        symlink = declare_output(paths.join(input_root, _package_path(src)))
        ctx.actions.symlink(output = symlink, target_file = src)
        symlinks.append(symlink)

    # NOTE(review): assumes srcs is non-empty; symlinks[0] fails otherwise.
    input_dir = paths.join(symlinks[0].root.path, symlinks[0].owner.package, input_root)
    outdir = ctx.outputs.out
    if not outdir:
        outdir = ctx.actions.declare_directory("_site")

    # Dummy command for now.
    args = ctx.actions.args()
    args.add("build")
    args.add("-s", input_dir)
    args.add("-d", outdir.path)
    ctx.actions.run(
        outputs = [outdir],
        inputs = symlinks,
        arguments = [args],
        executable = ctx.executable._jekyll,
        # TODO(shahms): We don't currently have a Ruby toolchain in the RBE environment.
        execution_requirements = {"local": ""},
        mnemonic = "JekyllBuild",
    )
    return [
        DefaultInfo(files = depset([outdir])),
    ]
# Rule: runs Jekyll over the staged `srcs` to produce the final site tree.
jekyll_build = rule(
    implementation = _jekyll_impl,
    attrs = {
        # Optional explicit output directory; defaults to "_site".
        "out": attr.output(),
        "srcs": attr.label_list(
            allow_files = True,
        ),
        # The Jekyll binary from the bundled website toolchain.
        "_jekyll": attr.label(
            default = "@website_bundle//:bin/jekyll",
            executable = True,
            cfg = "exec",
            allow_files = True,
        ),
    },
)
4,702 | to constraint | # bluemira is an integrated inter-disciplinary design tool for future fusion
# reactors. It incorporates several modules, some of which rely on other
# codes, to carry out a range of typical conceptual fusion reactor design
# activities.
#
# Copyright (C) 2021-2023 M. Coleman, J. Cook, F. Franza, I.A. Maione, S. McIntosh,
# J. Morris, D. Short
#
# bluemira is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# bluemira is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with bluemira; if not, see <https://www.gnu.org/licenses/>.
from dataclasses import dataclass
from typing import List, Optional
import numpy as np
from bluemira.geometry.optimisation.typing import (
GeomClsOptimiserCallable,
GeomConstraintT,
GeomOptimiserCallable,
GeomOptimiserObjective,
)
from bluemira.geometry.parameterisations import GeometryParameterisation
from bluemira.geometry.tools import signed_distance_2D_polygon
from bluemira.geometry.wire import BluemiraWire
from bluemira.optimisation.error import GeometryOptimisationError
from bluemira.optimisation.typing import (
ConstraintT,
ObjectiveCallable,
OptimiserCallable,
)
@dataclass
class KeepOutZone:
    """Definition of a keep-out zone for a geometry optimisation."""

    wire: BluemiraWire
    """Closed wire defining the keep-out zone."""

    byedges: bool = True
    """Whether to discretize the keep-out zone by edges or not."""

    dl: Optional[float] = None
    """
    The discretization length for the keep-out zone.

    This overrides ``n_discr`` if given.
    """

    n_discr: int = 100
    """The number of points to discretize the keep-out zone into."""

    shape_n_discr: int = 100
    """The number of points to discretize the geometry being optimised into."""

    tol: float = 1e-8
    """The tolerance for the keep-out zone constraint."""
def to_objective(
    geom_objective: GeomOptimiserObjective, geom: GeometryParameterisation
) -> ObjectiveCallable:
    """Convert a geometry objective function to a normal objective function."""

    def objective(x):
        # Push the normalised optimiser variables into the parameterisation,
        # then evaluate the geometry-based objective on the updated shape.
        geom.variables.set_values_from_norm(x)
        return geom_objective(geom)

    return objective
def to_optimiser_callable(
    geom_callable: GeomOptimiserCallable,
    geom: GeometryParameterisation,
) -> OptimiserCallable:
    """
    Convert a geometry optimiser function to a normal optimiser function.

    For example, a gradient or constraint.
    """

    def wrapped(x):
        # Update the parameterisation from the normalised vector, then call
        # the wrapped geometry function on it.
        geom.variables.set_values_from_norm(x)
        return geom_callable(geom)

    return wrapped
def to_optimiser_callable_from_cls(
    geom_callable: GeomClsOptimiserCallable,
    geom: GeometryParameterisation,
) -> OptimiserCallable:
    """
    Convert a geometry optimiser function to a normal optimiser function.

    For example, a gradient or constraint.
    """

    def wrapped(x):
        # The callable is already bound to the parameterisation, so it is
        # invoked with no arguments once the variables are updated.
        geom.variables.set_values_from_norm(x)
        return geom_callable()

    return wrapped
def METHOD_NAME(
    geom_constraint: GeomConstraintT, geom: GeometryParameterisation
) -> ConstraintT:
    """Convert a geometry constraint to a normal one."""
    # The derivative is optional; wrap it only when one is provided.
    df = geom_constraint.get("df_constraint", None)
    constraint: ConstraintT = {
        "f_constraint": to_optimiser_callable(geom_constraint["f_constraint"], geom),
        "df_constraint": to_optimiser_callable(df, geom) if df else None,
        "tolerance": geom_constraint["tolerance"],
    }
    return constraint
def calculate_signed_distance(
    parameterisation: GeometryParameterisation,
    n_shape_discr: int,
    zone_points: np.ndarray,
) -> np.ndarray:
    """
    Signed distance from the parameterised shape to the keep-out/in zone.

    Parameters
    ----------
    parameterisation:
        The geometry whose created shape is tested against the zone.
    n_shape_discr:
        Number of points to discretize the shape into.
    zone_points:
        xz points of the zone polygon (transposed internally before use).
    """
    shape = parameterisation.create_shape()
    # Note that we do not discretize by edges here, as the number of
    # points must remain constant so the size of constraint vectors
    # remain constant.
    s = shape.discretize(n_shape_discr, byedges=False).xz
    return signed_distance_2D_polygon(s.T, zone_points.T).T
def make_keep_out_zone_constraint(koz: KeepOutZone) -> GeomConstraintT:
    """
    Make a keep-out zone inequality constraint from a wire.

    Raises
    ------
    GeometryOptimisationError
        If the keep-out zone wire is not closed.
    """
    if not koz.wire.is_closed():
        raise GeometryOptimisationError(
            f"Keep-out zone with label '{koz.wire.label}' is not closed."
        )
    koz_points = koz.wire.discretize(koz.n_discr, byedges=koz.byedges, dl=koz.dl).xz
    # Note that we do not allow discretization using 'dl' or 'byedges'
    # for the shape being optimised. The size of the constraint cannot
    # change within an optimisation loop (NLOpt will error) and these
    # options do not guarantee a constant number of discretized points.
    shape_n_discr = koz.shape_n_discr

    def _f_constraint(geom: GeometryParameterisation) -> np.ndarray:
        # Closure over the fixed zone points and discretisation count, giving
        # the (geom) -> ndarray signature expected of a geometry constraint.
        return calculate_signed_distance(
            geom,
            n_shape_discr=shape_n_discr,
            zone_points=koz_points,
        )

    return {"f_constraint": _f_constraint, "tolerance": np.full(shape_n_discr, koz.tol)}
def get_shape_ineq_constraint(geom: GeometryParameterisation) -> List[ConstraintT]:
    """
    Retrieve the inequality constraints registered for the given parameterisation.

    If no constraints are registered, return an empty list.

    Parameters
    ----------
    geom:
        The parameterisation providing ``f_ineq_constraint`` and, optionally,
        ``df_ineq_constraint``.

    Returns
    -------
    A list with at most one constraint dictionary.
    """
    if geom.n_ineq_constraints < 1:
        return []
    # An analytic gradient is optional on parameterisations.
    if df_constraint := getattr(geom, "df_ineq_constraint", None):
        df_constraint = to_optimiser_callable_from_cls(df_constraint, geom)
    return [
        {
            # Direct attribute access: f_ineq_constraint must exist whenever
            # n_ineq_constraints >= 1; getattr with a constant name was just
            # a slower spelling of the same lookup (flake8-bugbear B009).
            "f_constraint": to_optimiser_callable_from_cls(
                geom.f_ineq_constraint, geom
            ),
            "df_constraint": df_constraint,
            "tolerance": geom.tolerance,
        }
    ]
4,703 | sync skaled config with upstream | # -*- coding: utf-8 -*-
#
# This file is part of SKALE Admin
#
# Copyright (C) 2021 SKALE Labs
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
import logging
import os
import re
import shutil
import time
import threading
from abc import ABCMeta, abstractmethod
from pathlib import Path
from typing import ClassVar, Dict, List, Optional, TypeVar
from core.schains.config.directory import get_files_with_prefix
from tools.configs.schains import SCHAINS_DIR_PATH
from tools.helper import read_json, write_json
IConfigFilenameType = TypeVar('IConfigFilenameType', bound='IConfigFilename')
logger = logging.getLogger(__name__)
class IConfigFilename(metaclass=ABCMeta):
    """Interface for objects that map between config identities and filenames."""

    @property
    @abstractmethod
    def filename(self) -> str:
        # The bare filename (no directory part) this object represents.
        pass

    def abspath(self, base_path: str) -> str:
        """Return the filename joined onto *base_path*."""
        return os.path.join(base_path, self.filename)

    @classmethod
    @abstractmethod
    def from_filename(cls, filename: str) -> IConfigFilenameType:
        # Inverse of `filename`: parse an instance back out of a filename.
        pass
class UpstreamConfigFilename(IConfigFilename):
    """
    Upstream config filename: ``schain_<name>_<rotation_id>_<ts>.json``.

    Instances compare and sort by (name, rotation_id, ts).
    """

    def __init__(self, name: str, rotation_id: int, ts: int) -> None:
        self.name = name
        self.rotation_id = rotation_id
        self.ts = ts

    @property
    def filename(self) -> str:
        return f'schain_{self.name}_{self.rotation_id}_{self.ts}.json'

    def __eq__(self, other) -> bool:
        return (self.name, self.rotation_id, self.ts) == \
            (other.name, other.rotation_id, other.ts)

    def __lt__(self, other) -> bool:
        # Lexicographic tuple comparison matches field-by-field ordering.
        return (self.name, self.rotation_id, self.ts) < \
            (other.name, other.rotation_id, other.ts)

    @classmethod
    def from_filename(cls, filename: str):
        # The chain name itself may contain underscores, so split the two
        # trailing numeric fields off the right and strip the fixed prefix.
        stem = Path(filename).stem
        prefixed_name, rotation_id, ts = stem.rsplit('_', 2)
        name = prefixed_name.replace('schain_', '', 1)
        return cls(name=name, rotation_id=int(rotation_id), ts=int(ts))
class SkaledConfigFilename(IConfigFilename):
    """Active skaled config filename: ``schain_<name>.json``."""

    def __init__(self, name: str) -> None:
        self.name = name

    @property
    def filename(self) -> str:
        return f'schain_{self.name}.json'

    @classmethod
    def from_filename(cls, filename: str):
        # Strip the extension first and split off only the fixed 'schain'
        # prefix so chain names containing underscores round-trip correctly.
        # The previous ``filename.split('_')`` kept the '.json' suffix in the
        # parsed name and raised ValueError for names with underscores.
        _, name = Path(filename).stem.split('_', 1)
        return cls(name)
class ConfigFileManager:
    """
    Manages sChain config files inside the chain's directory.

    Two kinds of files are handled:
    * upstream configs: ``schain_<name>_<rotation_id>_<ts>.json`` (many)
    * the active skaled config: ``schain_<name>.json`` (one)

    Filesystem access is serialised through a single process-wide re-entrant
    lock so concurrent threads observe a consistent view.
    """

    # Shared across all instances: one lock guards every managed directory.
    CFM_LOCK: ClassVar[threading.RLock] = threading.RLock()

    def __init__(self, schain_name: str) -> None:
        self.schain_name: str = schain_name
        self.dirname: str = os.path.join(SCHAINS_DIR_PATH, schain_name)
        self.upstream_prefix = f'schain_{schain_name}_'

    def get_upstream_configs(self) -> List[UpstreamConfigFilename]:
        """Return all upstream config filenames, sorted by (name, rotation, ts)."""
        # NOTE(review): the '.' before 'json' is an unescaped regex dot;
        # harmless for the filenames actually produced, but worth confirming.
        pattern = re.compile(rf'{self.upstream_prefix}\d+_\d+.json')
        with ConfigFileManager.CFM_LOCK:
            filenames = get_files_with_prefix(
                self.dirname,
                self.upstream_prefix
            )
            return sorted(
                map(
                    UpstreamConfigFilename.from_filename,
                    filter(pattern.search, filenames)
                )
            )

    @property
    def latest_upstream_path(self) -> Optional[str]:
        """Absolute path of the newest upstream config, or None if none exist."""
        upstreams = self.get_upstream_configs()
        if len(upstreams) == 0:
            return None
        return upstreams[-1].abspath(self.dirname)

    @property
    def skaled_config_path(self) -> str:
        """Absolute path of the active skaled config file."""
        return SkaledConfigFilename(self.schain_name).abspath(self.dirname)

    def upstream_config_exists(self) -> bool:
        """Return True if at least one upstream config file is present on disk."""
        with ConfigFileManager.CFM_LOCK:
            path = self.latest_upstream_path
            return path is not None and os.path.isfile(path)

    def skaled_config_exists(self) -> bool:
        """Return True if the active skaled config file is present on disk."""
        path = SkaledConfigFilename(self.schain_name).abspath(self.dirname)
        with ConfigFileManager.CFM_LOCK:
            return os.path.isfile(path)

    @property
    def latest_upstream_config(self) -> Optional[Dict]:
        """Parsed contents of the newest upstream config, or None if missing."""
        with ConfigFileManager.CFM_LOCK:
            if not self.upstream_config_exists():
                return None
            return read_json(self.latest_upstream_path)

    @property
    def skaled_config(self):
        """Parsed contents of the active skaled config, or None if missing."""
        with ConfigFileManager.CFM_LOCK:
            if not self.skaled_config_exists():
                return None
            return read_json(self.skaled_config_path)

    def skaled_config_synced_with_upstream(self) -> bool:
        """
        Return True when the skaled config equals the newest upstream config.

        A missing skaled config counts as unsynced; a missing upstream counts
        as synced (there is nothing to sync from).
        """
        with ConfigFileManager.CFM_LOCK:
            if not self.skaled_config_exists():
                return False
            if not self.upstream_config_exists():
                return True
            return self.latest_upstream_config == self.skaled_config

    def get_new_upstream_filepath(self, rotation_id: int) -> str:
        """Build a path for a new upstream config stamped with the current time."""
        ts = int(time.time())
        filename = UpstreamConfigFilename(
            self.schain_name,
            rotation_id=rotation_id,
            ts=ts
        )
        return filename.abspath(self.dirname)

    def save_new_upstream(self, rotation_id: int, config: Dict) -> None:
        """Write *config* as a new timestamped upstream file."""
        with ConfigFileManager.CFM_LOCK:
            config_path = self.get_new_upstream_filepath(rotation_id)
            write_json(config_path, config)

    def save_skaled_config(self, config: Dict) -> None:
        """Overwrite the active skaled config with *config*."""
        with ConfigFileManager.CFM_LOCK:
            write_json(self.skaled_config_path, config)

    def METHOD_NAME(self) -> bool:
        """
        Copy the newest upstream config over the active skaled config.

        Returns False (and does nothing) when no upstream config exists.
        """
        with ConfigFileManager.CFM_LOCK:
            if not self.upstream_config_exists():
                return False
            upath = self.latest_upstream_path or ''
            path = self.skaled_config_path
            logger.debug('Syncing %s with %s', path, upath)
            shutil.copy(upath, path)
            return True

    def upstreams_by_rotation_id(self, rotation_id: int) -> List[str]:
        """Paths of all upstream configs recorded for *rotation_id*."""
        return [
            fp.abspath(self.dirname)
            for fp in self.get_upstream_configs()
            if fp.rotation_id == rotation_id
        ]

    def upstream_exist_for_rotation_id(self, rotation_id: int) -> bool:
        """Return True if any upstream config exists for *rotation_id*."""
        return len(self.upstreams_by_rotation_id(rotation_id)) > 0

    def remove_skaled_config(self) -> None:
        """Delete the active skaled config file if it exists."""
        with ConfigFileManager.CFM_LOCK:
            if self.skaled_config_exists():
                logger.info('Removing skaled config')
                os.remove(self.skaled_config_path)
4,704 | run app | #!/usr/bin/env python
import os
import sys
import falcon
from db_orm import session, World, Fortune
from helpers import load_template, FortuneTuple, generate_ids, sanitize
from operator import attrgetter
from random import randint
from email.utils import formatdate
# ------------------------------------------------------------------
# setup
wsgi = app = falcon.App()
response_server = "Falcon"
response_add_date = False
def add_ext_headers(response):
    """
    Attach optional Server/Date headers to *response*.

    Controlled by the module-level ``response_server`` / ``response_add_date``
    flags, which the per-server startup code adjusts (some WSGI servers emit
    these headers themselves, others do not).
    """
    if response_server:
        response.set_header('Server', response_server)
    if response_add_date:
        # usegmt=True yields the RFC-compliant GMT date format for HTTP.
        response.set_header('Date', formatdate(timeval=None, localtime=False, usegmt=True))
# Optionally swap Falcon's JSON media handlers for orjson (a faster C library),
# gated behind the USE_ORJSON environment variable.
if os.getenv('USE_ORJSON', "0") == "1":
    import orjson

    # custom JSON handler
    JSONHandler = falcon.media.JSONHandler(dumps=orjson.dumps, loads=orjson.loads)
    extra_handlers = {
        "application/json": JSONHandler,
        "application/json; charset=UTF-8": JSONHandler
    }
    # Install for both request parsing and response serialisation.
    wsgi.req_options.media_handlers.update(extra_handlers)
    wsgi.resp_options.media_handlers.update(extra_handlers)
# ------------------------------------------------------------------
# resource endpoints
class JSONResource(object):
    """GET /json — serialise a fixed greeting as JSON."""

    def on_get(self, request, response):
        add_ext_headers(response)
        payload = {'message': "Hello, world!"}
        response.media = payload
class SingleQuery(object):
    """GET /db — return one random World row as JSON."""

    @session(serializable=False)
    def on_get(self, request, response):
        # Ids 1..10000 — presumably the fixed benchmark dataset range; confirm.
        wid = randint(1, 10000)
        world = World[wid]
        add_ext_headers(response)
        response.media = world.to_dict()
class MultipleQueries(object):
    """GET /queries/{num} — return `num` random World rows as a JSON list."""

    @session(serializable=False)
    def on_get(self, request, response, num):
        # sanitize() normalises the raw path parameter (see helpers module).
        num = sanitize(num)
        worlds = [World[ident].to_dict() for ident in generate_ids(num)]
        add_ext_headers(response)
        response.media = worlds
class UpdateQueries(object):
    """GET /updates/{num} — assign new random numbers to `num` random rows."""

    @session(serializable=False)
    def on_get(self, request, response, num):
        num = sanitize(num)
        ids = generate_ids(num)
        # Sorted id order — presumably to keep row updates in a deterministic
        # order and avoid deadlocks between concurrent transactions; confirm.
        ids.sort()
        worlds = []
        for item in ids:
            world = World[item]
            world.randomNumber = randint(1, 10000)
            worlds.append({"id": world.id, "randomNumber": world.randomNumber})
        add_ext_headers(response)
        response.media = worlds
class Fortunes(object):
    """GET /fortunes — render all fortunes (plus one ad-hoc entry) as HTML."""

    # Template is loaded once at import time and shared by all requests.
    _template = load_template()

    @session(serializable=False)
    def on_get(self, request, response):
        fortunes = [FortuneTuple(id=f.id, message=f.message) for f in Fortune.select()]
        # The extra, non-persisted fortune is added at request time —
        # presumably required by the benchmark specification.
        fortunes.append(FortuneTuple(id=0, message="Additional fortune added at request time."))
        fortunes.sort(key=attrgetter("message"))
        content = self._template.render(fortunes=fortunes)
        add_ext_headers(response)
        response.content_type = falcon.MEDIA_HTML
        response.text = content
class PlaintextResource(object):
    """GET /plaintext — fixed greeting as text/plain."""

    def on_get(self, request, response):
        add_ext_headers(response)
        response.text = 'Hello, world!'
        response.content_type = falcon.MEDIA_TEXT
# register resources: one Falcon responder per benchmark endpoint
app.add_route("/json", JSONResource())
app.add_route("/db", SingleQuery())
app.add_route("/queries/{num}", MultipleQueries())
app.add_route("/updates/{num}", UpdateQueries())
app.add_route("/fortunes", Fortunes())
app.add_route("/plaintext", PlaintextResource())
# ------------------------------------------------------------------
if __name__ == "__main__":
    import optparse
    import multiprocessing
    import logging
    import re

    # -h is reused for --host, so optparse's default help option is disabled.
    parser = optparse.OptionParser("usage: %prog [options]", add_help_option=False)
    parser.add_option("-h", "--host", dest="host", default='0.0.0.0', type="string")
    parser.add_option("-p", "--port", dest="port", default=8080, type="int")
    parser.add_option("-s", "--server", dest="server", default="gunicorn", type="string")
    parser.add_option("-w", "--workers", dest="workers", default=0, type="int")
    parser.add_option("-k", "--keepalive", dest="keepalive", default=60, type="int")
    parser.add_option("-v", "--verbose", dest="verbose", default=0, type="int")
    (opt, args) = parser.parse_args()
    _is_travis = os.environ.get('TRAVIS') == 'true'
    workers = opt.workers
    if workers <= 0:
        # Default: one worker per CPU (capped below on Travis CI).
        workers = int(multiprocessing.cpu_count())
    if _is_travis:
        workers = 2
    if opt.server == 'waitress':
        # Waitress is threaded, single-process: serve directly and exit;
        # Waitress emits its own Server/Date headers, so ours are disabled.
        import waitress
        response_server = None
        response_add_date = False
        logging.basicConfig()
        logging.getLogger().setLevel(logging.CRITICAL)
        logging.disable(True)
        if workers < 4:
            workers = 4
        waitress.serve(
            app=wsgi,
            listen=f"{opt.host}:{opt.port}",
            log_socket_errors=False,
            threads=workers,
            asyncore_use_poll=True,
            expose_tracebacks=False,
            connection_limit=128,
            channel_timeout=opt.keepalive,
            _quiet=True)
        sys.exit(0)

    def METHOD_NAME():
        """Run the selected single-process WSGI server (bjoern/fastwsgi/socketify)."""
        # These module-level flags steer add_ext_headers() per server.
        global response_server
        global response_add_date
        if opt.server == 'bjoern':
            import bjoern
            # Note:
            # Bjoern doesn't provide any additional response headers like Date and Server
            # so we need to provide them manually.
            bjoern_version = [i for i in open('requirements-bjoern.txt', 'r') if re.search('bjoern', i)][0].strip().split('==')
            response_server = '{}/{}'.format(*bjoern_version).title()
            response_add_date = True
            bjoern.run(app, host=opt.host, port=opt.port, reuse_port=True)
        if opt.server == 'fastwsgi':
            import fastwsgi
            response_server = "FastWSGI"
            response_add_date = False
            fastwsgi.server.backlog = 4096
            fastwsgi.run(app, host=opt.host, port=opt.port, loglevel=opt.verbose)
        if opt.server == 'socketify':
            import socketify
            response_server = None
            response_add_date = False
            socketify.WSGI(app).listen(opt.port, lambda config: logging.info(f"Listening on port http://localhost:{opt.port} now\n")).run()

    def create_fork():
        # Child processes (fork() returns 0) run the server; the parent returns.
        n = os.fork()
        # n greater than 0 means parent process
        if not n > 0:
            METHOD_NAME()

    # fork limiting the cpu count - 1
    for i in range(1, workers):
        create_fork()
    METHOD_NAME()  # run app on the main process too :)
|
4,705 | test can convert level set to mesh | ##########################################################################
#
# Copyright (c) 2017, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# * Neither the name of Image Engine Design Inc nor the names of
# any other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import pathlib
import IECore
import IECoreScene
import IECoreVDB
import GafferScene
import GafferVDB
import GafferVDBTest
class LevelSetToMeshTest( GafferVDBTest.VDBTestCase ) :
    """Tests for GafferVDB.LevelSetToMesh (VDB level set -> mesh conversion)."""

    def METHOD_NAME( self ) :
        # Round trip: mesh -> level set -> mesh should yield a MeshPrimitive.
        sphere = GafferScene.Sphere()
        meshToLevelSet = GafferVDB.MeshToLevelSet()
        self.setFilter( meshToLevelSet, path='/sphere' )
        meshToLevelSet["voxelSize"].setValue( 0.05 )
        meshToLevelSet["in"].setInput( sphere["out"] )
        # The intermediate object must be a VDB with a single "surface" grid.
        obj = meshToLevelSet["out"].object( "sphere" )
        self.assertTrue( isinstance( obj, IECoreVDB.VDBObject ) )
        self.assertEqual( obj.gridNames(), ['surface'] )
        grid = obj.findGrid( "surface" )
        levelSetToMesh = GafferVDB.LevelSetToMesh()
        self.setFilter( levelSetToMesh, path='/sphere' )
        levelSetToMesh["in"].setInput( meshToLevelSet["out"] )
        mesh = levelSetToMesh["out"].object( "sphere" )
        self.assertTrue( isinstance( mesh, IECoreScene.MeshPrimitive) )

    def testChangingIsoValueUpdatesBounds ( self ) :
        sphere = GafferScene.Sphere()
        sphere["radius"].setValue( 5 )
        meshToLevelSet = GafferVDB.MeshToLevelSet()
        self.setFilter( meshToLevelSet, path='/sphere' )
        meshToLevelSet["voxelSize"].setValue( 0.05 )
        meshToLevelSet["interiorBandwidth"].setValue( 100 )
        meshToLevelSet["in"].setInput( sphere["out"] )
        levelSetToMesh = GafferVDB.LevelSetToMesh()
        self.setFilter( levelSetToMesh, path='/sphere' )
        levelSetToMesh["in"].setInput( meshToLevelSet["out"] )
        self.assertSceneValid( levelSetToMesh["out"] )
        # By default bounds pass straight through from the input.
        self.assertEqual( levelSetToMesh["adjustBounds"].getValue(), False )
        self.assertEqual( levelSetToMesh["out"].bound( "/sphere" ), levelSetToMesh["in"].bound( "/sphere" ) )
        # With adjustBounds on, the bound must match the generated mesh exactly.
        levelSetToMesh["adjustBounds"].setValue( True )
        self.assertSceneValid( levelSetToMesh["out"] )
        self.assertEqual(
            levelSetToMesh["out"].bound( "/sphere" ),
            levelSetToMesh["out"].object( "/sphere" ).bound()
        )
        bound = levelSetToMesh["out"].bound( "/sphere" )
        levelSetToMesh["isoValue"].setValue( -0.5 ) # Shrinks the output mesh
        self.assertSceneValid( levelSetToMesh["out"] )
        self.assertEqual(
            levelSetToMesh["out"].bound( "/sphere" ),
            levelSetToMesh["out"].object( "/sphere" ).bound()
        )
        # The shrunken mesh's bound must lie inside the original bound.
        self.assertTrue( bound.intersects( levelSetToMesh["out"].bound( "/sphere" ).min() ) )
        self.assertTrue( bound.intersects( levelSetToMesh["out"].bound( "/sphere" ).max() ) )

    def testIncreasingAdapativityDecreasesPolyCount( self ) :
        sphere = GafferScene.Sphere()
        sphere["radius"].setValue( 5 )
        meshToLevelSet = GafferVDB.MeshToLevelSet()
        self.setFilter( meshToLevelSet, path='/sphere' )
        meshToLevelSet["voxelSize"].setValue( 0.05 )
        meshToLevelSet["exteriorBandwidth"].setValue( 4.0 )
        meshToLevelSet["interiorBandwidth"].setValue( 4.0 )
        meshToLevelSet["in"].setInput( sphere["out"] )
        levelSetToMesh = GafferVDB.LevelSetToMesh()
        self.setFilter( levelSetToMesh, path='/sphere')
        levelSetToMesh["in"].setInput( meshToLevelSet["out"] )
        # Face-count ranges rather than exact values — meshing output can vary
        # slightly between OpenVDB versions.
        levelSetToMesh['adaptivity'].setValue(0.0)
        self.assertTrue( 187000 <= len( levelSetToMesh['out'].object( "sphere" ).verticesPerFace ) <= 188000 )
        levelSetToMesh['adaptivity'].setValue(1.0)
        self.assertTrue( 2800 <= len( levelSetToMesh['out'].object( "sphere" ).verticesPerFace ) <= 3200 )

    def testParallelGetValueComputesObjectOnce( self ) :
        reader = GafferScene.SceneReader()
        reader["fileName"].setValue( pathlib.Path( __file__ ).parent / "data" / "sphere.vdb" )
        pathFilter = GafferScene.PathFilter()
        pathFilter["paths"].setValue( IECore.StringVectorData( [ "/vdb" ] ) )
        levelSetToMesh = GafferVDB.LevelSetToMesh()
        levelSetToMesh["in"].setInput( reader["out"] )
        levelSetToMesh["filter"].setInput( pathFilter["out"] )
        levelSetToMesh["grid"].setValue( "ls_sphere" )
        self.assertParallelGetValueComputesObjectOnce( levelSetToMesh["out"], "/vdb" )
4,706 | test get attestation | from unittest.mock import patch
import pytest
from eth_keys import keys
from eth_utils.conversions import to_bytes
from hexbytes import HexBytes
from solders.pubkey import Pubkey
from web3 import Web3
from integration_tests.queries.test_get_challenges import setup_db
from src.queries.get_attestation import (
ADD_SENDER_MESSAGE_PREFIX,
REWARDS_MANAGER_ACCOUNT,
Attestation,
AttestationError,
get_attestation,
get_create_sender_attestation,
)
from src.tasks.index_oracles import oracle_addresses_key
from src.utils.config import shared_config
from src.utils.db_session import get_db
from src.utils.redis_connection import get_redis
REDIS_URL = shared_config["redis"]["url"]
redis_handle = get_redis()
def METHOD_NAME(app):
    """Exercise ``get_attestation`` end to end (name masked upstream).

    Covers: happy path signature recovery, missing user challenge,
    unfinished challenge, already-disbursed challenge, and a bad oracle.
    """
    with app.app_context():
        db = get_db()

    # NOTE(review): indentation was lost in extraction; the session block is
    # assumed to sit outside app_context, matching the sibling test modules —
    # confirm against the upstream file.
    with db.scoped_session() as session:
        setup_db(session)

        # Tests:
        # - Happy path
        # - No user_challenge
        # - Challenge not finished
        # - No disbursement
        # - Invalid oracle
        oracle_address = "0x32a10e91820fd10366AC363eD0DEa40B2e598D22"
        redis_handle.set(oracle_addresses_key, oracle_address)

        delegate_owner_wallet, signature = get_attestation(
            session,
            user_id=1,
            challenge_id="boolean_challenge_2",
            oracle_address=oracle_address,
            specifier="1",
        )

        attestation = Attestation(
            amount="5",
            oracle_address=oracle_address,
            user_address="0x38C68fF3926bf4E68289672F75ee1543117dD9B3",
            challenge_id="boolean_challenge_2",
            challenge_specifier="1",
        )

        # Test happy path
        # confirm the attestation is what we think it should be
        config_owner_wallet = shared_config["delegate"]["owner_wallet"]
        config_private_key = shared_config["delegate"]["private_key"]

        # Ensure we returned the correct owner wallet
        assert delegate_owner_wallet == config_owner_wallet

        # Ensure we can derive the owner wallet from the signed stringified attestation
        attestation_bytes = attestation.get_attestation_bytes()
        to_sign_hash = Web3.keccak(attestation_bytes)
        private_key = keys.PrivateKey(HexBytes(config_private_key))
        public_key = keys.PublicKey.from_private(private_key)
        signture_bytes = to_bytes(hexstr=signature)
        msg_signature = keys.Signature(signature_bytes=signture_bytes, vrs=None)
        recovered_pubkey = public_key.recover_from_msg_hash(
            message_hash=to_sign_hash, signature=msg_signature
        )
        assert (
            Web3.to_checksum_address(recovered_pubkey.to_address())
            == config_owner_wallet
        )

        # Test no matching user challenge
        with pytest.raises(AttestationError):
            get_attestation(
                session,
                user_id=1,
                challenge_id="boolean_challenge_2",
                oracle_address=oracle_address,
                specifier="xyz",
            )

        # Test challenge not finished
        with pytest.raises(AttestationError):
            get_attestation(
                session,
                user_id=1,
                challenge_id="boolean_challenge_3",
                oracle_address=oracle_address,
                specifier="1",
            )

        # Test challenge already disbursed
        with pytest.raises(AttestationError):
            get_attestation(
                session,
                user_id=1,
                challenge_id="boolean_challenge_1",
                oracle_address=oracle_address,
                specifier="1",
            )

        # Test with bad AAO
        with pytest.raises(AttestationError):
            get_attestation(
                session,
                user_id=1,
                challenge_id="boolean_challenge_2",
                oracle_address="wrong_oracle_address",
                specifier="1",
            )
@pytest.fixture
def patch_get_all_other_nodes():
    """Patch the discovery-node wallet lookup so the test wallet below
    appears to be registered on chain for the duration of a test."""
    with patch(
        "src.queries.get_attestation.get_all_other_discovery_nodes_wallets_cached",
        return_value=["0x94e140D27F3d5EE9EcA0109A71CcBa0109964DCa"],
    ):
        yield
def test_get_create_sender_attestation(app, patch_get_all_other_nodes):
    """Happy path: attest a registered sender address and verify the
    signature recovers to the configured delegate owner wallet."""
    new_sender_address = "0x94e140D27F3d5EE9EcA0109A71CcBa0109964DCa"
    owner_wallet, sender_attestation = get_create_sender_attestation(new_sender_address)

    # confirm the attestation is what we think it should be
    config_owner_wallet = shared_config["delegate"]["owner_wallet"]
    config_private_key = shared_config["delegate"]["private_key"]

    # Ensure we returned the correct owner wallet
    assert owner_wallet == config_owner_wallet

    # Ensure we can derive the owner wallet from the signed stringified attestation
    # (message layout: prefix || rewards-manager pubkey || sender address)
    items = [
        to_bytes(text=ADD_SENDER_MESSAGE_PREFIX),
        bytes(Pubkey.from_string(REWARDS_MANAGER_ACCOUNT)),
        to_bytes(hexstr=new_sender_address),
    ]
    attestation_bytes = to_bytes(text="").join(items)
    to_sign_hash = Web3.keccak(attestation_bytes)
    private_key = keys.PrivateKey(HexBytes(config_private_key))
    public_key = keys.PublicKey.from_private(private_key)
    signture_bytes = to_bytes(hexstr=sender_attestation)
    msg_signature = keys.Signature(signature_bytes=signture_bytes, vrs=None)
    recovered_pubkey = public_key.recover_from_msg_hash(
        message_hash=to_sign_hash, signature=msg_signature
    )
    assert (
        Web3.to_checksum_address(recovered_pubkey.to_address()) == config_owner_wallet
    )
def test_get_create_sender_attestation_not_registered(app, patch_get_all_other_nodes):
    """A sender address that is not registered on chain must be rejected.

    Fix: removed a trailing ``|`` extraction artifact from the final line.
    """
    new_sender_address = "0x04e140D27F3d5EE9EcA0109A71CcBa0109964DCa"
    with pytest.raises(
        Exception,
        match=r"Expected 0x04e140D27F3d5EE9EcA0109A71CcBa0109964DCa to be registered on chain",
    ):
        get_create_sender_attestation(new_sender_address)
# -*- coding: utf-8 -*-
import os
import pytest
from ansible_runner.config.inventory import InventoryConfig
from ansible_runner.config._base import BaseExecutionMode
from ansible_runner.exceptions import ConfigurationError
from ansible_runner.utils import get_executable_path
def test_ansible_inventory_init_defaults(tmp_path, patch_private_data_dir):
    # pylint: disable=W0613
    """Default InventoryConfig: unique private data dir and command mode."""
    rc = InventoryConfig()

    # Check that the private data dir is placed in our default location with our default prefix
    # and has some extra uniqueness on the end.
    base_private_data_dir = tmp_path.joinpath('.ansible-runner-').as_posix()
    assert rc.private_data_dir.startswith(base_private_data_dir)
    assert len(rc.private_data_dir) > len(base_private_data_dir)

    assert rc.execution_mode == BaseExecutionMode.ANSIBLE_COMMANDS
def test_invalid_runner_mode_value():
    """An unknown runner_mode raises ConfigurationError."""
    with pytest.raises(ConfigurationError) as exc:
        InventoryConfig(runner_mode='test')
    assert "Invalid runner mode" in exc.value.args[0]
def METHOD_NAME():
    """Build an ansible-inventory 'list' command and check the exact argv
    (name masked upstream; this is the prepare_inventory_command test)."""
    rc = InventoryConfig()
    inventories = ['/tmp/inventory1', '/tmp/inventory2']
    rc.prepare_inventory_command('list', inventories, response_format='yaml', playbook_dir='/tmp',
                                 vault_ids='1234', vault_password_file='/tmp/password')
    expected_command = [get_executable_path('ansible-inventory'), '--list', '-i', '/tmp/inventory1', '-i', '/tmp/inventory2', '--yaml', '--playbook-dir'] + \
                       ['/tmp', '--vault-id', '1234', '--vault-password-file', '/tmp/password']
    assert rc.command == expected_command
    assert rc.runner_mode == 'subprocess'
def test_prepare_inventory_invalid_action():
    """Actions other than graph/host/list are rejected."""
    with pytest.raises(ConfigurationError) as exc:
        rc = InventoryConfig()
        inventories = ['/tmp/inventory1', '/tmp/inventory2']
        rc.prepare_inventory_command('test', inventories=inventories)
    assert "Invalid action test, valid value is one of either graph, host, list" == exc.value.args[0]
def test_prepare_inventory_invalid_response_format():
    """Only json/yaml/toml response formats are accepted."""
    with pytest.raises(ConfigurationError) as exc:
        rc = InventoryConfig()
        inventories = ['/tmp/inventory1', '/tmp/inventory2']
        rc.prepare_inventory_command('list', inventories=inventories, response_format='test')
    assert "Invalid response_format test, valid value is one of either json, yaml, toml" == exc.value.args[0]
def test_prepare_inventory_invalid_inventories():
    """A bare string instead of a list of inventories is rejected."""
    with pytest.raises(ConfigurationError) as exc:
        rc = InventoryConfig()
        inventories = '/tmp/inventory1'
        rc.prepare_inventory_command('list', inventories=inventories)
    assert "inventories should be of type list" in exc.value.args[0]
def test_prepare_inventory_invalid_host_action():
    """The 'host' action requires a host parameter."""
    with pytest.raises(ConfigurationError) as exc:
        rc = InventoryConfig()
        inventories = ['/tmp/inventory1', '/tmp/inventory2']
        rc.prepare_inventory_command('host', inventories=inventories)
    assert "Value of host parameter is required when action in 'host'" == exc.value.args[0]
def test_prepare_inventory_invalid_graph_response_format():
    """The 'graph' action only supports the json response format."""
    with pytest.raises(ConfigurationError) as exc:
        rc = InventoryConfig()
        inventories = ['/tmp/inventory1', '/tmp/inventory2']
        rc.prepare_inventory_command('graph', inventories=inventories, response_format='toml')
    assert "'graph' action supports only 'json' response format" == exc.value.args[0]
@pytest.mark.parametrize('runtime', ('docker', 'podman'))
def test_prepare_inventory_command_with_containerization(tmp_path, runtime, mocker):
    """Process-isolated run: verify the complete container argv for both
    docker and podman runtimes.

    Fix: removed a trailing ``|`` extraction artifact from the final line.
    """
    mocker.patch.dict('os.environ', {'HOME': str(tmp_path)}, clear=True)
    tmp_path.joinpath('.ssh').mkdir()

    kwargs = {
        'private_data_dir': tmp_path,
        'process_isolation': True,
        'container_image': 'my_container',
        'process_isolation_executable': runtime
    }
    rc = InventoryConfig(**kwargs)
    rc.ident = 'foo'
    inventories = ['/tmp/inventory1', '/tmp/inventory2']
    rc.prepare_inventory_command('list', inventories, response_format='yaml', playbook_dir='/tmp',
                                 vault_ids='1234', vault_password_file='/tmp/password', output_file='/tmp/inv_out.txt',
                                 export=True)

    assert rc.runner_mode == 'subprocess'

    # Runtime-specific flags differ: podman quiets pull output, docker maps
    # the invoking user's uid into the container.
    extra_container_args = []
    if runtime == 'podman':
        extra_container_args = ['--quiet']
    else:
        extra_container_args = [f'--user={os.getuid()}']

    expected_command_start = [
        runtime,
        'run',
        '--rm',
        '--interactive',
        '--workdir',
        '/runner/project',
        '-v', f'{rc.private_data_dir}/.ssh/:/home/runner/.ssh/',
        '-v', f'{rc.private_data_dir}/.ssh/:/root/.ssh/',
    ]
    if os.path.exists('/etc/ssh/ssh_known_hosts'):
        expected_command_start.extend(['-v', '/etc/ssh/:/etc/ssh/'])
    if runtime == 'podman':
        expected_command_start.extend(['--group-add=root', '--ipc=host'])

    expected_command_start.extend([
        '-v', f'{rc.private_data_dir}/artifacts/:/runner/artifacts/:Z',
        '-v', f'{rc.private_data_dir}/:/runner/:Z',
        '--env-file', f'{rc.artifact_dir}/env.list',
    ])
    expected_command_start.extend(extra_container_args)
    expected_command_start.extend([
        '--name',
        'ansible_runner_foo',
        'my_container',
        'ansible-inventory',
        '--list',
        '-i', '/tmp/inventory1',
        '-i', '/tmp/inventory2',
        '--yaml',
        '--playbook-dir', '/tmp',
        '--vault-id', '1234',
        '--vault-password-file', '/tmp/password',
        '--output', '/tmp/inv_out.txt',
        '--export',
    ])

    assert expected_command_start == rc.command
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Convert plain qtest traces to C or Bash reproducers
Use this to help build bug-reports or create in-tree reproducers for bugs.
Note: This will not format C code for you. Pipe the output through
clang-format -style="{BasedOnStyle: llvm, IndentWidth: 4, ColumnLimit: 90}"
or similar
"""
import sys
import os
import argparse
import textwrap
from datetime import date
__author__ = "Alexander Bulekov <alxndr@bu.edu>"
__copyright__ = "Copyright (C) 2021, Red Hat, Inc."
__license__ = "GPL version 2 or (at your option) any later version"
__maintainer__ = "Alexander Bulekov"
__email__ = "alxndr@bu.edu"
def c_header(owner):
    """Return the license/copyright header plus includes for a generated C
    test file, crediting *owner* with the current year.

    NOTE(review): blank-line layout inside this template may have been lost
    by extraction — verify exact formatting against the upstream QEMU tree.
    """
    return """/*
 * Autogenerated Fuzzer Test Case
 *
 * Copyright (c) {date} {owner}
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 */
#include "qemu/osdep.h"
#include "libqos/libqtest.h"
""".format(date=date.today().year, owner=owner)
def METHOD_NAME(s):
    """Return *s* wrapped in a multi-line C comment. Assumes the text has
    already been wrapped to a suitable width."""
    body = "\n * ".join(s.splitlines())
    return "".join(["/*\n * ", body, "\n*/"])
def print_c_function(s):
    """Print *s* to stdout as a C block comment, one line per input line.

    Fix: the original never printed the closing ``*/``, producing an
    unterminated C comment.
    """
    print("/* ")
    for line in s.splitlines():
        print(" * {}".format(line))
    print(" */")
def bash_reproducer(path, args, trace):
    """Render *trace* as a copy-pastable heredoc piping into the QEMU binary
    at *path* invoked with *args*."""
    def fold(text, **kwargs):
        # Wrap to 72 columns and escape the line breaks for the shell.
        return '\\\n'.join(textwrap.wrap(text, 72, drop_whitespace=False, **kwargs))

    chunks = [fold("cat << EOF | {} {}".format(path, args), break_on_hyphens=False)]
    chunks.extend(fold(line) for line in trace.splitlines())
    chunks.append("EOF")
    return "\n".join(chunks)
def c_reproducer(name, args, trace):
    """Translate a plain qtest *trace* into the body of a C test function
    named *name* that replays it through libqtest."""
    result = []
    result.append("""static void {}(void)\n{{""".format(name))

    # libqtest will add its own qtest args, so get rid of them
    args = args.replace("-accel qtest","")
    args = args.replace(",accel=qtest","")
    args = args.replace("-machine accel=qtest","")
    args = args.replace("-qtest stdio","")

    result.append("""QTestState *s = qtest_init("{}");""".format(args))
    for l in trace.splitlines():
        param = l.split()
        cmd = param[0]
        # Branch order matters: the exact command "write" (bulk buffer write)
        # must be handled before the startswith("write") sized-write case.
        if cmd == "write":
            buf = param[3][2:] #Get the 0x... buffer and trim the "0x"
            assert len(buf)%2 == 0
            bufbytes = [buf[i:i+2] for i in range(0, len(buf), 2)]
            bufstring = '\\x'+'\\x'.join(bufbytes)
            addr = param[1]
            size = param[2]
            result.append("""qtest_bufwrite(s, {}, "{}", {});""".format(
                          addr, bufstring, size))
        elif cmd.startswith("in") or cmd.startswith("read"):
            # Reads (inb/inw/inl, readb/w/l/q) take a single address/port.
            result.append("qtest_{}(s, {});".format(
                          cmd, param[1]))
        elif cmd.startswith("out") or cmd.startswith("write"):
            # Sized writes (outb/w/l, writeb/w/l/q) take address and value.
            result.append("qtest_{}(s, {}, {});".format(
                          cmd, param[1], param[2]))
        elif cmd == "clock_step":
            # Without an argument, step to the next pending timer.
            if len(param) ==1:
                result.append("qtest_clock_step_next(s);")
            else:
                result.append("qtest_clock_step(s, {});".format(param[1]))
    result.append("qtest_quit(s);\n}")
    return "\n".join(result)
def c_main(name, arch):
    """Return the C main() that registers test *name* when running on the
    qtest architecture *arch*.

    NOTE(review): the template's internal indentation/blank lines may have
    been lost by extraction — verify against the upstream QEMU tree.
    """
    return """int main(int argc, char **argv)
{{
    const char *arch = qtest_get_arch();
    g_test_init(&argc, &argv, NULL);
    if (strcmp(arch, "{arch}") == 0) {{
        qtest_add_func("fuzz/{name}",{name});
    }}
    return g_test_run();
}}""".format(name=name, arch=arch)
def main():
    """CLI entry point: read a qtest trace (file or stdin) and print either
    a Bash reproducer, a bare C function, or a complete C source file.

    Requires QEMU_PATH and QEMU_ARGS in the environment; exits(1) otherwise.
    """
    parser = argparse.ArgumentParser()
    group = parser.add_mutually_exclusive_group()
    group.add_argument("-bash", help="Only output a copy-pastable bash command",
                       action="store_true")
    group.add_argument("-c", help="Only output a c function",
                       action="store_true")
    parser.add_argument('-owner', help="If generating complete C source code, \
this specifies the Copyright owner",
                        nargs='?', default="<name of author>")
    parser.add_argument("-no_comment", help="Don't include a bash reproducer \
as a comment in the C reproducers",
                        action="store_true")
    parser.add_argument('-name', help="The name of the c function",
                        nargs='?', default="test_fuzz")
    parser.add_argument('input_trace', help="input QTest command sequence \
(stdin by default)",
                        nargs='?', type=argparse.FileType('r'),
                        default=sys.stdin)
    args = parser.parse_args()

    qemu_path = os.getenv("QEMU_PATH")
    qemu_args = os.getenv("QEMU_ARGS")
    if not qemu_args or not qemu_path:
        print("Please set QEMU_PATH and QEMU_ARGS environment variables")
        sys.exit(1)
    # Ensure the bash reproducer feeds the trace over stdio.
    bash_args = qemu_args
    if " -qtest stdio" not in qemu_args:
        bash_args += " -qtest stdio"

    # Target architecture is the suffix of the binary name (qemu-system-<arch>).
    arch = qemu_path.split("-")[-1]
    trace = args.input_trace.read().strip()

    if args.bash :
        print(bash_reproducer(qemu_path, bash_args, trace))
    else:
        output = ""
        if not args.c:
            output += c_header(args.owner) + "\n"
        # METHOD_NAME is the (upstream-masked) c_comment helper.
        if not args.no_comment:
            output += METHOD_NAME(bash_reproducer(qemu_path, bash_args, trace))
        output += c_reproducer(args.name, qemu_args, trace)
        if not args.c:
            output += c_main(args.name, arch)
        print(output)
# Script entry point. (Fix: removed a trailing ``|`` extraction artifact.)
if __name__ == '__main__':
    main()
import enum
import numpy as np
from .abstract import Dummy, Hashable, Literal, Number, Type
from functools import total_ordering, cached_property
from numba.core import utils
from numba.core.typeconv import Conversion
from numba.np import npdatetime_helpers
class Boolean(Hashable):
    # Numba type representing booleans.

    def cast_python_value(self, value):
        """Coerce *value* to a Python bool."""
        return bool(value)
def parse_integer_bitwidth(name):
    """Extract the bitwidth from an integer type name such as ``'int64'`` or
    ``'uint8'``; returns None when *name* has neither prefix."""
    for marker in ('int', 'uint'):
        if name.startswith(marker):
            return int(name[len(marker):])
def parse_integer_signed(name):
    """True for names beginning with ``'int'`` (signed); False otherwise
    (``'uint...'`` names start with 'u', so they report unsigned)."""
    return name.startswith('int')
@total_ordering
class Integer(Number):
    """Fixed-width integer type (e.g. int8..int64, uint8..uint64)."""

    def __init__(self, name, bitwidth=None, signed=None):
        super(Integer, self).__init__(name)
        # Derive bitwidth/signedness from the name unless given explicitly.
        if bitwidth is None:
            bitwidth = parse_integer_bitwidth(name)
        if signed is None:
            signed = parse_integer_signed(name)
        self.bitwidth = bitwidth
        self.signed = signed

    @classmethod
    def METHOD_NAME(cls, bitwidth, signed=True):
        """Alternate constructor from a bitwidth (name masked upstream)."""
        name = ('int%d' if signed else 'uint%d') % bitwidth
        return cls(name)

    def cast_python_value(self, value):
        # Use the matching NumPy scalar constructor (np.int32, ...).
        return getattr(np, self.name)(value)

    def __lt__(self, other):
        # Only integers of identical class and signedness are ordered;
        # total_ordering derives the remaining comparison operators.
        if self.__class__ is not other.__class__:
            return NotImplemented
        if self.signed != other.signed:
            return NotImplemented
        return self.bitwidth < other.bitwidth

    @property
    def maxval(self):
        """
        The maximum value representable by this type.
        """
        if self.signed:
            return (1 << (self.bitwidth - 1)) - 1
        else:
            return (1 << self.bitwidth) - 1

    @property
    def minval(self):
        """
        The minimal value representable by this type.
        """
        if self.signed:
            return -(1 << (self.bitwidth - 1))
        else:
            return 0
class IntegerLiteral(Literal, Integer):
    """Literal integer type, e.g. ``Literal[int](3)``; carries both the
    concrete value and the bitwidth/signedness of its base integer type."""

    def __init__(self, value):
        self._literal_init(value)
        name = 'Literal[int]({})'.format(value)
        basetype = self.literal_type
        Integer.__init__(
            self,
            name=name,
            bitwidth=basetype.bitwidth,
            signed=basetype.signed,
        )

    def can_convert_to(self, typingctx, other):
        # Delegate to the base type's conversion, but never report a
        # conversion stronger than 'promote'; implicitly returns None when
        # no conversion exists.
        conv = typingctx.can_convert(self.literal_type, other)
        if conv is not None:
            return max(conv, Conversion.promote)


# Register as the literal wrapper type for Python ints.
Literal.ctor_map[int] = IntegerLiteral
class BooleanLiteral(Literal, Boolean):
    """Literal boolean type, e.g. ``Literal[bool](True)``."""

    def __init__(self, value):
        self._literal_init(value)
        name = 'Literal[bool]({})'.format(value)
        Boolean.__init__(
            self,
            name=name
        )

    def can_convert_to(self, typingctx, other):
        # Same convention as IntegerLiteral: at most 'promote', implicit
        # None when no conversion exists.
        conv = typingctx.can_convert(self.literal_type, other)
        if conv is not None:
            return max(conv, Conversion.promote)


# Register as the literal wrapper type for Python bools.
Literal.ctor_map[bool] = BooleanLiteral
@total_ordering
class Float(Number):
    """Floating-point type (float16/float32/float64), named 'float<N>'."""

    def __init__(self, *args, **kws):
        super(Float, self).__init__(*args, **kws)
        # Determine bitwidth
        assert self.name.startswith('float')
        bitwidth = int(self.name[5:])
        self.bitwidth = bitwidth

    def cast_python_value(self, value):
        # Use the matching NumPy scalar constructor (np.float64, ...).
        return getattr(np, self.name)(value)

    def __lt__(self, other):
        if self.__class__ is not other.__class__:
            return NotImplemented
        return self.bitwidth < other.bitwidth
@total_ordering
class Complex(Number):
    """Complex type (complex64/complex128); *underlying_float* is the Float
    type of its real/imaginary components."""

    def __init__(self, name, underlying_float, **kwargs):
        super(Complex, self).__init__(name, **kwargs)
        self.underlying_float = underlying_float
        # Determine bitwidth
        assert self.name.startswith('complex')
        bitwidth = int(self.name[7:])
        self.bitwidth = bitwidth

    def cast_python_value(self, value):
        # Use the matching NumPy scalar constructor (np.complex128, ...).
        return getattr(np, self.name)(value)

    def __lt__(self, other):
        if self.__class__ is not other.__class__:
            return NotImplemented
        return self.bitwidth < other.bitwidth
class _NPDatetimeBase(Type):
    """
    Common base class for np.datetime64 and np.timedelta64.
    """

    def __init__(self, unit, *args, **kws):
        name = '%s[%s]' % (self.type_name, unit)
        self.unit = unit
        # Numeric code for the unit; also drives the ordering below.
        self.unit_code = npdatetime_helpers.DATETIME_UNITS[self.unit]
        super(_NPDatetimeBase, self).__init__(name, *args, **kws)

    def __lt__(self, other):
        if self.__class__ is not other.__class__:
            return NotImplemented
        # A coarser-grained unit is "smaller", i.e. less precise values
        # can be represented (but the magnitude of representable values is
        # also greater...).
        return self.unit_code < other.unit_code

    def cast_python_value(self, value):
        cls = getattr(np, self.type_name)
        # An empty unit means the unit-less variant of the NumPy type.
        if self.unit:
            return cls(value, self.unit)
        else:
            return cls(value)
@total_ordering
class NPTimedelta(_NPDatetimeBase):
    # np.timedelta64 type; ordering comes from _NPDatetimeBase.__lt__.
    type_name = 'timedelta64'
@total_ordering
class NPDatetime(_NPDatetimeBase):
    # np.datetime64 type; ordering comes from _NPDatetimeBase.__lt__.
    type_name = 'datetime64'
class EnumClass(Dummy):
    """
    Type class for Enum classes.
    """
    basename = "Enum class"

    def __init__(self, cls, dtype):
        assert isinstance(cls, type)
        assert isinstance(dtype, Type)
        self.instance_class = cls
        self.dtype = dtype
        name = "%s<%s>(%s)" % (self.basename, self.dtype, self.instance_class.__name__)
        super(EnumClass, self).__init__(name)

    @property
    def key(self):
        # Identity key used by the type system's caching/equality.
        return self.instance_class, self.dtype

    @cached_property
    def member_type(self):
        """
        The type of this class' members.
        """
        return EnumMember(self.instance_class, self.dtype)
class IntEnumClass(EnumClass):
    """
    Type class for IntEnum classes.
    """
    basename = "IntEnum class"

    @cached_property
    def member_type(self):
        """
        The type of this class' members.
        """
        return IntEnumMember(self.instance_class, self.dtype)
class EnumMember(Type):
    """
    Type class for Enum members.
    """
    basename = "Enum"
    # Paired class-type; subclasses override both attributes together.
    class_type_class = EnumClass

    def __init__(self, cls, dtype):
        assert isinstance(cls, type)
        assert isinstance(dtype, Type)
        self.instance_class = cls
        self.dtype = dtype
        name = "%s<%s>(%s)" % (self.basename, self.dtype, self.instance_class.__name__)
        super(EnumMember, self).__init__(name)

    @property
    def key(self):
        # Identity key used by the type system's caching/equality.
        return self.instance_class, self.dtype

    @property
    def class_type(self):
        """
        The type of this member's class.
        """
        return self.class_type_class(self.instance_class, self.dtype)
class IntEnumMember(EnumMember):
    """
    Type class for IntEnum members.

    Fix: removed a trailing ``|`` extraction artifact from the final line.
    """
    basename = "IntEnum"
    class_type_class = IntEnumClass

    def can_convert_to(self, typingctx, other):
        """
        Convert IntEnum members to plain integers (at least a 'safe'
        conversion); implicitly returns None for non-IntEnum classes.
        """
        if issubclass(self.instance_class, enum.IntEnum):
            conv = typingctx.can_convert(self.dtype, other)
            return max(conv, Conversion.safe)
#import sys
from vtk import vtkCamera
#import vtk.util
#VTK_FLOAT = 1
#VTK_UNSIGNED_CHAR = 2
#VTK_UNSIGNED_SHORT = 3
#VTK_UNSIGNED_INT = 4
#VTK_VERSION = '7.1.1'
#class vtkLine:
#def __init__(self):
#pass
#class vtkTriangle:
#def __init__(self):
#pass
#class vtkQuad:
#def __init__(self):
#pass
#class vtkTetra:
#def __init__(self):
#pass
#class vtkWedge:
#def __init__(self):
#pass
#class vtkHexahedron:
#def __init__(self):
#pass
#class vtkQuadraticTriangle:
#def __init__(self):
#pass
#class vtkQuadraticQuad:
#def __init__(self):
#pass
#class vtkQuadraticTetra:
#def __init__(self):
#pass
#class vtkQuadraticWedge:
#def __init__(self):
#pass
#class vtkQuadraticHexahedron:
#def __init__(self):
#pass
#class vtkPyramid:
#def __init__(self):
#pass
#class vtkPoints:
#def __init__(self):
#self.npoints = 0
#def SetNumberOfPoints(self, npoints):
#assert isinstance(npoints, integer_types), 'npoints=%s type=%s' % (npoints, type(npoints))
#self.npoints = npoints
#class Arrays:
#def __init__(self):
#pass
#def AddArray(self, grid):
#pass
#def SetActiveScalars(self, name):
#pass
#def GetNumberOfArrays(self):
#return 4
#def GetArrayName(self, i):
#return 'fakename'
#def RemoveArray(self, name):
#pass
#def SetActiveVectors(self, name):
#pass
#class vtkArray:
#def __init__(self):
#pass
#def SetNumberOfComponents(self, ncomp):
#assert isinstance(ncomp, int), ncomp
#class vtkLongArray(vtkArray):
#def __init__(self):
#Arrays.__init__(self)
#def GetDataTypeSize(self):
#return 8
#class vtkIdTypeArray:
#def __init__(self):
#pass
#def GetDataTypeSize(self):
#return 4
#class vtkDataArray:
#def __init__(self):
#pass
#def CreateDataArray(self, vtk_array_type):
#pass
#class vtkGenericRenderWindowInteractor:
#def __init__(self):
#pass
#class vtkInteractorStyleRubberBandZoom:
#def __init__(self):
#pass
#class vtkInteractorStyleTrackballCamera:
#def __init__(self):
#pass
#class vtkColorTransferFunction:
#def __init__(self):
#pass
#def SetNanColor(self, red, green, blue):
#assert isinstance(red, float), red
#assert isinstance(green, float), green
#assert isinstance(blue, float), blue
#def SetColorSpaceToHSV(self):
#pass
##def SetColorSpaceToRGB(self):
##pass
#def AddRGBPoint(self, value, red, green, blue):
#assert isinstance(value, float), value
#assert isinstance(red, float), red
#assert isinstance(green, float), green
#assert isinstance(blue, float), blue
#def HSVWrapOff(self):
#pass
#def SetRange(self, min_value, max_value):
#assert isinstance(min_value, float), min_value
#assert isinstance(max_value, float), max_value
#class vtkScalarBarActor:
#def __init__(self):
#pass
#def SetTitle(self, title):
#assert isinstance(title, str), 'title=%r' % title
#def SetLookupTable(self, color_function):
#assert isinstance(color_function, vtkColorTransferFunction), 'color_function=%r' % color_function
#def SetOrientationToVertical(self):
#pass
#def SetPosition(self, x, y):
#assert isinstance(x, float), x
#assert isinstance(y, float), y
#def SetHeight(self, height):
#assert isinstance(height, float), height
#def SetWidth(self, width):
#assert isinstance(width, float), width
#def SetLabelFormat(self, label_format):
#assert isinstance(label_format, str), 'label_format=%r' % label_format
#def SetNumberOfLabels(self, nlabels):
#assert isinstance(nlabels, int), nlabels
#def SetMaximumNumberOfColors(self, ncolors):
#assert isinstance(ncolors, int), ncolors
#def VisibilityOff(self):
#pass
#def Modified(self):
#pass
class GeometryProperty:
    """Stand-in for a VTK property object: validates argument types like the
    real API would, but performs no rendering work."""

    @staticmethod
    def _check_rgb(rgb_floats):
        # Mirrors VTK's expectation of a sequence of float components.
        for component in rgb_floats:
            assert isinstance(component, float), rgb_floats

    def __init__(self):
        pass

    def SetRepresentationToPoints(self):
        pass

    def SetColor(self, rgb_floats):
        self._check_rgb(rgb_floats)

    def SetBackfaceColor(self, rgb_floats):
        self._check_rgb(rgb_floats)

    def SetPointSize(self, size):
        assert isinstance(size, int), type(size)
class GridMapper:
    """Stand-in for a VTK grid mapper: interface-compatible no-ops."""

    def __init__(self):
        pass

    def InterpolateScalarsBeforeMappingOff(self):
        pass

    def InterpolateScalarsBeforeMappingOn(self):
        pass
class vtkRenderer:
    """Stand-in for vtk.vtkRenderer: no-op methods, except the camera
    accessor which returns a real vtkCamera (imported at module top)."""

    def __init__(self):
        pass

    def AddActor(self, actor):
        pass

    def GetActiveCamera(self):
        # A fresh camera per call — callers here only read/set simple attrs.
        return vtkCamera()

    def ResetCameraClippingRange(self):
        pass

    def ResetCamera(self):
        pass

    def Render(self):
        pass

    def SetBackground(self, a, b, c):
        pass

    def SetBackground2(self, a, b, c):
        pass

    def SetGradientBackground(self, a):
        pass
class vtkLODActor:
    """Stand-in for vtk.vtkLODActor: interface-compatible no-ops."""

    def __init__(self):
        pass

    def SetVisibility(self, is_visible):
        pass
#class vtkCamera:
#def __init__(self):
#pass
#def GetPosition(self):
#return (0., 0., 0.)
#def SetPosition(self, xyz):
#pass
#def GetFocalPoint(self):
#return (0., 0., 0.)
#def SetFocalPoint(self, xyz):
#pass
#def GetViewUp(self):
#return (0., 0., 1.)
class VTKInteractor:
    """Stand-in for a VTK render-window interactor.

    Fix: removed a trailing ``|`` extraction artifact from the final line.
    """

    def __init__(self):
        pass

    def Render(self):
        pass

    def METHOD_NAME(self):
        """Return a stub render window (name masked upstream; presumably
        GetRenderWindow)."""
        return vtkRenderer()
import textwrap
from parameterized import parameterized
import hedy
import hedy_translation
from tests.Tester import HedyTester
# tests should be ordered as follows:
# * Translation from English to Dutch
# * Translation from Dutch to English
# * Translation to several languages
# * Error handling
class TestsTranslationLevel3(HedyTester):
    """Keyword-translation tests for Hedy level 3.

    Fixes: removed a trailing ``|`` extraction artifact from the final line
    and restored conventional indentation lost in extraction.
    """
    # tests should be ordered as follows:
    # * Translation from English to Dutch
    # * Translation from Dutch to English
    # * Translation to several languages
    # * Error handling

    level = 3

    def test_assign_list(self):
        code = "animals is dog, cat, kangaroo"
        result = hedy_translation.translate_keywords(code, "en", "nl", self.level)
        expected = "animals is dog, cat, kangaroo"
        self.assertEqual(expected, result)

    def test_at_random(self):
        code = "print animals at random"
        result = hedy_translation.translate_keywords(code, "en", "nl", self.level)
        expected = "print animals op willekeurig"
        self.assertEqual(expected, result)

    def test_assign_list_nl_en(self):
        code = "actie is drukaf, echo, vraag"
        result = hedy_translation.translate_keywords(code, "nl", "en", self.level)
        expected = "actie is drukaf, echo, vraag"
        self.assertEqual(expected, result)

    def test_at_random_nl_en(self):
        code = "print echo op willekeurig"
        result = hedy_translation.translate_keywords(code, "nl", "en", self.level)
        expected = "print echo at random"
        self.assertEqual(expected, result)

    def test_issue_1856(self):
        code = textwrap.dedent("""\
        print Hoi Ik ben Hedy de Waarzegger
        vraag is ask Wat wil je weten?
        print vraag
        antwoorden is ja, nee, misschien
        print Mijn glazen bol zegt...
        sleep 2
        print antwoorden at random""")

        result = hedy_translation.translate_keywords(code, "en", "nl", self.level)

        expected = textwrap.dedent("""\
        print Hoi Ik ben Hedy de Waarzegger
        vraag is vraag Wat wil je weten?
        print vraag
        antwoorden is ja, nee, misschien
        print Mijn glazen bol zegt...
        slaap 2
        print antwoorden op willekeurig""")
        self.assertEqual(expected, result)

    def METHOD_NAME(self):
        # NOTE: dataset-masked name; this is the nl->en reverse of
        # test_issue_1856.
        code = textwrap.dedent("""\
        print Hoi Ik ben Hedy de Waarzegger
        vraag is vraag Wat wil je weten?
        print vraag
        antwoorden is ja, nee, misschien
        print Mijn glazen bol zegt...
        slaap 2
        print antwoorden op willekeurig""")

        expected = textwrap.dedent("""\
        print Hoi Ik ben Hedy de Waarzegger
        vraag is ask Wat wil je weten?
        print vraag
        antwoorden is ja, nee, misschien
        print Mijn glazen bol zegt...
        sleep 2
        print antwoorden at random""")

        result = hedy_translation.translate_keywords(code, "nl", "en", self.level)
        self.assertEqual(expected, result)

    def test_issue_1856_v3(self):
        code = textwrap.dedent("""\
        print Hoi Ik ben Hedy de Waarzegger
        vraag is ask Wat wil je weten?
        print vraag
        antwoorden is ja, nee, misschien
        print Mijn glazen bol zegt...
        slaap 2
        print antwoorden op willekeurig""")

        expected = textwrap.dedent("""\
        print Hoi Ik ben Hedy de Waarzegger
        vraag is ask Wat wil je weten?
        print vraag
        antwoorden is ja, nee, misschien
        print Mijn glazen bol zegt...
        sleep 2
        print antwoorden at random""")

        result = hedy_translation.translate_keywords(code, "nl", "en", self.level)
        self.assertEqual(expected, result)

    def test_add_remove_en_nl(self):
        code = textwrap.dedent("""\
        dieren is koe, kiep
        add muis to dieren
        remove koe from dieren
        print dieren at random""")

        result = hedy_translation.translate_keywords(code, "en", "nl", self.level)

        expected = textwrap.dedent("""\
        dieren is koe, kiep
        voeg muis toe aan dieren
        verwijder koe uit dieren
        print dieren op willekeurig""")
        self.assertEqual(expected, result)

    @parameterized.expand([
        ('en', 'print'),
        ('es', 'imprimir'),
        ('es', 'print')])
    def test_print_type_error_translates_command(self, lang, command):
        code = textwrap.dedent(f"""\
        letters is a, b, b
        {command} letters""")

        self.multi_level_tester(
            lang=lang,
            code=code,
            max_level=11,
            exception=hedy.exceptions.InvalidArgumentTypeException,
            extra_check_function=self.exception_command(command)
        )

    @parameterized.expand([
        ('en', 'at'),
        ('es', 'en'),
        ('es', 'at')])
    def test_list_at_index_type_error_translates_command(self, lang, at):
        code = textwrap.dedent(f"""\
        letters is ask 'What are the letters?'
        print letters {at} 1""")

        self.multi_level_tester(
            lang=lang,
            code=code,
            max_level=11,
            exception=hedy.exceptions.InvalidArgumentTypeException,
            extra_check_function=self.exception_command(at)
        )

    @parameterized.expand([
        ('en', 'at random'),
        ('es', 'en aleatorio'),
        ('es', 'at aleatorio'),
        ('es', 'en random')])
    def test_list_at_random_type_error_translates_command(self, lang, at_random):
        code = textwrap.dedent(f"""\
        letters is ask 'What are the letters?'
        print letters {at_random}""")

        self.multi_level_tester(
            lang=lang,
            code=code,
            max_level=11,
            exception=hedy.exceptions.InvalidArgumentTypeException,
            extra_check_function=self.exception_command(at_random)
        )

    @parameterized.expand([
        ('en', 'add', 'to'),
        ('es', 'añadir', 'a'),
        ('es', 'añadir', 'to'),
        ('es', 'add', 'a')])
    def test_add_to_list_type_error_translates_command(self, lang, add, to):
        code = textwrap.dedent(f"""\
        color is yellow
        {add} blue {to} color""")

        self.multi_level_tester(
            lang=lang,
            code=code,
            max_level=11,
            exception=hedy.exceptions.InvalidArgumentTypeException,
            extra_check_function=self.exception_command(f'{add} {to}')
        )

    # NOTE(review): the first tuple below reads ('en', 'add', 'to') in a
    # *remove* test — it looks like a copy-paste slip from the add test
    # above ('en', 'remove', 'from' would be expected). Left unchanged to
    # preserve behavior; confirm against upstream.
    @parameterized.expand([
        ('en', 'add', 'to'),
        ('es', 'borrar', 'de'),
        ('es', 'borrar', 'from'),
        ('es', 'remove', 'de')])
    def test_remove_from_list_type_error_translates_command(self, lang, remove, from_):
        code = textwrap.dedent(f"""\
        color is yellow
        {remove} blue {from_} color""")

        self.multi_level_tester(
            lang=lang,
            code=code,
            max_level=11,
            exception=hedy.exceptions.InvalidArgumentTypeException,
            extra_check_function=self.exception_command(f'{remove} {from_}')
        )
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, too-many-locals, too-many-arguments
"""Example code to do convolution."""
import numpy as np
import tvm
import os
import tvm.testing
import tvm.topi.testing
from tvm import te, autotvm, topi, relay
from tvm.contrib.pickle_memoize import memoize
from tvm.contrib import nvcc
from tvm.topi.nn.utils import get_pad_tuple
from tvm.topi.utils import get_const_tuple
# Target name -> (compute, schedule) pair for conv2d in HWNC layout using
# TensorCore intrinsics; consumed via topi.testing.dispatch below.
_conv2d_hwnc_tensorcore_implement = {
    "cuda": (topi.cuda.conv2d_hwnc_tensorcore, topi.cuda.schedule_conv2d_hwnc_tensorcore)
}
def verify_conv2d_hwnc(
    batch, in_channel, in_size, num_filter, kernel, stride, padding, dilation=1, dtype="int4"
):
    """Test the conv2d with tensorcore for hwnc layout.

    Builds the CUDA tensor-core conv2d for HWNC-layout input, runs it on
    device, and compares the result against a numpy reference computed in
    NHWC layout.  ``dtype`` must be "int4" or "int8"; int4 data is
    bit-packed 8 values per int32 before upload.

    Parameters
    ----------
    batch, in_channel, in_size, num_filter, kernel, stride, padding : int
        Workload description (square input of side ``in_size``, square kernel).
    dilation : int
        Kernel dilation factor.
    dtype : str
        Input/weight element type, "int4" or "int8".
    """
    pad_top, pad_left, pad_bottom, pad_right = get_pad_tuple(padding, (kernel, kernel))
    padding_sum = pad_top + pad_left + pad_bottom + pad_right
    print(
        "Workload: (%d, %d, %d, %d, %d, %d, %d, %d)"
        % (batch, in_channel, in_size, num_filter, kernel, stride, padding_sum, dilation)
    )
    # choose dtype from int4, int8
    assert dtype in ["int4", "int8"]
    in_height = in_width = in_size
    # A: input placeholder in HWNC layout; W: weights with shape
    # (kernel, kernel, num_filter, in_channel).
    A = te.placeholder((in_height, in_width, batch, in_channel), name="A", dtype=dtype)
    W = te.placeholder((kernel, kernel, num_filter, in_channel), name="W", dtype=dtype)
    a_shape = get_const_tuple(A.shape)
    w_shape = get_const_tuple(W.shape)
    @memoize("topi.tests.test_topi_conv2d_hwnc.verify_conv2d_hwnc")
    def get_ref_data():
        # Reference data is generated (as int32 for int4 workloads, since
        # numpy has no int4) and transposed to NHWC so that the numpy
        # reference conv2d_nhwc_python can be used.
        if dtype == "int4":
            a_np = np.random.randint(low=-8, high=7, size=a_shape).transpose((2, 0, 1, 3))
            w_np = np.random.randint(low=-8, high=7, size=w_shape)
            dw_np = topi.testing.dilate_python(
                w_np.transpose((0, 1, 3, 2)), (1, 1, dilation, dilation)
            )
        elif dtype == "int8":
            a_np = (
                np.random.randint(low=-128, high=127, size=a_shape)
                .transpose((2, 0, 1, 3))
                .astype(dtype)
            )
            w_np = np.random.randint(low=-128, high=127, size=w_shape).astype(dtype)
            dw_np = topi.testing.dilate_python(
                w_np.transpose((0, 1, 3, 2)), (1, 1, dilation, dilation)
            )
        c_np = topi.testing.conv2d_nhwc_python(a_np, dw_np, stride, padding)
        return a_np, w_np, c_np
    def convert_int32_into_int4(a_int32):
        """Pack groups of 8 int4 values (stored one-per-int32) into one int32.

        The last axis shrinks by a factor of 8; value ``m`` of each group
        keeps only its low nibble (``& 0xF``) and is placed at bit offset
        ``(7 - m) * 4`` of the packed word.
        """
        I, J, K, L = a_int32.shape
        a_int4 = np.zeros(shape=(I, J, K, L // 8), dtype=np.int32)
        for i in range(I):
            for j in range(J):
                for k in range(K):
                    for l in range(L // 8):
                        for m in range(min(8, L - l * 8)):
                            a_int4[i, j, k, l] = a_int4[i, j, k, l] | (
                                (a_int32[i, j, k, l * 8 + m] & 0xF) << ((7 - m) * 4)
                            )
        return a_int4
    a_np, w_np, c_np = get_ref_data()
    if dtype == "int4":
        a_np = convert_int32_into_int4(a_np)
        w_np = convert_int32_into_int4(w_np)
    def METHOD_NAME(target):
        # Build and run on the given target, skipping (with a message) when
        # the device is unavailable or lacks tensor-core support.
        dev = tvm.device(target, 0)
        if not tvm.testing.device_enabled(target):
            print("Skip because %s is not enabled" % target)
            return
        if not nvcc.have_tensorcore(dev.compute_version):
            print("skip because gpu does not support Tensor Cores")
            return
        print("Running on target: %s" % target)
        with tvm.target.Target(target):
            fcompute, fschedule = topi.testing.dispatch(target, _conv2d_hwnc_tensorcore_implement)
            C = fcompute(A, W, stride, padding, dilation, dtype, "int32")
            s = fschedule([C])
        # Reference data is NHWC; transpose back to HWNC for the device kernel.
        a = tvm.nd.array(a_np.transpose((1, 2, 0, 3)), dev)
        w = tvm.nd.array(w_np, dev)
        c = tvm.nd.array(np.zeros(get_const_tuple(C.shape), dtype=C.dtype), dev)
        func = tvm.build(
            s,
            [A, W, C],
            target,
            name="relu_%d_%d_%d_%d_%d_%d_%d_%d"
            % (batch, in_channel, in_size, num_filter, kernel, stride, padding_sum, dilation),
        )
        func(a, w, c)
        rtol = 1e-3
        # Device output is HWNC; transpose to NHWC before comparing.
        tvm.testing.assert_allclose(c.numpy().transpose((2, 0, 1, 3)), c_np, rtol=rtol)
    METHOD_NAME("cuda")
def verify_feature_length():
    """Check autotvm itervar feature vectors are length-stable across configs.

    Converts a small single-conv2d relay module to HWNC layout, recasts it to
    int4, extracts the resulting autotvm task, instantiates two random
    configurations, and asserts both yield feature vectors of equal length.
    """
    np.random.seed(123)
    target = "cuda"
    ctx = tvm.device(target)  # NOTE(review): unused — confirm it can be removed
    batch_size = 32  # NOTE(review): unused; input_shape hard-codes batch 32
    input_shape = (32, 512, 7, 7)
    kernel_shape = (512, 512, 3, 3)
    def get_mod():
        # Build a relay module containing a single conv2d.
        x = relay.var("x", relay.TensorType(input_shape, "float32"))
        y = relay.var("y", relay.TensorType(kernel_shape, "float32"))
        f = relay.Function(
            [x, y], relay.nn.conv2d(x, y, padding=[1, 1, 1, 1], channels=512, kernel_size=[3, 3])
        )
        mod = tvm.IRModule()
        mod["main"] = f
        mod = relay.transform.InferType()(mod)
        return mod, {}
    mod, params = get_mod()
    layout_config = relay.transform.LayoutConfig()
    desired_layouts = {"nn.conv2d": ["HWNC", "default"]}
    # Convert the conv2d to HWNC layout, then recast to int4 compute.
    with layout_config:
        seq = tvm.transform.Sequential([relay.transform.ConvertLayout(desired_layouts)])
        with tvm.transform.PassContext(opt_level=3):
            mod = seq(mod)
    mod = relay.transform.recast(mod, "int4", "int32")
    tasks = autotvm.task.extract_from_program(
        mod, target=target, params=params, ops=(relay.op.get("nn.conv2d"),)
    )
    assert len(tasks) == 1
    task = tasks[0]
    space = task.config_space
    # Sample two independent random points in the tuning space and compare
    # the lengths of their flattened itervar feature vectors.
    idx1 = space.get_rand_index()
    idx2 = space.get_rand_index()
    cfg = space.get(idx1)
    sch, arg_bufs = task.instantiate(cfg)
    fea1 = autotvm.feature.get_itervar_feature_flatten(sch, arg_bufs, take_log=True)
    cfg = space.get(idx2)
    sch, arg_bufs = task.instantiate(cfg)
    fea2 = autotvm.feature.get_itervar_feature_flatten(sch, arg_bufs, take_log=True)
    assert len(fea1) == len(fea2)
@tvm.testing.requires_tensorcore
def test_conv2d_hwnc_tensorcore():
    """Test the conv2d with tensorcore for hwnc layout"""
    # Workloads below are (batch, in_channel, in_size, num_filter, kernel,
    # stride, padding[, dtype]); default dtype is int4 (see verify_conv2d_hwnc).
    verify_conv2d_hwnc(8, 64, 56, 64, 3, 1, 1, dtype="int8")
    verify_conv2d_hwnc(8, 64, 56, 64, 1, 1, 0, dtype="int4")
    verify_conv2d_hwnc(8, 64, 56, 128, 3, 2, 1)
    verify_conv2d_hwnc(8, 64, 56, 64, 1, 2, 0)
    verify_conv2d_hwnc(8, 128, 28, 128, 3, 1, 1)
    verify_conv2d_hwnc(8, 128, 28, 256, 3, 2, 1)
    verify_conv2d_hwnc(8, 128, 28, 256, 1, 2, 0)
    verify_conv2d_hwnc(8, 256, 14, 256, 3, 1, 1)
    verify_conv2d_hwnc(8, 256, 14, 512, 3, 2, 1)
    verify_conv2d_hwnc(8, 256, 14, 512, 1, 2, 0)
    verify_conv2d_hwnc(8, 512, 9, 512, 3, 1, 1)
    verify_feature_length()
if __name__ == "__main__":
test_conv2d_hwnc_tensorcore() |
4,713 | check first stage | """
Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
SPDX-License-Identifier: MIT-0
"""
from cfnlint.rules import CloudFormationLintRule, RuleMatch
class CodepipelineStages(CloudFormationLintRule):
    """Check if CodePipeline Stages are set up properly.

    Enforces the structural requirements from the CodePipeline pipeline
    structure reference: at least two stages, a source-only first stage,
    no source actions after the first stage, at least one non-source
    action overall, and unique stage names.
    """

    id = "E2540"
    shortdesc = "CodePipeline Stages"
    description = "See if CodePipeline stages are set correctly"
    source_url = "https://docs.aws.amazon.com/codepipeline/latest/userguide/reference-pipeline-structure.html#pipeline-requirements"
    tags = ["properties", "codepipeline"]

    def _format_error_message(self, message, scenario):
        """Append the condition scenario (if any) to an error message."""
        if scenario:
            scenario_text = " When " + " and ".join(
                [f'condition "{k}" is {v}' for (k, v) in scenario.items()]
            )
            return message + scenario_text
        return message

    def check_stage_count(self, stages, path, scenario):
        """Check that there is minimum 2 stages."""
        matches = []
        if len(stages) < 2:
            message = f"CodePipeline has {len(stages)} stages. There must be at least two stages."
            matches.append(
                RuleMatch(path, self._format_error_message(message, scenario))
            )
        return matches

    def METHOD_NAME(self, stages, path, scenario):
        """Validate the first stage of a pipeline has only source actions."""
        matches = []
        if len(stages) < 1:  # pylint: disable=C1801
            self.logger.debug(
                "Stages was empty. Should have been caught by generic linting."
            )
            return matches

        # Distinct action categories used in the first stage.
        first_stage = {
            a.get("ActionTypeId").get("Category") for a in stages[0]["Actions"]
        }
        if first_stage and "Source" not in first_stage:
            message = (
                "The first stage of a pipeline must contain at least one source action."
            )
            matches.append(
                RuleMatch(
                    path + [0, "Name"], self._format_error_message(message, scenario)
                )
            )
        if len(first_stage) != 1:
            # More than one category means non-source actions are mixed in;
            # zero categories means the stage has no actions at all.
            message = "The first stage of a pipeline must contain only source actions."
            matches.append(
                RuleMatch(
                    path + [0, "Name"], self._format_error_message(message, scenario)
                )
            )
        return matches

    def check_source_actions(self, stages, path, scenario):
        """Validate the all of the stages.

        Source actions may only appear in the first stage, and the pipeline
        must contain at least one non-source action somewhere.
        """
        matches = []
        categories = set()
        if len(stages) < 1:  # pylint: disable=C1801
            self.logger.debug(
                "Stages was empty. Should have been caught by generic linting."
            )
            return matches

        for sidx, stage in enumerate(stages):
            for aidx, action in enumerate(stage.get("Actions", [])):
                action_type_id = action.get("ActionTypeId")
                categories.add(action_type_id.get("Category"))
                if sidx > 0 and action_type_id.get("Category") == "Source":
                    message = (
                        "Only the first stage of a pipeline may contain source actions."
                    )
                    matches.append(
                        RuleMatch(
                            path + [sidx, "Actions", aidx],
                            self._format_error_message(message, scenario),
                        )
                    )
        if not categories - {"Source"}:
            # Every action in the whole pipeline is a source action.
            message = "At least one stage in pipeline must contain an action that is not a source action."
            matches.append(
                RuleMatch(path, self._format_error_message(message, scenario))
            )
        return matches

    def check_names_unique(self, value, path, scenario):
        """Check that stage names are unique."""
        matches = []
        stage_names = set()
        for sidx, stage in enumerate(value):
            stage_name = stage.get("Name")
            if isinstance(stage_name, str):
                if stage_name in stage_names:
                    message = f"All stage names within a pipeline must be unique. ({stage_name})"
                    matches.append(
                        RuleMatch(
                            path + [sidx, "Name"],
                            self._format_error_message(message, scenario),
                        )
                    )
                stage_names.add(stage_name)
            else:
                # Non-string names (e.g. unresolved intrinsics) are skipped here.
                self.logger.debug("Found non string for stage name: %s", stage_name)
        return matches

    def match(self, cfn):
        """Check CodePipeline stages."""
        matches = []

        resources = cfn.get_resource_properties(["AWS::CodePipeline::Pipeline"])
        for resource in resources:
            path = resource["Path"] + ["Stages"]
            properties = resource["Value"]

            # Expand conditions so each concrete stage list is checked once.
            s_stages = cfn.get_object_without_nested_conditions(
                properties.get("Stages"), path
            )
            for s_stage in s_stages:
                s_stage_obj = s_stage.get("Object")
                s_scenario = s_stage.get("Scenario")
                if not isinstance(s_stage_obj, list):
                    self.logger.debug(
                        "Stages not list. Should have been caught by generic linting."
                    )
                    continue

                try:
                    matches.extend(
                        self.check_stage_count(s_stage_obj, path, s_scenario)
                    )
                    matches.extend(
                        self.METHOD_NAME(s_stage_obj, path, s_scenario)
                    )
                    matches.extend(
                        self.check_source_actions(s_stage_obj, path, s_scenario)
                    )
                    matches.extend(
                        self.check_names_unique(s_stage_obj, path, s_scenario)
                    )
                except AttributeError as err:
                    self.logger.debug(
                        "Got AttributeError. Should have been caught by generic linting. "
                        "Ignoring the error here: %s",
                        str(err),
                    )
        return matches
4,714 | test probabilities negative exception | # This code is part of Qiskit.
#
# (C) Copyright IBM 2018, 2019.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""
ReadoutError class tests
"""
import unittest
from test.terra.common import QiskitAerTestCase
import numpy as np
from qiskit_aer.noise.noiseerror import NoiseError
from qiskit_aer.noise.errors.readout_error import ReadoutError
class TestReadoutError(QiskitAerTestCase):
    """Testing ReadoutError class"""

    # --- constructor validation -------------------------------------------

    def test_probabilities_normalized_exception(self):
        """Test exception is raised for probabilities greater than 1."""
        probs = [[0.9, 0.2], [0, 1]]
        self.assertRaises(NoiseError, lambda: ReadoutError(probs))
        probs = [[0, 1], [0.9, 0.2]]
        self.assertRaises(NoiseError, lambda: ReadoutError(probs))

    def METHOD_NAME(self):
        """Test exception is raised for negative probabilities."""
        probs = [[1.1, -0.1], [0, 1]]
        self.assertRaises(NoiseError, lambda: ReadoutError(probs))
        probs = [[0, 1], [1.1, -0.1]]
        self.assertRaises(NoiseError, lambda: ReadoutError(probs))

    def test_probabilities_dimension_exception(self):
        """Test exception is raised if probabilities are not multi-qubit"""
        # 3 outcomes is not a power of two, so no qubit count fits.
        probs = [[1, 0, 0], [0, 1, 0], [0, 1, 0]]
        self.assertRaises(NoiseError, lambda: ReadoutError(probs))

    def test_probabilities_length_exception(self):
        """Test exception is raised if probabilities are different lengths"""
        probs = [[1, 0, 0, 0], [0, 1]]
        self.assertRaises(NoiseError, lambda: ReadoutError(probs))
        probs = [[0, 1], [1, 0, 0, 0]]
        self.assertRaises(NoiseError, lambda: ReadoutError(probs))
        probs = [[1, 0, 0, 0], [0, 0, 1, 0], [0, 0, 1, 0], [0, 0, 1]]
        self.assertRaises(NoiseError, lambda: ReadoutError(probs))

    def test_probabilities_num_outcomes_exception(self):
        """Test exception is raised if not enough probability vectors"""
        # Row count must match the number of outcomes per row.
        probs = [[0, 1]]
        self.assertRaises(NoiseError, lambda: ReadoutError(probs))
        probs = [[1, 0], [0, 1], [0, 0]]
        self.assertRaises(NoiseError, lambda: ReadoutError(probs))
        probs = [[1, 0, 0, 0], [0, 0, 1, 0], [0, 0, 1, 0]]
        self.assertRaises(NoiseError, lambda: ReadoutError(probs))

    # --- construction and serialization -----------------------------------

    def test_1qubit(self):
        """Test 1-qubit readout error"""
        # Test circuit: ideal outcome "11"
        probs = [[1, 0], [0, 1]]
        roerror_dict = {"type": "roerror", "operations": ["measure"], "probabilities": probs}
        roerror = ReadoutError(probs)
        self.assertEqual(roerror.number_of_qubits, 1)
        self.assertEqual(roerror.probabilities.tolist(), probs)
        self.assertEqual(roerror.to_dict(), roerror_dict)

    def test_2qubit(self):
        """Test 2-qubit readout error"""
        # Test circuit: ideal outcome "11"
        probs = [[0.7, 0.2, 0.1, 0], [0, 0.9, 0.1, 0], [0, 0, 1, 0], [0.1, 0.1, 0.2, 0.6]]
        roerror_dict = {"type": "roerror", "operations": ["measure"], "probabilities": probs}
        roerror = ReadoutError(probs)
        self.assertEqual(roerror.number_of_qubits, 2)
        self.assertEqual(roerror.probabilities.tolist(), probs)
        self.assertEqual(roerror.to_dict(), roerror_dict)

    # --- composition -------------------------------------------------------

    def test_tensor(self):
        """Test tensor of two readout errors."""
        probs0 = [[0.9, 0.1], [0.4, 0.6]]
        probs1 = [[0.5, 0.5], [0.2, 0.8]]
        # Expected result is the Kronecker product, as asserted below.
        probs = np.kron(probs0, probs1).tolist()
        error_dict = {"type": "roerror", "operations": ["measure"], "probabilities": probs}
        error0 = ReadoutError(probs0)
        error1 = ReadoutError(probs1)
        error = error0.tensor(error1)
        self.assertEqual(error.number_of_qubits, 2)
        self.assertEqual(error.probabilities.tolist(), probs)
        self.assertEqual(error.to_dict(), error_dict)

    def test_expand(self):
        """Test expand of two readout errors."""
        probs0 = [[0.9, 0.1], [0.4, 0.6]]
        probs1 = [[0.5, 0.5], [0.2, 0.8]]
        # expand with swapped operands matches tensor: same kron(probs0, probs1).
        probs = np.kron(probs0, probs1).tolist()
        error_dict = {"type": "roerror", "operations": ["measure"], "probabilities": probs}
        error0 = ReadoutError(probs0)
        error1 = ReadoutError(probs1)
        error = error1.expand(error0)
        self.assertEqual(error.number_of_qubits, 2)
        self.assertEqual(error.probabilities.tolist(), probs)
        self.assertEqual(error.to_dict(), error_dict)

    def test_compose(self):
        """Test compose of two readout errors."""
        probs0 = [[0.9, 0.1], [0.4, 0.6]]
        probs1 = [[0.5, 0.5], [0.2, 0.8]]
        # error0 followed by error1 corresponds to the matrix product below.
        probs = np.dot(probs1, probs0).tolist()
        error_dict = {"type": "roerror", "operations": ["measure"], "probabilities": probs}
        error0 = ReadoutError(probs0)
        error1 = ReadoutError(probs1)
        # compose method
        error = error0.compose(error1)
        self.assertEqual(error.number_of_qubits, 1)
        self.assertEqual(error.probabilities.tolist(), probs)
        self.assertEqual(error.to_dict(), error_dict)
        # @ method
        error = error0 @ error1
        self.assertEqual(error.number_of_qubits, 1)
        self.assertEqual(error.probabilities.tolist(), probs)
        self.assertEqual(error.to_dict(), error_dict)

    def test_dot_front(self):
        """Test dot of two readout errors."""
        probs0 = [[0.9, 0.1], [0.4, 0.6]]
        probs1 = [[0.5, 0.5], [0.2, 0.8]]
        # dot applies the argument first: error1.dot(error0) == compose order above.
        probs = np.dot(probs1, probs0).tolist()
        error_dict = {"type": "roerror", "operations": ["measure"], "probabilities": probs}
        error0 = ReadoutError(probs0)
        error1 = ReadoutError(probs1)
        # dot method
        error = error1.dot(error0)
        self.assertEqual(error.number_of_qubits, 1)
        self.assertEqual(error.probabilities.tolist(), probs)
        self.assertEqual(error.to_dict(), error_dict)
        # * method
        error = error1 * error0
        self.assertEqual(error.number_of_qubits, 1)
        self.assertEqual(error.probabilities.tolist(), probs)
        self.assertEqual(error.to_dict(), error_dict)

    def test_equal(self):
        """Test two readout errors are equal"""
        # List and ndarray inputs should compare equal.
        error1 = ReadoutError([[0.9, 0.1], [0.5, 0.5]])
        error2 = ReadoutError(np.array([[0.9, 0.1], [0.5, 0.5]]))
        self.assertEqual(error1, error2)

    def test_to_instruction(self):
        """Test conversion of ReadoutError to Instruction."""
        # 1-qubit case
        probs1 = [[0.8, 0.2], [0.5, 0.5]]
        instr1 = ReadoutError(probs1).to_instruction()
        self.assertEqual(instr1.name, "roerror")
        self.assertEqual(instr1.num_clbits, 1)
        self.assertEqual(instr1.num_qubits, 0)
        self.assertTrue(np.allclose(instr1.params, probs1))
        # 2-qubit case
        probs2 = np.kron(probs1, probs1)
        instr2 = ReadoutError(probs2).to_instruction()
        self.assertEqual(instr2.name, "roerror")
        self.assertEqual(instr2.num_clbits, 2)
        self.assertEqual(instr2.num_qubits, 0)
        self.assertTrue(np.allclose(instr2.params, probs2))
if __name__ == "__main__":
unittest.main() |
4,715 | stack | from __future__ import annotations
from dataclasses import dataclass
from typing import ClassVar, Callable, Optional, Union, cast
import numpy as np
from pandas import DataFrame
from seaborn._core.groupby import GroupBy
from seaborn._core.scales import Scale
from seaborn._core.typing import Default
default = Default()
@dataclass
class Move:
    """Base class for objects that apply simple positional transforms."""

    # Flag consulted by the caller to decide whether rows are grouped by the
    # orientation axis before the move is applied (overridden by Norm below).
    group_by_orient: ClassVar[bool] = True

    def __call__(
        self, data: DataFrame, groupby: GroupBy, orient: str, scales: dict[str, Scale],
    ) -> DataFrame:
        # Subclasses implement the transform and return a new DataFrame.
        raise NotImplementedError
@dataclass
class Jitter(Move):
    """
    Random displacement along one or both axes to reduce overplotting.

    Parameters
    ----------
    width : float
        Magnitude of jitter, relative to mark width, along the orientation axis.
        If not provided, the default value will be 0 when `x` or `y` are set, otherwise
        there will be a small amount of jitter applied by default.
    x : float
        Magnitude of jitter, in data units, along the x axis.
    y : float
        Magnitude of jitter, in data units, along the y axis.

    Examples
    --------
    .. include:: ../docstrings/objects.Jitter.rst

    """
    width: float | Default = default
    x: float = 0
    y: float = 0
    seed: int | None = None

    def __call__(
        self, data: DataFrame, groupby: GroupBy, orient: str, scales: dict[str, Scale],
    ) -> DataFrame:

        out = data.copy()
        rng = np.random.default_rng(self.seed)

        def displace(col, magnitude):
            # Uniform noise in [-0.5, 0.5) scaled by the requested magnitude.
            noise = rng.uniform(-.5, +.5, len(out))
            return out[col] + noise * magnitude

        # Resolve the orientation-axis jitter: 0.2 by default, but suppressed
        # when explicit x/y jitter was requested instead.
        if self.width is default:
            relative_width = 0.2 if not (self.x or self.y) else 0.0
        else:
            relative_width = cast(float, self.width)

        if self.width:
            out[orient] = displace(orient, relative_width * out["width"])
        if self.x:
            out["x"] = displace("x", self.x)
        if self.y:
            out["y"] = displace("y", self.y)
        return out
@dataclass
class Dodge(Move):
    """
    Displacement and narrowing of overlapping marks along orientation axis.

    Parameters
    ----------
    empty : {'keep', 'drop', 'fill'}
        How groups with missing widths are handled during rescaling.
    gap : float
        Size of gap between dodged marks.
    by : list of variable names
        Variables to apply the movement to, otherwise use all.

    Examples
    --------
    .. include:: ../docstrings/objects.Dodge.rst

    """
    empty: str = "keep"  # Options: keep, drop, fill
    gap: float = 0

    # TODO accept just a str here?
    # TODO should this always be present?
    # TODO should the default be an "all" singleton?
    by: Optional[list[str]] = None

    def __call__(
        self, data: DataFrame, groupby: GroupBy, orient: str, scales: dict[str, Scale],
    ) -> DataFrame:

        grouping_vars = [v for v in groupby.order if v in data]
        # Max mark width per group; rows with NaN width represent absent groups.
        groups = groupby.agg(data, {"width": "max"})
        if self.empty == "fill":
            groups = groups.dropna()

        def groupby_pos(s):
            # Group by position along the orient axis (and facet, if present).
            grouper = [groups[v] for v in [orient, "col", "row"] if v in data]
            return s.groupby(grouper, sort=False, observed=True)

        def scale_widths(w):
            # TODO what value to fill missing widths??? Hard problem...
            # TODO short circuit this if outer widths has no variance?
            empty = 0 if self.empty == "fill" else w.mean()
            filled = w.fillna(empty)
            scale = filled.max()
            norm = filled.sum()
            if self.empty == "keep":
                w = filled
            return w / norm * scale

        def widths_to_offsets(w):
            # Cumulative left edges, recentered so marks straddle the original position.
            return w.shift(1).fillna(0).cumsum() + (w - w.sum()) / 2

        new_widths = groupby_pos(groups["width"]).transform(scale_widths)
        offsets = groupby_pos(new_widths).transform(widths_to_offsets)

        if self.gap:
            new_widths *= 1 - self.gap

        groups["_dodged"] = groups[orient] + offsets
        groups["width"] = new_widths

        # Merge offsets back onto the full data, replacing the orient column
        # with its dodged version.
        out = (
            data
            .drop("width", axis=1)
            .merge(groups, on=grouping_vars, how="left")
            .drop(orient, axis=1)
            .rename(columns={"_dodged": orient})
        )
        return out
@dataclass
class Stack(Move):
    """
    Displacement of overlapping bar or area marks along the value axis.

    Examples
    --------
    .. include:: ../docstrings/objects.Stack.rst

    """
    # TODO center? (or should this be a different move, eg. Stream())

    def METHOD_NAME(self, df, orient):
        """Stack one position group: cumulate values, lift baselines to match."""
        # TODO should stack do something with ymin/ymax style marks?
        # Should there be an upstream conversion to baseline/height parameterization?
        if df["baseline"].nunique() > 1:
            err = "Stack move cannot be used when baselines are already heterogeneous"
            raise RuntimeError(err)

        # The value axis is the one opposite the orientation axis.
        other = {"x": "y", "y": "x"}[orient]
        stacked_lengths = (df[other] - df["baseline"]).dropna().cumsum()
        offsets = stacked_lengths.shift(1).fillna(0)
        df[other] = stacked_lengths
        df["baseline"] = df["baseline"] + offsets
        return df

    def __call__(
        self, data: DataFrame, groupby: GroupBy, orient: str, scales: dict[str, Scale],
    ) -> DataFrame:
        # TODO where to ensure that other semantic variables are sorted properly?
        # TODO why are we not using the passed in groupby here?
        groupers = ["col", "row", orient]
        return GroupBy(groupers).apply(data, self.METHOD_NAME, orient)
@dataclass
class Shift(Move):
    """
    Displacement of all marks with the same magnitude / direction.

    Parameters
    ----------
    x, y : float
        Magnitude of shift, in data units, along each axis.

    Examples
    --------
    .. include:: ../docstrings/objects.Shift.rst

    """
    x: float = 0
    y: float = 0

    def __call__(
        self, data: DataFrame, groupby: GroupBy, orient: str, scales: dict[str, Scale],
    ) -> DataFrame:
        # Shallow copy: only the coordinate columns are replaced below.
        shifted = data.copy(deep=False)
        for axis, offset in (("x", self.x), ("y", self.y)):
            shifted[axis] = shifted[axis] + offset
        return shifted
@dataclass
class Norm(Move):
    """
    Divisive scaling on the value axis after aggregating within groups.

    Parameters
    ----------
    func : str or callable
        Function called on each group to define the comparison value.
    where : str
        Query string defining the subset used to define the comparison values.
    by : list of variables
        Variables used to define aggregation groups.
    percent : bool
        If True, multiply the result by 100.

    Examples
    --------
    .. include:: ../docstrings/objects.Norm.rst

    """
    func: Union[Callable, str] = "max"
    where: Optional[str] = None
    by: Optional[list[str]] = None
    percent: bool = False

    # Normalization acts along the value axis, so rows are not grouped by
    # the orientation axis before this move runs.
    group_by_orient: ClassVar[bool] = False

    def _norm(self, df, var):
        # The denominator comes from the whole group, or from the subset
        # selected by the `where` query when one was given.
        reference = df[var] if self.where is None else df.query(self.where)[var]
        df[var] = df[var] / reference.agg(self.func)

        if self.percent:
            df[var] = df[var] * 100

        return df

    def __call__(
        self, data: DataFrame, groupby: GroupBy, orient: str, scales: dict[str, Scale],
    ) -> DataFrame:
        value_axis = {"x": "y", "y": "x"}[orient]
        return groupby.apply(data, self._norm, value_axis)
# TODO
# @dataclass
# class Ridge(Move):
# ... |
4,716 | test flush publishes request to inbox | # Copyright 2017 Workiva
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import mock
import struct
from tornado import concurrent
from tornado.testing import gen_test, AsyncTestCase
from thrift.transport.TTransport import TTransportException
from frugal import _NATS_MAX_MESSAGE_SIZE
from frugal.exceptions import TTransportExceptionType
from frugal.tornado.transport import FNatsTransport
class TestFNatsTransport(AsyncTestCase):
    """Unit tests for FNatsTransport against a fully mocked NATS client."""

    def setUp(self):
        self.mock_nats_client = mock.Mock()
        self.subject = "foo"
        self.inbox = "new_inbox"

        super(TestFNatsTransport, self).setUp()
        self.transport = FNatsTransport(self.mock_nats_client,
                                        self.subject,
                                        self.inbox)

    def test_is_open_returns_true_when_nats_connected(self):
        self.transport._is_open = True
        self.mock_nats_client.is_connected.return_value = True

        self.assertTrue(self.transport.is_open())

    def test_is_open_returns_false_when_nats_not_connected(self):
        # NOTE(review): the mock still reports is_connected=True here; the
        # assert passes because transport._is_open was never set. Confirm
        # whether the mock was meant to return False instead.
        self.mock_nats_client.is_connected.return_value = True

        self.assertFalse(self.transport.is_open())

    @mock.patch('frugal.tornado.transport.nats_transport.new_inbox')
    def test_init(self, mock_new_inbox):
        # An explicitly supplied inbox is stored as-is...
        self.assertEquals(self.mock_nats_client, self.transport._nats_client)
        self.assertEquals(self.subject, self.transport._subject)
        self.assertEquals(self.inbox, self.transport._inbox)

        # ...and a fresh inbox is generated when none is supplied.
        mock_new_inbox.return_value = "asdf"
        transport = FNatsTransport(self.mock_nats_client, self.subject)
        mock_new_inbox.assert_called_with()
        self.assertEquals("asdf", transport._inbox)

    @gen_test
    def test_open_throws_nats_not_connected_exception(self):
        self.mock_nats_client.is_connected = False

        with self.assertRaises(TTransportException) as cm:
            yield self.transport.open()

        self.assertEqual(
            TTransportExceptionType.NOT_OPEN, cm.exception.type)
        self.assertEqual("NATS not connected.", cm.exception.message)

    @gen_test
    def test_open_throws_transport_already_open_exception(self):
        self.mock_nats_client.is_connected = True
        self.transport._is_open = True

        with self.assertRaises(TTransportException) as cm:
            yield self.transport.open()

        self.assertEqual(
            TTransportExceptionType.ALREADY_OPEN, cm.exception.type)
        self.assertEqual("NATS transport already open.", cm.exception.message)

    @gen_test
    def test_open_subscribes_to_new_inbox(self):
        f = concurrent.Future()
        f.set_result(1)
        self.mock_nats_client.subscribe_async.return_value = f

        yield self.transport.open()

        self.assertEquals(1, self.transport._sub_id)
        self.mock_nats_client.subscribe_async.assert_called_with(
            "new_inbox.*", cb=self.transport._on_message_callback
        )

    @gen_test
    def test_on_message_callback(self):
        message = mock.Mock()
        message.data = [1, 2, 3, 4, 5, 6, 7, 8, 9]
        callback = mock.Mock()
        future = concurrent.Future()
        future.set_result(None)
        callback.return_value = future
        self.transport.handle_response = callback
        yield self.transport._on_message_callback(message)
        callback.assert_called_once_with(message)

    @gen_test
    def test_close_calls_unsubscribe_and_sets_is_open_to_false(self):
        self.transport._sub_id = 1
        f = concurrent.Future()
        f.set_result(None)
        self.mock_nats_client.unsubscribe.return_value = f
        self.mock_nats_client.flush.return_value = f

        yield self.transport.close()

        self.mock_nats_client.unsubscribe.assert_called_with(
            self.transport._sub_id)
        self.mock_nats_client.flush.assert_called_with()
        self.assertFalse(self.transport._is_open)

    @gen_test
    def test_close_with_no_sub_id_returns_early(self):
        self.transport._sub_id = None
        f = concurrent.Future()
        f.set_result(None)
        self.mock_nats_client.unsubscribe.return_value = f
        self.mock_nats_client.flush.return_value = f

        yield self.transport.close()

        self.mock_nats_client.unsubscribe.assert_not_called()

    @gen_test
    def METHOD_NAME(self):
        """Flush publishes the framed payload to the subject with our inbox."""
        self.mock_nats_client.is_connected.return_value = True
        self.transport._is_open = True
        # Fixed: bytearray('test') raises TypeError on Python 3 (string
        # argument without an encoding); use a bytes literal instead.
        data = bytearray(b'test')
        frame_length = struct.pack('!I', len(data))
        f = concurrent.Future()
        f.set_result("")
        self.mock_nats_client.publish_request.return_value = f
        self.mock_nats_client._flush_pending.return_value = f

        yield self.transport.flush(frame_length + data)

        self.mock_nats_client.publish_request.assert_called_with(
            self.subject,
            self.inbox,
            frame_length + data
        )

    def test_request_size_limit(self):
        self.assertEqual(_NATS_MAX_MESSAGE_SIZE,
                         self.transport.get_request_size_limit())
4,717 | using positions and prices merged from fills | import numpy as np
import pandas as pd
from syscore.constants import arg_not_supplied
from systems.accounts.pandl_calculators.pandl_calculation import (
pandlCalculation,
apply_weighting,
)
from sysobjects.fills import ListOfFills, Fill
class pandlCalculationWithFills(pandlCalculation):
    """P&L calculation that can use explicit fills.

    Either ``fills`` or ``positions`` may be supplied; whichever is missing
    is inferred from the other (see the ``fills`` and ``positions``
    properties). Supplying neither raises when either property is accessed.
    """

    def __init__(self, *args, fills: ListOfFills = arg_not_supplied, **kwargs):
        # if fills aren't supplied, can be inferred from positions
        super().__init__(*args, **kwargs)
        self._fills = fills
        # This attribute is not used
        self._calculated_price = None

    def weight(self, weight: pd.Series):
        ## we don't weight fills, instead will be inferred from positions
        weighted_capital = apply_weighting(weight, self.capital)
        weighted_positions = apply_weighting(weight, self.positions)

        return pandlCalculationWithFills(
            self.price,
            positions=weighted_positions,
            fx=self.fx,
            capital=weighted_capital,
            value_per_point=self.value_per_point,
            roundpositions=self.roundpositions,
            delayfill=self.delayfill,
        )

    @classmethod
    def METHOD_NAME(
        pandlCalculation,  # first argument is the class (conventionally `cls`)
        price: pd.Series,
        positions: pd.Series,
        fills: ListOfFills,
        **kwargs,
    ):
        # Construct with prices overridden by actual fill prices where fills
        # occurred; fills themselves are then re-inferred from positions.
        merged_prices = merge_fill_prices_with_prices(price, fills)

        return pandlCalculation(price=merged_prices, positions=positions, **kwargs)

    @property
    def fills(self) -> ListOfFills:
        # Lazily inferred and cached on first access when not passed in.
        fills = self._fills
        if fills is arg_not_supplied:
            # Infer from positions
            # positions will have delayfill and round applied to them already
            fills = self._infer_fills_from_position()
            self._fills = fills

        ## Fills passed in directly are expected to be precise, so we don't round or delay them
        return fills

    def _infer_fills_from_position(self) -> ListOfFills:
        # positions will have delayfill and round applied to them already
        positions = self.positions
        if positions is arg_not_supplied:
            raise Exception("Need to pass fills or positions")

        fills = ListOfFills.from_position_series_and_prices(
            positions=positions, price=self.price
        )

        return fills

    @property
    def positions(self) -> pd.Series:
        # Use passed positions when available (processed), otherwise infer
        # from fills and cache the result.
        positions = self._get_passed_positions()
        if positions is arg_not_supplied:
            ## fills will already have delay and round positions applied
            positions = self._infer_position_from_fills()
            self._positions = positions
            return positions
        else:
            positions_to_use = self._process_positions(positions)
            return positions_to_use

    def _infer_position_from_fills(self) -> pd.Series:
        fills = self._fills
        if fills is arg_not_supplied:
            raise Exception("Need to pass fills or positions")

        positions = infer_positions_from_fills(fills)

        return positions
def merge_fill_prices_with_prices(
    prices: pd.Series, list_of_fills: ListOfFills
) -> pd.Series:
    """Overlay actual fill prices onto a market price series.

    Where a fill occurred, the (quantity-weighted) fill price is used;
    elsewhere the market price is kept.  Infinite and missing values are
    dropped from the result.
    """
    list_of_trades_as_pd_df = list_of_fills.as_pd_df()
    unique_trades_as_pd_df = unique_trades_df(list_of_trades_as_pd_df)

    prices_to_use = pd.concat(
        [prices, unique_trades_as_pd_df.price], axis=1, join="outer"
    )
    prices_to_use.columns = ["price", "fill_price"]

    # Where no fill price available, use price.  Row-wise forward fill:
    # fill_price inherits price when NaN.  (.ffill(axis=1) replaces the
    # deprecated fillna(axis=1, method="ffill").)
    prices_to_use = prices_to_use.ffill(axis=1)
    prices_to_use = prices_to_use.fill_price
    prices_to_use = prices_to_use.replace([np.inf, -np.inf], np.nan)
    prices_to_use = prices_to_use.dropna()

    return prices_to_use


def unique_trades_df(trade_df: pd.DataFrame) -> pd.DataFrame:
    """Collapse same-timestamp trades into one row per timestamp.

    Quantities are summed and the resulting price is the quantity-weighted
    average.  Works on a copy so the caller's frame is not mutated.
    """
    trade_df = trade_df.copy()  # don't mutate the caller's DataFrame
    cash_flow = trade_df.qty * trade_df.price
    trade_df["cash_flow"] = cash_flow

    new_df = trade_df.groupby(trade_df.index).sum()
    # qty and cash_flow sums are correct, price isn't; recover the
    # quantity-weighted price from them
    new_price = new_df.cash_flow / new_df.qty
    new_df["price"] = new_price
    new_df = new_df.drop(labels="cash_flow", axis=1)

    return new_df


def infer_positions_from_fills(fills: ListOfFills) -> pd.Series:
    """Cumulate fill quantities (sorted by date) into a position series."""
    date_index = [fill.date for fill in fills]
    qty_trade = [fill.qty for fill in fills]

    trade_series = pd.Series(qty_trade, index=date_index)
    trade_series = trade_series.sort_index()
    position_series = trade_series.cumsum()

    return position_series
"""
Can have other class methods in future that allow you to just pass fills, trades, or something else
@classmethod
def using_fills(pandlCalculation, price: pd.Series,
fills: ListOfFills,
**kwargs):
positions = from_fills_to_positions(fills)
merged_prices = merge_fill_prices_with_prices(price,
fills)
return pandlCalculation(price=price,
positions=positions,
**kwargs)
@classmethod
def using_trades_inferring_fill_prices(pandlCalculation, price: pd.Series,
trades: pd.Series,
**kwargs):
fills = from_trades_to_fills(trades, price)
positions = from_fills_to_positions(fills)
merged_prices = merge_fill_prices_with_prices(price,
fills)
return pandlCalculation(price = price,
positions=positions,
**kwargs)
""" |
4,718 | get activated threads | # Copyright (C) 2007 Samuel Abels
#
# This file is part of SpiffWorkflow.
#
# SpiffWorkflow is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 3.0 of the License, or (at your option) any later version.
#
# SpiffWorkflow is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301 USA
from ..task import TaskState
from .base import TaskSpec
from .ThreadStart import ThreadStart
from ..operators import valueof
class ThreadSplit(TaskSpec):
    """
    When executed, this task performs a split on the current my_task.
    The number of outgoing my_tasks depends on the runtime value of a
    specified data field.
    If more than one input is connected, the task performs an implicit
    multi merge.
    This task has one or more inputs and may have any number of outputs.
    """

    def __init__(self,
                 wf_spec,
                 name,
                 times=1,
                 suppress_threadstart_creation=False,
                 **kwargs):
        """
        Constructor.

        :type wf_spec: WorkflowSpec`
        :param wf_spec: A reference to the workflow specification.
        :type name: string
        :param name: A name for the task.
        :type times: int or :class:`SpiffWorkflow.operators.Term`
        :param times: The number of tasks to create.
        :type suppress_threadstart_creation: bool
        :param suppress_threadstart_creation: Don't create a ThreadStart,
            because the deserializer is about to.
        :type kwargs: dict
        :param kwargs: See :class:`SpiffWorkflow.specs.TaskSpec`.
        """
        if times is None:
            raise ValueError('times argument is required')
        TaskSpec.__init__(self, wf_spec, name, **kwargs)
        self.times = times
        if not suppress_threadstart_creation:
            # All outgoing connections are routed through an implicit
            # ThreadStart spec (see connect() below).
            self.thread_starter = ThreadStart(wf_spec, **kwargs)
            self.outputs.append(self.thread_starter)
            self.thread_starter._connect_notify(self)
        else:
            # The deserializer re-attaches the ThreadStart as outputs[0];
            # _predict_hook() picks it up lazily.
            self.thread_starter = None

    def connect(self, task_spec):
        """
        Connect the *following* task to this one. In other words, the
        given task is added as an output task.

        task -- the task to connect to.
        """
        # Outputs attach to the implicit ThreadStart, not directly to this spec.
        self.thread_starter.outputs.append(task_spec)
        task_spec._connect_notify(self.thread_starter)

    def _get_activated_tasks(self, my_task, destination):
        """
        Returns the list of tasks that were activated in the previous
        call of execute(). Only returns tasks that point towards the
        destination task, i.e. those which have destination as a
        descendant.

        my_task -- the task of this TaskSpec
        destination -- the child task
        """
        # Delegate to the ThreadStart: walk up from destination to the
        # ancestor task owned by the thread starter, then ask it.
        task = destination._find_ancestor(self.thread_starter)
        return self.thread_starter._get_activated_tasks(task, destination)

    def METHOD_NAME(self, my_task):
        """
        Returns the list of threads that were activated in the previous
        call of execute().

        my_task -- the task of this TaskSpec
        """
        # Each child of my_task is the root task of one spawned thread.
        return my_task.children

    def _on_trigger(self, my_task):
        """
        May be called after execute() was already completed to create an
        additional outbound task.
        """
        for output in self.outputs:
            new_task = my_task.add_child(output, TaskState.READY)
            # Mark the child so it is distinguishable from the children
            # created by the regular split in _run_hook().
            new_task.triggered = True

    def _get_predicted_outputs(self, my_task):
        # `times` may be a literal int or an operator Term that is evaluated
        # against the task data at runtime.
        split_n = int(valueof(my_task, self.times))
        # One ThreadStart output per requested thread.
        return [self.thread_starter] * split_n

    def _predict_hook(self, my_task):
        # if we were created with thread_starter suppressed, connect it now.
        if self.thread_starter is None:
            self.thread_starter = self.outputs[0]
        outputs = self._get_predicted_outputs(my_task)
        # Definite tasks get FUTURE children; speculative ones only LIKELY.
        if my_task._is_definite():
            my_task._sync_children(outputs, TaskState.FUTURE)
        else:
            my_task._sync_children(outputs, TaskState.LIKELY)

    def _run_hook(self, my_task):
        # Materialize the predicted children; returning True marks success.
        outputs = self._get_predicted_outputs(my_task)
        my_task._sync_children(outputs, TaskState.FUTURE)
        return True

    def serialize(self, serializer):
        return serializer.serialize_thread_split(self)

    @classmethod
    def deserialize(self, serializer, wf_spec, s_state):
        # NOTE(review): the first parameter of this classmethod is named
        # `self` but is bound to the class; conventionally it should be `cls`.
        return serializer.deserialize_thread_split(wf_spec, s_state)
4,719 | filter task type | # mypy: ignore-errors
""" Core definition of a Q-Chem Task Document """
from typing import Any, Dict, List, Optional, Callable
from pydantic import BaseModel, Field
from pymatgen.core.structure import Molecule
from emmet.core.structure import MoleculeMetadata
from emmet.core.task import BaseTaskDocument
from emmet.core.utils import ValueEnum
from emmet.core.qchem.calc_types import (
LevelOfTheory,
CalcType,
TaskType,
calc_type,
level_of_theory,
task_type,
solvent,
lot_solvent_string,
)
__author__ = "Evan Spotte-Smith <ewcspottesmith@lbl.gov>"
class QChemStatus(ValueEnum):
    """
    Q-Chem Calculation State
    """

    # NOTE(review): "SUCESS" is a typo for "SUCCESS", but the member name is
    # public API (callers reference QChemStatus.SUCESS), so it is kept as-is.
    SUCESS = "successful"
    FAILED = "unsuccessful"
class OutputSummary(BaseModel):
    """
    Summary of an output for a Q-Chem calculation
    """

    initial_molecule: Molecule = Field(None, description="Input Molecule object")
    optimized_molecule: Molecule = Field(None, description="Optimized Molecule object")
    final_energy: float = Field(
        None, description="Final electronic energy for the calculation (units: Hartree)"
    )
    enthalpy: float = Field(
        None, description="Total enthalpy of the molecule (units: kcal/mol)"
    )
    entropy: float = Field(
        None, description="Total entropy of the molecule (units: cal/mol-K"
    )
    mulliken: List[Any] = Field(
        None, description="Mulliken atomic partial charges and partial spins"
    )
    resp: List[float] = Field(
        None,
        description="Restrained Electrostatic Potential (RESP) atomic partial charges",
    )
    nbo: Dict[str, Any] = Field(
        None, description="Natural Bonding Orbital (NBO) output"
    )
    frequencies: List[float] = Field(
        None, description="Vibrational frequencies of the molecule (units: cm^-1)"
    )

    def as_dict(self) -> Dict[str, Any]:
        """Serialize to a plain dict with MSONable-style "@module"/"@class" keys.

        The embedded module/class names allow loaders that inspect these keys
        to round-trip the object.
        """
        return {
            "@module": self.__class__.__module__,
            "@class": self.__class__.__name__,
            "initial_molecule": self.initial_molecule,
            "optimized_molecule": self.optimized_molecule,
            "final_energy": self.final_energy,
            "enthalpy": self.enthalpy,
            "entropy": self.entropy,
            "mulliken": self.mulliken,
            "resp": self.resp,
            "nbo": self.nbo,
            "frequencies": self.frequencies,
        }
class TaskDocument(BaseTaskDocument, MoleculeMetadata):
    """
    Definition of a Q-Chem task document
    """

    # Class-level values consumed by BaseTaskDocument (presumably identify
    # the calculation code and completion state — confirm against emmet.core.task).
    calc_code = "Q-Chem"
    completed = True

    is_valid: bool = Field(
        True, description="Whether this task document passed validation or not"
    )
    state: QChemStatus = Field(None, description="State of this calculation")
    cputime: float = Field(None, description="The system CPU time in seconds")
    walltime: float = Field(None, description="The real elapsed time in seconds")
    calcs_reversed: List[Dict] = Field(
        [], description="The 'raw' calculation docs used to assembled this task"
    )
    orig: Dict[str, Any] = Field(
        {}, description="Summary of the original Q-Chem inputs"
    )
    # NOTE(review): no type annotation here — pydantic infers the field type
    # from the OutputSummary() default instance.
    output = Field(OutputSummary())
    critic2: Dict[str, Any] = Field(
        None, description="Output from Critic2 critical point analysis code"
    )
    custom_smd: str = Field(
        None, description="Parameter string for SMD implicit solvent model"
    )
    special_run_type: str = Field(
        None, description="Special workflow name (if applicable)"
    )
    smiles: str = Field(
        None,
        description="Simplified molecular-input line-entry system (SMILES) string for the molecule involved "
        "in this calculation.",
    )
    species_hash: str = Field(
        None,
        description="Weisfeiler Lehman (WL) graph hash using the atom species as the graph "
        "node attribute.",
    )
    coord_hash: str = Field(
        None,
        description="Weisfeiler Lehman (WL) graph hash using the atom coordinates as the graph "
        "node attribute.",
    )

    # TODO - type of `tags` field seems to differ among task databases
    # sometimes List, sometimes Dict
    # left as Any here to ensure tags don't cause validation to fail.
    tags: Any = Field(None, description="Metadata tags")

    warnings: Dict[str, bool] = Field(
        None, description="Any warnings related to this task document"
    )

    @property
    def level_of_theory(self) -> LevelOfTheory:
        # Derived from the original input summary.
        return level_of_theory(self.orig)

    @property
    def solvent(self) -> str:
        return solvent(self.orig, custom_smd=self.custom_smd)

    @property
    def lot_solvent(self) -> str:
        # Combined level-of-theory + solvent identifier string.
        return lot_solvent_string(self.orig, custom_smd=self.custom_smd)

    @property
    def task_type(self) -> TaskType:
        return task_type(self.orig, special_run_type=self.special_run_type)

    @property
    def calc_type(self) -> CalcType:
        # NOTE(review): argument order differs from task_type() above
        # (special_run_type first, orig second) — confirm against the
        # calc_type() signature in emmet.core.qchem.calc_types.
        return calc_type(self.special_run_type, self.orig)

    @property
    def entry(self) -> Dict[str, Any]:
        """Build a flat entry dict summarizing this task for downstream builders."""
        # Prefer the optimized geometry; fall back to the input molecule.
        if self.output.optimized_molecule is not None:
            mol = self.output.optimized_molecule
        else:
            mol = self.output.initial_molecule

        # Charge/spin fall back to the molecule's own values when the
        # document does not carry them explicitly.
        if self.charge is None:
            charge = int(mol.charge)
        else:
            charge = int(self.charge)

        if self.spin_multiplicity is None:
            spin = mol.spin_multiplicity
        else:
            spin = self.spin_multiplicity

        entry_dict = {
            "entry_id": self.task_id,
            "task_id": self.task_id,
            "charge": charge,
            "spin_multiplicity": spin,
            "level_of_theory": self.level_of_theory,
            "solvent": self.solvent,
            "lot_solvent": self.lot_solvent,
            "custom_smd": self.custom_smd,
            "task_type": self.task_type,
            "calc_type": self.calc_type,
            "molecule": mol,
            "composition": mol.composition,
            "formula": mol.composition.alphabetical_formula,
            "energy": self.output.final_energy,
            "output": self.output.as_dict(),
            "critic2": self.critic2,
            "orig": self.orig,
            "tags": self.tags,
            "last_updated": self.last_updated,
            "species_hash": self.species_hash,
            "coord_hash": self.coord_hash,
        }
        return entry_dict
def METHOD_NAME(
    entries: List[Dict[str, Any]],
    task_type: TaskType,  # NOTE: shadows the module-level task_type() import
    sort_by: Optional[Callable] = None,
) -> List[Dict[str, Any]]:
    """
    Filter (and optionally sort) TaskDocument entries based on task type.

    :param entries: List of TaskDocument entry dicts
    :param task_type: TaskType to accept (compared against each entry's
        "task_type" value)
    :param sort_by: Key function used to sort the filtered entries
        (default None, meaning no sorting)
    :return: Filtered (and possibly sorted) list of entries
    """
    filtered = [entry for entry in entries if entry["task_type"] == task_type]
    if sort_by is None:
        return filtered
    return sorted(filtered, key=sort_by)
4,720 | check scopes | #
# Copyright (c) 2023 Airbyte, Inc., all rights reserved.
#
from itertools import groupby
from operator import itemgetter
from typing import Any, List, Mapping, Tuple
import pendulum
import requests
from airbyte_cdk.logger import AirbyteLogger
from airbyte_cdk.sources import AbstractSource
from airbyte_cdk.sources.streams import Stream
from airbyte_cdk.sources.streams.http.auth import TokenAuthenticator
from .streams import Collectors, SurveyCollectors, SurveyPages, SurveyQuestions, SurveyResponses, Surveys
class SourceSurveymonkey(AbstractSource):
    """Airbyte source connector for the SurveyMonkey v3 API."""

    # OAuth scopes the token must have been granted for all streams to work.
    SCOPES = {"responses_read_detail", "surveys_read", "users_read"}

    @classmethod
    def _check_credentials(cls, config: Mapping[str, Any]) -> Tuple[bool, Any]:
        # check if the credentials are provided correctly, because for now these value are not required in spec
        if not config.get("access_token"):
            # Fall back to the nested `credentials` object when the legacy
            # top-level access_token is absent.
            credentials = config.get("credentials", {})
            if not credentials:
                return False, "credentials fields are not provided"
            else:
                if not credentials.get("auth_method"):
                    return False, "auth_method in credentials is not provided"
                if not credentials.get("access_token"):
                    return False, "access_token in credentials is not provided"
        return True, None

    def check_connection(self, logger: AirbyteLogger, config: Mapping[str, Any]) -> Tuple[bool, Any]:
        """Validate credentials, optional survey ids, and granted scopes.

        Returns an (ok, message) tuple per the Airbyte source contract.
        """
        is_valid_credentials, msg = self._check_credentials(config)
        if not is_valid_credentials:
            return is_valid_credentials, msg

        authenticator = self.get_authenticator(config)

        if "survey_ids" in config:
            # Check whether survey id exists and collect errors
            errors = []
            for survey_id in config["survey_ids"]:
                # HEAD request: we only need the status code, not the payload.
                response = requests.head(
                    url=f"https://api.surveymonkey.com/v3/surveys/{survey_id}/details", headers=authenticator.get_auth_header()
                )
                try:
                    response.raise_for_status()
                except requests.exceptions.HTTPError:
                    errors.append((survey_id, f"{response.status_code} {response.reason}"))

            if errors:
                # Group survey ids by their error type
                survey_id_index, error_message_index = 0, 1
                msg = "; ".join(
                    [
                        f"{error_type}: {', '.join(list(map(itemgetter(survey_id_index), survey_ids)))}"
                        for error_type, survey_ids in groupby(errors, lambda x: x[error_message_index])
                    ]
                )
                return False, msg

        try:
            # Final check: the token works and carries the required scopes.
            response = requests.get(url="https://api.surveymonkey.com/v3/users/me", headers=authenticator.get_auth_header())
            response.raise_for_status()
            return self.METHOD_NAME(response.json())
        except Exception as e:
            return False, repr(e)

    def streams(self, config: Mapping[str, Any]) -> List[Stream]:
        """Instantiate every stream exposed by this connector."""
        authenticator = self.get_authenticator(config)
        start_date = pendulum.parse(config["start_date"])
        survey_ids = config.get("survey_ids", [])
        args = {"authenticator": authenticator, "start_date": start_date, "survey_ids": survey_ids}
        return [
            Collectors(**args),
            Surveys(**args),
            SurveyCollectors(**args),
            SurveyPages(**args),
            SurveyQuestions(**args),
            SurveyResponses(**args),
        ]

    @staticmethod
    def get_authenticator(config: Mapping[str, Any]):
        # Prefer the nested credentials token; fall back to the legacy
        # top-level field (_check_credentials guarantees one of them exists).
        token = config.get("credentials", {}).get("access_token")
        if not token:
            token = config["access_token"]
        return TokenAuthenticator(token=token)

    @classmethod
    def METHOD_NAME(cls, response_json):
        """Verify the /users/me payload grants every scope in cls.SCOPES."""
        granted_scopes = response_json["scopes"]["granted"]
        missed_scopes = cls.SCOPES - set(granted_scopes)
        if missed_scopes:
            return False, "missed required scopes: " + ", ".join(missed_scopes)
        return True, None
4,721 | inject view methods | from django.core.exceptions import ImproperlyConfigured
from django.urls import reverse
from django.utils.functional import cached_property
from wagtail.admin.menu import WagtailMenuRegisterable, WagtailMenuRegisterableGroup
class ViewSet(WagtailMenuRegisterable):
    """
    Defines a viewset to be registered with the Wagtail admin.

    All properties of the viewset can be defined as class-level attributes, or passed as
    keyword arguments to the constructor (in which case they will override any class-level
    attributes). Additionally, the `name` property can be passed as the first positional
    argument to the constructor.
    """

    #: A name for this viewset, used as the default URL prefix and namespace.
    name = None

    #: The icon to use across the views.
    icon = ""

    def __init__(self, name=None, **kwargs):
        # Store values directly in the instance __dict__ so they override
        # any class-level attributes of the same name.
        if name:
            self.__dict__["name"] = name
        for key, value in kwargs.items():
            self.__dict__[key] = value

    def get_common_view_kwargs(self, **kwargs):
        """
        Returns a dictionary of keyword arguments to be passed to all views within this viewset.
        """
        return kwargs

    def construct_view(self, view_class, **kwargs):
        """
        Wrapper for view_class.as_view() which passes the kwargs returned from get_common_view_kwargs
        in addition to any kwargs passed to this method. Items from get_common_view_kwargs will be
        filtered to only include those that are valid for the given view_class.
        """
        filtered_kwargs = {
            key: value
            for key, value in self.get_common_view_kwargs().items()
            if hasattr(view_class, key)
        }
        # Explicit kwargs take precedence over the common ones.
        filtered_kwargs.update(kwargs)
        return view_class.as_view(**filtered_kwargs)

    def METHOD_NAME(self, view_class, method_names):
        """
        Check for the presence of any of the named methods on this viewset. If any are found,
        create a subclass of view_class that overrides those methods to call the implementation
        on this viewset instead. Otherwise, return view_class unmodified.
        """

        def make_view_method(viewset_method, method_name):
            # Bind viewset_method in its own scope. A closure defined directly
            # inside the loop below would late-bind the loop variable, making
            # every override dispatch to the *last* method found.
            def view_method(self, *args, **kwargs):
                return viewset_method(*args, **kwargs)

            view_method.__name__ = method_name
            return view_method

        overrides = {}
        for method_name in method_names:
            viewset_method = getattr(self, method_name, None)
            if viewset_method:
                overrides[method_name] = make_view_method(viewset_method, method_name)

        if overrides:
            # Dynamically subclass the view with the overriding methods.
            return type(view_class.__name__, (view_class,), overrides)
        return view_class

    @cached_property
    def url_prefix(self):
        """
        The preferred URL prefix for views within this viewset. When registered through
        Wagtail's ``register_admin_viewset`` hook, this will be used as the URL path component
        following ``/admin/``. Other URL registration mechanisms (e.g. editing urls.py manually)
        may disregard this and use a prefix of their own choosing.

        Defaults to the viewset's name.
        """
        if not self.name:
            raise ImproperlyConfigured(
                "ViewSet %r must provide a `name` property" % self
            )
        return self.name

    @cached_property
    def url_namespace(self):
        """
        The URL namespace for views within this viewset. Will be used internally as the
        application namespace for the viewset's URLs, and generally be the instance namespace
        too.

        Defaults to the viewset's name.
        """
        if not self.name:
            raise ImproperlyConfigured(
                "ViewSet %r must provide a `name` property" % self
            )
        return self.name

    def on_register(self):
        """
        Called when the viewset is registered; subclasses can override this to perform additional setup.
        """
        self.register_menu_item()

    def get_urlpatterns(self):
        """
        Returns a set of URL routes to be registered with the Wagtail admin.
        """
        return []

    def get_url_name(self, view_name):
        """
        Returns the namespaced URL name for the given view.
        """
        return self.url_namespace + ":" + view_name

    @cached_property
    def menu_icon(self):
        return self.icon

    @cached_property
    def menu_url(self):
        # The first registered URL pattern is treated as the viewset's
        # landing page for menu purposes.
        return reverse(self.get_url_name(self.get_urlpatterns()[0].name))
class ViewSetGroup(WagtailMenuRegisterableGroup):
    """
    A container for grouping together multiple ViewSet instances.
    Creates a menu item with a submenu for accessing the main URL for each instances.
    """

    def on_register(self):
        # Grouped viewsets share a single top-level menu item (with submenu)
        # instead of registering one item per viewset.
        self.register_menu_item()
4,722 | shared ciphers | import io
import socket
import ssl
from ..exceptions import ProxySchemeUnsupported
from ..packages import six
# Number of bytes pulled off the wrapped socket per pass of the SSL I/O loop
# (see SSLTransport._ssl_io_loop).
SSL_BLOCKSIZE = 16384
class SSLTransport:
    """
    The SSLTransport wraps an existing socket and establishes an SSL connection.

    Contrary to Python's implementation of SSLSocket, it allows you to chain
    multiple TLS connections together. It's particularly useful if you need to
    implement TLS within TLS.

    The class supports most of the socket API operations.
    """

    @staticmethod
    def _validate_ssl_context_for_tls_in_tls(ssl_context):
        """
        Raises a ProxySchemeUnsupported if the provided ssl_context can't be used
        for TLS in TLS.

        The only requirement is that the ssl_context provides the 'wrap_bio'
        methods.
        """
        if not hasattr(ssl_context, "wrap_bio"):
            if six.PY2:
                raise ProxySchemeUnsupported(
                    "TLS in TLS requires SSLContext.wrap_bio() which isn't "
                    "supported on Python 2"
                )
            else:
                raise ProxySchemeUnsupported(
                    "TLS in TLS requires SSLContext.wrap_bio() which isn't "
                    "available on non-native SSLContext"
                )

    def __init__(
        self, socket, ssl_context, server_hostname=None, suppress_ragged_eofs=True
    ):
        """
        Create an SSLTransport around socket using the provided ssl_context.
        """
        # Memory BIOs decouple TLS record processing from the underlying
        # socket: TLS bytes flow socket -> incoming -> sslobj -> plaintext,
        # and plaintext -> sslobj -> outgoing -> socket.
        self.incoming = ssl.MemoryBIO()
        self.outgoing = ssl.MemoryBIO()

        self.suppress_ragged_eofs = suppress_ragged_eofs
        self.socket = socket

        self.sslobj = ssl_context.wrap_bio(
            self.incoming, self.outgoing, server_hostname=server_hostname
        )

        # Perform initial handshake.
        self._ssl_io_loop(self.sslobj.do_handshake)

    def __enter__(self):
        return self

    def __exit__(self, *_):
        self.close()

    def fileno(self):
        return self.socket.fileno()

    def read(self, len=1024, buffer=None):
        return self._wrap_ssl_read(len, buffer)

    def recv(self, len=1024, flags=0):
        # Flags are not forwardable through the memory-BIO layer.
        if flags != 0:
            raise ValueError("non-zero flags not allowed in calls to recv")
        return self._wrap_ssl_read(len)

    def recv_into(self, buffer, nbytes=None, flags=0):
        if flags != 0:
            raise ValueError("non-zero flags not allowed in calls to recv_into")
        if buffer and (nbytes is None):
            nbytes = len(buffer)
        elif nbytes is None:
            nbytes = 1024
        return self.read(nbytes, buffer)

    def sendall(self, data, flags=0):
        if flags != 0:
            raise ValueError("non-zero flags not allowed in calls to sendall")
        count = 0
        # memoryview avoids copying when slicing the remaining data.
        with memoryview(data) as view, view.cast("B") as byte_view:
            amount = len(byte_view)
            while count < amount:
                v = self.send(byte_view[count:])
                count += v

    def send(self, data, flags=0):
        if flags != 0:
            raise ValueError("non-zero flags not allowed in calls to send")
        response = self._ssl_io_loop(self.sslobj.write, data)
        return response

    def makefile(
        self, mode="r", buffering=None, encoding=None, errors=None, newline=None
    ):
        """
        Python's httpclient uses makefile and buffered io when reading HTTP
        messages and we need to support it.

        This is unfortunately a copy and paste of socket.py makefile with small
        changes to point to the socket directly.
        """
        if not set(mode) <= {"r", "w", "b"}:
            raise ValueError("invalid mode %r (only r, w, b allowed)" % (mode,))

        writing = "w" in mode
        reading = "r" in mode or not writing
        assert reading or writing
        binary = "b" in mode
        rawmode = ""
        if reading:
            rawmode += "r"
        if writing:
            rawmode += "w"
        raw = socket.SocketIO(self, rawmode)
        # Mirrors socket.makefile: keep the socket alive until the file
        # object(s) are closed.
        self.socket._io_refs += 1
        if buffering is None:
            buffering = -1
        if buffering < 0:
            buffering = io.DEFAULT_BUFFER_SIZE
        if buffering == 0:
            if not binary:
                raise ValueError("unbuffered streams must be binary")
            return raw
        if reading and writing:
            buffer = io.BufferedRWPair(raw, raw, buffering)
        elif reading:
            buffer = io.BufferedReader(raw, buffering)
        else:
            assert writing
            buffer = io.BufferedWriter(raw, buffering)
        if binary:
            return buffer
        text = io.TextIOWrapper(buffer, encoding, errors, newline)
        text.mode = mode
        return text

    def unwrap(self):
        self._ssl_io_loop(self.sslobj.unwrap)

    def close(self):
        self.socket.close()

    def getpeercert(self, binary_form=False):
        return self.sslobj.getpeercert(binary_form)

    def version(self):
        return self.sslobj.version()

    def cipher(self):
        return self.sslobj.cipher()

    def selected_alpn_protocol(self):
        return self.sslobj.selected_alpn_protocol()

    def selected_npn_protocol(self):
        return self.sslobj.selected_npn_protocol()

    def METHOD_NAME(self):
        return self.sslobj.METHOD_NAME()

    def compression(self):
        return self.sslobj.compression()

    def settimeout(self, value):
        self.socket.settimeout(value)

    def gettimeout(self):
        return self.socket.gettimeout()

    def _decref_socketios(self):
        self.socket._decref_socketios()

    def _wrap_ssl_read(self, len, buffer=None):
        try:
            return self._ssl_io_loop(self.sslobj.read, len, buffer)
        except ssl.SSLError as e:
            if e.errno == ssl.SSL_ERROR_EOF and self.suppress_ragged_eofs:
                return 0  # eof, return 0.
            else:
                raise

    def _ssl_io_loop(self, func, *args):
        """Performs an I/O loop between incoming/outgoing and the socket."""
        should_loop = True
        ret = None

        while should_loop:
            errno = None
            try:
                ret = func(*args)
            except ssl.SSLError as e:
                if e.errno not in (ssl.SSL_ERROR_WANT_READ, ssl.SSL_ERROR_WANT_WRITE):
                    # WANT_READ, and WANT_WRITE are expected, others are not.
                    raise e
                errno = e.errno

            # Flush whatever TLS bytes the SSL object produced, even after a
            # WANT_READ/WANT_WRITE (e.g. handshake records).
            buf = self.outgoing.read()
            self.socket.sendall(buf)

            if errno is None:
                should_loop = False
            elif errno == ssl.SSL_ERROR_WANT_READ:
                # Feed more TLS bytes from the socket into the incoming BIO;
                # an empty recv means the peer closed the connection.
                buf = self.socket.recv(SSL_BLOCKSIZE)
                if buf:
                    self.incoming.write(buf)
                else:
                    self.incoming.write_eof()
        return ret
4,723 | write collateral | import json
import argparse
# Even-numbered metal layers (metal2, metal4) route horizontally.
hMetals = {"metal%d" % i for i in range(2,6,2)}
# Odd-numbered metal layers (metal1, metal3, metal5) route vertically.
vMetals = {"metal%d" % i for i in range(1,6,2)}
# All routing layers in order, and the via layers connecting adjacent pairs.
metals = ["metal%d" % i for i in range(1,6)]
vias = ["via%d" % i for i in range(1,5)]
def generateVia( tech, v, l, u, l_width, u_width, l_space, u_space):
    """
    Render one ``Generator`` stanza describing a via between two adjacent
    metal layers.

    :param tech: technology dict (currently unused here; kept for interface
        compatibility with callers that pass it uniformly)
    :param v: via layer name (e.g. "via1")
    :param l: lower metal layer name
    :param u: upper metal layer name
    :param l_width: lower-layer wire width, in technology database units
        (divided by 10000 below to get microns — TODO confirm unit scale)
    :param u_width: upper-layer wire width, same units
    :param l_space: lower-layer minimum spacing (halved and scaled below)
    :param u_space: upper-layer minimum spacing
    :return: formatted generator text block (str)
    """
    halfSpace1 = "%.3f" % (l_space/20000)
    halfSpace2 = "%.3f" % (u_space/20000)
    zero = "%.3f" % ( 0/10000)
    width1 = "%.3f" % (l_width/10000)
    width2 = "%.3f" % (u_width/10000)
    # The cut dimension along each axis follows the width of whichever layer
    # runs in that direction; coverage extends along the wire direction only.
    if l in hMetals and u in vMetals:
        cutHeight = width1
        cutWidth = width2
        x1 = halfSpace1
        y1 = zero
        x2 = zero
        y2 = halfSpace2
    elif l in vMetals and u in hMetals:
        cutWidth = width1
        cutHeight = width2
        x1 = zero
        y1 = halfSpace1
        x2 = halfSpace2
        y2 = zero
    else:
        # Adjacent metal layers must alternate routing direction; anything
        # else is a malformed technology description. Raise (rather than
        # assert) so the check survives running under `python -O`.
        raise ValueError("unsupported layer orientation pair: %s over %s" % (l, u))
    return ("""Generator name={0}_{11}_{12} {{
Layer1 value={1} {{
x_coverage value={3}
y_coverage value={4}
widths value={7}
}}
Layer2 value={2} {{
x_coverage value={5}
y_coverage value={6}
widths value={8}
}}
CutWidth value={9}
CutHeight value={10}
cutlayer value={0}
}}
""").format( v, l, u, x1, y1, x2, y2, width1, width2, cutWidth, cutHeight, l_width, u_width)
def METHOD_NAME( tech):
    """
    Write the detailed-router collateral files into the current directory:
    car_generators.txt, arch.txt, layers.txt, design_rules.txt and
    v2_patterns.txt, all derived from the technology description `tech`.
    """
    # (via, lower metal, upper metal) for each adjacent layer pair.
    triples = zip( vias,metals[:-1],metals[1:])
    mts = { mt['name'] : mt for mt in mts_list(tech) } if False else { mt['name'] : mt for mt in tech['metalTemplates']}
    # Collect the union of allowed widths/spaces per layer across templates.
    widths = {}
    spaces = {}
    for (nm,mt) in mts.items():
        ly = mt['layer']
        if ly not in widths: widths[ly] = set()
        widths[ly] = widths[ly].union( set(mt['widths']))
        if ly not in spaces: spaces[ly] = set()
        spaces[ly] = spaces[ly].union( set(mt['spaces']))
    print( widths)
    print( spaces)
    # One generator stanza per via and per (lower width, upper width) combo.
    with open( "car_generators.txt", "wt") as fp:
        for (v,l,u) in triples:
            l_space = min(spaces[l])
            u_space = min(spaces[u])
            for l_width in widths[l]:
                for u_width in widths[u]:
                    fp.write( generateVia( tech, v, l, u, l_width, u_width, l_space, u_space))
    # Global-routing region dimensions (half-grid values doubled).
    with open( "arch.txt", "wt") as fp:
        fp.write( """Option name=gr_region_width_in_poly_pitches value={0}
Option name=gr_region_height_in_diff_pitches value={1}
""".format( tech['halfXGRGrid']*2, tech['halfYGRGrid']*2))
    def emitLayer( fp, layer, level, types=None, pgd=None, pitch=None, cLayers=None):
        # Emit one Layer record; every argument maps directly to a field.
        fp.write( "Layer name=%s" % layer)
        if pgd is not None:
            fp.write( " pgd=%s" % pgd)
        fp.write( " level=%d {\n" % level)
        if types is not None:
            for ty in types:
                fp.write( " Type value=%s\n" % ty)
        if pitch is not None:
            fp.write( " Technology pitch=%d\n" % pitch)
        if cLayers is not None:
            for ly in cLayers:
                fp.write( " ElectricallyConnected layer=%s\n" % ly)
        fp.write( "}\n")
    with open( "layers.txt", "wt") as fp:
        emitLayer( fp, "diffusion", 0, types=["diffusion"], pgd="hor", pitch=tech['pitchDG'])
        emitLayer( fp, "wirepoly", 1, types=["wire","poly"], pgd="ver", pitch=tech['pitchPoly'])
        def dir( m):
            # NOTE(review): this local helper shadows the builtin dir()
            # within this scope; returns the preferred routing direction.
            if m in vMetals:
                return "ver"
            elif m in hMetals:
                return "hor"
            else:
                assert False, m
        # Levels alternate metal, via, metal, via, ... starting at level 2.
        lCount = 2
        for i in range(len(metals)):
            m = metals[i]
            if i == 0:
                # Bottom metal connects only to the via above it.
                emitLayer( fp, m, lCount, types=["wire","metal"], pgd=dir(m), cLayers=vias[i:i+1])
            elif i < len(vias):
                # Middle metals connect to the vias below and above.
                emitLayer( fp, m, lCount, types=["wire","metal"], pgd=dir(m), cLayers=vias[i-1:i+1])
            else:
                # Top metal connects only to the via below it.
                emitLayer( fp, m, lCount, types=["wire","metal"], pgd=dir(m), cLayers=vias[i-1:i])
            lCount += 1
            if i < len(vias):
                emitLayer( fp, vias[i], lCount, types=["via"], cLayers=metals[i:i+2])
                lCount += 1
    with open( "design_rules.txt", "wt") as fp:
        # End-to-end spacing rule per metal (stored as half-values in tech).
        for m in metals:
            minete = str(tech['halfMinETESpaceM'+m[-1]]*2)
            fp.write( "Rule name={0}_{1} type={1} value={2} layer={0}\n".format( m, "minete", minete))
        fp.write( "\n")
        # Minimum wire length rule: three times the ETE spacing.
        for m in metals:
            minlength = str(tech['halfMinETESpaceM'+m[-1]]*2*3)
            fp.write( "Rule name={0}_{1} type={1} value={2} layer={0}\n".format( m, "minlength", minlength))
    # Placeholder: no v2 patterns are emitted yet.
    with open( "v2_patterns.txt", "wt") as fp:
        pass
import argparse
if __name__ == "__main__":
    # CLI entry point: load the technology JSON and emit the detailed-router
    # collateral files into the current working directory.
    parser = argparse.ArgumentParser( description="Generates detailed router collateral")
    parser.add_argument( "-tf", "--technology_file", type=str, default="Process.json")
    args = parser.parse_args()
    with open( args.technology_file, "rt") as fp:
        tech = json.load( fp)
    METHOD_NAME( tech)
|
4,724 | recover | import sys
import typing as t
from rich.tree import Tree
from rich.panel import Panel
from rich.pretty import Pretty
from starwhale.utils import console, pretty_bytes
from starwhale.consts import CREATED_AT_KEY, DEFAULT_PAGE_IDX, DEFAULT_PAGE_SIZE
from starwhale.base.view import BaseTermView
from starwhale.base.uri.project import Project as ProjectURI
from .model import Project, ProjectObjType
class ProjectTermView(BaseTermView):
    """Plain-terminal view for Starwhale project operations (create, list,
    select, remove, recover, info)."""

    def __init__(self, project_uri: str = "") -> None:
        super().__init__()
        self.raw_uri = project_uri
        self.uri = ProjectURI(project_uri)
        self.project = Project.get_project(self.uri)

    @BaseTermView._simple_action_print
    def create(self) -> t.Tuple[bool, str]:
        # The decorator renders the (ok, reason) tuple for the terminal.
        return self.project.create()

    @classmethod
    def list(
        cls,
        instance_uri: str = "",
        page: int = DEFAULT_PAGE_IDX,
        size: int = DEFAULT_PAGE_SIZE,
    ) -> t.Tuple[t.List[t.Any], t.Dict[str, t.Any]]:
        """Return (rows, pager) of project summaries for `instance_uri`."""
        projects, pager = Project.list(instance_uri, page, size)
        _current_project = ProjectURI(instance_uri)
        result = list()
        for _p in projects:
            _name = _p["name"]
            _owner = _p.get("owner", "")
            # Flag the project currently selected as the default.
            _is_current = _name == _current_project.name
            result.append(
                {
                    "in_use": _is_current,
                    "name": _name,
                    "location": _p.get("location", ""),
                    "owner": _owner,
                    CREATED_AT_KEY: _p[CREATED_AT_KEY],
                }
            )
        return result, pager

    def select(self) -> None:
        """Make this project the default; exits with status 1 on failure."""
        try:
            self.select_current_default(
                instance=self.uri.instance.alias,
                project=self.uri.name,
            )
        except Exception as e:
            console.print(
                f":broken_heart: failed to select {self.raw_uri}, reason: {e}"
            )
            sys.exit(1)
        else:
            console.print(
                f":clap: select instance:{self.current_instance}, project:{self.current_project} successfully"
            )

    def remove(self, force: bool = False) -> None:
        """Soft-remove the project (recoverable); exits 1 on failure."""
        ok, reason = self.project.remove(force)
        if ok:
            console.print(
                f":dog: remove project {self.project.name}, you can recover it, don't panic."
            )
        else:
            console.print(
                f":fearful: failed to remove project {self.project.name}, reason: {reason}"
            )
            sys.exit(1)

    def METHOD_NAME(self) -> None:
        """Recover a previously removed project; exits 1 on failure."""
        ok, reason = self.project.METHOD_NAME()
        if ok:
            console.print(f":clap: recover project {self.project.name}")
        else:
            console.print(
                f":fearful: failed to recover project {self.project.name}, reason: {reason}"
            )
            sys.exit(1)

    def info(self, fullname: bool = False) -> None:
        """Print project details plus trees of its models and datasets.

        :param fullname: show full version names instead of short ones.
        """
        _r = self.project.info()
        _models = _r.pop("models", [])
        _datasets = _r.pop("datasets", [])

        def _show_objects(objects: t.List[t.Dict[str, t.Any]], typ: str) -> Tree:
            # Render one tree per object type, one node per object version.
            tree = Tree(f"[red]{typ}[/]")
            for _o in objects:
                otree = tree.add(f"{_o['name']}")
                for _v in _o["latest_versions"]:
                    _k = "name" if fullname else "short_name"
                    if typ == ProjectObjType.MODEL:
                        # TODO: add model version for every version
                        _size = _o["files"][0]["size"]
                    else:
                        _size = pretty_bytes(_v["meta"]["blobs_byte_size"])
                    otree.add(
                        f"[{_v['id']}][green]{_v[_k]}[/] :timer_clock: {_v['created_at']} :dizzy:{_size}"
                    )
            return tree

        console.print(Panel(Pretty(_r), title="Project Details", title_align="left"))
        if _models or _datasets:
            _block = self.comparison(
                _show_objects(_models, ProjectObjType.MODEL),
                _show_objects(_datasets, ProjectObjType.DATASET),
            )
            console.print(_block)
# TODO: add ProjectHTTPView for http request
class ProjectTermViewRich(ProjectTermView):
    """Rich-rendered variant: adds table output for list and a header for info."""

    @classmethod
    @BaseTermView._pager  # type: ignore
    def list(
        cls,
        instance_uri: str = "",
        page: int = DEFAULT_PAGE_IDX,
        size: int = DEFAULT_PAGE_SIZE,
    ) -> t.Tuple[t.List[t.Any], t.Dict[str, t.Any]]:
        """Print a formatted table of projects, then return (rows, pager)."""
        projects, pager = super().list(instance_uri, page, size)
        title = "Project List"
        custom_column: t.Dict[str, t.Callable[[t.Any], str]] = {
            "in_use": lambda x: ":backhand_index_pointing_right:" if x else "",
            "location": cls.place_holder_for_empty(),
            "owner": cls.place_holder_for_empty(),
        }
        # Highlight the row for the currently selected project.
        custom_row: t.Callable[[t.Dict[str, t.Any]], t.Optional[t.Dict[str, str]]] = (
            lambda row: {"style": "magenta"} if row["in_use"] else None
        )
        cls.print_table(
            title, projects, custom_column=custom_column, custom_row=custom_row
        )
        return projects, pager

    @BaseTermView._header  # type: ignore
    def info(self, fullname: bool = False) -> None:
        # NOTE(review): this body duplicates ProjectTermView.info() except for
        # the _header decorator; consider delegating to super().info(fullname).
        _r = self.project.info()
        _models = _r.pop("models", [])
        _datasets = _r.pop("datasets", [])

        def _show_objects(objects: t.List[t.Dict[str, t.Any]], typ: str) -> Tree:
            tree = Tree(f"[red]{typ}[/]")
            for _o in objects:
                otree = tree.add(f"{_o['name']}")
                for _v in _o["latest_versions"]:
                    _k = "name" if fullname else "short_name"
                    if typ == ProjectObjType.MODEL:
                        # TODO: add model version for every version
                        _size = _o["files"][0]["size"]
                    else:
                        _size = pretty_bytes(_v["meta"]["blobs_byte_size"])
                    otree.add(
                        f"[{_v['id']}][green]{_v[_k]}[/] :timer_clock: {_v['created_at']} :dizzy:{_size}"
                    )
            return tree

        console.print(Panel(Pretty(_r), title="Project Details", title_align="left"))
        if _models or _datasets:
            _block = self.comparison(
                _show_objects(_models, ProjectObjType.MODEL),
                _show_objects(_datasets, ProjectObjType.DATASET),
            )
            console.print(_block)
class ProjectTermViewJson(ProjectTermView):
    """JSON-output variant: prints results as pretty JSON instead of tables."""

    @classmethod
    def list(  # type: ignore
        cls,
        instance_uri: str = "",
        page: int = DEFAULT_PAGE_IDX,
        size: int = DEFAULT_PAGE_SIZE,
    ) -> None:
        # Pager info is dropped; only the project rows are emitted as JSON.
        projects, pager = super().list(instance_uri, page, size)
        cls.pretty_json(projects)

    def info(self, fullname: bool = False) -> None:
        # `fullname` is unused here: the raw info dict is emitted as-is.
        _r = self.project.info()
        self.pretty_json(_r)
def get_term_view(ctx_obj: t.Dict) -> t.Type[ProjectTermView]:
    """Pick the project view class matching the requested output format."""
    if ctx_obj.get("output") == "json":
        return ProjectTermViewJson
    return ProjectTermViewRich
4,725 | exceeds cache size limit | import logging
import types
import weakref
from dataclasses import dataclass
from . import config
log = logging.getLogger(__name__)
"""
[Note on cache size limit]
Background - TorchDynamo cache is a linked list. Each cache entry is a
(check_fn, out_code, next pointer). These are stored on the f_code's co_extra
scratch space. When a frame is invoked, we walk this linked list and run
check_fn in each cache_entry to decide if the frame needs recompilation. If none
of the check_fn's returns True, we recompile and add a new entry. To ensure we
don't end up recompiling infinitely, we put limits on the cache size.
There are two limits
1) cache_size_limit
2) accumulated_cache_size_limit
Earlier we used to have only one limit - the maximum number of entries in one
cache line (which is now represented by (2) above). So, why do we need two
limits? Let's try to understand that.
In general, we want our cache limit value to be a small number (e.g. 8 or even
lower). This ensures that frames that cause too many recompilations fall back to
eager quickly. However, there is another problem that prevents us from lowering
the value of cache_size_limit. This is due to ID_MATCH'd guards. Today, we put
ID_MATCH guards on nn module if there is a graph break. This means we will have
many recompilations for the same code object because the ID_MATCH guard fails
for different instances of the nn module. This is a common pattern in how models
are authored. Therefore, this requires us to keep the cache_size_limit high.
We resolve this by introducing these two limits. The first limit (1) limits the
number of cache entries that have an ID_MATCH'd guard for an nn module instance.
And, (2)nd limit becomes a safeguard mechanism to have a maximum compilations
for a code object. One important question is - what is the limit for the code
object that does not have any ID_MATCH guard? For such code objects, we choose
(1) as the cache size limit.
Lets take an example to understand how these limits help. Suppose, we have 16
instances of a nn module and we ID_MATCH on the self object. Further, suppose
the inputs to these functions have varying batch size, leading to one
recompilation. In total, there will be 32 recompilations, and therefore 32 cache
entries on the forward code object. In the older case when we had only 1 limit,
our cache size limit must be >= 32 to capture all these recompilations. Now,
suppose there is a separate function in the same program which is very dynamic
and unsuitable for compilation. Such a function will need to undergo 32
compilations to burst the cache and fallback to eager. These 32 recompilations
are too many and we want to fallback for these compilation-unfriendly functions
sooner.
In the new scenario, we can have (1) cache_size_limit = 2, (2)
accumulated_cache_size_limit = 32. This means that each ID_MATCH'd object can
have maximum of two cache entries, and the maximum number of cache entries
(irrespective of ID_MATCH obj) is 32. This covers the case of forward code
object which has 32 recompilations. For the other function, the one unsuitable
for recompilation, our limit is 2. So, we will burst the cache in just 2
recompilations. In this manner, these 2 limits help us resolve the tension
mentioned earlier.
"""
@dataclass
class CacheSizeRelevantForFrame:
    """
    We track the number of cache entries that have same id_match objects as the
    given frame.

    TODO(janimesh) - Consider adding a map from tuple_of_match_ids to count -
    https://github.com/pytorch/pytorch/pull/107496#discussion_r1304564682 - this
    could be useful for debugging as well.
    """

    # Total number of CacheEntry objects in the Dynamo linked list
    num_cache_entries: int = 0

    # Number of CacheEntry objects having same ID_MATCH'd objects as given frame.
    num_cache_entries_with_same_id_matched_objs: int = 0

    def will_compilation_exceed(self, limit: int) -> bool:
        # Checks if a compilation will exceed the given limit (that's why >=).
        # The accumulated limit caps total entries on the code object, while
        # `limit` caps entries sharing this frame's ID_MATCH'd objects.
        return (
            self.num_cache_entries >= config.accumulated_cache_size_limit
            or self.num_cache_entries_with_same_id_matched_objs >= limit
        )
def _get_weakref_from_f_locals(frame: types.FrameType, local_name: str):
obj = frame.f_locals.get(local_name, None)
weak_id = None
try:
weak_id = weakref.ref(obj)
except TypeError:
pass # cannot weakref bool object
return weak_id
def _has_same_id_matched_objs(frame: types.FrameType, cache_entry) -> bool:
    """
    Checks if the ID_MATCH'd objects saved on cache_entry are same as the ones
    in frame.f_locals.
    """
    if not cache_entry:
        return False

    for local_name, cached_weakref in cache_entry.check_fn.id_matched_objs.items():
        if cached_weakref() is None:
            # The cached object has died; there is nothing left to compare.
            continue
        frame_weakref = _get_weakref_from_f_locals(frame, local_name)
        if frame_weakref != cached_weakref:
            return False

    # Also covers the case where no ID_MATCH objects are saved in frame.f_locals
    return True
def compute_cache_size(
    frame: types.FrameType, cache_entry
) -> CacheSizeRelevantForFrame:
    """Walk the Dynamo cache linked list and count entries relevant to *frame*."""
    total_entries = 0
    same_id_matched_entries = 0
    node = cache_entry
    while node:
        total_entries += 1
        # Entries sharing this frame's ID_MATCH'd objects are tracked
        # separately; they are later compared against cache_size_limit.
        if _has_same_id_matched_objs(frame, node):
            same_id_matched_entries += 1
        node = node.next
    return CacheSizeRelevantForFrame(total_entries, same_id_matched_entries)
def is_recompilation(cache_size: CacheSizeRelevantForFrame) -> bool:
    """
    If the frame (earlier parsed by compute_cache_size) has more than 1 cache
    entry with same ID_MATCH'd objects, then its a recompilation.
    """
    # Many cache entries alone do not imply a recompile: 64 nn module
    # instances, each with an ID_MATCH guard and a single entry apiece, give
    # 64 entries yet zero recompilations. Only a second entry for the *same*
    # id_matched_obj counts — hence the per-object limit of 1 here.
    per_object_limit = 1
    return cache_size.will_compilation_exceed(per_object_limit)
def METHOD_NAME(cache_size: CacheSizeRelevantForFrame) -> bool:
    """
    Checks if we are exceeding the cache size limit.
    """
    # Delegate to the dataclass helper, using the per-frame limit from config.
    per_frame_limit = config.cache_size_limit
    return cache_size.will_compilation_exceed(per_frame_limit)
4,726 | init stubs | #!/usr/bin/env python
# Copyright 2016 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: disable=no-self-use
"""
To run these tests:
$ pip install webtest nosegae
$ nosetests --with-gae --gae-lib-root ~/google_appengine/
"""
import json
import unittest
import webtest
from google.appengine.ext import deferred
from google.appengine.ext import testbed
import handlers
import main
import models
import secrets
app = webtest.TestApp(main.app)
class TestBase(unittest.TestCase):
    """Base class that initializes the App Engine service stubs used by tests."""

    def METHOD_NAME(self):
        # Initialize every stub the handlers touch.
        # NOTE(review): this assumes self.testbed has already been created and
        # activated before this runs (e.g. by the nosegae plugin) — it is not
        # set up anywhere in this class; confirm.
        self.testbed.init_memcache_stub()
        self.testbed.init_app_identity_stub()
        self.testbed.init_urlfetch_stub()
        self.testbed.init_blobstore_stub()
        self.testbed.init_datastore_v3_stub()
        self.testbed.init_taskqueue_stub()
class AppTest(TestBase):
    """End-to-end tests for the /webhook endpoint (Python 2, webtest)."""

    def setUp(self):
        self.METHOD_NAME()
        # Keep a handle on the task queue stub so queued work can be drained.
        self.taskqueue = self.testbed.get_stub(testbed.TASKQUEUE_SERVICE_NAME)
        # The webhook handler validates request signatures against this secret.
        secrets.put('github_webhook_secret', 'some_secret', per_host=False)

    def get_response(self, event, body):
        # POST a correctly signed webhook `event`, then run any deferred tasks
        # it queued so datastore side effects are visible to assertions.
        # `body` may be a dict (JSON-encoded here) or a pre-encoded string.
        if isinstance(body, dict):
            body = json.dumps(body)
        signature = handlers.make_signature(body)
        resp = app.post('/webhook', body,
                        {'X-Github-Event': event,
                         'X-Hub-Signature': signature})
        for task in self.taskqueue.get_filtered_tasks():
            deferred.run(task.payload)
        return resp

    def test_webhook(self):
        # A signed event is stored raw, with a timestamp.
        self.get_response('test', {'action': 'blah'})
        hooks = list(models.GithubWebhookRaw.query())
        self.assertEqual(len(hooks), 1)
        self.assertIsNotNone(hooks[0].timestamp)

    def test_webhook_bad_sig(self):
        # A signature computed over different content must be rejected (400).
        body = json.dumps({'action': 'blah'})
        signature = handlers.make_signature(body + 'foo')
        app.post('/webhook', body,
                 {'X-Github-Event': 'test',
                  'X-Hub-Signature': signature}, status=400)

    def test_webhook_missing_sig(self):
        # Requests without a signature header are rejected outright (400).
        app.post('/webhook', '{}',
                 {'X-Github-Event': 'test'}, status=400)

    def test_webhook_unicode(self):
        # Non-ASCII payloads must sign and store cleanly (Python 2 unicode).
        self.get_response('test', {'action': u'blah\u03BA'})

    def test_webhook_status(self):
        args = {
            'name': 'owner/repo',
            'sha': '1234',
            'context': 'ci',
            'state': 'success',
            'target_url': 'http://example.com',
            'description': 'passed the tests!',
            'created_at': '2016-07-07T01:58:09Z',
            'updated_at': '2016-07-07T02:03:12Z',
        }
        self.get_response('status', args)
        statuses = list(models.GHStatus.query_for_sha('owner/repo', '1234'))
        self.assertEqual(len(statuses), 1)
        status = statuses[0]
        # The model stores the repository name under `repo`, not `name`.
        args['repo'] = args.pop('name')
        for key, value in args.iteritems():  # Python 2 dict iteration
            status_val = getattr(status, key)
            try:
                # Datetime properties compare via their ISO-8601 rendering.
                status_val = status_val.strftime('%Y-%m-%dT%H:%M:%SZ')
            except AttributeError:
                pass
            assert status_val == value, '%r != %r' % (getattr(status, key), value)

    # Canonical pull_request webhook payload reused across the PR tests.
    PR_EVENT_BODY = {
        'repository': {'full_name': 'test/test'},
        'pull_request': {
            'number': 123,
            'head': {'sha': 'cafe'},
            'updated_at': '2016-07-07T02:03:12+00:00',
            'state': 'open',
            'user': {'login': 'rmmh'},
            'assignees': [{'login': 'spxtr'}],
            'title': 'test pr',
        },
        'action': 'opened',
    }

    def test_webhook_pr_open(self):
        # Opening a PR creates a digest involving author and assignees.
        body = json.dumps(self.PR_EVENT_BODY)
        self.get_response('pull_request', body)
        digest = models.GHIssueDigest.get('test/test', 123)
        self.assertTrue(digest.is_pr)
        self.assertTrue(digest.is_open)
        self.assertEqual(digest.involved, ['rmmh', 'spxtr'])
        self.assertEqual(digest.payload['title'], 'test pr')
        self.assertEqual(digest.payload['needs_rebase'], False)

    def test_webhook_pr_open_and_status(self):
        # A status event for the PR head SHA is merged into the digest payload.
        self.get_response('pull_request', self.PR_EVENT_BODY)
        self.get_response('status', {
            'repository': self.PR_EVENT_BODY['repository'],
            'name': self.PR_EVENT_BODY['repository']['full_name'],
            'sha': self.PR_EVENT_BODY['pull_request']['head']['sha'],
            'context': 'test-ci',
            'state': 'success',
            'target_url': 'example.com',
            'description': 'woop!',
            'created_at': '2016-07-07T01:58:09Z',
            'updated_at': '2016-07-07T02:03:15Z',
        })
        digest = models.GHIssueDigest.get('test/test', 123)
        self.assertEqual(digest.payload['status'],
                         {'test-ci': ['success', 'example.com', 'woop!']})
4,727 | convert nest | """
Convert weights from https://github.com/google-research/nested-transformer
NOTE: You'll need https://github.com/google/CommonLoopUtils, not included in requirements.txt
"""
import sys
import numpy as np
import torch
from clu import checkpoint
arch_depths = {
'nest_base': [2, 2, 20],
'nest_small': [2, 2, 20],
'nest_tiny': [2, 2, 8],
}
def METHOD_NAME(checkpoint_path, arch):
    """
    Expects path to checkpoint which is a dir containing 4 files like in each of these folders
    - https://console.cloud.google.com/storage/browser/gresearch/nest-checkpoints
    `arch` is needed to look up the per-level transformer depths of the model.
    Returns a state dict that can be used with `torch.nn.Module.load_state_dict`
    Hint: Follow timm.models.nest.Nest.__init__ and
    https://github.com/google-research/nested-transformer/blob/main/models/nest_net.py
    """
    assert arch in ['nest_base', 'nest_small', 'nest_tiny'], "Your `arch` is not supported"

    # Flax checkpoints nest the parameters under optimizer/target.
    flax_dict = checkpoint.load_state_dict(checkpoint_path)['optimizer']['target']
    state_dict = {}

    # Patch embedding
    # Conv kernels: permute (H, W, C_in, C_out) -> (C_out, C_in, H, W) for PyTorch.
    state_dict['patch_embed.proj.weight'] = torch.tensor(
        flax_dict['PatchEmbedding_0']['Conv_0']['kernel']).permute(3, 2, 0, 1)
    state_dict['patch_embed.proj.bias'] = torch.tensor(flax_dict['PatchEmbedding_0']['Conv_0']['bias'])

    # Positional embeddings
    posemb_keys = [k for k in flax_dict.keys() if k.startswith('PositionEmbedding')]
    for i, k in enumerate(posemb_keys):
        state_dict[f'levels.{i}.pos_embed'] = torch.tensor(flax_dict[k]['pos_embedding'])

    # Transformer encoders
    depths = arch_depths[arch]
    for level in range(len(depths)):
        for layer in range(depths[level]):
            # Flax numbers encoder blocks globally; timm nests them per level.
            global_layer_ix = sum(depths[:level]) + layer
            # Norms
            for i in range(2):
                state_dict[f'levels.{level}.transformer_encoder.{layer}.norm{i+1}.weight'] = torch.tensor(
                    flax_dict[f'EncoderNDBlock_{global_layer_ix}'][f'LayerNorm_{i}']['scale'])
                state_dict[f'levels.{level}.transformer_encoder.{layer}.norm{i+1}.bias'] = torch.tensor(
                    flax_dict[f'EncoderNDBlock_{global_layer_ix}'][f'LayerNorm_{i}']['bias'])

            # Attention qkv: Flax stores Q and fused KV separately; interleave
            # the KV halves, then concatenate Q+K+V into timm's single matrix.
            w_q = flax_dict[f'EncoderNDBlock_{global_layer_ix}']['MultiHeadAttention_0']['DenseGeneral_0']['kernel']
            w_kv = flax_dict[f'EncoderNDBlock_{global_layer_ix}']['MultiHeadAttention_0']['DenseGeneral_1']['kernel']
            # Pay attention to dims here (maybe get pen and paper)
            w_kv = np.concatenate(np.split(w_kv, 2, -1), 1)
            w_qkv = np.concatenate([w_q, w_kv], 1)
            state_dict[f'levels.{level}.transformer_encoder.{layer}.attn.qkv.weight'] = torch.tensor(w_qkv).flatten(1).permute(1,0)

            b_q = flax_dict[f'EncoderNDBlock_{global_layer_ix}']['MultiHeadAttention_0']['DenseGeneral_0']['bias']
            b_kv = flax_dict[f'EncoderNDBlock_{global_layer_ix}']['MultiHeadAttention_0']['DenseGeneral_1']['bias']
            # Pay attention to dims here (maybe get pen and paper)
            b_kv = np.concatenate(np.split(b_kv, 2, -1), 0)
            b_qkv = np.concatenate([b_q, b_kv], 0)
            state_dict[f'levels.{level}.transformer_encoder.{layer}.attn.qkv.bias'] = torch.tensor(b_qkv).reshape(-1)

            # Attention proj
            w_proj = flax_dict[f'EncoderNDBlock_{global_layer_ix}']['MultiHeadAttention_0']['proj_kernel']
            w_proj = torch.tensor(w_proj).permute(2, 1, 0).flatten(1)
            state_dict[f'levels.{level}.transformer_encoder.{layer}.attn.proj.weight'] = w_proj
            state_dict[f'levels.{level}.transformer_encoder.{layer}.attn.proj.bias'] = torch.tensor(
                flax_dict[f'EncoderNDBlock_{global_layer_ix}']['MultiHeadAttention_0']['bias'])

            # MLP: dense kernels are (in, out) in Flax; transpose for nn.Linear.
            for i in range(2):
                state_dict[f'levels.{level}.transformer_encoder.{layer}.mlp.fc{i+1}.weight'] = torch.tensor(
                    flax_dict[f'EncoderNDBlock_{global_layer_ix}']['MlpBlock_0'][f'Dense_{i}']['kernel']).permute(1, 0)
                state_dict[f'levels.{level}.transformer_encoder.{layer}.mlp.fc{i+1}.bias'] = torch.tensor(
                    flax_dict[f'EncoderNDBlock_{global_layer_ix}']['MlpBlock_0'][f'Dense_{i}']['bias'])

    # Block aggregations (ConvPool) — levels 1+ each downsample the previous level.
    for level in range(1, len(depths)):
        # Convs
        state_dict[f'levels.{level}.pool.conv.weight'] = torch.tensor(
            flax_dict[f'ConvPool_{level-1}']['Conv_0']['kernel']).permute(3, 2, 0, 1)
        state_dict[f'levels.{level}.pool.conv.bias'] = torch.tensor(
            flax_dict[f'ConvPool_{level-1}']['Conv_0']['bias'])
        # Norms
        state_dict[f'levels.{level}.pool.norm.weight'] = torch.tensor(
            flax_dict[f'ConvPool_{level-1}']['LayerNorm_0']['scale'])
        state_dict[f'levels.{level}.pool.norm.bias'] = torch.tensor(
            flax_dict[f'ConvPool_{level-1}']['LayerNorm_0']['bias'])

    # Final norm
    state_dict[f'norm.weight'] = torch.tensor(flax_dict['LayerNorm_0']['scale'])
    state_dict[f'norm.bias'] = torch.tensor(flax_dict['LayerNorm_0']['bias'])

    # Classifier
    state_dict['head.weight'] = torch.tensor(flax_dict['Dense_0']['kernel']).permute(1, 0)
    state_dict['head.bias'] = torch.tensor(flax_dict['Dense_0']['bias'])

    return state_dict
if __name__ == '__main__':
    # Usage: python convert_nest.py {base|small|tiny}
    variant = sys.argv[1]  # base, small, or tiny
    state_dict = METHOD_NAME(f'./nest-{variant[0]}_imagenet', f'nest_{variant}')
    # Bug fix: the original torch.save call was missing its closing
    # parenthesis, making the whole script a SyntaxError.
    torch.save(state_dict, f'./jx_nest_{variant}.pth')
4,728 | test chemception classification | import os
import numpy as np
import tempfile
import pytest
from flaky import flaky
import deepchem as dc
from deepchem.feat import create_char_to_idx, SmilesToSeq, SmilesToImage
from deepchem.molnet.load_function.chembl25_datasets import CHEMBL25_TASKS
# Optional dependency: these models require TensorFlow via deepchem.models.
# Bug fix: the original bare `except:` (E722) also swallowed SystemExit and
# KeyboardInterrupt; only a failed import should disable the tests.
try:
    from deepchem.models import Smiles2Vec, ChemCeption

    has_tensorflow = True
except ImportError:
    has_tensorflow = False
@pytest.mark.tensorflow
def get_dataset(mode="classification",
                featurizer="smiles2seq",
                max_seq_len=20,
                data_points=10,
                n_tasks=5):
    """Build a small randomly-labelled dataset from the bundled ChEMBL25 sample.

    Returns (dataset, metric, char_to_idx) when featurizer == "smiles2seq",
    otherwise (dataset, metric).

    NOTE(review): the pytest marker on this non-test helper is a no-op, and
    `feat` is unbound (NameError) for featurizer values other than
    "smiles2seq"/"smiles2img" — confirm both are intended.
    """
    dataset_file = os.path.join(os.path.dirname(__file__), "assets",
                                "chembl_25_small.csv")
    if featurizer == "smiles2seq":
        max_len = 250
        pad_len = 10
        char_to_idx = create_char_to_idx(dataset_file,
                                         max_len=max_len,
                                         smiles_field="smiles")
        feat = SmilesToSeq(char_to_idx=char_to_idx,
                           max_len=max_len,
                           pad_len=pad_len)
    elif featurizer == "smiles2img":
        img_size = 80
        img_spec = "engd"
        res = 0.5
        feat = SmilesToImage(img_size=img_size, img_spec=img_spec, res=res)

    loader = dc.data.CSVLoader(tasks=CHEMBL25_TASKS,
                               smiles_field='smiles',
                               featurizer=feat)
    dataset = loader.create_dataset(inputs=[dataset_file],
                                    shard_size=10000,
                                    data_dir=tempfile.mkdtemp())

    w = np.ones(shape=(data_points, n_tasks))
    if mode == 'classification':
        # Labels are random: the tests only check the model can overfit them.
        y = np.random.randint(0, 2, size=(data_points, n_tasks))
        metric = dc.metrics.Metric(dc.metrics.roc_auc_score,
                                   np.mean,
                                   mode="classification")
    else:
        y = np.random.normal(size=(data_points, n_tasks))
        metric = dc.metrics.Metric(dc.metrics.mean_absolute_error,
                                   mode="regression")

    # Truncate to the requested number of points (and sequence length for seq).
    if featurizer == "smiles2seq":
        dataset = dc.data.NumpyDataset(dataset.X[:data_points, :max_seq_len], y,
                                       w, dataset.ids[:data_points])
    else:
        dataset = dc.data.NumpyDataset(dataset.X[:data_points], y, w,
                                       dataset.ids[:data_points])

    if featurizer == "smiles2seq":
        return dataset, metric, char_to_idx
    else:
        return dataset, metric
@pytest.mark.slow
@pytest.mark.tensorflow
def test_chemception_regression():
    """ChemCeption should overfit random regression targets on image features."""
    n_tasks = 5
    dataset, metric = get_dataset(mode="regression",
                                  featurizer="smiles2img",
                                  n_tasks=n_tasks)
    model = ChemCeption(n_tasks=n_tasks,
                        img_spec="engd",
                        model_dir=None,
                        mode="regression")
    model.fit(dataset, nb_epoch=300)
    scores = model.evaluate(dataset, [metric], [])
    # Low train-set MAE shows the model memorized the random labels.
    assert scores['mean_absolute_error'] < 0.1
@pytest.mark.slow
@pytest.mark.tensorflow
def METHOD_NAME():
    """ChemCeption should overfit random classification targets on image features."""
    n_tasks = 5
    dataset, metric = get_dataset(mode="classification",
                                  featurizer="smiles2img",
                                  n_tasks=n_tasks)
    model = ChemCeption(n_tasks=n_tasks,
                        img_spec="engd",
                        model_dir=None,
                        mode="classification")
    model.fit(dataset, nb_epoch=300)
    scores = model.evaluate(dataset, [metric], [])
    # High train-set ROC-AUC shows the model memorized the random labels.
    assert scores['mean-roc_auc_score'] >= 0.9
@pytest.mark.slow
@pytest.mark.tensorflow
def test_smiles_to_vec_regression():
    """Smiles2Vec should overfit random regression targets on sequence features."""
    n_tasks = 5
    max_seq_len = 20
    dataset, metric, char_to_idx = get_dataset(mode="regression",
                                               featurizer="smiles2seq",
                                               n_tasks=n_tasks,
                                               max_seq_len=max_seq_len)
    model = Smiles2Vec(char_to_idx=char_to_idx,
                       max_seq_len=max_seq_len,
                       use_conv=True,
                       n_tasks=n_tasks,
                       model_dir=None,
                       mode="regression")
    model.fit(dataset, nb_epoch=500)
    scores = model.evaluate(dataset, [metric], [])
    # Low train-set MAE shows the model memorized the random labels.
    assert scores['mean_absolute_error'] < 0.1
@pytest.mark.slow
@pytest.mark.tensorflow
def test_smiles_to_vec_classification():
    """Smiles2Vec should overfit random classification targets on sequence features."""
    n_tasks = 5
    max_seq_len = 20
    # Trailing comma in the target list is harmless (still tuple unpacking).
    dataset, metric, char_to_idx, = get_dataset(mode="classification",
                                                featurizer="smiles2seq",
                                                n_tasks=n_tasks,
                                                max_seq_len=max_seq_len)
    model = Smiles2Vec(char_to_idx=char_to_idx,
                       max_seq_len=max_seq_len,
                       use_conv=True,
                       n_tasks=n_tasks,
                       model_dir=None,
                       mode="classification")
    model.fit(dataset, nb_epoch=500)
    scores = model.evaluate(dataset, [metric], [])
    # High train-set ROC-AUC shows the model memorized the random labels.
    assert scores['mean-roc_auc_score'] >= 0.9
@flaky
@pytest.mark.slow
@pytest.mark.tensorflow
def test_chemception_fit_with_augmentation():
    """ChemCeption with image augmentation enabled should still fit the data."""
    n_tasks = 5
    dataset, metric = get_dataset(mode="classification",
                                  featurizer="smiles2img",
                                  n_tasks=n_tasks)
    model = ChemCeption(n_tasks=n_tasks,
                        img_spec="engd",
                        model_dir=None,
                        augment=True,
                        mode="classification")
    model.fit(dataset, nb_epoch=300)
    scores = model.evaluate(dataset, [metric], [])
    # Augmentation adds noise, hence the @flaky retry on this threshold.
    assert scores['mean-roc_auc_score'] >= 0.9
4,729 | test leeway global today | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# This file is part of OpenDrift.
#
# OpenDrift is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, version 2
#
# OpenDrift is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with OpenDrift. If not, see <https://www.gnu.org/licenses/>.
#
# Copyright 2015, Knut-Frode Dagestad, MET Norway
import unittest
import os
from datetime import datetime, timedelta
import numpy as np
from opendrift.models.oceandrift import OceanDrift
from opendrift.models.leeway import Leeway
from opendrift.models.openoil import OpenOil
from opendrift.readers import reader_netCDF_CF_generic
from opendrift.readers import reader_ROMS_native
from opendrift.readers import reader_constant
from opendrift.readers import reader_from_url
class TestWPS(unittest.TestCase):
    """Tests for wps simulations.

    Each test seeds a drift model at a location/time and checks that a short
    simulation completes; the shared machinery lives in ``_run_drift``.
    """

    def _run_drift(self, model_class, lon, lat, time, export_ascii=False):
        """Seed 100 elements for ``model_class``, run 15 steps, assert completion.

        Readers come from the bundled data_sources.txt list. When
        ``export_ascii`` is set, the ASCII export path is exercised as well.
        """
        o = model_class(loglevel=0)
        o.add_readers_from_file(o.test_data_folder() +
                                '../../opendrift/scripts/data_sources.txt')
        o.seed_elements(lon=lon, lat=lat, number=100, radius=1000, time=time)
        o.run(steps=15)
        if export_ascii:
            o.export_ascii('leeway_ascii.txt')
            os.remove('leeway_ascii.txt')
        print(o)
        self.assertEqual(o.steps_calculation, 15)

    def test_leeway_today(self):
        self._run_drift(Leeway, 14, 67.85, datetime.now())

    def test_leeway_yesterday(self):
        self._run_drift(Leeway, 14, 67.85,
                        datetime.now() - timedelta(days=1), export_ascii=True)

    def METHOD_NAME(self):
        self._run_drift(Leeway, 50, 29, datetime.now())

    def test_leeway_global_one_month_ago(self):
        self._run_drift(Leeway, 50, 29,
                        datetime.now() - timedelta(days=30), export_ascii=True)

    def test_openoil_today(self):
        self._run_drift(OpenOil, 14, 67.85, datetime.now())

    def test_openoil_yesterday(self):
        self._run_drift(OpenOil, 14, 67.85, datetime.now() - timedelta(days=1))

    def test_openoil_global_today(self):
        self._run_drift(OpenOil, 50, 29, datetime.now())

    def test_openoil_global_one_month_ago(self):
        self._run_drift(OpenOil, 50, 29, datetime.now() - timedelta(days=30))

    #def test_oildrift_backwards(self):
    #    o = OpenOil(loglevel=20)
    #    reader_constant_wind = \
    #        reader_constant.Reader({'x_wind':5, 'y_wind': 6})
    #    o.add_reader(reader_constant_wind)
    #    o.add_readers_from_list(reader_list, lazy=True)
    #    self.assertEqual(len(o._lazy_readers()), 4)
    #    o.seed_elements(lon=14, lat=67.85,
    #                    time=datetime(2016, 2, 2, 12))
    #    o.set_config()
    #    o.run(steps=5)
    #    self.assertEqual(len(o._lazy_readers()), 2)
    #    self.assertEqual(len(o.discarded_readers), 1)

    #def test_lazy_reader_oildrift_real(self):
    #    o = OpenOil(loglevel=0)
    #    o.add_readers_from_file(o.test_data_folder() +
    #        '../../opendrift/scripts/data_sources.txt')
    #    o.seed_elements(lon=4, lat=60.0,
    #                    time=datetime(2018, 7, 2, 12))
    #    o.run(steps=5)
    #    print (o)
if __name__ == '__main__':
    # Run this module's unittest suite when executed directly.
    unittest.main()
4,730 | tie average dcg | # Copyright The Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Optional
import torch
from torch import Tensor
from torchmetrics.utilities.checks import _check_retrieval_functional_inputs
def METHOD_NAME(target: Tensor, preds: Tensor, discount_cumsum: Tensor) -> Tensor:
    """Translated version of sklearns `_tie_average_dcg` function.

    Documents sharing a prediction score form a tie group; each group
    contributes its mean relevance times the total discount mass of the
    ranks the group occupies.

    Args:
        target: ground truth about each document relevance.
        preds: estimated probabilities of each document to be relevant.
        discount_cumsum: cumulative sum of the discount.

    Returns:
        The cumulative gain of the tied elements.
    """
    _, inverse_idx, group_counts = torch.unique(-preds, return_inverse=True, return_counts=True)
    # Mean relevance per tie group: scatter-add targets, divide by group size.
    mean_gain = torch.zeros_like(group_counts, dtype=torch.float32)
    mean_gain.scatter_add_(0, inverse_idx, target.to(dtype=mean_gain.dtype))
    mean_gain = mean_gain / group_counts
    # Index of the last rank occupied by each tie group.
    last_rank = group_counts.cumsum(dim=0) - 1
    # Discount mass covered by each group = difference of the cumulative sums.
    group_discounts = torch.zeros_like(group_counts, dtype=torch.float32)
    group_discounts[0] = discount_cumsum[last_rank[0]]
    group_discounts[1:] = discount_cumsum[last_rank].diff()
    return (mean_gain * group_discounts).sum()
def _dcg_sample_scores(target: Tensor, preds: Tensor, top_k: int, ignore_ties: bool) -> Tensor:
    """Translated version of sklearns `_dcg_sample_scores` function.

    Args:
        target: ground truth about each document relevance.
        preds: estimated probabilities of each document to be relevant.
        top_k: consider only the top k elements
        ignore_ties: If True, ties are ignored. If False, ties are averaged.

    Returns:
        The cumulative gain
    """
    positions = torch.arange(target.shape[-1], device=target.device)
    discount = 1.0 / torch.log2(positions + 2.0)
    # Ranks beyond top_k contribute nothing to the gain.
    discount[top_k:] = 0.0

    if not ignore_ties:
        # Tied predictions share their ranks; average the gain per tie group.
        return METHOD_NAME(target, preds, discount.cumsum(dim=-1))

    order = preds.argsort(descending=True)
    return (discount * target[order]).sum()
def retrieval_normalized_dcg(preds: Tensor, target: Tensor, top_k: Optional[int] = None) -> Tensor:
    """Compute `Normalized Discounted Cumulative Gain`_ (for information retrieval).

    ``preds`` and ``target`` should be of the same shape and live on the same device.
    ``target`` must be either `bool` or `integers` and ``preds`` must be ``float``,
    otherwise an error is raised.

    Args:
        preds: estimated probabilities of each document to be relevant.
        target: ground truth about each document relevance.
        top_k: consider only the top k elements (default: ``None``, which considers them all)

    Return:
        A single-value tensor with the nDCG of the predictions ``preds`` w.r.t. the labels ``target``.

    Raises:
        ValueError:
            If ``top_k`` parameter is not `None` or an integer larger than 0

    Example:
        >>> from torchmetrics.functional.retrieval import retrieval_normalized_dcg
        >>> preds = torch.tensor([.1, .2, .3, 4, 70])
        >>> target = torch.tensor([10, 0, 0, 1, 5])
        >>> retrieval_normalized_dcg(preds, target)
        tensor(0.6957)
    """
    preds, target = _check_retrieval_functional_inputs(preds, target, allow_non_binary_target=True)

    if top_k is None:
        top_k = preds.shape[-1]
    if not isinstance(top_k, int) or top_k <= 0:
        raise ValueError("`top_k` has to be a positive integer or None")

    gain = _dcg_sample_scores(target, preds, top_k, ignore_ties=False)
    # The ideal ordering ranks documents by their true relevance.
    ideal_gain = _dcg_sample_scores(target, target, top_k, ignore_ties=True)

    # Queries without any relevant document have an ideal gain of zero;
    # define their nDCG as zero rather than dividing by zero.
    undefined = ideal_gain == 0
    gain[undefined] = 0
    gain[~undefined] /= ideal_gain[~undefined]
    return gain.mean()
4,731 | test subtitle small numbers a4 | from django.http import QueryDict
from django.test import tag
from resources.generators.SortingNetworkCardsResourceGenerator import SortingNetworkCardsResourceGenerator
from tests.resources.generators.utils import BaseGeneratorTest
@tag("resource")
class SortingNetworkCardsResourceGeneratorTest(BaseGeneratorTest):
    """Parameter and subtitle tests for SortingNetworkCardsResourceGenerator.

    The 14 subtitle tests previously repeated the same 5-line build/assert
    pattern; it is factored into ``assert_subtitle`` (names of all test
    methods are unchanged, so discovery and CI reporting are unaffected).
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.language = "en"
        self.base_valid_query = QueryDict("type=letters&paper_size=a4")

    def assert_subtitle(self, query_string, expected_subtitle):
        """Build a generator from ``query_string`` and check its subtitle."""
        generator = SortingNetworkCardsResourceGenerator(QueryDict(query_string))
        self.assertEqual(generator.subtitle, expected_subtitle)

    def test_type_values(self):
        generator = SortingNetworkCardsResourceGenerator(self.base_valid_query)
        self.run_parameter_smoke_tests(generator, "type")

    def test_subtitle_letters_a4(self):
        self.assert_subtitle("type=letters&paper_size=a4", "Letters - a4")

    def test_subtitle_letters_letter(self):
        self.assert_subtitle("type=letters&paper_size=letter", "Letters - letter")

    def test_subtitle_words_a4(self):
        self.assert_subtitle("type=words&paper_size=a4", "Words - a4")

    def test_subtitle_words_letter(self):
        self.assert_subtitle("type=words&paper_size=letter", "Words - letter")

    def METHOD_NAME(self):
        self.assert_subtitle(
            "type=small_numbers&paper_size=a4",
            "Small numbers (1 to 10) - a4"
        )

    def test_subtitle_small_numbers_letter(self):
        self.assert_subtitle(
            "type=small_numbers&paper_size=letter",
            "Small numbers (1 to 10) - letter"
        )

    def test_subtitle_large_numbers_a4(self):
        self.assert_subtitle(
            "type=large_numbers&paper_size=a4",
            "Large numbers (7 digit numbers) - a4"
        )

    def test_subtitle_large_numbers_letter(self):
        self.assert_subtitle(
            "type=large_numbers&paper_size=letter",
            "Large numbers (7 digit numbers) - letter"
        )

    def test_subtitle_fractions_a4(self):
        self.assert_subtitle("type=fractions&paper_size=a4", "Fractions - a4")

    def test_subtitle_fractions_letter(self):
        self.assert_subtitle("type=fractions&paper_size=letter", "Fractions - letter")

    def test_subtitle_butterfly_a4(self):
        self.assert_subtitle(
            "type=butterfly&paper_size=a4",
            "Butterfly life cycle - a4"
        )

    def test_subtitle_butterfly_letter(self):
        self.assert_subtitle(
            "type=butterfly&paper_size=letter",
            "Butterfly life cycle - letter"
        )

    def test_subtitle_riding_hood_a4(self):
        self.assert_subtitle(
            "type=riding_hood&paper_size=a4",
            "Little Red Riding Hood - a4"
        )

    def test_subtitle_riding_hood_letter(self):
        self.assert_subtitle(
            "type=riding_hood&paper_size=letter",
            "Little Red Riding Hood - letter"
        )
4,732 | test post phase project archived | import pytest
import rules
from adhocracy4.projects.enums import Access
from adhocracy4.test.helpers import freeze_phase
from adhocracy4.test.helpers import freeze_post_phase
from adhocracy4.test.helpers import freeze_pre_phase
from adhocracy4.test.helpers import setup_phase
from adhocracy4.test.helpers import setup_users
from meinberlin.apps.documents import phases
from meinberlin.test.helpers import setup_group_members
perm_name = "meinberlin_documents.add_chapter"
def test_perm_exists():
    """The add-chapter permission must be registered with django-rules."""
    assert rules.perm_exists(perm_name)
@pytest.mark.django_db
def test_pre_phase(phase_factory, user_factory, group_factory, user):
    """Before the phase only project-group members and initiators may add chapters."""
    phase, module, project, _ = setup_phase(phase_factory, None, phases.CommentPhase)
    anonymous, moderator, initiator = setup_users(project)
    project, group_member_in_org, group_member_in_pro, group_member_out = setup_group_members(
        project, group_factory, user_factory
    )
    assert project.access == Access.PUBLIC
    with freeze_pre_phase(phase):
        # Everyone without a project-group membership is denied.
        for denied_user in (anonymous, user, group_member_in_org, group_member_out, moderator):
            assert not rules.has_perm(perm_name, denied_user, module)
        for allowed_user in (group_member_in_pro, initiator):
            assert rules.has_perm(perm_name, allowed_user, module)
@pytest.mark.django_db
def test_phase_active(phase_factory, user_factory, group_factory, user):
    """During the phase only project-group members and initiators may add chapters."""
    phase, module, project, _ = setup_phase(phase_factory, None, phases.CommentPhase)
    anonymous, moderator, initiator = setup_users(project)
    project, group_member_in_org, group_member_in_pro, group_member_out = setup_group_members(
        project, group_factory, user_factory
    )
    assert project.access == Access.PUBLIC
    with freeze_phase(phase):
        # Everyone without a project-group membership is denied.
        for denied_user in (anonymous, user, group_member_in_org, group_member_out, moderator):
            assert not rules.has_perm(perm_name, denied_user, module)
        for allowed_user in (group_member_in_pro, initiator):
            assert rules.has_perm(perm_name, allowed_user, module)
@pytest.mark.django_db
def test_phase_active_project_private(phase_factory, user_factory, group_factory, user):
    """In a private project even participants may not add chapters."""
    phase, module, project, _ = setup_phase(
        phase_factory, None, phases.CommentPhase, module__project__access=Access.PRIVATE
    )
    anonymous, moderator, initiator = setup_users(project)
    participant = user_factory()
    project.participants.add(participant)
    project, group_member_in_org, group_member_in_pro, group_member_out = setup_group_members(
        project, group_factory, user_factory
    )
    assert project.access == Access.PRIVATE
    with freeze_phase(phase):
        # Participants are readers here, not chapter authors.
        for denied_user in (
            anonymous,
            user,
            participant,
            group_member_in_org,
            group_member_out,
            moderator,
        ):
            assert not rules.has_perm(perm_name, denied_user, module)
        for allowed_user in (group_member_in_pro, initiator):
            assert rules.has_perm(perm_name, allowed_user, module)
@pytest.mark.django_db
def test_phase_active_project_semipublic(
    phase_factory, user_factory, group_factory, user
):
    """In a semi-public project even participants may not add chapters."""
    phase, module, project, _ = setup_phase(
        phase_factory,
        None,
        phases.CommentPhase,
        module__project__access=Access.SEMIPUBLIC,
    )
    anonymous, moderator, initiator = setup_users(project)
    participant = user_factory()
    project.participants.add(participant)
    project, group_member_in_org, group_member_in_pro, group_member_out = setup_group_members(
        project, group_factory, user_factory
    )
    assert project.access == Access.SEMIPUBLIC
    with freeze_phase(phase):
        # Participants are readers here, not chapter authors.
        for denied_user in (
            anonymous,
            user,
            participant,
            group_member_in_org,
            group_member_out,
            moderator,
        ):
            assert not rules.has_perm(perm_name, denied_user, module)
        for allowed_user in (group_member_in_pro, initiator):
            assert rules.has_perm(perm_name, allowed_user, module)
@pytest.mark.django_db
def test_phase_active_project_draft(phase_factory, user_factory, group_factory, user):
    """Draft projects accept chapters only from project-group members and initiators."""
    phase, module, project, _ = setup_phase(
        phase_factory, None, phases.CommentPhase, module__project__is_draft=True
    )
    anonymous, moderator, initiator = setup_users(project)
    project, group_member_in_org, group_member_in_pro, group_member_out = setup_group_members(
        project, group_factory, user_factory
    )
    assert project.is_draft
    with freeze_phase(phase):
        for denied_user in (anonymous, user, group_member_in_org, group_member_out, moderator):
            assert not rules.has_perm(perm_name, denied_user, module)
        for allowed_user in (group_member_in_pro, initiator):
            assert rules.has_perm(perm_name, allowed_user, module)
@pytest.mark.django_db
def test_post_phase_project_archived(phase_factory, user_factory, group_factory, user):
    """
    After the phase ends, in an archived project, only project-group members
    and initiators keep the add-chapter permission.

    NOTE(review): the original name ``METHOD_NAME`` was a masked placeholder.
    pytest only collects functions named ``test_*``, so under that name this
    test would silently never run; restored the descriptive ``test_`` name.
    """
    phase, module, project, _ = setup_phase(
        phase_factory, None, phases.CommentPhase, module__project__is_archived=True
    )
    anonymous, moderator, initiator = setup_users(project)
    (
        project,
        group_member_in_org,
        group_member_in_pro,
        group_member_out,
    ) = setup_group_members(project, group_factory, user_factory)
    assert project.is_archived
    with freeze_post_phase(phase):
        assert not rules.has_perm(perm_name, anonymous, module)
        assert not rules.has_perm(perm_name, user, module)
        assert not rules.has_perm(perm_name, group_member_in_org, module)
        assert not rules.has_perm(perm_name, group_member_out, module)
        assert not rules.has_perm(perm_name, moderator, module)
        assert rules.has_perm(perm_name, group_member_in_pro, module)
        assert rules.has_perm(perm_name, initiator, module)
from PyQt5.QtCore import QAbstractTableModel, Qt, QModelIndex, pyqtSignal
from PyQt5.QtGui import QFont
from PyQt5.QtWidgets import qApp
from urh import settings
from urh.signalprocessing.FieldType import FieldType
from urh.signalprocessing.MessageType import MessageType
from urh.signalprocessing.ProtocoLabel import ProtocolLabel
from urh.simulator.SimulatorProtocolLabel import SimulatorProtocolLabel
from urh.util import util
class SimulatorMessageFieldModel(QAbstractTableModel):
    """
    Qt table model exposing the protocol labels of the active simulator
    message type as editable rows with four columns:
    Name, Display format, Value type, Value.
    """

    header_labels = ['Name', 'Display format', 'Value type', 'Value']

    # Emitted after a label was edited (setData / set_value_type_index) so
    # other views and the simulator controller can refresh.
    protocol_label_updated = pyqtSignal(SimulatorProtocolLabel)

    def __init__(self, controller, parent=None):
        super().__init__(parent)
        self.controller = controller  # type: SimulatorTabController
        self.message_type = None  # type: MessageType

    def update(self):
        """Reload all rows from the controller's currently active item."""
        self.beginResetModel()
        self.message_type = self.controller.active_item.message_type
        self.endResetModel()

    def columnCount(self, parent: QModelIndex=None, *args, **kwargs):
        return len(self.header_labels)

    def rowCount(self, parent: QModelIndex=None, *args, **kwargs):
        # One row per protocol label of the active message type.
        return len(self.message_type) if self.message_type is not None else 0

    def headerData(self, section, orientation, role=Qt.DisplayRole):
        if role == Qt.DisplayRole and orientation == Qt.Horizontal:
            return self.header_labels[section]
        elif role == Qt.TextAlignmentRole:
            return Qt.AlignLeft
        return super().headerData(section, orientation, role)

    def data(self, index: QModelIndex, role=Qt.DisplayRole):
        """
        Return display/edit/styling data for the label at ``index.row()``.

        Value-type indices as handled below: 0 = constant bit value read from
        the message, 1 = no stored value (shown as "-"), 2 = formula,
        3 = external program, 4 = random range.
        """
        i, j = index.row(), index.column()
        lbl = self.message_type[i]  # type: SimulatorProtocolLabel
        if role == Qt.DisplayRole:
            if j == 0:
                return lbl.name
            elif j == 1:
                return ProtocolLabel.DISPLAY_FORMATS[lbl.display_format_index]
            elif j == 2:
                # Checksum labels have a fixed, non-selectable value type.
                if lbl.is_checksum_label:
                    return "Checksum"
                else:
                    return lbl.VALUE_TYPES[lbl.value_type_index]
            elif j == 3:
                if lbl.value_type_index == 0:
                    # Constant value: render the label's bit slice of the message.
                    message = lbl.parent()
                    try:
                        data = message.plain_bits[lbl.start:lbl.end]
                    except IndexError:
                        return None
                    return util.convert_bits_to_string(data, lbl.display_format_index, pad_zeros=True)
                elif lbl.value_type_index == 1:
                    return "-"
                elif lbl.value_type_index == 2:
                    return lbl.formula
                elif lbl.value_type_index == 3:
                    return lbl.external_program
                elif lbl.value_type_index == 4:
                    return "Range (Decimal): " + str(lbl.random_min) + " - " + str(lbl.random_max)
        elif role == Qt.EditRole:
            if j == 0:
                return lbl.name
            elif j == 1:
                return lbl.display_format_index
            elif j == 2:
                return lbl.value_type_index
            elif j == 3:
                if lbl.value_type_index == 2:
                    return lbl.formula
                elif lbl.value_type_index == 3:
                    return lbl.external_program
                elif lbl.value_type_index == 4:
                    # Editor for random ranges needs both bounds.
                    return [lbl.random_min, lbl.random_max]
        elif role == Qt.FontRole:
            if j == 0:
                # Italicize names that are not backed by a known field type.
                font = QFont()
                font.setItalic(lbl.field_type is None)
                return font
            elif j == 2 and self.link_index(index):
                font = QFont()
                font.setUnderline(True)
                return font
        elif role == Qt.BackgroundColorRole:
            if j == 0:
                return settings.LABEL_COLORS[lbl.color_index]
            elif j == 3:
                # Highlight invalid formulas.
                if (lbl.value_type_index == 2 and
                        not self.controller.sim_expression_parser.validate_expression(lbl.formula)[0]):
                    return settings.ERROR_BG_COLOR
        elif role == Qt.TextColorRole:
            if self.link_index(index):
                return qApp.palette().link().color()

    def link_index(self, index: QModelIndex):
        """Return True if the cell should be rendered as a link (checksum type cell)."""
        try:
            lbl = self.message_type[index.row()]  # type: SimulatorProtocolLabel
            if index.column() == 2 and lbl.is_checksum_label:
                return True
        except:
            # NOTE(review): bare except is overly broad; an IndexError guard
            # appears to be the intent here.
            return False
        return False

    def setData(self, index: QModelIndex, value, role=None):
        """Write an edited cell back to the label and notify listeners."""
        if role == Qt.EditRole:
            i, j = index.row(), index.column()
            label = self.message_type[i]  # type: SimulatorProtocolLabel
            if j == 0:
                label.name = value
                # Renaming may re-bind the label to a known field type.
                ft = self.controller.field_types_by_caption.get(value, FieldType("Custom", FieldType.Function.CUSTOM))
                label.field_type = ft
            elif j == 1:
                label.display_format_index = value
            elif j == 2:
                label.value_type_index = value
            elif j == 3:
                if label.value_type_index == 0:
                    # Constant value: write the bits back into the message.
                    message = label.parent()
                    try:
                        bits = util.convert_string_to_bits(value, label.display_format_index,
                                                           target_num_bits=label.end-label.start)
                        message.plain_bits[label.start:label.end] = bits
                    except ValueError:
                        # Unparseable user input is silently ignored.
                        pass
                elif label.value_type_index == 2:
                    label.formula = value
                elif label.value_type_index == 3:
                    label.external_program = value
                elif label.value_type_index == 4:
                    label.random_min = value[0]
                    label.random_max = value[1]
            self.dataChanged.emit(self.index(i, 0),
                                  self.index(i, self.columnCount()))
            self.protocol_label_updated.emit(label)
        return True

    def flags(self, index: QModelIndex):
        row, col = index.row(), index.column()
        label = self.message_type[row]  # type: SimulatorProtocolLabel
        # Checksum labels: value type cannot be changed at all.
        if col == 2 and label.is_checksum_label:
            return Qt.ItemIsSelectable
        flags = Qt.ItemIsEnabled | Qt.ItemIsSelectable
        # Value column is not editable when the label has no stored value (type 1).
        if not(col == 3 and label.value_type_index == 1):
            flags |= Qt.ItemIsEditable
        return flags

    def METHOD_NAME(self, index: int):
        """
        Delete the label at row ``index`` from the simulator configuration.

        NOTE(review): ``METHOD_NAME`` looks like a masked identifier; judging
        by the behavior the original name was likely ``remove_label_at``.
        """
        try:
            label = self.message_type[index]
            self.controller.simulator_config.delete_items([label])
        except IndexError:
            pass

    def set_value_type_index(self, rows: list, value_type_index: int):
        """Set the value type for all given rows (checksum labels are skipped)."""
        for row in rows:
            label = self.message_type[row]
            if not label.is_checksum_label:
                label.value_type_index = value_type_index
                self.protocol_label_updated.emit(label)
        self.update()
import os
from collections.abc import AsyncGenerator, Generator, Iterator
from functools import cached_property
from pathlib import Path
from fastapi import FastAPI
from models_library.projects_nodes import NodeID
from models_library.services import RunID
from servicelib.docker_constants import PREFIX_DYNAMIC_SIDECAR_VOLUMES
from ..core.docker_utils import get_volume_by_label
from ..core.settings import ApplicationSettings
def _ensure_path(path: Path) -> Path:
    """Make sure directory *path* (including parents) exists and return it."""
    os.makedirs(path, exist_ok=True)
    return path
def _name_from_full_path(path: Path) -> str:
    """Flatten a path into a name fragment: /path/to/a/file -> _path_to_a_file."""
    return "_".join(str(path).split(os.sep))
class MountedVolumes:
    """
    The inputs and outputs directories are created and by the dynamic-sidecar
    and mounted into all started containers at the specified path.

    Locally, on its disk, the dynamic-sidecar ensures the `inputs` and
    `outputs` directories are created in the external volume of name
    `dy-sidecar_UUID` in the `/dy-volumes` path.
    Eg: - /dy-sidecar_UUID_inputs:/inputs-dir
        - /dy-sidecar_UUID_outputs:/outputs-dir
    """

    def __init__(
        self,
        run_id: RunID,
        node_id: NodeID,
        inputs_path: Path,
        outputs_path: Path,
        state_paths: list[Path],
        state_exclude: set[str],
        compose_namespace: str,
        dy_volumes: Path,
    ) -> None:
        self.run_id: RunID = run_id
        self.node_id: NodeID = node_id
        self.inputs_path: Path = inputs_path
        self.outputs_path: Path = outputs_path
        self.state_paths: list[Path] = state_paths
        self.state_exclude: set[str] = state_exclude
        self.compose_namespace = compose_namespace
        self._dy_volumes = dy_volumes
        # Eagerly create all local directories so they already exist when the
        # user-service containers mount them.
        self._ensure_directories()

    @cached_property
    def volume_name_inputs(self) -> str:
        """Same name as the namespace, to easily track components"""
        # The flattened path fragment is reversed ([::-1]) — presumably so the
        # distinguishing part survives docker name-length truncation; confirm.
        return (
            f"{PREFIX_DYNAMIC_SIDECAR_VOLUMES}_{self.run_id}_{self.node_id}"
            f"_{_name_from_full_path(self.inputs_path)[::-1]}"
        )

    @cached_property
    def volume_name_outputs(self) -> str:
        """Docker volume name for the outputs directory (same scheme as inputs)."""
        return (
            f"{PREFIX_DYNAMIC_SIDECAR_VOLUMES}_{self.run_id}_{self.node_id}"
            f"_{_name_from_full_path(self.outputs_path)[::-1]}"
        )

    def volume_name_state_paths(self) -> Generator[str, None, None]:
        """Yield a docker volume name for every configured state path."""
        for state_path in self.state_paths:
            yield (
                f"{PREFIX_DYNAMIC_SIDECAR_VOLUMES}_{self.run_id}_{self.node_id}"
                f"_{_name_from_full_path(state_path)[::-1]}"
            )

    @cached_property
    def disk_inputs_path(self) -> Path:
        # Local on-disk location backing the inputs volume (created on access).
        return _ensure_path(self._dy_volumes / self.inputs_path.relative_to("/"))

    @cached_property
    def disk_outputs_path(self) -> Path:
        # Local on-disk location backing the outputs volume (created on access).
        return _ensure_path(self._dy_volumes / self.outputs_path.relative_to("/"))

    def METHOD_NAME(self) -> Iterator[Path]:
        """
        Yield the local on-disk location for each state path (creating them).

        NOTE(review): ``METHOD_NAME`` is a masked identifier; based on
        ``all_disk_paths_iter`` below, the original name was likely
        ``disk_state_paths_iter``.
        """
        for state_path in self.state_paths:
            yield _ensure_path(self._dy_volumes / state_path.relative_to("/"))

    def all_disk_paths_iter(self) -> Iterator[Path]:
        # PC: keeps iterator to follow same style as disk_state_paths but IMO it is overreaching
        yield self.disk_inputs_path
        yield self.disk_outputs_path
        yield from self.METHOD_NAME()

    def _ensure_directories(self) -> None:
        """
        Creates directories on its file system, these will be mounted by the user services.
        """
        _ensure_path(self._dy_volumes)
        for path in self.all_disk_paths_iter():
            _ensure_path(path)

    @staticmethod
    async def _get_bind_path_from_label(label: str, run_id: RunID) -> Path:
        """Resolve a docker volume's host mountpoint from its label."""
        volume_details = await get_volume_by_label(label=label, run_id=run_id)
        return Path(volume_details["Mountpoint"])

    async def get_inputs_docker_volume(self, run_id: RunID) -> str:
        """Return the ``host_path:container_path`` bind string for inputs."""
        bind_path: Path = await self._get_bind_path_from_label(
            self.volume_name_inputs, run_id
        )
        return f"{bind_path}:{self.inputs_path}"

    async def get_outputs_docker_volume(self, run_id: RunID) -> str:
        """Return the ``host_path:container_path`` bind string for outputs."""
        bind_path: Path = await self._get_bind_path_from_label(
            self.volume_name_outputs, run_id
        )
        return f"{bind_path}:{self.outputs_path}"

    async def iter_state_paths_to_docker_volumes(
        self, run_id: RunID
    ) -> AsyncGenerator[str, None]:
        """Yield a ``host_path:container_path`` bind string per state path."""
        for volume_state_path, state_path in zip(
            self.volume_name_state_paths(), self.state_paths, strict=True
        ):
            bind_path: Path = await self._get_bind_path_from_label(
                volume_state_path, run_id
            )
            yield f"{bind_path}:{state_path}"
def setup_mounted_fs(app: FastAPI) -> MountedVolumes:
    """Build the application's MountedVolumes from settings and cache it on ``app.state``."""
    settings: ApplicationSettings = app.state.settings
    mounted_volumes = MountedVolumes(
        run_id=settings.DY_SIDECAR_RUN_ID,
        node_id=settings.DY_SIDECAR_NODE_ID,
        inputs_path=settings.DY_SIDECAR_PATH_INPUTS,
        outputs_path=settings.DY_SIDECAR_PATH_OUTPUTS,
        state_paths=settings.DY_SIDECAR_STATE_PATHS,
        state_exclude=settings.DY_SIDECAR_STATE_EXCLUDE,
        compose_namespace=settings.DYNAMIC_SIDECAR_COMPOSE_NAMESPACE,
        dy_volumes=settings.DYNAMIC_SIDECAR_DY_VOLUMES_MOUNT_DIR,
    )
    app.state.mounted_volumes = mounted_volumes
    return mounted_volumes
from typing import Any, Dict, List, Optional
import torch
import wandb
from tango.common.exceptions import ConfigurationError
from tango.integrations.torch.train_callback import TrainCallback
from tango.integrations.torch.util import peak_gpu_memory
from .util import check_environment
from .workspace import WandbWorkspace
@TrainCallback.register("wandb::log")
class WandbTrainCallback(TrainCallback):
    """
    A torch :class:`~tango.integrations.torch.TrainCallback` for use with
    the :class:`~tango.integrations.torch.TorchTrainStep` that logs training and
    validation metrics to W&B.

    This can be used with any :class:`~tango.workspace.Workspace` implementation,
    including :class:`WandbWorkspace`.

    .. tip::
        Registered as a :class:`~tango.integrations.torch.TrainCallback`
        under the name "wandb::log".

    .. important::
        When this callback is used with the :class:`WandbWorkspace` it will log metrics
        to the same W&B project that the workspace uses. The ``group`` and ``name``
        parameters will also automatically be set, so a :class:`~tango.common.exceptions.ConfigurationError`
        will be raised if any of ``project``, ``entity``, ``group``, or ``name`` are set in this callback.

    :param project:
        W&B project to associated this run with.
    :param entity:
        W&B entity (user or organization) to associated this run with.
    :param group:
        W&B group to associated this run with.
    :param name:
        Set the name of the run in W&B. If not set, the default will be the name of the step.
    :param notes:
        Arbitrary notes to add in W&B to this run.
    :param tags:
        Arbitrary tags to add in W&B to this run.
    :param watch_model:
        If ``True``, ``wandb.watch()`` is called to collect gradients and other information
        about the model throughout training.
        See `docs.wandb.ai/ref/python/watch <https://docs.wandb.ai/ref/python/watch>`_.
    :param wandb_config:
        Arbitrary configuration fields to set in W&B for this run.
        See `docs.wandb.ai/guides/track/config <https://docs.wandb.ai/guides/track/config>`_.
    """

    def __init__(
        self,
        *args,
        project: Optional[str] = None,
        entity: Optional[str] = None,
        group: Optional[str] = None,
        name: Optional[str] = None,
        notes: Optional[str] = None,
        tags: Optional[List[str]] = None,
        watch_model: bool = False,
        wandb_config: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        super().__init__(*args, **kwargs)
        if self.is_local_main_process:
            check_environment()
        # When a W&B run/workspace already exists, the identifying fields must
        # come from it and may not be overridden here.
        if isinstance(self.workspace, WandbWorkspace) or wandb.run is not None:
            err_msg_template = "Cannot set '{var_name}' in WandbTrainCallback "
            if isinstance(self.workspace, WandbWorkspace):
                err_msg_template += "since it has already been set from the WandbWorkspace."
            else:
                err_msg_template += "since a W&B run has already been initialized."
            for var, var_name in [
                (project, "project"),
                (entity, "entity"),
                (group, "group"),
                (name, "name"),
            ]:
                if var is not None:
                    raise ConfigurationError(err_msg_template.format(var_name=var_name))
        self.project = (
            project if not isinstance(self.workspace, WandbWorkspace) else self.workspace.project
        )
        self.entity = (
            entity if not isinstance(self.workspace, WandbWorkspace) else self.workspace.entity
        )
        self.group = group or self.step_id
        self.notes = notes or self._get_default_notes()
        self.tags = tags
        self.watch_model = watch_model
        self.wandb_config = self.train_config.as_dict()
        # worker_id is per-process and would needlessly differentiate configs.
        del self.wandb_config["worker_id"]
        if wandb_config is not None:
            self.wandb_config.update(wandb_config)
        if wandb.run is None:
            self.wandb_config["job_type"] = "train_metrics"
        self.run_name: str = name or self.step_name or "train"
        if self.train_config.is_distributed:
            self.run_name += f" (rank {self.train_config.worker_id})"
        self.run_id: str = (
            wandb.run.id  # type: ignore[attr-defined]
            if wandb.run is not None
            else self.step_id + f"-rank{self.train_config.worker_id}"
        )
        self.resume: Optional[str] = None
        self.should_finalize_run: bool = (
            wandb.run is None
        )  # if we have to start out own W&B run, we need to finish it

    def state_dict(self) -> Dict[str, Any]:
        """No state of its own to checkpoint."""
        return {}

    def load_state_dict(self, state_dict: Dict[str, Any]) -> None:
        """Restoring from a checkpoint means the W&B run may be resumed."""
        self.resume = "allow"

    def pre_train_loop(self) -> None:
        """Start (or attach to) the W&B run and log initial GPU memory."""
        if wandb.run is None:
            if self.run_id is None:
                self.run_id = self.step_id + f"-rank{self.train_config.worker_id}"
            # Initialize a new W&B run.
            wandb.init(
                id=self.run_id,
                dir=str(self.work_dir),
                project=self.project,
                entity=self.entity,
                group=self.group,
                name=self.run_name,
                notes=self.notes,
                config=self.wandb_config,
                tags=self.tags,
                job_type="train_metrics",
            )
        else:
            # We are already running inside of a W&B run, possibly because
            # we're using the WandbWorkspace.
            wandb.config.update(self.wandb_config)
            if self.tags:
                wandb.run.tags = (wandb.run.tags or tuple()) + tuple(self.tags)
            if self.notes:
                wandb.run.notes = self.notes
        if self.watch_model:
            wandb.watch(self.training_engine.model)
        # Log GPU memory statistics.
        if torch.cuda.is_available():
            torch.cuda.reset_peak_memory_stats()
        peak_gpu_mbs = peak_gpu_memory()
        if self.is_local_main_process:
            metrics = {f"sys/worker{rank}_peak_gpu_mem": mbs for rank, mbs in peak_gpu_mbs.items()}
            metrics["epoch"] = 0
            wandb.log(metrics, step=0)

    def post_train_loop(self, step: int, epoch: int) -> None:
        """Finish the W&B run, but only if this callback started it."""
        if self.should_finalize_run:
            wandb.finish()

    def METHOD_NAME(
        self, step: int, epoch: int, batch_loss: float, batch_outputs: List[Dict[str, Any]]
    ) -> None:
        """
        Log per-batch loss, learning rate and peak GPU memory.

        NOTE(review): ``METHOD_NAME`` is a masked identifier; given the
        signature this is the per-batch hook (likely ``log_batch``).
        """
        peak_gpu_mbs = peak_gpu_memory()
        if self.is_local_main_process:
            metrics = {
                "train/loss": batch_loss,
                "train/lr": self.training_engine.optimizer.param_groups[0]["lr"],
                "epoch": epoch,
            }
            metrics.update(
                {f"sys/worker{rank}_peak_gpu_mem": mbs for rank, mbs in peak_gpu_mbs.items()}
            )
            wandb.log(
                metrics,
                step=step + 1,
            )

    def post_val_loop(
        self, step: int, epoch: int, val_metric: float, best_val_metric: float
    ) -> None:
        """Log the current and best validation metric after a validation loop."""
        if self.is_local_main_process:
            wandb.log(
                {
                    f"val/{self.train_config.val_metric_name}": val_metric,
                    f"val/best_{self.train_config.val_metric_name}": best_val_metric,
                    "epoch": epoch,
                },
                step=step + 1,
            )

    def _get_default_notes(self) -> str:
        """Build the default W&B notes string for this worker's metrics run."""
        notes = (
            f'Metrics for Tango step "{self.step_name}" from worker {self.train_config.worker_id}.'
        )
        if isinstance(self.workspace, WandbWorkspace):
            notes += f"\nMain run for step: {self.workspace.wandb_project_url}/runs/{self.step_id}/overview"
        return notes
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.mgmt.core.exceptions import ARMErrorFormat
from ... import models as _models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class UsageModelsOperations:
    """UsageModelsOperations async operations.

    You should not instantiate this class directly. Instead, you should create a Client instance that
    instantiates it for you and attaches it as an attribute.

    :ivar models: Alias to model classes used in this operation group.
    :type models: ~azure.mgmt.storagecache.models
    :param client: Client for service requests.
    :param config: Configuration of service client.
    :param serializer: An object model serializer.
    :param deserializer: An object model deserializer.

    NOTE(review): this class is auto-generated by AutoRest — prefer
    regenerating over hand-editing. ``METHOD_NAME`` below is a masked
    identifier; the operation is the standard ARM ``list``.
    """

    models = _models

    def __init__(self, client, config, serializer, deserializer) -> None:
        self._client = client
        self._serialize = serializer
        self._deserialize = deserializer
        self._config = config

    def METHOD_NAME(
        self,
        **kwargs
    ) -> AsyncIterable["_models.UsageModelsResult"]:
        """Get the list of Cache Usage Models available to this subscription.

        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either UsageModelsResult or the result of cls(response)
        :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.storagecache.models.UsageModelsResult]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.UsageModelsResult"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2019-11-01"
        accept = "application/json"

        def prepare_request(next_link=None):
            # Build the GET request; the first page uses the operation URL,
            # subsequent pages use the server-provided next_link verbatim.
            # Construct headers
            header_parameters = {}  # type: Dict[str, Any]
            header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
            if not next_link:
                # Construct URL
                url = self.METHOD_NAME.metadata['url']  # type: ignore
                path_format_arguments = {
                    'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
                }
                url = self._client.format_url(url, **path_format_arguments)
                # Construct parameters
                query_parameters = {}  # type: Dict[str, Any]
                query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
                request = self._client.get(url, query_parameters, header_parameters)
            else:
                url = next_link
                query_parameters = {}  # type: Dict[str, Any]
                request = self._client.get(url, query_parameters, header_parameters)
            return request

        async def extract_data(pipeline_response):
            # Deserialize one page and hand (next_link, items) to the pager.
            deserialized = self._deserialize('UsageModelsResult', pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)
            return deserialized.next_link or None, AsyncList(list_of_elem)

        async def get_next(next_link=None):
            # Execute one page request through the client pipeline.
            request = prepare_request(next_link)
            pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
            response = pipeline_response.http_response
            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response, error_format=ARMErrorFormat)
            return pipeline_response

        return AsyncItemPaged(
            get_next, extract_data
        )
    METHOD_NAME.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.StorageCache/usageModels'}  # type: ignore
r"""
This code was generated by
___ _ _ _ _ _ _ ____ ____ ____ _ ____ ____ _ _ ____ ____ ____ ___ __ __
| | | | | | | | | __ | | |__| | __ | __ |___ |\ | |___ |__/ |__| | | | |__/
| |_|_| | |___ | |__| |__| | | | |__] |___ | \| |___ | \ | | | |__| | \
Twilio - Studio
This is the public Twilio REST API.
NOTE: This class is auto generated by OpenAPI Generator.
https://openapi-generator.tech
Do not edit the class manually.
"""
from typing import Any, Dict, Optional
from twilio.base.instance_context import InstanceContext
from twilio.base.instance_resource import InstanceResource
from twilio.base.list_resource import ListResource
from twilio.base.version import Version
class ExecutionStepContextInstance(InstanceResource):
    """
    :ivar account_sid: The SID of the [Account](https://www.twilio.com/docs/iam/api/account) that created the ExecutionStepContext resource.
    :ivar context: The current state of the Flow's Execution. As a flow executes, we save its state in this context. We save data that your widgets can access as variables in configuration fields or in text areas as variable substitution.
    :ivar execution_sid: The SID of the context's Execution resource.
    :ivar flow_sid: The SID of the Flow.
    :ivar step_sid: The SID of the Step that the context is associated with.
    :ivar url: The absolute URL of the resource.
    """

    def __init__(
        self,
        version: Version,
        payload: Dict[str, Any],
        flow_sid: str,
        execution_sid: str,
        step_sid: str,
    ):
        super().__init__(version)

        self.account_sid: Optional[str] = payload.get("account_sid")
        self.context: Optional[Dict[str, object]] = payload.get("context")
        self.execution_sid: Optional[str] = payload.get("execution_sid")
        self.flow_sid: Optional[str] = payload.get("flow_sid")
        self.step_sid: Optional[str] = payload.get("step_sid")
        self.url: Optional[str] = payload.get("url")

        # Identifiers needed to rebuild the resource path for later requests.
        self._solution = {
            "flow_sid": flow_sid,
            "execution_sid": execution_sid,
            "step_sid": step_sid,
        }
        self._context: Optional[ExecutionStepContextContext] = None

    @property
    def METHOD_NAME(self) -> "ExecutionStepContextContext":
        """
        Generate an instance context for the instance, the context is capable of
        performing various actions. All instance actions are proxied to the context

        :returns: ExecutionStepContextContext for this ExecutionStepContextInstance

        NOTE(review): ``METHOD_NAME`` is a masked identifier; in the published
        Twilio SDK this lazily-built proxy property is named ``_proxy``.
        """
        if self._context is None:
            self._context = ExecutionStepContextContext(
                self._version,
                flow_sid=self._solution["flow_sid"],
                execution_sid=self._solution["execution_sid"],
                step_sid=self._solution["step_sid"],
            )
        return self._context

    def fetch(self) -> "ExecutionStepContextInstance":
        """
        Fetch the ExecutionStepContextInstance

        :returns: The fetched ExecutionStepContextInstance
        """
        return self.METHOD_NAME.fetch()

    async def fetch_async(self) -> "ExecutionStepContextInstance":
        """
        Asynchronous coroutine to fetch the ExecutionStepContextInstance

        :returns: The fetched ExecutionStepContextInstance
        """
        return await self.METHOD_NAME.fetch_async()

    def __repr__(self) -> str:
        """
        Provide a friendly representation

        :returns: Machine friendly representation
        """
        context = " ".join("{}={}".format(k, v) for k, v in self._solution.items())
        return "<Twilio.Studio.V2.ExecutionStepContextInstance {}>".format(context)
class ExecutionStepContextContext(InstanceContext):
    """Context for fetching a single Studio execution step's context resource."""

    def __init__(
        self, version: Version, flow_sid: str, execution_sid: str, step_sid: str
    ):
        """
        Initialize the ExecutionStepContextContext

        :param version: Version that contains the resource
        :param flow_sid: The SID of the Flow with the Step to fetch.
        :param execution_sid: The SID of the Execution resource with the Step to fetch.
        :param step_sid: The SID of the Step to fetch.
        """
        super().__init__(version)

        # Path Solution
        self._solution = {
            "flow_sid": flow_sid,
            "execution_sid": execution_sid,
            "step_sid": step_sid,
        }
        self._uri = "/Flows/{flow_sid}/Executions/{execution_sid}/Steps/{step_sid}/Context".format(
            **self._solution
        )

    def _wrap(self, payload) -> ExecutionStepContextInstance:
        """Build an ExecutionStepContextInstance from a raw API payload."""
        return ExecutionStepContextInstance(
            self._version,
            payload,
            flow_sid=self._solution["flow_sid"],
            execution_sid=self._solution["execution_sid"],
            step_sid=self._solution["step_sid"],
        )

    def fetch(self) -> ExecutionStepContextInstance:
        """
        Fetch the ExecutionStepContextInstance

        :returns: The fetched ExecutionStepContextInstance
        """
        return self._wrap(self._version.fetch(method="GET", uri=self._uri))

    async def fetch_async(self) -> ExecutionStepContextInstance:
        """
        Asynchronous coroutine to fetch the ExecutionStepContextInstance

        :returns: The fetched ExecutionStepContextInstance
        """
        return self._wrap(await self._version.fetch_async(method="GET", uri=self._uri))

    def __repr__(self) -> str:
        """
        Provide a friendly representation

        :returns: Machine friendly representation
        """
        details = " ".join(f"{key}={value}" for key, value in self._solution.items())
        return f"<Twilio.Studio.V2.ExecutionStepContextContext {details}>"
class ExecutionStepContextList(ListResource):
    """List resource whose only role is constructing ExecutionStepContextContext objects."""

    def __init__(
        self, version: Version, flow_sid: str, execution_sid: str, step_sid: str
    ):
        """
        Initialize the ExecutionStepContextList

        :param version: Version that contains the resource
        :param flow_sid: The SID of the Flow with the Step to fetch.
        :param execution_sid: The SID of the Execution resource with the Step to fetch.
        :param step_sid: The SID of the Step to fetch.
        """
        super().__init__(version)

        # Path Solution
        self._solution = {
            "flow_sid": flow_sid,
            "execution_sid": execution_sid,
            "step_sid": step_sid,
        }

    def get(self) -> ExecutionStepContextContext:
        """
        Constructs a ExecutionStepContextContext
        """
        return ExecutionStepContextContext(
            self._version,
            flow_sid=self._solution["flow_sid"],
            execution_sid=self._solution["execution_sid"],
            step_sid=self._solution["step_sid"],
        )

    def __call__(self) -> ExecutionStepContextContext:
        """
        Constructs a ExecutionStepContextContext (same as :meth:`get`).
        """
        return self.get()

    def __repr__(self) -> str:
        """
        Provide a friendly representation

        :returns: Machine friendly representation
        """
        return "<Twilio.Studio.V2.ExecutionStepContextList>"
4,738 | remove dirs | """Python script to build the OSX universal binaries.
Stolen with thankfulness from the numpy distribution
This is a simple script, most of the heavy lifting is done in bdist_mpkg.
To run this script: 'python build.py'
Installer is built using sudo so file permissions are correct when installed on
user system. Script will prompt for sudo pwd.
"""
import os
import sys
import shutil
import subprocess
from optparse import OptionParser
from getpass import getuser
# USER_README = 'docs/README.rst'
# DEV_README = SRC_DIR + 'README.rst'
BUILD_DIR = 'build'
DIST_DIR = 'dist'
DIST_DMG_DIR = 'dist-dmg'
def METHOD_NAME(sudo):
    """Remove the old build/ and dist/ directories.

    The distribution is built as root, so the trees are chowned back to the
    current user before ``shutil.rmtree`` removes them.

    :param sudo: when truthy, run the chown through sudo
    """
    print('Removing old build and distribution directories...')
    print("""The distribution is built as root, so the files have the correct
permissions when installed by the user. Chown them to user for removal.""")
    for directory in (BUILD_DIR, DIST_DIR):
        if os.path.exists(directory):
            # Bug fix: the DIST_DIR branch previously hard-coded
            # 'sudo chown ...' and then conditionally prepended 'sudo' again,
            # producing 'sudo sudo chown' when sudo=True and running sudo
            # even when sudo=False.  Both directories now share one code path.
            cmd = 'chown -R %s %s' % (getuser(), directory)
            if sudo:
                cmd = 'sudo ' + cmd
            shellcmd(cmd)
            shutil.rmtree(directory)
def build_dist(readme, python_exe, sudo):
    """Build the installer package via ``setup_egg.py bdist_mpkg``."""
    print('Building distribution... (using sudo)')
    parts = [python_exe, 'setup_egg.py', 'bdist_mpkg', '--readme=%s' % readme]
    if sudo:
        parts.insert(0, 'sudo')
    shellcmd(' '.join(parts))
def build_dmg(sudo):
    """Wrap the freshly built .pkg in a .dmg disk image under DIST_DMG_DIR.

    :param sudo: when truthy, run ``hdiutil`` through sudo
    """
    print('Building disk image...')
    # Since we removed the dist directory at the start of the script,
    # our pkg should be the only file there.
    pkg = os.listdir(DIST_DIR)[0]
    fn, ext = os.path.splitext(pkg)
    dmg = fn + '.dmg'
    srcfolder = os.path.join(DIST_DIR, pkg)
    dstfolder = os.path.join(DIST_DMG_DIR, dmg)
    # build disk image
    try:
        os.mkdir(DIST_DMG_DIR)
    except OSError:
        # Directory already exists — that's fine.
        pass
    try:
        # Remove a stale image from a previous run, if any.
        os.unlink(dstfolder)
    except OSError:
        pass
    cmd = 'hdiutil create -srcfolder %s %s' % (srcfolder, dstfolder)
    if sudo:
        cmd = 'sudo ' + cmd
    shellcmd(cmd)
def copy_readme():
    """Copy a user README with info regarding the website, instead of
    the developer README which tells one how to build the source.

    NOTE(review): USER_README and DEV_README are commented out at the top of
    this module, so calling this function raises NameError — it is only kept
    alongside the commented-out call sites in build(). Confirm before re-enabling.
    """
    print('Copy user README.rst for installer.')
    shutil.copy(USER_README, DEV_README)
def revert_readme():
    """Revert the developer README.

    NOTE(review): DEV_README is commented out at module top, so this raises
    NameError if called; it is only referenced from commented-out code in build().
    """
    print('Reverting README.rst...')
    cmd = 'svn revert %s' % DEV_README
    shellcmd(cmd)
def shellcmd(cmd, verbose=True):
    """Call a shell command.

    :param cmd: command line handed to the shell (``shell=True``)
    :param verbose: when True, echo the command before running it
    :raises RuntimeError: if the command exits with a non-zero status
    """
    if verbose:
        print(cmd)
    try:
        subprocess.check_call(cmd, shell=True)
    except subprocess.CalledProcessError as err:
        msg = """
Error while executing a shell command.
%s
""" % str(err)
        # Chain the original CalledProcessError so the exit status is kept;
        # RuntimeError is a subclass of Exception, so existing callers that
        # catch Exception still work.
        raise RuntimeError(msg) from err
def build():
    """Parse command-line options and drive the clean → bdist → dmg pipeline.

    Positional arg (optional): the source directory containing setup.py
    (defaults to the current directory).
    """
    parser = OptionParser()
    parser.add_option("-p", "--python", dest="python",
                      default=sys.executable,
                      help="python interpreter executable",
                      metavar="PYTHON_EXE")
    parser.add_option("-r", "--readme", dest="readme",
                      default='README.rst',
                      help="README file",
                      metavar="README")
    # NOTE(review): optparse stores this option as a *string* when passed on
    # the command line, so any value (even "false") is truthy — confirm that
    # is the intended behavior for --sudo.
    parser.add_option("-s", "--sudo", dest="sudo",
                      default=False,
                      help="Run as sudo or no",
                      metavar="SUDO")
    (options, args) = parser.parse_args()
    try:
        src_dir = args[0]
    except IndexError:
        src_dir = '.'
    # Check source directory
    if not os.path.isfile(os.path.join(src_dir, 'setup.py')):
        raise RuntimeError('Run this script from directory '
                           'with setup.py, or pass in this '
                           'directory on command line')
    # update end-user documentation
    # copy_readme()
    # shellcmd("svn stat %s"%DEV_README)
    # change to source directory
    cwd = os.getcwd()
    os.chdir(src_dir)
    # build distribution
    METHOD_NAME(options.sudo)
    build_dist(options.readme, options.python, options.sudo)
    build_dmg(options.sudo)
    # change back to original directory
    os.chdir(cwd)
    # restore developer documentation
    # revert_readme()
if __name__ == '__main__':
build() |
4,739 | get field full config | #!/usr/bin/python3
"""
Config save in DB
===================
"""
import os
import sys
import redis
sys.path.append(os.path.join(os.environ['AIL_BIN'], 'lib'))
from lib import ConfigLoader
config_loader = ConfigLoader.ConfigLoader()
r_serv_db = config_loader.get_redis_conn("_DB")
config_loader = None
#### TO PUT IN CONFIG
# later => module timeout
#
## data retention
#########################
# Declarative schema for every configurable field: default value, expected
# Python type, and a human-readable description, keyed by section then field.
ail_config = {
    "crawler": {
        "enable_har_by_default": {
            "default": False,
            "type": bool,
            "info": "Enable HAR by default"
        },
        "enable_screenshot_by_default": {
            "default": True,
            "type": bool,
            "info": "Enable screenshot by default"
        },
        "depth_limit": {
            "default": 1,
            "type": int,
            "info": "Maximum number of url depth"
        },
        "closespider_pagecount": {
            "default": 50,
            "type": int,
            "info": "Maximum number of pages"
        },
        "user_agent": {
            # NOTE(review): default is the int 50 but the declared type is
            # str — this looks copy-pasted from closespider_pagecount.
            # Confirm the intended default user-agent string.
            "default": 50,
            "type": str,
            "info": "User agent used by default"
        },
        "timeout": {
            "default": 30,
            "type": int,
            "info": "Crawler connection timeout"
        },
    },
    "misp": {
        # NOTE(review): the three "info" strings below appear copy-pasted
        # from the crawler timeout entry — confirm the intended descriptions.
        "url": {
            "default": "https://localhost:8443/",
            "type": str,
            "info": "Crawler connection timeout"
        },
        "key": {
            "default": "",
            "type": str,
            "info": "Crawler connection timeout"
        },
        "verifycert": {
            "default": True,
            "type": bool,
            "info": "Crawler connection timeout"
        },
    }
}
# The MISP auth key can be found on the MISP web interface under the automation section
def get_config_value(section, field, value):
    """Return the raw value stored in Redis for ``section``/``field``.

    ``value`` is kept for interface compatibility but unused.
    """
    # Bug fix: HGET takes exactly (key, field); the stray third argument
    # raised TypeError on every call.
    return r_serv_db.hget(f'ail:config:global:{section}', field)
def get_config_default_value(section, field, value):
    # Schema default from ail_config (not the `default_config` dict —
    # see get_default_config_value below).  `value` is unused.
    return ail_config[section][field]['default']
def get_config_type(section, field, value):
    # Expected Python type declared in the schema.  `value` is unused.
    return ail_config[section][field]['type']
def get_config_info(section, field, value):
    # Human-readable field description from the schema.  `value` is unused.
    return ail_config[section][field]['info']
def save_config(section, field, value):
    """Validate ``value`` against the field's declared type and persist it.

    Silently ignores unknown sections and type mismatches.
    """
    if section in ail_config:
        # Bug fix: the previous call passed ``value_type=value_type`` where
        # ``value_type`` was never defined, raising NameError on every call.
        if is_valid_type(value, section, field):
            # if value_type in ['list', 'set', 'dict']:
            #     pass
            # else:
            r_serv_db.hset(f'ail:config:global:{section}', field, value)
# Placeholder documentation map (currently unpopulated).
config_documentation = {
}
# Secondary defaults map — distinct from the schema defaults held in
# ail_config; currently empty, so get_default_config_value() raises
# KeyError for any lookup until it is populated.
default_config = {
}
def get_default_config():
    # Returns the live dict (not a copy); callers must not mutate it.
    return default_config
def get_default_config_value(section, field):
    # Looks up default_config (empty) — not the ail_config schema.
    return default_config[section][field]
#### DEFAULT CONFIG ####
#### CONFIG TYPE ####
# CONFIG DOC
config_type = {
}
# # TODO: add set, dict, list and select_(multiple_)value
# # TODO: add set, dict, list and select_(multiple_)value
def is_valid_type(obj, section, field, value_type=None):
    """Return True if ``obj`` is an instance of the field's declared type.

    ``value_type`` is kept for interface compatibility but unused.
    """
    # Bug fix: get_config_type() takes a third (unused) argument; calling it
    # with two raised TypeError on every validation.
    return isinstance(obj, get_config_type(section, field, None))
# # TODO: ###########################################################
def reset_default_config():
    # TODO stub: intended to reset every section to its defaults; the loop
    # body is not implemented yet.
    for section in config_type:
        pass
def set_default_config(section, field):
    # Persist the default value for one field.
    save_config(section, field, get_default_config_value(section, field))
def get_all_config_sections():
    # Section names come from the (currently empty) default_config map.
    return list(get_default_config())
def get_all_config_fields_by_section(section):
    return list(get_default_config()[section])
def get_config(section, field):
    """Return the stored value for ``section``/``field``, seeding the
    default into Redis when the field is missing.
    """
    # Bug fix: reads previously used the 'config:global:' prefix while every
    # writer (save_config, get_config_value) uses 'ail:config:global:', so
    # stored values were never found.
    key = f'ail:config:global:{section}'
    # config field don't exist
    if not r_serv_db.hexists(key, field):
        set_default_config(section, field)
        return get_default_config_value(section, field)
    # NOTE: the original also checked `exists(key)` here, but that branch was
    # unreachable — if the whole hash is missing, hexists() above is already
    # False — so it has been removed.
    return r_serv_db.hget(key, field)
def get_config_dict_by_section(section):
    """Map every field of ``section`` to its current (or default) value."""
    return {
        field: get_config(section, field)
        for field in get_all_config_fields_by_section(section)
    }
# check config value + type
# check config value + type
def check_integrity():
    # TODO stub: intended to verify stored values against the schema types.
    pass
def METHOD_NAME(section, field):
    """Return ``{'value', 'type', 'info'}`` for one configuration field."""
    dict_config = {}
    dict_config['value'] = get_config(section, field)
    # Bug fix: the schema accessors take a third (unused) argument, and
    # get_config_documentation() does not exist anywhere in this module —
    # get_config_info() is the description accessor.
    dict_config['type'] = get_config_type(section, field, dict_config['value'])
    dict_config['info'] = get_config_info(section, field, dict_config['value'])
    return dict_config
def get_full_config_by_section(section):
    """Full value/type/info mapping for every field of ``section``."""
    fields = get_all_config_fields_by_section(section)
    return {field: METHOD_NAME(section, field) for field in fields}
def get_full_config():
    """Full value/type/info mapping for every section."""
    return {
        section: get_full_config_by_section(section)
        for section in get_all_config_sections()
    }
if __name__ == '__main__':
res = get_full_config()
print(res) |
4,740 | update token | import asyncio
import contextlib
import json
from copy import copy
from pathlib import Path
from typing import TYPE_CHECKING, Mapping, Optional, Union
import aiohttp
from lavalink.rest_api import LoadResult
from red_commons.logging import getLogger
from redbot.core import Config
from redbot.core.bot import Red
from redbot.core.commands import Cog
from redbot.core.i18n import Translator
from ..audio_dataclasses import Query
if TYPE_CHECKING:
from .. import Audio
_API_URL = "https://api.redbot.app/"
_ = Translator("Audio", Path(__file__))
log = getLogger("red.cogs.Audio.api.GlobalDB")
class GlobalCacheWrapper:
    """HTTP client for the global Audio track-query cache (api.redbot.app).

    All calls are best-effort: network, auth or parsing failures degrade to
    empty results rather than raising.  Access is gated on the permissions
    reported for the configured ``audiodb`` API key (see ``get_perms``).
    """

    def __init__(
        self, bot: Red, config: Config, session: aiohttp.ClientSession, cog: Union["Audio", Cog]
    ):
        # Place Holder for the Global Cache PR
        self.bot = bot
        self.config = config
        self.session = session
        self.api_key = None  # resolved lazily by _get_api_key()
        self._handshake_token = ""  # "||"-joined bot owner ids, sent as X-Token
        self.has_api_key = None
        self._token: Mapping[str, str] = {}
        self.cog = cog

    async def METHOD_NAME(self, new_token: Mapping[str, str]):
        """Swap in a new ``audiodb`` token mapping and refresh permissions."""
        self._token = new_token
        await self.get_perms()

    async def _get_api_key(
        self,
    ) -> Optional[str]:
        """Resolve and cache the API key and handshake token.

        :returns: the API key, or None when no key is configured.
        """
        if not self._token:
            self._token = await self.bot.get_shared_api_tokens("audiodb")
        self.api_key = self._token.get("api_key", None)
        self.has_api_key = self.cog.global_api_user.get("can_post")
        id_list = list(self.bot.owner_ids)
        self._handshake_token = "||".join(map(str, id_list))
        return self.api_key

    async def get_call(self, query: Optional[Query] = None) -> dict:
        """Look up ``query`` in the global cache.

        :returns: the raw response dict (must contain "tracks"), or {} on
            any failure, missing permission, or unsupported query type.
        """
        api_url = f"{_API_URL}api/v2/queries"
        if not self.cog.global_api_user.get("can_read"):
            return {}
        try:
            query = Query.process_input(query, self.cog.local_folder_current_path)
            # Spotify and local-file queries are never served by the cache.
            if any([not query or not query.valid or query.is_spotify or query.is_local]):
                return {}
            await self._get_api_key()
            if self.api_key is None:
                return {}
            search_response = "error"
            query = query.lavalink_query
            with contextlib.suppress(aiohttp.ContentTypeError, asyncio.TimeoutError):
                async with self.session.get(
                    api_url,
                    timeout=aiohttp.ClientTimeout(total=await self.config.global_db_get_timeout()),
                    headers={"Authorization": self.api_key, "X-Token": self._handshake_token},
                    params={"query": query},
                ) as r:
                    search_response = await r.json(loads=json.loads)
                    log.trace(
                        "GET || Ping %s || Status code %s || %s",
                        r.headers.get("x-process-time"),
                        r.status,
                        query,
                    )
            # A suppressed timeout/parse error leaves the "error" sentinel here.
            if "tracks" not in search_response:
                return {}
            return search_response
        except Exception as exc:
            log.trace("Failed to Get query: %s/%s", api_url, query, exc_info=exc)
        return {}

    async def get_spotify(self, title: str, author: Optional[str]) -> dict:
        """Look up a Spotify track by title/author; {} on any failure."""
        if not self.cog.global_api_user.get("can_read"):
            return {}
        api_url = f"{_API_URL}api/v2/queries/spotify"
        try:
            search_response = "error"
            params = {"title": title, "author": author}
            await self._get_api_key()
            if self.api_key is None:
                return {}
            with contextlib.suppress(aiohttp.ContentTypeError, asyncio.TimeoutError):
                async with self.session.get(
                    api_url,
                    timeout=aiohttp.ClientTimeout(total=await self.config.global_db_get_timeout()),
                    headers={"Authorization": self.api_key, "X-Token": self._handshake_token},
                    params=params,
                ) as r:
                    search_response = await r.json(loads=json.loads)
                    log.trace(
                        "GET/spotify || Ping %s || Status code %s || %s - %s",
                        r.headers.get("x-process-time"),
                        r.status,
                        title,
                        author,
                    )
            if "tracks" not in search_response:
                return {}
            return search_response
        except Exception as exc:
            log.trace("Failed to Get query: %s", api_url, exc_info=exc)
        return {}

    async def post_call(self, llresponse: LoadResult, query: Optional[Query]) -> None:
        """Upload a successful YouTube lookup result to the global cache."""
        try:
            if not self.cog.global_api_user.get("can_post"):
                return
            query = Query.process_input(query, self.cog.local_folder_current_path)
            if llresponse.has_error or llresponse.load_type.value in ["NO_MATCHES", "LOAD_FAILED"]:
                return
            # Only valid YouTube queries are shared with the global cache.
            if query and query.valid and query.is_youtube:
                query = query.lavalink_query
            else:
                return None
            await self._get_api_key()
            if self.api_key is None:
                return None
            api_url = f"{_API_URL}api/v2/queries"
            async with self.session.post(
                api_url,
                json=llresponse._raw,
                headers={"Authorization": self.api_key, "X-Token": self._handshake_token},
                params={"query": query},
            ) as r:
                await r.read()
                log.trace(
                    "GET || Ping %s || Status code %s || %s",
                    r.headers.get("x-process-time"),
                    r.status,
                    query,
                )
        except Exception as exc:
            log.trace("Failed to post query: %s", query, exc_info=exc)
        await asyncio.sleep(0)

    async def update_global(self, llresponse: LoadResult, query: Optional[Query] = None):
        """Convenience alias for :meth:`post_call`."""
        await self.post_call(llresponse=llresponse, query=query)

    async def report_invalid(self, id: str) -> None:
        """Ask the API to delete a bad cache entry by id (best-effort)."""
        if not self.cog.global_api_user.get("can_delete"):
            return
        api_url = f"{_API_URL}api/v2/queries/es/id"
        with contextlib.suppress(Exception):
            async with self.session.delete(
                api_url,
                headers={"Authorization": self.api_key, "X-Token": self._handshake_token},
                params={"id": id},
            ) as r:
                await r.read()

    async def get_perms(self):
        """Fetch this API key's read/post/delete permissions.

        :returns: a copy of the cog's ``global_api_user`` dict, updated from
            the API when reachable.
        """
        global_api_user = copy(self.cog.global_api_user)
        await self._get_api_key()
        # global API is force-disabled right now
        is_enabled = False
        if (not is_enabled) or self.api_key is None:
            return global_api_user
        with contextlib.suppress(Exception):
            async with aiohttp.ClientSession(json_serialize=json.dumps) as session:
                async with session.get(
                    f"{_API_URL}api/v2/users/me",
                    headers={"Authorization": self.api_key, "X-Token": self._handshake_token},
                ) as resp:
                    if resp.status == 200:
                        search_response = await resp.json(loads=json.loads)
                        global_api_user["fetched"] = True
                        global_api_user["can_read"] = search_response.get("can_read", False)
                        global_api_user["can_post"] = search_response.get("can_post", False)
                        global_api_user["can_delete"] = search_response.get("can_delete", False)
        return global_api_user
4,741 | is chat member | import logging
from typing import Optional, Tuple, Union
from telegram import Bot, InlineKeyboardMarkup, Message, ParseMode
from telegram.error import BadRequest, InvalidToken, Unauthorized
from telegram.utils.request import Request
from apps.alerts.models import AlertGroup
from apps.base.utils import live_settings
from apps.telegram.models import TelegramMessage
from apps.telegram.renderers.keyboard import TelegramKeyboardRenderer
from apps.telegram.renderers.message import TelegramMessageRenderer
from common.api_helpers.utils import create_engine_url
logger = logging.getLogger(__name__)
class TelegramClient:
    """Wrapper around python-telegram-bot's Bot for alert-group messaging.

    Renders message text/keyboards per TelegramMessage type and sends or
    edits them via the Telegram Bot API.
    """

    ALLOWED_UPDATES = ("message", "callback_query")
    PARSE_MODE = ParseMode.HTML

    def __init__(self, token: Optional[str] = None):
        # Fall back to the live TELEGRAM_TOKEN setting when no token is given.
        self.token = token or live_settings.TELEGRAM_TOKEN
        if self.token is None:
            raise InvalidToken()

    @property
    def api_client(self) -> Bot:
        # A fresh Bot instance per access, with a 15s read timeout.
        return Bot(self.token, request=Request(read_timeout=15))

    def METHOD_NAME(self, chat_id: Union[int, str]) -> bool:
        """Return True if the bot can access the chat with ``chat_id``."""
        try:
            self.api_client.get_chat(chat_id=chat_id)
            return True
        except Unauthorized:
            return False

    def register_webhook(self, webhook_url: Optional[str] = None) -> None:
        """Point Telegram's webhook at this instance (idempotent)."""
        webhook_url = webhook_url or create_engine_url("/telegram/", override_base=live_settings.TELEGRAM_WEBHOOK_HOST)

        # avoid unnecessary set_webhook calls to make sure Telegram rate limits are not exceeded
        webhook_info = self.api_client.get_webhook_info()
        if webhook_info.url == webhook_url:
            return

        self.api_client.set_webhook(webhook_url, allowed_updates=self.ALLOWED_UPDATES)

    def delete_webhook(self):
        """Remove the registered webhook, if any (idempotent)."""
        webhook_info = self.api_client.get_webhook_info()
        if webhook_info.url == "":
            return
        self.api_client.delete_webhook()

    def send_message(
        self,
        chat_id: Union[int, str],
        message_type: int,
        alert_group: AlertGroup,
        reply_to_message_id: Optional[int] = None,
    ) -> TelegramMessage:
        """Render and send a message, recording it as a TelegramMessage row."""
        text, keyboard = self._get_message_and_keyboard(message_type=message_type, alert_group=alert_group)

        raw_message = self.send_raw_message(
            chat_id=chat_id, text=text, keyboard=keyboard, reply_to_message_id=reply_to_message_id
        )
        message = TelegramMessage.create_from_message(
            message=raw_message, alert_group=alert_group, message_type=message_type
        )

        return message

    def send_raw_message(
        self,
        chat_id: Union[int, str],
        text: str,
        keyboard: Optional[InlineKeyboardMarkup] = None,
        reply_to_message_id: Optional[int] = None,
    ) -> Message:
        """Send pre-rendered text; logs and re-raises Telegram BadRequest."""
        try:
            message = self.api_client.send_message(
                chat_id=chat_id,
                text=text,
                reply_markup=keyboard,
                reply_to_message_id=reply_to_message_id,
                parse_mode=self.PARSE_MODE,
                disable_web_page_preview=False,
            )
        except BadRequest as e:
            logger.warning("Telegram BadRequest: {}".format(e.message))
            raise

        return message

    def edit_message(self, message: TelegramMessage) -> TelegramMessage:
        """Re-render and update an already-sent message in place."""
        text, keyboard = self._get_message_and_keyboard(
            message_type=message.message_type, alert_group=message.alert_group
        )

        self.edit_raw_message(chat_id=message.chat_id, message_id=message.message_id, text=text, keyboard=keyboard)
        return message

    def edit_raw_message(
        self,
        chat_id: Union[int, str],
        message_id: Union[int, str],
        text: str,
        keyboard: Optional[InlineKeyboardMarkup] = None,
    ) -> Union[Message, bool]:
        """Low-level edit of an existing message's text/keyboard."""
        return self.api_client.edit_message_text(
            chat_id=chat_id,
            message_id=message_id,
            text=text,
            reply_markup=keyboard,
            parse_mode=self.PARSE_MODE,
            disable_web_page_preview=False,
        )

    @staticmethod
    def _get_message_and_keyboard(
        message_type: int, alert_group: AlertGroup
    ) -> Tuple[str, Optional[InlineKeyboardMarkup]]:
        """Dispatch on ``message_type`` to render (text, keyboard) for the group.

        :raises Exception: for unknown message types, or for link messages
            when the channel's alert-group message is not yet in the DB.
        """
        message_renderer = TelegramMessageRenderer(alert_group=alert_group)
        keyboard_renderer = TelegramKeyboardRenderer(alert_group=alert_group)

        if message_type == TelegramMessage.ALERT_GROUP_MESSAGE:
            text = message_renderer.render_alert_group_message()
            keyboard = None
        elif message_type == TelegramMessage.LOG_MESSAGE:
            text = message_renderer.render_log_message()
            keyboard = None
        elif message_type == TelegramMessage.ACTIONS_MESSAGE:
            text = message_renderer.render_actions_message()
            keyboard = keyboard_renderer.render_actions_keyboard()
        elif message_type == TelegramMessage.PERSONAL_MESSAGE:
            text = message_renderer.render_personal_message()
            keyboard = keyboard_renderer.render_actions_keyboard()
        elif message_type == TelegramMessage.FORMATTING_ERROR:
            text = message_renderer.render_formatting_error_message()
            keyboard = None
        elif message_type in (
            TelegramMessage.LINK_TO_CHANNEL_MESSAGE,
            TelegramMessage.LINK_TO_CHANNEL_MESSAGE_WITHOUT_TITLE,
        ):
            # Group chat ids are negative in Telegram, hence the "-" prefix filter.
            alert_group_message = alert_group.telegram_messages.filter(
                chat_id__startswith="-",
                message_type__in=[TelegramMessage.ALERT_GROUP_MESSAGE, TelegramMessage.FORMATTING_ERROR],
            ).first()

            if alert_group_message is None:
                raise Exception("No alert group message found, probably it is not saved to database yet")

            include_title = message_type == TelegramMessage.LINK_TO_CHANNEL_MESSAGE
            link = alert_group_message.link

            text = message_renderer.render_link_to_channel_message(include_title=include_title)
            keyboard = keyboard_renderer.render_link_to_channel_keyboard(link=link)
        else:
            raise Exception(f"_get_message_and_keyboard with type {message_type} is not implemented")

        return text, keyboard
4,742 | reset | try:
import time
from sonic_sfp.sfputilbase import SfpUtilBase
except ImportError as e:
raise ImportError(str(e) + "- required module not found")
class SfpUtil(SfpUtilBase):
    """Platform specific SfpUtill class"""

    _port_start = 1
    _port_end = 54
    _qsfp_port_start = 49
    _ports_in_block = 54

    _port_to_eeprom_mapping = {}
    _global_port_pres_dict = {}
    # Front-panel port number -> i2c bus of the port's EEPROM/control device.
    _port_to_i2c_mapping = {
        1: 8,
        2: 9,
        3: 10,
        4: 11,
        5: 12,
        6: 13,
        7: 14,
        8: 15,
        9: 16,
        10: 17,
        11: 18,
        12: 19,
        13: 20,
        14: 21,
        15: 22,
        16: 23,
        17: 24,
        18: 25,
        19: 26,
        20: 27,
        21: 28,
        22: 29,
        23: 30,
        24: 31,
        25: 32,
        26: 33,
        27: 34,
        28: 35,
        29: 36,
        30: 37,
        31: 38,
        32: 39,
        33: 40,
        34: 41,
        35: 42,
        36: 43,
        37: 44,
        38: 45,
        39: 46,
        40: 47,
        41: 48,
        42: 49,
        43: 50,
        44: 51,
        45: 52,
        46: 53,
        47: 54,
        48: 55,
        49: 56,
        50: 57,
        51: 60,
        52: 61,
        53: 62,
        54: 63,
    }

    _qsfp_ports = list(range(_qsfp_port_start, _ports_in_block + 1))

    def get_presence(self, port_num):
        """Return True if a transceiver is present in ``port_num``."""
        # Check for invalid port_num
        if port_num < self._port_start or port_num > self._port_end:
            return False

        path = "/sys/bus/i2c/devices/{0}-0050/sfp_is_present"
        port_ps = path.format(self._port_to_i2c_mapping[port_num])

        try:
            reg_file = open(port_ps)
        except IOError as e:
            print("Error: unable to open file: %s" % str(e))
            return False

        try:
            reg_value = reg_file.readline().rstrip()
        except IOError as e:
            # Transient sysfs read failures are retried once after 1s.
            time.sleep(1)
            try:
                reg_value = reg_file.readline().rstrip()
            except IOError as e:
                print("Error:try again to read file failed: %s %s" % (str(e), port_ps))
                reg_file.close()
                return False

        reg_file.close()
        # Bug fix: the original contained an unreachable duplicated
        # close()/compare block after the first "return True".
        return reg_value == '1'

    def init_global_port_presence(self):
        """Mark every port as absent in the presence-change tracking dict."""
        for port_num in range(self.port_start, (self.port_end + 1)):
            self._global_port_pres_dict[port_num] = '0'

    def __init__(self):
        eeprom_path = '/sys/bus/i2c/devices/{0}-0050/sfp_eeprom'
        for x in range(self._port_start, self._port_end + 1):
            port_eeprom_path = eeprom_path.format(self._port_to_i2c_mapping[x])
            self._port_to_eeprom_mapping[x] = port_eeprom_path
        self.init_global_port_presence()
        SfpUtilBase.__init__(self)

    def METHOD_NAME(self, port_num):
        """Reset the transceiver in ``port_num`` by toggling its reset bit."""
        # Check for invalid port_num
        if port_num < self._port_start or port_num > self._port_end:
            return False

        path = "/sys/bus/i2c/devices/{0}-0050/sfp_port_reset"
        port_ps = path.format(self._port_to_i2c_mapping[port_num])

        try:
            reg_file = open(port_ps, 'w')
        except IOError as e:
            print("Error: unable to open file: %s" % str(e))
            return False

        # toggle reset: assert for 1s, then deassert
        reg_file.seek(0)
        reg_file.write('1')
        time.sleep(1)
        reg_file.seek(0)
        reg_file.write('0')
        reg_file.close()
        return True

    def set_low_power_mode(self, port_num, lpmode):
        """Set (lpmode=1) or clear (lpmode=0) low-power mode on a QSFP port."""
        # Check for invalid port_num (only QSFP ports support LP mode)
        if port_num < self._qsfp_port_start or port_num > self._port_end:
            return False

        pre_value = self.get_presence(port_num)
        if pre_value == False:
            return False

        path = "/sys/bus/i2c/devices/{0}-0050/sfp_lpmode"
        port_ps = path.format(self._port_to_i2c_mapping[port_num])

        try:
            reg_file = open(port_ps, 'w')
        except IOError as e:
            print("Error: unable to open file: %s" % str(e))
            return False

        reg_file.seek(0)
        if lpmode == 1:
            reg_file.write('1')
        elif lpmode == 0:
            reg_file.write('0')
        reg_file.close()
        return True

    def get_low_power_mode(self, port_num):
        """Return True if the QSFP port is currently in low-power mode."""
        # Check for invalid port_num (only QSFP ports support LP mode)
        if port_num < self._qsfp_port_start or port_num > self._port_end:
            return False

        pre_value = self.get_presence(port_num)
        if pre_value == False:
            return False

        path = "/sys/bus/i2c/devices/{0}-0050/sfp_lpmode"
        port_ps = path.format(self._port_to_i2c_mapping[port_num])

        try:
            reg_file = open(port_ps)
        except IOError as e:
            print("Error: unable to open file:%s %s" % (str(e), port_ps))
            return False

        try:
            reg_value = reg_file.readline().rstrip()
        except IOError as e:
            print("Error: unable to open file:%s %s" % (str(e), port_ps))
            reg_file.close()
            return False

        reg_file.close()
        if reg_value == '1':
            return True
        return False

    def get_transceiver_change_event(self):
        """Block until at least one port's presence changes.

        :returns: (True, {port: '1'|'0'}) mapping changed ports to the new state.
        """
        port_dict = {}
        while True:
            for port_num in range(self.port_start, (self.port_end + 1)):
                presence = self.get_presence(port_num)
                if(presence and self._global_port_pres_dict[port_num] == '0'):
                    self._global_port_pres_dict[port_num] = '1'
                    port_dict[port_num] = '1'
                elif(not presence and
                     self._global_port_pres_dict[port_num] == '1'):
                    self._global_port_pres_dict[port_num] = '0'
                    port_dict[port_num] = '0'
            if(len(port_dict) > 0):
                return True, port_dict
            # Poll twice a second until something changes.
            time.sleep(0.5)

    @property
    def port_start(self):
        return self._port_start

    @property
    def port_end(self):
        return self._port_end

    @property
    def qsfp_ports(self):
        return self._qsfp_ports

    @property
    def port_to_eeprom_mapping(self):
        return self._port_to_eeprom_mapping
4,743 | enable universal checkpoint | # Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
import os
import torch
import types
from .constants import (FP32_WEIGHT_KEY, PARAM, VOCAB_DIVISIBILITY_PADDING_TENSOR, CAT_DIM)
def load_hp_checkpoint_state(self, folder, tp_rank, tp_world_size):
    """Load this parameter's high-precision weight and optimizer states from a
    universal-checkpoint folder, slicing out this tensor-parallel rank's shard.

    Bound onto parameter tensors (see the enabling helper below), so ``self``
    is the low-precision parameter and ``self._hp_mapping`` describes where
    its fragment lives inside the flat fp32 buffer.
    """
    hp_mapping = self._hp_mapping
    optim_state_keys = hp_mapping.get_optim_state_keys()
    # One file per state: fp32 weight plus each optimizer state (e.g. exp_avg).
    hp_keys = [FP32_WEIGHT_KEY] + optim_state_keys
    checkpoint_files = {key: os.path.join(folder, f"{key}.pt") for key in hp_keys}
    for file in checkpoint_files.values():
        assert os.path.isfile(file), f'{file} is not a valid file'

    for key in hp_keys:
        ckpt_file = checkpoint_files[key]
        ckpt_dict = torch.load(ckpt_file)
        full_hp_param = ckpt_dict[PARAM]

        # need to deal with slices that were averaged.
        # the opposite of averaging here becomes an exact copy of the first slice
        # I thought of 2 ways:
        # implementation a. find a way for a client to pass a dict with patterns
        # if any(re.search(pattern, folder) for pattern in WEIGHTS_TO_AVERAGE_PATTERNS):
        #     tp_rank = 0
        #     tp_world_size = 1
        # the other approach is to assume that the saved data is correct and if full_hp_param.shape ==
        # self.shape that means we automatically copy?
        # implementation b.
        # this version requires no additional data passed from the client
        # if the shapes already match it must be slices that were averaged - so we just hack around those
        if full_hp_param.shape == self.shape:
            tp_rank = 0
            tp_world_size = 1

        # special case for word_embeddings weights which get padded differently depending on TP degree.
        # the converter to universal currently strips the original padding completely so the saved
        # weight is padding-free and we just need to add new padding depending on the target TP
        # degree
        vocab_divisibility_padding_tensor = ckpt_dict.get(VOCAB_DIVISIBILITY_PADDING_TENSOR, None)
        if vocab_divisibility_padding_tensor is not None:
            # In the absence of data passed from the user wrt new padded vocab specific to tp degree
            # we can again derive that data by reverse engineering the target shapes like so:
            padded_target_vocab_size = self.shape[0] * tp_world_size
            if padded_target_vocab_size > full_hp_param.shape[0]:
                # Need to expand
                padding_size = padded_target_vocab_size - full_hp_param.shape[0]
                # Implement the following concat in efficient way using pad
                #full_hp_param = torch.cat((full_hp_param, padding_tensor), 0)
                full_hp_param = torch.nn.functional.pad(full_hp_param, (0, 0, 0, padding_size), "constant", 0)
                # NOTE(review): this writes the padding tensor over all rows
                # *except* the last padding_size rows — confirm the intended
                # target is the newly added padding rows.
                full_hp_param[:-padding_size, :] = vocab_divisibility_padding_tensor
            else:
                # Need to shrink or keep the same
                full_hp_param = full_hp_param[:padded_target_vocab_size, :]

        full_param_numel = full_hp_param.numel()
        tp_slice_numel = self.numel()
        # if key == FP32_WEIGHT_KEY and 'word_embeddings.weight' in folder:
        #     print_rank_0(f'{full_hp_param[:10]=}', force=True)

        assert full_param_numel == tp_world_size * tp_slice_numel, \
            f'Loading {ckpt_file} full param numel {full_param_numel} != tensor slice numel {tp_slice_numel} * tp_world_size {tp_world_size}'
        dst_tensor = hp_mapping.hp_fragment if key == FP32_WEIGHT_KEY else hp_mapping.get_optim_state_fragment(key)

        # print(f"{full_hp_param.shape=} {full_param_numel=} {folder=}")
        # print(f"{dst_tensor.shape=} {dst_tensor.numel()=}{folder=}")

        # since when we do many to 1 on tp we cat sometimes on dim=0 and other times on dim=1 we have to do exactly the same in reverse
        chunk_dim = ckpt_dict.get(CAT_DIM, 0)

        # this performs the opposite of cat when merging TP slices
        tp_hp_slice = full_hp_param.chunk(tp_world_size, chunk_dim)[tp_rank]
        tp_hp_slice = tp_hp_slice.flatten()

        lp_frag_address = hp_mapping.lp_fragment_address
        tp_hp_fragment = tp_hp_slice.narrow(0, lp_frag_address.start, lp_frag_address.numel)
        assert dst_tensor.numel() == lp_frag_address.numel, \
            f'Load checkpoint {key} dst_tensor numel {dst_tensor.numel()} != src numel {lp_frag_address.numel}'

        # print(f"{key} SHAPE: {tp_hp_slice.shape=}")
        # print(f"{key} SHAPE: {dst_tensor.shape=}")
        # print(f"{key} SHAPE: {tp_hp_fragment.shape=}")
        dst_tensor.data.copy_(tp_hp_fragment.data)
def METHOD_NAME(param_list):
    """Bind load_hp_checkpoint_state() onto each parameter as a method."""
    for parameter in param_list:
        bound_loader = types.MethodType(load_hp_checkpoint_state, parameter)
        parameter.load_hp_checkpoint_state = bound_loader
4,744 | test notify service owners | import mock
from kubernetes.client import V1DeleteOptions
from pysensu_yelp import Status
from paasta_tools.kubernetes.bin.kubernetes_remove_evicted_pods import (
evicted_pods_per_service,
)
from paasta_tools.kubernetes.bin.kubernetes_remove_evicted_pods import EvictedPod
from paasta_tools.kubernetes.bin.kubernetes_remove_evicted_pods import get_evicted_pods
from paasta_tools.kubernetes.bin.kubernetes_remove_evicted_pods import get_pod_service
from paasta_tools.kubernetes.bin.kubernetes_remove_evicted_pods import (
notify_service_owners,
)
from paasta_tools.kubernetes.bin.kubernetes_remove_evicted_pods import remove_pods
def test_get_evicted_pods():
    """get_evicted_pods keeps only pods whose status reason is 'Evicted'."""
    pod1 = mock.MagicMock(
        status=mock.MagicMock(reason="Evicted", phase="Failed"),
        metadata=mock.MagicMock(),
    )
    # MagicMock treats `name` as a constructor argument, so it must be set
    # after creation.
    pod1.metadata.name = "pod-id-1"
    pod2 = mock.MagicMock(
        status=mock.MagicMock(reason=None, phase="Running"), metadata=mock.MagicMock()
    )
    pod2.metadata.name = "pod-id-2"
    pod3 = mock.MagicMock(
        status=mock.MagicMock(reason=None, phase="Running"), metadata=mock.MagicMock()
    )
    pod3.metadata.name = "pod-id-3"

    evicted_pods = get_evicted_pods([pod1, pod2, pod3])
    assert len(evicted_pods) == 1
    assert evicted_pods[0].metadata.name == "pod-id-1"
def test_get_pod_service():
    """get_pod_service reads the paasta service name from the pod labels."""
    pod1 = mock.MagicMock(
        metadata=mock.MagicMock(labels={"paasta.yelp.com/service": "my-service"})
    )
    pod_service = get_pod_service(pod1)
    assert pod_service == "my-service"
def test_get_pod_service_no_labels():
    """get_pod_service returns None for pods with no labels at all."""
    pod1 = mock.MagicMock(metadata=mock.MagicMock(labels=None))
    pod_service = get_pod_service(pod1)
    assert pod_service is None
def METHOD_NAME():
    """notify_service_owners sends one CRITICAL Sensu event per service,
    listing each evicted pod and its eviction reason."""
    service_map = {
        "service1": [
            EvictedPod("pod1", "namespace1", "Ran out of disk"),
            EvictedPod("pod2", "namespace1", "Ran out of mem"),
        ]
    }
    check_output = "The following pods have been evicted and will be removed from the cluster:\n- pod1: Ran out of disk\n- pod2: Ran out of mem\n"
    with mock.patch(
        "paasta_tools.kubernetes.bin.kubernetes_remove_evicted_pods.send_event",
        autospec=True,
    ) as mock_send_event:
        # dry=False: the event must actually be emitted.
        notify_service_owners(service_map, "/soa_dir", False)
        mock_send_event.assert_called_with(
            "service1",
            "pod-eviction.service1",
            mock.ANY,
            Status.CRITICAL,
            check_output,
            "/soa_dir",
        )
def test_notify_service_owners_dry_run():
    """In dry-run mode no Sensu event is sent; a log line is emitted instead.

    Bug fix: the function name had a typo ("ownersi"); renamed so the test
    name reads correctly.  No callers reference test functions by name, and
    pytest discovery matches either spelling.
    """
    service_map = {
        "service1": [
            EvictedPod("pod1", "namespace1", "Ran out of disk"),
            EvictedPod("pod2", "namespace1", "Ran out of mem"),
        ]
    }
    with mock.patch(
        "paasta_tools.kubernetes.bin.kubernetes_remove_evicted_pods.send_event",
        autospec=True,
    ) as mock_send_event, mock.patch(
        "paasta_tools.kubernetes.bin.kubernetes_remove_evicted_pods.log", autospec=True
    ) as mock_logging:
        notify_service_owners(service_map, "/soa_dir", True)
        assert mock_send_event.call_count == 0
        mock_logging.info.assert_called_once_with(
            "Would have notified owners for service service1"
        )
def test_remove_pods():
    """remove_pods deletes evicted pods with immediate (grace 0) deletion.

    NOTE(review): three evicted pods go in but only two deletions are
    expected — presumably remove_pods caps deletions per service; confirm
    against the implementation.
    """
    service_map = {
        "service1": [
            EvictedPod("pod1", "namespace1", "Ran out of disk"),
            EvictedPod("pod2", "namespace1", "Ran out of mem"),
            EvictedPod("pod3", "namespace1", "Ran out of disk"),
        ]
    }
    mock_client = mock.MagicMock()
    remove_pods(mock_client, service_map, False)
    assert mock_client.core.delete_namespaced_pod.call_count == 2
    assert mock_client.core.delete_namespaced_pod.mock_calls == [
        mock.call(
            "pod1",
            "namespace1",
            body=V1DeleteOptions(),
            grace_period_seconds=0,
            propagation_policy="Background",
        ),
        mock.call(
            "pod2",
            "namespace1",
            body=V1DeleteOptions(),
            grace_period_seconds=0,
            propagation_policy="Background",
        ),
    ]
def test_remove_pods_dry_run():
    """In dry-run mode remove_pods only logs what it would delete."""
    service_map = {
        "service1": [
            EvictedPod("pod1", "namespace1", "Ran out of disk"),
            EvictedPod("pod2", "namespace1", "Ran out of mem"),
            EvictedPod("pod3", "namespace1", "Ran out of disk"),
        ]
    }
    mock_client = mock.MagicMock()
    with mock.patch(
        "paasta_tools.kubernetes.bin.kubernetes_remove_evicted_pods.log", autospec=True
    ) as mock_logging:
        remove_pods(mock_client, service_map, True)
        assert mock_client.core.delete_namespaced_pod.call_count == 0
        # Mirrors the two-deletion cap exercised in test_remove_pods above.
        assert mock_logging.info.mock_calls == [
            mock.call("Would have removed pod pod1"),
            mock.call("Would have removed pod pod2"),
        ]
def test_evicted_pods_per_service():
    """Only pods whose status reason is 'Evicted' are reported per service."""

    def make_pod(pod_name, reason, phase, message):
        pod = mock.MagicMock(
            status=mock.MagicMock(reason=reason, phase=phase, message=message),
            metadata=mock.MagicMock(
                labels={"paasta.yelp.com/service": "my-service"},
                namespace="namespace1",
            ),
        )
        # `name` is a reserved MagicMock constructor kwarg, so assign afterwards
        pod.metadata.name = pod_name
        return pod

    all_pods = [
        make_pod("pod-id-1", "Evicted", "Failed", "Ran out of disk"),
        make_pod("pod-id-2", None, "Running", None),
        make_pod("pod-id-3", None, "Running", None),
    ]
    mock_client = mock.MagicMock()
    with mock.patch(
        "paasta_tools.kubernetes.bin.kubernetes_remove_evicted_pods.get_all_pods",
        autospec=True,
    ) as mock_get_all_pods:
        mock_get_all_pods.return_value = all_pods
        evicted_pods = evicted_pods_per_service(mock_client)
        assert evicted_pods == {
            "my-service": [EvictedPod("pod-id-1", "namespace1", "Ran out of disk")]
        }
4,745 | pause stop commands | # Copyright (c) 2017 The University of Manchester
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from spinn_utilities.overrides import overrides
from pacman.model.routing_info import BaseKeyAndMask
from pacman.model.graphs.application import ApplicationSpiNNakerLinkVertex
from spinn_front_end_common.abstract_models import (
AbstractSendMeMulticastCommandsVertex)
from spinn_front_end_common.utility_models import MultiCastCommand
from spynnaker.pyNN.exceptions import SpynnakerException
from spynnaker.pyNN.models.common import PopulationApplicationVertex
def get_y_from_fpga_retina(key, mode):
    """Extract the y coordinate from a raw FPGA retina key.

    :param int key: raw event key
    :param int mode: retina resolution (128, 64, 32 or 16 pixels per side)
    :return: the y coordinate, or None for an unrecognised mode
    """
    # The low bits of the key hold y; the field width depends on resolution
    y_masks = {128: 0x7F, 64: 0x3F, 32: 0x1F, 16: 0xF}
    if mode not in y_masks:
        return None
    return key & y_masks[mode]
def get_x_from_fpga_retina(key, mode):
    """Extract the x coordinate from a raw FPGA retina key.

    :param int key: raw event key
    :param int mode: retina resolution (128, 64, 32 or 16 pixels per side)
    :return: the x coordinate, or None for an unrecognised mode
    """
    # x sits immediately above the y field; shift and width depend on mode
    layout = {128: (7, 0x7F), 64: (6, 0x3F), 32: (5, 0x1F), 16: (4, 0xF)}
    if mode not in layout:
        return None
    shift, mask = layout[mode]
    return (key >> shift) & mask
def get_spike_value_from_fpga_retina(key, mode):
    """Extract the spike/polarity bit from a raw FPGA retina key.

    Bit 14 carries the spike value for every supported resolution, so the
    extraction is identical for all recognised modes.

    :return: 0 or 1, or None for an unrecognised mode
    """
    if mode in (128, 64, 32, 16):
        return (key >> 14) & 0x1
    return None
class ExternalFPGARetinaDevice(
        ApplicationSpiNNakerLinkVertex, PopulationApplicationVertex,
        AbstractSendMeMulticastCommandsVertex):
    """A silicon retina attached to SpiNNaker via an FPGA SpiNNaker link.

    Fixes: strips extraction junk from the original source, corrects the
    ``get_n_neurons`` docstring parameter name, and collapses the duplicated
    per-mode if/elif chains into lookup tables (same behaviour).
    """
    __slots__ = [
        "__fixed_key",
        "__fixed_mask"]
    MODE_128 = "128"
    MODE_64 = "64"
    MODE_32 = "32"
    MODE_16 = "16"
    UP_POLARITY = "UP"
    DOWN_POLARITY = "DOWN"
    MERGED_POLARITY = "MERGED"

    def __init__(
            self, mode, retina_key, spinnaker_link_id, polarity,
            label=None, board_address=None):
        """
        :param str mode: The retina "mode"
        :param int retina_key: The value of the top 16-bits of the key
        :param int spinnaker_link_id:
            The SpiNNaker link to which the retina is connected
        :param str polarity: The "polarity" of the retina data
        :param str label:
        :param str board_address:
        """
        # pylint: disable=too-many-arguments
        fixed_n_neurons = self.get_n_neurons(mode, polarity)
        super().__init__(
            n_atoms=fixed_n_neurons, spinnaker_link_id=spinnaker_link_id,
            label=label, board_address=board_address, incoming=True,
            outgoing=True)
        # The top 16 bits of every event key identify this retina
        self.__fixed_key = (retina_key & 0xFFFF) << 16
        self.__fixed_mask = 0xFFFF8000
        if polarity == self.UP_POLARITY:
            # Bit 14 set distinguishes UP-polarity events
            self.__fixed_key |= 0x4000
        # NOTE: the mask above is immediately replaced by the per-mode mask
        # (kept to preserve the original behaviour exactly)
        self.__fixed_mask = self._get_mask(mode)

    @overrides(ApplicationSpiNNakerLinkVertex.get_fixed_key_and_mask)
    def get_fixed_key_and_mask(self, partition_id):
        return BaseKeyAndMask(self.__fixed_key, self.__fixed_mask)

    def _get_mask(self, mode):
        """Return the routing mask matching the retina resolution."""
        masks = {
            ExternalFPGARetinaDevice.MODE_128: 0xFFFFC000,
            ExternalFPGARetinaDevice.MODE_64: 0xFFFFF000,
            ExternalFPGARetinaDevice.MODE_32: 0xFFFFFC00,
            ExternalFPGARetinaDevice.MODE_16: 0xFFFFFF00}
        if mode not in masks:
            raise SpynnakerException(
                "the FPGA retina does not recognise this mode")
        return masks[mode]

    @staticmethod
    def get_n_neurons(mode, polarity):
        """
        :param str mode: ``128`` or ``64`` or ``32`` or ``16``
        :param str polarity: ``UP`` or ``DOWN`` or ``MERGED``
        :rtype: int
        """
        pixels = {
            ExternalFPGARetinaDevice.MODE_128: 128,
            ExternalFPGARetinaDevice.MODE_64: 64,
            ExternalFPGARetinaDevice.MODE_32: 32,
            ExternalFPGARetinaDevice.MODE_16: 16}
        if mode not in pixels:
            raise SpynnakerException(
                "the FPGA retina does not recognise this mode")
        n_neurons = pixels[mode] * pixels[mode]
        if polarity in (ExternalFPGARetinaDevice.UP_POLARITY,
                        ExternalFPGARetinaDevice.DOWN_POLARITY):
            return n_neurons
        # MERGED retinas carry both polarities: twice the neurons
        return n_neurons * 2

    @property
    @overrides(AbstractSendMeMulticastCommandsVertex.start_resume_commands)
    def start_resume_commands(self):
        # Command key 0xFFFF with payload 1 starts the retina streaming
        return [MultiCastCommand(
            key=0x0000FFFF, payload=1, repeat=5, delay_between_repeats=100)]

    @property
    @overrides(AbstractSendMeMulticastCommandsVertex.METHOD_NAME)
    def METHOD_NAME(self):
        # Command key 0xFFFE with payload 0 pauses/stops the retina
        return [MultiCastCommand(
            key=0x0000FFFE, payload=0, repeat=5, delay_between_repeats=100)]

    @property
    @overrides(AbstractSendMeMulticastCommandsVertex.timed_commands)
    def timed_commands(self):
        # This device needs no commands at specific simulation times
        return []
4,746 | validate docker env | import logging
import os
import posixpath
import shutil
import subprocess
import tempfile
import urllib.parse
import urllib.request
import docker
from mlflow import tracking
from mlflow.environment_variables import MLFLOW_TRACKING_URI
from mlflow.exceptions import ExecutionException
from mlflow.projects.utils import MLFLOW_DOCKER_WORKDIR_PATH
from mlflow.utils import file_utils, process
from mlflow.utils.databricks_utils import get_databricks_env_vars
from mlflow.utils.file_utils import _handle_readonly_on_windows
from mlflow.utils.git_utils import get_git_commit
from mlflow.utils.mlflow_tags import MLFLOW_DOCKER_IMAGE_ID, MLFLOW_DOCKER_IMAGE_URI
# Module-level logger for the docker project-execution helpers
_logger = logging.getLogger(__name__)
# Name of the Dockerfile generated into the build context archive
_GENERATED_DOCKERFILE_NAME = "Dockerfile.mlflow-autogenerated"
# Path inside the container where a local tracking store is mounted
_MLFLOW_DOCKER_TRACKING_DIR_PATH = "/mlflow/tmp/mlruns"
# Directory/archive name used for the docker build-context tarball
_PROJECT_TAR_ARCHIVE_NAME = "mlflow-project-docker-build-context"
def validate_docker_installation():
    """
    Verify if Docker is installed and running on host machine.
    """
    # First check: the CLI binary must exist on PATH
    if shutil.which("docker") is None:
        raise ExecutionException(
            "Could not find Docker executable. "
            "Ensure Docker is installed as per the instructions "
            "at https://docs.docker.com/install/overview/."
        )
    # Second check: `docker info` succeeds only when the daemon is reachable
    cmd = ["docker", "info"]
    prc = process._exec_cmd(
        cmd,
        throw_on_error=False,
        capture_output=False,
        stdout=subprocess.PIPE,
        stderr=subprocess.STDOUT,
    )
    if prc.returncode == 0:
        return
    joined_cmd = " ".join(cmd)
    raise ExecutionException(
        f"Ran `{joined_cmd}` to ensure docker daemon is running but it failed "
        f"with the following output:\n{prc.stdout}"
    )
def METHOD_NAME(project):
    """Validate that a docker-based MLproject declares a name and an image.

    :param project: parsed MLproject definition with ``name`` and
        ``docker_env`` attributes
    :raises ExecutionException: if either requirement is missing
    """
    # The project name becomes part of the image tag, so it is mandatory
    if not project.name:
        raise ExecutionException(
            "Project name in MLProject must be specified when using docker for image tagging."
        )
    base_image = project.docker_env.get("image")
    if not base_image:
        raise ExecutionException(
            "Project with docker environment must specify the docker image "
            "to use via an 'image' field under the 'docker_env' field."
        )
def build_docker_image(work_dir, repository_uri, base_image, run_id, build_image, docker_auth):
    """
    Build a docker image containing the project in `work_dir`, using the base image.

    :param work_dir: project directory copied into the image when building
    :param repository_uri: repository used as prefix of the new image tag
    :param base_image: image to pull/extend
    :param run_id: MLflow run on which image tags are recorded
    :param build_image: if False, reuse/pull the base image without building
    :param docker_auth: optional kwargs for ``client.login`` (None to skip)
    :return: the docker image object
    """
    image_uri = _get_docker_image_uri(repository_uri=repository_uri, work_dir=work_dir)
    client = docker.from_env()
    if docker_auth is not None:
        client.login(**docker_auth)
    if not build_image:
        # Reuse the base image directly; pull only when not present locally
        if not client.images.list(name=base_image):
            _logger.info(f"Pulling {base_image}")
            image = client.images.pull(base_image)
        else:
            _logger.info(f"{base_image} already exists")
            image = client.images.get(base_image)
        image_uri = base_image
    else:
        # Generate a Dockerfile that copies the project into the image
        dockerfile = (
            f"FROM {base_image}\n COPY {_PROJECT_TAR_ARCHIVE_NAME}/ {MLFLOW_DOCKER_WORKDIR_PATH}\n"
            f" WORKDIR {MLFLOW_DOCKER_WORKDIR_PATH}\n"
        )
        build_ctx_path = _create_docker_build_ctx(work_dir, dockerfile)
        with open(build_ctx_path, "rb") as docker_build_ctx:
            _logger.info("=== Building docker image %s ===", image_uri)
            image, _ = client.images.build(
                tag=image_uri,
                forcerm=True,
                dockerfile=posixpath.join(_PROJECT_TAR_ARCHIVE_NAME, _GENERATED_DOCKERFILE_NAME),
                fileobj=docker_build_ctx,
                custom_context=True,
                encoding="gzip",
            )
        # Best-effort cleanup of the temporary build-context tarball
        try:
            os.remove(build_ctx_path)
        except Exception:
            _logger.info("Temporary docker context file %s was not deleted.", build_ctx_path)
    # Record which image this run used, for reproducibility
    tracking.MlflowClient().set_tag(run_id, MLFLOW_DOCKER_IMAGE_URI, image_uri)
    tracking.MlflowClient().set_tag(run_id, MLFLOW_DOCKER_IMAGE_ID, image.id)
    return image
def _get_docker_image_uri(repository_uri, work_dir):
    """
    Returns an appropriate Docker image URI for a project based on the git hash of the specified
    working directory.
    :param repository_uri: The URI of the Docker repository with which to tag the image. The
                           repository URI is used as the prefix of the image URI.
    :param work_dir: Path to the working directory in which to search for a git commit hash
    """
    prefix = repository_uri if repository_uri else "docker-project"
    # Tag with the first 7 characters of the git SHA when one is available
    git_commit = get_git_commit(work_dir)
    if git_commit:
        return f"{prefix}:{git_commit[:7]}"
    return prefix
def _create_docker_build_ctx(work_dir, dockerfile_contents):
    """
    Creates build context tarfile containing Dockerfile and project code, returning path to tarfile

    :param work_dir: project directory to copy into the build context
    :param dockerfile_contents: text written as the generated Dockerfile
    :return: path to the created tarball (caller is responsible for removal)
    """
    directory = tempfile.mkdtemp()
    try:
        dst_path = os.path.join(directory, "mlflow-project-contents")
        shutil.copytree(src=work_dir, dst=dst_path)
        with open(os.path.join(dst_path, _GENERATED_DOCKERFILE_NAME), "w") as handle:
            handle.write(dockerfile_contents)
        # mkstemp returns an OPEN file descriptor; close it so the handle is
        # not leaked (make_tarfile reopens the path itself). The original
        # discarded the fd without closing it.
        fd, result_path = tempfile.mkstemp()
        os.close(fd)
        file_utils.make_tarfile(
            output_filename=result_path, source_dir=dst_path, archive_name=_PROJECT_TAR_ARCHIVE_NAME
        )
    finally:
        # Always remove the staging directory, even if tarring failed
        shutil.rmtree(directory, onerror=_handle_readonly_on_windows)
    return result_path
def get_docker_tracking_cmd_and_envs(tracking_uri):
    """Return (docker volume args, env vars) forwarding tracking config into a container."""
    cmds = []
    env_vars = {}
    local_path, container_tracking_uri = _get_local_uri_or_none(tracking_uri)
    if local_path is not None:
        # Mount the local tracking store into the container and point the
        # in-container tracking URI at the mount location
        cmds = ["-v", f"{local_path}:{_MLFLOW_DOCKER_TRACKING_DIR_PATH}"]
        env_vars[MLFLOW_TRACKING_URI.name] = container_tracking_uri
    # NOTE: databricks vars are applied last and may override the entry above
    env_vars.update(get_databricks_env_vars(tracking_uri))
    return cmds, env_vars
def _get_local_uri_or_none(uri):
    """If *uri* is a local file/sqlite store, return (host path, in-container URI).

    Returns (None, None) for "databricks" and any remote URI.
    """
    if uri == "databricks":
        return None, None
    parsed_uri = urllib.parse.urlparse(uri)
    # A netloc or a non-local scheme means a remote store: nothing to mount
    if parsed_uri.netloc or parsed_uri.scheme not in ("", "file", "sqlite"):
        return None, None
    path = urllib.request.url2pathname(parsed_uri.path)
    if parsed_uri.scheme == "sqlite":
        container_uri = file_utils.path_to_local_sqlite_uri(
            _MLFLOW_DOCKER_TRACKING_DIR_PATH)
    else:
        container_uri = file_utils.path_to_local_file_uri(
            _MLFLOW_DOCKER_TRACKING_DIR_PATH)
    return path, container_uri
4,747 | wrapped | import functools
import logging
import queue
import threading
import time
class _CallbackThread(threading.Thread):
    "A queue-based callback dispatcher thread"
    def __init__(
        self,
        name,
        *,
        dispatcher,
        logger,
        context,
        stop_event,
        timeout,
        callback_queue=None,
        daemon=True,
    ):
        super().__init__(name=name, daemon=daemon)
        self.context = context
        # (callback name, pvname) of the callback currently executing, for debugging
        self.current_callback = None
        self.dispatcher = dispatcher
        self.logger = logger
        # Shared event: when set, run() exits after its current queue.get times out
        self.stop_event = stop_event
        # Max seconds to block on the queue before re-checking stop_event
        self.timeout = timeout
        if callback_queue is None:
            callback_queue = queue.Queue()
        self.queue = callback_queue
    def __repr__(self):
        return "<{} qsize={}>".format(self.__class__.__name__, self.queue.qsize())
    def run(self):
        """The dispatcher itself"""
        self.logger.debug("Callback thread %s started", self.name)
        self.attach_context()
        while not self.stop_event.is_set():
            try:
                # Blocking get with timeout so stop_event is polled regularly
                callback, args, kwargs = self.queue.get(True, self.timeout)
            except queue.Empty:
                ...
            else:
                try:
                    self.current_callback = (
                        getattr(callback, "__name__", "(unnamed)"),
                        kwargs.get("pvname"),
                    )
                    callback(*args, **kwargs)
                except Exception:
                    # A failing callback must not kill the dispatcher thread
                    self.logger.exception(
                        "Exception occurred during callback %r (pvname=%r)",
                        callback,
                        kwargs.get("pvname"),
                    )
        self.detach_context()
    def attach_context(self):
        # Subclasses may override to bind a control-system context to this thread
        self.logger.debug(
            "Callback thread %s attaching to context %s", self.name, self.context
        )
    def detach_context(self):
        # Drop the context reference when the thread shuts down
        self.context = None
class DispatcherThreadContext:
    """
    A thread context associated with a single Dispatcher event type
    Parameters
    ----------
    dispatcher : Dispatcher
    event_type : str
    Attributes
    ----------
    dispatcher : Dispatcher
    event_type : str
    event_thread : _CallbackThread
    """
    def __init__(self, dispatcher, event_type):
        self.dispatcher = dispatcher
        self.event_type = event_type
        # Resolved lazily on first run(), once the dispatcher thread exists
        self.event_thread = None
    def run(self, func, *args, **kwargs):
        """
        If in the correct threading context, run func(*args, **kwargs) directly,
        otherwise schedule it to be run in that thread.
        """
        if self.event_thread is None:
            self.event_thread = self.dispatcher._threads[self.event_type]
        in_event_thread = threading.current_thread() is self.event_thread
        if in_event_thread:
            func(*args, **kwargs)
        else:
            self.event_thread.queue.put((func, args, kwargs))
    __call__ = run
# Logger used by EventDispatcher._debug_monitor to report queue backlogs
debug_monitor_log = logging.getLogger("ophyd.event_dispatcher")
class EventDispatcher:
    """Owns the per-event-type callback threads and their work queues."""
    def __init__(
        self,
        *,
        context,
        logger,
        timeout=0.1,
        thread_class=_CallbackThread,
        utility_threads=4,
    ):
        # name -> _CallbackThread; name -> DispatcherThreadContext
        self._threads = {}
        self._thread_contexts = {}
        self._thread_class = thread_class
        self._timeout = timeout
        # The dispatcher thread will stop if this event is set
        self._stop_event = threading.Event()
        self.context = context
        self.logger = logger
        # Seconds between backlog reports from the debug monitor thread
        self.debug_monitor_interval = 1
        # All utility threads share one queue so work is load-balanced
        self._utility_threads = [f"util{i}" for i in range(utility_threads)]
        self._utility_queue = queue.Queue()
        self._start_thread(name="metadata")
        self._start_thread(name="monitor")
        self._start_thread(name="get_put")
        for name in self._utility_threads:
            self._start_thread(name=name, callback_queue=self._utility_queue)
        self._debug_monitor_thread = threading.Thread(
            target=self._debug_monitor, name="debug_monitor", daemon=True
        )
        self._debug_monitor_thread.start()
    def _debug_monitor(self):
        # Periodically log any thread whose queue has a backlog
        while not self._stop_event.is_set():
            queue_sizes = [
                (name, thread.queue.qsize(), thread.current_callback)
                for name, thread in sorted(self._threads.items())
            ]
            status = [
                "{name}={qsize} ({cb})".format(name=name, qsize=qsize, cb=cb)
                for name, qsize, cb in queue_sizes
                if qsize
            ]
            if status:
                debug_monitor_log.debug(" / ".join(status))
            # Else, all EventDispatch queues are empty.
            time.sleep(self.debug_monitor_interval)
    def __repr__(self):
        threads = [repr(thread) for thread in self._threads.values()]
        return "<{} threads={}>".format(self.__class__.__name__, threads)
    def is_alive(self):
        # True while any dispatcher thread is still running
        return any(
            thread.is_alive() for thread in self.threads.values() if thread is not None
        )
    @property
    def stop_event(self):
        return self._stop_event
    @property
    def timeout(self):
        return self._timeout
    @property
    def threads(self):
        # Shallow copy so callers cannot mutate the internal registry
        return dict(self._threads)
    def stop(self):
        """Stop the dispatcher threads and re-enable normal callbacks"""
        self._stop_event.set()
        for attr, thread in list(self._threads.items()):
            if thread is not None:
                thread.join()
        self._threads.clear()
        self._debug_monitor_thread.join()
    def schedule_utility_task(self, callback, *args, **kwargs):
        "Schedule `callback` with the given args and kwargs in a util thread"
        self._utility_queue.put((callback, args, kwargs))
    def get_thread_context(self, name):
        "Get the DispatcherThreadContext for the given thread name"
        return self._thread_contexts[name]
    def _start_thread(self, name, *, callback_queue=None):
        "Start dispatcher thread by name"
        self._threads[name] = self._thread_class(
            name=name,
            dispatcher=self,
            stop_event=self._stop_event,
            timeout=self.timeout,
            context=self.context,
            logger=self.logger,
            daemon=True,
            callback_queue=callback_queue,
        )
        self._thread_contexts[name] = DispatcherThreadContext(self, name)
        self._threads[name].start()
def wrap_callback(dispatcher, event_type, callback):
    "Wrap a callback for usage with the dispatcher"
    already_wrapped = getattr(callback, "_wrapped_callback", False)
    if callback is None or already_wrapped:
        # Nothing to wrap (or wrapped previously): hand back unchanged
        return callback
    assert event_type in dispatcher._threads
    callback_queue = dispatcher._threads[event_type].queue
    @functools.wraps(callback)
    def METHOD_NAME(*args, **kwargs):
        # Defer execution to the dispatcher thread by enqueueing the call
        callback_queue.put((callback, args, kwargs))
    METHOD_NAME._wrapped_callback = True
    return METHOD_NAME
4,748 | get nested columns | # -*- coding: utf-8 -*-
#
# Copyright (C) Red Hat, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from dci.common import exceptions as dci_exc
from dci.db import query_dsl
import pyparsing as pp
from sqlalchemy import func, String
from sqlalchemy.types import ARRAY
from sqlalchemy.sql.expression import cast
import datetime
import uuid
class Mixin(object):
    """Serialization helper mixed into model classes.

    Fixes: the mutable default argument ``ignore_columns=[]`` is replaced
    with ``None``; dotted ignore keys now split on the first dot only, so
    multi-level keys like ``"a.b.c"`` no longer raise and instead cascade
    into nested serializations; the per-element nested-ignore lookup is
    hoisted out of the list loop.
    """

    def serialize(self, ignore_columns=None):
        """Return a JSON-serializable dict of this object's attributes.

        :param ignore_columns: attribute names to omit; dotted names such as
            ``"child.secret"`` omit fields of nested serialized objects.
        """
        if ignore_columns is None:
            ignore_columns = []

        def METHOD_NAME():
            # Split dotted entries ("child.field") into {"child": ["field"]};
            # maxsplit=1 lets deeper dots cascade into the nested serialize()
            _res = {}
            for ic in ignore_columns:
                if "." in ic:
                    k, v = ic.split(".", 1)
                    _res.setdefault(k, []).append(v)
            return _res

        nested_ignore_columns = {}
        if ignore_columns:
            nested_ignore_columns = METHOD_NAME()
        _dict = {}
        for attr in self.__dict__.keys():
            if attr in ignore_columns:
                continue
            attr_obj = getattr(self, attr)
            if isinstance(attr_obj, list):
                # Nested ignores apply to every Mixin element of the list
                _ignore_columns = nested_ignore_columns.get(attr, [])
                _dict[attr] = [
                    ao.serialize(ignore_columns=_ignore_columns)
                    if isinstance(ao, Mixin) else ao
                    for ao in attr_obj
                ]
            elif isinstance(attr_obj, Mixin):
                _ignore_columns = nested_ignore_columns.get(attr, [])
                _dict[attr] = attr_obj.serialize(ignore_columns=_ignore_columns)
            elif isinstance(attr_obj, uuid.UUID):
                _dict[attr] = str(attr_obj)
            elif isinstance(attr_obj, datetime.datetime):
                _dict[attr] = attr_obj.isoformat()
            elif not attr.startswith("_"):
                # Plain underscore-prefixed attributes are considered private
                _dict[attr] = self.__dict__[attr]
        return _dict
def handle_pagination(query, args):
    """Apply offset/limit from *args* to *query*, capping the limit at 200."""
    _LIMIT_MAX = 200
    _DEFAULT_LIMIT = 20
    _DEFAULT_OFFSET = 0
    offset = args.get("offset", _DEFAULT_OFFSET)
    limit = min(args.get("limit", _DEFAULT_LIMIT), _LIMIT_MAX)
    return query.offset(offset).limit(limit)
def handle_args(query, model_object, args):
    """Apply sort/where/query/date filters from *args* to *query*.

    :param query: SQLAlchemy query to refine
    :param model_object: mapped model class whose columns are sorted/filtered
    :param args: parsed request arguments (``sort``, ``where``, ``query``,
        ``created_after``, ``updated_after``)
    :raises dci_exc.DCIException: on unknown or forbidden keys or bad syntax
    """
    # Columns whose values must never be filterable (secrets); hoisted out of
    # the where-loop since the list is loop-invariant
    forbidden_column_names = [
        "api_secret",
        "data",
        "password",
        "cert_fp",
    ]
    if args.get("sort"):
        columns = model_object.__mapper__.columns.keys()
        for s in args.get("sort"):
            asc = True
            if s.startswith("-"):
                # A leading "-" requests descending order
                s = s[1:]
                asc = False
            if s not in columns:
                raise dci_exc.DCIException(
                    'Invalid sort key: "%s"' % s,
                    payload={"Valid sort keys": sorted(set(columns))},
                )
            if asc:
                query = query.order_by(getattr(model_object, s).asc())
            else:
                query = query.order_by(getattr(model_object, s).desc())
    else:
        # Default ordering: newest first
        query = query.order_by(getattr(model_object, "created_at").desc())
    where = args.get("where")
    if where:
        columns = model_object.__mapper__.columns.keys()
        for w in where:
            try:
                name, value = w.split(":", 1)
                if not value:
                    value = None
            except ValueError:
                raise dci_exc.DCIException(
                    'Invalid where key: "%s"' % w,
                    payload={
                        "error": 'where key must have the following form "key:value"'
                    },
                )
            if name not in columns:
                raise dci_exc.DCIException(
                    'Invalid where key: "%s"' % w,
                    payload={"Valid where keys": sorted(set(columns))},
                )
            if name in forbidden_column_names:
                raise dci_exc.DCIException('Invalid where key: "%s"' % name)
            m_column = getattr(model_object, name)
            # Filter semantics depend on the column type: NULL test, string
            # (case-insensitive, single trailing "*" wildcard), array
            # containment, or plain equality
            if value is None:
                query = query.filter(m_column == value)
            elif isinstance(m_column.type, String):
                value = value.lower()
                m_column = func.lower(cast(m_column, String))
                if value.endswith("*") and value.count("*") == 1:
                    query = query.filter(m_column.contains(value.replace("*", "")))
                else:
                    query = query.filter(m_column == value)
            elif isinstance(m_column.type, ARRAY):
                query = query.filter(m_column.contains([value]))
            else:
                query = query.filter(m_column == value)
    elif args.get("query"):
        # "query" DSL is only consulted when no "where" filters were given
        try:
            parsed_query = query_dsl.parse(args.get("query"))
            query = query_dsl.build(query, parsed_query, model_object)
        except pp.ParseException as pe:
            raise dci_exc.DCIException("error while parsing the query %s" % str(pe))
    if args.get("created_after"):
        query = query.filter(
            getattr(model_object, "created_at") >= args.get("created_after")
        )
    if args.get("updated_after"):
        query = query.filter(
            getattr(model_object, "updated_at") >= args.get("updated_after")
        )
    return query
4,749 | num boxes static | # Copyright 2020 Google Research. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Bounding Box List definition.
BoxList represents a list of bounding boxes as tensorflow
tensors, where each bounding box is represented as a row of 4 numbers,
[y_min, x_min, y_max, x_max]. It is assumed that all bounding boxes
within a given list correspond to a single image. See also
box_list_ops.py for common box related operations (such as area, iou, etc).
Optionally, users can add additional related fields (such as weights).
We assume the following things to be true about fields:
* they correspond to boxes in the box_list along the 0th dimension
* they have inferable rank at graph construction time
* all dimensions except for possibly the 0th can be inferred
(i.e., not None) at graph construction time.
Some other notes:
* Following tensorflow conventions, we use height, width ordering,
and correspondingly, y,x (or ymin, xmin, ymax, xmax) ordering
* Tensors are always provided as (flat) [N, 4] tensors.
"""
import tensorflow.compat.v1 as tf
class BoxList(object):
    """Box collection."""
    def __init__(self, boxes):
        """Constructs box collection.
        Args:
          boxes: a tensor of shape [N, 4] representing box corners
        Raises:
          ValueError: if invalid dimensions for bbox data or if bbox data is not in
            float32 format.
        """
        if len(boxes.get_shape()) != 2 or boxes.get_shape()[-1] != 4:
            raise ValueError("Invalid dimensions for box data.")
        if boxes.dtype != tf.float32:
            raise ValueError("Invalid tensor type: should be tf.float32")
        # All box data and related fields are stored in this dict under "boxes"
        self.data = {"boxes": boxes}
    def num_boxes(self):
        """Returns number of boxes held in collection.
        Returns:
          a tensor representing the number of boxes held in the collection.
        """
        return tf.shape(self.data["boxes"])[0]
    def METHOD_NAME(self):
        """Returns number of boxes held in collection.
        This number is inferred at graph construction time rather than run-time.
        Returns:
          Number of boxes held in collection (integer) or None if this is not
            inferable at graph construction time.
        """
        return self.data["boxes"].get_shape().as_list()[0]
    def get_all_fields(self):
        """Returns all fields."""
        return self.data.keys()
    def get_extra_fields(self):
        """Returns all non-box fields (i.e., everything not named 'boxes')."""
        return [k for k in self.data.keys() if k != "boxes"]
    def add_field(self, field, field_data):
        """Add field to box list.
        This method can be used to add related box data such as
        weights/labels, etc.
        Args:
          field: a string key to access the data via `get`
          field_data: a tensor containing the data to store in the BoxList
        """
        self.data[field] = field_data
    def has_field(self, field):
        """Returns whether the named field exists in the collection."""
        return field in self.data
    def get(self):
        """Convenience function for accessing box coordinates.
        Returns:
          a tensor with shape [N, 4] representing box coordinates.
        """
        return self.get_field("boxes")
    def set(self, boxes):
        """Convenience function for setting box coordinates.
        Args:
          boxes: a tensor of shape [N, 4] representing box corners
        Raises:
          ValueError: if invalid dimensions for bbox data
        """
        if len(boxes.get_shape()) != 2 or boxes.get_shape()[-1] != 4:
            raise ValueError("Invalid dimensions for box data.")
        self.data["boxes"] = boxes
    def get_field(self, field):
        """Accesses a box collection and associated fields.
        This function returns specified field with object; if no field is specified,
        it returns the box coordinates.
        Args:
          field: this optional string parameter can be used to specify
            a related field to be accessed.
        Returns:
          a tensor representing the box collection or an associated field.
        Raises:
          ValueError: if invalid field
        """
        if not self.has_field(field):
            raise ValueError("field " + str(field) + " does not exist")
        return self.data[field]
    def set_field(self, field, value):
        """Sets the value of a field.
        Updates the field of a box_list with a given value.
        Args:
          field: (string) name of the field to set value.
          value: the value to assign to the field.
        Raises:
          ValueError: if the box_list does not have specified field.
        """
        if not self.has_field(field):
            raise ValueError("field %s does not exist" % field)
        self.data[field] = value
    def get_center_coordinates_and_sizes(self, scope=None):
        """Computes the center coordinates, height and width of the boxes.
        Args:
          scope: name scope of the function.
        Returns:
          a list of 4 1-D tensors [ycenter, xcenter, height, width].
        """
        with tf.name_scope(scope, "get_center_coordinates_and_sizes"):
            box_corners = self.get()
            ymin, xmin, ymax, xmax = tf.unstack(tf.transpose(box_corners))
            width = xmax - xmin
            height = ymax - ymin
            ycenter = ymin + height / 2.0
            xcenter = xmin + width / 2.0
            return [ycenter, xcenter, height, width]
    def transpose_coordinates(self, scope=None):
        """Transpose the coordinate representation in a boxlist.
        Args:
          scope: name scope of the function.
        """
        with tf.name_scope(scope, "transpose_coordinates"):
            y_min, x_min, y_max, x_max = tf.split(
                value=self.get(), num_or_size_splits=4, axis=1
            )
            self.set(tf.concat([x_min, y_min, x_max, y_max], 1))
    def as_tensor_dict(self, fields=None):
        """Retrieves specified fields as a dictionary of tensors.
        Args:
          fields: (optional) list of fields to return in the dictionary.
            If None (default), all fields are returned.
        Returns:
          tensor_dict: A dictionary of tensors specified by fields.
        Raises:
          ValueError: if specified field is not contained in boxlist.
        """
        tensor_dict = {}
        if fields is None:
            fields = self.get_all_fields()
        for field in fields:
            if not self.has_field(field):
                raise ValueError("boxlist must contain all specified fields")
            tensor_dict[field] = self.get_field(field)
        # Extraction junk (" |") removed from the end of this method
        return tensor_dict
4,750 | change input | import sys
import math
import numpy as np
import Orange.data
from Orange.widgets.widget import OWWidget, Msg, Input, Output
from Orange.widgets import gui, settings
from AnyQt.QtWidgets import QFormLayout, QWidget
from orangecontrib.spectroscopy.data import getx
from orangecontrib.spectroscopy.preprocess import Interpolate, InterpolateToDomain, \
NotAllContinuousException
from orangecontrib.spectroscopy.widgets.gui import lineEditFloatOrNone
class OWInterpolate(OWWidget):
    """Orange widget that interpolates spectra onto a chosen set of points."""
    name = "Interpolate"
    description = "Interpolate spectra"
    icon = "icons/interpolate.svg"
    priority = 990
    replaces = ["orangecontrib.infrared.widgets.owinterpolate.OWInterpolate"]
    class Inputs:
        data = Input("Data", Orange.data.Table, default=True)
        points = Input("Points", Orange.data.Table)
    class Outputs:
        interpolated_data = Output("Interpolated data", Orange.data.Table, default=True)
    # how are the interpolation points given
    # (0 = automatic, 1 = linear interval, 2 = reference data)
    input_radio = settings.Setting(0)
    # specification of linear space
    xmin = settings.Setting(None)
    xmax = settings.Setting(None)
    dx = settings.Setting(10.)
    autocommit = settings.Setting(True)
    want_main_area = False
    resizing_enabled = False
    class Warning(OWWidget.Warning):
        reference_data_missing = Msg("Missing separate reference data input.")
        reference_data_unused = Msg("Reference data is present but unused.")
    class Error(OWWidget.Error):
        dxzero = Msg("Step should be higher than 0.0.")
        too_many_points = Msg("More than 10000 points with your current setting.")
        non_continuous = Msg("Points input contains non-continuous features.")
    def __init__(self):
        super().__init__()
        # Interpolator built from the "Points" input (option 2), if provided
        self.data_points_interpolate = None
        dbox = gui.widgetBox(self.controlArea, "Interpolation")
        rbox = gui.radioButtons(
            dbox, self, "input_radio", callback=self.METHOD_NAME)
        gui.appendRadioButton(rbox, "Enable automatic interpolation")
        gui.appendRadioButton(rbox, "Linear interval")
        ibox = gui.indentedBox(rbox)
        form = QWidget()
        formlayout = QFormLayout()
        form.setLayout(formlayout)
        ibox.layout().addWidget(form)
        # Min/Max/Δ editors apply only to the "Linear interval" option
        self.xmin_edit = lineEditFloatOrNone(ibox, self, "xmin", callback=self.commit.deferred)
        formlayout.addRow("Min", self.xmin_edit)
        self.xmax_edit = lineEditFloatOrNone(ibox, self, "xmax", callback=self.commit.deferred)
        formlayout.addRow("Max", self.xmax_edit)
        self.dx_edit = lineEditFloatOrNone(ibox, self, "dx", callback=self.commit.deferred)
        formlayout.addRow("Δ", self.dx_edit)
        gui.appendRadioButton(rbox, "Reference data")
        self.data = None
        gui.auto_commit(self.controlArea, self, "autocommit", "Interpolate")
        self.METHOD_NAME()
    @gui.deferred
    def commit(self):
        # Recompute the interpolated output for the current option
        out = None
        self.Error.dxzero.clear()
        self.Error.too_many_points.clear()
        if self.data:
            if self.input_radio == 0:
                # Automatic: reuse the data's own x positions
                points = getx(self.data)
                out = Interpolate(points)(self.data)
            elif self.input_radio == 1:
                xs = getx(self.data)
                if not self.dx > 0:
                    self.Error.dxzero()
                else:
                    # Fall back to the data's own range when min/max unset
                    xmin = self.xmin if self.xmin is not None else np.min(xs)
                    xmax = self.xmax if self.xmax is not None else np.max(xs)
                    xmin, xmax = min(xmin, xmax), max(xmin, xmax)
                    reslength = abs(math.ceil((xmax - xmin)/self.dx))
                    if reslength < 10002:
                        points = np.arange(xmin, xmax, self.dx)
                        out = Interpolate(points)(self.data)
                    else:
                        self.Error.too_many_points(reslength)
            elif self.input_radio == 2 and self.data_points_interpolate is not None:
                out = self.data_points_interpolate(self.data)
        self.Outputs.interpolated_data.send(out)
    def _update_input_type(self):
        # Warn when "Reference data" is selected without a Points input
        if self.input_radio == 2 and self.data_points_interpolate is None:
            self.Warning.reference_data_missing()
        else:
            self.Warning.reference_data_missing.clear()
        self.xmin_edit.setDisabled(self.input_radio != 1)
        self.xmax_edit.setDisabled(self.input_radio != 1)
        self.dx_edit.setDisabled(self.input_radio != 1)
    def METHOD_NAME(self):
        # Radio-button callback: refresh the UI state and recompute output
        self._update_input_type()
        self.commit.deferred()
    @Inputs.data
    def set_data(self, data):
        self.data = data
        # Show the data's x range as placeholder hints in the Min/Max editors
        if self.data and len(getx(data)):
            points = getx(data)
            self.xmin_edit.setPlaceholderText(str(np.min(points)))
            self.xmax_edit.setPlaceholderText(str(np.max(points)))
        else:
            self.xmin_edit.setPlaceholderText("")
            self.xmax_edit.setPlaceholderText("")
    @Inputs.points
    def set_points(self, data):
        self.Error.non_continuous.clear()
        if data:
            try:
                self.data_points_interpolate = InterpolateToDomain(target=data)
            except NotAllContinuousException:
                self.data_points_interpolate = None
                self.Error.non_continuous()
        else:
            self.data_points_interpolate = None
        self._update_input_type()
    def handleNewSignals(self):
        self.commit.now()
if __name__ == "__main__":
    # Preview the widget standalone with a sample spectroscopy dataset
    from Orange.widgets.utils.widgetpreview import WidgetPreview
    WidgetPreview(OWInterpolate).run(Orange.data.Table("collagen"))
4,751 | test single color ciede94 | """Test for correctness of color distance functions"""
import numpy as np
import pytest
from numpy.testing import assert_allclose, assert_almost_equal, assert_equal
from skimage._shared.testing import fetch
from skimage._shared.utils import _supported_float_type
from skimage.color.delta_e import (deltaE_cie76, deltaE_ciede94,
deltaE_ciede2000, deltaE_cmc)
@pytest.mark.parametrize("channel_axis", [0, 1, -1])
@pytest.mark.parametrize('dtype', [np.float32, np.float64])
def test_ciede2000_dE(dtype, channel_axis):
    """deltaE_ciede2000 reproduces the dE column of the reference test set."""
    data = load_ciede2000_data()
    N = len(data)
    lab1 = np.zeros((N, 3), dtype=dtype)
    lab1[:, 0] = data['L1']
    lab1[:, 1] = data['a1']
    lab1[:, 2] = data['b1']
    lab2 = np.zeros((N, 3), dtype=dtype)
    lab2[:, 0] = data['L2']
    lab2[:, 1] = data['a2']
    lab2[:, 2] = data['b2']
    # Exercise arbitrary channel placement, not just the default last axis.
    lab1 = np.moveaxis(lab1, source=-1, destination=channel_axis)
    lab2 = np.moveaxis(lab2, source=-1, destination=channel_axis)
    dE2 = deltaE_ciede2000(lab1, lab2, channel_axis=channel_axis)
    assert dE2.dtype == _supported_float_type(dtype)
    assert_allclose(dE2, data['dE'], rtol=1e-2)
def load_ciede2000_data():
    """Load the CIEDE2000 conformance data file as a NumPy structured array."""
    dtype = [('pair', int),
             ('1', int),
             ('L1', float),
             ('a1', float),
             ('b1', float),
             ('a1_prime', float),
             ('C1_prime', float),
             ('h1_prime', float),
             ('hbar_prime', float),
             ('G', float),
             ('T', float),
             ('SL', float),
             ('SC', float),
             ('SH', float),
             ('RT', float),
             ('dE', float),
             ('2', int),
             ('L2', float),
             ('a2', float),
             ('b2', float),
             ('a2_prime', float),
             ('C2_prime', float),
             ('h2_prime', float),
             ]
    # note: ciede_test_data.txt contains several intermediate quantities
    path = fetch('color/tests/ciede2000_test_data.txt')
    return np.loadtxt(path, dtype=dtype)
@pytest.mark.parametrize("channel_axis", [0, 1, -1])
@pytest.mark.parametrize('dtype', [np.float32, np.float64])
def test_cie76(dtype, channel_axis):
    """deltaE_cie76 matches previously-computed values on the reference pairs."""
    data = load_ciede2000_data()
    N = len(data)
    lab1 = np.zeros((N, 3), dtype=dtype)
    lab1[:, 0] = data['L1']
    lab1[:, 1] = data['a1']
    lab1[:, 2] = data['b1']
    lab2 = np.zeros((N, 3), dtype=dtype)
    lab2[:, 0] = data['L2']
    lab2[:, 1] = data['a2']
    lab2[:, 2] = data['b2']
    lab1 = np.moveaxis(lab1, source=-1, destination=channel_axis)
    lab2 = np.moveaxis(lab2, source=-1, destination=channel_axis)
    dE2 = deltaE_cie76(lab1, lab2, channel_axis=channel_axis)
    assert dE2.dtype == _supported_float_type(dtype)
    # Regression oracle: expected CIE76 distances for the dataset pairs.
    oracle = np.array([
        4.00106328, 6.31415011, 9.1776999, 2.06270077, 2.36957073,
        2.91529271, 2.23606798, 2.23606798, 4.98000036, 4.9800004,
        4.98000044, 4.98000049, 4.98000036, 4.9800004, 4.98000044,
        3.53553391, 36.86800781, 31.91002977, 30.25309901, 27.40894015,
        0.89242934, 0.7972, 0.8583065, 0.82982507, 3.1819238,
        2.21334297, 1.53890382, 4.60630929, 6.58467989, 3.88641412,
        1.50514845, 2.3237848, 0.94413208, 1.31910843
    ])
    rtol = 1e-5 if dtype == np.float32 else 1e-8
    assert_allclose(dE2, oracle, rtol=rtol)
@pytest.mark.parametrize("channel_axis", [0, 1, -1])
@pytest.mark.parametrize('dtype', [np.float32, np.float64])
def test_ciede94(dtype, channel_axis):
    """deltaE_ciede94 matches previously-computed values on the reference pairs."""
    data = load_ciede2000_data()
    N = len(data)
    lab1 = np.zeros((N, 3), dtype=dtype)
    lab1[:, 0] = data['L1']
    lab1[:, 1] = data['a1']
    lab1[:, 2] = data['b1']
    lab2 = np.zeros((N, 3), dtype=dtype)
    lab2[:, 0] = data['L2']
    lab2[:, 1] = data['a2']
    lab2[:, 2] = data['b2']
    lab1 = np.moveaxis(lab1, source=-1, destination=channel_axis)
    lab2 = np.moveaxis(lab2, source=-1, destination=channel_axis)
    dE2 = deltaE_ciede94(lab1, lab2, channel_axis=channel_axis)
    assert dE2.dtype == _supported_float_type(dtype)
    # Regression oracle: expected CIE94 distances for the dataset pairs.
    oracle = np.array([
        1.39503887, 1.93410055, 2.45433566, 0.68449187, 0.6695627,
        0.69194527, 2.23606798, 2.03163832, 4.80069441, 4.80069445,
        4.80069449, 4.80069453, 4.80069441, 4.80069445, 4.80069449,
        3.40774352, 34.6891632, 29.44137328, 27.91408781, 24.93766082,
        0.82213163, 0.71658427, 0.8048753, 0.75284394, 1.39099471,
        1.24808929, 1.29795787, 1.82045088, 2.55613309, 1.42491303,
        1.41945261, 2.3225685, 0.93853308, 1.30654464
    ])
    rtol = 1e-5 if dtype == np.float32 else 1e-8
    assert_allclose(dE2, oracle, rtol=rtol)
@pytest.mark.parametrize("channel_axis", [0, 1, -1])
@pytest.mark.parametrize('dtype', [np.float32, np.float64])
def test_cmc(dtype, channel_axis):
    """deltaE_cmc matches precomputed values; identical colors give exactly 0."""
    data = load_ciede2000_data()
    N = len(data)
    lab1 = np.zeros((N, 3), dtype=dtype)
    lab1[:, 0] = data['L1']
    lab1[:, 1] = data['a1']
    lab1[:, 2] = data['b1']
    lab2 = np.zeros((N, 3), dtype=dtype)
    lab2[:, 0] = data['L2']
    lab2[:, 1] = data['a2']
    lab2[:, 2] = data['b2']
    lab1 = np.moveaxis(lab1, source=-1, destination=channel_axis)
    lab2 = np.moveaxis(lab2, source=-1, destination=channel_axis)
    dE2 = deltaE_cmc(lab1, lab2, channel_axis=channel_axis)
    assert dE2.dtype == _supported_float_type(dtype)
    # Regression oracle: expected CMC distances for the dataset pairs.
    oracle = np.array([
        1.73873611, 2.49660844, 3.30494501, 0.85735576, 0.88332927,
        0.97822692, 3.50480874, 2.87930032, 6.5783807, 6.57838075,
        6.5783808, 6.57838086, 6.67492321, 6.67492326, 6.67492331,
        4.66852997, 42.10875485, 39.45889064, 38.36005919, 33.93663807,
        1.14400168, 1.00600419, 1.11302547, 1.05335328, 1.42822951,
        1.2548143, 1.76838061, 2.02583367, 3.08695508, 1.74893533,
        1.90095165, 1.70258148, 1.80317207, 2.44934417
    ])
    rtol = 1e-5 if dtype == np.float32 else 1e-8
    assert_allclose(dE2, oracle, rtol=rtol)
    # Equal or close colors make `delta_e.get_dH2` function to return
    # negative values resulting in NaNs when passed to sqrt (see #1908
    # issue on Github):
    # Note: lab1 now aliases lab2 (same array object), so the += below
    # shifts both inputs together and the distance stays 0.
    lab1 = lab2
    expected = np.zeros_like(oracle)
    assert_almost_equal(
        deltaE_cmc(lab1, lab2, channel_axis=channel_axis), expected, decimal=6
    )
    lab2[0, 0] += np.finfo(float).eps
    assert_almost_equal(
        deltaE_cmc(lab1, lab2, channel_axis=channel_axis), expected, decimal=6
    )
def test_cmc_single_item():
    """deltaE_cmc of a single color with itself is exactly 0."""
    # Single item case:
    lab1 = lab2 = np.array([0., 1.59607713, 0.87755709])
    assert_equal(deltaE_cmc(lab1, lab2), 0)
    # lab1 aliases lab2, so nudging lab2 shifts both colors together.
    lab2[0] += np.finfo(float).eps
    assert_equal(deltaE_cmc(lab1, lab2), 0)
def test_single_color_cie76():
    """deltaE_cie76 accepts a single color given as a plain 3-tuple.

    CIE76 is the Euclidean distance in Lab space, so the expected value
    for a uniform offset of 0.1 per channel is sqrt(3 * 0.1**2).
    """
    lab1 = (0.5, 0.5, 0.5)
    lab2 = (0.4, 0.4, 0.4)
    # Previously only a smoke test; also pin the computed value.
    assert_allclose(deltaE_cie76(lab1, lab2), np.sqrt(0.03))
def METHOD_NAME():
    """Smoke test: deltaE_ciede94 accepts a single color as a plain 3-tuple."""
    lab1 = (0.5, 0.5, 0.5)
    lab2 = (0.4, 0.4, 0.4)
    deltaE_ciede94(lab1, lab2)
def test_single_color_ciede2000():
    """Smoke test: deltaE_ciede2000 accepts a single color as a plain 3-tuple."""
    lab1 = (0.5, 0.5, 0.5)
    lab2 = (0.4, 0.4, 0.4)
    deltaE_ciede2000(lab1, lab2)
def test_single_color_cmc():
    """Smoke test: deltaE_cmc accepts a single color as a plain 3-tuple."""
    lab1 = (0.5, 0.5, 0.5)
    lab2 = (0.4, 0.4, 0.4)
    # Fix: removed a stray " |" token (data corruption) that broke the syntax.
    deltaE_cmc(lab1, lab2)
# BSD 3-Clause License; see https://github.com/scikit-hep/awkward-1.0/blob/main/LICENSE
import pickle
import numpy as np
import pytest # noqa: F401
import awkward as ak
ak_Array = ak.highlevel.Array
ak_Record = ak.highlevel.Record
ak_to_buffers = ak.operations.to_buffers
ak_from_buffers = ak.operations.from_buffers
def test_numpyarray():
    """A flat numeric array survives both buffer and pickle round-trips."""
    expected = [1, 2, 3, 4, 5]
    array = ak_Array([1, 2, 3, 4, 5])
    assert ak_from_buffers(*ak_to_buffers(array)).to_list() == expected
    assert pickle.loads(pickle.dumps(array, -1)).to_list() == expected
def test_listoffsetarray():
    """Ragged lists (numeric and string) round-trip through buffers and pickle."""
    assert ak_from_buffers(*ak_to_buffers([[1, 2, 3], [], [4, 5]])).to_list() == [
        [1, 2, 3],
        [],
        [4, 5],
    ]
    assert ak_from_buffers(
        *ak_to_buffers(["one", "two", "three", "four", "five"])
    ).to_list() == ["one", "two", "three", "four", "five"]
    assert ak_from_buffers(
        *ak_to_buffers([["one", "two", "three"], [], ["four", "five"]])
    ).to_list() == [["one", "two", "three"], [], ["four", "five"]]
    assert pickle.loads(
        pickle.dumps(ak_Array([[1, 2, 3], [], [4, 5]]), -1)
    ).to_list() == [[1, 2, 3], [], [4, 5]]
def test_listarray():
    """A ListArray built from starts/stops round-trips through buffers and pickle."""
    listoffsetarray = ak_Array([[1, 2, 3], [], [4, 5]]).layout
    listarray = ak.contents.ListArray(
        listoffsetarray.starts, listoffsetarray.stops, listoffsetarray.content
    )
    assert ak_from_buffers(*ak_to_buffers(listarray)).to_list() == [
        [1, 2, 3],
        [],
        [4, 5],
    ]
    assert pickle.loads(pickle.dumps(ak_Array(listarray), -1)).to_list() == [
        [1, 2, 3],
        [],
        [4, 5],
    ]
def test_indexedoptionarray():
    """Arrays containing None values round-trip through buffers and pickle."""
    assert ak_from_buffers(*ak_to_buffers([1, 2, 3, None, None, 5])).to_list() == [
        1,
        2,
        3,
        None,
        None,
        5,
    ]
    assert pickle.loads(
        pickle.dumps(ak_Array([1, 2, 3, None, None, 5]), -1)
    ).to_list() == [1, 2, 3, None, None, 5]
def test_indexedarray():
    """An IndexedArray (lazy reordering view) round-trips through buffers and pickle."""
    content = ak_Array([0.0, 1.1, 2.2, 3.3, 4.4]).layout
    index = ak.index.Index64(np.array([3, 1, 1, 4, 2], dtype=np.int64))
    indexedarray = ak.contents.IndexedArray(index, content)
    assert ak_from_buffers(*ak_to_buffers(indexedarray)).to_list() == [
        3.3,
        1.1,
        1.1,
        4.4,
        2.2,
    ]
    assert pickle.loads(pickle.dumps(ak_Array(indexedarray), -1)).to_list() == [
        3.3,
        1.1,
        1.1,
        4.4,
        2.2,
    ]
def test_emptyarray():
    """Empty and nested-empty arrays round-trip through buffers and pickle."""
    for sample in ([], [[], [], []]):
        assert ak_from_buffers(*ak_to_buffers(sample)).to_list() == sample
        assert pickle.loads(pickle.dumps(ak_Array(sample), -1)).to_list() == sample
def test_bytemaskedarray():
    """A ByteMaskedArray (one mask byte per element) round-trips correctly."""
    content = ak_Array([0.0, 1.1, 2.2, 3.3, 4.4]).layout
    mask = ak.index.Index8(np.array([False, True, True, False, False], dtype=np.int8))
    bytemaskedarray = ak.contents.ByteMaskedArray(mask, content, True)
    assert ak_from_buffers(*ak_to_buffers(bytemaskedarray)).to_list() == [
        None,
        1.1,
        2.2,
        None,
        None,
    ]
    assert pickle.loads(pickle.dumps(ak_Array(bytemaskedarray), -1)).to_list() == [
        None,
        1.1,
        2.2,
        None,
        None,
    ]
def test_bitmaskedarray():
    """A BitMaskedArray (one mask bit per element) round-trips correctly."""
    content = ak_Array([0.0, 1.1, 2.2, 3.3, 4.4]).layout
    mask = ak.index.IndexU8(
        np.packbits(np.array([False, True, True, False, False], dtype=np.int8))
    )
    bitmaskedarray = ak.contents.BitMaskedArray(mask, content, True, 5, False)
    assert ak_from_buffers(*ak_to_buffers(bitmaskedarray)).to_list() == [
        None,
        1.1,
        2.2,
        None,
        None,
    ]
    assert pickle.loads(pickle.dumps(ak_Array(bitmaskedarray), -1)).to_list() == [
        None,
        1.1,
        2.2,
        None,
        None,
    ]
def test_recordarray():
    """Record arrays (tuple-style and named-field) round-trip correctly."""
    assert ak_from_buffers(
        *ak_to_buffers([(1.1, [1]), (2.2, [1, 2]), (3.3, [1, 2, 3])])
    ).to_list() == [(1.1, [1]), (2.2, [1, 2]), (3.3, [1, 2, 3])]
    assert ak_from_buffers(
        *ak_to_buffers(
            [{"x": 1.1, "y": [1]}, {"x": 2.2, "y": [1, 2]}, {"x": 3.3, "y": [1, 2, 3]}]
        )
    ).to_list() == [
        {"x": 1.1, "y": [1]},
        {"x": 2.2, "y": [1, 2]},
        {"x": 3.3, "y": [1, 2, 3]},
    ]
    assert pickle.loads(
        pickle.dumps(ak_Array([(1.1, [1]), (2.2, [1, 2]), (3.3, [1, 2, 3])]), -1)
    ).to_list() == [(1.1, [1]), (2.2, [1, 2]), (3.3, [1, 2, 3])]
    assert pickle.loads(
        pickle.dumps(
            ak_Array(
                [
                    {"x": 1.1, "y": [1]},
                    {"x": 2.2, "y": [1, 2]},
                    {"x": 3.3, "y": [1, 2, 3]},
                ]
            ),
            -1,
        )
    ).to_list() == [
        {"x": 1.1, "y": [1]},
        {"x": 2.2, "y": [1, 2]},
        {"x": 3.3, "y": [1, 2, 3]},
    ]
def test_record():
    """A single Record (standalone and sliced from an array) pickles correctly."""
    assert pickle.loads(
        pickle.dumps(ak_Record({"x": 2.2, "y": [1, 2]}), -1)
    ).to_list() == {"x": 2.2, "y": [1, 2]}
    assert pickle.loads(
        pickle.dumps(
            ak_Array(
                [
                    {"x": 1.1, "y": [1]},
                    {"x": 2.2, "y": [1, 2]},
                    {"x": 3.3, "y": [1, 2, 3]},
                ]
            )[1],
            -1,
        )
    ).to_list() == {"x": 2.2, "y": [1, 2]}
def test_regulararray():
    """A RegularArray (fixed-size sublists) round-trips through buffers and pickle."""
    content = ak_Array([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]).layout
    regulararray = ak.contents.RegularArray(content, 3, zeros_length=0)
    assert ak_from_buffers(*ak_to_buffers(regulararray)).to_list() == [
        [1, 2, 3],
        [4, 5, 6],
        [7, 8, 9],
        [10, 11, 12],
    ]
    assert pickle.loads(pickle.dumps(ak_Array(regulararray), -1)).to_list() == [
        [1, 2, 3],
        [4, 5, 6],
        [7, 8, 9],
        [10, 11, 12],
    ]
def test_unionarray():
    """Mixed-type (union) arrays round-trip through buffers and pickle."""
    assert ak_from_buffers(*ak_to_buffers([[1, 2, 3], [], 4, 5])).to_list() == [
        [1, 2, 3],
        [],
        4,
        5,
    ]
    assert pickle.loads(
        pickle.dumps(ak_Array([[1, 2, 3], [], 4, 5]), -1)
    ).to_list() == [
        [1, 2, 3],
        [],
        4,
        5,
    ]
def METHOD_NAME():
    """An UnmaskedArray round-trips through buffers and pickle unchanged."""
    content = ak_Array([1, 2, 3, 4, 5]).layout
    unmaskedarray = ak.contents.UnmaskedArray(content)
    # Fix: removed a stray " |" token (data corruption) that broke the syntax.
    expected = [1, 2, 3, 4, 5]
    assert ak_from_buffers(*ak_to_buffers(unmaskedarray)).to_list() == expected
    assert pickle.loads(pickle.dumps(ak_Array(unmaskedarray), -1)).to_list() == expected
import datetime
from unittest.mock import MagicMock
import numpy as np
import pandas as pd
import pytest
from pandera import Check, Column, DataFrameSchema, DateTime, Float, Index, String
from ruamel.yaml import YAML
from deployer.billing import importers
yaml = YAML(typ="safe", pure=True)
@pytest.fixture
def cluster():
    """A minimal GCP cluster configuration, parsed from YAML."""
    test_cluster = """
    name: billing-test
    provider: gcp
    gcp:
      project: billing-test
      cluster: billing-test-cluster
      zone: us-central1
      billing:
        paid_by_us: true
        bigquery:
          project: two-eye-two-see
          dataset: cloud_costs
          billing_id: 00DEAD-BEEF000-012345
    """
    c = yaml.load(test_cluster)
    return c
@pytest.fixture
def shared_cluster(cluster):
    """The base cluster config marked as shared tenancy."""
    cluster["tenancy"] = "shared"
    return cluster
@pytest.fixture
def schema():
    """Pandera schema that imported cost rows must satisfy."""
    schema = DataFrameSchema(
        {
            "project": Column(String),
            "total_with_credits": Column(Float, Check(lambda x: x > 0.0), coerce=True),
        },
        index=Index(DateTime, name="month"),
    )
    return schema
@pytest.fixture
def METHOD_NAME():
    """Start of the queried billing period (UTC)."""
    return datetime.datetime(2023, 1, 1, tzinfo=datetime.timezone.utc)
@pytest.fixture
def end_date():
    """End of the queried billing period (UTC)."""
    return datetime.datetime(2023, 2, 1, tzinfo=datetime.timezone.utc)
def test_gcp_query_builder_invalid_service(cluster):
    """An unrecognized service id must be rejected by the query builder."""
    with pytest.raises(AssertionError):
        importers.build_gcp_query(cluster, service_id="not-a-service-id")
def test_gcp_query_builder_invalid_billing_project(cluster):
    """A malformed BigQuery project name must be rejected by the query builder."""
    cluster["gcp"]["billing"]["bigquery"]["project"] = "$%!?"
    with pytest.raises(AssertionError):
        importers.build_gcp_query(cluster)
def test_cost_schema(cluster, schema, METHOD_NAME, end_date, mocker):
    """The importer's output conforms to the expected cost schema."""
    # Avoid real BigQuery access: stub the client and the query itself.
    mocker.patch("google.cloud.bigquery.Client", autospec=True)
    bq_importer = importers.BigqueryGCPBillingCostImporter(cluster)
    bq_importer._run_query = MagicMock(
        return_value=pd.DataFrame(
            {
                "month": ["202301"],
                "project": ["test-cluster"],
                "total_with_credits": [42.0],
            }
        )
    )
    schema.validate(bq_importer.get_costs(METHOD_NAME, end_date))
def test_shared_cluster_importer_single_hub(shared_cluster, METHOD_NAME, end_date):
    """A lone hub on a shared cluster accounts for all utilization."""
    shared_importer = importers.PrometheusUtilizationImporter(shared_cluster)
    shared_importer._run_query = MagicMock(
        return_value=pd.DataFrame(
            {'{namespace="hub1"}': np.repeat(4.0, 31)},
            index=pd.date_range(start="2023-01-01", end="2023-01-31", freq="D"),
        )
    )
    rows = shared_importer.get_utilization(METHOD_NAME, end_date)
    assert (
        rows["hub1"].item() == 1.0
    ), "Single hub in shared cluster utilization should be 1.0"
def test_shared_cluster_importer_no_support(shared_cluster, METHOD_NAME, end_date):
    """With no support namespaces the combined support utilization is zero."""
    shared_importer = importers.PrometheusUtilizationImporter(shared_cluster)
    shared_importer._run_query = MagicMock(
        return_value=pd.DataFrame(
            {'{namespace="hub1"}': np.repeat(1.0, 31)},
            index=pd.date_range(start="2023-01-01", end="2023-01-31", freq="D"),
        )
    )
    rows = shared_importer.get_utilization(METHOD_NAME, end_date)
    assert (
        rows["support_combined"].item() == 0.0
    ), "Shared cluster without support should have zero combined support utilization"
def test_shared_cluster_importer_multiple_hub(shared_cluster, METHOD_NAME, end_date):
    """Utilization is split proportionally among multiple hubs."""
    shared_importer = importers.PrometheusUtilizationImporter(shared_cluster)
    shared_importer._run_query = MagicMock(
        return_value=pd.DataFrame(
            {
                '{namespace="hub1"}': np.repeat(1.0, 31),
                '{namespace="hub2"}': np.repeat(9.0, 31),
            },
            index=pd.date_range(start="2023-01-01", end="2023-01-31", freq="D"),
        )
    )
    rows = shared_importer.get_utilization(METHOD_NAME, end_date)
    assert (
        rows["hub1"].item() == 0.1
    ), "Shared cluster hub1 should have utilization of 0.1"
    assert (
        rows["hub2"].item() == 0.9
    ), "Shared cluster hub2 should have utilization of 0.9"
def test_shard_cluster_hub_and_support(shared_cluster, METHOD_NAME, end_date):
    """Hub and support namespace utilizations are reported side by side."""
    shared_importer = importers.PrometheusUtilizationImporter(shared_cluster)
    shared_importer._run_query = MagicMock(
        return_value=pd.DataFrame(
            {
                '{namespace="hub1"}': np.repeat(9.9, 31),
                '{namespace="kube-system"}': np.repeat(0.1, 31),
            },
            index=pd.date_range(start="2023-01-01", end="2023-01-31", freq="D"),
        )
    )
    rows = shared_importer.get_utilization(METHOD_NAME, end_date)
    assert rows["hub1"].item() == pytest.approx(0.99)
    assert rows["support_combined"].item() == pytest.approx(0.01)
def test_shared_cluster_aggregates_support(shared_cluster, METHOD_NAME, end_date):
    """Support namespaces are folded into a single support_combined entry."""
    shared_importer = importers.PrometheusUtilizationImporter(shared_cluster)
    shared_importer._run_query = MagicMock(
        return_value=pd.DataFrame(
            {
                '{namespace="support"}': np.repeat(9.9, 31),
                '{namespace="kube-system"}': np.repeat(0.1, 31),
            },
            index=pd.date_range(start="2023-01-01", end="2023-01-31", freq="D"),
        )
    )
    rows = shared_importer.get_utilization(METHOD_NAME, end_date)
    # Support only is 100% utilization
    assert rows["support_combined"].item() == pytest.approx(
        1.0
    ), "Utilization for support_combined should be the sum of support and kube-system and 1.0"
    assert (
        "support" not in rows
    ), "Utilization for support_combined should replace support"
    assert "kube-system" not in rows
def test_shared_cluster_internal(shared_cluster, METHOD_NAME, end_date):
    """Internal (2i2c-owned) namespaces are reported under 2i2c_costs."""
    shared_importer = importers.PrometheusUtilizationImporter(shared_cluster)
    shared_importer._run_query = MagicMock(
        return_value=pd.DataFrame(
            {
                '{namespace="staging"}': np.repeat(75, 31),
                '{namespace="kube-system"}': np.repeat(25, 31),
            },
            index=pd.date_range(start="2023-01-01", end="2023-01-31", freq="D"),
        )
    )
    rows = shared_importer.get_utilization(METHOD_NAME, end_date)
    assert rows["2i2c_costs"].item() == pytest.approx(
        0.75
    ), "Utilization for 2i2c_costs should be 0.75"
    assert (
        "staging" not in rows
    ), "Utilization for 2i2c_costs should replace interal namespaces"
def test_shared_cluster_aggregates_internal(shared_cluster, METHOD_NAME, end_date):
    """Multiple internal namespaces are summed into the 2i2c_costs entry."""
    shared_importer = importers.PrometheusUtilizationImporter(shared_cluster)
    shared_importer._run_query = MagicMock(
        return_value=pd.DataFrame(
            {
                '{namespace="staging"}': np.repeat(25, 31),
                '{namespace="demo"}': np.repeat(50, 31),
                '{namespace="kube-system"}': np.repeat(25, 31),
            },
            index=pd.date_range(start="2023-01-01", end="2023-01-31", freq="D"),
        )
    )
    rows = shared_importer.get_utilization(METHOD_NAME, end_date)
    # Fix: removed a stray " |" token (data corruption) after the closing assert.
    assert rows["2i2c_costs"].item() == pytest.approx(
        0.75
    ), "Utilization for 2i2c_costs should be summed over internal namespace"
import logging
from typing import TYPE_CHECKING, Callable, Optional
from rotkehlchen.accounting.structures.types import HistoryEventSubType, HistoryEventType
from rotkehlchen.assets.asset import EvmToken
from rotkehlchen.chain.ethereum.modules.aave.v1.decoder import DEFAULT_DECODING_OUTPUT
from rotkehlchen.chain.evm.decoding.interfaces import DecoderInterface
from rotkehlchen.chain.evm.decoding.structures import ActionItem, DecodingOutput
from rotkehlchen.chain.evm.decoding.types import CounterpartyDetails, EventCategory
from rotkehlchen.chain.evm.decoding.utils import maybe_reshuffle_events
from rotkehlchen.chain.evm.structures import EvmTxReceiptLog
from rotkehlchen.errors.asset import UnknownAsset, WrongAssetType
from rotkehlchen.logging import RotkehlchenLogsAdapter
from rotkehlchen.types import DecoderEventMappingType, EvmTransaction
from rotkehlchen.utils.misc import hex_or_bytes_to_address
from ..constants import CPT_UNISWAP_V1, UNISWAP_ICON, UNISWAP_LABEL
if TYPE_CHECKING:
from rotkehlchen.accounting.structures.evm_event import EvmEvent
logger = logging.getLogger(__name__)
log = RotkehlchenLogsAdapter(logger)
# https://github.com/Uniswap/v1-contracts/blob/c10c08d81d6114f694baa8bd32f555a40f6264da/contracts/uniswap_exchange.vy#L13
TOKEN_PURCHASE = b'\xcd`\xaau\xde\xa3\x07/\xbc\x07\xaem}\x85k]\xc5\xf4\xee\xe8\x88T\xf5\xb4\xab\xf7\xb6\x80\xef\x8b\xc5\x0f' # noqa: E501
# https://github.com/Uniswap/v1-contracts/blob/c10c08d81d6114f694baa8bd32f555a40f6264da/contracts/uniswap_exchange.vy#L14
ETH_PURCHASE = b'\x7f@\x91\xb4l3\xe9\x18\xa0\xf3\xaaB0vA\xd1{\xb6p)BzSi\xe5K59\x84#\x87\x05' # noqa: E501
class Uniswapv1Decoder(DecoderInterface):
    def METHOD_NAME(
        self,
        token: Optional[EvmToken],  # pylint: disable=unused-argument
        tx_log: EvmTxReceiptLog,
        transaction: EvmTransaction,  # pylint: disable=unused-argument
        decoded_events: list['EvmEvent'],
        action_items: list[ActionItem],  # pylint: disable=unused-argument
        all_logs: list[EvmTxReceiptLog],  # pylint: disable=unused-argument
    ) -> DecodingOutput:
        """Search for both events. Since the order is not guaranteed try reshuffle in both cases

        On a TokenPurchase log the buyer's spend event is retagged as the swap's
        out-leg; on an EthPurchase log the buyer's receive event becomes the
        in-leg. Events already tagged as the opposite leg are picked up so the
        pair can be reordered together. Mutates decoded_events in place.
        """
        out_event = in_event = None
        if tx_log.topics[0] == TOKEN_PURCHASE:
            buyer = hex_or_bytes_to_address(tx_log.topics[1])
            # search for a send to buyer from a tracked address
            for event in decoded_events:
                if event.event_type == HistoryEventType.SPEND and event.address == buyer:
                    try:
                        crypto_asset = event.asset.resolve_to_crypto_asset()
                    except (UnknownAsset, WrongAssetType):
                        self.notify_user(event=event, counterparty=CPT_UNISWAP_V1)
                        continue
                    event.event_type = HistoryEventType.TRADE
                    event.event_subtype = HistoryEventSubType.SPEND
                    event.counterparty = CPT_UNISWAP_V1
                    event.notes = f'Swap {event.balance.amount} {crypto_asset.symbol} in uniswap-v1 from {event.location_label}'  # noqa: E501
                    out_event = event
                elif event.event_type == HistoryEventType.TRADE and event.event_subtype == HistoryEventSubType.RECEIVE and event.counterparty == CPT_UNISWAP_V1:  # noqa: E501
                    in_event = event
        elif tx_log.topics[0] == ETH_PURCHASE:
            buyer = hex_or_bytes_to_address(tx_log.topics[1])
            # search for a receive to buyer
            for event in decoded_events:
                if event.event_type == HistoryEventType.RECEIVE and event.location_label == buyer:
                    try:
                        crypto_asset = event.asset.resolve_to_crypto_asset()
                    except (UnknownAsset, WrongAssetType):
                        self.notify_user(event=event, counterparty=CPT_UNISWAP_V1)
                        continue
                    event.event_type = HistoryEventType.TRADE
                    event.event_subtype = HistoryEventSubType.RECEIVE
                    event.counterparty = CPT_UNISWAP_V1
                    event.notes = f'Receive {event.balance.amount} {crypto_asset.symbol} from uniswap-v1 swap in {event.location_label}'  # noqa: E501
                    in_event = event
                elif event.event_type == HistoryEventType.TRADE and event.event_subtype == HistoryEventSubType.SPEND and event.counterparty == CPT_UNISWAP_V1:  # noqa: E501
                    out_event = event
        maybe_reshuffle_events(ordered_events=[out_event, in_event], events_list=decoded_events)
        return DEFAULT_DECODING_OUTPUT
# -- DecoderInterface methods
    def possible_events(self) -> DecoderEventMappingType:
        """Map Uniswap v1 trade subtypes to their swap display categories."""
        return {CPT_UNISWAP_V1: {
            HistoryEventType.TRADE: {
                HistoryEventSubType.RECEIVE: EventCategory.SWAP_IN,
                HistoryEventSubType.SPEND: EventCategory.SWAP_OUT,
            },
        }}
    def decoding_rules(self) -> list[Callable]:
        """Expose the swap decoder to the generic decoding loop."""
        return [
            self.METHOD_NAME,
        ]
def counterparties(self) -> list[CounterpartyDetails]:
return [CounterpartyDetails(
identifier=CPT_UNISWAP_V1,
label=UNISWAP_LABEL,
image=UNISWAP_ICON,
)] |
"""Connection object for Network Manager."""
import logging
from typing import Any
from dbus_fast import Variant
from dbus_fast.aio.message_bus import MessageBus
from ....const import ATTR_METHOD, ATTR_MODE, ATTR_PSK, ATTR_SSID
from ...const import DBUS_NAME_NM
from ...interface import DBusInterface
from ...utils import dbus_connected
from ..configuration import (
ConnectionProperties,
EthernetProperties,
IpProperties,
MatchProperties,
VlanProperties,
WirelessProperties,
WirelessSecurityProperties,
)
CONF_ATTR_CONNECTION = "connection"
CONF_ATTR_802_ETHERNET = "802-3-ethernet"
CONF_ATTR_802_WIRELESS = "802-11-wireless"
CONF_ATTR_802_WIRELESS_SECURITY = "802-11-wireless-security"
CONF_ATTR_VLAN = "vlan"
CONF_ATTR_IPV4 = "ipv4"
CONF_ATTR_IPV6 = "ipv6"
CONF_ATTR_MATCH = "match"
CONF_ATTR_PATH = "path"
ATTR_ID = "id"
ATTR_UUID = "uuid"
ATTR_TYPE = "type"
ATTR_PARENT = "parent"
ATTR_ASSIGNED_MAC = "assigned-mac-address"
ATTR_POWERSAVE = "powersave"
ATTR_AUTH_ALG = "auth-alg"
ATTR_KEY_MGMT = "key-mgmt"
ATTR_INTERFACE_NAME = "interface-name"
ATTR_PATH = "path"
IPV4_6_IGNORE_FIELDS = [
"addresses",
"address-data",
"dns",
"gateway",
"method",
]
_LOGGER: logging.Logger = logging.getLogger(__name__)
def _merge_settings_attribute(
base_settings: dict[str, dict[str, Variant]],
new_settings: dict[str, dict[str, Variant]],
attribute: str,
*,
ignore_current_value: list[str] = None,
) -> None:
"""Merge settings attribute if present."""
if attribute in new_settings:
if attribute in base_settings:
if ignore_current_value:
for field in ignore_current_value:
base_settings[attribute].pop(field, None)
base_settings[attribute].update(new_settings[attribute])
else:
base_settings[attribute] = new_settings[attribute]
class NetworkSetting(DBusInterface):
"""Network connection setting object for Network Manager.
https://developer.gnome.org/NetworkManager/stable/gdbus-org.freedesktop.NetworkManager.Settings.Connection.html
"""
bus_name: str = DBUS_NAME_NM
    def __init__(self, object_path: str) -> None:
        """Initialize NetworkConnection object."""
        self.object_path: str = object_path
        # Parsed per-section properties; populated by reload(), None until then.
        self._connection: ConnectionProperties | None = None
        self._wireless: WirelessProperties | None = None
        self._wireless_security: WirelessSecurityProperties | None = None
        self._ethernet: EthernetProperties | None = None
        self._vlan: VlanProperties | None = None
        self._ipv4: IpProperties | None = None
        self._ipv6: IpProperties | None = None
        self._match: MatchProperties | None = None
    @property
    def connection(self) -> ConnectionProperties | None:
        """Return parsed [connection] section properties, or None."""
        return self._connection
    @property
    def wireless(self) -> WirelessProperties | None:
        """Return parsed [802-11-wireless] section properties, or None."""
        return self._wireless
    @property
    def METHOD_NAME(self) -> WirelessSecurityProperties | None:
        """Return parsed [802-11-wireless-security] section properties, or None."""
        return self._wireless_security
    @property
    def ethernet(self) -> EthernetProperties | None:
        """Return parsed [802-3-ethernet] section properties, or None."""
        return self._ethernet
    @property
    def vlan(self) -> VlanProperties | None:
        """Return parsed [vlan] section properties, or None."""
        return self._vlan
    @property
    def ipv4(self) -> IpProperties | None:
        """Return parsed [ipv4] section properties, or None."""
        return self._ipv4
    @property
    def ipv6(self) -> IpProperties | None:
        """Return parsed [ipv6] section properties, or None."""
        return self._ipv6
    @property
    def match(self) -> MatchProperties | None:
        """Return parsed [match] section properties, or None."""
        return self._match
    @dbus_connected
    async def get_settings(self) -> dict[str, Any]:
        """Fetch this connection's settings from NetworkManager over D-Bus."""
        return await self.dbus.Settings.Connection.call_get_settings()
    @dbus_connected
    async def update(self, settings: dict[str, dict[str, Variant]]) -> None:
        """Update connection settings.

        Fetches the current settings (keeping Variant wrappers) and merges
        the given sections into them, so fields not being changed survive.
        The current interface-name and concrete IP configuration fields are
        dropped before merging so stale values are not carried over.
        """
        new_settings: dict[
            str, dict[str, Variant]
        ] = await self.dbus.Settings.Connection.call_get_settings(unpack_variants=False)
        _merge_settings_attribute(
            new_settings,
            settings,
            CONF_ATTR_CONNECTION,
            ignore_current_value=[ATTR_INTERFACE_NAME],
        )
        _merge_settings_attribute(new_settings, settings, CONF_ATTR_802_ETHERNET)
        _merge_settings_attribute(new_settings, settings, CONF_ATTR_802_WIRELESS)
        _merge_settings_attribute(
            new_settings, settings, CONF_ATTR_802_WIRELESS_SECURITY
        )
        _merge_settings_attribute(new_settings, settings, CONF_ATTR_VLAN)
        _merge_settings_attribute(
            new_settings,
            settings,
            CONF_ATTR_IPV4,
            ignore_current_value=IPV4_6_IGNORE_FIELDS,
        )
        _merge_settings_attribute(
            new_settings,
            settings,
            CONF_ATTR_IPV6,
            ignore_current_value=IPV4_6_IGNORE_FIELDS,
        )
        _merge_settings_attribute(new_settings, settings, CONF_ATTR_MATCH)
        await self.dbus.Settings.Connection.call_update(new_settings)
    @dbus_connected
    async def delete(self) -> None:
        """Delete this connection's settings via NetworkManager."""
        await self.dbus.Settings.Connection.call_delete()
    async def connect(self, bus: MessageBus) -> None:
        """Connect to D-Bus, load settings, and subscribe to updates."""
        await super().connect(bus)
        await self.reload()
        # Keep parsed properties fresh whenever NetworkManager reports changes.
        self.dbus.Settings.Connection.on_updated(self.reload)
@dbus_connected
async def reload(self):
"""Get current settings for connection."""
data = await self.get_settings()
# Get configuration settings we care about
# See: https://developer-old.gnome.org/NetworkManager/stable/ch01.html
if CONF_ATTR_CONNECTION in data:
self._connection = ConnectionProperties(
data[CONF_ATTR_CONNECTION].get(ATTR_ID),
data[CONF_ATTR_CONNECTION].get(ATTR_UUID),
data[CONF_ATTR_CONNECTION].get(ATTR_TYPE),
data[CONF_ATTR_CONNECTION].get(ATTR_INTERFACE_NAME),
)
if CONF_ATTR_802_ETHERNET in data:
self._ethernet = EthernetProperties(
data[CONF_ATTR_802_ETHERNET].get(ATTR_ASSIGNED_MAC),
)
if CONF_ATTR_802_WIRELESS in data:
self._wireless = WirelessProperties(
bytes(data[CONF_ATTR_802_WIRELESS].get(ATTR_SSID, [])).decode(),
data[CONF_ATTR_802_WIRELESS].get(ATTR_ASSIGNED_MAC),
data[CONF_ATTR_802_WIRELESS].get(ATTR_MODE),
data[CONF_ATTR_802_WIRELESS].get(ATTR_POWERSAVE),
)
if CONF_ATTR_802_WIRELESS_SECURITY in data:
self._wireless_security = WirelessSecurityProperties(
data[CONF_ATTR_802_WIRELESS_SECURITY].get(ATTR_AUTH_ALG),
data[CONF_ATTR_802_WIRELESS_SECURITY].get(ATTR_KEY_MGMT),
data[CONF_ATTR_802_WIRELESS_SECURITY].get(ATTR_PSK),
)
if CONF_ATTR_VLAN in data:
self._vlan = VlanProperties(
data[CONF_ATTR_VLAN].get(ATTR_ID),
data[CONF_ATTR_VLAN].get(ATTR_PARENT),
)
if CONF_ATTR_IPV4 in data:
self._ipv4 = IpProperties(
data[CONF_ATTR_IPV4].get(ATTR_METHOD),
)
if CONF_ATTR_IPV6 in data:
self._ipv6 = IpProperties(
data[CONF_ATTR_IPV6].get(ATTR_METHOD),
)
if CONF_ATTR_MATCH in data:
self._match = MatchProperties(data[CONF_ATTR_MATCH].get(ATTR_PATH)) |
# pylint: disable=too-many-lines
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import sys
from typing import Any, Callable, Dict, Iterable, Optional, TypeVar
import urllib.parse
from azure.core.exceptions import (
ClientAuthenticationError,
HttpResponseError,
ResourceExistsError,
ResourceNotFoundError,
ResourceNotModifiedError,
map_error,
)
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpResponse
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.core.utils import case_insensitive_dict
from azure.mgmt.core.exceptions import ARMErrorFormat
from .. import models as _models
from .._serialization import Serializer
from .._vendor import BillingBenefitsRPMixinABC, _convert_request
if sys.version_info >= (3, 8):
from typing import Literal # pylint: disable=no-name-in-module, ungrouped-imports
else:
from typing_extensions import Literal # type: ignore # pylint: disable=ungrouped-imports
T = TypeVar("T")
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
_SERIALIZER = Serializer()
_SERIALIZER.client_side_validation = False
def METHOD_NAME(**kwargs: Any) -> HttpRequest:
    """Build the GET request that lists Microsoft.BillingBenefits operations."""
    _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
    api_version: Literal["2022-11-01"] = kwargs.pop("api_version", _params.pop("api-version", "2022-11-01"))
    accept = _headers.pop("Accept", "application/json")
    # Construct URL
    _url = kwargs.pop("template_url", "/providers/Microsoft.BillingBenefits/operations")
    # Construct parameters
    _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
    # Construct headers
    _headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
    return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs)
class Operations:
"""
.. warning::
**DO NOT** instantiate this class directly.
Instead, you should access the following operations through
:class:`~azure.mgmt.billingbenefits.BillingBenefitsRP`'s
:attr:`operations` attribute.
"""
models = _models
    def __init__(self, *args, **kwargs):
        """Capture client, config and (de)serializer passed by the generated client."""
        input_args = list(args)
        self._client = input_args.pop(0) if input_args else kwargs.pop("client")
        self._config = input_args.pop(0) if input_args else kwargs.pop("config")
        self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer")
        self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer")
@distributed_trace
def list(self, **kwargs: Any) -> Iterable["_models.Operation"]:
"""Get operations.
List all the operations.
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either Operation or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.billingbenefits.models.Operation]
:raises ~azure.core.exceptions.HttpResponseError:
"""
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: Literal["2022-11-01"] = kwargs.pop(
"api_version", _params.pop("api-version", self._config.api_version)
)
cls: ClsType[_models.OperationListResult] = kwargs.pop("cls", None)
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
def prepare_request(next_link=None):
if not next_link:
request = METHOD_NAME(
api_version=api_version,
template_url=self.list.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
# make call to next link with the client's api-version
_parsed_next_link = urllib.parse.urlparse(next_link)
_next_request_params = case_insensitive_dict(
{
key: [urllib.parse.quote(v) for v in value]
for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items()
}
)
_next_request_params["api-version"] = self._config.api_version
request = HttpRequest(
"GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = "GET"
return request
def extract_data(pipeline_response):
deserialized = self._deserialize("OperationListResult", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem) # type: ignore
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(get_next, extract_data)
list.metadata = {"url": "/providers/Microsoft.BillingBenefits/operations"} |
#!/usr/bin/python3
# Copyright (c) 2021-2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import subprocess
import sys
import tarfile
import time
from shutil import which
class IndexCreator:
    """Reads `Webdataset` data format, and creates index file
    that enables random access.

    Example usage:
    ----------
    >> with IndexCreator('data/test.tar','data/test.idx') as ic:
    >>     ic.create_index()

    >> !ls data/
    test.tar test.idx

    Parameters
    ----------
    uri : str
        Path to the archive file.
    idx_path : str
        Path to the index file, that will be created/overwritten.
    """

    # Size of one tar header/data block in bytes.
    tar_block_size = 512
    # Version tag written at the top of every generated index file.
    index_file_version = "v1.2"

    def __init__(self, uri, idx_path, verbose=True):
        self.uri = uri
        self.idx_path = idx_path
        # Opened for writing immediately; truncates any existing index.
        self.fidx = open(self.idx_path, "w")
        self.verbose = verbose

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_value, exc_traceback):
        self.close()

    def open(self):
        """Opens the archive and index files and sets their read heads to 0."""
        if self.fidx.closed:
            self.fidx = open(self.idx_path, "w")
        else:
            self.fidx.seek(0)

    def close(self):
        """Closes the archive and index files."""
        if not self.fidx.closed:
            self.fidx.close()

    def reset(self):
        """Resets the archive and index files."""
        self.close()
        self.open()

    @staticmethod
    def split_name(filepath):
        """Splits the webdataset into the basename and the extension"""
        # The first dot after the final path separator separates the basename
        # from the (possibly multi-part) extension.
        dot_pos = filepath.find(".", filepath.rfind("/") + 1)
        return filepath[:dot_pos], filepath[dot_pos + 1:]

    def _get_data_tar(self):
        """Retreives the data about the offset, name and size of each component
        using the gnu tar utility, while also filtering out non-file entries"""
        # Two parallel `tar --list` invocations: one reports block offsets,
        # the other reports entry types and sizes. Their outputs line up 1:1.
        tar_blocks_proc = subprocess.Popen(
            ["tar", "--list", "--block-num", "--file", self.uri],
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,
        )
        tar_types_sizes_proc = subprocess.Popen(
            ["tar", "--verbose", "--list", "--file", self.uri],
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,
        )
        tar_blocks = tar_blocks_proc.communicate()[0].split(b"\n")  # block <n>: <filepath>
        tar_types_sizes = tar_types_sizes_proc.communicate()[0].split(
            b"\n"
        )  # <type>... <size> <date> <name>

        # Extracting
        for blocks_line, types_sizes_line in zip(tar_blocks, tar_types_sizes):
            if not blocks_line or not types_sizes_line:
                continue

            name = str(blocks_line[blocks_line.find(b":") + 2:], "ascii")
            entry_type = types_sizes_line[0:1]

            # Only regular files (type flag "-") are indexed.
            if entry_type != b"-":
                continue

            offset = int(blocks_line[blocks_line.find(b"block") + 6:
                                     blocks_line.find(b":")])
            # according to https://www.loc.gov/preservation/digital/formats/fdd/fdd000531.shtml#:~:text=A%20tar%20(tape%20archive)%20file,are%20not%20compressed%20archive%20files. # noqa: E501, W505
            # each data record is preceded by 512-byte header. `tar --list --block-num --file`
            # return the position (counted in 512-byte blocks) of the header for a given entry.
            # So the size of the header needs to be added to get the data offset
            offset = (offset + 1) * 512

            # Extract the size column by slicing the verbose listing from the
            # right: drop the name, then everything from the date onward.
            size = types_sizes_line[: -len(name)]
            size = size[: size.rfind(b"-") - 8]  # "... <size> 20yy-mm-...."
            size = int(size[size.rfind(b" "):])

            yield offset, name, size

    def _get_data_tarfile(self):
        """Retreives the data about the offset, name and size of each component
        using the tarfile module, while also filtering out non-file entries
        Intended as a fallback for the gnu tar version (since it is much slower)"""
        print(
            "Warning: tar utility not found. Falling back to tarfile."
            + " Processing will most likely take much longer",
            file=sys.stderr,
        )
        farchive = tarfile.open(self.uri)
        for member in iter(farchive):
            if member.type != tarfile.REGTYPE:
                continue
            # After tarfile reads a member's header, the underlying file
            # object sits at the start of that member's data.
            offset = farchive.fileobj.tell()
            yield offset, member.name, member.size

    def create_index(self):
        """Creates the index file from a tar archive"""
        self.reset()
        pre_time = time.time()
        counter = 0
        report_step = 100000
        if self.verbose:
            print(f"time: {time.time() - pre_time:.2f} count: {counter} stage: collect")

        # Aggregates extensions in samples
        aggregated_data = []
        last_basename = None
        # Prefer the (much faster) gnu tar path when the binary is available.
        for offset, name, size in (
            self._get_data_tar() if which("tar") is not None else self._get_data_tarfile()
        ):
            if counter % report_step == 0 and counter > 0:
                cur_time = time.time()
                if self.verbose:
                    print(f"time: {cur_time - pre_time:.2f} count: {counter} stage: collect")
            counter += 1
            basename, extension = IndexCreator.split_name(name)

            # check for the files starting with a dot (hidden files)
            if not basename or basename.endswith("/"):
                continue

            # Consecutive archive entries sharing a basename form one sample.
            if last_basename != basename:
                aggregated_data.append([(extension, offset, size, name)])
                last_basename = basename
            else:
                aggregated_data[-1].append((extension, offset, size, name))

        if not aggregated_data:
            raise ValueError("Webdataset Tar File empty")

        # Constructs the index file out of the aggregated extensions
        self.fidx.write(f"{IndexCreator.index_file_version} {len(aggregated_data)}\n")
        for bundle in aggregated_data:
            if counter % report_step == 0:
                cur_time = time.time()
                if self.verbose:
                    print(f"time: {cur_time - pre_time:.2f} count: {counter} stage: index")
            # One line per sample: space-separated (ext offset size name) runs.
            self.fidx.write(" ".join(map(lambda component: " ".join(map(str, component)), bundle)))
            self.fidx.write("\n")
            counter += 1
        cur_time = time.time()
        if self.verbose:
            print(f"time: {cur_time - pre_time:.2f} count: {counter} stage: done")
def METHOD_NAME():
    """Parse command-line arguments for the webdataset index creator.

    Returns
    -------
    argparse.Namespace
        ``archive``: absolute path to the input .tar file.
        ``index``: absolute path to the output index file; when omitted it is
        derived from the archive path by swapping the extension for ``.idx``.
    """
    parser = argparse.ArgumentParser(
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
        description="Creates a webdataset index file for the use with the `fn.readers.webdataset`.",
    )
    parser.add_argument("archive", help="path to .tar file.")
    parser.add_argument(
        "index",
        help="path to index file",
        nargs="?",
    )
    # BUG FIX: this previously called `parser.METHOD_NAME()`, but
    # argparse.ArgumentParser has no such method — the intended call is
    # `parse_args()`.
    args = parser.parse_args()
    if args.index is None:
        # Derive the index path: strip the extension (first "." after the
        # final path separator) and append ".idx".
        args.index = args.archive[: args.archive.find(".", args.archive.rfind("/") + 2)] + ".idx"
    args.archive = os.path.abspath(args.archive)
    args.index = os.path.abspath(args.index)
    return args
def main():
    """Command-line entry point: build an index for the given archive."""
    options = METHOD_NAME()
    index_creator = IndexCreator(options.archive, options.index)
    index_creator.create_index()
    index_creator.close()


if __name__ == "__main__":
    main()
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import paddle
import numbers
import numpy as np
from ....common import get_logger
def METHOD_NAME():
    """Return 200 when paddle >= 2.0 alpha is installed, otherwise 185."""
    import paddle

    # Conv1D only exists in paddle.nn from the 2.0 alpha onwards.
    if hasattr(paddle, 'nn') and hasattr(paddle.nn, 'Conv1D'):
        return 200
    return 185
# Resolve the installed Paddle major version once at import time.
pd_ver = METHOD_NAME()
# NOTE(review): both branches bind the same class; presumably the 1.8.5
# branch once used a different dygraph Layer base — confirm before relying
# on the distinction.
if pd_ver == 185:
    Layer = paddle.nn.Layer
else:
    Layer = paddle.nn.Layer

# Module-level logger shared by the helpers below.
_logger = get_logger(__name__, level=logging.INFO)

__all__ = ['set_state_dict']
def set_state_dict(model, state_dict):
    """
    Set state dict from origin model to supernet model.

    Args:
        model(paddle.nn.Layer): model after convert to supernet.
        state_dict(dict): dict with the type of {name: param} in origin model.
    """
    assert isinstance(model, Layer)
    assert isinstance(state_dict, dict)
    for name, param in model.state_dict().items():
        # The supernet wraps parameters one path segment deeper; dropping the
        # second-to-last segment recovers the origin-model key
        # (e.g. "a.fn.w" -> "a.w").
        tmp_n = name.split('.')[:-2] + [name.split('.')[-1]]
        tmp_n = '.'.join(tmp_n)
        if name in state_dict:
            param.set_value(state_dict[name])
        elif tmp_n in state_dict:
            param.set_value(state_dict[tmp_n])
        else:
            _logger.info('{} is not in state_dict'.format(tmp_n))
def build_input(input_size, dtypes):
    """Construct random sample input(s) matching ``input_size``.

    Args:
        input_size: a shape (list of ints), a dict mapping input names to
            shapes, or a (possibly nested) list of the former.
        dtypes: a dtype, or a list of dtypes aligned with ``input_size``.

    Returns:
        A random tensor, a dict of random tensors, or a nested list of them.
        NOTE(review): falls through and returns ``None`` for any other
        ``input_size`` type — confirm callers never hit that case.
    """
    # Flat shape: every element is a number.
    if isinstance(input_size, list) and all(
            isinstance(i, numbers.Number) for i in input_size):
        if isinstance(dtypes, list):
            dtype = dtypes[0]
        else:
            dtype = dtypes
        # if dtype == paddle.framework.core.VarDesc.VarType.STRINGS:
        #     return to_tensor([""])
        # TODO: Uncommet Add unittest for strings dtype
        assert dtype != paddle.framework.core.VarDesc.VarType.STRINGS
        return paddle.cast(paddle.rand(list(input_size)), dtype)
    # Named inputs: one random tensor per key.
    if isinstance(input_size, dict):
        inputs = {}
        if isinstance(dtypes, list):
            dtype = dtypes[0]
        else:
            dtype = dtypes
        for key, value in input_size.items():
            inputs[key] = paddle.cast(paddle.rand(list(value)), dtype)
        return inputs
    # Nested list of shapes: recurse element-wise.
    # NOTE(review): this branch assumes ``dtypes`` is a list; a scalar dtype
    # would make ``zip`` raise — confirm against callers.
    if isinstance(input_size, list):
        return [build_input(i, dtype) for i, dtype in zip(input_size, dtypes)]
def remove_model_fn(model, state_dict):
    """Remap a saved state dict onto the model's own parameter names,
    dropping the supernet ``fn`` wrapper segment from keys where needed."""
    model_keys = [name for name, _ in model.state_dict().items()]

    remapped = {}
    for key, value in state_dict.items():
        parts = key.split('.')

        # Short keys are copied through untouched.
        if len(parts) <= 2:
            remapped[key] = value
            continue

        stripped = None
        if parts[-2] == 'fn':
            # Drop the penultimate "fn" segment: "a.fn.w" -> "a.w".
            stripped = '.'.join(parts[:-2] + parts[-1:])

        if key in model_keys:
            remapped[key] = value
        elif stripped in model_keys:
            remapped[stripped] = value
        else:
            _logger.debug('{} is not in state_dict'.format(stripped))
    return remapped
def compute_start_end(kernel_size, sub_kernel_size):
    """Return the [start, end) bounds that center a sub-kernel in a kernel."""
    # Centering the sub-kernel: offset between the two midpoints.
    start = kernel_size // 2 - sub_kernel_size // 2
    end = start + sub_kernel_size
    # Sanity check: the slice must span exactly the sub-kernel width.
    assert end - start == sub_kernel_size
    return start, end
def get_same_padding(kernel_size):
    """'Same' padding for an odd convolution kernel: half the width."""
    assert isinstance(kernel_size, int)
    assert kernel_size % 2 > 0, "kernel size must be odd number"
    half_width = kernel_size // 2
    return half_width
def convert_to_list(value, n):
    """Repeat ``value`` ``n`` times as a list."""
    return [value for _ in range(n)]
def search_idx(num, sorted_nestlist):
    """Locate the first (task, phase) whose threshold covers ``num``.

    Falls back to the last phase of the last task; in that case ``num`` must
    exceed every threshold (asserted).
    """
    last_bound = -1
    last_phase = -1
    for task_idx, task in enumerate(sorted_nestlist):
        last_bound = task[-1]
        last_phase = len(task) - 1
        for phase_idx, bound in enumerate(task):
            if num <= bound:
                return task_idx, phase_idx
    # No threshold covered num — it must be beyond the final bound.
    assert num > last_bound
    return len(sorted_nestlist) - 1, last_phase
# Copyright © 2012-2023 jrnl contributors
# License: https://www.gnu.org/licenses/gpl-3.0.html
import json
import os
import random
import shutil
import string
from datetime import datetime
from unittest.mock import MagicMock
from unittest.mock import patch
from xml.etree import ElementTree
from pytest_bdd import given
from pytest_bdd.parsers import parse
from jrnl import __version__
from jrnl.time import __get_pdt_calendar
from tests.lib.fixtures import FailedKeyring
from tests.lib.fixtures import NoKeyring
from tests.lib.fixtures import TestKeyring
@given(parse("we {editor_method} to the editor if opened\n{editor_input}"))
@given(parse("we {editor_method} nothing to the editor if opened"))
def we_enter_editor(editor_method, editor_input, editor_state):
    """Record the text and file mode the fake editor should use if launched."""
    file_method = editor_state["intent"]["method"]
    if editor_method == "write":
        file_method = "w+"
    elif editor_method == "append":
        file_method = "a+"
    else:
        assert False, f"Method '{editor_method}' not supported"

    # editor_input is None for the "nothing" step variant.
    editor_state["intent"] = {"method": file_method, "input": editor_input}
@given(parse('now is "{date_str}"'))
def now_is_str(date_str, mock_factories):
    """Freeze "now" to *date_str* for both datetime and parsedatetime."""

    class DatetimeMagicMock(MagicMock):
        # needed because jrnl does some reflection on datetime
        def __instancecheck__(self, subclass):
            return isinstance(subclass, datetime)

    def mocked_now(tz=None):
        now = datetime.strptime(date_str, "%Y-%m-%d %I:%M:%S %p")

        if tz:
            # NOTE(review): attaches the machine's local zone regardless of
            # the requested tz — confirm this is intentional.
            time_zone = datetime.utcnow().astimezone().tzinfo
            now = now.replace(tzinfo=time_zone)

        return now

    # jrnl uses two different classes to parse dates, so both must be mocked
    datetime_mock = DatetimeMagicMock(wraps=datetime)
    datetime_mock.now.side_effect = mocked_now

    pdt = __get_pdt_calendar()
    calendar_mock = MagicMock(wraps=pdt)
    calendar_mock.parse.side_effect = lambda date_str_input: pdt.parse(
        date_str_input, mocked_now()
    )

    # The factories are applied lazily by the harness when jrnl is run.
    mock_factories["datetime"] = lambda: patch("datetime.datetime", new=datetime_mock)
    mock_factories["calendar_parse"] = lambda: patch(
        "jrnl.time.__get_pdt_calendar", return_value=calendar_mock
    )
@given("we don't have a keyring", target_fixture="keyring")
def we_dont_have_keyring(keyring_type):
    """Provide a keyring stub that behaves as if no backend is installed."""
    # NOTE(review): `keyring_type` is unused here; presumably required only
    # to satisfy a shared fixture signature — confirm.
    return NoKeyring()


@given("we have a keyring", target_fixture="keyring")
@given(parse("we have a {keyring_type} keyring"), target_fixture="keyring")
def METHOD_NAME(keyring_type):
    """Provide a working test keyring, or a failing one for "failed"."""
    match keyring_type:
        case "failed":
            return FailedKeyring()
        case _:
            return TestKeyring()
@given(parse("we use no config"), target_fixture="config_path")
def we_use_no_config(temp_dir):
    """Run from the temp dir and point jrnl at a config file that doesn't exist."""
    os.chdir(temp_dir.name)  # @todo move this step to a more universal place
    return os.path.join(temp_dir.name, "non_existing_config.yaml")
@given(parse('we use the config "{config_file}"'), target_fixture="config_path")
def we_use_the_config(request, temp_dir, working_dir, config_file):
    """Copy *config_file* plus all journals and templates into the temp dir."""
    # Move into temp dir as cwd
    os.chdir(temp_dir.name)  # @todo move this step to a more universal place

    # Copy the config file over
    config_source = os.path.join(working_dir, "data", "configs", config_file)
    config_dest = os.path.join(temp_dir.name, config_file)
    shutil.copy2(config_source, config_dest)

    # @todo make this only copy some journals over
    # Copy all of the journals over
    journal_source = os.path.join(working_dir, "data", "journals")
    journal_dest = os.path.join(temp_dir.name, "features", "journals")
    shutil.copytree(journal_source, journal_dest)

    # @todo maybe only copy needed templates over?
    # Copy all of the templates over
    template_source = os.path.join(working_dir, "data", "templates")
    template_dest = os.path.join(temp_dir.name, "features", "templates")
    shutil.copytree(template_source, template_dest)

    # @todo get rid of this by using default config values
    # merge in version number
    if (
        config_file.endswith("yaml")
        and os.path.exists(config_dest)
        and os.path.getsize(config_dest) > 0
    ):
        # Add jrnl version to file for 2.x journals
        with open(config_dest, "a") as cf:
            cf.write("version: {}".format(__version__))

    return config_dest
@given(
    parse('we copy the template "{template_file}" to the default templates folder'),
    target_fixture="default_templates_path",
)
def we_copy_the_template(request, temp_dir, working_dir, template_file):
    """Copy *template_file* into the temp dir's templates folder and return its path."""
    # Move into temp dir as cwd
    os.chdir(temp_dir.name)  # @todo move this step to a more universal place

    # Copy template over
    template_source = os.path.join(working_dir, "data", "templates", template_file)
    template_dest = os.path.join(temp_dir.name, "templates", template_file)
    os.makedirs(os.path.dirname(template_dest), exist_ok=True)
    shutil.copy2(template_source, template_dest)

    return template_dest
@given(parse('the config "{config_file}" exists'), target_fixture="config_path")
def config_exists(config_file, temp_dir, working_dir):
    """Copy *config_file* into the temp dir without switching into it."""
    config_source = os.path.join(working_dir, "data", "configs", config_file)
    config_dest = os.path.join(temp_dir.name, config_file)
    shutil.copy2(config_source, config_dest)
    # NOTE(review): returns None even though it is a "config_path" fixture —
    # confirm callers don't rely on the path.


@given(parse('we use the password "{password}" if prompted'), target_fixture="password")
def use_password_forever(password):
    """Expose the scenario's password as a fixture for prompt mocking."""
    return password


@given("we create a cache directory", target_fixture="cache_dir")
def create_cache_dir(temp_dir):
    """Create a uniquely named cache directory under the temp dir."""
    # Random suffix avoids collisions across parallel scenarios.
    random_str = "".join(random.choices(string.ascii_uppercase + string.digits, k=20))

    dir_path = os.path.join(temp_dir.name, "cache_" + random_str)
    os.mkdir(dir_path)
    return {"exists": True, "path": dir_path}
@given(parse("we parse the output as {language_name}"), target_fixture="parsed_output")
def parse_output_as_language(cli_run, language_name):
    """Parse the captured stdout as XML or JSON and return the parsed object."""
    language_name = language_name.upper()
    actual_output = cli_run["stdout"]

    if language_name == "XML":
        parsed_output = ElementTree.fromstring(actual_output)
    elif language_name == "JSON":
        parsed_output = json.loads(actual_output)
    else:
        assert False, f"Language name {language_name} not recognized"

    return {"lang": language_name, "obj": parsed_output}


@given(parse('the home directory is called "{home_dir}"'))
def home_directory(temp_dir, home_dir, monkeypatch):
    """Redirect the user's home directory into the temp dir (POSIX + Windows)."""
    home_path = os.path.join(temp_dir.name, home_dir)
    monkeypatch.setenv("USERPROFILE", home_path)  # for windows
    monkeypatch.setenv("HOME", home_path)  # for *nix
# Copyright 2020 The AutoKeras Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import keras_tuner
import tensorflow as tf
from tensorflow import keras
from tensorflow import nest
from autokeras import analysers
from autokeras import blocks
from autokeras import test_utils
def _image_outputs(block):
    """Build *block* on a 32x32x3 float image input and return the outputs."""
    return block.build(
        keras_tuner.HyperParameters(),
        keras.Input(shape=(32, 32, 3), dtype=tf.float32),
    )


def test_image_build_return_tensor():
    """A default ImageBlock builds to a single output tensor."""
    assert len(nest.flatten(_image_outputs(blocks.ImageBlock()))) == 1


def test_image_block_xception_return_tensor():
    """The xception variant also builds to a single output tensor."""
    assert len(nest.flatten(_image_outputs(blocks.ImageBlock(block_type="xception")))) == 1


def test_image_block_normalize_return_tensor():
    """Enabling normalization does not change the output arity."""
    assert len(nest.flatten(_image_outputs(blocks.ImageBlock(normalize=True)))) == 1


def test_image_block_augment_return_tensor():
    """Enabling augmentation does not change the output arity."""
    assert len(nest.flatten(_image_outputs(blocks.ImageBlock(augment=True)))) == 1


def test_image_deserialize_to_image():
    """serialize/deserialize round-trips to the same block type."""
    round_tripped = blocks.deserialize(blocks.serialize(blocks.ImageBlock()))
    assert isinstance(round_tripped, blocks.ImageBlock)


def test_image_get_config_has_all_attributes():
    """get_config exposes every ImageBlock.__init__ argument."""
    config_keys = blocks.ImageBlock().get_config().keys()
    assert test_utils.get_func_args(blocks.ImageBlock.__init__).issubset(config_keys)
def _text_outputs(block):
    """Build *block* on a scalar string input and return the outputs."""
    return block.build(
        keras_tuner.HyperParameters(), keras.Input(shape=(1,), dtype=tf.string)
    )


def test_text_build_return_tensor():
    """A default TextBlock builds to a single output tensor."""
    assert len(nest.flatten(_text_outputs(blocks.TextBlock()))) == 1


def test_text_block_ngram_return_tensor():
    """The ngram variant also builds to a single output tensor."""
    assert len(nest.flatten(_text_outputs(blocks.TextBlock(block_type="ngram")))) == 1


def test_text_block_transformer_return_tensor():
    """The transformer variant also builds to a single output tensor."""
    assert len(nest.flatten(_text_outputs(blocks.TextBlock(block_type="transformer")))) == 1


def test_text_deserialize_to_text():
    """serialize/deserialize round-trips to the same block type."""
    round_tripped = blocks.deserialize(blocks.serialize(blocks.TextBlock()))
    assert isinstance(round_tripped, blocks.TextBlock)


def test_text_get_config_has_all_attributes():
    """get_config exposes every TextBlock.__init__ argument."""
    config_keys = blocks.TextBlock().get_config().keys()
    assert test_utils.get_func_args(blocks.TextBlock.__init__).issubset(config_keys)
def _with_two_numerical_columns(block):
    """Attach two numerical column names/types to *block* and return it."""
    block.column_names = ["0", "1"]
    block.column_types = {"0": analysers.NUMERICAL, "1": analysers.NUMERICAL}
    return block


def test_structured_build_return_tensor():
    """A default StructuredDataBlock builds to a single output tensor."""
    block = _with_two_numerical_columns(blocks.StructuredDataBlock())
    outputs = block.build(
        keras_tuner.HyperParameters(), keras.Input(shape=(2,), dtype=tf.string)
    )
    assert len(nest.flatten(outputs)) == 1


def test_structured_block_normalize_return_tensor():
    """Enabling normalization does not change the output arity."""
    block = _with_two_numerical_columns(blocks.StructuredDataBlock(normalize=True))
    outputs = block.build(
        keras_tuner.HyperParameters(), keras.Input(shape=(2,), dtype=tf.string)
    )
    assert len(nest.flatten(outputs)) == 1


def test_structured_block_search_normalize_return_tensor():
    """Forcing the normalize hyperparameter via hp.values still builds."""
    block = _with_two_numerical_columns(blocks.StructuredDataBlock(name="a"))
    hp = keras_tuner.HyperParameters()
    hp.values["a/" + blocks.wrapper.NORMALIZE] = True
    outputs = block.build(hp, keras.Input(shape=(2,), dtype=tf.string))
    assert len(nest.flatten(outputs)) == 1


def test_structured_deserialize_to_structured():
    """serialize/deserialize round-trips to the same block type."""
    round_tripped = blocks.deserialize(blocks.serialize(blocks.StructuredDataBlock()))
    assert isinstance(round_tripped, blocks.StructuredDataBlock)


def test_structured_get_config_has_all_attributes():
    """get_config exposes every StructuredDataBlock.__init__ argument."""
    config_keys = blocks.StructuredDataBlock().get_config().keys()
    assert test_utils.get_func_args(
        blocks.StructuredDataBlock.__init__
    ).issubset(config_keys)
def test_timeseries_build_return_tensor():
    """A TimeseriesBlock with two numerical columns builds to one tensor."""
    block = blocks.TimeseriesBlock()
    block.column_names = ["0", "1"]
    block.column_types = {"0": analysers.NUMERICAL, "1": analysers.NUMERICAL}
    built = block.build(
        keras_tuner.HyperParameters(),
        keras.Input(shape=(32, 2), dtype=tf.float32),
    )
    assert len(nest.flatten(built)) == 1


def test_timeseries_deserialize_to_timeseries():
    """serialize/deserialize round-trips to the same block type."""
    round_tripped = blocks.deserialize(blocks.serialize(blocks.TimeseriesBlock()))
    assert isinstance(round_tripped, blocks.TimeseriesBlock)


def METHOD_NAME():
    """get_config exposes every TimeseriesBlock.__init__ argument."""
    config_keys = blocks.TimeseriesBlock().get_config().keys()
    assert test_utils.get_func_args(blocks.TimeseriesBlock.__init__).issubset(config_keys)
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2020 Satpy developers
#
# satpy is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# satpy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with satpy. If not, see <http://www.gnu.org/licenses/>.
"""The vii_l1b_nc reader tests package.
This version tests the readers for VII test data V2 as per PFS V4A.
"""
import datetime
import os
import unittest
import uuid
import dask.array as da
import numpy as np
import xarray as xr
from netCDF4 import Dataset
from satpy.readers.vii_l1b_nc import ViiL1bNCFileHandler
from satpy.readers.vii_utils import MEAN_EARTH_RADIUS
# Base name for the netCDF file each test creates on the fly.
TEST_FILE = 'test_file_vii_l1b_nc.nc'
class TestViiL1bNCFileHandler(unittest.TestCase):
    """Test the ViiL1bNCFileHandler reader."""

    def setUp(self):
        """Set up the test."""
        # Easiest way to test the reader is to create a test netCDF file on the fly
        # uses a UUID to avoid permission conflicts during execution of tests in parallel
        self.test_file_name = TEST_FILE + str(uuid.uuid1()) + ".nc"

        with Dataset(self.test_file_name, 'w') as nc:
            # Create data group
            g1 = nc.createGroup('data')

            # Add dimensions to data group
            g1.createDimension('num_chan_solar', 11)
            g1.createDimension('num_chan_thermal', 9)
            g1.createDimension('num_pixels', 72)
            g1.createDimension('num_lines', 600)

            # Create calibration_data group
            g1_1 = g1.createGroup('calibration_data')

            # Add variables to data/calibration_data group
            bt_a = g1_1.createVariable('bt_conversion_a', np.float32, dimensions=('num_chan_thermal',))
            bt_a[:] = np.arange(9)
            bt_b = g1_1.createVariable('bt_conversion_b', np.float32, dimensions=('num_chan_thermal',))
            bt_b[:] = np.arange(9)
            cw = g1_1.createVariable('channel_cw_thermal', np.float32, dimensions=('num_chan_thermal',))
            cw[:] = np.arange(9)
            isi = g1_1.createVariable('Band_averaged_solar_irradiance', np.float32, dimensions=('num_chan_solar',))
            isi[:] = np.arange(11)

            # Create measurement_data group
            g1_2 = g1.createGroup('measurement_data')

            # Add dimensions to data/measurement_data group
            g1_2.createDimension('num_tie_points_act', 10)
            g1_2.createDimension('num_tie_points_alt', 100)

            # Add variables to data/measurement_data group
            sza = g1_2.createVariable('solar_zenith', np.float32,
                                      dimensions=('num_tie_points_alt', 'num_tie_points_act'))
            sza[:] = 25.0
            delta_lat = g1_2.createVariable('delta_lat', np.float32, dimensions=('num_lines', 'num_pixels'))
            delta_lat[:] = 1.0

        # Handler under test, pointed at the file written above.
        self.reader = ViiL1bNCFileHandler(
            filename=self.test_file_name,
            filename_info={
                'creation_time': datetime.datetime(year=2017, month=9, day=22,
                                                   hour=22, minute=40, second=10),
                'sensing_start_time': datetime.datetime(year=2017, month=9, day=20,
                                                        hour=12, minute=30, second=30),
                'sensing_end_time': datetime.datetime(year=2017, month=9, day=20,
                                                      hour=18, minute=30, second=50)
            },
            filetype_info={}
        )

    def tearDown(self):
        """Remove the previously created test file."""
        # Catch Windows PermissionError for removing the created test file.
        try:
            os.remove(self.test_file_name)
        except OSError:
            pass

    def METHOD_NAME(self):
        """Test the calibration functions."""
        radiance = np.array([[1.0, 2.0, 5.0], [7.0, 10.0, 20.0]])
        cw = 13.0
        a = 3.0
        b = 100.0
        # Brightness-temperature conversion against pre-computed expectations.
        bt = self.reader._calibrate_bt(radiance, cw, a, b)
        expected_bt = np.array([[675.04993213, 753.10301462, 894.93149648],
                                [963.20401882, 1048.95086402, 1270.95546218]])
        self.assertTrue(np.allclose(bt, expected_bt))

        # Reflectance conversion against pre-computed expectations.
        angle_factor = 0.4
        isi = 2.0
        refl = self.reader._calibrate_refl(radiance, angle_factor, isi)
        expected_refl = np.array([[62.8318531, 125.6637061, 314.1592654],
                                  [439.8229715, 628.3185307, 1256.637061]])
        self.assertTrue(np.allclose(refl, expected_refl))

    def test_functions(self):
        """Test the functions."""
        # Checks that the _perform_orthorectification function is correctly executed
        variable = xr.DataArray(
            dims=('num_lines', 'num_pixels'),
            name='test_name',
            attrs={
                'key_1': 'value_1',
                'key_2': 'value_2'
            },
            data=da.from_array(np.ones((600, 72)))
        )
        orthorect_variable = self.reader._perform_orthorectification(variable, 'data/measurement_data/delta_lat')
        # Orthorectification adds the delta converted from metres to degrees.
        expected_values = np.degrees(np.ones((600, 72)) / MEAN_EARTH_RADIUS) + np.ones((600, 72))
        self.assertTrue(np.allclose(orthorect_variable.values, expected_values))

        # Checks that the _perform_calibration function is correctly executed in all cases
        # radiance calibration: return value is simply a copy of the variable
        return_variable = self.reader._perform_calibration(variable, {'calibration': 'radiance'})
        self.assertTrue(np.all(return_variable == variable))

        # invalid calibration: raises a ValueError
        with self.assertRaises(ValueError):
            self.reader._perform_calibration(variable,
                                             {'calibration': 'invalid', 'name': 'test'})

        # brightness_temperature calibration: checks that the return value is correct
        calibrated_variable = self.reader._perform_calibration(variable,
                                                               {'calibration': 'brightness_temperature',
                                                                'chan_thermal_index': 3})
        expected_values = np.full((600, 72), 1101.10413712)
        self.assertTrue(np.allclose(calibrated_variable.values, expected_values))

        # reflectance calibration: checks that the return value is correct
        calibrated_variable = self.reader._perform_calibration(variable,
                                                               {'calibration': 'reflectance',
                                                                'wavelength': [0.658, 0.668, 0.678],
                                                                'chan_solar_index': 2})
        expected_values = np.full((600, 72), 173.3181982)
        self.assertTrue(np.allclose(calibrated_variable.values, expected_values))
import unittest
import numpy as np
import pycqed as pq
import os
import matplotlib.pyplot as plt
from pycqed.analysis_v2 import measurement_analysis as ma
class Test_Cryoscope_analysis(unittest.TestCase):
    """Regression tests for the cryoscope analysis classes, run against
    recorded datasets bundled under pycqed/tests/test_data."""

    @classmethod
    def tearDownClass(self):
        # Free the figures opened by the analyses.
        plt.close('all')

    @classmethod
    def setUpClass(self):
        # Point the analysis toolbox at the bundled test datasets.
        self.datadir = os.path.join(pq.__path__[0], 'tests', 'test_data')
        ma.a_tools.datadir = self.datadir

    @unittest.skip("FIXME: PR #658: test broken by commit bd19f56: 'TypeError: __init__() missing 1 required positional argument: 'raw_data''")
    def test_Cryoscope_Analysis(self):
        a = ma.Cryoscope_Analysis(
            t_start='20180423_114715',
            polycoeffs_freq_conv='Snapshot/instruments/FL_LutMan_QR/parameters/polycoeffs_freq_conv/value',
            derivative_window_length=2e-9)

        # Only checks that all expected figures/axes were produced.
        expected_figs = {'raw_data', 'demod_data', 'norm_data_circ',
                         'demod_phase', 'frequency_detuning',
                         'cryoscope_amplitude', 'short_time_fft'}
        self.assertTrue(expected_figs.issubset(set(a.figs.keys())))
        self.assertTrue(expected_figs.issubset(set(a.axs.keys())))
        # Does not actually check for the content

    @unittest.skip("FIXME: PR #658: test broken by commit bd19f56: 'TypeError: __init__() missing 1 required positional argument: 'raw_data''")
    def test_RamZFluxArc(self):
        a = ma.RamZFluxArc(t_start='20180205_105633', t_stop='20180205_120210',
                           ch_idx_cos=2, ch_idx_sin=3)
        poly_coeffs = a.proc_data_dict['poly_coeffs']

        # test dac arc conversion
        # For this to work all other parts have to work
        amps = np.linspace(.1, 1, 21)
        freqs = a.amp_to_freq(amps)
        # amp -> freq -> amp must round-trip for every inversion strategy.
        rec_amps = a.freq_to_amp(freqs, kind='interpolate')
        np.testing.assert_array_almost_equal(amps, rec_amps, decimal=2)

        rec_amps = a.freq_to_amp(freqs, kind='root')
        np.testing.assert_array_almost_equal(amps, rec_amps, decimal=2)

        rec_amps = a.freq_to_amp(freqs, kind='root_parabola')
        np.testing.assert_array_almost_equal(amps, rec_amps, decimal=2)
        np.testing.assert_array_almost_equal(amps, rec_amps, decimal=2)

        poly_coeffs = a.proc_data_dict['poly_coeffs']
        exp_poly_coeffs = np.array(
            [1.36263320e+09, -2.23862128e+08, 3.87339064e+07])
        print(poly_coeffs)
        # decimal=-7: large Hz-scale coefficients only need ~1e7 agreement.
        np.testing.assert_array_almost_equal(poly_coeffs, exp_poly_coeffs,
                                             decimal=-7)

    @unittest.skip("FIXME: PR #658: test broken by commit bd19f56: 'TypeError: __init__() missing 1 required positional argument: 'raw_data''")
    def METHOD_NAME(self):
        a = ma.SlidingPulses_Analysis(t_start='20180221_195729')
        # Pinned regression values for the extracted phases.
        exp_phase = np.array(
            [132.48846657, 288.37102808, 298.68161824, 307.02336668,
             303.94512662, 306.71370643, 305.59951102, 303.79221692,
             311.98804177, 318.1734892, 331.79725518, 322.90068287,
             341.15829614, 328.38539928, 337.929674, 335.46041175,
             310.84851162, 333.47238641, 343.83919864, 339.75778735,
             350.46377994, 311.97060112, 327.71100615, 343.67186721,
             326.73144141])
        np.testing.assert_array_almost_equal(
            a.proc_data_dict['phase'], exp_phase)

        # a reference curve is needed to convert to amps
        da = ma.RamZFluxArc(t_start='20180205_105633',
                            t_stop='20180205_120210',
                            ch_idx_cos=2, ch_idx_sin=3)
        a = ma.SlidingPulses_Analysis(
            t_start='20180221_195729',
            freq_to_amp=da.freq_to_amp, amp_to_freq=da.amp_to_freq)
        # Pinned regression values for the converted amplitudes.
        exp_amps = np.array(
            [0.87382635, 0.87473807, 0.87479834, 0.8748471, 0.87482911,
             0.87484529, 0.87483878, 0.87482821, 0.87487611, 0.87491226,
             0.87499188, 0.87493989, 0.87504658, 0.87497194, 0.87502771,
             0.87501328, 0.87486945, 0.87500167, 0.87506224, 0.87503839,
             0.87510095, 0.87487601, 0.874968, 0.87506126, 0.87496228])
        np.testing.assert_array_almost_equal(a.proc_data_dict['amp'], exp_amps,
                                             decimal=2)
4,763 | get run function | # Copyright 2021-2023 VMware, Inc.
# SPDX-License-Identifier: Apache-2.0
import logging
import sys
import traceback
from dataclasses import dataclass
from typing import Callable
from typing import Union
from vdk.api.job_input import IJobInput
from vdk.internal.builtin_plugins.run.file_based_step import TYPE_PYTHON
from vdk.internal.builtin_plugins.run.file_based_step import TYPE_SQL
from vdk.internal.builtin_plugins.run.step import Step
from vdk.internal.core import errors
log = logging.getLogger(__name__)
# may contain duplicated code from
# https://github.com/vmware/versatile-data-kit/blob/main/projects/vdk-core/src/vdk/internal/builtin_plugins/run/file_based_step.py
# The function accepts a NotebookCellStep (defined below) and an IJobInput and
# returns True if the step has been executed and False if it is not a (valid) executable step.
# On error it is expected to raise an exception.
NotebookStepFunction = Callable[["NotebookCellStep", IJobInput], bool]
@dataclass
class NotebookCellStep(Step):
    """
    A notebook step that will be executed when running a data job.
    Step class attributes:
    ::name: str - the name of the concrete step (e.g name of the file)
    ::type: str - string representing a step type (sql or python).
    ::runner_func: StepFunction - function that will execute the actual step
    ::file_path: pathlib.Path - file where the step is defined
    ::job_dir: pathlib.Path - the root job directory
    ::parent: Step | None = None - parent Step
    Additional attributes:
    ::source: str - the code string retrieved from Jupyter code cell
    ::module: module object - the module the code belongs to
    (see imp.new_module in https://docs.python.org/3/library/imp.html)
    """

    # NOTE(review): @dataclass is effectively inert here -- there are no
    # annotated fields and an explicit __init__ is defined, which dataclass
    # does not overwrite. Confirm whether the decorator can be dropped.
    def __init__(
        self,
        name,
        type,
        runner_func,
        file_path,
        job_dir,
        source,
        cell_id,
        module=None,
        parent=None,
    ):
        super().__init__(name, type, runner_func, file_path, job_dir, parent)
        # NOTE(review): runner_func is presumably already stored by
        # Step.__init__ (it is passed through above) -- confirm whether this
        # re-assignment is needed.
        self.runner_func = runner_func
        # Raw cell source code and the id of the notebook cell it came from.
        self.source = source
        self.module = module
        self.cell_id = cell_id
class NotebookStepFuncFactory:
    """
    Implementations of runner_func for running Notebook steps
    """

    @staticmethod
    def METHOD_NAME(source_type: str) -> Callable:
        """Return the runner function for the given source type.

        TYPE_PYTHON and TYPE_SQL are string constants, not types, so the
        parameter is annotated as ``str`` (``Union[TYPE_PYTHON, TYPE_SQL]``
        was invalid typing: Union of values, interpreted as forward refs).

        :param source_type: one of TYPE_PYTHON or TYPE_SQL
        :raises NotImplementedError: for any other source type
        """
        if source_type == TYPE_PYTHON:
            return NotebookStepFuncFactory.run_python_step
        elif source_type == TYPE_SQL:
            return NotebookStepFuncFactory.run_sql_step
        else:
            raise NotImplementedError(
                f"Run function for source type {source_type} is not implemented."
            )

    @staticmethod
    def run_sql_step(step: NotebookCellStep, job_input: IJobInput) -> bool:
        """Execute the cell source as a single SQL query."""
        job_input.execute_query(step.source)
        return True

    @staticmethod
    def run_python_step(step: NotebookCellStep, job_input: IJobInput) -> bool:
        """Execute the cell source as Python code inside the step's module.

        The job directory is temporarily put on sys.path so the cell can
        import job-local modules. Errors are classified as user errors and
        re-raised with context about the originating cell.
        """
        try:
            sys.path.insert(0, str(step.job_dir))
            success = False
            try:
                log.debug("Loading %s ..." % step.name)
                # Expose job_input to the executed cell via its module globals.
                step.module.job_input = job_input
                exec(step.source, step.module.__dict__)
                log.debug("Loading %s SUCCESS" % step.name)
                success = True
            except SyntaxError as e:
                log.info("Loading %s FAILURE" % step.name)
                # SyntaxError carries its own line number.
                errors.log_and_rethrow(
                    to_be_fixed_by=errors.ResolvableBy.USER_ERROR,
                    log=log,
                    what_happened=f"Failed loading job sources of {step.name} from cell with cell_id:{step.cell_id}"
                    f" from {step.file_path.name}",
                    why_it_happened=f"{e.__class__.__name__} at line {e.lineno} of {step.name}"
                    f": {e.args[0]}",
                    consequences=f"Current Step {step.name} from {step.file_path}"
                    f"will fail, and as a result the whole Data Job will fail. ",
                    countermeasures=f"Please, check the {step.file_path.name} file again for syntax errors",
                    exception=e,
                    wrap_in_vdk_error=True,
                )
            except Exception as e:
                # Recover the failing line number from the traceback.
                cl, exc, tb = sys.exc_info()
                line_number = traceback.extract_tb(tb)[-1][1]
                errors.log_and_rethrow(
                    to_be_fixed_by=errors.ResolvableBy.USER_ERROR,
                    log=log,
                    what_happened=f"Failed loading job sources of {step.name} from cell with cell_id:{step.cell_id}"
                    f" from {step.file_path.name}",
                    why_it_happened=f"{e.__class__.__name__} at line {line_number} of {step.name}"
                    f": {e.args[0]}",
                    consequences=f"Current Step {step.name} from {step.file_path}"
                    f"will fail, and as a result the whole Data Job will fail. ",
                    countermeasures=f"Please, check the {step.file_path.name} file again for errors",
                    exception=e,
                    wrap_in_vdk_error=True,
                )
            return success
        finally:
            # Always undo the sys.path manipulation, even on failure.
            sys.path.remove(str(step.job_dir))
4,764 | test overwrite jp2 | """
Helioviewer Client tests
"""
import os
import urllib
from collections import OrderedDict
import pytest
from sunpy.net.helioviewer import HelioviewerClient
pytestmark = pytest.mark.filterwarnings('ignore:The HelioviewerClient class is deprecated')
@pytest.fixture()
def client():
    """
    Fixture to create a client and skip tests if not available
    """
    try:
        client = HelioviewerClient()
        client.sources = client.get_data_sources()
        return client
    except urllib.error.HTTPError as e:
        # The remote Helioviewer service can be down; skip rather than fail.
        pytest.skip(f"There was a HTTP error {e.code} {e.args} for "
                    "HelioViewer.")
@pytest.mark.remote_data
def test_get_datasources(client):
    """
    Tests get_data_sources and data_sources and that they match.
    """
    assert isinstance(client.data_sources, OrderedDict)
    assert isinstance(client.sources, dict)
    # Rough check that the ordered dict is ordered
    assert list(client.data_sources.values())[0:3] == [0, 1, 2]
    # The same source id must be reachable through both lookup structures.
    aia_4500_id = client.data_sources['SDO', 'AIA', None, '4500']
    aia_4500_id_copy = client.sources['SDO']['AIA']['4500']['sourceId']
    assert isinstance(aia_4500_id, int)
    assert isinstance(aia_4500_id_copy, int)
    assert aia_4500_id == aia_4500_id_copy
@pytest.mark.remote_data
def test_keyvalue_all(client):
    """
    Checks that we raise the correct error for these functions.
    """
    # Both API wrappers require keyword filters; a bare date must raise.
    for api_call in (client.get_closest_image, client.download_jp2):
        with pytest.raises(KeyError):
            api_call("2012/01/01")
@pytest.mark.remote_data
def test_get_closest_image(client):
    """Tests getClosestImage API method"""
    if client._api != "https://api.helioviewer.org":
        pytest.skip("Only works with helioviewer.org")
    image_meta = client.get_closest_image('1994/01/01', observatory='SOHO',
                                          instrument='EIT', measurement='304')
    assert isinstance(image_meta, dict)
    assert image_meta['id'] == "1795504"
    # The returned JP2 is square, 1024x1024 px.  (A second, tautological
    # "height == height" assertion was removed.)
    assert image_meta['width'] == image_meta['height'] == 1024
    assert image_meta['name'] == 'EIT 304'
    # Looking the source up by its numeric id must give identical metadata.
    source_id = client.data_sources['SOHO', 'EIT', None, '304']
    image_meta_id = client.get_closest_image('1994/01/01', source_id=source_id)
    assert image_meta == image_meta_id
@pytest.mark.remote_data
def test_download_jp2(client):
    """
    Tests getJP2Image API method.
    """
    # NOTE(review): the requested date (2012/01/01) differs from the filename
    # asserted below (2011_01_11). Presumably the API returns the *closest*
    # available MDI image (MDI stopped observing in 2011) -- confirm.
    filepath = client.download_jp2('2012/01/01', observatory='SOHO',
                                   instrument='MDI', measurement='continuum')
    assert "2011_01_11__22_39_00_000__SOHO_MDI_MDI_continuum.jp2" in filepath
    os.remove(filepath)
@pytest.mark.remote_data
def test_get_jp2_header(client):
    """
    Tests getJP2Header API method
    """
    if client._api != "https://api.helioviewer.org":
        pytest.skip("Only works with helioviewer.org")
    # The same header must be retrievable by date filters and by jp2_id.
    header1 = client.get_jp2_header('1994/01/01', observatory='SOHO',
                                    instrument='EIT', measurement='304')
    header2 = client.get_jp2_header('1994/01/01', jp2_id=1795504)
    assert header1 == header2
    # Each result is a single-entry dict keyed by 'fits'.
    assert len(header1) == len(header2) == 1
    assert ('fits' in header1.keys())
    assert ('fits' in header2.keys())
@pytest.mark.remote_data
def test_download_directory_not_exist_all(client):
    """
    Tests for missing directory.
    """
    # Downloading into a directory that does not exist yet must create it,
    # both for the jp2 and the png download paths.
    fake_dir = os.path.join(str(tmpdir), 'directorynotexist')
    filepath = client.download_jp2('2020/01/01', observatory='SOHO',
                                   instrument='MDI', measurement='continuum',
                                   directory=fake_dir)
    assert 'directorynotexist' in filepath
    os.remove(filepath)
    fake_dir = os.path.join(str(tmpdir), 'directorynotexist_2')
    filepath = client.download_png('2020/01/01', 2.4, "[SOHO,MDI,continuum,1,100]",
                                   directory=fake_dir)
    assert 'directorynotexist_2' in filepath
    os.remove(filepath)
@pytest.mark.remote_data
def METHOD_NAME(client):
    """
    Tests for that overwrites, overwrites jp2 edition.
    """
    # With overwrite=False the second download reuses the existing file, so
    # the same path is returned.
    filepath = client.download_jp2('2020/01/01', observatory='SOHO',
                                   instrument='MDI', measurement='continuum',
                                   overwrite=False)
    filepath_2 = client.download_jp2('2020/01/01', observatory='SOHO',
                                     instrument='MDI', measurement='continuum',
                                     overwrite=False)
    assert filepath_2 == filepath
    # With overwrite=True the file is replaced in place, same path again.
    filepath_3 = client.download_jp2('2020/01/01', observatory='SOHO',
                                     instrument='MDI', measurement='continuum',
                                     overwrite=True)
    assert filepath_3 == filepath
@pytest.mark.remote_data
def test_overwrite_png(client):
    """
    Tests for that overwrites, overwrites png edition.
    """
    # With overwrite=False the second download gets a new (suffixed) filename.
    # Use value comparison (!=), not identity: comparing strings with
    # ``is not`` was effectively always true and made the assertion vacuous.
    filepath = client.download_png('2020/01/01', 2.4, "[SOHO,MDI,continuum,1,100]",
                                   overwrite=False)
    filepath_2 = client.download_png('2020/01/01', 2.4, "[SOHO,MDI,continuum,1,100]",
                                     overwrite=False)
    assert filepath_2 != filepath
    # With overwrite=True the original file is replaced, same path returned.
    filepath_3 = client.download_png('2020/01/01', 2.4, "[SOHO,MDI,continuum,1,100]",
                                     overwrite=True)
    assert filepath_3 == filepath
4,765 | test link team identity does not exist | import responses
from rest_framework import status
from sentry.integrations.slack.webhooks.command import (
CHANNEL_ALREADY_LINKED_MESSAGE,
INSUFFICIENT_ROLE_MESSAGE,
LINK_FROM_CHANNEL_MESSAGE,
LINK_USER_FIRST_MESSAGE,
TEAM_NOT_LINKED_MESSAGE,
)
from sentry.models import OrganizationIntegration
from sentry.testutils.helpers import get_response_text, link_user
from sentry.testutils.silo import region_silo_test
from sentry.utils import json
from tests.sentry.integrations.slack.webhooks.commands import SlackCommandsTest
OTHER_SLACK_ID = "UXXXXXXX2"
class SlackCommandsLinkTeamTestBase(SlackCommandsTest):
    """Shared setup for link/unlink-team Slack command tests."""

    def setUp(self):
        super().setUp()
        # Link the default test user so commands can resolve an identity.
        self.link_user()
        # Stub out Slack's chat.postMessage so no real HTTP call is made.
        responses.add(
            method=responses.POST,
            url="https://slack.com/api/chat.postMessage",
            body='{"ok": true}',
            status=status.HTTP_200_OK,
            content_type="application/json",
        )
@region_silo_test
class SlackCommandsLinkTeamTest(SlackCommandsLinkTeamTestBase):
    """Tests for the `/sentry link team` Slack command."""

    @responses.activate
    def test_link_another_team_to_channel(self):
        """
        Test that we block a user who tries to link a second team to a
        channel that already has a team linked to it.
        """
        self.link_team()
        response = self.get_slack_response(
            {
                "text": "link team",
                "team_id": self.external_id,
                "user_id": self.slack_id,
                "channel_name": self.channel_name,
                "channel_id": self.channel_id,
            }
        )
        data = json.loads(str(response.content.decode("utf-8")))
        assert CHANNEL_ALREADY_LINKED_MESSAGE in get_response_text(data)

    @responses.activate
    def test_link_team_from_dm(self):
        """
        Test that if a user types `/sentry link team` from a DM instead of a
        channel, we reply with an error message.
        """
        response = self.get_slack_response(
            {
                "text": "link team",
                "team_id": self.external_id,
                # OTHER_SLACK_ID has no linked identity/channel context.
                "user_id": OTHER_SLACK_ID,
                "channel_name": "directmessage",
            }
        )
        data = json.loads(str(response.content.decode("utf-8")))
        assert LINK_FROM_CHANNEL_MESSAGE in get_response_text(data)

    @responses.activate
    def METHOD_NAME(self):
        """Test that get_identity fails if the user has no Identity and we reply with the LINK_USER_MESSAGE"""
        user2 = self.create_user()
        self.create_member(
            teams=[self.team], user=user2, role="member", organization=self.organization
        )
        self.login_as(user2)
        # user2 was never linked to a Slack identity, so the command must
        # ask them to link their user first.
        data = self.send_slack_message("link team", user_id=OTHER_SLACK_ID)
        assert LINK_USER_FIRST_MESSAGE in get_response_text(data)

    @responses.activate
    def test_link_team_insufficient_role(self):
        """
        Test that when a user whose role is insufficient attempts to link a
        team, we reject them and reply with the INSUFFICIENT_ROLE_MESSAGE.
        """
        user2 = self.create_user()
        self.create_member(
            teams=[self.team], user=user2, role="member", organization=self.organization
        )
        self.login_as(user2)
        # Linked identity exists, but "member" role is not enough.
        link_user(user2, self.idp, slack_id=OTHER_SLACK_ID)
        data = self.send_slack_message("link team", user_id=OTHER_SLACK_ID)
        assert INSUFFICIENT_ROLE_MESSAGE in get_response_text(data)

    @responses.activate
    def test_link_team_sufficient_role_through_team(self):
        """
        Test that when a user whose org role is sufficient through team membership
        attempts to link a team, we allow it.
        """
        user2 = self.create_user()
        # Org-level "admin" role granted via team membership, not directly.
        admin_team = self.create_team(org_role="admin")
        self.create_member(
            teams=[admin_team], user=user2, role="member", organization=self.organization
        )
        self.login_as(user2)
        link_user(user2, self.idp, slack_id=OTHER_SLACK_ID)
        data = self.send_slack_message("link team", user_id=OTHER_SLACK_ID)
        assert "Link your Sentry team to this Slack channel!" in get_response_text(data)
@region_silo_test
class SlackCommandsUnlinkTeamTest(SlackCommandsLinkTeamTestBase):
    """Tests for the `/sentry unlink team` Slack command."""

    def setUp(self):
        super().setUp()
        # Every test in this class starts from a channel with a linked team.
        self.link_team()

    @responses.activate
    def test_unlink_team(self):
        data = self.send_slack_message(
            "unlink team",
            channel_name=self.channel_name,
            channel_id=self.channel_id,
        )
        assert "Click here to unlink your team from this channel" in get_response_text(data)

    @responses.activate
    def test_unlink_no_team(self):
        """
        Test for when a user attempts to remove a link between a Slack channel
        and a Sentry team that does not exist.
        """
        data = self.send_slack_message(
            "unlink team",
            channel_name="specific",
            channel_id=OTHER_SLACK_ID,
        )
        assert TEAM_NOT_LINKED_MESSAGE in get_response_text(data)

    @responses.activate
    def test_unlink_multiple_orgs(self):
        # Create another organization and team for this user that is linked through `self.integration`.
        organization2 = self.create_organization(owner=self.user)
        team2 = self.create_team(organization=organization2, members=[self.user])
        OrganizationIntegration.objects.create(
            organization_id=organization2.id, integration=self.integration
        )
        self.link_team(team2)
        # Unlinking must still work when the integration spans two orgs.
        data = self.send_slack_message(
            "unlink team",
            channel_name=self.channel_name,
            channel_id=self.channel_id,
        )
        assert "Click here to unlink your team from this channel" in get_response_text(data)
4,766 | wrapper | import os
import platform
from functools import wraps
from pathlib import PurePath, PurePosixPath
from typing import Any, NewType, Union
# Path _inputs_ should generally accept any kind of path. This is named the same and
# modeled after the hint defined in the Python standard library's `typeshed`:
# https://github.com/python/typeshed/blob/0b1cd5989669544866213807afa833a88f649ee7/stdlib/_typeshed/__init__.pyi#L56-L65
StrPath = Union[str, "os.PathLike[str]"]
# A native path to a file on a local filesystem.
FilePathStr = NewType("FilePathStr", str)
URIStr = NewType("URIStr", str)
class LogicalPath(str):
    """A string that represents a path relative to an artifact or run.
    The format of the string is always as a POSIX path, e.g. "foo/bar.txt".
    A neat trick is that you can use this class as if it were a PurePosixPath. E.g.:
    ```
    >>> path = LogicalPath("foo/bar.txt")
    >>> path.parts
    ('foo', 'bar.txt')
    >>> path.parent / "baz.txt"
    'foo/baz.txt'
    >>> type(path.relative_to("foo"))
    LogicalPath
    ```
    """

    # It should probably always be a relative path, but that would be a behavior change.
    #
    # These strings used to be the output of `to_forward_slash_path`, which only works
    # with strings and whose behavior is pretty simple:
    # ```
    # if platform.system() == "Windows":
    #     path = path.replace("\\", "/")
    # ```
    #
    # This results in some weird things, such as backslashes being allowed from
    # non-Windows platforms (which would probably break if such an artifact was used
    # from Windows) and anchors or absolute paths being allowed. E.g., the Windows path
    # "C:\foo\bar.txt" becomes "C:/foo/bar.txt", which then would mount as
    # "./artifacts/artifact_name:v0/C:/foo/bar.txt" on MacOS and as
    # "./artifacts/artifact_name-v0/C-/foo/bar.txt" on Windows.
    #
    # This implementation preserves behavior for strings but attempts to sanitize other
    # formerly unsupported inputs more aggressively. It uses the `.as_posix()` form of
    # pathlib objects rather than the `str()` form to reduce how often identical inputs
    # will result in different outputs on different platforms; however, it doesn't alter
    # absolute paths or check for prohibited characters etc.
    def __new__(cls, path: StrPath) -> "LogicalPath":
        """Normalize *path* (str, bytes-fspath, or pathlib object) to POSIX form."""
        if isinstance(path, LogicalPath):
            # Already normalized; copy as-is.
            return super().__new__(cls, path)
        if hasattr(path, "as_posix"):
            # pathlib-like object: go through its POSIX representation.
            path = PurePosixPath(path.as_posix())
            return super().__new__(cls, str(path))
        if hasattr(path, "__fspath__"):
            path = path.__fspath__()  # Can be str or bytes.
        if isinstance(path, bytes):
            path = os.fsdecode(path)
        # For historical reasons we have to convert backslashes to forward slashes, but
        # only on Windows, and need to do it before any pathlib operations.
        if platform.system() == "Windows":
            path = path.replace("\\", "/")
        # This weird contortion and the one above are because in some unusual cases
        # PurePosixPath(path.as_posix()).as_posix() != path.as_posix().
        path = PurePath(path).as_posix()
        return super().__new__(cls, str(PurePosixPath(path)))

    def to_path(self) -> PurePosixPath:
        """Convert this path to a PurePosixPath."""
        return PurePosixPath(self)

    def __getattr__(self, attr: str) -> Any:
        """Act like a subclass of PurePosixPath for all methods not defined on str."""
        try:
            result = getattr(self.to_path(), attr)
        except AttributeError as e:
            raise AttributeError(f"LogicalPath has no attribute {attr!r}") from e
        if isinstance(result, PurePosixPath):
            # Keep path results in the LogicalPath type.
            return LogicalPath(result)
        # If the result is a callable (a method), wrap it so that it has the same
        # behavior: if the call result returns a PurePosixPath, return a LogicalPath.
        if callable(result):

            @wraps(result)
            def METHOD_NAME(*args: Any, **kwargs: Any) -> Any:
                inner_result = result(*args, **kwargs)
                if isinstance(inner_result, PurePosixPath):
                    return LogicalPath(inner_result)
                return inner_result

            return METHOD_NAME
        return result

    def __truediv__(self, other: StrPath) -> "LogicalPath":
        """Act like a PurePosixPath for the / operator, but return a LogicalPath."""
        return LogicalPath(self.to_path() / LogicalPath(other))
4,767 | determine next id | # This file is part of Korman.
#
# Korman is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Korman is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Korman. If not, see <http://www.gnu.org/licenses/>.
import bpy
from .base import PlasmaModifierProperties
from .anim import *
from .avatar import *
from .gui import *
from .logic import *
from .physics import *
from .region import *
from .render import *
from .sound import *
from .water import *
class PlasmaModifiers(bpy.types.PropertyGroup):
    """Aggregates all Plasma modifier property groups on a Blender Object."""

    def METHOD_NAME(self):
        """Gets the ID for the next modifier in the UI"""
        # This is NOT a property, otherwise the modifiers property would access this...
        # Which acesses the modifiers property... INFINITE RECURSION! :D
        ids = [mod.display_order for mod in self.modifiers]
        if ids:
            return max(ids) + 1
        else:
            return 0

    @property
    def modifiers(self):
        """Generates all of the enabled modifiers.
        NOTE: We do not promise to return modifiers in their display_order!
        """
        for i in dir(self):
            attr = getattr(self, i, None)
            # Assumes each modifier is a single pointer to PlasmaModifierProperties
            if isinstance(attr, PlasmaModifierProperties):
                if attr.enabled:
                    yield attr

    @classmethod
    def register(cls):
        # Okay, so we have N plasma modifer property groups...
        # Rather than have (dis)organized chaos on the Blender Object, we will collect all of the
        # property groups of type PlasmaModifierProperties and generate on-the-fly a PlasmaModifier
        # property group to rule them all. The class attribute 'pl_id' will determine the name of
        # the property group in PlasmaModifiers.
        # Also, just to spite us, Blender doesn't seem to handle PropertyGroup inheritance... at all.
        # So, we're going to have to create our base properties on all of the PropertyGroups.
        # It's times like these that make me wonder about life...
        # Enjoy!
        for i in PlasmaModifierProperties.__subclasses__():
            # Copy the shared base properties onto every concrete subclass.
            for name, (prop, kwargs) in PlasmaModifierProperties._subprops.items():
                setattr(i, name, prop(**kwargs))
            setattr(cls, i.pl_id, bpy.props.PointerProperty(type=i))
        bpy.types.Object.plasma_modifiers = bpy.props.PointerProperty(type=cls)

    def test_property(self, property : str) -> bool:
        """Tests a property on all enabled Plasma modifiers"""
        return any((getattr(i, property) for i in self.modifiers))
class PlasmaModifierSpec(bpy.types.PropertyGroup):
    """Empty property group; concrete content is attached at registration time."""
    pass
def modifier_mapping():
    """This returns a dict mapping Plasma Modifier categories to names"""
    mapping = {}
    ordered_mods = sorted(PlasmaModifierProperties.__subclasses__(),
                          key=lambda mod: mod.bl_label)
    for order, mod_cls in enumerate(ordered_mods):
        label = mod_cls.bl_label
        category = mod_cls.bl_category
        # The modifier might include the category name in its label; strip it
        # (along with the adjacent separator character) from either end.
        if label != category:
            if label.startswith(category):
                label = label[len(category) + 1:]
            if label.endswith(category):
                label = label[:-len(category) - 1]
        entry = (mod_cls.pl_id, label, mod_cls.bl_description,
                 getattr(mod_cls, "bl_icon", ""), order)
        mapping.setdefault(category, []).append(entry)
    return mapping
4,768 | finalize options | """distutils.command.check
Implements the Distutils 'check' command.
"""
from distutils.core import Command
from distutils.errors import DistutilsSetupError
try:
    # docutils is installed
    from docutils.utils import Reporter
    from docutils.parsers.rst import Parser
    from docutils import frontend
    from docutils import nodes

    class SilentReporter(Reporter):
        # Collects system messages instead of writing them to a stream, so
        # the check command can report them itself afterwards.
        def __init__(self, source, report_level, halt_level, stream=None,
                     debug=0, encoding='ascii', error_handler='replace'):
            self.messages = []
            Reporter.__init__(self, source, report_level, halt_level, stream,
                              debug, encoding, error_handler)

        def system_message(self, level, message, *children, **kwargs):
            # Record the message and still return a node, as the base
            # class contract requires.
            self.messages.append((level, message, children, kwargs))
            return nodes.system_message(message, level=level,
                                        type=self.levels[level],
                                        *children, **kwargs)

    HAS_DOCUTILS = True
except Exception:
    # Catch all exceptions because exceptions besides ImportError probably
    # indicate that docutils is not ported to Py3k.
    HAS_DOCUTILS = False
class check(Command):
    """This command checks the meta-data of the package.
    """
    description = ("perform some checks on the package")
    user_options = [('metadata', 'm', 'Verify meta-data'),
                    ('restructuredtext', 'r',
                     ('Checks if long string meta-data syntax '
                      'are reStructuredText-compliant')),
                    ('strict', 's',
                     'Will exit with an error if a check fails')]
    boolean_options = ['metadata', 'restructuredtext', 'strict']

    def initialize_options(self):
        """Sets default values for options."""
        self.restructuredtext = 0
        self.metadata = 1
        self.strict = 0
        # Running warning count, incremented by warn().
        self._warnings = 0

    def METHOD_NAME(self):
        """Set final values for options (nothing to compute here)."""
        pass

    def warn(self, msg):
        """Counts the number of warnings that occurs."""
        self._warnings += 1
        return Command.warn(self, msg)

    def run(self):
        """Runs the command."""
        # perform the various tests
        if self.metadata:
            self.check_metadata()
        if self.restructuredtext:
            if HAS_DOCUTILS:
                self.check_restructuredtext()
            elif self.strict:
                raise DistutilsSetupError('The docutils package is needed.')
        # let's raise an error in strict mode, if we have at least
        # one warning
        if self.strict and self._warnings > 0:
            raise DistutilsSetupError('Please correct your package.')

    def check_metadata(self):
        """Ensures that all required elements of meta-data are supplied.
        Required fields:
        name, version, URL
        Recommended fields:
        (author and author_email) or (maintainer and maintainer_email)
        Warns if any are missing.
        """
        metadata = self.distribution.metadata
        missing = []
        for attr in ('name', 'version', 'url'):
            if not (hasattr(metadata, attr) and getattr(metadata, attr)):
                missing.append(attr)
        if missing:
            self.warn("missing required meta-data: %s" % ', '.join(missing))
        # Author and maintainer info come in pairs: warn if half is missing.
        if metadata.author:
            if not metadata.author_email:
                self.warn("missing meta-data: if 'author' supplied, " +
                          "'author_email' should be supplied too")
        elif metadata.maintainer:
            if not metadata.maintainer_email:
                self.warn("missing meta-data: if 'maintainer' supplied, " +
                          "'maintainer_email' should be supplied too")
        else:
            self.warn("missing meta-data: either (author and author_email) " +
                      "or (maintainer and maintainer_email) " +
                      "should be supplied")

    def check_restructuredtext(self):
        """Checks if the long string fields are reST-compliant."""
        data = self.distribution.get_long_description()
        for warning in self._check_rst_data(data):
            line = warning[-1].get('line')
            if line is None:
                warning = warning[1]
            else:
                warning = '%s (line %s)' % (warning[1], line)
            self.warn(warning)

    def _check_rst_data(self, data):
        """Returns warnings when the provided data doesn't compile."""
        # the include and csv_table directives need this to be a path
        source_path = self.distribution.script_name or 'setup.py'
        parser = Parser()
        settings = frontend.OptionParser(components=(Parser,)).get_default_values()
        settings.tab_width = 4
        settings.pep_references = None
        settings.rfc_references = None
        reporter = SilentReporter(source_path,
                                  settings.report_level,
                                  settings.halt_level,
                                  stream=settings.warning_stream,
                                  debug=settings.debug,
                                  encoding=settings.error_encoding,
                                  error_handler=settings.error_encoding_error_handler)
        document = nodes.document(settings, reporter, source=source_path)
        document.note_source(source_path, -1)
        try:
            parser.parse(data, document)
        except AttributeError as e:
            # docutils can raise AttributeError on malformed input; surface
            # it as a message instead of crashing the check.
            reporter.messages.append(
                (-1, 'Could not finish the parsing: %s.' % e, '', {}))
        return reporter.messages
4,769 | test phonopy yaml read | """Tests of PhonopyYaml."""
from io import StringIO
from pathlib import Path
import numpy as np
import yaml
from phonopy import Phonopy
from phonopy.interface.phonopy_yaml import (
PhonopyYaml,
PhonopyYamlLoader,
load_phonopy_yaml,
read_cell_yaml,
read_phonopy_yaml,
)
from phonopy.interface.vasp import read_vasp
from phonopy.structure.cells import get_primitive
from phonopy.structure.dataset import get_displacements_and_forces
cwd = Path(__file__).parent
def test_read_poscar_yaml(helper_methods):
    """Test to parse PhonopyAtoms.__str__ output."""
    filename = cwd / "NaCl-vasp.yaml"
    cell = _get_unitcell(filename)
    # The parsed cell must match the reference POSCAR (order preserved).
    _compare_NaCl_convcell(cell, helper_methods.compare_cells_with_order)
def test_read_phonopy_yaml(helper_methods):
    """Test to parse phonopy.yaml like file."""
    unitcell = read_phonopy_yaml(cwd / "phonopy.yaml").unitcell
    _compare_NaCl_convcell(unitcell, helper_methods.compare_cells_with_order)
def test_read_phonopy_yaml_with_stream(helper_methods):
    """Test to parse phonopy.yaml like file stream."""
    filename = cwd / "phonopy.yaml"
    # read_phonopy_yaml must accept an open file object, not only a path.
    with open(filename) as fp:
        cell = read_phonopy_yaml(fp).unitcell
    _compare_NaCl_convcell(cell, helper_methods.compare_cells_with_order)
def METHOD_NAME(helper_methods):
    """Test to parse phonopy.yaml like file using PhonopyYaml.read()."""
    filename = cwd / "phonopy.yaml"
    cell = _get_unitcell(filename)
    _compare_NaCl_convcell(cell, helper_methods.compare_cells_with_order)
def test_PhonopyYaml_read_with_stream(helper_methods):
    """Test to parse phonopy.yaml like file stream using PhonopyYaml.read()."""
    filename = cwd / "phonopy.yaml"
    # PhonopyYaml.read must also accept an open file object.
    with open(filename) as fp:
        cell = _get_unitcell(fp)
    _compare_NaCl_convcell(cell, helper_methods.compare_cells_with_order)
def test_read_cell_yaml(helper_methods):
    """Test to parse phonopy_symcells.yaml like file."""
    filename = cwd / "phonopy_symcells_NaCl.yaml"
    cell = read_cell_yaml(filename)
    _compare_NaCl_convcell(cell, helper_methods.compare_cells)
    # cell_type selects which cell in the file is returned.
    pcell = read_cell_yaml(filename, cell_type="primitive")
    helper_methods.compare_cells(pcell, get_primitive(cell, "F"))
def test_read_cell_yaml_with_stream(helper_methods):
    """Test to parse phonopy_symcells.yaml like file."""
    filename = cwd / "phonopy_symcells_NaCl.yaml"
    with open(filename) as fp:
        # NOTE(review): unlike the non-stream variant above, this first read
        # goes through _get_unitcell (PhonopyYaml.read) instead of
        # read_cell_yaml(fp) -- confirm this asymmetry is intentional.
        cell = _get_unitcell(fp)
        _compare_NaCl_convcell(cell, helper_methods.compare_cells)
        # Rewind so the same stream can be parsed a second time.
        fp.seek(0)
        pcell = read_cell_yaml(fp, cell_type="primitive")
        helper_methods.compare_cells(pcell, get_primitive(cell, "F"))
def test_write_phonopy_yaml(ph_nacl_nofcsym: Phonopy, helper_methods):
    """Test PhonopyYaml.set_phonon_info, __str__, yaml_data, parse."""
    phonon = ph_nacl_nofcsym
    phpy_yaml = PhonopyYaml(calculator="vasp")
    phpy_yaml.set_phonon_info(phonon)
    phpy_yaml_test = PhonopyYaml()
    # Round-trip: serialize to yaml text, parse it back, and compare.
    phpy_yaml_test._data = load_phonopy_yaml(
        yaml.safe_load(StringIO(str(phpy_yaml))), calculator=phpy_yaml.calculator
    )
    helper_methods.compare_cells_with_order(
        phpy_yaml.primitive, phpy_yaml_test.primitive
    )
    helper_methods.compare_cells_with_order(phpy_yaml.unitcell, phpy_yaml_test.unitcell)
    helper_methods.compare_cells_with_order(
        phpy_yaml.supercell, phpy_yaml_test.supercell
    )
    assert phpy_yaml.version == phpy_yaml_test.version
    np.testing.assert_allclose(
        phpy_yaml.supercell_matrix, phpy_yaml_test.supercell_matrix, atol=1e-8
    )
    np.testing.assert_allclose(
        phpy_yaml.primitive_matrix, phpy_yaml_test.primitive_matrix, atol=1e-8
    )
def test_write_phonopy_yaml_extra(ph_nacl_nofcsym: Phonopy):
    """Test PhonopyYaml.set_phonon_info, __str__, yaml_data, parse.
    settings parameter controls amount of yaml output. In this test,
    more data than the default are dumped and those are tested.
    """
    phonon = ph_nacl_nofcsym
    settings = {
        "force_sets": True,
        "displacements": True,
        "force_constants": True,
        "born_effective_charge": True,
        "dielectric_constant": True,
    }
    phpy_yaml = PhonopyYaml(calculator="vasp", settings=settings)
    phpy_yaml.set_phonon_info(phonon)
    phpy_yaml_test = PhonopyYaml()
    # Round-trip through the yaml text, then compare the extra sections.
    phpy_yaml_test._data = load_phonopy_yaml(
        yaml.safe_load(StringIO(str(phpy_yaml))), calculator=phpy_yaml.calculator
    )
    np.testing.assert_allclose(
        phpy_yaml.force_constants, phpy_yaml_test.force_constants, atol=1e-8
    )
    np.testing.assert_allclose(
        phpy_yaml.nac_params["born"], phpy_yaml_test.nac_params["born"], atol=1e-8
    )
    np.testing.assert_allclose(
        phpy_yaml.nac_params["dielectric"],
        phpy_yaml_test.nac_params["dielectric"],
        atol=1e-8,
    )
    np.testing.assert_allclose(
        phpy_yaml.nac_params["factor"],
        phpy_yaml_test.nac_params["factor"],
        atol=1e-8,
    )
    # Displacement/force data must also survive the round trip.
    disps, forces = get_displacements_and_forces(phpy_yaml.dataset)
    disps_test, forces_test = get_displacements_and_forces(phpy_yaml_test.dataset)
    np.testing.assert_allclose(forces, forces_test, atol=1e-8)
    np.testing.assert_allclose(disps, disps_test, atol=1e-8)
def test_load_nac_yaml():
    """Test to read NAC params using PhonopyYamlLoader."""
    # Open the file in a context manager so the handle is closed
    # deterministically (the previous bare open() leaked it).
    with open(cwd / "nac.yaml") as f:
        pyl = PhonopyYamlLoader(yaml.safe_load(f)).parse()
    assert pyl.data.nac_params
    # All four NAC entries must be present with the expected shapes/types.
    for key in (
        "dielectric",
        "born",
        "factor",
        "method",
    ):
        assert key in pyl.data.nac_params
    assert pyl.data.nac_params["dielectric"].shape == (3, 3)
    assert pyl.data.nac_params["born"].shape == (2, 3, 3)
    assert isinstance(pyl.data.nac_params["factor"], float)
    assert isinstance(pyl.data.nac_params["method"], str)
def _compare_NaCl_convcell(cell, compare_cells):
    # Compare *cell* against the reference NaCl conventional cell using the
    # supplied comparison callback.
    cell_ref = read_vasp(cwd / ".." / "POSCAR_NaCl")
    compare_cells(cell, cell_ref)
def _get_unitcell(filename):
phpy_yaml = PhonopyYaml().read(filename)
return phpy_yaml.unitcell |
4,770 | test opsscan abox uploads | from chemaboxwriters.app import write_abox
import pytest
import os
from chemaboxwriters.common.pipeline import Pipeline
import chemaboxwriters.common.assemble_pipeline as asp
from chemaboxwriters.ontocompchem.pipeline import OC_PIPELINE
from chemaboxwriters.ontospecies.pipeline import OS_PIPELINE
from chemaboxwriters.ontomops.pipeline import OMOPS_PIPELINE
from chemaboxwriters.ontopesscan.pipeline import OPS_PIPELINE
from pytest_mock import MockerFixture
from typing import Callable, Dict, Optional, List
THIS_DIR = os.path.dirname(os.path.abspath(__file__))
ABOX_CONFIG_FILE = os.path.join(THIS_DIR, "test_config_files", "abox_config.yml")
REF_DIR = os.path.join(THIS_DIR, "..", "refData")
OCOMPCHEM_REF_DIR = os.path.join(REF_DIR, "ontocompchem")
OSPECIES_REF_DIR = os.path.join(REF_DIR, "ontospecies")
OPSSCAN_REF_DIR = os.path.join(REF_DIR, "ontopesscan")
OMOPS_REF_DIR = os.path.join(REF_DIR, "ontomops")
class DummyPubchemComp:
    """Minimal stand-in for a pubchempy compound, exposing only the two
    attributes the handlers under test read (cid and synonyms)."""
    def __init__(self, cid: int, synonyms: List[str]):
        self.cid = cid
        self.synonyms = synonyms
def check_uploads(
    pipeline: Pipeline, inp_file_type: str, fs_num_uploads: int, ts_num_uploads: int
) -> None:
    """Assert the pipeline recorded the expected numbers of file-server and
    triple-store uploads, and that each handler's configured upload file
    types actually appear among the recorded uploads."""
    fs_uploads = pipeline._file_server_uploads
    ts_uploads = pipeline._triple_store_uploads
    assert len(fs_uploads) == fs_num_uploads
    assert len(ts_uploads) == ts_num_uploads
    # Walk the handler chain in processing order, starting from the input stage.
    inp_stage = inp_file_type
    for handler in pipeline._handlers.values():
        if handler._in_stage == inp_stage:
            fs_upload_configs = handler.get_file_server_upload_configs()
            ts_upload_configs = handler.get_triple_store_upload_configs()
            _check_uploads_exists(fs_uploads, fs_upload_configs)
            _check_uploads_exists(ts_uploads, ts_upload_configs)
            inp_stage = handler._out_stage
def _check_uploads_exists(uploads: Dict, upload_configs: Optional[Dict]) -> None:
if upload_configs is not None:
upload_file_types = upload_configs["upload_file_types"]
url = _construct_full_url(upload_configs)
file_type_in_uploads = []
for file_type in upload_file_types:
for _, upload_entry in uploads.items():
if upload_entry["input_type"] == file_type:
file_type_in_uploads.append(file_type)
upload_url = (
f"{'/'.join(upload_entry['location'].split('/')[:-1])}/"
)
assert upload_url == url
assert set(file_type_in_uploads) == set(upload_file_types)
def _construct_full_url(upload_configs: Dict) -> str:
url = upload_configs["url"]
subdirs = upload_configs["subdirs"]
if not url.endswith("/"):
url = f"{url}/"
if subdirs is not None:
if not subdirs.endswith("/"):
subdirs = f"{subdirs}/"
url = f"{url}{subdirs}"
return url
@pytest.mark.parametrize(
    "inp_file_or_dir, inp_file_type, fs_num_uploads, ts_num_uploads",
    [
        (
            os.path.join("OC_qc_log_single_log_scan_test", "ethane_scan_rigid.g09"),
            "qc_log",
            5,
            4,
        ),
    ],
)
def test_ocompchem_abox_uploads(
    inp_file_or_dir: str,
    inp_file_type: str,
    fs_num_uploads: int,
    ts_num_uploads: int,
    clean_tests: bool,
    cleanup_test_data: Callable,
):
    """Run the ontocompchem pipeline end to end and verify the recorded
    file-server / triple-store uploads."""
    print("========================================================")
    print("TEST INPUT FILE: ", inp_file_or_dir)
    print("TEST INPUT FILE TYPE: ", inp_file_type)
    print()
    print()
    inp_file_or_dir = os.path.join(OCOMPCHEM_REF_DIR, inp_file_or_dir)
    pipeline = asp.assemble_pipeline(
        pipeline_type=OC_PIPELINE, config_file=ABOX_CONFIG_FILE
    )
    write_abox(
        pipeline=pipeline,
        file_or_dir=inp_file_or_dir,
        input_file_type=inp_file_type,
        dry_run=False,
    )
    check_uploads(pipeline, inp_file_type, fs_num_uploads, ts_num_uploads)
    # clean_tests / cleanup_test_data are pytest fixtures.
    if clean_tests:
        cleanup_test_data(pipeline.written_files)
    print("========================================================")
    print()
    print()
@pytest.mark.parametrize(
    "inp_file_or_dir, inp_file_type, fs_num_uploads, ts_num_uploads",
    [
        # os.path.join keeps the path portable; the original hard-coded a
        # Windows "\\" separator, which breaks this test on POSIX systems.
        (os.path.join("OS_qc_log_test", "h2o_opt_n_g09.log"), "qc_log", 0, 1),
    ],
)
def test_ospecies_abox_uploads(
    inp_file_or_dir: str,
    inp_file_type: str,
    fs_num_uploads: int,
    ts_num_uploads: int,
    mocker: MockerFixture,
    clean_tests: bool,
    cleanup_test_data: Callable,
):
    """Run the ontospecies pipeline end to end (with the PubChem lookup
    mocked out) and verify the recorded uploads."""
    print("========================================================")
    print("TEST INPUT FILE: ", inp_file_or_dir)
    print("TEST INPUT FILE TYPE: ", inp_file_type)
    print()
    print()
    inp_file_or_dir = os.path.join(OSPECIES_REF_DIR, inp_file_or_dir)
    # Avoid a live PubChem network call; return a canned compound instead.
    mocker.patch(
        "chemaboxwriters.ontospecies.handlers.qc_json_handler.pcp.get_compounds",
        return_value=[DummyPubchemComp(cid=1111, synonyms=["1111-11-1"])],
    )
    pipeline = asp.assemble_pipeline(
        pipeline_type=OS_PIPELINE, config_file=ABOX_CONFIG_FILE
    )
    write_abox(
        pipeline=pipeline,
        file_or_dir=inp_file_or_dir,
        input_file_type=inp_file_type,
        dry_run=False,
    )
    check_uploads(pipeline, inp_file_type, fs_num_uploads, ts_num_uploads)
    # clean_tests / cleanup_test_data are pytest fixtures.
    if clean_tests:
        cleanup_test_data(pipeline.written_files)
    print("========================================================")
    print()
    print()
@pytest.mark.parametrize(
    "inp_file_or_dir, inp_file_type, fs_num_uploads, ts_num_uploads",
    [
        ("OPS_oc_json_angle_test", "oc_json", 0, 1),
    ],
)
# NOTE(review): placeholder name — presumably test_opsscan_abox_uploads; confirm.
def METHOD_NAME(
    inp_file_or_dir: str,
    inp_file_type: str,
    fs_num_uploads: int,
    ts_num_uploads: int,
    clean_tests: bool,
    cleanup_test_data: Callable,
):
    """Run the ontopesscan pipeline on a whole input directory and verify
    the recorded uploads."""
    print("========================================================")
    print("TEST INPUT DIR: ", inp_file_or_dir)
    print("TEST INPUT FILE TYPE: ", inp_file_type)
    print()
    print()
    inp_file_or_dir = os.path.join(OPSSCAN_REF_DIR, inp_file_or_dir)
    pipeline = asp.assemble_pipeline(
        pipeline_type=OPS_PIPELINE, config_file=ABOX_CONFIG_FILE
    )
    write_abox(
        pipeline=pipeline,
        file_or_dir=inp_file_or_dir,
        input_file_type=inp_file_type,
        dry_run=False,
    )
    check_uploads(pipeline, inp_file_type, fs_num_uploads, ts_num_uploads)
    # clean_tests / cleanup_test_data are pytest fixtures.
    if clean_tests:
        cleanup_test_data(pipeline.written_files)
    print("========================================================")
    print()
    print()
@pytest.mark.parametrize(
    "inp_file_or_dir, inp_file_type, fs_num_uploads, ts_num_uploads",
    [
        # os.path.join keeps the path portable; the original hard-coded a
        # Windows "\\" separator, which breaks this test on POSIX systems.
        (os.path.join("OM_om_json_test", "example.ominp_json"), "ominp_json", 1, 1),
    ],
)
def test_omops_abox_uploads(
    inp_file_or_dir: str,
    inp_file_type: str,
    fs_num_uploads: int,
    ts_num_uploads: int,
    clean_tests: bool,
    cleanup_test_data: Callable,
):
    """Run the ontomops pipeline end to end and verify the recorded uploads."""
    print("========================================================")
    print("TEST INPUT FILE: ", inp_file_or_dir)
    print("TEST INPUT FILE TYPE: ", inp_file_type)
    print()
    print()
    inp_file_or_dir = os.path.join(OMOPS_REF_DIR, inp_file_or_dir)
    pipeline = asp.assemble_pipeline(
        pipeline_type=OMOPS_PIPELINE, config_file=ABOX_CONFIG_FILE
    )
    write_abox(
        pipeline=pipeline,
        file_or_dir=inp_file_or_dir,
        input_file_type=inp_file_type,
        dry_run=False,
    )
    check_uploads(pipeline, inp_file_type, fs_num_uploads, ts_num_uploads)
    # clean_tests / cleanup_test_data are pytest fixtures.
    if clean_tests:
        cleanup_test_data(pipeline.written_files)
    print("========================================================")
    print()
    print()
4,771 | test wait for status reply | """
Test the evennia launcher.
"""
import os
import pickle
from anything import Something
from mock import MagicMock, create_autospec, patch
from twisted.internet import reactor
from twisted.internet.base import DelayedCall
from twisted.trial.unittest import TestCase as TwistedTestCase
from evennia.server import evennia_launcher
from evennia.server.portal import amp
DelayedCall.debug = True
@patch("evennia.server.evennia_launcher.Popen", new=MagicMock())
class TestLauncher(TwistedTestCase):
    """Unit tests for evennia_launcher helpers; Popen is mocked class-wide
    so no real processes are ever spawned."""
    def test_is_windows(self):
        self.assertEqual(evennia_launcher._is_windows(), os.name == "nt")
    def test_file_compact(self):
        self.assertEqual(
            evennia_launcher._file_names_compact("foo/bar/test1", "foo/bar/test2"),
            "foo/bar/test1 and test2",
        )
        self.assertEqual(
            evennia_launcher._file_names_compact("foo/test1", "foo/bar/test2"),
            "foo/test1 and foo/bar/test2",
        )
    @patch("evennia.server.evennia_launcher.print")
    def test_print_info(self, mockprint):
        # Representative port/info dicts as produced by portal and server.
        portal_dict = {
            "servername": "testserver",
            "version": "1",
            "telnet": 1234,
            "telnet_ssl": [1234, 2345],
            "ssh": 1234,
            "webserver_proxy": 1234,
            "webclient": 1234,
            "webserver_internal": 1234,
            "amp": 1234,
        }
        server_dict = {
            "servername": "testserver",
            "version": "1",
            "webserver": [1234, 1234],
            "amp": 1234,
            "irc_rss": "irc.test",
            "info": "testing mode",
            "errors": "",
        }
        evennia_launcher._print_info(portal_dict, server_dict)
        mockprint.assert_called()
    def test_parse_status(self):
        response = {"status": pickle.dumps(("teststring",))}
        result = evennia_launcher._parse_status(response)
        self.assertEqual(result, ("teststring",))
    @patch("evennia.server.evennia_launcher.os.name", new="posix")
    def test_get_twisted_cmdline(self):
        pcmd, scmd = evennia_launcher._get_twistd_cmdline(False, False)
        self.assertIn("portal.py", pcmd[1])
        self.assertIn("--pidfile", pcmd[3])
        self.assertIn("server.py", scmd[1])
        self.assertIn("--pidfile", scmd[3])
        # With profiling enabled, the cprofile arguments must be appended.
        pcmd, scmd = evennia_launcher._get_twistd_cmdline(True, True)
        self.assertIn("portal.py", pcmd[1])
        self.assertIn("--pidfile", pcmd[3])
        self.assertIn("--profiler=cprofile", pcmd[5], pcmd)
        self.assertIn("--profile=", pcmd[6])
        self.assertIn("server.py", scmd[1])
        self.assertIn("--pidfile", scmd[3])
        # NOTE(review): the next assertion duplicates the previous one —
        # apparent copy-paste; harmless.
        self.assertIn("--pidfile", scmd[3])
        self.assertIn("--profiler=cprofile", scmd[5], "actual: {}".format(scmd))
        self.assertIn("--profile=", scmd[6])
    @patch("evennia.server.evennia_launcher.os.name", new="nt")
    def test_get_twisted_cmdline_nt(self):
        # On Windows the generated command line is shorter (3 elements).
        pcmd, scmd = evennia_launcher._get_twistd_cmdline(False, False)
        self.assertTrue(len(pcmd) == 3, pcmd)
        self.assertTrue(len(scmd) == 3, scmd)
    @patch("evennia.server.evennia_launcher.reactor.stop")
    def test_reactor_stop(self, mockstop):
        evennia_launcher._reactor_stop()
        mockstop.assert_called()
    def _catch_wire_read(self, mocktransport):
        "Parse what was supposed to be sent over the wire"
        arg_list = mocktransport.write.call_args_list
        all_sent = []
        for i, cll in enumerate(arg_list):
            args, kwargs = cll
            raw_inp = args[0]
            all_sent.append(raw_inp)
        return all_sent
    # @patch("evennia.server.portal.amp.amp.BinaryBoxProtocol.transport")
    # def test_send_instruction_pstatus(self, mocktransport):
    #     deferred = evennia_launcher.send_instruction(
    #         evennia_launcher.PSTATUS,
    #         (),
    #         callback=MagicMock(),
    #         errback=MagicMock())
    #     on_wire = self._catch_wire_read(mocktransport)
    #     self.assertEqual(on_wire, "")
    #     return deferred
    # Stand-ins for send_instruction used via @patch below: they invoke the
    # callback/errback immediately with a pickled status tuple.
    def _msend_status_ok(operation, arguments, callback=None, errback=None):
        callback({"status": pickle.dumps((True, True, 2, 24, "info1", "info2"))})
    def _msend_status_err(operation, arguments, callback=None, errback=None):
        errback({"status": pickle.dumps((False, False, 3, 25, "info3", "info4"))})
    @patch("evennia.server.evennia_launcher.send_instruction", _msend_status_ok)
    @patch("evennia.server.evennia_launcher.NO_REACTOR_STOP", True)
    @patch("evennia.server.evennia_launcher.get_pid", MagicMock(return_value=100))
    @patch("evennia.server.evennia_launcher.print")
    def test_query_status_run(self, mprint):
        evennia_launcher.query_status()
        mprint.assert_called_with("Portal: RUNNING (pid 100)\nServer: RUNNING (pid 100)")
    @patch("evennia.server.evennia_launcher.send_instruction", _msend_status_err)
    @patch("evennia.server.evennia_launcher.NO_REACTOR_STOP", True)
    @patch("evennia.server.evennia_launcher.print")
    def test_query_status_not_run(self, mprint):
        evennia_launcher.query_status()
        mprint.assert_called_with("Portal: NOT RUNNING\nServer: NOT RUNNING")
    @patch("evennia.server.evennia_launcher.send_instruction", _msend_status_ok)
    @patch("evennia.server.evennia_launcher.NO_REACTOR_STOP", True)
    def test_query_status_callback(self):
        mprint = MagicMock()
        def testcall(response):
            resp = pickle.loads(response["status"])
            mprint(resp)
        evennia_launcher.query_status(callback=testcall)
        mprint.assert_called_with((True, True, 2, 24, "info1", "info2"))
    @patch("evennia.server.evennia_launcher.AMP_CONNECTION")
    @patch("evennia.server.evennia_launcher.print")
    # NOTE(review): placeholder name — presumably test_wait_for_status_reply; confirm.
    def METHOD_NAME(self, mprint, aconn):
        aconn.wait_for_status = MagicMock()
        def test():
            pass
        evennia_launcher.wait_for_status_reply(test)
        aconn.wait_for_status.assert_called_with(test)
    @patch("evennia.server.evennia_launcher.AMP_CONNECTION", None)
    @patch("evennia.server.evennia_launcher.print")
    def test_wait_for_status_reply_fail(self, mprint):
        # With no AMP connection, a warning is printed instead.
        evennia_launcher.wait_for_status_reply(None)
        mprint.assert_called_with("No Evennia connection established.")
    @patch("evennia.server.evennia_launcher.send_instruction", _msend_status_ok)
    @patch("evennia.server.evennia_launcher.reactor.callLater")
    def test_wait_for_status(self, mcalllater):
        mcall = MagicMock()
        merr = MagicMock()
        evennia_launcher.wait_for_status(
            portal_running=True, server_running=True, callback=mcall, errback=merr
        )
        mcall.assert_called_with(True, True)
        merr.assert_not_called()
    @patch("evennia.server.evennia_launcher.send_instruction", _msend_status_err)
    @patch("evennia.server.evennia_launcher.reactor.callLater")
    def test_wait_for_status_fail(self, mcalllater):
        mcall = MagicMock()
        merr = MagicMock()
        evennia_launcher.wait_for_status(
            portal_running=True, server_running=True, callback=mcall, errback=merr
        )
        # On failure the wait is rescheduled via callLater, not errored out.
        mcall.assert_not_called()
        merr.assert_not_called()
        mcalllater.assert_called()
4,772 | test authenticate with username | """
Test of custom django-oauth-toolkit behavior
"""
# pylint: disable=protected-access
import datetime
from django.conf import settings
from django.test import RequestFactory, TestCase
from django.utils import timezone
from common.djangoapps.student.tests.factories import UserFactory
from openedx.core.djangolib.testing.utils import skip_unless_lms
# oauth_dispatch is not in CMS' INSTALLED_APPS so these imports will error during test collection
if settings.ROOT_URLCONF == 'lms.urls':
from oauth2_provider import models as dot_models
from .. import adapters
from .. import models
from ..dot_overrides.validators import EdxOAuth2Validator
from .constants import DUMMY_REDIRECT_URL
@skip_unless_lms
class AuthenticateTestCase(TestCase):
    """
    Test that users can authenticate with either username or email
    """
    def setUp(self):
        super().setUp()
        self.user = UserFactory.create(
            username='darkhelmet',
            password='12345',
            email='darkhelmet@spaceball_one.org',
        )
        self.validator = EdxOAuth2Validator()
    # NOTE(review): placeholder name — presumably test_authenticate_with_username; confirm.
    def METHOD_NAME(self):
        user = self.validator._authenticate(username='darkhelmet', password='12345')
        assert self.user == user
    def test_authenticate_with_email(self):
        # The validator also accepts an email address in the username field.
        user = self.validator._authenticate(username='darkhelmet@spaceball_one.org', password='12345')
        assert self.user == user
@skip_unless_lms
class CustomValidationTestCase(TestCase):
    """
    Test custom user validation works.
    In particular, inactive users should be able to validate.
    """
    def setUp(self):
        super().setUp()
        self.user = UserFactory.create(
            username='darkhelmet',
            password='12345',
            email='darkhelmet@spaceball_one.org',
        )
        self.validator = EdxOAuth2Validator()
        self.request_factory = RequestFactory()
    def test_active_user_validates(self):
        assert self.user.is_active
        request = self.request_factory.get('/')
        assert self.validator.validate_user('darkhelmet', '12345', client=None, request=request)
    def test_inactive_user_validates(self):
        # Deactivate the user first; validation should still succeed.
        self.user.is_active = False
        self.user.save()
        request = self.request_factory.get('/')
        assert self.validator.validate_user('darkhelmet', '12345', client=None, request=request)
@skip_unless_lms
class CustomAuthorizationViewTestCase(TestCase):
    """
    Test custom authorization view works.
    In particular, users should not be re-prompted to approve
    an application even if the access token is expired.
    (This is a temporary override until Auth Scopes is implemented.)
    """
    def setUp(self):
        super().setUp()
        self.dot_adapter = adapters.DOTAdapter()
        self.user = UserFactory()
        self.client.login(username=self.user.username, password='test')
        # A restricted app whose only access token has already expired.
        self.restricted_dot_app = self._create_restricted_app()
        self._create_expired_token(self.restricted_dot_app)
    def _create_restricted_app(self): # lint-amnesty, pylint: disable=missing-function-docstring
        restricted_app = self.dot_adapter.create_confidential_client(
            name='test restricted dot application',
            user=self.user,
            redirect_uri=DUMMY_REDIRECT_URL,
            client_id='dot-restricted-app-client-id',
        )
        models.RestrictedApplication.objects.create(application=restricted_app)
        return restricted_app
    def _create_expired_token(self, application):
        # Token for scope 'profile', expired 100 days ago.
        date_in_the_past = timezone.now() + datetime.timedelta(days=-100)
        dot_models.AccessToken.objects.create(
            user=self.user,
            token='1234567890',
            application=application,
            expires=date_in_the_past,
            scope='profile',
        )
    def _get_authorize(self, scope):
        # Issue a GET against the authorize endpoint for the given scope.
        authorize_url = '/oauth2/authorize/'
        return self.client.get(
            authorize_url,
            {
                'client_id': self.restricted_dot_app.client_id,
                'response_type': 'code',
                'state': 'random_state_string',
                'redirect_uri': DUMMY_REDIRECT_URL,
                'scope': scope,
            },
        )
    def test_no_reprompting(self):
        # Previously-approved scope: redirect straight back, no prompt.
        response = self._get_authorize(scope='profile')
        assert response.status_code == 302
        assert response.url.startswith(DUMMY_REDIRECT_URL)
    def test_prompting_with_new_scope(self):
        # A scope never approved before must show the approval page.
        response = self._get_authorize(scope='email')
        assert response.status_code == 200
        self.assertContains(response, settings.OAUTH2_PROVIDER['SCOPES']['email'])
        self.assertNotContains(response, settings.OAUTH2_PROVIDER['SCOPES']['profile'])
4,773 | initialize | # Copyright 2018 The Meson development team
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This file contains the detection logic for external dependencies that
# are UI-related.
from __future__ import annotations
import json
import os
from . import ExtensionModule, ModuleInfo
from .. import mlog
from ..dependencies import Dependency
from ..dependencies.dub import DubDependency
from ..interpreterbase import typed_pos_args
from ..mesonlib import Popen_safe, MesonException
class DlangModule(ExtensionModule):
    """Meson module with D-language (DUB) helpers; currently exposes
    generate_dub_file() for writing/updating a package's dub.json."""
    # Cached DUB program shared across instances: None = not searched yet,
    # False = searched and not found, otherwise the detected program.
    class_dubbin = None
    init_dub = False
    INFO = ModuleInfo('dlang', '0.48.0')
    def __init__(self, interpreter):
        super().__init__(interpreter)
        self.methods.update({
            'generate_dub_file': self.generate_dub_file,
        })
    def _init_dub(self, state):
        """Resolve the DUB binary once, caching it on the class.

        Raises:
            MesonException: if no usable DUB binary can be found.
        """
        # Single pass replacing the original's two near-identical if/else
        # blocks and doubled 'if not self.dubbin' check.
        if DlangModule.class_dubbin is None:
            # Prefer a binary already detected by DubDependency; otherwise
            # search for one ourselves.
            dubbin = DubDependency.class_dubbin
            if dubbin is None:
                dubbin = self.check_dub(state)
            DlangModule.class_dubbin = dubbin
        self.dubbin = DlangModule.class_dubbin
        if not self.dubbin:
            raise MesonException('DUB not found.')
        # Record success so generate_dub_file() can skip re-initialization
        # (the flag was previously checked but never set).
        DlangModule.init_dub = True
    @typed_pos_args('dlang.generate_dub_file', str, str)
    def generate_dub_file(self, state, args, kwargs):
        """Create or update <args[1]>/dub.json for the package named args[0].

        Existing file contents are loaded and merged; keyword arguments
        override or extend them. Dependency values are resolved through
        'dub describe', so only locally-known packages are recorded.
        """
        if not DlangModule.init_dub:
            self._init_dub(state)
        config = {
            'name': args[0]
        }
        config_path = os.path.join(args[1], 'dub.json')
        if os.path.exists(config_path):
            with open(config_path, encoding='utf-8') as ofile:
                try:
                    config = json.load(ofile)
                except ValueError:
                    mlog.warning('Failed to load the data in dub.json')
        # DUB requires these fields for publishing a package.
        warn_publishing = ['description', 'license']
        for arg in warn_publishing:
            if arg not in kwargs and \
               arg not in config:
                mlog.warning('Without', mlog.bold(arg), 'the DUB package can\'t be published')
        for key, value in kwargs.items():
            if key == 'dependencies':
                config[key] = {}
                # Accept either a list of dependencies or a single one;
                # non-Dependency entries are silently skipped (best-effort).
                deps = value if isinstance(value, list) else [value]
                for dep in deps:
                    if not isinstance(dep, Dependency):
                        continue
                    name = dep.get_name()
                    ret, _ = self._call_dubbin(['describe', name])
                    if ret == 0:
                        version = dep.get_version()
                        config[key][name] = '' if version is None else version
            else:
                config[key] = value
        with open(config_path, 'w', encoding='utf-8') as ofile:
            ofile.write(json.dumps(config, indent=4, ensure_ascii=False))
    def _call_dubbin(self, args, env=None):
        # Return (returncode, stripped stdout) of running 'dub <args>'.
        p, out = Popen_safe(self.dubbin.get_command() + args, env=env)[0:2]
        return p.returncode, out.strip()
    def check_dub(self, state):
        """Search for a runnable 'dub' executable.

        Returns the program on success, False otherwise (False doubles as
        the "searched and not found" cache marker).
        """
        dubbin = state.find_program('dub', silent=True)
        if dubbin.found():
            try:
                p, out = Popen_safe(dubbin.get_command() + ['--version'])[0:2]
                if p.returncode != 0:
                    mlog.warning('Found dub {!r} but couldn\'t run it'
                                 ''.format(' '.join(dubbin.get_command())))
                    # Set to False instead of None to signify that we've already
                    # searched for it and not found it
                    dubbin = False
            except (FileNotFoundError, PermissionError):
                dubbin = False
        else:
            dubbin = False
        if dubbin:
            mlog.log('Found DUB:', mlog.bold(dubbin.get_path()),
                     '(%s)' % out.strip())
        else:
            mlog.log('Found DUB:', mlog.red('NO'))
        return dubbin
# NOTE(review): placeholder name — presumably initialize(), the entry point
# Meson calls to instantiate this module; confirm.
def METHOD_NAME(*args, **kwargs):
    return DlangModule(*args, **kwargs)
4,774 | gs callable | """
Subset Species
"""
#*****************************************************************************
# Copyright (C) 2008 Mike Hansen <mhansen@gmail.com>,
#
# Distributed under the terms of the GNU General Public License (GPL)
#
# This code is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# The full text of the GPL is available at:
#
# http://www.gnu.org/licenses/
#*****************************************************************************
from .species import GenericCombinatorialSpecies
from .set_species import SetSpecies
from .structure import GenericSpeciesStructure
from sage.combinat.species.misc import accept_size
from sage.structure.unique_representation import UniqueRepresentation
from sage.arith.misc import factorial
class SubsetSpeciesStructure(GenericSpeciesStructure):
    """A structure of the subset species: a subset of the labels, stored
    internally as a list of 1-based label indices (``self._list``)."""
    def __repr__(self):
        """
        EXAMPLES::
            sage: set_random_seed(0)
            sage: S = species.SubsetSpecies()
            sage: a = S.structures(["a","b","c"])[0]; a
            {}
        """
        # Render with set braces instead of the generic list brackets.
        s = GenericSpeciesStructure.__repr__(self)
        return "{"+s[1:-1]+"}"
    def canonical_label(self):
        """
        Return the canonical label of ``self``.
        EXAMPLES::
            sage: P = species.SubsetSpecies()
            sage: S = P.structures(["a", "b", "c"])
            sage: [s.canonical_label() for s in S]
            [{}, {'a'}, {'a'}, {'a'}, {'a', 'b'}, {'a', 'b'}, {'a', 'b'}, {'a', 'b', 'c'}]
        """
        # The canonical representative of a k-subset is {1, ..., k}.
        rng = list(range(1, len(self._list) + 1))
        return self.__class__(self.parent(), self._labels, rng)
    def label_subset(self):
        r"""
        Return a subset of the labels that "appear" in this structure.
        EXAMPLES::
            sage: P = species.SubsetSpecies()
            sage: S = P.structures(["a", "b", "c"])
            sage: [s.label_subset() for s in S]
            [[], ['a'], ['b'], ['c'], ['a', 'b'], ['a', 'c'], ['b', 'c'], ['a', 'b', 'c']]
        """
        return [self._relabel(i) for i in self._list]
    def transport(self, perm):
        r"""
        Return the transport of this subset along the permutation perm.
        EXAMPLES::
            sage: F = species.SubsetSpecies()
            sage: a = F.structures(["a", "b", "c"])[5]; a
            {'a', 'c'}
            sage: p = PermutationGroupElement((1,2))
            sage: a.transport(p)
            {'b', 'c'}
            sage: p = PermutationGroupElement((1,3))
            sage: a.transport(p)
            {'a', 'c'}
        """
        l = sorted([perm(i) for i in self._list])
        return SubsetSpeciesStructure(self.parent(), self._labels, l)
    def automorphism_group(self):
        r"""
        Return the group of permutations whose action on this subset leave
        it fixed.
        EXAMPLES::
            sage: F = species.SubsetSpecies()
            sage: a = F.structures([1,2,3,4])[6]; a
            {1, 3}
            sage: a.automorphism_group()
            Permutation Group with generators [(2,4), (1,3)]
        ::
            sage: [a.transport(g) for g in a.automorphism_group()]
            [{1, 3}, {1, 3}, {1, 3}, {1, 3}]
        """
        from sage.groups.perm_gps.permgroup_named import SymmetricGroup
        from sage.groups.perm_gps.permgroup import PermutationGroup
        # Any permutation fixing the subset permutes the subset and its
        # complement independently, hence the product of two symmetric groups.
        a = SymmetricGroup(self._list)
        b = SymmetricGroup(self.complement()._list)
        return PermutationGroup(a.gens() + b.gens())
    def complement(self):
        r"""
        Return the complement of ``self``.
        EXAMPLES::
            sage: F = species.SubsetSpecies()
            sage: a = F.structures(["a", "b", "c"])[5]; a
            {'a', 'c'}
            sage: a.complement()
            {'b'}
        """
        new_list = [i for i in range(1, len(self._labels)+1) if i not in self._list]
        return SubsetSpeciesStructure(self.parent(), self._labels, new_list)
class SubsetSpecies(GenericCombinatorialSpecies, UniqueRepresentation):
    """The species of all subsets of a set of labels."""
    @staticmethod
    @accept_size
    def __classcall__(cls, *args, **kwds):
        """
        EXAMPLES::
            sage: S = species.SubsetSpecies(); S
            Subset species
        """
        return super(SubsetSpecies, cls).__classcall__(cls, *args, **kwds)
    def __init__(self, min=None, max=None, weight=None):
        """
        Return the species of subsets.
        EXAMPLES::
            sage: S = species.SubsetSpecies()
            sage: S.generating_series()[0:5]
            [1, 2, 2, 4/3, 2/3]
            sage: S.isotype_generating_series()[0:5]
            [1, 2, 3, 4, 5]
            sage: S = species.SubsetSpecies()
            sage: c = S.generating_series()[0:3]
            sage: S._check()
            True
            sage: S == loads(dumps(S))
            True
        """
        # Forward the constructor arguments to the generic initializer.
        # (They were previously hard-coded to None, silently discarding
        # min/max/weight; _cis() below relies on self._weight being set.)
        GenericCombinatorialSpecies.__init__(self, min=min, max=max, weight=weight)
        self._name = "Subset species"
    _default_structure_class = SubsetSpeciesStructure
    def _structures(self, structure_class, labels):
        """
        EXAMPLES::
            sage: S = species.SubsetSpecies()
            sage: S.structures([1,2]).list()
            [{}, {1}, {2}, {1, 2}]
            sage: S.structures(['a','b']).list()
            [{}, {'a'}, {'b'}, {'a', 'b'}]
        """
        from sage.combinat.combination import Combinations
        for c in Combinations(range(1, len(labels)+1)):
            yield structure_class(self, labels, c)
    def _isotypes(self, structure_class, labels):
        """
        EXAMPLES::
            sage: S = species.SubsetSpecies()
            sage: S.isotypes([1,2]).list()
            [{}, {1}, {1, 2}]
            sage: S.isotypes(['a','b']).list()
            [{}, {'a'}, {'a', 'b'}]
        """
        # Up to isomorphism a subset is determined by its size alone.
        for i in range(len(labels)+1):
            yield structure_class(self, labels, range(1, i+1))
    # NOTE(review): placeholder name — presumably _gs_callable; confirm.
    def METHOD_NAME(self, base_ring, n):
        """
        The generating series for the species of subsets is
        `e^{2x}`.
        EXAMPLES::
            sage: S = species.SubsetSpecies()
            sage: [S.generating_series().coefficient(i) for i in range(5)]
            [1, 2, 2, 4/3, 2/3]
        """
        # Coefficient of x^n in e^{2x}: 2^n / n!.
        return base_ring(2)**n / base_ring(factorial(n))
    def _itgs_callable(self, base_ring, n):
        r"""
        The generating series for the species of subsets is
        `e^{2x}`.
        EXAMPLES::
            sage: S = species.SubsetSpecies()
            sage: S.isotype_generating_series()[0:5]
            [1, 2, 3, 4, 5]
        """
        # There are n + 1 isomorphism types of subsets of an n-set.
        return base_ring(n + 1)
    def _cis(self, series_ring, base_ring):
        r"""
        The cycle index series for the species of subsets satisfies
        .. MATH::
            Z_{\mathfrak{p}} = Z_{\mathcal{E}} \cdot Z_{\mathcal{E}}.
        EXAMPLES::
            sage: S = species.SubsetSpecies()
            sage: S.cycle_index_series()[0:5]
            [p[],
             2*p[1],
             2*p[1, 1] + p[2],
             4/3*p[1, 1, 1] + 2*p[2, 1] + 2/3*p[3],
             2/3*p[1, 1, 1, 1] + 2*p[2, 1, 1] + 1/2*p[2, 2] + 4/3*p[3, 1] + 1/2*p[4]]
        """
        # A subset structure is a pair (subset, complement) of sets.
        ciset = SetSpecies().cycle_index_series(base_ring)
        res = ciset**2
        if self.is_weighted():
            res *= self._weight
        return res
# Backward-compatibility alias so existing imports/pickles using the old
# class name keep working.
SubsetSpecies_class = SubsetSpecies |
4,775 | get cycle length | #
# DeepLabCut Toolbox (deeplabcut.org)
# © A. & M.W. Mathis Labs
# https://github.com/DeepLabCut/DeepLabCut
#
# Please see AUTHORS for contributors.
# https://github.com/DeepLabCut/DeepLabCut/blob/master/AUTHORS
#
# Licensed under GNU Lesser General Public License v3.0
#
""" Cosine Scheduler
Cosine LR schedule with warmup, cycle/restarts, noise.
Hacked together by / Copyright 2020 Ross Wightman
"""
import logging
import math
import torch
from .scheduler import Scheduler
_logger = logging.getLogger(__name__)
class CosineLRScheduler(Scheduler):
    """
    Cosine decay with restarts.
    This is described in the paper https://arxiv.org/abs/1608.03983.
    Inspiration from
    https://github.com/allenai/allennlp/blob/master/allennlp/training/learning_rate_schedulers/cosine.py
    """
    def __init__(
        self,
        optimizer: torch.optim.Optimizer,
        t_initial: int,
        t_mul: float = 1.0,
        lr_min: float = 0.0,
        decay_rate: float = 1.0,
        warmup_t=0,
        warmup_lr_init=0,
        warmup_prefix=False,
        cycle_limit=0,
        t_in_epochs=True,
        noise_range_t=None,
        noise_pct=0.67,
        noise_std=1.0,
        noise_seed=42,
        initialize=True,
    ) -> None:
        # t_initial: length of the first cycle; t_mul: multiplicative growth
        # of successive cycle lengths; decay_rate: per-cycle lr decay factor;
        # cycle_limit == 0 means unlimited restarts.
        super().__init__(
            optimizer,
            param_group_field="lr",
            noise_range_t=noise_range_t,
            noise_pct=noise_pct,
            noise_std=noise_std,
            noise_seed=noise_seed,
            initialize=initialize,
        )
        assert t_initial > 0
        assert lr_min >= 0
        if t_initial == 1 and t_mul == 1 and decay_rate == 1:
            _logger.warning(
                "Cosine annealing scheduler will have no effect on the learning "
                "rate since t_initial = t_mul = eta_mul = 1."
            )
        self.t_initial = t_initial
        self.t_mul = t_mul
        self.lr_min = lr_min
        self.decay_rate = decay_rate
        self.cycle_limit = cycle_limit
        self.warmup_t = warmup_t
        self.warmup_lr_init = warmup_lr_init
        self.warmup_prefix = warmup_prefix
        self.t_in_epochs = t_in_epochs
        if self.warmup_t:
            # Per-group linear increment applied on every warmup step.
            self.warmup_steps = [
                (v - warmup_lr_init) / self.warmup_t for v in self.base_values
            ]
            super().update_groups(self.warmup_lr_init)
        else:
            self.warmup_steps = [1 for _ in self.base_values]
    def _get_lr(self, t):
        # Return the list of per-parameter-group learning rates at step t.
        if t < self.warmup_t:
            # Linear warmup phase.
            lrs = [self.warmup_lr_init + t * s for s in self.warmup_steps]
        else:
            if self.warmup_prefix:
                # Warmup time does not count toward the first cycle.
                t = t - self.warmup_t
            if self.t_mul != 1:
                # Cycle lengths grow geometrically: cycle i has length
                # t_initial * t_mul**i; solve the geometric series for the
                # current cycle index i, its length t_i and offset t_curr.
                i = math.floor(
                    math.log(1 - t / self.t_initial * (1 - self.t_mul), self.t_mul)
                )
                t_i = self.t_mul**i * self.t_initial
                t_curr = t - (1 - self.t_mul**i) / (1 - self.t_mul) * self.t_initial
            else:
                i = t // self.t_initial
                t_i = self.t_initial
                t_curr = t - (self.t_initial * i)
            # Both the max lr and floor decay by decay_rate each cycle.
            gamma = self.decay_rate**i
            lr_min = self.lr_min * gamma
            lr_max_values = [v * gamma for v in self.base_values]
            if self.cycle_limit == 0 or (self.cycle_limit > 0 and i < self.cycle_limit):
                lrs = [
                    lr_min
                    + 0.5 * (lr_max - lr_min) * (1 + math.cos(math.pi * t_curr / t_i))
                    for lr_max in lr_max_values
                ]
            else:
                # Past the last allowed cycle: hold everything at lr_min.
                lrs = [self.lr_min for _ in self.base_values]
        return lrs
    def get_epoch_values(self, epoch: int):
        if self.t_in_epochs:
            return self._get_lr(epoch)
        else:
            return None
    def get_update_values(self, num_updates: int):
        if not self.t_in_epochs:
            return self._get_lr(num_updates)
        else:
            return None
    # NOTE(review): placeholder name — presumably get_cycle_length; confirm.
    def METHOD_NAME(self, cycles=0):
        # Total number of steps spanned by `cycles` cycles (defaults to
        # cycle_limit, clamped to at least one cycle).
        if not cycles:
            cycles = self.cycle_limit
        cycles = max(1, cycles)
        if self.t_mul == 1.0:
            return self.t_initial * cycles
        else:
            # Sum of the geometric series of cycle lengths.
            return int(
                math.floor(
                    -self.t_initial * (self.t_mul**cycles - 1) / (1 - self.t_mul)
                )
            )
4,776 | init data handler | import os
import re
import signal
import subprocess
import sys
from sonic_py_common import daemon_base, logger
from swsscommon.swsscommon import ConfigDBConnector, RestartWaiter
SYSLOG_IDENTIFIER = "containercfgd"
logger = logger.Logger(SYSLOG_IDENTIFIER)
# Table names
FEATURE_TABLE = 'FEATURE'
SYSLOG_CONFIG_FEATURE_TABLE = 'SYSLOG_CONFIG_FEATURE'
# Table field names
SYSLOG_RATE_LIMIT_INTERVAL = 'rate_limit_interval'
SYSLOG_RATE_LIMIT_BURST = 'rate_limit_burst'
# Container name
container_name = None
def run_command(command):
    """
    Run a command and capture its output.
    :param command: Command to execute, as a list of arguments.
    :return: Decoded standard output of the command.
    """
    completed_output = subprocess.check_output(
        command, text=True, stderr=subprocess.PIPE
    )
    return completed_output
class ContainerConfigDaemon(daemon_base.DaemonBase):
    """Daemon that listens to CONFIG DB changes inside a container and
    dispatches them to the registered per-table handlers."""
    # Maps CONFIG DB table name -> handler instance (populated through
    # register_handler / the @config_handler decorator).
    handlers = {}
    def __init__(self):
        super(ContainerConfigDaemon, self).__init__(SYSLOG_IDENTIFIER)
    def run(self):
        """Register config handlers and listen to CONFIG DB changes
        """
        config_db = ConfigDBConnector()
        config_db.connect(wait_for_init=True, retry_on=True)
        self.log_notice(f'Connected to CONFIG DB')
        for table_name, handler in self.handlers.items():
            config_db.subscribe(table_name, handler.handle_config)
        # NOTE(review): the keyword must match ConfigDBConnector.listen's
        # initial-data callback parameter (presumably init_data_handler) — confirm.
        config_db.listen(METHOD_NAME=self.METHOD_NAME)
    # NOTE(review): placeholder name — presumably init_data_handler; confirm.
    def METHOD_NAME(self, init_data):
        """Handle initial data in CONFIG DB
        Args:
            init_data (dict): Initial data when first time connecting to CONFIG DB. {<table_name>: {<field_name>: <field_value>}}
        """
        for handler in self.handlers.values():
            handler.handle_init_data(init_data)
    @classmethod
    def register_handler(cls, table_name, object_type):
        """Register CONFIG DB handler
        Args:
            table_name (str): CONFIG DB table name
            object_type (class): Class of CONFIG DB handler
        """
        # The handler class is instantiated here, once, at registration time.
        cls.handlers[table_name] = object_type()
    def signal_handler(self, sig, frame):
        # Translate POSIX signals into clean exits; SIGHUP is ignored.
        if sig == signal.SIGHUP:
            self.log_info("ContainerCfgd: Caught SIGHUP - ignoring...")
        elif sig == signal.SIGINT:
            self.log_info("ContainerCfgd: Caught SIGINT - exiting...")
            sys.exit(128 + sig)
        elif sig == signal.SIGTERM:
            self.log_info("ContainerCfgd: Caught SIGTERM - exiting...")
            sys.exit(128 + sig)
        else:
            self.log_warning("ContainerCfgd: Caught unhandled signal '{}'".format(sig))
def config_handler(table_name):
    """Class decorator that registers a CONFIG DB handler for *table_name*.

    Args:
        table_name (str): CONFIG DB table name the decorated class handles.
    """
    def decorate(handler_cls):
        # Side effect only: instantiate and register the handler, then hand
        # the class back unchanged so normal class usage is unaffected.
        ContainerConfigDaemon.register_handler(table_name, handler_cls)
        return handler_cls
    return decorate
@config_handler(SYSLOG_CONFIG_FEATURE_TABLE)
class SyslogHandler:
    """Apply per-container syslog rate limit settings from CONFIG DB.

    Re-renders rsyslog.conf from the template whenever the rate limit
    interval/burst entry for this container changes, then restarts rsyslogd.
    """

    # syslog conf file path in docker
    SYSLOG_CONF_PATH = '/etc/rsyslog.conf'
    # temporary syslog conf file path in docker
    TMP_SYSLOG_CONF_PATH = '/tmp/rsyslog.conf'
    # Regular expressions to extract values from rsyslog.conf. Raw strings
    # are required here: '\s'/'\d' in plain literals are invalid escape
    # sequences (SyntaxWarning on newer Python, slated to become errors).
    INTERVAL_PATTERN = r'.*SystemLogRateLimitInterval\s+(\d+).*'
    BURST_PATTERN = r'.*SystemLogRateLimitBurst\s+(\d+).*'
    TARGET_IP_PATTERN = r'.*target="(.*?)".*'

    def __init__(self):
        self.current_interval, self.current_burst, self.target_ip = self.parse_syslog_conf()

    def handle_config(self, table, key, data):
        """Handle CONFIG DB change. Callback by ConfigDBConnector.

        Args:
            table (str): CONFIG DB table name
            key (str): Key of the changed entry
            data (dict): Data of the entry: {<field_name>: <field_value>}
        """
        try:
            # Entries for other containers are not our concern.
            if key != container_name:
                return
            self.update_syslog_config(data)
        except Exception as e:
            logger.log_error('Failed to config syslog for container {} with data {} - {}'.format(key, data, e))

    def handle_init_data(self, init_data):
        """Handle initial data in CONFIG DB. Callback by ConfigDBConnector.

        Args:
            init_data (dict): Initial data when first time connecting to
                CONFIG DB. {<table_name>: {<field_name>: <field_value>}}
        """
        if SYSLOG_CONFIG_FEATURE_TABLE in init_data:
            if container_name in init_data[SYSLOG_CONFIG_FEATURE_TABLE]:
                self.update_syslog_config(init_data[SYSLOG_CONFIG_FEATURE_TABLE][container_name])

    def update_syslog_config(self, data):
        """Parse existing syslog conf and apply new syslog conf.

        Re-renders the config via sonic-cfggen, installs it and restarts
        rsyslogd; no-op when interval and burst are unchanged.

        Args:
            data (dict): Data of the entry: {<field_name>: <field_value>}
        """
        new_interval = '0' if not data else data.get(SYSLOG_RATE_LIMIT_INTERVAL, '0')
        new_burst = '0' if not data else data.get(SYSLOG_RATE_LIMIT_BURST, '0')
        if new_interval == self.current_interval and new_burst == self.current_burst:
            logger.log_notice('Syslog rate limit configuration does not change, ignore it')
            return
        logger.log_notice(f'Configure syslog rate limit interval={new_interval}, burst={new_burst}')
        if os.path.exists(self.TMP_SYSLOG_CONF_PATH):
            os.remove(self.TMP_SYSLOG_CONF_PATH)
        with open(self.TMP_SYSLOG_CONF_PATH, 'w+') as f:
            json_args = f'{{"target_ip": "{self.target_ip}", "container_name": "{container_name}" }}'
            output = run_command(['sonic-cfggen', '-d', '-t', '/usr/share/sonic/templates/rsyslog-container.conf.j2', '-a', json_args])
            f.write(output)
        run_command(['cp', self.TMP_SYSLOG_CONF_PATH, self.SYSLOG_CONF_PATH])
        run_command(['supervisorctl', 'restart', 'rsyslogd'])
        self.current_interval = new_interval
        self.current_burst = new_burst

    def parse_syslog_conf(self):
        """Parse existing syslog conf and extract config values.

        Returns:
            tuple: (interval, burst, target_ip); interval/burst default to
            '0' and target_ip to None when absent from the file.
        """
        interval = '0'
        burst = '0'
        target_ip = None
        with open(self.SYSLOG_CONF_PATH, 'r') as f:
            content = f.read()
        # Only the first occurrence of each setting matters.
        match = re.search(self.INTERVAL_PATTERN, content)
        if match:
            interval = match.group(1)
        match = re.search(self.BURST_PATTERN, content)
        if match:
            burst = match.group(1)
        match = re.search(self.TARGET_IP_PATTERN, content)
        if match:
            target_ip = match.group(1)
        return interval, burst, target_ip
def main():
    """Entry point: wait for advanced boot to finish, then run the daemon."""
    RestartWaiter.waitAdvancedBootDone()
    global container_name
    container_name = os.environ['CONTAINER_NAME']
    ContainerConfigDaemon().run()


if __name__ == '__main__':
    main()
4,777 | convert dtype | """This module contains actions (functions) that operate on layers.
Among other potential uses, these will populate the menu when you right-click
on a layer in the LayerList.
"""
from __future__ import annotations
from typing import TYPE_CHECKING, List, cast
import numpy as np
from napari.layers import Image, Labels, Layer
from napari.layers._source import layer_source
from napari.layers.utils import stack_utils
from napari.layers.utils._link_layers import get_linked_layers
from napari.utils.translations import trans
if TYPE_CHECKING:
from napari.components import LayerList
def _duplicate_layer(ll: LayerList, *, name: str = ''):
    """Insert a deep copy of each selected layer right after its original."""
    # NOTE(review): the `name` keyword parameter is currently unused.
    from copy import deepcopy
    for lay in list(ll.selection):
        data, state, type_str = lay.as_layer_data_tuple()
        state["name"] = trans._('{name} copy', name=lay.name)
        # Record the original layer as the source of the duplicate.
        with layer_source(parent=lay):
            new = Layer.create(deepcopy(data), state, type_str)
        ll.insert(ll.index(lay) + 1, new)
def _split_stack(ll: LayerList, axis: int = 0):
    """Replace the active Image layer by one layer per slice along *axis*.

    RGB images are split into their color channels instead; non-Image
    active layers are ignored.
    """
    layer = ll.selection.active
    if not isinstance(layer, Image):
        return
    if layer.rgb:
        images = stack_utils.split_rgb(layer)
    else:
        images = stack_utils.stack_to_images(layer, axis)
    ll.remove(layer)
    ll.extend(images)
    ll.selection = set(images)  # type: ignore
def _split_rgb(ll: LayerList):
    """Split the active RGB image into one layer per color channel."""
    return _split_stack(ll)
def _convert(ll: LayerList, type_: str):
    """Convert each selected layer to layer type *type_*, in place in the list."""
    from napari.layers import Shapes
    for lay in list(ll.selection):
        idx = ll.index(lay)
        ll.pop(idx)
        if isinstance(lay, Shapes) and type_ == 'labels':
            # Shapes are rasterized to a label image rather than cast.
            data = lay.to_labels()
        else:
            # Labels layers require integer data.
            data = lay.data.astype(int) if type_ == 'labels' else lay.data
        new_layer = Layer.create(data, lay._get_base_state(), type_)
        ll.insert(idx, new_layer)
# TODO: currently, we have to create a thin _convert_to_x wrapper around _convert
# here for the purpose of type hinting (which partial doesn't do) ...
# so that inject_dependencies works correctly.
# however, we could conceivably add an `args` option to register_action
# that would allow us to pass additional arguments, like a partial.
def _convert_to_labels(ll: LayerList):
    """Convert the selected layers to Labels layers."""
    return _convert(ll, 'labels')
def _convert_to_image(ll: LayerList):
    """Convert the selected layers to Image layers."""
    return _convert(ll, 'image')
def _merge_stack(ll: LayerList, rgb=False):
# force selection to follow LayerList ordering
imgs = cast(List[Image], [layer for layer in ll if layer in ll.selection])
assert all(isinstance(layer, Image) for layer in imgs)
merged = (
stack_utils.merge_rgb(imgs)
if rgb
else stack_utils.images_to_stack(imgs)
)
for layer in imgs:
ll.remove(layer)
ll.append(merged)
def _toggle_visibility(ll: LayerList):
    """Toggle the visibility of every selected layer.

    Visibility is snapshotted before toggling; the guard then skips any
    layer whose visibility already changed in between — presumably as a
    side effect of toggling a linked layer — to avoid double-toggling.
    NOTE(review): relies on ll.selection iterating in the same order in
    both loops; true for an unmodified set in CPython — confirm.
    """
    current_visibility_state = []
    for layer in ll.selection:
        current_visibility_state.append(layer.visible)
    for visibility, layer in zip(current_visibility_state, ll.selection):
        if layer.visible == visibility:
            layer.visible = not visibility
def _link_selected_layers(ll: LayerList):
    """Link all currently selected layers together."""
    ll.link_layers(ll.selection)
def _unlink_selected_layers(ll: LayerList):
    """Unlink all currently selected layers."""
    ll.unlink_layers(ll.selection)
def _select_linked_layers(ll: LayerList):
    """Extend the selection with every layer linked to a selected layer."""
    ll.selection.update(get_linked_layers(*ll.selection))
def METHOD_NAME(ll: LayerList, mode='int64'):
    """Cast the active Labels layer's data to the integer dtype named by *mode*.

    Raises if the active layer is not a Labels layer or if its values do
    not fit in the target dtype.
    """
    if not (layer := ll.selection.active):
        return
    if not isinstance(layer, Labels):
        raise NotImplementedError(
            trans._(
                "Data type conversion only implemented for labels",
                deferred=True,
            )
        )
    target_dtype = np.dtype(mode)
    dtype_info = np.iinfo(target_dtype)
    below = np.min(layer.data) < dtype_info.min
    above = np.max(layer.data) > dtype_info.max
    if below or above:
        raise AssertionError(
            trans._(
                "Labeling contains values outside of the target data type range.",
                deferred=True,
            )
        )
    layer.data = layer.data.astype(target_dtype)
def _project(ll: LayerList, axis: int = 0, mode='max'):
    """Append a projection of the active Image layer along *axis*.

    *mode* names a numpy reduction function (e.g. 'max', 'mean'); the
    original layer is kept and the projected copy is appended.
    """
    layer = ll.selection.active
    if not layer:
        return
    if not isinstance(layer, Image):
        raise NotImplementedError(
            trans._(
                "Projections are only implemented for images", deferred=True
            )
        )
    # this is not the desired behavior for coordinate-based layers
    # but the action is currently only enabled for 'image_active and ndim > 2'
    # before opening up to other layer types, this line should be updated.
    data = (getattr(np, mode)(layer.data, axis=axis, keepdims=False),)
    # get the meta data of the layer, but without transforms
    meta = {
        key: layer._get_base_state()[key]
        for key in layer._get_base_state()
        if key not in ('scale', 'translate', 'rotate', 'shear', 'affine')
    }
    meta.update(  # sourcery skip
        {
            'name': f'{layer} {mode}-proj',
            'colormap': layer.colormap.name,
            'rendering': layer.rendering,
        }
    )
    new = Layer.create(data, meta, layer._type_string)
    # add transforms from original layer, but drop the axis of the projection
    new._transforms = layer._transforms.set_slice(
        [ax for ax in range(layer.ndim) if ax != axis]
    )
    ll.append(new)
4,778 | test bincount minlength | import numpy
import pytest
from numpy.testing import assert_allclose
import dpnp
from .helper import get_all_dtypes
@pytest.mark.parametrize(
    "dtype", get_all_dtypes(no_none=True, no_bool=True, no_complex=True)
)
@pytest.mark.parametrize("size", [2, 4, 8, 16, 3, 9, 27, 81])
def test_median(dtype, size):
    """dpnp.median matches numpy.median for even and odd sizes."""
    a = numpy.arange(size, dtype=dtype)
    ia = dpnp.array(a)
    np_res = numpy.median(a)
    dpnp_res = dpnp.median(ia)
    assert_allclose(dpnp_res, np_res)
@pytest.mark.usefixtures("allow_fall_back_on_numpy")
@pytest.mark.parametrize("axis", [0, 1, -1, 2, -2, (1, 2), (0, -2)])
@pytest.mark.parametrize(
    "dtype", get_all_dtypes(no_none=True, no_bool=True, no_complex=True)
)
def test_max(axis, dtype):
    """dpnp.max matches numpy.max for single- and multi-axis reductions."""
    a = numpy.arange(768, dtype=dtype).reshape((4, 4, 6, 8))
    ia = dpnp.array(a)
    np_res = numpy.max(a, axis=axis)
    dpnp_res = dpnp.max(ia, axis=axis)
    assert_allclose(dpnp_res, np_res)
@pytest.mark.usefixtures("allow_fall_back_on_numpy")
@pytest.mark.parametrize(
    "array",
    [
        [2, 0, 6, 2],
        [2, 0, 6, 2, 5, 6, 7, 8],
        [],
        [2, 1, numpy.nan, 5, 3],
        [-1, numpy.nan, 1, numpy.inf],
        [3, 6, 0, 1],
        [3, 6, 0, 1, 8],
        [3, 2, 9, 6, numpy.nan],
        [numpy.nan, numpy.nan, numpy.inf, numpy.nan],
        [[2, 0], [6, 2]],
        [[2, 0, 6, 2], [5, 6, 7, 8]],
        [[[2, 0], [6, 2]], [[5, 6], [7, 8]]],
        [[-1, numpy.nan], [1, numpy.inf]],
        [[numpy.nan, numpy.nan], [numpy.inf, numpy.nan]],
    ],
    ids=[
        "[2, 0, 6, 2]",
        "[2, 0, 6, 2, 5, 6, 7, 8]",
        "[]",
        "[2, 1, np.nan, 5, 3]",
        "[-1, np.nan, 1, np.inf]",
        "[3, 6, 0, 1]",
        "[3, 6, 0, 1, 8]",
        "[3, 2, 9, 6, np.nan]",
        "[np.nan, np.nan, np.inf, np.nan]",
        "[[2, 0], [6, 2]]",
        "[[2, 0, 6, 2], [5, 6, 7, 8]]",
        "[[[2, 0], [6, 2]], [[5, 6], [7, 8]]]",
        "[[-1, np.nan], [1, np.inf]]",
        "[[np.nan, np.nan], [np.inf, np.nan]]",
    ],
)
@pytest.mark.parametrize(
    "dtype", get_all_dtypes(no_none=True, no_bool=True, no_complex=True)
)
def test_nanvar(array, dtype):
    """dpnp.nanvar matches numpy.nanvar across ddof values and axis=None."""
    # NOTE(review): this line overwrites the parametrized `dtype`, so every
    # dtype case runs with the default float type - confirm whether this is
    # intentional (nan values require a float dtype) or a leftover.
    dtype = dpnp.default_float_type()
    a = numpy.array(array, dtype=dtype)
    ia = dpnp.array(a)
    for ddof in range(a.ndim):
        expected = numpy.nanvar(a, ddof=ddof)
        result = dpnp.nanvar(ia, ddof=ddof)
        assert_allclose(expected, result, rtol=1e-06)
    expected = numpy.nanvar(a, axis=None, ddof=0)
    result = dpnp.nanvar(ia, axis=None, ddof=0)
    assert_allclose(expected, result, rtol=1e-06)
@pytest.mark.usefixtures("allow_fall_back_on_numpy")
class TestBincount:
    """dpnp.bincount matches numpy.bincount for minlength and weights."""

    @pytest.mark.parametrize(
        "array",
        [[1, 2, 3], [1, 2, 2, 1, 2, 4], [2, 2, 2, 2]],
        ids=["[1, 2, 3]", "[1, 2, 2, 1, 2, 4]", "[2, 2, 2, 2]"],
    )
    @pytest.mark.parametrize(
        "minlength", [0, 1, 3, 5], ids=["0", "1", "3", "5"]
    )
    def METHOD_NAME(self, array, minlength):
        """minlength both below and above the max value is honored."""
        np_a = numpy.array(array)
        dpnp_a = dpnp.array(array)
        expected = numpy.bincount(np_a, minlength=minlength)
        result = dpnp.bincount(dpnp_a, minlength=minlength)
        assert_allclose(expected, result)

    @pytest.mark.parametrize(
        "array", [[1, 2, 2, 1, 2, 4]], ids=["[1, 2, 2, 1, 2, 4]"]
    )
    @pytest.mark.parametrize(
        "weights",
        [None, [0.3, 0.5, 0.2, 0.7, 1.0, -0.6], [2, 2, 2, 2, 2, 2]],
        ids=["None", "[0.3, 0.5, 0.2, 0.7, 1., -0.6]", "[2, 2, 2, 2, 2, 2]"],
    )
    def test_bincount_weights(self, array, weights):
        """Float, int and absent weights all match numpy."""
        np_a = numpy.array(array)
        dpnp_a = dpnp.array(array)
        expected = numpy.bincount(np_a, weights=weights)
        result = dpnp.bincount(dpnp_a, weights=weights)
        assert_allclose(expected, result)
@pytest.mark.parametrize(
    "dtype", get_all_dtypes(no_bool=True, no_none=True, no_complex=True)
)
def test_cov_rowvar(dtype):
    """cov(a, rowvar=False) equals cov(a.T) and matches numpy."""
    a = dpnp.array([[0, 2], [1, 1], [2, 0]], dtype=dtype)
    b = numpy.array([[0, 2], [1, 1], [2, 0]], dtype=dtype)
    assert_allclose(dpnp.cov(a.T), dpnp.cov(a, rowvar=False))
    assert_allclose(numpy.cov(b, rowvar=False), dpnp.cov(a, rowvar=False))
@pytest.mark.parametrize(
    "dtype", get_all_dtypes(no_bool=True, no_none=True, no_complex=True)
)
def test_cov_1D_rowvar(dtype):
    """rowvar=False on a single-row input matches numpy."""
    a = dpnp.array([[0, 1, 2]], dtype=dtype)
    b = numpy.array([[0, 1, 2]], dtype=dtype)
    assert_allclose(numpy.cov(b, rowvar=False), dpnp.cov(a, rowvar=False))
4,779 | dev interrupt hw config dev list | import logging
from itertools import product
from lnst.Common.Parameters import (
Param,
IntParam,
StrParam,
IPv4NetworkParam,
IPv6NetworkParam,
)
from lnst.Common.IpAddress import interface_addresses
from lnst.Controller import HostReq, DeviceReq, RecipeParam
from lnst.Recipes.ENRT.VirtualEnrtRecipe import VirtualEnrtRecipe
from lnst.Recipes.ENRT.ConfigMixins.OffloadSubConfigMixin import (
OffloadSubConfigMixin)
from lnst.Recipes.ENRT.ConfigMixins.CommonHWSubConfigMixin import (
CommonHWSubConfigMixin)
from lnst.Recipes.ENRT.PingMixins import VlanPingEvaluatorMixin
from lnst.RecipeCommon.Ping.PingEndpoints import PingEndpoints
from lnst.Devices import OvsBridgeDevice
class VirtualOvsBridgeVlansOverBondRecipe(VlanPingEvaluatorMixin,
    CommonHWSubConfigMixin, OffloadSubConfigMixin, VirtualEnrtRecipe):
    """ENRT recipe: guest taps attached to an OVS bridge whose uplink is an
    OVS bond of two NICs, with guests separated into two VLAN tags.

    guest1/guest3 share vlan0 and guest2/guest4 share vlan1, so only those
    pairs are expected to be mutually reachable across the two hosts.
    """
    host1 = HostReq()
    host1.eth0 = DeviceReq(label="to_switch", driver=RecipeParam("driver"))
    host1.eth1 = DeviceReq(label="to_switch", driver=RecipeParam("driver"))
    host1.tap0 = DeviceReq(label="to_guest1")
    host1.tap1 = DeviceReq(label="to_guest2")
    host2 = HostReq()
    host2.eth0 = DeviceReq(label="to_switch", driver=RecipeParam("driver"))
    host2.eth1 = DeviceReq(label="to_switch", driver=RecipeParam("driver"))
    host2.tap0 = DeviceReq(label="to_guest3")
    host2.tap1 = DeviceReq(label="to_guest4")
    guest1 = HostReq()
    guest1.eth0 = DeviceReq(label="to_guest1")
    guest2 = HostReq()
    guest2.eth0 = DeviceReq(label="to_guest2")
    guest3 = HostReq()
    guest3.eth0 = DeviceReq(label="to_guest3")
    guest4 = HostReq()
    guest4.eth0 = DeviceReq(label="to_guest4")
    vlan0_id = IntParam(default=10)
    vlan1_id = IntParam(default=20)
    offload_combinations = Param(default=(
        dict(gro="on", gso="on", tso="on", tx="on"),
        dict(gro="off", gso="on", tso="on", tx="on"),
        dict(gro="on", gso="off", tso="off", tx="on"),
        dict(gro="on", gso="on", tso="off", tx="off")))
    vlan0_ipv4 = IPv4NetworkParam(default="192.168.10.0/24")
    vlan0_ipv6 = IPv6NetworkParam(default="fc00:0:0:1::/64")
    vlan1_ipv4 = IPv4NetworkParam(default="192.168.20.0/24")
    vlan1_ipv6 = IPv6NetworkParam(default="fc00:0:0:2::/64")
    bonding_mode = StrParam(mandatory = True)
    miimon_value = IntParam(mandatory = True)

    def test_wide_configuration(self):
        """Build the per-host OVS bridge (tagged taps + bonded uplink),
        assign per-VLAN addresses to the guests and bring everything up."""
        host1, host2, guest1, guest2, guest3, guest4 = (self.matched.host1,
            self.matched.host2, self.matched.guest1, self.matched.guest2,
            self.matched.guest3, self.matched.guest4)
        for host, port_name in [(host1, "bond_port1"),
            (host2, "bond_port2")]:
            for dev in [host.eth0, host.eth1, host.tap0, host.tap1]:
                dev.down()
            host.br0 = OvsBridgeDevice()
            host.br0.port_add(device=host.tap0, port_options={'tag': self.params.vlan0_id})
            host.br0.port_add(device=host.tap1, port_options={'tag': self.params.vlan1_id})
            #miimon cannot be set due to colon in argument name -->
            #other_config:bond-miimon-interval
            host.br0.bond_add(port_name, (host.eth0, host.eth1),
                bond_mode=self.params.bonding_mode)
        guest1.eth0.down()
        guest2.eth0.down()
        guest3.eth0.down()
        guest4.eth0.down()
        configuration = super().test_wide_configuration()
        configuration.test_wide_devices = [guest1.eth0, guest2.eth0,
            guest3.eth0, guest4.eth0]
        vlan0_ipv4_addr = interface_addresses(self.params.vlan0_ipv4)
        vlan0_ipv6_addr = interface_addresses(self.params.vlan0_ipv6)
        vlan1_ipv4_addr = interface_addresses(self.params.vlan1_ipv4)
        vlan1_ipv6_addr = interface_addresses(self.params.vlan1_ipv6)
        for guest in [guest1, guest3]:
            guest.eth0.ip_add(next(vlan0_ipv4_addr))
            guest.eth0.ip_add(next(vlan0_ipv6_addr))
        for guest in [guest2, guest4]:
            guest.eth0.ip_add(next(vlan1_ipv4_addr))
            guest.eth0.ip_add(next(vlan1_ipv6_addr))
        for host in [host1, host2]:
            for dev in [host.eth0, host.eth1, host.tap0, host.tap1,
                host.br0]:
                dev.up()
        for guest in [guest1, guest2, guest3, guest4]:
            guest.eth0.up()
        if "perf_tool_cpu" in self.params:
            logging.info("'perf_tool_cpu' param (%d) to be set to None" %
                self.params.perf_tool_cpu)
            self.params.perf_tool_cpu = None
        self.wait_tentative_ips(configuration.test_wide_devices)
        return configuration

    def generate_test_wide_description(self, config):
        """Describe configured guest IPs plus bridge ports/bonds per host."""
        host1, host2 = self.matched.host1, self.matched.host2
        desc = super().generate_test_wide_description(config)
        desc += [
            "\n".join([
                "Configured {}.{}.ips = {}".format(
                    dev.host.hostid, dev.name, dev.ips
                )
                for dev in config.test_wide_devices
            ]),
            "\n".join([
                "Configured {}.{}.ports = {}".format(
                    dev.host.hostid, dev.name, dev.ports
                )
                for dev in [host1.br0, host2.br0]
            ]),
            "\n".join([
                "Configured {}.{}.bonds = {}".format(
                    dev.host.hostid, dev.name, dev.bonds
                )
                for dev in [host1.br0, host2.br0]
            ])
        ]
        return desc

    def test_wide_deconfiguration(self, config):
        """Drop recipe-specific state before the generic deconfiguration."""
        del config.test_wide_devices
        super().test_wide_deconfiguration(config)

    def generate_ping_endpoints(self, config):
        """Pair guests across hosts; only same-VLAN pairs are reachable."""
        guest1, guest2, guest3, guest4 = (self.matched.guest1,
            self.matched.guest2, self.matched.guest3, self.matched.guest4)
        dev_combinations = product(
            [guest1.eth0, guest2.eth0],
            [guest3.eth0, guest4.eth0]
        )
        return [
            PingEndpoints(
                comb[0], comb[1],
                reachable=((comb[0].host, comb[1].host) in [
                    (guest1, guest3),
                    (guest2, guest4)
                ])
            )
            for comb in dev_combinations
        ]

    def generate_perf_endpoints(self, config):
        """Measure performance on one same-VLAN guest pair."""
        return [(self.matched.guest1.eth0, self.matched.guest3.eth0)]

    @property
    def offload_nics(self):
        """All guest NICs plus both uplink NICs on each host."""
        host1, host2, guest1, guest2, guest3, guest4 = (self.matched.host1,
            self.matched.host2, self.matched.guest1, self.matched.guest2,
            self.matched.guest3, self.matched.guest4)
        result = []
        for machine in host1, host2, guest1, guest2, guest3, guest4:
            result.append(machine.eth0)
        result.extend([host1.eth1, host2.eth1])
        return result

    @property
    def mtu_hw_config_dev_list(self):
        """Every device whose MTU the HW sub-config mixin should manage."""
        host1, host2, guest1, guest2, guest3, guest4 = (self.matched.host1,
            self.matched.host2, self.matched.guest1, self.matched.guest2,
            self.matched.guest3, self.matched.guest4)
        result = []
        for host in [host1, host2]:
            for dev in [host.eth0, host.eth1, host.tap0, host.tap1,
                host.br0]:
                result.append(dev)
        for guest in [guest1, guest2, guest3, guest4]:
            result.append(guest.eth0)
        return result

    @property
    def METHOD_NAME(self):
        """Physical uplink NICs handed to the HW sub-config mixin."""
        return [self.matched.host1.eth0, self.matched.host1.eth1,
            self.matched.host2.eth0, self.matched.host2.eth1]

    @property
    def parallel_stream_qdisc_hw_config_dev_list(self):
        """Physical uplink NICs for the parallel-stream qdisc sub-config."""
        return [self.matched.host1.eth0, self.matched.host1.eth1,
            self.matched.host2.eth0, self.matched.host2.eth1]
4,780 | test czi file read buffer | # pylint: disable=no-self-use
import math
import os.path
import shutil
from glob import glob
from io import BytesIO
import numpy as np
import pytest
import tifffile
import PartSegData
from PartSegImage import CziImageReader, GenericImageReader, Image, ObsepImageReader, OifImagReader, TiffImageReader
class TestImageClass:
    """Reader tests for TIFF/CZI/OIF/OIB/LSM/obsep inputs.

    `data_test_dir` is a pytest fixture (defined outside this file) that
    presumably points at the sample-data directory - paths below are joined
    against it.
    """

    def test_tiff_image_read(self):
        """Reading the bundled mask image yields an Image with known spacing."""
        image = TiffImageReader.read_image(PartSegData.segmentation_mask_default_image)
        assert isinstance(image, Image)
        assert np.all(np.isclose(image.spacing, (7.752248561753867e-08,) * 2))

    def test_tiff_image_read_buffer(self):
        """TIFF can also be read from an in-memory BytesIO buffer."""
        with open(PartSegData.segmentation_mask_default_image, "rb") as f_p:
            buffer = BytesIO(f_p.read())
        image = TiffImageReader.read_image(buffer)
        assert isinstance(image, Image)
        assert np.all(np.isclose(image.spacing, (7.752248561753867e-08,) * 2))

    def test_czi_file_read(self, data_test_dir):
        image = CziImageReader.read_image(os.path.join(data_test_dir, "test_czi.czi"))
        assert image.channels == 4
        assert image.layers == 1
        assert image.file_path == os.path.join(data_test_dir, "test_czi.czi")
        assert np.all(np.isclose(image.spacing, (7.752248561753867e-08,) * 2))

    def METHOD_NAME(self, data_test_dir):
        """Buffer-based CZI read works but leaves file_path empty."""
        with open(os.path.join(data_test_dir, "test_czi.czi"), "rb") as f_p:
            buffer = BytesIO(f_p.read())
        image = CziImageReader.read_image(buffer)
        assert image.channels == 4
        assert image.layers == 1
        assert image.file_path == ""
        assert np.all(np.isclose(image.spacing, (7.752248561753867e-08,) * 2))

    def test_oib_file_read(self, data_test_dir):
        image = OifImagReader.read_image(os.path.join(data_test_dir, "N2A_H2BGFP_dapi_falloidin_cycling1.oib"))
        assert image.channels == 3
        assert image.layers == 6
        assert np.all(np.isclose(image.spacing, (2.1e-07,) + (7.752248561753867e-08,) * 2))

    def test_oif_file_read(self, data_test_dir):
        image = OifImagReader.read_image(os.path.join(data_test_dir, "Image0003_01.oif"))
        assert image.channels == 1
        assert image.layers == 49
        assert np.all(np.isclose(image.spacing, (3.2e-07,) + (5.1e-08,) * 2))

    def test_read_with_mask(self, data_test_dir):
        """Matching mask loads; shape-mismatched mask raises ValueError."""
        image = TiffImageReader.read_image(
            os.path.join(data_test_dir, "stack1_components", "stack1_component1.tif"),
            os.path.join(data_test_dir, "stack1_components", "stack1_component1_mask.tif"),
        )
        assert isinstance(image, Image)
        with pytest.raises(ValueError, match="Incompatible shape"):
            TiffImageReader.read_image(
                os.path.join(data_test_dir, "stack1_components", "stack1_component1.tif"),
                os.path.join(data_test_dir, "stack1_components", "stack1_component2_mask.tif"),
            )

    def test_lsm_read(self, data_test_dir):
        """LSM and its TIFF export decode to the same reference array."""
        image1 = TiffImageReader.read_image(os.path.join(data_test_dir, "test_lsm.lsm"))
        image2 = TiffImageReader.read_image(os.path.join(data_test_dir, "test_lsm.tif"))
        data = np.moveaxis(np.load(os.path.join(data_test_dir, "test_lsm.npy")), -1, 0)
        assert np.all(image1.get_data() == data)
        assert np.all(image2.get_data() == data)
        assert np.all(image1.get_data() == image2.get_data())

    def test_ome_read(self, data_test_dir):  # error in tifffile
        image1 = TiffImageReader.read_image(os.path.join(data_test_dir, "test_lsm2.tif"))
        image2 = TiffImageReader.read_image(os.path.join(data_test_dir, "test_lsm.tif"))
        data = np.moveaxis(np.load(os.path.join(data_test_dir, "test_lsm.npy")), -1, 0)
        assert np.all(image1.get_data() == data)
        assert np.all(image2.get_data() == data)
        assert np.all(image1.get_data() == image2.get_data())

    def test_generic_reader(self, data_test_dir):
        """GenericImageReader dispatches to the right reader per extension."""
        GenericImageReader.read_image(
            os.path.join(data_test_dir, "stack1_components", "stack1_component1.tif"),
            os.path.join(data_test_dir, "stack1_components", "stack1_component1_mask.tif"),
        )
        GenericImageReader.read_image(os.path.join(data_test_dir, "test_czi.czi"))
        GenericImageReader.read_image(os.path.join(data_test_dir, "test_lsm2.tif"))
        GenericImageReader.read_image(os.path.join(data_test_dir, "test_lsm.tif"))
        GenericImageReader.read_image(os.path.join(data_test_dir, "Image0003_01.oif"))
        GenericImageReader.read_image(os.path.join(data_test_dir, "N2A_H2BGFP_dapi_falloidin_cycling1.oib"))

    def test_decode_int(self):
        """decode_int unpacks a 32-bit int into its four big-endian bytes."""
        assert TiffImageReader.decode_int(0) == [0, 0, 0, 0]
        assert TiffImageReader.decode_int(15) == [0, 0, 0, 15]
        assert TiffImageReader.decode_int(3 + 7 * 256 + 11 * 256**2 + 13 * 256**3) == [13, 11, 7, 3]

    def test_set_spacing(self):
        """2-tuple spacing is padded with a 1 micrometer z default."""
        reader = TiffImageReader()
        reader.set_default_spacing((11, 12, 13))
        assert reader.default_spacing == (11, 12, 13)
        reader.set_default_spacing((5, 7))
        assert reader.default_spacing == (10**-6, 5, 7)

    def test_obsep_read(self, data_test_dir):
        image = ObsepImageReader.read_image(os.path.join(data_test_dir, "obsep", "test.obsep"))
        assert image.channels == 2
        assert np.allclose(image.spacing, (500 * 10**-9, 64 * 10**-9, 64 * 10**-9))
        assert image.channel_names == ["channel 1", "channel 2"]

    def test_obsep_deconv_read(self, data_test_dir, tmp_path):
        """Only a file named exactly *_deconv.TIF adds a third channel."""
        for el in glob(os.path.join(data_test_dir, "obsep", "*")):
            shutil.copy(os.path.join(data_test_dir, "obsep", el), tmp_path)
        image = GenericImageReader.read_image(tmp_path / "test.obsep")
        assert image.channels == 2
        assert np.allclose(image.spacing, (500 * 10**-9, 64 * 10**-9, 64 * 10**-9))
        assert image.channel_names == ["channel 1", "channel 2"]
        shutil.copy(tmp_path / "Cy5.TIF", tmp_path / "Cy5_decon2.TIF")
        image = GenericImageReader.read_image(tmp_path / "test.obsep")
        assert image.channels == 2
        shutil.copy(tmp_path / "Cy5.TIF", tmp_path / "Cy5_deconv.TIF")
        image = GenericImageReader.read_image(tmp_path / "test.obsep")
        assert image.channels == 3

    def test_double_axes_in_dim_read(self, data_test_dir):
        image = GenericImageReader.read_image(os.path.join(data_test_dir, "double_q_in_axes.tif"))
        assert image.layers == 360
        assert image.channels == 1
        assert image.stack_pos == 1
        assert image.plane_shape == (360, 32)
class CustomImage(Image):
    """Image subclass with a non-default axis order, used by reader tests."""
    axis_order = "TCXYZ"
class CustomTiffReader(TiffImageReader):
    """TiffImageReader producing CustomImage instances instead of Image."""
    image_class = CustomImage
def test_change_class(data_test_dir):
    """A reader with image_class overridden returns that class' instances."""
    img = CustomTiffReader.read_image(os.path.join(data_test_dir, "test_lsm.tif"))
    assert isinstance(img, CustomImage)
    assert img.plane_shape == (1024, 1024)
    assert img.layers == 6
    assert img.channels == 3
    assert img.stack_pos == 3
def test_xml2dict():
    """tifffile.xml2dict nests elements and parses numeric leaf values."""
    sample_text = """
    <level1>
    <level2>3.5322</level2>
    </level1>
    """
    data = tifffile.xml2dict(sample_text)
    assert math.isclose(data["level1"]["level2"], 3.5322)
4,781 | get target mask | import math
import numpy as np
import scipy.special
import torch
from ..distances import CosineSimilarity
from ..utils import common_functions as c_f
from ..utils import loss_and_miner_utils as lmu
from .base_metric_loss_function import BaseMetricLossFunction
from .mixins import WeightRegularizerMixin
class LargeMarginSoftmaxLoss(WeightRegularizerMixin, BaseMetricLossFunction):
    """
    Implementation of https://arxiv.org/pdf/1612.02295.pdf

    Classification loss that replaces cos(theta) with a margin-expanded
    psi(theta) for the target class before cross-entropy.
    """

    def __init__(self, num_classes, embedding_size, margin=4, scale=1, **kwargs):
        super().__init__(**kwargs)
        c_f.assert_distance_type(self, CosineSimilarity)
        self.margin = margin
        self.num_classes = num_classes
        self.scale = scale
        self.add_to_recordable_attributes(
            list_of_names=["num_classes", "margin", "scale"], is_stat=False
        )
        self.add_to_recordable_attributes(name="avg_angle", is_stat=True)
        self.init_margin()
        # Class weight matrix, shape (embedding_size, num_classes).
        self.W = torch.nn.Parameter(torch.Tensor(embedding_size, num_classes))
        self.weight_init_func(self.W)
        self.cross_entropy = torch.nn.CrossEntropyLoss(reduction="none")

    def init_margin(self):
        """Precompute coefficients of the multiple-angle expansion of
        cos(margin * theta) in terms of cos(theta)."""
        self.margin = int(self.margin)
        self.max_n = self.margin // 2
        ## For the trigonometric multiple-angle formula ##
        self.n_range = torch.tensor([n for n in range(0, self.max_n + 1)])
        self.margin_choose_n = torch.tensor(
            [scipy.special.binom(self.margin, 2 * n) for n in self.n_range]
        )
        self.cos_powers = torch.tensor([self.margin - (2 * n) for n in self.n_range])
        self.alternating = torch.tensor([(-1) ** n for n in self.n_range])

    def get_cos_with_margin(self, cosine):
        """Compute cos(margin * theta) from cos(theta) (Equation 7)."""
        cosine = cosine.unsqueeze(1)
        for attr in ["n_range", "margin_choose_n", "cos_powers", "alternating"]:
            setattr(self, attr, c_f.to_device(getattr(self, attr), cosine))
        cos_powered = cosine**self.cos_powers
        sin_powered = (1 - cosine**2) ** self.n_range
        terms = (
            self.alternating * self.margin_choose_n * cos_powered * sin_powered
        )  # Equation 7 in the paper
        return torch.sum(terms, dim=1)

    def get_cosine(self, embeddings):
        """Cosine similarity between embeddings and class weight columns."""
        return self.distance(embeddings, self.W.t())

    def get_angles(self, cosine_of_target_classes):
        """Recover theta from cos(theta), recording avg angle as a stat."""
        angles = torch.acos(torch.clamp(cosine_of_target_classes, -1, 1))
        if self.collect_stats:
            with torch.no_grad():
                self.avg_angle = np.degrees(torch.mean(angles).item())
        return angles

    def METHOD_NAME(self, embeddings, labels):
        """Return a (batch, num_classes) one-hot mask of the target classes."""
        batch_size = labels.size(0)
        mask = torch.zeros(
            batch_size,
            self.num_classes,
            dtype=embeddings.dtype,
            device=embeddings.device,
        )
        mask[torch.arange(batch_size), labels] = 1
        return mask

    def modify_cosine_of_target_classes(self, cosine_of_target_classes):
        """Apply the monotonic margin function psi(theta) (Equation 6)."""
        cos_with_margin = self.get_cos_with_margin(cosine_of_target_classes)
        angles = self.get_angles(cosine_of_target_classes)
        with torch.no_grad():
            k = (
                angles / (math.pi / self.margin)
            ).floor()  # Equation 6: angles needs to be between [k*pi/m and (k+1)*pi/m]
        return ((-1) ** k) * cos_with_margin - (2 * k)

    def scale_logits(self, logits, embeddings):
        """Multiply the cosine logits back by |W_j| * |x| and the scale."""
        embedding_norms = self.distance.get_norm(embeddings)
        weight_norms = self.distance.get_norm(self.W, dim=0)
        product_of_magnitudes = weight_norms.unsqueeze(0) * embedding_norms.unsqueeze(1)
        return logits * product_of_magnitudes * self.scale

    def cast_types(self, dtype, device):
        """Move parameters and precomputed buffers to the input's dtype/device."""
        self.W.data = c_f.to_device(self.W.data, device=device, dtype=dtype)
        self.n_range = c_f.to_device(self.n_range, device=device, dtype=dtype)
        self.margin_choose_n = c_f.to_device(
            self.margin_choose_n, device=device, dtype=dtype
        )
        self.cos_powers = c_f.to_device(self.cos_powers, device=device, dtype=dtype)
        self.alternating = c_f.to_device(self.alternating, device=device, dtype=dtype)

    def compute_loss(self, embeddings, labels, indices_tuple, ref_emb, ref_labels):
        """Cross-entropy over margin-modified, rescaled cosine logits."""
        c_f.labels_required(labels)
        c_f.ref_not_supported(embeddings, labels, ref_emb, ref_labels)
        dtype, device = embeddings.dtype, embeddings.device
        self.cast_types(dtype, device)
        miner_weights = lmu.convert_to_weights(indices_tuple, labels, dtype=dtype)
        mask = self.METHOD_NAME(embeddings, labels)
        cosine = self.get_cosine(embeddings)
        cosine_of_target_classes = cosine[mask == 1]
        modified_cosine_of_target_classes = self.modify_cosine_of_target_classes(
            cosine_of_target_classes
        )
        # Only target-class entries of the logit matrix are modified.
        diff = (modified_cosine_of_target_classes - cosine_of_target_classes).unsqueeze(
            1
        )
        logits = cosine + (mask * diff)
        logits = self.scale_logits(logits, embeddings)
        unweighted_loss = self.cross_entropy(logits, labels)
        miner_weighted_loss = unweighted_loss * miner_weights
        loss_dict = {
            "loss": {
                "losses": miner_weighted_loss,
                "indices": c_f.torch_arange_from_size(embeddings),
                "reduction_type": "element",
            }
        }
        self.add_weight_regularization_to_loss_dict(loss_dict, self.W.t())
        return loss_dict

    def get_default_distance(self):
        """This loss is defined over cosine similarity."""
        return CosineSimilarity()

    def get_logits(self, embeddings):
        """Unmargined, rescaled logits (for inference/accuracy computation)."""
        logits = self.get_cosine(embeddings)
        return self.scale_logits(logits, embeddings)
4,782 | post | #!/usr/bin/env python
#
# Simple asynchronous HTTP proxy with tunnelling (CONNECT).
#
# GET/POST proxying based on
# http://groups.google.com/group/python-tornado/msg/7bea08e7a049cf26
#
# Copyright (C) 2012 Senko Rasic <senko.rasic@dobarkod.hr>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from __future__ import annotations
import socket
import ssl
import sys
import tornado.gen
import tornado.httpclient
import tornado.httpserver
import tornado.ioloop
import tornado.iostream
import tornado.web
__all__ = ["ProxyHandler", "run_proxy"]
class ProxyHandler(tornado.web.RequestHandler):
    """Handler implementing GET/POST forwarding and raw CONNECT tunnelling."""
    SUPPORTED_METHODS = ["GET", "POST", "CONNECT"]  # type: ignore[assignment]

    async def get(self) -> None:
        """Forward the incoming request upstream and relay the response back."""
        upstream_ca_certs = self.application.settings.get("upstream_ca_certs", None)
        ssl_options = None
        if upstream_ca_certs:
            ssl_options = ssl.create_default_context(cafile=upstream_ca_certs)
        assert self.request.uri is not None
        assert self.request.method is not None
        req = tornado.httpclient.HTTPRequest(
            url=self.request.uri,
            method=self.request.method,
            body=self.request.body,
            headers=self.request.headers,
            follow_redirects=False,
            allow_nonstandard_methods=True,
            ssl_options=ssl_options,
        )
        client = tornado.httpclient.AsyncHTTPClient()
        response = await client.fetch(req, raise_error=False)
        self.set_status(response.code)
        # Copy only a whitelist of response headers back to the client.
        for header in (
            "Date",
            "Cache-Control",
            "Server",
            "Content-Type",
            "Location",
        ):
            v = response.headers.get(header)
            if v:
                self.set_header(header, v)
        if response.body:
            self.write(response.body)
        await self.finish()

    async def METHOD_NAME(self) -> None:
        """POST is proxied exactly like GET (body is forwarded as-is)."""
        await self.get()

    async def connect(self) -> None:
        """Open a TCP connection to the target and pipe bytes both ways."""
        assert self.request.uri is not None
        host, port = self.request.uri.split(":")
        assert self.request.connection is not None
        client: tornado.iostream.IOStream = self.request.connection.stream  # type: ignore[attr-defined]

        async def start_forward(
            reader: tornado.iostream.IOStream, writer: tornado.iostream.IOStream
        ) -> None:
            # Copy until the reading side closes, then close the writer so
            # the peer sees EOF.
            while True:
                try:
                    data = await reader.read_bytes(4096, partial=True)
                except tornado.iostream.StreamClosedError:
                    break
                if not data:
                    break
                writer.write(data)
            writer.close()

        s = socket.socket(socket.AF_INET, socket.SOCK_STREAM, 0)
        upstream = tornado.iostream.IOStream(s)
        await upstream.connect((host, int(port)))
        client.write(b"HTTP/1.0 200 Connection established\r\n\r\n")
        fu1 = start_forward(client, upstream)
        fu2 = start_forward(upstream, client)
        await tornado.gen.multi([fu1, fu2])
def run_proxy(port: int, start_ioloop: bool = True) -> None:
    """
    Run proxy on the specified port. If start_ioloop is True (default),
    the tornado IOLoop will be started immediately.
    """
    application = tornado.web.Application([(r".*", ProxyHandler)])
    application.listen(port)
    loop = tornado.ioloop.IOLoop.instance()
    if start_ioloop:
        loop.start()
if __name__ == "__main__":
    # Optional argv[1] overrides the default listening port.
    port = int(sys.argv[1]) if len(sys.argv) > 1 else 8888
    print(f"Starting HTTP proxy on port {port}")
    run_proxy(port)
from copy import deepcopy
from typing import Optional
import torch.nn.functional as F
from colossalai.tensor import ColoTensor, ColoTensorSpec, ComputePattern, ComputeSpec, ReplicaSpec, ShardSpec
from colossalai.tensor.op_wrapper import colo_op_impl
from colossalai.tensor.sharding_spec import ShardingSpec
from ._utils import GeneralTensor, convert_to_colo_tensor, reduce_grad, reduce_input
def colo_linear_1drow(input_tensor: ColoTensor, weight: ColoTensor, bias: Optional[ColoTensor]) -> 'ColoTensor':
    """Row-parallel 1D linear: shard the input's last dim, all-reduce the partial output.

    Input:S[1] x Weight:S[0] = Output:P, then All-Reduce(Output) + bias = result.
    Returns a replicated ColoTensor on *weight*'s process group.
    """
    # Input:S[1] — shard input along its last dimension over the TP group.
    pg = weight.get_process_group()
    input_tensor = input_tensor.redistribute(ShardSpec([-1], [weight.get_tp_world_size()]), pg)
    # Output:P — every rank holds a partial sum over its input shard.
    partial_output = F.linear(input_tensor, weight)
    # Reduce(Output) across the tensor-parallel process group.
    output = reduce_input(partial_output, pg)
    # Bias is added once, after the reduction; it must not carry a compute spec here.
    if bias is not None:
        assert not bias.has_compute_spec(), 'Invalid bias spec for 1Drow Linear op'
        output = output + bias
    output = ColoTensor.from_torch_tensor(output, spec=ColoTensorSpec(pg, ReplicaSpec()))
    return output
def colo_linear_1dcol(input_tensor: ColoTensor, weight: ColoTensor, bias: Optional[ColoTensor]) -> 'ColoTensor':
    """Column-parallel 1D linear: replicate the input, keep the output sharded.

    Input:B x Weight:S[1] + Bias:S[1] = Output:S[1]; all-gathers the output back
    to a replica only when the weight's compute spec requests it.
    """
    # Input:B — every rank needs the full (replicated) input.
    compute_spec = weight.compute_spec
    input_tensor = input_tensor.redistribute(ReplicaSpec())
    # reduce_grad inserts an all-reduce of the input gradient in the backward pass.
    input_parallel = reduce_grad(input_tensor, weight.get_process_group())
    output_parallel = F.linear(input_parallel, weight, bias)
    output = ColoTensor.from_torch_tensor(output_parallel,
                                          spec=ColoTensorSpec(weight.get_process_group(),
                                                              ShardSpec([-1], [weight.get_tp_world_size()]),
                                                              ComputeSpec(ComputePattern.TP1D)))
    if compute_spec.output_replicate:
        return output.to_replicate()
    else:
        return output
def colo_linear_1d(mode: str, input_tensor: ColoTensor, weight: ColoTensor, bias: Optional[ColoTensor]) -> 'ColoTensor':
    """Dispatch to the row- or column-parallel 1D linear implementation."""
    assert mode in ('row', 'col')
    impl = colo_linear_1drow if mode == 'row' else colo_linear_1dcol
    return impl(input_tensor, weight, bias)
# @register_colo_graph(input_pos=[1], param_pos=[2, 3])
def colo_linear_imp(input_tensor: GeneralTensor,
                    weight: GeneralTensor,
                    bias: Optional[GeneralTensor] = None) -> 'ColoTensor':
    """Handles ``__torch_function__`` dispatch for ``torch.nn.functional.linear``.
    This method computes a linear.

    Chooses between the native (replicated) path and the 1D tensor-parallel
    row/col paths based on the weight's compute/shard spec.
    """
    assert isinstance(weight, ColoTensor)
    pg = weight.get_process_group()
    assert pg
    input_tensor = convert_to_colo_tensor(input_tensor, pg)
    bias = convert_to_colo_tensor(bias, pg)
    # input_tensor, weight, bias = tuple(map(convert_to_colo_tensor, (input_tensor, weight, bias)))

    # Add communication logic before and after linear call.
    ret_tensor = None
    if not weight.has_compute_spec():    # No Model Parallel Applied
        assert weight.is_replicate(), 'Invalid weight spec for native Linear op'
        assert bias is None or bias.is_replicate(), 'Invalid bias spec for native Linear op'
        ret_tensor = ColoTensor.from_torch_tensor(F.linear(input_tensor, weight, bias), spec=ColoTensorSpec(pg))
    elif weight.has_compute_pattern(ComputePattern.TP1D):    # Single Model Parallel Applied
        # NOTE: F.linear computes x @ W^T, so a column-shard of the stored weight
        # corresponds to a row-parallel linear and vice versa.
        if weight.is_shard_1dcol() and (bias is None or bias.is_replicate()):
            mode = 'row'
        elif weight.is_shard_1drow() and (bias is None or bias.is_shard_1drow() or bias.is_shard_1dcol()):
            mode = 'col'
        else:
            raise RuntimeError(f"the weight or bias tensor spec is not valid, weight {weight}, bias {bias}")
        ret_tensor = colo_linear_1d(mode, input_tensor, weight, bias)
    else:
        raise NotImplementedError
    return ret_tensor
def _new_colo_linear_imp(input_tensor: GeneralTensor,
                         weight: GeneralTensor,
                         bias: Optional[GeneralTensor] = None) -> 'ColoTensor':
    """
    A tentative function to compute the distributed linear layer with the latest sharding spec.
    This function is subject to future change as the current sharding API is not stable.

    :param input_tensor: activation whose last dim must match the spec of the
        weight's second dim (the weight is stored transposed for ``F.linear``).
    :param weight: weight tensor carrying the new-style ``sharding_spec``.
    :param bias: optional bias; its first dim must share the weight's first-dim spec.
    :return: ColoTensor with the output and an attached ``sharding_spec``.
    """
    # get mesh info
    input_sharding_seq = input_tensor.sharding_spec.sharding_sequence
    weight_sharding_seq = weight.sharding_spec.sharding_sequence
    if bias is not None:
        bias_sharding_seq = bias.sharding_spec.sharding_sequence
    device_mesh = weight.sharding_spec.device_mesh
    pg_axis0 = weight.pg_axis0
    pg_axis1 = weight.pg_axis1

    # the last dim of input should have the same spec as the first dim of weight
    # the weight is transposed, so we look at the second dimension
    assert input_sharding_seq[-1] == weight_sharding_seq[1]
    if bias is not None:
        assert bias_sharding_seq[0] == weight_sharding_seq[0]

    # compute the output sharding sequence
    # as weight is transposed, so we look at the first dimension
    output_shard_seq = input_sharding_seq[:-1] + weight_sharding_seq[:1]
    output_shard_seq = deepcopy(output_shard_seq)

    # TODO: add reduce grad logic

    # handle column and row parallel linear
    # by reusing the implementation above
    out = F.linear(input_tensor, weight)

    # run all reduce if necessary
    # NOTE(review): reduce_input's return value is discarded here — presumably it
    # reduces in place; confirm against its implementation.
    last_dim_spec = input_sharding_seq[-1]
    if last_dim_spec.is_replica:
        pass
    elif last_dim_spec.shard_list is not None:
        for dim in last_dim_spec.shard_list:
            if dim == 0:
                reduce_input(out, pg_axis0)
            elif dim == 1:
                reduce_input(out, pg_axis1)
            else:
                # BUG FIX: this was a plain string literal, so {dim} was never interpolated.
                raise RuntimeError(f"Found invalid sharding axis {dim}, only 0 or 1 is expected")
    # add bias
    if bias is not None:
        out += bias

    # convert shard seq to partition dict
    output_partition_dict = {}
    for index, dim_spec in enumerate(output_shard_seq):
        if not dim_spec.is_replica:
            if index not in output_partition_dict:
                output_partition_dict[index] = []
            output_partition_dict[index].extend(dim_spec.shard_list)

    entire_shape = out.shape
    output_sharding_spec = ShardingSpec(device_mesh, entire_shape, output_partition_dict)
    ret_tensor = ColoTensor.from_torch_tensor(out)
    setattr(ret_tensor, 'sharding_spec', output_sharding_spec)
    return ret_tensor
def _has_sharding_spec(tensor):
    """
    A tentative function to check whether the tensor is using the new sharding spec API. We assume that the sharding spec object is
    set as the attribute `sharding_spec` on a tensor.
    """
    try:
        tensor.sharding_spec
    except AttributeError:
        return False
    return True
@colo_op_impl(F.linear)
def METHOD_NAME(input: GeneralTensor, weight: GeneralTensor, bias: Optional[GeneralTensor] = None) -> 'ColoTensor':
    """Entry point for ``F.linear``: route to the new sharding-spec path when
    the weight carries a ``sharding_spec`` attribute, else to the legacy path."""
    if _has_sharding_spec(weight):
        return _new_colo_linear_imp(input, weight, bias)
    return colo_linear_imp(input, weight, bias)
#!/usr/bin/python
# Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tensorflow as tf
from google.protobuf import text_format
from tensorflow.python.tools import freeze_graph
from tensorflow.python.tools import optimize_for_inference_lib
import argparse
import os
def splitDirFilenameExt(path):
    """Split *path* into (directory, basename-without-extension, extension).

    E.g. '/tmp/.ssh/my.key.dat' -> ('/tmp/.ssh', 'my.key', 'dat').
    The extension is returned without its leading dot.
    """
    directory, tail = os.path.split(path)
    filename, dot_ext = os.path.splitext(tail)
    return (directory, filename, dot_ext[1:])
def importGraphIntoSession(sess, filename):
    """Load a GraphDef from *filename* (.pb binary or .pbtxt text) into *sess*.

    This should be called inside ``with tf.Session() as sess:``.

    :raises ValueError: for an unknown file extension.
    """
    assert sess
    (_, _, ext) = splitDirFilenameExt(filename)
    ext = ext.lower()
    if ext == 'pb':
        with tf.gfile.GFile(filename, 'rb') as f:
            graph_def = tf.GraphDef()
            graph_def.ParseFromString(f.read())
    elif ext == 'pbtxt':
        with open(filename, 'r') as reader:
            graph_def = tf.GraphDef()
            text_format.Parse(reader.read(), graph_def)
    else:
        # BUG FIX: previously this only printed an error and fell through to
        # tf.import_graph_def with `graph_def` undefined, raising NameError.
        raise ValueError("# Error: unknown extension - " + ext)
    tf.import_graph_def(graph_def)
def METHOD_NAME(op, op_count):
    """Print one operation's type, name, inputs, outputs and attributes.

    :param op: a tf.Operation (any object with the same attributes works)
    :param op_count: sequence number, used only in the printed header
    """
    print("")  # new line
    print("OP #{}: {}, name = {}".format(op_count, op.type, op.name))
    print("\tinputs:")
    for input_tensor in op.inputs:
        print("\t\t{} : name = {}".format(input_tensor.shape, input_tensor.name))
    print("\toutputs:")
    for output_tensor in op.outputs:
        print("\t\t{}, name = {}".format(output_tensor.shape, output_tensor.name))
    print("\tattributes:")
    # (removed unused local `op_def = op.op_def`)
    for attr_def in op.op_def.attr:
        attr = op.get_attr(attr_def.name)
        # skip Const value — constant tensors can be huge
        if op.type == "Const" and attr_def.name == "value":
            print("\t\t{}, name = {}".format("skipping value", attr_def.name))
        else:
            print("\t\t{}, name = {}".format(attr, attr_def.name))
    print("")  # new line
def print_graph_info(pb_path, optype_substring, name_prefix):
    """Print detailed info for every op in *pb_path* that matches the filters.

    :param pb_path: path to a .pb/.pbtxt graph file
    :param optype_substring: case-insensitive op-type substring, or "*" for all
    :param name_prefix: only ops whose name starts with this prefix (None = all)
    """
    with tf.Session() as sess:
        importGraphIntoSession(sess, pb_path)
        op_seq = 1      # position of the op within the whole graph
        op_count = 0    # BUG FIX: started at 1 before, so the total over-counted by one
        ops = sess.graph.get_operations()
        for op in ops:
            name_matches = name_prefix is None or op.name.startswith(name_prefix)
            type_matches = (optype_substring == "*"
                            or optype_substring.lower() in op.type.lower())
            if name_matches and type_matches:
                METHOD_NAME(op, op_seq)
                op_count += 1
            else:
                print("skipping {}, name = {}".format(op.type, op.name))
            op_seq += 1
        print("")
        print("Total number of operations : " + str(op_count))
        print("")
def print_summary(pb_path, optype_substring, name_prefix):
    """Print per-type operation counts for ops in *pb_path* matching the filters.

    :param pb_path: path to a .pb/.pbtxt graph file
    :param optype_substring: case-insensitive op-type substring, or "*" for all
    :param name_prefix: only ops whose name starts with this prefix (None = all)
    """
    op_map = {}
    op_count = 0
    with tf.Session() as sess:
        importGraphIntoSession(sess, pb_path)
        for op in sess.graph.get_operations():
            name_matches = name_prefix is None or op.name.startswith(name_prefix)
            type_matches = (optype_substring == "*"
                            or optype_substring.lower() in op.type.lower())
            if name_matches and type_matches:
                op_count += 1
                # idiom fix: was `if op_map.get(op.type) == None: ... else ...`
                op_map[op.type] = op_map.get(op.type, 0) + 1
    # print op list
    print("")
    for op_type, count in op_map.items():
        print("\t" + op_type + " : \t" + str(count))
    print("")
    print("Total number of operations : " + str(op_count))
    print("Total number of operation types : " + str(len(op_map.keys())))
    print("")
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description='Prints information inside pb file')
    parser.add_argument("pb_file", help="pb file to read")
    parser.add_argument(
        "op_subst",
        help="substring of operations. only info of these operasions will be printed.")
    parser.add_argument(
        "--summary", help="print summary of operations", action="store_true")
    parser.add_argument("--name_prefix", help="filtered by speficied name prefix")
    args = parser.parse_args()
    # Pick the reporting function, then call it with the common arguments.
    report = print_summary if args.summary else print_graph_info
    report(args.pb_file, args.op_subst, args.name_prefix)
# pylint: disable=too-many-lines
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import sys
from typing import Any, Callable, Dict, Optional, TypeVar
from azure.core.exceptions import (
ClientAuthenticationError,
HttpResponseError,
ResourceExistsError,
ResourceNotFoundError,
ResourceNotModifiedError,
map_error,
)
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator_async import distributed_trace_async
from azure.core.utils import case_insensitive_dict
from azure.mgmt.core.exceptions import ARMErrorFormat
from ... import models as _models
from ..._vendor import _convert_request
from ...operations._jobs_operations import build_get_request
if sys.version_info >= (3, 8):
from typing import Literal # pylint: disable=no-name-in-module, ungrouped-imports
else:
from typing_extensions import Literal # type: ignore # pylint: disable=ungrouped-imports
T = TypeVar("T")
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class JobsOperations:
    """
    .. warning::
        **DO NOT** instantiate this class directly.

        Instead, you should access the following operations through
        :class:`~azure.mgmt.databoxedge.v2021_02_01_preview.aio.DataBoxEdgeManagementClient`'s
        :attr:`jobs` attribute.
    """

    models = _models

    def __init__(self, *args, **kwargs) -> None:
        # Auto-generated wiring: accepts (client, config, serializer, deserializer)
        # positionally or as keyword arguments.
        input_args = list(args)
        self._client = input_args.pop(0) if input_args else kwargs.pop("client")
        self._config = input_args.pop(0) if input_args else kwargs.pop("config")
        self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer")
        self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer")

    @distributed_trace_async
    async def METHOD_NAME(self, device_name: str, name: str, resource_group_name: str, **kwargs: Any) -> _models.Job:
        """Gets the details of a specified job on a Data Box Edge/Data Box Gateway device.

        Gets the details of a specified job on a Data Box Edge/Data Box Gateway device.

        :param device_name: The device name. Required.
        :type device_name: str
        :param name: The job name. Required.
        :type name: str
        :param resource_group_name: The resource group name. Required.
        :type resource_group_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: Job or the result of cls(response)
        :rtype: ~azure.mgmt.databoxedge.v2021_02_01_preview.models.Job
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        error_map = {
            401: ClientAuthenticationError,
            404: ResourceNotFoundError,
            409: ResourceExistsError,
            304: ResourceNotModifiedError,
        }
        error_map.update(kwargs.pop("error_map", {}) or {})

        _headers = kwargs.pop("headers", {}) or {}
        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})

        api_version: Literal["2021-02-01-preview"] = kwargs.pop(
            "api_version", _params.pop("api-version", "2021-02-01-preview")
        )
        cls: ClsType[_models.Job] = kwargs.pop("cls", None)

        request = build_get_request(
            device_name=device_name,
            name=name,
            resource_group_name=resource_group_name,
            subscription_id=self._config.subscription_id,
            api_version=api_version,
            template_url=self.METHOD_NAME.metadata["url"],
            headers=_headers,
            params=_params,
        )
        request = _convert_request(request)
        request.url = self._client.format_url(request.url)

        pipeline_response: PipelineResponse = await self._client._pipeline.run(  # pylint: disable=protected-access
            request, stream=False, **kwargs
        )

        response = pipeline_response.http_response

        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)

        deserialized = self._deserialize("Job", pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, {})

        return deserialized

    # Template URL used by build_get_request above; attached as function metadata
    # per the autorest code-generation convention.
    METHOD_NAME.metadata = {
        "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DataBoxEdge/dataBoxEdgeDevices/{deviceName}/jobs/{name}"
    }
#-------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#--------------------------------------------------------------------------
import logging
import uuid
import time
from functools import partial
from ._management_link_async import ManagementLink
from ..error import (
AMQPLinkError,
ErrorCondition
)
from ..constants import (
ManagementOpenResult,
ManagementExecuteOperationResult
)
_LOGGER = logging.getLogger(__name__)
class ManagementOperation(object):
    """Executes AMQP management operations over a request/response link pair.

    Wraps a ManagementLink created on *session*, correlates responses to
    requests by a locally generated operation id, and surfaces link-level
    failures reported via the management-link callbacks.
    """

    def __init__(self, session, endpoint='$management', **kwargs):
        self._mgmt_link_open_status = None

        self._session = session
        self._connection = self._session._connection
        self._network_trace_params = {
            "amqpConnection": self._session._connection._container_id,
            "amqpSession": self._session.name,
            "amqpLink": None
        }
        self._mgmt_link = self._session.create_request_response_link_pair(
            endpoint=endpoint,
            on_amqp_management_open_complete=self._on_amqp_management_open_complete,
            on_amqp_management_error=self._on_amqp_management_error,
            **kwargs
        )  # type: ManagementLink
        # operation_id -> None (pending) or (status_code, description, raw_message)
        self._responses = {}
        self._mgmt_error = None

    async def _on_amqp_management_open_complete(self, result):
        """Callback run when the send/receive links are open and ready
        to process messages.

        :param result: Whether the link opening was successful.
        :type result: int
        """
        self._mgmt_link_open_status = result

    async def _on_amqp_management_error(self):
        """Callback run if an error occurs in the send/receive links."""
        # TODO: This probably shouldn't be ValueError
        self._mgmt_error = ValueError("Management Operation error occurred.")

    async def _on_execute_operation_complete(
        self,
        operation_id,
        operation_result,
        status_code,
        status_description,
        raw_message,
        error=None
    ):
        """Record the outcome of a single management request, keyed by *operation_id*."""
        _LOGGER.debug(
            "Management operation completed, id: %r; result: %r; code: %r; description: %r, error: %r",
            operation_id,
            operation_result,
            status_code,
            status_description,
            error,
            extra=self._network_trace_params
        )
        if operation_result in\
                (ManagementExecuteOperationResult.ERROR, ManagementExecuteOperationResult.LINK_CLOSED):
            self._mgmt_error = error
            _LOGGER.error(
                "Failed to complete management operation due to error: %r.",
                error,
                extra=self._network_trace_params
            )
        else:
            self._responses[operation_id] = (status_code, status_description, raw_message)

    async def execute(self, message, operation=None, operation_type=None, timeout=0):
        """Send *message* over the management link and wait for its response.

        :param timeout: seconds to wait for a response; 0 (or falsy) waits forever.
        :return: (status_code, status_description, raw_message) tuple.
        :raises TimeoutError: when no response arrives within *timeout* seconds.
        """
        start_time = time.time()
        operation_id = str(uuid.uuid4())
        self._responses[operation_id] = None
        self._mgmt_error = None

        await self._mgmt_link.execute_operation(
            message,
            partial(self._on_execute_operation_complete, operation_id),
            timeout=timeout,
            operation=operation,
            type=operation_type
        )

        while not self._responses[operation_id] and not self._mgmt_error:
            if timeout and timeout > 0:
                now = time.time()
                if (now - start_time) >= timeout:
                    # BUG FIX: timeout is measured in seconds (time.time() deltas),
                    # but the old message claimed milliseconds ("{}ms").
                    raise TimeoutError("Failed to receive mgmt response in {}s".format(timeout))
            await self._connection.listen()

        if self._mgmt_error:
            self._responses.pop(operation_id)
            raise self._mgmt_error  # pylint: disable=raising-bad-type

        response = self._responses.pop(operation_id)
        return response

    async def open(self):
        """Begin opening the management link; completion arrives via callback."""
        self._mgmt_link_open_status = ManagementOpenResult.OPENING
        await self._mgmt_link.open()

    async def ready(self):
        """Return True once the link is open, False while still opening.

        :raises: the recorded management error, or AMQPLinkError when the link
            failed to open (ERROR / CANCELLED status).
        """
        # Simplified from `try: raise self._mgmt_error / except TypeError: pass`,
        # which relied on `raise None` producing (and swallowing) a TypeError.
        if self._mgmt_error is not None:
            raise self._mgmt_error
        if self._mgmt_link_open_status == ManagementOpenResult.OPENING:
            return False
        if self._mgmt_link_open_status == ManagementOpenResult.OK:
            return True
        # ManagementOpenResult.ERROR or CANCELLED
        # TODO: update below with correct status code + info
        raise AMQPLinkError(
            condition=ErrorCondition.ClientError,
            description="Failed to open mgmt link, management link status: {}".format(self._mgmt_link_open_status),
            info=None
        )

    async def METHOD_NAME(self):
        """Close the underlying management link."""
        await self._mgmt_link.METHOD_NAME()
import unittest
import numpy as np
from pyNastran.bdf.bdf import BDF, BDFCard
from pyNastran.bdf.cards.bdf_tables import (
TABLED1, TABLED2, TABLED3, TABLED4,
TABLEM1, TABLEM2, TABLEM3, TABLEM4,
TABDMP1, #TABLES1, TABLEST, TABRND1, TABRNDG,
)
from pyNastran.bdf.field_writer_8 import print_card_8
from pyNastran.bdf.cards.test.utils import save_load_deck
model = BDF(debug=False)
class TestTables(unittest.TestCase):
    """Card parsing/writing round-trip tests for TABDMP1, TABLEDx and TABLEMx.

    NOTE(review): the expected card strings below appear to have had their
    fixed-width (8/16-column) field spacing collapsed by whitespace mangling —
    verify them against the original fixture file.
    """

    def test_tabdmp1_01(self):
        lines = ['TABDMP1,100,,,,,,,,+',
                 '+,1e-3,.02,200.,.02,ENDT',]
        card = model._process_card(lines)
        card = BDFCard(card)
        #print(card)
        table = TABDMP1.add_card(card, comment='table')
        table.raw_fields()
        str(table)
        table.write_card(size=8)
        table.write_card_16(is_double=False)
        table.write_card_16(is_double=True)
        assert table._comment == '$table\n', '%r' % table._comment
        assert table.comment == '$table\n', '%r' % table.comment

        fields = table.raw_fields()
        msg = table.write_card(size=8).rstrip()
        #print(msg)
        # small-field (8-char) output must match line for line
        lines_expected = [
            '$table',
            'TABDMP1 100 G',
            ' .001 .02 200. .02 ENDT']
        #' 1E-3 0.02 200.0 0.02 ENDT']
        lines_actual = msg.rstrip().split('\n')
        msg = '\n%s\n\n%s\n' % ('\n'.join(lines_expected), msg)
        msg += 'nlines_actual=%i nlines_expected=%i' % (len(lines_actual), len(lines_expected))
        self.assertEqual(len(lines_actual), len(lines_expected), msg)
        for actual, expected in zip(lines_actual, lines_expected):
            self.assertEqual(actual, expected, msg)

        # large-field (16-char) output
        msg = table.write_card(size=16).rstrip()
        #print(msg)
        lines_expected = [
            '$table',
            'TABDMP1* 100 G',
            '*',
            '* .001 .02 200. .02',
            '* ENDT'
        ]
        lines_actual = [line.rstrip() for line in msg.rstrip().split('\n')]
        msg = '\n%r\n\n%r\n' % ('\n'.join(lines_expected), msg)
        msg += 'nlines_actual=%i nlines_expected=%i' % (len(lines_actual), len(lines_expected))
        self.assertEqual(len(lines_actual), len(lines_expected), msg)
        for actual, expected in zip(lines_actual, lines_expected):
            self.assertEqual(actual, expected, msg)

    def test_tabled1(self):
        lines = [
            'TABLED1, 32',
            ',-2.0, 6.0, 2.0, 5.0, 3.0, 5.6, ENDT',
        ]
        card = model._process_card(lines)
        card = BDFCard(card)
        table = TABLED1.add_card(card, comment='table')
        table.raw_fields()
        str(table)
        table.write_card(size=8)
        table.write_card_16(is_double=False)
        table.write_card_16(is_double=True)
        interp = table.interpolate(0.)
        assert np.allclose(interp, [5.5]), interp
        interp = table.interpolate([0., 5.])

    def test_tabled2(self):
        lines = [
            'TABLED2, 15, -10.5',
            ',1.0, -4.5, 2.0, -4.2, 2.0, 2.8, 7.0, 6.5',
            ',SKIP, SKIP, 9.0, 6.5, ENDT',
        ]
        card = model._process_card(lines)
        card = BDFCard(card)
        table = TABLED2.add_card(card, comment='table')
        table.raw_fields()
        str(table)
        table.write_card(size=8)
        table.write_card_16(is_double=False)
        table.write_card_16(is_double=True)
        interp = table.interpolate(0.)
        assert np.allclose(interp, [-5.875]), interp
        interp = table.interpolate([0., 5.])

    def test_tabled3(self):
        lines = [
            'TABLED3, 62, 126.9, 30.0',
            ',2.9, 2.9, 3.6, 4.7, 5.2, 5.7, ENDT',
        ]
        card = model._process_card(lines)
        card = BDFCard(card)
        table = TABLED3.add_card(card, comment='table')
        table.raw_fields()
        str(table)
        #interp = table.interpolate(0.)
        #print('interp =', interp, type(interp))
        #assert np.allclose(interp, [5.5]), interp

    def METHOD_NAME(self):
        # TABLED4 interpolation test
        lines = [
            'TABLED4, 28, 0.0, 1.0, 0.0, 100.',
            ',2.91, -0.0329, 6.51-5, 0.0, -3.4-7, ENDT',
        ]
        card = model._process_card(lines)
        card = BDFCard(card)
        table = TABLED4.add_card(card)
        table.write_card(size=8)
        table.write_card_16(is_double=False)
        table.write_card_16(is_double=True)
        table.raw_fields()
        str(table)
        interp = table.interpolate(5.)
        assert np.allclose(interp, [2.746915]), interp

        with self.assertRaises(ValueError):  # bug...
            interp = table.interpolate([0., 5.])

    #def test_tableht(self):
        #lines = [
            #'TABLEHT, 85',
            #'10.0, 101, 25.0, 102, 40.0, 110, ENDT',
        #]
        #card = model._process_card(lines)
        #card = BDFCard(card)
        #card2 = TABLEHT.add_card(card)

    def test_tableh1(self):
        lines = [
            'TABLEH1, 32',
            '-3.0, 6.9, 2.0, 5.6, 3.0, 5.6, ENDT',
        ]
        card = model._process_card(lines)
        card = BDFCard(card)
        table = TABLEM1.add_card(card, comment='table')
        table.raw_fields()
        str(table)
        table.write_card(size=8)
        table.write_card_16(is_double=False)
        table.write_card_16(is_double=True)
        save_load_deck(model)

    def test_tablem1(self):
        lines = [
            'TABLEM1, 32',
            '-3.0, 6.9, 2.0, 5.6, 3.0, 5.6, ENDT',
        ]
        card = model._process_card(lines)
        card = BDFCard(card)
        table = TABLEM1.add_card(card, comment='table')
        table.raw_fields()
        str(table)
        save_load_deck(model)
        #interp = table.interpolate(0.)
        #print('interp =', interp, type(interp))
        #assert np.allclose(interp, [5.5]), interp

    def test_tablem2(self):
        lines = [
            'TABLEM2, 15, -10.5',
            ',1.0, -4.5, 2.0, -4.5, 2.0, 2.8, 7.0, 6.5',
            ',SKIP, SKIP, 9.0, 6.5, ENDT',
        ]
        card = model._process_card(lines)
        card = BDFCard(card)
        table = TABLEM2.add_card(card)
        table.raw_fields()
        str(table)
        table.write_card(size=8)
        table.write_card_16(is_double=False)
        table.write_card_16(is_double=True)
        save_load_deck(model)
        #interp = table.interpolate(0.)
        #print('interp =', interp, type(interp))
        #assert np.allclose(interp, [5.5]), interp

    def test_tablem3(self):
        lines = [
            'TABLEM3, 62, 126.9, 30.0',
            ',2.9, 2.9, 3.6, 4.7, 5.2, 5.7, ENDT',
        ]
        card = model._process_card(lines)
        card = BDFCard(card)
        table = TABLEM3.add_card(card, comment='table')
        table.raw_fields()
        str(table)
        table.write_card(size=8)
        table.write_card_16(is_double=False)
        table.write_card_16(is_double=True)
        save_load_deck(model)
        #interp = table.interpolate(0.)
        #print('interp =', interp, type(interp))
        #assert np.allclose(interp, [5.5]), interp

    def test_tablem4(self):
        lines = [
            'TABLEM4, 28, 0.0, 1.0, 0.0, 100.',
            ',2.91, -0.0329, 6.51-5, 0.0, -3.4-7, ENDT',
        ]
        card = model._process_card(lines)
        card = BDFCard(card)
        table = TABLEM4.add_card(card, comment='table')
        table.raw_fields()
        str(table)
        table.raw_fields()
        table.write_card(size=8)
        table.write_card_16(is_double=False)
        table.write_card_16(is_double=True)
        save_load_deck(model)
        #interp = table.interpolate(0.)
        #print('interp =', interp, type(interp))
        #assert np.allclose(interp, [5.5]), interp
# Allow running this test module directly without a pytest/unittest runner.
if __name__ == '__main__':  # pragma: no cover
    unittest.main()
# -*- coding: utf-8 -*-
"""
threaded_ping_server.py
~~~~~~~~~~~~~~~~~~~~~~~
TCP server based on threads simulating ping output.
"""
__author__ = 'Grzegorz Latuszek'
__copyright__ = 'Copyright (C) 2018, Nokia'
__email__ = 'grzegorz.latuszek@nokia.com'
import logging
import select
import socket
import sys
import threading
import time
from contextlib import closing
# Canned transcript of a real `ping` session, served to clients line by line.
# The literal 10.0.2.15 is substituted per-server in ping_sim_tcp_server().
ping_output = '''
greg@debian:~$ ping 10.0.2.15
PING 10.0.2.15 (10.0.2.15) 56(84) bytes of data.
64 bytes from 10.0.2.15: icmp_req=1 ttl=64 time=0.080 ms
64 bytes from 10.0.2.15: icmp_req=2 ttl=64 time=0.037 ms
64 bytes from 10.0.2.15: icmp_req=3 ttl=64 time=0.045 ms
ping: sendmsg: Network is unreachable
ping: sendmsg: Network is unreachable
ping: sendmsg: Network is unreachable
64 bytes from 10.0.2.15: icmp_req=7 ttl=64 time=0.123 ms
64 bytes from 10.0.2.15: icmp_req=8 ttl=64 time=0.056 ms
'''
def ping_sim_tcp_server(server_port, ping_ip, client, address):
    """Serve the canned ping transcript to one connected client, one line per second.

    :param server_port: listening port of the accepting server (log naming only)
    :param ping_ip: IP address substituted into the canned output
    :param client: connected client socket (closed on exit)
    :param address: (host, port) of the client, for logging
    """
    _, client_port = address
    logger = logging.getLogger('threaded.ping.tcp-server({} -> {})'.format(server_port,
                                                                           client_port))
    logger.debug('connection accepted - client at tcp://{}:{}'.format(*address))
    ping_out = ping_output.replace("10.0.2.15", ping_ip)
    ping_lines = ping_out.splitlines(True)
    with closing(client):
        for ping_line in ping_lines:
            data = ping_line.encode(encoding='utf-8')
            try:
                client.sendall(data)
            except socket.error:  # client is gone
                break
            time.sleep(1)  # simulate delay between ping lines
        logger.info('Connection closed')
def server_loop(server_port, server_socket, ping_ip, done_event):
    """Accept clients until *done_event* is set, spawning one thread per connection."""
    logger = logging.getLogger('threaded.ping.tcp-server({})'.format(server_port))
    while not done_event.is_set():
        # without select we can't break loop from outside (via done_event)
        # since .accept() is blocking
        read_sockets, _, _ = select.select([server_socket], [], [], 0.1)
        if not read_sockets:
            continue
        client_socket, client_addr = server_socket.accept()
        client_socket.setblocking(1)
        client_thread = threading.Thread(target=ping_sim_tcp_server,
                                         args=(server_port, ping_ip,
                                               client_socket, client_addr))
        client_thread.start()
    logger.debug("Ping Sim: ... bye")
def start_ping_sim_server(server_address, ping_ip):
    """Run server simulating ping command output, this is one-shot server.

    :param server_address: (host, port) to bind
    :param ping_ip: IP address substituted into the canned ping output
    :return: (server_thread, done_event) — set the event to stop the accept loop
    """
    _, server_port = server_address
    logger = logging.getLogger('threaded.ping.tcp-server({})'.format(server_port))
    server_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    # SO_REUSEADDR allows quick restarts on the same port.
    server_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    server_socket.bind(server_address)
    server_socket.listen(1)
    logger.debug("Ping Sim started at tcp://{}:{}".format(*server_address))
    done_event = threading.Event()
    server_thread = threading.Thread(target=server_loop,
                                     args=(server_port, server_socket, ping_ip,
                                           done_event))
    server_thread.start()
    return server_thread, done_event
def tcp_connection(address, moler_conn):
    """Forwarder reading from tcp network transport layer.

    Generator: connects to *address*, forwards every received chunk into
    *moler_conn* and yields it; returns when the peer closes the connection.
    """
    logger = logging.getLogger('threaded.tcp-connection({}:{})'.format(*address))
    logger.debug('... connecting to tcp://{}:{}'.format(*address))
    client_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    client_socket.connect(address)
    with closing(client_socket):
        while True:
            data = client_socket.recv(128)
            if data:
                logger.debug('<<< {!r}'.format(data))
                # Forward received data into Moler's connection
                moler_conn.data_received(data)
                yield data
            else:
                # empty recv() means the remote side closed the connection
                logger.debug("... closed")
                break
def start_ping_servers(servers_addr):
    """Start one ping-simulating server per (address, ping_ip) pair.

    :return: list of (server_thread, done_event) tuples, one per server.
    """
    return [start_ping_sim_server(address, ping_ip)
            for address, ping_ip in servers_addr]
def METHOD_NAME(servers):
    """Stop every ping server: signal its done-event, then join its thread."""
    for worker_thread, done_event in servers:
        done_event.set()
        worker_thread.join()
# ==============================================================================
if __name__ == '__main__':
    logging.basicConfig(
        level=logging.DEBUG,
        format='%(asctime)s |%(name)-40s |%(message)s',
        datefmt='%H:%M:%S',
        stream=sys.stderr,
    )
    # Two simulated targets, each on its own local port.
    simulated_pings = [(('localhost', 5671), '10.0.2.15'),
                       (('localhost', 5672), '10.0.2.16')]
    running_servers = start_ping_servers(simulated_pings)
    time.sleep(2)
    METHOD_NAME(running_servers)
from unittest import mock
import pytest
from h.models import User
from h.services.group_members import GroupMembersService, group_members_factory
class TestMemberJoin:
    """Tests for GroupMembersService.member_join."""

    def test_it_adds_user_to_group(self, group_members_service, factories):
        user = factories.User()
        group = factories.Group()

        group_members_service.member_join(group, user.userid)

        assert user in group.members

    def test_it_is_idempotent(self, group_members_service, factories):
        user = factories.User()
        group = factories.Group()

        group_members_service.member_join(group, user.userid)
        group_members_service.member_join(group, user.userid)

        # Joining twice must not create a duplicate membership.
        assert group.members.count(user) == 1

    def test_it_publishes_join_event(self, group_members_service, factories, publish):
        group = factories.Group()
        user = factories.User()

        group_members_service.member_join(group, user.userid)

        publish.assert_called_once_with("group-join", group.pubid, user.userid)
class TestMemberLeave:
    """Tests for GroupMembersService.member_leave."""

    def test_it_removes_user_from_group(
        self, group_members_service, factories, creator
    ):
        group = factories.Group(creator=creator)
        new_member = factories.User()
        group.members.append(new_member)

        group_members_service.member_leave(group, new_member.userid)

        assert new_member not in group.members

    def test_it_is_idempotent(self, group_members_service, factories, creator):
        group = factories.Group(creator=creator)
        new_member = factories.User()
        group.members.append(new_member)

        # Leaving twice must not raise or change the result.
        group_members_service.member_leave(group, new_member.userid)
        group_members_service.member_leave(group, new_member.userid)

        assert new_member not in group.members

    def test_it_publishes_leave_event(self, group_members_service, factories, publish):
        group = factories.Group()
        new_member = factories.User()
        group.members.append(new_member)

        group_members_service.member_leave(group, new_member.userid)

        publish.assert_called_once_with("group-leave", group.pubid, new_member.userid)
class TestAddMembers:
    """Tests for GroupMembersService.add_members()."""
    def test_it_adds_users_in_userids(self, factories, group_members_service):
        """All listed userids become members of the group."""
        group = factories.OpenGroup()
        users = [factories.User(), factories.User()]
        userids = [user.userid for user in users]
        group_members_service.add_members(group, userids)
        assert group.members == users
    def test_it_does_not_remove_existing_members(
        self, factories, group_members_service
    ):
        """add_members is additive — pre-existing members (the creator) stay."""
        creator = factories.User()
        group = factories.Group(creator=creator)
        users = [factories.User(), factories.User()]
        userids = [user.userid for user in users]
        group_members_service.add_members(group, userids)
        assert len(group.members) == len(users) + 1  # account for creator user
        assert creator in group.members
class TestUpdateMembers:
    """Tests for GroupMembersService.update_members().

    update_members replaces the membership set: users in the userids list
    are added, members absent from the list are removed.
    """
    def test_it_adds_users_in_userids(self, factories, group_members_service):
        """Listed users are added to an empty group."""
        group = factories.OpenGroup()  # no members at outset
        new_members = [factories.User(), factories.User()]
        group_members_service.update_members(
            group, [user.userid for user in new_members]
        )
        assert group.members == new_members
    def test_it_removes_members_not_present_in_userids(
        self, factories, group_members_service, creator
    ):
        """An empty userids list removes every member — even the creator."""
        group = factories.Group(creator=creator)  # creator will be a member
        new_members = [factories.User(), factories.User()]
        group.members.append(new_members[0])
        group.members.append(new_members[1])
        group_members_service.update_members(group, [])
        assert not group.members  # including the creator
    def test_it_does_not_remove_members_present_in_userids(
        self, factories, group_members_service, publish
    ):
        """Passing the current membership unchanged makes no edits or events."""
        group = factories.OpenGroup()  # no members at outset
        new_members = [factories.User(), factories.User()]
        group.members.append(new_members[0])
        group.members.append(new_members[1])
        group_members_service.update_members(
            group, [user.userid for user in group.members]
        )
        assert new_members[0] in group.members
        assert new_members[1] in group.members
        publish.assert_not_called()
    def test_it_proxies_to_member_join_and_leave(
        self, factories, group_members_service
    ):
        """update_members delegates the individual edits to member_join/member_leave."""
        group_members_service.member_join = mock.Mock()
        group_members_service.member_leave = mock.Mock()
        group = factories.OpenGroup()  # no members at outset
        new_members = [factories.User(), factories.User()]
        group.members.append(new_members[0])
        group_members_service.update_members(group, [new_members[1].userid])
        group_members_service.member_join.assert_called_once_with(
            group, new_members[1].userid
        )
        group_members_service.member_leave.assert_called_once_with(
            group, new_members[0].userid
        )
    def test_it_does_not_add_duplicate_members(self, factories, group_members_service):
        # test for idempotency
        group = factories.OpenGroup()
        new_member = factories.User()
        group_members_service.update_members(
            group, [new_member.userid, new_member.userid]
        )
        assert group.members == [new_member]
        assert len(group.members) == 1
@pytest.mark.usefixtures("user_service")
class TestFactory:
    """Tests for the group_members_factory service factory."""
    def test_returns_groups_service(self, pyramid_request):
        """The factory builds a GroupMembersService instance."""
        group_members_service = group_members_factory(None, pyramid_request)
        assert isinstance(group_members_service, GroupMembersService)
    def METHOD_NAME(self, pyramid_request):
        """The factory wires the request's DB session into the service."""
        group_members_service = group_members_factory(None, pyramid_request)
        assert group_members_service.db == pyramid_request.db
    def test_wraps_user_service_as_user_fetcher(self, pyramid_request, user_service):
        """The service's user_fetcher delegates to the registered user service."""
        group_members_service = group_members_factory(None, pyramid_request)
        group_members_service.user_fetcher("foo")
        user_service.fetch.assert_called_once_with("foo")
    def test_provides_realtime_publisher_as_publish(self, patch, pyramid_request):
        """The service's publish callback forwards events to request.realtime."""
        pyramid_request.realtime = mock.Mock(spec_set=["publish_user"])
        session = patch("h.services.group_members.session")
        group_members_service = group_members_factory(None, pyramid_request)
        group_members_service.publish("group-join", "abc123", "theresa")
        # The session model for the current request is embedded in the payload.
        session.model.assert_called_once_with(pyramid_request)
        pyramid_request.realtime.publish_user.assert_called_once_with(
            {
                "type": "group-join",
                "session_model": session.model.return_value,
                "userid": "theresa",
                "group": "abc123",
            }
        )
@pytest.fixture
def usr_group_members_service(db_session):
    """Return a user-fetcher callable backed directly by the test DB session."""
    def fetch(userid):
        # One doesn't want to couple to the user fetching service but
        # we do want to be able to fetch user models for internal
        # module behavior tests
        return db_session.query(User).filter_by(userid=userid).one_or_none()
    return fetch
@pytest.fixture
def publish():
    """Strict mock standing in for the service's realtime publish callback."""
    return mock.Mock(spec_set=[])
@pytest.fixture
def group_members_service(db_session, usr_group_members_service, publish):
    """The GroupMembersService under test, wired to the test DB and mocks."""
    return GroupMembersService(db_session, usr_group_members_service, publish=publish)
@pytest.fixture
def creator(factories):
    """A distinct user to act as a group's creator in membership tests."""
    return factories.User(username="group_creator")
4,790 | translated words | from robotframework_ls.impl.protocols import ICompletionContext, ILocalizationInfo
from typing import Optional
from robotframework_ls.impl.text_utilities import normalize_robot_name
class _Requisites(object):
    """Bundle of everything section-name completion needs to build its items."""
    def __init__(self, section, matcher, replace_from_col, replace_to_col, selection):
        self.section = section  # section constant describing the current section
        self.matcher = matcher  # RobotStringMatcher over the typed prefix
        self.replace_from_col = replace_from_col  # start column of the text edit
        self.replace_to_col = replace_to_col  # end column of the text edit
        self.selection = selection  # current DocumentSelection
def get_requisites(completion_context: ICompletionContext) -> Optional[_Requisites]:
    """Collect what's needed to complete names in the current section.

    Returns None whenever completion does not apply: no current section,
    no section constant for it, or the cursor position does not match the
    section's expected syntax.
    """
    section_node = completion_context.get_ast_current_section()
    if section_node is None:
        return None
    from robotframework_ls.impl.string_matcher import RobotStringMatcher
    from robotframework_ls.impl.section_completions import get_section_constant
    section = get_section_constant(completion_context, section_node)
    if section is None:
        return None
    selection = completion_context.sel #: :type selection: DocumentSelection
    line_to_col = selection.line_to_column
    # A trailing space means the user already finished the token — nothing to do.
    if line_to_col.endswith(" "):
        return None
    replace_to_col = selection.col
    if section.names_in_brackets:
        # Names such as "[Arguments]": skip leading whitespace up to the "[",
        # then match only the text typed after it.
        for i, c in enumerate(line_to_col):
            if c.isspace():
                continue
            elif c == "[":
                line_to_col = line_to_col[i + 1 :]
                replace_from_col = i
                break
            else:
                # First non-space char isn't "[": not a bracketed name position.
                return None
        else:
            # Line contained only whitespace (loop never hit break).
            return None
        matcher = RobotStringMatcher(line_to_col)
    else:
        # i.e.: Needs to be the first char
        matcher = RobotStringMatcher(line_to_col)
        replace_from_col = 0
    return _Requisites(section, matcher, replace_from_col, replace_to_col, selection)
def complete(completion_context: ICompletionContext):
    """Provide completion items for names valid in the current section.

    Returns a list of LSP CompletionItem dicts (possibly empty). On RF >= 5.1
    the candidate names are localized via the document's language settings;
    older versions fall back to the built-in English names.
    """
    from robocorp_ls_core.lsp import (
        TextEdit,
        Range,
        Position,
        CompletionItem,
        CompletionItemKind,
    )
    from robotframework_ls.impl.robot_version import robot_version_supports_language
    requisites = get_requisites(completion_context)
    if requisites is None:
        return []
    section = requisites.section
    matcher = requisites.matcher
    replace_from_col = requisites.replace_from_col
    selection = requisites.selection
    replace_to_col = requisites.replace_to_col
    line = selection.current_line
    # If a "]" immediately follows the cursor we extend the edit to cover it,
    # since bracketed replacements already include their own closing "]".
    sel_ends_with_close = line[selection.col :].startswith("]")
    ret = []
    if robot_version_supports_language():
        from robot.api import Language
        locinfo: ILocalizationInfo = completion_context.get_ast_localization_info()
        current_section_name = normalize_robot_name(
            completion_context.get_current_section_name()
        )
        def METHOD_NAME():
            # Yield the localized names for the language whose section marker
            # matches the current section; fall back to all languages' names.
            lang: Language
            for lang in locinfo.iter_languages_on_write():
                markers = section.markers_for_lang(lang)
                for marker in markers:
                    if normalize_robot_name(marker) == current_section_name:
                        yield from iter(section.names_for_lang(lang))
                        return
            # If it didn't return (is this possible?), provide all.
            for lang in locinfo.iter_languages_on_write():
                yield from iter(section.names_for_lang(lang))
        words = tuple(METHOD_NAME())
    else:
        words = section.get_names_in_section_pre_rf_5_1()
    for word in sorted(words):
        if matcher.accepts(word):
            col_delta = 0
            if section.names_in_brackets:
                label = f"[{word}]"
                replacement = label
                if sel_ends_with_close:
                    # Replacement brings its own "]"; consume the existing one.
                    col_delta = 1
            else:
                label = word
                replacement = word
            text_edit = TextEdit(
                Range(
                    start=Position(selection.line, replace_from_col),
                    end=Position(selection.line, replace_to_col + col_delta),
                ),
                replacement,
            )
            # text_edit = None
            ret.append(
                CompletionItem(
                    label, kind=CompletionItemKind.Keyword, text_edit=text_edit
                ).to_dict()
            )
    return ret
4,791 | vts | # -*- coding: utf-8 -*-
# SPDX-FileCopyrightText: 2014-2023 Greenbone AG
#
# SPDX-License-Identifier: AGPL-3.0-or-later
""" Classes for storing VTs
"""
import logging
import multiprocessing
from hashlib import sha256
import re
from copy import deepcopy
from typing import (
Dict,
Any,
Type,
Iterator,
Iterable,
Tuple,
)
from ospd.errors import OspdError
logger = logging.getLogger(__name__)

# VT identifiers: 1-80 characters of alphanumerics, '_', '-', ':' or '.'.
DEFAULT_VT_ID_PATTERN = re.compile("[0-9a-zA-Z_\\-:.]{1,80}")


class Vts:
    """Lazily-initialized collection of vulnerability tests (VTs).

    The backing storage is created on first access: either the ``storage``
    factory passed to the constructor, or by default a multiprocessing
    manager dict that can be shared between processes.
    """

    def __init__(
        self,
        storage: Type[Dict] = None,
        vt_id_pattern=DEFAULT_VT_ID_PATTERN,
    ):
        self.storage = storage
        self.vt_id_pattern = vt_id_pattern
        # Created lazily by __init_vts() via the METHOD_NAME property.
        self._vts = None
        self.sha256_hash = None
        # NOTE(review): not used within this class — presumably consumed
        # elsewhere in the daemon; confirm before removing.
        self.is_cache_available = True

    def __contains__(self, key: str) -> bool:
        # Go through the property so the storage is initialized on first
        # use (previously this read self._vts directly and raised
        # TypeError when the collection had never been touched).
        return key in self.METHOD_NAME

    def __iter__(self) -> Iterator[str]:
        vts = self.METHOD_NAME
        if hasattr(vts, '__iter__'):
            return vts.__iter__()
        # Some proxy storages may not implement __iter__; always return a
        # real iterator (previously this fell through and returned None,
        # which breaks iteration with a TypeError).
        return iter(vts.keys())

    def __getitem__(self, key):
        return self.METHOD_NAME[key]

    def items(self) -> Iterator[Tuple[str, Dict]]:
        return iter(self.METHOD_NAME.items())

    def __len__(self) -> int:
        return len(self.METHOD_NAME)

    def __init_vts(self):
        # Instantiate the configured storage, or fall back to a
        # process-shared managed dict.
        if self.storage:
            self._vts = self.storage()
        else:
            self._vts = multiprocessing.Manager().dict()

    @property
    def METHOD_NAME(self) -> Dict[str, Any]:
        """The underlying VT mapping, creating it on first access."""
        if self._vts is None:
            self.__init_vts()
        return self._vts

    def add(
        self,
        vt_id: str,
        name: str = None,
        vt_params: str = None,
        vt_refs: str = None,
        custom: str = None,
        vt_creation_time: str = None,
        vt_modification_time: str = None,
        vt_dependencies: str = None,
        summary: str = None,
        impact: str = None,
        affected: str = None,
        insight: str = None,
        solution: str = None,
        solution_t: str = None,
        solution_m: str = None,
        detection: str = None,
        qod_t: str = None,
        qod_v: str = None,
        severities: str = None,
    ) -> None:
        """Add a vulnerability test information.

        Raises OspdError if vt_id is empty, malformed or already present.

        IMPORTANT: The VT's Data Manager will store the vts collection.
        If the collection is considerably big and will be consulted
        intensively during a routine, consider doing a deepcopy(), since
        accessing the shared memory in the data manager is very expensive.
        At the end of the routine, the temporary copy must be set to None
        and deleted.
        """
        if not vt_id:
            raise OspdError(f'Invalid vt_id {vt_id}')
        if self.vt_id_pattern.fullmatch(vt_id) is None:
            raise OspdError(f'Invalid vt_id {vt_id}')
        if vt_id in self.METHOD_NAME:
            raise OspdError(f'vt_id {vt_id} already exists')
        if name is None:
            name = ''
        vt = {'name': name}
        if custom is not None:
            vt["custom"] = custom
        if vt_params is not None:
            vt["vt_params"] = vt_params
        if vt_refs is not None:
            vt["vt_refs"] = vt_refs
        if vt_dependencies is not None:
            vt["vt_dependencies"] = vt_dependencies
        if vt_creation_time is not None:
            vt["creation_time"] = vt_creation_time
        if vt_modification_time is not None:
            vt["modification_time"] = vt_modification_time
        if summary is not None:
            vt["summary"] = summary
        if impact is not None:
            vt["impact"] = impact
        if affected is not None:
            vt["affected"] = affected
        if insight is not None:
            vt["insight"] = insight
        if solution is not None:
            vt["solution"] = solution
        if solution_t is not None:
            vt["solution_type"] = solution_t
        if solution_m is not None:
            vt["solution_method"] = solution_m
        if detection is not None:
            vt["detection"] = detection
        if qod_t is not None:
            vt["qod_type"] = qod_t
        elif qod_v is not None:
            # qod_v is only stored when no qod_t was given.
            vt["qod"] = qod_v
        if severities is not None:
            vt["severities"] = severities
        self.METHOD_NAME[vt_id] = vt

    def get(self, vt_id: str) -> Dict[str, Any]:
        """Return the VT dict for vt_id, or None if unknown."""
        return self.METHOD_NAME.get(vt_id)

    def keys(self) -> Iterable[str]:
        return self.METHOD_NAME.keys()

    def clear(self) -> None:
        """Empty the collection and drop the storage for re-initialization."""
        # Guard against clearing a collection that was never initialized.
        if self._vts is not None:
            self._vts.clear()
        self._vts = None

    def copy(self) -> "Vts":
        """Return a new Vts with a deep copy of the current contents."""
        copy = Vts(self.storage, vt_id_pattern=self.vt_id_pattern)
        copy._vts = deepcopy(self._vts)  # pylint: disable=protected-access
        return copy

    def calculate_vts_collection_hash(self, include_vt_params: bool = True):
        """Calculate the vts collection sha256 hash."""
        if not self._vts:
            logger.debug(
                "Error calculating VTs collection hash. Cache is empty"
            )
            return
        m = sha256()  # pylint: disable=invalid-name
        # for a reproducible hash calculation
        # the vts must already be sorted in the dictionary.
        for vt_id, vt in self.METHOD_NAME.items():
            param_chain = ""
            vt_params = vt.get('vt_params')
            if include_vt_params and vt_params:
                for _, param in sorted(vt_params.items()):
                    param_chain += (
                        param.get('id')
                        + param.get('name')
                        + param.get('default')
                    )
            m.update(
                (vt_id + vt.get('modification_time')).encode('utf-8')
                + param_chain.encode('utf-8')
            )
        self.sha256_hash = m.hexdigest()
4,792 | get read stats | import glob
import os.path
import datetime
import sys
from typing import Any
from hdrh.histogram import HdrHistogram
from hdrh.log import HistogramLogReader
CS_HDR_FILE_WC = "*/cs_hdr_*.hdr"
TIME_INTERVAL = 300
def get_list_of_hdr_files(base_path: str) -> list[str]:
hdr_files = []
for hdr_file in glob.glob(CS_HDR_FILE_WC, root_dir=base_path, recursive=True):
hdr_files.append(os.path.join(base_path, hdr_file))
return hdr_files
class CSHdrHistogram:
    """Aggregate cassandra-stress HDR latency logs into summary statistics."""

    # Histogram value range: 1 microsecond up to 24 hours (in microseconds),
    # tracked with 3 significant digits.
    LOWEST = 1
    HIGHEST = 24 * 60 * 60 * 1000 * 1000
    SIGNIFICANT = 3
    # Tags cassandra-stress uses for its response-time series.
    WRITE = "WRITE-rt"
    READ = "READ-rt"
    PERCENTILES = [50, 90, 95, 99, 99.9, 99.99, 99.999]

    @classmethod
    def get_empty_histogram(cls) -> HdrHistogram:
        """Return a fresh histogram configured with the class-wide range."""
        return HdrHistogram(cls.LOWEST, cls.HIGHEST, cls.SIGNIFICANT)

    @classmethod
    def format_timestamp(cls, timestamp: float) -> str:
        """Format a UNIX timestamp (seconds) as UTC 'YYYY-mm-dd HH:MM:SS'."""
        return datetime.datetime.utcfromtimestamp(timestamp).strftime('%Y-%m-%d %H:%M:%S')

    def __init__(self, base_path: str):
        self.base_time = None
        self._base_path = base_path
        # One accumulated histogram per tag, plus one for untagged intervals.
        self._tagged_histograms: dict[str, HdrHistogram] = {}
        self._untagged_histogram = self.get_empty_histogram()

    def clear_histograms(self):
        """Drop all accumulated histogram data."""
        self._tagged_histograms: dict[str, HdrHistogram] = {}
        self._untagged_histogram = self.get_empty_histogram()

    def build_histogram_from_files(self,
                                   start_time: float = 0,
                                   end_time: int = sys.maxsize,
                                   absolute: bool = False):
        """Merge every interval histogram found under the base path.

        Only intervals within [start_time, end_time] (seconds) are merged;
        tagged intervals accumulate per tag, the rest into the untagged
        histogram.
        """
        hdr_files = get_list_of_hdr_files(self._base_path)
        for hdr_file in hdr_files:
            hdr_reader = HistogramLogReader(hdr_file, self.get_empty_histogram())
            while True:
                next_hist = hdr_reader.get_next_interval_histogram(range_start_time_sec=start_time,
                                                                   range_end_time_sec=end_time,
                                                                   absolute=absolute)
                if not next_hist:
                    break
                if tag := next_hist.get_tag():
                    if tag not in self._tagged_histograms.keys():
                        self._tagged_histograms[tag] = self.get_empty_histogram()
                        self._tagged_histograms[tag].set_tag(tag)
                    # Record the start timestamp only once, from the first
                    # interval merged into this tag's histogram.
                    if self._tagged_histograms[tag].get_start_time_stamp() == 0:
                        self._tagged_histograms[tag].set_start_time_stamp(next_hist.get_start_time_stamp())
                    self._tagged_histograms[tag].add(next_hist)
                else:
                    # BUG FIX: condition was inverted ('not ... == 0'),
                    # so the untagged start timestamp was never initialized
                    # and was clobbered on every later interval. Mirror the
                    # tagged branch: set it only when still unset.
                    if self._untagged_histogram.get_start_time_stamp() == 0:
                        self._untagged_histogram.set_start_time_stamp(next_hist.get_start_time_stamp())
                    self._untagged_histogram.add(next_hist)

    def get_operation_stats_by_tag(self, tag: str = '') -> dict[str, Any]:
        """Return start/end time, stddev and percentile stats (ms) for *tag*.

        An empty tag selects the untagged histogram; an unknown tag yields {}.
        """
        histogram = self._untagged_histogram if not tag else self._tagged_histograms.get(tag)
        if not histogram:
            return {}
        percentile_values = histogram.get_percentile_to_value_dict(self.PERCENTILES)
        # Histogram values are microseconds; report milliseconds, 2 decimals.
        percentiles_in_ms = {f"percentile_{k}".replace(".", "_"): round(
            (v / 1_000_000), 2) for k, v in percentile_values.items()}
        return {
            "start_time": self.format_timestamp(histogram.get_start_time_stamp() / 1000),
            "end_time": self.format_timestamp(histogram.get_end_time_stamp() / 1000),
            "stddev": histogram.get_stddev() / 1_000_000,
            **percentiles_in_ms
        }

    def get_write_stats(self):
        """Stats for the WRITE response-time series."""
        return self.get_operation_stats_by_tag(tag=self.WRITE)

    def METHOD_NAME(self):
        """Stats for the READ response-time series."""
        return self.get_operation_stats_by_tag(tag=self.READ)

    def get_stats(self, operation):
        """Return stats for 'write', 'read', or both for any other value."""
        if operation == "write":
            return {"WRITE": self.get_write_stats()}
        elif operation == "read":
            return {"READ": self.METHOD_NAME()}
        else:
            return {
                "WRITE": self.get_write_stats(),
                "READ": self.METHOD_NAME()
            }

    def get_hdr_stats_for_interval(self, workload: str,
                                   start_ts: float, end_ts: float | int,
                                   base_dir: str = '') -> list[dict]:
        """Return per-window stats for [start_ts, end_ts], one dict per window.

        The range is split into TIME_INTERVAL-second windows (or a single
        window when the range is shorter), rebuilding the histograms for
        each window.
        """
        start_ts = int(start_ts)
        end_ts = int(end_ts)
        if not base_dir:
            base_dir = self._base_path
        if end_ts - start_ts < TIME_INTERVAL:
            window_step = int(end_ts - start_ts)
        else:
            window_step = TIME_INTERVAL
        hdr_stats = []
        for start_interval in range(start_ts, end_ts, window_step):
            end_interval = end_ts if start_interval + window_step > end_ts else start_interval + window_step
            self.build_histogram_from_files(start_time=start_interval, end_time=end_interval, absolute=True)
            hdr_stats.append(self.get_stats(workload))
            self.clear_histograms()
        return hdr_stats
return hdr_stats |
4,793 | create integration gb | # -*- coding: utf-8 -*-
# Dioptas - GUI program for fast processing of 2D X-ray diffraction data
# Principal author: Clemens Prescher (clemens.prescher@gmail.com)
# Copyright (C) 2014-2019 GSECARS, University of Chicago, USA
# Copyright (C) 2015-2018 Institute for Geology and Mineralogy, University of Cologne, Germany
# Copyright (C) 2019-2020 DESY, Hamburg, Germany
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import os
from qtpy import QtWidgets, QtCore, QtGui
from .... import icons_path
from ...CustomWidgets import IntegerTextField, NumberTextField, LabelAlignRight, SpinBoxAlignRight,\
ConservativeSpinBox, CheckableFlatButton, SaveIconButton
class OptionsWidget(QtWidgets.QWidget):
    """Settings panel for Dioptas' 1D and 2D (cake) integration options.

    Builds two group boxes — 1D integration and cake integration — lays
    them out vertically on the left, and applies styling and tooltips.
    """
    def __init__(self):
        super(OptionsWidget, self).__init__()
        # Build both group boxes first, then style them and assemble layout.
        self.METHOD_NAME()
        self.create_cake_gb()
        self.style_integration_widgets()
        self.style_cake_widgets()
        self.set_tooltips()
        self._layout = QtWidgets.QHBoxLayout()
        self._layout.setContentsMargins(5, 5, 5, 5)
        self._layout.setSpacing(5)
        self._left_layout = QtWidgets.QVBoxLayout()
        self._left_layout.addWidget(self.integration_gb)
        self._left_layout.addWidget(self.cake_gb)
        self._left_layout.addStretch(1)
        self._layout.addLayout(self._left_layout)
        self._layout.addStretch(1)
        self.setLayout(self._layout)
    def METHOD_NAME(self):
        """Create the 1D integration group box and its controls."""
        self.integration_gb = QtWidgets.QGroupBox('1D integration')
        self._integration_gb_layout = QtWidgets.QGridLayout()
        self._integration_gb_layout.setContentsMargins(5, 8, 5, 7)
        self._integration_gb_layout.setSpacing(5)
        self.oned_azimuth_min_txt = NumberTextField('-180')
        self.oned_azimuth_max_txt = NumberTextField('180')
        self.oned_full_toggle_btn = CheckableFlatButton('Full available range')
        self.bin_count_txt = IntegerTextField('0')
        self.bin_count_cb = QtWidgets.QCheckBox('auto')
        self.supersampling_sb = SpinBoxAlignRight()
        self.correct_solid_angle_cb = QtWidgets.QCheckBox('correct Solid Angle')
        self.correct_solid_angle_cb.setChecked(True)
        # Grid: row 0 radial bins, row 1 supersampling, row 2 solid angle,
        # rows 3-4 azimuth range and full-range toggle.
        self._integration_gb_layout.addWidget(LabelAlignRight('Radial bins:'), 0, 0)
        self._integration_gb_layout.addWidget(LabelAlignRight('Supersampling:'), 1, 0)
        self._integration_gb_layout.addWidget(self.bin_count_txt, 0, 1)
        self._integration_gb_layout.addWidget(self.bin_count_cb, 0, 2)
        self._integration_gb_layout.addWidget(self.supersampling_sb, 1, 1)
        self._integration_gb_layout.addWidget(self.correct_solid_angle_cb, 2, 1, 1, 2)
        self._integration_gb_layout.addWidget(LabelAlignRight('Azimuth range:'), 3, 0)
        self._integration_gb_layout.addWidget(self.oned_azimuth_min_txt, 3, 1, 1, 1)
        self._integration_gb_layout.addWidget(self.oned_azimuth_max_txt, 3, 2, 1, 1)
        self._integration_gb_layout.addWidget(self.oned_full_toggle_btn, 4, 1, 1, 2)
        self.integration_gb.setLayout(self._integration_gb_layout)
    def create_cake_gb(self):
        """Create the 2D (cake) integration group box and its controls."""
        self.cake_gb = QtWidgets.QGroupBox('2D (Cake-) integration')
        self._cake_gb_layout = QtWidgets.QGridLayout()
        self._cake_gb_layout.setContentsMargins(5, 8, 5, 7)
        self._cake_gb_layout.setSpacing(5)
        self.cake_azimuth_points_sb = ConservativeSpinBox()
        self.cake_azimuth_min_txt = NumberTextField('-180')
        self.cake_azimuth_max_txt = NumberTextField('180')
        self.cake_full_toggle_btn = CheckableFlatButton('Full')
        self.cake_integral_width_sb = ConservativeSpinBox()
        self.cake_save_integral_btn = SaveIconButton()
        self._cake_gb_layout.addWidget(LabelAlignRight('Azimuth bins:'), 0, 0)
        self._cake_gb_layout.addWidget(self.cake_azimuth_points_sb, 0, 1)
        self._cake_gb_layout.addWidget(LabelAlignRight('Azimuth range:'), 1, 0)
        # min/max fields share one horizontal sub-layout with a '-' separator.
        self._azi_range_layout = QtWidgets.QHBoxLayout()
        self._azi_range_layout.addWidget(self.cake_azimuth_min_txt)
        self._azi_range_separater_lbl = LabelAlignRight('-')
        self._azi_range_layout.addWidget(self._azi_range_separater_lbl)
        self._azi_range_layout.addWidget(self.cake_azimuth_max_txt)
        self._cake_gb_layout.addLayout(self._azi_range_layout, 1, 1)
        self._cake_gb_layout.addWidget(self.cake_full_toggle_btn, 1, 2)
        self._cake_gb_layout.addWidget(LabelAlignRight('Integral Width:'), 2, 0)
        self._cake_gb_layout.addWidget(self.cake_integral_width_sb, 2, 1)
        self._cake_gb_layout.addWidget(self.cake_save_integral_btn, 2, 2)
        self.cake_gb.setLayout(self._cake_gb_layout)
    def style_integration_widgets(self):
        """Apply sizes, ranges and default states to the 1D controls."""
        max_width = 110
        self.bin_count_txt.setMaximumWidth(max_width)
        self.supersampling_sb.setMaximumWidth(max_width)
        self.supersampling_sb.setMinimum(1)
        self.supersampling_sb.setMaximum(20)
        self.supersampling_sb.setSingleStep(1)
        # Defaults: automatic bin count and the full azimuth range,
        # so the manual input fields start disabled.
        self.bin_count_txt.setEnabled(False)
        self.bin_count_cb.setChecked(True)
        self.oned_full_toggle_btn.setChecked(True)
        self.oned_azimuth_min_txt.setDisabled(True)
        self.oned_azimuth_max_txt.setDisabled(True)
    def style_cake_widgets(self):
        """Apply sizes, ranges and default states to the cake controls."""
        self.cake_azimuth_points_sb.setMaximumWidth(115)
        self.cake_azimuth_points_sb.setMinimum(1)
        self.cake_azimuth_points_sb.setMaximum(10000)
        self.cake_azimuth_points_sb.setSingleStep(100)
        self.cake_azimuth_min_txt.setMinimumWidth(50)
        self.cake_azimuth_min_txt.setMaximumWidth(50)
        self.cake_azimuth_max_txt.setMinimumWidth(50)
        self.cake_azimuth_max_txt.setMaximumWidth(50)
        self._azi_range_separater_lbl.setMaximumWidth(5)
        self._azi_range_layout.setSpacing(0)
        self._azi_range_layout.setContentsMargins(0, 0, 0, 0)
        # Defaults to the full azimuth range, so the fields start disabled.
        self.cake_full_toggle_btn.setChecked(True)
        self.cake_azimuth_min_txt.setDisabled(True)
        self.cake_azimuth_max_txt.setDisabled(True)
        self.cake_integral_width_sb.setMinimum(1)
        self.cake_integral_width_sb.setSingleStep(1)
        self.cake_integral_width_sb.setMaximum(1000000)
        button_width = 25
        button_height = 25
        self.cake_save_integral_btn.setIconSize(QtCore.QSize(15, 15))
        self.cake_save_integral_btn.setWidth(button_width)
        self.cake_save_integral_btn.setHeight(button_height)
    def set_tooltips(self):
        """Set user-facing tooltips for the cake controls."""
        self.cake_full_toggle_btn.setToolTip("Set to full available range")
        self.cake_save_integral_btn.setToolTip("Save the tth integral next to the cake image")
        self.cake_integral_width_sb.setToolTip("Sets the width used for the integral plot\nnext to the cake image.")
4,794 | test frames to tensor and back to | #!/usr/bin/env python
# Copyright (C) 2006-2021 Music Technology Group - Universitat Pompeu Fabra
#
# This file is part of Essentia
#
# Essentia is free software: you can redistribute it and/or modify it under
# the terms of the GNU Affero General Public License as published by the Free
# Software Foundation (FSF), either version 3 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the Affero GNU General Public License
# version 3 along with this program. If not, see http://www.gnu.org/licenses/
from essentia_test import *
from essentia.streaming import *
class TestTensorToPool(TestCase):
    """Round-trip tests: frames -> tensor -> pool -> tensor -> frames."""
    def identityOperation(self, frameSize=1024, hopSize=512, patchSize=187,
                          lastPatchMode='discard', accumulate=False):
        """Run the streaming round-trip network and return (output, input) frames.

        Frames from the test audio are packed into tensors, pushed through
        TensorToPool/PoolToTensor and unpacked back to frames; both the
        original and reconstructed frames are collected in a pool.
        """
        # NOTE(review): batchHopSize is computed but never passed to
        # VectorRealToTensor — confirm whether it should be wired in.
        batchHopSize = -1 if accumulate else 1
        filename = join(testdata.audio_dir, 'recorded', 'cat_purrrr.wav')
        namespace='tensor'
        ml = MonoLoader(filename=filename)
        fc = FrameCutter(frameSize=frameSize, hopSize=hopSize)
        vtt = VectorRealToTensor(shape=[1, 1, patchSize, frameSize],
                                 lastPatchMode=lastPatchMode)
        ttp = TensorToPool(namespace=namespace)
        ptt = PoolToTensor(namespace=namespace)
        ttv = TensorToVectorReal()
        pool = Pool()
        ml.audio >> fc.signal
        fc.frame >> vtt.frame
        fc.frame >> (pool, "framesIn")
        vtt.tensor >> ttp.tensor
        ttp.pool >> ptt.pool
        ptt.tensor >> ttv.tensor
        ttv.frame >> (pool, "framesOut")
        run(ml)
        return pool['framesOut'], pool['framesIn']
    def testFramesToTensorAndBackToFramesDiscard(self):
        """Round-trip is lossless when frames fit whole patches (discard mode)."""
        # The test audio file has 430 frames.
        # Setting the patchSize to produce exactly 10 patches.
        numberOfFrames = 43
        found, expected = self.identityOperation(patchSize=numberOfFrames,
                                                 lastPatchMode='discard')
        self.assertAlmostEqualMatrix(found, expected, 1e-8)
        # Now the number of frames does not match an exact number of patches.
        # The expected output is trimmed to the found shape as with
        # lastPatchMode='discard' the remaining frames not fitting into a
        # patch are discarded.
        found, expected = self.identityOperation(frameSize=256, hopSize=128,
                                                 lastPatchMode='discard')
        self.assertAlmostEqualMatrix(found, expected[:found.shape[0], :], 1e-8)
        # Increase the patch size.
        found, expected = self.identityOperation(frameSize=256, hopSize=128,
                                                 patchSize=300, lastPatchMode='discard')
        self.assertAlmostEqualMatrix(found, expected[:found.shape[0], :], 1e-8)
    def testFramesToTensorAndBackToFramesDiscardAccumulate(self):
        """Same as discard mode, but with all patches pushed at end of stream."""
        # Repeat the tests in accumulate mode. Here the patches are stored
        # internally and pushed at once at the end of the stream.
        numberOfFrames = 43
        found, expected = self.identityOperation(patchSize=numberOfFrames,
                                                 lastPatchMode='discard',
                                                 accumulate=True)
        self.assertAlmostEqualMatrix(found, expected, 1e-8)
        found, expected = self.identityOperation(frameSize=256, hopSize=128,
                                                 lastPatchMode='discard',
                                                 accumulate=True)
        self.assertAlmostEqualMatrix(found, expected[:found.shape[0], :], 1e-8)
        found, expected = self.identityOperation(frameSize=256, hopSize=128,
                                                 patchSize=300, lastPatchMode='discard',
                                                 accumulate=True)
        self.assertAlmostEqualMatrix(found, expected[:found.shape[0], :], 1e-8)
    def METHOD_NAME(self):
        """Round-trip with lastPatchMode='repeat' loops spare frames into a patch."""
        # Repeat the experiments with lastPatchMode='repeat'. Now if there
        # are remaining frames they will be looped into a final patch.
        # The found shape will be equal or bigger than the expected one.
        # Found values will be trimmed to fit the expected shape.
        # No remaining frames.
        numberOfFrames = 43
        found, expected = self.identityOperation(patchSize=numberOfFrames,
                                                 lastPatchMode='repeat')
        self.assertAlmostEqualMatrix(found, expected, 1e-8)
        # Some remaining frames.
        found, expected = self.identityOperation(frameSize=256, hopSize=128,
                                                 lastPatchMode='repeat')
        self.assertAlmostEqualMatrix(found[:expected.shape[0], :], expected, 1e-8)
        # Increase the patch size.
        found, expected = self.identityOperation(frameSize=256, hopSize=128,
                                                 patchSize=300, lastPatchMode='repeat')
        self.assertAlmostEqualMatrix(found[:expected.shape[0], :], expected, 1e-8)
    def testFramesToTensorAndBackToFramesRepeatAccumulate(self):
        """Repeat mode behaves identically when patches are accumulated."""
        # The behavior should be the same in accumulate mode.
        numberOfFrames = 43
        found, expected = self.identityOperation(patchSize=numberOfFrames,
                                                 lastPatchMode='repeat',
                                                 accumulate=True)
        self.assertAlmostEqualMatrix(found, expected, 1e-8)
        found, expected = self.identityOperation(frameSize=256, hopSize=128,
                                                 lastPatchMode='repeat',
                                                 accumulate=True)
        self.assertAlmostEqualMatrix(found[:expected.shape[0], :], expected, 1e-8)
        found, expected = self.identityOperation(frameSize=256, hopSize=128,
                                                 patchSize=300, lastPatchMode='repeat',
                                                 accumulate=True)
        self.assertAlmostEqualMatrix(found[:expected.shape[0], :], expected, 1e-8)
    def testInvalidParam(self):
        """An empty mode string must be rejected at configure time."""
        self.assertConfigureFails(TensorToPool(), {'mode': ''})
    def testRepeatMode(self):
        """The final looped patch contains the spare frames repeated to size."""
        # The test audio file has 430 frames. If patchSize is set to 428 with
        # lastPatchMode='repeat' VectorRealToTensor will produce a second
        # patch of 428 frames by looping the last two spare samples.
        numberOfFrames = 428
        loopFrames = 430 - numberOfFrames
        found, expected = self.identityOperation(patchSize=numberOfFrames,
                                                 lastPatchMode='repeat')
        expected = numpy.vstack([expected[:numberOfFrames]] +  # frames for the first patch
                                [expected[numberOfFrames:numberOfFrames + loopFrames]] *  # remaining frames for the second patch
                                (numberOfFrames // loopFrames))  # number of repetitions to fill the second patch
        self.assertAlmostEqualMatrix(found, expected, 1e-8)
suite = allTests(TestTensorToPool)
if __name__ == '__main__':
TextTestRunner(verbosity=2).run(suite) |
4,795 | test delete requires auth | import pytest
from . import helper
from github3 import GitHubError
from github3.orgs import Team
# URL builder rooted at the canned team resource used throughout these tests.
url_for = helper.create_url_helper(
    "https://api.github.com/organizations/1/team/1"
)
# Loader for the example team JSON payload ("orgs_team_example" fixture file).
get_team_example_data = helper.create_example_data_helper("orgs_team_example")
class TestTeam(helper.UnitHelper):
    """Unit tests for Team's repository-management methods."""
    described_class = Team
    example_data = get_team_example_data()
    def test_add_repository(self):
        """Show that one can add a repository to an organization team."""
        self.instance.add_repository("name-of-repo")
        self.put_called_with(url_for("repos/name-of-repo"), data={})
        self.instance.add_repository("name-of-repo", permission="push")
        self.put_called_with(
            url_for("repos/name-of-repo"), data={"permission": "push"}
        )
    def test_delete(self):
        """Show that a user can delete an organization team."""
        self.instance.delete()
        self.session.delete.assert_called_once_with(url_for())
    def test_edit(self):
        """Show that a user can edit a team."""
        self.instance.edit("name", "admin", 1234)
        self.patch_called_with(
            url_for(),
            data={
                "name": "name",
                "permission": "admin",
                "parent_team_id": 1234,
            },
        )
    def test_has_repository(self):
        """Show that a user can check if a team has access to a repository."""
        self.instance.has_repository("org/repo")
        self.session.get.assert_called_once_with(url_for("repos/org/repo"))
    def test_remove_repository(self):
        """Show that a user can remove a repository from a team."""
        self.instance.remove_repository("repo")
        # NOTE(review): leading slash differs from has_repository's
        # "repos/..." form — presumably normalized by url_for; confirm.
        self.session.delete.assert_called_once_with(url_for("/repos/repo"))
class TestTeamRequiresAuth(helper.UnitRequiresAuthenticationHelper):
    """Verify that every Team write/read operation requires authentication."""
    described_class = Team
    example_data = get_team_example_data()
    def test_add_repository_requires_auth(self):
        """Show that adding a repo to a team requires authentication."""
        with pytest.raises(GitHubError):
            self.instance.add_repository("repo")
    def METHOD_NAME(self):
        """Show that deleting a team requires authentication."""
        with pytest.raises(GitHubError):
            self.instance.delete()
    def test_edit_requires_auth(self):
        """Show that editing a team requires authentication."""
        with pytest.raises(GitHubError):
            self.instance.edit("name")
    def test_has_repository_requires_auth(self):
        """Show that checking a team's access to a repo needs auth."""
        with pytest.raises(GitHubError):
            self.instance.has_repository("org/repo")
    def test_remove_repository_requires_auth(self):
        """Show that removing a repo from a team requires authentication."""
        with pytest.raises(GitHubError):
            self.instance.remove_repository("repo")
class TestTeamIterator(helper.UnitIteratorHelper):
    """Tests for Team's paginated iterators (members, repositories)."""
    described_class = Team
    example_data = get_team_example_data()
    def test_members(self):
        """Show that one can iterate over all members of a Team."""
        i = self.instance.members()
        self.get_next(i)
        self.session.get.assert_called_once_with(
            url_for("members"), params={"per_page": 100}, headers={}
        )
    def test_members_roles(self):
        """Show that one can iterate of all maintainers of a Team."""
        i = self.instance.members(role="maintainer")
        self.get_next(i)
        # Role filtering requires the ironman preview media type.
        self.session.get.assert_called_once_with(
            url_for("members"),
            params={"per_page": 100, "role": "maintainer"},
            headers={"Accept": "application/vnd.github.ironman-preview+json"},
        )
    def test_members_excludes_fake_roles(self):
        """Show that one cannot pass a bogus role to the API."""
        i = self.instance.members(role="bogus-role")
        self.get_next(i)
        self.session.get.assert_called_once_with(
            url_for("members"), params={"per_page": 100}, headers={}
        )
    def test_members_requires_auth(self):
        """Show that one needs to authenticate to get team members."""
        self.session.has_auth.return_value = False
        with pytest.raises(GitHubError):
            self.instance.members()
    def test_repositories(self):
        """Show that one can iterate over an organization's repositories."""
        i = self.instance.repositories()
        self.get_next(i)
        self.session.get.assert_called_once_with(
            url_for("repos"), params={"per_page": 100}, headers={}
        )
import ctypes
import os
import pathlib
import platform
import json
from typing import Union, Dict, List, Any, Type, Optional
from enum import Enum, IntEnum, unique, auto
# Any JSON-compatible value: objects, arrays, and scalar leaf types.
JSON = Union[Dict[str, Any], List[Any], int, str, float, bool, Type[None]]
class InstanceOptionType(IntEnum):
    """Option keys accepted by ``Asst.set_instance_option``."""

    # Touch mode used for emulated device input.
    touch_type = 2
    # Whether deployment may pause mid-way.
    deployment_with_pause = 3
class Asst:
    """ctypes wrapper around the MaaCore shared library."""

    CallBackType = ctypes.CFUNCTYPE(
        None, ctypes.c_int, ctypes.c_char_p, ctypes.c_void_p)
    """
    Callback prototype; see ``my_callback`` for a usage example.

    :params:
        ``param1 message``: message type
        ``param2 details``: json string
        ``param3 arg``: custom argument
    """
    @staticmethod
    def load(path: Union[pathlib.Path, str], incremental_path: Optional[Union[pathlib.Path, str]] = None, user_dir: Optional[Union[pathlib.Path, str]] = None) -> bool:
        """
        Load the MaaCore shared library and its resources.

        :params:
            ``path``: folder containing the core library and resources
            ``incremental_path``: folder containing incremental resources
            ``user_dir``: folder for user data (logs, debug images, ...)
        :return: whether every load step succeeded
        """
        # Per-platform library file name and the environment variable the
        # dynamic loader consults to locate dependent libraries.
        platform_values = {
            'windows': {
                'libpath': 'MaaCore.dll',
                'environ_var': 'PATH'
            },
            'darwin': {
                'libpath': 'libMaaCore.dylib',
                'environ_var': 'DYLD_LIBRARY_PATH'
            },
            'linux': {
                'libpath': 'libMaaCore.so',
                'environ_var': 'LD_LIBRARY_PATH'
            }
        }
        lib_import_func = None

        platform_type = platform.system().lower()
        # WinDLL uses stdcall on Windows; CDLL (cdecl) everywhere else.
        if platform_type == 'windows':
            lib_import_func = ctypes.WinDLL
        else:
            lib_import_func = ctypes.CDLL

        Asst.__libpath = pathlib.Path(path) / platform_values[platform_type]['libpath']
        # Ensure the loader can find libraries shipped next to the core one.
        try:
            os.environ[platform_values[platform_type]['environ_var']] += os.pathsep + str(path)
        except KeyError:
            os.environ[platform_values[platform_type]['environ_var']] = os.pathsep + str(path)
        Asst.__lib = lib_import_func(str(Asst.__libpath))
        Asst.__set_lib_properties()

        ret: bool = True
        if user_dir:
            ret &= Asst.__lib.AsstSetUserDir(str(user_dir).encode('utf-8'))
        ret &= Asst.__lib.AsstLoadResource(str(path).encode('utf-8'))
        if incremental_path:
            ret &= Asst.__lib.AsstLoadResource(
                str(incremental_path).encode('utf-8'))

        return ret
    def __init__(self, callback: CallBackType = None, arg=None):
        """
        Create a native assistant instance.

        :params:
            ``callback``: callback function invoked with core messages
            ``arg``: custom argument passed back to the callback
        """
        if callback:
            self.__ptr = Asst.__lib.AsstCreateEx(callback, arg)
        else:
            self.__ptr = Asst.__lib.AsstCreate()
    def __del__(self):
        # Release the native instance and drop the now-dangling pointer.
        Asst.__lib.AsstDestroy(self.__ptr)
        self.__ptr = None
    def set_instance_option(self, option_type: InstanceOptionType, option_value: str):
        """
        Set an extra per-instance option.

        See ${MaaAssistantArknights}/src/MaaCore/Assistant.cpp#set_instance_option

        :params:
            ``option_type``: option key
            ``option_value``: option value
        :return: whether the option was set successfully
        """
        return Asst.__lib.AsstSetInstanceOption(self.__ptr,
                                                int(option_type), option_value.encode('utf-8'))
    def connect(self, adb_path: str, address: str, config: str = 'General'):
        """
        Connect to a device.

        :params:
            ``adb_path``: path to the adb executable
            ``address``: adb address plus port
            ``config``: adb config name, see resource/config.json
        :return: whether the connection succeeded
        """
        return Asst.__lib.AsstConnect(self.__ptr,
                                      adb_path.encode('utf-8'), address.encode('utf-8'), config.encode('utf-8'))
TaskId = int
def append_task(self, type_name: str, params: JSON = {}) -> TaskId:
"""
添加任务
:params:
``type_name``: 任务类型,请参考 docs/集成文档.md
``params``: 任务参数,请参考 docs/集成文档.md
:return: 任务 ID, 可用于 set_task_params 接口
"""
return Asst.__lib.AsstAppendTask(self.__ptr, type_name.encode('utf-8'), json.dumps(params, ensure_ascii=False).encode('utf-8'))
    def set_task_params(self, task_id: TaskId, params: JSON) -> bool:
        """
        Dynamically update the parameters of an already-appended task.

        :params:
            ``task_id``: task id, the return value of append_task
            ``params``: task parameters, same shape as for append_task
        :return: whether the update succeeded
        """
        return Asst.__lib.AsstSetTaskParams(self.__ptr, task_id, json.dumps(params, ensure_ascii=False).encode('utf-8'))
    def start(self) -> bool:
        """
        Start running the appended tasks.

        :return: whether starting succeeded
        """
        return Asst.__lib.AsstStart(self.__ptr)
    def stop(self) -> bool:
        """
        Stop execution and clear all queued tasks.

        :return: whether stopping succeeded
        """
        return Asst.__lib.AsstStop(self.__ptr)
    def running(self) -> bool:
        """
        Query whether the assistant is currently running.

        :return: True while tasks are executing
        """
        return Asst.__lib.AsstRunning(self.__ptr)
@staticmethod
def METHOD_NAME(level: str, message: str) -> None:
'''
打印日志
:params:
``level``: 日志等级标签
``message``: 日志内容
'''
Asst.__lib.AsstLog(level.encode('utf-8'), message.encode('utf-8'))
    def get_version(self) -> str:
        """
        Get the version of the loaded core library.

        :return: version string
        """
        return Asst.__lib.AsstGetVersion().decode('utf-8')
    @staticmethod
    def __set_lib_properties():
        # Declare argument/return types for every exported C function so
        # ctypes marshals pointers, booleans, and strings correctly.
        Asst.__lib.AsstSetUserDir.restype = ctypes.c_bool
        Asst.__lib.AsstSetUserDir.argtypes = (
            ctypes.c_char_p,)

        Asst.__lib.AsstLoadResource.restype = ctypes.c_bool
        Asst.__lib.AsstLoadResource.argtypes = (
            ctypes.c_char_p,)

        Asst.__lib.AsstCreate.restype = ctypes.c_void_p
        Asst.__lib.AsstCreate.argtypes = ()

        Asst.__lib.AsstCreateEx.restype = ctypes.c_void_p
        Asst.__lib.AsstCreateEx.argtypes = (
            ctypes.c_void_p, ctypes.c_void_p,)

        Asst.__lib.AsstDestroy.argtypes = (ctypes.c_void_p,)

        Asst.__lib.AsstSetInstanceOption.restype = ctypes.c_bool
        Asst.__lib.AsstSetInstanceOption.argtypes = (
            ctypes.c_void_p, ctypes.c_int, ctypes.c_char_p,)

        Asst.__lib.AsstConnect.restype = ctypes.c_bool
        Asst.__lib.AsstConnect.argtypes = (
            ctypes.c_void_p, ctypes.c_char_p, ctypes.c_char_p, ctypes.c_char_p,)

        Asst.__lib.AsstAppendTask.restype = ctypes.c_int
        Asst.__lib.AsstAppendTask.argtypes = (
            ctypes.c_void_p, ctypes.c_char_p, ctypes.c_char_p)

        Asst.__lib.AsstSetTaskParams.restype = ctypes.c_bool
        Asst.__lib.AsstSetTaskParams.argtypes = (
            ctypes.c_void_p, ctypes.c_int, ctypes.c_char_p)

        Asst.__lib.AsstStart.restype = ctypes.c_bool
        Asst.__lib.AsstStart.argtypes = (ctypes.c_void_p,)

        Asst.__lib.AsstStop.restype = ctypes.c_bool
        Asst.__lib.AsstStop.argtypes = (ctypes.c_void_p,)

        Asst.__lib.AsstRunning.restype = ctypes.c_bool
        Asst.__lib.AsstRunning.argtypes = (ctypes.c_void_p,)

        Asst.__lib.AsstGetVersion.restype = ctypes.c_char_p

        Asst.__lib.AsstLog.restype = None
        Asst.__lib.AsstLog.argtypes = (
            ctypes.c_char_p, ctypes.c_char_p)
@unique
class Message(Enum):
"""
回调消息
请参考 docs/回调消息.md
"""
InternalError = 0
InitFailed = auto()
ConnectionInfo = auto()
AllTasksCompleted = auto()
TaskChainError = 10000
TaskChainStart = auto()
TaskChainCompleted = auto()
TaskChainExtraInfo = auto()
TaskChainStopped = auto()
SubTaskError = 20000
SubTaskStart = auto()
SubTaskCompleted = auto()
SubTaskExtraInfo = auto()
SubTaskStopped = auto( |
# The MIT License
#
# Copyright (c) 2018 aio-libs team https://github.com/aio-libs/
# Copyright (c) 2017 Ocean S. A. https://ocean.io/
# Copyright (c) 2016-2017 WikiBusiness Corporation http://wikibusiness.org/
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import asyncio
import os
import weakref
from collections import OrderedDict
from functools import _CacheInfo, _make_key, partial, wraps
__version__ = "1.0.2"

__all__ = ("alru_cache", "clear_all_alru_caches")

# Track wrappers for global clearing only when running under CI.
_is_ci = (os.environ.get("CI") or "0").lower() in ("1", "true")

# Weak references so tracked wrappers can still be garbage collected.
_all_wrapped = weakref.WeakSet()
def clear_all_alru_caches():
    """Clear every alru_cache wrapper registered in ``_all_wrapped``."""
    for cached_wrapper in _all_wrapped:
        cached_wrapper.cache_clear()
def unpartial(fn):
    """Return the innermost callable wrapped by nested ``functools.partial``s."""
    inner = fn
    while hasattr(inner, "func"):
        inner = inner.func
    return inner
def _done_callback(fut, task):
    # Propagate the finished task's outcome (cancellation, exception, or
    # result) onto the user-facing future.
    if task.cancelled():
        fut.cancel()
        return

    exc = task.exception()
    if exc is not None:
        fut.set_exception(exc)
        return

    fut.set_result(task.result())
def _cache_invalidate(wrapped, typed, *args, **kwargs):
    """Drop the cache entry for this call signature.

    Returns True when an entry existed and was removed, False otherwise.
    """
    key = _make_key(args, kwargs, typed)

    exists = key in wrapped._cache

    if exists:
        wrapped._cache.pop(key)

    return exists


# Alias preserving the generated placeholder name so existing references
# (``wrapped.invalidate = partial(METHOD_NAME, ...)``) keep working.
METHOD_NAME = _cache_invalidate
def _cache_clear(wrapped):
    # Reset statistics, drop all cached futures, and forget in-flight tasks.
    wrapped.hits = wrapped.misses = 0
    wrapped._cache = OrderedDict()
    wrapped.tasks = set()
def _open(wrapped):
    # Re-enable a cache that was previously closed via ``close()``.
    if not wrapped.closed:
        raise RuntimeError("alru_cache is not closed")

    # ``close()`` must have fully drained: no stats, tasks, or entries left.
    was_closed = (
        wrapped.hits == wrapped.misses == len(wrapped.tasks) == len(wrapped._cache) == 0
    )

    if not was_closed:
        raise RuntimeError("alru_cache was not closed correctly")

    wrapped.closed = False
def _close(wrapped, *, cancel=False, return_exceptions=True):
    # Mark the cache closed and return an awaitable that drains its tasks.
    if wrapped.closed:
        raise RuntimeError("alru_cache is closed")

    wrapped.closed = True

    if cancel:
        for task in wrapped.tasks:
            if not task.done():  # not sure is it possible
                task.cancel()

    return _wait_closed(wrapped, return_exceptions=return_exceptions)
async def _wait_closed(wrapped, *, return_exceptions):
    """Await all in-flight tasks, then let ``_close_waited`` clear the cache."""
    wait_closed = asyncio.gather(*wrapped.tasks, return_exceptions=return_exceptions)
    wait_closed.add_done_callback(partial(_close_waited, wrapped))
    ret = await wait_closed

    # hack to get _close_waited callback to be executed
    await asyncio.sleep(0)

    return ret
def _close_waited(wrapped, _):
    # gather() done-callback: final cleanup once every task has finished.
    wrapped.cache_clear()
def _cache_info(wrapped, maxsize):
    """Return a functools-style CacheInfo snapshot for the wrapper."""
    return _CacheInfo(
        wrapped.hits,
        wrapped.misses,
        maxsize,
        len(wrapped._cache),
    )
def __cache_touch(wrapped, key):
    # Mark *key* as most recently used so LRU eviction order stays correct.
    try:
        wrapped._cache.move_to_end(key)
    except KeyError:  # not sure is it possible
        pass
def _cache_hit(wrapped, key):
    # Count the hit and refresh the entry's LRU position.
    wrapped.hits += 1
    __cache_touch(wrapped, key)
def _cache_miss(wrapped, key):
    # Count the miss and refresh the (newly inserted) entry's LRU position.
    wrapped.misses += 1
    __cache_touch(wrapped, key)
def alru_cache(
    fn=None,
    maxsize=128,
    typed=False,
    *,
    cache_exceptions=True,
):
    """LRU cache decorator for coroutine functions.

    Usable bare (``@alru_cache``) or with arguments
    (``@alru_cache(maxsize=None)``). ``maxsize=None`` means unbounded.
    """
    def wrapper(fn):
        _origin = unpartial(fn)

        if not asyncio.iscoroutinefunction(_origin):
            raise RuntimeError("Coroutine function is required, got {}".format(fn))

        # functools.partialmethod support
        if hasattr(fn, "_make_unbound_method"):
            fn = fn._make_unbound_method()

        @wraps(fn)
        async def wrapped(*fn_args, **fn_kwargs):
            if wrapped.closed:
                raise RuntimeError("alru_cache is closed for {}".format(wrapped))

            loop = asyncio.get_event_loop()

            key = _make_key(fn_args, fn_kwargs, typed)

            fut = wrapped._cache.get(key)

            if fut is not None:
                if not fut.done():
                    _cache_hit(wrapped, key)
                    # shield: a cancelled caller must not cancel the shared future
                    return await asyncio.shield(fut)

                exc = fut._exception

                if exc is None or cache_exceptions:
                    _cache_hit(wrapped, key)
                    return fut.result()

                # exception here and cache_exceptions == False
                wrapped._cache.pop(key)

            # Miss: run the coroutine as a task and publish its result via fut.
            fut = loop.create_future()
            task = loop.create_task(fn(*fn_args, **fn_kwargs))
            task.add_done_callback(partial(_done_callback, fut))

            wrapped.tasks.add(task)
            task.add_done_callback(wrapped.tasks.remove)

            wrapped._cache[key] = fut

            # Evict the least-recently-used entry when over capacity.
            if maxsize is not None and len(wrapped._cache) > maxsize:
                wrapped._cache.popitem(last=False)

            _cache_miss(wrapped, key)
            return await asyncio.shield(fut)

        # Initialize state and attach the public management API.
        _cache_clear(wrapped)
        wrapped._origin = _origin
        wrapped.closed = False
        wrapped.cache_info = partial(_cache_info, wrapped, maxsize)
        wrapped.cache_clear = partial(_cache_clear, wrapped)
        wrapped.invalidate = partial(METHOD_NAME, wrapped, typed)
        wrapped.close = partial(_close, wrapped)
        wrapped.open = partial(_open, wrapped)

        if _is_ci:
            _all_wrapped.add(wrapped)

        return wrapped

    if fn is None:
        return wrapper

    if callable(fn) or hasattr(fn, "_make_unbound_method"):
        return wrapper(fn)

    raise NotImplementedError("{} decorating is not supported".format(fn))
# Copyright (c) 2017 The University of Manchester
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pyNN.spiNNaker as p
import math
from spinnaker_testbase import BaseTestCase
from pyNN.utility.plotting import Figure, Panel
from pyNN.random import NumpyRNG
import matplotlib.pyplot as plt
def do_run(plot):
    """Build and run a 4x4 grid network with distance-dependent synapses.

    :param plot: when True, display spike raster and membrane-potential plots.
    :return: tuple (excitatory, inhibitory) of (source, target, weight, delay)
        connection lists retrieved from the simulator.
    """
    p.setup(timestep=1.0)

    cell_params_lif = {'cm': 0.25,
                       'i_offset': 0.0,
                       'tau_m': 20.0,
                       'tau_refrac': 2.0,
                       'tau_syn_E': 5.0,
                       'tau_syn_I': 5.0,
                       'v_reset': -70.0,
                       'v_rest': -65.0,
                       'v_thresh': -40.0
                       }

    def create_grid(n, label, dx=1.0, dy=1.0):
        # n*n IF_curr_exp neurons laid out on a 2D grid structure.
        grid_structure = p.Grid2D(dx=dx, dy=dy, x0=0.0, y0=0.0)
        return p.Population(n*n, p.IF_curr_exp(**cell_params_lif),
                            structure=grid_structure, label=label)

    n = 4
    weight_to_spike = 5.0
    delay = 5
    runtime = 200

    # Network
    grid = create_grid(n, 'grid')
    grid.set_max_atoms_per_core((16, 16))

    # SpikeInjector: one source firing at t=0 into neuron 0.
    injectionConnection = [(0, 0)]
    spikeArray = {'spike_times': [[0]]}
    inj_pop = p.Population(1, p.SpikeSourceArray(**spikeArray),
                           label='inputSpikes_1')
    p.Projection(inj_pop, grid, p.FromListConnector(injectionConnection),
                 p.StaticSynapse(weight=weight_to_spike, delay=delay))

    # Connectors
    exc_connector = p.AllToAllConnector()
    inh_connector = p.FixedProbabilityConnector(0.5, rng=NumpyRNG(seed=10101))

    # Wire grid: distance-dependent weights (exc) and delays (inh).
    exc_proj = p.Projection(grid, grid, exc_connector,
                            p.StaticSynapse(
                                weight="1.0 + (2.0*exp(-d))", delay=5))
    inh_proj = p.Projection(grid, grid, inh_connector,
                            p.StaticSynapse(
                                weight=1.5, delay="2 + 2.0*d"))

    grid.record(['v', 'spikes'])
    p.run(runtime)

    v = grid.get_data('v')
    spikes = grid.get_data('spikes')

    exc_weights_delays = exc_proj.get(['weight', 'delay'], 'list')
    inh_weights_delays = inh_proj.get(['weight', 'delay'], 'list')

    if plot:
        Figure(
            # raster plot of the presynaptic neurons' spike times
            Panel(spikes.segments[0].spiketrains,
                  yticks=True, markersize=0.2, xlim=(0, runtime), xticks=True),
            # membrane potential of the postsynaptic neurons
            Panel(v.segments[0].filter(name='v')[0],
                  ylabel="Membrane potential (mV)",
                  data_labels=[grid.label], yticks=True, xlim=(0, runtime),
                  xticks=True),
            title="Simple 2D grid distance-dependent weights and delays",
            annotations="Simulated with {}".format(p.name())
        )
        plt.show()

    p.end()

    return exc_weights_delays, inh_weights_delays
class DistanceDependentWeightsAndDelaysTest(BaseTestCase):
    """Check connections from do_run against the analytic formulas."""

    # Grid coordinates for neuron ids 0..15 (row-major 4x4 layout).
    POSITIONS = [(i, j) for i in range(4) for j in range(4)]

    def check_exc_weights(self, exc_weights_delays):
        # Excitatory: constant delay, distance-dependent weight.
        for conn in exc_weights_delays:
            # delays are constant
            self.assertEqual(5, conn[3])
            source_pos = self.POSITIONS[conn[0]]
            target_pos = self.POSITIONS[conn[1]]
            dist = math.sqrt((source_pos[0]-target_pos[0])**2 +
                             (source_pos[1]-target_pos[1])**2)
            weight = 1.0 + (2.0 * math.exp(-dist))
            # The weight from such an equation cannot be represented exactly
            # on SpiNNaker but in this case should be within 3 dp
            self.assertAlmostEqual(weight, conn[2], places=3)

    def check_inh_weights(self, inh_weights_delays):
        # Inhibitory: constant weight, distance-dependent delay.
        for conn in inh_weights_delays:
            # weights are constant
            self.assertEqual(1.5, conn[2])
            source_pos = self.POSITIONS[conn[0]]
            target_pos = self.POSITIONS[conn[1]]
            dist = math.sqrt((source_pos[0]-target_pos[0])**2 +
                             (source_pos[1]-target_pos[1])**2)
            # For ts=1.0 on SpiNNaker delays are rounded to nearest integer
            delay = round(2.0 + (2.0 * dist))
            self.assertEqual(delay, conn[3])

    def a_run(self):
        exc_weights_delays, inh_weights_delays = do_run(plot=False)
        # any checks go here
        self.check_exc_weights(exc_weights_delays)
        self.check_inh_weights(inh_weights_delays)

    def test_a_run(self):
        self.runsafe(self.a_run)
if __name__ == '__main__':
    # Manual run: show the plots and report the connection counts.
    exc_weights_delays, inh_weights_delays = do_run(plot=True)
    print(len(exc_weights_delays), len(inh_weights_delays))
import re
import pytest
from . import library # for the fixture to work
from . import TestFiles, temp_filename # noqa
def _escape_string(value: str) -> str:
    """Replace regex-special characters with double-backslash octal escapes.

    Currently only '.' is escaped (to match the raw PDF byte stream).
    """
    escaped = []
    for ch in value:
        if ch == ".":
            ch = "\\\\" + oct(ord(ch)).replace("o", "")
        escaped.append(ch)
    return "".join(escaped)
def assert_field_value(lib, name, value):
    # Verify the raw PDF bytes contain *value* near the field called *name*.
    value = _escape_string(value)
    content = lib.active_pdf_document.fileobject.read()
    assert re.search(rf"{name}[^>]+{value}".encode(), content)
@pytest.mark.parametrize(
    "trim,text",
    [
        (True, "ILMOITA VERKOSSA\nvero.fi/omavero"),
        (False, "ILMOITA VERKOSSA\nvero.fi/omavero\n"),
    ],
)
def test_convert(library, trim, text):
    """Conversion covers every page; trimming drops the trailing newline."""
    library.convert(TestFiles.vero_pdf, trim=trim)
    assert (
        len(library.active_pdf_document.has_converted_pages)
        == library.get_number_of_pages()
    )
    first_paragraph = library.active_pdf_document.get_page(1).content[0]
    assert first_paragraph.text == text

    # A secondary conversion wouldn't be triggered on already converted PDF files.
    library.convert(TestFiles.vero_pdf, trim=not trim)  # reverse trimming flag
    first_paragraph = library.active_pdf_document.get_page(1).content[0]
    assert first_paragraph.text == text  # still getting the same expected text
def test_get_input_fields(library):
    """All 65 fillable fields are discovered with empty values and rects."""
    fields = library.get_input_fields(TestFiles.vero_pdf)
    assert len(fields) == 65
    assert fields["Puhelinnumero"]["value"] is None
    assert isinstance(fields["Puhelinnumero"]["rect"], tuple)
def test_get_input_fields_replace_none_values(library):
    """With replace_none_value, empty fields carry their own name as value."""
    fields = library.get_input_fields(TestFiles.vero_pdf, replace_none_value=True)
    assert fields["Puhelinnumero"]["value"] == "Puhelinnumero"
def test_set_field_value(library):
    """Setting a field value updates the in-memory field mapping."""
    fields = library.get_input_fields(TestFiles.vero_pdf)
    new_number = "+358-55-12322121312"
    assert fields["Puhelinnumero"]["value"] is None
    library.set_field_value("Puhelinnumero", new_number)
    assert fields["Puhelinnumero"]["value"] == new_number
def test_set_field_value_encoding(library):
    """Text, checkbox, and list fields in a UTF-16 PDF survive a save/reload."""
    fields = library.get_input_fields(TestFiles.foersom_pdf, encoding="utf-16")
    name_field = "Given Name Text Box"
    assert not fields[name_field]["value"]
    new_name = "Mark"
    library.set_field_value(name_field, new_name)
    assert fields[name_field]["value"] == new_name

    driving_field = "Driving License Check Box"
    assert fields[driving_field]["value"].name == "Off"  # unchecked
    new_driving = "/Yes"
    library.set_field_value(driving_field, new_driving)  # checks it

    color_field = "Favourite Colour List Box"
    assert fields[color_field]["value"] == "Red"
    new_color = "Black"
    library.set_field_value(color_field, new_color)

    with temp_filename(suffix=".pdf") as tmp_file:
        library.save_field_values(output_path=tmp_file, use_appearances_writer=True)
        library.switch_to_pdf(tmp_file)
        # Fields can still be retrieved even after the PDF is saved.
        new_fields = library.get_input_fields()
        assert new_fields[name_field]["value"] == new_name
        assert new_fields[driving_field]["value"] == new_driving
        assert new_fields[color_field]["value"] == new_color
def test_set_field_value_checkbox(library):
    """A checkbox can be ticked and the tick persists through saving."""
    fields = library.get_input_fields(TestFiles.alianz_pdf)
    checkbox_name = "VeroeffentlichungInst"
    value_obj = fields[checkbox_name]["value"]
    assert value_obj.name == "Off"  # checkbox not checked yet

    # Tick the checkbox and save the new state of it.
    library.set_field_value(checkbox_name, "/Yes")
    with temp_filename(suffix=".pdf") as tmp_file:
        library.save_field_values(output_path=tmp_file, use_appearances_writer=True)
        library.switch_to_pdf(tmp_file)
        new_fields = library.get_input_fields()
        assert new_fields[checkbox_name]["value"] == "/Yes"
        assert_field_value(library, checkbox_name, "Yes")
@pytest.mark.parametrize("set_fields", [False, True])
def test_save_field_values_fields_exist(library, set_fields):
    """Values can be saved whether set individually or passed as newvals."""
    library.open_pdf(TestFiles.vero_pdf)
    to_insert = {
        "Puhelinnumero": "12313123",  # new number
        "Paivays": "01.04.2021",  # new date
    }
    with temp_filename(suffix="-fields.pdf") as tmp_file:
        if set_fields:
            # Keep non-empty values, because null fields will fail the saving.
            existing_fields = library.get_input_fields(replace_none_value=True)
            for name, value in to_insert.items():
                library.set_field_value(name, value)
                assert existing_fields[name]["value"] == value
            library.save_field_values(output_path=tmp_file)
        else:
            # There are no fields retrieved at all this time.
            library.save_field_values(output_path=tmp_file, newvals=to_insert)
        library.switch_to_pdf(tmp_file)
        # Check the raw saved bytes contain every inserted value.
        for name, value in to_insert.items():
            assert_field_value(library, name, value)
def test_dump_pdf_as_xml(library):
    """Dumping yields exactly one XML declaration, even when called twice."""
    head = '<?xml version="1.0" encoding="utf-8" ?>'
    xml = library.dump_pdf_as_xml(TestFiles.invoice_pdf)  # get non-empty output
    assert xml.count(head) == 1
    xml = library.dump_pdf_as_xml(TestFiles.invoice_pdf)  # no double output
    assert xml.count(head) == 1
def test_convert_after_line_margin_is_set(library):
    """A tiny line margin splits the heading into separate paragraphs."""
    library.set_convert_settings(line_margin=0.00000001)
    library.convert(TestFiles.vero_pdf)
    assert library.active_pdf_document
    page = library.active_pdf_document.get_page(1)
    first_paragraph, second_paragraph = page.content[0], page.content[1]
    assert first_paragraph.text == "ILMOITA VERKOSSA"
    assert second_paragraph.text == "vero.fi/omavero"
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.