id | label | text |
|---|---|---|
5,900 | render template string | from __future__ import annotations
from typing import Any, AsyncIterator, TYPE_CHECKING
from flask.templating import DispatchingJinjaLoader as DispatchingJinjaLoader # noqa: F401
from jinja2 import Environment as BaseEnvironment, Template
from .ctx import has_app_context, has_request_context
from .globals import app_ctx, current_app, request_ctx
from .helpers import stream_with_context
from .signals import before_render_template, template_rendered
if TYPE_CHECKING:
from .app import Quart # noqa
class Environment(BaseEnvironment):
"""Quart specific Jinja Environment.
This changes the default Jinja loader to use the
DispatchingJinjaLoader, and enables async Jinja by default.
"""
def __init__(self, app: Quart, **options: Any) -> None:
"""Create a Quart specific Jinja Environment.
Arguments:
app: The Quart app to bind to.
options: The standard Jinja Environment options.
"""
if "loader" not in options:
options["loader"] = app.create_global_jinja_loader()
options["enable_async"] = True
super().__init__(**options)
async def render_template(template_name_or_list: str | list[str], **context: Any) -> str:
"""Render the template with the context given.
Arguments:
template_name_or_list: Template name to render, or a list of
possible template names.
context: The variables to pass to the template.
"""
await current_app.update_template_context(context)
template = current_app.jinja_env.get_or_select_template(template_name_or_list) # type: ignore
return await _render(template, context, current_app._get_current_object()) # type: ignore
async def render_template_string(source: str, **context: Any) -> str:
"""Render the template source with the context given.
Arguments:
source: The template source code.
context: The variables to pass to the template.
"""
await current_app.update_template_context(context)
template = current_app.jinja_env.from_string(source)
return await _render(template, context, current_app._get_current_object()) # type: ignore
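# A hedged usage sketch (not part of the original module; the route and
# template are hypothetical): inside a Quart view the function is awaited
# directly, e.g.
#
#     @app.get("/hello")
#     async def hello():
#         return await render_template_string("Hello {{ name }}!", name="world")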
async def _render(template: Template, context: dict, app: Quart) -> str:
await before_render_template.send_async(
app, _sync_wrapper=app.ensure_async, template=template, context=context
)
rendered_template = await template.render_async(context)
await template_rendered.send_async(
app, _sync_wrapper=app.ensure_async, template=template, context=context
)
return rendered_template
async def _default_template_ctx_processor() -> dict[str, Any]:
context = {}
if has_app_context():
context["g"] = app_ctx.g
if has_request_context():
context["request"] = request_ctx.request
context["session"] = request_ctx.session
return context
async def stream_template(
template_name_or_list: str | Template | list[str | Template], **context: Any
) -> AsyncIterator[str]:
"""Render a template by name with the given context as a stream.
This returns an iterator of strings, which can be used as a
streaming response from a view.
Arguments:
template_name_or_list: The name of the template to render. If a
list is given, the first name to exist will be rendered.
context: The variables to make available in the template.
"""
await current_app.update_template_context(context)
template = current_app.jinja_env.get_or_select_template(template_name_or_list)
return await _stream(current_app._get_current_object(), template, context) # type: ignore
async def stream_template_string(source: str, **context: Any) -> AsyncIterator[str]:
"""Render a template from the given source with the *context* as a stream.
This returns an iterator of strings, which can
be used as a streaming response from a view.
Arguments:
source: The source code of the template to render.
context: The variables to make available in the template.
"""
await current_app.update_template_context(context)
template = current_app.jinja_env.from_string(source)
return await _stream(current_app._get_current_object(), template, context) # type: ignore
async def _stream(app: Quart, template: Template, context: dict[str, Any]) -> AsyncIterator[str]:
await before_render_template.send_async(
app, _sync_wrapper=app.ensure_async, template=template, context=context
)
async def generate() -> AsyncIterator[str]:
async for chunk in template.generate_async(context):
yield chunk
await template_rendered.send_async(
app, _sync_wrapper=app.ensure_async, template=template, context=context
)
# If a request context is active, keep it while generating.
if has_request_context():
return stream_with_context(generate)()
else:
return generate() |
5,901 | perform destroy | """Mixins for (API) views in the whole project."""
from django.core.exceptions import FieldDoesNotExist
from rest_framework import generics, mixins, status
from rest_framework.response import Response
from InvenTree.fields import InvenTreeNotesField
from InvenTree.helpers import remove_non_printable_characters, strip_html_tags
class CleanMixin():
"""Model mixin class which cleans inputs using the Mozilla bleach tools."""
# Define a list of field names which will *not* be cleaned
SAFE_FIELDS = []
def create(self, request, *args, **kwargs):
"""Override to clean data before processing it."""
serializer = self.get_serializer(data=self.clean_data(request.data))
serializer.is_valid(raise_exception=True)
self.perform_create(serializer)
headers = self.get_success_headers(serializer.data)
return Response(serializer.data, status=status.HTTP_201_CREATED, headers=headers)
def update(self, request, *args, **kwargs):
"""Override to clean data before processing it."""
partial = kwargs.pop('partial', False)
instance = self.get_object()
serializer = self.get_serializer(instance, data=self.clean_data(request.data), partial=partial)
serializer.is_valid(raise_exception=True)
self.perform_update(serializer)
if getattr(instance, '_prefetched_objects_cache', None):
# If 'prefetch_related' has been applied to a queryset, we need to
# forcibly invalidate the prefetch cache on the instance.
instance._prefetched_objects_cache = {}
return Response(serializer.data)
def clean_string(self, field: str, data: str) -> str:
"""Clean / sanitize a single input string.
Note that this function will *allow* orphaned <>& characters,
which would normally be escaped by bleach.
Nominally, the only thing that will be "cleaned" will be HTML tags
Ref: https://github.com/mozilla/bleach/issues/192
"""
cleaned = strip_html_tags(data, field_name=field)
# By default, newline characters are removed
remove_newline = True
try:
if hasattr(self, 'serializer_class'):
model = self.serializer_class.Meta.model
field = model._meta.get_field(field)
# The following field types allow newline characters
allow_newline = [
InvenTreeNotesField,
]
for field_type in allow_newline:
if issubclass(type(field), field_type):
remove_newline = False
break
except AttributeError:
pass
except FieldDoesNotExist:
pass
cleaned = remove_non_printable_characters(cleaned, remove_newline=remove_newline)
return cleaned
def clean_data(self, data: dict) -> dict:
"""Clean / sanitize data.
This uses Mozilla's bleach under the hood to neutralize certain HTML tags by
encoding them - as a result, script tags etc. no longer work.
The results can be longer than the input and might make some character
combinations `ugly`. Prevents XSS at the server level.
Args:
data (dict): Data that should be sanitized.
Returns:
dict: Provided data sanitized; still in the same order.
"""
clean_data = {}
for k, v in data.items():
if k in self.SAFE_FIELDS:
ret = v
elif isinstance(v, str):
ret = self.clean_string(k, v)
elif isinstance(v, dict):
ret = self.clean_data(v)
else:
ret = v
clean_data[k] = ret
return clean_data
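# Illustrative sketch of the cleaning behaviour (assumed inputs, not taken
# from the project's test suite):
#
#     view.clean_data({"name": "<script>alert(1)</script>", "meta": {"note": "<b>ok</b>"}})
#     # -> HTML tags in "name" and in the nested "meta" dict are neutralized,
#     #    while non-string values pass through unchanged.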
class ListAPI(generics.ListAPIView):
"""View for list API."""
class ListCreateAPI(CleanMixin, generics.ListCreateAPIView):
"""View for list and create API."""
class CreateAPI(CleanMixin, generics.CreateAPIView):
"""View for create API."""
class RetrieveAPI(generics.RetrieveAPIView):
"""View for retrieve API."""
pass
class RetrieveUpdateAPI(CleanMixin, generics.RetrieveUpdateAPIView):
"""View for retrieve and update API."""
pass
class CustomDestroyModelMixin:
"""This mixin was created pass the kwargs from the API to the models."""
def destroy(self, request, *args, **kwargs):
"""Custom destroy method to pass kwargs."""
instance = self.get_object()
self.perform_destroy(instance, **kwargs)
return Response(status=status.HTTP_204_NO_CONTENT)
def perform_destroy(self, instance, **kwargs):
"""Custom destroy logic that passes kwargs through to the model's delete method."""
instance.delete(**kwargs)
class CustomRetrieveUpdateDestroyAPIView(mixins.RetrieveModelMixin,
mixins.UpdateModelMixin,
CustomDestroyModelMixin,
generics.GenericAPIView):
"""This APIView was created pass the kwargs from the API to the models."""
def get(self, request, *args, **kwargs):
"""Custom get method to pass kwargs."""
return self.retrieve(request, *args, **kwargs)
def put(self, request, *args, **kwargs):
"""Custom put method to pass kwargs."""
return self.update(request, *args, **kwargs)
def patch(self, request, *args, **kwargs):
"""Custom patch method to pass kwargs."""
return self.partial_update(request, *args, **kwargs)
def delete(self, request, *args, **kwargs):
"""Custom delete method to pass kwargs."""
return self.destroy(request, *args, **kwargs)
class CustomRetrieveUpdateDestroyAPI(CleanMixin, CustomRetrieveUpdateDestroyAPIView):
"""This APIView was created pass the kwargs from the API to the models."""
class RetrieveUpdateDestroyAPI(CleanMixin, generics.RetrieveUpdateDestroyAPIView):
"""View for retrieve, update and destroy API."""
class UpdateAPI(CleanMixin, generics.UpdateAPIView):
"""View for update API.""" |
5,902 | can squeeze another process | """Run groups of experiments, hyperparameter sweeps, etc."""
import argparse
import os
import subprocess
import sys
import time
from os.path import join
from sample_factory.utils.utils import ensure_dir_exists, log
def add_os_parallelism_args(parser: argparse.ArgumentParser) -> argparse.ArgumentParser:
parser.add_argument("--num_gpus", default=1, type=int, help="How many local GPUs to use")
parser.add_argument("--max_parallel", default=4, type=int, help="Maximum simultaneous experiments")
parser.add_argument(
"--experiments_per_gpu",
default=-1,
type=int,
help="How many experiments can we squeeze on a single GPU. "
"Specify this option if and only if you are using launcher to run several experiments using OS-level"
"parallelism (--backend=processes)."
"In any other case use default value (-1) for not altering CUDA_VISIBLE_DEVICES at all."
"This will allow your experiments to use all GPUs available (as many as --num_gpu allows)"
"Helpful when e.g. you are running a single big PBT experiment.",
)
return parser
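# Minimal wiring sketch (not in the original file; the real launcher also
# registers options such as --train_dir and --pause_between elsewhere):
#
#     parser = argparse.ArgumentParser()
#     add_os_parallelism_args(parser)
#     args = parser.parse_args(["--num_gpus", "2", "--experiments_per_gpu", "2"])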
def run(run_description, args):
experiments = run_description.experiments
max_parallel = args.max_parallel
log.info("Starting processes with base cmds: %r", [e.cmd for e in experiments])
log.info("Max parallel processes is %d", max_parallel)
log.info("Monitor log files using\n\n\ttail -f train_dir/%s/**/**/sf_log.txt\n\n", run_description.run_name)
processes = []
processes_per_gpu = {g: [] for g in range(args.num_gpus)}
experiments = run_description.generate_experiments(args.train_dir)
next_experiment = next(experiments, None)
def find_least_busy_gpu():
least_busy_gpu = None
gpu_available_processes = 0
for gpu_id in range(args.num_gpus):
available_processes = args.experiments_per_gpu - len(processes_per_gpu[gpu_id])
if available_processes > gpu_available_processes:
gpu_available_processes = available_processes
least_busy_gpu = gpu_id
return least_busy_gpu, gpu_available_processes
def can_squeeze_another_process():
if len(processes) >= max_parallel:
return False
if args.experiments_per_gpu > 0:
least_busy_gpu, gpu_available_processes = find_least_busy_gpu()
if gpu_available_processes <= 0:
return False
return True
failed_processes = []
last_log_time = 0
log_interval = 3 # seconds
while len(processes) > 0 or next_experiment is not None:
while can_squeeze_another_process() and next_experiment is not None:
cmd, name, root_dir, exp_env_vars = next_experiment
cmd_tokens = cmd.split(" ")
# workaround to make sure we're running the correct python executable from our virtual env
if cmd_tokens[0].startswith("python"):
cmd_tokens[0] = sys.executable
log.debug("Using Python executable %s", cmd_tokens[0])
ensure_dir_exists(join(args.train_dir, root_dir))
envvars = os.environ.copy()
best_gpu = None
if args.experiments_per_gpu > 0:
best_gpu, best_gpu_available_processes = find_least_busy_gpu()
log.info(
"The least busy gpu is %d where we can run %d more processes",
best_gpu,
best_gpu_available_processes,
)
envvars["CUDA_VISIBLE_DEVICES"] = f"{best_gpu}"
log.info("Starting process %r", cmd_tokens)
if exp_env_vars is not None:
for key, value in exp_env_vars.items():
log.info("Adding env variable %r %r", key, value)
envvars[str(key)] = str(value)
process = subprocess.Popen(cmd_tokens, stdout=None, stderr=None, env=envvars)
process.gpu_id = best_gpu
process.proc_cmd = cmd
processes.append(process)
if process.gpu_id is not None:
processes_per_gpu[process.gpu_id].append(process.proc_cmd)
log.info("Started process %s on GPU %r", process.proc_cmd, process.gpu_id)
log.info("Waiting for %d seconds before starting next process", args.pause_between)
time.sleep(args.pause_between)
next_experiment = next(experiments, None)
remaining_processes = []
for process in processes:
if process.poll() is None:
remaining_processes.append(process)
continue
else:
if process.gpu_id is not None:
processes_per_gpu[process.gpu_id].remove(process.proc_cmd)
log.info("Process %r finished with code %r", process.proc_cmd, process.returncode)
if process.returncode != 0:
failed_processes.append((process.proc_cmd, process.pid, process.returncode))
log.error("WARNING: RETURN CODE IS %r", process.returncode)
processes = remaining_processes
if time.time() - last_log_time > log_interval:
if failed_processes:
log.error("Failed processes: %s", ", ".join([f"PID: {p[1]} code: {p[2]}" for p in failed_processes]))
last_log_time = time.time()
time.sleep(0.1)
log.info("Done!")
return 0 |
5,903 | set jemalloc version | #!/usr/bin/python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import json
import os
import sys
import subprocess as sp
DEFAULT_SEASTAR_PORT="3333"
JEMALLOC_244 = "libjemalloc.so.2.4.4"
JEMALLOC_251 = "libjemalloc.so.2.5.1"
def gen_cluster_info(workspace):
tf_config_json = os.environ.get("TF_CONFIG", "{}")
print("TF_CONFIG=", tf_config_json)
tf_config = json.loads(tf_config_json)
cluster = tf_config.get("cluster", {})
if cluster is None:
print("TF_CONFIG cluster is empty")
return
ps_hosts = []
worker_hosts = []
chief_hosts = []
node_list = []
for key, value in cluster.items():
if "ps" == key:
ps_hosts = value
elif "worker" == key:
worker_hosts = value
elif "chief" == key:
chief_hosts = value
node_list.extend(value)
os.environ['TF_SEASTAR_ENDPOINT_MAP_PATH'] = '/tmp/'
print("Start to gen endpoint_map file.")
#endpoint_map_path = os.path.join(workspace, ".endpoint_map")
endpoint_map_path = "/tmp/.endpoint_map"
with open(endpoint_map_path, 'w') as fout:
for node in node_list:
host = node[0:node.index(':')]
fout.write(node + "=" + host + ":" + DEFAULT_SEASTAR_PORT + "\n")
os.system("ls -ltr /tmp/.endpoint_map")
task = tf_config.get("task", {})
if task is None:
print("TF_CONFIG task is empty")
return
task_index = task['index']
job_name = task['type']
return ps_hosts, worker_hosts, chief_hosts, job_name, task_index
def copy_python_binary(local_dir):
cmd_str = "cp /usr/bin/python " + os.path.join(local_dir, "python_bin")
return sp.call(cmd_str, shell=True)
def set_jemalloc_version(workspace):
strategy = os.environ.get("MEM_USAGE_STRATEGY", "")
cmd_str = ""
if "xmin" == strategy:
cmd_str = "export JEMALLOC_VERSION=" + os.path.join(workspace, JEMALLOC_244) + ";"
cmd_str += "export MALLOC_CONF=decay_time:0;"
elif "xmid" == strategy:
cmd_str = "export JEMALLOC_VERSION=" + os.path.join(workspace, JEMALLOC_244) + ";"
elif "min" == strategy:
cmd_str = "export JEMALLOC_VERSION=" + os.path.join(workspace, JEMALLOC_251) + ";"
cmd_str += "export MALLOC_CONF=dirty_decay_ms:0,muzzy_decay_ms:0;"
elif "mid" == strategy:
cmd_str = "export JEMALLOC_VERSION=" + os.path.join(workspace, JEMALLOC_251) + ";"
cmd_str += "export MALLOC_CONF=background_thread:true,dirty_decay_ms:10000,muzzy_decay_ms:10000;"
elif "max" == strategy:
cmd_str = "export JEMALLOC_VERSION=" + os.path.join(workspace, JEMALLOC_251) + ";"
cmd_str += "export MALLOC_CONF=background_thread:true,metadata_thp:auto,dirty_decay_ms:240000,muzzy_decay_ms:240000;"
elif "244" == strategy:
cmd_str = "export JEMALLOC_VERSION=" + os.path.join(workspace, JEMALLOC_244) + ";"
elif "251" == strategy:
cmd_str = "export JEMALLOC_VERSION=" + os.path.join(workspace, JEMALLOC_251) + ";"
cmd_str += "export MALLOC_CONF=background_thread:true,metadata_thp:auto,dirty_decay_ms:60000,muzzy_decay_ms:60000;"
elif "close" == strategy:
pass
else:
cmd_str = "export JEMALLOC_VERSION=" + os.path.join(workspace, JEMALLOC_251) + ";"
cmd_str += "export MALLOC_CONF=background_thread:true,metadata_thp:auto,dirty_decay_ms:240000,muzzy_decay_ms:240000;"
return cmd_str
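# Worked example (hypothetical workspace path): with MEM_USAGE_STRATEGY=min
# and workspace "/ws", set_jemalloc_version returns
#
#     export JEMALLOC_VERSION=/ws/libjemalloc.so.2.5.1;export MALLOC_CONF=dirty_decay_ms:0,muzzy_decay_ms:0;
#
# which run_tensorflow_job later prefixes to the training command.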
def pip_install_requirements(workspace):
requirements_path = os.path.join(workspace, "requirements.txt")
if not os.path.exists(requirements_path):
return 0
cmd_str = "$(which pip) install -r " + requirements_path
print("try to install requirements.txt from " + requirements_path)
return sp.call(cmd_str, shell=True)
def run_tensorflow_job(workspace, tf_script, tf_args, tf_envs, set_jemalloc_version_cmd):
cmd_str = "cd " + workspace + ";"
if set_jemalloc_version_cmd:
cmd_str += set_jemalloc_version_cmd
cmd_str += "LD_PRELOAD=${JEMALLOC_VERSION} "
cmd_str += " ".join(tf_envs) + " $(which python) -u "
cmd_str += tf_script + " " + " ".join(tf_args)
print("run tensorflow command:", cmd_str)
return sp.call(cmd_str, shell=True)
def set_mkl_envs(job_name):
envs = []
if "ps" == job_name:
envs.append("OMP_NUM_THREADS=1")
envs.append("KMP_BLOCKTIME=0")
envs.append("MKL_ENABLE_INSTRUCTIONS=AVX2")
elif "worker" == job_name:
envs.append("OMP_NUM_THREADS=6")
envs.append("KMP_BLOCKTIME=0")
envs.append("MKL_ENABLE_INSTRUCTIONS=AVX2")
elif "evaluator" == job_name or "chief" == job_name:
envs.append("OMP_NUM_THREADS=1")
envs.append("KMP_BLOCKTIME=0")
envs.append("MKL_ENABLE_INSTRUCTIONS=AVX2")
else:
envs.append("OMP_NUM_THREADS=1")
envs.append("KMP_BLOCKTIME=0")
envs.append("MKL_ENABLE_INSTRUCTIONS=AVX2")
return envs
def set_network_threads(job_name):
envs = []
if "ps" == job_name:
envs.append("WORKER_DEFAULT_CORE_NUM=24")
elif "worker" == job_name:
envs.append("PS_DEFAULT_CORE_NUM=24")
return envs
if __name__ == "__main__":
print("start launching tensorflow job")
if "TF_WORKSPACE" not in os.environ:
print("TF_WORKSPACE env should be set.")
exit(1)
workspace = os.environ.get("TF_WORKSPACE", "")
if "TF_SCRIPT" not in os.environ:
print("TF_SCRIPT env should be set.")
exit(1)
tf_script = os.environ.get("TF_SCRIPT", "")
if "JEMALLOC_PATH" not in os.environ:
jemalloc_path = workspace
else:
jemalloc_path = os.environ.get("JEMALLOC_PATH", "")
#ret_code = copy_python_binary(workspace)
#if (ret_code != 0):
# exit(ret_code)
tf_args = sys.argv[1:]
tf_envs = []
#tf_envs.append("TF_SEASTAR_ENDPOINT_MAP_PATH=/tmp/")
if "TF_CONFIG" in os.environ:
ps_hosts, worker_hosts, chief_hosts, job_name, task_index = gen_cluster_info(workspace)
os.environ["TASK_INDEX"] = str(task_index)
os.environ["JOB_NAME"] = str(job_name)
#tf_envs.extend(set_mkl_envs(job_name))
set_jemalloc_version_cmd = set_jemalloc_version(jemalloc_path)
ret_code = pip_install_requirements(workspace)
if (ret_code != 0):
exit(ret_code)
ret_code = run_tensorflow_job(workspace, tf_script, tf_args, tf_envs, set_jemalloc_version_cmd)
if (ret_code != 0):
exit(ret_code) |
5,904 | test no weight | # Copyright (c) 2017 The University of Manchester
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pyNN.spiNNaker as sim
from spinnaker_testbase import BaseTestCase
WEIGHT = 5
DELAY = 2
class TestFromListConnector(BaseTestCase):
# NO unittest_setup() as sim.setup is called
def check_weights(
self, projection, aslist, w_index, d_index, sources, destinations):
from_pro = projection.get(["weight", "delay"], "list")
aslist.sort()
as_index = 0
for (source, dest, weight, delay) in from_pro:
from_as = aslist[as_index]
while from_as[0] >= sources:
as_index += 1
from_as = aslist[as_index]
while from_as[1] >= destinations:
as_index += 1
from_as = aslist[as_index]
self.assertEqual(from_as[0], source)
self.assertEqual(from_as[1], dest)
if w_index:
self.assertAlmostEqual(from_as[w_index], weight, 4)
else:
self.assertEqual(WEIGHT, weight)
if d_index:
self.assertAlmostEqual(from_as[d_index], delay, 4)
else:
self.assertEqual(DELAY, delay)
as_index += 1
while as_index < len(aslist):
from_as = aslist[as_index]
assert from_as[0] >= sources or from_as[1] >= destinations
as_index += 1
def check_other_connect(
self, aslist, column_names=None, w_index=2, d_index=3, sources=6,
destinations=8):
sim.setup(1.0)
pop1 = sim.Population(sources, sim.IF_curr_exp(), label="pop1")
pop2 = sim.Population(destinations, sim.IF_curr_exp(), label="pop2")
synapse_type = sim.StaticSynapse(weight=WEIGHT, delay=DELAY)
projection = sim.Projection(
pop1, pop2, sim.FromListConnector(
aslist, column_names=column_names),
synapse_type=synapse_type)
sim.run(0)
self.check_weights(
projection, aslist, w_index, d_index, sources, destinations)
sim.end()
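# The connection lists used below follow the FromListConnector convention:
# each tuple is (source_index, target_index[, weight][, delay]), with
# column_names naming any extra columns beyond the first two.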
def test_simple(self):
as_list = [
(0, 0, 0.1, 10),
(3, 0, 0.2, 11),
(2, 3, 0.3, 12),
(5, 1, 0.4, 13),
(0, 1, 0.5, 14),
]
self.check_other_connect(as_list)
def test_list_too_big(self):
as_list = [
(0, 0, 0.1, 10),
(13, 0, 0.2, 11),
(2, 13, 0.3, 12),
(5, 1, 0.4, 13),
(0, 1, 0.5, 14),
]
self.check_other_connect(as_list)
def test_no_delays(self):
as_list = [
(0, 0, 0.1),
(3, 0, 0.2),
(2, 3, 0.3),
(5, 1, 0.4),
(0, 1, 0.5),
]
self.check_other_connect(
as_list, column_names=["weight"], d_index=None)
def test_no_weight(self):
as_list = [
(0, 0, 10),
(3, 0, 11),
(2, 3, 12),
(5, 1, 13),
(0, 1, 14),
]
self.check_other_connect(
as_list, column_names=["delay"], d_index=2, w_index=None)
def test_invert(self):
as_list = [
(0, 0, 10, 0.1),
(3, 0, 11, 0.2),
(2, 3, 12, 0.3),
(5, 1, 13, 0.4),
(0, 1, 14, 0.5),
]
self.check_other_connect(
as_list, column_names=["delay", "weight"], w_index=3, d_index=2)
def test_big(self):
sources = 200
destinations = 300
aslist = []
for s in range(sources):
for d in range(destinations):
aslist.append((s, d, 5, 2))
self.check_other_connect(
aslist, column_names=None, w_index=2, d_index=3, sources=sources,
destinations=destinations)
def test_get_before_run(self):
sim.setup(1.0)
pop1 = sim.Population(3, sim.IF_curr_exp(), label="pop1")
pop2 = sim.Population(3, sim.IF_curr_exp(), label="pop2")
synapse_type = sim.StaticSynapse(weight=5, delay=1)
projection = sim.Projection(
pop1, pop2, sim.FromListConnector([[0, 0]]),
synapse_type=synapse_type)
weights = projection.get(["weight"], "list")
sim.run(0)
self.assertEqual(1, len(weights))
sim.end()
def test_using_static_synapse_singles(self):
sim.setup(timestep=1.0)
input = sim.Population(2, sim.SpikeSourceArray([0]), label="input")
pop = sim.Population(2, sim.IF_curr_exp(), label="pop")
as_list = [(0, 0), (1, 1)]
conn = sim.Projection(input, pop, sim.FromListConnector(as_list),
sim.StaticSynapse(weight=0.7, delay=3))
sim.run(1)
weights = conn.get(['weight', 'delay'], 'list')
sim.end()
target = [(0, 0, 0.7, 3), (1, 1, 0.7, 3)]
for i in range(2):
for j in range(2):
self.assertAlmostEqual(weights[i][j], target[i][j], places=3)
def test_using_half_static_synapse_singles(self):
sim.setup(timestep=1.0)
input = sim.Population(2, sim.SpikeSourceArray([0]), label="input")
pop = sim.Population(2, sim.IF_curr_exp(), label="pop")
as_list = [(0, 0, 0.7), (1, 1, 0.3)]
conn = sim.Projection(input, pop, sim.FromListConnector(
as_list, column_names=["weight"]),
sim.StaticSynapse(weight=0.6, delay=3))
sim.run(1)
weights = conn.get(['weight', 'delay'], 'list')
sim.end()
target = [(0, 0, 0.7, 3), (1, 1, 0.3, 3)]
for i in range(2):
for j in range(2):
self.assertAlmostEqual(weights[i][j], target[i][j], places=3)
def test_using_static_synapse_doubles(self):
sim.setup(timestep=1.0)
input = sim.Population(2, sim.SpikeSourceArray([0]), label="input")
pop = sim.Population(2, sim.IF_curr_exp(), label="pop")
as_list = [(0, 0), (1, 1)]
conn = sim.Projection(input, pop, sim.FromListConnector(as_list),
sim.StaticSynapse(weight=[0.7, 0.3],
delay=[3, 33]))
sim.run(1)
weights = conn.get(['weight', 'delay'], 'list')
target = [(0, 0, 0.7, 3), (1, 1, 0.3, 33)]
for i in range(2):
for j in range(2):
self.assertAlmostEqual(weights[i][j], target[i][j], places=3)
sim.end() |
5,905 | test is remote control supported | """Unit tests for pyatv.protocols.airplay.features."""
import pytest
from pyatv.auth.hap_pairing import (
NO_CREDENTIALS,
TRANSIENT_CREDENTIALS,
parse_credentials,
)
from pyatv.const import PairingRequirement, Protocol
from pyatv.core import MutableService
from pyatv.protocols.airplay.utils import (
AirPlayFlags,
AirPlayMajorVersion,
get_pairing_requirement,
get_protocol_version,
is_password_required,
is_remote_control_supported,
parse_features,
)
# These are not really valid credentials but parse_credentials accepts them (for now)
HAP_CREDS = parse_credentials("aa:bb:cc:dd")
LEGACY_CREDS = parse_credentials(":aa::bb")
@pytest.mark.parametrize(
"flags,output",
[
# Single feature flag
("0x00000001", AirPlayFlags.SupportsAirPlayVideoV1),
(
"0x40000003",
AirPlayFlags.HasUnifiedAdvertiserInfo
| AirPlayFlags.SupportsAirPlayPhoto
| AirPlayFlags.SupportsAirPlayVideoV1,
),
# Dual feature flag
(
"0x00000003,0x00000001",
AirPlayFlags.IsCarPlay
| AirPlayFlags.SupportsAirPlayPhoto
| AirPlayFlags.SupportsAirPlayVideoV1,
),
],
)
def test_parse_features(flags, output):
assert parse_features(flags) == output
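# For reference (an assumption read off the cases above rather than from
# pyatv's documentation): a dual flag string "0xLOW,0xHIGH" is combined
# into a single 64-bit value, (HIGH << 32) | LOW, e.g.
#
#     (0x00000001 << 32) | 0x00000003 == 0x100000003  # bits 0, 1 and 32 set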
@pytest.mark.parametrize(
"value",
["foo", "1234", "0x00000001,", ",0x00000001", "0x00000001,0x00000001,0x00000001"],
)
def test_bad_input(value):
with pytest.raises(ValueError):
parse_features(value)
@pytest.mark.parametrize(
"properties,requires_password",
[
({}, False),
({"pw": "false"}, False),
({"pw": "true"}, True),
({"pw": "TRUE"}, True),
({"sf": "0x1"}, False),
({"sf": "0x80"}, True),
({"flags": "0x1"}, False),
({"flags": "0x80"}, True),
],
)
def test_is_password_required(properties, requires_password):
service = MutableService("id", Protocol.RAOP, 0, properties)
assert is_password_required(service) == requires_password
@pytest.mark.asyncio
@pytest.mark.parametrize(
"props,expected_req",
[
({"sf": "0x1"}, PairingRequirement.NotNeeded),
({"sf": "0x200"}, PairingRequirement.Mandatory),
({"ft": "0x1"}, PairingRequirement.NotNeeded),
({"flags": "0x1"}, PairingRequirement.NotNeeded),
({"flags": "0x200"}, PairingRequirement.Mandatory),
({"features": "0x1"}, PairingRequirement.NotNeeded),
({"sf": "0x8"}, PairingRequirement.Mandatory),
({"flags": "0x8"}, PairingRequirement.Mandatory),
({"flags": "0x0"}, PairingRequirement.NotNeeded),
# Corresponds to only allow "Current User", which is not
# supported by pyatv right now
({"act": "2"}, PairingRequirement.Unsupported),
],
)
async def test_get_pairing_requirement(props, expected_req):
service = MutableService("id", Protocol.AirPlay, 0, props)
assert get_pairing_requirement(service) == expected_req
@pytest.mark.parametrize(
"props,credentials,expected_supported",
[
({}, NO_CREDENTIALS, False),
({"model": "AudioAccessory1,2"}, NO_CREDENTIALS, False),
({"model": "AudioAccessory1,2"}, TRANSIENT_CREDENTIALS, True),
({"model": "Foo"}, NO_CREDENTIALS, False),
({"osvers": "13.0"}, NO_CREDENTIALS, False),
({"osvers": "13.0", "model": "AppleTV5,6"}, NO_CREDENTIALS, False),
({"osvers": "13.0", "model": "AppleTV5,6"}, TRANSIENT_CREDENTIALS, False),
({"osvers": "13.0", "model": "AppleTV5,6"}, LEGACY_CREDS, False),
({"osvers": "13.0", "model": "AppleTV5,6"}, HAP_CREDS, True),
({"osvers": "8.4.4", "model": "AppleTV5,6"}, NO_CREDENTIALS, False),
],
)
def test_is_remote_control_supported(props, credentials, expected_supported):
service = MutableService("id", Protocol.AirPlay, 0, props)
assert is_remote_control_supported(service, credentials) == expected_supported
@pytest.mark.parametrize(
"props, expected_version",
[
# Fallback
({}, AirPlayMajorVersion.AirPlayV1),
# Used by RAOP
({"ft": "0x5A7FFFF7,0xE"}, AirPlayMajorVersion.AirPlayV1), # Apple TV 3
(
{"ft": "0x4A7FCA00,0xBC354BD0"},
AirPlayMajorVersion.AirPlayV2,
), # HomePod Mini
# Used by AirPlay
({"features": "0x5A7FFFF7,0xE"}, AirPlayMajorVersion.AirPlayV1), # Apple TV 3
(
{"features": "0x4A7FCA00,0xBC354BD0"},
AirPlayMajorVersion.AirPlayV2,
), # HomePod Mini
],
)
def test_get_protocol_version(props, expected_version):
service = MutableService("id", Protocol.AirPlay, 0, props)
assert get_protocol_version(service) == expected_version |
5,906 | get specific | from django.contrib.contenttypes.models import ContentType
from django.db.models import DEFERRED
from django.utils.functional import cached_property
class SpecificMixin:
"""
Mixin for models that support multi-table inheritance and provide a
``content_type`` field pointing to the specific model class, to provide
methods and properties for retrieving the specific instance of the model.
"""
def get_specific(self, deferred=False, copy_attrs=None, copy_attrs_exclude=None):
"""
Return this object in its most specific subclassed form.
By default, a database query is made to fetch all field values for the
specific object. If you only require access to custom methods or other
non-field attributes on the specific object, you can use
``deferred=True`` to avoid this query. However, any attempts to access
specific field values from the returned object will trigger additional
database queries.
By default, references to all non-field attribute values are copied
from current object to the returned one. This includes:
* Values set by a queryset, for example: annotations, or values set as
a result of using ``select_related()`` or ``prefetch_related()``.
* Any ``cached_property`` values that have been evaluated.
* Attributes set elsewhere in Python code.
For fine-grained control over which non-field values are copied to the
returned object, you can use ``copy_attrs`` to specify a complete list
of attribute names to include. Alternatively, you can use
``copy_attrs_exclude`` to specify a list of attribute names to exclude.
If called on an object that is already an instance of the most specific
class, the object will be returned as is, and no database queries or
other operations will be triggered.
If the object was originally created using a model that has since
been removed from the codebase, an instance of the base class will be
returned (without any custom field values or other functionality
present on the original class). Usually, deleting these objects is the
best course of action, but there is currently no safe way for Wagtail
to do that at migration time.
"""
model_class = self.specific_class
if model_class is None:
# The codebase and database are out of sync (e.g. the model exists
# on a different git branch and migrations were not applied or
# reverted before switching branches). So, the best we can do is
# return the page in its current form.
return self
if isinstance(self, model_class):
# self is already an instance of the most specific class.
return self
if deferred:
# Generate a tuple of values in the order expected by __init__(),
# with missing values substituted with DEFERRED
values = tuple(
getattr(self, f.attname, self.pk if f.primary_key else DEFERRED)
for f in model_class._meta.concrete_fields
)
# Create object from known attribute values
specific_obj = model_class(*values)
specific_obj._state.adding = self._state.adding
else:
# Fetch object from database
specific_obj = model_class._default_manager.get(id=self.id)
# Copy non-field attribute values
if copy_attrs is not None:
for attr in (attr for attr in copy_attrs if attr in self.__dict__):
setattr(specific_obj, attr, getattr(self, attr))
else:
exclude = copy_attrs_exclude or ()
for k, v in ((k, v) for k, v in self.__dict__.items() if k not in exclude):
# only set values that haven't already been set
specific_obj.__dict__.setdefault(k, v)
return specific_obj
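# A hedged usage sketch (not part of the original mixin; assumes a Wagtail
# Page queryset is available):
#
#     page = Page.objects.first()
#     full = page.get_specific()                 # one query for all fields
#     light = page.get_specific(deferred=True)   # no query; fields load lazily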
@cached_property
def specific(self):
"""
Returns this object in its most specific subclassed form with all field
values fetched from the database. The result is cached in memory.
"""
return self.get_specific()
@cached_property
def specific_deferred(self):
"""
Returns this object in its most specific subclassed form without any
additional field values being fetched from the database. The result
is cached in memory.
"""
return self.get_specific(deferred=True)
@cached_property
def specific_class(self):
"""
Return the class that this object would be if instantiated in its
most specific form.
If the model class can no longer be found in the codebase, and the
relevant ``ContentType`` has been removed by a database migration,
the return value will be ``None``.
If the model class can no longer be found in the codebase, but the
relevant ``ContentType`` is still present in the database (usually a
result of switching between git branches without running or reverting
database migrations beforehand), the return value will be ``None``.
"""
return self.cached_content_type.model_class()
@property
def cached_content_type(self):
"""
Return this object's ``content_type`` value from the ``ContentType``
model's cached manager, which will avoid a database query if the
content type is already in memory.
"""
return ContentType.objects.get_for_id(self.content_type_id) |
5,907 | visit | """Generic visitor pattern implementation for Python objects."""
import enum
class Visitor(object):
defaultStop = False
@classmethod
def _register(celf, clazzes_attrs):
assert celf != Visitor, "Subclass Visitor instead."
if "_visitors" not in celf.__dict__:
celf._visitors = {}
def wrapper(method):
assert method.__name__ == "visit"
for clazzes, attrs in clazzes_attrs:
if type(clazzes) != tuple:
clazzes = (clazzes,)
if type(attrs) == str:
attrs = (attrs,)
for clazz in clazzes:
_visitors = celf._visitors.setdefault(clazz, {})
for attr in attrs:
assert attr not in _visitors, (
"Oops, class '%s' has visitor function for '%s' defined already."
% (clazz.__name__, attr)
)
_visitors[attr] = method
return None
return wrapper
@classmethod
def register(celf, clazzes):
if type(clazzes) != tuple:
clazzes = (clazzes,)
return celf._register([(clazzes, (None,))])
@classmethod
def register_attr(celf, clazzes, attrs):
clazzes_attrs = []
if type(clazzes) != tuple:
clazzes = (clazzes,)
if type(attrs) == str:
attrs = (attrs,)
for clazz in clazzes:
clazzes_attrs.append((clazz, attrs))
return celf._register(clazzes_attrs)
@classmethod
def register_attrs(celf, clazzes_attrs):
return celf._register(clazzes_attrs)
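# A minimal illustrative subclass (hypothetical, not part of this module)
# showing the default dispatch through visitDict/visitList/visitLeaf:
#
#     class LeafCounter(Visitor):
#         def __init__(self):
#             self.count = 0
#         def visitLeaf(self, obj, *args, **kwargs):
#             self.count += 1
#
#     counter = LeafCounter()
#     counter.visit({"a": [1, 2], "b": 3})
#     assert counter.count == 3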
@classmethod
def _visitorsFor(celf, thing, _default={}):
typ = type(thing)
for celf in celf.mro():
_visitors = getattr(celf, "_visitors", None)
if _visitors is None:
break
m = celf._visitors.get(typ, None)
if m is not None:
return m
return _default
def visitObject(self, obj, *args, **kwargs):
"""Called to visit an object. This function loops over all non-private
attributes of the object and calls any user-registered (via
@register_attr() or @register_attrs()) visit() functions.
If there is no user-registered visit function, or if there is and it
returns True, or it returns None (or doesn't return anything) and
visitor.defaultStop is False (default), then the visitor will proceed
to call self.visitAttr()"""
keys = sorted(vars(obj).keys())
_visitors = self._visitorsFor(obj)
defaultVisitor = _visitors.get("*", None)
for key in keys:
if key[0] == "_":
continue
value = getattr(obj, key)
visitorFunc = _visitors.get(key, defaultVisitor)
if visitorFunc is not None:
ret = visitorFunc(self, obj, key, value, *args, **kwargs)
if ret == False or (ret is None and self.defaultStop):
continue
self.visitAttr(obj, key, value, *args, **kwargs)
def visitAttr(self, obj, attr, value, *args, **kwargs):
"""Called to visit an attribute of an object."""
self.visit(value, *args, **kwargs)
def visitList(self, obj, *args, **kwargs):
"""Called to visit any value that is a list."""
for value in obj:
self.visit(value, *args, **kwargs)
def visitDict(self, obj, *args, **kwargs):
"""Called to visit any value that is a dictionary."""
for value in obj.values():
self.visit(value, *args, **kwargs)
def visitLeaf(self, obj, *args, **kwargs):
"""Called to visit any value that is not an object, list,
or dictionary."""
pass
def visit(self, obj, *args, **kwargs):
"""This is the main entry to the visitor. The visitor will visit object
obj.
The visitor will first determine if there is a registered (via
@register()) visit function for the type of object. If there is, it
will be called, and (visitor, obj, *args, **kwargs) will be passed to
the user visit function.
If there is no user-registered visit function, or if there is and it
returns True, or it returns None (or doesn't return anything) and
visitor.defaultStop is False (default), then the visitor will proceed
to dispatch to one of self.visitObject(), self.visitList(),
self.visitDict(), or self.visitLeaf() (any of which can be overridden in
a subclass)."""
visitorFunc = self._visitorsFor(obj).get(None, None)
if visitorFunc is not None:
ret = visitorFunc(self, obj, *args, **kwargs)
if ret == False or (ret is None and self.defaultStop):
return
if hasattr(obj, "__dict__") and not isinstance(obj, enum.Enum):
self.visitObject(obj, *args, **kwargs)
elif isinstance(obj, list):
self.visitList(obj, *args, **kwargs)
elif isinstance(obj, dict):
self.visitDict(obj, *args, **kwargs)
else:
self.visitLeaf(obj, *args, **kwargs) |
5,908 | test on ready override | import os
import unittest
import tempfile
import time
from mock import Mock, PropertyMock, patch
from patroni.dcs.raft import Cluster, DynMemberSyncObj, KVStoreTTL, \
Raft, RaftError, SyncObjUtility, TCPTransport, _TCPTransport
from pysyncobj import SyncObjConf, FAIL_REASON
def remove_files(prefix):
for f in ('journal', 'journal.meta', 'dump'):
f = prefix + f
if os.path.isfile(f):
for i in range(0, 15):
try:
if os.path.isfile(f):
os.unlink(f)
break
else:
break
except Exception:
time.sleep(1.0)
class TestTCPTransport(unittest.TestCase):
@patch.object(TCPTransport, '__init__', Mock())
@patch.object(TCPTransport, 'setOnUtilityMessageCallback', Mock())
@patch.object(TCPTransport, '_connectIfNecessarySingle', Mock(side_effect=Exception))
def test__connectIfNecessarySingle(self):
t = _TCPTransport(Mock(), None, [])
self.assertFalse(t._connectIfNecessarySingle(None))
@patch('pysyncobj.tcp_server.TcpServer.bind', Mock())
class TestDynMemberSyncObj(unittest.TestCase):
@patch('pysyncobj.tcp_server.TcpServer.bind', Mock())
def setUp(self):
self.conf = SyncObjConf(appendEntriesUseBatch=False, dynamicMembershipChange=True, autoTick=False)
self.so = DynMemberSyncObj('127.0.0.1:1234', ['127.0.0.1:1235'], self.conf)
@patch.object(SyncObjUtility, 'executeCommand')
def test_add_member(self, mock_execute_command):
mock_execute_command.return_value = [{'addr': '127.0.0.1:1235'}, {'addr': '127.0.0.1:1236'}]
mock_execute_command.ver = 0
DynMemberSyncObj('127.0.0.1:1234', ['127.0.0.1:1235'], self.conf)
self.conf.dynamicMembershipChange = False
DynMemberSyncObj('127.0.0.1:1234', ['127.0.0.1:1235'], self.conf)
def test_getMembers(self):
mock_conn = Mock()
self.so._SyncObj__transport._onIncomingMessageReceived(mock_conn, ['members'])
def test__SyncObj__doChangeCluster(self):
self.so._SyncObj__doChangeCluster(['add', '127.0.0.1:1236'])
@patch.object(SyncObjConf, 'fullDumpFile', PropertyMock(return_value=None), create=True)
@patch.object(SyncObjConf, 'journalFile', PropertyMock(return_value=None), create=True)
class TestKVStoreTTL(unittest.TestCase):
@patch.object(SyncObjConf, 'fullDumpFile', PropertyMock(return_value=None), create=True)
@patch.object(SyncObjConf, 'journalFile', PropertyMock(return_value=None), create=True)
def setUp(self):
callback = Mock()
callback.replicated = False
self.so = KVStoreTTL(None, callback, callback, self_addr='127.0.0.1:1234')
self.so.startAutoTick()
self.so.set_retry_timeout(10)
def tearDown(self):
if self.so:
self.so.destroy()
def test_set(self):
self.assertTrue(self.so.set('foo', 'bar', prevExist=False, ttl=30))
self.assertFalse(self.so.set('foo', 'bar', prevExist=False, ttl=30))
self.assertFalse(self.so.retry(self.so._set, 'foo', {'value': 'buz', 'created': 1, 'updated': 1}, prevValue=''))
self.assertTrue(self.so.retry(self.so._set, 'foo', {'value': 'buz', 'created': 1, 'updated': 1}))
with patch.object(KVStoreTTL, 'retry', Mock(side_effect=RaftError(''))):
self.assertFalse(self.so.set('foo', 'bar'))
self.assertRaises(RaftError, self.so.set, 'foo', 'bar', handle_raft_error=False)
def test_delete(self):
self.so.autoTickPeriod = 0.2
self.so.set('foo', 'bar')
self.so.set('fooo', 'bar')
self.assertFalse(self.so.delete('foo', prevValue='buz'))
self.assertTrue(self.so.delete('foo', recursive=True))
self.assertFalse(self.so.retry(self.so._delete, 'foo', prevValue=''))
with patch.object(KVStoreTTL, 'retry', Mock(side_effect=RaftError(''))):
self.assertFalse(self.so.delete('foo'))
def test_expire(self):
self.so.set('foo', 'bar', ttl=0.001)
time.sleep(1)
self.assertIsNone(self.so.get('foo'))
self.assertEqual(self.so.get('foo', recursive=True), {})
@patch('time.sleep', Mock())
def test_retry(self):
return_values = [FAIL_REASON.QUEUE_FULL] * 2 + [FAIL_REASON.SUCCESS, FAIL_REASON.REQUEST_DENIED]
def test(callback):
callback(True, return_values.pop(0))
with patch('time.time', Mock(side_effect=[1, 100])):
self.assertRaises(RaftError, self.so.retry, test)
self.assertTrue(self.so.retry(test))
self.assertFalse(self.so.retry(test))
def test_on_ready_override(self):
self.assertTrue(self.so.set('foo', 'bar'))
self.so.destroy()
self.so = None
so = KVStoreTTL(Mock(), None, None, self_addr='127.0.0.1:1234',
partner_addrs=['127.0.0.1:1235'], patronictl=True)
so.doTick(0)
so.destroy()
class TestRaft(unittest.TestCase):
_TMP = tempfile.gettempdir()
def test_raft(self):
raft = Raft({'ttl': 30, 'scope': 'test', 'name': 'pg', 'self_addr': '127.0.0.1:1234',
'retry_timeout': 10, 'data_dir': self._TMP,
'database': 'citus', 'group': 0})
raft.reload_config({'retry_timeout': 20, 'ttl': 60, 'loop_wait': 10})
self.assertTrue(raft._sync_obj.set(raft.members_path + 'legacy', '{"version":"2.0.0"}'))
self.assertTrue(raft.touch_member(''))
self.assertTrue(raft.initialize())
self.assertTrue(raft.cancel_initialization())
self.assertTrue(raft.set_config_value('{}'))
self.assertTrue(raft.write_sync_state('foo', 'bar'))
self.assertFalse(raft.write_sync_state('foo', 'bar', 1))
raft._citus_group = '1'
self.assertTrue(raft.manual_failover('foo', 'bar'))
raft._citus_group = '0'
self.assertTrue(raft.take_leader())
cluster = raft.get_cluster()
self.assertIsInstance(cluster, Cluster)
self.assertIsInstance(cluster.workers[1], Cluster)
leader = cluster.leader
self.assertTrue(raft.delete_leader(leader))
self.assertTrue(raft._sync_obj.set(raft.status_path, '{"optime":1234567,"slots":{"ls":12345}}'))
raft.get_cluster()
self.assertTrue(raft.update_leader(leader, '1', failsafe={'foo': 'bat'}))
self.assertTrue(raft._sync_obj.set(raft.failsafe_path, '{"foo"}'))
self.assertTrue(raft._sync_obj.set(raft.status_path, '{'))
raft.get_citus_coordinator()
self.assertTrue(raft.delete_sync_state())
self.assertTrue(raft.set_history_value(''))
self.assertTrue(raft.delete_cluster())
raft._citus_group = '1'
self.assertTrue(raft.delete_cluster())
raft._citus_group = None
raft.get_cluster()
raft.watch(None, 0.001)
raft._sync_obj.destroy()
def tearDown(self):
remove_files(os.path.join(self._TMP, '127.0.0.1:1234.'))
def setUp(self):
self.tearDown()
@patch('patroni.dcs.raft.KVStoreTTL')
@patch('threading.Event')
def test_init(self, mock_event, mock_kvstore):
mock_kvstore.return_value.applied_local_log = False
mock_event.return_value.is_set.side_effect = [False, True]
self.assertIsNotNone(Raft({'ttl': 30, 'scope': 'test', 'name': 'pg', 'patronictl': True,
'self_addr': '1', 'data_dir': self._TMP})) |
5,909 | simple encrypt | import functools
import secrets
from base64 import urlsafe_b64decode, urlsafe_b64encode
from typing import Optional, Tuple
import nacl.pwhash
from nacl.bindings import crypto_aead
from nacl.bindings.crypto_generichash import generichash_blake2b_salt_personal
from nacl.bindings.utils import sodium_memcmp
from nacl.exceptions import InvalidkeyError
from nacl.utils import random as random_bytes
from couchers.config import config
def b64encode(data: bytes) -> str:
return urlsafe_b64encode(data).decode("ascii")
def b64decode(data: str) -> bytes:
return urlsafe_b64decode(data)
def urlsafe_random_bytes(length=32) -> str:
return b64encode(random_bytes(length))
def urlsafe_secure_token():
"""
A cryptographically secure random token that can be put in a URL
"""
return urlsafe_random_bytes(32)
def cookiesafe_secure_token():
return random_hex(32)
def hash_password(password: str):
return nacl.pwhash.str(password.encode("utf-8"))
def verify_password(hashed: bytes, password: str):
try:
correct = nacl.pwhash.verify(hashed, password.encode("utf-8"))
return correct
except InvalidkeyError:
return False
def random_hex(length=32):
"""
Length in binary
"""
return random_bytes(length).hex()
def secure_compare(val1, val2):
return sodium_memcmp(val1, val2)
def generate_hash_signature(message: bytes, key: bytes) -> bytes:
"""
Computes a blake2b keyed hash for the message.
This can be used as a fast yet secure symmetric signature: by checking that
the hashes agree, we can make sure the signature was generated by a party
with knowledge of the key.
"""
return generichash_blake2b_salt_personal(message, key=key, digest_size=32)
def verify_hash_signature(message: bytes, key: bytes, sig: bytes) -> bool:
"""
Verifies a hash signature generated with generate_hash_signature.
Returns true if the signature matches, otherwise false.
"""
return secure_compare(sig, generate_hash_signature(message, key))
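# Sign-and-verify sketch (illustrative, throwaway key):
#
#     key = random_bytes(32)
#     sig = generate_hash_signature(b"msg", key)
#     assert verify_hash_signature(b"msg", key, sig)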
def generate_random_5digit_string():
"""Return a random 5-digit string"""
return "%05d" % secrets.randbelow(100000)
def verify_token(a: str, b: str):
"""Return True if strings a and b are equal, in such a way as to
reduce the risk of timing attacks.
"""
return secrets.compare_digest(a, b)
@functools.lru_cache
def get_secret(name: str):
"""
Derives a secret key from the root secret using a key derivation function
"""
return generate_hash_signature(name.encode("utf8"), config["SECRET"])
UNSUBSCRIBE_KEY_NAME = "unsubscribe"
PAGE_TOKEN_KEY_NAME = "pagination"
# AEAD: Authenticated Encryption with Associated Data
_aead_key_len = crypto_aead.crypto_aead_xchacha20poly1305_ietf_KEYBYTES
_aead_nonce_len = crypto_aead.crypto_aead_xchacha20poly1305_ietf_NPUBBYTES
def aead_generate_nonce():
return random_bytes(_aead_nonce_len)
def aead_generate_key():
return random_bytes(_aead_key_len)
def aead_encrypt(key: bytes, secret_data: bytes, plaintext_data: bytes = b"", nonce: Optional[bytes] = None) -> Tuple[bytes, bytes]:
if not nonce:
nonce = aead_generate_nonce()
encrypted = crypto_aead.crypto_aead_xchacha20poly1305_ietf_encrypt(secret_data, plaintext_data, nonce, key)
return nonce, encrypted
def aead_decrypt(key: bytes, nonce: bytes, encrypted_secret_data: bytes, plaintext_data: bytes = b"") -> bytes:
return crypto_aead.crypto_aead_xchacha20poly1305_ietf_decrypt(encrypted_secret_data, plaintext_data, nonce, key)
def simple_encrypt(key_name: str, data: bytes) -> bytes:
key = get_secret(key_name)
nonce, data = aead_encrypt(key, data)
return nonce + data
def simple_decrypt(key_name: str, data: bytes) -> bytes:
key = get_secret(key_name)
nonce, data = data[:_aead_nonce_len], data[_aead_nonce_len:]
return aead_decrypt(key, nonce, data)
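# Round-trip sketch (illustrative; the key name is hypothetical and
# get_secret requires config["SECRET"] to be set):
#
#     blob = simple_encrypt("example-key", b"hello")
#     assert simple_decrypt("example-key", blob) == b"hello"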
def encrypt_page_token(plaintext_page_token: str):
return b64encode(simple_encrypt(PAGE_TOKEN_KEY_NAME, plaintext_page_token.encode("utf8")))
def decrypt_page_token(encrypted_page_token: str):
return simple_decrypt(PAGE_TOKEN_KEY_NAME, b64decode(encrypted_page_token)).decode("utf8") |
5,910 | set up | # Contents in this file are referenced from the sphinx-generated docs.
# "magictoken" is used for markers as beginning and ending of example text.
import unittest
from numba.tests.support import captured_stdout, skip_parfors_unsupported
from numba import set_parallel_chunksize
from numba.tests.support import TestCase
@skip_parfors_unsupported
class ChunksizeExamplesTest(TestCase):
_numba_parallel_test_ = False
def setUp(self):
set_parallel_chunksize(0)
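# A chunk size of 0 restores Numba's default (automatic) scheduling.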
def tearDown(self):
set_parallel_chunksize(0)
def test_unbalanced_example(self):
with captured_stdout():
# magictoken.ex_unbalanced.begin
from numba import (njit,
prange,
)
import numpy as np
@njit(parallel=True)
def func1():
n = 100
vals = np.empty(n)
# The work in each iteration of the following prange
# loop is proportional to its index.
for i in prange(n):
cur = i + 1
for j in range(i):
if cur % 2 == 0:
cur //= 2
else:
cur = cur * 3 + 1
vals[i] = cur
return vals
result = func1()
# magictoken.ex_unbalanced.end
self.assertPreciseEqual(result, func1.py_func())
def test_chunksize_manual(self):
with captured_stdout():
# magictoken.ex_chunksize_manual.begin
from numba import (njit,
prange,
set_parallel_chunksize,
get_parallel_chunksize,
)
@njit(parallel=True)
def func1(n):
acc = 0
print(get_parallel_chunksize()) # Will print 4.
for i in prange(n):
print(get_parallel_chunksize()) # Will print 0.
acc += i
print(get_parallel_chunksize()) # Will print 4.
return acc
@njit(parallel=True)
def func2(n):
acc = 0
# This version gets the previous chunksize explicitly.
old_chunksize = get_parallel_chunksize()
set_parallel_chunksize(8)
for i in prange(n):
acc += i
set_parallel_chunksize(old_chunksize)
return acc
# This version saves the previous chunksize as returned
# by set_parallel_chunksize.
old_chunksize = set_parallel_chunksize(4)
result1 = func1(12)
result2 = func2(12)
result3 = func1(12)
set_parallel_chunksize(old_chunksize)
# magictoken.ex_chunksize_manual.end
self.assertPreciseEqual(result1, func1.py_func(12))
self.assertPreciseEqual(result2, func2.py_func(12))
self.assertPreciseEqual(result3, func1.py_func(12))
def test_chunksize_with(self):
with captured_stdout():
# magictoken.ex_chunksize_with.begin
from numba import njit, prange, parallel_chunksize
@njit(parallel=True)
def func1(n):
acc = 0
for i in prange(n):
acc += i
return acc
@njit(parallel=True)
def func2(n):
acc = 0
with parallel_chunksize(8):
for i in prange(n):
acc += i
return acc
with parallel_chunksize(4):
result1 = func1(12)
result2 = func2(12)
result3 = func1(12)
# magictoken.ex_chunksize_with.end
self.assertPreciseEqual(result1, func1.py_func(12))
self.assertPreciseEqual(result2, func2.py_func(12))
self.assertPreciseEqual(result3, func1.py_func(12))
if __name__ == '__main__':
unittest.main() |
5,911 | osm changeset | import io
from typing import Dict, Optional, Tuple
from uuid import UUID
from asyncpg import Connection
from fastapi import APIRouter, Depends, HTTPException, Request
from modules import OsmSax, utils
from modules.dependencies import database
from .tool import oauth
from .tool.session import SessionData, backend, cookie, verifier
router = APIRouter()
@router.post("/editor/save")
async def save(
request: Request,
db: Connection = Depends(database.db),
session_id: UUID = Depends(cookie),
session_data: Optional[SessionData] = Depends(verifier),
) -> None:
if not session_data:
raise HTTPException(status_code=401)
json = await request.json()
if "tag" not in json:
raise HTTPException(status_code=422)
# Changeset tags
tags = json["tag"]
if "comment" not in tags or tags["comment"].strip() == "":
tags["comment"] = "Fixed with Osmose"
if "source" not in tags or tags["source"].strip() == "":
tags["source"] = "Osmose"
if "type" not in tags or tags["type"].strip() == "":
tags["type"] = "fix"
tags["created_by"] = "Osmose Editor"
reuse_changeset = json.get("reuse_changeset", True) is not False
# Get an open changeset
changeset = session_data.changeset
if changeset and not reuse_changeset:
try:
_changeset_close(session_data.oauth_tokens, changeset)
except Exception:
pass
changeset = None
session_data.changeset = None
await backend.update(session_id, session_data)
elif changeset:
try:
_changeset_update(session_data.oauth_tokens, changeset, tags)
except Exception:
changeset = None
session_data.changeset = changeset
await backend.update(session_id, session_data)
if not changeset:
changeset = _changeset_create(session_data.oauth_tokens, tags)
session_data.changeset = changeset
await backend.update(session_id, session_data)
# OsmChange
out = io.StringIO()
o = OsmSax.OsmSaxWriter(out, "UTF-8")
o.startDocument()
o.startElement("osmChange", {"version": "0.6", "generator": "OsmSax"})
methode = {"node": o.NodeCreate, "way": o.WayCreate, "relation": o.RelationCreate}
for action in ("modify", "delete"):
if action in json and len(json[action]) > 0:
o.startElement(action, {})
for e in json[action]:
try:
ee = utils.fetch_osm_elem(e["type"], e["id"])
except Exception:
ee = None
if ee and ee["version"] == int(e["version"]):
ee["changeset"] = changeset
ee["tag"] = e["tags"]
methode[e["type"]](ee)
else:
# FIXME reject
pass
o.endElement(action)
o.endElement("osmChange")
osmchange = out.getvalue()
# Fire the changeset
_changeset_upload(session_data.oauth_tokens, changeset, osmchange)
def _osm_changeset(tags, id: str = "0") -> str:
out = io.StringIO()
o = OsmSax.OsmSaxWriter(out, "UTF-8")
o.startDocument()
o.startElement("osm", {"version": "0.6", "generator": "Osmose"})
o.startElement("changeset", {"id": id, "open": "false"})
for k, v in tags.items():
o.Element("tag", {"k": k, "v": v})
o.endElement("changeset")
o.endElement("osm")
return out.getvalue()
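# For reference, the payload built above looks roughly like this
# (illustrative formatting; actual whitespace depends on OsmSaxWriter):
#
#     <osm version="0.6" generator="Osmose">
#       <changeset id="0" open="false">
#         <tag k="comment" v="Fixed with Osmose"/>
#       </changeset>
#     </osm>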
def _changeset_create(oauth_tokens: Tuple[str, str], tags: Dict[str, str]) -> str:
changeset = oauth.put(
oauth_tokens,
utils.remote_url_write + "api/0.6/changeset/create",
_osm_changeset(tags),
)
return changeset
def _changeset_update(
oauth_tokens: Tuple[str, str], id: str, tags: Dict[str, str]
) -> None:
oauth.put(
oauth_tokens,
utils.remote_url_write + "api/0.6/changeset/" + id,
METHOD_NAME(tags, id=id),
)
def _changeset_close(oauth_tokens: Tuple[str, str], id: str) -> None:
oauth.put(
oauth_tokens,
utils.remote_url_write + "api/0.6/changeset/" + id + "/close",
)
def _changeset_upload(oauth_tokens: Tuple[str, str], id: str, osmchange) -> None:
oauth.post(
oauth_tokens,
utils.remote_url_write + "api/0.6/changeset/" + id + "/upload",
osmchange,
) |
5,912 | test module data frame mapping | # /usr/bin/env python3.5
# -*- mode: python -*-
# =============================================================================
# @@-COPYRIGHT-START-@@
#
# Copyright (c) 2020-2021, Qualcomm Innovation Center, Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
# SPDX-License-Identifier: BSD-3-Clause
#
# @@-COPYRIGHT-END-@@
# =============================================================================
import os
import signal
import unittest
import numpy as np
import torch
import torch.nn as nn
from bokeh.models import Range1d
from bokeh.plotting import figure
from aimet_common.utils import AimetLogger, kill_process_with_name_and_port_number, start_bokeh_server_session
from aimet_common import bokeh_plots
from aimet_torch import plotting_utils
from aimet_torch import visualize_model
from aimet_common.bokeh_plots import BokehServerSession
from aimet_common.bokeh_plots import ProgressBar
logger = AimetLogger.get_area_logger(AimetLogger.LogAreas.Test)
class CNNModel(torch.nn.Module):
def __init__(self):
super(CNNModel, self).__init__()
        # 1 input channel, 6 output channels, 5x5 square convolution
self.conv1 = torch.nn.Conv2d(1, 6, 5)
self.conv2 = torch.nn.Conv2d(6, 150, 5)
self.conv3 = torch.nn.Conv2d(1, 60, 5)
self.conv4 = torch.nn.Conv2d(6, 80, 5)
model = CNNModel()
class VisualizeNetwork(unittest.TestCase):
def test_tensor_input(self):
        # 2 input channels, 4 output channels, 5x5 square convolution
conv1 = torch.nn.Conv2d(2, 4, 5)
# each column contains all the weights from one output channel
conv1_weights = plotting_utils.get_weights(conv1)
# print("conv1 weights after numpy reshaping", conv1_weights)
# number of output channels, or columns in the reshaped matrix
num_weights_out_channel1 = len(conv1_weights[0])
total_weights_expected = np.prod(list(conv1.weight.shape))
total_weights_actual = np.prod(conv1_weights.shape)
# the length of any row should equal the number of output channels
self.assertEqual(conv1.weight.shape[0], num_weights_out_channel1)
# ensure the number of weights is the same before and after
self.assertEqual(total_weights_expected, total_weights_actual)
def test_progress_bar(self):
process = None
try:
visualization_url, process = start_bokeh_server_session()
bokeh_session = BokehServerSession(url=visualization_url, session_id="test")
progress_bar = ProgressBar(total=10, bokeh_document=bokeh_session, title="testing", color="green")
for i in range(10):
progress_bar.update()
progress_bar.update()
self.assertEqual(progress_bar.calculate_percentage_complete(), 100.0)
bokeh_session.server_session.close("test complete")
finally:
if process:
process.terminate()
process.join()
def test_show_zoomed_in_plot_from_start(self):
layout = bokeh_plots.PlotsLayout()
# create a new plot with a range set with a tuple
p = figure(plot_width=400, plot_height=400, x_range=(0, 20))
# set a range using a Range1d
p.y_range = Range1d(0, 15)
p.circle([1, 2, 3, 4, 5, 25], [2, 5, 8, 2, 7, 50], size=10)
# r = row(p)
layout.layout = p
# layout.add_row(p)
layout.complete_layout()
def test_invoke_progress_bar(self):
process = None
try:
visualization_url, process = start_bokeh_server_session()
bokeh_session = BokehServerSession(url=visualization_url, session_id="test")
progress_bar = ProgressBar(80, title="Some Title Goes Here", color="green", bokeh_document=bokeh_session)
for i in range(80):
progress_bar.update()
progress_bar.update()
bokeh_session.server_session.close("test complete")
finally:
if process:
process.terminate()
process.join()
def METHOD_NAME(self):
layer_weights_map = plotting_utils.map_all_module_weights_to_data_frame(model)
num_conv_and_linear_layers = 0
for name, module in model.named_modules():
if isinstance(module, (torch.nn.modules.conv.Conv2d, torch.nn.modules.linear.Linear)):
num_conv_and_linear_layers += 1
# verify that there are the same number of data frames as there are conv and linear layers
self.assertEqual(num_conv_and_linear_layers, len(layer_weights_map))
def test_line_plot_visualizations_per_layer(self):
        results_dir = 'artifacts'
        if not os.path.exists(results_dir):
            os.makedirs(results_dir)
plot = visualize_model.visualize_relative_weight_ranges_to_identify_problematic_layers(model, results_dir) |
5,913 | test secret lookup error | # SPDX-FileCopyrightText: Red Hat, Inc.
# SPDX-License-Identifier: GPL-2.0-or-later
import uuid
import libvirt
import pytest
from vdsm.virt.vmdevices import storage
from . import vmfakelib
def test_secret_define_new():
con = vmfakelib.Connection()
xml = """
<secret>
<uuid>uuid</uuid>
<usage type="ceph">
<name>name</name>
</usage>
</secret>
"""
con.secretDefineXML(xml)
sec = con.secrets['uuid']
assert sec.uuid == "uuid"
assert sec.usage_type == "ceph"
assert sec.usage_id == "name"
assert sec.description is None
def test_secret_define_new_with_description():
con = vmfakelib.Connection()
xml = """
<secret>
<description>description</description>
<uuid>uuid</uuid>
<usage type="ceph">
<name>name</name>
</usage>
</secret>
"""
con.secretDefineXML(xml)
sec = con.secrets['uuid']
assert sec.description == "description"
def test_secret_define_replace():
con = vmfakelib.Connection()
xml1 = """
<secret>
<description>old description</description>
<uuid>uuid</uuid>
<usage type="ceph">
<name>name</name>
</usage>
</secret>
"""
xml2 = """
<secret>
<description>new description</description>
<uuid>uuid</uuid>
<usage type="ceph">
<name>name</name>
</usage>
</secret>
"""
con.secretDefineXML(xml1)
con.secretDefineXML(xml2)
sec = con.secrets['uuid']
assert sec.description == "new description"
def test_secret_define_cannot_change_usage_id():
con = vmfakelib.Connection()
xml1 = """
<secret>
<uuid>uuid</uuid>
<usage type="ceph">
<name>name 1</name>
</usage>
</secret>
"""
xml2 = """
<secret>
<uuid>uuid</uuid>
<usage type="ceph">
<name>name 2</name>
</usage>
</secret>
"""
con.secretDefineXML(xml1)
with pytest.raises(libvirt.libvirtError) as e:
con.secretDefineXML(xml2)
assert e.value.get_error_code() == libvirt.VIR_ERR_INTERNAL_ERROR
def test_secret_define_usage_not_unique():
con = vmfakelib.Connection()
xml1 = """
<secret>
<uuid>uuid 1</uuid>
<usage type="ceph">
<name>name</name>
</usage>
</secret>
"""
xml2 = """
<secret>
<uuid>uuid 2</uuid>
<usage type="ceph">
<name>name</name>
</usage>
</secret>
"""
con.secretDefineXML(xml1)
with pytest.raises(libvirt.libvirtError) as e:
con.secretDefineXML(xml2)
assert e.value.get_error_code() == libvirt.VIR_ERR_INTERNAL_ERROR
def test_secret_lookup():
con = vmfakelib.Connection()
xml = """
<secret>
<uuid>uuid</uuid>
<usage type="ceph">
<name>name</name>
</usage>
</secret>
"""
con.secretDefineXML(xml)
sec = con.secretLookupByUUIDString('uuid')
assert sec.usage_id == "name"
def METHOD_NAME():
con = vmfakelib.Connection()
with pytest.raises(libvirt.libvirtError) as e:
con.secretLookupByUUIDString('no-such-uuid')
assert e.value.get_error_code() == libvirt.VIR_ERR_NO_SECRET
def test_irs_prepared_volumes():
sdUUID = uuid.uuid4()
spUUID = uuid.uuid4()
imgUUID = uuid.uuid4()
leafUUID = uuid.uuid4()
irs = vmfakelib.IRS()
expected_path = "/run/storage/{}/{}/{}".format(sdUUID, imgUUID, leafUUID)
res = irs.prepareImage(sdUUID, spUUID, imgUUID, leafUUID)
assert (sdUUID, imgUUID, leafUUID) in irs.prepared_volumes
assert res == {
"status": {
"code": 0,
"message": "Done"
},
"path": expected_path,
"info": {
"type": storage.DISK_TYPE.FILE,
"path": expected_path,
},
"imgVolumesInfo": None,
}
res = irs.teardownImage(sdUUID, spUUID, imgUUID, leafUUID)
assert (sdUUID, imgUUID, leafUUID) not in irs.prepared_volumes
assert res == {
"status": {
"code": 0,
"message": "Done"
},
} |
5,914 | need json response | # This file is part of Indico.
# Copyright (C) 2002 - 2023 CERN
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the MIT License; see the
# LICENSE file for more details.
import traceback
from uuid import uuid4
import sentry_sdk
from flask import g, jsonify, render_template, request, session
from itsdangerous import BadData
from sqlalchemy.exc import OperationalError
from werkzeug.exceptions import Forbidden, HTTPException
from indico.core.cache import make_scoped_cache
from indico.core.errors import NoReportError
from indico.web.util import get_request_info
from indico.web.views import WPError
error_cache = make_scoped_cache('errors')
def render_error(exc, title, message, code, standalone=False):
_save_error(exc, title, message)
if METHOD_NAME():
return _jsonify_error(exc, title, message, code)
elif standalone:
return render_template('standalone_error.html', error_message=title, error_description=message), code
else:
try:
return WPError(title, message).display(), code
except OperationalError:
            # If the error was caused while connecting to the database,
            # rendering the error page fails since e.g. the header/footer
            # templates access the database or call hooks doing so.
            # In this case we simply fall back to the standalone error
            # page, which does not show the Indico UI around the error
            # message but doesn't require any kind of DB connection.
return render_error(exc, title, message, code, standalone=True)
def load_error_data(uuid):
return error_cache.get(uuid)
def _save_error(exc, title, message):
# Note that `exc` is only used to check if the error should be saved.
# Any other information is taken from `sys.exc_info()`!
if 'saved_error_uuid' in g:
return
if not _is_error_reportable(exc):
return
g.saved_error_uuid = uuid = str(uuid4())
# XXX: keep this outside - it must be called before `get_request_info()`
# as that function may mess up `sys.exc_info()` in case accessing user
# details fails
tb = traceback.format_exc()
data = {'title': title,
'message': message,
'request_info': get_request_info(),
'traceback': tb,
'sentry_event_id': sentry_sdk.last_event_id()}
error_cache.set(uuid, data, timeout=7200)
def METHOD_NAME():
return request.is_xhr or request.is_json or (hasattr(g, 'rh') and getattr(g.rh, '_JSON_ERRORS', False))
def _is_error_reporting_opted_out(code):
header = request.headers.get('X-Indico-No-Report-Error')
if not header:
return
codes = header.split(',')
return str(code) in codes
def _is_error_reportable(exc):
# client explicitly opted out from reporting this (expected) error
if hasattr(exc, 'code') and _is_error_reporting_opted_out(exc.code):
return False
# error marked as not reportable
elif isinstance(exc, NoReportError) or getattr(exc, '_disallow_report', False):
return False
elif isinstance(exc, BadData):
# itsdangerous stuff - should only fail if someone tampers with a link
return False
elif isinstance(exc, Forbidden):
# forbidden errors for guests are not reportable
# for other users: same logic as any other http exception
return METHOD_NAME() and session.user is not None
elif isinstance(exc, HTTPException):
# http exceptions can only be reported if they occur during
# an AJAX request - otherwise they are typically caused by
# users doing something wrong (typing a 404 URL, messing with
# data, etc)
return METHOD_NAME()
else:
return True
def _jsonify_error(exc, title, message, code):
error_data = {
'title': title,
'message': message,
'error_uuid': g.get('saved_error_uuid') if _is_error_reportable(exc) else None,
}
response = jsonify(error=error_data)
response.status_code = code
return response |
5,915 | get next | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from ... import models as _models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class ServiceOperations:
"""ServiceOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.storage.filedatalake.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def list_file_systems(
self,
prefix: Optional[str] = None,
continuation: Optional[str] = None,
max_results: Optional[int] = None,
request_id_parameter: Optional[str] = None,
timeout: Optional[int] = None,
**kwargs
) -> AsyncIterable["_models.FileSystemList"]:
"""List FileSystems.
List filesystems and their properties in given account.
:param prefix: Filters results to filesystems within the specified prefix.
:type prefix: str
        :param continuation: Optional. The number of filesystems returned with each invocation is
         limited. If the listing is truncated, a continuation token is returned in the response.
         When a continuation token is returned, it must be specified in a subsequent invocation of
         the list operation to continue listing the filesystems.
:type continuation: str
:param max_results: An optional value that specifies the maximum number of items to return. If
omitted or greater than 5,000, the response will include up to 5,000 items.
:type max_results: int
:param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character
limit that is recorded in the analytics logs when storage analytics logging is enabled.
:type request_id_parameter: str
:param timeout: The timeout parameter is expressed in seconds. For more information, see
:code:`<a href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-
timeouts-for-blob-service-operations">Setting Timeouts for Blob Service Operations.</a>`.
:type timeout: int
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either FileSystemList or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.storage.filedatalake.models.FileSystemList]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.FileSystemList"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
resource = "account"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
if request_id_parameter is not None:
header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str')
header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_file_systems.metadata['url'] # type: ignore
path_format_arguments = {
'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['resource'] = self._serialize.query("resource", resource, 'str')
if prefix is not None:
query_parameters['prefix'] = self._serialize.query("prefix", prefix, 'str')
if continuation is not None:
query_parameters['continuation'] = self._serialize.query("continuation", continuation, 'str')
if max_results is not None:
query_parameters['maxResults'] = self._serialize.query("max_results", max_results, 'int', minimum=1)
if timeout is not None:
query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
path_format_arguments = {
'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True),
}
url = self._client.format_url(url, **path_format_arguments)
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('FileSystemList', pipeline_response)
list_of_elem = deserialized.filesystems
if cls:
list_of_elem = cls(list_of_elem)
return None, AsyncList(list_of_elem)
async def METHOD_NAME(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
error = self._deserialize.failsafe_deserialize(_models.StorageError, response)
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, model=error)
return pipeline_response
return AsyncItemPaged(
METHOD_NAME, extract_data
)
list_file_systems.metadata = {'url': '/'} # type: ignore |
5,916 | has tag | from typing import AbstractSet, Any, Mapping, Optional, cast
from dagster import (
DagsterRun,
JobDefinition,
OpDefinition,
_check as check,
)
from dagster._annotations import public
from dagster._core.definitions.dependency import Node, NodeHandle
from dagster._core.execution.context.compute import AbstractComputeExecutionContext
from dagster._core.execution.context.system import PlanExecutionContext, StepExecutionContext
from dagster._core.log_manager import DagsterLogManager
from dagster._core.system_config.objects import ResolvedRunConfig
class DagstermillExecutionContext(AbstractComputeExecutionContext):
"""Dagstermill-specific execution context.
Do not initialize directly: use :func:`dagstermill.get_context`.
"""
def __init__(
self,
job_context: PlanExecutionContext,
job_def: JobDefinition,
resource_keys_to_init: AbstractSet[str],
op_name: str,
node_handle: NodeHandle,
op_config: Any = None,
):
self._job_context = check.inst_param(job_context, "job_context", PlanExecutionContext)
self._job_def = check.inst_param(job_def, "job_def", JobDefinition)
self._resource_keys_to_init = check.set_param(
resource_keys_to_init, "resource_keys_to_init", of_type=str
)
self.op_name = check.str_param(op_name, "op_name")
self.node_handle = check.inst_param(node_handle, "node_handle", NodeHandle)
self._op_config = op_config
def METHOD_NAME(self, key: str) -> bool:
"""Check if a logging tag is defined on the context.
Args:
key (str): The key to check.
Returns:
bool
"""
check.str_param(key, "key")
return self._job_context.METHOD_NAME(key)
def get_tag(self, key: str) -> Optional[str]:
"""Get a logging tag defined on the context.
Args:
key (str): The key to get.
Returns:
str
"""
check.str_param(key, "key")
return self._job_context.get_tag(key)
@public
@property
def run_id(self) -> str:
"""str: The run_id for the context."""
return self._job_context.run_id
@public
@property
def run_config(self) -> Mapping[str, Any]:
"""dict: The run_config for the context."""
return self._job_context.run_config
@property
def resolved_run_config(self) -> ResolvedRunConfig:
""":class:`dagster.ResolvedRunConfig`: The resolved_run_config for the context."""
return self._job_context.resolved_run_config
@public
@property
def logging_tags(self) -> Mapping[str, str]:
"""dict: The logging tags for the context."""
return self._job_context.logging_tags
@public
@property
def job_name(self) -> str:
"""str: The name of the executing job."""
return self._job_context.job_name
@public
@property
def job_def(self) -> JobDefinition:
""":class:`dagster.JobDefinition`: The job definition for the context.
This will be a dagstermill-specific shim.
"""
return self._job_def
@property
def resources(self) -> Any:
"""collections.namedtuple: A dynamically-created type whose properties allow access to
resources.
"""
return self._job_context.scoped_resources_builder.build(
required_resource_keys=self._resource_keys_to_init,
)
@public
@property
def run(self) -> DagsterRun:
""":class:`dagster.DagsterRun`: The job run for the context."""
return cast(DagsterRun, self._job_context.dagster_run)
@property
def log(self) -> DagsterLogManager:
""":class:`dagster.DagsterLogManager`: The log manager for the context.
Call, e.g., ``log.info()`` to log messages through the Dagster machinery.
"""
return self._job_context.log
@public
@property
def op_def(self) -> OpDefinition:
""":class:`dagster.OpDefinition`: The op definition for the context.
        In interactive contexts, this may be a dagstermill-specific shim, depending on whether an
        op definition was passed to ``dagstermill.get_context``.
"""
return cast(OpDefinition, self._job_def.node_def_named(self.op_name))
@property
def node(self) -> Node:
""":class:`dagster.Node`: The node for the context.
        In interactive contexts, this may be a dagstermill-specific shim, depending on whether an
        op definition was passed to ``dagstermill.get_context``.
"""
return self.job_def.get_node(self.node_handle)
@public
@property
def op_config(self) -> Any:
"""collections.namedtuple: A dynamically-created type whose properties allow access to
op-specific config.
"""
if self._op_config:
return self._op_config
op_config = self.resolved_run_config.ops.get(self.op_name)
return op_config.config if op_config else None
class DagstermillRuntimeExecutionContext(DagstermillExecutionContext):
def __init__(
self,
job_context: PlanExecutionContext,
job_def: JobDefinition,
resource_keys_to_init: AbstractSet[str],
op_name: str,
step_context: StepExecutionContext,
node_handle: NodeHandle,
op_config: Any = None,
):
self._step_context = check.inst_param(step_context, "step_context", StepExecutionContext)
super().__init__(
job_context,
job_def,
resource_keys_to_init,
op_name,
node_handle,
op_config,
)
@property
def step_context(self) -> StepExecutionContext:
return self._step_context |
5,917 | test easy thumbnails image field | """Tests for the fields module."""
import sys
from importlib import reload
from unittest.mock import patch, MagicMock, PropertyMock
from django.core.exceptions import ImproperlyConfigured
from django.db.models import ImageField
from django.test import TestCase
from newsletter import fields
class FieldsTestCase(TestCase):
class MockSorlThumbnailImageField:
def __init__(self):
self.parent_class = 'sorl-thumbnail'
class MockEasyThumbnailsImageField:
def __init__(self):
self.parent_class = 'easy-thumbnails'
def clear_imports(self):
"""Removes imported modules to ensure proper test environment.
Need to set import to None because otherwise Python will
automatically re-import them when called during testing.
"""
sys.modules['sorl'] = None
sys.modules['sorl.thumbnail'] = None
sys.modules['sorl.thumbnail.fields'] = None
sys.modules['sorl.thumbnail.fields.ImageField'] = None
sys.modules['easy_thumbnails'] = None
sys.modules['easy_thumbnails.fields'] = None
sys.modules['easy_thumbnails.fields.ThumbnailerImageField'] = None
def mock_sorl_import(self):
"""Mocks import of sorl-thumbnail AdminImageMixin."""
sys.modules['sorl'] = MagicMock()
sys.modules['sorl.thumbnail'] = MagicMock()
sys.modules['sorl.thumbnail.fields'] = MagicMock()
sys.modules['sorl.thumbnail.fields.ImageField'] = (
self.MockSorlThumbnailImageField
)
# Have to set attributes to get around metaclass conflicts when
# setting up DynamicImageField
# https://stackoverflow.com/a/52460876/4521808
setattr(
sys.modules['sorl.thumbnail.fields'],
'ImageField',
self.MockSorlThumbnailImageField
)
def mock_easy_thumbnails_import(self):
"""Mocks import of easy-thumbnails ImageClearableFileInput."""
sys.modules['easy_thumbnails'] = MagicMock()
sys.modules['easy_thumbnails.fields'] = MagicMock()
sys.modules['easy_thumbnails.fields.ThumbnailerImageField'] = (
self.MockEasyThumbnailsImageField
)
# Have to set attributes to get around metaclass conflicts when
# setting up the DynamicImageField
# https://stackoverflow.com/a/52460876/4521808
setattr(
sys.modules['easy_thumbnails.fields'],
'ThumbnailerImageField',
self.MockEasyThumbnailsImageField
)
def tearDown(self):
self.clear_imports()
@patch(
'newsletter.settings.NewsletterSettings.THUMBNAIL',
new_callable=PropertyMock,
)
def test_sorl_thumbnail_image_field(self, THUMBNAIL):
"""Tests that sorl-thumbnail image field loads as expected."""
THUMBNAIL.return_value = 'sorl-thumbnail'
# Reload fields to re-declare the DynamicImageField
self.clear_imports()
        self.mock_sorl_import()
        reload(fields)
        # Confirm inheritance from sorl-thumbnail ImageField
        image_field = fields.DynamicImageField()
        self.assertEqual(image_field.parent_class, 'sorl-thumbnail')
@patch(
'newsletter.settings.NewsletterSettings.THUMBNAIL',
new_callable=PropertyMock,
)
def METHOD_NAME(self, THUMBNAIL):
"""Tests that easy-thumbnails image field loads as expected."""
THUMBNAIL.return_value = 'easy-thumbnails'
# Reload fields to re-declare the DynamicImageField
self.clear_imports()
self.mock_easy_thumbnails_import()
reload(fields)
# Confirm inheritance from easy-thumbnails ThumbnailerImageField
image_field = fields.DynamicImageField()
self.assertEqual(image_field.parent_class, 'easy-thumbnails')
@patch(
'newsletter.settings.NewsletterSettings.THUMBNAIL',
new_callable=PropertyMock,
)
def test_error_on_no_thumbnail(self, THUMBNAIL):
"""Tests that error occurs if no thumbnailer specified."""
THUMBNAIL.return_value = None
# Reload fields to re-declare the DynamicImageField
try:
self.clear_imports()
reload(fields)
except ImproperlyConfigured as error:
self.assertEqual(str(error), 'Invalid NEWSLETTER_THUMBNAIL value.')
else:
            self.fail('ImproperlyConfigured was not raised.')
5,918 | handle | from __future__ import annotations
import argparse
from typing import Any
from django.core.management import BaseCommand
from django.core.management import CommandError
from django.db import connections
from django.db import DEFAULT_DB_ALIAS
from django.db.utils import ConnectionDoesNotExist
from django_mysql.utils import settings_to_cmd_args
class Command(BaseCommand):
args = "<optional connection alias>"
help = (
"Outputs shell parameters representing database connection "
"suitable for inclusion in various tools' commandlines. The "
"connection alias should be a name from DATABASES - defaults to "
"'{default}'."
).format(default=DEFAULT_DB_ALIAS)
requires_system_checks: list[str] = []
def add_arguments(self, parser: argparse.ArgumentParser) -> None:
parser.add_argument(
"alias",
metavar="alias",
nargs="?",
default=DEFAULT_DB_ALIAS,
help="Specify the database connection alias to output " "parameters for.",
)
parser.add_argument(
"--mysql",
action="store_true",
dest="show_mysql",
default=False,
help="Outputs flags for tools that take parameters in the "
"same format as the mysql client, e.g. mysql "
"$(./manage.py dbparams --mysql)",
)
parser.add_argument(
"--dsn",
action="store_true",
dest="show_dsn",
default=False,
help="Output a DSN for e.g. percona tools, e.g. "
"pt-online-schema-change $(./manage.py dbparams --dsn)",
)
def METHOD_NAME(
self, *args: Any, alias: str, show_mysql: bool, show_dsn: bool, **options: Any
) -> None:
try:
connection = connections[alias]
except ConnectionDoesNotExist:
raise CommandError(f"Connection {alias!r} does not exist")
if connection.vendor != "mysql":
raise CommandError(f"{alias!r} is not a MySQL database connection")
if show_mysql and show_dsn:
raise CommandError("Pass only one of --mysql and --dsn")
elif not show_mysql and not show_dsn:
show_mysql = True
settings_dict: dict[str, Any] = connection.settings_dict
if show_mysql:
self.output_for_mysql(settings_dict)
elif show_dsn:
self.output_for_dsn(settings_dict)
else: # pragma: no cover
raise AssertionError("Impossible")
def output_for_mysql(self, settings_dict: dict[str, Any]) -> None:
args = settings_to_cmd_args(settings_dict)
args = args[1:] # Delete the 'mysql' at the start
self.stdout.write(" ".join(args), ending="")
def output_for_dsn(self, settings_dict: dict[str, Any]) -> None:
cert = settings_dict["OPTIONS"].get("ssl", {}).get("ca")
if cert:
self.stderr.write(
"Warning: SSL params can't be passed in the DSN syntax; you "
"must pass them in your my.cnf. See: "
"https://www.percona.com/blog/2014/10/16/percona-toolkit-for-"
"mysql-with-mysql-ssl-connections/"
)
db = settings_dict["OPTIONS"].get("db", settings_dict["NAME"])
user = settings_dict["OPTIONS"].get("user", settings_dict["USER"])
passwd = settings_dict["OPTIONS"].get("passwd", settings_dict["PASSWORD"])
host = settings_dict["OPTIONS"].get("host", settings_dict["HOST"])
port = settings_dict["OPTIONS"].get("port", settings_dict["PORT"])
defaults_file = settings_dict["OPTIONS"].get("read_default_file")
args = []
if defaults_file:
args.append(f"F={defaults_file}")
if user:
args.append(f"u={user}")
if passwd:
args.append(f"p={passwd}")
if host:
if "/" in host:
args.append(f"S={host}")
else:
args.append(f"h={host}")
if port:
args.append(f"P={port}")
if db:
args.append(f"D={db}")
dsn = ",".join(args)
self.stdout.write(dsn, ending="") |
5,919 | test copy | import unittest
import tempfile
import json
import copy
import numpy as np
import pandas as pd
import os
from numpy.testing import assert_almost_equal
from sklearn import datasets
from supervised.algorithms.catboost import CatBoostAlgorithm, additional
from supervised.utils.metric import Metric
additional["max_rounds"] = 1
class CatBoostRegressorAlgorithmTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.X, cls.y = datasets.make_regression(
n_samples=100, n_features=5, n_informative=4, shuffle=False, random_state=0
)
cls.X = pd.DataFrame(cls.X, columns=[f"f_{i}" for i in range(cls.X.shape[1])])
cls.params = {
"learning_rate": 0.1,
"depth": 4,
"rsm": 0.5,
"l2_leaf_reg": 1,
"seed": 1,
"ml_task": "regression",
"loss_function": "RMSE",
"eval_metric": "RMSE",
}
def test_reproduce_fit(self):
metric = Metric({"name": "mse"})
prev_loss = None
for _ in range(2):
model = CatBoostAlgorithm(self.params)
model.fit(self.X, self.y)
y_predicted = model.predict(self.X)
loss = metric(self.y, y_predicted)
if prev_loss is not None:
assert_almost_equal(prev_loss, loss, decimal=3)
prev_loss = loss
def test_get_metric_name(self):
model = CatBoostAlgorithm(self.params)
self.assertEqual(model.get_metric_name(), "rmse")
class CatBoostAlgorithmTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.X, cls.y = datasets.make_classification(
n_samples=100,
n_features=5,
n_informative=4,
n_redundant=1,
n_classes=2,
n_clusters_per_class=3,
n_repeated=0,
shuffle=False,
random_state=0,
)
cls.X = pd.DataFrame(cls.X, columns=[f"f_{i}" for i in range(cls.X.shape[1])])
cls.params = {
"learning_rate": 0.1,
"depth": 4,
"rsm": 0.5,
"l2_leaf_reg": 1,
"seed": 1,
"ml_task": "binary_classification",
"loss_function": "Logloss",
"eval_metric": "Logloss",
}
def test_reproduce_fit(self):
metric = Metric({"name": "logloss"})
prev_loss = None
for _ in range(2):
model = CatBoostAlgorithm(self.params)
model.fit(self.X, self.y)
y_predicted = model.predict(self.X)
loss = metric(self.y, y_predicted)
if prev_loss is not None:
assert_almost_equal(prev_loss, loss, decimal=3)
prev_loss = loss
def test_fit_predict(self):
metric = Metric({"name": "logloss"})
loss_prev = None
for _ in range(2):
cat = CatBoostAlgorithm(self.params)
cat.fit(self.X, self.y)
y_predicted = cat.predict(self.X)
loss = metric(self.y, y_predicted)
if loss_prev is not None:
assert_almost_equal(loss, loss_prev, decimal=3)
loss_prev = loss
def METHOD_NAME(self):
# train model #1
metric = Metric({"name": "logloss"})
cat = CatBoostAlgorithm(self.params)
cat.fit(self.X, self.y)
y_predicted = cat.predict(self.X)
loss = metric(self.y, y_predicted)
# create model #2
cat2 = CatBoostAlgorithm(self.params)
# model #2 is initialized in constructor
self.assertTrue(cat2.model is not None)
# do a copy and use it for predictions
cat2 = cat.copy()
self.assertEqual(type(cat), type(cat2))
y_predicted = cat2.predict(self.X)
loss2 = metric(self.y, y_predicted)
self.assertEqual(loss, loss2)
def test_save_and_load(self):
metric = Metric({"name": "logloss"})
cat = CatBoostAlgorithm(self.params)
cat.fit(self.X, self.y)
y_predicted = cat.predict(self.X)
loss = metric(self.y, y_predicted)
filename = os.path.join(tempfile.gettempdir(), os.urandom(12).hex())
cat.save(filename)
cat2 = CatBoostAlgorithm(self.params)
self.assertTrue(cat.uid != cat2.uid)
self.assertTrue(cat2.model is not None)
cat2.load(filename)
# Finished with the file, delete it
os.remove(filename)
y_predicted = cat2.predict(self.X)
loss2 = metric(self.y, y_predicted)
assert_almost_equal(loss, loss2, decimal=3)
def test_get_metric_name(self):
model = CatBoostAlgorithm(self.params)
self.assertEqual(model.get_metric_name(), "logloss")
params = dict(self.params)
params["loss_function"] = "MultiClass"
params["eval_metric"] = "MultiClass"
model = CatBoostAlgorithm(params)
self.assertEqual(model.get_metric_name(), "logloss")
def test_is_fitted(self):
cat = CatBoostAlgorithm(self.params)
self.assertFalse(cat.is_fitted())
cat.fit(self.X, self.y)
self.assertTrue(cat.is_fitted()) |
5,920 | on download finished | # Copyright (c) 2022 Ultimaker B.V.
# Cura is released under the terms of the LGPLv3 or higher.
import os
from typing import List, Dict, Any, cast
from UM import i18n_catalog
from UM.Extension import Extension
from UM.Logger import Logger
from UM.Message import Message
from UM.PluginRegistry import PluginRegistry
from cura.CuraApplication import CuraApplication
from .CloudPackageChecker import CloudPackageChecker
from .CloudApiClient import CloudApiClient
from .DiscrepanciesPresenter import DiscrepanciesPresenter
from .DownloadPresenter import DownloadPresenter
from .LicensePresenter import LicensePresenter
from .RestartApplicationPresenter import RestartApplicationPresenter
from .SubscribedPackagesModel import SubscribedPackagesModel
class SyncOrchestrator(Extension):
"""Orchestrates the synchronizing of packages from the user account to the installed packages
Example flow:
    - CloudPackageChecker compares the list of packages the user `subscribed` to in their account
      with the locally installed packages. If there are `discrepancies` between the two, they are emitted.
    - DiscrepanciesPresenter shows the user a list of packages to be added or removed. It emits the
      `packageMutations` the user selected to be performed.
    - The SyncOrchestrator uses PackageManager to remove the local packages the user wants removed.
    - The DownloadPresenter shows a download progress dialog. It emits a tuple of succeeded and failed downloads.
    - The LicensePresenter extracts licenses from the downloaded packages and presents a license for each package to
      be installed. It emits the `licenseAnswers` signal with the accept or decline choices.
- The CloudApiClient removes the declined packages from the account
- The SyncOrchestrator uses PackageManager to install the downloaded packages and delete temp files.
- The RestartApplicationPresenter notifies the user that a restart is required for changes to take effect
"""
def __init__(self, app: CuraApplication) -> None:
super().__init__()
        # Differentiate this PluginObject from the Marketplace. self.getId() includes _name.
        # getPluginId() will return the same value for the Marketplace extension and this one.
self._name = "SyncOrchestrator"
self._package_manager = app.getPackageManager()
        # Keep a reference to the CloudApiClient. It watches for installed packages and subscribes to them.
self._cloud_api: CloudApiClient = CloudApiClient.getInstance(app)
self._checker: CloudPackageChecker = CloudPackageChecker(app)
self._checker.discrepancies.connect(self._onDiscrepancies)
self._discrepancies_presenter: DiscrepanciesPresenter = DiscrepanciesPresenter(app)
self._discrepancies_presenter.packageMutations.connect(self._onPackageMutations)
self._download_presenter: DownloadPresenter = DownloadPresenter(app)
self._license_presenter: LicensePresenter = LicensePresenter(app)
self._license_presenter.licenseAnswers.connect(self._onLicenseAnswers)
self._restart_presenter = RestartApplicationPresenter(app)
def _onDiscrepancies(self, model: SubscribedPackagesModel) -> None:
plugin_path = cast(str, PluginRegistry.getInstance().getPluginPath(self.getPluginId()))
self._discrepancies_presenter.present(plugin_path, model)
def _onPackageMutations(self, mutations: SubscribedPackagesModel) -> None:
self._download_presenter = self._download_presenter.resetCopy()
self._download_presenter.done.connect(self.METHOD_NAME)
self._download_presenter.download(mutations)
def METHOD_NAME(self, success_items: Dict[str, Dict[str, str]], error_items: List[str]) -> None:
"""Called when a set of packages have finished downloading
:param success_items:: Dict[package_id, Dict[str, str]]
:param error_items:: List[package_id]
"""
if error_items:
message = i18n_catalog.i18nc("@info:generic", "{} plugins failed to download".format(len(error_items)))
self._showErrorMessage(message)
plugin_path = cast(str, PluginRegistry.getInstance().getPluginPath(self.getPluginId()))
self._license_presenter = self._license_presenter.resetCopy()
self._license_presenter.licenseAnswers.connect(self._onLicenseAnswers)
self._license_presenter.present(plugin_path, success_items)
# Called when user has accepted / declined all licenses for the downloaded packages
def _onLicenseAnswers(self, answers: List[Dict[str, Any]]) -> None:
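        # Each answer is a dict shaped like (inferred from the usage below;
        # illustrative): {"accepted": bool, "package_id": str, "package_path": str}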
has_changes = False # True when at least one package is installed
for item in answers:
if item["accepted"]:
# install and subscribe packages
if not self._package_manager.installPackage(item["package_path"]):
message = "Could not install {}".format(item["package_id"])
self._showErrorMessage(message)
continue
has_changes = True
else:
self._cloud_api.unsubscribe(item["package_id"])
# delete temp file
try:
os.remove(item["package_path"])
except EnvironmentError as e: # File was already removed, no access rights, etc.
Logger.error("Can't delete temporary package file: {err}".format(err = str(e)))
if has_changes:
self._restart_presenter.present()
def _showErrorMessage(self, text: str):
"""Logs an error and shows it to the user"""
Logger.error(text)
Message(text, lifetime = 0, message_type = Message.MessageType.ERROR).show() |
5,921 | to csv | # Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: MIT. See LICENSE
import csv
import json
from io import StringIO
import requests
import frappe
from frappe import _, msgprint
from frappe.utils import cint, comma_or, cstr, flt
def read_csv_content_from_attached_file(doc):
fileid = frappe.get_all(
"File",
fields=["name"],
filters={"attached_to_doctype": doc.doctype, "attached_to_name": doc.name},
order_by="creation desc",
)
if fileid:
fileid = fileid[0].name
if not fileid:
msgprint(_("File not attached"))
raise Exception
try:
_file = frappe.get_doc("File", fileid)
fcontent = _file.get_content()
return read_csv_content(fcontent)
except Exception:
frappe.throw(
_("Unable to open attached file. Did you export it as CSV?"), title=_("Invalid CSV Format")
)
def read_csv_content(fcontent):
if not isinstance(fcontent, str):
decoded = False
for encoding in ["utf-8", "windows-1250", "windows-1252"]:
try:
fcontent = str(fcontent, encoding)
decoded = True
break
except UnicodeDecodeError:
continue
if not decoded:
frappe.msgprint(
_("Unknown file encoding. Tried utf-8, windows-1250, windows-1252."), raise_exception=True
)
fcontent = fcontent.encode("utf-8")
content = [frappe.safe_decode(line) for line in fcontent.splitlines(True)]
try:
rows = []
for row in csv.reader(content):
r = []
for val in row:
# decode everything
val = val.strip()
if val == "":
                    # reason: in MariaDB strict mode, one cannot have blank strings for non-string datatypes
r.append(None)
else:
r.append(val)
rows.append(r)
return rows
except Exception:
frappe.msgprint(_("Not a valid Comma Separated Value (CSV File)"))
raise
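# Illustrative behaviour of read_csv_content (example input/output):
#   read_csv_content(b"name,qty\nWidget,\n") -> [["name", "qty"], ["Widget", None]]
# (blank cells become None so strict-mode databases accept them)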
@frappe.whitelist()
def send_csv_to_client(args):
if isinstance(args, str):
args = json.loads(args)
args = frappe._dict(args)
frappe.response["result"] = cstr(METHOD_NAME(args.data))
frappe.response["doctype"] = args.filename
frappe.response["type"] = "csv"
def METHOD_NAME(data):
writer = UnicodeWriter()
for row in data:
writer.writerow(row)
return writer.getvalue()
def build_csv_response(data, filename):
frappe.response["result"] = cstr(METHOD_NAME(data))
frappe.response["doctype"] = filename
frappe.response["type"] = "csv"
class UnicodeWriter:
def __init__(self, encoding="utf-8", quoting=csv.QUOTE_NONNUMERIC):
self.encoding = encoding
self.queue = StringIO()
self.writer = csv.writer(self.queue, quoting=quoting)
def writerow(self, row):
self.writer.writerow(row)
def getvalue(self):
return self.queue.getvalue()
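# Illustrative UnicodeWriter usage (example values):
#   writer = UnicodeWriter()
#   writer.writerow(["Name", "Amount"])
#   writer.writerow(["Widget", 42])
#   writer.getvalue()  # '"Name","Amount"\r\n"Widget",42\r\n'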
def check_record(d):
"""check for mandatory, select options, dates. these should ideally be in doclist"""
from frappe.utils.dateutils import parse_date
doc = frappe.get_doc(d)
for key in d:
docfield = doc.meta.get_field(key)
val = d[key]
if docfield:
if docfield.reqd and (val == "" or val is None):
frappe.msgprint(_("{0} is required").format(docfield.label), raise_exception=1)
if docfield.fieldtype == "Select" and val and docfield.options:
if val not in docfield.options.split("\n"):
frappe.throw(
_("{0} must be one of {1}").format(_(docfield.label), comma_or(docfield.options.split("\n")))
)
if val and docfield.fieldtype == "Date":
d[key] = parse_date(val)
elif val and docfield.fieldtype in ["Int", "Check"]:
d[key] = cint(val)
elif val and docfield.fieldtype in ["Currency", "Float", "Percent"]:
d[key] = flt(val)
def import_doc(d, doctype, overwrite, row_idx, submit=False, ignore_links=False):
"""import main (non child) document"""
if d.get("name") and frappe.db.exists(doctype, d["name"]):
if overwrite:
doc = frappe.get_doc(doctype, d["name"])
doc.flags.ignore_links = ignore_links
doc.update(d)
if d.get("docstatus") == 1:
doc.update_after_submit()
elif d.get("docstatus") == 0 and submit:
doc.submit()
else:
doc.save()
return "Updated row (#%d) %s" % (row_idx + 1, getlink(doctype, d["name"]))
else:
return "Ignored row (#%d) %s (exists)" % (row_idx + 1, getlink(doctype, d["name"]))
else:
doc = frappe.get_doc(d)
doc.flags.ignore_links = ignore_links
doc.insert()
if submit:
doc.submit()
return "Inserted row (#%d) %s" % (row_idx + 1, getlink(doctype, doc.get("name")))
def getlink(doctype, name):
return '<a href="/app/Form/%(doctype)s/%(name)s">%(name)s</a>' % locals()
def get_csv_content_from_google_sheets(url):
	# https://docs.google.com/spreadsheets/d/{sheetid}/edit#gid={gid}
validate_google_sheets_url(url)
# get gid, defaults to first sheet
if "gid=" in url:
gid = url.rsplit("gid=", 1)[1]
else:
gid = 0
# remove /edit path
url = url.rsplit("/edit", 1)[0]
	# add /export path
url = url + f"/export?format=csv&gid={gid}"
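	# e.g. https://docs.google.com/spreadsheets/d/<sheet-id>/export?format=csv&gid=0
	# (<sheet-id> is a placeholder; the real id comes from the pasted URL)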
headers = {"Accept": "text/csv"}
response = requests.get(url, headers=headers)
if response.ok:
# if it returns html, it couldn't find the CSV content
# because of invalid url or no access
if response.text.strip().endswith("</html>"):
frappe.throw(
_("Google Sheets URL is invalid or not publicly accessible."), title=_("Invalid URL")
)
return response.content
elif response.status_code == 400:
frappe.throw(
_(
'Google Sheets URL must end with "gid={number}". Copy and paste the URL from the browser address bar and try again.'
),
title=_("Incorrect URL"),
)
else:
response.raise_for_status()
def validate_google_sheets_url(url):
from urllib.parse import urlparse
u = urlparse(url)
if u.scheme != "https" or u.netloc != "docs.google.com" or "/spreadsheets/" not in u.path:
frappe.throw(
_('"{0}" is not a valid Google Sheets URL').format(url),
title=_("Invalid URL"),
) |
5,922 | transformed input np | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Test code for float16 and uint8 global_avg_pool2d."""
import numpy as np
import tvm
from tvm import te
from tvm.topi.testing import adaptive_pool
import tvm.topi.hexagon.qnn as qn
import tvm.topi.hexagon.slice_ops as sl
from tvm.contrib.hexagon import allocate_hexagon_array
from ...infrastructure import transform_numpy, quantize_np, get_hexagon_target
SCALE_M_VAL = None
ZERO_POINT_M_VAL = None
SCALE_VAL = None
ZERO_POINT_VAL = None
class TestGlobalPool2D:
(input_shape,) = tvm.testing.parameters(
([1, 32, 8, 8],),
([1, 1056, 16, 16],),
)
    # The fixed chunk layout is set as nchw-32c8h8w-2d for uint8 and nchw-32c8h4w-2d for float16.
    # It might be changed later as an optimization.
    # Since the output shape will be NxCx1x1, which is not a multiple of the
    # fixed chunk, the output_layout is NCHW.
input_layout, output_layout, pool_type, layout, dtype = tvm.testing.parameters(
("nchw-32c8h8w-2d", "nchw", "avg", "NCHW", "uint8"),
("nchw-32c8h4w-2d", "nchw", "avg", "NCHW", "float16"),
)
@tvm.testing.fixture
def expected_output_np(
self,
input_np,
pool_type,
layout,
):
"""Generate expected output."""
ref_np = tvm.topi.testing.adaptive_pool(
input_np,
(1, 1),
pool_type,
layout,
)
return ref_np
@tvm.testing.fixture
def input_np(self, input_shape, dtype):
if dtype in ("uint8", "int8"):
dtype = "float32"
return np.random.random(input_shape).astype(dtype)
@tvm.testing.fixture
def quantize_input_np(self, input_np, dtype):
if dtype in ("uint8", "int8"):
global ZERO_POINT_VAL, SCALE_VAL
input_np_quantized, SCALE_VAL, ZERO_POINT_VAL = quantize_np(input_np, dtype)
return input_np_quantized
@tvm.testing.fixture
def METHOD_NAME(self, input_np, quantize_input_np, input_layout, layout, dtype):
if dtype == "float16":
return transform_numpy(input_np, layout.lower(), input_layout)
if dtype in ("uint8", "int8"):
return transform_numpy(quantize_input_np, layout.lower(), input_layout)
raise RuntimeError(f"Unsupported data type '{dtype}'")
@tvm.testing.fixture
def quantize_expected_output_np(self, expected_output_np, dtype):
if dtype in ("uint8", "int8"):
global ZERO_POINT_M_VAL, SCALE_M_VAL
out_ref_quantized, SCALE_M_VAL, ZERO_POINT_M_VAL = quantize_np(
expected_output_np, dtype
)
# Since output_layout is nchw, no transformation is needed.
return out_ref_quantized
@tvm.testing.requires_hexagon
def test_global_pool2d(
self,
dtype,
input_shape,
input_layout,
METHOD_NAME,
expected_output_np,
quantize_expected_output_np,
hexagon_session,
):
a_tensor = te.placeholder(input_shape, name="a_tensor", dtype=dtype)
if dtype == "float16":
m_tensor = sl.global_avg_pool2d(a_tensor)
tir_schedule = sl.stir_global_avg_pool2d_schedule(m_tensor, a_tensor, input_layout)
elif dtype in ["uint8", "int8"]:
m_tensor = qn.global_avg_pool2d_u8(
a_tensor,
dtype,
ZERO_POINT_VAL,
SCALE_VAL,
ZERO_POINT_M_VAL,
SCALE_M_VAL,
)
tir_schedule = qn.stir_global_avg_pool2d_u8_schedule(m_tensor, a_tensor, input_layout)
sch = tir_schedule.mod
with tvm.transform.PassContext(opt_level=3):
func = tvm.build(
sch,
[a_tensor, m_tensor],
get_hexagon_target("v69"),
name="global_pool2d",
)
input_axis_separator = [4]
a_data_nd = allocate_hexagon_array(
hexagon_session.device,
data=METHOD_NAME,
dtype=dtype,
axis_separators=input_axis_separator,
mem_scope="global.vtcm",
)
m_data_nd = allocate_hexagon_array(
hexagon_session.device,
expected_output_np.shape,
dtype=dtype,
)
mod = hexagon_session.load_module(func)
mod(a_data_nd, m_data_nd)
# Convert nd to np
m_data_np = m_data_nd.numpy()
if dtype == "float16":
np.testing.assert_allclose(expected_output_np, m_data_np, rtol=1e-3, atol=1e-3)
elif dtype in ["int8", "uint8"]:
np.testing.assert_allclose(quantize_expected_output_np, m_data_np, atol=1)
if __name__ == "__main__":
tvm.testing.main() |
5,923 | test args lat | import pytest
from pyroSAR.ancillary import getargs
from pyroSAR.gamma import api
@pytest.mark.skipif('diff' not in dir(api), reason='requires GAMMA installation with module DIFF')
def test_args_diff():
from pyroSAR.gamma.api import diff
assert getargs(diff.gc_map) == ['DEM', 'DEM_par', 'DEM_seg', 'DEM_seg_par', 'MLI_par', 'OFF_par', 'frame',
'inc', 'lat_ovr', 'logpath', 'lon_ovr', 'lookup_table', 'ls_map', 'ls_mode',
'outdir', 'pix', 'psi', 'r_ovr', 'shellscript', 'sim_sar', 'u', 'v']
assert getargs(diff.gc_map_grd) == ['DEM', 'DEM_par', 'DEM_seg', 'DEM_seg_par', 'GRD_par', 'frame', 'inc',
'lat_ovr', 'logpath', 'lon_ovr', 'lookup_table', 'ls_map', 'ls_mode', 'outdir',
'pix', 'psi', 'r_ovr', 'shellscript', 'sim_sar', 'u', 'v']
args = getargs(diff.geocode_back)
args_ref = ['data_in', 'data_out', 'dtype', 'interp_mode', 'logpath', 'lookup_table',
'lr_in', 'lr_out', 'nlines_out', 'order', 'outdir', 'shellscript',
'width_in', 'width_out']
comp = [x in args for x in args_ref]
assert sum(comp) == len(args_ref)
assert getargs(diff.par_EORC_PALSAR_geo) == ['CEOS_data', 'CEOS_leader', 'DEM_par', 'MLI',
'MLI_par', 'cal', 'logpath', 'outdir', 'shellscript']
assert getargs(diff.par_TX_geo) == ['DEM_par', 'GEO', 'GeoTIFF', 'MLI_par', 'annotation_XML',
'logpath', 'outdir', 'pol', 'shellscript']
args = getargs(diff.pixel_area)
args_ref = ['DEM', 'DEM_par', 'MLI_par', 'area_fact', 'inc_map', 'logpath', 'lookup_table',
'ls_map', 'nstep', 'outdir', 'pix_gamma0', 'pix_sigma0', 'shellscript']
comp = [x in args for x in args_ref]
assert sum(comp) == len(args_ref)
@pytest.mark.skipif('disp' not in dir(api), reason='requires GAMMA installation with module DISP')
def test_args_disp():
from pyroSAR.gamma.api import disp
args = getargs(disp.data2geotiff)
args_ref = ['DEM_par', 'GeoTIFF', 'data', 'logpath', 'no_data', 'outdir', 'shellscript',
'type']
comp = [x in args for x in args_ref]
assert sum(comp) == len(args_ref)
@pytest.mark.skipif('isp' not in dir(api), reason='requires GAMMA installation with module ISP')
def test_args_isp():
from pyroSAR.gamma.api import isp
assert getargs(isp.multi_look) == ['MLI', 'MLI_par', 'SLC', 'SLC_par', 'azlks', 'exp', 'loff', 'logpath',
'nlines', 'outdir', 'rlks', 'scale', 'shellscript']
args = getargs(isp.multi_look_MLI)
args_ref = ['MLI_in', 'MLI_in_par', 'MLI_out', 'MLI_out_par', 'azlks', 'loff',
'logpath', 'nlines', 'outdir', 'rlks', 'scale', 'shellscript']
comp = [x in args for x in args_ref]
assert sum(comp) == len(args_ref)
assert getargs(isp.par_ASAR) == ['ASAR_ERS_file', 'K_dB', 'logpath', 'outdir', 'output_name', 'shellscript']
assert getargs(isp.par_EORC_PALSAR) == ['CEOS_data', 'CEOS_leader', 'SLC', 'SLC_par', 'dtype',
'logpath', 'outdir', 'sc_dB', 'shellscript']
assert getargs(isp.par_ESA_ERS) == ['CEOS_DAT', 'CEOS_SAR_leader', 'SLC', 'SLC_par', 'inlist',
'logpath', 'outdir', 'shellscript']
args = getargs(isp.par_S1_GRD)
args_ref = ['GRD', 'GRD_par', 'GeoTIFF', 'MLI', 'MLI_par', 'annotation_XML',
'calibration_XML', 'eflg', 'logpath', 'noise_XML', 'noise_pwr',
'outdir', 'rps', 'shellscript']
comp = [x in args for x in args_ref]
assert sum(comp) == len(args_ref)
assert getargs(isp.par_S1_SLC) == ['GeoTIFF', 'SLC', 'SLC_par', 'TOPS_par', 'annotation_XML', 'calibration_XML',
'dtype', 'logpath', 'noise_XML', 'noise_pwr', 'outdir', 'sc_dB', 'shellscript']
assert getargs(isp.par_TX_GRD) == ['GRD', 'GRD_par', 'GeoTIFF', 'annotation_XML', 'logpath',
'outdir', 'pol', 'shellscript']
assert getargs(isp.par_TX_SLC) == ['COSAR', 'SLC', 'SLC_par', 'annotation_XML', 'dtype',
'logpath', 'outdir', 'pol', 'shellscript']
assert getargs(isp.radcal_MLI) == ['CMLI', 'K_dB', 'MLI', 'MLI_par', 'OFF_par', 'ant_flag', 'antenna', 'logpath',
'outdir', 'pix_area', 'refarea_flag', 'rloss_flag', 'sc_dB', 'shellscript']
assert getargs(isp.radcal_PRI) == ['GRD', 'GRD_par', 'K_dB', 'PRI', 'PRI_par',
'inc_ref', 'loff', 'logpath', 'nl', 'nr',
'outdir', 'roff', 'shellscript']
assert getargs(isp.radcal_SLC) == ['CSLC', 'CSLC_par', 'K_dB', 'SLC', 'SLC_par',
'ant_flag', 'antenna', 'fcase', 'logpath', 'outdir',
'pix_area', 'refarea_flag', 'rloss_flag', 'sc_dB', 'shellscript']
assert getargs(isp.S1_OPOD_vec) == ['OPOD', 'SLC_par', 'logpath', 'nstate', 'outdir', 'shellscript']
args = getargs(isp.SLC_deramp_ScanSAR)
args_ref = ['SLC1_tab', 'SLC2_tab', 'logpath', 'mode', 'outdir',
'phflg', 'shellscript']
comp = [x in args for x in args_ref]
assert sum(comp) == len(args_ref)
args_ref = ['SLC', 'SLCR_tab', 'SLC_par', 'SLC_tab', 'azlks', 'logpath',
'outdir', 'rlks', 'shellscript', 'bflg']
args = getargs(isp.SLC_mosaic_S1_TOPS)
comp = [x in args for x in args_ref]
assert sum(comp) == len(args_ref)
@pytest.mark.skipif('lat' not in dir(api), reason='requires GAMMA installation with module LAT')
def METHOD_NAME():
from pyroSAR.gamma.api import lat
assert getargs(lat.linear_to_dB) == ['data_in', 'data_out', 'inverse_flag', 'logpath', 'null_value', 'outdir',
'shellscript', 'width']
assert getargs(lat.product) == ['bx', 'by', 'data_1', 'data_2', 'logpath', 'outdir', 'product',
'shellscript', 'wgt_flag', 'width']
assert getargs(lat.ratio) == ['bx', 'by', 'd1', 'd2', 'logpath', 'outdir', 'ratio',
'shellscript', 'wgt_flag', 'width']
args = getargs(lat.sigma2gamma)
args_ref = ['gamma0', 'inc', 'logpath', 'outdir', 'sigma0', 'shellscript', 'width']
comp = [x in args for x in args_ref]
assert sum(comp) == len(args_ref) |
5,924 | get | #!/usr/bin/env python
'''settings object for MAVProxy modules'''
import time
class MPSetting:
def __init__(self, name, type, default, label=None, tab=None,
range=None, increment=None, format=None,
digits=None, choice=None):
if label is None:
label = name
self.name = name
self.type = type
self.default = default
self.label = label
self.value = default
self.tab = tab
self.range = range
if range is not None:
# check syntax
(minv, maxv) = range
self.increment = increment
self.choice = choice
self.format = format
self.digits = digits
def describe(self):
if self.choice is not None:
for v in self.choice:
if isinstance(v, tuple):
(v, thisvalue) = v
if thisvalue == self.value:
return "%s(%d)" % (v, thisvalue)
return "%s" % self.value
def set(self, value):
'''set a setting'''
if value == 'None' and self.default is None:
value = None
if value is not None:
if self.type == bool:
if str(value).lower() in ['1', 'true', 'yes']:
value = True
elif str(value).lower() in ['0', 'false', 'no']:
value = False
else:
return False
else:
try:
value = self.type(value)
except:
return False
if self.range is not None:
(minv,maxv) = self.range
if value < minv or value > maxv:
print("Out of range (min=%f max=%f)" % (minv, maxv))
return False
if self.choice is not None:
found = False
options = []
for v in self.choice:
if isinstance(v, tuple):
(v, thisvalue) = v
else:
thisvalue = v
options.append(v)
if isinstance(value, self.type):
if thisvalue == value:
found = True
break
if isinstance(value, str) and v.lower() == value.lower():
found = True
value = thisvalue
break
if not found:
print("Must be one of %s" % str(options))
return False
self.value = value
return True
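# Illustrative MPSetting usage (example values):
#   s = MPSetting('speed', float, 1.0, range=(0.0, 10.0), increment=0.5)
#   s.set('2.5')   # returns True, s.value == 2.5
#   s.set('42')    # returns False: out of range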
class MPSettings(object):
def __init__(self, vars, title='Settings'):
self._vars = {}
self._title = title
self._default_tab = 'Settings'
self._keys = []
self._callback = None
self._last_change = time.time()
for v in vars:
self.append(v)
def get_title(self):
'''return the title'''
return self._title
def get_setting(self, name):
'''return a MPSetting object'''
return self._vars[name]
def append(self, v):
'''add a new setting'''
if isinstance(v, MPSetting):
setting = v
else:
(name,type,default) = v
label = name
tab = None
if len(v) > 3:
label = v[3]
if len(v) > 4:
tab = v[4]
setting = MPSetting(name, type, default, label=label, tab=tab)
# when a tab name is set, cascade it to future settings
if setting.tab is None:
setting.tab = self._default_tab
else:
self._default_tab = setting.tab
self._vars[setting.name] = setting
self._keys.append(setting.name)
self._last_change = time.time()
def __getattr__(self, name):
try:
return self._vars[name].value
except Exception:
raise AttributeError
def __setattr__(self, name, value):
if name[0] == '_':
self.__dict__[name] = value
return
if name in self._vars:
self._vars[name].value = value
return
raise AttributeError
def set(self, name, value):
'''set a setting'''
if not name in self._vars:
raise AttributeError
setting = self._vars[name]
oldvalue = setting.value
if not setting.set(value):
print("Unable to set %s (want type=%s)" % (value, setting.type))
return False
if oldvalue != setting.value:
self._last_change = time.time()
if self._callback:
self._callback(setting)
return True
def METHOD_NAME(self, name):
'''get a setting'''
if not name in self._vars:
raise AttributeError
setting = self._vars[name]
return setting.value
def show(self, v):
'''show settings'''
print("%20s %s" % (v, self._vars[v].describe()))
def show_all(self):
'''show all settings'''
for setting in sorted(self._vars):
self.show(setting)
def list(self):
'''list all settings'''
return self._keys
def completion(self, text):
'''completion function for cmdline completion'''
return self.list()
def command(self, args):
'''control options from cmdline'''
if len(args) == 0:
self.show_all()
return
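        # [None] is a unique sentinel: __getattr__ raises AttributeError for
        # unknown names, so getattr returns the sentinel only when the setting
        # does not exist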
if getattr(self, args[0], [None]) == [None]:
print("Unknown setting '%s'" % args[0])
return
if len(args) == 1:
self.show(args[0])
else:
self.set(args[0], args[1])
def set_callback(self, callback):
'''set a callback to be called on set()'''
self._callback = callback
def save(self, filename):
'''save settings to a file. Return True/False on success/failure'''
try:
f = open(filename, mode='w')
except Exception:
return False
for k in self.list():
f.write("%s=%s\n" % (k, self.METHOD_NAME(k)))
f.close()
return True
def load(self, filename):
'''load settings from a file. Return True/False on success/failure'''
try:
f = open(filename, mode='r')
except Exception:
return False
while True:
line = f.readline()
if not line:
break
line = line.rstrip()
eq = line.find('=')
if eq == -1:
continue
name = line[:eq]
value = line[eq+1:]
self.set(name, value)
f.close()
return True
def last_change(self):
'''return last change time'''
return self._last_change |
5,925 | update | import time
import numpy as np
from openpilot.common.realtime import DT_MDL
from openpilot.common.numpy_fast import interp
from openpilot.system.swaglog import cloudlog
from openpilot.selfdrive.controls.lib.lateral_mpc_lib.lat_mpc import LateralMpc
from openpilot.selfdrive.controls.lib.lateral_mpc_lib.lat_mpc import N as LAT_MPC_N
from openpilot.selfdrive.controls.lib.drive_helpers import CONTROL_N, MIN_SPEED, get_speed_error
from openpilot.selfdrive.controls.lib.desire_helper import DesireHelper
import cereal.messaging as messaging
from cereal import log
TRAJECTORY_SIZE = 33
CAMERA_OFFSET = 0.04
PATH_COST = 1.0
LATERAL_MOTION_COST = 0.11
LATERAL_ACCEL_COST = 0.0
LATERAL_JERK_COST = 0.04
# Extreme steering rate is unpleasant, even
# when it does not cause bad jerk.
# TODO this cost should be lowered when low
# speed lateral control is stable on all cars
STEERING_RATE_COST = 700.0
class LateralPlanner:
def __init__(self, CP, debug=False):
self.DH = DesireHelper()
# Vehicle model parameters used to calculate lateral movement of car
self.factor1 = CP.wheelbase - CP.centerToFront
self.factor2 = (CP.centerToFront * CP.mass) / (CP.wheelbase * CP.tireStiffnessRear)
self.last_cloudlog_t = 0
self.solution_invalid_cnt = 0
self.path_xyz = np.zeros((TRAJECTORY_SIZE, 3))
self.velocity_xyz = np.zeros((TRAJECTORY_SIZE, 3))
self.plan_yaw = np.zeros((TRAJECTORY_SIZE,))
self.plan_yaw_rate = np.zeros((TRAJECTORY_SIZE,))
self.t_idxs = np.arange(TRAJECTORY_SIZE)
self.y_pts = np.zeros((TRAJECTORY_SIZE,))
self.v_plan = np.zeros((TRAJECTORY_SIZE,))
self.v_ego = 0.0
self.l_lane_change_prob = 0.0
self.r_lane_change_prob = 0.0
self.d_path_w_lines_xyz = np.zeros((TRAJECTORY_SIZE, 3))
self.debug_mode = debug
self.lat_mpc = LateralMpc()
self.reset_mpc(np.zeros(4))
def reset_mpc(self, x0=None):
if x0 is None:
x0 = np.zeros(4)
self.x0 = x0
self.lat_mpc.reset(x0=self.x0)
def METHOD_NAME(self, sm):
    # clip speed: lateral planning is not possible at 0 speed
measured_curvature = sm['controlsState'].curvature
v_ego_car = sm['carState'].vEgo
# Parse model predictions
md = sm['modelV2']
if len(md.position.x) == TRAJECTORY_SIZE and len(md.orientation.x) == TRAJECTORY_SIZE:
self.path_xyz = np.column_stack([md.position.x, md.position.y, md.position.z])
self.t_idxs = np.array(md.position.t)
self.plan_yaw = np.array(md.orientation.z)
self.plan_yaw_rate = np.array(md.orientationRate.z)
self.velocity_xyz = np.column_stack([md.velocity.x, md.velocity.y, md.velocity.z])
car_speed = np.linalg.norm(self.velocity_xyz, axis=1) - get_speed_error(md, v_ego_car)
self.v_plan = np.clip(car_speed, MIN_SPEED, np.inf)
self.v_ego = self.v_plan[0]
# Lane change logic
desire_state = md.meta.desireState
if len(desire_state):
self.l_lane_change_prob = desire_state[log.LateralPlan.Desire.laneChangeLeft]
self.r_lane_change_prob = desire_state[log.LateralPlan.Desire.laneChangeRight]
lane_change_prob = self.l_lane_change_prob + self.r_lane_change_prob
self.DH.METHOD_NAME(sm['carState'], sm['carControl'].latActive, lane_change_prob)
self.lat_mpc.set_weights(PATH_COST, LATERAL_MOTION_COST,
LATERAL_ACCEL_COST, LATERAL_JERK_COST,
STEERING_RATE_COST)
y_pts = self.path_xyz[:LAT_MPC_N+1, 1]
heading_pts = self.plan_yaw[:LAT_MPC_N+1]
yaw_rate_pts = self.plan_yaw_rate[:LAT_MPC_N+1]
self.y_pts = y_pts
assert len(y_pts) == LAT_MPC_N + 1
assert len(heading_pts) == LAT_MPC_N + 1
assert len(yaw_rate_pts) == LAT_MPC_N + 1
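    # The expression below appears to come from the dynamic bicycle model:
    # factor1 is the CG-to-rear-axle distance and factor2 * v^2 is a
    # speed-dependent tire-slip term; the result is clipped to stay non-negative.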
lateral_factor = np.clip(self.factor1 - (self.factor2 * self.v_plan**2), 0.0, np.inf)
p = np.column_stack([self.v_plan, lateral_factor])
self.lat_mpc.run(self.x0,
p,
y_pts,
heading_pts,
yaw_rate_pts)
# init state for next iteration
# mpc.u_sol is the desired second derivative of psi given x0 curv state.
# with x0[3] = measured_yaw_rate, this would be the actual desired yaw rate.
# instead, interpolate x_sol so that x0[3] is the desired yaw rate for lat_control.
self.x0[3] = interp(DT_MDL, self.t_idxs[:LAT_MPC_N + 1], self.lat_mpc.x_sol[:, 3])
# Check for infeasible MPC solution
mpc_nans = np.isnan(self.lat_mpc.x_sol[:, 3]).any()
t = time.monotonic()
if mpc_nans or self.lat_mpc.solution_status != 0:
self.reset_mpc()
self.x0[3] = measured_curvature * self.v_ego
if t > self.last_cloudlog_t + 5.0:
self.last_cloudlog_t = t
cloudlog.warning("Lateral mpc - nan: True")
if self.lat_mpc.cost > 1e6 or mpc_nans:
self.solution_invalid_cnt += 1
else:
self.solution_invalid_cnt = 0
def publish(self, sm, pm):
plan_solution_valid = self.solution_invalid_cnt < 2
plan_send = messaging.new_message('lateralPlan')
plan_send.valid = sm.all_checks(service_list=['carState', 'controlsState', 'modelV2'])
lateralPlan = plan_send.lateralPlan
lateralPlan.modelMonoTime = sm.logMonoTime['modelV2']
lateralPlan.dPathPoints = self.y_pts.tolist()
lateralPlan.psis = self.lat_mpc.x_sol[0:CONTROL_N, 2].tolist()
lateralPlan.curvatures = (self.lat_mpc.x_sol[0:CONTROL_N, 3]/self.v_ego).tolist()
lateralPlan.curvatureRates = [float(x.item() / self.v_ego) for x in self.lat_mpc.u_sol[0:CONTROL_N - 1]] + [0.0]
lateralPlan.mpcSolutionValid = bool(plan_solution_valid)
lateralPlan.solverExecutionTime = self.lat_mpc.solve_time
if self.debug_mode:
lateralPlan.solverCost = self.lat_mpc.cost
lateralPlan.solverState = log.LateralPlan.SolverState.new_message()
lateralPlan.solverState.x = self.lat_mpc.x_sol.tolist()
lateralPlan.solverState.u = self.lat_mpc.u_sol.flatten().tolist()
lateralPlan.desire = self.DH.desire
lateralPlan.useLaneLines = False
lateralPlan.laneChangeState = self.DH.lane_change_state
lateralPlan.laneChangeDirection = self.DH.lane_change_direction
lateralPlan.laneChangePrev = self.DH.prev_lane_change
lateralPlan.dPathWLinesX = [float(x) for x in self.d_path_w_lines_xyz[:, 0]]
lateralPlan.dPathWLinesY = [float(y) for y in self.d_path_w_lines_xyz[:, 1]]
pm.send('lateralPlan', plan_send) |
5,926 | response generator | import argparse
from io import BytesIO
import zlib
from flask import Flask, Response
import numpy as np
from PIL import Image
from drake import lcmt_image, lcmt_image_array
from pydrake.lcm import DrakeLcm
from pydrake.systems.sensors import ImageDepth32F, ImageLabel16I, ImageRgba8U
from pydrake.visualization import ColorizeDepthImage, ColorizeLabelImage
class _ImageServer(Flask):
"""Streams images via the HTTP protocol given an image source. The image
source, i.e., `image_generator`, should be a generator function that yields
a (mime_type, image_data) pair. The `mime-type` is a str, and the
`image_data` is bytes representing the image.
"""
def __init__(self, *, image_generator):
super().__init__("meldis_lcm_image_viewer")
self.add_url_rule("/", view_func=self._serve_image)
self._image_generator = image_generator
def _serve_image(self):
return Response(
self.METHOD_NAME(),
mimetype="multipart/x-mixed-replace; boundary=frame",
)
def METHOD_NAME(self):
for mime_type, image_data in self._image_generator():
yield (
b"--frame\r\nContent-Type: "
+ mime_type.encode("utf-8")
+ b"\r\n\r\n"
+ image_data
+ b"\r\n"
)
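# Hypothetical sketch (not part of the original module): wiring _ImageServer to
# a generator. Each yielded (mime_type, bytes) pair becomes one part of the
# multipart/x-mixed-replace response assembled above.
def _demo_image_server():
    def one_pixel_pngs():
        buf = BytesIO()
        Image.new("RGB", (1, 1)).save(buf, format="png")
        while True:
            yield ("image/png", buf.getvalue())
    return _ImageServer(image_generator=one_pixel_pngs)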
class LcmImageArrayViewer:
"""Displays LCM images to an URL. The program waits for `lcmt_image_array`
messages from a particular channel and processes them to image files. It
contains a flask server, _ImageServer, that grabs images whenever available
and broadcasts them to an URL for visualization.
"""
_IMAGE_DATA_TYPE = {
lcmt_image.CHANNEL_TYPE_UINT8: np.uint8,
lcmt_image.CHANNEL_TYPE_INT16: np.int16,
lcmt_image.CHANNEL_TYPE_FLOAT32: np.float32,
}
"""The mapping from `lcmt_image` channel_type enum to numpy data type."""
_IMAGE_CHANNEL_NUM = {
lcmt_image.PIXEL_FORMAT_RGBA: 4,
lcmt_image.PIXEL_FORMAT_DEPTH: 1,
lcmt_image.PIXEL_FORMAT_LABEL: 1,
}
"""The mapping from `lcmt_image` pixel_format enum to the number of
channels.
"""
def __init__(self, *, host, port, channel, unit_test=False):
# Only the latest message from LCM is kept.
self._latest_message = None
# Subscribe to the channel.
self._lcm = DrakeLcm()
self._lcm.Subscribe(channel=channel, handler=self._update_message)
# Helpers to convert images to aid visualization.
self._colorized_label = ColorizeLabelImage()
self._colorized_depth = ColorizeDepthImage()
# Instantiate an `_ImageServer` and run it. If `unit_test` is True, the
# server will not be launched.
if not unit_test:
self._image_server = _ImageServer(
image_generator=self.image_generator
)
self._image_server.run(
host=host, port=port, debug=False, threaded=False
)
def image_generator(self):
mime_type = "image/png"
while True:
self._lcm.HandleSubscriptions(timeout_millis=1000)
if self._latest_message is not None:
new_image = self._process_message()
self._latest_message = None
yield (mime_type, new_image)
def _update_message(self, message):
self._latest_message = message
def _process_message(self):
"""Processes the latest lcmt_image_array message into a single PNG
image. Depth and label images will be colorized to color images for
visualization. If the LCM message contains multiple images, they will
be concatenated together horizontally.
"""
image_array = lcmt_image_array.decode(self._latest_message)
assert len(image_array.images) > 0
rgba_images = []
for image in image_array.images:
w = image.width
h = image.height
data_type = self._IMAGE_DATA_TYPE[image.channel_type]
num_channels = self._IMAGE_CHANNEL_NUM[image.pixel_format]
bytes_per_pixel = np.dtype(data_type).itemsize * num_channels
assert image.row_stride == w * bytes_per_pixel, image.row_stride
if (
image.compression_method
== lcmt_image.COMPRESSION_METHOD_NOT_COMPRESSED
):
data_bytes = image.data
elif (
image.compression_method == lcmt_image.COMPRESSION_METHOD_ZLIB
):
# TODO(eric): Consider using `data`s buffer, if possible.
# Can decompress() somehow use an existing buffer in Python?
data_bytes = zlib.decompress(image.data)
else:
raise RuntimeError(
f"Unsupported compression type:{image.compression_method}"
)
np_image_data = np.frombuffer(data_bytes, dtype=data_type)
rgba = ImageRgba8U(w, h)
if image.pixel_format == lcmt_image.PIXEL_FORMAT_RGBA:
rgba.mutable_data[:] = np_image_data.reshape(h, w, 4)
elif image.pixel_format == lcmt_image.PIXEL_FORMAT_LABEL:
label = ImageLabel16I(w, h)
label.mutable_data[:] = np_image_data.reshape(h, w, 1)
self._colorized_label._colorize_label_image(label, rgba)
elif image.pixel_format == lcmt_image.PIXEL_FORMAT_DEPTH:
depth = ImageDepth32F(w, h)
depth.mutable_data[:] = np_image_data.reshape(h, w, 1)
self._colorized_depth._colorize_depth_image(depth, rgba)
rgba_images.append(rgba)
# Stack the images horizontally.
np_concatenated_image = self._concatenate_images(
rgba_images, rows=1, cols=len(rgba_images)
)
# Save the image in-memory.
pil_image = Image.fromarray(np_concatenated_image)
buffer = BytesIO()
pil_image.save(buffer, format="png", compress_level=0)
return buffer.getbuffer()
@staticmethod
def _concatenate_images(images, rows, cols):
"""Helper function to concatenate multiple images. It is assumed that
        `images` is a list of systems::sensors::Image objects of the same size.
"""
assert len(images) == rows * cols
col_images = []
for r in range(rows):
row_images = []
for c in range(cols):
image = images[r * cols + c]
row_images.append(image.data)
row_image = np.hstack(row_images)
col_images.append(row_image)
return np.vstack(col_images)
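# Illustrative check (assumption: any object exposing a numpy `.data` attribute
# works, mirroring the systems::sensors::Image interface used above).
def _demo_concatenate_images():
    from types import SimpleNamespace
    tiles = [SimpleNamespace(data=np.full((2, 2, 4), i, dtype=np.uint8))
             for i in range(3)]
    grid = LcmImageArrayViewer._concatenate_images(tiles, rows=1, cols=3)
    assert grid.shape == (2, 6, 4)  # one row of three 2x2 RGBA tiles
    return grid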
def main():
parser = argparse.ArgumentParser(
description=__doc__,
)
parser.add_argument(
"--host",
type=str,
required=False,
default="127.0.0.1",
help="URL to host on, default: 127.0.0.1.",
)
parser.add_argument(
"--port",
type=int,
required=False,
default=8000,
help="Port to host on, default: 8000.",
)
parser.add_argument(
"--channel",
type=str,
required=True,
help="The LCM channel to subscribe to.",
)
args = parser.parse_args()
image_array_viewer = LcmImageArrayViewer(
host=args.host, port=args.port, channel=args.channel
)
if __name__ == "__main__":
main() |
5,927 | test propagation credentials endpoint put not found | import json
from http import HTTPStatus
from typing import Sequence
from urllib.parse import urljoin
import pytest
from tests.common import StubDIContainer
from tests.data_for_tests.propagation_credentials import LM_HASH, NT_HASH, PASSWORD_1, PASSWORD_2
from tests.monkey_island import InMemoryCredentialsRepository
from common.credentials import Credentials, LMHash, NTHash, Password
from monkey_island.cc.repositories import ICredentialsRepository
from monkey_island.cc.resources import PropagationCredentials
from monkey_island.cc.resources.propagation_credentials import (
_configured_collection,
_stolen_collection,
)
ALL_CREDENTIALS_URL = PropagationCredentials.urls[0]
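# Note: the trailing "/" passed to urljoin below matters. urljoin("/api/creds", "x")
# replaces the last path segment ("/api/x"), while urljoin("/api/creds/", "x")
# appends to it ("/api/creds/x").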
CONFIGURED_CREDENTIALS_URL = urljoin(ALL_CREDENTIALS_URL + "/", _configured_collection)
STOLEN_CREDENTIALS_URL = urljoin(ALL_CREDENTIALS_URL + "/", _stolen_collection)
CREDENTIALS_1 = Credentials(identity=None, secret=Password(password=PASSWORD_1))
CREDENTIALS_2 = Credentials(identity=None, secret=LMHash(lm_hash=LM_HASH))
CREDENTIALS_3 = Credentials(identity=None, secret=NTHash(nt_hash=NT_HASH))
CREDENTIALS_4 = Credentials(identity=None, secret=Password(password=PASSWORD_2))
@pytest.fixture
def credentials_repository():
return InMemoryCredentialsRepository()
@pytest.fixture
def flask_client(build_flask_client, credentials_repository):
container = StubDIContainer()
container.register_instance(ICredentialsRepository, credentials_repository)
with build_flask_client(container) as flask_client:
yield flask_client
def test_propagation_credentials_endpoint_get(flask_client, credentials_repository):
credentials_repository.save_configured_credentials([CREDENTIALS_1, CREDENTIALS_2])
credentials_repository.save_stolen_credentials([CREDENTIALS_3, CREDENTIALS_4])
resp = flask_client.get(ALL_CREDENTIALS_URL)
actual_propagation_credentials = [Credentials(**creds) for creds in resp.json]
assert resp.status_code == HTTPStatus.OK
assert len(actual_propagation_credentials) == 4
assert CREDENTIALS_1 in actual_propagation_credentials
assert CREDENTIALS_2 in actual_propagation_credentials
assert CREDENTIALS_3 in actual_propagation_credentials
assert CREDENTIALS_4 in actual_propagation_credentials
def pre_populate_repository(
url: str, credentials_repository: ICredentialsRepository, credentials: Sequence[Credentials]
):
if "configured" in url:
credentials_repository.save_configured_credentials(credentials)
else:
credentials_repository.save_stolen_credentials(credentials)
@pytest.mark.parametrize("url", [CONFIGURED_CREDENTIALS_URL, STOLEN_CREDENTIALS_URL])
def test_propagation_credentials_endpoint__get_stolen(flask_client, credentials_repository, url):
pre_populate_repository(url, credentials_repository, [CREDENTIALS_1, CREDENTIALS_2])
resp = flask_client.get(url)
actual_propagation_credentials = [Credentials(**creds) for creds in resp.json]
assert resp.status_code == HTTPStatus.OK
assert len(actual_propagation_credentials) == 2
assert actual_propagation_credentials[0].secret.password == PASSWORD_1
assert actual_propagation_credentials[1].secret.lm_hash == LM_HASH
def test_configured_propagation_credentials_endpoint_put(flask_client, credentials_repository):
pre_populate_repository(
CONFIGURED_CREDENTIALS_URL,
credentials_repository,
[CREDENTIALS_1, CREDENTIALS_2],
)
resp = flask_client.put(CONFIGURED_CREDENTIALS_URL, json=[])
assert resp.status_code == HTTPStatus.NO_CONTENT
resp = flask_client.get(CONFIGURED_CREDENTIALS_URL)
assert len(json.loads(resp.text)) == 0
def test_stolen_propagation_credentials_endpoint__put_not_allowed(flask_client):
resp = flask_client.put(STOLEN_CREDENTIALS_URL, json=[])
assert resp.status_code == HTTPStatus.METHOD_NOT_ALLOWED
def test_all_propagation_credentials_endpoint__put_not_allowed(flask_client):
resp = flask_client.put(ALL_CREDENTIALS_URL, json=[])
assert resp.status_code == HTTPStatus.METHOD_NOT_ALLOWED
NON_EXISTENT_COLLECTION_URL = urljoin(ALL_CREDENTIALS_URL + "/", "bogus-credentials")
def test_propagation_credentials_endpoint__get_not_found(flask_client):
resp = flask_client.get(NON_EXISTENT_COLLECTION_URL)
assert resp.status_code == HTTPStatus.NOT_FOUND
def METHOD_NAME(flask_client):
resp = flask_client.put(NON_EXISTENT_COLLECTION_URL, json=[])
assert resp.status_code == HTTPStatus.NOT_FOUND |
5,928 | test encode | # SPDX-FileCopyrightText: 2022 James R. Barlow
# SPDX-License-Identifier: CC0-1.0
from __future__ import annotations
from io import BytesIO
from pathlib import Path
import pytest
from hypothesis import example, given, settings
from hypothesis.strategies import binary, characters, text
import pikepdf.codec
def test_encodable_table():
for ordnum in pikepdf.codec.PDFDOC_ENCODABLE:
char = chr(ordnum)
pdfdoc_encoded = char.encode('pdfdoc')
involuted = pdfdoc_encoded.decode('pdfdoc')
assert char == involuted
def METHOD_NAME():
assert 'abc'.encode('pdfdoc') == b'abc'
with pytest.raises(UnicodeEncodeError):
'你好'.encode('pdfdoc')
assert '你好 world'.encode('pdfdoc', 'replace') == b'?? world'
assert '你好 world'.encode('pdfdoc', 'ignore') == b' world'
def test_decode():
assert b'A'.decode('pdfdoc') == 'A'
assert b'\xa0'.decode('pdfdoc') == '€'
def test_unicode_surrogate():
with pytest.raises(UnicodeEncodeError, match=r'surrogate'):
'\ud800'.encode('pdfdoc')
@given(binary())
@example(b'\x9f')
@example(b'\xfe\xff')
@example(b'\xff\xfe')
def test_codec_involution(b):
# For most binary strings, there is a pdfdoc decoding and the encoding of that
# decoding recovers the initial string.
try:
assert b.decode('pdfdoc').encode('pdfdoc') == b
except UnicodeDecodeError as e:
# 0x7f, 0x9f, and 0xad have no defined mapping to Unicode, so we expect
        # strings containing them to raise a decoding exception
assert set(e.object[e.start : e.end]) & set(b'\x7f\x9f\xad')
except UnicodeEncodeError as e:
assert "'pdfdoc' codec can't encode characters in position 0-1" in str(e)
assert b.startswith(b'\xfe\xff') or b.startswith(b'\xff\xfe')
@given(text())
@example('\xfe\xff')
def test_break_encode(s):
try:
encoded_bytes = s.encode('pdfdoc')
except ValueError as e:
allowed_errors = [
"'pdfdoc' codec can't encode character",
"'pdfdoc' codec can't process Unicode surrogates",
"'pdfdoc' codec can't encode some characters",
]
if any((allowed in str(e)) for allowed in allowed_errors):
return
raise
else:
try:
assert encoded_bytes.decode('pdfdoc') == s, "encode -> decode failed"
except UnicodeDecodeError as e:
if "can't decode byte 0x9f" in str(e):
return
raise
# whitelist_categories ensures that the listed Unicode categories will be produced
# whitelist_characters adds further characters (everything that is pdfdoc encodable)
# We specifically add Cs, surrogates, which pybind11 needs extra help with.
pdfdoc_text = text(
alphabet=characters(
whitelist_categories=('N', 'L', 'M', 'P', 'Cs'),
whitelist_characters=[chr(c) for c in pikepdf.codec.PDFDOC_ENCODABLE],
),
max_size=1000,
)
@given(pdfdoc_text)
@example('\r\n')
@example('\r')
@example('\n')
@settings(deadline=4000)  # CI workers can be flaky
def test_open_encoding_pdfdoc_write(tmp_path_factory, s):
folder = tmp_path_factory.mktemp('pdfdoc')
txt = folder / 'pdfdoc.txt'
with open(txt, 'w', encoding='pdfdoc', newline='') as f:
try:
f.write(s)
except UnicodeEncodeError:
return
assert txt.read_bytes() == s.encode('pdfdoc')
@given(pdfdoc_text)
@settings(deadline=4000)  # CI workers can be flaky
@example('\r\n')
@example('\r')
@example('\n')
def test_open_encoding_pdfdoc_read(tmp_path_factory, s: str):
folder = tmp_path_factory.mktemp('pdfdoc')
txt: Path = folder / 'pdfdoc.txt'
with open(txt, 'w', encoding='pdfdoc', newline='') as f:
try:
f.write(s)
except UnicodeEncodeError:
return
with open(txt, encoding='pdfdoc', newline='') as f:
result: str = f.read()
assert result == s
@given(pdfdoc_text)
def test_stream_writer(s):
bio = BytesIO()
sw = pikepdf.codec.PdfDocStreamWriter(bio)
try:
sw.write(s)
except UnicodeEncodeError:
return
bio.seek(0)
data = bio.read()
assert data == s.encode('pdfdoc')
@given(pdfdoc_text)
def test_stream_reader(s):
try:
bio = BytesIO(s.encode('pdfdoc_pikepdf'))
except UnicodeEncodeError:
return
sr = pikepdf.codec.PdfDocStreamReader(bio)
result = sr.read()
assert result == s |
5,929 | test get none | from __future__ import annotations
import unittest
from functools import partial
from typing import Any, MutableMapping
from unittest.mock import Mock, patch
import pytest
from sentry.testutils.cases import TestCase
from sentry.utils.canonical import CanonicalKeyDict
from sentry.utils.safe import (
get_path,
safe_execute,
safe_urlencode,
set_path,
setdefault_path,
trim,
)
a_very_long_string = "a" * 1024
class TrimTest(unittest.TestCase):
def test_simple_string(self):
assert trim(a_very_long_string) == a_very_long_string[:509] + "..."
def test_list_of_strings(self):
assert trim([a_very_long_string, a_very_long_string]) == [a_very_long_string[:507] + "..."]
def test_nonascii(self):
assert trim({"x": "\xc3\xbc"}) == {"x": "\xc3\xbc"}
assert trim(["x", "\xc3\xbc"]) == ["x", "\xc3\xbc"]
def test_idempotent(self):
trm = partial(trim, max_depth=2)
a = {"a": {"b": {"c": {"d": 1}}}}
assert trm(a) == {"a": {"b": {"c": '{"d":1}'}}}
assert trm(trm(trm(trm(a)))) == trm(a)
def test_sorted_trim(self):
# Trim should always trim the keys in alpha order
# regardless of the original order.
alpha = {"a": "12345", "z": "12345"}
reverse = {"z": "12345", "a": "12345"}
trm = partial(trim, max_size=12)
expected = {"a": "12345", "z": "1..."}
assert trm(alpha) == expected
assert trm(reverse) == expected
def test_max_depth(self):
trm = partial(trim, max_depth=2)
a: dict[str, Any] = {"a": {"b": {"c": "d"}}}
assert trm(a) == a
a = {"a": {"b": {"c": "d"}}}
assert trm(a) == {"a": {"b": {"c": "d"}}}
a = {"a": {"b": {"c": {"d": "e"}}}}
assert trm(a) == {"a": {"b": {"c": '{"d":"e"}'}}}
a = {"a": {"b": {"c": []}}}
assert trm(a) == {"a": {"b": {"c": "[]"}}}
class SafeExecuteTest(TestCase):
def test_with_nameless_function(self):
assert safe_execute(lambda a: a, 1) == 1
assert safe_execute(lambda: eval("a")) is None
def test_with_simple_function(self):
def simple(a):
return a
assert safe_execute(simple, 1) == 1
def test_with_simple_function_raising_exception(self):
def simple(a):
raise Exception()
assert safe_execute(simple, 1) is None
def test_with_instance_method(self):
class Foo:
def simple(self, a):
return a
assert safe_execute(Foo().simple, 1) == 1
def test_with_instance_method_raising_exception(self):
class Foo:
def simple(self, a):
raise Exception()
assert safe_execute(Foo().simple, 1) is None
@patch("sentry.utils.safe.logging.getLogger")
def test_with_expected_errors(self, mock_get_logger):
mock_log = Mock()
mock_get_logger.return_value = mock_log
def simple(a):
raise ValueError()
assert safe_execute(simple, 1, expected_errors=(ValueError,)) is None
assert mock_log.info.called
assert mock_log.error.called is False
class GetPathTest(unittest.TestCase):
def METHOD_NAME(self):
assert get_path(None, "foo") is None
assert get_path("foo", "foo") is None
assert get_path(42, "foo") is None # type: ignore[arg-type]
assert get_path(ValueError(), "foo") is None # type: ignore[arg-type]
assert get_path(True, "foo") is None # type: ignore[arg-type]
def test_get_path_dict(self):
assert get_path({}, "a") is None
assert get_path({"a": 2}, "a") == 2
assert get_path({"a": 2}, "b") is None
assert get_path({"a": {"b": []}}, "a", "b") == []
assert get_path({"a": []}, "a", "b") is None
assert get_path(CanonicalKeyDict({"a": 2}), "a") == 2
def test_get_default(self):
assert get_path({"a": 2}, "b", default=1) == 1
assert get_path({"a": 2}, "a", default=1) == 2
assert get_path({"a": None}, "a", default=1) == 1
def test_get_path_list(self):
arr = [1, 2]
assert get_path(arr, 1) == 2
assert get_path(arr, -1) == 2
assert get_path(arr, 2) is None
assert get_path(arr, "1") is None
assert get_path([], 1) is None
assert get_path({"items": [2]}, "items", 0) == 2
def test_filter_list(self):
data = {"a": [False, 1, None]}
assert get_path(data, "a", filter=True) == [False, 1]
assert get_path(data, "a", filter=lambda x: x) == [1]
def test_filter_tuple(self):
data = {"a": (False, 1, None)}
assert get_path(data, "a", filter=True) == [False, 1]
assert get_path(data, "a", filter=lambda x: x) == [1]
def test_filter_other(self):
assert get_path({"a": 42}, "a", filter=True) == 42
assert get_path({"a": True}, "a", filter=True) is True
assert get_path({"a": {"b": 42}}, "a", filter=True) == {"b": 42}
assert get_path({"a": 42}, "b", filter=True) is None
# We use get_path to process Event's Http's query_strings to remove Nones
# (which can occur as a result of normalization and datascrubbing).
assert get_path([["foo", "bar"], None], filter=True) == [["foo", "bar"]]
def test_kwargs(self):
with pytest.raises(TypeError):
get_path({}, "foo", unknown=True)
class SetPathTest(unittest.TestCase):
def test_set_none(self):
assert not set_path(None, "foo", value=42)
assert not set_path("foo", "foo", value=42)
assert not set_path(42, "foo", value=42)
assert not set_path(ValueError(), "foo", value=42)
assert not set_path(True, "foo", value=42)
def test_set_dict(self):
data: MutableMapping[str, Any] = {}
assert set_path(data, "a", value=42)
assert data == {"a": 42}
data = {"a": 2}
assert set_path(data, "a", value=42)
assert data == {"a": 42}
data = {}
assert set_path(data, "a", "b", value=42)
assert data == {"a": {"b": 42}}
data = CanonicalKeyDict({})
assert set_path(data, "a", value=42)
assert data == {"a": 42}
def test_set_default(self):
data = {"a": {"b": 2}}
assert not setdefault_path(data, "a", "b", value=42)
assert data == {"a": {"b": 2}}
data = {}
assert setdefault_path(data, "a", "b", value=42)
assert data == {"a": {"b": 42}}
def test_kwargs(self):
with pytest.raises(TypeError):
set_path({}, "foo")
with pytest.raises(TypeError):
set_path({}, "foo", value=1, unknown=True)
class SafeUrlencodeTest(unittest.TestCase):
def test_dict(self):
d = {"1": None, "3": "4"}
assert safe_urlencode(d) == "1=&3=4"
assert d == {"1": None, "3": "4"}
d = {"1": "2", "3": "4"}
assert safe_urlencode(d) == "1=2&3=4"
def test_pair_sequence(self):
d = [["1", None], ["3", "4"]]
assert safe_urlencode(d) == "1=&3=4"
assert d == [["1", None], ["3", "4"]]
d = [["1", "2"], ["3", "4"]]
assert safe_urlencode(d) == "1=2&3=4" |
5,930 | blocked path | import json
import os
import secrets
from django.conf import settings
from django.utils.functional import cached_property
from filtercascade import FilterCascade
from filtercascade.fileformats import HashAlgorithm
import olympia.core.logger
from olympia.amo.utils import SafeStorage
from olympia.constants.blocklist import BASE_REPLACE_THRESHOLD
log = olympia.core.logger.getLogger('z.amo.blocklist')
def generate_mlbf(stats, blocked, not_blocked):
    log.info('Starting to generate bloom filter')
cascade = FilterCascade(
defaultHashAlg=HashAlgorithm.SHA256,
salt=secrets.token_bytes(16),
)
error_rates = sorted((len(blocked), len(not_blocked)))
cascade.set_crlite_error_rates(
include_len=error_rates[0], exclude_len=error_rates[1]
)
stats['mlbf_blocked_count'] = len(blocked)
stats['mlbf_notblocked_count'] = len(not_blocked)
cascade.initialize(include=blocked, exclude=not_blocked)
stats['mlbf_version'] = cascade.version
stats['mlbf_layers'] = cascade.layerCount()
stats['mlbf_bits'] = cascade.bitCount()
log.info(
        f'Filter cascade layers: {cascade.layerCount()}, bits: {cascade.bitCount()}'
)
cascade.verify(include=blocked, exclude=not_blocked)
return cascade
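# Minimal sketch of driving generate_mlbf directly (illustrative; real callers
# go through MLBF.generate_and_write_filter below). The inputs are the hashed
# "guid:version" strings produced by MLBF.hash_filter_inputs.
def _demo_generate_mlbf():
    stats = {}
    cascade = generate_mlbf(
        stats,
        blocked=['addon@example:1.0'],
        not_blocked=['addon@example:2.0', 'other@example:1.0'],
    )
    return cascade, stats  # stats now holds the mlbf_* counters set above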
def fetch_blocked_from_db():
from olympia.blocklist.models import BlockVersion
qs = BlockVersion.objects.filter(version__file__is_signed=True).values_list(
'block__guid', 'version__version', 'version_id', named=True
)
all_versions = {
block_version.version_id: (
block_version.block__guid,
block_version.version__version,
)
for block_version in qs
}
return all_versions
def fetch_all_versions_from_db(excluding_version_ids=None):
from olympia.versions.models import Version
qs = Version.unfiltered.exclude(id__in=excluding_version_ids or ()).values_list(
'addon__addonguid__guid', 'version'
)
return list(qs)
class MLBF:
KEY_FORMAT = '{guid}:{version}'
def __init__(self, id_):
# simplify later code by assuming always a string
self.id = str(id_)
self.storage = SafeStorage(root_setting='MLBF_STORAGE_PATH')
@classmethod
def hash_filter_inputs(cls, input_list):
"""Returns a set"""
return {
cls.KEY_FORMAT.format(guid=guid, version=version)
for (guid, version) in input_list
}
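    # e.g. hash_filter_inputs([("addon@example", "1.2")]) -> {"addon@example:1.2"}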
@property
def METHOD_NAME(self):
return os.path.join(settings.MLBF_STORAGE_PATH, self.id, 'blocked.json')
@cached_property
def blocked_items(self):
raise NotImplementedError
def write_blocked_items(self):
blocked_path = self.METHOD_NAME
with self.storage.open(blocked_path, 'w') as json_file:
log.info(f'Writing to file {blocked_path}')
json.dump(self.blocked_items, json_file)
@property
def _not_blocked_path(self):
return os.path.join(settings.MLBF_STORAGE_PATH, self.id, 'notblocked.json')
@cached_property
def not_blocked_items(self):
raise NotImplementedError
def write_not_blocked_items(self):
not_blocked_path = self._not_blocked_path
with self.storage.open(not_blocked_path, 'w') as json_file:
log.info(f'Writing to file {not_blocked_path}')
json.dump(self.not_blocked_items, json_file)
@property
def filter_path(self):
return os.path.join(settings.MLBF_STORAGE_PATH, self.id, 'filter')
@property
def _stash_path(self):
return os.path.join(settings.MLBF_STORAGE_PATH, self.id, 'stash.json')
@cached_property
def stash_json(self):
with self.storage.open(self._stash_path, 'r') as json_file:
return json.load(json_file)
def generate_and_write_filter(self):
stats = {}
self.write_blocked_items()
self.write_not_blocked_items()
bloomfilter = generate_mlbf(
stats=stats, blocked=self.blocked_items, not_blocked=self.not_blocked_items
)
# write bloomfilter
mlbf_path = self.filter_path
with self.storage.open(mlbf_path, 'wb') as filter_file:
log.info(f'Writing to file {mlbf_path}')
bloomfilter.tofile(filter_file)
stats['mlbf_filesize'] = os.stat(mlbf_path).st_size
log.info(json.dumps(stats))
@classmethod
def generate_diffs(cls, previous, current):
previous = set(previous)
current = set(current)
extras = current - previous
deletes = previous - current
return extras, deletes
def generate_and_write_stash(self, previous_mlbf):
self.write_blocked_items()
self.write_not_blocked_items()
# compare previous with current blocks
extras, deletes = self.generate_diffs(
previous_mlbf.blocked_items, self.blocked_items
)
self.stash_json = {
'blocked': list(extras),
'unblocked': list(deletes),
}
# write stash
stash_path = self._stash_path
with self.storage.open(stash_path, 'w') as json_file:
log.info(f'Writing to file {stash_path}')
json.dump(self.stash_json, json_file)
def should_reset_base_filter(self, previous_bloom_filter):
try:
# compare base with current blocks
extras, deletes = self.generate_diffs(
previous_bloom_filter.blocked_items, self.blocked_items
)
return (len(extras) + len(deletes)) > BASE_REPLACE_THRESHOLD
except FileNotFoundError:
            # when previous_bloom_filter._blocked_path doesn't exist
return True
def blocks_changed_since_previous(self, previous_bloom_filter):
try:
# compare base with current blocks
extras, deletes = self.generate_diffs(
previous_bloom_filter.blocked_items, self.blocked_items
)
return len(extras) + len(deletes)
except FileNotFoundError:
# when previous_bloom_filter._blocked_path doesn't exist
return len(self.blocked_items)
@classmethod
def load_from_storage(cls, *args, **kwargs):
return StoredMLBF(*args, **kwargs)
@classmethod
def generate_from_db(cls, *args, **kwargs):
return DatabaseMLBF(*args, **kwargs)
class StoredMLBF(MLBF):
@cached_property
def blocked_items(self):
with self.storage.open(self.METHOD_NAME, 'r') as json_file:
return json.load(json_file)
@cached_property
def not_blocked_items(self):
with self.storage.open(self._not_blocked_path, 'r') as json_file:
return json.load(json_file)
class DatabaseMLBF(MLBF):
@cached_property
def blocked_items(self):
blocked_ids_to_versions = fetch_blocked_from_db()
blocked = blocked_ids_to_versions.values()
# cache version ids so query in not_blocked_items is efficient
self._version_excludes = blocked_ids_to_versions.keys()
return list(self.hash_filter_inputs(blocked))
@cached_property
def not_blocked_items(self):
# see blocked_items - we need self._version_excludes populated
blocked_items = self.blocked_items
        # Even though we exclude all the version ids in the query, there's an
        # edge case where the same version string occurs twice for an addon,
        # so ensure not_blocked_items doesn't contain any blocked_items.
return list(
self.hash_filter_inputs(fetch_all_versions_from_db(self._version_excludes))
- set(blocked_items)
) |
5,931 | get build info | """
Copyright 2020 The Magma Authors.
This source code is licensed under the BSD-style license found in the
LICENSE file in the root directory of this source tree.
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import logging
import os
import re
import subprocess
import sys
from typing import Any, Dict, List, Tuple, Union
import ansible_runner
import attr
import config
from TS.aws_lib import AWSbase
ANSIBLE_LOC = os.path.join(
os.path.dirname(os.path.abspath(sys.path[0])), "Magma_Ansible",
)
LOGS_LOC = os.path.join(os.path.abspath(sys.path[0]), "logs")
@attr.s
class SUT_AGW(object):
sut = attr.ib()
def __attrs_post_init__(self):
agw_pass = config.MAGMA.get("password")
self.e_vars = {
"ansible_ssh_pass": agw_pass,
"ansible_sudo_pass": agw_pass,
"ansible_user": config.MAGMA.get("username"),
"ansible_become": "yes",
"ANSIBLE_HOST_KEY_CHECKING": False,
}
def upgrade(
self, book: str = None, magma_rel: str = "ci", magma_build: str = "latest",
) -> bool:
"""this is dummy mathod, EPC specific class mathod should be used"""
logging.info(f"Please use EPC required mathod to upgrade")
return True
    def _run_ansible_role(self, **kwargs: Union[str, int, float]) -> Union[bool, Any]:
"""Worker func to run role"""
try:
r = ansible_runner.run(
private_data_dir=ANSIBLE_LOC,
limit=self.sut,
role=kwargs["role"],
rotate_artifacts=1,
directory_isolation_base_path="/tmp/runner",
extravars=kwargs["extra_vars"],
cmdline=kwargs.get("cmdline", "--tags all"),
)
subprocess.call(["rm", "-f", ANSIBLE_LOC + "/project/main.json"])
subprocess.call(["rm", "-f", ANSIBLE_LOC + "/env/extravars"])
except Exception as e:
logging.error(f"Ansible role run got error - {e}")
# clean up
subprocess.call(["rm", "-f", ANSIBLE_LOC + "/project/main.json"])
subprocess.call(["rm", "-f", ANSIBLE_LOC + "/env/extravars"])
return False
if r.status == "successful" and r.rc == 0:
return r
else:
return False
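    # Typical invocation (illustrative): callers pass a role name plus the
    # merged extra vars, e.g.
    #   self._run_ansible_role(role="get-file", extra_vars=extra_vars)
    # A falsy return means the play failed or raised an exception.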
def pcap_collection(self, **kwargs: Union[str, int, float]) -> Union[bool, str]:
"""This method will start the pcap collection on the SUT"""
kwargs["cap_vars"].update(self.e_vars)
res = self._run_ansible_role(
role=kwargs["role"],
extra_vars=kwargs["cap_vars"],
cmdline=kwargs["cmdline"],
)
if res:
logging.info(
f"Successfully executed {kwargs['cmdline']} in role {kwargs['role']}",
)
else:
logging.info(
f"Execution failed while executing {kwargs['cmdline']} in role {kwargs['role']}",
)
return res
def METHOD_NAME(self, role: str = None) -> Union[bool, str]:
"""Use EPC specific class mathod"""
logging.warn(f"Please use EPC specific mathod to retrive SW version")
return True
def sut_magma_state_check(self, role: str = None) -> Dict[str, int]:
"""
Use EPC specific health check
"""
logging.info(f"EPC specific health check needs to be used")
return config.MAGMA_AGW.get("UE_STATE", {})
    def sut_check(self, sut_res: Any = None, key: str = None) -> Tuple[Dict[str, Any], Dict[str, Any]]:
"""EPC specific checks mathod should be used to retrive memory and pid info"""
mem_dict = {"epc": 1}
pid_dict = {"epc": 1}
return mem_dict, pid_dict
def sut_file_cleanup(self, file_path: str) -> None:
"""EPC specific cleanup mathod should be used"""
pass
# self._run_ansible_role(role="test-cleanup", extra_vars=self.e_vars)
    def sut_magma_checks(self, role: str = None) -> Union[bool, Tuple[Dict[str, Any], Dict[str, Any]]]:
"""EPC specific health check roles/mathods should be used"""
pass
# res = self._run_ansible_role(role=role, extra_vars=self.e_vars)
mem_dict, pid_dict = self.sut_check()
        return (mem_dict, pid_dict) if (mem_dict and pid_dict) else False
def detect_failed_procedure(
self,
initial_process_dict: Dict[str, any],
post_test_dict: Dict[str, any],
sw_ver: str,
initial_magma_memory: Dict[str, any],
post_test_magma_memory: Dict[str, any],
memory_delta_failed_pct: float,
) -> List[str]:
"""
        This is purely Magma specific; an EPC AGW-specific method should be used for non-Magma setups
"""
pass
return []
    def upload_file_s3(self, file_path: str, aws_obj: Any):
"""upload file to s3 with aws object"""
aws_obj.upload_file(file_path, ts="cores", c_type="application/octet-stream")
def reboot(self, role: str = None) -> bool:
"""This method would reboot the SUT"""
res = self._run_ansible_role(role=role, extra_vars=self.e_vars)
if res:
logging.info(f"SUT restarted Successfully")
return True
else:
return False
def get_sut_file(self, file_path: str, dest_location: str, dest_name: str) -> bool:
"""helper function to retrieve file from SUT"""
copy_flag = True
extra_vars = {
"dest_location": dest_location + "/" + dest_name,
"file_path": file_path,
}
extra_vars.update(self.e_vars)
res = self._run_ansible_role(role="get-file", extra_vars=extra_vars)
if res:
logging.info(f" SUT {file_path} retrieved Successfully")
else:
logging.info(f" SUT {file_path} retrieval failed")
copy_flag = False
return copy_flag
def get_logs(self, **kwargs: Union[str, int, float]) -> bool:
"""EPC specific Func to get log"""
return True |
5,932 | test thread safety | import os
import sys
import unittest
import random
from string import ascii_lowercase as ascii_lc
from time import sleep
from copy import deepcopy
from abc import ABC
from concurrent.futures import ThreadPoolExecutor, wait
from virttest import _wrappers
def create_module(name, inner_val, path=''):
""" Creates a module with a variable in it named inner_variable whose
value is equal to the one set
:param name: name of the module
:type name: String
:param inner_val: value that will be assigned to inner_variable
:type inner_val: Int
:param path: path to the module
:type path: String
"""
module_code = """# This is a file created during virttest._wrappers tests
# This file should be deleted once the tests are finished
inner_variable = %s
""" % inner_val
if path != '' and not os.path.isdir(path):
os.makedirs(path)
with open(os.path.join(path, f"{name}.py"), 'w') as new_module:
new_module.write(module_code)
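# Example (illustrative): create_module("m", 3, "/tmp/mods") writes
# /tmp/mods/m.py whose body sets inner_variable = 3 (plus the warning
# comments in the template above).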
def check_imported_module(testcase, name, module, value):
""" Wraps general checks that are repeated across almost all tests.
"""
testcase.assertIsNotNone(module)
testcase.assertEqual(module.inner_variable, value)
testcase.assertTrue(module in sys.modules.values())
testcase.assertTrue(module is sys.modules[name])
class baseImportTests(ABC):
_tmp_in_module_name = "tmp_module_in"
_tmp_sub_module_name = "tmp_module_sub"
_tmp_sub_module_dir = "_wrappers_tests_mods"
_aux_sub_mod_dir = "_wrappers_aux_test_mods"
_subdir_inner_val = 1
_indir_inner_val = 0
@classmethod
def setUpClass(cls):
create_module(cls._tmp_in_module_name, cls._indir_inner_val)
create_module(cls._tmp_sub_module_name, cls._subdir_inner_val,
cls._tmp_sub_module_dir)
os.makedirs(cls._aux_sub_mod_dir)
# Wait a bit so the import mechanism cache can be refreshed
sleep(2)
@classmethod
def tearDownClass(cls):
def rm_subdir(subdir):
# Remove inner __pycache__
sub_pycache_dir = os.path.join(subdir, "__pycache__")
if os.path.exists(sub_pycache_dir):
for pycache_file in os.listdir(sub_pycache_dir):
os.remove(os.path.join(sub_pycache_dir, pycache_file))
os.rmdir(sub_pycache_dir)
# Remove sub-module files
for tmp_sub_mod in os.listdir(subdir):
os.remove(os.path.join(subdir, tmp_sub_mod))
# Finally delete created directory
os.rmdir(subdir)
rm_subdir(cls._tmp_sub_module_dir)
rm_subdir(cls._aux_sub_mod_dir)
# And the file created in the exec dir
os.remove(f"{cls._tmp_in_module_name}.py")
def _compare_mods(self, one, other):
self.assertEqual(one.__name__, other.__name__)
self.assertEqual(one.__spec__.origin, other.__spec__.origin)
def test_import_from_subdir(self):
""" Imports a module that's in another directory """
pre_sys_path = deepcopy(sys.path)
self._check_import(self._tmp_sub_module_name, self._subdir_inner_val,
self._tmp_sub_module_dir)
self.assertEqual(pre_sys_path, sys.path)
def test_import_just_created(self):
""" Creates modules repeatedly and checks it can import them
without waiting any time
"""
n_repeats = 10
for i in range(n_repeats):
mod_name = f"tmp_rep_mod_{i}"
# Create module
create_module(mod_name, i, self._tmp_sub_module_dir)
# Import and check
self._check_import(mod_name, i, self._tmp_sub_module_dir)
def test_import_from_dir(self):
""" Imports a module that's in the same directory """
pre_sys_path = deepcopy(sys.path)
self._check_import(self._tmp_in_module_name, self._indir_inner_val)
self.assertEqual(pre_sys_path, sys.path)
class ImportModuleTest(baseImportTests, unittest.TestCase):
def _check_import(self, name, value, path=''):
""" Wraps the import checking workflow used in some tests """
module = _wrappers.import_module(name, path)
check_imported_module(self, name, module, value)
def test_import_from_pythonpath(self):
""" Imports a module that's in the python path """
# Import os which is also being used in the other tests
module = _wrappers.import_module('os')
self.assertIsNotNone(module)
self._compare_mods(module, os)
def test_import_from_builtins(self):
""" Imports a module that's in the python path """
# Import os which is also being used in the other tests
import pwd
module = _wrappers.import_module('pwd')
self.assertIsNotNone(module)
self._compare_mods(module, pwd)
def METHOD_NAME(self):
""" Create 5 pairs of modules. Each pair consists of two equally named
files with a different inner value, and saved in different
directories.
"""
def check_routine(module_check_data):
module = _wrappers.import_module(module_check_data["name"],
module_check_data["path"])
val = module.inner_variable
return val
def check(module_val, module_data):
self.assertEqual(module_val, module_data["value"])
def get_random_name(length=20):
return "".join([random.choice(ascii_lc) for _ in range(length)])
check_mod_names = [get_random_name() for _ in range(50)]
check_import_data = []
for mod_name in check_mod_names:
value = random.randint(0, 100)
create_module(mod_name, value, self._aux_sub_mod_dir)
in_dir = {"name": mod_name, "value": value,
"path": self._aux_sub_mod_dir}
value = random.randint(0, 100)
create_module(mod_name, value, self._tmp_sub_module_dir)
sub_dir = {"name": mod_name, "value": value,
"path": self._tmp_sub_module_dir}
# We don't want to test if two modules with the same name
# are imported safely in the same execution.
# We want to test that sys.path priorities are not mixed up
# So select only one
check_import_data.append(random.choice([in_dir, sub_dir]))
results = []
with ThreadPoolExecutor(max_workers=len(check_mod_names)) as executor:
for mod_data in check_import_data:
results.append(executor.submit(check_routine, mod_data))
wait(results)
for res, mod_data in zip(results, check_import_data):
check(res.result(), mod_data)
class LoadSourceTest(baseImportTests, unittest.TestCase):
def _check_import(self, name, value, path=''):
path = os.path.join(path, f"{name}.py")
module = _wrappers.load_source(name, path)
check_imported_module(self, name, module, value)
def test_mismatching_names(self):
# test that importing a module mismatching the file name works good
module = _wrappers.load_source("name", f"{self._tmp_in_module_name}.py")
check_imported_module(self, "name", module, self._indir_inner_val)
def test_no_existing_file(self):
# Assert an error is launched if a non existing file is imported
with self.assertRaises(FileNotFoundError):
self._check_import('os', None, 'os.py') |
5,933 | extract data | # pylint: disable=too-many-lines
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, Callable, Dict, Iterable, Optional, TypeVar
import urllib.parse
from azure.core.exceptions import (
ClientAuthenticationError,
HttpResponseError,
ResourceExistsError,
ResourceNotFoundError,
ResourceNotModifiedError,
map_error,
)
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpResponse
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.core.utils import case_insensitive_dict
from azure.mgmt.core.exceptions import ARMErrorFormat
from .. import models as _models
from ..._serialization import Serializer
from .._vendor import _convert_request
T = TypeVar("T")
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
_SERIALIZER = Serializer()
_SERIALIZER.client_side_validation = False
def build_list_request(**kwargs: Any) -> HttpRequest:
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2021-02-01"))
accept = _headers.pop("Accept", "application/json")
# Construct URL
_url = kwargs.pop("template_url", "/providers/Microsoft.ContainerService/operations")
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
# Construct headers
_headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs)
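# Illustrative: build_list_request() yields a GET request for
# "/providers/Microsoft.ContainerService/operations?api-version=2021-02-01"
# with an "Accept: application/json" header; Operations.list below pages
# through its results.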
class Operations:
"""
.. warning::
**DO NOT** instantiate this class directly.
Instead, you should access the following operations through
:class:`~azure.mgmt.containerservice.v2021_02_01.ContainerServiceClient`'s
:attr:`operations` attribute.
"""
models = _models
def __init__(self, *args, **kwargs):
input_args = list(args)
self._client = input_args.pop(0) if input_args else kwargs.pop("client")
self._config = input_args.pop(0) if input_args else kwargs.pop("config")
self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer")
self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer")
self._api_version = input_args.pop(0) if input_args else kwargs.pop("api_version")
@distributed_trace
def list(self, **kwargs: Any) -> Iterable["_models.OperationValue"]:
"""Gets a list of compute operations.
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either OperationValue or the result of cls(response)
:rtype:
~azure.core.paging.ItemPaged[~azure.mgmt.containerservice.v2021_02_01.models.OperationValue]
:raises ~azure.core.exceptions.HttpResponseError:
"""
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2021-02-01"))
cls: ClsType[_models.OperationListResult] = kwargs.pop("cls", None)
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
def prepare_request(next_link=None):
if not next_link:
request = build_list_request(
api_version=api_version,
template_url=self.list.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
# make call to next link with the client's api-version
_parsed_next_link = urllib.parse.urlparse(next_link)
_next_request_params = case_insensitive_dict(
{
key: [urllib.parse.quote(v) for v in value]
for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items()
}
)
_next_request_params["api-version"] = self._config.api_version
request = HttpRequest(
"GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = "GET"
return request
def METHOD_NAME(pipeline_response):
deserialized = self._deserialize("OperationListResult", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem) # type: ignore
return None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
_stream = False
pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(get_next, METHOD_NAME)
list.metadata = {"url": "/providers/Microsoft.ContainerService/operations"} |
5,934 | callback | # Copyright: Ankitects Pty Ltd and contributors
# License: GNU AGPL, version 3 or later; http://www.gnu.org/licenses/agpl.html
from __future__ import annotations
from anki.collection import OpChanges
from anki.decks import DEFAULT_DECK_ID, DeckId
from aqt import AnkiQt, gui_hooks
from aqt.qt import *
from aqt.utils import HelpPage, shortcut, tr
class DeckChooser(QHBoxLayout):
def __init__(
self,
mw: AnkiQt,
widget: QWidget,
label: bool = True,
starting_deck_id: DeckId | None = None,
on_deck_changed: Callable[[int], None] | None = None,
) -> None:
QHBoxLayout.__init__(self)
self._widget = widget # type: ignore
self.mw = mw
self._setup_ui(show_label=label)
self._selected_deck_id = DeckId(0)
# default to current deck if starting id not provided
if starting_deck_id is None:
starting_deck_id = DeckId(self.mw.col.get_config("curDeck", default=1) or 1)
self.selected_deck_id = starting_deck_id
self.on_deck_changed = on_deck_changed
gui_hooks.operation_did_execute.append(self.on_operation_did_execute)
def _setup_ui(self, show_label: bool) -> None:
self.setContentsMargins(0, 0, 0, 0)
self.setSpacing(8)
        # optional text label shown before the deck button
if show_label:
self.deckLabel = QLabel(tr.decks_deck())
self.addWidget(self.deckLabel)
# decks box
self.deck = QPushButton()
qconnect(self.deck.clicked, self.choose_deck)
self.deck.setAutoDefault(False)
self.deck.setToolTip(shortcut(tr.qt_misc_target_deck_ctrlandd()))
qconnect(
QShortcut(QKeySequence("Ctrl+D"), self._widget).activated, self.choose_deck
)
sizePolicy = QSizePolicy(QSizePolicy.Policy(7), QSizePolicy.Policy(0))
self.deck.setSizePolicy(sizePolicy)
self.addWidget(self.deck)
self._widget.setLayout(self)
def selected_deck_name(self) -> str:
return (
self.mw.col.decks.name_if_exists(self.selected_deck_id) or "missing default"
)
@property
def selected_deck_id(self) -> DeckId:
self._ensure_selected_deck_valid()
return self._selected_deck_id
@selected_deck_id.setter
def selected_deck_id(self, id: DeckId) -> None:
if id != self._selected_deck_id:
self._selected_deck_id = id
self._ensure_selected_deck_valid()
self._update_button_label()
def _ensure_selected_deck_valid(self) -> None:
deck = self.mw.col.decks.get(self._selected_deck_id, default=False)
if not deck or deck["dyn"]:
self.selected_deck_id = DEFAULT_DECK_ID
def _update_button_label(self) -> None:
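        # Qt treats "&" in button text as a mnemonic marker, so literal
        # ampersands in deck names are doubled to display correctly.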
self.deck.setText(self.selected_deck_name().replace("&", "&&"))
def show(self) -> None:
self._widget.show() # type: ignore
def hide(self) -> None:
self._widget.hide() # type: ignore
def choose_deck(self) -> None:
from aqt.studydeck import StudyDeck
current = self.selected_deck_name()
def METHOD_NAME(ret: StudyDeck) -> None:
if not ret.name:
return
new_selected_deck_id = self.mw.col.decks.by_name(ret.name)["id"]
if self.selected_deck_id != new_selected_deck_id:
self.selected_deck_id = new_selected_deck_id
if func := self.on_deck_changed:
func(new_selected_deck_id)
StudyDeck(
self.mw,
current=current,
accept=tr.actions_choose(),
title=tr.qt_misc_choose_deck(),
help=HelpPage.EDITING,
cancel=True,
parent=self._widget,
geomKey="selectDeck",
METHOD_NAME=METHOD_NAME,
)
def on_operation_did_execute(
self, changes: OpChanges, handler: object | None
) -> None:
if changes.deck:
self._update_button_label()
def cleanup(self) -> None:
gui_hooks.operation_did_execute.remove(self.on_operation_did_execute)
# legacy
onDeckChange = choose_deck
deckName = selected_deck_name
def selectedId(self) -> DeckId:
return self.selected_deck_id |
5,935 | create optimizer | """ optim factory """
import os
from typing import Optional
from mindspore import load_checkpoint, load_param_into_net, nn
from .adamw import AdamW
from .adan import Adan
from .lion import Lion
from .nadam import NAdam
__all__ = ["create_optimizer"]
def init_group_params(params, weight_decay):
decay_params = []
no_decay_params = []
for param in params:
if "beta" not in param.name and "gamma" not in param.name and "bias" not in param.name:
decay_params.append(param)
else:
no_decay_params.append(param)
return [
{"params": decay_params, "weight_decay": weight_decay},
{"params": no_decay_params},
{"order_params": params},
]
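# Illustrative grouping behaviour: parameters whose names contain "beta",
# "gamma" (norm layers) or "bias" land in the no-decay group, e.g.
#   groups = init_group_params(network.trainable_params(), weight_decay=1e-4)
# yields [{decay params}, {no-decay params}, {"order_params": params}] in the
# layout the MindSpore optimizers below expect.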
def METHOD_NAME(
params,
opt: str = "adam",
lr: Optional[float] = 1e-3,
weight_decay: float = 0,
momentum: float = 0.9,
nesterov: bool = False,
filter_bias_and_bn: bool = True,
loss_scale: float = 1.0,
schedule_decay: float = 4e-3,
checkpoint_path: str = "",
eps: float = 1e-10,
**kwargs,
):
r"""Creates optimizer by name.
Args:
params: network parameters. Union[list[Parameter],list[dict]], which must be the list of parameters
or list of dicts. When the list element is a dictionary, the key of the dictionary can be
"params", "lr", "weight_decay","grad_centralization" and "order_params".
        opt: name of the optimizer to create. One of 'sgd', 'nesterov', 'momentum', 'adam', 'adamw', 'lion',
            'rmsprop', 'adagrad' or 'lamb'. 'adam' is the default choice for convolution-based networks;
            'adamw' is recommended for ViT-based networks. Default: 'adam'.
lr: learning rate: float or lr scheduler. Fixed and dynamic learning rate are supported. Default: 1e-3.
weight_decay: weight decay factor. It should be noted that weight decay can be a constant value or a Cell.
It is a Cell only when dynamic weight decay is applied. Dynamic weight decay is similar to
dynamic learning rate, users need to customize a weight decay schedule only with global step as input,
and during training, the optimizer calls the instance of WeightDecaySchedule to get the weight decay value
of current step. Default: 0.
momentum: momentum if the optimizer supports. Default: 0.9.
nesterov: Whether to use Nesterov Accelerated Gradient (NAG) algorithm to update the gradients. Default: False.
filter_bias_and_bn: whether to filter batch norm parameters and bias from weight decay.
If True, weight decay will not apply on BN parameters and bias in Conv or Dense layers. Default: True.
        loss_scale: A floating point value for the loss scale, which must be larger than 0.0. Default: 1.0.
        schedule_decay: schedule decay used by the NAdam optimizer. Default: 4e-3.
        checkpoint_path: optional path to a checkpoint to load into the optimizer. Default: ''.
        eps: epsilon term used by the RMSProp optimizer for numerical stability. Default: 1e-10.
Returns:
Optimizer object
"""
opt = opt.lower()
if weight_decay and filter_bias_and_bn:
params = init_group_params(params, weight_decay)
opt_args = dict(**kwargs)
# if lr is not None:
# opt_args.setdefault('lr', lr)
# non-adaptive: SGD, momentum, and nesterov
if opt == "sgd":
# note: nn.Momentum may perform better if momentum > 0.
optimizer = nn.SGD(
params=params,
learning_rate=lr,
momentum=momentum,
weight_decay=weight_decay,
nesterov=nesterov,
loss_scale=loss_scale,
**opt_args,
)
elif opt in ["momentum", "nesterov"]:
optimizer = nn.Momentum(
params=params,
learning_rate=lr,
momentum=momentum,
weight_decay=weight_decay,
use_nesterov=nesterov,
loss_scale=loss_scale,
)
# adaptive
elif opt == "adam":
optimizer = nn.Adam(
params=params,
learning_rate=lr,
weight_decay=weight_decay,
loss_scale=loss_scale,
use_nesterov=nesterov,
**opt_args,
)
elif opt == "adamw":
optimizer = AdamW(
params=params,
learning_rate=lr,
weight_decay=weight_decay,
loss_scale=loss_scale,
**opt_args,
)
elif opt == "lion":
optimizer = Lion(
params=params,
learning_rate=lr,
weight_decay=weight_decay,
loss_scale=loss_scale,
**opt_args,
)
elif opt == "nadam":
optimizer = NAdam(
params=params,
learning_rate=lr,
weight_decay=weight_decay,
loss_scale=loss_scale,
schedule_decay=schedule_decay,
**opt_args,
)
elif opt == "adan":
optimizer = Adan(
params=params,
learning_rate=lr,
weight_decay=weight_decay,
loss_scale=loss_scale,
**opt_args,
)
elif opt == "rmsprop":
optimizer = nn.RMSProp(
params=params,
learning_rate=lr,
momentum=momentum,
weight_decay=weight_decay,
loss_scale=loss_scale,
epsilon=eps,
**opt_args,
)
elif opt == "adagrad":
optimizer = nn.Adagrad(
params=params,
learning_rate=lr,
weight_decay=weight_decay,
loss_scale=loss_scale,
**opt_args,
)
elif opt == "lamb":
assert loss_scale == 1.0, "Loss scaler is not supported by Lamb optimizer"
optimizer = nn.Lamb(
params=params,
learning_rate=lr,
weight_decay=weight_decay,
**opt_args,
)
else:
raise ValueError(f"Invalid optimizer: {opt}")
if os.path.exists(checkpoint_path):
param_dict = load_checkpoint(checkpoint_path)
load_param_into_net(optimizer, param_dict)
return optimizer |
5,936 | resource apply dense | # Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""AdaGraft optimizer https://arxiv.org/abs/2002.11803 ."""
import lingvo.compat as tf
class AdaGraftOptimizer(tf.train.Optimizer):
"""Optimizer which combines per-layer direction and magnitude from two optimizers.
Disentangling Adaptive Gradient Methods from Learning Rates
Naman Agarwal, Rohan Anil, Elad Hazan, Tomer Koren, Cyril Zhang
https://arxiv.org/abs/2002.11803
"""
def __init__(self,
learning_rate,
magnitude_optimizer,
direction_optimizer,
diagnostic=False,
use_global_norm=False,
name="AdaGraft"):
"""Construct a new AdaGraft optimizer.
Args:
learning_rate: A `Tensor` or a floating point value. The learning rate.
magnitude_optimizer: Child Optimizer to inherit step sizes.
direction_optimizer: Child Optimizer to inherit step directions.
diagnostic: Whether to record per-tensor step norms.
use_global_norm: Graft global l2 norms rather than per-layer.
name: Optional name prefix for the operations created when applying
gradients.
"""
super().__init__(False, name)
self._learning_rate = learning_rate
self.magnitude_optimizer = magnitude_optimizer
self.direction_optimizer = direction_optimizer
self.diagnostic = diagnostic
self.use_global_norm = use_global_norm
def _create_slots(self, var_list):
self.magnitude_optimizer._create_slots(var_list) # pylint: disable=protected-access
self.direction_optimizer._create_slots(var_list) # pylint: disable=protected-access
for v in var_list:
with tf.ops.colocate_with(v):
self._zeros_slot(v, "scratch_copy", self._name)
if self.diagnostic or self.use_global_norm:
self._get_or_make_slot(v, tf.constant(0.0), "m_step_norm", self._name)
self._get_or_make_slot(v, tf.constant(0.0), "d_step_norm", self._name)
def _prepare(self):
self.magnitude_optimizer._prepare() # pylint: disable=protected-access
self.direction_optimizer._prepare() # pylint: disable=protected-access
learning_rate = self._call_if_callable(self._learning_rate)
self._learning_rate_tensor = tf.convert_to_tensor(
learning_rate, name="learning_rate")
if self.use_global_norm: # create list of all vars for global _finish
self._variables = []
def _apply_dense(self, grad, var): # pylint: disable=g-doc-args
return self._internal_apply_dense(
grad,
var,
self.magnitude_optimizer._apply_dense, # pylint: disable=protected-access
self.direction_optimizer._apply_dense) # pylint: disable=protected-access
def METHOD_NAME(self, grad, var):
return self._internal_apply_dense(
grad,
var,
self.magnitude_optimizer.METHOD_NAME, # pylint: disable=protected-access
self.direction_optimizer.METHOD_NAME) # pylint: disable=protected-access
def _internal_apply_dense(self, grad, var, magnitude_optimizer_apply_fn,
direction_optimizer_apply_fn): # pylint: disable=g-doc-args
"""Main optimization logic of AdaGraft, which calls the child optimizers.
Args:
grad: Tensor containing gradients.
var: Tensor containing parameter values.
magnitude_optimizer_apply_fn: Apply magnitude optimizer.
direction_optimizer_apply_fn: Apply direction optimizer.
Returns:
The final update op, which increments var by the grafted step.
Pseudocode:
- Copy weights into scratch space 'scratch_copy'.
- Run magnitude_optimizer in-place.
- Use scratch copy to figure out how far we moved ('magnitude_step').
- Copy weights back.
- Run direction_optimizer in-place.
- Move weights along the line segment with scratch_copy.
"""
if self.use_global_norm:
self._variables.append(var)
# Slot with current parameter values
scratch_slot = self.get_slot(var, "scratch_copy")
old_var = tf.assign(scratch_slot, var)
with tf.control_dependencies([old_var]):
m_updated_var = magnitude_optimizer_apply_fn(grad, var) # pylint: disable=protected-access
# Run magnitude optimizer and compute the norm of the update.
with tf.control_dependencies([m_updated_var]):
m_step = var - old_var
m_step_norm = tf.norm(m_step)
if self.diagnostic or self.use_global_norm:
m_step_norm = tf.assign(self.get_slot(var, "m_step_norm"), m_step_norm)
# Run direction optimizer and compute its norm, and the direction.
with tf.control_dependencies([m_step_norm]):
flushed_var = tf.assign(var, old_var)
with tf.control_dependencies([flushed_var]):
d_updated_var = direction_optimizer_apply_fn(grad, var) # pylint: disable=protected-access
# Compute the direction optimizer's step and its norm.
with tf.control_dependencies([d_updated_var]):
d_step = var - old_var
d_step_norm = tf.norm(d_step)
if self.diagnostic or self.use_global_norm:
d_step_norm = tf.assign(self.get_slot(var, "d_step_norm"), d_step_norm)
if self.use_global_norm:
flushed_var = tf.assign(var, old_var)
with tf.control_dependencies([d_step_norm, flushed_var]):
return tf.assign(scratch_slot, d_step)
step = tf.where(
tf.greater(d_step_norm, 0),
(m_step_norm / tf.maximum(d_step_norm, 1e-30)) * d_step,
tf.zeros_like(d_step))
return tf.assign(var, old_var + self._learning_rate_tensor * step)
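# Plain-Python sketch of the grafting rule above (assumption: scalars/lists
# outside any TF graph), showing how the direction step is rescaled to the
# magnitude step's length, with a guard for a zero-length direction step:
#   def graft(m_norm, d_step, d_norm, eps=1e-30):
#       if d_norm <= 0:
#           return [0.0] * len(d_step)
#       return [(m_norm / max(d_norm, eps)) * x for x in d_step]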
def _finish(self, update_ops, name_scope):
with tf.control_dependencies(update_ops):
ops1 = self.magnitude_optimizer._finish([], name_scope + "_m") # pylint: disable=protected-access
ops2 = self.direction_optimizer._finish([], name_scope + "_d") # pylint: disable=protected-access
if self.use_global_norm: # apply global grafting
with tf.control_dependencies([ops1, ops2]):
m_global_norm = tf.Variable(0.)
d_global_norm = tf.Variable(0.)
for var in self._variables:
m_step_norm = self.get_slot(var, "m_step_norm")
d_step_norm = self.get_slot(var, "d_step_norm")
tf.assign_add(m_global_norm, m_step_norm**2)
tf.assign_add(d_global_norm, d_step_norm**2)
multiplier = tf.sqrt(m_global_norm / tf.maximum(d_global_norm, 1e-30))
step_ops = []
for var in self._variables:
d_step = self.get_slot(var, "scratch_copy")
d_step_norm = self.get_slot(var, "d_step_norm")  # per-var norm, not the leftover from the loop above
step = tf.where(
tf.greater(d_step_norm, 0), multiplier * d_step,
tf.zeros_like(d_step))
step_op = tf.assign_add(var, self._learning_rate_tensor * step)
step_ops.append(step_op)
return tf.group(*step_ops, name=name_scope)
return tf.group(*([ops1, ops2] + update_ops), name=name_scope)
# Sparse gradients are not handled currently and is part of future work.
def _resource_apply_sparse(self, grad_values, var, grad_indices):
return tf.no_op()
def _apply_sparse(self, grad, var):
return tf.no_op() |
5,937 | download | #
# Copyright The NOMAD Authors.
#
# This file is part of NOMAD. See https://nomad-lab.eu for further info.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import pytest
import json
from fastapi.testclient import TestClient
import httpx
from urllib.parse import urlencode
import time
import dateutil.parser
from nomad import config
from nomad.app.resources.main import app, remove_mongo
from nomad.app.resources.routers.resources import aflow_prototypes_db, springer_materials_db, optimade_providers
def _to_datetime(datetime_str):
return dateutil.parser.isoparse(datetime_str).timestamp()
# return datetime.strptime(datetime_str, '%Y-%m-%dT%H:%M:%S.%f').timestamp()
def _get_resources(api, params, is_retrieving_more, repeat=True):
response = api.get(f'/?{urlencode(params, doseq=True)}')
assert response.status_code == 200
response_json = response.json()
data = response_json['data']
if is_retrieving_more is not None:
assert response_json['is_retrieving_more'] == is_retrieving_more
if not response_json['is_retrieving_more']:
return data
if not repeat:
return data
while response_json['is_retrieving_more']:
time.sleep(1)
response = api.get(f'/?{urlencode(params, doseq=True)}')
assert response.status_code == 200
response_json = response.json()
data = response_json['data']
return data
@pytest.fixture(scope='session')
def api():
return TestClient(app, base_url='http://testserver/')
@pytest.fixture(scope='function')
def patched_download(monkeypatch):
with open('tests/data/api/resources_mocked_responses.json') as f:
responses = json.load(f)
async def METHOD_NAME(session: httpx.AsyncClient, path: str) -> httpx.Response:
async def get(path):
response_dict = responses.get(path)
if response_dict is None:
return httpx.Response(status_code=404)
json_data = response_dict.get('json')
response = httpx.Response(
text=response_dict.get('text'), status_code=response_dict.get('status_code'),
json=json_data if json_data else {})
return response
response = await get(path)
return response
monkeypatch.setattr('nomad.app.resources.routers.resources._download', METHOD_NAME)
@pytest.fixture(scope='function')
def resources(mongo, monkeypatch):
monkeypatch.setattr('nomad.config.resources.enabled', True)
monkeypatch.setattr('nomad.config.resources.db_name', 'test_db_resources')
remove_mongo()
yield
remove_mongo()
def _perform_initial_get_resources(api, params, data_length):
data = _get_resources(api, params, is_retrieving_more=None)
assert len(data) == data_length
return sorted(data, key=lambda x: x['id'])
@pytest.mark.timeout(config.tests.default_timeout)
def test_initial_get_resources(api, resources, patched_download, worker):
params = dict(chemical_formula_reduced='AcAg', wyckoff_letters=['a', 'b'], space_group_number=225, n_sites=2)
data = _perform_initial_get_resources(api, params, data_length=7)
aflow_data = [d for d in data if d['database_name'] == aflow_prototypes_db]
assert len(aflow_data) == 1
assert 'Space group symbol' in aflow_data[0]['available_data']
assert aflow_data[0]['id'] == 'AB_cF8_225_a_b'
springer_data = [d for d in data if d['database_name'] == springer_materials_db]
assert len(springer_data) == 0
optimade_dbs = [provider['name'] for provider in optimade_providers.values()]
optimade_data = [d for d in data if d['database_name'] in optimade_dbs]
assert len(optimade_data) == 6
assert optimade_data[0]['database_version'] == '1.0.0'
assert optimade_data[1]['url'] == 'https://oqmd.org/materials/entry/675180'
assert optimade_data[2]['id'] == '4815213'
@pytest.mark.timeout(config.tests.default_timeout)
def test_cached_get_resources(api, resources, patched_download, worker):
params = dict(chemical_formula_reduced='Mg', wyckoff_letters=['a'], space_group_number=229, n_sites=1)
# do initial request
data_initial = _perform_initial_get_resources(api, params, data_length=88)
# repeat request
data_repeat = _get_resources(api, params, is_retrieving_more=False)
assert len(data_repeat) == 88
# check if download_time is the same
# sort data according to id
data_repeat = sorted(data_repeat, key=lambda x: x['id'])
for i in range(len(data_initial)):
assert data_initial[i]['id'] == data_repeat[i]['id']
# mongodb does not save datetime precisely
# (https://www.mongodb.com/community/forums/t/for-date-field-dont-save-milliseconds-in-mongodb/110557)
assert _to_datetime(data_initial[i]['download_time']) == pytest.approx(_to_datetime(data_repeat[i]['download_time']), 0.001)
@pytest.mark.timeout(config.tests.default_timeout)
def test_cache_invalidation_get_resources(api, resources, patched_download, worker, monkeypatch):
params = dict(chemical_formula_reduced='Mg', wyckoff_letters=['a'], space_group_number=229, n_sites=1)
# do initial request
data_initial = _perform_initial_get_resources(api, params, data_length=88)
# mimic mongo update by setting max_time_in_mongo to 0
monkeypatch.setattr('nomad.config.resources.max_time_in_mongo', 0.)
# repeat request, expect that resources are downloaded again
_get_resources(api, params, is_retrieving_more=True, repeat=False)
monkeypatch.setattr('nomad.config.resources.max_time_in_mongo', 3600)
data_repeat = sorted(_get_resources(api, params, is_retrieving_more=True, repeat=True), key=lambda x: x['id'])
for i in range(len(data_initial)):
assert data_initial[i]['id'] == data_repeat[i]['id']
assert _to_datetime(data_initial[i]['download_time']) < _to_datetime(data_repeat[i]['download_time']) |
5,938 | test emit multiline message | # Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
from twisted.python.failure import Failure
from twisted.trial.unittest import TestCase
try:
import syslog as _stdsyslog
except ImportError:
stdsyslog = None
else:
stdsyslog = _stdsyslog
from twisted.python import syslog
class SyslogObserverTests(TestCase):
"""
Tests for L{SyslogObserver} which sends Twisted log events to the syslog.
"""
events = None
if stdsyslog is None:
skip = "syslog is not supported on this platform"
def setUp(self):
self.patch(syslog.SyslogObserver, "openlog", self.openlog)
self.patch(syslog.SyslogObserver, "syslog", self.syslog)
self.observer = syslog.SyslogObserver("SyslogObserverTests")
def openlog(self, prefix, options, facility):
self.logOpened = (prefix, options, facility)
self.events = []
def syslog(self, options, message):
self.events.append((options, message))
def test_emitWithoutMessage(self):
"""
L{SyslogObserver.emit} ignores events with an empty value for the
C{'message'} key.
"""
self.observer.emit({"message": (), "isError": False, "system": "-"})
self.assertEqual(self.events, [])
def test_emitCustomPriority(self):
"""
L{SyslogObserver.emit} uses the value of the C{'syslogPriority'} as the
syslog priority, if that key is present in the event dictionary.
"""
self.observer.emit(
{
"message": ("hello, world",),
"isError": False,
"system": "-",
"syslogPriority": stdsyslog.LOG_DEBUG,
}
)
self.assertEqual(self.events, [(stdsyslog.LOG_DEBUG, "[-] hello, world")])
def test_emitErrorPriority(self):
"""
L{SyslogObserver.emit} uses C{LOG_ALERT} if the event represents an
error.
"""
self.observer.emit(
{
"message": ("hello, world",),
"isError": True,
"system": "-",
"failure": Failure(Exception("foo")),
}
)
self.assertEqual(self.events, [(stdsyslog.LOG_ALERT, "[-] hello, world")])
def test_emitCustomPriorityOverridesError(self):
"""
L{SyslogObserver.emit} uses the value of the C{'syslogPriority'} key if
it is specified even if the event dictionary represents an error.
"""
self.observer.emit(
{
"message": ("hello, world",),
"isError": True,
"system": "-",
"syslogPriority": stdsyslog.LOG_NOTICE,
"failure": Failure(Exception("bar")),
}
)
self.assertEqual(self.events, [(stdsyslog.LOG_NOTICE, "[-] hello, world")])
def test_emitCustomFacility(self):
"""
L{SyslogObserver.emit} uses the value of the C{'syslogFacility'} key as the
syslog facility, if that key is present in the event dictionary.
"""
self.observer.emit(
{
"message": ("hello, world",),
"isError": False,
"system": "-",
"syslogFacility": stdsyslog.LOG_CRON,
}
)
self.assertEqual(
self.events, [(stdsyslog.LOG_INFO | stdsyslog.LOG_CRON, "[-] hello, world")]
)
def test_emitCustomSystem(self):
"""
L{SyslogObserver.emit} uses the value of the C{'system'} key to prefix
the logged message.
"""
self.observer.emit(
{
"message": ("hello, world",),
"isError": False,
"system": "nonDefaultSystem",
}
)
self.assertEqual(
self.events, [(stdsyslog.LOG_INFO, "[nonDefaultSystem] hello, world")]
)
def test_emitMessage(self):
"""
L{SyslogObserver.emit} logs the value of the C{'message'} key of the
event dictionary it is passed to the syslog.
"""
self.observer.emit(
{"message": ("hello, world",), "isError": False, "system": "-"}
)
self.assertEqual(self.events, [(stdsyslog.LOG_INFO, "[-] hello, world")])
def METHOD_NAME(self):
"""
Each line of a multiline message is emitted separately to the syslog.
"""
self.observer.emit(
{"message": ("hello,\nworld",), "isError": False, "system": "-"}
)
self.assertEqual(
self.events,
[(stdsyslog.LOG_INFO, "[-] hello,"), (stdsyslog.LOG_INFO, "[-] \tworld")],
)
def test_emitStripsTrailingEmptyLines(self):
"""
Trailing empty lines of a multiline message are omitted from the
messages sent to the syslog.
"""
self.observer.emit(
{"message": ("hello,\nworld\n\n",), "isError": False, "system": "-"}
)
self.assertEqual(
self.events,
[(stdsyslog.LOG_INFO, "[-] hello,"), (stdsyslog.LOG_INFO, "[-] \tworld")],
) |
5,939 | handle | # Copyright © Michal Čihař <michal@weblate.org>
#
# SPDX-License-Identifier: GPL-3.0-or-later
import argparse
import json
from django.core.exceptions import ValidationError
from django.core.management.base import CommandError
from django.utils.text import slugify
from weblate.trans.models import Component, Project
from weblate.utils.management.base import BaseCommand
class Command(BaseCommand):
"""Command for mass importing of repositories into Weblate based on JSON data."""
help = "imports projects based on JSON data"
def add_arguments(self, parser):
super().add_arguments(parser)
parser.add_argument(
"--project", default=None, required=True, help="Project where to operate"
)
parser.add_argument(
"--ignore",
default=False,
action="store_true",
help="Ignore already existing entries",
)
parser.add_argument(
"--update",
default=False,
action="store_true",
help="Update already existing entries",
)
parser.add_argument(
"--main-component",
default=None,
help="Define which component will be used as main for the VCS repository",
)
parser.add_argument(
"json-file",
type=argparse.FileType("r"),
help="JSON file containing component definition",
)
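# Illustrative input file (a hypothetical example, not shipped with this
# command): either a plain list of component definitions or an API dump with a
# "results" key. "name" and "filemask" are required; "slug" defaults to
# slugify(name) and "repo" to the --main-component repository link:
#   [{"name": "Docs", "filemask": "po/*.po", "file_format": "po",
#     "repo": "https://example.com/repo.git"}]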
def METHOD_NAME(self, *args, **options): # noqa: C901
"""Automatic import of components."""
# Get project
try:
project = Project.objects.get(slug=options["project"])
except Project.DoesNotExist:
raise CommandError("Project does not exist!")
# Get main component
main_component = None
if options["main_component"]:
try:
main_component = Component.objects.get(
project=project, slug=options["main_component"]
)
except Component.DoesNotExist:
raise CommandError("Main component does not exist!")
try:
data = json.load(options["json-file"])
except ValueError:
raise CommandError("Could not parse JSON file!")
finally:
options["json-file"].close()
allfields = {
field.name
for field in Component._meta.get_fields()
if field.editable and not field.is_relation
}
# Handle dumps from API
if "results" in data:
data = data["results"]
for item in data:
if "filemask" not in item or "name" not in item:
raise CommandError("Missing required fields in JSON!")
if "slug" not in item:
item["slug"] = slugify(item["name"])
if "repo" not in item:
if main_component is None:
raise CommandError("No main component and no repository URL!")
item["repo"] = main_component.get_repo_link_url()
try:
component = Component.objects.get(slug=item["slug"], project=project)
except Component.DoesNotExist:
params = {key: item[key] for key in allfields if key in item}
component = Component(project=project, **params)
try:
component.full_clean()
except ValidationError as error:
for key, value in error.message_dict.items():
self.stderr.write(
"Error in {}: {}".format(key, ", ".join(value))
)
raise CommandError("Component failed validation!")
component.save(force_insert=True)
self.stdout.write(
"Imported {} with {} translations".format(
component, component.translation_set.count()
)
)
else:
self.stderr.write(f"Component {component} already exists")
if options["ignore"]:
continue
if options["update"]:
for key in item:
if key not in allfields or key == "slug":
continue
setattr(component, key, item[key])
component.save()
continue
raise CommandError(
"Component already exists, use --ignore or --update!"
) |
5,940 | measure frequency | import time
import threading
import multiprocessing
import sys
from datetime import datetime
import re
import json
import importlib
import rospy
from std_srvs.srv import Empty
import cv2
from user_functions import GUIFunctions, HALFunctions
from console import start_console, close_console
from shared.value import SharedValue
# The brain process class
class BrainProcess(multiprocessing.Process):
def __init__(self, code, exit_signal):
super(BrainProcess, self).__init__()
# Initialize exit signal
self.exit_signal = exit_signal
# Function definitions for users to use
self.hal = HALFunctions()
self.gui = GUIFunctions()
# Time variables
self.time_cycle = SharedValue('brain_time_cycle')
self.ideal_cycle = SharedValue('brain_ideal_cycle')
self.iteration_counter = 0
# Get the sequential and iterative code
# Note: the code arrives with the two parts swapped (iterative first), so we
# reverse the indices here as a workaround; the root cause is unknown.
self.sequential_code = code[1]
self.iterative_code = code[0]
# Function to run to start the process
def run(self):
# Two threads for running and measuring
self.measure_thread = threading.Thread(target=self.METHOD_NAME)
self.thread = threading.Thread(target=self.process_code)
self.measure_thread.start()
self.thread.start()
print("Brain Process Started!")
self.exit_signal.wait()
# The process function
def process_code(self):
# Redirect information to console
start_console()
# Reference Environment for the exec() function
iterative_code, sequential_code = self.iterative_code, self.sequential_code
# print(iterative_code)
# Whatever the code is, first step is to just stop!
self.hal.sendV(0)
self.hal.sendW(0)
# The Python exec function
# Run the sequential part
gui_module, hal_module = self.generate_modules()
if sequential_code != "":
reference_environment = {"GUI": gui_module, "HAL": hal_module}
exec(sequential_code, reference_environment)
# Run the iterative part inside template
# and keep the check for flag
while not self.exit_signal.is_set():
start_time = datetime.now()
# Execute the iterative portion
if iterative_code != "":
exec(iterative_code, reference_environment)
# Template specifics to run!
finish_time = datetime.now()
dt = finish_time - start_time
ms = (dt.days * 24 * 60 * 60 + dt.seconds) * 1000 + dt.microseconds / 1000.0
# Keep updating the iteration counter
if iterative_code == "":
self.iteration_counter = 0
else:
self.iteration_counter = self.iteration_counter + 1
# The code should be run for at least the target time step
# If it's less put to sleep
# If it's more no problem as such, but we can change it!
time_cycle = self.time_cycle.get()
if ms < time_cycle:
time.sleep((time_cycle - ms) / 1000.0)
close_console()
print("Current Thread Joined!", flush=True)
# Function to generate the modules for use in ACE Editor
def generate_modules(self):
# Define HAL module
hal_module = importlib.util.module_from_spec(importlib.machinery.ModuleSpec("HAL", None))
hal_module.HAL = importlib.util.module_from_spec(importlib.machinery.ModuleSpec("HAL", None))
hal_module.HAL.motors = importlib.util.module_from_spec(importlib.machinery.ModuleSpec("motors", None))
# Add HAL functions
hal_module.HAL.getImage = self.hal.getImage
hal_module.HAL.setV = self.hal.sendV
hal_module.HAL.setW = self.hal.sendW
# Define GUI module
gui_module = importlib.util.module_from_spec(importlib.machinery.ModuleSpec("GUI", None))
gui_module.GUI = importlib.util.module_from_spec(importlib.machinery.ModuleSpec("GUI", None))
# Add GUI functions
gui_module.GUI.showImage = self.gui.showImage
# Adding modules to system
# Protip: The names should be different from
# other modules, otherwise some errors
sys.modules["HAL"] = hal_module
sys.modules["GUI"] = gui_module
return gui_module, hal_module
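# Sketch of the mechanism above: module_from_spec(ModuleSpec("HAL", None))
# builds an empty module object with no loader; attributes are attached by
# hand and the module is registered in sys.modules, so exec()'d user code can
# simply do
#   import HAL
#   HAL.setV(0.5)  # names as wired above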
# Function to measure the frequency of iterations
def METHOD_NAME(self):
previous_time = datetime.now()
# An infinite loop
while not self.exit_signal.is_set():
# Sleep for 2 seconds
time.sleep(2)
# Measure the current time and subtract from the previous time to get real time interval
current_time = datetime.now()
dt = current_time - previous_time
ms = (dt.days * 24 * 60 * 60 + dt.seconds) * 1000 + dt.microseconds / 1000.0
previous_time = current_time
# Get the time period
try:
self.ideal_cycle.add(ms / self.iteration_counter)
except ZeroDivisionError:
# No iterations completed in this interval
self.ideal_cycle.add(0)
# Reset the counter
self.iteration_counter = 0 |
5,941 | print verbose | # Copyright 2021-2022 NVIDIA Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import annotations
from dataclasses import dataclass
from datetime import datetime
from shlex import quote
from subprocess import run
from textwrap import indent
from typing import TYPE_CHECKING
from ..util.system import System
from ..util.types import DataclassMixin
from ..util.ui import kvtable, rule, section, value, warn
from .command import CMD_PARTS_CANONICAL, CMD_PARTS_LEGION
from .config import ConfigProtocol
from .launcher import Launcher, SimpleLauncher
from .logs import process_logs
if TYPE_CHECKING:
from ..util.types import Command, EnvDict
__all__ = ("LegateDriver", "CanonicalDriver", "print_verbose")
_DARWIN_GDB_WARN = """\
You must start the debugging session with the following command,
as LLDB no longer forwards the environment to subprocesses for security
reasons:
(lldb) process launch -v LIB_PATH={libpath} -v PYTHONPATH={pythonpath}
"""
@dataclass(frozen=True)
class LegateVersions(DataclassMixin):
"""Collect package versions relevant to Legate."""
legate_version: str
class LegateDriver:
"""Coordinate the system, user-configuration, and launcher to appropriately
execute the Legate process.
Parameters
----------
config : Config
system : System
"""
def __init__(self, config: ConfigProtocol, system: System) -> None:
self.config = config
self.system = system
self.launcher = Launcher.create(config, system)
@property
def cmd(self) -> Command:
"""The full command invocation that should be used to start Legate."""
config = self.config
launcher = self.launcher
system = self.system
parts = (part(config, system, launcher) for part in CMD_PARTS_LEGION)
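# sum(parts, ()) concatenates the per-part argv tuples into one flat tuple,
# e.g. (illustrative flags): sum((("--a", "1"), ("--b",)), ()) == ("--a", "1", "--b")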
return launcher.cmd + sum(parts, ())
@property
def env(self) -> EnvDict:
"""The system environment that should be used when started Legate."""
# in case we want to augment the launcher env we could do it here
return self.launcher.env
@property
def custom_env_vars(self) -> set[str]:
"""The names of environment variables that we have explicitly set
for the system environment.
"""
# in case we want to augment the launcher env we could do it here
return self.launcher.custom_env_vars
@property
def dry_run(self) -> bool:
"""Check verbose and dry run.
Returns
-------
bool : whether dry run is enabled
"""
if self.config.info.verbose:
# we only want to print verbose output on a "head" node
if (
self.launcher.kind != "none"
or self.launcher.detected_rank_id == "0"
):
METHOD_NAME(self.system, self)
self._darwin_gdb_warn()
return self.config.other.dry_run
def run(self) -> int:
"""Run the Legate process.
Returns
-------
int : process return code
"""
if self.dry_run:
return 0
with process_logs(self.config, self.system, self.launcher):
if self.config.other.timing:
print(f"Legate start: {datetime.now()}")
ret = run(self.cmd, env=self.env).returncode
if self.config.other.timing:
print(f"Legate end: {datetime.now()}")
return ret
def _darwin_gdb_warn(self) -> None:
gdb = self.config.debugging.gdb
if gdb and self.system.os == "Darwin":
libpath = self.env[self.system.LIB_PATH]
pythonpath = self.env["PYTHONPATH"]
print(
warn(
_DARWIN_GDB_WARN.format(
libpath=libpath, pythonpath=pythonpath
)
)
)
class CanonicalDriver(LegateDriver):
"""Coordinate the system, user-configuration, and launcher to appropriately
execute the Legate process.
Parameters
----------
config : Config
system : System
"""
def __init__(self, config: ConfigProtocol, system: System) -> None:
self.config = config
self.system = system
self.launcher = SimpleLauncher(config, system)
@property
def cmd(self) -> Command:
"""The full command invocation that should be used to start Legate."""
config = self.config
launcher = self.launcher
system = self.system
parts = (
part(config, system, launcher) for part in CMD_PARTS_CANONICAL
)
return sum(parts, ())
def run(self) -> int:
"""Run the Legate process.
Returns
-------
int : process return code
"""
assert False, "This function should not be invoked."
def get_versions() -> LegateVersions:
from legate import __version__ as lg_version
return LegateVersions(legate_version=lg_version)
def METHOD_NAME(
system: System,
driver: LegateDriver | None = None,
) -> None:
"""Print system and driver configuration values.
Parameters
----------
system : System
A System instance to obtain Legate and Legion paths from
driver : Driver or None, optional
If not None, a Driver instance to obtain command invocation and
environment from (default: None)
Returns
-------
None
"""
print(f"\n{rule('Legion Python Configuration')}")
print(section("\nLegate paths:"))
print(indent(str(system.legate_paths), prefix=" "))
print(section("\nLegion paths:"))
print(indent(str(system.legion_paths), prefix=" "))
print(section("\nVersions:"))
print(indent(str(get_versions()), prefix=" "))
if driver:
print(section("\nCommand:"))
cmd = " ".join(quote(t) for t in driver.cmd)
print(f" {value(cmd)}")
if keys := sorted(driver.custom_env_vars):
print(section("\nCustomized Environment:"))
print(
indent(
kvtable(driver.env, delim="=", align=False, keys=keys),
prefix=" ",
)
)
print(f"\n{rule()}")
print(flush=True) |
5,942 | init connection | import os
import pytest
from mongoengine import connect
from kairon import Utility
from kairon.shared.metering.constants import MetricType
from kairon.shared.metering.metering_processor import MeteringProcessor
from kairon.shared.metering.data_object import Metering
class TestMetering:
@pytest.fixture(autouse=True, scope="class")
def METHOD_NAME(self):
os.environ["system_file"] = "./tests/testing_data/system.yaml"
Utility.load_environment()
Utility.load_email_configuration()
connect(**Utility.mongoengine_connection(Utility.environment['database']["url"]))
yield None
def test_add_metrics_test_chat(self):
bot = 'abcb345'
bot1 = 'rhft284'
account = 12345
metric_type = MetricType.test_chat
assert MeteringProcessor.add_metrics(bot, account, metric_type)
assert MeteringProcessor.add_metrics(bot1, account, MetricType.test_chat)
def test_add_metrics_prod_chat(self):
bot = 'abcb345'
bot1 = 'bfg4657'
account = 12345
assert MeteringProcessor.add_metrics(bot, account, MetricType.prod_chat)
assert MeteringProcessor.add_metrics(bot1, account, MetricType.prod_chat)
def test_get_metric(self):
account = 12345
bot = 'abcb345'
bot1 = 'bfg4657'
bot2 = 'rhft284'
test_chat_count = MeteringProcessor.get_logs(account, metric_type=MetricType.test_chat, bot=bot)["logs"]
del test_chat_count[0]["timestamp"]
assert test_chat_count[0]['bot'] == bot
assert test_chat_count[0]['account'] == account
assert test_chat_count[0]['metric_type'] == MetricType.test_chat.value
assert MeteringProcessor.get_logs(account, metric_type=MetricType.test_chat, bot=bot1)["logs"] == []
test_chat_count = MeteringProcessor.get_logs(account, metric_type=MetricType.test_chat, bot=bot2)["logs"]
del test_chat_count[0]["timestamp"]
assert test_chat_count[0]['bot'] == bot2
assert test_chat_count[0]['account'] == account
assert test_chat_count[0]['metric_type'] == MetricType.test_chat.value
prod_chat_count = MeteringProcessor.get_logs(account, metric_type=MetricType.prod_chat, bot=bot)["logs"]
del prod_chat_count[0]["timestamp"]
assert prod_chat_count[0]['bot'] == bot
assert prod_chat_count[0]['account'] == account
assert prod_chat_count[0]['metric_type'] == MetricType.prod_chat.value
prod_chat_count = MeteringProcessor.get_logs(account, metric_type=MetricType.prod_chat, bot=bot1)["logs"]
del prod_chat_count[0]["timestamp"]
assert prod_chat_count[0]['bot'] == bot1
assert prod_chat_count[0]['account'] == account
assert prod_chat_count[0]['metric_type'] == MetricType.prod_chat.value
def test_update_metrics_conversation_feedback(self):
bot = 'test_update_metrics_conversation_feedback'
account = 12345
data = {"feedback": "",
"rating": 1,
"botId": "6322ebbb3c62158dab4aee71",
"botReply": [{"text":"Hello! How are you?"}],
"userReply": "",
"date":"2023-07-17T06:48:02.453Z",
"sender_id": None
}
metric_type = MetricType.conversation_feedback
id = MeteringProcessor.add_metrics(bot, account, metric_type, **data)
value = Metering.objects().get(id=id)
assert value.feedback == ""
MeteringProcessor.update_metrics(id, bot, metric_type, **{"feedback": "test"})
value = Metering.objects().get(id=id)
assert value.feedback == "test"
def test_update_metrics_conversation_feedback_add_field(self):
bot = 'test_update_metrics_conversation_feedback'
account = 12345
data = {"rating": 1,
"botId": "6322ebbb3c62158dab4aee71",
"botReply": [{"text":"Hello! How are you?"}],
"userReply": "",
"date":"2023-07-17T06:48:02.453Z",
"sender_id": None
}
metric_type = MetricType.conversation_feedback
id = MeteringProcessor.add_metrics(bot, account, metric_type, **data)
value = Metering.objects().get(id=id)
assert not hasattr(value, "feedback")
MeteringProcessor.update_metrics(id, bot, metric_type, **{"feedback": "test"})
value = Metering.objects().get(id=id)
assert value.feedback == "test"
def test_update_invalid_metrics_conversation_feedback(self):
bot = 'test_update_metrics_conversation_feedback'
with pytest.raises(ValueError, match="Invalid metric type"):
MeteringProcessor.update_metrics("test", bot, "test", **{"feedback": "test"})
|
5,943 | get lprobs | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import math
import torch
import torch.nn.functional as F
from fairseq import utils
from fairseq.criterions import LegacyFairseqCriterion, register_criterion
from fairseq.data import encoders
@register_criterion("wsc")
class WSCCriterion(LegacyFairseqCriterion):
def __init__(self, args, task):
super().__init__(args, task)
if self.args.save_predictions is not None:
self.prediction_h = open(self.args.save_predictions, "w")
else:
self.prediction_h = None
self.bpe = encoders.build_bpe(args.bpe)
self.tokenizer = encoders.build_tokenizer(args.tokenizer)
def __del__(self):
if self.prediction_h is not None:
self.prediction_h.close()
@staticmethod
def add_args(parser):
"""Add criterion-specific arguments to the parser."""
parser.add_argument("--wsc-margin-alpha", type=float, metavar="A", default=1.0)
parser.add_argument("--wsc-margin-beta", type=float, metavar="B", default=0.0)
parser.add_argument(
"--wsc-cross-entropy",
action="store_true",
help="use cross entropy formulation instead of margin loss",
)
parser.add_argument(
"--save-predictions", metavar="FILE", help="file to save predictions to"
)
def get_masked_input(self, tokens, mask):
masked_tokens = tokens.clone()
masked_tokens[mask] = self.task.mask
return masked_tokens
def METHOD_NAME(self, model, tokens, mask):
logits, _ = model(src_tokens=self.get_masked_input(tokens, mask))
lprobs = F.log_softmax(logits, dim=-1, dtype=torch.float)
scores = lprobs.gather(2, tokens.unsqueeze(-1)).squeeze(-1)
mask = mask.type_as(scores)
scores = (scores * mask).sum(dim=-1) / mask.sum(dim=-1)
return scores
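# Scoring sketch with made-up numbers: gather picks the log-prob of each gold
# token, and the 0/1 mask keeps only the query span, e.g. token scores
# [-1.0, -3.0] with mask [1, 1] average to (-1.0 + -3.0) / 2 = -2.0.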
def get_loss(self, query_lprobs, cand_lprobs):
if self.args.wsc_cross_entropy:
return F.cross_entropy(
torch.cat([query_lprobs, cand_lprobs]).unsqueeze(0),
query_lprobs.new([0]).long(),
)
else:
return (
-query_lprobs
+ self.args.wsc_margin_alpha
* (cand_lprobs - query_lprobs + self.args.wsc_margin_beta).clamp(min=0)
).sum()
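# Margin-loss sketch with made-up numbers (wsc_margin_alpha=1, beta=0): for a
# query score of -2.0 and a candidate score of -1.5 the loss is
# -(-2.0) + max(-1.5 - (-2.0), 0) = 2.0 + 0.5 = 2.5, pushing the query
# log-prob above the candidate's.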
def forward(self, model, sample, reduce=True):
# compute loss and accuracy
loss, nloss = 0.0, 0
ncorrect, nqueries = 0, 0
for i, label in enumerate(sample["labels"]):
query_lprobs = self.METHOD_NAME(
model,
sample["query_tokens"][i].unsqueeze(0),
sample["query_masks"][i].unsqueeze(0),
)
cand_lprobs = self.METHOD_NAME(
model,
sample["candidate_tokens"][i],
sample["candidate_masks"][i],
)
pred = (query_lprobs >= cand_lprobs).all().item()
if label is not None:
label = 1 if label else 0
ncorrect += 1 if pred == label else 0
nqueries += 1
if label:
# only compute a loss for positive instances
nloss += 1
loss += self.get_loss(query_lprobs, cand_lprobs)
id = sample["id"][i].item()
if self.prediction_h is not None:
print("{}\t{}\t{}".format(id, pred, label), file=self.prediction_h)
if nloss == 0:
loss = torch.tensor(0.0, requires_grad=True)
sample_size = nqueries if nqueries > 0 else 1
logging_output = {
"loss": utils.item(loss.data) if reduce else loss.data,
"ntokens": sample["ntokens"],
"nsentences": sample["nsentences"],
"sample_size": sample_size,
"ncorrect": ncorrect,
"nqueries": nqueries,
}
return loss, sample_size, logging_output
@staticmethod
def aggregate_logging_outputs(logging_outputs):
"""Aggregate logging outputs from data parallel training."""
loss_sum = sum(log.get("loss", 0) for log in logging_outputs)
ntokens = sum(log.get("ntokens", 0) for log in logging_outputs)
nsentences = sum(log.get("nsentences", 0) for log in logging_outputs)
sample_size = sum(log.get("sample_size", 0) for log in logging_outputs)
agg_output = {
"loss": loss_sum / sample_size / math.log(2),
"ntokens": ntokens,
"nsentences": nsentences,
"sample_size": sample_size,
}
ncorrect = sum(log.get("ncorrect", 0) for log in logging_outputs)
nqueries = sum(log.get("nqueries", 0) for log in logging_outputs)
if nqueries > 0:
agg_output["accuracy"] = ncorrect / float(nqueries)
return agg_output
@register_criterion("winogrande")
class WinograndeCriterion(WSCCriterion):
def forward(self, model, sample, reduce=True):
# compute loss and accuracy
query_lprobs = self.METHOD_NAME(
model,
sample["query_tokens"],
sample["query_masks"],
)
cand_lprobs = self.METHOD_NAME(
model,
sample["candidate_tokens"],
sample["candidate_masks"],
)
pred = query_lprobs >= cand_lprobs
loss = self.get_loss(query_lprobs, cand_lprobs)
sample_size = sample["query_tokens"].size(0)
ncorrect = pred.sum().item()
logging_output = {
"loss": utils.item(loss.data) if reduce else loss.data,
"ntokens": sample["ntokens"],
"nsentences": sample["nsentences"],
"sample_size": sample_size,
"ncorrect": ncorrect,
"nqueries": sample_size,
}
return loss, sample_size, logging_output |
5,944 | test draw write round trip | # This file is part of Hypothesis, which may be found at
# https://github.com/HypothesisWorks/hypothesis/
#
# Copyright the Hypothesis Authors.
# Individual contributors are listed in AUTHORS.rst and the git log.
#
# This Source Code Form is subject to the terms of the Mozilla Public License,
# v. 2.0. If a copy of the MPL was not distributed with this file, You can
# obtain one at https://mozilla.org/MPL/2.0/.
import sys
import pytest
from hypothesis import HealthCheck, assume, example, given, settings, strategies as st
from hypothesis.internal.compat import ceil, floor, int_from_bytes, int_to_bytes
from hypothesis.internal.conjecture import floats as flt
from hypothesis.internal.conjecture.data import ConjectureData
from hypothesis.internal.conjecture.engine import ConjectureRunner
from hypothesis.internal.floats import float_to_int
EXPONENTS = list(range(flt.MAX_EXPONENT + 1))
assert len(EXPONENTS) == 2**11
def assert_reordered_exponents(res):
res = list(res)
assert len(res) == len(EXPONENTS)
for x in res:
assert res.count(x) == 1
assert 0 <= x <= flt.MAX_EXPONENT
def test_encode_permutes_elements():
assert_reordered_exponents(map(flt.encode_exponent, EXPONENTS))
def test_decode_permutes_elements():
assert_reordered_exponents(map(flt.decode_exponent, EXPONENTS))
def test_decode_encode():
for e in EXPONENTS:
assert flt.decode_exponent(flt.encode_exponent(e)) == e
def test_encode_decode():
for e in EXPONENTS:
assert flt.encode_exponent(flt.decode_exponent(e)) == e
@given(st.data())
def test_double_reverse_bounded(data):
n = data.draw(st.integers(1, 64))
i = data.draw(st.integers(0, 2**n - 1))
j = flt.reverse_bits(i, n)
assert flt.reverse_bits(j, n) == i
@given(st.integers(0, 2**64 - 1))
def test_double_reverse(i):
j = flt.reverse64(i)
assert flt.reverse64(j) == i
@example(1.25)
@example(1.0)
@given(st.floats())
def METHOD_NAME(f):
d = ConjectureData.for_buffer(bytes(10))
flt.write_float(d, f)
d2 = ConjectureData.for_buffer(d.buffer)
g = flt.draw_float(d2)
if f == f:
assert f == g
assert float_to_int(f) == float_to_int(g)
d3 = ConjectureData.for_buffer(d2.buffer)
flt.draw_float(d3)
assert d3.buffer == d2.buffer
@example(0.0)
@example(2.5)
@example(8.000000000000007)
@example(3.0)
@example(2.0)
@example(1.9999999999999998)
@example(1.0)
@given(st.floats(min_value=0.0))
def test_floats_round_trip(f):
i = flt.float_to_lex(f)
g = flt.lex_to_float(i)
assert float_to_int(f) == float_to_int(g)
@settings(suppress_health_check=[HealthCheck.too_slow])
@example(1, 0.5)
@given(st.integers(1, 2**53), st.floats(0, 1).filter(lambda x: x not in (0, 1)))
def test_floats_order_worse_than_their_integral_part(n, g):
f = n + g
assume(int(f) != f)
assume(int(f) != 0)
i = flt.float_to_lex(f)
if f < 0:
g = ceil(f)
else:
g = floor(f)
assert flt.float_to_lex(float(g)) < i
integral_floats = st.floats(allow_infinity=False, allow_nan=False, min_value=0.0).map(
lambda x: abs(float(int(x)))
)
@given(integral_floats, integral_floats)
def test_integral_floats_order_as_integers(x, y):
assume(x != y)
x, y = sorted((x, y))
assert flt.float_to_lex(x) < flt.float_to_lex(y)
@given(st.floats(0, 1))
def test_fractional_floats_are_worse_than_one(f):
assume(0 < f < 1)
assert flt.float_to_lex(f) > flt.float_to_lex(1)
def test_reverse_bits_table_reverses_bits():
def bits(x):
result = []
for _ in range(8):
result.append(x & 1)
x >>= 1
result.reverse()
return result
for i, b in enumerate(flt.REVERSE_BITS_TABLE):
assert bits(i) == list(reversed(bits(b)))
def test_reverse_bits_table_has_right_elements():
assert sorted(flt.REVERSE_BITS_TABLE) == list(range(256))
def float_runner(start, condition):
def parse_buf(b):
return flt.lex_to_float(int_from_bytes(b))
def test_function(data):
f = flt.draw_float(data)
if condition(f):
data.mark_interesting()
runner = ConjectureRunner(test_function)
runner.cached_test_function(bytes(1) + int_to_bytes(flt.float_to_lex(start), 8))
assert runner.interesting_examples
return runner
def minimal_from(start, condition):
runner = float_runner(start, condition)
runner.shrink_interesting_examples()
(v,) = runner.interesting_examples.values()
result = flt.draw_float(ConjectureData.for_buffer(v.buffer))
assert condition(result)
return result
INTERESTING_FLOATS = [0.0, 1.0, 2.0, sys.float_info.max, float("inf"), float("nan")]
@pytest.mark.parametrize(
("start", "end"),
[
(a, b)
for a in INTERESTING_FLOATS
for b in INTERESTING_FLOATS
if flt.float_to_lex(a) > flt.float_to_lex(b)
],
)
def test_can_shrink_downwards(start, end):
assert minimal_from(start, lambda x: not (x < end)) == end
@pytest.mark.parametrize(
"f", [1, 2, 4, 8, 10, 16, 32, 64, 100, 128, 256, 500, 512, 1000, 1024]
)
@pytest.mark.parametrize("mul", [1.1, 1.5, 9.99, 10])
def test_shrinks_downwards_to_integers(f, mul):
g = minimal_from(f * mul, lambda x: x >= f)
assert g == f
def test_shrink_to_integer_upper_bound():
assert minimal_from(1.1, lambda x: 1 < x <= 2) == 2
def test_shrink_up_to_one():
assert minimal_from(0.5, lambda x: 0.5 <= x <= 1.5) == 1
def test_shrink_down_to_half():
assert minimal_from(0.75, lambda x: 0 < x < 1) == 0.5
def test_shrink_fractional_part():
assert minimal_from(2.5, lambda x: divmod(x, 1)[1] == 0.5) == 1.5
def test_does_not_shrink_across_one():
# This is something of an odd special case. Because of our encoding we
# prefer all numbers >= 1 to all numbers in 0 < x < 1. For the most part
# this is the correct thing to do, but there are some low negative exponent
# cases where we get odd behaviour like this.
# This test primarily exists to validate that we don't try to subtract one
# from the starting point and trigger an internal exception.
assert minimal_from(1.1, lambda x: x == 1.1 or 0 < x < 1) == 1.1
@pytest.mark.parametrize("f", [2.0, 10000000.0])
def test_converts_floats_to_integer_form(f):
assert flt.is_simple(f)
buf = int_to_bytes(flt.base_float_to_lex(f), 8)
runner = float_runner(f, lambda g: g == f)
runner.shrink_interesting_examples()
(v,) = runner.interesting_examples.values()
assert v.buffer[:-1] < buf |
5,945 | test claim one | import pytest
from rotkehlchen.accounting.structures.balance import Balance
from rotkehlchen.accounting.structures.evm_event import EvmEvent
from rotkehlchen.accounting.structures.types import HistoryEventSubType, HistoryEventType
from rotkehlchen.chain.ethereum.modules.stakedao.constants import (
CPT_STAKEDAO,
STAKEDAO_CLAIMER1,
STAKEDAO_CLAIMER2,
)
from rotkehlchen.chain.evm.decoding.constants import CPT_GAS
from rotkehlchen.constants.assets import A_CRV, A_ETH
from rotkehlchen.fval import FVal
from rotkehlchen.tests.utils.ethereum import get_decoded_events_of_transaction
from rotkehlchen.types import Location, TimestampMS, deserialize_evm_tx_hash
from rotkehlchen.utils.misc import timestamp_to_date
@pytest.mark.vcr(filter_query_parameters=['apikey'])
@pytest.mark.parametrize('ethereum_accounts', [['0x6eEC7Dd840e3c1aBbaC157bB3C14e2aCBa72bC1e']])
def METHOD_NAME(database, ethereum_inquirer, ethereum_accounts):
tx_hex = deserialize_evm_tx_hash('0x3f747b34f1d0a6c59c62b5d6c3aba8f2bd278546cd53daa131327242c7c5b02e') # noqa: E501
evmhash = deserialize_evm_tx_hash(tx_hex)
user_address = ethereum_accounts[0]
events, _ = get_decoded_events_of_transaction(
evm_inquirer=ethereum_inquirer,
database=database,
tx_hash=tx_hex,
)
timestamp = TimestampMS(1684662791000)
amount_str = '215.403304465915246838'
period = 1684368000
expected_events = [
EvmEvent(
tx_hash=evmhash,
sequence_index=0,
timestamp=timestamp,
location=Location.ETHEREUM,
event_type=HistoryEventType.SPEND,
event_subtype=HistoryEventSubType.FEE,
asset=A_ETH,
balance=Balance(amount=FVal('0.003543266133945936')),
location_label=user_address,
notes='Burned 0.003543266133945936 ETH for gas',
counterparty=CPT_GAS,
address=None,
), EvmEvent(
tx_hash=evmhash,
sequence_index=580,
timestamp=timestamp,
location=Location.ETHEREUM,
event_type=HistoryEventType.RECEIVE,
event_subtype=HistoryEventSubType.REWARD,
asset=A_CRV,
balance=Balance(amount=FVal(amount_str)),
location_label=user_address,
notes=f'Claimed {amount_str} CRV from StakeDAO veCRV bribes for the period starting at {timestamp_to_date(period, formatstr="%d/%m/%Y %H:%M:%S")}', # noqa: E501
counterparty=CPT_STAKEDAO,
address=STAKEDAO_CLAIMER2,
),
]
assert events == expected_events
@pytest.mark.vcr(filter_query_parameters=['apikey'])
@pytest.mark.parametrize('ethereum_accounts', [['0x3c28C42B24B7909c8292920929f083F60C4997A6']])
def test_claim_multiple(database, ethereum_inquirer, ethereum_accounts):
tx_hex = deserialize_evm_tx_hash('0xc866db3fcbef6359919c444de324b6f059f299ed155f5bff00abd81537c88627') # noqa: E501
evmhash = deserialize_evm_tx_hash(tx_hex)
user_address = ethereum_accounts[0]
events, _ = get_decoded_events_of_transaction(
evm_inquirer=ethereum_inquirer,
database=database,
tx_hash=tx_hex,
)
timestamp = TimestampMS(1678952351000)
period = 1678924800
amount1_str = '43.57001129039620188'
amount2_str = '41.966838515681574848'
expected_events = [
EvmEvent(
tx_hash=evmhash,
sequence_index=0,
timestamp=timestamp,
location=Location.ETHEREUM,
event_type=HistoryEventType.SPEND,
event_subtype=HistoryEventSubType.FEE,
asset=A_ETH,
balance=Balance(amount=FVal('0.002833214770290904')),
location_label=user_address,
notes='Burned 0.002833214770290904 ETH for gas',
counterparty=CPT_GAS,
address=None,
), EvmEvent(
tx_hash=evmhash,
sequence_index=328,
timestamp=timestamp,
location=Location.ETHEREUM,
event_type=HistoryEventType.RECEIVE,
event_subtype=HistoryEventSubType.REWARD,
asset=A_CRV,
balance=Balance(amount=FVal(amount1_str)),
location_label=user_address,
notes=f'Claimed {amount1_str} CRV from StakeDAO veCRV bribes for the period starting at {timestamp_to_date(period, formatstr="%d/%m/%Y %H:%M:%S")}', # noqa: E501
counterparty=CPT_STAKEDAO,
address=STAKEDAO_CLAIMER1,
), EvmEvent(
tx_hash=evmhash,
sequence_index=330,
timestamp=timestamp,
location=Location.ETHEREUM,
event_type=HistoryEventType.RECEIVE,
event_subtype=HistoryEventSubType.REWARD,
asset=A_CRV,
balance=Balance(amount=FVal(amount2_str)),
location_label=user_address,
notes=f'Claimed {amount2_str} CRV from StakeDAO veCRV bribes for the period starting at {timestamp_to_date(period, formatstr="%d/%m/%Y %H:%M:%S")}', # noqa: E501
counterparty=CPT_STAKEDAO,
address=STAKEDAO_CLAIMER1,
),
]
assert events == expected_events |
5,946 | get destination | #
# Copyright (c) 2022, Neptune Labs Sp. z o.o.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
__all__ = ["FileSeries"]
import io
import os
import pathlib
from typing import (
Iterable,
List,
Optional,
)
from PIL import (
Image,
UnidentifiedImageError,
)
from neptune.attributes.series.series import Series
from neptune.exceptions import (
FileNotFound,
OperationNotSupported,
)
from neptune.internal.operation import (
ClearImageLog,
ImageValue,
LogImages,
Operation,
)
from neptune.internal.types.file_types import FileType
from neptune.internal.utils import base64_encode
from neptune.internal.utils.limits import image_size_exceeds_limit_for_logging
from neptune.types import File
from neptune.types.series.file_series import FileSeries as FileSeriesVal
Val = FileSeriesVal
Data = File
LogOperation = LogImages
class FileSeries(Series[Val, Data, LogOperation], max_batch_size=1, operation_cls=LogOperation):
@classmethod
def _map_series_val(cls, value: Val) -> List[ImageValue]:
return [
ImageValue(
data=cls._get_base64_image_content(val),
name=value.name,
description=value.description,
)
for val in value.values
]
def _get_clear_operation(self) -> Operation:
return ClearImageLog(self._path)
def _data_to_value(self, values: Iterable, **kwargs) -> Val:
return FileSeriesVal(values, **kwargs)
def _is_value_type(self, value) -> bool:
return isinstance(value, FileSeriesVal)
@staticmethod
def _get_base64_image_content(file: File) -> str:
if file.file_type is FileType.LOCAL_FILE:
if not os.path.exists(file.path):
raise FileNotFound(file.path)
with open(file.path, "rb") as image_file:
file_content = File.from_stream(image_file).content
else:
file_content = file.content
try:
with Image.open(io.BytesIO(file_content)):
...
except UnidentifiedImageError:
raise OperationNotSupported(
"FileSeries supports only image files for now. Other file types will be implemented in future."
)
if image_size_exceeds_limit_for_logging(len(file_content)):
file_content = b""
return base64_encode(file_content)
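# Why Image.open suffices as validation (sketch): it only parses the header,
# so it cheaply rejects non-image payloads without decoding pixels, e.g.
#   Image.open(io.BytesIO(b"not an image"))  # raises UnidentifiedImageError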
def download(self, destination: Optional[str]):
target_dir = self.METHOD_NAME(destination)
item_count = self._backend.get_image_series_values(
self._container_id, self._container_type, self._path, 0, 1
).totalItemCount
for i in range(0, item_count):
self._backend.download_file_series_by_index(
self._container_id, self._container_type, self._path, i, target_dir
)
def download_last(self, destination: Optional[str]):
target_dir = self.METHOD_NAME(destination)
item_count = self._backend.get_image_series_values(
self._container_id, self._container_type, self._path, 0, 1
).totalItemCount
if item_count > 0:
self._backend.download_file_series_by_index(
self._container_id,
self._container_type,
self._path,
item_count - 1,
target_dir,
)
else:
raise ValueError("Unable to download last file - series is empty")
def METHOD_NAME(self, destination: Optional[str]):
target_dir = destination
if destination is None:
target_dir = os.path.join("neptune", self._path[-1])
pathlib.Path(os.path.abspath(target_dir)).mkdir(parents=True, exist_ok=True)
return target_dir |
5,947 | test psycopg connection params | from unittest.mock import AsyncMock, patch
import asyncpg
from tortoise import connections
from tortoise.contrib import test
class TestConnectionParams(test.SimpleTestCase):
async def asyncSetUp(self) -> None:
await super().asyncSetUp()
async def asyncTearDown(self) -> None:
await super().asyncTearDown()
async def test_mysql_connection_params(self):
with patch(
"tortoise.backends.mysql.client.mysql.create_pool", new=AsyncMock()
) as mysql_connect:
await connections._init(
{
"models": {
"engine": "tortoise.backends.mysql",
"credentials": {
"database": "test",
"host": "127.0.0.1",
"password": "foomip",
"port": 3306,
"user": "root",
"connect_timeout": 1.5,
"charset": "utf8mb4",
},
}
},
False,
)
await connections.get("models").create_connection(with_db=True)
mysql_connect.assert_awaited_once_with( # nosec
autocommit=True,
charset="utf8mb4",
connect_timeout=1.5,
db="test",
host="127.0.0.1",
password="foomip",
port=3306,
user="root",
maxsize=5,
minsize=1,
sql_mode="STRICT_TRANS_TABLES",
)
async def test_asyncpg_connection_params(self):
try:
with patch(
"tortoise.backends.asyncpg.client.asyncpg.create_pool", new=AsyncMock()
) as asyncpg_connect:
await connections._init(
{
"models": {
"engine": "tortoise.backends.asyncpg",
"credentials": {
"database": "test",
"host": "127.0.0.1",
"password": "foomip",
"port": 5432,
"user": "root",
"timeout": 30,
"ssl": True,
},
}
},
False,
)
await connections.get("models").create_connection(with_db=True)
asyncpg_connect.assert_awaited_once_with( # nosec
None,
database="test",
host="127.0.0.1",
password="foomip",
port=5432,
ssl=True,
timeout=30,
user="root",
max_size=5,
min_size=1,
connection_class=asyncpg.connection.Connection,
loop=None,
server_settings={},
)
except ImportError:
self.skipTest("asyncpg not installed")
async def METHOD_NAME(self):
try:
with patch(
"tortoise.backends.psycopg.client.PsycopgClient.create_pool", new=AsyncMock()
) as patched_create_pool:
mocked_pool = AsyncMock()
patched_create_pool.return_value = mocked_pool
await connections._init(
{
"models": {
"engine": "tortoise.backends.psycopg",
"credentials": {
"database": "test",
"host": "127.0.0.1",
"password": "foomip",
"port": 5432,
"user": "root",
"timeout": 1,
"ssl": True,
},
}
},
False,
)
await connections.get("models").create_connection(with_db=True)
patched_create_pool.assert_awaited_once()
mocked_pool.open.assert_awaited_once_with( # nosec
wait=True,
timeout=1,
)
except ImportError:
self.skipTest("psycopg not installed") |
5,948 | fetch changelog | import hashlib
import os
import sys
from functools import wraps
from pipenv.patched.pip._vendor.packaging.version import parse as parse_version
from pathlib import Path
import pipenv.vendor.click as click
# Jinja2 will only be installed if the optional deps are installed.
# It's fine if our functions fail, but don't let this top level
# import error out.
try:
import jinja2
except ImportError:
jinja2 = None
import pipenv.patched.pip._vendor.requests as requests
def highest_base_score(vulns):
highest_base_score = 0
for vuln in vulns:
if vuln['severity'] is not None:
highest_base_score = max(highest_base_score, (vuln['severity'].get('cvssv3', {}) or {}).get('base_score', 10))
return highest_base_score
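# Illustrative sketch (not part of the original module): the `or {}` guard and
# the default of 10 mean a vulnerability with a severity entry but no cvssv3
# payload is treated as worst-case.
def _demo_highest_base_score():
    vulns = [
        {'severity': None},                             # skipped entirely
        {'severity': {'cvssv3': {'base_score': 5.1}}},  # contributes 5.1
        {'severity': {'cvssv3': None}},                 # falls back to 10
    ]
    assert highest_base_score(vulns) == 10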
def generate_branch_name(pkg, remediation):
return pkg + "/" + remediation['recommended_version']
def generate_issue_title(pkg, remediation):
return f"Security Vulnerability in {pkg}"
def generate_title(pkg, remediation, vulns):
suffix = "y" if len(vulns) == 1 else "ies"
return f"Update {pkg} from {remediation['current_version']} to {remediation['recommended_version']} to fix {len(vulns)} vulnerabilit{suffix}"
def generate_body(pkg, remediation, vulns, *, api_key):
changelog = METHOD_NAME(pkg, remediation['current_version'], remediation['recommended_version'], api_key=api_key)
p = Path(__file__).parent / 'templates'
env = jinja2.Environment(loader=jinja2.FileSystemLoader(Path(p)))
template = env.get_template('pr.jinja2')
overall_impact = cvss3_score_to_label(highest_base_score(vulns))
result = template.render({"pkg": pkg, "remediation": remediation, "vulns": vulns, "changelog": changelog, "overall_impact": overall_impact, "summary_changelog": False })
# GitHub has a PR body length limit of 65536. If we're going over that, skip the changelog and just use a link.
if len(result) > 65500:
return template.render({"pkg": pkg, "remediation": remediation, "vulns": vulns, "changelog": changelog, "overall_impact": overall_impact, "summary_changelog": True })
return result
def generate_issue_body(pkg, remediation, vulns, *, api_key):
changelog = METHOD_NAME(pkg, remediation['current_version'], remediation['recommended_version'], api_key=api_key)
p = Path(__file__).parent / 'templates'
env = jinja2.Environment(loader=jinja2.FileSystemLoader(Path(p)))
template = env.get_template('issue.jinja2')
overall_impact = cvss3_score_to_label(highest_base_score(vulns))
result = template.render({"pkg": pkg, "remediation": remediation, "vulns": vulns, "changelog": changelog, "overall_impact": overall_impact, "summary_changelog": False })
# GitHub has a PR body length limit of 65536. If we're going over that, skip the changelog and just use a link.
if len(result) > 65500:
return template.render({"pkg": pkg, "remediation": remediation, "vulns": vulns, "changelog": changelog, "overall_impact": overall_impact, "summary_changelog": True })
def generate_commit_message(pkg, remediation):
return f"Update {pkg} from {remediation['current_version']} to {remediation['recommended_version']}"
def git_sha1(raw_contents):
return hashlib.sha1(b"blob " + str(len(raw_contents)).encode('ascii') + b"\0" + raw_contents).hexdigest()
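# Quick sanity check (illustrative only, not part of the original module):
# git_sha1 mirrors `git hash-object` by hashing the "blob <size>\0" header
# followed by the raw contents.
def _demo_git_sha1():
    # `printf 'hello\n' | git hash-object --stdin` prints the same digest.
    assert git_sha1(b"hello\n") == "ce013625030ba8dba906f756967f9e9ca394464a"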
def METHOD_NAME(package, from_version, to_version, *, api_key):
from_version = parse_version(from_version)
to_version = parse_version(to_version)
changelog = {}
r = requests.get(
"https://pyup.io/api/v1/changelogs/{}/".format(package),
headers={"X-Api-Key": api_key}
)
if r.status_code == 200:
data = r.json()
if data:
# sort the changelog by release
sorted_log = sorted(data.items(), key=lambda v: parse_version(v[0]), reverse=True)
# go over each release and add it to the log if it's within the "upgrade
# range" e.g. update from 1.2 to 1.3 includes a changelog for 1.2.1 but
# not for 0.4.
for version, log in sorted_log:
parsed_version = parse_version(version)
if parsed_version > from_version and parsed_version <= to_version:
changelog[version] = log
return changelog
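# Worked example of the half-open "upgrade range" above (illustrative only):
# updating 1.2 -> 1.3 keeps 1.2.1 and 1.3 but drops 1.2 itself and 0.4.
def _demo_upgrade_range():
    lo, hi = parse_version("1.2"), parse_version("1.3")
    kept = [v for v in ["0.4", "1.2", "1.2.1", "1.3"]
            if lo < parse_version(v) <= hi]
    assert kept == ["1.2.1", "1.3"]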
def cvss3_score_to_label(score):
    if 0.1 <= score <= 3.9:
        return 'low'
    elif 4.0 <= score <= 6.9:
        return 'medium'
    elif 7.0 <= score <= 8.9:
        return 'high'
    elif score >= 9.0:
return 'critical'
return None
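# Illustrative (not part of the original module): the CVSS v3 rating bands,
# including the edge cases that fall through to None (a 0.0 score, or a value
# between two band boundaries).
def _demo_cvss3_labels():
    assert cvss3_score_to_label(0.0) is None
    assert cvss3_score_to_label(3.9) == 'low'
    assert cvss3_score_to_label(7.0) == 'high'
    assert cvss3_score_to_label(9.8) == 'critical'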
def require_files_report(func):
@wraps(func)
def inner(obj, *args, **kwargs):
if obj.report['report_meta']['scan_target'] != "files":
click.secho("This report was generated against an environment, but this alerter requires a file.", fg='red')
sys.exit(1)
files = obj.report['report_meta']['scanned']
obj.requirements_files = {}
for f in files:
if not os.path.exists(f):
cwd = os.getcwd()
click.secho("A requirements file scanned in the report, {}, does not exist (looking in {}).".format(f, cwd), fg='red')
sys.exit(1)
            with open(f, "rb") as req_file:
                obj.requirements_files[f] = req_file.read()
return func(obj, *args, **kwargs)
return inner |
5,949 | get multi | """
Cache configuration.
This works in conjunction with dogpile.cache_ to provide caching for any Weasyl
project.
.. _dogpile.cache: http://dogpilecache.readthedocs.org/en/latest/
"""
import json
import threading
import dogpile.cache
import dogpile.cache.backends.memcached
import pylibmc
from dogpile.cache.api import CachedValue, NO_VALUE
from dogpile.cache.proxy import ProxyBackend
from dogpile.cache import make_region
region = make_region()
class ThreadCacheProxy(ProxyBackend):
"""
A thread-local caching proxy.
What this means is that all of the requests made to memcached (or whatever)
will be cached locally, and future requests will refer to the local cache
instead of having to make another memcached round trip.
This is convenient, but the cache must be periodically expired in order for
changes in memcached to propagate to the application. :py:meth:`.zap_cache`
will clear the entire cache for the current thread. It's intended to be
called, for example, at the end of an HTTP request's lifetime.
"""
_local = threading.local()
@classmethod
def zap_cache(cls):
"""
Clear the cache for the current thread.
If there wasn't any cache for the current thread, do nothing.
"""
try:
del cls._local.cache_dict
except AttributeError:
pass
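    # Hypothetical wiring sketch (backend name and arguments are assumptions,
    # not part of this module): dogpile.cache can wrap a region's backend in
    # proxies at configure time, and a per-request teardown then calls
    # zap_cache() so memcached changes become visible again.
    #
    #     region.configure(
    #         'dogpile.cache.pylibmc',
    #         arguments={'url': ['127.0.0.1']},
    #         wrap=[ThreadCacheProxy],
    #     )
    #     ...
    #     ThreadCacheProxy.zap_cache()  # e.g. in an end-of-request hook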
@property
def _dict(self):
"""
Get the cache dict for the current thread.
Returns:
dict: The cache dict.
"""
if not hasattr(self._local, 'cache_dict'):
self._local.cache_dict = {}
return self._local.cache_dict
def get(self, key):
"""
Proxy a ``get`` call.
If *key* is in the thread-local cache, return that. Otherwise, fetch
from the proxied backend and store its result in the thread-local
cache as long as the value was not
:py:data:`~dogpile.cache.api.NO_VALUE`. Finally, return the fetched
value.
Parameters:
key: A :term:`native string`.
Returns:
Some value, or :py:data:`~dogpile.cache.api.NO_VALUE` if the
proxied backend returned that instead of a value.
"""
d = self._dict
if key in d:
return d[key]
ret = self.proxied.get(key)
if ret is not NO_VALUE:
d[key] = ret
return ret
def METHOD_NAME(self, keys):
"""
Proxy a ``get_multi`` call.
This works like :py:meth:`.get`, except *keys* is a list of keys, and
the result is a list of values.
Parameters:
keys: A list of :term:`native string` objects.
Returns:
list: The values corresponding to the *keys*.
"""
d = self._dict
to_fetch = []
ret = []
for key in keys:
ret.append(d.get(key, NO_VALUE))
if ret[-1] is NO_VALUE:
to_fetch.append((key, len(ret) - 1))
if not to_fetch:
return ret
keys_to_fetch, indices = zip(*to_fetch)
for key, index, value in zip(keys_to_fetch, indices, self.proxied.METHOD_NAME(keys_to_fetch)):
if value is NO_VALUE:
continue
d[key] = ret[index] = value
return ret
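    # Illustrative trace (not part of the original class): with 'a' already in
    # the thread-local dict and 'b' a miss, only ('b',) is forwarded to the
    # proxied backend, and its value is written back into both the result list
    # and the local dict:
    #
    #     proxy.set('a', 1)
    #     proxy.get_multi(['a', 'b'])  # backend sees only ('b',)
    #     => [1, <backend value for 'b'>]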
def set(self, key, value):
"""
Proxy a ``set`` call.
The set is passed through to the proxied backend, and the *value* is
stored in the thread-local cache under *key*.
Parameters:
key: A :term:`native string`.
value: Some object.
"""
self._dict[key] = value
self.proxied.set(key, value)
def set_multi(self, pairs):
"""
Proxy a ``set_multi`` call.
This works like :py:meth:`.set`, except *pairs* is a dict of key/value
mappings instead of a single key/value mapping.
Parameters:
pairs (dict): A mapping :term:`native string` of objects to any
objects.
"""
self._dict.update(pairs)
self.proxied.set_multi(pairs)
def delete(self, key):
"""
Proxy a ``delete`` call.
The delete is passed through to the proxied backend, and the *key* is
removed from the thread-local cache if it exists.
Parameters:
key: A :term:`native string`.
"""
self._dict.pop(key, None)
self.proxied.delete(key)
def delete_multi(self, keys):
"""
Proxy a ``delete_multi`` call.
This works like :py:meth:`.delete`, except *keys* is a list of keys.
Parameters:
keys (list): A list of :term:`native string` objects.
"""
d = self._dict
for key in keys:
d.pop(key, None)
self.proxied.delete_multi(keys)
class JsonClient(pylibmc.Client):
"""
A pylibmc.Client that stores only dogpile.cache entries, as JSON.
"""
def serialize(self, value):
return json.dumps(value).encode('ascii'), 0
def deserialize(self, bytestring, flag):
payload, metadata = json.loads(bytestring)
return CachedValue(payload, metadata)
class JsonPylibmcBackend(dogpile.cache.backends.memcached.PylibmcBackend):
def _imports(self):
pass
def _create_client(self):
return JsonClient(
self.url,
binary=self.binary,
behaviors=self.behaviors,
)
@classmethod
def register(cls):
dogpile.cache.register_backend('libweasyl.cache.pylibmc', 'libweasyl.cache', 'JsonPylibmcBackend') |
5,950 | test disctrack | # This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
from tests import TestCase
import os
from senf import fsnative
from quodlibet.util.tagsfrompath import TagsFromPattern
class TTagsFromPattern(TestCase):
def setUp(self):
if os.name == "nt":
self.f1 = u'C:\\path\\Artist\\Album\\01 - Title.mp3'
self.f2 = u'C:\\path\\Artist - Album\\01. Title.mp3'
self.f3 = u'C:\\path\\01 - Artist - Title.mp3'
self.b1 = u'C:\\path\\01 - Title'
self.b2 = u'C:\\path\\01 - Artist - Title'
else:
self.f1 = '/path/Artist/Album/01 - Title.mp3'
self.f2 = '/path/Artist - Album/01. Title.mp3'
self.f3 = '/path/01 - Artist - Title.mp3'
self.b1 = '/path/01 - Title'
self.b2 = '/path/01 - Artist - Title'
self.nomatch = {}
def test_songtypes(self):
from quodlibet import formats
pat = TagsFromPattern('<tracknumber>. <title>')
tracktitle = {'tracknumber': '01', 'title': 'Title'}
for ext, kind in formats.loaders.items():
f = formats._audio.AudioFile()
if not isinstance(kind, type):
continue
f.__class__ = kind
if os.name == "nt":
f["~filename"] = u'C:\\path\\Artist - Album\\01. Title' + ext
else:
f["~filename"] = '/path/Artist - Album/01. Title' + ext
self.assertEquals(pat.match(f), tracktitle, ext)
def test_skip(self):
if os.name == "nt":
pat = TagsFromPattern('<path>\\<~>\\<~>\\<tracknumber> - <title>')
else:
pat = TagsFromPattern('<path>/<~>/<~>/<tracknumber> - <title>')
self.failUnlessEqual(len(pat.headers), 3)
song = pat.match({"~filename": self.f1})
self.failUnlessEqual(song.get("path"), "path")
self.failUnlessEqual(song.get("title"), "Title")
self.failIf(song.get("album"))
self.failIf(song.get("artist"))
def test_dict(self):
tracktitle = {'tracknumber': '01', 'title': 'Title'}
pat = TagsFromPattern('<tracknumber> - <title>')
self.assertEquals(pat.match({"~filename": self.f1}), tracktitle)
def test_nongreedy(self):
pat = TagsFromPattern('<artist> - <title>')
dic = pat.match_path(
fsnative(u"Prefuse 73 - The End of Biters - International.ogg"))
self.assertEquals(dic["artist"], "Prefuse 73")
self.assertEquals(dic["title"], "The End of Biters - International")
def test_empty(self):
pat = TagsFromPattern('')
self.assertEquals(pat.match_path(self.f1), self.nomatch)
self.assertEquals(pat.match_path(self.f2), self.nomatch)
self.assertEquals(pat.match_path(self.f3), self.nomatch)
self.assertEquals(pat.match_path(self.b1), self.nomatch)
self.assertEquals(pat.match_path(self.b2), self.nomatch)
def test_tracktitle(self):
tracktitle = {'tracknumber': '01', 'title': 'Title'}
btracktitle = {'tracknumber': '01', 'title': 'Artist - Title'}
pat = TagsFromPattern('<tracknumber> - <title>')
self.assertEquals(pat.match_path(self.f1), tracktitle)
self.assertEquals(pat.match_path(self.f2), self.nomatch)
self.assertEquals(pat.match_path(self.f3), btracktitle)
self.assertEquals(pat.match_path(self.b1), self.nomatch)
self.assertEquals(pat.match_path(self.b2), self.nomatch)
def test_path(self):
albumtracktitle = {'tracknumber': '01', 'title': 'Title',
'album': 'Album'}
balbumtracktitle = {'tracknumber': '01', 'title': 'Artist - Title',
'album': 'path'}
if os.name == "nt":
pat = TagsFromPattern('<album>\\<tracknumber> - <title>')
else:
pat = TagsFromPattern('<album>/<tracknumber> - <title>')
self.assertEquals(pat.match_path(self.f1), albumtracktitle)
self.assertEquals(pat.match_path(self.f2), self.nomatch)
self.assertEquals(pat.match_path(self.f3), balbumtracktitle)
self.assertEquals(pat.match_path(self.b1), self.nomatch)
self.assertEquals(pat.match_path(self.b2), self.nomatch)
def test_all(self):
all = {'tracknumber': '01', 'title': 'Title',
'album': 'Album', 'artist': 'Artist'}
if os.name == "nt":
pat = TagsFromPattern('<artist>\\<album>\\<tracknumber> - <title>')
else:
pat = TagsFromPattern('<artist>/<album>/<tracknumber> - <title>')
self.assertEquals(pat.match_path(self.f1), all)
self.assertEquals(pat.match_path(self.f2), self.nomatch)
self.assertEquals(pat.match_path(self.f3), self.nomatch)
self.assertEquals(pat.match_path(self.b1), self.nomatch)
self.assertEquals(pat.match_path(self.b2), self.nomatch)
def test_post(self):
btracktitle = {'tracknumber': '01', 'title': 'Titl'}
vbtracktitle = {'tracknumber': '01', 'title': 'Artist - Titl'}
pat = TagsFromPattern('<tracknumber> - <title>e')
self.assertEquals(pat.match_path(self.f1), btracktitle)
self.assertEquals(pat.match_path(self.f2), self.nomatch)
self.assertEquals(pat.match_path(self.f3), vbtracktitle)
self.assertEquals(pat.match_path(self.b1), btracktitle)
self.assertEquals(pat.match_path(self.b2), vbtracktitle)
def test_nofakes(self):
pat = TagsFromPattern('<~#track> - <title>')
self.assertEquals(pat.match_path(self.f1), self.nomatch)
self.assertEquals(pat.match_path(self.f2), self.nomatch)
self.assertEquals(pat.match_path(self.f3), self.nomatch)
self.assertEquals(pat.match_path(self.b1), self.nomatch)
self.assertEquals(pat.match_path(self.b2), self.nomatch)
def METHOD_NAME(self):
pat = TagsFromPattern('<discnumber><tracknumber>. <title>')
self.assertEquals(pat.match_path(fsnative(u'101. T1.ogg')),
dict(discnumber='1', tracknumber='01', title='T1'))
self.assertEquals(pat.match_path(fsnative(u'1318. T18.ogg')),
dict(discnumber='13', tracknumber='18', title='T18'))
self.assertEquals(pat.match_path(fsnative(u'24. T4.ogg')),
dict(discnumber='2', tracknumber='4', title='T4')) |
5,951 | check progress complete | import sys, os
from PyQt4.QtGui import QApplication, QWizard
from PyQt4 import QtCore
from PyQt4 import QtGui
from ui_create import Ui_Wizard
if __name__ == '__main__':
parentdir = sys.path[0].split(os.sep)[:-1]
sys.path.append(os.sep.join(parentdir))
from tomblib.tomb import Tomb
from worker import TombCreateThread
try:
_fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
_fromUtf8 = lambda s: s
class TombCreateWizard(QWizard):
def __init__(self, *args, **kwargs):
QWizard.__init__(self, *args, **kwargs)
self.ui = ui = Ui_Wizard()
ui.setupUi(self)
#instance attributes:
self.ignore_swap = False
self._tomb_check = False #ugly; it's used by check_progress_complete
ui.wizardPage_tomb_location.registerField('tombpath*',
ui.lineEdit_tombpath) #required field, note the *
ui.wizardPage_key_location.setCommitPage(True)
QtCore.QObject.connect(ui.button_tombpath,
QtCore.SIGNAL(_fromUtf8('clicked()')),
self.on_tomb_location_clicked)
QtCore.QObject.connect(self,
QtCore.SIGNAL(_fromUtf8('currentIdChanged(int)')),
self.on_change_page)
QtCore.QObject.connect(ui.radioButton_swapoff,
QtCore.SIGNAL(_fromUtf8('toggled(bool)')),
ui.wizardPage_check.completeChanged)
QtCore.QObject.connect(ui.radioButton_ignore,
QtCore.SIGNAL(_fromUtf8('toggled(bool)')),
ui.wizardPage_check.completeChanged)
def METHOD_NAME(*args, **kwargs):
if self.ui.progressBar.value() == 100:
return True
return False
def check_is_solved():
if self._tomb_check:
return True
if self.ui.radioButton_swapoff.isChecked() or \
self.ui.radioButton_ignore.isChecked():
return True
return False
self.ui.wizardPage_progress.isComplete = METHOD_NAME
self.ui.wizardPage_check.isComplete = check_is_solved
self.ui.groupBox_swap.setVisible(False)
self.finished.connect(self.on_finish)
def _keyloc(self):
keyloc = None
if self.ui.radioButton_usb.isChecked():
print 'Warning: it is not supported'
raise NotImplementedError
elif self.ui.radioButton_near.isChecked():
print 'going near'
keyloc = None
else:
keyloc = self.ui.lineEdit_custom.text()
if not keyloc:
raise ValueError
return keyloc
def on_tomb_location_clicked(self, *args, **kwargs):
filename = QtGui.QFileDialog.getSaveFileName(self, 'Create Tomb',
filter="Tomb(*.tomb)")
self.ui.lineEdit_tombpath.setText(filename)
def on_change_page(self, pagenumber):
if self.currentPage() == self.ui.wizardPage_progress:
self.create_tomb()
if self.currentPage() == self.ui.wizardPage_check:
self.check_requisite()
def on_finish(self, finishedint):
if self.currentPage() != self.ui.wizardPage_end:
#there has been an error
return
if self.ui.checkBox_open.isChecked():
Tomb.open(self.ui.lineEdit_tombpath.text(), self._keyloc())
def on_thread_creation_finished(self):
if self.thread.get_success():
self.ui.progressBar.setValue(100)
else:
self.ui.progressBar.setEnabled(False)
self.ui.label_progress.setText('Error while creating the tomb!')
self.ui.wizardPage_progress.setFinalPage(True)
self.ui.wizardPage_progress.completeChanged.emit()
def create_tomb(self):
self.thread = TombCreateThread(self.ui.lineEdit_tombpath.text(),
str(self.ui.spinBox_size.value()), self._keyloc(),
no_color=False, ignore_swap=self.ui.radioButton_ignore.isChecked())
self.thread.finished.connect(self.on_thread_creation_finished)
self.thread.terminated.connect(self.on_thread_creation_finished)
self.thread.line_received.connect(self.ui.textBrowser_log.append)
def err_append_to_log(text):
self.ui.textBrowser_log.append('Error: <strong>' + text +
'</strong>')
self.thread.error_received.connect(err_append_to_log)
self.thread.start()
def check_requisite(self):
self._tomb_check = check = Tomb.check('create', no_color=False)
self.ui.wizardPage_check.completeChanged.emit()
if check:
self.ui.label_check.setText('Everything seems fine!')
return
self.ui.label_check.setText('Some error occurred')
if Tomb.check('create', no_color=False, ignore_swap=True): # swap is the only error
self.ui.groupBox_swap.setVisible(True)
#TODO: support swapoff
#TODO: calculate the amount of ram available vs swap used
self.ui.radioButton_swapoff.setEnabled(False)
self.ui.label_swapoff.setEnabled(False)
def run_create_wizard():
app = QApplication(sys.argv)
window = TombCreateWizard()
window.show()
sys.exit(app.exec_())
if __name__ == '__main__':
run_create_wizard()
|
5,952 | get connection | #
# We use a background thread for sharing fds on Unix, and for sharing sockets on
# Windows.
#
# A client which wants to pickle a resource registers it with the resource
# sharer and gets an identifier in return. The unpickling process connects
# to the resource sharer, sends the identifier and its pid, and then receives
# the resource.
#
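# Minimal sketch of the round trip described above (illustrative only):
#
#     df = DupFd(fd)          # pickling side: registers a dup'ed fd
#     ... pickle df and send it to another process ...
#     new_fd = df.detach()    # unpickling side: connects, receives the fd
#
# DupSocket plays the same role for sockets on Windows, via share()/fromshare().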
import os
import signal
import socket
import sys
import threading
from . import process
from . import reduction
from . import util
__all__ = ['stop']
if sys.platform == 'win32':
__all__ += ['DupSocket']
class DupSocket(object):
'''Picklable wrapper for a socket.'''
def __init__(self, sock):
new_sock = sock.dup()
def send(conn, pid):
share = new_sock.share(pid)
conn.send_bytes(share)
self._id = _resource_sharer.register(send, new_sock.close)
def detach(self):
'''Get the socket. This should only be called once.'''
with _resource_sharer.METHOD_NAME(self._id) as conn:
share = conn.recv_bytes()
return socket.fromshare(share)
else:
__all__ += ['DupFd']
class DupFd(object):
'''Wrapper for fd which can be used at any time.'''
def __init__(self, fd):
new_fd = os.dup(fd)
def send(conn, pid):
reduction.send_handle(conn, new_fd, pid)
def close():
os.close(new_fd)
self._id = _resource_sharer.register(send, close)
def detach(self):
'''Get the fd. This should only be called once.'''
with _resource_sharer.METHOD_NAME(self._id) as conn:
return reduction.recv_handle(conn)
class _ResourceSharer(object):
    '''Manager for resources using a background thread.'''
def __init__(self):
self._key = 0
self._cache = {}
self._old_locks = []
self._lock = threading.Lock()
self._listener = None
self._address = None
self._thread = None
util.register_after_fork(self, _ResourceSharer._afterfork)
def register(self, send, close):
'''Register resource, returning an identifier.'''
with self._lock:
if self._address is None:
self._start()
self._key += 1
self._cache[self._key] = (send, close)
return (self._address, self._key)
@staticmethod
def METHOD_NAME(ident):
'''Return connection from which to receive identified resource.'''
from .connection import Client
address, key = ident
c = Client(address, authkey=process.current_process().authkey)
c.send((key, os.getpid()))
return c
def stop(self, timeout=None):
'''Stop the background thread and clear registered resources.'''
from .connection import Client
with self._lock:
if self._address is not None:
c = Client(self._address,
authkey=process.current_process().authkey)
c.send(None)
c.close()
self._thread.join(timeout)
if self._thread.is_alive():
util.sub_warning('_ResourceSharer thread did '
'not stop when asked')
self._listener.close()
self._thread = None
self._address = None
self._listener = None
for key, (send, close) in self._cache.items():
close()
self._cache.clear()
def _afterfork(self):
for key, (send, close) in self._cache.items():
close()
self._cache.clear()
# If self._lock was locked at the time of the fork, it may be broken
# -- see issue 6721. Replace it without letting it be gc'ed.
self._old_locks.append(self._lock)
self._lock = threading.Lock()
if self._listener is not None:
self._listener.close()
self._listener = None
self._address = None
self._thread = None
def _start(self):
from .connection import Listener
assert self._listener is None
util.debug('starting listener and thread for sending handles')
self._listener = Listener(authkey=process.current_process().authkey)
self._address = self._listener.address
t = threading.Thread(target=self._serve)
t.daemon = True
t.start()
self._thread = t
def _serve(self):
if hasattr(signal, 'pthread_sigmask'):
signal.pthread_sigmask(signal.SIG_BLOCK, range(1, signal.NSIG))
while 1:
try:
with self._listener.accept() as conn:
msg = conn.recv()
if msg is None:
break
key, destination_pid = msg
send, close = self._cache.pop(key)
try:
send(conn, destination_pid)
finally:
close()
except:
if not util.is_exiting():
sys.excepthook(*sys.exc_info())
_resource_sharer = _ResourceSharer()
stop = _resource_sharer.stop |
5,953 | convert ppid | import os
import sys
class ArgHandlerWithParam:
'''
    Handler for arguments which need a value.
'''
def __init__(self, arg_name, convert_val=None, default_val=None):
self.arg_name = arg_name
self.arg_v_rep = '--%s' % (arg_name,)
self.convert_val = convert_val
self.default_val = default_val
def to_argv(self, lst, setup):
v = setup.get(self.arg_name)
if v is not None and v != self.default_val:
lst.append(self.arg_v_rep)
lst.append('%s' % (v,))
def handle_argv(self, argv, i, setup):
assert argv[i] == self.arg_v_rep
del argv[i]
val = argv[i]
if self.convert_val:
val = self.convert_val(val)
setup[self.arg_name] = val
del argv[i]
class ArgHandlerBool:
'''
If a given flag is received, mark it as 'True' in setup.
'''
def __init__(self, arg_name, default_val=False):
self.arg_name = arg_name
self.arg_v_rep = '--%s' % (arg_name,)
self.default_val = default_val
def to_argv(self, lst, setup):
v = setup.get(self.arg_name)
if v:
lst.append(self.arg_v_rep)
def handle_argv(self, argv, i, setup):
assert argv[i] == self.arg_v_rep
del argv[i]
setup[self.arg_name] = True
def METHOD_NAME(ppid):
ret = int(ppid)
if ret != 0:
if ret == os.getpid():
raise AssertionError(
'ppid passed is the same as the current process pid (%s)!' % (ret,))
return ret
ACCEPTED_ARG_HANDLERS = [
ArgHandlerWithParam('port', int, 0),
ArgHandlerWithParam('ppid', METHOD_NAME, 0),
ArgHandlerWithParam('vm_type'),
ArgHandlerWithParam('client'),
ArgHandlerWithParam('access-token'),
ArgHandlerWithParam('client-access-token'),
ArgHandlerWithParam('debug-mode'),
ArgHandlerWithParam('preimport'),
# Logging
ArgHandlerWithParam('log-file'),
ArgHandlerWithParam('log-level', int, None),
ArgHandlerBool('server'),
ArgHandlerBool('multiproc'), # Used by PyCharm (reuses connection: ssh tunneling)
ArgHandlerBool('multiprocess'), # Used by PyDev (creates new connection to ide)
ArgHandlerBool('save-signatures'),
ArgHandlerBool('save-threading'),
ArgHandlerBool('save-asyncio'),
ArgHandlerBool('print-in-debugger-startup'),
ArgHandlerBool('cmd-line'),
ArgHandlerBool('module'),
ArgHandlerBool('skip-notify-stdin'),
# The ones below should've been just one setting to specify the protocol, but for compatibility
# reasons they're passed as a flag but are mutually exclusive.
ArgHandlerBool('json-dap'), # Protocol used by ptvsd to communicate with pydevd (a single json message in each read)
ArgHandlerBool('json-dap-http'), # Actual DAP (json messages over http protocol).
ArgHandlerBool('protocol-quoted-line'), # Custom protocol with quoted lines.
ArgHandlerBool('protocol-http'), # Custom protocol with http.
]
ARGV_REP_TO_HANDLER = {}
for handler in ACCEPTED_ARG_HANDLERS:
ARGV_REP_TO_HANDLER[handler.arg_v_rep] = handler
def get_pydevd_file():
import pydevd
f = pydevd.__file__
if f.endswith('.pyc'):
f = f[:-1]
elif f.endswith('$py.class'):
f = f[:-len('$py.class')] + '.py'
return f
def setup_to_argv(setup, skip_names=None):
'''
:param dict setup:
A dict previously gotten from process_command_line.
:param set skip_names:
The names in the setup which shouldn't be converted to argv.
:note: does not handle --file nor --DEBUG.
'''
if skip_names is None:
skip_names = set()
ret = [get_pydevd_file()]
for handler in ACCEPTED_ARG_HANDLERS:
if handler.arg_name in setup and handler.arg_name not in skip_names:
handler.to_argv(ret, setup)
return ret
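# Illustrative round trip (not part of the original module):
# process_command_line() consumes recognized flags from argv, and
# setup_to_argv() rebuilds an equivalent command line, prefixed with the
# pydevd file and without --file / --DEBUG.
def _demo_round_trip():
    argv = ['pydevd.py', '--port', '5678', '--server', '--file', 'app.py']
    setup = process_command_line(argv)
    assert setup['port'] == 5678 and setup['server'] and setup['file'] == 'app.py'
    assert setup_to_argv(setup)[1:] == ['--port', '5678', '--server']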
def process_command_line(argv):
""" parses the arguments.
removes our arguments from the command line """
setup = {}
for handler in ACCEPTED_ARG_HANDLERS:
setup[handler.arg_name] = handler.default_val
setup['file'] = ''
setup['qt-support'] = ''
initial_argv = tuple(argv)
i = 0
del argv[0]
while i < len(argv):
handler = ARGV_REP_TO_HANDLER.get(argv[i])
if handler is not None:
handler.handle_argv(argv, i, setup)
elif argv[i].startswith('--qt-support'):
# The --qt-support is special because we want to keep backward compatibility:
# Previously, just passing '--qt-support' meant that we should use the auto-discovery mode
# whereas now, if --qt-support is passed, it should be passed as --qt-support=<mode>, where
# mode can be one of 'auto', 'none', 'pyqt5', 'pyqt4', 'pyside', 'pyside2'.
if argv[i] == '--qt-support':
setup['qt-support'] = 'auto'
elif argv[i].startswith('--qt-support='):
qt_support = argv[i][len('--qt-support='):]
valid_modes = ('none', 'auto', 'pyqt5', 'pyqt4', 'pyside', 'pyside2')
if qt_support not in valid_modes:
raise ValueError("qt-support mode invalid: " + qt_support)
if qt_support == 'none':
# On none, actually set an empty string to evaluate to False.
setup['qt-support'] = ''
else:
setup['qt-support'] = qt_support
else:
raise ValueError("Unexpected definition for qt-support flag: " + argv[i])
del argv[i]
elif argv[i] == '--file':
# --file is special because it's the last one (so, no handler for it).
del argv[i]
setup['file'] = argv[i]
i = len(argv) # pop out, file is our last argument
elif argv[i] == '--DEBUG':
sys.stderr.write('pydevd: --DEBUG parameter deprecated. Use `--debug-level=3` instead.\n')
else:
raise ValueError("Unexpected option: %s when processing: %s" % (argv[i], initial_argv))
return setup
|
5,954 | test local lcpencrypt | import pytest
from mock import patch, create_autospec, MagicMock
from parameterized import parameterized
from pyfakefs.fake_filesystem_unittest import Patcher
from api.lcp.encrypt import LCPEncryptor, LCPEncryptionException, LCPEncryptionConfiguration, LCPEncryptionResult
from core.model import Identifier
from core.model.configuration import HasExternalIntegration, ConfigurationStorage, ConfigurationFactory
from tests.lcp import fixtures
from tests.lcp.database_test import DatabaseTest
class TestLCPEncryptor(DatabaseTest):
@parameterized.expand([
(
'non_existing_directory',
fixtures.NOT_EXISTING_BOOK_FILE_PATH,
fixtures.LCPENCRYPT_NOT_EXISTING_DIRECTORY_RESULT,
None,
LCPEncryptionException(fixtures.LCPENCRYPT_NOT_EXISTING_DIRECTORY_RESULT.strip()),
False
),
(
'failed_encryption',
fixtures.NOT_EXISTING_BOOK_FILE_PATH,
fixtures.LCPENCRYPT_FAILED_ENCRYPTION_RESULT,
None,
LCPEncryptionException('Encryption failed')
),
(
'successful_encryption',
fixtures.EXISTING_BOOK_FILE_PATH,
fixtures.LCPENCRYPT_SUCCESSFUL_ENCRYPTION_RESULT,
LCPEncryptionResult(
content_id=fixtures.BOOK_IDENTIFIER,
content_encryption_key=fixtures.CONTENT_ENCRYPTION_KEY,
protected_content_location=fixtures.PROTECTED_CONTENT_LOCATION,
protected_content_disposition=fixtures.PROTECTED_CONTENT_DISPOSITION,
protected_content_type=fixtures.PROTECTED_CONTENT_TYPE,
protected_content_length=fixtures.PROTECTED_CONTENT_LENGTH,
protected_content_sha256=fixtures.PROTECTED_CONTENT_SHA256
)
),
(
'failed_lcp_server_notification',
fixtures.EXISTING_BOOK_FILE_PATH,
fixtures.LCPENCRYPT_FAILED_LCPSERVER_NOTIFICATION,
None,
LCPEncryptionException(fixtures.LCPENCRYPT_FAILED_LCPSERVER_NOTIFICATION.strip())
),
(
'successful_lcp_server_notification',
fixtures.EXISTING_BOOK_FILE_PATH,
fixtures.LCPENCRYPT_SUCCESSFUL_NOTIFICATION_RESULT,
LCPEncryptionResult(
content_id=fixtures.BOOK_IDENTIFIER,
content_encryption_key=fixtures.CONTENT_ENCRYPTION_KEY,
protected_content_location=fixtures.PROTECTED_CONTENT_LOCATION,
protected_content_disposition=fixtures.PROTECTED_CONTENT_DISPOSITION,
protected_content_type=fixtures.PROTECTED_CONTENT_TYPE,
protected_content_length=fixtures.PROTECTED_CONTENT_LENGTH,
protected_content_sha256=fixtures.PROTECTED_CONTENT_SHA256
)
),
])
def METHOD_NAME(
self,
_,
file_path,
lcpencrypt_output,
expected_result,
expected_exception=None,
create_file=True):
# Arrange
integration_owner = create_autospec(spec=HasExternalIntegration)
integration_owner.external_integration = MagicMock(return_value=self._integration)
configuration_storage = ConfigurationStorage(integration_owner)
configuration_factory = ConfigurationFactory()
encryptor = LCPEncryptor(configuration_storage, configuration_factory)
identifier = Identifier(identifier=fixtures.BOOK_IDENTIFIER)
with configuration_factory.create(configuration_storage, self._db, LCPEncryptionConfiguration) as configuration:
configuration.lcpencrypt_location = LCPEncryptionConfiguration.DEFAULT_LCPENCRYPT_LOCATION
with Patcher() as patcher:
patcher.fs.create_file(LCPEncryptionConfiguration.DEFAULT_LCPENCRYPT_LOCATION)
if create_file:
patcher.fs.create_file(file_path)
with patch('subprocess.check_output') as subprocess_check_output_mock:
subprocess_check_output_mock.return_value = lcpencrypt_output
if expected_exception:
with pytest.raises(expected_exception.__class__) as exception_metadata:
encryptor.encrypt(self._db, file_path, identifier.identifier)
# Assert
assert exception_metadata.value == expected_exception
else:
# Assert
result = encryptor.encrypt(self._db, file_path, identifier.identifier)
assert result == expected_result |
5,955 | kibana group | # Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
# or more contributor license agreements. Licensed under the Elastic License
# 2.0; you may not use this file except in compliance with the Elastic License
# 2.0.
"""Kibana cli commands."""
import sys
import click
import kql
from kibana import Signal, RuleResource
from .cli_utils import multi_collection
from .main import root
from .misc import add_params, client_error, kibana_options, get_kibana_client, nested_set
from .rule import downgrade_contents_from_rule
from .utils import format_command_options
@root.group('kibana')
@add_params(*kibana_options)
@click.pass_context
def METHOD_NAME(ctx: click.Context, **kibana_kwargs):
"""Commands for integrating with Kibana."""
ctx.ensure_object(dict)
    # only initialize a Kibana client if the subcommand is invoked without help (hacky)
if sys.argv[-1] in ctx.help_option_names:
click.echo('Kibana client:')
click.echo(format_command_options(ctx))
else:
ctx.obj['kibana'] = get_kibana_client(**kibana_kwargs)
@METHOD_NAME.command("upload-rule")
@multi_collection
@click.option('--replace-id', '-r', is_flag=True, help='Replace rule IDs with new IDs before export')
@click.pass_context
def upload_rule(ctx, rules, replace_id):
"""Upload a list of rule .toml files to Kibana."""
kibana = ctx.obj['kibana']
api_payloads = []
for rule in rules:
try:
payload = downgrade_contents_from_rule(rule, kibana.version, replace_id=replace_id)
except ValueError as e:
client_error(f'{e} in version:{kibana.version}, for rule: {rule.name}', e, ctx=ctx)
rule = RuleResource(payload)
api_payloads.append(rule)
with kibana:
results = RuleResource.bulk_create(api_payloads)
success = []
errors = []
for result in results:
if 'error' in result:
errors.append(f'{result["rule_id"]} - {result["error"]["message"]}')
else:
success.append(result['rule_id'])
if success:
click.echo('Successful uploads:\n - ' + '\n - '.join(success))
if errors:
click.echo('Failed uploads:\n - ' + '\n - '.join(errors))
return results
@METHOD_NAME.command('search-alerts')
@click.argument('query', required=False)
@click.option('--date-range', '-d', type=(str, str), default=('now-7d', 'now'), help='Date range to scope search')
@click.option('--columns', '-c', multiple=True, help='Columns to display in table')
@click.option('--extend', '-e', is_flag=True, help='If columns are specified, extend the original columns')
@click.option('--max-count', '-m', default=100, help='The max number of alerts to return')
@click.pass_context
def search_alerts(ctx, query, date_range, columns, extend, max_count):
"""Search detection engine alerts with KQL."""
from eql.table import Table
from .eswrap import MATCH_ALL, add_range_to_dsl
kibana = ctx.obj['kibana']
start_time, end_time = date_range
kql_query = kql.to_dsl(query) if query else MATCH_ALL
add_range_to_dsl(kql_query['bool'].setdefault('filter', []), start_time, end_time)
with kibana:
alerts = [a['_source'] for a in Signal.search({'query': kql_query}, size=max_count)['hits']['hits']]
# check for events with nested signal fields
if alerts:
table_columns = ['host.hostname']
if 'signal' in alerts[0]:
table_columns += ['signal.rule.name', 'signal.status', 'signal.original_time']
elif 'kibana.alert.rule.name' in alerts[0]:
table_columns += ['kibana.alert.rule.name', 'kibana.alert.status', 'kibana.alert.original_time']
else:
table_columns += ['rule.name', '@timestamp']
if columns:
columns = list(columns)
table_columns = table_columns + columns if extend else columns
# Table requires the data to be nested, but depending on the version, some data uses dotted keys, so
# they must be nested explicitly
for alert in alerts:
for key in table_columns:
if key in alert:
nested_set(alert, key, alert[key])
click.echo(Table.from_list(table_columns, alerts))
else:
click.echo('No alerts detected')
return alerts |
5,956 | launch | #
# SPDX-FileCopyrightText:
# Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: Apache-2.0
#
"""This is a helper module for distributed training.
The code uses the official distributed data parallel launcher
implementation only as a reference:
https://github.com/pytorch/pytorch/blob/v1.8.2/torch/distributed/launch.py
One main difference is that this code focuses on
launching a simple function with given arguments.
"""
import multiprocessing
import os
import signal
import socket
import time
if hasattr(signal, "valid_signals"):
_signalno_name_map = {
s.value: s.name for s in signal.valid_signals() if isinstance(s, signal.Signals)
}
else:
# TODO(lazykyama): It should be deprecated
# once Python 3.7 is removed from supported platform.
_signalno_name_map = dict(
[
(1, "SIGHUP"),
(2, "SIGINT"),
(3, "SIGQUIT"),
(4, "SIGILL"),
(5, "SIGTRAP"),
(6, "SIGABRT"),
(7, "SIGBUS"),
(8, "SIGFPE"),
(9, "SIGKILL"),
(10, "SIGUSR1"),
(11, "SIGSEGV"),
(12, "SIGUSR2"),
(13, "SIGPIPE"),
(14, "SIGALRM"),
(15, "SIGTERM"),
(17, "SIGCHLD"),
(18, "SIGCONT"),
(19, "SIGSTOP"),
(20, "SIGTSTP"),
(21, "SIGTTIN"),
(22, "SIGTTOU"),
(23, "SIGURG"),
(24, "SIGXCPU"),
(25, "SIGXFSZ"),
(26, "SIGVTALRM"),
(27, "SIGPROF"),
(28, "SIGWINCH"),
(29, "SIGIO"),
(30, "SIGPWR"),
(31, "SIGSYS"),
(34, "SIGRTMIN"),
(64, "SIGRTMAX"),
]
)
class WorkerError(multiprocessing.ProcessError):
"""An error happened within each worker."""
def __init__(self, *, msg, exitcode, worker_id):
"""Initialize error class."""
super(WorkerError, self).__init__(msg)
self._exitcode = exitcode
self._worker_id = worker_id
def __str__(self):
"""Construct and return a special error message."""
return f"worker[{self._worker_id}] failed with exitcode={self._exitcode}"
@property
def exitcode(self):
"""Return exitcode from worker process."""
return self._exitcode
@property
def worker_id(self):
"""Return worker ID related to a process causes this error."""
return self._worker_id
class MainProcessError(multiprocessing.ProcessError):
"""An error happened from main process."""
def __init__(self, *, signal_no):
"""Initialize error class."""
msg = (
f"{_signalno_name_map[signal_no]} received, "
f"exiting due to {signal.strsignal(signal_no)}."
)
super(MainProcessError, self).__init__(msg)
self._signal_no = signal_no
self._msg = msg
def __str__(self):
"""Return a custom error message."""
return self._msg
@property
def signal_no(self):
"""Return signal number which stops main process."""
return self._signal_no
def set_start_method(method):
"""Set multiprocess start method."""
assert method in ("fork", "spawn", "forkserver")
return multiprocessing.set_start_method(method)
def free_port():
"""Find free port using bind().
There are some interval between finding this port and using it
and the other process might catch the port by that time.
Thus it is not guaranteed that the port is really empty.
"""
# This method is copied from ESPnet v2's utility below.
# https://github.com/espnet/espnet/blob/43ce0c69fb32961235534b348700dc6c74ad5792/espnet2/train/distributed_utils.py#L187-L198
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as sock:
sock.bind(("", 0))
return sock.getsockname()[1]
def _kill_processes(processes):
    # TODO(lazykyama): This implementation can't stop grandchild processes
    # launched inside the child processes that were directly forked from
    # this script. Needs improvement for safer termination.
for p in processes:
try:
# NOTE: multiprocessing.Process.kill() was introduced in 3.7.
# https://docs.python.org/3.7/library/multiprocessing.html#multiprocessing.Process.kill
if not hasattr(p, "kill"):
p.terminate()
else:
p.kill()
except Exception: # noqa: E722
# NOTE: Ignore any exception happens during killing a process
# because this intends to send kill signal to *all* processes.
pass
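# Hypothetical usage sketch for the launcher below (the worker function and
# its argument object are assumptions, not part of this module):
#
#     def train(args):
#         # reads RANK / LOCAL_RANK / WORLD_SIZE / MASTER_ADDR / MASTER_PORT
#         # from os.environ and runs one worker
#         ...
#
#     METHOD_NAME(train, args, nprocs=4)  # single node, 4 worker processes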
def METHOD_NAME(func, args, nprocs, master_addr="localhost", master_port=None):
"""Launch processes with a given function and given arguments.
    .. note:: Current implementation supports only the single-node case.
"""
if master_port is None:
master_port = free_port()
# Set PyTorch distributed related environmental variables
# NOTE: in contrast to subprocess.Popen,
    # explicit environment variables cannot be specified.
    # It's necessary to add the additional variables to
    # the current environment instead.
original_env = os.environ.copy()
# TODO(lazykyama): multi-node support
os.environ["WORLD_SIZE"] = str(nprocs)
os.environ["MASTER_ADDR"] = master_addr
os.environ["MASTER_PORT"] = str(master_port)
processes = []
for local_rank in range(nprocs):
# Each process's rank
# TODO(lazykyama): multi-node support
os.environ["RANK"] = str(local_rank)
os.environ["LOCAL_RANK"] = str(local_rank)
process = multiprocessing.Process(target=func, args=(args,))
process.start()
processes.append(process)
# Set signal handler to capture signals sent to main process,
    # and ensure that all child processes will be terminated.
def _handler(signal_no, _):
_kill_processes(processes)
raise MainProcessError(signal_no=signal_no)
signal.signal(signal.SIGINT, _handler)
signal.signal(signal.SIGTERM, _handler)
# Recovery environment variables.
os.environ.clear()
os.environ.update(original_env)
# Monitor all workers.
worker_error = None
finished_process_ids = set()
while len(processes) > len(finished_process_ids):
for localrank, p in enumerate(processes):
if p.pid in finished_process_ids:
                # Skip the rest of the checks because
                # this process has already finished.
continue
if p.is_alive():
# This process is still running.
continue
elif p.exitcode == 0:
# This process properly finished.
finished_process_ids.add(p.pid)
else:
# An error happens in one process.
# Will try to terminate all other processes.
worker_error = WorkerError(
msg=(f"{func.__name__} failed with error code: {p.exitcode}"),
exitcode=p.exitcode,
worker_id=localrank,
)
break
if worker_error is not None:
# Go out of this while loop to terminate all processes.
break
time.sleep(1.0)
if worker_error is not None:
# Trying to stop all workers.
_kill_processes(processes)
raise worker_error |
5,957 | logging set handlers | """
This file contains general-purpose utilities.
"""
import logging
import numpy as np
import os
import uuid
def METHOD_NAME(logger_name, handler, log_level):
logger = logging.getLogger(logger_name)
# set all handlers to ERROR
    for existing_handler in logger.handlers:
        existing_handler.setLevel(logging.ERROR)
# set children oasislmf loggers to 'log_level'
if 'oasislmf.' in logger_name:
logger.addHandler(handler)
logger.setLevel(log_level)
logger.propagate = False
else:
logger.setLevel(logging.ERROR)
def logging_reset_handlers(logger_name):
logger = logging.getLogger(logger_name)
# revert all handlers to NOTSET
for handler in logger.handlers:
handler.setLevel(logging.NOTSET)
logger.propagate = True
# Remove added handlers
if 'oasislmf.' in logger_name:
logger.handlers.clear()
else:
logger.setLevel(logging.NOTSET)
def redirect_logging(exec_name, log_dir='./log', log_level=logging.WARNING):
"""
Decorator that redirects logging output to a file.
Apply to the main run function of a python exec from the pytools directory.
    Only errors will be sent to STDERR; all other logging is stored in a file named:
    "<log_dir>/<exec_name>_<PID>_<UUID>.log"
Each log file is timestamped with start / finish times
❯ cat log/fmpy_112820.log
2023-03-01 13:48:31,286 - oasislmf - INFO - starting process
2023-03-01 13:48:36,476 - oasislmf - INFO - finishing process
Args:
exec_name (str): The name of the script or function being executed. This will be used as part of the log file name.
log_dir (str, optional): The path to the directory where log files will be stored. Defaults to './log'.
        log_level (int or str, optional): The logging level to use. Can be an integer or a string. Defaults to logging.WARNING.
Returns:
function: The decorated function.
Example:
@redirect_logging(exec_name='my_script', log_dir='./logs', log_level=logging.DEBUG)
def my_run_function():
# code here
"""
def inner(func):
def wrapper(*args, **kwargs):
if not os.path.isdir(log_dir):
os.makedirs(log_dir)
logging_config = logging.root.manager.loggerDict.keys()
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
log_file = f'{exec_name}_{os.getpid()}_{uuid.uuid4()}.log'
childFileHandler = logging.FileHandler(os.path.join(log_dir, log_file))
childFileHandler.setLevel(log_level)
childFileHandler.setFormatter(formatter)
rootFileHandler = logging.FileHandler(os.path.join(log_dir, log_file))
rootFileHandler.setLevel(logging.INFO)
rootFileHandler.setFormatter(formatter)
# Set all logger handlers to level ERROR
for lg_name in logging_config:
METHOD_NAME(lg_name, childFileHandler, log_level)
# Set root oasislmf logger to INFO
logger = logging.getLogger('oasislmf')
logger.setLevel(logging.INFO)
logger.addHandler(rootFileHandler)
# # Debug: print logging tree
# import ipdb; ipdb.set_trace()
# import logging_tree; logging_tree.printout()
try:
logger.info(kwargs)
logger.info('starting process')
# Run the wrapped function
retval = func(*args, **kwargs)
logger.info('finishing process')
return retval
except Exception as err:
logger.exception(err)
raise err
finally:
for lg_name in logging_config:
logging_reset_handlers(lg_name)
logger.removeHandler(rootFileHandler)
logging.shutdown()
return wrapper
return inner
def assert_allclose(x, y, rtol=1e-10, atol=1e-8, x_name="x", y_name="y"):
"""
    Drop-in replacement for `numpy.testing.assert_allclose` that also shows
the nonmatching elements in a nice human-readable format.
Args:
x (np.array or scalar): first input to compare.
y (np.array or scalar): second input to compare.
rtol (float, optional): relative tolreance. Defaults to 1e-10.
atol (float, optional): absolute tolerance. Defaults to 1e-8.
x_name (str, optional): header to print for x if x and y do not match. Defaults to "x".
y_name (str, optional): header to print for y if x and y do not match. Defaults to "y".
Raises:
AssertionError: if x and y shapes do not match.
AssertionError: if x and y data do not match.
"""
    if np.isscalar(x) and np.isscalar(y):
return np.testing.assert_allclose(x, y, rtol=rtol, atol=atol)
if x.shape != y.shape:
raise AssertionError("Shape mismatch: %s vs %s" % (str(x.shape), str(y.shape)))
d = ~np.isclose(x, y, rtol, atol)
if np.any(d):
miss = np.where(d)[0]
msg = f"Mismatch of {len(miss):d} elements ({len(miss) / x.size * 100:g} %) at the level of rtol={rtol:g}, atol={atol:g},\n" \
f"{repr(miss)}\n" \
f"x: {x_name}\n{str(x[d])}\n\n" \
f"y: {y_name}\n{str(y[d])}"\
raise AssertionError(msg) |
5,958 | id | # coding=utf-8
# *** WARNING: this file was generated by pulumi. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import copy
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
__all__ = [
'GetSqlPoolTransparentDataEncryptionResult',
'AwaitableGetSqlPoolTransparentDataEncryptionResult',
'get_sql_pool_transparent_data_encryption',
'get_sql_pool_transparent_data_encryption_output',
]
@pulumi.output_type
class GetSqlPoolTransparentDataEncryptionResult:
"""
Represents a Sql pool transparent data encryption configuration.
"""
def __init__(__self__, METHOD_NAME=None, location=None, name=None, status=None, type=None):
if METHOD_NAME and not isinstance(METHOD_NAME, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", METHOD_NAME)
if location and not isinstance(location, str):
raise TypeError("Expected argument 'location' to be a str")
pulumi.set(__self__, "location", location)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if status and not isinstance(status, str):
raise TypeError("Expected argument 'status' to be a str")
pulumi.set(__self__, "status", status)
if type and not isinstance(type, str):
raise TypeError("Expected argument 'type' to be a str")
pulumi.set(__self__, "type", type)
@property
@pulumi.getter
def METHOD_NAME(self) -> str:
"""
Fully qualified resource ID for the resource. Ex - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}
"""
return pulumi.get(self, "id")
@property
@pulumi.getter
def location(self) -> str:
"""
Resource location.
"""
return pulumi.get(self, "location")
@property
@pulumi.getter
def name(self) -> str:
"""
The name of the resource
"""
return pulumi.get(self, "name")
@property
@pulumi.getter
def status(self) -> Optional[str]:
"""
The status of the database transparent data encryption.
"""
return pulumi.get(self, "status")
@property
@pulumi.getter
def type(self) -> str:
"""
The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or "Microsoft.Storage/storageAccounts"
"""
return pulumi.get(self, "type")
class AwaitableGetSqlPoolTransparentDataEncryptionResult(GetSqlPoolTransparentDataEncryptionResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetSqlPoolTransparentDataEncryptionResult(
METHOD_NAME=self.METHOD_NAME,
location=self.location,
name=self.name,
status=self.status,
type=self.type)
def get_sql_pool_transparent_data_encryption(resource_group_name: Optional[str] = None,
sql_pool_name: Optional[str] = None,
transparent_data_encryption_name: Optional[str] = None,
workspace_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetSqlPoolTransparentDataEncryptionResult:
"""
Get a SQL pool's transparent data encryption configuration.
:param str resource_group_name: The name of the resource group. The name is case insensitive.
:param str sql_pool_name: SQL pool name
:param str transparent_data_encryption_name: The name of the transparent data encryption configuration.
:param str workspace_name: The name of the workspace.
"""
__args__ = dict()
__args__['resourceGroupName'] = resource_group_name
__args__['sqlPoolName'] = sql_pool_name
__args__['transparentDataEncryptionName'] = transparent_data_encryption_name
__args__['workspaceName'] = workspace_name
opts = pulumi.InvokeOptions.merge(_utilities.get_invoke_opts_defaults(), opts)
__ret__ = pulumi.runtime.invoke('azure-native:synapse/v20210601preview:getSqlPoolTransparentDataEncryption', __args__, opts=opts, typ=GetSqlPoolTransparentDataEncryptionResult).value
return AwaitableGetSqlPoolTransparentDataEncryptionResult(
METHOD_NAME=pulumi.get(__ret__, 'id'),
location=pulumi.get(__ret__, 'location'),
name=pulumi.get(__ret__, 'name'),
status=pulumi.get(__ret__, 'status'),
type=pulumi.get(__ret__, 'type'))
@_utilities.lift_output_func(get_sql_pool_transparent_data_encryption)
def get_sql_pool_transparent_data_encryption_output(resource_group_name: Optional[pulumi.Input[str]] = None,
sql_pool_name: Optional[pulumi.Input[str]] = None,
transparent_data_encryption_name: Optional[pulumi.Input[str]] = None,
workspace_name: Optional[pulumi.Input[str]] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetSqlPoolTransparentDataEncryptionResult]:
"""
Get a SQL pool's transparent data encryption configuration.
:param str resource_group_name: The name of the resource group. The name is case insensitive.
:param str sql_pool_name: SQL pool name
:param str transparent_data_encryption_name: The name of the transparent data encryption configuration.
:param str workspace_name: The name of the workspace.
"""
... |
5,959 | test logical not equal | import pytest
from PIL import Image, ImageMath
def pixel(im):
if hasattr(im, "im"):
return f"{im.mode} {repr(im.getpixel((0, 0)))}"
if isinstance(im, int):
return int(im) # hack to deal with booleans
A = Image.new("L", (1, 1), 1)
B = Image.new("L", (1, 1), 2)
Z = Image.new("L", (1, 1), 0) # Z for zero
F = Image.new("F", (1, 1), 3)
I = Image.new("I", (1, 1), 4) # noqa: E741
A2 = A.resize((2, 2))
B2 = B.resize((2, 2))
images = {"A": A, "B": B, "F": F, "I": I}
def test_sanity():
assert ImageMath.eval("1") == 1
assert ImageMath.eval("1+A", A=2) == 3
assert pixel(ImageMath.eval("A+B", A=A, B=B)) == "I 3"
assert pixel(ImageMath.eval("A+B", images)) == "I 3"
assert pixel(ImageMath.eval("float(A)+B", images)) == "F 3.0"
assert pixel(ImageMath.eval("int(float(A)+B)", images)) == "I 3"
def test_ops():
assert pixel(ImageMath.eval("-A", images)) == "I -1"
assert pixel(ImageMath.eval("+B", images)) == "L 2"
assert pixel(ImageMath.eval("A+B", images)) == "I 3"
assert pixel(ImageMath.eval("A-B", images)) == "I -1"
assert pixel(ImageMath.eval("A*B", images)) == "I 2"
assert pixel(ImageMath.eval("A/B", images)) == "I 0"
assert pixel(ImageMath.eval("B**2", images)) == "I 4"
assert pixel(ImageMath.eval("B**33", images)) == "I 2147483647"
assert pixel(ImageMath.eval("float(A)+B", images)) == "F 3.0"
assert pixel(ImageMath.eval("float(A)-B", images)) == "F -1.0"
assert pixel(ImageMath.eval("float(A)*B", images)) == "F 2.0"
assert pixel(ImageMath.eval("float(A)/B", images)) == "F 0.5"
assert pixel(ImageMath.eval("float(B)**2", images)) == "F 4.0"
assert pixel(ImageMath.eval("float(B)**33", images)) == "F 8589934592.0"
@pytest.mark.parametrize(
"expression",
(
"exec('pass')",
"(lambda: exec('pass'))()",
"(lambda: (lambda: exec('pass'))())()",
),
)
def test_prevent_exec(expression):
with pytest.raises(ValueError):
ImageMath.eval(expression)
def test_logical():
assert pixel(ImageMath.eval("not A", images)) == 0
assert pixel(ImageMath.eval("A and B", images)) == "L 2"
assert pixel(ImageMath.eval("A or B", images)) == "L 1"
def test_convert():
assert pixel(ImageMath.eval("convert(A+B, 'L')", images)) == "L 3"
assert pixel(ImageMath.eval("convert(A+B, '1')", images)) == "1 0"
assert pixel(ImageMath.eval("convert(A+B, 'RGB')", images)) == "RGB (3, 3, 3)"
def test_compare():
assert pixel(ImageMath.eval("min(A, B)", images)) == "I 1"
assert pixel(ImageMath.eval("max(A, B)", images)) == "I 2"
assert pixel(ImageMath.eval("A == 1", images)) == "I 1"
assert pixel(ImageMath.eval("A == 2", images)) == "I 0"
def test_one_image_larger():
assert pixel(ImageMath.eval("A+B", A=A2, B=B)) == "I 3"
assert pixel(ImageMath.eval("A+B", A=A, B=B2)) == "I 3"
def test_abs():
assert pixel(ImageMath.eval("abs(A)", A=A)) == "I 1"
assert pixel(ImageMath.eval("abs(B)", B=B)) == "I 2"
def test_binary_mod():
assert pixel(ImageMath.eval("A%A", A=A)) == "I 0"
assert pixel(ImageMath.eval("B%B", B=B)) == "I 0"
assert pixel(ImageMath.eval("A%B", A=A, B=B)) == "I 1"
assert pixel(ImageMath.eval("B%A", A=A, B=B)) == "I 0"
assert pixel(ImageMath.eval("Z%A", A=A, Z=Z)) == "I 0"
assert pixel(ImageMath.eval("Z%B", B=B, Z=Z)) == "I 0"
def test_bitwise_invert():
assert pixel(ImageMath.eval("~Z", Z=Z)) == "I -1"
assert pixel(ImageMath.eval("~A", A=A)) == "I -2"
assert pixel(ImageMath.eval("~B", B=B)) == "I -3"
def test_bitwise_and():
assert pixel(ImageMath.eval("Z&Z", A=A, Z=Z)) == "I 0"
assert pixel(ImageMath.eval("Z&A", A=A, Z=Z)) == "I 0"
assert pixel(ImageMath.eval("A&Z", A=A, Z=Z)) == "I 0"
assert pixel(ImageMath.eval("A&A", A=A, Z=Z)) == "I 1"
def test_bitwise_or():
assert pixel(ImageMath.eval("Z|Z", A=A, Z=Z)) == "I 0"
assert pixel(ImageMath.eval("Z|A", A=A, Z=Z)) == "I 1"
assert pixel(ImageMath.eval("A|Z", A=A, Z=Z)) == "I 1"
assert pixel(ImageMath.eval("A|A", A=A, Z=Z)) == "I 1"
def test_bitwise_xor():
assert pixel(ImageMath.eval("Z^Z", A=A, Z=Z)) == "I 0"
assert pixel(ImageMath.eval("Z^A", A=A, Z=Z)) == "I 1"
assert pixel(ImageMath.eval("A^Z", A=A, Z=Z)) == "I 1"
assert pixel(ImageMath.eval("A^A", A=A, Z=Z)) == "I 0"
def test_bitwise_leftshift():
assert pixel(ImageMath.eval("Z<<0", Z=Z)) == "I 0"
assert pixel(ImageMath.eval("Z<<1", Z=Z)) == "I 0"
assert pixel(ImageMath.eval("A<<0", A=A)) == "I 1"
assert pixel(ImageMath.eval("A<<1", A=A)) == "I 2"
def test_bitwise_rightshift():
assert pixel(ImageMath.eval("Z>>0", Z=Z)) == "I 0"
assert pixel(ImageMath.eval("Z>>1", Z=Z)) == "I 0"
assert pixel(ImageMath.eval("A>>0", A=A)) == "I 1"
assert pixel(ImageMath.eval("A>>1", A=A)) == "I 0"
def test_logical_eq():
assert pixel(ImageMath.eval("A==A", A=A)) == "I 1"
assert pixel(ImageMath.eval("B==B", B=B)) == "I 1"
assert pixel(ImageMath.eval("A==B", A=A, B=B)) == "I 0"
assert pixel(ImageMath.eval("B==A", A=A, B=B)) == "I 0"
def test_logical_ne():
assert pixel(ImageMath.eval("A!=A", A=A)) == "I 0"
assert pixel(ImageMath.eval("B!=B", B=B)) == "I 0"
assert pixel(ImageMath.eval("A!=B", A=A, B=B)) == "I 1"
assert pixel(ImageMath.eval("B!=A", A=A, B=B)) == "I 1"
def test_logical_lt():
assert pixel(ImageMath.eval("A<A", A=A)) == "I 0"
assert pixel(ImageMath.eval("B<B", B=B)) == "I 0"
assert pixel(ImageMath.eval("A<B", A=A, B=B)) == "I 1"
assert pixel(ImageMath.eval("B<A", A=A, B=B)) == "I 0"
def test_logical_le():
assert pixel(ImageMath.eval("A<=A", A=A)) == "I 1"
assert pixel(ImageMath.eval("B<=B", B=B)) == "I 1"
assert pixel(ImageMath.eval("A<=B", A=A, B=B)) == "I 1"
assert pixel(ImageMath.eval("B<=A", A=A, B=B)) == "I 0"
def test_logical_gt():
assert pixel(ImageMath.eval("A>A", A=A)) == "I 0"
assert pixel(ImageMath.eval("B>B", B=B)) == "I 0"
assert pixel(ImageMath.eval("A>B", A=A, B=B)) == "I 0"
assert pixel(ImageMath.eval("B>A", A=A, B=B)) == "I 1"
def test_logical_ge():
assert pixel(ImageMath.eval("A>=A", A=A)) == "I 1"
assert pixel(ImageMath.eval("B>=B", B=B)) == "I 1"
assert pixel(ImageMath.eval("A>=B", A=A, B=B)) == "I 0"
assert pixel(ImageMath.eval("B>=A", A=A, B=B)) == "I 1"
def test_logical_equal():
assert pixel(ImageMath.eval("equal(A, A)", A=A)) == "I 1"
assert pixel(ImageMath.eval("equal(B, B)", B=B)) == "I 1"
assert pixel(ImageMath.eval("equal(Z, Z)", Z=Z)) == "I 1"
assert pixel(ImageMath.eval("equal(A, B)", A=A, B=B)) == "I 0"
assert pixel(ImageMath.eval("equal(B, A)", A=A, B=B)) == "I 0"
assert pixel(ImageMath.eval("equal(A, Z)", A=A, Z=Z)) == "I 0"
def METHOD_NAME():
assert pixel(ImageMath.eval("notequal(A, A)", A=A)) == "I 0"
assert pixel(ImageMath.eval("notequal(B, B)", B=B)) == "I 0"
assert pixel(ImageMath.eval("notequal(Z, Z)", Z=Z)) == "I 0"
assert pixel(ImageMath.eval("notequal(A, B)", A=A, B=B)) == "I 1"
assert pixel(ImageMath.eval("notequal(B, A)", A=A, B=B)) == "I 1"
assert pixel(ImageMath.eval("notequal(A, Z)", A=A, Z=Z)) == "I 1" |
5,960 | sample images | import os
import random
import shutil
import json
from plantcv.plantcv import fatal_error
def METHOD_NAME(source_path, dest_path, num=100):
if not os.path.exists(source_path):
raise IOError(f"Directory does not exist: {source_path}")
if not os.path.exists(dest_path):
os.makedirs(dest_path) # exist_ok argument does not exist in python 2
# If SnapshotInfo exists then need to make a new csv for the random image sample
if os.path.exists(os.path.join(source_path, 'SnapshotInfo.csv')):
_sample_phenofront(source_path, dest_path, num)
elif os.path.exists(os.path.join(source_path, 'metadata.json')):
_sample_phenodata(source_path, dest_path, num)
else:
_sample_filenames(source_path, dest_path, num)
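# Illustrative call (paths are placeholders):
# METHOD_NAME("./raw_images", "./subsample", num=50)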
def _sample_phenofront(source_path, dest_path, num=100):
"""
Sample images from a phenofront dataset.
:param source_path: Path to phenofront images.
:param dest_path: Path to save sampled images.
:param num: Number of images to sample.
:return: None
"""
line_array = []
with open(os.path.join(source_path, 'SnapshotInfo.csv')) as fp:
header = fp.readline()
for line in fp:
line = line.rstrip("\n")
element_arr = line.split(',')
if element_arr[-1]:
line_array.append(element_arr)
# Check to make sure number of imgs to select is less than number of images found
    if num > len(line_array):
        fatal_error(f"Number of snapshots found ({len(line_array)}) is less than 'num'.")
# Create SnapshotInfo file
with open(os.path.join(dest_path, 'SnapshotInfo.csv'), 'w') as out_file:
out_file.write(header)
# Get random snapshots
        random_index = random.sample(range(len(line_array)), num)
for i in random_index:
row = line_array[int(i)]
out_file.write(','.join(row) + "\n")
snap_path = os.path.join(source_path, "snapshot" + row[1])
folder_path = os.path.join(dest_path, "snapshot" + row[1])
if not os.path.exists(folder_path):
os.mkdir(folder_path) # the beginning of folder_path (dest_path) already exists from above
for root, _, files in os.walk(snap_path):
for file in files:
shutil.copy(os.path.join(root, file), folder_path)
def _sample_filenames(source_path, dest_path, num=100):
"""
Sample images from a filenames dataset.
:param source_path: Path to images.
:param dest_path: Path to save sampled images.
:param num: Number of images to sample.
:return: None
"""
img_element_array = []
img_extensions = ['.png', '.jpg', '.jpeg', '.tif', '.tiff', '.gif']
for root, _, files in os.walk(source_path):
for file in files:
# Check file type so that only images get copied over
ext = os.path.splitext(file)[1]
if ext.lower() in img_extensions:
img_element_array.append(os.path.join(root, file))
# Check to make sure number of imgs to select is less than number of images found
    if num > len(img_element_array):
        fatal_error(f"Number of images found ({len(img_element_array)}) is less than 'num'.")
    # Get random images
    random_index = random.sample(range(len(img_element_array)), num)
# Copy images over to destination
for i in random_index:
shutil.copy(img_element_array[int(i)], dest_path)
def _sample_phenodata(source_path, dest_path, num=100):
"""
Sample images from a phenodata dataset.
:param source_path: Path to phenodata images.
:param dest_path: Path to save sampled images.
:param num: Number of images to sample.
:return: None
"""
# Initialize an empty dataset
sampled_dataset = {}
# Read in the metadata
with open(os.path.join(source_path, "metadata.json"), "r") as fp:
dataset = json.load(fp)
# Set the dataset to the sampled dataset
sampled_dataset["dataset"] = dataset["dataset"]
    # Leave the environment section empty
sampled_dataset["environment"] = {}
# Initialize the images section
sampled_dataset["images"] = {}
# Create a unique dictionary of snapshot IDs
snapshots = {}
# Store the snapshot IDs in the snapshots dictionary
for value in dataset["images"].values():
snapshots[value["snapshot"]] = True
# Check to make sure number of imgs to select is less than number of images found
    if len(snapshots) < num:
        fatal_error(f"Number of snapshots found ({len(snapshots)}) is less than 'num'.")
    # Randomly select the snapshots (random.sample requires a sequence, not a dict view)
    random_snapshots = random.sample(list(snapshots.keys()), num)
# Iterate over all images in the dataset
for fpath, meta in dataset["images"].items():
# If the snapshot ID is in the random snapshots
if meta["snapshot"] in random_snapshots:
# Store the image in the sampled dataset
sampled_dataset["images"][fpath] = meta
# Copy the image to the destination directory
parent_path = os.path.split(fpath)[0]
os.makedirs(os.path.join(dest_path, parent_path), exist_ok=True)
shutil.copy(os.path.join(source_path, fpath), os.path.join(dest_path, fpath))
# Write the sampled dataset to a JSON file
with open(os.path.join(dest_path, "metadata.json"), "w") as fp:
json.dump(sampled_dataset, fp, indent=4) |
5,961 | test param model is not permitted | from unittest import mock
import pytest
from fastapi import HTTPException
from fastapi.encoders import jsonable_encoder
from pydantic import ValidationError
from mlflow.gateway.config import RouteConfig
from mlflow.gateway.constants import MLFLOW_AI_GATEWAY_ANTHROPIC_MAXIMUM_MAX_TOKENS
from mlflow.gateway.providers.anthropic import AnthropicProvider
from mlflow.gateway.schemas import chat, completions, embeddings
from tests.gateway.tools import MockAsyncResponse
def completions_response():
return {
"completion": "Here is a basic overview of how a car works:\n\n1. The engine. "
"The engine is the power source that makes the car move.",
"stop_reason": "max_tokens",
"model": "claude-instant-1.1",
"truncated": False,
"stop": None,
"log_id": "dee173f87ddf1357da639dee3c38d833",
"exception": None,
"headers": {"Content-Type": "application/json"},
}
def completions_config():
return {
"name": "completions",
"route_type": "llm/v1/completions",
"model": {
"provider": "anthropic",
"name": "claude-instant-1",
"config": {
"anthropic_api_key": "key",
},
},
}
def parsed_completions_response():
return {
"candidates": [
{
"text": "Here is a basic overview of how a car works:\n\n1. The engine. "
"The engine is the power source that makes the car move.",
"metadata": {"finish_reason": "length"},
}
],
"metadata": {
"model": "claude-instant-1.1",
"route_type": "llm/v1/completions",
"input_tokens": None,
"output_tokens": None,
"total_tokens": None,
},
}
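# Note: the expected payload above assumes the provider maps Anthropic's
# "stop_reason": "max_tokens" onto the gateway finish_reason "length" and
# passes the model name through; token counts are not returned in this
# response shape, so the token fields stay None.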
@pytest.mark.asyncio
async def test_completions():
resp = completions_response()
config = completions_config()
with mock.patch(
"aiohttp.ClientSession.post", return_value=MockAsyncResponse(resp)
) as mock_post:
provider = AnthropicProvider(RouteConfig(**config))
payload = {"prompt": "How does a car work?", "max_tokens": 200}
response = await provider.completions(completions.RequestPayload(**payload))
assert jsonable_encoder(response) == parsed_completions_response()
mock_post.assert_called_once()
@pytest.mark.asyncio
async def test_completions_with_default_max_tokens():
resp = completions_response()
config = completions_config()
with mock.patch(
"aiohttp.ClientSession.post", return_value=MockAsyncResponse(resp)
) as mock_post:
provider = AnthropicProvider(RouteConfig(**config))
payload = {"prompt": "How does a car work?"}
response = await provider.completions(completions.RequestPayload(**payload))
assert jsonable_encoder(response) == parsed_completions_response()
mock_post.assert_called_once()
@pytest.mark.asyncio
async def test_completions_throws_with_invalid_max_tokens_too_large():
config = completions_config()
provider = AnthropicProvider(RouteConfig(**config))
payload = {"prompt": "Would Fozzie or Kermet win in a fight?", "max_tokens": 1000001}
with pytest.raises(HTTPException, match=r".*") as e:
await provider.completions(completions.RequestPayload(**payload))
assert (
"Invalid value for max_tokens: cannot exceed "
f"{MLFLOW_AI_GATEWAY_ANTHROPIC_MAXIMUM_MAX_TOKENS}" in e.value.detail
)
assert e.value.status_code == 422
@pytest.mark.asyncio
async def test_completions_throws_with_unsupported_candidate_count():
config = completions_config()
provider = AnthropicProvider(RouteConfig(**config))
payload = {
"prompt": "Would Fozzie or Kermet win in a fight?",
"candidate_count": 5,
"max_tokens": 10,
}
with pytest.raises(HTTPException, match=r".*") as e:
await provider.completions(completions.RequestPayload(**payload))
assert "'candidate_count' must be '1' for the Anthropic provider" in e.value.detail
assert e.value.status_code == 422
@pytest.mark.asyncio
async def test_completions_throws_with_top_p_defined():
config = completions_config()
provider = AnthropicProvider(RouteConfig(**config))
payload = {"prompt": "Would Fozzie or Kermet win in a fight?", "max_tokens": 500, "top_p": 0.6}
with pytest.raises(HTTPException, match=r".*") as e:
await provider.completions(completions.RequestPayload(**payload))
assert "Cannot set both 'temperature' and 'top_p' parameters. Please" in e.value.detail
assert e.value.status_code == 422
@pytest.mark.asyncio
async def test_completions_throws_with_stream_set_to_true():
config = completions_config()
provider = AnthropicProvider(RouteConfig(**config))
payload = {
"prompt": "Could the Millennium Falcon fight a Borg Cube and win?",
"max_tokens": 5000,
"stream": "true",
}
with pytest.raises(HTTPException, match=r".*") as e:
await provider.completions(completions.RequestPayload(**payload))
assert "Setting the 'stream' parameter to 'true' is not supported" in e.value.detail
assert e.value.status_code == 422
def chat_config():
return {
"name": "chat",
"route_type": "llm/v1/chat",
"model": {
"provider": "anthropic",
"name": "claude-instant-1",
"config": {
"anthropic_api_key": "key",
},
},
}
@pytest.mark.asyncio
async def test_chat_is_not_supported_for_anthropic():
config = chat_config()
provider = AnthropicProvider(RouteConfig(**config))
payload = {
"messages": [{"role": "user", "content": "Claude, can you chat with me? I'm lonely."}]
}
with pytest.raises(HTTPException, match=r".*") as e:
await provider.chat(chat.RequestPayload(**payload))
assert "The chat route is not available for Anthropic models" in e.value.detail
assert e.value.status_code == 404
def embedding_config():
return {
"name": "embeddings",
"route_type": "llm/v1/embeddings",
"model": {
"provider": "anthropic",
"name": "claude-1.3-100k",
"config": {
"anthropic_api_key": "key",
},
},
}
@pytest.mark.asyncio
async def test_embeddings_are_not_supported_for_anthropic():
config = embedding_config()
provider = AnthropicProvider(RouteConfig(**config))
payload = {"text": "give me that sweet, sweet vector, please."}
with pytest.raises(HTTPException, match=r".*") as e:
await provider.embeddings(embeddings.RequestPayload(**payload))
assert "The embeddings route is not available for Anthropic models" in e.value.detail
assert e.value.status_code == 404
@pytest.mark.asyncio
async def METHOD_NAME():
config = completions_config()
provider = AnthropicProvider(RouteConfig(**config))
payload = {
"prompt": "This should fail",
"max_tokens": 5000,
"model": "something-else",
}
with pytest.raises(HTTPException, match=r".*") as e:
await provider.completions(completions.RequestPayload(**payload))
assert "The parameter 'model' is not permitted" in e.value.detail
assert e.value.status_code == 422
@pytest.mark.parametrize("prompt", [{"set1", "set2"}, ["list1"], [1], ["list1", "list2"], [1, 2]])
@pytest.mark.asyncio
async def test_completions_throws_if_prompt_contains_non_string(prompt):
config = completions_config()
provider = AnthropicProvider(RouteConfig(**config))
payload = {"prompt": prompt}
with pytest.raises(ValidationError, match=r"prompt"):
await provider.completions(completions.RequestPayload(**payload)) |
5,962 | do genesis | # Copyright 2017 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ------------------------------------------------------------------------------
import os
from sawtooth_cli.admin_command.config import get_data_dir
from sawtooth_cli.exceptions import CliException
from sawtooth_cli.protobuf.batch_pb2 import BatchList
from sawtooth_cli.protobuf.genesis_pb2 import GenesisData
from sawtooth_cli.protobuf.settings_pb2 import SettingProposal
from sawtooth_cli.protobuf.settings_pb2 import SettingsPayload
from sawtooth_cli.protobuf.transaction_pb2 import TransactionHeader
REQUIRED_SETTINGS = [
'sawtooth.consensus.algorithm.name',
'sawtooth.consensus.algorithm.version']
def add_genesis_parser(subparsers, parent_parser):
"""Creates the arg parsers needed for the genesis command.
"""
parser = subparsers.add_parser(
'genesis',
help='Creates the genesis.batch file for initializing the validator',
description='Generates the genesis.batch file for '
'initializing the validator.',
epilog='This command generates a serialized GenesisData protobuf '
'message and stores it in the genesis.batch file. One or more input '
'files contain serialized BatchList protobuf messages to add to the '
'GenesisData. The output shows the location of this file. By default, '
'the genesis.batch file is stored in /var/lib/sawtooth. If '
'$SAWTOOTH_HOME is set, the location is '
'$SAWTOOTH_HOME/data/genesis.batch. Use the --output option to change '
'the name of the file. The following settings must be present in the '
'input batches:\n{}\n'.format(REQUIRED_SETTINGS),
parents=[parent_parser])
parser.add_argument(
'-o', '--output',
type=str,
help='choose the output file for GenesisData')
parser.add_argument(
'input_file',
nargs='*',
type=str,
help='file or files containing batches to add to the resulting '
'GenesisData')
parser.add_argument(
'--ignore-required-settings',
action='store_true',
help='skip the check for settings that are required at genesis '
'(necessary if using a settings transaction family other than '
'sawtooth_settings)')
def METHOD_NAME(args, data_dir=None):
"""Given the command args, take an series of input files containing
GenesisData, combine all the batches into one GenesisData, and output the
result into a new file.
"""
if data_dir is None:
data_dir = get_data_dir()
if not os.path.exists(data_dir):
raise CliException(
"Data directory does not exist: {}".format(data_dir))
genesis_batches = []
for input_file in args.input_file:
print('Processing {}...'.format(input_file))
input_data = BatchList()
try:
with open(input_file, 'rb') as in_file:
input_data.ParseFromString(in_file.read())
        except IOError as e:
            raise CliException(
                'Unable to read {}'.format(input_file)) from e
genesis_batches += input_data.batches
    _validate_dependencies(genesis_batches)
if not args.ignore_required_settings:
_check_required_settings(genesis_batches)
if args.output:
genesis_file = args.output
else:
genesis_file = os.path.join(data_dir, 'genesis.batch')
print('Generating {}'.format(genesis_file))
output_data = GenesisData(batches=genesis_batches)
with open(genesis_file, 'wb') as out_file:
out_file.write(output_data.SerializeToString())
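# Illustrative CLI invocation (sketch; the wrapper command name depends on the
# Sawtooth release):
# sawadm genesis config-genesis.batch config-consensus.batch -o genesis.batch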
def _validate_dependencies(batches):
    """Validates the transaction dependencies for the transactions contained
    within the sequence of batches. Given that all the batches are expected
    to be executed for the genesis block, it is assumed that any dependency
    transaction will precede the transaction that depends on it.
"""
transaction_ids = set()
for batch in batches:
for txn in batch.transactions:
txn_header = TransactionHeader()
txn_header.ParseFromString(txn.header)
if txn_header.dependencies:
unsatisfied_deps = [
id for id in txn_header.dependencies
if id not in transaction_ids
]
if unsatisfied_deps:
raise CliException(
'Unsatisfied dependency in given transactions:'
' {}'.format(unsatisfied_deps))
transaction_ids.add(txn.header_signature)
def _check_required_settings(batches):
"""Ensure that all settings required at genesis are set."""
required_settings = REQUIRED_SETTINGS.copy()
for batch in batches:
for txn in batch.transactions:
txn_header = TransactionHeader()
txn_header.ParseFromString(txn.header)
if txn_header.family_name == 'sawtooth_settings':
settings_payload = SettingsPayload()
settings_payload.ParseFromString(txn.payload)
if settings_payload.action == SettingsPayload.PROPOSE:
proposal = SettingProposal()
proposal.ParseFromString(settings_payload.data)
if proposal.setting in required_settings:
required_settings.remove(proposal.setting)
if required_settings:
raise CliException(
'The following setting(s) are required at genesis, but were not '
'included in the genesis batches: {}'.format(required_settings)) |
5,963 | move device like | # Copyright (c) Facebook, Inc. and its affiliates.
"""
Wrappers around on some nn functions, mainly to support empty tensors.
Ideally, add support directly in PyTorch to empty tensors in those functions.
These can be removed once https://github.com/pytorch/pytorch/issues/12013
is implemented
"""
import warnings
from typing import List, Optional
import torch
from torch.nn import functional as F
from annotator.oneformer.detectron2.utils.env import TORCH_VERSION
def shapes_to_tensor(x: List[int], device: Optional[torch.device] = None) -> torch.Tensor:
"""
Turn a list of integer scalars or integer Tensor scalars into a vector,
in a way that's both traceable and scriptable.
In tracing, `x` should be a list of scalar Tensor, so the output can trace to the inputs.
In scripting or eager, `x` should be a list of int.
"""
if torch.jit.is_scripting():
return torch.as_tensor(x, device=device)
if torch.jit.is_tracing():
assert all(
[isinstance(t, torch.Tensor) for t in x]
), "Shape should be tensor during tracing!"
# as_tensor should not be used in tracing because it records a constant
ret = torch.stack(x)
if ret.device != device: # avoid recording a hard-coded device if not necessary
ret = ret.to(device=device)
return ret
return torch.as_tensor(x, device=device)
def check_if_dynamo_compiling():
if TORCH_VERSION >= (1, 14):
from torch._dynamo import is_compiling
return is_compiling()
else:
return False
def cat(tensors: List[torch.Tensor], dim: int = 0):
"""
Efficient version of torch.cat that avoids a copy if there is only a single element in a list
"""
assert isinstance(tensors, (list, tuple))
if len(tensors) == 1:
return tensors[0]
return torch.cat(tensors, dim)
def empty_input_loss_func_wrapper(loss_func):
def wrapped_loss_func(input, target, *, reduction="mean", **kwargs):
"""
Same as `loss_func`, but returns 0 (instead of nan) for empty inputs.
"""
if target.numel() == 0 and reduction == "mean":
return input.sum() * 0.0 # connect the gradient
return loss_func(input, target, reduction=reduction, **kwargs)
return wrapped_loss_func
cross_entropy = empty_input_loss_func_wrapper(F.cross_entropy)
class _NewEmptyTensorOp(torch.autograd.Function):
@staticmethod
def forward(ctx, x, new_shape):
ctx.shape = x.shape
return x.new_empty(new_shape)
@staticmethod
def backward(ctx, grad):
shape = ctx.shape
return _NewEmptyTensorOp.apply(grad, shape), None
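# _NewEmptyTensorOp creates an empty tensor of a requested shape while keeping
# the autograd graph connected: backward returns an empty gradient shaped like
# the original input. Hypothetical use: _NewEmptyTensorOp.apply(x, (0, C, H, W)).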
class Conv2d(torch.nn.Conv2d):
"""
A wrapper around :class:`torch.nn.Conv2d` to support empty inputs and more features.
"""
def __init__(self, *args, **kwargs):
"""
Extra keyword arguments supported in addition to those in `torch.nn.Conv2d`:
Args:
norm (nn.Module, optional): a normalization layer
activation (callable(Tensor) -> Tensor): a callable activation function
It assumes that norm layer is used before activation.
"""
norm = kwargs.pop("norm", None)
activation = kwargs.pop("activation", None)
super().__init__(*args, **kwargs)
self.norm = norm
self.activation = activation
def forward(self, x):
# torchscript does not support SyncBatchNorm yet
# https://github.com/pytorch/pytorch/issues/40507
# and we skip these codes in torchscript since:
# 1. currently we only support torchscript in evaluation mode
# 2. features needed by exporting module to torchscript are added in PyTorch 1.6 or
# later version, `Conv2d` in these PyTorch versions has already supported empty inputs.
if not torch.jit.is_scripting():
# Dynamo doesn't support context managers yet
is_dynamo_compiling = check_if_dynamo_compiling()
if not is_dynamo_compiling:
with warnings.catch_warnings(record=True):
if x.numel() == 0 and self.training:
# https://github.com/pytorch/pytorch/issues/12013
assert not isinstance(
self.norm, torch.nn.SyncBatchNorm
), "SyncBatchNorm does not support empty inputs!"
x = F.conv2d(
x, self.weight, self.bias, self.stride, self.padding, self.dilation, self.groups
)
if self.norm is not None:
x = self.norm(x)
if self.activation is not None:
x = self.activation(x)
return x
ConvTranspose2d = torch.nn.ConvTranspose2d
BatchNorm2d = torch.nn.BatchNorm2d
interpolate = F.interpolate
Linear = torch.nn.Linear
def nonzero_tuple(x):
"""
A 'as_tuple=True' version of torch.nonzero to support torchscript.
because of https://github.com/pytorch/pytorch/issues/38718
"""
if torch.jit.is_scripting():
if x.dim() == 0:
return x.unsqueeze(0).nonzero().unbind(1)
return x.nonzero().unbind(1)
else:
return x.nonzero(as_tuple=True)
@torch.jit.script_if_tracing
def METHOD_NAME(src: torch.Tensor, dst: torch.Tensor) -> torch.Tensor:
"""
Tracing friendly way to cast tensor to another tensor's device. Device will be treated
as constant during tracing, scripting the casting process as whole can workaround this issue.
"""
return src.to(dst.device) |
5,964 | main | #!/usr/bin/env python3
# Copyright 2018 Nagoya University (Tomoki Hayashi)
# Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
import argparse
import logging
from distutils.util import strtobool
import kaldiio
import numpy
import resampy
from espnet2.utils.types import int_or_none
from espnet.transform.spectrogram import logmelspectrogram
from espnet.utils.cli_utils import get_commandline_args
from espnet.utils.cli_writers import file_writer_helper
def get_parser():
parser = argparse.ArgumentParser(
description="compute FBANK feature from WAV",
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
)
parser.add_argument("--fs", type=int_or_none, help="Sampling frequency")
parser.add_argument(
"--fmax", type=int_or_none, default=None, nargs="?", help="Maximum frequency"
)
parser.add_argument(
"--fmin", type=int_or_none, default=None, nargs="?", help="Minimum frequency"
)
parser.add_argument("--n_mels", type=int, default=80, help="Number of mel basis")
parser.add_argument("--n_fft", type=int, default=1024, help="FFT length in point")
parser.add_argument(
"--n_shift", type=int, default=512, help="Shift length in point"
)
parser.add_argument(
"--win_length",
type=int_or_none,
default=None,
nargs="?",
help="Analysis window length in point",
)
parser.add_argument(
"--window",
type=str,
default="hann",
choices=["hann", "hamming"],
help="Type of window",
)
parser.add_argument(
"--write-num-frames", type=str, help="Specify wspecifer for utt2num_frames"
)
parser.add_argument(
"--filetype",
type=str,
default="mat",
choices=["mat", "hdf5"],
help="Specify the file format for output. "
'"mat" is the matrix format in kaldi',
)
parser.add_argument(
"--compress", type=strtobool, default=False, help="Save in compressed format"
)
parser.add_argument(
"--compression-method",
type=int,
default=2,
help="Specify the method(if mat) or " "gzip-level(if hdf5)",
)
parser.add_argument("--verbose", "-V", default=0, type=int, help="Verbose option")
parser.add_argument(
"--normalize",
choices=[1, 16, 24, 32],
type=int,
default=None,
help="Give the bit depth of the PCM, "
"then normalizes data to scale in [-1,1]",
)
parser.add_argument("rspecifier", type=str, help="WAV scp file")
parser.add_argument(
"--segments",
type=str,
help="segments-file format: each line is either"
"<segment-id> <recording-id> <start-time> <end-time>"
"e.g. call-861225-A-0050-0065 call-861225-A 5.0 6.5",
)
parser.add_argument("wspecifier", type=str, help="Write specifier")
return parser
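# Illustrative invocation (script name and file names are placeholders):
# python compute-fbank-feats.py --fs 16000 --n_mels 80 scp:wav.scp ark:fbank.ark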
def METHOD_NAME():
parser = get_parser()
args = parser.parse_args()
logfmt = "%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s"
if args.verbose > 0:
logging.basicConfig(level=logging.INFO, format=logfmt)
else:
logging.basicConfig(level=logging.WARN, format=logfmt)
logging.info(get_commandline_args())
with kaldiio.ReadHelper(
args.rspecifier, segments=args.segments
) as reader, file_writer_helper(
args.wspecifier,
filetype=args.filetype,
write_num_frames=args.write_num_frames,
compress=args.compress,
compression_method=args.compression_method,
) as writer:
for utt_id, (rate, array) in reader:
array = array.astype(numpy.float32)
if args.fs is not None and rate != args.fs:
array = resampy.resample(array, rate, args.fs, axis=0)
if args.normalize is not None and args.normalize != 1:
array = array / (1 << (args.normalize - 1))
lmspc = logmelspectrogram(
x=array,
fs=args.fs if args.fs is not None else rate,
n_mels=args.n_mels,
n_fft=args.n_fft,
n_shift=args.n_shift,
win_length=args.win_length,
window=args.window,
fmin=args.fmin,
fmax=args.fmax,
)
writer[utt_id] = lmspc
if __name__ == "__main__":
METHOD_NAME() |
5,965 | test ckyx 1x1 | ################################################################################
#
# Copyright (C) 2020-2022 Advanced Micro Devices, Inc. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
################################################################################
import logging
import pytest
from Tensile.SolutionStructs import Convolution
from YamlBuilder.YamlBuilder import defaultSizes, resnetSizes, inceptionSizes
log = logging.getLogger("testlog")
@pytest.mark.parametrize("problemSizes", [defaultSizes, resnetSizes, inceptionSizes])
def METHOD_NAME(tensile_state, run_convolution_level, problemSizes):
z={} # problemType definition
conv = Convolution(z, 'ConvolutionForward',
config={'TensorAFormat': 'NCHW',
'TensorBFormat': 'CKYX',
'Filter': '1x1',
})
log.debug(conv.printUsage(z))
if not tensile_state.args["no_conv_assertions"]:
assert(z['NumIndicesC']==3)
assert(z['IndexAssignmentsA']==[0, 3, 2])
assert(z['IndexAssignmentsB']==[1, 3, 2])
assert(conv.solutionParms["AssertStrideAEqual"] == {0:1})
assert(conv.solutionParms["AssertStrideBEqual"] == {0:1,2:0})
assert(conv.solutionParms["AssertSizeEqual"] == {})
run_convolution_level.func(conv, z, run_convolution_level.solution, problemSizes[0], problemSizes[1])
def test_ckyx_1x1_nopack(tensile_state, run_convolution_level):
z={} # problemType definition
conv = Convolution(z, 'ConvolutionForward',
config={'TensorAFormat': 'NCHW',
'TensorBFormat': 'CKYX',
'PackedSpatialDims': 0,
'Filter': '1x1',
})
log.debug(conv.printUsage(z))
if not tensile_state.args["no_conv_assertions"]:
assert(z['NumIndicesC']==4)
assert(z['IndexAssignmentsA']==[0, 1, 4, 3])
assert(z['IndexAssignmentsB']==[2, 4, 3])
assert(conv.solutionParms["AssertStrideAEqual"] == {0:1})
assert(conv.solutionParms["AssertStrideBEqual"] == {0:1,2:0})
assert(conv.solutionParms["AssertSizeEqual"] == {})
run_convolution_level.func(conv, z, run_convolution_level.solution)
def test_ckyx_2x2(tensile_state, run_convolution_level):
z={} # problemType definition
conv = Convolution(z, 'ConvolutionForward',
config={'TensorAFormat': 'NCHW',
'TensorBFormat': 'CKYX',
'Filter': '2x3',
})
log.debug(conv.printUsage(z))
if not tensile_state.args["no_conv_assertions"]:
filterDims = [4,3] if conv.unrollOnChannel else [5,4]
cdim = 5 if conv.unrollOnChannel else 3
assert(z['NumIndicesC']==3)
assert(z['IndexAssignmentsA']==filterDims + [0, cdim, 2])
assert(z['IndexAssignmentsB']==filterDims + [1, cdim, 2])
assert(conv.solutionParms["AssertStrideAEqual"] == {0:1,2:1})
assert(conv.solutionParms["AssertStrideBEqual"] == {0:1,4:0})
assert(conv.solutionParms["AssertSizeEqual"] == {filterDims[0]:3, filterDims[1]:2})
run_convolution_level.func(conv, z, run_convolution_level.solution) |
5,966 | test area datetime nat | import datetime as dt
import pandas as pd
import numpy as np
from holoviews.element import Area, Overlay
from ...utils import LoggingComparisonTestCase
from .test_plot import TestBokehPlot, bokeh_renderer
class TestAreaPlot(LoggingComparisonTestCase, TestBokehPlot):
def test_area_with_nans(self):
area = Area([1, 2, 3, np.nan, 5, 6, 7])
plot = bokeh_renderer.get_plot(area)
cds = plot.handles['cds']
self.assertEqual(cds.data['x'], np.array([0., 1., 2., 2., 1., 0., np.nan,
4., 5., 6., 6., 5., 4.]))
self.assertEqual(cds.data['y'], np.array([0., 0., 0., 3., 2., 1., np.nan,
0., 0., 0., 7., 6., 5.]))
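        # The CDS arrays trace a filled polygon: forward along the baseline
        # (y=0), then back across the data in reverse; the NaN splits the
        # area into two patches on either side of the missing sample.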
def test_area_empty(self):
area = Area([])
plot = bokeh_renderer.get_plot(area)
cds = plot.handles['cds']
self.assertEqual(cds.data['x'], [])
self.assertEqual(cds.data['y'], [])
def METHOD_NAME(self):
values = [(np.datetime64(dt.datetime(2017, 1, i)), i) for i in range(1, 4)]
values.append((np.datetime64('nat'), np.nan))
values += [(np.datetime64(dt.datetime(2017, 1, i)), i) for i in range(4, 6)]
area = Area(values)
plot = bokeh_renderer.get_plot(area)
cds = plot.handles['cds']
xs = np.array([
'2017-01-01T00:00:00.000000000', '2017-01-02T00:00:00.000000000',
'2017-01-03T00:00:00.000000000', '2017-01-03T00:00:00.000000000',
'2017-01-02T00:00:00.000000000', '2017-01-01T00:00:00.000000000',
'NaT', '2017-01-04T00:00:00.000000000',
'2017-01-05T00:00:00.000000000', '2017-01-05T00:00:00.000000000',
'2017-01-04T00:00:00.000000000'
], dtype='datetime64[ns]')
ys = np.array([
0., 0., 0., 3., 2., 1., np.nan, 0., 0., 5., 4.
])
self.assertEqual(cds.data['x'], xs)
self.assertEqual(cds.data['y'], ys)
def test_area_padding_square(self):
area = Area([(1, 1), (2, 2), (3, 3)]).opts(padding=0.1)
plot = bokeh_renderer.get_plot(area)
x_range, y_range = plot.handles['x_range'], plot.handles['y_range']
self.assertEqual(x_range.start, 0.8)
self.assertEqual(x_range.end, 3.2)
self.assertEqual(y_range.start, 0)
self.assertEqual(y_range.end, 3.2)
def test_area_padding_square_per_axis(self):
area = Area([(1, 1), (2, 2), (3, 3)]).opts(padding=((0, 0.1), (0.1, 0.2)))
plot = bokeh_renderer.get_plot(area)
x_range, y_range = plot.handles['x_range'], plot.handles['y_range']
self.assertEqual(x_range.start, 1.0)
self.assertEqual(x_range.end, 3.2)
self.assertEqual(y_range.start, 0)
self.assertEqual(y_range.end, 3.4)
def test_area_with_lower_vdim(self):
area = Area([(1, 0.5, 1), (2, 1.5, 2), (3, 2.5, 3)], vdims=['y', 'y2']).opts(padding=0.1)
plot = bokeh_renderer.get_plot(area)
x_range, y_range = plot.handles['x_range'], plot.handles['y_range']
self.assertEqual(x_range.start, 0.8)
self.assertEqual(x_range.end, 3.2)
self.assertEqual(y_range.start, 0.25)
self.assertEqual(y_range.end, 3.25)
def test_area_padding_negative(self):
area = Area([(1, -1), (2, -2), (3, -3)]).opts(padding=0.1)
plot = bokeh_renderer.get_plot(area)
x_range, y_range = plot.handles['x_range'], plot.handles['y_range']
self.assertEqual(x_range.start, 0.8)
self.assertEqual(x_range.end, 3.2)
self.assertEqual(y_range.start, -3.2)
self.assertEqual(y_range.end, 0)
def test_area_padding_mixed(self):
area = Area([(1, 1), (2, -2), (3, 3)]).opts(padding=0.1)
plot = bokeh_renderer.get_plot(area)
x_range, y_range = plot.handles['x_range'], plot.handles['y_range']
self.assertEqual(x_range.start, 0.8)
self.assertEqual(x_range.end, 3.2)
self.assertEqual(y_range.start, -2.5)
self.assertEqual(y_range.end, 3.5)
def test_area_padding_hard_range(self):
area = Area([(1, 1), (2, 2), (3, 3)]).redim.range(y=(0, 4)).opts(padding=0.1)
plot = bokeh_renderer.get_plot(area)
x_range, y_range = plot.handles['x_range'], plot.handles['y_range']
self.assertEqual(x_range.start, 0.8)
self.assertEqual(x_range.end, 3.2)
self.assertEqual(y_range.start, 0)
self.assertEqual(y_range.end, 4)
def test_area_padding_soft_range(self):
area = Area([(1, 1), (2, 2), (3, 3)]).redim.soft_range(y=(0, 3.5)).opts(padding=0.1)
plot = bokeh_renderer.get_plot(area)
x_range, y_range = plot.handles['x_range'], plot.handles['y_range']
self.assertEqual(x_range.start, 0.8)
self.assertEqual(x_range.end, 3.2)
self.assertEqual(y_range.start, 0)
self.assertEqual(y_range.end, 3.5)
def test_area_padding_nonsquare(self):
area = Area([(1, 1), (2, 2), (3, 3)]).opts(padding=0.1, width=600)
plot = bokeh_renderer.get_plot(area)
x_range, y_range = plot.handles['x_range'], plot.handles['y_range']
self.assertEqual(x_range.start, 0.9)
self.assertEqual(x_range.end, 3.1)
self.assertEqual(y_range.start, 0)
self.assertEqual(y_range.end, 3.2)
def test_area_padding_logx(self):
        area = Area([(1, 1), (2, 2), (3, 3)]).opts(padding=0.1, logx=True)
plot = bokeh_renderer.get_plot(area)
x_range, y_range = plot.handles['x_range'], plot.handles['y_range']
self.assertEqual(x_range.start, 0.89595845984076228)
self.assertEqual(x_range.end, 3.3483695221017129)
self.assertEqual(y_range.start, 0)
self.assertEqual(y_range.end, 3.2)
def test_area_padding_logy(self):
area = Area([(1, 1), (2, 2), (3, 3)]).opts(padding=0.1, logy=True)
plot = bokeh_renderer.get_plot(area)
x_range, y_range = plot.handles['x_range'], plot.handles['y_range']
self.assertEqual(x_range.start, 0.8)
self.assertEqual(x_range.end, 3.2)
self.assertEqual(y_range.start, 0.01)
self.assertEqual(y_range.end, 3.3483695221017129)
self.log_handler.assertContains('WARNING', 'Logarithmic axis range encountered value less than')
def test_area_legend(self):
python = np.array([2, 3, 7, 5, 26, 221, 44, 233, 254, 265, 266, 267, 120, 111])
pypy = np.array([12, 33, 47, 15, 126, 121, 144, 233, 254, 225, 226, 267, 110, 130])
jython = np.array([22, 43, 10, 25, 26, 101, 114, 203, 194, 215, 201, 227, 139, 160])
dims = dict(kdims="time", vdims="memory")
python = Area(python, label="python", **dims)
pypy = Area(pypy, label="pypy", **dims)
jython = Area(jython, label="jython", **dims)
overlay = Area.stack(python * pypy * jython)
labels = [n[1] for n in overlay.data]
self.assertEqual(labels, ['Python', 'Pypy', 'Jython'])
def test_area_stack_vdims(self):
df = pd.DataFrame({'x': [1, 2, 3], 'y_1': [1, 2, 3], 'y_2': [6, 4, 2], 'y_3': [8, 1, 2]})
overlay = Overlay([Area(df, kdims='x', vdims=col, label=col) for col in ['y_1', 'y_2', 'y_3']])
plot = Area.stack(overlay)
baselines = [np.array([0, 0, 0]), np.array([1., 2., 3.]), np.array([7., 6., 5.])]
for n, baseline in zip(plot.data, baselines):
self.assertEqual(plot.data[n].data.Baseline.to_numpy(), baseline) |
5,967 | expect problem | """
A simple client that uses the Python ACME library to run a test issuance against
a local Boulder server.
Usage:
$ virtualenv venv
$ . venv/bin/activate
$ pip install -r requirements.txt
$ python chisel2.py foo.com bar.com
"""
import json
import logging
import os
import sys
import signal
import threading
import time
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives.asymmetric import rsa
from cryptography import x509
from cryptography.hazmat.primitives import hashes
import OpenSSL
import josepy
from acme import challenges
from acme import client as acme_client
from acme import crypto_util as acme_crypto_util
from acme import errors as acme_errors
from acme import messages
from acme import standalone
logging.basicConfig()
logger = logging.getLogger()
logger.setLevel(int(os.getenv('LOGLEVEL', 20)))
DIRECTORY_V2 = os.getenv('DIRECTORY_V2', 'http://boulder.service.consul:4001/directory')
ACCEPTABLE_TOS = os.getenv('ACCEPTABLE_TOS', "https://boulder.service.consul:4431/terms/v7")
PORT = os.getenv('PORT', '80')
os.environ.setdefault('REQUESTS_CA_BUNDLE', 'test/wfe-tls/minica.pem')
import challtestsrv
challSrv = challtestsrv.ChallTestServer()
def uninitialized_client(key=None):
if key is None:
key = josepy.JWKRSA(key=rsa.generate_private_key(65537, 2048, default_backend()))
net = acme_client.ClientNetwork(key, user_agent="Boulder integration tester")
directory = messages.Directory.from_json(net.get(DIRECTORY_V2).json())
return acme_client.ClientV2(directory, net)
def make_client(email=None):
"""Build an acme.Client and register a new account with a random key."""
client = uninitialized_client()
tos = client.directory.meta.terms_of_service
if tos == ACCEPTABLE_TOS:
client.net.account = client.new_account(messages.NewRegistration.from_data(email=email,
terms_of_service_agreed=True))
else:
raise Exception("Unrecognized terms of service URL %s" % tos)
return client
class NoClientError(ValueError):
"""
An error that occurs when no acme.Client is provided to a function that
requires one.
"""
pass
class EmailRequiredError(ValueError):
"""
An error that occurs when a None email is provided to update_email.
"""
def update_email(client, email):
"""
Use a provided acme.Client to update the client's account to the specified
email.
"""
    if client is None:
        raise NoClientError("update_email requires a valid acme.Client argument")
    if email is None:
        raise EmailRequiredError("update_email requires an email argument")
    if not email.startswith("mailto:"):
        email = "mailto:" + email
acct = client.net.account
updatedAcct = acct.update(body=acct.body.update(contact=(email,)))
return client.update_registration(updatedAcct)
def get_chall(authz, typ):
for chall_body in authz.body.challenges:
if isinstance(chall_body.chall, typ):
return chall_body
raise Exception("No %s challenge found" % typ.typ)
def make_csr(domains):
key = OpenSSL.crypto.PKey()
key.generate_key(OpenSSL.crypto.TYPE_RSA, 2048)
pem = OpenSSL.crypto.dump_privatekey(OpenSSL.crypto.FILETYPE_PEM, key)
return acme_crypto_util.make_csr(pem, domains, False)
def http_01_answer(client, chall_body):
"""Return an HTTP01Resource to server in response to the given challenge."""
response, validation = chall_body.response_and_validation(client.net.key)
return standalone.HTTP01RequestHandler.HTTP01Resource(
chall=chall_body.chall, response=response,
validation=validation)
def auth_and_issue(domains, chall_type="dns-01", email=None, cert_output=None, client=None):
"""Make authzs for each of the given domains, set up a server to answer the
challenges in those authzs, tell the ACME server to validate the challenges,
then poll for the authzs to be ready and issue a cert."""
if client is None:
client = make_client(email)
csr_pem = make_csr(domains)
order = client.new_order(csr_pem)
authzs = order.authorizations
if chall_type == "http-01":
cleanup = do_http_challenges(client, authzs)
elif chall_type == "dns-01":
cleanup = do_dns_challenges(client, authzs)
elif chall_type == "tls-alpn-01":
cleanup = do_tlsalpn_challenges(client, authzs)
else:
raise Exception("invalid challenge type %s" % chall_type)
try:
order = client.poll_and_finalize(order)
if cert_output is not None:
with open(cert_output, "w") as f:
f.write(order.fullchain_pem)
finally:
cleanup()
return order
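# Illustrative use (domains are placeholders):
# order = auth_and_issue(["foo.com", "bar.com"], chall_type="http-01")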
def do_dns_challenges(client, authzs):
cleanup_hosts = []
for a in authzs:
c = get_chall(a, challenges.DNS01)
name, value = (c.validation_domain_name(a.body.identifier.value),
c.validation(client.net.key))
cleanup_hosts.append(name)
challSrv.add_dns01_response(name, value)
client.answer_challenge(c, c.response(client.net.key))
def cleanup():
for host in cleanup_hosts:
challSrv.remove_dns01_response(host)
return cleanup
def do_http_challenges(client, authzs):
cleanup_tokens = []
challs = [get_chall(a, challenges.HTTP01) for a in authzs]
for chall_body in challs:
# Determine the token and key auth for the challenge
token = chall_body.chall.encode("token")
resp = chall_body.response(client.net.key)
keyauth = resp.key_authorization
# Add the HTTP-01 challenge response for this token/key auth to the
# challtestsrv
challSrv.add_http01_response(token, keyauth)
cleanup_tokens.append(token)
        # Then proceed to initiate the challenges with the ACME server
client.answer_challenge(chall_body, chall_body.response(client.net.key))
def cleanup():
# Cleanup requires removing each of the HTTP-01 challenge responses for
# the tokens we added.
for token in cleanup_tokens:
challSrv.remove_http01_response(token)
return cleanup
def do_tlsalpn_challenges(client, authzs):
cleanup_hosts = []
for a in authzs:
c = get_chall(a, challenges.TLSALPN01)
name, value = (a.body.identifier.value, c.key_authorization(client.net.key))
cleanup_hosts.append(name)
challSrv.add_tlsalpn01_response(name, value)
client.answer_challenge(c, c.response(client.net.key))
def cleanup():
for host in cleanup_hosts:
challSrv.remove_tlsalpn01_response(host)
return cleanup
def METHOD_NAME(problem_type, func):
"""Run a function. If it raises an acme_errors.ValidationError or messages.Error that
contains the given problem_type, return. If it raises no error or the wrong
error, raise an exception."""
ok = False
try:
func()
except messages.Error as e:
if e.typ == problem_type:
ok = True
else:
raise Exception("Expected %s, got %s" % (problem_type, e.__str__()))
except acme_errors.ValidationError as e:
for authzr in e.failed_authzrs:
for chall in authzr.body.challenges:
error = chall.error
if error and error.typ == problem_type:
ok = True
elif error:
raise Exception("Expected %s, got %s" % (problem_type, error.__str__()))
if not ok:
raise Exception('Expected %s, got no error' % problem_type)
if __name__ == "__main__":
# Die on SIGINT
signal.signal(signal.SIGINT, signal.SIG_DFL)
domains = sys.argv[1:]
if len(domains) == 0:
print(__doc__)
sys.exit(0)
try:
auth_and_issue(domains)
except messages.Error as e:
print(e)
sys.exit(1) |
5,968 | cam cls seg | import torch
import torch.nn.functional as F
from annotator.mmpkg.mmcv.cnn import ConvModule, Scale
from torch import nn
from annotator.mmpkg.mmseg.core import add_prefix
from ..builder import HEADS
from ..utils import SelfAttentionBlock as _SelfAttentionBlock
from .decode_head import BaseDecodeHead
class PAM(_SelfAttentionBlock):
"""Position Attention Module (PAM)
Args:
in_channels (int): Input channels of key/query feature.
channels (int): Output channels of key/query transform.
"""
def __init__(self, in_channels, channels):
super(PAM, self).__init__(
key_in_channels=in_channels,
query_in_channels=in_channels,
channels=channels,
out_channels=in_channels,
share_key_query=False,
query_downsample=None,
key_downsample=None,
key_query_num_convs=1,
key_query_norm=False,
value_out_num_convs=1,
value_out_norm=False,
matmul_norm=False,
with_out=False,
conv_cfg=None,
norm_cfg=None,
act_cfg=None)
self.gamma = Scale(0)
def forward(self, x):
"""Forward function."""
out = super(PAM, self).forward(x, x)
out = self.gamma(out) + x
return out
class CAM(nn.Module):
"""Channel Attention Module (CAM)"""
def __init__(self):
super(CAM, self).__init__()
self.gamma = Scale(0)
def forward(self, x):
"""Forward function."""
batch_size, channels, height, width = x.size()
proj_query = x.view(batch_size, channels, -1)
proj_key = x.view(batch_size, channels, -1).permute(0, 2, 1)
energy = torch.bmm(proj_query, proj_key)
energy_new = torch.max(
energy, -1, keepdim=True)[0].expand_as(energy) - energy
attention = F.softmax(energy_new, dim=-1)
proj_value = x.view(batch_size, channels, -1)
out = torch.bmm(attention, proj_value)
out = out.view(batch_size, channels, height, width)
out = self.gamma(out) + x
return out
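# In effect, CAM builds a channel-affinity (Gram) matrix over the flattened
# spatial positions (energy = X @ X.T), forms attention as
# softmax(max(energy) - energy), and re-mixes channels through a
# zero-initialized residual scale (gamma).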
@HEADS.register_module()
class DAHead(BaseDecodeHead):
"""Dual Attention Network for Scene Segmentation.
This head is the implementation of `DANet
<https://arxiv.org/abs/1809.02983>`_.
Args:
pam_channels (int): The channels of Position Attention Module(PAM).
"""
def __init__(self, pam_channels, **kwargs):
super(DAHead, self).__init__(**kwargs)
self.pam_channels = pam_channels
self.pam_in_conv = ConvModule(
self.in_channels,
self.channels,
3,
padding=1,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg,
act_cfg=self.act_cfg)
self.pam = PAM(self.channels, pam_channels)
self.pam_out_conv = ConvModule(
self.channels,
self.channels,
3,
padding=1,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg,
act_cfg=self.act_cfg)
self.pam_conv_seg = nn.Conv2d(
self.channels, self.num_classes, kernel_size=1)
self.cam_in_conv = ConvModule(
self.in_channels,
self.channels,
3,
padding=1,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg,
act_cfg=self.act_cfg)
self.cam = CAM()
self.cam_out_conv = ConvModule(
self.channels,
self.channels,
3,
padding=1,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg,
act_cfg=self.act_cfg)
self.cam_conv_seg = nn.Conv2d(
self.channels, self.num_classes, kernel_size=1)
def pam_cls_seg(self, feat):
"""PAM feature classification."""
if self.dropout is not None:
feat = self.dropout(feat)
output = self.pam_conv_seg(feat)
return output
def METHOD_NAME(self, feat):
"""CAM feature classification."""
if self.dropout is not None:
feat = self.dropout(feat)
output = self.cam_conv_seg(feat)
return output
def forward(self, inputs):
"""Forward function."""
x = self._transform_inputs(inputs)
pam_feat = self.pam_in_conv(x)
pam_feat = self.pam(pam_feat)
pam_feat = self.pam_out_conv(pam_feat)
pam_out = self.pam_cls_seg(pam_feat)
cam_feat = self.cam_in_conv(x)
cam_feat = self.cam(cam_feat)
cam_feat = self.cam_out_conv(cam_feat)
cam_out = self.METHOD_NAME(cam_feat)
feat_sum = pam_feat + cam_feat
pam_cam_out = self.cls_seg(feat_sum)
return pam_cam_out, pam_out, cam_out
def forward_test(self, inputs, img_metas, test_cfg):
"""Forward function for testing, only ``pam_cam`` is used."""
return self.forward(inputs)[0]
def losses(self, seg_logit, seg_label):
"""Compute ``pam_cam``, ``pam``, ``cam`` loss."""
pam_cam_seg_logit, pam_seg_logit, cam_seg_logit = seg_logit
loss = dict()
loss.update(
add_prefix(
super(DAHead, self).losses(pam_cam_seg_logit, seg_label),
'pam_cam'))
loss.update(
add_prefix(
super(DAHead, self).losses(pam_seg_logit, seg_label), 'pam'))
loss.update(
add_prefix(
super(DAHead, self).losses(cam_seg_logit, seg_label), 'cam'))
return loss |
5,969 | initialize test shared folders | import logging
import os
import shlex
import shutil
import subprocess
from datetime import date, timedelta
from django.conf import settings
from django.contrib.auth.models import User
from django.test import TestCase
from core.models import Job, Event
from core.serializers import EventSerializer, JobSerializer
logger = logging.getLogger(__name__)
class JobFactory:
def __init__(self, submitter):
self.submitter = submitter
def get_default_data(self):
return {
"title": "PostDoc in ABM",
"description": "PostDoc in ABM at ASU",
"submitter": self.submitter,
}
def create(self, **overrides):
job = self.create_unsaved(**overrides)
job.save()
return job
def create_unsaved(self, **overrides):
kwargs = self.get_default_data()
kwargs.update(overrides)
return Job(**kwargs)
def data_for_create_request(self, **overrides):
job = self.create(**overrides)
return JobSerializer(job).data
class EventFactory:
def __init__(self, submitter):
self.submitter = submitter
def get_default_data(self):
return {
"title": "CoMSES Conference",
"description": "Online Conference",
"location": "Your computer",
"submitter": self.submitter,
"start_date": date.today() + timedelta(days=1),
}
def create(self, **overrides):
event = self.create_unsaved(**overrides)
event.save()
return event
def create_unsaved(self, **overrides):
kwargs = self.get_default_data()
kwargs.update(**overrides)
return Event(**kwargs)
def data_for_create_request(self, **overrides):
event = self.create(**overrides)
return EventSerializer(event).data
def make_user(
username="test_user",
password="default.testing.password",
email="comses.test@mailinator.com",
):
factory = UserFactory()
return factory.create(username=username, password=password, email=email), factory
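# Typical use in a test (sketch; values are placeholders):
# user, factory = make_user(username="alice", password="not.a.real.password")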
class UserFactory:
def __init__(self, **defaults):
if not defaults.get("password"):
defaults["password"] = "test"
self.id = 0
self.password = defaults.get("password")
self.defaults = {}
username = defaults.get("username")
if username:
self.defaults.update({"username": username})
email = defaults.get("email")
if email:
self.defaults.update({"email": email})
def extract_password(self, overrides):
if overrides.get("password"):
return overrides.pop("password")
else:
return self.password
def get_default_data(self):
defaults = self.defaults.copy()
defaults["username"] = defaults.get("username", "submitter{}".format(self.id))
self.id += 1
return defaults
def create(self, **overrides):
user = self.create_unsaved(**overrides)
user.save()
return user
def create_unsaved(self, **overrides):
password = self.extract_password(overrides)
kwargs = self.get_default_data()
kwargs.update(overrides)
if not kwargs.get("email"):
kwargs["email"] = "{}@gmail.com".format(kwargs["username"])
user = User(**kwargs)
if password:
user.set_password(password)
return user
class BaseModelTestCase(TestCase):
def setUp(self):
self.user = self.create_user()
def create_user(self, username="test_user", password="test", **kwargs):
kwargs.setdefault("email", "testuser@mailinator.com")
return User.objects.create_user(username=username, password=password, **kwargs)
def METHOD_NAME():
for d in [
settings.LIBRARY_ROOT,
settings.REPOSITORY_ROOT,
settings.BACKUP_ROOT,
settings.MEDIA_ROOT,
]:
os.makedirs(d, exist_ok=True)
subprocess.run(
shlex.split("borg init --encryption=none {}".format(settings.BORG_ROOT)),
check=True,
)
def destroy_test_shared_folders():
shutil.rmtree(settings.SHARE_DIR, ignore_errors=True) |
5,970 | test graph fold bn | #
# -*- coding: utf-8 -*-
#
import unittest
import tensorflow as tf
from tensorflow.core.framework import graph_pb2
from tensorflow.python.framework import dtypes
from neural_compressor.adaptor.tf_utils.graph_rewriter.generic.fold_batch_norm import FoldBatchNormNodesOptimizer
from neural_compressor.adaptor.tf_utils.quantize_graph_common import QuantizeGraphHelper
from neural_compressor.adaptor.tf_utils.util import disable_random
class TestGraphFoldBNWithInvalidParameter(unittest.TestCase):
@disable_random()
def METHOD_NAME(self):
input_constant_name = "input_constant"
relu_name = "relu"
float_graph_def = graph_pb2.GraphDef()
input_constant = QuantizeGraphHelper.create_constant_node(
input_constant_name, value=[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12], dtype=dtypes.float32, shape=[1, 2, 6, 1]
)
float_graph_def.node.extend([input_constant])
relu_node = QuantizeGraphHelper.create_node("Relu", relu_name, [input_constant_name])
QuantizeGraphHelper.set_attr_dtype(relu_node, "T", dtypes.float32)
float_graph_def.node.extend([relu_node])
b_constant_name = "b_constant"
conv2d_name = "conv2d_1"
b_constant = QuantizeGraphHelper.create_constant_node(
b_constant_name, value=[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12], dtype=dtypes.float32, shape=[1, 2, 3, 4]
)
float_graph_def.node.extend([b_constant])
conv2d_node = QuantizeGraphHelper.create_node("Conv2D", conv2d_name, [relu_name, b_constant_name])
QuantizeGraphHelper.set_attr_dtype(conv2d_node, "T", dtypes.float32)
float_graph_def.node.extend([conv2d_node])
bias_add_name = "bias_add"
offset_constant_name = "offset_constant"
offset_constant = QuantizeGraphHelper.create_constant_node(
offset_constant_name, value=[1, 2, 3, 4, 5, 6], dtype=dtypes.float32, shape=[6]
)
float_graph_def.node.extend([offset_constant])
bias_add_node = QuantizeGraphHelper.create_node("BiasAdd", bias_add_name, [conv2d_name, offset_constant_name])
QuantizeGraphHelper.set_attr_dtype(bias_add_node, "T", dtypes.float32)
float_graph_def.node.extend([bias_add_node])
bn_scale_name = "bn_scale"
bn_scale_node = QuantizeGraphHelper.create_constant_node(
bn_scale_name, value=[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12], dtype=dtypes.float32, shape=[12, 1]
)
bn_offset_name = "bn_offset"
bn_offset_node = QuantizeGraphHelper.create_constant_node(
bn_offset_name, value=[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12], dtype=dtypes.float32, shape=[12, 1]
)
bn_mean_name = "bn_mean"
bn_mean_node = QuantizeGraphHelper.create_constant_node(
bn_mean_name,
value=[
1,
2,
],
dtype=dtypes.float32,
shape=[
2,
],
)
bn_var_name = "bn_var"
bn_var_node = QuantizeGraphHelper.create_constant_node(bn_var_name, value=[], dtype=dtypes.float32, shape=[0])
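        # The batch-norm parameters above are deliberately inconsistent (scale
        # and offset of length 12, a mean of length 2, an empty variance), so
        # the optimizer should skip folding and leave FusedBatchNormV3 intact.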
fused_bn_node_name = "bn"
fused_bn_node = QuantizeGraphHelper.create_node(
"FusedBatchNormV3",
fused_bn_node_name,
[bias_add_name, bn_scale_name, bn_offset_name, bn_mean_name, bn_var_name],
)
QuantizeGraphHelper.set_attr_dtype(fused_bn_node, "T", dtypes.float32)
QuantizeGraphHelper.set_attr_dtype(fused_bn_node, "U", dtypes.float32)
float_graph_def.node.extend([fused_bn_node, bn_scale_node, bn_offset_node, bn_mean_node, bn_var_node])
post_relu_name = "post_relu"
post_relu_node = QuantizeGraphHelper.create_node("Relu", post_relu_name, [fused_bn_node_name])
float_graph_def.node.extend([post_relu_node])
post_graph = FoldBatchNormNodesOptimizer(float_graph_def).do_transformation()
bn_not_fused = False
for i in post_graph.node:
if i.op == "FusedBatchNormV3":
bn_not_fused = True
break
self.assertEqual(bn_not_fused, True)
if __name__ == "__main__":
unittest.main() |
5,971 | test comparator parser | """ Tests for validation"""
def test_comparatorLexer():
from processor.comparison.comparisonantlr.comparatorLexer import comparatorLexer
val = comparatorLexer()
assert val is not None
def test_comparatorListener():
from antlr4 import InputStream, ParseTreeWalker
from antlr4 import CommonTokenStream
from processor.comparison.comparisonantlr.comparatorLexer import comparatorLexer
from processor.comparison.comparisonantlr.comparatorParser import comparatorParser
from processor.comparison.comparisonantlr.comparatorListener import comparatorListener
input_stream = InputStream('exist({1}.location)')
lexer = comparatorLexer(input_stream)
stream = CommonTokenStream(lexer)
parser = comparatorParser(stream)
tree = parser.expression()
printer = comparatorListener()
walker = ParseTreeWalker()
print(walker.walk(printer, tree))
def METHOD_NAME():
from antlr4 import InputStream
from antlr4 import CommonTokenStream
from processor.comparison.comparisonantlr.comparatorLexer import comparatorLexer
from processor.comparison.comparisonantlr.comparatorParser import comparatorParser
from processor.comparison.interpreter import RuleInterpreter
vals = [
'count({1}.firewall.rules[] + {2}.firewall.rules[]) = 13',
'count({1}.firewall.rules[]) + count({2}.firewall.rules[]) = 13',
'count({1}.firewall.rules[] + {2}.firewall.rules[]) > 13',
'count({1}.firewall.rules[] + {2}.firewall.rules[]) < 13',
'count({1}.firewall.rules[] + {2}.firewall.rules[]) >= 13',
'count({1}.firewall.rules[] + {2}.firewall.rules[]) <= 13',
'count({1}.firewall.rules[] + {2}.firewall.rules[]) != 13',
'count({1}.firewall.rules[]) = count({2}.firewall.rules[])',
"{2}.properties.cost=2.34",
"{2}.properties.addressSpace={'addressPrefixes': ['172.18.116.0/23']}",
"{1}.[0].name=abcd",
"{1}.['name' = 'abcd'].location = 'eastus2'",
'{1}.dns.ip = 1.2.4.5',
'{1}.dns.ip = 1.2.4.5/32',
'{1}.location = [1,2,4]',
"{2}.properties.dhcpOptions.dnsServers[]+{3}.properties.dhcpOptions.dnsServers[]=['172.18.96.214', '172.18.96.216', '172.18.96.214', '172.18.96.216']",
'count(count(count({1}.location.name[0]))+count(count({2}.location.name[0])))= 13',
"{1}.firewall.rules['name' = 'rule1'].port = {2}.firewall.rules['name' = 'rule1'].port",
'count({1}.firewall.rules[]) = count({2}.firewall.rules[])',
'count(count({1}.firewall.rules[]) + count({1}.firewall.rules[])) = 13',
'exist({1}.location)',
'exist({1}.location) = TRUE',
'exist({1}.location) = true',
'exist({1}.location) = FALSE',
'exist({1}.location) = false',
'exist({1}[0].location)',
'exist({1}.firewall.location)',
'exist({1}.firewall.rules[])',
'count({1}.firewall.rules[]) != 13',
'count({1}.firewall.rules[]) = 13',
'{1}.firewall.port = 443',
"{1}.location = 'eastus2'",
'exist({1}.location) = FAlSE',
'{1}.firewall.port = 443',
"{1}.firewall.rules['name' = 'rule1'].port = 443",
"{1}.firewall.port = {2}.firewall.port",
'{1}.firewall.rules[0].port = {2}.firewall.port',
'exist({1}[0].location)',
"exist({1}['name' = 'abc'])",
"{1}.firewall.rules['name' = 'abc'].port = {2}.firewall.port",
"{1}.firewall.rules['name' = 'abc'].ports[2].port = {2}.firewall.port",
"{1}.firewall.cost = 443.25",
"{1}[0].location = 'eastus2'",
]
for line in vals:
code = line.rstrip()
# print('#' * 75)
# print('Actual Rule: ', code)
inputStream = InputStream(code)
lexer = comparatorLexer(inputStream)
stream = CommonTokenStream(lexer)
parser = comparatorParser(stream)
tree = parser.expression()
# print(tree.toStringTree(recog=parser))
children = []
for child in tree.getChildren():
children.append((child.getText()))
assert len(children) > 0
# print('*' * 50)
# print("All the parsed tokens: ", children)
r_i = RuleInterpreter(children)
assert r_i is not None
|
5,972 | validate details | from enum import Enum
from typing import Any, Dict, List, Optional, Union
from fideslang.validation import FidesKey
from pydantic import Extra, ValidationError, root_validator, validator
from pydantic.main import BaseModel
from fides.api.schemas.api import BulkResponse, BulkUpdateFailed
class ResponseFormat(Enum):
"""Response formats"""
json = "json"
csv = "csv"
html = "html"
class FileNaming(Enum):
"""File naming options for data uploads"""
request_id = "request_id"
class StorageDetails(Enum):
"""Enum for storage detail keys"""
# s3-specific
BUCKET = "bucket"
NAMING = "naming"
MAX_RETRIES = "max_retries"
AUTH_METHOD = "auth_method"
class Config:
"""Restrict adding other fields through this schema."""
extra = Extra.forbid
class FileBasedStorageDetails(BaseModel):
"""A base class for all storage configuration that uses a file system."""
naming: str = FileNaming.request_id.value # How to name the uploaded file
class Config:
"""Restrict adding other fields through this schema."""
extra = Extra.forbid
class S3AuthMethod(Enum):
AUTOMATIC = "automatic"
SECRET_KEYS = "secret_keys"
class StorageDetailsS3(FileBasedStorageDetails):
"""The details required to represent an AWS S3 storage bucket."""
auth_method: S3AuthMethod
bucket: str
max_retries: Optional[int] = 0
class Config:
use_enum_values = True
class StorageDetailsLocal(FileBasedStorageDetails):
"""The details required to configurate local storage configuration"""
class StorageSecrets(Enum):
"""Enum for storage secret keys"""
# s3-specific
AWS_ACCESS_KEY_ID = "aws_access_key_id"
AWS_SECRET_ACCESS_KEY = "aws_secret_access_key"
class StorageSecretsLocal(BaseModel):
"""A dummy schema for allowing any / no secrets for local filestorage."""
class Config:
"""Restrict adding other fields through this schema."""
extra = Extra.allow
class StorageSecretsS3(BaseModel):
"""The secrets required to connect to an S3 bucket."""
aws_access_key_id: str
aws_secret_access_key: str
class Config:
"""Restrict adding other fields through this schema."""
extra = Extra.forbid
class StorageType(Enum):
"""Enum for storage destination types"""
s3 = "s3"
gcs = "gcs"
transcend = "transcend"
ethyca = "ethyca"
local = "local" # local should be used for testing only, not for processing real-world privacy requests
FULLY_CONFIGURED_STORAGE_TYPES = (
StorageType.s3,
) # storage types that are considered "fully configured"
class StorageDestinationBase(BaseModel):
"""Storage Destination Schema -- used for setting defaults"""
type: StorageType
details: Union[
StorageDetailsS3,
StorageDetailsLocal,
]
format: Optional[ResponseFormat] = ResponseFormat.json.value # type: ignore
class Config:
use_enum_values = True
orm_mode = True
extra = Extra.forbid
@validator("details", pre=True, always=True)
@classmethod
def validate_details_validator(
cls,
v: Dict[str, str],
values: Dict[str, Any],
) -> Dict[str, str]:
"""
Custom validation logic for the `details` field.
"""
storage_type = values.get("type")
if not storage_type:
raise ValueError("A `type` field must be specified.")
return cls.METHOD_NAME(v, storage_type)
@classmethod
def METHOD_NAME(
cls,
details: Dict[str, str],
storage_type: str,
) -> Dict[str, str]:
"""
Validates the provided storage details field given the storage type.
Abstracts out the pydantic input parameters to make the validation logic more reusable.
"""
try:
schema = {
StorageType.s3.value: StorageDetailsS3,
StorageType.local.value: StorageDetailsLocal,
}[storage_type]
except KeyError:
raise ValueError(
f"`storage_type` {storage_type} has no supported `details` validation."
)
try:
schema.parse_obj(details) # type: ignore
except ValidationError as exc:
# Pydantic requires validators raise either a ValueError, TypeError, or AssertionError
# so this exception is cast into a `ValueError`.
errors = [f"{err['msg']} {str(err['loc'])}" for err in exc.errors()]
raise ValueError(errors)
return details
@root_validator
@classmethod
def format_validator(cls, values: Dict[str, Any]) -> Dict[str, Any]:
"""
Custom validation to ensure that local destination formats are valid.
"""
restricted_destinations = [StorageType.local.value]
storage_type = values.get("type")
response_format = values.get("format")
if (
storage_type in restricted_destinations
and response_format
and response_format
not in [ResponseFormat.json.value, ResponseFormat.html.value]
):
raise ValueError(
"Only JSON or HTML upload format are supported for local storage destinations."
)
return values
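# Usage sketch (not part of the original module; the bucket name and values
# are illustrative only):
#
# dest = StorageDestinationBase(
#     type=StorageType.s3.value,
#     details={"auth_method": "secret_keys", "bucket": "my-bucket"},
# )
# # A local destination with a csv format would fail format_validator:
# # StorageDestinationBase(type="local", details={}, format="csv")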
class StorageDestination(StorageDestinationBase):
"""Storage Destination Schema"""
name: str
key: Optional[FidesKey]
class Config:
use_enum_values = True
orm_mode = True
class StorageDestinationResponse(BaseModel):
"""Storage Destination Response Schema"""
name: str
type: StorageType
details: Dict[StorageDetails, Any]
key: FidesKey
format: ResponseFormat
is_default: bool = False
class Config:
orm_mode = True
use_enum_values = True
class BulkPutStorageConfigResponse(BulkResponse):
"""Schema with mixed success/failure responses for Bulk Create/Update of StorageConfig."""
succeeded: List[StorageDestinationResponse]
failed: List[BulkUpdateFailed]
SUPPORTED_STORAGE_SECRETS = StorageSecretsS3
class StorageConfigStatus(Enum):
"""Enum for configuration statuses of a storage config"""
configured = "configured"
not_configured = "not configured"
class StorageConfigStatusMessage(BaseModel):
"""A schema for checking configuration status of storage config."""
config_status: Optional[StorageConfigStatus] = None
detail: Optional[str] = None |
5,973 | cmake args | # Copyright 2013-2023 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
import re
from spack.package import *
class RocmDbgapi(CMakePackage):
"""The AMD Debugger API is a library that provides all the support
necessary for a debugger and other tools to perform low level
control of the execution and inspection of execution state of
AMD's commercially available GPU architectures."""
homepage = "https://github.com/ROCm-Developer-Tools/ROCdbgapi"
git = "https://github.com/ROCm-Developer-Tools/ROCdbgapi.git"
url = "https://github.com/ROCm-Developer-Tools/ROCdbgapi/archive/rocm-5.5.0.tar.gz"
tags = ["rocm"]
maintainers("srekolam", "renjithravindrankannath")
libraries = ["librocm-dbgapi"]
version("master", branch="amd-master")
version("5.5.1", sha256="c41dfc62591bcf42003fe744d8bd03a51311d54e4b012f946ca0ede0c14dd977")
version("5.5.0", sha256="ce572340a3fe99e4f1538eb614933153456003f8dfe9306a5735cdd25b451e25")
version("5.4.3", sha256="d647c9121a50f2c54367c567d8f39a145cb135e1ceed931581659f57f49f61e5")
version("5.4.0", sha256="895eb7056864daada40c3f9cd37645b0bdf4b6dc408b5f8cc974fc4cd9ab7ccb")
version("5.3.3", sha256="3c81cb23fe671d391557a63c13b6a13d4dc367db5cb5de55592a6758284d8a3f")
version("5.3.0", sha256="afffec78e34fe70952cd41efc3d7ba8f64e43acb2ad20aa35c9b8b591bed48ca")
version("5.2.3", sha256="17925d23f614ecb2b40dffe5e14535cba380d4f489ea1a027762c356be9fbc2b")
version("5.2.1", sha256="169e3914ebd99d6a5c034c568964b7bad56611262e292f77c0c65a7708e02376")
version("5.2.0", sha256="44f0528a7583bc59b6585166d2289970b20115c4c70e3bcc218aff19fc242b3f")
version("5.1.3", sha256="880f80ebf741e3451676837f720551e02cffd0b9346ca4dfa6cf7f7043282f2b")
version("5.1.0", sha256="406db4b20bda12f6f32cbef88b03110aa001bf7bef6676f36e909b53c8354e43")
version(
"5.0.2",
sha256="b7554dfe96bda6c2ee762ad6e3e5f91f0f52b5a525e3fb29d5e1fe6f003652b5",
deprecated=True,
)
version(
"5.0.0",
sha256="cff72d7fe43ff791c4117fe87d57314cbebdbcb70002a0411b8a44761012a495",
deprecated=True,
)
version(
"4.5.2",
sha256="9fa574e8389ef69d116caf714af2f938777e0aeeaadd7fad451cf5d2e6699c6e",
deprecated=True,
)
version(
"4.5.0",
sha256="583bbf18df593f376c4cc70f25b68c191bd38fde20a336c0f5c8e5d85fda2fcf",
deprecated=True,
)
version(
"4.3.1",
sha256="dddf2549ad6bb806f7e5d5a5336f5a00fe87a124f2a778be18ec4dc41f891912",
deprecated=True,
)
version(
"4.3.0",
sha256="4255d83d218bb0db8be9fef18e03a955ea1c6de1c635c31685ee5fc1540ddde6",
deprecated=True,
)
version(
"4.2.0",
sha256="fcdee5aaf5ed40c0377ce007a2947da9e718eeee86ca3e13192ff9e96a1b7373",
deprecated=True,
)
version(
"4.1.0",
sha256="d04fd9b2005691313547c4134b027b56b0ec6089f67d3bccbdb8fb1c92cde9bd",
deprecated=True,
)
version(
"4.0.0",
sha256="e87f31b3a22861397eb62d8363dd1e153596097ccfe68c6eefc1a83a2432ae18",
deprecated=True,
)
version(
"3.10.0",
sha256="89a8d352d59e4c0dc13160b1bf1f4bc3bfec5af544050030aa619b1ff88f1850",
deprecated=True,
)
version(
"3.9.0",
sha256="d1553f89d2b0419304ea82ed2b97abdc323c2fed183f0e119da1a72416a48136",
deprecated=True,
)
version(
"3.8.0",
sha256="760ff77c6578f3548f367a8bd3dda8680b7519f6b20216755105b87785d1e3f8",
deprecated=True,
)
version(
"3.7.0",
sha256="bdeaf81ea8a0ac861a697e435c72cbe767c291638be43f0d09116ad605dfee4f",
deprecated=True,
)
version(
"3.5.0",
sha256="eeba0592bc79b90e5b874bba18fd003eab347e8a3cc80343708f8d19e047e87b",
deprecated=True,
)
depends_on("cmake@3:", type="build")
depends_on("hwdata", when="@5.5.0:")
for ver in [
"3.5.0",
"3.7.0",
"3.8.0",
"3.9.0",
"3.10.0",
"4.0.0",
"4.1.0",
"4.2.0",
"4.3.0",
"4.3.1",
"4.5.0",
"4.5.2",
"5.0.0",
"5.0.2",
"5.1.0",
"5.1.3",
"5.2.0",
"5.2.1",
"5.2.3",
"5.3.0",
"5.3.3",
"5.4.0",
"5.4.3",
"5.5.0",
"5.5.1",
"master",
]:
depends_on("hsa-rocr-dev@" + ver, type="build", when="@" + ver)
depends_on("comgr@" + ver, type=("build", "link"), when="@" + ver)
for ver in ["5.5.0", "5.5.1"]:
depends_on("rocm-core@" + ver, when="@" + ver)
@classmethod
def determine_version(cls, lib):
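# The version is encoded in the trailing SONAME digits as one major digit,
# two minor digits and two patch digits, e.g. ".so.0.68.50401" -> "5.4.1"
# (encoding assumed from the regex below).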
match = re.search(r"lib\S*\.so\.\d+\.\d+\.(\d)(\d\d)(\d\d)", lib)
if match:
ver = "{0}.{1}.{2}".format(
int(match.group(1)), int(match.group(2)), int(match.group(3))
)
else:
ver = None
return ver
def patch(self):
filter_file(
r"(<INSTALL_INTERFACE:include>)",
r"\1 {0}/include".format(self.spec["hsa-rocr-dev"].prefix),
"CMakeLists.txt",
)
def METHOD_NAME(self):
args = []
if self.spec.satisfies("@5.3.0:"):
args.append("-DCMAKE_INSTALL_LIBDIR=lib")
return args |
5,974 | format | import datetime
import enum
import sys
from _typeshed import Unused
from collections.abc import Iterable, Sequence
from time import struct_time
from typing import ClassVar
from typing_extensions import Literal, TypeAlias
__all__ = [
"IllegalMonthError",
"IllegalWeekdayError",
"setfirstweekday",
"firstweekday",
"isleap",
"leapdays",
"weekday",
"monthrange",
"monthcalendar",
"prmonth",
"month",
"prcal",
"calendar",
"timegm",
"month_name",
"month_abbr",
"day_name",
"day_abbr",
"Calendar",
"TextCalendar",
"HTMLCalendar",
"LocaleTextCalendar",
"LocaleHTMLCalendar",
"weekheader",
]
if sys.version_info >= (3, 10):
__all__ += ["FRIDAY", "MONDAY", "SATURDAY", "SUNDAY", "THURSDAY", "TUESDAY", "WEDNESDAY"]
if sys.version_info >= (3, 12):
__all__ += [
"Day",
"Month",
"JANUARY",
"FEBRUARY",
"MARCH",
"APRIL",
"MAY",
"JUNE",
"JULY",
"AUGUST",
"SEPTEMBER",
"OCTOBER",
"NOVEMBER",
"DECEMBER",
]
_LocaleType: TypeAlias = tuple[str | None, str | None]
class IllegalMonthError(ValueError):
def __init__(self, month: int) -> None: ...
class IllegalWeekdayError(ValueError):
def __init__(self, weekday: int) -> None: ...
def isleap(year: int) -> bool: ...
def leapdays(y1: int, y2: int) -> int: ...
def weekday(year: int, month: int, day: int) -> int: ...
def monthrange(year: int, month: int) -> tuple[int, int]: ...
class Calendar:
firstweekday: int
def __init__(self, firstweekday: int = 0) -> None: ...
def getfirstweekday(self) -> int: ...
def setfirstweekday(self, firstweekday: int) -> None: ...
def iterweekdays(self) -> Iterable[int]: ...
def itermonthdates(self, year: int, month: int) -> Iterable[datetime.date]: ...
def itermonthdays2(self, year: int, month: int) -> Iterable[tuple[int, int]]: ...
def itermonthdays(self, year: int, month: int) -> Iterable[int]: ...
def monthdatescalendar(self, year: int, month: int) -> list[list[datetime.date]]: ...
def monthdays2calendar(self, year: int, month: int) -> list[list[tuple[int, int]]]: ...
def monthdayscalendar(self, year: int, month: int) -> list[list[int]]: ...
def yeardatescalendar(self, year: int, width: int = 3) -> list[list[int]]: ...
def yeardays2calendar(self, year: int, width: int = 3) -> list[list[tuple[int, int]]]: ...
def yeardayscalendar(self, year: int, width: int = 3) -> list[list[int]]: ...
def itermonthdays3(self, year: int, month: int) -> Iterable[tuple[int, int, int]]: ...
def itermonthdays4(self, year: int, month: int) -> Iterable[tuple[int, int, int, int]]: ...
class TextCalendar(Calendar):
def prweek(self, theweek: int, width: int) -> None: ...
def formatday(self, day: int, weekday: int, width: int) -> str: ...
def formatweek(self, theweek: int, width: int) -> str: ...
def formatweekday(self, day: int, width: int) -> str: ...
def formatweekheader(self, width: int) -> str: ...
def formatmonthname(self, theyear: int, themonth: int, width: int, withyear: bool = True) -> str: ...
def prmonth(self, theyear: int, themonth: int, w: int = 0, l: int = 0) -> None: ...
def formatmonth(self, theyear: int, themonth: int, w: int = 0, l: int = 0) -> str: ...
def formatyear(self, theyear: int, w: int = 2, l: int = 1, c: int = 6, m: int = 3) -> str: ...
def pryear(self, theyear: int, w: int = 0, l: int = 0, c: int = 6, m: int = 3) -> None: ...
def firstweekday() -> int: ...
def monthcalendar(year: int, month: int) -> list[list[int]]: ...
def prweek(theweek: int, width: int) -> None: ...
def week(theweek: int, width: int) -> str: ...
def weekheader(width: int) -> str: ...
def prmonth(theyear: int, themonth: int, w: int = 0, l: int = 0) -> None: ...
def month(theyear: int, themonth: int, w: int = 0, l: int = 0) -> str: ...
def calendar(theyear: int, w: int = 2, l: int = 1, c: int = 6, m: int = 3) -> str: ...
def prcal(theyear: int, w: int = 0, l: int = 0, c: int = 6, m: int = 3) -> None: ...
class HTMLCalendar(Calendar):
cssclasses: ClassVar[list[str]]
cssclass_noday: ClassVar[str]
cssclasses_weekday_head: ClassVar[list[str]]
cssclass_month_head: ClassVar[str]
cssclass_month: ClassVar[str]
cssclass_year: ClassVar[str]
cssclass_year_head: ClassVar[str]
def formatday(self, day: int, weekday: int) -> str: ...
def formatweek(self, theweek: int) -> str: ...
def formatweekday(self, day: int) -> str: ...
def formatweekheader(self) -> str: ...
def formatmonthname(self, theyear: int, themonth: int, withyear: bool = True) -> str: ...
def formatmonth(self, theyear: int, themonth: int, withyear: bool = True) -> str: ...
def formatyear(self, theyear: int, width: int = 3) -> str: ...
def formatyearpage(
self, theyear: int, width: int = 3, css: str | None = "calendar.css", encoding: str | None = None
) -> str: ...
class different_locale:
def __init__(self, locale: _LocaleType) -> None: ...
def __enter__(self) -> None: ...
def __exit__(self, *args: Unused) -> None: ...
class LocaleTextCalendar(TextCalendar):
def __init__(self, firstweekday: int = 0, locale: _LocaleType | None = None) -> None: ...
class LocaleHTMLCalendar(HTMLCalendar):
def __init__(self, firstweekday: int = 0, locale: _LocaleType | None = None) -> None: ...
def formatweekday(self, day: int) -> str: ...
def formatmonthname(self, theyear: int, themonth: int, withyear: bool = True) -> str: ...
c: TextCalendar
def setfirstweekday(firstweekday: int) -> None: ...
def METHOD_NAME(cols: int, colwidth: int = 20, spacing: int = 6) -> str: ...
def formatstring(cols: int, colwidth: int = 20, spacing: int = 6) -> str: ...
def timegm(tuple: tuple[int, ...] | struct_time) -> int: ...
# Data attributes
day_name: Sequence[str]
day_abbr: Sequence[str]
month_name: Sequence[str]
month_abbr: Sequence[str]
if sys.version_info >= (3, 12):
class Month(enum.IntEnum):
JANUARY: Literal[1]
FEBRUARY: Literal[2]
MARCH: Literal[3]
APRIL: Literal[4]
MAY: Literal[5]
JUNE: Literal[6]
JULY: Literal[7]
AUGUST: Literal[8]
SEPTEMBER: Literal[9]
OCTOBER: Literal[10]
NOVEMBER: Literal[11]
DECEMBER: Literal[12]
JANUARY = Month.JANUARY
FEBRUARY = Month.FEBRUARY
MARCH = Month.MARCH
APRIL = Month.APRIL
MAY = Month.MAY
JUNE = Month.JUNE
JULY = Month.JULY
AUGUST = Month.AUGUST
SEPTEMBER = Month.SEPTEMBER
OCTOBER = Month.OCTOBER
NOVEMBER = Month.NOVEMBER
DECEMBER = Month.DECEMBER
class Day(enum.IntEnum):
MONDAY: Literal[0]
TUESDAY: Literal[1]
WEDNESDAY: Literal[2]
THURSDAY: Literal[3]
FRIDAY: Literal[4]
SATURDAY: Literal[5]
SUNDAY: Literal[6]
MONDAY = Day.MONDAY
TUESDAY = Day.TUESDAY
WEDNESDAY = Day.WEDNESDAY
THURSDAY = Day.THURSDAY
FRIDAY = Day.FRIDAY
SATURDAY = Day.SATURDAY
SUNDAY = Day.SUNDAY
else:
MONDAY: Literal[0]
TUESDAY: Literal[1]
WEDNESDAY: Literal[2]
THURSDAY: Literal[3]
FRIDAY: Literal[4]
SATURDAY: Literal[5]
SUNDAY: Literal[6]
EPOCH: Literal[1970] |
5,975 | kwargs row | """
psycopg row factories
"""
# Copyright (C) 2021 The Psycopg Team
import functools
from typing import Any, Callable, Dict, List, Optional, NamedTuple, NoReturn
from typing import TYPE_CHECKING, Sequence, Tuple, Type, TypeVar
from collections import namedtuple
from typing_extensions import TypeAlias
from . import pq
from . import errors as e
from ._compat import Protocol
from ._encodings import _as_python_identifier
if TYPE_CHECKING:
from .cursor import BaseCursor, Cursor
from .cursor_async import AsyncCursor
from psycopg.pq.abc import PGresult
COMMAND_OK = pq.ExecStatus.COMMAND_OK
TUPLES_OK = pq.ExecStatus.TUPLES_OK
SINGLE_TUPLE = pq.ExecStatus.SINGLE_TUPLE
T = TypeVar("T", covariant=True)
# Row factories
Row = TypeVar("Row", covariant=True)
class RowMaker(Protocol[Row]):
"""
Callable protocol taking a sequence of values and returning an object.
The sequence of values is what is returned from a database query, already
adapted to the right Python types. The return value is the object that your
program would like to receive: by default (`tuple_row()`) it is a simple
tuple, but it may be any type of object.
Typically, `!RowMaker` functions are returned by `RowFactory`.
"""
def __call__(self, __values: Sequence[Any]) -> Row:
...
class RowFactory(Protocol[Row]):
"""
Callable protocol taking a `~psycopg.Cursor` and returning a `RowMaker`.
A `!RowFactory` is typically called when a `!Cursor` receives a result.
This way it can inspect the cursor state (for instance the
`~psycopg.Cursor.description` attribute) and help a `!RowMaker` to create
a complete object.
For instance the `dict_row()` `!RowFactory` uses the names of the column to
define the dictionary key and returns a `!RowMaker` function which would
use the values to create a dictionary for each record.
"""
def __call__(self, __cursor: "Cursor[Any]") -> RowMaker[Row]:
...
class AsyncRowFactory(Protocol[Row]):
"""
Like `RowFactory`, taking an async cursor as argument.
"""
def __call__(self, __cursor: "AsyncCursor[Any]") -> RowMaker[Row]:
...
class BaseRowFactory(Protocol[Row]):
"""
Like `RowFactory`, taking either type of cursor as argument.
"""
def __call__(self, __cursor: "BaseCursor[Any, Any]") -> RowMaker[Row]:
...
TupleRow: TypeAlias = Tuple[Any, ...]
"""
An alias for the type returned by `tuple_row()` (i.e. a tuple of any content).
"""
DictRow: TypeAlias = Dict[str, Any]
"""
An alias for the type returned by `dict_row()`.
A `!DictRow` is a dictionary with string keys and any values returned by the
database.
"""
def tuple_row(cursor: "BaseCursor[Any, Any]") -> "RowMaker[TupleRow]":
r"""Row factory to represent rows as simple tuples.
This is the default factory, used when `~psycopg.Connection.connect()` or
`~psycopg.Connection.cursor()` are called without a `!row_factory`
parameter.
"""
# Implementation detail: make sure this is the tuple type itself, not an
# equivalent function, because the C code fast-paths on it.
return tuple
def dict_row(cursor: "BaseCursor[Any, Any]") -> "RowMaker[DictRow]":
"""Row factory to represent rows as dictionaries.
The dictionary keys are taken from the column names of the returned columns.
"""
names = _get_names(cursor)
if names is None:
return no_result
def dict_row_(values: Sequence[Any]) -> Dict[str, Any]:
return dict(zip(names, values))
return dict_row_
def namedtuple_row(
cursor: "BaseCursor[Any, Any]",
) -> "RowMaker[NamedTuple]":
"""Row factory to represent rows as `~collections.namedtuple`.
The field names are taken from the column names of the returned columns,
with some mangling to deal with invalid names.
"""
res = cursor.pgresult
if not res:
return no_result
nfields = _get_nfields(res)
if nfields is None:
return no_result
nt = _make_nt(cursor._encoding, *(res.fname(i) for i in range(nfields)))
return nt._make
@functools.lru_cache(512)
def _make_nt(enc: str, *names: bytes) -> Type[NamedTuple]:
snames = tuple(_as_python_identifier(n.decode(enc)) for n in names)
return namedtuple("Row", snames) # type: ignore[return-value]
def class_row(cls: Type[T]) -> BaseRowFactory[T]:
r"""Generate a row factory to represent rows as instances of the class `!cls`.
The class must support every output column name as a keyword parameter.
:param cls: The class to return for each row. It must support the fields
returned by the query as keyword arguments.
:rtype: `!Callable[[Cursor],` `RowMaker`\[~T]]
"""
def class_row_(cursor: "BaseCursor[Any, Any]") -> "RowMaker[T]":
names = _get_names(cursor)
if names is None:
return no_result
def class_row__(values: Sequence[Any]) -> T:
return cls(**dict(zip(names, values)))
return class_row__
return class_row_
def args_row(func: Callable[..., T]) -> BaseRowFactory[T]:
"""Generate a row factory calling `!func` with positional parameters for every row.
:param func: The function to call for each row. It must support the fields
returned by the query as positional arguments.
"""
def args_row_(cur: "BaseCursor[Any, T]") -> "RowMaker[T]":
def args_row__(values: Sequence[Any]) -> T:
return func(*values)
return args_row__
return args_row_
def METHOD_NAME(func: Callable[..., T]) -> BaseRowFactory[T]:
"""Generate a row factory calling `!func` with keyword parameters for every row.
:param func: The function to call for each row. It must support the fields
returned by the query as keyword arguments.
"""
def kwargs_row_(cursor: "BaseCursor[Any, T]") -> "RowMaker[T]":
names = _get_names(cursor)
if names is None:
return no_result
def kwargs_row__(values: Sequence[Any]) -> T:
return func(**dict(zip(names, values)))
return kwargs_row__
return kwargs_row_
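# Usage sketch (not part of the original module; METHOD_NAME is `kwargs_row`
# in upstream psycopg, and the DSN/query below are placeholders):
#
# import psycopg
#
# def make_point(x: int, y: int) -> tuple:
#     return (x, y)
#
# factory = METHOD_NAME(make_point)
# with psycopg.connect("dbname=test", row_factory=factory) as conn:
#     conn.execute("SELECT 1 AS x, 2 AS y").fetchone()  # -> (1, 2)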
def no_result(values: Sequence[Any]) -> NoReturn:
"""A `RowMaker` that always fail.
It can be used as return value for a `RowFactory` called with no result.
Note that the `!RowFactory` *will* be called with no result, but the
resulting `!RowMaker` never should.
"""
raise e.InterfaceError("the cursor doesn't have a result")
def _get_names(cursor: "BaseCursor[Any, Any]") -> Optional[List[str]]:
res = cursor.pgresult
if not res:
return None
nfields = _get_nfields(res)
if nfields is None:
return None
enc = cursor._encoding
return [
res.fname(i).decode(enc) for i in range(nfields) # type: ignore[union-attr]
]
def _get_nfields(res: "PGresult") -> Optional[int]:
"""
Return the number of columns in a result if it returns tuples, else None.
Take into account the special case of results with zero columns.
"""
nfields = res.nfields
if (
res.status == TUPLES_OK
or res.status == SINGLE_TUPLE
# "describe" in named cursors
or (res.status == COMMAND_OK and nfields)
):
return nfields
else:
return None |
5,976 | get scp base command | """
Cloudflared Integration tests
"""
import unittest
import subprocess
import os
import tempfile
from contextlib import contextmanager
from pexpect import pxssh
class TestSSHBase(unittest.TestCase):
"""
SSH test base class containing constants and helper funcs
"""
HOSTNAME = os.environ["SSH_HOSTNAME"]
SSH_USER = os.environ["SSH_USER"]
SSH_TARGET = f"{SSH_USER}@{HOSTNAME}"
AUTHORIZED_KEYS_SSH_CONFIG = os.environ["AUTHORIZED_KEYS_SSH_CONFIG"]
SHORT_LIVED_CERT_SSH_CONFIG = os.environ["SHORT_LIVED_CERT_SSH_CONFIG"]
SSH_OPTIONS = {"StrictHostKeyChecking": "no"}
@classmethod
def get_ssh_command(cls, pty=True):
"""
Return ssh command arg list. If pty is true, a PTY is forced for the session.
"""
cmd = [
"ssh",
"-o",
"StrictHostKeyChecking=no",
"-F",
cls.AUTHORIZED_KEYS_SSH_CONFIG,
cls.SSH_TARGET,
]
if not pty:
cmd += ["-T"]
else:
cmd += ["-tt"]
return cmd
@classmethod
@contextmanager
def ssh_session_manager(cls, *args, **kwargs):
"""
Context manager for interacting with a pxssh session.
Disables pty echo on the remote server and ensures session is terminated afterward.
"""
session = pxssh.pxssh(options=cls.SSH_OPTIONS)
session.login(
cls.HOSTNAME,
username=cls.SSH_USER,
original_prompt=r"[#@$]",
ssh_config=kwargs.get("ssh_config", cls.AUTHORIZED_KEYS_SSH_CONFIG),
ssh_tunnels=kwargs.get("ssh_tunnels", {}),
)
try:
session.sendline("stty -echo")
session.prompt()
yield session
finally:
session.logout()
@staticmethod
def get_command_output(session, cmd):
"""
Executes command on remote ssh server and waits for prompt.
Returns command output
"""
session.sendline(cmd)
session.prompt()
return session.before.decode().strip()
def exec_command(self, cmd, shell=False):
"""
Executes command locally. Raises Assertion error for non-zero return code.
Returns stdout and stderr
"""
proc = subprocess.Popen(
cmd, stderr=subprocess.PIPE, stdout=subprocess.PIPE, shell=shell
)
raw_out, raw_err = proc.communicate()
out = raw_out.decode()
err = raw_err.decode()
self.assertEqual(proc.returncode, 0, msg=f"stdout: {out} stderr: {err}")
return out.strip(), err.strip()
class TestSSHCommandExec(TestSSHBase):
"""
Tests inline ssh command exec
"""
# Name of file to be downloaded over SCP on remote server.
REMOTE_SCP_FILENAME = os.environ["REMOTE_SCP_FILENAME"]
@classmethod
def METHOD_NAME(cls):
return [
"scp",
"-o",
"StrictHostKeyChecking=no",
"-v",
"-F",
cls.AUTHORIZED_KEYS_SSH_CONFIG,
]
@unittest.skip(
"This creates files on the remote. Should be skipped until server is dockerized."
)
def test_verbose_scp_sink_mode(self):
with tempfile.NamedTemporaryFile() as fl:
self.exec_command(
self.METHOD_NAME() + [fl.name, f"{self.SSH_TARGET}:"]
)
def test_verbose_scp_source_mode(self):
with tempfile.TemporaryDirectory() as tmpdirname:
self.exec_command(
self.METHOD_NAME()
+ [f"{self.SSH_TARGET}:{self.REMOTE_SCP_FILENAME}", tmpdirname]
)
local_filename = os.path.join(tmpdirname, self.REMOTE_SCP_FILENAME)
self.assertTrue(os.path.exists(local_filename))
self.assertTrue(os.path.getsize(local_filename) > 0)
def test_pty_command(self):
base_cmd = self.get_ssh_command()
out, _ = self.exec_command(base_cmd + ["whoami"])
self.assertEqual(out.strip().lower(), self.SSH_USER.lower())
out, _ = self.exec_command(base_cmd + ["tty"])
self.assertNotEqual(out, "not a tty")
def test_non_pty_command(self):
base_cmd = self.get_ssh_command(pty=False)
out, _ = self.exec_command(base_cmd + ["whoami"])
self.assertEqual(out.strip().lower(), self.SSH_USER.lower())
out, _ = self.exec_command(base_cmd + ["tty"])
self.assertEqual(out, "not a tty")
class TestSSHShell(TestSSHBase):
"""
Tests interactive SSH shell
"""
# File path to a file on the remote server with root only read privileges.
ROOT_ONLY_TEST_FILE_PATH = os.environ["ROOT_ONLY_TEST_FILE_PATH"]
def test_ssh_pty(self):
with self.ssh_session_manager() as session:
# Test shell launched as correct user
username = self.get_command_output(session, "whoami")
self.assertEqual(username.lower(), self.SSH_USER.lower())
# Test USER env variable set
user_var = self.get_command_output(session, "echo $USER")
self.assertEqual(user_var.lower(), self.SSH_USER.lower())
# Test HOME env variable set to true user home.
home_env = self.get_command_output(session, "echo $HOME")
pwd = self.get_command_output(session, "pwd")
self.assertEqual(pwd, home_env)
# Test shell launched in correct user home dir.
self.assertIn(username, pwd)
# Ensure shell launched with correct user's permissions and privs.
# Can't read root owned 0700 files.
output = self.get_command_output(
session, f"cat {self.ROOT_ONLY_TEST_FILE_PATH}"
)
self.assertIn("Permission denied", output)
def test_short_lived_cert_auth(self):
with self.ssh_session_manager(
ssh_config=self.SHORT_LIVED_CERT_SSH_CONFIG
) as session:
username = self.get_command_output(session, "whoami")
self.assertEqual(username.lower(), self.SSH_USER.lower())
if __name__ == "__main__":
unittest.main() |
5,977 | get proc with parent | #!/usr/bin/env python
#
# Copyright (c) Greenplum Inc 2008. All Rights Reserved.
#
"""
TODO: docs
"""
import os
from qautils.gppylib.gplog import *
from qautils.gppylib.gparray import *
from base import *
from unix import *
logger = get_default_logger()
GPHOME=os.environ.get('GPHOME')
#----------------------- postgresql.conf ----------------------
#TODO: what functions?
#----------------------- pg_hba.conf ----------------------
#TODO: set of functions related to pg_hba.conf including:
# - reading it in
# - writing it out
# - appending to it.
#----------------------- Basic PG maintenance ----------------------
#TODO: set of functions related to basic pg maintenance:
# - initdb
# - pg_ctl
# - pg_config
# - pg_controldata
#-------------initdb---------------------
class InitDB(Command):
def __init__(self,name,db,ctxt=LOCAL,remoteHost=None):
self.db=db
self.cmdStr="$GPHOME/bin/initdb %s" % (db.getSegmentDataDirectory())
Command.__init__(self,name,self.cmdStr,ctxt,remoteHost)
@staticmethod
def local(name,db):
cmd=InitDB(name,db)
cmd.run(validateAfter=True)
@staticmethod
def remote(name,db,host):
cmd=InitDB(name,db,ctxt=REMOTE,remoteHost=host)
cmd.run(validateAfter=True)
class DbStatus(Command):
def __init__(self,name,db,ctxt=LOCAL,remoteHost=None):
self.db=db
self.cmdStr="$GPHOME/bin/pg_ctl -D %s status" % (db.getSegmentDataDirectory())
Command.__init__(self,name,self.cmdStr,ctxt,remoteHost)
def is_running(self):
if self.results.rc != 0:
return False
elif self.results.stdout.lower().find('no server running') != -1:
return False
else:
return True
@staticmethod
def local(name,db):
cmd=DbStatus(name,db)
cmd.run(validateAfter=False)
return cmd.is_running()
@staticmethod
def remote(name,db,remoteHost):
cmd=DbStatus(name,db,ctxt=REMOTE,remoteHost=remoteHost)
cmd.run(validateAfter=False)
return cmd.is_running()
class ReloadDbConf(Command):
def __init__(self,name,db,ctxt=LOCAL,remoteHost=None):
self.db=db
cmdStr="$GPHOME/bin/pg_ctl reload -D %s" % (db.getSegmentDataDirectory())
Command.__init__(self,name,cmdStr,ctxt,remoteHost)
@staticmethod
def local(name,db):
cmd=ReloadDbConf(name,db)
cmd.run(validateAfter=True)
return cmd
class ReadPostmasterTempFile(Command):
def __init__(self,name,port,ctxt=LOCAL,remoteHost=None):
self.port=port
self.cmdStr="cat /tmp/.s.PGSQL.%s.lock" % port
Command.__init__(self,name,self.cmdStr,ctxt,remoteHost)
def validate(self):
if not self.results.completed or self.results.halt:
raise ExecutionError("Command did not complete successfully rc: %d" % self.results.rc, self)
def getResults(self):
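# The lock file mirrors postmaster.pid: the first whitespace-separated
# token is the postmaster PID, the second the data directory.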
if self.results.stderr.find("No such file or directory") != -1:
return (False,-1,None)
if self.results.stdout is None:
return (False,-2,None)
lines = self.results.stdout.split()
if len(lines) < 2:
return (False,-3,None)
PID=int(self.results.stdout.split()[0])
datadir = self.results.stdout.split()[1]
return (True,PID,datadir)
@staticmethod
def local(name,port):
cmd=ReadPostmasterTempFile(name,port)
cmd.run(validateAfter=True)
return cmd
@staticmethod
def remote(name,port,host):
cmd=ReadPostmasterTempFile(name,port,ctxt=REMOTE,remoteHost=host)
cmd.run(validateAfter=True)
return cmd
def METHOD_NAME(host,targetParentPID,procname):
""" returns (parentPID,procPID) tuple for the procname with the specified parent """
cmdStr="ps -ef | grep '%s' | grep -v grep" % (procname)
cmd=Command("ps",cmdStr,ctxt=REMOTE,remoteHost=host)
cmd.run(validateAfter=True)
sout=cmd.get_results().stdout
logger.info(cmd.get_results().printResult())
if sout is None:
return (0,0)
lines=sout.split('\n')
for line in lines:
if line == '':
continue
fields=line.lstrip(' ').split()
if len(fields) < 3:
logger.info("not enough fields line: '%s'" % line)
return (0,0)
procPID=int(line.split()[1])
parentPID=int(line.split()[2])
if parentPID == targetParentPID:
return (parentPID,procPID)
logger.info("couldn't find process with name: %s which is a child of PID: %s" % (procname,targetParentPID))
return (0,0)
def getPostmasterPID(db):
datadir = db.getSegmentDataDirectory()
hostname = db.getSegmentHostName()
cmdStr="ps -ef | grep 'postgres -D %s' | grep -v grep" % datadir
name="get postmaster"
cmd=Command(name,cmdStr,ctxt=REMOTE,remoteHost=hostname)
cmd.run(validateAfter=True)
logger.critical(cmd.cmdStr)
logger.critical(cmd.get_results().printResult())
sout=cmd.get_results().stdout.lstrip(' ')
return int(sout.split()[1])
def killPostmaster(db,signal):
killPgProc(db,"postmaster",signal)
def getSeqServerPID(db):
postmaster_pid=getPostmasterPID(db)
hostname=db.getSegmentHostName()
return METHOD_NAME(hostname,postmaster_pid,"seqserver")
def killSeqServer(db,signal):
return killPgProc(db,"seqserver",signal)
def getBgWriterPID(db):
postmaster_pid=getPostmasterPID(db)
hostname=db.getSegmentHostName()
return METHOD_NAME(hostname,postmaster_pid,"postgres: writer process")
def killBgWriter(db,signal):
return killPgProc(db, "postgres: writer process",signal)
def getStatsCollectorPID(db):
postmaster_pid=getPostmasterPID(db)
hostname=db.getSegmentHostName()
return METHOD_NAME(hostname,postmaster_pid,"postgres: stats collector")
def killStatsCollector(db,signal):
return killPgProc(db,"postgres: stats collector",signal)
def getWALSendServerPID(db):
postmaster_pid=getPostmasterPID(db)
hostname=db.getSegmentHostName()
return METHOD_NAME(hostname,postmaster_pid,"postgres: WAL Send Server")
def killWALSendServer(db,signal):
return killPgProc(db,"postgres: WAL Send Server",signal)
def getFTSProbePID(db):
postmaster_pid=getPostmasterPID(db)
hostname=db.getSegmentHostName()
return METHOD_NAME(hostname,postmaster_pid,"postgres: ftsprobe process")
def killFTSProbe(db,signal):
return killPgProc(db,"postgres: ftsprobe process",signal)
def killPgProc(db,procname,signal):
postmasterPID=getPostmasterPID(db)
hostname=db.getSegmentHostName()
if procname == "postmaster":
procPID = postmasterPID
parentPID = 0
else:
(parentPID,procPID)=METHOD_NAME(hostname,postmasterPID,procname)
if procPID == 0:
raise Exception("Invalid PID: '0' to kill. parent postmaster PID: %s" % postmasterPID)
cmd=Kill.remote("kill "+procname,procPID,signal,hostname)
return (parentPID,procPID)
class PgControlData(Command):
def __init__(self, name, datadir, ctxt=LOCAL, remoteHost=None):
self.datadir = datadir
self.remotehost=remoteHost
self.data = None
Command.__init__(self, name, "$GPHOME/bin/pg_controldata %s" % self.datadir, ctxt, remoteHost)
def get_value(self, name):
if not self.results:
raise Exception('Command not yet executed')
if not self.data:
self.data = {}
for l in self.results.stdout.split('\n'):
if len(l) > 0:
(n,v) = l.split(':', 1)
self.data[n.strip()] = v.strip()
return self.data[name] |
5,978 | data received | #
# This source file is part of the EdgeDB open source project.
#
# Copyright 2016-present MagicStack Inc. and the EdgeDB authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import annotations
import asyncio
import os
import socket
import struct
import typing
_uint64_unpacker = struct.Struct('!Q').unpack
_uint64_packer = struct.Struct('!Q').pack
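# Wire format (both directions): an 8-byte big-endian length prefix covering
# the request id plus payload, then an 8-byte request id, then the payload.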
class MessageStream:
"""Data stream that yields messages."""
def __init__(self):
self._buffer = b''
self._curmsg_len = -1
def feed_data(self, data):
# TODO: rewrite to avoid buffer copies.
self._buffer += data
while self._buffer:
if self._curmsg_len == -1:
if len(self._buffer) >= 8:
self._curmsg_len = _uint64_unpacker(self._buffer[:8])[0]
self._buffer = self._buffer[8:]
else:
return
if self._curmsg_len > 0 and len(self._buffer) >= self._curmsg_len:
msg = self._buffer[:self._curmsg_len]
self._buffer = self._buffer[self._curmsg_len:]
self._curmsg_len = -1
yield msg
else:
return
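# Example (sketch, not part of the original module): framing one message and
# feeding it in arbitrary chunks.
#
# stream = MessageStream()
# framed = _uint64_packer(len(b'hello')) + b'hello'
# for chunk in (framed[:3], framed[3:]):
#     for msg in stream.feed_data(chunk):
#         print(msg)  # b'hello'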
class HubProtocol(asyncio.Protocol):
"""The Protocol used on the hub side connecting to workers."""
def __init__(self, *, loop, on_pid, on_connection_lost):
self._loop = loop
self._transport = None
self._closed = False
self._stream = MessageStream()
self._resp_waiters = {}
self._on_pid = on_pid
self._on_connection_lost = on_connection_lost
self._pid = None
def connection_made(self, tr):
self._transport = tr
def send(self, req_id: int, waiter: asyncio.Future, payload: bytes):
if req_id in self._resp_waiters:
raise RuntimeError('FramedProtocol: duplicate request ID')
self._resp_waiters[req_id] = waiter
self._transport.writelines(
(_uint64_packer(len(payload) + 8), _uint64_packer(req_id), payload)
)
def process_message(self, msg):
msgview = memoryview(msg)
req_id = _uint64_unpacker(msgview[:8])[0]
waiter = self._resp_waiters.pop(req_id, None)
if waiter is None:
# This could have happened if the previous request got cancelled.
return
if not waiter.done():
waiter.set_result(msgview[8:])
def METHOD_NAME(self, data):
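# The first bytes from a worker are a 16-byte handshake: its pid followed
# by its protocol version, both big-endian uint64 (see WorkerConnection).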
if self._pid is None:
pid_data = data[:8]
version = _uint64_unpacker(data[8:16])[0]
data = data[16:]
self._pid = _uint64_unpacker(pid_data)[0]
self._on_pid(self, self._transport, self._pid, version)
for msg in self._stream.feed_data(data):
self.process_message(msg)
def connection_lost(self, exc):
self._closed = True
if self._resp_waiters:
if exc is not None:
for waiter in self._resp_waiters.values():
waiter.set_exception(exc)
else:
for waiter in self._resp_waiters.values():
waiter.set_exception(ConnectionError(
'lost connection to the worker during a call'))
self._resp_waiters = {}
self._on_connection_lost(self._pid)
class HubConnection:
"""An abstraction of the hub connections to the workers."""
def __init__(self, transport, protocol, loop, version):
self._transport = transport
self._protocol = protocol
self._loop = loop
self._req_id_cnt = 0
self._version = version
self._aborted = False
def is_closed(self):
return self._protocol._closed
async def request(self, data: bytes) -> bytes:
self._req_id_cnt += 1
req_id = self._req_id_cnt
waiter = self._loop.create_future()
self._protocol.send(req_id, waiter, data)
return await waiter
def abort(self):
self._aborted = True
self._transport.abort()
class WorkerConnection:
"""Connection object used by the worker's process."""
def __init__(self, sockname, version):
self._sock = socket.socket(socket.AF_UNIX)
self._sock.connect(sockname)
self._sock.sendall(
_uint64_packer(os.getpid()) + _uint64_packer(version)
)
self._stream = MessageStream()
def _on_message(self, msg: bytes):
msgview = memoryview(msg)
req_id = _uint64_unpacker(msgview[:8])[0]
return req_id, msgview[8:]
def reply(self, req_id, payload):
self._sock.sendall(
b"".join(
(
_uint64_packer(len(payload) + 8),
_uint64_packer(req_id),
payload,
)
)
)
def iter_request(self):
while True:
data = b'' if self._sock is None else self._sock.recv(4096)
if not data:
# EOF received - abort
self.abort()
return
yield from map(self._on_message, self._stream.feed_data(data))
def abort(self):
if self._sock is not None:
self._sock.close()
self._sock = None
class ServerProtocol:
def worker_connected(self, pid, version):
pass
def worker_disconnected(self, pid):
pass
class Server:
_proto: ServerProtocol
_pids: typing.Dict[int, HubConnection]
def __init__(self, sockname, loop, server_protocol):
self._sockname = sockname
self._loop = loop
self._srv = None
self._pids = {}
self._proto = server_protocol
def _on_pid_connected(self, proto, tr, pid, version):
assert pid not in self._pids
self._pids[pid] = HubConnection(tr, proto, self._loop, version)
self._proto.worker_connected(pid, version)
def _on_pid_disconnected(self, pid: typing.Optional[int]):
if not pid:
return
if pid in self._pids:
self._pids.pop(pid)
self._proto.worker_disconnected(pid)
def _proto_factory(self):
return HubProtocol(
loop=self._loop,
on_pid=self._on_pid_connected,
on_connection_lost=self._on_pid_disconnected,
)
def get_by_pid(self, pid):
return self._pids[pid]
async def start(self):
self._srv = await self._loop.create_unix_server(
self._proto_factory,
path=self._sockname)
async def stop(self):
self._srv.close()
await self._srv.wait_closed()
for con in self._pids.values():
con.abort()
def kill_outdated_worker(self, current_version):
for conn in self._pids.values():
if conn._version < current_version and not conn._aborted:
conn.abort()
break |
5,979 | test advocate blocks invalid urls | import re
from ipaddress import ip_network
from unittest.mock import patch
from django.core.exceptions import ValidationError
from django.test import override_settings
import httpretty as httpretty
import pytest
from baserow.contrib.database.webhooks.validators import url_validator
from baserow.test_utils.helpers import stub_getaddrinfo
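# Negative lookahead: matches (and therefore blacklists) any hostname that
# does not start with "google.com" or "www.google.com".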
URL_BLACKLIST_ONLY_ALLOWING_GOOGLE_WEBHOOKS = re.compile(r"(?!(www\.)?google\.com).*")
@httpretty.activate(verbose=True, allow_net_connect=False)
@patch("socket.getaddrinfo", wraps=stub_getaddrinfo)
def test_advocate_blocks_internal_address(mock):
httpretty.register_uri(httpretty.GET, "https://1.1.1.1/", status=200)
httpretty.register_uri(httpretty.GET, "https://2.2.2.2/", status=200)
httpretty.register_uri(httpretty.GET, "http://127.0.0.1/", status=200)
# This request should go through
url_validator("https://1.1.1.1/")
# This request should not go through
with pytest.raises(ValidationError, match="Invalid URL") as exec_info:
url_validator("http://127.0.0.1/")
@httpretty.activate(verbose=True, allow_net_connect=False)
@patch("socket.getaddrinfo", wraps=stub_getaddrinfo)
def METHOD_NAME(mock):
httpretty.register_uri(httpretty.GET, "https://1.1.1.1/", status=200)
httpretty.register_uri(httpretty.GET, "https://2.2.2.2/", status=200)
httpretty.register_uri(httpretty.GET, "http://127.0.0.1/", status=200)
# This request should go through
url_validator("https://1.1.1.1/")
# This request should not go through
with pytest.raises(ValidationError) as exec_info:
url_validator("google.com")
assert exec_info.value.code == "invalid_url"
with pytest.raises(ValidationError) as exec_info:
url_validator("127.0.0.1")
assert exec_info.value.code == "invalid_url"
@httpretty.activate(verbose=True, allow_net_connect=False)
@override_settings(BASEROW_WEBHOOKS_IP_WHITELIST=[ip_network("127.0.0.1/32")])
@patch("socket.getaddrinfo", wraps=stub_getaddrinfo)
def test_advocate_whitelist_rules(mock):
httpretty.register_uri(httpretty.GET, "http://127.0.0.1/", status=200)
httpretty.register_uri(httpretty.GET, "http://10.0.0.1/", status=200)
# This request should go through
url_validator("http://127.0.0.1/")
# Other private addresses should still blocked
with pytest.raises(ValidationError, match="Invalid URL") as exec_info:
url_validator("http://10.0.0.1/")
assert exec_info.value.code == "invalid_url"
@httpretty.activate(verbose=True, allow_net_connect=False)
@override_settings(BASEROW_WEBHOOKS_IP_BLACKLIST=[ip_network("1.1.1.1/32")])
@patch("socket.getaddrinfo", wraps=stub_getaddrinfo)
def test_advocate_blacklist_rules(mock):
httpretty.register_uri(httpretty.GET, "https://1.1.1.1", status=200)
httpretty.register_uri(httpretty.GET, "http://127.0.0.1/", status=200)
httpretty.register_uri(httpretty.GET, "https://2.2.2.2/", status=200)
# This request should not go through
with pytest.raises(ValidationError, match="Invalid URL") as exec_info:
url_validator("https://1.1.1.1/")
assert exec_info.value.code == "invalid_url"
# Private address is still blocked
with pytest.raises(ValidationError, match="Invalid URL") as exec_info:
url_validator("http://127.0.0.1/")
assert exec_info.value.code == "invalid_url"
# This request should still go through
url_validator("https://2.2.2.2/")
@httpretty.activate(verbose=True, allow_net_connect=False)
@override_settings(
BASEROW_WEBHOOKS_URL_REGEX_BLACKLIST=[re.compile(r"(?:www\.?)?google.com")]
)
@patch("socket.getaddrinfo", wraps=stub_getaddrinfo)
def test_hostname_blacklist_rules(patched_addr_info):
httpretty.register_uri(httpretty.GET, "https://google.com", status=200)
httpretty.register_uri(httpretty.GET, "http://1.1.1.1", status=200)
# The httpretty stub implementation of socket.getaddrinfo is incorrect and
# doesn't return an IP, causing advocate to fail; we patch it to fix this.
# This request should not go through
with pytest.raises(ValidationError, match="Invalid URL") as exec_info:
url_validator("https://www.google.com/")
assert exec_info.value.code == "invalid_url"
# This request should still go through
url_validator("https://www.otherdomain.com")
@httpretty.activate(verbose=True, allow_net_connect=False)
@override_settings(
BASEROW_WEBHOOKS_URL_REGEX_BLACKLIST=[URL_BLACKLIST_ONLY_ALLOWING_GOOGLE_WEBHOOKS]
)
@patch("socket.getaddrinfo", wraps=stub_getaddrinfo)
def test_hostname_blacklist_rules_only_allow_one_host(patched_addr_info):
httpretty.register_uri(httpretty.GET, "https://google.com", status=200)
httpretty.register_uri(httpretty.GET, "http://google.com", status=200)
httpretty.register_uri(httpretty.GET, "http://1.1.1.1", status=200)
httpretty.register_uri(httpretty.GET, "https://1.1.1.1", status=200)
url_validator("https://www.google.com/")
url_validator("https://google.com/")
with pytest.raises(ValidationError, match="Invalid URL") as exec_info:
url_validator("https://www.otherdomain.com")
assert exec_info.value.code == "invalid_url"
with pytest.raises(ValidationError, match="Invalid URL") as exec_info:
url_validator("https://google2.com")
assert exec_info.value.code == "invalid_url"
@httpretty.activate(verbose=True, allow_net_connect=False)
@override_settings(
BASEROW_WEBHOOKS_IP_BLACKLIST=[ip_network("1.0.0.0/8")],
BASEROW_WEBHOOKS_IP_WHITELIST=[ip_network("1.1.1.1/32")],
)
def test_advocate_combination_of_whitelist_blacklist_rules():
httpretty.register_uri(httpretty.GET, "https://1.1.1.1", status=200)
httpretty.register_uri(httpretty.GET, "https://1.1.1.2", status=200)
httpretty.register_uri(httpretty.GET, "http://127.0.0.1/", status=200)
httpretty.register_uri(httpretty.GET, "https://2.2.2.2/", status=200)
url_validator("https://1.1.1.1/")
with pytest.raises(ValidationError, match="Invalid URL") as exec_info:
url_validator("https://1.1.1.2/")
assert exec_info.value.code == "invalid_url"
# Private address is still blocked
with pytest.raises(ValidationError, match="Invalid URL") as exec_info:
url_validator("http://127.0.0.1/")
assert exec_info.value.code == "invalid_url"
# This request should still go through
url_validator("https://2.2.2.2/")
@httpretty.activate(verbose=True, allow_net_connect=False)
@override_settings(
BASEROW_WEBHOOKS_URL_REGEX_BLACKLIST=[URL_BLACKLIST_ONLY_ALLOWING_GOOGLE_WEBHOOKS],
BASEROW_WEBHOOKS_IP_BLACKLIST=[ip_network("1.0.0.0/8")],
BASEROW_WEBHOOKS_IP_WHITELIST=[ip_network("1.1.1.1/32")],
)
@patch("socket.getaddrinfo", wraps=stub_getaddrinfo)
def test_advocate_hostname_blacklist_overrides_ip_lists(
mock,
):
httpretty.register_uri(httpretty.GET, "https://1.1.1.1", status=200)
httpretty.register_uri(httpretty.GET, "https://1.1.1.2", status=200)
httpretty.register_uri(httpretty.GET, "http://127.0.0.1/", status=200)
httpretty.register_uri(httpretty.GET, "https://2.2.2.2/", status=200)
with pytest.raises(ValidationError, match="Invalid URL") as exec_info:
url_validator("https://1.1.1.1/")
assert exec_info.value.code == "invalid_url"
with pytest.raises(ValidationError, match="Invalid URL") as exec_info:
url_validator("https://1.1.1.2/")
assert exec_info.value.code == "invalid_url"
# Private address is still blocked
with pytest.raises(ValidationError, match="Invalid URL") as exec_info:
url_validator("http://127.0.0.1/")
assert exec_info.value.code == "invalid_url"
# This request should still go through
url_validator("https://www.google.com/") |
5,980 | molar mass | # This file is part of BurnMan - a thermoelastic and thermodynamic toolkit for
# the Earth and Planetary Sciences
# Copyright (C) 2012 - 2017 by the BurnMan team, released under the GNU
# GPL v2 or later.
"""
This module provides several helper minerals/materials.
"""
from __future__ import absolute_import
from __future__ import print_function
from .material import Material, material_property
from .composite import Composite
class HelperRockSwitcher(Material):
"""
A Helper that represents a Material that switches between different rocks
based on a user specified select_rock() function based on current temperature
and pressure. This class can be used in several ways:
1. By creating an instance and setting select_rock to a lambda that returns a rock
2. By deriving from this class and implementing select_rock.
"""
def __init__(self):
self.current_rock = None
Material.__init__(self)
def select_rock(self):
raise NotImplementedError("Need to implement select_rock() in derived class!")
def set_method(self, method):
raise NotImplementedError("Need to implement select_rock() in derived class!")
def debug_print(self, indent=""):
print("%sHelperRockSwitcher" % (indent))
def set_state(self, pressure, temperature):
Material.set_state(self, pressure, temperature)
self.current_rock = self.select_rock()
self.current_rock.set_state(pressure, temperature)
def unroll(self):
return self.current_rock.unroll()
@material_property
def molar_internal_energy(self):
return self.current_rock.molar_internal_energy
@material_property
def molar_gibbs(self):
return self.current_rock.molar_gibbs
@material_property
def molar_helmholtz(self):
return self.current_rock.molar_helmholtz
@material_property
def METHOD_NAME(self):
return self.current_rock.METHOD_NAME
@material_property
def molar_volume(self):
return self.current_rock.molar_volume
@material_property
def density(self):
return self.current_rock.density
@material_property
def molar_entropy(self):
return self.current_rock.molar_entropy
@material_property
def molar_enthalpy(self):
return self.current_rock.molar_enthalpy
@material_property
def isothermal_bulk_modulus(self):
return self.current_rock.isothermal_bulk_modulus
@material_property
def adiabatic_bulk_modulus(self):
return self.current_rock.adiabatic_bulk_modulus
@material_property
def isothermal_compressibility(self):
return self.current_rock.isothermal_compressibility
@material_property
def adiabatic_compressibility(self):
return self.current_rock.adiabatic_compressibility
@material_property
def shear_modulus(self):
return self.current_rock.shear_modulus
@material_property
def p_wave_velocity(self):
return self.current_rock.p_wave_velocity
@material_property
def bulk_sound_velocity(self):
return self.current_rock.bulk_sound_velocity
@material_property
def shear_wave_velocity(self):
return self.current_rock.shear_wave_velocity
@material_property
def grueneisen_parameter(self):
return self.current_rock.grueneisen_parameter
@material_property
def thermal_expansivity(self):
return self.current_rock.thermal_expansivity
@material_property
def molar_heat_capacity_v(self):
return self.current_rock.molar_heat_capacity_v
@material_property
def molar_heat_capacity_p(self):
return self.current_rock.molar_heat_capacity_p
class HelperLowHighPressureRockTransition(HelperRockSwitcher):
"""
A Helper that represents a Material that switches between two given rocks based
on a given transition pressure.
"""
def __init__(self, transition_pressure, low_pressure_rock, high_pressure_rock):
self.transition_pressure = transition_pressure
self.rocks = [low_pressure_rock, high_pressure_rock]
HelperRockSwitcher.__init__(self)
self._name = (
"HelperLowHighPressureRockTransition("
+ str(self.transition_pressure)
+ " GPa, "
+ self.rocks[0].name
+ ", "
+ self.rocks[1].name
+ ")"
)
def select_rock(self):
if self._pressure < self.transition_pressure:
return self.rocks[0]
else:
return self.rocks[1]
def set_method(self, method):
for r in self.rocks:
r.set_method(method)
def debug_print(self, indent=""):
print(
"%sHelperLowHighPressureRockTransition (%f GPa):"
% (indent, self.transition_pressure)
)
indent += " "
for r in self.rocks:
r.debug_print(indent)
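# Usage sketch (illustrative; `low_rock` and `high_rock` stand for any two
# configured BurnMan materials):
#
# rock = HelperLowHighPressureRockTransition(25.0e9, low_rock, high_rock)
# rock.set_state(30.0e9, 2000.0)  # above 25 GPa, so high_rock is evaluated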
class HelperSpinTransition(Composite):
"""
Helper class that makes a mineral that switches between two materials
(for low and high spin) based on some transition pressure [Pa]
"""
def __init__(self, transition_pressure, ls_mat, hs_mat):
"""
Takes a transition pressure and two minerals. Uses the
thermoelastic parameters of hs_mat below the transition
pressure and those of ls_mat at and above it (low spin is
the high-pressure state; see set_state below).
"""
Material.__init__(self)
self.transition_pressure = transition_pressure
self.ls_mat = ls_mat
self.hs_mat = hs_mat
Composite.__init__(self, [ls_mat, hs_mat])
def debug_print(self, indent=""):
print("%sHelperSpinTransition:" % indent)
self.ls_mat.debug_print(indent + " ")
self.hs_mat.debug_print(indent + " ")
def set_state(self, pressure, temperature):
if pressure >= self.transition_pressure:
Composite.set_fractions(self, [1.0, 0.0])
else:
Composite.set_fractions(self, [0.0, 1.0])
Composite.set_state(self, pressure, temperature) |
5,981 | load |
import copy
import glob
import os
import pickle
import sys
from GangaCore.Core.GangaRepository import RepositoryError, allRegistries
from GangaCore.GPIDev.Persistency import METHOD_NAME, stripped_export
from GangaCore.Utility.logging import getLogger
from .GangaRepository import GangaRepository
logger = getLogger()
class GangaRepositoryImmutableTransient(GangaRepository):
def __init__(self, registry, filebase, file_ext='tpl', pickle_files=False, locking=True):
"""GangaRepository constructor. Initialization should be done in startup()"""
super(GangaRepositoryImmutableTransient, self).__init__(registry)
self.filebase = filebase
self._next_id = 0
self.file_ext = file_ext
self.pickle_files = pickle_files
self.registry = registry
def startup(self):
def _readonly(): return True
# This is needed because the default registry that JobTemplates would be
# added to is the templates registry (and for Tasks, the tasks registry).
# We put the original getRegistry back after loading. Note that the
# _auto__init__ in job.py also asks for the prep registry, so we must
# still be able to return that one.
def getRegistry(name):
if name == 'prep':
return allRegistries['prep']
return self.registry
old = getattr(sys.modules['GangaCore.Core.GangaRepository'], 'getRegistry')
setattr(sys.modules['GangaCore.Core.GangaRepository'], 'getRegistry', getRegistry)
# By marking the registry as started now, the _auto__init__ from the
# JobTemplate class can call getRegistry(self.default_registry)._add.
self.registry._hasStarted = True
for f in glob.glob(os.path.join(self.filebase, '*.%s' % self.file_ext)):
current_id = self._next_id
try:
if self.pickle_files:
obj = pickle.METHOD_NAME(open(f, 'rb'))
else:
from GangaCore.GPIDev.Base.Proxy import stripProxy
obj = stripProxy(METHOD_NAME(f)[0])
except:
logger.error("Unable to load file '%s'" % f)
setattr(
sys.modules['GangaCore.Core.GangaRepository'], 'getRegistry', old)
raise
else:
obj.name = os.path.basename(f).rsplit('.', 1)[0]
# If this is not true then add() was already called from _auto__init__
# when loading the object. Note the default _auto__init__ is just `pass`.
if self._next_id == current_id:
obj.id = self._next_id
obj._registry = self.registry
obj._registry_id = self._next_id
setattr(obj, '_readonly', _readonly)
self.objects[self._next_id] = obj
self._next_id += 1
setattr(sys.modules['GangaCore.Core.GangaRepository'], 'getRegistry', old)
def updateLocksNow(self):
pass
def update_index(self, id=None):
pass
def shutdown(self):
pass
def add(self, objs, force_ids=None):
ids = []
def _readonly(): return True
for o in objs:
obj = copy.deepcopy(o)
fn = os.path.join(self.filebase, '%s.%s' %
(obj.name, self.file_ext))
try:
if self.pickle_files:
obj._registry = None
pickle.dump(obj, open(fn, 'wb'))
else:
if not stripped_export(obj, fn):
raise RepositoryError(self, 'Failure in stripped_export method, returned False')
except:
logger.error("Unable to write to file '%s'" % fn)
raise
else:
obj.id = self._next_id
obj._registry = self.registry
obj._registry_id = self._next_id
setattr(obj, '_readonly', _readonly)
self.objects[self._next_id] = obj
ids.append(self._next_id)
self._next_id += 1
return ids
def delete(self, ids):
pass
def METHOD_NAME(self, ids):
pass
def flush(self, ids):
pass
def lock(self, ids):
return True
def unlock(self, ids):
pass |
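# A minimal, self-contained sketch of the pattern used in startup() above:
# temporarily swap a module-level function via sys.modules, restoring the
# original afterwards so the patch cannot leak (names here are illustrative).
import sys

def greet(name):
    return "hello " + name

_module = sys.modules[__name__]
_original = _module.greet
_module.greet = lambda name: "patched " + name
try:
    assert greet("ganga") == "patched ganga"
finally:
    _module.greet = _original
assert greet("ganga") == "hello ganga"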
5,982 | evaluate | import logging
import time
import jax
import numpy as np
import optax
import haiku as hk
import jax.numpy as jnp
from typing import NamedTuple
import wandb
from fedml import mlops
from fedml.core import ServerAggregator
class TrainingState(NamedTuple):
params: hk.Params
avg_params: hk.Params
opt_state: optax.OptState
class JaxHaikuServerAggregator(ServerAggregator):
static_model = None
def __init__(self, model, args):
super().__init__(model, args)
JaxHaikuServerAggregator.static_model = model
self.optimizer = None
self.aggregator_state = self.init_aggregator()
def get_model_params(self):
return self.aggregator_state.params
def set_model_params(self, model_parameters):
if self.optimizer is None:
return
current_opt_state = self.optimizer.init(model_parameters)
self.aggregator_state = TrainingState(model_parameters, model_parameters, current_opt_state)
@staticmethod
def loss(params: hk.Params, x, labels) -> jnp.ndarray:
"""Cross-entropy classification loss with regularization by L2 weight decay."""
batch_size, *_ = x.shape
logits = JaxHaikuServerAggregator.static_model.model_network.apply(params, x)
labels = jax.nn.one_hot(labels, JaxHaikuServerAggregator.static_model.output_dim)
l2_regularization = 0.5 * sum(
jnp.sum(jnp.square(p)) for p in jax.tree_util.tree_leaves(params))
log_likelihood = jnp.sum(labels * jax.nn.log_softmax(logits))
return -log_likelihood / batch_size + 1e-4 * l2_regularization
@staticmethod
@jax.jit
def METHOD_NAME(params: hk.Params, x, labels) -> jnp.ndarray:
"""Evaluation metrics (classification accuracy)."""
logits = JaxHaikuServerAggregator.static_model.model_network.apply(params, x)
predictions = jnp.argmax(logits, axis=-1)
return jnp.mean(predictions == labels)
def init_aggregator(self):
if self.args.client_optimizer == "sgd":
self.optimizer = optax.sgd(learning_rate=self.args.learning_rate)
else:
self.optimizer = optax.adam(learning_rate=self.args.learning_rate)
initial_opt_state = self.optimizer.init(self.model.initial_params)
return TrainingState(self.model.initial_params, self.model.initial_params, initial_opt_state)
def _test(self, test_data, device, args):
metrics = {
"test_acc": 0,
"test_loss": 0,
"test_precision": 0,
"test_recall": 0,
"test_total": 0,
}
batch_acc = []
batch_loss = []
for batch_idx, (x, target) in enumerate(test_data):
# start_time = time.time_ns()
x = jax.device_put(x, device)
target = jax.device_put(target, device)
accuracy = np.array(
JaxHaikuServerAggregator.METHOD_NAME(self.aggregator_state.params, x, target)).item()
loss = JaxHaikuServerAggregator.loss(self.aggregator_state.params, x, target).item()
# logging.info("test consume time: {}".format(time.time_ns() - start_time))
metrics["test_total"] += target.size
batch_acc.append(accuracy)
batch_loss.append(loss)
metrics["test_acc"] = sum(batch_acc) / len(batch_acc)
metrics["test_loss"] = sum(batch_loss) / len(batch_loss)
return metrics
def test(self, test_data, device, args):
# test data
metrics = self._test(test_data, device, args)
test_acc, test_num_sample, test_loss = (
metrics["test_acc"],
metrics["test_total"],
metrics["test_loss"],
)
# test on test dataset
if self.args.enable_wandb:
wandb.log({"Test/Acc": test_acc, "round": args.round_idx})
wandb.log({"Test/Loss": test_loss, "round": args.round_idx})
mlops.log({"Test/Acc": test_acc, "round": args.round_idx})
mlops.log({"Test/Loss": test_loss, "round": args.round_idx})
stats = {"test_acc": test_acc, "test_loss": test_loss}
logging.info(stats)
def test_all(self, train_data_local_dict, test_data_local_dict, device, args) -> bool:
train_acc = 0
train_loss = 0
for client_idx in range(len(test_data_local_dict)):
# test data
metrics = self._test(test_data_local_dict[client_idx], device, args)
train_acc, train_num_sample, train_loss = (
metrics["test_acc"],
metrics["test_total"],
metrics["test_loss"],
)
# logging.info("client_idx = {}, metrics = {}".format(client_idx, metrics))
# test on training dataset
if self.args.enable_wandb:
wandb.log({"Test/Acc": train_acc, "round": args.round_idx})
wandb.log({"Test/Loss": train_loss, "round": args.round_idx})
mlops.log({"Test/Acc": train_acc, "round": args.round_idx})
mlops.log({"Test/Loss": train_loss, "round": args.round_idx})
stats = {"testing_acc": train_acc, "testing_loss": train_loss}
logging.info(stats)
return True |
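# A standalone sketch of the loss/accuracy math used by the aggregator above,
# with toy arrays in place of the Haiku network (values are illustrative):
import jax
import jax.numpy as jnp

logits = jnp.array([[2.0, 0.5], [0.1, 1.5]])   # two samples, two classes
labels = jnp.array([0, 1])
one_hot = jax.nn.one_hot(labels, 2)
log_likelihood = jnp.sum(one_hot * jax.nn.log_softmax(logits))
loss = -log_likelihood / logits.shape[0]        # mean cross-entropy
accuracy = jnp.mean(jnp.argmax(logits, axis=-1) == labels)  # == 1.0 here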
5,983 | wrapper | import asyncio
import inspect
import warnings
from dataclasses import dataclass, field
from functools import partial
from multiprocessing import Queue
from typing import Any, Callable, Dict, Optional, Tuple
from .dataclasses import KWONLY_SLOTS
from .globals import log
method_queue: Queue = Queue()
response_queue: Queue = Queue()
try:
with warnings.catch_warnings():
# webview depends on bottle which uses the deprecated CGI function (https://github.com/bottlepy/bottle/issues/1403)
warnings.filterwarnings('ignore', category=DeprecationWarning)
import webview
from webview.window import FixPoint
class WindowProxy(webview.Window):
def __init__(self) -> None: # pylint: disable=super-init-not-called
pass # NOTE we don't call super().__init__ here because this is just a proxy to the actual window
async def get_always_on_top(self) -> bool:
"""Get whether the window is always on top."""
return await self._request()
def set_always_on_top(self, on_top: bool) -> None:
"""Set whether the window is always on top."""
self._send(on_top)
async def get_size(self) -> Tuple[int, int]:
"""Get the window size as tuple (width, height)."""
return await self._request()
async def get_position(self) -> Tuple[int, int]:
"""Get the window position as tuple (x, y)."""
return await self._request()
def load_url(self, url: str) -> None:
self._send(url)
def load_html(self, content: str, base_uri: str = ...) -> None: # type: ignore
self._send(content, base_uri)
def load_css(self, stylesheet: str) -> None:
self._send(stylesheet)
def set_title(self, title: str) -> None:
self._send(title)
async def get_cookies(self) -> Any: # pylint: disable=invalid-overridden-method
return await self._request()
async def get_current_url(self) -> str: # pylint: disable=invalid-overridden-method
return await self._request()
def destroy(self) -> None:
self._send()
def show(self) -> None:
self._send()
def hide(self) -> None:
self._send()
def set_window_size(self, width: int, height: int) -> None:
self._send(width, height)
def resize(self, width: int, height: int, fix_point: FixPoint = FixPoint.NORTH | FixPoint.WEST) -> None:
self._send(width, height, fix_point)
def minimize(self) -> None:
self._send()
def restore(self) -> None:
self._send()
def toggle_fullscreen(self) -> None:
self._send()
def move(self, x: int, y: int) -> None:
self._send(x, y)
async def evaluate_js(self, script: str) -> str: # pylint: disable=arguments-differ,invalid-overridden-method
return await self._request(script)
async def create_confirmation_dialog(self, title: str, message: str) -> bool: # pylint: disable=invalid-overridden-method
return await self._request(title, message)
async def create_file_dialog( # pylint: disable=invalid-overridden-method
self,
dialog_type: int = webview.OPEN_DIALOG,
directory: str = '',
allow_multiple: bool = False,
save_filename: str = '',
file_types: Tuple[str, ...] = (),
) -> Tuple[str, ...]:
return await self._request(
dialog_type=dialog_type,
directory=directory,
allow_multiple=allow_multiple,
save_filename=save_filename,
file_types=file_types,
)
def expose(self, function: Callable) -> None: # pylint: disable=arguments-differ
raise NotImplementedError(f'exposing "{function}" is not supported')
def _send(self, *args: Any, **kwargs: Any) -> None:
name = inspect.currentframe().f_back.f_code.co_name # type: ignore
method_queue.put((name, args, kwargs))
async def _request(self, *args: Any, **kwargs: Any) -> Any:
def METHOD_NAME(*args: Any, **kwargs: Any) -> Any:
try:
method_queue.put((name, args, kwargs))
return response_queue.get()  # wait for the method to be called and its result to be written to the queue
except Exception:
log.exception(f'error in {name}')
return None
name = inspect.currentframe().f_back.f_code.co_name # type: ignore
return await asyncio.get_event_loop().run_in_executor(None, partial(METHOD_NAME, *args, **kwargs))
def signal_server_shutdown(self) -> None:
self._send()
except ModuleNotFoundError:
class WindowProxy: # type: ignore
pass # just a dummy if webview is not installed
@dataclass(**KWONLY_SLOTS)
class Native:
start_args: Dict[str, Any] = field(default_factory=dict)
window_args: Dict[str, Any] = field(default_factory=dict)
main_window: Optional[WindowProxy] = None |
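# A standalone sketch of the introspection trick used by _send/_request above:
# a helper recovers the *caller's* method name from the call stack, so each
# proxy method can forward its own name without repeating it.
import inspect

def caller_name():
    # f_back is the frame of whoever called caller_name()
    return inspect.currentframe().f_back.f_code.co_name

def set_title():
    return caller_name()

assert set_title() == "set_title"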
5,984 | basic ack | from __future__ import annotations
import time
from itertools import count
from typing import TYPE_CHECKING
from unittest.mock import Mock
from kombu.transport import base
from kombu.utils import json
if TYPE_CHECKING:
from types import TracebackType
class _ContextMock(Mock):
"""Dummy class implementing __enter__ and __exit__
as the :keyword:`with` statement requires these to be implemented
in the class, not just the instance."""
def __enter__(self):
return self
def __exit__(
self,
exc_type: type[BaseException] | None,
exc_val: BaseException | None,
exc_tb: TracebackType | None
) -> None:
pass
def ContextMock(*args, **kwargs):
"""Mock that mocks :keyword:`with` statement contexts."""
obj = _ContextMock(*args, **kwargs)
obj.attach_mock(_ContextMock(), '__enter__')
obj.attach_mock(_ContextMock(), '__exit__')
obj.__enter__.return_value = obj
# if __exit__ return a value the exception is ignored,
# so it must return None here.
obj.__exit__.return_value = None
return obj
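# Usage sketch: unlike a plain Mock, ContextMock works in a with-block because
# the interpreter looks __enter__/__exit__ up on the class, not the instance.
cm = ContextMock()
with cm as entered:
    assert entered is cm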
def PromiseMock(*args, **kwargs):
m = Mock(*args, **kwargs)
def on_throw(exc=None, *args, **kwargs):
if exc:
raise exc
raise
m.throw.side_effect = on_throw
m.set_error_state.side_effect = on_throw
m.throw1.side_effect = on_throw
return m
class MockPool:
def __init__(self, value=None):
self.value = value or ContextMock()
def acquire(self, **kwargs):
return self.value
class Message(base.Message):
def __init__(self, *args, **kwargs):
self.throw_decode_error = kwargs.get('throw_decode_error', False)
super().__init__(*args, **kwargs)
def decode(self):
if self.throw_decode_error:
raise ValueError("can't decode message")
return super().decode()
class Channel(base.StdChannel):
open = True
throw_decode_error = False
_ids = count(1)
def __init__(self, connection):
self.connection = connection
self.called = []
self.deliveries = count(1)
self.to_deliver = []
self.events = {'basic_return': set()}
self.channel_id = next(self._ids)
def _called(self, name):
self.called.append(name)
def __contains__(self, key):
return key in self.called
def exchange_declare(self, *args, **kwargs):
self._called('exchange_declare')
def prepare_message(self, body, priority=0, content_type=None,
content_encoding=None, headers=None, properties={}):
self._called('prepare_message')
return {'body': body,
'headers': headers,
'properties': properties,
'priority': priority,
'content_type': content_type,
'content_encoding': content_encoding}
def basic_publish(self, message, exchange='', routing_key='',
mandatory=False, immediate=False, **kwargs):
self._called('basic_publish')
return message, exchange, routing_key
def exchange_delete(self, *args, **kwargs):
self._called('exchange_delete')
def queue_declare(self, *args, **kwargs):
self._called('queue_declare')
def queue_bind(self, *args, **kwargs):
self._called('queue_bind')
def queue_unbind(self, *args, **kwargs):
self._called('queue_unbind')
def queue_delete(self, queue, if_unused=False, if_empty=False, **kwargs):
self._called('queue_delete')
def basic_get(self, *args, **kwargs):
self._called('basic_get')
try:
return self.to_deliver.pop()
except IndexError:
pass
def queue_purge(self, *args, **kwargs):
self._called('queue_purge')
def basic_consume(self, *args, **kwargs):
self._called('basic_consume')
def basic_cancel(self, *args, **kwargs):
self._called('basic_cancel')
def METHOD_NAME(self, *args, **kwargs):
self._called('basic_ack')
def basic_recover(self, requeue=False):
self._called('basic_recover')
def exchange_bind(self, *args, **kwargs):
self._called('exchange_bind')
def exchange_unbind(self, *args, **kwargs):
self._called('exchange_unbind')
def close(self):
self._called('close')
def message_to_python(self, message, *args, **kwargs):
self._called('message_to_python')
return Message(body=json.dumps(message),
channel=self,
delivery_tag=next(self.deliveries),
throw_decode_error=self.throw_decode_error,
content_type='application/json',
content_encoding='utf-8')
def flow(self, active):
self._called('flow')
def basic_reject(self, delivery_tag, requeue=False):
if requeue:
return self._called('basic_reject:requeue')
return self._called('basic_reject')
def basic_qos(self, prefetch_size=0, prefetch_count=0,
apply_global=False):
self._called('basic_qos')
class Connection:
connected = True
def __init__(self, client):
self.client = client
def channel(self):
return Channel(self)
class Transport(base.Transport):
def establish_connection(self):
return Connection(self.client)
def create_channel(self, connection):
return connection.channel()
def drain_events(self, connection, **kwargs):
return 'event'
def close_connection(self, connection):
connection.connected = False
class TimeoutingTransport(Transport):
recoverable_connection_errors = (TimeoutError,)
def __init__(self, connect_timeout=1, **kwargs):
self.connect_timeout = connect_timeout
super().__init__(**kwargs)
def establish_connection(self):
time.sleep(self.connect_timeout)
raise TimeoutError('timed out') |
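# Usage sketch: the mock Channel above records every call by name, and its
# __contains__ lets assertions about what was invoked read naturally.
_channel = Connection(client=None).channel()
_channel.queue_declare('test-queue')
_channel.basic_publish({'body': 'x'}, exchange='', routing_key='test-queue')
assert 'queue_declare' in _channel
assert 'basic_publish' in _channel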
5,985 | size to int | #!/usr/bin/env python3
# group: rw
#
# Tests for shrinking images
#
# Copyright (c) 2016-2017 Parallels International GmbH
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import os, random, iotests, struct, qcow2, sys
from iotests import qemu_img, qemu_io, image_size
test_img = os.path.join(iotests.test_dir, 'test.img')
check_img = os.path.join(iotests.test_dir, 'check.img')
def METHOD_NAME(str):
suff = ['B', 'K', 'M', 'G', 'T']
return int(str[:-1]) * 1024**suff.index(str[-1:])
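# Worked examples for the converter above (METHOD_NAME masks the original
# helper name, e.g. size_to_int):
assert METHOD_NAME('512B') == 512
assert METHOD_NAME('16M') == 16 * 1024 ** 2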
class ShrinkBaseClass(iotests.QMPTestCase):
image_len = '128M'
shrink_size = '10M'
chunk_size = '16M'
refcount_bits = '16'
def __qcow2_check(self, filename):
entry_bits = 3
entry_size = 1 << entry_bits
l1_mask = 0x00fffffffffffe00
div_roundup = lambda n, d: (n + d - 1) // d
def split_by_n(data, n):
for x in range(0, len(data), n):
yield struct.unpack('>Q', data[x:x + n])[0] & l1_mask
def check_l1_table(h, l1_data):
l1_list = list(split_by_n(l1_data, entry_size))
real_l1_size = div_roundup(h.size,
1 << (h.cluster_bits*2 - entry_bits))
used, unused = l1_list[:real_l1_size], l1_list[real_l1_size:]
self.assertTrue(len(used) != 0, "Verifying l1 table content")
self.assertFalse(any(unused), "Verifying l1 table content")
def check_reftable(fd, h, reftable):
for offset in split_by_n(reftable, entry_size):
if offset != 0:
fd.seek(offset)
cluster = fd.read(1 << h.cluster_bits)
self.assertTrue(any(cluster), "Verifying reftable content")
with open(filename, "rb") as fd:
h = qcow2.QcowHeader(fd)
fd.seek(h.l1_table_offset)
l1_table = fd.read(h.l1_size << entry_bits)
fd.seek(h.refcount_table_offset)
reftable = fd.read(h.refcount_table_clusters << h.cluster_bits)
check_l1_table(h, l1_table)
check_reftable(fd, h, reftable)
def __raw_check(self, filename):
pass
image_check = {
'qcow2' : __qcow2_check,
'raw' : __raw_check
}
def setUp(self):
if iotests.imgfmt == 'raw':
qemu_img('create', '-f', iotests.imgfmt, test_img, self.image_len)
qemu_img('create', '-f', iotests.imgfmt, check_img,
self.shrink_size)
else:
qemu_img('create', '-f', iotests.imgfmt,
'-o', 'cluster_size=' + self.cluster_size +
',refcount_bits=' + self.refcount_bits,
test_img, self.image_len)
qemu_img('create', '-f', iotests.imgfmt,
'-o', 'cluster_size=%s'% self.cluster_size,
check_img, self.shrink_size)
qemu_io('-c', 'write -P 0xff 0 ' + self.shrink_size, check_img)
def tearDown(self):
os.remove(test_img)
os.remove(check_img)
def image_verify(self):
self.assertEqual(image_size(test_img), image_size(check_img),
"Verifying image size")
self.image_check[iotests.imgfmt](self, test_img)
if iotests.imgfmt == 'raw':
return
qemu_img('check', test_img)
def test_empty_image(self):
qemu_img('resize', '-f', iotests.imgfmt, '--shrink', test_img,
self.shrink_size)
qemu_io('-c', f"read -P 0x00 0 {self.shrink_size}", test_img)
self.image_verify()
def test_sequential_write(self):
for offs in range(0, METHOD_NAME(self.image_len),
METHOD_NAME(self.chunk_size)):
qemu_io('-c', 'write -P 0xff %d %s' % (offs, self.chunk_size),
test_img)
qemu_img('resize', '-f', iotests.imgfmt, '--shrink', test_img,
self.shrink_size)
qemu_img("compare", test_img, check_img)
self.image_verify()
def test_random_write(self):
offs_list = list(range(0, METHOD_NAME(self.image_len),
METHOD_NAME(self.chunk_size)))
random.shuffle(offs_list)
for offs in offs_list:
qemu_io('-c', 'write -P 0xff %d %s' % (offs, self.chunk_size),
test_img)
qemu_img('resize', '-f', iotests.imgfmt, '--shrink', test_img,
self.shrink_size)
qemu_img("compare", test_img, check_img)
self.image_verify()
class TestShrink512(ShrinkBaseClass):
image_len = '3M'
shrink_size = '1M'
chunk_size = '256K'
cluster_size = '512'
refcount_bits = '64'
class TestShrink64K(ShrinkBaseClass):
cluster_size = '64K'
class TestShrink1M(ShrinkBaseClass):
cluster_size = '1M'
refcount_bits = '1'
ShrinkBaseClass = None  # hide the base class so unittest only runs the subclasses
if __name__ == '__main__':
iotests.main(supported_fmts=['raw', 'qcow2'],
supported_protocols=['file'],
unsupported_imgopts=['compat']) |
5,986 | mde integration | # coding=utf-8
# *** WARNING: this file was generated by pulumi. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import copy
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
__all__ = [
'GetDefenderSettingResult',
'AwaitableGetDefenderSettingResult',
'get_defender_setting',
]
@pulumi.output_type
class GetDefenderSettingResult:
"""
IoT Defender settings
"""
def __init__(__self__, device_quota=None, evaluation_end_time=None, id=None, METHOD_NAME=None, name=None, onboarding_kind=None, sentinel_workspace_resource_ids=None, type=None):
if device_quota and not isinstance(device_quota, int):
raise TypeError("Expected argument 'device_quota' to be a int")
pulumi.set(__self__, "device_quota", device_quota)
if evaluation_end_time and not isinstance(evaluation_end_time, str):
raise TypeError("Expected argument 'evaluation_end_time' to be a str")
pulumi.set(__self__, "evaluation_end_time", evaluation_end_time)
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if METHOD_NAME and not isinstance(METHOD_NAME, dict):
raise TypeError("Expected argument 'mde_integration' to be a dict")
pulumi.set(__self__, "mde_integration", METHOD_NAME)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if onboarding_kind and not isinstance(onboarding_kind, str):
raise TypeError("Expected argument 'onboarding_kind' to be a str")
pulumi.set(__self__, "onboarding_kind", onboarding_kind)
if sentinel_workspace_resource_ids and not isinstance(sentinel_workspace_resource_ids, list):
raise TypeError("Expected argument 'sentinel_workspace_resource_ids' to be a list")
pulumi.set(__self__, "sentinel_workspace_resource_ids", sentinel_workspace_resource_ids)
if type and not isinstance(type, str):
raise TypeError("Expected argument 'type' to be a str")
pulumi.set(__self__, "type", type)
@property
@pulumi.getter(name="deviceQuota")
def device_quota(self) -> int:
"""
Size of the device quota. Value is required to be in multiples of 100.
"""
return pulumi.get(self, "device_quota")
@property
@pulumi.getter(name="evaluationEndTime")
def evaluation_end_time(self) -> str:
"""
End time of the evaluation period, if one exists
"""
return pulumi.get(self, "evaluation_end_time")
@property
@pulumi.getter
def id(self) -> str:
"""
Fully qualified resource ID for the resource. Ex - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}
"""
return pulumi.get(self, "id")
@property
@pulumi.getter(name="mdeIntegration")
def METHOD_NAME(self) -> 'outputs.DefenderSettingsPropertiesResponseMdeIntegration':
"""
MDE integration configuration
"""
return pulumi.get(self, "mde_integration")
@property
@pulumi.getter
def name(self) -> str:
"""
The name of the resource
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="onboardingKind")
def onboarding_kind(self) -> str:
"""
The kind of onboarding for the subscription
"""
return pulumi.get(self, "onboarding_kind")
@property
@pulumi.getter(name="sentinelWorkspaceResourceIds")
def sentinel_workspace_resource_ids(self) -> Sequence[str]:
"""
Sentinel Workspace Resource Ids
"""
return pulumi.get(self, "sentinel_workspace_resource_ids")
@property
@pulumi.getter
def type(self) -> str:
"""
The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or "Microsoft.Storage/storageAccounts"
"""
return pulumi.get(self, "type")
class AwaitableGetDefenderSettingResult(GetDefenderSettingResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetDefenderSettingResult(
device_quota=self.device_quota,
evaluation_end_time=self.evaluation_end_time,
id=self.id,
METHOD_NAME=self.METHOD_NAME,
name=self.name,
onboarding_kind=self.onboarding_kind,
sentinel_workspace_resource_ids=self.sentinel_workspace_resource_ids,
type=self.type)
def get_defender_setting(opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetDefenderSettingResult:
"""
Get IoT Defender Settings
"""
__args__ = dict()
opts = pulumi.InvokeOptions.merge(_utilities.get_invoke_opts_defaults(), opts)
__ret__ = pulumi.runtime.invoke('azure-native:iotsecurity/v20210201preview:getDefenderSetting', __args__, opts=opts, typ=GetDefenderSettingResult).value
return AwaitableGetDefenderSettingResult(
device_quota=pulumi.get(__ret__, 'device_quota'),
evaluation_end_time=pulumi.get(__ret__, 'evaluation_end_time'),
id=pulumi.get(__ret__, 'id'),
METHOD_NAME=pulumi.get(__ret__, 'mde_integration'),
name=pulumi.get(__ret__, 'name'),
onboarding_kind=pulumi.get(__ret__, 'onboarding_kind'),
sentinel_workspace_resource_ids=pulumi.get(__ret__, 'sentinel_workspace_resource_ids'),
type=pulumi.get(__ret__, 'type')) |
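# Hedged usage sketch: the invoke below only resolves inside a running Pulumi
# program with Azure credentials configured (the export name is illustrative):
#
#     settings = get_defender_setting()
#     pulumi.export("deviceQuota", settings.device_quota)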
5,987 | test func | import json
from django.contrib.auth.mixins import UserPassesTestMixin
from django.http import Http404
from rest_framework.response import Response
from rest_framework.views import APIView
from bims.models import (
TaxonGroup, Taxonomy, BiologicalCollectionRecord,
TaxonExtraAttribute
)
def update_taxon_group_orders(taxon_group_ids):
"""
Update taxon group orders
:param taxon_group_ids: list of taxon group ids
"""
taxon_groups = TaxonGroup.objects.filter(
id__in=taxon_group_ids
)
for taxon_group in taxon_groups:
try:
order = taxon_group_ids.index(taxon_group.id)
taxon_group.display_order = order
taxon_group.save()
except ValueError:
continue
def remove_taxa_from_taxon_group(taxa_ids, taxon_group_id):
"""
Remove taxa from taxon group
:param taxa_ids: list of taxon ids
:param taxon_group_id: id of the taxon group
"""
taxa = Taxonomy.objects.filter(
id__in=taxa_ids
)
try:
taxon_group = TaxonGroup.objects.get(
id=taxon_group_id
)
except TaxonGroup.DoesNotExist:
return
for taxonomy in taxa:
taxon_group.taxonomies.remove(taxonomy)
BiologicalCollectionRecord.objects.filter(
taxonomy=taxonomy
).update(module_group=None)
def add_taxa_to_taxon_group(taxa_ids, taxon_group_id):
"""
Add taxa to taxon group
:param taxa_ids: list of taxon ids
:param taxon_group_id: id of the taxon group
"""
taxa = Taxonomy.objects.filter(
id__in=taxa_ids
)
try:
taxon_group = TaxonGroup.objects.get(
id=taxon_group_id
)
except TaxonGroup.DoesNotExist:
return
for taxonomy in taxa:
taxon_group.taxonomies.add(taxonomy)
BiologicalCollectionRecord.objects.filter(
taxonomy=taxonomy
).update(module_group=taxon_group)
class TaxaUpdateMixin(UserPassesTestMixin, APIView):
def METHOD_NAME(self):
return self.request.user.has_perm('bims.can_update_taxon_group')
class UpdateTaxonGroupOrder(TaxaUpdateMixin):
"""Api to update taxon groups order.
Post data required:
{
'taxonGroups': [1,2] // List of taxon group ids sorted by their order
}
"""
def post(self, request, *args):
taxon_groups_array = self.request.POST.get('taxonGroups', None)
if not taxon_groups_array:
raise Http404('Missing taxon groups')
taxon_group_ids = json.loads(taxon_groups_array)
update_taxon_group_orders(taxon_group_ids)
return Response('Updated')
class RemoveTaxaFromTaxonGroup(TaxaUpdateMixin):
"""Api to remove taxa from taxon group.
Post data required:
{
'taxaIds': [1,2], // List of taxa id
'taxonGroupId': 1 // id of the taxon group
}
"""
def post(self, request, *args):
taxa_ids = self.request.POST.get('taxaIds', None)
taxon_group_id = self.request.POST.get('taxonGroupId', None)
if not taxa_ids or not taxon_group_id:
raise Http404('Missing required parameter')
taxa_ids = json.loads(taxa_ids)
taxon_group_id = int(taxon_group_id)
remove_taxa_from_taxon_group(taxa_ids, taxon_group_id)
return Response(
{
'taxonomy_count': TaxonGroup.objects.get(
id=taxon_group_id
).taxonomies.all().count()
}
)
class AddTaxaToTaxonGroup(TaxaUpdateMixin):
"""Api to add taxa to taxon group.
Post data required:
{
'taxaIds': [1,2], // List of taxa id
'taxonGroupId': 1 // id of the taxon group
}
"""
def post(self, request, *args):
taxa_ids = self.request.POST.get('taxaIds', None)
taxon_group_id = self.request.POST.get('taxonGroupId', None)
if not taxa_ids or not taxon_group_id:
raise Http404('Missing required parameter')
taxa_ids = json.loads(taxa_ids)
taxon_group_id = int(taxon_group_id)
add_taxa_to_taxon_group(taxa_ids, taxon_group_id)
return Response(
{
'taxonomy_count': TaxonGroup.objects.get(
id=taxon_group_id
).taxonomies.all().count()
}
)
class UpdateTaxonGroup(TaxaUpdateMixin):
"""Api to update taxon group.
Post data required:
{
'module_id': id
}
Post data optional:
{
'module_name': 'Module' // Name of the taxon group
'module_logo': File img // Img file of the logo
}
"""
def post(self, request, *args):
module_name = self.request.POST.get('module_name', None)
module_logo = self.request.FILES.get('module_logo', None)
module_id = self.request.POST.get('module_id', None)
extra_attributes = self.request.POST.getlist('extra_attribute', [])
if not module_id:
raise Http404('Missing required parameter')
try:
taxon_group = TaxonGroup.objects.get(id=module_id)
except TaxonGroup.DoesNotExist:
raise Http404('Taxon group does not exist')
if module_name:
taxon_group.name = module_name
if module_logo:
taxon_group.logo = module_logo
TaxonExtraAttribute.objects.filter(taxon_group=taxon_group).exclude(
name__in=extra_attributes).delete()
if extra_attributes:
for extra_attribute in extra_attributes:
if not extra_attribute:
continue
try:
TaxonExtraAttribute.objects.get_or_create(
name=extra_attribute,
taxon_group=taxon_group
)
except TaxonExtraAttribute.MultipleObjectsReturned:
pass
taxon_group.save()
return Response('Updated') |
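# Hedged sketch of exercising one of the views above with DRF's request
# factory; the URL, user fixture, and permission setup are illustrative, and
# METHOD_NAME masks the UserPassesTestMixin hook (test_func) that gates access:
#
#     from rest_framework.test import APIRequestFactory, force_authenticate
#     request = APIRequestFactory().post(
#         '/api/update-taxon-group-order/', {'taxonGroups': '[2, 1]'})
#     force_authenticate(request, user=user_with_update_permission)
#     response = UpdateTaxonGroupOrder.as_view()(request)
#     assert response.data == 'Updated'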
5,988 | total rev for current deck | # Copyright: Ankitects Pty Ltd and contributors
# License: GNU AGPL, version 3 or later; http://www.gnu.org/licenses/agpl.html
# pylint: disable=invalid-name
from typing import Optional
from anki._legacy import deprecated
from anki.cards import Card, CardId
from anki.consts import (
CARD_TYPE_RELEARNING,
QUEUE_TYPE_DAY_LEARN_RELEARN,
QUEUE_TYPE_REV,
)
from anki.decks import DeckConfigDict, DeckId
from anki.notes import NoteId
from anki.scheduler.base import SchedulerBase, UnburyDeck
from anki.utils import from_json_bytes, ids2str
class SchedulerBaseWithLegacy(SchedulerBase):
"Legacy aliases and helpers. These will go away in the future."
def reschedCards(
self, card_ids: list[CardId], min_interval: int, max_interval: int
) -> None:
self.set_due_date(card_ids, f"{min_interval}-{max_interval}!")
def buryNote(self, nid: NoteId) -> None:
note = self.col.get_note(nid)
self.bury_cards(note.card_ids())
def unburyCards(self) -> None:
print("please use unbury_cards() or unbury_deck() instead of unburyCards()")
self.unbury_deck(self.col.decks.get_current_id())
def unburyCardsForDeck(self, type: str = "all") -> None:
print("please use unbury_deck() instead of unburyCardsForDeck()")
if type == "all":
mode = UnburyDeck.ALL
elif type == "manual":
mode = UnburyDeck.USER_ONLY
else: # elif type == "siblings":
mode = UnburyDeck.SCHED_ONLY
self.unbury_deck(self.col.decks.get_current_id(), mode)
def finishedMsg(self) -> str:
print("finishedMsg() is obsolete")
return ""
def _nextDueMsg(self) -> str:
print("_nextDueMsg() is obsolete")
return ""
def rebuildDyn(self, did: Optional[DeckId] = None) -> Optional[int]:
did = did or self.col.decks.selected()
count = self.rebuild_filtered_deck(did).count or None
if not count:
return None
# and change to our new deck
self.col.decks.select(did)
return count
def emptyDyn(self, did: Optional[DeckId], lim: Optional[str] = None) -> None:
if lim is None:
self.empty_filtered_deck(did)
return
queue = f"""
queue = (case when queue < 0 then queue
when type in (1,{CARD_TYPE_RELEARNING}) then
(case when (case when odue then odue else due end) > 1000000000 then 1 else
{QUEUE_TYPE_DAY_LEARN_RELEARN} end)
else
type
end)
"""
self.col.db.execute(
f"""
update cards set did = odid, {queue},
due = (case when odue>0 then odue else due end), odue = 0, odid = 0, usn = ? where {lim}""",
self.col.usn(),
)
def remFromDyn(self, cids: list[CardId]) -> None:
self.emptyDyn(None, f"id in {ids2str(cids)} and odid")
# used by v2 scheduler and some add-ons
def update_stats(
self,
deck_id: DeckId,
new_delta: int = 0,
review_delta: int = 0,
milliseconds_delta: int = 0,
) -> None:
self.col._backend.update_stats(
deck_id=deck_id,
new_delta=new_delta,
review_delta=review_delta,
millisecond_delta=milliseconds_delta,
)
def _updateStats(self, card: Card, type: str, cnt: int = 1) -> None:
did = card.did
if type == "new":
self.update_stats(did, new_delta=cnt)
elif type == "rev":
self.update_stats(did, review_delta=cnt)
elif type == "time":
self.update_stats(did, milliseconds_delta=cnt)
def deckDueTree(self) -> list:
"List of (base name, did, rev, lrn, new, children)"
print(
"deckDueTree() is deprecated; use decks.deck_tree() for a tree without counts, or sched.deck_due_tree()"
)
return from_json_bytes(self.col._backend.deck_tree_legacy())[5]
@deprecated(info="no longer used by Anki; will be removed in the future")
def METHOD_NAME(self) -> int:
assert self.col.db
return self.col.db.scalar(
f"""
select count() from cards where id in (
select id from cards where did in %s and queue = {QUEUE_TYPE_REV} and due <= ? limit 9999)"""
% self._deck_limit(),
self.today,
)
# legacy in v3 but used by unit tests; redefined in v2/v1
def _cardConf(self, card: Card) -> DeckConfigDict:
return self.col.decks.config_dict_for_deck_id(card.did)
def _fuzzIvlRange(self, ivl: int) -> tuple[int, int]:
return (ivl, ivl)
# simple aliases
unsuspendCards = SchedulerBase.unsuspend_cards
buryCards = SchedulerBase.bury_cards
suspendCards = SchedulerBase.suspend_cards
forgetCards = SchedulerBase.schedule_cards_as_new |
5,989 | step | """
Brax env integration.
"""
import sys
from typing import Dict, List, Optional, Tuple, Union
import gymnasium as gym
import numpy as np
import torch
import torch.utils.dlpack as tpack
from gymnasium.core import RenderFrame
from torch import Tensor
from sample_factory.algo.utils.gymnasium_utils import convert_space
from sample_factory.cfg.arguments import parse_full_cfg, parse_sf_args
from sample_factory.envs.env_utils import register_env
from sample_factory.train import run_rl
from sample_factory.utils.typing import Config, Env
from sample_factory.utils.utils import log, str2bool
BRAX_EVALUATION = False
torch.ones(1, device="cuda") # init torch cuda before jax
def jax_to_torch(tensor):
# noinspection PyProtectedMember
from jax._src.dlpack import to_dlpack
tensor = to_dlpack(tensor)
tensor = tpack.from_dlpack(tensor)
return tensor
def torch_to_jax(tensor):
# noinspection PyProtectedMember
from jax._src.dlpack import from_dlpack
tensor = tpack.to_dlpack(tensor)
tensor = from_dlpack(tensor)
return tensor
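# A minimal round-trip sketch for the two converters above; it needs a CUDA
# device since both frameworks must share the same GPU buffer:
def _dlpack_roundtrip_demo():
    t = torch.arange(4, device="cuda", dtype=torch.float32)
    j = torch_to_jax(t)      # hand the buffer to jax without copying
    back = jax_to_torch(j)   # and take it back as a torch tensor
    assert torch.equal(t, back)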
class BraxEnv(gym.Env):
# noinspection PyProtectedMember
def __init__(
self,
brax_env,
num_actors,
render_mode: Optional[str],
render_res: int,
clamp_actions: bool,
clamp_rew_obs: bool,
):
self.env = brax_env
self.num_agents = num_actors
self.env.closed = False
self.env.viewer = None
self.renderer = None
self.render_mode = render_mode
self.brax_video_res_px = render_res
self.clamp_actions = clamp_actions
self.clamp_rew_obs = clamp_rew_obs
if len(self.env.observation_space.shape) > 1:
observation_size = self.env.observation_space.shape[1]
action_size = self.env.action_space.shape[1]
obs_high = np.inf * np.ones(observation_size)
self.observation_space = gym.spaces.Box(-obs_high, obs_high, dtype=np.float32)
action_high = np.ones(action_size)
self.action_space = gym.spaces.Box(-action_high, action_high, dtype=np.float32)
else:
self.observation_space = convert_space(self.env.observation_space)
self.action_space = convert_space(self.env.action_space)
def reset(self, *args, **kwargs) -> Tuple[Tensor, Dict]:
log.debug(f"Resetting env {self.env} with {self.num_agents} parallel agents...")
obs = self.env.reset()
obs = jax_to_torch(obs)
log.debug(f"reset() done, obs.shape={obs.shape}!")
return obs, {}
def METHOD_NAME(self, action):
action_clipped = action
if self.clamp_actions:
action_clipped = torch.clamp(action, -1, 1)
action_clipped = torch_to_jax(action_clipped)
next_obs, reward, terminated, info = self.env.METHOD_NAME(action_clipped)
next_obs = jax_to_torch(next_obs)
reward = jax_to_torch(reward)
terminated = jax_to_torch(terminated).to(torch.bool)
truncated = jax_to_torch(info["truncation"]).to(torch.bool)
if self.clamp_rew_obs:
reward = torch.clamp(reward, -100, 100)
next_obs = torch.clamp(next_obs, -100, 100)
return next_obs, reward, terminated, truncated, info
def render(self) -> Optional[Union[RenderFrame, List[RenderFrame]]]:
if self.renderer is None:
from sf_examples.brax.brax_render import BraxRenderer
self.renderer = BraxRenderer(self.env, self.render_mode, self.brax_video_res_px)
return self.renderer.render()
def make_brax_env(full_env_name: str, cfg: Config, _env_config=None, render_mode: Optional[str] = None) -> Env:
assert (
full_env_name in env_configs.keys()
), f"Env {full_env_name} is not supported. Supported envs: {list(env_configs.keys())}"
# use a batch size greater than 1 so we don't have to deal with vector-nonvector env issues
batch_size = 64 if BRAX_EVALUATION else cfg.env_agents
from brax import envs
gym_env = envs.create_gym_env(env_name=full_env_name, batch_size=batch_size, seed=0, backend="gpu")
env = BraxEnv(gym_env, batch_size, render_mode, cfg.brax_render_res, cfg.clamp_actions, cfg.clamp_rew_obs)
return env
def add_extra_params_func(parser) -> None:
"""
Specify any additional command line arguments for this family of custom environments.
"""
p = parser
p.add_argument(
"--env_agents",
default=2048,
type=int,
help="Num. agents in a vectorized env",
)
p.add_argument(
"--clamp_actions",
default=False,
type=str2bool,
help="Clamp actions to -1,1",
)
p.add_argument(
"--clamp_rew_obs",
default=False,
type=str2bool,
help="Clamp rewards and observations to -100,100",
)
p.add_argument(
"--brax_render_res",
default=200,
type=int,
help="Brax render resolution. Software renderer is very slow so use larger resolution only for offscreen "
"video generation, i.e. with push_to_hub",
)
def override_default_params_func(env, parser):
"""Most of these parameters are the same as IsaacGymEnvs default config files."""
parser.set_defaults(
# we're using a single very vectorized env, no need to parallelize it further
batched_sampling=True,
num_workers=1,
num_envs_per_worker=1,
worker_num_splits=1,
actor_worker_gpus=[0], # obviously need a GPU
train_for_env_steps=100000000,
use_rnn=False,
adaptive_stddev=False,
policy_initialization="torch_default",
env_gpu_actions=True,
reward_scale=0.01,
max_grad_norm=1.0,
rollout=32,
batch_size=32768,
num_batches_per_epoch=2,
num_epochs=5,
ppo_clip_ratio=0.2,
ppo_clip_value=1.0,
value_loss_coeff=2.0,
exploration_loss_coeff=0.0,
nonlinearity="elu",
encoder_mlp_layers=[256, 128, 64],
actor_critic_share_weights=True,
learning_rate=3e-4,
lr_schedule="kl_adaptive_epoch",
lr_schedule_kl_threshold=0.008,
lr_adaptive_max=2e-3,
shuffle_minibatches=False,
gamma=0.99,
gae_lambda=0.95,
with_vtrace=False,
value_bootstrap=True,
normalize_input=True,
normalize_returns=True,
save_best_after=int(5e6),
serial_mode=True,
async_rl=False,
experiment_summaries_interval=3, # experiments are short so we should save summaries often
# use_env_info_cache=True, # speeds up startup
)
# override default config parameters for specific envs
if env in env_configs:
parser.set_defaults(**env_configs[env])
# custom default configuration parameters for specific envs
# add more envs here analogously (env names should match config file names in IGE)
env_configs = dict(
ant=dict(
encoder_mlp_layers=[256, 128, 64],
save_every_sec=15,
),
humanoid=dict(
encoder_mlp_layers=[512, 256, 128],
),
halfcheetah=dict(
encoder_mlp_layers=[256, 128, 64],
),
walker2d=dict(
encoder_mlp_layers=[256, 128, 64],
),
)
def register_brax_custom_components(evaluation: bool = False) -> None:
global BRAX_EVALUATION
BRAX_EVALUATION = evaluation
for env_name in env_configs:
register_env(env_name, make_brax_env)
def parse_brax_cfg(evaluation=False):
parser, partial_cfg = parse_sf_args(evaluation=evaluation)
add_extra_params_func(parser)
override_default_params_func(partial_cfg.env, parser)
final_cfg = parse_full_cfg(parser)
return final_cfg
def main():
"""Script entry point."""
register_brax_custom_components()
cfg = parse_brax_cfg()
status = run_rl(cfg)
return status
if __name__ == "__main__":
sys.exit(main()) |
5,990 | main | #!/usr/bin/env python
# coding=utf-8
# Copyright The HuggingFace Team and The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
"""
import argparse
import logging
import os
import sys
import numpy as np
import onnxruntime
import torch
from bart_onnx.generation_onnx import BARTBeamSearchGenerator
from bart_onnx.reduce_onnx_size import remove_dup_initializers
import transformers
from transformers import BartForConditionalGeneration, BartTokenizer
logging.basicConfig(
format="%(asctime)s | %(levelname)s | %(name)s | [%(filename)s:%(lineno)d] %(message)s",
datefmt="%Y-%m-%d %H:%M:%S",
level=os.environ.get("LOGLEVEL", "INFO").upper(),
stream=sys.stdout,
)
logger = logging.getLogger(__name__)
model_dict = {"facebook/bart-base": BartForConditionalGeneration}
tokenizer_dict = {"facebook/bart-base": BartTokenizer}
def parse_args():
parser = argparse.ArgumentParser(description="Export Bart model + Beam Search to ONNX graph.")
parser.add_argument(
"--validation_file", type=str, default=None, help="A csv or a json file containing the validation data."
)
parser.add_argument(
"--max_length",
type=int,
default=5,
help="The maximum total input sequence length after tokenization.",
)
parser.add_argument(
"--num_beams",
type=int,
default=None,
help=(
"Number of beams to use for evaluation. This argument will be "
"passed to ``model.generate``, which is used during ``evaluate`` and ``predict``."
),
)
parser.add_argument(
"--model_name_or_path",
type=str,
help="Path to pretrained model or model identifier from huggingface.co/models.",
required=True,
)
parser.add_argument(
"--config_name",
type=str,
default=None,
help="Pretrained config name or path if not the same as model_name",
)
parser.add_argument(
"--device",
type=str,
default="cpu",
help="Device where the model will be run",
)
parser.add_argument("--output_file_path", type=str, default=None, help="Where to store the final ONNX file.")
args = parser.parse_args()
return args
def load_model_tokenizer(model_name, device="cpu"):
huggingface_model = model_dict[model_name].from_pretrained(model_name).to(device)
tokenizer = tokenizer_dict[model_name].from_pretrained(model_name)
if model_name in ["facebook/bart-base"]:
huggingface_model.config.no_repeat_ngram_size = 0
huggingface_model.config.forced_bos_token_id = None
huggingface_model.config.min_length = 0
return huggingface_model, tokenizer
def export_and_validate_model(model, tokenizer, onnx_file_path, num_beams, max_length):
model.eval()
ort_sess = None
bart_script_model = torch.jit.script(BARTBeamSearchGenerator(model))
with torch.no_grad():
ARTICLE_TO_SUMMARIZE = "My friends are cool but they eat too many carbs."
inputs = tokenizer([ARTICLE_TO_SUMMARIZE], max_length=1024, return_tensors="pt").to(model.device)
summary_ids = model.generate(
inputs["input_ids"],
attention_mask=inputs["attention_mask"],
num_beams=num_beams,
max_length=max_length,
early_stopping=True,
decoder_start_token_id=model.config.decoder_start_token_id,
)
torch.onnx.export(
bart_script_model,
(
inputs["input_ids"],
inputs["attention_mask"],
num_beams,
max_length,
model.config.decoder_start_token_id,
),
onnx_file_path,
opset_version=14,
input_names=["input_ids", "attention_mask", "num_beams", "max_length", "decoder_start_token_id"],
output_names=["output_ids"],
dynamic_axes={
"input_ids": {0: "batch", 1: "seq"},
"output_ids": {0: "batch", 1: "seq_out"},
},
example_outputs=summary_ids,
)
logger.info("Model exported to {}".format(onnx_file_path))
new_onnx_file_path = remove_dup_initializers(os.path.abspath(onnx_file_path))
logger.info("Deduplicated and optimized model written to {}".format(new_onnx_file_path))
ort_sess = onnxruntime.InferenceSession(new_onnx_file_path)
ort_out = ort_sess.run(
None,
{
"input_ids": inputs["input_ids"].cpu().numpy(),
"attention_mask": inputs["attention_mask"].cpu().numpy(),
"num_beams": np.array(num_beams),
"max_length": np.array(max_length),
"decoder_start_token_id": np.array(model.config.decoder_start_token_id),
},
)
np.testing.assert_allclose(summary_ids.cpu().numpy(), ort_out[0], rtol=1e-3, atol=1e-3)
logger.info("Model outputs from torch and ONNX Runtime are similar.")
logger.info("Success.")
def METHOD_NAME():
args = parse_args()
max_length = 5
num_beams = 4
# Make one log on every process with the configuration for debugging.
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
datefmt="%m/%d/%Y %H:%M:%S",
level=logging.INFO,
)
logger.setLevel(logging.INFO)
transformers.utils.logging.set_verbosity_error()
device = torch.device(args.device)
model, tokenizer = load_model_tokenizer(args.model_name_or_path, device)
if model.config.decoder_start_token_id is None:
raise ValueError("Make sure that `config.decoder_start_token_id` is correctly defined")
model.to(device)
if args.max_length:
max_length = args.max_length
if args.num_beams:
num_beams = args.num_beams
if args.output_file_path:
output_name = args.output_file_path
else:
output_name = "BART.onnx"
logger.info("Exporting model to ONNX")
export_and_validate_model(model, tokenizer, output_name, num_beams, max_length)
if __name__ == "__main__":
METHOD_NAME() |
5,991 | mock list open workflow executions | from __future__ import annotations
from datetime import datetime
from simpleflow.swf.mapper.constants import REGISTERED
from simpleflow.swf.mapper.models.workflow import CHILD_POLICIES, WorkflowExecution
from simpleflow.swf.mapper.utils import datetime_timestamp
def mock_list_workflow_types(*args, **kwargs):
override_data = kwargs.pop("override_data", {})
response = {
"typeInfos": [
{
"creationDate": datetime_timestamp(datetime.now()),
"deprecationDate": datetime_timestamp(datetime.now()),
"description": "mocked workflow type",
"status": REGISTERED,
"workflowType": {
"name": "mocked-workflow type",
"version": "0.1",
},
}
]
}
response.update(override_data)
return response
def mock_describe_workflow_type(*args, **kwargs):
override_data = kwargs.pop("override_data", {})
response = {
"configuration": {
"defaultChildPolicy": CHILD_POLICIES.TERMINATE,
"defaultExecutionStartToCloseTimeout": "300",
"defaultTaskList": {"name": "mocked-tasklist"},
"defaultTaskStartToCloseTimeout": "300",
},
"typeInfo": {
"creationDate": datetime_timestamp(datetime.now()),
"deprecationDate": datetime_timestamp(datetime.now()),
"description": "mocked-workflow-type",
"status": REGISTERED,
"workflowType": {"name": "mocked-workflow-type", "version": "0.1"},
},
}
response.update(override_data)
return response
def METHOD_NAME(*args, **kwargs):
override_data = kwargs.pop("override_data", {})
response = {
"executionInfos": [
{
"cancelRequested": False,
"closeStatus": "mocked",
"closeTimestamp": datetime_timestamp(datetime.now()),
"execution": {
"runId": "mocked-run-id",
"workflowId": "mocked-workflow-id",
},
"executionStatus": WorkflowExecution.STATUS_OPEN,
"parent": {
"runId": "mocked-parent-run-id",
"workflowId": "mocked-parent-workflow-id",
},
"startTimestamp": datetime_timestamp(datetime.now()),
"tagList": ["mocked-tag-1", "mocked-tag-2", "mocked-tag-3"],
"workflowType": {"name": "mocked-workflow-type", "version": "0.1"},
}
],
}
response.update(override_data)
return response
def mock_list_closed_workflow_executions(*args, **kwargs):
override_data = kwargs.pop("override_data", {})
response = {
"executionInfos": [
{
"cancelRequested": False,
"closeStatus": "mocked",
"closeTimestamp": datetime_timestamp(datetime.now()),
"execution": {
"runId": "mocked-run-id",
"workflowId": "mocked-workflow-id",
},
"executionStatus": WorkflowExecution.STATUS_CLOSED,
"parent": {
"runId": "mocked-parent-run-id",
"workflowId": "mocked-parent-workflow-id",
},
"startTimestamp": datetime_timestamp(datetime.now()),
"tagList": ["mocked-tag-1", "mocked-tag-2"],
"workflowType": {"name": "mocked-workflow-type", "version": "0.1"},
}
],
}
response.update(override_data)
return response
def mock_describe_workflow_execution(*args, **kwargs):
override_data = kwargs.pop("override_data", {})
response = {
"executionConfiguration": {
"childPolicy": CHILD_POLICIES.TERMINATE,
"executionStartToCloseTimeout": "300",
"taskList": {"name": "mocked-task-list"},
"taskStartToCloseTimeout": "300",
},
"executionInfo": {
"cancelRequested": False,
"closeStatus": WorkflowExecution.STATUS_CLOSED,
"closeTimestamp": datetime_timestamp(datetime.now()),
"execution": {"runId": "mocked-run-id", "workflowId": "mocked-workflow-id"},
"executionStatus": WorkflowExecution.STATUS_OPEN,
"parent": {"runId": "mocked-run-id", "workflowId": "mocked-workflow-id"},
"startTimestamp": datetime_timestamp(datetime.now()),
"tagList": ["mocked-tag-1"],
"workflowType": {"name": "mocked-workflow-type", "version": "0.1"},
},
"latestActivityTaskTimestamp": datetime_timestamp(datetime.now()),
"latestExecutionContext": "string",
"openCounts": {
"openActivityTasks": 12,
"openChildWorkflowExecutions": 3,
"openDecisionTasks": 4,
"openTimers": 5,
},
}
response.update(override_data)
return response |
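# Usage sketch: each mock above accepts override_data to patch the canned
# response before it is returned:
_response = mock_describe_workflow_execution(
    override_data={"latestExecutionContext": "custom-context"})
assert _response["latestExecutionContext"] == "custom-context"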
5,992 | test default | # Copyright Cartopy Contributors
#
# This file is part of Cartopy and is released under the LGPL license.
# See COPYING and COPYING.LESSER in the root of the repository for full
# licensing details.
"""
Tests for the Albers Equal Area coordinate system.
"""
import numpy as np
from numpy.testing import assert_almost_equal, assert_array_almost_equal
import pytest
import cartopy.crs as ccrs
from .helpers import check_proj_params
class TestAlbersEqualArea:
def METHOD_NAME(self):
aea = ccrs.AlbersEqualArea()
other_args = {'ellps=WGS84', 'lon_0=0.0', 'lat_0=0.0', 'x_0=0.0',
'y_0=0.0', 'lat_1=20.0', 'lat_2=50.0'}
check_proj_params('aea', aea, other_args)
assert_almost_equal(np.array(aea.x_limits),
[-17702759.799178038, 17702759.799178038],
decimal=0)
assert_almost_equal(np.array(aea.y_limits),
[-4782937.05107294, 15922623.93176938],
decimal=4)
def test_eccentric_globe(self):
globe = ccrs.Globe(semimajor_axis=1000, semiminor_axis=500,
ellipse=None)
aea = ccrs.AlbersEqualArea(globe=globe)
other_args = {'a=1000', 'b=500', 'lon_0=0.0', 'lat_0=0.0', 'x_0=0.0',
'y_0=0.0', 'lat_1=20.0', 'lat_2=50.0'}
check_proj_params('aea', aea, other_args)
assert_almost_equal(np.array(aea.x_limits),
[-2323.47073363411, 2323.47073363411],
decimal=-2)
assert_almost_equal(np.array(aea.y_limits),
[-572.556243423972, 2402.36176984391],
decimal=10)
def test_eastings(self):
aea_offset = ccrs.AlbersEqualArea(false_easting=1234,
false_northing=-4321)
other_args = {'ellps=WGS84', 'lon_0=0.0', 'lat_0=0.0', 'x_0=1234',
'y_0=-4321', 'lat_1=20.0', 'lat_2=50.0'}
check_proj_params('aea', aea_offset, other_args)
@pytest.mark.parametrize('lon', [-10.0, 10.0])
def test_central_longitude(self, lon):
aea = ccrs.AlbersEqualArea()
aea_offset = ccrs.AlbersEqualArea(central_longitude=lon)
other_args = {'ellps=WGS84', f'lon_0={lon}', 'lat_0=0.0',
'x_0=0.0', 'y_0=0.0', 'lat_1=20.0', 'lat_2=50.0'}
check_proj_params('aea', aea_offset, other_args)
assert_array_almost_equal(
aea_offset.boundary.coords,
aea.boundary.coords,
decimal=0,
)
def test_standard_parallels(self):
aea = ccrs.AlbersEqualArea(standard_parallels=(13, 37))
other_args = {'ellps=WGS84', 'lon_0=0.0', 'lat_0=0.0', 'x_0=0.0',
'y_0=0.0', 'lat_1=13', 'lat_2=37'}
check_proj_params('aea', aea, other_args)
aea = ccrs.AlbersEqualArea(standard_parallels=(13, ))
other_args = {'ellps=WGS84', 'lon_0=0.0', 'lat_0=0.0', 'x_0=0.0',
'y_0=0.0', 'lat_1=13'}
check_proj_params('aea', aea, other_args)
aea = ccrs.AlbersEqualArea(standard_parallels=13)
other_args = {'ellps=WGS84', 'lon_0=0.0', 'lat_0=0.0', 'x_0=0.0',
'y_0=0.0', 'lat_1=13'}
check_proj_params('aea', aea, other_args)
def test_sphere_transform(self):
# USGS Professional Paper 1395, pg 291
globe = ccrs.Globe(semimajor_axis=1.0, semiminor_axis=1.0,
ellipse=None)
lat_1 = 29 + 30 / 60
lat_2 = 45 + 30 / 60
aea = ccrs.AlbersEqualArea(central_latitude=23.0,
central_longitude=-96.0,
standard_parallels=(lat_1, lat_2),
globe=globe)
geodetic = aea.as_geodetic()
other_args = {'a=1.0', 'b=1.0', 'lon_0=-96.0', 'lat_0=23.0', 'x_0=0.0',
'y_0=0.0', 'lat_1=29.5', 'lat_2=45.5'}
check_proj_params('aea', aea, other_args)
assert_almost_equal(np.array(aea.x_limits),
[-2.6525072042232, 2.6525072042232],
decimal=3)
assert_almost_equal(np.array(aea.y_limits),
[-1.09628087472359, 2.39834724057551],
decimal=10)
result = aea.transform_point(-75.0, 35.0, geodetic)
assert_almost_equal(result, [0.2952720, 0.2416774])
def test_ellipsoid_transform(self):
# USGS Professional Paper 1395, pp 292 -- 293
globe = ccrs.Globe(semimajor_axis=6378206.4,
flattening=1 - np.sqrt(1 - 0.00676866),
ellipse=None)
lat_1 = 29 + 30 / 60
lat_2 = 45 + 30 / 60
aea = ccrs.AlbersEqualArea(central_latitude=23.0,
central_longitude=-96.0,
standard_parallels=(lat_1, lat_2),
globe=globe)
geodetic = aea.as_geodetic()
other_args = {'a=6378206.4', 'f=0.003390076308689371', 'lon_0=-96.0',
'lat_0=23.0', 'x_0=0.0', 'y_0=0.0', 'lat_1=29.5',
'lat_2=45.5'}
check_proj_params('aea', aea, other_args)
assert_almost_equal(np.array(aea.x_limits),
[-16900972.674607, 16900972.674607],
decimal=-3)
assert_almost_equal(np.array(aea.y_limits),
[-6971893.11311231, 15298166.8919989],
decimal=1)
result = aea.transform_point(-75.0, 35.0, geodetic)
assert_almost_equal(result, [1885472.7, 1535925.0], decimal=1) |
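# Standalone sketch mirroring the transform checks above: project a lon/lat
# point with a default-ellipsoid Albers projection (values not asserted here):
_aea = ccrs.AlbersEqualArea(central_latitude=23.0, central_longitude=-96.0)
_x, _y = _aea.transform_point(-75.0, 35.0, _aea.as_geodetic())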
5,993 | register | from tracardi.service.notation.dict_traverser import DictTraverser
from tracardi.service.plugin.domain.METHOD_NAME import Plugin, Spec, MetaData, Documentation, PortDoc, Form, FormGroup, \
FormField, FormComponent
from tracardi.service.plugin.domain.result import Result
from tracardi.service.plugin.runner import ActionRunner
from .model.config import Config
from tracardi.service.storage.driver.elastic import resource as resource_db
from tracardi.domain.resource import Resource
from tracardi.process_engine.action.v1.connectors.hubspot.client import HubSpotClient, HubSpotClientException
from datetime import datetime
def validate(config: dict) -> Config:
return Config(**config)
class HubSpotContactAdder(ActionRunner):
resource: Resource
config: Config
client: HubSpotClient
async def set_up(self, init):
config = validate(init)
resource = await resource_db.load(config.source.id)
self.config = config
self.resource = resource
self.client = HubSpotClient(**resource.credentials.get_credentials(self, None))
self.client.set_retries(self.node.on_connection_error_repeat)
def parse_mapping(self):
for key, value in self.config.properties.items():
if isinstance(value, list):
if key == "tags":
self.config.properties[key] = ",".join(value)
else:
self.config.properties[key] = "|".join(value)
elif isinstance(value, datetime):
self.config.properties[key] = str(value)
async def run(self, payload: dict, in_edge=None) -> Result:
dot = self._get_dot_accessor(payload)
traverser = DictTraverser(dot)
self.config.properties = traverser.reshape(self.config.properties)
self.parse_mapping()
try:
result = await self.client.add_contact(
self.config.properties
)
return Result(port="response", value=result)
except HubSpotClientException as e:
return Result(port="error", value={"message": str(e)})
def METHOD_NAME() -> Plugin:
return Plugin(
start=False,
spec=Spec(
module=__name__,
className='HubSpotContactAdder',
inputs=["payload"],
outputs=["response", "error"],
version='0.7.2',
license="MIT",
author="Marcin Gaca, Risto Kowaczewski, Ben Ullrich",
manual="hubspot/hubspot_add_contact_action",
init={
"source": {
"name": "",
"id": "",
},
"properties": {},
},
form=Form(
groups=[
FormGroup(
name="Plugin configuration",
fields=[
FormField(
id="source",
name="HubSpot resource",
description="Please select your HubSpot resource.",
component=FormComponent(type="resource", props={"label": "Resource", "tag": "hubspot"})
),
FormField(
id="properties",
name="Properties fields",
description="You must add some fields to your contact. Just type in the alias of "
"the field as key, and a path as a value for this field.",
component=FormComponent(type="keyValueList", props={"label": "Fields"})
),
]
)
]
)
),
metadata=MetaData(
name='Add contact',
brand="Hubspot",
desc='Adds a new contact to HubSpot based on provided data.',
icon='hubspot',
group=["Hubspot"],
documentation=Documentation(
inputs={
"payload": PortDoc(desc="This port takes payload object.")
},
outputs={
"response": PortDoc(desc="This port returns response from HubSpot API."),
"error": PortDoc(desc="This port gets triggered if an error occurs.")
}
)
)
) |
5,994 | test c void p arg | from ctypes import *
from ctypes.test import need_symbol
import unittest
# IMPORTANT INFO:
#
# Consider this call:
# func.restype = c_char_p
# func(c_char_p("123"))
# It returns
# "123"
#
# WHY IS THIS SO?
#
# argument tuple (c_char_p("123"), ) is destroyed after the function
# func is called, but NOT before the result is actually built.
#
# If the arglist were destroyed BEFORE the result has been built,
# the c_char_p("123") object would already have a zero refcount,
# and the pointer passed to (and returned by) the function would
# probably point to deallocated space.
#
# In this case, there would have to be an additional reference to the argument...
import _ctypes_test
testdll = CDLL(_ctypes_test.__file__)
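# A minimal sketch of the lifetime rule described above: the temporary
# c_char_p argument stays alive until the c_char_p result has been built,
# so the echoed pointer is still valid when it is converted to a string.
_func = testdll._testfunc_p_p
_func.restype = c_char_p
_func.argtypes = c_char_p,
assert _func(c_char_p("123")) == "123"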
# Return machine address `a` as a (possibly long) non-negative integer.
# Starting with Python 2.5, id(anything) is always non-negative, and
# the ctypes addressof() inherits that via PyLong_FromVoidPtr().
def positive_address(a):
if a >= 0:
return a
# View the bits in `a` as unsigned instead.
import struct
num_bits = struct.calcsize("P") * 8 # num bits in native machine address
a += 1L << num_bits
assert a >= 0
return a
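# For example, on a 64-bit build positive_address(-1) == 2**64 - 1.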
def c_wbuffer(init):
n = len(init) + 1
return (c_wchar * n)(*init)
class CharPointersTestCase(unittest.TestCase):
def setUp(self):
func = testdll._testfunc_p_p
func.restype = c_long
func.argtypes = None
def test_paramflags(self):
# function returns c_void_p result,
# and has a required parameter named 'input'
prototype = CFUNCTYPE(c_void_p, c_void_p)
func = prototype(("_testfunc_p_p", testdll),
((1, "input"),))
try:
func()
except TypeError, details:
self.assertEqual(str(details), "required argument 'input' missing")
else:
self.fail("TypeError not raised")
self.assertEqual(func(None), None)
self.assertEqual(func(input=None), None)
def test_int_pointer_arg(self):
func = testdll._testfunc_p_p
func.restype = c_long
self.assertEqual(0, func(0))
ci = c_int(0)
func.argtypes = POINTER(c_int),
self.assertEqual(positive_address(addressof(ci)),
positive_address(func(byref(ci))))
func.argtypes = c_char_p,
self.assertRaises(ArgumentError, func, byref(ci))
func.argtypes = POINTER(c_short),
self.assertRaises(ArgumentError, func, byref(ci))
func.argtypes = POINTER(c_double),
self.assertRaises(ArgumentError, func, byref(ci))
def test_POINTER_c_char_arg(self):
func = testdll._testfunc_p_p
func.restype = c_char_p
func.argtypes = POINTER(c_char),
self.assertEqual(None, func(None))
self.assertEqual("123", func("123"))
self.assertEqual(None, func(c_char_p(None)))
self.assertEqual("123", func(c_char_p("123")))
self.assertEqual("123", func(c_buffer("123")))
ca = c_char("a")
self.assertEqual("a", func(pointer(ca))[0])
self.assertEqual("a", func(byref(ca))[0])
def test_c_char_p_arg(self):
func = testdll._testfunc_p_p
func.restype = c_char_p
func.argtypes = c_char_p,
self.assertEqual(None, func(None))
self.assertEqual("123", func("123"))
self.assertEqual(None, func(c_char_p(None)))
self.assertEqual("123", func(c_char_p("123")))
self.assertEqual("123", func(c_buffer("123")))
ca = c_char("a")
self.assertEqual("a", func(pointer(ca))[0])
self.assertEqual("a", func(byref(ca))[0])
def METHOD_NAME(self):
func = testdll._testfunc_p_p
func.restype = c_char_p
func.argtypes = c_void_p,
self.assertEqual(None, func(None))
self.assertEqual("123", func("123"))
self.assertEqual("123", func(c_char_p("123")))
self.assertEqual(None, func(c_char_p(None)))
self.assertEqual("123", func(c_buffer("123")))
ca = c_char("a")
self.assertEqual("a", func(pointer(ca))[0])
self.assertEqual("a", func(byref(ca))[0])
func(byref(c_int()))
func(pointer(c_int()))
func((c_int * 3)())
@need_symbol('c_wchar_p')
def test_c_void_p_arg_with_c_wchar_p(self):
func = testdll._testfunc_p_p
func.restype = c_wchar_p
func.argtypes = c_void_p,
self.assertEqual(None, func(c_wchar_p(None)))
self.assertEqual(u"123", func(c_wchar_p(u"123")))
def test_instance(self):
func = testdll._testfunc_p_p
func.restype = c_void_p
class X:
_as_parameter_ = None
func.argtypes = c_void_p,
self.assertEqual(None, func(X()))
func.argtypes = None
self.assertEqual(None, func(X()))
@need_symbol('c_wchar')
class WCharPointersTestCase(unittest.TestCase):
def setUp(self):
func = testdll._testfunc_p_p
func.restype = c_int
func.argtypes = None
def test_POINTER_c_wchar_arg(self):
func = testdll._testfunc_p_p
func.restype = c_wchar_p
func.argtypes = POINTER(c_wchar),
self.assertEqual(None, func(None))
self.assertEqual(u"123", func(u"123"))
self.assertEqual(None, func(c_wchar_p(None)))
self.assertEqual(u"123", func(c_wchar_p(u"123")))
self.assertEqual(u"123", func(c_wbuffer(u"123")))
ca = c_wchar("a")
self.assertEqual(u"a", func(pointer(ca))[0])
self.assertEqual(u"a", func(byref(ca))[0])
def test_c_wchar_p_arg(self):
func = testdll._testfunc_p_p
func.restype = c_wchar_p
func.argtypes = c_wchar_p,
c_wchar_p.from_param(u"123")
self.assertEqual(None, func(None))
self.assertEqual("123", func(u"123"))
self.assertEqual(None, func(c_wchar_p(None)))
self.assertEqual("123", func(c_wchar_p("123")))
# XXX Currently, these raise TypeErrors, although they shouldn't:
self.assertEqual("123", func(c_wbuffer("123")))
ca = c_wchar("a")
self.assertEqual("a", func(pointer(ca))[0])
self.assertEqual("a", func(byref(ca))[0])
class ArrayTest(unittest.TestCase):
def test(self):
func = testdll._testfunc_ai8
func.restype = POINTER(c_int)
func.argtypes = c_int * 8,
func((c_int * 8)(1, 2, 3, 4, 5, 6, 7, 8))
# This did crash before:
def func(): pass
CFUNCTYPE(None, c_int * 3)(func)
################################################################
if __name__ == '__main__':
unittest.main() |
5,995 | fetch parents | ######################################################################################################################
# Copyright (C) 2017-2022 Spine project consortium
# This file is part of Spine Toolbox.
# Spine Toolbox is free software: you can redistribute it and/or modify it under the terms of the GNU Lesser General
# Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option)
# any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY;
# without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General
# Public License for more details. You should have received a copy of the GNU Lesser General Public License along with
# this program. If not, see <http://www.gnu.org/licenses/>.
######################################################################################################################
"""
Contains :class:`MetadataTableModel` and associated functionality.
"""
from enum import IntEnum, unique
from PySide6.QtCore import QModelIndex, Qt
from spinetoolbox.helpers import rows_to_row_count_tuples
from spinetoolbox.fetch_parent import FlexibleFetchParent
from .metadata_table_model_base import Column, FLAGS_FIXED, FLAGS_EDITABLE, MetadataTableModelBase
@unique
class ExtraColumn(IntEnum):
"""Identifiers for hidden table columns."""
ID = Column.max() + 1
class MetadataTableModel(MetadataTableModelBase):
"""Model for metadata."""
_ITEM_NAME_KEY = "name"
_ITEM_VALUE_KEY = "value"
def __init__(self, db_mngr, db_maps, db_editor):
"""
Args:
db_mngr (SpineDBManager): database manager
db_maps (Iterable of DatabaseMappingBase): database maps
db_editor (SpineDBEditor): DB editor
"""
super().__init__(db_mngr, db_maps, db_editor)
self._metadata_fetch_parent = FlexibleFetchParent(
"metadata",
handle_items_added=self.add_metadata,
handle_items_removed=self.remove_metadata,
handle_items_updated=self.update_metadata,
owner=self,
)
@staticmethod
def _make_hidden_adder_columns():
"""See base class."""
return [None]
def _add_data_to_db_mngr(self, name, value, db_map):
"""See base class."""
self._db_mngr.add_metadata({db_map: [{"name": name, "value": value}]})
def _update_data_in_db_mngr(self, id_, name, value, db_map):
"""See base class"""
self._db_mngr.update_metadata({db_map: [{"id": id_, "name": name, "value": value}]})
def rollback(self, db_maps):
"""Rolls back changes in database.
Args:
db_maps (Iterable of DiffDatabaseMapping): database mappings that have been rolled back
"""
spans = rows_to_row_count_tuples(
i for db_map in db_maps for i, row in enumerate(self._data) if row[Column.DB_MAP] == db_map
)
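# rows_to_row_count_tuples presumably groups sorted row indices into
# (first_row, count) spans, e.g. [0, 1, 5] -> ((0, 2), (5, 1)), so each
# span below can be removed as one contiguous block.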
for span in spans:
first = span[0]
last = span[0] + span[1] - 1
self.beginRemoveRows(QModelIndex(), first, last)
self._data = self._data[:first] + self._data[last + 1 :]
self.endRemoveRows()
if self.canFetchMore(QModelIndex()):
self.fetchMore(QModelIndex())
def _database_table_name(self):
"""See base class"""
return "metadata"
def _row_id(self, row):
"""See base class."""
return row[ExtraColumn.ID]
def flags(self, index):
row = index.row()
column = index.column()
if column == Column.DB_MAP and row < len(self._data) and self._data[row][ExtraColumn.ID] is not None:
return FLAGS_FIXED
return FLAGS_EDITABLE
def METHOD_NAME(self):
yield self._metadata_fetch_parent
@staticmethod
def _ids_from_added_item(item):
"""See base class."""
return item["id"]
@staticmethod
def _extra_cells_from_added_item(item):
"""See base class."""
return [item["id"]]
def _set_extra_columns(self, row, ids):
"""See base class."""
row[ExtraColumn.ID] = ids
def add_metadata(self, db_map_data):
"""Adds new metadata from database manager to the model.
Args:
db_map_data (dict): added metadata items keyed by database mapping
"""
self._add_data(db_map_data)
def update_metadata(self, db_map_data):
"""Updates model according to data received from database manager.
Args:
db_map_data (dict): updated metadata items keyed by database mapping
"""
for items in db_map_data.values():
items_by_id = {item["id"]: item for item in items}
updated_rows = []
for row_index, row in enumerate(self._data):
if row[ExtraColumn.ID] is None:
continue
db_item = items_by_id.get(row[ExtraColumn.ID])
if db_item is None:
continue
if row[Column.NAME] != db_item["name"]:
row[Column.NAME] = db_item["name"]
updated_rows.append(row_index)
if row[Column.VALUE] != db_item["value"]:
row[Column.VALUE] = db_item["value"]
updated_rows.append(row_index)
if updated_rows:
top_left = self.index(updated_rows[0], 0)
bottom_right = self.index(updated_rows[-1], Column.DB_MAP - 1)
self.dataChanged.emit(top_left, bottom_right, [Qt.ItemDataRole.DisplayRole])
def remove_metadata(self, db_map_data):
"""Removes metadata from model after it has been removed from databases.
Args:
db_map_data (dict): removed items keyed by database mapping
"""
self._remove_data(db_map_data, ExtraColumn.ID) |
5,996 | info | """
This type stub file was generated by pyright.
"""
"""
Logger implementation loosely modeled on PEP 282. We don't use the
PEP 282 logger implementation in the stdlib ('logging') because it's
idiosyncratic and a bit slow for our purposes (we don't use threads).
"""
class LevelsByName:
CRIT = ...
ERRO = ...
WARN = ...
INFO = ...
DEBG = ...
TRAC = ...
BLAT = ...
class LevelsByDescription:
critical = ...
error = ...
warn = ...
METHOD_NAME = ...
debug = ...
trace = ...
blather = ...
LOG_LEVELS_BY_NUM = ...
def getLevelNumByDescription(description): # -> Any | None:
...
class Handler:
fmt = ...
level = ...
def __init__(self, stream=...) -> None:
...
def setFormat(self, fmt): # -> None:
...
def setLevel(self, level): # -> None:
...
def flush(self): # -> None:
...
def close(self): # -> None:
...
def emit(self, record): # -> None:
...
def handleError(self): # -> None:
...
class StreamHandler(Handler):
def __init__(self, strm=...) -> None:
...
def remove(self): # -> None:
...
def reopen(self): # -> None:
...
class BoundIO:
def __init__(self, maxbytes, buf=...) -> None:
...
def flush(self): # -> None:
...
def close(self): # -> None:
...
def write(self, b): # -> None:
...
def getvalue(self): # -> Unknown | bytes:
...
def clear(self): # -> None:
...
class FileHandler(Handler):
"""File handler which supports reopening of logs.
"""
def __init__(self, filename, mode=...) -> None:
...
def reopen(self): # -> None:
...
def remove(self): # -> None:
...
class RotatingFileHandler(FileHandler):
def __init__(self, filename, mode=..., maxBytes=..., backupCount=...) -> None:
"""
Open the specified file and use it as the stream for logging.
By default, the file grows indefinitely. You can specify particular
values of maxBytes and backupCount to allow the file to rollover at
a predetermined size.
Rollover occurs whenever the current log file is nearly maxBytes in
length. If backupCount is >= 1, the system will successively create
new files with the same pathname as the base file, but with extensions
".1", ".2" etc. appended to it. For example, with a backupCount of 5
and a base file name of "app.log", you would get "app.log",
"app.log.1", "app.log.2", ... through to "app.log.5". The file being
written to is always "app.log" - when it gets filled up, it is closed
and renamed to "app.log.1", and if files "app.log.1", "app.log.2" etc.
exist, then they are renamed to "app.log.2", "app.log.3" etc.
respectively.
If maxBytes is zero, rollover never occurs.
"""
...
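# Illustrative usage of the rollover scheme documented above (a sketch
# against this stub's signature; the file name and sizes are hypothetical):
#   handler = RotatingFileHandler("app.log", "a", maxBytes=1 << 20, backupCount=5)
#   # once app.log nears 1 MiB it is renamed to app.log.1, app.log.1
#   # becomes app.log.2, and so on, up to backupCount backups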
def emit(self, record): # -> None:
"""
Emit a record.
Output the record to the file, catering for rollover as described
in doRollover().
"""
...
def removeAndRename(self, sfn, dfn): # -> None:
...
def doRollover(self): # -> None:
"""
Do a rollover, as described in __init__().
"""
...
class LogRecord:
def __init__(self, level, msg, **kw) -> None:
...
def asdict(self): # -> dict[str, str | Unknown]:
...
class Logger:
def __init__(self, level=..., handlers=...) -> None:
...
def close(self): # -> None:
...
def blather(self, msg, **kw): # -> None:
...
def trace(self, msg, **kw): # -> None:
...
def debug(self, msg, **kw): # -> None:
...
def METHOD_NAME(self, msg, **kw): # -> None:
...
def warn(self, msg, **kw): # -> None:
...
def error(self, msg, **kw): # -> None:
...
def critical(self, msg, **kw): # -> None:
...
def log(self, level, msg, **kw): # -> None:
...
def addHandler(self, hdlr): # -> None:
...
def getvalue(self):
...
class SyslogHandler(Handler):
def __init__(self) -> None:
...
def close(self): # -> None:
...
def reopen(self): # -> None:
...
def emit(self, record): # -> None:
...
def getLogger(level=...): # -> Logger:
...
_2MB = ...
def handle_boundIO(logger, fmt, maxbytes=...): # -> None:
"""Attach a new BoundIO handler to an existing Logger"""
...
def handle_stdout(logger, fmt): # -> None:
"""Attach a new StreamHandler with stdout handler to an existing Logger"""
...
def handle_syslog(logger, fmt): # -> None:
"""Attach a new Syslog handler to an existing Logger"""
...
def handle_file(logger, filename, fmt, rotating=..., maxbytes=..., backups=...): # -> None:
"""Attach a new file handler to an existing Logger. If the filename
is the magic name of 'syslog' then make it a syslog handler instead."""
... |
5,997 | get representative | # @license
# Copyright 2017 Google Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
import copy
import six
class EquivalenceMap(object):
"""Union-find data structure"""
supports_readonly = True
def __init__(self, existing=None, _readonly=False):
"""Create a new empty union-find structure."""
if isinstance(existing, EquivalenceMap):
self._weights = existing._weights.copy()
self._parents = existing._parents.copy()
self._prev_next = existing._prev_next.copy()
self._min_values = existing._min_values.copy()
else:
self._weights = {}
self._parents = {}
self._prev_next = {}
self._min_values = {}
self._readonly = False
if existing is not None:
if isinstance(existing, dict):
existing = six.viewitems(existing)
for group in existing:
self.union(*group)
self._readonly = _readonly
def METHOD_NAME(self, obj):
"""Finds and returns the root of the set containing `obj`."""
if obj not in self._parents:
self._parents[obj] = obj
self._weights[obj] = 1
self._prev_next[obj] = [obj, obj]
self._min_values[obj] = obj
return obj
path = [obj]
root = self._parents[obj]
while root != path[-1]:
path.append(root)
root = self._parents[root]
# compress the path and return
for ancestor in path:
self._parents[ancestor] = root
return root
def __getitem__(self, obj):
"""Returns the minimum element in the set containing `obj`."""
if obj not in self._parents:
return obj
return self._min_values[self.METHOD_NAME(obj)]
def __iter__(self):
"""Iterates over all elements known to this equivalence map."""
return iter(self._parents)
def items(self):
return six.viewitems(self._parents)
def keys(self):
return six.viewkeys(self._parents)
def clear(self):
self._weights.clear()
self._parents.clear()
self._prev_next.clear()
self._min_values.clear()
def union(self, *args):
"""Unions the equivalence classes containing the elements in `*args`."""
if self._readonly:
raise AttributeError
if len(args) == 0:
return None
if len(args) == 1:
return self[args[0]]
for a, b in zip(args[:-1], args[1:]):
result = self._union_pair(a, b)
return result
def _union_pair(self, a, b):
a = self.METHOD_NAME(a)
b = self.METHOD_NAME(b)
if a == b:
return self._min_values[a]
if self._weights[a] < self._weights[b]:
a, b = (b, a)
self._min_values[a] = min(self._min_values[a], self._min_values[b])
a_links_new = self._prev_next[a]
a_links_old = tuple(a_links_new)
b_links_new = self._prev_next[b]
b_links_old = tuple(b_links_new)
# We want to splice b's list at the end of a's list
# Splice beginning of b's list into end of a's list
b_links_new[0] = a_links_old[0]
self._prev_next[a_links_old[0]][1] = b
# Splice end of b's list into end of a's list
# last element of a's list is set to last element of b's list
a_links_new[0] = b_links_old[0]
# fix next pointer for last element of b's list
self._prev_next[b_links_old[0]][1] = a
self._weights[a] += self._weights[b]
self._parents[b] = a
return self._min_values[a]
def members(self, x):
"""Yields the members of the equivalence class containing `x`."""
if x not in self._parents:
yield x
return
cur_x = x
while True:
yield cur_x
cur_x = self._prev_next[cur_x][1]
if cur_x == x:
break
def sets(self):
"""Returns the equivalence classes as a set of sets."""
sets = {}
for x in self._parents:
sets.setdefault(self[x], set()).add(x)
return frozenset(frozenset(v) for v in six.viewvalues(sets))
def to_json(self):
"""Returns the equivalence classes a sorted list of sorted lists."""
sets = self.sets()
return sorted(sorted(x) for x in sets)
def __copy__(self):
"""Does not preserve _readonly attribute."""
return EquivalenceMap(self)
def __deepcopy__(self, memo):
"""Does not preserve _readonly attribute."""
result = EquivalenceMap()
result._parents = copy.deepcopy(self._parents, memo)
result._weights = copy.deepcopy(self._weights, memo)
result._prev_next = copy.deepcopy(self._prev_next, memo)
result._min_values = copy.deepcopy(self._min_values, memo)
return result
def copy(self):
"""Returns a copy of the equivalence map."""
return EquivalenceMap(self)
def delete_set(self, x):
"""Removes the equivalence class containing `x`."""
if x not in self._parents:
return
members = list(self.members(x))
for v in members:
del self._parents[v]
del self._weights[v]
del self._prev_next[v]
del self._min_values[v]
def isolate_element(self, x):
"""Isolates `x` from its equivalence class."""
members = list(self.members(x))
self.delete_set(x)
self.union(*(v for v in members if v != x)) |
5,998 | fix keywords | #!/usr/bin/python2
"""
Translate from PEP8 Python style to Mininet (i.e. Arista-like)
Python style
usage: unpep8 < old.py > new.py
- Reinstates CapWords for methods and instance variables
- Gets rid of triple single quotes
- Eliminates triple quotes on single lines
- Inserts extra spaces to improve readability
- Fixes Doxygen (or doxypy) ugliness
Does the following translations:
ClassName.method_name(foo = bar) -> ClassName.methodName( foo=bar )
Triple-single-quotes -> triple-double-quotes
@param foo description -> foo: description
@return description -> returns: description
@author me -> author: me
@todo(me) -> TODO(me)
Bugs/Limitations:
- Hack to restore strings is ugly
- Multiline strings get mangled
- Comments are mangled (which is arguably the "right thing" to do, except
that, for example, the left hand sides of the above would get translated!)
- Doesn't eliminate unnecessary backslashes
- Has no opinion on tab size
- complicated indented docstrings get flattened
- We don't (yet) have a filter to generate Doxygen/Doxypy
- Currently leaves indents on blank comment lines
- May lead to namespace collisions (e.g. some_thing and someThing)
Bob Lantz, rlantz@cs.stanford.edu
1/24/2010
"""
from __future__ import print_function
import re, sys
def fixUnderscoreTriplet( match ):
"Translate a matched triplet of the form a_b to aB."
triplet = match.group()
return triplet[ :-2 ] + triplet[ -1 ].capitalize()
def reinstateCapWords( text ):
underscoreTriplet = re.compile( r'[A-Za-z0-9]_[A-Za-z0-9]' )
return underscoreTriplet.sub( fixUnderscoreTriplet, text )
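# e.g. reinstateCapWords( 'method_name' ) -> 'methodName': each matched
# triplet such as 'd_n' is rewritten to 'dN'.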
def replaceTripleApostrophes( text ):
"Replace triple apostrophes with triple quotes."
return text.replace( "'''", '"""')
def simplifyTripleQuotes( text ):
"Fix single-line doc strings."
r = re.compile( r'"""([^\"\n]+)"""' )
return r.sub( r'"\1"', text )
def insertExtraSpaces( text ):
"Insert extra spaces inside of parentheses and brackets/curly braces."
lparen = re.compile( r'\((?![\s\)])' )
text = lparen.sub( r'( ', text )
rparen = re.compile( r'([^\s\(])(?=\))' )
text = rparen.sub( r'\1 ', text)
# brackets
lbrack = re.compile( r'\[(?![\s\]])' )
text = lbrack.sub( r'[ ', text )
rbrack = re.compile( r'([^\s\[])(?=\])' )
text = rbrack.sub( r'\1 ', text)
# curly braces
lcurly = re.compile( r'\{(?![\s\}])' )
text = lcurly.sub( r'{ ', text )
rcurly = re.compile( r'([^\s\{])(?=\})' )
text = rcurly.sub( r'\1 ', text)
return text
def fixDoxygen( text ):
"""Translate @param foo to foo:, @return bar to returns: bar, and
@author me to author: me"""
param = re.compile( r'@param (\w+)' )
text = param.sub( r'\1:', text )
returns = re.compile( r'@return' )
text = returns.sub( r'returns:', text )
author = re.compile( r'@author' )
text = author.sub( r'author:', text)
# @todo -> TODO
text = text.replace( '@todo', 'TODO' )
return text
def removeCommentFirstBlankLine( text ):
"Remove annoying blank lines after first line in comments."
line = re.compile( r'("""[^\n]*\n)\s*\n', re.MULTILINE )
return line.sub( r'\1', text )
def fixArgs( match, kwarg = re.compile( r'(\w+) = ' ) ):
"Replace foo = bar with foo=bar."
return kwarg.sub( r'\1=', match.group() )
def METHOD_NAME( text ):
"Change keyword argumentsfrom foo = bar to foo=bar."
args = re.compile( r'\(([^\)]+)\)', re.MULTILINE )
return args.sub( fixArgs, text )
# Unfortunately, Python doesn't natively support balanced or recursive
# regular expressions. We could use PyParsing, but that opens another can
# of worms. For now, we just have a cheap hack to restore strings,
# so we don't end up accidentally mangling things like messages, search strings,
# and regular expressions.
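# For example, if the old line 'print( "a_b" )' was rewritten to
# 'print( "aB" )', restoreStrings puts the original "a_b" literal back.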
def lineIter( text ):
"Simple iterator over lines in text."
for line in text.splitlines(): yield line
def stringIter( strList ):
"Yield strings in strList."
for s in strList: yield s
def restoreRegex( regex, old, new ):
"Find regexes in old and restore them into new."
oldStrs = regex.findall( old )
# Sanity check - count should be the same!
newStrs = regex.findall( new )
assert len( oldStrs ) == len( newStrs )
# Replace newStrs with oldStrs
siter = stringIter( oldStrs )
reps = lambda dummy: siter.next()
return regex.sub( reps, new )
# This is a cheap hack, and it may not work 100%, since
# it doesn't handle multiline strings.
# However, it should be mostly harmless...
def restoreStrings( oldText, newText ):
"Restore strings from oldText into newText, returning result."
oldLines, newLines = lineIter( oldText ), lineIter( newText )
quoteStrings = re.compile( r'("[^"]*")' )
tickStrings = re.compile( r"('[^']*')" )
result = ''
# It would be nice if we could blast the whole file, but for
# now it seems to work line-by-line
for newLine in newLines:
oldLine = oldLines.next()
newLine = restoreRegex( quoteStrings, oldLine, newLine )
newLine = restoreRegex( tickStrings, oldLine, newLine )
result += newLine + '\n'
return result
# This might be slightly controversial, since it uses
# three spaces to line up multiline comments. However,
# I much prefer it. Limitations: if you have deeper
# indents in comments, they will be eliminated. ;-(
def fixComment( match,
indentExp=re.compile( r'\n([ ]*)(?=[^/s])', re.MULTILINE ),
trailingQuotes=re.compile( r'\s+"""' ) ):
"Re-indent comment, and join trailing quotes."
originalIndent = match.group( 1 )
comment = match.group( 2 )
indent = '\n' + originalIndent
# Exception: leave unindented things unindented!
if len( originalIndent ) != 0: indent += '   '
comment = indentExp.sub( indent, comment )
return originalIndent + trailingQuotes.sub( '"""', comment )
def fixCommentIndents( text ):
"Fix multiline comment indentation."
comments = re.compile( r'^([ ]*)("""[^"]*""")$', re.MULTILINE )
return comments.sub( fixComment, text )
def removeBogusLinefeeds( text ):
"Remove extra linefeeds at the end of single-line comments."
bogusLfs = re.compile( r'"([^"\n]*)\n"', re.MULTILINE )
return bogusLfs.sub( r'"\1"', text )
def convertFromPep8( program ):
oldProgram = program
# Program text transforms
program = reinstateCapWords( program )
program = METHOD_NAME( program )
program = insertExtraSpaces( program )
# Undo string damage
program = restoreStrings( oldProgram, program )
# Docstring transforms
program = replaceTripleApostrophes( program )
program = simplifyTripleQuotes( program )
program = fixDoxygen( program )
program = fixCommentIndents( program )
program = removeBogusLinefeeds( program )
# Destructive transforms (these can delete lines)
program = removeCommentFirstBlankLine( program )
return program
if __name__ == '__main__':
print( convertFromPep8( sys.stdin.read() ) ) |
5,999 | test zero | #!/usr/bin/env py.test
# -*- coding: utf-8 -*-
__authors__ = "Martin Sandve Alnæs"
__date__ = "2009-02-13 -- 2009-02-13"
import pytest
import math
from ufl import *
from ufl.constantvalue import as_ufl
def testScalars():
s = as_ufl(123)
e = s((5, 7))
v = 123
assert e == v
def METHOD_NAME():
s = as_ufl(0)
e = s((5, 7))
v = 0
assert e == v
def testIdentity():
cell = triangle
I = Identity(cell.geometric_dimension())
s = 123 * I[0, 0]
e = s((5, 7))
v = 123
assert e == v
s = 123 * I[1, 0]
e = s((5, 7))
v = 0
assert e == v
def testCoords():
cell = triangle
x = SpatialCoordinate(cell)
s = x[0] + x[1]
e = s((5, 7))
v = 5 + 7
assert e == v
def testFunction1():
cell = triangle
element = FiniteElement("CG", cell, 1)
f = Coefficient(element)
s = 3 * f
e = s((5, 7), {f: 123})
v = 3 * 123
assert e == v
def testFunction2():
cell = triangle
element = FiniteElement("CG", cell, 1)
f = Coefficient(element)
def g(x):
return x[0]
s = 3 * f
e = s((5, 7), {f: g})
v = 3 * 5
assert e == v
def testArgument2():
cell = triangle
element = FiniteElement("CG", cell, 1)
f = Argument(element, 2)
def g(x):
return x[0]
s = 3 * f
e = s((5, 7), {f: g})
v = 3 * 5
assert e == v
def testAlgebra():
cell = triangle
x = SpatialCoordinate(cell)
s = 3 * (x[0] + x[1]) - 7 + x[0] ** (x[1] / 2)
e = s((5, 7))
v = 3 * (5. + 7.) - 7 + 5. ** (7. / 2)
assert e == v
def testIndexSum():
cell = triangle
x = SpatialCoordinate(cell)
i, = indices(1)
s = x[i] * x[i]
e = s((5, 7))
v = 5 ** 2 + 7 ** 2
assert e == v
def testIndexSum2():
cell = triangle
x = SpatialCoordinate(cell)
I = Identity(cell.geometric_dimension())
i, j = indices(2)
s = (x[i] * x[j]) * I[i, j]
e = s((5, 7))
# v = sum_i sum_j x_i x_j delta_ij = x_0 x_0 + x_1 x_1
v = 5 ** 2 + 7 ** 2
assert e == v
def testMathFunctions():
x = SpatialCoordinate(triangle)[0]
s = sin(x)
e = s((5, 7))
v = math.sin(5)
assert e == v
s = cos(x)
e = s((5, 7))
v = math.cos(5)
assert e == v
s = tan(x)
e = s((5, 7))
v = math.tan(5)
assert e == v
s = ln(x)
e = s((5, 7))
v = math.log(5)
assert e == v
s = exp(x)
e = s((5, 7))
v = math.exp(5)
assert e == v
s = sqrt(x)
e = s((5, 7))
v = math.sqrt(5)
assert e == v
def testListTensor():
x, y = SpatialCoordinate(triangle)
m = as_matrix([[x, y], [-y, -x]])
s = m[0, 0] + m[1, 0] + m[0, 1] + m[1, 1]
e = s((5, 7))
v = 0
assert e == v
s = m[0, 0] * m[1, 0] * m[0, 1] * m[1, 1]
e = s((5, 7))
v = 5 ** 2 * 7 ** 2
assert e == v
def testComponentTensor1():
x = SpatialCoordinate(triangle)
m = as_vector(x[i], i)
s = m[0] * m[1]
e = s((5, 7))
v = 5 * 7
assert e == v
def testComponentTensor2():
x = SpatialCoordinate(triangle)
xx = outer(x, x)
m = as_matrix(xx[i, j], (i, j))
s = m[0, 0] + m[1, 0] + m[0, 1] + m[1, 1]
e = s((5, 7))
v = 5 * 5 + 5 * 7 + 5 * 7 + 7 * 7
assert e == v
def testComponentTensor3():
x = SpatialCoordinate(triangle)
xx = outer(x, x)
m = as_matrix(xx[i, j], (i, j))
s = m[0, 0] * m[1, 0] * m[0, 1] * m[1, 1]
e = s((5, 7))
v = 5 * 5 * 5 * 7 * 5 * 7 * 7 * 7
assert e == v
def testCoefficient():
V = FiniteElement("CG", triangle, 1)
f = Coefficient(V)
e = f ** 2
def eval_f(x):
return x[0] * x[1] ** 2
assert e((3, 7), {f: eval_f}) == (3 * 7 ** 2) ** 2
def testCoefficientDerivative():
V = FiniteElement("CG", triangle, 1)
f = Coefficient(V)
e = f.dx(0) ** 2 + f.dx(1) ** 2
def eval_f(x, derivatives):
if not derivatives:
return eval_f.c * x[0] * x[1] ** 2
# assume only first order derivative
d, = derivatives
if d == 0:
return eval_f.c * x[1] ** 2
if d == 1:
return eval_f.c * x[0] * 2 * x[1]
# shows how to attach data to eval_f
eval_f.c = 5
assert e((3, 7), {f: eval_f}) == (5 * 7 ** 2) ** 2 + (5 * 3 * 2 * 7) ** 2
def test_dot():
x = SpatialCoordinate(triangle)
s = dot(x, 2 * x)
e = s((5, 7))
v = 2 * (5 * 5 + 7 * 7)
assert e == v
def test_inner():
x = SpatialCoordinate(triangle)
xx = as_matrix(((2 * x[0], 3 * x[0]), (2 * x[1], 3 * x[1])))
s = inner(xx, 2 * xx)
e = s((5, 7))
v = 2 * ((5 * 2) ** 2 + (5 * 3) ** 2 + (7 * 2) ** 2 + (7 * 3) ** 2)
assert e == v
def test_outer():
x = SpatialCoordinate(triangle)
xx = outer(outer(x, x), as_vector((2, 3)))
s = inner(xx, 2 * xx)
e = s((5, 7))
v = 2 * (5 ** 2 + 7 ** 2) ** 2 * (2 ** 2 + 3 ** 2)
assert e == v
def test_cross():
x = SpatialCoordinate(tetrahedron)
xv = (3, 5, 7)
# Test cross product of triplets of orthogonal
# vectors, where |a x b| = |a| |b|
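# (|a x b| = |a||b| sin(theta) and sin(theta) = 1 for orthogonal a and b,
# so dot(cij, cij) should equal dot(t[i], t[i]) * dot(t[j], t[j]) whenever
# i != j below, and 0 when i == j)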
ts = [
[as_vector((x[0], 0, 0)),
as_vector((0, x[1], 0)),
as_vector((0, 0, x[2]))],
[as_vector((x[0], x[1], 0)),
as_vector((x[1], -x[0], 0)),
as_vector((0, 0, x[2]))],
[as_vector((0, x[0], x[1])),
as_vector((0, x[1], -x[0])),
as_vector((x[2], 0, 0))],
[as_vector((x[0], 0, x[1])),
as_vector((x[1], 0, -x[0])),
as_vector((0, x[2], 0))],
]
for t in ts:
for i in range(3):
for j in range(3):
cij = cross(t[i], t[j])
dij = dot(cij, cij)
eij = dij(xv)
tni = dot(t[i], t[i])(xv)
tnj = dot(t[j], t[j])(xv)
vij = tni * tnj if i != j else 0
assert eij == vij
def xtest_dev():
x = SpatialCoordinate(triangle)
xv = (5, 7)
xx = outer(x, x)
s1 = dev(2 * xx)
s2 = 2 * (xx - xx.T) # FIXME
e = inner(s1, s1)(xv)
v = inner(s2, s2)(xv)
assert e == v
def test_skew():
x = SpatialCoordinate(triangle)
xv = (5, 7)
xx = outer(x, x)
s1 = skew(2 * xx)
s2 = (xx - xx.T)
e = inner(s1, s1)(xv)
v = inner(s2, s2)(xv)
assert e == v
def test_sym():
x = SpatialCoordinate(triangle)
xv = (5, 7)
xx = outer(x, x)
s1 = sym(2 * xx)
s2 = (xx + xx.T)
e = inner(s1, s1)(xv)
v = inner(s2, s2)(xv)
assert e == v
def test_tr():
x = SpatialCoordinate(triangle)
xv = (5, 7)
xx = outer(x, x)
s = tr(2 * xx)
e = s(xv)
v = 2 * sum(xv[i] ** 2 for i in (0, 1))
assert e == v
def test_det2D():
x = SpatialCoordinate(triangle)
xv = (5, 7)
a, b = 6.5, -4
xx = as_matrix(((x[0], x[1]), (a, b)))
s = det(2 * xx)
e = s(xv)
v = 2 ** 2 * (5 * b - 7 * a)
assert e == v
def xtest_det3D(): # FIXME
x = SpatialCoordinate(tetrahedron)
xv = (5, 7, 9)
a, b, c = 6.5, -4, 3
d, e, f = 2, 3, 4
xx = as_matrix(((x[0], x[1], x[2]),
(a, b, c),
(d, e, f)))
s = det(2 * xx)
e = s(xv)
v = 2 ** 3 * \
(xv[0] * (b * f - e * c) - xv[1] *
(a * f - c * d) + xv[2] * (a * e - b * d))
assert e == v
def test_cofac():
pass # TODO
def test_inv():
pass # TODO |