id int64 0 300k | label stringlengths 1 74 ⌀ | text stringlengths 4k 8k |
|---|---|---|
1,700 | test popen worker recycles with initializer | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Test PopenPoolExecutor."""
import pytest
import os
import psutil
import time
from tvm.contrib.popen_pool import PopenWorker, PopenPoolExecutor
from tvm.testing import (
identity_after,
terminate_self,
initializer,
after_initializer,
register_ffi,
call_py_ffi,
call_cpp_ffi,
call_cpp_py_ffi,
fast_summation,
slow_summation,
timeout_job,
)
def test_popen_worker():
proc = PopenWorker()
with pytest.raises(TimeoutError):
proc.send(identity_after, [1, 100], timeout=0.01)
proc.recv()
with pytest.raises(ChildProcessError):
proc.send(terminate_self)
proc.recv()
proc.send(identity_after, [2, 0])
assert proc.recv() == 2
proc.send(identity_after, [4, 0.0001])
assert proc.recv() == 4
def test_popen_worker_reuses():
proc = PopenWorker(maximum_uses=None)
proc.send(os.getpid)
initial_pid = proc.recv()
proc.send(os.getpid)
assert proc.recv() == initial_pid
def test_popen_worker_recycles():
proc = PopenWorker(maximum_uses=2)
proc.send(os.getpid)
initial_pid = proc.recv()
assert psutil.pid_exists(initial_pid)
proc.send(os.getpid)
assert proc.recv() == initial_pid
assert psutil.pid_exists(initial_pid)
proc.send(os.getpid)
assert proc.recv() != initial_pid
assert not psutil.pid_exists(initial_pid)
def test_popen_pool_executor():
import tvm
pool = PopenPoolExecutor(max_workers=2, timeout=0.01)
value1 = pool.submit(identity_after, 1, 100)
value2 = pool.submit(terminate_self)
value3 = pool.submit(identity_after, 3, 0)
value4 = pool.submit(tvm.runtime.String, "xyz")
with pytest.raises(TimeoutError):
value1.result()
with pytest.raises(ChildProcessError):
value2.result()
assert value3.result() == 3
value = value4.result()
assert isinstance(value, tvm.runtime.String)
assert value == "xyz"
pool = PopenPoolExecutor(max_workers=4, timeout=None)
values = pool.map_with_error_catching(lambda x: x, range(100))
for idx, val in enumerate(values):
assert val.value == idx
def test_popen_initializer():
initargs = [1, 2, 3]
proc = PopenWorker(initializer=initializer, initargs=initargs)
proc.send(after_initializer)
test_global_state_1, test_global_state_2, test_global_state_3 = proc.recv()
assert test_global_state_1 == initargs[0]
assert test_global_state_2 == initargs[1]
assert test_global_state_3 == initargs[2]
def METHOD_NAME():
initargs = [1, 2, 3]
proc = PopenWorker(initializer=initializer, initargs=initargs, maximum_uses=3)
proc.send(os.getpid)
initial_pid = proc.recv()
proc.send(after_initializer)
assert list(proc.recv()) == initargs
proc.send(os.getpid)
assert proc.recv() == initial_pid
# The process should be recycled with this send.
proc.send(os.getpid)
assert proc.recv() != initial_pid
# But the initializer should've run this time as well.
proc.send(after_initializer)
assert list(proc.recv()) == initargs
def test_popen_ffi():
proc = PopenWorker(register_ffi)
# call python function via ffi
initargs = [0]
proc.send(call_py_ffi, initargs)
assert proc.recv() == initargs[0]
# call cpp function via ffi
initargs = [1]
proc.send(call_cpp_ffi, initargs)
assert proc.recv() == initargs[0]
# call python function from cpp function via ffi
initargs = [2]
proc.send(call_cpp_py_ffi, initargs)
assert proc.recv() == initargs[0]
def test_popen_pool_executor_timeout():
timeout = 0.5
pool = PopenPoolExecutor(timeout=timeout)
f1 = pool.submit(timeout_job, timeout)
while not f1.done():
pass
try:
res = f1.result()
except Exception as ex:
assert isinstance(ex, TimeoutError)
def test_popen_pool_executor_recycles():
pool = PopenPoolExecutor(max_workers=1, timeout=None, maximum_process_uses=2)
initial_pid = pool.submit(os.getpid).result()
assert initial_pid == pool.submit(os.getpid).result()
assert initial_pid != pool.submit(os.getpid).result()
if __name__ == "__main__":
test_popen_worker()
test_popen_worker_recycles()
test_popen_pool_executor()
test_popen_initializer()
METHOD_NAME()
test_popen_ffi()
test_popen_pool_executor_timeout()
test_popen_pool_executor_recycles() |
1,701 | heartbeat | # Copyright © Michal Čihař <michal@weblate.org>
#
# SPDX-License-Identifier: GPL-3.0-or-later
import gzip
import os
import shutil
import subprocess
import sys
import time
from importlib import import_module
from shutil import copyfile
from celery.schedules import crontab
from django.conf import settings
from django.core.cache import cache
from django.core.management.commands import diffsettings
from ruamel.yaml import YAML
import weblate.utils.version
from weblate.formats.models import FILE_FORMATS
from weblate.machinery.models import MACHINERY
from weblate.trans.util import get_clean_env
from weblate.utils.backup import backup_lock
from weblate.utils.celery import app
from weblate.utils.data import data_dir
from weblate.utils.db import using_postgresql
from weblate.utils.errors import add_breadcrumb, report_error
from weblate.utils.lock import WeblateLockTimeoutError
from weblate.vcs.models import VCS_REGISTRY
@app.task(trail=False)
def ping():
return {
"version": weblate.utils.version.GIT_VERSION,
"vcs": sorted(VCS_REGISTRY.keys()),
"formats": sorted(FILE_FORMATS.keys()),
"mt_services": sorted(MACHINERY.keys()),
"encoding": [sys.getfilesystemencoding(), sys.getdefaultencoding()],
"uid": os.getuid(),
}
@app.task(trail=False)
def METHOD_NAME():
cache.set("celery_loaded", time.monotonic())
cache.set("celery_heartbeat", time.monotonic())
cache.set(
"celery_encoding", [sys.getfilesystemencoding(), sys.getdefaultencoding()]
)
@app.task(trail=False, autoretry_for=(WeblateLockTimeoutError,))
def settings_backup():
with backup_lock():
# Expand settings in case it contains non-trivial code
command = diffsettings.Command()
kwargs = {"default": None, "all": False, "output": "hash"}
with open(data_dir("backups", "settings-expanded.py"), "w") as handle:
handle.write(command.handle(**kwargs))
# Backup original settings
if settings.SETTINGS_MODULE:
settings_mod = import_module(settings.SETTINGS_MODULE)
copyfile(settings_mod.__file__, data_dir("backups", "settings.py"))
# Backup environment (to make restoring Docker easier)
with open(data_dir("backups", "environment.yml"), "w") as handle:
yaml = YAML()
yaml.dump(dict(os.environ), handle)
@app.task(trail=False, autoretry_for=(WeblateLockTimeoutError,))
def database_backup():
if settings.DATABASE_BACKUP == "none":
return
with backup_lock():
database = settings.DATABASES["default"]
env = get_clean_env()
compress = settings.DATABASE_BACKUP == "compressed"
out_compressed = data_dir("backups", "database.sql.gz")
out_text = data_dir("backups", "database.sql")
if using_postgresql():
cmd = ["pg_dump", "--dbname", database["NAME"]]
if database["HOST"]:
cmd.extend(["--host", database["HOST"]])
if database["PORT"]:
cmd.extend(["--port", database["PORT"]])
if database["USER"]:
cmd.extend(["--username", database["USER"]])
if settings.DATABASE_BACKUP == "compressed":
cmd.extend(["--file", out_compressed])
cmd.extend(["--compress", "6"])
compress = False
else:
cmd.extend(["--file", out_text])
env["PGPASSWORD"] = database["PASSWORD"]
else:
cmd = [
"mysqldump",
"--result-file",
out_text,
"--single-transaction",
"--skip-lock-tables",
]
if database["HOST"]:
cmd.extend(["--host", database["HOST"]])
if database["PORT"]:
cmd.extend(["--port", database["PORT"]])
if database["USER"]:
cmd.extend(["--user", database["USER"]])
cmd.extend(["--databases", database["NAME"]])
env["MYSQL_PWD"] = database["PASSWORD"]
try:
subprocess.run(
cmd,
env=env,
capture_output=True,
stdin=subprocess.DEVNULL,
check=True,
text=True,
)
except subprocess.CalledProcessError as error:
add_breadcrumb(
category="backup",
message="database dump output",
stdout=error.stdout,
stderr=error.stderr,
)
report_error()
raise
if compress:
with open(out_text, "rb") as f_in, gzip.open(out_compressed, "wb") as f_out:
shutil.copyfileobj(f_in, f_out)
os.unlink(out_text)
@app.on_after_finalize.connect
def setup_periodic_tasks(sender, **kwargs):
cache.set("celery_loaded", time.monotonic())
sender.add_periodic_task(
crontab(hour=1, minute=0), settings_backup.s(), name="settings-backup"
)
sender.add_periodic_task(
crontab(hour=1, minute=30), database_backup.s(), name="database-backup"
)
sender.add_periodic_task(60, METHOD_NAME.s(), name="heartbeat") |
1,702 | assert io | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2008-2014 Glencoe Software, Inc. All Rights Reserved.
# Use is subject to license terms supplied in LICENSE.txt
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
"""
Integration test testing distributed processing via
ServiceFactoryI.acquireProcessor().
"""
from builtins import str
from omero.testlib import ITest
import pytest
import os
import omero
import omero.clients
import omero.model
import omero.api
import uuid
from omero.util.temp_files import create_path, remove_path
from omero.rtypes import rlong, rstring, rmap
from omero.scripts import wait
PINGFILE = """
#!/usr/bin/env python
import os
import omero, omero.scripts as s
import uuid
#
# Unique name so that IScript does not reject us
# based on duplicate file names.
#
uuid = str(uuid.uuid4())
print("I am the script named %s" % uuid)
#
# Creation
#
client = s.client(uuid, "simple ping script",
s.Long("a", optional=True).inout(), s.String("b", optional=True).inout())
print("Session", client.getSession())
#
# Various diagnostics
#
import sys
from pprint import pprint
print("PATH:")
pprint(sys.path)
print("CONFIG")
f = open("config","r")
print("".join(f.readlines()))
f.close()
from omero.rtypes import *
import Ice
ic = Ice.initialize(["--Ice.Plugin.IceSSL=IceSSL:createIceSSL"])
print(ic.getProperties().getPropertiesForPrefix("Ice"))
print(ic.getProperties().getPropertiesForPrefix("omero"))
#
# Echo'ing input to output
#
keys = client.getInputKeys()
print("Keys found:")
print(keys)
for key in keys:
client.setOutput(key, client.getInput(key))
#
# Env
#
print("This was my environment:")
for k,v in os.environ.items():
print("%s => %s" %(k,v))
sys.stderr.write("Oh, and this is stderr.");
"""
class CallbackI(omero.grid.ProcessCallback):
def __init__(self):
self.finish = []
self.cancel = []
self.kill = []
def processFinished(self, rv, current=True):
self.finish.append(rv)
def processCancelled(self, rv, current=True):
self.cancel.append(rv)
def processKilled(self, rv, current=True):
self.kill.append(rv)
class TestPing(ITest):
"""
Tests which use the trivial script defined by PINGFILE to
test the scripts API.
"""
#
# Helper methods
#
def _getProcessor(self):
scripts = self.root.getSession().getScriptService()
id = scripts.uploadOfficialScript(
"/tests/ping_py/%s.py" % self.uuid(), PINGFILE)
j = omero.model.ScriptJobI()
j.linkOriginalFile(omero.model.OriginalFileI(rlong(id), False))
p = self.client.sf.sharedResources().acquireProcessor(j, 100)
return p
def _checkstd(self, output, which):
rfile = output.val[which]
ofile = rfile.val
assert ofile
tmppath = create_path("pingtest")
try:
self.client.download(ofile, str(tmppath))
assert os.path.getsize(str(tmppath))
return tmppath.text()
finally:
remove_path(tmppath)
def METHOD_NAME(self, output):
stdout = self._checkstd(output, "stdout")
stderr = self._checkstd(output, "stderr")
return stdout, stderr
def assertSuccess(self, processor, process):
wait(self.client, process)
rc = process.poll()
output = processor.getResults(process)
stdout, stderr = self.METHOD_NAME(output)
if rc is None or rc.val != 0:
assert False, "STDOUT:\n%s\nSTDERR:\n%s\n" % (stdout, stderr)
return output
#
# Test methods
#
def testPingViaISCript(self):
p = self._getProcessor()
input = rmap({})
input.val["a"] = rlong(2)
input.val["b"] = rstring("d")
process = p.execute(input)
output = self.assertSuccess(p, process)
assert output.val["a"].val == 2
def testPingParametersViaISCript(self):
p = self._getProcessor()
params = p.params()
assert params
assert params.inputs["a"]
assert params.inputs["b"]
assert params.outputs["a"]
assert params.outputs["b"]
def testPingStdout(self):
p = self._getProcessor()
params = p.params()
assert params.stdoutFormat
process = p.execute(rmap({}))
self.assertSuccess(p, process)
@pytest.mark.broken(ticket="11494")
def testProcessCallback(self):
callback = CallbackI()
id = self.client.getCommunicator().stringToIdentity(str(uuid.uuid4()))
cb = self.client.getAdapter().add(callback, id)
cb = omero.grid.ProcessCallbackPrx.uncheckedCast(cb)
p = self._getProcessor()
params = p.params()
assert params.stdoutFormat
process = p.execute(rmap({}))
process.registerCallback(cb)
self.assertSuccess(p, process)
assert len(callback.finish) > 0
def testProcessShutdown(self):
p = self._getProcessor()
process = p.execute(rmap({}))
process.shutdown()
p.getResults(process)
# Line above was: output = p.getResults(process)
# Probably doesn't have IO since killed
# self.assertIO(output)
def testProcessShutdownOneway(self):
p = self._getProcessor()
process = p.execute(rmap({}))
oneway = omero.grid.ProcessPrx.uncheckedCast(process.ice_oneway())
oneway.shutdown()
# Depending on what's faster this may or may not throw
try:
p.getResults(process)
assert process.poll()
p.getResults(process)
except omero.ServerError:
pass
# Line above was: output = p.getResults(process)
# Probably doesn't have IO since killed
# self.assertIO(output)
def testProcessorGetResultsBeforeFinished(self):
p = self._getProcessor()
process = p.execute(None)
with pytest.raises(omero.ServerError):
p.getResults(process)
self.assertSuccess(p, process)
#
# Execution-less tests
#
def testProcessorExpires(self):
p = self._getProcessor()
assert p.expires() > 0
def testProcessorGetJob(self):
p = self._getProcessor()
assert p.getJob()
def testProcessorStop(self):
p = self._getProcessor()
p.execute(rmap({}))
p.stop()
def testProcessorDetach(self):
p = self._getProcessor()
p.execute(rmap({}))
p.setDetach(True)
p.stop() |
1,703 | on train end | import os
import stat
from utils import apply_eval
from mindspore import log as logger
from mindspore import save_checkpoint
from mindspore.train.callback import Callback, CheckpointConfig, LossMonitor, ModelCheckpoint, TimeMonitor
class EvalCallBack(Callback):
"""
Evaluation callback when training.
Args:
eval_function (function): evaluation function.
eval_param_dict (dict): evaluation parameters' configure dict.
interval (int): run evaluation interval, default is 1.
eval_start_epoch (int): evaluation start epoch, default is 1.
save_best_ckpt (bool): Whether to save best checkpoint, default is True.
best_ckpt_name (str): best checkpoint name, default is `best.ckpt`.
metrics_name (str): evaluation metrics name, default is `acc`.
Returns:
None
Examples:
>>> EvalCallBack(eval_function, eval_param_dict)
"""
def __init__(
self,
eval_function,
eval_param_dict,
interval=1,
eval_start_epoch=1,
save_best_ckpt=True,
ckpt_directory="./",
best_ckpt_name="best.ckpt",
metrics_name="acc",
):
super(EvalCallBack, self).__init__()
self.eval_function = eval_function
self.eval_param_dict = eval_param_dict
self.eval_start_epoch = eval_start_epoch
if interval < 1:
raise ValueError("interval should >= 1.")
self.interval = interval
self.save_best_ckpt = save_best_ckpt
self.best_res = 0
self.best_epoch = 0
if not os.path.isdir(ckpt_directory):
os.makedirs(ckpt_directory)
self.best_ckpt_path = os.path.join(ckpt_directory, best_ckpt_name)
self.metrics_name = metrics_name
def remove_ckpoint_file(self, file_name):
"""Remove the specified checkpoint file from this checkpoint manager and also from the directory."""
try:
os.chmod(file_name, stat.S_IWRITE)
os.remove(file_name)
except OSError:
logger.warning("OSError, failed to remove the older ckpt file %s.", file_name)
except ValueError:
logger.warning("ValueError, failed to remove the older ckpt file %s.", file_name)
def on_train_epoch_end(self, run_context):
"""Callback when epoch end."""
cb_params = run_context.original_args()
cur_epoch = cb_params.cur_epoch_num
if cur_epoch >= self.eval_start_epoch and (cur_epoch - self.eval_start_epoch) % self.interval == 0:
res = self.eval_function(self.eval_param_dict)
print("epoch: {}, {}: {}".format(cur_epoch, self.metrics_name, res), flush=True)
if res >= self.best_res:
self.best_res = res
self.best_epoch = cur_epoch
print("update best result: {}".format(res), flush=True)
if self.save_best_ckpt:
if os.path.exists(self.best_ckpt_path):
self.remove_ckpoint_file(self.best_ckpt_path)
save_checkpoint(cb_params.train_network, self.best_ckpt_path)
print("update best checkpoint at: {}".format(self.best_ckpt_path), flush=True)
def METHOD_NAME(self, run_context):
print(
"End training, the best {0} is: {1}, the best {0} epoch is {2}".format(
self.metrics_name, self.best_res, self.best_epoch
),
flush=True,
)
def get_ssd_callbacks(args, steps_per_epoch, rank_id):
ckpt_config = CheckpointConfig(keep_checkpoint_max=args.keep_checkpoint_max)
ckpt_cb = ModelCheckpoint(prefix="ssd", directory=args.ckpt_save_dir, config=ckpt_config)
if rank_id == 0:
return [TimeMonitor(data_size=steps_per_epoch), LossMonitor(), ckpt_cb]
return [TimeMonitor(data_size=steps_per_epoch), LossMonitor()]
def get_ssd_eval_callback(eval_net, eval_dataset, args):
if args.dataset == "coco":
anno_json = os.path.join(args.data_dir, "annotations/instances_val2017.json")
else:
raise NotImplementedError
eval_param_dict = {"net": eval_net, "dataset": eval_dataset, "anno_json": anno_json, "args": args}
eval_cb = EvalCallBack(
apply_eval,
eval_param_dict,
interval=args.eval_interval,
eval_start_epoch=args.eval_start_epoch,
save_best_ckpt=True,
ckpt_directory=args.ckpt_save_dir,
best_ckpt_name="best.ckpt",
metrics_name="mAP",
)
return eval_cb |
1,704 | multi attachments plan | """Test storing of attachments in a report."""
import re
import os
import pytest
import testplan
from testplan.testing import multitest
@multitest.testsuite
class Suite1:
def __init__(self, attachments):
self._attachments = attachments
@multitest.testcase
def attach(self, env, result):
for attachment in self._attachments:
result.attach(attachment, description="attaching a file")
@pytest.fixture(scope="function")
def attachment_plan(tmpdir):
attachment_path = str(tmpdir.join("attachment.txt"))
with open(attachment_path, "w") as f:
f.write("testplan\n")
plan = testplan.TestplanMock(name="AttachmentPlan")
plan.add(
multitest.MultiTest(
name="AttachmentTest", suites=[Suite1([attachment_path])]
)
)
return plan
@pytest.fixture(scope="function")
def METHOD_NAME(tmpdir):
attachment_paths = [
str(tmpdir.mkdir(f"{i}").join("attachment.txt")) for i in range(2)
]
# Write different content to each file to ensure they get a unique hash.
for i, attachment_path in enumerate(attachment_paths):
with open(attachment_path, "w") as f:
f.write(f"testplan{i}\n")
plan = testplan.TestplanMock(name="AttachmentPlan")
plan.add(
multitest.MultiTest(
name="AttachmentTest",
suites=[Suite1(attachment_paths)],
)
)
return plan
@pytest.fixture(scope="function")
def same_attachments_plan(tmpdir):
attachment_path = str(tmpdir.join("attachment.txt"))
with open(attachment_path, "w") as f:
f.write("testplan\n")
plan = testplan.TestplanMock(name="AttachmentPlan")
plan.add(
multitest.MultiTest(
name="AttachmentTest",
suites=[Suite1([attachment_path] * 2)],
)
)
return plan
def test_attach(attachment_plan):
"""Test running a Testplan that stores a single attachment."""
plan_result = attachment_plan.run()
assert plan_result # Plan should pass.
report = plan_result.report
attachments = report.attachments
testcase_report = report.entries[0].entries[0].entries[0]
assert testcase_report.name == "attach"
assert len(testcase_report.entries) == 1
attachment_entry = testcase_report.entries[0]
assert len(attachments) == 1
dst_path = list(attachments.keys())[0]
# Expect the attachment to be stored as "attachment-[HASH]-[FILESIZE].txt"
assert re.match(r"attachment-[0-9a-f]+-[0-9]+.txt", dst_path)
# The source path is stored in the top-level attachments dict. Check that
# it matches the value stored on the testcase entry.
assert attachments[dst_path] == attachment_entry["source_path"]
def test_multi_attachments(METHOD_NAME):
"""
Test running a Testplan that stores unique attachments multiple times.
"""
plan_result = METHOD_NAME.run()
assert plan_result # Plan should pass.
report = plan_result.report
attachments = report.attachments
assert len(attachments) == 2 # Two unique file attachments
testcase_report = report.entries[0].entries[0].entries[0]
assert len(testcase_report.entries) == 2
for i in range(2):
entry = testcase_report.entries[i]
dst_path = entry["dst_path"]
with open(entry["source_path"], "r") as fd:
content = fd.read()
assert content == f"testplan{i}\n"
# Expect the attachment to be stored as
# "attachment-[HASH]-[FILESIZE].txt"
assert re.match(r"attachment-[0-9a-f]+-[0-9]+.txt", dst_path)
# Check that the source and dst paths match.
assert attachments[dst_path] == entry["source_path"]
def test_same_attachments(same_attachments_plan):
"""
Test running a Testplan that stores the same attachment multiple times.
The file only needs to be stored once under the attachments but
can be referenced from multiple parts of the report.
"""
plan_result = same_attachments_plan.run()
assert plan_result # Plan should pass.
report = plan_result.report
attachments = report.attachments
assert len(attachments) == 1 # Only one unique file is attached.
attachment_entries = report.entries[0].entries[0].entries[0].entries
assert len(attachment_entries) == 2
dst_path = list(attachments.keys())[0]
# Expect the attachment to be stored as "attachment-[HASH]-[FILESIZE].txt"
assert re.match(r"attachment-[0-9a-f]+-[0-9]+.txt", dst_path)
for attachment_entry in attachment_entries:
assert attachments[dst_path] == attachment_entry["source_path"] |
1,705 | post | from typing import List
from flask import request
from flask_restx import Namespace, Resource
from CTFd.api.v1.helpers.request import validate_args
from CTFd.api.v1.helpers.schemas import sqlalchemy_to_pydantic
from CTFd.api.v1.schemas import APIDetailedSuccessResponse, APIListSuccessResponse
from CTFd.constants import RawEnum
from CTFd.models import Flags, db
from CTFd.plugins.flags import FLAG_CLASSES, get_flag_class
from CTFd.schemas.flags import FlagSchema
from CTFd.utils.decorators import admins_only
from CTFd.utils.helpers.models import build_model_filters
flags_namespace = Namespace("flags", description="Endpoint to retrieve Flags")
FlagModel = sqlalchemy_to_pydantic(Flags)
class FlagDetailedSuccessResponse(APIDetailedSuccessResponse):
data: FlagModel
class FlagListSuccessResponse(APIListSuccessResponse):
data: List[FlagModel]
flags_namespace.schema_model(
"FlagDetailedSuccessResponse", FlagDetailedSuccessResponse.apidoc()
)
flags_namespace.schema_model(
"FlagListSuccessResponse", FlagListSuccessResponse.apidoc()
)
@flags_namespace.route("")
class FlagList(Resource):
@admins_only
@flags_namespace.doc(
description="Endpoint to list Flag objects in bulk",
responses={
200: ("Success", "FlagListSuccessResponse"),
400: (
"An error occured processing the provided or stored data",
"APISimpleErrorResponse",
),
},
)
@validate_args(
{
"challenge_id": (int, None),
"type": (str, None),
"content": (str, None),
"data": (str, None),
"q": (str, None),
"field": (
RawEnum(
"FlagFields", {"type": "type", "content": "content", "data": "data"}
),
None,
),
},
location="query",
)
def get(self, query_args):
q = query_args.pop("q", None)
field = str(query_args.pop("field", None))
filters = build_model_filters(model=Flags, query=q, field=field)
flags = Flags.query.filter_by(**query_args).filter(*filters).all()
schema = FlagSchema(many=True)
response = schema.dump(flags)
if response.errors:
return {"success": False, "errors": response.errors}, 400
return {"success": True, "data": response.data}
@admins_only
@flags_namespace.doc(
description="Endpoint to create a Flag object",
responses={
200: ("Success", "FlagDetailedSuccessResponse"),
400: (
"An error occured processing the provided or stored data",
"APISimpleErrorResponse",
),
},
)
def METHOD_NAME(self):
req = request.get_json()
schema = FlagSchema()
response = schema.load(req, session=db.session)
if response.errors:
return {"success": False, "errors": response.errors}, 400
db.session.add(response.data)
db.session.commit()
response = schema.dump(response.data)
db.session.close()
return {"success": True, "data": response.data}
@flags_namespace.route("/types", defaults={"type_name": None})
@flags_namespace.route("/types/<type_name>")
class FlagTypes(Resource):
@admins_only
def get(self, type_name):
if type_name:
flag_class = get_flag_class(type_name)
response = {"name": flag_class.name, "templates": flag_class.templates}
return {"success": True, "data": response}
else:
response = {}
for class_id in FLAG_CLASSES:
flag_class = FLAG_CLASSES.get(class_id)
response[class_id] = {
"name": flag_class.name,
"templates": flag_class.templates,
}
return {"success": True, "data": response}
@flags_namespace.route("/<flag_id>")
class Flag(Resource):
@admins_only
@flags_namespace.doc(
description="Endpoint to get a specific Flag object",
responses={
200: ("Success", "FlagDetailedSuccessResponse"),
400: (
"An error occured processing the provided or stored data",
"APISimpleErrorResponse",
),
},
)
def get(self, flag_id):
flag = Flags.query.filter_by(id=flag_id).first_or_404()
schema = FlagSchema()
response = schema.dump(flag)
if response.errors:
return {"success": False, "errors": response.errors}, 400
response.data["templates"] = get_flag_class(flag.type).templates
return {"success": True, "data": response.data}
@admins_only
@flags_namespace.doc(
description="Endpoint to delete a specific Flag object",
responses={200: ("Success", "APISimpleSuccessResponse")},
)
def delete(self, flag_id):
flag = Flags.query.filter_by(id=flag_id).first_or_404()
db.session.delete(flag)
db.session.commit()
db.session.close()
return {"success": True}
@admins_only
@flags_namespace.doc(
description="Endpoint to edit a specific Flag object",
responses={
200: ("Success", "FlagDetailedSuccessResponse"),
400: (
"An error occured processing the provided or stored data",
"APISimpleErrorResponse",
),
},
)
def patch(self, flag_id):
flag = Flags.query.filter_by(id=flag_id).first_or_404()
schema = FlagSchema()
req = request.get_json()
response = schema.load(req, session=db.session, instance=flag, partial=True)
if response.errors:
return {"success": False, "errors": response.errors}, 400
db.session.commit()
response = schema.dump(response.data)
db.session.close()
return {"success": True, "data": response.data} |
1,706 | execute add document to collection | from django.contrib import admin
from django.utils.translation import gettext_lazy as _
from filingcabinet.admin import (
CollectionDirectoryAdmin,
CollectionDocumentBaseAdmin,
DocumentBaseAdmin,
DocumentCollectionBaseAdmin,
DocumentPortalAdmin,
PageAdmin,
PageAnnotationAdmin,
)
from filingcabinet.models import (
CollectionDirectory,
CollectionDocument,
DocumentPortal,
Page,
PageAnnotation,
TaggedDocument,
)
from froide.helper.admin_utils import (
ForeignKeyFilter,
TaggitListFilter,
make_choose_object_action,
)
from froide.team.models import Team
from .models import Document, DocumentCollection
from .utils import update_document_index
def METHOD_NAME(admin, request, queryset, action_obj):
for obj in queryset:
CollectionDocument.objects.get_or_create(collection=action_obj, document=obj)
def execute_set_team(admin, request, queryset, action_obj):
queryset.update(team=action_obj)
class DocumentTagsFilter(TaggitListFilter):
tag_class = TaggedDocument
@admin.register(Document)
class DocumentAdmin(DocumentBaseAdmin):
raw_id_fields = DocumentBaseAdmin.raw_id_fields + (
"original",
"foirequest",
"publicbody",
"team",
)
list_filter = DocumentBaseAdmin.list_filter + (
("foirequest", ForeignKeyFilter),
("publicbody", ForeignKeyFilter),
("user", ForeignKeyFilter),
("team", ForeignKeyFilter),
("document_documentcollection", ForeignKeyFilter),
DocumentTagsFilter,
)
actions = DocumentBaseAdmin.actions + ["add_document_to_collection", "set_team"]
add_document_to_collection = make_choose_object_action(
DocumentCollection,
METHOD_NAME,
_("Add documents to collection..."),
)
set_team = make_choose_object_action(
Team, execute_set_team, _("Set team for documents...")
)
def save_model(self, request, obj, form, change):
res = super().save_model(request, obj, form, change)
update_document_index(obj)
return res
@admin.action(description=_("Mark as listed"))
def mark_listed(self, request, queryset):
super().mark_listed(request, queryset)
for doc in queryset:
update_document_index(doc)
@admin.action(description=_("Mark as unlisted"))
def mark_unlisted(self, request, queryset):
super().mark_unlisted(request, queryset)
for doc in queryset:
update_document_index(doc)
@admin.register(Page)
class CustomPageAdmin(PageAdmin):
list_filter = PageAdmin.list_filter + (("document", ForeignKeyFilter),)
@admin.register(PageAnnotation)
class CustomPageAnnotationAdmin(PageAnnotationAdmin):
list_filter = [("page__document", ForeignKeyFilter), "page__number"]
@admin.register(DocumentCollection)
class DocumentCollectionAdmin(DocumentCollectionBaseAdmin):
raw_id_fields = DocumentCollectionBaseAdmin.raw_id_fields + ("team", "foirequests")
actions = list(DocumentCollectionBaseAdmin.actions) + [
"reindex_collection",
"collect_documents_from_foirequests",
]
def reindex_collection(self, request, queryset):
for collection in queryset:
for doc in collection.documents.all():
update_document_index(doc)
def collect_documents_from_foirequests(self, request, queryset):
for collection in queryset:
collection.update_from_foirequests()
@admin.register(CollectionDocument)
class CollectionDocumentAdmin(CollectionDocumentBaseAdmin):
list_filter = CollectionDocumentBaseAdmin.list_filter + (
("document", ForeignKeyFilter),
("collection", ForeignKeyFilter),
("directory", ForeignKeyFilter),
)
actions = list(CollectionDirectoryAdmin.actions) + ["move_to_directory"]
def execute_move_to_directory(self, request, queryset, action_obj):
queryset.update(directory=action_obj)
move_to_directory = make_choose_object_action(
CollectionDirectory,
execute_move_to_directory,
_("Move documents to directory..."),
)
@admin.register(CollectionDirectory)
class CustomCollectionDirectoryAdmin(CollectionDirectoryAdmin):
list_filter = CollectionDirectoryAdmin.list_filter + (
("collection", ForeignKeyFilter),
("user", ForeignKeyFilter),
)
admin.site.register(DocumentPortal, DocumentPortalAdmin) |
1,707 | test to string without checksum | #!/usr/bin/python3
import pytest
from brownie import compile_source
from brownie.exceptions import VirtualMachineError
from brownie.network.transaction import TransactionReceipt
code = """
pragma solidity ^0.6.0;
contract Foo {
fallback () external payable {}
}
"""
def test_to_string(accounts):
    """Can send to a string"""
    tx = accounts[0].transfer("0x14b0Ed2a7C4cC60DD8F676AE44D0831d3c9b2a9E", 10000)
    assert tx.receiver == "0x14b0Ed2a7C4cC60DD8F676AE44D0831d3c9b2a9E"


def METHOD_NAME(accounts):
    """Can send to a non-checksummed (lowercase) address string"""
    to = "0x14b0Ed2a7C4cC60DD8F676AE44D0831d3c9b2a9E".lower()
    tx = accounts[0].transfer(to, 10000)
    assert tx.receiver.lower() == to
def test_to_account(accounts):
    """Can send to an Account object"""
    tx = accounts[0].transfer(accounts[1], 10000)
    assert str(tx.receiver) == accounts[1].address


def test_to_contract(accounts, tester):
    """Can send to a Contract object"""
    tx = accounts[0].transfer(tester, 0, data=tester.signatures["doNothing"])
    assert str(tx.receiver) == tester.address
    # more than the bare 21000 transfer gas => the call actually ran code
    assert tx.gas_used > 21000


def test_to_contract_fallback(accounts, tester):
    """A plain value transfer to a contract triggers its fallback function"""
    tx = accounts[0].transfer(tester, "1 ether")
    assert str(tx.receiver) == tester.address
    assert tx.gas_used > 21000


def test_returns_tx_on_success(accounts):
    """returns a TransactionReceipt on success"""
    tx = accounts[0].transfer(accounts[1], 1000)
    assert type(tx) == TransactionReceipt


def test_raises_on_revert(accounts, tester):
    """raises on revert"""
    with pytest.raises(VirtualMachineError):
        accounts[0].transfer(tester, 0)


def test_returns_tx_on_revert_in_console(accounts, tester, console_mode):
    """returns a tx on revert in console"""
    tx = accounts[0].transfer(tester, 0)
    assert type(tx) == TransactionReceipt
    assert tx.status == 0


def test_allow_revert(accounts, tester, config):
    """allow_revert=False raises instead of broadcasting a reverting tx"""
    with pytest.raises(VirtualMachineError):
        accounts[1].transfer(tester, 0)
    assert accounts[1].nonce == 1
    with pytest.raises(ValueError):
        accounts[1].transfer(tester, 0, allow_revert=False)
    # nonce unchanged — the second transaction was never broadcast
    assert accounts[1].nonce == 1


def test_nonce(accounts):
    """nonces increment properly"""
    assert accounts[1].nonce == 0
    accounts[1].transfer(accounts[2], 1000)
    # receiving value does not change the nonce, sending does
    assert accounts[2].nonce == 0
    assert accounts[1].nonce == 1
def test_balance_int(accounts, web3, chain):
    """transfers use the correct balance"""
    balance = accounts[0].balance()
    assert web3.eth.get_balance(accounts[0].address) == balance
    accounts[1].transfer(accounts[0], 1000)
    assert accounts[0].balance() == balance + 1000
    # resetting the chain restores the original balance
    chain.reset()
    assert web3.eth.get_balance(accounts[0].address) == balance


def test_balance_wei(accounts, web3, chain):
    """transfer balances are converted using wei"""
    balance = accounts[0].balance()
    assert web3.eth.get_balance(accounts[0].address) == balance
    accounts[1].transfer(accounts[0], "1 ether")
    assert accounts[0].balance() == balance + 1000000000000000000
    chain.reset()
    assert web3.eth.get_balance(accounts[0].address) == balance


def test_gas_price_manual(accounts):
    """gas price is set correctly when specified in the call"""
    balance = accounts[0].balance()
    tx = accounts[0].transfer(accounts[1], 0, gas_price=100)
    assert tx.gas_price == 100
    assert accounts[0].balance() == balance - (100 * 21000)


@pytest.mark.parametrize("auto", (True, False, None, "auto"))
def test_gas_price_automatic(accounts, config, web3, auto):
    """gas price is set correctly using web3.eth.gas_price"""
    config.active_network["settings"]["gas_price"] = auto
    balance = accounts[0].balance()
    tx = accounts[0].transfer(accounts[1], 0)
    assert tx.gas_price == web3.eth.gas_price
    assert accounts[0].balance() == balance - (tx.gas_price * 21000)


def test_gas_price_config(accounts, config):
    """gas price is set correctly from the config"""
    config.active_network["settings"]["gas_price"] = 50
    balance = accounts[0].balance()
    tx = accounts[0].transfer(accounts[1], 0)
    assert tx.gas_price == 50
    assert accounts[0].balance() == balance - (50 * 21000)


def test_gas_price_zero(accounts, config):
    """a zero gas price costs only the transferred value"""
    config.active_network["settings"]["gas_price"] = 0
    balance = accounts[0].balance()
    tx = accounts[0].transfer(accounts[1], 1337)
    assert tx.gas_price == 0
    assert accounts[0].balance() == balance - 1337


def test_gas_limit_manual(accounts):
    """gas limit is set correctly when specified in the call"""
    tx = accounts[0].transfer(accounts[1], 1000, gas_limit=100000)
    assert tx.gas_limit == 100000
    assert tx.gas_used == 21000


def test_gas_buffer_manual(accounts, config):
    """gas limit is set correctly when specified in the call"""
    config.active_network["settings"]["gas_limit"] = None
    foo = compile_source(code).Foo.deploy({"from": accounts[0]})
    tx = accounts[0].transfer(foo, 1000, gas_buffer=1.337)
    assert int(tx.gas_used * 1.337) == tx.gas_limit


def test_gas_buffer_send_to_eoa(accounts, config):
    """gas limit is set correctly when specified in the call"""
    config.active_network["settings"]["gas_limit"] = None
    # a plain value transfer to an EOA always costs exactly 21000 gas,
    # so the buffer must not inflate the limit
    tx = accounts[0].transfer(accounts[1], 1000, gas_buffer=1.337)
    assert tx.gas_limit == 21000


@pytest.mark.parametrize("gas_limit", (True, False, None, "auto"))
@pytest.mark.parametrize("gas_buffer", (1, 1.25))
def test_gas_limit_automatic(accounts, config, gas_limit, gas_buffer):
    """gas limit is set correctly using web3.eth.estimate_gas"""
    config.active_network["settings"]["gas_limit"] = gas_limit
    config.active_network["settings"]["gas_buffer"] = gas_buffer
    foo = compile_source(code).Foo.deploy({"from": accounts[0]})
    tx = accounts[0].transfer(foo, 1000)
    assert int(tx.gas_used * gas_buffer) == tx.gas_limit


def test_gas_limit_config(accounts, config):
    """gas limit is set correctly from the config"""
    config.active_network["settings"]["gas_limit"] = 50000
    tx = accounts[0].transfer(accounts[1], 1000)
    assert tx.gas_limit == 50000
    assert tx.gas_used == 21000
    # restore automatic gas limits for subsequent tests
    config.active_network["settings"]["gas_limit"] = False
def test_nonce_manual(accounts):
    """returns a Contract instance on successful deployment with the correct nonce"""
    assert accounts[0].nonce == 0
    tx = accounts[0].transfer(accounts[1], 1000, nonce=0)
    assert tx.nonce == 0
    assert accounts[0].nonce == 1
    tx = accounts[0].transfer(accounts[1], 1000, nonce=1)
    assert tx.nonce == 1


# this behaviour changed in ganache7, if the test suite is updated to work
# in hardhat we should still include it
# @pytest.mark.parametrize("nonce", (1, -1, 15))
# def test_raises_on_wrong_nonce(accounts, nonce):
#     """raises if invalid manual nonce is provided"""
#     assert accounts[0].nonce == 0
#     with pytest.raises(ValueError):
#         accounts[0].transfer(accounts[1], 1000, nonce=nonce)


def test_data(accounts):
    """transaction data is set correctly"""
    tx = accounts[0].transfer(accounts[1], 1000)
    assert tx.input == "0x"
    tx = accounts[0].transfer(accounts[1], 1000, data="0x1234")
    assert tx.input == "0x1234"


def test_localaccount(accounts):
    """locally-added accounts can receive and send value"""
    local = accounts.add()
    assert local.balance() == 0
    accounts[0].transfer(local, "10 ether")
    assert local.balance() == "10 ether"
    local.transfer(accounts[1], "1 ether")
    assert accounts[1].balance() == "1001 ether"
    assert local.nonce == 1


def test_deploy_via_transfer(accounts, web3):
    """a transfer with data and no receiver deploys a contract"""
    bytecode = "0x3660006000376110006000366000732157a7894439191e520825fe9399ab8655e0f7085af41558576110006000f3"  # NOQA: E501
    tx = accounts[0].transfer(data=bytecode)
    assert tx.contract_name == "UnknownContract"
    assert web3.eth.get_code(tx.contract_address)


def test_gas_limit_and_buffer(accounts):
    """gas_limit and gas_buffer are mutually exclusive"""
    with pytest.raises(ValueError):
        accounts[0].transfer(accounts[1], 1000, gas_limit=21000, gas_buffer=1.3)
import urllib.parse
from django.contrib.auth.models import User
from django.test import TestCase
from dfirtrack_main.models import Systemstatus
class SystemstatusAPIViewTestCase(TestCase):
    """systemstatus API view tests"""

    @classmethod
    def setUpTestData(cls):
        # create object
        Systemstatus.objects.create(systemstatus_name='systemstatus_api_1')
        # create user
        User.objects.create_user(
            username='testuser_systemstatus_api', password='aCTVRIdJ4cyVSkYiJKrM'
        )

    def _login(self):
        """Log the shared test user in (extracted to avoid repeating credentials)."""
        self.client.login(
            username='testuser_systemstatus_api', password='aCTVRIdJ4cyVSkYiJKrM'
        )

    def _detail_url(self):
        """Return the detail endpoint URL (with trailing slash) for the fixture object."""
        systemstatus_api_1 = Systemstatus.objects.get(
            systemstatus_name='systemstatus_api_1'
        )
        return '/api/systemstatus/' + str(systemstatus_api_1.systemstatus_id) + '/'

    def test_systemstatus_list_api_unauthorized(self):
        """unauthorized access is forbidden"""
        response = self.client.get('/api/systemstatus/')
        self.assertEqual(response.status_code, 401)

    def test_systemstatus_list_api_method_get(self):
        """GET is allowed"""
        self._login()
        response = self.client.get('/api/systemstatus/')
        self.assertEqual(response.status_code, 200)

    def test_systemstatus_list_api_method_post(self):
        """POST is forbidden"""
        self._login()
        poststring = {"systemstatus_name": "systemstatus_api_2"}
        response = self.client.post('/api/systemstatus/', data=poststring)
        self.assertEqual(response.status_code, 405)

    def test_systemstatus_list_api_redirect(self):
        """test redirect with appending slash"""
        self._login()
        destination = urllib.parse.quote('/api/systemstatus/', safe='/')
        # request without the trailing slash must 301-redirect to it
        response = self.client.get('/api/systemstatus', follow=True)
        self.assertRedirects(
            response, destination, status_code=301, target_status_code=200
        )

    def test_systemstatus_detail_api_unauthorized(self):
        """unauthorized access is forbidden"""
        response = self.client.get(self._detail_url())
        self.assertEqual(response.status_code, 401)

    def test_systemstatus_detail_api_method_get(self):
        """GET is allowed"""
        self._login()
        response = self.client.get(self._detail_url())
        self.assertEqual(response.status_code, 200)

    def test_systemstatus_detail_api_method_delete(self):
        """DELETE is forbidden"""
        self._login()
        response = self.client.delete(self._detail_url())
        self.assertEqual(response.status_code, 405)

    def test_systemstatus_detail_api_method_put(self):
        """PUT is forbidden"""
        self._login()
        destination = urllib.parse.quote(self._detail_url(), safe='/')
        putstring = {"systemstatus_name": "new_systemstatus_api_1"}
        response = self.client.put(
            destination, data=putstring, content_type='application/json'
        )
        self.assertEqual(response.status_code, 405)

    def METHOD_NAME(self):
        """test redirect with appending slash"""
        self._login()
        destination = urllib.parse.quote(self._detail_url(), safe='/')
        # request without the trailing slash must 301-redirect to it
        response = self.client.get(self._detail_url().rstrip('/'), follow=True)
        self.assertRedirects(
            response, destination, status_code=301, target_status_code=200
        )
# Copyright © 2012-2023 Forschungszentrum Jülich GmbH
# SPDX-License-Identifier: LGPL-3.0-or-later
import math
from jupedsim_visualizer.geometry import Geometry
from jupedsim_visualizer.geometry_widget import RenderWidget
from jupedsim_visualizer.trajectory import Trajectory
from PySide6.QtCore import QSignalBlocker, Qt, QTimer
from PySide6.QtGui import QFont, QPaintEvent
from PySide6.QtStateMachine import QState, QStateMachine
from PySide6.QtWidgets import (
QApplication,
QHBoxLayout,
QLabel,
QPushButton,
QSlider,
QSpinBox,
QStyle,
QVBoxLayout,
QWidget,
)
from jupedsim import RoutingEngine
from jupedsim.recording import Recording
class PlayerControlWidget(QWidget):
    """Transport bar for trajectory replay: play/seek buttons, a playback
    speed selector, a frame slider and a clock-style time display."""

    def __init__(self, parent=None):
        QWidget.__init__(self, parent)
        self.play = QPushButton(
            QApplication.style().standardIcon(
                QStyle.StandardPixmap.SP_MediaPlay
            ),
            "",
        )
        # checkable so the button itself reflects playing/paused state
        self.play.setCheckable(True)
        self.begin = QPushButton(
            QApplication.style().standardIcon(
                QStyle.StandardPixmap.SP_MediaSkipBackward
            ),
            "",
        )
        self.backward = QPushButton(
            QApplication.style().standardIcon(
                QStyle.StandardPixmap.SP_MediaSeekBackward
            ),
            "",
        )
        self.forward = QPushButton(
            QApplication.style().standardIcon(
                QStyle.StandardPixmap.SP_MediaSeekForward
            ),
            "",
        )
        self.end = QPushButton(
            QApplication.style().standardIcon(
                QStyle.StandardPixmap.SP_MediaSkipForward
            ),
            "",
        )
        # playback speed multiplier, 1x .. 10x
        self.speed_selector = QSpinBox()
        self.speed_selector.setRange(1, 10)
        self.speed_selector.setValue(1)
        self.speed_selector.setSuffix("x")
        self.slider = QSlider()
        self.slider.setOrientation(Qt.Orientation.Horizontal)
        # NOTE(review): placeholder maximum — the owning widget resets it to
        # the recording's frame count after construction; confirm.
        self.slider.setMaximum(60)
        self.slider.setPageStep(1)
        self.slider.setTracking(True)
        self.replay_time = QLabel("00:00:00.000")
        # monospace keeps the clock from jittering as the digits change
        font = QFont("monospace")
        font.setStyleHint(QFont.StyleHint.Monospace)
        self.replay_time.setFont(font)
        row1 = QHBoxLayout()
        row1.addStretch()
        row1.addWidget(self.begin)
        row1.addWidget(self.backward)
        row1.addWidget(self.play)
        row1.addWidget(self.forward)
        row1.addWidget(self.end)
        row1.addWidget(self.speed_selector)
        row1.addStretch()
        row2 = QHBoxLayout()
        row2.addWidget(self.slider, 1)
        row2.addWidget(self.replay_time)
        layout = QVBoxLayout()
        layout.addLayout(row1)
        layout.addLayout(row2)
        self.setLayout(layout)
        self._build_state_machine()

    def _build_state_machine(self) -> None:
        """Build the paused/playing state machine that keeps the play button's
        checked state in sync with any interaction that pauses playback."""
        sm = QStateMachine(self)
        replay_paused = QState()
        sm.addState(replay_paused)
        replay_playing = QState()
        replay_playing.entered.connect(lambda: self.play.setChecked(True))
        replay_playing.exited.connect(lambda: self.play.setChecked(False))
        sm.addState(replay_playing)
        sm.setInitialState(replay_paused)
        replay_paused.addTransition(self.play.clicked, replay_playing)
        replay_playing.addTransition(self.play.clicked, replay_paused)
        # any manual navigation (seek, skip, slider drag) pauses playback
        replay_playing.addTransition(self.forward.clicked, replay_paused)
        replay_playing.addTransition(self.backward.clicked, replay_paused)
        replay_playing.addTransition(self.begin.clicked, replay_paused)
        replay_playing.addTransition(self.end.clicked, replay_paused)
        replay_playing.addTransition(self.slider.valueChanged, replay_paused)
        sm.start()
        self.state_machine = sm
        self.replay_paused = replay_paused
        self.replay_playing = replay_playing

    def update_replay_time(self, time_in_seconds: float) -> None:
        """Format *time_in_seconds* as HH:MM:SS.mmm and show it in the label."""
        hh = int(math.floor(time_in_seconds / 3600))
        time_in_seconds = time_in_seconds - hh * 3600
        mm = int(math.floor(time_in_seconds / 60))
        time_in_seconds = time_in_seconds - mm * 60
        ss = int(math.floor(time_in_seconds))
        time_in_seconds = time_in_seconds - ss
        ms = int(time_in_seconds * 1000)
        self.replay_time.setText(f"{hh:02d}:{mm:02d}:{ss:02d}.{ms:03d}")
class ReplayWidget(QWidget):
    """Playback widget combining the 3D render view with transport controls."""

    def __init__(
        self,
        navi: RoutingEngine,
        rec: Recording,
        geo: Geometry,
        trajectory: Trajectory,
        parent=None,
    ):
        QWidget.__init__(self, parent)
        self.rec = rec
        self.trajectory = trajectory
        # Fix: initialise the playback timer so play(False) cannot raise
        # AttributeError before playback was ever started.
        self.timer = None
        self.control = PlayerControlWidget(parent=self)
        self.render_widget = RenderWidget(
            geo, navi, [geo, trajectory], parent=self
        )
        self.geo = geo
        layout = QVBoxLayout()
        layout.addWidget(self.render_widget, 1)
        layout.addWidget(self.control)
        self.setLayout(layout)
        self.control.play.toggled.connect(self.play)
        self.control.forward.clicked.connect(self.frame_forward)
        self.control.backward.clicked.connect(self.frame_backward)
        self.control.slider.setMaximum(self.rec.num_frames - 1)
        self.control.slider.valueChanged.connect(self.METHOD_NAME)
        self.control.begin.clicked.connect(lambda: self.METHOD_NAME(0))
        self.control.end.clicked.connect(
            lambda: self.METHOD_NAME(self.trajectory.num_frames - 1)
        )

    def _sync_after_frame_change(self):
        """Update the clock, re-render, and move the slider without
        re-triggering its valueChanged handler."""
        self.control.update_replay_time(
            self.trajectory.current_index * (1 / self.rec.fps)
        )
        self.render_widget.render()
        with QSignalBlocker(self.control.slider):
            self.control.slider.setValue(self.trajectory.current_index)

    def frame_forward(self):
        """Advance the trajectory by the currently selected speed factor."""
        self.trajectory.advance_frame(self.control.speed_selector.value())
        self._sync_after_frame_change()

    def frame_backward(self):
        """Step the trajectory backwards by the selected speed factor."""
        self.trajectory.advance_frame(-self.control.speed_selector.value())
        self._sync_after_frame_change()

    def METHOD_NAME(self, index: int):
        """Jump to the absolute frame *index* and refresh the view."""
        self.trajectory.METHOD_NAME(index)
        self._sync_after_frame_change()

    def play(self, checked: bool):
        """Start or stop continuous playback at the recording's frame rate."""
        if checked:
            self.timer = QTimer()
            self.timer.setInterval(int(1000.0 / self.rec.fps))
            self.timer.timeout.connect(self.frame_forward)
            self.timer.start()
        elif self.timer is not None:
            self.timer.stop()

    def render(self):
        self.render_widget.render()

    def paintEvent(self, event: QPaintEvent) -> None:
        self.render()
        return super().paintEvent(event)
# Copyright 2022, The TensorFlow Federated Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Concurrency utilities for use with Python `async`."""
import asyncio
import sys
import threading
from tensorflow_federated.python.common_libs import tracing
class SharedAwaitable:
  """Wraps an awaitable so that it can be ``await``-ed any number of times.

  A plain coroutine object may only be awaited once; a second ``await``
  raises ``RuntimeError: cannot reuse already awaited coroutine``. Wrapping
  the coroutine in a ``SharedAwaitable`` runs it a single time — lazily, on
  the first ``await`` — and then hands the cached result (or re-raises the
  cached exception with its original traceback) to every awaiter:

  ```python
  foo_coroutine = SharedAwaitable(foo())

  async def fn_one():
    x = await foo_coroutine

  async def fn_two():
    x = await foo_coroutine
  ```
  """

  def __init__(self, awaitable):
    """Creates a new `SharedAwaitable` from an existing `awaitable`."""
    self._awaitable = awaitable
    # Created lazily on first await (an event loop must be running then).
    self._done = None
    self._result = None
    self._exc_info = None

  async def _run_once(self):
    # Drive the wrapped awaitable exactly once, recording either the result
    # or the full exception info for later delivery to every awaiter.
    try:
      self._result = await self._awaitable
    except:  # pylint: disable=bare-except
      self._exc_info = sys.exc_info()
    finally:
      self._done.set()

  async def _deliver(self):
    # Block until the single execution has finished, then replay its outcome.
    await self._done.wait()
    if self._exc_info is not None:
      _, error, traceback = self._exc_info
      raise error.with_traceback(traceback)  # pytype: disable=attribute-error
    return self._result

  def __await__(self):
    # The first awaiter kicks off the computation in a background task;
    # everyone (including the first) then waits on the shared event.
    if self._done is None:
      self._done = asyncio.Event()
      asyncio.create_task(self._run_once())
    return self._deliver().__await__()
class AsyncThreadRunner:
  """Class which bridges async and synchronous interfaces.

  This class serves as a resource and logic container, starting an event loop
  in a separate thread and managing dispatching of coroutine functions to this
  event loop in both synchronous and asynchronous interfaces.

  There are two main uses of this class. First, this class can be used to wrap
  interfaces which use `asyncio` in a synchronous 'run this coroutine'
  interface in a manner which is compatible with integrating with other async
  libraries. This feature is generally useful for backwards-compatibility (e.g.,
  introducing asyncio in some component which sits on top of the synchronous
  function calls this interface exposes), but should generally be viewed as
  suboptimal--it is preferable in a situation like this to simply expose the
  underlying async interfaces.

  Second, this class can be used to delegate asynchronous work from one thread
  to another, using its asynchronous interface.
  """

  def __init__(self):
    self._event_loop = asyncio.new_event_loop()
    # Propagate tracing context into every task created on this loop.
    self._event_loop.set_task_factory(
        tracing.propagate_trace_context_task_factory
    )

    def target_fn():
      self._event_loop.run_forever()

    # Daemon thread so a forgotten runner cannot block interpreter exit.
    self._thread = threading.Thread(target=target_fn, daemon=True)
    self._thread.start()

    # Captured as a plain function (not a bound method) so that __del__ does
    # not need to resolve attributes during interpreter shutdown.
    def finalizer(loop, thread):
      loop.call_soon_threadsafe(loop.stop)
      thread.join()

    self._finalizer = finalizer

  def __del__(self):
    # Stop the loop and join the worker thread on garbage collection.
    self._finalizer(self._event_loop, self._thread)

  def METHOD_NAME(self, coro):
    """Runs coroutine in the managed event loop, returning the result.

    Blocks the calling thread until the coroutine completes.
    """
    future = asyncio.run_coroutine_threadsafe(coro, self._event_loop)
    return future.result()

  async def await_coro_and_return_result(self, coro):
    """Runs coroutine in the managed event loop, returning the result.

    Wraps the concurrent.futures.Future so it can be awaited without
    blocking the caller's own event loop.
    """
    return await asyncio.wrap_future(
        asyncio.run_coroutine_threadsafe(coro, self._event_loop)
    )
from __future__ import absolute_import, unicode_literals
import pytest
import logging
from psd_tools.api.psd_image import PSDImage
from psd_tools.terminology import Enum
from ..utils import full_name
logger = logging.getLogger(__name__)

# Fixture PSD with one layer per effect type; opened once at import time and
# treated as read-only by every test below.
LAYER_EFFECTS = PSDImage.open(full_name('layer_effects.psd'))


@pytest.fixture
def fixture():
    # Sharing the module-level PSD is safe because tests only inspect it.
    yield LAYER_EFFECTS
def test_effects(fixture):
    """Effects container exposes scale/enabled and iterates enabled effects."""
    assert isinstance(fixture[0].effects.scale, float)
    assert fixture[0].effects.enabled is True
    for layer in fixture:
        assert layer.__repr__()
    for effect in fixture[0].effects:
        assert effect.enabled is True


def test_bevel(fixture):
    """Bevel effect attributes are decoded with the expected values."""
    effect = fixture[1].effects[0]
    # bevel has separate highlight/shadow modes instead of one blend_mode
    assert not hasattr(effect, 'blend_mode')
    assert effect.altitude == 30.0
    assert effect.angle == 90.0
    assert effect.anti_aliased is False
    assert effect.bevel_style == Enum.InnerBevel
    assert effect.bevel_type == Enum.SoftMatte
    assert effect.contour
    assert effect.depth == 100.0
    assert effect.direction == Enum.StampIn
    assert effect.enabled is True
    assert effect.highlight_color
    assert effect.highlight_mode == Enum.Screen
    assert effect.highlight_opacity == 50.0
    assert effect.shadow_color
    assert effect.shadow_mode == Enum.Multiply
    assert effect.shadow_opacity == 50.0
    assert effect.size == 41.0
    assert effect.soften == 0.0
    assert effect.use_global_light is True
    assert effect.use_shape is False
    assert effect.use_texture is False
def test_emboss(fixture):
    """Emboss effect attributes are decoded with the expected values."""
    effect = fixture[2].effects[0]
    assert not hasattr(effect, 'blend_mode')
    assert effect.altitude == 30.0
    assert effect.angle == 90.0
    assert effect.anti_aliased is False
    assert effect.bevel_style == Enum.Emboss
    assert effect.bevel_type == Enum.SoftMatte
    assert effect.contour
    assert effect.depth == 100.0
    assert effect.direction == Enum.StampIn
    assert effect.enabled is True
    assert effect.highlight_color
    assert effect.highlight_mode == Enum.Screen
    assert effect.highlight_opacity == 50.0
    assert effect.shadow_color
    assert effect.shadow_mode == Enum.Multiply
    assert effect.shadow_opacity == 50.0
    assert effect.size == 41.0
    assert effect.soften == 0.0
    assert effect.use_global_light is True
    assert effect.use_shape is False
    assert effect.use_texture is False


def test_outer_glow(fixture):
    """Outer glow effect attributes are decoded with the expected values."""
    effect = fixture[3].effects[0]
    assert effect.anti_aliased is False
    assert effect.blend_mode == Enum.Screen
    assert effect.choke == 0.0
    assert effect.color
    assert effect.contour
    assert effect.glow_type == Enum.SoftMatte
    assert effect.noise == 0.0
    assert effect.opacity == 35.0
    assert effect.quality_jitter == 0.0
    assert effect.quality_range == 50.0
    assert effect.size == 41.0
    assert effect.spread == 0.0
    # solid-color glow, so no gradient is present
    assert effect.gradient is None


def test_inner_glow(fixture):
    """Inner glow effect attributes are decoded with the expected values."""
    effect = fixture[4].effects[0]
    assert effect.anti_aliased is False
    assert effect.blend_mode == Enum.Screen
    assert effect.choke == 0.0
    assert effect.color
    assert effect.contour
    assert effect.glow_source == Enum.EdgeGlow
    assert effect.glow_type == Enum.SoftMatte
    assert effect.noise == 0.0
    assert effect.opacity == 46.0
    assert effect.quality_jitter == 0.0
    assert effect.quality_range == 50.0
    assert effect.size == 18.0
    assert effect.gradient is None


def test_inner_shadow(fixture):
    """Inner shadow effect attributes are decoded with the expected values."""
    effect = fixture[5].effects[0]
    assert effect.angle == 90.0
    assert effect.anti_aliased is False
    assert effect.blend_mode == Enum.Multiply
    assert effect.choke == 0.0
    assert effect.color
    assert effect.contour
    assert effect.distance == 18.0
    assert effect.noise == 0.0
    assert effect.opacity == 35.0
    assert effect.size == 41.0
    assert effect.use_global_light is True
def METHOD_NAME(fixture):
    """Color overlay exposes blend mode, color and opacity."""
    effect = fixture[6].effects[0]
    assert effect.blend_mode == Enum.Normal
    assert effect.color
    assert effect.opacity == 100.0


def test_drop_shadow(fixture):
    """Drop shadow effect attributes are decoded with the expected values."""
    effect = fixture[7].effects[0]
    assert effect.angle == 90.0
    assert effect.anti_aliased is False
    assert effect.blend_mode == Enum.Multiply
    assert effect.choke == 0.0
    assert effect.color
    assert effect.contour
    assert effect.layer_knocks_out is True
    assert effect.distance == 18.0
    assert effect.noise == 0.0
    assert effect.opacity == 35.0
    assert effect.size == 41.0
    assert effect.use_global_light is True


def test_gradient_overlay(fixture):
    """Gradient overlay effect attributes are decoded with the expected values."""
    effect = fixture[8].effects[0]
    assert effect.aligned is True
    assert effect.angle == 87.0
    assert effect.blend_mode == Enum.Normal
    assert effect.dithered is False
    assert effect.gradient
    assert effect.offset
    assert effect.opacity == 100.0
    assert effect.reversed is False
    assert effect.scale == 100.0
    assert effect.type == Enum.Linear


def test_pattern_overlay(fixture):
    """Pattern overlay effect attributes are decoded with the expected values."""
    effect = fixture[9].effects[0]
    assert effect.aligned is True
    assert effect.blend_mode == Enum.Normal
    assert effect.opacity == 100.0
    assert effect.pattern
    assert effect.phase
    assert effect.scale == 100.0


def test_stroke(fixture):
    """Stroke effect attributes are decoded with the expected values."""
    effect = fixture[10].effects[0]
    assert effect.blend_mode == Enum.Normal
    assert effect.fill_type == Enum.SolidColor
    assert effect.opacity == 100.0
    assert effect.overprint is False
    assert effect.position == Enum.OutsetFrame
    assert effect.size == 6.0
    # solid-color fill, so neither gradient nor pattern is present
    assert effect.color
    assert effect.gradient is None
    assert effect.pattern is None


def test_satin(fixture):
    """Satin effect attributes are decoded with the expected values."""
    effect = fixture[11].effects[0]
    assert effect.angle == -60.0
    assert effect.anti_aliased is True
    assert effect.blend_mode == Enum.Multiply
    assert effect.color
    assert effect.contour
    assert effect.distance == 20.0
    assert effect.inverted is True
    assert effect.opacity == 50.0
    assert effect.size == 35.0
##########################################################################
#
# Copyright (c) 2012, Image Engine Design Inc. All rights reserved.
# Copyright (c) 2012, John Haddon. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# * Neither the name of John Haddon nor the names of
# any other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import unittest
import weakref
import GafferUI
import GafferTest
import GafferUITest
class MultiLineTextWidgetTest( GafferUITest.TestCase ) :
    """Behavioural tests for GafferUI.MultiLineTextWidget."""

    def testLifespan( self ) :
        # the widget must be collectable once the last reference is dropped
        w = GafferUI.MultiLineTextWidget()
        r = weakref.ref( w )
        self.assertTrue( r() is w )
        del w
        self.assertTrue( r() is None )

    def testEditable( self ) :
        w = GafferUI.MultiLineTextWidget( editable=False )
        self.assertEqual( w.getEditable(), False )
        w.setEditable( True )
        self.assertEqual( w.getEditable(), True )

    def METHOD_NAME( self ) :
        # textChangedSignal fires exactly once per actual text change
        w = GafferUI.MultiLineTextWidget()
        c = GafferTest.CapturingSlot( w.textChangedSignal() )
        w.setText( "hi" )
        self.assertEqual( len( c ), 1 )
        self.assertEqual( c[0], ( w, ) )
        # shouldn't do anything as text is the same
        w.setText( "hi" )
        self.assertEqual( len( c ), 1 )
        self.assertEqual( c[0], ( w, ) )

    def testWrapMode( self ) :
        w = GafferUI.MultiLineTextWidget()
        self.assertEqual( w.getWrapMode(), w.WrapMode.WordOrCharacter )
        for wm in w.WrapMode.values() :
            w.setWrapMode( wm )
            self.assertEqual( w.getWrapMode(), wm )

    def testCursorPosition( self ) :
        w = GafferUI.MultiLineTextWidget()
        self.assertEqual( w.getCursorPosition(), 0 )
        # setting text does not move the cursor
        w.setText( "hello" )
        self.assertEqual( w.getCursorPosition(), 0 )
        w.setCursorPosition( 1 )
        self.assertEqual( w.getCursorPosition(), 1 )

    def testInsertText( self ) :
        # insertText() inserts at the current cursor position
        w = GafferUI.MultiLineTextWidget()
        w.setText( "12" )
        w.setCursorPosition( 1 )
        w.insertText( "abc" )
        self.assertEqual( w.getText(), "1abc2" )

    def testFixedLineHeight( self ) :
        window = GafferUI.Window()
        widget = GafferUI.MultiLineTextWidget()
        window.addChild( widget )
        window.setVisible( True )
        # initial value
        widget.setFixedLineHeight( 5 )
        oldHeight = widget.size().y
        # changing initial value
        widget.setFixedLineHeight( 2 )
        self.waitForIdle( 1000 )
        newHeight = widget.size().y
        # checking if the geometry has been updated for the new line height
        self.assertEqual( newHeight == oldHeight, False )

    def testErrored( self ) :
        w = GafferUI.MultiLineTextWidget()
        self.assertEqual( w.getErrored(), False )
        w.setErrored( True )
        self.assertEqual( w.getErrored(), True )
        w.setErrored( False )
        self.assertEqual( w.getErrored(), False )

    def testRole( self ) :
        w = GafferUI.MultiLineTextWidget()
        self.assertEqual( w.getRole(), w.Role.Text )
        w.setRole( w.Role.Code )
        self.assertEqual( w.getRole(), w.Role.Code )
        w.setRole( w.Role.Text )
        self.assertEqual( w.getRole(), w.Role.Text )
        # the role can also be supplied at construction time
        w = GafferUI.MultiLineTextWidget( role = w.Role.Code)
        self.assertEqual( w.getRole(), w.Role.Code )
if __name__ == "__main__":
    unittest.main()
"""
init code for run falcon API
"""
import json
import datetime
import falcon
from enum import Enum
from itsdangerous import JSONWebSignatureSerializer, BadSignature
class MonitorrentJSONEncoder(json.JSONEncoder):
    """
    JSON encoder that serialises datetime/date/time values as ISO 8601
    strings and Enum members as their ``str()`` representation.
    """
    # pylint: disable=E0202
    # more info https://github.com/PyCQA/pylint/issues/414
    def default(self, o):
        """Return a JSON-serialisable representation of *o*.

        Falls back to the base implementation (which raises TypeError)
        for unsupported types.
        """
        # datetime.datetime is a subclass of datetime.date, and isoformat()
        # is the same method either way, so the original datetime-only
        # behaviour is preserved while plain dates/times are now supported.
        if isinstance(o, (datetime.datetime, datetime.date, datetime.time)):
            return o.isoformat()
        if isinstance(o, Enum):
            return str(o)
        return super(MonitorrentJSONEncoder, self).default(o)
class MonitorrentRequest(falcon.Request):
    """
    support for json in request
    """
    # parsed request body; populated by JSONTranslator.process_resource
    json = None
class MonitorrentResponse(falcon.Response):
    """
    support for json in response
    """
    # JSON payload to serialize into the response body; consumed by
    # JSONTranslator.process_response, ``None`` means "no JSON body".
    json = None
# noinspection PyMethodMayBeStatic,PyMethodMayBeStatic,PyUnusedLocal
class JSONTranslator(object):
    """
    Falcon middleware that parses JSON request bodies and serializes
    JSON response bodies.
    """
    # pylint: disable=W0613
    def process_resource(self, req, resp, resource, params):
        """
        Populate the json property on the request from its body.

        :type req: MonitorrentRequest
        :type resp: MonitorrentResponse
        """
        # Nothing to parse for body-less requests.
        if not req.content_length:
            return
        raw_body = req.stream.read()
        try:
            req.json = json.loads(raw_body.decode('utf-8'))
        except (ValueError, UnicodeDecodeError):
            raise falcon.HTTPBadRequest('Malformed JSON',
                                        'Could not decode the request body. The '
                                        'JSON was incorrect or not encoded as '
                                        'UTF-8.')

    # pylint: disable=W0613
    def process_response(self, req, resp, resource):
        """
        Serialize the json property of the response into its body.

        :type req: MonitorrentRequest
        :type resp: MonitorrentResponse
        """
        if resp.json is None:
            return
        resp.body = json.dumps(resp.json, cls=MonitorrentJSONEncoder, ensure_ascii=False)
# noinspection PyMethodMayBeStatic,PyMethodMayBeStatic,PyUnusedLocal
class AuthMiddleware(object):
    """
    falcon middleware to authenticate requests over a signed JWT cookie
    """
    # Name of the cookie that carries the signed token.
    cookie_name = 'jwt'
    # itsdangerous JSONWebSignatureSerializer; configured via init().
    serializer = None
    # Expected token payload; configured via init().
    token = None
    # Optional classmethod returning whether auth is enforced; configured via init().
    auth_enabled = None

    # pylint: disable=W0613
    def process_resource(self, req, resp, resource, params):
        """
        Validate auth before the request is handled. Resources marked with
        the no_auth decorator skip authentication; otherwise a request
        without a valid JWT token gets a 401 response.
        """
        if getattr(resource, '__no_auth__', False):
            return
        if not self.validate_auth(req):
            raise falcon.HTTPUnauthorized('Authentication required', 'AuthCookie is not specified', None)

    @classmethod
    def validate_auth(cls, req):
        """check if auth_enabled and JWT token from request is valid"""
        auth_enabled = cls.auth_enabled
        # When auth is explicitly disabled, every request is allowed through.
        if auth_enabled is not None and not auth_enabled():
            return True
        jwt = req.cookies.get(cls.cookie_name, None)
        if jwt is None:
            return False
        try:
            # The cookie must deserialize (signature check) to the expected token.
            value = cls.serializer.loads(jwt)
            return value == cls.token
        except BadSignature:
            return False

    @classmethod
    def METHOD_NAME(cls, resp):
        """generate JWT token and write it to response"""
        value = cls.serializer.dumps(cls.token).decode()
        resp.set_cookie(cls.cookie_name, value, path='/', secure=False)

    @classmethod
    def logout(cls, resp):
        """expire JWT token cookie"""
        # Setting the expiry to the epoch makes the browser drop the cookie.
        resp.set_cookie(cls.cookie_name, "", path='/', secure=False,
                        expires=datetime.datetime.utcfromtimestamp(0))

    @classmethod
    def init(cls, secret_key, token, auth_enabled):
        """init middleware"""
        cls.serializer = JSONWebSignatureSerializer(secret_key)
        cls.token = token
        if auth_enabled is not None:
            # Wrap the plain callable as a classmethod so it is looked up on
            # the class exactly like the default class attribute.
            cls.auth_enabled = classmethod(lambda lcls: auth_enabled())
        else:
            cls.auth_enabled = None
def no_auth(obj):
    """Decorator marking a resource as exempt from authentication checks."""
    setattr(obj, '__no_auth__', True)
    return obj
def create_api(disable_auth=False):
    """create falcon API with Json and Auth middlewares"""
    # JSON translation is always on; auth is optional.
    middleware = [JSONTranslator()]
    if not disable_auth:
        middleware.append(AuthMiddleware())
    return falcon.API(request_type=MonitorrentRequest, response_type=MonitorrentResponse, middleware=middleware)
1,714 | host memory arg | """
@generated by mypy-protobuf. Do not edit manually!
isort:skip_file
"""
import builtins
import collections.abc
import google.protobuf.descriptor
import google.protobuf.internal.containers
import google.protobuf.message
import sys
import tensorflow.core.framework.attr_value_pb2
if sys.version_info >= (3, 8):
import typing as typing_extensions
else:
import typing_extensions
DESCRIPTOR: google.protobuf.descriptor.FileDescriptor
@typing_extensions.final
class KernelDef(google.protobuf.message.Message):
DESCRIPTOR: google.protobuf.descriptor.Descriptor
@typing_extensions.final
class AttrConstraint(google.protobuf.message.Message):
DESCRIPTOR: google.protobuf.descriptor.Descriptor
NAME_FIELD_NUMBER: builtins.int
ALLOWED_VALUES_FIELD_NUMBER: builtins.int
name: builtins.str
"""Name of an attr from the Op."""
@property
def allowed_values(self) -> tensorflow.core.framework.attr_value_pb2.AttrValue:
"""A list of values that this kernel supports for this attr.
Like OpDef.AttrDef.allowed_values, except for kernels instead of Ops.
"""
def __init__(
self,
*,
name: builtins.str | None = ...,
allowed_values: tensorflow.core.framework.attr_value_pb2.AttrValue | None = ...,
) -> None: ...
def HasField(self, field_name: typing_extensions.Literal["allowed_values", b"allowed_values"]) -> builtins.bool: ...
def ClearField(self, field_name: typing_extensions.Literal["allowed_values", b"allowed_values", "name", b"name"]) -> None: ...
OP_FIELD_NUMBER: builtins.int
DEVICE_TYPE_FIELD_NUMBER: builtins.int
CONSTRAINT_FIELD_NUMBER: builtins.int
HOST_MEMORY_ARG_FIELD_NUMBER: builtins.int
LABEL_FIELD_NUMBER: builtins.int
PRIORITY_FIELD_NUMBER: builtins.int
op: builtins.str
"""Must match the name of an Op."""
device_type: builtins.str
"""Type of device this kernel runs on."""
@property
def constraint(self) -> google.protobuf.internal.containers.RepeatedCompositeFieldContainer[global___KernelDef.AttrConstraint]: ...
@property
def METHOD_NAME(self) -> google.protobuf.internal.containers.RepeatedScalarFieldContainer[builtins.str]:
"""Names of the Op's input_/output_args that reside in host memory
instead of device memory.
"""
label: builtins.str
"""This allows experimental kernels to be registered for an op that
won't be used unless the user specifies a "_kernel" attr with
value matching this.
"""
priority: builtins.int
"""Prioritization of kernel amongst different devices. By default we assume
priority is 0. The higher the priority the better. By default (i.e. if
this is not set), we prefer GPU kernels over CPU.
"""
def __init__(
self,
*,
op: builtins.str | None = ...,
device_type: builtins.str | None = ...,
constraint: collections.abc.Iterable[global___KernelDef.AttrConstraint] | None = ...,
METHOD_NAME: collections.abc.Iterable[builtins.str] | None = ...,
label: builtins.str | None = ...,
priority: builtins.int | None = ...,
) -> None: ...
def ClearField(self, field_name: typing_extensions.Literal["constraint", b"constraint", "device_type", b"device_type", "host_memory_arg", b"host_memory_arg", "label", b"label", "op", b"op", "priority", b"priority"]) -> None: ...
global___KernelDef = KernelDef
@typing_extensions.final
class KernelList(google.protobuf.message.Message):
"""A collection of KernelDefs"""
DESCRIPTOR: google.protobuf.descriptor.Descriptor
KERNEL_FIELD_NUMBER: builtins.int
@property
def kernel(self) -> google.protobuf.internal.containers.RepeatedCompositeFieldContainer[global___KernelDef]: ...
def __init__(
self,
*,
kernel: collections.abc.Iterable[global___KernelDef] | None = ...,
) -> None: ...
def ClearField(self, field_name: typing_extensions.Literal["kernel", b"kernel"]) -> None: ...
global___KernelList = KernelList |
1,715 | test it proxies to fetch by pubid | import datetime
from unittest import mock
import pytest
from h_matchers import Any
from h.models import Group
from h.models.group import ReadableBy
from h.services.group import GroupService, groups_factory
class TestGroupServiceFetch:
    """fetch() dispatches to fetch_by_groupid or fetch_by_pubid by id shape."""

    def test_it_proxies_to_fetch_by_groupid_if_groupid_valid(self, svc):
        svc.fetch_by_groupid = mock.Mock()

        result = svc.fetch("group:something@somewhere.com")

        # `Mock.called_once_with(...)` is not an assertion method -- it just
        # creates a (truthy) child mock, so `assert m.called_once_with(...)`
        # could never fail. Use the real assert_called_once_with instead.
        svc.fetch_by_groupid.assert_called_once_with("group:something@somewhere.com")
        assert result == svc.fetch_by_groupid.return_value

    def METHOD_NAME(self, svc):
        svc.fetch_by_pubid = mock.Mock()

        result = svc.fetch("abcdppp")

        svc.fetch_by_pubid.assert_called_once_with("abcdppp")
        assert result == svc.fetch_by_pubid.return_value
class TestGroupServiceFetchByPubid:
    """fetch_by_pubid() returns the matching Group or None."""

    def test_it_returns_group_model(self, svc, factories):
        created = factories.Group()

        fetched = svc.fetch_by_pubid(created.pubid)

        assert fetched == created
        assert isinstance(fetched, Group)

    def test_it_returns_None_if_no_group_found(self, svc):
        fetched = svc.fetch_by_pubid("abcdeff")

        assert fetched is None
class TestGroupServiceFetchByGroupid:
    """fetch_by_groupid() validates the groupid format before looking up."""

    def test_it_returns_group_model_of_matching_group(self, svc, factories):
        created = factories.Group(authority_provided_id="dingdong", authority="foo.com")

        assert isinstance(svc.fetch_by_groupid(created.groupid), Group)

    def test_it_raises_ValueError_if_invalid_groupid(self, svc):
        with pytest.raises(ValueError, match="isn't a valid groupid"):
            svc.fetch_by_groupid("fiddlesticks")

    def test_it_returns_None_if_no_matching_group(self, svc):
        assert svc.fetch_by_groupid("group:rando@dando.com") is None
@pytest.mark.usefixtures("groups")
class TestFilterByName:
def test_it_filters_by_name(self, svc):
filtered_groups = svc.filter_by_name(name="Hello")
assert len(filtered_groups.all()) == 1
assert filtered_groups.all() == [
Any.instance_of(Group).with_attrs({"name": "Hello"})
]
def test_it_returns_all_groups_if_name_is_None(self, svc, groups):
filtered_groups = svc.filter_by_name()
# results include public group in addition to ``groups``
assert len(filtered_groups.all()) == len(groups) + 1
def test_it_is_case_insensitive(self, svc):
filtered_groups = svc.filter_by_name(name="Amber")
assert len(filtered_groups.all()) == 2
def test_it_performs_wildcard_search(self, svc):
filtered_groups = svc.filter_by_name(name="Finger")
assert len(filtered_groups.all()) == 2
def test_results_sorted_by_created_desc(self, svc):
filtered_groups = svc.filter_by_name("Finger")
assert filtered_groups.all() == [
Any.instance_of(Group).with_attrs({"name": "Fingers"}),
Any.instance_of(Group).with_attrs({"name": "Finger"}),
]
@pytest.fixture
def groups(self, factories):
return [
factories.Group(name="Finger", created=datetime.datetime(2015, 8, 2)),
factories.Group(name="Fingers", created=datetime.datetime(2018, 2, 1)),
factories.Group(name="Hello"),
factories.Group(name="Amber"),
factories.Group(name="amber"),
]
class TestGroupServiceGroupIds:
"""
Unit tests for methods related to group IDs.
- :py:meth:`GroupService.groupids_readable_by`
- :py:meth:`GroupService.groupids_created_by`
"""
@pytest.mark.parametrize("with_user", [True, False])
def test_readable_by_includes_world(self, with_user, svc, db_session, factories):
user = None
if with_user:
user = factories.User()
db_session.flush()
assert "__world__" in svc.groupids_readable_by(user)
@pytest.mark.parametrize("with_user", [True, False])
def test_readable_by_includes_world_readable_groups(
self, with_user, svc, db_session, factories
):
# group readable by members
factories.Group(readable_by=ReadableBy.members)
# group readable by everyone
group = factories.Group(readable_by=ReadableBy.world)
user = None
if with_user:
user = factories.User()
db_session.flush()
assert group.pubid in svc.groupids_readable_by(user)
def test_readable_by_includes_memberships(self, svc, db_session, factories):
user = factories.User()
group = factories.Group(readable_by=ReadableBy.members)
group.members.append(user)
db_session.flush()
assert group.pubid in svc.groupids_readable_by(user)
def test_readable_by_applies_filter(self, svc, db_session, factories):
user = factories.User()
factories.Group(
readable_by=ReadableBy.world
) # Group that shouldn't be returned
group = factories.Group(readable_by=ReadableBy.world)
db_session.flush()
pubids = [group.pubid, "doesnotexist"]
assert svc.groupids_readable_by(user, group_ids=pubids) == [group.pubid]
def test_created_by_includes_created_groups(self, svc, factories):
user = factories.User()
group = factories.Group(creator=user)
assert group.pubid in svc.groupids_created_by(user)
def test_created_by_excludes_other_groups(self, svc, db_session, factories):
user = factories.User()
private_group = factories.Group()
private_group.members.append(user)
factories.Group(readable_by=ReadableBy.world)
db_session.flush()
assert svc.groupids_created_by(user) == []
def test_created_by_returns_empty_list_for_missing_user(self, svc):
assert svc.groupids_created_by(None) == []
@pytest.mark.usefixtures("user_service")
class TestGroupsFactory:
def test_returns_groups_service(self, pyramid_request):
svc = groups_factory(None, pyramid_request)
assert isinstance(svc, GroupService)
def test_provides_request_db_as_session(self, pyramid_request):
svc = groups_factory(None, pyramid_request)
assert svc.session == pyramid_request.db
def test_wraps_user_service_as_user_fetcher(self, pyramid_request, user_service):
svc = groups_factory(None, pyramid_request)
svc.user_fetcher("foo")
user_service.fetch.assert_called_once_with("foo")
@pytest.fixture
def svc(db_session, user_service):
return GroupService(db_session, user_service) |
1,716 | grid | # Copy past from c2cgeoportal_admin.views.layer_wms except:
# - All references to LuxLayerInternalWMS and lux_layer_internal_wms
# - Addition of _list_field('category_id'),
# - renderer path (from "../templates/abc.jinja2" to "./templates/abc.jinja2")
from functools import partial
from pyramid.view import view_defaults
from pyramid.view import view_config
from sqlalchemy import inspect, insert, delete, update
from zope.sqlalchemy import mark_changed
from c2cgeoform.schema import GeoFormSchemaNode
from c2cgeoform.views.abstract_views import ListField, ItemAction
from deform.widget import FormWidget
from c2cgeoportal_commons.models.main import \
LayerWMS, LayerWMTS, OGCServer, LayerGroup, TreeItem
from c2cgeoportal_admin import _
from c2cgeoportal_admin.schemas.dimensions import dimensions_schema_node
from c2cgeoportal_admin.schemas.metadata import metadatas_schema_node
from c2cgeoportal_admin.schemas.interfaces import interfaces_schema_node
from c2cgeoportal_admin.schemas.restriction_areas import restrictionareas_schema_node
from c2cgeoportal_admin.schemas.treeitem import parent_id_node
from c2cgeoportal_admin.views.dimension_layers import DimensionLayerViews
from geoportailv3_geoportal.models import LuxLayerInternalWMS
_list_field = partial(ListField, LuxLayerInternalWMS)
base_schema = GeoFormSchemaNode(LuxLayerInternalWMS, widget=FormWidget(fields_template='layer_fields'))
base_schema.add(dimensions_schema_node.clone())
base_schema.add(metadatas_schema_node.clone())
base_schema.add(interfaces_schema_node.clone())
base_schema.add(restrictionareas_schema_node.clone())
base_schema.add_unique_validator(LuxLayerInternalWMS.name, LuxLayerInternalWMS.id)
base_schema.add(parent_id_node(LayerGroup))
@view_defaults(match_param='table=lux_layer_internal_wms')
class LuxLayerInternalWMSViews(DimensionLayerViews):
_list_fields = DimensionLayerViews._list_fields + [
_list_field('url'),
_list_field('layers'),
_list_field('is_poi'),
_list_field('use_auth'),
_list_field('collection_id'),
_list_field('rest_url'),
_list_field('layer'),
_list_field('style'),
_list_field('time_mode'),
_list_field('time_widget'),
_list_field(
'ogc_server',
renderer=lambda layer_wms: layer_wms.ogc_server.name,
sort_column=OGCServer.name,
filter_column=OGCServer.name)
] + DimensionLayerViews._extra_list_fields
_id_field = 'id'
_model = LuxLayerInternalWMS
_base_schema = base_schema
def _base_query(self):
return super()._base_query(
self._request.dbsession.query(LuxLayerInternalWMS).distinct().
outerjoin('ogc_server'))
@view_config(route_name='c2cgeoform_index',
renderer='./templates/index.jinja2')
def index(self):
return super().index()
@view_config(route_name='c2cgeoform_grid',
renderer='fast_json')
def METHOD_NAME(self):
return super().METHOD_NAME()
def _item_actions(self, item, readonly=False):
actions = super()._item_actions(item, readonly)
if inspect(item).persistent:
actions.insert(next((i for i, v in enumerate(actions) if v.name() == 'delete')), ItemAction(
name='convert_to_wmts',
label=_('Convert to WMTS'),
icon='glyphicon icon-l_wmts',
url=self._request.route_url(
'convert_to_wmts',
id=getattr(item, self._id_field)),
method='POST',
confirmation=_('Are you sure you want to convert this layer to WMTS?')))
return actions
@view_config(route_name='c2cgeoform_item',
request_method='GET',
renderer='./templates/edit.jinja2')
def view(self):
if self._is_new():
dbsession = self._request.dbsession
default_wms = LayerWMS.get_default(dbsession)
if default_wms:
return self.copy(default_wms, excludes=['name', 'layer'])
return super().edit()
@view_config(route_name='c2cgeoform_item',
request_method='POST',
renderer='./templates/edit.jinja2')
def save(self):
return super().save()
@view_config(route_name='c2cgeoform_item',
request_method='DELETE',
renderer='fast_json')
def delete(self):
return super().delete()
@view_config(route_name='c2cgeoform_item_duplicate',
request_method='GET',
renderer='./templates/edit.jinja2')
def duplicate(self):
return super().duplicate()
@view_config(route_name='convert_to_wmts',
request_method='POST',
renderer='fast_json')
def convert_to_wmts(self):
src = self._get_object()
dbsession = self._request.dbsession
default_wmts = LayerWMTS.get_default(dbsession)
values = {
'url': default_wmts.url,
'matrix_set': default_wmts.matrix_set
} if default_wmts else {
'url': '',
'matrix_set': ''
}
with dbsession.no_autoflush:
d = delete(LayerWMS.__table__)
d = d.where(LayerWMS.__table__.c.id == src.id)
i = insert(LayerWMTS.__table__)
values.update({
'id': src.id,
'layer': src.layer,
'image_type': src.ogc_server.image_type,
'style': src.style
})
i = i.values(values)
u = update(TreeItem.__table__)
u = u.where(TreeItem.__table__.c.id == src.id)
u = u.values({'type': 'l_wmts'})
dbsession.execute(d)
dbsession.execute(i)
dbsession.execute(u)
dbsession.expunge(src)
dbsession.flush()
mark_changed(dbsession)
return {
'success': True,
'redirect': self._request.route_url(
'c2cgeoform_item',
table='layers_wmts',
id=self._request.matchdict['id'],
_query=[('msg_col', 'submit_ok')])
} |
1,717 | chatter | #!/usr/bin/env python
##############################################################################
#
# Copyright (c) 2001, 2002 Zope Foundation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE
#
##############################################################################
"""Simple consistency checker for FileStorage.
usage: fstest.py [-v] data.fs
The fstest tool will scan all the data in a FileStorage and report an
error if it finds any corrupt transaction data. The tool will print a
message when the first error is detected, then exit.
The tool accepts one or more -v arguments. If a single -v is used, it
will print a line of text for each transaction record it encounters.
If two -v arguments are used, it will also print a line of text for
each object. The objects for a transaction will be printed before the
transaction itself.
Note: It does not check the consistency of the object pickles. It is
possible for the damage to occur only in the part of the file that
stores object pickles. Those errors will go undetected.
"""
import binascii
import struct
import sys
from ZODB._compat import FILESTORAGE_MAGIC
# The implementation is based closely on the read_index() function in
# ZODB.FileStorage. If anything about the FileStorage layout changes,
# this file will need to be udpated.
class FormatError(ValueError):
"""There is a problem with the format of the FileStorage."""
class Status:
    # Transaction status flags as stored (single ASCII bytes) in the
    # FileStorage transaction header.
    checkpoint = b'c'  # checkpoint flag; must have been cleared in a valid file
    undone = b'u'  # transaction was undone
packed_version = FILESTORAGE_MAGIC
TREC_HDR_LEN = 23
DREC_HDR_LEN = 42
VERBOSE = 0
def hexify(s):
    r"""Format an 8-bit string as hex

    >>> hexify(b'\x00\xff\xaa\xcc')
    '0x00ffaacc'
    """
    hex_digits = binascii.hexlify(s).decode()
    return '0x{}'.format(hex_digits)
def METHOD_NAME(msg, level=1):
    """Write msg to stdout when the global VERBOSE level is at least `level`."""
    if level <= VERBOSE:
        sys.stdout.write(msg)
def U64(v):
    """Unpack an 8-byte string as a 64-bit long"""
    high, low = struct.unpack(">II", v)
    # Combine the two 32-bit halves; skip the shift when the high word is 0.
    return (high << 32) + low if high else low
def check(path):
with open(path, 'rb') as file:
file.seek(0, 2)
file_size = file.tell()
if file_size == 0:
raise FormatError("empty file")
file.seek(0)
if file.read(4) != packed_version:
raise FormatError("invalid file header")
pos = 4
tid = b'\000' * 8 # lowest possible tid to start
i = 0
while pos:
_pos = pos
pos, tid = check_trec(path, file, pos, tid, file_size)
if tid is not None:
METHOD_NAME("%10d: transaction tid %s #%d \n" %
(_pos, hexify(tid), i))
i = i + 1
def check_trec(path, file, pos, ltid, file_size):
"""Read an individual transaction record from file.
Returns the pos of the next transaction and the transaction id.
It also leaves the file pointer set to pos. The path argument is
used for generating error messages.
"""
h = file.read(TREC_HDR_LEN) # XXX must be bytes
if not h:
return None, None
if len(h) != TREC_HDR_LEN:
raise FormatError("{} truncated at {}".format(path, pos))
tid, stl, status, ul, dl, el = struct.unpack(">8s8scHHH", h)
tmeta_len = TREC_HDR_LEN + ul + dl + el
if tid <= ltid:
raise FormatError("%s time-stamp reduction at %s: %s <= %s" %
(path, pos, hexify(tid), hexify(ltid)))
ltid = tid
tl = U64(stl) # transaction record length - 8
if pos + tl + 8 > file_size:
raise FormatError("%s truncated possibly because of"
" damaged records at %s" % (path, pos))
if status == Status.checkpoint:
raise FormatError("%s checkpoint flag was not cleared at %s"
% (path, pos))
if status not in b' up':
raise FormatError("%s has invalid status '%s' at %s" %
(path, status, pos))
if tmeta_len > tl:
raise FormatError("%s has an invalid transaction header"
" at %s" % (path, pos))
tpos = pos
tend = tpos + tl
if status != Status.undone:
pos = tpos + tmeta_len
file.read(ul + dl + el) # skip transaction metadata
i = 0
while pos < tend:
_pos = pos
pos, oid = check_drec(path, file, pos, tpos, tid)
if pos > tend:
raise FormatError("%s has data records that extend beyond"
" the transaction record; end at %s" %
(path, pos))
METHOD_NAME("%10d: object oid %s #%d\n" % (_pos, hexify(oid), i),
level=2)
i = i + 1
file.seek(tend)
rtl = file.read(8)
if rtl != stl:
raise FormatError("%s has inconsistent transaction length"
" for undone transaction at %s" % (path, pos))
pos = tend + 8
return pos, tid
def check_drec(path, file, pos, tpos, tid):
"""Check a data record for the current transaction record"""
h = file.read(DREC_HDR_LEN)
if len(h) != DREC_HDR_LEN:
raise FormatError("{} truncated at {}".format(path, pos))
oid, serial, _prev, _tloc, vlen, _plen = (
struct.unpack(">8s8s8s8sH8s", h))
U64(_prev)
tloc = U64(_tloc)
plen = U64(_plen)
dlen = DREC_HDR_LEN + (plen or 8)
if vlen:
dlen = dlen + 16 + vlen
file.seek(8, 1)
U64(file.read(8))
file.seek(vlen, 1) # skip the version data
if tloc != tpos:
raise FormatError("%s data record exceeds transaction record "
"at %s: tloc %d != tpos %d" %
(path, pos, tloc, tpos))
pos = pos + dlen
if plen:
file.seek(plen, 1)
else:
file.seek(8, 1)
# _loadBack() ?
return pos, oid
def usage():
    """Print the module docstring (the usage text) and terminate."""
    # sys.exit(msg) is just `raise SystemExit(msg)`.
    raise SystemExit(__doc__)
def main(args=None):
    """Command-line entry point: parse -v flags, then check the given file."""
    if args is None:
        args = sys.argv[1:]
    import getopt
    global VERBOSE
    try:
        opts, args = getopt.getopt(args, 'v')
        if len(args) != 1:
            raise ValueError("expected one argument")
        for k, v in opts:
            if k == '-v':
                # Each -v raises verbosity: 1 = transactions, 2 = + objects.
                VERBOSE = VERBOSE + 1
    except (getopt.error, ValueError):
        usage()
    try:
        check(args[0])
    except FormatError as msg:
        # Report the corruption message and exit non-zero.
        sys.exit(msg)
    METHOD_NAME("no errors detected")
1,718 | compile consensus statistics | import logging
import warnings
import os
import numpy as np
import pandas as pd
import xarray as xr
try:
import salem
except ImportError:
pass
from oggm import utils, cfg
# Module logger
log = logging.getLogger(__name__)
default_base_url = 'https://cluster.klima.uni-bremen.de/~fmaussion/icevol/composite/'
@utils.entity_task(log, writes=['gridded_data'])
def add_consensus_thickness(gdir, base_url=None):
    """Add the consensus thickness estimate to the gridded_data file.

    varname: consensus_ice_thickness

    Parameters
    ----------
    gdir ::py:class:`oggm.GlacierDirectory`
        the glacier directory to process
    base_url : str
        where to find the thickness data. Default is
        https://cluster.klima.uni-bremen.de/~fmaussion/icevol/composite
    """
    if base_url is None:
        base_url = default_base_url
    if not base_url.endswith('/'):
        base_url += '/'
    # Download the per-glacier thickness GeoTiff (files are grouped by
    # RGI region, i.e. the first 8 characters of the RGI id).
    rgi_str = gdir.rgi_id
    rgi_reg_str = rgi_str[:8]
    url = base_url + rgi_reg_str + '/' + rgi_str + '_thickness.tif'
    input_file = utils.file_downloader(url)
    dsb = salem.GeoTiff(input_file)
    # Clip negative/no-data values before integrating the source volume.
    thick = utils.clip_min(dsb.get_vardata(), 0)
    in_volume = thick.sum() * dsb.grid.dx ** 2
    with warnings.catch_warnings():
        # This can trigger an out of bounds warning
        warnings.filterwarnings("ignore", category=RuntimeWarning,
                                message='.*out of bounds.*')
        # Reproject the source grid onto the glacier directory's local grid.
        thick = gdir.grid.map_gridded_data(thick, dsb.grid, interp='linear')
    # Correct for volume: rescale the interpolated map so it conserves the
    # total volume of the original data.
    thick = utils.clip_min(thick.filled(0), 0)
    out_volume = thick.sum() * gdir.grid.dx ** 2
    if out_volume > 0:
        thick *= in_volume / out_volume
    # We mask zero ice as nodata
    thick = np.where(thick == 0, np.NaN, thick)
    # Write (create the variable on first use, overwrite afterwards)
    with utils.ncDataset(gdir.get_filepath('gridded_data'), 'a') as nc:
        vn = 'consensus_ice_thickness'
        if vn in nc.variables:
            v = nc.variables[vn]
        else:
            v = nc.createVariable(vn, 'f4', ('y', 'x', ), zlib=True)
        v.units = 'm'
        ln = 'Ice thickness from the consensus estimate'
        v.long_name = ln
        v.base_url = base_url
        v[:] = thick
@utils.entity_task(log)
def consensus_statistics(gdir):
    """Gather statistics about the consensus data interpolated to this glacier.

    Parameters
    ----------
    gdir : :py:class:`oggm.GlacierDirectory`
        the glacier directory to process

    Returns
    -------
    dict with the RGI metadata and the consensus volume/area/coverage
    (zeros when the gridded data is unavailable).
    """
    d = dict()

    # Easy stats - this should always be possible
    d['rgi_id'] = gdir.rgi_id
    d['rgi_region'] = gdir.rgi_region
    d['rgi_subregion'] = gdir.rgi_subregion
    d['rgi_area_km2'] = gdir.rgi_area_km2
    d['consensus_vol_km3'] = 0
    d['consensus_area_km2'] = 0
    d['consensus_perc_cov'] = 0

    try:
        with xr.open_dataset(gdir.get_filepath('gridded_data')) as ds:
            # Only count thickness inside the glacier mask.
            # np.nan: the np.NaN alias was removed in NumPy 2.0.
            thick = ds['consensus_ice_thickness'].where(ds['glacier_mask'], np.nan).load()
            d['consensus_vol_km3'] = float(thick.sum() * gdir.grid.dx ** 2 * 1e-9)
            d['consensus_area_km2'] = float((~thick.isnull()).sum() * gdir.grid.dx ** 2 * 1e-6)
            d['consensus_perc_cov'] = float(d['consensus_area_km2'] / gdir.rgi_area_km2)
    except (FileNotFoundError, AttributeError, KeyError):
        # Missing gridded file or variable: keep the zero defaults.
        pass

    return d
@utils.global_task(log)
def METHOD_NAME(gdirs, filesuffix='', path=True):
    """Gather as much statistics as possible about a list of glaciers.

    Parameters
    ----------
    gdirs : list of :py:class:`oggm.GlacierDirectory` objects
        the glacier directories to process
    filesuffix : str
        add suffix to output file
    path : str, bool
        Set to "True" in order to store the info in the working directory
        Set to a path to store the file to your chosen location
    """
    from oggm.workflow import execute_entity_task

    # One stats dict per glacier, gathered in parallel.
    records = execute_entity_task(consensus_statistics, gdirs)
    stats_df = pd.DataFrame(records).set_index('rgi_id')

    if path:
        if path is True:
            out_name = 'consensus_statistics' + filesuffix + '.csv'
            stats_df.to_csv(os.path.join(cfg.PATHS['working_dir'], out_name))
        else:
            stats_df.to_csv(path)

    return stats_df
1,719 | message | import typing as t
if t.TYPE_CHECKING:
from .runtime import Undefined
class TemplateError(Exception):
    """Baseclass for all template errors."""

    def __init__(self, METHOD_NAME: t.Optional[str] = None) -> None:
        super().__init__(METHOD_NAME)

    @property
    def METHOD_NAME(self) -> t.Optional[str]:
        """The message given at construction time, or ``None``."""
        if self.args:
            return self.args[0]
        return None
class TemplateNotFound(IOError, LookupError, TemplateError):
"""Raised if a template does not exist.
.. versionchanged:: 2.11
If the given name is :class:`Undefined` and no message was
provided, an :exc:`UndefinedError` is raised.
"""
# Silence the Python warning about message being deprecated since
# it's not valid here.
METHOD_NAME: t.Optional[str] = None
def __init__(
self,
name: t.Optional[t.Union[str, "Undefined"]],
METHOD_NAME: t.Optional[str] = None,
) -> None:
IOError.__init__(self, name)
if METHOD_NAME is None:
from .runtime import Undefined
if isinstance(name, Undefined):
name._fail_with_undefined_error()
METHOD_NAME = name
self.METHOD_NAME = METHOD_NAME
self.name = name
self.templates = [name]
def __str__(self) -> str:
return str(self.METHOD_NAME)
class TemplatesNotFound(TemplateNotFound):
"""Like :class:`TemplateNotFound` but raised if multiple templates
are selected. This is a subclass of :class:`TemplateNotFound`
exception, so just catching the base exception will catch both.
.. versionchanged:: 2.11
If a name in the list of names is :class:`Undefined`, a message
about it being undefined is shown rather than the empty string.
.. versionadded:: 2.2
"""
def __init__(
self,
names: t.Sequence[t.Union[str, "Undefined"]] = (),
METHOD_NAME: t.Optional[str] = None,
) -> None:
if METHOD_NAME is None:
from .runtime import Undefined
parts = []
for name in names:
if isinstance(name, Undefined):
parts.append(name._undefined_message)
else:
parts.append(name)
parts_str = ", ".join(map(str, parts))
METHOD_NAME = f"none of the templates given were found: {parts_str}"
super().__init__(names[-1] if names else None, METHOD_NAME)
self.templates = list(names)
class TemplateSyntaxError(TemplateError):
"""Raised to tell the user that there is a problem with the template."""
def __init__(
self,
METHOD_NAME: str,
lineno: int,
name: t.Optional[str] = None,
filename: t.Optional[str] = None,
) -> None:
super().__init__(METHOD_NAME)
self.lineno = lineno
self.name = name
self.filename = filename
self.source: t.Optional[str] = None
# this is set to True if the debug.translate_syntax_error
# function translated the syntax error into a new traceback
self.translated = False
def __str__(self) -> str:
# for translated errors we only return the message
if self.translated:
return t.cast(str, self.METHOD_NAME)
# otherwise attach some stuff
location = f"line {self.lineno}"
name = self.filename or self.name
if name:
location = f'File "{name}", {location}'
lines = [t.cast(str, self.METHOD_NAME), " " + location]
# if the source is set, add the line to the output
if self.source is not None:
try:
line = self.source.splitlines()[self.lineno - 1]
except IndexError:
pass
else:
lines.append(" " + line.strip())
return "\n".join(lines)
def __reduce__(self): # type: ignore
# https://bugs.python.org/issue1692335 Exceptions that take
# multiple required arguments have problems with pickling.
# Without this, raises TypeError: __init__() missing 1 required
# positional argument: 'lineno'
return self.__class__, (self.METHOD_NAME, self.lineno, self.name, self.filename)
class TemplateAssertionError(TemplateSyntaxError):
"""Like a template syntax error, but covers cases where something in the
template caused an error at compile time that wasn't necessarily caused
by a syntax error. However it's a direct subclass of
:exc:`TemplateSyntaxError` and has the same attributes.
"""
class TemplateRuntimeError(TemplateError):
"""A generic runtime error in the template engine. Under some situations
Jinja may raise this exception.
"""
class UndefinedError(TemplateRuntimeError):
"""Raised if a template tries to operate on :class:`Undefined`."""
class SecurityError(TemplateRuntimeError):
"""Raised if a template tries to do something insecure if the
sandbox is enabled.
"""
class FilterArgumentError(TemplateRuntimeError):
"""This error is raised if a filter was called with inappropriate
arguments
""" |
1,720 | plot for energies | import matplotlib.pyplot as plt
import numpy as np
from qaml_scripts.evolution import perform_adiabatic
import qibo
def train_adiabatic_evolution(
    nsteps,
    xarr,
    cdf,
    training_n,
    init_params,
    e0,
    e1,
    target_loss,
    finalT,
    h0,
    h1,
    obs_target,
):
    """Train the adiabatic evolution to fit a target empirical CDF.

    Parameters
    ----------
    nsteps : int
        Total number of time steps of the evolution.
    xarr : array-like
        x-coordinates at which the CDF is sampled.
    cdf : array-like
        Target empirical CDF values, one per step.
    training_n : int
        Number of candidate training points (before filtering).
    init_params : array-like
        Initial parameters of the scheduling function.
    e0, e1 : float
        Initial/final ground-state energies; their ordering fixes the
        expected sign of the CDF slope used in the monotonicity penalty.
    target_loss : float
        Loss value at which the optimizer may stop.
    finalT : float
        Total evolution time.
    h0, h1
        Initial and final hamiltonians of the adiabatic evolution.
    obs_target
        Observable whose expectation value tracks the CDF.

    Returns
    -------
    best_params : array-like
        Optimized schedule parameters.
    """

    # --------------------------- PLOTTING FUNCTION -----------------------------------------------
    def METHOD_NAME(parameters, label="", true_law=None, title=""):
        """Plot energies, training points and CDF for a set of energies given a set of parameters"""
        energies = perform_adiabatic(
            params=parameters,
            finalT=finalT,
            h0=h0,
            h1=h1,
            obs_target=obs_target,
        )
        plt.title(title)
        plt.plot(xarr, -np.array(cdf), label="eCDF", color="black", lw=1, ls="--")
        plt.plot(
            xarr, -np.array(energies), label=label, color="purple", lw=2, alpha=0.8
        )
        plt.plot(
            xarr[idx_training],
            -np.array(cdf_training),
            "o",
            label="Training points",
            color="orange",
            alpha=0.85,
            markersize=8,
        )
        # Identity comparison per PEP 8 (was `true_law != None`)
        if true_law is not None:
            plt.plot(xarr, true_law, c="orange", lw=1, ls="--")
        plt.xlabel("x")
        plt.ylabel("cdf")
        plt.legend()
        plt.show()

    # ----------------------------- LOSS FUNCTION ---------------------------------------------
    def loss_evaluation(params, penalty=True):
        """Evaluating loss function related to the cdf fit"""
        # Retrieve the energy per time step for this set of parameters
        energies = perform_adiabatic(
            params=params,
            finalT=finalT,
            h0=h0,
            h1=h1,
            obs_target=obs_target,
        )
        # Select the points we are training on
        e_train = energies[idx_training]
        loss = np.mean((e_train - cdf_training) ** 2 / norm_cdf)
        if penalty:
            # Penalty term for negative derivative
            delta_energy = good_direction * np.diff(energies)
            # Remove non-monotonous values
            delta_energy *= delta_energy < 0
            pos_penalty = np.abs(np.sum(delta_energy))
            val_loss = loss
            loss = val_loss + pos_penalty
        return loss

    # ------------------------------ GENETIC ALGORITHM CALL --------------------------------------------------
    def optimize(
        force_positive=False,
        target=5e-2,
        max_iterations=50000,
        max_evals=500000,
        initial_p=None,
    ):
        """Use Qibo to optimize the parameters of the schedule function"""
        options = {
            "verbose": -1,
            "tolfun": 1e-12,
            "ftarget": target,  # Target error
            "maxiter": max_iterations,  # Maximum number of iterations
            "maxfeval": max_evals,  # Maximum number of function evaluations
            "maxstd": 20,
        }
        if force_positive:
            options["bounds"] = [0, 1e5]
        if initial_p is None:
            # Fall back to the schedule parameters supplied by the caller.
            # (Bug fix: the original had a no-op `initial_p = initial_p` here.)
            initial_p = init_params
        else:
            print("Reusing previous best parameters")
        result = qibo.optimizers.optimize(
            loss_evaluation, initial_p, method="cma", options=options
        )
        return result, result[1]

    # ------------------------------ BUILD TRAINING SET AND OPTIMIZE! --------------------------------
    # Sign of the expected CDF slope: +1 when the energy grows, -1 otherwise.
    good_direction = 1 if (e1 - e0) > 0 else -1
    # But select those for which the difference between them is greater than some threshold
    min_step = 1e-3
    # but never go more than max_skip points without selecting one
    max_skip = 0
    if training_n > nsteps:
        # ValueError is still caught by existing `except Exception` handlers
        raise ValueError("The notebook cannot run with nsteps < training_n")
    # Select a subset of points for training, but skip first and include last
    idx_training_raw = np.linspace(0, nsteps, num=training_n, endpoint=True, dtype=int)[
        1:
    ]
    # And, from this subset, remove those that do not add that much info
    idx_training = []
    cval = cdf[0]
    nskip = 0
    for p in idx_training_raw[:-2]:
        diff = cval - cdf[p]
        if diff > min_step or nskip > max_skip:
            nskip = 0
            idx_training.append(p)
            cval = cdf[p]
        else:
            nskip += 1
    idx_training.append(idx_training_raw[-1])
    cdf_training = cdf[idx_training]
    # To normalize the points according to their absolute value
    norm_cdf = np.abs(cdf_training)
    # (A duplicated assignment of `good_direction` that appeared here was
    # removed; it is already computed above.)
    # Fit before training
    METHOD_NAME(init_params, label="Initial state", title="Not trained evolution")
    print(f"Training on {len(idx_training)} points of the total of {nsteps}")
    _, best_params = optimize(
        target=target_loss, force_positive=False, initial_p=init_params
    )
    # Fit after training
    # NOTE(review): label is still "Initial state" here — possibly a
    # copy-paste leftover; confirm whether "Trained state" was intended.
    METHOD_NAME(best_params, label="Initial state", title="Trained evolution")
    return best_params
1,721 | compare | # This file is part of Ansible
# -*- coding: utf-8 -*-
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
# Copyright 2016, Adrian Likins <alikins@redhat.com>
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from units.compat import unittest
from ansible.errors import AnsibleError
from ansible.module_utils.common.text.converters import to_native
from ansible.parsing import vault
from ansible.parsing.yaml.loader import AnsibleLoader
# module under test
from ansible.parsing.yaml import objects
from units.mock.yaml_helper import YamlTestUtils
from units.mock.vault_helper import TextVaultSecret
class TestAnsibleVaultUnicodeNoVault(unittest.TestCase, YamlTestUtils):
    """Tests for AnsibleVaultEncryptedUnicode instances with no vault attached."""

    def test_empty_init(self):
        # The ciphertext argument is required; calling without it is a TypeError.
        self.assertRaises(TypeError, objects.AnsibleVaultEncryptedUnicode)

    def test_empty_string_init(self):
        seq = ''.encode('utf8')
        self.assert_values(seq)

    def test_empty_byte_string_init(self):
        seq = b''
        self.assert_values(seq)

    def _assert_values(self, avu, seq):
        # Common checks for a vault-less encrypted unicode object.
        self.assertIsInstance(avu, objects.AnsibleVaultEncryptedUnicode)
        self.assertTrue(avu.vault is None)
        # AnsibleVaultEncryptedUnicode without a vault should never == any string
        self.assertNotEqual(avu, seq)

    def assert_values(self, seq):
        # Build the object from the raw ciphertext bytes/str and verify it.
        avu = objects.AnsibleVaultEncryptedUnicode(seq)
        self._assert_values(avu, seq)

    def test_single_char(self):
        seq = 'a'.encode('utf8')
        self.assert_values(seq)

    def test_string(self):
        seq = 'some letters'
        self.assert_values(seq)

    def test_byte_string(self):
        seq = 'some letters'.encode('utf8')
        self.assert_values(seq)
class TestAnsibleVaultEncryptedUnicode(unittest.TestCase, YamlTestUtils):
    """Tests for AnsibleVaultEncryptedUnicode backed by real VaultLib instances."""

    def setUp(self):
        # Two vaults: one whose secret can decrypt the fixtures, one that cannot.
        self.good_vault_password = "hunter42"
        good_vault_secret = TextVaultSecret(self.good_vault_password)
        self.good_vault_secrets = [('good_vault_password', good_vault_secret)]
        self.good_vault = vault.VaultLib(self.good_vault_secrets)
        # TODO: make this use two vault secret identities instead of two vaultSecrets
        self.wrong_vault_password = 'not-hunter42'
        wrong_vault_secret = TextVaultSecret(self.wrong_vault_password)
        self.wrong_vault_secrets = [('wrong_vault_password', wrong_vault_secret)]
        self.wrong_vault = vault.VaultLib(self.wrong_vault_secrets)
        # Default to the good vault; individual tests may switch to the wrong one.
        self.vault = self.good_vault
        self.vault_secrets = self.good_vault_secrets

    def _loader(self, stream):
        # Loader wired to whichever vault secrets are currently active.
        return AnsibleLoader(stream, vault_secrets=self.vault_secrets)

    def test_dump_load_cycle(self):
        aveu = self._from_plaintext('the test string for TestAnsibleVaultEncryptedUnicode.test_dump_load_cycle')
        self._dump_load_cycle(aveu)

    def assert_values(self, avu, seq):
        # With a working vault the object must compare equal to its plaintext.
        self.assertIsInstance(avu, objects.AnsibleVaultEncryptedUnicode)
        self.assertEqual(avu, seq)
        self.assertTrue(avu.vault is self.vault)
        self.assertIsInstance(avu.vault, vault.VaultLib)

    def _from_plaintext(self, seq):
        # Encrypt seq with the first secret that matches the good vault.
        id_secret = vault.match_encrypt_secret(self.good_vault_secrets)
        return objects.AnsibleVaultEncryptedUnicode.from_plaintext(seq, vault=self.vault, secret=id_secret[1])

    def test_empty_init(self):
        self.assertRaises(TypeError, objects.AnsibleVaultEncryptedUnicode)

    def test_empty_string_init_from_plaintext(self):
        seq = ''
        avu = self._from_plaintext(seq)
        self.assert_values(avu, seq)

    def test_empty_unicode_init_from_plaintext(self):
        seq = u''
        avu = self._from_plaintext(seq)
        self.assert_values(avu, seq)

    def test_string_from_plaintext(self):
        seq = 'some letters'
        avu = self._from_plaintext(seq)
        self.assert_values(avu, seq)

    def test_unicode_from_plaintext(self):
        seq = u'some letters'
        avu = self._from_plaintext(seq)
        self.assert_values(avu, seq)

    def test_unicode_from_plaintext_encode(self):
        # encode() must round-trip through the decrypted plaintext.
        seq = u'some text here'
        avu = self._from_plaintext(seq)
        b_avu = avu.encode('utf-8', 'strict')
        self.assertIsInstance(avu, objects.AnsibleVaultEncryptedUnicode)
        self.assertEqual(b_avu, seq.encode('utf-8', 'strict'))
        self.assertTrue(avu.vault is self.vault)
        self.assertIsInstance(avu.vault, vault.VaultLib)

    # TODO/FIXME: make sure bad password fails differently than 'thats not encrypted'
    def test_empty_string_wrong_password(self):
        # Decryption with the wrong vault must raise when comparing.
        seq = ''
        self.vault = self.wrong_vault
        avu = self._from_plaintext(seq)

        def METHOD_NAME(avu, seq):
            return avu == seq

        self.assertRaises(AnsibleError, METHOD_NAME, avu, seq)

    def test_vaulted_utf8_value_37258(self):
        seq = u"aöffü"
        avu = self._from_plaintext(seq)
        self.assert_values(avu, seq)

    def test_str_vaulted_utf8_value_37258(self):
        seq = u"aöffü"
        avu = self._from_plaintext(seq)
        assert str(avu) == to_native(seq)
1,722 | get tokens of interest |
from vsg import parser
from vsg import violation
from vsg.vhdlFile import utils
from vsg.rule_group import whitespace
from vsg.rules import utils as rules_utils
class spaces_before_and_after_tokens_when_bounded_by_tokens(whitespace.Rule):
    '''
    Checks for a single space between two tokens.

    Parameters
    ----------

    name : string
        The group the rule belongs to.

    identifier : string
        unique identifier.  Usually in the form of 00N.

    lTokens : list of token type pairs
        The tokens to check for a single space between

    lBetween : pair of token types
        The tokens bounding the region in which lTokens are searched
    '''

    def __init__(self, name, identifier, lTokens, lBetween):
        whitespace.Rule.__init__(self, name=name, identifier=identifier)
        self.lTokens = lTokens
        # Desired number of spaces before the token (user configurable)
        self.spaces_before = 1
        self.configuration.append('spaces_before')
        # Desired number of spaces after the token (user configurable)
        self.spaces_after = 4
        self.configuration.append('spaces_after')
        self.lBetween = lBetween
        # Number of context tokens extracted on each side of the token of interest
        self.nTokens = 2

    def METHOD_NAME(self, oFile):
        """Return tokens of interest plus surrounding context, bounded by lBetween."""
        return oFile.get_n_tokens_before_and_after_tokens_bounded_by_tokens(self.nTokens, self.lTokens, self.lBetween)

    def _analyze(self, lToi):
        """Record adjust/remove/insert whitespace actions as violations."""
        for oToi in lToi:
            # The left side is not checked when the token begins a line
            fStartLine = rules_utils.token_list_is_the_beginning_of_a_line(oToi.get_tokens())
            myToi = oToi.extract_tokens(1, 3)
            iLine, lTokens = utils.get_toi_parameters(myToi)
            dAction = {}
            if not fStartLine:
                check_spaces_on_left_side(lTokens, dAction, self.spaces_before)
            check_spaces_on_right_side(lTokens, dAction, self.spaces_after)
            if violations_found(dAction):
                sSolution = create_solution_text(dAction, self.spaces_before, self.spaces_after, lTokens)
                oViolation = violation.New(iLine, myToi, sSolution)
                oViolation.set_action(dAction)
                self.add_violation(oViolation)

    def _fix_violation(self, oViolation):
        """Apply the recorded whitespace actions to the violation's tokens."""
        lTokens = oViolation.get_tokens()
        dAction = oViolation.get_action()
        fix_left_violations(self, dAction, lTokens)
        fix_right_violations(self, dAction, lTokens)
        oViolation.set_tokens(lTokens)
def fix_left_violations(self, dAction, lTokens):
    """Apply the recorded left-side action to lTokens in place.

    `self` is the rule instance; its spaces_before attribute supplies the
    desired whitespace width.
    """
    if not left_action_exists(dAction):
        return
    if dAction['left']['action'] == 'adjust':
        lTokens[0].set_value(' '*self.spaces_before)
    elif dAction['left']['action'] == 'remove':
        lTokens.pop(0)
    else:
        rules_utils.insert_whitespace(lTokens, self.spaces_before)
def fix_right_violations(self, dAction, lTokens):
    """Apply the recorded right-side action to lTokens in place."""
    if not right_action_exists(dAction):
        return
    if dAction['right']['action'] == 'adjust':
        lTokens[-1].set_value(' '*self.spaces_after)
    else:
        # NOTE(review): the insertion index is len(lTokens) - spaces_after,
        # not len(lTokens); confirm this matches the (token list, index)
        # contract of rules_utils.insert_whitespace.
        rules_utils.insert_whitespace(lTokens, len(lTokens) - self.spaces_after)
def right_action_exists(dAction):
    """Return True when an action has been recorded for the right-hand side."""
    # Direct dict membership replaces the original list(dAction.keys()) round trip.
    return 'right' in dAction
def left_action_exists(dAction):
    """Return True when an action has been recorded for the left-hand side."""
    # Direct dict membership replaces the original list(dAction.keys()) round trip.
    return 'left' in dAction
def create_solution_text(dAction, iNumSpacesBefore, iNumSpacesAfter, lTokens):
    """Combine the left- and right-hand solution messages into one string."""
    left_part = create_left_solution(dAction, iNumSpacesBefore, lTokens)
    right_part = create_right_solution(dAction, iNumSpacesAfter, lTokens)
    return left_part + right_part
def create_left_solution(dAction, iNumSpaces, lTokens):
    """Return the left-side solution message, or '' when no left action exists."""
    if not left_action_exists(dAction):
        return ''
    return create_solution(dAction, 'left', iNumSpaces, lTokens)
def create_right_solution(dAction, iNumSpaces, lTokens):
    """Return the right-side solution message, or '' when no right action exists."""
    if not right_action_exists(dAction):
        return ''
    return create_solution(dAction, 'right', iNumSpaces, lTokens)
def create_solution(dAction, sKey, iNumSpaces, lTokens):
    """Build the human-readable fix message for one side of the token."""
    side = dAction[sKey]['side']
    token_value = lTokens[1].get_value()
    action = dAction[sKey]['action']
    # Map each action to its message; 'insert' is the fallback.
    messages = {
        'adjust': f'Change number of spaces {side} *{token_value}* to {iNumSpaces}.',
        'remove': f'Remove all space(s) {side} *{token_value}*.',
    }
    return messages.get(action, f'Add {iNumSpaces} space(s) {side} *{token_value}*.')
def check_spaces_on_left_side(lTokens, dAction, iSpaces):
    """Record in dAction which whitespace fix, if any, the left side needs."""
    check_for_adjustment_of_existing_whitespace(lTokens, dAction, iSpaces)
    check_for_removal_of_existing_whitespace(lTokens, dAction, iSpaces)
    check_for_insertion_of_missing_whitespace(lTokens, dAction, iSpaces)
def check_for_adjustment_of_existing_whitespace(lTokens, dAction, iSpaces):
    """Flag an 'adjust' when whitespace exists and a non-zero width is required."""
    oLeft = lTokens[0]
    if isinstance(oLeft, parser.whitespace) and iSpaces > 0:
        set_adjust_action('left', oLeft, dAction, iSpaces)
def check_for_removal_of_existing_whitespace(lTokens, dAction, iSpaces):
    """Flag a 'remove' when whitespace exists but zero width is required."""
    oLeft = lTokens[0]
    if isinstance(oLeft, parser.whitespace) and iSpaces == 0:
        set_remove_action('left', dAction)
def check_for_insertion_of_missing_whitespace(lTokens, dAction, iSpaces):
    """Flag an 'insert' when no whitespace exists and a non-zero width is required."""
    oLeft = lTokens[0]
    if not isinstance(oLeft, parser.whitespace) and iSpaces > 0:
        set_insert_action('left', dAction)
def check_spaces_on_right_side(lTokens, dAction, iSpaces):
    """Record in dAction which whitespace fix the right side needs.

    NOTE(review): unlike the left side, no 'remove' action is ever produced
    here even when iSpaces is 0 — confirm this asymmetry is intended.
    """
    oRight = lTokens[-1]
    if isinstance(oRight, parser.whitespace):
        set_adjust_action('right', oRight, dAction, iSpaces)
    else:
        set_insert_action('right', dAction)
def set_adjust_action(sSide, oToken, dAction, iSpaces):
    """Record an 'adjust' action when the whitespace token has the wrong width."""
    if len(oToken.get_value()) == iSpaces:
        return
    dAction[sSide] = {'action': 'adjust'}
    set_side_of_action(sSide, dAction)
def set_remove_action(sSide, dAction):
    """Record a 'remove' action for the given side."""
    dAction[sSide] = {'action': 'remove'}
    set_side_of_action(sSide, dAction)
def set_insert_action(sSide, dAction):
    """Record an 'insert' action for the given side."""
    dAction[sSide] = {'action': 'insert'}
    set_side_of_action(sSide, dAction)
def set_side_of_action(sSide, dAction):
    """Store the word ('before'/'after') used when describing this side."""
    dAction[sSide]['side'] = 'after' if sSide == 'right' else 'before'
def violations_found(dAction):
    """Return True when at least one whitespace action was recorded."""
    # len(dAction) > 0 replaces the original len(list(dAction.keys())) > 0
    # round trip while returning the same explicit bool.
    return len(dAction) > 0
1,723 | is stat in shape of scale | # Copyright (c) 2023 Intel Corporation
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import List, Type
import numpy as np
import openvino.runtime as ov
import pytest
from openvino.runtime import opset9 as opset
from nncf import Dataset
from nncf.common.graph.transformations.commands import TargetPoint
from nncf.common.graph.transformations.commands import TargetType
from nncf.experimental.common.tensor_statistics.collectors import TensorReducerBase
from nncf.openvino.graph.transformations.commands import OVTargetPoint
from nncf.openvino.statistics.aggregator import OVStatisticsAggregator
from nncf.openvino.statistics.collectors import OV_REDUCERS_MAP
from nncf.openvino.statistics.collectors import OVBatchMeanReducer
from nncf.openvino.statistics.collectors import OVMeanPerChanelReducer
from nncf.quantization.algorithms.bias_correction.openvino_backend import OVBiasCorrectionAlgoBackend
from nncf.quantization.algorithms.fast_bias_correction.openvino_backend import OVFastBiasCorrectionAlgoBackend
from nncf.quantization.algorithms.min_max.openvino_backend import OVMinMaxAlgoBackend
from tests.common.test_statistics_aggregator import TemplateTestStatisticsAggregator
from tests.openvino.native.models import SharedConvModel
from tests.openvino.native.models import SplitConcatModel
INPUT_NAME = "Input"
CONV_NODE_NAME = "Conv1"
INPUT_SHAPE = [1, 3, 3, 3]
def get_StatisticAgregatorTestModel(input_shape, kernel):
    """Build a minimal OpenVINO model (parameter -> conv -> result) for the tests."""
    input_1 = opset.parameter(input_shape, name=INPUT_NAME)
    strides = [1, 1]
    pads = [0, 0]
    dilations = [1, 1]
    # Kernel is cast to float32 so the model weights match the inputs' precision.
    conv = opset.convolution(input_1, kernel.astype(np.float32), strides, pads, pads, dilations, name=CONV_NODE_NAME)
    result = opset.result(conv, name="Result")
    model = ov.Model([result], [input_1])
    return model
class TestStatisticsAggregator(TemplateTestStatisticsAggregator):
    """OpenVINO backend specialization of the statistics-aggregator template tests."""

    def get_min_max_algo_backend_cls(self) -> Type[OVMinMaxAlgoBackend]:
        return OVMinMaxAlgoBackend

    def get_bias_correction_algo_backend_cls(self) -> Type[OVBiasCorrectionAlgoBackend]:
        return OVBiasCorrectionAlgoBackend

    def get_fast_bias_correction_algo_backend_cls(self) -> Type[OVFastBiasCorrectionAlgoBackend]:
        return OVFastBiasCorrectionAlgoBackend

    def get_backend_model(self, dataset_samples):
        # Conv weights are derived from the first sample so statistics are predictable.
        sample = dataset_samples[0].reshape(INPUT_SHAPE[1:])
        conv_w = self.dataset_samples_to_conv_w(sample)
        return get_StatisticAgregatorTestModel(INPUT_SHAPE, conv_w)

    @pytest.fixture(scope="session")
    def test_params(self):
        # Model factories for the statistic-merging scenarios.
        return {
            "test_statistic_merging": {
                "split_concat": {"model": self._get_split_concat_backend_model},
                "shared_conv": {"model": self._get_shared_conv_model},
            }
        }

    def get_statistics_aggregator(self, dataset):
        return OVStatisticsAggregator(dataset)

    def get_target_point_cls(self):
        return OVTargetPoint

    def get_dataset(self, samples):
        return Dataset(samples, lambda data: {INPUT_NAME: data})

    def get_target_point(self, target_type: TargetType) -> TargetPoint:
        """Map a generic target type onto the node/port of the test model."""
        target_node_name = INPUT_NAME
        port_id = 0
        if target_type == TargetType.OPERATION_WITH_WEIGHTS:
            # Weights live on port 1 of the convolution node.
            target_node_name = CONV_NODE_NAME
            port_id = 1
        if target_type == TargetType.PRE_LAYER_OPERATION:
            target_node_name = CONV_NODE_NAME
        return OVTargetPoint(target_type, target_node_name, port_id)

    @pytest.fixture
    def dataset_samples(self, dataset_values):
        # First sample carries the per-channel min/max values under test.
        input_shape = INPUT_SHAPE
        dataset_samples = [np.zeros(input_shape), np.ones(input_shape)]
        for i, value in enumerate(dataset_values):
            dataset_samples[0][0, i, 0, 0] = value["max"]
            dataset_samples[0][0, i, 0, 1] = value["min"]
        return dataset_samples

    @pytest.fixture
    def METHOD_NAME(self) -> bool:
        return True

    @pytest.fixture
    def is_backend_support_custom_estimators(self) -> bool:
        return True

    @pytest.fixture(params=[True, False], ids=["inplace", "out_of_place"])
    def inplace_statistics(self, request) -> bool:
        # Parametrizes every test over in-place and out-of-place collection.
        return request.param

    def _get_split_concat_backend_model(self, dataset_samples):
        return SplitConcatModel(input_name=INPUT_NAME).ov_model

    def _get_shared_conv_model(self, dataset_samples):
        sample = dataset_samples[0].reshape(INPUT_SHAPE[1:])
        conv_w = self.dataset_samples_to_conv_w(sample)
        return SharedConvModel(input_name=INPUT_NAME, input_shape=INPUT_SHAPE, kernel=conv_w).ov_model

    def reducers_map(self) -> List[TensorReducerBase]:
        # Extend the common reducer map with the OV-specific mean reducers.
        map_ = OV_REDUCERS_MAP.copy()
        map_.update({"batch_mean": OVBatchMeanReducer, "mean_per_ch": OVMeanPerChanelReducer})
        return map_
1,724 | v12 | """Configuration for EcalHexReadout"""
class EcalGeometry() :
    """Configuration for EcalHexReadout for a specific geometry

    Attributes
    ----------
    layerZPositions : float
        z-coordinates of sensitive ecal layers relative to front of ecal [mm]
    ecalFrontZ : float
        z-coordinate of front of ecal plane [mm]
    gap : float
        Distance separating module edges [mm]
    cornersSideUp : bool
        Are the corners of the modules pointed up? (or are the flat sides?)
    layer_shift_x : float
        Shift in x [mm] for the layers that are shifted
    layer_shift_y : float
        Shift in y [mm] for the layers that are shifted
    layer_shift_odd : bool
        Shift the odd-numbered layers
    layer_shift_odd_bilayer : bool
        Shift the odd-numbered bi-layers
    detectors_valid : array of strings
        Regular expressions identifying which detectors are valid for this geometry
    moduleMinR : float
        Module hexagon center-to-flat radius [mm]
        UNLIKELY TO CHANGE - will only change if the CMS HGCAL High-Density design changes
    nCellRHeight : float
        Number of cell sides (center-to-corner radii) spanning the module height
        UNLIKELY TO CHANGE - will only change if the CMS HGCAL High-Density design changes
    """

    def __init__(self,
                 layerZPositions,
                 ecalFrontZ,
                 gap,
                 cornersSideUp,
                 detectors_valid,
                 layer_shift_x = 0.,
                 layer_shift_y = 0.,
                 layer_shift_odd = False,
                 layer_shift_odd_bilayer = False,
                 nCellRHeight = 35.3,
                 moduleMinR = 85.0) :
        """Store the geometry parameters for one detector version."""
        # parameters that must align with the geometry
        self.layerZPositions = layerZPositions
        self.ecalFrontZ = ecalFrontZ
        self.gap = gap
        self.cornersSideUp = cornersSideUp
        self.layer_shift_x = layer_shift_x
        self.layer_shift_y = layer_shift_y
        self.layer_shift_odd = layer_shift_odd
        self.layer_shift_odd_bilayer = layer_shift_odd_bilayer
        self.moduleMinR = moduleMinR
        self.detectors_valid = detectors_valid
        # parameters which are somewhat independent of GDML
        self.nCellRHeight = nCellRHeight
        self.verbose = 0

    def __str__(self) :
        """Stringify this configuration class"""
        s = 'EcalGeometry { Module Gap: %.1f mm, Module Radius: %.1f mm, N Cell Sides Spanning Height: %.1f }' % (
            self.gap , self.moduleMinR , self.nCellRHeight )
        return s

    def v9() :
        """Geometry matching the v9 through v11 detector GDML."""
        return EcalGeometry(detectors_valid = ["ldmx-det-v9","ldmx-det-v10","ldmx-det-v11"],
                            gap = 0.0,
                            layerZPositions = [
                                4.550, 7.300, 13.800, 18.200, 26.050, 31.950, 41.050, 47.450, 56.550, 62.950,
                                72.050, 78.450, 87.550, 93.950, 103.050, 109.450, 118.550, 124.950, 134.050,
                                140.450, 149.550, 155.950, 165.050, 171.450, 184.050, 193.950, 206.550, 216.450,
                                229.050, 238.950, 251.550, 261.450, 274.050, 283.950
                            ],
                            ecalFrontZ = 200.0,
                            cornersSideUp = False,
                            )

    def METHOD_NAME() :
        """Geometry matching the v12 detector GDML (v12 factory)."""
        return EcalGeometry(detectors_valid = ["ldmx-det-v12","ldmx-det-v12[.].*"],
                            gap = 1.5,
                            layerZPositions = [
                                7.850, 13.300, 26.400, 33.500, 47.950, 56.550, 72.250, 81.350, 97.050, 106.150,
                                121.850, 130.950, 146.650, 155.750, 171.450, 180.550, 196.250, 205.350, 221.050,
                                230.150, 245.850, 254.950, 270.650, 279.750, 298.950, 311.550, 330.750, 343.350,
                                362.550, 375.150, 394.350, 406.950, 426.150, 438.750
                            ],
                            ecalFrontZ = 240.5,
                            cornersSideUp = False,
                            )

    def v13() :
        """Geometry matching the v13 detector GDML (same z layout as v12, corners up)."""
        return EcalGeometry(detectors_valid = ["ldmx-det-v13","ldmx-det-v13[.].*"],
                            gap = 1.5,
                            layerZPositions = [
                                7.850, 13.300, 26.400, 33.500, 47.950, 56.550, 72.250, 81.350, 97.050, 106.150,
                                121.850, 130.950, 146.650, 155.750, 171.450, 180.550, 196.250, 205.350, 221.050,
                                230.150, 245.850, 254.950, 270.650, 279.750, 298.950, 311.550, 330.750, 343.350,
                                362.550, 375.150, 394.350, 406.950, 426.150, 438.750
                            ],
                            ecalFrontZ = 240.5,
                            cornersSideUp = True,
                            )

    def v14() :
        """Geometry matching the v14 detector GDML (odd layers shifted by one cell)."""
        eg = EcalGeometry(detectors_valid = ["ldmx-det-v14","ldmx-det-v14[.].*"],
                          gap = 1.5,
                          layerZPositions = [
                              7.932, 14.532, 32.146, 40.746, 58.110, 67.710, 86.574, 96.774, 115.638, 125.838,
                              144.702, 154.902, 173.766, 183.966, 202.830, 213.030, 231.894, 242.094, 260.958,
                              271.158, 290.022, 300.222, 319.086, 329.286, 351.650, 365.250, 387.614, 401.214,
                              423.578, 437.178, 459.542, 473.142, 495.506, 509.106
                          ],
                          ecalFrontZ = 240.0,
                          cornersSideUp = True,
                          layer_shift_odd = True,
                          )
        # shift by a single cell diameter
        eg.layer_shift_x = 2*eg.moduleMinR / eg.nCellRHeight
        return eg

    def geometries() :
        """Return one instance of every supported geometry version."""
        return [EcalGeometry.v9(), EcalGeometry.METHOD_NAME(), EcalGeometry.v13(), EcalGeometry.v14()]
1,725 | handle error | from authlib.common.urls import (
url_decode,
add_params_to_uri,
urlparse,
)
from authlib.common.encoding import json_loads
from .rfc5849 import (
SIGNATURE_HMAC_SHA1,
SIGNATURE_TYPE_HEADER,
ClientAuth,
)
class OAuth1Client:
    """Generic OAuth 1 client bound to an HTTP session object."""

    auth_class = ClientAuth

    def __init__(self, session, client_id, client_secret=None,
                 token=None, token_secret=None,
                 redirect_uri=None, rsa_key=None, verifier=None,
                 signature_method=SIGNATURE_HMAC_SHA1,
                 signature_type=SIGNATURE_TYPE_HEADER,
                 force_include_body=False, realm=None, **kwargs):
        """Create the client and its request-signing auth helper.

        :param session: HTTP session object providing a ``post`` method.
        :param client_id: OAuth consumer key (required).
        :param client_secret: OAuth consumer secret.
        :param token: Previously obtained resource-owner token.
        :param token_secret: Secret paired with ``token``.
        :param redirect_uri: Callback URI sent during authorization.
        :param rsa_key: RSA key for RSA-based signature methods.
        :param verifier: Verifier string from the authorization step.
        :param signature_method: e.g. HMAC-SHA1 (default).
        :param signature_type: Where the signature is placed (header by default).
        :param force_include_body: Include the request body in the signature.
        :param realm: Optional realm for the Authorization header.
        """
        if not client_id:
            raise ValueError('Missing "client_id"')
        self.session = session
        # All credential state lives on the auth helper, not on the client.
        self.auth = self.auth_class(
            client_id, client_secret=client_secret,
            token=token, token_secret=token_secret,
            redirect_uri=redirect_uri,
            signature_method=signature_method,
            signature_type=signature_type,
            rsa_key=rsa_key,
            verifier=verifier,
            realm=realm,
            force_include_body=force_include_body
        )
        self._kwargs = kwargs

    @property
    def redirect_uri(self):
        # Delegates to the auth helper, which owns the value.
        return self.auth.redirect_uri

    @redirect_uri.setter
    def redirect_uri(self, uri):
        self.auth.redirect_uri = uri

    @property
    def token(self):
        # Exposes the credential triple in the wire format key names.
        return dict(
            oauth_token=self.auth.token,
            oauth_token_secret=self.auth.token_secret,
            oauth_verifier=self.auth.verifier
        )

    @token.setter
    def token(self, token):
        """This token setter is designed for an easy integration for
        OAuthClient. Make sure both OAuth1Session and OAuth2Session
        have token setters.
        """
        if token is None:
            # Clear all credential state.
            self.auth.token = None
            self.auth.token_secret = None
            self.auth.verifier = None
        elif 'oauth_token' in token:
            self.auth.token = token['oauth_token']
            if 'oauth_token_secret' in token:
                self.auth.token_secret = token['oauth_token_secret']
            if 'oauth_verifier' in token:
                self.auth.verifier = token['oauth_verifier']
        else:
            message = f'oauth_token is missing: {token!r}'
            self.METHOD_NAME('missing_token', message)

    def create_authorization_url(self, url, request_token=None, **kwargs):
        """Create an authorization URL by appending request_token and optional
        kwargs to url.

        This is the second step in the OAuth 1 workflow. The user should be
        redirected to this authorization URL, grant access to you, and then
        be redirected back to you. The redirection back can either be specified
        during client registration or by supplying a callback URI per request.

        :param url: The authorization endpoint URL.
        :param request_token: The previously obtained request token.
        :param kwargs: Optional parameters to append to the URL.
        :returns: The authorization URL with new parameters embedded.
        """
        kwargs['oauth_token'] = request_token or self.auth.token
        if self.auth.redirect_uri:
            kwargs['oauth_callback'] = self.auth.redirect_uri
        return add_params_to_uri(url, kwargs.items())

    def fetch_request_token(self, url, **kwargs):
        """Method for fetching an access token from the token endpoint.

        This is the first step in the OAuth 1 workflow. A request token is
        obtained by making a signed post request to url. The token is then
        parsed from the application/x-www-form-urlencoded response and ready
        to be used to construct an authorization url.

        :param url: Request Token endpoint.
        :param kwargs: Extra parameters to include for fetching token.
        :return: A Request Token dict.
        """
        return self._fetch_token(url, **kwargs)

    def fetch_access_token(self, url, verifier=None, **kwargs):
        """Method for fetching an access token from the token endpoint.

        This is the final step in the OAuth 1 workflow. An access token is
        obtained using all previously obtained credentials, including the
        verifier from the authorization step.

        :param url: Access Token endpoint.
        :param verifier: A verifier string to prove authorization was granted.
        :param kwargs: Extra parameters to include for fetching access token.
        :return: A token dict.
        """
        if verifier:
            self.auth.verifier = verifier
        if not self.auth.verifier:
            self.METHOD_NAME('missing_verifier', 'Missing "verifier" value')
        return self._fetch_token(url, **kwargs)

    def parse_authorization_response(self, url):
        """Extract parameters from the post authorization redirect
        response URL.

        :param url: The full URL that resulted from the user being redirected
                    back from the OAuth provider to you, the client.
        :returns: A dict of parameters extracted from the URL.
        """
        token = dict(url_decode(urlparse.urlparse(url).query))
        self.token = token
        return token

    def _fetch_token(self, url, **kwargs):
        """POST to the token endpoint, store and return the parsed token."""
        resp = self.session.post(url, auth=self.auth, **kwargs)
        token = self.parse_response_token(resp.status_code, resp.text)
        self.token = token
        # The verifier is single-use; drop it once a token was obtained.
        self.auth.verifier = None
        return token

    def parse_response_token(self, status_code, text):
        """Parse a token endpoint response (JSON or urlencoded) into a dict."""
        if status_code >= 400:
            message = (
                "Token request failed with code {}, "
                "response was '{}'."
            ).format(status_code, text)
            self.METHOD_NAME('fetch_token_denied', message)
        try:
            text = text.strip()
            # Some providers return JSON even though the spec says urlencoded.
            if text.startswith('{'):
                token = json_loads(text)
            else:
                token = dict(url_decode(text))
        except (TypeError, ValueError) as e:
            error = (
                "Unable to decode token from token response. "
                "This is commonly caused by an unsuccessful request where"
                " a non urlencoded error message is returned. "
                "The decoding error was {}"
            ).format(e)
            raise ValueError(error)
        return token

    @staticmethod
    def METHOD_NAME(error_type, error_description):
        # Central error hook; raises a ValueError tagged with the error type.
        raise ValueError(f'{error_type}: {error_description}')
1,726 | write test nested key json dataset | # -*- coding: utf-8 -*-
import json
import logging
import os
import shutil
import subprocess
import tempfile
from urllib.error import HTTPError
import torch # noqa: F401
from torch.testing._internal.common_utils import TestCase
logger = logging.getLogger(__name__)
def third_party_download(test_func):
    """Decorator that logs and swallows HTTPError from third-party downloads.

    Network-backed tests should not fail the suite when the remote host is
    unreachable; a warning is logged instead and the wrapped test returns None.
    """

    @functools.wraps(test_func)  # preserve the test's name/docstring for reporting
    def inner(*args, **kwargs):
        try:
            return test_func(*args, **kwargs)
        except HTTPError as e:
            logger.warning(f"Cannot access URL in {test_func.__name__}. Error message {e}")

    return inner
class TorchtextTestCase(TestCase):
    """Base TestCase that provides a temp directory and writers for small fixture datasets."""

    def setUp(self) -> None:
        logging.basicConfig(format=("%(asctime)s - %(levelname)s - " "%(name)s - %(message)s"), level=logging.INFO)
        # Directory where everything temporary and test-related is written
        self.project_root = os.path.abspath(
            os.path.realpath(os.path.join(os.path.dirname(os.path.realpath(__file__)), os.pardir, os.pardir))
        )
        self.test_dir = tempfile.mkdtemp()
        # Destination paths for the fixture datasets written by the helpers below
        self.test_ppid_dataset_path = os.path.join(self.test_dir, "test_ppid_dataset")
        self.test_numerical_features_dataset_path = os.path.join(self.test_dir, "test_numerical_features_dataset")
        self.test_newline_dataset_path = os.path.join(self.test_dir, "test_newline_dataset")
        self.test_has_header_dataset_path = os.path.join(self.test_dir, "test_has_header_dataset")
        self.test_missing_field_dataset_path = os.path.join(self.test_dir, "test_msg_field_dst")
        self.test_dataset_splitting_path = os.path.join(self.test_dir, "test_dataset_split")
        self.test_nested_key_json_dataset_path = os.path.join(self.test_dir, "test_nested_key_json")

    def tearDown(self) -> None:
        """Remove the temp directory; fall back to `rm -rf` when rmtree fails."""
        try:
            shutil.rmtree(self.test_dir)
        # NOTE(review): bare `except:` also catches KeyboardInterrupt/SystemExit;
        # consider narrowing to OSError.
        except:
            subprocess.call(["rm", "-rf", self.test_dir])

    def write_test_ppid_dataset(self, data_format="csv"):
        """Write a tiny paraphrase-pair fixture dataset in csv/tsv/json format."""
        data_format = data_format.lower()
        if data_format == "csv":
            delim = ","
        elif data_format == "tsv":
            delim = "\t"
        dict_dataset = [
            {
                "id": "0",
                "question1": "When do you use シ instead of し?",
                "question2": 'When do you use "&" instead of "and"?',
                "label": "0",
            },
            {
                "id": "1",
                "question1": "Where was Lincoln born?",
                "question2": "Which location was Abraham Lincoln born?",
                "label": "1",
            },
            {"id": "2", "question1": "What is 2+2", "question2": "2+2=?", "label": "1"},
        ]
        with open(self.test_ppid_dataset_path, "w", encoding="utf-8") as test_ppid_dataset_file:
            for example in dict_dataset:
                if data_format == "json":
                    test_ppid_dataset_file.write(json.dumps(example) + "\n")
                elif data_format == "csv" or data_format == "tsv":
                    test_ppid_dataset_file.write(
                        "{}\n".format(
                            delim.join([example["id"], example["question1"], example["question2"], example["label"]])
                        )
                    )
                else:
                    raise ValueError("Invalid format {}".format(data_format))

    def METHOD_NAME(self) -> None:
        """
        Used only to test nested key parsing of Example.fromJSON()
        """
        dict_dataset = [
            {"foods": {"fruits": ["Apple", "Banana"], "vegetables": [{"name": "Broccoli"}, {"name": "Cabbage"}]}},
            {
                "foods": {
                    "fruits": ["Cherry", "Grape", "Lemon"],
                    "vegetables": [{"name": "Cucumber"}, {"name": "Lettuce"}],
                }
            },
            {
                "foods": {
                    "fruits": ["Orange", "Pear", "Strawberry"],
                    "vegetables": [{"name": "Marrow"}, {"name": "Spinach"}],
                }
            },
        ]
        with open(self.test_nested_key_json_dataset_path, "w") as test_nested_key_json_dataset_file:
            for example in dict_dataset:
                test_nested_key_json_dataset_file.write(json.dumps(example) + "\n")

    def write_test_numerical_features_dataset(self) -> None:
        """Write a small tab-separated dataset of (float, int, string) rows."""
        with open(self.test_numerical_features_dataset_path, "w") as test_numerical_features_dataset_file:
            test_numerical_features_dataset_file.write("0.1\t1\tteststring1\n")
            test_numerical_features_dataset_file.write("0.5\t12\tteststring2\n")
            test_numerical_features_dataset_file.write("0.2\t0\tteststring3\n")
            test_numerical_features_dataset_file.write("0.4\t12\tteststring4\n")
            test_numerical_features_dataset_file.write("0.9\t9\tteststring5\n")

    def make_mock_dataset(self, num_examples=30, num_labels=3):
        """Return a list of {text, label} dicts with labels cycling over num_labels."""
        num_repetitions = int(round(num_examples / num_labels)) + 1
        texts = [str(i) for i in range(num_examples)]
        labels = list(range(num_labels)) * num_repetitions
        labels = [str(line) for line in labels[:num_examples]]
        dict_dataset = [{"text": t, "label": l} for t, l in zip(texts, labels)]
        return dict_dataset

    def write_test_splitting_dataset(self, num_examples=30, num_labels=3):
        """Write the mock dataset as comma-separated `text,label` lines."""
        dict_dataset = self.make_mock_dataset(num_examples, num_labels)
        delim = ","
        with open(self.test_dataset_splitting_path, "w") as test_splitting_dataset_file:
            for example in dict_dataset:
                test_splitting_dataset_file.write("{}\n".format(delim.join([example["text"], example["label"]])))
def verify_numericalized_example(
    field, test_example_data, test_example_numericalized, test_example_lengths=None, batch_first=False, train=True
):
    """
    Function to verify that numericalized example is correct
    with respect to the Field's Vocab.
    """
    # When the Field includes lengths, numericalization returns a
    # (tensor, lengths) tuple; unpack and check the lengths first.
    if isinstance(test_example_numericalized, tuple):
        test_example_numericalized, lengths = test_example_numericalized
        assert test_example_lengths == lengths.tolist()
    # Undo batch-first layout in place so the transpose below always yields
    # one example per row.
    if batch_first:
        test_example_numericalized.t_()
    # Transpose numericalized example so we can compare over batches
    for example_idx, numericalized_single_example in enumerate(test_example_numericalized.t()):
        assert len(test_example_data[example_idx]) == len(numericalized_single_example)
        # NOTE(review): `.volatile` is pre-0.4 PyTorch Variable semantics
        # (inference mode); this assertion presumably only works on very old
        # torch versions — confirm before reuse.
        assert numericalized_single_example.volatile is not train
        for token_idx, numericalized_token in enumerate(numericalized_single_example):
            # Convert from Variable to int
            numericalized_token = numericalized_token.item() # Pytorch v4 compatibility
            test_example_token = test_example_data[example_idx][token_idx]
            # Check if the numericalized example is correct, taking into
            # account unknown tokens.
            if field.vocab.stoi[test_example_token] != 0:
                # token is in-vocabulary
                assert field.vocab.itos[numericalized_token] == test_example_token
            else:
                # token is OOV and <unk> always has an index of 0
                assert numericalized_token == 0
1,727 | group by | import math
from collections import OrderedDict
from itertools import groupby
from functools import reduce
def percent(num, denom, places=2):
    """Return num/denom as a percentage rounded to `places`; 0 when denom is 0."""
    return 0 if denom == 0 else round(num / denom * 100, places)
def ratio(num, denom, places=2):
    """Return num/denom rounded to `places`; 0 when denom is 0."""
    return 0 if denom == 0 else round(num / denom, places)
def generate_expected_quarter_keys(starting_key, amount):
    """Yield `amount` (year, quarter) keys counting backwards from `starting_key`."""
    year, quarter = starting_key

    def step_back(n):
        # Number of whole years crossed when going n quarters back.
        years_back = math.floor((n + (4 - quarter)) / 4)
        return (year - years_back, (((quarter - 1) - n) % 4) + 1)

    return map(step_back, range(amount))
def quarter_index(month):
    """Return the 1-based quarter (1-4) containing the 1-based `month`."""
    # (month + 2) // 3 is algebraically identical to ((month - 1) // 3) + 1
    # for every integer month.
    return (month + 2) // 3
def year_month_key(result):
    """Grouping key: (financial year, financial period) of a result row."""
    year = result["financial_year_end.year"]
    period = result["financial_period.period"]
    return (year, period)
def year_key(result):
    # Grouping key: the financial year of a result row.
    return result["financial_year_end.year"]
def item_amount_pair(item):
    """Return the (item code, summed amount) pair for one result row."""
    code = item["item.code"]
    amount = item["amount.sum"]
    return (code, amount)
def collect_item_amounts(item):
    """Collapse a (key, rows) group into (key, {item code: summed amount})."""
    group_key, rows = item
    return (group_key, {row["item.code"]: row["amount.sum"] for row in rows})
def group_items_by_year(results):
    """Group result rows into (year, {code: amount}) pairs.

    Note: itertools.groupby only groups adjacent rows, so `results` must
    already be ordered by year.
    """
    by_year = groupby(results, key=year_key)
    return map(collect_item_amounts, by_year)
def group_items_by_month(results):
    """Group result rows into ((year, period), {code: amount}) pairs.

    Note: itertools.groupby only groups adjacent rows, so `results` must
    already be ordered by (year, period).
    """
    by_month = groupby(results, key=year_month_key)
    return map(collect_item_amounts, by_month)
def sum_item_amounts(result, codes):
    """Sum the amounts stored under `codes`, treating missing codes as zero."""
    return sum(result.get(code, 0) for code in codes)
def add_none_as_zero(a, b):
    """Add two numbers where either may be None (None counts as zero)."""
    left = 0 if a is None else a
    right = 0 if b is None else b
    return left + right
def year_quarter_key(item):
    """Map a ((year, month), value) entry to its (year, quarter) bucket."""
    (year, month), _ = item
    # Inlined quarter_index(): 1-based quarter of a 1-based month.
    return (year, ((month - 1) // 3) + 1)
def select_latest_month(item):
    """Reduce a (key, [( (year, month), value ), ...]) group to the latest month's value."""
    group_key, month_entries = item
    # Stable sort by the (year, month) part; the last element is the latest.
    ordered = sorted(month_entries, key=lambda entry: entry[0])
    _, latest_value = ordered[-1]
    return (group_key, latest_value)
def group_quarters(periods, select):
    """Group ((year, month), value) entries into quarters, newest first.

    Each quarter group is reduced to a single entry by `select`
    (e.g. select_latest_month).
    """
    newest_first = sorted(periods, key=lambda entry: entry[0], reverse=True)
    quarters = groupby(newest_first, key=year_quarter_key)
    return map(select, quarters)
def item_has_keys(item, keys):
    """Return True when every key in `keys` maps to a non-None value.

    `item` is a (year, values) pair as produced by dict.items(); only the
    values dict is inspected. An empty `keys` sequence yields True.
    """
    _year, values = item
    # The previous implementation folded with reduce(), compared with
    # `!= None`, and emitted a debug print for every key; the leftover
    # print polluted stdout, so it was removed along with the non-idiomatic
    # comparison.
    return all(values.get(key) is not None for key in keys)
def filter_for_all_keys(obj, keys):
    """Lazily yield the (year, values) items of `obj` that carry all `keys`."""
    def has_all_keys(entry):
        return item_has_keys(entry, keys)
    return filter(has_all_keys, obj.items())
def filter_for_all_keys_versioned(obj, keys):
    """Like filter_for_all_keys, but for (key, version)-keyed value dicts.

    For each year, prefer the "v2" cube when it holds all requested keys,
    otherwise fall back to "v1"; years with no complete version are dropped.
    Returns a list of (year, {key: value, ..., "cube_version": version})
    pairs in the input's iteration order.
    """
    result = []
    for year, values_dict in obj.items():
        # Try versions in order of preference; the first complete one wins.
        # (The previous version duplicated this block per version and used
        # result.extend([pair]) where append() was meant.)
        for version in ("v2", "v1"):
            if all((key, version) in values_dict for key in keys):
                unversioned_dict = {key: values_dict[(key, version)] for key in keys}
                unversioned_dict["cube_version"] = version
                result.append((year, unversioned_dict))
                break
    return result
def data_source_version(year):
    """Return the cube version for a year: "v2" after 2019, else "v1"."""
    return "v2" if year > 2019 else "v1"
def year_amount_key(result):
    """Map a result row to its (financial year, summed amount) pair."""
    year = result["financial_year_end.year"]
    amount = result["amount.sum"]
    return (year, amount)
def year_assets_key(result):
    """Map a result row to its (financial year, summed total assets) pair."""
    year = result["financial_year_end.year"]
    assets = result["total_assets.sum"]
    return (year, assets)
def group_by_year(results, key="amount"):
    """Lazily map result rows to (financial year, <key>.sum) pairs."""
    def to_pair(result):
        return (result["financial_year_end.year"], result[f"{key}.sum"])
    return map(to_pair, results)
def populate_periods(periods, years, key):
    """Insert (year, value) pairs into the nested `periods` dict under `key`.

    Mutates `periods` in place; creates the per-year sub-dict on demand.
    """
    for year, value in years:
        periods.setdefault(year, {})[key] = value
def METHOD_NAME(items, keyfunc):
    """
    Returns dictionary of lists
    [{"a": 1}, {"b": 2}] -> {"a": [{"a": 1}], "b": [{"b": 2}]}
    """
    # groupby only groups adjacent elements, so sort by the same key first.
    grouped = {}
    for group_key, members in groupby(sorted(items, key=keyfunc), key=keyfunc):
        grouped[group_key] = list(members)
    return grouped
1,728 | extract terms set | # Copyright 2016-22 Nick Boultbee
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
import time
import operator
from collections import defaultdict
from datetime import datetime
from quodlibet import print_d
from quodlibet.formats import AudioFile
from quodlibet.query import Query, QueryType
from quodlibet.query._match import Tag, Inter, Union, Numcmp, NumexprTag, \
Numexpr, True_, False_
# Map each ordering operator to its inverse; used to flip a numeric
# comparison when the tag appears on the right-hand side of the expression.
INVERSE_OPS = {operator.le: operator.gt,
               operator.gt: operator.le,
               operator.lt: operator.ge,
               operator.ge: operator.lt}
_DUMMY_AF = AudioFile()  # empty song used only to evaluate numeric expressions
_CLOCK = time.time  # default clock; injectable for tests
def convert_time(t):
    """Convert a Unix timestamp to a local "YYYY-MM-DD HH:MM:SS" string.

    Fractional seconds are truncated via int(). The previous format string
    was '%Y-%m-%d %H:%S', which placed *seconds* directly after hours and
    dropped minutes entirely — almost certainly a typo for '%H:%M:%S'.
    """
    return datetime.strftime(datetime.fromtimestamp(int(t)), '%Y-%m-%d %H:%M:%S')
_QL_TO_SC = {
    'genre': ('genres', None),
    # QL stores length in seconds; the API expects milliseconds.
    'length': ('duration', lambda x: int((x or 0) * 1000)),
    'date': ('created_at', convert_time),
    'tags': ('tags', None),
    'bpm': ('bpm', None),
    # Free-text tags all collapse onto the generic 'q' search parameter.
    'artist': ('q', None),
    'title': ('q', None),
    'comments': ('q', None),
    'soundcloud_user_id': ('user_id', None)
}
""" Convert QL to Soundcloud tags with optional value mapper"""
# 'rating' is accepted in queries but has no API-side mapping (to_api
# returns (None, None) for it), so it contributes no search terms.
SUPPORTED = set(_QL_TO_SC.keys()) | {"rating"}
class SoundcloudQuery(Query):
    """Query subclass that additionally derives Soundcloud API search terms
    from the parsed query tree (see _extract_terms)."""
    def __init__(self, string, star=None, clock=time.time):
        super().__init__(string, star)
        self._clock = clock
        try:
            self.terms = self._extract_terms(self._match)
        except self.Error as e:
            # An untranslatable query is downgraded to INVALID with empty
            # terms rather than propagating the error to the caller.
            print_d("Couldn't use query: %s" % e)
            self.type = QueryType.INVALID
            self.terms = {}
    def _extract_terms(self, node):
        """ Return a dict of sets keyed on API search term,
            with values for these that could be used to query the API
            and might return results useful for populating the songlist.
            Note this is not a *translation* of the query in any sense,
            and that (currently) the browser filters ingested API results
            so that the QL results are still valid based on
            the query given, even if some more could have been returned.
            ...so if in doubt, *less* restrictive is better here."""
        tuples = self.METHOD_NAME(node)
        terms = defaultdict(set)
        for (k, v) in tuples:
            terms[k].add(v)
        return terms
    def METHOD_NAME(self, node, tag=None):
        # Recursively walk the parsed query tree, returning a set of
        # (api_tag, value) pairs for every sub-expression that can be
        # mapped onto the Soundcloud API. Raises self.Error for nodes that
        # cannot be handled.
        def to_api(tag, raw_value):
            # Translate a QL (tag, value) to an API (tag, value), applying
            # the optional converter from _QL_TO_SC. Returns (None, None)
            # for supported-but-unmapped tags (e.g. 'rating').
            try:
                api_tag, converter = _QL_TO_SC[tag] if tag else ('q', None)
            except KeyError:
                if tag not in SUPPORTED:
                    raise self.Error("Unsupported '%s' tag. Try: %s"
                                     % (tag, ", ". join(SUPPORTED)))
                return None, None
            else:
                value = str(converter(raw_value) if converter else raw_value)
                return api_tag, value
        def terms_from_re(pattern, t):
            """Best efforts to de-regex"""
            # Strip anchors and split simple alternations into one term each.
            pat = pattern.lstrip('^').rstrip('$')
            api_tag, pat = to_api(t, pat)
            return {(api_tag, p) for p in pat.split('|')} if api_tag else set()
        if isinstance(node, Tag) and set(node._names) & SUPPORTED:
            # Only propagate the tag name when it is unambiguous.
            if len(node._names) == 1:
                return self.METHOD_NAME(node.res, tag=node._names[0])
            return self.METHOD_NAME(node.res)
        elif isinstance(node, (Inter, Union)):
            # Treat identically as the text-based query will perform
            # relevance ranking itself, meaning that any term is still useful
            terms = set()
            for n in node.res:
                terms |= self.METHOD_NAME(n)
            return terms
        elif isinstance(node, Numcmp):
            def from_relative(op, l, r):
                # Evaluate the numeric expression against an empty song and
                # the injected clock, then map the comparison onto the API's
                # "[from]" / "[to]" range suffixes.
                raw_value = r.evaluate(_DUMMY_AF, self._clock(), True)
                tag, value = to_api(l._tag, raw_value)
                if not value:
                    return set()
                if op == operator.eq:
                    return {(tag, value)}
                elif op in (operator.le, operator.lt):
                    return {(tag + "[to]", value)}
                elif op in (operator.ge, operator.gt):
                    return {(tag + "[from]", value)}
                raise self.Error("Unsupported operator: %s" % op)
            left = node._expr
            right = node._expr2
            if isinstance(left, NumexprTag) and isinstance(right, Numexpr):
                return from_relative(node._op, left, right)
            elif isinstance(right, NumexprTag) and isinstance(left, Numexpr):
                # We can reduce the logic by flipping the expression
                return from_relative(INVERSE_OPS[node._op], right, left)
            raise self.Error("Unsupported numeric: %s" % node)
        elif hasattr(node, 'pattern'):
            return terms_from_re(node.pattern, tag)
        elif isinstance(node, True_):
            # Matches everything; contributes no restrictions.
            return set()
        elif isinstance(node, False_):
            raise self.Error("False can never be queried")
        raise self.Error("Unhandled node: %r" % (node,))
1,729 | test gpu | # Copyright 2019-2022 ETH Zurich and the DaCe authors. All rights reserved.
# Original application code: NPBench - https://github.com/spcl/npbench
import dace.dtypes
import numpy as np
import dace as dc
import pytest
import argparse
from dace.fpga_testing import fpga_test, xilinx_test
from dace.transformation.interstate import FPGATransformSDFG, InlineSDFG
from dace.transformation.dataflow import StreamingMemory, StreamingComposition
from dace.transformation.auto.auto_optimize import auto_optimize, fpga_auto_opt
from dace.config import set_temporary
# Symbolic array dimensions for the DaCe program below; bound to concrete
# sizes when the SDFG is specialized or called.
NA, NB, Nkz, NE, Nqz, Nw, Norb, N3D = (dc.symbol(s, dc.int64)
                                       for s in ('NA', 'NB', 'Nkz', 'NE', 'Nqz', 'Nw', 'Norb', 'N3D'))
@dc.program
def scattering_self_energies_kernel(neigh_idx: dc.int32[NA, NB], dH: dc.complex128[NA, NB, N3D, Norb, Norb],
                                    G: dc.complex128[Nkz, NE, NA, Norb, Norb],
                                    D: dc.complex128[Nqz, Nw, NA, NB, N3D, N3D],
                                    Sigma: dc.complex128[Nkz, NE, NA, Norb, Norb]):
    """Accumulate scattering self-energies into Sigma (in place).

    DaCe-compiled version of ground_truth() below; the E - w >= 0 guard
    keeps the energy index of G within bounds.
    """
    for k in range(Nkz):
        for E in range(NE):
            for q in range(Nqz):
                for w in range(Nw):
                    for i in range(N3D):
                        for j in range(N3D):
                            for a in range(NA):
                                for b in range(NB):
                                    if E - w >= 0:
                                        dHG = G[k, E - w, neigh_idx[a, b]] @ dH[a, b, i]
                                        dHD = dH[a, b, j] * D[q, w, a, b, i, j]
                                        Sigma[k, E, a] += dHG @ dHD
#### Initialization
def rng_complex(shape, rng):
    """Draw a complex array of `shape`: real part first, then imaginary part."""
    return rng.random(shape) + 1j * rng.random(shape)
def initialize(Nkz, NE, Nqz, Nw, N3D, NA, NB, Norb):
    """Build deterministic (seed 42) random inputs plus a zeroed Sigma output.

    Returns (neigh_idx, dH, G, D, Sigma). The RNG draw order (dH, G, D)
    must not change, or the generated data — and any recorded results —
    would differ.
    """
    from numpy.random import default_rng
    rng = default_rng(42)
    neigh_idx = np.ndarray([NA, NB], dtype=np.int32)
    for i in range(NA):
        # Indices of the NB neighbours of site i, wrapped modulo NA; the
        # float arange is truncated on assignment into the int32 row.
        neigh_idx[i] = np.positive(np.arange(i - NB / 2, i + NB / 2) % NA)
    dH = rng_complex([NA, NB, N3D, Norb, Norb], rng)
    G = rng_complex([Nkz, NE, NA, Norb, Norb], rng)
    D = rng_complex([Nqz, Nw, NA, NB, N3D, N3D], rng)
    Sigma = np.zeros([Nkz, NE, NA, Norb, Norb], dtype=np.complex128)
    return neigh_idx, dH, G, D, Sigma
### Ground Truth
def ground_truth(neigh_idx, dH, G, D, Sigma):
for k in range(G.shape[0]):
for E in range(G.shape[1]):
for q in range(D.shape[0]):
for w in range(D.shape[1]):
for i in range(D.shape[-2]):
for j in range(D.shape[-1]):
for a in range(neigh_idx.shape[0]):
for b in range(neigh_idx.shape[1]):
if E - w >= 0:
dHG = G[k, E - w, neigh_idx[a, b]] @ dH[a, b, i]
dHD = dH[a, b, j] * D[q, w, a, b, i, j]
Sigma[k, E, a] += dHG @ dHD
def run_scattering_self_test(device_type: dace.dtypes.DeviceType):
    '''
    Runs scattering_self for the given device, validating the SDFG result
    against the pure-NumPy ground_truth implementation.
    :return: the SDFG
    '''
    # Initialize data (npbench small size)
    Nkz, NE, Nqz, Nw, N3D, NA, NB, Norb = 2, 4, 2, 2, 2, 6, 2, 3
    neigh_idx, dH, G, D, Sigma = initialize(Nkz, NE, Nqz, Nw, N3D, NA, NB, Norb)
    # Keep an untouched copy: both the SDFG and ground_truth mutate Sigma.
    Sigma_ref = np.copy(Sigma)
    if device_type in {dace.dtypes.DeviceType.CPU, dace.dtypes.DeviceType.GPU}:
        # Parse the SDFG and apply auto-opt
        sdfg = scattering_self_energies_kernel.to_sdfg()
        sdfg = auto_optimize(sdfg, device_type)
        sdfg(neigh_idx, dH, G, D, Sigma, Nkz=Nkz, NE=NE, Nqz=Nqz, N3D=N3D, NA=NA, NB=NB, Norb=Norb, Nw=Nw)
    elif device_type == dace.dtypes.DeviceType.FPGA:
        # Parse SDFG and apply FPGA friendly optimization
        sdfg = scattering_self_energies_kernel.to_sdfg(simplify=True)
        applied = sdfg.apply_transformations([FPGATransformSDFG])
        assert applied == 1
        from dace.libraries.blas import Gemm
        Gemm.default_implementation = "FPGA1DSystolic"
        sdfg.expand_library_nodes()
        sdfg.apply_transformations_repeated([InlineSDFG], print_report=True)
        # FPGA flow bakes the sizes into the SDFG instead of passing them.
        sdfg.specialize(dict(Nkz=Nkz, NE=NE, Nqz=Nqz, N3D=N3D, NA=NA, NB=NB, Norb=Norb, Nw=Nw))
        sdfg(neigh_idx, dH, G, D, Sigma)
    # Compute ground truth and validate
    ground_truth(neigh_idx, dH, G, D, Sigma_ref)
    assert np.allclose(Sigma, Sigma_ref)
    return sdfg
def test_cpu():
    # Full SDFG-vs-ground-truth check on the CPU backend.
    run_scattering_self_test(dace.dtypes.DeviceType.CPU)
@pytest.mark.gpu
def METHOD_NAME():
    # Same check on the GPU backend; only runs where pytest's gpu mark is enabled.
    run_scattering_self_test(dace.dtypes.DeviceType.GPU)
@pytest.mark.skip(reason="Compiler error")
@fpga_test(assert_ii_1=False)
def test_fpga():
    # FPGA variant; currently skipped because the generated code fails to compile.
    return run_scattering_self_test(dace.dtypes.DeviceType.FPGA)
if __name__ == "__main__":
    # Command-line entry point mirroring the pytest targets above.
    parser = argparse.ArgumentParser()
    parser.add_argument("-t", "--target", default='cpu', choices=['cpu', 'gpu', 'fpga'], help='Target platform')
    args = vars(parser.parse_args())
    target = args["target"]
    device_by_target = {
        "cpu": dace.dtypes.DeviceType.CPU,
        "gpu": dace.dtypes.DeviceType.GPU,
        "fpga": dace.dtypes.DeviceType.FPGA,
    }
    if target in device_by_target:
        run_scattering_self_test(device_by_target[target])
1,730 | test evaluate tuple | import datetime
import sys
from decimal import Decimal
from enum import Enum
from typing import Any
from typing import Dict
from typing import List
from typing import Optional
from typing import Set
from typing import Tuple
from typing import Type
from typing import TypeVar
from typing import Union
from unittest import TestCase
from xml.etree.ElementTree import QName
from xsdata.formats.bindings import T
from xsdata.formats.dataclass.typing import evaluate
from xsdata.formats.dataclass.typing import get_args
from xsdata.formats.dataclass.typing import get_origin
from xsdata.models.datatype import XmlDate
from xsdata.models.datatype import XmlDateTime
from xsdata.models.datatype import XmlDuration
from xsdata.models.datatype import XmlPeriod
from xsdata.models.datatype import XmlTime
from xsdata.models.enums import Namespace
class TypingTests(TestCase):
    """Tests for xsdata's ``evaluate`` type-hint resolution helper."""
    def assertCases(self, cases):
        # Each entry maps a type hint to its expected evaluate() tuple; the
        # sentinel ``False`` means evaluate() must raise TypeError.
        for tp, result in cases.items():
            if result is False:
                with self.assertRaises(TypeError):
                    evaluate(tp)
            else:
                self.assertEqual(result, evaluate(tp), msg=tp)
    def test_evaluate_simple(self):
        # Plain (non-generic) types evaluate to a one-element tuple of themselves.
        types = (
            int,
            str,
            int,
            bool,
            float,
            bytes,
            object,
            datetime.time,
            datetime.date,
            datetime.datetime,
            XmlTime,
            XmlDate,
            XmlDateTime,
            XmlDuration,
            XmlPeriod,
            QName,
            Decimal,
            Enum,
            Namespace,
        )
        cases = {tp: (tp,) for tp in types}
        self.assertCases(cases)
    def test_evaluate_unsupported_typing(self):
        cases = [Any, Set[str]]
        for case in cases:
            with self.assertRaises(TypeError):
                evaluate(case)
    def test_evaluate_dict(self):
        cases = {
            Dict: (dict, str, str),
            Dict[str, int]: (dict, str, int),
            Dict[Any, Any]: False,
            Dict[Union[str, int], int]: False,
            Dict[int, Union[str, int]]: False,
            Dict[TypeVar("A", bound=int), str]: False,
            Dict[TypeVar("A"), str]: (dict, str, str),
        }
        # PEP 585 builtin generics (dict[...]) exist from Python 3.9.
        if sys.version_info[:2] >= (3, 9):
            cases.update(
                {
                    dict: (dict, str, str),
                    dict[str, int]: (dict, str, int),
                    dict[Any, Any]: False,
                    dict[Union[str, int], int]: False,
                    dict[int, Union[str, int]]: False,
                    dict[TypeVar("A", bound=int), str]: False,
                    dict[TypeVar("A"), str]: (dict, str, str),
                }
            )
        # PEP 604 union syntax (X | Y) exists from Python 3.10.
        if sys.version_info[:2] >= (3, 10):
            cases.update({dict[str | int, int]: False})
        self.assertCases(cases)
    def test_evaluate_list(self):
        A = TypeVar("A", int, str)
        cases = {
            List[A]: (list, int, str),
            List[int]: (list, int),
            List[Union[float, str]]: (list, float, str),
            List[Optional[int]]: (list, int),
            List[Tuple[int]]: (list, tuple, int),
            List[List[Union[bool, str]]]: (list, list, bool, str),
            List: (list, str),
            List[Dict[str, str]]: False,
            List[Any]: False,
        }
        if sys.version_info[:2] >= (3, 9):
            cases.update(
                {
                    list[A]: (list, int, str),
                    list[int]: (list, int),
                    list[Union[float, str]]: (list, float, str),
                    list[Optional[int]]: (list, int),
                    list[Tuple[int]]: (list, tuple, int),
                    list[list[Union[bool, str]]]: (list, list, bool, str),
                    list: (list, str),
                    list["str"]: (list, str),
                    list[dict[str, str]]: False,
                    list[Any]: False,
                }
            )
        self.assertCases(cases)
    def METHOD_NAME(self):
        # Tuple behaves like List plus the variadic Tuple[X, ...] form.
        A = TypeVar("A", int, str)
        cases = {
            Tuple[A]: (tuple, int, str),
            Tuple[int]: (tuple, int),
            Tuple[int, ...]: (tuple, int),
            Tuple[List[int], ...]: (tuple, list, int),
            Tuple[Union[float, str]]: (tuple, float, str),
            Tuple[Optional[int]]: (tuple, int),
            Tuple[Tuple[int]]: (tuple, tuple, int),
            Tuple[Tuple[Union[bool, str]]]: (tuple, tuple, bool, str),
            Tuple: (tuple, str),
            Tuple[Dict[str, str]]: False,
            Tuple[Any, ...]: False,
        }
        if sys.version_info[:2] >= (3, 9):
            cases.update(
                {
                    tuple[A]: (tuple, int, str),
                    tuple[int]: (tuple, int),
                    tuple[int, ...]: (tuple, int),
                    tuple[List[int], ...]: (tuple, list, int),
                    tuple[Union[float, str]]: (tuple, float, str),
                    tuple[Optional[int]]: (tuple, int),
                    tuple[tuple[int]]: (tuple, tuple, int),
                    tuple[tuple[Union[bool, str]]]: (tuple, tuple, bool, str),
                    tuple: (tuple, str),
                    tuple[dict[str, str]]: False,
                    tuple[Any, ...]: False,
                }
            )
        self.assertCases(cases)
    def test_evaluate_union(self):
        A = TypeVar("A", int, str)
        cases = {
            Optional[Union[bool, str]]: (bool, str),
            Optional[List[Union[int, float]]]: (list, int, float),
            Optional[A]: (int, str),
            Union[List[int], None]: (list, int),
            Union[List[int], List[str]]: False,
        }
        if sys.version_info[:2] >= (3, 10):
            cases.update(
                {
                    None | bool | str: (bool, str),
                    None | List[int | float]: (list, int, float),
                    None | A: (int, str),
                    List[int] | None: (list, int),
                    List[int] | List[str]: False,
                }
            )
        self.assertCases(cases)
    def test_evaluate_type(self):
        # Forward-referenced Type["str"] resolves; bare Type is rejected.
        self.assertEqual((str,), evaluate(Type["str"]))
        with self.assertRaises(TypeError):
            evaluate(Type)
    def test_evaluate_typevar(self):
        # Constrained TypeVars expand to their constraints, bound TypeVars
        # to their bound; the unbound module-level T is rejected.
        A = TypeVar("A", int, str)
        B = TypeVar("B", bound=object)
        self.assertEqual((int, str), evaluate(A))
        self.assertEqual((object,), evaluate(B))
        with self.assertRaises(TypeError):
            evaluate(T)
1,731 | print msg | #-------------------------------------------------------------------------------
#
# Project: EOxServer <http://eoxserver.org>
# Authors: Stephan Krause <stephan.krause@eox.at>
# Stephan Meissl <stephan.meissl@eox.at>
# Fabian Schindler <fabian.schindler@eox.at>
#
#-------------------------------------------------------------------------------
# Copyright (C) 2011 EOX IT Services GmbH
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies of this Software or works derived from this Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#-------------------------------------------------------------------------------
import logging
import traceback
from optparse import OptionValueError
import django
from django.db import transaction
from django.core.management.base import CommandParser
logger = logging.getLogger(__name__)
def _variable_args_cb(option, opt_str, value, parser):
    """ Helper function for optparse module. Allows
        variable number of option values when used
        as a callback.

    Consumes parser.rargs up to (not including) the next option-like token
    and merges with any values already stored under the option's dest.
    """
    collected = []
    for candidate in parser.rargs:
        if candidate.startswith('-'):
            break
        collected.append(candidate)
    # Remove the consumed values so optparse does not parse them again.
    del parser.rargs[:len(collected)]
    previous = getattr(parser.values, option.dest)
    if previous:
        collected.extend(previous)
    setattr(parser.values, option.dest, collected)
class StringFormatCallback(object):
    """ Small helper class to supply a variable number of arguments to a
    callback function and store the resulting value in the `dest` field of the
    parser.
    """
    def __init__(self, callback):
        # callback: callable taking the space-joined argument string and
        # returning the parsed value; ValueError is mapped to a parse error.
        self.callback = callback

    def __call__(self, option, opt_str, value, parser):
        args = []
        for arg in parser.rargs:
            if not arg.startswith('-'):
                args.append(arg)
            else:
                break
        # Always consume the collected values. The previous version only
        # trimmed parser.rargs when an option-like token terminated the
        # scan, so trailing values were parsed a second time (and it was
        # inconsistent with _variable_args_cb above).
        del parser.rargs[:len(args)]
        try:
            setattr(parser.values, option.dest, self.callback(" ".join(args)))
        except ValueError as e:
            raise OptionValueError(str(e))
class CommandOutputMixIn(object):
    """ Helper mix-in class to ease the handling of user message reporting.

    Expects the host class to provide ``stdout``/``stderr`` writers and an
    optional integer ``verbosity`` attribute (default 1).
    """
    def print_err(self, msg):
        " Print an error message which is both logged and written to stderr. "
        logger.error(msg)
        self.stderr.write("ERROR: %s\n" % msg)
    def print_wrn(self, msg):
        """ Print a warning message, which is logged and possibly written to
        stderr, depending on the set verbosity.
        """
        logger.warning(msg)
        # Warnings are suppressed only in fully silent mode (verbosity 0).
        if 0 < max(0, getattr(self, "verbosity", 1)):
            self.stderr.write("WARNING: %s\n" % msg)
    def METHOD_NAME(self, msg, level=1):
        """ Print a basic message with a given level. The message is possibly
        logged and/or written to stderr depending on the verbosity setting.
        """
        # three basic level of info messages
        # level == 0 - always printed even in the silent mode - not recommended
        # level == 1 - normal info suppressed in silent mode
        # level >= 2 - debuging message (additional levels allowed)
        # messages ALWAYS logged (as either info or debug)
        level = max(0, level)
        verbosity = max(0, getattr(self, "verbosity", 1))
        # everything with level 2 or higher is DEBUG
        if level >= 2:
            prefix = "DEBUG"
            logger.debug(msg)
        # levels 0 (silent) and 1 (default-verbose)
        else:
            prefix = "INFO"
            logger.info(msg)
        # Only messages at or below the configured verbosity reach stdout.
        if level <= verbosity:
            self.stdout.write("%s: %s\n" % (prefix, msg))
    def print_traceback(self, e, kwargs):
        """ Prints a traceback/stacktrace if the traceback option is set.
        """
        if kwargs.get("traceback", False):
            self.METHOD_NAME(traceback.format_exc())
def create_parser(cmd, kwargs):
    """Instantiate a CommandParser across Django versions.

    Django < 2 required the command instance as the first positional
    argument; newer versions take keyword arguments only.
    """
    if django.VERSION[0] < 2:
        return CommandParser(cmd, **kwargs)
    return CommandParser(**kwargs)
class SubParserMixIn(object):
    """Mix-in adding lazily-created argparse subcommand support to a Django command."""
    def add_subparser(self, parser, name, *args, **kwargs):
        # Create the shared subparsers group on first use; the custom
        # parser_class keeps Django-version compatibility (see create_parser).
        if not getattr(self, 'subparsers', None):
            self.subparsers = parser.add_subparsers(
                title="subcommands",
                parser_class=lambda **kw: create_parser(self, kw)
            )
        subparser = self.subparsers.add_parser(name, *args, **kwargs)
        # Record which subcommand was chosen in the parsed options.
        subparser.set_defaults(subcommand=name)
        # Re-add Django's global options so they are accepted after the
        # subcommand name as well.
        subparser.add_argument('--traceback', action="store_true", default=False)
        subparser.add_argument('--settings', nargs=1)
        subparser.add_argument('--pythonpath', nargs=1)
        subparser.add_argument('--no-color', action="store_true", default=False)
        return subparser
1,732 | get project root files | # Copyright (C) 2020 ycmd contributors
#
# This file is part of ycmd.
#
# ycmd is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ycmd is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with ycmd. If not, see <http://www.gnu.org/licenses/>.
import string
from ycmd import responses, utils
from ycmd.completers.language_server import language_server_completer
class GenericLSPCompleter( language_server_completer.LanguageServerCompleter ):
  """Completer driving an arbitrary LSP server described by user settings.

  ``server_settings`` supplies the name, filetypes, command line / port and
  optional capabilities; everything protocol-level is inherited from
  LanguageServerCompleter.
  """
  def __init__( self, user_options, server_settings ):
    utils.LOGGER.info( "Initializing generic LSP completer with: %s",
                       server_settings )
    self._name = server_settings[ 'name' ]
    self._supported_filetypes = server_settings[ 'filetypes' ]
    self._project_root_files = server_settings.get( 'project_root_files', [] )
    self._capabilities = server_settings.get( 'capabilities', {} )
    self._command_line = server_settings.get( 'cmdline' )
    self._server_settings = server_settings
    self._port = server_settings.get( 'port' )
    if self._port:
      connection_type = 'tcp'
      # '*' means "pick any free local port for me".
      if self._port == '*':
        self._port = utils.GetUnusedLocalhostPort()
    else:
      connection_type = 'stdio'
    if self._command_line:
      # We modify this, so take a copy
      self._command_line = list( self._command_line )
      cmd = utils.FindExecutable( self._command_line[ 0 ] )
      if cmd is None:
        utils.LOGGER.warn( "Unable to find any executable with the path %s. "
                           "Cannot use %s completer.",
                           self._command_line[ 0 ],
                           self._name )
        raise RuntimeError( f"Invalid cmdline: { str( self._command_line ) }" )
      self._command_line[ 0 ] = cmd
      # Substitute ${port} placeholders now that the port is known.
      for idx in range( len( self._command_line ) ):
        self._command_line[ idx ] = string.Template(
          self._command_line[ idx ] ).safe_substitute( {
            'port': self._port
          } )
    super().__init__( user_options, connection_type )
  def METHOD_NAME( self ):
    # Filenames/globs that mark a directory as the project root.
    return self._project_root_files
  def Language( self ):
    return self._name
  def GetServerName( self ):
    return self._name + 'Completer'
  def GetCommandLine( self ):
    return self._command_line
  def GetCustomSubcommands( self ):
    return { 'GetHover': lambda self, request_data, args:
             self._GetHover( request_data ) }
  def _GetHover( self, request_data ):
    # Normalise the LSP hover payload (MarkupContent, MarkedString, or a
    # list of MarkedStrings) into a single detailed-info response.
    raw_hover = self.GetHoverResponse( request_data )
    if isinstance( raw_hover, dict ):
      # Both MarkedString and MarkupContent contain 'value' key.
      # MarkupContent is the only one not deprecated.
      return responses.BuildDetailedInfoResponse( raw_hover[ 'value' ] )
    if isinstance( raw_hover, str ):
      # MarkedString might be just a string.
      return responses.BuildDetailedInfoResponse( raw_hover )
    # If we got this far, this is a list of MarkedString objects.
    lines = []
    for marked_string in raw_hover:
      if isinstance( marked_string, str ):
        lines.append( marked_string )
      else:
        lines.append( marked_string[ 'value' ] )
    return responses.BuildDetailedInfoResponse( '\n'.join( lines ) )
  def GetCodepointForCompletionRequest( self, request_data ):
    # Forced semantic completion uses the exact cursor position.
    if request_data[ 'force_semantic' ]:
      return request_data[ 'column_codepoint' ]
    return super().GetCodepointForCompletionRequest( request_data )
  def SupportedFiletypes( self ):
    return self._supported_filetypes
  def ExtraCapabilities( self ):
    return self._capabilities
  def WorkspaceConfigurationResponse( self, request ):
    if self._capabilities.get( 'workspace', {} ).get( 'configuration' ):
      # NOTE(review): self._settings is not assigned in this class —
      # presumably provided by LanguageServerCompleter; confirm it holds the
      # 'config_sections' mapping.
      sections_to_config_map = self._settings.get( 'config_sections', {} )
      return [ sections_to_config_map.get( item.get( 'section', '' ) )
               for item in request[ 'params' ][ 'items' ] ]
  def GetTriggerCharacters( self, server_trigger_characters ):
    # User-configured trigger characters win over the server-advertised ones.
    return self._server_settings.get( 'triggerCharacters',
                                      server_trigger_characters )
1,733 | test expected values | #!/usr/bin/env python
#
# A library that provides a Python interface to the Telegram Bot API
# Copyright (C) 2015-2023
# Leandro Toledo de Souza <devs@python-telegram-bot.org>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser Public License for more details.
#
# You should have received a copy of the GNU Lesser Public License
# along with this program. If not, see [http://www.gnu.org/licenses/].
import pytest
from telegram import (
InlineKeyboardButton,
InlineKeyboardMarkup,
InlineQueryResultCachedVideo,
InlineQueryResultCachedVoice,
InputTextMessageContent,
MessageEntity,
)
from tests.auxil.slots import mro_slots
@pytest.fixture(scope="module")
def inline_query_result_cached_video():
    # Module-scoped fixture: one shared instance built from the constants on
    # TestInlineQueryResultCachedVideoBase below.
    return InlineQueryResultCachedVideo(
        TestInlineQueryResultCachedVideoBase.id_,
        TestInlineQueryResultCachedVideoBase.video_file_id,
        TestInlineQueryResultCachedVideoBase.title,
        caption=TestInlineQueryResultCachedVideoBase.caption,
        parse_mode=TestInlineQueryResultCachedVideoBase.parse_mode,
        caption_entities=TestInlineQueryResultCachedVideoBase.caption_entities,
        description=TestInlineQueryResultCachedVideoBase.description,
        input_message_content=TestInlineQueryResultCachedVideoBase.input_message_content,
        reply_markup=TestInlineQueryResultCachedVideoBase.reply_markup,
    )
class TestInlineQueryResultCachedVideoBase:
    """Constant attribute values shared by the fixture and the tests below."""
    id_ = "id"
    type_ = "video"
    video_file_id = "video file id"
    title = "title"
    caption = "caption"
    parse_mode = "Markdown"
    caption_entities = [MessageEntity(MessageEntity.ITALIC, 0, 7)]
    description = "description"
    input_message_content = InputTextMessageContent("input_message_content")
    reply_markup = InlineKeyboardMarkup([[InlineKeyboardButton("reply_markup")]])
class TestInlineQueryResultCachedVideoWithoutRequest(TestInlineQueryResultCachedVideoBase):
    """Offline tests for InlineQueryResultCachedVideo (no Bot API requests)."""
    def test_slot_behaviour(self, inline_query_result_cached_video):
        # Every declared slot must be settable and no slot may be duplicated
        # anywhere in the MRO.
        inst = inline_query_result_cached_video
        for attr in inst.__slots__:
            assert getattr(inst, attr, "err") != "err", f"got extra slot '{attr}'"
        assert len(mro_slots(inst)) == len(set(mro_slots(inst))), "duplicate slot"
    def METHOD_NAME(self, inline_query_result_cached_video):
        # The constructed object must expose exactly the values the fixture
        # passed in (entities are normalised to a tuple).
        assert inline_query_result_cached_video.type == self.type_
        assert inline_query_result_cached_video.id == self.id_
        assert inline_query_result_cached_video.video_file_id == self.video_file_id
        assert inline_query_result_cached_video.title == self.title
        assert inline_query_result_cached_video.description == self.description
        assert inline_query_result_cached_video.caption == self.caption
        assert inline_query_result_cached_video.parse_mode == self.parse_mode
        assert inline_query_result_cached_video.caption_entities == tuple(self.caption_entities)
        assert (
            inline_query_result_cached_video.input_message_content.to_dict()
            == self.input_message_content.to_dict()
        )
        assert (
            inline_query_result_cached_video.reply_markup.to_dict() == self.reply_markup.to_dict()
        )
    def test_caption_entities_always_tuple(self):
        video = InlineQueryResultCachedVideo(self.id_, self.video_file_id, self.title)
        assert video.caption_entities == ()
    def test_to_dict(self, inline_query_result_cached_video):
        # Round-trip every attribute through to_dict().
        inline_query_result_cached_video_dict = inline_query_result_cached_video.to_dict()
        assert isinstance(inline_query_result_cached_video_dict, dict)
        assert (
            inline_query_result_cached_video_dict["type"] == inline_query_result_cached_video.type
        )
        assert inline_query_result_cached_video_dict["id"] == inline_query_result_cached_video.id
        assert (
            inline_query_result_cached_video_dict["video_file_id"]
            == inline_query_result_cached_video.video_file_id
        )
        assert (
            inline_query_result_cached_video_dict["title"]
            == inline_query_result_cached_video.title
        )
        assert (
            inline_query_result_cached_video_dict["description"]
            == inline_query_result_cached_video.description
        )
        assert (
            inline_query_result_cached_video_dict["caption"]
            == inline_query_result_cached_video.caption
        )
        assert (
            inline_query_result_cached_video_dict["parse_mode"]
            == inline_query_result_cached_video.parse_mode
        )
        assert inline_query_result_cached_video_dict["caption_entities"] == [
            ce.to_dict() for ce in inline_query_result_cached_video.caption_entities
        ]
        assert (
            inline_query_result_cached_video_dict["input_message_content"]
            == inline_query_result_cached_video.input_message_content.to_dict()
        )
        assert (
            inline_query_result_cached_video_dict["reply_markup"]
            == inline_query_result_cached_video.reply_markup.to_dict()
        )
    def test_equality(self):
        # Equality/hash are based on id and file id; other fields (title
        # etc.) are ignored, and different result types never compare equal.
        a = InlineQueryResultCachedVideo(self.id_, self.video_file_id, self.title)
        b = InlineQueryResultCachedVideo(self.id_, self.video_file_id, self.title)
        c = InlineQueryResultCachedVideo(self.id_, "", self.title)
        d = InlineQueryResultCachedVideo("", self.video_file_id, self.title)
        e = InlineQueryResultCachedVoice(self.id_, "", "")
        assert a == b
        assert hash(a) == hash(b)
        assert a is not b
        assert a == c
        assert hash(a) == hash(c)
        assert a != d
        assert hash(a) != hash(d)
        assert a != e
        assert hash(a) != hash(e)
import bz2
import gzip
import logging
import lzma
from io import BytesIO, TextIOWrapper
from typing import Optional
import boto3
import pyarrow as pa
import pytest
import awswrangler as wr
import awswrangler.pandas as pd
from .._utils import get_df_csv, is_ray_modin
EXT = {"gzip": ".gz", "bz2": ".bz2", "xz": ".xz", "zip": ".zip"}
logging.getLogger("awswrangler").setLevel(logging.DEBUG)
pytestmark = pytest.mark.distributed
# XFail issue: https://github.com/aws/aws-sdk-pandas/issues/2005
@pytest.mark.parametrize(
    "compression",
    [
        "gzip",
        "bz2",
        pytest.param(
            "xz", marks=pytest.mark.xfail(is_ray_modin, reason="Arrow compression errors", raises=pa.lib.ArrowInvalid)
        ),
    ],
)
def test_csv_read(bucket: str, path: str, compression: str) -> None:
    """Upload a CSV (optionally compressed client-side) and read it back.

    Improvement: the three near-identical gzip/bz2/xz branches are collapsed
    into one table-driven path keyed by the codec name.
    """
    key_prefix = path.replace(f"s3://{bucket}/", "")
    wr.s3.delete_objects(path=path)
    df = get_df_csv()
    # Compressing file wrappers around a BytesIO buffer, keyed by codec name.
    compressors = {
        "gzip": lambda buf: gzip.GzipFile(mode="w", fileobj=buf),
        "bz2": lambda buf: bz2.BZ2File(mode="w", filename=buf),
        "xz": lambda buf: lzma.LZMAFile(mode="w", filename=buf),
    }
    if compression in compressors:
        # Compress locally and upload the raw object via boto3.
        buffer = BytesIO()
        with compressors[compression](buffer) as zipped_file:
            df.to_csv(TextIOWrapper(zipped_file, "utf8"), index=False, header=None)
        key = f"test.csv{EXT[compression]}"
        s3_resource = boto3.resource("s3")
        s3_resource.Object(bucket, f"{key_prefix}{key}").put(Body=buffer.getvalue())
        file_path = f"{path}{key}"
    else:
        # Uncompressed: let awswrangler do the upload.
        file_path = f"{path}test.csv"
        wr.s3.to_csv(df=df, path=file_path, index=False, header=None)
    df2 = wr.s3.read_csv(path=[file_path], names=df.columns)
    assert df2.shape == (3, 10)
    # Chunked read must preserve the column count per chunk.
    dfs = wr.s3.read_csv(path=[file_path], names=df.columns, chunksize=1)
    for df3 in dfs:
        assert len(df3.columns) == 10
# XFail issue: https://github.com/aws/aws-sdk-pandas/issues/2005
@pytest.mark.parametrize(
    "compression",
    [
        "gzip",
        "bz2",
        pytest.param(
            "xz", marks=pytest.mark.xfail(is_ray_modin, reason="Arrow compression errors", raises=pa.lib.ArrowInvalid)
        ),
        pytest.param(
            "zip", marks=pytest.mark.xfail(is_ray_modin, reason="Arrow compression errors", raises=pa.lib.ArrowInvalid)
        ),
        None,
    ],
)
def test_csv_write(path: str, compression: Optional[str]) -> None:
    """Write a compressed CSV via awswrangler and read it back two ways."""
    # Ensure we use the pd.read_csv native to Pandas, not Modin.
    # Modin's read_csv has an issue in this scenario, making the test fail.
    import pandas as pd
    target = f"{path}test.csv{EXT.get(compression, '')}"
    expected = get_df_csv()
    wr.s3.to_csv(expected, target, compression=compression, index=False, header=None)
    via_pandas = pd.read_csv(target, names=expected.columns)
    via_wrangler = wr.s3.read_csv([target], names=expected.columns)
    assert expected.shape == via_pandas.shape == via_wrangler.shape
@pytest.mark.parametrize("compression", ["gzip", "bz2", "xz", "zip", None])
def test_csv_write_dataset_filename_extension(path: str, compression: Optional[str]) -> None:
    """Dataset files must carry the codec-specific filename suffix."""
    frame = get_df_csv()
    written = wr.s3.to_csv(frame, path, compression=compression, index=False, dataset=True)
    suffix = f".csv{EXT.get(compression, '')}"
    assert all(p.endswith(suffix) for p in written["paths"])
@pytest.mark.parametrize("compression", ["gzip", "bz2", "xz", "zip", None])
def test_json(path: str, compression: Optional[str]) -> None:
    """Round-trip a tiny JSON file through S3 and compare shapes."""
    target = f"{path}test.json{EXT.get(compression, '')}"
    frame = pd.DataFrame({"id": [1, 2, 3]})
    # NOTE(review): to_json is not passed `compression`; presumably the codec
    # is inferred from the filename suffix — confirm against wr.s3.to_json.
    wr.s3.to_json(df=frame, path=target)
    via_pandas = pd.read_json(target, compression=compression)
    via_wrangler = wr.s3.read_json(path=[target])
    assert frame.shape == via_pandas.shape == via_wrangler.shape
@pytest.mark.parametrize("chunksize", [None, 1])
@pytest.mark.parametrize("compression", ["gzip", "bz2", "xz", "zip", None])
def test_partitioned_json(path: str, compression: Optional[str], chunksize: Optional[int]) -> None:
    """Write a year/month-partitioned JSON dataset and read it back whole
    or as one-row chunks."""
    frame = pd.DataFrame(
        {
            "c0": [0, 1, 2, 3],
            "c1": ["foo", "boo", "bar", "baz"],
            "year": [2020, 2020, 2021, 2021],
            "month": [1, 2, 1, 2],
        }
    )
    wr.s3.to_json(
        frame,
        path=path,
        orient="records",
        lines=True,
        compression=compression,
        dataset=True,
        partition_cols=["year", "month"],
    )
    result = wr.s3.read_json(path, dataset=True, chunksize=chunksize)
    if chunksize is not None:
        # Each partition holds a single row, so every chunk is 1x4.
        for chunk in result:
            assert chunk.shape == (1, 4)
    else:
        assert result.shape == (4, 4)
        assert result.c0.sum() == 6
@pytest.mark.parametrize("chunksize", [None, 1])
@pytest.mark.parametrize("compression", ["gzip", "bz2", "xz", "zip", None])
def METHOD_NAME(path: str, compression: Optional[str], chunksize: Optional[int]) -> None:
    """Read a hive-partitioned CSV dataset back whole or in one-row chunks.

    Fix: removed trailing junk characters that made the final assert a
    syntax error.
    """
    df = pd.DataFrame({"c0": [0, 1], "c1": ["foo", "boo"]})
    # Three partitions of two rows each -> 6 rows; the year/month partition
    # columns bring the column count to 4.
    paths = [f"{path}year={y}/month={m}/0.csv{EXT.get(compression, '')}" for y, m in [(2020, 1), (2020, 2), (2021, 1)]]
    for p in paths:
        wr.s3.to_csv(df, p, index=False, compression=compression, header=True)
    df2 = wr.s3.read_csv(path, dataset=True, chunksize=chunksize, header=0)
    if chunksize is None:
        assert df2.shape == (6, 4)
        assert df2.c0.sum() == 3
    else:
        for d in df2:
            assert d.shape == (1, 4)
# -*- coding: utf-8 -*-
#
# Copyright © Spyder Project Contributors
# Licensed under the terms of the MIT License
#
"""Tests for the Array Builder Widget."""
# Standard library imports
import sys
# Third party imports
from qtpy.QtCore import Qt
import pytest
# Local imports
from spyder.widgets.arraybuilder import ArrayBuilderDialog
# --- Fixtures
# -----------------------------------------------------------------------------
@pytest.fixture
def botinline(qtbot):
    """Inline-mode array builder dialog plus its entry widget."""
    dlg = ArrayBuilderDialog(inline=True)
    qtbot.addWidget(dlg)
    dlg.show()
    return qtbot, dlg, dlg.array_widget
@pytest.fixture
def botinlinefloat(qtbot):
    """Inline-mode array builder dialog that coerces entries to float."""
    dlg = ArrayBuilderDialog(inline=True, force_float=True)
    qtbot.addWidget(dlg)
    dlg.show()
    return qtbot, dlg, dlg.array_widget
@pytest.fixture
def botarray(qtbot):
    """Table-mode array builder dialog plus its grid widget."""
    dlg = ArrayBuilderDialog(inline=False)
    qtbot.addWidget(dlg)
    dlg.show()
    return qtbot, dlg, dlg.array_widget
# --- Tests
# -----------------------------------------------------------------------------
def test_array_inline_array(botinline):
    """Six space-separated values entered inline build a 2x3 np.array."""
    qtbot, dlg, entry = botinline
    qtbot.keyClicks(entry, '1 2 3 4 5 6')
    qtbot.keyPress(entry, Qt.Key_Return)
    assert dlg.text() == 'np.array([[1, 2, 3],\n [4, 5, 6]])'
def test_array_inline_matrix(botinline):
    """Ctrl+Return produces an np.matrix instead of an np.array."""
    qtbot, dlg, entry = botinline
    qtbot.keyClicks(entry, '4 5 6 7 8 9')
    qtbot.keyPress(entry, Qt.Key_Return, modifier=Qt.ControlModifier)
    assert dlg.text() == 'np.matrix([[4, 5, 6],\n [7, 8, 9]])'
def test_array_inline_array_invalid(botinline):
    """Seven values cannot form a rectangular array, so input is invalid."""
    qtbot, dlg, entry = botinline
    qtbot.keyClicks(entry, '1 2 3 4 5 6 7')
    qtbot.keyPress(entry, Qt.Key_Return)
    dlg.update_warning()
    assert not dlg.is_valid()
def test_array_inline_1d_array(botinline):
    """Three values produce a flat (1-D) matrix literal."""
    qtbot, dlg, entry = botinline
    qtbot.keyClicks(entry, '4 5 6')
    qtbot.keyPress(entry, Qt.Key_Return, modifier=Qt.ControlModifier)
    assert dlg.text() == 'np.matrix([4, 5, 6])'
def test_array_inline_nan_array(botinline):
    """The token 'nan' is translated to np.nan in the generated code."""
    qtbot, dlg, entry = botinline
    qtbot.keyClicks(entry, '4 nan 6 8 9')
    qtbot.keyPress(entry, Qt.Key_Return, modifier=Qt.ControlModifier)
    assert dlg.text() == 'np.matrix([4, np.nan, 6, 8, 9])'
def test_array_inline_inf_array(botinline):
    """The token 'inf' is translated to np.inf in the generated code."""
    qtbot, dlg, entry = botinline
    qtbot.keyClicks(entry, '4 inf 6 8 9')
    qtbot.keyPress(entry, Qt.Key_Return, modifier=Qt.ControlModifier)
    assert dlg.text() == 'np.matrix([4, np.inf, 6, 8, 9])'
def METHOD_NAME(botinlinefloat):
    """With force_float, integer tokens are emitted as floats."""
    qtbot, dlg, entry = botinlinefloat
    qtbot.keyClicks(entry, '4 5 6 8 9')
    qtbot.keyPress(entry, Qt.Key_Return, modifier=Qt.ControlModifier)
    assert dlg.text() == 'np.matrix([4.0, 5.0, 6.0, 8.0, 9.0])'
def test_array_inline_force_float_error_array(botinlinefloat):
    """Non-numeric tokens are passed through unchanged by force_float."""
    qtbot, dlg, entry = botinlinefloat
    qtbot.keyClicks(entry, '4 5 6 a 9')
    qtbot.keyPress(entry, Qt.Key_Return, modifier=Qt.ControlModifier)
    assert dlg.text() == 'np.matrix([4.0, 5.0, 6.0, a, 9.0])'
def test_array_table_array(botarray):
    """Tab-driven entry into the table grid builds a 2x3 np.array."""
    qtbot, dlg, table = botarray
    keys = (
        Qt.Key_1, Qt.Key_Tab,
        Qt.Key_2, Qt.Key_Tab,
        Qt.Key_Backtab,  # Hack: in the tests the selected cell is wrong
        Qt.Key_3, Qt.Key_Tab,
        Qt.Key_4, Qt.Key_Tab,
        Qt.Key_5, Qt.Key_Tab,
        Qt.Key_6, Qt.Key_Tab,  # Hack: in the tests the selected cell is wrong
    )
    for key in keys:
        qtbot.keyClick(table, key)
    qtbot.keyClick(table, Qt.Key_Return, modifier=Qt.NoModifier)
    assert dlg.text() == 'np.array([[1, 2, 3],\n [4, 5, 6]])'
def test_array_table_matrix(botarray):  # analysis:ignore
    """Same grid entry as the array test, but Ctrl+Return emits np.matrix."""
    qtbot, dlg, table = botarray
    keys = (
        Qt.Key_1, Qt.Key_Tab,
        Qt.Key_2, Qt.Key_Tab,
        Qt.Key_Backtab,  # Hack: in the tests the selected cell is wrong
        Qt.Key_3, Qt.Key_Tab,
        Qt.Key_4, Qt.Key_Tab,
        Qt.Key_5, Qt.Key_Tab,
        Qt.Key_6, Qt.Key_Tab,  # Hack: in the tests the selected cell is wrong
    )
    for key in keys:
        qtbot.keyClick(table, key)
    qtbot.keyClick(table, Qt.Key_Return, modifier=Qt.ControlModifier)
    assert dlg.text() == 'np.matrix([[1, 2, 3],\n [4, 5, 6]])'
def test_array_table_array_empty_items(botarray):  # analysis:ignore
    """Cells left empty by tabbing past them default to 0."""
    qtbot, dlg, table = botarray
    keys = (
        Qt.Key_Tab,
        Qt.Key_2, Qt.Key_Tab,
        Qt.Key_Backtab,  # Hack: in the tests the selected cell is wrong
        Qt.Key_3, Qt.Key_Tab,
        Qt.Key_Tab,
        Qt.Key_5, Qt.Key_Tab,
        Qt.Key_6, Qt.Key_Tab,  # Hack: in the tests the selected cell is wrong
    )
    for key in keys:
        qtbot.keyClick(table, key)
    qtbot.keyClick(table, Qt.Key_Return, modifier=Qt.NoModifier)
    assert dlg.text() == 'np.array([[0, 2, 3],\n [0, 5, 6]])'
def test_array_table_array_spaces_in_item(botarray):  # analysis:ignore
    """A whitespace-only cell is treated like an empty one and becomes 0."""
    qtbot, dlg, table = botarray
    qtbot.keyClicks(table, ' ')
    keys = (
        Qt.Key_Tab,
        Qt.Key_2, Qt.Key_Tab,
        Qt.Key_Backtab,
        Qt.Key_3, Qt.Key_Tab,
        Qt.Key_Tab,
        Qt.Key_5, Qt.Key_Tab,
        Qt.Key_6, Qt.Key_Tab,  # Hack: in the tests the selected cell is wrong
    )
    for key in keys:
        qtbot.keyClick(table, key)
    qtbot.keyClick(table, Qt.Key_Return, modifier=Qt.NoModifier)
    assert dlg.text() == 'np.array([[0, 2, 3],\n [0, 5, 6]])'
@pytest.mark.skipif(sys.platform == 'darwin', reason="It fails on macOS")
def test_array_table_matrix_empty(botarray):  # analysis:ignore
    """A completely empty grid yields an empty output string.

    Fix: removed trailing junk characters after the final assert that made
    the line a syntax error.
    """
    qtbot, dialog, widget = botarray
    qtbot.keyClick(widget, Qt.Key_Return, modifier=Qt.NoModifier)
    value = dialog.text()
    assert value == ''
"""LU decomposition functions."""
from __future__ import division, print_function, absolute_import
from warnings import warn
from numpy import asarray, asarray_chkfinite
# Local imports
from .misc import _datacopied, LinAlgWarning
from .lapack import get_lapack_funcs
from .flinalg import get_flinalg_funcs
__all__ = ['lu', 'lu_solve', 'lu_factor']
def METHOD_NAME(a, overwrite_a=False, check_finite=True):
    """
    Compute pivoted LU decomposition of a matrix.

    The decomposition is::

        A = P L U

    where P is a permutation matrix, L lower triangular with unit
    diagonal elements, and U upper triangular.

    Parameters
    ----------
    a : (M, M) array_like
        Square matrix to decompose.
    overwrite_a : bool, optional
        Whether to overwrite data in A (may increase performance)
    check_finite : bool, optional
        Whether to check that the input matrix contains only finite numbers.
        Disabling may give a performance gain, but may result in problems
        (crashes, non-termination) if the inputs do contain infinities or NaNs.

    Returns
    -------
    lu : (M, M) ndarray
        Matrix containing U in its upper triangle, and L in its lower triangle.
        The unit diagonal elements of L are not stored.
    piv : (M,) ndarray
        Pivot indices representing the permutation matrix P:
        row i of matrix was interchanged with row piv[i].

    See also
    --------
    lu_solve : solve an equation system using the LU factorization of a matrix

    Notes
    -----
    This is a wrapper to the ``*GETRF`` routines from LAPACK.

    Examples
    --------
    >>> from scipy.linalg import lu_factor
    >>> from numpy import tril, triu, allclose, zeros, eye
    >>> A = np.array([[2, 5, 8, 7], [5, 2, 2, 8], [7, 5, 6, 6], [5, 4, 4, 8]])
    >>> lu, piv = lu_factor(A)
    >>> piv
    array([2, 2, 3, 3], dtype=int32)

    Convert LAPACK's ``piv`` array to NumPy index and test the permutation

    >>> piv_py = [2, 0, 3, 1]
    >>> L, U = np.tril(lu, k=-1) + np.eye(4), np.triu(lu)
    >>> np.allclose(A[piv_py] - L @ U, np.zeros((4, 4)))
    True
    """
    # Optionally validate the input for NaN/Inf before handing it to LAPACK.
    if check_finite:
        a1 = asarray_chkfinite(a)
    else:
        a1 = asarray(a)
    if len(a1.shape) != 2 or (a1.shape[0] != a1.shape[1]):
        raise ValueError('expected square matrix')
    # If asarray made a copy, we own the buffer and may let LAPACK overwrite it.
    overwrite_a = overwrite_a or (_datacopied(a1, a))
    getrf, = get_lapack_funcs(('getrf',), (a1,))
    lu, piv, info = getrf(a1, overwrite_a=overwrite_a)
    # info < 0: argument -info was illegal; info > 0: U has an exact zero pivot.
    if info < 0:
        raise ValueError('illegal value in %d-th argument of '
                         'internal getrf (lu_factor)' % -info)
    if info > 0:
        warn("Diagonal number %d is exactly zero. Singular matrix." % info,
             LinAlgWarning, stacklevel=2)
    return lu, piv
def lu_solve(lu_and_piv, b, trans=0, overwrite_b=False, check_finite=True):
    """Solve an equation system, a x = b, given the LU factorization of a

    Parameters
    ----------
    (lu, piv)
        Factorization of the coefficient matrix a, as given by lu_factor
    b : array
        Right-hand side
    trans : {0, 1, 2}, optional
        Type of system to solve:

        ===== =========
        trans system
        ===== =========
        0     a x   = b
        1     a^T x = b
        2     a^H x = b
        ===== =========
    overwrite_b : bool, optional
        Whether to overwrite data in b (may increase performance)
    check_finite : bool, optional
        Whether to check that the input matrices contain only finite numbers.
        Disabling may give a performance gain, but may result in problems
        (crashes, non-termination) if the inputs do contain infinities or NaNs.

    Returns
    -------
    x : array
        Solution to the system

    See also
    --------
    lu_factor : LU factorize a matrix

    Examples
    --------
    >>> from scipy.linalg import lu_factor, lu_solve
    >>> A = np.array([[2, 5, 8, 7], [5, 2, 2, 8], [7, 5, 6, 6], [5, 4, 4, 8]])
    >>> b = np.array([1, 1, 1, 1])
    >>> lu, piv = lu_factor(A)
    >>> x = lu_solve((lu, piv), b)
    >>> np.allclose(A @ x - b, np.zeros((4,)))
    True
    """
    (lu, piv) = lu_and_piv
    # Optionally validate the right-hand side for NaN/Inf.
    if check_finite:
        b1 = asarray_chkfinite(b)
    else:
        b1 = asarray(b)
    # If asarray made a copy, we own the buffer and may let LAPACK overwrite it.
    overwrite_b = overwrite_b or _datacopied(b1, b)
    if lu.shape[0] != b1.shape[0]:
        raise ValueError("incompatible dimensions.")
    getrs, = get_lapack_funcs(('getrs',), (lu, b1))
    x, info = getrs(lu, piv, b1, trans=trans, overwrite_b=overwrite_b)
    if info == 0:
        return x
    # info < 0 means argument -info had an illegal value.
    # Fix: the previous message named "gesv|posv", but the routine actually
    # called here is getrs — the diagnostic was misleading.
    raise ValueError('illegal value in %d-th argument of internal getrs '
                     '(lu_solve)' % -info)
def lu(a, permute_l=False, overwrite_a=False, check_finite=True):
    """
    Compute pivoted LU decomposition of a matrix.

    The decomposition is::

        A = P L U

    where P is a permutation matrix, L lower triangular with unit
    diagonal elements, and U upper triangular.

    Fix: removed trailing junk characters after the final return that made
    the line a syntax error.

    Parameters
    ----------
    a : (M, N) array_like
        Array to decompose
    permute_l : bool, optional
        Perform the multiplication P*L (Default: do not permute)
    overwrite_a : bool, optional
        Whether to overwrite data in a (may improve performance)
    check_finite : bool, optional
        Whether to check that the input matrix contains only finite numbers.
        Disabling may give a performance gain, but may result in problems
        (crashes, non-termination) if the inputs do contain infinities or NaNs.

    Returns
    -------
    **(If permute_l == False)**

    p : (M, M) ndarray
        Permutation matrix
    l : (M, K) ndarray
        Lower triangular or trapezoidal matrix with unit diagonal.
        K = min(M, N)
    u : (K, N) ndarray
        Upper triangular or trapezoidal matrix

    **(If permute_l == True)**

    pl : (M, K) ndarray
        Permuted L matrix.
        K = min(M, N)
    u : (K, N) ndarray
        Upper triangular or trapezoidal matrix

    Notes
    -----
    This is a LU factorization routine written for Scipy.

    Examples
    --------
    >>> from scipy.linalg import lu
    >>> A = np.array([[2, 5, 8, 7], [5, 2, 2, 8], [7, 5, 6, 6], [5, 4, 4, 8]])
    >>> p, l, u = lu(A)
    >>> np.allclose(A - p @ l @ u, np.zeros((4, 4)))
    True
    """
    # Optionally validate the input for NaN/Inf before factorizing.
    if check_finite:
        a1 = asarray_chkfinite(a)
    else:
        a1 = asarray(a)
    if len(a1.shape) != 2:
        raise ValueError('expected matrix')
    # If asarray made a copy, we own the buffer and may let the routine
    # overwrite it.
    overwrite_a = overwrite_a or (_datacopied(a1, a))
    flu, = get_flinalg_funcs(('lu',), (a1,))
    p, l, u, info = flu(a1, permute_l=permute_l, overwrite_a=overwrite_a)
    if info < 0:
        raise ValueError('illegal value in %d-th argument of '
                         'internal lu.getrf' % -info)
    # With permute_l, P has already been folded into L by the routine.
    if permute_l:
        return l, u
    return p, l, u
from _typeshed import Incomplete
from influxdb_client.service._base_service import _BaseService
class TasksService(_BaseService):
    """Type stub for the InfluxDB Tasks API service.

    Each endpoint comes in three flavors: plain, `_with_http_info`
    (returns response metadata), and `_async`.

    Fix: removed trailing junk characters after the final stub that made
    the line a syntax error.
    """
    def __init__(self, api_client: Incomplete | None = None) -> None: ...
    def delete_tasks_id(self, task_id, **kwargs): ...
    def delete_tasks_id_with_http_info(self, task_id, **kwargs): ...
    async def delete_tasks_id_async(self, task_id, **kwargs): ...
    def delete_tasks_id_labels_id(self, task_id, label_id, **kwargs): ...
    def delete_tasks_id_labels_id_with_http_info(self, task_id, label_id, **kwargs): ...
    async def delete_tasks_id_labels_id_async(self, task_id, label_id, **kwargs): ...
    def delete_tasks_id_members_id(self, user_id, task_id, **kwargs): ...
    def delete_tasks_id_members_id_with_http_info(self, user_id, task_id, **kwargs): ...
    async def delete_tasks_id_members_id_async(self, user_id, task_id, **kwargs): ...
    def delete_tasks_id_owners_id(self, user_id, task_id, **kwargs): ...
    def delete_tasks_id_owners_id_with_http_info(self, user_id, task_id, **kwargs): ...
    async def delete_tasks_id_owners_id_async(self, user_id, task_id, **kwargs): ...
    def delete_tasks_id_runs_id(self, task_id, run_id, **kwargs): ...
    def delete_tasks_id_runs_id_with_http_info(self, task_id, run_id, **kwargs): ...
    async def delete_tasks_id_runs_id_async(self, task_id, run_id, **kwargs): ...
    def get_tasks(self, **kwargs): ...
    def get_tasks_with_http_info(self, **kwargs): ...
    async def get_tasks_async(self, **kwargs): ...
    def get_tasks_id(self, task_id, **kwargs): ...
    def get_tasks_id_with_http_info(self, task_id, **kwargs): ...
    async def get_tasks_id_async(self, task_id, **kwargs): ...
    def METHOD_NAME(self, task_id, **kwargs): ...
    def get_tasks_id_labels_with_http_info(self, task_id, **kwargs): ...
    async def get_tasks_id_labels_async(self, task_id, **kwargs): ...
    def get_tasks_id_logs(self, task_id, **kwargs): ...
    def get_tasks_id_logs_with_http_info(self, task_id, **kwargs): ...
    async def get_tasks_id_logs_async(self, task_id, **kwargs): ...
    def get_tasks_id_members(self, task_id, **kwargs): ...
    def get_tasks_id_members_with_http_info(self, task_id, **kwargs): ...
    async def get_tasks_id_members_async(self, task_id, **kwargs): ...
    def get_tasks_id_owners(self, task_id, **kwargs): ...
    def get_tasks_id_owners_with_http_info(self, task_id, **kwargs): ...
    async def get_tasks_id_owners_async(self, task_id, **kwargs): ...
    def get_tasks_id_runs(self, task_id, **kwargs): ...
    def get_tasks_id_runs_with_http_info(self, task_id, **kwargs): ...
    async def get_tasks_id_runs_async(self, task_id, **kwargs): ...
    def get_tasks_id_runs_id(self, task_id, run_id, **kwargs): ...
    def get_tasks_id_runs_id_with_http_info(self, task_id, run_id, **kwargs): ...
    async def get_tasks_id_runs_id_async(self, task_id, run_id, **kwargs): ...
    def get_tasks_id_runs_id_logs(self, task_id, run_id, **kwargs): ...
    def get_tasks_id_runs_id_logs_with_http_info(self, task_id, run_id, **kwargs): ...
    async def get_tasks_id_runs_id_logs_async(self, task_id, run_id, **kwargs): ...
    def patch_tasks_id(self, task_id, task_update_request, **kwargs): ...
    def patch_tasks_id_with_http_info(self, task_id, task_update_request, **kwargs): ...
    async def patch_tasks_id_async(self, task_id, task_update_request, **kwargs): ...
    def post_tasks(self, task_create_request, **kwargs): ...
    def post_tasks_with_http_info(self, task_create_request, **kwargs): ...
    async def post_tasks_async(self, task_create_request, **kwargs): ...
    def post_tasks_id_labels(self, task_id, label_mapping, **kwargs): ...
    def post_tasks_id_labels_with_http_info(self, task_id, label_mapping, **kwargs): ...
    async def post_tasks_id_labels_async(self, task_id, label_mapping, **kwargs): ...
    def post_tasks_id_members(self, task_id, add_resource_member_request_body, **kwargs): ...
    def post_tasks_id_members_with_http_info(self, task_id, add_resource_member_request_body, **kwargs): ...
    async def post_tasks_id_members_async(self, task_id, add_resource_member_request_body, **kwargs): ...
    def post_tasks_id_owners(self, task_id, add_resource_member_request_body, **kwargs): ...
    def post_tasks_id_owners_with_http_info(self, task_id, add_resource_member_request_body, **kwargs): ...
    async def post_tasks_id_owners_async(self, task_id, add_resource_member_request_body, **kwargs): ...
    def post_tasks_id_runs(self, task_id, **kwargs): ...
    def post_tasks_id_runs_with_http_info(self, task_id, **kwargs): ...
    async def post_tasks_id_runs_async(self, task_id, **kwargs): ...
    def post_tasks_id_runs_id_retry(self, task_id, run_id, **kwargs): ...
    def post_tasks_id_runs_id_retry_with_http_info(self, task_id, run_id, **kwargs): ...
    async def post_tasks_id_runs_id_retry_async(self, task_id, run_id, **kwargs): ...
###############################################################################
#
# Core - A class for writing the Excel XLSX Worksheet file.
#
# SPDX-License-Identifier: BSD-2-Clause
# Copyright 2013-2023, John McNamara, jmcnamara@cpan.org
#
# Standard packages.
from datetime import datetime, timezone
# Package imports.
from . import xmlwriter
class Core(xmlwriter.XMLwriter):
    """
    A class for writing the Excel XLSX Core (document properties) file.

    Improvement: the six identical "write element only if the property was
    supplied" methods now share one helper, as do the two dcterms date
    writers. All original method names are kept so callers are unaffected.
    """

    ###########################################################################
    #
    # Public API.
    #
    ###########################################################################
    def __init__(self):
        """
        Constructor.
        """
        super(Core, self).__init__()
        # Document properties supplied by the workbook (title, author, ...).
        self.properties = {}

    ###########################################################################
    #
    # Private API.
    #
    ###########################################################################
    def _assemble_xml_file(self):
        # Assemble and write the XML file.
        # Write the XML declaration.
        self._xml_declaration()
        self._write_cp_core_properties()
        self._write_dc_title()
        self._write_dc_subject()
        self._write_dc_creator()
        self._write_cp_keywords()
        self.METHOD_NAME()
        self._write_cp_last_modified_by()
        self._write_dcterms_created()
        self._write_dcterms_modified()
        self._write_cp_category()
        self._write_cp_content_status()
        self._xml_end_tag("cp:coreProperties")
        # Close the file.
        self._xml_close()

    def _set_properties(self, properties):
        # Set the document properties.
        self.properties = properties

    def _datetime_to_iso8601_date(self, date):
        # Convert to a ISO 8601 style "2010-01-01T00:00:00Z" date.
        if not date:
            date = datetime.now(timezone.utc)
        return date.strftime("%Y-%m-%dT%H:%M:%SZ")

    def _write_optional_element(self, tag, key):
        # Write `tag` only when property `key` was supplied by the user.
        if key in self.properties:
            self._xml_data_element(tag, self.properties[key])

    def _write_dcterms_date(self, tag):
        # Write a <dcterms:*> date element in W3CDTF (ISO 8601) form.
        # NOTE(review): both created and modified read the "created" property,
        # so "modified" mirrors the creation time — confirm this is
        # intentional before changing.
        date = self.properties.get("created", datetime.now(timezone.utc))
        date = self._datetime_to_iso8601_date(date)
        attributes = [("xsi:type", "dcterms:W3CDTF")]
        self._xml_data_element(tag, date, attributes)

    ###########################################################################
    #
    # XML methods.
    #
    ###########################################################################
    def _write_cp_core_properties(self):
        # Write the <cp:coreProperties> element.
        xmlns_cp = (
            "http://schemas.openxmlformats.org/package/2006/"
            + "metadata/core-properties"
        )
        xmlns_dc = "http://purl.org/dc/elements/1.1/"
        xmlns_dcterms = "http://purl.org/dc/terms/"
        xmlns_dcmitype = "http://purl.org/dc/dcmitype/"
        xmlns_xsi = "http://www.w3.org/2001/XMLSchema-instance"
        attributes = [
            ("xmlns:cp", xmlns_cp),
            ("xmlns:dc", xmlns_dc),
            ("xmlns:dcterms", xmlns_dcterms),
            ("xmlns:dcmitype", xmlns_dcmitype),
            ("xmlns:xsi", xmlns_xsi),
        ]
        self._xml_start_tag("cp:coreProperties", attributes)

    def _write_dc_creator(self):
        # Write the <dc:creator> element. Always written; defaults to "".
        data = self.properties.get("author", "")
        self._xml_data_element("dc:creator", data)

    def _write_cp_last_modified_by(self):
        # Write the <cp:lastModifiedBy> element. Always written; defaults to "".
        data = self.properties.get("author", "")
        self._xml_data_element("cp:lastModifiedBy", data)

    def _write_dcterms_created(self):
        # Write the <dcterms:created> element.
        self._write_dcterms_date("dcterms:created")

    def _write_dcterms_modified(self):
        # Write the <dcterms:modified> element.
        self._write_dcterms_date("dcterms:modified")

    def _write_dc_title(self):
        # Write the <dc:title> element.
        self._write_optional_element("dc:title", "title")

    def _write_dc_subject(self):
        # Write the <dc:subject> element.
        self._write_optional_element("dc:subject", "subject")

    def _write_cp_keywords(self):
        # Write the <cp:keywords> element.
        self._write_optional_element("cp:keywords", "keywords")

    def METHOD_NAME(self):
        # Write the <dc:description> element (stored as "comments").
        self._write_optional_element("dc:description", "comments")

    def _write_cp_category(self):
        # Write the <cp:category> element.
        self._write_optional_element("cp:category", "category")

    def _write_cp_content_status(self):
        # Write the <cp:contentStatus> element (stored as "status").
        self._write_optional_element("cp:contentStatus", "status")
#!/usr/bin/env python
############################################################################
#
# MODULE: r.mcda.topsis
# AUTHOR: Gianluca Massei - Antonio Boggia
# PURPOSE: Generate a MCDA map based on TOPSIS algorthm.
# Based on Hwang C. L. and Yoon K. Multiple Objective Decision
# Making Methods and Applications, A State-of-the-Art Survey .
# Springer - Verlag , 1981.
# COPYRIGHT: c) 2015 Gianluca Massei, Antonio Boggia and the GRASS
# Development Team. This program is free software under the
# GNU General PublicLicense (>=v2). Read the file COPYING
# that comes with GRASS for details.
#
#############################################################################
# %Module
# % description: Generates a MCDA map based on TOPSIS algorthm.
# % keyword: raster
# % keyword: Multi Criteria Decision Analysis (MCDA)
# %End
# %option
# % key: criteria
# % type: string
# % multiple: yes
# % gisprompt: old,cell,raster
# % key_desc: name
# % description: Name of criteria raster maps
# % required: yes
# %end
# %option
# % key: preferences
# % type: string
# % key_desc: character
# % description: preference (gain,cost)
# % required: yes
# %end
# %option
# % key: weights
# % type: double
# % description: weights (w1,w2,...,wn)
# % multiple: yes
# % required: yes
# %end
# %option
# % key: topsismap
# % type: string
# % gisprompt: new_file,cell,output
# % description: Ranked raster map
# % required: yes
# %end
import sys
import grass.script as gscript
from time import time
def standardizedNormalizedMatrix(attributes, weights):  # step1 and step2
    """Build weighted, vector-normalized criterion maps (TOPSIS steps 1-2).

    Each input raster is divided by the square root of its sum of squares,
    multiplied by its weight, and written to a temporary map "_<criterion>".
    Returns the list of temporary map names.
    """
    normalized = []
    for criterion, weight in zip(attributes, weights):
        # Sum of squares over the raster gives the normalization denominator.
        gscript.mapcalc(
            "critPow=pow(${criterion},2)", criterion=criterion, overwrite="True"
        )
        stats = gscript.parse_command("r.univar", map="critPow", flags="g")
        nameMap = "_%s" % criterion
        gscript.mapcalc(
            "${nameMap}=(${criterion}/sqrt(${sum}))*${weight}",
            nameMap=nameMap,
            criterion=criterion,
            sum=stats["sum"],
            weight=weight,
            overwrite="True",
        )
        normalized.append(nameMap)
    return normalized
def idealPoints(criteria, preference):  # step3
    """Return per-criterion ideal values (TOPSIS step 3).

    "gain" criteria take their raster maximum, "cost" criteria their
    minimum; an unknown preference yields the sentinel -9999 and a warning.

    Fix: reworded the ungrammatical warning message ("doesn't compliant").
    """
    idelaPointsList = []
    for c, p in zip(criteria, preference):
        stats = gscript.parse_command("r.univar", map=c, flags="g")
        if p == "gain":
            ip = float(stats["max"])
        elif p == "cost":
            ip = float(stats["min"])
        else:
            ip = -9999
            print("warning! preference '%s' is not supported (use gain or cost)" % p)
        idelaPointsList.append(ip)
    return idelaPointsList
def worstPoints(criteria, preference):
    """Return per-criterion worst (anti-ideal) values.

    "gain" criteria take their raster minimum, "cost" criteria their
    maximum; an unknown preference yields the sentinel -9999 and a warning.

    Fix: reworded the ungrammatical warning message ("doesn't compliant").
    """
    worstPointsList = []
    for c, p in zip(criteria, preference):
        stats = gscript.parse_command("r.univar", map=c, flags="g")
        if p == "gain":
            wp = float(stats["min"])
        elif p == "cost":
            wp = float(stats["max"])
        else:
            wp = -9999
            print("warning! preference '%s' is not supported (use gain or cost)" % p)
        worstPointsList.append(wp)
    return worstPointsList
def idealPointDistance(idelaPointsList, criteria):  # step4a
    """Write the IdealPointDistance raster: Euclidean distance of each cell
    to the ideal point, then drop the per-criterion temporaries."""
    squared = []
    for i, (c, ip) in enumerate(zip(criteria, idelaPointsList)):
        mapname = "tmap_%s" % i
        gscript.mapcalc(
            "${mapname}=pow((${c}-${ip}),2)",
            mapname=mapname,
            c=c,
            ip=ip,
            overwrite="True",
        )
        squared.append(mapname)
    mapalgebra2 = "IdealPointDistance=sqrt(%s)" % ("+".join(squared))
    gscript.mapcalc(mapalgebra2, overwrite="True")
    gscript.run_command("g.remove", flags="f", type="raster", name=",".join(squared))
    # Kept for compatibility: the original returned 0 (the caller ignores it).
    return 0
def worstPointDistance(worstPointsList, criteria):  # step4b
    """Write the WorstPointDistance raster: Euclidean distance of each cell
    to the worst (anti-ideal) point, then drop the temporaries."""
    squared = []
    for i, (c, wp) in enumerate(zip(criteria, worstPointsList)):
        mapname = "tmap_%s" % i
        gscript.mapcalc(
            "${mapname}=pow((${c}-${wp}),2)",
            mapname=mapname,
            c=c,
            wp=wp,
            overwrite="True",
        )
        squared.append(mapname)
    mapalgebra2 = "WorstPointDistance=sqrt(%s)" % ("+".join(squared))
    gscript.mapcalc(mapalgebra2, overwrite="True")
    gscript.run_command("g.remove", flags="f", type="raster", name=",".join(squared))
def METHOD_NAME(topsismap):  # step5
    # TOPSIS step 5: relative closeness to the ideal solution,
    # C* = d_worst / (d_worst + d_ideal), written to the output map.
    gscript.mapcalc(
        "${topsismap}=WorstPointDistance/(WorstPointDistance+IdealPointDistance)",
        topsismap=topsismap,
        overwrite="True",
    )
def main():
    "main function for TOPSIS algorithm"
    start = time()
    # Parse the comma-separated module options (see the header declarations).
    attributes = options["criteria"].split(",")
    preferences = options["preferences"].split(",")
    weights = options["weights"].split(",")
    topsismap = options["topsismap"]
    # Steps 1-2: weighted, vector-normalized criterion maps.
    criteria = standardizedNormalizedMatrix(attributes, weights)
    # Step 3: ideal and anti-ideal reference points per criterion.
    idelaPointsList = idealPoints(criteria, preferences)
    worstPointsList = worstPoints(criteria, preferences)
    # Step 4: distance rasters to both reference points.
    idealPointDistance(idelaPointsList, criteria)
    worstPointDistance(worstPointsList, criteria)
    # Step 5: relative closeness -> final ranked map.
    METHOD_NAME(topsismap)
    # Clean up the temporary normalized and distance rasters.
    gscript.run_command("g.remove", flags="f", type="raster", name=",".join(criteria))
    gscript.run_command(
        "g.remove",
        flags="f",
        type="raster",
        name="IdealPointDistance,WorstPointDistance,critPow",
    )
    end = time()
    print("Time computing-> %.4f s" % (end - start))
# Fix: removed trailing junk characters after main() that made the line a
# syntax error.
if __name__ == "__main__":
    options, flags = gscript.parser()
    main()
from typing import Optional
from slither.core import expressions
from slither.core.expressions.expression import Expression
from slither.visitors.expression.expression import ExpressionVisitor
def get(expression: Expression) -> str:
    """Remove and return the rendered string cached on *expression*.

    Popping (rather than reading) the entry releases the cached string once
    the node has been consumed, keeping memory use low.
    """
    return expression.context.pop("ExpressionPrinter")
def set_val(expression: Expression, val: str) -> None:
    # Cache the rendered string on the expression node; retrieved (and
    # removed) later by get() when the parent node is rendered.
    expression.context["ExpressionPrinter"] = val
class ExpressionPrinter(ExpressionVisitor):
    """Visitor that renders an expression tree back into a source-like string.

    Each ``_post_*`` hook fires after a node's children have been visited; it
    reads the children's rendered strings (cached on the nodes via get/set_val)
    and caches the combined rendering for the node itself.
    """

    def __init__(self, expression: Expression) -> None:
        self._result: Optional[str] = None
        super().__init__(expression)

    def result(self) -> str:
        """Return the rendered string for the root expression (computed once)."""
        if not self._result:
            self._result = get(self.expression)
        return self._result

    # NOTE: the misspelled name matches the hook name ExpressionVisitor dispatches to.
    def _post_assignement_operation(self, expression: expressions.AssignmentOperation) -> None:
        left = get(expression.expression_left)
        right = get(expression.expression_right)
        val = f"{left} {expression.type} {right}"
        set_val(expression, val)

    def _post_binary_operation(self, expression: expressions.BinaryOperation) -> None:
        left = get(expression.expression_left)
        right = get(expression.expression_right)
        val = f"{left} {expression.type} {right}"
        set_val(expression, val)

    def _post_call_expression(self, expression: expressions.CallExpression) -> None:
        called = get(expression.called)
        arguments = ",".join([get(x) for x in expression.arguments if x])
        val = f"{called}({arguments})"
        set_val(expression, val)

    def _post_conditional_expression(self, expression: expressions.ConditionalExpression) -> None:
        if_expr = get(expression.if_expression)
        else_expr = get(expression.else_expression)
        then_expr = get(expression.then_expression)
        # Bug fix: the then/else branches were swapped in the rendered string
        # (the original printed "then <else-branch> else <then-branch>").
        val = f"if {if_expr} then {then_expr} else {else_expr}"
        set_val(expression, val)

    def _post_elementary_type_name_expression(
        self, expression: expressions.ElementaryTypeNameExpression
    ) -> None:
        set_val(expression, str(expression.type))

    def _post_identifier(self, expression: expressions.Identifier) -> None:
        set_val(expression, str(expression.value))

    def _post_index_access(self, expression: expressions.IndexAccess) -> None:
        left = get(expression.expression_left)
        right = get(expression.expression_right)
        val = f"{left}[{right}]"
        set_val(expression, val)

    def _post_literal(self, expression: expressions.Literal) -> None:
        set_val(expression, str(expression.value))

    def METHOD_NAME(self, expression: expressions.MemberAccess) -> None:
        expr = get(expression.expression)
        member_name = str(expression.member_name)
        val = f"{expr}.{member_name}"
        set_val(expression, val)

    def _post_new_array(self, expression: expressions.NewArray) -> None:
        array = str(expression.array_type)
        val = f"new {array}"
        set_val(expression, val)

    def _post_new_contract(self, expression: expressions.NewContract) -> None:
        contract = str(expression.contract_name)
        val = f"new {contract}"
        set_val(expression, val)

    def _post_new_elementary_type(self, expression: expressions.NewElementaryType) -> None:
        t = str(expression.type)
        val = f"new {t}"
        set_val(expression, val)

    def _post_tuple_expression(self, expression: expressions.TupleExpression) -> None:
        underlying_expressions = [get(e) for e in expression.expressions if e]
        val = f"({','.join(underlying_expressions)})"
        set_val(expression, val)

    def _post_type_conversion(self, expression: expressions.TypeConversion) -> None:
        t = str(expression.type)
        expr = get(expression.expression)
        val = f"{t}({expr})"
        set_val(expression, val)

    def _post_unary_operation(self, expression: expressions.UnaryOperation) -> None:
        t = str(expression.type)
        expr = get(expression.expression)
        if expression.is_prefix:
            val = f"{t}{expr}"
        else:
            val = f"{expr}{t}"
        set_val(expression, val)
1,741 | parse dir | #! /usr/bin/env python3
from __future__ import print_function
from builtins import range
import re,os,sys
import optparse
# python 2.6 has json modue; <2.6 could use simplejson
try:
import json
except ImportError:
import simplejson as json
from mutypes import *
import pprint
pp = pprint.PrettyPrinter(indent=2)

# Maps a plot file name (as produced by alignment_validation_plots.py) to the
# human-readable title shown in the browsable JavaScript tree. Files found on
# disk that are not listed here will raise a KeyError when the tree is built.
NAME_TO_TITLE = {
    "map_DTvsphi_dxdz.png" : "map of dxdz residual vs phi",
    "map_DTvsphi_dydz.png" : "map of dydz residual vs phi",
    "map_DTvsphi_x.png" : "map of x residual vs phi",
    "map_DTvsphi_y.png" : "map of y residual vs phi",
    "map_DTvsz_dxdz.png" : "map of dxdz residual vs z",
    "map_DTvsz_dydz.png" : "map of dydz residual vs z",
    "map_DTvsz_x.png" : "map of x residual vs z",
    "map_DTvsz_y.png" : "map of y residual vs z",
    "map_DTvsz_all_dxdz.png" : "map of dxdz residual vs z",
    "map_DTvsz_all_dydz.png" : "map of dydz residual vs z",
    "map_DTvsz_all_x.png" : "map of x residual vs z",
    "map_DTvsz_all_y.png" : "map of y residual vs z",
    "map_CSCvsphi_dxdz.png" : "map of d(rphi)/dz residual vs phi",
    "map_CSCvsphi_x.png" : "map of rphi residual vs phi",
    "map_CSCvsr_dxdz.png" : "map of d(rphi)/dz residual vs r",
    "map_CSCvsr_x.png" : "map of rphi residual vs r",
    "segdifphi_x_dt_csc_resid.png" : "segdiff DT-CSC in x residuals vs phi",
    "segdifphi_dt13_resid.png" : "segdiff in x residuals vs phi",
    "segdifphi_dt13_slope.png" : "segdiff in dxdz residuals vs phi",
    "segdifphi_dt2_resid.png" : "segdiff in y residuals vs phi",
    "segdifphi_dt2_slope.png" : "segdiff in dydz residuals vs phi",
    "segdif_x_dt_csc_resid.png" : "segdiff DT-CSC in x residuals",
    "segdif_dt13_resid.png" : "segdiff in x residuals",
    "segdif_dt13_slope.png" : "segdiff in dxdz residuals",
    "segdif_dt2_resid.png" : "segdiff in y residuals",
    "segdif_dt2_slope.png" : "segdiff in dydz residuals",
    "segdifphi_csc_resid.png" : "segdiff in rphi residuals vs phi",
    "segdifphi_csc_slope.png" : "segdiff in d(rphi)/dz residuals vs phi",
    "segdif_csc_resid.png" : "segdiff in rphi residuals",
    "segdif_csc_slope.png" : "segdiff in d(rphi)/dz residuals",
    "dt_bellcurves.png" : "residuals distributions",
    "dt_polynomials.png" : "residuals relations to misalignments",
    "csc_bellcurves.png" : "residuals distributions",
    "csc_polynomials.png" : "residuals relations to misalignments",
    'dt_curvature_deltax.png' : 'Delta x residuals vs. curvature',
    'dt_curvature_deltadxdz.png' : 'Delta dxdz residuals vs. curvature',
    "medians.png" : "medians distribution"
}
######################################################
# functions definitions
######################################################
# To parse commandline args
usage='%prog [options]\n'+\
      'Creates a tree_items.js data file for a browsable JavaScript tree using results produced '+\
      'by running alignment_validation_plots.py.'
parser=optparse.OptionParser(usage)
parser.add_option("-i", "--inputDir",
                  help="[REQUIRED] input directory: should contain 'iter1', 'iterN' and 'common' directories filled with alignment_validation_plots.py. The resulting tree_items.js is also dumped into this directory",
                  type="string",
                  default='',
                  dest="inputDir")
parser.add_option("-v", "--verbose",
                  help="Degree of debug info verbosity",
                  type="int",
                  default=0,
                  dest="verbose")
options,args=parser.parse_args()
# --inputDir is mandatory; bail out with usage info when it is missing.
if options.inputDir=='':
    print("\nOne or more of REQUIRED options is missing!\n")
    parser.print_help()
    # See \n"+sys.argv[0]+" --help"
    sys.exit()
######################################################
############################################################################################################
############################################################################################################
# main script
# create directory structure
#basedir='/disks/sdb5/home_reloc/khotilov/db/cms/alignment'
#os.chdir(basedir)
# All subsequent paths are relative to the input directory.
os.chdir(options.inputDir)
#iteration1 = "iteration_01"
#iteration3 = "iteration_03"
#iteration1 = "NOV4DT_PASS3noweight_TkHIP_01"
#iteration3 = "NOV4DT_PASS3noweight_TkHIP_05"
# Names of the first/last iteration result directories and the common plots dir.
iteration1 = "iter1"
iterationN = "iterN"
comdir = "common/"
######################################################
# open root and py result files
iteration_directory = iterationN
def METHOD_NAME(dir, label, it1="", itN=""):
    """Scan *dir* for .png plots and build a nested tree-item list.

    it1 and itN are the first and the last iterations' directory names.
    dir is some directory with the results from the LAST iteration, so it
    must contain a itN substring; label is a label for the tree's folder
    for this directory.

    Returns ``[label, dir, [title, path, ...], ...]``; when both it1 and itN
    are given, each plot entry also carries first/last-iteration sub-entries.
    Raises KeyError for a .png not listed in NAME_TO_TITLE.
    """
    if len(it1) > 0 and dir.find(itN) == -1 and len(itN) > 0 or (len(itN) > 0 and dir.find(itN) == -1):
        pass
    if len(itN) > 0 and dir.find(itN) == -1:
        print("directory ", dir, "has no ", itN, " in it!!")
        return ["problem!!!", ""]
    res = [label, dir]
    files = sorted(os.listdir(dir))
    for f in files:
        # Bug fix: use a raw string for the regex -- "\." in a plain string
        # literal is an invalid escape sequence (DeprecationWarning today,
        # an error in future Python versions).
        if re.match(r".+\.png", f):
            if len(it1) > 0 and len(itN) > 0:
                # Pair each last-iteration plot with its first-iteration twin.
                lnN = [itN, dir + '/' + f]
                dir1 = dir.replace(itN, it1)
                if not os.access(dir1 + '/' + f, os.F_OK):
                    print("WARNING: no ", dir1 + '/' + f, " file found!!!")
                ln1 = [it1, dir1 + '/' + f]
                ln = [NAME_TO_TITLE[f], dir + '/' + f, ln1, lnN]
                res.append(ln)
            else:
                ln = [NAME_TO_TITLE[f], dir + '/' + f]
                res.append(ln)
    return res
# Build the nested tree: [site [DT [wheel [station [sector]]]], [CSC ...], [common]].
mytree = []
tree_level1 = ['test','']
# DT
dt_basedir = iteration_directory+'/MB/'
tree_level2 = METHOD_NAME(dt_basedir,"MB",iteration1,iterationN)
# DT_TYPES comes from mutypes (star import above); presumably
# [wheel_name, ?, stations] tuples -- verify against mutypes if changed.
for wheel in DT_TYPES:
    dd = dt_basedir + wheel[0]
    print(dd)
    tree_level3 = METHOD_NAME(dd,wheel[0],iteration1,iterationN)
    for station in wheel[2]:
        dd = dt_basedir + wheel[0]+'/'+station[1]
        print(dd)
        tree_level4 = METHOD_NAME(dd,station[0],iteration1,iterationN)
        for sector in range(1,station[2]+1):
            ssector = "%02d" % sector
            dd = dt_basedir+wheel[0]+'/'+station[1]+'/'+ssector
            #print dd
            tree_level5 = METHOD_NAME(dd,"%s/%d" % (station[0],sector),iteration1,iterationN)
            # A length-2 entry means "no plots found"; pad so the JS tree renders it.
            if len(tree_level5) == 2: tree_level5.append(['none',''])
            tree_level4.append(tree_level5)
        if len(tree_level4) == 2: tree_level4.append(['none',''])
        tree_level3.append(tree_level4)
    if len(tree_level3) == 2: tree_level3.append(['none',''])
    tree_level2.append(tree_level3)
if len(tree_level2) == 2: tree_level2.append(['none',''])
tree_level1.append(tree_level2)
# CSC
csc_basedir = iteration_directory+'/'
for endcap in CSC_TYPES:
    dd = csc_basedir+endcap[0]
    print(dd)
    tree_level2 = METHOD_NAME(dd,endcap[0],iteration1,iterationN)
    for station in endcap[2]:
        dd = csc_basedir+endcap[0]+'/'+station[1]
        print(dd)
        tree_level3 = METHOD_NAME(dd,station[0],iteration1,iterationN)
        for ring in station[2]:
            dd = csc_basedir+endcap[0]+'/'+station[1]+'/'+ring[1]
            print(dd)
            tree_level4 = METHOD_NAME(dd,"%s/%s" % (station[0],ring[1]),iteration1,iterationN)
            for chamber in range(1,ring[2]+1):
                schamber = "%02d" % chamber
                dd = csc_basedir+endcap[0]+'/'+station[1]+'/'+ring[1]+'/'+schamber
                #print dd
                tree_level5 = METHOD_NAME(dd,"%s/%s/%d" % (station[0],ring[1],chamber),iteration1,iterationN)
                tree_level4.append(tree_level5)
            if len(tree_level4) == 2: tree_level4.append(['none',''])
            tree_level3.append(tree_level4)
        if len(tree_level3) == 2: tree_level3.append(['none',''])
        tree_level2.append(tree_level3)
    if len(tree_level2) == 2: tree_level2.append(['none',''])
    tree_level1.append(tree_level2)
# Common plots
common_basedir = comdir
tree_level2 = METHOD_NAME(common_basedir,"All")
tree_level1.append(tree_level2)
mytree.append(tree_level1)
print(" ")
#pp.pprint(mytree)
print()
# Dump the tree as a JS variable assignment consumed by the browser page.
ff = open("tree_items.js",mode="w")
print("var TREE_ITEMS = ", file=ff)
json.dump(mytree,ff)
ff.close()
1,742 | get live url | # -*- coding: utf-8 -*-
# Copyright: (c) JUL1EN094, SPM, SylvainCecchetto
# Copyright: (c) 2016, SylvainCecchetto
# GNU General Public License v2.0+ (see LICENSE.txt or https://www.gnu.org/licenses/gpl-2.0.txt)
# This file is part of Catch-up TV & More
from __future__ import unicode_literals
from builtins import str
import re
from codequick import Listitem, Resolver, Route
import urlquick
from resources.lib import resolver_proxy, web_utils
from resources.lib.menu_utils import item_post_treatment
# TO DO
# Info
# Add date of videos

# Base URLs of the LCP site and of the Qbrick replay player config.
URL_ROOT = 'http://www.lcp.fr'
URL_LIVE_SITE = 'https://lcp.fr/direct-lcp-5434'
URL_CATEGORIES = URL_ROOT + '/%s'
URL_VIDEO_REPLAY = 'http://play1.qbrick.com/config/avp/v1/player/' \
                   'media/%s/darkmatter/%s/'
# VideoID, AccountId

# Category slug -> menu label shown to the user.
CATEGORIES = {
    'documentaires': 'Documentaires',
    'emissions': 'Emission A-Z'
}

# Single random UA picked at import time, reused for all plain GETs.
GENERIC_HEADERS = {'User-Agent': web_utils.get_random_ua()}
@Route.register
def list_categories(plugin, item_id, **kwargs):
    """Yield one folder item per top-level category.

    'Documentaires' links straight to a flat video listing; every other
    category goes through the per-program listing first.
    """
    for category_id, category_name in CATEGORIES.items():
        folder = Listitem()
        folder.label = category_name
        target_url = URL_CATEGORIES % category_id
        if category_id == 'documentaires':
            folder.set_callback(list_videos,
                                item_id=item_id,
                                videos_url=target_url,
                                page='0')
        else:
            folder.set_callback(list_programs,
                                item_id=item_id,
                                category_url=target_url)
        item_post_treatment(folder)
        yield folder
@Route.register
def list_programs(plugin, item_id, category_url, **kwargs):
    """
    Build the listing of programs for a category page,
    e.g. "Journal de 20H", "Cash investigation".
    """
    resp = urlquick.get(category_url)
    root = resp.parse()
    # NOTE(review): the class selector contains a space ("sticky- views-row");
    # presumably this matches the site's markup -- verify if scraping breaks.
    for program_datas in root.iterfind(".//div[@class='sticky- views-row']"):
        program_label = program_datas.find(".//h2").text
        program_image = URL_ROOT + program_datas.find(".//img").get('src')
        program_url = program_datas.find(".//a").get('href')
        item = Listitem()
        item.label = program_label
        item.art['thumb'] = item.art['landscape'] = program_image
        item.set_callback(list_videos_programs,
                          item_id=item_id,
                          videos_url=program_url,
                          page='0')
        item_post_treatment(item)
        yield item
@Route.register
def list_videos_programs(plugin, item_id, videos_url, page, **kwargs):
    """
    Build the paginated video listing for a single program: follow the
    program page's "more" link to its full video index, then yield one
    playable item per video plus a "next page" item.
    """
    resp = urlquick.get(videos_url)
    root = resp.parse()
    # The program page only shows a teaser; the "more-link" leads to the
    # paginated index of all its videos.
    all_videos_link = URL_ROOT + root.findall(".//div[@class='more-link']")[0].find(".//a").get('href')
    resp2 = urlquick.get(all_videos_link + '?page=%s' % page)
    root2 = resp2.parse("main", attrs={"class": "layout-3col__left-content"})
    for video_datas in root2.iterfind(".//div[@class='views-row']"):
        video_label = video_datas.findall(".//span[@class='field-content']")[0].text
        video_image = URL_ROOT + video_datas.find(".//img").get('src')
        video_url = video_datas.find(".//a").get('href')
        item = Listitem()
        item.label = video_label
        item.art['thumb'] = item.art['landscape'] = video_image
        item.set_callback(get_video_url,
                          item_id=item_id,
                          video_url=video_url)
        item_post_treatment(item, is_playable=True, is_downloadable=True)
        yield item
    yield Listitem.next_page(item_id=item_id,
                             videos_url=videos_url,
                             page=str(int(page) + 1))
@Route.register
def list_videos(plugin, item_id, videos_url, page, **kwargs):
    """
    Build a flat, paginated video listing (used for the documentaries
    category): yield one playable item per video plus a "next page" item.
    """
    resp = urlquick.get(videos_url + '?page=%s' % page)
    root = resp.parse()
    for video_datas in root.iterfind(".//div[@class='views-row']"):
        video_label = video_datas.findall(".//span[@class='field-content']")[0].text
        video_image = URL_ROOT + video_datas.find(".//img").get('src')
        video_url = video_datas.find(".//a").get('href')
        item = Listitem()
        item.label = video_label
        item.art['thumb'] = item.art['landscape'] = video_image
        item.set_callback(get_video_url,
                          item_id=item_id,
                          video_url=video_url)
        item_post_treatment(item, is_playable=True, is_downloadable=True)
        yield item
    yield Listitem.next_page(item_id=item_id,
                             videos_url=videos_url,
                             page=str(int(page) + 1))
@Resolver.register
def get_video_url(plugin,
                  item_id,
                  video_url,
                  download_mode=False,
                  **kwargs):
    """Resolve a replay page to its embedded Dailymotion stream."""
    page = urlquick.get(video_url,
                        headers={'User-Agent': web_utils.get_random_ua()},
                        max_age=-1,
                        timeout=120)
    # The replay player is a Dailymotion embed; grab its video id.
    dailymotion_id = re.compile(
        r'www.dailymotion.com/embed/video/(.*?)[\?\"]').findall(page.text)[0]
    return resolver_proxy.get_stream_dailymotion(plugin, dailymotion_id,
                                                 download_mode)
@Resolver.register
def METHOD_NAME(plugin, item_id, **kwargs):
    """Resolve the live stream by scraping the Dailymotion embed id from the
    live page; falls back to a hard-coded id when scraping fails."""
    try:
        resp = urlquick.get(URL_LIVE_SITE, headers=GENERIC_HEADERS, max_age=-1)
        root = resp.parse()
        url_video = root.find('.//iframe').get('data-src')
        live_id = re.compile(r'www.dailymotion.com/embed/video/(.*?)[\?\"]').findall(url_video)[0]
    except Exception:
        # Best-effort fallback: known Dailymotion id of the LCP live channel.
        live_id = 'xgepjr'
    return resolver_proxy.get_stream_dailymotion(plugin, live_id, False)
1,743 | get rank | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
"""
This file contains primitives for multi-gpu communication.
This is useful when doing distributed training.
"""
import functools
import logging
import numpy as np
import pickle
import torch
import torch.distributed as dist
import torch
# Stays None until launch() populates it; get_local_rank/get_local_size
# assert it is set before using it in a distributed run.
_LOCAL_PROCESS_GROUP = None
"""
A torch process group which only includes processes that on the same machine as the current process.
This variable is set when processes are spawned by `launch()` in "engine/launch.py".
"""
def get_world_size() -> int:
    """Return the number of processes in the default group (1 when not distributed)."""
    if dist.is_available() and dist.is_initialized():
        return dist.get_world_size()
    return 1
def METHOD_NAME() -> int:
    """Return the rank of this process in the default group (0 when not distributed)."""
    if not dist.is_available():
        return 0
    if not dist.is_initialized():
        return 0
    # Bug fix: the original called the nonexistent ``dist.METHOD_NAME()``;
    # the torch.distributed API for this is ``get_rank()``.
    return dist.get_rank()
def get_local_rank() -> int:
    """
    Returns:
        The rank of the current process within the local (per-machine) process group.
        0 when torch.distributed is unavailable or uninitialized.
    """
    if not dist.is_available():
        return 0
    if not dist.is_initialized():
        return 0
    assert _LOCAL_PROCESS_GROUP is not None
    # Bug fix: the original called the nonexistent ``dist.METHOD_NAME``;
    # the torch.distributed API for this is ``get_rank``.
    return dist.get_rank(group=_LOCAL_PROCESS_GROUP)
def get_local_size() -> int:
    """
    Returns:
        The size of the per-machine process group, i.e. the number of
        processes per machine (1 when not distributed).
    """
    if not (dist.is_available() and dist.is_initialized()):
        return 1
    return dist.get_world_size(group=_LOCAL_PROCESS_GROUP)
def is_main_process() -> bool:
    """Return True iff this process has rank 0 in the default group."""
    return METHOD_NAME() == 0
def synchronize():
    """
    Helper function to synchronize (barrier) among all processes when
    using distributed training. No-op outside a multi-process run.
    """
    if not dist.is_available() or not dist.is_initialized():
        return
    if dist.get_world_size() == 1:
        return
    dist.barrier()
@functools.lru_cache()
def _get_global_gloo_group():
    """
    Return a process group based on gloo backend, containing all the ranks
    The result is cached.
    """
    # Gloo works with CPU tensors, which the pickle-based serialization
    # helpers in this module rely on; reuse WORLD when it is already gloo.
    if dist.get_backend() == "nccl":
        return dist.new_group(backend="gloo")
    else:
        return dist.group.WORLD
def _serialize_to_tensor(data, group):
    """Pickle *data* into a 1-D uint8 tensor on the device matching *group*'s
    backend (CPU for gloo, CUDA for nccl)."""
    backend = dist.get_backend(group)
    assert backend in ["gloo", "nccl"]
    device = torch.device("cpu" if backend == "gloo" else "cuda")
    buffer = pickle.dumps(data)
    if len(buffer) > 1024 ** 3:
        # Warn on payloads over 1 GiB -- gathering them is usually a mistake.
        logger = logging.getLogger(__name__)
        logger.warning(
            "Rank {} trying to all-gather {:.2f} GB of data on device {}".format(
                METHOD_NAME(), len(buffer) / (1024 ** 3), device
            )
        )
    storage = torch.ByteStorage.from_buffer(buffer)
    tensor = torch.ByteTensor(storage).to(device=device)
    return tensor
def _pad_to_largest_tensor(tensor, group):
    """
    Returns:
        list[int]: size of the tensor, on each rank
        Tensor: padded tensor that has the max size
    """
    world_size = dist.get_world_size(group=group)
    assert (
        world_size >= 1
    ), "comm.gather/all_gather must be called from ranks within the given group!"
    local_size = torch.tensor([tensor.numel()], dtype=torch.int64, device=tensor.device)
    # Exchange every rank's payload size first, so all ranks can agree on
    # the common (maximum) length to pad to.
    size_list = [
        torch.zeros([1], dtype=torch.int64, device=tensor.device)
        for _ in range(world_size)
    ]
    dist.all_gather(size_list, local_size, group=group)
    size_list = [int(size.item()) for size in size_list]
    max_size = max(size_list)
    # we pad the tensor because torch all_gather does not support
    # gathering tensors of different shapes
    if local_size != max_size:
        padding = torch.zeros(
            (max_size - local_size,), dtype=torch.uint8, device=tensor.device
        )
        tensor = torch.cat((tensor, padding), dim=0)
    return size_list, tensor
def all_gather(data, group=None):
    """
    Run all_gather on arbitrary picklable data (not necessarily tensors).
    Args:
        data: any picklable object
        group: a torch process group. By default, will use a group which
            contains all ranks on gloo backend.
    Returns:
        list[data]: list of data gathered from each rank
    """
    # Single-process fast paths: nothing to exchange.
    if get_world_size() == 1:
        return [data]
    if group is None:
        group = _get_global_gloo_group()
    if dist.get_world_size(group) == 1:
        return [data]
    tensor = _serialize_to_tensor(data, group)
    size_list, tensor = _pad_to_largest_tensor(tensor, group)
    max_size = max(size_list)
    # receiving Tensor from all ranks
    tensor_list = [
        torch.empty((max_size,), dtype=torch.uint8, device=tensor.device)
        for _ in size_list
    ]
    dist.all_gather(tensor_list, tensor, group=group)
    data_list = []
    # Strip each rank's padding (true length is in size_list) before unpickling.
    for size, tensor in zip(size_list, tensor_list):
        buffer = tensor.cpu().numpy().tobytes()[:size]
        data_list.append(pickle.loads(buffer))
    return data_list
def gather(data, dst=0, group=None):
    """
    Run gather on arbitrary picklable data (not necessarily tensors).
    Args:
        data: any picklable object
        dst (int): destination rank
        group: a torch process group. By default, will use a group which
            contains all ranks on gloo backend.
    Returns:
        list[data]: on dst, a list of data gathered from each rank. Otherwise,
            an empty list.
    """
    # Single-process fast paths: nothing to exchange.
    if get_world_size() == 1:
        return [data]
    if group is None:
        group = _get_global_gloo_group()
    if dist.get_world_size(group=group) == 1:
        return [data]
    # Bug fix: the original called the nonexistent ``dist.METHOD_NAME``;
    # the torch.distributed API for this is ``get_rank``.
    rank = dist.get_rank(group=group)
    tensor = _serialize_to_tensor(data, group)
    size_list, tensor = _pad_to_largest_tensor(tensor, group)
    # receiving Tensor from all ranks
    if rank == dst:
        max_size = max(size_list)
        tensor_list = [
            torch.empty((max_size,), dtype=torch.uint8, device=tensor.device)
            for _ in size_list
        ]
        dist.gather(tensor, tensor_list, dst=dst, group=group)
        data_list = []
        # Strip each rank's padding (true length is in size_list) before unpickling.
        for size, tensor in zip(size_list, tensor_list):
            buffer = tensor.cpu().numpy().tobytes()[:size]
            data_list.append(pickle.loads(buffer))
        return data_list
    else:
        # Non-destination ranks contribute their tensor and receive nothing.
        dist.gather(tensor, [], dst=dst, group=group)
        return []
def shared_random_seed():
    """
    Returns:
        int: a random number that is the same across all workers.
        If workers need a shared RNG, they can use this shared seed to
        create one.
    All workers must call this function, otherwise it will deadlock.
    """
    # Every rank draws its own candidate; rank 0's draw wins for everyone.
    ints = np.random.randint(2 ** 31)
    all_ints = all_gather(ints)
    return all_ints[0]
def reduce_dict(input_dict, average=True):
    """
    Reduce the values in the dictionary from all processes so that process with rank
    0 has the reduced results.
    Args:
        input_dict (dict): inputs to be reduced. All the values must be scalar CUDA Tensor.
        average (bool): whether to do average or sum
    Returns:
        a dict with the same keys as input_dict, after reduction.
    """
    world_size = get_world_size()
    if world_size < 2:
        return input_dict
    with torch.no_grad():
        names = []
        values = []
        # sort the keys so that they are consistent across processes
        for k in sorted(input_dict.keys()):
            names.append(k)
            values.append(input_dict[k])
        values = torch.stack(values, dim=0)
        dist.reduce(values, dst=0)
        # Bug fix: the original called the nonexistent ``dist.METHOD_NAME()``;
        # the torch.distributed API for this is ``get_rank()``.
        if dist.get_rank() == 0 and average:
            # only main process gets accumulated, so only divide by
            # world_size in this case
            values /= world_size
        reduced_dict = {k: v for k, v in zip(names, values)}
    return reduced_dict
1,744 | build | from conan import ConanFile
from conan.tools.scm import Version
from conan.tools import files
from conan.tools.files import copy
from conan.errors import ConanInvalidConfiguration
from conans import AutoToolsBuildEnvironment
import os
required_conan_version = ">=1.47.0"
class MoldConan(ConanFile):
    """Conan recipe for mold, a fast drop-in replacement for Unix linkers."""

    name = "mold"
    url = "https://github.com/conan-io/conan-center-index"
    homepage = "https://github.com/rui314/mold/"
    license = "AGPL-3.0"
    description = ("mold is a faster drop-in replacement for existing Unix linkers. It is several times faster than the LLVM lld linker")
    topics = ("mold", "ld", "linkage", "compilation")
    settings = "os", "arch", "compiler", "build_type"
    generators = "make"

    def validate(self):
        """Reject configurations mold cannot be built for (build tool only)."""
        if self.settings.build_type == "Debug":
            raise ConanInvalidConfiguration('Mold is a build tool, specify mold:build_type=Release in your build profile, see https://github.com/conan-io/conan-center-index/pull/11536#issuecomment-1195607330')
        if self.settings.compiler in ["gcc", "clang", "intel-cc"] and self.settings.compiler.libcxx != "libstdc++11":
            raise ConanInvalidConfiguration('Mold can only be built with libstdc++11; specify mold:compiler.libcxx=libstdc++11 in your build profile')
        if self.settings.os == "Windows":
            raise ConanInvalidConfiguration(f'{self.name} can not be built on {self.settings.os}.')
        if self.settings.compiler == "gcc" and Version(self.settings.compiler.version) < "10":
            raise ConanInvalidConfiguration("GCC version 10 or higher required")
        if (self.settings.compiler == "clang" or self.settings.compiler == "apple-clang") and Version(self.settings.compiler.version) < "12":
            raise ConanInvalidConfiguration("Clang version 12 or higher required")
        if self.settings.compiler == "apple-clang" and "armv8" == self.settings.arch :
            raise ConanInvalidConfiguration(f'{self.name} is still not supported by Mac M1.')

    @property
    def _source_subfolder(self):
        return "source_subfolder"

    @property
    def _build_subfolder(self):
        return "build_subfolder"

    def _get_include_path(self, dependency):
        # Include dir of a dependency's package folder (used to patch the Makefile).
        include_path = self.deps_cpp_info[dependency].rootpath
        include_path = os.path.join(include_path, "include")
        return include_path

    def _patch_sources(self):
        """Adapt mold's Makefile to Conan-provided dependencies and older compilers."""
        if self.settings.compiler == "apple-clang" or (self.settings.compiler == "gcc" and Version(self.settings.compiler.version) < "11"):
            # These compilers only accept the provisional C++20 flag spelling.
            files.replace_in_file(self, "source_subfolder/Makefile", "-std=c++20", "-std=c++2a")
        files.replace_in_file(self, "source_subfolder/Makefile", "-Ithird-party/xxhash ", "-I{} -I{} -I{} -I{} -I{}".format(
            self._get_include_path("zlib"),
            self._get_include_path("openssl"),
            self._get_include_path("xxhash"),
            self._get_include_path("mimalloc"),
            self._get_include_path("onetbb")
        ))
        files.replace_in_file(self, "source_subfolder/Makefile", "MOLD_LDFLAGS += -ltbb", "MOLD_LDFLAGS += -L{} -ltbb".format(
            self.deps_cpp_info["onetbb"].lib_paths[0]))
        files.replace_in_file(self, "source_subfolder/Makefile", "MOLD_LDFLAGS += -lmimalloc", "MOLD_LDFLAGS += -L{} -lmimalloc".format(
            self.deps_cpp_info["mimalloc"].lib_paths[0]))

    def requirements(self):
        self.requires("zlib/1.2.12")
        self.requires("openssl/1.1.1q")
        self.requires("xxhash/0.8.1")
        self.requires("onetbb/2021.3.0")
        self.requires("mimalloc/2.0.6")

    def source(self):
        files.get(self, **self.conan_data["sources"][self.version],
                  destination=self._source_subfolder, strip_root=True)

    def METHOD_NAME(self):
        self._patch_sources()
        with files.chdir(self, self._source_subfolder):
            autotools = AutoToolsBuildEnvironment(self)
            autotools.make(target="mold", args=['SYSTEM_TBB=1', 'SYSTEM_MIMALLOC=1'])

    def package(self):
        copy(self, "LICENSE", src=self._source_subfolder, dst=os.path.join(self.package_folder, "licenses"))
        copy(self, "mold", src="bin", dst=os.path.join(self.package_folder, "bin"), keep_path=False)
        copy(self, "mold", src=self._source_subfolder, dst=os.path.join(self.package_folder, "bin"), keep_path=False)

    def package_id(self):
        # Build tool: the binary does not depend on the consumer's compiler.
        del self.info.settings.compiler

    def package_info(self):
        bindir = os.path.join(self.package_folder, "bin")
        # Bug fix: LD previously pointed at the nonexistent path "<bin>/bindir";
        # it must point at the packaged `mold` executable itself.
        mold_location = os.path.join(bindir, "mold")
        self.output.info('Appending PATH environment variable: {}'.format(bindir))
        self.env_info.PATH.append(bindir)
        self.env_info.LD = mold_location
        self.buildenv_info.prepend_path("MOLD_ROOT", bindir)
        self.cpp_info.includedirs = []
        if self.settings.os == "Linux":
            self.cpp_info.system_libs.extend(["m", "pthread", "dl"])
1,745 | test glob symlinks | import glob
import os
import shutil
import sys
import unittest
from test.support import (run_unittest, TESTFN, skip_unless_symlink,
can_symlink, create_empty_file)
class GlobTests(unittest.TestCase):
def norm(self, *parts):
return os.path.normpath(os.path.join(self.tempdir, *parts))
def mktemp(self, *parts):
filename = self.norm(*parts)
base, file = os.path.split(filename)
if not os.path.exists(base):
os.makedirs(base)
create_empty_file(filename)
def setUp(self):
self.tempdir = TESTFN + "_dir"
self.mktemp('a', 'D')
self.mktemp('aab', 'F')
self.mktemp('.aa', 'G')
self.mktemp('.bb', 'H')
self.mktemp('aaa', 'zzzF')
self.mktemp('ZZZ')
self.mktemp('a', 'bcd', 'EF')
self.mktemp('a', 'bcd', 'efg', 'ha')
if can_symlink():
os.symlink(self.norm('broken'), self.norm('sym1'))
os.symlink('broken', self.norm('sym2'))
os.symlink(os.path.join('a', 'bcd'), self.norm('sym3'))
def tearDown(self):
shutil.rmtree(self.tempdir)
def glob(self, *parts):
if len(parts) == 1:
pattern = parts[0]
else:
pattern = os.path.join(*parts)
p = os.path.join(self.tempdir, pattern)
res = glob.glob(p)
self.assertEqual(list(glob.iglob(p)), res)
bres = [os.fsencode(x) for x in res]
self.assertEqual(glob.glob(os.fsencode(p)), bres)
self.assertEqual(list(glob.iglob(os.fsencode(p))), bres)
return res
def assertSequencesEqual_noorder(self, l1, l2):
l1 = list(l1)
l2 = list(l2)
self.assertEqual(set(l1), set(l2))
self.assertEqual(sorted(l1), sorted(l2))
def test_glob_literal(self):
eq = self.assertSequencesEqual_noorder
eq(self.glob('a'), [self.norm('a')])
eq(self.glob('a', 'D'), [self.norm('a', 'D')])
eq(self.glob('aab'), [self.norm('aab')])
eq(self.glob('zymurgy'), [])
res = glob.glob('*')
self.assertEqual({type(r) for r in res}, {str})
res = glob.glob(os.path.join(os.curdir, '*'))
self.assertEqual({type(r) for r in res}, {str})
res = glob.glob(b'*')
self.assertEqual({type(r) for r in res}, {bytes})
res = glob.glob(os.path.join(os.fsencode(os.curdir), b'*'))
self.assertEqual({type(r) for r in res}, {bytes})
def test_glob_one_directory(self):
eq = self.assertSequencesEqual_noorder
eq(self.glob('a*'), map(self.norm, ['a', 'aab', 'aaa']))
eq(self.glob('*a'), map(self.norm, ['a', 'aaa']))
eq(self.glob('.*'), map(self.norm, ['.aa', '.bb']))
eq(self.glob('?aa'), map(self.norm, ['aaa']))
eq(self.glob('aa?'), map(self.norm, ['aaa', 'aab']))
eq(self.glob('aa[ab]'), map(self.norm, ['aaa', 'aab']))
eq(self.glob('*q'), [])
def test_glob_nested_directory(self):
eq = self.assertSequencesEqual_noorder
if os.path.normcase("abCD") == "abCD":
# case-sensitive filesystem
eq(self.glob('a', 'bcd', 'E*'), [self.norm('a', 'bcd', 'EF')])
else:
# case insensitive filesystem
eq(self.glob('a', 'bcd', 'E*'), [self.norm('a', 'bcd', 'EF'),
self.norm('a', 'bcd', 'efg')])
eq(self.glob('a', 'bcd', '*g'), [self.norm('a', 'bcd', 'efg')])
def test_glob_directory_names(self):
eq = self.assertSequencesEqual_noorder
eq(self.glob('*', 'D'), [self.norm('a', 'D')])
eq(self.glob('*', '*a'), [])
eq(self.glob('a', '*', '*', '*a'),
[self.norm('a', 'bcd', 'efg', 'ha')])
eq(self.glob('?a?', '*F'), [self.norm('aaa', 'zzzF'),
self.norm('aab', 'F')])
def test_glob_directory_with_trailing_slash(self):
# Patterns ending with a slash shouldn't match non-dirs
res = glob.glob(self.norm('Z*Z') + os.sep)
self.assertEqual(res, [])
res = glob.glob(self.norm('ZZZ') + os.sep)
self.assertEqual(res, [])
# When there is a wildcard pattern which ends with os.sep, glob()
# doesn't blow up.
res = glob.glob(self.norm('aa*') + os.sep)
self.assertEqual(len(res), 2)
# either of these results is reasonable
self.assertIn(set(res), [
{self.norm('aaa'), self.norm('aab')},
{self.norm('aaa') + os.sep, self.norm('aab') + os.sep},
])
def test_glob_bytes_directory_with_trailing_slash(self):
# Same as test_glob_directory_with_trailing_slash, but with a
# bytes argument.
res = glob.glob(os.fsencode(self.norm('Z*Z') + os.sep))
self.assertEqual(res, [])
res = glob.glob(os.fsencode(self.norm('ZZZ') + os.sep))
self.assertEqual(res, [])
res = glob.glob(os.fsencode(self.norm('aa*') + os.sep))
self.assertEqual(len(res), 2)
# either of these results is reasonable
self.assertIn(set(res), [
{os.fsencode(self.norm('aaa')),
os.fsencode(self.norm('aab'))},
{os.fsencode(self.norm('aaa') + os.sep),
os.fsencode(self.norm('aab') + os.sep)},
])
@skip_unless_symlink
def METHOD_NAME(self):
eq = self.assertSequencesEqual_noorder
eq(self.glob('sym3'), [self.norm('sym3')])
eq(self.glob('sym3', '*'), [self.norm('sym3', 'EF'),
self.norm('sym3', 'efg')])
self.assertIn(self.glob('sym3' + os.sep),
[[self.norm('sym3')], [self.norm('sym3') + os.sep]])
eq(self.glob('*', '*F'),
[self.norm('aaa', 'zzzF'),
self.norm('aab', 'F'), self.norm('sym3', 'EF')])
@skip_unless_symlink
def test_glob_broken_symlinks(self):
eq = self.assertSequencesEqual_noorder
eq(self.glob('sym*'), [self.norm('sym1'), self.norm('sym2'),
self.norm('sym3')])
eq(self.glob('sym1'), [self.norm('sym1')])
eq(self.glob('sym2'), [self.norm('sym2')])
@unittest.skipUnless(sys.platform == "win32", "Win32 specific test")
def test_glob_magic_in_drive(self):
eq = self.assertSequencesEqual_noorder
eq(glob.glob('*:'), [])
eq(glob.glob(b'*:'), [])
eq(glob.glob('?:'), [])
eq(glob.glob(b'?:'), [])
eq(glob.glob('\\\\?\\c:\\'), ['\\\\?\\c:\\'])
eq(glob.glob(b'\\\\?\\c:\\'), [b'\\\\?\\c:\\'])
eq(glob.glob('\\\\*\\*\\'), [])
eq(glob.glob(b'\\\\*\\*\\'), [])
def check_escape(self, arg, expected):
self.assertEqual(glob.escape(arg), expected)
self.assertEqual(glob.escape(os.fsencode(arg)), os.fsencode(expected))
def test_escape(self):
    """glob.escape() wraps each magic character ('[', '?', '*') in brackets."""
    cases = (
        ('abc', 'abc'),
        ('[', '[[]'),
        ('?', '[?]'),
        ('*', '[*]'),
        ('[[_/*?*/_]]', '[[][[]_/[*][?][*]/_]]'),
        ('/[[_/*?*/_]]/', '/[[][[]_/[*][?][*]/_]]/'),
    )
    for pattern, escaped in cases:
        self.check_escape(pattern, escaped)
@unittest.skipUnless(sys.platform == "win32", "Win32 specific test")
def test_escape_windows(self):
    """Drive/UNC prefixes stay intact while magic chars after them are escaped."""
    cases = (
        ('?:?', '?:[?]'),
        ('*:*', '*:[*]'),
        (r'\\?\c:\?', r'\\?\c:\[?]'),
        (r'\\*\*\*', r'\\*\*\[*]'),
        ('//?/c:/?', '//?/c:/[?]'),
        ('//*/*/*', '//*/*/[*]'),
    )
    for pattern, escaped in cases:
        self.check_escape(pattern, escaped)
def test_main():
    """Entry point used by the regression-test driver."""
    run_unittest(GlobTests)


if __name__ == "__main__":
    test_main()
import json
import shutil
from glob import glob
from pathlib import Path
import pytest
from assisted_service_client import PlatformType
from junit_report import JunitTestSuite
from assisted_test_infra.test_infra import ClusterName
from assisted_test_infra.test_infra.helper_classes.cluster import Cluster
from assisted_test_infra.test_infra.helper_classes.config import BaseNodesConfig
from consts import consts
from service_client import InventoryClient, SuppressAndLog, log
from tests.base_test import BaseTest
from tests.config import ClusterConfig, InfraEnvConfig
class TestMakefileTargets(BaseTest):
    """Test entry points driven by Makefile targets: node deploy, install,
    cluster deletion and terraform teardown."""

    @JunitTestSuite()
    def test_target_deploy_nodes(self, cluster):
        cluster.prepare_nodes()

    @JunitTestSuite()
    def test_target_deploy_networking_with_nodes(self, cluster):
        cluster.prepare_for_installation()

    @JunitTestSuite()
    def test_target_install_with_deploy_nodes(self, prepared_cluster):
        prepared_cluster.start_install_and_wait_for_installed()

    @pytest.fixture
    def download_iso_override_nodes_count(self, prepared_controller_configuration: BaseNodesConfig):
        """No need creating any nodes for creating a cluster and download its ISO
        Setting masters_count and workers_count to 0 on with overriding controller_configuration fixture return value
        before nodes creation causing Nodes object not to generate any new nodes"""
        prepared_controller_configuration.masters_count = 0
        prepared_controller_configuration.workers_count = 0
        yield prepared_controller_configuration

    @pytest.mark.override_controller_configuration(download_iso_override_nodes_count.__name__)
    def test_target_download_iso(self, cluster):
        # The override fixture above zeroes node counts, so no VMs are created.
        cluster.download_image()

    @JunitTestSuite()
    def METHOD_NAME(self, api_client: InventoryClient, cluster_configuration):
        """Delete all clusters or single cluster if CLUSTER_ID is given"""
        cluster_id = cluster_configuration.cluster_id
        # No explicit id -> act on every cluster the service reports.
        clusters = api_client.clusters_list() if not cluster_id else [{"id": cluster_id}]

        for cluster_info in clusters:
            cluster = Cluster(api_client, ClusterConfig(cluster_id=cluster_info["id"]), InfraEnvConfig())
            cluster.delete()

        log.info(f"Successfully deleted {len(clusters)} clusters")

    @JunitTestSuite()
    def test_destroy_available_terraform(
        self, prepared_controller_configuration: BaseNodesConfig, cluster_configuration
    ):
        """Destroy every terraform-managed cluster found under consts.TF_FOLDER,
        then remove each cluster's folder once its resources are gone."""
        clusters_tf_folders = glob(f"{consts.TF_FOLDER}/*")
        destroyed_clusters = 0

        def onerror(*args):
            # shutil.rmtree onerror callback: args = (function, path, excinfo).
            log.error(f"Error while attempting to delete {args[1]}, {args[2]}")

        for cluster_dir in clusters_tf_folders:
            tfvar_files = glob(f"{cluster_dir}/*/{consts.TFVARS_JSON_NAME}", recursive=True)
            resources_deleted = False
            for tfvar_file in tfvar_files:
                # Best-effort: log and continue if one cluster fails to destroy.
                with SuppressAndLog(Exception):
                    with open(tfvar_file) as f:
                        tfvars = json.load(f)
                    # Rehydrate the saved terraform variables into the config objects.
                    for key, value in tfvars.items():
                        if key == "cluster_name":
                            value = ClusterName(value)
                        if hasattr(cluster_configuration, key):
                            setattr(cluster_configuration, key, value)
                        if hasattr(prepared_controller_configuration, key):
                            setattr(prepared_controller_configuration, key, value)
                    platform = cluster_configuration.platform or PlatformType.BAREMETAL
                    parent_folder = Path(tfvar_file).resolve().parent
                    # Only process the tfvars folder matching the cluster's platform.
                    if platform != parent_folder.stem:
                        continue
                    # iso is not needed for destroy
                    dummy_iso_path = Path(parent_folder).resolve() / "dummy.iso"
                    cluster_configuration.iso_download_path = str(dummy_iso_path)
                    cluster_configuration.worker_iso_download_path = str(dummy_iso_path)
                    dummy_iso_path.touch(exist_ok=True)
                    controller = self.get_terraform_controller(prepared_controller_configuration, cluster_configuration)
                    config_vars = controller.get_all_vars()
                    controller.tf.set_vars(**config_vars)
                    controller.destroy_all_nodes()
                    destroyed_clusters += 1
                    resources_deleted = True
                    log.debug(f"Successfully deleted {cluster_dir} resources")
            if resources_deleted:
                shutil.rmtree(cluster_dir, onerror=onerror)

        log.info(f"Successfully destroyed {destroyed_clusters} clusters")
1,747 | on flag take | """
Team Deathmatch game mode.
Options
^^^^^^^
.. code-block:: toml
[tdm]
# Maximum kills to win the game
kill_limit = 100
# How many points you will get by intel capture
intel_points = 10
# Hide intel from the map and disable the captures
remove_intel = false
# Use intel scores as a percentage to win the game
# This can cause trouble if remove_intel is false
score_percentage = false
..Maintainer: Triplefox
"""
from pyspades.constants import *
from pyspades.contained import IntelCapture
from piqueserver.config import config
from piqueserver.commands import command
import math
# Config-file section and options for the tdm game mode (see module docstring).
TDM_CONFIG = config.section("tdm")
KILL_LIMIT = TDM_CONFIG.option("kill_limit", default=100)
INTEL_POINTS = TDM_CONFIG.option("intel_points", default=10)
REMOVE_INTEL = TDM_CONFIG.option("remove_intel", default=False)
SCORE_PERCENTAGE = TDM_CONFIG.option("score_percentage", default=False)

# Coordinate used to park the intel off-map when remove_intel is enabled.
HIDE_COORD = (0, 0, 0)


@command()
def score(connection):
    """/score chat command: report the current kill-count standings."""
    return connection.protocol.get_kill_count()
def apply_script(protocol, connection, config):
    """Wrap the server's protocol/connection classes with TDM behaviour."""

    class TDMConnection(connection):
        def on_spawn(self, pos):
            # Remind the player of the rules and the current standings.
            self.send_chat(self.explain_game_mode())
            self.send_chat(self.protocol.get_kill_count())
            return connection.on_spawn(self, pos)

        def METHOD_NAME(self):
            # Block intel pickup entirely when intel is removed from the map.
            if REMOVE_INTEL.get():
                return False
            return connection.METHOD_NAME(self)

        def on_flag_capture(self):
            result = connection.on_flag_capture(self)
            # An intel capture is worth INTEL_POINTS kills for the team.
            self.team.kills += INTEL_POINTS.get()
            self.protocol.check_end_game(self)
            return result

        def on_kill(self, killer, type, grenade):
            result = connection.on_kill(self, killer, type, grenade)
            self.protocol.check_end_game(killer)
            return result

        def explain_game_mode(self):
            msg = 'Team Deathmatch: Kill the opposing team.'
            if not REMOVE_INTEL.get():
                msg += ' Intel is worth %s kills.' % INTEL_POINTS.get()
            return msg

    class TDMProtocol(protocol):
        # TDM is implemented on top of the CTF machinery (intel, captures).
        game_mode = CTF_MODE

        def on_flag_spawn(self, x, y, z, flag, entity_id):
            # Hide the intel off-map when captures are disabled.
            if REMOVE_INTEL.get():
                return HIDE_COORD
            return protocol.on_flag_spawn(self, x, y, z, flag, entity_id)

        def get_kill_count(self):
            """Build the human-readable standings line shown to players."""
            green_kills = self.green_team.kills
            blue_kills = self.blue_team.kills
            diff = green_kills - blue_kills
            if green_kills > blue_kills:
                return ("%s leads %s-%s (+%s, %s left). Playing to %s kills." %
                        (self.green_team.name,
                         green_kills, blue_kills,
                         diff,
                         KILL_LIMIT.get() - green_kills,
                         KILL_LIMIT.get()))
            elif green_kills < blue_kills:
                return ("%s leads %s-%s (+%s, %s left). Playing to %s kills." %
                        (self.blue_team.name,
                         blue_kills, green_kills,
                         -diff,
                         KILL_LIMIT.get() - blue_kills,
                         KILL_LIMIT.get()))
            else:
                return ("%s-%s, %s left. Playing to %s kills." %
                        (green_kills,
                         blue_kills,
                         KILL_LIMIT.get() - green_kills,
                         KILL_LIMIT.get()))

        # since its a team based game, we gonna split the caps
        # for all players in the team
        def do_captures(self, team, caps):
            while (team.score < caps):
                for player in team.get_players():
                    if team.score >= caps:
                        break
                    team.score += 1
                    intel_capture = IntelCapture()
                    intel_capture.player_id = player.player_id
                    intel_capture.winning = False
                    self.broadcast_contained(intel_capture)

        def check_end_game(self, player):
            """Announce a winner and reset once either team hits the kill limit."""
            if SCORE_PERCENTAGE.get() and player:
                # Mirror kill progress onto the capture score as a percentage.
                team = player.team
                caps_percent = math.floor(
                    self.max_score*team.kills/KILL_LIMIT.get())
                if caps_percent > team.score:
                    self.do_captures(team, caps_percent)
            if self.green_team.kills >= KILL_LIMIT.get():
                self.broadcast_chat("%s Team Wins, %s - %s" %
                                    (self.green_team.name,
                                     self.green_team.kills,
                                     self.blue_team.kills))
                self.reset_game(player)
                protocol.on_game_end(self)
            elif self.blue_team.kills >= KILL_LIMIT.get():
                self.broadcast_chat("%s Team Wins, %s - %s" %
                                    (self.blue_team.name,
                                     self.blue_team.kills,
                                     self.green_team.kills))
                self.reset_game(player)
                protocol.on_game_end(self)

    return TDMProtocol, TDMConnection
# Copyright (c) 2021 Charles University, Faculty of Arts,
# Institute of the Czech National Corpus
# Copyright (c) 2021 Martin Zimandl <martin.zimandl@gmail.com>
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; version 2
# dated June, 1991.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
import logging
import plugins
import ujson as json
from action.control import http_action
from action.krequest import KRequest
from action.model.user import UserActionModel
from action.response import KResponse
from plugin_types.auth import AbstractAuth
from plugin_types.user_items import (
AbstractUserItems, FavoriteItem, UserItemException)
from plugins import inject
from plugins.common.mysql import MySQLConf, MySQLOps
from plugins.mysql_integration_db import MySqlIntegrationDb
from sanic.blueprints import Blueprint
from .backend import Backend
bp = Blueprint('mysql_user_items')
def import_legacy_record(data):
    """Convert a legacy (dict-based) favorite-item record into a FavoriteItem.

    Individual corpus components that fail to convert are logged and skipped
    rather than aborting the whole import.
    """
    ans = FavoriteItem()
    ans.ident = data['id']
    ans.name = data.get('name', '??')
    # Fix: use .get() here too — legacy records may lack 'name', and the
    # previous direct data['name'] access raised KeyError for them while the
    # line above already tolerated the missing key.
    ans.corpora = [dict(id=data['corpus_id'], name=data.get('name', '??'))]
    if data.get('corpora', None):
        for item in data.get('corpora', []):
            try:
                ans.corpora.append(dict(id=item['canonical_id'], name=item['name']))
            except Exception as ex:
                logging.getLogger(__name__).warning(
                    'Failed to import legacy fav. item record component: {0}'.format(ex))
    ans.subcorpus_id = data.get('subcorpus_id', None)
    ans.subcorpus_name = data.get('subcorpus_orig_id', ans.subcorpus_id)
    ans.size = data.get('size', None)
    ans.size_info = data.get('size_info', None)
    return ans
@bp.route('/user/set_favorite_item', methods=['POST'])
@http_action(return_type='json', access_level=2, action_model=UserActionModel)
async def set_favorite_item(amodel: UserActionModel, req: KRequest, resp: KResponse):
    """Create a favorite item from the posted corpora (plus optional
    'subcorpus_id') and store it for the current user.

    Returns the stored item as a dict.
    """
    corpora = []
    req_corpora = req.form_getlist('corpora')
    subc = req.form.get('subcorpus_id')
    if subc:
        with plugins.runtime.SUBC_STORAGE as sa:
            ident = await sa.get_info(subc)
            maincorp = await amodel.cf.get_corpus(ident)
            subcorpus_name = ident.name
            subcorpus_id = ident.id
    else:
        maincorp = await amodel.cf.get_corpus(req_corpora[0])
        subcorpus_name = None
        subcorpus_id = None
    main_size = maincorp.search_size
    for i, c_id in enumerate(req_corpora):
        if i == 0:
            # The first corpus was already loaded above.
            corp = maincorp
        else:
            corp = await amodel.cf.get_corpus(c_id)
        corpora.append(dict(id=c_id, name=corp.get_conf('NAME')))
    item = FavoriteItem(
        ident=None,  # will be updated after database insert (autoincrement)
        name=' || '.join(c['name'] for c in corpora) +
             (' / ' + subcorpus_name if subcorpus_name else ''),
        corpora=corpora,
        subcorpus_id=subcorpus_id,
        subcorpus_name=subcorpus_name,
        size=main_size
    )
    with plugins.runtime.USER_ITEMS as uit:
        await uit.add_user_item(amodel.plugin_ctx, item)
    return item.to_dict()
@bp.route('/user/unset_favorite_item', methods=['POST'])
@http_action(return_type='json', access_level=2, action_model=UserActionModel)
async def unset_favorite_item(amodel, req, resp):
    """Remove the favorite item identified by the posted 'id' form field."""
    with plugins.runtime.USER_ITEMS as uit:
        await uit.delete_user_item(amodel.plugin_ctx, req.form.get('id'))
    return dict(id=req.form.get('id'))
class MySQLUserItems(AbstractUserItems):
    """
    A mysql implementation of user_items plug-in.
    """

    def __init__(self, settings, db_backend: Backend, auth: AbstractAuth):
        super(MySQLUserItems, self).__init__()
        self._settings = settings
        self._auth = auth
        self._backend = db_backend

    def serialize(self, obj):
        """
        This is used for server-side serialization only
        """
        return json.dumps(obj.to_dict())

    async def get_user_items(self, plugin_ctx):
        """Return stored favorite items; anonymous users get an empty list."""
        ans = []
        if self._auth.anonymous_user(plugin_ctx)['id'] != plugin_ctx.user_id:
            ans = await self._backend.get_favitems(plugin_ctx.user_id)
        # ans = l10n.sort(ans, plugin_ctx.user_lang, key=lambda itm: itm.sort_key, reverse=False)
        return ans

    async def add_user_item(self, plugin_ctx, item):
        """Store a new favorite item, enforcing the configured per-user limit.

        Raises UserItemException when the limit is already reached.
        """
        if (await self._backend.count_favitems(plugin_ctx.user_id))['count'] >= self.METHOD_NAME:
            raise UserItemException(
                'Max. number of fav. items exceeded', error_code='defaultCorparch__err001',
                error_args={'maxNum': self.METHOD_NAME})
        await self._backend.insert_favitem(plugin_ctx.user_id, item)

    async def delete_user_item(self, plugin_ctx, item_id):
        await self._backend.delete_favitem(item_id)

    @staticmethod
    def export_actions():
        # Sanic blueprint carrying this plug-in's HTTP actions.
        return bp

    @property
    def METHOD_NAME(self):
        # Max. number of favorite items one user may store (from plug-in config).
        return int(self._settings.get('plugins', 'user_items')['max_num_favorites'])
@inject(plugins.runtime.INTEGRATION_DB, plugins.runtime.AUTH)
def create_instance(settings, integ_db: MySqlIntegrationDb, auth: AbstractAuth):
    """Plug-in factory: use the shared integration DB unless a dedicated
    mysql_host is configured for this plug-in."""
    plugin_conf = settings.get('plugins', 'user_items')
    if integ_db.is_active and 'mysql_host' not in plugin_conf:
        logging.getLogger(__name__).info(f'mysql_user_items uses integration_db[{integ_db.info}]')
        db_backend = Backend(integ_db)
    else:
        logging.getLogger(__name__).info(
            'mysql_user_items uses custom database configuration {}@{}'.format(
                plugin_conf['mysql_user'], plugin_conf['mysql_host']))
        db_backend = Backend(MySQLOps(**MySQLConf(plugin_conf).conn_dict).connection)
    return MySQLUserItems(settings, db_backend, auth)
# coding=utf-8
#
# This file is part of Hypothesis, which may be found at
# https://github.com/HypothesisWorks/hypothesis/
#
# Most of this work is copyright (C) 2013-2019 David R. MacIver
# (david@drmaciver.com), but it contains contributions by others. See
# CONTRIBUTING.rst for a full list of people who may hold copyright, and
# consult the git log if you need to determine who owns an individual
# contribution.
#
# This Source Code Form is subject to the terms of the Mozilla Public License,
# v. 2.0. If a copy of the MPL was not distributed with this file, You can
# obtain one at https://mozilla.org/MPL/2.0/.
#
# END HEADER
from __future__ import absolute_import, division, print_function
from hypothesis.internal.compat import getfullargspec
from hypothesis.internal.reflection import (
arg_string,
convert_keyword_arguments,
convert_positional_arguments,
)
from hypothesis.strategies._internal.strategies import SearchStrategy
if False:
from typing import Dict # noqa
# Memo of strategy -> fully unwrapped strategy, valid only for the duration of
# the outermost unwrap_strategies() call (cleared when unwrap_depth hits 0).
unwrap_cache = {}  # type: Dict[SearchStrategy, SearchStrategy]
unwrap_depth = 0


def unwrap_strategies(s):
    """Return the innermost concrete strategy wrapped by *s*.

    Non-strategy values are returned unchanged.  Results are memoised in
    unwrap_cache; seeding the cache with s -> s before recursing also stops
    infinite recursion on self-referential wrappers.
    """
    global unwrap_depth

    if not isinstance(s, SearchStrategy):
        return s
    try:
        return unwrap_cache[s]
    except KeyError:
        pass
    # Provisional entry so a cyclic wrapper chain resolves to s itself.
    unwrap_cache[s] = s
    try:
        unwrap_depth += 1
        try:
            result = unwrap_strategies(s.wrapped_strategy)
            unwrap_cache[s] = result
            try:
                assert result.force_has_reusable_values == s.force_has_reusable_values
            except AttributeError:
                pass
            try:
                # Propagate the flag onto the unwrapped strategy when present.
                result.force_has_reusable_values = s.force_has_reusable_values
            except AttributeError:
                pass
            return result
        except AttributeError:
            # No .wrapped_strategy attribute: s is already innermost.
            return s
    finally:
        unwrap_depth -= 1
        if unwrap_depth <= 0:
            unwrap_cache.clear()
        assert unwrap_depth >= 0
class LazyStrategy(SearchStrategy):
    """A strategy which is defined purely by conversion to and from another
    strategy.

    Its parameter and distribution come from that other strategy.
    """

    def __init__(self, function, args, kwargs):
        SearchStrategy.__init__(self)
        self.__wrapped_strategy = None
        self.__representation = None
        self.function = function
        self.__args = args
        self.__kwargs = kwargs

    @property
    def supports_find(self):
        return self.wrapped_strategy.supports_find

    def calc_is_empty(self, recur):
        return recur(self.wrapped_strategy)

    def calc_has_reusable_values(self, recur):
        return recur(self.wrapped_strategy)

    def calc_is_cacheable(self, recur):
        # Cacheable only if every strategy appearing in the arguments is.
        for source in (self.__args, self.__kwargs.values()):
            for v in source:
                if isinstance(v, SearchStrategy) and not v.is_cacheable:
                    return False
        return True

    @property
    def wrapped_strategy(self):
        """The strategy this lazy wrapper delegates to, built on first access.

        Strategy arguments are unwrapped first.  Fix: the strategy is now
        constructed exactly once — previously it was always built with the
        original arguments and then rebuilt (discarding the first result)
        whenever unwrapping changed any argument.
        """
        if self.__wrapped_strategy is None:
            unwrapped_args = tuple(unwrap_strategies(s) for s in self.__args)
            unwrapped_kwargs = {
                k: unwrap_strategies(v) for k, v in self.__kwargs.items()
            }
            if unwrapped_args == self.__args and unwrapped_kwargs == self.__kwargs:
                self.__wrapped_strategy = self.function(*self.__args, **self.__kwargs)
            else:
                self.__wrapped_strategy = self.function(
                    *unwrapped_args, **unwrapped_kwargs
                )
        return self.__wrapped_strategy

    def METHOD_NAME(self):
        # Validation is delegated to the wrapped strategy.
        w = self.wrapped_strategy
        assert isinstance(w, SearchStrategy), "%r returned non-strategy %r" % (self, w)
        w.validate()

    def __repr__(self):
        # Lazily computed repr that mirrors the original strategy call.
        if self.__representation is None:
            _args = self.__args
            _kwargs = self.__kwargs
            argspec = getfullargspec(self.function)
            defaults = dict(argspec.kwonlydefaults or {})
            if argspec.defaults is not None:
                for name, value in zip(
                    reversed(argspec.args), reversed(argspec.defaults)
                ):
                    defaults[name] = value
            if len(argspec.args) > 1 or argspec.defaults:
                _args, _kwargs = convert_positional_arguments(
                    self.function, _args, _kwargs
                )
            else:
                _args, _kwargs = convert_keyword_arguments(
                    self.function, _args, _kwargs
                )
            kwargs_for_repr = dict(_kwargs)
            # Omit keyword arguments that are still at their default values.
            for k, v in defaults.items():
                if k in kwargs_for_repr and kwargs_for_repr[k] is v:
                    del kwargs_for_repr[k]
            self.__representation = "%s(%s)" % (
                self.function.__name__,
                arg_string(self.function, _args, kwargs_for_repr, reorder=False),
            )
        return self.__representation

    def do_draw(self, data):
        return data.draw(self.wrapped_strategy)

    def do_filtered_draw(self, data, filter_strategy):
        return self.wrapped_strategy.do_filtered_draw(
            data=data, filter_strategy=filter_strategy
        )

    @property
    def label(self):
        return self.wrapped_strategy.label
1,750 | checkpoint artifact | """catboost init."""
from pathlib import Path
from types import SimpleNamespace
from typing import List, Union
from catboost import CatBoostClassifier, CatBoostRegressor # type: ignore
import wandb
from wandb.sdk.lib import telemetry as wb_telemetry
class WandbCallback:
    """`WandbCallback` automatically integrates CatBoost with wandb.

    Arguments:
       - metric_period: (int) if you are passing `metric_period` to your CatBoost model please pass the same value here (default=1).

    Passing `WandbCallback` to CatBoost will:

    - log training and validation metrics at every `metric_period`
    - log iteration at every `metric_period`

    Example:
        ```
        train_pool = Pool(train[features], label=train["label"], cat_features=cat_features)
        test_pool = Pool(test[features], label=test["label"], cat_features=cat_features)

        model = CatBoostRegressor(
            iterations=100,
            loss_function="Cox",
            eval_metric="Cox",
        )

        model.fit(
            train_pool,
            eval_set=test_pool,
            callbacks=[WandbCallback()],
        )
        ```
    """

    def __init__(self, metric_period: int = 1):
        # Fail fast: logging below requires an active wandb run.
        if wandb.run is None:
            raise wandb.Error("You must call `wandb.init()` before `WandbCallback()`")

        with wb_telemetry.context() as tel:
            tel.feature.catboost_wandb_callback = True

        self.metric_period: int = metric_period

    def after_iteration(self, info: SimpleNamespace) -> bool:
        """CatBoost per-iteration hook; returning True lets training continue."""
        if info.iteration % self.metric_period == 0:
            for data, metric in info.metrics.items():
                for metric_name, log in metric.items():
                    # todo: replace with wandb.run._log once available
                    # `log` is the metric history; log[-1] is the latest value.
                    wandb.log({f"{data}-{metric_name}": log[-1]}, commit=False)
            # todo: replace with wandb.run._log once available
            wandb.log({f"iteration@metric-period-{self.metric_period}": info.iteration})

        return True
def METHOD_NAME(
    model: Union[CatBoostClassifier, CatBoostRegressor], aliases: List[str]
) -> None:
    """Upload model checkpoint as W&B artifact."""
    if wandb.run is None:
        raise wandb.Error(
            "You must call `wandb.init()` before `_checkpoint_artifact()`"
        )

    # The run id makes the artifact name unique per run.
    model_name = f"model_{wandb.run.id}"
    # save the model in the default `cbm` format
    model_path = Path(wandb.run.dir) / "model"
    model.save_model(model_path)

    model_artifact = wandb.Artifact(name=model_name, type="model")
    model_artifact.add_file(str(model_path))
    wandb.log_artifact(model_artifact, aliases=aliases)
def _log_feature_importance(
    model: Union[CatBoostClassifier, CatBoostRegressor]
) -> None:
    """Log feature importance with default settings."""
    if wandb.run is None:
        # Fix: the message previously named `_checkpoint_artifact()` (copy-paste);
        # report the actual function so the hint is accurate.
        raise wandb.Error(
            "You must call `wandb.init()` before `_log_feature_importance()`"
        )

    feat_df = model.get_feature_importance(prettified=True)
    fi_data = [
        [feat, feat_imp]
        for feat, feat_imp in zip(feat_df["Feature Id"], feat_df["Importances"])
    ]
    table = wandb.Table(data=fi_data, columns=["Feature", "Importance"])
    # todo: replace with wandb.run._log once available
    wandb.log(
        {
            "Feature Importance": wandb.plot.bar(
                table, "Feature", "Importance", title="Feature Importance"
            )
        },
        commit=False,
    )
def log_summary(
    model: Union[CatBoostClassifier, CatBoostRegressor],
    log_all_params: bool = True,
    save_model_checkpoint: bool = False,
    log_feature_importance: bool = True,
) -> None:
    """`log_summary` logs useful metrics about catboost model after training is done.

    Arguments:
        model: it can be CatBoostClassifier or CatBoostRegressor.
        log_all_params: (boolean) if True (default) log the model hyperparameters as W&B config.
        save_model_checkpoint: (boolean) if True saves the model upload as W&B artifacts.
        log_feature_importance: (boolean) if True (default) logs feature importance as W&B bar chart using the default setting of `get_feature_importance`.

    Using this along with `wandb_callback` will:

    - save the hyperparameters as W&B config,
    - log `best_iteration` and `best_score` as `wandb.summary`,
    - save and upload your trained model to Weights & Biases Artifacts (when `save_model_checkpoint = True`)
    - log feature importance plot.

    Example:
        ```python
        train_pool = Pool(train[features], label=train["label"], cat_features=cat_features)
        test_pool = Pool(test[features], label=test["label"], cat_features=cat_features)

        model = CatBoostRegressor(
            iterations=100,
            loss_function="Cox",
            eval_metric="Cox",
        )

        model.fit(
            train_pool,
            eval_set=test_pool,
            callbacks=[WandbCallback()],
        )

        log_summary(model)
        ```
    """
    if wandb.run is None:
        raise wandb.Error("You must call `wandb.init()` before `log_summary()`")

    if not (isinstance(model, (CatBoostClassifier, CatBoostRegressor))):
        raise wandb.Error(
            "Model should be an instance of CatBoostClassifier or CatBoostRegressor"
        )

    with wb_telemetry.context() as tel:
        tel.feature.catboost_log_summary = True

    # log configs
    params = model.get_all_params()
    if log_all_params:
        wandb.config.update(params)

    # log best score and iteration
    wandb.run.summary["best_iteration"] = model.get_best_iteration()
    wandb.run.summary["best_score"] = model.get_best_score()

    # log model
    if save_model_checkpoint:
        # Tag "best" only when CatBoost kept the best iteration's weights.
        aliases = ["best"] if params["use_best_model"] else ["last"]
        METHOD_NAME(model, aliases=aliases)

    # Feature importance
    if log_feature_importance:
        _log_feature_importance(model)
# Copyright 2015 PerfKitBenchmarker Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Contributed by: Zi Shen Lim.
"""Tests for scimark2_benchmark."""
import unittest
from perfkitbenchmarker import sample
from perfkitbenchmarker import test_util
from perfkitbenchmarker.linux_benchmarks import scimark2_benchmark
TEST_OUTPUT_C = """;;; C small
** **
** SciMark2 Numeric Benchmark, see http://math.nist.gov/scimark **
** for details. (Results can be submitted to pozo@nist.gov) **
** **
Using 2.00 seconds min time per kenel.
Composite Score: 1596.04
FFT Mflops: 1568.64 (N=1024)
SOR Mflops: 1039.98 (100 x 100)
MonteCarlo: Mflops: 497.64
Sparse matmult Mflops: 1974.39 (N=1000, nz=5000)
LU Mflops: 2899.56 (M=100, N=100)
"""
TEST_OUTPUT_JAVA = """;;; Java small
SciMark 2.0a
Composite Score: 1716.3662351463677
FFT (1024): 1000.1380057152871
SOR (100x100): 1353.1987180103354
Monte Carlo : 727.7138820888014
Sparse matmult (N=1000, nz=5000): 1495.40225150659
LU (100x100): 4005.3783184108247
java.vendor: Oracle Corporation
java.version: 1.7.0_75
os.arch: amd64
os.name: Linux
os.version: 3.16.0-25-generic
"""
EXPECTED_C_METADATA = {
'benchmark_language': 'C',
'benchmark_size': 'small',
}
EXPECTED_JAVA_METADATA = {
'benchmark_language': 'Java',
'benchmark_size': 'small',
'java.vendor': 'Oracle Corporation',
'os.version': '3.16.0-25-generic',
'os.arch': 'amd64',
'os.name': 'Linux',
'java.version': '1.7.0_75',
}
EXPECTED_RESULT_C = [
sample.Sample(metric='Composite Score', value=1596.04,
unit='Mflops', metadata=EXPECTED_C_METADATA),
sample.Sample(metric='FFT (N=1024)', value=1568.64,
unit='Mflops', metadata=EXPECTED_C_METADATA),
sample.Sample(metric='SOR (100 x 100)', value=1039.98,
unit='Mflops', metadata=EXPECTED_C_METADATA),
sample.Sample(metric='MonteCarlo', value=497.64,
unit='Mflops', metadata=EXPECTED_C_METADATA),
sample.Sample(metric='Sparse matmult (N=1000, nz=5000)', value=1974.39,
unit='Mflops', metadata=EXPECTED_C_METADATA),
sample.Sample(metric='LU (M=100, N=100)', value=2899.56,
unit='Mflops', metadata=EXPECTED_C_METADATA),
]
EXPECTED_RESULT_JAVA = [
sample.Sample(metric='Composite Score', value=1716.3662351463677,
unit='Mflops', metadata=EXPECTED_JAVA_METADATA),
sample.Sample(metric='FFT (1024)', value=1000.1380057152871,
unit='Mflops', metadata=EXPECTED_JAVA_METADATA),
sample.Sample(metric='SOR (100x100)', value=1353.1987180103354,
unit='Mflops', metadata=EXPECTED_JAVA_METADATA),
sample.Sample(metric='Monte Carlo', value=727.7138820888014,
unit='Mflops', metadata=EXPECTED_JAVA_METADATA),
sample.Sample(metric='Sparse matmult (N=1000, nz=5000)',
value=1495.40225150659, unit='Mflops',
metadata=EXPECTED_JAVA_METADATA),
sample.Sample(metric='LU (100x100)', value=4005.3783184108247,
unit='Mflops', metadata=EXPECTED_JAVA_METADATA),
]
class Scimark2BenchmarkTestCase(unittest.TestCase, test_util.SamplesTestMixin):
    """Verify that scimark2 output parsing yields the expected samples."""

    def _assert_parsed(self, output, expected):
        # Shared assertion helper for all three parsing scenarios.
        parsed = scimark2_benchmark.ParseResults(output)
        self.assertSampleListsEqualUpToTimestamp(parsed, expected)

    def testParseResultsC(self):
        self._assert_parsed(TEST_OUTPUT_C, EXPECTED_RESULT_C)

    def testParseResultsJava(self):
        self._assert_parsed(TEST_OUTPUT_JAVA, EXPECTED_RESULT_JAVA)

    def METHOD_NAME(self):
        self._assert_parsed(TEST_OUTPUT_C + TEST_OUTPUT_JAVA,
                            EXPECTED_RESULT_C + EXPECTED_RESULT_JAVA)


if __name__ == '__main__':
    unittest.main()
# This file is part of Hypothesis, which may be found at
# https://github.com/HypothesisWorks/hypothesis/
#
# Copyright the Hypothesis Authors.
# Individual contributors are listed in AUTHORS.rst and the git log.
#
# This Source Code Form is subject to the terms of the Mozilla Public License,
# v. 2.0. If a copy of the MPL was not distributed with this file, You can
# obtain one at https://mozilla.org/MPL/2.0/.
import functools
import pytest
from hypothesis import find, given
from hypothesis.errors import InvalidArgument
from hypothesis.internal.validation import check_type
from hypothesis.strategies import (
SearchStrategy as ActualSearchStrategy,
binary,
booleans,
data,
dictionaries,
floats,
frozensets,
integers,
lists,
nothing,
recursive,
sets,
text,
)
from hypothesis.strategies._internal.strategies import check_strategy
from tests.common.utils import fails_with
def test_errors_when_given_varargs():
    """@given with a positional strategy rejects *args-only test functions."""
    @given(integers())
    def has_varargs(*args):
        pass

    with pytest.raises(InvalidArgument) as e:
        has_varargs()
    assert "varargs" in e.value.args[0]


def test_varargs_without_positional_arguments_allowed():
    # Keyword-based @given is fine even if the function also takes *args.
    @given(somearg=integers())
    def has_varargs(somearg, *args):
        pass


def test_errors_when_given_varargs_and_kwargs_with_positional_arguments():
    @given(integers())
    def has_varargs(*args, **kw):
        pass

    with pytest.raises(InvalidArgument) as e:
        has_varargs()
    assert "varargs" in e.value.args[0]


def test_varargs_and_kwargs_without_positional_arguments_allowed():
    @given(somearg=integers())
    def has_varargs(*args, **kw):
        pass


def test_bare_given_errors():
    # @given() with no strategies at all is rejected at call time.
    @given()
    def test():
        pass

    with pytest.raises(InvalidArgument):
        test()
def test_errors_on_unwanted_kwargs():
    """Strategies for parameters the function doesn't accept are rejected."""
    @given(hello=int, world=int)
    def greet(world):
        pass

    with pytest.raises(InvalidArgument):
        greet()


def test_errors_on_too_many_positional_args():
    @given(integers(), int, int)
    def foo(x, y):
        pass

    with pytest.raises(InvalidArgument):
        foo()


def test_errors_on_any_varargs():
    @given(integers())
    def oops(*args):
        pass

    with pytest.raises(InvalidArgument):
        oops()


def test_can_put_arguments_in_the_middle():
    # A strategy may target a middle parameter; the rest come from the caller.
    @given(y=integers())
    def foo(x, y, z):
        pass

    foo(1, 2)


def test_float_ranges():
    # NaN bounds and inverted bounds are both invalid.
    with pytest.raises(InvalidArgument):
        floats(float("nan"), 0).example()
    with pytest.raises(InvalidArgument):
        floats(1, -1).example()


def test_float_range_and_allow_nan_cannot_both_be_enabled():
    with pytest.raises(InvalidArgument):
        floats(min_value=1, allow_nan=True).example()
    with pytest.raises(InvalidArgument):
        floats(max_value=1, allow_nan=True).example()


def test_float_finite_range_and_allow_infinity_cannot_both_be_enabled():
    with pytest.raises(InvalidArgument):
        floats(0, 1, allow_infinity=True).example()


def test_does_not_error_if_min_size_is_bigger_than_default_size():
    lists(integers(), min_size=50).example()
    sets(integers(), min_size=50).example()
    frozensets(integers(), min_size=50).example()
    lists(integers(), min_size=50, unique=True).example()


def test_list_unique_and_unique_by_cannot_both_be_enabled():
    @given(lists(integers(), unique=True, unique_by=lambda x: x))
    def boom(t):
        pass

    with pytest.raises(InvalidArgument) as e:
        boom()
    # The error message should mention both conflicting options.
    assert "unique " in e.value.args[0]
    assert "unique_by" in e.value.args[0]


def test_min_before_max():
    with pytest.raises(InvalidArgument):
        integers(min_value=1, max_value=0).validate()


def test_filter_validates():
    # .filter() must not mask validation errors of the underlying strategy.
    with pytest.raises(InvalidArgument):
        integers(min_value=1, max_value=0).filter(bool).validate()


def test_recursion_validates_base_case():
    with pytest.raises(InvalidArgument):
        recursive(integers(min_value=1, max_value=0), lists).validate()


def test_recursion_validates_recursive_step():
    with pytest.raises(InvalidArgument):
        recursive(integers(), lambda x: lists(x, min_size=3, max_size=1)).validate()
# The three tests below declare invalid @given signatures (defaults colliding
# with supplied strategies, or too many strategies); fails_with asserts that
# running them raises InvalidArgument.
@fails_with(InvalidArgument)
@given(x=integers())
def test_stuff_keyword(x=1):
    pass


@fails_with(InvalidArgument)
@given(integers())
def test_stuff_positional(x=1):
    pass


@fails_with(InvalidArgument)
@given(integers(), integers())
def METHOD_NAME(x):
    pass


def test_given_warns_on_use_of_non_strategies():
    # Passing a type (bool) instead of a strategy is an error.
    @given(bool)
    def test(x):
        pass

    with pytest.raises(InvalidArgument):
        test()


def test_given_warns_when_mixing_positional_with_keyword():
    @given(booleans(), y=booleans())
    def test(x, y):
        pass

    with pytest.raises(InvalidArgument):
        test()


def test_cannot_find_non_strategies():
    with pytest.raises(InvalidArgument):
        find(bool, bool)
@pytest.mark.parametrize(
    "strategy",
    [
        functools.partial(lists, elements=integers()),
        functools.partial(dictionaries, keys=integers(), values=integers()),
        text,
        binary,
    ],
)
@pytest.mark.parametrize("min_size,max_size", [(0, "10"), ("0", 10)])
def test_valid_sizes(strategy, min_size, max_size):
    """Non-integer min_size/max_size arguments are rejected when the test runs."""
    @given(strategy(min_size=min_size, max_size=max_size))
    def test(x):
        pass

    with pytest.raises(InvalidArgument):
        test()


def test_check_type_with_tuple_of_length_two():
    def type_checker(x):
        check_type((int, str), x, "x")

    # Both tuple members are accepted; anything else raises.
    type_checker(1)
    type_checker("1")
    with pytest.raises(InvalidArgument, match="Expected one of int, str but got "):
        type_checker(1.0)
def test_validation_happens_on_draw():
@given(data())
def test(data):
data.draw(integers().flatmap(lambda _: lists(nothing(), min_size=1)))
with pytest.raises(InvalidArgument, match="has no values"):
test()
class SearchStrategy:
"""Not the SearchStrategy type you were looking for."""
def check_type_(*args):
    # Thin positional passthrough so tests exercise check_type directly.
    return check_type(*args)
def test_check_type_suggests_check_strategy():
    """check_type asserts when accidentally given the real SearchStrategy type."""
    check_type_(SearchStrategy, SearchStrategy(), "this is OK")
    with pytest.raises(AssertionError, match="use check_strategy instead"):
        check_type_(ActualSearchStrategy, None, "SearchStrategy assertion")
def check_strategy_(*args):
    # Thin positional passthrough so tests exercise check_strategy directly.
    return check_strategy(*args)
def test_check_strategy_might_suggest_sampled_from():
    """check_strategy suggests st.sampled_from only for list/tuple arguments."""
    with pytest.raises(InvalidArgument) as excinfo:
        check_strategy_("not a strategy")
    assert "sampled_from" not in str(excinfo.value)
    with pytest.raises(InvalidArgument, match="such as st.sampled_from"):
        check_strategy_([1, 2, 3])
    with pytest.raises(InvalidArgument, match="such as st.sampled_from"):
        check_strategy_((1, 2, 3))
    check_strategy_(integers(), "passes for our custom coverage check")
"""Test suite for testing the ForbiddenPythonSyntaxChecker."""
import astroid
import pylint.testutils
from astroid import nodes
from python_ta.checkers.forbidden_python_syntax_checker import (
ForbiddenPythonSyntaxChecker,
)
class TestForbiddenPythonSyntaxCheckerDisallowedsyntax(pylint.testutils.CheckerTestCase):
    """With Break/Continue/Comprehension/For/While disallowed in CONFIG, each
    construct must be reported via a forbidden-python-syntax message."""
    CHECKER_CLASS = ForbiddenPythonSyntaxChecker
    CONFIG = {"disallowed_python_syntax": ["Break", "Continue", "Comprehension", "For", "While"]}
    def set_up(self) -> None:
        """Perform the set up before each test case executes."""
        # NOTE(review): pytest's auto-invoked hook is setup_method(); nothing in
        # this file calls set_up() — confirm it is actually exercised.
        self.setup_method()
    def METHOD_NAME(self) -> None:
        """Test that the checker correctly reports a break statement in the code when its usage is
        disallowed.
        """
        src = """
        for i in range(0, 10):
            break
        """
        mod = astroid.parse(src)
        break_node, *_ = mod.nodes_of_class(nodes.Break)
        name = break_node.__class__.__name__
        with self.assertAddsMessages(
            pylint.testutils.MessageTest(
                msg_id="forbidden-python-syntax", node=break_node, args=name
            ),
            ignore_position=True,
        ):
            self.checker.visit_default(break_node)
    def test_disallow_continue_in_code(self) -> None:
        """Test that the checker correctly reports a continue statement in the code when its usage
        is disallowed.
        """
        src = """
        for i in range(0, 10):
            continue
        """
        mod = astroid.parse(src)
        continue_node, *_ = mod.nodes_of_class(nodes.Continue)
        name = continue_node.__class__.__name__
        with self.assertAddsMessages(
            pylint.testutils.MessageTest(
                msg_id="forbidden-python-syntax", node=continue_node, args=name
            ),
            ignore_position=True,
        ):
            self.checker.visit_default(continue_node)
    def test_disallow_comprehension_in_code(self) -> None:
        """Test that the checker correctly reports a comprehension in the code when its usage is
        disallowed.
        """
        src = """
        comp = [i ** 2 for i in range(1, 11)]
        """
        mod = astroid.parse(src)
        comprehension_node, *_ = mod.nodes_of_class(nodes.Comprehension)
        name = comprehension_node.__class__.__name__
        with self.assertAddsMessages(
            pylint.testutils.MessageTest(
                msg_id="forbidden-python-syntax", node=comprehension_node, args=name
            ),
            ignore_position=True,
        ):
            self.checker.visit_default(comprehension_node)
    def test_disallow_for_loop_in_code(self) -> None:
        """Test that the checker correctly reports a for loop in the code when its usage is
        disallowed.
        """
        src = """
        for i in range(0, 10):
            print(i)
        """
        mod = astroid.parse(src)
        for_node, *_ = mod.nodes_of_class(nodes.For)
        name = for_node.__class__.__name__
        with self.assertAddsMessages(
            pylint.testutils.MessageTest(
                msg_id="forbidden-python-syntax", node=for_node, args=name
            ),
            ignore_position=True,
        ):
            self.checker.visit_default(for_node)
    def test_disallow_while_loop_in_code(self) -> None:
        """Test that the checker correctly reports a while loop in the code when its usage is
        disallowed.
        """
        src = """
        count = 10
        while count > -1:
            count -= 1
        """
        mod = astroid.parse(src)
        while_node, *_ = mod.nodes_of_class(nodes.While)
        name = while_node.__class__.__name__
        with self.assertAddsMessages(
            pylint.testutils.MessageTest(
                msg_id="forbidden-python-syntax", node=while_node, args=name
            ),
            ignore_position=True,
        ):
            self.checker.visit_default(while_node)
class TestForbiddenPythonSyntaxCheckerAllowedsyntax(pylint.testutils.CheckerTestCase):
    """With an empty CONFIG (nothing disallowed), no construct may be reported."""
    CHECKER_CLASS = ForbiddenPythonSyntaxChecker
    CONFIG = {}
    def set_up(self) -> None:
        """Perform the set up before each test case executes."""
        # NOTE(review): pytest's auto-invoked hook is setup_method(); nothing in
        # this file calls set_up() — confirm it is actually exercised.
        self.setup_method()
    def test_allow_break_in_code(self) -> None:
        """Test that the checker correctly doesn't report a break statement when its usage is
        allowed.
        """
        src = """
        for i in range(0, 10):
            break
        """
        mod = astroid.parse(src)
        break_node, *_ = mod.nodes_of_class(nodes.Break)
        with self.assertNoMessages():
            self.checker.visit_default(break_node)
    def test_allow_continue_in_code(self) -> None:
        """Test that the checker correctly doesn't report a continue statement when its usage is
        allowed.
        """
        src = """
        for i in range(0, 10):
            continue
        """
        mod = astroid.parse(src)
        continue_node, *_ = mod.nodes_of_class(nodes.Continue)
        with self.assertNoMessages():
            self.checker.visit_default(continue_node)
    def test_allow_comprehension_in_code(self) -> None:
        """Test that the checker correctly doesn't report a comprehension when its usage is allowed."""
        src = """
        comp = [i ** 2 for i in range(1, 11)]
        """
        mod = astroid.parse(src)
        comprehension_node, *_ = mod.nodes_of_class(nodes.Comprehension)
        with self.assertNoMessages():
            self.checker.visit_default(comprehension_node)
    def test_allow_for_in_code(self) -> None:
        """Test that the checker correctly doesn't report a for loop when its usage is allowed."""
        src = """
        for i in range(0, 10):
            print(i)
        """
        mod = astroid.parse(src)
        for_node, *_ = mod.nodes_of_class(nodes.For)
        with self.assertNoMessages():
            self.checker.visit_default(for_node)
    def test_allow_while_in_code(self) -> None:
        """Test that the checker correctly doesn't report a while loop when its usage is allowed."""
        src = """
        count = 10
        while count > -1:
            count -= 1
        """
        mod = astroid.parse(src)
        while_node, *_ = mod.nodes_of_class(nodes.While)
        with self.assertNoMessages():
            self.checker.visit_default(while_node)
if __name__ == "__main__":
    # Allow running this module directly by delegating to pytest.
    import pytest
    pytest.main(["test_forbidden_python_syntax_checker.py"])
# Helium atom with a combination of two orbitals and simple jastrow factor
# Uses automatic differentiation via the autograd package to
# compute spatial and parameter derivatives
import autograd.numpy as np
from autograd import hessian,grad
from stats import averager
from run_qmc import run_qmc
# Point values used in test_RotatedSPOs_LCAO.cpp
# QMC values used to validate tests/molecules/He_param/He_orb_rot_param_grad_legacy
class Wavefunction:
    """Helium two-electron trial wavefunction: a product of two rotated
    orbitals (1s STO and r*exp(-r) STO) with an optional electron-electron
    Jastrow factor.

    All math is written with autograd.numpy so that spatial derivatives
    (gradients/hessians) and parameter derivatives are obtained automatically.
    The variational parameters VP are [theta1, theta2] (+ [B] with Jastrow).
    """
    def __init__(self, use_jastrow=False):
        # 2x2 orbital coefficient matrix; identity = no pre-applied rotation.
        self.coeff = np.eye(2)
        self.use_jastrow = use_jastrow
        # Spatial derivatives
        self.hess0 = hessian(self.psi_internal, 0)
        self.hess1 = hessian(self.psi_internal, 1)
        self.hess_log_0 = hessian(self.log_psi_internal, 0)
        self.hess_log_1 = hessian(self.log_psi_internal, 1)
        self.grad0 = grad(self.psi_internal, 0)
        self.grad1 = grad(self.psi_internal, 1)
        # Derivative wrt parameters
        self.dpsi = grad(self.psi, 1)
        self.dlocal_energy = grad(self.local_energy, 1)
    def set_coeff(self, coeff):
        # Replace the orbital coefficient matrix (e.g. with rot_mat_size2(theta)).
        self.coeff = coeff
    def mag(self, r):
        # Euclidean norm of a 3-vector, spelled out so autograd can trace it.
        return np.sqrt(r[0]*r[0] + r[1]*r[1] + r[2]*r[2])
    # normalized STO's correspond to the 'normalized="no"' part of the input
    # <atomicBasisSet type="STO" elementType="He" normalized="no">
    def sto_norm1(self, zeta):
        # Normalization of the n=1 Slater-type orbital.
        return 2*np.sqrt(zeta**3)
    def sto_norm2(self, zeta):
        # Normalization of the n=2 Slater-type orbital.
        return 2*np.sqrt(3)*np.sqrt(zeta**5)/3
    def orb1(self, R):
        # 1s STO with He nuclear charge Z=2, including s-wave spherical harmonic.
        r = self.mag(R)
        Z = 2.0
        y00 = 1/np.sqrt(4 * np.pi)
        snorm1 = self.sto_norm1(Z)
        return y00 * snorm1 * np.exp(-Z*r)
    def orb2(self, R):
        # n=2 STO (r * exp(-zeta r)) with zeta=1.
        r = self.mag(R)
        zeta = 1.0
        y00 = 1/np.sqrt(4*np.pi)
        snorm2 = self.sto_norm2(zeta)
        return snorm2* y00 * r* np.exp(-zeta*r)
    def jastrow(self, r12, B):
        # Pade-form e-e Jastrow; A=0.5 satisfies the e-e cusp condition for
        # unlike spins, B is the variational parameter.
        A = 0.5
        return np.exp(A*r12/(1.0 + B*r12) - A/B)
    def rot_orb(self, R, theta):
        # Orbital rotated by angle theta in the 2-orbital space spanned by coeff.
        c00 = self.coeff[0,0] * np.cos(theta) + self.coeff[1,0] * np.sin(theta)
        c01 = self.coeff[0,1] * np.cos(theta) + self.coeff[1,1] * np.sin(theta)
        return self.orb1(R) * c00 + self.orb2(R) * c01
    def psi_no_jastrow(self, r1, r2, VP):
        # Product of the two rotated one-electron orbitals (no Jastrow).
        theta1 = VP[0]
        theta2 = VP[1]
        o1 = self.rot_orb(r1,theta1)
        o2 = self.rot_orb(r2,theta2)
        return o1*o2
    def psi_with_jastrow(self, r1, r2, VP):
        # NOTE(review): r12 here is the *vector* r2-r1, whereas psi_internal
        # passes the magnitude to jastrow(); confirm whether this variant is
        # still used anywhere before relying on it.
        theta1 = VP[0]
        theta2 = VP[1]
        B = VP[2]
        o1 = self.rot_orb(r1,theta1)
        o2 = self.rot_orb(r2,theta2)
        r12 = r2 - r1
        j = self.jastrow(r12, B)
        return o1*o2*j
    def psi(self, r, VP):
        # Uniform interface for run_qmc: r is a (2,3) array of both electrons.
        r1 = r[0,:]
        r2 = r[1,:]
        return self.psi_internal(r1, r2, VP)
    # It's easier to take spatial derivatives if each particle is a separate argument.
    # Hence the use of psi as a uniform interface to run_qmc, and psi_internal for spatial derivatives.
    def psi_internal(self, r1, r2, VP):
        theta1 = VP[0]
        theta2 = VP[1]
        j = 1.0
        if self.use_jastrow:
            B = VP[2]
            r12 = self.mag(r2 - r1)
            j = self.jastrow(r12, B)
        o1 = self.rot_orb(r1,theta1)
        o2 = self.rot_orb(r2,theta2)
        return o1*o2*j
    def log_psi_internal(self, r1, r2, B):
        return np.log(self.psi_internal(r1, r2, B))
    def lap0(self, r1, r2, VP):
        # Laplacian of log(psi) w.r.t. particle 0 (trace of the hessian).
        h0 = np.sum(np.diag(self.hess_log_0(r1, r2, VP)))
        return h0
    def lap1(self, r1, r2, VP):
        # Laplacian of log(psi) w.r.t. particle 1.
        h1 = np.sum(np.diag(self.hess_log_1(r1, r2, VP)))
        return h1
    def lap(self, r1, r2, VP):
        # Total laplacian of psi (not log psi) over both particles.
        h0 = np.sum(np.diag(self.hess0(r1, r2, VP)))
        h1 = np.sum(np.diag(self.hess1(r1, r2, VP)))
        return h0 + h1
    def en_pot(self, r1, r2):
        # Electron-nucleus Coulomb attraction for He (Z=2).
        r1_mag = self.mag(r1)
        r2_mag = self.mag(r2)
        Z = 2.0
        return -Z/r1_mag - Z/r2_mag
    def ee_pot(self, r1, r2):
        # Electron-electron Coulomb repulsion.
        r12 = r2 - r1
        r12_mag = self.mag(r12)
        return 1.0/r12_mag
    def local_energy(self, r, VP):
        # E_L = -0.5 * (lap psi)/psi + V, evaluated at configuration r.
        r1 = r[0,:]
        r2 = r[1,:]
        pot = self.en_pot(r1, r2) + self.ee_pot(r1, r2)
        psi_val = self.psi_internal(r1, r2, VP)
        lapl = self.lap(r1, r2, VP)
        h = -0.5*lapl/psi_val + pot
        return h
def rot_mat_size2(theta):
    """Return the 2x2 rotation matrix for angle *theta* (radians)."""
    c, s = np.cos(theta), np.sin(theta)
    return np.array([[c, s],
                     [-s, c]])
def print_wf_values(theta1=0.0, theta2=0.0, use_j=False, B=0.0):
    """Print reference values (wavefunction, gradient, log-psi laplacians,
    local energy, and parameter derivatives) at one fixed pair of electron
    positions.

    :param theta1: rotation angle for electron 1's orbital
    :param theta2: rotation angle for electron 2's orbital
    :param use_j: include the electron-electron Jastrow factor
    :param B: Jastrow variational parameter (only used when use_j is True)
    """
    wf = Wavefunction(use_jastrow=use_j)
    # Adjust numpy output so arrays are printed with higher precision
    float_formatter = "{:.15g}".format
    np.set_printoptions(formatter={'float_kind':float_formatter})
    if use_j:
        VP = np.array([theta1, theta2, B])
        print("Values for theta = ",theta1,theta2," and jastrow B = ",B)
    else:
        VP = np.array([theta1, theta2])
        print("Values for theta = ",theta1,theta2," and no jastrow")
    # Fixed electron positions matching the C++ unit test inputs.
    r1 = np.array([1.0, 2.0, 3.0])
    r2 = np.array([0.0, 1.1, 2.2])
    r = np.zeros((2,3))
    r[0,:] = r1
    r[1,:] = r2
    psi_val = wf.psi(r, VP)
    print(" wf = ",psi_val," log wf = ",np.log(np.abs(psi_val)))
    g0 = wf.grad0(r1, r2, VP)/psi_val
    print(" grad/psi for particle 0 = ",g0[0],g0[1],g0[2])
    # Using the laplacian of log psi to match internal QMCPACK values
    lap_0 = wf.lap0(r1, r2, VP)
    print(" laplacian of log psi for particle 0 = ",lap_0)
    lap_1 = wf.lap1(r1, r2, VP)
    print(" laplacian for log psi particle 1 = ",lap_1)
    eloc = wf.local_energy(r, VP)
    print(" local energy = ",eloc)
    dp = wf.dpsi(r, VP)
    print(" parameter derivative of log psi = ",dp / psi_val)
    deloc = wf.dlocal_energy(r, VP)
    print(" parameter derivative of local energy = ",deloc)
    print("")
# Generate the wavefunction values for a single set of electron positions
# used in test_RotatedSPOs_LCAO.cpp
def METHOD_NAME():
    """Print wavefunction reference values used in test_RotatedSPOs_LCAO.cpp.

    The electron positions are hard-coded inside print_wf_values, so the
    previously duplicated local r1/r2 arrays (never read) have been removed.
    """
    print_wf_values(theta1=0.1, theta2=0.2)
    print_wf_values(theta1=0.0, theta2=0.0)
    print_wf_values(theta1=0.0, theta2=0.0, use_j=True, B=0.1)
def run_qmc_parameter_derivatives():
    """Run a VMC calculation with the Jastrow wavefunction and print averaged
    parameter derivatives (reference data for the He_param legacy test)."""
    wf = Wavefunction(use_jastrow=True)
    theta = 0.1
    wf.set_coeff(rot_mat_size2(theta))
    print("Initial rotation matrix coefficients for theta = ",theta)
    print(wf.coeff)
    # Apply the rotation to the coefficients, then compute the derivative at zero angle
    # to match how QMCPACK computes the derivative of the rotation parameters.
    # Doesn't matter for 2x2 case, but will matter for larger sizes.
    theta1 = 0.0
    theta2 = 0.0
    #VP = np.array([theta1, theta2])
    beta = 0.2
    VP = np.array([theta1, theta2, beta])
    r = np.array([[1.0, 2.0, 3.0],
                  [0.0, 1.1, 2.2]])
    run_qmc(r, wf, VP)
# Some results from run_qmc_parameter_derivatives
# Run took about 10 minutes on laptop
# nblock=20, nstep=1000, nsubstep=10
# parameter values = [0.1 0.1]
# parameter derivatives = [-0.20164722 -0.18347461]
# parameter derivative errors = [0.01201481 0.01314164]
# Run took about 40 minutes on laptop
# nblock=40, nstep=2000, nsubstep=10
# parameter values = [0.1 0.1]
# parameter derivatives = [-0.2204924 -0.21471184]
# parameter derivative errors = [0.00493837 0.00571082]
# Run took about 20 minutes on laptop
# nblock=20, nstep=1000, nsubstep=10
# Initial rotation matrix coefficients from theta = 0.1
# parameter values = [0. 0. 0.2]
# parameter derivatives = [ 0.10530185 0.08058737 -0.11595301]
# parameter derivative errors = [0.02598407 0.02115345 0.01133443]
if __name__=='__main__':
    # Run the (slow) VMC parameter-derivative calculation; see the timing
    # notes in the comments above.
    #print_point_values()
    run_qmc_parameter_derivatives()
|
import pytest
from pydantic import ValidationError
from ert.analysis import UpdateConfiguration
from ert.analysis.row_scaling import RowScaling
def test_configuration():
    """A fully specified update step validates against its obs/param context."""
    config = UpdateConfiguration(
        update_steps=[
            {
                "name": "update_step_name",
                "observations": ["MY_OBS"],
                "parameters": ["MY_PARAMETER"],
                "row_scaling_parameters": [("MY_ROW_SCALING", RowScaling())],
            }
        ]
    )
    config.context_validate(["MY_OBS"], ["MY_PARAMETER", "MY_ROW_SCALING"])
@pytest.mark.parametrize(
    "config, expectation",
    [
        [
            {"name": "not_relevant", "observations": ["not_relevant"]},
            pytest.raises(ValidationError, match="Must provide at least one parameter"),
        ],
        [
            {"name": "not_relevant", "parameters": ["not_relevant"]},
            pytest.raises(ValidationError, match="update_steps -> 0 -> observations"),
        ],
        [
            {
                "name": "not_relevant",
                "observations": ["not_relevant"],
                "parameters": "relevant",
            },
            pytest.raises(ValidationError, match="value is not a valid list"),
        ],
        [
            {
                "name": "not_relevant",
                "observations": "relevant",
                "parameters": ["not_relevant"],
            },
            pytest.raises(ValidationError, match="value is not a valid list"),
        ],
        [
            {
                "name": "not_relevant",
                "observations": ["relevant"],
                "parameters": [],
            },
            pytest.raises(ValidationError, match="Must provide at least one parameter"),
        ],
        [
            {
                "name": "not_relevant",
                "observations": [],
                "parameters": ["not_relevant"],
            },
            pytest.raises(
                ValidationError, match=" ensure this value has at least 1 item"
            ),
        ],
        [
            {
                "observations": ["not_relevant"],
                "parameters": ["not_relevant"],
            },
            pytest.raises(ValidationError, match="update_steps -> 0 -> name"),
        ],
    ],
)
def test_missing(config, expectation):
    """Each malformed update-step dict raises the matching ValidationError."""
    with expectation:
        UpdateConfiguration(update_steps=[config])
@pytest.mark.parametrize(
    "config, expectation",
    [
        [
            [
                {
                    "name": "not_relevant",
                    "observations": ["not_relevant"],
                    "parameters": "not_list",
                },
            ],
            pytest.raises(
                ValidationError, match="1 validation error for UpdateConfiguration"
            ),
        ],
        [
            [
                {
                    "name": "not_relevant",
                    "observations": ["not_relevant"],
                    "parameters": [],
                },
                {"name": "not_relevant", "parameters": ["not_relevant"]},
            ],
            pytest.raises(
                ValidationError, match="2 validation errors for UpdateConfiguration"
            ),
        ],
        [
            [
                {},
                {"name": "not_relevant", "observations": ["not_relevant"]},
            ],
            pytest.raises(
                ValidationError, match="3 validation errors for UpdateConfiguration"
            ),
        ],
    ],
)
def test_missing_multiple(config, expectation):
    """Errors from several bad update steps are aggregated with a count."""
    with expectation:
        UpdateConfiguration(update_steps=config)
@pytest.mark.parametrize(
    "input_obs",
    [
        ["OBS"],
        [{"name": "OBS"}],
        [{"name": "OBS", "index_list": [1, 2, 3]}],
        [["OBS", [1, 2, 3]]],
        [("OBS", [1, 2, 3])],
        ["OBS_1", ["OBS", [1, 2, 3]]],
    ],
)
def METHOD_NAME(input_obs):
    """All supported observation spellings (str, dict, pair) are accepted."""
    config = UpdateConfiguration(
        update_steps=[
            {
                "name": "not_relevant",
                "observations": input_obs,
                "parameters": ["not_relevant"],
            }
        ]
    )
    config.context_validate(["OBS", "OBS_1"], ["not_relevant"])
def test_user_setup():
    """A realistic mixed-style user configuration constructs without errors."""
    test_input = [
        {
            "name": "update_step_NAME",
            "observations": [
                "WOPR_OP1_72",
                ("MY_INDEX_OBS", [1, 2, 3]),
                # NOTE(review): parentheses here make a plain string, not a tuple.
                ("JUST OBS"),
            ],
            "parameters": ["SNAKE_OIL_PARAM", ("INDEX_PARAMETER", [1, 2, 3])],
            "row_scaling_parameters": [
                ("ROW_SCALE", RowScaling()),
                ("ROW_SCALE", RowScaling(), [1, 2, 3]),
            ],
        }
    ]
    UpdateConfiguration(update_steps=test_input)
1,756 | min install resolver | """
resolver configuration to match portage behaviour (misbehaviour in a few spots)
"""
__all__ = ["upgrade_resolver", "min_install_resolver"]
from functools import partial
from itertools import chain
from ..repository import misc, multiplex
from ..resolver import plan
from ..restrictions import packages, values
from .atom import atom
def upgrade_resolver(
    vdbs,
    dbs,
    verify_vdb=True,
    nodeps=False,
    force_replace=False,
    resolver_cls=plan.merge_plan,
    **kwds,
):
    """
    generate and configure a resolver for upgrading all processed nodes.
    :param vdbs: list of :obj:`pkgcore.repository.prototype.tree` instances
        that represents the livefs
    :param dbs: list of :obj:`pkgcore.repository.prototype.tree` instances
        representing sources of pkgs
    :param verify_vdb: should we stop resolving once we hit the vdb,
        or do full resolution?
    :param nodeps: if True, strip dependency info from all repos
    :param force_replace: if True, wrap the resolver class so matched
        installed pkgs are replaced
    :param resolver_cls: resolver class to instantiate
    :return: :obj:`pkgcore.resolver.plan.merge_plan` instance
    """
    # upgrades always chase the highest visible version
    f = plan.merge_plan.prefer_highest_version_strategy
    # hack.
    if nodeps:
        vdbs = list(map(misc.nodeps_repo, vdbs))
        dbs = list(map(misc.nodeps_repo, dbs))
    elif not verify_vdb:
        # trust the installed set: skip dep resolution for the vdb only
        vdbs = list(map(misc.nodeps_repo, vdbs))
        dbs = list(dbs)
    if force_replace:
        resolver_cls = generate_replace_resolver_kls(resolver_cls)
    # source repos are listed before the livefs (compare min_install_resolver,
    # which reverses the order and prefers reuse)
    return resolver_cls(dbs + vdbs, plan.pkg_sort_highest, f, **kwds)
def downgrade_resolver(
    vdbs,
    dbs,
    verify_vdb=True,
    nodeps=False,
    force_replace=False,
    resolver_cls=plan.merge_plan,
    **kwds,
):
    """
    generate and configure a resolver for downgrading all processed nodes.
    :param vdbs: list of :obj:`pkgcore.repository.prototype.tree` instances
        that represents the livefs
    :param dbs: list of :obj:`pkgcore.repository.prototype.tree` instances
        representing sources of pkgs
    :param verify_vdb: should we stop resolving once we hit the vdb,
        or do full resolution?
    :param nodeps: if True, strip dependency info from all repos
    :param force_replace: if True, wrap the resolver class so matched
        installed pkgs are replaced
    :param resolver_cls: resolver class to instantiate
    :return: :obj:`pkgcore.resolver.plan.merge_plan` instance
    """
    # restriction matching every version >= something installed; fed to both
    # the downgrade strategy and each source repo's restrict wrapper
    restrict = packages.OrRestriction(
        *list(atom(f">={x.cpvstr}") for x in chain.from_iterable(vdbs))
    )
    f = partial(plan.merge_plan.prefer_downgrade_version_strategy, restrict)
    dbs = list(map(partial(misc.restrict_repo, restrict), dbs))
    # hack.
    if nodeps:
        vdbs = list(map(misc.nodeps_repo, vdbs))
        dbs = list(map(misc.nodeps_repo, dbs))
    elif not verify_vdb:
        # trust the installed set: skip dep resolution for the vdb only
        vdbs = list(map(misc.nodeps_repo, vdbs))
        dbs = list(dbs)
    if force_replace:
        resolver_cls = generate_replace_resolver_kls(resolver_cls)
    return resolver_cls(dbs + vdbs, plan.pkg_sort_highest, f, **kwds)
def METHOD_NAME(
    vdbs,
    dbs,
    verify_vdb=True,
    nodeps=False,
    force_replace=False,
    resolver_cls=plan.merge_plan,
    **kwds,
):
    """
    Resolver that tries to minimize the number of changes while installing.
    generate and configure a resolver that is focused on just
    installing requests- installs highest version it can build a
    solution for, but tries to avoid building anything not needed
    :param vdbs: list of :obj:`pkgcore.repository.prototype.tree` instances
        that represents the livefs
    :param dbs: list of :obj:`pkgcore.repository.prototype.tree` instances
        representing sources of pkgs
    :param verify_vdb: should we stop resolving once we hit the vdb,
        or do full resolution?
    :param nodeps: if True, strip dependency info from all repos
    :param force_replace: if True, wrap the resolver class so matched
        installed pkgs are replaced
    :param resolver_cls: resolver class to instantiate
    :return: :obj:`pkgcore.resolver.plan.merge_plan` instance
    """
    if nodeps:
        vdbs = list(map(misc.nodeps_repo, vdbs))
        dbs = list(map(misc.nodeps_repo, dbs))
    elif not verify_vdb:
        # trust the installed set: skip dep resolution for the vdb only
        vdbs = list(map(misc.nodeps_repo, vdbs))
        dbs = list(dbs)
    if force_replace:
        resolver_cls = generate_replace_resolver_kls(resolver_cls)
    # unlike upgrade_resolver, the livefs is searched first so already
    # installed pkgs are reused where possible
    return resolver_cls(
        vdbs + dbs, plan.pkg_sort_highest, plan.merge_plan.prefer_reuse_strategy, **kwds
    )
# Matches pkgs that are either not from the livefs, or are "non-real"
# virtual/* pkgs; shared by the replace resolver and empty_tree_merge_plan.
_vdb_restrict = packages.OrRestriction(
    packages.PackageRestriction("repo.livefs", values.EqualityMatch(False)),
    packages.AndRestriction(
        packages.PackageRestriction("category", values.StrExactMatch("virtual")),
        packages.PackageRestriction("package_is_real", values.EqualityMatch(False)),
    ),
)
class empty_tree_merge_plan(plan.merge_plan):
    """merge plan that defaults to non-livefs repos, resolving as though the
    livefs were empty."""
    _vdb_restriction = _vdb_restrict
    def __init__(self, dbs, *args, **kwds):
        """
        :param args: see :obj:`pkgcore.resolver.plan.merge_plan.__init__`
            for valid args
        :param kwds: see :obj:`pkgcore.resolver.plan.merge_plan.__init__`
            for valid args
        """
        super().__init__(dbs, *args, **kwds)
        # XXX *cough*, hack: rebuild default_dbs without any livefs repos.
        self.default_dbs = multiplex.tree(
            *[x for x in self.all_raw_dbs if not x.livefs]
        )
def generate_replace_resolver_kls(resolver_kls):
    """Wrap *resolver_kls* so every requested atom is and-ed with the vdb
    restriction, forcing installed matches to be replaced."""
    class replace_resolver(resolver_kls):
        overriding_resolver_kls = resolver_kls
        _vdb_restriction = _vdb_restrict
        def add_atoms(self, restricts, **kwds):
            # combine each request with the vdb restriction before delegating
            restricts = [
                packages.KeyedAndRestriction(self._vdb_restriction, x, key=x.key)
                for x in restricts
            ]
            return self.overriding_resolver_kls.add_atoms(self, restricts, **kwds)
    return replace_resolver
import time
from unittest.mock import Mock
import pytest
import remoulade
from remoulade import group
from remoulade.cancel import Cancel
from remoulade.middleware import Middleware, SkipMessage
from remoulade.state.backend import State, StateStatusesEnum
from remoulade.state.backends import PostgresBackend
from remoulade.state.middleware import MessageState, State
from tests.conftest import mock_func
class TestMessageState:
    """Class to test the middleware
    MessageState
    """
    def test_pending_state_message(self, stub_broker, state_middleware, do_work, frozen_datetime):
        """A freshly sent message is stored as Pending with its enqueue time."""
        msg = do_work.send()
        state = state_middleware.backend.get_state(msg.message_id)
        assert state.message_id == msg.message_id
        assert state.status == StateStatusesEnum.Pending
        assert state.enqueued_datetime.isoformat() == "2020-02-03T00:00:00+00:00"
    def test_success_state_message(self, stub_broker, stub_worker, state_middleware, frozen_datetime):
        """A processed message ends Success with enqueue/start/end timestamps."""
        @remoulade.actor
        def do_work():
            frozen_datetime.tick(delta=15)
        stub_broker.declare_actor(do_work)
        stub_worker.pause()
        msg = do_work.send()
        frozen_datetime.tick(delta=15)
        stub_worker.resume()
        stub_broker.join(do_work.queue_name)
        stub_worker.join()
        state = state_middleware.backend.get_state(msg.message_id)
        assert state.status == StateStatusesEnum.Success
        assert state.enqueued_datetime.isoformat() == "2020-02-03T00:00:00+00:00"
        assert state.started_datetime.isoformat() == "2020-02-03T00:00:15+00:00"
        assert state.end_datetime.isoformat() == "2020-02-03T00:00:30+00:00"
    def test_started_state_message(self, stub_broker, stub_worker, state_middleware, frozen_datetime):
        """While an actor is running, its state is Started with a start time."""
        state_middleware.before_process_message, event_started = mock_func(state_middleware.before_process_message)
        state_middleware.backend.get_state, event_get_state = mock_func(state_middleware.backend.get_state)
        @remoulade.actor
        def wait():
            event_get_state.wait(10)
        stub_broker.declare_actor(wait)
        msg = wait.send()
        # We wait the message be started
        event_started.wait(10)
        state = state_middleware.backend.get_state(msg.message_id)
        assert state.status == StateStatusesEnum.Started
        assert state.started_datetime.isoformat() == "2020-02-03T00:00:00+00:00"
    def METHOD_NAME(self, stub_broker, state_middleware, stub_worker, frozen_datetime):
        """An actor that raises leaves the message in Failure with an end time."""
        @remoulade.actor
        def error():
            raise Exception()
        remoulade.declare_actors([error])
        msg = error.send()
        stub_broker.join(error.queue_name)
        stub_worker.join()
        state = state_middleware.backend.get_state(msg.message_id)
        assert state.status == StateStatusesEnum.Failure
        assert state.end_datetime.isoformat() == "2020-02-03T00:00:00+00:00"
    def test_cancel_state_message(self, stub_broker, stub_worker, cancel_backend, state_middleware, do_work):
        """A cancelled message becomes Canceled and never gets an end time."""
        stub_broker.add_middleware(Cancel(backend=cancel_backend))
        # Pause the worker so the message can be cancelled before it is
        # processed off the queue
        stub_worker.pause()
        msg = do_work.send()
        msg.cancel()
        stub_worker.resume()
        stub_broker.join(do_work.queue_name)
        stub_worker.join()
        state = state_middleware.backend.get_state(msg.message_id)
        assert state.status == StateStatusesEnum.Canceled
        # should not finish, since is cancelled
        assert state.end_datetime is None
    def test_skip_state_message(self, stub_broker, stub_worker, state_middleware, do_work):
        """A message skipped by middleware becomes Skipped with no end time."""
        class SkipMiddleware(Middleware):
            def before_process_message(self, broker, message):
                raise SkipMessage()
        stub_broker.add_middleware(SkipMiddleware())
        msg = do_work.send()
        stub_broker.join(do_work.queue_name)
        stub_worker.join()
        state = state_middleware.backend.get_state(msg.message_id)
        assert state.status == StateStatusesEnum.Skipped
        # should not finish, since is skipped and does not
        # try again
        assert state.end_datetime is None
    @pytest.mark.parametrize("ttl, result_type", [pytest.param(1000, State), pytest.param(1, type(None))])
    def test_expiration_data_backend(self, ttl, result_type, stub_broker, state_backend):
        """State entries disappear once their TTL elapses (non-Postgres only)."""
        # isinstance instead of type(...) == ... (idiomatic, subclass-safe)
        if isinstance(state_backend, PostgresBackend):
            pytest.skip("Skipping this test as there is no expiration on PostgresBackend")
        @remoulade.actor
        def wait():
            pass
        stub_broker.add_middleware(MessageState(backend=state_backend, state_ttl=ttl))
        stub_broker.declare_actor(wait)
        msg = wait.send()
        time.sleep(2)
        data = state_backend.get_state(msg.message_id)
        # with ttl=1 the entry has expired, so get_state returns None;
        # exact-type identity check since result_type may be NoneType
        assert type(data) is result_type
    @pytest.mark.parametrize("max_size", [200, 1000])
    def test_maximum_size_args(self, max_size, stub_broker, state_backend, do_work):
        """Message args are only stored when they fit within max_size."""
        # NOTE(review): this inner actor shadows the `do_work` fixture
        # parameter; confirm the fixture is still needed here.
        @remoulade.actor
        def do_work(x):
            return x
        state_backend.max_size = max_size
        stub_broker.add_middleware(MessageState(backend=state_backend))
        stub_broker.declare_actor(do_work)
        long_string = "a" * 256
        msg = do_work.send(long_string)
        args = state_backend.get_state(msg.message_id).args
        # args that exceed max_size are dropped rather than stored
        if max_size > 200:
            assert list(args) == [long_string]
        else:
            assert args is None
    def test_save_composition_id_in_message(self, stub_broker, state_middleware, do_work):
        """Messages run inside a group record the group's composition id."""
        msg = do_work.message()
        composition_id = group([msg]).run().group_id
        state = state_middleware.backend.get_state(msg.message_id)
        assert state.message_id == msg.message_id
        assert state.composition_id == composition_id
    @pytest.mark.parametrize("state_ttl", [0, -1, None])
    def test_backend_not_called_if_no_state_ttl(self, stub_broker, do_work, state_ttl):
        """With a falsy state_ttl, the backend is never asked to store state."""
        backend = Mock()
        stub_broker.add_middleware(MessageState(backend=backend, state_ttl=state_ttl))
        do_work.send()
        assert backend.set_state.call_count == 0
#!/usr/bin/env python3
import re
from typing import Literal, TypedDict
from uuid import UUID
from did_resolver import DIDDocument, DIDResolutionResult, ParsedDID, Resolvable
from shortuuid import ShortUUID
from typing_extensions import NotRequired
from .utils import HASH_LENGTH, hash_func, number_to_version_char, to_kebab_case
# https://github.com/skorokithakis/shortuuid/issues/68
# shortuuid decode() sorts alphabets before using it for translation
# this is incompatible with shortuuid implementation in javascript
# we therefore overwrite the _alphabet attribute so the answers are the same in javascript
FLICKR_BASE58_ALPHABET = "123456789abcdefghijkmnopqrstuvwxyzABCDEFGHJKLMNPQRSTUVWXYZ"
su = ShortUUID(alphabet=FLICKR_BASE58_ALPHABET)
# keep the unsorted alphabet so encoding matches the javascript implementation
su._alphabet = list(FLICKR_BASE58_ALPHABET)
UUIDV5_NAMESPACE = (
    "1b671a64-40d5-491e-99b0-da01ff1f3341"  # this NAMESPACE is reserved for uuid5
)
# version of the DID encoding scheme emitted by encode_did()
CURRENT_VERSION = 1
# full names of the resource types a cardstack DID can reference
CardstackIdentifierType = Literal[
    "PrepaidCardCustomization",
    "MerchantInfo",
    "SupplierInfo",
    "RewardRule",
]
# one-character codes embedded in the DID string, one per type above
SHORT_TYPE = Literal["p", "m", "s", "r"]
def did_type_to_short_type(cardstack_identifier: "CardstackIdentifierType") -> "SHORT_TYPE":
    """Map a full Cardstack identifier type name to its one-character code.

    Inverse of short_type_to_did_type().  Raises for unknown names instead of
    silently returning None (the previous if/elif chain fell through), making
    it consistent with its inverse.
    """
    mapping = {
        "PrepaidCardCustomization": "p",
        "MerchantInfo": "m",
        "SupplierInfo": "s",
        "RewardRule": "r",
    }
    if cardstack_identifier not in mapping:
        raise Exception(
            f'Invalid DID identifier: unknown type "{cardstack_identifier}"'
        )
    return mapping[cardstack_identifier]
def short_type_to_did_type(short_type: str) -> CardstackIdentifierType:
    """inverse of did_type_to_short_type()"""
    lookup = {
        "p": "PrepaidCardCustomization",
        "m": "MerchantInfo",
        "s": "SupplierInfo",
        "r": "RewardRule",
    }
    if short_type not in lookup:
        raise Exception(f'Invalid DID identifier: unknown type "{short_type}"')
    return lookup[short_type]
class CardstackIdentifier:
    """Parsed pieces of a did:cardstack DID: encoding version, resource type,
    and a flickrBase58-encoded unique id."""
    version: int
    type: CardstackIdentifierType
    uniqueId: str
    def __init__(
        self, version: int, type: CardstackIdentifierType, unique_id: str
    ) -> None:
        self.version = version
        self.type = type
        # accepts either a flickrBase58 string or a UUID; stored as flickrBase58
        self.uniqueId = normalize_unique_id(unique_id)
    def METHOD_NAME(self):
        """Serialize to a did:cardstack DID string with a trailing checksum."""
        version_string = number_to_version_char(self.version)
        result = f"{version_string}{did_type_to_short_type(self.type)}{self.uniqueId}"
        checksum = hash_func(result)
        return f"did:cardstack:{result}{checksum}"
    def __repr__(self):
        return f"""{self.__class__.__name__}(version: {self.version}, type: {self.type}, unique_id: {self.uniqueId})"""
def normalize_unique_id(candidate: str):
    """Return *candidate* as a flickrBase58 string, converting from a UUID
    string when necessary; raise for anything else."""
    if is_flickr_base_58(candidate):
        return candidate
    else:
        if is_valid_uuid(candidate):
            return su.encode(candidate)
        else:
            raise Exception(
                f'uniqueId must be a flickrBase58 or RFC4122 v4-compliant UUID. Was: "{candidate}"'
            )
def is_valid_uuid(uuid_to_test, version=4):
    """Return True if *uuid_to_test* parses as a UUID string.

    Non-string inputs (None, ints, ...) now return False instead of raising,
    since UUID() raises TypeError/AttributeError rather than ValueError for
    them.

    NOTE(review): UUID(..., version=n) does not *validate* the version -- it
    overwrites the version bits -- so any well-formed UUID string of any
    version is accepted here; confirm whether strict version checking is
    actually wanted.
    """
    try:
        UUID(uuid_to_test, version=version)
        return True
    except (ValueError, TypeError, AttributeError):
        return False
def is_flickr_base_58(candidate: str) -> bool:
    """Return True when *candidate* is a 22-character string drawn entirely
    from the flickrBase58 alphabet (the length of one encoded UUID)."""
    ENCODED_UUID_LENGTH = 22
    if len(candidate) != ENCODED_UUID_LENGTH:
        return False
    return all(ch in FLICKR_BASE58_ALPHABET for ch in candidate)
def s3_resolution_method(
    did: str, parsed_did: ParsedDID, resolver: Resolvable
) -> DIDResolutionResult:
    """DID resolution method: validate the identifier's checksum and point the
    DID document at its storage.cardstack.com JSON location."""
    cardstack_identifier = parse_identifier(parsed_did["id"])
    # URL path segment is the kebab-case form of the type name
    path = to_kebab_case(cardstack_identifier.type)
    did_document: DIDDocument = {
        "@context": [
            "https://www.w3.org/ns/did/v1",
            "https://identity.foundation/EcdsaSecp256k1RecoverySignature2020/lds-ecdsa-secp256k1-recovery2020-0.0.jsonld",
        ],
        "id": did,
        "alsoKnownAs": [
            f"https://storage.cardstack.com/{path}/{cardstack_identifier.uniqueId}.json"
        ],
        "verificationMethod": [],
        "authentication": [],
        "assertionMethod": [],
    }
    return {
        "didResolutionMetadata": {"contentType": "application/did+ld+json"},
        "didDocument": did_document,
        "didDocumentMetadata": {},
    }
def parse_identifier(identifier: str) -> CardstackIdentifier:
    """Parse the method-specific id of a did:cardstack DID, verifying the
    trailing checksum, and return the decoded CardstackIdentifier."""
    cutoff = len(identifier) - HASH_LENGTH
    data = identifier[:cutoff]
    checksum = identifier[cutoff : len(identifier)]
    if checksum != hash_func(data):
        raise Exception("Invalid DID identifier: checksum failed")
    # NOTE(review): version is passed on as the raw character (not an int, as
    # CardstackIdentifier's annotation suggests) -- confirm that
    # number_to_version_char round-trips it correctly.
    version = data[0]
    type = short_type_to_did_type(data[1])
    unique_id = data[2:]
    return CardstackIdentifier(version, type, unique_id)
class EncodeOptions(TypedDict):
    """Options accepted by encode_did(); version and uniqueId are optional."""
    type: CardstackIdentifierType
    version: NotRequired[int]
    uniqueId: NotRequired[str]
def encode_did(opts: EncodeOptions) -> str:
    """Build a did:cardstack DID string, defaulting to CURRENT_VERSION and a
    freshly generated flickrBase58 unique id."""
    version = opts.get("version", CURRENT_VERSION)
    unique_id = opts.get("uniqueId", su.uuid())
    return CardstackIdentifier(version, opts["type"], unique_id).METHOD_NAME()
def get_resolver():
    """Return the did-resolver registry entry for the cardstack DID method."""
    return {"cardstack": s3_resolution_method}
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
from collections import OrderedDict
from jmespath import Options
from jmespath import compile as compile_jmes
from jmespath import functions
def aks_agentpool_show_table_format(result):
    """Format an agent pool as summary results for display with "-o table"."""
    # single pool -> one-row table
    return [_aks_agentpool_table_format(result)]
def _aks_agentpool_table_format(result):
    """Project one agent pool onto the summary columns shown in table output."""
    parsed = compile_jmes("""{
    name: name,
    osType: osType,
    kubernetesVersion: orchestratorVersion,
    vmSize: vmSize,
    osDiskSizeGB: osDiskSizeGB,
    count: count,
    maxPods: maxPods,
    provisioningState: provisioningState,
    mode: mode
    }""")
    # use ordered dicts so headers are predictable
    return parsed.search(result, Options(dict_cls=OrderedDict))
def aks_agentpool_list_table_format(results):
    """Format an agent pool list for display with "-o table"."""
    # one summary row per pool
    return [_aks_agentpool_table_format(r) for r in results]
def aks_list_table_format(results):
    """Format a list of managed clusters as summary results for display with "-o table"."""
    return [_aks_table_format(r) for r in results]
def aks_run_command_result_format(cmdResult):
    """Summarize an AKS run-command result for display with "-o table".

    Succeeded results show exit code and logs; failed results show the
    failure reason; anything else (still in flight) shows state and start time.
    """
    state = cmdResult['provisioningState']
    summary = OrderedDict()
    if state == "Succeeded":
        summary['exit code'] = cmdResult['exitCode']
        summary['logs'] = cmdResult['logs']
    elif state == "Failed":
        summary['provisioning state'] = state
        summary['reason'] = cmdResult['reason']
    else:
        summary['provisioning state'] = state
        summary['started At'] = cmdResult['startedAt']
    return summary
def aks_show_table_format(result):
    """Format a managed cluster as summary results for display with "-o table"."""
    return [_aks_table_format(result)]
def _aks_table_format(result):
    """Project a single managed-cluster dict onto the columns shown in table output."""
    parsed = compile_jmes("""{
        name: name,
        location: location,
        resourceGroup: resourceGroup,
        kubernetesVersion: kubernetesVersion,
        currentKubernetesVersion: currentKubernetesVersion,
        provisioningState: provisioningState,
        fqdn: fqdn || privateFqdn
    }""")
    # use ordered dicts so headers are predictable
    return parsed.search(result, Options(dict_cls=OrderedDict))
def aks_upgrades_table_format(result):
    """Format get-upgrades results as a summary for display with "-o table"."""
    # Collect the set of preview upgrade versions so the custom JMESPath
    # functions can suffix them with "(preview)" in the rendered table.
    preview = {}
    def find_preview_versions(versions_bag):
        for upgrade in versions_bag.get('upgrades', []):
            if upgrade.get('isPreview', False):
                preview[upgrade['kubernetesVersion']] = True
    find_preview_versions(result.get('controlPlaneProfile', {}))
    # This expression assumes there is one node pool, and that the master and nodes upgrade in lockstep.
    parsed = compile_jmes("""{
        name: name,
        resourceGroup: resourceGroup,
        masterVersion: controlPlaneProfile.kubernetesVersion || `unknown`,
        upgrades: controlPlaneProfile.upgrades[].kubernetesVersion || [`None available`] | sort_versions(@) | set_preview_array(@) | join(`, `, @)
    }""")
    # use ordered dicts so headers are predictable
    return parsed.search(result, Options(dict_cls=OrderedDict, custom_functions=_custom_functions(preview)))
def aks_versions_table_format(result):
    """Format get-versions results as a summary for display with "-o table"."""
    # Flatten the nested patchVersions structure into one row per version.
    version_table = flatten_version_table(result.get("values", []))
    parsed = compile_jmes("""[].{
        kubernetesVersion: version,
        isPreview: isPreview,
        upgrades: upgrades || [`None available`] | sort_versions(@) | join(`, `, @)
    }""")
    # use ordered dicts so headers are predictable
    results = parsed.search(version_table, Options(
        dict_cls=OrderedDict, custom_functions=_custom_functions({})))
    # Newest Kubernetes version first.
    return sorted(results, key=lambda x: version_to_tuple(x.get("kubernetesVersion")), reverse=True)
def aks_list_nodepool_snapshot_table_format(results):
    """Format a list of nodepool snapshots as summary results for display with "-o table"."""
    return [_aks_nodepool_snapshot_table_format(r) for r in results]
def aks_show_nodepool_snapshot_table_format(result):
    """Format a nodepool snapshot as summary results for display with "-o table"."""
    return [_aks_nodepool_snapshot_table_format(result)]
def _aks_nodepool_snapshot_table_format(result):
    """Project a single nodepool-snapshot dict onto the columns shown in table output."""
    parsed = compile_jmes("""{
        name: name,
        location: location,
        resourceGroup: resourceGroup,
        nodeImageVersion: nodeImageVersion,
        kubernetesVersion: kubernetesVersion,
        osType: osType,
        enableFIPS: enableFIPS
    }""")
    # use ordered dicts so headers are predictable
    return parsed.search(result, Options(dict_cls=OrderedDict))
def version_to_tuple(version):
    """Convert a dotted version string to a tuple of ints for sorting.

    Any trailing "(preview)" suffix is stripped before parsing.
    """
    suffix = '(preview)'
    if version.endswith(suffix):
        version = version[:-len(suffix)]
    return tuple(int(part) for part in version.split('.'))
def flatten_version_table(release_info):
    """Flatten the nested per-release patchVersions map into one row per version.

    Each row carries the patch version string, its available upgrades, and the
    release-level isPreview flag.
    """
    rows = []
    for release in release_info:
        preview = release.get("isPreview", False)
        rows.extend(
            {"version": patch, "upgrades": details.get("upgrades", []), "isPreview": preview}
            for patch, details in release.get("patchVersions", {}).items()
        )
    return rows
def _custom_functions(preview_versions):
    """Build the JMESPath custom-function set used by the table formatters.

    preview_versions maps version string -> True for versions that should be
    rendered with a "(preview)" suffix.
    """
    class CustomFunctions(functions.Functions):  # pylint: disable=too-few-public-methods
        # NOTE(review): jmespath maps `_func_xxx` methods to a custom `xxx`
        # function; the formatters above call sort_versions / set_preview_array.
        @functions.signature({'types': ['array']})
        def METHOD_NAME(self, versions):  # pylint: disable=no-self-use
            """Custom JMESPath `sort_versions` function that sorts an array of strings as software versions."""
            try:
                return sorted(versions, key=version_to_tuple)
            # if it wasn't sortable, return the input so the pipeline continues
            except (TypeError, ValueError):
                return versions
        @functions.signature({'types': ['array']})
        def _func_set_preview_array(self, versions):
            """Custom JMESPath `set_preview_array` function that suffixes preview version"""
            try:
                for i, _ in enumerate(versions):
                    versions[i] = self._func_set_preview(versions[i])
                return versions
            except (TypeError, ValueError):
                return versions
        @functions.signature({'types': ['string']})
        def _func_set_preview(self, version):  # pylint: disable=no-self-use
            """Custom JMESPath `set_preview` function that suffixes preview version"""
            try:
                if preview_versions.get(version, False):
                    return version + '(preview)'
                return version
            except (TypeError, ValueError):
                return version
    return CustomFunctions()
1,760 | test eval exception2 | #!/usr/bin/env python
import unittest
import sys
import os
from test import test_support
# Skip this test if the _tkinter module wasn't built.
_tkinter = test_support.import_module('_tkinter')
from Tkinter import Tcl
from _tkinter import TclError
class TkinterTest(unittest.TestCase):
    """Tests for module-level _tkinter helpers."""
    def testFlattenLen(self):
        # flatten(<object with no length>)
        self.assertRaises(TypeError, _tkinter._flatten, True)
class TclTest(unittest.TestCase):
    """Exercise the Tcl interpreter wrapper: eval/call, variable get/set/unset,
    file evaluation, and value round-tripping. (Python 2 era test file.)"""
    def setUp(self):
        self.interp = Tcl()
    def testEval(self):
        tcl = self.interp
        tcl.eval('set a 1')
        self.assertEqual(tcl.eval('set a'),'1')
    def testEvalException(self):
        # Reading an unset variable raises TclError.
        tcl = self.interp
        self.assertRaises(TclError,tcl.eval,'set a')
    def METHOD_NAME(self):
        # Evaluating syntactically invalid Tcl raises TclError.
        tcl = self.interp
        self.assertRaises(TclError,tcl.eval,'this is wrong')
    def testCall(self):
        tcl = self.interp
        tcl.call('set','a','1')
        self.assertEqual(tcl.call('set','a'),'1')
    def testCallException(self):
        tcl = self.interp
        self.assertRaises(TclError,tcl.call,'set','a')
    def testCallException2(self):
        tcl = self.interp
        self.assertRaises(TclError,tcl.call,'this','is','wrong')
    def testSetVar(self):
        tcl = self.interp
        tcl.setvar('a','1')
        self.assertEqual(tcl.eval('set a'),'1')
    def testSetVarArray(self):
        tcl = self.interp
        tcl.setvar('a(1)','1')
        self.assertEqual(tcl.eval('set a(1)'),'1')
    def testGetVar(self):
        tcl = self.interp
        tcl.eval('set a 1')
        self.assertEqual(tcl.getvar('a'),'1')
    def testGetVarArray(self):
        tcl = self.interp
        tcl.eval('set a(1) 1')
        self.assertEqual(tcl.getvar('a(1)'),'1')
    def testGetVarException(self):
        tcl = self.interp
        self.assertRaises(TclError,tcl.getvar,'a')
    def testGetVarArrayException(self):
        tcl = self.interp
        self.assertRaises(TclError,tcl.getvar,'a(1)')
    def testUnsetVar(self):
        tcl = self.interp
        tcl.setvar('a',1)
        self.assertEqual(tcl.eval('info exists a'),'1')
        tcl.unsetvar('a')
        self.assertEqual(tcl.eval('info exists a'),'0')
    def testUnsetVarArray(self):
        # Unsetting one array element must leave the others intact.
        tcl = self.interp
        tcl.setvar('a(1)',1)
        tcl.setvar('a(2)',2)
        self.assertEqual(tcl.eval('info exists a(1)'),'1')
        self.assertEqual(tcl.eval('info exists a(2)'),'1')
        tcl.unsetvar('a(1)')
        self.assertEqual(tcl.eval('info exists a(1)'),'0')
        self.assertEqual(tcl.eval('info exists a(2)'),'1')
    def testUnsetVarException(self):
        tcl = self.interp
        self.assertRaises(TclError,tcl.unsetvar,'a')
    def testEvalFile(self):
        # Write a small script to disk, evaluate it, then clean up.
        tcl = self.interp
        filename = "testEvalFile.tcl"
        fd = open(filename,'w')
        script = """set a 1
        set b 2
        set c [ expr $a + $b ]
        """
        fd.write(script)
        fd.close()
        tcl.evalfile(filename)
        os.remove(filename)
        self.assertEqual(tcl.eval('set a'),'1')
        self.assertEqual(tcl.eval('set b'),'2')
        self.assertEqual(tcl.eval('set c'),'3')
    def testEvalFileException(self):
        tcl = self.interp
        filename = "doesnotexists"
        try:
            os.remove(filename)
        except Exception,e:
            pass
        self.assertRaises(TclError,tcl.evalfile,filename)
    def testPackageRequireException(self):
        tcl = self.interp
        self.assertRaises(TclError,tcl.eval,'package require DNE')
    def testLoadWithUNC(self):
        # Verify Tkinter loads when Python is launched via a UNC path (Windows only).
        import sys
        if sys.platform != 'win32':
            return
        # Build a UNC path from the regular path.
        # Something like
        #   \\%COMPUTERNAME%\c$\python27\python.exe
        fullname = os.path.abspath(sys.executable)
        if fullname[1] != ':':
            return
        unc_name = r'\\%s\%s$\%s' % (os.environ['COMPUTERNAME'],
                                    fullname[0],
                                    fullname[3:])
        with test_support.EnvironmentVarGuard() as env:
            env.unset("TCL_LIBRARY")
            f = os.popen('%s -c "import Tkinter; print Tkinter"' % (unc_name,))
            self.assertTrue('Tkinter.py' in f.read())
            # exit code must be zero
            self.assertEqual(f.close(), None)
    def test_passing_values(self):
        # Round-trip assorted Python values through a Tcl variable.
        # NOTE(review): this is a Python 2 file, so '\u20ac' inside a plain
        # str literal is NOT an escape -- both sides of each assert use the
        # same literal, so the comparisons are self-consistent.
        def passValue(value):
            return self.interp.call('set', '_', value)
        self.assertEqual(passValue(True), True)
        self.assertEqual(passValue(False), False)
        self.assertEqual(passValue('string'), 'string')
        self.assertEqual(passValue('string\u20ac'), 'string\u20ac')
        self.assertEqual(passValue(u'string'), u'string')
        self.assertEqual(passValue(u'string\u20ac'), u'string\u20ac')
        for i in (0, 1, -1, int(2**31-1), int(-2**31)):
            self.assertEqual(passValue(i), i)
        for f in (0.0, 1.0, -1.0, 1//3, 1/3.0,
                  sys.float_info.min, sys.float_info.max,
                  -sys.float_info.min, -sys.float_info.max):
            self.assertEqual(passValue(f), f)
        for f in float('nan'), float('inf'), -float('inf'):
            if f != f: # NaN
                self.assertNotEqual(passValue(f), f)
            else:
                self.assertEqual(passValue(f), f)
        self.assertEqual(passValue((1, '2', (3.4,))), (1, '2', (3.4,)))
def test_main():
    """Entry point for regrtest: run both test cases."""
    test_support.run_unittest(TclTest, TkinterTest)
if __name__ == "__main__":
test_main() |
1,761 | test samba gupsgen mmu 4 kb | # -*- coding: utf-8 -*-
from sst_unittest import *
from sst_unittest_support import *
################################################################################
# Code to support a single instance module initialize, must be called setUp method
module_init = 0
module_sema = threading.Semaphore()
def initializeTestModule_SingleInstance(class_inst):
    """Run module-level init code exactly once, guarded by a semaphore.

    Called from every test's setUp; the module_init flag ensures the init
    body executes only on the first call.
    """
    global module_init
    global module_sema
    module_sema.acquire()
    if module_init != 1:
        try:
            # Put your single instance Init Code Here
            pass
        except:
            # NOTE(review): bare except is harmless while the body is `pass`,
            # but should be narrowed if real init code is added.
            pass
        module_init = 1
    module_sema.release()
################################################################################
class testcase_Samba_Component(SSTTestCase):
    """SST regression tests for the Samba component: run each SDL config and
    diff (or word-count compare) the output against a reference file."""
    # NOTE(review): super(type(self), self) misbehaves under further
    # subclassing (infinite recursion); kept as-is since it matches the
    # surrounding test-suite convention.
    def initializeClass(self, testName):
        super(type(self), self).initializeClass(testName)
        # Put test based setup code here. it is called before testing starts
        # NOTE: This method is called once for every test
    def setUp(self):
        super(type(self), self).setUp()
        initializeTestModule_SingleInstance(self)
        # Put test based setup code here. it is called once before every test
    def tearDown(self):
        # Put test based teardown code here. it is called once after every test
        super(type(self), self).tearDown()
#####
    def test_Samba_gupsgen_mmu(self):
        self.Samba_test_template("gupsgen_mmu")
    def METHOD_NAME(self):
        self.Samba_test_template("gupsgen_mmu_4KB")
    def test_Samba_gupsgen_mmu_three_levels(self):
        self.Samba_test_template("gupsgen_mmu_three_levels")
    def test_Samba_stencil3dbench_mmu(self):
        self.Samba_test_template("stencil3dbench_mmu", testtimeout=240)
    def test_Samba_streambench_mmu(self):
        self.Samba_test_template("streambench_mmu")
#####
    def Samba_test_template(self, testcase, testtimeout=120):
        """Run one Samba SDL config under SST and compare output to the reference.

        Falls back to a word/line-count comparison (with sed-normalized files)
        when the direct diff fails.
        """
        # Get the path to the test files
        test_path = self.get_testsuite_dir()
        outdir = self.get_test_output_run_dir()
        tmpdir = self.get_test_output_tmp_dir()
        # Set the various file paths
        testDataFileName="test_Samba_{0}".format(testcase)
        sdlfile = "{0}/{1}.py".format(test_path, testcase)
        reffile = "{0}/refFiles/{1}.out".format(test_path, testDataFileName)
        outfile = "{0}/{1}.out".format(outdir, testDataFileName)
        errfile = "{0}/{1}.err".format(outdir, testDataFileName)
        tmpfile = "{0}/{1}.tmp".format(tmpdir, testDataFileName)
        self.tmp_file = tmpfile
        mpioutfiles = "{0}/{1}.testfile".format(outdir, testDataFileName)
        newoutfile = "{0}/{1}.newout".format(outdir, testDataFileName)
        newreffile = "{0}/{1}.newref".format(outdir, testDataFileName)
        self.run_sst(sdlfile, outfile, errfile, mpi_out_files=mpioutfiles, timeout_sec=testtimeout)
        testing_remove_component_warning_from_file(outfile)
        # NOTE: THE PASS / FAIL EVALUATIONS ARE PORTED FROM THE SQE BAMBOO
        #       BASED testSuite_XXX.sh THESE SHOULD BE RE-EVALUATED BY THE
        #       DEVELOPER AGAINST THE LATEST VERSION OF SST TO SEE IF THE
        #       TESTS & RESULT FILES ARE STILL VALID
        # Perform the tests
        if os_test_file(errfile, "-s"):
            log_testing_note("Samba test {0} has a Non-Empty Error File {1}".format(testDataFileName, errfile))
        cmp_result = testing_compare_diff(testDataFileName, outfile, reffile)
        if cmp_result != True:
            diff_data = testing_get_diff_data(testDataFileName)
            log_debug("{0} - DIFF DATA =\n{1}".format(self.get_testcase_name(), diff_data))
            # We need to use some bailing wire to allow serialization
            # branch to work with same reference files
            cmd = "sed s/' (.*)'// {0} > {1}".format(reffile, newreffile)
            os.system(cmd)
            ref_wc_data = self._get_file_data_counts(newreffile)
            cmd = "sed s/' (.*)'// {0} > {1}".format(outfile, newoutfile)
            os.system(cmd)
            out_wc_data = self._get_file_data_counts(newoutfile)
            cmp_result = ref_wc_data == out_wc_data
            if not cmp_result:
                log_failure("{0} - DIFF DATA\nref_wc_data = {1}\nout_wc_data = {2}".format(self.get_testcase_name(), ref_wc_data, out_wc_data))
            self.assertTrue(cmp_result, "Output file {0} word/line count does NOT match Reference file {1} word/line count".format(outfile, reffile))
        else:
            self.assertTrue(cmp_result, "Diffed compared Output file {0} does not match Reference File {1}".format(outfile, reffile))
###
    def _get_file_data_counts(self, in_file):
        # Return the "lines words" count of a file via wc/awk, as a string.
        cmd = "wc {0} | awk '{{print $1, $2}}' > {1}".format(in_file, self.tmp_file)
        os.system(cmd)
        cmd = "cat {0}".format(self.tmp_file)
        cmd_rtn = os_simple_command(cmd)
        cat_out = cmd_rtn[1]
        return cat_out
1,762 | teardown | # -*- coding: utf-8 -*-
"""
flask.testsuite.ext
~~~~~~~~~~~~~~~~~~~
Tests the extension import thing.
:copyright: (c) 2011 by Armin Ronacher.
:license: BSD, see LICENSE for more details.
"""
import sys
import unittest
try:
from imp import reload as reload_module
except ImportError:
reload_module = reload
from flask.testsuite import FlaskTestCase
from flask._compat import PY2
class ExtImportHookTestCase(FlaskTestCase):
    """Tests for the flask.ext import hook: redirection to flask_* and
    flaskext.* packages, module caching, and error propagation."""
    # NOTE(review): lowercase setup()/teardown() appear to be FlaskTestCase
    # hooks (not unittest's setUp/tearDown) -- confirm against flask.testsuite.
    def setup(self):
        # we clear this out for various reasons.  The most important one is
        # that a real flaskext could be in there which would disable our
        # fake package.  Secondly we want to make sure that the flaskext
        # import hook does not break on reloading.
        for entry, value in list(sys.modules.items()):
            if (entry.startswith('flask.ext.') or
                entry.startswith('flask_') or
                entry.startswith('flaskext.') or
                entry == 'flaskext') and value is not None:
                sys.modules.pop(entry, None)
        from flask import ext
        reload_module(ext)
        # reloading must not add more hooks
        import_hooks = 0
        for item in sys.meta_path:
            cls = type(item)
            if cls.__module__ == 'flask.exthook' and \
               cls.__name__ == 'ExtensionImporter':
                import_hooks += 1
        self.assert_equal(import_hooks, 1)
    def METHOD_NAME(self):
        # The ext module must only expose top-level names, never dotted ones.
        from flask import ext
        for key in ext.__dict__:
            self.assert_not_in('.', key)
    def test_flaskext_new_simple_import_normal(self):
        from flask.ext.newext_simple import ext_id
        self.assert_equal(ext_id, 'newext_simple')
    def test_flaskext_new_simple_import_module(self):
        from flask.ext import newext_simple
        self.assert_equal(newext_simple.ext_id, 'newext_simple')
        self.assert_equal(newext_simple.__name__, 'flask_newext_simple')
    def test_flaskext_new_package_import_normal(self):
        from flask.ext.newext_package import ext_id
        self.assert_equal(ext_id, 'newext_package')
    def test_flaskext_new_package_import_module(self):
        from flask.ext import newext_package
        self.assert_equal(newext_package.ext_id, 'newext_package')
        self.assert_equal(newext_package.__name__, 'flask_newext_package')
    def test_flaskext_new_package_import_submodule_function(self):
        from flask.ext.newext_package.submodule import test_function
        self.assert_equal(test_function(), 42)
    def test_flaskext_new_package_import_submodule(self):
        from flask.ext.newext_package import submodule
        self.assert_equal(submodule.__name__, 'flask_newext_package.submodule')
        self.assert_equal(submodule.test_function(), 42)
    def test_flaskext_old_simple_import_normal(self):
        from flask.ext.oldext_simple import ext_id
        self.assert_equal(ext_id, 'oldext_simple')
    def test_flaskext_old_simple_import_module(self):
        from flask.ext import oldext_simple
        self.assert_equal(oldext_simple.ext_id, 'oldext_simple')
        self.assert_equal(oldext_simple.__name__, 'flaskext.oldext_simple')
    def test_flaskext_old_package_import_normal(self):
        from flask.ext.oldext_package import ext_id
        self.assert_equal(ext_id, 'oldext_package')
    def test_flaskext_old_package_import_module(self):
        from flask.ext import oldext_package
        self.assert_equal(oldext_package.ext_id, 'oldext_package')
        self.assert_equal(oldext_package.__name__, 'flaskext.oldext_package')
    def test_flaskext_old_package_import_submodule(self):
        from flask.ext.oldext_package import submodule
        self.assert_equal(submodule.__name__, 'flaskext.oldext_package.submodule')
        self.assert_equal(submodule.test_function(), 42)
    def test_flaskext_old_package_import_submodule_function(self):
        from flask.ext.oldext_package.submodule import test_function
        self.assert_equal(test_function(), 42)
    def test_flaskext_broken_package_no_module_caching(self):
        # A failing import must fail the same way on every attempt.
        for x in range(2):
            with self.assert_raises(ImportError):
                import flask.ext.broken
    def test_no_error_swallowing(self):
        # The hook must re-raise the original ImportError with its traceback.
        try:
            import flask.ext.broken
        except ImportError:
            exc_type, exc_value, tb = sys.exc_info()
            self.assert_true(exc_type is ImportError)
            if PY2:
                message = 'No module named missing_module'
            else:
                message = 'No module named \'missing_module\''
            self.assert_equal(str(exc_value), message)
            self.assert_true(tb.tb_frame.f_globals is globals())
            # reraise() adds a second frame so we need to skip that one too.
            # On PY3 we even have another one :(
            next = tb.tb_next.tb_next
            if not PY2:
                next = next.tb_next
            self.assert_in('flask_broken/__init__.py', next.tb_frame.f_code.co_filename)
def suite():
    """Build the unittest suite for the extension import hook tests."""
    result = unittest.TestSuite()
    result.addTest(unittest.makeSuite(ExtImportHookTestCase))
    return result
1,763 | add mappings | from typing import TYPE_CHECKING, Optional
from ..config import Config
from ..utils import RichStatus
from .ircluster import IRCluster
from .irresource import IRResource
if TYPE_CHECKING:
from .ir import IR # pragma: no cover
class IRTracing(IRResource):
    """IR resource for the (single) TracingService: validates the aconf
    tracing config, records driver settings, and wires up the collector
    cluster for the final Envoy config."""
    cluster: Optional[IRCluster]
    service: str
    driver: str
    driver_config: dict
    # TODO: tag_headers is deprecated and should be removed once migrated to CRD v3
    tag_headers: list
    custom_tags: list
    host_rewrite: Optional[str]
    sampling: dict
    def __init__(
        self,
        ir: "IR",
        aconf: Config,
        rkey: str = "ir.tracing",
        kind: str = "ir.tracing",
        name: str = "tracing",
        namespace: Optional[str] = None,
        **kwargs
    ) -> None:
        del kwargs  # silence unused-variable warning
        super().__init__(ir=ir, aconf=aconf, rkey=rkey, kind=kind, name=name, namespace=namespace)
        # Cluster is attached later by METHOD_NAME() once the IR is assembling clusters.
        self.cluster = None
    def setup(self, ir: "IR", aconf: Config) -> bool:
        """Validate the tracing config; return False (inactive) on any problem."""
        # Some of the validations might go away if JSON Schema is doing the validations, but need to check on that
        config_info = aconf.get_config("tracing_configs")
        if not config_info:
            ir.logger.debug("IRTracing: no tracing config, bailing")
            # No tracing info. Be done.
            return False
        configs = config_info.values()
        number_configs = len(configs)
        if number_configs != 1:
            self.post_error(
                RichStatus.fromError(
                    "exactly one TracingService is supported, got {}".format(number_configs),
                    module=aconf,
                )
            )
            return False
        config = list(configs)[0]
        service = config.get("service")
        if not service:
            self.post_error(RichStatus.fromError("service field is required in TracingService"))
            return False
        driver = config.get("driver")
        if not driver:
            self.post_error(RichStatus.fromError("driver field is required in TracingService"))
            return False
        self.namespace = config.get("namespace", self.namespace)
        grpc = False
        if driver == "lightstep":
            # lightstep was removed; fail loudly so users migrate.
            self.post_error(
                RichStatus.fromError(
                    "as of v3.4+ the 'lightstep' driver is no longer supported in the TracingService, please see docs for migration options"
                )
            )
            return False
        if driver == "opentelemetry":
            ir.logger.warning(
                "The OpenTelemetry tracing driver is work-in-progress. Functionality is incomplete and it is not intended for production use. This extension has an unknown security posture and should only be used in deployments where both the downstream and upstream are trusted."
            )
            grpc = True
        if driver == "datadog":
            driver = "envoy.tracers.datadog"
        # This "config" is a field on the aconf for the TracingService, not to be confused with the
        # envoyv2 untyped "config" field. We actually use a "typed_config" in the final Envoy
        # config, see envoy/v2/v2tracer.py.
        driver_config = config.get("config", {})
        if driver == "zipkin":
            # fill zipkin defaults
            if not driver_config.get("collector_endpoint"):
                driver_config["collector_endpoint"] = "/api/v2/spans"
            if not driver_config.get("collector_endpoint_version"):
                driver_config["collector_endpoint_version"] = "HTTP_JSON"
            if not "trace_id_128bit" in driver_config:
                # Make 128-bit traceid the default
                driver_config["trace_id_128bit"] = True
            # validate
            if driver_config["collector_endpoint_version"] not in ["HTTP_JSON", "HTTP_PROTO"]:
                self.post_error(
                    RichStatus.fromError(
                        "collector_endpoint_version must be one of HTTP_JSON, HTTP_PROTO'"
                    )
                )
                return False
        # OK, we have a valid config.
        self.sourced_by(config)
        self.service = service
        self.driver = driver
        self.grpc = grpc
        self.cluster = None
        self.driver_config = driver_config
        self.tag_headers = config.get("tag_headers", [])
        self.custom_tags = config.get("custom_tags", [])
        self.sampling = config.get("sampling", {})
        self.stats_name = config.get("stats_name", None)
        # XXX host_rewrite actually isn't in the schema right now.
        self.host_rewrite = config.get("host_rewrite", None)
        # Remember that the config references us.
        self.referenced_by(config)
        return True
    def METHOD_NAME(self, ir: "IR", aconf: Config):
        """Create and attach the collector cluster for the tracing service."""
        cluster = ir.add_cluster(
            IRCluster(
                ir=ir,
                aconf=aconf,
                parent_ir_resource=self,
                location=self.location,
                service=self.service,
                host_rewrite=self.get("host_rewrite", None),
                marker="tracing",
                grpc=self.grpc,
                stats_name=self.get("stats_name", None),
            )
        )
        cluster.referenced_by(self)
        self.cluster = cluster
    def finalize(self):
        """Inject the resolved cluster name into the driver config."""
        assert self.cluster
        self.ir.logger.debug("tracing cluster envoy name: %s" % self.cluster.envoy_name)
        # Opentelemetry is the only one that does not use collector_cluster
        if self.driver == "opentelemetry":
            self.driver_config["grpc_service"] = {
                "envoy_grpc": {"cluster_name": self.cluster.envoy_name}
            }
        else:
            self.driver_config["collector_cluster"] = self.cluster.envoy_name
1,764 | l1 | import unittest
import shelve
import glob
from test import support
from collections.abc import MutableMapping
from test.test_dbm import dbm_iterator
def METHOD_NAME(s):
    """Decode a latin-1 encoded bytes key to str."""
    return str(s, "latin-1")
class byteskeydict(MutableMapping):
    """Mapping that supports bytes keys.

    Keys are stored internally as latin-1-decoded strings; the MutableMapping
    interface accepts and yields bytes keys.
    """
    def __init__(self, d=None):
        # Bug fix: copy() called byteskeydict(self.d), but __init__ took no
        # argument, so copy() always raised TypeError. Accept an optional
        # backing dict (copied) to make copy() work; no-arg construction is
        # unchanged. `None` sentinel avoids a shared mutable default.
        self.d = {} if d is None else dict(d)
    def __getitem__(self, key):
        return self.d[METHOD_NAME(key)]
    def __setitem__(self, key, value):
        self.d[METHOD_NAME(key)] = value
    def __delitem__(self, key):
        del self.d[METHOD_NAME(key)]
    def __len__(self):
        return len(self.d)
    def iterkeys(self):
        # Yield keys re-encoded back to bytes.
        for k in self.d.keys():
            yield k.encode("latin-1")
    __iter__ = iterkeys
    def keys(self):
        return list(self.iterkeys())
    def copy(self):
        # Shallow copy: new byteskeydict with an independent backing dict.
        return byteskeydict(self.d)
class TestCase(unittest.TestCase):
    """Direct tests of shelve.Shelf / shelve.open behavior."""
    # Base filename for on-disk shelves; tearDown removes all derived files.
    fn = "shelftemp.db"
    def tearDown(self):
        for f in glob.glob(self.fn+"*"):
            support.unlink(f)
    def test_close(self):
        # After close(), any access must raise ValueError.
        d1 = {}
        s = shelve.Shelf(d1, protocol=2, writeback=False)
        s['key1'] = [1,2,3,4]
        self.assertEqual(s['key1'], [1,2,3,4])
        self.assertEqual(len(s), 1)
        s.close()
        self.assertRaises(ValueError, len, s)
        try:
            s['key1']
        except ValueError:
            pass
        else:
            self.fail('Closed shelf should not find a key')
    def test_ascii_file_shelf(self):
        s = shelve.open(self.fn, protocol=0)
        try:
            s['key1'] = (1,2,3,4)
            self.assertEqual(s['key1'], (1,2,3,4))
        finally:
            s.close()
    def test_binary_file_shelf(self):
        s = shelve.open(self.fn, protocol=1)
        try:
            s['key1'] = (1,2,3,4)
            self.assertEqual(s['key1'], (1,2,3,4))
        finally:
            s.close()
    def test_proto2_file_shelf(self):
        s = shelve.open(self.fn, protocol=2)
        try:
            s['key1'] = (1,2,3,4)
            self.assertEqual(s['key1'], (1,2,3,4))
        finally:
            s.close()
    def test_in_memory_shelf(self):
        # Different pickle protocols must produce different stored bytes.
        d1 = byteskeydict()
        s = shelve.Shelf(d1, protocol=0)
        s['key1'] = (1,2,3,4)
        self.assertEqual(s['key1'], (1,2,3,4))
        s.close()
        d2 = byteskeydict()
        s = shelve.Shelf(d2, protocol=1)
        s['key1'] = (1,2,3,4)
        self.assertEqual(s['key1'], (1,2,3,4))
        s.close()
        self.assertEqual(len(d1), 1)
        self.assertEqual(len(d2), 1)
        self.assertNotEqual(d1.items(), d2.items())
    def test_mutable_entry(self):
        # Without writeback, in-place mutation of a fetched value is lost;
        # with writeback=True it persists on close.
        d1 = byteskeydict()
        s = shelve.Shelf(d1, protocol=2, writeback=False)
        s['key1'] = [1,2,3,4]
        self.assertEqual(s['key1'], [1,2,3,4])
        s['key1'].append(5)
        self.assertEqual(s['key1'], [1,2,3,4])
        s.close()
        d2 = byteskeydict()
        s = shelve.Shelf(d2, protocol=2, writeback=True)
        s['key1'] = [1,2,3,4]
        self.assertEqual(s['key1'], [1,2,3,4])
        s['key1'].append(5)
        self.assertEqual(s['key1'], [1,2,3,4,5])
        s.close()
        self.assertEqual(len(d1), 1)
        self.assertEqual(len(d2), 1)
    def test_keyencoding(self):
        d = {}
        key = 'Pöp'
        # the default keyencoding is utf-8
        shelve.Shelf(d)[key] = [1]
        self.assertIn(key.encode('utf-8'), d)
        # but a different one can be given
        shelve.Shelf(d, keyencoding='latin-1')[key] = [1]
        self.assertIn(key.encode('latin-1'), d)
        # with all consequences
        s = shelve.Shelf(d, keyencoding='ascii')
        self.assertRaises(UnicodeEncodeError, s.__setitem__, key, [1])
    def test_writeback_also_writes_immediately(self):
        # Issue 5754
        d = {}
        key = 'key'
        encodedkey = key.encode('utf-8')
        s = shelve.Shelf(d, writeback=True)
        s[key] = [1]
        p1 = d[encodedkey]  # Will give a KeyError if backing store not updated
        s['key'].append(2)
        s.close()
        p2 = d[encodedkey]
        self.assertNotEqual(p1, p2)  # Write creates new object in store
    def test_with(self):
        # Shelf works as a context manager and is closed on exit.
        d1 = {}
        with shelve.Shelf(d1, protocol=2, writeback=False) as s:
            s['key1'] = [1,2,3,4]
            self.assertEqual(s['key1'], [1,2,3,4])
            self.assertEqual(len(s), 1)
        self.assertRaises(ValueError, len, s)
        try:
            s['key1']
        except ValueError:
            pass
        else:
            self.fail('Closed shelf should not find a key')
from test import mapping_tests
class TestShelveBase(mapping_tests.BasicTestMappingProtocol):
    """Base for running the generic mapping-protocol tests against a Shelf.

    Subclasses set _args (Shelf kwargs) and _in_mem (backing store choice).
    """
    fn = "shelftemp.db"
    # Counter used to give every on-disk shelf a unique filename.
    counter = 0
    def __init__(self, *args, **kw):
        # Track every shelf we open so tearDown can close and delete them.
        self._db = []
        mapping_tests.BasicTestMappingProtocol.__init__(self, *args, **kw)
    type2test = shelve.Shelf
    def _reference(self):
        return {"key1":"value1", "key2":2, "key3":(1,2,3)}
    def _empty_mapping(self):
        if self._in_mem:
            x= shelve.Shelf(byteskeydict(), **self._args)
        else:
            self.counter+=1
            x= shelve.open(self.fn+str(self.counter), **self._args)
        self._db.append(x)
        return x
    def tearDown(self):
        for db in self._db:
            db.close()
        self._db = []
        if not self._in_mem:
            for f in glob.glob(self.fn+"*"):
                support.unlink(f)
# Concrete parametrizations of TestShelveBase: every pickle protocol crossed
# with on-disk vs in-memory backing stores.
class TestAsciiFileShelve(TestShelveBase):
    _args={'protocol':0}
    _in_mem = False
class TestBinaryFileShelve(TestShelveBase):
    _args={'protocol':1}
    _in_mem = False
class TestProto2FileShelve(TestShelveBase):
    _args={'protocol':2}
    _in_mem = False
class TestAsciiMemShelve(TestShelveBase):
    _args={'protocol':0}
    _in_mem = True
class TestBinaryMemShelve(TestShelveBase):
    _args={'protocol':1}
    _in_mem = True
class TestProto2MemShelve(TestShelveBase):
    _args={'protocol':2}
    _in_mem = True
def test_main():
    """Run all shelve suites once per available dbm backend."""
    for module in dbm_iterator():
        support.run_unittest(
            TestAsciiFileShelve,
            TestBinaryFileShelve,
            TestProto2FileShelve,
            TestAsciiMemShelve,
            TestBinaryMemShelve,
            TestProto2MemShelve,
            TestCase
        )
if __name__ == "__main__":
test_main() |
1,765 | send imsi detach indication | #!/usr/bin/env python3
"""
Copyright 2020 The Magma Authors.
This source code is licensed under the BSD-style license found in the
LICENSE file in the root directory of this source tree.
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import argparse
import grpc
from feg.protos.csfb_pb2 import (
AlertAck,
AlertReject,
EPSDetachIndication,
IMSIDetachIndication,
)
from feg.protos.csfb_pb2_grpc import CSFBFedGWServiceStub
from magma.common.rpc_utils import cloud_grpc_wrapper
@cloud_grpc_wrapper
def send_alert_ack(client, args):
    """Send an SGs Alert Ack for args.imsi to the FeG CSFB service."""
    req = AlertAck(imsi=args.imsi)
    print("Sending Alert Ack with following fields:\n %s" % req)
    try:
        # NOTE(review): stub method is 'AlertAc' while the reject path uses
        # 'AlertRej' -- confirm against CSFBFedGWServiceStub that this is
        # not a typo for 'AlertAck'.
        client.AlertAc(req)
    except grpc.RpcError as e:
        print("gRPC failed with %s: %s" % (e.code(), e.details()))
@cloud_grpc_wrapper
def send_alert_reject(client, args):
    """Send an SGs Alert Reject for args.imsi to the FeG CSFB service.

    NOTE(review): sgs_cause is hard-coded to b'\\x01' -- presumably "IMSI
    detached for CS services"; confirm against the SGs cause-code table.
    """
    req = AlertReject(imsi=args.imsi, sgs_cause=b'\x01')
    print("Sending Alert Reject with following fields:\n %s" % req)
    try:
        client.AlertRej(req)
    except grpc.RpcError as e:
        print("gRPC failed with %s: %s" % (e.code(), e.details()))
@cloud_grpc_wrapper
def send_eps_detach_indication(client, args):
    """Send an SGs EPS Detach Indication to the FeG CSFB service.

    The CLI-supplied service type (int 1-3) is wrapped as a single byte,
    matching the proto's bytes field.
    """
    req = EPSDetachIndication(
        imsi=args.imsi,
        mme_name=args.mme_name,
        imsi_detach_from_eps_service_type=bytes(
            [args.imsi_detach_from_eps_service_type],
        ),
    )
    print("Sending EPS Detach Indication with following fields:\n %s" % req)
    try:
        client.EPSDetachInd(req)
    except grpc.RpcError as e:
        print("gRPC failed with %s: %s" % (e.code(), e.details()))
@cloud_grpc_wrapper
def METHOD_NAME(client, args):
    """Send an SGs IMSI Detach Indication to the FeG CSFB service.

    NOTE(review): the non-EPS service type is hard-coded to b'\\x11';
    confirm the intended value against 3GPP TS 29.118.
    """
    req = IMSIDetachIndication(
        imsi=args.imsi,
        mme_name=args.mme_name,
        imsi_detach_from_non_eps_service_type=b'\x11',
    )
    print("Sending IMSI Detach Indication with following fields:\n %s" % req)
    try:
        client.IMSIDetachInd(req)
    except grpc.RpcError as e:
        print("gRPC failed with %s: %s" % (e.code(), e.details()))
def create_parser():
    """
    Creates the argparse parser with all the arguments.

    One subcommand per SGs message (AA, AR, EDI, IDI); each binds its
    sender function via set_defaults(func=...).
    """
    parser = argparse.ArgumentParser(
        description='Management CLI for CSFB',
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
    )
    # Add subcommands
    subparsers = parser.add_subparsers(title='subcommands', dest='cmd')
    # Alert Ack
    alert_ack_parser = subparsers.add_parser(
        'AA', help='Send Alert Ack to CSFB service in FeG',
    )
    alert_ack_parser.add_argument('imsi', help='e.g.123456')
    alert_ack_parser.set_defaults(func=send_alert_ack)
    # Alert Reject
    alert_reject_parser = subparsers.add_parser(
        'AR', help='Send Alert Reject to csfb in feg',
    )
    alert_reject_parser.add_argument('imsi', help='e.g. 123456')
    alert_reject_parser.set_defaults(func=send_alert_reject)
    # EPS Detach Indication
    eps_detach_indication_parser = subparsers.add_parser(
        'EDI', help='Send EPS Detach Indication to CSFB service in FeG',
    )
    eps_detach_indication_parser.add_argument('imsi', help='e.g. 123456')
    eps_detach_indication_parser.add_argument(
        'mme_name',
        help='MME name is a 55-character FQDN, specified in 3GPP TS 23.003',
    )
    eps_detach_indication_parser.add_argument(
        'imsi_detach_from_eps_service_type',
        help='Enter either 1, 2 or 3', choices=[1, 2, 3], type=int,
    )
    eps_detach_indication_parser.set_defaults(func=send_eps_detach_indication)
    # IMSI Detach Indication
    imsi_detach_indication_parser = subparsers.add_parser(
        'IDI', help='Send IMSI Detach Indication to CSFB service in FeG',
    )
    imsi_detach_indication_parser.add_argument('imsi', help='e.g. 123456')
    imsi_detach_indication_parser.add_argument(
        'mme_name',
        help='MME name is a 55-character FQDN, specified in 3GPP TS 23.003',
    )
    imsi_detach_indication_parser.set_defaults(func=METHOD_NAME)
    return parser
def main():
    """Parse CLI args and run the selected subcommand against the csfb service."""
    parser = create_parser()
    # Parse the args
    args = parser.parse_args()
    if not args.cmd:
        parser.print_usage()
        exit(1)
    # Execute the subcommand function
    args.func(args, CSFBFedGWServiceStub, 'csfb')
if __name__ == "__main__":
main() |
1,766 | test check incompatible user agent | """Unit test for modules/http_analyzer/http_analyzer.py"""
from tests.module_factory import ModuleFactory
import random
# dummy params used for testing
profileid = 'profile_192.168.1.1'
twid = 'timewindow1'
uid = 'CAeDWs37BipkfP21u8'
timestamp = 1635765895.037696
SAFARI_UA = (
'Mozilla/5.0 (Macintosh; Intel Mac OS X 12_3_1) '
'AppleWebKit/605.1.15 (KHTML, like Gecko) '
'Version/15.3 Safari/605.1.15'
)
def get_random_MAC():
    """Return a random locally-administered MAC address ('02:00:00:xx:xx:xx')."""
    tail = (random.randint(0, 255) for _ in range(3))
    return "02:00:00:" + ":".join("%02x" % octet for octet in tail)
def test_check_suspicious_user_agents(mock_db):
    """A known-suspicious user agent must be flagged."""
    analyzer = ModuleFactory().create_http_analyzer_obj(mock_db)
    # Flow requesting wpad.dat with a suspicious UA string.
    flagged = analyzer.check_suspicious_user_agents(
        uid, '147.32.80.7', '/wpad.dat', timestamp, 'CHM_MSDN', profileid, twid
    )
    assert flagged is True
def test_check_multiple_google_connections(mock_db):
    """Repeated empty-body requests to google.com must trigger a detection."""
    analyzer = ModuleFactory().create_http_analyzer_obj(mock_db)
    # Mirrors a zeek http.log entry: GET google.com/ with request_body_len 0.
    empty_body_len = 0
    detected = False
    for _ in range(4):
        detected = analyzer.check_multiple_empty_connections(
            uid, 'google.com', timestamp, empty_body_len, profileid, twid
        )
    # The fourth identical empty connection crosses the threshold.
    assert detected is True
def test_parsing_online_ua_info(mock_db, mocker):
    """The UA record returned by the online lookup is parsed into fields."""
    analyzer = ModuleFactory().create_http_analyzer_obj(mock_db)
    # Dedicated profile so no cached UA info exists in the db.
    profileid = 'profile_192.168.99.99'
    mock_db.get_user_agent_from_profile.return_value = None
    # Stub the online UA database with a canned JSON response.
    mocked_get = mocker.patch("requests.get")
    mocked_get.return_value.status_code = 200
    mocked_get.return_value.text = """{
    "agent_name":"Safari",
    "os_type":"Macintosh",
    "os_name":"OS X"
    }"""
    parsed = analyzer.get_user_agent_info(SAFARI_UA, profileid)
    assert parsed['os_type'] == 'Macintosh'
    assert parsed['browser'] == 'Safari'
def test_get_user_agent_info(mock_db, mocker):
    """get_user_agent_info returns the complete parsed UA record."""
    analyzer = ModuleFactory().create_http_analyzer_obj(mock_db)
    # Canned response for the online UA lookup (get_ua_info_online()).
    mocked_get = mocker.patch("requests.get")
    mocked_get.return_value.status_code = 200
    mocked_get.return_value.text = """{
    "agent_name":"Safari",
    "os_type":"Macintosh",
    "os_name":"OS X"
    }"""
    mock_db.add_all_user_agent_to_profile.return_value = True
    mock_db.get_user_agent_from_profile.return_value = None
    expected = {
        'browser': 'Safari',
        'os_name': 'OS X',
        'os_type': 'Macintosh',
        # The raw UA string is echoed back alongside the parsed fields.
        'user_agent': SAFARI_UA,
    }
    assert analyzer.get_user_agent_info(SAFARI_UA, profileid) == expected
def test_check_incompatible_user_agent(mock_db):
    """An Intel-vendor MAC combined with a Safari UA must be flagged.

    Uses a dedicated private-IP profile so there is no cached info and the
    MAC is not mistaken for the gateway's MAC.
    """
    http_analyzer = ModuleFactory().create_http_analyzer_obj(mock_db)
    profileid = 'profile_192.168.77.254'
    # Mimic an Intel mac vendor using Safari.
    mock_db.get_mac_vendor_from_profile.return_value = 'Intel Corp'
    mock_db.get_user_agent_from_profile.return_value = {'browser': 'safari'}
    assert (
        http_analyzer.check_incompatible_user_agent('google.com', '/images', timestamp, profileid, twid, uid) is True
    )
def test_extract_info_from_UA(mock_db):
    """A 'server-bag' UA string is unpacked into a JSON info record."""
    analyzer = ModuleFactory().create_http_analyzer_obj(mock_db)
    # Dedicated profile: the default one already has a cached UA in the db.
    mock_db.get_user_agent_from_profile.return_value = None
    profileid = 'profile_192.168.1.2'
    server_bag_ua = 'server-bag[macOS,11.5.1,20G80,MacBookAir10,1]'
    expected = (
        '{"user_agent": "macOS,11.5.1,20G80,MacBookAir10,1", '
        '"os_name": "macOS", "os_type": "macOS11.5.1", "browser": ""}'
    )
    assert analyzer.extract_info_from_UA(server_bag_ua, profileid) == expected
def test_check_multiple_UAs(mock_db):
    """Evidence is set only when the new UA's OS contradicts the cached one."""
    analyzer = ModuleFactory().create_http_analyzer_obj(mock_db)
    # Previously cached OS info for this profile.
    cached_ua = {'os_type': 'Fedora', 'os_name': 'Linux'}
    mozilla_ua = 'Mozilla/5.0 (X11; Fedora;Linux x86; rv:60.0) Gecko/20100101 Firefox/60.0'
    # Same OS family as the cache -> no evidence.
    assert (
        analyzer.check_multiple_UAs(cached_ua, mozilla_ua, timestamp, profileid, twid, uid) is False
    )
    # Safari/macOS contradicts the cached Fedora/Linux -> evidence.
    assert (
        analyzer.check_multiple_UAs(cached_ua, SAFARI_UA, timestamp, profileid, twid, uid) is True
    )
1,767 | bboxes diou | # -*- coding: utf-8 -*-
'''
'''
import torch
import os, sys
from torch.nn import functional as F
import numpy as np
from packaging import version
# Public API of this module; every exported name must be defined below.
__all__ = [
    "bboxes_iou",
    "bboxes_giou",
    "bboxes_diou",
    "bboxes_ciou",
]
# torch.true_divide was introduced in torch 1.5; on older versions fall back
# to the plain '/' operator.  NOTE(review): presumably this shim exists
# because '/' on integer tensors behaved differently in old torch -- verify.
if version.parse(torch.__version__) >= version.parse('1.5.0'):
    def _true_divide(dividend, divisor):
        return torch.true_divide(dividend, divisor)
else:
    def _true_divide(dividend, divisor):
        return dividend / divisor
def bboxes_iou(bboxes_a, bboxes_b, fmt='voc', iou_type='iou'):
    """Calculate the Intersection of Unions (IoUs) between bounding boxes.

    IoU is calculated as a ratio of area of the intersection
    and area of the union.

    Args:
        bboxes_a (Tensor): shape :math:`(N, 4)`, one box per row.
        bboxes_b (Tensor): shape :math:`(K, 4)`, one box per row.
        fmt (str): box layout -- 'voc' (xmin, ymin, xmax, ymax),
            'yolo' (xcen, ycen, w, h) or 'coco' (xmin, ymin, w, h).
            NOTE(review): any other value leaves the intermediates undefined
            and raises NameError further down -- confirm callers always pass
            one of the three.
        iou_type (str): 'iou', 'giou', 'diou' or 'ciou'; the computation is
            progressive, each variant building on the previous one.

    Returns:
        Tensor of shape :math:`(N, K)`; element :math:`(n, k)` is the
        requested metric between box :math:`n` of ``bboxes_a`` and box
        :math:`k` of ``bboxes_b``.

    from: https://github.com/chainer/chainercv
    """
    if bboxes_a.shape[1] != 4 or bboxes_b.shape[1] != 4:
        raise IndexError
    N, K = bboxes_a.shape[0], bboxes_b.shape[0]
    # --- pairwise intersection rectangle (broadcast over N x K) ---
    if fmt.lower() == 'voc':  # xmin, ymin, xmax, ymax
        # top left
        tl_intersect = torch.max(
            bboxes_a[:, np.newaxis, :2],
            bboxes_b[:, :2]
        ) # of shape `(N,K,2)`
        # bottom right
        br_intersect = torch.min(
            bboxes_a[:, np.newaxis, 2:],
            bboxes_b[:, 2:]
        )
        bb_a = bboxes_a[:, 2:] - bboxes_a[:, :2]
        bb_b = bboxes_b[:, 2:] - bboxes_b[:, :2]
        # bb_* can also be seen vectors representing box_width, box_height
    elif fmt.lower() == 'yolo':  # xcen, ycen, w, h
        # top left
        tl_intersect = torch.max(
            bboxes_a[:, np.newaxis, :2] - bboxes_a[:, np.newaxis, 2:] / 2,
            bboxes_b[:, :2] - bboxes_b[:, 2:] / 2
        )
        # bottom right
        br_intersect = torch.min(
            bboxes_a[:, np.newaxis, :2] + bboxes_a[:, np.newaxis, 2:] / 2,
            bboxes_b[:, :2] + bboxes_b[:, 2:] / 2
        )
        bb_a = bboxes_a[:, 2:]
        bb_b = bboxes_b[:, 2:]
    elif fmt.lower() == 'coco':  # xmin, ymin, w, h
        # top left
        tl_intersect = torch.max(
            bboxes_a[:, np.newaxis, :2],
            bboxes_b[:, :2]
        )
        # bottom right
        br_intersect = torch.min(
            bboxes_a[:, np.newaxis, :2] + bboxes_a[:, np.newaxis, 2:],
            bboxes_b[:, :2] + bboxes_b[:, 2:]
        )
        bb_a = bboxes_a[:, 2:]
        bb_b = bboxes_b[:, 2:]
    area_a = torch.prod(bb_a, 1)
    area_b = torch.prod(bb_b, 1)
    # torch.prod(input, dim, keepdim=False, dtype=None) -> Tensor
    # Returns the product of each row of the input tensor in the given dimension dim
    # if tl, br does not form a nondegenerate squre, then the corr. element in the `prod` would be 0
    en = (tl_intersect < br_intersect).type(tl_intersect.type()).prod(dim=2)  # shape `(N,K,2)` ---> shape `(N,K)`
    # `en` zeroes out pairs whose "intersection" rectangle is degenerate.
    area_intersect = torch.prod(br_intersect - tl_intersect, 2) * en  # * ((tl < br).all())
    area_union = (area_a[:, np.newaxis] + area_b - area_intersect)
    iou = _true_divide(area_intersect, area_union)
    if iou_type.lower() == 'iou':
        return iou
    # --- GIoU: additionally needs the smallest enclosing (covering) box ---
    if fmt.lower() == 'voc':  # xmin, ymin, xmax, ymax
        # top left
        tl_union = torch.min(
            bboxes_a[:, np.newaxis, :2],
            bboxes_b[:, :2]
        ) # of shape `(N,K,2)`
        # bottom right
        br_union = torch.max(
            bboxes_a[:, np.newaxis, 2:],
            bboxes_b[:, 2:]
        )
    elif fmt.lower() == 'yolo':  # xcen, ycen, w, h
        # top left
        tl_union = torch.min(
            bboxes_a[:, np.newaxis, :2] - bboxes_a[:, np.newaxis, 2:] / 2,
            bboxes_b[:, :2] - bboxes_b[:, 2:] / 2
        )
        # bottom right
        br_union = torch.max(
            bboxes_a[:, np.newaxis, :2] + bboxes_a[:, np.newaxis, 2:] / 2,
            bboxes_b[:, :2] + bboxes_b[:, 2:] / 2
        )
    elif fmt.lower() == 'coco':  # xmin, ymin, w, h
        # top left
        tl_union = torch.min(
            bboxes_a[:, np.newaxis, :2],
            bboxes_b[:, :2]
        )
        # bottom right
        br_union = torch.max(
            bboxes_a[:, np.newaxis, :2] + bboxes_a[:, np.newaxis, 2:],
            bboxes_b[:, :2] + bboxes_b[:, 2:]
        )
    # c for covering, of shape `(N,K,2)`
    # the last dim is box width, box hight
    bboxes_c = br_union - tl_union
    area_covering = torch.prod(bboxes_c, 2)  # shape `(N,K)`
    giou = iou - _true_divide(area_covering - area_union, area_covering)
    if iou_type.lower() == 'giou':
        return giou
    # --- DIoU: additionally penalizes centre distance over the covering
    # box's diagonal length ---
    if fmt.lower() == 'voc':  # xmin, ymin, xmax, ymax
        centre_a = (bboxes_a[..., 2 :] + bboxes_a[..., : 2]) / 2
        centre_b = (bboxes_b[..., 2 :] + bboxes_b[..., : 2]) / 2
    elif fmt.lower() == 'yolo':  # xcen, ycen, w, h
        centre_a = bboxes_a[..., : 2]
        centre_b = bboxes_b[..., : 2]
    elif fmt.lower() == 'coco':  # xmin, ymin, w, h
        # NOTE(review): for (xmin, ymin, w, h) the centre should be
        # xymin + wh/2, but this computes wh + xymin/2 -- looks like a bug;
        # verify against callers using fmt='coco' with diou/ciou.
        centre_a = bboxes_a[..., 2 :] + bboxes_a[..., : 2]/2
        centre_b = bboxes_b[..., 2 :] + bboxes_b[..., : 2]/2
    centre_dist = torch.norm(centre_a[:, np.newaxis] - centre_b, p='fro', dim=2)
    diag_len = torch.norm(bboxes_c, p='fro', dim=2)
    diou = iou - _true_divide(centre_dist.pow(2), diag_len.pow(2))
    if iou_type.lower() == 'diou':
        return diou
    """ the legacy custom cosine similarity:
    # bb_a of shape `(N,2)`, bb_b of shape `(K,2)`
    v = torch.einsum('nm,km->nk', bb_a, bb_b)
    v = _true_divide(v, (torch.norm(bb_a, p='fro', dim=1)[:,np.newaxis] * torch.norm(bb_b, p='fro', dim=1)))
    # avoid nan for torch.acos near \pm 1
    # https://github.com/pytorch/pytorch/issues/8069
    eps = 1e-7
    v = torch.clamp(v, -1+eps, 1-eps)
    """
    # --- CIoU: adds an aspect-ratio consistency term, weighted by alpha ---
    v = F.cosine_similarity(bb_a[:,np.newaxis,:], bb_b, dim=-1)
    v = (_true_divide(2*torch.acos(v), np.pi)).pow(2)
    # alpha is a trade-off weight; detached from the graph (no_grad) and
    # zeroed where IoU < 0.5.
    with torch.no_grad():
        alpha = (_true_divide(v, 1-iou+v)) * ((iou>=0.5).type(iou.type()))
    ciou = diou - alpha * v
    if iou_type.lower() == 'ciou':
        return ciou
def bboxes_giou(bboxes_a, bboxes_b, fmt='voc'):
    """Generalized IoU: delegates to :func:`bboxes_iou` with ``iou_type='giou'``."""
    return bboxes_iou(bboxes_a, bboxes_b, fmt=fmt, iou_type='giou')
def bboxes_diou(bboxes_a, bboxes_b, fmt='voc'):
    """Distance IoU: delegates to :func:`bboxes_iou` with ``iou_type='diou'``.

    Named ``bboxes_diou`` to match the name exported via ``__all__``.
    """
    return bboxes_iou(bboxes_a, bboxes_b, fmt, 'diou')
def bboxes_ciou(bboxes_a, bboxes_b, fmt='voc'):
    """Complete IoU: delegates to :func:`bboxes_iou` with ``iou_type='ciou'``."""
    return bboxes_iou(bboxes_a, bboxes_b, fmt=fmt, iou_type='ciou')
1,768 | test caches | # SPDX-License-Identifier: MIT OR Apache-2.0
# This file is dual licensed under the terms of the Apache License, Version
# 2.0, and the MIT License. See the LICENSE file in the root of this
# repository for complete details.
import pytest
from structlog import get_config, get_logger, reset_defaults, testing
from structlog.testing import (
CapturedCall,
CapturingLogger,
CapturingLoggerFactory,
LogCapture,
ReturnLogger,
ReturnLoggerFactory,
)
class TestCaptureLogs:
    @classmethod
    def teardown_class(cls):
        # capture_logs patches structlog's global config; restore pristine
        # defaults so later test modules are unaffected.
        reset_defaults()

    def test_captures_logs(self):
        """
        Log entries are captured and retain their structure.
        """
        with testing.capture_logs() as logs:
            get_logger().bind(x="y").info("hello", answer=42)
            get_logger().bind(a="b").info("goodbye", foo={"bar": "baz"})

        assert [
            {"event": "hello", "log_level": "info", "x": "y", "answer": 42},
            {
                "a": "b",
                "event": "goodbye",
                "log_level": "info",
                "foo": {"bar": "baz"},
            },
        ] == logs

    def get_active_procs(self):
        # Helper (not a test): the currently configured processor chain.
        return get_config()["processors"]

    def test_restores_processors_on_success(self):
        """
        Processors are patched within the contextmanager and restored on
        exit.
        """
        orig_procs = self.get_active_procs()
        assert len(orig_procs) > 1

        with testing.capture_logs():
            modified_procs = self.get_active_procs()
            assert len(modified_procs) == 1
            assert isinstance(modified_procs[0], LogCapture)

        restored_procs = self.get_active_procs()
        assert orig_procs is restored_procs
        assert len(restored_procs) > 1

    def test_restores_processors_on_error(self):
        """
        Processors are restored even on errors.
        """
        orig_procs = self.get_active_procs()

        with pytest.raises(NotImplementedError), testing.capture_logs():
            raise NotImplementedError("from test")

        assert orig_procs is self.get_active_procs()

    def test_captures_bound_loggers(self):
        """
        Even logs from already bound loggers are captured and their processors
        restored on exit.

        (Renamed from ``test_captures_bound_logers`` -- typo fix.)
        """
        logger = get_logger("bound").bind(foo="bar")
        logger.info("ensure logger is bound")

        with testing.capture_logs() as logs:
            logger.info("hello", answer=42)

        assert logs == [
            {
                "event": "hello",
                "answer": 42,
                "foo": "bar",
                "log_level": "info",
            }
        ]
class TestReturnLogger:
    def test_stdlib_methods_support(self, stdlib_log_method):
        """
        ReturnLogger implements methods of stdlib loggers.
        """
        method = getattr(ReturnLogger(), stdlib_log_method)

        assert method("hello") == "hello"

    def test_return_logger(self):
        """
        Return logger returns exactly what's sent in.
        """
        payload = ["hello"]

        assert ReturnLogger().msg(payload) is payload
class TestReturnLoggerFactory:
    def test_builds_returnloggers(self):
        """
        Factory returns ReturnLoggers.
        """
        f = ReturnLoggerFactory()

        assert isinstance(f(), ReturnLogger)

    def test_caches(self):
        """
        There's no need to have several loggers so we return the same one on
        each call.
        """
        f = ReturnLoggerFactory()

        assert f() is f()

    def test_ignores_args(self):
        """
        ReturnLogger doesn't take positional arguments. If any are passed to
        the factory, they are not passed to the logger.
        """
        ReturnLoggerFactory()(1, 2, 3)
class TestCapturingLogger:
    def test_factory_caches(self):
        """
        CapturingLoggerFactory returns one CapturingLogger over and over again.
        """
        factory = CapturingLoggerFactory()

        assert factory() is factory()

    def test_repr(self):
        """
        repr says how many calls there were.
        """
        captured = CapturingLogger()
        captured.info("hi")
        captured.error("yolo")

        assert repr(captured) == "<CapturingLogger with 2 call(s)>"

    def test_captures(self):
        """
        All calls to all names are captured.
        """
        captured = CapturingLogger()
        captured.info("hi", val=42)
        captured.trololo("yolo", foo={"bar": "baz"})

        expected = [
            CapturedCall(method_name="info", args=("hi",), kwargs={"val": 42}),
            CapturedCall(
                method_name="trololo",
                args=("yolo",),
                kwargs={"foo": {"bar": "baz"}},
            ),
        ]
        assert expected == captured.calls
1,769 | write val | from SCons.Defaults import SharedCheck, ProgScan
from SCons.Script.SConscript import SConsEnvironment
def TOOL_BUNDLE(env):
    """defines env.LinkBundle() for linking bundles on Darwin/OSX, and
    env.MakeBundle() for installing a bundle into its dir.
    A bundle has this structure: (filenames are case SENSITIVE)
    sapphire.bundle/
      Contents/
        Info.plist  (an XML key->value database; defined by BUNDLE_INFO_PLIST)
        PkgInfo  (trivially short; defined by value of BUNDLE_PKGINFO)
        MacOS/
          executable  (the executable or shared lib, linked with Bundle())
        Resources/
    """
    # Idempotent: skip if this tool was already registered on the env.
    # NOTE(review): `tools_verbose`, `Error`, `basename`, `join` and `string`
    # are not defined in this file -- presumably module-level imports/globals
    # elsewhere; verify before reuse.
    if 'BUNDLE' in env['TOOLS']: return
    if env['osx'] == 1:
        if tools_verbose:
            print(" running tool: TOOL_BUNDLE")
        env.Append(TOOLS = 'BUNDLE')
        # This is like the regular linker, but uses different vars.
        # XXX: NOTE: this may be out of date now, scons 0.96.91 has some bundle linker stuff built in.
        # Check the docs before using this.
        LinkBundle = SCons.Builder.Builder(action=[SharedCheck, "$BUNDLECOM"],
                                           emitter="$SHLIBEMITTER",
                                           prefix = '$BUNDLEPREFIX',
                                           suffix = '$BUNDLESUFFIX',
                                           target_scanner = ProgScan,
                                           src_suffix = '$BUNDLESUFFIX',
                                           src_builder = 'SharedObject')
        env['BUILDERS']['LinkBundle'] = LinkBundle
        # Construction variables consumed by $BUNDLECOM below.
        env['BUNDLEEMITTER'] = None
        env['BUNDLEPREFIX'] = ''
        env['BUNDLESUFFIX'] = ''
        env['BUNDLEDIRSUFFIX'] = '.bundle'
        env['FRAMEWORKS'] = ['-framework Carbon', '-framework System']
        env['BUNDLE'] = '$SHLINK'
        env['BUNDLEFLAGS'] = ' -bundle'
        env['BUNDLECOM'] = '$BUNDLE $BUNDLEFLAGS -o ${TARGET} $SOURCES $_LIBDIRFLAGS $_LIBFLAGS $FRAMEWORKS'
        # This requires some other tools:
        TOOL_WRITE_VAL(env)
        TOOL_SUBST(env)
        # Common type codes are BNDL for generic bundle and APPL for application.
        def MakeBundle(env, bundledir, app,
                       key, info_plist,
                       typecode='BNDL', creator='SapP',
                       icon_file='#macosx-install/sapphire-icon.icns',
                       subst_dict=None,
                       resources=[]):
            """Install a bundle into its dir, in the proper format.

            NOTE(review): the mutable default ``resources=[]`` is shared
            across calls and ``resources.append(icon_file)`` below mutates
            it, so repeated calls accumulate icon files -- likely a bug.
            """
            # Substitute construction vars:
            # NOTE(review): this loop is a no-op -- rebinding the loop
            # variable `a` does not update the original names.
            for a in [bundledir, key, info_plist, icon_file, typecode, creator]:
                a = env.subst(a)
            if SCons.Util.is_List(app):
                app = app[0]
            if SCons.Util.is_String(app):
                app = env.subst(app)
                appbase = basename(app)
            else:
                appbase = basename(str(app))
            if not ('.' in bundledir):
                bundledir += '.$BUNDLEDIRSUFFIX'
            bundledir = env.subst(bundledir) # substitute again
            # NOTE(review): `string.rfind` is Python-2-era; on Python 3 this
            # would need `bundledir.rfind('.')`.
            suffix=bundledir[string.rfind(bundledir,'.'):]
            if (suffix=='.app' and typecode != 'APPL' or
                suffix!='.app' and typecode == 'APPL'):
                raise Error("MakeBundle: inconsistent dir suffix %s and type code %s: app bundles should end with .app and type code APPL."%(suffix, typecode))
            if subst_dict is None:
                subst_dict={'%SHORTVERSION%': '$VERSION_NUM',
                            '%LONGVERSION%': '$VERSION_NAME',
                            '%YEAR%': '$COMPILE_YEAR',
                            '%BUNDLE_EXECUTABLE%': appbase,
                            '%ICONFILE%': basename(icon_file),
                            '%CREATOR%': creator,
                            '%TYPE%': typecode,
                            '%BUNDLE_KEY%': key}
            env.Install(bundledir+'/Contents/MacOS', app)
            # Info.plist gets placeholder substitution via TOOL_SUBST.
            f=env.SubstInFile(bundledir+'/Contents/Info.plist', info_plist,
                              SUBST_DICT=subst_dict)
            env.Depends(f,SCons.Node.Python.Value(key+creator+typecode+env['VERSION_NUM']+env['VERSION_NAME']))
            # PkgInfo is just the 8-character type+creator code.
            env.WriteVal(target=bundledir+'/Contents/PkgInfo',
                         source=SCons.Node.Python.Value(typecode+creator))
            resources.append(icon_file)
            for r in resources:
                if SCons.Util.is_List(r):
                    env.InstallAs(join(bundledir+'/Contents/Resources',
                                       r[1]),
                                  r[0])
                else:
                    env.Install(bundledir+'/Contents/Resources', r)
            return [ SCons.Node.FS.default_fs.Dir(bundledir) ]
        # This is not a regular Builder; it's a wrapper function.
        # So just make it available as a method of Environment.
        SConsEnvironment.MakeBundle = MakeBundle
def TOOL_WRITE_VAL(env):
    """Register the ``WriteVal`` builder: dump a node's contents into a file."""
    if tools_verbose:
        print(" running tool: TOOL_WRITE_VAL")
    env.Append(TOOLS = 'WRITE_VAL')
    def write_val(target, source, env):
        """Write the contents of the first source into the target.
        source is usually a Value() node, but could be a file."""
        # Binary mode: get_contents() returns raw bytes.  `with` guarantees
        # the file is closed even if the write fails.
        with open(str(target[0]), 'wb') as f:
            f.write(source[0].get_contents())
    env['BUILDERS']['WriteVal'] = Builder(action=write_val)
1,770 | infer | import argparse
import grpc
import inference_pb2
import inference_pb2_grpc
import management_pb2
import management_pb2_grpc
def get_inference_stub():
    """Create a stub for TorchServe's inference API on localhost:7070."""
    return inference_pb2_grpc.InferenceAPIsServiceStub(
        grpc.insecure_channel("localhost:7070")
    )
def get_management_stub():
    """Create a stub for TorchServe's management API on localhost:7071."""
    return management_pb2_grpc.ManagementAPIsServiceStub(
        grpc.insecure_channel("localhost:7071")
    )
def infer(stub, model_name, model_input, metadata):
    """Run a single (non-streaming) prediction and print the decoded result.

    `stub` inference API stub, `model_name` registered model to query,
    `model_input` path of the input payload file, `metadata` gRPC metadata
    tuples.  Exits the process with status 1 on a gRPC error, printing the
    error details first (consistent with register/unregister).
    """
    with open(model_input, "rb") as f:
        data = f.read()

    input_data = {"data": data}
    response = stub.Predictions(
        inference_pb2.PredictionsRequest(model_name=model_name, input=input_data),
        metadata=metadata,
    )

    try:
        prediction = response.prediction.decode("utf-8")
        print(prediction)
    except grpc.RpcError as e:
        # Was a silent exit(1); surface the gRPC error details first.
        print(str(e.details()))
        exit(1)


# Backward-compatible alias for callers still using the placeholder name.
METHOD_NAME = infer
def infer_stream(stub, model_name, model_input, metadata):
    """Run a streaming prediction and print each decoded chunk as it arrives.

    Exits the process with status 1 on a gRPC error, printing the error
    details first (consistent with register/unregister).
    """
    with open(model_input, "rb") as f:
        data = f.read()

    input_data = {"data": data}
    responses = stub.StreamPredictions(
        inference_pb2.PredictionsRequest(model_name=model_name, input=input_data),
        metadata=metadata,
    )

    try:
        for resp in responses:
            prediction = resp.prediction.decode("utf-8")
            print(prediction)
    except grpc.RpcError as e:
        # Was a silent exit(1); surface the gRPC error details first.
        print(str(e.details()))
        exit(1)
def register(stub, model_name, mar_set_str):
    """Register a model, preferring a local mar file when one is listed."""
    mar_set = set(mar_set_str.split(",")) if mar_set_str else set()

    marfile = f"{model_name}.mar"
    print(f"## Check {marfile} in mar_set :", mar_set)
    if marfile not in mar_set:
        # Not available locally: fall back to the public S3 model store.
        marfile = "https://torchserve.s3.amazonaws.com/mar_files/{}.mar".format(
            model_name
        )

    print(f"## Register marfile: {marfile}\n")
    params = {
        "url": marfile,
        "initial_workers": 1,
        "synchronous": True,
        "model_name": model_name,
    }
    try:
        stub.RegisterModel(management_pb2.RegisterModelRequest(**params))
    except grpc.RpcError as e:
        print(f"Failed to register model {model_name}.")
        print(str(e.details()))
        exit(1)
    else:
        print(f"Model {model_name} registered successfully")
def unregister(stub, model_name):
    """Unregister a model; exit the process with status 1 on failure."""
    request = management_pb2.UnregisterModelRequest(model_name=model_name)
    try:
        stub.UnregisterModel(request)
    except grpc.RpcError as e:
        print(f"Failed to unregister model {model_name}.")
        print(str(e.details()))
        exit(1)
    else:
        print(f"Model {model_name} unregistered successfully")
if __name__ == "__main__":
    # Shared positional argument: every subcommand takes a model name.
    parent_parser = argparse.ArgumentParser(add_help=False)
    parent_parser.add_argument(
        "model_name",
        type=str,
        default=None,
        help="Name of the model used.",
    )
    parser = argparse.ArgumentParser(
        description="TorchServe gRPC client",
        formatter_class=argparse.RawTextHelpFormatter,
    )
    # One subparser per action; each inherits model_name from parent_parser.
    subparsers = parser.add_subparsers(help="Action", dest="action")
    infer_action_parser = subparsers.add_parser(
        "infer", parents=[parent_parser], add_help=False
    )
    infer_stream_action_parser = subparsers.add_parser(
        "infer_stream", parents=[parent_parser], add_help=False
    )
    register_action_parser = subparsers.add_parser(
        "register", parents=[parent_parser], add_help=False
    )
    unregister_action_parser = subparsers.add_parser(
        "unregister", parents=[parent_parser], add_help=False
    )
    infer_action_parser.add_argument(
        "model_input", type=str, default=None, help="Input for model for inferencing."
    )
    infer_stream_action_parser.add_argument(
        "model_input",
        type=str,
        default=None,
        help="Input for model for stream inferencing.",
    )
    register_action_parser.add_argument(
        "mar_set",
        type=str,
        default=None,
        nargs="?",
        help="Comma separated list of mar models to be loaded using [model_name=]model_location format.",
    )
    args = parser.parse_args()

    # Fixed metadata sent with every RPC.
    metadata = (("protocol", "gRPC"), ("session_id", "12345"))
    # Dispatch to the handler for the selected action; inference actions use
    # the inference stub, management actions the management stub.
    if args.action == "infer":
        METHOD_NAME(get_inference_stub(), args.model_name, args.model_input, metadata)
    elif args.action == "infer_stream":
        infer_stream(get_inference_stub(), args.model_name, args.model_input, metadata)
    elif args.action == "register":
        register(get_management_stub(), args.model_name, args.mar_set)
    elif args.action == "unregister":
        unregister(get_management_stub(), args.model_name)
1,771 | create unsigned transaction | from functools import (
partial,
)
from typing import (
Tuple,
)
from eth_keys.datatypes import (
PrivateKey,
)
from eth_typing import (
Address,
)
import rlp
from eth._utils.transactions import (
V_OFFSET,
IntrinsicGasSchedule,
calculate_intrinsic_gas,
create_transaction_signature,
extract_transaction_sender,
validate_transaction_signature,
)
from eth.abc import (
ReceiptAPI,
SignedTransactionAPI,
)
from eth.constants import (
CREATE_CONTRACT_ADDRESS,
GAS_TX,
GAS_TXDATANONZERO,
GAS_TXDATAZERO,
)
from eth.rlp.logs import (
Log,
)
from eth.rlp.receipts import (
Receipt,
)
from eth.rlp.transactions import (
BaseTransaction,
BaseUnsignedTransaction,
)
from eth.validation import (
validate_canonical_address,
validate_gte,
validate_is_bytes,
validate_is_integer,
validate_lt_secpk1n,
validate_lte,
validate_uint64,
validate_uint256,
)
# Frontier charges a flat per-transaction cost plus per-byte data costs;
# contract creation carries no extra surcharge yet (gas_txcreate=0).
FRONTIER_TX_GAS_SCHEDULE = IntrinsicGasSchedule(
    gas_tx=GAS_TX,
    gas_txcreate=0,
    gas_txdatazero=GAS_TXDATAZERO,
    gas_txdatanonzero=GAS_TXDATANONZERO,
)
# Intrinsic-gas calculator specialized to the Frontier schedule.
frontier_get_intrinsic_gas = partial(calculate_intrinsic_gas, FRONTIER_TX_GAS_SCHEDULE)
class FrontierTransaction(BaseTransaction):
    """Signed transaction under the Frontier fork rules."""

    @property
    def y_parity(self) -> int:
        # Frontier encodes the signature parity directly as v = parity + 27.
        return self.v - V_OFFSET

    @property
    def v_min(self) -> int:
        return V_OFFSET

    @property
    def v_max(self) -> int:
        return V_OFFSET + 1

    def validate(self) -> None:
        """Validate field ranges and signature bounds; raise on failure."""
        validate_uint64(self.nonce, title="Transaction.nonce")
        validate_uint256(self.gas_price, title="Transaction.gas_price")
        validate_uint256(self.gas, title="Transaction.gas")
        if self.to != CREATE_CONTRACT_ADDRESS:
            validate_canonical_address(self.to, title="Transaction.to")
        validate_uint256(self.value, title="Transaction.value")
        validate_is_bytes(self.data, title="Transaction.data")

        validate_uint256(self.v, title="Transaction.v")
        validate_uint256(self.r, title="Transaction.r")
        validate_uint256(self.s, title="Transaction.s")

        # r and s must lie in [1, secp256k1 order); v in {v_min, v_max}.
        validate_lt_secpk1n(self.r, title="Transaction.r")
        validate_gte(self.r, minimum=1, title="Transaction.r")
        validate_lt_secpk1n(self.s, title="Transaction.s")
        validate_gte(self.s, minimum=1, title="Transaction.s")

        validate_gte(self.v, minimum=self.v_min, title="Transaction.v")
        validate_lte(self.v, maximum=self.v_max, title="Transaction.v")

        super().validate()

    def check_signature_validity(self) -> None:
        validate_transaction_signature(self)

    def get_sender(self) -> Address:
        return extract_transaction_sender(self)

    def get_intrinsic_gas(self) -> int:
        return frontier_get_intrinsic_gas(self)

    def get_message_for_signing(self) -> bytes:
        # The signed payload is the RLP encoding of the unsigned fields.
        return rlp.encode(
            FrontierUnsignedTransaction(
                nonce=self.nonce,
                gas_price=self.gas_price,
                gas=self.gas,
                to=self.to,
                value=self.value,
                data=self.data,
            )
        )

    @classmethod
    def create_unsigned_transaction(
        cls,
        *,
        nonce: int,
        gas_price: int,
        gas: int,
        to: Address,
        value: int,
        data: bytes
    ) -> "FrontierUnsignedTransaction":
        """Build the unsigned counterpart of this transaction type."""
        return FrontierUnsignedTransaction(nonce, gas_price, gas, to, value, data)

    @classmethod
    def new_transaction(
        cls,
        nonce: int,
        gas_price: int,
        gas: int,
        to: Address,
        value: int,
        data: bytes,
        v: int,
        r: int,
        s: int,
    ) -> SignedTransactionAPI:
        return cls(nonce, gas_price, gas, to, value, data, v, r, s)

    def make_receipt(
        self,
        status: bytes,
        gas_used: int,
        log_entries: Tuple[Tuple[bytes, Tuple[int, ...], bytes], ...],
    ) -> ReceiptAPI:
        # 'status' is a misnomer in Frontier. Until Byzantium, it is the
        # intermediate state root.
        logs = [Log(address, topics, data) for address, topics, data in log_entries]

        return Receipt(
            state_root=status,
            gas_used=gas_used,
            logs=logs,
        )

    # Old transactions are treated as setting both max-fees as the gas price
    @property
    def max_priority_fee_per_gas(self) -> int:
        return self.gas_price

    @property
    def max_fee_per_gas(self) -> int:
        return self.gas_price
class FrontierUnsignedTransaction(BaseUnsignedTransaction):
    """Unsigned Frontier transaction: payload fields without (v, r, s)."""
    def validate(self) -> None:
        # Field-range checks mirroring FrontierTransaction.validate, minus
        # the signature checks.  NOTE(review): gas_price only requires an
        # integer here while the signed type requires uint256 -- verify this
        # asymmetry is intentional.
        validate_uint64(self.nonce, title="Transaction.nonce")
        validate_is_integer(self.gas_price, title="Transaction.gas_price")
        validate_uint256(self.gas, title="Transaction.gas")
        if self.to != CREATE_CONTRACT_ADDRESS:
            validate_canonical_address(self.to, title="Transaction.to")
        validate_uint256(self.value, title="Transaction.value")
        validate_is_bytes(self.data, title="Transaction.data")
        super().validate()
    def as_signed_transaction(
        self,
        private_key: PrivateKey,
        chain_id: int = None,  # unused until SpuriousDragon
    ) -> FrontierTransaction:
        # Sign the RLP of this payload and attach the (v, r, s) signature.
        v, r, s = create_transaction_signature(self, private_key)
        return FrontierTransaction(
            nonce=self.nonce,
            gas_price=self.gas_price,
            gas=self.gas,
            to=self.to,
            value=self.value,
            data=self.data,
            v=v,
            r=r,
            s=s,
        )
    def get_intrinsic_gas(self) -> int:
        return frontier_get_intrinsic_gas(self)
    # Old transactions are treated as setting both max-fees as the gas price
    @property
    def max_priority_fee_per_gas(self) -> int:
        return self.gas_price
    @property
    def max_fee_per_gas(self) -> int:
        return self.gas_price
1,772 | load detectors | from __future__ import annotations
import logging
from typing import Any, Dict, List, Optional
import yaml
from checkov.common.bridgecrew.platform_integration import bc_integration
from checkov.common.util.file_utils import decompress_file_gzip_base64
def load_detectors() -> list[dict[str, Any]]:
    """Fetch secrets policies from the platform and convert them to detectors.

    Returns a (possibly empty) list of detector dicts; never None.
    """
    detectors: List[dict[str, Any]] = []
    try:
        customer_run_config_response = bc_integration.customer_run_config_response
        policies_list: List[dict[str, Any]] = []
        if customer_run_config_response:
            policies_list = customer_run_config_response.get('secretsPolicies', [])
    except Exception as e:
        logging.error(f"Failed to get detectors from customer_run_config_response, error: {e}")
        return []

    if policies_list:
        detectors = modify_secrets_policy_to_detectors(policies_list)
    if detectors:
        logging.info(f"Successfully loaded {len(detectors)} detectors from bc_integration")
    # Always return a list (previously fell through and returned None when
    # no detectors were found, violating the declared return type).
    return detectors
def modify_secrets_policy_to_detectors(policies_list: List[dict[str, Any]]) -> List[dict[str, Any]]:
    """Convert raw secrets policies into the detector format used by the scanner."""
    secrets_list = transforms_policies_to_detectors_list(policies_list)
    logging.debug(f"(modify_secrets_policy_to_detectors) len secrets_list = {len(secrets_list)}")
    return secrets_list
def add_to_custom_detectors(custom_detectors: List[Dict[str, Any]], name: str, check_id: str, regex: str,
                            is_custom: str, is_multiline: bool = False, supported_files: Optional[List[str]] = None) -> None:
    """Append one detector entry (in the scanner's expected shape) to the list."""
    entry: Dict[str, Any] = {
        'Name': name,
        'Check_ID': check_id,
        'Regex': regex,
        'isCustom': is_custom,
        'isMultiline': is_multiline,
        # Normalize a missing/empty file list to [].
        'supportedFiles': supported_files or [],
    }
    custom_detectors.append(entry)
def add_detectors_from_condition_query(custom_detectors: List[Dict[str, Any]], condition_query: Dict[str, Any],
                                       secret_policy: Dict[str, Any], check_id: str) -> bool:
    """Register detectors from a 'secrets' condition query.

    Returns True when at least one regex was added.
    """
    if condition_query['cond_type'] != 'secrets':
        return False
    regexes = condition_query['value']
    # A single regex may be given as a bare string.
    if type(regexes) is str:
        regexes = [regexes]
    added = False
    for regex in regexes:
        added = True
        add_to_custom_detectors(custom_detectors, secret_policy['title'], check_id, regex,
                                secret_policy['isCustom'])
    return added
def add_detectors_from_code(custom_detectors: List[Dict[str, Any]], code: str, secret_policy: Dict[str, Any],
                            check_id: str) -> bool:
    """Parse a YAML policy body and register one detector per regex value.

    Returns True when the body contained a plain (non-runnable) definition.
    """
    parsed = False
    code_dict = yaml.safe_load(code)
    if 'definition' in code_dict:
        definition = code_dict['definition']
        # Runnable payloads are handled elsewhere (get_runnable_plugins).
        if 'value' in definition and 'is_runnable' not in definition:
            parsed = True
            values = definition['value']
            # A single regex may be given as a bare string.
            if type(values) is str:
                values = [values]
            for regex in values:
                add_to_custom_detectors(
                    custom_detectors,
                    secret_policy['title'],
                    check_id,
                    regex,
                    secret_policy['isCustom'],
                    definition.get("multiline", False),
                    definition.get("supported_files", [])
                )
    return parsed
def transforms_policies_to_detectors_list(custom_secrets: List[Dict[str, Any]]) -> List[Dict[str, Any]]:
    """Convert each policy into detector entries, logging unparsable ones.

    Policies with a 'conditionQuery' are handled by
    add_detectors_from_condition_query; otherwise their YAML 'code' body is
    parsed by add_detectors_from_code.
    """
    custom_detectors: List[Dict[str, Any]] = []
    for secret_policy in custom_secrets:
        parsed = False
        check_id = secret_policy['checkovCheckId'] if secret_policy['checkovCheckId'] else \
            secret_policy['incidentId']
        code = secret_policy['code']
        # Fetch per policy: previously the variable was only reassigned when
        # the key was present, so a stale query from an earlier policy could
        # leak into later iterations and shadow their 'code' handling.
        condition_query = secret_policy.get('conditionQuery')
        if condition_query:
            parsed = add_detectors_from_condition_query(custom_detectors, condition_query, secret_policy, check_id)
        elif code:
            parsed = add_detectors_from_code(custom_detectors, code, secret_policy, check_id)
        if not parsed:
            logging.info(f"policy : {secret_policy} could not be parsed")
    return custom_detectors
def get_runnable_plugins(policies: List[Dict[str, Any]]) -> Dict[str, str]:
    """Map policy titles to their decoded runnable payloads, when present."""
    runnables: dict[str, str] = {}
    for policy in policies:
        code = policy['code']
        if not code:
            continue
        try:
            code_dict = yaml.safe_load(code)
            if 'definition' in code_dict:
                definition = code_dict['definition']
                if 'is_runnable' in definition and 'value' in definition:
                    encoded_payload = definition['value']
                    # The payload may be wrapped in a single-element list.
                    if isinstance(encoded_payload, list):
                        encoded_payload = encoded_payload[0]
                    decoded_payload = decompress_file_gzip_base64(encoded_payload)
                    name: str = policy['title']
                    runnables[name] = decoded_payload.decode('utf8')
        except Exception as e:
            logging.warning(f"Could not parse runnable policy {policy['title']} due to: {e}")
    return runnables
1,773 | connect | import asyncio
import time
from collections import defaultdict, deque
from robot.api import logger
from robot.api.deco import keyword, library
from gmqtt import Client as MQTTClient, Subscription
from gmqtt.mqtt.constants import MQTTv311
# Disable Robot Framework's automatic keyword discovery; only methods
# explicitly decorated with @keyword are exposed by this library.
ROBOT_AUTO_KEYWORDS = False
@library
class Client:
    """A keyword library for Robot Framework. It provides keywords for
    performing various operations on an MQTT broker. See http://mqtt.org/
    for more details on MQTT specification.

    This library uses gmqtt: Python async MQTT client. For more information
    on underlying methods and documentation, see:
    https://github.com/wialon/gmqtt
    """

    ROBOT_LIBRARY_SCOPE = "GLOBAL"
    ROBOT_LIBRARY_VERSION = "0.1.0"

    def __init__(self, reconnect_retries=5, reconnect_delay=0):
        """
        `reconnect_retries` number of reconnect attempts in case of lost
        connections, default to 5

        `reconnect_delay` delay between reconnect attempts in seconds,
        default to 0
        """
        self._client = None
        self._reconnect_retries = reconnect_retries
        self._reconnect_delay = reconnect_delay
        # Per-topic FIFO queues of received payloads, filled by _on_message.
        self._messages = defaultdict(deque)
        # Handshake events set by the gmqtt callbacks below.
        # NOTE(review): attribute names are misspelled ('subcribed'); kept
        # as-is because they are referenced throughout this class.
        self._subcribed = asyncio.Event()
        self._unsubcribed = asyncio.Event()
        self._recved = asyncio.Event()
        # Drives the async gmqtt API from synchronous keyword calls.
        # NOTE(review): asyncio.get_event_loop() is deprecated outside a
        # running loop on Python 3.10+ — confirm the target interpreter.
        self._run = asyncio.get_event_loop().run_until_complete

    @keyword
    def METHOD_NAME(
        self,
        host,
        port=1883,
        client_id="",
        username="",
        password="",
        keepalive=60,
        clean_session=True,
    ):
        """Connect to an MQTT broker. This is a pre-requisite step for publish
        and subscribe keywords.

        `host` MQTT broker host

        `port` broker port (default 1883)

        `client_id` if not specified, a random id is generated

        `username` user name, default to ""

        `password` user password, default to ""

        `keepalive` keepalive in seconds

        `clean_session` specifies the clean session flag for the connection

        NOTE(review): `username`, `password` and `keepalive` are accepted but
        currently never passed to the underlying gmqtt client.

        Examples:

        Connect to a broker with default port and client id
        | Connect | 127.0.0.1 |

        Connect to a broker by specifying the port and client id explicitly
        | Connect | 127.0.0.1 | 1883 | test.client |

        Connect to a broker with clean session flag set to false
        | Connect | 127.0.0.1 | clean_session=${false} |
        """
        client = MQTTClient(client_id, clean_session=clean_session)
        # Wire up all gmqtt callbacks before initiating the connection.
        client.on_connect = self._on_connect
        client.on_message = self._on_message
        client.on_disconnect = self._on_disconnect
        client.on_subscribe = self._on_subscribe
        client.on_unsubscribe = self._on_unsubscribe
        client.set_config(
            {
                "reconnect_retries": self._reconnect_retries,
                "reconnect_delay": self._reconnect_delay,
            }
        )
        self._client = client
        self._run(self._client.METHOD_NAME(host, port, version=MQTTv311))

    @keyword
    def disconnect(self):
        """Disconnect from MQTT Broker.

        Example:
        | Disconnect |
        """
        self._run(self._client.disconnect())
        # Drop any messages buffered during the session.
        self._messages.clear()

    @keyword
    def unsubscribe_all_and_clear_messages(self):
        """Unsubscribe all subscriptions and clear all messages in queue."""
        # Topics with queued messages are exactly those we subscribed to.
        for topic in self._messages:
            self.unsubscribe(topic)
        self._messages.clear()

    @keyword
    def subscribe(self, topic, qos=0, timeout=1):
        """Subscribe to a topic and return a message payload received
        within the specified time.

        `topic` topic to subscribe to

        `qos` quality of service for the subscription, default to QoS 0

        `timeout` duration of subscription, default to 1 second

        Examples:

        Subscribe and get a list of all messages received within 5 seconds
        | ${messages}= | Subscribe | test/test | qos=1 | timeout=5 |
        """
        # Enforce a minimum 1-second wait for the SUBACK.
        timeout = 1 if timeout < 1 else timeout
        self._client.subscribe(Subscription(topic, qos))
        # _wait returns True on timeout (no SUBACK received in time).
        if self._wait(self._subcribed, timeout=timeout):
            raise Exception(
                "MQTT client subscribe timeout topic=%s qos=%s" % (topic, qos))
        logger.info("MQTT client subscribed to topic=%s qos=%s" % (topic, qos))

    @keyword
    def unsubscribe(self, topic, timeout=1):
        """Unsubscribe the client from the specified topic.

        `topic` topic to unsubscribe from

        `timeout` duration of unsubscription, default to 1 second

        Example:
        | Unsubscribe | test/mqtt_test |
        """
        timeout = 1 if timeout < 1 else timeout
        self._client.unsubscribe(topic)
        # _wait returns True on timeout (no UNSUBACK received in time).
        if self._wait(self._unsubcribed, timeout=timeout):
            raise Exception("MQTT client unsubscribe timeout topic=%s" % topic)
        logger.info("MQTT client unsubscribed topic=%s" % topic)

    @keyword
    def publish(self, topic, payload, qos=0, retain=False):
        """Publish a message to a topic with specified qos and retained flag.
        It is required that a connection has been established using `Connect`
        keyword before using this keyword.

        `topic` topic to which the message will be published

        `payload` message payload to publish

        `qos` qos of the message, default to QoS 0

        `retain` retained flag

        Examples:
        | Publish | test/test | test message | 1 | ${false} |
        """
        self._client.publish(topic, payload, qos=qos, retain=retain)

    @keyword
    def listen(self, topic, timeout=1):
        """Listen to a topic and return a message payload received within the
        specified time. Requires an Subscribe to have been called previously.

        `topic` topic to listen to

        `timeout` duration to listen, default to 1 second

        Returns the oldest queued payload for `topic`, or None if nothing
        arrives before the timeout.

        Examples:

        Listen and get a message received within 5 seconds
        | ${messages}= | Listen | test/test | timeout=5 |
        """
        timeout = 1 if timeout < 1 else timeout
        start = time.time()
        while True:
            # Drain the oldest queued message for this topic, if any.
            if topic in self._messages and self._messages[topic]:
                msg = self._messages[topic].popleft()
                return msg
            # Otherwise wait for the next message event or the deadline;
            # falls through (returning None) when the overall timeout expires.
            if (
                self._wait(self._recved, timeout=timeout)
                or (time.time() - start) >= timeout
            ):
                break

    def _wait(self, event, timeout):
        """Waits for the given event within timeout.
        Returns true on timeout, false otherwise."""
        try:
            self._run(asyncio.wait_for(event.wait(), timeout))
            # Only reached when the event fired: reset it for the next wait.
            event.clear()
        except asyncio.TimeoutError:
            return True
        return False

    # --- gmqtt callbacks (invoked by the client's event loop) ---

    def _on_connect(self, client, flags, rc, properties):
        logger.info("MQTT client connected")

    def _on_message(self, client, topic, payload, qos, properties):
        logger.info("MQTT client received: topic=%s qos=%s payload=%s" %
                    (topic, qos, payload))
        # Queue the decoded payload and wake up any pending listen().
        self._messages[topic].append(payload.decode())
        self._recved.set()
        return 0

    def _on_disconnect(self, client, packet, exc=None):
        logger.info("MQTT client disconnected")

    def _on_subscribe(self, client, mid, qos, properties):
        self._subcribed.set()

    def _on_unsubscribe(self, client, mid, granted_qos):
        self._unsubcribed.set()
if __name__ == "__main__":
    # Manual smoke test: publish three messages to a local broker and read
    # them back. Requires an MQTT broker listening on 127.0.0.1:1883.
    # import logging
    # logging.basicConfig(level=logging.DEBUG)
    host = "127.0.0.1"
    topic = "test/test_mqtt"
    client = Client()
    client.METHOD_NAME(host)
    client.subscribe(topic)
    for i in range(3):
        client.publish(topic, "hello world %s" % i)
        msg = client.listen(topic)
        print("MQTT client receive: topic=%s msg=%s" % (topic, msg))
    client.unsubscribe(topic)
    client.disconnect()
1,774 | set safety text | #!/usr/bin/env python
# -*- coding: utf-8 -*-
## Copyright (C) 2021 University of Oxford
##
## This file is part of Cockpit.
##
## Cockpit is free software: you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation, either version 3 of the License, or
## (at your option) any later version.
##
## Cockpit is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Cockpit. If not, see <http://www.gnu.org/licenses/>.
## Copyright 2013, The Regents of University of California
##
## Redistribution and use in source and binary forms, with or without
## modification, are permitted provided that the following conditions
## are met:
##
## 1. Redistributions of source code must retain the above copyright
## notice, this list of conditions and the following disclaimer.
##
## 2. Redistributions in binary form must reproduce the above copyright
## notice, this list of conditions and the following disclaimer in
## the documentation and/or other materials provided with the
## distribution.
##
## 3. Neither the name of the copyright holder nor the names of its
## contributors may be used to endorse or promote products derived
## from this software without specific prior written permission.
##
## THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
## "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
## LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
## FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
## COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
## INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
## BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
## LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
## CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
## LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
## ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
## POSSIBILITY OF SUCH DAMAGE.
import cockpit.gui.guiUtils
import cockpit.interfaces.stageMover
import wx
## @package safetyMinDialog.py
# This package contains the SafetyMin_Dialog class and associated constants and
# functions.
## Altitude for slides.
SLIDE_SAFETY = 7300
## Altitude for dishes.
DISH_SAFETY = 5725
## This class provides a simple wrapper around the interfaces.stageMover's
# safety functionality.
# Note that unlike most
# dialogs, this one does not save the user's settings; instead, it always
# shows the current safety min as the default setting. This is to keep
# users from blindly setting the safety min to what they always use;
# we want them to think about what they're doing.
class SafetyMinDialog(wx.Dialog):
    """Dialog for setting the stage's minimum allowed Z altitude (soft limit)."""

    def __init__(
            self, parent, size = wx.DefaultSize, pos = wx.DefaultPosition,
            style=wx.DEFAULT_DIALOG_STYLE | wx.RESIZE_BORDER | wx.TAB_TRAVERSAL
    ):
        super().__init__(parent, -1, "Set Z motion minimum", pos, size, style)

        self.mainSizer = wx.BoxSizer(wx.VERTICAL)
        self.mainSizer.Add(wx.StaticText(self, -1,
                "Set the minimum altitude the stage is allowed\n" +
                "to move to."),
                0, wx.ALL | wx.ALIGN_CENTER_HORIZONTAL, 10)
        # Text input pre-filled with the current Z soft-limit minimum, so the
        # user always sees (and must consciously change) the active value.
        self.minStageZ = cockpit.gui.guiUtils.addLabeledInput(
                parent = self, sizer = self.mainSizer,
                label = "Stage Z minimum (µm):",
                defaultValue = str(cockpit.interfaces.stageMover.getSoftLimits()[2][0]),
                size = (70, -1), minSize = (150, -1),
                shouldRightAlignInput = True, border = 3,
                controlType = wx.TextCtrl)

        # Preset buttons that fill in the recommended values for slides/dishes.
        rowSizer = wx.BoxSizer(wx.HORIZONTAL)
        slideSafetyButton = wx.Button(self, -1, "Slide")
        slideSafetyButton.SetToolTip(wx.ToolTip("Set the safety to a good value for slide experiments"))
        slideSafetyButton.Bind(wx.EVT_BUTTON, lambda event: self.METHOD_NAME(SLIDE_SAFETY))
        rowSizer.Add(slideSafetyButton, 0, wx.ALL, 5 )
        dishSafetyButton = wx.Button(self, -1, "Dish")
        dishSafetyButton.SetToolTip(wx.ToolTip("Set the safety to a good value for dish experiments"))
        dishSafetyButton.Bind(wx.EVT_BUTTON, lambda event: self.METHOD_NAME(DISH_SAFETY))
        rowSizer.Add(dishSafetyButton, 0, wx.ALL, 5)
        self.mainSizer.Add(rowSizer, 0, wx.ALL | wx.ALIGN_CENTER_HORIZONTAL, 3)

        buttonsBox = wx.BoxSizer(wx.HORIZONTAL)
        cancelButton = wx.Button(self, label="Cancel")
        cancelButton.SetToolTip(wx.ToolTip("Close this window"))
        buttonsBox.Add(cancelButton, 0, wx.ALL, 5)
        startButton = wx.Button(self, label="Apply")
        startButton.SetToolTip(wx.ToolTip("Apply the chosen safety min"))
        buttonsBox.Add(startButton, 0, wx.ALL, 5)
        self.mainSizer.Add(buttonsBox, 0, wx.ALL | wx.ALIGN_CENTER_HORIZONTAL, 3)

        self.SetSizer(self.mainSizer)
        self.SetAutoLayout(True)
        self.mainSizer.Fit(self)
        startButton.Bind(wx.EVT_BUTTON, self.OnStart)

    ## Set the text for the stage safety min to a default value.
    def METHOD_NAME(self, value):
        self.minStageZ.SetValue('%.1f' % value)

    ## Save the user's selected Z min to the user config, and then set the
    # new min.
    def OnStart(self, event):
        self.Hide()
        # Axis 2 is Z; parse the user's entry and push it to the stage mover.
        cockpit.interfaces.stageMover.setSoftMin(2, float(self.minStageZ.GetValue()))
## Global dialog singleton.
dialog = None

## Generate the dialog for display. If it already exists, just bring it
# forwards.
def showDialog(parent):
    """Show the safety-min dialog, creating it on first use.

    Reuses the module-level singleton when possible; if the previous dialog's
    underlying window was destroyed by wx, a fresh dialog is created.
    """
    global dialog
    if dialog:
        try:
            dialog.Show()
            dialog.SetFocus()
            return
        except RuntimeError:
            # wx raises RuntimeError when the wrapped C++ window has been
            # destroyed; fall through and rebuild the dialog. (Narrowed from
            # a bare `except:`, which also swallowed KeyboardInterrupt.)
            pass
    dialog = SafetyMinDialog(parent)
    dialog.Show()
|
1,775 | convert redd | import pandas as pd
import numpy as np
from copy import deepcopy
from os.path import join, isdir, isfile
from os import listdir
import re
from sys import stdout
from nilmtk.utils import get_datastore
from nilmtk.datastore import Key
from nilmtk.timeframe import TimeFrame
from nilmtk.measurement import LEVEL_NAMES
from nilmtk.utils import get_module_directory, check_directory_exists
from nilm_metadata import convert_yaml_to_hdf5, save_yaml_to_datastore
"""
TODO:
* The bottleneck appears to be CPU. So could be sped up by using
multiprocessing module to use multiple CPU cores to load REDD channels in
parallel.
"""
def METHOD_NAME(redd_path, output_filename, format='HDF'):
    """
    Convert the REDD low_freq dataset into a NILMTK DataStore.

    Parameters
    ----------
    redd_path : str
        The root path of the REDD low_freq dataset.
    output_filename : str
        The destination filename (including path and suffix).
    format : str
        format of output. Either 'HDF' or 'CSV'. Defaults to 'HDF'
    """
    def _redd_measurement_mapping_func(house_id, chan_id):
        # REDD channels 1-2 are the mains (apparent power); all remaining
        # channels are appliance submeters recorded as active power.
        ac_type = 'apparent' if chan_id <= 2 else 'active'
        return [('power', ac_type)]

    # Open DataStore
    store = get_datastore(output_filename, format, mode='w')

    # Convert raw data to DataStore
    _convert(redd_path, store, _redd_measurement_mapping_func, 'US/Eastern')

    # Add metadata from the YAML files bundled with this converter.
    # (Previously this path was built twice, once into an unused local.)
    metadata_path = join(get_module_directory(),
                         'dataset_converters',
                         'redd',
                         'metadata')
    save_yaml_to_datastore(metadata_path, store)
    store.close()

    print("Done converting REDD to HDF5!")
def _convert(input_path, store, measurement_mapping_func, tz, sort_index=True, drop_duplicates=False):
    """
    Copy every REDD channel CSV found under *input_path* into *store*.

    Parameters
    ----------
    input_path : str
        The root path of the REDD low_freq dataset.
    store : DataStore
        The NILMTK DataStore object.
    measurement_mapping_func : function
        Must take these parameters:
            - house_id
            - chan_id
        Function should return a list of tuples e.g. [('power', 'active')]
    tz : str
        Timezone e.g. 'US/Eastern'
    sort_index : bool
        Defaults to True
    drop_duplicates : bool
        Remove entries with duplicated timestamp (keeps the first value)
        Defaults to False for backwards compatibility.
    """
    check_directory_exists(input_path)

    # Walk every house directory, then every channel file inside it.
    for house_id in _find_all_houses(input_path):
        print("Loading house", house_id, end="... ")
        stdout.flush()
        for chan_id in _find_all_chans(input_path, house_id):
            print(chan_id, end=" ")
            stdout.flush()
            key = Key(building=house_id, meter=chan_id)
            frame = _load_csv(
                _get_csv_filename(input_path, key),
                measurement_mapping_func(house_id, chan_id),
                tz,
                sort_index=sort_index,
                drop_duplicates=drop_duplicates,
            )
            store.put(str(key), frame)
        print()
def _find_all_houses(input_path):
    """
    Return the sorted house instance numbers found under *input_path*.

    Returns
    -------
    list of integers (house instances)
    """
    dir_names = [p for p in listdir(input_path) if isdir(join(input_path, p))]
    # \d+ (rather than a single \d) so datasets with ten or more houses
    # (e.g. 'house_10') are also discovered; single-digit directories match
    # exactly as before.
    return _matching_ints(dir_names, r'^house_(\d+)$')
def _find_all_chans(input_path, house_id):
    """
    Return the sorted channel numbers recorded for one house.

    Returns
    -------
    list of integers (channels)
    """
    house_path = join(input_path, 'house_{:d}'.format(house_id))
    data_files = [f for f in listdir(house_path) if isfile(join(house_path, f))]
    return _matching_ints(data_files, r'^channel_(\d\d?).dat$')
def _matching_ints(strings, regex):
"""Uses regular expression to select and then extract an integer from
strings.
Parameters
----------
strings : list of strings
regex : string
Regular Expression. Including one group. This group is used to
extract the integer from each string.
Returns
-------
list of ints
"""
ints = []
p = re.compile(regex)
for string in strings:
m = p.match(string)
if m:
integer = int(m.group(1))
ints.append(integer)
ints.sort()
return ints
def _get_csv_filename(input_path, key_obj):
    """
    Build (and sanity-check) the path of one channel's .dat file.

    Parameters
    ----------
    input_path : (str) the root path of the REDD low_freq dataset
    key_obj : (nilmtk.Key) the house and channel to load

    Returns
    -------
    filename : str
    """
    assert isinstance(input_path, str)
    assert isinstance(key_obj, Key)

    house_dir = join(input_path, 'house_{:d}'.format(key_obj.building))
    assert isdir(house_dir)

    filename = join(house_dir, 'channel_{:d}.dat'.format(key_obj.meter))
    assert isfile(filename)
    return filename
def _load_csv(filename, columns, tz, drop_duplicates=False, sort_index=False):
    """
    Load one REDD channel file into a timezone-aware DataFrame.

    Parameters
    ----------
    filename : str
    columns : list of tuples (for hierarchical column index)
    tz : str
        e.g. 'US/Eastern'
    sort_index : bool
        Defaults to False
    drop_duplicates : bool
        Remove entries with duplicated timestamp (keeps the first value)
        Defaults to False for backwards compatibility.

    Returns
    -------
    pandas.DataFrame
    """
    # Load data. REDD .dat files are space-separated: unix timestamp + values.
    df = pd.read_csv(filename, sep=' ', names=columns,
                     dtype={m: np.float32 for m in columns})

    # Modify the column labels to reflect the power measurements recorded.
    df.columns.set_names(LEVEL_NAMES, inplace=True)

    # Convert the integer index column to timezone-aware datetime
    df.index = pd.to_datetime(df.index.values, unit='s', utc=True)
    df = df.tz_convert(tz)

    if sort_index:
        df = df.sort_index()  # raw REDD data isn't always sorted
    if drop_duplicates:
        # Keep only the first sample for any repeated timestamp.
        dups_in_index = df.index.duplicated(keep='first')
        if dups_in_index.any():
            df = df[~dups_in_index]

    return df
1,776 | test h2 o molden | import pytest
import psi4
import os
from shutil import copytree
from psi4.driver.p4util.testing import compare_strings, compare_values, compare_integers
from psi4.driver.p4util.exceptions import ValidationError
pytestmark = [pytest.mark.psi, pytest.mark.api]
# Checks for
# Molden files are the same as reference
@pytest.fixture
def datadir(tmpdir, request):
    """
    from: https://stackoverflow.com/a/29631801
    Fixture responsible for searching a folder with the same name of test
    module and, if available, moving all contents to a temporary directory so
    tests can use them freely.
    """
    module_dir, _ = os.path.splitext(request.module.__file__)
    if os.path.isdir(module_dir):
        copytree(module_dir, str(tmpdir), dirs_exist_ok=True)
    return tmpdir
@pytest.mark.parametrize('inp_h2o', [
    pytest.param({'name': 'h2o_normal', 'energy': 'scf', 'do_virtual':True, 'use_natural': False, 'options': {'e_convergence': 10}}, id='h2o_normal'),
    pytest.param({'name': 'dovirt_false', 'energy': 'scf', 'do_virtual':False, 'use_natural': False, 'options': {'e_convergence': 10}}, id='dovirt_false'),
    pytest.param({'name': 'orbso_detci', 'energy': 'cisd', 'do_virtual':True, 'use_natural': True, 'options': {'e_convergence': 10, 'qc_module':'detci', 'opdm':True}}, id='orbso_detci')
])
def METHOD_NAME(inp_h2o, datadir):
    """Write a molden file for H2O under several method/orbital settings and
    compare it against the stored reference file."""
    mol = psi4.geometry("""
        0 1
        O
        H 1 0.951342
        H 1 0.951342 2 112.505645
    """)
    psi4.set_options({
        'basis': 'dz',
        'scf_type': 'pk',
    })
    psi4.set_options(inp_h2o['options'])
    molden_file = f"{inp_h2o['name']}.molden"
    # Reference output shipped with the test data (see the datadir fixture).
    ref = datadir.join(f"{inp_h2o['name']}.ref")
    e, wfn = psi4.energy(inp_h2o['energy'], return_wfn=True, molecule=mol)
    wfn.write_molden(molden_file, do_virtual=inp_h2o['do_virtual'], use_natural=inp_h2o['use_natural'])
    assert psi4.compare_moldenfiles(ref, molden_file)
@pytest.mark.parametrize('inp_h2o_density', [
    pytest.param({'name': 'orbso_density', 'energy': 'ccsd', 'do_virtual':True, 'use_natural': True}, id='orbso_density'),
])
def test_H2O_density_molden(inp_h2o_density, datadir):
    """Write a molden file from a properties (density) calculation on H2O and
    compare it against the stored reference file."""
    mol = psi4.geometry("""
        0 1
        O
        H 1 0.951342
        H 1 0.951342 2 112.505645
    """)
    psi4.set_options({
        'basis': 'dz',
        'scf_type': 'pk',
        'e_convergence': 10
    })
    molden_file = f"{inp_h2o_density['name']}.molden"
    ref = datadir.join(f"{inp_h2o_density['name']}.ref")
    # properties() (rather than energy()) so the relaxed density is available
    # for natural-orbital output.
    e, wfn = psi4.properties(inp_h2o_density['energy'], return_wfn=True, molecule=mol)
    wfn.write_molden(molden_file, do_virtual=inp_h2o_density['do_virtual'], use_natural=inp_h2o_density['use_natural'])
    assert psi4.compare_moldenfiles(ref, molden_file)
@pytest.mark.parametrize('inp_oh', [
    pytest.param({'name': 'ref_uhf', 'ref':'uhf'}, id='ref_uhf'),
    pytest.param({'name': 'ref_rohf', 'ref':'rohf'}, id='ref_rohf')
])
def test_OH_molden(inp_oh, datadir):
    """Write a molden file for the open-shell OH radical (UHF and ROHF
    references) and compare it against the stored reference file."""
    mol = psi4.geometry("""
        0 2
        O
        H 1 0.970369
        symmetry c1
    """)
    psi4.set_options({
        'basis': 'dz',
        'scf_type': 'pk',
        'e_convergence': 11,
        'reference':inp_oh['ref']
    })
    molden_file = f"{inp_oh['name']}.molden"
    ref = datadir.join(f"{inp_oh['name']}.ref")
    e, wfn = psi4.energy('scf', return_wfn=True, molecule=mol)
    wfn.write_molden(molden_file, do_virtual=True, use_natural=False)
    assert psi4.compare_moldenfiles(ref, molden_file)
@pytest.mark.parametrize('inp_h2s', [
    pytest.param({'name': 'dorbs_cartesian', 'options': {'basis': '6-31g'}}, id='dorbs_cartesian'),
    pytest.param({'name': 'dorbs_spherical', 'options': {'basis': 'dz'}}, id='dorbs_spherical'),
])
def test_H2S_molden(inp_h2s, datadir):
    """Write molden files for H2S with Cartesian and spherical d-orbital basis
    sets and compare them against the stored reference files."""
    mol = psi4.geometry("""
        0 1
        S
        H 1 1.350490
        H 1 1.350490 2 96.061977
    """)
    psi4.set_options({
        'scf_type': 'pk',
        'e_convergence': 10
    })
    psi4.set_options(inp_h2s['options'])
    molden_file = f"{inp_h2s['name']}.molden"
    ref = datadir.join(f"{inp_h2s['name']}.ref")
    e, wfn = psi4.energy('scf', return_wfn=True, molecule=mol)
    wfn.write_molden(molden_file, do_virtual=True, use_natural=False)
    assert psi4.compare_moldenfiles(ref, molden_file)
@pytest.mark.parametrize('inp_clfhcoh', [
    pytest.param({'name': 'sym_trivial'}, id='sym_trivial'),
])
def test_ClFHCOH_molden(inp_clfhcoh, datadir):
    """Write a molden file for a molecule with trivial (C1) symmetry and
    compare it against the stored reference file."""
    mol = psi4.geometry("""
        0 1
        C
        F 1 1.395520
        Cl 1 1.853978 2 106.297922
        H 1 1.066516 2 109.322008 3 -116.352650
        O 1 1.363622 2 110.838591 3 122.400775
        H 5 0.955096 1 116.200547 3 59.282816
    """)
    psi4.set_options({
        'basis': 'dz',
        'scf_type': 'pk',
        'e_convergence': 11
    })
    molden_file = f"{inp_clfhcoh['name']}.molden"
    ref = datadir.join(f"{inp_clfhcoh['name']}.ref")
    e, wfn = psi4.energy('scf', return_wfn=True, molecule=mol)
    wfn.write_molden(molden_file, do_virtual=True, use_natural=False)
    assert psi4.compare_moldenfiles(ref, molden_file)
1,777 | whitespace clean | # -------------------------------------------------------------------------
# MIT License
#
# Copyright (c) 2021 OpenAI
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
# Modified by Jiarui Xu
# -------------------------------------------------------------------------
import gzip
import html
import os
from functools import lru_cache
import ftfy
import regex as re
import torch
@lru_cache()
def default_bpe():
    """Absolute path of the bundled BPE vocab archive, located next to this module."""
    module_dir = os.path.dirname(os.path.abspath(__file__))
    return os.path.join(module_dir, 'bpe_simple_vocab_16e6.txt.gz')
@lru_cache()
def bytes_to_unicode():
    """Returns list of utf-8 byte and a corresponding list of unicode strings.

    The reversible bpe codes work on unicode strings. This means you need a large # of unicode characters in your vocab
    if you want to avoid UNKs. When you're at something like a 10B token dataset you end up needing around 5K for decent
    coverage. This is a significant percentage of your normal, say, 32K bpe vocab. To avoid that, we want lookup tables
    between utf-8 bytes and unicode strings. And avoids mapping to whitespace/control characters the bpe code barfs on.
    """
    # Printable byte values keep their own codepoint.
    byte_values = (
        list(range(ord('!'), ord('~') + 1))
        + list(range(ord('¡'), ord('¬') + 1))
        + list(range(ord('®'), ord('ÿ') + 1))
    )
    codepoints = byte_values[:]
    # Remaining (non-printable) bytes are shifted to codepoints >= 256.
    offset = 0
    for byte in range(2**8):
        if byte not in byte_values:
            byte_values.append(byte)
            codepoints.append(2**8 + offset)
            offset += 1
    return {b: chr(cp) for b, cp in zip(byte_values, codepoints)}
def get_pairs(word):
    """Return set of symbol pairs in a word.

    Word is represented as tuple of symbols (symbols being variable-length strings).
    """
    return set(zip(word, word[1:]))
def basic_clean(text):
    """Fix mojibake via ftfy, undo (possibly double) HTML escaping, and trim."""
    fixed = ftfy.fix_text(text)
    return html.unescape(html.unescape(fixed)).strip()
def METHOD_NAME(text):
    """Collapse every whitespace run to a single space and trim the ends."""
    return re.sub(r'\s+', ' ', text).strip()
class Tokenize:
    """Callable that converts raw text(s) into fixed-length token-id tensors.

    Wraps a tokenizer exposing ``encoder`` (with the start/end special tokens)
    and ``encode``; output rows are zero-padded to ``max_seq_len``.
    """

    def __init__(self, tokenizer, max_seq_len=77, truncate=True):
        self.tokenizer = tokenizer
        self.max_seq_len = max_seq_len
        self.truncate = truncate

    def __call__(self, texts):
        # Accept a bare string; remember to unwrap the batch dim on return.
        single_input = isinstance(texts, str)
        if single_input:
            texts = [texts]

        sot = self.tokenizer.encoder['<|startoftext|>']
        eot = self.tokenizer.encoder['<|endoftext|>']
        encoded = [[sot] + self.tokenizer.encode(text) + [eot] for text in texts]

        result = torch.zeros(len(encoded), self.max_seq_len, dtype=torch.long)
        for row, ids in enumerate(encoded):
            if len(ids) > self.max_seq_len:
                if not self.truncate:
                    raise RuntimeError(f'Input {texts[row]} is too long for context length {self.max_seq_len}')
                # Truncate but keep the end-of-text marker in the last slot.
                ids = ids[:self.max_seq_len]
                ids[-1] = eot
            result[row, :len(ids)] = torch.tensor(ids)

        return result[0] if single_input else result
class SimpleTokenizer(object):
    """CLIP's byte-pair-encoding tokenizer (OpenAI reference implementation)."""

    def __init__(self, bpe_path: str = default_bpe()):
        self.byte_encoder = bytes_to_unicode()
        self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
        merges = gzip.open(bpe_path).read().decode('utf-8').split('\n')
        # Skip the version header line and keep exactly the merges used by
        # the released CLIP vocab (49152 total minus bytes and specials).
        merges = merges[1:49152 - 256 - 2 + 1]
        merges = [tuple(merge.split()) for merge in merges]
        # Vocab: byte symbols, end-of-word variants, merges, then specials.
        vocab = list(bytes_to_unicode().values())
        vocab = vocab + [v + '</w>' for v in vocab]
        for merge in merges:
            vocab.append(''.join(merge))
        vocab.extend(['<|startoftext|>', '<|endoftext|>'])
        self.encoder = dict(zip(vocab, range(len(vocab))))
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.bpe_ranks = dict(zip(merges, range(len(merges))))
        self.cache = {'<|startoftext|>': '<|startoftext|>', '<|endoftext|>': '<|endoftext|>'}
        self.pat = re.compile(
            r"""<\|startoftext\|>|<\|endoftext\|>|'s|'t|'re|'ve|'m|'ll|'d|[\p{L}]+|[\p{N}]|[^\s\p{L}\p{N}]+""",
            re.IGNORECASE)

    def bpe(self, token):
        """Apply BPE merges to one pre-token; returns space-joined subwords."""
        if token in self.cache:
            return self.cache[token]
        word = tuple(token[:-1]) + (token[-1] + '</w>', )
        pairs = get_pairs(word)
        if not pairs:
            return token + '</w>'
        while True:
            # Merge the lowest-ranked (most frequent) adjacent pair first.
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float('inf')))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                    new_word.extend(word[i:j])
                    i = j
                except:  # noqa: E722
                    # `first` no longer occurs; copy the tail and stop.
                    new_word.extend(word[i:])
                    break
                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = ' '.join(word)
        self.cache[token] = word
        return word

    def encode(self, text):
        """Encode *text* into a list of BPE token ids."""
        bpe_tokens = []
        text = METHOD_NAME(basic_clean(text)).lower()
        for token in re.findall(self.pat, text):
            # Map raw bytes to the reversible unicode alphabet before BPE.
            token = ''.join(self.byte_encoder[b] for b in token.encode('utf-8'))
            bpe_tokens.extend(self.encoder[bpe_token] for bpe_token in self.bpe(token).split(' '))
        return bpe_tokens

    def decode(self, tokens):
        """Decode a list of token ids back to text (end-of-word becomes a space)."""
        text = ''.join([self.decoder[token] for token in tokens])
        text = bytearray([self.byte_decoder[c] for c in text]).decode('utf-8', errors='replace').replace('</w>', ' ')
        # BUG FIX: the original returned the undefined name `tex`, which
        # raised NameError on every call.
        return text
1,778 | assert gb subject | import pytest
from _ssl import SSLError
from settings import TEST_DATA
from suite.utils.resources_utils import (
create_ingress_from_yaml,
create_secret_from_yaml,
delete_items_from_yaml,
delete_secret,
ensure_connection_to_public_endpoint,
is_secret_present,
replace_secret,
wait_before_test,
)
from suite.utils.ssl_utils import get_server_certificate_subject
from suite.utils.yaml_utils import get_first_ingress_host_from_yaml, get_name_from_yaml
def assert_unrecognized_name_error(endpoint, host):
    """Assert that a TLS handshake for *host* fails with TLSV1_UNRECOGNIZED_NAME."""
    try:
        get_server_certificate_subject(endpoint.public_ip, host, endpoint.port_ssl)
    except SSLError as err:
        assert "SSL" in err.library
        assert "TLSV1_UNRECOGNIZED_NAME" in err.reason
    else:
        pytest.fail("We expected an SSLError here, but didn't get it or got another error. Exiting...")
def assert_us_subject(endpoint, host):
    """Assert the served certificate subject matches the US test certificate."""
    subject = get_server_certificate_subject(endpoint.public_ip, host, endpoint.port_ssl)
    expected = {
        b"C": b"US",
        b"ST": b"CA",
        b"O": b"Internet Widgits Pty Ltd",
        b"CN": b"cafe.example.com",
    }
    for field, value in expected.items():
        assert subject[field] == value
def METHOD_NAME(endpoint, host):
    """Assert the served certificate subject matches the GB test certificate."""
    subject = get_server_certificate_subject(endpoint.public_ip, host, endpoint.port_ssl)
    expected = {
        b"C": b"GB",
        b"ST": b"Cambridgeshire",
        b"O": b"nginx",
        b"CN": b"cafe.example.com",
    }
    for field, value in expected.items():
        assert subject[field] == value
class TLSSetup:
    """Plain data holder describing one deployed TLS test scenario:
    the ingress host plus the secret's name and its yaml file variants."""

    def __init__(self, ingress_host, secret_name, secret_path, new_secret_path, invalid_secret_path):
        (
            self.ingress_host,
            self.secret_name,
            self.secret_path,
            self.new_secret_path,
            self.invalid_secret_path,
        ) = (ingress_host, secret_name, secret_path, new_secret_path, invalid_secret_path)
@pytest.fixture(scope="class")
def tls_setup(
    request,
    kube_apis,
    ingress_controller_prerequisites,
    ingress_controller_endpoint,
    ingress_controller,
    test_namespace,
) -> TLSSetup:
    """Deploy the TLS test ingress (variant chosen via indirect parametrization)
    and return the host/secret paths the tests need.

    The TLS secret itself is deliberately NOT created here: the tests create,
    replace and delete it to exercise the controller's secret lifecycle.
    """
    print("------------------------- Deploy TLS setup -----------------------------------")
    test_data_path = f"{TEST_DATA}/tls"
    # request.param is "standard" or "mergeable" (see the class parametrize).
    ingress_path = f"{test_data_path}/{request.param}/ingress.yaml"
    create_ingress_from_yaml(kube_apis.networking_v1, test_namespace, ingress_path)
    wait_before_test(1)
    ingress_host = get_first_ingress_host_from_yaml(ingress_path)
    secret_name = get_name_from_yaml(f"{test_data_path}/tls-secret.yaml")
    ensure_connection_to_public_endpoint(
        ingress_controller_endpoint.public_ip, ingress_controller_endpoint.port, ingress_controller_endpoint.port_ssl
    )

    def fin():
        # Teardown: remove the ingress and any secret a test left behind.
        if request.config.getoption("--skip-fixture-teardown") == "no":
            print("Clean up TLS setup")
            delete_items_from_yaml(kube_apis, ingress_path, test_namespace)
            if is_secret_present(kube_apis.v1, secret_name, test_namespace):
                delete_secret(kube_apis.v1, secret_name, test_namespace)

    request.addfinalizer(fin)

    return TLSSetup(
        ingress_host,
        secret_name,
        f"{test_data_path}/tls-secret.yaml",
        f"{test_data_path}/new-tls-secret.yaml",
        f"{test_data_path}/invalid-tls-secret.yaml",
    )
@pytest.mark.ingresses
@pytest.mark.parametrize("tls_setup", ["standard", "mergeable"], indirect=True)
class TestIngressTLS:
    """End-to-end checks of TLS secret lifecycle handling for an Ingress."""

    def test_tls_termination(self, kube_apis, ingress_controller_endpoint, test_namespace, tls_setup):
        """Walk the TLS secret through create/delete/invalid/update transitions
        and verify the served certificate (or SNI failure) after each step."""
        print("Step 1: no secret")
        assert_unrecognized_name_error(ingress_controller_endpoint, tls_setup.ingress_host)

        print("Step 2: deploy secret and check")
        create_secret_from_yaml(kube_apis.v1, test_namespace, tls_setup.secret_path)
        wait_before_test(1)
        assert_us_subject(ingress_controller_endpoint, tls_setup.ingress_host)

        print("Step 3: remove secret and check")
        delete_secret(kube_apis.v1, tls_setup.secret_name, test_namespace)
        wait_before_test(1)
        assert_unrecognized_name_error(ingress_controller_endpoint, tls_setup.ingress_host)

        print("Step 4: restore secret and check")
        create_secret_from_yaml(kube_apis.v1, test_namespace, tls_setup.secret_path)
        wait_before_test(1)
        assert_us_subject(ingress_controller_endpoint, tls_setup.ingress_host)

        print("Step 5: deploy invalid secret and check")
        delete_secret(kube_apis.v1, tls_setup.secret_name, test_namespace)
        create_secret_from_yaml(kube_apis.v1, test_namespace, tls_setup.invalid_secret_path)
        wait_before_test(1)
        assert_unrecognized_name_error(ingress_controller_endpoint, tls_setup.ingress_host)

        print("Step 6: restore secret and check")
        delete_secret(kube_apis.v1, tls_setup.secret_name, test_namespace)
        create_secret_from_yaml(kube_apis.v1, test_namespace, tls_setup.secret_path)
        wait_before_test(1)
        assert_us_subject(ingress_controller_endpoint, tls_setup.ingress_host)

        print("Step 7: update secret and check")
        replace_secret(kube_apis.v1, tls_setup.secret_name, test_namespace, tls_setup.new_secret_path)
        wait_before_test(1)
        # The replacement secret carries the GB-subject certificate.
        METHOD_NAME(ingress_controller_endpoint, tls_setup.ingress_host)
1,779 | get treatment for split | """Split evaluator module."""
import logging
from splitio.models.grammar.condition import ConditionType
from splitio.models.impressions import Label
CONTROL = 'control'
_LOGGER = logging.getLogger(__name__)
class Evaluator(object): # pylint: disable=too-few-public-methods
    """Split Evaluator class."""
    def __init__(self, feature_flag_storage, segment_storage, splitter):
        """
        Construct a Evaluator instance.
        :param feature_flag_storage: feature_flag storage.
        :type feature_flag_storage: splitio.storage.SplitStorage
        :param segment_storage: Segment storage.
        :type segment_storage: splitio.storage.SegmentStorage
        :param splitter: partition selector used to pick buckets/treatments.
        """
        self._feature_flag_storage = feature_flag_storage
        self._segment_storage = segment_storage
        self._splitter = splitter
    def _evaluate_treatment(self, feature_flag_name, matching_key, bucketing_key, attributes, feature_flag):
        """
        Evaluate the user submitted data against a feature and return the resulting treatment.
        :param feature_flag_name: The feature flag for which to get the treatment
        :type feature: str
        :param matching_key: The matching_key for which to get the treatment
        :type matching_key: str
        :param bucketing_key: The bucketing_key for which to get the treatment
        :type bucketing_key: str
        :param attributes: An optional dictionary of attributes
        :type attributes: dict
        :param feature_flag: Split object
        :type attributes: splitio.models.splits.Split|None
        :return: The treatment for the key and feature flag
        :rtype: object
        """
        # Defaults used when the flag is unknown or nothing matches:
        # the CONTROL treatment and a change number of -1.
        label = ''
        _treatment = CONTROL
        _change_number = -1
        if feature_flag is None:
            _LOGGER.warning('Unknown or invalid feature: %s', feature_flag_name)
            label = Label.SPLIT_NOT_FOUND
        else:
            _change_number = feature_flag.change_number
            if feature_flag.killed:
                # A killed flag short-circuits to its default treatment.
                label = Label.KILLED
                _treatment = feature_flag.default_treatment
            else:
                treatment, label = self.METHOD_NAME(
                    feature_flag,
                    matching_key,
                    bucketing_key,
                    attributes
                )
                if treatment is None:
                    # No condition matched: fall back to the default treatment.
                    label = Label.NO_CONDITION_MATCHED
                    _treatment = feature_flag.default_treatment
                else:
                    _treatment = treatment
        return {
            'treatment': _treatment,
            'configurations': feature_flag.get_configurations_for(_treatment) if feature_flag else None,
            'impression': {
                'label': label,
                'change_number': _change_number
            }
        }
    def evaluate_feature(self, feature_flag_name, matching_key, bucketing_key, attributes=None):
        """
        Evaluate the user submitted data against a feature and return the resulting treatment.
        :param feature_flag_name: The feature flag for which to get the treatment
        :type feature: str
        :param matching_key: The matching_key for which to get the treatment
        :type matching_key: str
        :param bucketing_key: The bucketing_key for which to get the treatment
        :type bucketing_key: str
        :param attributes: An optional dictionary of attributes
        :type attributes: dict
        :return: The treatment for the key and split
        :rtype: object
        """
        # Fetching Split definition
        feature_flag = self._feature_flag_storage.get(feature_flag_name)
        # Calling evaluation
        evaluation = self._evaluate_treatment(feature_flag_name, matching_key,
                                              bucketing_key, attributes, feature_flag)
        return evaluation
    def evaluate_features(self, feature_flag_names, matching_key, bucketing_key, attributes=None):
        """
        Evaluate the user submitted data against multiple features and return the resulting
        treatment.
        :param feature_flag_names: The feature flags for which to get the treatments
        :type feature: list(str)
        :param matching_key: The matching_key for which to get the treatment
        :type matching_key: str
        :param bucketing_key: The bucketing_key for which to get the treatment
        :type bucketing_key: str
        :param attributes: An optional dictionary of attributes
        :type attributes: dict
        :return: The treatments for the key and feature flags
        :rtype: object
        """
        # fetch_many yields (name, flag) pairs; a None flag is handled
        # by _evaluate_treatment's SPLIT_NOT_FOUND branch.
        return {
            feature_flag_name: self._evaluate_treatment(feature_flag_name, matching_key,
                                                        bucketing_key, attributes, feature_flag)
            for (feature_flag_name, feature_flag) in self._feature_flag_storage.fetch_many(feature_flag_names).items()
        }
    def METHOD_NAME(self, feature_flag, matching_key, bucketing_key, attributes=None):
        """
        Evaluate the feature considering the conditions.
        If there is a match, it will return the condition and the label.
        Otherwise, it will return (None, None)
        :param feature_flag: The feature flag for which to get the treatment
        :type feature_flag: Split
        :param matching_key: The key for which to get the treatment
        :type key: str
        :param bucketing_key: The key for which to get the treatment
        :type key: str
        :param attributes: An optional dictionary of attributes
        :type attributes: dict
        :return: The resulting treatment and label
        :rtype: tuple
        """
        if bucketing_key is None:
            bucketing_key = matching_key
        # Becomes True once the first ROLLOUT condition is reached, so the
        # traffic-allocation check below runs at most once per evaluation.
        roll_out = False
        context = {
            'segment_storage': self._segment_storage,
            'evaluator': self,
            'bucketing_key': bucketing_key
        }
        for condition in feature_flag.conditions:
            if (not roll_out and
                    condition.condition_type == ConditionType.ROLLOUT):
                if feature_flag.traffic_allocation < 100:
                    bucket = self._splitter.get_bucket(
                        bucketing_key,
                        feature_flag.traffic_allocation_seed,
                        feature_flag.algo
                    )
                    if bucket > feature_flag.traffic_allocation:
                        # Key falls outside the allocated traffic slice.
                        return feature_flag.default_treatment, Label.NOT_IN_SPLIT
                roll_out = True
            condition_matches = condition.matches(
                matching_key,
                attributes=attributes,
                context=context
            )
            if condition_matches:
                return self._splitter.get_treatment(
                    bucketing_key,
                    feature_flag.seed,
                    condition.partitions,
                    feature_flag.algo
                ), condition.label
        # No condition matches
        return None, None
1,780 | validate model | # Copyright 2022 MosaicML Composer authors
# SPDX-License-Identifier: Apache-2.0
import copy
import numpy as np
import pytest
import torch
from composer.algorithms import EMA
from composer.algorithms.ema.ema import EMAParameters, compute_ema
from composer.core import Event, Time, Timestamp, TimeUnit
from tests.common import SimpleConvModel, SimpleTransformerClassifier
from tests.common.models import configure_tiny_bert_hf_model
def validate_ema(model, original_model, ema_model, smoothing):
    """Assert ema_model equals smoothing * original_model + (1 - smoothing) * model.

    Parameters and buffers are compared by name with torch.testing.assert_close;
    buffers are cast to the EMA buffer's dtype before comparison.
    """
    originals = dict(original_model.named_parameters())
    emas = dict(ema_model.named_parameters())
    for name, param in model.named_parameters():
        expected = originals[name] * smoothing + (1. - smoothing) * param
        torch.testing.assert_close(emas[name].data, expected)
    original_buffers = dict(original_model.named_buffers())
    ema_buffers = dict(ema_model.named_buffers())
    for name, buffer in model.named_buffers():
        expected = originals_buffer = (original_buffers[name] * smoothing + (1. - smoothing) * buffer)
        expected = expected.type(ema_buffers[name].data.dtype)
        torch.testing.assert_close(ema_buffers[name].data, expected)
def METHOD_NAME(model1, model2):
    """Assert that two models hold identical parameters and buffers, keyed by name."""
    other_params = dict(model2.named_parameters())
    for name, param in model1.named_parameters():
        torch.testing.assert_close(param.data, other_params[name].data)
    other_buffers = dict(model2.named_buffers())
    for name, buffer in model1.named_buffers():
        torch.testing.assert_close(buffer.data, other_buffers[name].data)
@pytest.mark.parametrize('smoothing', [0, 0.5, 0.99, 1])
@pytest.mark.parametrize('model_cls', [(SimpleConvModel), (SimpleTransformerClassifier),
                                       (configure_tiny_bert_hf_model)])
def test_ema(smoothing, model_cls):
    """compute_ema must blend ema_model toward model by (1 - smoothing) per call."""
    model = model_cls()
    ema_model = model_cls()
    # Snapshot the EMA model before the update so validate_ema can check the blend.
    original_model = copy.deepcopy(ema_model)
    compute_ema(model=model, ema_model=ema_model, smoothing=smoothing)
    validate_ema(model, original_model, ema_model, smoothing)
# params = [(half_life, update_interval)]
@pytest.mark.parametrize('params', [{
    'half_life': '10ba',
    'update_interval': '1ba'
}, {
    'half_life': '1ep',
    'update_interval': '1ep'
}, {
    'smoothing': 0.999,
    'update_interval': '1ba'
}])
@pytest.mark.parametrize('model_cls', [(SimpleConvModel), (SimpleTransformerClassifier),
                                       (configure_tiny_bert_hf_model)])
def test_ema_algorithm(params, model_cls, minimal_state, empty_logger):
    """End-to-end check of the EMA algorithm: smoothing derivation from
    half_life, the EMA update at batch/epoch end, and swapping the EMA model
    in for eval and back out afterwards."""
    # Initialize input tensor
    input = torch.rand((32, 5))
    if 'smoothing' in params:
        smoothing, update_interval = params['smoothing'], params['update_interval']
        algorithm = EMA(half_life=None, smoothing=smoothing, update_interval=update_interval)
    else:
        half_life, update_interval = params['half_life'], params['update_interval']
        algorithm = EMA(half_life=half_life, update_interval=update_interval)
    state = minimal_state
    state.model = model_cls()
    state.batch = (input, torch.Tensor())
    # Start EMA
    algorithm.ema_model = EMAParameters(state.model)
    # Check if ema correctly calculated smoothing
    update_interval = Time.from_timestring(params['update_interval'])
    if 'half_life' in params:
        half_life = Time.from_timestring(params['half_life'])
        smoothing = np.exp(-np.log(2) * (update_interval.value / half_life.value))
        np.testing.assert_allclose(np.asarray(smoothing), np.asarray(algorithm.smoothing))
    # Fake a training update by replacing state.model after ema grabbed it.
    original_model = copy.deepcopy(state.model)
    state.model = model_cls()
    training_updated_model = copy.deepcopy(state.model)
    # Do the EMA update
    state.timestamp = Timestamp()
    if update_interval.unit == TimeUnit.BATCH:
        state.timestamp._batch = update_interval
        algorithm.apply(Event.BATCH_END, state, empty_logger)
    elif update_interval.unit == TimeUnit.EPOCH:
        state.timestamp._epoch = update_interval
        algorithm.apply(Event.EPOCH_END, state, empty_logger)
    else:
        # Fixed: was an f-string with no placeholders (flake8 F541).
        raise ValueError('Invalid time string for parameter half_life')
    # Check if EMA correctly computed the average.
    validate_ema(state.model, original_model, algorithm.ema_model, algorithm.smoothing)
    ema_updated_model = copy.deepcopy(algorithm.ema_model)
    # Check if the EMA model is swapped in for testing
    algorithm.apply(Event.EVAL_START, state, empty_logger)
    METHOD_NAME(state.model, ema_updated_model)
    # Check if the training model is swapped back in for training
    algorithm.apply(Event.EVAL_END, state, empty_logger)
    METHOD_NAME(state.model, training_updated_model)
1,781 | on connect | #!/usr/bin/python3
from gi.repository import Gio, GLib
LG_DBUS_NAME = "org.Cinnamon.LookingGlass"
LG_DBUS_PATH = "/org/Cinnamon/LookingGlass"
class LookingGlassProxy:
    """Session-bus proxy for Cinnamon's Looking Glass debugger.

    Watches the session bus for the Looking Glass name, (re)creates a
    Gio.DBusProxy when the service appears, fans remote signals out to
    subscribers, and exposes thin wrappers around the remote methods that
    fail soft (returning a default) when the service is unavailable.
    """
    def __init__(self):
        self._signals = []
        self._status_change_callbacks = []
        self._proxy = None
        # Watch the bus name so we connect/disconnect as the service comes and goes.
        Gio.bus_watch_name(Gio.BusType.SESSION,
                           LG_DBUS_NAME,
                           Gio.BusNameWatcherFlags.NONE,
                           self.METHOD_NAME,
                           self.on_disconnect)
    def add_status_change_callback(self, callback):
        """Register callback(state: bool), fired on every connect/disconnect."""
        self._status_change_callbacks.append(callback)
    def refresh_status(self):
        """Re-broadcast the current connection state to all status callbacks."""
        self.set_status(self._proxy is not None)
    def get_is_ready(self):
        """Return True while the remote proxy is connected."""
        return self._proxy is not None
    def connect(self, name, callback):
        """Subscribe callback to the remote D-Bus signal `name`."""
        self._signals.append((name, callback))
    def on_signal(self, proxy, sender_name, signal_name, params):
        # Fan a remote signal out to every subscriber registered for its name.
        for name, callback in self._signals:
            if signal_name == name:
                callback(*params)
    def set_status(self, state):
        for callback in self._status_change_callbacks:
            callback(state)
    def METHOD_NAME(self, connection, name, owner):
        # Bus name appeared: build the proxy unless one already exists.
        if self._proxy:
            return
        self.init_proxy()
    def on_disconnect(self, connection, name):
        # Bus name vanished: drop the proxy and notify listeners.
        self._proxy = None
        self.set_status(False)
    def init_proxy(self):
        """Asynchronously create the D-Bus proxy for the Looking Glass service."""
        try:
            self._proxy = Gio.DBusProxy.new_for_bus(Gio.BusType.SESSION,
                                                    Gio.DBusProxyFlags.NONE,
                                                    None,
                                                    LG_DBUS_NAME,
                                                    LG_DBUS_PATH,
                                                    LG_DBUS_NAME,
                                                    None,
                                                    self.on_proxy_ready,
                                                    None)
        except GLib.Error as e:
            # GLib is imported at module level; the original referenced it
            # without importing, raising NameError in this error path.
            print(e.message)
            self._proxy = None
    def on_proxy_ready(self, obj, result, data=None):
        self._proxy = Gio.DBusProxy.new_for_bus_finish(result)
        self._proxy.connect("g-signal", self.on_signal)
        self.set_status(True)
    def _call(self, method, signature, *args, default=None):
        """Invoke a remote method; return `default` when disconnected or on error."""
        if self._proxy:
            try:
                return getattr(self._proxy, method)(signature, *args)
            except Exception:
                pass
        return default
    # Proxy Methods:
    def Eval(self, code):
        self._call('Eval', '(s)', code)
    def GetResults(self):
        return self._call('GetResults', '()', default=(False, ""))
    def AddResult(self, code):
        self._call('AddResult', '(s)', code)
    def GetErrorStack(self):
        return self._call('GetErrorStack', '()', default=(False, ""))
    def GetMemoryInfo(self):
        return self._call('GetMemoryInfo', '()', default=(False, 0, {}))
    def FullGc(self):
        self._call('FullGc', '()')
    def Inspect(self, code):
        return self._call('Inspect', '(s)', code, default=(False, ""))
    def GetLatestWindowList(self):
        return self._call('GetLatestWindowList', '()', default=(False, ""))
    def StartInspector(self):
        self._call('StartInspector', '()')
    def GetExtensionList(self):
        return self._call('GetExtensionList', '()', default=(False, ""))
    def ReloadExtension(self, uuid, xlet_type):
        return self._call('ReloadExtension', '(ss)', uuid, xlet_type, default=(False, ""))
1,782 | execute and monitor | '''
Created on Jun 16, 2009
@author: meloam
'''
import os
import sys
from types import *
class ProcessMonitor(object):
    '''
    Lets us fork (and optionally exec) processes, monitoring their exit codes
    '''
    def __init__(self):
        '''
        Constructor
        '''
        # pid -> child object for children believed to still be running
        self.processList = {}
        # pid -> child object for children seen to exit on the last check
        self.returnedList = {}
    def METHOD_NAME(self, child):
        """Fork/exec `child` and start tracking it by pid."""
        pid = child.forkAndExecute()
        self.processList[pid] = child
    def checkChildren(self, deleteOldOnes = True):
        """
        Checks the status of all our children, updating our lists
        """
        if (deleteOldOnes):
            self.returnedList = {}
        # Iterate a snapshot of the items: the original looped over the dict
        # (yielding pid ints, not child objects) and mutated it mid-iteration.
        for pid, child in list(self.processList.items()):
            newstatus, newsignal = child.isRunning()
            # isRunning returns the sentinel (True, True) while running;
            # identity checks avoid confusing an exit status of 1 with True.
            if newstatus is not True or newsignal is not True:
                self.returnedList[pid] = child
                del self.processList[pid]
                if child.callback:
                    child.callback(child)
class ChildProcess(object):
    """
    Base class for child processes: tracks the child's pid and holds an
    optional completion callback plus optional stdout/stderr replacements.
    """
    def __init__(self):
        self.processID = -1    # pid of the forked child; -1 until forked
        self.callback = None   # invoked with self when the child is reaped
        self.ourStdout = None
        self.ourStderr = None
    def setStdout(self, handle):
        self.ourStdout = handle
    def setStderr(self, handle):
        self.ourStderr = handle
    def setCallback(self, newCallback):
        self.callback = newCallback
    def forkAndExecute(self):
        """Fork; in the child redirect streams, run execute() and _exit with its code."""
        pid = os.fork()
        if not pid:
            try:
                # we're in the child
                if self.ourStderr:
                    sys.stderr = self.ourStderr
                if self.ourStdout:
                    # Fixed: the original assigned sys.stderr here as well,
                    # so stdout was never actually redirected.
                    sys.stdout = self.ourStdout
                exitCode = self.execute()
                print("Falling through ChildProcess.forkAndExecute with code %s" % exitCode)
                sys.stdout.flush()
                sys.stderr.flush()
                os._exit(exitCode)
            except Exception as e:
                print("Something bad happened in ChildProcess.forkAndExecute in the child: %s" % e)
                os._exit(99)
        else:
            # we're in the parent
            self.processID = pid
            return pid
    def execute(self):
        """
        overridden in child classes to provide specific things to do
        """
        msg = "ProcessMonitor.ProcessMonitor.execute method not overridden in "
        msg += "implementation: %s\n" % self.__class__.__name__
        raise NotImplementedError(msg)
    def isRunning(self):
        """Poll the child; return (True, True) while running, else (status, signal)."""
        if self.processID == -1:
            raise RuntimeError("Trying to waitpid on nonexistant process")
        pid, status = os.waitpid(self.processID, os.WNOHANG)
        if pid == 0 and status == 0:
            # we're definately still running if this pops up
            return (True, True)
        # The child returned: decode waitpid's packed 16-bit status —
        # high byte is the exit status, low byte the terminating signal.
        self.realstatus = (0xFF00 & status) >> 8
        self.realsignal = 0x00FF & status
        return (self.realstatus, self.realsignal)
class ExecProcess(ChildProcess):
    """
    calls a given external executable
    """
    def __init__(self):
        # Fixed: initialize base-class state (the original skipped this,
        # leaving processID/callback undefined on ExecProcess instances).
        super().__init__()
        self.args = []
    def setArgs(self, arguments):
        """
        Either accepts a list of arguments which are passed to execvp
        OR accepts a string which is passed to bash and shell-expanded
        """
        if not isinstance(arguments, list):
            # we got passed a string, pass it to a shell.
            # Fixed: the original assigned self.args[0..2] on an empty list,
            # which raised IndexError.
            self.args = ['bash', '-c', arguments]
        else:
            # we got passed a list
            self.args = arguments
    def execute(self):
        if not self.args:
            raise RuntimeError("No arguments were set")
        # Fixed: execvp expects the full argv including argv[0]; the
        # original passed args[1:], dropping the program name.
        os.execvp(self.args[0], self.args)
class PythonProcess(ChildProcess):
    """
    Calls a function specified by the user
    to pass arguments, wrap it in a lambda
    """
    def __init__(self):
        super().__init__()
        self.target = None
    def setTarget(self, newtarget):
        # Generalized: accept any callable (bound methods, partials, ...),
        # not just plain functions and lambdas.
        if not callable(newtarget):
            raise RuntimeError("PythonProcess requires a function for target")
        self.target = newtarget
    def execute(self):
        if self.target is None:
            raise RuntimeError("No execute process was set")
        return self.target()
1,783 | id | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import copy
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
__all__ = [
'GetAppleAppConfigResult',
'AwaitableGetAppleAppConfigResult',
'get_apple_app_config',
'get_apple_app_config_output',
]
@pulumi.output_type
class GetAppleAppConfigResult:
    """
    A collection of values returned by getAppleAppConfig.
    """
    # NOTE: generated by the Pulumi tfgen bridge (see file header) — keep
    # edits limited to comments.
    def __init__(__self__, app_id=None, config_file_contents=None, config_filename=None, METHOD_NAME=None, project=None):
        # Validate each incoming value's type before storing it via pulumi.set.
        if app_id and not isinstance(app_id, str):
            raise TypeError("Expected argument 'app_id' to be a str")
        pulumi.set(__self__, "app_id", app_id)
        if config_file_contents and not isinstance(config_file_contents, str):
            raise TypeError("Expected argument 'config_file_contents' to be a str")
        pulumi.set(__self__, "config_file_contents", config_file_contents)
        if config_filename and not isinstance(config_filename, str):
            raise TypeError("Expected argument 'config_filename' to be a str")
        pulumi.set(__self__, "config_filename", config_filename)
        if METHOD_NAME and not isinstance(METHOD_NAME, str):
            raise TypeError("Expected argument 'id' to be a str")
        pulumi.set(__self__, "id", METHOD_NAME)
        if project and not isinstance(project, str):
            raise TypeError("Expected argument 'project' to be a str")
        pulumi.set(__self__, "project", project)
    @property
    @pulumi.getter(name="appId")
    def app_id(self) -> str:
        # The id of the Firebase iOS App the config belongs to.
        return pulumi.get(self, "app_id")
    @property
    @pulumi.getter(name="configFileContents")
    def config_file_contents(self) -> str:
        """
        The content of the XML configuration file as a base64-encoded string.
        """
        return pulumi.get(self, "config_file_contents")
    @property
    @pulumi.getter(name="configFilename")
    def config_filename(self) -> str:
        """
        The filename that the configuration artifact for the IosApp is typically saved as.
        """
        return pulumi.get(self, "config_filename")
    @property
    @pulumi.getter
    def METHOD_NAME(self) -> str:
        # The "id" field of the lookup result.
        return pulumi.get(self, "id")
    @property
    @pulumi.getter
    def project(self) -> Optional[str]:
        # The project the app belongs to; optional in lookups.
        return pulumi.get(self, "project")
class AwaitableGetAppleAppConfigResult(GetAppleAppConfigResult):
    # pylint: disable=using-constant-test
    def __await__(self):
        # Generated awaitable shim: the dead `if False: yield` makes this a
        # generator function so the already-resolved result can be awaited.
        if False:
            yield self
        return GetAppleAppConfigResult(
            app_id=self.app_id,
            config_file_contents=self.config_file_contents,
            config_filename=self.config_filename,
            METHOD_NAME=self.METHOD_NAME,
            project=self.project)
def get_apple_app_config(app_id: Optional[str] = None,
                         project: Optional[str] = None,
                         opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetAppleAppConfigResult:
    """
    Use this data source to access information about an existing resource.
    :param str app_id: The id of the Firebase iOS App.
    - - -
    :param str project: The ID of the project in which the resource belongs. If it
        is not provided, the provider project is used.
    """
    __args__ = dict()
    __args__['appId'] = app_id
    __args__['project'] = project
    opts = pulumi.InvokeOptions.merge(_utilities.get_invoke_opts_defaults(), opts)
    # Invoke the provider function and map its raw outputs onto the result type.
    __ret__ = pulumi.runtime.invoke('gcp:firebase/getAppleAppConfig:getAppleAppConfig', __args__, opts=opts, typ=GetAppleAppConfigResult).value
    return AwaitableGetAppleAppConfigResult(
        app_id=pulumi.get(__ret__, 'app_id'),
        config_file_contents=pulumi.get(__ret__, 'config_file_contents'),
        config_filename=pulumi.get(__ret__, 'config_filename'),
        METHOD_NAME=pulumi.get(__ret__, 'id'),
        project=pulumi.get(__ret__, 'project'))
@_utilities.lift_output_func(get_apple_app_config)
def get_apple_app_config_output(app_id: Optional[pulumi.Input[str]] = None,
                                project: Optional[pulumi.Input[Optional[str]]] = None,
                                opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetAppleAppConfigResult]:
    """
    Use this data source to access information about an existing resource.
    :param str app_id: The id of the Firebase iOS App.
    - - -
    :param str project: The ID of the project in which the resource belongs. If it
        is not provided, the provider project is used.
    """
    # Body supplied by the lift_output_func decorator wrapping the plain variant.
    ...
1,784 | transform aspect | import logging
from typing import Callable, List, Optional, cast
import datahub.emitter.mce_builder as builder
from datahub.configuration.common import (
KeyValuePattern,
TransformerSemantics,
TransformerSemanticsConfigModel,
)
from datahub.configuration.import_resolver import pydantic_resolve_key
from datahub.emitter.mce_builder import Aspect
from datahub.ingestion.api.common import PipelineContext
from datahub.ingestion.graph.client import DataHubGraph
from datahub.ingestion.transformer.dataset_transformer import DatasetTermsTransformer
from datahub.metadata.schema_classes import (
AuditStampClass,
GlossaryTermAssociationClass,
GlossaryTermsClass,
)
class AddDatasetTermsConfig(TransformerSemanticsConfigModel):
    """Config for AddDatasetTerms: a callback mapping an entity urn to terms."""
    get_terms_to_add: Callable[[str], List[GlossaryTermAssociationClass]]
    # Resolve a dotted-path string in raw config into the actual callable.
    _resolve_term_fn = pydantic_resolve_key("get_terms_to_add")
class AddDatasetTerms(DatasetTermsTransformer):
    """Transformer that adds glossary terms to datasets according to a callback function."""
    ctx: PipelineContext
    config: AddDatasetTermsConfig
    def __init__(self, config: AddDatasetTermsConfig, ctx: PipelineContext):
        super().__init__()
        self.ctx = ctx
        self.config = config
        self.log = logging.getLogger(__name__)
    @classmethod
    def create(cls, config_dict: dict, ctx: PipelineContext) -> "AddDatasetTerms":
        """Standard transformer factory: parse the raw config dict and construct."""
        config = AddDatasetTermsConfig.parse_obj(config_dict)
        return cls(config, ctx)
    @staticmethod
    def _merge_with_server_glossary_terms(
        graph: DataHubGraph,
        urn: str,
        glossary_terms_aspect: Optional[GlossaryTermsClass],
    ) -> Optional[GlossaryTermsClass]:
        """Merge locally-computed terms with those already on the server.

        Returns None when there is nothing to add, so PATCH semantics can
        skip emitting the aspect entirely.
        """
        if not glossary_terms_aspect or not glossary_terms_aspect.terms:
            # nothing to add, no need to consult server
            return None
        # Merge the transformed terms with existing server terms.
        # The transformed terms takes precedence, which may change the term context.
        server_glossary_terms_aspect = graph.get_glossary_terms(entity_urn=urn)
        if server_glossary_terms_aspect is not None:
            # Dict union keyed by term urn: later (local) entries override
            # server entries with the same urn.
            glossary_terms_aspect.terms = list(
                {
                    **{term.urn: term for term in server_glossary_terms_aspect.terms},
                    **{term.urn: term for term in glossary_terms_aspect.terms},
                }.values()
            )
        return glossary_terms_aspect
    def METHOD_NAME(
        self, entity_urn: str, aspect_name: str, aspect: Optional[Aspect]
    ) -> Optional[Aspect]:
        """Transform the entity's glossary-terms aspect, adding configured terms."""
        in_glossary_terms: Optional[GlossaryTermsClass] = cast(
            Optional[GlossaryTermsClass], aspect
        )
        # Reuse the incoming audit stamp when present; otherwise synthesize one.
        out_glossary_terms: GlossaryTermsClass = GlossaryTermsClass(
            terms=[],
            auditStamp=in_glossary_terms.auditStamp
            if in_glossary_terms is not None
            else AuditStampClass(
                time=builder.get_sys_time(), actor="urn:li:corpUser:restEmitter"
            ),
        )
        # Check if user want to keep existing terms
        if in_glossary_terms is not None and self.config.replace_existing is False:
            out_glossary_terms.terms.extend(in_glossary_terms.terms)
            out_glossary_terms.auditStamp = in_glossary_terms.auditStamp
        terms_to_add = self.config.get_terms_to_add(entity_urn)
        if terms_to_add is not None:
            out_glossary_terms.terms.extend(terms_to_add)
        patch_glossary_terms: Optional[GlossaryTermsClass] = None
        if self.config.semantics == TransformerSemantics.PATCH:
            assert self.ctx.graph
            patch_glossary_terms = AddDatasetTerms._merge_with_server_glossary_terms(
                self.ctx.graph, entity_urn, out_glossary_terms
            )
            return cast(Optional[Aspect], patch_glossary_terms)
        else:
            return cast(Aspect, out_glossary_terms)
class SimpleDatasetTermsConfig(TransformerSemanticsConfigModel):
    """Config for SimpleAddDatasetTerms: a fixed list of glossary term urns."""
    term_urns: List[str]
class SimpleAddDatasetTerms(AddDatasetTerms):
    """Transformer that adds a specified set of glossary terms to each dataset."""
    def __init__(self, config: SimpleDatasetTermsConfig, ctx: PipelineContext):
        # Adapt the static urn list into the callback-based generic config.
        terms = [GlossaryTermAssociationClass(urn=term) for term in config.term_urns]
        generic_config = AddDatasetTermsConfig(
            get_terms_to_add=lambda _: terms,
            replace_existing=config.replace_existing,
            semantics=config.semantics,
        )
        super().__init__(generic_config, ctx)
    @classmethod
    def create(cls, config_dict: dict, ctx: PipelineContext) -> "SimpleAddDatasetTerms":
        config = SimpleDatasetTermsConfig.parse_obj(config_dict)
        return cls(config, ctx)
class PatternDatasetTermsConfig(TransformerSemanticsConfigModel):
    """Config for PatternAddDatasetTerms: urn-pattern -> term urns mapping."""
    term_pattern: KeyValuePattern = KeyValuePattern.all()
class PatternAddDatasetTerms(AddDatasetTerms):
    """Transformer that adds glossary terms chosen by urn-matching patterns."""
    def __init__(self, config: PatternDatasetTermsConfig, ctx: PipelineContext):
        # Resolve the pattern per entity urn into concrete term associations.
        term_pattern = config.term_pattern
        generic_config = AddDatasetTermsConfig(
            get_terms_to_add=lambda entity_urn: [
                GlossaryTermAssociationClass(urn=term_urn)
                for term_urn in term_pattern.value(entity_urn)
            ],
            replace_existing=config.replace_existing,
            semantics=config.semantics,
        )
        super().__init__(generic_config, ctx)
    @classmethod
    def create(
        cls, config_dict: dict, ctx: PipelineContext
    ) -> "PatternAddDatasetTerms":
        config = PatternDatasetTermsConfig.parse_obj(config_dict)
        return cls(config, ctx)
1,785 | process dir | #!/usr/bin/env python3
#
# Copyright 2016 WebAssembly Community Group participants
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import argparse
import os
import sys
SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__))
TEST_DIR = SCRIPT_DIR
REPO_ROOT_DIR = os.path.dirname(SCRIPT_DIR)
TESTSUITE_DIR = os.path.join(REPO_ROOT_DIR, 'third_party', 'testsuite')
PROPOSALS_DIR = os.path.join(TESTSUITE_DIR, 'proposals')
SPEC_TEST_DIR = os.path.join(TEST_DIR, 'spec')
WASM2C_SPEC_TEST_DIR = os.path.join(TEST_DIR, 'wasm2c', 'spec')
options = None
def GetFilesWithExtension(src_dir, want_ext):
    """Return the set of basenames (extension stripped) of files in `src_dir`
    whose extension equals `want_ext`; empty set if the directory is absent."""
    if not os.path.exists(src_dir):
        return set()
    split_names = (os.path.splitext(entry) for entry in os.listdir(src_dir))
    return {stem for stem, ext in split_names if ext == want_ext}
def METHOD_NAME(wabt_test_dir, testsuite_dir, tool, flags=None):
    """Sync wabt .txt test stubs with the .wast files in a testsuite dir.

    Removes stubs whose upstream .wast disappeared and writes a new stub
    (with TOOL/STDIN_FILE/ARGS directives) for each newly added .wast test.
    Honors the module-global `options.verbose`.
    """
    testsuite_tests = GetFilesWithExtension(testsuite_dir, '.wast')
    wabt_tests = GetFilesWithExtension(wabt_test_dir, '.txt')
    for removed_test_name in wabt_tests - testsuite_tests:
        test_filename = os.path.join(wabt_test_dir, removed_test_name + '.txt')
        if options.verbose:
            print('Removing %s' % test_filename)
        os.remove(test_filename)
    for added_test_name in testsuite_tests - wabt_tests:
        # The path embedded in the stub is repo-root-relative, forward slashes.
        wast_filename = os.path.join(
            os.path.relpath(testsuite_dir, REPO_ROOT_DIR),
            added_test_name + '.wast')
        test_filename = os.path.join(wabt_test_dir, added_test_name + '.txt')
        if options.verbose:
            print('Adding %s' % test_filename)
        test_dirname = os.path.dirname(test_filename)
        if not os.path.exists(test_dirname):
            os.makedirs(test_dirname)
        with open(test_filename, 'w') as f:
            f.write(';;; TOOL: %s\n' % tool)
            f.write(';;; STDIN_FILE: %s\n' % wast_filename.replace(os.sep, '/'))
            if flags:
                f.write(';;; ARGS*: %s\n' % flags)
def ProcessProposalDir(name, flags=None):
    """Sync both the interpreter and wasm2c spec tests for one proposal dir."""
    METHOD_NAME(os.path.join(SPEC_TEST_DIR, name),
                os.path.join(PROPOSALS_DIR, name),
                'run-interp-spec',
                flags)
    METHOD_NAME(os.path.join(WASM2C_SPEC_TEST_DIR, name),
                os.path.join(PROPOSALS_DIR, name),
                'run-spec-wasm2c',
                flags)
def main(args):
    """Regenerate spec test stubs for the core testsuite and all implemented proposals."""
    parser = argparse.ArgumentParser()
    parser.add_argument('-v', '--verbose', help='print more diagnotic messages.',
                        action='store_true')
    global options
    options = parser.parse_args(args)
    METHOD_NAME(SPEC_TEST_DIR, TESTSUITE_DIR, 'run-interp-spec')
    METHOD_NAME(WASM2C_SPEC_TEST_DIR, TESTSUITE_DIR, 'run-spec-wasm2c')
    all_proposals = [e.name for e in os.scandir(PROPOSALS_DIR) if e.is_dir()]
    # Extra tool flags required to enable each proposal's feature.
    flags = {
        'memory64': '--enable-memory64',
        'multi-memory': '--enable-multi-memory',
        'exception-handling': '--enable-exceptions',
        'extended-const': '--enable-extended-const',
        'tail-call': '--enable-tail-call',
        'relaxed-simd': '--enable-relaxed-simd',
    }
    # Proposals not implemented by wabt; skipped below.
    unimplemented = set([
        'gc',
        'function-references',
        'threads',
        'annotations',
        'exception-handling',
    ])
    # sanity check to verify that all flags are valid
    for proposal in flags:
        assert proposal in all_proposals, proposal
    # sanity check to verify that all unimplemented are valid
    for proposal in unimplemented:
        assert proposal in all_proposals, proposal
    proposals = [p for p in all_proposals if p not in unimplemented]
    for proposal in proposals:
        ProcessProposalDir(proposal, flags.get(proposal))
    return 0
if __name__ == '__main__':
sys.exit(main(sys.argv[1:])) |
1,786 | sort assigned attribute values | from typing import TYPE_CHECKING, Iterable, Set, Union
from ..page.models import Page
from ..product.models import Product, ProductVariant
from .models import (
AssignedPageAttribute,
AssignedPageAttributeValue,
AssignedProductAttribute,
AssignedProductAttributeValue,
AssignedVariantAttribute,
AssignedVariantAttributeValue,
Attribute,
AttributeValue,
)
AttributeAssignmentType = Union[
AssignedProductAttribute, AssignedVariantAttribute, AssignedPageAttribute
]
T_INSTANCE = Union[Product, ProductVariant, Page]
if TYPE_CHECKING:
from .models import AttributePage, AttributeProduct, AttributeVariant
def associate_attribute_values_to_instance(
    instance: T_INSTANCE,
    attribute: Attribute,
    *values: AttributeValue,
) -> AttributeAssignmentType:
    """Assign given attribute values to a product or variant.
    Note: be aware this function invokes the ``set`` method on the instance's
    attribute association. Meaning any values already assigned or concurrently
    assigned will be overridden by this call.

    :raise: AssertionError when a value does not belong to `attribute`.
    """
    values_ids = {value.pk for value in values}
    # Ensure the values are actually form the given attribute
    validate_attribute_owns_values(attribute, values_ids)
    # Associate the attribute and the passed values
    assignment = _associate_attribute_to_instance(instance, attribute.pk)
    assignment.values.set(values)
    # While migrating to a new structure we need to make sure we also
    # copy the assigned product to AssignedProductAttributeValue
    # where it will live after issue #12881 will be implemented
    if isinstance(instance, Product):
        AssignedProductAttributeValue.objects.filter(
            assignment_id=assignment.pk
        ).update(product_id=instance.pk)
    # This code will be deleted in new release (3.17), it is temporary solution between
    # releases to keep database in sync
    elif isinstance(instance, Page):
        AssignedPageAttributeValue.objects.filter(assignment_id=assignment.pk).update(
            page_id=instance.pk
        )
    # Persist the caller-specified ordering of the assigned values.
    METHOD_NAME(instance, assignment, values)
    return assignment
def validate_attribute_owns_values(attribute: Attribute, value_ids: Set[int]) -> None:
    """Check given value IDs are belonging to the given attribute.
    :raise: AssertionError
    """
    owned_ids = set(attribute.values.values_list("pk", flat=True))
    if not value_ids.issubset(owned_ids):
        raise AssertionError("Some values are not from the provided attribute.")
def _associate_attribute_to_instance(
    instance: T_INSTANCE, attribute_pk: int
) -> AttributeAssignmentType:
    """Associate a given attribute to an instance.

    Looks up the attribute relation on the instance's type and
    get-or-creates the matching assignment row.

    :raise: AssertionError for unsupported instance types.
    """
    assignment: AttributeAssignmentType
    if isinstance(instance, Product):
        attribute_rel: Union[
            "AttributeProduct", "AttributeVariant", "AttributePage"
        ] = instance.product_type.attributeproduct.get(attribute_id=attribute_pk)
        assignment, _ = AssignedProductAttribute.objects.get_or_create(
            product=instance, assignment=attribute_rel
        )
    elif isinstance(instance, ProductVariant):
        attribute_rel = instance.product.product_type.attributevariant.get(
            attribute_id=attribute_pk
        )
        assignment, _ = AssignedVariantAttribute.objects.get_or_create(
            variant=instance, assignment=attribute_rel
        )
    elif isinstance(instance, Page):
        attribute_rel = instance.page_type.attributepage.get(attribute_id=attribute_pk)
        assignment, _ = AssignedPageAttribute.objects.get_or_create(
            page=instance, assignment=attribute_rel
        )
    else:
        raise AssertionError(f"{instance.__class__.__name__} is unsupported")
    return assignment
def METHOD_NAME(
    instance: T_INSTANCE,
    assignment: AttributeAssignmentType,
    values: Iterable[AttributeValue],
) -> None:
    """Sort assigned attribute values based on values list order."""
    # Map the instance's concrete type to the reverse-relation name on the
    # assignment and to the through-model used for the bulk update.
    lookup_by_type = {
        "Product": ("productvalueassignment", AssignedProductAttributeValue),
        "ProductVariant": ("variantvalueassignment", AssignedVariantAttributeValue),
        "Page": ("pagevalueassignment", AssignedPageAttributeValue),
    }
    related_name, value_model = lookup_by_type[type(instance).__name__]
    # The desired ordering is simply the order in which ``values`` arrived.
    desired_order = [value.pk for value in values]
    assigned = sorted(
        getattr(assignment, related_name).select_related("value"),
        key=lambda item: desired_order.index(item.value.pk),
    )
    for position, item in enumerate(assigned):
        item.sort_order = position
    value_model.objects.bulk_update(assigned, ["sort_order"])
############################ Copyrights and license ############################
# #
# Copyright 2012 Vincent Jacques <vincent@vincent-jacques.net> #
# Copyright 2012 Zearin <zearin@gonk.net> #
# Copyright 2013 Vincent Jacques <vincent@vincent-jacques.net> #
# Copyright 2014 Vincent Jacques <vincent@vincent-jacques.net> #
# Copyright 2016 Peter Buckley <dx-pbuckley@users.noreply.github.com> #
# Copyright 2016 humbug <bah> #
# Copyright 2017 Hugo <hugovk@users.noreply.github.com> #
# Copyright 2017 Simon <spam@esemi.ru> #
# Copyright 2018 Steve Kowalik <steven@wedontsleep.org> #
# Copyright 2018 sfdye <tsfdye@gmail.com> #
# #
# This file is part of PyGithub. #
# http://pygithub.readthedocs.io/ #
# #
# PyGithub is free software: you can redistribute it and/or modify it under #
# the terms of the GNU Lesser General Public License as published by the Free #
# Software Foundation, either version 3 of the License, or (at your option) #
# any later version. #
# #
# PyGithub is distributed in the hope that it will be useful, but WITHOUT ANY #
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS #
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more #
# details. #
# #
# You should have received a copy of the GNU Lesser General Public License #
# along with PyGithub. If not, see <http://www.gnu.org/licenses/>. #
# #
################################################################################
import pickle
import github
from . import Framework
class Exceptions(Framework.TestCase):
    """Replay-based tests asserting GithubException status/data for error responses."""
    # 422: validation error payload is surfaced verbatim in exception.data.
    def testInvalidInput(self):
        with self.assertRaises(github.GithubException) as raisedexp:
            self.g.get_user().create_key("Bad key", "xxx")
        self.assertEqual(raisedexp.exception.status, 422)
        self.assertEqual(
            raisedexp.exception.data,
            {
                "errors": [
                    {
                        "code": "custom",
                        "field": "key",
                        "message": "key is invalid. It must begin with 'ssh-rsa' or 'ssh-dss'. Check that you're copying the public half of the key",
                        "resource": "PublicKey",
                    }
                ],
                "message": "Validation Failed",
            },
        )
    # Non-JSON body (HTML error page) is wrapped under a "data" key.
    def testNonJsonDataReturnedByGithub(self):
        # Replay data was forged according to https://github.com/jacquev6/PyGithub/pull/182
        with self.assertRaises(github.GithubException) as raisedexp:
            # 503 would be retried, disable retries
            self.get_github(retry=None, pool_size=self.pool_size).get_user("jacquev6")
        self.assertEqual(raisedexp.exception.status, 503)
        self.assertEqual(
            raisedexp.exception.data,
            {
                "data": "<html><body><h1>503 Service Unavailable</h1>No server is available to handle this request.</body></html>",
            },
        )
    # 404 for a repo: status, data and str() formatting.
    def testUnknownObject(self):
        with self.assertRaises(github.GithubException) as raisedexp:
            self.g.get_user().get_repo("Xxx")
        self.assertEqual(raisedexp.exception.status, 404)
        self.assertEqual(raisedexp.exception.data, {"message": "Not Found"})
        self.assertEqual(str(raisedexp.exception), '404 {"message": "Not Found"}')
    # 404 for a user follows the same contract.
    def testUnknownUser(self):
        with self.assertRaises(github.GithubException) as raisedexp:
            self.g.get_user("ThisUserShouldReallyNotExist")
        self.assertEqual(raisedexp.exception.status, 404)
        self.assertEqual(raisedexp.exception.data, {"message": "Not Found"})
        self.assertEqual(str(raisedexp.exception), '404 {"message": "Not Found"}')
    # 401 on bad credentials.
    def testBadAuthentication(self):
        with self.assertRaises(github.GithubException) as raisedexp:
            github.Github(auth=github.Auth.Login("BadUser", "BadPassword")).get_user().login
        self.assertEqual(raisedexp.exception.status, 401)
        self.assertEqual(raisedexp.exception.data, {"message": "Bad credentials"})
        self.assertEqual(str(raisedexp.exception), '401 {"message": "Bad credentials"}')
    # GithubException must survive a pickle round-trip (used across processes).
    def testExceptionPickling(self):
        pickle.loads(pickle.dumps(github.GithubException("foo", "bar", None)))
    def testJSONParseError(self):
        # Replay data was forged to force a JSON error
        with self.assertRaises(ValueError):
            self.g.get_user("jacquev6")
class SpecificExceptions(Framework.TestCase):
    """Tests that each error class maps to its specific GithubException subclass."""
    def testBadCredentials(self):
        self.assertRaises(
            github.BadCredentialsException,
            lambda: github.Github(auth=github.Auth.Login("BadUser", "BadPassword")).get_user().login,
        )
    def test2FARequired(self):
        self.assertRaises(
            github.TwoFactorException,
            lambda: github.Github(auth=github.Auth.Login("2fauser", "password")).get_user().login,
        )
    def testUnknownObject(self):
        self.assertRaises(github.UnknownObjectException, lambda: self.g.get_user().get_repo("Xxx"))
    # An empty user agent is rejected by the API with a dedicated exception.
    def METHOD_NAME(self):
        self.assertRaises(
            github.BadUserAgentException,
            lambda: github.Github(auth=self.login, user_agent="").get_user().name,
        )
    def testRateLimitExceeded(self):
        # rate limit errors would be retried if retry is not set None
        g = github.Github(retry=None)
        def exceed():
            for i in range(100):
                g.get_user("jacquev6")
        self.assertRaises(github.RateLimitExceededException, exceed)
    def testAuthenticatedRateLimitExceeded(self):
        def exceed():
            for i in range(100):
                res = self.g.search_code("jacquev6")
                res.get_page(0)
        with self.assertRaises(github.RateLimitExceededException) as raised:
            exceed()
        # The server-provided backoff header is preserved on the exception.
        self.assertEqual(raised.exception.headers.get("retry-after"), "60")
    def testIncompletableObject(self):
        # Disable the post-init completeness check so we can build a bare object.
        github.UserKey.UserKey.setCheckAfterInitFlag(False)
        obj = github.UserKey.UserKey(None, {}, {}, False)
        self.assertRaises(github.IncompletableObject, obj._completeIfNeeded)
"""
Unittests for populate_created_on_site_user_attribute management command.
"""
from unittest import mock
import ddt
import pytest
from django.contrib.auth.models import User # lint-amnesty, pylint: disable=imported-auth-user
from django.core.management import CommandError, call_command
from django.test import TestCase
from common.djangoapps.student.models import Registration, UserAttribute
from common.djangoapps.student.tests.factories import UserFactory
from openedx.core.djangoapps.site_configuration.tests.mixins import SiteMixin
CREATED_ON_SITE = 'created_on_site'
@ddt.ddt
class TestPopulateUserAttribute(SiteMixin, TestCase):
    """
    Test populate_created_on_site_user_attribute management command.
    """
    def setUp(self):
        super().setUp()
        self._create_sample_data()
        self.users = User.objects.all()
        self.registered_users = Registration.objects.all()
        # Comma-separated argument strings in the format the command expects.
        self.user_ids = ','.join([str(user.id) for user in self.users])
        self.activation_keys = ','.join([registered_user.activation_key for registered_user in self.registered_users])
    def _create_sample_data(self):
        """
        Creates the users and register them.
        """
        for __ in range(3):
            Registration().register(UserFactory.create())
    def METHOD_NAME(self):
        """
        Test population of created_on_site attribute by user ids.
        """
        call_command(
            "populate_created_on_site_user_attribute",
            "--users", self.user_ids,
            "--site-domain", self.site.domain
        )
        for user in self.users:
            assert UserAttribute.get_user_attribute(user, CREATED_ON_SITE) == self.site.domain
        # Populate 'created_on_site' attribute with different site domain
        call_command(
            "populate_created_on_site_user_attribute",
            "--users", self.user_ids,
            "--site-domain", self.site_other.domain
        )
        for user in self.users:
            # 'created_on_site' attribute already exists. Attribute's value will not change
            assert UserAttribute.get_user_attribute(user, CREATED_ON_SITE) != self.site_other.domain
    def test_command_by_activation_keys(self):
        """
        Test population of created_on_site attribute by activation keys.
        """
        call_command(
            "populate_created_on_site_user_attribute",
            "--activation-keys", self.activation_keys,
            "--site-domain", self.site.domain
        )
        for register_user in self.registered_users:
            assert UserAttribute.get_user_attribute(register_user.user, CREATED_ON_SITE) == self.site.domain
        # Populate 'created_on_site' attribute with different site domain
        call_command(
            "populate_created_on_site_user_attribute",
            "--activation-keys", self.activation_keys,
            "--site-domain", self.site_other.domain
        )
        for register_user in self.registered_users:
            # 'created_on_site' attribute already exists. Attribute's value will not change
            assert UserAttribute.get_user_attribute(register_user.user, CREATED_ON_SITE) != self.site_other.domain
    def test_command_with_incomplete_argument(self):
        """
        Test management command raises CommandError without '--users' and '--activation_keys' arguments.
        """
        with pytest.raises(CommandError):
            call_command(
                "populate_created_on_site_user_attribute",
                "--site-domain", self.site.domain
            )
    def test_command_with_invalid_arguments(self):
        """
        Test management command with invalid user ids and activation keys.
        """
        user = self.users[0]
        call_command(
            "populate_created_on_site_user_attribute",
            "--users", f'9{user.id}',  # invalid id
            "--site-domain", self.site.domain
        )
        assert UserAttribute.get_user_attribute(user, CREATED_ON_SITE) is None
        register_user = self.registered_users[0]
        call_command(
            "populate_created_on_site_user_attribute",
            "--activation-keys", f"invalid-{register_user.activation_key}",  # invalid key
            "--site-domain", self.site.domain
        )
        assert UserAttribute.get_user_attribute(register_user.user, CREATED_ON_SITE) is None
    def test_command_without_site_domain(self):
        """
        Test management command raises CommandError without '--site-domain' argument.
        """
        with pytest.raises(CommandError):
            call_command(
                "populate_created_on_site_user_attribute",
                # Fix: use the command's real option name "--users"; the former
                # "--user" only resolved via argparse prefix abbreviation.
                "--users", self.user_ids,
                "--activation-keys", self.activation_keys
            )
    @ddt.data('y', 'n')
    def test_with_invalid_site_domain(self, populate):
        """
        Test management command with invalid site domain.
        """
        fake_site_domain = 'fake-site-domain'
        # The command prompts for confirmation when the domain is unknown.
        with mock.patch('six.moves.input', return_value=populate):
            call_command(
                "populate_created_on_site_user_attribute",
                "--users", self.user_ids,
                "--site-domain", fake_site_domain
            )
        for user in self.users:
            if populate == 'y':
                assert UserAttribute.get_user_attribute(user, CREATED_ON_SITE) == fake_site_domain
            else:
                assert UserAttribute.get_user_attribute(user, CREATED_ON_SITE) is None
# coding: utf-8
from __future__ import unicode_literals
import random
import string
from .common import InfoExtractor
from ..compat import compat_HTTPError
from ..utils import (
determine_ext,
int_or_none,
js_to_json,
ExtractorError,
urlencode_postdata
)
class FunimationIE(InfoExtractor):
    """Extractor for funimation.com / funimationnow.uk episode pages."""
    _VALID_URL = r'https?://(?:www\.)?funimation(?:\.com|now\.uk)/(?:[^/]+/)?shows/[^/]+/(?P<id>[^/?#&]+)'
    _NETRC_MACHINE = 'funimation'
    # Auth token obtained by _login(); None means anonymous access.
    _TOKEN = None
    _TESTS = [{
        'url': 'https://www.funimation.com/shows/hacksign/role-play/',
        'info_dict': {
            'id': '91144',
            'display_id': 'role-play',
            'ext': 'mp4',
            'title': '.hack//SIGN - Role Play',
            'description': 'md5:b602bdc15eef4c9bbb201bb6e6a4a2dd',
            'thumbnail': r're:https?://.*\.jpg',
        },
        'params': {
            # m3u8 download
            'skip_download': True,
        },
    }, {
        'url': 'https://www.funimation.com/shows/attack-on-titan-junior-high/broadcast-dub-preview/',
        'info_dict': {
            'id': '210051',
            'display_id': 'broadcast-dub-preview',
            'ext': 'mp4',
            'title': 'Attack on Titan: Junior High - Broadcast Dub Preview',
            'thumbnail': r're:https?://.*\.(?:jpg|png)',
        },
        'params': {
            # m3u8 download
            'skip_download': True,
        },
    }, {
        'url': 'https://www.funimationnow.uk/shows/puzzle-dragons-x/drop-impact/simulcast/',
        'only_matching': True,
    }, {
        # with lang code
        'url': 'https://www.funimation.com/en/shows/hacksign/role-play/',
        'only_matching': True,
    }]
    def _login(self):
        """Authenticate with the configured credentials and store the token.

        No-op when no username is configured; re-raises download errors
        except HTTP 401, whose JSON 'error' field is surfaced as an
        expected ExtractorError.
        """
        username, password = self._get_login_info()
        if username is None:
            return
        try:
            data = self._download_json(
                'https://prod-api-funimationnow.dadcdigital.com/api/auth/login/',
                None, 'Logging in', data=urlencode_postdata({
                    'username': username,
                    'password': password,
                }))
            self._TOKEN = data['token']
        except ExtractorError as e:
            if isinstance(e.cause, compat_HTTPError) and e.cause.code == 401:
                error = self._parse_json(e.cause.read().decode(), None)['error']
                raise ExtractorError(error, expected=True)
            raise
    def _real_initialize(self):
        # Log in eagerly so _TOKEN is ready for the media API request.
        self._login()
    def METHOD_NAME(self, url):
        """Extract video metadata and formats from an episode page URL."""
        display_id = self._match_id(url)
        webpage = self._download_webpage(url, display_id)
        # Helper: read a value from the page's KANE analytics globals.
        def _search_kane(name):
            return self._search_regex(
                r"KANE_customdimensions\.%s\s*=\s*'([^']+)';" % name,
                webpage, name, default=None)
        title_data = self._parse_json(self._search_regex(
            r'TITLE_DATA\s*=\s*({[^}]+})',
            webpage, 'title data', default=''),
            display_id, js_to_json, fatal=False) or {}
        # Video id: TITLE_DATA, then KANE globals, then the player iframe,
        # finally fall back to meta-tag player URLs (fatal if all fail).
        video_id = title_data.get('id') or self._search_regex([
            r"KANE_customdimensions.videoID\s*=\s*'(\d+)';",
            r'<iframe[^>]+src="/player/(\d+)',
        ], webpage, 'video_id', default=None)
        if not video_id:
            player_url = self._html_search_meta([
                'al:web:url',
                'og:video:url',
                'og:video:secure_url',
            ], webpage, fatal=True)
            video_id = self._search_regex(r'/player/(\d+)', player_url, 'video id')
        title = episode = title_data.get('title') or _search_kane('videoTitle') or self._og_search_title(webpage)
        series = _search_kane('showName')
        if series:
            title = '%s - %s' % (series, title)
        description = self._html_search_meta(['description', 'og:description'], webpage, fatal=True)
        try:
            headers = {}
            if self._TOKEN:
                headers['Authorization'] = 'Token %s' % self._TOKEN
            # 'pinst_id' is a random 8-char player-instance id the API expects.
            sources = self._download_json(
                'https://www.funimation.com/api/showexperience/%s/' % video_id,
                video_id, headers=headers, query={
                    'pinst_id': ''.join([random.choice(string.digits + string.ascii_letters) for _ in range(8)]),
                })['items']
        except ExtractorError as e:
            # 403 carries a JSON error payload (e.g. region/subscription block).
            if isinstance(e.cause, compat_HTTPError) and e.cause.code == 403:
                error = self._parse_json(e.cause.read(), video_id)['errors'][0]
                raise ExtractorError('%s said: %s' % (
                    self.IE_NAME, error.get('detail') or error.get('title')), expected=True)
            raise
        formats = []
        for source in sources:
            source_url = source.get('src')
            if not source_url:
                continue
            source_type = source.get('videoType') or determine_ext(source_url)
            if source_type == 'm3u8':
                formats.extend(self._extract_m3u8_formats(
                    source_url, video_id, 'mp4',
                    m3u8_id='hls', fatal=False))
            else:
                formats.append({
                    'format_id': source_type,
                    'url': source_url,
                })
        self._sort_formats(formats)
        return {
            'id': video_id,
            'display_id': display_id,
            'title': title,
            'description': description,
            'thumbnail': self._og_search_thumbnail(webpage),
            'series': series,
            'season_number': int_or_none(title_data.get('seasonNum') or _search_kane('season')),
            'episode_number': int_or_none(title_data.get('episodeNum')),
            'episode': episode,
            'season_id': title_data.get('seriesId'),
            'formats': formats,
        }
import requests
from waste_collection_schedule import Collection # type: ignore[attr-defined]
from bs4 import BeautifulSoup, NavigableString
import datetime
# Metadata consumed by the waste_collection_schedule framework.
TITLE = "Armadale (Western Australia)"
DESCRIPTION = "Source for Armadale (Western Australia)."
URL = "https://www.armadale.wa.gov.au"
# Addresses used by the framework's integration tests.
TEST_CASES = {
    "23 Sexty St, ARMADALE": {"address": "23 Sexty St, ARMADALE"},
    "270 Skeet Rd, HARRISDALE": {"address": "270 Skeet Rd, HARRISDALE"}
}
# Maps the weekday names returned by the council site to Python's
# date.weekday() numbering (Monday == 0).
WEEKDAYS = {
    "Monday": 0,
    "Tuesday": 1,
    "Wednesday": 2,
    "Thursday": 3,
    "Friday": 4,
    "Saturday": 5,
    "Sunday": 6,
}
# Drupal AJAX endpoint used to submit the collection-day lookup form.
API_URL = "https://www.armadale.wa.gov.au/system/ajax"
def easter(year):
    """Return the date of Western (Gregorian) Easter Sunday for ``year``.

    Port of the "new method" from dateutil.easter
    (https://dateutil.readthedocs.io/en/stable/_modules/dateutil/easter.html),
    inlined to avoid a runtime dependency. The dead offset variable ``e``
    (always 0 in this method) from the original port has been removed.
    """
    y = year
    g = y % 19  # position of the year in the 19-year Metonic cycle
    c = y // 100
    # h: days from 21 March to the ecclesiastical full moon (0..29)
    h = (c - c // 4 - (8 * c + 13) // 25 + 19 * g + 15) % 30
    # i: corrected days to the full moon
    i = h - (h // 28) * (1 - (h // 28) * (29 // (h + 1)) * ((21 - g) // 11))
    # j: weekday of the full moon (0 == Sunday)
    j = (y + y // 4 + i + 2 - c + c // 4) % 7
    # p: days from 21 March to the Sunday on or before the full moon
    p = i - j
    d = 1 + (p + 27 + (p + 6) // 40) % 31
    m = 3 + (p + 26) // 30
    return datetime.date(int(y), int(m), int(d))
class Source:
    """Scrapes the Armadale council site for a street address's bin schedule."""
    def __init__(self, address: str):
        # Free-form street address as entered on the council's lookup form.
        self._address: str = address
    def METHOD_NAME(self):
        """Return ~52 weeks of Collection entries for the configured address."""
        args: dict[str, str] = {
            "address": self._address,
            "form_id": "waste_collection_form"
        }
        s = requests.Session()
        # Load the form page first to obtain Drupal's per-session form_build_id.
        r = s.get("https://www.armadale.wa.gov.au/my-waste-collection-day")
        r.raise_for_status()
        soup = BeautifulSoup(r.text, "html.parser")
        form_build_id = soup.find(
            "input", {"type": "hidden", "name": "form_build_id"})
        if not form_build_id or isinstance(form_build_id, NavigableString) or not form_build_id.attrs["value"]:
            raise Exception("Could not find form_build_id")
        form_build_id = form_build_id["value"]
        if not isinstance(form_build_id, str):
            raise Exception("Could not find form_build_id")
        args["form_build_id"] = form_build_id
        # get json
        r = s.post(API_URL, data=args)
        r.raise_for_status()
        data = r.json()
        if len(data) < 2:
            raise Exception("wrong data returned")
        # Second AJAX command carries the HTML fragment with the results table.
        data = data[1]["data"]
        soup = BeautifulSoup(data, "html.parser")
        trs = soup.find_all("tr")
        if not trs or len(trs) < 3:
            raise Exception("Could not parse data correctly")
        # Row 1: weekday of collection; row 2: whether recycling is "this week".
        bin_day = trs[1].find("td").text.strip()
        if not bin_day or not bin_day in WEEKDAYS:
            raise Exception("Could not parse data correctly")
        bin_day = WEEKDAYS[bin_day]
        recycling: bool = trs[2].find(
            "td").text.strip().lower().startswith("this week")
        current_day = datetime.datetime.now().date()
        diff_to_next = (bin_day - current_day.weekday()) % 7
        # next is next week
        if current_day.weekday() + diff_to_next >= 7:
            # The next collection falls in the following week, so the
            # fortnightly recycling flag flips before the first entry.
            recycling = not recycling
        current_day = current_day + datetime.timedelta(days=diff_to_next)
        entries = []
        # Project one year of weekly collections, shifting for public holidays.
        for i in range(52):
            date = current_day
            start_of_week = date - datetime.timedelta(days=date.weekday())
            christmas = datetime.date(current_day.year, 12, 25)
            new_years_day = datetime.date(
                current_day.year + (1 if current_day.month == 12 else 0), 1, 1)
            good_friday = easter(current_day.year) - datetime.timedelta(days=2)
            if start_of_week <= christmas <= date or start_of_week <= new_years_day <= date:
                # if christmas or new years day is in the current week
                # NOTE(review): only christmas.weekday() is tested here even when
                # the triggering holiday is New Year's Day -- confirm this matches
                # the council's actual holiday-shift rule.
                if 0 <= christmas.weekday() < 5:  # if christmas is on a weekday
                    date += datetime.timedelta(days=1)
            if date == good_friday:
                date += datetime.timedelta(days=1)
            entries.append(Collection(
                date=date, t="rubbish", icon="mdi:trash-can"))
            if recycling:
                entries.append(Collection(
                    date=date, t="recycling", icon="mdi:recycle"))
            current_day += datetime.timedelta(days=7)
            # Recycling alternates fortnightly.
            recycling = not recycling
        return entries
# Copyright 1999-2022 Alibaba Group Holding Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from itertools import combinations
from ... import tensor as mt
from ..utils import check_array, check_consistent_length
from ..utils.multiclass import type_of_target
def _average_binary_score(
    binary_metric,
    y_true,
    y_score,
    average,
    sample_weight=None,
    session=None,
    run_kwargs=None,
):
    """Apply ``binary_metric`` per label and combine per ``average``.

    Tensor-based port of sklearn's ``_average_binary_score``: for binary
    targets the metric is applied directly; for multilabel-indicator targets
    it is computed per column (or per sample for ``average='samples'``) and
    then reduced according to ``average``.
    """
    average_options = (None, "micro", "macro", "weighted", "samples")
    if average not in average_options: # pragma: no cover
        raise ValueError("average has to be one of {0}".format(average_options))
    y_type = type_of_target(y_true).to_numpy(session=session, **(run_kwargs or dict()))
    if y_type not in ("binary", "multilabel-indicator"): # pragma: no cover
        raise ValueError("{0} format is not supported".format(y_type))
    if y_type == "binary":
        # Binary case: no per-label decomposition needed.
        return binary_metric(y_true, y_score, sample_weight=sample_weight)
    check_consistent_length(
        y_true, y_score, sample_weight, session=session, run_kwargs=run_kwargs
    )
    y_true = check_array(y_true)
    y_score = check_array(y_score)
    # Axis 1 == labels; flipped to 0 (samples) for average='samples'.
    not_average_axis = 1
    score_weight = sample_weight
    average_weight = None
    if average == "micro":
        # Micro: flatten everything and score once over all label indicators.
        if score_weight is not None: # pragma: no cover
            score_weight = mt.repeat(score_weight, y_true.shape[1])
        y_true = y_true.ravel()
        y_score = y_score.ravel()
    elif average == "weighted":
        # Weighted: weight each label's score by its (optionally sample-weighted)
        # support.
        if score_weight is not None: # pragma: no cover
            average_weight = mt.sum(
                mt.multiply(y_true, mt.reshape(score_weight, (-1, 1))), axis=0
            )
        else:
            average_weight = mt.sum(y_true, axis=0)
        if mt.isclose(average_weight.sum(), 0.0).to_numpy(
            session=session, **(run_kwargs or dict())
        ):
            return 0
    elif average == "samples":
        # swap average_weight <-> score_weight
        average_weight = score_weight
        score_weight = None
        not_average_axis = 0
    if y_true.ndim == 1:
        y_true = y_true.reshape((-1, 1))
    if y_score.ndim == 1:
        y_score = y_score.reshape((-1, 1))
    n_classes = y_score.shape[not_average_axis]
    score = mt.zeros((n_classes,))
    # Score one label (or sample) at a time.
    for c in range(n_classes):
        y_true_c = y_true.take([c], axis=not_average_axis).ravel()
        y_score_c = y_score.take([c], axis=not_average_axis).ravel()
        score[c] = binary_metric(y_true_c, y_score_c, sample_weight=score_weight)
    # Average the results
    if average is not None:
        if average_weight is not None:
            # Scores with 0 weights are forced to be 0, preventing the average
            # score from being affected by 0-weighted NaN elements.
            average_weight = mt.asarray(average_weight)
            score[average_weight == 0] = 0
        return mt.average(score, weights=average_weight)
    else:
        return score
def METHOD_NAME(
    binary_metric, y_true, y_score, average="macro", session=None, run_kwargs=None
):
    """Average ``binary_metric`` over all one-vs-one class pairs.

    For every unordered pair of classes present in ``y_true``, restricts the
    data to samples of those two classes, scores each class as the positive
    one, and averages the two scores. With ``average='weighted'`` each pair
    is weighted by its prevalence (fraction of samples in the pair);
    otherwise pairs are weighted equally (macro).
    """
    check_consistent_length(y_true, y_score, session=session, run_kwargs=run_kwargs)
    y_true_unique = mt.unique(y_true).to_numpy()
    n_classes = y_true_unique.shape[0]
    n_pairs = n_classes * (n_classes - 1) // 2
    pair_scores = mt.empty(n_pairs)
    is_weighted = average == "weighted"
    prevalence = mt.empty(n_pairs) if is_weighted else None
    # Compute scores treating a as positive class and b as negative class,
    # then b as positive class and a as negative class
    for ix, (a, b) in enumerate(combinations(y_true_unique, 2)):
        a_mask = y_true == a
        b_mask = y_true == b
        ab_mask = mt.logical_or(a_mask, b_mask)
        if is_weighted:
            prevalence[ix] = mt.average(ab_mask)
        a_true = a_mask[ab_mask]
        b_true = b_mask[ab_mask]
        # NOTE(review): y_score is indexed by class label (y_score[ab_mask, a]),
        # which assumes labels are 0..n_classes-1 column indices -- confirm.
        a_true_score = binary_metric(a_true, y_score[ab_mask, a])
        b_true_score = binary_metric(b_true, y_score[ab_mask, b])
        pair_scores[ix] = (a_true_score + b_true_score) / 2
    return mt.average(pair_scores, weights=prevalence)
from collections import defaultdict
from datetime import timedelta
from ichnaea.data.tasks import cleanup_datamap, update_datamap
from ichnaea.models.content import DataMap, encode_datamap_grid
from ichnaea import util
class TestDataMapCleaner(object):
    """Tests for the cleanup_datamap task (removal of stale datamap grid rows)."""
    @property
    def today(self):
        return util.utcnow().date()
    def _one(self, lat, lon, time):
        # Build a single shard-model row for the scaled grid cell.
        lat, lon = DataMap.scale(lat, lon)
        return DataMap.shard_model(lat, lon)(
            grid=(lat, lon), created=time, modified=time
        )
    def test_empty(self, celery, session):
        for shard_id, shard in DataMap.shards().items():
            cleanup_datamap.delay(shard_id=shard_id).get()
        # NOTE(review): this assert only checks the last shard from the loop;
        # consider asserting inside the loop for full coverage.
        assert session.query(shard).count() == 0
    def METHOD_NAME(self, celery, session):
        # Per region: one fresh row (kept) and one row older than a year (removed).
        session.add_all(
            [
                self._one(37.0, 6.0, self.today),
                self._one(37.0, 6.1, self.today - timedelta(days=366)),
                self._one(37.0, 4.0, self.today),
                self._one(37.0, 4.1, self.today - timedelta(days=366)),
                self._one(10.0, 6.0, self.today),
                self._one(10.0, 6.1, self.today - timedelta(days=366)),
                self._one(10.0, 4.0, self.today),
                self._one(10.0, 4.1, self.today - timedelta(days=366)),
            ]
        )
        session.flush()
        for shard_id, shard in DataMap.shards().items():
            cleanup_datamap.delay(shard_id=shard_id).get()
        # NOTE(review): as above, only the last shard is asserted here.
        assert session.query(shard).count() == 1
class TestDataMapUpdater(object):
    """Tests for the update_datamap task (inserting/refreshing datamap grid rows)."""
    @property
    def today(self):
        return util.utcnow().date()
    @property
    def yesterday(self):
        return self.today - timedelta(days=1)
    def _add(self, session, entries):
        # Insert (lat, lon, time) rows directly into the shard tables.
        for lat, lon, time in entries:
            lat, lon = DataMap.scale(lat, lon)
            session.add(
                DataMap.shard_model(lat, lon)(
                    grid=(lat, lon), created=time, modified=time
                )
            )
        session.flush()
    def _check_position(self, stat, lat, lon):
        assert stat.grid == DataMap.scale(lat, lon)
    def _queue(self, celery, pairs):
        # Enqueue encoded grid updates on each shard's task queue.
        grids = defaultdict(list)
        for lat, lon in pairs:
            lat, lon = DataMap.scale(lat, lon)
            shard_id = DataMap.shard_id(lat, lon)
            grids[shard_id].append(encode_datamap_grid(lat, lon))
        for shard_id, values in grids.items():
            queue = celery.data_queues["update_datamap_" + shard_id]
            queue.enqueue(list(values))
    def test_empty(self, celery, session):
        for shard_id, shard in DataMap.shards().items():
            update_datamap.delay(shard_id=shard_id).get()
        # NOTE(review): only the last shard from the loop is asserted here.
        assert session.query(shard).count() == 0
    def test_one(self, celery, session):
        # A single queued position creates one row stamped with today.
        lat = 1.234567
        lon = 2.345678
        shard_id = DataMap.shard_id(*DataMap.scale(lat, lon))
        self._queue(celery, [(lat, lon)])
        update_datamap.delay(shard_id=shard_id).get()
        grids = session.query(DataMap.shards()[shard_id]).all()
        assert len(grids) == 1
        self._check_position(grids[0], 1.235, 2.346)
        assert grids[0].created == self.today
        assert grids[0].modified == self.today
    def test_update(self, celery, session):
        # Re-observing an existing grid keeps created but bumps modified.
        lat = 1.0
        lon = 2.0
        shard_id = DataMap.shard_id(*DataMap.scale(lat, lon))
        self._add(session, [(lat, lon, self.yesterday)])
        self._queue(celery, [(lat, lon)])
        update_datamap.delay(shard_id=shard_id).get()
        grids = session.query(DataMap.shards()[shard_id]).all()
        assert len(grids) == 1
        self._check_position(grids[0], 1.0, 2.0)
        assert grids[0].created == self.yesterday
        assert grids[0].modified == self.today
    def test_multiple(self, celery, session):
        # Mix of pre-existing rows, duplicates and near-duplicates that
        # collapse onto the same scaled grid cell.
        self._add(
            session,
            [
                (0.0, 1.0, self.today),
                (1.0, 2.0, self.yesterday),
                (-10.0, 40.0, self.yesterday),
            ],
        )
        self._queue(
            celery,
            [
                (0.0, 1.0),
                (1.0, 2.0),
                (1.0, 2.0),
                (40.0011, 3.0011),
                (40.0012, 3.0012),
                (40.0013, 3.0013),
                (0.0, 0.0),
                (1.0, 2.0),
                (1.00001, 2.00001),
            ],
        )
        for shard_id in DataMap.shards():
            update_datamap.delay(shard_id=shard_id).get()
        rows = []
        for shard in DataMap.shards().values():
            rows.extend(session.query(shard).all())
        assert len(rows) == 5
        created = set()
        modified = set()
        positions = set()
        for row in rows:
            lat, lon = row.grid
            created.add(row.created)
            modified.add(row.modified)
            positions.add((lat / 1000.0, lon / 1000.0))
        assert created == set([self.today, self.yesterday])
        assert modified == set([self.today, self.yesterday])
        assert positions == set(
            [(0.0, 0.0), (0.0, 1.0), (1.0, 2.0), (-10.0, 40.0), (40.001, 3.001)]
        )
# Copyright (c) 2018 Charles University, Faculty of Arts,
# Institute of the Czech National Corpus
# Copyright (c) 2018 Tomas Machalek <tomas.machalek@gmail.com>
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; version 2
# dated June, 1991.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
"""
A simple auth for users with private API key.
Please note that this is not intended for installation with many
users as sharing a single token between many people is not
very secure.
required xml conf: please see ./config.rng
"""
import hashlib
from dataclasses import dataclass
from typing import Dict, List, Optional
import plugins
from action.plugin.ctx import PluginCtx
from plugin_types.auth import (
AbstractRemoteAuth, CorpusAccess, GetUserInfo, UserInfo)
@dataclass
class ApiTokenZone:
    # One "zone" = a single API key bound to a virtual user and the corpora
    # that user may access.
    api_key: str  # presumably a SHA-256 hex digest of the raw key (cf. StaticAuth._hash_key) -- TODO confirm against config
    user_id: int  # numeric user id reported for this key's sessions
    user_info: str  # human-readable user label
    corpora: Dict[str, Optional[str]]  # normalized name => full name (None when no variant prefix)
class StaticAuth(AbstractRemoteAuth):
    """API-key based auth: each configured key ("zone") maps to a fixed user
    with a fixed list of accessible corpora."""
    _zones: Dict[str, ApiTokenZone]
    def __init__(self, anonymous_id, api_key_cookie_name, api_key_http_header, zones, login_url, logout_url):
        super(StaticAuth, self).__init__(anonymous_id)
        self._api_key_cookie_name = api_key_cookie_name
        self._api_key_http_header = api_key_http_header
        self._login_url = login_url
        self._logout_url = logout_url
        self._zones = {}
        for zone in zones:
            # Corpus entries may be "variant/name" or just "name"; keys are
            # lowercased names, values the variant prefix (or None).
            norm_corpora = {}
            for corp in zone.get('corpora', []):
                tmp = corp.split('/')
                if len(tmp) == 2:
                    norm_corpora[tmp[1].lower()] = tmp[0]
                else:
                    norm_corpora[tmp[0].lower()] = None
            # NOTE(review): revalidate() looks zones up by the SHA-256 of the
            # presented key, so configured 'api_key' values must already be
            # hashed -- confirm against config.rng.
            self._zones[zone['api_key']] = ApiTokenZone(
                user_id=zone['user_id'],
                user_info=zone.get('user_info', 'User {}'.format(zone['user_id'])),
                api_key=zone['api_key'],
                corpora=norm_corpora)
    def anonymous_user(self, plugin_ctx) -> UserInfo:
        # Fallback identity used when no valid API key is presented.
        return UserInfo(
            id=self._anonymous_id,
            user='unauthorized',
            fullname='Unauthorized user',
            email=None,
            api_key=None)
    def _find_user(self, user_id) -> Optional[ApiTokenZone]:
        # Linear scan is fine: the number of configured zones is small.
        for item in self._zones.values():
            if item.user_id == user_id:
                return item
        return None
    def is_anonymous(self, user_id):
        return user_id == self._anonymous_id
    def is_administrator(self, user_id):
        # No admin accounts exist in this static setup.
        return False
    async def corpus_access(self, user_dict: UserInfo, corpus_id: str) -> CorpusAccess:
        zone = self._find_user(user_dict['id'])
        if zone is None:
            return CorpusAccess(False, False, '')
        if corpus_id not in zone.corpora:
            return CorpusAccess(False, False, '')
        return CorpusAccess(False, True, zone.corpora[corpus_id])
    async def permitted_corpora(self, user_dict: UserInfo) -> List[str]:
        if self.is_anonymous(user_dict['id']):
            return []
        else:
            zone = self._find_user(user_dict['id'])
            return list(zone.corpora.keys())
    async def get_user_info(self, plugin_ctx: PluginCtx) -> GetUserInfo:
        # Rename the 'user' key to 'username' to match the expected interface.
        return {
            'username' if k == 'user' else k: v
            for k, v in plugin_ctx.user_dict.items()
        }
    def _hash_key(self, k):
        # Returns None for empty/missing keys.
        if not k:
            return None
        return hashlib.sha256(k.encode()).hexdigest()
    def _get_api_key(self, plugin_ctx):
        # Cookie takes precedence over the HTTP header when both are configured.
        if self._api_key_cookie_name:
            api_key_cookie = plugin_ctx.cookies.get(self._api_key_cookie_name)
            return api_key_cookie.value if api_key_cookie else None
        elif self._api_key_http_header:
            return plugin_ctx.request.headers.get(self._api_key_http_header)
    async def revalidate(self, plugin_ctx):
        """Re-evaluate the session on each request: bind a valid API key to its
        zone user, otherwise reset the session to the anonymous user."""
        curr_user_id = plugin_ctx.session.get('user', {'id': None})['id']
        api_key = self._get_api_key(plugin_ctx)
        hash_key = self._hash_key(api_key)
        if api_key and hash_key in self._zones:
            zone = self._zones[hash_key]
            if self.is_anonymous(curr_user_id):
                plugin_ctx.session.clear()
            plugin_ctx.session['user'] = dict(
                id=zone.user_id, user='api_user', fullname=zone.user_info)
        else:
            if not self.is_anonymous(curr_user_id):
                plugin_ctx.session.clear()
            plugin_ctx.session['user'] = self.anonymous_user(plugin_ctx)
    def METHOD_NAME(self, return_url=None):
        return self._login_url
    def get_logout_url(self, return_url=None):
        return self._logout_url
def create_instance(conf):
    """
    This function must be always implemented. KonText uses it to create an instance of your
    authentication object. The settings module is passed as a parameter.
    """
    plugin_conf = conf.get('plugins', plugins.runtime.AUTH.name)
    custom_conf = conf.get_plugin_custom_conf(plugins.runtime.AUTH.name)
    return StaticAuth(
        anonymous_id=int(plugin_conf['anonymous_user_id']),
        api_key_cookie_name=custom_conf.get('api_key_cookie_name', None),
        api_key_http_header=custom_conf['api_key_http_header'],
        zones=custom_conf['zones'],
        login_url=plugin_conf['login_url'],
        # Fix: the original passed 'login_url' for both URLs, which made
        # get_logout_url() return the login page. Prefer a configured
        # 'logout_url', falling back to 'login_url' for old configs.
        logout_url=plugin_conf.get('logout_url', plugin_conf['login_url']))
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Logging and debugging utilities."""
import os
import sys
import traceback
import logging
# TODO(mdan): Use a custom logger class.
from nvidia.dali._autograph.utils.all_utils import export_symbol
VERBOSITY_VAR_NAME = 'AUTOGRAPH_VERBOSITY'
DEFAULT_VERBOSITY = 0
verbosity_level = None # vlog-like. Takes precedence over the env variable.
echo_log_to_stdout = False
# In interactive Python, logging echo is enabled by default.
if hasattr(sys, 'ps1') or hasattr(sys, 'ps2'):
echo_log_to_stdout = True
@export_symbol('autograph.set_verbosity')
def set_verbosity(level: int, alsologtostdout: bool = False) -> None:
    """Sets the AutoGraph verbosity level.
    _Debug logging in AutoGraph_
    More verbose logging is useful to enable when filing bug reports or doing
    more in-depth debugging.
    There are two means to control the logging verbosity:
    * The `set_verbosity` function
    * The `AUTOGRAPH_VERBOSITY` environment variable
    `set_verbosity` takes precedence over the environment variable.
    For example:
    ```python
    import os
    import tensorflow as tf
    os.environ['AUTOGRAPH_VERBOSITY'] = '5'
    # Verbosity is now 5
    tf.autograph.set_verbosity(0)
    # Verbosity is now 0
    os.environ['AUTOGRAPH_VERBOSITY'] = '1'
    # No effect, because set_verbosity was already called.
    ```
    Logs entries are output to [absl](https://abseil.io)'s
    [default output](https://abseil.io/docs/python/guides/logging),
    with `INFO` level.
    Logs can be mirrored to stdout by using the `alsologtostdout` argument.
    Mirroring is enabled by default when Python runs in interactive mode.
    Args:
      level: int, the verbosity level; larger values specify increased verbosity;
        0 means no logging. When reporting bugs, it is recommended to set this
        value to a larger number, like 10.
      alsologtostdout: bool, whether to also output log messages to `sys.stdout`.
    """
    # Both settings live in module globals read by get_verbosity()/log()/error().
    global verbosity_level
    global echo_log_to_stdout
    verbosity_level = level
    echo_log_to_stdout = alsologtostdout
@export_symbol('autograph.trace')
def trace(*args) -> None:
    """Traces argument information at compilation time.
    `trace` is useful when debugging, and it always executes during the tracing
    phase, that is, when the TF graph is constructed.
    _Example usage_
    ```python
    import tensorflow as tf
    for i in tf.range(10):
      tf.autograph.trace(i)
    # Output: <Tensor ...>
    ```
    Args:
      *args: Arguments to print to `sys.stdout`.
    """
    # Plain print: this runs at trace/compile time, not inside the graph.
    print(*args)
def get_verbosity() -> int:
    """Return the effective AutoGraph verbosity level.

    A level set explicitly via `set_verbosity` takes precedence; otherwise
    the AUTOGRAPH_VERBOSITY environment variable is consulted, falling back
    to DEFAULT_VERBOSITY (0).
    """
    # A `global` declaration is unnecessary for read-only access; removed.
    if verbosity_level is not None:
        return verbosity_level
    return int(os.getenv(VERBOSITY_VAR_NAME, DEFAULT_VERBOSITY))
def METHOD_NAME(level: int) -> bool:
    """Return True when the effective verbosity is at least `level`."""
    return get_verbosity() >= level
def _output_to_stdout(msg, *args, **kwargs):
print(msg % args)
if kwargs.get('exc_info', False):
traceback.print_exc()
def error(level, msg, *args, **kwargs):
    """Log an error message, gated on the current verbosity level."""
    # Emit only when the requested level is within the effective verbosity.
    if METHOD_NAME(level):
        logging.error(msg, *args, **kwargs)
        if echo_log_to_stdout:
            _output_to_stdout('ERROR: ' + msg, *args, **kwargs)
def log(level, msg, *args, **kwargs):
    """Log an info-level message, gated on the current verbosity level."""
    if METHOD_NAME(level):
        logging.info(msg, *args, **kwargs)
        if echo_log_to_stdout:
            _output_to_stdout(msg, *args, **kwargs)
def warning(msg, *args, **kwargs):
    """Log a warning; warnings are emitted regardless of the verbosity level."""
    logging.warning(msg, *args, **kwargs)
    if echo_log_to_stdout:
        _output_to_stdout('WARNING: ' + msg, *args, **kwargs)
        # Flush so the warning appears immediately in interactive sessions.
        sys.stdout.flush()
1,795 | test styles inlining | from unittest.mock import patch
from django.conf import settings
from django.contrib.sites.models import Site
from django.utils.functional import lazy
from django.utils.translation import get_language
from kitsune.sumo.email_utils import emails_with_users_and_watches, safe_translation
from kitsune.sumo.tests import TestCase
from kitsune.sumo.utils import uselocale
from kitsune.users.tests import UserFactory
mock_translations = {
"Hello": {"en-us": "Hello", "fr": "Bonjour", "es": "Hola"},
"Hello {name}": {
"en-us": "Hello {name}",
"fr": "Bonjour {0}",
"es": "Hola {name}",
},
}
def mock_ugettext(msg_id):
    """Fake gettext: resolve msg_id via mock_translations for the active locale."""
    locale = get_language()
    return mock_translations[msg_id][locale]


# Lazy variant, mirroring django.utils.translation.gettext_lazy.
mock_gettext_lazy = lazy(mock_ugettext)
def mock_gettext(f):
    """Decorator that patches Django's gettext/gettext_lazy with the mocks above."""
    f = patch("django.utils.translation.gettext", mock_ugettext)(f)
    f = patch("django.utils.translation.gettext_lazy", mock_gettext_lazy)(f)
    return f
class SafeTranslationTests(TestCase):
    """Tests for @safe_translation: bad translations fall back to English."""

    def setUp(self):
        # These tests assume English is the fall back language. If it
        # isn't we are gonna have a bad time.
        self.assertEqual("en-US", settings.WIKI_DEFAULT_LANGUAGE)

    @mock_gettext
    def test_mocked_gettext(self):
        """I'm not entirely sure about the mocking, so test that."""
        # Import translation now so it is affected by the mock.
        from django.utils.translation import gettext as _

        with uselocale("en-US"):
            self.assertEqual(_("Hello"), "Hello")
        with uselocale("fr"):
            self.assertEqual(_("Hello"), "Bonjour")
        with uselocale("es"):
            self.assertEqual(_("Hello"), "Hola")

    @mock_gettext
    def test_safe_translation_noop(self):
        """Test that safe_translation doesn't mess with good translations."""
        # Import translation now so it is affected by the mock.
        from django.utils.translation import gettext as _

        @safe_translation
        def simple(locale):
            return _("Hello")

        # These should just work normally.
        self.assertEqual(simple("en-US"), "Hello")
        self.assertEqual(simple("fr"), "Bonjour")
        self.assertEqual(simple("es"), "Hola")

    @mock_gettext
    def test_safe_translation_bad_trans(self):
        """Test that safe_translation insulates from bad translations."""
        # Import translation now so it is affected by the mock.
        from django.utils.translation import gettext as _

        # `safe_translation` will call this with the given locale, and
        # if that fails, fall back to English.
        @safe_translation
        def bad_trans(locale):
            # The French mock is "Bonjour {0}", which .format(name=...)
            # cannot fill, so the fr locale raises and triggers fallback.
            return _("Hello {name}").format(name="Mike")

        # French should come back as English, because it has a bad
        # translation, but Spanish should come back in Spanish.
        self.assertEqual(bad_trans("en-US"), "Hello Mike")
        self.assertEqual(bad_trans("fr"), "Hello Mike")
        self.assertEqual(bad_trans("es"), "Hola Mike")

    @mock_gettext
    @patch("kitsune.sumo.email_utils.log")
    def test_safe_translation_logging(self, mocked_log):
        """Logging translation errors is really important, so test it."""
        # Import translation now so it is affected by the mock.
        from django.utils.translation import gettext as _

        # Assert that bad translations cause error logging.
        @safe_translation
        def bad_trans(locale):
            return _("Hello {name}").format(name="Mike")

        # English and Spanish should not log anything. French should.
        bad_trans("en-US")
        bad_trans("es")
        self.assertEqual(len(mocked_log.method_calls), 0)
        bad_trans("fr")
        self.assertEqual(len(mocked_log.method_calls), 1)
        method_name, method_args, method_kwargs = mocked_log.method_calls[0]
        self.assertEqual(method_name, "exception")
        assert "Bad translation" in method_args[0]
        self.assertEqual(method_args[1], "fr")
class UseLocaleTests(TestCase):
    """Tests for the uselocale() context manager."""

    def test_uselocale(self):
        """Test that uselocale does what it says on the tin."""
        # Note: Django normalizes locale codes to lowercase ("en-us").
        with uselocale("en-US"):
            self.assertEqual(get_language(), "en-us")
        with uselocale("de"):
            self.assertEqual(get_language(), "de")
        with uselocale("fr"):
            self.assertEqual(get_language(), "fr")
class PremailerTests(TestCase):
    """Tests for CSS inlining of rendered email HTML."""

    def METHOD_NAME(self):
        """Test that styles tags are converted to inline styles"""
        with patch("kitsune.sumo.email_utils.render_to_string") as mocked:
            mocked.return_value = (
                "<html>"
                "<head>"
                "<style>a { color: #000; }</style>"
                "</head>"
                "<body>"
                '<a href="/test">Hyperlink</a>'
                "</body>"
                "</html>"
            )
            u = UserFactory()
            msg = emails_with_users_and_watches("test", "a.ltxt", "a.html", {}, [(u, [None])])
            for m in msg:
                # The <style> rule should be inlined and the relative href
                # expanded to an absolute URL on the current site's domain.
                tag = '<a href="https://%s/test" style="color:#000">Hyperlink</a>'
                self.assertIn(tag % Site.objects.get_current().domain, str(m.message()))
1,796 | delete files and dir | #!/usr/bin/env python
#
# Copyright 2008, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Unit test for the gtest_xml_output module."""
__author__ = "keith.ray@gmail.com (Keith Ray)"
import os
from xml.dom import minidom, Node
import gtest_test_utils
import gtest_xml_test_utils
GTEST_OUTPUT_SUBDIR = "xml_outfiles"
GTEST_OUTPUT_1_TEST = "gtest_xml_outfile1_test_"
GTEST_OUTPUT_2_TEST = "gtest_xml_outfile2_test_"
EXPECTED_XML_1 = """<?xml version="1.0" encoding="UTF-8"?>
<testsuites tests="1" failures="0" disabled="0" errors="0" time="*" timestamp="*" name="AllTests">
<testsuite name="PropertyOne" tests="1" failures="0" disabled="0" errors="0" time="*">
<testcase name="TestSomeProperties" status="run" time="*" classname="PropertyOne" SetUpProp="1" TestSomeProperty="1" TearDownProp="1" />
</testsuite>
</testsuites>
"""
EXPECTED_XML_2 = """<?xml version="1.0" encoding="UTF-8"?>
<testsuites tests="1" failures="0" disabled="0" errors="0" time="*" timestamp="*" name="AllTests">
<testsuite name="PropertyTwo" tests="1" failures="0" disabled="0" errors="0" time="*">
<testcase name="TestSomeProperties" status="run" time="*" classname="PropertyTwo" SetUpProp="2" TestSomeProperty="2" TearDownProp="2" />
</testsuite>
</testsuites>
"""
class GTestXMLOutFilesTest(gtest_xml_test_utils.GTestXMLTestCase):
    """Unit test for Google Test's XML output functionality."""

    def setUp(self):
        # We want the trailing '/' that the last "" provides in os.path.join, for
        # telling Google Test to create an output directory instead of a single file
        # for xml output.
        self.output_dir_ = os.path.join(gtest_test_utils.GetTempDir(),
                                        GTEST_OUTPUT_SUBDIR, "")
        self.METHOD_NAME()

    def tearDown(self):
        self.METHOD_NAME()

    def METHOD_NAME(self):
        # Best-effort cleanup of the two expected XML files and their
        # directory; missing files are fine (os.error is OSError).
        try:
            os.remove(os.path.join(self.output_dir_, GTEST_OUTPUT_1_TEST + ".xml"))
        except os.error:
            pass
        try:
            os.remove(os.path.join(self.output_dir_, GTEST_OUTPUT_2_TEST + ".xml"))
        except os.error:
            pass
        try:
            os.rmdir(self.output_dir_)
        except os.error:
            pass

    def testOutfile1(self):
        self._TestOutFile(GTEST_OUTPUT_1_TEST, EXPECTED_XML_1)

    def testOutfile2(self):
        self._TestOutFile(GTEST_OUTPUT_2_TEST, EXPECTED_XML_2)

    def _TestOutFile(self, test_name, expected_xml):
        # Run the gtest binary with --gtest_output pointed at the directory,
        # then compare the XML it produced against the expected document.
        gtest_prog_path = gtest_test_utils.GetTestExecutablePath(test_name)
        command = [gtest_prog_path, "--gtest_output=xml:%s" % self.output_dir_]
        p = gtest_test_utils.Subprocess(command,
                                        working_dir=gtest_test_utils.GetTempDir())
        self.assert_(p.exited)
        self.assertEquals(0, p.exit_code)
        # TODO(wan@google.com): libtool causes the built test binary to be
        # named lt-gtest_xml_outfiles_test_ instead of
        # gtest_xml_outfiles_test_.  To account for this possibillity, we
        # allow both names in the following code.  We should remove this
        # hack when Chandler Carruth's libtool replacement tool is ready.
        output_file_name1 = test_name + ".xml"
        output_file1 = os.path.join(self.output_dir_, output_file_name1)
        output_file_name2 = 'lt-' + output_file_name1
        output_file2 = os.path.join(self.output_dir_, output_file_name2)
        self.assert_(os.path.isfile(output_file1) or os.path.isfile(output_file2),
                     output_file1)
        expected = minidom.parseString(expected_xml)
        if os.path.isfile(output_file1):
            actual = minidom.parse(output_file1)
        else:
            actual = minidom.parse(output_file2)
        self.NormalizeXml(actual.documentElement)
        self.AssertEquivalentNodes(expected.documentElement,
                                   actual.documentElement)
        expected.unlink()
        actual.unlink()
if __name__ == "__main__":
    # Suppress stack traces in the XML output so comparisons stay stable.
    os.environ["GTEST_STACK_TRACE_DEPTH"] = "0"
    gtest_test_utils.Main()
1,797 | mock check task start | import asyncio
import itertools
import random
import time
from avocado.utils.astring import tabular_output
DEBUG = False
def debug(msg):
    """Print trace output, but only when the module-level DEBUG flag is on."""
    if DEBUG:
        print(msg)
async def sleep_random():
    """Sleep for a random fraction of a second to simulate variable work."""
    await asyncio.sleep(random.random())
def true_or_false(handicap=3):
    """Returns a random positive or negative outcome, with some bias.

    A handicap > 1 stacks the odds against True (one True vs `handicap`
    Falses); otherwise the odds favor True (one False vs |handicap| Trues).
    """
    pool = [True] + [False] * handicap if handicap > 1 else [False] + [True] * abs(handicap)
    return random.choice(pool)
def mock_check_task_requirement():
    """Simulate a requirement check: biased ~8:1 toward success (True)."""
    # More success than failures, please
    return true_or_false(-8)
def METHOD_NAME():
    """Simulate starting a task: biased ~6:1 toward success (True)."""
    # More success than failures, please
    return true_or_false(-6)
def mock_monitor_task_finished():
    """Simulate polling a running task: usually not finished yet (False)."""
    # More failures than successes, please
    return true_or_false(5)
class Task:
    """Used here as a placeholder for an avocado.core.nrunner.Task."""

    def __init__(self, identification):
        # Display identifier; read by TaskInfo.__repr__.
        self._identification = identification
class TaskInfo(Task):
    """Task with extra status information on its life-cycle.
    The equivalent of a StatusServer will contain this information
    in the real implementation."""

    def __init__(self, identification):
        super().__init__(identification)
        # Life-cycle label (e.g. "WAITING", "FAILED ON START");
        # None while the task is progressing normally.
        self._status = None
        # Absolute deadline based on time.monotonic(), set on start.
        self._timeout = None

    @property
    def status(self):
        return self._status

    @status.setter
    def status(self, status):
        self._status = status

    @property
    def timeout(self):
        return self._timeout

    @timeout.setter
    def timeout(self, timeout):
        self._timeout = timeout

    def __repr__(self):
        # Append the status suffix only when one has been recorded.
        if self._status is None:
            return f"{self._identification}"
        else:
            return f"{self._identification} ({self.status})"
class TaskStateMachine:
"""Represents all phases that a task can go through its life."""
def __init__(self, tasks):
self._requested = tasks
self._triaging = []
self._ready = []
self._started = []
self._finished = []
self._lock = asyncio.Lock()
@property
def requested(self):
return self._requested
@property
def triaging(self):
return self._triaging
@property
def ready(self):
return self._ready
@property
def started(self):
return self._started
@property
def finished(self):
return self._finished
@property
def lock(self):
return self._lock
@property
async def complete(self):
async with self._lock:
pending = any([self._requested, self._triaging, self._ready, self._started])
return not pending
def __str__(self):
headers = (
"|_REQUESTED_|",
"|_TRIAGING__|",
"|___READY___|",
"|__STARTED__|",
"|______FINISHED_______|",
)
data = itertools.zip_longest(
self._requested,
self._triaging,
self._ready,
self._started,
self._finished,
fillvalue="",
)
matrix = [_ for _ in data]
return tabular_output(matrix, headers)
async def bootstrap(lc):
    """Reads from requested, moves into triaging."""
    # fake some rate limiting
    if true_or_false(10):
        return
    try:
        async with lc.lock:
            # pop() raises IndexError when the requested queue is empty.
            task = lc.requested.pop()
            lc.triaging.append(task)
            debug(f"Moved Task {task}: REQUESTED => TRIAGING")
    except IndexError:
        debug("BOOTSTRAP: nothing to do")
        return
async def triage(lc):
    """Reads from triaging, moves into either: ready or finished."""
    await sleep_random()
    try:
        async with lc.lock:
            task = lc.triaging.pop()
    except IndexError:
        debug("TRIAGE done")
        return
    # The (mock) requirement check runs outside the lock on purpose.
    if mock_check_task_requirement():
        async with lc.lock:
            lc.ready.append(task)
            debug(f"Moving Task {task}: TRIAGING => READY")
    else:
        async with lc.lock:
            lc.finished.append(task)
            task.status = "FAILED ON TRIAGE"
            debug(f"Moving Task {task}: TRIAGING => FINISHED")
async def start(lc):
    """Reads from ready, moves into either: started or finished."""
    await sleep_random()
    try:
        async with lc.lock:
            task = lc.ready.pop()
    except IndexError:
        debug("START: nothing to do")
        return
    # enforce a rate limit on the number of started (currently running) tasks.
    # this is a global limit, but the spawners can also be queried with regards
    # to their capacity to handle new tasks
    MAX_RUNNING_TASKS = 8
    async with lc.lock:
        if len(lc.started) >= MAX_RUNNING_TASKS:
            # Put the task back at the front of the ready queue and mark
            # it as waiting for a running slot.
            lc.ready.insert(0, task)
            task.status = "WAITING"
            return
    # suppose we're starting the tasks
    if METHOD_NAME():
        async with lc.lock:
            task.status = None
            # Let's give each task 15 seconds from start time
            task.timeout = time.monotonic() + 15
            lc.started.append(task)
            debug(f"Moving Task {task}: READY => STARTED")
    else:
        async with lc.lock:
            lc.finished.append(task)
            task.status = "FAILED ON START"
            debug(f"Moving Task {task}: READY => FINISHED (ERRORED ON START)")
async def monitor(lc):
    """Reads from started, moves into finished."""
    await sleep_random()
    try:
        async with lc.lock:
            task = lc.started.pop()
    except IndexError:
        debug("MONITOR: nothing to do")
        return
    if time.monotonic() > task.timeout:
        # Deadline set in start() has passed: fail the task.
        async with lc.lock:
            task.status = "FAILED W/ TIMEOUT"
            lc.finished.append(task)
            debug(f"Moving Task {task}: STARTED => FINISHED (FAILED ON TIMEOUT)")
    elif mock_monitor_task_finished():
        async with lc.lock:
            lc.finished.append(task)
            debug(f"Moving Task {task}: STARTED => FINISHED (COMPLETED AFTER STARTED)")
    else:
        # Still running: put it back at the front of the started queue.
        async with lc.lock:
            lc.started.insert(0, task)
            debug(f"Task {task}: has not finished yet")
def print_lc_status(lc):
    """Reset the terminal (ANSI "\\033c") and redraw the state table."""
    print("\033c" + str(lc))
async def worker(lc):
    """Pushes Tasks forward and makes them do something with their lives."""
    # Each iteration advances one task (at most) through every stage,
    # redrawing the status table after each transition attempt.
    while True:
        complete = await lc.complete
        debug(f"Complete? {complete}")
        if complete:
            break
        await bootstrap(lc)
        print_lc_status(lc)
        await triage(lc)
        print_lc_status(lc)
        await start(lc)
        print_lc_status(lc)
        await monitor(lc)
        print_lc_status(lc)
if __name__ == "__main__":
    NUMBER_OF_TASKS = 40
    NUMBER_OF_LIFECYCLE_WORKERS = 4
    tasks_info = [
        # pylint: disable=C0209
        TaskInfo("%03i" % _)
        # BUG FIX: was range(1, NUMBER_OF_TASKS - 1), which created only
        # 38 tasks instead of the declared 40 (001..040).
        for _ in range(1, NUMBER_OF_TASKS + 1)
    ]
    state_machine = TaskStateMachine(tasks_info)
    # NOTE(review): asyncio.get_event_loop() is deprecated for this use
    # since Python 3.10; consider asyncio.run() via a small async main().
    loop = asyncio.get_event_loop()
    workers = [
        loop.create_task(worker(state_machine))
        for _ in range(NUMBER_OF_LIFECYCLE_WORKERS)
    ]
    loop.run_until_complete(asyncio.gather(*workers))
    print("JOB COMPLETED")
1,798 | test shader include fail | """
Low level tests for OpenGL 3.3 wrappers.
"""
import pytest
from pyglet.math import Mat4
def test_ctx(ctx):
    """Sanity-check GL/GLES version, context limits and default blend state."""
    if ctx.gl_api == "gl":
        assert ctx.gl_version >= (3, 3)
    elif ctx.gl_api == "gles":
        assert ctx.gl_version >= (3, 1)
    else:
        raise ValueError(f"Unsupported api: {ctx.gl_api}")
    assert ctx.info.MAX_TEXTURE_SIZE >= 4096
    assert ctx.info.MAX_ARRAY_TEXTURE_LAYERS >= 256
    # Blend func is readable and writable.
    assert ctx.blend_func == ctx.BLEND_DEFAULT
    ctx.blend_func = ctx.BLEND_PREMULTIPLIED_ALPHA
    assert ctx.blend_func == ctx.BLEND_PREMULTIPLIED_ALPHA
def test_viewport(ctx):
    """Viewport round-trips through the property."""
    vp = 0, 0, 100, 100
    ctx.viewport = vp
    assert ctx.viewport == vp
def test_projection(window):
    """2D projection round-trips and is reconstructed from a raw matrix."""
    ctx = window.ctx
    assert ctx.projection_2d == (0, window.width, 0, window.height)
    ctx.projection_2d = (1, 10, 2, 11)
    assert ctx.projection_2d == (1, 10, 2, 11)
    # Attempt to assign illegal values
    with pytest.raises(ValueError):
        ctx.projection_2d = "moo"
    with pytest.raises(ValueError):
        ctx.projection_2d = 1, 2, 3, 4, 5
    # Set matrices directly checking projection
    # parameter reconstruction
    ctx.projection_2d_matrix = Mat4.orthogonal_projection(0, 100, 0, 200, -100, 100)
    assert ctx.projection_2d == (0, 100, 0, 200)
    ctx.projection_2d_matrix = Mat4.orthogonal_projection(100, 200, 200, 400, -100, 100)
    assert ctx.projection_2d == (100, 200, 200, 400)
    ctx.projection_2d_matrix = Mat4.orthogonal_projection(200, 800, 300, 900, -100, 100)
    assert ctx.projection_2d == (200, 800, 300, 900)
def test_projection_matrix(window):
    """Test setting projection matrix directly"""
    window.ctx.projection_2d_matrix = Mat4()
    # Only Mat4 values are accepted.
    with pytest.raises(ValueError):
        window.ctx.projection_2d_matrix = "moo"
def test_point_size(ctx):
    """Attempt to set point size"""
    # Default is 1.0; the property round-trips.
    assert ctx.point_size == 1.0
    ctx.point_size = 2.0
    assert ctx.point_size == 2.0
def test_primitive_restart(ctx):
    """Get or set primitive restart"""
    # Default restart index is -1; the property round-trips.
    assert ctx.primitive_restart_index == -1
    ctx.primitive_restart_index = -2
    assert ctx.primitive_restart_index == -2
def test_enable_disable(ctx):
    """Try enable and disable states manually"""
    assert ctx.is_enabled(ctx.BLEND)
    # enable_only() with no arguments clears all flags.
    ctx.enable_only()
    assert len(ctx._flags) == 0
    ctx.enable(ctx.BLEND)
    # Re-enabling an already-enabled flag is harmless.
    ctx.enable(ctx.BLEND, ctx.DEPTH_TEST, ctx.CULL_FACE)
    assert ctx.is_enabled(ctx.BLEND)
    assert ctx.is_enabled(ctx.DEPTH_TEST)
    assert ctx.is_enabled(ctx.CULL_FACE)
    ctx.disable(ctx.BLEND)
    assert ctx.is_enabled(ctx.BLEND) is False
    assert len(ctx._flags) == 2
    # Restore a known state for subsequent tests.
    ctx.enable_only(ctx.BLEND, ctx.CULL_FACE, ctx.DEPTH_TEST, ctx.PROGRAM_POINT_SIZE)
def test_enabled(ctx):
    """enabled() context manager adds flags and restores state on exit."""
    assert ctx.is_enabled(ctx.BLEND)
    assert not ctx.is_enabled(ctx.DEPTH_TEST)
    with ctx.enabled(ctx.DEPTH_TEST):
        assert ctx.is_enabled(ctx.BLEND)
        assert ctx.is_enabled(ctx.DEPTH_TEST)
    assert ctx.is_enabled(ctx.BLEND)
    assert not ctx.is_enabled(ctx.DEPTH_TEST)
def test_enabled_only(ctx):
    """enabled_only() replaces all flags inside the block, restoring on exit."""
    assert ctx.is_enabled(ctx.BLEND)
    with ctx.enabled_only(ctx.DEPTH_TEST):
        # BLEND is dropped because only DEPTH_TEST was requested.
        assert not ctx.is_enabled(ctx.BLEND)
        assert ctx.is_enabled(ctx.DEPTH_TEST)
    assert ctx.is_enabled(ctx.BLEND)
    assert not ctx.is_enabled(ctx.DEPTH_TEST)
def test_load_texture(ctx):
    """Texture loading honors the flip flag (checked via corner pixel color)."""
    # Default flipped and read value of corner pixel
    texture = ctx.load_texture(":resources:images/test_textures/test_texture.png", build_mipmaps=True)
    assert texture.read()[:4] == b'\x00\x00\xff\xff'  # Blue
    # Don't flip the texture
    texture = ctx.load_texture(":resources:images/test_textures/test_texture.png", flip=False, build_mipmaps=True)
    assert texture.read()[:4] == b'\xff\x00\x00\xff'  # Red
def test_shader_include(ctx):
    """Test shader include directive"""
    # Without quotes
    src = """
    #version 330
    #include :resources:shaders/lib/sprite.glsl
    """
    # Expansion makes the source longer than the original directive.
    assert len(ctx.shader_inc(src)) > len(src)
    # With quotes
    src = """
    #version 330
    #include ":resources:shaders/lib/sprite.glsl"
    """
    assert len(ctx.shader_inc(src)) > len(src)
def METHOD_NAME(ctx):
    """Test shader include directive"""
    # A path that does not resolve must raise FileNotFoundError.
    src = """
    #version 330
    #include "test_shader_include.vert"
    """
    with pytest.raises(FileNotFoundError):
        ctx.shader_inc(src)
def test_front_face(ctx):
    """Test front face"""
    # Default
    assert ctx.front_face == "ccw"
    # Set valid values
    ctx.front_face = "cw"
    assert ctx.front_face == "cw"
    ctx.front_face = "ccw"
    assert ctx.front_face == "ccw"
    # Set invalid value
    with pytest.raises(ValueError):
        ctx.front_face = "moo"
def test_cull_face(ctx):
    """Cull face accepts the three valid modes and rejects anything else."""
    assert ctx.cull_face == "back"
    # Set valid values
    ctx.cull_face = "front"
    assert ctx.cull_face == "front"
    ctx.cull_face = "back"
    assert ctx.cull_face == "back"
    ctx.cull_face = "front_and_back"
    assert ctx.cull_face == "front_and_back"
    # Set invalid value
    with pytest.raises(ValueError):
        ctx.cull_face = "moo"
1,799 | act | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
Teachers that wrap around other teachers, for instance, to modify message fields while
keeping the examples/episodes the same.
This is useful when working with agents that expect examples to be in a certain format,
for instance a classifier that classifies the "text" field of a message. The meta-
teachers in this module can be used to avoid writing several different nearly identical
variants of different teachers: for instance, if you want to flatten examples and strip
away all but the previous utterance in the 'text' field for several different teachers,
it would be much easier to do so with one teacher in this module than with a brand new
teacher for each of the original teachers.
"""
from typing import Optional
from parlai.core.params import ParlaiParser
import copy
from abc import ABC
from parlai.core.agents import create_agent_from_shared
from parlai.core.message import Message
from parlai.core.opt import Opt
from parlai.core.teachers import (
create_task_agent_from_taskname,
FixedDialogTeacher,
Teacher,
)
from parlai.utils.misc import warn_once
class AbstractWrapperTeacher(Teacher, ABC):
    """
    Abstract teacher that wraps around another teacher.
    This teacher allows for manipulating the fields returned by the inner teacher, in
    the abstract self._edit_action() method that is called during self.act(). The inner
    teacher must subclass FixedDialogTeacher in order to make use of that teacher's
    .get_orig_action() and .process_action() methods.
    """

    @classmethod
    def add_cmdline_args(
        cls, parser: ParlaiParser, partial_opt: Optional[Opt] = None
    ) -> ParlaiParser:
        super().add_cmdline_args(parser, partial_opt)
        agent = parser.add_argument_group('AbstractWrapper args')
        agent.add_argument(
            '-wt',
            '--wrapper-task',
            type=str,
            help='The task whose fields will be manipulated.',
        )
        try:
            parser.add_task_args(partial_opt['wrapper_task'], partial_opt)
        except KeyError:
            # partial_opt is None or lacks 'wrapper_task', so the inner
            # task's flags cannot be registered yet; warn and continue.
            warn_once(
                'The task name cannot be parsed from command-line arguments! '
                'Task-specific flags will not be added.'
            )
        return parser

    def __init__(self, opt: Opt, shared=None):
        if ',' in opt['task']:
            raise ValueError(
                'AbstractWrapperTeacher cannot be used with multiple tasks!'
            )
        self.id = opt['task']
        self.opt = opt
        if shared:
            self.task = create_agent_from_shared(shared['task'])
        else:
            opt_singletask = copy.deepcopy(opt)
            opt_singletask['task'] = opt['wrapper_task']
            self.task = create_task_agent_from_taskname(opt_singletask)[0]
        # The wrapped teacher must expose get_orig_action()/process_action().
        assert isinstance(self.task, FixedDialogTeacher)

    def METHOD_NAME(self):
        """
        Act on the previous observation.
        Normally, the inner teacher would call .get_orig_action() and .process_action();
        here, we insert an ._edit_action() method in between these two methods in order
        to allow for arbitrary manipulation of the action before it is registered and
        processed further by the inner teacher.
        """
        orig_action = self.task.get_orig_action()
        edited_action = self._edit_action(orig_action)
        processed_action = self.task.process_action(edited_action)
        return processed_action

    def _edit_action(self, METHOD_NAME: Message) -> Message:
        """
        Edit and return the input action.
        The input action typically comes from the inner teacher's .get_orig_action()
        method.
        """
        raise NotImplementedError(
            'Abstract class: user must implement the _edit_action() method'
        )

    def num_examples(self):
        """
        Return the number of examples.
        """
        return self.task.num_examples()

    def num_episodes(self):
        """
        Return the number of episodes.
        Because the dataset is flattened, there will be one episode per example.
        """
        return self.task.num_examples()

    def observe(self, observation):
        """
        Make an observation.
        """
        return self.task.observe(observation)

    def epoch_done(self):
        """
        Return whether the subtask is completed.
        """
        return self.task.epoch_done()

    def report(self):
        """
        Report metrics for the subtask.
        """
        return self.task.report()

    def reset(self):
        """
        Reset the subtask.
        """
        self.task.reset()

    def reset_metrics(self):
        """
        Reset metrics for the subtask.
        """
        self.task.reset_metrics()

    def save(self):
        """
        Save the subtask.
        """
        self.task.save()

    def share(self):
        """
        Share the subtask.
        """
        shared = {}
        shared['class'] = type(self)
        shared['opt'] = self.opt
        shared['task'] = self.task.share()
        return shared
class LabelToTextTeacher(AbstractWrapperTeacher):
    """
    Teacher that will shift message['labels'][0] into message['text'] for whatever task
    is specified with --wrapper-task.
    Because the dialogue history is effectively overwritten by this action, all episodes
    will be flattened into one example each.
    """

    def __init__(self, opt: Opt, shared=None):
        super().__init__(opt, shared)

    def _edit_action(self, METHOD_NAME: Message) -> Message:
        """
        Edit the fields of the action manually.
        """
        if 'labels' in METHOD_NAME:
            labels = METHOD_NAME['labels']
            if len(labels) != 1:
                raise ValueError(
                    f'{type(self).__name__} can only be used with one label!'
                )
            # Move the single label into 'text' and blank out 'labels'.
            METHOD_NAME.force_set('text', labels[0])
            METHOD_NAME.force_set('labels', [''])
        else:
            # No label: only acceptable for an empty episode-end marker.
            assert 'text' not in METHOD_NAME and METHOD_NAME['episode_done'] is True
        METHOD_NAME.force_set('episode_done', True)  # Clear the dialogue history
        return METHOD_NAME
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.