id int64 0 300k | label stringlengths 1 74 ⌀ | text stringlengths 4k 8k |
|---|---|---|
1,800 | superscript | #################################################################################
# FOQUS Copyright (c) 2012 - 2023, by the software owners: Oak Ridge Institute
# for Science and Education (ORISE), TRIAD National Security, LLC., Lawrence
# Livermore National Security, LLC., The Regents of the University of
# California, through Lawrence Berkeley National Laboratory, Battelle Memorial
# Institute, Pacific Northwest Division through Pacific Northwest National
# Laboratory, Carnegie Mellon University, West Virginia University, Boston
# University, the Trustees of Princeton University, The University of Texas at
# Austin, URS Energy & Construction, Inc., et al. All rights reserved.
#
# Please see the file LICENSE.md for full copyright and license information,
# respectively. This file is also available online at the URL
# "https://github.com/CCSI-Toolset/FOQUS".
#################################################################################
import os
from PyQt5 import QtCore, QtGui, QtWidgets, uic
from PyQt5 import uic
# Compile the Qt Designer .ui file that lives next to this module into the
# (form class, base class) pair that sessionDescriptionDialog derives from.
mypath = os.path.dirname(__file__)
_sessionDescriptionDialogUI, _sessionDescriptionDialog = uic.loadUiType(
    os.path.join(mypath, "sessionDescriptionEdit_UI.ui")
)
class sessionDescriptionDialog(_sessionDescriptionDialog, _sessionDescriptionDialogUI):
    """Rich-text editor dialog for a session description.

    Offers toggle buttons for underline/overline/bold and mutually exclusive
    super-/subscript, plus font and color pickers, all acting on the
    ``textEdit`` widget loaded from the Qt Designer .ui file.
    """

    def __init__(self, parent=None, text=""):
        """Set up the UI, load ``text`` as HTML, and wire buttons to handlers."""
        super(sessionDescriptionDialog, self).__init__(parent=parent)
        self.setupUi(self)
        self.textEdit.setHtml(text)
        self.underlineButton.clicked.connect(self.underline)
        self.overlineButton.clicked.connect(self.overline)
        self.boldButton.clicked.connect(self.bold)
        self.superscriptButton.clicked.connect(self.METHOD_NAME)
        self.subscriptButton.clicked.connect(self.subscript)
        self.fontButton.clicked.connect(self.font)
        self.colorButton.clicked.connect(self.color)
        self.textEdit.currentCharFormatChanged.connect(self.getFormat)
        self.getFormat()

    def getFormat(self):
        """Sync the toggle buttons with the char format at the cursor."""
        self.format = self.textEdit.currentCharFormat()
        self.underlineButton.setChecked(self.format.fontUnderline())
        self.overlineButton.setChecked(self.format.fontOverline())
        self.boldButton.setChecked(self.format.fontWeight() == QtGui.QFont.Bold)
        valign = self.format.verticalAlignment()
        self.superscriptButton.setChecked(
            valign == QtGui.QTextCharFormat.AlignSuperScript
        )
        self.subscriptButton.setChecked(
            valign == QtGui.QTextCharFormat.AlignSubScript
        )

    def color(self):
        """Pick a text color and apply it unless the dialog was canceled."""
        # BUG FIX: QColorDialog lives in QtWidgets (not QtGui) in PyQt5, so the
        # original QtGui.QColorDialog raised AttributeError.  Also, a canceled
        # dialog returns an invalid QColor which must not be applied.
        color = QtWidgets.QColorDialog.getColor(self.textEdit.textColor(), self)
        if color.isValid():
            self.textEdit.setTextColor(color)
        self.textEdit.setFocus()

    def font(self):
        """Pick a font and fold it into the current char format."""
        # BUG FIX: QFontDialog lives in QtWidgets (not QtGui) in PyQt5.
        font, ok = QtWidgets.QFontDialog.getFont(self.format.font(), self)
        if ok:
            self.format.setFont(font)
        self.textEdit.setCurrentCharFormat(self.format)
        self.textEdit.setFocus()

    def METHOD_NAME(self):
        """Toggle superscript; mutually exclusive with subscript."""
        if self.superscriptButton.isChecked():
            self.subscriptButton.setChecked(False)
            self.format.setVerticalAlignment(QtGui.QTextCharFormat.AlignSuperScript)
        else:
            self.format.setVerticalAlignment(QtGui.QTextCharFormat.AlignNormal)
        self.textEdit.setCurrentCharFormat(self.format)
        self.textEdit.setFocus()

    def subscript(self):
        """Toggle subscript; mutually exclusive with superscript."""
        if self.subscriptButton.isChecked():
            self.superscriptButton.setChecked(False)
            self.format.setVerticalAlignment(QtGui.QTextCharFormat.AlignSubScript)
        else:
            self.format.setVerticalAlignment(QtGui.QTextCharFormat.AlignNormal)
        self.textEdit.setCurrentCharFormat(self.format)
        self.textEdit.setFocus()

    def bold(self):
        """Toggle bold weight on the current char format."""
        if self.boldButton.isChecked():
            self.format.setFontWeight(QtGui.QFont.Bold)
        else:
            self.format.setFontWeight(QtGui.QFont.Normal)
        self.textEdit.setCurrentCharFormat(self.format)
        self.textEdit.setFocus()

    def underline(self):
        """Toggle underline on the current char format."""
        self.format.setFontUnderline(self.underlineButton.isChecked())
        self.textEdit.setCurrentCharFormat(self.format)
        self.textEdit.setFocus()

    def overline(self):
        """Toggle overline on the current char format."""
        self.format.setFontOverline(self.overlineButton.isChecked())
        self.textEdit.setCurrentCharFormat(self.format)
        self.textEdit.setFocus()

    def reject(self):
        """Close the dialog, reporting rejection to the caller."""
        # BUG FIX: QDialog lives in QtWidgets (not QtGui) in PyQt5.
        self.done(QtWidgets.QDialog.Rejected)

    def accept(self):
        """Close the dialog, reporting acceptance to the caller."""
        self.done(QtWidgets.QDialog.Accepted)
    def html(self):
        """Return the edited session description as an HTML string."""
        return self.textEdit.toHtml() |
1,801 | test common neighbors index | #!/usr/bin/env python3
import unittest
import os
import networkit as nk
class TestLinkprediction(unittest.TestCase):
    """Tests for networkit.linkprediction index implementations.

    All expected values are pinned against the fixture graph
    input/looptest1.gml; do not change them without regenerating
    from that fixture.
    """

    def setUp(self):
        # Fresh fixture graph for every test method.
        self.L = nk.readGraph("input/looptest1.gml", nk.Format.GML) #without self-loops
    def testAdamicAdarIndex(self):
        """Adamic/Adar score is 0 for a pair with no common neighbors."""
        AI = nk.linkprediction.AdamicAdarIndex()
        AI.setGraph(self.L)
        self.assertEqual(AI.run(0,7), 0.0)
    def testAdjustedRandIndex(self):
        """Adjusted Rand index of nodes 0 and 7 on the fixture graph."""
        ARI = nk.linkprediction.AdjustedRandIndex()
        ARI.setGraph(self.L)
        self.assertAlmostEqual(ARI.run(0,7), -0.44, delta=0.1)
    def testAlgebraicDistanceIndex(self):
        """Algebraic distance after preprocessing (5 systems, 10 iterations)."""
        ADI = nk.linkprediction.AlgebraicDistanceIndex(self.L, 5, 10)
        ADI.setGraph(self.L)
        ADI.preprocess()
        self.assertAlmostEqual(ADI.run(0,7), 0.31, delta=0.25)
    def METHOD_NAME(self):
        """Common-neighbors counts for single pairs, all pairs, and a pair list."""
        CNI = nk.linkprediction.CommonNeighborsIndex()
        CNI.setGraph(self.L)
        self.assertEqual(CNI.run(0,7), 0.0)
        self.assertEqual(len(CNI.runAll()), 57)
        res = CNI.runOn([(0,1), (2,3), (3,7)])
        expectedRes = [((0, 1), 0.0), ((2, 3), 2.0), ((3, 7), 0.0)]
        for i in range(3):
            self.assertAlmostEqual(res[i][1], expectedRes[i][1], delta = 0.1)
    def testJaccardIndex(self):
        """Jaccard similarity of the neighborhoods of nodes 0 and 4."""
        JI = nk.linkprediction.JaccardIndex()
        JI.setGraph(self.L)
        self.assertEqual(JI.run(0,4), 0.25)
    def testKatzIndex(self):
        """Katz scores for single pairs, all pairs, and a pair list."""
        KI = nk.linkprediction.KatzIndex()
        KI.setGraph(self.L)
        self.assertAlmostEqual(KI.run(0,7), 6.3125e-10, 2)
        self.assertEqual(len(KI.runAll()), 57)
        res = KI.runOn([(0,1), (2,3), (3,7)])
        expectedRes = [((0, 1), 0.005), ((2, 3), 0.005), ((3, 7), 1.262e-07)]
        for i in range(3):
            self.assertAlmostEqual(res[i][1], expectedRes[i][1], delta = 0.1)
    def testLinkThresholder(self):
        """Filtering scored predictions by score threshold and by count."""
        LTH = nk.linkprediction.LinkThresholder()
        pred = [((0,1), 0.25), ((2,3), 2.5), ((4,7), 3.0)]
        self.assertListEqual(LTH.byScore(pred, 0.3), [(2,3), (4,7)])
        self.assertListEqual(LTH.byCount(pred, 2), [(2,3), (4,7)])
    def testMissingLinksFinder(self):
        """Missing links at a given hop distance and from a given node."""
        MLF = nk.linkprediction.MissingLinksFinder(self.L)
        self.assertListEqual(MLF.findAtDistance(3),[(0, 6), (1, 5), (1, 7), (2, 5), (2, 7), (3, 5), (3, 7), (4, 8)])
        self.assertListEqual(MLF.findFromNode(0, 2), [(0, 2), (0, 3), (0, 4)])
    def testNeighborhoodDistanceIndex(self):
        """Neighborhood distance of a pair with disjoint neighborhoods is 0."""
        NDI = nk.linkprediction.NeighborhoodDistanceIndex()
        NDI.setGraph(self.L)
        self.assertEqual(NDI.run(0,7), 0.0)
    def testNeighborhoodUtility(self):
        """Union and intersection of two nodes' neighborhoods."""
        NHU = nk.linkprediction.NeighborhoodUtility()
        self.assertListEqual(NHU.getNeighborsUnion(self.L, 0, 5), [1,6,7])
        self.assertListEqual(NHU.getCommonNeighbors(self.L, 0, 3), [1])
    def testNeighborsMeasureIndex(self):
        """Neighbors-measure score of nodes 0 and 7 on the fixture graph."""
        NMI = nk.linkprediction.NeighborsMeasureIndex()
        NMI.setGraph(self.L)
        self.assertEqual(NMI.run(0,7), 0.0)
    def testPredictionSorter(self):
        """In-place sorting of predictions by score and by node pair."""
        PS = nk.linkprediction.PredictionsSorter()
        pred = [((0,1), 0.25), ((2,3), 12.5), ((4,7), 3.0)]
        PS.sortByScore(pred)
        self.assertListEqual(pred, [((2,3), 12.5), ((4,7), 3.0), ((0,1), 0.25)])
        PS.sortByNodePair(pred)
        self.assertListEqual(pred, [((0,1), 0.25), ((2,3), 12.5), ((4,7), 3.0)])
    def testPreferentialAttachmentIndex(self):
        """Preferential attachment = degree(0) * degree(7) = 3."""
        PAI = nk.linkprediction.PreferentialAttachmentIndex()
        PAI.setGraph(self.L)
        self.assertEqual(PAI.run(0,7), 3.0)
    def testPrecisionRecallMetric(self):
        """Precision/recall curve and its area for a fixed prediction set."""
        PRM = nk.linkprediction.PrecisionRecallMetric(self.L)
        PRM.setTestGraph(self.L)
        pred = [((0,1),5), ((2,3), 2.5), ((4,7), 3)]
        self.assertListEqual(PRM.getCurve(pred)[0], [0.0, 0.5, 1.0])
        self.assertEqual(PRM.getCurve(pred)[1][0], 1.0)
        self.assertEqual(PRM.getCurve(pred)[1][1], 0.5)
        self.assertAlmostEqual(PRM.getCurve(pred)[1][2], 0.667, 3)
        self.assertAlmostEqual(PRM.getAreaUnderCurve(), 0.667 , 3)
    def testRandomLinkSampler(self):
        """Sampling a subgraph with an exact number of edges."""
        RLS = nk.linkprediction.RandomLinkSampler(self.L, 50)
        resGraph = (RLS.byCount(self.L, 3))
        self.assertEqual(resGraph.numberOfEdges(), 3)
    def testROCMetric(self):
        """ROC curve and area for a fixed prediction set."""
        RM = nk.linkprediction.ROCMetric(self.L)
        RM.setTestGraph(self.L)
        pred = [((0,1),5), ((2,3), 2.5), ((4,7), 3)]
        self.assertTupleEqual(RM.getCurve(pred), ([0.0, 1.0], [0.5, 1.0]))
        self.assertEqual(RM.getAreaUnderCurve(), 0.75)
    def testResourceAllocationIndex(self):
        """Resource allocation score of nodes 0 and 4."""
        I = nk.linkprediction.ResourceAllocationIndex()
        I.setGraph(self.L)
        self.assertEqual(I.run(0,4), 0.25)
    def testSameCommunityIndex(self):
        """Same-community indicator: nodes 0 and 4 share a community."""
        SCI = nk.linkprediction.SameCommunityIndex()
        SCI.setGraph(self.L)
        self.assertEqual(SCI.run(0,4), 1.0)
    def testTotalNeighborsIndex(self):
        """Total neighbors = |N(0) union N(7)| = 4."""
        TNI = nk.linkprediction.TotalNeighborsIndex()
        TNI.setGraph(self.L)
        self.assertEqual(TNI.run(0,7), 4.0)
    def testUDegreeIndex(self):
        """U-degree index returns the degree of the first node (0)."""
        UI = nk.linkprediction.UDegreeIndex()
        UI.setGraph(self.L)
        self.assertEqual(UI.run(0,7), 1.0)
    def testVDegreeIndex(self):
        """V-degree index returns the degree of the second node (7)."""
        VI = nk.linkprediction.VDegreeIndex()
        VI.setGraph(self.L)
        self.assertEqual(VI.run(0,7), 3.0)
# Allow running this test module directly with the standard unittest runner.
if __name__ == "__main__":
    unittest.main()
|
1,802 | set up | import pickle
import unittest
from unittest import mock
from atorch.auto.engine.client import GlobalAutoAccelerationClient
from atorch.protos import acceleration_pb2
class AutoAccelerationClient(unittest.TestCase):
    """Tests for GlobalAutoAccelerationClient.get_task / report_task_result.

    Each test builds a protobuf AutoAccelerationTask, mocks the gRPC stub's
    get_task to return it, and checks that the client unpacks the task into
    the (task_id, task_type, process_mode, time_limit, task_info) tuple.
    """

    def METHOD_NAME(self):
        # Shared singleton client; its _stub is replaced with a mock per test.
        self.client = GlobalAutoAccelerationClient.AUTO_ACC_CLIENT
    def test_get_analyse_task(self):
        """An ANALYSE task's task_info is the list of analysis method names."""
        method = acceleration_pb2.AnalysisMethod()
        method.names.extend(["default", "self-defined"])
        task = acceleration_pb2.AutoAccelerationTask(
            task_id=0,
            task_type="ANALYSE",
            process_mode="ONE_PROCESS",
            analysis_method=method,
            time_limit=30,
        )
        self.client._stub.get_task = mock.MagicMock(return_value=task)
        (
            task_id,
            task_type,
            process_mode,
            time_limit,
            task_info,
        ) = self.client.get_task()
        self.assertEqual(task_id, 0)
        self.assertEqual(task_type, "ANALYSE")
        self.assertEqual(process_mode, "ONE_PROCESS")
        self.assertEqual(time_limit, 30)
        self.assertEqual(task_info, ["default", "self-defined"])
    def test_get_parallel_task(self):
        """A SETUP_PARALLEL_GROUP task's task_info is pickled group info."""
        parallel_group_info = {
            "model_parallel_size": 3,
            "model_parallel_group": [[0, 1, 2], [3, 4, 5]],
        }
        task = acceleration_pb2.AutoAccelerationTask(
            task_id=1,
            task_type="SETUP_PARALLEL_GROUP",
            process_mode="ALL_PROCESS",
            parallel_group_info=pickle.dumps(parallel_group_info),
            time_limit=5,
        )
        self.client._stub.get_task = mock.MagicMock(return_value=task)
        (
            task_id,
            task_type,
            process_mode,
            time_limit,
            task_info,
        ) = self.client.get_task()
        self.assertEqual(task_id, 1)
        self.assertEqual(task_type, "SETUP_PARALLEL_GROUP")
        self.assertEqual(process_mode, "ALL_PROCESS")
        self.assertEqual(time_limit, 5)
        self.assertEqual(parallel_group_info, pickle.loads(task_info))
    def test_get_strategy_task(self):
        """A TUNE task's task_info mirrors the strategy's optimization methods."""
        parallel_group_info = {
            "model_parallel_size": 3,
            "model_parallel_group": [[0, 1, 2], [3, 4, 5]],
        }
        methods = [
            ("1D", parallel_group_info, True),
            ("2D", parallel_group_info, False),
        ]
        opt_method0 = acceleration_pb2.OptimizationMethod(
            name=methods[0][0],
            config=pickle.dumps(methods[0][1]),
            tunable=methods[0][2],
        )
        opt_method1 = acceleration_pb2.OptimizationMethod(
            name=methods[1][0],
            config=pickle.dumps(methods[1][1]),
            tunable=methods[1][2],
        )
        strategy = acceleration_pb2.Strategy()
        strategy.opt.extend([opt_method0, opt_method1])
        task = acceleration_pb2.AutoAccelerationTask(
            task_id=2,
            task_type="TUNE",
            process_mode="ONE_MODEL_PARALLEL_GROUP",
            strategy=strategy,
            time_limit=600,
        )
        self.client._stub.get_task = mock.MagicMock(return_value=task)
        (
            task_id,
            task_type,
            process_mode,
            time_limit,
            task_info,
        ) = self.client.get_task()
        self.assertEqual(task_id, 2)
        self.assertEqual(task_type, "TUNE")
        self.assertEqual(process_mode, "ONE_MODEL_PARALLEL_GROUP")
        self.assertEqual(time_limit, 600)
        # task_info entries are (name, pickled_config, tunable) triples.
        for method, info in zip(methods, task_info):
            self.assertEqual(method[0], info[0])
            self.assertEqual(method[1], pickle.loads(info[1]))
            self.assertEqual(method[2], info[2])
    def test_report_more_than_one_result(self):
        """report_task_result accepts the payload shapes of each task type."""
        self.client._stub.report_task_result = mock.MagicMock(return_value=None)
        self.client.report_task_result(0, "TUNE", True, [("method", pickle.dumps({}), False)])
        self.client.report_task_result(0, "DRYRUN", True, pickle.dumps({}))
        self.client.report_task_result(0, "ANALYSE", True, pickle.dumps({})) |
1,803 | decode | import time
import json
from abc import ABC, abstractmethod
from typing import Callable, Any, Dict, List, Optional
from helm.common.hierarchical_logger import hlog
from helm.common.request import Request, RequestResult, Sequence, Token
from helm.common.tokenization_request import (
TokenizationRequest,
TokenizationRequestResult,
DecodeRequest,
DecodeRequestResult,
)
class Client(ABC):
    """Abstract interface for a model-serving client.

    Concrete clients implement request execution, tokenization, and
    detokenization against a particular provider.
    """

    @staticmethod
    def make_cache_key(raw_request: Dict, request: Request) -> Dict:
        """Build the cache key for `raw_request`.

        When `request.random` is set, it is folded into the key so that
        randomized requests are cached separately from deterministic ones.
        """
        if request.random is None:
            return raw_request
        assert "random" not in raw_request
        return {**raw_request, "random": request.random}

    @abstractmethod
    def make_request(self, request: Request) -> RequestResult:
        """Execute `request` against the provider and return its result."""
        pass

    @abstractmethod
    def tokenize(self, request: TokenizationRequest) -> TokenizationRequestResult:
        """Tokenize the text described by `request`."""
        pass

    @abstractmethod
    def METHOD_NAME(self, request: DecodeRequest) -> DecodeRequestResult:
        """Convert the token ids in `request` back into text."""
        pass
def wrap_request_time(compute: Callable[[], Any]) -> Callable[[], Any]:
    """Wrap `compute` so its result mapping also records timing information.

    The wrapper adds two keys to whatever `compute` returns:
    - "request_time": elapsed wall-clock seconds for the call, and
    - "request_datetime": the call's start time as an integer Unix timestamp.
    """

    def wrapped_compute():
        started = time.time()
        result = compute()
        result["request_time"] = time.time() - started
        result["request_datetime"] = int(started)
        return result

    return wrapped_compute
def truncate_sequence(sequence: Sequence, request: Request, print_warning: bool = True) -> Sequence:
    """
    Certain providers have bugs where they aren't respecting max_tokens,
    stop_sequences and the end of text token, so as a hack, we have to manually
    truncate the suffix of `sequence` and `tokens` as a post-hoc process.

    Args:
        sequence: The generated sequence (text plus per-token logprobs).
        request: The originating request whose stop_sequences, max_tokens and
            echo_prompt settings drive the truncation.
        print_warning: If True, log a warning whenever truncation happens.

    Returns:
        A (possibly new) Sequence with text, tokens and logprob truncated.
    """
    # TODO: if echo_prompt, then we should only ignore the prompt, but we don't
    # know how many tokens the prompt takes up.
    # In the benchmark, usually echo_prompt is only used for language modeling,
    # where max_tokens = 0, so there's nothing to truncate.
    if request.echo_prompt:
        if request.max_tokens != 0:
            hlog("WARNING: don't know how to handle echo_prompt and max_tokens > 0, not truncating")
        return sequence
    for stop in request.stop_sequences:
        # Find `stop` in the text; if absent, keep the full text for this pass.
        try:
            new_text = sequence.text[: sequence.text.index(stop)]
        except ValueError:
            # The stop sequence doesn't exist, but it might exist in the list of tokens.
            new_text = sequence.text
        # Strip `stop` off the tokens
        new_tokens: List[Token] = []
        # Keep tokens up to (but excluding) the first one that begins with `stop`.
        for token in sequence.tokens:
            # Note: we can only strip at token boundaries
            if token.text.startswith(stop):
                break
            new_tokens.append(token)
        if len(new_text) < len(sequence.text) and len(new_tokens) == len(sequence.tokens):
            hlog(
                f"WARNING: Stripped characters from text ({len(sequence.text)} -> {len(new_text)}), "
                f"but wasn't able to strip the tokens"
            )
        # Recompute log probability over the surviving tokens only.
        new_logprob = sum(token.logprob for token in new_tokens)
        if print_warning:
            hlog(f"WARNING: truncate_sequence needs to strip {json.dumps(stop)}")
        sequence = Sequence(text=new_text, logprob=new_logprob, tokens=new_tokens)
    # Truncate based on the max number of tokens.
    if len(sequence.tokens) > request.max_tokens:
        if print_warning:
            hlog(f"WARNING: truncate_sequence needs to truncate {len(sequence.tokens)} down to {request.max_tokens}")
        new_tokens = sequence.tokens[: request.max_tokens]
        # This is imperfect stitching together of tokens, so just to make sure this is okay
        # TODO: should use the proper detokenizer since T5-style models.
        # Usually, in our benchmark, max_tokens is active when it's 1, so hopefully this isn't an issue.
        new_text = "".join(token.text for token in new_tokens)
        if not sequence.text.startswith(new_text):
            hlog(f"WARNING: {json.dumps(sequence.text)} does not start with truncated text {json.dumps(new_text)}")
        new_logprob = sum(token.logprob for token in new_tokens)
        sequence = Sequence(text=new_text, logprob=new_logprob, tokens=new_tokens)
    return sequence
def cleanup_str(token: str, tokenizer_name: Optional[str] = None) -> str:
    """Strip tokenizer-specific space markers from `token`.

    SentencePiece-style tokenizers mark word-initial spaces with "▁" and
    GPT-2/byte-level-BPE tokenizers (Hugging Face) use "Ġ".  For the known
    tokenizer families below, those markers are replaced with a plain space;
    any other tokenizer leaves the token unchanged.
    """
    sentencepiece_style = {
        "TsinghuaKEG/ice",
        "bigscience/T0pp",
        "google/t5-11b",
        "google/flan-t5-xxl",
        "google/ul2",
        "Yandex/yalm",
        "ai21/j1",
        "together",
    }
    if tokenizer_name in sentencepiece_style:
        return token.replace("▁", " ")
    if tokenizer_name is not None and tokenizer_name.startswith("huggingface"):
        return token.replace("Ġ", " ")
    return token
def cleanup_tokens(tokens: List[str], tokenizer_name: Optional[str] = None) -> List[str]:
    """
    Applies `cleanup_str` to each token in `tokens`, stripping
    tokenizer-specific space markers such as "Ġ" and "▁".
    """
    return [cleanup_str(token, tokenizer_name) for token in tokens] |
1,804 | assert count | #
# SPDX-License-Identifier: MIT
#
from oeqa.selftest.case import OESelftestTestCase
from oeqa.utils.commands import bitbake
class BitBakeLogging(OESelftestTestCase):
    """Selftests for how BitBake surfaces shell/python task output on failure.

    Each test runs a deliberately failing task from the logging-test recipe
    under the four combinations of BBINCLUDELOGS (logs in the error summary)
    and -v (verbose console output), and checks which output lines appear.
    """

    def METHOD_NAME(self, item, entry, count):
        # Assert that `entry` occurs exactly `count` times within `item`.
        self.assertEqual(item.count(entry), count, msg="Output:\n'''\n%s\n'''\ndoesn't contain %d copies of:\n'''\n%s\n'''\n" % (item, count, entry))
    def test_shell_logging(self):
        """Shell task output appears per BBINCLUDELOGS/-v settings."""
        # no logs, no verbose
        self.write_config('BBINCLUDELOGS = ""')
        result = bitbake("logging-test -c shelltest -f", ignore_status = True)
        self.assertIn("ERROR: Logfile of failure stored in:", result.output)
        self.assertNotIn("This is shell stdout", result.output)
        self.assertNotIn("This is shell stderr", result.output)
        # logs, no verbose
        self.write_config('BBINCLUDELOGS = "yes"')
        result = bitbake("logging-test -c shelltest -f", ignore_status = True)
        self.assertIn("ERROR: Logfile of failure stored in:", result.output)
        self.METHOD_NAME(result.output, "This is shell stdout", 1)
        self.METHOD_NAME(result.output, "This is shell stderr", 1)
        # no logs, verbose
        self.write_config('BBINCLUDELOGS = ""')
        result = bitbake("logging-test -c shelltest -f -v", ignore_status = True)
        self.assertIn("ERROR: Logfile of failure stored in:", result.output)
        # two copies due to set +x
        self.METHOD_NAME(result.output, "This is shell stdout", 2)
        self.METHOD_NAME(result.output, "This is shell stderr", 2)
        # logs, verbose
        self.write_config('BBINCLUDELOGS = "yes"')
        result = bitbake("logging-test -c shelltest -f -v", ignore_status = True)
        self.assertIn("ERROR: Logfile of failure stored in:", result.output)
        # two copies due to set +x
        self.METHOD_NAME(result.output, "This is shell stdout", 2)
        self.METHOD_NAME(result.output, "This is shell stderr", 2)
    def test_python_exit_logging(self):
        """Output of a python task failing via sys.exit() appears only with logs."""
        # no logs, no verbose
        self.write_config('BBINCLUDELOGS = ""')
        result = bitbake("logging-test -c pythontest_exit -f", ignore_status = True)
        self.assertIn("ERROR: Logfile of failure stored in:", result.output)
        self.assertNotIn("This is python stdout", result.output)
        # logs, no verbose
        self.write_config('BBINCLUDELOGS = "yes"')
        result = bitbake("logging-test -c pythontest_exit -f", ignore_status = True)
        self.assertIn("ERROR: Logfile of failure stored in:", result.output)
        # A sys.exit() should include the output
        self.METHOD_NAME(result.output, "This is python stdout", 1)
        # no logs, verbose
        self.write_config('BBINCLUDELOGS = ""')
        result = bitbake("logging-test -c pythontest_exit -f -v", ignore_status = True)
        self.assertIn("ERROR: Logfile of failure stored in:", result.output)
        # python tasks don't log output with -v currently
        #self.assertCount(result.output, "This is python stdout", 1)
        # logs, verbose
        self.write_config('BBINCLUDELOGS = "yes"')
        result = bitbake("logging-test -c pythontest_exit -f -v", ignore_status = True)
        self.assertIn("ERROR: Logfile of failure stored in:", result.output)
        # python tasks don't log output with -v currently
        #self.assertCount(result.output, "This is python stdout", 1)
    def test_python_fatal_logging(self):
        """A bb.fatal() shows its message but suppresses earlier stdout."""
        # no logs, no verbose
        self.write_config('BBINCLUDELOGS = ""')
        result = bitbake("logging-test -c pythontest_fatal -f", ignore_status = True)
        self.assertIn("ERROR: Logfile of failure stored in:", result.output)
        self.assertNotIn("This is python fatal test stdout", result.output)
        self.METHOD_NAME(result.output, "This is a fatal error", 1)
        # logs, no verbose
        self.write_config('BBINCLUDELOGS = "yes"')
        result = bitbake("logging-test -c pythontest_fatal -f", ignore_status = True)
        self.assertIn("ERROR: Logfile of failure stored in:", result.output)
        # A bb.fatal() should not include the output
        self.assertNotIn("This is python fatal test stdout", result.output)
        self.METHOD_NAME(result.output, "This is a fatal error", 1)
        # no logs, verbose
        self.write_config('BBINCLUDELOGS = ""')
        result = bitbake("logging-test -c pythontest_fatal -f -v", ignore_status = True)
        self.assertIn("ERROR: Logfile of failure stored in:", result.output)
        # python tasks don't log output with -v currently
        #self.assertCount(result.output, "This is python fatal test stdout", 1)
        self.METHOD_NAME(result.output, "This is a fatal error", 1)
        # logs, verbose
        self.write_config('BBINCLUDELOGS = "yes"')
        result = bitbake("logging-test -c pythontest_fatal -f -v", ignore_status = True)
        self.assertIn("ERROR: Logfile of failure stored in:", result.output)
        # python tasks don't log output with -v currently
        #self.assertCount(result.output, "This is python fatal test stdout", 1)
        self.METHOD_NAME(result.output, "This is a fatal error", 1)
|
1,805 | stop pages | import discord
from redbot.core.bank import get_currency_name
from redbot.vendored.discord.ext import menus
class BadgeMenu(menus.MenuPages, inherit_buttons=False):
    """Reaction-driven paginated badge browser.

    Wraps a page source with first/prev/next/last navigation, a stop button,
    and an optional "buy" button that is shown only when the invoker may run
    the cog's buy_badge command.
    """

    def __init__(
        self,
        source: menus.PageSource,
        timeout: int = 30,
        can_buy=False,
    ):
        super().__init__(
            source,
            timeout=timeout,
            clear_reactions_after=True,
            delete_message_after=True,
        )
        # Whether to offer the buy button; re-checked against command
        # permissions in start().
        self.can_buy = can_buy
    async def start(self, ctx, *, channel=None, wait=False):
        # Downgrade can_buy if the invoker cannot actually run buy_badge here.
        if self.can_buy:
            self.can_buy = await ctx.cog.buy_badge.can_run(ctx, check_all_parents=True)
        await super().start(ctx, channel=channel, wait=wait)
    def should_add_reactions(self):
        # Always add buttons, even for a single page (stop/buy remain useful).
        return True
    def _no_pages(self):
        # skip_if helper: hide prev/next when there is only one page.
        return not self._source.is_paginating()
    def _skip_double_triangle_buttons(self):
        # skip_if helper: hide first/last unless pagination warrants them.
        return (not self._source.is_paginating()) or super()._skip_double_triangle_buttons()
    async def finalize(self, timed_out):
        """|coro|
        A coroutine that is called when the menu loop has completed
        its run. This is useful if some asynchronous clean-up is
        required after the fact.
        Parameters
        --------------
        timed_out: :class:`bool`
            Whether the menu completed due to timing out.
        """
        # On timeout, keep the message around instead of deleting it.
        if timed_out and self.delete_message_after:
            self.delete_message_after = False
    def cant_buy_check(self):
        # skip_if helper: hide the buy button when buying is not allowed.
        return not self.can_buy
    @menus.button("\N{BANKNOTE WITH DOLLAR SIGN}", position=menus.First(0), skip_if=cant_buy_check)
    async def buy_badge(self, payload):
        """Invoke the cog's buy_badge command for the badge on this page."""
        page = await self.source.get_page(self.current_page)
        await self.ctx.invoke(
            self.ctx.cog.buy_badge,
            is_global=True if page["server_id"] == "global" else False,
            name=page["badge_name"],
        )
    @menus.button(
        "\N{BLACK LEFT-POINTING DOUBLE TRIANGLE WITH VERTICAL BAR}\ufe0f",
        position=menus.First(0),
        skip_if=_skip_double_triangle_buttons,
    )
    async def go_to_first_page(self, payload):
        """go to the first page"""
        await self.show_page(0)
    @menus.button(
        "\N{BLACK LEFT-POINTING TRIANGLE}\ufe0f", position=menus.First(1), skip_if=_no_pages
    )
    async def go_to_previous_page(self, payload):
        """go to the previous page (wraps around to the last page)"""
        if self.current_page == 0:
            await self.show_page(self._source.get_max_pages() - 1)
        else:
            await self.show_checked_page(self.current_page - 1)
    @menus.button(
        "\N{BLACK RIGHT-POINTING TRIANGLE}\ufe0f", position=menus.Last(0), skip_if=_no_pages
    )
    async def go_to_next_page(self, payload):
        """go to the next page (wraps around to the first page)"""
        if self.current_page == self._source.get_max_pages() - 1:
            await self.show_page(0)
        else:
            await self.show_checked_page(self.current_page + 1)
    @menus.button(
        "\N{BLACK RIGHT-POINTING DOUBLE TRIANGLE WITH VERTICAL BAR}\ufe0f",
        position=menus.Last(1),
        skip_if=_skip_double_triangle_buttons,
    )
    async def go_to_last_page(self, payload):
        """go to the last page"""
        # The call here is safe because it's guarded by skip_if
        await self.show_page(self._source.get_max_pages() - 1)
    @menus.button("\N{CROSS MARK}", position=menus.First(2))
    async def METHOD_NAME(self, payload: discord.RawReactionActionEvent) -> None:
        # Stop the menu loop (message is then cleaned up per constructor flags).
        self.stop()
class AvailableBadgePager(menus.ListPageSource):
    """One-badge-per-page source for badges available on a server.

    Each entry is a badge dict with at least badge_name, description,
    border_color (hex string like "#RRGGBB"), price, and bg_img keys.
    """

    def __init__(self, entries, server_name, server_id, icon):
        super().__init__(entries, per_page=1)
        self.server_name = server_name
        self.icon = icon
        self.server_id = server_id
    async def format_page(self, menu: BadgeMenu, page):
        """Render the badge dict for this page as a Discord embed."""
        em = discord.Embed(
            title=page["badge_name"],
            description=page["description"],
            # border_color is "#RRGGBB"; strip "#" and parse as hex.
            color=int(page["border_color"][1:], base=16),
        )
        if page["price"] > 0:
            # NOTE(review): no space between the amount and the currency name
            # in the rendered field — confirm this is intentional.
            em.add_field(
                name="Price", value=f"{page['price']}{await get_currency_name(menu.ctx.guild)}"
            )
        elif page["price"] == 0:
            em.add_field(name="Price", value="Free")
        em.set_author(name=self.server_name, icon_url=self.icon)
        em.set_thumbnail(url=page["bg_img"])
        em.set_footer(text=f"Badge {menu.current_page+1}/{self.get_max_pages()}")
        return em
class OwnBadgePager(menus.ListPageSource):
    """One-badge-per-page source for badges a specific member owns."""

    def __init__(self, entries, user: discord.Member):
        super().__init__(entries, per_page=1)
        # Member whose badges are being displayed (used for the embed author).
        self.user = user
    async def format_page(self, menu: BadgeMenu, page):
        """Render the owned-badge dict for this page as a Discord embed."""
        em = discord.Embed(
            title=page["badge_name"],
            description=page["description"],
            # border_color is "#RRGGBB"; strip "#" and parse as hex.
            color=int(page["border_color"][1:], base=16),
        )
        # NOTE(review): Member.avatar_url is discord.py 1.x API; 2.x renamed it
        # (display_avatar) — confirm the pinned discord.py version.
        em.set_author(name=self.user.display_name, icon_url=self.user.avatar_url)
        em.set_thumbnail(url=page["bg_img"])
        em.set_footer(
            text=f"Server: {page['server_name']} • Badge {menu.current_page+1}/{self.get_max_pages()}"
        )
        return em |
1,806 | socket clear | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import hci
import socket
import ctypes
import struct
import asyncio
import logging
import subprocess
import sys
import time
import multiprocessing
# Bytes requested per socket recv() call when draining or receiving HCI data.
SOCKET_RECV_BUFFER_SIZE = 425984
# NOTE(review): not referenced in this file's visible code — confirm it is
# used by external callers before removing.
SOCKET_RECV_TIMEOUT = 3
def btmgmt_dev_reset(index):
    """Power off the Bluetooth controller with HCI index `index` via btmgmt.

    Used to release a controller held by the kernel so that a subsequent
    user-channel bind can succeed.  The command's output is intentionally
    discarded and its exit status is not checked (best-effort reset).
    """
    logging.info(f"Selecting index {index}")
    # subprocess.run with shell=False replaces the manual Popen + communicate()
    # pair; check=False keeps the original best-effort (no-raise) behavior.
    subprocess.run(
        ["btmgmt", "-i", str(index), "power", "off"],
        shell=False,
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE,
        check=False,
    )
class BindingError(Exception):
    """Raised internally when libc bind() on the HCI socket returns non-zero."""
    pass
# NOTE(review): deriving from BaseException means a plain `except Exception`
# will NOT catch this — confirm that is intentional before changing the base,
# as callers may rely on the current catch semantics.
class HCI_User_Channel_Socket_Error(BaseException):
    """Raised when the raw HCI user-channel socket cannot be created."""
    pass
class HCI_User_Channel_Socket():
    """Raw HCI user-channel socket bound to a local Bluetooth controller.

    Opens a non-blocking AF_BLUETOOTH raw socket, binds it to
    HCI_CHANNEL_USER for the given controller index (through libc, since the
    Python socket module cannot bind this channel directly), drains stale
    data, and then receives packets on a background process into a
    multiprocessing queue (`rx_buffer_q`).
    """

    def __init__(self, device_index=0, device_mode=None,
                 asyncio_loop=None):
        """Create, bind and drain the socket; listener starts via start()."""
        logging.debug(
            "Device index: %s, Device address: %s",
            device_index,
            device_mode)
        self.loop = asyncio_loop
        # libc handle used for bind(); see socket_bind() for why.
        self.libc = ctypes.cdll.LoadLibrary('libc.so.6')
        # Queue of (raw_packet_bytes, perf_counter_timestamp) tuples.
        self.rx_buffer_q = multiprocessing.Manager().Queue()
        self.counter = 0
        self.device_index = device_index
        self.device_mode = device_mode
        self.hci_socket = self.socket_create()
        self.socket_bind(self.device_index)
        # Drain any packets buffered before we took over the channel.
        self.METHOD_NAME()
        self.listener_proc = None
        # Signals the listener process to exit.
        self.listener_ev = multiprocessing.Manager().Event()
    def socket_create(self):
        """Create a non-blocking raw HCI socket with an enlarged recv buffer."""
        logging.debug("%s", self.socket_create.__name__)
        new_socket = socket.socket(socket.AF_BLUETOOTH,
                                   socket.SOCK_RAW | socket.SOCK_NONBLOCK,
                                   socket.BTPROTO_HCI)
        if new_socket is None:
            raise HCI_User_Channel_Socket_Error("Socket error. \
                                                Opening socket failed")
        new_socket.setblocking(False)
        socket_size = new_socket.getsockopt(
            socket.SOL_SOCKET, socket.SO_RCVBUF)
        logging.info(f"Default socket recv buffer size: {socket_size}")
        # Enlarge the kernel recv buffer to reduce packet loss under bursts.
        new_socket.setsockopt(socket.SOL_SOCKET, socket.SO_RCVBUF, 500000)
        socket_size = new_socket.getsockopt(
            socket.SOL_SOCKET, socket.SO_RCVBUF)
        logging.info(f"Set socket recv buffer size: {socket_size}")
        return new_socket
    def socket_bind(self, index):
        """Bind to HCI_CHANNEL_USER via libc, retrying once after a reset.

        NOTE(review): on final failure this calls sys.exit(), terminating the
        whole process from library code — confirm callers expect that.
        """
        logging.debug("%s index: %s", self.socket_bind.__name__, index)
        # addr: struct sockaddr_hci from /usr/include/bluetooth/hci.h
        addr = struct.pack(
            'HHH',
            hci.AF_BLUETOOTH,
            index,
            hci.HCI_CHANNEL_USER)
        retry_binding = 2
        for i in range(retry_binding):
            try:
                bind = self.libc.bind(self.hci_socket.fileno(),
                                      ctypes.cast(addr,
                                                  ctypes.POINTER(ctypes.c_ubyte)),
                                      len(addr))
                if bind != 0:
                    raise BindingError
            except BindingError:
                # Usually means the kernel still owns the controller; power it
                # off and retry once.
                logging.warning("Binding error. Trying to reset bluetooth.")
                btmgmt_dev_reset(self.device_index)
                if i < retry_binding - 1:
                    continue
                else:
                    self.hci_socket.close()
                    logging.error("Binding error. Check HCI index present.")
                    sys.exit()
            logging.info("Binding done!")
            break
    def METHOD_NAME(self):
        """Drain the socket until it would block, discarding stale packets."""
        logging.debug("%s", self.METHOD_NAME.__name__)
        try:
            logging.info("Clearing the buffer...")
            time.sleep(1)
            cnt = 0
            while True:
                buff = self.hci_socket.recv(SOCKET_RECV_BUFFER_SIZE)
                cnt += len(buff)
                logging.debug(f"Read from buffer {cnt} bytes")
        except BlockingIOError:
            # Non-blocking socket raises once nothing is left to read.
            logging.info("Buffer empty and ready!")
            return
    async def send(self, ba_message):
        """Asynchronously send raw HCI bytes on the event loop."""
        await self.loop.sock_sendall(self.hci_socket, ba_message)
    def socket_listener(self):
        """Receive loop run in a child process; fills rx_buffer_q.

        Exits when listener_ev is set.  NOTE(review): the BrokenPipeError
        branch logs but does not break, so the loop keeps spinning — confirm
        that is intended.
        """
        recv_at_once = 0
        while True:
            try:
                if self.listener_ev.is_set():
                    logging.info("listener_ev set")
                    break
                buffer = self.hci_socket.recv(SOCKET_RECV_BUFFER_SIZE)
                logging.info(
                    f"Socket recv: {self.counter} th packet with len: {len(buffer)}")
                # Timestamp each packet as it is received.
                self.rx_buffer_q.put((buffer, time.perf_counter()))
                recv_at_once += 1
                self.counter += 1
            except BlockingIOError:
                # Nothing pending right now; log burst size and poll again.
                if recv_at_once > 1:
                    logging.info(f"Socket recv in one loop: {recv_at_once}")
                recv_at_once = 0
                pass
            except BrokenPipeError:
                logging.info("BrokenPipeError: Closing...")
                print("BrokenPipeError. Press Ctrl-C to exit...")
    def close(self):
        """Close the underlying HCI socket."""
        logging.debug("%s ", self.close.__name__)
        return self.hci_socket.close()
    def start(self):
        """Start the background listener process (daemonized)."""
        self.listener_proc = multiprocessing.Process(
            target=self.socket_listener, daemon=True)
        self.listener_proc.start()
        logging.info(f"start listener_proc pid: {self.listener_proc.pid}")
    def stop(self):
        """Signal the listener to exit, join it, and close the socket."""
        logging.info(f"stop listener_proc pid: {self.listener_proc.pid}")
        self.listener_ev.set()
        self.listener_proc.join()
        self.close() |
1,807 | model agnostic extract | # Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Public API for extracting FeaturesPredictionsLabels without eval model."""
import copy
import datetime
from typing import Generator, List, Optional
import apache_beam as beam
from apache_beam.utils import shared
import tensorflow as tf
from tensorflow_model_analysis import constants
from tensorflow_model_analysis.api import types
from tensorflow_model_analysis.eval_metrics_graph import eval_metrics_graph
from tensorflow_model_analysis.extractors import extractor
from tensorflow_model_analysis.model_agnostic_eval import model_agnostic_predict as agnostic_predict
def ModelAgnosticGetFPLFeedConfig(
    model_agnostic_config: agnostic_predict.ModelAgnosticConfig
) -> eval_metrics_graph.FPLFeedConfig:
  """Builds the FPLFeedConfig matching a ModelAgnosticConfig.

  Each entry of the config's feature_spec is turned into a placeholder spec:
  tf.io.FixedLenFeature maps to a dense placeholder and tf.io.VarLenFeature
  to a sparse placeholder, keeping the feature's dtype. Keys listed in
  prediction_keys or label_keys are routed to the predictions/labels dicts;
  every other key is treated as a feature.

  Args:
    model_agnostic_config: The config to use to generate the placeholder ops.

  Returns:
    An eval_metrics_graph.FPLFeedConfig which can be used to instantiate the
    infeed on a metric graph.

  Raises:
    ValueError: A feature_spec value is neither FixedLenFeature nor
      VarLenFeature.
  """
  features = {}
  predictions = {}
  labels = {}
  for key, spec in model_agnostic_config.feature_spec.items():
    if isinstance(spec, tf.io.FixedLenFeature):
      placeholder = (constants.PLACEHOLDER, spec.dtype)
    elif isinstance(spec, tf.io.VarLenFeature):
      placeholder = (constants.SPARSE_PLACEHOLDER, spec.dtype)
    else:
      raise ValueError('Unsupported type %s in feature_spec.' % spec)
    if key in model_agnostic_config.prediction_keys:
      target = predictions
    elif key in model_agnostic_config.label_keys:
      target = labels
    else:
      target = features
    target[key] = placeholder
  return eval_metrics_graph.FPLFeedConfig(
      features=features, predictions=predictions, labels=labels)
# pylint: disable=no-value-for-parameter
def ModelAgnosticExtractor(
    model_agnostic_config: agnostic_predict.ModelAgnosticConfig,
    desired_batch_size: Optional[int] = None) -> extractor.Extractor:
  """Creates an Extractor for ModelAgnosticEval.

  The extractor's PTransform creates and runs ModelAgnosticEval against every
  example yielding a copy of the Extracts input with an additional extract of
  type FeaturesPredictionsLabels keyed by tfma.FEATURES_PREDICTIONS_LABELS_KEY.

  Args:
    model_agnostic_config: The config to use to be able to generate Features,
      Predictions, and Labels dict. This can be done through explicit labeling
      of keys in the input tf.Example.
    desired_batch_size: Optional batch size for batching in Predict.

  Returns:
    Extractor for extracting features, predictions, and labels during predict.

  Raises:
    ValueError: Supplied ModelAgnosticConfig is invalid.
  """
  # METHOD_NAME is the @beam.ptransform_fn defined later in this module.
  return extractor.Extractor(
      stage_name='ModelAgnosticExtractor',
      ptransform=METHOD_NAME(
          model_agnostic_config=model_agnostic_config,
          desired_batch_size=desired_batch_size))
@beam.typehints.with_input_types(beam.typehints.List[types.Extracts])
@beam.typehints.with_output_types(types.Extracts)
class _ModelAgnosticExtractDoFn(beam.DoFn):
  """A DoFn that extracts the FPL from the examples."""

  def __init__(self, model_agnostic_config: agnostic_predict.ModelAgnosticConfig
              ) -> None:
    self._model_agnostic_config = model_agnostic_config
    # TODO(b/140805724): It's odd that shared_handle is not passed as an
    # argument to the constructor. Logically, it seems to have a 1-1
    # correspondence with the model_agnostic_config, so it should be passed with
    # it.
    self._shared_handle = shared.Shared()
    # Populated lazily in setup() via the shared handle.
    self._model_agnostic_wrapper = None
    # Model load time, reported as a Beam distribution metric in finish_bundle.
    self._model_load_seconds = None
    self._model_load_seconds_distribution = beam.metrics.Metrics.distribution(
        constants.METRICS_NAMESPACE, 'model_load_seconds')

  def _make_construct_fn(  # pylint: disable=invalid-name
      self, model_agnostic_config: agnostic_predict.ModelAgnosticConfig):
    """Returns construct func for Shared for constructing ModelAgnosticEval."""

    def construct():  # pylint: disable=invalid-name
      """Function for constructing a EvalSavedModel."""
      start_time = datetime.datetime.now()
      model_agnostic_wrapper = agnostic_predict.ModelAgnosticPredict(
          model_agnostic_config)
      end_time = datetime.datetime.now()
      # Only recorded here; reported later because Beam metrics cannot be
      # updated from setup().
      self._model_load_seconds = int((end_time - start_time).total_seconds())
      return model_agnostic_wrapper

    return construct

  def setup(self):
    # Acquire (or build once per worker) the shared model wrapper.
    self._model_agnostic_wrapper = self._shared_handle.acquire(
        self._make_construct_fn(self._model_agnostic_config))

  def process(self, element: List[types.Extracts]
             ) -> Generator[types.Extracts, None, None]:
    serialized_examples = [x[constants.INPUT_KEY] for x in element]
    # Compute FeaturesPredictionsLabels for each serialized_example using
    # the constructed model_agnostic_wrapper.
    for fpl in self._model_agnostic_wrapper.get_fpls_from_examples(
        serialized_examples):
      # fpl.input_ref indexes back into the input batch; shallow-copy the
      # matching extract and attach the FPL to the copy.
      element_copy = copy.copy(element[fpl.input_ref])
      element_copy[constants.FEATURES_PREDICTIONS_LABELS_KEY] = fpl
      yield element_copy

  def finish_bundle(self):
    # Must update distribution in finish_bundle instead of setup
    # because Beam metrics are not supported in setup.
    if self._model_load_seconds is not None:
      self._model_load_seconds_distribution.update(self._model_load_seconds)
      self._model_load_seconds = None
@beam.ptransform_fn
@beam.typehints.with_input_types(types.Extracts)
@beam.typehints.with_output_types(types.Extracts)
def METHOD_NAME(  # pylint: disable=invalid-name
    extracts: beam.pvalue.PCollection,
    model_agnostic_config: agnostic_predict.ModelAgnosticConfig,
    desired_batch_size: Optional[int] = None) -> beam.pvalue.PCollection:
  """A PTransform that generates features, predictions, labels.

  Args:
    extracts: PCollection of Extracts containing a serialized example to be fed
      to the model.
    model_agnostic_config: A config specifying how to extract
      FeaturesPredictionsLabels from the input Extracts.
    desired_batch_size: Optional batch size for batching in Aggregate.

  Returns:
    PCollection of Extracts, where the extracts contains the features,
    predictions, labels retrieved.
  """
  batch_args = {}
  if desired_batch_size:
    # Pin both min and max so every batch has exactly the requested size.
    batch_args = dict(
        min_batch_size=desired_batch_size, max_batch_size=desired_batch_size)
  return (extracts
          | 'Batch' >> beam.BatchElements(**batch_args)
          | 'ModelAgnosticExtract' >> beam.ParDo(
              _ModelAgnosticExtractDoFn(
                  model_agnostic_config=model_agnostic_config)))
"""Functional test"""
# pylint: disable=missing-function-docstring, invalid-name
def any_even(items):  # fixture: expects consider-using-any-or-all on the loop
    """Return True if the list contains any even numbers"""
    for item in items: # [consider-using-any-or-all]
        if item % 2 == 0:
            return True
    return False

def all_even(items):  # fixture: expects consider-using-any-or-all on the loop
    """Return True if the list contains all even numbers"""
    for item in items: # [consider-using-any-or-all]
        if not item % 2 == 0:
            return False
    return True

def any_uneven(items):  # fixture: expects consider-using-any-or-all on the loop
    """Return True if the list contains any uneven numbers"""
    for item in items: # [consider-using-any-or-all]
        if not item % 2 == 0:
            return True
    return False

def all_uneven(items):  # fixture: expects consider-using-any-or-all on the loop
    """Return True if the list contains all uneven numbers"""
    for item in items: # [consider-using-any-or-all]
        if item % 2 == 0:
            return False
    return True

def is_from_string(item):  # fixture: expects consider-using-any-or-all on the loop
    """Return True if one of parents of item is a string"""
    for parent in item.parents(): # [consider-using-any-or-all]
        if isinstance(parent, str):
            return True
    return False

def is_not_from_string(item):  # fixture: expects consider-using-any-or-all on the loop
    """Return True if one of parents of item isn't a string"""
    for parent in item.parents(): # [consider-using-any-or-all]
        if not isinstance(parent, str):
            return True
    return False

def nested_check(items):  # fixture: message expected on the nested loop only
    """Tests that for loops at deeper levels are picked up"""
    if items and len(items) > 5:
        print(items)
        for item in items: # [consider-using-any-or-all]
            if item in (1, 2, 3):
                return False
        return True
    print(items)
    return items[3] > 5

def words_contains_word(words):  # fixture: expects consider-using-any-or-all
    """Return whether words contains 'word'"""
    for word in words: # [consider-using-any-or-all]
        if word == "word":
            return True
    return False

def complicated_condition_check(items):  # fixture: expects consider-using-any-or-all
    """Case where we expect not any statement with a more complicated condition"""
    for item in items: # [consider-using-any-or-all]
        if item % 2 == 0 and (item % 3 == 0 or item > 15):
            return False
    return True

def is_from_decorator1(node):  # fixture: expects consider-using-any-or-all
    """Case where we expect a particularly long message to be emitted."""
    for ancestor in node: # [consider-using-any-or-all]
        if (
            ancestor.name in ("Exception", "BaseException")
            and ancestor.root().name == "Exception"
        ):
            return True
    return False

def is_from_decorator2(items):  # fixture: expects consider-using-any-or-all
    """Case where we expect an all statement because of negation in the condition"""
    for item in items: # [consider-using-any-or-all]
        if not(item % 2 == 0 and (item % 3 == 0 or item > 15)):
            return False
    return True

def is_from_decorator3(node):  # fixture: expects consider-using-any-or-all
    """Case where we expect a not all statement because of negation in the condition"""
    for ancestor in node: # [consider-using-any-or-all]
        if not (
            ancestor.name in ("Exception", "BaseException")
            and ancestor.root().name == "Exception"
        ):
            return True
    return False
def no_suggestion_if_not_if():  # fixture: must NOT trigger the checker
    """Do not emit if the for loop does not have the pattern we are looking for"""
    for val in range(1):
        var = val
    return var

def METHOD_NAME(item):  # fixture: must NOT trigger (returns strings, not bools)
    """Do not emit if the if-statement does not return a bool"""
    for parent in item.parents():
        if isinstance(parent, str):
            return "True"
    return "False"

def print_items(items):  # fixture: must NOT trigger (loop has no if)
    """Do not emit if there is no If condition in the for loop."""
    for item in items:
        print(item)
    return True

def print_items2(items):  # fixture: must NOT trigger (returns item, not a bool)
    """Do not emit if anything besides a boolean is returned."""
    for item in items:
        return item
    return True

def print_items3(items):  # fixture: must NOT trigger (returns items at fall-through)
    """Do not emit if anything besides a boolean is returned."""
    for _ in items:
        return False
    return items

def print_items4(items):  # fixture: must NOT trigger (side effect in loop body)
    """Do not emit if there is more logic which can cause side effects
    or become less readable in a list comprehension.
    """
    for item in items:
        if isinstance(item, str):
            print(item)
            return False
    return True

def is_from_decorator(node):  # fixture: must NOT trigger (if has an else branch)
    """Do not emit if the if has an else condition. Generally implies more complicated logic."""
    for parent in node.node_ancestors():
        if isinstance(parent, str): # pylint: disable=no-else-return
            return True
        else:
            if parent in parent.selected_annotations:
                return False
    return False
def optimized_any_with_break(split_lines, max_chars):  # fixture: flag-and-break form should trigger
    """False negative found in https://github.com/pylint-dev/pylint/pull/7697"""
    potential_line_length_warning = False
    for line in split_lines: # [consider-using-any-or-all]
        if len(line) > max_chars:
            potential_line_length_warning = True
            break
    return potential_line_length_warning

def optimized_any_without_break(split_lines, max_chars):  # fixture: flag form without break should trigger
    potential_line_length_warning = False
    for line in split_lines: # [consider-using-any-or-all]
        if len(line) > max_chars:
            potential_line_length_warning = True
    return potential_line_length_warning

def print_line_without_break(split_lines, max_chars):  # fixture: must NOT trigger (print side effect)
    potential_line_length_warning = False
    for line in split_lines:
        print(line)
        if len(line) > max_chars:
            potential_line_length_warning = True
    return potential_line_length_warning

def print_line_without_reassign(split_lines, max_chars):  # fixture: must NOT trigger (flag never set)
    potential_line_length_warning = False
    for line in split_lines:
        if len(line) > max_chars:
            print(line)
    return potential_line_length_warning

def multiple_flags(split_lines, max_chars):  # fixture: must NOT trigger (extra statements in if body)
    potential_line_length_warning = False
    for line in split_lines:
        if len(line) > max_chars:
            num = 1
            print(num)
            potential_line_length_warning = True
    return potential_line_length_warning
s = ["hi", "hello", "goodbye", None]  # module-level fixture data
flag = True
for i, elem in enumerate(s):  # module-scope loop: checker must handle non-function scope
    if elem is None:
        continue
    cnt_s = cnt_t = 0
    for j in range(i, len(s)):  # mutates s, so no any()/all() suggestion applies
        if s[j] == elem:
            cnt_s += 1
            s[j] = None
    Flag = False  # NOTE(review): capital 'Flag' differs from 'flag' above — presumably intentional fixture noise; confirm
def with_elif(split_lines, max_chars):  # fixture: must NOT trigger
    """
    Do not raise consider-using-any-or-all because the intent in this code
    is to iterate over all the lines (not short-circuit) and see what
    the last value would be.
    """
    last_longest_line = False
    for line in split_lines:
        if len(line) > max_chars:
            last_longest_line = True
        elif len(line) == max_chars:
            last_longest_line = False
    return last_longest_line

def first_even(items):  # fixture: must NOT trigger (returns the item, not a bool)
    """Return first even number"""
    for item in items:
        if item % 2 == 0:
            return item
    return None

def even(items):  # fixture: must NOT trigger (fall-through returns None, not False)
    for item in items:
        if item % 2 == 0:
            return True
    return None

def iterate_leaves(leaves, current_node):  # fixture: must NOT trigger (side effects, list result)
    results = []
    current_node.was_checked = True
    for leaf in leaves:
        if isinstance(leaf, bool):
            current_node.was_checked = False
        else:
            results.append(leaf)
    return results
# -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
# (C) British Crown copyright. The Met Office.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""
Tests for the orographic-enhancement CLI
"""
import pytest
from improver.constants import LOOSE_TOLERANCE
from . import acceptance as acc
pytestmark = [pytest.mark.acc, acc.skip_if_kgo_missing]
OE = "orographic_enhancement_high_resolution"
CLI = acc.cli_name_with_dashes(__file__)
run_cli = acc.run_cli(CLI)
# The same six diagnostic cubes feed every orographic-enhancement test case;
# the list was previously copy-pasted into all four tests (DRY violation).
_INPUT_PARAMS = (
    "temperature",
    "humidity",
    "pressure",
    "wind_speed",
    "wind_direction",
    "orography_uk-standard_1km",
)


def _input_paths(input_dir):
    """Return the standard diagnostic input file paths located in input_dir."""
    return [input_dir / f"{param}.nc" for param in _INPUT_PARAMS]


@pytest.mark.slow
def test_basic(tmp_path):
    """Test basic orographic enhancement"""
    kgo_dir = acc.kgo_root() / "orographic_enhancement/basic"
    kgo_path = kgo_dir / "kgo_hi_res.nc"
    output_path = tmp_path / "output.nc"
    args = [*_input_paths(kgo_dir), "--output", output_path]
    run_cli(args)
    acc.compare(output_path, kgo_path, rtol=LOOSE_TOLERANCE)


@pytest.mark.slow
def test_boundary_height(tmp_path):
    """Test orographic enhancement with specified boundary height"""
    kgo_dir = acc.kgo_root() / "orographic_enhancement/boundary_height"
    kgo_path = kgo_dir / "kgo_hi_res.nc"
    output_path = tmp_path / "output.nc"
    args = [
        *_input_paths(kgo_dir / "../basic"),
        "--boundary-height=500.",
        "--boundary-height-units=m",
        "--output",
        output_path,
    ]
    run_cli(args)
    acc.compare(output_path, kgo_path, rtol=LOOSE_TOLERANCE)


@pytest.mark.slow
def METHOD_NAME(tmp_path):
    """Test orographic enhancement with boundary height unit conversion"""
    kgo_dir = acc.kgo_root() / "orographic_enhancement/boundary_height"
    kgo_path = kgo_dir / "kgo_hi_res.nc"
    output_path = tmp_path / "output.nc"
    args = [
        *_input_paths(kgo_dir / "../basic"),
        # 1640.41994751 ft == 500 m, so this must reproduce the 500 m KGO
        "--boundary-height=1640.41994751",
        "--boundary-height-units=ft",
        "--output",
        output_path,
    ]
    run_cli(args)
    acc.compare(output_path, kgo_path, rtol=LOOSE_TOLERANCE)


def test_invalid_boundary_height(tmp_path):
    """Test excessively high boundary height"""
    kgo_dir = acc.kgo_root() / "orographic_enhancement/boundary_height"
    output_path = tmp_path / "output.nc"
    args = [
        *_input_paths(kgo_dir / "../basic"),
        "--boundary-height=500000.",
        "--boundary-height-units=m",
        "--output",
        output_path,
    ]
    with pytest.raises(ValueError, match=".*height.*"):
        run_cli(args)
# Tkinter font wrapper
#
# written by Fredrik Lundh, February 1998
#
# FIXME: should add 'displayof' option where relevant (actual, families,
# measure, and metrics)
#
__version__ = "0.9"

import Tkinter

# legal values for the Font 'weight' and 'slant' options
NORMAL = "normal"
ROMAN = "roman"
BOLD = "bold"
ITALIC = "italic"
def nametofont(name):
    """Given the name of a tk named font, returns a Font representation.
    """
    # exists=True attaches to the already-registered Tk font instead of
    # creating (and later deleting) a new one.
    return Font(name=name, exists=True)
class Font:
    """Represents a named font.

    Constructor options are:

    font -- font specifier (name, system font, or (family, size, style)-tuple)
    name -- name to use for this font configuration (defaults to a unique name)
    exists -- does a named font by this name already exist?
       Creates a new named font if False, points to the existing font if True.
       Raises _Tkinter.TclError if the assertion is false.

       the following are ignored if font is specified:

    family -- font 'family', e.g. Courier, Times, Helvetica
    size -- font size in points
    weight -- font thickness: NORMAL, BOLD
    slant -- font slant: ROMAN, ITALIC
    underline -- font underlining: false (0), true (1)
    overstrike -- font strikeout: false (0), true (1)
    """

    def _set(self, kw):
        # Flatten a keyword dict into a ("-option", "value", ...) tuple
        # suitable for the Tcl 'font' command.
        options = []
        for k, v in kw.items():
            if not isinstance(v, basestring):
                v = str(v)
            options.append("-"+k)
            options.append(v)
        return tuple(options)

    def _get(self, args):
        # Prefix each option name with "-" (Tcl option syntax).
        options = []
        for k in args:
            options.append("-"+k)
        return tuple(options)

    def _mkdict(self, args):
        # Convert a flat ("-option", "value", ...) sequence back into a dict,
        # stripping the leading "-" from option names.
        options = {}
        for i in range(0, len(args), 2):
            options[args[i][1:]] = args[i+1]
        return options

    def __init__(self, root=None, font=None, name=None, exists=False, **options):
        if not root:
            root = Tkinter._default_root
        # Accept either a widget (with a .tk attribute) or a raw Tcl interp.
        tk = getattr(root, 'tk', root)
        if font:
            # get actual settings corresponding to the given font
            font = tk.splitlist(tk.call("font", "actual", font))
        else:
            font = self._set(options)
        if not name:
            name = "font" + str(id(self))
        self.name = name
        if exists:
            # Attach to an existing named font; never delete it in __del__.
            self.delete_font = False
            # confirm font exists
            if self.name not in tk.splitlist(tk.call("font", "names")):
                raise Tkinter._tkinter.TclError, "named font %s does not already exist" % (self.name,)
            # if font config info supplied, apply it
            if font:
                tk.call("font", "configure", self.name, *font)
        else:
            # create new font (raises TclError if the font exists)
            tk.call("font", "create", self.name, *font)
            self.delete_font = True
        # Cache the Tcl entry points used by the other methods.
        self._tk = tk
        self._split = tk.splitlist
        self._call = tk.call

    def __str__(self):
        return self.name

    def __eq__(self, other):
        return isinstance(other, Font) and self.name == other.name

    def __getitem__(self, key):
        # dict-style read access delegates to cget
        return self.cget(key)

    def __setitem__(self, key, value):
        # dict-style write access delegates to configure
        self.configure(**{key: value})

    def __del__(self):
        # Delete the underlying Tk font only if this object created it;
        # swallow errors since the interpreter may already be shutting down.
        try:
            if self.delete_font:
                self._call("font", "delete", self.name)
        except (KeyboardInterrupt, SystemExit):
            raise
        except Exception:
            pass

    def copy(self):
        "Return a distinct copy of the current font"
        return Font(self._tk, **self.actual())

    def actual(self, option=None):
        "Return actual font attributes"
        if option:
            return self._call("font", "actual", self.name, "-"+option)
        else:
            return self._mkdict(
                self._split(self._call("font", "actual", self.name))
                )

    def cget(self, option):
        "Get font attribute"
        return self._call("font", "config", self.name, "-"+option)

    def config(self, **options):
        "Modify font attributes"
        if options:
            self._call("font", "config", self.name,
                       *self._set(options))
        else:
            return self._mkdict(
                self._split(self._call("font", "config", self.name))
                )

    configure = config  # Tk-style alias

    def measure(self, text):
        "Return text width"
        return int(self._call("font", "measure", self.name, text))

    def metrics(self, *options):
        """Return font metrics.

        For best performance, create a dummy widget
        using this font before calling this method."""
        if options:
            return int(
                self._call("font", "metrics", self.name, self._get(options))
                )
        else:
            res = self._split(self._call("font", "metrics", self.name))
            options = {}
            # result is a flat ("-metric", value, ...) list; values are ints
            for i in range(0, len(res), 2):
                options[res[i][1:]] = int(res[i+1])
            return options
def METHOD_NAME(root=None):
    "Get font families (as a tuple)"
    # fall back to the default Tk root when none is supplied
    if not root:
        root = Tkinter._default_root
    return root.tk.splitlist(root.tk.call("font", "families"))
def names(root=None):
    """Return a tuple with the names of all currently defined named fonts."""
    # fall back to the default Tk root when none is supplied
    tk_root = root or Tkinter._default_root
    return tk_root.tk.splitlist(tk_root.tk.call("font", "names"))
# --------------------------------------------------------------------
# test stuff

if __name__ == "__main__":
    root = Tkinter.Tk()
    # create a font from explicit options
    f = Font(family="times", size=30, weight=NORMAL)
    print f.actual()
    print f.actual("family")
    print f.actual("weight")
    print f.config()
    print f.cget("family")
    print f.cget("weight")
    print names()
    print f.measure("hello"), f.metrics("linespace")
    print f.metrics()
    # create a font from a (family, size, style) tuple
    f = Font(font=("Courier", 20, "bold"))
    print f.measure("hello"), f.metrics("linespace")
    w = Tkinter.Label(root, text="Hello, world", font=f)
    w.pack()
    w = Tkinter.Button(root, text="Quit!", command=root.destroy)
    w.pack()
    # copy the button's current font and make it bold
    fb = Font(font=w["font"]).copy()
    fb.config(weight=BOLD)
    w.config(font=fb)
    Tkinter.mainloop()
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from json import loads as loadjson
from os import environ
from random import randint
from urllib import urlencode
from twisted.python import log as logger
from twisted.internet.task import LoopingCall
from twisted.internet.defer import DeferredList, inlineCallbacks, returnValue as returnD
from twisted.internet.error import ConnectionRefusedError, TimeoutError
from hyphe_backend.lib.mongo import MongoDB
from hyphe_backend.lib.utils import format_error, is_error, deferredSleep, now_ts, getPage
class JobsQueue(object):
    """In-memory queue of crawl jobs, persisted in Mongo and pushed to ScrapyD."""

    def __init__(self, config):
        self.db = MongoDB(config)
        # ScrapyD endpoint; environment variables override the static config
        self.scrapyd = 'http://%s:%s/' % (environ.get('HYPHE_CRAWLER_HOST', config['host']), int(environ.get('HYPHE_CRAWLER_PORT', config['scrapy_port'])))
        self.max_simul_crawls = config["max_simul_requests"]
        self.db_name = config["db_name"]
        # None until METHOD_NAME() lazily reloads waiting jobs from Mongo
        self.queue = None
        # Poll the queue every 200 ms, firing once immediately
        self.depiler = LoopingCall(self.depile)
        self.depiler.start(0.2, True)

    @inlineCallbacks
    def METHOD_NAME(self):
        """Rebuild the in-memory queue from jobs still waiting in Mongo."""
        self.queue = {}
        corpora = yield self.db.list_corpus(projection=[])
        dl = [self.db.get_waiting_jobs(corpus["_id"]) for corpus in corpora]
        alljobs = yield DeferredList(dl, consumeErrors=True)
        for bl, res in alljobs:
            if not bl:
                print "ERROR collecting old crawljobs for a corpus", res
            # NOTE(review): when bl is False, res is a Failure and the unpack
            # below will raise — a `continue` after logging looks intended; confirm.
            corpus, jobs = res
            for job in jobs:
                self.queue[job["_id"]] = {
                    "corpus": corpus,
                    "timestamp": job["created_at"],
                    "crawl_arguments": job["crawl_arguments"]
                }

    def stop(self):
        # Stop the periodic depiler loop if it is still running.
        if self.depiler.running:
            self.depiler.stop()

    # Let's scrape ScrapyD's internal jobs webpage since the API
    # does not provide global information on all spiders...
    @inlineCallbacks
    def get_scrapyd_status(self):
        """Scrape ScrapyD's /jobs page into counts: pending, running, per-corpus."""
        url = "%sjobs" % self.scrapyd
        try:
            jobs = yield getPage(url)
        except TimeoutError:
            logger.msg("WARNING: ScrapyD's monitoring website seems like not answering")
            returnD(None)
        except Exception as e:
            logger.msg("WARNING: ScrapyD's monitoring website seems down: %s %s" % (type(e), e))
            returnD(None)
        status = {"pending": 0, "running": 0}
        read = None
        # The page lists job rows grouped under Pending/Running/Finished headers
        for line in jobs.split("><tr"):
            if ">Pending<" in line:
                read = "pending"
            elif ">Running<" in line:
                read = "running"
            elif ">Finished<" in line:
                read = None
            elif read == "running":
                # Running rows also feed a per-corpus counter; project names
                # follow the "<db_name>_<corpus>" convention.
                pattern = ">" + self.db_name + "_"
                if pattern not in line:
                    continue
                corpus = line.split(pattern)[1].split("</td>")[0]
                if corpus not in status:
                    status[corpus] = 0
                status[corpus] += 1
                status[read] += 1
            elif read:
                status[read] += 1
        returnD(status)

    @inlineCallbacks
    def send_scrapy_query(self, action, arguments=None):
        """Call a ScrapyD JSON API endpoint and return the decoded response."""
        url = "%s%s.json" % (self.scrapyd, action)
        method = "POST"
        headers = None
        if action.startswith('list'):
            # list* endpoints are GET with querystring arguments
            method = "GET"
            if arguments:
                args = [str(k)+'='+str(v) for (k, v) in arguments.iteritems()]
                url += '?' + '&'.join(args)
                arguments = None
        elif arguments:
            arguments = urlencode(arguments)
            headers = {'Content-Type': 'application/x-www-form-urlencoded'}
        try:
            res = yield getPage(url, method=method, postdata=arguments, \
                headers=headers, timeout=30)
            result = loadjson(res)
            returnD(result)
        except ConnectionRefusedError:
            returnD(format_error("Could not contact scrapyd server, " + \
                "maybe it's not started..."))
        except Exception as e:
            returnD(format_error(e))

    @inlineCallbacks
    def add_job(self, args, corpus, webentity_id):
        """Persist a new crawl job in Mongo, enqueue it, and return its id."""
        ts = now_ts()
        job_id = yield self.db.add_job(corpus, webentity_id, args, ts)
        self.queue[job_id] = {
            "corpus": corpus,
            "timestamp": ts,
            "crawl_arguments": args
        }
        yield self.db.add_log(corpus, job_id, "CRAWL_ADDED", ts)
        returnD(job_id)

    @inlineCallbacks
    def depile(self):
        """Periodic task: push at most one queued job to ScrapyD when a slot is free."""
        if self.queue is None:
            yield self.METHOD_NAME()
        if not len(self.queue):
            returnD(None)
        status = yield self.get_scrapyd_status()
        # Back off while ScrapyD is unreachable, has pending jobs, or is full
        if not status or status["pending"] > 0 or status["running"] >= self.max_simul_crawls:
            returnD(None)
        # Add some random wait to allow possible concurrent Hyphe instance
        # to compete for ScrapyD's empty slots
        yield deferredSleep(1./randint(4,20))
        # Order jobs by corpus with less currently running crawls then age
        ordered = sorted(self.queue.items(), key=lambda x: \
            float("%s.%s" % (status.get(x[1]["corpus"], 0), x[1]["timestamp"])))
        job_id, job = ordered[0]
        args = job["crawl_arguments"]
        res = yield self.send_scrapy_query('schedule', {"project": args["project"], "spider": args["spider"], "setting": args["setting"], "job_id": job_id})
        ts = now_ts()
        if is_error(res):
            logger.msg("WARNING: error sending job %s to ScrapyD: %s" % (job, res))
            if job_id in self.queue:
                self.queue[job_id]['timestamp'] = ts # let it retry a bit later
        else:
            yield self.db.update_job(job["corpus"], job_id, res['jobid'], ts)
            yield self.db.add_log(job["corpus"], job_id, "CRAWL_SCHEDULED", ts)
            if job_id in self.queue:
                del(self.queue[job_id])

    def cancel_corpus_jobs(self, corpus):
        """Drop every queued (not yet scheduled) job belonging to a corpus."""
        # py2 dict.items() returns a list copy, so deleting while looping is safe
        for _id, job in self.queue.items():
            if job["corpus"] == corpus:
                del(self.queue[_id])

    def count_waiting_jobs(self, corpus):
        """Return the number of queued jobs for a corpus."""
        return len([0 for j in self.queue.values() if j["corpus"] == corpus])
|
#!/usr/bin/env python3
# SPDX-FileCopyrightText: 2012-2023 MOD Audio UG
# SPDX-License-Identifier: AGPL-3.0-or-later
# List of all subdividers' 'value's and 'label's, from 2 whole notes down to
# a 1/32 triplet. "." marks dotted and "T" marks triplet subdivisions.
dividers = [
    {'value': value, 'label': label}
    for value, label in (
        (0.333, "2."), (0.5, "2"), (0.75, "2T"),
        (0.666, "1."), (1, "1"), (1.5, "1T"),
        (1.333, "1/2."), (2, "1/2"), (3, "1/2T"),
        (2.666, "1/4."), (4, "1/4"), (6, "1/4T"),
        (5.333, "1/8."), (8, "1/8"), (12, "1/8T"),
        (10.666, "1/16."), (16, "1/16"), (24, "1/16T"),
        (21.333, "1/32."), (32, "1/32"), (48, "1/32T"),
    )
]
# Multiplicative factors converting each supported unit 'to' and 'from' its
# base unit: seconds for the time units, Hz for the frequency units.
unit_conversion_factors = {
    # time units <-> seconds
    's': {'to': 1, 'from': 1},
    'ms': {'to': 0.001, 'from': 1000},
    'min': {'to': 60.0, 'from': 1 / 60.0},
    # frequency units <-> Hz
    'Hz': {'to': 1, 'from': 1},
    'MHz': {'to': 1000000, 'from': 0.000001},
    'kHz': {'to': 1000, 'from': 0.001},
}
def get_filtered_dividers(smin, smax, options=None):
    """Get list of filtered dividers s such as smin <= s <= smax

    Args:
        smin (float): min divider value
        smax (float): max divider value
        options (list, optional): dividers to filter; defaults to the
            module-level ``dividers`` table. Backward-compatible addition
            that also makes the function testable in isolation.

    Returns:
        list: filtered dividers as dicts with subdivider value and label
    """
    if options is None:
        options = dividers
    # Comprehension + chained comparison replaces the manual append loop.
    return [d for d in options if smin <= d['value'] <= smax]
def get_divider_value(b, v):
    """Compute the subdivision matching a port value at a given tempo.

    Args:
        b (float): BPM s-1
        v (float): control port value in seconds

    Returns:
        float: divider value (240 divided by b*v)
    """
    bpm_times_seconds = b * v
    return 240 / bpm_times_seconds
def get_port_value(b, s, port_unit_symbol):
    """Compute the Control Port value when BPM-addressed.

    Args:
        b (float): BPM s-1
        s (float): divider value (subdivider)
        port_unit_symbol (string): Control port unit symbol

    Returns:
        float: control port value (in the port's own unit for "BPM" ports,
        otherwise in seconds)
    """
    return b / s if port_unit_symbol == "BPM" else 240 / (b * s)
def convert_equivalent(value, conversion_factor, port_unit_symbol):
    """Convert between seconds-equivalents and the listed time/frequency units.

    Time units multiply by the factor; frequency units divide the factor by
    the value (period <-> frequency inversion).

    Args:
        value (float): input value
        conversion_factor (float): conversion factor based on
            unit_conversion_factors
        port_unit_symbol (string): Control port unit symbol

    Returns:
        float: converted value, or None for an unknown unit symbol
    """
    if value == 0:  # avoid division by zero
        value = 0.001
    if port_unit_symbol in ("s", "ms", "min"):
        return conversion_factor * value
    if port_unit_symbol in ("Hz", "MHz", "kHz"):
        return conversion_factor / value
    return None
def convert_seconds_to_port_value_equivalent(value, port_unit_symbol):
    """Convert a value expressed in seconds into the control port's own unit.

    Args:
        value (float): value in seconds
        port_unit_symbol (string): Control port unit symbol

    Returns:
        float: equivalent value using the control port unit, or None for an
        unknown unit symbol
    """
    try:
        factor = unit_conversion_factors[port_unit_symbol]['from']
    except KeyError:
        return None
    return convert_equivalent(value, factor, port_unit_symbol)
def convert_port_value_to_seconds_equivalent(value, port_unit_symbol):
    """Convert a control-port value into its seconds (base-unit) equivalent.

    Args:
        value (float): Control port value (usually min or max)
        port_unit_symbol (string): Control port unit symbol

    Returns:
        float: equivalent value in seconds, or None for an unknown unit symbol
    """
    unit = unit_conversion_factors.get(port_unit_symbol)
    if unit is None:
        return None
    return convert_equivalent(value, unit['to'], port_unit_symbol)
def METHOD_NAME(port, min_bpm, max_bpm):
    """Get dividers options for given port and bpmPort min and max

    Args:
        port (dict): port info (expects 'units', 'ranges' and 'properties' keys)
        min_bpm (float): minimum value for bpm
        max_bpm (float): maximum value for bpm

    Return:
        list: all available dividers as dicts with subdivider value and label
    """
    if port['units']['symbol'] == "BPM":
        # BPM ports map directly: subdivision = bpm / port value
        s1_min_bpm = min_bpm / port['ranges']['minimum']
        s2_min_bpm = min_bpm / port['ranges']['maximum']
        s1_max_bpm = max_bpm / port['ranges']['minimum']
        s2_max_bpm = max_bpm / port['ranges']['maximum']
    else:
        # First, convert min and max port values to equivalent in seconds
        min_value = convert_port_value_to_seconds_equivalent(port['ranges']['minimum'], port['units']['symbol'])
        max_value = convert_port_value_to_seconds_equivalent(port['ranges']['maximum'], port['units']['symbol'])
        # Then, compute min and max subdividers that will fit all bpms
        s1_min_bpm = get_divider_value(min_bpm, min_value)
        s2_min_bpm = get_divider_value(min_bpm, max_value)
        s1_max_bpm = get_divider_value(max_bpm, min_value)
        s2_max_bpm = get_divider_value(max_bpm, max_value)
    if "hasStrictBounds" in port['properties']:
        # Strict bounds: keep only dividers valid across the whole BPM range
        smin = max(s1_min_bpm, s1_max_bpm) if s1_min_bpm < s2_min_bpm else max(s2_min_bpm, s2_max_bpm)
        smax = min(s2_min_bpm, s2_max_bpm) if s1_min_bpm < s2_min_bpm else min(s1_min_bpm, s1_max_bpm)
    else:
        # Loose bounds: any divider reachable at some BPM is acceptable
        smin = min(s1_min_bpm, s2_min_bpm, s1_max_bpm, s2_max_bpm)
        smax = max(s1_min_bpm, s2_min_bpm, s1_max_bpm, s2_max_bpm)
    return get_filtered_dividers(smin, smax)
#!/usr/bin/env python3
#
# Copyright (c) 2016, The OpenThread Authors.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
import unittest
import config
import thread_cert
from pktverify.consts import MLE_ADVERTISEMENT, MLE_CHILD_ID_REQUEST, MLE_DATA_RESPONSE, MLE_CHILD_ID_RESPONSE, MLE_CHILD_UPDATE_RESPONSE, MLE_CHILD_UPDATE_REQUEST, SVR_DATA_URI, SOURCE_ADDRESS_TLV, MODE_TLV, ADDRESS_REGISTRATION_TLV, LEADER_DATA_TLV, NETWORK_DATA_TLV, ACTIVE_TIMESTAMP_TLV, ROUTE64_TLV
from pktverify.packet_verifier import PacketVerifier
from pktverify.addrs import Ipv6Addr
# Node ids: keys into TOPOLOGY and self.nodes for each simulated device.
LEADER = 1
ROUTER = 2
ED2 = 3
SED2 = 4
# Minimal thread devices (the sleepy and non-sleepy end-device children).
MTDS = [SED2, ED2]
class Cert_7_1_4_BorderRouterAsRouter(thread_cert.TestCase):
    """Thread certification test 7.1.4: the DUT (ROUTER) acts as a border
    router, registers two prefixes with the leader and propagates the
    resulting network data to its MED and SED children.

    Topology: LEADER <-> ROUTER(DUT) <-> {MED (ED2), SED (SED2)}.
    """

    TOPOLOGY = {
        LEADER: {
            'name': 'LEADER',
            'mode': 'rdn',
            'allowlist': [ROUTER]
        },
        ROUTER: {
            'name': 'ROUTER',
            'mode': 'rdn',
            'allowlist': [LEADER, ED2, SED2]
        },
        ED2: {
            'name': 'MED',
            'is_mtd': True,
            'mode': 'rn',
            'allowlist': [ROUTER]
        },
        SED2: {
            'name': 'SED',
            'is_mtd': True,
            # '-' = sleepy end device; it only receives stable network data.
            'mode': '-',
            'timeout': config.DEFAULT_CHILD_TIMEOUT,
            'allowlist': [ROUTER]
        },
    }

    def test(self):
        """Form the network, add prefixes on the DUT and check that the
        children configure (and the leader can ping) the expected global
        addresses."""
        # Bring nodes up in order: leader, DUT router, then the two children.
        self.nodes[LEADER].start()
        self.simulator.go(config.LEADER_STARTUP_DELAY)
        self.assertEqual(self.nodes[LEADER].get_state(), 'leader')
        self.nodes[ROUTER].start()
        self.simulator.go(config.ROUTER_STARTUP_DELAY)
        self.assertEqual(self.nodes[ROUTER].get_state(), 'router')
        self.nodes[ED2].start()
        self.simulator.go(5)
        self.assertEqual(self.nodes[ED2].get_state(), 'child')
        self.nodes[SED2].start()
        self.simulator.go(5)
        self.assertEqual(self.nodes[SED2].get_state(), 'child')
        self.collect_rloc16s()
        # Register one stable prefix ('paros', with the 's' flag) and one
        # non-stable prefix ('paro') on the DUT.
        self.nodes[ROUTER].add_prefix('2001:2:0:1::/64', 'paros')
        self.nodes[ROUTER].add_prefix('2001:2:0:2::/64', 'paro')
        self.nodes[ROUTER].register_netdata()
        self.simulator.go(5)
        # Set lowpan context of sniffer
        self.simulator.set_lowpan_context(1, '2001:2:0:1::/64')
        self.simulator.set_lowpan_context(2, '2001:2:0:2::/64')
        # The MED receives both prefixes and must autoconfigure an address
        # for each; the leader must be able to ping those addresses.
        addrs = self.nodes[ED2].get_addrs()
        self.assertTrue(any('2001:2:0:1' in addr[0:10] for addr in addrs))
        self.assertTrue(any('2001:2:0:2' in addr[0:10] for addr in addrs))
        for addr in addrs:
            if addr[0:10] == '2001:2:0:1' or addr[0:10] == '2001:2:0:2':
                self.assertTrue(self.nodes[LEADER].ping(addr))
        # The SED only receives stable network data, so it must configure an
        # address from the stable prefix only.
        addrs = self.nodes[SED2].get_addrs()
        self.assertTrue(any('2001:2:0:1' in addr[0:10] for addr in addrs))
        self.assertFalse(any('2001:2:0:2' in addr[0:10] for addr in addrs))
        for addr in addrs:
            if addr[0:10] == '2001:2:0:1' or addr[0:10] == '2001:2:0:2':
                self.assertTrue(self.nodes[LEADER].ping(addr))

    def METHOD_NAME(self, pv):
        """Packet-level verification of steps 3-9 of the test spec."""
        # NOTE(review): presumably the thread_cert packet-verification hook
        # (conventionally named ``verify``) -- confirm against the framework.
        pkts = pv.pkts
        pv.summary.show()
        ROUTER = pv.vars['ROUTER']
        MED = pv.vars['MED']
        SED = pv.vars['SED']
        # All packets sent by the DUT.
        _rpkts = pkts.filter_wpan_src64(ROUTER)
        # Step 3: The DUT MUST send properly formatted MLE Advertisements
        # The DUT MUST send a CoAP Server Data Notification message
        # with the server’s information (Prefix, Border Router) to the Leader
        _rpkts.filter_mle_cmd(MLE_CHILD_ID_REQUEST).must_next()
        _rpkts.filter_mle_cmd(MLE_ADVERTISEMENT).must_next().must_verify(
            lambda p: {SOURCE_ADDRESS_TLV, LEADER_DATA_TLV, ROUTE64_TLV} == set(p.mle.tlv.type))
        _pkt = _rpkts.filter_coap_request(SVR_DATA_URI).must_next()
        _pkt.must_verify(lambda p: p.wpan.dst16 == pv.vars['LEADER_RLOC16'] and {
            Ipv6Addr('2001:2:0:1::'), Ipv6Addr('2001:2:0:2::')
        } == set(p.thread_nwd.tlv.prefix) and p.thread_nwd.tlv.border_router.flag.p == [1, 1] and
            p.thread_nwd.tlv.border_router.flag.s == [1, 1] and
            p.thread_nwd.tlv.border_router.flag.r == [1, 1] and
            p.thread_nwd.tlv.border_router.flag.o == [1, 1] and
            p.thread_nwd.tlv.stable == [1, 1, 0, 0])
        # Independent cursors so the MED and SED checks below do not consume
        # each other's packets.
        _rpkts_med = _rpkts.copy()
        _rpkts_sed = _rpkts.copy()
        # Step 4: Automatically transmits a 2.04 Changed CoAP response to the DUT
        # Step 5: The DUT MUST send a multicast MLE Data Response
        _rpkts.filter_mle_cmd(MLE_DATA_RESPONSE).must_next().must_verify(
            lambda p: {SOURCE_ADDRESS_TLV, LEADER_DATA_TLV, NETWORK_DATA_TLV, ACTIVE_TIMESTAMP_TLV} == set(
                p.mle.tlv.type) and {Ipv6Addr('2001:2:0:1::'), Ipv6Addr('2001:2:0:2::')} == set(
                    p.thread_nwd.tlv.prefix) and p.thread_nwd.tlv.border_router.flag.p == [1, 1] and
            p.thread_nwd.tlv.border_router.flag.s == [1, 1] and
            p.thread_nwd.tlv.border_router.flag.r == [1, 1] and
            p.thread_nwd.tlv.border_router.flag.o == [1, 1] and
            p.thread_nwd.tlv.stable == [0, 1, 1, 1, 0, 0, 0])
        # Step 6: The DUT MUST send a Child Update Response to MED_1
        _rpkts_med.filter_wpan_dst64(MED).filter_mle_cmd(MLE_CHILD_UPDATE_RESPONSE).must_next().must_verify(
            lambda p: {SOURCE_ADDRESS_TLV, MODE_TLV, ADDRESS_REGISTRATION_TLV} <= set(p.mle.tlv.type))
        # Step 7: The DUT MUST send an MLE Child Update Request to SED_1
        _rpkts_sed.filter_wpan_dst64(SED).filter_mle_cmd(MLE_CHILD_UPDATE_REQUEST).must_next().must_verify(
            lambda p: {Ipv6Addr('2001:2:0:1::')} == set(p.thread_nwd.tlv.prefix) and
            p.thread_nwd.tlv.border_router_16 == [0xFFFE])
        # Step 8: SED_1 send its configured global address to the DUT
        # Step 9: The DUT MUST send a Child Update Response to SED_1
        _sed_pkt = pkts.range(
            _rpkts_sed.index).filter_wpan_src64(SED).filter_mle_cmd(MLE_CHILD_UPDATE_REQUEST).must_next()
        _rpkts_sed.filter_wpan_dst64(SED).filter_mle_cmd(MLE_CHILD_UPDATE_RESPONSE).must_next().must_verify(
            lambda p: {SOURCE_ADDRESS_TLV, MODE_TLV, ADDRESS_REGISTRATION_TLV} <= set(p.mle.tlv.type) and set(
                p.mle.tlv.addr_reg_iid) < set(_sed_pkt.mle.tlv.addr_reg_iid))
# Allow running this certification test directly as a script.
if __name__ == '__main__':
    unittest.main()
1,814 | test xml case f | """
tests.unit.xmlutil_test
~~~~~~~~~~~~~~~~~~~~
"""
import xml.etree.ElementTree as ET
import salt.utils.xmlutil as xml
from tests.support.unit import TestCase
class XMLUtilTestCase(TestCase):
    """
    Tests that salt.utils.xmlutil properly parses XML data and returns as a properly formatted
    dictionary. The default method of parsing will ignore attributes and return only the child
    items. The full method will include parsing attributes.
    """

    def setUp(self):
        # Populate our use cases for specific XML formats.  Each case maps a
        # raw XML snippet to the expected "legacy" (attributes ignored) and
        # "full" (attributes included) parse results.
        self.cases = {
            "a": {
                "xml": "<parent>data</parent>",
                "legacy": {"parent": "data"},
                "full": "data",
            },
            "b": {
                "xml": '<parent value="data">data</parent>',
                "legacy": {"parent": "data"},
                "full": {"parent": "data", "value": "data"},
            },
            "c": {
                "xml": (
                    '<parent><child>data</child><child value="data">data</child>'
                    '<child value="data"/><child/></parent>'
                ),
                "legacy": {
                    "child": [
                        "data",
                        {"child": "data"},
                        {"child": None},
                        {"child": None},
                    ]
                },
                "full": {
                    "child": [
                        "data",
                        {"child": "data", "value": "data"},
                        {"value": "data"},
                        None,
                    ]
                },
            },
            "d": {
                "xml": (
                    '<parent value="data" another="data"><child>data</child></parent>'
                ),
                "legacy": {"child": "data"},
                "full": {"child": "data", "another": "data", "value": "data"},
            },
            "e": {
                "xml": (
                    '<parent value="data" another="data"><child'
                    ' value="data">data</child></parent>'
                ),
                "legacy": {"child": "data"},
                "full": {
                    "child": {"child": "data", "value": "data"},
                    "another": "data",
                    "value": "data",
                },
            },
            "f": {
                "xml": (
                    '<parent><child><sub-child value="data">data</sub-child></child>'
                    "<child>data</child></parent>"
                ),
                "legacy": {"child": [{"sub-child": "data"}, {"child": "data"}]},
                "full": {
                    "child": [
                        {"sub-child": {"value": "data", "sub-child": "data"}},
                        "data",
                    ]
                },
            },
        }

    def _assert_case(self, case, attributes=None):
        """Parse the XML for *case* and compare against the expected result.

        attributes=None  -> call to_dict() with its default (legacy) behavior
        attributes=False -> explicit legacy parsing (attributes ignored)
        attributes=True  -> full parsing (attributes included)
        """
        xmldata = ET.fromstring(self.cases[case]["xml"])
        if attributes is None:
            result = xml.to_dict(xmldata)
        else:
            result = xml.to_dict(xmldata, attributes)
        expected = self.cases[case]["full" if attributes else "legacy"]
        self.assertEqual(result, expected)

    def test_xml_case_a(self):
        self._assert_case("a")

    def test_xml_case_a_legacy(self):
        self._assert_case("a", False)

    def test_xml_case_a_full(self):
        self._assert_case("a", True)

    def test_xml_case_b(self):
        self._assert_case("b")

    def test_xml_case_b_legacy(self):
        self._assert_case("b", False)

    def test_xml_case_b_full(self):
        self._assert_case("b", True)

    def test_xml_case_c(self):
        self._assert_case("c")

    def test_xml_case_c_legacy(self):
        self._assert_case("c", False)

    def test_xml_case_c_full(self):
        self._assert_case("c", True)

    def test_xml_case_d(self):
        self._assert_case("d")

    def test_xml_case_d_legacy(self):
        self._assert_case("d", False)

    def test_xml_case_d_full(self):
        self._assert_case("d", True)

    def test_xml_case_e(self):
        self._assert_case("e")

    def test_xml_case_e_legacy(self):
        self._assert_case("e", False)

    def test_xml_case_e_full(self):
        self._assert_case("e", True)

    # Renamed from a placeholder so the test runner discovers it ("test_*").
    def test_xml_case_f(self):
        self._assert_case("f")

    def test_xml_case_f_legacy(self):
        self._assert_case("f", False)

    def test_xml_case_f_full(self):
        self._assert_case("f", True)
1,815 | get indexes | import numpy as np
import random
import os
from functools import lru_cache
from pathlib import Path
# Various pre-crafted datasets/variables for testing
# !!! Must not be changed -- only appended !!!
# while testing numpy we better not rely on numpy to produce random
# sequences
random.seed(1)
# but will seed it nevertheless
np.random.seed(1)
# dimensions of the full-size sample squares
nx, ny = 1000, 1000
# reduced squares based on indexes_rand, primarily for testing more
# time-consuming functions (ufunc, linalg, etc)
nxs, nys = 100, 100
# a list of interesting types to test
TYPES1 = [
    'int16', 'float16',
    'int32', 'float32',
    'int64', 'float64', 'complex64',
    'complex128',
]
# dtypes exercised by the DLPack benchmarks (TYPES1 plus bool)
DLPACK_TYPES = [
    'int16', 'float16',
    'int32', 'float32',
    'int64', 'float64', 'complex64',
    'complex128', 'bool',
]
# Path for caching generated sample data between benchmark runs
CACHE_ROOT = Path(__file__).resolve().parent.parent / 'env' / 'numpy_benchdata'
# values which will be used to construct our sample data matrices
# replicate 10 times to speed up initial imports of this helper
# and generate some redundancy
@lru_cache(typed=True)
def get_values():
    """Return the shared 1-D pool of sample values (seeded, deterministic)."""
    rng = np.random.RandomState(1)
    base = rng.uniform(0, 100, size=nx * ny // 10)
    return np.tile(base, 10)
@lru_cache(typed=True)
def get_square(dtype):
    """Return an (nx, ny) sample matrix of the requested dtype."""
    arr = get_values().astype(dtype=dtype).reshape((nx, ny))
    # For complex dtypes, add the transpose as the imaginary part so the
    # imaginary component is not degenerate.
    if arr.dtype.kind == 'c':
        arr += arr.T * 1j
    return arr
@lru_cache(typed=True)
def get_squares():
    """Map each dtype name in TYPES1 to its full-size sample square."""
    squares = {}
    for dtype in TYPES1:
        squares[dtype] = get_square(dtype)
    return squares
@lru_cache(typed=True)
def get_square_(dtype):
    """Return the top-left (nxs, nys) corner of the full sample square."""
    return get_square(dtype)[:nxs, :nys]
@lru_cache(typed=True)
def get_squares_():
    """Smaller (nxs, nys) sample squares, one per dtype in TYPES1."""
    return dict((t, get_square_(t)) for t in TYPES1)
@lru_cache(typed=True)
def get_vectors():
    """First row of each full-size square, keyed by dtype name."""
    return {dtype: square[0] for dtype, square in get_squares().items()}
@lru_cache(typed=True)
def get_indexes():
    """Index vector 0..nx-1 with two entries missing.

    The original built list(range(nx)) and popped position 5 and then
    position 95 of the shortened list, i.e. the values 5 and 96.
    """
    return np.array([i for i in range(nx) if i not in (5, 96)])
@lru_cache(typed=True)
def get_indexes_rand():
    """Deterministically shuffled copy of get_indexes()."""
    shuffled = get_indexes().tolist()
    random.Random(1).shuffle(shuffled)
    return np.array(shuffled)
@lru_cache(typed=True)
def METHOD_NAME():
    """Entries of get_indexes() that fall inside the small (nxs) square."""
    idx = get_indexes()
    return idx[idx < nxs]
@lru_cache(typed=True)
def get_indexes_rand_():
    """Entries of get_indexes_rand() that fall inside the small square."""
    rand_idx = get_indexes_rand()
    return rand_idx[rand_idx < nxs]
@lru_cache(typed=True)
def get_data(size, dtype, ip_num=0, zeros=False, finite=True, denormal=False):
    """
    Generates a cached random array that covers several scenarios that
    may affect the benchmark for fairness and to stabilize the benchmark.
    Parameters
    ----------
    size: int
        Array length.
    dtype: dtype or dtype specifier
    ip_num: int
        Input number, to avoid memory overload
        and to provide unique data for each operand.
    zeros: bool
        Spreading zeros along with generated data.
    finite: bool
        Avoid spreading fp special cases nan/inf.
    denormal:
        Spreading subnormal numbers along with generated data.
    """
    dtype = np.dtype(dtype)
    dname = dtype.name
    # The on-disk cache key encodes every parameter that changes the data.
    cache_name = f'{dname}_{size}_{ip_num}_{int(zeros)}'
    if dtype.kind in 'fc':
        cache_name += f'{int(finite)}{int(denormal)}'
    cache_name += '.bin'
    cache_path = CACHE_ROOT / cache_name
    if cache_path.exists():
        # Fast path: reuse previously generated data.
        return np.fromfile(cache_path, dtype)
    array = np.ones(size, dtype)
    rands = []
    if dtype.kind == 'i':
        # Signed ints: one slice per byte-width, negative and positive ranges,
        # clamped to what the target dtype can actually hold.
        dinfo = np.iinfo(dtype)
        scale = 8
        if zeros:
            scale += 1
        lsize = size // scale
        for low, high in (
            (-0x80, -1),
            (1, 0x7f),
            (-0x8000, -1),
            (1, 0x7fff),
            (-0x80000000, -1),
            (1, 0x7fffffff),
            (-0x8000000000000000, -1),
            (1, 0x7fffffffffffffff),
        ):
            rands += [np.random.randint(
                max(low, dinfo.min),
                min(high, dinfo.max),
                lsize, dtype
            )]
    elif dtype.kind == 'u':
        # Unsigned ints: one slice per byte-width upper bound.
        dinfo = np.iinfo(dtype)
        scale = 4
        if zeros:
            scale += 1
        lsize = size // scale
        for high in (0xff, 0xffff, 0xffffffff, 0xffffffffffffffff):
            rands += [np.random.randint(1, min(high, dinfo.max), lsize, dtype)]
    elif dtype.kind in 'fc':
        # Floats/complex: uniform base slice plus optional nan/inf and
        # subnormal slices, as requested.
        scale = 1
        if zeros:
            scale += 1
        if not finite:
            scale += 2
        if denormal:
            scale += 1
        dinfo = np.finfo(dtype)
        lsize = size // scale
        rands = [np.random.rand(lsize).astype(dtype)]
        if not finite:
            rands += [
                np.empty(lsize, dtype=dtype), np.empty(lsize, dtype=dtype)
            ]
            rands[1].fill(float('nan'))
            rands[2].fill(float('inf'))
        if denormal:
            rands += [np.empty(lsize, dtype=dtype)]
            rands[-1].fill(dinfo.smallest_subnormal)
    if rands:
        if zeros:
            rands += [np.zeros(lsize, dtype)]
        # Interleave the slices through the output with a fixed stride so all
        # value classes are spread evenly along the array.
        stride = len(rands)
        for start, r in enumerate(rands):
            array[start:len(r)*stride:stride] = r
        if not CACHE_ROOT.exists():
            CACHE_ROOT.mkdir(parents=True)
        array.tofile(cache_path)
    return array
class Benchmark:
    # Base class for the benchmark classes in this suite; intentionally
    # empty -- the runner discovers subclasses by convention.
    pass
1,816 | is required checksum | """
This linter ensures that users don't set a SHA hash checksum in Bazel for the http_archive.
Although the security practice of setting the checksum is good, it doesn't work when the
archive is downloaded from some sites like GitHub because it can change. Specifically,
GitHub gives no guarantee to keep the same value forever. Check for more details at
https://github.com/community/community/discussions/46034.
"""
import argparse
import json
import re
import shlex
import subprocess
import xml.etree.ElementTree as ET
from enum import Enum
from typing import List, NamedTuple, Optional, Set
from urllib.parse import urlparse
# Code reported in LintMessage.code for findings from this linter.
LINTER_CODE = "BAZEL_LINTER"
# Matches lines like: sha256 = "<64 hex chars>",  capturing the checksum.
SHA256_REGEX = re.compile(r"\s*sha256\s*=\s*['\"](?P<sha256>[a-zA-Z0-9]{64})['\"]\s*,")
# Hosts whose release archives may be regenerated, so checksums can drift.
DOMAINS_WITH_UNSTABLE_CHECKSUM = {"github.com"}
class LintSeverity(str, Enum):
    """Severity levels understood by the lint runner."""

    ERROR = "error"
    WARNING = "warning"
    ADVICE = "advice"
    DISABLED = "disabled"
class LintMessage(NamedTuple):
    """A single lint finding, serialized to JSON one message per line."""

    path: Optional[str]  # file the finding applies to; None for tool errors
    line: Optional[int]
    char: Optional[int]
    code: str  # linter identifier (LINTER_CODE)
    severity: LintSeverity
    name: str
    original: Optional[str]  # original file content, used for -a patches
    replacement: Optional[str]  # patched file content
    description: Optional[str]
def METHOD_NAME(urls: List[Optional[str]]) -> bool:
    """Return True when a sha256 checksum is required for these URLs.

    A checksum is required only when the rule has at least one URL and none
    of the (non-empty) URLs point at a host with unstable archive checksums.
    """
    if not urls:
        return False
    return all(
        urlparse(url).hostname not in DOMAINS_WITH_UNSTABLE_CHECKSUM
        for url in urls
        if url
    )
def get_disallowed_checksums(
    binary: str,
) -> Set[str]:
    """
    Return the set of disallowed checksums from all http_archive rules
    """
    # Ask bazel for every external http_archive dependency, in XML form.
    query = subprocess.run(
        [binary, "query", "kind(http_archive, //external:*)", "--output=xml"],
        capture_output=True,
        check=True,
        text=True,
    )
    root = ET.fromstring(query.stdout)

    disallowed: Set[str] = set()
    for rule in root.findall('.//rule[@class="http_archive"]'):
        urls_node = rule.find('.//list[@name="urls"]')
        checksum_node = rule.find('.//string[@name="sha256"]')
        # A rule without URLs or without a sha256 has nothing to flag.
        if urls_node is None or checksum_node is None:
            continue
        checksum = checksum_node.get("value")
        if not checksum:
            continue
        urls = [n.get("value") for n in urls_node.findall(".//string")]
        if not METHOD_NAME(urls):
            disallowed.add(checksum)
    return disallowed
def check_bazel(
    filename: str,
    disallowed_checksums: Set[str],
) -> List[LintMessage]:
    """Produce a patch message dropping sha256 lines whose checksum is
    disallowed; return [] when the file needs no change."""
    with open(filename) as f:
        lines = f.readlines()

    original = "".join(lines)
    kept = []
    for line in lines:
        match = SHA256_REGEX.match(line)
        if match and match.group("sha256") in disallowed_checksums:
            # Drop this checksum line from the patched output.
            continue
        kept.append(line)
    replacement = "".join(kept)

    if replacement == original:
        return []
    return [
        LintMessage(
            path=filename,
            line=None,
            char=None,
            code=LINTER_CODE,
            severity=LintSeverity.ADVICE,
            name="format",
            original=original,
            replacement=replacement,
            description="Found redundant SHA checksums. Run `lintrunner -a` to apply this patch.",
        )
    ]
def main() -> None:
    """Entry point: query bazel for disallowed http_archive checksums, then
    lint every given file, printing one JSON LintMessage per finding."""
    parser = argparse.ArgumentParser(
        description="A custom linter to detect redundant SHA checksums in Bazel",
        fromfile_prefix_chars="@",
    )
    parser.add_argument(
        "--binary",
        required=True,
        help="bazel binary path",
    )
    parser.add_argument(
        "filenames",
        nargs="+",
        help="paths to lint",
    )
    args = parser.parse_args()
    try:
        disallowed_checksums = get_disallowed_checksums(args.binary)
    except subprocess.CalledProcessError as err:
        # bazel itself failed: surface the command output as a lint message
        # instead of crashing the linter.
        err_msg = LintMessage(
            path=None,
            line=None,
            char=None,
            # Fixed: previously this was __file__, which emitted the script
            # path as the linter code, inconsistent with the handler below.
            code=LINTER_CODE,
            severity=LintSeverity.ADVICE,
            name="command-failed",
            original=None,
            replacement=None,
            description=(
                f"COMMAND (exit code {err.returncode})\n"
                f"{shlex.join(err.cmd)}\n\n"
                f"STDERR\n{err.stderr or '(empty)'}\n\n"
                f"STDOUT\n{err.stdout or '(empty)'}"
            ),
        )
        print(json.dumps(err_msg._asdict()))
        return
    except Exception as e:
        err_msg = LintMessage(
            path=None,
            line=None,
            char=None,
            code=LINTER_CODE,
            severity=LintSeverity.ERROR,
            name="command-failed",
            original=None,
            replacement=None,
            description=(f"Failed due to {e.__class__.__name__}:\n{e}"),
        )
        print(json.dumps(err_msg._asdict()), flush=True)
        exit(0)
    for filename in args.filenames:
        for lint_message in check_bazel(filename, disallowed_checksums):
            print(json.dumps(lint_message._asdict()), flush=True)
# Script entry point (invoked by lintrunner).
if __name__ == "__main__":
    main()
1,817 | test with error while getting key | from unittest.mock import patch
import boto3
import pytest
from botocore.exceptions import ClientError
from moto import mock_s3
from celery import states
from celery.backends.s3 import S3Backend
from celery.exceptions import ImproperlyConfigured
class test_S3Backend:
    """Unit tests for celery's S3 result backend: configuration validation,
    boto3 session wiring, and get/set/delete behavior against a moto-mocked
    S3.

    NOTE(review): ``self.app`` is presumably provided by celery's pytest
    fixtures (conftest) -- confirm.
    """

    @patch('botocore.credentials.CredentialResolver.load_credentials')
    def test_with_missing_aws_credentials(self, mock_load_credentials):
        self.app.conf.s3_access_key_id = None
        self.app.conf.s3_secret_access_key = None
        self.app.conf.s3_bucket = 'bucket'
        mock_load_credentials.return_value = None
        with pytest.raises(ImproperlyConfigured, match="Missing aws s3 creds"):
            S3Backend(app=self.app)

    @patch('botocore.credentials.CredentialResolver.load_credentials')
    def test_with_no_credentials_in_config_attempts_to_load_credentials(self, mock_load_credentials):
        self.app.conf.s3_access_key_id = None
        self.app.conf.s3_secret_access_key = None
        self.app.conf.s3_bucket = 'bucket'
        S3Backend(app=self.app)
        mock_load_credentials.assert_called_once()

    @patch('botocore.credentials.CredentialResolver.load_credentials')
    def test_with_credentials_in_config_does_not_search_for_credentials(self, mock_load_credentials):
        self.app.conf.s3_access_key_id = 'somekeyid'
        self.app.conf.s3_secret_access_key = 'somesecret'
        self.app.conf.s3_bucket = 'bucket'
        S3Backend(app=self.app)
        mock_load_credentials.assert_not_called()

    def test_with_no_given_bucket(self):
        self.app.conf.s3_access_key_id = 'somekeyid'
        self.app.conf.s3_secret_access_key = 'somesecret'
        self.app.conf.s3_bucket = None
        with pytest.raises(ImproperlyConfigured, match='Missing bucket name'):
            S3Backend(app=self.app)

    @pytest.mark.parametrize('aws_region',
                             [None, 'us-east-1'],
                             ids=['No given aws region',
                                  'Specific aws region'])
    @patch('celery.backends.s3.boto3')
    def test_it_creates_an_aws_s3_connection(self, mock_boto3, aws_region):
        self.app.conf.s3_access_key_id = 'somekeyid'
        self.app.conf.s3_secret_access_key = 'somesecret'
        self.app.conf.s3_bucket = 'bucket'
        self.app.conf.s3_region = aws_region
        S3Backend(app=self.app)
        mock_boto3.Session.assert_called_once_with(
            aws_access_key_id='somekeyid',
            aws_secret_access_key='somesecret',
            region_name=aws_region)

    @pytest.mark.parametrize('endpoint_url',
                             [None, 'https://custom.s3'],
                             ids=['No given endpoint url',
                                  'Custom endpoint url'])
    @patch('celery.backends.s3.boto3')
    def test_it_creates_an_aws_s3_resource(self,
                                           mock_boto3,
                                           endpoint_url):
        self.app.conf.s3_access_key_id = 'somekeyid'
        self.app.conf.s3_secret_access_key = 'somesecret'
        self.app.conf.s3_bucket = 'bucket'
        self.app.conf.s3_endpoint_url = endpoint_url
        S3Backend(app=self.app)
        mock_boto3.Session().resource.assert_called_once_with(
            's3', endpoint_url=endpoint_url)

    @pytest.mark.parametrize("key", ['uuid', b'uuid'])
    @mock_s3
    def test_set_and_get_a_key(self, key):
        self._mock_s3_resource()
        self.app.conf.s3_access_key_id = 'somekeyid'
        self.app.conf.s3_secret_access_key = 'somesecret'
        self.app.conf.s3_bucket = 'bucket'
        s3_backend = S3Backend(app=self.app)
        s3_backend._set_with_state(key, 'another_status', states.SUCCESS)
        assert s3_backend.get(key) == 'another_status'

    @mock_s3
    def test_set_and_get_a_result(self):
        self._mock_s3_resource()
        self.app.conf.result_serializer = 'pickle'
        self.app.conf.s3_access_key_id = 'somekeyid'
        self.app.conf.s3_secret_access_key = 'somesecret'
        self.app.conf.s3_bucket = 'bucket'
        s3_backend = S3Backend(app=self.app)
        s3_backend.store_result('foo', 'baar', 'STARTED')
        value = s3_backend.get_result('foo')
        assert value == 'baar'

    @mock_s3
    def test_get_a_missing_key(self):
        self._mock_s3_resource()
        self.app.conf.s3_access_key_id = 'somekeyid'
        self.app.conf.s3_secret_access_key = 'somesecret'
        self.app.conf.s3_bucket = 'bucket'
        s3_backend = S3Backend(app=self.app)
        result = s3_backend.get('uuidddd')
        assert result is None

    # Renamed from a placeholder so pytest discovers and runs this test.
    @patch('celery.backends.s3.boto3')
    def test_with_error_while_getting_key(self, mock_boto3):
        error = ClientError({'Error': {'Code': '403',
                                       'Message': 'Permission denied'}},
                            'error')
        mock_boto3.Session().resource().Object().load.side_effect = error
        self.app.conf.s3_access_key_id = 'somekeyid'
        self.app.conf.s3_secret_access_key = 'somesecret'
        self.app.conf.s3_bucket = 'bucket'
        s3_backend = S3Backend(app=self.app)
        with pytest.raises(ClientError):
            s3_backend.get('uuidddd')

    @pytest.mark.parametrize("key", ['uuid', b'uuid'])
    @mock_s3
    def test_delete_a_key(self, key):
        self._mock_s3_resource()
        self.app.conf.s3_access_key_id = 'somekeyid'
        self.app.conf.s3_secret_access_key = 'somesecret'
        self.app.conf.s3_bucket = 'bucket'
        s3_backend = S3Backend(app=self.app)
        s3_backend._set_with_state(key, 'another_status', states.SUCCESS)
        assert s3_backend.get(key) == 'another_status'
        s3_backend.delete(key)
        assert s3_backend.get(key) is None

    @mock_s3
    def test_with_a_non_existing_bucket(self):
        self._mock_s3_resource()
        self.app.conf.s3_access_key_id = 'somekeyid'
        self.app.conf.s3_secret_access_key = 'somesecret'
        self.app.conf.s3_bucket = 'bucket_not_exists'
        s3_backend = S3Backend(app=self.app)
        with pytest.raises(ClientError,
                           match=r'.*The specified bucket does not exist'):
            s3_backend._set_with_state('uuid', 'another_status', states.SUCCESS)

    def _mock_s3_resource(self):
        # Create AWS s3 Bucket for moto.
        session = boto3.Session(
            aws_access_key_id='moto_key_id',
            aws_secret_access_key='moto_secret_key',
            region_name='us-east-1'
        )
        s3 = session.resource('s3')
        s3.create_bucket(Bucket='bucket')
1,818 | get file map f | from random import shuffle
class AniDBMapper:
    """Translate between AniDB UDP API field names and the amask/fmask hex
    bit strings the API expects, and back again."""

    # Field names that are never encoded into a mask, even if requested.
    blacklist = ("unused", "retired", "reserved")

    def getAnimeBitsA(self, amask):
        """Hex amask string for the wanted ANIME command fields."""
        map = self.getAnimeMapA()
        return self._getBitChain(map, amask)

    def getAnimeCodesA(self, aBitChain):
        """Field names encoded in an ANIME amask hex string."""
        amap = self.getAnimeMapA()
        return self._getCodes(amap, aBitChain)

    def getFileBitsF(self, fmask):
        """Hex fmask string for the wanted FILE command fields."""
        fmap = self.METHOD_NAME()
        return self._getBitChain(fmap, fmask)

    def getFileCodesF(self, bitChainF):
        """Field names encoded in a FILE fmask hex string."""
        fmap = self.METHOD_NAME()
        return self._getCodes(fmap, bitChainF)

    def getFileBitsA(self, amask):
        """Hex amask string for the wanted FILE command anime fields."""
        amap = self.getFileMapA()
        return self._getBitChain(amap, amask)

    def getFileCodesA(self, bitChainA):
        """Field names encoded in a FILE amask hex string."""
        amap = self.getFileMapA()
        return self._getCodes(amap, bitChainA)

    def _getBitChain(self, map, wanted):
        """Return an hex string with the correct bit set corresponding to the wanted fields in the map"""
        bit = 0
        for index, field in enumerate(map):
            if field in wanted and field not in self.blacklist:
                bit ^= 1 << (len(map) - index - 1)
        # One hex digit encodes four map entries; format() zero-pads to the
        # full width directly, replacing the fragile
        # hex()/lstrip("0x")/rstrip("L") string surgery (Python 2 residue).
        return format(bit, "0{}x".format(len(map) // 4))

    @staticmethod
    def _getCodes(map, bitChain):
        """Returns a list with the corresponding fields as set in the bitChain (hex string)"""
        codeList = []
        bitChain = int(bitChain, 16)
        mapLength = len(map)
        for i in reversed(list(range(mapLength))):
            if bitChain & (2**i):
                codeList.append(map[mapLength - i - 1])
        return codeList

    @staticmethod
    def getAnimeMapA():
        """Field layout of the ANIME command amask (one line per byte)."""
        # only change this if the API changes
        map = [
            "aid",
            "unused",
            "year",
            "type",
            "related_aid_list",
            "related_aid_type",
            "category_list",
            "category_weight_list",
            "romaji_name",
            "kanji_name",
            "english_name",
            "other_name",
            "short_name_list",
            "synonym_list",
            "retired",
            "retired",
            "episodes",
            "highest_episode_number",
            "special_ep_count",
            "air_date",
            "end_date",
            "url",
            "picname",
            "category_id_list",
            "rating",
            "vote_count",
            "temp_rating",
            "temp_vote_count",
            "average_review_rating",
            "review_count",
            "award_list",
            "is_18_restricted",
            "anime_planet_id",
            "ANN_id",
            "allcinema_id",
            "AnimeNfo_id",
            "unused",
            "unused",
            "unused",
            "date_record_updated",
            "character_id_list",
            "creator_id_list",
            "main_creator_id_list",
            "main_creator_name_list",
            "unused",
            "unused",
            "unused",
            "unused",
            "specials_count",
            "credits_count",
            "other_count",
            "trailer_count",
            "parody_count",
            "unused",
            "unused",
            "unused",
        ]
        return map

    @staticmethod
    def METHOD_NAME():
        """Field layout of the FILE command fmask (one line per byte)."""
        # only change this if the API changes
        map = [
            "unused",
            "aid",
            "eid",
            "gid",
            "mylist_id",
            "list_other_episodes",
            "IsDeprecated",
            "state",
            "size",
            "ed2k",
            "md5",
            "sha1",
            "crc32",
            "unused",
            "unused",
            "reserved",
            "quality",
            "source",
            "audio_codec_list",
            "audio_bitrate_list",
            "video_codec",
            "video_bitrate",
            "video_resolution",
            "file_type_extension",
            "dub_language",
            "sub_language",
            "length_in_seconds",
            "description",
            "aired_date",
            "unused",
            "unused",
            "anidb_file_name",
            "mylist_state",
            "mylist_filestate",
            "mylist_viewed",
            "mylist_viewdate",
            "mylist_storage",
            "mylist_source",
            "mylist_other",
            "unused",
        ]
        return map

    @staticmethod
    def getFileMapA():
        """Field layout of the FILE command amask (one line per byte)."""
        # only change this if the API changes
        map = [
            "anime_total_episodes",
            "highest_episode_number",
            "year",
            "type",
            "related_aid_list",
            "related_aid_type",
            "category_list",
            "reserved",
            "romaji_name",
            "kanji_name",
            "english_name",
            "other_name",
            "short_name_list",
            "synonym_list",
            "retired",
            "retired",
            "epno",
            "ep_name",
            "ep_romaji_name",
            "ep_kanji_name",
            "episode_rating",
            "episode_vote_count",
            "unused",
            "unused",
            "group_name",
            "group_short_name",
            "unused",
            "unused",
            "unused",
            "unused",
            "unused",
            "date_aid_record_updated",
        ]
        return map

    def checkMapping(self, verbos=False):
        """Print round-trip self-check results for the FILE masks."""
        print("------")
        print(f"File F: {self.checkMapFileF(verbos)}")
        print("------")
        print(f"File A: {self.checkMapFileA(verbos)}")

    def checkMapFileF(self, verbos=False):
        return self._checkMapGeneral(self.METHOD_NAME, self.getFileBitsF, self.getFileCodesF, verbos=verbos)

    def checkMapFileA(self, verbos=False):
        return self._checkMapGeneral(self.getFileMapA, self.getFileBitsA, self.getFileCodesA, verbos=verbos)

    def _checkMapGeneral(self, getGeneralMap, getBits, getCodes, verbos=False):
        """Round-trip a random 5-field mask through bits->codes->bits and
        report whether both representations survive unchanged."""
        map = getGeneralMap()
        shuffle(map)
        mask = [elem for elem in map if elem not in self.blacklist][:5]
        bits = getBits(mask)
        mask_re = getCodes(bits)
        bits_re = getBits(mask_re)
        if verbos:
            print(mask)
            print(mask_re)
            print(bits)
            print(bits_re)
            print(f"bits are: {bits_re == bits}")
            print(f"map is: {sorted(mask_re) == sorted(mask)}")
        return (bits_re == bits) and sorted(mask_re) == sorted(mask)
1,819 | get kriging kernel distance | import numpy as np
def get_spatial_interpolation_kernel(
    source_location,
    target_location,
    method="kriging",
    sigma_um=20.0,
    p=1,
    num_closest=3,
    sparse_thresh=None,
    dtype="float32",
    force_extrapolate=False,
):
    """
    Compute the spatial kernel for linear spatial interpolation.
    This is used for interpolation of bad channels or to correct the drift
    by interpolating between contacts.
    For reference, here is a simple overview on spatial interpolation:
    https://www.aspexit.com/spatial-data-interpolation-tin-idw-kriging-block-kriging-co-kriging-what-are-the-differences/
    Parameters
    ----------
    source_location: array shape (m, 2)
        The recording extractor to be transformed
    target_location: array shape (n, 2)
        Scale for the output distribution
    method: 'kriging' or 'idw' or 'nearest'
        Choice of the method
        'kriging' : the same one used in kilosort
        'idw' : inverse distance weighted
        'nearest' : use nearest channel
    sigma_um : float or list (default 20.)
        Used in the 'kriging' formula. When list, it needs to have 2 elements (for the x and y directions).
    p: int (default 1)
        Used in the 'kriging' formula
    sparse_thresh: None or float (default None)
        If not None for 'kriging' force small value to be zeros to get a sparse matrix.
    num_closest: int (default 3)
        Used for 'idw'
    force_extrapolate: bool (false by default)
        How to handle when target location are outside source location.
        When False : no extrapolation all target location outside are set to zero.
        When True : extrapolation done with the formula of the method.
        In that case the sum of the kernel is not force to be 1.
    Returns
    -------
    interpolation_kernel: array (m, n)
    """
    import scipy.spatial

    # Flag target positions inside the bounding box of the source positions;
    # positions outside are zeroed below unless force_extrapolate is set.
    target_is_inside = np.ones(target_location.shape[0], dtype=bool)
    for dim in range(source_location.shape[1]):
        l0, l1 = np.min(source_location[:, dim]), np.max(source_location[:, dim])
        target_is_inside &= (target_location[:, dim] >= l0) & (target_location[:, dim] <= l1)
    if method == "kriging":
        # this is an adaptation of the pykilosort implementation by Kush Benga
        # https://github.com/int-brain-lab/pykilosort/blob/ibl_prod/pykilosort/datashift2.py#L352
        Kxx = METHOD_NAME(source_location, source_location, sigma_um, p)
        Kyx = METHOD_NAME(target_location, source_location, sigma_um, p)
        # Small ridge term (1e-6 * I) keeps the pseudo-inverse stable.
        interpolation_kernel = Kyx @ np.linalg.pinv(Kxx + 1e-6 * np.eye(Kxx.shape[0]))
        interpolation_kernel = interpolation_kernel.T.copy()
        # sparsify
        if sparse_thresh is not None:
            interpolation_kernel[interpolation_kernel < sparse_thresh] = 0.0
        # ensure sum = 1 for target inside
        s = np.sum(interpolation_kernel, axis=0)
        interpolation_kernel[:, target_is_inside] /= s[target_is_inside].reshape(1, -1)
    elif method == "idw":
        # Inverse-distance weighting over the num_closest nearest sources.
        distances = scipy.spatial.distance.cdist(source_location, target_location, metric="euclidean")
        interpolation_kernel = np.zeros((source_location.shape[0], target_location.shape[0]), dtype="float64")
        for c in range(target_location.shape[0]):
            ind_sorted = np.argsort(distances[:, c])
            chan_closest = ind_sorted[:num_closest]
            dists = distances[chan_closest, c]
            if dists[0] == 0.0:
                # no interpolation the first have zeros distance
                interpolation_kernel[chan_closest[0], c] = 1.0
            else:
                interpolation_kernel[chan_closest, c] = 1 / dists
        # ensure sum = 1 for target inside
        s = np.sum(interpolation_kernel, axis=0)
        interpolation_kernel[:, target_is_inside] /= s[target_is_inside].reshape(1, -1)
    elif method == "nearest":
        # One-hot kernel selecting the single closest source per target.
        distances = scipy.spatial.distance.cdist(source_location, target_location, metric="euclidean")
        interpolation_kernel = np.zeros((source_location.shape[0], target_location.shape[0]), dtype="float64")
        for c in range(target_location.shape[0]):
            ind_closest = np.argmin(distances[:, c])
            interpolation_kernel[ind_closest, c] = 1.0
    else:
        raise ValueError("get_interpolation_kernel wrong method")
    if not force_extrapolate:
        interpolation_kernel[:, ~target_is_inside] = 0
    return interpolation_kernel.astype(dtype)
def METHOD_NAME(locations_1, locations_2, sigma_um, p, distance_metric="euclidean"):
    """
    Get the kriging kernel between two sets of locations.

    Parameters
    ----------
    locations_1 / locations_2 : 2D np.array
        Locations of shape (N, D) where N is the number of
        channels and D is the spatial dimension (e.g. 2 for [x, y])
    sigma_um : float or list
        Scale parameter of the Gaussian kernel,
        typically the distance between contacts in micrometers.
        If sigma_um is a list then this mimics the Kilosort2.5 behavior, which
        uses a separate sigma for each dimension.
        In the latter case the distance metric is effectively a 'cityblock'.
    p : float
        Exponent applied to the scaled distance. Default
        in IBL kriging interpolation is 1.3.
    distance_metric : str, default "euclidean"
        Metric forwarded to scipy.spatial.distance.cdist (scalar-sigma case only).

    Returns
    -------
    kernel : np.array
        (N1, N2) array (i.e. locations_1 x locations_2) of Gaussian-kernel
        values between locations_1 and locations_2.
    """
    if np.isscalar(sigma_um):
        # import the submodule explicitly: a bare `import scipy` does not
        # guarantee that `scipy.spatial.distance` is loaded on older scipy
        import scipy.spatial

        dist = scipy.spatial.distance.cdist(locations_1, locations_2, metric=distance_metric)
        kernel = np.exp(-((dist / sigma_um) ** p))
    else:
        # Kilosort case: different sigmas along x and y; the implied metric
        # is a cityblock distance because the axes are penalized separately.
        sigma_x, sigma_y = sigma_um
        dist_x = np.abs(locations_1[:, 0][:, np.newaxis] - locations_2[:, 0][np.newaxis, :])
        dist_y = np.abs(locations_1[:, 1][:, np.newaxis] - locations_2[:, 1][np.newaxis, :])
        kernel = np.exp(-((dist_x / sigma_x) ** p) - (dist_y / sigma_y) ** p)
    return kernel
def get_kriging_channel_weights(contact_positions1, contact_positions2, sigma_um, p, weight_threshold=0.005):
    """
    Calculate weights for kriging interpolation. Weights below weight_threshold are set to 0.

    Based on the interpolate_bad_channels() function of the International Brain Laboratory.
    International Brain Laboratory et al. (2022). Spike sorting pipeline for the
    International Brain Laboratory. https://www.internationalbrainlab.com/repro-ephys
    """
    kernel = METHOD_NAME(contact_positions1, contact_positions2, sigma_um, p)
    # drop negligible contributions before normalizing each target column
    kernel[kernel < weight_threshold] = 0
    with np.errstate(divide="ignore", invalid="ignore"):
        kernel = kernel / np.sum(kernel, axis=0)[None, :]
    # zero-out entries that became tiny, or NaN from all-zero columns
    kernel[(kernel < weight_threshold) | np.isnan(kernel)] = 0
    return kernel
1,820 | test order after complete | import pytest
from iommi.sort_after import (
LAST,
SortAfterException,
)
from iommi.struct import Struct
from iommi.base import (
keys,
values,
)
from iommi.sort_after import sort_after
def test_order_after_0():
    """An entry with ``after=0`` is moved to the very front."""
    sorts_right(
        {
            'foo': Struct(expected_position=1),
            'bar': Struct(expected_position=2),
            'quux': Struct(after=0, expected_position=0),
            'baz': Struct(expected_position=3),
        }
    )
def test_order_after_LAST():
    """An entry with ``after=LAST`` is moved to the very end."""
    sorts_right(
        {
            'foo': Struct(expected_position=0),
            'bar': Struct(expected_position=1),
            'quux': Struct(after=LAST, expected_position=3),
            'baz': Struct(expected_position=2),
        }
    )
def test_order_after_large():
    """Numeric ``after`` indexes beyond the length push entries to the back, stably."""
    sorts_right(
        {
            'foo': Struct(expected_position=2, after=42),
            'bar': Struct(expected_position=0),
            'quux': Struct(expected_position=3, after=42),
            'baz': Struct(expected_position=1, after=17),
        }
    )
def test_order_after_name():
    """``after='foo'`` places an entry directly after the named key."""
    sorts_right(
        {
            'foo': Struct(expected_position=0),
            'bar': Struct(expected_position=2),
            'quux': Struct(after='foo', expected_position=1),
            'baz': Struct(expected_position=3),
        }
    )
def test_order_after_name_stable():
    """Two entries ordered after the same key keep their original relative order."""
    sorts_right(
        {
            'foo': Struct(expected_position=0),
            'bar': Struct(expected_position=3),
            'quux': Struct(after='foo', expected_position=1),
            'qoox': Struct(after='foo', expected_position=2),
            'baz': Struct(expected_position=4),
        }
    )
def test_order_after_name_interleave():
    """Index-based and name-based ``after`` targets interleave correctly."""
    sorts_right(
        {
            'foo': Struct(expected_position=0),
            'bar': Struct(expected_position=3),
            'qoox': Struct(after=1, expected_position=2),
            'quux': Struct(after='foo', expected_position=1),
        }
    )
def test_order_after_name_last():
    """Ordering after a key that itself is ordered LAST resolves transitively."""
    sorts_right(
        {
            'foo': Struct(expected_position=0),
            'quux': Struct(after='qoox', expected_position=3),
            'qoox': Struct(after=LAST, expected_position=2),
            'bar': Struct(expected_position=1),
        }
    )
def METHOD_NAME():
    """Large mixed scenario: positional `after`, named `after`, LAST markers and
    plain entries interleaved must all resolve to the expected final order."""
    sorts_right(
        {
            # header1
            'quux': Struct(expected_position=2),
            'foo': Struct(expected_position=3),
            # header2
            'bar': Struct(expected_position=6),
            'asd': Struct(expected_position=7),
            'header1': Struct(after=0, expected_position=0),
            'header1b': Struct(after=0, expected_position=1),
            'header2': Struct(after='foo', expected_position=4),
            # header3
            'header2.b': Struct(after='foo', expected_position=5),
            'header3': Struct(after='quux2', expected_position=9),
            'quux2': Struct(expected_position=8),
            'quux3': Struct(expected_position=10),
            'quux4': Struct(expected_position=11),
            'quux5': Struct(after=LAST, expected_position=12),
            'quux6': Struct(after=LAST, expected_position=13),
        }
    )
def test_sort_after_chaining():
    """An entry may be ordered after another entry that is itself reordered."""
    sorts_right(
        {
            'foo': Struct(after='bar', expected_position=1),
            'bar': Struct(after=0, expected_position=0),
        }
    )
def test_sort_after_name_chaining():
    """A chain of named ``after`` references resolves in order."""
    sorts_right(
        {
            'baz': Struct(after='foo', expected_position=2),
            'foo': Struct(after='bar', expected_position=1),
            'bar': Struct(after=0, expected_position=0),
        }
    )
def test_sort_after_indexes():
    """Pure numeric ``after`` indexes (including -1) yield the expected order."""
    sorts_right(
        {
            'baz': Struct(after=1, expected_position=2),
            'foo': Struct(after=0, expected_position=1),
            'bar': Struct(after=-1, expected_position=0),
        }
    )
def sorts_right(objects):
    """Assert that sort_after() orders *objects* so each value ends up at its
    declared ``expected_position``.

    The first assert sanity-checks the fixture itself: the expected positions
    must be exactly the set 0..len-1, otherwise the test data is malformed.
    """
    # fix: assertion message typo "Borken" -> "Broken"
    assert {y.expected_position for y in values(objects)} == set(range(len(objects))), "Broken test"
    sort_after(objects)
    assert [x.expected_position for x in values(objects)] == list(range(len(objects))), keys(objects)
def test_sort_after_points_to_nothing():
    """A single unknown ``after`` target raises SortAfterException whose
    message names the missing key and lists the available ones."""
    with pytest.raises(SortAfterException) as e:
        sort_after(
            dict(
                quux=Struct(),
                foo=Struct(),
                quux6=Struct(after='does-not-exist'),
            )
        )
    assert (
        e.value.args[0]
        == """\
Tried to order after does-not-exist but that key does not exist.
Available names:
    foo
    quux
    quux6"""
    )
def test_sort_after_points_to_nothing_plural():
    """Multiple unknown ``after`` targets produce the pluralized error message,
    with the missing keys listed in sorted order."""
    with pytest.raises(SortAfterException) as e:
        sort_after(
            dict(
                quux=Struct(),
                foo=Struct(after='does-not-exist2'),
                quux6=Struct(after='does-not-exist'),
            )
        )
    assert (
        e.value.args[0]
        == """\
Tried to order after does-not-exist, does-not-exist2 but those keys do not exist.
Available names:
    foo
    quux
    quux6"""
    )
1,821 | runtime | """Python project."""
from __future__ import annotations
import logging
import shutil
from typing import TYPE_CHECKING, ClassVar, Optional, Set, Tuple
from typing_extensions import Literal
from .....compat import cached_property
from .....dependency_managers import (
Pip,
Pipenv,
PipenvNotFoundError,
Poetry,
PoetryNotFoundError,
)
from ..base_classes import Project
from ..models.args import PythonHookArgs
from . import PythonDockerDependencyInstaller
if TYPE_CHECKING:
    # only needed for annotations; avoids importing pathlib at runtime here
    from pathlib import Path
# collapse private sub-package markers ("._") so log records show a clean dotted path
LOGGER = logging.getLogger(__name__.replace("._", "."))
class PythonProject(Project[PythonHookArgs]):
    """Python project.

    Wraps a Python source tree and knows how to resolve its dependency
    manager (pip / pipenv / poetry), export a requirements file, and install
    dependencies either locally or through Docker.
    """

    DEFAULT_CACHE_DIR_NAME: ClassVar[str] = "pip_cache"
    """Name of the default cache directory."""

    @cached_property
    def docker(self) -> Optional[PythonDockerDependencyInstaller]:
        """Docker interface that can be used to build the project."""
        # NOTE(review): presumably returns None when Docker is unavailable or
        # disabled for this project — confirm in from_project()
        return PythonDockerDependencyInstaller.from_project(self)

    @cached_property
    def metadata_files(self) -> Tuple[Path, ...]:
        """Project metadata files.

        Files are only included in return value if they exist.
        """
        # pick the config-file set matching the detected project type
        if self.project_type == "poetry":
            config_files = [
                self.project_root / config_file for config_file in Poetry.CONFIG_FILES
            ]
        elif self.project_type == "pipenv":
            config_files = [
                self.project_root / config_file for config_file in Pipenv.CONFIG_FILES
            ]
        else:
            config_files = [
                self.project_root / config_file for config_file in Pip.CONFIG_FILES
            ]
        return tuple(path for path in config_files if path.exists())

    @cached_property
    def METHOD_NAME(self) -> str:
        """Runtime of the build system.

        Value should be a valid Lambda Function runtime
        (https://docs.aws.amazon.com/lambda/latest/dg/lambda-runtimes.html).
        """
        # a runtime reported by the Docker image takes precedence over the
        # local interpreter's version
        if self._runtime_from_docker:
            return self._validate_runtime(self._runtime_from_docker)
        return self._validate_runtime(
            f"python{self.pip.python_version.major}.{self.pip.python_version.minor}"
        )

    @cached_property
    def pip(self) -> Pip:
        """Pip dependency manager."""
        return Pip(self.ctx, self.project_root)

    @cached_property
    def pipenv(self) -> Optional[Pipenv]:
        """Pipenv dependency manager.

        Return:
            If the project uses pipenv and pipenv is not explicitly disabled,
            an object for interfacing with pipenv will be returned.

        Raises:
            PipenvNotFoundError: pipenv is not installed or not found in PATH.
        """
        if self.project_type != "pipenv":
            return None
        if Pipenv.found_in_path():
            return Pipenv(self.ctx, self.project_root)
        raise PipenvNotFoundError

    @cached_property
    def poetry(self) -> Optional[Poetry]:
        """Poetry dependency manager.

        Return:
            If the project uses poetry and poetry is not explicitly disabled,
            an object for interfacing with poetry will be returned.

        Raises:
            PoetryNotFound: poetry is not installed or not found in PATH.
        """
        if self.project_type != "poetry":
            return None
        if Poetry.found_in_path():
            return Poetry(self.ctx, self.project_root)
        raise PoetryNotFoundError

    @cached_property
    def project_type(self) -> Literal["pip", "pipenv", "poetry"]:
        """Type of python project."""
        # detection order: poetry > pipenv > pip; an explicitly disabled
        # manager falls through to the next with a warning
        if Poetry.dir_is_project(self.project_root):
            if self.args.use_poetry:
                return "poetry"
            LOGGER.warning(
                "poetry project detected but use of poetry is explicitly disabled"
            )
        if Pipenv.dir_is_project(self.project_root):
            if self.args.use_pipenv:
                return "pipenv"
            LOGGER.warning(
                "pipenv project detected but use of pipenv is explicitly disabled"
            )
        return "pip"

    @cached_property
    def requirements_txt(self) -> Optional[Path]:
        """Dependency file for the project."""
        if self.poetry:  # prioritize poetry
            return self.poetry.export(output=self.tmp_requirements_txt)
        if self.pipenv:
            return self.pipenv.export(output=self.tmp_requirements_txt)
        requirements_txt = self.project_root / "requirements.txt"
        if Pip.dir_is_project(self.project_root, file_name=requirements_txt.name):
            return requirements_txt
        return None

    @cached_property
    def supported_metadata_files(self) -> Set[str]:
        """Names of all supported metadata files.

        Returns:
            Set of file names - not paths.
        """
        file_names = {*Pip.CONFIG_FILES}
        if self.args.use_poetry:
            file_names.update(Poetry.CONFIG_FILES)
        if self.args.use_pipenv:
            file_names.update(Pipenv.CONFIG_FILES)
        return file_names

    @cached_property
    def tmp_requirements_txt(self) -> Path:
        """Temporary requirements.txt file.

        This path is only used when exporting from another format.
        """
        # keyed by source hash so concurrent builds of different sources don't collide
        return self.ctx.work_dir / f"{self.source_code.md5_hash}.requirements.txt"

    def cleanup(self) -> None:
        """Cleanup temporary files after the build process has run."""
        # the exported requirements file only exists for poetry/pipenv projects
        if (self.poetry or self.pipenv) and self.tmp_requirements_txt.exists():
            self.tmp_requirements_txt.unlink()
        shutil.rmtree(self.dependency_directory, ignore_errors=True)
        if not any(self.build_directory.iterdir()):
            # remove build_directory if it's empty
            shutil.rmtree(self.build_directory, ignore_errors=True)

    def install_dependencies(self) -> None:
        """Install project dependencies."""
        if self.requirements_txt:
            LOGGER.debug("installing dependencies to %s...", self.dependency_directory)
            if self.docker:
                self.docker.install()
            else:
                self.pip.install(
                    cache_dir=self.args.cache_dir,
                    extend_args=self.args.extend_pip_args,
                    no_cache_dir=not self.args.use_cache,
                    # poetry/pipenv exports pin transitive deps already, so
                    # presumably pip need not resolve them — confirm
                    no_deps=bool(self.poetry or self.pipenv),
                    requirements=self.requirements_txt,
                    target=self.dependency_directory,
                )
            LOGGER.debug(
                "dependencies successfully installed to %s", self.dependency_directory
            )
        else:
            LOGGER.info("skipped installing dependencies; none found")
1,822 | test cdf values | # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for keras clustering centroids initialisation API."""
import tensorflow as tf
import tensorflow.keras.backend as K
from absl.testing import parameterized
from tensorflow_model_optimization.python.core.clustering.keras import cluster_config
from tensorflow_model_optimization.python.core.clustering.keras import clustering_centroids
# Short aliases for the TensorFlow namespaces used throughout these tests.
keras = tf.keras
errors_impl = tf.errors
layers = keras.layers
test = tf.test
CentroidInitialization = cluster_config.CentroidInitialization
class ClusteringCentroidsTest(test.TestCase, parameterized.TestCase):
  """Unit tests for the clustering_centroids module."""

  def setUp(self):
    # Factory under test: maps CentroidInitialization enum values to
    # initializer classes.
    self.factory = clustering_centroids.CentroidsInitializerFactory

  @parameterized.parameters(
      (CentroidInitialization.LINEAR),
      (CentroidInitialization.RANDOM),
      (CentroidInitialization.DENSITY_BASED),
  )
  def testExistingInitsAreSupported(self, init_type):
    """
    Verifies that the given centroid initialization methods are supported.
    """
    self.assertTrue(self.factory.init_is_supported(init_type))

  def testNonExistingInitIsNotSupported(self):
    """An arbitrary non-enum value must be reported as unsupported."""
    self.assertFalse(self.factory.init_is_supported("DEADBEEF"))

  @parameterized.parameters(
      (
          CentroidInitialization.LINEAR,
          clustering_centroids.LinearCentroidsInitialisation
      ),
      (
          CentroidInitialization.RANDOM,
          clustering_centroids.RandomCentroidsInitialisation
      ),
      (
          CentroidInitialization.DENSITY_BASED,
          clustering_centroids.DensityBasedCentroidsInitialisation
      ),
  )
  def testReturnsMethodForExistingInit(self, init_type, method):
    """
    Verifies that the centroid initializer factory method returns the expected
    classes for the given initialization methods.
    """
    self.assertEqual(self.factory.get_centroid_initializer(init_type), method)

  def testThrowsValueErrorForNonExistingInit(self):
    """
    Verifies that the centroid initializer factory method raises an exception
    when invoked with an unsupported initialization method.
    """
    with self.assertRaises(ValueError):
      self.factory.get_centroid_initializer("DEADBEEF")

  @parameterized.parameters(
      # (x1, y1, x2, y2, expected slope a, expected intercept b)
      (0, 0, 1, 1, 1, 0),
      (0, 0, 5, 5, 1, 0),
      (1, 2, 3, 4, 1, 1),
      (7, 12, 17, 22, 1, 5),
      (-5, 4, 7, 10, 1.0 / 2.0, 13.0 / 2.0),
  )
  def testLinearSolverConstruction(self, x1, y1, x2, y2, a, b):
    """
    Verifies that a TFLinearEquationSolver is constructed correctly.
    """
    solver = clustering_centroids.TFLinearEquationSolver(float(x1),
                                                         float(y1),
                                                         float(x2),
                                                         float(y2))
    solver_a = solver.a
    # a and b are TF variables; pull their concrete values out via Keras backend
    self.assertAlmostEqual(K.batch_get_value([solver_a])[0], a)
    self.assertAlmostEqual(K.batch_get_value([solver.b])[0], b)

  @parameterized.parameters(
      (0, 0, 1, 1, 5, 5),
      (0, 0, 5, 5, 20, 20),
      (1, 2, 3, 4, 3, 4),
      (7, 12, 17, 22, 3, 8),
  )
  def testLinearSolverSolveForX(self, x1, y1, x2, y2, x, y):
    """
    Verifies that TFLinearEquationSolver solves the given equations correctly
    for X.
    """
    solver = clustering_centroids.TFLinearEquationSolver(float(x1),
                                                         float(y1),
                                                         float(x2),
                                                         float(y2))
    for_x = solver.solve_for_x(y)
    self.assertAlmostEqual(K.batch_get_value([for_x])[0], x)

  @parameterized.parameters(
      (0, 0, 1, 1, 5, 5),
      (0, 0, 5, 5, 20, 20),
      (1, 2, 3, 4, 3, 4),
      (7, 12, 17, 22, 3, 8),
  )
  def testLinearSolverSolveForY(self, x1, y1, x2, y2, x, y):
    """
    Verifies that TFLinearEquationSolver solves the given equations correctly
    for Y.
    """
    solver = clustering_centroids.TFLinearEquationSolver(float(x1),
                                                         float(y1),
                                                         float(x2),
                                                         float(y2))
    for_y = solver.solve_for_y(x)
    self.assertAlmostEqual(K.batch_get_value([for_y])[0], y)

  @parameterized.parameters(
      # (weights, query point, expected cumulative probability)
      ([1, 2, 6, 7], 4, 0.5),
      ([1, 2, 6, 7], 1, 1. / 4.),
      ([1, 2, 3, 4, 5, 6, 7, 8, 9], 3, 1. / 3.),
      ([1, 2, 3, 4, 5, 6, 7, 8, 9], 99, 1.),
      ([1, 2, 3, 4, 5, 6, 7, 8, 9], -20, 0.)
  )
  def METHOD_NAME(self, weights, point, probability):
    """
    Verifies that TFCumulativeDistributionFunction yields the expected output
    for the inputs provided.
    """
    cdf_calc = clustering_centroids.TFCumulativeDistributionFunction(weights)
    self.assertAlmostEqual(
        probability,
        K.batch_get_value([cdf_calc.get_cdf_value(point)])[0]
    )

  @parameterized.parameters(
      (
          [0, 1, 2, 3, 3.1, 3.2, 3.3, 3.4, 3.5],
          5,
          [0.11137931, 2.0534482, 3.145862, 3.3886206, 3.51]
      ),
      (
          [0, 1, 2, 3, 3.1, 3.2, 3.3, 3.4, 3.5],
          3,
          [0.11137931, 3.145862, 3.51]
      ),
      (
          [0., 1., 2., 3., 4., 5., 6., 7., 8., 9.],
          3,
          [0.3010345, 5.2775865, 9.01]
      )
  )
  def testClusterCentroids(self, weights, number_of_clusters, centroids):
    """Density-based initialization must produce the reference centroids."""
    dbci = clustering_centroids.DensityBasedCentroidsInitialisation(
        weights,
        number_of_clusters
    )
    calc_centroids = K.batch_get_value([dbci.get_cluster_centroids()])[0]
    self.assertSequenceAlmostEqual(centroids, calc_centroids, places=4)
# Allow running this test module directly with the TF test runner.
if __name__ == '__main__':
  test.main()
1,823 | test facet configuration with existing facet dict | # -*- coding: utf-8 -*-
#
# This file is part of INSPIRE.
# Copyright (C) 2014-2018 CERN.
#
# INSPIRE is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# INSPIRE is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with INSPIRE. If not, see <http://www.gnu.org/licenses/>.
#
# In applying this license, CERN does not waive the privileges and immunities
# granted to it by virtue of its status as an Intergovernmental Organization
# or submit itself to any jurisdiction.
from __future__ import absolute_import, division, print_function
from mock import Mock, patch
from inspirehep.modules.search.utils import get_facet_configuration
@patch('inspirehep.modules.search.utils.get_facet_configuration')
def test_facet_configuration_with_existing_facet_import_string(facet_mock, isolated_app):
    """A facet registered as an import string is resolved and invoked."""
    facet_mock.return_value = {
        'aggs': {'jessica-jones': {'terms': {'field': 'defenders', 'size': 20}}}
    }
    cfg = {
        'RECORDS_REST_FACETS': {
            'defenders': 'inspirehep.modules.search.utils:get_facet_configuration'
        },
    }
    wanted = {
        'aggs': {'jessica-jones': {'terms': {'field': 'defenders', 'size': 20}}}
    }
    with isolated_app.test_request_context('?facet_name=defenders'):
        with patch.dict(isolated_app.config, cfg):
            result = get_facet_configuration('records-hep')
            facet_mock.assert_called_once()
            assert wanted == result
def test_facet_configuration_with_existing_facet_callable(isolated_app):
    """A facet registered as a plain callable is invoked and its result returned."""
    facet_mock = Mock()
    facet_mock.return_value = {
        'aggs': {'jessica-jones': {'terms': {'field': 'defenders', 'size': 20}}}
    }
    cfg = {
        'RECORDS_REST_FACETS': {
            'defenders': facet_mock
        },
    }
    wanted = {
        'aggs': {'jessica-jones': {'terms': {'field': 'defenders', 'size': 20}}}
    }
    with isolated_app.test_request_context('?facet_name=defenders'):
        with patch.dict(isolated_app.config, cfg):
            result = get_facet_configuration('records-hep')
            facet_mock.assert_called_once()
            assert wanted == result
def METHOD_NAME(isolated_app):
    """A facet given directly as a dict is returned as-is."""
    cfg = {
        'RECORDS_REST_FACETS': {
            'defenders': {
                'aggs': {'jessica-jones': {'terms': {'field': 'defenders', 'size': 20}}}
            },
        },
    }
    wanted = {
        'aggs': {'jessica-jones': {'terms': {'field': 'defenders', 'size': 20}}}
    }
    with isolated_app.test_request_context('?facet_name=defenders'):
        with patch.dict(isolated_app.config, cfg):
            result = get_facet_configuration('records-hep')
            assert wanted == result
def test_facet_configuration_without_request_facet_name(isolated_app):
    """Without a ``facet_name`` query argument the search-index facet is used."""
    cfg = {
        'RECORDS_REST_FACETS': {
            'records-hep': {
                'aggs': {'jessica-jones': {'terms': {'field': 'defenders', 'size': 20}}}
            },
        },
    }
    wanted = {
        'aggs': {'jessica-jones': {'terms': {'field': 'defenders', 'size': 20}}}
    }
    with isolated_app.test_request_context():
        with patch.dict(isolated_app.config, cfg):
            result = get_facet_configuration('records-hep')
            assert wanted == result
def test_facet_configuration_with_fallback_to_default_facet(isolated_app):
    """An unknown ``facet_name`` falls back to the search-index facet."""
    cfg = {
        'RECORDS_REST_FACETS': {
            'records-hep': {
                'aggs': {'jessica-jones': {'terms': {'field': 'defenders', 'size': 20}}}
            },
        },
    }
    wanted = {
        'aggs': {'jessica-jones': {'terms': {'field': 'defenders', 'size': 20}}}
    }
    with isolated_app.test_request_context('?facet_name=defenders'):
        with patch.dict(isolated_app.config, cfg):
            result = get_facet_configuration('records-hep')
            assert wanted == result
1,824 | test is extreme weight | import math
import pytest
from robotoff.prediction.ocr.dataclass import OCRRegex
from robotoff.prediction.ocr.product_weight import (
PRODUCT_WEIGHT_REGEX,
find_product_weight,
is_extreme_weight,
is_suspicious_weight,
is_valid_weight,
normalize_weight,
)
from robotoff.types import Prediction, PredictionType, ServerType
@pytest.mark.parametrize(
    "input_str,is_match",
    [
        ("poids net à l'emballage: 500g", True),
        ("poids 2kg", True),
        ("poids 2kgv", False),
        ("qpoids 2kgv", False),
        ("net wt. 1.4 fl oz", True),
        ("other string", False),
        ("1.4 g", False),
        ("2 l", False),
    ],
)
def test_product_weight_with_mention_regex(input_str: str, is_match: bool):
    """Only weight strings preceded by an explicit mention should match."""
    ocr_regex: OCRRegex = PRODUCT_WEIGHT_REGEX["with_mention"]
    matched = ocr_regex.regex.match(input_str) is not None
    assert matched == is_match
@pytest.mark.parametrize(
    "input_str,is_match",
    [
        ("poids net à l'emballage: 500g", False),
        ("poids 2kg", False),
        ("250g net weight", True),
        ("10 g net", True),
        ("bq10 g net", False),
        ("1.4 g", False),
        ("2 l", False),
    ],
)
def test_product_weight_with_ending_mention_regex(input_str: str, is_match: bool):
    """Only weight strings followed by a trailing mention should match."""
    ocr_regex: OCRRegex = PRODUCT_WEIGHT_REGEX["with_ending_mention"]
    matched = ocr_regex.regex.match(input_str) is not None
    assert matched == is_match
@pytest.mark.parametrize(
    "value,unit,expected",
    [
        ("2", "l", (2000.0, "ml")),
        ("1549.45", "dl", (154945.0, "ml")),
        ("10,5", "cl", (105, "ml")),
        ("20", "ml", (20.0, "ml")),
        ("2,5", "kg", (2500.0, "g")),
        ("2.5", "g", (2.5, "g")),
        ("25", "g", (25, "g")),
        ("15", "fl oz", (450, "ml")),
        ("1", "oz", (28.349523125, "g")),
    ],
)
def test_normalize_weight(value: str, unit: str, expected: tuple[float, str]):
    """normalize_weight converts (value, unit) to a canonical (g/ml) pair."""
    result_value, result_unit = normalize_weight(value, unit)
    expected_value, expected_unit = expected
    assert math.isclose(result_value, expected_value)
    assert result_unit == expected_unit
@pytest.mark.parametrize(
    "value,is_valid",
    [
        ("25", True),
        ("150", True),
        ("150.0", True),
        ("0225", False),
        ("00225", False),
        ("gsg", False),
        ("-15", False),
        ("12,5", False),
        ("12.5", False),
    ],
)
def test_is_valid_weight(value: str, is_valid: bool):
    """is_valid_weight accepts plain non-negative numbers without leading zeros."""
    result = is_valid_weight(value)
    assert result is is_valid
@pytest.mark.parametrize(
    "value,unit,expected",
    [
        (10000, "g", True),
        (10000, "ml", True),
        (9999, "ml", False),
        (9999, "g", False),
        (100, "g", False),
        (100, "ml", False),
        (10, "ml", True),
        (3, "ml", True),
        (10, "g", True),
        (2, "g", True),
    ],
)
def METHOD_NAME(value: float, unit: str, expected: bool):
    """Both very small and very large normalized weights count as extreme."""
    result = is_extreme_weight(value, unit)
    assert result is expected
@pytest.mark.parametrize(
    "value,unit,expected",
    [
        (100, "g", False),
        (125, "g", False),
        (250, "g", False),
        (100, "ml", False),
        (563, "ml", False),
        (2530, "g", False),
        (6250, "ml", False),
        (2532, "g", True),
        (2537, "ml", True),
        (6259, "ml", True),
    ],
)
def test_is_suspicious_weight(value: float, unit: str, expected: bool):
    """Large weights with unusual trailing digits are flagged as suspicious."""
    result = is_suspicious_weight(value, unit)
    assert result is expected
@pytest.mark.parametrize(
    "text,expected",
    [
        ("760094310634\nGE PAPIER\n", []),
        (
            "Poids net: 150 g\nIngrédients:",
            [
                Prediction(
                    type=PredictionType.product_weight,
                    value="150 g",
                    value_tag=None,
                    automatic_processing=True,
                    predictor="regex",
                    predictor_version="1",
                    barcode=None,
                    timestamp=None,
                    source_image=None,
                    id=None,
                    confidence=None,
                    server_type=ServerType.off,
                    data={
                        "automatic_processing": True,
                        "matcher_type": "with_mention",
                        "normalized_unit": "g",
                        "normalized_value": 150,
                        "notify": False,
                        "priority": 1,
                        "prompt": "Poids net",
                        "raw": "Poids net: 150 g",
                        "unit": "g",
                        "value": "150",
                    },
                ),
            ],
        ),
    ],
)
def test_find_product_weight(text: str, expected: list[dict]):
    """find_product_weight extracts weight predictions from OCR text."""
    result = find_product_weight(text)
    assert result == expected
1,825 | description | # coding=utf-8
# *** WARNING: this file was generated by pulumi. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import copy
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
# Public API of this generated module.
__all__ = [
    'GetSchemaResult',
    'AwaitableGetSchemaResult',
    'get_schema',
    'get_schema_output',
]
@pulumi.output_type
class GetSchemaResult:
    """
    Schema Contract details.

    Generated by pulumi; values are stored via ``pulumi.set`` so the
    ``@pulumi.output_type`` machinery can resolve them lazily through the
    ``@pulumi.getter`` properties below.
    """
    def __init__(__self__, METHOD_NAME=None, id=None, name=None, schema_type=None, type=None, value=None):
        # Each argument is type-checked (str or falsy) before being stored.
        if METHOD_NAME and not isinstance(METHOD_NAME, str):
            raise TypeError("Expected argument 'description' to be a str")
        pulumi.set(__self__, "description", METHOD_NAME)
        if id and not isinstance(id, str):
            raise TypeError("Expected argument 'id' to be a str")
        pulumi.set(__self__, "id", id)
        if name and not isinstance(name, str):
            raise TypeError("Expected argument 'name' to be a str")
        pulumi.set(__self__, "name", name)
        if schema_type and not isinstance(schema_type, str):
            raise TypeError("Expected argument 'schema_type' to be a str")
        pulumi.set(__self__, "schema_type", schema_type)
        if type and not isinstance(type, str):
            raise TypeError("Expected argument 'type' to be a str")
        pulumi.set(__self__, "type", type)
        if value and not isinstance(value, str):
            raise TypeError("Expected argument 'value' to be a str")
        pulumi.set(__self__, "value", value)

    @property
    @pulumi.getter
    def METHOD_NAME(self) -> Optional[str]:
        """
        Free-form schema entity description.
        """
        return pulumi.get(self, "description")

    @property
    @pulumi.getter
    def id(self) -> str:
        """
        Fully qualified resource ID for the resource. Ex - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}
        """
        return pulumi.get(self, "id")

    @property
    @pulumi.getter
    def name(self) -> str:
        """
        The name of the resource
        """
        return pulumi.get(self, "name")

    @property
    @pulumi.getter(name="schemaType")
    def schema_type(self) -> str:
        """
        Schema Type. Immutable.
        """
        return pulumi.get(self, "schema_type")

    @property
    @pulumi.getter
    def type(self) -> str:
        """
        The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or "Microsoft.Storage/storageAccounts"
        """
        return pulumi.get(self, "type")

    @property
    @pulumi.getter
    def value(self) -> Optional[str]:
        """
        Json-encoded string for non json-based schema.
        """
        return pulumi.get(self, "value")
class AwaitableGetSchemaResult(GetSchemaResult):
    """Awaitable wrapper so the result can be used with ``await`` (generated code)."""
    # pylint: disable=using-constant-test
    def __await__(self):
        if False:
            # never executed; makes this method a generator as required by __await__
            yield self
        return GetSchemaResult(
            METHOD_NAME=self.METHOD_NAME,
            id=self.id,
            name=self.name,
            schema_type=self.schema_type,
            type=self.type,
            value=self.value)
def get_schema(resource_group_name: Optional[str] = None,
               schema_id: Optional[str] = None,
               service_name: Optional[str] = None,
               opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetSchemaResult:
    """
    Gets the details of the Schema specified by its identifier.

    :param str resource_group_name: The name of the resource group.
    :param str schema_id: Schema id identifier. Must be unique in the current API Management service instance.
    :param str service_name: The name of the API Management service.
    """
    __args__ = dict()
    __args__['resourceGroupName'] = resource_group_name
    __args__['schemaId'] = schema_id
    __args__['serviceName'] = service_name
    opts = pulumi.InvokeOptions.merge(_utilities.get_invoke_opts_defaults(), opts)
    # invoke the provider function and deserialize the reply into GetSchemaResult
    __ret__ = pulumi.runtime.invoke('azure-native:apimanagement/v20210401preview:getSchema', __args__, opts=opts, typ=GetSchemaResult).value
    return AwaitableGetSchemaResult(
        METHOD_NAME=pulumi.get(__ret__, 'description'),
        id=pulumi.get(__ret__, 'id'),
        name=pulumi.get(__ret__, 'name'),
        schema_type=pulumi.get(__ret__, 'schema_type'),
        type=pulumi.get(__ret__, 'type'),
        value=pulumi.get(__ret__, 'value'))
# The body is intentionally `...`: lift_output_func wraps get_schema to accept
# pulumi Inputs/Outputs and supplies the actual implementation.
@_utilities.lift_output_func(get_schema)
def get_schema_output(resource_group_name: Optional[pulumi.Input[str]] = None,
                      schema_id: Optional[pulumi.Input[str]] = None,
                      service_name: Optional[pulumi.Input[str]] = None,
                      opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetSchemaResult]:
    """
    Gets the details of the Schema specified by its identifier.

    :param str resource_group_name: The name of the resource group.
    :param str schema_id: Schema id identifier. Must be unique in the current API Management service instance.
    :param str service_name: The name of the API Management service.
    """
    ...
1,826 | test opf task | # -*- coding: utf-8 -*-
# Copyright (c) 2016-2023 by University of Kassel and Fraunhofer Institute for Energy Economics
# and Energy System Technology (IEE), Kassel. All rights reserved.
import numpy as np
import pytest
import pandapower as pp
import pandapower.networks as nw
import pandapower.toolbox
def METHOD_NAME():
    """Build a small 6-bus network with every flexible element type and check
    the dict structure returned by pp.opf_task(): flexibilities, network
    constraints, delta_pq filtering, and cost-coverage reporting."""
    net = pp.create_empty_network()
    pp.create_buses(net, 6, [10, 10, 10, 0.4, 7, 7],
                    min_vm_pu=[0.9, 0.9, 0.88, 0.9, np.nan, np.nan])
    idx_ext_grid = 1
    pp.create_ext_grid(net, 0, max_q_mvar=80, min_p_mw=0, index=idx_ext_grid)
    pp.create_gen(net, 1, 10, min_q_mvar=-50, max_q_mvar=-10, min_p_mw=0, max_p_mw=60)
    pp.create_gen(net, 2, 8)
    pp.create_gen(net, 3, 5)
    pp.create_load(net, 3, 120, max_p_mw=8)
    pp.create_sgen(net, 1, 8, min_q_mvar=-50, max_q_mvar=-10, controllable=False)
    pp.create_sgen(net, 2, 8)
    pp.create_storage(net, 3, 2, 100, min_q_mvar=-10, max_q_mvar=-50, min_p_mw=0, max_p_mw=60,
                      controllable=True)
    pp.create_dcline(net, 4, 5, 0.3, 1e-4, 1e-2, 1.01, 1.02, min_q_from_mvar=-10,
                     min_q_to_mvar=-10)
    pp.create_line(net, 3, 4, 5, "122-AL1/20-ST1A 10.0", max_loading_percent=50)
    pp.create_transformer(net, 2, 3, "0.25 MVA 10/0.4 kV")
    # --- run and check opf_task()
    out1 = pp.opf_task(net, keep=True)
    assert out1["flexibilities_without_costs"] == "all"
    # expected keys: P/Q cross product with each flexible element type
    assert sorted(out1["flexibilities"].keys()) == [i1 + i2 for i1 in ["P", "Q"] for i2 in [
        "dcline", "ext_grid", "gen", "storage"]]
    for key, df in out1["flexibilities"].items():
        assert df.shape[0]
        if "gen" in key:
            assert df.shape[0] > 1
    assert out1["flexibilities"]["Pext_grid"].loc[0, "index"] == [1]
    assert np.isnan(out1["flexibilities"]["Pext_grid"].loc[0, "max"])
    assert out1["flexibilities"]["Pext_grid"].loc[0, "min"] == 0
    assert np.isnan(out1["flexibilities"]["Qext_grid"].loc[0, "min"])
    assert out1["flexibilities"]["Qext_grid"].loc[0, "max"] == 80
    assert sorted(out1["network_constraints"].keys()) == ["LOADINGline", "VMbus"]
    assert out1["network_constraints"]["VMbus"].shape[0] == 3
    # check delta_pq: a min/max gap below delta_pq is not reported as flexible
    net.gen.loc[0, "min_p_mw"] = net.gen.loc[0, "max_p_mw"] - 1e-5
    out2 = pp.opf_task(net, delta_pq=1e-3, keep=True)
    assert out2["flexibilities"]["Pgen"].shape[0] == 1
    net.gen.loc[0, "min_p_mw"] = net.gen.loc[0, "max_p_mw"] - 1e-1
    out1["flexibilities"]["Pgen"].loc[0, "min"] = out1["flexibilities"]["Pgen"].loc[
        0, "max"] - 1e-1
    out3 = pp.opf_task(net, delta_pq=1e-3, keep=True)
    for key in out3["flexibilities"]:
        assert pandapower.toolbox.dataframes_equal(out3["flexibilities"][key], out1["flexibilities"][key])
    # check costs: adding cost functions must not change the flexibilities
    pp.create_poly_cost(net, idx_ext_grid, "ext_grid", 2)
    pp.create_poly_cost(net, 1, "gen", 1.7)
    pp.create_poly_cost(net, 0, "dcline", 2)
    pp.create_pwl_cost(net, 2, "gen", [[-1e9, 1, 3.1], [1, 1e9, 0.5]], power_type="q")
    out4 = pp.opf_task(net)
    for dict_key in ["flexibilities", "network_constraints"]:
        for key in out4[dict_key]:
            assert pandapower.toolbox.dataframes_equal(out4[dict_key][key], out1[dict_key][key])
    # only gen and storage remain uncovered by cost functions
    assert isinstance(out4["flexibilities_without_costs"], dict)
    expected_elm_without_cost = ["gen", "storage"]
    assert sorted(out4["flexibilities_without_costs"].keys()) == expected_elm_without_cost
    for elm in expected_elm_without_cost:
        assert len(out4["flexibilities_without_costs"][elm]) == 1
def test_overloaded_lines():
    """Check that pp.overloaded_lines() reports exactly the lines whose
    loading exceeds the given threshold."""
    # Two-bus net with four parallel lines of different standard types.
    net = pp.create_empty_network()
    b_from = pp.create_bus(net, vn_kv=.4)
    b_to = pp.create_bus(net, vn_kv=.4)
    pp.create_ext_grid(net, b_from)
    weak_cable = pp.create_line(net, b_from, b_to, length_km=1, std_type="NAYY 4x50 SE")
    mv_cable = pp.create_line(net, b_from, b_to, length_km=1,
                              std_type="NA2XS2Y 1x95 RM/25 12/20 kV")
    overhead = pp.create_line(net, b_from, b_to, length_km=1,
                              std_type="15-AL1/3-ST1A 0.4")
    pp.create_line(net, b_from, b_to, length_km=10, std_type="149-AL1/24-ST1A 10.0")
    pp.create_load(net, b_to, p_mw=0.2, q_mvar=0.05)
    pp.runpp(net)

    # At the 100 % threshold only the two most loaded lines are flagged.
    assert set(pp.overloaded_lines(net, max_load=100)) == {weak_cable, mv_cable}
    # A stricter 50 % threshold additionally flags the overhead line.
    assert set(pp.overloaded_lines(net, max_load=50)) == {weak_cable, mv_cable, overhead}
def test_violated_buses():
    """Run a power flow on the CIGRE LV benchmark grid and verify that the
    buses outside the allowed voltage band are reported."""
    net = nw.create_cigre_network_lv()
    pp.runpp(net)
    # Allowed per-unit voltage band.
    vm_band = (0.92, 1.1)
    violated = pp.violated_buses(net, *vm_band)
    assert set(violated) == set(net["bus"].index[[16, 35, 36, 40]])
def test_clear_result_tables():
    """After a successful power flow the res_* tables are populated;
    clear_result_tables() must empty them again."""
    net = nw.case9()
    pp.runpp(net)
    checked_elements = ["bus", "line", "load"]
    for elm in checked_elements:
        # Non-empty after runpp.
        assert net["res_%s" % elm].shape[0]
    pp.clear_result_tables(net)
    for elm in checked_elements:
        # Empty after clearing.
        assert not net["res_%s" % elm].shape[0]
def test_res_power_columns():
    """Check the result-table column names returned by res_power_columns()."""
    # Single-ended elements have one (P, Q) column pair.
    assert pandapower.toolbox.res_power_columns("gen") == ["p_mw", "q_mvar"]
    # For branch elements the side may be given by name or by index;
    # the "from" side is the default.
    assert pandapower.toolbox.res_power_columns("line") == pandapower.toolbox.res_power_columns("line", side="from") == \
        pandapower.toolbox.res_power_columns("line", side=0) == ["p_from_mw", "q_from_mvar"]
    # side="all" concatenates the column pairs of every terminal.
    assert pandapower.toolbox.res_power_columns("line", side="all") == [
        "p_from_mw", "q_from_mvar", "p_to_mw", "q_to_mvar"]
    assert pandapower.toolbox.res_power_columns("trafo3w", side="all") == [
        "p_hv_mw", "q_hv_mvar", "p_mv_mw", "q_mv_mvar", "p_lv_mw", "q_lv_mvar"]
if __name__ == '__main__':
    # Run this test module directly, stopping at the first failure.
    # Fix: the original line was missing the closing parenthesis of the
    # pytest.main(...) call (truncated text), which is a SyntaxError.
    pytest.main([__file__, "-x"])
1,827 | source rse | # -*- coding: utf-8 -*-
# Copyright European Organization for Nuclear Research (CERN) since 2012
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
from rucio.core.distance import get_distances, add_distance
from rucio.core.replica import add_replicas
from rucio.core.request import list_and_mark_transfer_requests_and_source_replicas, set_transfer_limit, list_transfer_limits, get_request
from rucio.core.transfer import get_supported_transfertools
from rucio.core.rse import add_rse_attribute, RseData, RseCollection
from rucio.daemons.conveyor.preparer import preparer
from rucio.db.sqla.session import get_session
from rucio.db.sqla import models
from rucio.db.sqla.constants import RequestState
@pytest.fixture
def dest_rse(vo, rse_factory):
    # Destination RSE used as the transfer target in the tests below.
    rse_name, rse_id = rse_factory.make_mock_rse()
    yield {'name': rse_name, 'id': rse_id}
@pytest.fixture
def METHOD_NAME(vo, rse_factory, dest_rse):
    # Source RSE with a known distance of 5 to the destination RSE.
    # NOTE(review): "METHOD_NAME" looks like a mangled placeholder for the
    # original fixture name (presumably source_rse) — confirm and rename
    # together with every test that requests this fixture.
    rse_name, rse_id = rse_factory.make_mock_rse()
    add_distance(rse_id, dest_rse['id'], distance=5)
    yield {'name': rse_name, 'id': rse_id}
@pytest.fixture
def file(vo, did_factory):
    # Single mock file DID with a fixed 1-byte size and dummy checksum.
    did = did_factory.random_file_did()
    return {'scope': did['scope'], 'name': did['name'], 'bytes': 1, 'adler32': 'deadbeef'}
@pytest.fixture
def dataset(did_factory, vo):
    # Dataset DID; no replicas are attached to it anywhere in this module.
    return did_factory.make_dataset()
@pytest.fixture
def mock_request(vo, METHOD_NAME, dest_rse, file, root_account):
    # Transfer request in state PREPARING for a file that has a replica on
    # the source RSE.  The row is committed and expunged from the session so
    # the daemon under test can update it independently; the detached dict
    # snapshot is yielded to the test.
    add_replicas(rse_id=METHOD_NAME['id'], files=[file], account=root_account)

    db_session = get_session()
    request = models.Request(
        state=RequestState.PREPARING,
        scope=file['scope'],
        name=file['name'],
        dest_rse_id=dest_rse['id'],
        account=root_account,
    )
    request.save(session=db_session)
    request_dict = request.to_dict()
    db_session.commit()
    db_session.expunge(request)
    yield request_dict
@pytest.fixture
def mock_request_no_source(dest_rse, dataset, root_account):
    # PREPARING request for a dataset DID that has no replicas anywhere,
    # i.e. a request for which no transfer source can be found.
    db_session = get_session()
    request = models.Request(
        state=RequestState.PREPARING,
        scope=dataset['scope'],
        name=dataset['name'],
        dest_rse_id=dest_rse['id'],
        account=root_account,
    )
    request.save(session=db_session)
    request_dict = request.to_dict()
    db_session.commit()
    db_session.expunge(request)
    yield request_dict
def test_listing_preparing_transfers(mock_request):
    # The PREPARING request created by the fixture must show up exactly once
    # when listing requests in that state.
    req_sources = list_and_mark_transfer_requests_and_source_replicas(
        rse_collection=RseCollection(),
        request_state=RequestState.PREPARING,
    )
    assert len(req_sources) != 0
    found_requests = list(filter(lambda rws: rws.request_id == mock_request['id'], req_sources.values()))
    assert len(found_requests) == 1
@pytest.mark.noparallel(reason='uses preparer')
@pytest.mark.parametrize("file_config_mock", [{"overrides": [
    ('throttler', 'mode', 'DEST_PER_ACT')
]}], indirect=True)
def test_preparer_setting_request_state_waiting(dest_rse, mock_request, file_config_mock):
    # With a per-destination/activity throttler limit in place, the preparer
    # must park the request in WAITING instead of queueing it.
    set_transfer_limit(
        dest_rse['name'],
        activity=mock_request['activity'],
        max_transfers=1,
        strategy='fifo',
    )
    # Force evaluation so the limit is materialized before the daemon runs.
    list(list_transfer_limits())

    preparer(once=True, transfertools=['mock'], partition_wait_time=0)

    updated_mock_request = get_request(mock_request['id'])
    assert updated_mock_request['state'] == RequestState.WAITING
@pytest.mark.noparallel(reason='uses preparer')
def test_preparer_setting_request_state_queued(mock_request):
    # Without any throttler limit the preparer moves PREPARING -> QUEUED.
    preparer(once=True, transfertools=['mock'], partition_wait_time=0)

    updated_mock_request = get_request(mock_request['id'])
    assert updated_mock_request['state'] == RequestState.QUEUED
@pytest.mark.noparallel(reason='uses preparer')
def test_preparer_setting_request_source(vo, METHOD_NAME, mock_request):
    # The preparer must select the single available replica's RSE as source.
    preparer(once=True, transfertools=['mock'], partition_wait_time=0)

    updated_mock_request = get_request(mock_request['id'])
    assert updated_mock_request['state'] == RequestState.QUEUED
    assert updated_mock_request['source_rse_id'] == METHOD_NAME['id']
@pytest.mark.noparallel(reason='uses preparer')
def test_preparer_for_request_without_source(mock_request_no_source):
    # A request whose DID has no replicas must end up in NO_SOURCES.
    preparer(once=True, transfertools=['mock'], partition_wait_time=0)

    updated_mock_request: "models.Request" = get_request(mock_request_no_source['id'])
    assert updated_mock_request['state'] == RequestState.NO_SOURCES
@pytest.mark.noparallel(reason='uses preparer')
@pytest.mark.parametrize("caches_mock", [{"caches_to_mock": [
    'rucio.core.rse.REGION'
]}], indirect=True)
def test_preparer_without_and_with_mat(METHOD_NAME, dest_rse, mock_request, caches_mock):
    # Source supports only fts, destination only globus: there is no common
    # transfertool, so the preparer must mark the request NO_SOURCES.
    add_rse_attribute(METHOD_NAME['id'], 'fts', 'a')
    add_rse_attribute(dest_rse['id'], 'globus_endpoint_id', 'b')
    # Drop cached RSE info so the freshly added attributes are visible.
    [cache_region] = caches_mock
    cache_region.invalidate()

    preparer(once=True, transfertools=['fts3', 'globus'], partition_wait_time=0)

    updated_mock_request = get_request(mock_request['id'])
    assert updated_mock_request['state'] == RequestState.NO_SOURCES
@pytest.mark.noparallel(reason='uses preparer')
def test_two_sources_one_destination(rse_factory, METHOD_NAME, vo, file, mock_request):
    # Add a second replica on an RSE that is closer (distance 2 vs 5) to the
    # destination; the preparer must pick the closer source.
    _, source_rse2_id = rse_factory.make_mock_rse()
    add_distance(source_rse2_id, mock_request['dest_rse_id'], distance=2)
    add_replicas(rse_id=source_rse2_id, files=[file], account=mock_request['account'])

    # Sanity-check the configured distances before running the daemon.
    src1_distance, src2_distance = (
        get_distances(
            src_rse_id=src_rse,
            dest_rse_id=mock_request['dest_rse_id'],
        )
        for src_rse in (METHOD_NAME['id'], source_rse2_id)
    )

    assert src1_distance and len(src1_distance) == 1 and src1_distance[0]['distance'] == 5
    assert src2_distance and len(src2_distance) == 1 and src2_distance[0]['distance'] == 2

    preparer(once=True, transfertools=['mock'], partition_wait_time=0)

    updated_mock_request = get_request(mock_request['id'])
    assert updated_mock_request['state'] == RequestState.QUEUED
    assert updated_mock_request['source_rse_id'] == source_rse2_id  # distance 2 < 5
def test_get_supported_transfertools_none(vo, rse_factory):
    # RSEs without fts/globus attributes support no transfertool.
    # NOTE(review): both the local variable and the keyword argument
    # "METHOD_NAME" look like mangled placeholders (presumably the source-RSE
    # name/parameter) — confirm against rucio.core.transfer's signature.
    METHOD_NAME, source_rse_id = rse_factory.make_mock_rse()
    dest_rse, dest_rse_id = rse_factory.make_mock_rse()
    transfertools = get_supported_transfertools(METHOD_NAME=RseData(source_rse_id), dest_rse=RseData(dest_rse_id), transfertools=['fts3', 'globus'])
    assert not transfertools
def test_get_supported_transfertools_fts_globus(vo, rse_factory):
    # When both endpoints carry fts AND globus attributes, both transfertools
    # are reported as supported.
    METHOD_NAME, source_rse_id = rse_factory.make_mock_rse()
    dest_rse, dest_rse_id = rse_factory.make_mock_rse()
    add_rse_attribute(source_rse_id, 'fts', 'a')
    add_rse_attribute(dest_rse_id, 'fts', 'b')
    add_rse_attribute(source_rse_id, 'globus_endpoint_id', 'a')
    add_rse_attribute(dest_rse_id, 'globus_endpoint_id', 'b')
    transfertools = get_supported_transfertools(METHOD_NAME=RseData(source_rse_id), dest_rse=RseData(dest_rse_id), transfertools=['fts3', 'globus'])
    assert len(transfertools) == 2
    assert 'fts3' in transfertools
    assert 'globus' in transfertools
1,828 | test assemble xml file | ###############################################################################
#
# Tests for XlsxWriter.
#
# SPDX-License-Identifier: BSD-2-Clause
# Copyright (c), 2013-2023, John McNamara, jmcnamara@cpan.org
#
import unittest
from io import StringIO
from ..helperfunctions import _xml_to_list
from ...worksheet import Worksheet
class TestAssembleWorksheet(unittest.TestCase):
    """
    Test assembling a complete Worksheet file.

    """

    def test_assemble_xml_file(self):
        """Test writing a worksheet with conditional formatting.

        Fix: the method was named METHOD_NAME (a mangled placeholder), so
        unittest's discovery — which only collects methods whose names start
        with "test" — never ran it.  Renamed to test_assemble_xml_file.
        """
        self.maxDiff = None

        fh = StringIO()
        worksheet = Worksheet()
        worksheet._set_filehandle(fh)
        worksheet.select()
        worksheet.index = 0

        # Three Excel-2010-style data-bar rules; each is mirrored by an
        # x14 extension entry (with a matching GUID) in the expected XML.
        worksheet.conditional_format(
            "A1",
            {
                "type": "data_bar",
                "data_bar_2010": True,
            },
        )

        worksheet.conditional_format(
            "A2:B2",
            {
                "type": "data_bar",
                "bar_color": "#63C384",
                "data_bar_2010": True,
            },
        )

        worksheet.conditional_format(
            "A3:C3",
            {
                "type": "data_bar",
                "bar_color": "#FF555A",
                "data_bar_2010": True,
            },
        )

        worksheet._assemble_xml_file()

        exp = _xml_to_list(
            """
                <?xml version="1.0" encoding="UTF-8" standalone="yes"?>
                <worksheet xmlns="http://schemas.openxmlformats.org/spreadsheetml/2006/main" xmlns:r="http://schemas.openxmlformats.org/officeDocument/2006/relationships" xmlns:mc="http://schemas.openxmlformats.org/markup-compatibility/2006" xmlns:x14ac="http://schemas.microsoft.com/office/spreadsheetml/2009/9/ac" mc:Ignorable="x14ac">
                  <dimension ref="A1"/>
                  <sheetViews>
                    <sheetView tabSelected="1" workbookViewId="0"/>
                  </sheetViews>
                  <sheetFormatPr defaultRowHeight="15" x14ac:dyDescent="0.25"/>
                  <sheetData/>
                  <conditionalFormatting sqref="A1">
                    <cfRule type="dataBar" priority="1">
                      <dataBar>
                        <cfvo type="min"/>
                        <cfvo type="max"/>
                        <color rgb="FF638EC6"/>
                      </dataBar>
                      <extLst>
                        <ext xmlns:x14="http://schemas.microsoft.com/office/spreadsheetml/2009/9/main" uri="{B025F937-C7B1-47D3-B67F-A62EFF666E3E}">
                          <x14:id>{DA7ABA51-AAAA-BBBB-0001-000000000001}</x14:id>
                        </ext>
                      </extLst>
                    </cfRule>
                  </conditionalFormatting>
                  <conditionalFormatting sqref="A2:B2">
                    <cfRule type="dataBar" priority="2">
                      <dataBar>
                        <cfvo type="min"/>
                        <cfvo type="max"/>
                        <color rgb="FF63C384"/>
                      </dataBar>
                      <extLst>
                        <ext xmlns:x14="http://schemas.microsoft.com/office/spreadsheetml/2009/9/main" uri="{B025F937-C7B1-47D3-B67F-A62EFF666E3E}">
                          <x14:id>{DA7ABA51-AAAA-BBBB-0001-000000000002}</x14:id>
                        </ext>
                      </extLst>
                    </cfRule>
                  </conditionalFormatting>
                  <conditionalFormatting sqref="A3:C3">
                    <cfRule type="dataBar" priority="3">
                      <dataBar>
                        <cfvo type="min"/>
                        <cfvo type="max"/>
                        <color rgb="FFFF555A"/>
                      </dataBar>
                      <extLst>
                        <ext xmlns:x14="http://schemas.microsoft.com/office/spreadsheetml/2009/9/main" uri="{B025F937-C7B1-47D3-B67F-A62EFF666E3E}">
                          <x14:id>{DA7ABA51-AAAA-BBBB-0001-000000000003}</x14:id>
                        </ext>
                      </extLst>
                    </cfRule>
                  </conditionalFormatting>
                  <pageMargins left="0.7" right="0.7" top="0.75" bottom="0.75" header="0.3" footer="0.3"/>
                  <extLst>
                    <ext xmlns:x14="http://schemas.microsoft.com/office/spreadsheetml/2009/9/main" uri="{78C0D931-6437-407d-A8EE-F0AAD7539E65}">
                      <x14:conditionalFormattings>
                        <x14:conditionalFormatting xmlns:xm="http://schemas.microsoft.com/office/excel/2006/main">
                          <x14:cfRule type="dataBar" id="{DA7ABA51-AAAA-BBBB-0001-000000000001}">
                            <x14:dataBar minLength="0" maxLength="100" border="1" negativeBarBorderColorSameAsPositive="0">
                              <x14:cfvo type="autoMin"/>
                              <x14:cfvo type="autoMax"/>
                              <x14:borderColor rgb="FF638EC6"/>
                              <x14:negativeFillColor rgb="FFFF0000"/>
                              <x14:negativeBorderColor rgb="FFFF0000"/>
                              <x14:axisColor rgb="FF000000"/>
                            </x14:dataBar>
                          </x14:cfRule>
                          <xm:sqref>A1</xm:sqref>
                        </x14:conditionalFormatting>
                        <x14:conditionalFormatting xmlns:xm="http://schemas.microsoft.com/office/excel/2006/main">
                          <x14:cfRule type="dataBar" id="{DA7ABA51-AAAA-BBBB-0001-000000000002}">
                            <x14:dataBar minLength="0" maxLength="100" border="1" negativeBarBorderColorSameAsPositive="0">
                              <x14:cfvo type="autoMin"/>
                              <x14:cfvo type="autoMax"/>
                              <x14:borderColor rgb="FF63C384"/>
                              <x14:negativeFillColor rgb="FFFF0000"/>
                              <x14:negativeBorderColor rgb="FFFF0000"/>
                              <x14:axisColor rgb="FF000000"/>
                            </x14:dataBar>
                          </x14:cfRule>
                          <xm:sqref>A2:B2</xm:sqref>
                        </x14:conditionalFormatting>
                        <x14:conditionalFormatting xmlns:xm="http://schemas.microsoft.com/office/excel/2006/main">
                          <x14:cfRule type="dataBar" id="{DA7ABA51-AAAA-BBBB-0001-000000000003}">
                            <x14:dataBar minLength="0" maxLength="100" border="1" negativeBarBorderColorSameAsPositive="0">
                              <x14:cfvo type="autoMin"/>
                              <x14:cfvo type="autoMax"/>
                              <x14:borderColor rgb="FFFF555A"/>
                              <x14:negativeFillColor rgb="FFFF0000"/>
                              <x14:negativeBorderColor rgb="FFFF0000"/>
                              <x14:axisColor rgb="FF000000"/>
                            </x14:dataBar>
                          </x14:cfRule>
                          <xm:sqref>A3:C3</xm:sqref>
                        </x14:conditionalFormatting>
                      </x14:conditionalFormattings>
                    </ext>
                  </extLst>
                </worksheet>
            """
        )

        got = _xml_to_list(fh.getvalue())

        self.assertEqual(got, exp)
1,829 | filter indices by size | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import logging
from collections import OrderedDict
from typing import Dict, Sequence
import numpy as np
from . import FairseqDataset, LanguagePairDataset
logger = logging.getLogger(__name__)
class RoundRobinZipDatasets(FairseqDataset):
    """Zip multiple :class:`~fairseq.data.FairseqDataset` instances together.

    Shorter datasets are repeated in a round-robin fashion to match the length
    of the longest one.

    Args:
        datasets (Dict[~fairseq.data.FairseqDataset]): a dictionary of
            :class:`~fairseq.data.FairseqDataset` instances.
        eval_key (str, optional): a key used at evaluation time that causes
            this instance to pass-through batches from *datasets[eval_key]*.
    """

    def __init__(self, datasets, eval_key=None):
        super().__init__()
        if isinstance(datasets, dict):
            datasets = OrderedDict(datasets)
        assert isinstance(datasets, OrderedDict)
        assert datasets, "Can't make a RoundRobinZipDatasets out of nothing"
        for dataset in datasets.values():
            assert isinstance(dataset, FairseqDataset)

        self.datasets = datasets
        self.eval_key = eval_key

        self.longest_dataset_key = max(datasets, key=lambda k: len(datasets[k]))
        self.longest_dataset = datasets[self.longest_dataset_key]
        # None until ordered_indices() is called; maps dataset key -> the
        # (possibly filtered) ordered index list for that sub-dataset.
        self._ordered_indices: Dict[str, Sequence[int]] = None

    def _map_index(self, key, index):
        # Wrap the global index into each sub-dataset's own index space
        # (round-robin repetition of the shorter datasets).
        assert (
            self._ordered_indices is not None
        ), "Must call RoundRobinZipDatasets.ordered_indices() first"
        o = self._ordered_indices[key]
        return o[index % len(o)]

    def __getitem__(self, index):
        if self.eval_key is None:
            return OrderedDict(
                [
                    (key, dataset[self._map_index(key, index)])
                    for key, dataset in self.datasets.items()
                ]
            )
        else:
            # at evaluation time it's useful to pass-through batches from a single key
            return self.datasets[self.eval_key][self._map_index(self.eval_key, index)]

    def __len__(self):
        if self._ordered_indices is not None:
            return len(self._ordered_indices[self.longest_dataset_key])
        return len(self.longest_dataset)

    def collater(self, samples):
        """Merge a list of samples to form a mini-batch."""
        if len(samples) == 0:
            return None
        if self.eval_key is None:
            return OrderedDict(
                [
                    (key, dataset.collater([sample[key] for sample in samples]))
                    for key, dataset in self.datasets.items()
                ]
            )
        else:
            # at evaluation time it's useful to pass-through batches from a single key
            return self.datasets[self.eval_key].collater(samples)

    def num_tokens(self, index):
        """Return an example's length (number of tokens), used for batching."""
        # TODO make it configurable whether to use max() or sum() here
        return max(
            dataset.num_tokens(self._map_index(key, index))
            for key, dataset in self.datasets.items()
        )

    def size(self, index):
        """Return an example's size as a float or tuple. This value is used when
        filtering a dataset with ``--max-positions``."""
        return {
            key: dataset.size(self._map_index(key, index))
            for key, dataset in self.datasets.items()
        }

    def ordered_indices(self):
        """Ordered indices for batching."""
        if self._ordered_indices is None:
            # Call the underlying dataset's ordered_indices() here, so that we
            # get the same random ordering as we would have from using the
            # underlying sub-datasets directly.
            self._ordered_indices = OrderedDict(
                [
                    (key, dataset.ordered_indices())
                    for key, dataset in self.datasets.items()
                ]
            )
        return np.arange(len(self))

    def filter_indices_by_size(self, indices, max_positions=None):
        """
        Filter each sub-dataset independently, then update the round robin to work
        on the filtered sub-datasets.

        Fix: this method (and the per-dataset call below) was named
        METHOD_NAME, a mangled placeholder; restored to the FairseqDataset
        API name ``filter_indices_by_size``.
        """

        def _deep_until_language_pair(dataset):
            # Unwrap dataset decorators until the underlying
            # LanguagePairDataset (which implements size filtering) is found.
            if isinstance(dataset, LanguagePairDataset):
                return dataset
            if hasattr(dataset, "tgt_dataset"):
                return _deep_until_language_pair(dataset.tgt_dataset)
            if hasattr(dataset, "dataset"):
                return _deep_until_language_pair(dataset.dataset)
            raise Exception(f"Don't know how to unwrap this dataset: {dataset}")

        if not isinstance(max_positions, dict):
            max_positions = {k: max_positions for k in self.datasets.keys()}
        ignored_some = False
        for key, dataset in self.datasets.items():
            dataset = _deep_until_language_pair(dataset)
            self._ordered_indices[key], ignored = dataset.filter_indices_by_size(
                self._ordered_indices[key], max_positions[key]
            )
            if len(ignored) > 0:
                ignored_some = True
                logger.warning(
                    f"{len(ignored)} samples from {key} have invalid sizes and will be skipped, "
                    f"max_positions={max_positions[key]}, first few sample ids={ignored[:10]}"
                )
        # Since we are modifying in place the _ordered_indices,
        # it's not possible anymore to return valid ignored indices.
        # Hopefully the extra debug information print above should be enough to debug.
        # Ideally we would receive ignore_invalid_inputs so that we could have
        # a proper error message.
        return (np.arange(len(self)), [0] if ignored_some else [])

    @property
    def supports_prefetch(self):
        return all(
            getattr(dataset, "supports_prefetch", False)
            for dataset in self.datasets.values()
        )

    def prefetch(self, indices):
        for key, dataset in self.datasets.items():
            dataset.prefetch([self._map_index(key, index) for index in indices])
1,830 | calverted years | from _typeshed import Incomplete
from collections.abc import Generator
from typing import ClassVar
MON: Incomplete
TUE: Incomplete
WED: Incomplete
THU: Incomplete
FRI: Incomplete
SAT: Incomplete
SUN: Incomplete
ISO_MON: Incomplete
ISO_TUE: Incomplete
ISO_WED: Incomplete
ISO_THU: Incomplete
ISO_FRI: Incomplete
ISO_SAT: Incomplete
ISO_SUN: Incomplete
# Normalize *day*; per the signature it converts unless keep_datetime is set
# (stub — behavior presumed from the real implementation, confirm there).
def cleaned_date(day, keep_datetime: bool = False): ...
# Generator over the dates between *start* and *end* (stub).
def daterange(start, end) -> Generator[Incomplete, None, None]: ...
class ChristianMixin:
    """Mixin declaring the Christian holidays a calendar can opt into.

    The ``include_*`` class flags toggle individual holidays and the
    ``*_label`` attributes override their display names; the ``get_*``
    methods compute the movable feasts for a given year (stub declarations).
    """
    EASTER_METHOD: Incomplete
    include_epiphany: ClassVar[bool]
    include_clean_monday: ClassVar[bool]
    include_annunciation: ClassVar[bool]
    include_fat_tuesday: ClassVar[bool]
    fat_tuesday_label: ClassVar[str | None]
    include_ash_wednesday: ClassVar[bool]
    ash_wednesday_label: ClassVar[str]
    include_palm_sunday: ClassVar[bool]
    include_holy_thursday: ClassVar[bool]
    holy_thursday_label: ClassVar[str]
    include_good_friday: ClassVar[bool]
    good_friday_label: ClassVar[str]
    include_easter_monday: ClassVar[bool]
    include_easter_saturday: ClassVar[bool]
    easter_saturday_label: ClassVar[str]
    include_easter_sunday: ClassVar[bool]
    include_all_saints: ClassVar[bool]
    include_immaculate_conception: ClassVar[bool]
    immaculate_conception_label: ClassVar[str]
    include_christmas: ClassVar[bool]
    christmas_day_label: ClassVar[str]
    include_christmas_eve: ClassVar[bool]
    include_ascension: ClassVar[bool]
    include_assumption: ClassVar[bool]
    include_whit_sunday: ClassVar[bool]
    whit_sunday_label: ClassVar[str]
    include_whit_monday: ClassVar[bool]
    whit_monday_label: ClassVar[str]
    include_corpus_christi: ClassVar[bool]
    include_boxing_day: ClassVar[bool]
    boxing_day_label: ClassVar[str]
    include_all_souls: ClassVar[bool]
    # Movable feasts, all derived from the Easter date for the given year.
    def get_fat_tuesday(self, year): ...
    def get_ash_wednesday(self, year): ...
    def get_palm_sunday(self, year): ...
    def get_holy_thursday(self, year): ...
    def get_good_friday(self, year): ...
    def get_clean_monday(self, year): ...
    def get_easter_saturday(self, year): ...
    def get_easter_sunday(self, year): ...
    def get_easter_monday(self, year): ...
    def get_ascension_thursday(self, year): ...
    def get_whit_monday(self, year): ...
    def get_whit_sunday(self, year): ...
    def get_corpus_christi(self, year): ...
    def shift_christmas_boxing_days(self, year): ...
    def get_variable_days(self, year): ...
class WesternMixin(ChristianMixin):
    """Christian holidays computed with the Western (Gregorian) Easter method."""
    EASTER_METHOD: Incomplete
    WEEKEND_DAYS: Incomplete
class OrthodoxMixin(ChristianMixin):
    """Christian holidays computed with the Orthodox Easter method."""
    EASTER_METHOD: Incomplete
    WEEKEND_DAYS: Incomplete
    include_orthodox_christmas: ClassVar[bool]
    orthodox_christmas_day_label: ClassVar[str]
    def get_fixed_holidays(self, year): ...
class LunarMixin:
    """Helper mixin for converting lunar-calendar dates (stub)."""
    @staticmethod
    def lunar(year, month, day): ...
class ChineseNewYearMixin(LunarMixin):
    """Mixin declaring the Chinese New Year holidays and their
    Sunday-shifting behavior (stub declarations)."""
    include_chinese_new_year_eve: ClassVar[bool]
    chinese_new_year_eve_label: ClassVar[str]
    include_chinese_new_year: ClassVar[bool]
    chinese_new_year_label: ClassVar[str]
    include_chinese_second_day: ClassVar[bool]
    chinese_second_day_label: ClassVar[str]
    include_chinese_third_day: ClassVar[bool]
    chinese_third_day_label: ClassVar[str]
    shift_sunday_holidays: ClassVar[bool]
    shift_start_cny_sunday: ClassVar[bool]
    def get_chinese_new_year(self, year): ...
    def get_variable_days(self, year): ...
    def get_shifted_holidays(self, dates) -> Generator[Incomplete, None, None]: ...
    def get_calendar_holidays(self, year): ...
class CalverterMixin:
    """Mixin converting Gregorian years into an alternative calendar system
    via the configured ``conversion_method`` (stub declarations).

    Fix: restored the mangled placeholder ``METHOD_NAME`` to
    ``calverted_years``, the method name used by the real implementation.
    """
    conversion_method: Incomplete
    ISLAMIC_HOLIDAYS: Incomplete
    def __init__(self, *args, **kwargs) -> None: ...
    def converted(self, year): ...
    def calverted_years(self, year): ...
    def get_islamic_holidays(self): ...
    def get_delta_islamic_holidays(self, year) -> None: ...
    def get_variable_days(self, year): ...
class IslamicMixin(CalverterMixin):
    """Mixin declaring the Islamic holidays a calendar can opt into
    (stub declarations)."""
    WEEKEND_DAYS: Incomplete
    conversion_method: Incomplete
    include_prophet_birthday: ClassVar[bool]
    include_day_after_prophet_birthday: ClassVar[bool]
    include_start_ramadan: ClassVar[bool]
    include_eid_al_fitr: ClassVar[bool]
    length_eid_al_fitr: int
    eid_al_fitr_label: ClassVar[str]
    include_eid_al_adha: ClassVar[bool]
    eid_al_adha_label: ClassVar[str]
    length_eid_al_adha: int
    include_day_of_sacrifice: ClassVar[bool]
    day_of_sacrifice_label: ClassVar[str]
    include_islamic_new_year: ClassVar[bool]
    include_laylat_al_qadr: ClassVar[bool]
    include_nuzul_al_quran: ClassVar[bool]
    def get_islamic_holidays(self): ...
class CoreCalendar:
    """Core holiday/working-day calendar API (stub declarations).

    Provides holiday enumeration per year plus working-day arithmetic
    (adding/subtracting working days, deltas, nth-weekday lookups) and
    iCal export.
    """
    FIXED_HOLIDAYS: Incomplete
    WEEKEND_DAYS: Incomplete
    def __init__(self) -> None: ...
    # NOTE(review): takes `cls` but carries no @classmethod decorator here —
    # presumably a classmethod in the real implementation; confirm there.
    def name(cls): ...
    def get_fixed_holidays(self, year): ...
    def get_variable_days(self, year): ...
    def get_calendar_holidays(self, year): ...
    def holidays(self, year: Incomplete | None = None): ...
    def get_holiday_label(self, day): ...
    def holidays_set(self, year: Incomplete | None = None): ...
    def get_weekend_days(self): ...
    def is_working_day(self, day, extra_working_days: Incomplete | None = None, extra_holidays: Incomplete | None = None): ...
    def is_holiday(self, day, extra_holidays: Incomplete | None = None): ...
    def add_working_days(
        self,
        day,
        delta,
        extra_working_days: Incomplete | None = None,
        extra_holidays: Incomplete | None = None,
        keep_datetime: bool = False,
    ): ...
    def sub_working_days(
        self,
        day,
        delta,
        extra_working_days: Incomplete | None = None,
        extra_holidays: Incomplete | None = None,
        keep_datetime: bool = False,
    ): ...
    def find_following_working_day(self, day): ...
    @staticmethod
    def get_nth_weekday_in_month(year, month, weekday, n: int = 1, start: Incomplete | None = None): ...
    @staticmethod
    def get_last_weekday_in_month(year, month, weekday): ...
    @staticmethod
    def get_iso_week_date(year, week_nb, weekday=1): ...
    @staticmethod
    def get_first_weekday_after(day, weekday): ...
    def get_working_days_delta(
        self,
        start,
        end,
        include_start: bool = False,
        extra_working_days: Incomplete | None = None,
        extra_holidays: Incomplete | None = None,
    ): ...
    def export_to_ical(self, period=[2000, 2030], target_path: Incomplete | None = None): ...
class Calendar(CoreCalendar):
    """Base calendar adding New Year's / Labour Day toggles on top of
    CoreCalendar (stub declarations)."""
    include_new_years_day: ClassVar[bool]
    include_new_years_eve: ClassVar[bool]
    shift_new_years_day: ClassVar[bool]
    include_labour_day: ClassVar[bool]
    labour_day_label: ClassVar[str]
    def __init__(self, **kwargs) -> None: ...
    def get_fixed_holidays(self, year): ...
    def get_variable_days(self, year): ...
# Calendar using the Western (Gregorian) Easter computation.
class WesternCalendar(WesternMixin, Calendar): ...
# Calendar using the Orthodox Easter computation.
class OrthodoxCalendar(OrthodoxMixin, Calendar): ...
class ChineseNewYearCalendar(ChineseNewYearMixin, Calendar):
    """Calendar including the Chinese New Year holiday set."""
    WEEKEND_DAYS: Incomplete
# Calendar including the Islamic holiday set.
class IslamicCalendar(IslamicMixin, Calendar): ...
class IslamoWesternCalendar(IslamicMixin, WesternMixin, Calendar):
FIXED_HOLIDAYS: Incomplete |
1,831 | test trim nodes | import numpy as np
from openpnm._skgraph import generators as gen
from openpnm._skgraph.visualization import plot_edges, plot_nodes
import openpnm._skgraph.operations as ops
class SKGROperationsTest:
    """Tests for openpnm._skgraph.operations (join/add/trim/split).

    Fix: the node-trimming test was named METHOD_NAME (a mangled
    placeholder), so neither pytest collection nor this module's own
    ``startswith('test')`` runner loop ever executed it.  Renamed to
    test_trim_nodes per its content (mirrors test_trim_edges).
    """

    def setup_class(self):
        pass

    def teardown_class(self):
        pass

    def test_join(self):
        # Joining two stacked cubic grids without L_max only keeps the
        # original edges of both graphs.
        g1 = gen.cubic([3, 3, 3])
        g2 = gen.cubic([3, 3, 3])
        g2['node.coords'] += np.array([0, 0, 3])
        g3 = ops.join(g1, g2)
        assert g3['edge.conns'].shape[0] == 108
        # L_max=1.1 additionally connects facing nodes across the seam.
        g1 = gen.cubic([3, 3, 3])
        g2 = gen.cubic([3, 3, 3])
        g2['node.coords'] += np.array([0, 0, 3])
        g3 = ops.join(g1, g2, L_max=1.1)
        assert g3['edge.conns'].shape[0] == 117
        # A larger L_max also picks up diagonal neighbors across the seam.
        g1 = gen.cubic([3, 3, 3])
        g2 = gen.cubic([3, 3, 3])
        g2['node.coords'] += np.array([0, 0, 3])
        g3 = ops.join(g1, g2, L_max=1.9)
        assert g3['edge.conns'].shape[0] == 157
        # Properties present on only one graph are padded with NaN/False
        # for the other graph's nodes and edges.
        g1 = gen.cubic([3, 3, 3])
        g1['node.num'] = np.arange(27)
        g2 = gen.cubic([3, 3, 3])
        g2['node.coords'] += np.array([0, 0, 3])
        g2['node.flag'] = np.ones(27, dtype=bool)
        g2['edge.flag'] = np.ones(54, dtype=bool)
        g3 = ops.join(g1, g2, 1.1)
        assert np.any(np.isnan(g3['node.num']))
        assert not np.all(g3['node.flag'])
        assert not np.all(g3['edge.flag'])
        # ax = plot_edges(g3)
        # ax = plot_nodes(g3, ax=ax)

    def test_add_nodes(self):
        g = gen.cubic([3, 3, 3])
        g = ops.add_nodes(g, [4, 4, 4])
        assert g['node.coords'].shape[0] == 28
        assert np.all(g['node.coords'][-1, :] == [4, 4, 4])
        # Existing per-node arrays are extended with type-dependent filler.
        g['node.float'] = np.ones(28, dtype=float)
        g['node.int'] = np.ones(28, dtype=int)
        g['node.bool'] = np.ones(28, dtype=bool)
        g = ops.add_nodes(g, [5, 5, 5])
        assert g['node.float'].shape[0] == 29
        assert np.isnan(g['node.float'][-1])
        assert g['node.int'][-1] < 0  # value is platform dependent -2147483648
        assert g['node.bool'][-1] == False

    def test_add_edges(self):
        g = gen.cubic([3, 3, 3])
        g = ops.add_edges(g, [2, 4])
        assert g['edge.conns'].shape[0] == 55
        assert np.all(g['edge.conns'][-1, :] == [2, 4])
        # Existing per-edge arrays are extended with type-dependent filler.
        g['edge.float'] = np.ones(55, dtype=float)
        g['edge.int'] = np.ones(55, dtype=int)
        g['edge.bool'] = np.ones(55, dtype=bool)
        g = ops.add_edges(g, [2, 4])
        assert g['edge.float'].shape[0] == 56
        assert np.isnan(g['edge.float'][-1])
        assert g['edge.int'][-1] < 0  # value is platform dependent -2147483648
        assert g['edge.bool'][-1] == False

    def test_trim_edges(self):
        net = gen.cubic([3, 3, 3])
        net['edge.label'] = np.ones(54, dtype=bool)
        assert net['node.coords'].shape[0] == 27
        assert net['edge.conns'].shape[0] == 54
        assert np.all(net['edge.conns'][0] == [0, 1])
        # Trimming an edge removes it (and its label entry) but keeps nodes.
        net = ops.trim_edges(net, 0)
        assert net['node.coords'].shape[0] == 27
        assert net['edge.conns'].shape[0] == 53
        assert net['edge.label'].shape[0] == 53
        assert np.all(net['edge.conns'][0] == [1, 2])
        net = ops.trim_edges(net, [20, 30])
        assert net['node.coords'].shape[0] == 27
        assert net['edge.conns'].shape[0] == 51
        assert net['edge.label'].shape[0] == 51
        # ax = plot_edges(net['edge.conns'], net['node.coords'])
        # ax = plot_nodes(net['node.coords'], ax=ax)

    def test_trim_nodes(self):
        net = gen.cubic([4, 4, 1])
        net['edge.label'] = np.ones(24, dtype=bool)
        assert net['node.coords'].shape[0] == 16
        assert net['edge.conns'].shape[0] == 24
        assert np.all(net['edge.conns'][0] == [0, 1])
        # Trimming a node also removes all edges attached to it.
        net = ops.trim_nodes(net, 0)
        assert net['node.coords'].shape[0] == 15
        assert net['edge.conns'].shape[0] == 22
        assert net['edge.label'].shape[0] == 22
        assert np.all(net['edge.conns'][0] == [0, 1])
        net = ops.trim_nodes(net, [5, 10])
        assert net['node.coords'].shape[0] == 13
        assert net['edge.conns'].shape[0] == 15
        assert net['edge.label'].shape[0] == 15
        # ax = plot_edges(net['edge.conns'], net['node.coords'])
        # ax = plot_nodes(net['node.coords'], ax=ax)

    def test_split_edges(self):
        net = gen.cubic([4, 4, 1])
        net['edge.label'] = np.ones(24, dtype=bool)
        # Splitting doubles the edge count and adds one new node per
        # original edge (its midpoint).
        conns = ops.split_edges(net)[0]
        assert conns.shape[0] == 2*net['edge.conns'].shape[0]
        conns, coords = ops.split_edges(net)
        assert coords.shape[0] == (net['node.coords'].shape[0]
                                   + net['edge.conns'].shape[0])
        # ax = plot_edges(net, color_by=np.arange(conns.shape[0]))
        # ax = plot_nodes(net, ax=ax)
if __name__ == '__main__':
    # Ad-hoc runner: execute every test method of the suite in definition
    # order when the module is launched directly.
    t = SKGROperationsTest()
    t.setup_class()
    self = t  # convenience alias for interactive debugging sessions
    for name in t.__dir__():
        if name.startswith('test'):
            print(f'Running test: {name}')
            getattr(t, name)()
1,832 | packets to vectors | #!/usr/bin/env python
#
# Copyright 2013 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# SPDX-License-Identifier: GPL-3.0-or-later
#
#
from gnuradio import gr
import pmt
def make_lengthtags(lengths, offsets, tagname='length', vlen=1):
    # Build one gr.tag_t per (offset, length) pair; both values are divided
    # by the vector length so tags address vector items, not samples.
    tags = []
    assert(len(offsets) == len(lengths))
    for offset, length in zip(offsets, lengths):
        tag = gr.tag_t()
        tag.offset = offset // vlen
        tag.key = pmt.string_to_symbol(tagname)
        tag.value = pmt.from_long(length // vlen)
        tags.append(tag)
    return tags
def string_to_vector(string):
    """Return the list of integer code points, one per character of *string*."""
    return [ord(char) for char in string]
def strings_to_vectors(strings, tsb_tag_key):
    # Convert each string into a code-point vector, then pack them all into
    # one data stream plus the matching tagged-stream-boundary tags.
    # NOTE(review): METHOD_NAME is a mangled placeholder — it resolves to the
    # packing function defined at the bottom of this module (presumably
    # packets_to_vectors); confirm and restore both names together.
    vs = [string_to_vector(string) for string in strings]
    return METHOD_NAME(vs, tsb_tag_key)
def vector_to_string(v):
    """Return the string obtained by mapping each integer in *v* to its character."""
    return ''.join(map(chr, v))
def vectors_to_strings(data, tags, tsb_tag_key):
    # Split the tagged stream back into packets and decode each packet's
    # code points into a string.
    packets = vectors_to_packets(data, tags, tsb_tag_key)
    return [vector_to_string(packet) for packet in packets]
def count_bursts(data, tags, tsb_tag_key, vlen=1):
    # Count contiguous bursts of packets in a tagged stream: a burst is an
    # unbroken run of packets; any sample outside a packet ends the burst.
    lengthtags = [t for t in tags
                  if pmt.symbol_to_string(t.key) == tsb_tag_key]
    lengths = {}
    for tag in lengthtags:
        if tag.offset in lengths:
            raise ValueError(
                "More than one tags with key {0} with the same offset={1}."
                .format(tsb_tag_key, tag.offset))
        # Tag values count vector items; scale by vlen to count samples.
        lengths[tag.offset] = pmt.to_long(tag.value) * vlen
    in_burst = False
    in_packet = False
    packet_length = None
    packet_pos = None
    burst_count = 0
    for pos in range(len(data)):
        if pos in lengths:
            # A new packet tag inside a still-open packet is malformed input.
            if in_packet:
                print("Got tag at pos {0} current packet_pos is {1}".format(
                    pos, packet_pos))
                raise Exception("Received packet tag while in packet.")
            packet_pos = -1
            packet_length = lengths[pos]
            in_packet = True
            # A packet that starts outside a burst opens a new burst.
            if not in_burst:
                burst_count += 1
            in_burst = True
        elif not in_packet:
            in_burst = False
        if in_packet:
            packet_pos += 1
            if packet_pos == packet_length - 1:
                in_packet = False
                packet_pos = None
    return burst_count
def vectors_to_packets(data, tags, tsb_tag_key, vlen=1):
    """Split *data* into packets according to its *tsb_tag_key* length tags.

    Every packet must be announced by a tag at its first item, and the
    packets must tile the whole of *data* with no gaps or overlap.
    """
    lengthtags = [t for t in tags
                  if pmt.symbol_to_string(t.key) == tsb_tag_key]
    lengths = {}
    for tag in lengthtags:
        if tag.offset in lengths:
            raise ValueError(
                "More than one tags with key {0} with the same offset={1}."
                .format(tsb_tag_key, tag.offset))
        # Tag values count vectors; convert back to items.
        lengths[tag.offset] = pmt.to_long(tag.value) * vlen
    if 0 not in lengths:
        raise ValueError("There is no tag with key {0} and an offset of 0"
                         .format(tsb_tag_key))
    pos = 0
    packets = []
    while pos < len(data):
        if pos not in lengths:
            raise ValueError("There is no tag with key {0} and an offset of {1}."
                             "We were expecting one."
                             .format(tsb_tag_key, pos))
        length = lengths[pos]
        if length == 0:
            raise ValueError("Packets cannot have zero length.")
        if pos + length > len(data):
            raise ValueError("The final packet is incomplete.")
        packets.append(data[pos: pos + length])
        pos += length
    return packets
def METHOD_NAME(packets, tsb_tag_key, vlen=1):
    """Return a single data vector and a list of tags.

    If used with blocks.vector_source_X, this set of data and tags will
    produce a correct tagged stream: each packet is tagged at its first
    vector with *tsb_tag_key* carrying the packet length in vectors.

    Fix: removed a stray trailing "|" (table-extraction artifact) after
    the final return statement, which made the function a syntax error.
    """
    tags = []
    data = []
    offset = 0
    for packet in packets:
        data.extend(packet)
        tag = gr.tag_t()
        tag.offset = offset // vlen
        tag.key = pmt.string_to_symbol(tsb_tag_key)
        tag.value = pmt.from_long(len(packet) // vlen)
        tags.append(tag)
        offset = offset + len(packet)
    return data, tags
1,833 | test rabit ops ipv6 | import re
import sys
import numpy as np
import pytest
import xgboost as xgb
from xgboost import RabitTracker, collective
from xgboost import testing as tm
# Dask-based distributed tests are unsupported on Windows; skip the module.
if sys.platform.startswith("win"):
    pytest.skip("Skipping dask tests on Windows", allow_module_level=True)
def test_rabit_tracker():
    # Single-worker tracker: a broadcast from rank 0 must echo back verbatim.
    tracker = RabitTracker(host_ip="127.0.0.1", n_workers=1)
    tracker.start(1)
    with xgb.collective.CommunicatorContext(**tracker.worker_envs()):
        ret = xgb.collective.broadcast("test1234", 0)
        assert str(ret) == "test1234"
@pytest.mark.skipif(**tm.not_linux())
def test_socket_error():
    # Point the worker at port 0 with a single retry: it must fail fast
    # with a "connection refused" ValueError instead of hanging.
    tracker = RabitTracker(host_ip="127.0.0.1", n_workers=1)
    tracker.start(1)
    env = tracker.worker_envs()
    env["DMLC_TRACKER_PORT"] = 0
    env["DMLC_WORKER_CONNECT_RETRY"] = 1
    with pytest.raises(ValueError, match="127.0.0.1:0\n.*refused"):
        with xgb.collective.CommunicatorContext(**env):
            pass
def run_rabit_ops(client, n_workers):
    """Run allreduce (SUM and MAX) across all dask workers of *client*."""
    from xgboost.dask import CommunicatorContext, _get_dask_config, _get_rabit_args
    workers = tm.get_client_workers(client)
    rabit_args = client.sync(_get_rabit_args, len(workers), _get_dask_config(), client)
    # The driver process itself must not be inside a communicator yet.
    assert not collective.is_distributed()
    n_workers_from_dask = len(workers)
    assert n_workers == n_workers_from_dask
    def local_test(worker_id):
        # Runs on each worker: SUM of ones == n_workers, MAX of ids == last id.
        with CommunicatorContext(**rabit_args):
            a = 1
            assert collective.is_distributed()
            a = np.array([a])
            reduced = collective.allreduce(a, collective.Op.SUM)
            assert reduced[0] == n_workers
            worker_id = np.array([worker_id])
            reduced = collective.allreduce(worker_id, collective.Op.MAX)
            assert reduced == n_workers - 1
            return 1
    futures = client.map(local_test, range(len(workers)), workers=workers)
    results = client.gather(futures)
    assert sum(results) == n_workers
@pytest.mark.skipif(**tm.no_dask())
def test_rabit_ops():
    # Exercise the collective ops on a 3-worker local dask cluster.
    from distributed import Client, LocalCluster
    n_workers = 3
    with LocalCluster(n_workers=n_workers) as cluster:
        with Client(cluster) as client:
            run_rabit_ops(client, n_workers)
def run_broadcast(client):
    """Broadcast a constant from rank 0; every worker must receive it."""
    from xgboost.dask import _get_dask_config, _get_rabit_args
    workers = tm.get_client_workers(client)
    rabit_args = client.sync(_get_rabit_args, len(workers), _get_dask_config(), client)
    def local_test(worker_id):
        with collective.CommunicatorContext(**rabit_args):
            res = collective.broadcast(17, 0)
            return res
    futures = client.map(local_test, range(len(workers)), workers=workers)
    results = client.gather(futures)
    np.testing.assert_allclose(np.array(results), 17)
@pytest.mark.skipif(**tm.no_dask())
def test_broadcast():
    # Broadcast smoke test on a 3-worker local dask cluster.
    from distributed import Client, LocalCluster
    n_workers = 3
    with LocalCluster(n_workers=n_workers) as cluster:
        with Client(cluster) as client:
            run_broadcast(client)
@pytest.mark.skipif(**tm.no_ipv6())
@pytest.mark.skipif(**tm.no_dask())
def METHOD_NAME():
    # Same collective-op checks, but with the scheduler bound to the IPv6
    # loopback address so the rabit tracker must handle IPv6.
    import dask
    from distributed import Client, LocalCluster
    n_workers = 3
    with dask.config.set({"xgboost.scheduler_address": "[::1]"}):
        with LocalCluster(n_workers=n_workers, host="[::1]") as cluster:
            with Client(cluster) as client:
                run_rabit_ops(client, n_workers)
def test_rank_assignment() -> None:
    """Worker rank must match the worker id embedded in DMLC_TASK_ID.

    Fix: removed a stray trailing "|" (table-extraction artifact) after the
    final client.gather call, which was a syntax error.
    """
    from distributed import Client, LocalCluster
    def local_test(worker_id):
        with xgb.dask.CommunicatorContext(**args) as ctx:
            task_id = ctx["DMLC_TASK_ID"]
            matched = re.search(".*-([0-9]).*", task_id)
            rank = xgb.collective.get_rank()
            # As long as the number of workers is lesser than 10, rank and worker id
            # should be the same
            assert rank == int(matched.group(1))
    with LocalCluster(n_workers=8) as cluster:
        with Client(cluster) as client:
            workers = tm.get_client_workers(client)
            args = client.sync(
                xgb.dask._get_rabit_args,
                len(workers),
                None,
                client,
            )
            futures = client.map(local_test, range(len(workers)), workers=workers)
            client.gather(futures)
1,834 | fetch production | #!/usr/bin/env python3
from datetime import datetime
from logging import Logger, getLogger
from typing import Optional
from requests import Session
from .lib import IN, web, zonekey
from .lib.exceptions import ParserException
def fetch_consumption(
    zone_key: str = "IN-KA",
    session: Optional[Session] = None,
    target_datetime: Optional[datetime] = None,
    logger: Logger = getLogger(__name__),
) -> dict:
    """Fetch Karnataka consumption.

    Scrapes the live demand value from the KPTCL SLDC front page and
    returns an electricityMap-style consumption dict. Live data only.
    """
    if target_datetime:
        raise NotImplementedError("This parser is not yet able to parse past dates")
    zonekey.assert_zone_key(zone_key, "IN-KA")
    html = web.get_response_soup(zone_key, "http://kptclsldc.in/Default.aspx", session)
    # Page timestamp (span "Label6") and current demand (span "Label5").
    india_date_time = IN.read_datetime_from_span_id(html, "Label6", "DD/MM/YYYY HH:mm")
    demand_value = IN.read_value_from_span_id(html, "Label5")
    data = {
        "zoneKey": zone_key,
        "datetime": india_date_time.datetime,
        "consumption": demand_value,
        "source": "kptclsldc.in",
    }
    return data
def METHOD_NAME(
    zone_key: str = "IN-KA",
    session: Optional[Session] = None,
    target_datetime: Optional[datetime] = None,
    logger: Logger = getLogger(__name__),
) -> dict:
    """Fetch Karnataka production.

    Scrapes the KPTCL SLDC state-generation and NCEP pages, sums the
    per-plant readings into production modes, and returns an
    electricityMap-style production dict. Live data only.
    """
    if target_datetime:
        raise NotImplementedError("This parser is not yet able to parse past dates")
    zonekey.assert_zone_key(zone_key, "IN-KA")
    html = web.get_response_soup(zone_key, "http://kptclsldc.in/StateGen.aspx", session)
    india_date_time = IN.read_datetime_from_span_id(
        html, "lbldate", "DD/MM/YYYY HH:mm:ss"
    )
    # RTPS Production: https://en.wikipedia.org/wiki/Raichur_Thermal_Power_Station
    rtps_value = IN.read_value_from_span_id(html, "lblrtptot")
    # BTPS Production: https://en.wikipedia.org/wiki/Bellary_Thermal_Power_station
    btps_value = IN.read_value_from_span_id(html, "lblbtptot")
    # YTPS Production: https://en.wikipedia.org/wiki/Yermarus_Thermal_Power_Station
    ytps_value = IN.read_value_from_span_id(html, "ytptot")
    # UPCL Production: https://en.wikipedia.org/wiki/Udupi_Power_Plant
    upcl_value = IN.read_value_from_span_id(html, "lblupctot")
    # JINDAl Production: https://en.wikipedia.org/wiki/JSW_Vijayanagar_Power_Station
    jindal_value = IN.read_value_from_span_id(html, "lbljintot")
    # Coal Production
    coal_value = rtps_value + btps_value + ytps_value + upcl_value + jindal_value
    # Sharavati Production: Sharavati Hydroelectric
    sharavati_value = IN.read_value_from_span_id(html, "lblshvytot")
    # Nagjhari Production: Kalinadi-Nagjhari Hydroelectric
    nagjhari_value = IN.read_value_from_span_id(html, "lblngjtot")
    # Varahi Production: https://en.wikipedia.org/wiki/Varahi_River#Varahi_Hydro-electric_Project
    varahi_value = IN.read_value_from_span_id(html, "lblvrhtot")
    # Kodsalli Production: Kalinadi Kodasalli Hydroelectric
    kodsalli_value = IN.read_value_from_span_id(html, "lblkdsltot")
    # Kadra Production: https://en.wikipedia.org/wiki/Kadra_Dam
    kadra_value = IN.read_value_from_span_id(html, "lblkdrtot")
    # GERUSOPPA production: Gerusoppa Dam
    gerusoppa_value = IN.read_value_from_span_id(html, "lblgrsptot")
    # JOG production: https://en.wikipedia.org/wiki/Jog_Falls
    jog_value = IN.read_value_from_span_id(html, "lbljogtot")
    # LPH Production: Linganamakki Dam
    lph_value = IN.read_value_from_span_id(html, "lbllphtot")
    # Supa production: https://en.wikipedia.org/wiki/Supa_Dam
    supa_value = IN.read_value_from_span_id(html, "lblsupatot")
    # SHIMSHA: https://en.wikipedia.org/wiki/Shimsha#Power_generation
    shimsha_value = IN.read_value_from_span_id(html, "lblshimtot")
    # SHIVASAMUDRA: https://en.wikipedia.org/wiki/Shivanasamudra_Falls#Power_generation
    shivasamudra_value = IN.read_value_from_span_id(html, "lblshivtot")
    # MANIDAM: Mani Dam Hydroelectric
    manidam_value = IN.read_value_from_span_id(html, "lblmanitot")
    # MUNRABAD: Munirabad Hydroelectric
    munrabad_value = IN.read_value_from_span_id(html, "lblmbdtot")
    # BHADRA: https://en.wikipedia.org/wiki/Bhadra_Dam
    bhadra_value = IN.read_value_from_span_id(html, "lblbdratot")
    # GHATAPRABHA: Ghataprabha Hydroelectric
    ghataprabha_value = IN.read_value_from_span_id(html, "lblgtprtot")
    # ALMATTI: https://en.wikipedia.org/wiki/Almatti_Dam
    almatti_value = IN.read_value_from_span_id(html, "lblalmttot")
    # CGS (Central Generating Stations) Production
    # TODO: Search CGS production type
    cgs_value = IN.read_value_from_span_id(html, "lblcgs")
    # NCEP (Non-Conventional Energy Production)
    ncep_html = web.get_response_soup(
        zone_key, "http://kptclsldc.in/StateNCEP.aspx", session
    )
    ncep_date_time = IN.read_datetime_from_span_id(
        ncep_html, "Label1", "DD/MM/YYYY HH:mm:ss"
    )
    # Check ncep date is similar than state gen date
    if abs((india_date_time - ncep_date_time).total_seconds()) > 600:
        raise ParserException("IN-KA", "NCEP or State datetime is not valid")
    # cogen type is sugarcane bagasee. Proof in Issue #1867
    cogen_value = IN.read_value_from_span_id(ncep_html, "lbl_tc")
    biomass_value = IN.read_value_from_span_id(ncep_html, "lbl_tb")
    # cogen_value is generated from sugarcane bagasse
    biomass_value += cogen_value
    mini_hydro_value = IN.read_value_from_span_id(ncep_html, "lbl_tm")
    wind_value = IN.read_value_from_span_id(ncep_html, "lbl_tw")
    solar_value = IN.read_value_from_span_id(ncep_html, "lbl_kar_slr")
    # Hydro production
    hydro_value = (
        sharavati_value
        + nagjhari_value
        + varahi_value
        + kodsalli_value
        + kadra_value
        + gerusoppa_value
        + jog_value
        + lph_value
        + supa_value
        + shimsha_value
        + shivasamudra_value
        + manidam_value
        + munrabad_value
        + bhadra_value
        + ghataprabha_value
        + almatti_value
        + mini_hydro_value
    )
    # Unknown production
    unknown_value = cgs_value
    data = {
        "zoneKey": zone_key,
        "datetime": india_date_time.datetime,
        "production": {
            "biomass": biomass_value,
            "coal": coal_value,
            "gas": 0.0,
            "hydro": hydro_value,
            "nuclear": 0.0,
            "oil": 0.0,
            "solar": solar_value,
            "wind": wind_value,
            "geothermal": 0.0,
            "unknown": unknown_value,
        },
        "storage": {"hydro": 0.0},
        "source": "kptclsldc.in",
    }
    return data
if __name__ == "__main__":
    # Manual smoke test: print both parsers' live output for Karnataka.
    # Fix: removed a stray trailing "|" (table-extraction artifact) after
    # the final print call, which was a syntax error.
    session = Session()
    print(METHOD_NAME("IN-KA", session))
    print(fetch_consumption("IN-KA", session))
1,835 | get user display name | """ProtocolEngine-based Labware core implementations."""
from typing import List, Optional, cast
from opentrons_shared_data.labware.dev_types import (
LabwareParameters as LabwareParametersDict,
LabwareDefinition as LabwareDefinitionDict,
)
from opentrons_shared_data.labware.labware_definition import LabwareRole
from opentrons.protocol_engine.errors import LabwareNotOnDeckError, ModuleNotOnDeckError
from opentrons.protocol_engine.clients import SyncClient as ProtocolEngineClient
from opentrons.types import DeckSlotName, Point
from ..labware import AbstractLabware, LabwareLoadParams
from .well import WellCore
class LabwareCore(AbstractLabware[WellCore]):
    """Labware API core using a ProtocolEngine.

    Fix: removed a stray trailing "|" (table-extraction artifact) after the
    final ``return None``, which was a syntax error. Added docstrings to the
    previously undocumented methods.

    Args:
        labware_id: ProtocolEngine ID of the loaded labware.
        engine_client: ProtocolEngine synchronous client.
    """
    def __init__(self, labware_id: str, engine_client: ProtocolEngineClient) -> None:
        self._labware_id = labware_id
        self._engine_client = engine_client
        # The definition and user label are fixed once loaded, so cache them.
        labware_state = engine_client.state.labware
        self._definition = labware_state.get_definition(labware_id)
        self._user_display_name = labware_state.get_display_name(labware_id)
    @property
    def labware_id(self) -> str:
        """The labware's unique ProtocolEngine ID."""
        return self._labware_id
    @property
    def highest_z(self) -> float:
        """The z-coordinate of the tallest single point anywhere on the labware."""
        return self._engine_client.state.geometry.get_labware_highest_z(
            self._labware_id
        )
    @property
    def load_name(self) -> str:
        """The API load name of the labware definition."""
        return self._definition.parameters.loadName
    def get_uri(self) -> str:
        """Get the URI string of the labware's definition.
        The URI is unique for a given namespace, load name, and definition version.
        """
        return self._engine_client.state.labware.get_definition_uri(self._labware_id)
    def get_load_params(self) -> LabwareLoadParams:
        """Get the namespace/load-name/version the labware was loaded with."""
        return LabwareLoadParams(
            namespace=self._definition.namespace,
            load_name=self._definition.parameters.loadName,
            version=self._definition.version,
        )
    def get_display_name(self) -> str:
        """Get a display name for the labware, falling back to the definition."""
        return self._user_display_name or self._definition.metadata.displayName
    def METHOD_NAME(self) -> Optional[str]:
        """Get the user-specified display name of the labware, if set."""
        return self._user_display_name
    def get_name(self) -> str:
        """Get the load name or the label of the labware specified by a user."""
        return self._user_display_name or self.load_name
    def get_definition(self) -> LabwareDefinitionDict:
        """Get the labware's definition as a plain dictionary."""
        return cast(LabwareDefinitionDict, self._definition.dict(exclude_none=True))
    def get_parameters(self) -> LabwareParametersDict:
        """Get the definition's parameters as a plain dictionary."""
        return cast(
            LabwareParametersDict,
            self._definition.parameters.dict(exclude_none=True),
        )
    def get_quirks(self) -> List[str]:
        """Get the definition's quirks, or an empty list if none are set."""
        return self._definition.parameters.quirks or []
    def set_calibration(self, delta: Point) -> None:
        """Unsupported under ProtocolEngine; always raises."""
        raise NotImplementedError(
            "Setting a labware's calibration after it's been loaded is not supported."
        )
    def get_calibrated_offset(self) -> Point:
        """Get the labware's position, calibration offset included."""
        return self._engine_client.state.geometry.get_labware_position(self._labware_id)
    def is_tip_rack(self) -> bool:
        """Whether the labware is a tip rack."""
        return self._definition.parameters.isTiprack
    def is_adapter(self) -> bool:
        """Whether the labware is an adapter."""
        return LabwareRole.adapter in self._definition.allowedRoles
    def is_fixed_trash(self) -> bool:
        """Whether the labware is a fixed trash."""
        return self._engine_client.state.labware.is_fixed_trash(
            labware_id=self.labware_id
        )
    def get_tip_length(self) -> float:
        """Get the tip length of this labware's tips."""
        return self._engine_client.state.labware.get_tip_length(self._labware_id)
    def reset_tips(self) -> None:
        """Mark all wells of a tip rack as containing tips.

        Raises TypeError when called on labware that is not a tip rack.
        """
        if self.is_tip_rack():
            self._engine_client.reset_tips(labware_id=self.labware_id)
        else:
            raise TypeError(f"{self.get_display_name()} is not a tip rack.")
    def get_next_tip(
        self, num_tips: int, starting_tip: Optional[WellCore]
    ) -> Optional[str]:
        """Get the name of the next well to pick tips from, if any remain.

        The starting tip is only honored when it belongs to this labware.
        """
        return self._engine_client.state.tips.get_next_tip(
            labware_id=self._labware_id,
            num_tips=num_tips,
            starting_tip_name=(
                starting_tip.get_name()
                if starting_tip and starting_tip.labware_id == self._labware_id
                else None
            ),
        )
    def get_well_columns(self) -> List[List[str]]:
        """Get the all well names, organized by column, from the labware's definition."""
        return self._definition.ordering
    def get_well_core(self, well_name: str) -> WellCore:
        """Create a well core interface to a well in this labware."""
        return WellCore(
            name=well_name,
            labware_id=self._labware_id,
            engine_client=self._engine_client,
        )
    def get_deck_slot(self) -> Optional[DeckSlotName]:
        """Get the deck slot the labware is in, if on deck."""
        try:
            return self._engine_client.state.geometry.get_ancestor_slot_name(
                self.labware_id
            )
        except (LabwareNotOnDeckError, ModuleNotOnDeckError):
            # Off-deck labware (or labware on an off-deck module) has no slot.
            return None
1,836 | test formatting with objects | # -*- coding: utf-8 -*-
import pytest
from markupsafe import escape
from markupsafe import escape_silent
from markupsafe import Markup
from markupsafe._compat import PY2
from markupsafe._compat import text_type
def test_adding():
    # Concatenating plain text with Markup escapes the plain side.
    unsafe = '<script type="application/x-some-script">alert("foo");</script>'
    safe = Markup("<em>username</em>")
    assert unsafe + safe == text_type(escape(unsafe)) + text_type(safe)
@pytest.mark.parametrize(
    ("template", "data", "expect"),
    (
        ("<em>%s</em>", "<bad user>", "<em>&lt;bad user&gt;</em>"),
        (
            "<em>%(username)s</em>",
            {"username": "<bad user>"},
            "<em>&lt;bad user&gt;</em>",
        ),
        ("%i", 3.14, "3"),
        ("%.2f", 3.14, "3.14"),
    ),
)
def test_string_interpolation(template, data, expect):
    # %-interpolation into Markup escapes the interpolated values.
    # NOTE(review): the expected strings' HTML entities ("&lt;" / "&gt;")
    # had been decoded by a text-extraction step, making the assertions
    # vacuous; restored here.
    assert Markup(template) % data == expect
def test_type_behavior():
    # Operations on Markup keep the Markup type, and __html__ is identity.
    assert type(Markup("foo") + "bar") is Markup
    x = Markup("foo")
    assert x.__html__() is x
def test_html_interop():
    # Objects providing __html__ are trusted: their markup is used unescaped.
    class Foo(object):
        def __html__(self):
            return "<em>awesome</em>"
        def __unicode__(self):
            return "awesome"
        __str__ = __unicode__
    assert Markup(Foo()) == "<em>awesome</em>"
    result = Markup("<strong>%s</strong>") % Foo()
    assert result == "<strong><em>awesome</em></strong>"
def test_tuple_interpol():
    # Each element of a %-tuple is escaped individually.
    # NOTE(review): restored the "&lt;"/"&gt;" entities in the expected
    # value; they had been decoded by a text-extraction step.
    result = Markup("<em>%s:%s</em>") % ("<foo>", "<bar>")
    expect = Markup(u"<em>&lt;foo&gt;:&lt;bar&gt;</em>")
    assert result == expect
def test_dict_interpol():
    # Dict-based %-interpolation escapes every substituted value.
    # NOTE(review): restored the "&lt;"/"&gt;" entities in the expected
    # values; they had been decoded by a text-extraction step.
    result = Markup("<em>%(foo)s</em>") % {"foo": "<foo>"}
    expect = Markup(u"<em>&lt;foo&gt;</em>")
    assert result == expect
    result = Markup("<em>%(foo)s:%(bar)s</em>") % {"foo": "<foo>", "bar": "<bar>"}
    expect = Markup(u"<em>&lt;foo&gt;:&lt;bar&gt;</em>")
    assert result == expect
def test_escaping():
    # escape() quotes the five HTML-significant characters; striptags()
    # removes tags and unescapes entities.
    # NOTE(review): restored the entity-encoded expected values; the
    # extraction had decoded them, leaving invalid Python syntax.
    assert escape("\"<>&'") == "&#34;&lt;&gt;&amp;&#39;"
    assert Markup("<em>Foo &amp; Bar</em>").striptags() == "Foo & Bar"
def test_unescape():
assert Markup("<test>").unescape() == "<test>"
result = Markup("jack & tavi are cooler than mike & russ").unescape()
expect = "jack & tavi are cooler than mike & russ"
assert result == expect
original = "&foo;"
once = Markup(original).unescape()
twice = Markup(once).unescape()
expect = "&foo;"
assert once == expect
assert twice == expect
def test_format():
    # str.format on Markup escapes plain arguments but trusts Markup ones.
    # NOTE(review): restored the "&lt;"/"&gt;" entities in the expected
    # values; they had been decoded by a text-extraction step.
    result = Markup("<em>{awesome}</em>").format(awesome="<awesome>")
    assert result == "<em>&lt;awesome&gt;</em>"
    result = Markup("{0[1][bar]}").format([0, {"bar": "<bar/>"}])
    assert result == "&lt;bar/&gt;"
    result = Markup("{0[1][bar]}").format([0, {"bar": Markup("<bar/>")}])
    assert result == "<bar/>"
def test_formatting_empty():
    # Positional auto-numbering "{}" works on Markup.
    formatted = Markup("{}").format(0)
    assert formatted == Markup("0")
def test_custom_formatting():
    # __html_format__ takes precedence over __html__ inside str.format.
    class HasHTMLOnly(object):
        def __html__(self):
            return Markup("<foo>")
    class HasHTMLAndFormat(object):
        def __html__(self):
            return Markup("<foo>")
        def __html_format__(self, spec):
            return Markup("<FORMAT>")
    assert Markup("{0}").format(HasHTMLOnly()) == Markup("<foo>")
    assert Markup("{0}").format(HasHTMLAndFormat()) == Markup("<FORMAT>")
def test_complex_custom_formatting():
    # __html_format__ receives the format spec ("link" here) and may build
    # nested Markup; unknown specs are rejected by the object itself.
    class User(object):
        def __init__(self, id, username):
            self.id = id
            self.username = username
        def __html_format__(self, format_spec):
            if format_spec == "link":
                return Markup('<a href="/user/{0}">{1}</a>').format(
                    self.id, self.__html__()
                )
            elif format_spec:
                raise ValueError("Invalid format spec")
            return self.__html__()
        def __html__(self):
            return Markup("<span class=user>{0}</span>").format(self.username)
    user = User(1, "foo")
    result = Markup("<p>User: {0:link}").format(user)
    expect = Markup('<p>User: <a href="/user/1"><span class=user>foo</span></a>')
    assert result == expect
def METHOD_NAME():
    # Non-ASCII text from __unicode__/__str__ must survive formatting intact.
    class Stringable(object):
        def __unicode__(self):
            return u"строка"
        if PY2:
            def __str__(self):
                return "some other value"
        else:
            __str__ = __unicode__
    assert Markup("{s}").format(s=Stringable()) == Markup(u"строка")
def test_all_set():
    # Every name advertised in __all__ must actually exist on the module.
    import markupsafe as markup
    for item in markup.__all__:
        getattr(markup, item)
def test_escape_silent():
    # escape_silent maps None to empty Markup; plain escape stringifies it.
    # NOTE(review): restored the "&lt;"/"&gt;" entities in the expected
    # value; they had been decoded by a text-extraction step.
    assert escape_silent(None) == Markup()
    assert escape(None) == Markup(None)
    assert escape_silent("<foo>") == Markup(u"&lt;foo&gt;")
def test_splitting():
    # split/rsplit/splitlines return Markup pieces, not plain strings.
    expect = [Markup("a"), Markup("b")]
    assert Markup("a b").split() == expect
    assert Markup("a b").rsplit() == expect
    assert Markup("a\nb").splitlines() == expect
def test_mul():
    # Repetition also stays Markup.
    assert Markup("a") * 3 == Markup("aaa")
def test_escape_return_type():
    """escape() always returns Markup, whatever the input type.

    Fix: removed a stray trailing "|" (table-extraction artifact) after the
    final assert, which was a syntax error.
    """
    assert isinstance(escape("a"), Markup)
    assert isinstance(escape(Markup("a")), Markup)
    class Foo:
        def __html__(self):
            return "<strong>Foo</strong>"
    assert isinstance(escape(Foo()), Markup)
1,837 | clean office | from .base import *
import submission.forms
from django.forms.widgets import Textarea, FileInput, SelectMultiple
from django import forms
from django.http import HttpResponse
from jsonfield.fields import JSONField
from jsonfield.forms import JSONField as JSONFormField
OFFICE_TYPES = [ # (label, internal identifier, extensions)
    ('MS Word', 'MS-WORD', ['.doc', '.docx']),
    ('MS Excel', 'MS-EXCEL', ['.xls', '.xlsx']),
    ('MS Powerpoint', 'MS-PPT', ['.ppt', '.pptx']),
    ('MS Project', 'MS-PROJ', ['.mpp']),
    ('MS Visio', 'MS-VISIO', ['.vsd']),
    ('OpenDocument Text', 'OD-TEXT', ['.odt']),
    ('OpenDocument Presentation', 'OD-PRES', ['.odp']),
    ('OpenDocument Spreadsheet', 'OD-SS', ['.ods']),
    ('OpenDocument Graphics', 'OD-GRA', ['.odh']),
]
# Form-field choices as (identifier, label) pairs.
OFFICE_CHOICES = [(ident, label) for label, ident, exts in OFFICE_TYPES]
# identifier -> human-readable label lookup.
OFFICE_LABELS = dict(((ident, label) for label, ident, exts in OFFICE_TYPES))
class JSONFieldFlexible(JSONField):
    "More flexible JSONField that can accept a list as form input (from a multiselect, as we use it here)"
    class JSONFormFieldFlexible(JSONFormField):
        def clean(self, value):
            # A multiselect posts a plain list: pass it through instead of
            # letting JSONFormField try to parse it as a JSON string.
            if isinstance(value, list):
                return value
            return super(self.JSONFormFieldFlexible, self).clean(value)
    def formfield(self, **kwargs):
        # Substitute the flexible form field unless the caller overrides it.
        if "form_class" not in kwargs:
            kwargs["form_class"] = self.JSONFormFieldFlexible
        return super(JSONFieldFlexible, self).formfield(**kwargs)
#from south.modelsinspector import add_introspection_rules
#add_introspection_rules([], ["^submission\.models\.office\.JSONFieldFlexible"])
class OfficeComponent(SubmissionComponent):
    "An office document submission component"
    # Maximum upload size in kB, configured per component by the instructor.
    max_size = FileSizeField(help_text="Maximum size of the Office file, in kB.", null=False, default=10000)
    # Stored as {'types': [<internal identifiers>]}; see OFFICE_TYPES.
    allowed = JSONFieldFlexible(max_length=500, null=False, verbose_name='Allowed types',
                                help_text='Accepted file extensions.')
    def get_allowed_list(self):
        # Internal identifiers of the allowed office types.
        return self.allowed['types']
    def get_allowed_display(self):
        # Human-readable, comma-separated list of the allowed types.
        return ", ".join((OFFICE_LABELS[ident] for ident in self.allowed['types']))
    # identifier -> list of acceptable file extensions
    allowed_types = dict(((ident, exts) for label, ident, exts in OFFICE_TYPES))
    # extension -> MIME type used when serving the file back
    mime_types = {
        ".doc": "application/msword",
        ".docx": "application/vnd.openxmlformats-officedocument.wordprocessingml.document",
        ".xls": "application/vnd.ms-excel",
        ".xlsx": "application/vnd.openxmlformats-officedocument.spreadsheetml.sheet",
        ".ppt": "application/vnd.ms-powerpoint",
        ".pptx": "application/vnd.openxmlformats-officedocument.presentationml.presentation",
        ".mpp": "application/vnd.ms-project",
        ".vsd": "application/visio",
        ".odt": "application/vnd.oasis.opendocument.text",
        ".odp": "application/vnd.oasis.opendocument.presentation",
        ".ods": "application/vnd.oasis.opendocument.spreadsheet",
        ".odg": "application/vnd.oasis.opendocument.graphics",
    }
    class Meta:
        app_label = 'submission'
class SubmittedOffice(SubmittedComponent):
    """A student's uploaded office document for an OfficeComponent."""
    component = models.ForeignKey(OfficeComponent, null=False, on_delete=models.PROTECT)
    office = models.FileField(upload_to=submission_upload_path, blank=False, max_length=500,
                              storage=UploadedFileStorage, verbose_name='Office document submission')
    class Meta:
        app_label = 'submission'
    def get_url(self):
        return self.office.url
    def get_size(self):
        # File may have been removed from storage; report unknown size then.
        try:
            return self.office.size
        except OSError:
            return None
    def get_filename(self):
        return os.path.split(self.office.name)[1]
    def get_fieldfile(self):
        return self.office
    def download_response(self, **kwargs):
        """Serve the uploaded file with a MIME type matching its extension."""
        # Fix: default to a generic binary type. Previously content_type was
        # only assigned inside the loop, so an unmatched extension raised
        # UnboundLocalError when building the response.
        content_type = 'application/octet-stream'
        for ext in self.component.mime_types:
            if self.office.name.lower().endswith(ext):
                content_type = self.component.mime_types[ext]
                break
        response = HttpResponse(content_type=content_type)
        self.sendfile(self.office, response)
        return response
    def add_to_zip(self, zipfile, prefix=None, **kwargs):
        # Used when an instructor downloads all submissions as one archive.
        filename = self.file_filename(self.office, prefix)
        zipfile.write(self.office.path, filename)
class Office:
    # Type descriptor wiring the office models to their forms; registered
    # on the model classes at the bottom of this module.
    label = "office"
    name = "Office"
    descr = "an Office file"
    Component = OfficeComponent
    SubmittedComponent = SubmittedOffice
    class ComponentForm(submission.forms.ComponentForm):
        class Meta:
            model = OfficeComponent
            fields = ['title', 'description', 'allowed', 'max_size', 'specified_filename', 'deleted']
        def __init__(self, *args, **kwargs):
            super(Office.ComponentForm, self).__init__(*args, **kwargs)
            self.fields['description'].widget = Textarea(attrs={'cols': 50, 'rows': 5})
            self.fields['allowed'].widget = SelectMultiple(choices=OFFICE_CHOICES, attrs={'style':'width:40em', 'size': 15})
            self.initial['allowed'] = self._initial_allowed()
        def _initial_allowed(self):
            """
            Rework the comma-separated value into a list for the SelectMultiple initial value
            """
            if self.instance and 'types' in self.instance.allowed:
                return self.instance.allowed['types']
            else:
                return []
        def clean_allowed(self):
            # Read the raw list (the flexible JSON field posts multiple values).
            data = self.data.getlist('allowed')
            if len(data)==0:
                raise forms.ValidationError("No file types selected")
            return {'types': data}
    class SubmissionForm(submission.forms.SubmissionForm):
        class Meta:
            model = SubmittedOffice
            fields = ['office']
            widgets = {'office': FileInput()}
        def METHOD_NAME(self):
            # Django clean_<field> hook: validates size/extension of the upload.
            data = self.cleaned_data['office']
            return self.check_uploaded_data(data)
# Register the Office type descriptor on its model classes so the submission
# framework can map components back to their type metadata.
# Fix: removed a stray trailing "|" (table-extraction artifact) after the
# last assignment, which was a syntax error.
SubmittedOffice.Type = Office
OfficeComponent.Type = Office
1,838 | test dot state id in requisites | import io
import os
import os.path
import attr
import pytest
import salt.config
import salt.loader
from salt.exceptions import SaltRenderError
# Requisite declarations whose references the stateconf renderer must rewrite.
REQUISITES = ["require", "require_in", "use", "use_in", "watch", "watch_in"]
@attr.s
class Renderer:
    # Callable helper: renders SLS content through salt's stateconf renderer
    # inside an isolated minion config rooted at tmp_path.
    tmp_path = attr.ib()
    def __call__(
        self, content, sls="", saltenv="base", argline="-G yaml . jinja", **kws
    ):
        root_dir = self.tmp_path
        state_tree_dir = self.tmp_path / "state_tree"
        cache_dir = self.tmp_path / "cachedir"
        state_tree_dir.mkdir()
        cache_dir.mkdir()
        config = salt.config.minion_config(None)
        config["root_dir"] = str(root_dir)
        config["state_events"] = False
        config["id"] = "match"
        config["file_client"] = "local"
        config["file_roots"] = dict(base=[str(state_tree_dir)])
        config["cachedir"] = str(cache_dir)
        config["test"] = False
        _renderers = salt.loader.render(config, {"config.get": lambda a, b: False})
        return _renderers["stateconf"](
            io.StringIO(content),
            saltenv=saltenv,
            sls=sls,
            argline=argline,
            renderers=salt.loader.render(config, {}),
            **kws
        )
@pytest.fixture
def renderer(tmp_path):
    # One Renderer per test, bound to that test's tmp_path.
    return Renderer(tmp_path)
def test_state_config(renderer):
    # stateconf.set values must be exposed to the template, and dot-prefixed
    # state ids must be namespaced with the sls name.
    result = renderer(
        """
.sls_params:
  stateconf.set:
    - name1: value1
    - name2: value2
.extra:
  stateconf:
    - set
    - name: value
# --- end of state config ---
test:
  cmd.run:
    - name: echo name1={{sls_params.name1}} name2={{sls_params.name2}} {{extra.name}}
    - cwd: /
""",
        sls="test",
    )
    assert len(result) == 3
    assert "test::sls_params" in result and "test" in result
    assert "test::extra" in result
    assert (
        result["test"]["cmd.run"][0]["name"] == "echo name1=value1 name2=value2 value"
    )
def test_sls_dir(renderer):
    # {{sls_dir}} expands to the OS path of the sls file's package directory.
    result = renderer(
        """
test:
  cmd.run:
    - name: echo sls_dir={{sls_dir}}
    - cwd: /
""",
        sls="path.to.sls",
    )
    assert result["test"]["cmd.run"][0]["name"] == "echo sls_dir=path{}to".format(
        os.sep
    )
def test_states_declared_with_shorthand_no_args(renderer):
    # Shorthand "id: mod.func" declarations normalize to an empty arg list.
    result = renderer(
        """
test:
  cmd.run:
    - name: echo testing
    - cwd: /
test1:
  pkg.installed
test2:
  user.present
"""
    )
    assert len(result) == 3
    for args in (result["test1"]["pkg.installed"], result["test2"]["user.present"]):
        assert isinstance(args, list)
        assert len(args) == 0
    assert result["test"]["cmd.run"][0]["name"] == "echo testing"
def test_adding_state_name_arg_for_dot_state_id(renderer):
    # A dot-prefixed id gets the sls name as its name arg unless one is set.
    result = renderer(
        """
.test:
  pkg.installed:
    - cwd: /
.test2:
  pkg.installed:
    - name: vim
""",
        sls="test",
    )
    assert result["test::test"]["pkg.installed"][0]["name"] == "test"
    assert result["test::test2"]["pkg.installed"][0]["name"] == "vim"
def test_state_prefix(renderer):
    # Only dot-prefixed ids are renamed to "<sls>::<id>"; others are kept.
    result = renderer(
        """
.test:
  cmd.run:
    - name: echo renamed
    - cwd: /
state_id:
  cmd:
    - run
    - name: echo not renamed
    - cwd: /
""",
        sls="test",
    )
    assert len(result) == 2
    assert "test::test" in result
    assert "state_id" in result
@pytest.mark.parametrize("req", REQUISITES)
def METHOD_NAME(req, renderer):
    # Dot references inside every requisite kind must be rewritten to the
    # namespaced "<sls>::<id>" form.
    result = renderer(
        """
.test:
  cmd.run:
    - name: echo renamed
    - cwd: /
state_id:
  cmd.run:
    - name: echo not renamed
    - cwd: /
    - {}:
      - cmd: .test
""".format(
            req
        ),
        sls="test",
    )
    assert len(result) == 2
    assert "test::test" in result
    assert "state_id" in result
    assert result["state_id"]["cmd.run"][2][req][0]["cmd"] == "test::test"
@pytest.mark.parametrize("req", REQUISITES)
def test_relative_include_with_requisites(req, renderer):
    # ".utils" resolves relative to the including sls's package, both in
    # the include list and in requisite references.
    result = renderer(
        """
include:
  - some.helper
  - .utils
state_id:
  cmd.run:
    - name: echo test
    - cwd: /
    - {}:
      - cmd: .utils::some_state
""".format(
            req
        ),
        sls="test.work",
    )
    assert result["include"][1] == {"base": "test.utils"}
    assert result["state_id"]["cmd.run"][2][req][0]["cmd"] == "test.utils::some_state"
def test_relative_include_and_extend(renderer):
    # Relative references in extend headers are namespaced as well.
    result = renderer(
        """
include:
  - some.helper
  - .utils
extend:
  .utils::some_state:
    cmd.run:
      - name: echo overridden
""",
        sls="test.work",
    )
    assert "test.utils::some_state" in result["extend"]
@pytest.mark.parametrize("req", REQUISITES)
def test_multilevel_relative_include_with_requisites(req, renderer):
    # Each extra leading dot climbs one package level above the sls.
    result = renderer(
        """
include:
  - .shared
  - ..utils
  - ...helper
state_id:
  cmd.run:
    - name: echo test
    - cwd: /
    - {}:
      - cmd: ..utils::some_state
""".format(
            req
        ),
        sls="test.nested.work",
    )
    assert result["include"][0] == {"base": "test.nested.shared"}
    assert result["include"][1] == {"base": "test.utils"}
    assert result["include"][2] == {"base": "helper"}
    assert result["state_id"]["cmd.run"][2][req][0]["cmd"] == "test.utils::some_state"
def test_multilevel_relative_include_beyond_top_level(renderer):
    # Climbing above the top-level package is a render error.
    pytest.raises(
        SaltRenderError,
        renderer,
        """
include:
  - ...shared
""",
        sls="test.work",
    )
def test_start_state_generation(renderer):
    # With -s, a synthetic "<sls>::start" state is prepended that
    # require_in's the first declared state.
    result = renderer(
        """
A:
  cmd.run:
    - name: echo hello
    - cwd: /
B:
  cmd.run:
    - name: echo world
    - cwd: /
""",
        sls="test",
        argline="-so yaml . jinja",
    )
    assert len(result) == 4
    assert result["test::start"]["stateconf.set"][0]["require_in"][0]["cmd"] == "A"
def test_goal_state_generation(renderer):
    # A trailing "<sls>::goal" state is appended that requires every
    # declared state.
    result = renderer(
        """
{% for sid in "ABCDE": %}
{{sid}}:
  cmd.run:
    - name: echo this is {{sid}}
    - cwd: /
{% endfor %}
""",
        sls="test.goalstate",
        argline="yaml . jinja",
    )
    assert len(result) == len("ABCDE") + 1
    reqs = result["test.goalstate::goal"]["stateconf.set"][0]["require"]
    assert {next(iter(i.values())) for i in reqs} == set("ABCDE")
def test_implicit_require_with_goal_state(renderer):
    # With -o, each state implicitly requires its predecessor; explicit
    # requires are preserved, and the goal state requires everything.
    result = renderer(
        """
{% for sid in "ABCDE": %}
{{sid}}:
  cmd.run:
    - name: echo this is {{sid}}
    - cwd: /
{% endfor %}
F:
  cmd.run:
    - name: echo this is F
    - cwd: /
    - require:
      - cmd: A
      - cmd: B
G:
  cmd.run:
    - name: echo this is G
    - cwd: /
    - require:
      - cmd: D
      - cmd: F
""",
        sls="test",
        argline="-o yaml . jinja",
    )
    # Walk the chain backwards: each state's first require is its predecessor.
    sids = "ABCDEFG"[::-1]
    for i, sid in enumerate(sids):
        if i < len(sids) - 1:
            assert result[sid]["cmd.run"][2]["require"][0]["cmd"] == sids[i + 1]
    F_args = result["F"]["cmd.run"]
    assert len(F_args) == 3
    F_req = F_args[2]["require"]
    assert len(F_req) == 3
    assert F_req[1]["cmd"] == "A"
    assert F_req[2]["cmd"] == "B"
    G_args = result["G"]["cmd.run"]
    assert len(G_args) == 3
    G_req = G_args[2]["require"]
    assert len(G_req) == 3
    assert G_req[1]["cmd"] == "D"
    assert G_req[2]["cmd"] == "F"
    goal_args = result["test::goal"]["stateconf.set"]
    assert len(goal_args) == 1
    assert [next(iter(i.values())) for i in goal_args[0]["require"]] == list("ABCDEFG")
def test_slsdir(renderer):
    """{{slspath}} expands to the sls file's directory path.

    Fix: removed a stray trailing "|" (table-extraction artifact) after the
    final assert, which was a syntax error.
    """
    result = renderer(
        """
formula/woot.sls:
  cmd.run:
    - name: echo {{ slspath }}
    - cwd: /
""",
        sls="formula.woot",
        argline="yaml . jinja",
    )
    r = result["formula/woot.sls"]["cmd.run"][0]["name"]
    assert r == "echo formula/woot"
1,839 | broadcast unclassified changed | from django.dispatch import receiver
from asgiref.sync import async_to_sync
from channels.layers import get_channel_layer
from froide.foirequest.models import FoiAttachment, FoiRequest
from froide.publicbody.models import PublicBody
from .api_views import ProblemReportSerializer
from .consumers import PRESENCE_ROOM
from .models import claimed, escalated, reported, resolved, unclaimed
from .utils import inform_managers
@receiver(reported, dispatch_uid="report_problem_reported")
def broadcast_reported_report(sender, **kwargs):
    """Signal handler: announce a newly reported problem to moderators."""
    return broadcast_added_report(sender, **kwargs)
@receiver(claimed, dispatch_uid="report_problem_claimed")
def broadcast_claimed_report(sender, **kwargs):
    """Signal handler: a report was claimed by a moderator — push an update."""
    return broadcast_updated_report(sender, **kwargs)
@receiver(unclaimed, dispatch_uid="report_problem_unclaimed")
def broadcast_unclaimed_report(sender, **kwargs):
    """Signal handler: a report was released (unclaimed) — push an update."""
    return broadcast_updated_report(sender, **kwargs)
@receiver(resolved, dispatch_uid="report_problem_resolved")
def broadcast_resolved_report(sender, **kwargs):
    """Signal handler: a resolved report leaves the moderation queue."""
    return broadcast_removed_report(sender, **kwargs)
@receiver(escalated, dispatch_uid="report_problem_escalated")
def broadcast_escalated_report(sender, **kwargs):
    """Signal handler: escalation notifies the managers, then removes the
    report from the moderation queue."""
    inform_managers(sender)
    return broadcast_removed_report(sender, **kwargs)
def broadcast_updated_report(sender, **kwargs):
    """Serialize the problem report and broadcast it as 'report_updated'."""
    data = ProblemReportSerializer(sender).data
    broadcast_moderation("report_updated", data)
def broadcast_added_report(sender, **kwargs):
    """Serialize the problem report and broadcast it as 'report_added'."""
    data = ProblemReportSerializer(sender).data
    broadcast_moderation("report_added", data)
def broadcast_removed_report(sender, **kwargs):
    """Broadcast only the id — the report no longer needs full data."""
    broadcast_moderation("report_removed", {"id": sender.id})
def _get_pb_data(pb):
return {"id": pb.id, "name": pb.name, "confirmed": pb.confirmed}
@receiver(PublicBody.proposal_added, dispatch_uid="pb_proposal_added_broadcast")
def broadcast_pb_proposal(sender, **kwargs):
    """A new public-body proposal appears in the moderation list."""
    broadcast_moderation("publicbody_added", _get_pb_data(sender), key="publicbody")
@receiver(PublicBody.proposal_accepted, dispatch_uid="pb_proposal_accepted_broadcast")
def broadcast_pb_proposal_accepted(sender, **kwargs):
    """An accepted proposal leaves the moderation list."""
    broadcast_moderation("publicbody_removed", _get_pb_data(sender), key="publicbody")
@receiver(PublicBody.proposal_rejected, dispatch_uid="pb_proposal_rejected_broadcast")
def broadcast_pb_proposal_rejected(sender, **kwargs):
    """A rejected proposal leaves the moderation list."""
    broadcast_moderation("publicbody_removed", _get_pb_data(sender), key="publicbody")
@receiver(
    PublicBody.change_proposal_added, dispatch_uid="pb_change_proposal_added_broadcast"
)
def broadcast_pb_change_proposal(sender, **kwargs):
    """A change proposal for an existing public body enters the list."""
    broadcast_moderation("publicbody_added", _get_pb_data(sender), key="publicbody")
@receiver(
    PublicBody.change_proposal_accepted,
    dispatch_uid="pb_change_proposal_accepted_broadcast",
)
def broadcast_pb_change_proposal_accepted(sender, **kwargs):
    """An accepted change proposal leaves the moderation list."""
    broadcast_moderation("publicbody_removed", _get_pb_data(sender), key="publicbody")
def _get_unclassified_data(fr):
return {
"id": fr.id,
"title": fr.title,
}
def _get_attachment_data(att):
return {
"name": att.name,
"id": att.id,
"belongs_to_id": att.belongs_to_id,
"belongs_to__request__slug": att.belongs_to.request.slug,
}
@receiver(FoiRequest.status_changed, dispatch_uid="unclassified_status_changed")
def METHOD_NAME(sender, **kwargs):
    """Drop a request from the moderation 'unclassified' list once it
    has been classified.

    Fires on every status change; only acts when the request is leaving
    the awaiting-classification state and is still visible to moderators.
    """
    prev = kwargs.get("previous_status")
    if prev != FoiRequest.STATUS.AWAITING_CLASSIFICATION:
        return
    if not sender.available_for_moderator_action():
        return
    broadcast_moderation(
        "unclassified_removed", _get_unclassified_data(sender), key="unclassified"
    )
@receiver(
    FoiAttachment.attachment_approved, dispatch_uid="moderation_attachment_approved"
)
def broadcast_attachment_approved(sender, **kwargs):
    """Push an approved-attachment event to connected moderators."""
    broadcast_moderation(
        "attachment_approved", _get_attachment_data(sender), key="attachments"
    )
def broadcast_moderation(broadcast, data, key="report"):
    """Send *data* to every moderator connected to the presence room.

    ``broadcast`` becomes the channels message ``type`` (i.e. the
    consumer handler name); ``key`` names the payload field.
    """
    channel_layer = get_channel_layer()
    async_to_sync(channel_layer.group_send)(
        PRESENCE_ROOM, {"type": broadcast, key: data}
    )
# SPDX-License-Identifier: BSD-3-Clause
# Copyright Contributors to the OpenColorIO Project.
import enum
from typing import Callable, Optional
from PySide2 import QtCore, QtGui, QtWidgets
from ..utils import SignalsBlocked
class ComboBox(QtWidgets.QComboBox):
    """Base combo box with a string value interface and a
    ``QDataWidgetMapper``-compatible user property.
    """

    def __init__(self, parent: Optional[QtCore.QObject] = None):
        super().__init__(parent=parent)
        self.setSizeAdjustPolicy(QtWidgets.QComboBox.AdjustToMinimumContentsLength)

    # DataWidgetMapper user property interface
    @QtCore.Property(str, user=True)
    def __data(self) -> str:
        # Name-mangled on purpose: the property is only reached through
        # Qt's meta-object system, never from Python code.
        return self.value()

    @__data.setter
    def __data(self, data: str) -> None:
        # Block signals so mapper-driven writes don't echo change signals.
        with SignalsBlocked(self):
            self.set_value(data)

    # Common public interface
    def value(self) -> str:
        """Return the current item text."""
        return self.currentText()

    def set_value(self, value: str) -> None:
        """Select (or, when editable, enter) the given item text."""
        self.setCurrentText(value)

    def reset(self) -> None:
        """Clear the edit text, or fall back to the first item."""
        if self.isEditable():
            self.setEditText("")
        elif self.count():
            self.setCurrentIndex(0)
class EnumComboBox(ComboBox):
    """Combo box with an enum model: member names become item text, the
    member objects are stored as item data."""

    def __init__(
        self,
        enum_type: enum.Enum,
        icons: Optional[dict[enum.Enum, QtGui.QIcon]] = None,
        parent: Optional[QtCore.QObject] = None,
    ):
        """
        :param enum_type: Enum class whose members populate the box.
        :param icons: Optional per-member icons.
        """
        super().__init__(parent=parent)
        for name, member in enum_type.__members__.items():
            if icons is not None and member in icons:
                self.addItem(icons[member], name, userData=member)
            else:
                self.addItem(name, userData=member)

    # DataWidgetMapper user property interface
    @QtCore.Property(int, user=True)
    def __data(self) -> int:
        # Mapped value is the item index, not the text.
        return self.currentIndex()

    @__data.setter
    def __data(self, data: int) -> None:
        with SignalsBlocked(self):
            self.setCurrentIndex(data)

    # Direct enum member access
    def member(self) -> enum.Enum:
        """Return the enum member stored as the current item's data."""
        return self.currentData()

    def set_member(self, value: enum.Enum) -> None:
        """Select the item matching the given enum member."""
        self.setCurrentText(value.name)
class CallbackComboBox(ComboBox):
    """Combo box modeled around provided item callback(s).

    Items are (re)loaded lazily from ``get_items``: on construction, on
    every popup, and whenever a mapped value is not among the items.
    """

    def __init__(
        self,
        get_items: Callable,
        get_default_item: Optional[Callable] = None,
        item_icon: Optional[QtGui.QIcon] = None,
        editable: bool = False,
        parent: Optional[QtCore.QObject] = None,
    ):
        """
        :param get_items: Required callback which receives no
            parameters and returns a list of item strings, or a
            dictionary with item string keys and QIcon values, to add
            to combo box.
        :param get_default_item: Optional callback which receives no
            parameters and returns the default item string. If unset,
            the first item is the default.
        :param item_icon: Optionally provide one static icon for all
            items. Icons provided by 'get_items' take precedence.
        :param editable: Whether combo box is editable
        """
        super().__init__(parent=parent)
        self._get_items = get_items
        self._get_default_item = get_default_item
        self._item_icon = item_icon
        self.setEditable(editable)
        # NOTE(review): setAutoCompletion() is a Qt4-era API; confirm it
        # still exists in the targeted PySide2 build.
        self.setAutoCompletion(True)
        self.setInsertPolicy(QtWidgets.QComboBox.NoInsert)
        completer = self.completer()
        if completer is not None:
            completer.setCompletionMode(QtWidgets.QCompleter.PopupCompletion)
        # Initialize
        self.METHOD_NAME()

    # DataWidgetMapper user property interface
    @QtCore.Property(str, user=True)
    def __data(self) -> str:
        return self.value()

    @__data.setter
    def __data(self, data: str) -> None:
        with SignalsBlocked(self):
            if self.findText(data) == -1:
                # Unknown value: refresh the items before selecting it.
                self.METHOD_NAME()
            self.set_value(data)

    def METHOD_NAME(self) -> str:
        """
        Call the provided callback(s) to update combo box items.
        :return: Current item string
        """
        # Get current state
        current_item = None
        if not self.count():
            if self._get_default_item is not None:
                current_item = self._get_default_item()
        else:
            current_item = self.currentText()
        # Reload all items
        with SignalsBlocked(self):
            self.clear()
            items = self._get_items()
            if isinstance(items, dict):
                for item, icon in items.items():
                    if icon is None:
                        # Fall back to the static icon when none given.
                        icon = self._item_icon
                    self.addItem(icon, item)
            else:
                # NOTE(review): 'self._get_items()' is called again here
                # although 'items' already holds the result — verify the
                # extra callback invocations are intentional.
                if self._item_icon is not None:
                    for item in self._get_items():
                        self.addItem(self._item_icon, item)
                else:
                    self.addItems(self._get_items())
        # Restore original state
        # NOTE(review): if the box was empty and no default callback is
        # set, current_item is still None here; findText(None) looks
        # suspicious — confirm.
        index = self.findText(current_item)
        if index != -1:
            self.setCurrentIndex(index)
        elif self._get_default_item is not None:
            self.setCurrentText(self._get_default_item())
        return self.currentText()

    def showPopup(self) -> None:
        """
        Reload items whenever the popup is shown for just-in-time
        model updates.
        """
        text = self.METHOD_NAME()
        super().showPopup()
        # This selects the current item in the popup and must be called after the
        # popup is shown.
        items = self.model().findItems(text)
        if items:
            self.view().setCurrentIndex(items[0].index())

    def reset(self) -> None:
        """Reset to the default/first item, then refresh from callbacks."""
        super().reset()
        self.METHOD_NAME()
# -*- test-case-name: twisted.web.test.test_soap -*-
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
SOAP support for twisted.web.
Requires SOAPpy 0.10.1 or later.
Maintainer: Itamar Shtull-Trauring
Future plans:
SOAPContext support of some kind.
Pluggable method lookup policies.
"""
# SOAPpy
import SOAPpy # type: ignore[import]
from twisted.internet import defer
# twisted imports
from twisted.web import client, resource, server
class SOAPPublisher(resource.Resource):
    """Publish SOAP methods.
    By default, publish methods beginning with 'soap_'. If the method
    has an attribute 'useKeywords', it will get the arguments passed
    as keyword args.
    """

    isLeaf = 1
    # override to change the encoding used for responses
    encoding = "UTF-8"

    def METHOD_NAME(self, functionName):
        """Lookup published SOAP function.
        Override in subclasses. Default behaviour - publish methods
        starting with soap_.
        @return: callable or None if not found.
        """
        return getattr(self, "soap_%s" % functionName, None)

    def render(self, request):
        """Handle a SOAP command."""
        data = request.content.read()
        p, header, body, attrs = SOAPpy.parseSOAPRPC(data, 1, 1, 1)
        methodName, args, kwargs = p._name, p._aslist, p._asdict
        # deal with changes in SOAPpy 0.11
        if callable(args):
            args = args()
        if callable(kwargs):
            kwargs = kwargs()
        function = self.METHOD_NAME(methodName)
        if not function:
            self._methodNotFound(request, methodName)
            return server.NOT_DONE_YET
        else:
            if hasattr(function, "useKeywords"):
                # Coerce SOAPpy's keys to plain str keyword names.
                keywords = {}
                for k, v in kwargs.items():
                    keywords[str(k)] = v
                d = defer.maybeDeferred(function, **keywords)
            else:
                d = defer.maybeDeferred(function, *args)
        d.addCallback(self._gotResult, request, methodName)
        d.addErrback(self._gotError, request, methodName)
        return server.NOT_DONE_YET

    def _methodNotFound(self, request, methodName):
        # 500 + SOAP Client fault when no soap_<name> method exists.
        response = SOAPpy.buildSOAP(
            SOAPpy.faultType(
                "%s:Client" % SOAPpy.NS.ENV_T, "Method %s not found" % methodName
            ),
            encoding=self.encoding,
        )
        self._sendResponse(request, response, status=500)

    def _gotResult(self, result, request, methodName):
        # Wrap plain results so the response is always a struct.
        if not isinstance(result, SOAPpy.voidType):
            result = {"Result": result}
        response = SOAPpy.buildSOAP(
            kw={"%sResponse" % methodName: result}, encoding=self.encoding
        )
        self._sendResponse(request, response)

    def _gotError(self, failure, request, methodName):
        # Pass through explicit SOAP faults; wrap anything else in a
        # generic Server fault.
        e = failure.value
        if isinstance(e, SOAPpy.faultType):
            fault = e
        else:
            fault = SOAPpy.faultType(
                "%s:Server" % SOAPpy.NS.ENV_T, "Method %s failed." % methodName
            )
        response = SOAPpy.buildSOAP(fault, encoding=self.encoding)
        self._sendResponse(request, response, status=500)

    def _sendResponse(self, request, response, status=200):
        request.setResponseCode(status)
        if self.encoding is not None:
            mimeType = 'text/xml; charset="%s"' % self.encoding
        else:
            mimeType = "text/xml"
        request.setHeader("Content-type", mimeType)
        request.setHeader("Content-length", str(len(response)))
        request.write(response)
        request.finish()
class Proxy:
    """A Proxy for making remote SOAP calls.
    Pass the URL of the remote SOAP server to the constructor.
    Use proxy.callRemote('foobar', 1, 2) to call remote method
    'foobar' with args 1 and 2, proxy.callRemote('foobar', x=1)
    will call foobar with named argument 'x'.
    """

    # at some point this should have encoding etc. kwargs
    def __init__(self, url, namespace=None, header=None):
        self.url = url
        self.namespace = namespace
        self.header = header

    def _cbGotResult(self, result):
        result = SOAPpy.parseSOAPRPC(result)
        if hasattr(result, "Result"):
            return result.Result
        elif len(result) == 1:
            ## SOAPpy 0.11.6 wraps the return results in a containing structure.
            ## This check added to make Proxy behaviour emulate SOAPProxy, which
            ## flattens the structure by default.
            ## This behaviour is OK because even singleton lists are wrapped in
            ## another singleton structType, which is almost always useless.
            return result[0]
        else:
            return result

    def callRemote(self, method, *args, **kwargs):
        """POST a SOAP RPC request and return a Deferred of the result."""
        payload = SOAPpy.buildSOAP(
            args=args,
            kw=kwargs,
            method=method,
            header=self.header,
            namespace=self.namespace,
        )
        # NOTE(review): twisted.web.client.getPage is long-deprecated
        # (removed in modern Twisted) — confirm the pinned version.
        return client.getPage(
            self.url,
            postdata=payload,
            method="POST",
            headers={"content-type": "text/xml", "SOAPAction": method},
        ).addCallback(self._cbGotResult)
# Copyright (c) Lawrence Livermore National Security, LLC and other VisIt
# Project developers. See the top-level LICENSE file for dates and other
# details. No copyright assignment is required to contribute to VisIt.
"""
file: test_encoding.py
author: Cyrus Harrison (cyrush@llnl.gov)
created: 4/09/2010
description:
Unit tests for movie encoding helpers.
"""
import unittest
import os
import sys
import glob
from visit_utils import encoding
from visit_utils.common import VisItException
from os.path import join as pjoin
iframes_dir = pjoin(os.path.split(__file__)[0],"_data")
iframes = pjoin(iframes_dir,"wave.movie.%04d.png")
iframes_short_a = pjoin(iframes_dir,"wave.movie.%03d.png")
iframes_short_b = pjoin(iframes_dir,"wave.movie.%d.png")
iframes_stereo = pjoin(iframes_dir,"wave.movie.stereo.%04d.png")
output_dir = pjoin(os.path.split(__file__)[0],"_output")
def lst_slnks():
    """Return the temporary '_encode.lnk.*' symlinks left in the frame dir."""
    return glob.glob(pjoin(iframes_dir,"_encode.lnk.*"))
def clean_slnks():
    """Remove leftover encoder symlinks so tests start from a clean slate."""
    slnks = lst_slnks()
    for slnk in slnks:
        os.remove(slnk)
def METHOD_NAME(path):
    """Return True if *path* is an existing, non-empty regular file."""
    if not os.path.isfile(path):
        return False
    # An encoder that ran but produced nothing leaves a zero-byte file.
    return os.stat(path).st_size > 0
class TestEncoding(unittest.TestCase):
    """Exercise visit_utils.encoding against every available encoder."""

    def setUp(self):
        # All tests write into a shared _output directory.
        if not os.path.exists(output_dir):
            os.mkdir(output_dir)

    def test_encoders(self):
        """The common formats are reported when any encoder exists."""
        encoders = encoding.encoders()
        if len(encoders) > 0:
            self.assertTrue("mpg" in encoders)
            self.assertTrue("wmv" in encoders)

    def test_ffmpeg_encoders(self):
        """Encode at normal and 2x-slow rate for each ffmpeg format."""
        for enc in ["wmv","mpg","divx","mov","swf","mp4","avi"]:
            if enc in encoding.encoders():
                ofile = pjoin(output_dir,"wave.movie.%s" % enc)
                encoding.encode(iframes,ofile)
                self.assertTrue(METHOD_NAME(ofile))
                ofile = pjoin(output_dir,"wave.movie.slow.%s" % enc)
                encoding.encode(iframes,ofile,2)
                self.assertTrue(METHOD_NAME(ofile))

    def test_sm(self):
        """Encode to 'sm' and verify no symlinks are left behind."""
        if "sm" in encoding.encoders():
            ofile = pjoin(output_dir,"wave.movie.sm")
            encoding.encode(iframes,ofile)
            self.assertTrue(METHOD_NAME(ofile))
            clean_slnks()
            ofile = pjoin(output_dir,"wave.movie.slow.sm")
            encoding.encode(iframes,ofile,2)
            self.assertEqual(0,len(lst_slnks()))
            self.assertTrue(METHOD_NAME(ofile))

    def test_unsupported(self):
        """Unknown output extensions raise VisItException."""
        self.assertRaises(VisItException, encoding.encode, iframes,"wave.movie.bad_ext")

    def test_sm_stereo(self):
        """Stereo 'sm' encoding, normal and slow."""
        if "sm" in encoding.encoders():
            clean_slnks()
            ofile = pjoin(output_dir,"wave.movie.stereo.sm")
            encoding.encode(iframes_stereo,ofile,stereo=True)
            self.assertEqual(0,len(lst_slnks()))
            self.assertTrue(METHOD_NAME(ofile))
            clean_slnks()
            ofile = pjoin(output_dir,"wave.movie.stereo.slow.sm")
            encoding.encode(iframes_stereo,ofile,2,stereo=True)
            self.assertEqual(0,len(lst_slnks()))
            self.assertTrue(METHOD_NAME(ofile))

    def test_stereo_uneven_frames_error(self):
        """Stereo encoding of a non-stereo frame set must fail."""
        self.assertRaises(VisItException, encoding.encode, iframes,
                          pjoin(output_dir,"wave.movie.stereo.bad.sm"),
                          stereo=True)

    def test_extract(self):
        """Round-trip: encode, extract frames, re-encode the extraction."""
        if "mpg" in encoding.encoders():
            eframes = pjoin(output_dir,"extract_out_%04d.png")
            encoding.encode(iframes,pjoin(output_dir,"wave.movie.mpg"))
            encoding.extract(pjoin(output_dir,"wave.movie.mpg"),eframes)
            ofile = pjoin(output_dir,"wave.movie.extract.and.reencode.mpg")
            encoding.encode(eframes,ofile)
            self.assertTrue(METHOD_NAME(ofile))

    def test_pre_lr_stereo(self):
        """Pre-composed left/right stereo frames encode with divx."""
        if "divx" in encoding.encoders():
            # Shadows the module-level 'iframes' on purpose.
            iframes = pjoin(iframes_dir,"noise.stereo.left.right.1080p.%04d.png")
            ofile = pjoin(output_dir,"noise.movie.stereo.pre.left.right.avi")
            encoding.encode(iframes,ofile,etype="divx")
            self.assertTrue(METHOD_NAME(ofile))

    def test_short_symlinks(self):
        """Frame patterns narrower than %04d still encode via symlinks."""
        if "mpg" in encoding.encoders():
            clean_slnks()
            ofile = pjoin(output_dir,"wave.movie.test.seq.pattern.03d.mpg")
            encoding.encode(iframes_short_a,ofile,3)
            self.assertEqual(0,len(lst_slnks()))
            self.assertTrue(METHOD_NAME(ofile))
            clean_slnks()
            ofile = pjoin(output_dir,"wave.movie.test.seq.pattern.d.mpg")
            encoding.encode(iframes_short_b,ofile,5)
            self.assertEqual(0,len(lst_slnks()))
            self.assertTrue(METHOD_NAME(ofile))

    def test_ffmpeg_input_frame_rate(self):
        """An explicit input frame rate is honored."""
        for enc in ["wmv","mpg","divx","mov","swf","mp4"]:
            if enc in encoding.encoders():
                clean_slnks()
                ofile = pjoin(output_dir,"wave.movie.input_frame_rate.%s" % enc)
                encoding.encode(iframes,ofile,input_frame_rate=5)
                self.assertEqual(0,len(lst_slnks()))
                self.assertTrue(METHOD_NAME(ofile))

    def test_ffmpeg_input_and_output_frame_rate(self):
        """Input and output frame rates may be set together."""
        for enc in ["wmv","mov"]:
            if enc in encoding.encoders():
                clean_slnks()
                ofile = pjoin(output_dir,"wave.movie.input_and_output_frame_rate.%s" % enc)
                encoding.encode(iframes,ofile,input_frame_rate=5,output_frame_rate=30)
                self.assertEqual(0,len(lst_slnks()))
                self.assertTrue(METHOD_NAME(ofile))

    def test_ffmpeg_reencode_new_format(self):
        """An existing movie can be re-encoded into another format."""
        encoders = encoding.encoders()
        if "mpg" in encoders and "wmv" in encoders:
            clean_slnks()
            ofile_src = pjoin(output_dir,"wave.movie.reencode.src.mpg")
            ofile_des = pjoin(output_dir,"wave.movie.reencode.src.wmv")
            encoding.encode(iframes,ofile_src)
            encoding.encode(ofile_src,ofile_des)
            self.assertEqual(0,len(lst_slnks()))
            self.assertTrue(METHOD_NAME(ofile_src))
            self.assertTrue(METHOD_NAME(ofile_des))
if __name__ == '__main__':
    # Allow running this test module directly as a script.
    unittest.main()
|
"""Test module of the references."""
import unittest
from books import Books
import os
import re
import sys
def METHOD_NAME(txt):
    """Turn a heading/title into the anchor slug used by the docs."""
    # Ordered (pattern, replacement) rewrites applied to the lowercased text.
    rewrites = (
        (r'\]\([^)]*\)', ']'),   # drop markdown link targets, keep the bracket
        (r'<[^>]+>', ''),        # strip HTML tags
        (r'\+', 'p'),            # e.g. 'C++' -> 'cpp'
        (r"[\(\):`'=]", ''),     # punctuation that never appears in anchors
        (r'\\_', '_'),           # unescape underscores
        (r'[\W-]+', '-'),        # collapse non-word runs into single dashes
        (r'^-*', ''),            # trim leading dashes
        (r'-*$', ''),            # trim trailing dashes
    )
    slug = txt.lower()
    for pattern, replacement in rewrites:
        slug = re.sub(pattern, replacement, slug)
    return slug.strip(' ').strip('-')
class TestReferences(unittest.TestCase):
    """Unit test of the references."""

    def setUp(self):
        """Collect every anchor defined in every book's markdown files."""
        books = Books()
        # md_path -> list of anchor slugs defined in that file.
        self.anchors = {}
        for book in books.books:
            # we are not responsible of the content of the discord chats
            if book.name == 'discord':
                continue
            for md_path in book.md_paths:
                anchors = []
                args = {} if sys.version_info[0] < 3 else {'encoding': 'utf-8'}
                with open(md_path, **args) as f:
                    # Non-empty while skipping a fenced code block.
                    skipUntil = ''
                    for line in f:
                        if skipUntil:
                            if skipUntil in line:
                                skipUntil = ''
                            continue
                        # Explicit <a name="..."> anchors.
                        if '<a' in line and 'name=' in line:
                            for m in re.finditer(
                                    r'<a[^>]*name\s*=\s*"(.*)"[^>]*>',
                                    line.strip()):
                                anchors.append(m.group(1))
                        if re.match(r'```', line):
                            skipUntil = '```'
                            continue
                        elif line.startswith('#'):
                            # Headings up to level 4 get implicit anchors.
                            m = re.match(r'^#{1,4} .*$', line)
                            if m:
                                title = re.sub(r'^#*', '', line)
                                anchors.append(METHOD_NAME(title))
                        elif line.startswith('%figure'):
                            title = METHOD_NAME(line.replace('%figure', ''))
                            if title:
                                anchors.append(METHOD_NAME(title))
                        elif line.startswith('%api'):
                            title = line.replace('%api', '')
                            anchors.append(METHOD_NAME(title))
                        elif "**wb" in line:
                            # Bold **wb_*** API names are anchors too.
                            for m in re.finditer(r'\*\*(wb[^\*]*)\*\*', line):
                                anchor = m.group(1).replace('\\_', '_')
                                anchors.append(anchor)
                self.anchors[md_path] = anchors

    def test_anchors_are_unique(self):
        """Test that the anchors are unique."""
        books = Books()
        for book in books.books:
            # we are not responsible of the content of the discord chats
            if book.name == 'discord':
                continue
            for md_path in book.md_paths:
                anchors = self.anchors[md_path]
                s = set()
                for a in anchors:
                    if a in s:
                        self.assertTrue(
                            False,
                            msg='%s: Anchors "%s" are not unique'
                            % (md_path, a)
                        )
                    s.add(a)

    def test_references_are_valid(self):
        """Test that the MD files refer valid URLs."""
        books = Books()
        for book in books.books:
            # we are not responsible of the content of the discord chats
            if book.name == 'discord':
                continue
            for md_path in book.md_paths:
                args = {} if sys.version_info[0] < 3 else {'encoding': 'utf-8'}
                with open(md_path, **args) as f:
                    content = f.read()
                # Every non-image markdown link: [text](target).
                for m in re.finditer(r"[^!]\[(.*?)\]\(([^\)]+)\)", content):
                    # remove parameters
                    ref = m.group(2)
                    # 1. external link
                    self.assertFalse(
                        ref.startswith('www'),
                        msg='URL should not start with "www": "%s"' % (ref)
                    )
                    if ref.startswith('http'):
                        continue
                    # 3. steam link
                    if ref.startswith('steam:'):
                        continue
                    # 4. mailto
                    if ref.startswith('mailto:'):
                        mailto = ref[len('mailto:'):]
                        m = re.match(r'^([a-zA-Z0-9_.+-]+@[a-zA-Z0-9-]+\.[a-zA-Z0-9-.]+)$', mailto)
                        self.assertIsNotNone(
                            m,
                            msg='Invalid address "%s"' % (mailto)
                        )
                        continue
                    # 5. variable (the variable should be in format `url.something`)
                    if ref.startswith('{{'):
                        if re.match(r'{{\s{0,}url\..*}}', ref) is not None:
                            continue
                    # 6. link to another MD file
                    link = ''
                    anchor = ''
                    if ref.startswith('#'):
                        anchor = ref[1:]  # Remove the '#' character.
                    else:
                        ref = ref.split('#')
                        link = ref[0]
                        if len(ref) > 1:
                            anchor = ref[1]
                    if link != '':
                        self.assertTrue(
                            link.endswith('.md'),
                            msg='Invalid reference "%s" in %s:\n-> "%s"' %
                            (ref, md_path, m.group(0))
                        )
                        file_path = os.path.join(book.path, link)
                        self.assertTrue(
                            os.path.isfile(file_path),
                            msg='%s: "%s" not found' % (md_path, file_path)
                        )
                    # 7. Anchor
                    if anchor != '':
                        file_path = ''
                        if link == '':
                            file_path = os.path.join(book.path, md_path)
                        else:
                            file_path = os.path.join(book.path, link)
                        file_path = os.path.abspath(file_path)  # Remove '..'-like patterns from file_path.
                        found = anchor in self.anchors[file_path]
                        self.assertTrue(
                            found, msg='%s: %s#%s not found' %
                            (md_path, file_path, anchor)
                        )
import os
from pathlib import Path
from typing import Any, Dict, List, Literal
import pendulum
from prefect import Flow
from prefect.backend import set_key_value
from viadot.task_utils import (
add_ingestion_metadata_task,
df_get_data_types_task,
df_map_mixed_dtypes_for_parquet,
df_to_csv,
df_to_parquet,
dtypes_to_json_task,
update_dtypes_dict,
)
from viadot.tasks import AzureDataLakeUpload, SAPBWToDF
class SAPBWToADLS(Flow):
    """Prefect flow: run an MDX query against SAP BW and land the result
    (plus a JSON schema file) in Azure Data Lake Storage."""

    def __init__(
        self,
        name: str,
        mdx_query: str,
        mapping_dict: dict = None,
        sapbw_credentials: dict = None,
        sapbw_credentials_key: str = "SAP",
        env: str = "BW",
        output_file_extension: str = ".parquet",
        local_file_path: str = None,
        adls_file_name: str = None,
        adls_dir_path: str = None,
        if_exists: Literal["replace", "append", "delete"] = "replace",
        overwrite_adls: bool = True,
        vault_name: str = None,
        sp_credentials_secret: str = None,
        *args: List[Any],
        **kwargs: Dict[str, Any],
    ):
        """
        Flow for downloading data from SAP BW to file, then uploading it to ADLS.
        Args:
            name (str): Name of the flow.
            mdx_query (str): MDX query to be passed to SAP BW server.
            mapping_dict (dict, optional): Dictionary with original column names and the mapping for them. If not None then flows is generating mapping automatically with mapping applied by user, if not - it generates automatically the json file with columns.
            sapbw_credentials (dict, optional): Credentials to SAP in dictionary format. Defaults to None.
            sapbw_credentials_key (str, optional): Azure KV secret. Defaults to "SAP".
            env (str, optional): SAP environment. Defaults to "BW".
            output_file_extension (str, optional): Output file extension - to allow selection between .csv and .parquet. Defaults to ".parquet".
            local_file_path (str, optional): Local destination path. Defaults to None.
            adls_file_name (str, optional): Azure Data Lake file name. Defaults to None.
            adls_dir_path(str, optional): Azure Data Lake destination file path. Defaults to None.
            if_exists (Literal["append", "replace", "skip"], optional): What to do if the table exists. Defaults to "replace".
            overwrite_adls (bool, optional): Whether to overwrite the file in ADLS. Defaults to True.
            vault_name (str, optional): The name of the vault from which to obtain the secrets.. Defaults to None.
            sp_credentials_secret (str, optional): The name of the Azure Key Vault secret containing a dictionary with ACCOUNT_NAME and Service Principal credentials (TENANT_ID, CLIENT_ID, CLIENT_SECRET). Defaults to None.
        """
        self.sapbw_credentials = sapbw_credentials
        self.sapbw_credentials_key = sapbw_credentials_key
        self.env = env
        self.mdx_query = mdx_query
        self.mapping_dict = mapping_dict
        self.output_file_extension = output_file_extension
        self.local_file_path = (
            local_file_path or self.slugify(name) + self.output_file_extension
        )
        self.local_json_path = self.slugify(name) + ".json"
        # Timestamp used for default ADLS file names.
        self.now = str(pendulum.now("utc"))
        self.adls_dir_path = adls_dir_path
        if adls_file_name is not None:
            self.adls_file_path = os.path.join(adls_dir_path, adls_file_name)
            self.adls_schema_file_dir_file = os.path.join(
                adls_dir_path, "schema", Path(adls_file_name).stem + ".json"
            )
        else:
            # No explicit name: fall back to the UTC timestamp.
            self.adls_file_path = os.path.join(
                adls_dir_path, self.now + self.output_file_extension
            )
            self.adls_schema_file_dir_file = os.path.join(
                adls_dir_path, "schema", self.now + ".json"
            )
        self.if_exists = if_exists
        self.overwrite_adls = overwrite_adls
        self.vault_name = vault_name
        self.sp_credentials_secret = sp_credentials_secret
        super().__init__(*args, name=name, **kwargs)
        self.METHOD_NAME()

    @staticmethod
    def slugify(name):
        # Lower-case, underscore-separated version of *name* for file names.
        return name.replace(" ", "_").lower()

    def METHOD_NAME(self) -> Flow:
        """Build the Prefect task graph: SAP BW download -> ingestion
        metadata -> dtype mapping -> local file -> ADLS upload, plus the
        dtype-schema JSON upload."""
        sapbw_to_df_task = SAPBWToDF(
            sapbw_credentials=self.sapbw_credentials,
            sapbw_credentials_key=self.sapbw_credentials_key,
            env=self.env,
        )
        df = sapbw_to_df_task.bind(
            mdx_query=self.mdx_query,
            mapping_dict=self.mapping_dict,
            flow=self,
        )
        df_viadot_downloaded = add_ingestion_metadata_task.bind(df=df, flow=self)
        dtypes_dict = df_get_data_types_task.bind(df_viadot_downloaded, flow=self)
        df_to_be_loaded = df_map_mixed_dtypes_for_parquet.bind(
            df_viadot_downloaded, dtypes_dict, flow=self
        )
        if self.output_file_extension == ".parquet":
            df_to_file = df_to_parquet.bind(
                df=df_to_be_loaded,
                path=self.local_file_path,
                if_exists=self.if_exists,
                flow=self,
            )
        else:
            df_to_file = df_to_csv.bind(
                df=df_to_be_loaded,
                path=self.local_file_path,
                if_exists=self.if_exists,
                flow=self,
            )
        file_to_adls_task = AzureDataLakeUpload()
        adls_upload = file_to_adls_task.bind(
            from_path=self.local_file_path,
            to_path=self.adls_file_path,
            overwrite=self.overwrite_adls,
            sp_credentials_secret=self.sp_credentials_secret,
            flow=self,
        )
        dtypes_updated = update_dtypes_dict(dtypes_dict, flow=self)
        dtypes_to_json_task.bind(
            dtypes_dict=dtypes_updated, local_json_path=self.local_json_path, flow=self
        )
        json_to_adls_task = AzureDataLakeUpload()
        json_to_adls_task.bind(
            from_path=self.local_json_path,
            to_path=self.adls_schema_file_dir_file,
            overwrite=self.overwrite_adls,
            sp_credentials_secret=self.sp_credentials_secret,
            vault_name=self.vault_name,
            flow=self,
        )
        # Explicit ordering edges between the bound tasks.
        df_viadot_downloaded.set_upstream(df, flow=self)
        dtypes_dict.set_upstream(df_viadot_downloaded, flow=self)
        df_to_be_loaded.set_upstream(dtypes_dict, flow=self)
        adls_upload.set_upstream(df_to_file, flow=self)
        df_to_file.set_upstream(dtypes_updated, flow=self)
        json_to_adls_task.set_upstream(dtypes_to_json_task, flow=self)
        set_key_value(key=self.adls_dir_path, value=self.adls_file_path)
#!/usr/bin/env python
"""
GPDB Configuration
Usage:
from mpp.lib.config import GPDBConfig
"""
from mpp.lib.PSQL import PSQL
from tinctest.main import TINCException
import os
# ============================================================================
class GPDBConfigException(TINCException): pass
class GPDBConfig():
'''Class with methods to get GPDB Configuration informaitons '''
class Record:
def __init__(self, line):
line = line.split('|')
line = [l.strip() for l in line]
self.dbid = int(line[0])
self.content = int(line[1])
self.role = line[2] == 'p'
self.preferred_role = line[3] == 'p'
self.mode = line[4] == 's'
self.status = line[5] == 'u'
self.hostname = line[6]
self.address = line[7]
self.port = line[8]
self.datadir = line[9]
self.replication_port =line[10]
self.san_mounts = line[11]
def __init__(self):
self.record = []
self._fill()
def _fill(self):
'''Get the records and add to Record class '''
self.record = []
config_sql = "select dbid, content, role, preferred_role, mode, status, hostname, address, port, fselocation as datadir, replication_port, san_mounts from gp_segment_configuration, pg_filespace_entry, pg_catalog.pg_filespace fs where fsefsoid = fs.oid and fsname='pg_system' and gp_segment_configuration.dbid=pg_filespace_entry.fsedbid ORDER BY content, preferred_role;"
config_out = PSQL.run_sql_command(config_sql, flags = '-t -q', dbname='postgres')
if len(config_out.strip()) > 0:
config_out = config_out.splitlines()
for line in config_out:
if line.find("NOTICE")<0:
line = line.strip()
if line:
self.record.append(GPDBConfig.Record(line))
else:
raise GPDBConfigException('Unable to select gp_segment_configuration')
def has_mirror(self):
''' Checks if the configuration has mirror'''
return reduce(lambda x, y: x or y,
[not r.role for r in self.record])
def get_countprimarysegments(self):
''' Returns number of primary segments '''
n = 0
for r in self.record:
if r.role and r.content != -1:
n += 1
return n
def get_hosts(self, segments = False):
'''
@summary Returns the list of hostnames
@param segments : True or False (True -returns only segment hosts)
'''
list = []
for r in self.record:
if segments:
if r.content != -1:
list.append(r.hostname)
else:
list.append(r.hostname)
return set(list)
def get_hostandport_of_segment(self, psegmentNumber = 0, pRole = 'p'):
'''
@summary: Return a tuple that contains the host and port of the specified segment.
@param pSegmentNumber : The segment number (0 - N-1, where N is the number of segments).
@param pRole: 'p' for Primary, 'm' for Mirror
'''
if pRole == 'p':
role = True
else:
role = False
for seg in self.record:
if seg.content == psegmentNumber and seg.role == role:
return (seg.hostname, seg.port)
def get_host_and_datadir_of_segment(self, dbid=-1):
'''
@description : Return hostname and data_dir for the dbid provided
'''
for r in self.record:
if r.dbid == int(dbid):
return(r.hostname, r.datadir)
def METHOD_NAME(self, prole='p'):
'''
@summary: Check from the segment "address" column whether the GPDB configuration is localhost
Now checks whether "all" segments has "localhost" address
@param pRole: 'p' for primary, 'm' for mirror
'''
if prole == 'p':
role = True
else:
role = False
n = 0
for r in self.record:
if r.content != -1:
if r.role == role:
if r.address == "localhost":
n = n+1
return (self.get_countprimarysegments()==n)
def is_multinode(self):
'''
Check whether GPDB is multinode
For OSX, it will always be single node. It's documented about issues with OSX and GPBD setting up
@note: On DCA, the hostname for each segment is different, but the address is pointing to localhost
localhost
'''
if os.uname()[0] == 'Darwin':
return False
# We check hostname, as we could have different addresses
# on the same host.
hostname_set = set([r.hostname for r in self.record])
if len(hostname_set) == 1:
return False
else:
return True
def has_master_mirror(self):
''' Returns true if standby is configured '''
master = 0
for r in self.record:
if r.content == -1:
master += 1
if master == 1:
return False
else:
return True
def get_count_segments(self):
'''Returns number of segments '''
out = PSQL.run_sql_command("select count(*) from gp_segment_configuration where content != -1 and role = 'p' and status = 'u'", flags='-q -t', dbname='template1')
for line in out:
return line.strip()
def is_mastermirror_synchronized(self):
''' Returns True is master and standby are synchronized'''
out = PSQL.run_sql_command('select summary_state from gp_master_mirroring',flags='-q -t', dbname='template1')
if len(out.strip()) > 0:
for line in out:
line = line.strip()
if line == 'Synchronized':
return True
return False
def get_masterdata_directory(self):
''' Returns the MASTER_DATA_DIRECTORY '''
for r in self.record:
if r.role and r.content == -1:
return r.datadir
def get_masterhost(self):
''' Returns master hostname'''
for r in self.record:
if r.role and r.content == -1:
return r.hostname
def get_master_standbyhost(self):
    ''' Return the standby master hostname (content == -1, mirror role). '''
    return next(
        (rec.hostname for rec in self.record
         if rec.content == -1 and not rec.role),
        None,
    )
def is_not_insync_segments(self):
    '''Return True when no segment is down or in change tracking.

    A zero count from the query means every segment is synchronized
    (mode 's') and up (status 'u').
    '''
    gpseg_sql = "select count(*) from gp_segment_configuration where mode <>'s' or status <> 'u';"
    not_insync = PSQL.run_sql_command(gpseg_sql, flags = '-t -q')
    return not_insync.strip() == '0'
def is_balanced_segments(self):
    '''Return True when every segment runs in its preferred role.

    A zero count from the query means primary and mirror are balanced.
    '''
    gpseg_sql = "select count(*) from gp_segment_configuration where role != preferred_role;"
    unbalanced = PSQL.run_sql_command(gpseg_sql, flags = '-t -q')
    return unbalanced.strip() == '0'
def count_of_nodes_in_mode(self, mode = 'c'):
    """
    Count the segments currently in the given replication mode.

    @param mode: gp_segment_configuration.mode value; default 'c'
                 (change tracking)
    @return: the count, as a string parsed from the psql output
    """
    sqlcmd = "select count(*) from gp_segment_configuration where mode = '" + mode + "'"
    raw = PSQL.run_sql_command(sqlcmd)
    # With default psql formatting, row index 3 of the output holds the
    # single count value.
    return raw.split('\n')[3].strip()
def is_down_segments(self):
    """Return True when any segment in the configuration is marked down ('d')."""
    return any(rec.status == 'd' for rec in self.record)
# -*- coding: utf-8 -*-
# Copyright (C) Duncan Macleod (2014-2020)
#
# This file is part of GWpy.
#
# GWpy is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# GWpy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GWpy. If not, see <http://www.gnu.org/licenses/>.
"""Unit tests for :mod:`gwpy.spectrogram.spectrogram`
"""
from io import BytesIO
import pytest
import numpy
from scipy import signal
from matplotlib import rc_context
from astropy import units
from ...testing import utils
from ...types.tests.test_array2d import TestArray2D as _TestArray2D
from .. import Spectrogram
__author__ = 'Duncan Macleod <duncan.macleod@ligo.org>'
class TestSpectrogram(_TestArray2D):
    """Tests of `gwpy.spectrogram.Spectrogram`

    Inherits the fixtures and generic array tests from ``TestArray2D``;
    ``array`` is a pytest fixture provided by that base class.
    """
    TEST_CLASS = Spectrogram

    def test_new(self):
        """Constructor accepts ``epoch`` as an alias for ``t0``."""
        super().test_new()

        # check handling of epoch vs t0
        a = self.create(epoch=10)
        b = self.create(t0=10)
        utils.assert_quantity_sub_equal(a, b)

    def test_new_redundant_args(self):
        """Passing both ``epoch`` and ``t0`` must be rejected."""
        with pytest.raises(
            ValueError,
            match="^give only one of epoch or t0$",
        ):
            self.TEST_CLASS(self.data, epoch=1, t0=1)

    def test_new_times(self):
        """Explicit ``times`` array is stored with seconds attached."""
        times = numpy.arange(self.data.shape[0])
        a = self.create(times=times)
        utils.assert_quantity_equal(a.times, times * units.second)

    def test_epoch(self, array):
        # epoch is the GPS time of the first sample (x0)
        assert array.epoch.gps == array.x0.value

    def test_value_at(self, array):
        super().test_value_at(array)
        # mixed units should be converted before the lookup
        v = array.value_at(5000 * units.millisecond,
                           2000 * units.milliHertz)
        assert v == self.data[5][2] * array.unit

    @pytest.mark.parametrize('ratio', ('mean', 'median'))
    def test_ratio(self, array, ratio):
        """``ratio`` divides by the named per-frequency statistic."""
        rat = array.ratio(ratio)
        array_meth = getattr(array, ratio)
        utils.assert_quantity_sub_equal(rat, array / array_meth(axis=0))
        # unknown statistic name is rejected
        with pytest.raises(ValueError):
            array.ratio('blah')

    def test_from_spectra(self, array):
        """``from_spectra`` stacks 1-D spectra into a Spectrogram."""
        min_ = self.TEST_ARRAY.min(axis=0)
        max_ = self.TEST_ARRAY.max(axis=0)
        mean = self.TEST_ARRAY.mean(axis=0)

        # check basic stack works; metadata is taken from the first spectrum
        new = self.TEST_ARRAY.from_spectra(mean, min_, max_, dt=1)
        assert new.shape == (3, min_.size)
        assert new.name == mean.name
        assert new.epoch == mean.epoch
        assert new.f0 == mean.f0
        assert new.df == mean.df
        assert new.unit == mean.unit
        assert new.dt == 1 * units.second
        utils.assert_array_equal(
            new.value, numpy.vstack((mean.value, min_.value, max_.value)))

        # check kwargs override the metadata from the input spectra
        new = self.TEST_ARRAY.from_spectra(
            mean, min_, max_,
            dt=2, epoch=0, f0=100, df=.5, unit='meter', name='test')
        assert new.name == 'test'
        assert new.epoch.gps == 0
        assert new.f0 == 100 * units.Hertz
        assert new.df == 0.5 * units.Hertz
        assert new.unit == units.meter

        # check error on timing (dt required unless the spectra carry it)
        with pytest.raises(ValueError):
            self.TEST_ARRAY.from_spectra(mean)
        self.TEST_ARRAY.from_spectra(mean, dt=array.dt)

        # check error on inputs with mismatched size or spacing
        with pytest.raises(ValueError):
            self.TEST_ARRAY.from_spectra(mean, mean[1:])
        with pytest.raises(ValueError):
            self.TEST_ARRAY.from_spectra(mean, mean[::2])

    def METHOD_NAME(self):
        """``crop_frequencies``: views, copies, and out-of-range warnings."""
        array = self.create(f0=0, df=1)

        # test simple: no arguments returns an equal view (shared memory)
        array2 = array.crop_frequencies()
        utils.assert_quantity_sub_equal(array, array2)
        assert numpy.may_share_memory(array.value, array2.value)

        # test normal: [low, high) crop updates f0, keeps df
        array2 = array.crop_frequencies(2, 5)
        utils.assert_array_equal(array2.value, array.value[:, 2:5])
        assert array2.f0 == 2 * units.Hertz
        assert array2.df == array.df

        # test copy=True returns independent memory
        array2 = array.crop_frequencies(copy=True)
        assert not numpy.may_share_memory(array.value, array2.value)

        # test warnings when the requested band exceeds the array's span
        with pytest.warns(UserWarning):
            array.crop_frequencies(array.yspan[0]-1, array.yspan[1])
        with pytest.warns(UserWarning):
            array.crop_frequencies(array.yspan[0], array.yspan[1]+1)

    @pytest.mark.parametrize('method', ('imshow', 'pcolormesh'))
    def test_plot(self, array, method):
        """Plotting produces one image/mesh and axis limits match the data."""
        with rc_context(rc={'text.usetex': False}):
            plot = array.plot(method=method)
            ax = plot.gca()
            assert len(ax.lines) == 0
            if method == 'imshow':
                assert len(ax.images) == 1
            else:
                assert len(ax.collections) == 1
            assert ax.get_epoch() == array.x0.value
            assert ax.get_xlim() == array.xspan
            assert ax.get_ylim() == array.yspan
            plot.save(BytesIO(), format='png')
            plot.close()

    def test_zpk(self, array):
        """``zpk`` is a shorthand for an analogue ``filter`` call."""
        zpk = [], [1], 1
        utils.assert_quantity_sub_equal(
            array.zpk(*zpk), array.filter(*zpk, analog=True))

    def test_filter(self):
        """``filter`` applies a frequency response; supports in-place mode."""
        array = self.create(t0=0, dt=1/1024., f0=0, df=1)

        # build filter and its expected (NaN-sanitised) magnitude response
        zpk = [], [1], 1
        lti = signal.lti(*zpk)
        fresp = numpy.nan_to_num(abs(
            lti.freqresp(w=array.frequencies.value)[1]))

        # test simple filter
        a2 = array.filter(*zpk, analog=True)
        utils.assert_array_equal(array * fresp, a2)

        # test inplace filtering
        array.filter(lti, inplace=True, analog=True)
        utils.assert_array_equal(array, a2)

        # test errors for unknown keyword arguments
        with pytest.raises(TypeError):
            array.filter(lti, blah=1)

    def test_read_write_hdf5(self):
        """Round-trip through HDF5 preserves the array."""
        array = self.create(name='X1:TEST')
        utils.test_read_write(array, 'hdf5', write_kw={'overwrite': True})

    def test_percentile(self):
        """``percentile(50)`` equals the per-frequency median, renamed."""
        array = self.create(name='Test', unit='m')
        a2 = array.percentile(50)
        utils.assert_quantity_sub_equal(array.median(axis=0), a2,
                                        exclude=('name',))
        assert a2.name == 'Test: 50th percentile'
        assert a2.unit == array.unit
import re
import logging
from .base import ResponseMicroService
from ..context import Context
from ..exception import SATOSAError
logger = logging.getLogger(__name__)
class AddStaticAttributes(ResponseMicroService):
    """
    Add static attributes to the responses.
    """

    def __init__(self, config, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Mapping of attribute name -> value(s), merged into every response.
        self.static_attributes = config["static_attributes"]

    def process(self, context, data):
        # Merge the configured attributes in place; configured keys win
        # over any attribute of the same name already on the response.
        data.attributes.update(self.static_attributes)
        return super().process(context, data)
class FilterAttributeValues(ResponseMicroService):
    """
    Filter attribute values, only preserving those matching the given filters.

    Filters are configured per target provider and per requester; the empty
    string key ("") acts as the default at each level.
    """

    def __init__(self, config, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.attribute_filters = config["attribute_filters"]

    def process(self, context, data):
        # apply default filters
        provider_filters = self.attribute_filters.get("", {})
        target_provider = data.auth_info.issuer
        self._apply_requester_filters(data.attributes, provider_filters, data.requester, context, target_provider)

        # apply target provider specific filters
        provider_filters = self.attribute_filters.get(target_provider, {})
        self._apply_requester_filters(data.attributes, provider_filters, data.requester, context, target_provider)
        return super().process(context, data)

    def _apply_requester_filters(self, attributes, provider_filters, requester, context, target_provider):
        # apply default requester filters
        default_requester_filters = provider_filters.get("", {})
        self.METHOD_NAME(attributes, default_requester_filters, context, target_provider)

        # apply requester specific filters
        requester_filters = provider_filters.get(requester, {})
        self.METHOD_NAME(attributes, requester_filters, context, target_provider)

    def METHOD_NAME(self, attributes, attribute_filters, context, target_provider):
        """Apply the configured filters to ``attributes`` in place.

        ``attribute_filters`` maps an attribute name ("" meaning all
        attributes) to either a bare regex string or a dict of
        ``{filter_type: filter_value}``.

        :raises SATOSAError: for an unrecognised filter type
        """
        # Bind the spec to a new name instead of rebinding the dict we are
        # iterating (the original shadowed ``attribute_filters`` here).
        for attribute_name, filters_spec in attribute_filters.items():
            if isinstance(filters_spec, str):
                # convert simple notation (bare regex string) to the dict form
                filters_spec = {'regexp': filters_spec}
            for filter_type, filter_value in filters_spec.items():
                if filter_type == "regexp":
                    filter_func = re.compile(filter_value).search
                elif filter_type == "shibmdscope_match_scope":
                    mdstore = context.get_decoration(Context.KEY_METADATA_STORE)
                    md_scopes = list(mdstore.shibmd_scopes(target_provider,"idpsso_descriptor")) if mdstore else []
                    # bind md_scopes as a default argument so the lambda does
                    # not depend on late binding of the loop variable
                    filter_func = lambda v, scopes=md_scopes: self._shibmdscope_match_scope(v, scopes)
                elif filter_type == "shibmdscope_match_value":
                    mdstore = context.get_decoration(Context.KEY_METADATA_STORE)
                    md_scopes = list(mdstore.shibmd_scopes(target_provider,"idpsso_descriptor")) if mdstore else []
                    filter_func = lambda v, scopes=md_scopes: self._shibmdscope_match_value(v, scopes)
                else:
                    raise SATOSAError("Unknown filter type")
                if attribute_name == "":  # default filter for all attributes
                    for attribute in attributes:
                        attributes[attribute] = list(filter(filter_func, attributes[attribute]))
                elif attribute_name in attributes:
                    attributes[attribute_name] = list(filter(filter_func, attributes[attribute_name]))

    def _shibmdscope_match_value(self, value, md_scopes):
        """Return True if ``value`` matches any shibmd scope (literal or regexp)."""
        for md_scope in md_scopes:
            if not md_scope['regexp'] and md_scope['text'] == value:
                return True
            elif md_scope['regexp'] and re.fullmatch(md_scope['text'], value):
                return True
        return False

    def _shibmdscope_match_scope(self, value, md_scopes):
        """Return True if the scope part of a ``user@scope`` value matches."""
        split_value = value.split('@')
        if len(split_value) != 2:
            logger.info(f"Discarding invalid scoped value {value}")
            return False
        value_scope = split_value[1]
        return self._shibmdscope_match_value(value_scope, md_scopes)
# Copyright 2008-2011 Nokia Networks
# Copyright 2011-2016 Ryan Tomac, Ed Manlove and contributors
# Copyright 2016- Robot Framework Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Union
from selenium.webdriver.remote.webelement import WebElement
from SeleniumLibrary.base import LibraryComponent, keyword
class FrameKeywords(LibraryComponent):
    @keyword
    def METHOD_NAME(self, locator: Union[WebElement, str]):
        """Sets frame identified by ``locator`` as the current frame.

        See the `Locating elements` section for details about the locator
        syntax.

        Works both with frames and iframes. Use `Unselect Frame` to cancel
        the frame selection and return to the main frame.

        Example:
        | `Select Frame` | top-frame | # Select frame with id or name 'top-frame' |
        | `Click Link` | example | # Click link 'example' in the selected frame |
        | `Unselect Frame` | | # Back to main frame. |
        | `Select Frame` | //iframe[@name='xxx'] | # Select frame using xpath |
        """
        self.info(f"Selecting frame '{locator}'.")
        frame = self.find_element(locator)
        self.driver.switch_to.frame(frame)

    @keyword
    def unselect_frame(self):
        """Sets the main frame as the current frame.

        In practice cancels the previous `Select Frame` call.
        """
        self.driver.switch_to.default_content()

    @keyword
    def current_frame_should_contain(self, text: str, loglevel: str = "TRACE"):
        """Verifies that the current frame contains ``text``.

        See `Page Should Contain` for an explanation about the ``loglevel``
        argument.

        Prior to SeleniumLibrary 3.0 this keyword was named
        `Current Frame Contains`.
        """
        # Success path first; on failure log the page source for debugging.
        if self.is_text_present(text):
            self.info(f"Current frame contains text '{text}'.")
            return
        self.log_source(loglevel)
        raise AssertionError(
            f"Frame should have contained text '{text}' but did not."
        )

    @keyword
    def current_frame_should_not_contain(self, text: str, loglevel: str = "TRACE"):
        """Verifies that the current frame does not contain ``text``.

        See `Page Should Contain` for an explanation about the ``loglevel``
        argument.
        """
        if not self.is_text_present(text):
            self.info(f"Current frame did not contain text '{text}'.")
            return
        self.log_source(loglevel)
        raise AssertionError(
            f"Frame should not have contained text '{text}' but it did."
        )

    @keyword
    def frame_should_contain(
        self, locator: Union[WebElement, str], text: str, loglevel: str = "TRACE"
    ):
        """Verifies that frame identified by ``locator`` contains ``text``.

        See the `Locating elements` section for details about the locator
        syntax.

        See `Page Should Contain` for an explanation about the ``loglevel``
        argument.
        """
        if self._frame_contains(locator, text):
            self.info(f"Frame '{locator}' contains text '{text}'.")
            return
        self.log_source(loglevel)
        raise AssertionError(
            f"Frame '{locator}' should have contained text '{text}' but did not."
        )

    def _frame_contains(self, locator: Union[WebElement, str], text: str):
        # Temporarily switch into the frame, search, then restore the
        # main document before reporting the result.
        frame = self.find_element(locator)
        self.driver.switch_to.frame(frame)
        self.info(f"Searching for text from frame '{locator}'.")
        found = self.is_text_present(text)
        self.driver.switch_to.default_content()
        return found
# pylint: disable=too-many-lines
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, Callable, Dict, Iterable, Optional, TypeVar
import urllib.parse
from azure.core.exceptions import (
ClientAuthenticationError,
HttpResponseError,
ResourceExistsError,
ResourceNotFoundError,
ResourceNotModifiedError,
map_error,
)
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpResponse
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.core.utils import case_insensitive_dict
from azure.mgmt.core.exceptions import ARMErrorFormat
from .. import models as _models
from ..._serialization import Serializer
from .._vendor import _convert_request
T = TypeVar("T")
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
_SERIALIZER = Serializer()
_SERIALIZER.client_side_validation = False
def build_list_request(location: str, subscription_id: str, **kwargs: Any) -> HttpRequest:
    """Build the GET request that lists trusted access roles for a location."""
    headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
    params = case_insensitive_dict(kwargs.pop("params", {}) or {})

    api_version: str = kwargs.pop("api_version", params.pop("api-version", "2023-02-02-preview"))
    accept = headers.pop("Accept", "application/json")

    # Construct URL
    url = kwargs.pop(
        "template_url",
        "/subscriptions/{subscriptionId}/providers/Microsoft.ContainerService/locations/{location}/trustedAccessRoles",
    )  # pylint: disable=line-too-long
    path_format_arguments = {
        "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str", min_length=1),
        "location": _SERIALIZER.url("location", location, "str", min_length=1),
    }
    url = url.format(**path_format_arguments)

    # Construct parameters
    params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")

    # Construct headers
    headers["Accept"] = _SERIALIZER.header("accept", accept, "str")

    return HttpRequest(method="GET", url=url, params=params, headers=headers, **kwargs)
class TrustedAccessRolesOperations:
    """
    .. warning::
        **DO NOT** instantiate this class directly.

        Instead, you should access the following operations through
        :class:`~azure.mgmt.containerservice.v2023_02_02_preview.ContainerServiceClient`'s
        :attr:`trusted_access_roles` attribute.
    """

    models = _models

    def __init__(self, *args, **kwargs):
        # Autogenerated plumbing: accept client/config/(de)serializers/api_version
        # either positionally or as keyword arguments.
        # NOTE(review): METHOD_NAME here appears to stand in for the builtin
        # ``list`` in the upstream generated source -- confirm against the
        # original generator output.
        input_args = METHOD_NAME(args)
        self._client = input_args.pop(0) if input_args else kwargs.pop("client")
        self._config = input_args.pop(0) if input_args else kwargs.pop("config")
        self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer")
        self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer")
        self._api_version = input_args.pop(0) if input_args else kwargs.pop("api_version")

    @distributed_trace
    def METHOD_NAME(self, location: str, **kwargs: Any) -> Iterable["_models.TrustedAccessRole"]:
        """List supported trusted access roles.

        List supported trusted access roles.

        :param location: The name of Azure region. Required.
        :type location: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either TrustedAccessRole or the result of cls(response)
        :rtype:
         ~azure.core.paging.ItemPaged[~azure.mgmt.containerservice.v2023_02_02_preview.models.TrustedAccessRole]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        _headers = kwargs.pop("headers", {}) or {}
        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})

        # Explicit api_version wins over params, then the client default.
        api_version: str = kwargs.pop(
            "api_version", _params.pop("api-version", self._api_version or "2023-02-02-preview")
        )
        cls: ClsType[_models.TrustedAccessRoleListResult] = kwargs.pop("cls", None)

        # Map HTTP error codes to the azure-core exception types.
        error_map = {
            401: ClientAuthenticationError,
            404: ResourceNotFoundError,
            409: ResourceExistsError,
            304: ResourceNotModifiedError,
        }
        error_map.update(kwargs.pop("error_map", {}) or {})

        def prepare_request(next_link=None):
            # First page uses the templated URL; subsequent pages follow the
            # service-provided next_link, re-applying the api-version.
            if not next_link:
                request = build_list_request(
                    location=location,
                    subscription_id=self._config.subscription_id,
                    api_version=api_version,
                    template_url=self.METHOD_NAME.metadata["url"],
                    headers=_headers,
                    params=_params,
                )
                request = _convert_request(request)
                request.url = self._client.format_url(request.url)

            else:
                # make call to next link with the client's api-version
                _parsed_next_link = urllib.parse.urlparse(next_link)
                _next_request_params = case_insensitive_dict(
                    {
                        key: [urllib.parse.quote(v) for v in value]
                        for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items()
                    }
                )
                _next_request_params["api-version"] = self._config.api_version
                request = HttpRequest(
                    "GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params
                )
                request = _convert_request(request)
                request.url = self._client.format_url(request.url)
                request.method = "GET"
            return request

        def extract_data(pipeline_response):
            # Deserialize one page and return (next_link, items) for ItemPaged.
            deserialized = self._deserialize("TrustedAccessRoleListResult", pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)  # type: ignore
            return deserialized.next_link or None, iter(list_of_elem)

        def get_next(next_link=None):
            # Run the pipeline for one page; only HTTP 200 is accepted.
            request = prepare_request(next_link)

            _stream = False
            pipeline_response: PipelineResponse = self._client._pipeline.run(  # pylint: disable=protected-access
                request, stream=_stream, **kwargs
            )
            response = pipeline_response.http_response

            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response, error_format=ARMErrorFormat)

            return pipeline_response

        return ItemPaged(get_next, extract_data)

    METHOD_NAME.metadata = {
        "url": "/subscriptions/{subscriptionId}/providers/Microsoft.ContainerService/locations/{location}/trustedAccessRoles"
    }
from meerk40t.core.units import MM_PER_INCH, UNITS_PER_INCH, Length
from meerk40t.svgelements import Matrix
class View:
    """Maps a source rectangle (device units) onto a destination quadrilateral.

    Corners are stored in (top_left, top_right, bottom_right, bottom_left)
    order; the mapping matrix is rebuilt lazily whenever the destination
    changes.
    """

    def __init__(
        self, width, height, dpi=float(UNITS_PER_INCH), dpi_x=None, dpi_y=None
    ):
        """
        This should init the simple width and height dimensions.

        The default coordinate system is (0,0), (width,0), (width,height), (0,height), In top_left, top_right,
        bottom_right, bottom_left ordering.

        @param width:
        @param height:
        """
        # Per-axis DPI defaults to the shared dpi value.
        if dpi_x is None:
            dpi_x = dpi
        if dpi_y is None:
            dpi_y = dpi
        self.width = width
        self.height = height
        self.dpi_x = dpi_x
        self.dpi_y = dpi_y
        self.dpi = (dpi_x + dpi_y) / 2.0
        # Corner quads and the lazily-computed source->destination matrix.
        self._source = None
        self._destination = None
        self._matrix = None

    def __str__(self):
        return f"View('{self.width}', '{self.height}', @{self.dpi})"

    @property
    def mm(self):
        # Device units per millimetre.
        return self.dpi * MM_PER_INCH

    def set_dims(self, width, height):
        """Replace the view dimensions and rebuild the default corner quads."""
        self.width = width
        self.height = height
        self.reset()

    def reset(self):
        """Reset source and destination to the identity mapping of the view."""
        width = float(Length(self.width))
        height = float(Length(self.height))
        top_left = 0, 0
        top_right = width, 0
        bottom_right = width, height
        bottom_left = 0, height
        self._source = top_left, top_right, bottom_right, bottom_left
        self._destination = top_left, top_right, bottom_right, bottom_left

    def contains(self, x, y):
        """
        This solves the AABB of the container, not the strict solution. If a view is rotated by a non-tau/4 multiple
        amount, we could generate false positives.

        @param x:
        @param y:
        @return:
        """
        # This solves the AABB of the container, not the strict solution
        x0, y0, x1, y1 = self.bbox()
        return x0 < x < x1 and y0 < y < y1

    def bbox(self):
        """Return the axis-aligned bounding box of the destination quad."""
        return (
            min(
                self._destination[0][0],
                self._destination[1][0],
                self._destination[2][0],
                self._destination[3][0],
            ),
            min(
                self._destination[0][1],
                self._destination[1][1],
                self._destination[2][1],
                self._destination[3][1],
            ),
            max(
                self._destination[0][0],
                self._destination[1][0],
                self._destination[2][0],
                self._destination[3][0],
            ),
            max(
                self._destination[0][1],
                self._destination[1][1],
                self._destination[2][1],
                self._destination[3][1],
            ),
        )

    def scale(self, scale_x, scale_y):
        """Scale every destination corner about the origin; invalidates matrix."""
        # NOTE(review): the scaled width/height locals below are never used --
        # candidates for removal.
        width = float(Length(self.width))
        height = float(Length(self.height))
        width *= scale_x
        height *= scale_y
        top_left, top_right, bottom_right, bottom_left = self._destination
        top_left, top_right, bottom_right, bottom_left = (
            (top_left[0] * scale_x, top_left[1] * scale_y),
            (top_right[0] * scale_x, top_right[1] * scale_y),
            (bottom_right[0] * scale_x, bottom_right[1] * scale_y),
            (bottom_left[0] * scale_x, bottom_left[1] * scale_y),
        )
        self._destination = top_left, top_right, bottom_right, bottom_left
        self._matrix = None

    def origin(self, origin_x, origin_y):
        """Translate the destination so the origin lies at the given fractional position."""
        width = float(Length(self.width))
        height = float(Length(self.height))
        dx = -width * origin_x
        dy = -height * origin_y
        top_left, top_right, bottom_right, bottom_left = self._destination
        self._destination = (
            (top_left[0] + dx, top_left[1] + dy),
            (top_right[0] + dx, top_right[1] + dy),
            (bottom_right[0] + dx, bottom_right[1] + dy),
            (bottom_left[0] + dx, bottom_left[1] + dy),
        )
        self._matrix = None

    def flip_x(self):
        # Mirror horizontally by swapping left and right corners.
        top_left, top_right, bottom_right, bottom_left = self._destination
        self._destination = (
            top_right,
            top_left,
            bottom_left,
            bottom_right,
        )
        self._matrix = None

    def flip_y(self):
        # Mirror vertically by swapping top and bottom corners.
        top_left, top_right, bottom_right, bottom_left = self._destination
        self._destination = (
            bottom_left,
            bottom_right,
            top_right,
            top_left,
        )
        self._matrix = None

    def swap_xy(self):
        # Transpose axes by exchanging x and y in every corner.
        top_left, top_right, bottom_right, bottom_left = self._destination
        self._destination = (
            (top_left[1], top_left[0]),
            (top_right[1], top_right[0]),
            (bottom_right[1], bottom_right[0]),
            (bottom_left[1], bottom_left[0]),
        )
        self._matrix = None

    def transform(
        self,
        origin_x=0.0,
        origin_y=0.0,
        user_scale_x=1.0,
        user_scale_y=1.0,
        flip_x=False,
        flip_y=False,
        swap_xy=False,
    ):
        """Rebuild the destination from scratch, applying the operations in a
        fixed order: inverse user scale, flips, origin shift, axis swap."""
        self.reset()
        self.scale(1.0 / user_scale_x, 1.0 / user_scale_y)
        if flip_x:
            self.flip_x()
        if flip_y:
            self.flip_y()
        if origin_x != 0 or origin_y != 0:
            self.origin(origin_x, origin_y)
        if swap_xy:
            self.swap_xy()

    def position(self, x, y, vector=False):
        """Map a point (or vector when ``vector=True``) from source to destination space."""
        # Length strings are resolved relative to the view's dimensions.
        if not isinstance(x, (int, float)):
            x = Length(x, relative_length=self.width, unitless=1).units
        if not isinstance(y, (int, float)):
            y = Length(y, relative_length=self.height, unitless=1).units
        unit_x, unit_y = x, y
        if vector:
            return self.matrix.transform_vector([unit_x, unit_y])
        return self.matrix.point_in_matrix_space([unit_x, unit_y])

    def iposition(self, x, y, vector=False):
        """Inverse of `position`: map from destination back to source space."""
        if not isinstance(x, (int, float)):
            x = Length(x, relative_length=self.width, unitless=1).units
        if not isinstance(y, (int, float)):
            y = Length(y, relative_length=self.height, unitless=1).units
        unit_x, unit_y = x, y
        matrix = ~self.matrix
        if vector:
            return matrix.transform_vector([unit_x, unit_y])
        return matrix.point_in_matrix_space([unit_x, unit_y])

    @property
    def matrix(self):
        # Lazily build the perspective map from the source quad onto the
        # destination quad; invalidated (set to None) by every mutator above.
        if self._matrix is None:
            self._matrix = Matrix.map(*self._source, *self._destination)
        return self._matrix

    def METHOD_NAME(self, dpi):
        """
        Converts a DPI to a given step amount within the device length values. So M2 Nano will have 1 step per mil,
        the DPI of 500 therefore is step_x 2, step_y 2. A Galvo laser with a 200mm lens will have steps equal to
        200mm/65536 ~= 0.12 mils. So a DPI of 500 needs a step size of ~16.65 for x and y. Since 500 DPI is one dot
        per 2 mils.

        Note, steps size can be negative if our driver is x or y flipped.

        @param dpi:
        @return:
        """
        # We require vectors so any positional offsets are non-contributing.
        unit_x = self.dpi_x
        unit_y = self.dpi_y
        matrix = self.matrix
        oneinch_x = abs(complex(*matrix.transform_vector([unit_x, 0])))
        oneinch_y = abs(complex(*matrix.transform_vector([0, unit_y])))
        step_x = float(oneinch_x / dpi)
        step_y = float(oneinch_y / dpi)
        return step_x, step_y
# Copyright (c) 2001-2022, Hove and/or its affiliates. All rights reserved.
#
# This file is part of Navitia,
# the software to build cool stuff with public transport.
#
# Hope you'll enjoy and contribute to this project,
# powered by Hove (www.hove.com).
# Help us simplify mobility and open public transport:
# a non ending quest to the responsive locomotion way of traveling!
#
# LICENCE: This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Stay tuned using
# twitter @navitia
# channel `#navitia` on riot https://riot.im/app/#/room/#navitia:matrix.org
# https://groups.google.com/d/forum/navitia
# www.navitia.io
from __future__ import absolute_import, print_function, unicode_literals, division
from jormungandr import i_manager
# Mapping of plural collection names (as used in API paths) to the singular
# resource type name used internally.
collections_to_resource_type = {
    "stop_points": "stop_point",
    "routes": "route",
    "networks": "network",
    "commercial_modes": "commercial_mode",
    "physical_modes": "physical_mode",
    "companies": "company",
    "stop_areas": "stop_area",
    "lines": "line",
    "line_groups": "line_group",
    "addresses": "address",
    "coords": "coord",
    "trips": "trip",
    "contributors": "contributor",
    "datasets": "dataset",
}
# Inverse mapping: singular resource type -> plural collection name.
resource_type_to_collection = dict(
    (resource_type, collection) for (collection, resource_type) in collections_to_resource_type.items()
)
# Collections that cannot be used in PT-Referential chained queries.
types_not_ptrefable = ["addresses", "administrative_regions"]
class InvalidUriException(Exception):
    """Raised when a request URI cannot be parsed (bad region, coord or resource)."""

    def __init__(self, message):
        super().__init__(message)
class Uri:
    """Parse a request URI of the form ``<region-or-coord>[/collection[/id]...]``.

    The first path element is either a region name or a ``lon;lat`` pair;
    the remainder is parsed into (resource_type, id) pairs in ``objects``.
    """

    # Collections accepted in the path. A frozenset class constant gives O(1)
    # membership tests and is built once, instead of rebuilding a list on
    # every METHOD_NAME call as before.
    _RESOURCE_TYPES = frozenset([
        "connections",
        "stop_points",
        "networks",
        "commercial_modes",
        "physical_modes",
        "companies",
        "stop_areas",
        "routes",
        "lines",
        "line_groups",
        "addresses",
        "administrative_regions",
        "coords",
        "pois",
        "trips",
        "contributors",
        "datasets",
    ])

    def __init__(self, string):
        self.uri = string
        self.params = None
        self.lon = None
        self.lat = None
        self.is_region = None
        self.objects = []  # list of (resource_type, id-or-None) pairs
        self.region_ = None
        self.parse_region_coord()
        self.parse_params()

    def region(self):
        """Return the region name, resolving coordinates lazily via i_manager.

        :raises InvalidUriException: when no region covers the coordinates
        """
        if not self.region_ and self.lon and self.lat:
            # On va chercher la region associee
            self.region_ = i_manager.get_region(lon=self.lon, lat=self.lat, api='ALL')
            if not self.region_:
                error = "No region is covering these coordinates"
                raise InvalidUriException(error)
        return self.region_

    def parse_region_coord(self):
        """Split the leading path element into a region name or a lon;lat pair."""
        # On caste la premiere partie de l'url qui est soit une region,
        # soit une coordonnee (coord/lon;lat)
        parts = self.uri.split("/")
        parts.reverse()
        self.region_or_coord_part = parts.pop()
        if self.region_or_coord_part.count(";") == 1:
            self.is_region = False
            # count(";") == 1 guarantees split yields exactly two parts, so
            # the former length check was dead code.
            lon, lat = self.region_or_coord_part.split(";")
            try:
                self.lon = float(lon)
                self.lat = float(lat)
            except ValueError:
                error = ", unable to parse lon or lat" + lon + ":" + lat
                raise InvalidUriException(error)
        else:
            self.is_region = True
            self.region_ = self.region_or_coord_part
        parts.reverse()
        self.params = "/".join(parts)

    def parse_params(self):
        """Build ``self.objects`` as (resource_type, id) pairs from the path.

        :raises InvalidUriException: for an unknown collection name
        """
        resource_type, uid = None, None
        for par in self.params.split("/"):
            if par == "":
                continue
            if not resource_type:
                if self.METHOD_NAME(par):
                    resource_type = par
                else:
                    error = "Invalid resource type : " + par
                    raise InvalidUriException(error)
            else:
                uid = par
                self.objects.append((resource_type, uid))
                resource_type, uid = None, None
        if resource_type:
            # trailing collection with no id
            self.objects.append((resource_type, uid))

    def METHOD_NAME(self, resource_type):
        """Return True when ``resource_type`` is a known collection name."""
        return resource_type in self._RESOURCE_TYPES
import unittest
class Tests(unittest.TestCase):
    # NOTE(review): several expectations below disagree with the Uri parser
    # defined above:
    #  - parse_params stores ids as strings, so assertEqual(uri.objects[0][1], 1)
    #    compares "1" with the int 1;
    #  - with a leading "/" or a "coord/" prefix, parse_region_coord pops the
    #    first path component verbatim, which does not produce the lon/lat
    #    values asserted here.
    # Confirm which side is authoritative before changing either.

    def testOnlyRegionWithoutBeginningSlash(self):
        string = "paris"
        uri = Uri(string)
        self.assertEqual(uri.region(), "paris")

    def testOnlyRegionWithBeginningSlash(self):
        string = "/paris"
        uri = Uri(string)
        self.assertEqual(uri.region(), "paris")

    def testOnlyCoordPosWithoutSlash(self):
        string = "coord/1.1;2.3"
        uri = Uri(string)
        self.assertEqual(uri.lon, 1.1)
        self.assertEqual(uri.lat, 2.3)
        string = "coord/.1;2."
        uri = Uri(string)
        self.assertEqual(uri.lon, 0.1)
        self.assertEqual(uri.lat, 2)
        string = "coord/.111111;22.3"
        uri = Uri(string)
        self.assertEqual(uri.lon, 0.111111)
        self.assertEqual(uri.lat, 22.3)

    def testOnlyCoordPosWithSlash(self):
        string = "/coord/1.1;2.3"
        uri = Uri(string)
        self.assertEqual(uri.lon, 1.1)
        self.assertEqual(uri.lat, 2.3)
        string = "/coord/.1;2."
        uri = Uri(string)
        self.assertEqual(uri.lon, 0.1)
        self.assertEqual(uri.lat, 2)
        string = "/coord/.111111;22.3"
        uri = Uri(string)
        self.assertEqual(uri.lon, 0.111111)
        self.assertEqual(uri.lat, 22.3)

    def testResourceListWithslash(self):
        string = "/paris/stop_areas"
        uri = Uri(string)
        self.assertEqual(uri.region(), "paris")
        self.assertEqual(uri.params, "stop_areas")
        self.assertEqual(len(uri.objects), 1)
        self.assertEqual(uri.objects[0][0], "stop_areas")
        self.assertEqual(uri.objects[0][1], None)

    def testResourceWithslash(self):
        string = "/paris/stop_areas/1"
        uri = Uri(string)
        self.assertEqual(uri.region(), "paris")
        self.assertEqual(uri.params, "stop_areas")
        self.assertEqual(len(uri.objects), 1)
        self.assertEqual(uri.objects[0][0], "stop_areas")
        self.assertEqual(uri.objects[0][1], 1)

    def testResourceListWithoutSlash(self):
        string = "paris/stop_areas"
        uri = Uri(string)
        self.assertEqual(uri.region(), "paris")
        self.assertEqual(uri.params, "stop_areas")
        self.assertEqual(len(uri.objects), 1)
        self.assertEqual(uri.objects[0][0], "stop_areas")
        self.assertEqual(uri.objects[0][1], None)

    def testResourcetWithoutslash(self):
        string = "paris/stop_areas/1"
        uri = Uri(string)
        self.assertEqual(uri.region(), "paris")
        self.assertEqual(uri.params, "stop_areas")
        self.assertEqual(len(uri.objects), 1)
        self.assertEqual(uri.objects[0][0], "stop_areas")
        self.assertEqual(uri.objects[0][1], 1)
1,852 | test default | # Copyright Cartopy Contributors
#
# This file is part of Cartopy and is released under the LGPL license.
# See COPYING and COPYING.LESSER in the root of the repository for full
# licensing details.
"""
Tests for Robinson projection.
"""
import numpy as np
from numpy.testing import assert_almost_equal, assert_array_almost_equal
import pytest
import cartopy.crs as ccrs
from .helpers import check_proj_params
# Shared CRS instances reused across the transform tests below, so each test
# does not have to rebuild the projections.
_CRS_PC = ccrs.PlateCarree()
_CRS_ROB = ccrs.Robinson()
def test_default():
    """Default Robinson projection: WGS84-radius sphere, lon_0=0, known extent.

    Fix: the function was named with the redaction placeholder METHOD_NAME,
    which pytest never collects (it does not match `test_*`), so this test
    silently never ran.
    """
    robin = ccrs.Robinson()
    other_args = {'a=6378137.0', 'lon_0=0'}
    check_proj_params('robin', robin, other_args)
    assert_almost_equal(robin.x_limits, [-17005833.3305252, 17005833.3305252])
    assert_almost_equal(robin.y_limits, [-8625154.6651000, 8625154.6651000])
def test_sphere_globe():
    """A purely spherical custom globe rescales the projection limits."""
    sphere = ccrs.Globe(semimajor_axis=1000, ellipse=None)
    proj = ccrs.Robinson(globe=sphere)
    check_proj_params('robin', proj, {'a=1000', 'lon_0=0'})
    assert_almost_equal(proj.x_limits, [-2666.2696851, 2666.2696851])
    assert_almost_equal(proj.y_limits, [-1352.3000000, 1352.3000000])
def test_ellipse_globe():
    """An elliptical globe warns and falls back to default (spherical) limits."""
    wgs84 = ccrs.Globe(ellipse='WGS84')
    with pytest.warns(UserWarning,
                      match='does not handle elliptical globes.') as warned:
        proj = ccrs.Robinson(globe=wgs84)
    assert len(warned) == 1
    check_proj_params('robin', proj, {'ellps=WGS84', 'lon_0=0'})
    # Ellipses are unsupported, so the extent matches the default projection.
    assert_almost_equal(proj.x_limits, [-17005833.3305252, 17005833.3305252])
    assert_almost_equal(proj.y_limits, [-8625154.6651000, 8625154.6651000])
def test_eccentric_globe():
    """An eccentric globe warns; limits derive from the semimajor axis only."""
    eccentric = ccrs.Globe(semimajor_axis=1000, semiminor_axis=500,
                           ellipse=None)
    with pytest.warns(UserWarning,
                      match='does not handle elliptical globes.') as warned:
        proj = ccrs.Robinson(globe=eccentric)
    assert len(warned) == 1
    check_proj_params('robin', proj, {'a=1000', 'b=500', 'lon_0=0'})
    # Ellipses are unsupported, so limits match the spherical a=1000 case.
    assert_almost_equal(proj.x_limits, [-2666.2696851, 2666.2696851])
    assert_almost_equal(proj.y_limits, [-1352.3000000, 1352.3000000])
def test_offset():
    """false_easting/false_northing shift the x/y limits by those amounts."""
    base = ccrs.Robinson()
    shifted = ccrs.Robinson(false_easting=1234, false_northing=-4321)
    expected_args = {'a=6378137.0', 'lon_0=0', 'x_0=1234', 'y_0=-4321'}
    check_proj_params('robin', shifted, expected_args)
    assert tuple(np.array(base.x_limits) + 1234) == shifted.x_limits
    assert tuple(np.array(base.y_limits) - 4321) == shifted.y_limits
@pytest.mark.parametrize('lon', [-10.0, 10.0])
def test_central_longitude(lon):
    """A non-zero central longitude leaves the projected extent unchanged."""
    proj = ccrs.Robinson(central_longitude=lon)
    check_proj_params('robin', proj, {'a=6378137.0', f'lon_0={lon}'})
    assert_almost_equal(proj.x_limits,
                        [-17005833.3305252, 17005833.3305252], decimal=5)
    assert_almost_equal(proj.y_limits, [-8625154.6651000, 8625154.6651000])
def test_transform_point():
    """Single-point transforms, including NaN propagation.

    Mostly tests the workaround for a specific problem.
    Problem report in: https://github.com/SciTools/cartopy/issues/232
    Fix covered in: https://github.com/SciTools/cartopy/pull/277
    """
    # A fully finite point has always transformed correctly.
    finite = _CRS_ROB.transform_point(35.0, 70.0, _CRS_PC)
    assert_array_almost_equal(finite, (2376187.2182271, 7275318.1162980))
    # NaN longitude: previously produced some value, now propagates NaN.
    assert np.all(np.isnan(_CRS_ROB.transform_point(np.nan, 70.0, _CRS_PC)))
    # NaN latitude: previously crashed, now propagates NaN.
    assert np.all(np.isnan(_CRS_ROB.transform_point(35.0, np.nan, _CRS_PC)))
def test_transform_points():
    """
    Mostly tests the workaround for a specific problem.
    Problem report in: https://github.com/SciTools/cartopy/issues/232
    Fix covered in: https://github.com/SciTools/cartopy/pull/277
    """
    # these always worked
    result = _CRS_ROB.transform_points(_CRS_PC,
                                       np.array([35.0]),
                                       np.array([70.0]))
    assert_array_almost_equal(result,
                              [[2376187.2182271, 7275318.1162980, 0]])
    # Same point with an explicit z coordinate of 0 gives the same answer.
    result = _CRS_ROB.transform_points(_CRS_PC,
                                       np.array([35.0]),
                                       np.array([70.0]),
                                       np.array([0.0]))
    assert_array_almost_equal(result,
                              [[2376187.2182271, 7275318.1162980, 0]])
    # this always did something, but result has altered
    result = _CRS_ROB.transform_points(_CRS_PC,
                                       np.array([np.nan]),
                                       np.array([70.0]))
    assert np.all(np.isnan(result))
    # this used to crash + is now fixed
    result = _CRS_ROB.transform_points(_CRS_PC,
                                       np.array([35.0]),
                                       np.array([np.nan]))
    assert np.all(np.isnan(result))
    # multipoint case
    # Rows 1, 3 and 4 contain a NaN in x, y or z, so their expected values
    # below (11.x, 22.x, 33.x) are placeholders: those rows are asserted to be
    # all-NaN, then overwritten with the placeholders before the final
    # allclose comparison of the remaining (finite) rows.
    x = np.array([10.0, 21.0, 0.0, 77.7, np.nan, 0.0])
    y = np.array([10.0, np.nan, 10.0, 77.7, 55.5, 0.0])
    z = np.array([10.0, 0.0, 0.0, np.nan, 55.5, 0.0])
    expect_result = np.array(
        [[9.40422591e+05, 1.06952091e+06, 1.00000000e+01],
         [11.1, 11.2, 11.3],
         [0.0, 1069520.91213902, 0.0],
         [22.1, 22.2, 22.3],
         [33.1, 33.2, 33.3],
         [0.0, 0.0, 0.0]])
    result = _CRS_ROB.transform_points(_CRS_PC, x, y, z)
    assert result.shape == (6, 3)
    assert np.all(np.isnan(result[[1, 3, 4], :]))
    result[[1, 3, 4], :] = expect_result[[1, 3, 4], :]
    assert not np.any(np.isnan(result))
    assert np.allclose(result, expect_result)
1,853 | get integration runtime connection info | # coding=utf-8
# *** WARNING: this file was generated by pulumi. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import copy
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
__all__ = [
'GetIntegrationRuntimeConnectionInfoResult',
'AwaitableGetIntegrationRuntimeConnectionInfoResult',
'get_integration_runtime_connection_info',
'get_integration_runtime_connection_info_output',
]
@pulumi.output_type
class GetIntegrationRuntimeConnectionInfoResult:
    """
    Connection information for encrypting the on-premises data source credentials.
    """
    # NOTE(review): this file is pulumi-generated ("Do not edit by hand");
    # the misspelling "exprired" mirrors the upstream Azure API schema and
    # must not be corrected here or the wire field name would no longer match.
    def __init__(__self__, host_service_uri=None, identity_cert_thumbprint=None, is_identity_cert_exprired=None, public_key=None, service_token=None, version=None):
        # Each field is type-checked, then recorded via pulumi.set so the
        # @pulumi.output_type machinery can expose it through the getters below.
        if host_service_uri and not isinstance(host_service_uri, str):
            raise TypeError("Expected argument 'host_service_uri' to be a str")
        pulumi.set(__self__, "host_service_uri", host_service_uri)
        if identity_cert_thumbprint and not isinstance(identity_cert_thumbprint, str):
            raise TypeError("Expected argument 'identity_cert_thumbprint' to be a str")
        pulumi.set(__self__, "identity_cert_thumbprint", identity_cert_thumbprint)
        if is_identity_cert_exprired and not isinstance(is_identity_cert_exprired, bool):
            raise TypeError("Expected argument 'is_identity_cert_exprired' to be a bool")
        pulumi.set(__self__, "is_identity_cert_exprired", is_identity_cert_exprired)
        if public_key and not isinstance(public_key, str):
            raise TypeError("Expected argument 'public_key' to be a str")
        pulumi.set(__self__, "public_key", public_key)
        if service_token and not isinstance(service_token, str):
            raise TypeError("Expected argument 'service_token' to be a str")
        pulumi.set(__self__, "service_token", service_token)
        if version and not isinstance(version, str):
            raise TypeError("Expected argument 'version' to be a str")
        pulumi.set(__self__, "version", version)
    @property
    @pulumi.getter(name="hostServiceUri")
    def host_service_uri(self) -> str:
        """
        The on-premises integration runtime host URL.
        """
        return pulumi.get(self, "host_service_uri")
    @property
    @pulumi.getter(name="identityCertThumbprint")
    def identity_cert_thumbprint(self) -> str:
        """
        The integration runtime SSL certificate thumbprint. Click-Once application uses it to do server validation.
        """
        return pulumi.get(self, "identity_cert_thumbprint")
    @property
    @pulumi.getter(name="isIdentityCertExprired")
    def is_identity_cert_exprired(self) -> bool:
        """
        Whether the identity certificate is expired.
        """
        return pulumi.get(self, "is_identity_cert_exprired")
    @property
    @pulumi.getter(name="publicKey")
    def public_key(self) -> str:
        """
        The public key for encrypting a credential when transferring the credential to the integration runtime.
        """
        return pulumi.get(self, "public_key")
    @property
    @pulumi.getter(name="serviceToken")
    def service_token(self) -> str:
        """
        The token generated in service. Callers use this token to authenticate to integration runtime.
        """
        return pulumi.get(self, "service_token")
    @property
    @pulumi.getter
    def version(self) -> str:
        """
        The integration runtime version.
        """
        return pulumi.get(self, "version")
class AwaitableGetIntegrationRuntimeConnectionInfoResult(GetIntegrationRuntimeConnectionInfoResult):
    """Awaitable wrapper so the result can be used from both sync and async code."""
    # pylint: disable=using-constant-test
    def __await__(self):
        # The `if False: yield` makes this a generator (hence awaitable)
        # without ever actually suspending; awaiting returns a plain copy.
        if False:
            yield self
        return GetIntegrationRuntimeConnectionInfoResult(
            host_service_uri=self.host_service_uri,
            identity_cert_thumbprint=self.identity_cert_thumbprint,
            is_identity_cert_exprired=self.is_identity_cert_exprired,
            public_key=self.public_key,
            service_token=self.service_token,
            version=self.version)
def get_integration_runtime_connection_info(integration_runtime_name: Optional[str] = None,
                                            resource_group_name: Optional[str] = None,
                                            workspace_name: Optional[str] = None,
                                            opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetIntegrationRuntimeConnectionInfoResult:
    """
    Get connection info for an integration runtime
    Azure REST API version: 2021-06-01.


    :param str integration_runtime_name: Integration runtime name
    :param str resource_group_name: The name of the resource group. The name is case insensitive.
    :param str workspace_name: The name of the workspace.
    """
    # Fix: this function carried the redaction placeholder name METHOD_NAME,
    # while __all__ exports 'get_integration_runtime_connection_info' — so
    # `from <module> import *` raised AttributeError. Restoring the real name
    # makes the module's declared public API importable again.
    __args__ = dict()
    __args__['integrationRuntimeName'] = integration_runtime_name
    __args__['resourceGroupName'] = resource_group_name
    __args__['workspaceName'] = workspace_name
    opts = pulumi.InvokeOptions.merge(_utilities.get_invoke_opts_defaults(), opts)
    # Synchronous invoke of the Azure-native data source; the typed result is
    # wrapped in the Awaitable subclass so callers may also `await` it.
    __ret__ = pulumi.runtime.invoke('azure-native:synapse:getIntegrationRuntimeConnectionInfo', __args__, opts=opts, typ=GetIntegrationRuntimeConnectionInfoResult).value
    return AwaitableGetIntegrationRuntimeConnectionInfoResult(
        host_service_uri=pulumi.get(__ret__, 'host_service_uri'),
        identity_cert_thumbprint=pulumi.get(__ret__, 'identity_cert_thumbprint'),
        is_identity_cert_exprired=pulumi.get(__ret__, 'is_identity_cert_exprired'),
        public_key=pulumi.get(__ret__, 'public_key'),
        service_token=pulumi.get(__ret__, 'service_token'),
        version=pulumi.get(__ret__, 'version'))
@_utilities.lift_output_func(get_integration_runtime_connection_info)
def get_integration_runtime_connection_info_output(integration_runtime_name: Optional[pulumi.Input[str]] = None,
                                                   resource_group_name: Optional[pulumi.Input[str]] = None,
                                                   workspace_name: Optional[pulumi.Input[str]] = None,
                                                   opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetIntegrationRuntimeConnectionInfoResult]:
    """
    Get connection info for an integration runtime
    Azure REST API version: 2021-06-01.


    :param str integration_runtime_name: Integration runtime name
    :param str resource_group_name: The name of the resource group. The name is case insensitive.
    :param str workspace_name: The name of the workspace.
    """
    ...
1,854 | test get halo returns values specified by | from unittest import mock
from lazyflow.operators.tiktorch.classifier import ModelSession, enforce_min_shape
import pytest
from tiktorch.proto import inference_pb2
@pytest.fixture
def pb_session():
    """Protobuf ModelSession fixture: one explicit xyc input (1024x512x1) and
    one parameterized xyc output (shapeType=1) derived from that input via
    scale/offset, with per-axis halo metadata used by the halo tests below."""
    return inference_pb2.ModelSession(
        inputAxes=["xyc"],
        outputAxes=["xyc"],
        hasTraining=False,
        inputShapes=[
            inference_pb2.InputShape(
                # shapeType=0: a fixed (explicit) input shape.
                shapeType=0,
                shape=inference_pb2.NamedInts(
                    namedInts=[
                        inference_pb2.NamedInt(name="x", size=1024),
                        inference_pb2.NamedInt(name="y", size=512),
                        inference_pb2.NamedInt(name="c", size=1),
                    ]
                ),
            ),
        ],
        outputShapes=[
            inference_pb2.OutputShape(
                # shapeType=1: output shape computed from referenceTensor as
                # shape(input) * scale + 2 * offset (see test_get_output_shape).
                shapeType=1,
                referenceTensor="input",
                offset=inference_pb2.NamedFloats(
                    namedFloats=[
                        inference_pb2.NamedFloat(name="x", size=16),
                        inference_pb2.NamedFloat(name="y", size=32),
                        inference_pb2.NamedFloat(name="c", size=3),
                    ]
                ),
                scale=inference_pb2.NamedFloats(
                    namedFloats=[
                        inference_pb2.NamedFloat(name="x", size=1),
                        inference_pb2.NamedFloat(name="y", size=0.5),
                        inference_pb2.NamedFloat(name="c", size=2),
                    ]
                ),
                halo=inference_pb2.NamedInts(
                    namedInts=[
                        inference_pb2.NamedInt(name="x", size=256),
                        inference_pb2.NamedInt(name="y", size=128),
                        inference_pb2.NamedInt(name="c", size=1),
                    ]
                ),
            )
        ],
        inputNames=["input"],
        outputNames=["output"],
    )
def test_get_halo_returns_values_specified_by(pb_session):
    """Halos come back per output, ordered to match the requested axes string.

    Fix: the function was named with the redaction placeholder METHOD_NAME,
    which pytest never collects (does not match `test_*`), so this test
    silently never ran.
    """
    model_session = ModelSession(session=pb_session, factory=mock.Mock())
    assert model_session.get_halos(axes="xy") == {"output": (256, 128)}
    assert model_session.get_halos(axes="yx") == {"output": (128, 256)}
    assert model_session.get_halos(axes="yxc") == {"output": (128, 256, 1)}
def test_get_halo_returns_0_if_value_is_unspecified(pb_session):
    """Axes absent from the model's halo metadata default to a halo of 0."""
    session = ModelSession(session=pb_session, factory=mock.Mock())
    assert session.get_halos(axes="xyz") == {"output": (256, 128, 0)}
    assert session.get_halos(axes="txyz") == {"output": (0, 256, 128, 0)}
def test_get_output_shape(pb_session):
    """shape = shape(input_tensor) * scale + 2 * offset"""
    session = ModelSession(session=pb_session, factory=mock.Mock())
    expected = {"output": [{"x": 1056, "y": 320, "c": 8}]}
    assert session.get_output_shapes() == expected
def test_get_input_shape(pb_session):
    """Input shapes are reordered (and padded with 1s) to the requested axes."""
    session = ModelSession(session=pb_session, factory=mock.Mock())
    cases = {
        "xyc": [(1024, 512, 1)],
        "cyx": [(1, 512, 1024)],
        "c": [(1,)],
        "tzyxc": [(1, 1, 512, 1024, 1)],
    }
    for axes, expected in cases.items():
        assert session.get_input_shapes(axes) == {"input": expected}
def test_known_classes(pb_session):
    """Known class labels run 1..8 for this fixture."""
    session = ModelSession(session=pb_session, factory=mock.Mock())
    assert session.known_classes == list(range(1, 9))
def test_num_classes(pb_session):
    """The fixture model reports 8 classes."""
    session = ModelSession(session=pb_session, factory=mock.Mock())
    assert session.num_classes == 8
def test_has_training(pb_session):
    """hasTraining=False in the proto is surfaced as has_training == False."""
    session = ModelSession(session=pb_session, factory=mock.Mock())
    assert not session.has_training
def test_input_axes(pb_session):
    """Input axes strings pass through from the proto unchanged."""
    session = ModelSession(session=pb_session, factory=mock.Mock())
    assert session.input_axes == ["xyc"]
def test_get_output_axes(pb_session):
    """Output axes strings pass through from the proto unchanged."""
    session = ModelSession(session=pb_session, factory=mock.Mock())
    assert session.output_axes == ["xyc"]
@pytest.mark.parametrize(
    "min_shape, step, axes, expected",
    [
        ((512, 512), (10, 10), "yx", (512, 512)),
        ((256, 512), (10, 10), "yx", (256, 512)),
        ((256, 256), (2, 2), "yx", (512, 512)),
        ((128, 256), (2, 2), "yx", (384, 512)),
        ((64, 64, 64), (1, 1, 1), "zyx", (64, 64, 64)),
        ((2, 64, 64), (1, 1, 1), "zyx", (2, 64, 64)),
        ((2, 2, 64), (1, 1, 1), "zyx", (2, 2, 64)),
        ((2, 2, 32), (1, 1, 1), "zyx", (34, 34, 64)),
        ((42, 10, 512, 512), (0, 0, 10, 10), "tcyx", (42, 10, 512, 512)),
    ],
)
def test_enforce_min_shape(min_shape, step, axes, expected):
    # Golden-value check of enforce_min_shape over representative
    # min_shape/step/axes combinations; shapes already large enough are
    # returned unchanged, small ones are grown in multiples of `step`.
    enforced_shape = enforce_min_shape(min_shape, step, axes)
    assert enforced_shape == expected
1,855 | construct model templates distillation | import logging
from typing import Dict, List
from autogluon.core.models import AbstractModel
from autogluon.core.trainer.abstract_trainer import AbstractTrainer
from autogluon.core.utils import generate_train_test_split
from ..models.lgb.lgb_model import LGBModel
from .model_presets.presets import MODEL_TYPES, get_preset_models
from .model_presets.presets_distill import get_preset_models_distillation
logger = logging.getLogger(__name__)
# This Trainer handles model training details
class AutoTrainer(AbstractTrainer):
    """Concrete AbstractTrainer that builds models from hyperparameter presets.

    Fix: the distillation-template hook carried the redaction placeholder name
    METHOD_NAME; it is restored to `construct_model_templates_distillation`,
    the distillation counterpart of `construct_model_templates` expected by
    the AbstractTrainer API.
    """

    def construct_model_templates(self, hyperparameters, **kwargs):
        """Return untrained model templates for the given hyperparameters.

        path / problem_type / eval_metric / quantile_levels /
        invalid_model_names / silent / ag_args_fit may each be overridden via
        kwargs; otherwise the trainer's own values are used. Remaining kwargs
        are forwarded to `get_preset_models`.
        """
        path = kwargs.pop("path", self.path)
        problem_type = kwargs.pop("problem_type", self.problem_type)
        eval_metric = kwargs.pop("eval_metric", self.eval_metric)
        quantile_levels = kwargs.pop("quantile_levels", self.quantile_levels)
        invalid_model_names = kwargs.pop("invalid_model_names", self._get_banned_model_names())
        silent = kwargs.pop("silent", self.verbosity < 3)
        ag_args_fit = kwargs.pop("ag_args_fit", None)
        if quantile_levels is not None:
            # Quantile models need the target quantiles at fit time; copy
            # before mutating so a caller-provided dict is left untouched.
            if ag_args_fit is None:
                ag_args_fit = dict()
            ag_args_fit = ag_args_fit.copy()
            ag_args_fit["quantile_levels"] = quantile_levels
        return get_preset_models(
            path=path,
            problem_type=problem_type,
            eval_metric=eval_metric,
            hyperparameters=hyperparameters,
            ag_args_fit=ag_args_fit,
            invalid_model_names=invalid_model_names,
            silent=silent,
            **kwargs,
        )

    def fit(
        self,
        X,
        y,
        hyperparameters,
        X_val=None,
        y_val=None,
        X_unlabeled=None,
        holdout_frac=0.1,
        num_stack_levels=0,
        core_kwargs: dict = None,
        aux_kwargs: dict = None,
        time_limit=None,
        infer_limit=None,
        infer_limit_batch_size=None,
        use_bag_holdout=False,
        groups=None,
        **kwargs,
    ):
        """Fit models (and ensembles) on X/y, generating a holdout split when
        no validation data is given and bagged mode does not provide one.

        Raises AssertionError when validation data is supplied in bagged mode
        without use_bag_holdout, or when groups are combined with an implicit
        holdout split.
        """
        for key in kwargs:
            logger.warning(f"Warning: Unknown argument passed to `AutoTrainer.fit()`. Argument: {key}")

        if use_bag_holdout:
            if self.bagged_mode:
                logger.log(20, f"use_bag_holdout={use_bag_holdout}, will use tuning_data as holdout (will not be used for early stopping).")
            else:
                logger.warning(f"Warning: use_bag_holdout={use_bag_holdout}, but bagged mode is not enabled. use_bag_holdout will be ignored.")

        if (y_val is None) or (X_val is None):
            if not self.bagged_mode or use_bag_holdout:
                if groups is not None:
                    raise AssertionError(f"Validation data must be manually specified if use_bag_holdout and groups are both specified.")
                if self.bagged_mode:
                    # Need at least 2 samples of each class in train data after split for downstream k-fold splits
                    # to ensure each k-fold has at least 1 sample of each class in training data
                    min_cls_count_train = 2
                else:
                    min_cls_count_train = 1
                X, X_val, y, y_val = generate_train_test_split(
                    X,
                    y,
                    problem_type=self.problem_type,
                    test_size=holdout_frac,
                    random_state=self.random_state,
                    min_cls_count_train=min_cls_count_train,
                )
                logger.log(
                    20, f"Automatically generating train/validation split with holdout_frac={holdout_frac}, Train Rows: {len(X)}, Val Rows: {len(X_val)}"
                )
        elif self.bagged_mode:
            if not use_bag_holdout:
                # TODO: User could be intending to blend instead. Add support for blend stacking.
                # This error message is necessary because when calculating out-of-fold predictions for user, we want to return them in the form given in train_data,
                # but if we merge train and val here, it becomes very confusing from a users perspective, especially because we reset index, making it impossible to match
                # the original train_data to the out-of-fold predictions from `predictor.get_oof_pred_proba()`.
                raise AssertionError(
                    "X_val, y_val is not None, but bagged mode was specified. "
                    "If calling from `TabularPredictor.fit()`, `tuning_data` should be None.\n"
                    "Default bagged mode does not use tuning data / validation data. "
                    "Instead, all data (`train_data` and `tuning_data`) should be combined and specified as `train_data`.\n"
                    "To avoid this error and use `tuning_data` as holdout data in bagged mode, "
                    "specify the following:\n"
                    "\tpredictor.fit(..., tuning_data=tuning_data, use_bag_holdout=True)"
                )

        # Log the hyperparameters dictionary so it easy to edit if the user wants.
        log_str = f"User-specified model hyperparameters to be fit:\n" "{\n"
        for k in hyperparameters.keys():
            log_str += f"\t'{k}': {hyperparameters[k]},\n"
        log_str += "}"
        logger.log(20, log_str)

        self._train_multi_and_ensemble(
            X=X,
            y=y,
            X_val=X_val,
            y_val=y_val,
            X_unlabeled=X_unlabeled,
            hyperparameters=hyperparameters,
            num_stack_levels=num_stack_levels,
            time_limit=time_limit,
            core_kwargs=core_kwargs,
            aux_kwargs=aux_kwargs,
            infer_limit=infer_limit,
            infer_limit_batch_size=infer_limit_batch_size,
            groups=groups,
        )

    def construct_model_templates_distillation(self, hyperparameters, **kwargs):
        """Return untrained model templates for model distillation.

        Mirrors `construct_model_templates` but delegates to the distillation
        preset factory.
        """
        path = kwargs.pop("path", self.path)
        problem_type = kwargs.pop("problem_type", self.problem_type)
        eval_metric = kwargs.pop("eval_metric", self.eval_metric)
        invalid_model_names = kwargs.pop("invalid_model_names", self._get_banned_model_names())
        silent = kwargs.pop("silent", self.verbosity < 3)

        # TODO: QUANTILE VERSION?

        return get_preset_models_distillation(
            path=path,
            problem_type=problem_type,
            eval_metric=eval_metric,
            hyperparameters=hyperparameters,
            invalid_model_names=invalid_model_names,
            silent=silent,
            **kwargs,
        )

    def _get_default_proxy_model_class(self):
        """Proxy model type used when a cheap stand-in model is required."""
        return LGBModel

    def compile_models(self, model_names="all", with_ancestors=False, compiler_configs: dict = None) -> List[str]:
        """Ensures that compiler_configs maps to the correct models if the user specified the same keys as in hyperparameters such as RT, XT, etc."""
        if compiler_configs is not None:
            model_types_map = self._get_model_types_map()
            compiler_configs_new = dict()
            for k in compiler_configs:
                # Translate preset shorthand keys (e.g. "RT") to model classes;
                # pass through anything already keyed by class or model name.
                if k in model_types_map:
                    compiler_configs_new[model_types_map[k]] = compiler_configs[k]
                else:
                    compiler_configs_new[k] = compiler_configs[k]
            compiler_configs = compiler_configs_new
        return super().compile_models(model_names=model_names, with_ancestors=with_ancestors, compiler_configs=compiler_configs)

    def _get_model_types_map(self) -> Dict[str, AbstractModel]:
        """Map of hyperparameter shorthand keys to model classes."""
        return MODEL_TYPES
1,856 | time total | # Copyright (c) 2022 Ultimaker B.V.
# Cura is released under the terms of the LGPLv3 or higher.
from typing import Optional, TYPE_CHECKING, List
from PyQt6.QtCore import pyqtSignal, pyqtProperty, QObject, pyqtSlot, QUrl
from PyQt6.QtGui import QImage
from cura.CuraApplication import CuraApplication
if TYPE_CHECKING:
from cura.PrinterOutput.PrinterOutputController import PrinterOutputController
from cura.PrinterOutput.Models.PrinterOutputModel import PrinterOutputModel
from cura.PrinterOutput.Models.PrinterConfigurationModel import PrinterConfigurationModel
class PrintJobOutputModel(QObject):
    """Qt model describing a single print job reported by a printer output device.

    Fix: the total-time property getter carried the redaction placeholder name
    METHOD_NAME. pyqtProperty exposes the getter's Python name as the QML
    property name, so the property is restored to `timeTotal`, matching its
    `timeTotalChanged` notify signal and `updateTimeTotal` setter (and the
    self-references in `timeRemaining` and `progress`).
    """
    stateChanged = pyqtSignal()
    timeTotalChanged = pyqtSignal()
    timeElapsedChanged = pyqtSignal()
    nameChanged = pyqtSignal()
    keyChanged = pyqtSignal()
    assignedPrinterChanged = pyqtSignal()
    ownerChanged = pyqtSignal()
    configurationChanged = pyqtSignal()
    previewImageChanged = pyqtSignal()
    compatibleMachineFamiliesChanged = pyqtSignal()

    def __init__(self, output_controller: "PrinterOutputController", key: str = "", name: str = "", parent = None) -> None:
        super().__init__(parent)
        self._output_controller = output_controller
        self._state = ""
        self._time_total = 0
        self._time_elapsed = 0
        self._name = name  # Human readable name
        self._key = key  # Unique identifier
        self._assigned_printer = None  # type: Optional[PrinterOutputModel]
        self._owner = ""  # Who started/owns the print job?

        self._configuration = None  # type: Optional[PrinterConfigurationModel]
        self._compatible_machine_families = []  # type: List[str]
        self._preview_image_id = 0

        self._preview_image = None  # type: Optional[QImage]

    @pyqtProperty("QStringList", notify=compatibleMachineFamiliesChanged)
    def compatibleMachineFamilies(self) -> List[str]:
        # Hack; Some versions of cluster will return a family more than once...
        return list(set(self._compatible_machine_families))

    def setCompatibleMachineFamilies(self, compatible_machine_families: List[str]) -> None:
        if self._compatible_machine_families != compatible_machine_families:
            self._compatible_machine_families = compatible_machine_families
            self.compatibleMachineFamiliesChanged.emit()

    @pyqtProperty(QUrl, notify=previewImageChanged)
    def previewImageUrl(self):
        self._preview_image_id += 1
        # There is an image provider that is called "print_job_preview". In order to ensure that the image qml object, that
        # requires a QUrl to function, updates correctly we add an increasing number. This causes to see the QUrl
        # as new (instead of relying on cached version and thus forces an update.
        temp = "image://print_job_preview/" + str(self._preview_image_id) + "/" + self._key
        return QUrl(temp, QUrl.ParsingMode.TolerantMode)

    def getPreviewImage(self) -> Optional[QImage]:
        return self._preview_image

    def updatePreviewImage(self, preview_image: Optional[QImage]) -> None:
        if self._preview_image != preview_image:
            self._preview_image = preview_image
            self.previewImageChanged.emit()

    @pyqtProperty(QObject, notify=configurationChanged)
    def configuration(self) -> Optional["PrinterConfigurationModel"]:
        return self._configuration

    def updateConfiguration(self, configuration: Optional["PrinterConfigurationModel"]) -> None:
        if self._configuration != configuration:
            self._configuration = configuration
            self.configurationChanged.emit()

    @pyqtProperty(str, notify = ownerChanged)
    def owner(self) -> str:
        return self._owner

    def updateOwner(self, owner: str) -> None:
        if self._owner != owner:
            self._owner = owner
            self.ownerChanged.emit()

    @pyqtProperty(bool, notify = ownerChanged)
    def isMine(self) -> bool:
        """
        Returns whether this print job was sent by the currently logged in user.

        This checks the owner of the print job with the owner of the currently
        logged in account. Both of these are human-readable account names which
        may be duplicate. In practice the harm here is limited, but it's the
        best we can do with the information available to the API.
        """
        return self._owner == CuraApplication.getInstance().getCuraAPI().account.userName

    @pyqtProperty(QObject, notify=assignedPrinterChanged)
    def assignedPrinter(self):
        return self._assigned_printer

    def updateAssignedPrinter(self, assigned_printer: Optional["PrinterOutputModel"]) -> None:
        if self._assigned_printer != assigned_printer:
            old_printer = self._assigned_printer
            self._assigned_printer = assigned_printer
            if old_printer is not None:
                # If the previously assigned printer is set, this job is moved away from it.
                old_printer.updateActivePrintJob(None)
            self.assignedPrinterChanged.emit()

    @pyqtProperty(str, notify=keyChanged)
    def key(self):
        return self._key

    def updateKey(self, key: str):
        if self._key != key:
            self._key = key
            self.keyChanged.emit()

    @pyqtProperty(str, notify = nameChanged)
    def name(self):
        return self._name

    def updateName(self, name: str):
        if self._name != name:
            self._name = name
            self.nameChanged.emit()

    @pyqtProperty(int, notify = timeTotalChanged)
    def timeTotal(self) -> int:
        """Estimated total print time in seconds."""
        return int(self._time_total)

    @pyqtProperty(int, notify = timeElapsedChanged)
    def timeElapsed(self) -> int:
        return int(self._time_elapsed)

    @pyqtProperty(int, notify = timeElapsedChanged)
    def timeRemaining(self) -> int:
        # Never get a negative time remaining
        return int(max(self.timeTotal - self.timeElapsed, 0))

    @pyqtProperty(float, notify = timeElapsedChanged)
    def progress(self) -> float:
        result = float(self.timeElapsed) / max(self.timeTotal, 1.0)  # Prevent a division by zero exception.
        return min(result, 1.0)  # Never get a progress past 1.0

    @pyqtProperty(str, notify=stateChanged)
    def state(self) -> str:
        return self._state

    @pyqtProperty(bool, notify=stateChanged)
    def isActive(self) -> bool:
        inactive_states = [
            "pausing",
            "paused",
            "resuming",
            "wait_cleanup"
        ]
        if self.state in inactive_states and self.timeRemaining > 0:
            return False
        return True

    def updateTimeTotal(self, new_time_total: int) -> None:
        if self._time_total != new_time_total:
            self._time_total = new_time_total
            self.timeTotalChanged.emit()

    def updateTimeElapsed(self, new_time_elapsed: int) -> None:
        if self._time_elapsed != new_time_elapsed:
            self._time_elapsed = new_time_elapsed
            self.timeElapsedChanged.emit()

    def updateState(self, new_state: str) -> None:
        if self._state != new_state:
            self._state = new_state
            self.stateChanged.emit()

    @pyqtSlot(str)
    def setState(self, state):
        # Delegate the state change request to the output controller (which
        # talks to the actual printer); the model is updated via updateState.
        self._output_controller.setJobState(self, state)
1,857 | test date to list uses fill value | from copy import deepcopy
from datetime import date, datetime
from typing import Any, List
import pytest
import torch
from dateutil.parser import parse
from ludwig.constants import ENCODER_OUTPUT, FILL_WITH_CONST, MISSING_VALUE_STRATEGY
from ludwig.features import date_feature
from ludwig.features.date_feature import DateInputFeature
from ludwig.schema.features.date_feature import DateInputFeatureConfig
from ludwig.schema.utils import load_config_with_kwargs
from ludwig.types import FeatureConfigDict
from ludwig.utils.date_utils import create_vector_from_datetime_obj
from ludwig.utils.misc_utils import merge_dict
from ludwig.utils.torch_utils import get_torch_device
# Shared test constants: sample-input batch size; width of the date feature
# vector (9 components — year, month, day, weekday, day-of-year, hour, minute,
# second, seconds-since-midnight, inferred from the expected vectors below);
# and the torch device under test.
BATCH_SIZE = 2
DATE_W_SIZE = 9
DEVICE = get_torch_device()
@pytest.fixture(scope="module")
def date_config():
    """Minimal feature-config dict for a date input feature."""
    return dict(name="date_column_name", type="date")
def test_date_input_feature(date_config: FeatureConfigDict):
    """Smoke test: build a DateInputFeature from config and run one forward
    pass, checking sample-input shape/dtype and encoder output shape."""
    # setup date input feature definition
    feature_def = deepcopy(date_config)

    # pickup any other missing parameters
    defaults = DateInputFeatureConfig(name="foo").to_dict()
    set_def = merge_dict(defaults, feature_def)

    # ensure no exceptions raised during build
    feature_config, _ = load_config_with_kwargs(DateInputFeatureConfig, set_def)
    input_feature_obj = DateInputFeature(feature_config).to(DEVICE)

    # check one forward pass through input feature
    input_tensor = input_feature_obj.create_sample_input(batch_size=BATCH_SIZE)
    assert input_tensor.shape == torch.Size((BATCH_SIZE, DATE_W_SIZE))
    assert input_tensor.dtype == torch.int32

    encoder_output = input_feature_obj(input_tensor)
    assert encoder_output[ENCODER_OUTPUT].shape == (BATCH_SIZE, *input_feature_obj.output_shape)
@pytest.mark.parametrize(
    "date_str,datetime_format,expected_list",
    [
        ("2012-02-26T13:51:50.417-07:00", None, [2012, 2, 26, 6, 57, 13, 51, 50, 49910]),
        ("2022-06-25 09:30:59", None, [2022, 6, 25, 5, 176, 9, 30, 59, 34259]),
        ("2022-06-25", None, [2022, 6, 25, 5, 176, 0, 0, 0, 0]),
    ],
)
def test_date_to_list(date_str, datetime_format, expected_list):
    """Date strings parse into the expected 9-element date vector."""
    result = date_feature.DateInputFeature.date_to_list(date_str, datetime_format, None)
    assert result == expected_list
@pytest.fixture(scope="module")
def reference_date_list() -> List[int]:
    # Date vector for POSIX timestamp 1691600953.443032, used as the expected
    # value for every valid-timestamp case in test_date_to_list_numeric.
    # NOTE(review): datetime.utcfromtimestamp is deprecated since Python 3.12;
    # switching to datetime.fromtimestamp(..., tz=...) would yield an aware
    # datetime — confirm the downstream helper handles that before changing.
    return create_vector_from_datetime_obj(datetime.utcfromtimestamp(1691600953.443032))
@pytest.fixture(scope="module")
def fill_value() -> str:
    # Constant fallback date (the Unix epoch) used as the FILL_WITH_CONST
    # value in the error-handling tests below.
    return "1970-01-01 00:00:00"
@pytest.fixture(scope="module")
def fill_value_list(fill_value: str) -> List[int]:
    # Date vector corresponding to the fill_value fixture; expected result for
    # every unparseable-input case in test_date_to_list_numeric.
    return create_vector_from_datetime_obj(parse(fill_value))
@pytest.mark.parametrize(
    "timestamp,datetime_format,expected_list",
    [
        # Valid POSIX timestamps in every supported representation
        # (int/float/str, seconds and milliseconds) resolve to the reference
        # date; an explicit datetime_format must be ignored for numerics.
        pytest.param(1691600953.443032, None, "reference_date_list", id="float-s"),
        pytest.param(1691600953443.032, None, "reference_date_list", id="float-ms"),
        pytest.param(1691600953, None, "reference_date_list", id="int-s"),
        pytest.param(1691600953443, None, "reference_date_list", id="int-ms"),
        pytest.param(1691600953.443032, "%d/%m/%y %H:%M:%S.%f", "reference_date_list", id="float-s-fmt"),
        pytest.param(1691600953443.032, "%d/%m/%y %H:%M:%S.%f", "reference_date_list", id="float-ms-fmt"),
        pytest.param(1691600953, "%d/%m/%y %H:%M:%S.%f", "reference_date_list", id="int-s-fmt"),
        pytest.param(1691600953443, "%d/%m/%y %H:%M:%S.%f", "reference_date_list", id="int-ms-fmt"),
        pytest.param("1691600953.443032", None, "reference_date_list", id="string[float]-s"),
        pytest.param("1691600953443.0032", None, "reference_date_list", id="string[float]-ms"),
        pytest.param("1691600953", None, "reference_date_list", id="string[int]-s"),
        pytest.param("1691600953443", None, "reference_date_list", id="string[int]-ms"),
        pytest.param("1691600953.443032", "%d/%m/%y %H:%M:%S.%f", "reference_date_list", id="string[float]-s-fmt"),
        pytest.param("1691600953443.0032", "%d/%m/%y %H:%M:%S.%f", "reference_date_list", id="string[float]-ms-fmt"),
        pytest.param("1691600953", "%d/%m/%y %H:%M:%S.%f", "reference_date_list", id="string[int]-s-fmt"),
        pytest.param("1691600953443", "%d/%m/%y %H:%M:%S.%f", "reference_date_list", id="string[int]-ms-fmt"),
        # Unparseable inputs must fall back to the fill value, never raise.
        pytest.param("foo", None, "fill_value_list", id="string error"),
        pytest.param([1691600953.443032], None, "fill_value_list", id="list error"),
        pytest.param(None, None, "fill_value_list", id="NoneType error"),
    ],
)
def test_date_to_list_numeric(timestamp: Any, datetime_format: str, expected_list: List[int], fill_value: str, request):
    """Test that numeric datetime formats are converted correctly.

    Currently, we support int, float, and string representations of POSIX timestamps in seconds and milliseconds. Valid
    timestamps should be converted to datetime lists by `luwdig.utils.date_utils.create_vector_from_datetime_object`.
    If a string format is provided, it should be ignored.

    Args:
        timestamp: Input to be converted to a date vector
        datetime_format: Optional format string, should be ignored under the hood with these timestamps.
        expected_list: The expected output of `DateFeatureMixin.date_to_list`
        fill_value: Date to be used as fallback
        request: pytest request fixture
    """
    # expected_list is the *name* of a fixture; resolve it to its value.
    expected_result = request.getfixturevalue(expected_list)

    # The default fill value is `datetime.now`, for testing we override this to be a constant.
    preprocessing_parameters = {MISSING_VALUE_STRATEGY: FILL_WITH_CONST, "fill_value": fill_value}

    # No exception should ever be raised from `date_to_list` due to a parsing error. The expected behavior is to fall
    # back to the fill value.
    dt = date_feature.DateInputFeature.date_to_list(timestamp, datetime_format, preprocessing_parameters)
    assert dt == expected_result
def test_date_to_list__DatetimeObjectFromParsedJSON():
    """A datetime object (e.g. produced by a JSON parser) is converted directly."""
    parsed = datetime.fromisoformat("2022-06-25")
    # [year, month, day, weekday, day-of-year, h, m, s, subsecond] — TODO confirm
    # element semantics against ludwig.utils.date_utils.
    expected = [2022, 6, 25, 5, 176, 0, 0, 0, 0]
    assert date_feature.DateInputFeature.date_to_list(parsed, None, None) == expected
def METHOD_NAME():
    """An unparseable date string falls back to the configured fill value."""
    preprocessing_parameters = {"fill_value": "2013-02-26"}
    invalid_date_str = "2012abc-02"
    # Vector for 2013-02-26 (the fill value), not for the invalid input.
    expected = [2013, 2, 26, 1, 57, 0, 0, 0, 0]
    result = date_feature.DateInputFeature.date_to_list(
        invalid_date_str, None, preprocessing_parameters
    )
    assert result == expected
@pytest.fixture(scope="module")
def date_obj():
    # A plain datetime.date (no time component) to exercise date-object support.
    return date.fromisoformat("2022-06-25")
@pytest.fixture(scope="module")
def date_obj_vec():
    # Expected vector for `date_obj`, built from the equivalent datetime.
    return create_vector_from_datetime_obj(datetime.fromisoformat("2022-06-25"))
def test_date_object_to_list(date_obj, date_obj_vec, fill_value):
"""Test support for datetime.date object conversion.
Args:
date_obj: Date object to convert into a vector
date_obj_vector: Expected vector version of `date_obj`
"""
computed_date_vec = date_feature.DateInputFeature.date_to_list(
date_obj, None, preprocessing_parameters={MISSING_VALUE_STRATEGY: FILL_WITH_CONST, "fill_value": fill_value}
)
assert computed_date_vec == date_obj_vec |
1,858 | transform acl list output | # --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
import base64
from knack.log import get_logger
from knack.util import todict
from .track2_util import _encode_bytes
storage_account_key_options = {'primary': 'key1', 'secondary': 'key2'}
logger = get_logger(__name__)
def METHOD_NAME(result):
    """Flatten an ACL policy mapping into name-sorted rows for table output
    (CLI / jpterm friendly)."""
    from collections import OrderedDict
    ordered_entries = []
    for name in sorted(result.keys()):
        policy = result[name]
        ordered_entries.append(OrderedDict([
            ('Name', name),
            ('Start', policy['start']),
            ('Expiry', policy['expiry']),
            ('Permissions', policy['permission']),
        ]))
    return ordered_entries
def transform_container_permission_output(result):
    """Report the container's public access level, defaulting to 'off' when unset."""
    access = result.public_access
    if not access:
        access = 'off'
    return {'publicAccess': access}
def transform_cors_list_output(result):
    """Flatten per-service CORS rules into numbered table rows."""
    from collections import OrderedDict
    rows = []
    for service in sorted(result.keys()):
        # Rule numbers are 1-based within each service.
        for index, rule in enumerate(result[service], start=1):
            row = OrderedDict()
            row['Service'] = service
            row['Rule'] = index
            row['AllowedMethods'] = ', '.join(rule.allowed_methods)
            row['AllowedOrigins'] = ', '.join(rule.allowed_origins)
            row['ExposedHeaders'] = ', '.join(rule.exposed_headers)
            row['AllowedHeaders'] = ', '.join(rule.allowed_headers)
            row['MaxAgeInSeconds'] = rule.max_age_in_seconds
            rows.append(row)
    return rows
def transform_entity_query_output(result):
    """Order each entity's columns as PartitionKey, RowKey, then the remaining
    keys sorted alphabetically (etag/Timestamp dropped)."""
    from collections import OrderedDict
    ignored_keys = ['etag', 'Timestamp', 'RowKey', 'PartitionKey']
    rows = []
    for row in result['items']:
        entry = OrderedDict()
        entry['PartitionKey'] = row['PartitionKey']
        entry['RowKey'] = row['RowKey']
        for key in sorted(k for k in row.keys() if k not in ignored_keys):
            entry[key] = row[key]
        rows.append(entry)
    return rows
def transform_entities_result(result):
    """Base64-encode binary properties of every entity in the result (in place)."""
    for entity in result.items:
        transform_entity_result(entity)
    return result
def transform_entity_result(entity):
    """Base64-encode any bytes-valued property objects on *entity* (mutates in place)."""
    for key in entity.keys():
        prop = entity[key]
        # Only property objects carrying raw bytes need re-encoding for display.
        if isinstance(getattr(prop, 'value', None), bytes):
            prop.value = base64.b64encode(prop.value).decode()
    return entity
def transform_logging_list_output(result):
    """Render per-service logging settings as table rows, sorted by service name."""
    from collections import OrderedDict
    rows = []
    for service in sorted(result.keys()):
        settings = result[service]
        row = OrderedDict()
        row['Service'] = service
        row['Read'] = str(settings['read'])
        row['Write'] = str(settings['write'])
        row['Delete'] = str(settings['delete'])
        row['RetentionPolicy'] = str(settings['retentionPolicy']['days'])
        rows.append(row)
    return rows
def transform_metrics_list_output(result):
    """Render metrics settings as rows; the service name is printed only on the
    first interval row of each service, for readability."""
    from collections import OrderedDict
    rows = []
    for service in sorted(result.keys()):
        label = service
        for interval in sorted(result[service].keys()):
            item = result[service][interval]
            row = OrderedDict()
            row['Service'] = label
            label = ''  # subsequent rows of this service show a blank name
            row['Interval'] = str(interval)
            row['Enabled'] = str(item['enabled'])
            row['IncludeApis'] = str(item['includeApis'])
            row['RetentionPolicy'] = str(item['retentionPolicy']['days'])
            rows.append(row)
    return rows
def create_boolean_result_output_transformer(property_name):
    """Return a transformer that wraps a bare boolean result as {property_name: result}."""
    return lambda result: {property_name: result}
def transform_storage_list_output(result):
    """Materialize a (possibly paged) result as a list, warning about any
    continuation marker so the user knows more pages exist."""
    marker = getattr(result, 'next_marker', None)
    if marker:
        logger.warning('Next Marker:')
        logger.warning(marker)
    return list(result)
# pylint: disable=unused-argument
def transform_file_upload(result):
    # Upload results are intentionally suppressed from CLI output.
    return None
def transform_url_without_encode(result):
    """Ensure the resulting URL string does not contain extra / characters.

    Collapses every '//' into '/', then restores the double slash after the
    URL scheme by expanding only the first remaining '/'.
    """
    # The original used re.sub on patterns with no metacharacters;
    # str.replace is the idiomatic (and faster) equivalent.
    result = result.replace('//', '/')
    result = result.replace('/', '//', 1)
    return result
def transform_share_directory_json_output(result):
    """Reshape a share-directory result: lift etag/lastModified/serverEncrypted
    into a nested 'properties' dict; any remaining keys are carried over as-is."""
    result = todict(result)
    new_result = {
        "metadata": result.pop('metadata', None),
        "name": result.pop('name', None),
        "properties": {
            "etag": result.pop('etag', None),
            "lastModified": result.pop('lastModified', None),
            "serverEncrypted": result.pop('serverEncrypted', None)
        }
    }
    # Carry over whatever keys were not explicitly popped above.
    new_result.update(result)
    return new_result
def transform_share_file_json_output(result):
    """Reshape a share-file result: group file attributes (etag, lastModified,
    size->contentLength, contentRange, contentSettings, copy) under 'properties'."""
    result = todict(result)
    new_result = {
        "metadata": result.pop('metadata', None),
        "name": result.pop('name', None),
        "properties": {
            "etag": result.pop('etag', None),
            "lastModified": result.pop('lastModified', None),
            "serverEncrypted": result.pop('serverEncrypted', None),
            # SDK reports the length as 'size'; the CLI surface uses 'contentLength'.
            "contentLength": result.pop('size', None),
            "contentRange": result.pop('contentRange', None),
            "contentSettings": result.pop('contentSettings', None),
            "copy": result.pop("copy", None)
        }
    }
    # Carry over whatever keys were not explicitly popped above.
    new_result.update(result)
    return new_result
def transform_file_show_result(result):
    """Group file-show output under 'properties', render the content MD5 as
    base64 text, and url-safe-b64 encode any remaining bytearray values."""
    result = todict(result)
    new_result = {
        "content": result.pop('content', ""),
        "properties": {
            "contentLength": result.pop('contentLength', None),
            "contentRange": result.pop('contentRange', None),
            "contentSettings": result.pop('contentSettings', None),
            "copy": result.pop('copy', None),
            "etag": result.pop('etag', None),
            "lastModified": result.pop('lastModified', None),
            "serverEncrypted": result.pop('serverEncrypted', None)
        }
    }
    # contentMd5 arrives as raw bytes; convert it for JSON-friendly output.
    if new_result['properties']['contentSettings'] and new_result['properties']['contentSettings']['contentMd5']:
        new_result['properties']['contentSettings']['contentMd5'] = _encode_bytes(new_result['properties']
                                                                                  ['contentSettings']['contentMd5'])
    new_result.update(result)
    _decode_bytearray(new_result)
    return new_result
def _decode_bytearray(result):
for k, v in result.items():
if isinstance(v, bytearray):
result[k] = base64.urlsafe_b64encode(v).decode()
elif isinstance(v, dict):
_decode_bytearray(v)
def transform_share_list_handle(result):
for item in result["items"]:
item["handleId"] = item.id
delattr(item, "id")
return result |
1,859 | xnli compute metrics | # coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
try:
from scipy.stats import pearsonr, spearmanr
from sklearn.metrics import matthews_corrcoef, f1_score, average_precision_score, ndcg_score, roc_auc_score
import numpy as np
_has_sklearn = True
except (AttributeError, ImportError):
_has_sklearn = False
def is_sklearn_available():
    # True when the scipy/sklearn/numpy imports at module load succeeded.
    return _has_sklearn
if _has_sklearn:
    def simple_accuracy(preds, labels):
        # Fraction of matching entries; assumes array-like inputs supporting
        # elementwise == and .mean() (e.g. numpy arrays) — TODO confirm callers.
        return (preds == labels).mean()
def simple_ndcg(preds, labels, guids):
ndcgs = []
query2content = {}
for guid, pred, label in zip(guids, preds, labels):
query = guid.split("_")[0]
if not query in query2content:
query2content[query] = [[int(pred)], [int(label)]]
else:
query2content[query][0].append(int(pred))
query2content[query][1].append(int(label))
for key in query2content.keys():
if len(query2content[key][1]) < 2 or len(query2content[key][0]) < 2:
continue
ndcgs.append(ndcg_score(np.asarray([query2content[key][1]]), np.asarray([query2content[key][0]])))
return {"ndcg" : np.array(ndcgs).mean()}
    def acc_and_f1(preds, labels):
        """Accuracy, binary F1, and their arithmetic mean."""
        acc = simple_accuracy(preds, labels)
        f1 = f1_score(y_true=labels, y_pred=preds)
        return {
            "acc": acc,
            "f1": f1,
            "acc_and_f1": (acc + f1) / 2,
        }
    def acc_and_auc(preds, labels):  # auc of pr curve is equal to average precision
        """Accuracy, average precision (PR-curve AUC), and their mean."""
        acc = simple_accuracy(preds, labels)
        auc = average_precision_score(labels, preds)
        return {
            "acc": acc,
            "auc": auc,
            "acc_and_auc": (acc + auc) / 2,
        }
    def acc_and_roc_auc(preds, labels):  # area under the ROC curve (comment fixed: was copy-pasted from acc_and_auc)
        """Accuracy, ROC AUC, and their arithmetic mean."""
        acc = simple_accuracy(preds, labels)
        roc_auc = roc_auc_score(labels, preds)
        return {
            "acc": acc,
            "roc_auc": roc_auc,
            "acc_and_roc_auc": (acc + roc_auc) / 2,
        }
def pearson_and_spearman(preds, labels):
pearson_corr = pearsonr(preds, labels)[0]
spearman_corr = spearmanr(preds, labels)[0]
return {
"pearson": pearson_corr,
"spearmanr": spearman_corr,
"corr": (pearson_corr + spearman_corr) / 2,
}
def xglue_compute_metrics(task_name, preds, labels, guids):
assert len(preds) == len(labels)
if task_name == "xnli":
return {"acc": simple_accuracy(preds, labels)}
elif task_name == "pawsx":
return acc_and_auc(preds, labels)
elif task_name == "qam":
return acc_and_auc(preds, labels)
elif task_name == "ads":
return acc_and_roc_auc(preds, labels)
elif task_name == "rel":
return simple_ndcg(preds, labels, guids)
elif task_name == "news":
return {"acc": simple_accuracy(preds, labels)}
else:
raise KeyError(task_name)
def xtreme_compute_metrics(task_name, preds, labels, guids):
assert len(preds) == len(labels)
if task_name == "xnli":
return {"acc": simple_accuracy(preds, labels)}
elif task_name == "pawsx":
return acc_and_auc(preds, labels)
else:
raise KeyError(task_name)
def glue_compute_metrics(task_name, preds, labels):
assert len(preds) == len(labels)
if task_name == "cola":
return {"mcc": matthews_corrcoef(labels, preds)}
elif task_name == "sst-2":
return {"acc": simple_accuracy(preds, labels)}
elif task_name == "mrpc":
return acc_and_f1(preds, labels)
elif task_name == "sts-b":
return pearson_and_spearman(preds, labels)
elif task_name == "qqp":
return acc_and_f1(preds, labels)
elif task_name == "mnli":
return {"acc": simple_accuracy(preds, labels)}
elif task_name == "mnli-mm":
return {"acc": simple_accuracy(preds, labels)}
elif task_name == "qnli":
return {"acc": simple_accuracy(preds, labels)}
elif task_name == "rte":
return {"acc": simple_accuracy(preds, labels)}
elif task_name == "wnli":
return {"acc": simple_accuracy(preds, labels)}
elif task_name == "hans":
return {"acc": simple_accuracy(preds, labels)}
else:
raise KeyError(task_name)
def METHOD_NAME(task_name, preds, labels):
assert len(preds) == len(labels)
if task_name == "xnli":
return {"acc": simple_accuracy(preds, labels)}
else:
raise KeyError(task_name) |
1,860 | self scheduler sim options | #################################################################################
# The Institute for the Design of Advanced Energy Systems Integrated Platform
# Framework (IDAES IP) was produced under the DOE Institute for the
# Design of Advanced Energy Systems (IDAES).
#
# Copyright (c) 2018-2023 by the software owners: The Regents of the
# University of California, through Lawrence Berkeley National Laboratory,
# National Technology & Engineering Solutions of Sandia, LLC, Carnegie Mellon
# University, West Virginia University Research Corporation, et al.
# All rights reserved. Please see the files COPYRIGHT.md and LICENSE.md
# for full copyright and license information.
#################################################################################
from importlib import resources
from numbers import Number
from pathlib import Path
from typing import Dict, Union
import os
import pytest
# define custom type for type hinting
PrescientOptions = Dict[str, Union[str, bool, Number, dict]]
from idaes.apps.grid_integration import DoubleLoopCoordinator
from idaes.apps.grid_integration.tests.util import (
make_testing_tracker,
make_testing_bidder,
)
## create trackers
# Module-level fixtures shared by every test below: two trackers (the second
# used for projection) and a bidder, wired into one DoubleLoopCoordinator.
thermal_tracker = make_testing_tracker()
thermal_projection_tracker = make_testing_tracker()
thermal_bidder = make_testing_bidder()
# create coordinator
coordinator = DoubleLoopCoordinator(
    bidder=thermal_bidder,
    tracker=thermal_tracker,
    projection_tracker=thermal_projection_tracker,
)
class TestDoubleLoopIntegration:
"Integration test for the double loop using 5bus use case."
    @pytest.fixture
    def data_path(self) -> Path:
        """Path to the packaged 5bus test-system directory."""
        # NOTE here we want the path to the entire 5bus directory
        # we need to specify __init__.py as a workaround for Python 3.9,
        # where importlib.resources.path() requires the resource to be a file
        # directories are not supported and will raise an error if attempted
        with resources.path("idaes.tests.prescient.5bus", "__init__.py") as pkg_file:
            return Path(pkg_file).parent
    @pytest.mark.unit
    def test_data_path_available(self, data_path: Path):
        # Sanity check: the packaged 5bus dataset directory must exist.
        assert data_path.is_dir()
    @pytest.fixture
    def output_dir(self, tmp_path: Path) -> Path:
        """Fresh per-test directory receiving the bidder simulation output."""
        path = tmp_path / "bidder_integration_test_output"
        path.mkdir()
        return path
    @pytest.fixture
    def self_scheduler_output_dir(self, tmp_path: Path) -> Path:
        """Fresh per-test directory receiving the self-scheduler simulation output."""
        path = tmp_path / "self_scheduler_integration_test_output"
        path.mkdir()
        return path
    @pytest.fixture
    def self_scheduler_plugin_path(self) -> Path:
        """Path to the Prescient plugin module used by the self-scheduler run."""
        with resources.path(
            "idaes.apps.grid_integration.tests",
            "self_scheduler_integration_test_plugin.py",
        ) as p:
            return Path(p)
    @pytest.mark.unit
    def test_self_scheduler_plugin_path_is_existing_file(
        self, self_scheduler_plugin_path
    ):
        # Guard against packaging errors that would drop the plugin file.
        assert self_scheduler_plugin_path.is_file()
    @pytest.fixture
    def prescient_options(self, data_path: Path) -> PrescientOptions:
        """Base Prescient options shared by both simulation variants.

        The plugin and output_directory entries are filled in by the
        *_sim_options fixtures below.
        """
        return {
            "data_path": str(data_path),
            "input_format": "rts-gmlc",
            "simulate_out_of_sample": True,
            "run_sced_with_persistent_forecast_errors": True,
            "start_date": "07-10-2020",
            "num_days": 2,
            "sced_horizon": 4,
            "ruc_mipgap": 0.01,
            "reserve_factor": 0.0,
            "deterministic_ruc_solver": "cbc",
            "day_ahead_pricing": "LMP",
            "symbolic_solver_labels": True,
            "deterministic_ruc_solver_options": {
                "feas": "off",
                "DivingF": "on",
            },
            "sced_solver": "cbc",
            "sced_frequency_minutes": 60,
            "ruc_horizon": 48,
            "compute_market_settlements": True,
            "monitor_all_contingencies": False,
            "output_solver_logs": False,
            "price_threshold": 1000,
            "contingency_price_threshold": 100,
            "reserve_price_threshold": 5,
        }
    @pytest.fixture
    def bidder_sim_options(
        self,
        prescient_options,
        output_dir: Path,
    ) -> PrescientOptions:
        """Prescient options for the double-loop bidder run.

        Note: mutates (and returns) the shared `prescient_options` dict.
        """
        prescient_options["plugin"] = {
            "doubleloop": {
                "module": coordinator.prescient_plugin_module,
                "bidding_generator": "10_STEAM",
            }
        }
        prescient_options["output_directory"] = str(output_dir)
        return prescient_options
    @pytest.fixture
    def METHOD_NAME(
        self,
        prescient_options,
        self_scheduler_output_dir: Path,
        self_scheduler_plugin_path: Path,
    ) -> PrescientOptions:
        """Prescient options for the self-scheduler run.

        Note: mutates (and returns) the shared `prescient_options` dict.
        """
        prescient_options["plugin"] = {
            "doubleloop": {
                "module": str(self_scheduler_plugin_path),
                "bidding_generator": "10_STEAM",
            }
        }
        prescient_options["output_directory"] = str(self_scheduler_output_dir)
        return prescient_options
    @pytest.fixture
    def run_bidder_simulator(self, bidder_sim_options: PrescientOptions) -> None:
        """Run the bidder simulation; skipped entirely if Prescient is unavailable."""
        prescient_simulator = pytest.importorskip(
            "prescient.simulator",
            reason="Prescient (optional dependency) not available",
        )
        prescient_simulator.Prescient().simulate(**bidder_sim_options)
    @pytest.fixture
    def run_self_scheduler_simulator(
        self, METHOD_NAME: PrescientOptions
    ) -> None:
        """Run the self-scheduler simulation; skipped if Prescient is unavailable."""
        prescient_simulator = pytest.importorskip(
            "prescient.simulator",
            reason="Prescient (optional dependency) not available",
        )
        prescient_simulator.Prescient().simulate(**METHOD_NAME)
    @pytest.fixture
    def simulation_results_dir(self, run_bidder_simulator, output_dir):
        # Depending on run_bidder_simulator guarantees the simulation has
        # completed before the results directory is inspected.
        return output_dir
    @pytest.fixture
    def self_scheduler_simulation_results_dir(
        self, run_self_scheduler_simulator, self_scheduler_output_dir
    ):
        # Depending on run_self_scheduler_simulator guarantees the simulation
        # has completed before the results directory is inspected.
        return self_scheduler_output_dir
@pytest.mark.unit
def test_prescient_outputs_exist(
self, simulation_results_dir, self_scheduler_simulation_results_dir
):
assert os.path.isdir(simulation_results_dir)
assert os.path.isdir(self_scheduler_simulation_results_dir)
file_names = [
"hourly_gen_summary.csv",
"tracker_detail.csv",
"hourly_summary.csv",
"bus_detail.csv",
"overall_simulation_output.csv",
"virtual_detail.csv",
"bidding_model_detail.csv",
"bidder_detail.csv",
"daily_summary.csv",
"line_detail.csv",
"thermal_detail.csv",
"runtimes.csv",
"tracking_model_detail.csv",
"renewables_detail.csv",
"contingency_detail.csv",
]
for f in file_names:
file_path = os.path.join(simulation_results_dir, f)
assert os.path.isfile(file_path)
file_path = os.path.join(self_scheduler_simulation_results_dir, f)
assert os.path.isfile(file_path) |
1,861 | set up | # Owner(s): ["oncall: jit"]
import os
import sys
import unittest
import torch
import torch._C
from pathlib import Path
from test_nnapi import TestNNAPI
# Make the helper files in test/ importable
pytorch_test_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
sys.path.append(pytorch_test_dir)
if __name__ == "__main__":
raise RuntimeError(
"This test file is not meant to be run directly, use:\n\n"
"\tpython test/test_jit.py TESTNAME\n\n"
"instead."
)
"""
Unit Tests for Nnapi backend with delegate
Inherits most tests from TestNNAPI, which loads Android NNAPI models
without the delegate API.
"""
# First skip is needed for IS_WINDOWS or IS_MACOS to skip the tests.
torch_root = Path(__file__).resolve().parent.parent.parent
lib_path = torch_root / 'build' / 'lib' / 'libnnapi_backend.so'
@unittest.skipIf(not os.path.exists(lib_path),
"Skipping the test as libnnapi_backend.so was not found")
class TestNnapiBackend(TestNNAPI):
    def METHOD_NAME(self):
        """Record the current default dtype, force float32, and load the NNAPI
        delegate library before each test."""
        super().METHOD_NAME()
        # Save default dtype
        module = torch.nn.PReLU()
        self.default_dtype = module.weight.dtype
        # Change dtype to float32 (since a different unit test changed dtype to float64,
        # which is not supported by the Android NNAPI delegate)
        # Float32 should typically be the default in other files.
        torch.set_default_dtype(torch.float32)
        # Load nnapi delegate library
        torch.ops.load_library(str(lib_path))
    # Override
    def call_lowering_to_nnapi(self, traced_module, args):
        # Lower via the delegate API (torch._C._jit_to_backend) rather than the
        # direct NNAPI conversion used by the parent TestNNAPI class.
        compile_spec = {"forward": {"inputs": args}}
        return torch._C._jit_to_backend("nnapi", traced_module, compile_spec)
    def test_tensor_input(self):
        """Both a bare Tensor and a single-element Tensor list are accepted as inputs."""
        # Lower a simple module
        args = torch.tensor([[1.0, -1.0, 2.0, -2.0]]).unsqueeze(-1).unsqueeze(-1)
        module = torch.nn.PReLU()
        traced = torch.jit.trace(module, args)
        # Argument input is a single Tensor
        self.call_lowering_to_nnapi(traced, args)
        # Argument input is a Tensor in a list
        self.call_lowering_to_nnapi(traced, [args])
    # Test exceptions for incorrect compile specs
    def test_compile_spec_santiy(self):
        """Each malformed compile spec must raise with a descriptive message.

        NOTE(review): the method name typo ('santiy') is kept to preserve the
        public test identifier.
        """
        args = torch.tensor([[1.0, -1.0, 2.0, -2.0]]).unsqueeze(-1).unsqueeze(-1)
        module = torch.nn.PReLU()
        traced = torch.jit.trace(module, args)
        errorMsgTail = r"""
method_compile_spec should contain a Tensor or Tensor List which bundles input parameters: shape, dtype, quantization, and dimorder.
For input shapes, use 0 for run/load time flexible input.
method_compile_spec must use the following format:
{"forward": {"inputs": at::Tensor}} OR {"forward": {"inputs": c10::List<at::Tensor>}}"""
        # No forward key
        compile_spec = {"backward": {"inputs": args}}
        with self.assertRaisesRegex(RuntimeError, "method_compile_spec does not contain the \"forward\" key." + errorMsgTail):
            torch._C._jit_to_backend("nnapi", traced, compile_spec)
        # No dictionary under the forward key
        compile_spec = {"forward": 1}
        with self.assertRaisesRegex(RuntimeError,
                                    "method_compile_spec does not contain a dictionary with an \"inputs\" key, "
                                    "under it's \"forward\" key."
                                    + errorMsgTail):
            torch._C._jit_to_backend("nnapi", traced, compile_spec)
        # No inputs key (in the dictionary under the forward key)
        compile_spec = {"forward": {"not inputs": args}}
        with self.assertRaisesRegex(RuntimeError,
                                    "method_compile_spec does not contain a dictionary with an \"inputs\" key, "
                                    "under it's \"forward\" key."
                                    + errorMsgTail):
            torch._C._jit_to_backend("nnapi", traced, compile_spec)
        # No Tensor or TensorList under the inputs key
        compile_spec = {"forward": {"inputs": 1}}
        with self.assertRaisesRegex(RuntimeError,
                                    "method_compile_spec does not contain either a Tensor or TensorList, under it's \"inputs\" key."
                                    + errorMsgTail):
            torch._C._jit_to_backend("nnapi", traced, compile_spec)
        compile_spec = {"forward": {"inputs": [1]}}
        with self.assertRaisesRegex(RuntimeError,
                                    "method_compile_spec does not contain either a Tensor or TensorList, under it's \"inputs\" key."
                                    + errorMsgTail):
            torch._C._jit_to_backend("nnapi", traced, compile_spec)
def tearDown(self):
# Change dtype back to default (Otherwise, other unit tests will complain)
torch.set_default_dtype(self.default_dtype) |
1,862 | get metadata | """Generate documentation for derived datasets."""
import logging
from pathlib import Path
from jinja2 import Environment, FileSystemLoader
from bigquery_etl.config import ConfigLoader
from bigquery_etl.dependency import extract_table_references
from bigquery_etl.metadata.parse_metadata import DatasetMetadata, Metadata
from bigquery_etl.schema import Schema
logging.basicConfig(format="%(levelname)s (%(filename)s:%(lineno)d) - %(message)s")
VIEW_FILE = "view.sql"
METADATA_FILE = "metadata.yaml"
SCHEMA_FILE = "schema.yaml"
DATASET_METADATA_FILE = "dataset_metadata.yaml"
README_FILE = "README.md"
def METHOD_NAME(path, metadata_filename=METADATA_FILE):
    """Load table- or dataset-level metadata from *path*.

    Returns the parsed metadata object, or None (with a warning) when the
    metadata file does not exist. Raises for an unrecognized filename.
    """
    metadata_path = path / metadata_filename
    try:
        if metadata_filename == METADATA_FILE:
            return Metadata.from_file(metadata_path)
        if metadata_filename == DATASET_METADATA_FILE:
            return DatasetMetadata.from_file(metadata_path)
        raise Exception(f"Invalid metadata filename provided - {metadata_filename}")
    except FileNotFoundError:
        logging.warning(f"Metadata not found at {str(metadata_path)}")
def _get_readme_content(path):
    """Return the README text under *path*, or None when no README exists."""
    readme_file = path / README_FILE
    return readme_file.read_text() if readme_file.exists() else None
def _get_referenced_tables_from_view(table_path):
    """Extract tables referenced by the view.sql under *table_path*.

    Returns a list of {project_id, dataset_id, table_id} dicts. Two-part
    references get their project ID from the directory layout; references with
    any other arity are skipped.
    """
    referenced_tables = []
    view_file = table_path / VIEW_FILE
    if view_file.exists():
        for referenced_table in extract_table_references(view_file.read_text()):
            table_split = referenced_table.split(".")
            if len(table_split) == 2:
                # missing project ID, retrieve from file path
                [dataset_id, table_id] = table_split
                project_id = view_file.parent.parent.parent.name
            elif len(table_split) == 3:
                [project_id, dataset_id, table_id] = table_split
            else:
                continue
            referenced_tables.append(
                {
                    "project_id": project_id,
                    "dataset_id": dataset_id,
                    "table_id": table_id,
                }
            )
    return referenced_tables
def _get_schema(table_path):
    """Load the table's schema file and return its top-level fields, or None."""
    schema_path = table_path / SCHEMA_FILE
    try:
        schema = Schema.from_schema_file(schema_path)
        return schema.schema.get("fields")
    # Broad on purpose: a missing or malformed schema must not abort the
    # whole documentation build — log and move on.
    except Exception as e:
        logging.warning(f"Unable to open schema: {e}")
def _iter_table_markdown(table_paths, template):
    """Yield rendered markdown for each table directory in *table_paths*."""
    source_url = ConfigLoader.get("docs", "source_url")
    for table_path in table_paths:
        # Links shown on the generated page; entries are added only when the
        # corresponding file actually exists.
        source_urls = {"Source Directory": f"{source_url}/{str(table_path)}"}
        referenced_tables = _get_referenced_tables_from_view(table_path)
        if referenced_tables:
            source_urls[
                "View Definition"
            ] = f"{source_url}/{str(table_path / VIEW_FILE)}"
        metadata = METHOD_NAME(table_path)
        if metadata:
            source_urls[
                "Metadata File"
            ] = f"{source_url}/{str(table_path / METADATA_FILE)}"
        readme_content = _get_readme_content(table_path)
        schema = _get_schema(table_path)
        output = template.render(
            metadata=metadata,
            readme_content=readme_content,
            schema=schema,
            table_name=table_path.name,
            qualified_table_name=f"{table_path.parent.name}.{table_path.name}",
            source_urls=source_urls,
            referenced_tables=referenced_tables,
            project_url=f"{source_url}/sql",
        )
        yield output
def generate_derived_dataset_docs(out_dir, project_dir):
"""Generate documentation for derived datasets."""
output_path = Path(out_dir) / ConfigLoader.get(
"default", "user_facing_project", fallback="mozdata"
)
project_path = Path(project_dir)
# get a list of all user-facing datasets
dataset_paths = sorted(
[
dataset_path
for dataset_path in project_path.iterdir()
if dataset_path.is_dir()
and all(
suffix not in str(dataset_path)
for suffix in ConfigLoader.get(
"default", "non_user_facing_dataset_suffixes", fallback=[]
)
)
]
)
for dataset_path in dataset_paths:
table_paths = sorted([path for path in dataset_path.iterdir() if path.is_dir()])
file_loader = FileSystemLoader("bigquery_etl/docs/derived_datasets/templates")
env = Environment(loader=file_loader)
table_template = env.get_template("table.md")
dataset_header_template = env.get_template("dataset_header.md")
dataset_metadata = METHOD_NAME(
dataset_path, metadata_filename=DATASET_METADATA_FILE
)
dataset_readme_content = _get_readme_content(dataset_path)
with open(output_path / f"{dataset_path.name}.md", "w") as dataset_doc:
# In the template, we manually set title to prevent Mkdocs from removing
# underscores and capitalizing file names
# https://github.com/mkdocs/mkdocs/issues/1915#issuecomment-561311801
dataset_header = dataset_header_template.render(
title=dataset_metadata.friendly_name
if dataset_metadata
else dataset_path.name,
description=dataset_metadata.description if dataset_metadata else None,
readme_content=dataset_readme_content,
source_url=f"{ConfigLoader.get('docs', 'source_url')}/{str(dataset_path)}",
)
dataset_doc.write(dataset_header)
dataset_doc.write(
"".join(_iter_table_markdown(table_paths, table_template))
) |
1,863 | test refresh token returns correct new token | from datetime import timedelta
from django.conf import settings
from django.urls import reverse
from rest_framework import status
from rest_framework.authtoken.models import Token
from rest_framework.test import APIClient, APITestCase
from argus.auth.factories import AdminUserFactory, PersonUserFactory
from argus.auth.models import User
class APITests(APITestCase):
    def setUp(self):
        """Create one superuser and one normal user, each with a token-authenticated APIClient."""
        self.superuser1_password = "best_admin#1"
        self.superuser1 = AdminUserFactory(username="superuser1", password=self.superuser1_password)
        self.normal_user1_password = "12345"
        self.normal_user1 = PersonUserFactory(username="normal_user1", password=self.normal_user1_password)
        self.superuser1_client = APIClient()
        self.superuser1_token = Token.objects.create(user=self.superuser1)
        self.superuser1_client.credentials(**self.assemble_token_auth_kwarg(self.superuser1_token.key))
        self.normal_user1_client = APIClient()
        self.normal_user1_token = Token.objects.create(user=self.normal_user1)
        self.normal_user1_client.credentials(**self.assemble_token_auth_kwarg(self.normal_user1_token.key))
    @staticmethod
    def assemble_token_auth_kwarg(token_key: str):
        # Builds the credentials kwarg that APIClient expects for token auth.
        return {"HTTP_AUTHORIZATION": f"Token {token_key}"}
    @staticmethod
    def expire_token(token: Token):
        # Backdate `created` past the configured expiry window so the token
        # reads as expired without waiting.
        # Subtract an extra second, just to be sure
        token.created -= timedelta(days=settings.AUTH_TOKEN_EXPIRES_AFTER_DAYS, seconds=1)
        token.save()
    def test_logout_deletes_token(self):
        """Logging out must delete the user's auth token, for both user types."""
        logout_path = reverse("v1:auth:logout")

        def assert_token_is_deleted(token: Token, user: User, client: APIClient):
            self.assertTrue(hasattr(user, "auth_token"))
            self.assertEqual(client.post(logout_path).status_code, status.HTTP_200_OK)
            user.refresh_from_db()
            self.assertFalse(hasattr(user, "auth_token"))
            with self.assertRaises(Token.DoesNotExist):
                token.refresh_from_db()

        assert_token_is_deleted(self.normal_user1_token, self.normal_user1, self.normal_user1_client)
        assert_token_is_deleted(self.superuser1_token, self.superuser1, self.superuser1_client)
    def _successfully_get_auth_token(self, user: User, user_password: str, client: APIClient):
        """POST credentials to the token endpoint, assert 200, and return the response."""
        auth_token_path = reverse("v1:api-token-auth")
        response = client.post(auth_token_path, {"username": user.username, "password": user_password})
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        return response
    def test_get_auth_token_always_replaces_old_token(self):
        """Requesting a token must invalidate and replace any existing one."""

        def assert_token_is_replaced(user: User, user_password: str, old_token: Token, client: APIClient):
            old_token_key = old_token.key
            response = self._successfully_get_auth_token(user, user_password, client)
            new_token_key = response.data["token"]
            self.assertNotEqual(new_token_key, old_token_key)
            # The old token row must be gone, not just rotated.
            with self.assertRaises(Token.DoesNotExist):
                old_token.refresh_from_db()
            user.refresh_from_db()
            self.assertEqual(new_token_key, user.auth_token.key)

        assert_token_is_replaced(
            self.normal_user1, self.normal_user1_password, self.normal_user1_token, self.normal_user1_client
        )
        assert_token_is_replaced(
            self.superuser1, self.superuser1_password, self.superuser1_token, self.superuser1_client
        )
    def test_auth_token_expires_and_is_deleted(self):
        """An expired token must yield 401 and be removed from the database."""
        some_auth_required_path = reverse("v1:auth:current-user")

        def assert_token_expires_and_is_deleted(user: User, token: Token, client: APIClient):
            self.assertEqual(client.get(some_auth_required_path).status_code, status.HTTP_200_OK)
            self.expire_token(token)
            self.assertEqual(client.get(some_auth_required_path).status_code, status.HTTP_401_UNAUTHORIZED)
            user.refresh_from_db()
            self.assertFalse(hasattr(user, "auth_token"))
            with self.assertRaises(Token.DoesNotExist):
                token.refresh_from_db()

        assert_token_expires_and_is_deleted(self.normal_user1, self.normal_user1_token, self.normal_user1_client)
        assert_token_expires_and_is_deleted(self.superuser1, self.superuser1_token, self.superuser1_client)
    def test_can_get_auth_token_after_deletion_or_expiration(self):
        """A user can re-authenticate after their token was deleted (logout) or expired."""
        logout_path = reverse("v1:auth:logout")
        some_auth_required_path = reverse("v1:auth:current-user")

        def assert_unauthorized_until_getting_auth_token(user: User, user_password: str, client: APIClient):
            self.assertEqual(client.get(some_auth_required_path).status_code, status.HTTP_401_UNAUTHORIZED)
            client.credentials()  # clears credentials
            response = self._successfully_get_auth_token(user, user_password, client)
            client.credentials(**self.assemble_token_auth_kwarg(response.data["token"]))
            self.assertEqual(client.get(some_auth_required_path).status_code, status.HTTP_200_OK)

        def assert_can_get_auth_token_after_deletion_and_expiration(user: User, user_password: str, client: APIClient):
            # First via explicit logout, then via forced expiration.
            client.post(logout_path)
            assert_unauthorized_until_getting_auth_token(user, user_password, client)
            self.expire_token(Token.objects.get(user=user))
            assert_unauthorized_until_getting_auth_token(user, user_password, client)

        assert_can_get_auth_token_after_deletion_and_expiration(
            self.normal_user1, self.normal_user1_password, self.normal_user1_client
        )
        assert_can_get_auth_token_after_deletion_and_expiration(
            self.superuser1, self.superuser1_password, self.superuser1_client
        )
def test_get_current_user_returns_correct_user(self):
    """The current-user endpoint must report the authenticated account."""
    current_user_path = reverse("v1:auth:current-user")
    for client, expected_user in (
        (self.superuser1_client, self.superuser1),
        (self.normal_user1_client, self.normal_user1),
    ):
        response = client.get(current_user_path)
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(response.data["username"], expected_user.username)
def test_get_user_returns_the_correct_fields(self):
    """The user detail endpoint must expose exactly the four public fields."""
    user_path = lambda user: reverse("v1:auth:user", args=[user.pk])

    def assert_correct_fields_for_user(user: User):
        detail_response = self.normal_user1_client.get(user_path(user))
        self.assertEqual(detail_response.status_code, status.HTTP_200_OK)
        payload = detail_response.data
        # Exactly four fields: username plus the three informational ones.
        self.assertEqual(len(payload), 4)
        self.assertEqual(payload["username"], user.username)
        for field in ("first_name", "last_name", "email"):
            self.assertIn(field, payload)

    assert_correct_fields_for_user(self.normal_user1)
    assert_correct_fields_for_user(self.superuser1)
def METHOD_NAME(self):
    """Refreshing via the v2 endpoint must rotate the caller's token."""
    refresh_path = reverse("v2:auth:refresh-token")
    refresh_response = self.normal_user1_client.post(refresh_path)
    self.assertEqual(refresh_response.status_code, status.HTTP_200_OK)
    issued_key = refresh_response.data["token"]
    # The old key must be replaced and the new one persisted for the user.
    self.assertNotEqual(issued_key, self.normal_user1_token.key)
    self.assertEqual(issued_key, Token.objects.get(user=self.normal_user1).key)
#!/usr/bin/env python
# Copyright (C) 2006-2021 Music Technology Group - Universitat Pompeu Fabra
#
# This file is part of Essentia
#
# Essentia is free software: you can redistribute it and/or modify it under
# the terms of the GNU Affero General Public License as published by the Free
# Software Foundation (FSF), either version 3 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the Affero GNU General Public License
# version 3 along with this program. If not, see http://www.gnu.org/licenses/
from essentia_test import *
from essentia.standard import MonoLoader, PercivalBpmEstimator
class TestPercivalBpmEstimator(TestCase):
    """Unit tests for Essentia's PercivalBpmEstimator algorithm."""

    def testInvalidParam(self):
        # Every size/rate parameter must be strictly positive; negative
        # values have to be rejected at configuration time.
        self.assertConfigureFails(PercivalBpmEstimator(), { 'frameSize': -1 })
        self.assertConfigureFails(PercivalBpmEstimator(), { 'frameSizeOSS': -1 })
        self.assertConfigureFails(PercivalBpmEstimator(), { 'hopSize': -1 })
        self.assertConfigureFails(PercivalBpmEstimator(), { 'hopSizeOSS': -1 })
        self.assertConfigureFails(PercivalBpmEstimator(), { 'maxBPM': -1 })
        self.assertConfigureFails(PercivalBpmEstimator(), { 'minBPM': -1 })
        self.assertConfigureFails(PercivalBpmEstimator(), { 'sampleRate': -1 })

    # PercivalBpmEstimator() is called inside LoopBpmEstimator().
    # LoopBpmEstimator() also considers "confidence" as a parameter.
    # Regression tests are made on current performance observations
    # which return an accurate value to 1 place of decimal.
    def testRegression(self):
        audio = MonoLoader(filename=join(testdata.audio_dir, 'recorded', 'techno_loop.wav'))()
        expectedEstimate = 125
        estimate = PercivalBpmEstimator()(audio)
        # Tolerance tuned to 0.1 based on empirical test resulting in BPM = 125.28
        # NOTE(review): third argument is treated as a tolerance here; confirm
        # essentia_test's assertAlmostEqual takes a precision, since unittest's
        # version would interpret it as integer decimal "places".
        self.assertAlmostEqual(expectedEstimate, estimate, 0.1)
        # prints 125.28408813476562

        # Define markers for significant meaningful subsections
        # to give proportional relationship with audio length.
        # Similar strategy used for LoopBpmEstimator().
        len90 = int(0.9*len(audio))  # End point for 90% of loop
        len75 = int(0.75*len(audio))  # 75% point
        len50 = int(0.5*len(audio))  # mid point

        # If any future changes break these asserts,
        # then this will indicate something in the algorithm has changed.
        expectedEstimate = 124.9
        estimate = PercivalBpmEstimator()(audio[0:len90])
        self.assertAlmostEqual(expectedEstimate, estimate, 0.1)
        # prints 124.90558624267578
        estimate = PercivalBpmEstimator()(audio[5000:len75])
        self.assertAlmostEqual(expectedEstimate, estimate, 0.1)
        # prints 124.90558624267578
        estimate = PercivalBpmEstimator()(audio[0:len50])
        self.assertAlmostEqual(expectedEstimate, estimate, 0.1)
        # prints 124.90558624267578

    def testSilentEdge(self):
        # Prepending/appending silence must not disturb the BPM estimate.
        audio = MonoLoader(filename=join(testdata.audio_dir, 'recorded', 'techno_loop.wav'))()
        bpmEstimate = 125  # NOTE(review): unused; kept for documentation value
        lenSilence = 30000  # N.B The beat period is 21168 samples for 125 bpm @ 44.1k samp. rate
        silentAudio = zeros(lenSilence)
        expectedEstimate = 124.9
        # Test addition of non-musical silence before the loop starts.
        # The length is deliberately NOT a whole beat period.
        # case 1: there is non-musical silence before the loop starts
        signal1 = numpy.append(silentAudio, audio)
        estimate = PercivalBpmEstimator()(signal1)
        self.assertAlmostEqual(expectedEstimate, estimate, 0.1)
        # case 2: there is non-musical silence after the loop ends
        signal2 = numpy.append(audio,silentAudio)
        estimate = PercivalBpmEstimator()(signal2)
        self.assertAlmostEqual(expectedEstimate, estimate, 0.1)
        # case 3: there is non-musical silence at both ends
        signal3 = numpy.append(signal1, silentAudio)
        estimate = PercivalBpmEstimator()(signal3)
        self.assertAlmostEqual(expectedEstimate, estimate, 0.1)

    # In the previous test, the length of the silence is independent of the sample length.
    # This test examines response to adding silences of lengths equal to a multiple of the beat period.
    def testExactAudioLengthMatch(self):
        audio = MonoLoader(filename=join(testdata.audio_dir, 'recorded', 'techno_loop.wav'))()
        bpmEstimate = 125  # NOTE(review): unused; kept for documentation value
        beatPeriod = 21168  # N.B The beat period is 21168 samples for 125 bpm @ 44.1k samp. rate
        silentAudio = zeros(beatPeriod)
        expectedEstimate = 124.9
        # Add non-musical silence to the beginning of the audio
        signal1 = numpy.append(silentAudio, audio)
        estimate = PercivalBpmEstimator()(signal1)
        self.assertAlmostEqual(expectedEstimate, estimate, 0.1)
        # Add non-musical silence to the end of the audio
        signal2 = numpy.append(audio, silentAudio)
        estimate = PercivalBpmEstimator()(signal2)
        self.assertAlmostEqual(expectedEstimate, estimate, 0.1)
        # Concatenate silence at both ends
        signal3 = numpy.append(signal1, silentAudio)
        estimate = PercivalBpmEstimator()(signal3)
        self.assertAlmostEqual(expectedEstimate, estimate, 0.1)

    # A series of assert checks on the BPM estimator for empty, zero or constant signals.
    # PercivalBpmEstimator uses the percival estimator internally.
    # The runtime errors have their origin in that algorithm.
    def METHOD_NAME(self):
        # Empty input cannot be analysed and must raise.
        emptyAudio = []
        self.assertRaises(RuntimeError, lambda: PercivalBpmEstimator()(emptyAudio))

    def testZero(self):
        # All-zero input yields a BPM of exactly zero.
        beatPeriod = 21168  # N.B The beat period is 21168 samples for 125 bpm @ 44.1k samp. rate
        zeroAudio = zeros(beatPeriod)
        estimate = PercivalBpmEstimator()(zeroAudio)
        self.assertEqual(estimate, 0.0)

    def testConstantInput(self):
        # Constant (DC) input produces a stable, documented estimate.
        beatPeriod = 21168  # N.B The beat period is 21168 samples for 125 bpm @ 44.1k samp. rate
        onesAudio = ones(beatPeriod)
        estimate = PercivalBpmEstimator()(onesAudio)
        # The observed BPM is also 104.4 for constant input of ones.
        self.assertAlmostEqual(estimate, 104.40341,8)
        constantInput = [0.5 for i in range(21168)]
        estimate = PercivalBpmEstimator()(constantInput)
        # The observed BPM is also around 104.4 for another constant input value, 0.5
        self.assertAlmostEqual(estimate, 104.40341,8)
        constantInput = [0.5 for i in range(21168)]
        # Repeat test but tweak a config. parameter out of its default value.
        estimate = PercivalBpmEstimator(maxBPM=60)(constantInput)
        self.assertAlmostEqual(estimate, 104.40341,8)
# Standard Essentia test harness boilerplate: collect this module's tests
# and run them with verbose output when executed directly.
suite = allTests(TestPercivalBpmEstimator)

if __name__ == '__main__':
    TextTestRunner(verbosity=2).run(suite)
"""Stocks Trading Hours Model."""
import logging
import os
from datetime import datetime
import pandas as pd
import pytz
from openbb_terminal.decorators import log_start_end
logger = logging.getLogger(__name__)
# pylint: disable=no-member
# pylint: disable=no-member
@log_start_end(log=logger)
def get_bursa(symbol: str) -> pd.DataFrame:
    """Get current exchange open hours.

    Parameters
    ----------
    symbol : str
        Exchange symbol; matched (case-insensitively) against the
        "short_name" column first, then against the index.

    Returns
    -------
    pd.DataFrame
        Exchange info with an extra "open" row, or an empty DataFrame
        when the symbol is unknown.
    """
    bursa = all_bursa()
    symbol = symbol.upper()
    # Resolve the symbol either by short name or by index label.  The two
    # branches previously duplicated the "open"-row construction below;
    # only the lookup differs, so the shared tail is factored out.
    if symbol in bursa["short_name"].values:
        df = pd.DataFrame(bursa.loc[bursa["short_name"] == symbol]).transpose()
    elif symbol in bursa.index:
        df = pd.DataFrame(bursa.loc[symbol])
    else:
        return pd.DataFrame()
    is_open = check_if_open(bursa, symbol)
    df_is_open = pd.DataFrame([is_open], index=["open"], columns=df.columns.values)
    return pd.concat([df, df_is_open], axis=0)
@log_start_end(log=logger)
def METHOD_NAME() -> pd.DataFrame:
    """Get open exchanges.

    Returns
    -------
    pd.DataFrame
        Currently open exchanges
    """
    bursa = all_bursa()
    # Flag each exchange as open/closed, then keep only the open ones.
    bursa["open"] = [check_if_open(bursa, exchange) for exchange in bursa.index]
    bursa = bursa.loc[bursa["open"]]
    return bursa[["name", "short_name"]]
@log_start_end(log=logger)
def get_closed() -> pd.DataFrame:
    """Get closed exchanges.

    Returns
    -------
    pd.DataFrame
        Currently closed exchanges
    """
    bursa = all_bursa()
    # Flag each exchange as open/closed, then keep only the closed ones.
    bursa["open"] = [check_if_open(bursa, exchange) for exchange in bursa.index]
    bursa = bursa.loc[~bursa["open"]]
    return bursa[["name", "short_name"]]
@log_start_end(log=logger)
def get_all() -> pd.DataFrame:
    """Get all exchanges.

    Returns
    -------
    pd.DataFrame
        All available exchanges with their current open/closed state
    """
    bursa = all_bursa()
    # Attach the live open/closed flag for every exchange.
    bursa["open"] = [check_if_open(bursa, exchange) for exchange in bursa.index]
    return bursa[["name", "short_name", "open"]]
@log_start_end(log=logger)
def get_all_exchange_short_names() -> pd.DataFrame:
    """Get all exchanges short names.

    Returns
    -------
    pd.DataFrame
        All available exchanges short names
    """
    bursa = all_bursa()
    # The previous implementation computed a live open/closed flag for
    # every exchange and then discarded it — the returned frame only ever
    # contained the short names.  That per-exchange timezone work was pure
    # overhead, so it is dropped here; the returned data is unchanged.
    return bursa[["short_name"]]
@log_start_end(log=logger)
def all_bursa():
    """Load the exchange-hours reference table bundled with the package.

    Returns
    -------
    pd.DataFrame
        All exchanges, as stored in data/bursa_open_hours.json.
    """
    json_path = os.path.join(os.path.dirname(__file__), "data/bursa_open_hours.json")
    return pd.read_json(json_path)
def check_if_open(bursa: pd.DataFrame, exchange: str) -> bool:
    """Check whether a market is currently open.

    Parameters
    ----------
    bursa : pd.DataFrame
        DataFrame of all exchanges; looked up by index label first, then
        by the "short_name" column.
    exchange : str
        Exchange identifier (case-insensitive).

    Returns
    -------
    bool
        True if the market is open right now; False otherwise, including
        when the exchange is unknown.
    """
    exchange = exchange.upper()
    if exchange in bursa.index.values:
        tz = bursa.loc[exchange]["timezone"]
        exchange_df = bursa.loc[exchange]
    elif exchange in bursa["short_name"].values:
        tz = bursa.loc[bursa["short_name"] == exchange]["timezone"].values[0]
        exchange_df = bursa.loc[bursa["short_name"] == exchange]
        exchange_df = exchange_df.iloc[0].transpose()
    else:
        # Bug fix: an unknown exchange previously left `tz`/`exchange_df`
        # unbound and raised UnboundLocalError below; report "closed".
        return False
    utcmoment_naive = datetime.utcnow()
    utcmoment = utcmoment_naive.replace(tzinfo=pytz.utc)
    local_datetime = utcmoment.astimezone(pytz.timezone(tz))
    market_open = datetime.strptime(exchange_df["market_open"], "%H:%M:%S")
    market_close = datetime.strptime(exchange_df["market_close"], "%H:%M:%S")
    after_market_open = local_datetime.time() >= market_open.time()
    before_market_close = local_datetime.time() <= market_close.time()
    try:
        lunchbreak_start = datetime.strptime(
            exchange_df["lunchbreak_start"], "%H:%M:%S"
        )
        lunchbreak_end = datetime.strptime(exchange_df["lunchbreak_end"], "%H:%M:%S")
        after_lunch_start = local_datetime.time() >= lunchbreak_start.time()
        before_lunch_end = local_datetime.time() <= lunchbreak_end.time()
    except Exception:
        # Not every exchange has a lunch break; missing/NaN fields simply
        # mean "no midday closure" (deliberate best-effort handling).
        after_lunch_start = False
        before_lunch_end = False
    if local_datetime.weekday() >= 5:
        # Weekend: markets are closed regardless of local time.
        return False
    return (
        after_market_open
        and before_market_close
        and not (after_lunch_start and before_lunch_end)
    )
# pylint:disable=no-member
import logging
import stat
from asyncio import CancelledError, Task, create_task, get_event_loop
from asyncio import sleep as async_sleep
from concurrent.futures import ThreadPoolExecutor
from contextlib import suppress
from pathlib import Path
from queue import Empty
from time import sleep as blocking_sleep
from typing import Final
import aioprocessing
from aioprocessing.process import AioProcess
from aioprocessing.queues import AioQueue
from pydantic import ByteSize, PositiveFloat
from servicelib.logging_utils import log_context
from watchdog.events import FileSystemEvent
from ._watchdog_extensions import ExtendedInotifyObserver, SafeFileSystemEventHandler
_HEART_BEAT_MARK: Final = 1
logger = logging.getLogger(__name__)
class _LoggingEventHandler(SafeFileSystemEventHandler):
    """Logs file-attribute details for every filesystem event it receives."""

    def event_handler(self, event: FileSystemEvent) -> None:
        # NOTE: runs in the created process
        file_path = Path(event.src_path)
        # NOTE(review): stat() can race with deletion of the file — a
        # vanished path would raise FileNotFoundError here; confirm the
        # base handler tolerates that.
        file_stat = file_path.stat()
        logger.info(
            "Attribute change to: '%s': permissions=%s uid=%s gid=%s size=%s\nFile stat: %s",
            file_path,
            stat.filemode(file_stat.st_mode),
            file_stat.st_uid,
            file_stat.st_gid,
            ByteSize(file_stat.st_size).human_readable(),
            file_stat,
        )
class _LoggingEventHandlerProcess:
    """Runs a watchdog observer in a separate daemon process.

    The child process watches ``path_to_observe`` recursively and logs
    attribute changes; it also pushes a heartbeat mark onto
    ``health_check_queue`` every ``heart_beat_interval_s`` seconds so the
    parent can detect a blocked observer.
    """

    def __init__(
        self,
        path_to_observe: Path,
        health_check_queue: AioQueue,
        heart_beat_interval_s: PositiveFloat,
    ) -> None:
        self.path_to_observe: Path = path_to_observe
        self.health_check_queue: AioQueue = health_check_queue
        self.heart_beat_interval_s: PositiveFloat = heart_beat_interval_s

        # This is accessible from the creating process and from
        # the process itself and is used to stop the process.
        self._stop_queue: AioQueue = aioprocessing.AioQueue()

        self._file_system_event_handler: _LoggingEventHandler | None = None
        self._process: AioProcess | None = None

    def start_process(self) -> None:
        # Spawn the daemon process that runs the observer loop below.
        with log_context(
            logger,
            logging.DEBUG,
            f"{_LoggingEventHandlerProcess.__name__} start_process",
        ):
            self._process = aioprocessing.AioProcess(
                target=self.METHOD_NAME, daemon=True
            )
            self._process.start()

    def _stop_process(self) -> None:
        # Politely signal the loop, then forcefully kill and join the
        # process so a stuck observer cannot block shutdown.
        with log_context(
            logger,
            logging.DEBUG,
            f"{_LoggingEventHandlerProcess.__name__} stop_process",
        ):
            self._stop_queue.put(None)
            if self._process:
                # force stop the process
                self._process.kill()
                self._process.join()
                self._process = None
            # cleanup whatever remains
            self._file_system_event_handler = None

    def shutdown(self) -> None:
        # Stop the worker process and unblock any health-queue consumers.
        with log_context(
            logger, logging.DEBUG, f"{_LoggingEventHandlerProcess.__name__} shutdown"
        ):
            self._stop_process()
            # signal queue observers to finish
            self.health_check_queue.put(None)

    def METHOD_NAME(self) -> None:
        # Worker-process entry point: watch the path and emit heartbeats
        # until something is pushed onto the stop queue.
        observer = ExtendedInotifyObserver()
        self._file_system_event_handler = _LoggingEventHandler()
        watch = None
        try:
            watch = observer.schedule(
                event_handler=self._file_system_event_handler,
                path=f"{self.path_to_observe.absolute()}",
                recursive=True,
            )
            observer.start()
            while self._stop_queue.qsize() == 0:
                # NOTE: watchdog handles events internally every 1 second.
                # While doing so it will block this thread briefly.
                # Health check delivery may be delayed.
                self.health_check_queue.put(_HEART_BEAT_MARK)
                blocking_sleep(self.heart_beat_interval_s)
        except Exception:  # pylint: disable=broad-except
            # Log and fall through to cleanup; the parent restarts us.
            logger.exception("Unexpected error")
        finally:
            if watch:
                observer.remove_handler_for_watch(
                    self._file_system_event_handler, watch
                )
            observer.stop()
            logger.warning("%s exited", _LoggingEventHandlerProcess.__name__)
class LoggingEventHandlerObserver:
    """
    Ensures watchdog is not blocked.
    When blocked, it will restart the process handling the watchdog.
    """

    def __init__(
        self,
        path_to_observe: Path,
        heart_beat_interval_s: PositiveFloat,
        *,
        max_heart_beat_wait_interval_s: PositiveFloat = 10,
    ) -> None:
        self.path_to_observe: Path = path_to_observe
        self._heart_beat_interval_s: PositiveFloat = heart_beat_interval_s
        self.max_heart_beat_wait_interval_s: PositiveFloat = (
            max_heart_beat_wait_interval_s
        )

        # Queue the child process uses to report liveness.
        self._health_check_queue = aioprocessing.AioQueue()
        self._logging_event_handler_process = _LoggingEventHandlerProcess(
            path_to_observe=self.path_to_observe,
            health_check_queue=self._health_check_queue,
            heart_beat_interval_s=heart_beat_interval_s,
        )
        self._keep_running: bool = False
        self._task_health_worker: Task | None = None

    @property
    def heart_beat_interval_s(self) -> PositiveFloat:
        # Grace period between health checks.
        # NOTE(review): the 100x multiplier capped by
        # max_heart_beat_wait_interval_s looks intentional (generous grace
        # window) but is surprising — confirm the intended factor.
        return min(
            self._heart_beat_interval_s * 100, self.max_heart_beat_wait_interval_s
        )

    async def _health_worker(self) -> None:
        # Periodically drain the health queue; if no heartbeat arrived
        # during the wait window, restart the observer process.
        wait_for = self.heart_beat_interval_s
        while self._keep_running:
            await async_sleep(wait_for)

            heart_beat_count = 0
            while True:
                try:
                    self._health_check_queue.get_nowait()
                    heart_beat_count += 1
                except Empty:  # noqa: PERF203
                    break

            if heart_beat_count == 0:
                # The restart involves blocking process APIs, so it is
                # pushed onto a worker thread to keep the loop responsive.
                with ThreadPoolExecutor(max_workers=1) as executor:
                    loop = get_event_loop()
                    await loop.run_in_executor(executor, self._stop_observer_process)
                    await loop.run_in_executor(executor, self._start_observer_process)

    def _start_observer_process(self) -> None:
        self._logging_event_handler_process.start_process()

    def _stop_observer_process(self) -> None:
        self._logging_event_handler_process.shutdown()

    async def start(self) -> None:
        """Start the health-monitoring task and the observer process."""
        with log_context(
            logger, logging.INFO, f"{LoggingEventHandlerObserver.__name__} start"
        ):
            self._keep_running = True
            self._task_health_worker = create_task(
                self._health_worker(), name="observer_monitor_health_worker"
            )
            self._start_observer_process()

    async def stop(self) -> None:
        """Stop the observer process and cancel the health-monitoring task."""
        with log_context(
            logger, logging.INFO, f"{LoggingEventHandlerObserver.__name__} stop"
        ):
            self._stop_observer_process()
            self._keep_running = False
            if self._task_health_worker is not None:
                self._task_health_worker.cancel()
                with suppress(CancelledError):
                    await self._task_health_worker
# Authors: Divyesh Narayanan <divyesh.narayanan@gmail.com>
#
# License: BSD (3-clause)
import numpy as np
import torch
from torch import nn
class SleepStagerBlanco2020(nn.Module):
    """Sleep staging architecture from Blanco et al 2020.

    Convolutional neural network for sleep staging described in [Blanco2020]_.
    A series of seven convolutional layers with kernel sizes running down from 7 to 3,
    in an attempt to extract more general features at the beginning, while more specific
    and complex features were extracted in the final stages.

    Parameters
    ----------
    n_channels : int
        Number of EEG channels.
    sfreq : float
        EEG sampling frequency.
    n_conv_chans : int
        Number of convolutional channels. Set to 20 in [Blanco2020]_.
    n_groups : int
        Number of groups for the convolution. Set to 2 in [Blanco2020]_ for 2 Channel EEG.
        controls the connections between inputs and outputs. n_channels and n_conv_chans must be
        divisible by n_groups.
    input_size_s : float
        Size of the input, in seconds.
    n_classes : int
        Number of classes.
    dropout : float
        Dropout rate before the output dense layer.
    apply_batch_norm : bool
        If True, apply batch normalization after both temporal convolutional
        layers.
    return_feats : bool
        If True, return the features, i.e. the output of the feature extractor
        (before the final linear layer). If False, pass the features through
        the final linear layer.

    References
    ----------
    .. [Blanco2020] Fernandez-Blanco, E., Rivero, D. & Pazos, A. Convolutional
        neural networks for sleep stage scoring on a two-channel EEG signal.
        Soft Comput 24, 4067–4079 (2020). https://doi.org/10.1007/s00500-019-04174-1
    """

    def __init__(self, n_channels, sfreq, n_conv_chans=20, input_size_s=30,
                 n_classes=5, n_groups=2, max_pool_size=2, dropout=0.5, apply_batch_norm=False,
                 return_feats=False):
        super().__init__()
        # Number of time samples in one input window.
        input_size = np.ceil(input_size_s * sfreq).astype(int)
        self.n_channels = n_channels

        # Swap in identity layers when batch norm is disabled so the
        # Sequential structure stays the same either way.
        batch_norm = nn.BatchNorm2d if apply_batch_norm else nn.Identity

        # Seven conv blocks; kernel widths shrink 7 -> 5 -> 3 along the
        # time axis, each followed by (optional) batch norm, ReLU and a
        # temporal max-pool.  After the first layer the convolutions are
        # depthwise (groups=n_conv_chans).
        self.feature_extractor = nn.Sequential(
            nn.Conv2d(n_channels, n_conv_chans, (1, 7), groups=n_groups, padding=0),
            batch_norm(n_conv_chans),
            nn.ReLU(),
            nn.MaxPool2d((1, max_pool_size)),
            nn.Conv2d(n_conv_chans, n_conv_chans, (1, 7), groups=n_conv_chans, padding=0),
            batch_norm(n_conv_chans),
            nn.ReLU(),
            nn.MaxPool2d((1, max_pool_size)),
            nn.Conv2d(n_conv_chans, n_conv_chans, (1, 5), groups=n_conv_chans, padding=0),
            batch_norm(n_conv_chans),
            nn.ReLU(),
            nn.MaxPool2d((1, max_pool_size)),
            nn.Conv2d(n_conv_chans, n_conv_chans, (1, 5), groups=n_conv_chans, padding=0),
            batch_norm(n_conv_chans),
            nn.ReLU(),
            nn.MaxPool2d((1, max_pool_size)),
            nn.Conv2d(n_conv_chans, n_conv_chans, (1, 5), groups=n_conv_chans, padding=0),
            batch_norm(n_conv_chans),
            nn.ReLU(),
            nn.MaxPool2d((1, max_pool_size)),
            nn.Conv2d(n_conv_chans, n_conv_chans, (1, 3), groups=n_conv_chans, padding=0),
            batch_norm(n_conv_chans),
            nn.ReLU(),
            nn.MaxPool2d((1, max_pool_size)),
            nn.Conv2d(n_conv_chans, n_conv_chans, (1, 3), groups=n_conv_chans, padding=0),
            batch_norm(n_conv_chans),
            nn.ReLU(),
            nn.MaxPool2d((1, max_pool_size))
        )
        self.len_last_layer = self._len_last_layer(n_channels, input_size)
        self.return_feats = return_feats
        if not return_feats:
            self.fc = nn.Sequential(
                nn.Dropout(dropout),
                nn.Linear(self.len_last_layer, n_classes),
                nn.Softmax(dim=1)
            )

    def _len_last_layer(self, n_channels, input_size):
        # Run a dummy forward pass to measure the flattened feature size
        # feeding the classifier head.
        self.feature_extractor.eval()
        with torch.no_grad():
            out = self.feature_extractor(
                torch.Tensor(1, n_channels, 1, input_size))  # batch_size,n_channels,height,width
        self.feature_extractor.train()
        return len(out.flatten())

    def METHOD_NAME(self, x):
        """Forward pass.

        Parameters
        ----------
        x: torch.Tensor
            Batch of EEG windows of shape (batch_size, n_channels, n_times).
        """
        # Insert a singleton "height" dim so the (1, k) 2D convolutions
        # operate purely along time.
        if x.ndim == 3:
            x = x.unsqueeze(2)

        feats = self.feature_extractor(x).flatten(start_dim=1)

        if self.return_feats:
            return feats
        else:
            return self.fc(feats)
#!/usr/bin/env python3
import unittest
from panda import Panda
from panda.tests.libpanda import libpanda_py
import panda.tests.safety.common as common
from panda.tests.safety.common import CANPackerPanda
MSG_LENKHILFE_3 = 0x0D0 # RX from EPS, for steering angle and driver steering torque
MSG_HCA_1 = 0x0D2 # TX by OP, Heading Control Assist steering torque
MSG_BREMSE_1 = 0x1A0 # RX from ABS, for ego speed
MSG_MOTOR_2 = 0x288 # RX from ECU, for CC state and brake switch state
MSG_ACC_SYSTEM = 0x368 # TX by OP, longitudinal acceleration controls
MSG_MOTOR_3 = 0x380 # RX from ECU, for driver throttle input
MSG_GRA_NEU = 0x38A # TX by OP, ACC control buttons for cancel/resume
MSG_MOTOR_5 = 0x480 # RX from ECU, for ACC main switch state
MSG_ACC_GRA_ANZEIGE = 0x56A # TX by OP, ACC HUD
MSG_LDW_1 = 0x5BE # TX by OP, Lane line recognition and text alerts
class TestVolkswagenPqSafety(common.PandaSafetyTest, common.DriverTorqueSteeringSafetyTest):
    """Shared base for Volkswagen PQ safety tests (stock and OP-longitudinal)."""

    cruise_engaged = False

    # Safety limits and message identifiers exercised by the common tests.
    STANDSTILL_THRESHOLD = 0
    RELAY_MALFUNCTION_ADDR = MSG_HCA_1
    RELAY_MALFUNCTION_BUS = 0
    MAX_RATE_UP = 6
    MAX_RATE_DOWN = 10
    MAX_TORQUE = 300
    MAX_RT_DELTA = 113
    RT_INTERVAL = 250000
    DRIVER_TORQUE_ALLOWANCE = 80
    DRIVER_TORQUE_FACTOR = 3

    @classmethod
    def setUpClass(cls):
        # This class is abstract: skip it when collected directly and only
        # run the concrete stock/long subclasses below.
        if cls.__name__ == "TestVolkswagenPqSafety":
            cls.packer = None
            cls.safety = None
            raise unittest.SkipTest

    def _set_prev_torque(self, t):
        self.safety.set_desired_torque_last(t)
        self.safety.set_rt_torque_last(t)

    # Ego speed (Bremse_1)
    def _speed_msg(self, speed):
        values = {"Geschwindigkeit_neu__Bremse_1_": speed}
        return self.packer.make_can_msg_panda("Bremse_1", 0, values)

    # Brake light switch (shared message Motor_2)
    def _user_brake_msg(self, brake):
        # since this signal is used for engagement status, preserve current state
        return self._motor_2_msg(brake_pressed=brake, cruise_engaged=self.safety.get_controls_allowed())

    # ACC engaged status (shared message Motor_2)
    def _pcm_status_msg(self, enable):
        self.__class__.cruise_engaged = enable
        return self._motor_2_msg(cruise_engaged=enable)

    # Acceleration request to drivetrain coordinator
    def _accel_msg(self, accel):
        values = {"ACS_Sollbeschl": accel}
        return self.packer.make_can_msg_panda("ACC_System", 0, values)

    # Driver steering input torque
    def _torque_driver_msg(self, torque):
        # Torque is encoded as magnitude + sign bit.
        values = {"LH3_LM": abs(torque), "LH3_LMSign": torque < 0}
        return self.packer.make_can_msg_panda("Lenkhilfe_3", 0, values)

    # openpilot steering output torque
    def _torque_cmd_msg(self, torque, steer_req=1):
        values = {"LM_Offset": abs(torque), "LM_OffSign": torque < 0}
        return self.packer.make_can_msg_panda("HCA_1", 0, values)

    # ACC engagement and brake light switch status
    # Called indirectly for compatibility with common.py tests
    def _motor_2_msg(self, brake_pressed=False, cruise_engaged=False):
        values = {"Bremslichtschalter": brake_pressed,
                  "GRA_Status": cruise_engaged}
        return self.packer.make_can_msg_panda("Motor_2", 0, values)

    # ACC main switch status
    def _motor_5_msg(self, main_switch=False):
        values = {"GRA_Hauptschalter": main_switch}
        return self.packer.make_can_msg_panda("Motor_5", 0, values)

    # Driver throttle input (Motor_3)
    def METHOD_NAME(self, gas):
        values = {"Fahrpedal_Rohsignal": gas}
        return self.packer.make_can_msg_panda("Motor_3", 0, values)

    # Cruise control buttons (GRA_Neu)
    def _button_msg(self, _set=False, resume=False, cancel=False, bus=2):
        values = {"GRA_Neu_Setzen": _set, "GRA_Recall": resume, "GRA_Abbrechen": cancel}
        return self.packer.make_can_msg_panda("GRA_Neu", bus, values)

    def test_torque_measurements(self):
        # TODO: make this test work with all cars
        # The safety firmware keeps a rolling min/max window of driver
        # torque; feed a spike then zeros and watch the window decay.
        self._rx(self._torque_driver_msg(50))
        self._rx(self._torque_driver_msg(-50))
        self._rx(self._torque_driver_msg(0))
        self._rx(self._torque_driver_msg(0))
        self._rx(self._torque_driver_msg(0))
        self._rx(self._torque_driver_msg(0))

        self.assertEqual(-50, self.safety.get_torque_driver_min())
        self.assertEqual(50, self.safety.get_torque_driver_max())

        self._rx(self._torque_driver_msg(0))
        self.assertEqual(0, self.safety.get_torque_driver_max())
        self.assertEqual(-50, self.safety.get_torque_driver_min())

        self._rx(self._torque_driver_msg(0))
        self.assertEqual(0, self.safety.get_torque_driver_max())
        self.assertEqual(0, self.safety.get_torque_driver_min())
class TestVolkswagenPqStockSafety(TestVolkswagenPqSafety):
    """PQ safety with the stock (camera-based) longitudinal control."""

    # Transmit of GRA_Neu is allowed on bus 0 and 2 to keep compatibility with gateway and camera integration
    TX_MSGS = [[MSG_HCA_1, 0], [MSG_GRA_NEU, 0], [MSG_GRA_NEU, 2], [MSG_LDW_1, 0]]
    FWD_BLACKLISTED_ADDRS = {2: [MSG_HCA_1, MSG_LDW_1]}
    FWD_BUS_LOOKUP = {0: 2, 2: 0}

    def setUp(self):
        self.packer = CANPackerPanda("vw_golf_mk4")
        self.safety = libpanda_py.libpanda
        self.safety.set_safety_hooks(Panda.SAFETY_VOLKSWAGEN_PQ, 0)
        self.safety.init_tests()

    def test_spam_cancel_safety_check(self):
        # While disengaged, only the cancel button may be spoofed;
        # set/resume must be blocked so openpilot cannot engage stock ACC.
        self.safety.set_controls_allowed(0)
        self.assertTrue(self._tx(self._button_msg(cancel=True)))
        self.assertFalse(self._tx(self._button_msg(resume=True)))
        self.assertFalse(self._tx(self._button_msg(_set=True)))
        # do not block resume if we are engaged already
        self.safety.set_controls_allowed(1)
        self.assertTrue(self._tx(self._button_msg(resume=True)))
class TestVolkswagenPqLongSafety(TestVolkswagenPqSafety, common.LongitudinalAccelSafetyTest):
    """PQ safety with openpilot longitudinal control enabled."""

    TX_MSGS = [[MSG_HCA_1, 0], [MSG_LDW_1, 0], [MSG_ACC_SYSTEM, 0], [MSG_ACC_GRA_ANZEIGE, 0]]
    FWD_BLACKLISTED_ADDRS = {2: [MSG_HCA_1, MSG_LDW_1, MSG_ACC_SYSTEM, MSG_ACC_GRA_ANZEIGE]}
    FWD_BUS_LOOKUP = {0: 2, 2: 0}
    INACTIVE_ACCEL = 3.01

    def setUp(self):
        self.packer = CANPackerPanda("vw_golf_mk4")
        self.safety = libpanda_py.libpanda
        self.safety.set_safety_hooks(Panda.SAFETY_VOLKSWAGEN_PQ, Panda.FLAG_VOLKSWAGEN_LONG_CONTROL)
        self.safety.init_tests()

    # stock cruise controls are entirely bypassed under openpilot longitudinal control
    def test_disable_control_allowed_from_cruise(self):
        pass

    def test_enable_control_allowed_from_cruise(self):
        pass

    def test_cruise_engaged_prev(self):
        pass

    def test_set_and_resume_buttons(self):
        for button in ["set", "resume"]:
            # ACC main switch must be on, engage on falling edge
            self.safety.set_controls_allowed(0)
            self._rx(self._motor_5_msg(main_switch=False))
            self._rx(self._button_msg(_set=(button == "set"), resume=(button == "resume"), bus=0))
            self._rx(self._button_msg(bus=0))
            self.assertFalse(self.safety.get_controls_allowed(), f"controls allowed on {button} with main switch off")
            self._rx(self._motor_5_msg(main_switch=True))
            self._rx(self._button_msg(_set=(button == "set"), resume=(button == "resume"), bus=0))
            self.assertFalse(self.safety.get_controls_allowed(), f"controls allowed on {button} rising edge")
            self._rx(self._button_msg(bus=0))
            self.assertTrue(self.safety.get_controls_allowed(), f"controls not allowed on {button} falling edge")

    def test_cancel_button(self):
        # Disable on rising edge of cancel button
        self._rx(self._motor_5_msg(main_switch=True))
        self.safety.set_controls_allowed(1)
        self._rx(self._button_msg(cancel=True, bus=0))
        self.assertFalse(self.safety.get_controls_allowed(), "controls allowed after cancel")

    def test_main_switch(self):
        # Disable as soon as main switch turns off
        self._rx(self._motor_5_msg(main_switch=True))
        self.safety.set_controls_allowed(1)
        self._rx(self._motor_5_msg(main_switch=False))
        self.assertFalse(self.safety.get_controls_allowed(), "controls allowed after ACC main switch off")
if __name__ == "__main__":
unittest.main() |
#!/usr/bin/python
# SPDX-License-Identifier: LGPL-2.1-or-later
from __future__ import absolute_import, print_function, unicode_literals
from optparse import OptionParser
import os.path
import sys
import dbus
import dbus.service
import dbus.mainloop.glib
try:
from gi.repository import GObject
except ImportError:
import gobject as GObject
BUS_NAME='org.bluez.obex'
PATH = '/org/bluez/obex'
CLIENT_INTERFACE='org.bluez.obex.Client1'
SESSION_INTERFACE='org.bluez.obex.Session1'
FILE_TRASNFER_INTERFACE='org.bluez.obex.FileTransfer1'
TRANSFER_INTERFACE='org.bluez.obex.Transfer1'
def parse_options():
    """Build the command-line option parser and parse sys.argv.

    Returns
    -------
    (options, args)
        The tuple returned by ``OptionParser.parse_args()``.
    """
    # Bug fix: `parser` was used without ever being created, so every call
    # failed with NameError.  Instantiate the OptionParser first.
    parser = OptionParser()
    parser.add_option("-d", "--device", dest="device",
                      help="Device to connect", metavar="DEVICE")
    parser.add_option("-c", "--chdir", dest="new_dir",
                      help="Change current directory to DIR", metavar="DIR")
    parser.add_option("-l", "--list", action="store_true", dest="list_dir",
                      help="List the current directory")
    parser.add_option("-g", "--get", dest="get_file",
                      help="Get FILE", metavar="FILE")
    parser.add_option("-p", "--put", dest="put_file",
                      help="Put FILE", metavar="FILE")
    parser.add_option("-y", "--copy", dest="copy_file",
                      help="Copy FILE", metavar="FILE")
    parser.add_option("-m", "--move", dest="move_file",
                      help="Move FILE", metavar="FILE")
    parser.add_option("-n", "--destname", dest="dest_file",
                      help="Destination FILE", metavar="FILE")
    parser.add_option("-r", "--remove", dest="remove_file",
                      help="Remove FILE", metavar="FILE")
    parser.add_option("-v", "--verbose", action="store_true",
                      dest="verbose")
    return parser.parse_args()
class FtpClient:
    """Client wrapper for a single org.bluez.obex FTP session.

    Wraps the Session1/FileTransfer1 D-Bus interfaces and follows transfer
    progress through ``PropertiesChanged`` signals; quits the global
    ``mainloop`` when a transfer completes or fails.
    """

    def __init__(self, session_path, verbose=False):
        self.transferred = 0
        self.transfer_path = None
        self.transfer_size = 0
        self.verbose = verbose
        bus = dbus.SessionBus()
        obj = bus.get_object(BUS_NAME, session_path)
        self.session = dbus.Interface(obj, SESSION_INTERFACE)
        self.ftp = dbus.Interface(obj, FILE_TRASNFER_INTERFACE)
        # Watch Transfer1 property updates to report progress/completion.
        bus.add_signal_receiver(self.properties_changed,
                                dbus_interface="org.freedesktop.DBus.Properties",
                                signal_name="PropertiesChanged",
                                path_keyword="path")

    def create_transfer_reply(self, path, properties):
        """Reply handler for GetFile/PutFile: remember the created transfer."""
        self.transfer_path = path
        self.transfer_size = properties["Size"]
        if self.verbose:
            print("Transfer created: %s" % path)

    def generic_reply(self):
        """Reply handler for operations that return no transfer object."""
        if self.verbose:
            print("Operation succeeded")

    def error(self, err):
        """Error handler: report the failure and stop the main loop."""
        print(err)
        mainloop.quit()

    def properties_changed(self, interface, properties, invalidated, path):
        """Handle Transfer1 PropertiesChanged: print progress, quit when done."""
        if path != self.transfer_path:
            return
        if "Status" in properties and \
                (properties['Status'] == 'complete' or
                 properties['Status'] == 'error'):
            if self.verbose:
                print("Transfer %s" % properties['Status'])
            mainloop.quit()
            return
        if "Transferred" not in properties:
            return
        value = properties["Transferred"]
        speed = (value - self.transferred) / 1000
        print("Transfer progress %d/%d at %d kBps" % (value,
                                                      self.transfer_size,
                                                      speed))
        self.transferred = value

    def change_folder(self, new_dir):
        """Descend into new_dir one path component at a time."""
        for node in new_dir.split("/"):
            self.ftp.ChangeFolder(node)

    def list_folder(self):
        """Print the remote directory listing; folders get a '/' suffix."""
        for i in self.ftp.ListFolder():
            if i["Type"] == "folder":
                print("%s/" % (i["Name"]))
            else:
                print("%s" % (i["Name"]))

    def put_file(self, filename):
        """Upload a local file under its basename."""
        self.ftp.PutFile(os.path.abspath(filename),
                         os.path.basename(filename),
                         reply_handler=self.create_transfer_reply,
                         error_handler=self.error)

    def get_file(self, filename):
        """Download a remote file."""
        self.ftp.GetFile(os.path.abspath(filename),
                         os.path.basename(filename),
                         reply_handler=self.create_transfer_reply,
                         error_handler=self.error)

    def remove_file(self, filename):
        """Delete a remote file."""
        self.ftp.Delete(filename,
                        reply_handler=self.generic_reply,
                        error_handler=self.error)

    def move_file(self, filename, destname):
        """Move/rename a remote file."""
        self.ftp.MoveFile(filename, destname,
                          reply_handler=self.generic_reply,
                          error_handler=self.error)

    def copy_file(self, filename, destname):
        """Copy a remote file."""
        self.ftp.CopyFile(filename, destname,
                          reply_handler=self.generic_reply,
                          error_handler=self.error)
if __name__ == '__main__':
    dbus.mainloop.glib.DBusGMainLoop(set_as_default=True)

    parser = OptionParser()
    (options, args) = parse_options()

    # A target device is mandatory.
    if not options.device:
        parser.print_help()
        sys.exit(0)

    bus = dbus.SessionBus()
    mainloop = GObject.MainLoop()

    client = dbus.Interface(bus.get_object(BUS_NAME, PATH),
                            CLIENT_INTERFACE)

    print("Creating Session")
    path = client.CreateSession(options.device, {"Target": "ftp"})

    ftp_client = FtpClient(path, options.verbose)

    # Perform each requested operation; async ones report via the mainloop.
    if options.new_dir:
        ftp_client.change_folder(options.new_dir)
    if options.list_dir:
        ftp_client.list_folder()
    if options.get_file:
        ftp_client.get_file(options.get_file)
    if options.put_file:
        ftp_client.put_file(options.put_file)
    if options.move_file:
        ftp_client.move_file(options.move_file, options.dest_file)
    if options.copy_file:
        ftp_client.copy_file(options.copy_file, options.dest_file)
    if options.remove_file:
        ftp_client.remove_file(options.remove_file)

    mainloop.run()
1,870 | test | """
An implementation of RandLA-Net based on the paper:
RandLA-Net: Efficient Semantic Segmentation of Large-Scale Point Clouds
Reference: https://arxiv.org/abs/1911.11236
"""
import os.path as osp
import torch
import torch.nn.functional as F
from randlanet_classification import DilatedResidualBlock, SharedMLP, decimate
from torch.nn import Linear
from torchmetrics.functional import jaccard_index
from tqdm import tqdm
import torch_geometric.transforms as T
from torch_geometric.datasets import ShapeNet
from torch_geometric.loader import DataLoader
from torch_geometric.nn import knn_interpolate
from torch_geometric.typing import WITH_TORCH_CLUSTER
from torch_geometric.utils import scatter
# RandLA-Net depends on torch-cluster for neighbor search; bail out early.
if not WITH_TORCH_CLUSTER:
    quit("This example requires 'torch-cluster'")
# Dataset configuration: one ShapeNet category, with random jitter/rotation
# augmentation at train time; both splits share the NormalizeScale transform.
category = 'Airplane'  # Pass in `None` to train on all categories.
category_num_classes = 4  # 4 for Airplane - see ShapeNet for details
path = osp.join(osp.dirname(osp.realpath(__file__)), '..', 'data', 'ShapeNet')
transform = T.Compose([
    T.RandomJitter(0.01),
    T.RandomRotate(15, axis=0),
    T.RandomRotate(15, axis=1),
    T.RandomRotate(15, axis=2),
])
pre_transform = T.NormalizeScale()
train_dataset = ShapeNet(
    path,
    category,
    split='trainval',
    transform=transform,
    pre_transform=pre_transform,
)
test_dataset = ShapeNet(
    path,
    category,
    split='test',
    pre_transform=pre_transform,
)
# Batch size 12; workers chosen for the reference machine.
train_loader = DataLoader(train_dataset, 12, shuffle=True, num_workers=6)
test_loader = DataLoader(test_dataset, 12, shuffle=False, num_workers=6)
class FPModule(torch.nn.Module):
    """Feature propagation with a skip connection.

    Upsamples features onto the skip resolution via k-NN interpolation,
    concatenates the skip features, and applies the given network.
    """
    def __init__(self, k, nn):
        super().__init__()
        self.k = k
        self.nn = nn

    def forward(self, x, pos, batch, x_skip, pos_skip, batch_skip):
        upsampled = knn_interpolate(x, pos, pos_skip, batch, batch_skip,
                                    k=self.k)
        fused = torch.cat([upsampled, x_skip], dim=1)
        return self.nn(fused), pos_skip, batch_skip
class Net(torch.nn.Module):
    """RandLA-Net encoder/decoder for point-cloud segmentation.

    Encoder: four dilated residual blocks, each followed by a decimation
    step that keeps 1/decimation of the points. Decoder: four FP modules
    that upsample back through the saved skip connections.
    """
    def __init__(
        self,
        num_features: int,
        num_classes: int,
        decimation: int = 4,
        num_neighbors: int = 16,
        return_logits: bool = False,
    ):
        super().__init__()
        self.decimation = decimation
        # An option to return logits instead of log probabilities:
        self.return_logits = return_logits
        # Authors use 8, which is a bottleneck
        # for the final MLP, and also when num_classes>8
        # or num_features>8.
        d_bottleneck = max(32, num_classes, num_features)
        self.fc0 = Linear(num_features, d_bottleneck)
        self.block1 = DilatedResidualBlock(num_neighbors, d_bottleneck, 32)
        self.block2 = DilatedResidualBlock(num_neighbors, 32, 128)
        self.block3 = DilatedResidualBlock(num_neighbors, 128, 256)
        self.block4 = DilatedResidualBlock(num_neighbors, 256, 512)
        self.mlp_summit = SharedMLP([512, 512])
        # Decoder: channel sizes mirror the encoder skip connections.
        self.fp4 = FPModule(1, SharedMLP([512 + 256, 256]))
        self.fp3 = FPModule(1, SharedMLP([256 + 128, 128]))
        self.fp2 = FPModule(1, SharedMLP([128 + 32, 32]))
        self.fp1 = FPModule(1, SharedMLP([32 + 32, d_bottleneck]))
        self.mlp_classif = SharedMLP([d_bottleneck, 64, 32],
                                     dropout=[0.0, 0.5])
        self.fc_classif = Linear(32, num_classes)

    def forward(self, x, pos, batch, ptr):
        """Return per-point log-probabilities (or logits if return_logits).

        Falls back to using positions as features when ``x`` is None.
        NOTE: the decimation chain is order-sensitive — each block consumes
        the ``ptr`` produced by the previous ``decimate`` call.
        """
        x = x if x is not None else pos
        b1_out = self.block1(self.fc0(x), pos, batch)
        b1_out_decimated, ptr1 = decimate(b1_out, ptr, self.decimation)
        b2_out = self.block2(*b1_out_decimated)
        b2_out_decimated, ptr2 = decimate(b2_out, ptr1, self.decimation)
        b3_out = self.block3(*b2_out_decimated)
        b3_out_decimated, ptr3 = decimate(b3_out, ptr2, self.decimation)
        b4_out = self.block4(*b3_out_decimated)
        b4_out_decimated, _ = decimate(b4_out, ptr3, self.decimation)
        mlp_out = (
            self.mlp_summit(b4_out_decimated[0]),
            b4_out_decimated[1],
            b4_out_decimated[2],
        )
        # Decoder: upsample back up the pyramid through the skip outputs.
        fp4_out = self.fp4(*mlp_out, *b3_out_decimated)
        fp3_out = self.fp3(*fp4_out, *b2_out_decimated)
        fp2_out = self.fp2(*fp3_out, *b1_out_decimated)
        fp1_out = self.fp1(*fp2_out, *b1_out)
        x = self.mlp_classif(fp1_out[0])
        logits = self.fc_classif(x)
        if self.return_logits:
            return logits
        probas = logits.log_softmax(dim=-1)
        return probas
# Instantiate the model (3 input features; positions are used when x is None)
# and optimizer on the available device.
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
model = Net(3, category_num_classes).to(device)
optimizer = torch.optim.Adam(model.parameters(), lr=0.01)
def train():
    """Run one training epoch, logging loss/accuracy every 10 batches."""
    model.train()

    total_loss = correct_nodes = total_nodes = 0
    for step, data in tqdm(enumerate(train_loader)):
        data = data.to(device)
        optimizer.zero_grad()
        out = model(data.x, data.pos, data.batch, data.ptr)
        loss = F.nll_loss(out, data.y)
        loss.backward()
        optimizer.step()

        # Accumulate running statistics for the periodic report.
        total_loss += loss.item()
        correct_nodes += out.argmax(dim=1).eq(data.y).sum().item()
        total_nodes += data.num_nodes
        if (step + 1) % 10 == 0:
            print(f'[{step+1}/{len(train_loader)}] Loss: {total_loss / 10:.4f} '
                  f'Train Acc: {correct_nodes / total_nodes:.4f}')
            total_loss = correct_nodes = total_nodes = 0
@torch.no_grad()
def METHOD_NAME(loader):
    """Evaluate the model on *loader* and return the global mean IoU.

    Per-shape IoUs are collected over ALL batches and then averaged per
    category before taking the global mean. (NOTE: the name is a masked
    placeholder; this is the evaluation/test routine.)
    """
    model.eval()

    ious, categories = [], []
    # Maps global part labels to a contiguous 0..k-1 range per category.
    y_map = torch.empty(loader.dataset.num_classes, device=device).long()
    for data in loader:
        data = data.to(device)
        outs = model(data.x, data.pos, data.batch, data.ptr)

        sizes = (data.ptr[1:] - data.ptr[:-1]).tolist()
        for out, y, category in zip(outs.split(sizes), data.y.split(sizes),
                                    data.category.tolist()):
            category = list(ShapeNet.seg_classes.keys())[category]
            part = ShapeNet.seg_classes[category]
            part = torch.tensor(part, device=device)

            y_map[part] = torch.arange(part.size(0), device=device)

            iou = jaccard_index(
                out[:, part].argmax(dim=-1),
                y_map[y],
                num_classes=part.size(0),
                absent_score=1.0,
            )
            ious.append(iou)
            categories.append(data.category)

    # Aggregate over the whole loader, not just the first batch.
    iou = torch.tensor(ious, device=device)
    category = torch.cat(categories, dim=0)
    mean_iou = scatter(iou, category, reduce='mean')  # Per-category IoU.
    return float(mean_iou.mean())  # Global IoU.
# Train for 30 epochs, reporting the global test IoU after each one.
for epoch in range(1, 31):
    train()
    iou = METHOD_NAME(test_loader)
    print(f'Epoch: {epoch:02d}, Test IoU: {iou:.4f}')
"""
Unit test for local API service
"""
from unittest import TestCase
from unittest.mock import Mock, patch
from samcli.lib.providers.provider import Api
from samcli.lib.providers.api_collector import ApiCollector
from samcli.lib.providers.api_provider import ApiProvider
from samcli.commands.local.lib.exceptions import NoApisDefined
from samcli.commands.local.lib.local_api_service import LocalApiService
from samcli.local.apigw.route import Route
class TestLocalApiService_start(TestCase):
    """Tests for LocalApiService.start() using fully mocked collaborators."""

    def setUp(self):
        # Common fixtures shared by all tests in this class.
        self.port = 123
        self.host = "abc"
        self.static_dir = "static"
        self.cwd = "cwd"
        self.template = {"hello": "world"}
        self.lambda_invoke_context_mock = Mock()
        self.lambda_runner_mock = Mock()
        self.api_provider_mock = Mock()
        self.apigw_service = Mock()
        self.stderr_mock = Mock()
        # Wire the invoke-context mock the way LocalApiService reads it.
        self.lambda_invoke_context_mock.template = self.template
        self.lambda_invoke_context_mock.local_lambda_runner = self.lambda_runner_mock
        self.lambda_invoke_context_mock.get_cwd = Mock()
        self.lambda_invoke_context_mock.get_cwd.return_value = self.cwd
        self.lambda_invoke_context_mock.stderr = self.stderr_mock

    # NOTE: decorator order is significant — @patch decorators are applied
    # bottom-up, so arguments arrive in reverse order of the decorators.
    @patch("samcli.commands.local.lib.local_api_service.LocalApigwService")
    @patch("samcli.commands.local.lib.local_api_service.ApiProvider")
    @patch.object(LocalApiService, "_make_static_dir_path")
    @patch.object(LocalApiService, "_print_routes")
    def test_must_start_service(self, log_routes_mock, make_static_dir_mock, SamApiProviderMock, ApiGwServiceMock):
        """start() wires provider, static dir, and gateway service together."""
        routing_list = [1, 2, 3]  # something
        static_dir_path = "/foo/bar"
        make_static_dir_mock.return_value = static_dir_path
        SamApiProviderMock.return_value = self.api_provider_mock
        ApiGwServiceMock.return_value = self.apigw_service
        # Now start the service
        local_service = LocalApiService(self.lambda_invoke_context_mock, self.port, self.host, self.static_dir)
        local_service.api_provider.api.routes = routing_list
        local_service.start()
        # Make sure the right methods are called
        SamApiProviderMock.assert_called_with(
            self.lambda_invoke_context_mock.stacks,
            cwd=self.cwd,
        )
        log_routes_mock.assert_called_with(routing_list, self.host, self.port)
        make_static_dir_mock.assert_called_with(self.cwd, self.static_dir)
        ApiGwServiceMock.assert_called_with(
            api=self.api_provider_mock.api,
            lambda_runner=self.lambda_runner_mock,
            static_dir=static_dir_path,
            port=self.port,
            host=self.host,
            stderr=self.stderr_mock,
        )
        self.apigw_service.create.assert_called_with()
        self.apigw_service.run.assert_called_with()

    @patch("samcli.commands.local.lib.local_api_service.LocalApigwService")
    @patch("samcli.commands.local.lib.local_api_service.ApiProvider")
    @patch.object(LocalApiService, "_make_static_dir_path")
    @patch.object(LocalApiService, "_print_routes")
    @patch.object(ApiProvider, "_extract_api")
    def test_must_raise_if_route_not_available(
        self, extract_api, log_routes_mock, make_static_dir_mock, SamApiProviderMock, ApiGwServiceMock
    ):
        """start() must raise NoApisDefined when no routes were extracted."""
        routing_list = []  # Empty
        api = Api()
        extract_api.return_value = api
        SamApiProviderMock.extract_api.return_value = api
        SamApiProviderMock.return_value = self.api_provider_mock
        ApiGwServiceMock.return_value = self.apigw_service
        # Now start the service
        local_service = LocalApiService(self.lambda_invoke_context_mock, self.port, self.host, self.static_dir)
        local_service.api_provider.api.routes = routing_list
        with self.assertRaises(NoApisDefined):
            local_service.start()
class TestLocalApiService_print_routes(TestCase):
    """Checks the console summary produced for a deduplicated route list."""

    def test_must_print_routes(self):
        hostname = "host"
        port_number = 123
        routes = [
            Route(path="/1", methods=["GET"], function_name="name1"),
            Route(path="/1", methods=["POST"], function_name="name1"),
            Route(path="/1", methods=["DELETE"], function_name="othername1"),
            Route(path="/2", methods=["GET2"], function_name="name2"),
            Route(path="/3", methods=["GET3"], function_name="name3"),
        ]
        deduped = ApiCollector.dedupe_function_routes(routes)
        # Routes for the same function+path collapse into one mount line.
        expected = {
            "Mounting name1 at http://host:123/1 [GET, POST]",
            "Mounting othername1 at http://host:123/1 [DELETE]",
            "Mounting name2 at http://host:123/2 [GET2]",
            "Mounting name3 at http://host:123/3 [GET3]",
        }
        printed = LocalApiService._print_routes(deduped, hostname, port_number)
        self.assertEqual(expected, set(printed))
class TestLocalApiService_make_static_dir_path(TestCase):
    """Tests for LocalApiService._make_static_dir_path."""

    def test_must_skip_if_none(self):
        result = LocalApiService._make_static_dir_path("something", None)
        self.assertIsNone(result)

    @patch("samcli.commands.local.lib.local_api_service.os")
    def test_must_resolve_with_respect_to_cwd(self, os_mock):
        """Static dir must be resolved relative to the given cwd."""
        static_dir = "mydir"
        cwd = "cwd"
        resolved_path = "cwd/mydir"
        os_mock.path.join.return_value = resolved_path
        os_mock.path.exists.return_value = True  # Fake the path to exist
        result = LocalApiService._make_static_dir_path(cwd, static_dir)
        self.assertEqual(resolved_path, result)
        os_mock.path.join.assert_called_with(cwd, static_dir)
        os_mock.path.exists.assert_called_with(resolved_path)

    @patch("samcli.commands.local.lib.local_api_service.os")
    def test_must_return_none_if_path_not_exists(self, os_mock):
        static_dir = "mydir"
        cwd = "cwd"
        resolved_path = "cwd/mydir"
        os_mock.path.join.return_value = resolved_path
        os_mock.path.exists.return_value = False  # Resolved path does not exist
        result = LocalApiService._make_static_dir_path(cwd, static_dir)
        self.assertIsNone(result)
import pytest
from django.core.exceptions import ValidationError
from guardian.shortcuts import assign_perm
from grandchallenge.cases.widgets import FlexibleImageField, WidgetChoices
from grandchallenge.components.models import ComponentInterface
from grandchallenge.core.guardian import get_objects_for_user
from grandchallenge.uploads.models import UserUpload
from tests.components_tests.factories import ComponentInterfaceFactory
from tests.factories import ImageFactory, UserFactory
from tests.uploads_tests.factories import UserUploadFactory
from tests.utils import get_view_for_user
@pytest.mark.django_db
def test_flexible_image_field_validation():
    """FlexibleImageField must accept only images/uploads the user may use.

    Exercises value_from_datadict/decompress/clean for: empty data, an
    image with/without view permission, an upload owned by the user vs.
    another user, and the sentinel 'IMAGE_UPLOAD' value.
    """
    user = UserFactory()
    upload1 = UserUploadFactory(creator=user)
    upload2 = UserUploadFactory()
    im1, im2 = ImageFactory.create_batch(2)
    # The user may only view im1; im2 must be rejected below.
    assign_perm("cases.view_image", user, im1)
    ci = ComponentInterfaceFactory(kind=ComponentInterface.Kind.IMAGE)
    field = FlexibleImageField(
        image_queryset=get_objects_for_user(user, "cases.view_image"),
        upload_queryset=UserUpload.objects.filter(creator=user).all(),
    )
    # Case: no data submitted.
    parsed_value_for_empty_data = field.widget.value_from_datadict(
        data={}, name=ci.slug, files={}
    )
    decompressed_value_for_missing_value = field.widget.decompress(value=None)
    assert not parsed_value_for_empty_data
    assert decompressed_value_for_missing_value == [None, None]
    # Case: image the user is allowed to view — cleans to the image.
    parsed_value_for_image_with_permission = field.widget.value_from_datadict(
        data={ci.slug: im1.pk}, name=ci.slug, files={}
    )
    decompressed_value_for_image_with_permission = field.widget.decompress(
        im1.pk
    )
    assert (
        parsed_value_for_image_with_permission
        == decompressed_value_for_image_with_permission
        == [
            im1.pk,
            None,
        ]
    )
    assert field.clean(parsed_value_for_image_with_permission) == im1
    # Case: image without view permission — clean must raise.
    parsed_value_for_image_without_permission = (
        field.widget.value_from_datadict(
            data={ci.slug: im2.pk}, name=ci.slug, files={}
        )
    )
    decompressed_value_for_image_without_permission = field.widget.decompress(
        im2.pk
    )
    assert (
        parsed_value_for_image_without_permission
        == decompressed_value_for_image_without_permission
        == [im2.pk, None]
    )
    with pytest.raises(ValidationError):
        field.clean(parsed_value_for_image_without_permission)
    # Case: upload created by the user — cleans to that upload.
    parsed_value_for_upload_from_user = field.widget.value_from_datadict(
        data={ci.slug: str(upload1.pk)}, name=ci.slug, files={}
    )
    decompressed_value_for_upload_from_user = field.widget.decompress(
        str(upload1.pk)
    )
    assert (
        parsed_value_for_upload_from_user
        == decompressed_value_for_upload_from_user
        == [None, [str(upload1.pk)]]
    )
    assert field.clean(parsed_value_for_upload_from_user).get() == upload1
    # Case: upload created by a different user — clean must raise.
    parsed_value_from_upload_from_other_user = (
        field.widget.value_from_datadict(
            data={ci.slug: str(upload2.pk)}, name=ci.slug, files={}
        )
    )
    decompressed_value_for_upload_from_other_user = field.widget.decompress(
        str(upload2.pk)
    )
    assert (
        parsed_value_from_upload_from_other_user
        == decompressed_value_for_upload_from_other_user
        == [None, [str(upload2.pk)]]
    )
    with pytest.raises(ValidationError):
        field.clean(parsed_value_from_upload_from_other_user)
    # Case: the 'IMAGE_UPLOAD' sentinel carries no value — clean must raise.
    parsed_value_for_missing_value = field.widget.value_from_datadict(
        data={ci.slug: "IMAGE_UPLOAD"}, name=ci.slug, files={}
    )
    decompressed_value_for_missing_value = field.widget.decompress(
        "IMAGE_UPLOAD"
    )
    assert (
        parsed_value_for_missing_value
        == decompressed_value_for_missing_value
        == [None, None]
    )
    with pytest.raises(ValidationError):
        field.clean(parsed_value_for_missing_value)
@pytest.mark.django_db
def test_flexible_image_widget(client):
    """The select-image view must render the widget matching the choice.

    IMAGE_SEARCH renders a search input, IMAGE_UPLOAD renders the upload
    widget, and UNDEFINED renders nothing.
    """
    user = UserFactory()
    ci = ComponentInterfaceFactory(kind=ComponentInterface.Kind.IMAGE)
    response = get_view_for_user(
        viewname="cases:select-image-widget",
        client=client,
        user=user,
        data={
            f"WidgetChoice-{ci.slug}": WidgetChoices.IMAGE_SEARCH.name,
            "interface_slug": ci.slug,
        },
    )
    assert '<input class="form-control" type="search"' in str(response.content)
    response2 = get_view_for_user(
        viewname="cases:select-image-widget",
        client=client,
        user=user,
        data={
            f"WidgetChoice-{ci.slug}": WidgetChoices.IMAGE_UPLOAD.name,
            "interface_slug": ci.slug,
        },
    )
    assert 'class="user-upload"' in str(response2.content)
    response3 = get_view_for_user(
        viewname="cases:select-image-widget",
        client=client,
        user=user,
        data={
            f"WidgetChoice-{ci.slug}": WidgetChoices.UNDEFINED.name,
            "interface_slug": ci.slug,
        },
    )
    assert response3.content == b""
import os
import shutil
import platform
from pathlib import Path
from openpype.hosts.fusion import (
FUSION_HOST_DIR,
FUSION_VERSIONS_DICT,
get_fusion_version,
)
from openpype.lib.applications import (
PreLaunchHook,
LaunchTypes,
ApplicationLaunchFailed,
)
class FusionCopyPrefsPrelaunch(PreLaunchHook):
    """
    Prepares local Fusion profile directory, copies existing Fusion profile.
    This also sets FUSION MasterPrefs variable, which is used
    to apply Master.prefs file to override some Fusion profile settings to:
        - enable the OpenPype menu
        - force Python 3 over Python 2
        - force English interface
    Master.prefs is defined in openpype/hosts/fusion/deploy/fusion_shared.prefs
    """

    app_groups = {"fusion"}
    order = 2
    launch_types = {LaunchTypes.local}

    def get_fusion_profile_name(self, profile_version) -> str:
        # Returns 'Default', unless FUSION16_PROFILE is set
        return os.getenv(f"FUSION{profile_version}_PROFILE", "Default")

    def get_fusion_profile_dir(self, profile_version) -> Path:
        """Return the profile dir from FUSION<ver>_PROFILE_DIR, or None."""
        # Get FUSION_PROFILE_DIR variable
        fusion_profile = self.get_fusion_profile_name(profile_version)
        fusion_var_prefs_dir = os.getenv(
            f"FUSION{profile_version}_PROFILE_DIR"
        )

        # Check if FUSION_PROFILE_DIR exists
        if fusion_var_prefs_dir and Path(fusion_var_prefs_dir).is_dir():
            fu_prefs_dir = Path(fusion_var_prefs_dir, fusion_profile)
            self.log.info(f"{fusion_var_prefs_dir} is set to {fu_prefs_dir}")
            return fu_prefs_dir

    def get_profile_source(self, profile_version) -> Path:
        """Get Fusion preferences profile location.
        See Per-User_Preferences_and_Paths on VFXpedia for reference.
        """
        fusion_profile = self.get_fusion_profile_name(profile_version)
        profile_source = self.get_fusion_profile_dir(profile_version)
        if profile_source:
            return profile_source
        # otherwise get default location of the profile folder
        fu_prefs_dir = f"Blackmagic Design/Fusion/Profiles/{fusion_profile}"
        if platform.system() == "Windows":
            profile_source = Path(os.getenv("AppData"), fu_prefs_dir)
        elif platform.system() == "Darwin":
            profile_source = Path(
                "~/Library/Application Support/", fu_prefs_dir
            ).expanduser()
        elif platform.system() == "Linux":
            profile_source = Path("~/.fusion", fu_prefs_dir).expanduser()
        self.log.info(
            f"Locating source Fusion prefs directory: {profile_source}"
        )
        return profile_source

    def get_copy_fusion_prefs_settings(self):
        """Read the copy-prefs options from the project settings."""
        # Get copy preferences options from the global application settings
        copy_fusion_settings = self.data["project_settings"]["fusion"].get(
            "copy_fusion_settings", {}
        )
        if not copy_fusion_settings:
            self.log.error("Copy prefs settings not found")
        copy_status = copy_fusion_settings.get("copy_status", False)
        force_sync = copy_fusion_settings.get("force_sync", False)
        copy_path = copy_fusion_settings.get("copy_path") or None
        if copy_path:
            copy_path = Path(copy_path).expanduser()
        return copy_status, copy_path, force_sync

    def copy_fusion_profile(
        self, copy_from: Path, copy_to: Path, force_sync: bool
    ) -> None:
        """On the first Fusion launch copy the contents of Fusion profile
        directory to the working predefined location. If the Openpype profile
        folder exists, skip copying, unless re-sync is checked.
        If the prefs were not copied on the first launch,
        clean Fusion profile will be created in fu_profile_dir.
        """
        if copy_to.exists() and not force_sync:
            self.log.info(
                "Destination Fusion preferences folder already exists: "
                f"{copy_to} "
            )
            return
        self.log.info("Starting copying Fusion preferences")
        self.log.debug(f"force_sync option is set to {force_sync}")
        try:
            copy_to.mkdir(exist_ok=True, parents=True)
        except PermissionError:
            self.log.warning(f"Creating the folder not permitted at {copy_to}")
            return
        if not copy_from.exists():
            self.log.warning(f"Fusion preferences not found in {copy_from}")
            return
        # Only copy preference-related file types, not the whole profile.
        for file in copy_from.iterdir():
            if file.suffix in (
                ".prefs",
                ".def",
                ".blocklist",
                ".fu",
                ".toolbars",
            ):
                # convert Path to str to be compatible with Python 3.6+
                shutil.copy(str(file), str(copy_to))
        self.log.info(
            f"Successfully copied preferences: {copy_from} to {copy_to}"
        )

    def execute(self):
        (
            copy_status,
            fu_profile_dir,
            force_sync,
        ) = self.get_copy_fusion_prefs_settings()

        # Get launched application context and return correct app version
        app_name = self.launch_context.env.get("AVALON_APP_NAME")
        app_version = get_fusion_version(app_name)
        if app_version is None:
            version_names = ", ".join(str(x) for x in FUSION_VERSIONS_DICT)
            raise ApplicationLaunchFailed(
                "Unable to detect valid Fusion version number from app "
                f"name: {app_name}.\nMake sure to include at least a digit "
                "to indicate the Fusion version like '18'.\n"
                f"Detectable Fusion versions are: {version_names}"
            )

        _, profile_version = FUSION_VERSIONS_DICT[app_version]
        fu_profile = self.get_fusion_profile_name(profile_version)

        # do a copy of Fusion profile if copy_status toggle is enabled
        if copy_status and fu_profile_dir is not None:
            profile_source = self.get_profile_source(profile_version)
            dest_folder = Path(fu_profile_dir, fu_profile)
            self.copy_fusion_profile(profile_source, dest_folder, force_sync)

        # Add temporary profile directory variables to customize Fusion
        # to define where it can read custom scripts and tools from
        fu_profile_dir_variable = f"FUSION{profile_version}_PROFILE_DIR"
        self.log.info(f"Setting {fu_profile_dir_variable}: {fu_profile_dir}")
        self.launch_context.env[fu_profile_dir_variable] = str(fu_profile_dir)

        # Add custom Fusion Master Prefs and the temporary
        # profile directory variables to customize Fusion
        # to define where it can read custom scripts and tools from
        master_prefs_variable = f"FUSION{profile_version}_MasterPrefs"
        master_prefs = Path(FUSION_HOST_DIR, "deploy", "fusion_shared.prefs")
        self.log.info(f"Setting {master_prefs_variable}: {master_prefs}")
        self.launch_context.env[master_prefs_variable] = str(master_prefs)
1,874 | activate | """
This type stub file was generated by pyright.
"""
class _ANY(object):
    """
    A helper object that compares equal to everything. Copied from
    unittest.mock
    """
    # Stubbed comparison operators; per the class docstring the real
    # implementation compares equal to (and never unequal to) everything.
    def __eq__(self, other) -> bool: ...
    def __ne__(self, other) -> bool: ...
    def __repr__(self): ...

# Singleton wildcard used in expected_params to skip validating a value.
ANY = _ANY()
class Stubber(object):
    """
    This class will allow you to stub out requests so you don't have to hit
    an endpoint to write tests. Responses are returned first in, first out.
    If operations are called out of order, or are called with no remaining
    queued responses, an error will be raised.
    **Example:**
    ::
        import datetime
        import botocore.session
        from botocore.stub import Stubber
        s3 = botocore.session.get_session().create_client('s3')
        stubber = Stubber(s3)
        response = {
            'IsTruncated': False,
            'Name': 'test-bucket',
            'MaxKeys': 1000, 'Prefix': '',
            'Contents': [{
                'Key': 'test.txt',
                'ETag': '"abc123"',
                'StorageClass': 'STANDARD',
                'LastModified': datetime.datetime(2016, 1, 20, 22, 9),
                'Owner': {'ID': 'abc123', 'DisplayName': 'myname'},
                'Size': 14814
            }],
            'EncodingType': 'url',
            'ResponseMetadata': {
                'RequestId': 'abc123',
                'HTTPStatusCode': 200,
                'HostId': 'abc123'
            },
            'Marker': ''
        }
        expected_params = {'Bucket': 'test-bucket'}
        stubber.add_response('list_objects', response, expected_params)
        stubber.activate()
        service_response = s3.list_objects(Bucket='test-bucket')
        assert service_response == response
    This class can also be called as a context manager, which will handle
    activation / deactivation for you.
    **Example:**
    ::
        import datetime
        import botocore.session
        from botocore.stub import Stubber
        s3 = botocore.session.get_session().create_client('s3')
        response = {
            "Owner": {
                "ID": "foo",
                "DisplayName": "bar"
            },
            "Buckets": [{
                "CreationDate": datetime.datetime(2016, 1, 20, 22, 9),
                "Name": "baz"
            }]
        }
        with Stubber(s3) as stubber:
            stubber.add_response('list_buckets', response, {})
            service_response = s3.list_buckets()
        assert service_response == response
    If you have an input parameter that is a randomly generated value, or you
    otherwise don't care about its value, you can use ``stub.ANY`` to ignore
    it in validation.
    **Example:**
    ::
        import datetime
        import botocore.session
        from botocore.stub import Stubber, ANY
        s3 = botocore.session.get_session().create_client('s3')
        stubber = Stubber(s3)
        response = {
            'IsTruncated': False,
            'Name': 'test-bucket',
            'MaxKeys': 1000, 'Prefix': '',
            'Contents': [{
                'Key': 'test.txt',
                'ETag': '"abc123"',
                'StorageClass': 'STANDARD',
                'LastModified': datetime.datetime(2016, 1, 20, 22, 9),
                'Owner': {'ID': 'abc123', 'DisplayName': 'myname'},
                'Size': 14814
            }],
            'EncodingType': 'url',
            'ResponseMetadata': {
                'RequestId': 'abc123',
                'HTTPStatusCode': 200,
                'HostId': 'abc123'
            },
            'Marker': ''
        }
        expected_params = {'Bucket': ANY}
        stubber.add_response('list_objects', response, expected_params)
        with stubber:
            service_response = s3.list_objects(Bucket='test-bucket')
        assert service_response == response
    """
    def __init__(self, client) -> None:
        """
        :param client: The client to add your stubs to.
        """
        ...
    # Context-manager protocol delegates to activate()/deactivate().
    # The return annotation is quoted: a bare `Stubber` would be evaluated
    # eagerly inside the class body and raise NameError in a .py module.
    def __enter__(self) -> "Stubber": ...
    def __exit__(self, exception_type, exception_value, traceback): ...
    def activate(self):
        """
        Activates the stubber on the client
        """
        ...
    def deactivate(self):
        """
        Deactivates the stubber on the client
        """
        ...
    def add_response(self, method, service_response, expected_params=...):
        """
        Adds a service response to the response queue. This will be validated
        against the service model to ensure correctness. It should be noted,
        however, that while missing attributes are often considered correct,
        your code may not function properly if you leave them out. Therefore
        you should always fill in every value you see in a typical response for
        your particular request.
        :param method: The name of the client method to stub.
        :type method: str
        :param service_response: A dict response stub. Provided parameters will
            be validated against the service model.
        :type service_response: dict
        :param expected_params: A dictionary of the expected parameters to
            be called for the provided service response. The parameters match
            the names of keyword arguments passed to that client call. If
            any of the parameters differ a ``StubResponseError`` is thrown.
            You can use stub.ANY to indicate a particular parameter to ignore
            in validation. stub.ANY is only valid for top level params.
        """
        ...
    def add_client_error(
        self,
        method,
        service_error_code=...,
        service_message=...,
        http_status_code=...,
        service_error_meta=...,
        expected_params=...,
        response_meta=...,
    ):
        """
        Adds a ``ClientError`` to the response queue.
        :param method: The name of the service method to return the error on.
        :type method: str
        :param service_error_code: The service error code to return,
            e.g. ``NoSuchBucket``
        :type service_error_code: str
        :param service_message: The service message to return, e.g.
            'The specified bucket does not exist.'
        :type service_message: str
        :param http_status_code: The HTTP status code to return, e.g. 404, etc
        :type http_status_code: int
        :param service_error_meta: Additional keys to be added to the
            service Error
        :type service_error_meta: dict
        :param expected_params: A dictionary of the expected parameters to
            be called for the provided service response. The parameters match
            the names of keyword arguments passed to that client call. If
            any of the parameters differ a ``StubResponseError`` is thrown.
            You can use stub.ANY to indicate a particular parameter to ignore
            in validation.
        :param response_meta: Additional keys to be added to the
            response's ResponseMetadata
        :type response_meta: dict
        """
        ...
    def assert_no_pending_responses(self):
        """
        Asserts that all expected calls were made.
        """
        ...
#
# This file is part of the PyMeasure package.
#
# Copyright (c) 2013-2023 PyMeasure Developers
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
import logging
from pymeasure.instruments import Instrument
from pymeasure.instruments.validators import truncated_range
log = logging.getLogger(__name__)
log.addHandler(logging.NullHandler())
class ThorlabsPM100USB(Instrument):
"""Represents Thorlabs PM100USB powermeter."""
def __init__(self, adapter, name="ThorlabsPM100USB powermeter", **kwargs):
super().__init__(
adapter, name, **kwargs
)
self._set_flags()
wavelength_min = Instrument.measurement(
"SENS:CORR:WAV? MIN", "Measure minimum wavelength, in nm"
)
wavelength_max = Instrument.measurement(
"SENS:CORR:WAV? MAX", "Measure maximum wavelength, in nm"
)
@property
def wavelength(self):
"""Control the wavelength in nm."""
value = self.values("SENSE:CORR:WAV?")[0]
return value
@wavelength.setter
def wavelength(self, value):
"""Wavelength in nm."""
if self.wavelength_settable:
# Store min and max wavelength to only request them once.
if not hasattr(self, "_wavelength_min"):
self._wavelength_min = self.wavelength_min
if not hasattr(self, "_wavelength_max"):
self._wavelength_max = self.wavelength_max
value = truncated_range(
value, [self._wavelength_min, self._wavelength_max]
)
self.write(f"SENSE:CORR:WAV {value}")
else:
raise AttributeError(
f"{self.sensor_name} does not allow setting the wavelength."
)
@property
def METHOD_NAME(self):
"""Measure the power in W."""
if self.is_power_sensor:
return self.values("MEAS:POW?")[0]
else:
raise AttributeError(f"{self.sensor_name} is not a power sensor.")
@property
def energy(self):
"""Measure the energy in J."""
if self.is_energy_sensor:
return self.values("MEAS:ENER?")[0]
else:
raise AttributeError(
f"{self.sensor_name} is not an energy sensor."
)
    def _set_flags(self):
        """Query the sensor identity (SYST:SENSOR:IDN?) and decode its flags.

        Populates sensor_* attributes and the boolean capability flags.
        Raises OSError when no sensor is connected.
        """
        response = self.values("SYST:SENSOR:IDN?")
        if response[0] == "no sensor":
            raise OSError("No sensor connected.")
        self.sensor_name = response[0]
        self.sensor_sn = response[1]
        self.sensor_cal_msg = response[2]
        self.sensor_type = response[3]
        self.sensor_subtype = response[4]
        _flags_str = response[5]
        # interpretation of the flags, see p. 49 of the manual:
        # https://www.thorlabs.de/_sd.cfm?fileName=17654-D02.pdf&partNumber=PM100D
        # Convert to binary representation and pad zeros to 9 bit for sensors
        # where not all flags are present.
        _flags_str = format(int(_flags_str), "09b")
        # Reverse the order so it matches the flag order from the manual, i.e.
        # from decimal values from 1 to 256.
        _flags_str = _flags_str[::-1]
        # Convert to boolean.
        self.flags = [x == "1" for x in _flags_str]
        # setting the flags; _dn are unused; decimal values as comments
        (
            self.is_power_sensor, # 1
            self.is_energy_sensor, # 2
            _d4, # 4
            _d8, # 8
            self.response_settable, # 16
            self.wavelength_settable, # 32
            self.tau_settable, # 64
            _d128, # 128
            self.has_temperature_sensor, # 256
        ) = self.flags |
1,876 | test debug function calls device function | from numba.tests.support import override_config
from numba.cuda.testing import skip_on_cudasim
from numba import cuda
from numba.core import types
from numba.cuda.testing import CUDATestCase
import itertools
import re
import unittest
@skip_on_cudasim('Simulator does not produce debug dumps')
class TestCudaDebugInfo(CUDATestCase):
"""
These tests only checks the compiled PTX for debuginfo section
"""
    def _getasm(self, fn, sig):
        """Compile *fn* for *sig* and return the generated PTX source."""
        fn.compile(sig)
        return fn.inspect_asm(sig)
    def _check(self, fn, sig, expect):
        """Assert the PTX for *fn* has (expect=True) or lacks a .debug_info section."""
        asm = self._getasm(fn, sig=sig)
        re_section_dbginfo = re.compile(r"\.section\s+\.debug_info\s+{")
        match = re_section_dbginfo.search(asm)
        # Pick the matching unittest assertion; pass the asm as the failure
        # message so a mismatch shows the full PTX.
        assertfn = self.assertIsNotNone if expect else self.assertIsNone
        assertfn(match, msg=asm)
def test_no_debuginfo_in_asm(self):
@cuda.jit(debug=False)
def foo(x):
x[0] = 1
self._check(foo, sig=(types.int32[:],), expect=False)
def test_debuginfo_in_asm(self):
@cuda.jit(debug=True, opt=False)
def foo(x):
x[0] = 1
self._check(foo, sig=(types.int32[:],), expect=True)
def test_environment_override(self):
with override_config('CUDA_DEBUGINFO_DEFAULT', 1):
# Using default value
@cuda.jit(opt=False)
def foo(x):
x[0] = 1
self._check(foo, sig=(types.int32[:],), expect=True)
# User override default value
@cuda.jit(debug=False)
def bar(x):
x[0] = 1
self._check(bar, sig=(types.int32[:],), expect=False)
def test_issue_5835(self):
# Invalid debug metadata would segfault NVVM when any function was
# compiled with debug turned on and optimization off. This eager
# compilation should not crash anything.
@cuda.jit((types.int32[::1],), debug=True, opt=False)
def f(x):
x[0] = 0
def test_wrapper_has_debuginfo(self):
sig = (types.int32[::1],)
@cuda.jit(sig, debug=True, opt=0)
def f(x):
x[0] = 1
llvm_ir = f.inspect_llvm(sig)
defines = [line for line in llvm_ir.splitlines()
if 'define void @"_ZN6cudapy' in line]
# Make sure we only found one definition
self.assertEqual(len(defines), 1)
wrapper_define = defines[0]
self.assertIn('!dbg', wrapper_define)
def test_debug_function_calls_internal_impl(self):
# Calling a function in a module generated from an implementation
# internal to Numba requires multiple modules to be compiled with NVVM -
# the internal implementation, and the caller. This example uses two
# modules because the `in (2, 3)` is implemented with:
#
# numba::cpython::listobj::in_seq::$3clocals$3e::seq_contains_impl$242(
# UniTuple<long long, 2>,
# int
# )
#
# This is condensed from this reproducer in Issue 5311:
# https://github.com/numba/numba/issues/5311#issuecomment-674206587
@cuda.jit((types.int32[:], types.int32[:]), debug=True, opt=False)
def f(inp, outp):
outp[0] = 1 if inp[0] in (2, 3) else 3
    def METHOD_NAME(self):
        # Calling a device function requires compilation of multiple modules
        # with NVVM - one for the caller and one for the callee. This checks
        # that we don't cause an NVVM error in this case.
        @cuda.jit(device=True, debug=True, opt=0)
        def threadid():
            return cuda.blockDim.x * cuda.blockIdx.x + cuda.threadIdx.x
        # Eager compilation (an explicit signature is given) happens at
        # decoration time, so merely defining the kernel exercises the debug
        # linking path; the test passes if no exception is raised.
        @cuda.jit((types.int32[:],), debug=True, opt=0)
        def kernel(arr):
            i = cuda.grid(1)
            if i < len(arr):
                arr[i] = threadid()
def _test_chained_device_function(self, kernel_debug, f1_debug, f2_debug):
@cuda.jit(device=True, debug=f2_debug, opt=False)
def f2(x):
return x + 1
@cuda.jit(device=True, debug=f1_debug, opt=False)
def f1(x, y):
return x - f2(y)
@cuda.jit((types.int32, types.int32), debug=kernel_debug, opt=False)
def kernel(x, y):
f1(x, y)
kernel[1, 1](1, 2)
def test_chained_device_function(self):
# Calling a device function that calls another device function from a
# kernel with should succeed regardless of which jit decorators have
# debug=True. See Issue #7159.
debug_opts = itertools.product(*[(True, False)] * 3)
for kernel_debug, f1_debug, f2_debug in debug_opts:
with self.subTest(kernel_debug=kernel_debug,
f1_debug=f1_debug,
f2_debug=f2_debug):
self._test_chained_device_function(kernel_debug,
f1_debug,
f2_debug)
def _test_chained_device_function_two_calls(self, kernel_debug, f1_debug,
f2_debug):
@cuda.jit(device=True, debug=f2_debug, opt=False)
def f2(x):
return x + 1
@cuda.jit(device=True, debug=f1_debug, opt=False)
def f1(x, y):
return x - f2(y)
@cuda.jit(debug=kernel_debug, opt=False)
def kernel(x, y):
f1(x, y)
f2(x)
kernel[1, 1](1, 2)
def test_chained_device_function_two_calls(self):
# Calling a device function that calls a leaf device function from a
# kernel, and calling the leaf device function from the kernel should
# succeed, regardless of which jit decorators have debug=True. See
# Issue #7159.
debug_opts = itertools.product(*[(True, False)] * 3)
for kernel_debug, f1_debug, f2_debug in debug_opts:
with self.subTest(kernel_debug=kernel_debug,
f1_debug=f1_debug,
f2_debug=f2_debug):
self._test_chained_device_function_two_calls(kernel_debug,
f1_debug,
f2_debug)
def test_chained_device_three_functions(self):
# Like test_chained_device_function, but with enough functions (three)
# to ensure that the recursion visits all the way down the call tree
# when fixing linkage of functions for debug.
def three_device_fns(kernel_debug, leaf_debug):
@cuda.jit(device=True, debug=leaf_debug, opt=False)
def f3(x):
return x * x
@cuda.jit(device=True)
def f2(x):
return f3(x) + 1
@cuda.jit(device=True)
def f1(x, y):
return x - f2(y)
@cuda.jit(debug=kernel_debug, opt=False)
def kernel(x, y):
f1(x, y)
kernel[1, 1](1, 2)
# Check when debug on the kernel, on the leaf, and not on any function.
three_device_fns(kernel_debug=True, leaf_debug=True)
three_device_fns(kernel_debug=True, leaf_debug=False)
three_device_fns(kernel_debug=False, leaf_debug=True)
three_device_fns(kernel_debug=False, leaf_debug=False)
if __name__ == '__main__':
unittest.main() |
1,877 | test main | # test_getopt.py
# David Goodger <dgoodger@bigfoot.com> 2000-08-19
from test.support import verbose, run_doctest, run_unittest, EnvironmentVarGuard
import unittest
import getopt
sentinel = object()
class GetoptTests(unittest.TestCase):
def setUp(self):
self.env = EnvironmentVarGuard()
if "POSIXLY_CORRECT" in self.env:
del self.env["POSIXLY_CORRECT"]
def tearDown(self):
self.env.__exit__()
del self.env
def assertError(self, *args, **kwargs):
self.assertRaises(getopt.GetoptError, *args, **kwargs)
def test_short_has_arg(self):
self.assertTrue(getopt.short_has_arg('a', 'a:'))
self.assertFalse(getopt.short_has_arg('a', 'a'))
self.assertError(getopt.short_has_arg, 'a', 'b')
def test_long_has_args(self):
has_arg, option = getopt.long_has_args('abc', ['abc='])
self.assertTrue(has_arg)
self.assertEqual(option, 'abc')
has_arg, option = getopt.long_has_args('abc', ['abc'])
self.assertFalse(has_arg)
self.assertEqual(option, 'abc')
has_arg, option = getopt.long_has_args('abc', ['abcd'])
self.assertFalse(has_arg)
self.assertEqual(option, 'abcd')
self.assertError(getopt.long_has_args, 'abc', ['def'])
self.assertError(getopt.long_has_args, 'abc', [])
self.assertError(getopt.long_has_args, 'abc', ['abcd','abcde'])
def test_do_shorts(self):
opts, args = getopt.do_shorts([], 'a', 'a', [])
self.assertEqual(opts, [('-a', '')])
self.assertEqual(args, [])
opts, args = getopt.do_shorts([], 'a1', 'a:', [])
self.assertEqual(opts, [('-a', '1')])
self.assertEqual(args, [])
#opts, args = getopt.do_shorts([], 'a=1', 'a:', [])
#self.assertEqual(opts, [('-a', '1')])
#self.assertEqual(args, [])
opts, args = getopt.do_shorts([], 'a', 'a:', ['1'])
self.assertEqual(opts, [('-a', '1')])
self.assertEqual(args, [])
opts, args = getopt.do_shorts([], 'a', 'a:', ['1', '2'])
self.assertEqual(opts, [('-a', '1')])
self.assertEqual(args, ['2'])
self.assertError(getopt.do_shorts, [], 'a1', 'a', [])
self.assertError(getopt.do_shorts, [], 'a', 'a:', [])
def test_do_longs(self):
opts, args = getopt.do_longs([], 'abc', ['abc'], [])
self.assertEqual(opts, [('--abc', '')])
self.assertEqual(args, [])
opts, args = getopt.do_longs([], 'abc=1', ['abc='], [])
self.assertEqual(opts, [('--abc', '1')])
self.assertEqual(args, [])
opts, args = getopt.do_longs([], 'abc=1', ['abcd='], [])
self.assertEqual(opts, [('--abcd', '1')])
self.assertEqual(args, [])
opts, args = getopt.do_longs([], 'abc', ['ab', 'abc', 'abcd'], [])
self.assertEqual(opts, [('--abc', '')])
self.assertEqual(args, [])
# Much like the preceding, except with a non-alpha character ("-") in
# option name that precedes "="; failed in
# http://python.org/sf/126863
opts, args = getopt.do_longs([], 'foo=42', ['foo-bar', 'foo=',], [])
self.assertEqual(opts, [('--foo', '42')])
self.assertEqual(args, [])
self.assertError(getopt.do_longs, [], 'abc=1', ['abc'], [])
self.assertError(getopt.do_longs, [], 'abc', ['abc='], [])
def test_getopt(self):
# note: the empty string between '-a' and '--beta' is significant:
# it simulates an empty string option argument ('-a ""') on the
# command line.
cmdline = ['-a', '1', '-b', '--alpha=2', '--beta', '-a', '3', '-a',
'', '--beta', 'arg1', 'arg2']
opts, args = getopt.getopt(cmdline, 'a:b', ['alpha=', 'beta'])
self.assertEqual(opts, [('-a', '1'), ('-b', ''),
('--alpha', '2'), ('--beta', ''),
('-a', '3'), ('-a', ''), ('--beta', '')])
# Note ambiguity of ('-b', '') and ('-a', '') above. This must be
# accounted for in the code that calls getopt().
self.assertEqual(args, ['arg1', 'arg2'])
self.assertError(getopt.getopt, cmdline, 'a:b', ['alpha', 'beta'])
def test_gnu_getopt(self):
# Test handling of GNU style scanning mode.
cmdline = ['-a', 'arg1', '-b', '1', '--alpha', '--beta=2']
# GNU style
opts, args = getopt.gnu_getopt(cmdline, 'ab:', ['alpha', 'beta='])
self.assertEqual(args, ['arg1'])
self.assertEqual(opts, [('-a', ''), ('-b', '1'),
('--alpha', ''), ('--beta', '2')])
# recognize "-" as an argument
opts, args = getopt.gnu_getopt(['-a', '-', '-b', '-'], 'ab:', [])
self.assertEqual(args, ['-'])
self.assertEqual(opts, [('-a', ''), ('-b', '-')])
# Posix style via +
opts, args = getopt.gnu_getopt(cmdline, '+ab:', ['alpha', 'beta='])
self.assertEqual(opts, [('-a', '')])
self.assertEqual(args, ['arg1', '-b', '1', '--alpha', '--beta=2'])
# Posix style via POSIXLY_CORRECT
self.env["POSIXLY_CORRECT"] = "1"
opts, args = getopt.gnu_getopt(cmdline, 'ab:', ['alpha', 'beta='])
self.assertEqual(opts, [('-a', '')])
self.assertEqual(args, ['arg1', '-b', '1', '--alpha', '--beta=2'])
def test_libref_examples(self):
s = """
Examples from the Library Reference: Doc/lib/libgetopt.tex
An example using only Unix style options:
>>> import getopt
>>> args = '-a -b -cfoo -d bar a1 a2'.split()
>>> args
['-a', '-b', '-cfoo', '-d', 'bar', 'a1', 'a2']
>>> optlist, args = getopt.getopt(args, 'abc:d:')
>>> optlist
[('-a', ''), ('-b', ''), ('-c', 'foo'), ('-d', 'bar')]
>>> args
['a1', 'a2']
Using long option names is equally easy:
>>> s = '--condition=foo --testing --output-file abc.def -x a1 a2'
>>> args = s.split()
>>> args
['--condition=foo', '--testing', '--output-file', 'abc.def', '-x', 'a1', 'a2']
>>> optlist, args = getopt.getopt(args, 'x', [
... 'condition=', 'output-file=', 'testing'])
>>> optlist
[('--condition', 'foo'), ('--testing', ''), ('--output-file', 'abc.def'), ('-x', '')]
>>> args
['a1', 'a2']
"""
import types
m = types.ModuleType("libreftest", s)
run_doctest(m, verbose)
def test_issue4629(self):
longopts, shortopts = getopt.getopt(['--help='], '', ['help='])
self.assertEqual(longopts, [('--help', '')])
longopts, shortopts = getopt.getopt(['--help=x'], '', ['help='])
self.assertEqual(longopts, [('--help', 'x')])
self.assertRaises(getopt.GetoptError, getopt.getopt, ['--help='], '', ['help'])
def METHOD_NAME():
    """Entry point used by the regrtest framework: run the GetoptTests case."""
    run_unittest(GetoptTests)
if __name__ == "__main__":
METHOD_NAME() |
1,878 | test constraint weighted undirected | """Unit tests for the :mod:`networkx.algorithms.structuralholes` module."""
import math
import pytest
import networkx as nx
from networkx.classes.tests import dispatch_interface
class TestStructuralHoles:
"""Unit tests for computing measures of structural holes.
The expected values for these functions were originally computed using the
proprietary software `UCINET`_ and the free software `IGraph`_ , and then
computed by hand to make sure that the results are correct.
.. _UCINET: https://sites.google.com/site/ucinetsoftware/home
.. _IGraph: http://igraph.org/
"""
def setup_method(self):
self.D = nx.DiGraph()
self.D.add_edges_from([(0, 1), (0, 2), (1, 0), (2, 1)])
self.D_weights = {(0, 1): 2, (0, 2): 2, (1, 0): 1, (2, 1): 1}
# Example from http://www.analytictech.com/connections/v20(1)/holes.htm
self.G = nx.Graph()
self.G.add_edges_from(
[
("A", "B"),
("A", "F"),
("A", "G"),
("A", "E"),
("E", "G"),
("F", "G"),
("B", "G"),
("B", "D"),
("D", "G"),
("G", "C"),
]
)
self.G_weights = {
("A", "B"): 2,
("A", "F"): 3,
("A", "G"): 5,
("A", "E"): 2,
("E", "G"): 8,
("F", "G"): 3,
("B", "G"): 4,
("B", "D"): 1,
("D", "G"): 3,
("G", "C"): 10,
}
# This additionally tests the @nx._dispatch mechanism, treating
# nx.mutual_weight as if it were a re-implementation from another package
@pytest.mark.parametrize("wrapper", [lambda x: x, dispatch_interface.convert])
def test_constraint_directed(self, wrapper):
constraint = nx.constraint(wrapper(self.D))
assert constraint[0] == pytest.approx(1.003, abs=1e-3)
assert constraint[1] == pytest.approx(1.003, abs=1e-3)
assert constraint[2] == pytest.approx(1.389, abs=1e-3)
def test_effective_size_directed(self):
effective_size = nx.effective_size(self.D)
assert effective_size[0] == pytest.approx(1.167, abs=1e-3)
assert effective_size[1] == pytest.approx(1.167, abs=1e-3)
assert effective_size[2] == pytest.approx(1, abs=1e-3)
def test_constraint_weighted_directed(self):
D = self.D.copy()
nx.set_edge_attributes(D, self.D_weights, "weight")
constraint = nx.constraint(D, weight="weight")
assert constraint[0] == pytest.approx(0.840, abs=1e-3)
assert constraint[1] == pytest.approx(1.143, abs=1e-3)
assert constraint[2] == pytest.approx(1.378, abs=1e-3)
def test_effective_size_weighted_directed(self):
D = self.D.copy()
nx.set_edge_attributes(D, self.D_weights, "weight")
effective_size = nx.effective_size(D, weight="weight")
assert effective_size[0] == pytest.approx(1.567, abs=1e-3)
assert effective_size[1] == pytest.approx(1.083, abs=1e-3)
assert effective_size[2] == pytest.approx(1, abs=1e-3)
def test_constraint_undirected(self):
constraint = nx.constraint(self.G)
assert constraint["G"] == pytest.approx(0.400, abs=1e-3)
assert constraint["A"] == pytest.approx(0.595, abs=1e-3)
assert constraint["C"] == pytest.approx(1, abs=1e-3)
def test_effective_size_undirected_borgatti(self):
effective_size = nx.effective_size(self.G)
assert effective_size["G"] == pytest.approx(4.67, abs=1e-2)
assert effective_size["A"] == pytest.approx(2.50, abs=1e-2)
assert effective_size["C"] == pytest.approx(1, abs=1e-2)
def test_effective_size_undirected(self):
G = self.G.copy()
nx.set_edge_attributes(G, 1, "weight")
effective_size = nx.effective_size(G, weight="weight")
assert effective_size["G"] == pytest.approx(4.67, abs=1e-2)
assert effective_size["A"] == pytest.approx(2.50, abs=1e-2)
assert effective_size["C"] == pytest.approx(1, abs=1e-2)
def METHOD_NAME(self):
G = self.G.copy()
nx.set_edge_attributes(G, self.G_weights, "weight")
constraint = nx.constraint(G, weight="weight")
assert constraint["G"] == pytest.approx(0.299, abs=1e-3)
assert constraint["A"] == pytest.approx(0.795, abs=1e-3)
assert constraint["C"] == pytest.approx(1, abs=1e-3)
def test_effective_size_weighted_undirected(self):
G = self.G.copy()
nx.set_edge_attributes(G, self.G_weights, "weight")
effective_size = nx.effective_size(G, weight="weight")
assert effective_size["G"] == pytest.approx(5.47, abs=1e-2)
assert effective_size["A"] == pytest.approx(2.47, abs=1e-2)
assert effective_size["C"] == pytest.approx(1, abs=1e-2)
def test_constraint_isolated(self):
G = self.G.copy()
G.add_node(1)
constraint = nx.constraint(G)
assert math.isnan(constraint[1])
def test_effective_size_isolated(self):
G = self.G.copy()
G.add_node(1)
nx.set_edge_attributes(G, self.G_weights, "weight")
effective_size = nx.effective_size(G, weight="weight")
assert math.isnan(effective_size[1])
def test_effective_size_borgatti_isolated(self):
G = self.G.copy()
G.add_node(1)
effective_size = nx.effective_size(G)
assert math.isnan(effective_size[1]) |
1,879 | write entry | # Copyright 2023 OpenC3, Inc.
# All Rights Reserved.
#
# This program is free software; you can modify and/or redistribute it
# under the terms of the GNU Affero General Public License
# as published by the Free Software Foundation; version 3 with
# attribution addendums as found in the LICENSE.txt
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# This file may also be used under the terms of a commercial license
# if purchased from OpenC3, Inc.
import copy
from datetime import datetime, timezone
from .log_writer import LogWriter
from openc3.environment import OPENC3_SCOPE
from openc3.utilities.time import to_nsec_from_epoch
from openc3.utilities.logger import Logger
# Creates a log file of stream data for either reads or writes. Can automatically
# cycle the log based on when the log file reaches a predefined size or based on time.
class StreamLog(LogWriter):
# # self.return [String] Original name passed to stream log
# attr_reader :orig_name
# The allowable log types
LOG_TYPES = ["READ", "WRITE"]
# self.param log_name [String] The name of the stream log. Typically matches the
# name of the corresponding interface
# self.param log_type [Symbol] The type of log to create. Must be 'READ'
# or 'WRITE'.
# self.param cycle_time [Integer] The amount of time in seconds before creating
# a new log file. This can be combined with cycle_size.
# self.param cycle_size [Integer] The size in bytes before creating a new log
# file. This can be combined with cycle_time.
# self.param cycle_hour [Integer] The time at which to cycle the log. Combined with
# cycle_minute to cycle the log daily at the specified time. If None, the log
# will be cycled hourly at the specified cycle_minute.
# self.param cycle_minute [Integer] The time at which to cycle the log. See cycle_hour
# for more information.
    def __init__(
        self,
        log_name,
        log_type,
        cycle_time=600, # 10 minutes, matches time in target_model
        cycle_size=50_000_000, # 50MB, matches size in target_model
        cycle_hour=None,
        cycle_minute=None,
    ):
        """Create a stream log writer rooted at <scope>/stream_logs/.

        log_name typically matches the interface name; log_type must be
        'READ' or 'WRITE' (RuntimeError otherwise). See the comment block
        above for the cycle_* parameters.
        """
        if log_type not in StreamLog.LOG_TYPES:
            raise RuntimeError("log_type must be 'READ' or 'WRITE'")
        super().__init__(
            f"{OPENC3_SCOPE}/stream_logs/",
            True, # Start with logging enabled
            cycle_time,
            cycle_size,
            cycle_hour,
            cycle_minute,
        )
        self.log_type = log_type
        # Assigning through the 'name' property also derives the mangled
        # log_name ("<name>_stream_<read|write>").
        self.name = log_name
@property
def name(self):
return self.log_name
@name.setter
def name(self, name):
self.orig_name = name
self.log_name = name.lower() + "_stream_" + self.log_type.lower()
# Create a clone of this object with a new name
def clone(self):
stream_log = copy.copy(self)
stream_log.name = stream_log.orig_name
return stream_log
# Write to the log file.
#
# If no log file currently exists in the filesystem, a new file will be
# created.
#
# self.param data [String] String of data
    def write(self, data):
        """Append *data* to the current log file, creating/cycling as needed.

        No-ops when logging is disabled or *data* is empty.
        """
        if not self.logging_enabled:
            return
        if data is None or len(data) <= 0:
            return
        try:
            with self.mutex:
                time_nsec_since_epoch = to_nsec_from_epoch(datetime.now(timezone.utc))
                # prepare_write presumably opens/cycles the file based on
                # time and size (defined on LogWriter) -- verify there.
                self.prepare_write(time_nsec_since_epoch, len(data))
                if self.file:
                    self.METHOD_NAME(time_nsec_since_epoch, data)
        except RuntimeError as error:
            # NOTE(review): only RuntimeError is caught; an OSError raised by
            # the underlying file write would propagate to the caller.
            # Confirm that is intended before treating this as best-effort.
            Logger.error(f"Error writing {self.filename} : {repr(error)}")
            # OpenC3.handle_critical_exception(err)
    def METHOD_NAME(self, time_nsec_since_epoch, data):
        """Append raw *data* to the open file and update size/time bookkeeping."""
        self.file.write(data)
        self.file_size += len(data)
        # Track first/last write times (nsec since epoch); they feed the
        # final bucket filename. first_time is presumably falsy (None/0)
        # before the first write -- confirm in LogWriter.
        if not self.first_time:
            self.first_time = time_nsec_since_epoch
        self.last_time = time_nsec_since_epoch
def bucket_filename(self):
return f"{self.first_timestamp()}__{self.log_name}" + self.extension()
def extension(self):
return ".bin" |
1,880 | train forward | # Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import paddle
import paddle.nn as nn
import paddle.nn.functional as F
import math
import net
class DygraphModel():
# define model
def create_model(self, config):
user_size = config.get("hyper_parameters.user_size")
cms_segid_size = config.get("hyper_parameters.cms_segid_size")
cms_group_id_size = config.get("hyper_parameters.cms_group_id_size")
final_gender_code_size = config.get(
"hyper_parameters.final_gender_code_size")
age_level_size = config.get("hyper_parameters.age_level_size")
pvalue_level_size = config.get("hyper_parameters.pvalue_level_size")
shopping_level_size = config.get(
"hyper_parameters.shopping_level_size")
occupation_size = config.get("hyper_parameters.occupation_size")
new_user_class_level_size = config.get(
"hyper_parameters.new_user_class_level_size")
adgroup_id_size = config.get("hyper_parameters.adgroup_id_size")
cate_size = config.get("hyper_parameters.cate_size")
campaign_id_size = config.get("hyper_parameters.campaign_id_size")
customer_size = config.get("hyper_parameters.customer_size")
brand_size = config.get("hyper_parameters.brand_size")
btag_size = config.get("hyper_parameters.btag_size")
pid_size = config.get("hyper_parameters.pid_size")
main_embedding_size = config.get(
"hyper_parameters.main_embedding_size")
other_embedding_size = config.get(
"hyper_parameters.other_embedding_size")
dmr_model = net.DMRLayer(
user_size, cms_segid_size, cms_group_id_size,
final_gender_code_size, age_level_size, pvalue_level_size,
shopping_level_size, occupation_size, new_user_class_level_size,
adgroup_id_size, cate_size, campaign_id_size, customer_size,
brand_size, btag_size, pid_size, main_embedding_size,
other_embedding_size)
return dmr_model
# define feeds which convert numpy of batch data to paddle.tensor
    def create_feeds(self, batch_data, config):
        """Split a batch into (label, [sparse_tensor, dense_tensor]) inputs.

        Assumes each row packs the sparse id features plus, at column 264,
        a single dense float feature, with the click label in the last
        column -- TODO confirm against the dataset reader.
        """
        b = batch_data[0]
        sparse_tensor = b.astype('int64')
        # Column 264 is presumably the lone dense feature; reshape to (N, 1).
        dense_tensor = paddle.to_tensor(b[:, 264].numpy().astype('float32')
                                        .reshape(-1, 1))
        # Last column holds the binary label, shaped (N, 1).
        label = sparse_tensor[:, -1].reshape([-1, 1])
        return label, [sparse_tensor, dense_tensor]
# define optimizer
def create_optimizer(self, dy_model, config):
lr = config.get("hyper_parameters.optimizer.learning_rate", 0.001)
optimizer = paddle.optimizer.Adam(
learning_rate=lr, parameters=dy_model.parameters())
return optimizer
# define metrics such as auc/acc
# multi-task need to define multi metric
def create_metrics(self):
metrics_list_name = ["auc"]
auc_metric = paddle.metric.Auc("ROC")
metrics_list = [auc_metric]
return metrics_list, metrics_list_name
# construct train forward phase
    def METHOD_NAME(self, dy_model, metrics_list, batch_data, config):
        """One training step: forward pass, AUC metric update, loss out.

        Returns (loss, metrics_list, print_dict) as expected by the
        PaddleRec dygraph trainer loop.
        """
        label, input_tensor = self.create_feeds(batch_data, config)
        # Second forward argument is presumably an "is_infer" flag (False
        # here, True in infer_forward) -- confirm against net.DMRLayer.
        pred, loss = dy_model.forward(input_tensor, False)
        # update metrics
        # paddle.metric.Auc expects two columns: [P(negative), P(positive)].
        predict_2d = paddle.concat(x=[1 - pred, pred], axis=1)
        metrics_list[0].update(preds=predict_2d.numpy(), labels=label.numpy())
        print_dict = {'loss': loss}
        # print_dict = None
        return loss, metrics_list, print_dict
def infer_forward(self, dy_model, metrics_list, batch_data, config):
label, input_tensor = self.create_feeds(batch_data, config)
pred, loss = dy_model.forward(input_tensor, True)
# update metrics
predict_2d = paddle.concat(x=[1 - pred, pred], axis=1)
metrics_list[0].update(preds=predict_2d.numpy(), labels=label.numpy())
return metrics_list, None |
1,881 | test create new environment cname taken script | # Copyright 2017 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
from mock import patch
from ebcli.operations import createops
from ebcli.lib.aws import InvalidParameterValueError
from ebcli.objects.requests import CreateEnvironmentRequest
from .. import baseinttest
class TestCreateEnvironment(baseinttest.BaseOperationsTest):
module_name = 'createops'
@patch('ebcli.operations.createops.elasticbeanstalk.get_all_environment_names')
def test_create_new_environment_envname_taken(self, mock_names):
mock_names.return_value = ['my-env', 'my-env2']
self.mock_elasticbeanstalk.create_environment.side_effect = [
InvalidParameterValueError('Environment env-name already exists.'),
None,
]
self.mock_input.return_value = 'new-env-name'
env_request = CreateEnvironmentRequest(
app_name='app-name',
env_name='env-name',
)
createops.create_env(env_request, interactive=True)
self.assertEqual(self.mock_elasticbeanstalk.create_environment.call_count, 2)
def test_create_new_environment_envname_taken_script(self):
self.mock_elasticbeanstalk.create_environment.side_effect = [
InvalidParameterValueError('Environment env-name already exists.'),
]
try:
env_request = CreateEnvironmentRequest(
app_name='app-name',
env_name='env-name',
)
createops.create_env(env_request, interactive=False)
self.fail('Should have thrown InvalidParameterValueError')
except InvalidParameterValueError:
pass
def test_create_new_environment_cname_taken(self):
self.mock_elasticbeanstalk.create_environment.side_effect = [
InvalidParameterValueError('DNS name (cname) is not available.'),
None,
]
self.mock_input.return_value = 'new-cname'
env_request = CreateEnvironmentRequest(
app_name='app-name',
env_name='env-name',
)
createops.create_env(env_request, interactive=True)
self.assertEqual(self.mock_elasticbeanstalk.create_environment.call_count, 2)
def METHOD_NAME(self):
self.mock_elasticbeanstalk.create_environment.side_effect = [
InvalidParameterValueError('DNS name (cname) is not available.'),
]
try:
env_request = CreateEnvironmentRequest(
app_name='app-name',
env_name='env-name',
)
createops.create_env(env_request, interactive=False)
self.fail('Should have thrown InvalidParameterValueError')
except InvalidParameterValueError:
pass
    def test_create_new_environment_app_notexists(self):
        # NOTE(review): the test name says the application does NOT exist,
        # but the stubbed service error says "already exists" -- one of the
        # two is presumably wrong; confirm against the real EB error text.
        self.mock_elasticbeanstalk.create_environment.side_effect = [
            InvalidParameterValueError('Application \'app-name\' already exists.'),
        ]
        try:
            env_request = CreateEnvironmentRequest(
                app_name='app-name',
                env_name='env-name',
            )
            # Even in interactive mode an application-level error is not
            # recoverable by prompting, so it must propagate.
            createops.create_env(env_request, interactive=True)
            self.fail('Should have thrown InvalidParameterValueError')
        except InvalidParameterValueError:
            pass
def test_create_new_environment_app_notexists_script(self):
self.mock_elasticbeanstalk.create_environment.side_effect = [
InvalidParameterValueError('Application \'app-name\' already exists.'),
]
try:
env_request = CreateEnvironmentRequest(
app_name='app-name',
env_name='env-name',
)
createops.create_env(env_request, interactive=False)
self.fail('Should have thrown InvalidParameterValueError')
except InvalidParameterValueError:
pass |
1,882 | fetch pr info | #!/usr/bin/env python3
"""
Summarizes recent PRs based on their GitHub labels.
The result can be copy-pasted into CHANGELOG.md, though it often needs some manual editing too.
"""
import multiprocessing
import re
import sys
from dataclasses import dataclass
from typing import Any, List, Optional
import requests
from git import Repo # pip install GitPython
from tqdm import tqdm
OWNER = "emilk"
REPO = "egui"
COMMIT_RANGE = "latest..HEAD"
INCLUDE_LABELS = False # It adds quite a bit of visual noise
OFFICIAL_DEVS = [
"emilk",
]
@dataclass
class PrInfo:
gh_user_name: str
pr_title: str
labels: List[str]
@dataclass
class CommitInfo:
hexsha: str
title: str
pr_number: Optional[int]
def get_github_token() -> str:
    """Return a GitHub API token.

    Looks in the GH_ACCESS_TOKEN environment variable first, then in
    ~/.githubtoken. Exits the process with an error message if neither
    yields a non-empty token (previously an *empty* token file was
    silently returned, producing confusing auth failures later).
    """
    import os

    token = os.environ.get("GH_ACCESS_TOKEN", "")
    if token:
        return token

    token_file = os.path.join(os.path.expanduser("~"), ".githubtoken")
    try:
        with open(token_file, "r", encoding="utf-8") as f:
            token = f.read().strip()
    except OSError:
        # Missing or unreadable token file: fall through to the error below.
        token = ""
    if token:
        return token

    print("ERROR: expected a GitHub token in the environment variable GH_ACCESS_TOKEN or in ~/.githubtoken")
    sys.exit(1)
# Slow
def fetch_pr_info_from_commit_info(commit_info: CommitInfo) -> Optional[PrInfo]:
    """Fetch PR metadata for a commit, or None when it has no PR number."""
    pr_number = commit_info.pr_number
    return None if pr_number is None else METHOD_NAME(pr_number)
# Slow
def METHOD_NAME(pr_number: int) -> Optional[PrInfo]:
    """Fetch PR title, author and labels from the GitHub API.

    Returns None (after printing an error) on any non-200 response.
    Slow: one blocking HTTP round-trip per call.

    Fixes: added a request timeout (the call could hang forever), stopped
    shadowing the ``json`` name, and guarded error-body parsing so a
    non-JSON error response (proxy/HTML page) no longer raises.
    """
    url = f"https://api.github.com/repos/{OWNER}/{REPO}/pulls/{pr_number}"
    headers = {"Authorization": f"Token {get_github_token()}"}
    response = requests.get(url, headers=headers, timeout=30)
    if response.status_code != 200:
        try:
            message = response.json().get("message", response.text)
        except ValueError:
            message = response.text
        print(f"ERROR {url}: {response.status_code} - {message}")
        return None
    payload = response.json()
    labels = [label["name"] for label in payload["labels"]]
    return PrInfo(
        gh_user_name=payload["user"]["login"],
        pr_title=payload["title"],
        labels=labels,
    )
def get_commit_info(commit: Any) -> CommitInfo:
    """Parse a git commit summary into a CommitInfo.

    GitHub squash-merge commits end in " (#1234)". The pattern is anchored
    with ``$`` so a PR-like reference in the *middle* of a title (e.g.
    "Revert (#12) cleanup") is no longer misparsed as the PR number --
    the original unanchored ``re.match`` could stop mid-string.
    """
    match = re.match(r"(.*) \(#(\d+)\)$", commit.summary)
    if match:
        # group(1) is the bare title, group(2) the PR number suffix.
        return CommitInfo(hexsha=commit.hexsha, title=str(match.group(1)), pr_number=int(match.group(2)))
    else:
        return CommitInfo(hexsha=commit.hexsha, title=commit.summary, pr_number=None)
def remove_prefix(text, prefix):
    """Return *text* with *prefix* stripped from the front, if present."""
    if not text.startswith(prefix):
        return text
    return text[len(prefix):]
def print_section(crate: str, items: List[str]) -> None:
    """Print a markdown changelog section for *crate*; silent when empty."""
    if not items:
        return
    print(f"#### {crate}")
    for item in items:
        # Drop the redundant crate tag ("crate: " or "[crate] ") from the line.
        stripped = remove_prefix(item, f"{crate}: ")
        stripped = remove_prefix(stripped, f"[{crate}] ")
        print(f"* {stripped}")
    print()
def main() -> None:
    """Assemble a draft changelog from the commits in COMMIT_RANGE.

    Each commit is matched to its PR (when the summary ends in "(#1234)"),
    PR metadata is fetched from GitHub in parallel, and the summaries are
    printed as one markdown section per crate label plus catch-all sections
    for unsorted PRs and direct commits.
    """
    repo = Repo(".")
    commits = list(repo.iter_commits(COMMIT_RANGE))
    commits.reverse()  # Most recent last
    commit_infos = list(map(get_commit_info, commits))

    # Fetch PR metadata in parallel; the context manager guarantees the
    # worker processes are torn down even if a fetch raises.
    with multiprocessing.Pool() as pool:
        pr_infos = list(
            tqdm(
                pool.imap(fetch_pr_info_from_commit_info, commit_infos),
                total=len(commit_infos),
                desc="Fetch PR info commits",
            )
        )

    # PRs carrying only these labels are dropped from the changelog.
    ignore_labels = ["CI", "dependencies"]
    crate_names = [
        "ecolor",
        "eframe",
        "egui_extras",
        "egui_plot",
        "egui_glow",
        "egui-wgpu",
        "egui-winit",
        "egui",
        "epaint",
    ]
    sections = {}
    unsorted_prs = []
    unsorted_commits = []
    for commit_info, pr_info in zip(commit_infos, pr_infos):
        hexsha = commit_info.hexsha
        title = commit_info.title
        pr_number = commit_info.pr_number
        if pr_number is None:
            # Someone committed straight to main:
            summary = f"{title} [{hexsha[:7]}](https://github.com/{OWNER}/{REPO}/commit/{hexsha})"
            unsorted_commits.append(summary)
        else:
            title = pr_info.pr_title if pr_info else title  # We prefer the PR title if available
            labels = pr_info.labels if pr_info else []
            summary = f"{title} [#{pr_number}](https://github.com/{OWNER}/{REPO}/pull/{pr_number})"
            if INCLUDE_LABELS and 0 < len(labels):
                summary += f" ({', '.join(labels)})"
            if pr_info is not None:
                gh_user_name = pr_info.gh_user_name
                if gh_user_name not in OFFICIAL_DEVS:
                    summary += f" (thanks [@{gh_user_name}](https://github.com/{gh_user_name})!)"
            if 'typo' in labels:
                continue  # We get so many typo PRs. Let's not flood the changelog with them.
            # A PR labelled with several crates is listed under each of them.
            added = False
            for crate in crate_names:
                if crate in labels:
                    sections.setdefault(crate, []).append(summary)
                    added = True
            if not added:
                if not any(label in labels for label in ignore_labels):
                    unsorted_prs.append(summary)
    print()
    for crate in crate_names:
        if crate in sections:
            summary = sections[crate]
            print_section(crate, summary)
    print_section("Unsorted PRs", unsorted_prs)
    print_section("Unsorted commits", unsorted_commits)
if __name__ == "__main__":
main() |
1,883 | test parse version | #!/usr/bin/env python
# coding=utf-8
import pytest
from sacred.utils import (
PATHCHANGE,
convert_to_nested_dict,
get_by_dotted_path,
is_prefix,
iter_prefixes,
iterate_flattened,
iterate_flattened_separately,
join_paths,
recursive_update,
set_by_dotted_path,
get_inheritors,
convert_camel_case_to_snake_case,
apply_backspaces_and_linefeeds,
module_exists,
module_is_in_cache,
get_package_version,
parse_version,
rel_path,
)
def test_recursive_update():
    """recursive_update merges nested dicts in place and returns the same object."""
    d = {"a": {"b": 1}}
    res = recursive_update(d, {"c": 2, "a": {"d": 3}})
    assert d is res
    assert res == {"a": {"b": 1, "d": 3}, "c": 2}
def test_iterate_flattened_separately():
    """Scalars come first, then sub-dicts (after a PATHCHANGE marker) with the
    manually-sorted keys ("foo", "bar") listed in the given priority order."""
    d = {
        "a1": 1,
        "b2": {"bar": "foo", "foo": "bar"},
        "c1": "f",
        "d1": [1, 2, 3],
        "e2": {},
    }
    res = list(iterate_flattened_separately(d, ["foo", "bar"]))
    assert res == [
        ("a1", 1),
        ("c1", "f"),
        ("d1", [1, 2, 3]),
        ("e2", {}),
        ("b2", PATHCHANGE),
        ("b2.foo", "bar"),
        ("b2.bar", "foo"),
    ]
def test_iterate_flattened():
    """iterate_flattened yields (dotted.path, leaf-value) pairs in key order."""
    d = {"a": {"aa": 1, "ab": {"aba": 8}}, "b": 3}
    assert list(iterate_flattened(d)) == [("a.aa", 1), ("a.ab.aba", 8), ("b", 3)]
def test_set_by_dotted_path():
    """set_by_dotted_path overwrites an existing nested value in place."""
    d = {"foo": {"bar": 7}}
    set_by_dotted_path(d, "foo.bar", 10)
    assert d == {"foo": {"bar": 10}}
def test_set_by_dotted_path_creates_missing_dicts():
    """Intermediate dicts on the dotted path are created on demand."""
    d = {"foo": {"bar": 7}}
    set_by_dotted_path(d, "foo.d.baz", 3)
    assert d == {"foo": {"bar": 7, "d": {"baz": 3}}}
def test_get_by_dotted_path():
    """Empty path returns the whole dict; a missing path returns None."""
    assert get_by_dotted_path({"a": 12}, "a") == 12
    assert get_by_dotted_path({"a": 12}, "") == {"a": 12}
    assert get_by_dotted_path({"foo": {"a": 12}}, "foo.a") == 12
    assert get_by_dotted_path({"foo": {"a": 12}}, "foo.b") is None
def test_iter_prefixes():
    """iter_prefixes yields every dotted prefix, shortest first, incl. the full path."""
    assert list(iter_prefixes("foo.bar.baz")) == ["foo", "foo.bar", "foo.bar.baz"]
def test_join_paths():
    """join_paths joins with dots, skipping empty segments and trailing dots."""
    assert join_paths() == ""
    assert join_paths("foo") == "foo"
    assert join_paths("foo", "bar") == "foo.bar"
    assert join_paths("a", "b", "c", "d") == "a.b.c.d"
    assert join_paths("", "b", "", "d") == "b.d"
    assert join_paths("a.b", "c.d.e") == "a.b.c.d.e"
    assert join_paths("a.b.", "c.d.e") == "a.b.c.d.e"
def test_is_prefix():
    """is_prefix matches whole dotted segments only; a path is not its own prefix."""
    assert is_prefix("", "foo")
    assert is_prefix("foo", "foo.bar")
    assert is_prefix("foo.bar", "foo.bar.baz")
    assert not is_prefix("a", "foo.bar")
    assert not is_prefix("a.bar", "foo.bar")
    assert not is_prefix("foo.b", "foo.bar")
    assert not is_prefix("foo.bar", "foo.bar")
def test_convert_to_nested_dict():
    """Dotted keys expand into nested dicts."""
    dotted_dict = {"foo.bar": 8, "foo.baz": 7}
    assert convert_to_nested_dict(dotted_dict) == {"foo": {"bar": 8, "baz": 7}}
def test_convert_to_nested_dict_nested():
    """Dotted keys inside nested values are expanded and merged recursively."""
    dotted_dict = {"a.b": {"foo.bar": 8}, "a.b.foo.baz": 7}
    assert convert_to_nested_dict(dotted_dict) == {
        "a": {"b": {"foo": {"bar": 8, "baz": 7}}}
    }
def test_get_inheritors():
    """get_inheritors collects all transitive subclasses, excluding the base itself."""
    class A:
        pass

    class B(A):
        pass

    class C(B):
        pass

    class D(A):
        pass

    # E is unrelated and must not appear.
    class E:
        pass

    assert get_inheritors(A) == {B, C, D}
@pytest.mark.parametrize(
    "name,expected",
    [
        ("CamelCase", "camel_case"),
        ("snake_case", "snake_case"),
        ("CamelCamelCase", "camel_camel_case"),
        ("Camel2Camel2Case", "camel2_camel2_case"),
        ("getHTTPResponseCode", "get_http_response_code"),
        ("get2HTTPResponseCode", "get2_http_response_code"),
        ("HTTPResponseCode", "http_response_code"),
        ("HTTPResponseCodeXYZ", "http_response_code_xyz"),
    ],
)
def test_convert_camel_case_to_snake_case(name, expected):
    """Covers acronym runs, digits, and already-snake-case inputs."""
    assert convert_camel_case_to_snake_case(name) == expected
@pytest.mark.parametrize(
    "text,expected",
    [
        ("", ""),
        ("\b", ""),
        ("\r", "\r"),
        ("\r\n", "\n"),
        ("ab\bc", "ac"),
        ("\ba", "a"),
        ("ab\nc\b\bd", "ab\nd"),
        ("abc\rdef", "def"),
        ("abc\r", "abc\r"),
        ("abc\rd", "dbc"),
        ("abc\r\nd", "abc\nd"),
        ("abc\ndef\rg", "abc\ngef"),
        ("abc\ndef\r\rg", "abc\ngef"),
        ("abcd\refg\r", "efgd\r"),
        ("abcd\refg\r\n", "efgd\n"),
    ],
)
def test_apply_backspaces_and_linefeeds(text, expected):
    """Terminal-style \b erases the previous char; \r rewinds to line start."""
    assert apply_backspaces_and_linefeeds(text) == expected
def test_module_exists_base_level_modules():
    """module_exists detects installed modules without raising on missing ones."""
    assert module_exists("pytest")
    assert not module_exists("clearly_non_existing_module_name")
def test_module_exists_does_not_import_module():
    """module_exists must only probe, not import (tests.donotimport raises on import)."""
    assert module_exists("tests.donotimport")
def test_module_is_in_cache():
    """module_is_in_cache reflects sys.modules membership."""
    assert module_is_in_cache("pytest")
    assert module_is_in_cache("pkgutil")
    assert not module_is_in_cache("does_not_even_exist")
def test_get_package_version():
    """get_package_version reports the version of the installed package.

    Compare against pytest's own reported version instead of a hard-coded
    string so the test does not break on every pytest upgrade.
    """
    package_version = get_package_version("pytest")
    assert str(package_version) == pytest.__version__
def METHOD_NAME():
    """parse_version round-trips a plain semantic version string."""
    version = parse_version("6.2.3")
    assert str(version) == "6.2.3"
def test_get_package_version_comparison():
    """parse_version results support ordering against the installed version."""
    # NOTE(review): assumes pytest 7.1.2 is the installed version; consider
    # deriving the current version from pytest.__version__ instead of pinning.
    package_version = get_package_version("pytest")
    current_version = parse_version("7.1.2")
    old_version = parse_version("6.2.0")
    new_version = parse_version("7.2.4")
    assert package_version == current_version
    assert not package_version < current_version
    assert not package_version > current_version
    assert package_version <= new_version
    assert package_version >= old_version
def test_rel_path():
    """rel_path strips a dotted-path base prefix; an empty base is a no-op."""
    assert rel_path("", "foo.bar.baz") == "foo.bar.baz"
    assert rel_path("foo", "foo.bar.baz") == "bar.baz"
    assert rel_path("foo.bar", "foo.bar.baz") == "baz"
    assert rel_path("foo.bar.baz", "foo.bar.baz") == ""
    assert rel_path("", "") == ""
1,884 | tear down | #!/usr/bin/env python3
"""
MODULE: Test of r.learn.ml
AUTHOR(S): Steven Pawley <dr.stevenpawley gmail com>
PURPOSE: Test of r.learn.ml for regression
COPYRIGHT: (C) 2020 by Steven Pawley and the GRASS Development Team
This program is free software under the GNU General Public
License (>=v2). Read the file COPYING that comes with GRASS
for details.
"""
import tempfile
import os
from grass.gunittest.case import TestCase
from grass.gunittest.main import test
class TestRegression(TestCase):
    """Test regression and prediction using r.learn.ml"""

    # input rasters (Landsat 7 bands from the NC sample dataset)
    band1 = "lsat7_2002_10@PERMANENT"
    band2 = "lsat7_2002_20@PERMANENT"
    band3 = "lsat7_2002_30@PERMANENT"
    band4 = "lsat7_2002_40@PERMANENT"
    band5 = "lsat7_2002_50@PERMANENT"
    band7 = "lsat7_2002_70@PERMANENT"
    input_map = "elev_ned_30m@PERMANENT"

    # imagery group created during test
    group = "predictors"

    # training data created during test
    training_points = "training_points"

    # raster map created as output during test
    output = "regression_result"

    # files created during test
    # NOTE(review): only the .name path is kept; the NamedTemporaryFile object
    # itself is discarded, so the file may already be deleted when the module
    # under test writes to the path — confirm this is intended.
    model_file = tempfile.NamedTemporaryFile(suffix=".gz").name
    training_file = tempfile.NamedTemporaryFile(suffix=".gz").name

    @classmethod
    def setUpClass(cls):
        """Setup that is required for all tests

        Uses a temporary region for testing and creates an imagery group and
        randomly samples a categorical map to use as training pixels/points
        """
        cls.use_temp_region()
        cls.runModule("g.region", raster=cls.input_map)
        cls.runModule(
            "i.group",
            group=cls.group,
            input=[cls.band1, cls.band2, cls.band3, cls.band4, cls.band5, cls.band7],
        )
        # Fixed seed keeps the sampled training points reproducible.
        cls.runModule(
            "r.random",
            input=cls.input_map,
            npoints=1000,
            vector=cls.training_points,
            seed=1234,
        )

    @classmethod
    def tearDownClass(cls):
        """Remove the temporary region (and anything else we created)"""
        cls.del_temp_region()
        cls.runModule("g.remove", flags="f", type="vector", name=cls.training_points)
        cls.runModule("g.remove", flags="f", type="group", name=cls.group)

    def METHOD_NAME(self):
        """Remove the output created from the tests
        (reuse the same name for all the test functions)"""
        self.runModule("g.remove", flags="f", type="raster", name=[self.output])
        # The temp files may or may not exist depending on which test ran.
        try:
            os.remove(self.model_file)
        except FileNotFoundError:
            pass
        try:
            os.remove(self.training_file)
        except FileNotFoundError:
            pass

    def test_output_created_regression(self):
        """Checks that the output is created"""
        # Train a model and save it to disk ...
        self.assertModule(
            "r.learn.train",
            group=self.group,
            training_points=self.training_points,
            field="value",
            model_name="RandomForestRegressor",
            n_estimators=100,
            save_model=self.model_file,
        )
        self.assertFileExists(filename=self.model_file)
        # ... then predict from the saved model.
        self.assertModule(
            "r.learn.predict",
            group=self.group,
            load_model=self.model_file,
            output=self.output,
        )
        self.assertRasterExists(self.output, msg="Output was not created")

    def test_save_load_training(self):
        """Test that training data can be saved and loaded"""
        # save training data
        self.assertModule(
            "r.learn.train",
            group=self.group,
            training_points=self.training_points,
            field="value",
            model_name="RandomForestRegressor",
            save_training=self.training_file,
            n_estimators=100,
            save_model=self.model_file,
        )
        self.assertFileExists(filename=self.model_file)
        self.assertFileExists(filename=self.training_file)
        # load training data and retrain
        self.assertModule(
            "r.learn.train",
            group=self.group,
            model_name="RandomForestRegressor",
            load_training=self.training_file,
            n_estimators=100,
            save_model=self.model_file,
            overwrite=True,
        )
        # predict after loading training data
        self.assertModule(
            "r.learn.predict",
            group=self.group,
            load_model=self.model_file,
            output=self.output,
        )
        self.assertRasterExists(self.output, msg="Output was not created")
if __name__ == "__main__":
test() |
1,885 | cleanup prompts | import logging
import re
import time
from typing import Mapping
from urllib.parse import urlencode, urlparse
from bs4 import BeautifulSoup, Tag
from readability import Document
from lncrawl.core.browser import EC
from lncrawl.core.crawler import Crawler
from lncrawl.models import Chapter, SearchResult
from lncrawl.templates.browser.chapter_only import ChapterOnlyBrowserTemplate
from lncrawl.templates.browser.searchable import SearchableBrowserTemplate
logger = logging.getLogger(__name__)
automation_warning = """
<div style="opacity: 0.5; padding: 14px; text-align: center; border: 1px solid #000; font-style: italic; font-size: 0.825rem">
Parsed with an automated reader. The content accuracy is not guaranteed.
</div>
""".strip()
class NovelupdatesTemplate(SearchableBrowserTemplate, ChapterOnlyBrowserTemplate):
    """Crawler template for novelupdates.com (an aggregator of novel chapters
    hosted on other sites; chapter bodies are fetched from the original host
    when a crawler for it exists, otherwise parsed with readability)."""

    is_template = True

    # Shared cache of per-host crawlers, keyed by base URL. Class-level on
    # purpose so all instances reuse the same crawlers.
    _cached_crawlers: Mapping[str, Crawler] = {}
    # Matches bare chapter titles such as "c12", "Chapter 12", "ch-12".
    _title_matcher = re.compile(r"^(c|ch|chap|chapter)?[^\w\d]*(\d+)$", flags=re.I)

    def wait_for_cloudflare(self):
        """Block until the Cloudflare challenge disappears (best effort)."""
        if "cf_clearance" in self.cookies:
            return
        try:
            self.browser.wait(
                "#challenge-running",
                expected_conditon=EC.invisibility_of_element,
                timeout=20,
            )
        except Exception:
            pass

    def METHOD_NAME(self):
        """Remove the cookie-consent overlay so it can't intercept clicks."""
        try:
            self.browser.find("#uniccmp").remove()
        except Exception:
            pass

    def select_search_items(self, query: str):
        """Yield search-result boxes from the series finder (scraper mode)."""
        params = dict(sf=1, sh=query, sort="srank", order="asc", rl=1, mrl="min")
        soup = self.get_soup(
            f"https://www.novelupdates.com/series-finder/?{urlencode(params)}"
        )
        yield from soup.select(".l-main .search_main_box_nu")

    def select_search_items_in_browser(self, query: str):
        """Yield search-result boxes from the series finder (browser mode)."""
        params = dict(sf=1, sh=query, sort="srank", order="asc", rl=1, mrl="min")
        self.visit(f"https://www.novelupdates.com/series-finder/?{urlencode(params)}")
        self.browser.wait(".l-main .search_main_box_nu")
        yield from self.browser.soup.select(".l-main .search_main_box_nu")

    def parse_search_item(self, tag: Tag) -> SearchResult:
        """Build a SearchResult from one search-result box."""
        a = tag.select_one(".search_title a[href]")
        info = []
        rank = tag.select_one(".genre_rank")
        rating = tag.select_one(".search_ratings")
        chapter_count = tag.select_one('.ss_desk i[title="Chapter Count"]')
        last_updated = tag.select_one('.ss_desk i[title="Last Updated"]')
        reviewers = tag.select_one('.ss_desk i[title="Reviews"]')
        if rating:
            info.append(rating.text.strip())
        if rank:
            info.append("Rank " + rank.text.strip())
        if reviewers:
            info.append(reviewers.parent.text.strip())
        if chapter_count:
            info.append(chapter_count.parent.text.strip())
        if last_updated:
            info.append(last_updated.parent.text.strip())
        return SearchResult(
            title=a.text.strip(),
            info=" | ".join(info),
            url=self.absolute_url(a["href"]),
        )

    def parse_title(self, soup: BeautifulSoup) -> str:
        """Extract the series title."""
        return soup.select_one(".seriestitlenu").text

    def parse_title_in_browser(self) -> str:
        """Extract the series title after waiting for it to render."""
        self.browser.wait(".seriestitlenu")
        return self.parse_title(self.browser.soup)

    def parse_cover(self, soup: BeautifulSoup) -> str:
        """Extract the cover image URL, or None when absent."""
        img_tag = soup.select_one(".seriesimg img[src]")
        if img_tag:
            return img_tag["src"]

    def parse_authors(self, soup: BeautifulSoup):
        """Yield author names."""
        for a in soup.select("#showauthors a#authtag"):
            yield a.text.strip()

    def select_chapter_tags(self, soup: BeautifulSoup):
        """Yield chapter links oldest-first via the AJAX chapter list."""
        postid = soup.select_one("input#mypostid")["value"]
        response = self.submit_form(
            "https://www.novelupdates.com/wp-admin/admin-ajax.php",
            data=dict(
                action="nd_getchapters",
                mygrr="1",
                mypostid=postid,
            ),
        )
        soup = self.make_soup(response)
        # The site lists chapters newest-first; reverse for reading order.
        yield from reversed(soup.select(".sp_li_chp a[data-id]"))

    def select_chapter_tags_in_browser(self):
        """Yield chapter links oldest-first via the chapter popup (browser mode)."""
        self.METHOD_NAME()
        el = self.browser.find(".my_popupreading_open")
        el.scroll_into_view()
        el.click()
        self.browser.wait("#my_popupreading li.sp_li_chp a[data-id]")
        tag = self.browser.find("#my_popupreading").as_tag()
        yield from reversed(tag.select("li.sp_li_chp a[data-id]"))

    def parse_chapter_item(self, tag: Tag, id: int) -> Chapter:
        """Build a Chapter; bare numeric titles are normalized to 'Chapter N'."""
        title = tag.text.strip().title()
        title_match = self._title_matcher.match(title)
        if title_match:  # skip simple titles
            title = f"Chapter {title_match.group(2)}"
        return Chapter(
            id=id,
            title=title,
            url=self.absolute_url(tag["href"]),
        )

    def download_chapter_body_in_scraper(self, chapter: Chapter) -> None:
        """Follow the novelupdates redirect to the real host, then parse."""
        response = self.get_response(chapter.url, allow_redirects=True)
        logger.info("%s => %s", chapter.url, response.url)
        chapter.url = response.url
        return self.parse_chapter_body(chapter, response.text)

    def download_chapter_body_in_browser(self, chapter: Chapter) -> str:
        """Follow the redirect in the browser (polls up to 30s), then parse."""
        self.visit(chapter.url)
        for _ in range(30):
            if not self.browser.current_url.startswith(chapter.url):
                break
            time.sleep(1)
        logger.info("%s => %s", chapter.url, self.browser.current_url)
        chapter.url = self.browser.current_url
        return self.parse_chapter_body(chapter, self.browser.html)

    def select_chapter_body(self, soup: BeautifulSoup) -> Tag:
        """Pass-through to the base template's chapter-body selector."""
        return super().select_chapter_body(soup)

    def parse_chapter_body(self, chapter: Chapter, text: str) -> str:
        """Delegate to the original host's crawler when one exists; otherwise
        fall back to readability with an accuracy warning banner."""
        crawler = self._find_original_crawler(chapter)
        if hasattr(crawler, "download_chapter_body_in_scraper"):
            return crawler.download_chapter_body_in_scraper(chapter)
        elif hasattr(crawler, "download_chapter_body"):
            return crawler.download_chapter_body(chapter)
        else:
            reader = Document(text)
            chapter.title = reader.short_title()
            summary = reader.summary(html_partial=True)
            return automation_warning + summary

    def _find_original_crawler(self, chapter: Chapter):
        """Return (and cache) a crawler for the chapter's host, or None."""
        from lncrawl.core.sources import crawler_list, prepare_crawler

        parsed_url = urlparse(chapter.url)
        base_url = "%s://%s/" % (parsed_url.scheme, parsed_url.hostname)
        if base_url in crawler_list:
            try:
                crawler = self._cached_crawlers.get(base_url)
                if not crawler:
                    crawler = prepare_crawler(chapter.url)
                    self._cached_crawlers[base_url] = crawler
                return crawler
            except Exception as e:
                # Fixed: the old call passed `e` with no %-placeholder, which
                # made the logging module raise a formatting error internally.
                logger.info("Failed with original crawler: %s", e)
        return None
1,886 | tear down | """
Copyright 2020 The Magma Authors.
This source code is licensed under the BSD-style license found in the
LICENSE file in the root directory of this source tree.
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import ipaddress
import time
import unittest
import s1ap_types
import s1ap_wrapper
class TestS1HandoverFailure(unittest.TestCase):
    """Integration Test: TestS1HandoverFailure"""

    def setUp(self):
        """Initialize before test case execution"""
        self._s1ap_wrapper = s1ap_wrapper.TestWrapper()

    def METHOD_NAME(self):
        """Cleanup after test case execution"""
        self._s1ap_wrapper.cleanup()

    def test_s1_handover_failure(self):
        """S1 Handover Unsuccessful Scenario (S1 HO Failure):

        1) Attach UE to ENB 1 (After handover UE should switch to ENB 2)
        2) Trigger handover by sending S1 HO Required message from source ENB
        3) Receive S1 HO Request and send S1 HO Failure in resp from target ENB
        4) Receive and handle the S1 HO Preparation Failure msg in source ENB
        5) S1 HO has failed. UE is still attached to ENB 1. Detach the UE

        Note: Before execution of this test case,
        Run the test script s1aptests/test_modify_mme_config_for_sanity.py
        to update multiple PLMN/TAC configuration in MME and
        after test case execution, restore the MME configuration by running
        the test script s1aptests/test_restore_mme_config_after_sanity.py

        Or

        Make sure that following steps are correct
        1. Configure same plmn and tac in both MME and S1APTester
        2. How to configure plmn and tac in MME:
           2a. Set mcc and mnc in gateway.mconfig for mme service
           2b. Set tac in gateway.mconfig for mme service
           2c. Restart MME service
        3. How to configure plmn and tac in S1APTester,
           3a. For multi-eNB test case, configure plmn and tac from test case.
           In each multi-eNB test case, set plmn, plmn length and tac in enb_list
           3b. For single eNB test case, configure plmn and tac in nbAppCfg.txt
        """
        # Column is an ENB parameter, Row is number of ENB
        # Columns: Cell Id, Tac, EnbType, PLMN Id, PLMN length
        enb_list = [
            [1, 1, 1, "00101", 5],
            [2, 2, 1, "00101", 5],
        ]
        self._s1ap_wrapper.multiEnbConfig(len(enb_list), enb_list)
        print("Waiting for 2 seconds for multiple ENBs to get configured")
        time.sleep(2)
        self._s1ap_wrapper.configUEDevice(1)
        req = self._s1ap_wrapper.ue_req
        print(
            "************************* Running End to End attach for UE Id:",
            req.ue_id,
        )
        # Now actually complete the attach
        attach = self._s1ap_wrapper._s1_util.attach(
            req.ue_id,
            s1ap_types.tfwCmd.UE_END_TO_END_ATTACH_REQUEST,
            s1ap_types.tfwCmd.UE_ATTACH_ACCEPT_IND,
            s1ap_types.ueAttachAccept_t,
        )
        # UE IP assigned during attach; first 4 bytes are the IPv4 address.
        addr = attach.esmInfo.pAddr.addrInfo
        default_ip = ipaddress.ip_address(bytes(addr[:4]))
        # Wait on EMM Information from MME
        self._s1ap_wrapper._s1_util.receive_emm_info()
        print("Waiting for 3 seconds for the flow rules creation")
        time.sleep(3)
        # Verify if flow rules are created
        # 1 UL flow for default bearer
        num_ul_flows = 1
        dl_flow_rules = {default_ip: []}
        self._s1ap_wrapper.s1_util.verify_flow_rules(
            num_ul_flows,
            dl_flow_rules,
        )
        # Trigger the S1 Handover Procedure from Source ENB by sending S1
        # Handover Required Message to MME
        print(
            "************************* Sending S1 Handover Required for UE Id:",
            req.ue_id,
        )
        s1ho_required = s1ap_types.FwNbS1HoRequired_t()
        s1ho_required.ueId = req.ue_id
        # Instruct the test framework to drive the failure variant of S1 HO.
        s1ho_required.s1HoEvent = (
            s1ap_types.FwS1HoEvents.FW_S1_HO_FAILURE.value
        )
        self._s1ap_wrapper.s1_util.issue_cmd(
            s1ap_types.tfwCmd.S1_HANDOVER_REQUIRED,
            s1ho_required,
        )
        # After receiving S1 Handover Required from Source ENB, MME sends S1
        # Handover Request to Target ENB.
        # Wait for S1 Handover Request Indication
        response = self._s1ap_wrapper.s1_util.get_response()
        assert response.msg_type == s1ap_types.tfwCmd.S1_HANDOVER_REQ_IND.value
        s1ho_req_ind = response.cast(s1ap_types.FwNbS1HoReqInd_t)
        print(
            "************************* Received S1 Handover Request "
            "Indication (UeId: "
            + str(s1ho_req_ind.ueId)
            + ", Connected EnbId: "
            + str(s1ho_req_ind.currEnbId)
            + ") (HO SrcEnbId: "
            + str(s1ho_req_ind.hoSrcEnbId)
            + ", HO TgtEnbId: "
            + str(s1ho_req_ind.hoTgtEnbId)
            + ")",
        )
        # Send the S1 Handover Failure message from Target ENB to MME
        print(
            "************************* Sending S1 Handover Failure for UE Id:",
            req.ue_id,
        )
        s1ho_failure = s1ap_types.FwNbS1HoFailure_t()
        s1ho_failure.ueId = req.ue_id
        self._s1ap_wrapper.s1_util.issue_cmd(
            s1ap_types.tfwCmd.S1_HANDOVER_FAILURE,
            s1ho_failure,
        )
        # After receiving S1 Handover Failure from Target ENB, MME sends S1
        # Handover Preparation Failure to Source ENB.
        # Wait for S1 Handover Preparation Failure Indication
        response = self._s1ap_wrapper.s1_util.get_response()
        assert response.msg_type == s1ap_types.tfwCmd.S1_HANDOVER_PREP_FAIL_IND.value
        s1ho_prep_fail_ind = response.cast(s1ap_types.FwNbS1HoPrepFailInd_t)
        print(
            "************************* Received S1 Handover Preparation "
            "Failure Indication (UeId: "
            + str(s1ho_prep_fail_ind.ueId)
            + ", Connected EnbId: "
            + str(s1ho_prep_fail_ind.currEnbId)
            + ") (HO SrcEnbId: "
            + str(s1ho_prep_fail_ind.hoSrcEnbId)
            + ", HO TgtEnbId: "
            + str(s1ho_prep_fail_ind.hoTgtEnbId)
            + ")",
        )
        print(
            "************************* Running UE detach for UE Id:",
            req.ue_id,
        )
        # Now detach the UE
        self._s1ap_wrapper.s1_util.detach(
            req.ue_id,
            s1ap_types.ueDetachType_t.UE_NORMAL_DETACH.value,
        )
        print("Waiting for 5 seconds for the flow rules deletion")
        time.sleep(5)
        # Verify that all UL/DL flows are deleted
        self._s1ap_wrapper.s1_util.verify_flow_rules_deletion()
if __name__ == "__main__":
unittest.main() |
1,887 | hover artists | import logging
from typing import Optional
from feeluown.app import App
from feeluown.utils.aio import run_fn, run_afn
from feeluown.player import SongRadio
from feeluown.library import SongProtocol, VideoModel
logger = logging.getLogger(__name__)
MV_BTN_TEXT = '播放 MV'
class SongMenuInitializer:
    """Populates a song's context menu, lazily fetching artists/MV submenus
    only when the user hovers the corresponding menu entries."""

    def __init__(self, app: App, song):
        """
        :type app: feeluown.app.App
        :param song: the song model the menu acts on
        """
        self._app = app
        self._song = song
        # Guards against launching duplicate fetches while one is in flight.
        self._fetching_artists = False
        self._fetching_mv = False

    def apply(self, menu):
        """Add song actions (artists, MV, album, radio, detail) to `menu`."""
        app = self._app
        song = self._song

        def enter_song_radio(song):
            # Replace the playlist with a radio seeded from this song.
            radio = SongRadio.create(app, song)
            app.fm.activate(radio.fetch_songs_func, reset=False)
            if app.playlist.current_song != song:
                app.playlist.clear()
                self._app.playlist.next()
                self._app.player.resume()
            else:
                # Keep only the current song; the radio fills in the rest.
                for song_ in app.playlist.list().copy():
                    if song_ is not app.playlist.current_song:
                        app.playlist.remove(song_)
                self._app.player.resume()

        def goto_song_explore(song):
            app.browser.goto(model=song, path='/explore')

        async def goto_song_album(song):
            usong: SongProtocol = await run_fn(self._app.library.song_upgrade, song)
            if usong.album is not None:
                self._app.browser.goto(model=usong.album)
            else:
                self._app.show_msg('该歌曲没有专辑信息')

        # Submenus start empty; their contents are fetched on hover.
        menu.hovered.connect(self.on_action_hovered)
        artist_menu = menu.addMenu('查看歌手')
        artist_menu.menuAction().setData({'artists': None, 'song': song})
        mv_menu = menu.addMenu(MV_BTN_TEXT)
        mv_menu.menuAction().setData({'mvs': None, 'song': song})
        menu.addAction('查看专辑').triggered.connect(
            lambda: run_afn(goto_song_album, song))
        menu.addAction('歌曲电台').triggered.connect(
            lambda: enter_song_radio(song))
        menu.addAction('歌曲详情').triggered.connect(
            lambda: goto_song_explore(song))

    def on_action_hovered(self, action):
        """
        Fetch song.artists when artists_action is hovered. If it is
        already fetched, ignore.
        """
        data = action.data()
        if data is None:  # submenu action
            return
        if 'artists' in data:
            self.METHOD_NAME(action, data)
        elif 'mvs' in data:
            self._hover_mv(action, data)

    def _hover_mv(self, action, data):
        """Lazily fetch the song's MV and populate the MV submenu."""
        def mv_fetched_cb(future):
            self._fetching_mv = False
            mv: Optional[VideoModel] = future.result()
            if mv is not None:
                try:
                    mv_action = action.menu().addAction(mv.title)
                    mv_action.triggered.connect(
                        lambda: self._app.playlist.play_model(mv))
                except RuntimeError:
                    # action may have been deleted.
                    return
            if mv is not None:
                data['mvs'] = [mv]
                action.setText(MV_BTN_TEXT)
            else:
                data['mvs'] = []
                action.setText('该歌曲无 MV')
                action.setDisabled(True)
            action.setData(data)

        # artists value has not been fetched
        if data['mvs'] is None and self._fetching_mv is False:
            logger.debug('fetch song.mv for actions')
            song = data['song']
            self._fetching_mv = True
            task = run_fn(self._app.library.song_get_mv, song)
            task.add_done_callback(mv_fetched_cb)

    def METHOD_NAME(self, action, data):
        """Lazily fetch the song's artists and populate the artists submenu."""
        # pylint: disable=unnecessary-direct-lambda-call
        def artists_fetched_cb(future):
            self._fetching_artists = False
            artists = future.result()  # ignore the potential exception
            if artists:
                for artist in artists:
                    try:
                        artist_action = action.menu().addAction(artist.name)
                        # create a closure to bind variable artist
                        artist_action.triggered.connect(
                            (lambda x: lambda: self._app.browser.goto(model=x))(artist))
                    except RuntimeError:
                        # action may have been deleted.
                        return
            data['artists'] = artists or []
            action.setData(data)

        # artists value has not been fetched
        if data['artists'] is None and self._fetching_artists is False:
            logger.debug('fetch song.artists for actions')
            song = data['song']
            self._fetching_artists = True
            task = run_fn(lambda: self._app.library.song_upgrade(song).artists)
            task.add_done_callback(artists_fetched_cb)
1,888 | read qa frame | """
desispec.io.qa
==============
IO routines for QA.
"""
from __future__ import print_function, absolute_import, division
import os, yaml
import json
from desiutil.io import yamlify
from desispec.io import findfile, read_meta_frame
from desispec.io.util import makepath
from desiutil.log import get_logger
from .util import checkgzip
# log=get_logger()
def qafile_from_framefile(frame_file, qaprod_dir=None, output_dir=None):
    """ Derive the QA filename from an input frame file

    Args:
        frame_file: str, path to the frame file (gzipped variants accepted)
        qaprod_dir: str, optional  Over-ride default QA production directory
        output_dir: str, optional  Over-ride default output path

    Returns:
        qafile: str, path of the QA file
        qatype: str, 'qa_calib' for flat/arc exposures, else 'qa_data'
    """
    frame_file = checkgzip(frame_file)
    frame_meta = read_meta_frame(frame_file)
    night = frame_meta['NIGHT'].strip()
    camera = frame_meta['CAMERA'].strip()
    expid = int(frame_meta['EXPID'])
    # Calibration exposures get their own QA type
    if frame_meta['FLAVOR'] in ['flat', 'arc']:
        qatype = 'qa_calib'
    else:
        qatype = 'qa_data'
    # Name
    qafile = findfile(qatype, night=night, camera=camera, expid=expid,
                      outdir=output_dir, qaprod_dir=qaprod_dir)
    # Return
    return qafile, qatype
def read_qa_data(filename):
    """Read data from a QA file.

    YAML round-trips exposure ids as strings; they are converted back to
    int keys here.
    """
    with open(filename, 'r') as infile:
        qa_data = yaml.safe_load(infile)
    # Re-key string exposure ids as ints (snapshot keys before mutating)
    for night_data in qa_data.values():
        str_expids = [key for key in night_data if isinstance(key, str)]
        for expid in str_expids:
            night_data[int(expid)] = night_data.pop(expid).copy()
    return qa_data
def read_qa_brick(filename):
    """Generate a QA_Brick object from a data file

    Args:
        filename: str, path to a YAML QA file

    Returns:
        QA_Brick
    """
    from desispec.qa.qa_brick import QA_Brick
    # Read
    qa_data = read_qa_data(filename)
    # Instantiate
    qabrick = QA_Brick(in_data=qa_data)
    return qabrick
def METHOD_NAME(filename):
    """Generate a QA_Frame object from a data file

    Args:
        filename: str path, or a (night, expid, camera) tuple which is
            resolved to the standard QA file path via findfile

    Returns:
        QA_Frame
    """
    from desispec.qa.qa_frame import QA_Frame
    #- check if filename is (night, expid, camera) tuple instead
    if not isinstance(filename, str):
        night, expid, camera = filename
        filename = findfile('qa', night, expid, camera)
    # Read
    filename = checkgzip(filename)
    qa_data = read_qa_data(filename)
    # Instantiate
    qaframe = QA_Frame(qa_data)
    return qaframe
def load_qa_frame(filename, frame_meta=None, flavor=None):
    """ Load an existing QA_Frame or generate one, as needed

    Args:
        filename: str
        frame_meta: dict like, optional
        flavor: str, optional
            Type of QA_Frame

    Returns:
        qa_frame: QA_Frame object
    """
    from desispec.qa.qa_frame import QA_Frame
    log=get_logger()
    if os.path.isfile(filename): # Read from file, if it exists
        qaframe = METHOD_NAME(filename)
        log.info("Loaded QA file {:s}".format(filename))
        # Check against frame, if provided
        if frame_meta is not None:
            for key in ['camera','expid','night','flavor']:
                assert str(getattr(qaframe, key)) == str(frame_meta[key.upper()])
    else:  # Init
        # NOTE(review): when frame_meta is None this logs an error but still
        # calls QA_Frame(None) below — confirm whether an exception should be
        # raised here instead.
        if frame_meta is None:
            log.error("QA file {:s} does not exist. Expecting frame input".format(filename))
        qaframe = QA_Frame(frame_meta)
    # Set flavor?
    if flavor is not None:
        qaframe.flavor = flavor
    # Return
    return qaframe
def load_qa_brick(filename):
    """ Load an existing QA_Brick or generate one, as needed

    Args:
        filename: str

    Returns:
        qa_brick: QA_Brick object
    """
    from desispec.qa.qa_brick import QA_Brick
    log = get_logger()
    if not os.path.isfile(filename):
        # No file on disk: start from a fresh QA_Brick
        return QA_Brick()
    qabrick = read_qa_brick(filename)
    log.info("Loaded QA file {:s}".format(filename))
    return qabrick
def write_qa_brick(outfile, qabrick):
    """Write QA for a given exposure

    Args:
        outfile : filename
        qabrick : QA_Brick object
            its .data dict of QA info is serialized to YAML

    Returns:
        outfile : str, the path written
    """
    outfile = makepath(outfile, 'qa')
    # Simple yaml
    ydict = yamlify(qabrick.data)
    with open(outfile, 'w') as yamlf:
        yamlf.write(yaml.dump(ydict))#, default_flow_style=True) )
    return outfile
def write_qa_frame(outfile, qaframe, verbose=False):
    """Write QA for a given frame

    Args:
        outfile : str
            filename
        qaframe : QA_Frame object, with the following attributes
            qa_data: dict of QA info
        verbose : bool, optional
            log the written filename

    Returns:
        outfile : str, the path written
    """
    log=get_logger()
    outfile = makepath(outfile, 'qa')
    # Generate the nested night -> expid -> camera dict expected on read
    odict = {qaframe.night: {qaframe.expid: {qaframe.camera: {}, 'flavor': qaframe.flavor}}}
    odict[qaframe.night][qaframe.expid][qaframe.camera] = qaframe.qa_data
    ydict = yamlify(odict)
    # Simple yaml
    with open(outfile, 'w') as yamlf:
        yamlf.write(yaml.dump(ydict))
    if verbose:
        log.info("Wrote QA frame file: {:s}".format(outfile))
    return outfile
def write_qa_exposure(outroot, qaexp, ret_dict=False):
    """Write QA for a given exposure

    Args:
        outroot : str
            filename without format extension
        qa_exp : QA_Exposure object
        ret_dict : bool, optional
            Return dict only? [for qa_prod, mainly]

    Returns:
        outfile or odict : str or dict
    """
    # Generate the dict
    odict = {qaexp.night: {qaexp.expid: {}}}
    odict[qaexp.night][qaexp.expid]['flavor'] = qaexp.flavor
    odict[qaexp.night][qaexp.expid]['meta'] = qaexp.meta
    cameras = list(qaexp.data['frames'].keys())
    for camera in cameras:
        odict[qaexp.night][qaexp.expid][camera] = qaexp.data['frames'][camera]
    # Return dict only?
    if ret_dict:
        return odict
    # Simple yaml
    ydict = yamlify(odict)
    outfile = outroot+'.yaml'
    outfile = makepath(outfile, 'qa')
    with open(outfile, 'w') as yamlf:
        yamlf.write( yaml.dump(ydict))#, default_flow_style=True) )
    return outfile
def load_qa_multiexp(inroot):
    """Load QA for a given production

    Args:
        inroot : str
            base filename without format extension

    Returns:
        odict : dict

    Raises:
        FileNotFoundError : when the expected JSON file does not exist
    """
    log=get_logger()
    infile = inroot+'.json'
    log.info("Loading QA prod file: {:s}".format(infile))
    # Fail explicitly with guidance; previously this logged and then crashed
    # inside open() with an unexplained FileNotFoundError anyway.
    if not os.path.exists(infile):
        log.info("QA prod file {:s} does not exist!".format(infile))
        log.error("You probably need to generate it with desi_qa_prod --make_frameqa=3 --slurp")
        raise FileNotFoundError(infile)
    with open(infile, 'rt') as fh:
        odict = json.load(fh)
    # Return
    return odict
def write_qa_multiexp(outroot, mdict, indent=True):
    """Write QA for a given production

    Args:
        outroot : str
            filename without format extension
        mdict : dict
        indent : bool, optional
            passed to json.dump (True indents with one space)

    Returns:
        outfile: str
            output filename
    """
    log=get_logger()
    outfile = outroot+'.json'
    outfile = makepath(outfile, 'qa')
    ydict = yamlify(mdict)  # This works well for JSON too
    # Simple json
    with open(outfile, 'wt') as fh:
        json.dump(ydict, fh, indent=indent)
    log.info('Wrote QA Multi-Exposure file: {:s}'.format(outfile))
    return outfile
def write_qa_ql(outfile, qaresult):
    """Write QuickLook QA output as JSON

    Args:
        outfile : str
            filename to be written (json)
        qaresult : dict
            QAresults from run_qa()

    Returns:
        outfile : str
    """
    # yamlify sanitizes the dict for serialization (JSON included)
    qadict = yamlify(qaresult)
    with open(outfile, "w") as fh:
        json.dump(qadict, fh, sort_keys=True, indent=4)
    return outfile
|
1,889 | start rendering | import os
import unreal
from openpype.settings import get_project_settings
from openpype.pipeline import Anatomy
from openpype.hosts.unreal.api import pipeline
from openpype.widgets.message_window import Window
# Module-level references kept alive for the duration of an asynchronous
# render; the queue-finished callback deletes them when rendering ends.
queue = None
executor = None
def _queue_finish_callback(executor_, success):
    """Executor-finished hook for the Movie Pipeline queue.

    Args:
        executor_: the MoviePipeline executor that finished (the original
            parameter was named ``exec``, shadowing the builtin; it is
            passed positionally by the delegate, so renaming is safe).
        success: bool reported by the executor.
    """
    unreal.log("Render completed. Success: " + str(success))
    # Delete our reference so we don't keep it alive.
    global executor
    global queue
    del executor
    del queue
def _job_finish_callback(job, success):
    """Per-job completion hook for the Movie Pipeline executor.

    Any edits made to the editor world here are duplicated into the
    world of the next render; undo them in the queue-finished callback
    or they will leak into the editor world.
    """
    unreal.log("Individual job completed.")
def METHOD_NAME():
    """
    Start the rendering process.

    Collects the currently selected AyonPublishInstance assets whose
    family is "render", expands their sequences (and subsequences) into
    individual Movie Render Queue jobs, and kicks off a PIE-executor
    render.  Raises RuntimeError if nothing is selected.
    """
    unreal.log("Starting rendering...")
    # Get selected sequences
    assets = unreal.EditorUtilityLibrary.get_selected_assets()
    if not assets:
        Window(
            parent=None,
            title="No assets selected",
            message="No assets selected. Select a render instance.",
            level="warning")
        raise RuntimeError(
            "No assets selected. You need to select a render instance.")
    # instances = pipeline.ls_inst()
    # Keep only AyonPublishInstance containers of family "render".
    instances = [
        a for a in assets
        if a.get_class().get_name() == "AyonPublishInstance"]
    inst_data = []
    for i in instances:
        data = pipeline.parse_container(i.get_path_name())
        if data["family"] == "render":
            inst_data.append(data)
    # Resolve the render output root from the project anatomy.
    try:
        project = os.environ.get("AVALON_PROJECT")
        anatomy = Anatomy(project)
        root = anatomy.roots['renders']
    except Exception as e:
        raise Exception(
            "Could not find render root in anatomy settings.") from e
    render_dir = f"{root}/{project}"
    # subsystem = unreal.get_editor_subsystem(
    #     unreal.MoviePipelineQueueSubsystem)
    # queue = subsystem.get_queue()
    # Use a standalone queue (kept alive via the module-level global).
    global queue
    queue = unreal.MoviePipelineQueue()
    ar = unreal.AssetRegistryHelpers.get_asset_registry()
    data = get_project_settings(project)
    config = None
    config_path = str(data.get("unreal").get("render_config_path"))
    if config_path and unreal.EditorAssetLibrary.does_asset_exist(config_path):
        unreal.log("Found saved render configuration")
        config = ar.get_asset_by_object_path(config_path).get_asset()
    for i in inst_data:
        sequence = ar.get_asset_by_object_path(i["sequence"]).get_asset()
        # Frame end is exclusive in the pipeline, hence the +1.
        sequences = [{
            "sequence": sequence,
            "output": f"{i['output']}",
            "frame_range": (
                int(float(i["frameStart"])),
                int(float(i["frameEnd"])) + 1)
        }]
        render_list = []
        # Get all the sequences to render. If there are subsequences,
        # add them and their frame ranges to the render list. We also
        # use the names for the output paths.
        # NOTE: `sequences` is appended to while being iterated, which
        # walks the subsequence tree breadth-first.
        for seq in sequences:
            subscenes = pipeline.get_subsequences(seq.get('sequence'))
            if subscenes:
                for sub_seq in subscenes:
                    sequences.append({
                        "sequence": sub_seq.get_sequence(),
                        "output": (f"{seq.get('output')}/"
                                   f"{sub_seq.get_sequence().get_name()}"),
                        "frame_range": (
                            sub_seq.get_start_frame(), sub_seq.get_end_frame())
                    })
            else:
                # Avoid rendering camera sequences
                if "_camera" not in seq.get('sequence').get_name():
                    render_list.append(seq)
        # Create the rendering jobs and add them to the queue.
        for render_setting in render_list:
            job = queue.allocate_new_job(unreal.MoviePipelineExecutorJob)
            job.sequence = unreal.SoftObjectPath(i["master_sequence"])
            job.map = unreal.SoftObjectPath(i["master_level"])
            job.author = "Ayon"
            # If we have a saved configuration, copy it to the job.
            if config:
                job.get_configuration().copy_from(config)
            # User data could be used to pass data to the job, that can be
            # read in the job's OnJobFinished callback. We could,
            # for instance, pass the AyonPublishInstance's path to the job.
            # job.user_data = ""
            output_dir = render_setting.get('output')
            shot_name = render_setting.get('sequence').get_name()
            settings = job.get_configuration().find_or_add_setting_by_class(
                unreal.MoviePipelineOutputSetting)
            settings.output_resolution = unreal.IntPoint(1920, 1080)
            settings.custom_start_frame = render_setting.get("frame_range")[0]
            settings.custom_end_frame = render_setting.get("frame_range")[1]
            settings.use_custom_playback_range = True
            settings.file_name_format = f"{shot_name}" + ".{frame_number}"
            settings.output_directory.path = f"{render_dir}/{output_dir}"
            job.get_configuration().find_or_add_setting_by_class(
                unreal.MoviePipelineDeferredPassBase)
            # Choose the image-sequence output class from project settings.
            render_format = data.get("unreal").get("render_format", "png")
            if render_format == "png":
                job.get_configuration().find_or_add_setting_by_class(
                    unreal.MoviePipelineImageSequenceOutput_PNG)
            elif render_format == "exr":
                job.get_configuration().find_or_add_setting_by_class(
                    unreal.MoviePipelineImageSequenceOutput_EXR)
            elif render_format == "jpg":
                job.get_configuration().find_or_add_setting_by_class(
                    unreal.MoviePipelineImageSequenceOutput_JPG)
            elif render_format == "bmp":
                job.get_configuration().find_or_add_setting_by_class(
                    unreal.MoviePipelineImageSequenceOutput_BMP)
    # If there are jobs in the queue, start the rendering process.
    if queue.get_jobs():
        global executor
        executor = unreal.MoviePipelinePIEExecutor()
        preroll_frames = data.get("unreal").get("preroll_frames", 0)
        settings = unreal.MoviePipelinePIEExecutorSettings()
        settings.set_editor_property(
            "initial_delay_frame_count", preroll_frames)
        executor.on_executor_finished_delegate.add_callable_unique(
            _queue_finish_callback)
        executor.on_individual_job_finished_delegate.add_callable_unique(
            _job_finish_callback)  # Only available on PIE Executor
        executor.execute(queue)
1,890 | connectivity | from typing import List
from qcs_api_client.client import QCSClientConfiguration
from pyquil.api._engagement_manager import EngagementManager
from pyquil import get_qc
from openqaoa.backends.devices_core import DeviceBase
class DevicePyquil(DeviceBase):
    """
    Contains the required information and methods needed to access remote
    Rigetti QPUs via Pyquil.
    Attributes
    ----------
    n_qubits: `int`
        The maximum number of qubits available for the selected backend.
        Available upon proper initialisation of the class.
    """

    def __init__(
        self,
        device_name: str,
        as_qvm: bool = None,
        noisy: bool = None,
        compiler_timeout: float = 20.0,
        execution_timeout: float = 20.0,
        client_configuration: QCSClientConfiguration = None,
        endpoint_id: str = None,
        engagement_manager: EngagementManager = None,
    ):
        """
        Parameters
        ----------
        device_name: str
            The name of the desired quantum computer. This should correspond to
            a name returned by :py:func:`list_quantum_computers`. Names ending
            in "-qvm" will return a QVM. Names ending in "-pyqvm" will return a
            :py:class:`PyQVM`. Names ending in "-noisy-qvm" will return a QVM
            with a noise model. Otherwise, we will return a QPU with the given
            name.
        as_qvm: bool
            An optional flag to force construction of a QVM (instead of a QPU).
            If specified and set to ``True``, a QVM-backed quantum computer will
            be returned regardless of the name's suffix.
        noisy: bool
            An optional flag to force inclusion of a noise model. If specified
            and set to ``True``, a quantum computer with a noise model will be
            returned regardless of the name's suffix. The generic QVM noise
            model is simple T1 and T2 noise plus readout error. See
            :py:func:`~pyquil.noise.decoherence_noise_with_asymmetric_ro`. Note,
            we currently do not support noise models based on QCS hardware; a
            value of `True`` will result in an error if the requested QPU is a
            QCS hardware QPU.
        compiler_timeout: float
            Time limit for compilation requests, in seconds.
        execution_timeout: float
            Time limit for execution requests, in seconds.
        client_configuration: QCSClientConfiguration
            Optional client configuration. If none is provided, a default one
            will be loaded.
        endpoint_id: str
            Optional quantum processor endpoint ID, as used in the
            `QCS API Docs`_.
        engagement_manager: EngagementManager
            Optional engagement manager. If none is provided, a default one will
            be created.
        """
        self.device_name = device_name
        self.device_location = "qcs"
        self.as_qvm = as_qvm
        self.noisy = noisy
        self.compiler_timeout = compiler_timeout
        self.execution_timeout = execution_timeout
        self.client_configuration = client_configuration
        self.endpoint_id = endpoint_id
        self.engagement_manager = engagement_manager
        # Eagerly connect so that n_qubits is available right after init.
        self.quantum_computer = get_qc(
            name=self.device_name,
            as_qvm=self.as_qvm,
            noisy=self.noisy,
            compiler_timeout=self.compiler_timeout,
            execution_timeout=self.execution_timeout,
            client_configuration=self.client_configuration,
            endpoint_id=self.endpoint_id,
            engagement_manager=self.engagement_manager,
        )
        self.n_qubits = len(self.quantum_computer.qubits())

    def check_connection(self) -> bool:
        """This method should allow a user to easily check if the credentials
        provided to access the remote QPU is valid.
        If no device was specified in initialisation of object, just runs
        a test connection without a specific device.
        If device was specified, checks if connection to that device
        can be established.
        TODO :
        Accessing Rigetti's QCS is currently unsupported, so this part
        is empty until that is figured out.
        """
        return True

    def METHOD_NAME(self) -> List[List[int]]:
        # Returns the device's qubit connectivity as a list of edges
        # extracted from the networkx topology graph.  NOTE: despite the
        # annotation, each element is actually a 2-tuple of qubit indices.
        G = self.quantum_computer.qubit_topology()
        connectivity_as_list = list(G.edges())
        return connectivity_as_list
1,891 | get product default version | #!/usr/bin/env python
# @HEADER
# ************************************************************************
#
# TriBITS: Tribal Build, Integrate, and Test System
# Copyright 2013 Sandia Corporation
#
# Under the terms of Contract DE-AC04-94AL85000 with Sandia Corporation,
# the U.S. Government retains certain rights in this software.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of the Corporation nor the names of the
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY SANDIA CORPORATION "AS IS" AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL SANDIA CORPORATION OR THE
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# ************************************************************************
# @HEADER
#
# Defaults
#
# Product name and the GCC versions this installer script knows how to build.
gccBaseName = "gcc"
gccDefaultVersion = "4.8.3"
gccSupportedVersions = ["4.8.3"]
#
# Script code
#
from InstallProgramDriver import *
from GeneralScriptSupport import *
class GccInstall:
    """Install hooks for GCC, driven by InstallProgramDriver.

    Each method is a callback the driver invokes at a fixed point in the
    download/configure/build/install workflow.
    """

    def __init__(self):
        # No state is needed before setup(); keep a placeholder attribute.
        self.dummy = None

    #
    # Called before even knowing the product version
    #

    def getScriptName(self):
        """Name of this installer script as shown in help output."""
        return "install-gcc.py"

    def getProductBaseName(self):
        """Base product name ('gcc')."""
        return gccBaseName

    def METHOD_NAME(self):
        """Version installed when the user does not request one."""
        return gccDefaultVersion

    def getProductSupportedVersions(self):
        """List of versions this script knows how to build."""
        return gccSupportedVersions

    #
    # Called after knowing the product version but before parsing the
    # command-line.
    #

    def getProductName(self, version):
        """Full product name, e.g. 'gcc-4.8.3'."""
        return gccBaseName+"-"+version

    def getBaseDirName(self, version):
        """Working directory name for the download/build."""
        return gccBaseName+"-"+version+"-base"

    def getExtraHelpStr(self, version):
        """Extra help text appended to the driver's --help output."""
        return """
This script builds """+self.getProductName(version)+""" from source compiled with the
configured C compilers in your path.
NOTE: The assumed directory structure of the download source provided by the
command --download-cmnd=<download-cmnd> is:
   gcc-<version>-base/
     gcc-<full-version>.tar.gz
"""

    def injectExtraCmndLineOptions(self, clp, version):
        """Register GCC-specific command-line options on the parser."""
        setStdDownloadCmndOption(self, clp, version)
        clp.add_option(
            "--extra-configure-options", dest="extraConfigureOptions", type="string", \
            default="", \
            help="Extra options to add to the 'configure' command for " \
            + self.getProductName(version)+"." \
            +"  Note: This does not override the hard-coded configure options." )

    def echoExtraCmndLineOptions(self, inOptions):
        """Echo the parsed GCC-specific options back as a command line."""
        cmndLine = ""
        cmndLine += "  --download-cmnd='"+inOptions.downloadCmnd+"' \\\n"
        cmndLine += "  --extra-configure-options='"+inOptions.extraConfigureOptions+"' \\\n"
        return cmndLine

    #
    # Called after parsing the command-line
    #

    def setup(self, inOptions):
        """Compute the directory layout from the parsed options."""
        self.inOptions = inOptions
        self.baseDir = os.getcwd()
        self.gccBaseDir = self.baseDir+"/"+self.getBaseDirName(self.inOptions.version)
        self.gccSrcDir = "gcc-"+self.inOptions.version
        self.gccBuildBaseDir = self.gccBaseDir+"/gcc-build"
        self.scriptBaseDir = getScriptBaseDir()

    #
    # Called after setup()
    #

    def doDownload(self):
        """Wipe any previous base dir and run the user's download command."""
        removeDirIfExists(self.gccBaseDir, True)
        echoRunSysCmnd(self.inOptions.downloadCmnd)

    def doUntar(self):
        # BUGFIX: was a Python-2 print statement ('print "..."'), which is a
        # syntax error under Python 3; print(...) works on both.
        print("Nothing to untar!")

    def doConfigure(self):
        """Run GCC's configure in an out-of-source build directory."""
        createDir(self.gccBuildBaseDir)
        echoRunSysCmnd(
            "../"+self.gccSrcDir+"/configure --disable-multilib --enable-languages='c,c++,fortran'"+\
            " "+self.inOptions.extraConfigureOptions+\
            " --prefix="+self.inOptions.installDir,
            workingDir=self.gccBuildBaseDir,
            extraEnv={"CFLAGS":"-O3"},
            )

    def doBuild(self):
        """Run a parallel make in the build directory."""
        echoChDir(self.gccBuildBaseDir)
        echoRunSysCmnd("make " + getParallelOpt(self.inOptions, "-j") \
            + self.inOptions.makeOptions)

    def doInstall(self):
        """Run 'make install' in the build directory."""
        echoChDir(self.gccBuildBaseDir)
        echoRunSysCmnd("make " + getParallelOpt(self.inOptions, "-j") \
            + self.inOptions.makeOptions + " install")

    def getFinalInstructions(self):
        """Post-install usage instructions printed by the driver."""
        return """
To use the installed version of gcc-"""+self.inOptions.version+""" add the path:
    """+self.inOptions.installDir+"""/bin
to your path and that should be it!
Also, when you link shared libs or executables, pass in:
   -Wl,-rpath,"""+self.inOptions.installDir+"""/lib[64]
That will make it so that you don't need to add this GCC libs to your
LD_LIBRARY_PATH.
"""
#
# Executable statements
#
# Hand the GCC hooks to the generic install driver and run the workflow.
gccInstaller = InstallProgramDriver(GccInstall())
gccInstaller.runDriver()
1,892 | activate spectrum node | import logging
import os
from cryptoadvance.specter.managers.node_manager import NodeManager
from cryptoadvance.specter.services.service import (
Service,
devstatus_prod,
)
# A SpecterError can be raised and will be shown to the user as a red banner
from cryptoadvance.specter.specter_error import SpecterError
from flask import current_app as app
from flask import url_for
from flask_apscheduler import APScheduler
from cryptoadvance.specterext.spectrum.spectrum_node import SpectrumNode
from cryptoadvance.spectrum.server import init_app, Spectrum
from cryptoadvance.spectrum.db import db
from cryptoadvance.specter.specter_error import BrokenCoreConnectionException
from cryptoadvance.specter.server_endpoints.welcome.welcome_vm import WelcomeVm
logger = logging.getLogger(__name__)
spectrum_node_alias = "spectrum_node"
class SpectrumService(Service):
    # Service registration metadata consumed by Specter's service loader.
    id = "spectrum"
    name = "Spectrum"
    icon = "spectrum/img/logo.svg"
    logo = "spectrum/img/logo.svg"
    desc = "An electrum hidden behind a core API"
    has_blueprint = True
    blueprint_module = "cryptoadvance.specterext.spectrum.controller"
    devstatus = devstatus_prod
    isolated_client = False

    # TODO: As more Services are integrated, we'll want more robust categorization and sorting logic
    sort_priority = 2

    @property
    def spectrum_node(self):
        """Iterates all nodes and returns the spectrum Node or None if it doesn't exist"""
        for node in app.specter.node_manager.nodes.values():
            if (
                hasattr(node, "fqcn")
                and node.fqcn
                == "cryptoadvance.specterext.spectrum.spectrum_node.SpectrumNode"
            ):
                return node
        return None

    @property
    def is_spectrum_node_available(self):
        """Whether there is a spectrum Node available (activated or not)"""
        return not self.spectrum_node is None

    @property
    def is_spectrum_node_running(self):
        # True only if a Spectrum node exists AND its process reports running.
        if self.is_spectrum_node_available:
            return self.spectrum_node.is_running
        return False

    def callback_specter_added_to_flask_app(self):
        """Flask-app hook: set up the sqlite datadir/DB and, if a Spectrum
        node already exists, try to start it."""
        logger.debug("Setting up Spectrum ...")
        # See comments in config.py which would be the natural place to define SPECTRUM_DATADIR
        # but we want to avoid RuntimeError: Working outside of application context.
        app.config["SPECTRUM_DATADIR"] = os.path.join(
            app.config["SPECTER_DATA_FOLDER"], "sqlite"
        )
        app.config["DATABASE"] = os.path.abspath(
            os.path.join(app.config["SPECTRUM_DATADIR"], "db.sqlite")
        )
        app.config["SQLALCHEMY_DATABASE_URI"] = "sqlite:///" + app.config["DATABASE"]

        if not os.path.exists(app.config["SPECTRUM_DATADIR"]):
            os.makedirs(app.config["SPECTRUM_DATADIR"])
        logger.info(
            f"Intitializing Database in {app.config['SQLALCHEMY_DATABASE_URI']}"
        )
        db.init_app(app)
        db.create_all()
        # Check whether there is a Spectrum node in the node manager of Specter
        if self.is_spectrum_node_available:
            try:
                self.spectrum_node.start_spectrum(app, self.data_folder)
            except BrokenCoreConnectionException as e:
                # Start failure is non-fatal here; re-run the health checker.
                logger.error(e)
                self.specter.checker.run_now()

    # TODO: Refactor this or the next function to only have one
    def enable_default_spectrum(self):
        """* Creates and saves a Spectrum node if there is none with the default config values ("ELECTRUM_DEFAULT_OPTION")
        * Starts Spectrum
        * Switches to the Spectrum node
        """
        if not self.is_spectrum_node_available:
            # No SpectrumNode yet created. Let's do that.
            default_electrum = app.config["ELECTRUM_DEFAULT_OPTION"]
            spectrum_node = SpectrumNode(
                host=app.config["ELECTRUM_OPTIONS"][default_electrum]["host"],
                port=app.config["ELECTRUM_OPTIONS"][default_electrum]["port"],
                ssl=app.config["ELECTRUM_OPTIONS"][default_electrum]["ssl"],
            )
            app.specter.node_manager.nodes[spectrum_node_alias] = spectrum_node
            app.specter.node_manager.save_node(spectrum_node)
        self.spectrum_node.start_spectrum(app, self.data_folder)
        self.METHOD_NAME()

    def enable_spectrum(self, host, port, ssl, METHOD_NAME=False):
        """* Creates a Spectrum node if there is none
        * Starts Spectrum
        * Does by default NOT yet switch to the Spectrum node nor yet save the node to disk

        Note: the METHOD_NAME keyword argument controls whether the node is
        also activated (and thereby saved) after starting.
        """
        if not self.is_spectrum_node_available:
            # No SpectrumNode yet created. Let's do that.
            logger.debug("Creating a Spectrum node ...")
            spectrum_node = SpectrumNode(host=host, port=port, ssl=ssl)
            app.specter.node_manager.nodes[spectrum_node_alias] = spectrum_node
        self.spectrum_node.start_spectrum(app, self.data_folder)
        if METHOD_NAME:
            self.METHOD_NAME()

    def disable_spectrum(self):
        """Stops Spectrum and deletes the Spectrum node"""
        self.spectrum_node.stop_spectrum()
        # NOTE(review): this local assignment has no effect (it does not
        # touch the node manager); the actual removal happens below.
        spectrum_node = None
        if self.is_spectrum_node_available:
            app.specter.node_manager.delete_node(self.spectrum_node, app.specter)
        logger.info("Spectrum disabled")

    def update_electrum(self, host, port, ssl):
        """Point the existing Spectrum node at a different electrum server."""
        if not self.is_spectrum_node_available:
            raise Exception("No Spectrum node available. Cannot start Spectrum.")
        logger.info(f"Updating Spectrum node with {host}:{port} (ssl: {ssl})")
        self.spectrum_node.update_electrum(host, port, ssl, app, self.data_folder)

    def METHOD_NAME(self):
        """Makes the Spectrum node the new active node and saves it to disk"""
        logger.info("Activating Spectrum node.")
        if not self.is_spectrum_node_available:
            raise Exception("Spectrum is not enabled. Cannot start Electrum")
        nm: NodeManager = app.specter.node_manager
        if self.spectrum_node.is_running:
            app.specter.update_active_node(spectrum_node_alias)
            app.specter.node_manager.save_node(self.spectrum_node)
            logger.info(
                f"Activated node {self.spectrum_node} with rpc {self.spectrum_node.rpc}"
            )
        else:
            raise SpecterError(
                "Trying to switch Spectrum node but there seems to be a connection problem."
            )

    def callback_adjust_view_model(self, view_model: WelcomeVm):
        """View-model hook: swap in Spectrum-specific welcome-page partials."""
        if view_model.__class__.__name__ == "WelcomeVm":
            # potentially, we could make a reidrect here:
            # view_model.about_redirect=url_for("spectrum_endpoint.some_enpoint_here")
            # but we do it small here and only replace a specific component:
            view_model.get_started_include = (
                "spectrum/welcome/components/get_started.jinja"
            )
            if self.is_spectrum_node_available:
                view_model.tick_checkboxes_include = (
                    "spectrum/welcome/components/tick_checkboxes.jinja"
                )
        return view_model
1,893 | parse cert | """
Module to interact with keystores
"""
import logging
import os
from datetime import datetime
from salt.exceptions import CommandExecutionError, SaltInvocationError
log = logging.getLogger(__name__)
__virtualname__ = "keystore"
try:
import jks
import OpenSSL
has_depends = True
except ImportError:
has_depends = False
def __virtual__():
    """Only expose this module when the ``jks`` dependency is importable."""
    if not has_depends:
        return (
            False,
            "jks unavailable: {} execution module cant be loaded ".format(
                __virtualname__
            ),
        )
    return __virtualname__
def METHOD_NAME(alias, public_cert, return_cert=False):
    """Summarize an ``OpenSSL.crypto.X509`` certificate as a dict.

    :param alias: keystore alias the certificate is stored under
    :param public_cert: loaded ``OpenSSL.crypto.X509`` object
    :param return_cert: if True, include the PEM body under ``"pem"``

    Returns a dict with alias, sha1 fingerprint, validity window
    (formatted as e.g. "January 01 2030") and an ``expired`` flag.

    Fixes: the unused ``ASN1`` local is gone, and the PEM dump is only
    computed when ``return_cert`` is requested.
    """
    cert_data = {}
    # X.509 notBefore/notAfter are ASN.1 GeneralizedTime: YYYYMMDDHHMMSSZ.
    raw_until = public_cert.get_notAfter().decode(__salt_system_encoding__)
    date_until = datetime.strptime(raw_until, "%Y%m%d%H%M%SZ")
    raw_start = public_cert.get_notBefore().decode(__salt_system_encoding__)
    date_start = datetime.strptime(raw_start, "%Y%m%d%H%M%SZ")
    if return_cert:
        # Only pay for the PEM serialization when the caller asked for it.
        cert_data["pem"] = OpenSSL.crypto.dump_certificate(
            OpenSSL.crypto.FILETYPE_PEM, public_cert
        )
    cert_data["alias"] = alias
    cert_data["sha1"] = public_cert.digest("sha1")
    cert_data["valid_until"] = date_until.strftime("%B %d %Y")
    cert_data["valid_start"] = date_start.strftime("%B %d %Y")
    cert_data["expired"] = date_until < datetime.now()
    return cert_data
def list(keystore, passphrase, alias=None, return_cert=False):
    """
    Lists certificates in a keytool managed keystore.
    :param keystore: The path to the keystore file to query
    :param passphrase: The passphrase to use to decode the keystore
    :param alias: (Optional) If found, displays details on only this key
    :param return_certs: (Optional) Also return certificate PEM.
    .. warning::
        There are security implications for using return_cert to return decrypted certificates.
    CLI Example:
    .. code-block:: bash
        salt '*' keystore.list /usr/lib/jvm/java-8/jre/lib/security/cacerts changeit
        salt '*' keystore.list /usr/lib/jvm/java-8/jre/lib/security/cacerts changeit debian:verisign_-_g5.pem
    """
    ks = jks.KeyStore.load(keystore, passphrase)
    # Either a single (alias, entry) pair, or every entry in the store.
    if alias:
        found = ks.entries.get(alias)
        entries = [(alias, found)] if found else []
    else:
        entries = ks.entries.items()

    decoded_certs = []
    for entry_alias, entry in entries:
        if isinstance(entry, jks.PrivateKeyEntry):
            entry_data = {"type": "PrivateKeyEntry"}
            raw_cert = entry.cert_chain[0][1]
        elif isinstance(entry, jks.TrustedCertEntry):
            entry_data = {"type": "TrustedCertEntry"}
            raw_cert = entry.cert
        else:
            raise CommandExecutionError(
                "Unsupported EntryType detected in keystore"
            )
        public_cert = _get_cert(raw_cert)
        entry_data.update(METHOD_NAME(entry_alias, public_cert, return_cert))
        decoded_certs.append(entry_data)
    return decoded_certs
def _get_cert(certificate):
    """Load a certificate regardless of encoding.

    DER (ASN.1) certificates start with the SEQUENCE tag byte 0x30, which
    is how the two supported encodings are told apart here; anything else
    is treated as PEM.

    :param certificate: raw certificate bytes (DER or PEM)
    """
    if certificate[0] == 0x30:
        filetype = OpenSSL.crypto.FILETYPE_ASN1
    else:
        filetype = OpenSSL.crypto.FILETYPE_PEM
    return OpenSSL.crypto.load_certificate(filetype, certificate)
def add(name, keystore, passphrase, certificate, private_key=None):
    """
    Adds certificates to an existing keystore or creates a new one if necessary.
    :param name: alias for the certificate
    :param keystore: The path to the keystore file to query
    :param passphrase: The passphrase to use to decode the keystore
    :param certificate: The PEM public certificate to add to keystore. Can be a string for file.
    :param private_key: (Optional for TrustedCert) The PEM private key to add to the keystore
    CLI Example:
    .. code-block:: bash
        salt '*' keystore.add aliasname /tmp/test.store changeit /tmp/testcert.crt
        salt '*' keystore.add aliasname /tmp/test.store changeit certificate="-----BEGIN CERTIFICATE-----SIb...BM=-----END CERTIFICATE-----"
        salt '*' keystore.add keyname /tmp/test.store changeit /tmp/512.cert private_key=/tmp/512.key
    """
    ASN1 = OpenSSL.crypto.FILETYPE_ASN1
    PEM = OpenSSL.crypto.FILETYPE_PEM
    certs_list = []
    # Preserve the existing entries: the keystore is rewritten in full below.
    if os.path.isfile(keystore):
        keystore_object = jks.KeyStore.load(keystore, passphrase)
        for alias, loaded_cert in keystore_object.entries.items():
            certs_list.append(loaded_cert)

    try:
        cert_string = __salt__["x509.get_pem_entry"](certificate)
    except SaltInvocationError:
        raise SaltInvocationError(
            "Invalid certificate file or string: {}".format(certificate)
        )

    if private_key:
        # Accept PEM input format, but convert to DER (ASN.1) for loading
        # into the new keystore.  (Comment previously said "DES" — typo.)
        key_string = __salt__["x509.get_pem_entry"](private_key)
        loaded_cert = OpenSSL.crypto.load_certificate(PEM, cert_string)
        loaded_key = OpenSSL.crypto.load_privatekey(PEM, key_string)
        dumped_cert = OpenSSL.crypto.dump_certificate(ASN1, loaded_cert)
        dumped_key = OpenSSL.crypto.dump_privatekey(ASN1, loaded_key)

        new_entry = jks.PrivateKeyEntry.new(name, [dumped_cert], dumped_key, "rsa_raw")
    else:
        new_entry = jks.TrustedCertEntry.new(name, cert_string)

    certs_list.append(new_entry)

    # Rebuild and save the whole keystore with the new entry appended.
    keystore_object = jks.KeyStore.new("jks", certs_list)
    keystore_object.save(keystore, passphrase)
    return True
def remove(name, keystore, passphrase):
    """
    Removes a certificate from an existing keystore.
    Returns True if remove was successful, otherwise False
    :param name: alias for the certificate
    :param keystore: The path to the keystore file to query
    :param passphrase: The passphrase to use to decode the keystore
    CLI Example:
    .. code-block:: bash
        salt '*' keystore.remove aliasname /tmp/test.store changeit
    """
    certs_list = []
    keystore_object = jks.KeyStore.load(keystore, passphrase)
    for alias, loaded_cert in keystore_object.entries.items():
        # NOTE(review): `name not in alias` is a SUBSTRING test, so
        # removing "foo" also drops "foobar".  Looks like it should be
        # an equality check — confirm against callers before changing.
        if name not in alias:
            certs_list.append(loaded_cert)

    if len(keystore_object.entries) != len(certs_list):
        # Entry has been removed, save keystore updates
        keystore_object = jks.KeyStore.new("jks", certs_list)
        keystore_object.save(keystore, passphrase)
        return True
    else:
        # No alias found, notify user
        return False
def get_sha1(certificate):
    """
    Returns the SHA1 sum of a ASN1/PEM certificate
    :param name: ASN1/PEM certificate
    CLI Example:
    .. code-block:: bash
        salt '*' keystore.get_sha1 "(certificate_content_string)"
    """
    # Decode (DER or PEM) and fingerprint in one step.
    return _get_cert(certificate).digest("SHA1")
1,894 | score with l2 normalize | # Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
TransR:
"Learning entity and relation embeddings for knowledge graph completion."
Lin, Yankai, et al.
https://www.aaai.org/ocs/index.php/AAAI/AAAI15/paper/view/9571/9523
"""
import numpy as np
import paddle.fluid as fluid
from .Model import Model
from .utils import lookup_table
class TransR(Model):
    """
    TransR model: entities live in entity space and are projected into a
    per-relation space via a learned transfer matrix before a
    translation-based score (head + rel - tail) is computed.
    """

    def __init__(self,
                 data_reader,
                 hidden_size,
                 margin,
                 learning_rate,
                 args,
                 optimizer="adam"):
        """Configure the base Model and build the train/test programs."""
        self._neg_times = args.neg_times
        super(TransR, self).__init__(
            model_name="TransR",
            data_reader=data_reader,
            hidden_size=hidden_size,
            margin=margin,
            learning_rate=learning_rate,
            args=args,
            optimizer=optimizer)
        self.construct()

    def creat_share_variables(self):
        """
        Share variables for train and test programs.

        Returns the entity embedding, relation embedding, and the
        per-relation transfer matrix (initialized to stacked identity
        matrices, i.e. the projection starts as a no-op).
        """
        entity_embedding = fluid.layers.create_parameter(
            shape=self._ent_shape,
            dtype="float32",
            name=self.ent_name,
            default_initializer=fluid.initializer.Xavier())
        relation_embedding = fluid.layers.create_parameter(
            shape=self._rel_shape,
            dtype="float32",
            name=self.rel_name,
            default_initializer=fluid.initializer.Xavier())
        # One flattened (hidden x hidden) identity matrix per relation.
        init_values = np.tile(
            np.identity(
                self._hidden_size, dtype="float32").reshape(-1),
            (self._relation_total, 1))
        transfer_matrix = fluid.layers.create_parameter(
            shape=[
                self._relation_total, self._hidden_size * self._hidden_size
            ],
            dtype="float32",
            name=self._prefix + "transfer_matrix",
            default_initializer=fluid.initializer.NumpyArrayInitializer(
                init_values))
        return entity_embedding, relation_embedding, transfer_matrix

    def METHOD_NAME(self, head, rel, tail):
        """
        Score function of TransR: L2-normalize each input, then return the
        translation residual head + rel - tail (inputs are expected to be
        already projected into relation space by the caller).
        """
        head = fluid.layers.l2_normalize(head, axis=-1)
        rel = fluid.layers.l2_normalize(rel, axis=-1)
        tail = fluid.layers.l2_normalize(tail, axis=-1)
        score = head + rel - tail
        return score

    @staticmethod
    def matmul_with_expend_dims(x, y):
        """Batched matmul of [B, D] x with [B, D, D] y, via a temporary
        [B, 1, D] expansion; returns [B, D]."""
        x = fluid.layers.unsqueeze(x, axes=[1])
        res = fluid.layers.matmul(x, y)
        return fluid.layers.squeeze(res, axes=[1])

    def construct_train_program(self):
        """
        Construct train program: margin-based ranking loss between scores
        of positive and negative triples.
        """
        entity_embedding, relation_embedding, transfer_matrix = self.creat_share_variables(
        )
        pos_head = lookup_table(self.train_pos_input[:, 0], entity_embedding)
        pos_tail = lookup_table(self.train_pos_input[:, 2], entity_embedding)
        pos_rel = lookup_table(self.train_pos_input[:, 1], relation_embedding)
        neg_head = lookup_table(self.train_neg_input[:, 0], entity_embedding)
        neg_tail = lookup_table(self.train_neg_input[:, 2], entity_embedding)
        neg_rel = lookup_table(self.train_neg_input[:, 1], relation_embedding)

        # Project entities into relation space with that relation's matrix.
        rel_matrix = fluid.layers.reshape(
            lookup_table(self.train_pos_input[:, 1], transfer_matrix),
            [-1, self._hidden_size, self._hidden_size])
        pos_head_trans = self.matmul_with_expend_dims(pos_head, rel_matrix)
        pos_tail_trans = self.matmul_with_expend_dims(pos_tail, rel_matrix)

        # Negative samples are projected with their own relation matrices
        # (trans_neg is hard-wired True; the else branch is kept for reference).
        trans_neg = True
        if trans_neg:
            rel_matrix_neg = fluid.layers.reshape(
                lookup_table(self.train_neg_input[:, 1], transfer_matrix),
                [-1, self._hidden_size, self._hidden_size])
            neg_head_trans = self.matmul_with_expend_dims(neg_head,
                                                          rel_matrix_neg)
            neg_tail_trans = self.matmul_with_expend_dims(neg_tail,
                                                          rel_matrix_neg)
        else:
            neg_head_trans = self.matmul_with_expend_dims(neg_head, rel_matrix)
            neg_tail_trans = self.matmul_with_expend_dims(neg_tail, rel_matrix)

        pos_score = self.METHOD_NAME(pos_head_trans, pos_rel,
                                     pos_tail_trans)
        neg_score = self.METHOD_NAME(neg_head_trans, neg_rel,
                                     neg_tail_trans)

        # L1 distance of the residuals; hinge loss with margin.
        pos = fluid.layers.reduce_sum(
            fluid.layers.abs(pos_score), -1, keep_dim=False)
        neg = fluid.layers.reduce_sum(
            fluid.layers.abs(neg_score), -1, keep_dim=False)
        neg = fluid.layers.reshape(
            neg, shape=[-1, self._neg_times], inplace=True)
        loss = fluid.layers.reduce_mean(
            fluid.layers.relu(pos - neg + self._margin))
        return [loss]

    def construct_test_program(self):
        """
        Construct test program: rank every entity as a replacement for the
        head/tail of the test triple by L1 distance.
        """
        entity_embedding, relation_embedding, transfer_matrix = self.creat_share_variables(
        )
        rel_matrix = fluid.layers.reshape(
            lookup_table(self.test_input[1], transfer_matrix),
            [self._hidden_size, self._hidden_size])
        # Project ALL entities into the test relation's space at once.
        entity_embedding_trans = fluid.layers.matmul(entity_embedding,
                                                     rel_matrix, False, False)
        rel_vec = lookup_table(self.test_input[1], relation_embedding)
        entity_embedding_trans = fluid.layers.l2_normalize(
            entity_embedding_trans, axis=-1)
        rel_vec = fluid.layers.l2_normalize(rel_vec, axis=-1)
        head_vec = lookup_table(self.test_input[0], entity_embedding_trans)
        tail_vec = lookup_table(self.test_input[2], entity_embedding_trans)
        # The paddle fluid.layers.topk GPU OP is very inefficient
        # we do sort operation in the evaluation step using multiprocessing
        id_replace_head = fluid.layers.reduce_sum(
            fluid.layers.abs(entity_embedding_trans + rel_vec - tail_vec),
            dim=1)
        id_replace_tail = fluid.layers.reduce_sum(
            fluid.layers.abs(entity_embedding_trans - rel_vec - head_vec),
            dim=1)
        return [id_replace_head, id_replace_tail]
1,895 | activate | from django.conf import settings
from django.contrib.sites.shortcuts import get_current_site
from registration import signals
from registration.forms import RegistrationForm
from registration.models import RegistrationProfile
class DefaultBackend(object):
    """Registration backend implementing the classic three-step workflow.

    1. The user signs up and an *inactive* account is created.
    2. An activation email with an activation link is sent.
    3. Following the link activates the account.

    Requirements:

    * ``registration`` must appear in ``INSTALLED_APPS`` (this backend
      uses models defined in that application).
    * ``ACCOUNT_ACTIVATION_DAYS`` must be set to the integer number of
      days after registration during which activation is allowed.
    * The templates ``registration/activation_email_subject.txt`` and
      ``registration/activation_email.txt`` must exist; they render the
      activation email (see ``register`` below).

    Registration can be temporarily closed by setting
    ``REGISTRATION_OPEN = False``; omitting the setting (or setting it
    to ``True``) leaves registration open.

    Internally, activation state is tracked by an activation key stored
    on a ``registration.models.RegistrationProfile`` instance; see that
    model and its manager for details.
    """

    def register(self, request, **kwargs):
        """Create and return a new, initially inactive, user account.

        Expects ``username``, ``email`` and ``password1`` in *kwargs*.
        A ``RegistrationProfile`` holding the activation key is created
        alongside the ``User``, and the activation email is sent (see
        ``RegistrationProfile.send_activation_email()`` for the
        templates and contexts involved). Finally the
        ``registration.signals.user_registered`` signal is emitted with
        the new user and this backend class as sender.
        """
        username = kwargs['username']
        email = kwargs['email']
        password = kwargs['password1']
        site = get_current_site(request)
        new_user = RegistrationProfile.objects.create_inactive_user(
            username, email, password, site)
        signals.user_registered.send(
            sender=self.__class__, user=new_user, request=request)
        return new_user

    def METHOD_NAME(self, request, activation_key):
        """Activate the account matching *activation_key*, if possible.

        On success, emits ``registration.signals.user_activated`` with
        the newly activated user and this backend class as sender.
        Returns the activated user, or a falsy value on failure.
        """
        activated = RegistrationProfile.objects.activate_user(activation_key)
        if activated:
            signals.user_activated.send(
                sender=self.__class__, user=activated, request=request)
        return activated

    def registration_allowed(self, request):
        """Report whether registration is currently permitted.

        Driven by the ``REGISTRATION_OPEN`` setting: missing or ``True``
        means open; an explicit ``False`` closes registration.
        """
        return getattr(settings, 'REGISTRATION_OPEN', True)

    def get_form_class(self, request):
        """Return the form class used for user registration."""
        return RegistrationForm

    def post_registration_redirect(self, request, user):
        """URL (name, args, kwargs) to redirect to after registration."""
        return ('registration_complete', (), {})

    def post_activation_redirect(self, request, user):
        """URL (name, args, kwargs) to redirect to after activation."""
        return ('registration_activation_complete', (), {})
1,896 | nonfatal error | from Cython.TestUtils import CythonTest
import Cython.Compiler.Errors as Errors
from Cython.Compiler.Nodes import *
from Cython.Compiler.ParseTreeTransforms import *
from Cython.Compiler.Buffer import *
class TestBufferParsing(CythonTest):
    # These tests exercise only the raw parser: the number and contents
    # of the bracket arguments are NOT validated here. The one special
    # case is "dtype" / the first positional argument, which is parsed
    # as a type rather than as an expression.

    def parse(self, s):
        """Parse *s*, asserting success, and return the tree root."""
        return self.should_not_fail(lambda: self.fragment(s)).root

    def not_parseable(self, expected_error, s):
        """Assert that parsing *s* fails with *expected_error*."""
        err = self.should_fail(lambda: self.fragment(s), Errors.CompileError)
        self.assertEqual(expected_error, err.message_only)

    def test_basic(self):
        tree = self.parse(u"cdef object[float, 4, ndim=2, foo=foo] x")
        buffer_node = tree.stats[0].base_type
        self.assertTrue(isinstance(buffer_node, TemplatedTypeNode))
        self.assertEqual(2, len(buffer_node.positional_args))
        # print buffer_node.dump()
        # should put more here...

    def test_type_pos(self):
        self.parse(u"cdef object[short unsigned int, 3] x")

    def test_type_keyword(self):
        self.parse(u"cdef object[foo=foo, dtype=short unsigned int] x")

    def test_pos_after_key(self):
        self.not_parseable("Non-keyword arg following keyword arg",
                           u"cdef object[foo=1, 2] x")


# See also tests/error/e_bufaccess.pyx and tets/run/bufaccess.pyx
# THESE TESTS ARE NOW DISABLED, the code they test was pretty much
# refactored away
class TestBufferOptions(CythonTest):
    # Tests the full parsing of the options within the brackets.

    def METHOD_NAME(self, error):
        # This instance is passed as the *context* to the transform, so
        # compile errors are reported back through this callback.
        self.error = error
        self.assertTrue(self.expect_error)

    def parse_opts(self, opts, expect_error=False):
        """Parse a buffer declaration with *opts*; return the buffer type.

        When *expect_error* is true, the declaration must have been
        rejected and dropped from the tree instead.
        """
        assert opts != ""
        src = u"def f():\n cdef object[%s] x" % opts
        self.expect_error = expect_error
        root = self.fragment(src, pipeline=[NormalizeTree(self), PostParse(self)]).root
        if expect_error:
            self.assertTrue(len(root.stats[0].body.stats) == 0)
            return None
        vardef = root.stats[0].body.stats[0]
        # Plain assert: this validates the test code itself.
        assert isinstance(vardef, CVarDefNode)
        buftype = vardef.base_type
        self.assertTrue(isinstance(buftype, TemplatedTypeNode))
        self.assertTrue(isinstance(buftype.base_type_node, CSimpleBaseTypeNode))
        self.assertEqual(u"object", buftype.base_type_node.name)
        return buftype

    def non_parse(self, expected_err, opts):
        self.parse_opts(opts, expect_error=True)
        # e = self.should_fail(lambda: self.parse_opts(opts))
        self.assertEqual(expected_err, self.error.message_only)

    def __test_basic(self):
        buf = self.parse_opts(u"unsigned short int, 3")
        self.assertTrue(isinstance(buf.dtype_node, CSimpleBaseTypeNode))
        self.assertTrue(buf.dtype_node.signed == 0 and buf.dtype_node.longness == -1)
        self.assertEqual(3, buf.ndim)

    def __test_dict(self):
        buf = self.parse_opts(u"ndim=3, dtype=unsigned short int")
        self.assertTrue(isinstance(buf.dtype_node, CSimpleBaseTypeNode))
        self.assertTrue(buf.dtype_node.signed == 0 and buf.dtype_node.longness == -1)
        self.assertEqual(3, buf.ndim)

    def __test_ndim(self):
        self.parse_opts(u"int, 2")
        self.non_parse(ERR_BUF_NDIM, u"int, 'a'")
        self.non_parse(ERR_BUF_NDIM, u"int, -34")

    def __test_use_DEF(self):
        # NOTE(review): inner indentation of this fragment reconstructed;
        # the source dump had whitespace stripped — confirm against VCS.
        t = self.fragment(u"""
DEF ndim = 3
def f():
    cdef object[int, ndim] x
    cdef object[ndim=ndim, dtype=int] y
""", pipeline=[NormalizeTree(self), PostParse(self)]).root
        stats = t.stats[0].body.stats
        self.assertTrue(stats[0].base_type.ndim == 3)
        self.assertTrue(stats[1].base_type.ndim == 3)

    # add exotic and impossible combinations as they come along...
# Allow running this test module directly from the command line.
if __name__ == '__main__':
    import unittest
    unittest.main() |
1,897 | update repository | # Copyright The Linux Foundation and each contributor to CommunityBridge.
# SPDX-License-Identifier: MIT
"""
Controller related to repository operations.
"""
import uuid
import cla.hug_types
from cla.utils import get_repository_instance, get_supported_repository_providers
from cla.models.dynamo_models import Project, Repository, UserPermissions, GitHubOrg
from cla.models import DoesNotExist
from cla.auth import AuthUser
def get_repositories():
    """
    Returns a list of repositories in the CLA system.

    :return: List of repositories in dict format.
    :rtype: [dict]
    """
    all_repositories = get_repository_instance().all()
    return [repo.to_dict() for repo in all_repositories]
def get_repository(repository_id):
    """
    Returns the CLA repository requested by ID.

    :param repository_id: The repository ID.
    :type repository_id: ID
    :return: dict representation of the repository object, or an
        {'errors': ...} dict when the repository does not exist.
    :rtype: dict
    """
    repository = get_repository_instance()
    try:
        repository.load(str(repository_id))
    except DoesNotExist as err:
        return {'errors': {'repository_id': str(err)}}
    else:
        return repository.to_dict()
def create_repository(auth_user: AuthUser,  # pylint: disable=too-many-arguments
                      repository_project_id,
                      repository_name,
                      repository_organization_name,
                      repository_type,
                      repository_url,
                      repository_external_id=None):
    """
    Creates a repository and returns the newly created repository in dict format.

    :param auth_user: The authenticated user performing this operation.
    :type auth_user: AuthUser
    :param repository_project_id: The ID of the repository project.
    :type repository_project_id: string
    :param repository_name: The new repository name.
    :type repository_name: string
    :param repository_organization_name: The repository organization name.
    :type repository_organization_name: string
    :param repository_type: The new repository type ('github', 'gerrit', etc).
    :type repository_type: string
    :param repository_url: The new repository URL.
    :type repository_url: string
    :param repository_external_id: The ID of the repository from the repository provider.
    :type repository_external_id: string
    :return: dict representation of the new repository object, or an
        {'errors': ...} dict when validation fails.
    :rtype: dict
    """
    # Check that the organization exists.
    github_organization = GitHubOrg()
    try:
        github_organization.load(str(repository_organization_name))
    except DoesNotExist as err:
        return {'errors': {'organization_name': str(err)}}
    # Check that the project is valid.
    project = Project()
    try:
        project.load(str(repository_project_id))
    except DoesNotExist as err:
        return {'errors': {'repository_project_id': str(err)}}
    # Get the SFDC project identifier.
    sfdc_id = project.get_project_external_id()
    # Validate that the user is authorized for this project.
    # NOTE(review): cla.controllers.project is not explicitly imported in this
    # module; this relies on the `cla` package exposing it — confirm.
    can_access = cla.controllers.project.check_user_authorization(auth_user, sfdc_id)
    if not can_access['valid']:
        return can_access['errors']
    # Reject the request if this external repository is already linked to a
    # contract group.
    if repository_external_id is not None:
        # Search for the repository.
        linked_repository = Repository().get_repository_by_external_id(repository_external_id, repository_type)
        # If found, return an error.
        if linked_repository is not None:
            return {'errors': {'repository_external_id': 'This repository is already configured for a contract group.'}}
    repository = Repository()
    repository.set_repository_id(str(uuid.uuid4()))
    repository.set_repository_project_id(str(repository_project_id))
    repository.set_repository_sfdc_id(str(sfdc_id))
    repository.set_repository_name(repository_name)
    repository.set_repository_organization_name(repository_organization_name)
    repository.set_repository_type(repository_type)
    repository.set_repository_url(repository_url)
    if repository_external_id is not None:
        repository.set_repository_external_id(repository_external_id)
    repository.save()
    return repository.to_dict()
def METHOD_NAME(repository_id,  # pylint: disable=too-many-arguments
                repository_project_id=None,
                repository_type=None,
                repository_name=None,
                repository_url=None,
                repository_external_id=None):
    """
    Updates a repository and returns the newly updated repository in dict format.
    A value of None means the corresponding field will not be updated.

    :param repository_id: ID of the repository to update.
    :type repository_id: ID
    :param repository_project_id: ID of the repository project.
    :type repository_project_id: string
    :param repository_type: New type for repository ('github', 'gerrit', etc).
    :type repository_type: string | None
    :param repository_name: New name for the repository.
    :type repository_name: string | None
    :param repository_url: New URL for the repository.
    :type repository_url: string | None
    :param repository_external_id: ID of the repository from the service provider.
    :type repository_external_id: string
    :return: dict representation of the repository object, or an
        {'errors': ...} dict when validation fails.
    :rtype: dict
    """
    repository = Repository()
    try:
        repository.load(str(repository_id))
    except DoesNotExist as err:
        return {'errors': {'repository_id': str(err)}}
    # TODO: Ensure project_id exists.
    if repository_project_id is not None:
        repository.set_repository_project_id(str(repository_project_id))
    if repository_type is not None:
        supported_repo_types = get_supported_repository_providers().keys()
        if repository_type in supported_repo_types:
            repository.set_repository_type(repository_type)
        else:
            return {'errors': {'repository_type':
                               'Invalid value passed. The accepted values are: (%s)' \
                               %'|'.join(supported_repo_types)}}
    if repository_external_id is not None:
        # Check whether another repository is already linked with this external_id.
        linked_repository = Repository().get_repository_by_external_id(repository_external_id, repository.get_repository_type())
        # If found, return an error.
        if linked_repository is not None:
            return {'errors': {'repository_external_id': 'This repository is already configured for a contract group.'}}
        repository.set_repository_external_id(repository_external_id)
    if repository_name is not None:
        repository.set_repository_name(repository_name)
    if repository_url is not None:
        try:
            val = cla.hug_types.url(repository_url)
            repository.set_repository_url(val)
        except ValueError:
            # The exception detail is not surfaced to callers; keep a stable,
            # generic message.
            return {'errors': {'repository_url': 'Invalid URL specified'}}
    repository.save()
    return repository.to_dict()
def delete_repository(repository_id):
    """
    Deletes a repository based on ID.

    :param repository_id: The ID of the repository.
    :type repository_id: ID
    """
    repository = Repository()
    try:
        repository.load(str(repository_id))
    except DoesNotExist as err:
        return {'errors': {'repository_id': str(err)}}
    else:
        repository.delete()
        return {'success': True}
1,898 | infer | # Copyright 2020-2023 OpenDR European Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import numpy as np
import zipfile
from urllib.request import urlretrieve
import shutil
# Detectron imports
from detectron2.engine import DefaultPredictor
from detectron2.config import get_cfg
from detectron2 import model_zoo
from detectron2.data import MetadataCatalog, DatasetCatalog
from detectron2.engine import DefaultTrainer
# OpenDR engine imports
from opendr.engine.learners import Learner
from opendr.engine.constants import OPENDR_SERVER_URL
from opendr.engine.data import Image
# single demo grasp module imports
from opendr.control.single_demo_grasp.training.learner_utils import register_datasets
class SingleDemoGraspLearner(Learner):
    """Single-demonstration grasp learner built on Detectron2 Keypoint R-CNN.

    Fine-tunes the COCO keypoint_rcnn_R_50_FPN_3x model on a per-object
    dataset located under ``data_directory/object_name`` and predicts a
    bounding box plus 12 grasp keypoints at inference time.
    """

    def __init__(self, object_name=None, data_directory=None, lr=0.0008, batch_size=512, img_per_step=2, num_workers=2,
                 num_classes=1, iters=1000, threshold=0.8, device='cuda'):
        """Configure the Detectron2 pipeline.

        :param object_name: target object name; selects the dataset folder
            under *data_directory*
        :param data_directory: root directory of the per-object datasets
        :param lr: solver base learning rate
        :param batch_size: ROI head batch size per image
        :param img_per_step: images per SGD step (SOLVER.IMS_PER_BATCH)
        :param num_workers: dataloader worker count
        :param num_classes: number of object classes for the ROI heads
        :param iters: maximum training iterations
        :param threshold: inference score threshold for detections
        :param device: 'cuda' or 'cpu'
        """
        super(SingleDemoGraspLearner, self).__init__(lr=lr, threshold=threshold, batch_size=batch_size, device=device,
                                                     iters=iters)
        self.dataset_dir = data_directory
        self.object_name = object_name
        self.output_dir = os.path.join(self.dataset_dir, self.object_name, "output")
        self.num_workers = num_workers
        self.num_classes = num_classes
        self.temp_dir = os.path.join(self.dataset_dir, "download_temp")
        # Start from the model-zoo Keypoint R-CNN config and checkpoint.
        self.cfg = get_cfg()
        self.cfg.merge_from_file(model_zoo.get_config_file(
            "COCO-Keypoints/keypoint_rcnn_R_50_FPN_3x.yaml"))
        self.cfg.DATALOADER.NUM_WORKERS = self.num_workers
        self.cfg.MODEL.WEIGHTS = model_zoo.get_checkpoint_url(
            "COCO-Keypoints/keypoint_rcnn_R_50_FPN_3x.yaml")
        self.cfg.MODEL.DEVICE = device
        self.cfg.SOLVER.IMS_PER_BATCH = img_per_step
        self.cfg.SOLVER.BASE_LR = lr
        self.cfg.SOLVER.MAX_ITER = iters
        # Honor the num_classes argument (previously hard-coded to 1, which
        # silently ignored the parameter); the default of 1 keeps the old
        # behavior for existing callers.
        self.cfg.MODEL.ROI_HEADS.NUM_CLASSES = self.num_classes
        self.cfg.MODEL.ROI_HEADS.BATCH_SIZE_PER_IMAGE = batch_size
        self.cfg.MODEL.ROI_KEYPOINT_HEAD.NUM_KEYPOINTS = 12
        self.cfg.OUTPUT_DIR = self.output_dir
        os.makedirs(self.cfg.OUTPUT_DIR, exist_ok=True)

    def fit(self):
        """Register the datasets and train the model from the zoo weights."""
        self.metadata = self._prepare_datasets()
        self.cfg.DATASETS.TRAIN = (self.object_name + "_train",)
        self.cfg.DATASETS.TEST = ()
        self.trainer = DefaultTrainer(self.cfg)
        self.trainer.resume_or_load(resume=False)
        self.trainer.train()

    def METHOD_NAME(self, img_data):
        """Run detection on one image.

        :param img_data: input image (engine Image or raw array)
        :return: (1, bbox, keypoints) for the top detection, or
            (0, None, None) when nothing is detected
        """
        if not isinstance(img_data, Image):
            img_data = Image(img_data)
        img_data = img_data.convert(format='channels_last', channel_order='rgb')
        # Set the score threshold BEFORE building the predictor:
        # DefaultPredictor clones the config at construction time, so
        # assigning the threshold afterwards (as the previous code did)
        # had no effect on the predictor actually used.
        self.cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = self.threshold
        self.predictor = DefaultPredictor(self.cfg)
        output = self.predictor(img_data)
        bounding_box = output["instances"].to("cpu").pred_boxes.tensor.numpy()
        keypoints_pred = output["instances"].to("cpu").pred_keypoints.numpy()
        if len(bounding_box) > 0:
            return 1, bounding_box[0], keypoints_pred[0]
        else:
            return 0, None, None

    def _prepare_datasets(self):
        """Load annotation arrays and register train/val datasets.

        :return: the Detectron2 metadata object for the registered datasets
        """
        annotation_dir = os.path.join(self.dataset_dir, self.object_name, 'images/annotations')
        bbx_train = np.load(os.path.join(annotation_dir, 'boxes_train.npy'), encoding='bytes')
        bbx_val = np.load(os.path.join(annotation_dir, 'boxes_val.npy'), encoding='bytes')
        kps_train = np.load(os.path.join(annotation_dir, 'kps_train.npy'), encoding='bytes')
        kps_val = np.load(os.path.join(annotation_dir, 'kps_val.npy'), encoding='bytes')
        # Plain local variable instead of the previous vars()[...] hack:
        # writing into the locals() dict is not a supported way to create
        # function locals in CPython.
        metadata, train_set, val_set = register_datasets(DatasetCatalog, MetadataCatalog,
                                                         self.dataset_dir, self.object_name,
                                                         bbx_train, kps_train, bbx_val, kps_val)
        self.num_train = len(bbx_train)
        self.num_val = len(bbx_val)
        self.num_kps = len(kps_train[0][0])
        self.train_set = train_set
        self.val_set = val_set
        return metadata

    def load(self, path_to_model):
        """Point the config at a trained checkpoint and build a predictor.

        Raises AssertionError when the checkpoint file does not exist
        (kept as `assert` for backward compatibility; note it is a no-op
        under `python -O`).
        """
        if os.path.isfile(path_to_model):
            self.cfg.MODEL.WEIGHTS = path_to_model
            self.predictor = DefaultPredictor(self.cfg)
            print("Model loaded!")
        else:
            assert os.path.isfile(path_to_model), "Checkpoint {} not found!".format(path_to_model)

    def save(self, path):
        """Copy the trained checkpoint (model_final.pth) to *path*."""
        if os.path.isfile(os.path.join(self.output_dir, "model_final.pth")):
            print("found the trained model at: " + os.path.join(self.output_dir, "model_final.pth"))
            if path != self.output_dir:
                print("copying the trained model to your desired directory at: ")
                print(path)
                shutil.copyfile(os.path.join(self.output_dir, "model_final.pth"), os.path.join(path, "model_final.pth"))
            else:
                print("model is already saved at: " + os.path.join(self.output_dir, "model_final.pth"))
        else:
            print("no trained model was found...")

    def download(self, path=None, object_name=None):
        """Download and extract pretrained model + data for *object_name*.

        :param path: destination directory (defaults to the temp dir)
        :param object_name: object to fetch (defaults to "pendulum")
        """
        if path is None:
            path = self.temp_dir
        if object_name is None:
            object_name = "pendulum"
        if not os.path.exists(path):
            os.makedirs(path)
        print("Downloading pretrained model, training data and samples for: " + object_name)
        filename = object_name + ".zip"
        # NOTE(review): os.path.join on a URL is POSIX-separator dependent;
        # works on Linux targets, but consider urllib.parse.urljoin.
        url = os.path.join(OPENDR_SERVER_URL, "control/single_demo_grasp/", filename)
        destination_file = os.path.join(path, filename)
        urlretrieve(url, destination_file)
        with zipfile.ZipFile(destination_file, 'r') as zip_ref:
            zip_ref.extractall(path)
        # Remove the zip file after extracting its contents (was a bare
        # string statement posing as a comment).
        os.remove(destination_file)

    def eval(self):
        """This method is not used in this implementation."""
        raise NotImplementedError()

    def optimize(self):
        """This method is not used in this implementation."""
        raise NotImplementedError()

    def reset(self):
        """This method is not used in this implementation."""
        raise NotImplementedError()
1,899 | set speed | #!/usr/bin/env python
#############################################################################
# Dell
#
# Module contains an implementation of SONiC FAN Base API and
# provides various info about the FANs which are available in the platform
#
#############################################################################
import logging
from sonic_py_common.general import getstatusoutput_noshell
try:
from sonic_fan.fan_base import FanBase
except ImportError as e:
raise ImportError (str(e) + "- required module not found")
class FanUtil(FanBase):
    """Platform-specific FANutil class (Dell).

    Fans are organized as FANTRAY_NUM_ON_MAIN_BOARD trays of
    NUM_FANS_PERTRAY rotors each (odd fan index = front rotor, even =
    rear). Presence, speed and direction are read via ipmitool.
    """

    FANTRAY_NUM_ON_MAIN_BOARD = 7
    NUM_FANS_PERTRAY = 2
    FANTRAY_NUM_START_IDX = 1
    FRU_FAN_START_IDX = 1
    # NOTE(review): these command templates are class-level lists whose
    # element [3] is mutated in place by the methods below — not
    # thread-safe; confirm single-threaded use.
    IPMI_FAN_PRESENCE = ["ipmitool", "sensor", "get", ""]
    IPMI_FAN_FRONT_SPEED = ["ipmitool", "sdr", "get", ""]
    IPMI_FAN_REAR_SPEED = ["ipmitool", "sdr", "get", ""]
    IPMI_FRU_DATA = ["ipmitool", "fru", "print", ""]

    def __init__(self, log_level=logging.DEBUG):
        FanBase.__init__(self)
        self.num_fans = (self.FANTRAY_NUM_ON_MAIN_BOARD*self.NUM_FANS_PERTRAY)

    def get_fan_status(self, fan_id):
        """Return the presence-sensor status string for tray *fan_id*."""
        try:
            self.IPMI_FAN_PRESENCE[3] = 'FAN' + str(fan_id) + '_prsnt'
            ret_status, ipmi_cmd_ret = getstatusoutput_noshell(self.IPMI_FAN_PRESENCE)
            if ret_status == 0:
                return(ipmi_cmd_ret.splitlines()[5].strip(' ').strip('[]'))
        except Exception:
            logging.error('Failed to execute : %s'%(' '.join(self.IPMI_FAN_PRESENCE)))

    def get_front_fan_speed(self, fan_id):
        """Return the front-rotor RPM string for tray *fan_id*, or None."""
        try:
            self.IPMI_FAN_FRONT_SPEED[3] = 'Fan' + str(fan_id) + '_Front_rpm'
            ret_status, ipmi_cmd_ret = getstatusoutput_noshell(self.IPMI_FAN_FRONT_SPEED)
            if ret_status == 0:
                return ipmi_cmd_ret.splitlines()[3].split(':')[1].split(' ')[1]
        except Exception:
            logging.error('Failed to execute : %s'%(' '.join(self.IPMI_FAN_FRONT_SPEED)))

    def get_rear_fan_speed(self, fan_id):
        """Return the rear-rotor RPM string for tray *fan_id*, or None."""
        try:
            self.IPMI_FAN_REAR_SPEED[3] = 'Fan' + str(fan_id) + '_Rear_rpm'
            ret_status, ipmi_cmd_ret = getstatusoutput_noshell(self.IPMI_FAN_REAR_SPEED)
            if ret_status == 0:
                return ipmi_cmd_ret.splitlines()[3].split(':')[1].split(' ')[1]
        except Exception:
            logging.error('Failed to execute : %s'%(' '.join(self.IPMI_FAN_REAR_SPEED)))

    def get_fan_direction_from_fru(self, fru_id, reg_name):
        """Read FAN FRU info; return 'F2B'/'B2F' from *reg_name*, else None."""
        output = None
        try:
            self.IPMI_FRU_DATA[3] = str(fru_id)
            status, ipmi_fru_list = getstatusoutput_noshell(self.IPMI_FRU_DATA)
            if status == 0:
                for item in ipmi_fru_list.split("\n"):
                    if reg_name in item:
                        output = item.strip()
            if output is None:
                logging.error('\nFailed to fetch: ' + reg_name + ' sensor ')
                # Bail out instead of dereferencing None (the previous code
                # fell through to output.split() and raised AttributeError).
                return None
            output = output.split(':')[1].strip(' ')
            if output == 'F2B' or output == 'B2F':
                return output
        except Exception:
            # Log the command, not ipmi_fru_list, which may be unbound here.
            logging.error('Failed to execute : %s'%(' '.join(self.IPMI_FRU_DATA)))

    def get_num_fans(self):
        """Return the total number of fan rotors on the platform."""
        return self.num_fans

    def get_presence(self, index):
        """Return True when the tray holding fan *index* is present."""
        if index is None:
            return False
        if index < self.FANTRAY_NUM_START_IDX or index > self.FANTRAY_NUM_START_IDX + self.num_fans - 1:
            logging.error('Invalid FAN index:%d', index)
            return False
        # Floor division: with '/', Python 3 yields a float tray index,
        # producing sensor names like 'FAN1.5_prsnt' that never match.
        tray_index = ((index-1)//self.NUM_FANS_PERTRAY) + 1
        return self.get_fan_status(tray_index) == 'Device Present'

    def get_status(self, index):
        """Return True when both rotors of fan *index*'s tray are spinning."""
        if index is None:
            return False
        if index < self.FANTRAY_NUM_START_IDX or index > self.FANTRAY_NUM_START_IDX + self.num_fans - 1:
            logging.error('Invalid FAN index:%d', index)
            return False
        tray_index = ((index-1)//self.NUM_FANS_PERTRAY) + 1
        fantray_front_speed = self.get_front_fan_speed(tray_index)
        fantray_rear_speed = self.get_rear_fan_speed(tray_index)
        return (fantray_front_speed != '0' and fantray_rear_speed != '0')

    def get_direction(self, index):
        """Return 'INTAKE'/'EXHAUST' for fan *index*, or None on failure."""
        if index is None:
            return None
        if index < self.FANTRAY_NUM_START_IDX or index > self.FANTRAY_NUM_START_IDX + self.num_fans - 1:
            logging.error('Invalid FAN index:%d', index)
            return None
        tray_index = (index-1)//self.NUM_FANS_PERTRAY
        fru_id = self.FRU_FAN_START_IDX + tray_index
        direction = self.get_fan_direction_from_fru(fru_id, 'Board Extra')
        if direction == 'B2F':
            return "INTAKE"
        elif direction == 'F2B':
            return "EXHAUST"
        else:
            return None

    def get_speed(self, index):
        """Return the RPM of fan *index* (front if odd, rear if even)."""
        if index is None:
            return 0
        if index < self.FANTRAY_NUM_START_IDX or index > self.FANTRAY_NUM_START_IDX + self.num_fans - 1:
            logging.error('Invalid FAN index:%d', index)
            return 0
        tray_index = ((index-1)//self.NUM_FANS_PERTRAY) + 1
        if (index % 2 != 0):
            fantray_speed = self.get_front_fan_speed(tray_index)
        else:
            fantray_speed = self.get_rear_fan_speed(tray_index)
        # Guard against a failed ipmitool read (None) as well as absence.
        if self.get_presence(index) and fantray_speed is not None:
            return int(fantray_speed.strip())
        return 0

    def METHOD_NAME(self, val):
        """Fan speed is firmware-controlled on this platform."""
        logging.error("Not allowed to set fan speed!")
        return False
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.