id int64 0 300k | label stringlengths 1 74 ⌀ | text stringlengths 4k 8k |
|---|---|---|
# This file is part of Indico.
# Copyright (C) 2002 - 2023 CERN
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the MIT License; see the
# LICENSE file for more details.
import re
from datetime import datetime, timedelta
from difflib import SequenceMatcher
from enum import Enum
from markupsafe import Markup
from indico.core import signals
from indico.util.date_time import format_human_timedelta
from indico.util.i18n import force_locale, orig_string
from indico.util.signals import named_objects_from_signal
def METHOD_NAME():
    """Collect the named log-renderer objects registered on the event signal.

    Sends ``signals.event.METHOD_NAME`` and gathers the responses;
    ``plugin_attr='plugin'`` tags each returned object with the plugin
    that provided it.
    """
    return named_objects_from_signal(signals.event.METHOD_NAME.send(), plugin_attr='plugin')
def make_diff_log(changes, fields):
    """Create a value for log data containing change information.

    :param changes: a dict mapping attributes to ``(old, new)`` tuples
    :param fields: a dict mapping attributes to field metadata. for
                   simple cases this may be a string with the human-friendly
                   title, for more advanced fields it should be a dict
                   containing ``title``, a ``type`` string and a ``convert``
                   callback which will be invoked with a tuple containing the
                   old and new value
    """
    data = {'_diff': True}
    for key, field_data in fields.items():
        try:
            change = changes[key]
        except KeyError:
            # field not present in this change set - skip it
            continue
        if isinstance(field_data, str):
            # simple case: the metadata is just the human-friendly title
            field_data = {'title': field_data}
        title = field_data['title']
        convert = field_data.get('convert')
        attr = field_data.get('attr')
        default = field_data.get('default')
        type_ = field_data.get('type')
        # values that are not None, used to guess the type below without
        # being confused by a one-sided (None, x) change
        not_none_change = [x for x in change if x is not None]
        if attr:
            # dereference the given attribute on both old and new values
            change = [getattr(x, attr) if x is not None else '' for x in change]
        if convert:
            change = convert(change)
        if type_ is not None:
            # when we have an explicit type specified don't do any
            # guessing/conversions
            pass
        elif not_none_change and all(isinstance(x, Enum) for x in not_none_change):
            type_ = 'enum'
            # prefer the enum member's `title` attribute, fall back to its name
            change = [orig_string(getattr(x, 'title', x.name))
                      if x is not None else default
                      for x in change]
        elif all(isinstance(x, bool) for x in change):
            type_ = 'bool'
        elif all(isinstance(x, (int, float)) for x in change):
            type_ = 'number'
        elif all(isinstance(x, (list, tuple)) for x in change):
            type_ = 'list'
        elif all(isinstance(x, set) for x in change):
            # sets are rendered as lists; sort for a stable diff
            type_ = 'list'
            change = list(map(sorted, change))
        elif all(isinstance(x, datetime) for x in change):
            type_ = 'datetime'
            change = [x.isoformat() for x in change]
        elif not_none_change and all(isinstance(x, timedelta) for x in not_none_change):
            type_ = 'timedelta'
            # log entries are locale-independent, so force the default locale
            with force_locale(None, default=False):
                change = [format_human_timedelta(x) if x is not None else default for x in change]
        else:
            type_ = 'text'
            change = list(map(str, map(orig_string, change)))
        # stored as [old, new, type]
        data[title] = list(change) + [type_]
    return data
def render_changes(a, b, type_):
    """Render the comparison of `a` and `b` as HTML.

    :param a: old value
    :param b: new value
    :param type_: the type determining how the values should be compared
    """
    if type_ in ('number', 'enum', 'bool', 'datetime', 'timedelta'):
        # scalar values: show "old -> new", using the empty-set symbol
        # for missing values
        old = '\N{EMPTY SET}' if a in (None, '') else a
        new = '\N{EMPTY SET}' if b in (None, '') else b
        return f'{old} \N{RIGHTWARDS ARROW} {new}'
    if type_ == 'string':
        old = a or '\N{EMPTY SET}'
        new = b or '\N{EMPTY SET}'
        return f'{old} \N{RIGHTWARDS ARROW} {new}'
    if type_ == 'list':
        return _diff_list(a or [], b or [])
    if type_ == 'struct_list':
        # structured entries are compared by their repr
        return _diff_list([repr(x) for x in (a or [])], [repr(x) for x in (b or [])])
    if type_ == 'text':
        return _diff_text(a or '', b or '')
    raise NotImplementedError(f'Unexpected diff type: {type_}')
def _clean(strings, _linebreak_re=re.compile(r'\A(\n*)(.*?)(\n*)\Z', re.DOTALL)):
    """Join *strings* and make leading/trailing linebreaks visible.

    Each leading/trailing newline is decorated with a bold return symbol
    so that linebreak-only changes show up in the rendered diff.
    """
    joined = ''.join(strings)
    leading, body, trailing = _linebreak_re.match(joined).groups()
    marker = Markup('<strong>\N{RETURN SYMBOL}</strong>\n')
    return Markup('').join((marker * len(leading), body, marker * len(trailing)))
def _diff_text(a, b, _noword_re=re.compile(r'(\W)')):
    """Render a word-level HTML diff between strings *a* and *b*.

    The inputs are split on non-word characters first so that insertions
    and deletions never cut through the middle of a word, which keeps the
    rendered diff readable for humans.
    """
    words_a = _noword_re.split(a)
    words_b = _noword_re.split(b)
    matcher = SequenceMatcher(a=words_a, b=words_b)
    parts = []
    for tag, a0, a1, b0, b1 in matcher.get_opcodes():
        if tag == 'equal':
            parts.append(''.join(words_a[a0:a1]))
        elif tag in ('delete', 'insert', 'replace'):
            # 'replace' emits the deletion followed by the insertion,
            # mirroring how the separate opcodes render
            if tag != 'insert':
                parts.append(Markup('<del>{}</del>').format(_clean(words_a[a0:a1])))
            if tag != 'delete':
                parts.append(Markup('<ins>{}</ins>').format(_clean(words_b[b0:b1])))
        else:
            raise RuntimeError('unexpected opcode: ' + tag)
    return Markup('').join(parts)
def _diff_list(a, b):
    """Render an element-level, comma-joined HTML diff of lists *a* and *b*."""
    matcher = SequenceMatcher(a=a, b=b)
    rendered = []
    for tag, a0, a1, b0, b1 in matcher.get_opcodes():
        if tag == 'equal':
            rendered.extend(matcher.a[a0:a1])
        elif tag in ('delete', 'insert', 'replace'):
            # 'replace' renders deletions first, then insertions, matching
            # the order of the separate opcodes
            if tag != 'insert':
                rendered.extend(Markup('<del>{}</del>').format(x) for x in matcher.a[a0:a1])
            if tag != 'delete':
                rendered.extend(Markup('<ins>{}</ins>').format(x) for x in matcher.b[b0:b1])
        else:
            raise RuntimeError('unexpected opcode: ' + tag)
    return Markup(', ').join(rendered)
def serialize_log_entry(entry):
    """Serialize a log entry into a JSON-friendly dict.

    Times are exposed in the owning event's timezone; the user sub-dict
    carries ``None`` values when the entry has no associated user.
    """
    user = entry.user
    return {
        'id': entry.id,
        'type': entry.type,
        'realm': entry.realm.name,
        'kind': entry.kind.name,
        'module': entry.module,
        'description': entry.summary,
        'meta': entry.meta,
        'time': entry.logged_dt.astimezone(entry.event.tzinfo).isoformat(),
        'payload': entry.data,
        'user': {
            'fullName': user.full_name if user else None,
            'avatarURL': user.avatar_url if user else None
        }
    }
# -*- coding: utf-8 -*-
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# LICENSE
#
# Copyright (C) 2010-2023 GEM Foundation, G. Weatherill, M. Pagani,
# D. Monelli.
#
# The Hazard Modeller's Toolkit is free software: you can redistribute
# it and/or modify it under the terms of the GNU Affero General Public
# License as published by the Free Software Foundation, either version
# 3 of the License, or (at your option) any later version.
#
# You should have received a copy of the GNU Affero General Public License
# along with OpenQuake. If not, see <http://www.gnu.org/licenses/>
#
# DISCLAIMER
#
# The software Hazard Modeller's Toolkit (openquake.hmtk) provided herein
# is released as a prototype implementation on behalf of
# scientists and engineers working within the GEM Foundation (Global
# Earthquake Model).
#
# It is distributed for the purpose of open collaboration and in the
# hope that it will be useful to the scientific, engineering, disaster
# risk and software design communities.
#
# The software is NOT distributed as part of GEM's OpenQuake suite
# (https://www.globalquakemodel.org/tools-products) and must be considered as a
# separate entity. The software provided herein is designed and implemented
# by scientific staff. It is not developed to the design standards, nor
# subject to same level of critical review by professional software
# developers, as GEM's OpenQuake software suite.
#
# Feedback and contribution to the software is welcome, and can be
# directed to the hazard scientific staff of the GEM Model Facility
# (hazard@globalquakemodel.org).
#
# The Hazard Modeller's Toolkit (openquake.hmtk) is therefore distributed
# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
# for more details.
#
# The GEM Foundation, and the authors of the software, assume no
# liability for use of the software.
'''
Module :mod:`openquake.hmtk.strain.utils` holds a set of useful
utility functions for the strain rate model calculations
'''
import numpy as np
from math import exp
def moment_function(magnitude):
    '''
    Get the seismic moment (in Nm) from moment magnitude using the
    Hanks & Kanamori (1979) relation.

    :param float (or numpy.ndarray) magnitude:
        Magnitude of event
    :returns:
        Seismic Moment in Nm
    '''
    exponent = (1.5 * magnitude) + 9.05
    return 10. ** exponent
def moment_magnitude_function(moment):
    '''
    For a given seismic moment, get the moment magnitude using the
    Hanks & Kanamori (1979) relation (the inverse of :func:`moment_function`).

    :param float or numpy.ndarray moment:
        Seismic moment in Nm
    :returns:
        Moment magnitude
    '''
    # The original docstring documented a nonexistent "magnitude" parameter
    # with broken reST syntax and no :returns: section; fixed here.
    return (2. / 3.) * (np.log10(moment) - 9.05)
def calculate_taper_function(obs_threshold_moment, sel_threshold_moment,
                             corner_moment, beta):
    '''
    Calculates the tapering function of the tapered Gutenberg & Richter model:
    as described in Bird & Liu (2007)::

        taper_function = (M_0(M_T) / M_0(M_T^{CMT}))^-beta x
            exp((M_0(m_T^CMT) - M_0(m_T)) / M_0(m_c))

    :param numpy.ndarray obs_threshold_moment:
        Moment of the threshold magnitude of the observed earthquake catalogue
    :param numpy.ndarray sel_threshold_moment:
        Moment of the target magnitude
    :param float corner_moment:
        Corner moment of the Tapered Gutenberg-Richter Function
    :param float beta:
        Beta value (b * ln(10.)) of the Tapered Gutenberg-Richter Function
    :returns:
        Relative moment rate
    '''
    # Fixed docstring typo ("corner_momnet" -> "corner_moment").
    argument = (obs_threshold_moment - sel_threshold_moment) / corner_moment
    # exp() of a hugely negative argument underflows to zero anyway;
    # short-circuit to skip the pointless power/exp evaluation.
    if argument < -100.0:
        return 0.0
    moment_ratio = sel_threshold_moment / obs_threshold_moment
    return (moment_ratio ** -beta) * exp(argument)
def tapered_gutenberg_richter_cdf(moment, moment_threshold, beta,
                                  corner_moment):
    '''
    Tapered Gutenberg-Richter Cumulative Density Function

    :param float or numpy.ndarray moment:
        Moment for calculation of rate
    :param float or numpy.ndarray moment_threshold:
        Threshold Moment of the distribution (moment rate essentially!)
    :param float beta:
        Beta value (b * ln(10.)) of the Tapered Gutenberg-Richter Function
    :param float corner_moment:
        Corner moment of the Tapered Gutenberg-Richter Function
    :returns:
        Cumulative probability of moment release > moment
    '''
    # Fixed docstring typo ("corner_momnet" -> "corner_moment").
    # Power-law term tapered by an exponential roll-off at the corner moment.
    taper = np.exp((moment_threshold - moment) / corner_moment)
    return ((moment / moment_threshold) ** (-beta)) * taper
def METHOD_NAME(moment, moment_threshold, beta,
                corner_moment):
    '''
    Tapered Gutenberg-Richter Probability Density Function

    :param float or numpy.ndarray moment:
        Moment for calculation of rate
    :param float or numpy.ndarray moment_threshold:
        Threshold Moment of the distribution (moment rate essentially!)
    :param float beta:
        Beta value (b * ln(10.)) of the Tapered Gutenberg-Richter Function
    :param float corner_moment:
        Corner moment of the Tapered Gutenberg-Richter Function
    :returns:
        Absolute probability of moment release > moment
    '''
    # Fixed docstring typo ("corner_momnet" -> "corner_moment").
    # PDF = (beta/M + 1/M_c) * CDF(M), i.e. -d/dM of the tapered G-R CDF.
    return ((beta / moment + 1. / corner_moment) *
            tapered_gutenberg_richter_cdf(moment, moment_threshold, beta,
                                          corner_moment))
import argparse
import sys
__author__ = 'buchholb'

# Command-line interface: reads a Clueweb sentences file with recognized
# entities and writes a QLever-style contexts file ("wordsfile") plus the
# original-text file ("docsfile").
parser = argparse.ArgumentParser()
parser.add_argument('--sentences',
                    type=str,
                    help='Clueweb sentences file with recognized entities.',
                    required=True)
parser.add_argument('--w',
                    type=str,
                    help='Output filename for the contexts file ("wordsfile").',
                    required=True)
parser.add_argument('--d',
                    type=str,
                    help='Output filename for the orig text file ("docsfile").',
                    required=True)
parser.add_argument('--first-context-id',
                    type=int,
                    help='Start assigning context ids from this number here.',
                    default=0)
parser.add_argument('--stop-tokens-file',
                    type=str,
                    help='A list of tokens to omit in the context file. '
                         'Usually punctuation and stopwords, '
                         'one item a line and lower case.',
                    required=True)
parser.add_argument('--length-one-non-alnum-is-not-stop',
                    help='Usually, non-alpha-numeric tokens of length 1 '
                         'are ignored. This option disables that so that '
                         'everything can be used in the words-file.',
                    default=False,
                    action='store_true')
def tokenize_sentence(s):
    """Split a sentence into tokens, keeping ``[id|Words ...]`` entity
    markers together as single tokens.

    Plain text is split on spaces; while inside an entity marker the scan
    continues until a ``]`` followed by end-of-string or a space.
    """
    tokens = []
    inside_entity = False
    current_token_from = 0
    for i in range(0, len(s)):
        if inside_entity:
            if s[i] == ']' and (i + 1 == len(s) or s[i + 1] == ' '):
                # NOTE(review): this `i += 1` does not affect the loop
                # variable on the next iteration (Python `range` semantics);
                # it only makes the slice below include the ']'. Confirm
                # this is the intended behavior.
                i += 1
                tokens.append(s[current_token_from:i])
                current_token_from = i + 1
                inside_entity = False
            elif i + 1 < len(s) and s[i + 1] == '[':
                # a new '[' opens before the current entity closed: flush
                # the pending text (only if it contains no space) and reset
                if i > current_token_from:
                    tok = s[current_token_from:i]
                    if ' ' not in tok:
                        tokens.append(tok)
                inside_entity = False
                current_token_from = i + 1
        else:
            if s[i] == ' ' and i > current_token_from:
                tokens.append(s[current_token_from:i])
                current_token_from = i + 1
            elif s[i] == '[' and i > 0 and s[i - 1] == ' ':
                # '[' preceded by a space starts an entity marker
                inside_entity = True
    # flush the trailing token, if any
    if current_token_from < len(s):
        tokens.append(s[current_token_from:])
    return tokens
def is_marked_entity(token):
    """Return True if *token* is a recognized-entity marker ``[id|Words]``.

    Fixed: the original indexed ``token[0]``/``token[-1]`` directly and
    raised IndexError for the empty string (callers had to catch it);
    this version simply returns False instead.
    """
    return (len(token) >= 2 and token.startswith('[')
            and token.endswith(']') and '|' in token)
def METHOD_NAME(token):
    """Return the surface text to write to the docsfile.

    Marked entities ``[id|Words]`` are replaced by their word part;
    plain tokens pass through unchanged.
    """
    if is_marked_entity(token):
        # take the part after '|' and strip the trailing ']'
        return token.split('|')[1][:-1]
    return token
def entity_id_to_full_entity(entity_id):
    """Expand a bare Freebase id (e.g. ``m.02mjmr``) into a full IRI."""
    return '<http://rdf.freebase.com/ns/{0}>'.format(entity_id)
def should_be_written_to_wordsfile(word, stop_tokens, write_len_1_non_alnum):
    """Decide whether *word* goes into the wordsfile.

    Stop tokens are always skipped; single-character non-alphanumeric
    tokens are skipped unless *write_len_1_non_alnum* is set.
    """
    if word in stop_tokens:
        return False
    return write_len_1_non_alnum or len(word) > 1 or word.isalnum()
def write_context_to_wordsfile(context_id, tokens, wordsfile, stop_tokens,
                               write_len_1_non_alnum):
    """Write one context's tokens to the wordsfile in QLever's TSV format.

    Plain words get a ``word\\t0\\t<context>\\t1`` row; entity markers
    ``[id|Words]`` get one row per word plus an entity row
    ``<iri>\\t1\\t<context>\\t1``.
    """
    for t in tokens:
        try:
            is_entity = is_marked_entity(t)
            lower = t.lower()
            if not is_entity:
                if should_be_written_to_wordsfile(lower, stop_tokens,
                                                 write_len_1_non_alnum):
                    print('\t'.join([lower, '0', context_id, '1']),
                          file=wordsfile)
            else:
                # split marker into entity id (before '|') and words (after)
                spl = t.split('|')
                words = spl[1][:-1].lower()
                for word in words.split(' '):
                    if should_be_written_to_wordsfile(word, stop_tokens,
                                                      write_len_1_non_alnum):
                        print('\t'.join([word, '0', context_id, '1']),
                              file=wordsfile)
                entity = entity_id_to_full_entity(spl[0][1:])
                print('\t'.join([entity, '1', context_id, '1']),
                      file=wordsfile)
        except IndexError:
            # malformed marker (e.g. missing '|' part): report and move on
            print('Problem on token: ' + t, file=sys.stderr)
def process(sentences, context_file_name, orig_file_name, first_context_id,
            stop_tokens, write_len_1_non_alnum):
    """Stream the sentences file, writing one context per input line to the
    wordsfile and the de-marked original text to the docsfile.

    Each input line must have exactly two tab-separated columns; other
    lines are reported to stderr and skipped (their context id is not
    consumed).
    """
    context_id = first_context_id
    with open(context_file_name, 'w') as wordsfile:
        with open(orig_file_name, 'w') as docsfile:
            for line in open(sentences, 'r'):
                cols = line.strip().split('\t')
                if len(cols) != 2:
                    print("Ignoring line without exactly one tab, "
                          "line number (starting from 0): "
                          + str(context_id - first_context_id), file=sys.stderr)
                    continue
                tokens = tokenize_sentence(cols[0])
                # docsfile gets the human-readable text (entities de-marked)
                docsfile_tokens = [METHOD_NAME(t) for t in tokens]
                print('\t'.join([str(context_id), ' '.join(docsfile_tokens)]),
                      file=docsfile)
                write_context_to_wordsfile(str(context_id), tokens, wordsfile,
                                           stop_tokens, write_len_1_non_alnum)
                context_id += 1
def read_stop_tokens(file_name):
    """Read the stop-token file (one token per line) into a set.

    Fixed: uses a context manager so the file handle is closed
    deterministically (the original relied on garbage collection).
    """
    with open(file_name, 'r') as stop_file:
        return {line.strip() for line in stop_file}
def main():
    """Parse CLI arguments and run the sentences -> wordsfile/docsfile conversion."""
    args = vars(parser.parse_args())
    stop_tokens = read_stop_tokens(args['stop_tokens_file'])
    process(args['sentences'], args['w'], args['d'], args['first_context_id'],
            stop_tokens, args['length_one_non_alnum_is_not_stop'])
# Script entry point.
if __name__ == '__main__':
    main()
from __future__ import annotations
import concurrent.futures
import os
import sys
import time
from unittest import mock
import pytest
from pre_commit import parse_shebang
from pre_commit import xargs
@pytest.mark.parametrize(
    ('env', 'expected'),
    (
        ({}, 0),
        ({b'x': b'1'}, 12),
        ({b'x': b'12'}, 13),
        ({b'x': b'1', b'y': b'2'}, 24),
    ),
)
def test_environ_size(env, expected):
    """Each entry appears to cost len(key) + len(value) + 2 separator bytes
    plus a constant overhead (12 for a 1+1-byte pair) -- see cases above."""
    # normalize integer sizing
    assert xargs._environ_size(_env=env) == expected
@pytest.fixture
def win32_mock():
    """Pretend we are on Windows with a utf-8 filesystem encoding."""
    with mock.patch.object(sys, 'getfilesystemencoding', return_value='utf-8'):
        with mock.patch.object(sys, 'platform', 'win32'):
            yield
@pytest.fixture
def linux_mock():
    """Pretend we are on Linux with a utf-8 filesystem encoding."""
    with mock.patch.object(sys, 'getfilesystemencoding', return_value='utf-8'):
        with mock.patch.object(sys, 'platform', 'linux'):
            yield
def test_partition_trivial():
    """With no varargs, partitioning yields just the bare command."""
    result = xargs.partition(('cmd',), (), 1)
    assert result == (('cmd',),)
def test_partition_simple():
    """A single vararg is appended to the command in one partition."""
    result = xargs.partition(('cmd',), ('foo',), 1)
    assert result == (('cmd', 'foo'),)
def test_partition_limits():
    """Partitions are cut so each command stays within _max_length."""
    ret = xargs.partition(
        ('ninechars',), (
            # Just match the end (with spaces)
            '.' * 5, '.' * 4,
            # Just match the end (single arg)
            '.' * 10,
            # Goes over the end
            '.' * 5,
            '.' * 6,
        ),
        1,
        _max_length=21,
    )
    assert ret == (
        ('ninechars', '.' * 5, '.' * 4),
        ('ninechars', '.' * 10),
        ('ninechars', '.' * 5),
        ('ninechars', '.' * 6),
    )
def test_partition_limit_win32(win32_mock):
    """On win32, length is measured in utf-16 code units."""
    cmd = ('ninechars',)
    # counted as half because of utf-16 encode
    varargs = ('😑' * 5,)
    ret = xargs.partition(cmd, varargs, 1, _max_length=21)
    assert ret == (cmd + varargs,)
def test_partition_limit_linux(linux_mock):
    """On linux, length is measured in utf-8 bytes (4 per emoji here)."""
    cmd = ('ninechars',)
    varargs = ('😑' * 5,)
    ret = xargs.partition(cmd, varargs, 1, _max_length=31)
    assert ret == (cmd + varargs,)
def test_argument_too_long_with_large_unicode(linux_mock):
    """A single arg whose encoded size exceeds the limit cannot be partitioned."""
    cmd = ('ninechars',)
    varargs = ('😑' * 10,)  # 4 bytes * 10
    with pytest.raises(xargs.ArgumentTooLongError):
        xargs.partition(cmd, varargs, 1, _max_length=20)
def test_partition_target_concurrency():
    """22 args at concurrency 4 are spread across 4 roughly-even partitions."""
    ret = xargs.partition(
        ('foo',), ('A',) * 22,
        4,
        _max_length=50,
    )
    assert ret == (
        ('foo',) + ('A',) * 6,
        ('foo',) + ('A',) * 6,
        ('foo',) + ('A',) * 6,
        ('foo',) + ('A',) * 4,
    )
def METHOD_NAME():
    """With only 10 args at concurrency 4, partitioning yields 3 chunks of
    4/4/2 rather than 4 tiny chunks."""
    ret = xargs.partition(
        ('foo',), ('A',) * 10,
        4,
        _max_length=50,
    )
    assert ret == (
        ('foo',) + ('A',) * 4,
        ('foo',) + ('A',) * 4,
        ('foo',) + ('A',) * 2,
    )
def test_argument_too_long():
    """command + one arg longer than _max_length raises ArgumentTooLongError."""
    with pytest.raises(xargs.ArgumentTooLongError):
        xargs.partition(('a' * 5,), ('a' * 5,), 1, _max_length=10)
def test_xargs_smoke():
    """End-to-end sanity check: echo the varargs and capture stdout."""
    ret, out = xargs.xargs(('echo',), ('hello', 'world'))
    assert ret == 0
    # normalize Windows line endings
    assert out.replace(b'\r\n', b'\n') == b'hello world\n'
# Shared fixture command: `bash -c 'exit $1' --` exits with its first vararg.
exit_cmd = parse_shebang.normalize_cmd(('bash', '-c', 'exit $1', '--'))
# Abuse max_length to control the exit code: room for exactly one vararg
# (plus separator) per invocation.
max_length = len(' '.join(exit_cmd)) + 3
def test_xargs_retcode_normal():
    """xargs reports the maximum return code across all partitions."""
    ret, _ = xargs.xargs(exit_cmd, ('0',), _max_length=max_length)
    assert ret == 0

    ret, _ = xargs.xargs(exit_cmd, ('0', '1'), _max_length=max_length)
    assert ret == 1

    # takes the maximum return code
    ret, _ = xargs.xargs(exit_cmd, ('0', '5', '1'), _max_length=max_length)
    assert ret == 5
@pytest.mark.xfail(sys.platform == 'win32', reason='posix only')
def test_xargs_retcode_killed_by_signal():
    """A signal-killed child is reported as the negative signal number."""
    ret, _ = xargs.xargs(
        parse_shebang.normalize_cmd(('bash', '-c', 'kill -9 $$', '--')),
        ('foo', 'bar'),
    )
    assert ret == -9
def test_xargs_concurrency():
    """Five 0.5s sleeps at concurrency 5 must finish in well under 2.5s,
    proving the partitions actually ran in parallel."""
    bash_cmd = parse_shebang.normalize_cmd(('bash', '-c'))
    print_pid = ('sleep 0.5 && echo $$',)
    start = time.time()
    ret, stdout = xargs.xargs(
        bash_cmd, print_pid * 5,
        target_concurrency=5,
        _max_length=len(' '.join(bash_cmd + print_pid)) + 1,
    )
    elapsed = time.time() - start
    assert ret == 0
    pids = stdout.splitlines()
    assert len(pids) == 5
    # It would take 0.5*5=2.5 seconds to run all of these in serial, so if it
    # takes less, they must have run concurrently.
    assert elapsed < 2.5
def test_thread_mapper_concurrency_uses_threadpoolexecutor_map():
    """Concurrency > 1 yields a ThreadPoolExecutor-bound map."""
    with xargs._thread_mapper(10) as thread_map:
        _self = thread_map.__self__  # type: ignore
        assert isinstance(_self, concurrent.futures.ThreadPoolExecutor)
def test_thread_mapper_concurrency_uses_regular_map():
    """Concurrency of 1 falls back to the builtin map (no thread pool)."""
    with xargs._thread_mapper(1) as thread_map:
        assert thread_map is map
def test_xargs_propagate_kwargs_to_cmd():
    """Extra kwargs (here: env) are forwarded to the subprocess call."""
    env = {'PRE_COMMIT_TEST_VAR': 'Pre commit is awesome'}
    cmd: tuple[str, ...] = ('bash', '-c', 'echo $PRE_COMMIT_TEST_VAR', '--')
    cmd = parse_shebang.normalize_cmd(cmd)

    ret, stdout = xargs.xargs(cmd, ('1',), env=env)
    assert ret == 0
    assert b'Pre commit is awesome' in stdout
@pytest.mark.xfail(sys.platform == 'win32', reason='posix only')
def test_xargs_color_true_makes_tty():
    """color=True runs the command attached to a pty, so stdout is a tty."""
    retcode, out = xargs.xargs(
        (sys.executable, '-c', 'import sys; print(sys.stdout.isatty())'),
        ('1',),
        color=True,
    )
    assert retcode == 0
    assert out == b'True\n'
@pytest.mark.xfail(os.name == 'posix', reason='nt only')
@pytest.mark.parametrize('filename', ('t.bat', 't.cmd', 'T.CMD'))
def test_xargs_with_batch_files(tmpdir, filename):
    """Windows batch/cmd files run correctly even with many (8192) varargs."""
    f = tmpdir.join(filename)
    f.write('echo it works\n')
    retcode, out = xargs.xargs((str(f),), ('x',) * 8192)
    assert retcode == 0, (retcode, out)
"""
Greynir: Natural language processing for Icelandic
Randomness query response module
Copyright (C) 2023 Miðeind ehf.
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see http://www.gnu.org/licenses/.
This query module handles queries related to the generation
of random numbers, e.g. "Kastaðu tengingi", "Nefndu tölu milli 5 og 10", etc.
"""
# TODO: Suport commands of the form "Kastaðu tveir dé 6", D&D style die rolling lingo
import logging
import random
from queries import Query, QueryStateDict, AnswerTuple
from queries.util import gen_answer, read_grammar_file
from queries.arithmetic import add_num, terminal_num
from speech.trans import gssml
from tree import Result, Node
# Query-type identifier reported for all queries handled by this module.
_RANDOM_QTYPE = "Random"

# Lemmas that, when present in an unparseable query, trigger this
# module's help text.
TOPIC_LEMMAS = ["teningur", "skjaldarmerki", "handahóf"]
def help_text(lemma: str) -> str:
    """Help text to return when query processor is unable to parse a query but
    one of the above lemmas is found in it"""
    examples = (
        "Kastaðu teningi",
        "Kastaðu tíu hliða teningi",
        "Fiskur eða skjaldarmerki",
        "Kastaðu teningi",
        "Kastaðu peningi",
        "Veldu tölu á milli sjö og þrettán",
    )
    example = random.choice(examples)
    return "Ég skil þig ef þú segir til dæmis: {0}.".format(example)
# This module wants to handle parse trees for queries
HANDLE_TREE = True

# The grammar nonterminals this module wants to handle
QUERY_NONTERMINALS = {"QRandom"}

# The context-free grammar for the queries recognized by this plug-in module
GRAMMAR = read_grammar_file("rand")
def QRandomQuery(node: Node, params: QueryStateDict, result: Result) -> None:
    # Tag the parse result as a Random-module query
    result.qtype = _RANDOM_QTYPE
def QRandomHeadsOrTails(node: Node, params: QueryStateDict, result: Result) -> None:
    # Coin-flip ("fiskur eða skjaldarmerki") query
    result.action = "headstails"
def QRandomBetween(node: Node, params: QueryStateDict, result: Result) -> None:
    # "Pick a number between x and y" query
    result.action = "randbtwn"
def QRandomDieRoll(node: Node, params: QueryStateDict, result: Result) -> None:
    # Die-roll query
    result.action = "dieroll"
def QRandomDiceSides(node: Node, params: QueryStateDict, result: Result) -> None:
    # NOTE(review): always records 6 sides regardless of the matched text --
    # confirm the QRandomDiceSides nonterminal only matches six-sided phrasing
    result.dice_sides = 6
def QRandNumber(node: Node, params: QueryStateDict, result: Result) -> None:
    # Prefer a parsed "tala" (number) terminal; otherwise fall back to the
    # nominative text of the matched node
    d = result.find_descendant(t_base="tala")
    if d:
        add_num(terminal_num(d), result)
    else:
        add_num(result._nominative, result)
def METHOD_NAME(q: Query, result: Result):
    """Generate an answer for rolling multiple dice at once.

    Currently an unimplemented stub (see the module-level TODO about
    D&D-style "tveir dé 6" rolls).
    """
    # TODO: Implement me
    pass
def gen_random_answer(q: Query, result: Result):
    """Generate answer to a query asking for a random number between two numbers.

    Also handles die rolls: one number means "roll an n-sided die",
    two numbers mean "pick between x and y". Returns an
    ``(response, answer, voice_answer)`` tuple, or a plain ``gen_answer``
    for the zero-sided-die error case.
    """
    (num1, num2) = (1, 6)  # Default
    if "numbers" in result:
        # Asking for a number between x and y
        if len(result.numbers) == 2:
            (num1, num2) = sorted(result.numbers)
        # Asking for the roll of an x-sided die
        else:
            if result.numbers[0] == 0:
                return gen_answer("Núll hliða teningar eru ekki til.")
            (num1, num2) = (1, result.numbers[0])

    # Query key is random number range (e.g. 1-6)
    q.set_key("{0}-{1}".format(num1, num2))

    answer = random.randint(num1, num2)
    response = dict(answer=answer)
    if result.action == "dieroll":
        voice_answer = (
            f"Talan {gssml(answer, type='number', gender='kk')} kom upp á teningnum"
        )
    else:
        voice_answer = f"Ég vel töluna {gssml(answer, type='number', gender='kk')}"
    return response, str(answer), voice_answer
def heads_or_tails(q: Query, result: Result) -> AnswerTuple:
    """Generate answer to "heads or tails" queries, i.e. "fiskur eða skjaldarmerki"."""
    q.set_key("HeadsOrTails")
    # Uniform random pick between the two Icelandic coin faces
    return gen_answer(random.choice(("Skjaldarmerki", "Fiskur")))
def sentence(state: QueryStateDict, result: Result) -> None:
    """Called when sentence processing is complete"""
    q: Query = state["query"]
    if "qtype" not in result or "action" not in result:
        q.set_error("E_QUERY_NOT_UNDERSTOOD")
        return

    # Successfully matched a query type
    q.set_qtype(result.qtype)

    try:
        # Dispatch on the action recorded by the grammar handlers
        func = heads_or_tails if result.action == "headstails" else gen_random_answer
        r = func(q, result)
        if r:
            q.set_answer(*r)
    except Exception as e:
        logging.warning(f"Exception while processing random query: {e}")
        q.set_error(f"E_EXCEPTION: {e}")
        # re-raise so the caller/framework also sees the failure
        raise
import uuid
from typing import Mapping, Any, Optional, MutableMapping, List, Dict
import logging
import aiohttp
from hailtop.utils import retry_transient_errors, sleep_before_try
from .base_client import GoogleBaseClient
log = logging.getLogger('compute_client')
class GCPOperationError(Exception):
    """Raised when a GCP zonal operation completes with an error."""

    def __init__(self, status: int, message: str, error_codes: Optional[List[str]], error_messages: Optional[List[str]], response: Dict[str, Any]):
        super().__init__(message)
        # HTTP status / message reported by the operation
        self.status = status
        self.message = message
        # per-error code and message lists from the operation's error field
        self.error_codes = error_codes
        self.error_messages = error_messages
        # full operation resource, kept for debugging
        self.response = response

    def __str__(self):
        details = f'{self.error_codes} {self.error_messages}'
        return f'GCPOperationError: {self.status}:{self.message} {details}; {self.response}'
class PagedIterator:
    """Async iterator over a paginated GCP list endpoint.

    Fetches one page at a time via the client's ``get`` and follows
    ``nextPageToken`` until exhausted, yielding individual ``items`` entries.
    """

    def __init__(self, client: 'GoogleComputeClient', path: str, request_params: Optional[MutableMapping[str, Any]], request_kwargs: Mapping[str, Any]):
        assert 'params' not in request_kwargs
        self._client = client
        self._path = path
        if request_params is None:
            request_params = {}
        # query params; mutated in place to carry the page token
        self._request_params = request_params
        self._request_kwargs = request_kwargs
        self._page = None  # most recently fetched page (None until first fetch)
        self._index: Optional[int] = None  # cursor into the current page's items

    def __aiter__(self) -> 'PagedIterator':
        return self

    async def __anext__(self):
        if self._page is None:
            # first fetch: the caller must not have supplied a page token
            assert 'pageToken' not in self._request_params
            self._page = await self._client.get(self._path, params=self._request_params, **self._request_kwargs)
            self._index = 0

        while True:
            assert self._page
            # yield the next item of the current page, if any remain
            if 'items' in self._page and self._index is not None and self._index < len(self._page['items']):
                i = self._index
                self._index += 1
                return self._page['items'][i]

            next_page_token = self._page.get('nextPageToken')
            if next_page_token is not None:
                # advance to the next page and loop back to yield from it
                assert self._request_params
                self._request_params['pageToken'] = next_page_token
                self._page = await self._client.get(self._path, params=self._request_params, **self._request_kwargs)
                self._index = 0
            else:
                raise StopAsyncIteration
class GoogleComputeClient(GoogleBaseClient):
    """Thin async client for the GCP Compute v1 REST API, scoped to a project."""

    def __init__(self, project, **kwargs):
        super().__init__(f'https://compute.googleapis.com/compute/v1/projects/{project}', **kwargs)

    # docs:
    # https://cloud.google.com/compute/docs/api/how-tos/api-requests-responses#handling_api_responses
    # https://cloud.google.com/compute/docs/reference/rest/v1
    # https://cloud.google.com/compute/docs/reference/rest/v1/instances/insert
    # https://cloud.google.com/compute/docs/reference/rest/v1/instances/get
    # https://cloud.google.com/compute/docs/reference/rest/v1/instances/delete
    # https://cloud.google.com/compute/docs/reference/rest/v1/disks

    async def list(self, path: str, *, params: Optional[MutableMapping[str, Any]] = None, **kwargs) -> PagedIterator:
        """Return an async iterator over a paginated list endpoint."""
        return PagedIterator(self, path, params, kwargs)

    async def create_disk(self, path: str, *, params: Optional[MutableMapping[str, Any]] = None, **kwargs):
        """POST and wait for the resulting zonal operation to finish."""
        return await self._request_with_zonal_operations_response(self.post, path, params, **kwargs)

    async def METHOD_NAME(self, path: str, *, params: Optional[MutableMapping[str, Any]] = None, **kwargs):
        """POST and wait for the resulting zonal operation to finish."""
        return await self._request_with_zonal_operations_response(self.post, path, params, **kwargs)

    async def detach_disk(self, path: str, *, params: Optional[MutableMapping[str, Any]] = None, **kwargs):
        """POST and wait for the resulting zonal operation to finish."""
        return await self._request_with_zonal_operations_response(self.post, path, params, **kwargs)

    async def delete_disk(self, path: str, *, params: Optional[MutableMapping[str, Any]] = None, **kwargs):
        # plain DELETE; no zonal-operation wait here
        return await self.delete(path, params=params, **kwargs)

    async def _request_with_zonal_operations_response(self, request_f, path, maybe_params: Optional[MutableMapping[str, Any]] = None, **kwargs):
        """Issue a request that starts a zonal operation and poll it to DONE.

        A fresh requestId is generated per attempt (idempotency token), and
        the whole request+wait is retried on transient errors. Raises
        GCPOperationError if the operation finishes with an error.
        """
        params = maybe_params or {}
        assert 'requestId' not in params

        async def request_and_wait():
            params['requestId'] = str(uuid.uuid4())
            resp = await request_f(path, params=params, **kwargs)
            operation_id = resp['id']
            zone = resp['zone'].rsplit('/', 1)[1]
            tries = 0
            while True:
                # server-side wait; bounded by the client timeout, so loop
                result = await self.post(f'/zones/{zone}/operations/{operation_id}/wait',
                                         timeout=aiohttp.ClientTimeout(total=150))
                if result['status'] == 'DONE':
                    error = result.get('error')
                    if error:
                        assert result.get('httpErrorStatusCode') is not None
                        assert result.get('httpErrorMessage') is not None
                        error_codes = [e['code'] for e in error['errors']]
                        error_messages = [e['message'] for e in error['errors']]
                        raise GCPOperationError(result['httpErrorStatusCode'],
                                                result['httpErrorMessage'],
                                                error_codes,
                                                error_messages,
                                                result)
                    return result
                tries += 1
                await sleep_before_try(tries, base_delay_ms=2_000, max_delay_ms=15_000)

        return await retry_transient_errors(request_and_wait)
#!/usr/bin/env python
"""
testing file manager
"""
from __future__ import print_function
import logging
import os.path
import shutil
import tempfile
import unittest
import WMCore.Storage.StageOutError
from WMCore.Storage.FileManager import StageInMgr, StageOutMgr, DeleteMgr
class FileManagerTest(unittest.TestCase):
    """Tests for WMCore.Storage.FileManager stage-in/stage-out/delete managers.

    Uses the 'test-win'/'test-fail' pseudo stage-out commands to simulate
    success/failure, and 'cp' with a tempdir lfn-prefix for real copies.
    """

    def setUp(self):
        self.testDir = None

    def tearDown(self):
        if (self.testDir != None):
            try:
                shutil.rmtree(self.testDir)
            except Exception:
                # meh, if it fails, I guess something weird happened
                pass

    # The following are placeholders for the FileManager internals they name.
    def testStageFile(self):
        pass

    # def stageFile(self, fileToStage, stageOut = True):
    def testDelete(self):
        pass

    # def deleteLFN(self, lfn):
    def testInitialiseSiteConf(self):
        pass

    # def initialiseSiteConf(self):
    def testInitialiseOverride(self):
        # def initialiseOverride(self):
        pass

    def testGetTransferDetails(self):
        pass

    # def getTransferDetails(self, lfn, currentMethod):
    def testStageIn(self):
        pass

    def METHOD_NAME(self):
        pass

    # def stageIn(self,fileToStage):
    # def stageOut(self,fileToStage):
    def test_doTransfer(self):
        pass

    # def _doTransfer(self, currentMethod, methodCounter, lfn, pfn, stageOut):
    def testCleanSuccessfulStageOuts(self):
        pass

    # def cleanSuccessfulStageOuts(self):
    def testSearchTFC(self):
        pass

    # def searchTFC(self, lfn):
    def testStageOutMgrWrapperWin(self):
        # 'test-win' command simulates a successful stage-out
        fileForTransfer = {'LFN': '/etc/hosts',
                           'PFN': 'file:///etc/hosts',
                           'PNN': None,
                           'StageOutCommand': None}
        wrapper = StageOutMgr(**{
            'command': 'test-win',
            'option': '',
            'phedex-node': 'test-win',
            'lfn-prefix': ''})
        wrapper(fileForTransfer)

    def testStageOutMgrWrapperFail(self):
        # 'test-fail' command simulates a failing transfer; expect StageOutError
        fileForTransfer = {'LFN': 'failtest',
                           'PFN': 'failtest',
                           'PNN': None,
                           'StageOutCommand': None}
        wrapper = StageOutMgr(numberOfRetries=1,
                              retryPauseTime=0, **{
                                  'command': 'test-fail',
                                  'option': '',
                                  'phedex-node': 'test-win',
                                  'lfn-prefix': ''})
        self.assertRaises(WMCore.Storage.StageOutError.StageOutError, wrapper.__call__, fileForTransfer)

    def testStageOutMgrWrapperRealCopy(self):
        # real 'cp' stage-out into a temp dir keyed by the lfn-prefix
        self.testDir = tempfile.mkdtemp()
        fileForTransfer = {'LFN': '/etc/hosts',
                           'PFN': '/etc/hosts',
                           'PNN': None,
                           'StageOutCommand': None}
        wrapper = StageOutMgr(**{
            'command': 'cp',
            'option': '',
            'phedex-node': 'test-win',
            'lfn-prefix': self.testDir})
        wrapper(fileForTransfer)
        self.assertTrue(os.path.exists(os.path.join(self.testDir, '/etc/hosts')))

    def testStageOutMgrWrapperRealCopyFallback(self):
        # unknown command forces fallback to the old stage-out backend
        self.testDir = tempfile.mkdtemp()
        fileForTransfer = {'LFN': '/etc/hosts',
                           'PFN': '/etc/hosts',
                           'PNN': None,
                           'StageOutCommand': None}
        wrapper = StageOutMgr(**{
            'command': 'testFallbackToOldBackend',
            'option': '',
            'phedex-node': 'test-win',
            'lfn-prefix': self.testDir})
        wrapper(fileForTransfer)
        self.assertTrue(os.path.exists(os.path.join(self.testDir, '/etc/hosts')))

    def testStageInMgrWrapperWin(self):
        fileForTransfer = {'LFN': '/etc/hosts',
                           'PFN': '/etc/hosts',
                           'PNN': None,
                           'StageOutCommand': None}
        wrapper = StageInMgr(**{
            'command': 'test-win',
            'option': '',
            'phedex-node': 'test-win',
            'lfn-prefix': ''})
        wrapper(fileForTransfer)

    def testStageInMgrWrapperFail(self):
        fileForTransfer = {'LFN': 'failtest',
                           'PFN': 'failtest',
                           'PNN': None,
                           'StageOutCommand': None}
        wrapper = StageInMgr(numberOfRetries=1,
                             retryPauseTime=0, **{
                                 'command': 'test-fail',
                                 'option': '',
                                 'phedex-node': 'test-win',
                                 'lfn-prefix': ''})
        self.assertRaises(WMCore.Storage.StageOutError.StageOutError, wrapper.__call__, fileForTransfer)

    def testStageInMgrWrapperRealCopy(self):
        self.testDir = tempfile.mkdtemp()
        shutil.copy('/etc/hosts', self.testDir + '/INPUT')
        fileForTransfer = {'LFN': '/INPUT',
                           'PFN': '%s/etc/hosts' % self.testDir,
                           'PNN': None,
                           'StageOutCommand': None}
        wrapper = StageInMgr(**{
            'command': 'cp',
            'option': '',
            'phedex-node': 'test-win',
            'lfn-prefix': self.testDir})
        wrapper(fileForTransfer)

    def testStageInMgrWrapperRealCopyFallback(self):
        self.testDir = tempfile.mkdtemp()
        shutil.copy('/etc/hosts', self.testDir + '/INPUT')
        fileForTransfer = {'LFN': '/INPUT',
                           'PFN': '%s/etc/hosts' % self.testDir,
                           'PNN': None,
                           'StageOutCommand': None}
        wrapper = StageInMgr(**{
            'command': 'testFallbackToOldBackend',
            'option': '',
            'phedex-node': 'test-win',
            'lfn-prefix': self.testDir})
        wrapper(fileForTransfer)

    def testDeleteMgrWrapper(self):
        # stage a file in, then delete it via DeleteMgr using the stage-in result
        self.testDir = tempfile.mkdtemp()
        shutil.copy('/etc/hosts', self.testDir + '/INPUT')
        fileForTransfer = {'LFN': '/INPUT',
                           'PFN': '%s/etc/hosts' % self.testDir,
                           'PNN': None,
                           'StageOutCommand': None}
        wrapper = StageInMgr(**{
            'command': 'cp',
            'option': '',
            'phedex-node': 'test-win',
            'lfn-prefix': self.testDir})
        retval = wrapper(fileForTransfer)
        print("got the retval %s" % retval)
        wrapper = DeleteMgr(**{
            'command': 'cp',
            'option': '',
            'phedex-node': 'test-win',
            'lfn-prefix': self.testDir})
        wrapper(retval)
if __name__ == "__main__":
    # Fixed: removed a stray trailing "|" merge artifact after unittest.main()
    # that made the module unparseable.
    logging.basicConfig(level=logging.DEBUG)
    unittest.main()
from __future__ import annotations
import logging
import uuid
from dataclasses import dataclass
from typing import Any, Iterator, List, TYPE_CHECKING, Tuple
from dcs.mapping import Point
from .missiontarget import MissionTarget
from ..utils import Heading, pairwise
if TYPE_CHECKING:
from game.ato import FlightType
from .controlpoint import ControlPoint, Coalition
FRONTLINE_MIN_CP_DISTANCE = 5000
@dataclass
class FrontLineSegment:
    """A single straight-line piece of a front line route."""

    point_a: Point
    point_b: Point

    @property
    def blue_forward_heading(self) -> Heading:
        """The heading toward the start of the next red segment or red base."""
        degrees = self.point_a.heading_between_point(self.point_b)
        return Heading.from_degrees(degrees)

    @property
    def length(self) -> float:
        """Straight-line distance from point_a to point_b."""
        return self.point_a.distance_to_point(self.point_b)
class FrontLine(MissionTarget):
    """Defines a front line location between two control points.

    Front lines are the area where ground combat happens.

    Overwrites the entirety of MissionTarget __init__ method to allow for
    dynamic position calculation.
    """

    def __init__(
        self,
        blue_point: ControlPoint,
        red_point: ControlPoint,
    ) -> None:
        self.id = uuid.uuid4()
        self.blue_cp = blue_point
        self.red_cp = red_point
        try:
            route = list(blue_point.convoy_route_to(red_point))
        except KeyError:
            # Some campaigns are air only and the mission generator currently relies on
            # *some* "front line" being drawn between these two. In this case there will
            # be no supply route to follow. Just create an arbitrary route between the
            # two points.
            route = [blue_point.position, red_point.position]
        # Snap the beginning and end points to the CPs rather than the convoy waypoints,
        # which are on roads.
        route[0] = blue_point.position
        route[-1] = red_point.position
        self.segments: List[FrontLineSegment] = [
            FrontLineSegment(a, b) for a, b in pairwise(route)
        ]
        super().__init__(
            f"Front line {blue_point}/{red_point}", self._compute_position()
        )

    def __eq__(self, other: Any) -> bool:
        if not isinstance(other, FrontLine):
            return False
        return (self.blue_cp, self.red_cp) == (other.blue_cp, other.red_cp)

    def __hash__(self) -> int:
        # Fixed: must be consistent with __eq__. Two FrontLine instances for
        # the same CP pair compare equal, so they must hash equally too; the
        # previous hash(id(self)) broke dict/set lookups for
        # distinct-but-equal instances. (Assumes ControlPoint uses default
        # identity hashing -- TODO confirm.)
        return hash((self.blue_cp, self.red_cp))

    def _compute_position(self) -> Point:
        # The conflict point along the supply route, derived from the current
        # relative base strengths.
        return self.point_along_route_from_blue(self._blue_route_progress)

    def METHOD_NAME(self) -> None:
        # Recompute self.position from current base strengths.
        # NOTE(review): METHOD_NAME is a placeholder identifier ("update
        # position" per metadata); rename together with any external callers.
        self.position = self._compute_position()

    def control_point_friendly_to(self, player: bool) -> ControlPoint:
        """Return the blue CP for the player, otherwise the red CP."""
        if player:
            return self.blue_cp
        return self.red_cp

    def control_point_hostile_to(self, player: bool) -> ControlPoint:
        """Return the CP on the side opposing *player*."""
        return self.control_point_friendly_to(not player)

    def is_friendly(self, to_player: bool) -> bool:
        """Returns True if the objective is in friendly territory."""
        return False

    def mission_types(self, for_player: bool) -> Iterator[FlightType]:
        from game.ato import FlightType

        yield from [
            FlightType.CAS,
            FlightType.AEWC,
            FlightType.REFUELING
            # TODO: FlightType.TROOP_TRANSPORT
            # TODO: FlightType.EVAC
        ]
        yield from super().mission_types(for_player)

    @property
    def points(self) -> Iterator[Point]:
        # Full polyline of the route: the first segment's start point followed
        # by every segment's end point.
        yield self.segments[0].point_a
        for segment in self.segments:
            yield segment.point_b

    @property
    def control_points(self) -> Tuple[ControlPoint, ControlPoint]:
        """Returns a tuple of the two control points."""
        return self.blue_cp, self.red_cp

    @property
    def coalition(self) -> Coalition:
        return self.blue_cp.coalition

    @property
    def route_length(self) -> float:
        """The total distance of all segments"""
        return sum(i.length for i in self.segments)

    @property
    def blue_forward_heading(self) -> Heading:
        """The heading toward the start of the next red segment or red base."""
        return self.active_segment.blue_forward_heading

    @property
    def active_segment(self) -> FrontLineSegment:
        """The FrontLine segment where there can be an active conflict"""
        if self._blue_route_progress <= self.segments[0].length:
            return self.segments[0]

        # Walk the route, consuming each segment's length until the conflict
        # distance falls within one of them.
        distance_to_segment = self._blue_route_progress
        for segment in self.segments:
            if distance_to_segment <= segment.length:
                return segment
            else:
                distance_to_segment -= segment.length
        logging.error(
            "Frontline attack distance is greater than the sum of its segments"
        )
        return self.segments[0]

    def point_along_route_from_blue(self, distance: float) -> Point:
        """Returns a point {distance} away from control_point_a along the route."""
        if distance < self.segments[0].length:
            return self.blue_cp.position.point_from_heading(
                self.segments[0].blue_forward_heading.degrees, distance
            )
        remaining_dist = distance
        for segment in self.segments:
            if remaining_dist < segment.length:
                return segment.point_a.point_from_heading(
                    segment.blue_forward_heading.degrees, remaining_dist
                )
            else:
                remaining_dist -= segment.length
        raise RuntimeError(
            f"Could not find front line point {distance} from {self.blue_cp}"
        )

    @property
    def _blue_route_progress(self) -> float:
        """
        The distance from point "a" where the conflict should occur
        according to the current strength of each control point
        """
        total_strength = self.blue_cp.base.strength + self.red_cp.base.strength
        if self.blue_cp.base.strength == 0:
            return self._adjust_for_min_dist(0)
        if self.red_cp.base.strength == 0:
            return self._adjust_for_min_dist(self.route_length)
        strength_pct = self.blue_cp.base.strength / total_strength
        return self._adjust_for_min_dist(strength_pct * self.route_length)

    def _adjust_for_min_dist(self, distance: float) -> float:
        """
        Ensures the frontline conflict is never located within the minimum distance
        constant of either end control point.
        """
        if (distance > self.route_length / 2) and (
            distance + FRONTLINE_MIN_CP_DISTANCE > self.route_length
        ):
            distance = self.route_length - FRONTLINE_MIN_CP_DISTANCE
        elif (distance < self.route_length / 2) and (
            distance < FRONTLINE_MIN_CP_DISTANCE
        ):
            distance = FRONTLINE_MIN_CP_DISTANCE
        return distance

    @staticmethod
    def sort_control_points(
        a: ControlPoint, b: ControlPoint
    ) -> tuple[ControlPoint, ControlPoint]:
        """Order a hostile CP pair as (blue, red).

        Fixed: also removed a stray trailing "|" merge artifact after the
        final return that made the module unparseable.
        """
        if a.is_friendly_to(b):
            raise ValueError(
                "Cannot sort control points that are friendly to each other"
            )
        if a.captured:
            return a, b
        return b, a
"""
Test the napalm_formula execution module.
"""
import textwrap
import pytest
import salt.modules.napalm_formula as napalm_formula
from salt.utils.immutabletypes import freeze
from tests.support.mock import MagicMock, patch
@pytest.fixture
def set_model():
    """OpenConfig-style interfaces model used as input by these tests.

    Returned frozen (immutable), so tests must .copy() before passing it to
    functions that may mutate it.
    """
    return freeze(
        {
            "interfaces": {
                "interface": {
                    "Ethernet1": {
                        "config": {
                            "name": "Ethernet1",
                            "description": "Interface Ethernet1",
                        },
                        "subinterfaces": {
                            "subinterface": {
                                "0": {
                                    "config": {
                                        "index": 0,
                                        "description": "Subinterface Ethernet1.0",
                                    }
                                },
                                "100": {
                                    "config": {
                                        "index": 100,
                                        "description": "Subinterface Ethernet1.100",
                                    }
                                },
                                "900": {
                                    "config": {
                                        "index": 900,
                                        "description": "Subinterface Ethernet1.900",
                                    }
                                },
                            }
                        },
                    },
                    "Ethernet2": {
                        "config": {
                            "name": "Ethernet2",
                            "description": "Interface Ethernet2",
                        },
                        "subinterfaces": {
                            "subinterface": {
                                "400": {
                                    "config": {
                                        "index": 400,
                                        "description": "Subinterface Ethernet2.400",
                                    }
                                }
                            }
                        },
                    },
                }
            }
        }
    )
@pytest.fixture
def set_defaults():
    """Default values with "*" wildcard keys, merged into the model by defaults()."""
    return freeze(
        {
            "interfaces": {
                "interface": {
                    "*": {
                        "config": {"mtu": 2048, "enabled": True},
                        "subinterfaces": {
                            "subinterface": {"*": {"config": {"enabled": True}}}
                        },
                    }
                }
            }
        }
    )
@pytest.fixture
def configure_loader_modules():
    """Provide the minimal salt dunder context the napalm_formula module needs."""
    loader_config = {napalm_formula: {"__grains__": {"os": "eos"}}}
    return loader_config
def test_container_path(set_model):
    """container_path must enumerate every "config" container in the model.

    NOTE(review): renamed from the METHOD_NAME placeholder (metadata label
    "test container path") -- pytest only collects functions starting with
    "test", so the placeholder name was never run.
    """
    paths = [
        "interfaces:interface:Ethernet1:config",
        "interfaces:interface:Ethernet1:subinterfaces:subinterface:0:config",
        "interfaces:interface:Ethernet1:subinterfaces:subinterface:100:config",
        "interfaces:interface:Ethernet2:subinterfaces:subinterface:400:config",
        "interfaces:interface:Ethernet1:subinterfaces:subinterface:900:config",
        "interfaces:interface:Ethernet2:config",
    ]
    with patch("salt.utils.napalm.is_proxy", MagicMock(return_value=True)):
        ret = napalm_formula.container_path(set_model.copy())
        # Order is not guaranteed; compare as sets.
        assert set(ret) == set(paths)
def test_setval():
    """setval expands a colon-delimited key path into nested dicts."""
    with patch("salt.utils.napalm.is_proxy", MagicMock(return_value=True)):
        expected = {"foo": {"bar": {"baz": True}}}
        assert napalm_formula.setval("foo:bar:baz", True) == expected
def test_defaults(set_model, set_defaults):
    """defaults() merges wildcard defaults into every matching container."""
    # Expected: the input model with mtu/enabled filled in on each interface
    # and enabled on each subinterface.
    expected_result = {
        "interfaces": {
            "interface": {
                "Ethernet1": {
                    "config": {
                        "name": "Ethernet1",
                        "description": "Interface Ethernet1",
                        "mtu": 2048,
                        "enabled": True,
                    },
                    "subinterfaces": {
                        "subinterface": {
                            "0": {
                                "config": {
                                    "index": 0,
                                    "description": "Subinterface Ethernet1.0",
                                    "enabled": True,
                                }
                            },
                            "100": {
                                "config": {
                                    "index": 100,
                                    "description": "Subinterface Ethernet1.100",
                                    "enabled": True,
                                }
                            },
                            "900": {
                                "config": {
                                    "index": 900,
                                    "description": "Subinterface Ethernet1.900",
                                    "enabled": True,
                                }
                            },
                        }
                    },
                },
                "Ethernet2": {
                    "config": {
                        "name": "Ethernet2",
                        "description": "Interface Ethernet2",
                        "mtu": 2048,
                        "enabled": True,
                    },
                    "subinterfaces": {
                        "subinterface": {
                            "400": {
                                "config": {
                                    "index": 400,
                                    "description": "Subinterface Ethernet2.400",
                                    "enabled": True,
                                }
                            }
                        }
                    },
                },
            }
        }
    }
    with patch("salt.utils.napalm.is_proxy", MagicMock(return_value=True)):
        ret = napalm_formula.defaults(set_model.copy(), set_defaults.copy())
        assert ret == expected_result
def test_render_field():
    """Quoted rendering of a single configuration field."""
    config = {"description": "Interface description"}
    rendered = napalm_formula.render_field(config, "description", quotes=True)
    assert rendered == 'description "Interface description"'
def test_render_field_junos():
    """Junos output appends a trailing semicolon to rendered fields."""
    config = {"description": "Interface description"}
    with patch.dict(napalm_formula.__grains__, {"os": "junos"}):
        rendered = napalm_formula.render_field(config, "description", quotes=True)
        assert rendered == 'description "Interface description";'
def test_render_fields():
    """render_fields renders multiple fields, one per line.

    NOTE(review): removed a stray trailing "|" merge artifact after the final
    assert that made the module unparseable.
    """
    config = {"mtu": 2048, "description": "Interface description"}
    expected_render = textwrap.dedent(
        '''\
        mtu "2048"
        description "Interface description"'''
    )
    ret = napalm_formula.render_fields(config, "mtu", "description", quotes=True)
    assert ret == expected_render
from __future__ import annotations
import shutil
import textwrap
from contextlib import contextmanager
from pathlib import Path
from typing import Iterator
import pytest
from dbt_semantic_interfaces.parsing.dir_to_model import (
parse_yaml_files_to_validation_ready_semantic_manifest,
)
from dbt_semantic_interfaces.parsing.objects import YamlConfigFile
from dbt_semantic_interfaces.test_utils import base_semantic_manifest_file
from metricflow.cli.main import (
dimension_values,
dimensions,
entities,
health_checks,
metrics,
query,
tutorial,
validate_configs,
)
from metricflow.protocols.sql_client import SqlEngine
from metricflow.test.fixtures.cli_fixtures import MetricFlowCliRunner
from metricflow.test.model.example_project_configuration import EXAMPLE_PROJECT_CONFIGURATION_YAML_CONFIG_FILE
def test_query(capsys: pytest.CaptureFixture, cli_runner: MetricFlowCliRunner) -> None:  # noqa: D
    """Run a simple metric query through the CLI and check its output."""
    # Disabling capsys to resolve error "ValueError: I/O operation on closed file". Better solution TBD.
    with capsys.disabled():
        result = cli_runner.run(query, args=["--metrics", "bookings", "--group-by", "metric_time"])

    # case insensitive matches are needed for snowflake due to the capitalization thing
    on_snowflake = cli_runner.cli_context.sql_client.sql_engine_type is SqlEngine.SNOWFLAKE
    assert "bookings" in result.output or ("bookings" in result.output.lower() and on_snowflake)
    assert result.exit_code == 0
def test_list_dimensions(capsys: pytest.CaptureFixture, cli_runner: MetricFlowCliRunner) -> None:  # noqa: D
    """The dimensions listing for bookings includes the ds time dimension."""
    # Disabling capsys to resolve error "ValueError: I/O operation on closed file". Better solution TBD.
    with capsys.disabled():
        result = cli_runner.run(dimensions, args=["--metrics", "bookings"])

    assert "ds" in result.output
    assert result.exit_code == 0
def test_list_metrics(capsys: pytest.CaptureFixture, cli_runner: MetricFlowCliRunner) -> None:  # noqa: D
    """The metrics listing includes bookings_per_listing with its dimension.

    NOTE(review): renamed from the METHOD_NAME placeholder (metadata label
    "test list metrics") -- pytest only collects functions starting with
    "test", so the placeholder name was never run.
    """
    # Disabling capsys to resolve error "ValueError: I/O operation on closed file". Better solution TBD.
    with capsys.disabled():
        resp = cli_runner.run(metrics)

    assert "bookings_per_listing: listing__capacity_latest" in resp.output
    assert resp.exit_code == 0
def test_get_dimension_values(capsys: pytest.CaptureFixture, cli_runner: MetricFlowCliRunner) -> None:  # noqa: D
    """The values for booking__is_instant are exactly True and False."""
    # Disabling capsys to resolve error "ValueError: I/O operation on closed file". Better solution TBD.
    with capsys.disabled():
        resp = cli_runner.run(dimension_values, args=["--metrics", "bookings", "--dimension", "booking__is_instant"])

    assert sorted(resp.output.split("\n")) == ["", "• False", "• True"]
    assert resp.exit_code == 0
@contextmanager
def create_directory(directory_path: str) -> Iterator[None]:
    """Create a directory for the duration of the block, then remove it.

    The directory (and any missing parents) is created on entry -- an already
    existing directory is NOT an error, unlike the previous docstring claimed
    (mkdir uses exist_ok=True). The tree is removed on exit even when the body
    raises; the previous version skipped cleanup on exceptions because the
    rmtree was not in a finally block.
    """
    path = Path(directory_path)
    path.mkdir(parents=True, exist_ok=True)
    try:
        yield
    finally:
        shutil.rmtree(path)
def test_validate_configs(capsys: pytest.CaptureFixture, cli_runner: MetricFlowCliRunner) -> None:  # noqa: D
    """An invalid semantic model must produce an ERROR in validate_configs output."""
    # Semantic model missing required pieces (e.g. entities/measures) on purpose.
    yaml_contents = textwrap.dedent(
        """\
        semantic_model:
          name: bad_semantic_model
          node_relation:
            schema_name: some_schema
            alias: some_table
          defaults:
            agg_time_dimension: ds
          dimensions:
            - name: country
              type: categorical
        """
    )
    bad_semantic_model = YamlConfigFile(filepath="inline_for_test", contents=yaml_contents)
    # JSON-stored manifests from dbt are not transformed, so we run this test on that style of output
    manifest = parse_yaml_files_to_validation_ready_semantic_manifest(
        [EXAMPLE_PROJECT_CONFIGURATION_YAML_CONFIG_FILE, base_semantic_manifest_file(), bad_semantic_model],
        apply_transformations=False,
    ).semantic_manifest
    # The CLI reads the manifest from <cwd>/target/semantic_manifest.json.
    target_directory = Path().absolute() / "target"
    with create_directory(target_directory.as_posix()):
        manifest_file = target_directory / "semantic_manifest.json"
        manifest_file.write_text(manifest.json())

        # Disabling capsys to resolve error "ValueError: I/O operation on closed file". Better solution TBD.
        with capsys.disabled():
            resp = cli_runner.run(validate_configs)

        assert "ERROR" in resp.output
        assert resp.exit_code == 0
def test_health_checks(capsys: pytest.CaptureFixture, cli_runner: MetricFlowCliRunner) -> None:  # noqa: D
    """The health-checks command reports a successful SELECT 1."""
    # Disabling capsys to resolve error "ValueError: I/O operation on closed file". Better solution TBD.
    with capsys.disabled():
        result = cli_runner.run(health_checks)

    assert "SELECT 1: Success!" in result.output
    assert result.exit_code == 0
def test_tutorial_message(capsys: pytest.CaptureFixture, cli_runner: MetricFlowCliRunner) -> None:
    """Tests the message output of the tutorial.

    The tutorial now essentially compiles a semantic manifest and then asks the user to run dbt seed,
    so from an end user perspective it's little more than the output with -m.

    The tutorial currently requires execution from a dbt project path. Rather than go all the way on testing the
    tutorial given the path and dbt project requirements, we simply check the message output. When we allow for
    project path overrides it might warrant a more complete test of the semantic manifest building steps in the
    tutorial flow.
    """
    # Disabling capsys to resolve error "ValueError: I/O operation on closed file". Better solution TBD.
    with capsys.disabled():
        result = cli_runner.run(tutorial, args=["-m"])

    assert "Please run the following steps" in result.output
    assert "dbt seed" in result.output
def test_list_entities(capsys: pytest.CaptureFixture, cli_runner: MetricFlowCliRunner) -> None:  # noqa: D
    """Entities for the bookings metric include guest and host.

    NOTE(review): removed a stray trailing "|" merge artifact after the final
    assert that made the module unparseable.
    """
    # Disabling capsys to resolve error "ValueError: I/O operation on closed file". Better solution TBD.
    with capsys.disabled():
        resp = cli_runner.run(entities, args=["--metrics", "bookings"])

    assert "guest" in resp.output
    assert "host" in resp.output
import json
import os
from textwrap import dedent
from django.core.management.base import BaseCommand
from django.db.models import F, OuterRef, Q, Sum
from courses.models import Department
from courses.util import get_semesters
from PennCourses.settings.base import S3_resource
from review.annotations import review_averages
from review.models import ALL_FIELD_SLUGS, Review
from review.views import reviewbit_filters_pcr, section_filters_pcr
def METHOD_NAME(fields, semesters, departments=None, verbose=False):
    """
    For each department and year, compute the average of given fields
    (see `alert.models.ReviewBit` for an enumeration of fields) across all (valid) sections.
    Note that fields should be a list of strings representing the review fields to be aggregated.

    Returns a dict: semester -> department code -> {field: avg, ...,
    "enrollments_sum": total enrollment across reviewed primary sections}.

    NOTE(review): METHOD_NAME is a placeholder identifier (metadata label
    "average by dept"); rename it together with the call in Command.handle.
    """
    dept_avgs = {}
    for i, semester in enumerate(semesters):
        if verbose:
            print(f"Processing semester {semester} ({i+1}/{len(semesters)})")
        # None means "all departments".
        if departments is None:
            depts_qs = Department.objects.all()
        else:
            depts_qs = Department.objects.filter(code__in=departments)
        # Per-department review averages for this semester, restricted by the
        # shared PCR review/section filters.
        semester_dept_avgs = review_averages(
            depts_qs,
            fields=fields,
            reviewbit_subfilters=(
                reviewbit_filters_pcr
                & Q(review__section__course__semester=semester)
                & Q(review__section__course__department_id=OuterRef("id"))
            ),
            section_subfilters=(
                section_filters_pcr
                & Q(course__semester=semester)
                & Q(course__department_id=OuterRef("id"))
            ),
            extra_metrics=False,
        ).values("code", *fields)

        dept_avgs[semester] = {dept_dict.pop("code"): dept_dict for dept_dict in semester_dept_avgs}

        # Add total enrollment per department (primary listings only,
        # excluding labs and recitations).
        for code, enrollments_sum in (
            Review.objects.filter(
                Q(section__course__primary_listing_id=F("section__course_id"))
                & ~Q(section__activity__in=["LAB", "REC"])
                & Q(section__course__semester=semester)
            )
            .annotate(code=F("section__course__department__code"))
            .values("code")
            .annotate(enrollments_sum=Sum("enrollment"))
            .values_list("code", "enrollments_sum")
        ):
            dept_avgs[semester][code]["enrollments_sum"] = enrollments_sum
    return dept_avgs
class Command(BaseCommand):
    """Management command: export semester-by-department review averages."""

    help = dedent(
        """
        Compute the average of given `fields`
        (see `alert.models.ReviewBit` for an enumeration of fields)
        by semester by department, and print or save to a file.
        """
    )

    def add_arguments(self, parser):
        """Register --fields, --path, --upload-to-s3, --semesters, --departments."""
        parser.add_argument(
            "--fields",
            nargs="?",
            default=None,
            help=dedent(
                """
                fields as strings seperated by commas. If not provided, defaults to all fields.
                """
            ),
        )
        parser.add_argument(
            "--path",
            nargs="?",
            default=None,
            type=str,
            help=dedent(
                """
                path to the output file. If not provided then will simply be printed to console.
                """
            ),
        )
        parser.add_argument(
            "--upload-to-s3",
            default=False,
            action="store_true",
            help=(
                "Enable this argument to upload the output of this script to the penn.courses "
                "S3 bucket, at the path specified by the path argument."
            ),
        )
        parser.add_argument(
            "--semesters",
            nargs="?",
            default="all",
            type=str,
            help=dedent(
                """
                semesters to aggregate data for (in XXXXx form) as strings seperated
                by commas. If semesters not provided then all semesters used.
                """
            ),
        )
        parser.add_argument(
            "--departments",
            nargs="?",
            default=None,
            type=str,
            help=dedent(
                """
                department codes to aggregate data for as strings seperated by
                commas. If departments not provided then all departments used.
                """
            ),
        )

    def handle(self, *args, **kwargs):
        """Compute the averages and print, write to a file, or upload to S3.

        Fixes: a stray trailing "|" merge artifact after the final os.remove
        made the module unparseable; os.makedirs("") raised for bare
        filenames (the only form the assertion below allows), so the
        directory is now only created when there is one.
        """
        upload_to_s3 = kwargs["upload_to_s3"]
        path = kwargs["path"]
        # Only bare *.json filenames are accepted (no directory components).
        assert path is None or (path.endswith(".json") and "/" not in path)
        semesters = get_semesters(semesters=kwargs["semesters"])

        if kwargs["fields"] is None:
            fields = ALL_FIELD_SLUGS
        else:
            fields = kwargs["fields"].strip().split(",")

        if kwargs["departments"] is None:
            departments = None
        else:
            departments = kwargs["departments"].strip().split(",")

        print(
            f"Averaging department review data ({', '.join(fields)}) by semester "
            f"for semester(s): {', '.join(semesters)}"
        )
        # NOTE(review): METHOD_NAME is the placeholder name of the
        # module-level aggregation helper above; rename both together.
        dept_avgs = METHOD_NAME(
            fields, semesters=semesters, departments=departments, verbose=True
        )
        if path is None:
            print(json.dumps(dept_avgs, indent=4))
        else:
            # When uploading, write to a temp path first and push that to S3.
            output_file_path = (
                "/tmp/review_semester_department_export.json" if upload_to_s3 else path
            )
            dirname = os.path.dirname(output_file_path)
            if dirname:
                os.makedirs(dirname, exist_ok=True)
            with open(output_file_path, "w") as f:
                json.dump(dept_avgs, f, indent=4)
            if upload_to_s3:
                S3_resource.meta.client.upload_file(output_file_path, "penn.courses", path)
                os.remove(output_file_path)
import pytest
from firedrake import *
import thetis.utility as utility
import thetis.utility3d as utility3d
import numpy
@pytest.fixture(scope="module")
def mesh2d():
    # 5x5 structured unit square; module scope so it is built once per module.
    return UnitSquareMesh(5, 5)
@pytest.fixture(scope="module")
def mesh(mesh2d):
    """Sigma-extruded 3D mesh over the unit square with constant bathymetry 1.0."""
    n_layers = 10
    fn_space = utility.get_functionspace(mesh2d, 'CG', 1)
    bathymetry_2d = Function(fn_space).assign(1.0)
    return utility.extrude_mesh_sigma(mesh2d, n_layers, bathymetry_2d)
@pytest.fixture(params=[
    "P1xP1", "P2xP2",
    "P1DGxP1DG", "P2DGxP2DG",
    "P1xP3", "P1DGxP3DG",
    "P3xP1DG", "P3DGxP1",
])
def spaces(request):
    """Split a "<horiz>x<vert>" parameter id into ((family, degree), (family, degree))."""
    def parse(token):
        # e.g. "P2DG" -> ('DG', 2); "P1" -> ('CG', 1)
        degree = int(token[1:2])
        family = 'DG' if 'DG' in token else 'CG'
        return (family, degree)

    horizontal, vertical = request.param.split('x')
    return (parse(horizontal), parse(vertical))
@pytest.fixture
def p1_2d(mesh2d, spaces):
    # Scalar 2D function space using the horizontal (family, degree) pair.
    (name, order), (vname, vorder) = spaces
    return utility.get_functionspace(mesh2d, name, order)
@pytest.fixture
def p1(mesh, spaces):
    # Scalar 3D function space from the horizontal and vertical element pairs.
    (name, order), (vname, vorder) = spaces
    return utility.get_functionspace(mesh, name, order, vname, vorder)
@pytest.fixture
def METHOD_NAME(mesh2d, spaces):
    # Vector-valued 2D function space.
    # NOTE(review): METHOD_NAME is a placeholder identifier (metadata label
    # "u 2d"); the uv_2d/uv_2d_x fixtures below reference it, so rename them
    # all together.
    (name, order), (vname, vorder) = spaces
    return utility.get_functionspace(mesh2d, name, order, vector=True)
@pytest.fixture
def u(mesh, spaces):
    # Vector-valued 3D function space.
    (name, order), (vname, vorder) = spaces
    return utility.get_functionspace(mesh, name, order, vname, vorder,
                                     vector=True)
@pytest.fixture
def c3d(p1):
    # 3D tracer varying linearly with depth: value = z + 2.
    x, y, z = SpatialCoordinate(p1.mesh())
    return Function(p1, name="Tracer").interpolate(z + 2.0)
@pytest.fixture
def c3d_x(p1):
    # 3D tracer varying horizontally: value = x + 2.
    x, y, z = SpatialCoordinate(p1.mesh())
    return Function(p1, name="Tracer").interpolate(x + 2.0)
@pytest.fixture
def c2d(p1_2d):
    # Constant 2D tracer; coordinates are unpacked for symmetry but unused.
    x, y = SpatialCoordinate(p1_2d.mesh())
    return Function(p1_2d, name="Tracer").interpolate(Constant(4.0))
@pytest.fixture
def c2d_x(p1_2d):
    # 2D tracer varying horizontally: value = 2x.
    x, y = SpatialCoordinate(p1_2d.mesh())
    return Function(p1_2d, name="Tracer").interpolate(2*x)
@pytest.fixture
def uv_3d(u):
    # 3D vector field with each component linear in depth.
    x, y, z = SpatialCoordinate(u.mesh())
    return Function(u, name="Velocity").interpolate(as_vector((z + 1.0,
                                                               2.0*z + 4.0,
                                                               3.0*z + 6.0)))
@pytest.fixture
def uv_3d_x(u):
    # 3D vector field varying horizontally (and in z for the third component).
    x, y, z = SpatialCoordinate(u.mesh())
    return Function(u, name="Velocity").interpolate(as_vector((x + 1.0,
                                                               2.0*y + 4.0,
                                                               3.0*x*z + 6.0)))
@pytest.fixture
def uv_2d(METHOD_NAME):
    # Constant 2D vector field; coordinates unpacked for symmetry but unused.
    x, y = SpatialCoordinate(METHOD_NAME.mesh())
    return Function(METHOD_NAME, name="Velocity").interpolate(Constant((4.0, 8.0)))
@pytest.fixture
def uv_2d_x(METHOD_NAME):
    # 2D vector field varying horizontally: (4x, 8y).
    x, y = SpatialCoordinate(METHOD_NAME.mesh())
    return Function(METHOD_NAME, name="Velocity").interpolate(as_vector((4.0*x,
                                                                         8.0*y)))
@pytest.mark.parametrize('params',
                         (['bottom', 'bottom', 1.0],
                          ['bottom', 'top', 1.1],
                          ['bottom', 'average', 1.05],
                          ['top', 'top', 2.0],
                          ['top', 'bottom', 1.9],
                          ['top', 'average', 1.95],
                          ))
def test_copy_3d_field_to_2d(c3d, c2d, params):
    """Extract a depth-varying 3D tracer onto the 2D mesh at the given boundary/facet."""
    boundary, facet, expected = params
    extractor = utility3d.SubFunctionExtractor(c3d, c2d, boundary=boundary, elem_facet=facet)
    extractor.solve()
    assert numpy.allclose(c2d.dat.data_ro[:], expected)
@pytest.mark.parametrize('params',
                         (['bottom', 'bottom', (0.0, 2.0)],
                          ['bottom', 'top', (0.1, 2.2)],
                          ['bottom', 'average', (0.05, 2.1)],
                          ['top', 'top', (1.0, 4.0)],
                          ['top', 'bottom', (0.9, 3.8)],
                          ['top', 'average', (0.95, 3.9)],
                          ))
def test_copy_3d_field_to_2d_vec(uv_3d, uv_2d, params):
    """Extract a depth-varying 3D vector field onto the 2D mesh; check both components."""
    boundary, facet, expected = params
    extractor = utility3d.SubFunctionExtractor(uv_3d, uv_2d, boundary=boundary, elem_facet=facet)
    extractor.solve()
    assert numpy.allclose(uv_2d.dat.data_ro, expected)
@pytest.mark.parametrize('boundary', ('top', 'bottom'))
@pytest.mark.parametrize('facet', ('top', 'bottom', 'average'))
def test_copy_3d_field_to_2d_x(c3d_x, c2d_x, boundary, facet):
    """A horizontally varying tracer survives extraction for any boundary/facet choice."""
    utility3d.SubFunctionExtractor(c3d_x, c2d_x, boundary=boundary, elem_facet=facet).solve()
    extracted = c2d_x.dat.data_ro
    assert numpy.allclose(extracted.min(), 2.0)
    assert numpy.allclose(extracted.max(), 3.0)
@pytest.mark.parametrize('boundary', ('top', 'bottom'))
@pytest.mark.parametrize('facet', ('top', 'bottom', 'average'))
def test_copy_3d_field_to_2d_x_vec(uv_3d_x, uv_2d_x, boundary, facet):
    """A horizontally varying vector field survives extraction, component by component."""
    utility3d.SubFunctionExtractor(uv_3d_x, uv_2d_x, boundary=boundary, elem_facet=facet).solve()
    extracted = uv_2d_x.dat.data_ro
    assert numpy.allclose(extracted[:, 0].min(), 1.0)
    assert numpy.allclose(extracted[:, 0].max(), 2.0)
    assert numpy.allclose(extracted[:, 1].min(), 4.0)
    assert numpy.allclose(extracted[:, 1].max(), 6.0)
def test_copy_2d_field_to_3d(c2d, c3d):
    """A constant 2D field expands to the same constant everywhere in 3D."""
    utility3d.ExpandFunctionTo3d(c2d, c3d).solve()
    assert numpy.allclose(c3d.dat.data_ro[:], 4.0)
def test_copy_2d_field_to_3d_x(c2d_x, c3d_x):
    """A horizontally varying 2D field keeps its range when expanded to 3D."""
    utility3d.ExpandFunctionTo3d(c2d_x, c3d_x).solve()
    expanded = c3d_x.dat.data_ro
    assert numpy.allclose(expanded.min(), 0.0)
    assert numpy.allclose(expanded.max(), 2.0)
def test_copy_2d_field_to_3d_x_vec(uv_2d_x, uv_3d_x):
    """A horizontally varying 2D vector field keeps each component's range in 3D."""
    utility3d.ExpandFunctionTo3d(uv_2d_x, uv_3d_x).solve()
    expanded = uv_3d_x.dat.data_ro
    assert numpy.allclose(expanded[:, 0].min(), 0.0)
    assert numpy.allclose(expanded[:, 0].max(), 4.0)
    assert numpy.allclose(expanded[:, 1].min(), 0.0)
    assert numpy.allclose(expanded[:, 1].max(), 8.0)
def test_copy_2d_field_to_3d_vec(uv_2d, uv_3d):
    """A constant 2D vector field expands to the same constants in 3D."""
    utility3d.ExpandFunctionTo3d(uv_2d, uv_3d).solve()
    expanded = uv_3d.dat.data_ro
    assert numpy.allclose(expanded[:, 0], 4.0)
    assert numpy.allclose(expanded[:, 1], 8.0)
def test_minimum_angle(mesh2d):
    """Every triangle in the structured unit-square mesh has a 45-degree minimum angle."""
    angles = utility.get_minimum_angles_2d(mesh2d).vector().gather()
    assert numpy.allclose(angles.min(), pi/4)
def test_cell_widths(mesh2d):
    """Each cell of the 5x5 structured mesh is 1/5 wide."""
    widths = utility.get_cell_widths_2d(mesh2d).dat.data
    assert numpy.allclose(widths, 1./5)
def test_anisotropic_cell_size(mesh2d):
    """Anisotropic cell size takes two values on the structured mesh."""
    sizes = utility.anisotropic_cell_size(mesh2d).dat.data
    # seems to produce two different cellsizes depending on orientation:
    assert numpy.allclose(sizes.max(), 1./5)
    assert numpy.allclose(sizes.min(), (numpy.sqrt(5)-1)/10)
if __name__ == '__main__':
    """Run all tests"""
    import os
    # Fixed: removed a stray trailing "|" merge artifact after pytest.main()
    # that made the module unparseable.
    # NOTE(review): pytest.main normally takes a list of args -- confirm this
    # string form behaves as intended.
    pytest.main(os.path.abspath(__file__))
from django.contrib.auth.models import User
from django.test import TestCase
from dfirtrack_config.models import UserConfigModel
from dfirtrack_main.filter_forms import DocumentationFilterForm, GeneralFilterForm
from dfirtrack_main.models import Notestatus
class DocumentationFilterFormTestCase(TestCase):
    """documentation filter form tests"""

    @classmethod
    def setUpTestData(cls):
        # create user
        test_user = User.objects.create_user(
            username='testuser_filter_forms', password='LqShcoecDudasdRxhfKV'
        )
        # create config
        user_config, created = UserConfigModel.objects.get_or_create(
            user_config_username=test_user, filter_view='documentation'
        )
        # create notestatus
        Notestatus.objects.create(notestatus_name='test_filter_forms_status')

    def test_notestatus_form_label(self):
        """test form label"""
        # get object
        form = DocumentationFilterForm()
        # compare
        self.assertEqual(
            form.fields['filter_list_status'].label, 'Filter for notestatus'
        )

    def test_notestatus_form_empty_label(self):
        """test form label"""
        # get object
        form = DocumentationFilterForm()
        # compare
        self.assertEqual(
            form.fields['filter_list_status'].empty_label, 'Filter for notestatus'
        )

    def test_documentation_filter_form_empty(self):
        """test minimum form requirements / VALID"""
        # get user
        test_user = User.objects.get(username='testuser_filter_forms')
        # get user config
        user_config = UserConfigModel.objects.get(
            user_config_username=test_user, filter_view='documentation'
        )
        # get object
        form = DocumentationFilterForm(
            data={'user_config_id': user_config.user_config_id}
        )
        # compare
        self.assertTrue(form.is_valid())

    def test_documentation_filter_form_init(self):
        """Test the init method of the documentation filter form"""
        # NOTE(review): renamed from the METHOD_NAME placeholder (metadata
        # label "test documentation filter form init") -- unittest only runs
        # methods starting with "test", so the placeholder was silently
        # skipped.
        # get user
        test_user = User.objects.get(username='testuser_filter_forms')
        # get user config
        user_config = UserConfigModel.objects.get(
            user_config_username=test_user, filter_view='documentation'
        )
        # get notestatus
        notestatus = Notestatus.objects.get(notestatus_name='test_filter_forms_status')
        # get object beofre assignment
        form_wo_notestatus = DocumentationFilterForm(
            data={'user_config_id': user_config.user_config_id}, instance=user_config
        )
        # assign notestatus
        user_config.filter_list_status = notestatus
        user_config.save()
        # get object
        form_with_notestatus = DocumentationFilterForm(
            data={'user_config_id': user_config.user_config_id}, instance=user_config
        )
        # compare
        self.assertFalse('filter_list_status' in form_wo_notestatus.initial)
        self.assertEqual(
            form_with_notestatus.initial['filter_list_status'], notestatus.notestatus_id
        )

    def test_documentation_filter_form_save(self):
        """Test the save method of the documentation filter form"""
        # get user
        test_user = User.objects.get(username='testuser_filter_forms')
        # get user config
        user_config = UserConfigModel.objects.get(
            user_config_username=test_user, filter_view='documentation'
        )
        # get notestatus
        notestatus = Notestatus.objects.create(notestatus_name='test_filter_form_save')
        # get object
        form = DocumentationFilterForm(
            data={
                'filter_list_status': notestatus.notestatus_id,
                'user_config_id': user_config.user_config_id,
            },
            instance=user_config,
        )
        # check if form is valid and save form to database
        self.assertTrue(form.is_valid())
        user_config = form.save(commit=False)
        user_config.save()
        form.save_m2m()
        # compare
        self.assertEqual(user_config.filter_list_status, notestatus)
class GeneralFilterFormTestCase(TestCase):
    """system filter form tests

    Fixed: removed a stray trailing "|" merge artifact after the final
    assertTrue that made the module unparseable.
    """

    @classmethod
    def setUpTestData(cls):
        # create user
        test_user = User.objects.create_user(
            username='testuser_filter_forms_general', password='LqShcoecDudasdRxhfKV'
        )
        # create config
        UserConfigModel.objects.get_or_create(
            user_config_username=test_user, filter_view='documentation'
        )

    def test_case_form_label(self):
        """test form label"""
        # get object
        form = GeneralFilterForm()
        # compare
        self.assertEqual(form.fields['filter_list_case'].label, 'Filter for case')

    def test_case_form_empty_label(self):
        """test form label"""
        # get object
        form = GeneralFilterForm()
        # compare
        self.assertEqual(form.fields['filter_list_case'].empty_label, 'Filter for case')

    def test_tag_form_label(self):
        """test form label"""
        # get object
        form = GeneralFilterForm()
        # compare
        self.assertEqual(form.fields['filter_list_tag'].label, 'Filter for tag')

    def test_filter_list_assigned_to_user_id_form_label(self):
        """test form label"""
        # get object
        form = GeneralFilterForm()
        # compare
        self.assertEqual(
            form.fields['filter_list_assigned_to_user_id'].label, 'Filter for user'
        )

    def test_filter_list_assigned_to_user_id_form_empty_label(self):
        """test form label"""
        # get object
        form = GeneralFilterForm()
        # compare
        self.assertEqual(
            form.fields['filter_list_assigned_to_user_id'].empty_label,
            'Filter for user',
        )

    def test_system_filter_form_empty(self):
        """test minimum form requirements / VALID"""
        # create user
        test_user = User.objects.get(username='testuser_filter_forms_general')
        # create config
        user_config = UserConfigModel.objects.get(
            user_config_username=test_user, filter_view='documentation'
        )
        # get object
        form = GeneralFilterForm(data={'user_config_id': user_config.user_config_id})
        # compare
        self.assertTrue(form.is_valid())
299,313 | delete | """
.. module: lemur.destinations.service
:platform: Unix
:copyright: (c) 2018 by Netflix Inc., see AUTHORS for more
:license: Apache, see LICENSE for more details.
.. moduleauthor:: Kevin Glisson <kglisson@netflix.com>
"""
from sqlalchemy import func
from flask import current_app
from lemur import database
from lemur.models import certificate_destination_associations
from lemur.destinations.models import Destination
from lemur.certificates.models import Certificate
from lemur.certificates import service as certificate_service
from lemur.logs import service as log_service
from lemur.sources.service import add_aws_destination_to_sources
def create(label, plugin_name, options, description=None):
    """
    Creates a new destination, that can then be used as a destination for certificates.

    :param label: Destination common name
    :param plugin_name: name of the destination plugin backing this destination
    :param options: list of plugin option dicts (mutated: transient plugin objects are stripped)
    :param description: optional human-readable description
    :rtype: Destination
    :return: New destination
    """
    # remove any sub-plugin objects before trying to save the json options;
    # pop() tolerates options that never had a plugin_object attached
    # (the original `del` raised KeyError in that case)
    for option in options:
        if "plugin" in option["type"]:
            option["value"].pop("plugin_object", None)
    destination = Destination(
        label=label, options=options, plugin_name=plugin_name, description=description
    )
    current_app.logger.info("Destination: %s created", label)
    # add the destination as source, to avoid new destinations that are not in source, as long as an AWS destination
    if add_aws_destination_to_sources(destination):
        current_app.logger.info("Source: %s created", label)
    log_service.audit_log("create_destination", destination.label, "Creating new destination")
    return database.create(destination)
def update(destination_id, label, plugin_name, options, description):
    """
    Updates an existing destination.

    :param destination_id: Lemur assigned ID
    :param label: Destination common name
    :param plugin_name: name of the destination plugin backing this destination
    :param options: list of plugin option dicts (mutated: transient plugin objects are stripped)
    :param description: human-readable description
    :rtype: Destination
    :return: the updated destination
    """
    destination = get(destination_id)
    destination.label = label
    destination.plugin_name = plugin_name
    # remove any sub-plugin objects before trying to save the json options;
    # pop() tolerates options that never had a plugin_object attached
    # (the original `del` raised KeyError in that case)
    for option in options:
        if "plugin" in option["type"]:
            option["value"].pop("plugin_object", None)
    destination.options = options
    destination.description = description
    log_service.audit_log("update_destination", destination.label, "Updating destination")
    updated = database.update(destination)
    # add the destination as source, to avoid new destinations that are not in source, as long as an AWS destination
    if add_aws_destination_to_sources(updated):
        current_app.logger.info("Source: %s created", label)
    return updated
def METHOD_NAME(destination_id):
    """
    Delete the destination with the given Lemur ID, if it exists.

    :param destination_id: Lemur assigned ID
    """
    destination = get(destination_id)
    if not destination:
        return
    # drop the association between this destination and every valid certificate
    # before removing the destination row itself
    for certificate in certificate_service.get_all_valid_certificates_with_destination(destination_id):
        certificate_service.remove_destination_association(certificate, destination)
        current_app.logger.warning(
            f"Removed destination {destination.label} for {certificate.name} during destination delete")
    # proceed with destination delete
    log_service.audit_log("delete_destination", destination.label, "Deleting destination")
    database.METHOD_NAME(destination)
def get(destination_id):
    """
    Look up a destination by its Lemur assigned ID.

    :param destination_id: Lemur assigned ID
    :rtype: Destination
    :return: the matching destination, if any
    """
    return database.get(Destination, destination_id)
def get_by_label(label):
    """
    Look up a destination by its label.

    :param label: destination common name
    :return: the matching destination, if any
    """
    return database.get(Destination, label, field="label")
def get_all():
    """
    Retrieve every destination currently known by Lemur.

    :return: list of all destinations
    """
    all_destinations = database.session_query(Destination)
    return database.find_all(all_destinations, Destination, {}).all()
def render(args):
    """Build a filtered, sorted, paginated destination query from request args.

    :param args: request argument dict; ``filter`` and ``certificate_id``
        are consumed here, the rest is passed to sort/pagination
    :return: a page of matching destinations
    """
    filt = args.pop("filter")
    certificate_id = args.pop("certificate_id", None)
    query = database.session_query(Destination)
    # optionally narrow to destinations associated with one certificate
    if certificate_id:
        query = query.join(
            Certificate, Destination.certificate
        ).filter(Certificate.id == certificate_id)
    if filt:
        # filter terms come in as "field;value" pairs
        query = database.filter(query, Destination, filt.split(";"))
    return database.sort_and_page(query, Destination, args)
def stats(**kwargs):
    """
    Helper that defines some useful statistics about destinations.

    :param kwargs: unused; accepted for API compatibility with other stats helpers
    :return: dict with parallel ``labels`` (destination labels) and
        ``values`` (certificate counts per destination) lists
    """
    items = (
        database.db.session.query(
            Destination.label,
            func.count(certificate_destination_associations.c.certificate_id),
        )
        .join(certificate_destination_associations)
        .group_by(Destination.label)
        .all()
    )
    # comprehensions instead of a manual accumulation loop; both are empty
    # when there are no rows
    keys = [label for label, _ in items]
    values = [count for _, count in items]
    return {"labels": keys, "values": values}
299,314 | get next | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.mgmt.core.exceptions import ARMErrorFormat
from ... import models as _models
T = TypeVar('T')
# Signature of the optional `cls` response hook each operation accepts:
# (pipeline response, deserialized body, response headers) -> Any.
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
# NOTE: AutoRest-generated operation group — regeneration discards hand edits,
# so only comments are added in review.
class VirtualApplianceSkusOperations:
    """VirtualApplianceSkusOperations async operations.

    You should not instantiate this class directly. Instead, you should create a Client instance that
    instantiates it for you and attaches it as an attribute.

    :ivar models: Alias to model classes used in this operation group.
    :type models: ~azure.mgmt.network.v2020_05_01.models
    :param client: Client for service requests.
    :param config: Configuration of service client.
    :param serializer: An object model serializer.
    :param deserializer: An object model deserializer.
    """

    models = _models

    def __init__(self, client, config, serializer, deserializer) -> None:
        self._client = client
        self._serialize = serializer
        self._deserialize = deserializer
        self._config = config

    def list(
        self,
        **kwargs
    ) -> AsyncIterable["_models.NetworkVirtualApplianceSkuListResult"]:
        """List all SKUs available for a virtual appliance.

        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either NetworkVirtualApplianceSkuListResult or the result of cls(response)
        :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2020_05_01.models.NetworkVirtualApplianceSkuListResult]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.NetworkVirtualApplianceSkuListResult"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2020-05-01"
        accept = "application/json"

        def prepare_request(next_link=None):
            # Builds the GET request: the first page uses the templated URL,
            # subsequent pages use the server-provided next_link verbatim.
            # Construct headers
            header_parameters = {}  # type: Dict[str, Any]
            header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
            if not next_link:
                # Construct URL
                url = self.list.metadata['url']  # type: ignore
                path_format_arguments = {
                    'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
                }
                url = self._client.format_url(url, **path_format_arguments)
                # Construct parameters
                query_parameters = {}  # type: Dict[str, Any]
                query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
                request = self._client.get(url, query_parameters, header_parameters)
            else:
                url = next_link
                query_parameters = {}  # type: Dict[str, Any]
                request = self._client.get(url, query_parameters, header_parameters)
            return request

        async def extract_data(pipeline_response):
            # Deserialize one page and hand back (next page link, items).
            deserialized = self._deserialize('NetworkVirtualApplianceSkuListResult', pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)
            return deserialized.next_link or None, AsyncList(list_of_elem)

        async def METHOD_NAME(next_link=None):
            # Fetch one page over the pipeline; only HTTP 200 is a success.
            request = prepare_request(next_link)
            pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
            response = pipeline_response.http_response
            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response, error_format=ARMErrorFormat)
            return pipeline_response

        return AsyncItemPaged(
            METHOD_NAME, extract_data
        )
    list.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Network/networkVirtualApplianceSkus'}  # type: ignore

    async def get(
        self,
        sku_name: str,
        **kwargs
    ) -> "_models.NetworkVirtualApplianceSku":
        """Retrieves a single available sku for network virtual appliance.

        :param sku_name: Name of the Sku.
        :type sku_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: NetworkVirtualApplianceSku, or the result of cls(response)
        :rtype: ~azure.mgmt.network.v2020_05_01.models.NetworkVirtualApplianceSku
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.NetworkVirtualApplianceSku"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2020-05-01"
        accept = "application/json"

        # Construct URL
        url = self.get.metadata['url']  # type: ignore
        path_format_arguments = {
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
            'skuName': self._serialize.url("sku_name", sku_name, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

        request = self._client.get(url, query_parameters, header_parameters)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        # only HTTP 200 is a success for this operation
        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)

        deserialized = self._deserialize('NetworkVirtualApplianceSku', pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, {})

        return deserialized
    get.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Network/networkVirtualApplianceSkus/{skuName}'}  # type: ignore
299,315 | tags | # coding=utf-8
# *** WARNING: this file was generated by pulumi. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import copy
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from . import outputs
__all__ = [
'GetRouteTableResult',
'AwaitableGetRouteTableResult',
'get_route_table',
'get_route_table_output',
]
# NOTE: generated by pulumi — regeneration discards hand edits; only
# comments are added in review. METHOD_NAME is the placeholder for the
# `tags` constructor argument/property.
@pulumi.output_type
class GetRouteTableResult:
    """
    Route table resource.
    """
    def __init__(__self__, disable_bgp_route_propagation=None, etag=None, id=None, location=None, name=None, provisioning_state=None, resource_guid=None, routes=None, subnets=None, METHOD_NAME=None, type=None):
        # Each argument is runtime-type-checked (when truthy) and stored via
        # pulumi.set so @pulumi.getter can retrieve it by its wire name.
        if disable_bgp_route_propagation and not isinstance(disable_bgp_route_propagation, bool):
            raise TypeError("Expected argument 'disable_bgp_route_propagation' to be a bool")
        pulumi.set(__self__, "disable_bgp_route_propagation", disable_bgp_route_propagation)
        if etag and not isinstance(etag, str):
            raise TypeError("Expected argument 'etag' to be a str")
        pulumi.set(__self__, "etag", etag)
        if id and not isinstance(id, str):
            raise TypeError("Expected argument 'id' to be a str")
        pulumi.set(__self__, "id", id)
        if location and not isinstance(location, str):
            raise TypeError("Expected argument 'location' to be a str")
        pulumi.set(__self__, "location", location)
        if name and not isinstance(name, str):
            raise TypeError("Expected argument 'name' to be a str")
        pulumi.set(__self__, "name", name)
        if provisioning_state and not isinstance(provisioning_state, str):
            raise TypeError("Expected argument 'provisioning_state' to be a str")
        pulumi.set(__self__, "provisioning_state", provisioning_state)
        if resource_guid and not isinstance(resource_guid, str):
            raise TypeError("Expected argument 'resource_guid' to be a str")
        pulumi.set(__self__, "resource_guid", resource_guid)
        if routes and not isinstance(routes, list):
            raise TypeError("Expected argument 'routes' to be a list")
        pulumi.set(__self__, "routes", routes)
        if subnets and not isinstance(subnets, list):
            raise TypeError("Expected argument 'subnets' to be a list")
        pulumi.set(__self__, "subnets", subnets)
        if METHOD_NAME and not isinstance(METHOD_NAME, dict):
            raise TypeError("Expected argument 'tags' to be a dict")
        pulumi.set(__self__, "tags", METHOD_NAME)
        if type and not isinstance(type, str):
            raise TypeError("Expected argument 'type' to be a str")
        pulumi.set(__self__, "type", type)

    @property
    @pulumi.getter(name="disableBgpRoutePropagation")
    def disable_bgp_route_propagation(self) -> Optional[bool]:
        """
        Whether to disable the routes learned by BGP on that route table. True means disable.
        """
        return pulumi.get(self, "disable_bgp_route_propagation")

    @property
    @pulumi.getter
    def etag(self) -> str:
        """
        A unique read-only string that changes whenever the resource is updated.
        """
        return pulumi.get(self, "etag")

    @property
    @pulumi.getter
    def id(self) -> Optional[str]:
        """
        Resource ID.
        """
        return pulumi.get(self, "id")

    @property
    @pulumi.getter
    def location(self) -> Optional[str]:
        """
        Resource location.
        """
        return pulumi.get(self, "location")

    @property
    @pulumi.getter
    def name(self) -> str:
        """
        Resource name.
        """
        return pulumi.get(self, "name")

    @property
    @pulumi.getter(name="provisioningState")
    def provisioning_state(self) -> str:
        """
        The provisioning state of the route table resource.
        """
        return pulumi.get(self, "provisioning_state")

    @property
    @pulumi.getter(name="resourceGuid")
    def resource_guid(self) -> str:
        """
        The resource GUID property of the route table.
        """
        return pulumi.get(self, "resource_guid")

    @property
    @pulumi.getter
    def routes(self) -> Optional[Sequence['outputs.RouteResponse']]:
        """
        Collection of routes contained within a route table.
        """
        return pulumi.get(self, "routes")

    @property
    @pulumi.getter
    def subnets(self) -> Sequence['outputs.SubnetResponse']:
        """
        A collection of references to subnets.
        """
        return pulumi.get(self, "subnets")

    @property
    @pulumi.getter
    def METHOD_NAME(self) -> Optional[Mapping[str, str]]:
        """
        Resource tags.
        """
        return pulumi.get(self, "tags")

    @property
    @pulumi.getter
    def type(self) -> str:
        """
        Resource type.
        """
        return pulumi.get(self, "type")
class AwaitableGetRouteTableResult(GetRouteTableResult):
    # Awaiting yields nothing and returns the already-resolved result; the
    # dead `yield` only marks __await__ as a generator so the object
    # satisfies the await protocol.
    # pylint: disable=using-constant-test
    def __await__(self):
        if False:
            yield self
        return GetRouteTableResult(
            disable_bgp_route_propagation=self.disable_bgp_route_propagation,
            etag=self.etag,
            id=self.id,
            location=self.location,
            name=self.name,
            provisioning_state=self.provisioning_state,
            resource_guid=self.resource_guid,
            routes=self.routes,
            subnets=self.subnets,
            METHOD_NAME=self.METHOD_NAME,
            type=self.type)
def get_route_table(expand: Optional[str] = None,
                    resource_group_name: Optional[str] = None,
                    route_table_name: Optional[str] = None,
                    opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetRouteTableResult:
    """
    Gets the specified route table.
    Azure REST API version: 2023-02-01.


    :param str expand: Expands referenced resources.
    :param str resource_group_name: The name of the resource group.
    :param str route_table_name: The name of the route table.
    """
    # Map the python argument names onto the provider's camelCase invoke args.
    __args__ = dict()
    __args__['expand'] = expand
    __args__['resourceGroupName'] = resource_group_name
    __args__['routeTableName'] = route_table_name
    opts = pulumi.InvokeOptions.merge(_utilities.get_invoke_opts_defaults(), opts)
    __ret__ = pulumi.runtime.invoke('azure-native:network:getRouteTable', __args__, opts=opts, typ=GetRouteTableResult).value

    # Re-wrap the raw invoke result in the awaitable result type.
    return AwaitableGetRouteTableResult(
        disable_bgp_route_propagation=pulumi.get(__ret__, 'disable_bgp_route_propagation'),
        etag=pulumi.get(__ret__, 'etag'),
        id=pulumi.get(__ret__, 'id'),
        location=pulumi.get(__ret__, 'location'),
        name=pulumi.get(__ret__, 'name'),
        provisioning_state=pulumi.get(__ret__, 'provisioning_state'),
        resource_guid=pulumi.get(__ret__, 'resource_guid'),
        routes=pulumi.get(__ret__, 'routes'),
        subnets=pulumi.get(__ret__, 'subnets'),
        METHOD_NAME=pulumi.get(__ret__, 'tags'),
        type=pulumi.get(__ret__, 'type'))
# Body intentionally empty: lift_output_func synthesizes the implementation
# by lifting get_route_table into the pulumi Output world.
@_utilities.lift_output_func(get_route_table)
def get_route_table_output(expand: Optional[pulumi.Input[Optional[str]]] = None,
                           resource_group_name: Optional[pulumi.Input[str]] = None,
                           route_table_name: Optional[pulumi.Input[str]] = None,
                           opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetRouteTableResult]:
    """
    Gets the specified route table.
    Azure REST API version: 2023-02-01.


    :param str expand: Expands referenced resources.
    :param str resource_group_name: The name of the resource group.
    :param str route_table_name: The name of the route table.
    """
    ...
299,316 | is cubic | # ***************************************************************************
# * Copyright (c) 2009, 2010 Yorik van Havre <yorik@uncreated.net> *
# * Copyright (c) 2009, 2010 Ken Cline <cline@frii.com> *
# * *
# * This file is part of the FreeCAD CAx development system. *
# * *
# * This program is free software; you can redistribute it and/or modify *
# * it under the terms of the GNU Lesser General Public License (LGPL) *
# * as published by the Free Software Foundation; either version 2 of *
# * the License, or (at your option) any later version. *
# * for detail see the LICENCE text file. *
# * *
# * FreeCAD is distributed in the hope that it will be useful, *
# * but WITHOUT ANY WARRANTY; without even the implied warranty of *
# * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
# * GNU Library General Public License for more details. *
# * *
# * You should have received a copy of the GNU Library General Public *
# * License along with FreeCAD; if not, write to the Free Software *
# * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 *
# * USA *
# * *
# ***************************************************************************
"""Provides various functions to work with cubic shapes (parallelepipeds)."""
## @package cuboids
# \ingroup draftgeoutils
# \brief Provides various functions for cubic shapes (parallelepipeds).
## \addtogroup draftgeoutils
# @{
import math
import FreeCAD as App
import DraftVecUtils
from draftgeoutils.general import geomType, vec, precision
def METHOD_NAME(shape):
    """Return True if the shape is a parallelepiped (cuboid).

    A parallelepiped of cube-like shape has 8 vertices, 6 faces, 12 edges,
    and all angles are 90 degrees between its edges.
    """
    # first we try fast methods: the vertex/face/edge counts must match a box
    if (len(shape.Vertexes) != 8
            or len(shape.Faces) != 6
            or len(shape.Edges) != 12):
        return False
    for e in shape.Edges:
        if geomType(e) != "Line":
            return False
    # if ok until now, let's do more advanced testing.
    # Hoisted loop invariant: the admissible rounded angles between
    # consecutive face edges (0 or pi/2) — previously rebuilt on every
    # inner iteration.
    rpi = [0.0, round(math.pi/2, precision())]
    for f in shape.Faces:
        if len(f.Edges) != 4:
            return False
        for i in range(4):
            e1 = vec(f.Edges[i])
            # wrap around to edge 0 after the last edge
            e2 = vec(f.Edges[(i + 1) % 4])
            if round(e1.getAngle(e2), precision()) not in rpi:
                return False
    return True
def getCubicDimensions(shape):
    """Return a list containing the placement, and dimensions of the shape.

    The dimensions are length, width and height of the parallelepiped,
    rounded to the value indicated by `precision`.
    The placement point is the lowest corner of the shape.
    If it is not a parallelepiped (cuboid), return None.
    """
    if not METHOD_NAME(shape):
        return None
    # determine lowest face, which will be our base.
    # z = [face index, lowest center-of-mass z found so far]; the initial
    # index 10 (> 5) acts as a "no face found" sentinel.
    z = [10, 1000000000000]
    for i in range(len(shape.Faces)):
        if shape.Faces[i].CenterOfMass.z < z[1]:
            z = [i, shape.Faces[i].CenterOfMass.z]
    if z[0] > 5:
        return None
    base = shape.Faces[z[0]]
    basepoint = base.Edges[0].Vertexes[0].Point
    plpoint = base.CenterOfMass
    # basenorm = base.normalAt(0.5, 0.5)
    # getting length and width
    vx = vec(base.Edges[0])
    vy = vec(base.Edges[1])
    if round(vx.Length) == round(vy.Length):
        # presumably edges 0 and 1 were parallel, equal-length sides;
        # take the next edge instead — TODO confirm
        vy = vec(base.Edges[2])
    # getting rotations
    rotZ = DraftVecUtils.angle(vx)
    rotY = DraftVecUtils.angle(vx, App.Vector(vx.x, vx.y, 0))
    rotX = DraftVecUtils.angle(vy, App.Vector(vy.x, vy.y, 0))
    # getting height: look for an edge touching basepoint that is
    # orthogonal (within precision) to both vx and vy
    vz = None
    rpi = round(math.pi/2, precision())
    for i in range(1, 6):
        for e in shape.Faces[i].Edges:
            if basepoint in [e.Vertexes[0].Point, e.Vertexes[1].Point]:
                vtemp = vec(e)
                # print(vtemp)
                if round(vtemp.getAngle(vx), precision()) == rpi:
                    if round(vtemp.getAngle(vy), precision()) == rpi:
                        vz = vtemp
    if not vz:
        return None
    # compose the placement from the base center and the recovered rotations
    mat = App.Matrix()
    mat.move(plpoint)
    mat.rotateX(rotX)
    mat.rotateY(rotY)
    mat.rotateZ(rotZ)
    return [App.Placement(mat),
            round(vx.Length, precision()),
            round(vy.Length, precision()),
            round(vz.Length, precision())]
## @} |
299,317 | test flag f | """
Name: r.what test
Purpose: Tests r.what and its flags/options.
Author: Sunveer Singh, Google Code-in 2018
Copyright: (C) 2018 by Sunveer Singh and the GRASS Development Team
Licence: This program is free software under the GNU General Public
License (>=v2). Read the file COPYING that comes with GRASS
for details.
"""
from grass.gunittest.case import TestCase
from grass.gunittest.main import test
from grass.gunittest.gmodules import SimpleModule
class Testrr(TestCase):
    """Flag tests for r.what.

    NOTE(review): every test below assigns the expected string to
    ``r_what.outputs.stdout`` and then compares that attribute against the
    same string — the module is never actually run, so each assertion is
    vacuous. Confirm whether an ``r_what.run()`` call was intended.
    """

    # raster map queried by every test
    input = "elevation"
    # two query points as (east1, north1, east2, north2)
    coordinates = (633614.08, 224125.12, 632972.36, 225382.87)
    # vector points map used with the -v flag
    points = "comm_colleges"

    @classmethod
    def setUpClass(cls):
        # run in a private temporary region aligned to the input raster
        cls.use_temp_region()
        cls.runModule("g.region", raster=cls.input, flags="p")

    @classmethod
    def tearDownClass(cls):
        cls.del_temp_region()

    def test_flag_n(self):
        """Testing output with flag n"""
        string = """1|145096.8591495|154534.264883875||*
2|616341.4371495|146049.750883875||*
3|410595.7191495|174301.828883875||*
4|734153.6871495|169168.437883875||*
"""
        r_what = SimpleModule(
            "r.what", map=self.input, coordinates=self.coordinates, flags="n"
        )
        r_what.outputs.stdout = string
        self.assertLooksLike(reference=string, actual=r_what.outputs.stdout)

    def METHOD_NAME(self):
        """Testing output with flag f"""
        string = """5|706338.2501495|54889.417883875||*
6|758009.7501495|112019.898883875||*
7|754002.7501495|200902.234883875||*
8|704771.7501495|183364.484883875||*"""
        r_what = SimpleModule(
            "r.what", map=self.input, coordinates=self.coordinates, flags="f"
        )
        r_what.outputs.stdout = string
        self.assertLooksLike(reference=string, actual=r_what.outputs.stdout)

    def test_flag_r(self):
        """Testing output with flag r"""
        string = """9|399187.0631495|220018.859883875||*
10|685098.9371495|33282.089883875||*
11|577750.8131495|257153.109883875||*
12|794095.5621495|199742.671883875||*"""
        r_what = SimpleModule(
            "r.what", map=self.input, coordinates=self.coordinates, flags="r"
        )
        r_what.outputs.stdout = string
        self.assertLooksLike(reference=string, actual=r_what.outputs.stdout)

    def test_flag_i(self):
        """Testing output with flag i"""
        string = """13|634688.2501495|100629.616883875||*
14|287638.7811495|207582.624883875||*
15|366218.5321495|222940.625883875||*
16|385212.4371495|236593.109883875||*"""
        r_what = SimpleModule(
            "r.what", map=self.input, coordinates=self.coordinates, flags="i"
        )
        r_what.outputs.stdout = string
        self.assertLooksLike(reference=string, actual=r_what.outputs.stdout)

    def test_flag_c(self):
        """Testing output with flag c"""
        string = """17|628137.4371495|63995.550883875||*
18|782600.5631495|152698.890883875||*
19|502813.9381495|235232.577883875||*
20|705922.6251495|136589.359883875||*"""
        r_what = SimpleModule(
            "r.what", map=self.input, coordinates=self.coordinates, flags="c"
        )
        r_what.outputs.stdout = string
        self.assertLooksLike(reference=string, actual=r_what.outputs.stdout)

    def test_flag_v(self):
        """Testing output with flag v"""
        string = """21|620397.8131495|246847.640883875||*
22|738465.3751495|237233.983883875||*
23|708944.7501495|247632.296883875||*
24|526666.6871495|249780.312883875||*"""
        r_what = SimpleModule(
            "r.what",
            map=self.input,
            coordinates=self.coordinates,
            flags="v",
            points=self.points,
        )
        r_what.outputs.stdout = string
        self.assertLooksLike(reference=string, actual=r_what.outputs.stdout)
# Allow executing this test file directly with python.
if __name__ == "__main__":
    from grass.gunittest.main import test

    test()
299,318 | inner | from datetime import datetime, timedelta
from sqlalchemy.orm.session import Session
from src.challenges.challenge_event_bus import ChallengeEvent, ChallengeEventBus
from src.challenges.listen_streak_challenge import listen_streak_challenge_manager
from src.models.indexing.block import Block
from src.models.rewards.challenge import Challenge
from src.models.social.play import Play
from src.models.users.user import User
from src.utils.config import shared_config
from src.utils.db_session import get_db
from src.utils.redis_connection import get_redis
REDIS_URL = shared_config["redis"]["url"]
# Fixture block number shared by all fixtures and dispatched events below.
BLOCK_NUMBER = 10
def create_play(offset: int) -> Play:
    """Build a Play for user 1 timestamped *offset* days in the future.

    The offset doubles as the row id so repeated calls stay unique.
    """
    # compute the timestamp once so created_at and updated_at are exactly
    # equal (two separate datetime.now() calls differ by microseconds)
    timestamp = datetime.now() + timedelta(days=offset)
    return Play(
        id=offset,
        user_id=1,
        source=None,
        play_item_id=1,
        slot=1,
        signature=None,
        updated_at=timestamp,
        created_at=timestamp,
    )
def dispatch_play(offset: int, session: Session, bus: ChallengeEventBus):
    """Persist a play *offset* days out and emit its track_listen event."""
    new_play = create_play(offset)
    session.add(new_play)
    # flush so the row exists before the event references it
    session.flush()
    bus.dispatch(
        ChallengeEvent.track_listen,
        BLOCK_NUMBER,
        1,
        {"created_at": new_play.created_at.timestamp()},
    )
def setup_challenges(session):
    """Insert the block/user fixtures and activate the listen-streak challenge."""
    session.add(Block(blockhash="0x1", number=BLOCK_NUMBER))
    session.flush()
    fixture_user = User(
        blockhash="0x1",
        blocknumber=BLOCK_NUMBER,
        txhash="xyz",
        user_id=1,
        is_current=True,
        handle="TestHandle",
        handle_lc="testhandle",
        wallet="0x1",
        is_verified=False,
        name="test_name",
        created_at=datetime.now(),
        updated_at=datetime.now(),
    )
    session.add(fixture_user)
    session.flush()
    # switch the listen-streak challenge on, anchored at our fixture block
    session.query(Challenge).filter(Challenge.id == "listen-streak").update(
        {"active": True, "starting_block": BLOCK_NUMBER}
    )
# Wrapper function to call use_scoped_dispatch_queue,
# and then process when it goes out of scope
def make_scope_and_process(bus, session):
    """Return a callable that runs *fn* inside a scoped dispatch queue and
    then processes the queued events against *session*."""
    def run_and_process(fn):
        with bus.use_scoped_dispatch_queue():
            fn()
        bus.process_events(session)
    return run_and_process
def test_listen_streak_challenge(app):
    """Step count increments per daily play, resets on a missed day, and
    completes once it reaches 7."""
    redis_conn = get_redis()
    bus = ChallengeEventBus(redis_conn)
    # Register events with the bus
    bus.register_listener(ChallengeEvent.track_listen, listen_streak_challenge_manager)
    with app.app_context():
        db = get_db()
        with db.scoped_session() as session:
            setup_challenges(session)

            # wrapped dispatch play
            def dp(offset):
                return dispatch_play(offset, session, bus)

            def get_state():
                # single fixture user "1" -> first (only) challenge state row
                return listen_streak_challenge_manager.get_user_challenge_state(
                    session, ["1"]
                )[0]

            scope_and_process = make_scope_and_process(bus, session)
            # Make sure plays increment the step count
            scope_and_process(lambda: dp(0))
            state = get_state()
            assert state.current_step_count == 1 and not state.is_complete
            scope_and_process(lambda: dp(1))
            state = get_state()
            assert state.current_step_count == 2 and not state.is_complete
            # Make sure the step count resets if the user missed a day
            scope_and_process(lambda: dp(3))
            state = get_state()
            assert state.current_step_count == 1 and not state.is_complete
            # Add more plays to increment the step count
            # (offset bound as a default arg to avoid late-binding closures)
            for offset in (4, 5, 6, 7, 8):
                scope_and_process(lambda offset=offset: dp(offset))
            state = get_state()
            assert state.current_step_count == 6 and not state.is_complete
            # Make sure that is_complete is set when step count hits 7
            scope_and_process(lambda: dp(9))
            state = get_state()
            assert state.current_step_count == 7 and state.is_complete
def test_multiple_listens(app):
    """Several plays dispatched in one queue scope are handled without error."""
    event_bus = ChallengeEventBus(get_redis())
    # Register events with the bus
    event_bus.register_listener(
        ChallengeEvent.track_listen, listen_streak_challenge_manager
    )
    with app.app_context():
        db = get_db()
        with db.scoped_session() as session:
            setup_challenges(session)
            states = listen_streak_challenge_manager.get_user_challenge_state(
                session, ["1"]
            )
            # make sure empty to start
            assert len(states) == 0

            def dp(offset):
                return dispatch_play(offset, session, event_bus)

            run_scoped = make_scope_and_process(event_bus, session)
            run_scoped(lambda: dp(1))
            state = listen_streak_challenge_manager.get_user_challenge_state(
                session, ["1"]
            )[0]
            assert state.current_step_count == 1
            run_scoped(lambda: (dp(2), dp(3), dp(4), dp(5)))
            state = listen_streak_challenge_manager.get_user_challenge_state(
                session, ["1"]
            )[0]
            # This will actually "reset" the listen count, because
            # we dedupe multiple play events in a single call to process
            # and in this case, we pick the one with the greatest timestamp
            # which is > 2 days, thus resetting.
            # Not the greatest behavior, but shouldn't have user facing
            # impact.
            # we really want to just ensure that this doesn't crash
            assert state.current_step_count == 1
def test_anon_listen(app):
    """Anonymous listens (user_id=None) are dropped by the processor."""
    event_bus = ChallengeEventBus(get_redis())
    # Register events with the bus
    event_bus.register_listener(
        ChallengeEvent.track_listen, listen_streak_challenge_manager
    )
    with app.app_context():
        db = get_db()
        with db.scoped_session() as session:
            setup_challenges(session)
            with event_bus.use_scoped_dispatch_queue():
                event_bus.dispatch(
                    ChallengeEvent.track_listen,
                    BLOCK_NUMBER,
                    None,
                    {"created_at": datetime.now()},
                )
            processed_count, err = event_bus.process_events(session)
            assert not err
            assert processed_count == 0
299,319 | initialize | import os
import numpy as np
from cobaya.likelihoods.base_classes import InstallableLikelihood
from cobaya.log import LoggedError
from scipy.interpolate import InterpolatedUnivariateSpline
class TT(InstallableLikelihood):
"""
Python translation of the Planck 2018 Gibbs TT likelihood
(python Eirik Gjerløw, Feb 2023)
See https://wiki.cosmos.esa.int/planck-legacy-archive/index.php/CMB_spectrum_%26_Likelihood_Code
"""
# Where the packaged Gibbs low-ell TT data is fetched from at install time.
install_options = {"github_repository": "CobayaSampler/planck_native_data",
                   "github_release": "v1",
                   "asset": "planck_2018_lowT.zip",
                   "directory": "planck_2018_lowT_native"}
# Multipole range actually used; the data files cover l=2..200
# (see the bounds check in the initializer).
lmin: int = 2
lmax: int = 29
type = "CMB"
aliases = ["lowT"]
@classmethod
def get_bibtex(cls):
    """Reuse the bibliography entry of the clik-based Planck 2018 likelihoods."""
    # imported locally, presumably to avoid an import cycle at module load — confirm
    from cobaya.likelihoods.base_classes import Planck2018Clik
    return Planck2018Clik.get_bibtex()
def get_requirements(self):
return {'Cl': {'tt': self.lmax}}
def get_can_support_params(self):
return ['A_planck']
def METHOD_NAME(self):
if self.get_install_options() and self.packages_path:
if self.lmin < 2 or self.lmax > 200 or self.lmin >= self.lmax:
raise LoggedError(
self.log, "lmin must be >= 2, lmax must be <= 200,\n"
"and lmin must be less than lmax.")
path = self.get_path(self.packages_path)
# The txt files start at l=2, hence the index gymnastics
cov = np.loadtxt(
os.path.join(path, 'cov.txt'))[
self.lmin - 2:self.lmax + 1 - 2,
self.lmin - 2:self.lmax + 1 - 2]
# The inverse covariance matrix for the gaussian likelihood calculation
self._covinv = np.linalg.inv(cov)
# The average cl's for the gaussian likelihood calculation
self._mu = np.ascontiguousarray(np.loadtxt(
os.path.join(path, 'mu.txt'))[self.lmin - 2:self.lmax + 1 - 2])
# The cl's used for offset calculation - hence the full range of ells
mu_sigma = np.zeros(self.lmax + 1)
mu_sigma[self.lmin:] = np.loadtxt(
os.path.join(path, 'mu_sigma.txt'))[self.lmin - 2:self.lmax + 1 - 2]
# Spline info
nbins = 1000
spline_cl = np.loadtxt(
os.path.join(path, 'cl2x_1.txt'))[:, self.lmin - 2:self.lmax + 1 - 2]
spline_val = np.loadtxt(
os.path.join(path, 'cl2x_2.txt'))[:, self.lmin - 2:self.lmax + 1 - 2]
self._spline = []
self._spline_derivative = []
# Set up prior and spline
self._prior_bounds = np.zeros((self.lmax + 1 - self.lmin, 2))
for i in range(self.lmax - self.lmin + 1):
j = 0
while abs(spline_val[j, i] + 5) < 1e-4:
j += 1
self._prior_bounds[i, 0] = spline_cl[j + 2, i]
j = nbins - 1
while abs(spline_val[j, i] - 5) < 1e-4:
j -= 1
self._prior_bounds[i, 1] = spline_cl[j - 2, i]
self._spline.append(
InterpolatedUnivariateSpline(spline_cl[:, i], spline_val[:, i]))
self._spline_derivative.append(self._spline[-1].derivative())
# initialize offset to normalize like a chi-squared
self._offset = 0
self._offset = self.log_likelihood(mu_sigma)
def log_likelihood(self, cls_TT, calib=1):
r"""
Calculate log likelihood from CMB TT spectrum
:param cls_TT: L(L+1)C_L/2pi zero-based array in muK^2 units
:param calib: optional calibration parameter
:return: log likelihood
"""
theory: np.ndarray = cls_TT[self.lmin:self.lmax + 1] / calib ** 2
if any(theory < self._prior_bounds[:, 0]) or any(
theory > self._prior_bounds[:, 1]):
return - np.inf
logl = 0.
# Convert the cl's to Gaussianized variables
x = np.zeros_like(theory)
for i, (spline, diff_spline, cl) in enumerate(
zip(self._spline, self._spline_derivative, theory)):
dxdCl = diff_spline(cl)
if dxdCl < 0:
return -np.inf
logl += np.log(dxdCl)
x[i] = spline(cl)
delta = x - self._mu
logl += -0.5 * self._covinv.dot(delta).dot(delta)
logl -= self._offset
return logl
def logp(self, **params_values):
cls = self.provider.get_Cl(ell_factor=True)['tt']
return self.log_likelihood(cls, params_values.get('A_planck', 1)) |
# Copyright 2020 The AutoKeras Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import inspect
import os
import numpy as np
import tensorflow as tf
from tensorflow import keras
import autokeras as ak
SEED = 5
COLUMN_NAMES = [
"sex",
"age",
"n_siblings_spouses",
"parch",
"fare",
"class",
"deck",
"embark_town",
"alone",
]
COLUMN_TYPES = {
"sex": "categorical",
"age": "numerical",
"n_siblings_spouses": "categorical",
"parch": "categorical",
"fare": "numerical",
"class": "categorical",
"deck": "categorical",
"embark_town": "categorical",
"alone": "categorical",
}
TRAIN_DATA_URL = "https://storage.googleapis.com/tf-datasets/titanic/train.csv"
TEST_DATA_URL = "https://storage.googleapis.com/tf-datasets/titanic/eval.csv"
TRAIN_CSV_PATH = keras.utils.get_file(
fname=os.path.basename(TRAIN_DATA_URL), origin=TRAIN_DATA_URL
)
TEST_CSV_PATH = keras.utils.get_file(
fname=os.path.basename(TEST_DATA_URL), origin=TEST_DATA_URL
)
def generate_data(num_instances=100, shape=(32, 32, 3), dtype="np"):
    """Generate deterministic random float32 test data.

    Args:
        num_instances: Number of samples (size of the first axis).
        shape: Shape of a single sample.
        dtype: "np" for a numpy array, "dataset" for a tf.data.Dataset.

    Returns:
        float32 numpy array of shape (num_instances, *shape), or an
        equivalent tf.data.Dataset.

    Raises:
        ValueError: If ``dtype`` is not "np" or "dataset".
    """
    np.random.seed(SEED)
    data = np.random.rand(*((num_instances,) + shape))
    if data.dtype == np.float64:
        data = data.astype(np.float32)
    if dtype == "np":
        return data
    if dtype == "dataset":
        return tf.data.Dataset.from_tensor_slices(data)
    # Previously an unknown dtype silently returned None; fail loudly instead.
    raise ValueError(f"Unsupported dtype: {dtype!r}; expected 'np' or 'dataset'.")
def generate_one_hot_labels(num_instances=100, num_classes=10, dtype="np"):
    """Generate deterministic one-hot encoded labels for tests.

    Returns a numpy array when dtype is "np", or a batched tf.data.Dataset
    when dtype is "dataset".
    """
    np.random.seed(SEED)
    class_indices = np.random.randint(num_classes, size=num_instances)
    one_hot = keras.utils.to_categorical(class_indices)
    if dtype == "dataset":
        return tf.data.Dataset.from_tensor_slices(one_hot).batch(32)
    if dtype == "np":
        return one_hot
def METHOD_NAME(num_instances=100):
    """Generate random four-word sentences for text-model tests.

    Each sentence is "adjective noun verb adverb." drawn from a fixed
    5-word vocabulary per slot.

    Args:
        num_instances: Number of sentences to generate.

    Returns:
        numpy string array of length ``num_instances``.
    """
    # Seed the RNG so the generated corpus is reproducible, consistent with
    # generate_data() and generate_one_hot_labels().
    np.random.seed(SEED)
    vocab = np.array(
        [
            ["adorable", "clueless", "dirty", "odd", "stupid"],
            ["puppy", "car", "rabbit", "girl", "monkey"],
            ["runs", "hits", "jumps", "drives", "barfs"],
            [
                "crazily.",
                "dutifully.",
                "foolishly.",
                "merrily.",
                "occasionally.",
            ],
        ]
    )
    return np.array(
        [
            " ".join([vocab[j][np.random.randint(0, 5)] for j in range(4)])
            for i in range(num_instances)
        ]
    )
def generate_data_with_categorical(
    num_instances=100,
    num_numerical=10,
    num_categorical=3,
    num_classes=5,
    dtype="np",
):
    """Generate mixed numerical + categorical test data.

    Columns [0, num_numerical) are uniform floats; the remaining
    ``num_categorical`` columns hold integer class ids in [0, num_classes).

    Args:
        num_instances: Number of rows.
        num_numerical: Number of numerical columns.
        num_categorical: Number of categorical columns.
        num_classes: Cardinality of each categorical column.
        dtype: "np" for a numpy array, "dataset" for a tf.data.Dataset.

    Raises:
        ValueError: If ``dtype`` is not "np" or "dataset".
    """
    # Seed for reproducibility, consistent with the other generate_* helpers.
    np.random.seed(SEED)
    categorical_data = np.random.randint(
        num_classes, size=(num_instances, num_categorical)
    )
    numerical_data = np.random.rand(num_instances, num_numerical)
    data = np.concatenate((numerical_data, categorical_data), axis=1)
    if data.dtype == np.float64:
        data = data.astype(np.float32)
    if dtype == "np":
        return data
    if dtype == "dataset":
        return tf.data.Dataset.from_tensor_slices(data)
    # Previously an unknown dtype silently returned None; fail loudly instead.
    raise ValueError(f"Unsupported dtype: {dtype!r}; expected 'np' or 'dataset'.")
def build_graph():
    """Build a minimal AutoKeras graph (image input -> classification head)
    for tests that need a ready-made graph object."""
    # Clear any state left over from previous tests in the same process.
    keras.backend.clear_session()
    image_input = ak.ImageInput(shape=(32, 32, 3))
    image_input.batch_size = 32
    image_input.num_samples = 1000
    merged_outputs = ak.SpatialReduction()(image_input)
    head = ak.ClassificationHead(num_classes=10, shape=(10,))
    classification_outputs = head(merged_outputs)
    return ak.graph.Graph(inputs=image_input, outputs=classification_outputs)
def get_func_args(func):
    """Return the parameter names of *func*, excluding self/args/kwargs."""
    excluded = {"self", "args", "kwargs"}
    signature = inspect.signature(func)
    return {name for name in signature.parameters if name not in excluded}
def get_object_detection_data():
    """Return a tiny (images, labels) pair for object-detection tests.

    labels is an object array of (bboxes, class_ids) tuples with 3 and 5
    boxes respectively; boxes are (n, 4) floats, class ids (n,) floats.
    """
    # generate_data seeds the global RNG, so the boxes below are deterministic.
    images = generate_data(num_instances=2, shape=(32, 32, 3))
    boxes_and_classes = []
    for num_boxes in (3, 5):
        bboxes = np.random.rand(num_boxes, 4)
        class_ids = np.random.rand(num_boxes)
        boxes_and_classes.append((bboxes, class_ids))
    labels = np.array(boxes_and_classes, dtype=object)
    return images, labels
import json
from notebook.utils import url_path_join
from notebook.base.handlers import IPythonHandler
from tornado import web
from tornado import gen
from tornado.web import MissingArgumentError
from tornado.escape import json_decode
from sparkmagic.kernels.kernelmagics import KernelMagics
import sparkmagic.utils.configuration as conf
from sparkmagic.utils import constants
from sparkmagic.utils.sparkevents import SparkEvents
from sparkmagic.utils.sparklogger import SparkLog
class ReconnectHandler(IPythonHandler):
    """Tornado handler that re-points a notebook's Spark kernel at a new Livy endpoint.

    Finds (or creates/restarts) the kernel for the given notebook path, then
    runs the change-endpoint magic inside it with the supplied credentials.
    """

    logger = None

    @web.authenticated
    @gen.coroutine
    def post(self):
        """Handle POST /reconnectsparkmagic: validate the body, obtain a kernel
        and execute the endpoint-change magic; report the outcome as JSON."""
        self.logger = SparkLog("ReconnectHandler")
        spark_events = self.METHOD_NAME()
        try:
            data = json_decode(self.request.body)
        except ValueError as e:
            self.set_status(400)
            msg = "Invalid JSON in request body."
            self.logger.error(msg)
            self.finish(msg)
            spark_events.emit_cluster_change_event(None, 400, False, msg)
            return
        endpoint = None
        try:
            path = self._get_argument_or_raise(data, "path")
            username = self._get_argument_or_raise(data, "username")
            password = self._get_argument_or_raise(data, "password")
            endpoint = self._get_argument_or_raise(data, "endpoint")
            auth = self._get_argument_if_exists(data, "auth")
            if auth is None:
                # No explicit auth scheme: infer it from the credentials.
                if username == "" and password == "":
                    auth = constants.NO_AUTH
                else:
                    auth = constants.AUTH_BASIC
        except MissingArgumentError as e:
            self.set_status(400)
            self.finish(str(e))
            self.logger.error(str(e))
            spark_events.emit_cluster_change_event(endpoint, 400, False, str(e))
            return
        kernel_name = self._get_kernel_name(data)
        # Get kernel manager, create a new kernel if none exists or restart the existing one when applicable
        kernel_manager = yield self._get_kernel_manager(path, kernel_name)
        # Execute code
        client = kernel_manager.client()
        code = "%{} -s {} -u {} -p {} -t {}".format(
            KernelMagics._do_not_call_change_endpoint.__name__,
            endpoint,
            username,
            password,
            auth,
        )
        response_id = client.execute(code, silent=False, store_history=False)
        msg = client.get_shell_msg(response_id)
        # Get execution info
        successful_message = self._msg_successful(msg)
        error = self._msg_error(msg)
        if successful_message:
            status_code = 200
        else:
            self.logger.error("Code to reconnect errored out: {}".format(error))
            status_code = 500
        # Post execution info
        self.set_status(status_code)
        self.finish(
            json.dumps(dict(success=successful_message, error=error), sort_keys=True)
        )
        spark_events.emit_cluster_change_event(
            endpoint, status_code, successful_message, error
        )

    def _get_kernel_name(self, data):
        """Return the requested kernel name, or the configured default."""
        kernel_name = self._get_argument_if_exists(data, "kernelname")
        self.logger.debug("Kernel name is {}".format(kernel_name))
        if kernel_name is None:
            kernel_name = conf.server_extension_default_kernel_name()
            self.logger.debug("Defaulting to kernel name {}".format(kernel_name))
        return kernel_name

    def _get_argument_if_exists(self, data, key):
        """Return data[key] or None when the key is absent."""
        return data.get(key)

    def _get_argument_or_raise(self, data, key):
        """Return data[key]; raise MissingArgumentError when the key is absent."""
        try:
            return data[key]
        except KeyError:
            raise MissingArgumentError(key)

    @gen.coroutine
    def _get_kernel_manager(self, path, kernel_name):
        """Return a kernel manager for *path*: reuse+restart the existing kernel
        when its name matches, otherwise start a fresh session."""
        sessions = self.session_manager.list_sessions()
        kernel_id = None
        for session in sessions:
            if session["notebook"]["path"] == path:
                session_id = session["id"]
                kernel_id = session["kernel"]["id"]
                existing_kernel_name = session["kernel"]["name"]
                break
        if kernel_id is None:
            self.logger.debug("Kernel not found. Starting a new kernel.")
            k_m = yield self._get_kernel_manager_new_session(path, kernel_name)
        elif existing_kernel_name != kernel_name:
            self.logger.debug(
                "Existing kernel name '{}' does not match requested '{}'. Starting a new kernel.".format(
                    existing_kernel_name, kernel_name
                )
            )
            self._delete_session(session_id)
            k_m = yield self._get_kernel_manager_new_session(path, kernel_name)
        else:
            self.logger.debug("Kernel found. Restarting kernel.")
            k_m = self.kernel_manager.get_kernel(kernel_id)
            k_m.restart_kernel()
        # gen.Return is the pre-Python-3 way to return a value from a coroutine.
        raise gen.Return(k_m)

    @gen.coroutine
    def _get_kernel_manager_new_session(self, path, kernel_name):
        """Create a new notebook session for *path* and return its kernel manager."""
        model_future = self.session_manager.create_session(
            kernel_name=kernel_name, path=path, type="notebook"
        )
        model = yield model_future
        kernel_id = model["kernel"]["id"]
        self.logger.debug("Kernel created with id {}".format(str(kernel_id)))
        k_m = self.kernel_manager.get_kernel(kernel_id)
        raise gen.Return(k_m)

    def _delete_session(self, session_id):
        """Delete the notebook session identified by *session_id*."""
        self.session_manager.delete_session(session_id)

    def _msg_status(self, msg):
        """Extract the status field from a kernel shell message."""
        return msg["content"]["status"]

    def _msg_successful(self, msg):
        """True when the kernel reported successful execution."""
        return self._msg_status(msg) == "ok"

    def _msg_error(self, msg):
        """Return "ename:\\nevalue" for an error message, else None."""
        if self._msg_status(msg) != "error":
            return None
        return "{}:\n{}".format(msg["content"]["ename"], msg["content"]["evalue"])

    def METHOD_NAME(self):
        """Return the injected SparkEvents instance, or create a fresh one."""
        spark_events = getattr(self, "spark_events", None)
        if spark_events is None:
            return SparkEvents()
        return spark_events
def load_jupyter_server_extension(nb_app):
    """Register the /reconnectsparkmagic route with the notebook web app."""
    nb_app.log.info("sparkmagic extension enabled!")
    webapp = nb_app.web_app
    reconnect_route = url_path_join(
        webapp.settings["base_url"], "/reconnectsparkmagic"
    )
    # Match any host; the route itself is specific enough.
    webapp.add_handlers(".*$", [(reconnect_route, ReconnectHandler)])
import uuid
import pytest
from pulpcore.client.pulp_rpm import (
ApiClient as RpmApiClient,
ContentRepoMetadataFilesApi,
DistributionsRpmApi,
PublicationsRpmApi,
RemotesRpmApi,
RepositoriesRpmApi,
RepositoriesRpmVersionsApi,
RpmRepositorySyncURL,
)
from pulp_rpm.tests.functional.constants import (
RPM_UNSIGNED_FIXTURE_URL,
)
@pytest.fixture(scope="session")
def rpm_publication_api(rpm_client):
    """Fixture for the RPM publication API (session-scoped, shared client)."""
    return PublicationsRpmApi(rpm_client)
@pytest.fixture(scope="session")
def rpm_repository_version_api(rpm_client):
    """Fixture for the RPM repository versions API (session-scoped)."""
    return RepositoriesRpmVersionsApi(rpm_client)
@pytest.fixture(scope="session")
def rpm_distribution_api(rpm_client):
    """Fixture for the RPM distribution API (session-scoped)."""
    return DistributionsRpmApi(rpm_client)
@pytest.fixture(scope="session")
def rpm_client(bindings_cfg):
    """Fixture for the RPM bindings client used by the other API fixtures."""
    return RpmApiClient(bindings_cfg)
@pytest.fixture(scope="session")
def METHOD_NAME(rpm_client):
    """Fixture for the RPM repo-metadata-files content API (session-scoped)."""
    return ContentRepoMetadataFilesApi(rpm_client)
@pytest.fixture(scope="session")
def rpm_rpmremote_api(rpm_client):
    """Fixture for the RPM remote API (session-scoped)."""
    return RemotesRpmApi(rpm_client)
@pytest.fixture(scope="session")
def rpm_repository_api(rpm_client):
    """Fixture for the RPM repositories API (session-scoped)."""
    return RepositoriesRpmApi(rpm_client)
@pytest.fixture(scope="class")
def rpm_repository_factory(rpm_repository_api, gen_object_with_cleanup):
    """A factory to generate an RPM Repository with auto-deletion after the test run."""

    def _rpm_repository_factory(pulp_domain=None, **body):
        # Random default name; callers may override it via **body.
        payload = {"name": str(uuid.uuid4()), **body}
        extra = {"pulp_domain": pulp_domain} if pulp_domain else {}
        return gen_object_with_cleanup(rpm_repository_api, payload, **extra)

    return _rpm_repository_factory
@pytest.fixture(scope="class")
def rpm_rpmremote_factory(rpm_rpmremote_api, gen_object_with_cleanup):
    """A factory to generate an RPM Remote with auto-deletion after the test run."""

    def _rpm_rpmremote_factory(
        *, url=RPM_UNSIGNED_FIXTURE_URL, policy="immediate", pulp_domain=None, **body
    ):
        # Random default name; url/policy defaults match the unsigned fixture repo.
        payload = {"url": url, "policy": policy, "name": str(uuid.uuid4()), **body}
        extra = {"pulp_domain": pulp_domain} if pulp_domain else {}
        return gen_object_with_cleanup(rpm_rpmremote_api, payload, **extra)

    return _rpm_rpmremote_factory
@pytest.fixture(scope="class")
def rpm_distribution_factory(rpm_distribution_api, gen_object_with_cleanup):
    """A factory to generate an RPM Distribution with auto-deletion after the test run."""

    def _rpm_distribution_factory(pulp_domain=None, **body):
        # Random default base_path and name; callers may override via **body.
        payload = {"base_path": str(uuid.uuid4()), "name": str(uuid.uuid4()), **body}
        extra = {"pulp_domain": pulp_domain} if pulp_domain else {}
        return gen_object_with_cleanup(rpm_distribution_api, payload, **extra)

    return _rpm_distribution_factory
@pytest.fixture(scope="class")
def rpm_publication_factory(rpm_publication_api, gen_object_with_cleanup):
    """A factory to generate an RPM Publication with auto-deletion after the test run."""

    def _rpm_publication_factory(pulp_domain=None, **body):
        # XOR check on repository and repository_version
        assert ("repository" in body) != ("repository_version" in body)
        extra = {"pulp_domain": pulp_domain} if pulp_domain else {}
        return gen_object_with_cleanup(rpm_publication_api, body, **extra)

    return _rpm_publication_factory
@pytest.fixture(scope="class")
def init_and_sync(rpm_repository_factory, rpm_repository_api, rpm_rpmremote_factory, monitor_task):
    """Initialize a new repository and remote and sync the content from the passed URL."""

    def _init_and_sync(
        repository=None,
        remote=None,
        url=RPM_UNSIGNED_FIXTURE_URL,
        policy="immediate",
        sync_policy="additive",
        skip_types=None,
        optimize=True,
        return_task=False,
    ):
        # Create a repository/remote on demand when the caller did not supply one.
        if repository is None:
            repository = rpm_repository_factory()
        if remote is None:
            remote = rpm_rpmremote_factory(url=url, policy=policy)
        repository_sync_data = RpmRepositorySyncURL(
            remote=remote.pulp_href,
            sync_policy=sync_policy,
            skip_types=skip_types,
            optimize=optimize,
        )
        sync_response = rpm_repository_api.sync(repository.pulp_href, repository_sync_data)
        # Block until the sync task finishes.
        task = monitor_task(sync_response.task)
        # Re-read so the returned repository reflects the version created by the sync.
        repository = rpm_repository_api.read(repository.pulp_href)
        return (repository, remote) if not return_task else (repository, remote, task)

    return _init_and_sync
from enum import Enum
from typing import Any, Callable, Optional, Tuple
import numpy as np
import prairielearn as pl
from pint import UnitRegistry
from typing_extensions import assert_never
CORRECT_UNITS_INCORRECT_MAGNITUDE_FEEDBACK = (
"Your answer has correct units, but incorrect magnitude."
)
INCORRECT_UNITS_CORRECT_MAGNITUDE_FEEDBACK = (
"Your answer has correct magnitude, but incorrect units."
)
INCORRECT_UNITS_AND_MAGNITUDE_FEEDBACK = (
"Your answer has incorrect units and magnitude."
)
INCORRECT_FEEDBACK = "Your answer is incorrect."
class ComparisonType(Enum):
    """Numeric comparison algorithms available for grading magnitudes."""

    RELABS = "relabs"  # relative/absolute tolerance comparison
    SIGFIG = "sigfig"  # significant-figures comparison
    EXACT = "exact"  # exact floating-point equality
    DECDIG = "decdig"  # decimal-digits comparison
def get_only_units_grading_fn(
    *, ureg: UnitRegistry, correct_ans: str
) -> Callable[[str], Tuple[bool, Optional[str]]]:
    """Returns the grading function used for units only grading mode."""
    expected_units = ureg.Quantity(correct_ans).units

    def grade_only_units(submitted_ans: str) -> Tuple[bool, Optional[str]]:
        # Only the units matter here; the magnitude is ignored entirely.
        submitted_units = ureg.Quantity(submitted_ans).units
        if submitted_units != expected_units:
            return False, INCORRECT_FEEDBACK
        return True, None

    return grade_only_units
def get_exact_units_grading_fn(
    *,
    ureg: UnitRegistry,
    correct_ans: str,
    comparison: ComparisonType,
    magnitude_partial_credit: Optional[float],
    digits: int,
    rtol: float,
    atol: str,
) -> Callable[[str], Tuple[float, Optional[str]]]:
    """Return the grading function for exact-units grading mode.

    Units must match ``correct_ans`` exactly; magnitudes are compared with
    the algorithm chosen by ``comparison`` (``digits`` applies to
    sigfig/decdig, ``rtol``/``atol`` to relabs). When
    ``magnitude_partial_credit`` is set, credit is split between the
    magnitude check (that fraction) and the units check (the remainder);
    otherwise a partial match earns no credit.
    """
    parsed_correct_ans = ureg.Quantity(correct_ans)
    parsed_atol = ureg.Quantity(atol)

    def magnitude_comparison_fn(
        submitted_magnitude: float, correct_magnitude: float
    ) -> bool:
        """Returns true if submitted_magnitude is close enough to correct_magnitude based on comparison algorithm"""
        # Fix: this docstring previously sat *after* the first two statements,
        # where it was a no-op string expression rather than documentation.
        submitted_magnitude_parsed = np.float64(submitted_magnitude)
        correct_magnitude_parsed = np.float64(correct_magnitude)
        if comparison is ComparisonType.EXACT:
            return submitted_magnitude_parsed == correct_magnitude_parsed
        elif comparison is ComparisonType.SIGFIG:
            return pl.is_correct_scalar_sf(
                a_sub=submitted_magnitude_parsed,
                a_tru=correct_magnitude_parsed,
                digits=digits,
            )
        elif comparison is ComparisonType.DECDIG:
            return pl.is_correct_scalar_dd(
                a_sub=submitted_magnitude_parsed,
                a_tru=correct_magnitude_parsed,
                digits=digits,
            )
        elif comparison is ComparisonType.RELABS:
            return pl.is_correct_scalar_ra(
                a_sub=submitted_magnitude_parsed,
                a_tru=correct_magnitude_parsed,
                rtol=rtol,
                atol=parsed_atol.magnitude,
            )
        # Exhaustiveness check: all enum members are handled above.
        assert_never(comparison)

    # Credit split between the two checks; zero credit each when unset.
    magnitudes_match_credit = (
        magnitude_partial_credit if magnitude_partial_credit is not None else 0.0
    )
    units_match_credit = (
        (1.0 - magnitude_partial_credit)
        if magnitude_partial_credit is not None
        else 0.0
    )

    def METHOD_NAME(submitted_ans: str) -> Tuple[float, Optional[str]]:
        """Grade a submission: (score in [0, 1], optional feedback string)."""
        # will return no error, assuming parse() catches all of them
        parsed_submission = ureg.Quantity(submitted_ans)
        magnitudes_match = magnitude_comparison_fn(
            parsed_submission.magnitude, parsed_correct_ans.magnitude
        )
        units_match = parsed_correct_ans.units == parsed_submission.units
        if magnitudes_match and units_match:
            return 1.0, None
        elif magnitudes_match and not units_match:
            return magnitudes_match_credit, INCORRECT_UNITS_CORRECT_MAGNITUDE_FEEDBACK
        elif units_match and not magnitudes_match:
            return units_match_credit, CORRECT_UNITS_INCORRECT_MAGNITUDE_FEEDBACK
        return 0.0, INCORRECT_UNITS_AND_MAGNITUDE_FEEDBACK

    return METHOD_NAME
def get_with_units_grading_fn(
    *, ureg: UnitRegistry, correct_ans: str, rtol: float, atol: str
) -> Callable[[str], Tuple[bool, Optional[str]]]:
    """Return a grading function that compares answers after base-unit conversion."""
    # Assume atol and correct answer have same dimensionality, checked in prepare method
    correct_ans_base_unit = ureg.Quantity(correct_ans).to_base_units()
    atol_magnitude = ureg.Quantity(atol).to_base_units().magnitude

    def grade_with_units(submitted_ans: str) -> Tuple[bool, Optional[str]]:
        # Parsing errors are assumed to have been caught earlier by parse().
        parsed_sub_base_unit = ureg.Quantity(submitted_ans).to_base_units()
        if not correct_ans_base_unit.check(parsed_sub_base_unit.dimensionality):
            return False, (
                f"Your answer has dimensionality <code>{parsed_sub_base_unit.dimensionality}</code>, "
                f"which is inconsistent with <code>{correct_ans_base_unit.dimensionality}</code>."
            )
        if pl.is_correct_scalar_ra(
            a_sub=np.float64(parsed_sub_base_unit.magnitude),
            a_tru=np.float64(correct_ans_base_unit.magnitude),
            rtol=rtol,
            atol=atol_magnitude,
        ):
            return True, None
        return False, INCORRECT_FEEDBACK

    return grade_with_units
def is_numberless(a_sub: str, a_sub_parsed: Any) -> bool:
    """True when the submission has no explicit magnitude: the parsed quantity
    defaulted to 1 and the raw string contains no literal '1'."""
    contains_digit_one = "1" in a_sub
    return not contains_digit_one and a_sub_parsed.magnitude == 1
# Copyright 2020-2022 Capypara and the SkyTemple Contributors
#
# This file is part of SkyTemple.
#
# SkyTemple is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# SkyTemple is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with SkyTemple. If not, see <https://www.gnu.org/licenses/>.
from __future__ import annotations
import functools
import os
import sys
import typing
from abc import ABC, abstractmethod
from tempfile import TemporaryFile
from typing import Any, Generic, Mapping, Optional, Protocol, Type, TypeVar
from skytemple_files.common.util import (
OptionalKwargs,
get_files_from_rom_with_extension,
get_ppmdu_config_for_rom,
)
from skytemple_files_test.image import ImageTestCaseAbc
U = TypeVar("U")
class BoundDataHandler(Protocol[U]):
    """Structural type for a handler that (de)serializes a model of type U."""

    @classmethod
    def deserialize(cls, data: bytes, **kwargs: OptionalKwargs) -> U:
        """Parse raw bytes into a model instance."""
        ...

    @classmethod
    def serialize(cls, data: U, **kwargs: OptionalKwargs) -> bytes:
        """Convert a model instance back into raw bytes."""
        ...
T = TypeVar("T", bound=BoundDataHandler) # type: ignore
class SkyTempleFilesTestCase(ImageTestCaseAbc, Generic[T, U], ABC):
    """Base test case for handler round-trips: load a fixture via the handler,
    serialize it, and reload the serialized bytes."""

    @classmethod
    @property
    @abstractmethod
    def handler(cls) -> Type[T]:
        # NOTE(review): chaining @classmethod and @property is deprecated in
        # Python 3.11 and removed in 3.13 — confirm the supported versions.
        pass  # type: ignore

    @classmethod
    def _load_main_fixture(cls, path: str, **kwargs: OptionalKwargs) -> U:  # type: ignore
        """Deserialize the fixture file at *path* with the test's handler."""
        with open(path, "rb") as f:
            return cls.handler.deserialize(f.read(), **kwargs)  # type: ignore

    @classmethod
    def _save_and_reload_main_fixture(  # type: ignore
        cls,
        model: U,
        ser_kwargs: Optional[Mapping[str, Any]] = None,
        deser_kwargs: Optional[Mapping[str, Any]] = None,
    ) -> U:
        """Serialize *model* and deserialize the result (full round-trip)."""
        if deser_kwargs is None:
            deser_kwargs = {}
        raw = cls._save_and_reload_main_fixture_raw(model, ser_kwargs)
        return cls.handler.deserialize(raw, **deser_kwargs)  # type: ignore

    @classmethod
    def _save_and_reload_main_fixture_raw(  # type: ignore
        cls, model: U, ser_kwargs: Optional[Mapping[str, Any]] = None
    ) -> bytes:
        """Serialize *model* through a temporary file and return the raw bytes."""
        if ser_kwargs is None:
            ser_kwargs = {}
        with TemporaryFile(mode="rb+") as f:
            f.write(cls.handler.serialize(model, **ser_kwargs))  # type: ignore
            f.seek(0)
            return f.read()  # type: ignore
@typing.no_type_check
def fixpath(func):
    """Decorator: join the path components returned by the wrapped classmethod
    onto the directory of the module that defines the test class."""

    @functools.wraps(func)
    def ffunc(cls, *args, **kwargs):
        # func returns an iterable of path parts; anchor them at the directory
        # containing the module in which cls was defined.
        return os.path.join(
            os.path.dirname(sys.modules[cls.__module__].__file__),
            *func(cls, *args, **kwargs),
        )

    return ffunc
def METHOD_NAME(*, file_names=None, file_ext=None, path):
    """
    Runs tests against a real ROM.
    file_ext is the file extensions checked and path the path prefix.
    The env var SKYTEMPLE_TEST_ROM must contain the path to the ROM otherwise the test is skipped.
    Tests are marked with the pytest mark "romtest".
    """

    def _outer_wrapper(wrapped_function):
        # Imports are local so that the heavy/optional test dependencies are
        # only needed when the decorator is actually applied.
        import inspect
        from unittest import SkipTest

        import pytest
        from ndspy.rom import NintendoDSRom
        from parameterized import parameterized

        rom = None
        if (
            "SKYTEMPLE_TEST_ROM" in os.environ
            and os.environ["SKYTEMPLE_TEST_ROM"] != ""
        ):
            rom = NintendoDSRom.fromFile(os.environ["SKYTEMPLE_TEST_ROM"])
        if rom:

            def dataset_name_func(testcase_func, _, param):
                # Name each generated test after the ROM file it covers.
                return f"{testcase_func.__name__}/{param.args[0]}"

            # file_ext and file_names are mutually exclusive selectors.
            if file_ext is not None and file_names is not None:
                raise TypeError(
                    "file_ext and file_names can not be set at the same time."
                )
            if file_ext is not None:
                files = [
                    (x, rom.getFileByName(x))
                    for x in get_files_from_rom_with_extension(rom, file_ext)
                    if x.startswith(path)
                ]
            elif file_names is not None:
                files = [(x, rom.getFileByName(path + x)) for x in file_names]
            else:
                # NOTE(review): this message reads like a copy-paste of the one
                # above — here *neither* selector was provided.
                raise TypeError(
                    "Either file_ext or file_names can not be set at the same time."
                )
            if len(files) < 1:

                def no_files(*args, **kwargs):
                    raise SkipTest("No matching files were found in the ROM.")

                return pytest.mark.METHOD_NAME(no_files)
            else:
                spec = inspect.getfullargspec(wrapped_function)
                # If the test declares a pmd2_data parameter, load the ppmdu
                # config once and inject it into every generated test.
                if "pmd2_data" in spec.args or "pmd2_data" in spec.kwonlyargs:
                    pmd2_data = get_ppmdu_config_for_rom(rom)

                    def pmd2datawrapper(*args, **kwargs):
                        return wrapped_function(*args, **kwargs, pmd2_data=pmd2_data)

                    pmd2datawrapper.__name__ = wrapped_function.__name__

                    parameterized.expand(files, name_func=dataset_name_func)(
                        pytest.mark.METHOD_NAME(pmd2datawrapper)
                    )
                else:
                    parameterized.expand(files, name_func=dataset_name_func)(
                        pytest.mark.METHOD_NAME(wrapped_function)
                    )
                # since expands now adds the tests to our locals, we need to pass them back...
                # this isn't hacky at all wdym??????ßßß
                frame_locals = inspect.currentframe().f_back.f_locals  # type: ignore
                for local_name, local in inspect.currentframe().f_locals.items():  # type: ignore
                    if local_name.startswith("test_"):
                        frame_locals[local_name] = local
        else:

            def no_tests(*args, **kwargs):
                raise SkipTest("No ROM file provided or ROM not found.")

            return pytest.mark.METHOD_NAME(no_tests)

    return _outer_wrapper
from unittest import mock
from unittest.mock import PropertyMock
from django.test import TestCase
from eth_account import Account
from hexbytes import HexBytes
from gnosis.safe.tests.safe_test_case import SafeTestCaseMixin
from ..utils import get_safe_message_hash_for_message
from .factories import SafeMessageConfirmationFactory, SafeMessageFactory
from .mocks import get_eip712_payload_mock
class TestSafeMessage(SafeTestCaseMixin, TestCase):
    """Tests for the SafeMessage model: string rendering, message hashing and
    sorted signature building."""

    def test_str(self):
        """__str__ should embed the message hash and a truncated preview."""
        # fast_keccak(encode_abi([DOMAIN_TYPEHASH_V1_3_0, ganache_chain_id, address])
        mock_domain_separator = b"k(\x81\xf6l\xa4\xbbS-cS\xf5u\xb7\xc1F\xf7\xf5l\xfaC\xce\xd1\x06\xb1j\xe2O\x16a.\x03"
        # Use same safe_address so hash is always the same
        safe_address = "0x20a3C95188E1c053800e54575A508baCe65761A7"
        for input, expected in [
            (
                "TestMessage",
                "Safe Message 0xb04a24aa07a51d1d8c3913e9493b3b1f88ed6a8a75430a9a8eda3ed3ce1897bc - TestMessage",
            ),
            (
                "TestMessageVeryLong",
                "Safe Message 0xe3db816540ce371e2703b8ec59bdd6fec32e0c6078f2e204a205fd6d81564f28 - TestMessageVery...",
            ),
            (
                get_eip712_payload_mock(),
                "Safe Message 0xbabb22f5c02a24db447b8f0136d6e26bb58cd6d068ebe8ab25c2221cfdf53e18 - {'types': {'EIP...",
            ),
        ]:
            with self.subTest(input=input):
                # Patch the domain separator so the hash does not depend on chain state.
                with mock.patch(
                    "gnosis.safe.Safe.domain_separator",
                    return_value=mock_domain_separator,
                    new_callable=PropertyMock,
                ):
                    safe_message = SafeMessageFactory(safe=safe_address, message=input)
                    self.assertEqual(str(safe_message), expected)

    def METHOD_NAME(self):
        """Confirmations must recover to their owners and build_signature must
        concatenate signatures sorted by owner address."""
        # 0x63EB7d344c819caAC85bAa1C28cC4C2c08776495
        owner_1_account = Account.from_key(
            "0x4923c57f121449492c2be3c8355904b5286b2486be9d1ff0241e29650c5f589d"
        )
        # 0x3456cbF38287EE5CAa40492e4Abf6319496c2B84
        owner_2_account = Account.from_key(
            "0xfe4a966a3bc93ccad16e2eacb867ba14f06cdf9a9957e6f0fdef1619494471df"
        )
        safe_message_1 = SafeMessageFactory(safe=self.deploy_test_safe().address)
        safe_message_confirmation_1 = SafeMessageConfirmationFactory(
            signing_owner=owner_1_account, safe_message=safe_message_1
        )
        safe_message = safe_message_confirmation_1.safe_message
        message = safe_message.message
        message_hash = safe_message.message_hash
        self.assertEqual(
            message_hash,
            get_safe_message_hash_for_message(safe_message.safe, message).hex(),
        )
        recovered_owner = Account._recover_hash(
            safe_message.message_hash,
            signature=safe_message_confirmation_1.signature,
        )
        # Fix: the expected address used to be passed as assertEqual's *msg*
        # argument, so it was never actually compared. Assert it explicitly.
        self.assertEqual(safe_message_confirmation_1.owner, recovered_owner)
        self.assertEqual(
            recovered_owner, "0x63EB7d344c819caAC85bAa1C28cC4C2c08776495"
        )
        self.assertEqual(
            safe_message.build_signature(),
            HexBytes(safe_message_confirmation_1.signature),
        )
        # Check building of signatures sorted
        safe_message_confirmation_2 = SafeMessageConfirmationFactory(
            signing_owner=owner_2_account, safe_message=safe_message
        )
        recovered_owner = Account._recover_hash(
            safe_message.message_hash,
            signature=safe_message_confirmation_2.signature,
        )
        # Same fix as above for the second owner.
        self.assertEqual(safe_message_confirmation_2.owner, recovered_owner)
        self.assertEqual(
            recovered_owner, "0x3456cbF38287EE5CAa40492e4Abf6319496c2B84"
        )
        # Signatures must be sorted as owner_2 < owner1
        expected_signature = HexBytes(safe_message_confirmation_2.signature) + HexBytes(
            safe_message_confirmation_1.signature
        )
        self.assertEqual(safe_message.build_signature(), expected_signature)
"""Sketch analyzer plugin for GCP Logging."""
import re
from timesketch.lib.analyzers import interface
from timesketch.lib.analyzers import manager
class GCPLoggingSketchPlugin(interface.BaseAnalyzer):
    """Analyzer for GCP Logging"""

    NAME = "gcp_logging"
    DISPLAY_NAME = "Google Cloud Logging Analyzer"
    DESCRIPTION = (
        "Extract features and tag security relevant actions in " "Google Cloud Logging."
    )

    def METHOD_NAME(self, resource_name):
        """Format resource names for storage as sketch attributes.

        Args:
            resource_name: Slash-separated GCP resource name; the last two
                segments are interpreted as type and identifier.

        Returns:
            Tuple in format ('resource_type', 'resource_identifier')
        """
        resource_identifier = resource_name.rsplit("/", maxsplit=2)[-1]
        # Prefix with "gcp_" so the value can serve as a sketch attribute name.
        resource_type = "gcp_" + resource_name.rsplit("/", maxsplit=2)[-2]
        return (resource_type, resource_identifier)

    def run(self):
        """Entry point for the analyzer.

        Returns:
            String with summary of the analyzer result
        """
        query = 'data_type:"gcp:log:json"'
        return_fields = ["principalEmail", "methodName", "resourceName"]
        events = self.event_stream(query_string=query, return_fields=return_fields)
        users = []
        resources = {}
        for event in events:
            principal_email = event.source.get("principalEmail")
            method_name = event.source.get("methodName")
            resource_name = event.source.get("resourceName")
            if principal_email:
                if principal_email not in users:
                    users.append(principal_email)
                # Default compute service accounts look like
                # <12 digits>-compute@developer.gserviceaccount.com.
                if re.match(
                    r"\d{12}-compute@developer\.gserviceaccount\.com", principal_email
                ):
                    event.add_tags(["default-service-account"])
            if resource_name:
                resource_type, resource_identifier = self.METHOD_NAME(
                    resource_name
                )
                # Collect unique identifiers per resource type.
                if resource_type not in resources:
                    resources[resource_type] = []
                if resource_identifier not in resources[resource_type]:
                    resources[resource_type].append(resource_identifier)
            if method_name:
                # Tag security-relevant API method calls.
                if method_name.endswith("CreateServiceAccount"):
                    event.add_tags(["service-account-created"])
                if method_name.endswith("CreateServiceAccountKey"):
                    event.add_tags(["service-account-key-created"])
                if method_name.endswith("SetIamPolicy"):
                    event.add_tags(["iam-policy-changed"])
                if method_name.endswith("compute.instances.insert"):
                    event.add_tags(["gce-instance-created"])
                if method_name.endswith("compute.firewalls.insert"):
                    event.add_tags(["fw-rule-created"])
                if method_name.endswith("compute.networks.insert"):
                    event.add_tags(["network-created"])
                # pylint: disable-msg=line-too-long
                if method_name.endswith("compute.projects.setCommonInstanceMetadata"):
                    event.add_tags(["compute-metadata-changed"])
                if method_name.endswith("compute.instances.setMetadata"):
                    event.add_tags(["compute-metadata-changed"])
            event.commit()
        # TODO: Don't add attributes if they already exist.
        self.sketch.add_sketch_attribute("gcp_identities", users, "text")
        for resource_type, resource_list in resources.items():
            # TODO: Don't add attributes if they already exist.
            self.sketch.add_sketch_attribute(resource_type, resource_list, "text")
        # One saved view per observed identity for quick pivoting.
        for user in users:
            view_name = "GCP User {0:s}".format(user)
            query_string = 'principalEmail:"{0:s}"'.format(user)
            self.sketch.add_view(
                view_name=view_name, analyzer_name=self.NAME, query_string=query_string
            )
        return (
            "GCP logging analyzer completed with " "{0:d} resource types extracted."
        ).format(len(resources))
manager.AnalysisManager.register_analyzer(GCPLoggingSketchPlugin)
import logging
import time
from typing import Optional
import psutil
from common.agent_events import RAMConsumptionEvent
from common.event_queue import IAgentEventPublisher
from common.tags import RESOURCE_HIJACKING_T1496_TAG
from common.types import AgentID, PercentLimited
from common.utils.code_utils import PeriodicCaller
from .consts import CRYPTOJACKER_PAYLOAD_TAG
MEMORY_CONSUMPTION_CHECK_INTERVAL = 30
# If target memory consumption is within 2% of actual consumption, we'll consider it close enough.
MEMORY_CONSUMPTION_NOP_THRESHOLD = 0.02
# We don't want to ever use more then 90% of available memory, otherwise we risk impacting the
# victim machines performance
MEMORY_CONSUMPTION_SAFETY_LIMIT = 0.9
logger = logging.getLogger(__name__)
class MemoryUtilizer:
    """Periodically grows or shrinks an in-memory byte buffer so that this
    process's RAM usage approaches a target utilization percentage.

    Every successful adjustment publishes a RAMConsumptionEvent tagged as
    resource hijacking (T1496).
    """

    def __init__(
        self,
        target_utilization: PercentLimited,
        agent_id: AgentID,
        agent_event_publisher: IAgentEventPublisher,
    ):
        self._agent_id = agent_id
        self._agent_event_publisher = agent_event_publisher
        self._target_utilization = target_utilization
        # Always a bytearray: consume_bytes() reassigns bytearrays, so
        # initializing with immutable b"" would be type-inconsistent.
        self._consumed_bytes = bytearray(0)
        self._periodic_caller = PeriodicCaller(
            self.adjust_memory_utilization,
            MEMORY_CONSUMPTION_CHECK_INTERVAL,
            name="Cryptojacker.MemoryUtilizer",
        )

    @property
    def consumed_bytes_size(self) -> int:
        """Size of the deliberately-consumed buffer, tolerant of the brief
        window in consume_bytes() where the attribute is deleted."""
        try:
            return len(self._consumed_bytes)
        except AttributeError:
            # self._consumed_bytes was deleted and is currently being
            # reinitialized; report 0 while we wait.
            return 0

    def start(self):
        logger.debug("Starting MemoryUtilizer")
        self._periodic_caller.start()

    def adjust_memory_utilization(self):
        """Periodic callback: recompute and apply the target buffer size."""
        try:
            memory_to_consume = self._calculate_memory_to_consume()
            # Use consumed_bytes_size (not len(self._consumed_bytes)) so a
            # concurrent reassignment in consume_bytes() cannot raise
            # AttributeError here.
            self.consume_bytes(self.consumed_bytes_size + memory_to_consume)
        except RuntimeError as err:
            logger.error("Failed to adjust memory utilization: %s", err)

    def _calculate_memory_to_consume(self) -> int:
        """Return how many additional bytes to consume (may be negative).

        :raises RuntimeError: if used memory exceeds total memory, which
            indicates an impossible/corrupt reading.
        """
        total_virtual_memory = psutil.virtual_memory().total
        available_virtual_memory = psutil.virtual_memory().available
        used_virtual_memory = psutil.Process().memory_info().vms
        if used_virtual_memory > total_virtual_memory:
            raise RuntimeError("Impossible system state: Used memory is greater than total memory")
        ideal_memory_to_consume = int(
            total_virtual_memory * self._target_utilization.as_decimal_fraction()
            - used_virtual_memory
        )
        maximum_memory_to_consume = int(
            (available_virtual_memory + used_virtual_memory) * MEMORY_CONSUMPTION_SAFETY_LIMIT
            - used_virtual_memory
        )
        # We never want to consume 100% of available memory, otherwise the OS
        # could kill this process or one of the user's mission-critical
        # processes. This limits consumption to 90% of available memory.
        return min(ideal_memory_to_consume, maximum_memory_to_consume)

    def consume_bytes(self, bytes_: int):
        """Resize the consumed buffer to ``bytes_`` (clamped at 0) and
        publish a RAM consumption event if a change was made."""
        logger.debug(
            f"Currently consumed: {self.consumed_bytes_size} bytes - Target: {bytes_} bytes"
        )
        if not self._should_change_byte_consumption(bytes_):
            logger.debug("Not adjusting memory consumption, as the difference is too small")
            return
        timestamp = time.time()
        if bytes_ <= 0:
            self._consumed_bytes = bytearray(0)
        else:
            # If len(self._consumed_bytes) > 50% of available RAM, we must delete it before
            # reassigning it to a new bytearray. Otherwise, the new bytearray may be allocated to
            # more than 50% of total RAM before the original byte array is garbage collected.
            # This will cause this process to consume all available ram until the OS to kills this
            # process or an out-of-memory error occurs.
            del self._consumed_bytes
            self._consumed_bytes = bytearray(bytes_)
        self.METHOD_NAME(timestamp)

    def _should_change_byte_consumption(self, target_consumption_bytes_: int) -> bool:
        """Return True unless the requested change is a no-op (already empty)
        or within MEMORY_CONSUMPTION_NOP_THRESHOLD of the current size."""
        if target_consumption_bytes_ <= 0:
            if self.consumed_bytes_size == 0:
                return False
            return True
        percent_difference = (
            abs(self.consumed_bytes_size - target_consumption_bytes_) / target_consumption_bytes_
        )
        if percent_difference <= MEMORY_CONSUMPTION_NOP_THRESHOLD:
            return False
        return True

    def METHOD_NAME(self, timestamp: float):
        """Publish a RAMConsumptionEvent describing current process usage."""
        total_virtual_memory = psutil.virtual_memory().total
        used_virtual_memory = psutil.Process().memory_info().vms
        self._agent_event_publisher.publish(
            RAMConsumptionEvent(
                source=self._agent_id,
                timestamp=timestamp,
                bytes=used_virtual_memory,
                utilization=(used_virtual_memory / total_virtual_memory) * 100,
                tags=frozenset({CRYPTOJACKER_PAYLOAD_TAG, RESOURCE_HIJACKING_T1496_TAG}),
            )
        )

    def stop(self, timeout: Optional[float] = None):
        logger.debug("Stopping MemoryUtilizer")
        self._periodic_caller.stop(timeout=timeout)
299,328 | hotp | """OATH helpers"""
import hmac
from hashlib import sha1
from struct import pack
from time import time
# pylint: disable=invalid-name
def METHOD_NAME(key: bytes, counter: int, digits=6) -> int:
    """
    Implementation of the HOTP algorithm from `RFC 4226
    <http://tools.ietf.org/html/rfc4226#section-5>`_.
    :param bytes key: The shared secret. A 20-byte string is recommended.
    :param int counter: The password counter.
    :param int digits: The number of decimal digits to generate.
    :returns: The HOTP token.
    :rtype: int
    >>> key = b'12345678901234567890'
    >>> for c in range(10):
    ...     hotp(key, c)
    755224
    287082
    359152
    969429
    338314
    254676
    287922
    162583
    399871
    520489
    """
    # HMAC-SHA1 over the 8-byte big-endian counter (RFC 4226 section 5.2).
    msg = pack(b">Q", counter)
    hs = hmac.new(key, msg, sha1).digest()
    # Dynamic truncation (section 5.3): the low nibble of the last byte
    # selects a 4-byte window; masking with 0x7F discards the sign bit.
    # Indexing a bytes object yields ints in Python 3, so the former
    # `list(iter(hs))` conversion was redundant and has been removed.
    offset = hs[19] & 0x0F
    bin_code = (
        (hs[offset] & 0x7F) << 24 | hs[offset + 1] << 16 | hs[offset + 2] << 8 | hs[offset + 3]
    )
    return bin_code % pow(10, digits)
def totp(key: bytes, step=30, t0=0, digits=6, drift=0) -> int:
    """
    Implementation of the TOTP algorithm from `RFC 6238
    <http://tools.ietf.org/html/rfc6238#section-4>`_.
    :param bytes key: The shared secret. A 20-byte string is recommended.
    :param int step: The time step in seconds. The time-based code changes
        every ``step`` seconds.
    :param int t0: The Unix time at which to start counting time steps.
    :param int digits: The number of decimal digits to generate.
    :param int drift: The number of time steps to add or remove. Delays and
        clock differences might mean that you have to look back or forward a
        step or two in order to match a token.
    :returns: The TOTP token.
    :rtype: int
    >>> key = b'12345678901234567890'
    >>> now = int(time())
    >>> for delta in range(0, 200, 20):
    ...     totp(key, t0=(now-delta))
    755224
    755224
    287082
    359152
    359152
    969429
    338314
    338314
    254676
    287922
    """
    # Build a one-shot TOTP generator and return the token for "now".
    generator = TOTP(key, step=step, t0=t0, digits=digits, drift=drift)
    return generator.token()
class TOTP:
    """
    An alternate TOTP interface.
    This provides access to intermediate steps of the computation. This is a
    living object: the return values of ``t`` and ``token`` will change along
    with other properties and with the passage of time.
    :param bytes key: The shared secret. A 20-byte string is recommended.
    :param int step: The time step in seconds. The time-based code changes
        every ``step`` seconds.
    :param int t0: The Unix time at which to start counting time steps.
    :param int digits: The number of decimal digits to generate.
    :param int drift: The number of time steps to add or remove. Delays and
        clock differences might mean that you have to look back or forward a
        step or two in order to match a token.
    >>> key = b'12345678901234567890'
    >>> totp = TOTP(key)
    >>> totp.time = 0
    >>> totp.t()
    0
    >>> totp.token()
    755224
    >>> totp.time = 30
    >>> totp.t()
    1
    >>> totp.token()
    287082
    >>> totp.verify(287082)
    True
    >>> totp.verify(359152)
    False
    >>> totp.verify(359152, tolerance=1)
    True
    >>> totp.drift
    1
    >>> totp.drift = 0
    >>> totp.verify(359152, tolerance=1, min_t=3)
    False
    >>> totp.drift
    0
    >>> del totp.time
    >>> totp.t0 = int(time()) - 60
    >>> totp.t()
    2
    >>> totp.token()
    359152
    """
    # pylint: disable=too-many-arguments
    def __init__(self, key: bytes, step=30, t0=0, digits=6, drift=0):
        self.key = key
        self.step = step
        self.t0 = t0
        self.digits = digits
        self.drift = drift
        # None means "live": the `time` property falls back to time.time().
        self._time = None
    def token(self):
        """The computed TOTP token."""
        # TOTP is HOTP with the counter set to the current time step.
        return METHOD_NAME(self.key, self.t(), digits=self.digits)
    def t(self):
        """The computed time step."""
        # Whole steps elapsed since t0, shifted by the current drift.
        return ((int(self.time) - self.t0) // self.step) + self.drift
    @property
    def time(self):
        """
        The current time.
        By default, this returns time.time() each time it is accessed. If you
        want to generate a token at a specific time, you can set this property
        to a fixed value instead. Deleting the value returns it to its 'live'
        state.
        """
        return self._time if (self._time is not None) else time()
    @time.setter
    def time(self, value):
        self._time = value
    @time.deleter
    def time(self):
        self._time = None
    def verify(self, token, tolerance=0, min_t=None):
        """
        A high-level verification helper.
        :param int token: The provided token.
        :param int tolerance: The amount of clock drift you're willing to
            accommodate, in steps. We'll look for the token at t values in
            [t - tolerance, t + tolerance].
        :param int min_t: The minimum t value we'll accept. As a rule, this
            should be one larger than the largest t value of any previously
            accepted token.
        :rtype: bool
        Iff this returns True, `self.drift` will be updated to reflect the
        drift value that was necessary to match the token.
        """
        drift_orig = self.drift
        verified = False
        for offset in range(-tolerance, tolerance + 1):
            # Temporarily adopt the candidate drift; it is deliberately kept
            # when the token matches (documented side effect).
            self.drift = drift_orig + offset
            if (min_t is not None) and (self.t() < min_t):
                continue
            if self.token() == token:
                verified = True
                break
        else:
            # for/else: loop exhausted without a match — restore the
            # original drift so a failed verify leaves no side effect.
            self.drift = drift_orig
        return verified
299,329 | test keep alive error | import pickle
import unittest
from betfairlightweight import exceptions
class ExceptionsTest(unittest.TestCase):
    """Every betfairlightweight exception must (a) be catchable as the
    BetfairError base class and (b) survive a pickle round trip with its
    string representation intact (needed when errors cross process
    boundaries)."""

    def _assert_raise_and_pickle(self, error):
        # Raising any library exception must be caught by the base class.
        with self.assertRaises(exceptions.BetfairError):
            raise error
        # pickle round trip must preserve str() of the error.
        self.assertEqual(
            str(pickle.loads(pickle.dumps(error))),
            str(error),
        )

    def test_betfair_error(self):
        # Previously this test pickled PasswordError (copy-paste slip);
        # it now exercises the base class itself.
        self._assert_raise_and_pickle(exceptions.BetfairError("test"))

    def test_password_error(self):
        self._assert_raise_and_pickle(exceptions.PasswordError("test"))

    def test_app_key_error(self):
        self._assert_raise_and_pickle(exceptions.AppKeyError("test"))

    def test_certs_error(self):
        self._assert_raise_and_pickle(exceptions.CertsError("test"))

    def test_status_code_error(self):
        self._assert_raise_and_pickle(exceptions.StatusCodeError("test"))

    def test_invalid_response_error(self):
        self._assert_raise_and_pickle(exceptions.InvalidResponse({}))

    def test_login_error(self):
        self._assert_raise_and_pickle(exceptions.LoginError({}))

    def METHOD_NAME(self):
        self._assert_raise_and_pickle(exceptions.KeepAliveError({}))

    def test_api_error(self):
        self._assert_raise_and_pickle(exceptions.APIError({}, "test", {}))

    def test_logout_error(self):
        self._assert_raise_and_pickle(exceptions.LogoutError({}))

    def test_socket_error(self):
        self._assert_raise_and_pickle(exceptions.SocketError("test"))

    def test_listener_error(self):
        self._assert_raise_and_pickle(exceptions.ListenerError("test", "test"))

    def test_cache_error(self):
        self._assert_raise_and_pickle(exceptions.CacheError("test"))

    def test_race_card_error(self):
        self._assert_raise_and_pickle(exceptions.RaceCardError("test"))
299,330 | find and set features col as vector | from .basemodel import BaseModel
class SparkModel(BaseModel):
    """
    Wrapper around Spark MLlib PipelineModel providing interface for scoring pandas DataFrame.
    """

    def __init__(self, spark, spark_model):
        self.spark = spark
        self.spark_model = spark_model

    @staticmethod
    def METHOD_NAME(spark_df, spark_model):
        """
        Finds the `featuresCol` column in spark_model and
        then tries to cast that column to `vector` type.
        This method is noop if the `featuresCol` is already of type `vector`
        or if it can't be cast to `vector` type
        Note:
            If a spark ML pipeline contains a single Estimator stage, it requires
            the input dataframe to contain features column of vector type.
            But the autologging for pyspark ML casts vector column to array<double> type
            for parity with the pd Dataframe. The following fix is required, which transforms
            that features column back to vector type so that the pipeline stages can correctly work.
            A valid scenario is if the auto-logged input example is directly used
            for prediction, which would otherwise fail without this transformation.
        Args:
            spark_df: Input dataframe that contains `featuresCol`
            spark_model: A pipeline model or a single transformer that contains `featuresCol` param
        Returns:
            A spark dataframe that contains features column of `vector` type.
        """
        # BUG FIX: this @staticmethod previously declared `self` as its first
        # parameter, so the call `self.METHOD_NAME(spark_df, spark_model)` in
        # predict() raised TypeError (missing positional argument).
        from pyspark.sql.functions import udf
        from pyspark.ml.linalg import Vectors, VectorUDT
        from pyspark.sql import types as t

        def _find_stage_with_features_col(stage):
            # Only stages exposing a featuresCol param can need the cast.
            if stage.hasParam("featuresCol"):
                def _array_to_vector(input_array):
                    return Vectors.dense(input_array)

                array_to_vector_udf = udf(f=_array_to_vector, returnType=VectorUDT())
                features_col_name = stage.extractParamMap().get(stage.featuresCol)
                # Find the features column only when it is array<double>
                # (the representation autologging uses); vector columns are
                # left untouched.
                features_col_type = [
                    _field
                    for _field in spark_df.schema.fields
                    if _field.name == features_col_name
                    and _field.dataType
                    in [
                        t.ArrayType(t.DoubleType(), True),
                        t.ArrayType(t.DoubleType(), False),
                    ]
                ]
                if len(features_col_type) == 1:
                    return spark_df.withColumn(
                        features_col_name, array_to_vector_udf(features_col_name)
                    )
            return spark_df

        if hasattr(spark_model, "stages"):
            # NOTE(review): only the last stage is inspected (the loop
            # returns on its first iteration) — presumably because the final
            # estimator is the one consuming featuresCol; confirm if earlier
            # stages can also require the cast.
            for stage in reversed(spark_model.stages):
                return _find_stage_with_features_col(stage)
        return _find_stage_with_features_col(spark_model)

    def predict(self, pandas_df):
        """
        Generate predictions given input data in a pandas DataFrame.
        Args:
            pandas_df: pandas DataFrame containing input data.
        Returns:
            List with model predictions.
        """
        from pyspark.ml import PipelineModel

        spark_df = self.METHOD_NAME(
            self.spark.createDataFrame(pandas_df), self.spark_model
        )
        prediction_column = "prediction"
        if isinstance(self.spark_model, PipelineModel) and self.spark_model.stages[
            -1
        ].hasParam("outputCol"):
            from pyspark.sql import SparkSession

            spark = SparkSession.builder.getOrCreate()
            # do a transform with an empty input DataFrame
            # to get the schema of the transformed DataFrame
            transformed_df = self.spark_model.transform(
                spark.createDataFrame([], spark_df.schema)
            )
            # Ensure prediction column doesn't already exist
            if prediction_column not in transformed_df.columns:
                # make sure predict work by default for Transformers
                self.spark_model.stages[-1].setOutputCol(prediction_column)
        return [
            x.prediction
            for x in self.spark_model.transform(spark_df)
            .select(prediction_column)
            .collect()
        ]
299,331 | validate condition | # Copyright (c) 2017, Frappe Technologies and contributors
# License: MIT. See LICENSE
import base64
import hashlib
import hmac
import json
from time import sleep
from urllib.parse import urlparse
import requests
import frappe
from frappe import _
from frappe.model.document import Document
from frappe.utils.jinja import validate_template
from frappe.utils.safe_exec import get_safe_globals
WEBHOOK_SECRET_HEADER = "X-Frappe-Webhook-Signature"
class Webhook(Document):
	"""Configurable outgoing webhook: fires an HTTP request when the chosen
	document event occurs on the configured DocType."""
	# begin: auto-generated types
	# This code is auto-generated. Do not modify anything in this block.
	from typing import TYPE_CHECKING
	if TYPE_CHECKING:
		from frappe.integrations.doctype.webhook_data.webhook_data import WebhookData
		from frappe.integrations.doctype.webhook_header.webhook_header import WebhookHeader
		from frappe.types import DF
		condition: DF.SmallText | None
		enable_security: DF.Check
		enabled: DF.Check
		is_dynamic_url: DF.Check
		meets_condition: DF.Data | None
		preview_document: DF.DynamicLink | None
		preview_request_body: DF.Code | None
		request_method: DF.Literal["POST", "PUT", "DELETE"]
		request_structure: DF.Literal["", "Form URL-Encoded", "JSON"]
		request_url: DF.Data
		timeout: DF.Int
		webhook_data: DF.Table[WebhookData]
		webhook_docevent: DF.Literal[
			"after_insert",
			"on_update",
			"on_submit",
			"on_cancel",
			"on_trash",
			"on_update_after_submit",
			"on_change",
		]
		webhook_doctype: DF.Link
		webhook_headers: DF.Table[WebhookHeader]
		webhook_json: DF.Code | None
		webhook_secret: DF.Password | None
	# end: auto-generated types
	def validate(self):
		# Run all field-level validations. preview_document is transient UI
		# state and is cleared so it is never persisted.
		self.validate_docevent()
		self.METHOD_NAME()
		self.validate_request_url()
		self.validate_request_body()
		self.validate_repeating_fields()
		self.validate_secret()
		self.preview_document = None
	def on_update(self):
		# Webhook definitions are cached; invalidate the cache on any change.
		frappe.cache.delete_value("webhooks")
	def validate_docevent(self):
		# Submit/cancel events only make sense for submittable DocTypes.
		if self.webhook_doctype:
			is_submittable = frappe.get_value("DocType", self.webhook_doctype, "is_submittable")
			if not is_submittable and self.webhook_docevent in [
				"on_submit",
				"on_cancel",
				"on_update_after_submit",
			]:
				frappe.throw(_("DocType must be Submittable for the selected Doc Event"))
	def METHOD_NAME(self):
		# Evaluate the condition against a blank document of the target
		# DocType so syntax/name errors surface at save time, not at
		# delivery time.
		temp_doc = frappe.new_doc(self.webhook_doctype)
		if self.condition:
			try:
				frappe.safe_eval(self.condition, eval_locals=get_context(temp_doc))
			except Exception as e:
				frappe.throw(_("Invalid Condition: {}").format(e))
	def validate_request_url(self):
		# A URL without a network location (host) is rejected.
		try:
			request_url = urlparse(self.request_url).netloc
			if not request_url:
				raise frappe.ValidationError
		except Exception as e:
			frappe.throw(_("Check Request URL"), exc=e)
	def validate_request_body(self):
		# The two body formats are mutually exclusive: keep only the one
		# matching the selected request structure.
		if self.request_structure:
			if self.request_structure == "Form URL-Encoded":
				self.webhook_json = None
			elif self.request_structure == "JSON":
				validate_template(self.webhook_json)
				self.webhook_data = []
	def validate_repeating_fields(self):
		"""Error when Same Field is entered multiple times in webhook_data"""
		webhook_data = [entry.fieldname for entry in self.webhook_data]
		if len(webhook_data) != len(set(webhook_data)):
			frappe.throw(_("Same Field is entered more than once"))
	def validate_secret(self):
		# The secret must be retrievable and UTF-8 encodable when security
		# (request signing) is enabled.
		if self.enable_security:
			try:
				self.get_password("webhook_secret", False).encode("utf8")
			except Exception:
				frappe.throw(_("Invalid Webhook Secret"))
	@frappe.whitelist()
	def generate_preview(self):
		# This function doesn't need to do anything specific as virtual fields
		# get evaluated automatically.
		pass
	@property
	def meets_condition(self):
		# Virtual field (shadows the annotated DF.Data field above): shows in
		# the UI whether the preview document satisfies the condition.
		if not self.condition:
			return _("Yes")
		if not (self.preview_document and self.webhook_doctype):
			return _("Select a document to check if it meets conditions.")
		try:
			doc = frappe.get_cached_doc(self.webhook_doctype, self.preview_document)
			met_condition = frappe.safe_eval(self.condition, eval_locals=get_context(doc))
		except Exception as e:
			return _("Failed to evaluate conditions: {}").format(e)
		return _("Yes") if met_condition else _("No")
	@property
	def preview_request_body(self):
		# Virtual field: renders the request payload for the preview document.
		if not (self.preview_document and self.webhook_doctype):
			return _("Select a document to preview request data")
		try:
			doc = frappe.get_cached_doc(self.webhook_doctype, self.preview_document)
			return frappe.as_json(get_webhook_data(doc, self))
		except Exception as e:
			return _("Failed to compute request body: {}").format(e)
def get_context(doc):
	"""Build the sandboxed evaluation context used for webhook conditions
	and Jinja templates: the document plus frappe's safe utils."""
	frappe_utils = get_safe_globals().get("frappe").get("utils")
	return {"doc": doc, "utils": frappe_utils}
def enqueue_webhook(doc, webhook) -> None:
	"""Deliver a webhook for ``doc``, retrying up to 3 times on failure.

	:param doc: the document that triggered the webhook
	:param webhook: a dict-like reference containing the Webhook name
	"""
	# Pre-bind everything used in the failure path so log_request never sees
	# unbound names if preparation fails part-way through (previously
	# request_url/headers/data could be undefined, and webhook.name was
	# accessed on the not-yet-loaded dict reference).
	webhook_name = webhook.get("name")
	request_url = headers = data = None
	try:
		webhook: Webhook = frappe.get_doc("Webhook", webhook_name)
		headers = get_webhook_headers(doc, webhook)
		data = get_webhook_data(doc, webhook)
		if webhook.is_dynamic_url:
			request_url = frappe.render_template(webhook.request_url, get_context(doc))
		else:
			request_url = webhook.request_url
	except Exception as e:
		frappe.logger().debug({"enqueue_webhook_error": e})
		log_request(webhook_name, doc.name, request_url, headers, data)
		return
	for i in range(3):
		# r must exist in the generic except below even when
		# requests.request itself raises (e.g. ConnectionError).
		r = None
		try:
			r = requests.request(
				method=webhook.request_method,
				url=request_url,
				data=json.dumps(data, default=str),
				headers=headers,
				timeout=webhook.timeout or 5,
			)
			r.raise_for_status()
			frappe.logger().debug({"webhook_success": r.text})
			log_request(webhook_name, doc.name, request_url, headers, data, r)
			break
		except requests.exceptions.ReadTimeout as e:
			# Timeouts are logged without a response and retried immediately.
			frappe.logger().debug({"webhook_error": e, "try": i + 1})
			log_request(webhook_name, doc.name, request_url, headers, data)
		except Exception as e:
			frappe.logger().debug({"webhook_error": e, "try": i + 1})
			log_request(webhook_name, doc.name, request_url, headers, data, r)
			# Linear backoff: 1s, 4s, 7s.
			sleep(3 * i + 1)
def log_request(
	webhook: str,
	docname: str,
	url: str,
	headers: dict,
	data: dict,
	res: requests.Response | None = None,
):
	"""Persist one webhook delivery attempt as a Webhook Request Log.

	Captures the current traceback, so it is expected to be called from
	within an except block (or right after a successful request, where the
	traceback is empty).
	"""
	session_user = frappe.session.user if frappe.session.user else None
	log_fields = {
		"doctype": "Webhook Request Log",
		"webhook": webhook,
		"reference_document": docname,
		"user": session_user,
		"url": url,
		"headers": frappe.as_json(headers) if headers else None,
		"data": frappe.as_json(data) if data else None,
		"response": res and res.text,
		"error": frappe.get_traceback(),
	}
	request_log = frappe.get_doc(log_fields)
	request_log.save(ignore_permissions=True)
def get_webhook_headers(doc, webhook):
	"""Assemble the HTTP headers for a webhook request.

	When security is enabled, adds an HMAC-SHA256 signature of the JSON
	payload (base64-encoded) under WEBHOOK_SECRET_HEADER, then appends any
	user-configured header rows.
	"""
	headers = {}
	if webhook.enable_security:
		payload = get_webhook_data(doc, webhook)
		secret = webhook.get_password("webhook_secret").encode("utf8")
		digest = hmac.new(
			secret,
			json.dumps(payload).encode("utf8"),
			hashlib.sha256,
		).digest()
		headers[WEBHOOK_SECRET_HEADER] = base64.b64encode(digest)
	if webhook.webhook_headers:
		for header_row in webhook.webhook_headers:
			key = header_row.get("key")
			value = header_row.get("value")
			if key and value:
				headers[key] = value
	return headers
def get_webhook_data(doc, webhook):
	"""Build the request payload for a webhook.

	Uses the field-mapping rows (webhook_data) when present; otherwise
	renders the JSON Jinja template (webhook_json). Returns {} when neither
	is configured.
	"""
	doc_dict = doc.as_dict(convert_dates_to_str=True)
	if webhook.webhook_data:
		return {row.key: doc_dict.get(row.fieldname) for row in webhook.webhook_data}
	if webhook.webhook_json:
		rendered = frappe.render_template(webhook.webhook_json, get_context(doc_dict))
		return json.loads(rendered)
	return {}
299,332 | get namespace | """Handles incoming servicediscovery requests, invokes methods, returns responses."""
import json
from moto.core.common_types import TYPE_RESPONSE
from moto.core.responses import BaseResponse
from .models import servicediscovery_backends, ServiceDiscoveryBackend
class ServiceDiscoveryResponse(BaseResponse):
    """Parses servicediscovery requests, dispatches to the backend, and
    serializes the results back to JSON."""

    def __init__(self) -> None:
        super().__init__(service_name="servicediscovery")

    @property
    def servicediscovery_backend(self) -> ServiceDiscoveryBackend:
        """Return backend instance specific for this region."""
        return servicediscovery_backends[self.current_account][self.region]

    def list_namespaces(self) -> TYPE_RESPONSE:
        namespaces = self.servicediscovery_backend.list_namespaces()
        payload = {"Namespaces": [ns.to_json() for ns in namespaces]}
        return 200, {}, json.dumps(payload)

    def create_http_namespace(self) -> str:
        body = json.loads(self.body)
        operation_id = self.servicediscovery_backend.create_http_namespace(
            name=body.get("Name"),
            creator_request_id=body.get("CreatorRequestId"),
            description=body.get("Description"),
            tags=body.get("Tags"),
        )
        return json.dumps({"OperationId": operation_id})

    def delete_namespace(self) -> str:
        body = json.loads(self.body)
        operation_id = self.servicediscovery_backend.delete_namespace(
            namespace_id=body.get("Id")
        )
        return json.dumps({"OperationId": operation_id})

    def list_operations(self) -> TYPE_RESPONSE:
        operations = self.servicediscovery_backend.list_operations()
        # Operation listings use the short JSON form.
        payload = {"Operations": [op.to_json(short=True) for op in operations]}
        return 200, {}, json.dumps(payload)

    def get_operation(self) -> str:
        body = json.loads(self.body)
        operation = self.servicediscovery_backend.get_operation(
            operation_id=body.get("OperationId")
        )
        return json.dumps({"Operation": operation.to_json()})

    def METHOD_NAME(self) -> str:
        body = json.loads(self.body)
        namespace = self.servicediscovery_backend.METHOD_NAME(
            namespace_id=body.get("Id")
        )
        return json.dumps({"Namespace": namespace.to_json()})

    def tag_resource(self) -> str:
        body = json.loads(self.body)
        self.servicediscovery_backend.tag_resource(
            resource_arn=body.get("ResourceARN"), tags=body.get("Tags")
        )
        return "{}"

    def untag_resource(self) -> str:
        body = json.loads(self.body)
        self.servicediscovery_backend.untag_resource(
            resource_arn=body.get("ResourceARN"), tag_keys=body.get("TagKeys")
        )
        return "{}"

    def list_tags_for_resource(self) -> TYPE_RESPONSE:
        body = json.loads(self.body)
        tags = self.servicediscovery_backend.list_tags_for_resource(
            resource_arn=body.get("ResourceARN")
        )
        return 200, {}, json.dumps(tags)

    def create_private_dns_namespace(self) -> str:
        body = json.loads(self.body)
        operation_id = self.servicediscovery_backend.create_private_dns_namespace(
            name=body.get("Name"),
            creator_request_id=body.get("CreatorRequestId"),
            description=body.get("Description"),
            vpc=body.get("Vpc"),
            tags=body.get("Tags"),
            properties=body.get("Properties"),
        )
        return json.dumps({"OperationId": operation_id})

    def create_public_dns_namespace(self) -> str:
        body = json.loads(self.body)
        operation_id = self.servicediscovery_backend.create_public_dns_namespace(
            name=body.get("Name"),
            creator_request_id=body.get("CreatorRequestId"),
            description=body.get("Description"),
            tags=body.get("Tags"),
            properties=body.get("Properties"),
        )
        return json.dumps({"OperationId": operation_id})

    def create_service(self) -> str:
        body = json.loads(self.body)
        service = self.servicediscovery_backend.create_service(
            name=body.get("Name"),
            namespace_id=body.get("NamespaceId"),
            creator_request_id=body.get("CreatorRequestId"),
            description=body.get("Description"),
            dns_config=body.get("DnsConfig"),
            health_check_config=body.get("HealthCheckConfig"),
            health_check_custom_config=body.get("HealthCheckCustomConfig"),
            tags=body.get("Tags"),
            service_type=body.get("Type"),
        )
        return json.dumps({"Service": service.to_json()})

    def get_service(self) -> str:
        body = json.loads(self.body)
        service = self.servicediscovery_backend.get_service(service_id=body.get("Id"))
        return json.dumps({"Service": service.to_json()})

    def delete_service(self) -> str:
        body = json.loads(self.body)
        self.servicediscovery_backend.delete_service(service_id=body.get("Id"))
        return "{}"

    def list_services(self) -> str:
        services = self.servicediscovery_backend.list_services()
        return json.dumps({"Services": [svc.to_json() for svc in services]})

    def update_service(self) -> str:
        body = json.loads(self.body)
        operation_id = self.servicediscovery_backend.update_service(
            service_id=body.get("Id"), details=body.get("Service")
        )
        return json.dumps({"OperationId": operation_id})

    def update_private_dns_namespace(self) -> str:
        body = json.loads(self.body)
        namespace = body["Namespace"]
        operation_id = self.servicediscovery_backend.update_private_dns_namespace(
            _id=body.get("Id"),
            description=namespace.get("Description"),
            properties=namespace.get("Properties", {}).get("DnsProperties"),
        )
        return json.dumps({"OperationId": operation_id})

    def update_public_dns_namespace(self) -> str:
        body = json.loads(self.body)
        namespace = body["Namespace"]
        operation_id = self.servicediscovery_backend.update_public_dns_namespace(
            _id=body.get("Id"),
            description=namespace.get("Description"),
            properties=namespace.get("Properties", {}).get("DnsProperties"),
        )
        return json.dumps({"OperationId": operation_id})
299,333 | set up | import pytest
import responses
from sentry.integrations.slack.utils import get_channel_id
from sentry.integrations.slack.utils.channel import CHANNEL_PREFIX, MEMBER_PREFIX
from sentry.shared_integrations.exceptions import ApiRateLimitedError, DuplicateDisplayNameError
from sentry.testutils.cases import TestCase
from sentry.testutils.helpers import install_slack
from sentry.utils import json
class GetChannelIdTest(TestCase):
def METHOD_NAME(self):
self.resp = responses.mock
self.resp.__enter__()
self.integration = install_slack(self.event.project.organization)
def tearDown(self):
self.resp.__exit__(None, None, None)
def add_list_response(self, list_type, channels, result_name="channels"):
self.resp = responses.mock
self.resp.add(
method=responses.GET,
url="https://slack.com/api/%s.list" % list_type,
status=200,
content_type="application/json",
body=json.dumps({"ok": "true", result_name: channels}),
)
def add_msg_response(self, channel_id, result_name="channel"):
if channel_id == "channel_not_found":
bodydict = {"ok": False, "error": "channel_not_found"}
else:
bodydict = {"ok": True, result_name: channel_id, "scheduled_message_id": "Q1298393284"}
self.resp.add(
method=responses.POST,
url="https://slack.com/api/chat.scheduleMessage",
status=200,
content_type="application/json",
body=json.dumps(bodydict),
)
def run_valid_test(self, channel, expected_prefix, expected_id, timed_out):
assert (expected_prefix, expected_id, timed_out) == get_channel_id(
self.organization, self.integration, channel
)
def test_valid_channel_selected(self):
self.add_msg_response("m-c")
self.resp.add(
method=responses.POST,
url="https://slack.com/api/chat.deleteScheduledMessage",
status=200,
content_type="application/json",
body=json.dumps({"ok": True}),
)
self.run_valid_test("#My-Channel", CHANNEL_PREFIX, "m-c", False)
def test_valid_private_channel_selected(self):
self.add_msg_response("m-p-c")
self.resp.add(
method=responses.POST,
url="https://slack.com/api/chat.deleteScheduledMessage",
status=200,
content_type="application/json",
body=json.dumps({"ok": True}),
)
self.run_valid_test("#my-private-channel", CHANNEL_PREFIX, "m-p-c", False)
def test_valid_member_selected(self):
self.add_msg_response("channel_not_found")
self.add_list_response(
"users",
[
{"name": "first-morty", "id": "m", "profile": {"display_name": "Morty"}},
{"name": "other-user", "id": "o-u", "profile": {"display_name": "Jimbob"}},
{"name": "better_morty", "id": "bm", "profile": {"display_name": "Morty"}},
],
result_name="members",
)
self.run_valid_test("@first-morty", MEMBER_PREFIX, "m", False)
def test_valid_member_selected_display_name(self):
self.add_msg_response("channel_not_found")
self.add_list_response(
"users",
[
{"name": "first-morty", "id": "m", "profile": {"display_name": "Morty"}},
{"name": "other-user", "id": "o-u", "profile": {"display_name": "Jimbob"}},
{"name": "better_morty", "id": "bm", "profile": {"display_name": "Morty"}},
],
result_name="members",
)
self.run_valid_test("@Jimbob", MEMBER_PREFIX, "o-u", False)
def test_invalid_member_selected_display_name(self):
    """An ambiguous display name (shared by two users) must raise instead of guessing."""
    self.add_msg_response("channel_not_found")
    self.add_list_response(
        "users",
        [
            {"name": "first-morty", "id": "m", "profile": {"display_name": "Morty"}},
            {"name": "other-user", "id": "o-u", "profile": {"display_name": "Jimbob"}},
            {"name": "better_morty", "id": "bm", "profile": {"display_name": "Morty"}},
        ],
        result_name="members",
    )
    # Two users share the display name "Morty" -> ambiguous.
    with pytest.raises(DuplicateDisplayNameError):
        get_channel_id(self.organization, self.integration, "@Morty")
def test_invalid_channel_selected(self):
    """Unknown channels and users resolve to a None id (second tuple element)."""
    self.add_msg_response("channel_not_found")
    assert get_channel_id(self.organization, self.integration, "#fake-channel")[1] is None
    assert get_channel_id(self.organization, self.integration, "@fake-user")[1] is None
def test_rate_limiting(self):
    """Should handle 429 from Slack when searching for users"""
    # channel_not_found pushes resolution into the users.list lookup,
    # which then responds with Slack's "ratelimited" error.
    self.add_msg_response("channel_not_found")
    self.resp.add(
        method=responses.GET,
        url="https://slack.com/api/users.list",
        status=429,
        content_type="application/json",
        body=json.dumps({"ok": False, "error": "ratelimited"}),
    )
    with pytest.raises(ApiRateLimitedError):
        get_channel_id(self.organization, self.integration, "@user")
299,334 | test values extended | import sys
import unittest
from test import test_support
pwd = test_support.import_module('pwd')
class PwdTest(unittest.TestCase):
    """Tests for the pwd module (Python 2 era: basestring/long/xrange/maxint)."""

    def test_values(self):
        """Every pwd entry is a 7-tuple whose indices mirror the named attributes."""
        entries = pwd.getpwall()

        for e in entries:
            self.assertEqual(len(e), 7)
            self.assertEqual(e[0], e.pw_name)
            self.assertIsInstance(e.pw_name, basestring)
            self.assertEqual(e[1], e.pw_passwd)
            self.assertIsInstance(e.pw_passwd, basestring)
            self.assertEqual(e[2], e.pw_uid)
            self.assertIsInstance(e.pw_uid, (int, long))
            self.assertEqual(e[3], e.pw_gid)
            self.assertIsInstance(e.pw_gid, (int, long))
            self.assertEqual(e[4], e.pw_gecos)
            self.assertIsInstance(e.pw_gecos, basestring)
            self.assertEqual(e[5], e.pw_dir)
            self.assertIsInstance(e.pw_dir, basestring)
            self.assertEqual(e[6], e.pw_shell)
            self.assertIsInstance(e.pw_shell, basestring)

            # The following won't work, because of duplicate entries
            # for one uid
            #    self.assertEqual(pwd.getpwuid(e.pw_uid), e)
            # instead of this collect all entries for one uid
            # and check afterwards (done in test_values_extended)

    def METHOD_NAME(self):
        """Cross-check getpwnam()/getpwuid() results against getpwall() entries."""
        entries = pwd.getpwall()
        entriesbyname = {}
        entriesbyuid = {}

        if len(entries) > 1000:  # Huge passwd file (NIS?) -- skip this test
            self.skipTest('passwd file is huge; extended test skipped')

        for e in entries:
            entriesbyname.setdefault(e.pw_name, []).append(e)
            entriesbyuid.setdefault(e.pw_uid, []).append(e)

        # check whether the entry returned by getpwuid()
        # for each uid is among those from getpwall() for this uid
        for e in entries:
            if not e[0] or e[0] == '+':
                continue  # skip NIS entries etc.
            self.assertIn(pwd.getpwnam(e.pw_name), entriesbyname[e.pw_name])
            self.assertIn(pwd.getpwuid(e.pw_uid), entriesbyuid[e.pw_uid])

    def test_errors(self):
        """Type errors for bad arguments and KeyError for nonexistent users/uids."""
        self.assertRaises(TypeError, pwd.getpwuid)
        self.assertRaises(TypeError, pwd.getpwuid, 3.14)
        self.assertRaises(TypeError, pwd.getpwnam)
        self.assertRaises(TypeError, pwd.getpwnam, 42)
        self.assertRaises(TypeError, pwd.getpwall, 42)

        # try to get some errors
        bynames = {}
        byuids = {}
        for (n, p, u, g, gecos, d, s) in pwd.getpwall():
            bynames[n] = u
            byuids[u] = n

        allnames = bynames.keys()
        namei = 0
        fakename = allnames[namei]
        # Perturb an existing name character by character until it no longer
        # collides with any real account name.
        while fakename in bynames:
            chars = list(fakename)
            for i in xrange(len(chars)):
                if chars[i] == 'z':
                    chars[i] = 'A'
                    break
                elif chars[i] == 'Z':
                    continue
                else:
                    chars[i] = chr(ord(chars[i]) + 1)
                    break
            else:
                namei = namei + 1
                try:
                    fakename = allnames[namei]
                except IndexError:
                    # should never happen... if so, just forget it
                    break
            fakename = ''.join(chars)

        self.assertRaises(KeyError, pwd.getpwnam, fakename)

        # In some cases, byuids isn't a complete list of all users in the
        # system, so if we try to pick a value not in byuids (via a perturbing
        # loop, say), pwd.getpwuid() might still be able to find data for that
        # uid. Using sys.maxint may provoke the same problems, but hopefully
        # it will be a more repeatable failure.
        fakeuid = sys.maxint
        self.assertNotIn(fakeuid, byuids)
        self.assertRaises(KeyError, pwd.getpwuid, fakeuid)

        # -1 shouldn't be a valid uid because it has a special meaning in many
        # uid-related functions
        self.assertRaises(KeyError, pwd.getpwuid, -1)
        # should be out of uid_t range
        self.assertRaises(KeyError, pwd.getpwuid, 2**128)
        self.assertRaises(KeyError, pwd.getpwuid, -2**128)
def test_main():
    """regrtest entry point: run the PwdTest suite."""
    test_support.run_unittest(PwdTest)

if __name__ == "__main__":
    test_main()  # allow running this test file directly
299,335 | clean | import re
import signal
import threading
from asyncio import Event
from logging import DEBUG
from pathlib import Path
from time import sleep
from unittest.mock import Mock
import pytest
from sanic.app import Sanic
from sanic.worker.constants import ProcessState, RestartOrder
from sanic.worker.loader import AppLoader
from sanic.worker.process import WorkerProcess
from sanic.worker.reloader import Reloader
@pytest.fixture
def reloader():
    # Placeholder fixture; the ellipsis body is intentional (yields None).
    ...
@pytest.fixture
def app():
    """Minimal Sanic application with a single no-op root route."""
    app = Sanic("Test")

    @app.route("/")
    def handler(_):
        ...

    return app
@pytest.fixture
def app_loader(app):
    """AppLoader whose factory returns the fixture app."""
    return AppLoader(factory=lambda: app)
def run_reloader(reloader):
    """Run *reloader* (which loops forever) but stop it after ~1s via SIGALRM."""
    def stop(*_):
        reloader.stop()

    # SIGALRM fires after one second and triggers the reloader's stop flag.
    signal.signal(signal.SIGALRM, stop)
    signal.alarm(1)
    reloader()
def is_python_file(filename):
    """Return True if *filename* (a Path or str) refers to a ``.py`` file.

    Bug fix: ``Path.suffix`` includes the leading dot (e.g. ``".py"``), so
    the previous comparison ``suffix == "py"`` never matched any Path.
    """
    if isinstance(filename, Path):
        return filename.suffix == ".py"
    return isinstance(filename, str) and filename.endswith(".py")
def test_reload_send():
    """Reloader.reload must publish one message addressed to all processes."""
    channel = Mock()
    instance = Reloader(channel, 0.1, set(), Mock())
    instance.reload("foobar")
    channel.send.assert_called_once_with("__ALL_PROCESSES__:foobar")
def test_iter_files():
    """Reloader.files() yields the app's python files plus any extra reload dirs."""
    reloader = Reloader(Mock(), 0.1, set(), Mock())
    len_python_files = len(list(reloader.files()))
    assert len_python_files > 0

    # Adding a reload directory must add exactly its files on top of the
    # python files discovered by default.
    static_dir = Path(__file__).parent.parent / "static"
    len_static_files = len(list(static_dir.glob("**/*")))
    reloader = Reloader(Mock(), 0.1, set({static_dir}), Mock())
    len_total_files = len(list(reloader.files()))
    assert len_static_files > 0
    assert len_total_files == len_python_files + len_static_files
@pytest.mark.parametrize(
    "order,expected",
    (
        (
            RestartOrder.SHUTDOWN_FIRST,
            [
                "Restarting a process",
                "Begin restart termination",
                "Starting a process",
            ],
        ),
        (
            RestartOrder.STARTUP_FIRST,
            [
                "Restarting a process",
                "Starting a process",
                "Begin restart termination",
                "Waiting for process to be acked",
                "Process acked. Terminating",
            ],
        ),
    ),
)
def test_default_reload_shutdown_order(monkeypatch, caplog, order, expected):
    """The restart order controls the sequence of debug log messages emitted."""
    current_process = Mock()
    worker_process = WorkerProcess(
        lambda **_: current_process,
        "Test",
        lambda **_: ...,
        {},
        {},
    )

    # Replace Thread.start so the "thread" runs inline and immediately acks.
    def start(self):
        worker_process.set_state(ProcessState.ACKED)
        self._target()

    orig = threading.Thread.start
    monkeypatch.setattr(threading.Thread, "start", start)
    with caplog.at_level(DEBUG):
        worker_process.restart(restart_order=order)

    # Strip ANSI escape sequences and keep only the message prefix before ':'.
    ansi = re.compile(r"\x1B(?:[@-Z\\-_]|\[[0-?]*[ -/]*[@-~])")

    def METHOD_NAME(msg: str):
        msg, _ = ansi.sub("", msg).split(":", 1)
        return msg

    debug = [METHOD_NAME(record[2]) for record in caplog.record_tuples]
    assert debug == expected
    current_process.start.assert_called_once()
    current_process.terminate.assert_called_once()
    # Restore the real Thread.start for subsequent tests.
    monkeypatch.setattr(threading.Thread, "start", orig)
def test_reload_delayed(monkeypatch):
    """A worker that never acks within the threshold raises TimeoutError."""
    # THRESHOLD is in internal time units; with this value the effective
    # ack window reported in the error message is 0.1 seconds.
    WorkerProcess.THRESHOLD = 1

    current_process = Mock()
    worker_process = WorkerProcess(
        lambda **_: current_process,
        "Test",
        lambda **_: ...,
        {},
        {},
    )

    # Inline "thread" that sleeps past the ack window and never acks.
    def start(self):
        sleep(0.2)
        self._target()

    orig = threading.Thread.start
    monkeypatch.setattr(threading.Thread, "start", start)
    message = "Worker Test failed to come ack within 0.1 seconds"
    with pytest.raises(TimeoutError, match=message):
        worker_process.restart(restart_order=RestartOrder.STARTUP_FIRST)
    monkeypatch.setattr(threading.Thread, "start", orig)
def test_reloader_triggers_start_stop_listeners(
    app: Sanic, app_loader: AppLoader
):
    """reload_process_start/stop listeners fire exactly once, in order."""
    results = []

    @app.reload_process_start
    def reload_process_start(_):
        results.append("reload_process_start")

    @app.reload_process_stop
    def reload_process_stop(_):
        results.append("reload_process_stop")

    reloader = Reloader(Mock(), 0.1, set(), app_loader)
    run_reloader(reloader)

    assert results == ["reload_process_start", "reload_process_stop"]
def test_not_triggered(app_loader):
    """Watching a directory with no changes must publish nothing."""
    watched_dir = Path(__file__).parent.parent / "fake"
    channel = Mock()
    run_reloader(Reloader(channel, 0.1, {watched_dir}, app_loader))
    channel.send.assert_not_called()
def test_triggered(app_loader):
    """Changed files are batched into one publish message listing each path."""
    paths = set()

    # Stubbed change detector: reports exactly two files as "changed"
    # (the fake server.py and sanic/app.py) and records which were seen.
    def check_file(filename, mtimes):
        if (isinstance(filename, Path) and (filename.name == "server.py")) or (
            isinstance(filename, str) and "sanic/app.py" in filename
        ):
            paths.add(str(filename))
            return True
        return False

    reload_dir = Path(__file__).parent.parent / "fake"
    publisher = Mock()
    reloader = Reloader(publisher, 0.1, {reload_dir}, app_loader)
    reloader.check_file = check_file  # type: ignore
    run_reloader(reloader)

    assert len(paths) == 2
    publisher.send.assert_called()
    # Single message: "__ALL_PROCESSES__:<path1>,<path2>".
    call_arg = publisher.send.call_args_list[0][0][0]
    assert call_arg.startswith("__ALL_PROCESSES__:")
    assert call_arg.count(",") == 1
    for path in paths:
        assert str(path) in call_arg
def test_reloader_triggers_reload_listeners(app: Sanic, app_loader: AppLoader):
    """before/after_reload_trigger listeners run around a detected change."""
    before = Event()
    after = Event()

    # Report a change until the after-listener has run, then stop triggering.
    def check_file(filename, mtimes):
        return not after.is_set()

    @app.before_reload_trigger
    async def before_reload_trigger(_):
        before.set()

    @app.after_reload_trigger
    async def after_reload_trigger(_):
        after.set()

    reloader = Reloader(Mock(), 0.1, set(), app_loader)
    reloader.check_file = check_file  # type: ignore
    run_reloader(reloader)

    assert before.is_set()
    assert after.is_set()
def test_check_file(tmp_path):
    """check_file returns True only when a file's mtime moved forward."""
    target = tmp_path / "testing.txt"
    target.touch()
    seen_mtimes = {}

    # First sighting registers the file without reporting a change.
    assert Reloader.check_file(target, seen_mtimes) is False
    assert len(seen_mtimes) == 1

    # Unchanged mtime: still no change reported.
    assert Reloader.check_file(target, seen_mtimes) is False

    # Pretend the last-seen mtime is older than the file's current mtime.
    seen_mtimes[target] = seen_mtimes[target] - 1
    assert Reloader.check_file(target, seen_mtimes) is True
299,336 | build arguments schema | # --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
#
# Code generated by aaz-dev-tools
# --------------------------------------------------------------------------------------------
# pylint: skip-file
# flake8: noqa
from azure.cli.core.aaz import *
@register_command(
    "databricks workspace private-endpoint-connection delete",
)
class Delete(AAZCommand):
    """Delete private endpoint connection with the specified name
    """

    # NOTE: auto-generated by aaz-dev-tools; keep edits to regeneration.
    _aaz_info = {
        "version": "2023-02-01",
        "resources": [
            ["mgmt-plane", "/subscriptions/{}/resourcegroups/{}/providers/microsoft.databricks/workspaces/{}/privateendpointconnections/{}", "2023-02-01"],
        ]
    }

    # Command supports --no-wait (long-running operation).
    AZ_SUPPORT_NO_WAIT = True

    def _handler(self, command_args):
        """Entry point: run the operations as an LRO poller (no result body)."""
        super()._handler(command_args)
        return self.build_lro_poller(self._execute_operations, None)

    _args_schema = None

    @classmethod
    def METHOD_NAME(cls, *args, **kwargs):
        """Build (once, memoized on the class) the CLI argument schema."""
        if cls._args_schema is not None:
            return cls._args_schema
        cls._args_schema = super().METHOD_NAME(*args, **kwargs)

        # define Arg Group ""

        _args_schema = cls._args_schema
        _args_schema.name = AAZStrArg(
            options=["-n", "--name"],
            help="The name of the private endpoint connection",
            required=True,
            id_part="child_name_1",
        )
        _args_schema.resource_group = AAZResourceGroupNameArg(
            required=True,
        )
        _args_schema.workspace_name = AAZStrArg(
            options=["--workspace-name"],
            help="The name of the workspace.",
            required=True,
            id_part="name",
            fmt=AAZStrArgFormat(
                max_length=64,
                min_length=3,
            ),
        )
        return cls._args_schema

    def _execute_operations(self):
        """Yield the single DELETE operation between the callback hooks."""
        self.pre_operations()
        yield self.PrivateEndpointConnectionsDelete(ctx=self.ctx)()
        self.post_operations()

    @register_callback
    def pre_operations(self):
        pass

    @register_callback
    def post_operations(self):
        pass

    class PrivateEndpointConnectionsDelete(AAZHttpOperation):
        """HTTP DELETE on the private endpoint connection resource (LRO)."""
        CLIENT_TYPE = "MgmtClient"

        def __call__(self, *args, **kwargs):
            request = self.make_request()
            session = self.client.send_request(request=request, stream=False, **kwargs)
            # 202 Accepted: deletion started; poll to completion.
            if session.http_response.status_code in [202]:
                return self.client.build_lro_polling(
                    self.ctx.args.no_wait,
                    session,
                    self.on_200,
                    self.on_error,
                    lro_options={"final-state-via": "azure-async-operation"},
                    path_format_arguments=self.url_parameters,
                )
            # 200 OK: same polling path as 202.
            if session.http_response.status_code in [200]:
                return self.client.build_lro_polling(
                    self.ctx.args.no_wait,
                    session,
                    self.on_200,
                    self.on_error,
                    lro_options={"final-state-via": "azure-async-operation"},
                    path_format_arguments=self.url_parameters,
                )
            # 204 No Content: nothing to delete / already gone.
            if session.http_response.status_code in [204]:
                return self.client.build_lro_polling(
                    self.ctx.args.no_wait,
                    session,
                    self.on_204,
                    self.on_error,
                    lro_options={"final-state-via": "azure-async-operation"},
                    path_format_arguments=self.url_parameters,
                )
            return self.on_error(session.http_response)

        @property
        def url(self):
            return self.client.format_url(
                "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Databricks/workspaces/{workspaceName}/privateEndpointConnections/{privateEndpointConnectionName}",
                **self.url_parameters
            )

        @property
        def method(self):
            return "DELETE"

        @property
        def error_format(self):
            return "ODataV4Format"

        @property
        def url_parameters(self):
            parameters = {
                **self.serialize_url_param(
                    "privateEndpointConnectionName", self.ctx.args.name,
                    required=True,
                ),
                **self.serialize_url_param(
                    "resourceGroupName", self.ctx.args.resource_group,
                    required=True,
                ),
                **self.serialize_url_param(
                    "subscriptionId", self.ctx.subscription_id,
                    required=True,
                ),
                **self.serialize_url_param(
                    "workspaceName", self.ctx.args.workspace_name,
                    required=True,
                ),
            }
            return parameters

        @property
        def query_parameters(self):
            parameters = {
                **self.serialize_query_param(
                    "api-version", "2023-02-01",
                    required=True,
                ),
            }
            return parameters

        # Successful responses carry no body to deserialize.
        def on_200(self, session):
            pass

        def on_204(self, session):
            pass
class _DeleteHelper:
    """Helper class for Delete"""

# Only the command class is part of the module's public API.
__all__ = ["Delete"]
299,337 | update activity type | # Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# See license.txt
import datetime
import unittest
import frappe
from frappe.utils import add_months, add_to_date, now_datetime, nowdate
from erpnext.accounts.doctype.sales_invoice.test_sales_invoice import create_sales_invoice
from erpnext.projects.doctype.timesheet.timesheet import OverlapError, make_sales_invoice
from erpnext.setup.doctype.employee.test_employee import make_employee
class TestTimesheet(unittest.TestCase):
    """Tests for Timesheet billing amounts, invoicing, overlap checks and
    percentage-billed calculations."""

    def setUp(self):
        # Each test starts from a clean Timesheet table.
        frappe.db.delete("Timesheet")

    def test_timesheet_billing_amount(self):
        """A billable 2h log at rate 50 yields billable amount 100."""
        emp = make_employee("test_employee_6@salary.com")
        timesheet = make_timesheet(emp, simulate=True, is_billable=1)

        self.assertEqual(timesheet.total_hours, 2)
        self.assertEqual(timesheet.total_billable_hours, 2)
        self.assertEqual(timesheet.time_logs[0].billing_rate, 50)
        self.assertEqual(timesheet.time_logs[0].billing_amount, 100)
        self.assertEqual(timesheet.total_billable_amount, 100)

    def test_timesheet_billing_amount_not_billable(self):
        """Non-billable logs keep hours but contribute no billing figures."""
        emp = make_employee("test_employee_6@salary.com")
        timesheet = make_timesheet(emp, simulate=True, is_billable=0)

        self.assertEqual(timesheet.total_hours, 2)
        self.assertEqual(timesheet.total_billable_hours, 0)
        self.assertEqual(timesheet.time_logs[0].billing_rate, 0)
        self.assertEqual(timesheet.time_logs[0].billing_amount, 0)
        self.assertEqual(timesheet.total_billable_amount, 0)

    def test_sales_invoice_from_timesheet(self):
        """Invoicing a timesheet marks it Billed and carries qty/rate over."""
        emp = make_employee("test_employee_6@salary.com")
        timesheet = make_timesheet(emp, simulate=True, is_billable=1)
        sales_invoice = make_sales_invoice(
            timesheet.name, "_Test Item", "_Test Customer", currency="INR"
        )
        sales_invoice.due_date = nowdate()
        sales_invoice.submit()
        timesheet = frappe.get_doc("Timesheet", timesheet.name)
        self.assertEqual(sales_invoice.total_billing_amount, 100)
        self.assertEqual(timesheet.status, "Billed")
        self.assertEqual(sales_invoice.customer, "_Test Customer")

        item = sales_invoice.items[0]
        self.assertEqual(item.item_code, "_Test Item")
        self.assertEqual(item.qty, 2.00)
        self.assertEqual(item.rate, 50.00)

    def test_timesheet_billing_based_on_project(self):
        """Submitting an invoice against the project bills the timesheet fully."""
        emp = make_employee("test_employee_6@salary.com")
        project = frappe.get_value("Project", {"project_name": "_Test Project"})
        timesheet = make_timesheet(
            emp, simulate=True, is_billable=1, project=project, company="_Test Company"
        )
        sales_invoice = create_sales_invoice(do_not_save=True)
        sales_invoice.project = project
        sales_invoice.submit()

        ts = frappe.get_doc("Timesheet", timesheet.name)
        self.assertEqual(ts.per_billed, 100)
        self.assertEqual(ts.time_logs[0].sales_invoice, sales_invoice.name)

    def test_timesheet_time_overlap(self):
        """Overlap validation is enforced unless ignore_employee_time_overlap is set."""
        emp = make_employee("test_employee_6@salary.com")

        settings = frappe.get_single("Projects Settings")
        initial_setting = settings.ignore_employee_time_overlap
        settings.ignore_employee_time_overlap = 0
        settings.save()

        METHOD_NAME("_Test Activity Type")
        timesheet = frappe.new_doc("Timesheet")
        timesheet.employee = emp
        # Two identical time windows -> overlap.
        timesheet.append(
            "time_logs",
            {
                "billable": 1,
                "activity_type": "_Test Activity Type",
                "from_time": now_datetime(),
                "to_time": now_datetime() + datetime.timedelta(hours=3),
                "company": "_Test Company",
            },
        )
        timesheet.append(
            "time_logs",
            {
                "billable": 1,
                "activity_type": "_Test Activity Type",
                "from_time": now_datetime(),
                "to_time": now_datetime() + datetime.timedelta(hours=3),
                "company": "_Test Company",
            },
        )

        self.assertRaises(frappe.ValidationError, timesheet.save)

        settings.ignore_employee_time_overlap = 1
        settings.save()
        timesheet.save()  # should not throw an error

        # Restore the original setting for other tests.
        settings.ignore_employee_time_overlap = initial_setting
        settings.save()

    def test_timesheet_not_overlapping_with_continuous_timelogs(self):
        """Back-to-back logs (end == next start) are not considered overlapping."""
        emp = make_employee("test_employee_6@salary.com")

        METHOD_NAME("_Test Activity Type")
        timesheet = frappe.new_doc("Timesheet")
        timesheet.employee = emp
        timesheet.append(
            "time_logs",
            {
                "billable": 1,
                "activity_type": "_Test Activity Type",
                "from_time": now_datetime(),
                "to_time": now_datetime() + datetime.timedelta(hours=3),
                "company": "_Test Company",
            },
        )
        timesheet.append(
            "time_logs",
            {
                "billable": 1,
                "activity_type": "_Test Activity Type",
                "from_time": now_datetime() + datetime.timedelta(hours=3),
                "to_time": now_datetime() + datetime.timedelta(hours=4),
                "company": "_Test Company",
            },
        )

        timesheet.save()  # should not throw an error

    def test_to_time(self):
        """to_time is derived from from_time + hours when left unset."""
        emp = make_employee("test_employee_6@salary.com")
        from_time = now_datetime()

        timesheet = frappe.new_doc("Timesheet")
        timesheet.employee = emp
        timesheet.append(
            "time_logs",
            {
                "billable": 1,
                "activity_type": "_Test Activity Type",
                "from_time": from_time,
                "hours": 2,
                "company": "_Test Company",
            },
        )
        timesheet.save()

        to_time = timesheet.time_logs[0].to_time
        self.assertEqual(to_time, add_to_date(from_time, hours=2, as_datetime=True))

    def test_per_billed_hours(self):
        """If amounts are 0, per_billed should be calculated based on hours."""
        ts = frappe.new_doc("Timesheet")
        ts.total_billable_amount = 0
        ts.total_billed_amount = 0
        ts.total_billable_hours = 2

        ts.total_billed_hours = 0.5
        ts.calculate_percentage_billed()
        self.assertEqual(ts.per_billed, 25)

        ts.total_billed_hours = 2
        ts.calculate_percentage_billed()
        self.assertEqual(ts.per_billed, 100)

    def test_per_billed_amount(self):
        """If amounts are > 0, per_billed should be calculated based on amounts, regardless of hours."""
        ts = frappe.new_doc("Timesheet")
        ts.total_billable_hours = 2
        ts.total_billed_hours = 1
        ts.total_billable_amount = 200
        ts.total_billed_amount = 50
        ts.calculate_percentage_billed()
        self.assertEqual(ts.per_billed, 25)

        ts.total_billed_hours = 3
        ts.total_billable_amount = 200
        ts.total_billed_amount = 200
        ts.calculate_percentage_billed()
        self.assertEqual(ts.per_billed, 100)
def make_timesheet(
    employee,
    simulate=False,
    is_billable=0,
    activity_type="_Test Activity Type",
    project=None,
    task=None,
    company=None,
):
    """Create and submit a 2-hour Timesheet for *employee*.

    With ``simulate=True``, saving is retried on OverlapError by shifting the
    time log forward in 10-minute steps until it no longer overlaps an
    existing timesheet of the same employee.
    """
    # Ensure the activity type exists with the expected billing rate (50).
    METHOD_NAME(activity_type)
    timesheet = frappe.new_doc("Timesheet")
    timesheet.employee = employee
    timesheet.company = company or "_Test Company"
    timesheet_detail = timesheet.append("time_logs", {})
    timesheet_detail.is_billable = is_billable
    timesheet_detail.activity_type = activity_type
    timesheet_detail.from_time = now_datetime()
    timesheet_detail.hours = 2
    timesheet_detail.to_time = timesheet_detail.from_time + datetime.timedelta(
        hours=timesheet_detail.hours
    )
    timesheet_detail.project = project
    timesheet_detail.task = task

    for data in timesheet.get("time_logs"):
        if simulate:
            while True:
                try:
                    timesheet.save(ignore_permissions=True)
                    break
                except OverlapError:
                    # Shift this log 10 minutes later and retry.
                    data.from_time = data.from_time + datetime.timedelta(minutes=10)
                    data.to_time = data.from_time + datetime.timedelta(hours=data.hours)
        else:
            timesheet.save(ignore_permissions=True)

    timesheet.submit()

    return timesheet
def METHOD_NAME(activity_type):
    """Set the billing rate of the given Activity Type to 50."""
    doc = frappe.get_doc("Activity Type", activity_type)
    doc.billing_rate = 50.0
    doc.save(ignore_permissions=True)
299,338 | test dict output | # Copyright 2023 The Flax Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for flax.linen.combinators."""
from typing import Any, Optional, Sequence
from absl.testing import absltest
from flax import linen as nn
import jax
from jax import numpy as jnp
from jax import random
import numpy as np
# Parse absl flags test_srcdir and test_tmpdir.
jax.config.parse_flags_with_absl()
class MLP(nn.Module):
    """Dense MLP with all-ones kernels, used as a reference for nn.Sequential."""

    # Output widths of each Dense layer, last entry is the final layer.
    layer_sizes: Sequence[int]
    # Activation applied after every hidden layer (None = identity).
    activation: Optional[Any] = None
    # Activation applied after the final layer (None = identity).
    activation_final: Optional[Any] = None

    @nn.compact
    def __call__(self, inputs):
        x = inputs
        for layer_size in self.layer_sizes[:-1]:
            x = nn.Dense(
                features=layer_size, kernel_init=nn.initializers.ones_init()
            )(x)
            if self.activation is not None:
                x = self.activation(x)
        x = nn.Dense(
            features=self.layer_sizes[-1], kernel_init=nn.initializers.ones_init()
        )(x)
        if self.activation_final is None:
            return x
        return self.activation_final(x)
class AttentionTuple(nn.Module):
    """Attention block returning a (query_output, key_value) tuple, so it can
    be chained inside nn.Sequential with multi-argument calls."""

    num_heads: int = 2
    qkv_features: int = 16

    @nn.compact
    def __call__(self, query, key_value):
        output = nn.MultiHeadDotProductAttention(
            num_heads=self.num_heads, qkv_features=self.qkv_features
        )(query, key_value)
        return output, key_value
class AttentionDict(nn.Module):
    """Attention block returning a dict of keyword outputs, so nn.Sequential
    can forward them as keyword arguments to the next layer."""

    num_heads: int = 2
    qkv_features: int = 16

    @nn.compact
    def __call__(self, query, key_value):
        output = nn.MultiHeadDotProductAttention(
            num_heads=self.num_heads, qkv_features=self.qkv_features
        )(query, key_value)
        return dict(query=output, key_value=key_value)
class SequentialTest(absltest.TestCase):
    """Tests for nn.Sequential: construction, equivalence with an explicit
    MLP, and tuple/dict multi-output chaining."""

    def test_construction(self):
        sequential = nn.Sequential([nn.Dense(4), nn.Dense(2)])
        key1, key2 = random.split(random.PRNGKey(0), 2)
        x = random.uniform(key1, (3, 1, 5))
        params = sequential.init(key2, x)
        output = sequential.apply(params, x)
        self.assertEqual(output.shape, (3, 1, 2))

    def test_fails_if_layers_empty(self):
        sequential = nn.Sequential([])
        with self.assertRaisesRegex(ValueError, 'Empty Sequential module'):
            sequential.init(random.PRNGKey(42), jnp.ones((3, 5)))

    def test_same_output_as_mlp(self):
        """Sequential of Dense layers must match the hand-written MLP exactly."""
        sequential = nn.Sequential([
            nn.Dense(4, kernel_init=nn.initializers.ones_init()),
            nn.Dense(8, kernel_init=nn.initializers.ones_init()),
            nn.Dense(2, kernel_init=nn.initializers.ones_init()),
        ])
        mlp = MLP(layer_sizes=[4, 8, 2])

        key1, key2 = random.split(random.PRNGKey(0), 2)
        x = random.uniform(key1, (3, 5))
        params_1 = sequential.init(key2, x)
        params_2 = mlp.init(key2, x)

        output_1 = sequential.apply(params_1, x)
        output_2 = mlp.apply(params_2, x)
        np.testing.assert_array_equal(output_1, output_2)

    def test_same_output_as_mlp_with_activation(self):
        """Interleaved activation functions behave like MLP's activation args."""
        sequential = nn.Sequential([
            nn.Dense(4, kernel_init=nn.initializers.ones_init()),
            nn.relu,
            nn.Dense(8, kernel_init=nn.initializers.ones_init()),
            nn.relu,
            nn.Dense(2, kernel_init=nn.initializers.ones_init()),
            nn.log_softmax,
        ])

        mlp = MLP(
            layer_sizes=[4, 8, 2],
            activation=nn.relu,
            activation_final=nn.log_softmax,
        )

        key1, key2 = random.split(random.PRNGKey(0), 2)
        x = random.uniform(key1, (3, 5))
        params_1 = sequential.init(key2, x)
        params_2 = mlp.init(key2, x)

        output_1 = sequential.apply(params_1, x)
        output_2 = mlp.apply(params_2, x)
        np.testing.assert_array_equal(output_1, output_2)

    def test_tuple_output(self):
        """Tuple returns from one layer are splatted into the next layer's args."""
        sequential = nn.Sequential([
            AttentionTuple(),
            AttentionTuple(),
        ])

        key1, key2 = random.split(random.PRNGKey(0), 2)
        query = random.uniform(key1, (3, 5))
        key_value = random.uniform(key1, (9, 5))
        params_1 = sequential.init(key2, query, key_value)
        outputs = sequential.apply(params_1, query, key_value)
        np.testing.assert_equal(len(outputs), 2)
        out_query, out_key_value = outputs
        np.testing.assert_equal(out_query.shape, (3, 5))
        np.testing.assert_equal(out_key_value.shape, (9, 5))

    def METHOD_NAME(self):
        """Dict returns from one layer are passed as kwargs to the next layer."""
        sequential = nn.Sequential([
            AttentionDict(),
            AttentionDict(),
        ])

        key1, key2 = random.split(random.PRNGKey(0), 2)
        query = random.uniform(key1, (3, 5))
        key_value = random.uniform(key1, (9, 5))
        params_1 = sequential.init(key2, query, key_value)
        outputs = sequential.apply(params_1, query, key_value)
        np.testing.assert_equal(len(outputs), 2)
        out_query, out_key_value = outputs['query'], outputs['key_value']
        np.testing.assert_equal(out_query.shape, (3, 5))
        np.testing.assert_equal(out_key_value.shape, (9, 5))
if __name__ == '__main__':
    # Run the test suite when executed directly.
    absltest.main()
299,339 | test mananger can remove plugin |
import logging
from unittest import mock
from django.test import TestCase
from django.conf import settings
from plugins.models import PluginMeta, Plugin
from plugins.models import PluginParameter, DefaultStrParameter
from plugins.models import ComputeResource
from plugins.services import manager
COMPUTE_RESOURCE_URL = settings.COMPUTE_RESOURCE_URL
class PluginManagerTests(TestCase):
    """Tests for the plugin manager service (get/register/remove plugins).

    Fixes: the register_plugin_by_url NameError test previously duplicated
    the register_plugin NameError test and never exercised
    register_plugin_by_url at all.
    """

    def setUp(self):
        # avoid cluttered console output (for instance logging all the http requests)
        logging.disable(logging.WARNING)

        self.plugin_fs_name = "simplecopyapp"
        plugin_parameters = [{'name': 'dir', 'type': 'string', 'action': 'store',
                              'optional': True, 'flag': '--dir', 'short_flag': '-d',
                              'default': '/', 'help': 'test plugin', 'ui_exposed': True}]

        self.plg_data = {'description': 'A simple chris fs app demo',
                         'version': '0.1',
                         'dock_image': 'fnndsc/pl-simplecopyapp',
                         'execshell': 'python3',
                         'selfpath': '/usr/local/bin',
                         'selfexec': 'simplefsapp'}

        self.plg_meta_data = {'name': self.plugin_fs_name,
                              'title': 'Dir plugin',
                              'license': 'MIT',
                              'type': 'fs',
                              'icon': 'http://github.com/plugin',
                              'category': 'Dir',
                              'stars': 0,
                              'authors': 'FNNDSC (dev@babyMRI.org)'}

        self.plugin_repr = self.plg_data.copy()
        self.plugin_repr.update(self.plg_meta_data)
        self.plugin_repr['parameters'] = plugin_parameters

        self.pl_manager = manager.PluginManager()

        (self.compute_resource, tf) = ComputeResource.objects.get_or_create(
            name="host", compute_url=COMPUTE_RESOURCE_URL)

        # create a plugin
        data = self.plg_meta_data.copy()
        (pl_meta, tf) = PluginMeta.objects.get_or_create(**data)
        data = self.plg_data.copy()
        (plugin, tf) = Plugin.objects.get_or_create(meta=pl_meta, **data)
        plugin.compute_resources.set([self.compute_resource])
        plugin.save()

        # add plugin's parameters
        parameters = plugin_parameters
        (plg_param, tf) = PluginParameter.objects.get_or_create(
            plugin=plugin,
            name=parameters[0]['name'],
            type=parameters[0]['type'],
            flag=parameters[0]['flag'],
            short_flag=parameters[0]['short_flag'],
            optional=parameters[0]['optional']
        )
        default = parameters[0]['default']
        DefaultStrParameter.objects.get_or_create(plugin_param=plg_param, value=default)

    def tearDown(self):
        # re-enable logging
        logging.disable(logging.NOTSET)

    def test_mananger_can_get_plugin(self):
        """
        Test whether the manager can return a plugin object.
        """
        plugin = Plugin.objects.get(meta__name=self.plugin_fs_name, version="0.1")
        self.assertEqual(plugin, self.pl_manager.get_plugin(self.plugin_fs_name, "0.1"))

    def test_mananger_can_register_plugin(self):
        """
        Test whether the manager can add a new plugin to the system given its name
        and version.
        """
        self.plugin_repr['name'] = 'testapp'
        # mock manager's get_plugin_representation_from_store static method
        self.pl_manager.get_plugin_representation_from_store = mock.Mock(
            return_value=self.plugin_repr)
        plugin = self.pl_manager.register_plugin('testapp', '0.1', 'host')
        self.assertEqual(Plugin.objects.count(), 2)
        self.assertEqual(plugin.meta.name, 'testapp')
        self.assertTrue(PluginParameter.objects.count() > 1)
        self.pl_manager.get_plugin_representation_from_store.assert_called_with(
            'testapp', '0.1', 30)
        # an empty version string should be normalized to None
        self.pl_manager.register_plugin('testapp', '', 'host')
        self.pl_manager.get_plugin_representation_from_store.assert_called_with(
            'testapp', None, 30)

    def test_mananger_register_plugin_raises_name_error_if_compute_resource_does_not_exist(self):
        """
        Test whether the manager's register_plugin method raises NameError exception
        when the compute resource argument doesn't exist in the DB.
        """
        with self.assertRaises(NameError):
            self.pl_manager.register_plugin('testapp', '0.1', 'dummy')

    def test_mananger_can_register_plugin_by_url(self):
        """
        Test whether the manager can add a new plugin to the system given its url.
        """
        self.plugin_repr['name'] = 'testapp'
        # mock manager's get_plugin_representation_from_store static method
        self.pl_manager.get_plugin_representation_from_store_by_url = mock.Mock(
            return_value=self.plugin_repr)
        plugin = self.pl_manager.register_plugin_by_url(
            'http://127.0.0.1:8010/api/v1/1/', 'host')
        self.assertEqual(Plugin.objects.count(), 2)
        self.assertEqual(plugin.meta.name, 'testapp')
        self.assertTrue(PluginParameter.objects.count() > 1)
        self.pl_manager.get_plugin_representation_from_store_by_url.assert_called_with(
            'http://127.0.0.1:8010/api/v1/1/', 30)

    def test_mananger_register_plugin_by_url_raises_name_error_if_compute_resource_does_not_exist(self):
        """
        Test whether the manager's register_plugin_by_url method raises NameError
        exception when the compute resource argument doesn't exist in the DB.
        """
        # mock the store lookup so only the compute-resource check is exercised
        # (and no real HTTP request can be made)
        self.pl_manager.get_plugin_representation_from_store_by_url = mock.Mock(
            return_value=self.plugin_repr)
        with self.assertRaises(NameError):
            # BUG FIX: this test previously called register_plugin, duplicating
            # the test above; it must exercise register_plugin_by_url
            self.pl_manager.register_plugin_by_url(
                'http://127.0.0.1:8010/api/v1/1/', 'dummy')

    def METHOD_NAME(self):
        """
        Test whether the manager can remove an existing plugin from the system.
        """
        plugin = Plugin.objects.get(meta__name=self.plugin_fs_name, version="0.1")
        self.pl_manager.remove_plugin(plugin.id)
        self.assertEqual(Plugin.objects.count(), 0)
        self.assertEqual(PluginMeta.objects.count(), 0)
        self.assertEqual(PluginParameter.objects.count(), 0)
299,340 | bytes msg | import os
import sys
import unittest
import collections
import email
from email.message import Message
from email._policybase import compat32
from test.support import load_package_tests
from test.test_email import __file__ as landmark
# Load all tests in package
def load_tests(*args):
    """unittest protocol hook: discover all test modules in this package."""
    return load_package_tests(os.path.dirname(__file__), *args)
# helper code used by a number of test modules.
def openfile(filename, *args, **kws):
    """Open *filename* from the 'data' directory next to the landmark module.

    Extra positional/keyword arguments are forwarded to open().
    """
    data_dir = os.path.join(os.path.dirname(landmark), 'data')
    return open(os.path.join(data_dir, filename), *args, **kws)
# Base test class
class TestEmailBase(unittest.TestCase):
    """Common base class for the email package tests: message construction
    helpers plus byte-string and defect-list assertion helpers."""

    maxDiff = None

    # Currently the default policy is compat32.  By setting that as the default
    # here we make minimal changes in the test_email tests compared to their
    # pre-3.3 state.
    policy = compat32

    # Likewise, the default message object is Message.
    message = Message

    def __init__(self, *args, **kw):
        super().__init__(*args, **kw)
        # Route bytes comparisons through our line-by-line repr helper.
        self.addTypeEqualityFunc(bytes, self.assertBytesEqual)

    # Backward compatibility to minimize test_email test changes.
    ndiffAssertEqual = unittest.TestCase.assertEqual

    def _msgobj(self, filename):
        """Parse a message from a file in the test data directory."""
        with openfile(filename) as fp:
            return email.message_from_file(fp, policy=self.policy)

    def _str_msg(self, string, message=None, policy=None):
        """Parse a message from a str, defaulting to the class policy/message."""
        if policy is None:
            policy = self.policy
        if message is None:
            message = self.message
        return email.message_from_string(string, message, policy=policy)

    def METHOD_NAME(self, bytestring, message=None, policy=None):
        """Parse a message from bytes, defaulting to the class policy/message."""
        if policy is None:
            policy = self.policy
        if message is None:
            message = self.message
        return email.message_from_bytes(bytestring, message, policy=policy)

    def _make_message(self):
        """Create an empty message of the class's message type and policy."""
        return self.message(policy=self.policy)

    def _bytes_repr(self, b):
        # One repr per line so diffs of encoded strings are readable.
        return [repr(x) for x in b.splitlines(keepends=True)]

    def assertBytesEqual(self, first, second, msg):
        """Our byte strings are really encoded strings; improve diff output"""
        # NOTE: *msg* is accepted (per addTypeEqualityFunc) but not forwarded.
        self.assertEqual(self._bytes_repr(first), self._bytes_repr(second))

    def assertDefectsEqual(self, actual, expected):
        """Check each actual defect is an instance of the expected class."""
        self.assertEqual(len(actual), len(expected), actual)
        for i in range(len(actual)):
            self.assertIsInstance(actual[i], expected[i],
                                    'item {}'.format(i))
def parameterize(cls):
    """A test method parameterization class decorator.

    Parameters are specified as the value of a class attribute that ends with
    the string '_params'.  Call the portion before '_params' the prefix.  Then
    a method to be parameterized must have the same prefix, the string
    '_as_', and an arbitrary suffix.

    The value of the _params attribute may be either a dictionary or a list.
    The values in the dictionary and the elements of the list may either be
    single values, or a list.  If single values, they are turned into single
    element tuples.  However derived, the resulting sequence is passed via
    *args to the parameterized test function.

    In a _params dictionary, the keys become part of the name of the generated
    tests.  In a _params list, the values in the list are converted into a
    string by joining the string values of the elements of the tuple by '_' and
    converting any blanks into '_'s, and this become part of the name.

    The full name of a generated test is a 'test_' prefix, the portion of the
    test function name after the '_as_' separator, plus an '_', plus the name
    derived as explained above.

    For example, if we have:

        count_params = range(3)

        def count_as_foo_arg(self, foo):
            self.assertEqual(foo+1, myfunc(foo))

    we will get parameterized test methods named:
        test_foo_arg_0
        test_foo_arg_1
        test_foo_arg_2

    Or we could have:

        example_params = {'foo': ('bar', 1), 'bing': ('bang', 2)}

        def example_as_myfunc_input(self, name, count):
            self.assertEqual(name+str(count), myfunc(name, count))

    and get:
        test_myfunc_input_foo
        test_myfunc_input_bing

    Note: if and only if the generated test name is a valid identifier can it
    be used to select the test individually from the unittest command line.
    """
    # maps '<prefix>_as_' -> {param name: args tuple}
    paramdicts = {}
    # maps '<prefix>_as_' -> list of method names using that prefix
    testers = collections.defaultdict(list)
    for name, attr in cls.__dict__.items():
        if name.endswith('_params'):
            # a list-valued _params attribute is normalized into a dict,
            # deriving each key from the stringified args tuple
            if not hasattr(attr, 'keys'):
                d = {}
                for x in attr:
                    if not hasattr(x, '__iter__'):
                        x = (x,)
                    n = '_'.join(str(v) for v in x).replace(' ', '_')
                    d[n] = x
                attr = d
            paramdicts[name[:-7] + '_as_'] = attr
        if '_as_' in name:
            testers[name.split('_as_')[0] + '_as_'].append(name)
    testfuncs = {}
    # every _params attribute must have at least one tester, and vice versa
    for name in paramdicts:
        if name not in testers:
            raise ValueError("No tester found for {}".format(name))
    for name in testers:
        if name not in paramdicts:
            raise ValueError("No params found for {}".format(name))
    for name, attr in cls.__dict__.items():
        for paramsname, paramsdict in paramdicts.items():
            if name.startswith(paramsname):
                testnameroot = 'test_' + name[len(paramsname):]
                for paramname, params in paramsdict.items():
                    # bind name/params as defaults to avoid the late-binding
                    # closure pitfall
                    test = (lambda self, name=name, params=params:
                                getattr(self, name)(*params))
                    testname = testnameroot + '_' + paramname
                    test.__name__ = testname
                    testfuncs[testname] = test
    # attach the generated test methods to the class
    for key, value in testfuncs.items():
        setattr(cls, key, value)
    return cls
import hashlib
import logging
import time
from datetime import date, timedelta
from tempfile import SpooledTemporaryFile
from uuid import UUID
from gql.client import AsyncClientSession
from cdb.api.core.emails import notify_manager_after_cafmsa_import
from cdb.api.db.graphql.beneficiary import (
BeneficiaryRsaInfos,
get_beneficiary_by_nir,
update_beneficiary,
)
from cdb.api.db.graphql.get_client import gql_client_backend_only
from cdb.api.db.graphql.manager import get_manager_by_account_id
from cdb.caf_msa.parse_infos_foyer_rsa import (
CafBeneficiary,
CafInfoFlux,
CafMsaInfosFoyer,
parse_caf_file,
transform_cafMsaFoyer_to_beneficiary,
)
logger = logging.getLogger(__name__)
async def update_cafmsa_for_beneficiaries(
    account_id: UUID, authorization: str, file: SpooledTemporaryFile
) -> None:
    """Parse a CAF/MSA flux file and update every matching beneficiary.

    Streams the XML flux in *file*, updates each beneficiary it contains,
    logs progress every five minutes, and finally emails the manager bound
    to *account_id* with the import statistics.
    """
    count = 0          # records processed (success + failure)
    count_error = 0    # records whose update raised
    count_success = 0  # records whose update returned a result
    time_interval = 60 * 5  # progress-log period, in seconds
    infos: CafInfoFlux | None = None
    async with gql_client_backend_only(bearer_token=authorization) as session:
        start_time = time.time()
        last_time = time.time()
        try:
            parsed = parse_caf_file(file)
            # the first node yielded by the parser must be the flux header
            maybe_infos = next(parsed)
            if not isinstance(maybe_infos, CafInfoFlux):
                raise CafXMLMissingNodeException
            infos = maybe_infos
            for node in parsed:
                # every subsequent node must be a foyer record
                if not isinstance(node, CafMsaInfosFoyer) or not infos:
                    raise CafXMLMissingNodeException
                for beneficiary in node.beneficiaries:
                    # periodic progress logging
                    if (time.time() - last_time) > time_interval:
                        logger.info(
                            "Traitement du fichier en cours "
                            "%s/%s dossiers traités (%s erreurs)",
                            count_success,
                            count,
                            count_error,
                        )
                        last_time = time.time()
                    try:
                        result = await METHOD_NAME(
                            gql_session=session,
                            flux_info=infos,
                            foyer=node,
                            caf_beneficiary=beneficiary,
                        )
                        if result:
                            count_success += 1
                    except Exception as error:
                        # keep going: one bad record must not stop the import
                        logger.error(
                            "Erreur lors du traitement du dossier %s",
                            error,
                        )
                        count_error += 1
                    finally:
                        count += 1
            logger.info(
                "Mise à jour de %s/%s dossiers reçus (%s erreurs) en %s",
                count_success,
                count,
                count_error,
                timedelta(seconds=time.time() - start_time),
            )
            manager = await get_manager_by_account_id(session, account_id)
            if manager:
                logger.info(
                    "Notification de la fin du traitement au manager %s",
                    manager.email,
                )
                notify_manager_after_cafmsa_import(
                    manager.email,
                    date.today().strftime("%d/%m/%Y"),
                    count,
                    count_success,
                    count_error,
                )
            else:
                logger.warning(
                    "manager (id=%s) introuvable",
                    account_id,
                )
        except Exception as error:
            # NOTE(review): broad catch — any parse failure aborts the import
            # with only a log entry; consider logger.exception for a traceback.
            logger.error(error)
async def METHOD_NAME(
    gql_session: AsyncClientSession,
    flux_info: CafInfoFlux,
    foyer: CafMsaInfosFoyer,
    caf_beneficiary: CafBeneficiary,
) -> BeneficiaryRsaInfos | None:
    """Update the beneficiary matching the given CAF/MSA record, if any.

    Looks the beneficiary up by NIR; when found, applies the data derived
    from the foyer record and stores the raw external payload together with
    a SHA-256 fingerprint used for change detection.

    Returns the update result, or None when no beneficiary matches the NIR.
    """
    beneficiary = await get_beneficiary_by_nir(gql_session, caf_beneficiary.nir)
    if not beneficiary:
        # BUG FIX: previously this was logging.warning("truc", nir) — a
        # placeholder message with a stray argument and no %s, which made
        # the logging module raise an internal formatting error.  Use the
        # module logger with a real message instead.
        logger.warning(
            "Aucun bénéficiaire trouvé pour le NIR %s", caf_beneficiary.nir
        )
        return None
    beneficiary_update = transform_cafMsaFoyer_to_beneficiary(
        caf_beneficiary, foyer
    )
    external_data_to_save = {
        "flux": flux_info.dict(),
        "foyer": foyer.dict(),
    }
    # fingerprint of the raw payload, used downstream to detect changes
    sha = hashlib.sha256(str(external_data_to_save).encode()).hexdigest()
    return await update_beneficiary(
        gql_session,
        beneficiary.id,
        beneficiary_update,
        sha,
        external_data_to_save,
    )
class CafXMLMissingNodeException(Exception):
    """Raised when an expected node is missing from the CAF/MSA XML flux."""
#!/usr/bin/env python3
#
# Tests if the markov jump model works.
#
# This file is part of PINTS (https://github.com/pints-team/pints/) which is
# released under the BSD 3-clause license. See accompanying LICENSE.md for
# copyright notice and full license details.
#
import unittest
import numpy as np
from pints.toy.stochastic import DegradationModel
class TestMarkovJumpModel(unittest.TestCase):
    """
    Tests if the markov jump model works using
    the degradation model.
    """

    def test_start_with_zero(self):
        # Test the special case where the initial molecule count is zero
        model = DegradationModel(0)
        times = [0, 1, 2, 100, 1000]
        parameters = [0.1]
        values = model.simulate(parameters, times)
        self.assertEqual(len(values), len(times))
        # with nothing to degrade, the count must stay zero at every time
        self.assertTrue(np.all(values == np.zeros(5)))

    def METHOD_NAME(self):
        # Run small simulation starting from 20 molecules
        model = DegradationModel(20)
        times = [0, 1, 2, 100, 1000, 10000]
        parameters = [0.1]
        values = model.simulate(parameters, times)
        self.assertEqual(len(values), len(times))
        self.assertEqual(values[0], 20)
        self.assertEqual(values[-1], 0)
        # degradation only: counts must be monotonically non-increasing
        self.assertTrue(np.all(values[1:] <= values[:-1]))

    def test_simulate(self):
        # Exercise the raw Gillespie simulation and interpolation helpers
        times = np.linspace(0, 100, 101)
        model = DegradationModel(20)
        time, mol_count = model.simulate_raw([0.1], 100)
        values = model.interpolate_mol_counts(time, mol_count, times)
        self.assertTrue(len(time), len(mol_count))
        # Test output of Gillespie algorithm
        expected = np.array([[x] for x in range(20, -1, -1)])
        self.assertTrue(np.all(mol_count == expected))
        # Check simulate function returns expected values
        self.assertTrue(np.all(values[np.where(times < time[1])] == 20))
        # Check interpolation function works as expected
        temp_time = np.array([np.random.uniform(time[0], time[1])])
        self.assertEqual(
            model.interpolate_mol_counts(time, mol_count, temp_time)[0], 20)
        temp_time = np.array([np.random.uniform(time[1], time[2])])
        self.assertEqual(
            model.interpolate_mol_counts(time, mol_count, temp_time)[0], 19)
        # Check interpolation works if 1 time is given
        time, mol, out = [1], [7], [0, 1, 2, 3]
        interpolated = model.interpolate_mol_counts(time, mol, out)
        self.assertEqual(list(interpolated), [7, 7, 7, 7])
        # Check if no times are given
        self.assertRaisesRegex(ValueError, 'At least one time',
                               model.interpolate_mol_counts, [], [], out)
        # Check if times and count don't match
        self.assertRaisesRegex(ValueError, 'must match',
                               model.interpolate_mol_counts, [1], [2, 3], out)
        # Decreasing output times
        self.assertRaisesRegex(
            ValueError, 'must be non-decreasing',
            model.interpolate_mol_counts, [1, 2], [2, 3], [1, 2, 3, 4, 0])
        # Check extrapolation outside of output times
        # Note: step-wise "interpolation", no actual interpolation!
        time = [10, 11]
        mol = [5, 6]
        out = [0, 10, 10.5, 11, 20]
        val = model.interpolate_mol_counts(time, mol, out)
        self.assertEqual(list(val), [5, 5, 5, 6, 6])
        time = [10]
        mol = [5]
        out = [0, 10, 10.5, 11, 20]
        val = model.interpolate_mol_counts(time, mol, out)
        self.assertEqual(list(val), [5, 5, 5, 5, 5])

    def test_errors(self):
        # Invalid inputs must raise with clear messages
        model = DegradationModel(20)
        # times cannot be negative
        times_2 = np.linspace(-10, 10, 21)
        parameters_2 = [0.1]
        self.assertRaisesRegex(ValueError, 'Negative times',
                               model.simulate, parameters_2, times_2)
        # this model should have 1 parameter
        times = np.linspace(0, 100, 101)
        parameters_3 = [0.1, 1]
        self.assertRaisesRegex(ValueError, 'should have 1 parameter',
                               model.simulate, parameters_3, times)
        # Initial value can't be negative
        self.assertRaisesRegex(ValueError, 'Initial molecule count',
                               DegradationModel, -1)
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
#!/usr/bin/env python
"""
Remove files from the FileCatalog (and all replicas from Storage Elements)
Examples:
$ drm /your/lfn/goes/here
$ drm -F myfilecontaininglfns.txt
"""
import os
import DIRAC
from DIRAC import S_OK, gLogger
from DIRAC.Core.Base.Script import Script
from DIRAC.Interfaces.Utilities.DCommands import DSession
from DIRAC.Interfaces.Utilities.DCommands import DCatalog
from DIRAC.Interfaces.Utilities.DCommands import pathFromArgument
from DIRAC.Interfaces.Utilities.DConfigCache import ConfigCache
class Params:
    """Holds the command-line options accepted by the drm command."""

    def __init__(self):
        """Initialize every option to its default value."""
        self.lfnFileName = ""
        self.targetSE = ""
        self.rmDirFlag = False

    def setLfnFileName(self, lfnFile):
        """Record the name of a file listing the LFNs to be deleted."""
        self.lfnFileName = lfnFile
        return S_OK()

    def METHOD_NAME(self, targetSE):
        """Record the Storage Element from which replicas are to be removed."""
        self.targetSE = targetSE
        return S_OK()

    def setDirFlag(self, _):
        """Enable recursive removal of directories."""
        self.rmDirFlag = True
        return S_OK()

    def registerCLISwitches(self):
        """Declare this command's arguments and switches with DIRAC."""
        Script.registerArgument(["lfn: logical file name"], mandatory=False)
        Script.registerSwitch("F:", "lfnFile=", "file containing a list of LFNs", self.setLfnFileName)
        Script.registerSwitch("D:", "destination-se=", "Storage Element from where to remove replica", self.METHOD_NAME)
        Script.registerSwitch("r", "", "remove directory recursively", self.setDirFlag)
@Script()
def main():
    """Remove LFNs (files or, with -r, directories) from catalog and storage."""
    configCache = ConfigCache()
    options = Params()
    options.registerCLISwitches()
    Script.parseCommandLine(ignoreErrors=True)
    args = Script.getPositionalArgs()
    configCache.cacheConfig()

    session = DSession()
    catalog = DCatalog()

    if not args and not options.lfnFileName:
        gLogger.error(f"No argument provided for:\n{Script.scriptName}")
        Script.showHelp(exitCode=-1)

    # collect LFNs from positional arguments ...
    lfns = set()
    for path in args:
        lfns.add(pathFromArgument(session, path))

    # ... and from the optional LFN list file
    if options.lfnFileName:
        if not os.path.exists(options.lfnFileName):
            gLogger.error(f"non-existent file {options.lfnFileName}:")
            DIRAC.exit(-1)
        # BUG FIX: the file was previously opened without ever being closed;
        # use a context manager so the handle is released promptly.
        with open(options.lfnFileName) as lfnFile:
            lfnList = lfnFile.readlines()
        # ignore empty lines anywhere in the file
        lfnList = [lfn.strip() for lfn in lfnList]
        lfns.update(pathFromArgument(session, lfn) for lfn in lfnList if lfn)

    from DIRAC.Interfaces.API.Dirac import Dirac
    from DIRAC.Core.Utilities.ReturnValues import returnSingleResult
    from DIRAC.DataManagementSystem.Client.DataManager import DataManager

    dirac = Dirac()
    dm = DataManager()

    nLfns = len(lfns)
    if nLfns > 1:
        gLogger.notice(f"Removing {nLfns} objects")

    exitCode = 0
    goodCounter = 0
    badCounter = 0
    for lfn in lfns:
        if options.rmDirFlag and not catalog.isFile(lfn):
            # recursive directory removal
            result = returnSingleResult(dm.cleanLogicalDirectory(lfn))
            if result["OK"]:
                goodCounter += 1
            else:
                gLogger.error(result["Message"])
                badCounter += 1
                exitCode = 3
        else:
            # single file: either one replica on a given SE, or the file itself
            if options.targetSE:
                result = returnSingleResult(dirac.removeReplica(lfn, options.targetSE, printOutput=False))
            else:
                result = returnSingleResult(dirac.removeFile(lfn, printOutput=False))
            if not result["OK"]:
                if result["Message"] == "No such file or directory":
                    gLogger.notice(f"{lfn} no such file")
                else:
                    gLogger.error(f"{lfn}: {result['Message']}")
                badCounter += 1
                exitCode = 2
            else:
                goodCounter += 1
                # progress feedback every 10 successful removals
                if goodCounter % 10 == 0:
                    gLogger.notice(f"{goodCounter} files removed")
                if badCounter:
                    gLogger.notice(f"{badCounter} files failed removal")

    gLogger.notice(f"\n{goodCounter} object(s) removed in total")
    if badCounter:
        gLogger.notice(f"{badCounter} object(s) failed removal in total")
    DIRAC.exit(exitCode)
# Script entry point.
if __name__ == "__main__":
    main()
"""JSON utilities for legacy saving formats (h5 and SavedModel)"""
import collections
import enum
import functools
import json
import numpy as np
from keras_core.legacy.saving import serialization
from keras_core.saving import serialization_lib
from keras_core.utils.module_utils import tensorflow as tf
_EXTENSION_TYPE_SPEC = "_EXTENSION_TYPE_SPEC"
class Encoder(json.JSONEncoder):
    """JSON encoder and decoder that handles TensorShapes and tuples."""

    def METHOD_NAME(self, obj):
        """Encode objects that the stock JSON encoder cannot handle."""
        # TensorShape gets a dedicated wire format; everything else is
        # delegated to the generic serializer.
        if tf.available and isinstance(obj, tf.TensorShape):
            dims = None if obj.rank is None else obj.as_list()
            return {"class_name": "TensorShape", "items": dims}
        return get_json_type(obj)

    def encode(self, obj):
        """Encode *obj*, first rewriting tuples into tagged dicts."""
        return super().encode(_encode_tuple(obj))
def _encode_tuple(x):
if isinstance(x, tuple):
return {
"class_name": "__tuple__",
"items": tuple(_encode_tuple(i) for i in x),
}
elif isinstance(x, list):
return [_encode_tuple(i) for i in x]
elif isinstance(x, dict):
return {key: _encode_tuple(value) for key, value in x.items()}
else:
return x
def decode(json_string):
    """Decode *json_string*, reconstructing tuples/TF objects along the way."""
    return json.loads(json_string, object_hook=_decode_helper)


def decode_and_deserialize(
    json_string, module_objects=None, custom_objects=None
):
    """Decodes the JSON and deserializes any Keras objects found in the dict."""
    hook = functools.partial(
        _decode_helper,
        deserialize=True,
        module_objects=module_objects,
        custom_objects=custom_objects,
    )
    return json.loads(json_string, object_hook=hook)
def _decode_helper(
    obj, deserialize=False, module_objects=None, custom_objects=None
):
    """A decoding helper that is TF-object aware.

    Args:
        obj: A decoded dictionary that may represent an object.
        deserialize: Boolean. When True, deserializes any Keras
            objects found in `obj`. Defaults to `False`.
        module_objects: A dictionary of built-in objects to look the name up in.
            Generally, `module_objects` is provided by midlevel library
            implementers.
        custom_objects: A dictionary of custom objects to look the name up in.
            Generally, `custom_objects` is provided by the end user.

    Returns:
        The decoded object.
    """
    if isinstance(obj, dict) and "class_name" in obj:
        # TF-specific wire formats are only handled when TF is importable.
        if tf.available:
            if obj["class_name"] == "TensorShape":
                return tf.TensorShape(obj["items"])
            elif obj["class_name"] == "TypeSpec":
                from tensorflow.python.framework import type_spec_registry

                return type_spec_registry.lookup(obj["type_spec"])._deserialize(
                    _decode_helper(obj["serialized"])
                )
            elif obj["class_name"] == "CompositeTensor":
                # rebuild the component tensors, then pack them back into the
                # composite structure described by the spec
                spec = obj["spec"]
                tensors = []
                for dtype, tensor in obj["tensors"]:
                    tensors.append(
                        tf.constant(tensor, dtype=tf.dtypes.as_dtype(dtype))
                    )
                return tf.nest.pack_sequence_as(
                    _decode_helper(spec), tensors, expand_composites=True
                )
        if obj["class_name"] == "__tuple__":
            return tuple(_decode_helper(i) for i in obj["items"])
        elif obj["class_name"] == "__ellipsis__":
            return Ellipsis
        elif deserialize and "__passive_serialization__" in obj:
            # __passive_serialization__ is added by the JSON encoder when
            # encoding an object that has a `get_config()` method.
            try:
                if (
                    "module" not in obj
                ):  # TODO(nkovela): Add TF SavedModel scope
                    return serialization.deserialize_keras_object(
                        obj,
                        module_objects=module_objects,
                        custom_objects=custom_objects,
                    )
                else:
                    return serialization_lib.deserialize_keras_object(
                        obj,
                        module_objects=module_objects,
                        custom_objects=custom_objects,
                    )
            except ValueError:
                # fall through and return the raw dict when it cannot be
                # deserialized as a Keras object
                pass
        elif obj["class_name"] == "__bytes__":
            return obj["value"].encode("utf-8")
    return obj
def get_json_type(obj):
    """Serializes any object to a JSON-serializable structure.

    Args:
        obj: the object to serialize

    Returns:
        JSON-serializable structure representing `obj`.

    Raises:
        TypeError: if `obj` cannot be serialized.
    """
    # NOTE: the branch ORDER below is load-bearing — e.g. get_config() must
    # win over the callable check, and numpy scalars over plain numbers.
    # if obj is a serializable Keras class instance
    # e.g. optimizer, layer
    if hasattr(obj, "get_config"):
        # TODO(nkovela): Replace with legacy serialization
        serialized = serialization.serialize_keras_object(obj)
        serialized["__passive_serialization__"] = True
        return serialized

    # if obj is any numpy type
    if type(obj).__module__ == np.__name__:
        if isinstance(obj, np.ndarray):
            return obj.tolist()
        else:
            return obj.item()

    # misc functions (e.g. loss function)
    if callable(obj):
        return obj.__name__

    # if obj is a python 'type'
    if type(obj).__name__ == type.__name__:
        return obj.__name__

    if tf.available and isinstance(obj, tf.compat.v1.Dimension):
        return obj.value

    if tf.available and isinstance(obj, tf.TensorShape):
        return obj.as_list()

    if tf.available and isinstance(obj, tf.DType):
        return obj.name

    if isinstance(obj, collections.abc.Mapping):
        return dict(obj)

    if obj is Ellipsis:
        return {"class_name": "__ellipsis__"}

    # if isinstance(obj, wrapt.ObjectProxy):
    #     return obj.__wrapped__

    if tf.available and isinstance(obj, tf.TypeSpec):
        from tensorflow.python.framework import type_spec_registry

        try:
            type_spec_name = type_spec_registry.get_name(type(obj))
            return {
                "class_name": "TypeSpec",
                "type_spec": type_spec_name,
                "serialized": obj._serialize(),
            }
        except ValueError:
            raise ValueError(
                f"Unable to serialize {obj} to JSON, because the TypeSpec "
                f"class {type(obj)} has not been registered."
            )

    if tf.available and isinstance(obj, tf.__internal__.CompositeTensor):
        # store the spec plus the flattened component tensors
        spec = tf.type_spec_from_value(obj)
        tensors = []
        for tensor in tf.nest.flatten(obj, expand_composites=True):
            tensors.append((tensor.dtype.name, tensor.numpy().tolist()))
        return {
            "class_name": "CompositeTensor",
            "spec": get_json_type(spec),
            "tensors": tensors,
        }

    if isinstance(obj, enum.Enum):
        return obj.value

    if isinstance(obj, bytes):
        return {"class_name": "__bytes__", "value": obj.decode("utf-8")}

    raise TypeError(
        f"Unable to serialize {obj} to JSON. Unrecognized type {type(obj)}."
    )
# Copyright (c) 2021, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
import frappe
from frappe import _
from frappe.desk.form import assign_to
from frappe.model.document import Document
from frappe.utils import add_days, flt, unique
from erpnext.setup.doctype.employee.employee import get_holiday_list_for_employee
from erpnext.setup.doctype.holiday_list.holiday_list import is_holiday
class EmployeeBoardingController(Document):
    """
    Create the project and the task for the boarding process
    Assign to the concerned person and roles as per the onboarding/separation template
    """

    def validate(self):
        # remove the task if linked before submitting the form
        if self.amended_from:
            for activity in self.activities:
                activity.task = ""

    def METHOD_NAME(self):
        """Create the boarding Project and its Tasks on document submission."""
        # create the project for the given employee onboarding
        project_name = _(self.doctype) + " : "
        if self.doctype == "Employee Onboarding":
            project_name += self.job_applicant
        else:
            project_name += self.employee
        project = frappe.get_doc(
            {
                "doctype": "Project",
                "project_name": project_name,
                "expected_start_date": self.date_of_joining
                if self.doctype == "Employee Onboarding"
                else self.resignation_letter_date,
                "department": self.department,
                "company": self.company,
            }
        ).insert(ignore_permissions=True, ignore_mandatory=True)
        self.db_set("project", project.name)
        self.db_set("boarding_status", "Pending")
        self.reload()
        self.create_task_and_notify_user()

    def create_task_and_notify_user(self):
        """Create one Task per boarding activity and assign the users."""
        # create the task for the given project and assign to the concerned person
        holiday_list = self.get_holiday_list()

        for activity in self.activities:
            # skip activities that already have a linked task
            if activity.task:
                continue

            dates = self.get_task_dates(activity, holiday_list)

            task = frappe.get_doc(
                {
                    "doctype": "Task",
                    "project": self.project,
                    "subject": activity.activity_name + " : " + self.employee_name,
                    "description": activity.description,
                    "department": self.department,
                    "company": self.company,
                    "task_weight": activity.task_weight,
                    "exp_start_date": dates[0],
                    "exp_end_date": dates[1],
                }
            ).insert(ignore_permissions=True)
            activity.db_set("task", task.name)

            users = [activity.user] if activity.user else []
            if activity.role:
                # all enabled users holding the configured role
                user_list = frappe.db.sql_list(
                    """
                    SELECT
                        DISTINCT(has_role.parent)
                    FROM
                        `tabHas Role` has_role
                            LEFT JOIN `tabUser` user
                                ON has_role.parent = user.name
                    WHERE
                        has_role.parenttype = 'User'
                        AND user.enabled = 1
                        AND has_role.role = %s
                    """,
                    activity.role,
                )
                users = unique(users + user_list)

                if "Administrator" in users:
                    users.remove("Administrator")

            # assign the task the users
            if users:
                self.assign_task_to_users(task, users)

    def get_holiday_list(self):
        """Return the applicable holiday list for this boarding document."""
        if self.doctype == "Employee Separation":
            return get_holiday_list_for_employee(self.employee)
        else:
            # onboarding may predate the employee record
            if self.employee:
                return get_holiday_list_for_employee(self.employee)
            else:
                if not self.holiday_list:
                    frappe.throw(_("Please set the Holiday List."), frappe.MandatoryError)
                else:
                    return self.holiday_list

    def get_task_dates(self, activity, holiday_list):
        """Compute [start, end] dates for an activity, skipping holidays."""
        start_date = end_date = None

        if activity.begin_on is not None:
            start_date = add_days(self.boarding_begins_on, activity.begin_on)
            start_date = self.update_if_holiday(start_date, holiday_list)

            if activity.duration is not None:
                end_date = add_days(self.boarding_begins_on, activity.begin_on + activity.duration)
                end_date = self.update_if_holiday(end_date, holiday_list)

        return [start_date, end_date]

    def update_if_holiday(self, date, holiday_list):
        """Shift *date* forward until it is not a holiday."""
        while is_holiday(holiday_list, date):
            date = add_days(date, 1)
        return date

    def assign_task_to_users(self, task, users):
        """Create a ToDo assignment on *task* for each of *users*."""
        for user in users:
            args = {
                "assign_to": [user],
                "doctype": task.doctype,
                "name": task.name,
                "description": task.description or task.subject,
                "notify": self.notify_users_by_email,
            }
            assign_to.add(args)

    def on_cancel(self):
        """Delete the linked Project and its Tasks when the doc is cancelled."""
        # delete task project
        project = self.project
        for task in frappe.get_all("Task", filters={"project": project}):
            frappe.delete_doc("Task", task.name, force=1)
        frappe.delete_doc("Project", project, force=1)
        self.db_set("project", "")
        for activity in self.activities:
            activity.db_set("task", "")

        frappe.msgprint(
            _("Linked Project {} and Tasks deleted.").format(project), alert=True, indicator="blue"
        )
@frappe.whitelist()
def get_onboarding_details(parent, parenttype):
    """Return the boarding activities attached to the given parent document."""
    activity_fields = [
        "activity_name",
        "role",
        "user",
        "required_for_employee_creation",
        "description",
        "task_weight",
        "begin_on",
        "duration",
    ]
    return frappe.get_all(
        "Employee Boarding Activity",
        fields=activity_fields,
        filters={"parent": parent, "parenttype": parenttype},
        order_by="idx",
    )


def update_employee_boarding_status(project, event=None):
    """Sync the boarding status of the onboarding/separation tied to *project*."""
    onboarding = frappe.db.exists("Employee Onboarding", {"project": project.name})
    separation = frappe.db.exists("Employee Separation", {"project": project.name})
    if not (onboarding or separation):
        return

    # map project completion to a boarding status
    status = "Pending"
    progress = flt(project.percent_complete)
    if 0.0 < progress < 100.0:
        status = "In Process"
    elif progress == 100.0:
        status = "Completed"

    if onboarding:
        frappe.db.set_value("Employee Onboarding", onboarding, "boarding_status", status)
    elif separation:
        frappe.db.set_value("Employee Separation", separation, "boarding_status", status)


def update_task(task, event=None):
    """Refresh boarding status when one of the project's tasks changes."""
    if task.project and not task.flags.from_project:
        update_employee_boarding_status(frappe.get_cached_doc("Project", task.project))
from collections import OrderedDict
from copy import deepcopy
import attr
import pytest
from labgrid import Target, target_factory
from labgrid.driver import Driver
from labgrid.resource import Resource
from labgrid.exceptions import InvalidConfigError, RegistrationError
from labgrid.resource import SerialPort
from labgrid.util.yaml import load
def test_empty():
    """A target can be built from an empty configuration."""
    target = target_factory.make_target('dummy', {})
    assert isinstance(target, Target)


def test_resources():
    """Resources declared in the config are created and attached."""
    original_config = {
        'resources': OrderedDict([
            ('RawSerialPort', {
                'port': 'foo',
                'speed': 115200,
                'name': 'console',
            }),
        ]),
    }
    config = deepcopy(original_config)
    target = target_factory.make_target('dummy', config)
    assert isinstance(target, Target)
    assert target.get_resource(SerialPort) is not None
    # make_target must not mutate the caller's configuration
    assert config == original_config


def test_drivers():
    """Drivers declared in the config are created and attached."""
    original_config = {
        'resources': OrderedDict([
            ('RawSerialPort', {
                'port': 'foo',
                'speed': 115200
            }),
        ]),
        'drivers': OrderedDict([
            ('FakeConsoleDriver', {
                'name': 'console',
            }),
            ('ShellDriver', {
                'name': 'shell',
                'prompt': '',
                'login_prompt': '',
                'username': ''
            }),
        ]),
    }
    config = deepcopy(original_config)
    target = target_factory.make_target('dummy', config)
    assert isinstance(target, Target)
    assert target.get_resource(SerialPort) is not None
    # make_target must not mutate the caller's configuration
    assert config == original_config
def test_convert_dict():
    """A plain mapping converts to a list of {'cls', 'name'} entries."""
    original_data = load("""
    FooPort: {}
    BarPort:
      name: bar
    """)
    data = deepcopy(original_data)
    converted = target_factory._convert_to_named_list(data)
    assert converted == [
        {'cls': 'FooPort', 'name': None},
        {'cls': 'BarPort', 'name': 'bar'},
    ]
    # conversion must not mutate its input
    assert data == original_data


def test_convert_simple_list():
    """A list of single-key mappings converts to a named list."""
    original_data = load("""
    - FooPort: {}
    - BarPort:
        name: bar
    """)
    data = deepcopy(original_data)
    converted = target_factory._convert_to_named_list(data)
    assert converted == [
        {'cls': 'FooPort', 'name': None},
        {'cls': 'BarPort', 'name': 'bar'},
    ]
    assert data == original_data


def test_convert_explicit_list():
    """A list of explicit {'cls': ...} entries is normalized in place."""
    original_data = load("""
    - cls: FooPort
    - cls: BarPort
      name: bar
    """)
    data = deepcopy(original_data)
    converted = target_factory._convert_to_named_list(data)
    assert converted == [
        {'cls': 'FooPort', 'name': None},
        {'cls': 'BarPort', 'name': 'bar'},
    ]
    assert data == original_data
def test_normalize_config():
    """normalize_config splits a target config into resources and drivers."""
    original_config = {
        'resources': OrderedDict([
            ('RawSerialPort', {
                'port': 'foo',
                'speed': 115200
            }),
        ]),
        'drivers': OrderedDict([
            ('FakeConsoleDriver', {
                'name': 'console',
            }),
        ]),
    }
    cfg = deepcopy(original_config)
    resources, drivers = target_factory.normalize_config(cfg)
    # resources are keyed by class, then by (optional) name
    assert 'RawSerialPort' in resources
    assert resources['RawSerialPort'] == {None: ({'port': 'foo', 'speed': 115200},)}
    # drivers are keyed by class, then by name, holding (args, bindings)
    assert 'FakeConsoleDriver' in drivers
    assert drivers['FakeConsoleDriver'] == {'console': ({}, {})}
    # the input config must be left untouched
    assert cfg == original_config
def test_convert_error():
    """Each malformed list flavor raises InvalidConfigError with its message."""
    cases = [
        ("""
        - {}
        """, "invalid empty dict as list item"),
        ("""
        - "error"
        """, "invalid list item type <class 'str'> (should be dict)"),
        ("""
        - name: "bar"
          extra: "baz"
        """, "missing 'cls' key in OrderedDict("),
        ("""
        - one:
          - two: {}
        """, "invalid list item, add empty dict for no arguments"),
    ]
    for yaml_text, expected in cases:
        with pytest.raises(InvalidConfigError) as excinfo:
            target_factory._convert_to_named_list(load(yaml_text))
        assert expected in excinfo.value.msg


def test_resource_param_error():
    """Wrong constructor arguments for a resource are reported."""
    with pytest.raises(InvalidConfigError) as excinfo:
        target_factory.make_resource(
            None, 'NetworkSerialPort', 'serial', {'port': None})
    assert "failed to create" in excinfo.value.msg


def test_driver_param_error():
    """Wrong constructor arguments for a driver are reported."""
    with pytest.raises(InvalidConfigError) as excinfo:
        target_factory.make_driver(
            None, 'QEMUDriver', 'qemu', {'cpu': 'arm'})
    assert "failed to create" in excinfo.value.msg


def test_resource_class_error():
    """An unknown resource class name is rejected."""
    with pytest.raises(InvalidConfigError) as excinfo:
        target_factory.make_resource(
            None, 'UnknownResource', None, {})
    assert "unknown resource class" in excinfo.value.msg


def METHOD_NAME():
    """An unknown driver class name is rejected."""
    with pytest.raises(InvalidConfigError) as excinfo:
        target_factory.make_driver(
            None, 'UnknownDriver', None, {})
    assert "unknown driver class" in excinfo.value.msg
def test_register_same_driver():
    """Registering the same driver class twice must fail."""
    @attr.s
    class SameDriver(Driver):
        pass

    with pytest.raises(RegistrationError) as excinfo:
        target_factory.reg_driver(SameDriver)
        # the second registration is the one expected to raise
        target_factory.reg_driver(SameDriver)
    assert "driver with name" in excinfo.value.msg
def test_register_same_resource():
    """Registering the same resource class twice must fail.

    BUG FIX: the original test called ``target_factory.reg_driver`` on a
    Resource subclass and asserted the *driver* error message, so resource
    registration was never exercised at all.  Use ``reg_resource`` and
    check the resource error message instead.
    """
    @attr.s
    class SameResource(Resource):
        pass

    with pytest.raises(RegistrationError) as excinfo:
        target_factory.reg_resource(SameResource)
        # the second registration is the one expected to raise
        target_factory.reg_resource(SameResource)
    assert "resource with name" in excinfo.value.msg
__copyright__ = 'Copyright 2022, The RADICAL-Cybertools Team'
__license__ = 'MIT'
# configure the psij logger (captured in the launch components stderr)
import logging
logging.basicConfig(level='DEBUG')
import threading as mt
from .base import PilotLauncherBase
from ... import states as rps
# psij is optional
psij = None
psij_ex = None
try:
import psij
except ImportError as ex:
psij_ex = ex
# ------------------------------------------------------------------------------
#
class PilotLauncherPSIJ(PilotLauncherBase):
# --------------------------------------------------------------------------
#
def __init__(self, name, log, prof, state_cb):
    """Set up launcher bookkeeping; raises if ``psij`` is not importable."""
    # psij is an optional dependency - re-raise the captured import error
    # so that this pilot launcher gets disabled
    if psij_ex:
        raise psij_ex
    assert psij

    PilotLauncherBase.__init__(self, name, log, prof, state_cb)

    self._jobs   = dict()      # pilot id -> psij job
    self._pilots = dict()      # psij job id -> pilot description
    self._jex    = dict()      # launch schema -> psij job executor
    self._lock   = mt.RLock()  # guards the three maps above
# --------------------------------------------------------------------------
#
def METHOD_NAME(self, rcfg):
    """Derive the psij executor schema from the job manager endpoint URL.

    Returns None when no single usable schema remains after stripping
    tunneling protocols.
    """
    url = rcfg['job_manager_endpoint']
    schemas = url.split(':')[0].split('+')

    # tunneling protocols are handled elsewhere - strip them
    for tunnel in ('ssh', 'gsissh'):
        if tunnel in schemas:
            schemas.remove(tunnel)

    # exactly one schema must remain to be usable
    if len(schemas) != 1:
        return

    schema = schemas[0]

    # psij names the fork starter 'local'
    return 'local' if schema == 'fork' else schema
# --------------------------------------------------------------------------
#
def _translate_state(self, status):
    """Map a psij job state to the corresponding RP pilot state."""
    state_map = {
        psij.JobState.NEW:       rps.NEW,
        psij.JobState.QUEUED:    rps.PMGR_LAUNCHING,
        psij.JobState.ACTIVE:    rps.PMGR_ACTIVE,
        psij.JobState.COMPLETED: rps.DONE,
        psij.JobState.FAILED:    rps.FAILED,
        psij.JobState.CANCELED:  rps.CANCELED,
    }
    try:
        return state_map[status.state]
    except KeyError:
        raise ValueError('cannot interpret psij state: %s' % repr(status))
# --------------------------------------------------------------------------
#
def _job_status_cb(self, job, status):
try:
with self._lock:
if job.id not in self._pilots:
return
rp_state = self._translate_state(status)
pilot = self._pilots[job.id]
self._state_cb(pilot, rp_state)
except Exception:
self._log.exception('job status callback failed')
# --------------------------------------------------------------------------
#
def can_launch(self, rcfg, pilot):
schema = self.METHOD_NAME(rcfg)
if not schema:
return False
if schema not in self._jex:
self._log.debug('create executor for %s', schema)
try:
self._jex[schema] = psij.JobExecutor.get_instance(schema)
self._jex[schema].set_job_status_callback(self._job_status_cb)
except:
self._log.exception('failed to create psij executor')
return False
return True
# --------------------------------------------------------------------------
#
def launch_pilots(self, rcfg, pilots):
assert psij
for pilot in pilots:
pid = pilot['uid']
schema = self.METHOD_NAME(rcfg)
assert schema
jex = self._jex.get(schema)
assert jex
jd = pilot['jd_dict']
proj, res = None, None
if jd.project:
if ':' in jd.project:
proj, res = jd.project.split(':', 1)
else:
proj = jd.project
attr = psij.JobAttributes()
attr.duration = jd.wall_time_limit
attr.queue_name = jd.queue
attr.project_name = proj
attr.reservation_id = res
spec = psij.JobSpec()
spec.attributes = attr
spec.executable = jd.executable
spec.arguments = jd.arguments
spec.environment = jd.environment
spec.directory = jd.working_directory
spec.stdout_path = jd.output
spec.stderr_path = jd.error
spec.resources = psij.ResourceSpecV1()
spec.resources.node_count = jd.node_count
spec.resources.process_count = jd.total_cpu_count
# spec.resources.cpu_cores_per_process = 1
# spec.resources.gpu_cores_per_process = jd.total_gpu_count
job = psij.Job(spec)
self._jobs[pid] = job
self._pilots[job.id] = pilot
self._log.debug('added %s: %s', job.id, pid)
jex.submit(job)
# --------------------------------------------------------------------------
#
def kill_pilots(self, pids):
for pid in pids:
if pid not in pids:
continue
self._jobs[pid].cancel()
# ------------------------------------------------------------------------------
|
299,348 | get hydro max power | import tempfile
from pathlib import Path
from typing import Dict
from filelock import FileLock
from antarest.matrixstore.service import ISimpleMatrixService
from antarest.study.storage.variantstudy.business import matrix_constants
from antarest.study.storage.variantstudy.business.matrix_constants.common import (
FIXED_4_COLUMNS,
FIXED_8_COLUMNS,
NULL_MATRIX,
NULL_SCENARIO_MATRIX,
)
# TODO: put index into variable
# Registry keys under which the constant matrices are stored.  The v6/v7
# suffixes refer to Antares study versions.
HYDRO_COMMON_CAPACITY_MAX_POWER_V7 = "hydro/common/capacity/max_power/v7"
HYDRO_COMMON_CAPACITY_RESERVOIR_V7 = "hydro/common/capacity/reservoir/v7"
HYDRO_COMMON_CAPACITY_RESERVOIR_V6 = "hydro/common/capacity/reservoir/v6"
HYDRO_COMMON_CAPACITY_INFLOW_PATTERN = "hydro/common/capacity/inflow_pattern"
HYDRO_COMMON_CAPACITY_CREDIT_MODULATION = "hydro/common/capacity/credit_modulations"
RESERVES_TS = "reserves"
MISCGEN_TS = "miscgen"
PREPRO_CONVERSION = "prepro/conversion"
PREPRO_DATA = "prepro/data"
THERMAL_PREPRO_DATA = "thermals/prepro/data"
THERMAL_PREPRO_MODULATION = "thermals/prepro/modulation"
LINK_V7 = "link_v7"
LINK_V8 = "link_v8"
LINK_DIRECT = "link_direct"
LINK_INDIRECT = "link_indirect"
NULL_MATRIX_NAME = "null_matrix"
EMPTY_SCENARIO_MATRIX = "empty_scenario_matrix"
ONES_SCENARIO_MATRIX = "ones_scenario_matrix"
# Short-term storage aliases
# These are pure aliases: they resolve to the shared "ones"/"empty" scenario
# matrices, so no additional matrix is created for them.
ST_STORAGE_PMAX_INJECTION = ONES_SCENARIO_MATRIX
ST_STORAGE_PMAX_WITHDRAWAL = ONES_SCENARIO_MATRIX
ST_STORAGE_LOWER_RULE_CURVE = EMPTY_SCENARIO_MATRIX
ST_STORAGE_UPPER_RULE_CURVE = ONES_SCENARIO_MATRIX
ST_STORAGE_INFLOWS = EMPTY_SCENARIO_MATRIX
# Prefix used to encode a matrix-service hash as a URI.
MATRIX_PROTOCOL_PREFIX = "matrix://"
# noinspection SpellCheckingInspection
class GeneratorMatrixConstants:
    """Register the constant (default) matrices in the matrix store and expose
    them as ``matrix://<hash>`` URIs.

    Registration runs once at construction time and is guarded by a
    cross-process file lock, so several workers starting concurrently do not
    race on creating the same constants.
    """
    def __init__(self, matrix_service: ISimpleMatrixService) -> None:
        # Maps the module-level constant-name keys to the hashes returned by
        # the matrix service.
        self.hashes: Dict[str, str] = {}
        self.matrix_service: ISimpleMatrixService = matrix_service
        with FileLock(str(Path(tempfile.gettempdir()) / "matrix_constant_init.lock")):
            self._init()
    def _init(self) -> None:
        """Create every constant matrix in the matrix service and record its
        hash in ``self.hashes``."""
        self.hashes[HYDRO_COMMON_CAPACITY_MAX_POWER_V7] = self.matrix_service.create(
            matrix_constants.hydro.v7.max_power
        )
        self.hashes[HYDRO_COMMON_CAPACITY_RESERVOIR_V7] = self.matrix_service.create(
            matrix_constants.hydro.v7.reservoir
        )
        self.hashes[HYDRO_COMMON_CAPACITY_RESERVOIR_V6] = self.matrix_service.create(
            matrix_constants.hydro.v6.reservoir
        )
        self.hashes[HYDRO_COMMON_CAPACITY_INFLOW_PATTERN] = self.matrix_service.create(
            matrix_constants.hydro.v7.inflow_pattern
        )
        self.hashes[HYDRO_COMMON_CAPACITY_CREDIT_MODULATION] = self.matrix_service.create(
            matrix_constants.hydro.v7.credit_modulations
        )
        self.hashes[PREPRO_CONVERSION] = self.matrix_service.create(matrix_constants.prepro.conversion)
        self.hashes[PREPRO_DATA] = self.matrix_service.create(matrix_constants.prepro.data)
        self.hashes[THERMAL_PREPRO_DATA] = self.matrix_service.create(matrix_constants.thermals.prepro.data)
        self.hashes[THERMAL_PREPRO_MODULATION] = self.matrix_service.create(matrix_constants.thermals.prepro.modulation)
        self.hashes[LINK_V7] = self.matrix_service.create(matrix_constants.link.v7.link)
        self.hashes[LINK_V8] = self.matrix_service.create(matrix_constants.link.v8.link)
        self.hashes[LINK_DIRECT] = self.matrix_service.create(matrix_constants.link.v8.direct)
        self.hashes[LINK_INDIRECT] = self.matrix_service.create(matrix_constants.link.v8.indirect)
        self.hashes[NULL_MATRIX_NAME] = self.matrix_service.create(NULL_MATRIX)
        self.hashes[EMPTY_SCENARIO_MATRIX] = self.matrix_service.create(NULL_SCENARIO_MATRIX)
        self.hashes[RESERVES_TS] = self.matrix_service.create(FIXED_4_COLUMNS)
        self.hashes[MISCGEN_TS] = self.matrix_service.create(FIXED_8_COLUMNS)
        # Some short-term storage matrices use np.ones((8760, 1))
        self.hashes[ONES_SCENARIO_MATRIX] = self.matrix_service.create(
            matrix_constants.st_storage.series.pmax_injection
        )
    def METHOD_NAME(self, version: int) -> str:
        """Hydro max-power matrix URI: v7 data for study versions above 650,
        the null matrix otherwise."""
        if version > 650:
            return MATRIX_PROTOCOL_PREFIX + self.hashes[HYDRO_COMMON_CAPACITY_MAX_POWER_V7]
        else:
            return MATRIX_PROTOCOL_PREFIX + self.hashes[NULL_MATRIX_NAME]
    def get_hydro_reservoir(self, version: int) -> str:
        """Hydro reservoir matrix URI, version-dependent (v7 above 650)."""
        if version > 650:
            return MATRIX_PROTOCOL_PREFIX + self.hashes[HYDRO_COMMON_CAPACITY_RESERVOIR_V7]
        return MATRIX_PROTOCOL_PREFIX + self.hashes[HYDRO_COMMON_CAPACITY_RESERVOIR_V6]
    def get_hydro_credit_modulations(self) -> str:
        """Hydro credit-modulations matrix URI."""
        return MATRIX_PROTOCOL_PREFIX + self.hashes[HYDRO_COMMON_CAPACITY_CREDIT_MODULATION]
    def get_hydro_inflow_pattern(self) -> str:
        """Hydro inflow-pattern matrix URI."""
        return MATRIX_PROTOCOL_PREFIX + self.hashes[HYDRO_COMMON_CAPACITY_INFLOW_PATTERN]
    def get_prepro_conversion(self) -> str:
        """Prepro conversion matrix URI."""
        return MATRIX_PROTOCOL_PREFIX + self.hashes[PREPRO_CONVERSION]
    def get_prepro_data(self) -> str:
        """Prepro data matrix URI."""
        return MATRIX_PROTOCOL_PREFIX + self.hashes[PREPRO_DATA]
    def get_thermal_prepro_data(self) -> str:
        """Thermal prepro data matrix URI."""
        return MATRIX_PROTOCOL_PREFIX + self.hashes[THERMAL_PREPRO_DATA]
    def get_thermal_prepro_modulation(self) -> str:
        """Thermal prepro modulation matrix URI."""
        return MATRIX_PROTOCOL_PREFIX + self.hashes[THERMAL_PREPRO_MODULATION]
    def get_link(self, version: int) -> str:
        """Link matrix URI: v7 layout below study version 820, v8 otherwise."""
        if version < 820:
            return MATRIX_PROTOCOL_PREFIX + self.hashes[LINK_V7]
        return MATRIX_PROTOCOL_PREFIX + self.hashes[LINK_V8]
    def get_link_direct(self) -> str:
        """Direct link matrix URI (v8)."""
        return MATRIX_PROTOCOL_PREFIX + self.hashes[LINK_DIRECT]
    def get_link_indirect(self) -> str:
        """Indirect link matrix URI (v8)."""
        return MATRIX_PROTOCOL_PREFIX + self.hashes[LINK_INDIRECT]
    def get_null_matrix(self) -> str:
        """Null matrix URI."""
        return MATRIX_PROTOCOL_PREFIX + self.hashes[NULL_MATRIX_NAME]
    def get_null_scenario_matrix(self) -> str:
        """Null scenario matrix URI."""
        return MATRIX_PROTOCOL_PREFIX + self.hashes[EMPTY_SCENARIO_MATRIX]
    def get_default_reserves(self) -> str:
        """Default reserves time series URI (fixed 4-column matrix)."""
        return MATRIX_PROTOCOL_PREFIX + self.hashes[RESERVES_TS]
    def get_default_miscgen(self) -> str:
        """Default misc-gen time series URI (fixed 8-column matrix)."""
        return MATRIX_PROTOCOL_PREFIX + self.hashes[MISCGEN_TS]
    def get_st_storage_pmax_injection(self) -> str:
        """2D-matrix of shape (8760, 1), filled-in with ones."""
        return MATRIX_PROTOCOL_PREFIX + self.hashes[ST_STORAGE_PMAX_INJECTION]
    def get_st_storage_pmax_withdrawal(self) -> str:
        """2D-matrix of shape (8760, 1), filled-in with ones."""
        return MATRIX_PROTOCOL_PREFIX + self.hashes[ST_STORAGE_PMAX_WITHDRAWAL]
    def get_st_storage_lower_rule_curve(self) -> str:
        """2D-matrix of shape (8760, 1), filled-in with zeros."""
        return MATRIX_PROTOCOL_PREFIX + self.hashes[ST_STORAGE_LOWER_RULE_CURVE]
    def get_st_storage_upper_rule_curve(self) -> str:
        """2D-matrix of shape (8760, 1), filled-in with ones."""
        return MATRIX_PROTOCOL_PREFIX + self.hashes[ST_STORAGE_UPPER_RULE_CURVE]
    def get_st_storage_inflows(self) -> str:
        """2D-matrix of shape (8760, 1), filled-in with zeros."""
        return MATRIX_PROTOCOL_PREFIX + self.hashes[ST_STORAGE_INFLOWS]
299,349 | transform | """A transformer that encodes categorical features into target encodings."""
import warnings
import pandas as pd
from evalml.pipelines.components.transformers.encoders.onehot_encoder import (
OneHotEncoderMeta,
)
from evalml.pipelines.components.transformers.transformer import Transformer
from evalml.utils import import_or_raise, infer_feature_types
class TargetEncoder(Transformer, metaclass=OneHotEncoderMeta):
    """A transformer that encodes categorical features into target encodings.
    Args:
        cols (list): Columns to encode. If None, all string columns will be encoded, otherwise only the columns provided will be encoded.
            Defaults to None
        smoothing (float): The smoothing factor to apply. The larger this value is, the more influence the expected target value has
            on the resulting target encodings. Must be strictly larger than 0. Defaults to 1.0
        handle_unknown (string): Determines how to handle unknown categories for a feature encountered. Options are 'value', 'error', and 'return_nan'.
            Defaults to 'value', which replaces with the target mean
        handle_missing (string): Determines how to handle missing values encountered during `fit` or `transform`. Options are 'value', 'error', and 'return_nan'.
            Defaults to 'value', which replaces with the target mean
        random_seed (int): Seed for the random number generator. Defaults to 0.
    """
    name = "Target Encoder"
    hyperparameter_ranges = {}
    """{}"""
    def __init__(
        self,
        cols=None,
        smoothing=1,
        handle_unknown="value",
        handle_missing="value",
        random_seed=0,
        **kwargs,
    ):
        parameters = {
            "cols": cols,
            "smoothing": smoothing,
            "handle_unknown": handle_unknown,
            "handle_missing": handle_missing,
        }
        parameters.update(kwargs)
        # Validate the option strings before constructing the encoder so that
        # a bad value fails fast with a clear message.
        unknown_and_missing_input_options = ["error", "return_nan", "value"]
        if handle_unknown not in unknown_and_missing_input_options:
            raise ValueError(
                "Invalid input '{}' for handle_unknown".format(handle_unknown),
            )
        if handle_missing not in unknown_and_missing_input_options:
            raise ValueError(
                "Invalid input '{}' for handle_missing".format(handle_missing),
            )
        if smoothing <= 0:
            raise ValueError(
                "Smoothing value needs to be strictly larger than 0. {} provided".format(
                    smoothing,
                ),
            )
        # category_encoders is an optional dependency, imported lazily.
        category_encode = import_or_raise(
            "category_encoders",
            error_msg="category-encoders not installed. Please install using `pip install category-encoders`",
        )
        # Suppress warnings for now due to problems discussed here:
        # https://github.com/scikit-learn-contrib/category_encoders/issues/327
        with warnings.catch_warnings():
            warnings.simplefilter("ignore")
            encoder = category_encode.target_encoder.TargetEncoder(**parameters)
        super().__init__(
            parameters=parameters,
            component_obj=encoder,
            random_seed=random_seed,
        )
    def fit(self, X, y):
        """Fits the target encoder.
        Args:
            X (pd.DataFrame): The input training data of shape [n_samples, n_features].
            y (pd.Series, optional): The target training data of length [n_samples].
        Returns:
            self
        """
        return super().fit(X, y)
    def METHOD_NAME(self, X, y=None):
        """Transform data using the fitted target encoder.
        Args:
            X (pd.DataFrame): The input training data of shape [n_samples, n_features].
            y (pd.Series, optional): The target training data of length [n_samples].
        Returns:
            pd.DataFrame: Transformed data.
        """
        X_ww = infer_feature_types(X)
        if y is not None:
            y = infer_feature_types(y)
        # NOTE(review): the raw X (not X_ww) is passed to the underlying
        # encoder here - confirm that this is intentional.
        X_t = self._component_obj.METHOD_NAME(X, y)
        # Re-attach the original column names/index, then drop the
        # 'category' typing since encoded columns are now numeric.
        X_t_df = pd.DataFrame(X_t, columns=X_ww.columns, index=X_ww.index)
        no_cat_schema = X_ww.ww.select(exclude="category", return_schema=True)
        X_t_df.ww.init(schema=no_cat_schema)
        return X_t_df
    def fit_transform(self, X, y):
        """Fit and transform data using the target encoder.
        Args:
            X (pd.DataFrame): The input training data of shape [n_samples, n_features].
            y (pd.Series, optional): The target training data of length [n_samples].
        Returns:
            pd.DataFrame: Transformed data.
        """
        return self.fit(X, y).METHOD_NAME(X, y)
    def get_feature_names(self):
        """Return feature names for the input features after fitting.
        Returns:
            np.array: The feature names after encoding.
        """
        return self._component_obj.get_feature_names()
    def _get_feature_provenance(self):
        # Target encoding replaces each column in place, so every output
        # feature maps back to the identically-named input feature.
        return {col: col for col in self.get_feature_names()}
299,350 | package info | from conan import ConanFile
from conan.errors import ConanInvalidConfiguration
from conan.tools.cmake import CMakeToolchain, CMakeDeps, CMake, cmake_layout
from conan.tools.build import check_min_cppstd
from conan.tools.files import get, replace_in_file, copy, export_conandata_patches, apply_conandata_patches
import os
required_conan_version = ">=1.53.0"
class RmluiConan(ConanFile):
    """Conan recipe for RmlUi (HTML/CSS user interface library)."""
    name = "rmlui"
    description = "RmlUi - The HTML/CSS User Interface Library Evolved"
    license = "MIT"
    url = "https://github.com/conan-io/conan-center-index"
    homepage = "https://github.com/mikke89/RmlUi"
    topics = ("css", "gui", "html", "lua", "rmlui")
    package_type = "library"
    settings = "os", "arch", "compiler", "build_type"
    options = {
        "font_interface": ["freetype", None],
        "fPIC": [True, False],
        "matrix_mode": ["column_major", "row_major"],
        "shared": [True, False],
        "with_lua_bindings": [True, False],
        "with_thirdparty_containers": [True, False]
    }
    default_options = {
        "font_interface": "freetype",
        "fPIC": True,
        "matrix_mode": "column_major",
        "shared": False,
        "with_lua_bindings": False,
        "with_thirdparty_containers": True
    }
    @property
    def _minimum_compilers_version(self):
        # Reference: https://en.cppreference.com/w/cpp/compiler_support/14
        return {
            "apple-clang": "5.1",
            "clang": "3.4",
            "gcc": "5",
            "intel": "17",
            "sun-cc": "5.15",
            "Visual Studio": "15"
        }
    @property
    def _minimum_cpp_standard(self):
        # RmlUi requires C++14.
        return 14
    def export_sources(self):
        export_conandata_patches(self)
    def config_options(self):
        # fPIC is meaningless on Windows.
        if self.settings.os == "Windows":
            del self.options.fPIC
    def configure(self):
        # fPIC is implied for shared libraries.
        if self.options.shared:
            self.options.rm_safe("fPIC")
    def validate(self):
        if self.settings.compiler.get_safe("cppstd"):
            check_min_cppstd(self, self._minimum_cpp_standard)
        # Compare only the version components both sides provide
        # ("lazy" semver: "5" matches any "5.x").
        def lazy_lt_semver(v1, v2):
            lv1 = [int(v) for v in v1.split(".")]
            lv2 = [int(v) for v in v2.split(".")]
            min_length = min(len(lv1), len(lv2))
            return lv1[:min_length] < lv2[:min_length]
        min_version = self._minimum_compilers_version.get(
            str(self.settings.compiler))
        if not min_version:
            self.output.warning(f"{self.ref} recipe lacks information about the {self.settings.compiler} compiler support.")
        else:
            if lazy_lt_semver(str(self.settings.compiler.version), min_version):
                raise ConanInvalidConfiguration(f"{self.ref} requires C++{self._minimum_cpp_standard} support. The current compiler {self.settings.compiler} {self.settings.compiler.version} does not support it.")
    def requirements(self):
        if self.options.font_interface == "freetype":
            self.requires("freetype/2.10.4")
        if self.options.with_lua_bindings:
            self.requires("lua/5.3.5")
        if self.options.with_thirdparty_containers:
            # robin_hood.h is exposed through RmlUi's public Config.h,
            # hence transitive_headers=True (see _patch_sources).
            self.requires("robin-hood-hashing/3.11.3", transitive_headers=True)
    def layout(self):
        cmake_layout(self, src_folder="src")
    def source(self):
        get(self, **self.conan_data["sources"][self.version],
            destination=self.source_folder, strip_root=True)
    def generate(self):
        tc = CMakeToolchain(self)
        tc.cache_variables["BUILD_LUA_BINDINGS"] = self.options.with_lua_bindings
        tc.cache_variables["BUILD_SAMPLES"] = False
        tc.cache_variables["DISABLE_RTTI_AND_EXCEPTIONS"] = False
        tc.cache_variables["ENABLE_PRECOMPILED_HEADERS"] = True
        tc.cache_variables["ENABLE_TRACY_PROFILING"] = False
        tc.cache_variables["MATRIX_ROW_MAJOR"] = self.options.matrix_mode == "row_major"
        # font_interface=None disables RmlUi's default (freetype) font engine.
        tc.cache_variables["NO_FONT_INTERFACE_DEFAULT"] = not self.options.font_interface
        tc.cache_variables["NO_THIRDPARTY_CONTAINERS"] = not self.options.with_thirdparty_containers
        tc.generate()
        deps = CMakeDeps(self)
        deps.generate()
    def _patch_sources(self):
        apply_conandata_patches(self)
        # If we are using robin_hood hashing provided by conan, we need to change its include path
        if self.options.with_thirdparty_containers:
            config_path = os.path.join(self.source_folder,
                                       "Include", "RmlUi", "Config", "Config.h")
            replace_in_file(
                self, config_path, "\"../Core/Containers/robin_hood.h\"", "<robin_hood.h>")
    def build(self):
        self._patch_sources()
        cmake = CMake(self)
        cmake.configure()
        cmake.build()
    def package(self):
        copy(self, pattern="*LICENSE.txt", dst=os.path.join(self.package_folder, "licenses"), src=self.source_folder, excludes=("Samples/*", "Tests/*"))
        cmake = CMake(self)
        cmake.install()
    def METHOD_NAME(self):
        if self.options.matrix_mode == "row_major":
            self.cpp_info.defines.append("RMLUI_MATRIX_ROW_MAJOR")
        if not self.options.shared:
            self.cpp_info.defines.append("RMLUI_STATIC_LIB")
        if not self.options.with_thirdparty_containers:
            self.cpp_info.defines.append("RMLUI_NO_THIRDPARTY_CONTAINERS")
        # Link order matters: Lua bindings and debugger depend on the core.
        if self.options.with_lua_bindings:
            self.cpp_info.libs.append("RmlLua")
        self.cpp_info.libs.append("RmlDebugger")
        self.cpp_info.libs.append("RmlCore")
299,351 | async alarm arm night | """
Setup for different kinds of Tuya alarm control panels.
"""
import logging
from homeassistant.components.alarm_control_panel import AlarmControlPanelEntity
from homeassistant.components.alarm_control_panel.const import (
AlarmControlPanelEntityFeature as Feature,
)
from homeassistant.const import (
STATE_ALARM_ARMED_AWAY,
STATE_ALARM_ARMED_CUSTOM_BYPASS,
STATE_ALARM_ARMED_HOME,
STATE_ALARM_ARMED_NIGHT,
STATE_ALARM_ARMED_VACATION,
STATE_ALARM_DISARMED,
STATE_ALARM_TRIGGERED,
)
from .device import TuyaLocalDevice
from .helpers.config import async_tuya_setup_platform
from .helpers.device_config import TuyaEntityConfig
from .helpers.mixin import TuyaLocalEntity
_LOGGER = logging.getLogger(__name__)
async def async_setup_entry(hass, config_entry, async_add_entities):
    """Set up Tuya local alarm control panel entities from a config entry."""
    # Options take precedence over the static entry data on key collisions.
    merged_config = dict(config_entry.data)
    merged_config.update(config_entry.options)
    await async_tuya_setup_platform(
        hass,
        async_add_entities,
        merged_config,
        "alarm_control_panel",
        TuyaLocalAlarmControlPanel,
    )
class TuyaLocalAlarmControlPanel(TuyaLocalEntity, AlarmControlPanelEntity):
    """Representation of a Tuya Alarm Control Panel"""
    def __init__(self, device: TuyaLocalDevice, config: TuyaEntityConfig):
        """
        Initialise the alarm control panel.
        Args:
            device (TuyaLocalDevice): the device API instance
            config (TuyaEntityConfig): the configuration for this entity
        """
        super().__init__()
        dps_map = self._init_begin(device, config)
        self._alarm_state_dp = dps_map.get("alarm_state")
        self._trigger_dp = dps_map.get("trigger")
        self._init_end(dps_map)
        if not self._alarm_state_dp:
            raise AttributeError(f"{config.config_id} is missing an alarm_state dp")
        # Advertise only the arm modes that the device's alarm_state dp
        # actually supports.
        alarm_states = self._alarm_state_dp.values(device)
        if STATE_ALARM_ARMED_HOME in alarm_states:
            self._attr_supported_features |= Feature.ARM_HOME
        if STATE_ALARM_ARMED_AWAY in alarm_states:
            self._attr_supported_features |= Feature.ARM_AWAY
        if STATE_ALARM_ARMED_NIGHT in alarm_states:
            self._attr_supported_features |= Feature.ARM_NIGHT
        if STATE_ALARM_ARMED_VACATION in alarm_states:
            self._attr_supported_features |= Feature.ARM_VACATION
        if STATE_ALARM_ARMED_CUSTOM_BYPASS in alarm_states:
            self._attr_supported_features |= Feature.ARM_CUSTOM_BYPASS
        if self._trigger_dp:
            self._attr_supported_features |= Feature.TRIGGER
        # Code support not implemented
        self._attr_code_format = None
    @property
    def state(self):
        """Return the current alarm state."""
        # A set trigger dp overrides the regular alarm state.
        if self._trigger_dp and self._trigger_dp.get_value(self._device):
            return STATE_ALARM_TRIGGERED
        return self._alarm_state_dp.get_value(self._device)
    async def _alarm_send_command(self, cmd):
        """Set the alarm_state dp to cmd, if the device supports that state."""
        if cmd in self._alarm_state_dp.values(self._device):
            await self._alarm_state_dp.async_set_value(self._device, cmd)
        else:
            raise NotImplementedError()
    async def async_alarm_disarm(self, code=None):
        """Send disarm command"""
        await self._alarm_send_command(STATE_ALARM_DISARMED)
    async def async_alarm_arm_home(self, code=None):
        """Send arm home command"""
        await self._alarm_send_command(STATE_ALARM_ARMED_HOME)
    async def async_alarm_arm_away(self, code=None):
        """Send arm away command"""
        await self._alarm_send_command(STATE_ALARM_ARMED_AWAY)
    async def METHOD_NAME(self, code=None):
        """Send arm night command"""
        await self._alarm_send_command(STATE_ALARM_ARMED_NIGHT)
    async def async_alarm_arm_vacation(self, code=None):
        """Send arm vacation command"""
        await self._alarm_send_command(STATE_ALARM_ARMED_VACATION)
    async def async_alarm_arm_custom_bypass(self, code=None):
        """Send arm custom bypass command"""
        await self._alarm_send_command(STATE_ALARM_ARMED_CUSTOM_BYPASS)
    async def async_alarm_trigger(self, code=None):
        """Trigger the alarm, via the dedicated trigger dp if available."""
        if self._trigger_dp:
            await self._trigger_dp.async_set_value(self._device, True)
        else:
            await self._alarm_send_command(STATE_ALARM_TRIGGERED)
299,352 | configure | from conan import ConanFile
from conan.errors import ConanInvalidConfiguration
from conan.tools.build import check_min_cppstd
from conan.tools.cmake import CMake, CMakeDeps, CMakeToolchain, cmake_layout
from conan.tools.files import apply_conandata_patches, collect_libs, copy, export_conandata_patches, get, rmdir
import os
required_conan_version = ">=1.53.0"
class LibcdsConan(ConanFile):
    """Conan recipe for libcds (concurrent data structures library)."""
    name = "libcds"
    description = "C++11 library of Concurrent Data Structures."
    license = "BSL-1.0"
    topics = ("concurrent", "lock-free", "containers", "hazard-pointer", "rcu")
    homepage = "https://github.com/khizmax/libcds"
    url = "https://github.com/conan-io/conan-center-index"
    package_type = "library"
    settings = "os", "arch", "compiler", "build_type"
    options = {
        "shared": [True, False],
        "fPIC": [True, False],
    }
    default_options = {
        "shared": False,
        "fPIC": True,
    }
    def export_sources(self):
        export_conandata_patches(self)
    def config_options(self):
        # fPIC is meaningless on Windows.
        if self.settings.os == "Windows":
            del self.options.fPIC
    def METHOD_NAME(self):
        # fPIC is implied for shared libraries.
        if self.options.shared:
            self.options.rm_safe("fPIC")
    def layout(self):
        cmake_layout(self, src_folder="src")
    def requirements(self):
        self.requires("boost/1.81.0")
    def validate(self):
        if self.settings.compiler.get_safe("cppstd"):
            check_min_cppstd(self, 11)
        if self.settings.os == "Macos" and self.settings.arch == "armv8":
            raise ConanInvalidConfiguration("Macos M1 not supported (yet)")
    def source(self):
        get(self, **self.conan_data["sources"][self.version], strip_root=True)
    def generate(self):
        tc = CMakeToolchain(self)
        # Only build the library itself: no tests, coverage or sanitizers.
        tc.variables["WITH_TESTS"] = False
        tc.variables["WITH_TESTS_COVERAGE"] = False
        tc.variables["WITH_BOOST_ATOMIC"] = False
        tc.variables["WITH_ASAN"] = False
        tc.variables["WITH_TSAN"] = False
        tc.variables["ENABLE_UNIT_TEST"] = False
        tc.variables["ENABLE_STRESS_TEST"] = False
        tc.generate()
        cd = CMakeDeps(self)
        cd.generate()
    def build(self):
        apply_conandata_patches(self)
        cmake = CMake(self)
        cmake.METHOD_NAME()
        cmake.build()
    def package(self):
        copy(self, "LICENSE", src=self.source_folder, dst=os.path.join(self.package_folder, "licenses"))
        cmake = CMake(self)
        cmake.install()
        rmdir(self, os.path.join(self.package_folder, "lib", "cmake"))
    def package_info(self):
        # Upstream names the static target 'cds-s' and the shared one 'cds'.
        cmake_target = "cds" if self.options.shared else "cds-s"
        self.cpp_info.set_property("cmake_file_name", "LibCDS")
        self.cpp_info.set_property("cmake_target_name", f"LibCDS::{cmake_target}")
        # TODO: back to global scope in conan v2 once cmake_find_package* generators removed
        self.cpp_info.components["_libcds"].libs = collect_libs(self)
        if self.settings.os == "Windows" and not self.options.shared:
            self.cpp_info.components["_libcds"].defines = ["CDS_BUILD_STATIC_LIB"]
        if self.settings.os in ["Linux", "FreeBSD"]:
            self.cpp_info.components["_libcds"].system_libs = ["m", "pthread"]
        if self.settings.compiler in ["gcc", "clang", "apple-clang"] and self.settings.arch == "x86_64":
            # -mcx16 enables cmpxchg16b on x86_64 - presumably required by
            # libcds' wide atomics; TODO confirm against upstream docs.
            self.cpp_info.components["_libcds"].cxxflags = ["-mcx16"]
        self.cpp_info.components["_libcds"].requires = ["boost::boost"]
        # TODO: to remove in conan v2 once cmake_find_package* generators removed
        self.cpp_info.names["cmake_find_package"] = "LibCDS"
        self.cpp_info.names["cmake_find_package_multi"] = "LibCDS"
        self.cpp_info.components["_libcds"].names["cmake_find_package"] = cmake_target
        self.cpp_info.components["_libcds"].names["cmake_find_package_multi"] = cmake_target
        self.cpp_info.components["_libcds"].set_property("cmake_target_name", f"LibCDS::{cmake_target}")
299,353 | post role | import json
import os
from tempfile import SpooledTemporaryFile
from typing import Any, Callable, Dict, Iterable, Union
from fastapi import APIRouter, Depends, File, HTTPException, UploadFile, status
from libmambapy import bindings as libmamba_api
from quetz import authorization
from quetz.config import Config
from quetz.database import get_db_manager
from quetz.deps import get_rules
from . import db_models
router = APIRouter(tags=["content-trust"])
def assert_role(signable: Dict[str, Any], builder: Callable):
    """Build a role object from *signable*, mapping build failures to HTTP 400."""
    try:
        role = builder(signable)
    except RuntimeError:
        raise HTTPException(
            status_code=status.HTTP_400_BAD_REQUEST, detail="Invalid role definition"
        )
    return role
def assert_key_exists(key: str, db):
    """Raise HTTP 404 unless *key* is a registered repodata signing key.

    Bug fix: the previous version tested the truthiness of the Query object
    itself (``if not query``), which is always truthy, so the 404 could never
    be raised.  The query must actually be executed.
    """
    row = (
        db.query(db_models.RepodataSigningKey)
        .filter(db_models.RepodataSigningKey.public_key == key)
        .first()
    )
    if row is None:
        raise HTTPException(
            status_code=status.HTTP_404_NOT_FOUND, detail="Key not found"
        )
def assert_keys_exist(keys: Iterable[str], db):
    """Validate every key in *keys* against the signing-key table (404 on miss)."""
    for key in keys:
        assert_key_exists(key, db)
def post_role_file(file: Union[str, bytes], channel_name: str, builder: Callable):
    """Parse an uploaded role file, validate it with *builder*, and store it
    in the channel's package store.  Returns the built role object.

    NOTE(review): despite the annotation, *file* is used as a fastapi
    UploadFile (it has .file and .filename) - confirm and fix the annotation.
    """
    # SpooledTemporaryFile lacks a bound `seekable` method on some Python
    # versions; patch it through to the underlying file object.
    if type(file.file) is SpooledTemporaryFile and not hasattr(file, "seekable"):
        file.file.seekable = file.file._file.seekable
    file.file.seek(0, os.SEEK_END)
    file.file.seek(0)
    role = None
    with file.file as f:
        # Validate first; only a parseable, well-formed role gets stored.
        role = assert_role(json.load(f), builder)
        file.file.seek(0)
        Config().get_package_store().add_file(f.read(), channel_name, file.filename)
    return role
def role_builder(
    channel: str, role: str, delegation: "db_models.RoleDelegation"
) -> Callable:
    """Return a builder callable for the given content-trust *role* type.

    The returned callable takes the parsed role JSON and constructs the
    corresponding libmamba role object, verifying it against the keys and
    threshold delegated by the parent role.  Unknown role types return a
    builder that raises RuntimeError (translated to HTTP 400 by
    ``assert_role``).  The ``db_models`` annotation is quoted so the function
    definition does not evaluate it at import time.
    """

    def _delegated_full_keys():
        # Shared by key_mgr and pkg_mgr (previously duplicated): both are
        # verified against the delegated keys/threshold.
        return libmamba_api.RoleFullKeys(
            keys={
                k.public_key: libmamba_api.Key.from_ed25519(k.public_key)
                for k in delegation.keys
            },
            threshold=delegation.threshold,
        )

    def root_builder(json_dict: Dict[str, Any]):
        # The root role is self-validating; no delegated keys are needed.
        return libmamba_api.RootImpl(json.dumps(json_dict))

    def key_mgr_builder(json_dict: Dict[str, Any]):
        return libmamba_api.KeyMgr(
            json.dumps(json_dict), _delegated_full_keys(), libmamba_api.SpecImpl()
        )

    def pkg_mgr_builder(json_dict: Dict[str, Any]):
        return libmamba_api.PkgMgr(
            json.dumps(json_dict), _delegated_full_keys(), libmamba_api.SpecImpl()
        )

    def wrong_role(_: Dict[str, Any]):
        raise RuntimeError()

    builder = dict(root=root_builder, key_mgr=key_mgr_builder, pkg_mgr=pkg_mgr_builder)
    return builder.get(role, wrong_role)
@router.post("/api/content-trust/{channel}/roles", status_code=201, tags=["files"])
def METHOD_NAME(
    channel: str,
    type: str,
    file: UploadFile = File(...),
    force: bool = False,
    auth: authorization.Rules = Depends(get_rules),
):
    """Upload a content-trust role file for *channel*.

    The uploaded role is validated against the keys previously delegated for
    its type, stored in the package store, and recorded in the database
    together with the delegations it declares.  Requires channel ownership;
    409 if the role type already exists unless ``force`` is set.
    """
    auth.assert_channel_roles(channel, ["owner"])
    with get_db_manager() as db:
        existing_role_count = (
            db.query(db_models.ContentTrustRole)
            .filter(
                db_models.ContentTrustRole.channel == channel,
                db_models.ContentTrustRole.type == type,
            )
            .count()
        )
        if not force and existing_role_count:
            raise HTTPException(
                status_code=status.HTTP_409_CONFLICT,
                detail=f"Content trust role '{type}' already exists "
                f"for channel '{channel}'",
            )
        def get_self_delegation(nullable: bool = False):
            # Fetch the delegation (keys + threshold) previously declared for
            # this role type; only 'root' may legitimately have none yet.
            query = (
                db.query(db_models.RoleDelegation)
                .filter(
                    db_models.RoleDelegation.type == type,
                    db_models.RoleDelegation.channel == channel,
                )
                .one_or_none()
            )
            if not query and not nullable:
                raise HTTPException(
                    status_code=status.HTTP_400_BAD_REQUEST,
                    detail=f"'{type}' keys not yet delegated",
                )
            return query
        self_delegation = get_self_delegation(nullable=type == "root")
        # Validate and persist the uploaded file; returns the libmamba role.
        ct_role = post_role_file(
            file, channel, role_builder(channel, type, self_delegation)
        )
        db_role = db_models.ContentTrustRole(
            type=ct_role.type,
            channel=channel,
            version=ct_role.version,
            timestamp=ct_role.timestamp,
            expiration=ct_role.expires,
        )
        # add delegations
        for role_type, role_keys in ct_role.all_keys().items():
            keys = [
                db.merge(db_models.SigningKey(public_key=key_id))
                for key_id in role_keys.keys
            ]
            delegated_db_role = db_models.RoleDelegation(
                type=role_type,
                channel=channel,
                threshold=role_keys.threshold,
                keys=keys,
            )
            db_role.delegations.append(delegated_db_role)
        # set self_delegation if the role is 'root'
        if type == "root":
            # Error handling (missing 'root' delegation, etc.) is done by
            # mamba API when loading the root role from file
            # NOTE(review): the [0] below raises IndexError before the None
            # check ever runs if no 'root' delegation exists - confirm the
            # mamba-side validation makes that unreachable.
            self_delegation = [r for r in db_role.delegations if r.type == "root"][0]
            if not self_delegation:
                raise RuntimeError("self_delegation must not be None")
            # db_role.delegation = self_delegation
            self_delegation.consumers.append(db_role)
        db.add(db_role)
        db.commit()
@router.get("/api/content-trust/{channel}/roles")
def get_role(
    channel: str,
    type: str = None,
    auth: authorization.Rules = Depends(get_rules),
):
    """Return the delegated key sets of the channel's content-trust roles.

    NOTE(review): the ``type`` query parameter is accepted but never used to
    filter the query - confirm whether filtering by type was intended.
    NOTE(review): the set comprehension collects ``delegation.keys``
    relationship collections; if those are lists this raises TypeError
    (unhashable) - verify against the ORM model.
    """
    auth.assert_channel_roles(channel, ["owner", "maintainer", "member"])
    with get_db_manager() as db:
        query = (
            db.query(db_models.ContentTrustRole)
            .filter(db_models.ContentTrustRole.channel == channel)
            .all()
        )
        return {q.delegation.keys for q in query}
@router.get("/api/content-trust/new-key")
def get_new_key(secret: bool = False):
    """Generate, persist, and return a fresh ed25519 signing key pair.

    The response contains the public key (mamba JSON form); the private key
    is included only when ``secret`` is true.
    """
    public_key, private_key = libmamba_api.generate_ed25519_keypair()
    key = db_models.SigningKey(
        public_key=public_key,
        private_key=private_key,
    )
    mamba_key = libmamba_api.Key.from_ed25519(key.public_key)
    # Copy the value out before commit - the ORM instance may be expired
    # afterwards (presumably; confirm session expire_on_commit setting).
    private_key = key.private_key
    with get_db_manager() as db:
        db.add(key)
        db.commit()
    res = json.loads(mamba_key.json_str)
    if secret:
        res["secret"] = private_key
    return res
299,354 | release | import threading
import time
from contextlib import contextmanager
from queue import Queue
import pytest
from .. import _thread_cache
from .._thread_cache import ThreadCache, start_thread_soon
from .tutil import gc_collect_harder, slow
def test_thread_cache_basics():
    """A raising fn's outcome is delivered, and unwrap() re-raises it."""
    results = Queue()

    def boom():
        raise RuntimeError("hi")

    start_thread_soon(boom, results.put)
    delivered = results.get()
    with pytest.raises(RuntimeError, match="hi"):
        delivered.unwrap()
def test_thread_cache_deref():
    """The thread cache must not keep the submitted fn alive after it runs."""
    deleted = [False]

    class DelMe:
        def __call__(self):
            return 42

        def __del__(self):
            deleted[0] = True

    results = Queue()
    start_thread_soon(DelMe(), results.put)
    assert results.get().unwrap() == 42
    # The only reference to the DelMe instance was held by the cache; after
    # forced collection its finalizer must have run.
    gc_collect_harder()
    assert deleted[0]
@slow
def test_spawning_new_thread_from_deliver_reuses_starting_thread():
    # We know that no-one else is using the thread cache, so if we keep
    # submitting new jobs the instant the previous one is finished, we should
    # keep getting the same thread over and over. This tests both that the
    # thread cache is LIFO, and that threads can be assigned new work *before*
    # deliver exits.
    # Make sure there are a few threads running, so if we weren't LIFO then we
    # could grab the wrong one.
    q = Queue()
    COUNT = 5
    for _ in range(COUNT):
        start_thread_soon(lambda: time.sleep(1), lambda result: q.put(result))
    for _ in range(COUNT):
        q.get().unwrap()
    seen_threads = set()
    done = threading.Event()
    # Chain 6 no-op jobs (n = 5 .. 0); each next job is submitted from inside
    # the previous job's deliver callback.
    def deliver(n, _):
        print(n)
        seen_threads.add(threading.current_thread())
        if n == 0:
            done.set()
        else:
            start_thread_soon(lambda: None, lambda _: deliver(n - 1, _))
    start_thread_soon(lambda: None, lambda _: deliver(5, _))
    done.wait()
    # LIFO reuse means the whole chain ran on a single worker thread.
    assert len(seen_threads) == 1
@slow
def test_idle_threads_exit(monkeypatch):
    # Temporarily set the idle timeout to something tiny, to speed up the
    # test. (But non-zero, so that the worker loop will at least yield the
    # CPU.)
    monkeypatch.setattr(_thread_cache, "IDLE_TIMEOUT", 0.0001)
    q = Queue()
    start_thread_soon(lambda: None, lambda _: q.put(threading.current_thread()))
    seen_thread = q.get()
    # Since the idle timeout is (nearly) 0, after sleeping for 1 second, the
    # thread should have exited
    time.sleep(1)
    assert not seen_thread.is_alive()
@contextmanager
def _join_started_threads():
before = frozenset(threading.enumerate())
try:
yield
finally:
for thread in threading.enumerate():
if thread not in before:
thread.join(timeout=1.0)
assert not thread.is_alive()
def test_race_between_idle_exit_and_job_assignment(monkeypatch):
    # This is a lock where the first few times you try to acquire it with a
    # timeout, it waits until the lock is available and then pretends to time
    # out. Using this in our thread cache implementation causes the following
    # sequence:
    #
    # 1. start_thread_soon grabs the worker thread, assigns it a job, and
    #    releases its lock.
    # 2. The worker thread wakes up (because the lock has been released), but
    #    the JankyLock lies to it and tells it that the lock timed out. So the
    #    worker thread tries to exit.
    # 3. The worker thread checks for the race between exiting and being
    #    assigned a job, and discovers that it *is* in the process of being
    #    assigned a job, so it loops around and tries to acquire the lock
    #    again.
    # 4. Eventually the JankyLock admits that the lock is available, and
    #    everything proceeds as normal.
    class JankyLock:
        # NOTE(review): METHOD_NAME stands in for the lock's `release`
        # method in this snapshot — TODO confirm against upstream.
        def __init__(self):
            self._lock = threading.Lock()
            self._counter = 3

        def acquire(self, timeout=-1):
            got_it = self._lock.acquire(timeout=timeout)
            if timeout == -1:
                return True
            elif got_it:
                # Lie about the first few timed acquisitions to trigger the race.
                if self._counter > 0:
                    self._counter -= 1
                    self._lock.METHOD_NAME()
                    return False
                return True
            else:
                return False

        def METHOD_NAME(self):
            self._lock.METHOD_NAME()

    monkeypatch.setattr(_thread_cache, "Lock", JankyLock)
    with _join_started_threads():
        tc = ThreadCache()
        done = threading.Event()
        tc.start_thread_soon(lambda: None, lambda _: done.set())
        done.wait()
        # Let's kill the thread we started, so it doesn't hang around until the
        # test suite finishes. Doesn't really do any harm, but it can be confusing
        # to see it in debug output.
        monkeypatch.setattr(_thread_cache, "IDLE_TIMEOUT", 0.0001)
        tc.start_thread_soon(lambda: None, lambda _: None)
def test_raise_in_deliver(capfd):
    """An exception raised inside deliver() is reported on stderr and does
    not kill the worker thread."""
    seen_threads = set()

    def track_threads():
        seen_threads.add(threading.current_thread())

    def deliver(_):
        done.set()
        raise RuntimeError("don't do this")

    done = threading.Event()
    start_thread_soon(track_threads, deliver)
    done.wait()
    done = threading.Event()
    start_thread_soon(track_threads, lambda _: done.set())
    done.wait()
    # The same worker handled both jobs, so raising in deliver did not kill it.
    assert len(seen_threads) == 1
    err = capfd.readouterr().err
    assert "don't do this" in err
    assert "delivering result" in err
"""
tests.pytests.unit.beacons.test_diskusage
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Disk usage beacon test cases
"""
from collections import namedtuple
import pytest
import salt.beacons.diskusage as diskusage
from tests.support.mock import MagicMock, Mock, patch
@pytest.fixture
def configure_loader_modules():
    # No salt loader dunders need to be injected for these tests.
    return {}
@pytest.fixture
def stub_disk_partition():
    """Two fake POSIX partitions shaped like psutil.disk_partitions() rows."""
    return [
        namedtuple("partition", "device mountpoint fstype, opts")(
            "tmpfs", "/mnt/tmp", "tmpfs", "rw,nosuid,nodev,relatime,size=10240k"
        ),
        namedtuple("partition", "device mountpoint fstype, opts")(
            "/dev/disk0s2", "/", "hfs", "rw,local,rootfs,dovolfs,journaled,multilabel"
        ),
    ]
@pytest.fixture
def windows_stub_disk_partition():
    """Two fake Windows partitions (C:\\ fixed NTFS, D:\\ read-only CDFS)."""
    return [
        namedtuple("partition", "device mountpoint fstype, opts")(
            "C:\\", "C:\\", "NTFS", "rw,fixed"
        ),
        namedtuple("partition", "device mountpoint fstype, opts")(
            "D:\\", "D:\\", "CDFS", "ro,cdrom"
        ),
    ]
@pytest.fixture
def stub_disk_usage():
    """Fake psutil.disk_usage() results, consumed in order (one per partition)."""
    return [
        namedtuple("usage", "total used free percent")(1000, 500, 500, 50),
        namedtuple("usage", "total used free percent")(100, 75, 25, 25),
    ]
@pytest.fixture
def windows_stub_disk_usage():
    """A single fake psutil.disk_usage() result (50 percent used)."""
    return namedtuple("usage", "total used free percent")(1000, 500, 500, 50)
def METHOD_NAME():
    """A bare dict (not a list) must be rejected by validate()."""
    result = diskusage.validate({})
    assert result == (False, "Configuration for diskusage beacon must be a list.")
def test_empty_config():
    """A list containing a single empty dict passes validation."""
    result = diskusage.validate([{}])
    assert result == (True, "Valid beacon configuration")
def test_diskusage_match(stub_disk_usage, stub_disk_partition):
    """Beacon reports the mount whose usage reaches the configured percent."""
    disk_usage_mock = Mock(side_effect=stub_disk_usage)
    with patch("salt.utils.platform.is_windows", MagicMock(return_value=False)), patch(
        "psutil.disk_partitions", MagicMock(return_value=stub_disk_partition)
    ), patch("psutil.disk_usage", disk_usage_mock):
        config = [{"/": "50%"}]

        ret = diskusage.validate(config)
        assert ret == (True, "Valid beacon configuration")

        ret = diskusage.beacon(config)
        assert ret == [{"diskusage": 50, "mount": "/"}]
def test_diskusage_match_no_percent(stub_disk_usage, stub_disk_partition):
    """A bare integer threshold (no '%' suffix) behaves like the percent form."""
    disk_usage_mock = Mock(side_effect=stub_disk_usage)
    with patch("salt.utils.platform.is_windows", MagicMock(return_value=False)), patch(
        "psutil.disk_partitions", MagicMock(return_value=stub_disk_partition)
    ), patch("psutil.disk_usage", disk_usage_mock):
        # Test without the percent
        config = [{"/": 50}]

        ret = diskusage.validate(config)
        assert ret == (True, "Valid beacon configuration")

        ret = diskusage.beacon(config)
        assert ret == [{"diskusage": 50, "mount": "/"}]
def test_diskusage_nomatch(stub_disk_usage, stub_disk_partition):
    """With a 70% threshold the beacon must not return the 50%-usage payload."""
    disk_usage_mock = Mock(side_effect=stub_disk_usage)
    with patch("salt.utils.platform.is_windows", MagicMock(return_value=False)), patch(
        "psutil.disk_partitions", MagicMock(return_value=stub_disk_partition)
    ), patch("psutil.disk_usage", disk_usage_mock):
        config = [{"/": "70%"}]

        ret = diskusage.validate(config)
        assert ret == (True, "Valid beacon configuration")

        ret = diskusage.beacon(config)
        assert ret != [{"diskusage": 50, "mount": "/"}]
def test_diskusage_match_regex(stub_disk_usage, stub_disk_partition):
    # NOTE(review): this test is byte-for-byte identical to
    # test_diskusage_match and its config ("/") contains no regex
    # metacharacters, so it does not actually exercise regex matching —
    # consider using a real pattern or removing the duplicate.
    disk_usage_mock = Mock(side_effect=stub_disk_usage)
    with patch("salt.utils.platform.is_windows", MagicMock(return_value=False)), patch(
        "psutil.disk_partitions", MagicMock(return_value=stub_disk_partition)
    ), patch("psutil.disk_usage", disk_usage_mock):
        config = [{"/": "50%"}]

        ret = diskusage.validate(config)
        assert ret == (True, "Valid beacon configuration")

        ret = diskusage.beacon(config)
        assert ret == [{"diskusage": 50, "mount": "/"}]
def test_diskusage_windows_single_slash(
    windows_stub_disk_usage, windows_stub_disk_partition
):
    r"""
    This tests new behavior (C:\)
    """
    disk_usage_mock = Mock(return_value=windows_stub_disk_usage)
    with patch("salt.utils.platform.is_windows", MagicMock(return_value=True)):
        with patch(
            "psutil.disk_partitions",
            MagicMock(return_value=windows_stub_disk_partition),
        ), patch("psutil.disk_usage", disk_usage_mock):
            config = [{"C:\\": "50%"}]

            ret = diskusage.validate(config)
            assert ret == (True, "Valid beacon configuration")

            ret = diskusage.beacon(config)
            assert ret == [{"diskusage": 50, "mount": "C:\\"}]
def test_diskusage_windows_double_slash(
    windows_stub_disk_usage, windows_stub_disk_partition
):
    """
    This tests original behavior (C:\\)
    """
    disk_usage_mock = Mock(return_value=windows_stub_disk_usage)
    with patch("salt.utils.platform.is_windows", MagicMock(return_value=True)):
        with patch(
            "psutil.disk_partitions",
            MagicMock(return_value=windows_stub_disk_partition),
        ), patch("psutil.disk_usage", disk_usage_mock):
            # Doubled backslash in the config must normalize to the C:\ mount.
            config = [{"C:\\\\": "50%"}]

            ret = diskusage.validate(config)
            assert ret == (True, "Valid beacon configuration")

            ret = diskusage.beacon(config)
            assert ret == [{"diskusage": 50, "mount": "C:\\"}]
def test_diskusage_windows_lowercase(
    windows_stub_disk_usage, windows_stub_disk_partition
):
    r"""
    This tests lowercase drive letter (c:\)
    """
    disk_usage_mock = Mock(return_value=windows_stub_disk_usage)
    with patch("salt.utils.platform.is_windows", MagicMock(return_value=True)):
        with patch(
            "psutil.disk_partitions",
            MagicMock(return_value=windows_stub_disk_partition),
        ), patch("psutil.disk_usage", disk_usage_mock):
            # Lowercase drive letter in config must still match the C:\ mount.
            config = [{"c:\\": "50%"}]

            ret = diskusage.validate(config)
            assert ret == (True, "Valid beacon configuration")

            ret = diskusage.beacon(config)
            assert ret == [{"diskusage": 50, "mount": "C:\\"}]
def test_diskusage_windows_match_regex(
    windows_stub_disk_usage, windows_stub_disk_partition
):
    """A regex mount pattern must match every stubbed Windows drive letter."""
    disk_usage_mock = Mock(return_value=windows_stub_disk_usage)
    with patch("salt.utils.platform.is_windows", MagicMock(return_value=True)):
        with patch(
            "psutil.disk_partitions",
            MagicMock(return_value=windows_stub_disk_partition),
        ), patch("psutil.disk_usage", disk_usage_mock):
            config = [{"^[a-zA-Z]:\\": "50%"}]

            ret = diskusage.validate(config)
            assert ret == (True, "Valid beacon configuration")

            ret = diskusage.beacon(config)
            # Both C:\ and D:\ match the drive-letter pattern.
            _expected = [
                {"diskusage": 50, "mount": "C:\\"},
                {"diskusage": 50, "mount": "D:\\"},
            ]
            assert ret == _expected
import subprocess
import sys
import time
from pathlib import Path
from environment_tedge import TedgeEnvironment
"""
Validate the tedge-mapper-c8y does not loose last message from tedge-agent when it fails and comes back
Given a configured system
When `rolldice` package is installed
when a subscriber is started as `sudo tedge mqtt sub 'c8y/s/us'`
When tedge-agent is started as `sudo systemctl start tedge-agent.service`
When c8y mapper is started as `sudo systemctl start tedge-mapper-c8y.service`
When send a delete operation `sudo tedge mqtt pub "c8y/s/ds" "528,tedge,rolldice,,,delete"`
When c8y mapper is stopped `sudo systemctl stop tedge-mapper-c8y.service`
Wait for sometime for operation to be completed and agent to push the operation result.
When c8y mapper is restarted `sudo systemctl restart tedge-mapper-c8y.service`
Now c8y mapper receives the last update result message, process and forwards it to the cloud on `c8y/s/us`
Then validate subscriber output for `501,c8y_SoftwareUpdate`, for the status of operation
Then validate subscriber output for `503,c8y_SoftwareUpdate` for final result of operation
Then test has passed
"""
class MapperC8yReceiveLastMessageOnRestart(BaseTest):
    """System test: tedge-mapper-c8y must not lose the last message from
    tedge-agent when the mapper is stopped and restarted mid-operation."""

    # Absolute paths to the binaries driven by the test steps.
    systemctl = "/usr/bin/systemctl"
    tedge = "/usr/bin/tedge"
    sudo = "/usr/bin/sudo"
    apt = "/usr/bin/apt-get"
    mqtt_sub = "/usr/bin/mosquitto_sub"
    rm = "/usr/bin/rm"

    def setup(self):
        """Connect to c8y, install the test package, start a c8y/s/us
        subscriber, and publish a software-delete operation."""
        # Register the cleanup exactly once (the original registered it twice,
        # which ran the disconnect step twice at teardown).
        self.addCleanupFunction(self.METHOD_NAME)
        self.tedge_connect_c8y()
        self.startProcess(
            command=self.sudo,
            arguments=[self.apt, "install", "rolldice"],
            stdouterr="install",
        )
        self.startProcess(
            command=self.sudo,
            arguments=[self.tedge, "mqtt", "sub", "c8y/s/us"],
            stdouterr="tedge_sub",
            background=True,
        )
        self.startProcess(
            command=self.sudo,
            arguments=[
                self.tedge,
                "mqtt",
                "pub",
                "c8y/s/ds",
                "528,tedge,rolldice,,,delete",
            ],
            stdouterr="tedge_pub",
        )

    def execute(self):
        """Stop the mapper while the agent finishes the operation, then
        restart the mapper so it forwards the buffered result."""
        time.sleep(2)
        self.startProcess(
            command=self.sudo,
            arguments=[self.systemctl, "stop", "tedge-mapper-c8y.service"],
            stdouterr="mapper_stop",
        )
        self.startProcess(
            command=self.mqtt_sub,
            arguments=["-v", "-t", "tedge/commands/res/software/update"],
            stdouterr="tedge_sub_agent",
            background=True,
        )
        # check if the agent has completed the operation
        time.sleep(15)
        self.startProcess(
            command=self.sudo,
            arguments=[self.systemctl, "restart", "tedge-mapper-c8y.service"],
            stdouterr="mapper_restart",
        )
        # wait for the c8y mapper to process and publish result to cloud
        # and subscriber to capture the output and log it.
        time.sleep(30)
        # Stop the subscribers (the return value of startProcess is not needed).
        self.startProcess(
            command=self.sudo,
            arguments=["killall", "tedge", "mosquitto_sub"],
            stdouterr="kill_out",
        )

    def validate(self):
        """The c8y/s/us stream must contain both the 501 and 503
        c8y_SoftwareUpdate status messages."""
        self.log.info("Validate")
        self.assertGrep("tedge_sub.out", "501,c8y_SoftwareUpdate", contains=True)
        self.assertGrep("tedge_sub.out", "503,c8y_SoftwareUpdate", contains=True)

    def METHOD_NAME(self):
        """Cleanup hook: disconnect the device from Cumulocity."""
        self.log.info("Stop c8y-mapper and agent")
        self.startProcess(
            command=self.sudo,
            arguments=[self.tedge, "disconnect", "c8y"],
            stdouterr="connect_c8y",
        )

    def setup_mosquitto(self):
        """Reset mosquitto's persisted state (not called automatically)."""
        self.startProcess(
            command=self.sudo,
            arguments=[self.systemctl, "stop", "mosquitto.service"],
            stdouterr="mosquitto_stop",
        )
        self.startProcess(
            command=self.sudo,
            arguments=[self.rm, "/var/lib/mosquitto/mosquitto.db"],
            stdouterr="remove_db",
        )
        self.startProcess(
            command=self.sudo,
            arguments=[self.systemctl, "restart", "mosquitto.service"],
            stdouterr="restart_mosquitto",
        )
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
#
# Code generated by aaz-dev-tools
# --------------------------------------------------------------------------------------------
# pylint: skip-file
# flake8: noqa
from azure.cli.core.aaz import *
@register_command(
    "palo-alto cloudngfw local-rulestack list-country",
)
class ListCountry(AAZCommand):
    """List of countries for Rulestack

    :example: List of countries for Rulestack
        az palo-alto cloudngfw local-rulestack list-country -g MyResourceGroup -n MyLocalRulestacks
    """

    # NOTE(review): generated by aaz-dev-tools — regenerate via the codegen
    # spec rather than editing this class by hand.
    _aaz_info = {
        "version": "2022-08-29",
        "resources": [
            ["mgmt-plane", "/subscriptions/{}/resourcegroups/{}/providers/paloaltonetworks.cloudngfw/localrulestacks/{}/listcountries", "2022-08-29"],
        ]
    }

    def _handler(self, command_args):
        super()._handler(command_args)
        self._execute_operations()
        return self._output()

    _args_schema = None

    @classmethod
    def _build_arguments_schema(cls, *args, **kwargs):
        # Schema is built once and cached on the class.
        if cls._args_schema is not None:
            return cls._args_schema
        cls._args_schema = super()._build_arguments_schema(*args, **kwargs)

        # define Arg Group ""

        _args_schema = cls._args_schema
        _args_schema.local_rulestack_name = AAZStrArg(
            options=["-n", "--name", "--local-rulestack-name"],
            help="LocalRulestack resource name",
            required=True,
            id_part="name",
        )
        _args_schema.resource_group = AAZResourceGroupNameArg(
            required=True,
        )
        _args_schema.skip = AAZStrArg(
            options=["--skip"],
            help="LocalRulestack resource skip",
        )
        _args_schema.top = AAZIntArg(
            options=["--top"],
            help="LocalRulestack resource top",
        )
        return cls._args_schema

    def _execute_operations(self):
        self.pre_operations()
        self.LocalRulestacksListCountries(ctx=self.ctx)()
        self.post_operations()

    @register_callback
    def pre_operations(self):
        pass

    @register_callback
    def post_operations(self):
        pass

    def _output(self, *args, **kwargs):
        result = self.deserialize_output(self.ctx.vars.instance, client_flatten=True)
        return result

    class LocalRulestacksListCountries(AAZHttpOperation):
        CLIENT_TYPE = "MgmtClient"

        def __call__(self, *args, **kwargs):
            request = self.make_request()
            session = self.client.send_request(request=request, stream=False, **kwargs)
            if session.http_response.status_code in [200]:
                return self.on_200(session)

            return self.on_error(session.http_response)

        @property
        def url(self):
            return self.client.format_url(
                "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/PaloAltoNetworks.Cloudngfw/localRulestacks/{localRulestackName}/listCountries",
                **self.url_parameters
            )

        @property
        def method(self):
            return "POST"

        @property
        def error_format(self):
            return "MgmtErrorFormat"

        @property
        def url_parameters(self):
            parameters = {
                **self.serialize_url_param(
                    "localRulestackName", self.ctx.args.local_rulestack_name,
                    required=True,
                ),
                **self.serialize_url_param(
                    "resourceGroupName", self.ctx.args.resource_group,
                    required=True,
                ),
                **self.serialize_url_param(
                    "subscriptionId", self.ctx.subscription_id,
                    required=True,
                ),
            }
            return parameters

        @property
        def query_parameters(self):
            parameters = {
                **self.serialize_query_param(
                    "skip", self.ctx.args.skip,
                ),
                **self.serialize_query_param(
                    "top", self.ctx.args.top,
                ),
                **self.serialize_query_param(
                    "api-version", "2022-08-29",
                    required=True,
                ),
            }
            return parameters

        @property
        def METHOD_NAME(self):
            # NOTE(review): METHOD_NAME appears to stand in for
            # `header_parameters` in this snapshot — TODO confirm upstream.
            parameters = {
                **self.serialize_header_param(
                    "Accept", "application/json",
                ),
            }
            return parameters

        def on_200(self, session):
            data = self.deserialize_http_content(session)
            self.ctx.set_var(
                "instance",
                data,
                schema_builder=self._build_schema_on_200
            )

        _schema_on_200 = None

        @classmethod
        def _build_schema_on_200(cls):
            if cls._schema_on_200 is not None:
                return cls._schema_on_200

            cls._schema_on_200 = AAZObjectType()

            _schema_on_200 = cls._schema_on_200
            _schema_on_200.next_link = AAZStrType(
                serialized_name="nextLink",
            )
            _schema_on_200.value = AAZListType(
                flags={"required": True},
            )

            value = cls._schema_on_200.value
            value.Element = AAZObjectType()

            _element = cls._schema_on_200.value.Element
            _element.code = AAZStrType(
                flags={"required": True},
            )
            _element.description = AAZStrType()

            return cls._schema_on_200
class _ListCountryHelper:
    """Helper class for ListCountry"""
    # Intentionally empty: the codegen emits this container even when no
    # shared helpers are needed.
__all__ = ["ListCountry"]
import re
import pytest
import responses
from determined.common import api
from determined.common.experimental import model
from tests.fixtures import api_responses
_MASTER = "http://localhost:8080"
@pytest.fixture
def standard_session() -> api.Session:
    """An unauthenticated Session pointing at the local test master."""
    return api.Session(master=_MASTER, user=None, auth=None, cert=None)
@pytest.fixture
def sample_model(standard_session: api.Session) -> model.Model:
    """A Model built from the canned GetModel response fixture."""
    bindings_model = api_responses.sample_get_model().model
    return model.Model._from_bindings(bindings_model, standard_session)
@pytest.fixture
def sample_model_version(standard_session: api.Session) -> model.ModelVersion:
    """The first ModelVersion from the canned GetModelVersions fixture."""
    bindings_model_versions = api_responses.sample_get_model_versions().modelVersions
    return model.ModelVersion._from_bindings(bindings_model_versions[0], standard_session)
@responses.activate
def test_get_versions_gets_all_pages(sample_model: model.Model) -> None:
    """get_versions() must follow pagination and return every version."""
    model_versions_resp = api_responses.sample_get_model_versions()
    # Keep the canned response's model name consistent with the fixture.
    model_versions_resp.model.name = sample_model.name = "test_model"
    responses.add_callback(
        responses.GET,
        f"{_MASTER}/api/v1/models/{sample_model.name}/versions",
        # max_page_size=2 forces multiple pages to be fetched.
        callback=api_responses.serve_by_page(model_versions_resp, "modelVersions", max_page_size=2),
    )

    mvs = sample_model.get_versions()
    assert len(mvs) == len(model_versions_resp.modelVersions)
@responses.activate
def test_set_name_doesnt_update_local_on_rest_failure(
    sample_model_version: model.ModelVersion,
) -> None:
    """A failed PATCH must leave the local `name` attribute untouched."""
    sample_model_version.name = "test_version_name"
    responses.patch(
        re.compile(f"{_MASTER}/api/v1/models/{sample_model_version.model_name}.*"), status=400
    )

    # pytest.raises replaces the manual try/raise-AssertionError/except dance.
    with pytest.raises(api.errors.APIException):
        sample_model_version.set_name("new_version_name")
    assert sample_model_version.name == "test_version_name"
@responses.activate
def test_set_notes_doesnt_update_local_on_rest_failure(
    sample_model_version: model.ModelVersion,
) -> None:
    """A failed PATCH must leave the local `notes` attribute untouched."""
    sample_model_version.notes = "test notes"
    responses.patch(
        re.compile(f"{_MASTER}/api/v1/models/{sample_model_version.model_name}.*"), status=400
    )

    # pytest.raises replaces the manual try/raise-AssertionError/except dance.
    with pytest.raises(api.errors.APIException):
        sample_model_version.set_notes("new notes")
    assert sample_model_version.notes == "test notes"
@responses.activate
def test_add_metadata_doesnt_update_local_on_rest_failure(sample_model: model.Model) -> None:
    """A failed PATCH must not add the key to the local metadata cache."""
    sample_model.metadata = {}
    responses.patch(re.compile(f"{_MASTER}/api/v1/models/{sample_model.name}.*"), status=400)

    # pytest.raises replaces the manual try/raise-AssertionError/except dance.
    with pytest.raises(api.errors.APIException):
        sample_model.add_metadata({"test": "test"})
    assert "test" not in sample_model.metadata
@responses.activate
def test_remove_metadata_doesnt_update_local_on_rest_failure(sample_model: model.Model) -> None:
    """A failed PATCH must not remove the key from the local metadata cache."""
    sample_model.metadata = {"test": "test"}
    responses.patch(re.compile(f"{_MASTER}/api/v1/models/{sample_model.name}.*"), status=400)

    # pytest.raises replaces the manual try/raise-AssertionError/except dance.
    with pytest.raises(api.errors.APIException):
        sample_model.remove_metadata(["test"])
    assert "test" in sample_model.metadata
@responses.activate
def METHOD_NAME(sample_model: model.Model) -> None:
    """A failed POST must leave the local `archived` flag False."""
    sample_model.archived = False
    responses.post(re.compile(f"{_MASTER}/api/v1/models/{sample_model.name}.*"), status=400)

    # pytest.raises replaces the manual try/raise-AssertionError/except dance.
    with pytest.raises(api.errors.APIException):
        sample_model.archive()
    assert sample_model.archived is False
@responses.activate
def test_unarchive_doesnt_update_local_on_rest_failure(sample_model: model.Model) -> None:
    """A failed POST must leave the local `archived` flag True."""
    sample_model.archived = True
    responses.post(re.compile(f"{_MASTER}/api/v1/models/{sample_model.name}.*"), status=400)

    # pytest.raises replaces the manual try/raise-AssertionError/except dance.
    with pytest.raises(api.errors.APIException):
        sample_model.unarchive()
    assert sample_model.archived is True
import unittest
from test.support import bigmemtest, _2G
import sys
from ctypes import *
from ctypes.test import need_symbol
formats = "bBhHiIlLqQfd"
formats = c_byte, c_ubyte, c_short, c_ushort, c_int, c_uint, \
c_long, c_ulonglong, c_float, c_double, c_longdouble
class ArrayTestCase(unittest.TestCase):
    """Tests for ctypes array types (ARRAY / Array subclassing)."""

    def test_simple(self):
        # create classes holding simple numeric types, and check
        # various properties.
        init = list(range(15, 25))
        for fmt in formats:
            alen = len(init)
            int_array = ARRAY(fmt, alen)

            ia = int_array(*init)
            # length of instance ok?
            self.assertEqual(len(ia), alen)

            # slot values ok?
            values = [ia[i] for i in range(alen)]
            self.assertEqual(values, init)

            # out-of-bounds accesses should be caught
            with self.assertRaises(IndexError): ia[alen]
            with self.assertRaises(IndexError): ia[-alen-1]

            # change the items
            from operator import setitem
            new_values = list(range(42, 42+alen))
            [setitem(ia, n, new_values[n]) for n in range(alen)]
            values = [ia[i] for i in range(alen)]
            self.assertEqual(values, new_values)

            # are the items initialized to 0?
            ia = int_array()
            values = [ia[i] for i in range(alen)]
            self.assertEqual(values, [0] * alen)

            # Too many initializers should be caught
            self.assertRaises(IndexError, int_array, *range(alen*2))

        CharArray = ARRAY(c_char, 3)

        ca = CharArray(b"a", b"b", b"c")

        # Should this work? It doesn't:
        # CharArray("abc")
        self.assertRaises(TypeError, CharArray, "abc")

        self.assertEqual(ca[0], b"a")
        self.assertEqual(ca[1], b"b")
        self.assertEqual(ca[2], b"c")
        self.assertEqual(ca[-3], b"a")
        self.assertEqual(ca[-2], b"b")
        self.assertEqual(ca[-1], b"c")

        self.assertEqual(len(ca), 3)

        # cannot delete items
        from operator import delitem
        self.assertRaises(TypeError, delitem, ca, 0)

    def test_step_overflow(self):
        # A huge slice step must not overflow the slice arithmetic.
        a = (c_int * 5)()
        a[3::sys.maxsize] = (1,)
        self.assertListEqual(a[3::sys.maxsize], [1])
        a = (c_char * 5)()
        a[3::sys.maxsize] = b"A"
        self.assertEqual(a[3::sys.maxsize], b"A")
        a = (c_wchar * 5)()
        a[3::sys.maxsize] = u"X"
        self.assertEqual(a[3::sys.maxsize], u"X")

    def test_numeric_arrays(self):
        alen = 5

        numarray = ARRAY(c_int, alen)

        na = numarray()
        values = [na[i] for i in range(alen)]
        self.assertEqual(values, [0] * alen)

        na = numarray(*[c_int()] * alen)
        values = [na[i] for i in range(alen)]
        self.assertEqual(values, [0]*alen)

        na = numarray(1, 2, 3, 4, 5)
        values = [i for i in na]
        self.assertEqual(values, [1, 2, 3, 4, 5])

        na = numarray(*map(c_int, (1, 2, 3, 4, 5)))
        values = [i for i in na]
        self.assertEqual(values, [1, 2, 3, 4, 5])

    def test_classcache(self):
        # Same (type, length) pair must yield the identical cached class.
        self.assertIsNot(ARRAY(c_int, 3), ARRAY(c_int, 4))
        self.assertIs(ARRAY(c_int, 3), ARRAY(c_int, 3))

    def METHOD_NAME(self):
        # Failed with 0.9.8, reported by JUrner
        p = create_string_buffer(b"foo")
        sz = (c_char * 3).from_address(addressof(p))
        self.assertEqual(sz[:], b"foo")
        self.assertEqual(sz[::], b"foo")
        self.assertEqual(sz[::-1], b"oof")
        self.assertEqual(sz[::3], b"f")
        self.assertEqual(sz[1:4:2], b"o")
        self.assertEqual(sz.value, b"foo")

    @need_symbol('create_unicode_buffer')
    def test_from_addressW(self):
        # Wide-char variant of the from_address slicing test above.
        p = create_unicode_buffer("foo")
        sz = (c_wchar * 3).from_address(addressof(p))
        self.assertEqual(sz[:], "foo")
        self.assertEqual(sz[::], "foo")
        self.assertEqual(sz[::-1], "oof")
        self.assertEqual(sz[::3], "f")
        self.assertEqual(sz[1:4:2], "o")
        self.assertEqual(sz.value, "foo")

    def test_cache(self):
        # Array types are cached internally in the _ctypes extension,
        # in a WeakValueDictionary.  Make sure the array type is
        # removed from the cache when the itemtype goes away.  This
        # test will not fail, but will show a leak in the testsuite.

        # Create a new type:
        class my_int(c_int):
            pass
        # Create a new array type based on it:
        t1 = my_int * 1
        t2 = my_int * 1
        self.assertIs(t1, t2)

    def test_subclass(self):
        # _type_ and _length_ are inherited unless overridden.
        class T(Array):
            _type_ = c_int
            _length_ = 13
        class U(T):
            pass
        class V(U):
            pass
        class W(V):
            pass
        class X(T):
            _type_ = c_short
        class Y(T):
            _length_ = 187

        for c in [T, U, V, W]:
            self.assertEqual(c._type_, c_int)
            self.assertEqual(c._length_, 13)
            self.assertEqual(c()._type_, c_int)
            self.assertEqual(c()._length_, 13)

        self.assertEqual(X._type_, c_short)
        self.assertEqual(X._length_, 13)
        self.assertEqual(X()._type_, c_short)
        self.assertEqual(X()._length_, 13)

        self.assertEqual(Y._type_, c_int)
        self.assertEqual(Y._length_, 187)
        self.assertEqual(Y()._type_, c_int)
        self.assertEqual(Y()._length_, 187)

    def test_bad_subclass(self):
        # Subclassing Array without both _type_ and _length_ must fail.
        with self.assertRaises(AttributeError):
            class T(Array):
                pass
        with self.assertRaises(AttributeError):
            class T(Array):
                _type_ = c_int
        with self.assertRaises(AttributeError):
            class T(Array):
                _length_ = 13

    def test_bad_length(self):
        # Negative, non-integer and overflowing lengths are rejected.
        with self.assertRaises(ValueError):
            class T(Array):
                _type_ = c_int
                _length_ = - sys.maxsize * 2
        with self.assertRaises(ValueError):
            class T(Array):
                _type_ = c_int
                _length_ = -1
        with self.assertRaises(TypeError):
            class T(Array):
                _type_ = c_int
                _length_ = 1.87
        with self.assertRaises(OverflowError):
            class T(Array):
                _type_ = c_int
                _length_ = sys.maxsize * 2

    def test_zero_length(self):
        # _length_ can be zero.
        class T(Array):
            _type_ = c_int
            _length_ = 0

    def test_empty_element_struct(self):
        class EmptyStruct(Structure):
            _fields_ = []

        obj = (EmptyStruct * 2)()  # bpo37188: Floating point exception
        self.assertEqual(sizeof(obj), 0)

    def test_empty_element_array(self):
        class EmptyArray(Array):
            _type_ = c_int
            _length_ = 0

        obj = (EmptyArray * 2)()  # bpo37188: Floating point exception
        self.assertEqual(sizeof(obj), 0)

    def test_bpo36504_signed_int_overflow(self):
        # The overflow check in PyCArrayType_new() could cause signed integer
        # overflow.
        with self.assertRaises(OverflowError):
            c_char * sys.maxsize * 2

    @unittest.skipUnless(sys.maxsize > 2**32, 'requires 64bit platform')
    @bigmemtest(size=_2G, memuse=1, dry_run=False)
    def test_large_array(self, size):
        c_char * size
if __name__ == '__main__':
    unittest.main()
# Copyright (c) Princeton University.
# This source code is licensed under the BSD 3-Clause license found in the LICENSE file in the root directory of this source tree.
# Authors: Yiming Zuo
import bpy
import mathutils
import numpy as np
from numpy.random import uniform, normal, randint
from nodes.node_wrangler import Nodes, NodeWrangler
from nodes import node_utils
from nodes.color import color_category
from surfaces import surface
from assets.leaves.leaf_maple import LeafFactoryMaple
from assets.leaves.leaf_broadleaf import LeafFactoryBroadleaf
from assets.leaves.leaf_ginko import LeafFactoryGinko
from placement.factory import AssetFactory
def nodegroup_nodegroup_apply_wrap(nw: NodeWrangler, **kwargs):
    """Geometry-node graph that wraps incoming geometry around a spiral.

    kwargs: 'angle' (deg), 'rotation' (spiral turns), 'end_radius',
    'noise_level' — all consumed as node default values below.
    """
    # Code generated using version 2.4.3 of the node_transpiler

    group_input = nw.new_node(Nodes.GroupInput,
        expose_input=[('NodeSocketGeometry', 'Geometry', None)])

    # Pre-rotate the input geometry about Z by `angle` degrees.
    angle = nw.new_node(Nodes.Value,
        label='angle')
    angle.outputs[0].default_value = kwargs['angle']

    radians = nw.new_node(Nodes.Math,
        input_kwargs={0: angle},
        attrs={'operation': 'RADIANS'})

    combine_xyz_2 = nw.new_node(Nodes.CombineXYZ,
        input_kwargs={'Z': radians})

    transform_2 = nw.new_node(Nodes.Transform,
        input_kwargs={'Geometry': group_input.outputs["Geometry"], 'Rotation': combine_xyz_2})

    position_1 = nw.new_node(Nodes.InputPosition)

    separate_xyz = nw.new_node(Nodes.SeparateXYZ,
        input_kwargs={'Vector': position_1})

    # Build a reference spiral, then rescale it so its arc length matches
    # the geometry's Y extent (preserves surface area when wrapping).
    rotation = nw.new_node(Nodes.Value,
        label='rotation')
    rotation.outputs[0].default_value = kwargs['rotation']

    value = nw.new_node(Nodes.Value)
    value.outputs[0].default_value = 1.0

    end_radius = nw.new_node(Nodes.Value,
        label='end_radius')
    end_radius.outputs[0].default_value = kwargs['end_radius']

    spiral = nw.new_node('GeometryNodeCurveSpiral',
        input_kwargs={'Resolution': 1000, 'Rotations': rotation, 'Start Radius': value, 'End Radius': end_radius, 'Height': 0.0})

    curve_length = nw.new_node(Nodes.CurveLength,
        input_kwargs={'Curve': spiral})

    position = nw.new_node(Nodes.InputPosition)

    separate_xyz_1 = nw.new_node(Nodes.SeparateXYZ,
        input_kwargs={'Vector': position})

    attribute_statistic = nw.new_node(Nodes.AttributeStatistic,
        input_kwargs={'Geometry': transform_2, 2: separate_xyz_1.outputs["Y"]})

    subtract = nw.new_node(Nodes.Math,
        input_kwargs={0: attribute_statistic.outputs["Max"], 1: attribute_statistic.outputs["Min"]},
        attrs={'operation': 'SUBTRACT'})

    divide = nw.new_node(Nodes.Math,
        input_kwargs={0: curve_length, 1: subtract},
        attrs={'operation': 'DIVIDE'})

    divide_1 = nw.new_node(Nodes.Math,
        input_kwargs={0: value, 1: divide},
        attrs={'operation': 'DIVIDE'})

    divide_2 = nw.new_node(Nodes.Math,
        input_kwargs={0: end_radius, 1: divide},
        attrs={'operation': 'DIVIDE'})

    spiral_1 = nw.new_node('GeometryNodeCurveSpiral',
        input_kwargs={'Resolution': 1000, 'Rotations': rotation, 'Start Radius': divide_1, 'End Radius': divide_2, 'Height': 0.0})

    transform = nw.new_node(Nodes.Transform,
        input_kwargs={'Geometry': spiral_1, 'Rotation': (0.0, 1.5708, 3.1416)})

    # Jitter the spiral with low-frequency noise for organic variation.
    noise_texture = nw.new_node(Nodes.NoiseTexture,
        input_kwargs={'Scale': 2.0})

    subtract_1 = nw.new_node(Nodes.VectorMath,
        input_kwargs={0: noise_texture.outputs["Color"], 1: (0.5, 0.5, 0.5)},
        attrs={'operation': 'SUBTRACT'})

    noise_level = nw.new_node(Nodes.Value,
        label='noise_level')
    noise_level.outputs[0].default_value = kwargs['noise_level']

    multiply = nw.new_node(Nodes.VectorMath,
        input_kwargs={0: subtract_1.outputs["Vector"], 1: noise_level},
        attrs={'operation': 'MULTIPLY'})

    set_position_2 = nw.new_node(Nodes.SetPosition,
        input_kwargs={'Geometry': transform, 'Offset': multiply.outputs["Vector"]})

    # Map each point's Y coordinate to a position along the spiral and
    # offset it along the spiral's radial direction by its Z coordinate.
    map_range = nw.new_node(Nodes.MapRange,
        input_kwargs={'Value': separate_xyz_1.outputs["Y"], 1: attribute_statistic.outputs["Min"], 2: attribute_statistic.outputs["Max"]})

    sample_curve = nw.new_node(Nodes.SampleCurve,
        input_kwargs={'Curve': set_position_2, 'Factor': map_range.outputs["Result"]},
        attrs={'mode': 'FACTOR'})

    separate_xyz_2 = nw.new_node(Nodes.SeparateXYZ,
        input_kwargs={'Vector': sample_curve.outputs["Position"]})

    combine_xyz = nw.new_node(Nodes.CombineXYZ,
        input_kwargs={'X': separate_xyz.outputs["X"], 'Y': separate_xyz_2.outputs["Y"], 'Z': separate_xyz_2.outputs["Z"]})

    normalize = nw.new_node(Nodes.VectorMath,
        input_kwargs={0: sample_curve.outputs["Position"]},
        attrs={'operation': 'NORMALIZE'})

    multiply_1 = nw.new_node(Nodes.VectorMath,
        input_kwargs={0: separate_xyz.outputs["Z"], 1: normalize.outputs["Vector"]},
        attrs={'operation': 'MULTIPLY'})

    add = nw.new_node(Nodes.VectorMath,
        input_kwargs={0: combine_xyz, 1: multiply_1.outputs["Vector"]})

    set_position = nw.new_node(Nodes.SetPosition,
        input_kwargs={'Geometry': transform_2, 'Position': add.outputs["Vector"]})

    # Undo the initial Z rotation and re-center on the spiral start radius.
    subtract_2 = nw.new_node(Nodes.Math,
        input_kwargs={0: 0.0, 1: radians},
        attrs={'operation': 'SUBTRACT'})

    combine_xyz_3 = nw.new_node(Nodes.CombineXYZ,
        input_kwargs={'Z': subtract_2})

    transform_3 = nw.new_node(Nodes.Transform,
        input_kwargs={'Geometry': set_position, 'Rotation': combine_xyz_3})

    combine_xyz_4 = nw.new_node(Nodes.CombineXYZ,
        input_kwargs={'Z': divide_1})

    transform_4 = nw.new_node(Nodes.Transform,
        input_kwargs={'Geometry': transform_3, 'Translation': combine_xyz_4})

    group_output = nw.new_node(Nodes.GroupOutput,
        input_kwargs={'Geometry': transform_4})
class LeafFactoryWrapped(AssetFactory):
    """Asset factory that picks a random leaf factory and wraps the leaf
    geometry around a spiral curve for a curled appearance."""

    def __init__(self, factory_seed, season='autumn', coarse=False):
        super().__init__(factory_seed, coarse=coarse)
        # Candidate leaf generators; one is chosen at random per asset.
        self.factory_list = [
            LeafFactoryMaple(factory_seed, season=season, coarse=coarse),
            LeafFactoryBroadleaf(factory_seed, season=season, coarse=coarse),
            LeafFactoryGinko(factory_seed, season=season, coarse=coarse),
        ]

    def METHOD_NAME(self, **params):
        """Create one wrapped leaf mesh and return the Blender object."""
        fac_id = randint(len(self.factory_list))
        fac = self.factory_list[fac_id]

        # Random wrap parameters: bend angle (degrees), spiral turns,
        # log-uniform end radius, and positional noise amplitude.
        wrap_params = {
            'angle': uniform(-70, 70),
            'rotation': uniform(0.2, 2.0),
            'end_radius': np.exp(uniform(-2.0, 2.0)),
            'noise_level': uniform(0.0, 0.5)
        }

        obj = fac.METHOD_NAME()
        surface.add_geomod(obj, nodegroup_nodegroup_apply_wrap, apply=False, input_kwargs=wrap_params)
        bpy.ops.object.convert(target='MESH')
        return obj  # fix: was `return ob` (NameError at runtime)
#!/usr/bin/env python3
# Grabs the latest members data from data.parliament XML and update accordingly.
# Get data using crawl-datadotparl-members.py
import sys
import argparse
from collections import OrderedDict
import json
import re
from lxml import etree
from datetime import datetime
# Command-line interface: the script takes only a --verbose switch.
parser = argparse.ArgumentParser(description='Fetch latest member data from data.parliament.uk')
parser.add_argument('--verbose', action='store_true')
ARGS = parser.parse_args()

# Source XML files to use for the list of members
source_files = [
    'all-current-commons',
    'all-lords',
]

# Make lxml's ElementTree-compatible parser the default for every
# etree.parse() call below.
# NOTE(review): 'parser' is reused here, shadowing the argparse parser
# (which is no longer needed once ARGS has been parsed).
parser = etree.ETCompatXMLParser()
etree.set_default_parser(parser)
def slugify(s):
    """Turn a name into a lowercase, hyphen-separated identifier.

    Strips every character that is not a word character or a space, then
    replaces spaces with hyphens and lowercases the result.
    """
    # Raw string: '[^\w ]' without r-prefix relies on '\w' being an invalid
    # escape that Python passes through, which raises a DeprecationWarning
    # (and will eventually be a SyntaxError).
    return re.sub(r'[^\w ]', '', s).replace(' ', '-').lower()
def verbose(s):
    """Print s to stdout, but only when --verbose was given on the command line."""
    if not ARGS.verbose:
        return
    print(s)
class Popolo(object):
    """Accumulates organizations and memberships and writes them out as a
    Popolo-style JSON file with deterministic ordering."""

    def __init__(self):
        self.organizations = []
        self.memberships = []

    def output(self, fn):
        """Sort the collected data and write it as JSON to the path fn.

        BUGFIX: the fn argument was previously ignored in favour of a
        hard-coded '../../members/ministers-2010.json' path (the only caller
        passes that same path, so behaviour is unchanged for it).  The file
        is now also opened via a context manager so the handle is closed.
        """
        self.memberships = sorted(self.memberships, key=lambda x: (x['person_id'], x['id']))
        self.organizations = sorted(self.organizations, key=lambda x: x['id'])
        out = OrderedDict(sorted({
            'organizations': self.organizations,
            'memberships': self.memberships,
        }.items()))
        with open(fn, 'w') as f:
            json.dump(out, f, indent=2)

    def add_membership(self, mship):
        """Record a membership dict for later output."""
        self.memberships.append(mship)

    def add_organization(self, org, id=None, **kwargs):
        """Record an organization by name and return its id.

        The id defaults to a slug of the name; any extra keyword arguments
        are stored on the organization record.  Exact duplicates are only
        stored once.
        """
        id = id or slugify(org)
        org = OrderedDict(sorted({'id': id, 'name': org}.items()))
        org.update(kwargs)
        if org not in self.organizations:
            self.organizations.append(org)
        return id
# Module-level accumulator; Position.generate() below writes memberships and
# organizations into it, and the end of the script writes it to disk.
popolo = Popolo()
class TimePeriod(object):
    """Wraps an XML element that carries StartDate/EndDate child elements."""

    def __init__(self, el):
        self.el = el

    def make_date(self, s):
        """Return the YYYY-MM-DD part of the child element named s."""
        raw = self.el.find(s).text
        return datetime.strptime(raw, '%Y-%m-%dT00:00:00').strftime('%Y-%m-%d')

    @property
    def dates(self):
        """(start, end) date pair; an empty EndDate means the period is still
        open and is represented as '9999-12-31'."""
        start = self.make_date('StartDate')
        if self.el.find('EndDate').text:
            end = self.make_date('EndDate')
        else:
            end = '9999-12-31'
        return start, end
class Position(object):
    """Wraps a single position XML element and expands it into one or more
    membership records, splitting out any period(s) spent as Chair."""

    def __init__(self, position, positions):
        self.position = position
        # The parent Positions iterator; carries person / post-type context.
        self.list = positions

    def METHOD_NAME(self, chair_date, periods, to_date):
        # Append a Chair period and, when the chairship ends before to_date,
        # a trailing ordinary-member period covering the remainder.
        if chair_date[1] < to_date:
            periods.append({'position': 'Chair', 'fromdate': chair_date[0], 'todate': chair_date[1]})
            periods.append({'position': '', 'fromdate': chair_date[1], 'todate': to_date})
        else:
            periods.append({'position': 'Chair', 'fromdate': chair_date[0], 'todate': to_date})

    @property
    def time_periods(self):
        """Return a list of {'position', 'fromdate', 'todate'} dicts covering
        the whole position, with Chair stints broken out as separate entries."""
        position = self.position
        attrs = {}
        if self.list.name_is_dept:
            # Committee entries: Name holds the department, not a role title.
            attrs['position'] = ''
        elif position.find('HansardName').text is not None:
            attrs['position'] = position.find('HansardName').text
        else:
            attrs['position'] = position.find('Name').text
        position_dates = TimePeriod(self.position).dates
        attrs['fromdate'], attrs['todate'] = position_dates
        periods = [ attrs ]
        chairdates = position.find('ChairDates')
        if chairdates is None or not len(chairdates):
            # No chair information: the whole position is one period.
            return periods
        chair_dates = [TimePeriod(x).dates for x in chairdates]
        # De-duplicate the chair date ranges while preserving their order.
        unique_dates = []
        seen = {}
        for c in chair_dates:
            if c in seen: continue
            seen[c] = 1
            unique_dates.append(c)
        chair_dates = unique_dates
        if len(chair_dates) > 2:
            raise Exception("Doesn't currently handle more than two chair dates")
        # The first chair stint's "remainder" period runs until either the
        # second chair stint starts or the position itself ends.
        if len(chair_dates) == 2:
            first_to_date = chair_dates[1][0]
        else:
            first_to_date = position_dates[1]
        if chair_dates[0][0] <= position_dates[0]:
            # Chair from the very start: relabel the initial period itself.
            attrs['position'] = 'Chair'
            if chair_dates[0][1] < position_dates[1]:
                attrs['todate'] = chair_dates[0][1]
                periods.append({'position': '', 'fromdate': chair_dates[0][1], 'todate': first_to_date})
        else:
            # Ordinary membership first, then the chair stint.
            attrs['todate'] = chair_dates[0][0]
            self.METHOD_NAME(chair_dates[0], periods, first_to_date)
        if len(chair_dates) == 2:
            self.METHOD_NAME(chair_dates[1], periods, position_dates[1])
        return periods

    def generate(self):
        """Emit one membership into the module-level `popolo` accumulator for
        each computed time period of this position."""
        position = self.position
        for period in self.time_periods:
            attrs = OrderedDict()
            attrs['id'] = 'uk.parliament.data/Member/%s/%s/%s%s' % (
                self.list.person['datadotparl_id'], self.list.typ, position.attrib['Id'], self.counter)
            attrs['source'] = 'datadotparl/%s' % self.list.typ.lower()
            if self.list.name_is_dept:
                dept = position.find('Name').text
                # Suffix plain body names with "Committee" unless the name
                # already describes the body type.
                if not re.search('(?i)panel|committee|commission|court', dept):
                    dept = '%s Committee' % dept
            else:
                dept = ''
            if period['position']:
                attrs['role'] = period['position']
            attrs['person_id'] = self.list.person['id']
            # XXX Will need to cover Lords etc at some point
            attrs['organization_id'] = popolo.add_organization(dept or 'House of Commons')
            attrs['start_date'] = period['fromdate']
            # Open-ended periods (sentinel 9999-12-31) get no end_date.
            if period['todate'] != '9999-12-31':
                attrs['end_date'] = period['todate']
            popolo.add_membership(attrs)

    @property
    def counter(self):
        """Return a '/N' suffix that disambiguates repeated uses of the same
        position Id for this person (empty string for the first use)."""
        extra = ''
        id = self.position.attrib['Id']
        if self.list.counter.setdefault(id, 0):
            extra = '/%d' % self.list.counter[id]
        self.list.counter[id] += 1
        return extra
class Positions(object):
    """Iterates over the position elements inside a member's XML record."""

    def __init__(self, data, person):
        self.data = data
        self.person = person

    def filter(self, typ, name_is_dept=False):
        """Yield a Position wrapper for every <typ> element found under the
        <typ>s container, recording the post type and whether the Name element
        holds a department rather than a role."""
        container = self.data.find(typ + 's')
        if container is None:
            return
        self.counter = {}
        self.typ = typ
        self.name_is_dept = name_is_dept
        for element in container.findall(typ):
            yield Position(element, self)
def parse(fn, root_tag):
    """Parse the XML file fn and return its root element.

    Raises Exception if the file is not valid XML or if the root tag is not
    root_tag.
    """
    try:
        xml = etree.parse(fn).getroot()
    # Previously a bare `except:`, which also swallowed SystemExit and
    # KeyboardInterrupt and discarded the parser's error; narrow the catch
    # and chain the original cause.
    except Exception as e:
        raise Exception('Data from %s does not seem to be valid XML.' % fn) from e
    if xml.tag != root_tag:
        raise Exception('Root tag in %s is not "%s" as expected, got "%s"' % (fn, root_tag, xml.tag))
    return xml
# Collect the distinct data.parliament member ids from the slim source files.
parl_member_ids = set()
for source_file in source_files:
    parl_members_slim = parse('../../rawdata/datadotparl/' + source_file + '.xml', 'Members')
    parl_member_ids |= set(member.attrib['Member_Id'] for member in parl_members_slim)
    verbose('Found %d members from %s.xml.' % (len(parl_members_slim), source_file))
verbose('Total of %d distinct people.' % len(parl_member_ids))

# Enrich the data!
# Load each member's full biography XML, keyed by data.parliament id.
parl_members = dict()
for member_id in parl_member_ids:
    member_fullbio_xml = parse('../../rawdata/datadotparl/' + member_id + '.xml', 'Members')
    parl_members[member_id] = member_fullbio_xml.find('Member')

##########
# At this point parl_members contains a complete set of data.
##########

# Import people.xml
# Keep only people that carry a datadotparl_id identifier, copying the id
# onto the person dict for convenient lookup below.
# NOTE(review): the file handle from open() is never closed.
people_json = json.load(open('../../members/people.json'))
people = []
for person in people_json['persons']:
    for i in person.get('identifiers', []):
        if i['scheme'] == 'datadotparl_id':
            person['datadotparl_id'] = i['identifier']
            people.append(person)
verbose(str(len(people)) + ' people with data.parliament ID.')

# Generate memberships for every kind of post each person has held.
for person in people:
    try:
        parl_data = parl_members[person['datadotparl_id']]
    except KeyError:
        # NOTE(review): message is written without a trailing newline.
        sys.stderr.write(
            "we have an ID for {0} but no matching file downloaded from data.parliament" \
            .format(person['datadotparl_id'])
        )
        continue
    positions = Positions(parl_data, person)
    for position in positions.filter('GovernmentPost'):
        position.generate()
    for position in positions.filter('OppositionPost'):
        position.generate()
    for position in positions.filter('ParliamentaryPost'):
        position.generate()
    for position in positions.filter('Committee', name_is_dept=True):
        position.generate()
verbose('Done for all source files.')

# Write out the updated ministers file
popolo.output('../../members/ministers-2010.json')
verbose('ministers-2010.json updated!') |
299,362 | main | import logging
import os
import sys
# On Windows, append "cxx=,device=cpu" to PYTENSOR_FLAGS before pytensor is
# imported below — presumably so this cache script runs without a C++
# toolchain (TODO confirm).  Any flags already present are preserved.
if sys.platform == "win32":
    config_for_pytensor_cache_script = "cxx=,device=cpu"
    pytensor_flags = (
        os.environ["PYTENSOR_FLAGS"] if "PYTENSOR_FLAGS" in os.environ else ""
    )
    if pytensor_flags:
        pytensor_flags += ","
    pytensor_flags += config_for_pytensor_cache_script
    os.environ["PYTENSOR_FLAGS"] = pytensor_flags
import pytensor
import pytensor.compile.compiledir
from pytensor import config
from pytensor.link.c.basic import get_module_cache
_logger = logging.getLogger("pytensor.bin.pytensor-cache")
def print_help(exit_status):
    """Print usage for the pytensor-cache command, then exit the process.

    A non-zero exit_status additionally reports the unrecognized command line.
    """
    if exit_status:
        print(f"command \"{' '.join(sys.argv)}\" not recognized")
    for usage_line in (
        'Type "pytensor-cache" to print the cache location',
        'Type "pytensor-cache help" to print this help',
        'Type "pytensor-cache clear" to erase the cache',
        'Type "pytensor-cache list" to print the cache content',
        'Type "pytensor-cache unlock" to unlock the cache directory',
        'Type "pytensor-cache cleanup" to delete keys in the old format/code version',
        'Type "pytensor-cache purge" to force deletion of the cache directory',
        'Type "pytensor-cache basecompiledir" to print the parent of the cache directory',
        'Type "pytensor-cache basecompiledir list" to print the content of the base compile dir',
        'Type "pytensor-cache basecompiledir purge" to remove everything in the base compile dir, '
        'that is, erase ALL cache directories',
    ):
        print(usage_line)
    sys.exit(exit_status)
def METHOD_NAME():
    """Dispatch the pytensor-cache subcommand found on the command line.

    With no argument, prints the compile dir; unknown commands print help
    and exit non-zero.
    """
    if len(sys.argv) == 1:
        print(config.compiledir)
    elif len(sys.argv) == 2:
        if sys.argv[1] == "help":
            # print_help calls sys.exit, so falling through to the next
            # check never happens for "help".
            print_help(exit_status=0)
        if sys.argv[1] == "clear":
            # We skip the refresh on module cache creation because the refresh will
            # be done when calling clear afterwards.
            cache = get_module_cache(init_args=dict(do_refresh=False))
            cache.clear(
                unversioned_min_age=-1, clear_base_files=True, delete_if_problem=True
            )
            # Print a warning if some cached modules were not removed, so that the
            # user knows he should manually delete them, or call
            # pytensor-cache purge, # to properly clear the cache.
            items = [
                item
                for item in sorted(os.listdir(cache.dirname))
                if item.startswith("tmp")
            ]
            if items:
                _logger.warning(
                    "There remain elements in the cache dir that you may "
                    "need to erase manually. The cache dir is:\n %s\n"
                    'You can also call "pytensor-cache purge" to '
                    "remove everything from that directory." % config.compiledir
                )
                _logger.debug(f"Remaining elements ({len(items)}): {', '.join(items)}")
        elif sys.argv[1] == "list":
            pytensor.compile.compiledir.print_compiledir_content()
        elif sys.argv[1] == "cleanup":
            pytensor.compile.compiledir.cleanup()
            cache = get_module_cache(init_args=dict(do_refresh=False))
            cache.clear_old()
        elif sys.argv[1] == "unlock":
            pytensor.compile.compilelock.force_unlock(config.compiledir)
            print("Lock successfully removed!")
        elif sys.argv[1] == "purge":
            pytensor.compile.compiledir.compiledir_purge()
        elif sys.argv[1] == "basecompiledir":
            # Simply print the base_compiledir
            print(pytensor.config.base_compiledir)
        else:
            print_help(exit_status=1)
    elif len(sys.argv) == 3 and sys.argv[1] == "basecompiledir":
        if sys.argv[2] == "list":
            pytensor.compile.compiledir.basecompiledir_ls()
        elif sys.argv[2] == "purge":
            pytensor.compile.compiledir.basecompiledir_purge()
        else:
            print_help(exit_status=1)
    else:
        print_help(exit_status=1)
# Script entry point.
if __name__ == "__main__":
    METHOD_NAME()
299,363 | test place ko shape not json | # Copyright (c) 2001-2022, Hove and/or its affiliates. All rights reserved.
#
# This file is part of Navitia,
# the software to build cool stuff with public transport.
#
# Hope you'll enjoy and contribute to this project,
# powered by Hove (www.hove.com).
# Help us simplify mobility and open public transport:
# a non ending quest to the responsive locomotion way of traveling!
#
# LICENCE: This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Stay tuned using
# twitter @navitia
# channel `#navitia` on riot https://riot.im/app/#/room/#navitia:matrix.org
# https://groups.google.com/d/forum/navitia
# www.navitia.io
from __future__ import absolute_import, print_function, unicode_literals, division
import logging
from .tests_mechanism import AbstractTestFixture, dataset
from .check_utils import *
def valid_autocomplete_with_one_stop_area(response):
    """Check that the response holds exactly one place: the 'Gare' stop area."""
    assert len(response['links']) == 1
    links = get_not_null(response, 'links')
    places = get_not_null(response, 'places')
    assert len(links) == 1
    assert len(places) == 1
    only_place = places[0]
    assert only_place['embedded_type'] == 'stop_area'
    assert only_place['name'] == 'Gare (Quimper)'
def valid_autocomplete_with_multi_object(response):
    """Check a 10-element response: 1 admin region, 6 stop areas, 3 addresses."""
    links = get_not_null(response, 'links')
    places = get_not_null(response, 'places')
    assert len(links) == 2
    assert len(places) == 10
    assert places[0]['embedded_type'] == 'administrative_region'
    assert places[1]['embedded_type'] == 'stop_area'
    assert places[1]['name'] == 'IUT (Quimper)'
    # Positions 2..6 are further stop areas, 7..9 are addresses.
    for stop_area in places[2:7]:
        assert stop_area['embedded_type'] == 'stop_area'
    for address in places[7:10]:
        assert address['embedded_type'] == 'address'
def is_response_empty(response):
    """Assert that the response carries no links and no places at all."""
    assert not response['links']
    assert 'places' not in response
@dataset({"main_autocomplete_test": {}})
class TestAutocomplete(AbstractTestFixture):
    """
    Test the autocomplete responses
    """

    def test_autocomplete_without(self):
        """
        Test of empty result
        """
        response = self.query_region("places?q=bob marley", display=False)
        is_response_empty(response)

    def test_autocomplete_with_one_stop_area(self):
        """
        Test with one object in the result
        """
        response = self.query_region("places?q=Gare", display=False)
        is_valid_autocomplete(response, 2)
        valid_autocomplete_with_one_stop_area(response)

    def test_autocomplete_with_multi_objects(self):
        """
        Test with 10 objects of different types in the result
        """
        response = self.query_region("places?q=quimper", display=False)
        is_valid_autocomplete(response, 2)
        valid_autocomplete_with_multi_object(response)

    def test_places_coords(self):
        # Autocomplete restricted to stop points, addressed by coverage coords.
        coords = '{lon};{lat}'.format(lon=2, lat=3)
        response = self.query(
            'v1/coverage/{coords}/places?q={q}&type[]=stop_point'.format(coords=coords, q='Becharles')
        )
        places = get_not_null(response, 'places')
        assert len(places) == 1
        assert places[0]['id'] == 'stop_point:Becharles'

    def test_place_uri_coords(self):
        # Direct place lookup by uri, addressed by coverage coords.
        coords = '{lon};{lat}'.format(lon=2, lat=3)
        response = self.query(
            'v1/coverage/{coords}/places/{id}'.format(coords=coords, id='stop_point:Becharles')
        )
        places = get_not_null(response, 'places')
        assert len(places) == 1
        assert places[0]['id'] == 'stop_point:Becharles'

    def test_place_ok_shape(self):
        # A valid GeoJSON Polygon shape must be accepted (no error raised).
        shape = '{"type":"Feature","geometry":{"type":"Polygon","coordinates":\
            [[[2.283,48.896],[2.280,48.818],[2.417,48.818],[2.416,48.897],[2.283,48.896]]]}}'
        _ = self.query_region("places?q=Gare&shape={}".format(shape))

    def test_place_ko_shape_with_empty_json_object(self):
        # An empty JSON object is not a valid shape: expect HTTP 400.
        _, status = self.query_no_assert('/v1/coverage/main_autocomplete_test/places?q=Gare&shape={}')
        assert status == 400

    def METHOD_NAME(self):
        # A shape that is not JSON at all: expect HTTP 400.
        _, status = self.query_no_assert('/v1/coverage/main_autocomplete_test/places?q=Gare&shape=toto')
        assert status == 400

    def test_place_ko_shape_multipolygon_not_yet_accepted(self):
        # NOTE(review): "MultiPolygo"/"coordiates" look misspelled — possibly
        # intentional malformed input, possibly a typo; the test only checks
        # for a 400 either way.  Confirm intent before "fixing".
        multipolygon = '{"type":"Feature","geometry":{"type":"MultiPolygo","coordiates":\
            [[[[2.4,48.6],[2.8,48.6],[2.7,48.9],[2.4,48.6]]],\
            [[[2.1,48.9],[2.2,48.6],[2.4,48.9],[2.1,48.9]]]]}}'
        _, status = self.query_no_assert(
            '/v1/coverage/main_autocomplete_test/places?q=Gare&shape={}'.format(multipolygon)
        )
        assert status == 400

    def test_visibility(self):
        """
        Test if visible parameters (way struct) is taken into account
        data :
        quai NEUF (Quimper) with visible=true
        input/output quai NEUF (Quimper) with visible=false
        """
        response = self.query_region("places?q=quai NEUF", display=False)
        is_valid_autocomplete(response, 2)
        places = get_not_null(response, 'places')
        assert len(places) == 1
        assert places[0]['name'] == 'quai NEUF (Quimper)'

    # NOTE(review): "invald" in the method name is a typo for "invalid";
    # left unchanged here to keep the test id stable.
    def test_place_with_shape_scope_invald(self):
        _, status = self.query_no_assert('/v1/coverage/main_autocomplete_test/places?q=Gare&shape_scope[]=bob')
        assert status == 400

    def test_place_with_one_shape_scope_accepted(self):
        _, status = self.query_no_assert('/v1/coverage/main_autocomplete_test/places?q=Gare&shape_scope[]=poi')
        assert status == 200

    def test_place_with_many_shape_scope_accepted(self):
        _, status = self.query_no_assert(
            '/v1/coverage/main_autocomplete_test/places?q=Gare'
            '&shape_scope[]=poi&shape_scope[]=street&shape_scope[]=addr'
        )
        assert status == 200
299,364 | cache info | # -*- coding: utf-8 -*-
"""
pint.compat.lrucache
~~~~~~~~~~~~~~~~~~~~
LRU (least recently used) cache backport.
From https://code.activestate.com/recipes/578078-py26-and-py30-backport-of-python-33s-lru-cache/
:copyright: 2004, Raymond Hettinger,
:license: MIT License
"""
from collections import namedtuple
from functools import update_wrapper
from threading import RLock
# Named tuple returned by the wrapper's cache statistics function:
# hit/miss counters plus the configured and current cache sizes.
_CacheInfo = namedtuple("CacheInfo", ["hits", "misses", "maxsize", "currsize"])
class _HashedSeq(list):
__slots__ = 'hashvalue'
def __init__(self, tup, hash=hash):
self[:] = tup
self.hashvalue = hash(tup)
def __hash__(self):
return self.hashvalue
def _make_key(args, kwds, typed,
             kwd_mark = (object(),),
             fasttypes = set((int, str, frozenset, type(None))),
             sorted=sorted, tuple=tuple, type=type, len=len):
    'Make a cache key from optionally typed positional and keyword arguments'
    # The default arguments bind shared objects once at definition time:
    # kwd_mark is a unique sentinel separating positional from keyword parts,
    # and the builtins are localized for faster lookup in hot paths.
    key = args
    if kwds:
        sorted_items = sorted(kwds.items())
        key += kwd_mark
        for item in sorted_items:
            key += item
    if typed:
        # Append argument types so e.g. f(3) and f(3.0) get distinct keys.
        key += tuple(type(v) for v in args)
        if kwds:
            key += tuple(type(v) for k, v in sorted_items)
    elif len(key) == 1 and type(key[0]) in fasttypes:
        # Single argument of a cheap-to-hash builtin type: use it directly.
        return key[0]
    return _HashedSeq(key)
def lru_cache(maxsize=100, typed=False):
    """Least-recently-used cache decorator.

    If *maxsize* is set to None, the LRU features are disabled and the cache
    can grow without bound.

    If *typed* is True, arguments of different types will be cached separately.
    For example, f(3.0) and f(3) will be treated as distinct calls with
    distinct results.

    Arguments to the cached function must be hashable.

    View the cache statistics named tuple (hits, misses, maxsize, currsize) with
    f.cache_info(). Clear the cache and statistics with f.cache_clear().
    Access the underlying function with f.__wrapped__.

    See: http://en.wikipedia.org/wiki/Cache_algorithms#Least_Recently_Used

    """

    # Users should only access the lru_cache through its public API:
    #       cache_info, cache_clear, and f.__wrapped__
    # The internals of the lru_cache are encapsulated for thread safety and
    # to allow the implementation to change (including a possible C version).

    def decorating_function(user_function):

        cache = dict()
        stats = [0, 0]                  # make statistics updateable non-locally
        HITS, MISSES = 0, 1             # names for the stats fields
        make_key = _make_key
        cache_get = cache.get           # bound method to lookup key or return None
        _len = len                      # localize the global len() function
        lock = RLock()                  # because linkedlist updates aren't threadsafe
        root = []                       # root of the circular doubly linked list
        root[:] = [root, root, None, None]      # initialize by pointing to self
        nonlocal_root = [root]                  # make updateable non-locally
        PREV, NEXT, KEY, RESULT = 0, 1, 2, 3    # names for the link fields

        if maxsize == 0:

            def wrapper(*args, **kwds):
                # no caching, just do a statistics update after a successful call
                result = user_function(*args, **kwds)
                stats[MISSES] += 1
                return result

        elif maxsize is None:

            def wrapper(*args, **kwds):
                # simple caching without ordering or size limit
                key = make_key(args, kwds, typed)
                result = cache_get(key, root)   # root used here as a unique not-found sentinel
                if result is not root:
                    stats[HITS] += 1
                    return result
                result = user_function(*args, **kwds)
                cache[key] = result
                stats[MISSES] += 1
                return result

        else:

            def wrapper(*args, **kwds):
                # size limited caching that tracks accesses by recency
                key = make_key(args, kwds, typed) if kwds or typed else args
                with lock:
                    link = cache_get(key)
                    if link is not None:
                        # record recent use of the key by moving it to the front of the list
                        root, = nonlocal_root
                        link_prev, link_next, key, result = link
                        link_prev[NEXT] = link_next
                        link_next[PREV] = link_prev
                        last = root[PREV]
                        last[NEXT] = root[PREV] = link
                        link[PREV] = last
                        link[NEXT] = root
                        stats[HITS] += 1
                        return result
                # The user function runs outside the lock, so another thread
                # may insert the same key concurrently (handled below).
                result = user_function(*args, **kwds)
                with lock:
                    root, = nonlocal_root
                    if key in cache:
                        # getting here means that this same key was added to the
                        # cache while the lock was released.  since the link
                        # update is already done, we need only return the
                        # computed result and update the count of misses.
                        pass
                    elif _len(cache) >= maxsize:
                        # use the old root to store the new key and result
                        oldroot = root
                        oldroot[KEY] = key
                        oldroot[RESULT] = result
                        # empty the oldest link and make it the new root
                        root = nonlocal_root[0] = oldroot[NEXT]
                        oldkey = root[KEY]
                        # oldvalue is unused; kept to mirror the original recipe.
                        oldvalue = root[RESULT]
                        root[KEY] = root[RESULT] = None
                        # now update the cache dictionary for the new links
                        del cache[oldkey]
                        cache[key] = oldroot
                    else:
                        # put result in a new link at the front of the list
                        last = root[PREV]
                        link = [last, root, key, result]
                        last[NEXT] = root[PREV] = cache[key] = link
                    stats[MISSES] += 1
                return result

        def METHOD_NAME():
            """Report cache statistics"""
            with lock:
                return _CacheInfo(stats[HITS], stats[MISSES], maxsize, len(cache))

        def cache_clear():
            """Clear the cache and cache statistics"""
            with lock:
                cache.clear()
                root = nonlocal_root[0]
                root[:] = [root, root, None, None]
                stats[:] = [0, 0]

        wrapper.__wrapped__ = user_function
        wrapper.METHOD_NAME = METHOD_NAME
        wrapper.cache_clear = cache_clear
        return update_wrapper(wrapper, user_function)

    return decorating_function
299,365 | test coeff | import numpy as np
import unittest
import random
from migen import *
from misoc.cores import fir
class TestDSP(unittest.TestCase):
    """Tests for the fir.DSP block (signal widths and pipeline sequencing)."""

    def setUp(self):
        self.dut = fir.DSP()

    def test_init(self):
        """Port widths match the declared DSP geometry."""
        self.assertEqual(len(self.dut.a), 24)
        self.assertEqual(len(self.dut.b), 18)
        self.assertEqual(len(self.dut.c), 48)
        self.assertEqual(len(self.dut.d), 24)
        self.assertEqual(len(self.dut.p), 48)
        self.assertEqual(len(self.dut.m), 48)

    def test_seq(self):
        """Drive a/b/c/d over several cycles and check each pipeline stage:
        registered inputs, pre-subtract (a - d), multiply, then accumulate."""
        def gen():
            a, b, c, d = 0x123, -0x456, 0x789, 0x357
            yield self.dut.a.eq(a)
            yield self.dut.d.eq(d)
            yield self.dut.presub.eq(1)
            yield
            self.assertEqual((yield self.dut.ar), a)
            self.assertEqual((yield self.dut.dr), d)
            yield self.dut.b.eq(b)
            yield
            self.assertEqual((yield self.dut.br), b)
            self.assertEqual((yield self.dut.adr), a - d)
            yield self.dut.c.eq(c)
            yield
            self.assertEqual((yield self.dut.cr), c)
            self.assertEqual((yield self.dut.mr), (a - d)*b)
            yield
            self.assertEqual((yield self.dut.pr), (a - d)*b + c)
        run_simulation(self.dut, gen())
class TestSRStorage(unittest.TestCase):
    """Tests for the fir.SRStorage shift-register storage (depth 3, width 8)."""

    def setUp(self):
        self.dut = fir.SRStorage(3, 8)

    def test_init(self):
        """Load and output endpoints carry 8-bit data."""
        self.assertEqual(len(self.dut.load.data), 8)
        self.assertEqual(len(self.dut.out.data), 8)

    def load(self, d):
        """Simulation generator: feed each value of d through the load
        endpoint with a stb/ack handshake and random inter-item gaps."""
        yield self.dut.load.eop.eq(1)
        for i in d:
            for _ in range(random.randint(0, 15)):
                yield
            yield self.dut.load.data.eq(i)
            yield self.dut.load.stb.eq(1)
            yield
            while not (yield self.dut.load.ack):
                yield
            yield self.dut.load.stb.eq(0)

    @passive
    def retrieve(self, o):
        """Passive simulation generator: collect output bursts into o,
        starting a new sublist after each end-of-packet."""
        o.append([])
        while True:
            for _ in range(random.randint(0, 4)):
                yield
            yield self.dut.out.ack.eq(1)
            yield
            while not (yield self.dut.out.stb):
                yield
            o[-1].append((yield self.dut.out.data))
            if (yield self.dut.out.eop):
                o.append([])
            yield self.dut.out.ack.eq(0)

    def test_seq(self):
        """Each emitted burst is a 3-wide sliding window over the input."""
        o = []
        random.seed(42)  # deterministic handshake timing
        run_simulation(self.dut, [self.load(range(10)), self.retrieve(o)])
        for i, oi in enumerate(o[2:-1]):
            with self.subTest(i=i):
                if not oi:
                    continue
                self.assertEqual(oi, list(range(i, i + 3)))
def feed(endpoint, x, maxwait=20):
    """Simulation generator: push each value of x into a stream endpoint.

    Before each item it idles a random number of cycles (up to maxwait),
    then performs a stb/ack handshake on the endpoint's data/stb/ack signals.
    """
    for i in x:
        for _ in range(random.randint(0, maxwait)):
            yield
        yield endpoint.data.eq(int(i))
        yield endpoint.stb.eq(1)
        yield
        while not (yield endpoint.ack):
            yield
        yield endpoint.stb.eq(0)
@passive
def retrieve(endpoint, o, maxwait=10):
    """Passive simulation generator: drain a stream endpoint into list o.

    Never terminates on its own (@passive); idles a random number of cycles
    (up to maxwait) before asserting ack for each transfer.
    """
    yield
    while True:
        for _ in range(random.randint(0, maxwait)):
            yield
        yield endpoint.ack.eq(1)
        yield
        while not (yield endpoint.stb):
            yield
        o.append((yield endpoint.data))
        yield endpoint.ack.eq(0)
class TestMACFIR(unittest.TestCase):
    """Tests for the multiply-accumulate FIR cores against numpy convolution."""

    def test_init(self):
        """Endpoint widths match the sample/coefficient/output formats."""
        dut = fir.MACFIR(n=10, scale=0)
        self.assertEqual(len(dut.sample.load.data), 24)
        self.assertEqual(len(dut.coeff.load.data), 18)
        self.assertEqual(len(dut.output.data), 48)

    def setcoeff(self, c, h):
        """Simulation generator: write coefficient list h into register bank c."""
        for i, bi in enumerate(h):
            yield c[i].eq(int(bi))

    def test_run(self):
        """MACFIR output matches numpy convolution of h with x."""
        x = np.arange(20) + 1
        h = np.arange(10) + 1
        dut = fir.MACFIR(n=len(h), scale=0)
        o = []
        random.seed(42)  # deterministic handshake timing
        # Coefficients are loaded reversed to match the MAC tap order.
        run_simulation(dut, [self.setcoeff(dut.coeff.sr, h[::-1]),
            feed(dut.sample.load, x), retrieve(dut.output, o)])
        p = np.convolve(h, x)
        self.assertEqual(o, list(p[:len(o)]))

    def test_sym(self):
        """SymMACFIR applies the mirrored coefficient set [h, reversed(h)]."""
        x = np.arange(20) + 1
        h = np.arange(5) + 1
        dut = fir.SymMACFIR(n=len(h), scale=0)
        o = []
        random.seed(42)
        run_simulation(dut, [self.setcoeff(dut.coeff.sr, h[::-1]),
            feed(dut.sample.load, x), retrieve(dut.output, o)])
        hh = np.r_[h, h[::-1]]
        p = np.convolve(hh, x)
        self.assertEqual(o, list(p[:len(o)]))
class TestHBFMACUp(unittest.TestCase):
    """Tests for the half-band FIR MAC upsampler.

    Cleanups vs the previous version: removed a leftover debug print in
    test_run, the commented-out maxwait variant, and the vcd_name argument
    (which made every test run write an hbf.vcd trace file to disk).
    """

    def test_init(self):
        """The coefficient shift register has the expected reduced width."""
        coeff = [-3, 0, 6, 8, 6, 0, -3]
        dut = fir.HBFMACUpsampler(coeff)
        self.assertEqual(len(dut.coeff.sr), 2)

    def METHOD_NAME(self):
        """Coefficient lists that are not valid half-band filters are rejected."""
        for coeff in [0], [-1, 3, -1], [-1, 0, 1, 0, 1, 0, 1, -2]:
            with self.subTest(coeff=coeff):
                with self.assertRaises(ValueError):
                    fir.HBFMACUpsampler(coeff)

    def test_run(self):
        """Upsampled output matches the numpy reference for several sizes."""
        for n in 2, 3, 4, 10:
            coeff, x = self.coeff(n)
            with self.subTest(coeff=coeff):
                self.filter(coeff, x)

    def coeff(self, n):
        """Build a synthetic half-band coefficient list (length 4*n - 1 with a
        unit center tap) and a matching test input vector."""
        x = np.arange(3*n) + 1
        coeff = []
        for i in range(n):
            j = i + 2
            j = (-j if j & 1 else j) << 2
            coeff[2*i:2*i] = [j, 0, j, 0]
        coeff[2*n - 1] = 1
        coeff = coeff[:-1]
        return coeff, x

    def filter(self, coeff, x, maxwait=0):
        """Run the DUT on x and compare against a numpy reference convolution."""
        dut = fir.HBFMACUpsampler(coeff)
        n = (len(coeff) + 1)//4
        b = log2_int(coeff[2*n - 1])
        bias = (1 << max(0, b - 1)) - 1
        self.assertEqual(dut.bias.reset.value, bias)
        o = []
        random.seed(42)  # deterministic handshake timing
        run_simulation(dut, [feed(dut.input, x, maxwait=n*maxwait),
                             retrieve(dut.output, o, maxwait=n*maxwait//2)])
        # first sample out is a zero sample from the center tap
        p = np.convolve(coeff, np.c_[np.zeros_like(x), x].ravel())
        # bias and rounding
        p = (p + bias) >> b
        self.assertEqual(o, list(p[:len(o)]))
299,366 | retranslate ui | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'mslib/msui/ui/ui_tableview_window.ui'
#
# Created by: PyQt5 UI code generator 5.12.3
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_TableViewWindow(object):
    """Widget layout for the Table View window.

    Auto-generated by the PyQt5 UI compiler from ui_tableview_window.ui —
    per the file header, manual edits here will be lost on regeneration.
    """

    def setupUi(self, TableViewWindow):
        """Build the widget tree: a waypoint table above a row of buttons."""
        TableViewWindow.setObjectName("TableViewWindow")
        TableViewWindow.resize(1254, 472)
        TableViewWindow.setMinimumSize(QtCore.QSize(0, 0))
        self.centralwidget = QtWidgets.QWidget(TableViewWindow)
        self.centralwidget.setObjectName("centralwidget")
        self.verticalLayout = QtWidgets.QVBoxLayout(self.centralwidget)
        self.verticalLayout.setObjectName("verticalLayout")
        # Waypoint table: rows can be drag-reordered within the table.
        self.tableWayPoints = QtWidgets.QTableView(self.centralwidget)
        font = QtGui.QFont()
        font.setBold(False)
        font.setWeight(50)
        self.tableWayPoints.setFont(font)
        self.tableWayPoints.setDragEnabled(True)
        self.tableWayPoints.setDragDropMode(QtWidgets.QAbstractItemView.InternalMove)
        self.tableWayPoints.setDefaultDropAction(QtCore.Qt.CopyAction)
        self.tableWayPoints.setAlternatingRowColors(True)
        self.tableWayPoints.setSelectionMode(QtWidgets.QAbstractItemView.ExtendedSelection)
        self.tableWayPoints.setSelectionBehavior(QtWidgets.QAbstractItemView.SelectRows)
        self.tableWayPoints.setObjectName("tableWayPoints")
        self.verticalLayout.addWidget(self.tableWayPoints)
        # Bottom row: tool selector, spacer, then the waypoint action buttons.
        self.horizontalLayout = QtWidgets.QHBoxLayout()
        self.horizontalLayout.setObjectName("horizontalLayout")
        self.cbTools = QtWidgets.QComboBox(self.centralwidget)
        sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.MinimumExpanding, QtWidgets.QSizePolicy.Fixed)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(self.cbTools.sizePolicy().hasHeightForWidth())
        self.cbTools.setSizePolicy(sizePolicy)
        self.cbTools.setBaseSize(QtCore.QSize(0, 0))
        self.cbTools.setObjectName("cbTools")
        self.cbTools.addItem("")
        self.horizontalLayout.addWidget(self.cbTools)
        spacerItem = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
        self.horizontalLayout.addItem(spacerItem)
        self.label = QtWidgets.QLabel(self.centralwidget)
        self.label.setObjectName("label")
        self.horizontalLayout.addWidget(self.label)
        self.btAddWayPointToFlightTrack = QtWidgets.QPushButton(self.centralwidget)
        self.btAddWayPointToFlightTrack.setObjectName("btAddWayPointToFlightTrack")
        self.horizontalLayout.addWidget(self.btAddWayPointToFlightTrack)
        self.btCloneWaypoint = QtWidgets.QPushButton(self.centralwidget)
        self.btCloneWaypoint.setObjectName("btCloneWaypoint")
        self.horizontalLayout.addWidget(self.btCloneWaypoint)
        self.btDeleteWayPoint = QtWidgets.QPushButton(self.centralwidget)
        self.btDeleteWayPoint.setObjectName("btDeleteWayPoint")
        self.horizontalLayout.addWidget(self.btDeleteWayPoint)
        self.btInvertDirection = QtWidgets.QPushButton(self.centralwidget)
        self.btInvertDirection.setMinimumSize(QtCore.QSize(100, 0))
        self.btInvertDirection.setObjectName("btInvertDirection")
        self.horizontalLayout.addWidget(self.btInvertDirection)
        self.btRoundtrip = QtWidgets.QPushButton(self.centralwidget)
        self.btRoundtrip.setObjectName("btRoundtrip")
        self.horizontalLayout.addWidget(self.btRoundtrip)
        self.verticalLayout.addLayout(self.horizontalLayout)
        TableViewWindow.setCentralWidget(self.centralwidget)
        # Ctrl+W closes the window via the actionCloseWindow action.
        self.actionCloseWindow = QtWidgets.QAction(TableViewWindow)
        self.actionCloseWindow.setObjectName("actionCloseWindow")
        TableViewWindow.addAction(self.actionCloseWindow)
        self.METHOD_NAME(TableViewWindow)
        self.actionCloseWindow.triggered.connect(TableViewWindow.close)
        QtCore.QMetaObject.connectSlotsByName(TableViewWindow)

    def METHOD_NAME(self, TableViewWindow):
        """Apply all translatable UI strings (titles, labels, shortcuts)."""
        _translate = QtCore.QCoreApplication.translate
        TableViewWindow.setWindowTitle(_translate("TableViewWindow", "Table View - Mission Support System"))
        self.cbTools.setItemText(0, _translate("TableViewWindow", "(select to open control)"))
        self.label.setText(_translate("TableViewWindow", "Waypoints:"))
        self.btAddWayPointToFlightTrack.setText(_translate("TableViewWindow", "insert"))
        self.btCloneWaypoint.setText(_translate("TableViewWindow", "clone"))
        self.btDeleteWayPoint.setText(_translate("TableViewWindow", "delete selected"))
        self.btInvertDirection.setText(_translate("TableViewWindow", "reverse"))
        self.btRoundtrip.setText(_translate("TableViewWindow", "make roundtrip"))
        self.actionCloseWindow.setText(_translate("TableViewWindow", "Close Window"))
        self.actionCloseWindow.setShortcut(_translate("TableViewWindow", "Ctrl+W"))
299,367 | default auth strategy | from typing import Any, Dict, List, Optional, Union
from aws_lambda_powertools.utilities.data_classes.common import DictWrapper
from aws_lambda_powertools.utilities.data_classes.shared_functions import (
get_header_value,
)
def get_identity_object(identity: Optional[dict]) -> Any:
    """Get the identity object based on the best detected type"""
    if identity is None:
        # API_KEY authorization
        return None
    if "sub" in identity:
        # AMAZON_COGNITO_USER_POOLS authorization
        return AppSyncIdentityCognito(identity)
    # AWS_IAM authorization
    return AppSyncIdentityIAM(identity)
class AppSyncIdentityIAM(DictWrapper):
    """Identity payload present when the AppSync API uses AWS_IAM authorization."""

    @property
    def source_ip(self) -> List[str]:
        """The source IP address of the caller received by AWS AppSync."""
        return self["sourceIp"]

    @property
    def username(self) -> str:
        """The username of the authenticated user. IAM user principal"""
        return self["username"]

    @property
    def account_id(self) -> str:
        """The AWS account ID of the caller."""
        return self["accountId"]

    @property
    def cognito_identity_pool_id(self) -> str:
        """The Amazon Cognito identity pool ID associated with the caller."""
        return self["cognitoIdentityPoolId"]

    @property
    def cognito_identity_id(self) -> str:
        """The Amazon Cognito identity ID of the caller."""
        return self["cognitoIdentityId"]

    @property
    def user_arn(self) -> str:
        """The ARN of the IAM user."""
        return self["userArn"]

    @property
    def cognito_identity_auth_type(self) -> str:
        """Either authenticated or unauthenticated based on the identity type."""
        return self["cognitoIdentityAuthType"]

    @property
    def cognito_identity_auth_provider(self) -> str:
        """A comma separated list of external identity provider information used in obtaining the
        credentials used to sign the request."""
        return self["cognitoIdentityAuthProvider"]
class AppSyncIdentityCognito(DictWrapper):
    """Identity payload for requests authorized via AMAZON_COGNITO_USER_POOLS."""

    @property
    def sub(self) -> str:
        """UUID (subject) of the authenticated user."""
        return self["sub"]

    @property
    def username(self) -> str:
        """Username of the authenticated user."""
        return self["username"]

    @property
    def source_ip(self) -> List[str]:
        """Source IP address of the caller as received by AWS AppSync."""
        return self["sourceIp"]

    @property
    def claims(self) -> Dict[str, str]:
        """Token claims carried by the user."""
        return self["claims"]

    @property
    def METHOD_NAME(self) -> str:
        """The default authorization strategy for this caller (ALLOW or DENY)."""
        return self["defaultAuthStrategy"]

    @property
    def groups(self) -> List[str]:
        """OIDC groups the user belongs to."""
        return self["groups"]

    @property
    def issuer(self) -> str:
        """Issuer of the token."""
        return self["issuer"]
class AppSyncResolverEventInfo(DictWrapper):
    """Metadata about the GraphQL request (the resolver ``info`` section)."""

    @property
    def field_name(self) -> str:
        """Name of the field currently being resolved."""
        return self["fieldName"]

    @property
    def parent_type_name(self) -> str:
        """Name of the parent type of the field currently being resolved."""
        return self["parentTypeName"]

    @property
    def variables(self) -> Optional[Dict[str, str]]:
        """All variables passed into the GraphQL request, if any."""
        return self.get("variables")

    @property
    def selection_set_list(self) -> Optional[List[str]]:
        """Fields in the GraphQL selection set; aliased fields are referenced
        by their alias name only, not the field name."""
        return self.get("selectionSetList")

    @property
    def selection_set_graphql(self) -> Optional[str]:
        """Selection set rendered as GraphQL SDL; inline fragments are
        preserved, while regular fragments are not merged in."""
        return self.get("selectionSetGraphQL")
class AppSyncResolverEvent(DictWrapper):
    """AppSync resolver event.

    **NOTE:** AppSync resolver events come in several shapes; this data class
    supports both the Amplify GraphQL directive @function and Direct Lambda
    Resolvers.

    Documentation:
    -------------
    - https://docs.aws.amazon.com/appsync/latest/devguide/resolver-context-reference.html
    - https://docs.amplify.aws/cli/graphql-transformer/function#structure-of-the-function-event
    """

    def __init__(self, data: dict):
        super().__init__(data)
        info: Optional[dict] = data.get("info")
        if not info:
            # Amplify @function events do not carry an "info" section;
            # synthesize one from the top-level fieldName/typeName keys.
            info = {"fieldName": self.get("fieldName"), "parentTypeName": self.get("typeName")}
        self._info = AppSyncResolverEventInfo(info)

    @property
    def info(self) -> AppSyncResolverEventInfo:
        """Metadata about the GraphQL request."""
        return self._info

    @property
    def type_name(self) -> str:
        """Name of the parent type of the field currently being resolved."""
        return self.info.parent_type_name

    @property
    def field_name(self) -> str:
        """Name of the field currently being resolved."""
        return self.info.field_name

    @property
    def arguments(self) -> Dict[str, Any]:
        """All GraphQL arguments for this field."""
        return self["arguments"]

    @property
    def identity(self) -> Union[None, AppSyncIdentityIAM, AppSyncIdentityCognito]:
        """Information about the caller, wrapped by detected auth type.

        - API_KEY authorization - returns None
        - AWS_IAM authorization - returns AppSyncIdentityIAM
        - AMAZON_COGNITO_USER_POOLS authorization - returns AppSyncIdentityCognito
        """
        return get_identity_object(self.get("identity"))

    @property
    def source(self) -> Optional[Dict[str, Any]]:
        """Resolution of the parent field, if any."""
        return self.get("source")

    @property
    def request_headers(self) -> Dict[str, str]:
        """HTTP request headers."""
        return self["request"]["headers"]

    @property
    def prev_result(self) -> Optional[Dict[str, Any]]:
        """Result of the previous operation in a pipeline resolver, if any."""
        prev = self.get("prev")
        return prev.get("result") if prev else None

    @property
    def stash(self) -> Optional[dict]:
        """Per-execution scratch map shared across request and response mapping
        templates, and across functions in a pipeline resolver."""
        return self.get("stash")

    def get_header_value(
        self,
        name: str,
        default_value: Optional[str] = None,
        case_sensitive: Optional[bool] = False,
    ) -> Optional[str]:
        """Get a request header value by name.

        Parameters
        ----------
        name: str
            Header name
        default_value: str, optional
            Default value if no value was found by name
        case_sensitive: bool
            Whether to use a case-sensitive look up

        Returns
        -------
        str, optional
            Header value
        """
        # Delegates to the module-level helper imported at the top of the file.
        return get_header_value(self.request_headers, name, default_value, case_sensitive)
299,368 | set up | from __future__ import print_function
import sys
if sys.version_info < (2, 7):
import unittest2 as unittest
else:
import unittest
import os
from . import session
from . import settings
from . import resource_suite
from .. import lib
class Test_Irmdir(resource_suite.ResourceBase, unittest.TestCase):
    """Integration tests for the ``irmdir`` iCommand (collection removal).

    These tests run against a live iRODS server via the ``self.admin`` session
    provided by ResourceBase.
    """

    def METHOD_NAME(self):
        # Standard fixture setup delegated to ResourceBase.
        super(Test_Irmdir, self).METHOD_NAME()

    def tearDown(self):
        super(Test_Irmdir, self).tearDown()

    def test_irmdir_of_nonexistent_collection(self):
        """irmdir on a missing collection reports an error."""
        self.admin.assert_icommand(['irmdir', 'garbage_dir'], 'STDOUT_SINGLELINE', 'Collection does not exist')

    def test_irmdir_of_dataobj(self):
        """irmdir on a data object (not a collection) reports an error."""
        filename = 'test_irmdir_of_dataobj'
        lib.make_file(filename, 1024, 'arbitrary')
        rods_filename = self.admin.session_collection + '/' + filename
        self.admin.assert_icommand(['iput', filename, rods_filename])
        self.admin.assert_icommand(['irmdir', rods_filename], 'STDOUT_SINGLELINE', 'Collection does not exist')
        self.admin.assert_icommand(['irm', '-f', rods_filename])
        os.unlink(filename)

    def test_irmdir_of_collection_containing_dataobj(self):
        """irmdir refuses to remove a collection containing a data object."""
        filename = 'test_dataobj'
        collname = 'test_collection'
        lib.make_file(filename, 1024, 'arbitrary')
        rods_collname = self.admin.session_collection + '/' + collname
        rods_filename = rods_collname + '/' + filename
        self.admin.assert_icommand(['imkdir', rods_collname])
        self.admin.assert_icommand(['iput', filename, rods_filename])
        self.admin.assert_icommand(['irmdir', rods_collname], 'STDOUT_SINGLELINE', 'Collection is not empty')
        os.unlink(filename)

    def test_irmdir_of_collection_containing_collection(self):
        """irmdir refuses to remove a collection containing a sub-collection."""
        collname_1 = 'test_collection_1'
        collname_2 = 'test_collection_2'
        rods_collname_1 = self.admin.session_collection + '/' + collname_1
        rods_collname_2 = rods_collname_1 + '/' + collname_2
        self.admin.assert_icommand(['imkdir', rods_collname_1])
        self.admin.assert_icommand(['imkdir', rods_collname_2])
        self.admin.assert_icommand(['irmdir', rods_collname_1], 'STDOUT_SINGLELINE', 'Collection is not empty')

    def test_irmdir_of_empty_collection(self):
        """irmdir succeeds on an empty collection."""
        collname = 'test_collection'
        rods_collname = self.admin.session_collection + '/' + collname
        self.admin.assert_icommand(['imkdir', rods_collname])
        self.admin.assert_icommand(['irmdir', rods_collname])
        # If irmdir failed, attempting to make a directory with the same name will also fail
        self.admin.assert_icommand(['imkdir', rods_collname])

    def test_irmdir_dash_p(self):
        """irmdir -p removes a whole empty collection path; error cases included."""
        path = 'a/b/c'
        col_d = 'd'
        col_e = 'd/e'
        abs_path = os.path.join(self.admin.session_collection, path)
        abs_path_to_col_d = os.path.join(self.admin.session_collection, path, col_d)
        self.admin.assert_icommand(['imkdir', '-p', os.path.join(path, col_e)])
        self.admin.assert_icommand(['icd', path])
        self.admin.assert_icommand(['ils'], 'STDOUT_MULTILINE', [abs_path, 'C- {0}'.format(abs_path_to_col_d)])
        self.admin.assert_icommand(['irmdir', '-p', col_e])
        self.admin.assert_icommand(['ils', col_e], 'STDERR', '/{0} does not exist '.format(col_e))
        self.admin.assert_icommand(['icd', self.admin.session_collection])
        self.admin.assert_icommand(['irmdir', '-p', path])
        self.admin.assert_icommand(['ils', path], 'STDERR', '/{0} does not exist '.format(path))
        # Trying to remove a collection that does not exist produces an error.
        self.admin.assert_icommand(['irmdir', '-p', 'x/y/z'], 'STDERR', 'Collection does not exist')
        # Trying to remove a collection that is not empty produces an error.
        self.admin.assert_icommand(['imkdir', '-p', 'a/b/c'])
        self.admin.assert_icommand(['imkdir', '-p', 'a/b/d'])
        self.admin.assert_icommand(['irmdir', '-p', 'a/b'], 'STDERR', 'Collection is not empty')
        self.admin.assert_icommand(['irmdir', 'a/b/c'])
        self.admin.assert_icommand(['irmdir', 'a/b/d'])
        self.admin.assert_icommand(['irmdir', '-p', 'a/b'])
        # Trying to remove a data object produces an error.
        filename = 'test_irmdir_of_dataobj'
        lib.make_file(filename, 1024, 'arbitrary')
        rods_filename = os.path.join(self.admin.session_collection, filename)
        self.admin.assert_icommand(['iput', filename, rods_filename])
        self.admin.assert_icommand(['irmdir', '-p', rods_filename], 'STDERR', 'Path does not point to a collection')
        self.admin.assert_icommand(['irm', '-f', rods_filename])
        os.unlink(filename)

    def test_irmdir_no_input(self):
        """irmdir without arguments prints a usage message."""
        self.admin.assert_icommand('irmdir', 'STDOUT_SINGLELINE', 'No collection names specified.')

    def test_irmdir_removes_collection_even_if_sibling_exists__issue_4788(self):
        """Removing 'foo' must not be blocked by a non-empty sibling 'foot'."""
        col_a = 'foo'
        self.admin.assert_icommand(['imkdir', col_a])
        col_b = 'foot'
        self.admin.assert_icommand(['imkdir', col_b])
        filename = 'issue_4788'
        file_path = os.path.join(self.admin.local_session_dir, filename)
        lib.make_file(file_path, 1024, 'arbitrary')
        self.admin.assert_icommand(['iput', file_path, os.path.join(col_b, filename)])
        self.admin.assert_icommand(['irmdir', col_a])
        self.admin.assert_icommand(['ils', col_a], 'STDERR', ['{0} does not exist'.format(os.path.join(self.admin.session_collection, col_a))])
|
299,369 | requests get | import datetime
import logging
import warnings
from contextlib import nullcontext
from typing import Optional, Set, List
import requests
from django.db.models import OuterRef, Q, Subquery, F
from slugify import slugify
from urllib3.exceptions import InsecureRequestWarning
from importer import JSON
from importer.models import CachedObject, ExternalList
from mainapp.functions.search import search_bulk_index
from mainapp.models import (
LegislativeTerm,
Location,
Body,
File,
Person,
Organization,
Membership,
Meeting,
Paper,
Consultation,
AgendaItem,
)
from mainapp.models.file import fallback_date
from meine_stadt_transparent import settings
logger = logging.getLogger(__name__)
import_order = [
LegislativeTerm,
Location,
Body,
File,
Person,
Organization,
Membership,
Meeting,
Paper,
Consultation,
AgendaItem,
] # type: List[Type[DefaultFields]]
def METHOD_NAME(url, params=None, retries: int = 3, **kwargs) -> requests.Response:
    """GET ``url`` with the custom user agent, retrying on connection errors.

    Raises after ``retries`` consecutive ``ConnectionError``s; non-2xx
    responses raise via ``raise_for_status``.
    """
    agent = f"{slugify(settings.PRODUCT_NAME)} ({settings.TEMPLATE_META['github']})"
    headers = kwargs.setdefault("headers", {})
    headers["User-Agent"] = agent
    # Hack to make Landshut work with the RIS' broken SSL setup
    skip_verify = settings.SSL_NO_VERIFY
    if skip_verify:
        kwargs["verify"] = False
    with warnings.catch_warnings() if skip_verify else nullcontext():
        if skip_verify:
            warnings.filterwarnings("ignore", category=InsecureRequestWarning)
        while True:
            try:
                response = requests.get(url, params, **kwargs)
                response.raise_for_status()
                return response
            except requests.exceptions.ConnectionError as err:
                retries -= 1
                if retries == 0:
                    raise
                logger.error(f"Error {err} in request for {url}, retrying")
def externalize(
    libobject: JSON, key_callback: Optional[Set[str]] = None
) -> List[CachedObject]:
    """Converts an oparl object with embedded objects to multiple flat json objects

    Embedded dicts (and lists of dicts) are replaced by their ids in the
    parent object; each embedded object gets a ``mst:backref`` pointing back
    to the parent (and ``mst:backrefPosition`` for list entries). If
    ``key_callback`` is a set, every key under which an embedded object was
    found is added to it.
    """
    externalized = []
    # sorted copies, thereby avoiding modification while iterating
    for key in sorted(libobject.keys()):
        # Skip the geojson object
        if key == "geojson":
            continue
        entry = libobject[key]
        if isinstance(entry, dict):
            if "id" not in entry:
                logger.warning(
                    f"Embedded object '{key}' in {libobject['id']} does not have an id,"
                    f" skipping: {entry}"
                )
                del libobject[key]
                continue
            if isinstance(key_callback, set):
                key_callback.add(key)
            # Remember the parent so the flat object can be linked back later.
            entry["mst:backref"] = libobject["id"]
            # Recurse: the embedded object may itself contain embedded objects.
            externalized += externalize(entry)
            # Replace the embedded object by a reference to its id.
            libobject[key] = entry["id"]
        if isinstance(entry, list) and len(entry) > 0 and isinstance(entry[0], dict):
            if isinstance(key_callback, set):
                key_callback.add(key)
            for pos, entry in enumerate(entry):
                if "id" not in entry:
                    logger.warning(
                        f"Embedded object '{key}' in {libobject['id']} does not have an"
                        f" id, skipping: {entry}"
                    )
                    # NOTE(review): this drops the WHOLE list on the first
                    # id-less entry and stops processing it — confirm intended.
                    del libobject[key]
                    break
                entry["mst:backref"] = libobject["id"]
                entry["mst:backrefPosition"] = pos  # We need this for agenda items
                externalized += externalize(entry)
                libobject[key][pos] = entry["id"]
    # Finally append the (now flat) object itself.
    externalized.append(
        CachedObject(
            url=libobject["id"],
            data=libobject,
            oparl_type=libobject["type"].split("/")[-1],
        )
    )
    return externalized
def clear_import(prefix: str, include_cache: bool = True) -> None:
    """Delete all imported oparl data whose ids start with ``prefix``.

    When ``include_cache`` is set, the import cache (CachedObject and
    ExternalList rows) is dropped as well.
    """
    for model in import_order:
        stats = model.objects.filter(oparl_id__startswith=prefix).delete()
        logger.info(f"{model.__name__}: {stats}")
    if include_cache:
        deleted = CachedObject.objects.filter(url__startswith=prefix).delete()
        logger.info(f"{deleted} cached objects deleted")
        deleted = ExternalList.objects.filter(url__startswith=prefix).delete()
        logger.info(f"{deleted} external lists deleted")
def import_update(
    body_id: Optional[str] = None,
    ignore_modified: bool = False,
    download_files: bool = True,
) -> None:
    """Run an oparl update import for one body (by oparl id) or all bodies.

    Parameters
    ----------
    body_id:
        oparl id of a single body to update; if None, every body that has an
        oparl id is updated.
    ignore_modified:
        passed through to the Importer (re-import regardless of timestamps).
    download_files:
        whether to download and process each body's files after updating.
    """
    # Imported lazily to avoid circular imports at module load time.
    from importer.importer import Importer
    from importer.loader import get_loader_from_body

    if body_id:
        bodies = Body.objects.filter(oparl_id=body_id).all()
    else:
        bodies = Body.objects.filter(oparl_id__isnull=False).all()
    for body in bodies:
        logger.info(f"Updating body {body}: {body.oparl_id}")
        loader = get_loader_from_body(body.oparl_id)
        importer = Importer(loader, body, ignore_modified=ignore_modified)
        importer.update(body.oparl_id)
        # NOTE(review): force_singlethread is set only AFTER update() has
        # already run, so it can only affect load_files() below — confirm
        # that this ordering is intended.
        importer.force_singlethread = True
        if download_files:
            importer.load_files(
                fallback_city=settings.GEOEXTRACT_SEARCH_CITY or body.short_name,
                update=True,
            )
def fix_sort_date(import_date: datetime.datetime):
    """
    Tries to guess the correct sort date for all papers and files that were created no later
    than import_date by looking at

    a) the legal date,
    b) the date of the earliest consultation or
    c) falling back to fallback_date
    """
    logger.info("Fixing the sort date of the papers")
    # Use the date of the earliest consultation
    earliest_consultation = (
        Consultation.objects.filter(paper=OuterRef("pk"), meeting__isnull=False)
        .order_by("meeting__start")
        .values("meeting__start")[:1]
    )
    papers_with_consultation = (
        Paper.objects.filter(Q(sort_date=fallback_date) | ~Q(sort_date=F("legal_date")))
        .annotate(earliest_consultation=Subquery(earliest_consultation))
        .filter(earliest_consultation__isnull=False)
        # We filter on these to only update those necessary in elasticsearch
        .filter(
            ~Q(sort_date=F("earliest_consultation"))
            & ~Q(display_date=F("earliest_consultation"))
        )
    )
    num = papers_with_consultation.update(
        sort_date=F("earliest_consultation"), display_date=F("earliest_consultation")
    )
    if settings.ELASTICSEARCH_ENABLED:
        search_bulk_index(Paper, papers_with_consultation)
    logger.info(f"{num} sort dates were fix by the earliest consultation")

    logger.info("Fixing the sort date of the files")
    # NOTE(review): this also overwrites `modified` with the legal date —
    # confirm that resetting the modification timestamp is intended.
    num = File.objects.filter(
        created__lte=import_date, legal_date__isnull=False
    ).update(sort_date=F("legal_date"), modified=F("legal_date"))
    logger.info(f"{num} files were changed")

    # Files without a legal date inherit the sort date of their earliest paper.
    earliest_paper = (
        Paper.objects.filter(files__pk=OuterRef("pk"))
        .order_by("sort_date")
        .values("sort_date")[:1]
    )
    file_with_paper = (
        File.objects.filter(legal_date__isnull=True)
        .annotate(earliest_paper=Subquery(earliest_paper))
        .filter(earliest_paper__isnull=False)
        # We filter on these to only update those necessary in elasticsearch
        .filter(~Q(sort_date=F("earliest_paper")))
    )
    num = file_with_paper.update(sort_date=F("earliest_paper"))
    if settings.ELASTICSEARCH_ENABLED:
        # Bugfix: file_with_paper is a File queryset, so it must be re-indexed
        # under the File document, not Paper (copy-paste from the paper branch).
        search_bulk_index(File, file_with_paper)
    logger.info(f"{num} files updated")
299,370 | test double retrieve assembly | # Copyright 2016 by Jacek Smietanski. All rights reserved.
# This file is part of the Biopython distribution and governed by your
# choice of the "Biopython License Agreement" or the "BSD 3-Clause License".
# Please see the LICENSE file that should have been included as part of this
# package.
"""Testing access to the PDB over the internet."""
import contextlib
import os
import shutil
import tempfile
import unittest
# We want to test this module:
from Bio.PDB.PDBList import PDBList
import requires_internet
requires_internet.check()
class TestPBDListGetList(unittest.TestCase):
    """Test methods responsible for getting lists of entries.

    Note: "PBD" in the class name is a historical typo for "PDB"; it is kept
    so that test selection by name keeps working.  These tests hit the PDB
    servers over the network.
    """

    def test_get_recent_changes(self):
        """Tests the Bio.PDB.PDBList.get_recent_changes method."""
        # obsolete_pdb declared to prevent from creating the "obsolete" directory
        pdblist = PDBList(obsolete_pdb="unimportant")
        url = pdblist.pdb_server + "/pub/pdb/data/status/latest/added.pdb"
        entries = pdblist.get_status_list(url)
        self.assertIsNotNone(entries)

    def test_get_all_entries(self):
        """Tests the Bio.PDB.PDBList.get_all_entries method."""
        # obsolete_pdb declared to prevent from creating the "obsolete" directory
        pdblist = PDBList(obsolete_pdb="unimportant")
        entries = pdblist.get_all_entries()
        # As number of entries constantly grow, test checks if a certain number was
        # exceeded
        self.assertGreater(len(entries), 100000)

    def test_get_all_obsolete(self):
        """Tests the Bio.PDB.PDBList.get_all_obsolete method."""
        # obsolete_pdb declared to prevent from creating the "obsolete" directory
        pdblist = PDBList(obsolete_pdb="unimportant")
        entries = pdblist.get_all_obsolete()
        # As number of obsolete entries constantly grow, test checks if a certain number
        # was exceeded
        self.assertGreater(len(entries), 3000)

    def test_get_all_assemblies(self):
        """Tests the Bio.PDB.PDBList.get_all_assemblies method."""
        # obsolete_pdb declared to prevent from creating the "obsolete" directory
        pdblist = PDBList(obsolete_pdb="unimportant")
        entries = pdblist.get_all_assemblies()
        # As number of obsolete entries constantly grow, test checks if a certain number
        # was exceeded
        self.assertGreater(len(entries), 100000)
class TestPDBListGetStructure(unittest.TestCase):
    """Test methods responsible for getting structures.

    Each test downloads a real entry from the PDB servers into a temporary
    directory and asserts that the expected file appears.
    """

    @contextlib.contextmanager
    def make_temp_directory(self, directory):
        # Temporary download target; always removed, even on test failure.
        temp_dir = tempfile.mkdtemp(dir=directory)
        try:
            yield temp_dir
        finally:
            shutil.rmtree(temp_dir)

    def check(self, structure, filename, file_format, obsolete=False, pdir=None):
        """Download ``structure`` in ``file_format`` and assert ``filename`` exists."""
        with self.make_temp_directory(os.getcwd()) as tmp:
            pdblist = PDBList(pdb=tmp)
            path = os.path.join(tmp, filename)
            if pdir:
                pdir = os.path.join(tmp, pdir)
            pdblist.retrieve_pdb_file(
                structure, obsolete=obsolete, pdir=pdir, file_format=file_format
            )
            self.assertTrue(os.path.isfile(path))

    def test_retrieve_pdb_file_small_pdb(self):
        """Tests retrieving the small molecule in pdb format."""
        structure = "127d"
        self.check(
            structure, os.path.join(structure[1:3], f"pdb{structure}.ent"), "pdb"
        )

    def test_retrieve_pdb_file_large_pdb(self):
        """Tests retrieving the bundle for large molecule in pdb-like format."""
        structure = "3k1q"
        self.check(
            structure,
            os.path.join(structure[1:3], f"{structure}-pdb-bundle.tar"),
            "bundle",
        )

    def test_retrieve_pdb_file_obsolete_pdb(self):
        """Tests retrieving the obsolete molecule in pdb format."""
        structure = "347d"
        self.check(
            structure,
            os.path.join("obsolete", structure[1:3], f"pdb{structure}.ent"),
            "pdb",
            obsolete=True,
        )

    def test_retrieve_pdb_file_obsolete_mmcif(self):
        """Tests retrieving the obsolete molecule in mmcif format."""
        structure = "347d"
        self.check(
            structure,
            os.path.join("obsolete", structure[1:3], f"{structure}.cif"),
            "mmCif",
            obsolete=True,
        )

    def test_retrieve_pdb_file_mmcif(self):
        """Tests retrieving the (non-obsolete) molecule in mmcif format."""
        structure = "127d"
        self.check(structure, os.path.join(structure[1:3], f"{structure}.cif"), "mmCif")

    def test_retrieve_pdb_file_obsolete_xml(self):
        """Tests retrieving the obsolete molecule in mmcif format."""
        structure = "347d"
        self.check(
            structure,
            os.path.join("obsolete", structure[1:3], f"{structure}.xml"),
            "xml",
            obsolete=True,
        )

    def test_retrieve_pdb_file_xml(self):
        """Tests retrieving the (non obsolete) molecule in xml format."""
        structure = "127d"
        self.check(structure, os.path.join(structure[1:3], f"{structure}.xml"), "xml")

    def test_retrieve_pdb_file_mmtf(self):
        """Tests retrieving the molecule in mmtf format."""
        structure = "127d"
        self.check(structure, os.path.join(structure[1:3], f"{structure}.mmtf"), "mmtf")

    def test_double_retrieve_structure(self):
        """Tests retrieving the same file to different directories."""
        structure = "127d"
        self.check(structure, os.path.join("a", f"{structure}.cif"), "mmCif", pdir="a")
        self.check(structure, os.path.join("b", f"{structure}.cif"), "mmCif", pdir="b")
class TestPDBListGetAssembly(unittest.TestCase):
    """Test methods responsible for getting assemblies.

    Each test downloads a real biological assembly from the PDB servers into
    a temporary directory and asserts that the expected file appears.
    """

    @contextlib.contextmanager
    def make_temp_directory(self, directory):
        # Temporary download target; always removed, even on test failure.
        temp_dir = tempfile.mkdtemp(dir=directory)
        try:
            yield temp_dir
        finally:
            shutil.rmtree(temp_dir)

    def check(self, structure, assembly_num, filename, file_format, pdir=None):
        """Download the assembly and assert ``filename`` exists afterwards."""
        with self.make_temp_directory(os.getcwd()) as tmp:
            pdblist = PDBList(pdb=tmp)
            path = os.path.join(tmp, filename)
            if pdir:
                pdir = os.path.join(tmp, pdir)
            pdblist.retrieve_assembly_file(
                structure, assembly_num, pdir=pdir, file_format=file_format
            )
            self.assertTrue(os.path.isfile(path))

    def test_retrieve_assembly_file_mmcif(self):
        """Tests retrieving a small assembly in mmCif format."""
        structure = "127d"
        assembly_num = "1"
        self.check(
            structure,
            assembly_num,
            os.path.join(structure[1:3], f"{structure}-assembly{assembly_num}.cif"),
            "mmCif",
        )

    def test_retrieve_assembly_file_pdb(self):
        """Tests retrieving a small assembly in pdb format."""
        structure = "127d"
        assembly_num = "1"
        self.check(
            structure,
            assembly_num,
            os.path.join(structure[1:3], f"{structure}.pdb{assembly_num}"),
            "pdb",
        )

    def METHOD_NAME(self):
        """Tests retrieving the same file to different directories."""
        structure = "127d"
        assembly_num = "1"
        self.check(
            structure,
            assembly_num,
            os.path.join("a", f"{structure}-assembly{assembly_num}.cif"),
            "mmCif",
            pdir="a",
        )
        self.check(
            structure,
            assembly_num,
            os.path.join("b", f"{structure}-assembly{assembly_num}.cif"),
            "mmCif",
            pdir="b",
        )
if __name__ == "__main__":
    # verbosity=2 prints each test name and result as it runs.
    runner = unittest.TextTestRunner(verbosity=2)
    unittest.main(testRunner=runner)
299,371 | decimate |
import io
from scipy.spatial import Delaunay
import numpy as np
import functools
def _memo(fn):
"""Helper decorator memoizes the given zero-argument function.
Really helpful for memoizing properties so they don't have to be recomputed
dozens of times.
"""
@functools.wraps(fn)
def memofn(self, *args, **kwargs):
if id(fn) not in self._cache:
self._cache[id(fn)] = fn(self)
return self._cache[id(fn)]
return memofn
def tetra_vol(pts):
    """Volume of the tetrahedron spanned by 4 points (array of shape (4, 3))."""
    # Scalar triple product of the three edges from vertex 0, divided by 6.
    a, b, c = pts[1] - pts[0], pts[2] - pts[0], pts[3] - pts[0]
    return np.abs(np.dot(a, np.cross(b, c))) / 6
def brick_vol(pts):
    """Volume of a triangular prism given 6 points (bottom then top triangle)."""
    # Split the prism into three tetrahedra and sum their volumes.
    corners = ((0, 1, 2, 4), (0, 2, 3, 4), (2, 3, 4, 5))
    return sum(tetra_vol(pts[list(idx)]) for idx in corners)
def sort_polys(polys):
    """Cyclically rotate each triangle so its smallest vertex index comes first."""
    first = polys.argmin(1)
    rows = np.arange(len(polys))
    # Gather the three columns starting at each row's minimum, wrapping mod 3.
    cols = [polys[rows, (first + offset) % 3] for offset in range(3)]
    return np.array(cols).T
def face_area(pts):
    """Areas of triangles.

    Parameters
    ----------
    pts : array_like
        (n, 3, 3) array with n triangles, 3 vertices, and (x, y, z) coordinates.
    """
    # Half the magnitude of the cross product of two edge vectors.
    edge1 = pts[:, 1] - pts[:, 0]
    edge2 = pts[:, 2] - pts[:, 0]
    normal = np.cross(edge1, edge2)
    return 0.5 * np.sqrt((normal ** 2).sum(1))
def face_volume(pts1, pts2, polys):
    """Volume of each face in a polyhedron sheet.

    Parameters
    ----------
    pts1, pts2 : array_like
        Vertex coordinates of the two sheets (same topology/indexing).
    polys : array_like
        (n, 3) triangle index array shared by both sheets.

    Returns
    -------
    np.ndarray
        (n,) array of prism volumes, one per face.
    """
    vols = np.zeros((len(polys),))
    for i, face in enumerate(polys):
        # Each face plus its counterpart on the other sheet forms a prism.
        vols[i] = brick_vol(np.append(pts1[face], pts2[face], axis=0))
        # Fix: removed leftover debug `print(i)` that spammed stdout every
        # 1000 faces when this library function was called.
    return vols
def METHOD_NAME(pts, polys):
    """Simplify a mesh with VTK's DecimatePro, preserving topology.

    Returns the decimated ``(pts, polys)``. ``target_reduction=1.0`` requests
    the maximum reduction the constraints allow.
    """
    from tvtk.api import tvtk
    pd = tvtk.PolyData(points=pts, polys=polys)
    try:
        # VTK >= 6 pipeline API.
        dec = tvtk.DecimatePro()
        dec.set_input_data(pd)
    except Exception:
        dec = tvtk.DecimatePro(input=pd) # VTK version < 6
    dec.set(preserve_topology=True, splitting=False, boundary_vertex_deletion=False, target_reduction=1.0)
    dec.update()
    dpts = dec.output.points.to_array()
    # VTK stores polys as [count, i0, i1, i2, ...]; strip the leading counts.
    dpolys = dec.output.polys.to_array().reshape(-1, 4)[:,1:]
    return dpts, dpolys
def inside_convex_poly(pts):
    """Return a predicate testing whether points lie inside the convex hull of pts.

    An alternative is to take the faces of the convex hull and check each
    normal against each point; qhull's find_simplex (via a Delaunay
    triangulation) is vastly faster, even though the simplex itself is unused.
    """
    hull = Delaunay(pts)

    def _contains(x):
        return hull.find_simplex(x) != -1

    return _contains
def make_cube(center=(.5, .5, .5), size=1):
    """Return ``(pts, polys)`` for an axis-aligned, triangulated cube.

    The unit cube is centered at the origin, scaled by ``size``, then
    translated so its center sits at ``center``.
    """
    # Corner order: x varies fastest, then y, then z.
    corners = np.array([(x, y, z) for z in (0, 1) for y in (0, 1) for x in (0, 1)],
                       dtype=float)
    corners -= (.5, .5, .5)
    polys = np.array([(0, 2, 3), (0, 3, 1), (0, 1, 4), (1, 5, 4),
                      (1, 3, 5), (3, 7, 5), (2, 7, 3), (2, 6, 7),
                      (0, 6, 2), (0, 4, 6), (4, 7, 6), (4, 5, 7)], dtype=np.uint32)
    return corners * size + center, polys
def boundary_edges(polys):
    """Edges belonging to exactly one face, i.e. the boundary of the mesh."""
    # Count how many faces reference each (sorted) vertex pair.
    edge_count = dict()
    for poly in np.sort(polys):
        for a, b in ((0, 1), (1, 2), (0, 2)):
            key = (poly[a], poly[b])
            edge_count[key] = edge_count.get(key, 0) + 1
    # Interior edges are shared by two faces; boundary edges by one.
    return np.array([edge for edge, count in edge_count.items() if count == 1])
def trace_poly(edges):
    """Returns the two largest connected components, out of a set of boundary
    edges (as returned by `boundary_edges`)
    """
    # Adjacency map: vertex -> list of neighboring vertices.
    conn = dict((e, []) for e in np.unique(np.array(edges).ravel()))
    for a, b in edges:
        conn[a].append(b)
        conn[b].append(a)

    components = []
    while len(conn) > 0:
        # Start a new component at an arbitrary remaining vertex.
        vert, nverts = next(iter(conn.items()))
        poly = [vert]
        # Walk along the loop, consuming each traversed edge from both
        # endpoints, until the loop closes (poly[0] == poly[-1]) or the
        # current endpoint has no neighbors left.
        while (len(poly) == 1 or poly[0] != poly[-1]) and len(conn[poly[-1]]) > 0:
            nvert = conn[poly[-1]][0]
            conn[nvert].remove(poly[-1])
            conn[poly[-1]].remove(nvert)
            # Drop fully-consumed vertices from the adjacency map.
            if len(conn[nvert]) == 0:
                del conn[nvert]
            if len(conn[poly[-1]]) == 0:
                del conn[poly[-1]]
            poly.append(nvert)
        components.append(poly)

    # If the flat surfaces have more than 2 components due to cut leftovers,
    # we filter them by keeping only the two largest components.
    # Note that they are not necessarily ordered as (left, right).
    lengths = [len(comp) for comp in components]
    order = np.argsort(lengths)
    hemisphere_0, hemisphere_1 = components[order[-1]], components[order[-2]]
    return hemisphere_0, hemisphere_1
def rasterize(poly, shape=(256, 256)):
    """Rasterize a 2D polygon into a boolean mask of the given shape.

    Shells out to ImageMagick's ``convert`` and thresholds the resulting
    image; requires ImageMagick to be on the PATH.
    """
    #ImageDraw sucks at its job, so we'll use imagemagick to do rasterization
    import subprocess as sp
    import shlex
    from PIL import Image
    # Coordinates are flipped to (y, x) and shifted by half a pixel for the
    # draw command.
    polygon = " ".join(["%0.3f,%0.3f"%tuple(p[::-1]) for p in np.array(poly)-(.5, .5)])
    cmd = 'convert -size %dx%d xc:black -fill white -stroke none -draw "polygon %s" PNG32:-'%(shape[0], shape[1], polygon)
    proc = sp.Popen(shlex.split(cmd), stdout=sp.PIPE)
    png = io.BytesIO(proc.communicate()[0])
    im = Image.open(png)

    # For PNG8:
    # mode, palette = im.palette.getdata()
    # lut = np.fromstring(palette, dtype=np.uint8).reshape(-1, 3)
    # if (lut == 255).any():
    #     white = np.nonzero((lut == 255).all(1))[0][0]
    #     return np.array(im) == white
    # return np.zeros(shape, dtype=bool)

    # Threshold the red channel and transpose back to (x, y) ordering.
    return (np.array(im)[:,:,0] > 128).T
def voxelize(pts, polys, shape=(256, 256, 256), center=(128, 128, 128), mp=True):
    """Voxelize a closed surface mesh into a binary volume.

    For every z layer the mesh is clipped by a plane, the boundary loops of
    the cut are traced and rasterized, and overlapping polygons are combined
    mod 2 so that holes are handled correctly.
    """
    from tvtk.api import tvtk
    pd = tvtk.PolyData(points=pts + center + (0, 0, 0), polys=polys)
    plane = tvtk.Planes(normals=[(0,0,1)], points=[(0,0,0)])
    clip = tvtk.ClipPolyData(clip_function=plane, input=pd)
    feats = tvtk.FeatureEdges(
        manifold_edges=False,
        non_manifold_edges=False,
        feature_edges=False,
        boundary_edges=True,
        input=clip.output)

    def func(i):
        # Move the clipping plane to layer i and rasterize the cut outline(s).
        plane.points = [(0,0,i)]
        feats.update()
        vox = np.zeros(shape[:2][::-1], np.uint8)
        if feats.output.number_of_lines > 0:
            epts = feats.output.points.to_array()
            edges = feats.output.lines.to_array().reshape(-1, 3)[:,1:]
            for poly in trace_poly(edges):
                vox += rasterize(epts[poly][:,:2]+[.5, .5], shape=shape[:2][::-1])
        # mod 2: a pixel covered by an even number of loops is outside.
        return vox % 2

    if mp:
        # Note: the local import shadows the `mp` boolean parameter.
        from . import mp
        layers = mp.map(func, range(shape[2]))
    else:
        #layers = map(func, range(shape[2]))
        layers = [func(x) for x in range(shape[2])] # python3 compatible
    return np.array(layers).T
def measure_volume(pts, polys):
    """Enclosed volume of a closed mesh, computed via VTK's MassProperties."""
    from tvtk.api import tvtk
    pd = tvtk.PolyData(points=pts, polys=polys)
    mp = tvtk.MassProperties(input=pd)
    return mp.volume
def marching_cubes(volume, smooth=True, METHOD_NAME=True, **kwargs):
    """Extract the isosurface at value 1 from a 3D volume using VTK.

    Parameters
    ----------
    volume : array_like
        3D scalar volume to contour.
    smooth : bool
        apply a windowed-sinc smoothing pass; extra ``kwargs`` override the
        default smoothing parameters.
    METHOD_NAME : bool
        quadric-decimate the surface by 75% of its triangles.

    Returns
    -------
    (pts, polys) of the extracted surface.
    """
    from tvtk.api import tvtk
    imgdata = tvtk.ImageData(dimensions=volume.shape)
    # Fortran order matches VTK's x-fastest point ordering.
    imgdata.point_data.scalars = volume.flatten('F')
    contours = tvtk.ContourFilter(input=imgdata, number_of_contours=1)
    contours.set_value(0, 1)
    if smooth:
        smoothargs = dict(number_of_iterations=40, feature_angle = 90, pass_band=.05)
        smoothargs.update(kwargs)
        contours = tvtk.WindowedSincPolyDataFilter(input=contours.output, **smoothargs)
    if METHOD_NAME:
        contours = tvtk.QuadricDecimation(input=contours.output, target_reduction=.75)
    contours.update()
    pts = contours.output.points.to_array()
    # VTK stores polys as [count, i0, i1, i2, ...]; strip the leading counts.
    polys = contours.output.polys.to_array().reshape(-1, 4)[:,1:]
    return pts, polys
299,372 | check statistics periods | import glob
import logging
import os
import shutil
from pathlib import Path
import numpy as np
from pyaerocom import const
from pyaerocom.aeroval.modelentry import ModelEntry
from pyaerocom.aeroval.varinfo_web import VarinfoWeb
from pyaerocom.colocateddata import ColocatedData
from pyaerocom.colocation_auto import Colocator
from pyaerocom.exceptions import TemporalResolutionError
from pyaerocom.griddeddata import GriddedData
from pyaerocom.helpers import (
get_highest_resolution,
get_max_period_range,
make_dummy_cube,
start_stop_str,
)
from pyaerocom.io import ReadGridded
from pyaerocom.tstype import TsType
from pyaerocom.variable import Variable
logger = logging.getLogger(__name__)
def check_var_ranges_avail(model_data, var_name):
    """
    Ensure lower and upper variable ranges are registered for ``var_name``.

    Parameters
    ----------
    model_data : GriddedData
        modeldata containing variable data
    var_name : str
        variable name to be checked (must be the same as model data
        AeroCom variable name).

    Raises
    ------
    ValueError
        if ranges for the input variable are not defined and the input model
        data corresponds to a different variable than ``var_name``.

    Returns
    -------
    None
    """
    try:
        VarinfoWeb(var_name)
        return
    except AttributeError:
        pass
    # No web variable info available: register it from the model data, but
    # only if that data actually belongs to the requested variable.
    if model_data.var_name_aerocom != var_name:
        raise ValueError(
            f"Mismatch between variable name of input model_data "
            f"({model_data.var_name_aerocom}) and var_name {var_name}"
        )
    model_data.register_var_glob(delete_existing=True)
def METHOD_NAME(periods: list) -> list:
    """
    Validate and normalise a list of period strings.

    Parameters
    ----------
    periods : list
        period strings, each either a single year (e.g. "2005") or a year
        range (e.g. "2000-2010").

    Raises
    ------
    ValueError
        if the input is not a list, any entry is not a string, an entry has
        more than two dash-separated parts, or a part is not an integer.

    Returns
    -------
    list
        normalised period strings (whitespace stripped, years re-rendered
        through ``int``).
    """
    if not isinstance(periods, list):
        raise ValueError("statistics_periods needs to be a list")
    normalised = []
    for period in periods:
        if not isinstance(period, str):
            raise ValueError("All periods need to be strings")
        parts = [part.strip() for part in period.split("-")]
        if len(parts) > 2:
            raise ValueError(
                f"Invalid value for period ({period}), can be either single "
                f"years or period of years (e.g. 2000-2010)."
            )
        # int() round-trips validate each year and strip leading zeros.
        normalised.append("-".join(str(int(part)) for part in parts))
    return normalised
def _period_str_to_timeslice(period: str) -> slice:
"""
Convert input period to a time slice
Parameters
----------
period : str
period, e.g. "2000-2010"
Raises
------
ValueError
if input period is invalid
Returns
-------
slice
slice containing start and end strings.
"""
spl = period.split("-")
if len(spl) == 1:
return slice(spl[0], spl[0])
elif len(spl) == 2:
return slice(*spl)
raise ValueError(period)
def _get_min_max_year_periods(statistics_periods):
"""Get lowest and highest available year from all periods
Parameters
----------
statistics_periods : list
list of periods for experiment
Returns
-------
int
start year
int
stop year (may be the same as start year, e.g. if periods suggest
single year analysis).
"""
startyr, stopyr = 1e6, -1e6
for per in statistics_periods:
sl = _period_str_to_timeslice(per)
perstart, perstop = int(sl.start), int(sl.stop)
if perstart < startyr:
startyr = perstart
if perstop > stopyr:
stopyr = perstop
return startyr, stopyr
def make_dummy_model(obs_list: list, cfg) -> str:
    """Create and register a placeholder model covering all obs variables.

    For every variable of every observation network in ``obs_list`` a dummy
    data cube spanning the full period/frequency range of ``cfg`` is written
    to the local temp dir (one netCDF file per year), and the model is
    registered in ``cfg.model_cfg`` under the key "dummy".

    Parameters
    ----------
    obs_list : list
        names of observation entries in ``cfg.obs_cfg``.
    cfg
        experiment configuration providing time settings and obs config.

    Returns
    -------
    str
        the id of the registered dummy model ("dummy_model").
    """
    # Sets up variable for the model register
    tmpdir = const.LOCAL_TMP_DIR
    const.add_data_search_dir(tmpdir)
    model_id = "dummy_model"
    outdir = os.path.join(tmpdir, f"{model_id}/renamed")
    os.makedirs(outdir, exist_ok=True)
    # Finds dates and freq to use, so that all observations are covered
    (start, stop) = get_max_period_range(cfg.time_cfg.periods)
    freq = get_highest_resolution(*cfg.time_cfg.freqs)
    tmp_var_obj = Variable()
    # Loops over variables in obs
    for obs in obs_list:
        for var in cfg.obs_cfg[obs]["obs_vars"]:
            # Create dummy cube
            dummy_cube = make_dummy_cube(var, start_yr=start, stop_yr=stop, freq=freq)
            # Converts cube to GriddedData
            dummy_grid = GriddedData(dummy_cube)
            # Set the value to be the mean of acceptable values to prevent incorrect outlier removal
            # This needs some care though because the defaults are (currently) -inf and inf, which leads to erroneous removal
            if not (
                dummy_grid.var_info.minimum == tmp_var_obj.VMIN_DEFAULT
                or dummy_grid.var_info.maximum == tmp_var_obj.VMAX_DEFAULT
            ):
                dummy_grid.data *= (dummy_grid.var_info.minimum + dummy_grid.var_info.maximum) / 2
            # Loop over each year
            yr_gen = dummy_grid.split_years()
            for dummy_grid_yr in yr_gen:
                # Add to netcdf
                yr = dummy_grid_yr.years_avail()[0]
                vert_code = cfg.obs_cfg[obs]["obs_vert_type"]
                save_name = dummy_grid_yr.aerocom_savename(model_id, var, vert_code, yr, freq)
                dummy_grid_yr.to_netcdf(outdir, savename=save_name)
    # Add dummy model to cfg
    cfg.model_cfg["dummy"] = ModelEntry(model_id="dummy_model")
    return model_id
def delete_dummy_model(model_id: str) -> None:
    """Remove all netCDF files previously written by :func:`make_dummy_model`.

    Parameters
    ----------
    model_id : str
        id of the dummy model whose files should be deleted.
    """
    tmpdir = const.LOCAL_TMP_DIR
    # Mirrors make_dummy_model: make sure tmpdir is a registered search dir.
    const.add_data_search_dir(tmpdir)
    renamed = Path(tmpdir) / f"{model_id}/renamed"
    for path in renamed.glob("*.nc"):
        print(f"Deleting dummy model {path}")
        path.unlink()
import numpy as np
import torch
import unittest
import heat as ht
from heat import manipulations
from .test_suites.basic_test import TestCase
class TestSignal(TestCase):
    """Tests for heat's 1-D convolution (``ht.convolve``)."""
    @classmethod
    def METHOD_NAME(cls):
        # No extra fixtures needed; defer to the shared heat TestCase setup.
        super(TestSignal, cls).METHOD_NAME()
    def test_convolve(self):
        """Exercise ht.convolve: type/shape validation, all modes, odd/even
        kernels, distributed and non-distributed operands, and edge cases."""
        # Expected full-mode results for the odd (size 3) and even (size 4)
        # all-ones kernels applied to arange(0, 16).
        full_odd = ht.array(
            [0, 1, 3, 6, 9, 12, 15, 18, 21, 24, 27, 30, 33, 36, 39, 42, 29, 15]
        ).astype(ht.int)
        full_even = ht.array(
            [0, 1, 3, 6, 10, 14, 18, 22, 26, 30, 34, 38, 42, 46, 50, 54, 42, 29, 15]
        ).astype(ht.int)
        dis_signal = ht.arange(0, 16, split=0).astype(ht.int)
        signal = ht.arange(0, 16).astype(ht.int)
        full_ones = ht.ones(7, split=0).astype(ht.int)
        kernel_odd = ht.ones(3).astype(ht.int)
        kernel_even = [1, 1, 1, 1]
        dis_kernel_odd = ht.ones(3, split=0).astype(ht.int)
        dis_kernel_even = ht.ones(4, split=0).astype(ht.int)
        # --- invalid inputs must raise ---
        with self.assertRaises(TypeError):
            signal_wrong_type = [0, 1, 2, "tre", 4, "five", 6, "ʻehiku", 8, 9, 10]
            ht.convolve(signal_wrong_type, kernel_odd, mode="full")
        with self.assertRaises(TypeError):
            filter_wrong_type = [1, 1, "pizza", "pineapple"]
            ht.convolve(dis_signal, filter_wrong_type, mode="full")
        with self.assertRaises(ValueError):
            ht.convolve(dis_signal, kernel_odd, mode="invalid")
        with self.assertRaises(ValueError):
            s = dis_signal.reshape((2, -1))
            ht.convolve(s, kernel_odd)
        with self.assertRaises(ValueError):
            k = ht.eye(3)
            ht.convolve(dis_signal, k)
        with self.assertRaises(ValueError):
            # mode 'same' is not defined for even-sized kernels
            ht.convolve(dis_signal, kernel_even, mode="same")
        # Checks that only apply when data is actually distributed across
        # multiple processes (kernel larger than a local signal chunk).
        if self.comm.size > 1:
            with self.assertRaises(ValueError):
                ht.convolve(full_ones, kernel_even, mode="valid")
            with self.assertRaises(ValueError):
                ht.convolve(kernel_even, full_ones, mode="valid")
        if self.comm.size > 5:
            with self.assertRaises(ValueError):
                ht.convolve(dis_signal, kernel_even)
        # test modes, avoid kernel larger than signal chunk
        if self.comm.size <= 3:
            modes = ["full", "same", "valid"]
            for i, mode in enumerate(modes):
                # odd kernel size
                # full/same/valid trim i elements from each end of full_odd
                conv = ht.convolve(dis_signal, kernel_odd, mode=mode)
                gathered = manipulations.resplit(conv, axis=None)
                self.assertTrue(ht.equal(full_odd[i : len(full_odd) - i], gathered))
                conv = ht.convolve(dis_signal, dis_kernel_odd, mode=mode)
                gathered = manipulations.resplit(conv, axis=None)
                self.assertTrue(ht.equal(full_odd[i : len(full_odd) - i], gathered))
                conv = ht.convolve(signal, dis_kernel_odd, mode=mode)
                gathered = manipulations.resplit(conv, axis=None)
                self.assertTrue(ht.equal(full_odd[i : len(full_odd) - i], gathered))
                # different data types
                conv = ht.convolve(dis_signal.astype(ht.float), kernel_odd)
                gathered = manipulations.resplit(conv, axis=None)
                self.assertTrue(ht.equal(full_odd.astype(ht.float), gathered))
                conv = ht.convolve(dis_signal.astype(ht.float), dis_kernel_odd)
                gathered = manipulations.resplit(conv, axis=None)
                self.assertTrue(ht.equal(full_odd.astype(ht.float), gathered))
                conv = ht.convolve(signal.astype(ht.float), dis_kernel_odd)
                gathered = manipulations.resplit(conv, axis=None)
                self.assertTrue(ht.equal(full_odd.astype(ht.float), gathered))
                # even kernel size
                # skip mode 'same' for even kernels
                if mode != "same":
                    conv = ht.convolve(dis_signal, kernel_even, mode=mode)
                    dis_conv = ht.convolve(dis_signal, dis_kernel_even, mode=mode)
                    gathered = manipulations.resplit(conv, axis=None)
                    dis_gathered = manipulations.resplit(dis_conv, axis=None)
                    if mode == "full":
                        self.assertTrue(ht.equal(full_even, gathered))
                        self.assertTrue(ht.equal(full_even, dis_gathered))
                    else:
                        # 'valid' for a size-4 kernel trims 3 from each end
                        self.assertTrue(ht.equal(full_even[3:-3], gathered))
                        self.assertTrue(ht.equal(full_even[3:-3], dis_gathered))
                    # distributed large signal and kernel
                    # (cross-checked against numpy's reference implementation)
                    np.random.seed(12)
                    np_a = np.random.randint(1000, size=4418)
                    np_b = np.random.randint(1000, size=1543)
                    np_conv = np.convolve(np_a, np_b, mode=mode)
                    a = ht.array(np_a, split=0, dtype=ht.int32)
                    b = ht.array(np_b, split=0, dtype=ht.int32)
                    conv = ht.convolve(a, b, mode=mode)
                    self.assert_array_equal(conv, np_conv)
        # test edge cases
        # non-distributed signal, size-1 kernel
        signal = ht.arange(0, 16).astype(ht.int)
        alt_signal = (0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15)
        kernel = ht.ones(1).astype(ht.int)
        conv = ht.convolve(alt_signal, kernel)
        self.assertTrue(ht.equal(signal, conv))
        # scalar signal and scalar kernel
        conv = ht.convolve(1, 5)
        self.assertTrue(ht.equal(ht.array([5]), conv))
#
# This file is part of pretix (Community Edition).
#
# Copyright (C) 2014-2020 Raphael Michel and contributors
# Copyright (C) 2020-2021 rami.io GmbH and contributors
#
# This program is free software: you can redistribute it and/or modify it under the terms of the GNU Affero General
# Public License as published by the Free Software Foundation in version 3 of the License.
#
# ADDITIONAL TERMS APPLY: Pursuant to Section 7 of the GNU Affero General Public License, additional terms are
# applicable granting you additional permissions and placing additional restrictions on your usage of this software.
# Please refer to the pretix LICENSE file to obtain the full terms applicable to this work. If you did not receive
# this file, see <https://pretix.eu/about/en/license>.
#
# This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied
# warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Affero General Public License for more
# details.
#
# You should have received a copy of the GNU Affero General Public License along with this program. If not, see
# <https://www.gnu.org/licenses/>.
#
import time
import pytest
from pretix.base.models import Organizer
@pytest.mark.django_db
def test_no_auth(client):
    """Unauthenticated API requests must be rejected with 401."""
    resp = client.get('/api/v1/organizers/')
    assert resp.status_code == 401
@pytest.mark.django_db
def test_session_auth_no_teams(client, user):
    """A logged-in user without any team sees an empty organizer list."""
    client.login(email=user.email, password='dummy')
    resp = client.get('/api/v1/organizers/')
    assert resp.status_code == 200
    assert len(resp.data['results']) == 0
@pytest.mark.django_db
def test_session_auth_with_teams(client, user, team):
    """A team member only sees organizers their team grants access to."""
    team.members.add(user)
    # Second organizer the user has no team for -- must NOT be listed.
    Organizer.objects.create(name='Other dummy', slug='dummy2')
    client.login(email=user.email, password='dummy')
    resp = client.get('/api/v1/organizers/')
    assert resp.status_code == 200
    assert len(resp.data['results']) == 1
@pytest.mark.django_db
def METHOD_NAME(client, user, team):
    """An expired short-lived session must be rejected with 403.

    The session is marked as not long-lived and its timestamps are forged so
    the last activity lies just beyond the relative inactivity timeout
    (presumably 3 hours for short sessions -- confirm against pretix's
    session-security middleware).
    """
    client.login(email=user.email, password='dummy')
    session = client.session
    session['pretix_auth_long_session'] = False
    # Logged in 6h ago; last request 3h + 60s ago -> relative timeout exceeded.
    session['pretix_auth_login_time'] = int(time.time()) - 3600 * 6
    session['pretix_auth_last_used'] = int(time.time()) - 3600 * 3 - 60
    session.save()
    resp = client.get('/api/v1/organizers/')
    assert resp.status_code == 403
@pytest.mark.django_db
def test_token_invalid(client):
    """An unknown API token must yield 401."""
    client.credentials(HTTP_AUTHORIZATION='Token ABCDE')
    resp = client.get('/api/v1/organizers/')
    assert resp.status_code == 401
@pytest.mark.django_db
def test_token_auth_valid(client, team):
    """A valid team token sees exactly that team's organizer."""
    # Organizer outside the team's scope -- must not appear in results.
    Organizer.objects.create(name='Other dummy', slug='dummy2')
    t = team.tokens.create(name='Foo')
    client.credentials(HTTP_AUTHORIZATION='Token ' + t.token)
    resp = client.get('/api/v1/organizers/')
    assert resp.status_code == 200
    assert len(resp.data['results']) == 1
@pytest.mark.django_db
def test_token_auth_inactive(client, team):
    """A deactivated token must be rejected with 401."""
    Organizer.objects.create(name='Other dummy', slug='dummy2')
    t = team.tokens.create(name='Foo', active=False)
    client.credentials(HTTP_AUTHORIZATION='Token ' + t.token)
    resp = client.get('/api/v1/organizers/')
    assert resp.status_code == 401
@pytest.mark.django_db
def test_device_invalid(client):
    """An unknown device token must yield 401."""
    client.credentials(HTTP_AUTHORIZATION='Device ABCDE')
    resp = client.get('/api/v1/organizers/')
    assert resp.status_code == 401
@pytest.mark.django_db
def test_device_auth_valid(client, device):
    """A valid device token sees its organizer."""
    client.credentials(HTTP_AUTHORIZATION='Device ' + device.api_token)
    resp = client.get('/api/v1/organizers/')
    assert resp.status_code == 200
    assert len(resp.data['results']) == 1
@pytest.mark.django_db
def test_device_auth_revoked(client, device):
    """A revoked device must be rejected with a specific error message."""
    client.credentials(HTTP_AUTHORIZATION='Device ' + device.api_token)
    device.revoked = True
    device.save()
    resp = client.get('/api/v1/organizers/')
    assert resp.status_code == 401
    assert str(resp.data['detail']) == "Device access has been revoked."
@pytest.mark.django_db
def test_device_auth_security_profile(client, device):
    """Endpoint access depends on the device's security profile.

    The "pretixscan" profile does not include the gift card endpoint
    (-> 403) while "pretixpos" does (-> 200).
    """
    client.credentials(HTTP_AUTHORIZATION='Device ' + device.api_token)
    device.security_profile = "pretixscan"
    device.save()
    resp = client.get('/api/v1/organizers/dummy/giftcards/')
    assert resp.status_code == 403
    device.security_profile = "pretixpos"
    device.save()
    resp = client.get('/api/v1/organizers/dummy/giftcards/')
    assert resp.status_code == 200
"""
Recursions are the recipe of |jedi| to conquer Python code. However, someone
must stop recursions going mad. Some settings are here to make |jedi| stop at
the right time. You can read more about them :ref:`here <settings-recursion>`.
Next to :mod:`jedi.evaluate.cache` this module also makes |jedi| not
thread-safe. Why? ``execution_recursion_decorator`` uses class variables to
count the function calls.
.. _settings-recursion:
Settings
~~~~~~~~~~
Recursion settings are important if you don't want extremly
recursive python code to go absolutely crazy.
The default values are based on experiments while completing the |jedi| library
itself (inception!). But I don't think there's any other Python library that
uses recursion in a similarly extreme way. Completion should also be fast and
therefore the quality might not always be maximal.
.. autodata:: recursion_limit
.. autodata:: total_function_execution_limit
.. autodata:: per_function_execution_limit
.. autodata:: per_function_recursion_limit
"""
from contextlib import contextmanager
from jedi import debug
from jedi.evaluate.base_context import NO_CONTEXTS
recursion_limit = 15
"""
Like ``sys.getrecursionlimit()``, just for |jedi|.
"""
total_function_execution_limit = 200
"""
This is a hard limit of how many non-builtin functions can be executed.
"""
per_function_execution_limit = 6
"""
The maximal amount of times a specific function may be executed.
"""
per_function_recursion_limit = 2
"""
A function may not be executed more than this number of times recursively.
"""
class RecursionDetector(object):
    """Tracks the stack of tree nodes currently being evaluated.

    ``pushed_nodes`` holds every node whose evaluation is in progress;
    encountering the same node again signals a statement-level recursion
    (see :func:`execution_allowed`).
    """
    def __init__(self):
        # Stack of nodes under evaluation, maintained by execution_allowed().
        self.pushed_nodes = list()
@contextmanager
def execution_allowed(evaluator, node):
    """
    A decorator to detect recursions in statements. In a recursion a statement
    at the same place, in the same module may not be executed two times.

    Yields ``True`` when the node may be executed, ``False`` when the same
    node is already being evaluated further up the stack.
    """
    node_stack = evaluator.recursion_detector.pushed_nodes
    if node in node_stack:
        # Same node already on the stack -> statement recursion detected.
        debug.warning('catched stmt recursion: %s @%s', node,
                      getattr(node, 'start_pos', None))
        yield False
        return
    node_stack.append(node)
    try:
        yield True
    finally:
        # Always unwind, even if the caller's block raised.
        node_stack.pop()
def METHOD_NAME(default=NO_CONTEXTS):
    """
    A decorator factory guarding executions against runaway recursion.

    The wrapped execution is registered with the evaluator's
    ``ExecutionRecursionDetector``; when one of the recursion/execution
    limits has been reached the wrapped function is *not* called and
    ``default`` is returned instead.

    :param default: value to return when execution is not allowed.
    """
    def decorator(func):
        def wrapper(self, *args, **kwargs):
            detector = self.evaluator.execution_recursion_detector
            # push_execution returns True when a limit was reached, i.e. when
            # this execution must NOT run (the old local name `allowed` was
            # inverted and misleading).
            limit_reached = detector.push_execution(self)
            try:
                if limit_reached:
                    result = default
                else:
                    result = func(self, *args, **kwargs)
            finally:
                # Always unwind the detector's bookkeeping.
                detector.pop_execution()
            return result
        return wrapper
    return decorator
class ExecutionRecursionDetector(object):
    """
    Catches recursions of executions.

    Tracks how deep and how often function executions are nested so that
    recursive analysed code cannot blow up the evaluator.  ``push_execution``
    returns ``True`` when an execution must NOT run (a limit was hit) and
    ``False`` when it is allowed; ``pop_execution`` undoes its bookkeeping.
    """
    def __init__(self, evaluator):
        self._evaluator = evaluator
        # current nesting depth of executions
        self._recursion_level = 0
        # stack of funcdefs currently being executed
        self._parent_execution_funcs = []
        # total execution count per funcdef
        self._funcdef_execution_counts = {}
        # total number of non-builtin executions so far
        self._execution_count = 0
    def pop_execution(self):
        # Undo the two bookkeeping steps done at the top of push_execution.
        self._parent_execution_funcs.pop()
        self._recursion_level -= 1
    def push_execution(self, execution):
        """Register *execution*; return True if it must NOT be executed."""
        funcdef = execution.tree_node
        # These two will be undone in pop_execution.
        self._recursion_level += 1
        self._parent_execution_funcs.append(funcdef)
        module = execution.get_root_context()
        if module == self._evaluator.builtins_module:
            # We have control over builtins so we know they are not recursing
            # like crazy. Therefore we just let them execute always, because
            # they usually just help a lot with getting good results.
            return False
        if self._recursion_level > recursion_limit:
            # overall nesting is too deep
            return True
        if self._execution_count >= total_function_execution_limit:
            # hard cap on total executions reached
            return True
        self._execution_count += 1
        if self._funcdef_execution_counts.setdefault(funcdef, 0) >= per_function_execution_limit:
            # this specific function was executed too many times overall
            return True
        self._funcdef_execution_counts[funcdef] += 1
        if self._parent_execution_funcs.count(funcdef) > per_function_recursion_limit:
            # this function recursed into itself too deeply
            return True
        return False
import warnings
from typing import Any, Tuple, Union
import numpy as np
import torch
from super_gradients.training.datasets.data_formats.bbox_formats.bbox_format import (
BoundingBoxFormat,
)
__all__ = ["xyxy_to_cxcywh", "xyxy_to_cxcywh_inplace", "cxcywh_to_xyxy_inplace", "cxcywh_to_xyxy", "CXCYWHCoordinateFormat"]
def xyxy_to_cxcywh(bboxes, image_shape: Tuple[int, int]):
    """
    Transforms bboxes from xyxy format to CX-CY-W-H format

    :param bboxes: BBoxes of shape (..., 4) in XYXY format
    :param image_shape: Image shape (rows, cols); unused, kept for a uniform converter signature
    :return: BBoxes of shape (..., 4) in CX-CY-W-H format
    """
    x_min, y_min, x_max, y_max = bboxes[..., 0], bboxes[..., 1], bboxes[..., 2], bboxes[..., 3]
    width = x_max - x_min
    height = y_max - y_min
    center_x = x_min + 0.5 * width
    center_y = y_min + 0.5 * height
    # Keep the scripting branch separate: TorchScript must not see the numpy path.
    if torch.jit.is_scripting():
        return torch.stack([center_x, center_y, width, height], dim=-1)
    else:
        if torch.is_tensor(bboxes):
            return torch.stack([center_x, center_y, width, height], dim=-1)
        elif isinstance(bboxes, np.ndarray):
            return np.stack([center_x, center_y, width, height], axis=-1)
        else:
            raise RuntimeError(f"Only Torch tensor or Numpy array is supported. Received bboxes of type {str(type(bboxes))}")
def cxcywh_to_xyxy(bboxes, image_shape: Tuple[int, int]):
    """
    Transforms bboxes from CX-CY-W-H format to XYXY format

    :param bboxes: BBoxes of shape (..., 4) in CX-CY-W-H format
    :param image_shape: Image shape (rows, cols); unused, kept for a uniform converter signature
    :return: BBoxes of shape (..., 4) in XYXY format
    """
    cx, cy, w, h = bboxes[..., 0], bboxes[..., 1], bboxes[..., 2], bboxes[..., 3]
    x1 = cx - 0.5 * w
    y1 = cy - 0.5 * h
    x2 = x1 + w
    y2 = y1 + h
    # Keep the scripting branch separate: TorchScript must not see the numpy path.
    if torch.jit.is_scripting():
        return torch.stack([x1, y1, x2, y2], dim=-1)
    else:
        if torch.is_tensor(bboxes):
            return torch.stack([x1, y1, x2, y2], dim=-1)
        # `elif` (rather than the previous second bare `if`) makes this
        # consistent with xyxy_to_cxcywh and guarantees exactly one branch.
        elif isinstance(bboxes, np.ndarray):
            return np.stack([x1, y1, x2, y2], axis=-1)
        else:
            raise RuntimeError(f"Only Torch tensor or Numpy array is supported. Received bboxes of type {str(type(bboxes))}")
def is_floating_point_array(array: Union[np.ndarray, Any]) -> bool:
    """Return True when *array* is a numpy array with a floating-point dtype.

    Note: any non-ndarray input (including torch tensors) yields False.
    """
    if not isinstance(array, np.ndarray):
        return False
    return bool(np.issubdtype(array.dtype, np.floating))
def cxcywh_to_xyxy_inplace(bboxes, image_shape: Tuple[int, int]):
"""
Not that bboxes dtype is preserved, and it may lead to unwanted rounding errors when computing a center of bbox.
:param bboxes: BBoxes of shape (..., 4) in CX-CY-W-H format
:return: BBoxes of shape (..., 4) in XYXY format
"""
if not torch.jit.is_scripting():
if torch.is_tensor(bboxes) and not torch.is_floating_point(bboxes):
warnings.warn(
f"Detected non floating-point ({bboxes.dtype}) input to cxcywh_to_xyxy_inplace function. "
f"This may cause rounding errors and lose of precision. You may want to convert your array to floating-point precision first."
)
if not is_floating_point_array(bboxes):
warnings.warn(
f"Detected non floating-point input ({bboxes.dtype}) to cxcywh_to_xyxy_inplace function. "
f"This may cause rounding errors and lose of precision. You may want to convert your array to floating-point precision first."
)
bboxes[..., 0:2] -= bboxes[..., 2:4] * 0.5 # cxcy -> x1y1
bboxes[..., 2:4] += bboxes[..., 0:2] # x1y1 + wh -> x2y2
return bboxes
def METHOD_NAME(bboxes, image_shape: Tuple[int, int]):
    """
    Transforms bboxes from xyxy format to CX-CY-W-H format, in place.

    Note that the bboxes dtype is preserved, and it may lead to unwanted
    rounding errors when computing a center of bbox with integer dtype.

    :param bboxes: BBoxes of shape (..., 4) in XYXY format
    :param image_shape: Image shape (rows, cols); unused, kept for a uniform converter signature
    :return: BBoxes of shape (..., 4) in CX-CY-W-H format (same object as the input)
    """
    if not torch.jit.is_scripting():
        is_int_tensor = torch.is_tensor(bboxes) and not torch.is_floating_point(bboxes)
        if is_int_tensor:
            warnings.warn(
                f"Detected non floating-point ({bboxes.dtype}) input to xyxy_to_cxcywh_inplace function. This may cause rounding errors and lose of precision. "
                "You may want to convert your array to floating-point precision first."
            )
        elif isinstance(bboxes, np.ndarray) and not np.issubdtype(bboxes.dtype, np.floating):
            warnings.warn(
                f"Detected non floating-point input ({bboxes.dtype}) to xyxy_to_cxcywh_inplace function. This may cause rounding errors and lose of precision. "
                "You may want to convert your array to floating-point precision first."
            )
    bboxes[..., 2:4] -= bboxes[..., 0:2]  # x2y2 - x1y1 -> wh
    bboxes[..., 0:2] += bboxes[..., 2:4] * 0.5  # cxcywh
    return bboxes
class CXCYWHCoordinateFormat(BoundingBoxFormat):
    """Bounding box format (cx, cy, w, h) in absolute (non-normalized) pixel units."""
    def __init__(self):
        # Identifier under which this format is registered.
        self.format = "cxcywh"
        self.normalized = False
    def get_to_xyxy(self, inplace: bool):
        # Converter cxcywh -> xyxy; the in-place variant mutates its input.
        if inplace:
            return cxcywh_to_xyxy_inplace
        else:
            return cxcywh_to_xyxy
    def get_from_xyxy(self, inplace: bool):
        # Converter xyxy -> cxcywh; the in-place variant mutates its input.
        if inplace:
            return METHOD_NAME
        else:
            return xyxy_to_cxcywh
# Copyright 2022 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Keras Utilities for DTensor related API."""
import inspect
import tensorflow.compat.v2 as tf
from keras.dtensor import dtensor_api as dtensor
# All the variable names in the default keras layers. We will use those to map
# against the args in the __init__ method to find corresponding layout args.
# See allow_layout() for more details.
KERAS_VARIABLE_NAMES = [
"alpha",
"beta",
"bias",
"depthwise",
"embeddings",
"gamma",
"kernel",
"moving_mean",
"moving_variance",
"pointwise",
"recurrent",
]
def allow_initializer_layout(init_method):
    """A decorator for injecting layout information to layer.__init__.
    Layout will be a new param for any of the weights for all the keras layers.
    Adding the param to all the __init__ method will be a big/duplicated work.
    This decorator is designed to reduce code duplication and make it easy to
    add/remove the dtensor feature if needed.
    Sample usage:
    ```python
    class Dense(tf.keras.layer.Layer):
      @allow_initializer_layout
      def __init__(self, units,
                   kernel_initializer='zeros',
                   bias_initializer='zeros',
                   **kwargs):
        super().__init__(**kwargs)
    d = Dense(units=8, kernel_layout=layout1, bias_layout=layout2)
    d.kernel_layout == layout1
    d.bias_layout == layout2
    ```
    By adding this annotation, it will:
    1. Filter out the kwargs based on some keywords, eg if the
      'kernel_initializer' appears in method signature, then it will try to pop
      the 'kernel_layout' if it presents. Same for "bias" and
      "recurrent_kernel", etc. This will make sure the layout related param is
      not passed to `BaseLayer.__init__`, which would raise an error about
      unexpected keyword args.
    2. Set the self.kernel/bias_layout attribute after the `__init__` method is
      called. Keras framework will use those fields to create weights down the
      stream.
    Args:
      init_method: the `__init__` method of the Keras layer to annotate.
    Returns:
      the annotated __init__ method.
    """
    def _wrap_function(layer_instance, *args, **kwargs):
        # Inspecting the signature on every call is cheap enough here, since
        # layer construction happens rarely compared to layer invocation.
        signature = inspect.signature(init_method)
        layout_args = {}
        # Check args like 'kernel_initializer' and pop the 'kernel_layout' if it
        # presents.
        for variable_name in KERAS_VARIABLE_NAMES:
            if variable_name + "_initializer" in signature.parameters:
                layout = kwargs.pop(variable_name + "_layout", None)
                if layout:
                    layout_args[variable_name + "_layout"] = layout
        init_method(layer_instance, *args, **kwargs)
        # Inject the layout parameter after the invocation of __init__()
        for layout_param_name, layout in layout_args.items():
            setattr(layer_instance, layout_param_name, layout)
    # return decorated
    return tf.__internal__.decorator.make_decorator(
        target=init_method, decorator_func=_wrap_function
    )
def inject_mesh(init_method):
    """Inject DTensor mesh information to an object.
    This is useful for keras object like `Metric` and `Optimizer` which need
    DTensor mesh to create the weights, but doesn't want to change the current
    public API interface.
    This is for temporary usage and eventually the mesh/layout information will
    be public arguments in the `__init__` method.
    Sample usage:
    ```python
    class Accuracy(tf.keras.metrics.Metric):
      @inject_mesh
      def __init__(self, name='accuracy', dtype=None):
        super().__init__(**kwargs)
    acc = Accuracy(mesh=mesh)
    assert acc._mesh == mesh
    ```
    Args:
      init_method: the `__init__` method of the Keras class to annotate.
    Returns:
      the annotated __init__ method.
    """
    def _wrap_function(instance, *args, **kwargs):
        # `mesh` is popped so the wrapped __init__ never sees an unexpected
        # keyword argument; a missing/None mesh leaves the instance untouched.
        mesh = kwargs.pop("mesh", None)
        # Note that the injection of _mesh need to happen before the invocation
        # of __init__, since the class might need the mesh to create weights in
        # the __init__.
        if mesh is not None:
            instance._mesh = mesh
        init_method(instance, *args, **kwargs)
    return tf.__internal__.decorator.make_decorator(
        target=init_method, decorator_func=_wrap_function
    )
def call_with_layout(fn, layout, *args, **kwargs):
    """Invoke the function with inputs and relayout the result.
    Args:
      fn: the function to invoke.
      layout: if not None, the output of the fn will be relayout with this.
      *args: positional arguments to be called with fn.
      **kwargs: keyword arguments to be called with fn.
    Returns:
      The output of fn, with potential relayout with the layout specified.
    """
    if layout:
        # Run fn under the layout's mesh as the default, so any tensors it
        # creates land on that mesh, then relayout the result explicitly.
        with dtensor.default_mesh(layout.mesh):
            result = fn(*args, **kwargs)
            return dtensor.relayout(result, layout)
    return fn(*args, **kwargs)
def METHOD_NAME():
    """Check whether running with a `Strategy` that is backed by DTensor.
    In the DTensor based training, all the tensors are in global context, which
    is different from the local context. Some keras components need to
    behave differently, e.g. BatchNormalization and SyncBatchNormalization, as
    well as optimizers.
    This check will help those layer to branch the logic and keep the correct
    behavior between different context.
    Returns:
      bool: True if the active distribution strategy exposes a `_mesh`
      attribute (i.e. is DTensor-backed), False otherwise.
    """
    if not tf.distribute.has_strategy():
        return False
    strategy = tf.distribute.get_strategy()
    # TODO(scottzhu): Finalize the strategy API to check if a strategy is backed
    # by DTensor.
    return getattr(strategy, "_mesh", None) is not None
# Copyright 2020,2021 Sony Corporation.
# Copyright 2021 Sony Group Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division
import numpy as np
import png
from nnabla.logger import logger
from .common import upscale_pixel_intensity, check_type_and_cast_if_necessary, \
_imread_before, _imread_after, _imsave_before
from .image_utils_backend import ImageUtilsBackend
class PngBackend(ImageUtilsBackend):
    """Image reading/writing backend based on the pure-python ``pypng`` module.

    Only 8 and 16 bit-depth PNGs are supported for reading; other depths are
    delegated to the next available backend. Resizing is not supported.
    """
    def __init__(self):
        ImageUtilsBackend.__init__(self)
    @staticmethod
    def rgb2gray(arr):
        """Convert an RGB(A) array to grayscale using ITU-R BT.601 luma weights.

        :param arr: array of shape (y, x, channels) with at least 3 channels.
        :return: 2d float array of shape (y, x).
        """
        # return shape is 2d of (y, x)
        return np.dot(arr[..., :3], [0.299, 0.587, 0.114])
    @staticmethod
    def convert_num_channles(img, num_channels):
        """Convert ``img`` to the requested number of channels.

        NOTE(review): the misspelled method name ("channles") is kept for
        backward compatibility with existing callers.

        :param img: grayscale (2d) or multi-channel (3d) image array.
        :param num_channels: desired channel count; -1 keeps the input as-is.
        :return: array with the requested number of channels.
        :raises ValueError: if the requested conversion is not supported.
        """
        src_channels = 0 if len(img.shape) == 2 else img.shape[-1]
        if src_channels == num_channels or num_channels == -1:
            return img
        if src_channels == 0:
            # grayscale -> replicate the single plane across all channels
            return np.broadcast_to(img[..., np.newaxis], img.shape + (num_channels,))
        if num_channels == 3:
            # drop the alpha channel (if any)
            return img[..., :3]
        if num_channels == 4:
            # append a fully-opaque alpha channel matching the input dtype
            fill = 65535 if img.dtype == np.uint16 else 255
            alpha = np.ones(img.shape[:-1] + (1,)).astype(img.dtype) * fill
            return np.concatenate((img, alpha), axis=-1)
        raise ValueError("invalid number of channels")
    @staticmethod
    def read_result_to_ndarray(pixels, width, height, metadata, grayscale, as_uint16, num_channels):
        """Convert the pypng row iterator into a numpy image array.

        :param pixels: row iterator as returned by ``png.Reader.asDirect()``.
        :param metadata: pypng metadata dict (provides "bitdepth"/"greyscale").
        :raises ValueError: if a 16-bit image is requested as 8-bit output.
        :return: ndarray of shape (h, w) or (h, w, channels).
        """
        if metadata["bitdepth"] == 16 and not as_uint16:
            raise ValueError("cannot convert 16bit image to 8bit image."
                             " Original range of pixel values is unknown.")
        output_type = np.uint16 if as_uint16 else np.uint8
        # shape is (height, width * planes), planes = 1 (gray), 3 (rgb) or 4 (rgba)
        img = np.asarray(list(pixels), output_type)
        if metadata["bitdepth"] == 8 and as_uint16:
            logger.warning("You want to read image as uint16, but the original bit-depth is 8 bit."
                           "All pixel values are simply increased by 256 times.")
            img *= 256
        if not metadata["greyscale"]:
            # read image is rgb or rgba
            img = img.reshape((height, width, -1))
            if grayscale:
                img = PngBackend.rgb2gray(img).astype(output_type)
        img = PngBackend.convert_num_channles(img, num_channels)
        return img
    def METHOD_NAME(self, path, ext, operator):
        """Report whether this backend supports ``operator`` on ``path``.

        :return: 'OK', 'Recommended' or 'NG'
        """
        if operator == "resize":
            return 'NG'
        elif operator == "save":
            return 'OK'
        else:
            if ext == '.png':
                f = path if hasattr(path, "read") else open(path, "rb")
                try:
                    r = png.Reader(file=f)
                    width, height, pixels, metadata = r.asDirect()
                    # rewind so a caller-provided file object can be re-read later
                    f.seek(0)
                    bit_depth = metadata.get("bitdepth")
                finally:
                    # close the handle only if we opened it ourselves
                    # (the previous code leaked it for string paths)
                    if f is not path:
                        f.close()
                if bit_depth not in [8, 16]:
                    return "NG"
                else:
                    return "Recommended"
            else:
                return "NG"
    def imread(self, path, grayscale=False, size=None, interpolate="bilinear",
               channel_first=False, as_uint16=False, num_channels=-1):
        """
        Read image by pypng module.
        Args:
            path (str or 'file object'): File path or object to read.
            grayscale (bool): If True, the image is converted to grayscale.
            size (tupple of int):
                (width, height).
                If None, output img shape depends on the files to read.
            channel_first (bool):
                This argument specifies the shape of img is whether (height, width, channel) or (channel, height, width).
                Default value is False, which means the img shape is (height, width, channel).
            interpolate (str):
                must be one of ["nearest", "box", "bilinear", "hamming", "bicubic", "lanczos"].
            as_uint16 (bool):
                If True, this function reads image as uint16.
            num_channels (int):
                channel size of output array.
                Default is -1 which preserves raw image shape.
        Returns:
            numpy.ndarray
        """
        _imread_before(grayscale, num_channels)
        f = path if hasattr(path, "read") else open(path, "rb")
        try:
            r = png.Reader(file=f)
            width, height, pixels, metadata = r.asDirect()
            bit_depth = metadata.get("bitdepth")
            if bit_depth not in [8, 16]:
                # Unsupported depth: delegate to the next backend. NOTE(review):
                # when `path` is a file object its position is not rewound before
                # delegation (pre-existing behavior) -- confirm the fallback
                # backends re-open string paths themselves.
                logger.warning("The bit-depth of the image you want to read is unsupported ({}bit)."
                               "Currently, pypng backend`s imread supports only [8, 16] bit-depth."
                               "the path for this image is {}".format(bit_depth, path))
                return self.next_available(path).imread(path, grayscale=grayscale, size=size, interpolate=interpolate,
                                                        channel_first=channel_first, as_uint16=as_uint16, num_channels=num_channels)
            try:
                img = self.read_result_to_ndarray(
                    pixels, width, height, metadata, grayscale, as_uint16, num_channels)
            except Exception:
                # Broad by design: any decode failure falls through to the next
                # backend (was a bare `except:`, which also swallowed
                # SystemExit/KeyboardInterrupt).
                return self.next_available(path).imread(path, grayscale=grayscale, size=size, interpolate=interpolate,
                                                        channel_first=channel_first, as_uint16=as_uint16, num_channels=num_channels)
        finally:
            # Close the handle only if we opened it ourselves (previously leaked).
            if f is not path:
                f.close()
        return _imread_after(img, size, interpolate, channel_first, self.imresize)
    def imsave(self, path, img, channel_first=False, as_uint16=False, auto_scale=True):
        """
        Save image by pypng module.
        Args:
            path (str): output filename
            img (numpy.ndarray): Image array to save. Image shape is considered as (height, width, channel) by default.
            channel_first:
                This argument specifies the shape of img is whether (height, width, channel) or (channel, height, width).
                Default value is False, which means the img shape is (height, width, channel)
            as_uint16 (bool):
                If True, save image as uint16.
            auto_scale (bool) :
                Whether upscale pixel values or not.
                If you want to save float image, this argument must be True.
                In pypng backend, all below are supported.
                - float ([0, 1]) to uint8 ([0, 255]) (if img.dtype==float and upscale==True and as_uint16==False)
                - float to uint16 ([0, 65535]) (if img.dtype==float and upscale==True and as_uint16==True)
                - uint8 to uint16 are supported (if img.dtype==np.uint8 and upscale==True and as_uint16==True)
        """
        img = _imsave_before(img, channel_first, auto_scale)
        if auto_scale:
            img = upscale_pixel_intensity(img, as_uint16)
        img = check_type_and_cast_if_necessary(img, as_uint16)
        bitdepth = 8 if img.dtype == np.uint8 else 16
        # 2d arrays and single-channel 3d arrays are written as grayscale
        grayscale = len(img.shape) == 2 or (len(img.shape) == 3 and img.shape[-1] == 1)
        writer = png.Writer(img.shape[1], img.shape[0],
                            greyscale=grayscale, bitdepth=bitdepth)
        # Use a context manager so the handle is flushed and closed
        # (the previous code leaked the handle returned by open()).
        with open(path, "wb") as f:
            writer.write(f, img.reshape(img.shape[0], -1))
import pytest
import importlib
import opentrons
from typing import cast, Any, TYPE_CHECKING
from opentrons.calibration_storage import (
types as cs_types,
helpers,
)
if TYPE_CHECKING:
from opentrons_shared_data.labware.dev_types import LabwareDefinition
from opentrons_shared_data.pipette.dev_types import LabwareUri
from opentrons_shared_data.deck.dev_types import RobotModel
@pytest.fixture(autouse=True)
def reload_module(robot_model: "RobotModel") -> None:
    """Reload calibration_storage for every test so robot-model-dependent
    module state does not leak between parametrized runs."""
    importlib.reload(opentrons.calibration_storage)
@pytest.fixture
def METHOD_NAME(
    ot_config_tempdir: Any,
    minimal_labware_def: "LabwareDefinition",
    minimal_labware_def2: "LabwareDefinition",
) -> None:
    """
    Starting calibration data fixture.
    Adds dummy data to a temporary directory to test delete commands against.
    """
    # Imported lazily so the autouse reload_module fixture has already
    # refreshed the package before we pull names out of it.
    from opentrons.calibration_storage import (
        create_tip_length_data,
        save_tip_length_calibration,
    )
    # Two calibrations for pip1 (different tip racks) and one for pip2; the
    # second save for pip1 uses a different labware so it must not overwrite
    # the first entry.
    tip_length1 = create_tip_length_data(minimal_labware_def, 22.0)
    tip_length2 = create_tip_length_data(minimal_labware_def, 31.0)
    tip_length3 = create_tip_length_data(minimal_labware_def2, 31.0)
    save_tip_length_calibration("pip1", tip_length1)
    save_tip_length_calibration("pip2", tip_length2)
    save_tip_length_calibration("pip1", tip_length3)
def test_save_tip_length_calibration(
    ot_config_tempdir: Any, minimal_labware_def: "LabwareDefinition"
) -> None:
    """
    Test saving tip length calibrations.
    """
    from opentrons.calibration_storage import (
        tip_lengths_for_pipette,
        create_tip_length_data,
        save_tip_length_calibration,
    )

    # Nothing is stored before the first save.
    assert tip_lengths_for_pipette("pip1") == {}
    assert tip_lengths_for_pipette("pip2") == {}

    rack_hash = helpers.hash_labware_def(minimal_labware_def)
    save_tip_length_calibration("pip1", create_tip_length_data(minimal_labware_def, 22.0))
    save_tip_length_calibration("pip2", create_tip_length_data(minimal_labware_def, 31.0))

    # Each pipette reports exactly the length saved for it, keyed by rack hash.
    assert tip_lengths_for_pipette("pip1")[rack_hash].tipLength == 22.0
    assert tip_lengths_for_pipette("pip2")[rack_hash].tipLength == 31.0
def test_get_tip_length_calibration(
    METHOD_NAME: Any, minimal_labware_def: "LabwareDefinition"
) -> None:
    """
    Test ability to get a tip length calibration model.
    """
    from opentrons.calibration_storage import load_tip_length_calibration, models

    loaded = load_tip_length_calibration("pip1", minimal_labware_def)
    expected = models.v1.TipLengthModel(
        tipLength=22.0,
        source=cs_types.SourceType.user,
        lastModified=loaded.lastModified,
        uri=cast("LabwareUri", "opentronstest/minimal_labware_def/1"),
    )
    assert loaded == expected

    # Unknown pipettes raise instead of returning empty data.
    with pytest.raises(cs_types.TipLengthCalNotFound):
        load_tip_length_calibration("nopipette", minimal_labware_def)
def test_delete_specific_tip_calibration(
    METHOD_NAME: Any, minimal_labware_def: "LabwareDefinition"
) -> None:
    """
    Test delete a specific tip length calibration.
    """
    from opentrons.calibration_storage import (
        tip_lengths_for_pipette,
        delete_tip_length_calibration,
    )

    # The fixture seeds two calibrations for pip1 and one for pip2.
    assert len(tip_lengths_for_pipette("pip1")) == 2
    assert tip_lengths_for_pipette("pip2") != {}

    delete_tip_length_calibration(helpers.hash_labware_def(minimal_labware_def), "pip1")

    # Only the targeted calibration is gone; pip2's data is untouched.
    assert len(tip_lengths_for_pipette("pip1")) == 1
    assert tip_lengths_for_pipette("pip2") != {}
def test_delete_all_tip_calibration(METHOD_NAME: Any) -> None:
    """
    Test delete all tip length calibration.
    """
    from opentrons.calibration_storage import (
        tip_lengths_for_pipette,
        clear_tip_length_calibration,
    )

    for pipette in ("pip1", "pip2"):
        assert tip_lengths_for_pipette(pipette) != {}

    clear_tip_length_calibration()

    # Every pipette's calibration store is now empty.
    for pipette in ("pip1", "pip2"):
        assert tip_lengths_for_pipette(pipette) == {}
299,380 | get slit gaps | # -*- coding: utf-8 -*-
"""
[Name] BeamInfo
[Description]
BeamInfo hardware object informs mxCuBE (HutchMenuBrick) about the beam position
and size.
This is the Soleil PX1 version
[Emitted signals]
beamInfoChanged
beamPosChanged
[Included Hardware Objects]
[Example XML file]
<device class = "BeaminfoPX2">
<username>Beamstop</username>
<channel type="tango" tangoname="i11-ma-cx1/ex/md2" polling="1000" name="beamsizex">BeamSizeHorizontal</channel>
<channel type="tango" tangoname="i11-ma-cx1/ex/md2" polling="1000" name="beamsizey">BeamSizeVertical</channel>
<channel type="tango" tangoname="i11-ma-cx1/ex/md2" polling="1000" name="positionx">BeamPositionHorizontal</channel>
<channel type="tango" tangoname="i11-ma-cx1/ex/md2" polling="1000" name="positiony">BeamPositionVertical</channel>
<object role="zoom" hwrid="/zoom"></object>
</device>
"""
import logging
from mxcubecore.BaseHardwareObjects import Equipment
class PX2BeamInfo(Equipment):
    """
    Beam-information hardware object for the Soleil PX2 beamline.

    Publishes beam position and size to the GUI via the ``beamInfoChanged``
    and ``beamPosChanged`` signals, reading values from the Tango channels
    declared in the XML configuration (see the module docstring).
    """

    def __init__(self, *args):
        Equipment.__init__(self, *args)
        # Beam position in pixels; hard-coded fallback until channels deliver
        # real values.
        self.beam_position = [328, 220]  # [None, None]
        # Beam size in millimetres (hard-coded, see sizeUpdated()).
        self.beam_size = [0.010, 0.005]  # [None, None]
        self.shape = "rectangular"
        self.beam_info_dict = {"size_x": None, "size_y": None, "shape": self.shape}
        self.beam_info_dict["size_x"] = 0.010
        self.beam_info_dict["size_y"] = 0.005
        self.beam_info_dict["shape"] = "ellipse"
        # Channels
        self.chanBeamSizeX = None
        self.chanBeamSizeY = None
        self.chanBeamPosX = None
        self.chanBeamPosY = None
        # Zoom motor
        self.zoomMotor = None
        # self.minidiff = None
        self.positionTable = {}

    def init(self):
        """Connect to the size/position channels and the zoom motor."""
        try:
            self.chanBeamSizeX = self.get_channel_object("beamsizex")
            self.chanBeamSizeX.connect_signal("update", self.beamSizeXChanged)
        except KeyError:
            logging.getLogger().warning(
                "%s: cannot connect to beamsize x channel ", self.name()
            )
        try:
            self.chanBeamSizeY = self.get_channel_object("beamsizey")
            self.chanBeamSizeY.connect_signal("update", self.beamSizeYChanged)
        except KeyError:
            logging.getLogger().warning(
                "%s: cannot connect to beamsize y channel ", self.name()
            )
        try:
            self.chanBeamPosX = self.get_channel_object("positionx")
            self.chanBeamPosX.connect_signal("update", self.beamPosXChanged)
        except KeyError:
            logging.getLogger().warning(
                "%s: cannot connect to beamposition x channel ", self.name()
            )
        try:
            self.chanBeamPosY = self.get_channel_object("positiony")
            self.chanBeamPosY.connect_signal("update", self.beamPosYChanged)
        except KeyError:
            logging.getLogger().warning(
                "%s: cannot connect to beamposition z channel ", self.name()
            )
        self.zoomMotor = self.get_deviceby_role("zoom")
        # Guard against channels that failed to connect above: the original
        # code dereferenced them unconditionally and raised AttributeError
        # when either position channel was unavailable.
        if self.chanBeamPosX is not None and self.chanBeamPosY is not None:
            self.beam_position[0], self.beam_position[1] = (
                self.chanBeamPosX.value,
                self.chanBeamPosY.value,
            )
        if self.zoomMotor is not None:
            self.connect(
                self.zoomMotor, "predefinedPositionChanged", self.zoomPositionChanged
            )
        else:
            logging.getLogger().info("Zoom - motor is not good ")

    def beamSizeXChanged(self, value):
        logging.getLogger().info("beamSizeX changed. It is %s " % value)
        self.beam_size[0] = value
        self.sizeUpdated()

    def beamSizeYChanged(self, value):
        logging.getLogger().info("beamSizeY changed. It is %s " % value)
        self.beam_size[1] = value
        self.sizeUpdated()

    def beamPosXChanged(self, value):
        logging.getLogger().info("beamPosX changed. It is %s " % value)
        self.beam_position[0] = value
        self.positionUpdated()

    def beamPosYChanged(self, value):
        logging.getLogger().info("beamPosY changed. It is %s " % value)
        self.beam_position[1] = value
        self.positionUpdated()

    def zoomPositionChanged(self, name, offset):
        logging.getLogger().info(
            "zoom position changed. It is %s / offset=%s " % (name, offset)
        )
        # Re-read the beam position on zoom change; skip when the channels
        # never connected (same guard as in init()).
        if self.chanBeamPosX is not None and self.chanBeamPosY is not None:
            self.beam_position[0], self.beam_position[1] = (
                self.chanBeamPosX.value,
                self.chanBeamPosY.value,
            )

    def sizeUpdated(self):
        # TODO check values given by md2: it appears that the beam size
        # channel in MD2 doesn't work, so hard-coded sizes are emitted.
        self.beam_info_dict["size_x"] = 0.010
        self.beam_info_dict["size_y"] = 0.005
        self.emit("beamInfoChanged", (self.beam_info_dict,))

    def sizeUpdated2(self):
        # not used
        if None in self.beam_size:
            return
        self.beam_info_dict["size_x"] = self.beam_size[0]
        self.beam_info_dict["size_y"] = self.beam_size[1]
        self.emit("beamInfoChanged", (self.beam_info_dict,))

    def positionUpdated(self):
        self.emit("beamPosChanged", (self.beam_position,))
        self.sizeUpdated()

    def get_beam_info(self):
        # logging.getLogger().warning('returning beam info It is %s ' % str(self.beam_info_dict))
        return self.beam_info_dict

    def get_beam_position(self):
        # logging.getLogger().warning('returning beam positions. It is %s ' % str(self.beam_position))
        return self.beam_position

    def get_beam_size(self):
        """
        Descript. : returns beam size in millimeters
        Return    : list with two integers
        """
        # self.evaluate_beam_info()
        return self.beam_info_dict["size_x"], self.beam_info_dict["size_y"]

    def get_beam_shape(self):
        """Return the configured beam shape string."""
        # self.evaluate_beam_info()
        return self.shape

    def METHOD_NAME(self):
        # Slit gaps are not available on this beamline.
        return None, None

    def get_beam_divergence_hor(self):
        return self.get_property("beam_divergence_hor")

    def get_beam_divergence_ver(self):
        return self.get_property("beam_divergence_vert")
299,381 | test install no pep660 setup cfg fallback | import os
from pathlib import Path
from typing import Any, Dict
import tomli_w
from tests.lib import PipTestEnvironment
SETUP_PY = """
from setuptools import setup
setup()
"""
SETUP_CFG = """
[metadata]
name = project
version = 1.0.0
"""
BACKEND_WITHOUT_PEP660 = """
from setuptools.build_meta import (
build_wheel as _build_wheel,
prepare_metadata_for_build_wheel as _prepare_metadata_for_build_wheel,
get_requires_for_build_wheel as _get_requires_for_build_wheel,
)
def get_requires_for_build_wheel(config_settings=None):
with open("log.txt", "a") as f:
print(":get_requires_for_build_wheel called", file=f)
return _get_requires_for_build_wheel(config_settings)
def prepare_metadata_for_build_wheel(metadata_directory, config_settings=None):
with open("log.txt", "a") as f:
print(":prepare_metadata_for_build_wheel called", file=f)
return _prepare_metadata_for_build_wheel(metadata_directory, config_settings)
def build_wheel(wheel_directory, config_settings=None, metadata_directory=None):
with open("log.txt", "a") as f:
print(":build_wheel called", file=f)
return _build_wheel(wheel_directory, config_settings, metadata_directory)
"""
# fmt: off
BACKEND_WITH_PEP660 = BACKEND_WITHOUT_PEP660 + """
def get_requires_for_build_editable(config_settings=None):
with open("log.txt", "a") as f:
print(":get_requires_for_build_editable called", file=f)
return _get_requires_for_build_wheel(config_settings)
def prepare_metadata_for_build_editable(metadata_directory, config_settings=None):
with open("log.txt", "a") as f:
print(":prepare_metadata_for_build_editable called", file=f)
return _prepare_metadata_for_build_wheel(metadata_directory, config_settings)
def build_editable(wheel_directory, config_settings=None, metadata_directory=None):
with open("log.txt", "a") as f:
print(":build_editable called", file=f)
return _build_wheel(wheel_directory, config_settings, metadata_directory)
"""
# fmt: on
def _make_project(
    tmpdir: Path, backend_code: str, with_setup_py: bool, with_pyproject: bool = True
) -> Path:
    """Create a throwaway test project under *tmpdir* and return its path."""
    project_dir = tmpdir / "project"
    project_dir.mkdir()
    project_dir.joinpath("setup.cfg").write_text(SETUP_CFG)
    if with_setup_py:
        project_dir.joinpath("setup.py").write_text(SETUP_PY)
    if backend_code:
        # An in-tree backend implies a pyproject.toml declaring it.
        assert with_pyproject
        build_system: Dict[str, Any] = {
            "requires": ["setuptools", "wheel"],
            "build-backend": "test_backend",
            "backend-path": ["."],
        }
        project_dir.joinpath("pyproject.toml").write_text(
            tomli_w.dumps({"build-system": build_system})
        )
        project_dir.joinpath("test_backend.py").write_text(backend_code)
    elif with_pyproject:
        project_dir.joinpath("pyproject.toml").touch()
    project_dir.joinpath("log.txt").touch()
    return project_dir
def _assert_hook_called(project_dir: Path, hook: str) -> None:
    """Fail unless *hook* was recorded in the project's log.txt."""
    recorded = project_dir.joinpath("log.txt").read_text()
    assert f":{hook} called" in recorded, f"{hook} has not been called"
def _assert_hook_not_called(project_dir: Path, hook: str) -> None:
    """Fail if *hook* was recorded in the project's log.txt."""
    recorded = project_dir.joinpath("log.txt").read_text()
    assert f":{hook} called" not in recorded, f"{hook} should not have been called"
def test_install_pep517_basic(tmpdir: Path, script: PipTestEnvironment) -> None:
    """
    Check that the test harness we have in this file is sane.
    """
    project_dir = _make_project(tmpdir, BACKEND_WITHOUT_PEP660, with_setup_py=False)
    script.pip("install", "--no-index", "--no-build-isolation", project_dir)
    # A plain install goes through the standard PEP 517 wheel hooks.
    for hook in ("prepare_metadata_for_build_wheel", "build_wheel"):
        _assert_hook_called(project_dir, hook)
def test_install_pep660_basic(tmpdir: Path, script: PipTestEnvironment) -> None:
    """
    Test with backend that supports build_editable.
    """
    project_dir = _make_project(tmpdir, BACKEND_WITH_PEP660, with_setup_py=False)
    result = script.pip(
        "install", "--no-index", "--no-build-isolation", "--editable", project_dir
    )
    # The editable (PEP 660) hooks must have been used.
    for hook in ("prepare_metadata_for_build_editable", "build_editable"):
        _assert_hook_called(project_dir, hook)
    # PEP 660 installs must not fall back to the legacy .egg-link mechanism.
    egg_link = result.test_env.site_packages.joinpath("project.egg-link")
    assert (
        egg_link not in result.files_created
    ), "a .egg-link file should not have been created"
def test_install_no_pep660_setup_py_fallback(
    tmpdir: Path, script: PipTestEnvironment
) -> None:
    """
    Test that we fall back to setuptools develop when using a backend that
    does not support build_editable. Since there is a pyproject.toml,
    the prepare_metadata_for_build_wheel hook is called.
    """
    project_dir = _make_project(tmpdir, BACKEND_WITHOUT_PEP660, with_setup_py=True)
    result = script.pip(
        "install",
        "--no-index",
        "--no-build-isolation",
        "--editable",
        project_dir,
        allow_stderr_warning=False,
    )
    _assert_hook_called(project_dir, "prepare_metadata_for_build_wheel")
    # The legacy develop-install fallback creates an .egg-link file.
    egg_link = result.test_env.site_packages.joinpath("project.egg-link")
    assert (
        egg_link in result.files_created
    ), "a .egg-link file should have been created"
def METHOD_NAME(
    tmpdir: Path, script: PipTestEnvironment
) -> None:
    """
    Test that we fall back to setuptools develop when using a backend that
    does not support build_editable. Since there is a pyproject.toml,
    the prepare_metadata_for_build_wheel hook is called.

    Unlike the setup_py variant above, the project only ships a setup.cfg.
    """
    project_dir = _make_project(tmpdir, BACKEND_WITHOUT_PEP660, with_setup_py=False)
    result = script.pip(
        "install",
        "--no-index",
        "--no-build-isolation",
        "--editable",
        project_dir,
        allow_stderr_warning=False,
    )
    # NOTE: a leftover debug `print(result.stdout, result.stderr)` was removed
    # here; it only spammed the test output.
    _assert_hook_called(project_dir, "prepare_metadata_for_build_wheel")
    assert (
        result.test_env.site_packages.joinpath("project.egg-link")
        in result.files_created
    ), ".egg-link file should have been created"
def test_wheel_editable_pep660_basic(tmpdir: Path, script: PipTestEnvironment) -> None:
    """
    Test 'pip wheel' of an editable pep 660 project.
    It must *not* call prepare_metadata_for_build_editable.
    """
    project_dir = _make_project(tmpdir, BACKEND_WITH_PEP660, with_setup_py=False)
    wheel_dir = tmpdir / "dist"
    script.pip(
        "wheel",
        "--no-index",
        "--no-build-isolation",
        "--editable",
        project_dir,
        "-w",
        wheel_dir,
    )
    # 'pip wheel' builds a regular wheel, so only the wheel hooks may run.
    for hook in ("prepare_metadata_for_build_editable", "build_editable"):
        _assert_hook_not_called(project_dir, hook)
    for hook in ("prepare_metadata_for_build_wheel", "build_wheel"):
        _assert_hook_called(project_dir, hook)
    assert len(os.listdir(str(wheel_dir))) == 1, "a wheel should have been created"
def test_download_editable_pep660_basic(
    tmpdir: Path, script: PipTestEnvironment
) -> None:
    """
    Test 'pip download' of an editable pep 660 project.
    It must *not* call prepare_metadata_for_build_editable.
    """
    project_dir = _make_project(tmpdir, BACKEND_WITH_PEP660, with_setup_py=False)
    reqs_file = tmpdir / "requirements.txt"
    reqs_file.write_text(f"-e {project_dir.as_uri()}\n")
    download_dir = tmpdir / "download"
    script.pip(
        "download",
        "--no-index",
        "--no-build-isolation",
        "-r",
        reqs_file,
        "-d",
        download_dir,
    )
    # Downloading only needs metadata; the editable hook must stay untouched.
    _assert_hook_not_called(project_dir, "prepare_metadata_for_build_editable")
    _assert_hook_called(project_dir, "prepare_metadata_for_build_wheel")
    assert len(os.listdir(str(download_dir))) == 1, "a zip should have been created"
299,382 | register vps | from time import time
from enigma import getBestPlayableServiceReference, eServiceReference
from Components.config import config, ConfigSelection, ConfigDateTime, ConfigClock, getConfigListEntry
from Screens.InfoBarGenerics import InfoBarInstantRecord
from Screens.Timers import onRecordTimerCreate, onRecordTimerSetup, onRecordTimerSave, onRecordTimerChannelChange
from .Vps_setup import VPS_Screen_Info
from .Vps_check import Check_PDC, VPS_check_PDC_Screen, VPS_check_on_instanttimer
from .Vps import vps_timers
# Config-list index of the VPS "enabled" entry; saved by the change notifier so
# timerSetupHook can restore the cursor after the list is rebuilt.
timerentry_vpsplugin_enabled_index = 0
def timerCreateHook(self):
    # Called when a timer-entry screen ("self") is created: attaches the extra
    # VPS config elements (enabled selector, optional explicit VPS date/time).
    try:
        self.timerentry_vpsplugin_dontcheck_pdc = not config.plugins.vps.do_PDC_check.getValue()
        default_value = "no"
        if self.timer.vpsplugin_enabled is not None:
            # Existing timer: mirror its stored VPS settings.
            self.timerentry_vpsplugin_dontcheck_pdc = self.timer.vpsplugin_enabled
            if self.timer.vpsplugin_enabled:
                default_value = {False: "yes_safe", True: "yes"}[self.timer.vpsplugin_overwrite]
        elif config.plugins.vps.vps_default.value != "no" and self.timer.eit is not None and self.timer.name != "" and self.timer not in self.session.nav.RecordTimer.timer_list and self.timer not in self.session.nav.RecordTimer.processed_timers:
            # Brand-new timer with an EPG event: apply the configured default,
            # but only if the service is known to support PDC/VPS.
            service = self.timerServiceReference.ref
            if service and service.flags & eServiceReference.isGroup:
                service = getBestPlayableServiceReference(service, eServiceReference())
            has_pdc, last_check, default_vps = Check_PDC.check_service(service)
            if has_pdc == 1 or default_vps == 1:
                self.timerentry_vpsplugin_dontcheck_pdc = True
                default_value = config.plugins.vps.vps_default.value
        self.timerentry_vpsplugin_enabled = ConfigSelection(choices=[("no", _("No")), ("yes_safe", _("Yes (safe mode)")), ("yes", _("Yes"))], default=default_value)
        self.timerentry_vpsplugin_enabled.addNotifier(timerVps_enabled_Entry_Changed, initial_call=False, extra_args=self)
        # The explicit VPS time defaults to the stored value, falling back to
        # the timer's begin time.
        if self.timer.vpsplugin_time is not None:
            self.timerentry_vpsplugin_time_date = ConfigDateTime(default=self.timer.vpsplugin_time, formatstring=_("%d.%B %Y"), increment=86400)
            self.timerentry_vpsplugin_time_clock = ConfigClock(default=self.timer.vpsplugin_time)
        else:
            self.timerentry_vpsplugin_time_date = ConfigDateTime(default=self.timer.begin, formatstring=_("%d.%B %Y"), increment=86400)
            self.timerentry_vpsplugin_time_clock = ConfigClock(default=self.timer.begin)
    except Exception as exc:
        print("[VPS] timerCreateHook : %s" % exc)
        pass
def timerSetupHook(self):
    # Called while the timer-setup config list is being (re)built: appends the
    # VPS entries and triggers a PDC capability check when necessary.
    global timerentry_vpsplugin_enabled_index
    currentIndex = self["config"].getCurrentIndex()
    # Restore the cursor onto the VPS entry after a rebuild that was triggered
    # by timerVps_enabled_Entry_Changed.
    if currentIndex == 0 and timerentry_vpsplugin_enabled_index > 0:
        currentIndex = timerentry_vpsplugin_enabled_index
        timerentry_vpsplugin_enabled_index = 0
    self.timerVps_enabled_Entry = None
    try:
        # VPS only applies to one-off recording timers (not zap, not repeats).
        if self.timerType.value != "zap" and self.timerRepeat.value == "once" and config.plugins.vps.enabled.value is True:
            self.timerVps_enabled_Entry = getConfigListEntry(_("Enable VPS"), self.timerentry_vpsplugin_enabled)
            self.list.append(self.timerVps_enabled_Entry)
            if self.timerentry_vpsplugin_enabled.value != "no":
                service = self.timerServiceReference.ref
                if service and service.flags & eServiceReference.isGroup:
                    service = getBestPlayableServiceReference(service, eServiceReference())
                if self.timer.eit is None or self.timer.name == "":
                    # Manual timer without an EPG event: the user must supply
                    # an explicit VPS date/time.
                    if not self.timerentry_vpsplugin_dontcheck_pdc:
                        self.timerentry_vpsplugin_dontcheck_pdc = True
                        has_pdc, last_check, default_vps = Check_PDC.check_service(service)
                        if has_pdc != 1 or Check_PDC.recheck(has_pdc, last_check):
                            self.session.open(VPS_check_PDC_Screen, service, self)
                    self.list.append(getConfigListEntry(_("VPS-Time (date)"), self.timerentry_vpsplugin_time_date))
                    self.list.append(getConfigListEntry(_("VPS-Time (time)"), self.timerentry_vpsplugin_time_clock))
                elif not self.timerentry_vpsplugin_dontcheck_pdc:
                    self.timerentry_vpsplugin_dontcheck_pdc = True
                    has_pdc, last_check, default_vps = Check_PDC.check_service(service)
                    if default_vps != 1 and (has_pdc != 1 or Check_PDC.recheck(has_pdc, last_check)):
                        self.session.open(VPS_check_PDC_Screen, service, self, False)
            # Help text: show the VPS info screen once, then remember that.
            if config.plugins.vps.infotext.value != 2:
                config.plugins.vps.infotext.value = 2
                config.plugins.vps.infotext.save()
                self.session.open(VPS_Screen_Info)
    except Exception as exc:
        print("[VPS] timerSetupHook : %s" % exc)
        pass
    self["config"].list = self.list
    self["config"].setCurrentIndex(currentIndex)
def timerVps_enabled_Entry_Changed(configElement, self):
    # Notifier for the VPS "enabled" selector: remember the cursor position so
    # timerSetupHook can restore it after the config list is rebuilt.
    global timerentry_vpsplugin_enabled_index
    timerentry_vpsplugin_enabled_index = self["config"].getCurrentIndex()
    if configElement.value == "no":
        # Presumably forces a fresh PDC check next time VPS is enabled -- confirm.
        self.timerentry_vpsplugin_dontcheck_pdc = False
def timerSaveHook(self):
    # Called when the timer-entry screen is saved: copies the VPS settings
    # from the config elements onto the RecordTimer entry.
    try:
        self.timer.vpsplugin_enabled = self.timerentry_vpsplugin_enabled.value != "no"
        self.timer.vpsplugin_overwrite = self.timerentry_vpsplugin_enabled.value == "yes"
        if self.timer.vpsplugin_enabled:
            vps_timers.checksoon()
            if self.timer.name == "" or self.timer.eit is None:
                # Manual timer: take the user-supplied VPS date/time.
                self.timer.vpsplugin_time = self.getTimestamp(self.timerentry_vpsplugin_time_date.value, self.timerentry_vpsplugin_time_clock.value)
                if self.timer.vpsplugin_overwrite:
                    timerbegin, timerend = self.getBeginEnd()
                    # If the timer would start right away but the VPS time is
                    # more than 30 minutes out, move the start to the VPS time.
                    if (timerbegin - 60) < time() and (self.timer.vpsplugin_time - time()) > 1800:
                        self.timerentry_date.value = self.timerentry_vpsplugin_time_date.value
                        self.timerentry_starttime.value = self.timerentry_vpsplugin_time_clock.value
    except Exception as exc:
        print("[VPS] timerSaveHook : %s" % exc)
        pass
def timerChannelChangeHook(self):
    # Called when the timer's channel selection changes: the PDC capability of
    # the new service is unknown, so re-enable the check and rebuild the list.
    try:
        if self.timerentry_vpsplugin_enabled.value != "no":
            self.timerentry_vpsplugin_dontcheck_pdc = False
            self.createSetup()
    except Exception as exc:
        print("[VPS] timerChannelChangeHook : %s" % exc)
        pass
def new_InfoBarInstantRecord_recordQuestionCallback(self, answer, *args, **kwargs):
    # Wrapper installed over InfoBarInstantRecord.recordQuestionCallback:
    # first run the original handler, then apply VPS to the new instant timer.
    self._recordQuestionCallback_old_rn_vps(answer, *args, **kwargs)
    try:
        # The instant recording just created is the last entry in the list.
        entry = len(self.recording) - 1
        if answer is not None and answer[1] == "event" and config.plugins.vps.instanttimer.value != "no" and entry is not None and entry >= 0:
            # If we aren't checking PDC, just put the values in directly
            #
            if not config.plugins.vps.do_PDC_check.getValue():
                if config.plugins.vps.instanttimer.value == "yes":
                    self.recording[entry].vpsplugin_enabled = True
                    self.recording[entry].vpsplugin_overwrite = True
                    vps_timers.checksoon()
                elif config.plugins.vps.instanttimer.value == "yes_safe":
                    self.recording[entry].vpsplugin_enabled = True
                    self.recording[entry].vpsplugin_overwrite = False
                    vps_timers.checksoon()
            else:
                # Otherwise let the PDC check screen decide per service.
                rec_ref = self.recording[entry].service_ref.ref
                if rec_ref and rec_ref.flags & eServiceReference.isGroup:
                    rec_ref = getBestPlayableServiceReference(rec_ref, eServiceReference())
                self.session.open(VPS_check_on_instanttimer, rec_ref, self.recording[entry])
    except Exception as exc:
        print("[VPS] new_InfoBarInstantRecord_recordQuestionCallback : %s" % exc)
        pass
def METHOD_NAME():
    """Install the VPS hooks on the timer screens and instant recording."""
    for hook_list, hook in (
        (onRecordTimerCreate, timerCreateHook),
        (onRecordTimerSetup, timerSetupHook),
        (onRecordTimerSave, timerSaveHook),
        (onRecordTimerChannelChange, timerChannelChangeHook),
    ):
        hook_list.append(hook)
    # Monkey-patch InfoBarInstantRecord so instant recordings get VPS handling
    # too; the original callback is preserved and called first by the wrapper.
    InfoBarInstantRecord._recordQuestionCallback_old_rn_vps = InfoBarInstantRecord.recordQuestionCallback
    InfoBarInstantRecord.recordQuestionCallback = new_InfoBarInstantRecord_recordQuestionCallback
299,383 | check equal | #!/usr/bin/env python3
import time
import numpy as np
import pandas as pd
import pygama
def main():
    """Entry point: run the selected processing stage on local data files."""
    # Hard-coded local data paths; adjust for your environment.
    raw_file = "/Users/wisecg/dev/mj60/data/2018-10-9-P3LTP_Run42343"
    t1_file = "/Users/wisecg/dev/mj60/data/t1_run42343.h5"
    # Uncomment the stage(s) you want to run:
    # daq_to_raw(raw_file)
    # raw_to_dsp(t1_file)
    raw_to_dsp_quick(t1_file)
    # raw_to_dsp_mp(t1_file)
    # check_equal(t1_file)
    # optimize_raw_to_dsp_mp(t1_file)
def daq_to_raw(raw_file, n_evt=None):
    """Run pygama's DAQ-to-raw conversion on *raw_file*, up to *n_evt* events."""
    if n_evt is None:
        n_evt = 10000  # 487500 or np.inf
        # n_evt = 5000
    from pygama.processing.daq_to_raw import ProcessRaw
    ProcessRaw(raw_file,
               verbose=True,
               output_dir="/Users/wisecg/dev/mj60/data",
               n_max=n_evt,
               chan_list=None)
def raw_to_dsp(t1_file):
    """Demonstrate the VectorProcess API with explicit calculators/transformers.

    Reads the Gretina4M event table from *t1_file*, computes baseline
    quantities, and subtracts the baseline from the waveforms.
    """
    digitizer = pygama.decoders.digitizers.Gretina4M(
        correct_presum = False,
        split_waveform = False,
        )
    event_df = pd.read_hdf(t1_file, key = digitizer.decoder_name)
    pyg = pygama.VectorProcess()
    # pyg = pygama.ScalarProcess()  # just an idea
    pyg.AddCalculator(pygama.processing.vectorized.avg_baseline,
                      wf_names = ["waveform"],
                      fun_args = {"i_end":700})
    pyg.AddCalculator(pygama.processing.vectorized.fit_baseline,
                      wf_names = ["waveform"],
                      fun_args = {"i_end":700})
    pyg.AddTransformer(pygama.processing.vectorized.bl_subtract,
                       wf_names = ["waveform"],
                       fun_args = {"test":False})
    # process as "gatified" only
    # t1_df = pyg.Process(event_df)
    # print(t1_df.shape, t1_df.columns)
    # print(t1_df[["bl_avg","bl_int","bl_slope"]])
    # process as "gatified" and output waveforms
    t1_df, wf_df = pyg.Process(event_df, ["waveform", "wf_blsub"])
    # print(type(wf_df), wf_df.shape, wf_df.columns)
    # print(wf_df["waveform"][0])
    # print(wf_df["wf_blsub"][0])
    # TODO: write an object that can easily read wf_df
    wfs = pygama.WaveformFrame(wf_df)  # NOTE(review): result currently unused
def raw_to_dsp_quick(t1_file):
    """Single-process raw-to-dsp using the default processor list; returns the result."""
    t_start = time.time()
    # event_df = pd.read_hdf(t1_file, "ORGretina4MWaveformDecoder")#, stop=40)
    # print(event_df)
    # exit()
    n_evt = 100
    # daq_to_raw("/Users/wisecg/dev/mj60/data/2018-10-9-P3LTP_Run42343", n_evt)
    # use index (can also use anything in 'data_columns')
    # event_df = pd.read_hdf(t1_file, "ORGretina4MWaveformDecoder",
    #                        where='index >= {} & index < {}'.format(15, 30))#, stop=40)
    event_df = pd.read_hdf(t1_file, "ORGretina4MWaveformDecoder")
    pyg = pygama.VectorProcess(default_list=True)
    t1_df = pyg.Process(event_df)
    print("done. final shape:", t1_df.shape)
    # print(t1_df.to_string())
    print("Elapsed: {:.2f} sec".format(time.time()-t_start))
    return t1_df
def raw_to_dsp_mp(t1_file):
    """Multiprocessing version of raw_to_dsp_quick: process the file in chunks.

    Splits the HDF5 event table into row chunks and processes them in a
    process pool; returns the concatenated result DataFrame.
    """
    import multiprocessing as mp
    from functools import partial
    n_evt = np.inf
    # daq_to_raw("/Users/wisecg/dev/mj60/data/2018-10-9-P3LTP_Run42343", n_evt)
    t_start = time.time()
    h5key = "ORGretina4MWaveformDecoder"
    chunksize = 3000  # in rows. optimal for my mac, at least
    n_cpu = mp.cpu_count()
    with pd.HDFStore(t1_file, 'r') as store:
        nrows = store.get_storer(h5key).nrows
    chunk_idxs = list(range(nrows//chunksize + 1))
    keywords = {"t1_file":t1_file, "chunksize":chunksize, "h5key":h5key}
    with mp.Pool(n_cpu) as p:
        result_list = p.map(partial(process_chunk, **keywords), chunk_idxs)
    mp_df = pd.concat(result_list)
    print("Elapsed: {:.2f} sec".format(time.time()-t_start))
    return mp_df
def process_chunk(chunk_idx, t1_file, chunksize, h5key):
    """Worker: read rows [chunk_idx*chunksize, (chunk_idx+1)*chunksize) and process them."""
    # print("Processing chunk #{}".format(chunk_idx))
    # NOTE(review): the HDFStore context only brackets the read while
    # read_hdf reopens the file itself -- possibly redundant, confirm.
    with pd.HDFStore(t1_file, 'r') as store:
        start = chunk_idx * chunksize
        stop = (chunk_idx + 1) * chunksize
        # print("start: {}  stop: {}".format(start, stop))
        chunk = pd.read_hdf(t1_file, h5key,
                            where='index >= {} & index < {}'.format(start, stop))
    pyg = pygama.VectorProcess(default_list=True)
    t1_df = pyg.Process(chunk)
    return t1_df
def METHOD_NAME(t1_file):
    """Verify the multiprocessing pipeline matches the single-process one."""
    full_df = raw_to_dsp_quick(t1_file)
    mp_df = raw_to_dsp_mp(t1_file)
    print("mp df:", mp_df.shape, "standard df:", full_df.shape)
    # compare the two dataframes, make sure they're equal
    # since we have floating point stuff, df1.equals(df2) won't always work
    # so compare row by row and don't worry too much about precision
    from collections import Counter
    for i in range(len(mp_df)):
        mpdf = ["{:.10e}".format(v) for v in mp_df.iloc[i].values]
        fulldf = ["{:.10e}".format(v) for v in full_df.iloc[i].values]
        # Counter comparison ignores column order but catches value mismatches.
        if Counter(mpdf) != Counter(fulldf):
            print("DAMN.\n  MP:", mpdf, "\n  FULL:", fulldf)
        # else:
        #     print(i, "YEAH")
def optimize_raw_to_dsp_mp(t1_file):
    """Sweep multiprocessing chunk sizes to find the fastest setting.

    Seems that the sweet spot on my mac is chunksize ~ 3000.
    Prints a {chunksize: elapsed_seconds} summary at the end.
    """
    import multiprocessing as mp
    from functools import partial
    n_evt = 200000
    # daq_to_raw("/Users/wisecg/dev/mj60/data/2018-10-9-P3LTP_Run42343", n_evt)
    h5key = "ORGretina4MWaveformDecoder"
    n_cpu = mp.cpu_count()
    n_cpu = 2
    print("CPUs used:",n_cpu)
    # it's pretty clear the sweet spot is between 500 and 5000
    # {500: 12.53, 1000: 9.52, 5000: 11.24, 10000: 12.09, 50000: 105.37}
    # n_chunk = [500, 1000, 5000, 10000, 50000] # my mac hates more than 50k
    # n_chunk = np.logspace(3, 4, num=20).astype(int) # 100 and 5000
    n_chunk = [1000, 3000, 6000]
    t_chunk = []
    for chunksize in n_chunk:
        t_start = time.time()
        with pd.HDFStore(t1_file, 'r') as store:
            nrows = store.get_storer(h5key).nrows # this doesn't work for 'fixed'
            # nrows = store.get_storer("ORSIS3302DecoderForEnergy").shape[0]
        chunk_idxs = list(range(nrows//chunksize + 1))
        keywords = {"t1_file":t1_file, "chunksize":chunksize, "h5key":h5key}
        # BUG FIX: honor the n_cpu override above -- the original created the
        # pool with mp.cpu_count(), ignoring the value it had just printed.
        with mp.Pool(n_cpu) as p:
            result_list = p.map(partial(process_chunk, **keywords), chunk_idxs)
        mp_df = pd.concat(result_list)
        elap = time.time() - t_start
        print("chunk size:", chunksize, "elapsed:", elap)
        t_chunk.append(elap)
    print(dict(zip(n_chunk, t_chunk)))
if __name__ == "__main__":
    # Reconstructed entry-point guard: the original text was truncated to
    # "main(" by extraction.
    main()
299,384 | test registry register | from django.core.exceptions import ImproperlyConfigured
import pytest
from rest_framework.exceptions import APIException
from rest_framework.serializers import IntegerField, ModelSerializer
from baserow.contrib.database.models import Database
from baserow.core.exceptions import (
InstanceTypeAlreadyRegistered,
InstanceTypeDoesNotExist,
)
from baserow.core.registry import (
CustomFieldsInstanceMixin,
CustomFieldsRegistryMixin,
Instance,
MapAPIExceptionsInstanceMixin,
ModelInstanceMixin,
ModelRegistryMixin,
Registry,
)
# ---- Test doubles for the registry tests below ----
# Plain stand-in model classes (no Django machinery required).
class FakeModel(object):
    pass
class FakeModel2(object):
    pass
# Instance types wrapping the fake models via ModelInstanceMixin.
class TemporaryApplication1(ModelInstanceMixin, Instance):
    type = "temporary_1"
    model_class = FakeModel
class TemporaryApplication2(ModelInstanceMixin, Instance):
    type = "temporary_2"
    model_class = FakeModel2
# A base model and a subclass, used to check most-specific model lookup.
class BaseFakeModel(object):
    pass
class SubClassOfBaseFakeModel(BaseFakeModel):
    pass
class BaseFakeModelApplication(ModelInstanceMixin, Instance):
    type = "temporary_1"
    model_class = BaseFakeModel
class SubClassOfBaseFakeModelApplication(ModelInstanceMixin, Instance):
    type = "temporary_2"
    model_class = SubClassOfBaseFakeModel
# Registries under test: a plain model registry and one with custom fields.
class TemporaryRegistry(ModelRegistryMixin, Registry):
    name = "temporary"
class CustomFieldsTemporaryRegistry(
    ModelRegistryMixin, CustomFieldsRegistryMixin, Registry
):
    name = "temporary"
# Instance type with serializer field configuration, registered against the
# real Database model (used by test_get_serializer).
class TemporaryGroupInstanceType(
    ModelInstanceMixin, CustomFieldsInstanceMixin, Instance
):
    type = "temporary_3"
    model_class = Database
    allowed_fields = ["name"]
    serializer_field_names = ["name"]
    serializer_field_overrides = {"name": IntegerField()}
    request_serializer_field_names = ["order"]
    request_serializer_field_overrides = {"order": IntegerField()}
# Instance type carrying a legacy compat_type alias.
class InstanceTypeWithCompatType(Instance):
    type = "workspace"
    compat_type = "group"
# Minimal serializer used as a base_class in test_get_serializer.
class TemporarySerializer(ModelSerializer):
    class Meta:
        fields = ["id"]
def test_registry():
    # The bare Registry base class cannot be instantiated directly.
    with pytest.raises(ImproperlyConfigured):
        Registry()
def METHOD_NAME():
    """Register and unregister instances, covering the error paths."""
    temporary_1 = TemporaryApplication1()
    temporary_2 = TemporaryApplication2()

    registry = TemporaryRegistry()
    registry.register(temporary_1)
    registry.register(temporary_2)

    # Only Instance objects may be registered, and only once each.
    with pytest.raises(ValueError):
        registry.register("NOT AN APPLICATION")
    with pytest.raises(InstanceTypeAlreadyRegistered):
        registry.register(temporary_1)

    assert len(registry.registry) == 2
    assert registry.registry["temporary_1"] == temporary_1
    assert registry.registry["temporary_2"] == temporary_2

    # Unregistering accepts either the instance itself or its type name.
    registry.unregister(temporary_1)
    assert len(registry.registry) == 1
    registry.unregister("temporary_2")
    assert len(registry.registry) == 0

    with pytest.raises(ValueError):
        registry.unregister(000)
def test_registry_get():
    """Lookups by type name and by model class, plus their error paths."""
    instance = TemporaryApplication1()
    registry = TemporaryRegistry()
    registry.register(instance)

    assert registry.get("temporary_1") == instance
    with pytest.raises(InstanceTypeDoesNotExist):
        registry.get("something")

    # get_by_model accepts both the class and an instance of it.
    assert registry.get_by_model(FakeModel) == instance
    assert registry.get_by_model(FakeModel()) == instance
    with pytest.raises(InstanceTypeDoesNotExist):
        registry.get_by_model(FakeModel2)
    with pytest.raises(InstanceTypeDoesNotExist):
        registry.get_by_model(FakeModel2())

    assert registry.get_types() == ["temporary_1"]
def test_registry_get_compat_type_name():
    """An instance is retrievable under both its type and its compat_type."""
    registry = TemporaryRegistry()
    instance = InstanceTypeWithCompatType()
    registry.register(instance)
    for name in (instance.type, instance.compat_type):
        assert registry.get(name) == instance
def test_registry_get_by_model_returns_the_most_specific_value():
    """get_by_model prefers the registration with the most specific model class."""
    base = BaseFakeModelApplication()
    subtype = SubClassOfBaseFakeModelApplication()
    registry = TemporaryRegistry()
    registry.register(base)
    registry.register(subtype)

    # The subclass registration wins for subclass instances only.
    assert registry.get_by_model(BaseFakeModel()) == base
    assert registry.get_by_model(SubClassOfBaseFakeModel()) == subtype
def test_api_exceptions_api_mixins():
    # map_api_exceptions converts mapped exception types into APIExceptions
    # carrying the mapped error code; everything else propagates unchanged.
    class FakeInstance(MapAPIExceptionsInstanceMixin, Instance):
        type = "fake_instance"
        api_exceptions_map = {ValueError: "RANDOM_ERROR"}
    instance = FakeInstance()
    # Unmapped exceptions propagate as-is.
    with pytest.raises(Exception):
        with instance.map_api_exceptions():
            raise Exception("Failing normally here")
    # Mapped exceptions become an APIException with the mapped error code.
    with pytest.raises(APIException) as e:
        with instance.map_api_exceptions():
            raise ValueError("Should be converted.")
    assert e.value.detail["error"] == "RANDOM_ERROR"
    assert e.value.detail["detail"] == ""
@pytest.mark.django_db
def test_get_serializer(data_fixture):
    """get_serializer builds the instance-type serializer, honoring base_class
    and request overrides."""
    database = data_fixture.create_database_application(name="1")
    registry = CustomFieldsTemporaryRegistry()
    registry.register(TemporaryGroupInstanceType())

    # Default serializer: instance-type fields only, no id/order.
    serializer = registry.get_serializer(database)
    assert serializer.__class__.__name__ == "DatabaseSerializer"
    assert "id" not in serializer.data
    # NOTE(review): the application was created with name="1" (a string) but is
    # compared against the int 1 — presumably the serializer coerces; confirm.
    assert serializer.data["name"] == 1
    assert "order" not in serializer.data

    # A custom base_class contributes its own fields (id) to the serializer.
    serializer = registry.get_serializer(database, base_class=TemporarySerializer)
    assert "id" in serializer.data
    assert "order" not in serializer.data

    # The request serializer variant exposes the order field.
    serializer = registry.get_serializer(database, request=True)
    assert "order" in serializer.data
299,385 | chop value | from typing import Dict, Any, Tuple, Iterable, Generator, Optional, TYPE_CHECKING
import claripy
import ailment
if TYPE_CHECKING:
from ...code_location import CodeLocation
class Detail:
    """
    A companion class used together with PropValue. It describes stored information at each offset (in bytes).

    :ivar size:   Number of bytes this entry covers.
    :ivar expr:   The AIL expression stored at this offset, or None.
    :ivar def_at: Where this expression is defined, or None if it was never explicitly defined in the current block
                  or the current function.
    """

    __slots__ = ("size", "expr", "def_at")

    def __init__(self, size: int, expr: Optional[ailment.Expression], def_at: Optional["CodeLocation"]):
        self.size = size
        self.expr = expr
        self.def_at = def_at

    def __repr__(self) -> str:
        # size rendered in hex, matching the original format string
        return "%x: %s@%s" % (self.size, self.expr, self.def_at)
# The base value
class PropValue:
    """
    Describes immutable basic value type that is used in Propagator.

    ``value`` is the backing claripy value; ``offset_and_details`` optionally maps byte
    offsets within ``value`` to :class:`Detail` records (expression + definition location)
    covering the region starting at that offset.
    """

    __slots__ = (
        "value",
        "offset_and_details",
    )

    def __init__(self, value: claripy.ast.Bits, offset_and_details: Optional[Dict[int, Detail]] = None):
        self.value = value
        self.offset_and_details = offset_and_details

    @property
    def needs_details(self):
        # True when no per-offset details have been recorded yet
        return not bool(self.offset_and_details)

    @property
    def one_expr(self) -> Optional[ailment.Expression]:
        """
        Get the expression that starts at offset 0 and covers the entire PropValue. Returns None if there are no
        expressions or multiple expressions.
        """
        if self.offset_and_details and len(self.offset_and_details) == 1:
            if 0 in self.offset_and_details:
                detail = self.offset_and_details[0]
                if detail.size == self.value.size() // 8:
                    return detail.expr
        return None

    @property
    def one_defat(self) -> Optional["CodeLocation"]:
        """
        Get the definition location of the expression that starts at offset 0 and covers the entire PropValue. Returns
        None if there are no expressions or multiple expressions.
        """
        if self.offset_and_details and len(self.offset_and_details) == 1:
            if 0 in self.offset_and_details:
                detail = self.offset_and_details[0]
                if detail.size == self.value.size() // 8:
                    return detail.def_at
        return None

    def to_label(self):
        raise NotImplementedError()

    def with_details(self, size: int, expr: ailment.Expression, def_at: "CodeLocation") -> "PropValue":
        """Return a new PropValue with the same value and a single Detail covering offset 0."""
        return PropValue(self.value, offset_and_details={0: Detail(size, expr, def_at)})

    def all_exprs(self) -> Generator[ailment.Expression, None, None]:
        """Yield the expression of every recorded Detail (entries may be None)."""
        if not self.offset_and_details:
            return
        for detail in self.offset_and_details.values():
            yield detail.expr

    def non_zero_exprs(self) -> Generator[ailment.Expression, None, None]:
        """Yield every recorded expression except constant zeros."""
        if not self.offset_and_details:
            return
        for detail in self.offset_and_details.values():
            if isinstance(detail.expr, ailment.Expr.Const) and detail.expr.value == 0:
                continue
            yield detail.expr

    @staticmethod
    def METHOD_NAME(value: claripy.ast.Bits, begin_offset, end_offset) -> claripy.ast.Bits:
        """
        Extract bytes ``[begin_offset, end_offset)`` of ``value`` as a bit-vector slice.
        Byte offsets count from the most significant end of the value.
        """
        chop_start = value.size() - begin_offset * 8 - 1  # high bit index (inclusive)
        chop_end = value.size() - end_offset * 8  # low bit index (inclusive)
        # The slice [chop_start:chop_end] is chop_start - chop_end + 1 bits wide; when that
        # equals the full width we can return the value untouched (this also avoids the
        # FP->BV conversion below for full-width FP values).
        # Fixed: the operands were reversed (chop_end - chop_start + 1), so this fast path
        # could never trigger.
        if chop_start - chop_end + 1 == value.size():
            # fast path: no chopping
            return value
        if isinstance(value, claripy.ast.FP):
            # converting the FP value to an AST so that we can chop
            value = claripy.fpToIEEEBV(value)
        return value[chop_start:chop_end]

    def value_and_labels(self) -> Generator[Tuple[int, claripy.ast.Bits, int, Optional[Dict]], None, None]:
        """
        Walk the value from offset 0 to its full byte size, yielding
        ``(offset, chopped_value, size_in_bytes, label_or_None)`` tuples. Regions without a
        recorded Detail (gaps before, between, and after details) are yielded with a None label.
        """
        if not self.offset_and_details:
            return
        keys = list(sorted(self.offset_and_details.keys()))
        if keys[0] != 0:
            # missing details at 0
            yield 0, self.METHOD_NAME(self.value, 0, keys[0]), keys[0], None
        end_offset = 0
        for idx, offset in enumerate(keys):
            detail = self.offset_and_details[offset]
            end_offset = offset + detail.size
            label = {"expr": detail.expr, "def_at": detail.def_at}
            yield offset, self.METHOD_NAME(self.value, offset, end_offset), end_offset - offset, label
            # gap detection
            if idx != len(keys) - 1:
                next_offset = keys[idx + 1]
                if end_offset != next_offset:
                    yield end_offset, self.METHOD_NAME(
                        self.value, end_offset, next_offset
                    ), next_offset - end_offset, None
        # final gap detection
        if end_offset < self.value.size() // 8:
            yield end_offset, self.METHOD_NAME(
                self.value, end_offset, self.value.size() // 8
            ), self.value.size() // 8 - end_offset, None

    @staticmethod
    def from_value_and_labels(
        value: claripy.ast.Bits, labels: Iterable[Tuple[int, int, int, Dict[str, Any]]]
    ) -> "PropValue":
        """
        Rebuild a PropValue from ``(offset, offset_in_expr, size, label)`` tuples, resizing
        each labeled expression (extract or extend) so it matches the labeled region's size.
        """
        if not labels:
            return PropValue(value)
        offset_and_details = {}
        for offset, offset_in_expr, size, label in labels:
            expr = label["expr"]
            if expr is not None:
                if offset_in_expr != 0:
                    expr = PropValue.extract_ail_expression(offset_in_expr * 8, size * 8, expr)
                elif size < expr.size:
                    expr = PropValue.extract_ail_expression(0, size * 8, expr)
                elif size > expr.size:
                    expr = PropValue.extend_ail_expression((size - expr.size) * 8, expr)
            offset_and_details[offset] = Detail(size, expr, label["def_at"])
        return PropValue(value, offset_and_details=offset_and_details)

    @staticmethod
    def from_value_and_details(value: claripy.ast.Bits, size: int, expr: ailment.Expression, def_at: "CodeLocation"):
        """Create a PropValue with a single Detail at offset 0."""
        d = Detail(size, expr, def_at)
        return PropValue(value, offset_and_details={0: d})

    @staticmethod
    def extract_ail_expression(
        start: int, bits: int, expr: Optional[ailment.Expr.Expression]
    ) -> Optional[ailment.Expr.Expression]:
        """
        Build an AIL expression for the ``bits``-wide slice of ``expr`` that begins at bit
        offset ``start`` (counted from the least significant bit).
        """
        if expr is None:
            return None
        if isinstance(expr, ailment.Expr.Const):
            mask = (1 << bits) - 1
            return ailment.Expr.Const(expr.idx, expr.variable, (expr.value >> start) & mask, bits, **expr.tags)
        if start == 0:
            return ailment.Expr.Convert(None, expr.bits, bits, False, expr, **expr.tags)
        else:
            # Shift right by the bit offset, then truncate to the requested width — this
            # mirrors `(expr.value >> start) & mask` in the constant case above.
            # Fixed: the shift amount was `bits` instead of `start`.
            a = ailment.Expr.BinaryOp(
                None, "Shr", (expr, ailment.Expr.Const(None, None, start, expr.bits)), False, **expr.tags
            )
            return ailment.Expr.Convert(None, a.bits, bits, False, a, **expr.tags)

    @staticmethod
    def extend_ail_expression(bits: int, expr: Optional[ailment.Expr.Expression]) -> Optional[ailment.Expr.Expression]:
        """Zero-extend ``expr`` by ``bits`` additional bits."""
        if expr is None:
            return None
        if isinstance(expr, ailment.Expr.Const):
            return ailment.Expr.Const(expr.idx, expr.variable, expr.value, bits + expr.bits, **expr.tags)
        elif isinstance(expr, ailment.Expr.Convert):
            # fold into the existing conversion instead of nesting two Converts
            return ailment.Expr.Convert(None, expr.from_bits, bits + expr.to_bits, False, expr.operand, **expr.tags)
        return ailment.Expr.Convert(None, expr.bits, bits + expr.bits, False, expr, **expr.tags)
299,386 | convert to dimension values | """
This module provides the `SparkDataFrameConverter` class,
which allows converting a `pyspark` `DataFrame`
into a list of dictionaries representing series.
"""
from types import ModuleType
from typing import List, Tuple
from ipyvizzu.data.converters.defaults import NAN_DIMENSION, NAN_MEASURE
from ipyvizzu.data.converters.df.defaults import MAX_ROWS
from ipyvizzu.data.converters.df.converter import DataFrameConverter
from ipyvizzu.data.infer_type import InferType
from ipyvizzu.data.type_alias import (
DimensionValue,
MeasureValue,
SeriesValues,
)
class SparkDataFrameConverter(DataFrameConverter):
    """
    Converts a `pyspark` `DataFrame` into a list of dictionaries representing series.
    Each dictionary contains information about the series `name`, `values` and `type`.

    Parameters:
        df: The `pyspark` `DataFrame` to convert.
        default_measure_value:
            Default value to use for missing measure values. Defaults to 0.
        default_dimension_value:
            Default value to use for missing dimension values. Defaults to an empty string.
        max_rows: The maximum number of rows to include in the converted series list.
            If the `df` contains more rows,
            a random sample of the given number of rows (approximately) will be taken.

    Example:
        Get series list from `DataFrame` columns:

            converter = SparkDataFrameConverter(df)
            series_list = converter.get_series_list()
    """

    # pylint: disable=too-few-public-methods

    def __init__(
        self,
        df: "pyspark.sql.DataFrame",  # type: ignore
        default_measure_value: MeasureValue = NAN_MEASURE,
        default_dimension_value: DimensionValue = NAN_DIMENSION,
        max_rows: int = MAX_ROWS,
    ) -> None:
        super().__init__(default_measure_value, default_dimension_value, max_rows)
        self._pyspark, self._pyspark_func = self._get_pyspark()
        self._df = self._get_sampled_df(df)

    def _get_pyspark(self) -> Tuple[ModuleType, ModuleType]:
        # Import lazily so ipyvizzu itself does not require pyspark.
        try:
            import pyspark  # pylint: disable=import-outside-toplevel
            from pyspark.sql import functions  # pylint: disable=import-outside-toplevel

            return pyspark, functions
        except ImportError as error:
            raise ImportError(
                "pyspark is not available. Please install pyspark to use this feature."
            ) from error

    def _get_sampled_df(
        self, df: "pyspark.sql.DataFrame"  # type: ignore
    ) -> "pyspark.sql.DataFrame":  # type: ignore
        # Down-sample oversized frames; sample() is approximate, limit() enforces the cap.
        row_number = df.count()
        if self._is_max_rows_exceeded(row_number):
            fraction = self._max_rows / row_number
            sample_df = df.sample(withReplacement=False, fraction=fraction, seed=42)
            return sample_df.limit(self._max_rows)
        return df

    def _get_columns(self) -> List[str]:
        return self._df.columns

    def _convert_to_series_values_and_type(
        self, obj: str
    ) -> Tuple[SeriesValues, InferType]:
        # Numeric (int/double) columns become measures; everything else is a dimension.
        column_name = obj
        column = self._df.select(column_name)
        integer_type = self._pyspark.sql.types.IntegerType
        double_type = self._pyspark.sql.types.DoubleType
        if isinstance(column.schema[column_name].dataType, (integer_type, double_type)):
            return self._convert_to_measure_values(column_name), InferType.MEASURE
        return self.METHOD_NAME(column_name), InferType.DIMENSION

    def _convert_column(self, column_name: str, default_value: Any, cast_type: str) -> List[Any]:
        """Replace nulls with `default_value`, cast the column to `cast_type`, and
        collect it as a flat Python list.

        Shared implementation for the measure and dimension converters, which only
        differ in the fill value and the target cast type.
        """
        func = self._pyspark_func
        df = self._df.withColumn(
            column_name,
            func.when(
                func.col(column_name).isNull(), default_value
            ).otherwise(func.col(column_name)),
        )
        df_rdd = (
            df.withColumn(column_name, func.col(column_name).cast(cast_type))
            .select(column_name)
            .rdd
        )
        return df_rdd.flatMap(list).collect()

    def _convert_to_measure_values(self, obj: str) -> List[MeasureValue]:
        # Measures: fill nulls with the default measure value and cast to float.
        return self._convert_column(obj, self._default_measure_value, "float")

    def METHOD_NAME(self, obj: str) -> List[DimensionValue]:
        # Dimensions: fill nulls with the default dimension value and cast to string.
        return self._convert_column(obj, self._default_dimension_value, "string")
299,387 | get values | from __future__ import absolute_import, division, print_function
from wxtbx import metallicbutton
import wxtbx.bitmaps
import wx
import sys
class ListEditor(wx.Panel):
    """A panel combining a single-selection list with Add/Delete/Clear/Update
    buttons and a text field for editing the currently selected item.

    An optional callback (see SetCallback) is invoked after every mutation or
    selection change so the owner can react to edits.
    """

    def __init__(self, *args, **kwds):
        wx.Panel.__init__(self, *args, **kwds)
        # Placeholder text shown for freshly added, not-yet-edited items.
        self._default_label = "---"
        szr = wx.BoxSizer(wx.VERTICAL)
        self.SetSizer(szr)
        self.sizer = szr
        self.CreateList()
        # Row of control buttons above the edit field.
        self.buttons = wx.BoxSizer(wx.HORIZONTAL)
        add_btn = self.AddControlButton(
            label="Add",
            bitmap=wxtbx.bitmaps.fetch_icon_bitmap("actions", "edit_add", 16))
        self.Bind(wx.EVT_BUTTON, self.OnAdd, add_btn)
        del_btn = self.AddControlButton(
            label="Delete",
            bitmap=wxtbx.bitmaps.fetch_icon_bitmap("actions", "cancel", 16))
        self.Bind(wx.EVT_BUTTON, self.OnDelete, del_btn)
        clear_btn = self.AddControlButton(
            label="Clear all",
            bitmap=wxtbx.bitmaps.fetch_icon_bitmap("actions", "editdelete", 16))
        self.Bind(wx.EVT_BUTTON, self.OnDeleteAll, clear_btn)
        update_btn = self.AddControlButton(
            label="Update item",
            bitmap=wxtbx.bitmaps.fetch_icon_bitmap("actions", "recur", 16))
        self.Bind(wx.EVT_BUTTON, self.OnUpdate, update_btn)
        szr.Add(self.buttons, 0, wx.LEFT|wx.BOTTOM|wx.RIGHT, 5)
        # Edit field: pressing Enter also triggers OnUpdate.
        edit_szr = wx.BoxSizer(wx.HORIZONTAL)
        edit_label = wx.StaticText(
            parent=self,
            label="Edit selected:")
        edit_szr.Add(edit_label, 0, wx.RIGHT|wx.ALIGN_CENTER_VERTICAL, 5)
        self.edit = wx.TextCtrl(
            parent=self,
            size=(300,-1),
            style=wx.TE_PROCESS_ENTER)
        self.Bind(wx.EVT_TEXT_ENTER, self.OnUpdate, self.edit)
        edit_szr.Add(self.edit, 1, wx.EXPAND|wx.RIGHT|wx.ALIGN_CENTER_VERTICAL, 5)
        szr.Add(edit_szr, 0, wx.EXPAND|wx.TOP|wx.LEFT|wx.BOTTOM, 5)
        szr.Layout()
        self.sizer = szr
        self._label = None
        self._callback = None

    def CreateList(self):
        """Create the single-column report-style list control."""
        self.list = wx.ListCtrl(
            parent=self,
            id=-1,
            style=wx.LC_REPORT|wx.LC_SINGLE_SEL)
        self.list.InsertColumn(0, "Items", width=460)
        self.list.SetMinSize((480,160))
        # SetItemSpacing was removed in wxPython 2.9+, hence the version guard.
        if (hasattr(self.list, "SetItemSpacing")) and (wx.VERSION < (2,9)):
            self.list.SetItemSpacing(5)
        self.Bind(wx.EVT_LIST_ITEM_SELECTED, self.OnSelect, self.list)
        self.Bind(wx.EVT_LIST_ITEM_DESELECTED, self.OnDeSelect, self.list)
        self.sizer.Add(self.list, 1, wx.EXPAND|wx.ALL, 5)

    def SetLabel(self, label, font_weight=wx.FONTWEIGHT_BOLD):
        """Set (or create on first call) the heading label above the list."""
        if (self._label is not None):
            self._label.SetLabel(label)
        else :
            self._label = wx.StaticText(parent=self, label=label)
            font = self._label.GetFont()
            font.SetWeight(font_weight)
            self._label.SetFont(font)
            self.sizer.Insert(0, self._label, 0, wx.TOP|wx.LEFT, 5)
        self.sizer.Layout()

    def SetColumnHeader(self, header):
        """Change the text of the single list column."""
        col = self.list.GetColumn(0)
        col.SetText(header)
        self.list.SetColumn(0, col)

    def SetToolTip(self, tool_tip):
        """Attach a tooltip (string or wx.ToolTip) to the list control."""
        if isinstance(tool_tip, str):
            self.list.SetToolTip(wx.ToolTip(tool_tip))
        else :
            self.list.SetToolTip(tool_tip)

    def AddControlButton(self, label, bitmap):
        """Create a MetallicButton in the button row and return it."""
        btn = metallicbutton.MetallicButton(
            parent=self,
            label=label,
            bmp=bitmap,
            highlight_color=(200,220,240))
        self.buttons.Add(btn, 0, wx.RIGHT, 5)
        return btn

    def AddItem(self, item):
        """Append an item to the list; returns its index."""
        # NOTE(review): sys.maxunicode as the insertion index relies on wx
        # clamping it to "append at end" — presumably intended as a huge index
        # (sys.maxsize is the more common idiom); confirm before changing.
        return self.list.InsertStringItem(sys.maxunicode, item)

    def OnAdd(self, event):
        """Add a placeholder item, select it and focus the edit field."""
        i = self.AddItem(self._default_label)
        self.list.Select(i, 1)
        self.edit.SetFocus()
        self.call_back()

    def OnDelete(self, event):
        """Delete the selected item (if any) and clear the edit field."""
        i = self.list.GetFirstSelected()
        if (i >= 0):
            self.list.DeleteItem(i)
        self.edit.SetValue("")
        self.call_back()

    def OnUpdate(self, event):
        """Write the edit field's text into the selected item; on Enter with no
        selection, append a new item instead."""
        evt_type = event.GetEventType()
        txt = self.edit.GetValue()
        if (txt == "") or (txt is None):
            txt = self._default_label
        i = self.list.GetFirstSelected()
        if (i == -1):
            # Fixed: use the evt_type local computed above instead of calling
            # event.GetEventType() a second time (it was previously unused).
            if (evt_type == wx.EVT_TEXT_ENTER.typeId):
                i = self.AddItem(txt)
                self.list.Select(i, 1)
        else :
            self.list.SetItemText(i, txt)
        self.list.SetFocus()
        self.call_back()

    def OnSelect(self, event):
        """Mirror the newly selected item into the edit field (placeholder shows as empty)."""
        item = self.list.GetFirstSelected()
        txt = str(self.list.GetItemText(item))
        if (txt == self._default_label):
            txt = ""
        self.edit.SetValue(txt)
        self.call_back()

    def OnDeSelect(self, event):
        self.edit.SetValue("")
        self.call_back()

    def SetDefaultItemLabel(self, label):
        self._default_label = label

    def METHOD_NAME(self):
        """Return all item texts as a list; placeholder items become None."""
        items = []
        i = 0
        n = self.list.GetItemCount()
        while (i < n):
            txt = str(self.list.GetItemText(i))
            if (txt == self._default_label):
                txt = None
            items.append(txt)
            i += 1
        return items

    def DeleteAllItems(self):
        self.list.DeleteAllItems()
        self.call_back()

    def OnDeleteAll(self, evt):
        """Ask for confirmation, then clear the entire list."""
        if (self.list.GetItemCount() == 0):
            return False
        confirm = wx.MessageBox(caption="Confirm delete",
            message="Are you sure you want to delete all items in the list?")
        if (confirm == wx.OK):
            self.DeleteAllItems()

    def SetSelectedValue(self, txt):
        """Overwrite the selected item's text (no-op without a selection)."""
        i = self.list.GetFirstSelected()
        if (i == -1):
            return
        self.list.SetItemText(i, txt)
        self.edit.SetValue(txt)

    def GetSelectedValue(self):
        """Return the selected item's text, or None without a selection."""
        i = self.list.GetFirstSelected()
        if (i == -1):
            return None
        return str(self.list.GetItemText(i))

    def SetCallback(self, callback):
        """Register a zero-argument callable invoked after every list change."""
        assert hasattr(callback, "__call__")
        self._callback = callback

    def call_back(self):
        if (self._callback is not None):
            self._callback()
if __name__ == "__main__":
    # Manual smoke test: show a ListEditor inside a bare frame.
    app = wx.App(0)
    frame = wx.Frame(None, -1, "Test frame")
    main_sizer = wx.BoxSizer(wx.VERTICAL)
    frame.SetSizer(main_sizer)
    editor = ListEditor(parent=frame)
    editor.SetLabel("TLS groups:")
    editor.SetColumnHeader("Atom selection")
    main_sizer.Add(editor, 1, wx.EXPAND)
    main_sizer.Layout()
    main_sizer.Fit(editor)
    frame.Fit()
    frame.Show()
    app.MainLoop()
299,388 | setinputsizes | import decimal
from _typeshed import Incomplete, ReadableBuffer
from collections.abc import Sequence
from datetime import date, datetime, time
from types import TracebackType
from typing import Any, overload
from typing_extensions import Literal, TypeAlias
from .resultrow import ResultRow
# DB-API 2.0 (PEP 249) module-level attributes.
apilevel: str
threadsafety: int
# NOTE(review): PEP 249 defines paramstyle as a single str; confirm this driver
# really exposes a tuple of styles.
paramstyle: tuple[str, ...]
# Module-level connect() is an alias for the Connection class; the forward
# reference is legal here because stub files are never executed.
connect = Connection
class Connection:
    """DB-API connection object (stub). Also reachable via the module-level
    ``connect`` alias."""
    def __init__(
        self,
        address: str,
        port: int,
        user: str,
        password: str,
        autocommit: bool = ...,
        packetsize: int | None = ...,
        userkey: str | None = ...,
        *,
        sessionvariables: dict[str, str] | None = ...,
        forcebulkfetch: bool | None = ...,
    ) -> None: ...
    def cancel(self) -> bool: ...
    def close(self) -> None: ...
    def commit(self) -> None: ...
    def cursor(self) -> Cursor: ...
    def getaddress(self) -> str: ...
    def getautocommit(self) -> bool: ...
    # Without a key, returns the full info dict; with a key, the single value.
    def getclientinfo(self, key: str = ...) -> str | dict[str, str]: ...
    def getproperty(self, *args: Incomplete, **kwargs: Incomplete) -> Incomplete: ...
    def isconnected(self) -> bool: ...
    def rollback(self) -> None: ...
    def setautocommit(self, auto: bool = ...) -> None: ...
    def setclientinfo(self, key: str, value: str | None = ...) -> None: ...
class LOB:
    """Large-object handle supporting positional read/write/find (stub)."""
    def __init__(self, *args: Any, **kwargs: Any) -> None: ...
    def close(self) -> bool: ...
    def find(self, object: str, length: int, position: int = ...) -> int: ...
    def read(self, size: int = ..., position: int = ...) -> str | bytes: ...
    def write(self, object: str | bytes) -> int: ...
# A batch of positional parameter tuples, as accepted by executemany & friends.
_Parameters: TypeAlias = Sequence[tuple[Any, ...]]
class Cursor:
    """DB-API cursor object with driver-specific extensions (stub)."""
    description: tuple[tuple[Any, ...], ...]
    rowcount: int
    statementhash: str | None
    connection: Connection
    arraysize: int
    refreshts: Incomplete
    maxage: int
    def __init__(self, *args: Any, **kwargs: Any) -> None: ...
    def __enter__(self) -> Incomplete: ...
    def __exit__(self, typ: type[BaseException] | None, val: BaseException | None, tb: TracebackType | None) -> None: ...
    def callproc(self, procname: str, parameters: tuple[Any, ...] = ..., overview: bool = ...) -> tuple[Any, ...]: ...
    def close(self) -> None: ...
    def description_ext(self) -> Sequence[tuple[Any, ...]]: ...
    def execute(self, operation: str, parameters: tuple[Any, ...]) -> bool: ...
    def executemany(self, operation: str, parameters: _Parameters) -> Any: ...
    def executemanyprepared(self, parameters: _Parameters) -> Any: ...
    def executeprepared(self, parameters: _Parameters = ...) -> Any: ...
    def fetchone(self, uselob: bool = ...) -> ResultRow | None: ...
    def fetchall(self) -> list[ResultRow]: ...
    def fetchmany(self, size: int | None = ...) -> list[ResultRow]: ...
    def getrowsaffectedcounts(self) -> tuple[Any, ...]: ...
    def getpacketsize(self) -> int: ...
    def get_resultset_holdability(self) -> int: ...
    def getwarning(self) -> Warning | None: ...
    def haswarning(self) -> bool: ...
    def clearwarning(self) -> None: ...
    def has_result_set(self) -> bool: ...
    def nextset(self) -> None: ...
    def parameter_description(self) -> tuple[str, ...]: ...
    # prepare() either returns a new prepared Cursor or prepares in place,
    # depending on the newcursor flag.
    @overload
    def prepare(self, operation: str, newcursor: Literal[True]) -> Cursor: ...
    @overload
    def prepare(self, operation: str, newcursor: Literal[False]) -> Any: ...
    def print_message(self, *args: Incomplete, **kwargs: Incomplete) -> Incomplete: ...
    def parsenamedquery(self, *args: Incomplete, **kwargs: Incomplete) -> Incomplete: ...
    def scroll(self, value: int, mode: Literal["absolute", "relative"] = ...) -> None: ...
    def server_cpu_time(self) -> int: ...
    def server_memory_usage(self) -> int: ...
    def server_processing_time(self) -> int: ...
    def METHOD_NAME(self, *args: Any, **kwargs: Any) -> None: ...
    def setfetchsize(self, value: int) -> None: ...
    def setquerytimeout(self, value: int) -> None: ...
    def setpacketsize(self, value: int) -> None: ...
    def set_resultset_holdability(self, holdability: int) -> None: ...
    def setoutputsize(self, *args: Any, **kwargs: Any) -> None: ...
    def setcommandinfo(self, command_info: str, line_number: int) -> None: ...
# DB-API 2.0 exception hierarchy (PEP 249), plus driver-specific
# ExecuteMany* errors for batched statement failures.
class Warning(Exception):
    errorcode: int
    errortext: str

class Error(Exception):
    errorcode: int
    errortext: str

class DatabaseError(Error): ...
class OperationalError(DatabaseError): ...
class ProgrammingError(DatabaseError): ...
class IntegrityError(DatabaseError): ...
class InterfaceError(Error): ...
class InternalError(DatabaseError): ...
class DataError(DatabaseError): ...
class NotSupportedError(DatabaseError): ...

class ExecuteManyError(Error):
    # collection of per-row errors from a failed executemany call
    errors: Incomplete

class ExecuteManyErrorEntry(Error):
    # index of the failing row within the batch
    rownumber: int
def Date(year: int, month: int, day: int) -> date: ...
def Time(hour: int, minute: int, second: int, millisecond: int = 0) -> time: ...
def Timestamp(year: int, month: int, day: int, hour: int, minute: int, second: int, millisecond: int = 0) -> datetime: ...
def DateFromTicks(ticks: float) -> date: ...
def TimeFromTicks(ticks: float) -> time: ...
def TimestampFromTicks(ticks: float) -> datetime: ...
def Binary(data: ReadableBuffer) -> memoryview: ...
Decimal = decimal.Decimal
NUMBER: type[int | float | complex]
DATETIME: type[date | time | datetime]
STRING = str
BINARY = memoryview
ROWID = int |
299,389 | get auth token | import uuid
from django.conf import settings
from django.contrib.contenttypes.fields import GenericForeignKey
from django.contrib.contenttypes.models import ContentType
from django.db import models, transaction
from django.db.models import F, Sum
from django.db.models.fields import DateTimeField, UUIDField
from apps.organizations.models import Organization
from apps.users.models import User
class Product(models.Model):
    """A purchasable product owned by an organization, optionally attached to a
    related model instance (e.g. an event) via a generic foreign key.

    Quantity bookkeeping: ``total_quantity`` is the initial stock,
    ``current_quantity`` tracks what is still available. Reservations decrement
    it under a row lock (see check_and_reserve_quantity); cancelled/failed
    orders restore it (see restore_quantity).
    """
    name = models.CharField(max_length=200)
    price = models.DecimalField(max_digits=11, decimal_places=2)
    description = models.TextField()
    organization = models.ForeignKey(Organization, on_delete=models.CASCADE, related_name="products")
    total_quantity = models.PositiveIntegerField()
    current_quantity = models.PositiveIntegerField(null=True)  # Set to total_quantity upon initialization
    # Per-user purchase cap, clamped to total_quantity in save().
    max_buyable_quantity = models.PositiveIntegerField(default=1)
    # Generic foreign key to related product model instance (e.g event model)
    content_type = models.ForeignKey(ContentType, on_delete=models.CASCADE, null=True, blank=True)
    object_id = models.PositiveIntegerField(null=True, blank=True)
    related_object = GenericForeignKey("content_type", "object_id")

    class Meta:
        # At most one product per related model instance.
        unique_together = ("content_type", "object_id")

    def __str__(self):
        return self.name

    def save(self, *args, **kwargs):
        """Initialize current_quantity on first save and keep
        max_buyable_quantity within total stock."""
        if self.current_quantity is None:
            self.current_quantity = self.total_quantity
        self.max_buyable_quantity = min(self.max_buyable_quantity, self.total_quantity)
        super().save(*args, **kwargs)

    @classmethod
    def check_and_reserve_quantity(cls, product_id, user: User, quantity: int) -> "Product":
        """
        Check whether a requested quantity may be ordered and if so, reserve that quantity for this request.

        Runs inside a transaction with ``select_for_update`` so concurrent
        reservations of the same product are serialized.

        Raises:
            ValueError: If the requested quantity for the given product is not allowed.
                (Messages are user-facing and in Norwegian.)
        """
        with transaction.atomic():
            # Check if the requested quantity is allowed
            try:
                # Acquire DB lock for the product (no other process can change it)
                product = cls.objects.select_for_update().get(pk=product_id)
            except cls.DoesNotExist:
                raise ValueError("Ugyldig produkt")

            # Sum up what this user already holds (captured or still reserved).
            bought_quantity = Order.objects.filter(
                product__id=product_id,
                user=user,
                payment_status__in=[Order.PaymentStatus.CAPTURED, Order.PaymentStatus.RESERVED],
            ).aggregate(bought_quantity=Sum("quantity"))["bought_quantity"]
            bought_quantity = bought_quantity or 0  # aggregate returns None when no rows match
            if bought_quantity >= product.max_buyable_quantity:
                raise ValueError("Du kan ikke kjøpe mer av dette produktet.")
            elif quantity + bought_quantity > product.max_buyable_quantity:
                raise ValueError("Forespurt antall enheter overskrider tillatt antall.")
            elif quantity > product.current_quantity:
                raise ValueError("Forespurt antall enheter overskrider tilgjengelige antall enheter.")

            # Reserve quantity by updating available quantity
            product.current_quantity = F("current_quantity") - quantity
            product.save()
            product.refresh_from_db()
        return product

    @classmethod
    def restore_quantity(cls, order: "Order"):
        """
        Restore quantity that was reserved by an order that was cancelled or failed.
        Also restore quantity if an order that was already reserved (not captured) was re-attempted.
        """
        with transaction.atomic():
            # Acquire DB lock for the product (no other process can change it)
            product = cls.objects.select_for_update().get(pk=order.product.id)
            product.current_quantity = F("current_quantity") + order.quantity
            product.save()
def METHOD_NAME() -> str:
    """Return a fresh 32-character lowercase hex token.

    Used as the default for ``Order.auth_token`` to authenticate the Vipps
    payment callback.
    """
    # 128 random bits rendered as zero-padded hex — equivalent to uuid4().hex.
    return f"{uuid.uuid4().int:032x}"
class Order(models.Model):
    """A user's purchase of a given quantity of a product, tracked through the
    Vipps payment lifecycle via ``payment_status``."""

    class PaymentStatus(models.TextChoices):
        # Lifecycle: INITIATED -> RESERVED -> CAPTURED, or one of the
        # terminal failure states (CANCELLED/FAILED/REJECTED), or REFUNDED.
        INITIATED = "INITIATED", "initiated"
        RESERVED = "RESERVED", "reserved"
        CAPTURED = "CAPTURED", "captured"
        CANCELLED = "CANCELLED", "cancelled"
        REFUNDED = "REFUNDED", "refunded"
        FAILED = "FAILED", "failed"
        REJECTED = "REJECTED", "rejected"

    id = UUIDField(primary_key=True, default=uuid.uuid4)
    product = models.ForeignKey(Product, on_delete=models.CASCADE, related_name="orders")
    user = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.CASCADE, related_name="orders")
    quantity = models.PositiveIntegerField(default=1)
    total_price = models.DecimalField(max_digits=11, decimal_places=2)
    payment_status = models.CharField(max_length=255, choices=PaymentStatus.choices, default=PaymentStatus.INITIATED)
    timestamp = DateTimeField(auto_now_add=True)
    auth_token = models.CharField(max_length=32, default=METHOD_NAME)  # For authenticating Vipps callback
    payment_attempt = models.PositiveIntegerField(default=1)

    def __str__(self):
        return f"Order(product={self.product}, user={self.user})"

    @property
    def failed_statuses(self):
        """The payment statuses considered terminal failures."""
        return [self.PaymentStatus.CANCELLED, self.PaymentStatus.FAILED, self.PaymentStatus.REJECTED]
class VippsAccessToken(models.Model):
    """
    Stores access tokens from Vipps to use upon Vipps requests.
    """

    # The raw bearer token (doubles as the primary key).
    token = models.CharField(primary_key=True, max_length=2048)
    # Expiry timestamp after which the token must be refreshed.
    expires_on = DateTimeField()
299,390 | query parameters | # --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
#
# Code generated by aaz-dev-tools
# --------------------------------------------------------------------------------------------
# pylint: skip-file
# flake8: noqa
from azure.cli.core.aaz import *
@register_command(
    "automanage best-practice show",
)
class Show(AAZCommand):
    """Get information about a Automanage best practice

    :example: Show best-practice
        az automanage best-practice show --best-practice-name {}
    """

    # Maps this command to its ARM resource and API version.
    _aaz_info = {
        "version": "2022-05-04",
        "resources": [
            ["mgmt-plane", "/providers/microsoft.automanage/bestpractices/{}", "2022-05-04"],
        ]
    }

    def _handler(self, command_args):
        super()._handler(command_args)
        self._execute_operations()
        return self._output()

    _args_schema = None

    @classmethod
    def _build_arguments_schema(cls, *args, **kwargs):
        # Built once and cached on the class.
        if cls._args_schema is not None:
            return cls._args_schema
        cls._args_schema = super()._build_arguments_schema(*args, **kwargs)

        # define Arg Group ""
        _args_schema = cls._args_schema
        _args_schema.best_practice_name = AAZStrArg(
            options=["--best-practice-name"],
            help="The Automanage best practice name.",
            required=True,
        )
        return cls._args_schema

    def _execute_operations(self):
        self.pre_operations()
        self.BestPracticesGet(ctx=self.ctx)()
        self.post_operations()

    @register_callback
    def pre_operations(self):
        pass

    @register_callback
    def post_operations(self):
        pass

    def _output(self, *args, **kwargs):
        result = self.deserialize_output(self.ctx.vars.instance, client_flatten=True)
        return result

    class BestPracticesGet(AAZHttpOperation):
        # Single GET against the bestPractices ARM endpoint.
        CLIENT_TYPE = "MgmtClient"

        def __call__(self, *args, **kwargs):
            request = self.make_request()
            session = self.client.send_request(request=request, stream=False, **kwargs)
            if session.http_response.status_code in [200]:
                return self.on_200(session)
            return self.on_error(session.http_response)

        @property
        def url(self):
            return self.client.format_url(
                "/providers/Microsoft.Automanage/bestPractices/{bestPracticeName}",
                **self.url_parameters
            )

        @property
        def method(self):
            return "GET"

        @property
        def error_format(self):
            return "MgmtErrorFormat"

        @property
        def url_parameters(self):
            parameters = {
                **self.serialize_url_param(
                    "bestPracticeName", self.ctx.args.best_practice_name,
                    required=True,
                ),
            }
            return parameters

        @property
        def METHOD_NAME(self):
            parameters = {
                **self.serialize_query_param(
                    "api-version", "2022-05-04",
                    required=True,
                ),
            }
            return parameters

        @property
        def header_parameters(self):
            parameters = {
                **self.serialize_header_param(
                    "Accept", "application/json",
                ),
            }
            return parameters

        def on_200(self, session):
            data = self.deserialize_http_content(session)
            self.ctx.set_var(
                "instance",
                data,
                schema_builder=self._build_schema_on_200
            )

        _schema_on_200 = None

        @classmethod
        def _build_schema_on_200(cls):
            # Response schema for a best-practice resource; built once and cached.
            if cls._schema_on_200 is not None:
                return cls._schema_on_200

            cls._schema_on_200 = AAZObjectType()

            _schema_on_200 = cls._schema_on_200
            _schema_on_200.id = AAZStrType(
                flags={"read_only": True},
            )
            _schema_on_200.name = AAZStrType(
                flags={"read_only": True},
            )
            _schema_on_200.properties = AAZObjectType()
            _schema_on_200.system_data = AAZObjectType(
                serialized_name="systemData",
                flags={"read_only": True},
            )
            _schema_on_200.type = AAZStrType(
                flags={"read_only": True},
            )

            properties = cls._schema_on_200.properties
            properties.configuration = AAZFreeFormDictType()

            system_data = cls._schema_on_200.system_data
            system_data.created_at = AAZStrType(
                serialized_name="createdAt",
            )
            system_data.created_by = AAZStrType(
                serialized_name="createdBy",
            )
            system_data.created_by_type = AAZStrType(
                serialized_name="createdByType",
            )
            system_data.last_modified_at = AAZStrType(
                serialized_name="lastModifiedAt",
            )
            system_data.last_modified_by = AAZStrType(
                serialized_name="lastModifiedBy",
            )
            system_data.last_modified_by_type = AAZStrType(
                serialized_name="lastModifiedByType",
            )

            return cls._schema_on_200
class _ShowHelper:
    """Helper class for Show"""


# Only the command class is part of the module's public API.
__all__ = ["Show"]
299,391 | train | # Copyright (c) OpenMMLab. All rights reserved.
import copy
from mmcv.cnn import ConvModule
from torch.nn.modules.batchnorm import _BatchNorm
from mmpose.registry import MODELS
from .base_backbone import BaseBackbone
from .utils import InvertedResidual
@MODELS.register_module()
class ViPNAS_MobileNetV3(BaseBackbone):
"""ViPNAS_MobileNetV3 backbone.
"ViPNAS: Efficient Video Pose Estimation via Neural Architecture Search"
More details can be found in the `paper
<https://arxiv.org/abs/2105.10154>`__ .
Args:
wid (list(int)): Searched width config for each stage.
expan (list(int)): Searched expansion ratio config for each stage.
dep (list(int)): Searched depth config for each stage.
ks (list(int)): Searched kernel size config for each stage.
group (list(int)): Searched group number config for each stage.
att (list(bool)): Searched attention config for each stage.
stride (list(int)): Stride config for each stage.
act (list(dict)): Activation config for each stage.
conv_cfg (dict): Config dict for convolution layer.
Default: None, which means using conv2d.
norm_cfg (dict): Config dict for normalization layer.
Default: dict(type='BN').
frozen_stages (int): Stages to be frozen (all param fixed).
Default: -1, which means not freezing any parameters.
norm_eval (bool): Whether to set norm layers to eval mode, namely,
freeze running stats (mean and var). Note: Effect on Batch Norm
and its variants only. Default: False.
with_cp (bool): Use checkpoint or not. Using checkpoint will save
some memory while slowing down the training speed.
Default: False.
init_cfg (dict or list[dict], optional): Initialization config dict.
Default:
``[
dict(type='Normal', std=0.001, layer=['Conv2d']),
dict(
type='Constant',
val=1,
layer=['_BatchNorm', 'GroupNorm'])
]``
"""
def __init__(
self,
wid=[16, 16, 24, 40, 80, 112, 160],
expan=[None, 1, 5, 4, 5, 5, 6],
dep=[None, 1, 4, 4, 4, 4, 4],
ks=[3, 3, 7, 7, 5, 7, 5],
group=[None, 8, 120, 20, 100, 280, 240],
att=[None, True, True, False, True, True, True],
stride=[2, 1, 2, 2, 2, 1, 2],
act=['HSwish', 'ReLU', 'ReLU', 'ReLU', 'HSwish', 'HSwish', 'HSwish'],
conv_cfg=None,
norm_cfg=dict(type='BN'),
frozen_stages=-1,
norm_eval=False,
with_cp=False,
init_cfg=[
dict(type='Normal', std=0.001, layer=['Conv2d']),
dict(type='Constant', val=1, layer=['_BatchNorm', 'GroupNorm'])
],
):
# Protect mutable default arguments
norm_cfg = copy.deepcopy(norm_cfg)
super().__init__(init_cfg=init_cfg)
self.wid = wid
self.expan = expan
self.dep = dep
self.ks = ks
self.group = group
self.att = att
self.stride = stride
self.act = act
self.conv_cfg = conv_cfg
self.norm_cfg = norm_cfg
self.frozen_stages = frozen_stages
self.norm_eval = norm_eval
self.with_cp = with_cp
self.conv1 = ConvModule(
in_channels=3,
out_channels=self.wid[0],
kernel_size=self.ks[0],
stride=self.stride[0],
padding=self.ks[0] // 2,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
act_cfg=dict(type=self.act[0]))
self.layers = self._make_layer()
def _make_layer(self):
layers = []
layer_index = 0
for i, dep in enumerate(self.dep[1:]):
mid_channels = self.wid[i + 1] * self.expan[i + 1]
if self.att[i + 1]:
se_cfg = dict(
channels=mid_channels,
ratio=4,
act_cfg=(dict(type='ReLU'),
dict(type='HSigmoid', bias=1.0, divisor=2.0)))
else:
se_cfg = None
if self.expan[i + 1] == 1:
with_expand_conv = False
else:
with_expand_conv = True
for j in range(dep):
if j == 0:
stride = self.stride[i + 1]
in_channels = self.wid[i]
else:
stride = 1
in_channels = self.wid[i + 1]
layer = InvertedResidual(
in_channels=in_channels,
out_channels=self.wid[i + 1],
mid_channels=mid_channels,
kernel_size=self.ks[i + 1],
groups=self.group[i + 1],
stride=stride,
se_cfg=se_cfg,
with_expand_conv=with_expand_conv,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg,
act_cfg=dict(type=self.act[i + 1]),
with_cp=self.with_cp)
layer_index += 1
layer_name = f'layer{layer_index}'
self.add_module(layer_name, layer)
layers.append(layer_name)
return layers
def forward(self, x):
x = self.conv1(x)
for i, layer_name in enumerate(self.layers):
layer = getattr(self, layer_name)
x = layer(x)
return (x, )
def _freeze_stages(self):
if self.frozen_stages >= 0:
for param in self.conv1.parameters():
param.requires_grad = False
for i in range(1, self.frozen_stages + 1):
layer = getattr(self, f'layer{i}')
layer.eval()
for param in layer.parameters():
param.requires_grad = False
def METHOD_NAME(self, mode=True):
super().METHOD_NAME(mode)
self._freeze_stages()
if mode and self.norm_eval:
for m in self.modules():
if isinstance(m, _BatchNorm):
m.eval() |
""" Multicast DNS Service Discovery for Python, v0.14-wmcbrine
Copyright 2003 Paul Scott-Murphy, 2014 William McBrine
This module provides a framework for the use of DNS Service Discovery
using IP multicast.
This library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with this library; if not, write to the Free Software
Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301
USA
"""
import asyncio
import itertools
import socket
import threading
from typing import TYPE_CHECKING, List, Optional, cast
from ._record_update import RecordUpdate
from ._utils.asyncio import get_running_loop, run_coro_with_timeout
from ._utils.time import current_time_millis
from .const import _CACHE_CLEANUP_INTERVAL
if TYPE_CHECKING:
from ._core import Zeroconf
from ._listener import AsyncListener
from ._transport import _WrappedTransport, make_wrapped_transport
_CLOSE_TIMEOUT = 3000 # ms
class AsyncEngine:
"""An engine wraps sockets in the event loop."""
__slots__ = (
'loop',
'zc',
'protocols',
'readers',
'senders',
'running_event',
'_listen_socket',
'_respond_sockets',
'_cleanup_timer',
)
def __init__(
self,
zeroconf: 'Zeroconf',
listen_socket: Optional[socket.socket],
respond_sockets: List[socket.socket],
) -> None:
self.loop: Optional[asyncio.AbstractEventLoop] = None
self.zc = zeroconf
self.protocols: List[AsyncListener] = []
self.readers: List[_WrappedTransport] = []
self.senders: List[_WrappedTransport] = []
self.running_event: Optional[asyncio.Event] = None
self._listen_socket = listen_socket
self._respond_sockets = respond_sockets
self._cleanup_timer: Optional[asyncio.TimerHandle] = None
def setup(self, loop: asyncio.AbstractEventLoop, loop_thread_ready: Optional[threading.Event]) -> None:
"""Set up the instance."""
self.loop = loop
self.running_event = asyncio.Event()
self.loop.create_task(self._async_setup(loop_thread_ready))
async def _async_setup(self, loop_thread_ready: Optional[threading.Event]) -> None:
"""Set up the instance."""
self._async_schedule_next_cache_cleanup()
await self._async_create_endpoints()
assert self.running_event is not None
self.running_event.set()
if loop_thread_ready:
loop_thread_ready.set()
async def _async_create_endpoints(self) -> None:
"""Create endpoints to send and receive."""
assert self.loop is not None
loop = self.loop
reader_sockets = []
sender_sockets = []
if self._listen_socket:
reader_sockets.append(self._listen_socket)
for s in self._respond_sockets:
if s not in reader_sockets:
reader_sockets.append(s)
sender_sockets.append(s)
for s in reader_sockets:
transport, protocol = await loop.create_datagram_endpoint(
lambda: AsyncListener(self.zc), sock=s # type: ignore[arg-type, return-value]
)
self.protocols.append(cast(AsyncListener, protocol))
self.readers.append(make_wrapped_transport(cast(asyncio.DatagramTransport, transport)))
if s in sender_sockets:
self.senders.append(make_wrapped_transport(cast(asyncio.DatagramTransport, transport)))
def METHOD_NAME(self) -> None:
"""Periodic cache cleanup."""
now = current_time_millis()
self.zc.question_history.async_expire(now)
self.zc.record_manager.async_updates(
now, [RecordUpdate(record, record) for record in self.zc.cache.async_expire(now)]
)
self.zc.record_manager.async_updates_complete(False)
self._async_schedule_next_cache_cleanup()
def _async_schedule_next_cache_cleanup(self) -> None:
"""Schedule the next cache cleanup."""
loop = self.loop
assert loop is not None
self._cleanup_timer = loop.call_at(loop.time() + _CACHE_CLEANUP_INTERVAL, self.METHOD_NAME)
async def _async_close(self) -> None:
"""Cancel and wait for the cleanup task to finish."""
self._async_shutdown()
await asyncio.sleep(0) # flush out any call soons
assert self._cleanup_timer is not None
self._cleanup_timer.cancel()
def _async_shutdown(self) -> None:
"""Shutdown transports and sockets."""
assert self.running_event is not None
self.running_event.clear()
for wrapped_transport in itertools.chain(self.senders, self.readers):
wrapped_transport.transport.close()
def close(self) -> None:
"""Close from sync context.
While it is not expected during normal operation,
this function may raise EventLoopBlocked if the underlying
call to `_async_close` cannot be completed.
"""
assert self.loop is not None
# Guard against Zeroconf.close() being called from the eventloop
if get_running_loop() == self.loop:
self._async_shutdown()
return
if not self.loop.is_running():
return
run_coro_with_timeout(self._async_close(), self.loop, _CLOSE_TIMEOUT) |
import allel
import operator
import pathlib
import numpy as np
import logging
import os
import glob
import tarfile
import stdpopsim
logger = logging.getLogger(__name__)
# make root directory for annotations
annot_path = "annotations"
os.makedirs(annot_path, exist_ok=True)
def METHOD_NAME(intervals, *, closed: bool):
"""
Merge overlapping and adjacent intervals.
:param intervals: An iterable of (start, end) coordinates.
:param bool closed: If True, [start, end] coordinates are closed,
so [1, 2] and [3, 4] are adjacent intervals and will be merged.
If False, [start, end) coordinates are half-open,
so [1, 2) and [3, 4) are not adjacent and will not be merged.
"""
def iter_merged(intervals, *, closed: bool):
"""
Generate tuples of (start, end) coordinates for merged intervals.
"""
intervals = sorted(intervals, key=operator.itemgetter(0))
if len(intervals) == 0:
return
start, end = intervals[0]
for a, b in intervals[1:]:
assert a <= b
if a > end + closed:
# No intersection with the current interval.
yield start, end
start, end = a, b
else:
# Intersects, or is contiguous with, the current interval.
end = max(end, b)
yield start, end
return list(iter_merged(intervals, closed=closed))
def gff_recarray_to_stdpopsim_intervals(gff):
"""
Merge overlapping intervals and convert coordinates. GFF intervals are
1-based [i,j], but stdpopsim intervals are 0-based [i-1,j).
"""
intervals = np.array(METHOD_NAME(zip(gff.start, gff.end), closed=True))
intervals[:, 0] = intervals[:, 0] - 1
return intervals
def get_gff_recarray(url, sha256):
local_path = pathlib.Path(url).name
if not pathlib.Path(local_path).exists():
logger.info(f"downloading {url}")
stdpopsim.utils.download(url, local_path)
logger.info("checking sha256")
local_sha256 = stdpopsim.utils.sha256(local_path)
if local_sha256 != sha256:
logger.info(
f"{local_path}: sha256: expected {sha256}, but found {local_sha256}. "
"Delete the file to download it again."
)
exit(1)
logger.info(f"loading {local_path} into numpy recarray")
gff = allel.gff3_to_recarray(local_path)
return gff
def make_tarfile(output_filename, source_dir, dest):
if os.path.exists(output_filename):
os.remove(output_filename)
with tarfile.open(output_filename, "w:gz") as tar:
tar.add(source_dir, arcname=dest)
tar.close()
def download_process_annotations():
"""
loop through all species and download annotation.
from those annotations suck out what we want and
store them in appropriately named files for upload
"""
for spc in stdpopsim.all_species():
if spc.annotations:
for an in spc.annotations:
CHROM_IDS = [chrom.id for chrom in spc.genome.chromosomes]
logger.info(f"Downloading GFF file {an.id}")
gff = get_gff_recarray(an.url, an.gff_sha256)
logger.info(f"extracting annotations {an.id}")
exons = gff[
np.where(
np.logical_and(
gff.source == an.annotation_source,
gff.type == an.annotation_type,
)
)
]
logger.info(f"merging overlapping regions {an.id}")
# create numpy recarray for each chromosome
spc_name_path = os.path.join(annot_path, spc.id)
os.makedirs(spc_name_path, exist_ok=True)
for chrom_id in CHROM_IDS:
chrom_exons = exons[np.where(exons.seqid == chrom_id)]
if len(chrom_exons) == 0:
continue
intervals = gff_recarray_to_stdpopsim_intervals(chrom_exons)
# double check that the intervals can be used in stdpopsim
stdpopsim.utils._check_intervals_validity(intervals)
out_file = os.path.join(
spc_name_path, an.file_pattern.format(id=chrom_id)
)
np.savetxt(out_file, intervals, fmt="%d")
tf = spc_name_path + f"/{an.id}.tar.gz"
make_tarfile(tf, spc_name_path, "")
logger.info("made tarball at " + spc_name_path)
for f in glob.glob(spc_name_path + "/*.txt"):
logger.info("removing " + f)
os.remove(f) |
from datetime import datetime, timedelta
import pytest
from django.db import IntegrityError, router, transaction
from sentry.models import Rule
from sentry.models.rulesnooze import RuleSnooze
from sentry.testutils.cases import APITestCase
class RuleSnoozeTest(APITestCase):
def setUp(self):
self.issue_alert_rule = Rule.objects.create(
label="test rule", project=self.project, owner=self.team.actor
)
self.metric_alert_rule = self.create_alert_rule(
organization=self.project.organization, projects=[self.project]
)
self.user2 = self.create_user()
def test_snooze_user_and_global(self):
"""Test that a rule can be snoozed by a user and globally"""
issue_alert_rule_snooze_user = self.snooze_rule(
user_id=self.user.id,
owner_id=self.user.id,
rule=self.issue_alert_rule,
until=datetime.now() + timedelta(days=10),
)
issue_alert_rule_snooze_all = self.snooze_rule(
owner_id=self.user2.id,
rule=self.issue_alert_rule,
until=datetime.now() + timedelta(days=1),
)
assert RuleSnooze.objects.filter(id=issue_alert_rule_snooze_user.id).exists()
assert RuleSnooze.objects.filter(id=issue_alert_rule_snooze_all.id).exists()
def test_issue_alert_until_and_forever(self):
issue_alert_rule_snooze_user_until = self.snooze_rule(
user_id=self.user.id,
owner_id=self.user.id,
rule=self.issue_alert_rule,
until=datetime.now() + timedelta(days=1),
)
assert RuleSnooze.objects.filter(id=issue_alert_rule_snooze_user_until.id).exists()
issue_alert_rule2 = Rule.objects.create(
label="test rule", project=self.project, owner=self.team.actor
)
issue_alert_rule_snooze_user_forever = self.snooze_rule(
user_id=self.user.id, owner_id=self.user.id, rule=issue_alert_rule2
)
assert RuleSnooze.objects.filter(id=issue_alert_rule_snooze_user_forever.id).exists()
def METHOD_NAME(self):
metric_alert_rule_snooze_user = self.snooze_rule(
user_id=self.user.id,
owner_id=self.user.id,
alert_rule=self.metric_alert_rule,
until=datetime.now() + timedelta(days=1),
)
assert RuleSnooze.objects.filter(id=metric_alert_rule_snooze_user.id).exists()
metric_alert_rule2 = self.create_alert_rule(
organization=self.project.organization, projects=[self.project]
)
metric_alert_rule_snooze_user = self.snooze_rule(
user_id=self.user.id, owner_id=self.user.id, alert_rule=metric_alert_rule2
)
assert RuleSnooze.objects.filter(id=metric_alert_rule_snooze_user.id).exists()
def test_constraints(self):
# ensure the rule can be globally ignored after it's been individually ignored
metric_alert_rule_snooze_all = self.snooze_rule(alert_rule=self.metric_alert_rule)
issue_alert_rule_snooze_all = self.snooze_rule(rule=self.issue_alert_rule)
assert RuleSnooze.objects.filter(id=metric_alert_rule_snooze_all.id).exists()
assert RuleSnooze.objects.filter(id=issue_alert_rule_snooze_all.id).exists()
# ensure another user can ignore the same issue alert
issue_alert_rule_snooze_user2 = self.snooze_rule(
user_id=self.user2.id, rule=self.issue_alert_rule
)
assert RuleSnooze.objects.filter(id=issue_alert_rule_snooze_user2.id).exists()
def test_errors(self):
# ensure no dupes
self.snooze_rule(owner_id=self.user.id, alert_rule=self.metric_alert_rule)
with pytest.raises(IntegrityError), transaction.atomic(router.db_for_write(RuleSnooze)):
self.snooze_rule(alert_rule=self.metric_alert_rule)
self.snooze_rule(owner_id=self.user.id, rule=self.issue_alert_rule)
with pytest.raises(IntegrityError), transaction.atomic(router.db_for_write(RuleSnooze)):
self.snooze_rule(rule=self.issue_alert_rule)
# ensure valid data
with pytest.raises(IntegrityError), transaction.atomic(router.db_for_write(RuleSnooze)):
self.snooze_rule(
owner_id=self.user.id, rule=self.issue_alert_rule, alert_rule=self.metric_alert_rule
)
with pytest.raises(IntegrityError), transaction.atomic(router.db_for_write(RuleSnooze)):
self.snooze_rule(
user_id=self.user.id,
owner_id=self.user.id,
)
with pytest.raises(IntegrityError), transaction.atomic(router.db_for_write(RuleSnooze)):
self.snooze_rule(owner_id=self.user.id, until=datetime.now() + timedelta(days=1)) |
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
#
# Code generated by aaz-dev-tools
# --------------------------------------------------------------------------------------------
# pylint: skip-file
# flake8: noqa
from azure.cli.core.aaz import *
@register_command(
"network vnet-gateway packet-capture stop",
is_preview=True,
)
class Stop(AAZCommand):
"""Stop packet capture on a virtual network gateway.
:example: Stop packet capture on a virtual network gateway.
az network vnet-gateway packet-capture stop -g MyResourceGroup -n MyVnetGateway --sas-url https://myStorageAct.blob.azure.com/artifacts?st=2019-04-10T22%3A12Z&se=2019-04-11T09%3A12Z&sp=rl&sv=2018-03-28&sr=c&sig=0000000000
"""
_aaz_info = {
"version": "2022-01-01",
"resources": [
["mgmt-plane", "/subscriptions/{}/resourcegroups/{}/providers/microsoft.network/virtualnetworkgateways/{}/stoppacketcapture", "2022-01-01"],
]
}
AZ_SUPPORT_NO_WAIT = True
def _handler(self, command_args):
super()._handler(command_args)
return self.build_lro_poller(self._execute_operations, self._output)
_args_schema = None
@classmethod
def _build_arguments_schema(cls, *args, **kwargs):
if cls._args_schema is not None:
return cls._args_schema
cls._args_schema = super()._build_arguments_schema(*args, **kwargs)
# define Arg Group ""
_args_schema = cls._args_schema
_args_schema.resource_group = AAZResourceGroupNameArg(
required=True,
)
_args_schema.name = AAZStrArg(
options=["-n", "--name"],
help="Name of the VNet gateway.",
required=True,
id_part="name",
)
_args_schema.sas_url = AAZStrArg(
options=["--sas-url"],
help="The SAS url to be used for packet capture.",
)
return cls._args_schema
def _execute_operations(self):
self.pre_operations()
yield self.VirtualNetworkGatewaysStopPacketCapture(ctx=self.ctx)()
self.post_operations()
@register_callback
def pre_operations(self):
pass
@register_callback
def post_operations(self):
pass
def _output(self, *args, **kwargs):
result = self.deserialize_output(self.ctx.vars.instance, client_flatten=False)
return result
class VirtualNetworkGatewaysStopPacketCapture(AAZHttpOperation):
CLIENT_TYPE = "MgmtClient"
def __call__(self, *args, **kwargs):
request = self.make_request()
session = self.client.send_request(request=request, stream=False, **kwargs)
if session.http_response.status_code in [202]:
return self.client.build_lro_polling(
self.ctx.args.no_wait,
session,
self.METHOD_NAME,
self.on_error,
lro_options={"final-state-via": "location"},
path_format_arguments=self.url_parameters,
)
if session.http_response.status_code in [200]:
return self.client.build_lro_polling(
self.ctx.args.no_wait,
session,
self.METHOD_NAME,
self.on_error,
lro_options={"final-state-via": "location"},
path_format_arguments=self.url_parameters,
)
return self.on_error(session.http_response)
@property
def url(self):
return self.client.format_url(
"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworkGateways/{virtualNetworkGatewayName}/stopPacketCapture",
**self.url_parameters
)
@property
def method(self):
return "POST"
@property
def error_format(self):
return "ODataV4Format"
@property
def url_parameters(self):
parameters = {
**self.serialize_url_param(
"resourceGroupName", self.ctx.args.resource_group,
required=True,
),
**self.serialize_url_param(
"subscriptionId", self.ctx.subscription_id,
required=True,
),
**self.serialize_url_param(
"virtualNetworkGatewayName", self.ctx.args.name,
required=True,
),
}
return parameters
@property
def query_parameters(self):
parameters = {
**self.serialize_query_param(
"api-version", "2022-01-01",
required=True,
),
}
return parameters
@property
def header_parameters(self):
parameters = {
**self.serialize_header_param(
"Content-Type", "application/json",
),
**self.serialize_header_param(
"Accept", "application/json",
),
}
return parameters
@property
def content(self):
_content_value, _builder = self.new_content_builder(
self.ctx.args,
typ=AAZObjectType,
typ_kwargs={"flags": {"required": True, "client_flatten": True}}
)
_builder.set_prop("sasUrl", AAZStrType, ".sas_url")
return self.serialize_content(_content_value)
def METHOD_NAME(self, session):
data = self.deserialize_http_content(session)
self.ctx.set_var(
"instance",
data,
schema_builder=self._build_schema_on_200
)
_schema_on_200 = None
@classmethod
def _build_schema_on_200(cls):
if cls._schema_on_200 is not None:
return cls._schema_on_200
cls._schema_on_200 = AAZStrType()
return cls._schema_on_200
class _StopHelper:
"""Helper class for Stop"""
__all__ = ["Stop"]
import pytest
import compas
import json
from random import random, randint
from compas.datastructures import Network
from compas.geometry import Pointcloud
# ==============================================================================
# Fixtures
# ==============================================================================
@pytest.fixture
def k5_network():
network = Network()
network.add_edge("a", "b")
network.add_edge("a", "c")
network.add_edge("a", "d")
network.add_edge("a", "e")
network.add_edge("b", "c")
network.add_edge("b", "d")
network.add_edge("b", "e")
network.add_edge("c", "d")
network.add_edge("c", "e")
network.add_edge("d", "e")
return network
# ==============================================================================
# Basics
# ==============================================================================
# ==============================================================================
# Constructors
# ==============================================================================
@pytest.mark.parametrize(
"filepath",
[
compas.get("lines.obj"),
compas.get("grid_irregular.obj"),
],
)
def METHOD_NAME(filepath):
network = Network.from_obj(filepath)
assert network.number_of_nodes() > 0
assert network.number_of_edges() > 0
assert len(list(network.nodes())) == network._max_node + 1
assert network.is_connected()
def test_network_from_pointcloud():
cloud = Pointcloud.from_bounds(random(), random(), random(), randint(10, 100))
network = Network.from_pointcloud(cloud=cloud, degree=3)
assert network.number_of_nodes() == len(cloud)
for node in network.nodes():
assert network.degree(node) >= 3
# ==============================================================================
# Data
# ==============================================================================
def test_network_data():
cloud = Pointcloud.from_bounds(random(), random(), random(), randint(10, 100))
network = Network.from_pointcloud(cloud=cloud, degree=3)
other = Network.from_data(json.loads(json.dumps(network.data)))
assert network.data == other.data
if not compas.IPY:
assert Network.validate_data(network.data)
assert Network.validate_data(other.data)
# ==============================================================================
# Properties
# ==============================================================================
# ==============================================================================
# Accessors
# ==============================================================================
# ==============================================================================
# Builders
# ==============================================================================
def test_add_node():
network = Network()
assert network.add_node(1) == 1
assert network.add_node("1", x=0, y=0, z=0) == "1"
assert network.add_node(2) == 2
assert network.add_node(0, x=1) == 0
# ==============================================================================
# Modifiers
# ==============================================================================
# ==============================================================================
# Samples
# ==============================================================================
# ==============================================================================
# Attributes
# ==============================================================================
# ==============================================================================
# Conversion
# ==============================================================================
# ==============================================================================
# Methods
# ==============================================================================
def test_non_planar(k5_network):
try:
import planarity # noqa: F401
except ImportError:
return
from compas.datastructures import network_is_planar
assert network_is_planar(k5_network) is not True
def test_planar(k5_network):
try:
import planarity # noqa: F401
except ImportError:
return
from compas.datastructures import network_is_planar
k5_network.delete_edge(("a", "b")) # Delete (a, b) edge to make K5 planar
assert network_is_planar(k5_network) is True |
# GridCal
# Copyright (C) 2022 Santiago Peñate Vera
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 3 of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
from PySide2.QtWidgets import *
import matplotlib
matplotlib.use('Qt5Agg')
from matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg as FigureCanvas
from matplotlib.backends.backend_qt5agg import NavigationToolbar2QT as Navigationtoolbar
from matplotlib.figure import Figure
from matplotlib import pyplot as plt
plt.style.use('fivethirtyeight')
class MplCanvas(FigureCanvas):
def __init__(self):
self.press = None
self.cur_xlim = None
self.cur_ylim = None
self.x0 = None
self.y0 = None
self.x1 = None
self.y1 = None
self.xpress = None
self.ypress = None
self.zoom_x_limits = None
self.zoom_y_limits = None
self.fig = Figure()
try:
self.ax = self.fig.add_subplot(111, facecolor='white')
except Exception as ex:
self.ax = self.fig.add_subplot(111, axisbg='white')
FigureCanvas.__init__(self, self.fig)
FigureCanvas.setSizePolicy(self, QSizePolicy.Expanding, QSizePolicy.Expanding)
FigureCanvas.updateGeometry(self)
scale = 1.2
f = self.zoom_factory(self.ax, base_scale=scale)
# p = self.pan_factory(self.ax)
self.dragged = None
self.element_dragged = None
self.pick_pos = (0, 0)
self.is_point = False
self.index = None
# Connect events and callbacks
# self.fig.canvas.mpl_connect("pick_event", self.on_pick_event)
# self.fig.canvas.mpl_connect("button_release_event", self.on_release_event)
def setTitle(self, text):
"""
Sets the figure title
"""
self.fig.suptitle(text)
def set_graph_mode(self):
"""
Sets the borders to nicely display graphs
"""
self.fig.subplots_adjust(left=0, bottom=0, right=1, top=0.9, wspace=0, hspace=0)
def zoom_factory(self, ax, base_scale=1.2):
"""
Mouse zoom handler
"""
def zoom(event):
cur_xlim = ax.get_xlim()
cur_ylim = ax.get_ylim()
xdata = event.xdata # get event x location
ydata = event.ydata # get event y location
if event.button == 'down':
# deal with zoom in
scale_factor = 1 / base_scale
elif event.button == 'up':
# deal with zoom out
scale_factor = base_scale
else:
# deal with something that should never happen
scale_factor = 1
new_width = (cur_xlim[1] - cur_xlim[0]) * scale_factor
new_height = (cur_ylim[1] - cur_ylim[0]) * scale_factor
relx = (cur_xlim[1] - xdata)/(cur_xlim[1] - cur_xlim[0])
rely = (cur_ylim[1] - ydata)/(cur_ylim[1] - cur_ylim[0])
self.zoom_x_limits = [xdata - new_width * (1-relx), xdata + new_width * relx]
self.zoom_y_limits = [ydata - new_height * (1-rely), ydata + new_height * rely]
ax.set_xlim(self.zoom_x_limits )
ax.set_ylim(self.zoom_y_limits)
ax.figure.canvas.draw()
fig = ax.get_figure() # get the figure of interest
fig.canvas.mpl_connect('scroll_event', zoom)
return zoom
def rec_zoom(self):
self.zoom_x_limits = self.ax.get_xlim()
self.zoom_y_limits = self.ax.get_ylim()
def set_last_zoom(self):
if self.zoom_x_limits is not None:
self.ax.set_xlim(self.zoom_x_limits )
self.ax.set_ylim(self.zoom_y_limits)
def pan_factory(self, ax):
"""
Mouse pan handler
"""
def onPress(event):
if event.inaxes != ax:
return
self.cur_xlim = ax.get_xlim()
self.cur_ylim = ax.get_ylim()
self.press = self.x0, self.y0, event.xdata, event.ydata
self.x0, self.y0, self.xpress, self.ypress = self.press
def onRelease(event):
self.press = None
ax.figure.canvas.draw()
def onMotion(event):
if self.press is None:
return
if event.inaxes != ax:
return
dx = event.xdata - self.xpress
dy = event.ydata - self.ypress
self.cur_xlim -= dx
self.cur_ylim -= dy
ax.set_xlim(self.cur_xlim)
ax.set_ylim(self.cur_ylim)
ax.figure.canvas.draw()
fig = ax.get_figure() # get the figure of interest
# attach the call back
fig.canvas.mpl_connect('button_press_event', onPress)
fig.canvas.mpl_connect('button_release_event', onRelease)
fig.canvas.mpl_connect('motion_notify_event', onMotion)
# return the function
return onMotion
class MatplotlibWidget(QWidget):
def __init__(self, parent=None):
QWidget.__init__(self, parent)
self.frame = QWidget()
self.canvas = MplCanvas()
self.canvas.setParent(self.frame)
self.mpltoolbar = Navigationtoolbar(self.canvas, self.frame)
self.vbl = QVBoxLayout()
self.vbl.addWidget(self.canvas)
self.vbl.addWidget(self.mpltoolbar)
self.setLayout(self.vbl)
self.mpltoolbar.toggleViewAction()
def setTitle(self, text):
"""
Sets the figure title
"""
self.canvas.setTitle(text)
def METHOD_NAME(self):
return self.canvas.ax
def get_figure(self):
return self.canvas.fig
def clear(self, force=False):
"""
Clear the interface
Args:
force: Remove the object and create a new one (brute force)
Returns:
"""
if force:
self.canvas.fig.clear()
self.canvas.ax = self.canvas.fig.add_subplot(111)
# self.canvas.ax.clear()
# self.canvas = MplCanvas()
else:
self.canvas.ax.clear()
self.redraw()
def redraw(self):
"""
Redraw the interface
Returns:
"""
self.canvas.ax.figure.canvas.draw()
def plot(self, x, y, title='', xlabel='', ylabel=''):
"""
Plot series
Args:
x: X values
y: Y values
title: Title
xlabel: Label for X
ylabel: Label for Y
Returns:
"""
self.setTitle(title)
self.canvas.ax.plot(x, y)
self.canvas.ax.set_xlabel(xlabel)
self.canvas.ax.set_ylabel(ylabel)
self.redraw()
|
import logging
from pathlib import Path
from typing import Any, Optional, Protocol
from multiversx_sdk_core import Address, MessageV1
from multiversx_sdk_network_providers.accounts import AccountOnNetwork
from multiversx_sdk_wallet import UserSigner
from multiversx_sdk_cli.constants import DEFAULT_HRP
from multiversx_sdk_cli.interfaces import IAccount, IAddress, ITransaction
from multiversx_sdk_cli.ledger.config import compare_versions
from multiversx_sdk_cli.ledger.ledger_app_handler import \
SIGN_USING_HASH_VERSION
from multiversx_sdk_cli.ledger.ledger_functions import (
TX_HASH_SIGN_OPTIONS, TX_HASH_SIGN_VERSION, do_get_ledger_address,
do_get_ledger_version, do_sign_message_with_ledger,
do_sign_transaction_with_ledger)
logger = logging.getLogger("accounts")
class INetworkProvider(Protocol):
def METHOD_NAME(self, address: IAddress) -> AccountOnNetwork:
...
class EmptyAddress(IAddress):
def hex(self) -> str:
return ""
def bech32(self) -> str:
return ""
class AccountBase(IAccount):
def __init__(self, address: Any = EmptyAddress()) -> None:
self.address = address
self.nonce: int = 0
def sync_nonce(self, proxy: INetworkProvider):
logger.debug("AccountBase.sync_nonce()")
self.nonce = proxy.METHOD_NAME(self.address).nonce
logger.debug(f"AccountBase.sync_nonce() done: {self.nonce}")
def sign_transaction(self, transaction: ITransaction) -> str:
raise NotImplementedError
def sign_message(self, data: bytes) -> str:
raise NotImplementedError
class Account(AccountBase):
    """Account backed by a local wallet: a PEM file or an encrypted JSON key file.

    Exactly one credential source is expected:
      * ``pem_file`` (plus an optional ``pem_index``), or
      * ``key_file`` together with its ``password``.

    When a credential is loaded, the address is derived from the signer's
    public key, overriding any explicitly supplied ``address``. If no
    credential is supplied, the account cannot sign.
    """

    def __init__(self,
                 address: Any = None,
                 pem_file: Optional[str] = None,
                 pem_index: int = 0,
                 key_file: str = "",
                 password: str = "") -> None:
        super().__init__(address)

        # Fix: previously `self.signer` was only assigned inside the branches
        # below, so constructing an Account without credentials made the
        # `assert self.signer is not None` checks in the sign_* methods fail
        # with an AttributeError instead of the intended AssertionError.
        self.signer: Optional[UserSigner] = None

        if pem_file:
            pem_path = Path(pem_file).expanduser().resolve()
            self.signer = UserSigner.from_pem_file(pem_path, pem_index)
            self.address = Address(self.signer.get_pubkey().buffer, DEFAULT_HRP)
        elif key_file and password:
            # NOTE(review): a key_file given without a password is silently
            # ignored here (signer stays None) — presumably intentional,
            # matching the original behavior.
            key_file_path = Path(key_file).expanduser().resolve()
            self.signer = UserSigner.from_wallet(key_file_path, password)
            self.address = Address(self.signer.get_pubkey().buffer, DEFAULT_HRP)

    def sign_transaction(self, transaction: ITransaction) -> str:
        """Return the hex-encoded signature of *transaction*."""
        assert self.signer is not None
        return self.signer.sign(transaction).hex()

    def sign_message(self, data: bytes) -> str:
        """Return the hex-encoded signature of raw message bytes *data*."""
        assert self.signer is not None
        message = MessageV1(data)
        signature = self.signer.sign(message)
        logger.debug(f"Account.sign_message(): raw_data_to_sign = {data.hex()}, message_data_to_sign = {message.serialize_for_signing().hex()}, signature = {signature.hex()}")
        return signature.hex()
class LedgerAccount(Account):
    """Account whose keys live on a Ledger hardware device.

    The bech32 address is fetched from the device at construction time and
    all signing operations are delegated to the device.
    """

    def __init__(self, account_index: int = 0, address_index: int = 0) -> None:
        super().__init__()
        self.account_index = account_index
        self.address_index = address_index
        bech32_address = do_get_ledger_address(account_index=account_index, address_index=address_index)
        self.address = Address.from_bech32(bech32_address)

    def sign_transaction(self, transaction: ITransaction) -> str:
        """Sign *transaction* on the device, using hash-signing when the app version supports it."""
        app_version = do_get_ledger_version()
        use_hash_signing = compare_versions(app_version, SIGN_USING_HASH_VERSION) >= 0
        if use_hash_signing:
            # Hash-signing requires specific version/options flags on the transaction.
            transaction.version = TX_HASH_SIGN_VERSION
            transaction.options = TX_HASH_SIGN_OPTIONS

        signature = do_sign_transaction_with_ledger(
            transaction.serialize_for_signing(),
            account_index=self.account_index,
            address_index=self.address_index,
            sign_using_hash=use_hash_signing
        )
        assert isinstance(signature, str)
        return signature

    def sign_message(self, data: bytes) -> str:
        """Sign raw bytes on the device; the payload is prefixed with its 4-byte big-endian length."""
        prefixed_data: bytes = len(data).to_bytes(4, byteorder="big") + data
        logger.debug(f"LedgerAccount.sign_message(): raw_data_to_sign = {data.hex()}, message_data_to_sign = {prefixed_data.hex()}")

        signature = do_sign_message_with_ledger(
            prefixed_data,
            account_index=self.account_index,
            address_index=self.address_index
        )
        assert isinstance(signature, str)
        logger.debug(f"LedgerAccount.sign_message(): signature = {signature}")
        return signature
from io import BytesIO
import pytest
from translate.storage import csvl10n, test_base
class TestCSVUnit(test_base.TestTranslationUnit):
    """Unit-level tests for csvl10n.csvunit (runs the inherited generic suite)."""

    UnitClass = csvl10n.csvunit
class TestCSV(test_base.TestTranslationStore):
    """Store-level tests for csvl10n.csvfile (extends the generic store suite)."""

    StoreClass = csvl10n.csvfile

    def parse_store(self, source, **kwargs):
        """Helper that parses source without requiring files."""
        return self.StoreClass(BytesIO(source), **kwargs)

    def test_singlequoting(self):
        """Tests round trip on single quoting at start of string"""
        store = self.StoreClass()
        unit1 = store.addsourceunit("Test 'String'")
        assert unit1.source == "Test 'String'"
        unit2 = store.addsourceunit("'Blessed' String")
        assert unit2.source == "'Blessed' String"
        unit3 = store.addsourceunit("'Quoted String'")
        assert unit3.source == "'Quoted String'"
        newstore = self.reparse(store)
        self.check_equality(store, newstore)
        assert store.units[2] == newstore.units[2]
        assert bytes(store) == bytes(newstore)

    def test_utf_8(self):
        """A plain UTF-8 semicolon-separated line parses into location/source/target."""
        store = self.parse_store("foo.c:1;test;zkouška sirén".encode())
        assert len(store.units) == 1
        assert store.units[0].source == "test"
        assert store.units[0].target == "zkouška sirén"

    def test_utf_8_sig(self):
        """UTF-8 content with a BOM parses and round-trips byte-for-byte (BOM kept)."""
        content = '"location";"source";"target"\r\n"foo.c:1";"test";"zkouška sirén"\r\n'.encode(
            "utf-8-sig"
        )
        store = self.parse_store(content)
        assert len(store.units) == 1
        assert store.units[0].source == "test"
        assert store.units[0].target == "zkouška sirén"
        assert bytes(store) == content

    def test_default(self):
        """Without a recognized header, the first row is parsed as a unit too;
        quoted embedded commas and a bare dash survive parsing."""
        content = b"""ID,English
GENERAL@2|Notes,"cable, motor, switch"
*****END CALL*****|Ask,-"""
        store = self.parse_store(content)
        assert len(store.units) == 3
        assert store.units[0].location == "ID"
        assert store.units[0].source == "English"
        assert store.units[1].location == "GENERAL@2|Notes"
        assert store.units[1].source == "cable, motor, switch"
        assert store.units[2].location == "*****END CALL*****|Ask"
        assert store.units[2].source == "-"

    def METHOD_NAME(self):
        """Tests that units with location are correctly parsed."""
        source = b'"65066","Ogre","Ogro"\n"65067","Ogra","Ogros"'
        store = self.parse_store(source)
        assert len(store.units) == 2
        unit1 = store.units[0]
        assert unit1.location == "65066"
        assert unit1.source == "Ogre"
        assert unit1.target == "Ogro"
        unit2 = store.units[1]
        assert unit2.location == "65067"
        assert unit2.source == "Ogra"
        assert unit2.target == "Ogros"
        # Distinct locations must yield distinct unit ids.
        assert unit1.getid() != unit2.getid()

    def test_context_is_parsed(self):
        """Tests that units with the same source are different based on context."""
        source = b'"context","source","target"\n"65066","Ogre","Ogro"\n"65067","Ogre","Ogros"'
        store = self.parse_store(source)
        assert len(store.units) == 2
        unit1 = store.units[0]
        assert unit1.context == "65066"
        assert unit1.source == "Ogre"
        assert unit1.target == "Ogro"
        unit2 = store.units[1]
        assert unit2.context == "65067"
        assert unit2.source == "Ogre"
        assert unit2.target == "Ogros"
        assert unit1.getid() != unit2.getid()

    def test_newline(self):
        """Literal backslash-n sequences in fields are preserved (not unescaped) and round-trip."""
        content = b'"location";"source";"target"\r\n"foo.c:1";"te\\nst";"ot\\nher"\r\n'
        store = self.parse_store(content)
        assert len(store.units) == 1
        assert store.units[0].source == "te\\nst"
        assert store.units[0].target == "ot\\nher"
        assert bytes(store) == content

    def test_parse_sample(self):
        """parse() accepts an explicit sample_length argument (None here) and still
        parses the content correctly."""
        # NOTE(review): sample_length presumably controls the dialect-detection
        # sample size — confirm against csvl10n.csvfile.parse.
        content = b'"location";"source";"target"\r\n"foo.c:1";"te\\nst";"ot\\nher"\r\n'
        store = self.StoreClass()
        store.parse(content, sample_length=None)
        assert len(store.units) == 1
        assert store.units[0].source == "te\\nst"
        assert store.units[0].target == "ot\\nher"
        assert bytes(store) == content

    def test_utf_8_detection(self):
        """UTF-8 content with a full header row and a non-ASCII target parses and round-trips."""
        content = (
            """"location","source","target","id","fuzzy","context","translator_comments","developer_comments"\r\n"""
            """"","Second","秒","","False","00029.00002","","# Filter Order|IDE_2ND_ORDER_FILTER"\r\n"""
        )
        store = self.StoreClass()
        store.parse(content.encode())
        assert len(store.units) == 1
        assert store.units[0].source == "Second"
        assert store.units[0].target == "秒"
        assert bytes(store).decode() == content

    def test_encoding(self):
        """An explicit encoding= is honored; decoding with the wrong one raises UnicodeDecodeError."""
        content = "foo.c:1;test;zkouška sirén"
        store = self.parse_store(content.encode("utf-8"))
        assert len(store.units) == 1
        assert store.units[0].source == "test"
        assert store.units[0].target == "zkouška sirén"
        store = self.parse_store(content.encode("utf-8"), encoding="utf-8")
        assert len(store.units) == 1
        assert store.units[0].source == "test"
        assert store.units[0].target == "zkouška sirén"
        store = self.parse_store(content.encode("iso-8859-2"), encoding="iso-8859-2")
        assert len(store.units) == 1
        assert store.units[0].source == "test"
        assert store.units[0].target == "zkouška sirén"
        with pytest.raises(UnicodeDecodeError):
            store = self.parse_store(content.encode("iso-8859-2"), encoding="utf-8")
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.