code stringlengths 281 23.7M |
|---|
(base=RequestContextTask, bind=True, max_retries=5)
def send_event_invoice(self, event_id: int, send_notification: bool = True, force: bool = False):
    """Generate and persist this month's invoice PDF for an event.

    Skips generation when an invoice for the current month already exists,
    unless ``force`` is set. Returns the PDF URL on success, ``None`` when
    generation is skipped or the PDF could not be produced. Retries the task
    on save errors and on lock-acquisition errors.
    """
    this_month = this_month_date()
    event_invoice = (
        EventInvoice.query.filter_by(event_id=event_id)
        .filter(EventInvoice.issued_at >= this_month)
        .first()
    )
    if (not force) and event_invoice:
        # Fix: Logger.warn is deprecated; use warning() like the other paths.
        logger.warning('Event Invoice of this month for this event has already been created: %s', event_id)
        return
    event = Event.query.get(event_id)
    try:
        saved = False
        pdf_url = None
        # Serialize invoice generation across workers to avoid duplicates.
        with redis_store.lock('event_invoice_generate', timeout=5, blocking_timeout=20):
            event_invoice = EventInvoice(event=event, issued_at=this_month)
            pdf_url = event_invoice.populate()
            if pdf_url:
                try:
                    save_to_db(event_invoice)
                    saved = True
                    logger.info('Generated Event invoice %s for %s. Amount: %f', event_invoice, event, event_invoice.amount)
                except Exception as e:
                    logger.exception('Error while saving invoice. Retrying')
                    raise self.retry(exc=e)
            else:
                logger.warning('Failed to generate event invoice PDF %s', event)
        if saved and send_notification:
            send_invoice_notification.delay(event_invoice.id)
        return pdf_url
    except LockError as e:
        logger.exception('Error while acquiring lock. Retrying')
        # Fix: raise the retry exception, consistent with the save path;
        # Task.retry returns an exception that is meant to be raised.
        raise self.retry(exc=e)
class Citator():
    """Collects citations for the external tools used during a run and
    pretty-prints them so users remember to cite the underlying software."""

    # Canonical citation text for each supported tool, keyed by the tool's
    # internal name. (NOTE(review): the 'mcoffee' entry contains a raw line
    # break inside the string literal — looks like extraction damage in the
    # original source; confirm against upstream.)
    REFERENCES = {'ETE': u'Huerta-Cepas J, Serra F, Bork P. ETE 3: Reconstruction, analysis and\n visualization of phylogenomic data. Mol Biol Evol (2016) doi:\n 10.1093/molbev/msw046', 'phyml': u'Guindon S, Dufayard JF, Lefort V, Anisimova M, Hordijk W, Gascuel O.\n New algorithms and methods to estimate maximum-likelihood phylogenies:\n assessing the performance of PhyML 3.0. Syst Biol. 2010\n May;59(3):307-21.', 'fasttree': u'Price MN, Dehal PS, Arkin AP. FastTree 2 -\n approximately maximum-likelihood trees for large alignments. PLoS\n One. 2010 Mar 10;5(3):e9490.', 'raxml': u'Stamatakis A. RAxML version 8: a tool for phylogenetic analysis and\n post-analysis of large phylogenies Bioinformatics (2014) 30 (9): 1312-1313.', 'mafft': u'Katoh K, Kuma K, Toh H, Miyata T. MAFFT version 5:\n improvement in accuracy of multiple sequence alignment. Nucleic Acids\n Res. 2005 Jan 20;33(2):511-8.', 'trimal': u'Capella-Gutierrez S, Silla-Martinez JM, Gabaldon T.\n trimAl: a tool for automated alignment trimming in large-scale\n phylogenetic analyses. Bioinformatics. 2009 Aug 1;25(15):1972-3.', 'muscle': u'Edgar RC. MUSCLE: multiple sequence alignment with\n high accuracy and high throughput.", Nucleic Acids Res. 2004 Mar\n 19;32(5):1792-7.', 'clustalo': u' Sievers F, Wilm A, Dineen D, Gibson TJ, Karplus\n K, Li W, Lopez R, McWilliam H, Remmert M, Soding J, Thompson JD,\n Higgins DG. Fast, scalable generation of high-quality protein\n multiple sequence alignments using Clustal Omega. Mol Syst Biol. 2011\n Oct 11;7:539. doi: 10.1038/msb.2011.75.', 'dialigntx': u'Subramanian AR, Kaufmann M, Morgenstern B.\n DIALIGN-TX: greedy and progressive approaches for segment-based\n multiple sequence alignment. Algorithms Mol Biol. 2008 May 27;3:6.', 'mcoffee': u"Wallace IM, O'Sullivan O, Higgins DG, Notredame C.\n M-Coffee: combining multiple sequence alignment methods with T-Coffee.\n Nucleic Acids Res. 2006 Mar 23;34(6):1692-9. 
", 'tcoffee': u'Magis C, Taly JF, Bussotti G, Chang JM, Di Tommaso P, Erb I,\n Espinosa-Carrasco J, Notredame C. T-Coffee: Tree-based consistency objective\n function for alignment evaluation. Methods Mol Biol. 2014;1079:117-29.', 'jmodeltest': u'Darriba D, Taboada GL, Doallo R, Posada\n D. jModelTest 2: more models, new heuristics and parallel computing.Nat\n Methods. 2012 Jul 30;9(8):772.', 'treeko': u'Marcet-Houben M, Gabaldon T. TreeKO: a duplication-aware algorithm for the\n comparison of phylogenetic trees. Nucleic Acids Res. 2011 May;39(10):e66. doi:\n 10.1093/nar/gkr087.', 'iqtree': u'LT Nguyen, H.A. Schmidt, A. von Haeseler, and BQ Minh (2015) IQ-TREE: A\n fast and effective stochastic algorithm for estimating maximum likelihood\n phylogenies. Mol. Biol. Evol., 32, 268-274.', 'pll': u'T Flouri, F Izquierdo-Carrasco, D. Darriba, AJ Aberer, LT Nguyen,\n B.Q. Minh, A. von Haeseler, and A. Stamatakis (2015) The phylogenetic likelihood\n library. Syst. Biol., 64:356-362.', 'ufboot': u'BQ Minh, MAT Nguyen, and A. von Haeseler (2013) Ultrafast approximation\n for phylogenetic bootstrap. Mol. Biol. Evol., 30:1188-1195.'}

    def __init__(self):
        # Unique set of citation strings collected so far.
        self.citations = set()

    def add(self, ref):
        """Register the citation for tool *ref* (must be a REFERENCES key)."""
        self.citations.add(self.REFERENCES[ref])

    def show(self):
        """Print all collected citations, whitespace-normalized and wrapped."""
        wrapper = twrap.TextWrapper(width=75, initial_indent=' ', subsequent_indent=' ', replace_whitespace=False)
        citations = sorted(self.citations)
        print(' ')
        print(' The following published software and/or methods were used. ')
        print(' *** Please, do not forget to cite them! *** ')
        print(' ')
        for ref in citations:
            # Collapse embedded newlines/tabs before re-wrapping to width 75.
            print(wrapper.fill(re.sub('[\n \t]+', ' ', ref).strip()))
_postgres
def test_gis_select(db):
    """Round-trip each GIS type (point/line/polygon and their multi- variants)
    through both the Geometry and Geography models and verify that geometry
    kind, raw coordinates, and sub-geometry groups survive storage."""
    for model in [Geometry, Geography]:
        # One row exercising every supported GIS column type.
        row = model.new(point=geo.Point(1, 1), line=geo.Line((0, 0), (20, 80), (80, 80)), polygon=geo.Polygon((0, 0), (150, 0), (150, 10), (0, 10), (0, 0)), multipoint=geo.MultiPoint((1, 1), (2, 2)), multiline=geo.MultiLine(((1, 1), (2, 2), (3, 3)), ((1, 1), (4, 4), (5, 5))), multipolygon=geo.MultiPolygon((((0, 0), (20, 0), (20, 20), (0, 0)), ((0, 0), (30, 0), (30, 30), (0, 0))), (((1, 1), (21, 1), (21, 21), (1, 1)), ((1, 1), (31, 1), (31, 31), (1, 1)))))
        row.save()
        # Re-fetch so values come back through the database type adapters.
        row = model.get(row.id)
        # Simple geometries have no sub-groups.
        assert (row.point.geometry == 'POINT')
        assert (row.point.coordinates == (1, 1))
        assert (not row.point.groups)
        assert (row.line.geometry == 'LINESTRING')
        assert (row.line.coordinates == ((0, 0), (20, 80), (80, 80)))
        assert (not row.line.groups)
        assert (row.polygon.geometry == 'POLYGON')
        # Polygon coordinates come back as a 1-tuple of rings.
        assert (row.polygon.coordinates == (((0, 0), (150, 0), (150, 10), (0, 10), (0, 0)),))
        assert (not row.polygon.groups)
        # Multi-geometries expose each member via .groups.
        assert (row.multipoint.geometry == 'MULTIPOINT')
        assert (row.multipoint.coordinates == ((1, 1), (2, 2)))
        assert (len(row.multipoint.groups) == 2)
        assert (row.multipoint.groups[0] == geo.Point(1, 1))
        assert (row.multipoint.groups[1] == geo.Point(2, 2))
        assert (row.multiline.geometry == 'MULTILINESTRING')
        assert (row.multiline.coordinates == (((1, 1), (2, 2), (3, 3)), ((1, 1), (4, 4), (5, 5))))
        assert (len(row.multiline.groups) == 2)
        assert (row.multiline.groups[0] == geo.Line((1, 1), (2, 2), (3, 3)))
        assert (row.multiline.groups[1] == geo.Line((1, 1), (4, 4), (5, 5)))
        assert (row.multipolygon.geometry == 'MULTIPOLYGON')
        assert (row.multipolygon.coordinates == ((((0, 0), (20, 0), (20, 20), (0, 0)), ((0, 0), (30, 0), (30, 30), (0, 0))), (((1, 1), (21, 1), (21, 21), (1, 1)), ((1, 1), (31, 1), (31, 31), (1, 1)))))
        assert (len(row.multipolygon.groups) == 2)
        assert (row.multipolygon.groups[0] == geo.Polygon(((0, 0), (20, 0), (20, 20), (0, 0)), ((0, 0), (30, 0), (30, 30), (0, 0))))
        assert (row.multipolygon.groups[1] == geo.Polygon(((1, 1), (21, 1), (21, 21), (1, 1)), ((1, 1), (31, 1), (31, 31), (1, 1))))
class OptionSeriesSolidgaugeSonificationDefaultinstrumentoptionsMappingLowpassFrequency(Options):
    """Generated Highcharts options wrapper for the lowpass-frequency mapping
    of a solid-gauge series' default sonification instrument."""

    # NOTE(review): every name below is defined twice — a getter form
    # followed by a setter form. Without @property / @<name>.setter
    # decorators the second definition shadows the first, so only the
    # setter is callable. Decorators were likely stripped by the code
    # generator/extraction; confirm against the generator output.
    def mapFunction(self):
        # Getter form: returns the configured value (default None).
        return self._config_get(None)

    def mapFunction(self, value: Any):
        # Setter form: stores a plain (non-JS) value.
        self._config(value, js_type=False)

    def mapTo(self):
        return self._config_get(None)

    def mapTo(self, text: str):
        self._config(text, js_type=False)

    def max(self):
        return self._config_get(None)

    def max(self, num: float):
        self._config(num, js_type=False)

    def min(self):
        return self._config_get(None)

    def min(self, num: float):
        self._config(num, js_type=False)

    def within(self):
        return self._config_get(None)

    def within(self, value: Any):
        self._config(value, js_type=False)
def parse_base_args(parser):
    """Parse common ETW CLI arguments, resolving the trace-level name to its
    evntrace constant and validating mutually exclusive options.

    Returns the parsed arguments as a plain dict.
    Raises ETWException on conflicting or insufficient options.
    """
    parsed_args = parser.parse_args()
    # Imported lazily: evntrace is only available on Windows hosts.
    from etw import evntrace as et
    level_names = ('critical', 'error', 'warning', 'information', 'verbose',
                   'reserved6', 'reserved7', 'reserved8', 'reserved9')
    # Each CLI level name maps onto the TRACE_LEVEL_<NAME> constant.
    level_map = {name: getattr(et, 'TRACE_LEVEL_' + name.upper()) for name in level_names}
    parsed_args.level = level_map[parsed_args.level]
    if parsed_args.default_filters is True and parsed_args.filters is not None:
        raise ETWException('Cannot specify use default filters and set filters')
    if parsed_args.no_conout is True and parsed_args.logfile is None:
        raise ETWException('Either console output or logfile must be specified')
    return vars(parsed_args)
def extractLazyslothtranslationsWordpressCom(item):
    """Build a release message for a feed item based on its tags.

    Returns None for previews / untitled chapters, a release message when a
    known tag matches, and False otherwise.
    """
    vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])
    if not (chp or vol) or 'preview' in item['title'].lower():
        return None
    # First matching (tag, series name, translation type) entry wins.
    matched = next(
        (
            (series, tl)
            for tag, series, tl in [('PRC', 'PRC', 'translated'), ('Loiterous', 'Loiterous', 'oel')]
            if tag in item['tags']
        ),
        None,
    )
    if matched is None:
        return False
    series, tl = matched
    return buildReleaseMessageWithType(item, series, vol, chp, frag=frag, postfix=postfix, tl_type=tl)
class WindowedKeysView(KeysView):
    """View of the keys in a windowed table, resolved relative to an event."""

    def __init__(self, mapping: WindowWrapperT, event: Optional[EventT] = None) -> None:
        self._mapping = mapping
        self.event = event

    def __iter__(self) -> Iterator:
        """Iterate keys using the view's bound event (may be None)."""
        wrapper = cast(WindowWrapper, self._mapping)
        yield from (key for key, _ in wrapper._items(self.event))

    def __len__(self) -> int:
        return len(self._mapping)

    def now(self) -> Iterator[Any]:
        """Iterate keys whose windows are resolved at the current time."""
        wrapper = cast(WindowWrapper, self._mapping)
        yield from (key for key, _ in wrapper._items_now())

    def current(self, event: Optional[EventT] = None) -> Iterator[Any]:
        """Iterate keys resolved at the given (or bound) event's time."""
        wrapper = cast(WindowWrapper, self._mapping)
        yield from (key for key, _ in wrapper._items_current(event or self.event))

    def delta(self, d: Seconds, event: Optional[EventT] = None) -> Iterator[Any]:
        """Iterate keys resolved *d* seconds before the event's time."""
        wrapper = cast(WindowWrapper, self._mapping)
        yield from (key for key, _ in wrapper._items_delta(d, event or self.event))
def test_get_integration_params_channel_logic():
    """Slack channel resolution: config channel by default, alert-level
    channel overrides it, unless override_config_defaults forces config.

    NOTE(review): DBT_TEST_ALERT_MOCK is a shared module-level mock and
    `model_meta.update` mutates it in place — the three cases below are
    order-dependent; confirm the mock is reset between test modules.
    """
    # Case 1: no channel in alert meta -> channel comes from configuration.
    alert = DBT_TEST_ALERT_MOCK
    integration = get_slack_integration_mock(slack_token='mock', slack_channel_name='from_conf')
    assert (json.dumps(integration._get_integration_params(alert=alert)) == json.dumps({'channel': 'from_conf'}))
    # Case 2: alert meta declares a channel -> it overrides the config.
    alert = DBT_TEST_ALERT_MOCK
    alert.model_meta.update(dict(channel='from_alert'))
    integration = get_slack_integration_mock(slack_token='mock', slack_channel_name='from_conf')
    assert (json.dumps(integration._get_integration_params(alert=alert)) == json.dumps({'channel': 'from_alert'}))
    # Case 3: override_config_defaults wins over the alert-level channel.
    alert = DBT_TEST_ALERT_MOCK
    alert.model_meta.update(dict(channel='from_alert'))
    integration = get_slack_integration_mock(slack_token='mock', slack_channel_name='from_conf', override_config_defaults=True)
    assert (json.dumps(integration._get_integration_params(alert=alert)) == json.dumps({'channel': 'from_conf'}))
def retype_message(message):
    """Re-wrap a generic fedora-messaging Message into a typed copr schema
    class based on its topic suffix.

    Messages that are not plain ``Message`` instances (i.e. already typed)
    are returned unchanged — hence the exact-type check rather than
    isinstance(), which would re-wrap subclasses.
    """
    # Fix: compare types with `is`, not `==` (flake8 E721); semantics are
    # identical for exact-type comparison.
    if type(message) is Message:
        if message.topic.endswith('.copr.build.end'):
            return BuildChrootEndedV1(body=message.body)
        if message.topic.endswith('.copr.build.start'):
            return BuildChrootStartedV1(body=message.body)
        if message.topic.endswith('.copr.chroot.start'):
            return BuildChrootStartedV1DontUse(body=message.body)
    return message
def describe_sh_flags(x):
    """Return the readelf-style flag-letter string for the section header
    flag bitmask *x* (one letter per set, known flag)."""
    known_flags = (
        SH_FLAGS.SHF_WRITE, SH_FLAGS.SHF_ALLOC, SH_FLAGS.SHF_EXECINSTR,
        SH_FLAGS.SHF_MERGE, SH_FLAGS.SHF_STRINGS, SH_FLAGS.SHF_INFO_LINK,
        SH_FLAGS.SHF_LINK_ORDER, SH_FLAGS.SHF_OS_NONCONFORMING,
        SH_FLAGS.SHF_GROUP, SH_FLAGS.SHF_TLS, SH_FLAGS.SHF_EXCLUDE,
    )
    return ''.join(_DESCR_SH_FLAGS[flag] for flag in known_flags if x & flag)
class LicenseBaseInformationError(ErsiliaError):
    """Raised when a model declares a license outside the known list."""

    def __init__(self):
        self.message = 'Wrong license'
        allowed = ', '.join(_read_default_fields('License'))
        self.hints = "Listed licenses are: {}. If the model has a license not in this list, please open a PR on the 'license.txt' file in the Ersilia repository".format(allowed)
        super().__init__(self.message, self.hints)
_exempt
def retrieve_workflow_by_experimentId(request):
    """Return the workflow for the experiment id posted as JSON in the body.

    An empty request body yields a placeholder 'Nothing' response.
    """
    if not request.body:
        return HttpResponse('Nothing', content_type='application/json')
    payload = json.loads(request.body.decode('utf-8'))
    workflow = experiment_service.get_workflow_by_experimentId(payload)
    return HttpResponse(workflow, content_type='application/json')
class EVENT_FILTER_EVENT_NAME(ct.Structure):
    """ctypes mirror of the Win32 EVENT_FILTER_EVENT_NAME structure, with a
    variable-length ``Names`` tail packed into a manually sized buffer."""

    # `Names` is declared with length 0: it is a variable-size tail whose
    # real storage lives in the oversized buffer allocated in __init__.
    _fields_ = [('MatchAnyKeyword', ct.c_ulonglong), ('MatchAllKeyword', ct.c_ulonglong), ('Level', wt.CHAR), ('FilterIn', wt.BOOLEAN), ('NameCount', wt.USHORT), ('Names', (wt.CHAR * 0))]

    def __init__(self, match_any, match_all, level, filter_in, names):
        # Total size = every name's characters + one CHAR (NUL slot) per
        # name + the fixed-size header.
        struct_size = (((sum([len(name) for name in names]) * ct.sizeof(wt.CHAR)) + (ct.sizeof(wt.CHAR) * len(names))) + ct.sizeof(EVENT_FILTER_EVENT_NAME))
        # Back the struct with a raw char buffer big enough for the tail,
        # then view it through a typed pointer.
        self._buf = (ct.c_char * struct_size)()
        self._props = ct.cast(ct.pointer(self._buf), ct.POINTER(EVENT_FILTER_EVENT_NAME))
        self._props.contents.MatchAnyKeyword = match_any
        self._props.contents.MatchAllKeyword = match_all
        self._props.contents.Level = level
        self._props.contents.FilterIn = filter_in
        self._props.contents.NameCount = len(names)
        # Copy each name into the tail region after the header, advancing
        # past the name plus one CHAR so names stay NUL-separated (the
        # buffer is zero-initialized). Assumes names are bytes — TODO confirm.
        str_off = 0
        for i in range(len(names)):
            ct.memmove(ct.cast(((ct.addressof(self._buf) + ct.sizeof(EVENT_FILTER_EVENT_NAME)) + str_off), ct.c_void_p), names[i], len(names[i]))
            str_off += (len(names[i]) + ct.sizeof(wt.CHAR))

    def get(self):
        """Return the typed pointer to the populated structure."""
        return self._props
class FidesCollectionKey(ConstrainedStr):
    """A string of the form '<FidesKey>.<FidesKey>' naming a dataset and a
    collection within it."""

    # NOTE(review): takes `cls` with no @classmethod decorator, matching the
    # original source as seen here — confirm whether a decorator was stripped.
    def validate(cls, value: str) -> str:
        """Validate both dotted components as FidesKeys and return *value*."""
        parts = value.split('.')
        if len(parts) != 2:
            raise ValueError("FidesCollection must be specified in the form 'FidesKey.FidesKey'")
        for part in parts:
            FidesKey.validate(part)
        return value
class OptionSeriesSunburstSonificationContexttracksMappingGapbetweennotes(Options):
    """Generated Highcharts options wrapper for the gap-between-notes mapping
    of a sunburst series' sonification context tracks."""

    # NOTE(review): every name below is defined twice (getter then setter
    # form). Without @property / @<name>.setter decorators the second
    # definition shadows the first — decorators were likely stripped by the
    # code generator/extraction; confirm against the generator output.
    def mapFunction(self):
        # Getter form: returns the configured value (default None).
        return self._config_get(None)

    def mapFunction(self, value: Any):
        # Setter form: stores a plain (non-JS) value.
        self._config(value, js_type=False)

    def mapTo(self):
        return self._config_get(None)

    def mapTo(self, text: str):
        self._config(text, js_type=False)

    def max(self):
        return self._config_get(None)

    def max(self, num: float):
        self._config(num, js_type=False)

    def min(self):
        return self._config_get(None)

    def min(self, num: float):
        self._config(num, js_type=False)

    def within(self):
        return self._config_get(None)

    def within(self, value: Any):
        self._config(value, js_type=False)
def _merge_configs(*configs) -> Dict[(str, Any)]:
config = dict()
for subconfig in configs:
config.update(subconfig)
for section in ('plugins', 'exceptions'):
config[section] = dict()
for subconfig in configs:
config[section].update(subconfig.get(section, {}))
return config |
class AITSplitter(splitter_base._SplitterBase):
    """Graph splitter that partitions an FX module into AIT-lowerable
    submodules and '_run_on_gpu_' fallback submodules."""

    def __init__(self, module: torch.fx.GraphModule, sample_input: Sequence[Any], operator_support: ops.OperatorSupportBase=None, settings: AITSplitterSettings=None):
        # Fall back to default settings when none are supplied.
        if (not settings):
            settings = AITSplitterSettings()
        if (not operator_support):
            # Build the default AIT operator support, honoring the
            # exclusion list and the int-input allowance from settings.
            operator_support = create_ait_operator_support(op_lowering_disallow_list=settings.exclude_support_node_name, allow_int_inputs=settings.allow_int_inputs)
        else:
            # Layer the exclusion list on top of caller-provided support so
            # excluded node names are declined either way.
            operator_support = ops.chain(operator_support, ops.OpSupports.decline_if_node_in_names(settings.exclude_support_node_name))
        super().__init__(module, sample_input, operator_support, settings, non_acc_submodule_name='_run_on_gpu_')

    def _lower_model_to_backend(self, mod: torch.fx.GraphModule, inputs: Iterable[torch.Tensor]):
        """Lower an accelerated submodule to an AITemplate engine module."""
        # NOTE(review): the interpreter is constructed with `[inputs]`
        # (inputs wrapped in a list) but run with `*inputs` — confirm
        # AITInterpreter expects a nested sequence here.
        interp = AITInterpreter(mod, [inputs])
        interpreter_result = interp.run(*inputs)
        return AITModule(torch.classes.fb.AITModel(interpreter_result.engine.lib_path, interpreter_result.input_names, interpreter_result.output_names, torch.float16, torch.float, 1), interpreter_result)
def horizontal_mirror(art: str) -> str:
art_lines = art.split('\n')
ansi_escape_regex = '((\\x9B|\\x1B\\[)[0-?]*[ -\\/]*[-~])'
output = ''
for line in art_lines:
chars_and_escape_codes = re.split(ansi_escape_regex, line)
reversed_string = ''
for code in chars_and_escape_codes[::(- 1)]:
if re.match(ansi_escape_regex, code):
reversed_string += code
else:
reversed_string += code[::(- 1)]
output += (reversed_string + '\n')
return output[:(- 1)] |
class TestsRGBSerialize(util.ColorAssertsPyTest):
    """Table-driven serialization tests for sRGB: each case is
    (input color string, to_string() options, expected output)."""

    COLORS = [('rgb(255 0 0)', {}, 'rgb(255 0 0)'), ('rgb(255 0 0)', {'names': True}, 'red'), ('rgb(255 0 0)', {'hex': True}, '#ff0000'), ('rgb(255 0 0)', {'hex': True, 'compress': True}, '#f00'), ('rgb(255 0 0 / 0.53333)', {'hex': True}, '#ff000088'), ('rgb(255 0 0 / 0.53333)', {'hex': True, 'compress': True}, '#f008'), ('rgb(255 0 0 / 0.53333)', {'hex': True, 'upper': True}, '#FF000088'), ('rgb(255 0 0 / 0.5)', {}, 'rgb(255 0 0 / 0.5)'), ('rgb(255 0 0)', {'alpha': True}, 'rgb(255 0 0 / 1)'), ('rgb(255 0 0 / 0.5)', {'alpha': False}, 'rgb(255 0 0)'), ('rgb(255 0 0)', {'percent': True}, 'rgb(100% 0% 0%)'), ('rgb(255 0 0)', {'percent': True, 'alpha': True}, 'rgb(100% 0% 0% / 1)'), ('rgb(none 128 10)', {}, 'rgb(0 128 10)'), ('rgb(none 128 10)', {'none': True}, 'rgb(none 128 10)'), ('rgb(260 0 0 )', {}, 'rgb(255 0 0)'), ('rgb(260 0 0 )', {'fit': False}, 'rgb(260 0 0)'), ('rgb(260 0 0 )', {'hex': True}, '#ff0000'), ('rgb(255 0 0)', {'comma': True}, 'rgb(255, 0, 0)'), ('rgb(255 0 0 / 0.5)', {'comma': True}, 'rgba(255, 0, 0, 0.5)'), ('rgb(255 0 0)', {'comma': True, 'alpha': True}, 'rgba(255, 0, 0, 1)'), ('rgb(255 0 0 / 0.5)', {'comma': True, 'alpha': False}, 'rgb(255, 0, 0)'), ('rgb(none 128 10)', {'comma': True}, 'rgb(0, 128, 10)'), ('rgb(none 128 10)', {'comma': True, 'none': True}, 'rgb(0, 128, 10)'), ('rgb(none 255 25.5 / 0.5)', {'color': True}, 'color(srgb 0 1 0.1 / 0.5)'), ('rgb(none 255 25.5)', {'color': True, 'none': True}, 'color(srgb none 1 0.1)'), ('rgb(0 255 25.5)', {'color': True, 'alpha': True}, 'color(srgb 0 1 0.1 / 1)'), ('rgb(0 255 25.5 / 0.5)', {'color': True, 'alpha': False}, 'color(srgb 0 1 0.1)'), ('color(srgb 1.2 0.2 0)', {'color': True}, 'color(srgb 1 0.42056 0.2633)'), ('color(srgb 1.2 0.2 0)', {'color': True, 'fit': False}, 'color(srgb 1.2 0.2 0)')]

    # NOTE(review): the next line appears to be a @pytest.mark.parametrize
    # decorator whose '@pytest.mark' prefix was lost during extraction —
    # confirm and restore the decorator upstream.
    .parametrize('color1,options,color2', COLORS)
    def test_colors(self, color1, options, color2):
        """Serialize color1 with the given options and compare to color2."""
        self.assertEqual(Color(color1).to_string(**options), color2)
def _delete(package_id):
    """Remove an installed package's source folder from the data directory.

    Raises FileNotFoundError when the package is not installed.
    """
    org, repo, version = _split_id(package_id)
    # NOTE(review): there is no separator between repo and version in the
    # path — presumably `version` carries its own prefix; confirm against
    # the install code.
    source_path = _get_data_folder().joinpath(f'packages/{org}/{repo}{version}')
    if not source_path.exists():
        raise FileNotFoundError(f"Package '{_format_pkg(org, repo, version)}' is not installed")
    shutil.rmtree(source_path)
    notify('SUCCESS', f"Package '{_format_pkg(org, repo, version)}' has been deleted")
_cache(maxsize=1024)
def normalize_int(value: IntConvertible) -> int:
    """Coerce *value* (int, big-endian bytes, 0x-hex string, or decimal
    string) to a plain int.

    Raises TypeError for unsupported input types.
    """
    if is_integer(value):
        return cast(int, value)
    if is_bytes(value):
        return big_endian_to_int(cast(bytes, value))
    if is_hex(value) and is_0x_prefixed(value):
        text = cast(str, value)
        # A bare '0x' (length 2) denotes zero.
        return 0 if len(text) == 2 else int(text, 16)
    if is_string(value):
        return int(value)
    raise TypeError(f'Unsupported type: Got `{type(value)}`')
class TestLanguageAgnosticDocs(BaseTestMarkdownDocs):
    """Check that the proto code snippets embedded in the language-agnostic
    definition doc page match the actual .proto source files."""

    # Markdown page under test, relative to the repository root.
    DOC_PATH = Path(ROOT_DIR, 'docs', 'language-agnostic-definition.md')

    # NOTE(review): `_proto_snippet_selector` and `setup_class` take `cls`
    # but carry no @classmethod decorator — decorators were likely stripped
    # during extraction; confirm against the original module.
    def _proto_snippet_selector(cls, block: Dict) -> bool:
        # Keep only fenced code blocks tagged `proto`.
        return ((block['type'] == 'block_code') and (block['info'].strip() == 'proto'))

    def setup_class(cls):
        super().setup_class()
        # All `proto` snippets on the page, in document order.
        cls.code_blocks = list(filter(cls._proto_snippet_selector, cls.flat_blocks))
        cls.actual_mail_base_file_content = MAIL_BASE_PROTO.read_text()
        cls.actual_default_message_file_content = DEFAULT_MESSAGE_PROTO.read_text()

    def test_envelope_code_snippet(self):
        """First proto snippet: both halves must appear in the mail base proto."""
        block = self.code_blocks[0]
        assert (block['info'].strip() == 'proto')
        lines = block['text'].splitlines()
        # The snippet interleaves two excerpts; check each part separately.
        (first_part, second_part) = ('\n'.join(lines[:3]), '\n'.join(lines[3:]))
        assert (first_part in self.actual_mail_base_file_content)
        assert (second_part in self.actual_mail_base_file_content)

    def test_dialogue_message_code_snippet(self):
        """Second proto snippet must appear verbatim in the mail base proto."""
        block = self.code_blocks[1]
        assert (block['info'].strip() == 'proto')
        assert (block['text'] in self.actual_mail_base_file_content)

    def test_public_id_regular_expression(self):
        """The documented public-id regex must appear in the page content."""
        expected_regex = PublicId.PUBLIC_ID_REGEX
        assert (expected_regex in self.doc_content)

    def test_default_message_code_snippet(self):
        """Third proto snippet must appear in the default message proto."""
        block = self.code_blocks[2]
        assert (block['info'].strip() == 'proto')
        assert (block['text'] in self.actual_default_message_file_content), Exception([block['text'], self.actual_default_message_file_content])
class ActionExpectsFieldErrorsDirective(ActionExpectsErrorsDirective):
    """Directive asserting that an error is reported for a specific field."""

    # NOTE(review): both methods take `cls` with no @classmethod decorator —
    # decorators were likely stripped during extraction; confirm upstream.
    def name(cls):
        # Directive keyword as it appears in test specifications.
        return 'expect_error_field'

    def get_full_grammar(cls):
        # Extend the parent grammar with ", field=<field_name>"; field names
        # may contain alphanumerics plus the characters -_.{}[].
        return ((((super(ActionExpectsFieldErrorsDirective, cls).get_full_grammar() + ',') + Literal('field')) + '=') + Word((alphanums + '-_.{}[]'))('field_name'))
def extractWuxiaLovers(item):
    """Map a WuxiaLovers feed item title to a release message.

    Returns None for previews / untitled chapters, a release message when a
    known title prefix matches, and False otherwise.
    """
    vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])
    if not (chp or vol or frag) or 'preview' in item['title'].lower():
        return None
    title = item['title']
    # Translated series: (title prefixes, canonical series name).
    translated = [
        (('CGA Chapter', 'CGA: Chapter'), 'Conquer God, Asura, and 1000 Beauties'),
        (('Etranger Chapter',), 'Etranger'),
        (('Q11 Chapter',), 'Queen of No.11 Agent 11'),
        (('STS Chapter',), 'Sky Traversing Sword Master'),
        (('DGM Chapter',), 'Descent of the God of Magic'),
        (('The First Hunter Chapter',), 'The First Hunter'),
    ]
    for prefixes, series in translated:
        if title.startswith(prefixes):
            return buildReleaseMessageWithType(item, series, vol, chp, frag=frag, postfix=postfix)
    # Original English (OEL) series get an explicit tl_type.
    oel = [
        (('Slaughter System ',), 'Slaughter System'),
        (('Getcha Skills Chapter ',), 'Getcha Skills'),
        (('Empyrean Ascent Chapter',), 'Empyrean Ascent'),
        (('[Guardian] Chapter',), '[Guardian]'),
    ]
    for prefixes, series in oel:
        if title.startswith(prefixes):
            return buildReleaseMessageWithType(item, series, vol, chp, frag=frag, postfix=postfix, tl_type='oel')
    return False
def transform_name(name, from_char='_', to_char='-'):
    """Normalize *name*: strip whitespace, collapse runs of *from_char* into
    a single *to_char*, and trim a leading/trailing *to_char*.

    Raises ValueError when nothing remains after normalization.
    """
    original = name
    name = name.strip()
    name = re.sub('{}+'.format(re.escape(from_char)), to_char, name)
    name = re.sub('^{c}|{c}$'.format(c=re.escape(to_char)), '', name)
    if not name:
        # Fix: report the caller's input — at this point `name` has been
        # emptied, so the old message always printed 'Invalid name ""'.
        raise ValueError('Invalid name "{}"'.format(original))
    return name
class ScalarField(Field):
    """A leaf field holding a single scalar value."""

    def cast(self, value: Any) -> Optional[Any]:
        """Convert raw input via the data-type converter; query tokens pass
        through unchanged."""
        if isinstance(value, QueryToken):
            return value
        return self.data_type_converter.to_value(value)

    def collect_matching(self, func: Callable[[Field], bool]) -> Dict[FieldPath, Field]:
        """Return {path: self} when the predicate accepts this field, else {}."""
        return {FieldPath(self.name): self} if func(self) else {}

    def __eq__(self, other: object) -> bool:
        # Equal iff same concrete type and identical attribute dicts.
        return isinstance(other, ScalarField) and self.__dict__ == other.__dict__
class OptionPlotoptionsDumbbellSonificationDefaultinstrumentoptionsMappingTremoloDepth(Options):
    """Generated Highcharts options wrapper for the tremolo-depth mapping of
    a dumbbell plot's default sonification instrument."""

    # NOTE(review): every name below is defined twice (getter then setter
    # form). Without @property / @<name>.setter decorators the second
    # definition shadows the first — decorators were likely stripped by the
    # code generator/extraction; confirm against the generator output.
    def mapFunction(self):
        # Getter form: returns the configured value (default None).
        return self._config_get(None)

    def mapFunction(self, value: Any):
        # Setter form: stores a plain (non-JS) value.
        self._config(value, js_type=False)

    def mapTo(self):
        return self._config_get(None)

    def mapTo(self, text: str):
        self._config(text, js_type=False)

    def max(self):
        return self._config_get(None)

    def max(self, num: float):
        self._config(num, js_type=False)

    def min(self):
        return self._config_get(None)

    def min(self, num: float):
        self._config(num, js_type=False)

    def within(self):
        return self._config_get(None)

    def within(self, value: Any):
        self._config(value, js_type=False)
def verify_ping(fledge_url, north_catch_up_time):
    """Assert the ping endpoint shows south reads fully forwarded north."""
    ping_result = utils.get_request(fledge_url, '/fledge/ping')
    for field in ('dataRead', 'dataSent'):
        assert field in ping_result
    assert ping_result['dataRead'] > 0, 'South data NOT seen in ping header'
    assert ping_result['dataRead'] == ping_result['dataSent'], 'Could not send all the data even after waiting {} seconds.'.format(north_catch_up_time)
def health_check(client, **kwargs):
    """Compare cluster health values against expected keyword arguments.

    Returns True when every provided key's value matches the cluster health
    output, False otherwise. Raises MissingArgument when no kwargs are
    given, ConfigurationError when a key is absent from the health output.
    """
    logger = logging.getLogger(__name__)
    logger.debug('KWARGS= "%s"', kwargs)
    if not kwargs:
        raise MissingArgument('Must provide at least one keyword argument')
    hc_data = client.cluster.health()
    response = True
    for key, expected in kwargs.items():
        if key not in hc_data:
            # Fix: the key was never interpolated — the message used to be
            # emitted with a literal '{0}' placeholder.
            raise ConfigurationError('Key "{0}" not in cluster health output'.format(key))
        if hc_data[key] == expected:
            logger.debug(f'MATCH: Value for key "{expected}", health check data: {hc_data[key]}')
        else:
            logger.debug(f'NO MATCH: Value for key "{expected}", health check data: {hc_data[key]}')
            response = False
    if response:
        logger.info('Health Check for all provided keys passed.')
    return response
def _prep_venv(name, venvsdir, on_exists, python, failfast):
    """Plan how to prepare the venv *name* under *venvsdir*.

    Returns (venvroot, run, needreqs) where ``run`` is a callable (or list
    of callables, or an Exception to re-raise lazily) that performs the
    plan, and ``needreqs`` says whether requirements must be installed
    afterwards.
    """
    venvroot = os.path.abspath(os.path.join(venvsdir, name))
    # NOTE(review): `rootdir` is not defined anywhere in this function —
    # this looks like it should be `venvroot`; confirm (NameError as-is).
    (status, kind) = get_venv_status(rootdir)
    if (status != 'missing'):
        # Something already exists at the target: let the caller's
        # on_exists policy decide what to do (it may also relocate).
        try:
            (op, venvroot) = on_exists(venvroot, status, kind)
        except Exception as exc:
            op = exc
    else:
        op = 'create'
        # NOTE(review): this unconditional raise makes the 'missing' path
        # (and the op == 'create' branch below) unreachable — it looks like
        # leftover scaffolding or an indentation casualty; confirm.
        raise NotImplementedError
    run = None
    needreqs = False
    if isinstance(op, Exception):
        # on_exists failed: fail now, or defer the exception to the runner.
        if failfast:
            raise op
        run = op
    elif (op == 'create'):
        def run(warn=False, dryrun=False, log=logger.info):
            if (log is not None):
                log(f'creating venv {venvroot}')
            if (not dryrun):
                _create_venv(venvroot, python)
        needreqs = True
    elif (op == 'replace'):
        # Replacement plan depends on what currently occupies the path.
        if (status == 'valid'):
            def delete(warn=False, dryrun=False, log=logger.info):
                if (log is not None):
                    log(f'replacing venv {venvroot}')
                if (not dryrun):
                    shutil.rmtree(venvroot)
            def create(warn=False, dryrun=False, log=logger.info):
                if (not dryrun):
                    # NOTE(review): `venvinfo` is undefined here — probably
                    # should be `venvroot`; confirm (same in both branches
                    # below).
                    _create_venv(venvinfo, python)
            run = [delete, create]
        elif (status == 'invalid'):
            def delete(warn=False, dryrun=False, log=logger.info):
                if (log is not None):
                    log(f'replacing non-venv {venvroot} dir with venv')
                if (not dryrun):
                    shutil.rmtree(venvroot)
            def create(warn=False, dryrun=False, log=logger.info):
                if (not dryrun):
                    _create_venv(venvinfo, python)
            run = [delete, create]
        else:
            # Path is occupied by a plain file, not a directory.
            def delete(warn=False, dryrun=False, log=logger.info):
                if (log is not None):
                    log(f'replacing {venvroot} file with venv dir')
                if (not dryrun):
                    os.unlink(venvroot)
            def create(warn=False, dryrun=False, log=logger.info):
                if (not dryrun):
                    _create_venv(venvinfo, python)
            run = [delete, create]
        needreqs = True
    elif op:
        # Unrecognized truthy op from on_exists.
        raise NotImplementedError(op)
    elif (status == 'valid'):
        # No-op plans below just report why nothing will be done.
        def run(warn=False, dryrun=False, log=logger.info):
            if warn:
                log('venv already exists:')
                log(f'  {venvroot}')
                log('(skipping)')
        needreqs = True
    elif (status == 'invalid'):
        def run(warn=False, dryrun=False, log=logger.info):
            log('WARNING: an incomplete venv dir already exists:')
            log(f'  {venvroot}')
            log('(skipping)')
    elif (status == 'not-dir'):
        def run(warn=False, dryrun=False, log=logger.info):
            log('WARNING: a conflicting file already exists:')
            log(f'  {venvroot}')
            log('(skipping)')
    elif (status != 'missing'):
        raise NotImplementedError(status)
    return (venvroot, run, needreqs)
('versions', cls=FandoghCommand)
('-i', '--image', 'image', prompt='Image name', help='The image name', default=(lambda : get_project_config().get('image.name')))
def versions(image):
    """Print the available versions of *image* as a table."""
    # Fall back to the image name from the project configuration.
    image = image or get_project_config().get('image.name')
    table = present((lambda: list_versions(image)), renderer='table', headers=['version', 'size', 'state'], columns=['version', 'size', 'state'])
    if table.strip():
        click.echo(table)
    else:
        click.echo("There is no version available for '{}'".format(image))
.lkc
def test_cves_metadata_fixes(hound, cve):
    """The rule-level fixes commit must agree with the CVE metadata 'breaks'."""
    fixes = hound.get_rule_fixes(cve)
    lkc_fixes = hound.get_cve_metadata(cve)['breaks']
    # v2.6.12-rc2 is the root of the kernel git history; map the tag to its sha.
    fixes = '1da177e4c3f41524e886b7f1b8a0c1fc7321cac2' if fixes == 'v2.6.12-rc2' else fixes
    assert fixes == lkc_fixes, '{} vs. {}'.format(fixes[0:12], lkc_fixes[0:12])
class Solution():
    """LeetCode 764: order of the largest axis-aligned plus sign of 1s in an
    N x N grid where `mines` lists the zero cells."""

    def orderOfLargestPlusSign(self, N: int, mines: List[List[int]]) -> int:
        # 1-based grid with an implicit zero border; blocked cells from mines.
        blocked = {(mx + 1, my + 1) for mx, my in mines}
        # arm[r][c][d] = run length of ones ending at (r, c) coming from
        # direction d (0:left, 1:right, 2:up, 3:down).
        arm = [[[0, 0, 0, 0] for _ in range(N + 2)] for _ in range(N + 2)]
        for dx, dy, d in [(-1, 0, 0), (1, 0, 1), (0, -1, 2), (0, 1, 3)]:
            # Sweep toward the direction so the neighbor is already computed.
            cols = range(1, N + 1) if dx <= 0 else range(N, 0, -1)
            rows = range(1, N + 1) if dy <= 0 else range(N, 0, -1)
            for x in cols:
                for y in rows:
                    if (y, x) not in blocked:
                        arm[y][x][d] = arm[y + dy][x + dx][d] + 1
        # A cell's plus order is its shortest arm; take the best cell.
        return max(min(cell) for row in arm for cell in row)
class TestMinHashLSH(unittest.TestCase):
def test_init(self):
lsh = MinHashLSH(threshold=0.8)
self.assertTrue(lsh.is_empty())
(b1, r1) = (lsh.b, lsh.r)
lsh = MinHashLSH(threshold=0.8, weights=(0.2, 0.8))
(b2, r2) = (lsh.b, lsh.r)
self.assertTrue((b1 < b2))
self.assertTrue((r1 > r2))
    def test__H(self):
        """All bucket keys produced for an insert should have the same size."""
        # NOTE(review): the loop variable `l` is never used — the index is
        # always built with num_perm=128, so every iteration is identical.
        # Presumably `l` was meant to drive a parameter; confirm upstream.
        for l in range(2, (128 + 1), 16):
            lsh = MinHashLSH(num_perm=128)
            m = MinHash()
            m.update('abcdefg'.encode('utf8'))
            m.update('1234567'.encode('utf8'))
            lsh.insert('m', m)
            # Collect the byte size of every bucket key across hashtables.
            sizes = [len(H) for ht in lsh.hashtables for H in ht]
            self.assertTrue(all(((sizes[0] == s) for s in sizes)))
    def test_unpacking(self):
        """Bucket keys stay uniformly sized for explicit (b, 4) params across
        a wide range of band counts."""
        # NOTE(review): this builds 1023 separate LSH indexes (b in 2..1024)
        # — a heavy test; confirm the range is intentional.
        for b in range(2, (1024 + 1)):
            lsh = MinHashLSH(num_perm=(b * 4), params=(b, 4))
            m = MinHash(num_perm=(b * 4))
            m.update('abcdefg'.encode('utf8'))
            m.update('1234567'.encode('utf8'))
            lsh.insert('m', m)
            # Every bucket key across all hashtables must have the same size.
            sizes = [len(H) for ht in lsh.hashtables for H in ht]
            self.assertTrue(all(((sizes[0] == s) for s in sizes)))
def test_insert(self):
lsh = MinHashLSH(threshold=0.5, num_perm=16)
m1 = MinHash(16)
m1.update('a'.encode('utf8'))
m2 = MinHash(16)
m2.update('b'.encode('utf8'))
lsh.insert('a', m1)
lsh.insert('b', m2)
for t in lsh.hashtables:
self.assertTrue((len(t) >= 1))
items = []
for H in t:
items.extend(t[H])
self.assertTrue(('a' in items))
self.assertTrue(('b' in items))
self.assertTrue(('a' in lsh))
self.assertTrue(('b' in lsh))
for (i, H) in enumerate(lsh.keys['a']):
self.assertTrue(('a' in lsh.hashtables[i][H]))
m3 = MinHash(18)
self.assertRaises(ValueError, lsh.insert, 'c', m3)
def test_query(self):
lsh = MinHashLSH(threshold=0.5, num_perm=16)
m1 = MinHash(16)
m1.update('a'.encode('utf8'))
m2 = MinHash(16)
m2.update('b'.encode('utf8'))
lsh.insert('a', m1)
lsh.insert('b', m2)
result = lsh.query(m1)
self.assertTrue(('a' in result))
result = lsh.query(m2)
self.assertTrue(('b' in result))
m3 = MinHash(18)
self.assertRaises(ValueError, lsh.query, m3)
def test_query_buffer(self):
lsh = MinHashLSH(threshold=0.5, num_perm=16)
m1 = MinHash(16)
m1.update('a'.encode('utf8'))
m2 = MinHash(16)
m2.update('b'.encode('utf8'))
lsh.insert('a', m1)
lsh.insert('b', m2)
lsh.add_to_query_buffer(m1)
result = lsh.collect_query_buffer()
self.assertTrue(('a' in result))
lsh.add_to_query_buffer(m2)
result = lsh.collect_query_buffer()
self.assertTrue(('b' in result))
m3 = MinHash(18)
self.assertRaises(ValueError, lsh.add_to_query_buffer, m3)
def test_remove(self):
lsh = MinHashLSH(threshold=0.5, num_perm=16)
m1 = MinHash(16)
m1.update('a'.encode('utf8'))
m2 = MinHash(16)
m2.update('b'.encode('utf8'))
lsh.insert('a', m1)
lsh.insert('b', m2)
lsh.remove('a')
self.assertTrue(('a' not in lsh.keys))
for table in lsh.hashtables:
for H in table:
self.assertGreater(len(table[H]), 0)
self.assertTrue(('a' not in table[H]))
self.assertRaises(ValueError, lsh.remove, 'c')
def test_pickle(self):
lsh = MinHashLSH(threshold=0.5, num_perm=16)
m1 = MinHash(16)
m1.update('a'.encode('utf8'))
m2 = MinHash(16)
m2.update('b'.encode('utf8'))
lsh.insert('a', m1)
lsh.insert('b', m2)
lsh2 = pickle.loads(pickle.dumps(lsh))
result = lsh2.query(m1)
self.assertTrue(('a' in result))
result = lsh2.query(m2)
self.assertTrue(('b' in result))
    def test_insert_redis(self):
        """Insertion via the redis-backed storage (faked) mirrors in-memory
        behavior, except keys are stored pickled."""
        with patch('redis.Redis', fake_redis) as mock_redis:
            lsh = MinHashLSH(threshold=0.5, num_perm=16, storage_config={'type': 'redis', 'redis': {'host': 'localhost', 'port': 6379}})
            m1 = MinHash(16)
            m1.update('a'.encode('utf8'))
            m2 = MinHash(16)
            m2.update('b'.encode('utf8'))
            lsh.insert('a', m1)
            lsh.insert('b', m2)
            for t in lsh.hashtables:
                self.assertTrue((len(t) >= 1))
                items = []
                for H in t:
                    items.extend(t[H])
                # Redis storage pickles keys before storing them.
                self.assertTrue((pickle.dumps('a') in items))
                self.assertTrue((pickle.dumps('b') in items))
            self.assertTrue(('a' in lsh))
            self.assertTrue(('b' in lsh))
            for (i, H) in enumerate(lsh.keys[pickle.dumps('a')]):
                self.assertTrue((pickle.dumps('a') in lsh.hashtables[i][H]))
            # A minhash with a different num_perm must be rejected.
            m3 = MinHash(18)
            self.assertRaises(ValueError, lsh.insert, 'c', m3)
def test_query_redis(self):
    """Queries against the redis storage backend return the inserted keys."""
    with patch('redis.Redis', fake_redis):
        index = MinHashLSH(threshold=0.5, num_perm=16, storage_config={'type': 'redis', 'redis': {'host': 'localhost', 'port': 6379}})
        minhashes = {}
        for label in ('a', 'b'):
            mh = MinHash(16)
            mh.update(label.encode('utf8'))
            minhashes[label] = mh
            index.insert(label, mh)
        for label in ('a', 'b'):
            self.assertTrue(label in index.query(minhashes[label]))
        # Querying with a mismatched permutation count is rejected.
        oversized = MinHash(18)
        self.assertRaises(ValueError, index.query, oversized)
def test_query_buffer_redis(self):
    """Buffered queries against the redis backend behave like direct queries."""
    with patch('redis.Redis', fake_redis):
        index = MinHashLSH(threshold=0.5, num_perm=16, storage_config={'type': 'redis', 'redis': {'host': 'localhost', 'port': 6379}})
        minhashes = {}
        for label in ('a', 'b'):
            mh = MinHash(16)
            mh.update(label.encode('utf8'))
            minhashes[label] = mh
            index.insert(label, mh)
        # Run one direct query first to exercise that code path too.
        index.query(minhashes['a'])
        index.add_to_query_buffer(minhashes['a'])
        self.assertTrue('a' in index.collect_query_buffer())
        index.add_to_query_buffer(minhashes['b'])
        self.assertTrue('b' in index.collect_query_buffer())
        # A minhash with a mismatched permutation count is rejected.
        oversized = MinHash(18)
        self.assertRaises(ValueError, index.add_to_query_buffer, oversized)
def test_insertion_session(self):
    """Keys inserted through an insertion session land in all hash tables."""
    index = MinHashLSH(threshold=0.5, num_perm=16)
    pairs = []
    for label in ('a', 'b'):
        mh = MinHash(16)
        mh.update(label.encode('utf8'))
        pairs.append((label, mh))
    with index.insertion_session() as session:
        for label, mh in pairs:
            session.insert(label, mh)
    for table in index.hashtables:
        self.assertTrue(len(table) >= 1)
        entries = [entry for bucket in table for entry in table[bucket]]
        self.assertTrue('a' in entries)
        self.assertTrue('b' in entries)
    self.assertTrue('a' in index)
    self.assertTrue('b' in index)
    for band, bucket in enumerate(index.keys['a']):
        self.assertTrue('a' in index.hashtables[band][bucket])
def test_get_counts(self):
    """get_counts yields one counter per band, each summing to the key count."""
    index = MinHashLSH(threshold=0.5, num_perm=16)
    for label in ('a', 'b'):
        mh = MinHash(16)
        mh.update(label.encode('utf8'))
        index.insert(label, mh)
    counts = index.get_counts()
    self.assertEqual(len(counts), index.b)
    for band_counts in counts:
        self.assertEqual(sum(band_counts.values()), 2)
def load_jupyter_server_extension(nb_server_app):
    """Register the jupyterlab_templates handlers on the notebook server.

    Reads the ``JupyterLabTemplates`` config section for template search
    directories and allowed file extensions, optionally prepends the bundled
    default templates and appends each ``notebook_templates`` directory on the
    Jupyter data path, then installs the ``templates/names`` and
    ``templates/get`` handlers under the server's base URL.

    Args:
        nb_server_app: the running notebook server application instance.
    """
    web_app = nb_server_app.web_app
    # Look the config section up once instead of four separate times.
    config = nb_server_app.config.get('JupyterLabTemplates', {})
    # Copy so we never mutate the list object stored inside the user's config.
    template_dirs = list(config.get('template_dirs', []))
    allowed_extensions = config.get('allowed_extensions', ['*.ipynb'])
    if config.get('include_default', True):
        # Bundled defaults come first so user-configured dirs can shadow them.
        template_dirs.insert(0, os.path.join(os.path.dirname(__file__), 'templates'))
    base_url = web_app.settings['base_url']
    host_pattern = '.*$'
    nb_server_app.log.info('Installing jupyterlab_templates handler on path %s' % url_path_join(base_url, 'templates'))
    if config.get('include_core_paths', True):
        # Also search <jupyter-data-dir>/notebook_templates for each data path.
        template_dirs.extend(os.path.join(x, 'notebook_templates') for x in jupyter_core.paths.jupyter_path())
    nb_server_app.log.info('Search paths:\n\t%s' % '\n\t'.join(template_dirs))
    loader = TemplatesLoader(template_dirs, allowed_extensions=allowed_extensions)
    nb_server_app.log.info('Available templates:\n\t%s' % '\n\t'.join(loader.get_templates()[1].keys()))
    web_app.add_handlers(host_pattern, [(url_path_join(base_url, 'templates/names'), TemplateNamesHandler, {'loader': loader})])
    web_app.add_handlers(host_pattern, [(url_path_join(base_url, 'templates/get'), TemplatesHandler, {'loader': loader})])
def fortios_firewall_shaper(data, fos, check_mode):
    """Apply the firewall.shaper per-ip-shaper task body via the FortiOS API.

    Returns the check-mode result directly when ``check_mode`` is set,
    otherwise the standard (is_error, changed, response, diff) tuple.
    """
    fos.do_member_operation('firewall.shaper', 'per-ip-shaper')
    if data['firewall_shaper_per_ip_shaper']:
        resp = firewall_shaper_per_ip_shaper(data, fos, check_mode)
    else:
        # fail_json terminates the module run, so resp is never read here.
        fos._module.fail_json(msg=('missing task body: %s' % 'firewall_shaper_per_ip_shaper'))
    if check_mode:
        return resp
    # changed: trust the API's revision_changed flag when present, default True.
    return (
        not is_successful_status(resp),
        is_successful_status(resp) and resp.get('revision_changed', True),
        resp,
        {},
    )
class MultiDictionary():
    """A mapping from keys to sets of values (a multimap).

    Missing keys read as empty sets; ``add`` creates the set on first use.
    Iteration, ``len`` and ``in`` delegate to the backing dict, so keys are
    seen in insertion order.
    """
    # Backing store: one set of values per key.
    _d: Dict[(Any, Set[Any])]

    def __init__(self) -> None:
        self._d = {}

    def add(self, key: Any, value: Any) -> None:
        """Insert ``value`` into the set stored under ``key``."""
        # setdefault replaces the original if/else first-insert dance.
        self._d.setdefault(key, set()).add(value)

    def __getitem__(self, key: Any) -> Set[Any]:
        """Return the value set for ``key`` (a fresh empty set when absent)."""
        return self._d.get(key, set())

    def __iter__(self):
        return iter(self._d)

    def __len__(self):
        return len(self._d)

    def __contains__(self, key: Any):
        return (key in self._d)

    def keys(self):
        return self._d.keys()

    def items(self):
        return self._d.items()

    def __repr__(self) -> str:
        # Render as {key:{v1,\nv2}\nkey2:{...}}; values sorted by str().
        entries = (
            str(key) + ':{' + ',\n'.join(sorted(str(v) for v in self[key])) + '}'
            for key in self
        )
        return '{' + '\n'.join(entries) + '}'
def addTextBox(fid, value, maxlength):
    """Append an HTML <input type='text'> element to the global TXBuffer.

    Args:
        fid: form field name, used for both the name and id attributes.
        value: initial value of the input.
        maxlength: maximum input length (emitted unquoted, as before).

    NOTE(review): fid/value are interpolated without HTML escaping — if they
    can carry user-controlled text containing quotes this is an injection
    risk; confirm that callers sanitize their inputs.
    """
    global TXBuffer
    # One append instead of nine quadratic += concatenations; output bytes
    # are identical to the original.
    TXBuffer += (
        "<input class='wide' type='text' name='" + str(fid)
        + "' id='" + str(fid)
        + "' maxlength=" + str(maxlength)
        + " value='" + str(value) + "'>"
    )
.integrationtest
def test_pipeline(instrument, elasticapm_client, redis_conn):
    """A redis pipeline execute() is captured as a single db/redis span.

    NOTE(review): the bare ``.integrationtest`` line above looks like a
    ``@pytest.mark.integrationtest`` decorator whose prefix was lost in
    extraction — confirm against the original test module.
    """
    elasticapm_client.begin_transaction('transaction.test')
    with capture_span('test_pipeline', 'test'):
        # Queue two commands and flush them in one round trip.
        pipeline = redis_conn.pipeline()
        pipeline.rpush('mykey', 'a', 'b')
        pipeline.expire('mykey', 1000)
        pipeline.execute()
    elasticapm_client.end_transaction('MyView')
    transactions = elasticapm_client.events[TRANSACTION]
    spans = elasticapm_client.spans_for_transaction(transactions[0])
    # Span name differs across redis-py versions (StrictPipeline vs Pipeline).
    assert (spans[0]['name'] in ('StrictPipeline.execute', 'Pipeline.execute'))
    assert (spans[0]['type'] == 'db')
    assert (spans[0]['subtype'] == 'redis')
    assert (spans[0]['action'] == 'query')
    assert (spans[0]['context']['destination'] == {'address': os.environ.get('REDIS_HOST', 'localhost'), 'port': int(os.environ.get('REDIS_PORT', 6379)), 'service': {'name': '', 'resource': 'redis', 'type': ''}})
    assert (spans[1]['name'] == 'test_pipeline')
    assert (spans[1]['type'] == 'test')
    # Exactly the pipeline span plus the wrapping capture_span.
    assert (len(spans) == 2)
class TestPluginFileNameResolver():
    """Unit tests for supplier.PluginFileNameResolver path/name handling."""

    def setup_class(cls):
        # One shared resolver instance is enough for these read-mostly tests.
        cls.resolver = supplier.PluginFileNameResolver('test-plugin')

    def test_resolve(self):
        """The zip file name embeds the plugin name and current revision."""
        self.resolver.revision = 'abc123'
        assert self.resolver.file_name == 'test-plugin-abc123.zip'

    def test_artifact_key(self):
        """The artifact key is simply the plugin name."""
        assert self.resolver.artifact_key == 'test-plugin'

    def test_to_artifact_path(self):
        """A local path is mapped to a file:// URL."""
        local_path = '/tmp/test'
        assert self.resolver.to_artifact_path(local_path) == f'file://{local_path}'

    def test_to_file_system_path(self):
        """A file:// URL is mapped back to the local path."""
        local_path = '/tmp/test'
        assert self.resolver.to_file_system_path(f'file://{local_path}') == local_path
class TlsSubscription(ModelNormal):
    """Generated OpenAPI model for a TLS subscription resource.

    NOTE(review): the bare ``_property`` / ``_js_args_to_python_args`` lines
    below appear to be decorators whose ``@`` prefix (and possibly a
    ``cached_property`` wrapper) was lost in extraction — confirm against the
    generator's original output before editing.
    """
    # No enum-constrained values or field validations on this model.
    allowed_values = {}
    validations = {}
    _property
    def additional_properties_type():
        """Types accepted for properties not listed in attribute_map."""
        # Lazy import avoids circular imports between generated model modules.
        lazy_import()
        return (bool, date, datetime, dict, float, int, list, str, none_type)
    _nullable = False
    _property
    def openapi_types():
        """Map attribute name -> tuple of accepted types."""
        lazy_import()
        return {'data': (TlsSubscriptionData,)}
    _property
    def discriminator():
        # This model is not polymorphic.
        return None
    # JSON key for each attribute (identical here).
    attribute_map = {'data': 'data'}
    read_only_vars = {}
    _composed_schemas = {}
    _js_args_to_python_args
    def _from_openapi_data(cls, *args, **kwargs):
        """Alternate constructor used when deserializing API responses.

        Unlike __init__, this path is allowed to set read-only attributes.
        """
        _check_type = kwargs.pop('_check_type', True)
        _spec_property_naming = kwargs.pop('_spec_property_naming', False)
        _path_to_item = kwargs.pop('_path_to_item', ())
        _configuration = kwargs.pop('_configuration', None)
        _visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
        self = super(OpenApiModel, cls).__new__(cls)
        if args:
            raise ApiTypeError(('Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments.' % (args, self.__class__.__name__)), path_to_item=_path_to_item, valid_classes=(self.__class__,))
        self._data_store = {}
        self._check_type = _check_type
        self._spec_property_naming = _spec_property_naming
        self._path_to_item = _path_to_item
        self._configuration = _configuration
        self._visited_composed_classes = (_visited_composed_classes + (self.__class__,))
        for (var_name, var_value) in kwargs.items():
            # Optionally drop keys the spec does not know about.
            if ((var_name not in self.attribute_map) and (self._configuration is not None) and self._configuration.discard_unknown_keys and (self.additional_properties_type is None)):
                continue
            setattr(self, var_name, var_value)
        return self
    # Attributes that must never be discarded as "unknown".
    required_properties = set(['_data_store', '_check_type', '_spec_property_naming', '_path_to_item', '_configuration', '_visited_composed_classes'])
    _js_args_to_python_args
    def __init__(self, *args, **kwargs):
        """Construct the model from keyword arguments.

        Raises ApiTypeError on positional args and ApiAttributeError when a
        read-only attribute is passed by the caller.
        """
        _check_type = kwargs.pop('_check_type', True)
        _spec_property_naming = kwargs.pop('_spec_property_naming', False)
        _path_to_item = kwargs.pop('_path_to_item', ())
        _configuration = kwargs.pop('_configuration', None)
        _visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
        if args:
            raise ApiTypeError(('Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments.' % (args, self.__class__.__name__)), path_to_item=_path_to_item, valid_classes=(self.__class__,))
        self._data_store = {}
        self._check_type = _check_type
        self._spec_property_naming = _spec_property_naming
        self._path_to_item = _path_to_item
        self._configuration = _configuration
        self._visited_composed_classes = (_visited_composed_classes + (self.__class__,))
        for (var_name, var_value) in kwargs.items():
            if ((var_name not in self.attribute_map) and (self._configuration is not None) and self._configuration.discard_unknown_keys and (self.additional_properties_type is None)):
                continue
            setattr(self, var_name, var_value)
            # Read-only attributes may only be set via _from_openapi_data.
            if (var_name in self.read_only_vars):
                raise ApiAttributeError(f'`{var_name}` is a read-only attribute. Use `from_openapi_data` to instantiate class with read only attributes.')
class OptionPlotoptionsHistogramSonificationDefaultinstrumentoptionsMappingVolume(Options):
    """Generated wrapper for the Highcharts sonification volume-mapping options.

    NOTE(review): each option below appears twice (getter then setter). In
    generated code of this shape those are normally ``@property`` /
    ``@<name>.setter`` pairs whose decorators were lost in extraction — as
    written, the second definition silently overrides the first. Confirm
    against the generator's original output.
    """
    def mapFunction(self):
        # Getter: mapping function for the volume parameter.
        return self._config_get(None)
    def mapFunction(self, value: Any):
        # Setter counterpart.
        self._config(value, js_type=False)
    def mapTo(self):
        # Getter: data property the volume is mapped to.
        return self._config_get(None)
    def mapTo(self, text: str):
        self._config(text, js_type=False)
    def max(self):
        # Getter: upper bound of the mapped range.
        return self._config_get(None)
    def max(self, num: float):
        self._config(num, js_type=False)
    def min(self):
        # Getter: lower bound of the mapped range.
        return self._config_get(None)
    def min(self, num: float):
        self._config(num, js_type=False)
    def within(self):
        # Getter: scope the mapping is computed within.
        return self._config_get(None)
    def within(self, value: Any):
        self._config(value, js_type=False)
class OptionSeriesHistogramSonificationTracksMappingFrequency(Options):
    """Generated wrapper for the Highcharts sonification frequency-mapping options.

    NOTE(review): each option below appears twice (getter then setter) —
    presumably ``@property`` / ``@<name>.setter`` pairs whose decorators were
    lost in extraction; as written the second definition overrides the first.
    Confirm against the generator's original output.
    """
    def mapFunction(self):
        # Getter: mapping function for the frequency parameter.
        return self._config_get(None)
    def mapFunction(self, value: Any):
        # Setter counterpart.
        self._config(value, js_type=False)
    def mapTo(self):
        # Getter: data property the frequency is mapped to.
        return self._config_get(None)
    def mapTo(self, text: str):
        self._config(text, js_type=False)
    def max(self):
        # Getter: upper bound of the mapped range.
        return self._config_get(None)
    def max(self, num: float):
        self._config(num, js_type=False)
    def min(self):
        # Getter: lower bound of the mapped range.
        return self._config_get(None)
    def min(self, num: float):
        self._config(num, js_type=False)
    def within(self):
        # Getter: scope the mapping is computed within.
        return self._config_get(None)
    def within(self, value: Any):
        self._config(value, js_type=False)
def minimize_cem(fn: Callable, x0: jnp.ndarray, key: jnp.ndarray, args: Optional[Tuple]=(), *, bounds: Tuple[(jnp.ndarray, jnp.ndarray)], n_iterations: int, population_size: int, elite_fraction: float, alpha: float, fn_use_key: bool=False, return_mean_elites: bool=False) -> Tuple[(jnp.ndarray, jnp.ndarray)]:
    """Minimize ``fn`` with the Cross-Entropy Method inside a jax while_loop.

    Each iteration samples ``population_size`` candidates from a truncated
    normal around the running mean, scores them with ``fn``, keeps the
    ``elite_fraction`` lowest-cost candidates, and smooths mean/variance
    toward the elite statistics with momentum ``alpha``.

    Args:
        fn: cost function mapping a (population_size,) + x0.shape batch to a
            (population_size,) cost vector (enforced by chex.assert_shape).
        x0: initial mean of the search distribution.
        key: jax PRNG key.
        args: extra positional arguments forwarded to ``fn``.
        bounds: (lower, upper) bounds, broadcastable to x0's shape.
        n_iterations: number of CEM iterations.
        population_size: candidates sampled per iteration.
        elite_fraction: fraction of the population kept as elites.
        alpha: momentum on the mean/variance updates (1.0 keeps the old value).
        fn_use_key: if True, ``fn`` also receives a fresh PRNG key.
        return_mean_elites: if True, return the final mean instead of the
            best single candidate found.

    Returns:
        (solution, best_cost): the final mean or best candidate, plus the
        lowest cost observed.
    """
    num_elites = int((population_size * elite_fraction))
    (lower_bound, upper_bound) = bounds
    input_shape = x0.shape
    population_shape = ((population_size,) + input_shape)
    def cond_fn(state):
        # Loop until n_iterations have run; t is the first state element.
        t = state[0]
        return (t < n_iterations)
    def loop(state):
        (t, best_cost, best_solution, mu, var, rng, args) = state
        (rng, key, key2) = jax.random.split(rng, 3)
        # Shrink the sampling variance so +/-2 sigma stays inside the bounds.
        lb_dist = (mu - lower_bound)
        ub_dist = (upper_bound - mu)
        constrained_var = jnp.minimum(jnp.minimum(jnp.square((lb_dist / 2)), jnp.square((ub_dist / 2))), var)
        # Sample in [-2, 2], then scale/shift by the constrained distribution.
        population = jax.random.truncated_normal(key, lower=(- 2.0), upper=2.0, shape=population_shape)
        population = ((population * jnp.sqrt(constrained_var)) + mu)
        if fn_use_key:
            costs = fn(population, key2, *args)
        else:
            costs = fn(population, *args)
        # Non-finite costs are zeroed rather than propagated.
        costs: jnp.ndarray = jnp.where((~ jnp.isfinite(costs)), .0, costs)
        chex.assert_shape(costs, (population_size,))
        # top_k on negated costs selects the num_elites lowest-cost candidates;
        # elite_idx[0] is therefore the best candidate of this iteration.
        (_, elite_idx) = lax.top_k((- costs), num_elites)
        elites = population[elite_idx]
        best_costs = costs[elite_idx]
        new_mu = jnp.mean(elites, axis=0)
        new_var = jnp.var(elites, axis=0)
        # Track the best cost/solution seen across all iterations.
        new_best_cost = jnp.where((best_costs[0] < best_cost), best_costs[0], best_cost)
        new_best_solution = jnp.where((best_costs[0] < best_cost), elites[0], best_solution)
        # Momentum update: alpha keeps the old statistics, (1-alpha) adopts new.
        mu = ((alpha * mu) + ((1.0 - alpha) * new_mu))
        var = ((alpha * var) + ((1.0 - alpha) * new_var))
        return ((t + 1), new_best_cost, new_best_solution, mu, var, rng, args)
    # Initial variance spans (range/4)^2 so +/-2 sigma covers the bounds.
    initial_var = (jnp.square((upper_bound - lower_bound)) / 16.0)
    initial_mu = x0
    assert (initial_var.shape == initial_mu.shape)
    best_cost = jnp.inf
    best_solution = jnp.empty_like(initial_mu)
    state = (0, best_cost, best_solution, initial_mu, initial_var, key, args)
    (_, new_best_cost, best, mu, _, _, _) = jax.lax.while_loop(cond_fn, loop, state)
    return ((mu if return_mean_elites else best), new_best_cost)
class AgentDialogue(GymDialogue):
    """Gym dialogue as seen from the agent's side of the connection."""

    def __init__(self, dialogue_label: DialogueLabel, self_address: Address, role: BaseDialogue.Role, message_class: Type[GymMessage]) -> None:
        """Delegate construction entirely to the base GymDialogue.

        :param dialogue_label: identifier of this dialogue.
        :param self_address: address of the entity owning the dialogue.
        :param role: role of the agent participating in the dialogue.
        :param message_class: message class used inside the dialogue.
        """
        GymDialogue.__init__(
            self,
            dialogue_label=dialogue_label,
            self_address=self_address,
            role=role,
            message_class=message_class,
        )
def subscription_note_notify(subscription):
    """Email every house admin of a location when a subscription gains a note.

    Builds a management link for the subscription and sends one message to
    the de-duplicated list of admin addresses via mailgun.

    Returns whatever mailgun_send returns.
    """
    domain = Site.objects.get_current().domain
    admin_path = urlresolvers.reverse('subscription_manage_detail', args=(subscription.location.slug, subscription.id))
    text_content = ('Howdy,\n\nA new note has been added to a subscription for %s %s. \n\nManage this subscription at %s%s.' % (subscription.user.first_name, subscription.user.last_name, domain, admin_path))
    # dict.fromkeys de-duplicates while preserving admin iteration order,
    # replacing the manual membership-check loop.
    recipients = list(dict.fromkeys(admin.email for admin in subscription.location.house_admins.all()))
    subject = ('[%s] New subscription note for %s %s' % (subscription.location.email_subject_prefix, subscription.user.first_name, subscription.user.last_name))
    mailgun_data = {'from': subscription.location.from_email(), 'to': recipients, 'subject': subject, 'text': text_content}
    return mailgun_send(mailgun_data)
(urls.RULE_TARGET_DETAIL, status_code=HTTP_204_NO_CONTENT, dependencies=[Security(verify_oauth_client, scopes=[scope_registry.RULE_DELETE])])
def delete_rule_target(*, policy_key: FidesKey, rule_key: FidesKey, rule_target_key: FidesKey, db: Session=Depends(deps.get_db)) -> None:
policy = get_policy_or_error(db, policy_key)
logger.info("Finding rule with key '{}'", rule_key)
rule = Rule.filter(db=db, conditions=((Rule.key == rule_key) and (Rule.policy_id == policy.id))).first()
if (not rule):
raise HTTPException(status_code=HTTP_404_NOT_FOUND, detail=f'No Rule found for key {rule_key} on Policy {policy_key}.')
logger.info("Finding rule target with key '{}'", rule_target_key)
target = RuleTarget.filter(db=db, conditions=((RuleTarget.key == rule_target_key) and (RuleTarget.rule_id == rule.id))).first()
if (not target):
raise HTTPException(status_code=HTTP_404_NOT_FOUND, detail=f'No RuleTarget found for key {rule_target_key} at Rule {rule_key} on Policy {policy_key}.')
logger.info("Deleting rule target with key '{}'", rule_target_key)
target.delete(db=db) |
class TestSpotifyAudiobook():
    """Integration tests for the Spotify audiobook endpoints.

    NOTE(review): the bare ``.xfail(...)`` lines below look like
    ``@pytest.mark.xfail`` decorators whose prefix was lost in extraction —
    confirm against the original test module.
    """
    .xfail(reason='API inconsistencies.')
    def test_audiobook_without_market_raises(self, app_client):
        # Expected failure: fetching without a market is inconsistent upstream.
        app_client.audiobook(audiobook_id)
    def test_audiobook_with_US_market(self, app_client):
        """Fetching with market='US' returns the requested audiobook."""
        book = app_client.audiobook(audiobook_id, market='US')
        assert (book.id == audiobook_id)
        assert (book.type == 'audiobook')
        # The API reports the URI with a 'show' prefix, not 'audiobook'.
        assert (from_uri(book.uri)[0] == 'show')
    .xfail(reason='API inconsistencies.')
    def test_audiobook_with_non_US_market(self, app_client):
        # Expected failure: non-US markets behave inconsistently upstream.
        app_client.audiobook(audiobook_id, market='FI')
    def test_audiobooks_no_market_not_found(self, app_client):
        """Bulk fetch without a market raises NotFound."""
        with pytest.raises(NotFound):
            app_client.audiobooks(audiobook_ids)
    def test_audiobooks_with_US_market(self, app_client):
        # NOTE(review): despite the name, this asserts NotFound even with
        # market='US' — presumably tracking current API behavior; confirm.
        with pytest.raises(NotFound):
            app_client.audiobooks(audiobook_ids, market='US')
    .xfail(reason='API inconsistencies.')
    def test_audiobook_chapters_no_market_not_found(self, app_client):
        app_client.audiobook_chapters(audiobook_id, limit=1)
    def test_audiobook_chapters_US_market(self, app_client):
        """Chapter listing with market='US' returns at least one chapter."""
        chapters = app_client.audiobook_chapters(audiobook_id, market='US', limit=1)
        assert (chapters.items[0] is not None)
    .xfail(reason='API inconsistencies.')
    def test_audiobook_chapters_non_US_market(self, app_client):
        app_client.audiobook_chapters(audiobook_id, market='FI', limit=1)
def test_multiple_subdomains_3d(mesh_3d):
    """Slate assembly over a sum of exterior-facet subdomain measures must
    match plain assembly of the same form."""
    space = VectorFunctionSpace(mesh_3d, 'DG', 1)
    normal = FacetNormal(mesh_3d)
    v = TestFunction(space)
    x, y, z = SpatialCoordinate(mesh_3d)
    f = project(as_vector([x, y, z]), space)
    # Accumulate the measure over subdomains 1..6.
    measure = ds(1)
    for subdomain_id in range(2, 7):
        measure = measure + ds(subdomain_id)
    form = inner(normal, f[0] * f[1] * f[2] * v) * measure
    slate_result = assemble(Tensor(form)).dat.data
    reference = assemble(form).dat.data
    assert np.allclose(slate_result, reference, rtol=1e-14)
class Book(db.Model):
    """SQLAlchemy model for a book owned by a user."""

    __tablename__ = 'books'
    id = db.Column(db.Integer, primary_key=True, autoincrement=True)
    book_title = db.Column(db.String(128), unique=True, nullable=False)
    secret_content = db.Column(db.String(128), nullable=False)
    user_id = db.Column(db.Integer, ForeignKey('users.id'))
    user = relationship('User', back_populates='books')

    def __init__(self, book_title, secret_content, user_id=None):
        # BUG FIX: the default was ``user_id=user_id``, which bound the
        # class-level Column object at class-definition time — calling the
        # constructor without user_id stored a Column, not NULL. Default to
        # None instead (backward-compatible for all callers that passed it).
        self.book_title = book_title
        self.secret_content = secret_content
        self.user_id = user_id

    def __repr__(self):
        return f'<User(book_title={self.book_title}, user={self.user})>'

    def json(self):
        """Serializable summary: title plus the owner's username."""
        return {'book_title': self.book_title, 'user': self.user.username}

    @staticmethod
    def get_all_books():
        """Return JSON summaries of every book.

        Marked @staticmethod (it takes no self); ``Book.get_all_books()``
        keeps working exactly as before.
        """
        return [Book.json(book) for book in Book.query.all()]
class PyfaceFont(TraitType):
    """Trait type whose values are Pyface Font objects.

    Accepts either a Font instance or a string that the configured parser can
    turn into Font constructor arguments.
    """
    default_value_type = DefaultValue.callable_and_args
    parser = None

    def __init__(self, value=None, *, parser=simple_parser, **metadata):
        self.parser = parser
        if value is None:
            # No default supplied: an empty Font is constructed lazily.
            default_value = (Font, (), {})
        else:
            try:
                font = self.validate(None, None, value)
            except TraitError:
                raise ValueError('expected ' + self.info() + f', but got {value!r}')
            # Store only the persistent traits of the validated font.
            default_value = (Font, (), font.trait_get(transient=(lambda x: (not x))))
        super().__init__(default_value, **metadata)

    def validate(self, object, name, value):
        """Return a Font for ``value``, or raise a TraitError via self.error."""
        if isinstance(value, Font):
            return value
        if isinstance(value, str):
            try:
                return Font(**self.parser(value))
            except FontParseError:
                self.error(object, name, value)
        # Neither a Font nor a parseable string.
        self.error(object, name, value)

    def info(self):
        return 'a Pyface Font, or a string describing a Pyface Font'
def check_for_employee(emp_name, emp_code, center):
    """Return an error message if the employee cannot be found or synced.

    Builds a filter from whichever of name/code is provided; returns an
    empty string on success.
    """
    filters = {}
    if emp_name:
        filters['employee_name'] = emp_name
    if emp_code:
        filters['zenoti_employee_code'] = emp_code
    # Neither identifier supplied: nothing to look up.
    if not filters:
        return _('Details for Employee missing')
    # Known employee: no error.
    if frappe.db.exists('Employee', filters):
        return ''
    # Unknown employee: try to pull employees from the center.
    return center.sync_employees()
def test_complex_fields():
    """Bloch boundaries should flag a simulation as using complex fields."""
    assert not SIM.complex_fields
    boundaries = td.BoundarySpec(
        x=td.Boundary(plus=td.PECBoundary(), minus=td.PMCBoundary()),
        y=td.Boundary(plus=td.BlochBoundary(bloch_vec=1.0), minus=td.BlochBoundary(bloch_vec=1.0)),
        z=td.Boundary(plus=td.Periodic(), minus=td.Periodic()),
    )
    sim_with_bloch = SIM_FULL.copy(update=dict(boundary_spec=boundaries))
    assert sim_with_bloch.complex_fields
class BottleneckBlock(CNNBlockBase):
    """ResNet bottleneck block: 1x1 reduce -> 3x3 -> 1x1 expand, with an
    optional 1x1 projection on the identity branch."""

    def __init__(self, in_channels, out_channels, *, bottleneck_channels, stride=1, num_groups=1, norm='BN', stride_in_1x1=False, dilation=1):
        super().__init__(in_channels, out_channels, stride)
        # Project the identity branch only when channel counts differ.
        self.shortcut = (
            nn.Conv2dBias(in_channels, out_channels, 1, stride, 0)
            if in_channels != out_channels
            else None
        )
        # Older ResNet variants place the stride on the 1x1 conv instead of
        # the 3x3 conv.
        if stride_in_1x1:
            stride_1x1, stride_3x3 = stride, 1
        else:
            stride_1x1, stride_3x3 = 1, stride
        self.conv1 = nn.Conv2dBiasRelu(in_channels, bottleneck_channels, 1, stride_1x1, 0)
        self.conv2 = nn.Conv2dBiasRelu(bottleneck_channels, bottleneck_channels, 3, stride_3x3, (1 * dilation), dilation)
        # conv3 fuses the residual add and final ReLU.
        self.conv3 = nn.Conv2dBiasAddRelu(bottleneck_channels, out_channels, 1, 1, 0)

    def forward(self, x):
        y = self.conv2(self.conv1(x))
        identity = x if self.shortcut is None else self.shortcut(x)
        return self.conv3(y, identity)
.compilertest
def test_hr_error_2():
    """Two mappings on the same prefix where only the first sets
    host_redirect must be rejected, both with and without a compiler cache.

    NOTE(review): the bare ``.compilertest`` line above looks like a pytest
    marker decorator whose prefix was lost in extraction — confirm against
    the original test module.
    """
    yaml = '\n---\napiVersion: getambassador.io/v3alpha1\nkind: Mapping\nmetadata:\n name: mapping-1\n namespace: default\nspec:\n hostname: "*"\n prefix: /\n service: svc1\n host_redirect: true\n---\napiVersion: getambassador.io/v3alpha1\nkind: Mapping\nmetadata:\n name: mapping-2\n namespace: default\nspec:\n hostname: "*"\n prefix: /\n service: svc2\n'
    cache = Cache(logger)
    r1 = Compile(logger, yaml, k8s=True)
    r2 = Compile(logger, yaml, k8s=True, cache=cache)
    # Cached and uncached compiles must report the same global error.
    require_errors(r1['ir'], [('-global-', 'cannot accept mapping-2 without host_redirect after mapping-1 with host_redirect')])
    require_errors(r2['ir'], [('-global-', 'cannot accept mapping-2 without host_redirect after mapping-1 with host_redirect')])
class ProtocolVersion(enum.IntEnum):
    """Wire-protocol versions for the pysoa redis transport.

    NOTE(review): ``extract_version`` takes no ``self`` — it was presumably
    decorated ``@staticmethod`` in the original source; confirm before
    calling it through an instance.
    """
    VERSION_1 = 1
    VERSION_2 = 2
    VERSION_3 = 3
    def prefix(self):
        # Byte prefix written in front of explicitly-versioned payloads.
        return (b'pysoa-redis/%d//' % self.value)
    def extract_version(message_data):
        """Return (detected protocol version, remaining payload bytes)."""
        match = PROTOCOL_VERSION_RE.match(message_data)
        if match:
            # Explicit version prefix: strip it and report its version.
            return (ProtocolVersion(int(match.group('version'))), message_data[match.end():])
        if message_data.startswith(b'content-type'):
            # Version-2 messages start directly with a content-type header.
            return (ProtocolVersion.VERSION_2, message_data)
        # Anything else is treated as legacy version 1, payload untouched.
        return (ProtocolVersion.VERSION_1, message_data)
class JITTest(unittest.TestCase):
    """Tests for the Bean Machine JIT: lifting @random_variable /
    @functional Python functions into BMG graph-accumulating form.

    NOTE(review): the bare ``_variable`` lines inside test_nested_rv look
    like ``@bm.random_variable`` decorators whose prefix was lost in
    extraction — confirm against the original module.
    """
    def test_function_transformation_1(self) -> None:
        """Lifted source of f and norm matches the expected rewrite, and the
        lifted function builds graph nodes with per-argument memoization."""
        self.maxDiff = None
        self.assertTrue(norm.is_random_variable)
        bmgast = _bm_function_to_bmg_ast(f, 'f_helper')
        observed = astor.to_source(bmgast)
        expected = "\ndef f_helper(bmg):\n import operator\n\n def f(x):\n a2 = bmg.handle_dot_get(math, 'exp')\n r3 = [x]\n r4 = {}\n r1 = bmg.handle_function(a2, r3, r4)\n return r1\n return f"
        self.assertEqual(observed.strip(), expected.strip())
        bmgast = _bm_function_to_bmg_ast(norm().function, 'norm_helper')
        observed = astor.to_source(bmgast)
        expected = "\ndef norm_helper(bmg):\n import operator\n\n def norm(n):\n global counter\n a1 = 1\n counter = bmg.handle_function(operator.add, [counter, a1])\n r4 = []\n a9 = 0.0\n a8 = dict(loc=a9)\n a11 = 1.0\n a10 = dict(scale=a11)\n r7 = dict(**a8, **a10)\n r2 = bmg.handle_function(Normal, r4, r7)\n return r2\n a3 = bmg.handle_dot_get(bm, 'random_variable')\n r5 = [norm]\n r6 = {}\n norm = bmg.handle_function(a3, r5, r6)\n return norm\n"
        self.assertEqual(observed.strip(), expected.strip())
        bmg = BMGRuntime()
        lifted_f = _bm_function_to_bmg_function(f, bmg)
        norm_sample = bmg._rv_to_node(norm(0))
        result = lifted_f(norm_sample)
        self.assertTrue(isinstance(result, ExpNode))
        dot = to_dot(bmg._bmg)
        expected = '\ndigraph "graph" {\n N0[label=0.0];\n N1[label=1.0];\n N2[label=Normal];\n N3[label=Sample];\n N4[label=Exp];\n N0 -> N2[label=mu];\n N1 -> N2[label=sigma];\n N2 -> N3[label=operand];\n N3 -> N4[label=operand];\n}\n'
        self.assertEqual(dot.strip(), expected.strip())
        # The lifted body increments counter once per distinct argument:
        # repeated calls with the same argument reuse the memoized node.
        global counter
        self.assertEqual(counter, 1)
        bmg._rv_to_node(norm(0))
        self.assertEqual(counter, 1)
        bmg._rv_to_node(norm(1))
        self.assertEqual(counter, 2)
        bmg._rv_to_node(norm(1))
        self.assertEqual(counter, 2)
    def test_function_transformation_2(self) -> None:
        """A dependent rv (flip uses a Beta sample) accumulates both nodes."""
        self.maxDiff = None
        bmg = BMGRuntime()
        bmg._rv_to_node(flip())
        dot = to_dot(bmg._bmg)
        expected = '\ndigraph "graph" {\n N0[label=2.0];\n N1[label=Beta];\n N2[label=Sample];\n N3[label=Bernoulli];\n N4[label=Sample];\n N0 -> N1[label=alpha];\n N0 -> N1[label=beta];\n N1 -> N2[label=operand];\n N2 -> N3[label=probability];\n N3 -> N4[label=operand];\n}\n'
        self.assertEqual(dot.strip(), expected.strip())
    def test_function_transformation_3(self) -> None:
        """accumulate_graph wires queries and observations into the graph."""
        self.maxDiff = None
        rt = BMGRuntime()
        queries = [coin(), exp_coin()]
        observations = {flip(): tensor(1.0)}
        bmg = rt.accumulate_graph(queries, observations)
        dot = to_dot(bmg)
        expected = '\ndigraph "graph" {\n N0[label=2.0];\n N1[label=Beta];\n N2[label=Sample];\n N3[label=Bernoulli];\n N4[label=Sample];\n N5[label="Observation tensor(1.)"];\n N6[label=Query];\n N7[label=Exp];\n N8[label=Query];\n N0 -> N1[label=alpha];\n N0 -> N1[label=beta];\n N1 -> N2[label=operand];\n N2 -> N3[label=probability];\n N2 -> N6[label=operator];\n N2 -> N7[label=operand];\n N3 -> N4[label=operand];\n N4 -> N5[label=operand];\n N7 -> N8[label=operator];\n}\n'
        self.assertEqual(dot.strip(), expected.strip())
    def test_function_transformation_4(self) -> None:
        """A functional of an rv with an argument accumulates correctly."""
        self.maxDiff = None
        rt = BMGRuntime()
        queries = [exp_norm(0)]
        observations = {}
        bmg = rt.accumulate_graph(queries, observations)
        dot = to_dot(bmg)
        expected = '\ndigraph "graph" {\n N0[label=0.0];\n N1[label=1.0];\n N2[label=Normal];\n N3[label=Sample];\n N4[label=Exp];\n N5[label=Query];\n N0 -> N2[label=mu];\n N1 -> N2[label=sigma];\n N2 -> N3[label=operand];\n N3 -> N4[label=operand];\n N4 -> N5[label=operator];\n}\n'
        self.assertEqual(dot.strip(), expected.strip())
    def test_function_transformation_5(self) -> None:
        """Arithmetic on an rv result inside a functional becomes graph ops."""
        self.maxDiff = None
        rt = BMGRuntime()
        queries = [exp_coin_3()]
        observations = {}
        bmg = rt.accumulate_graph(queries, observations)
        dot = to_dot(bmg)
        expected = '\ndigraph "graph" {\n N0[label=1];\n N1[label=2.0];\n N2[label=Beta];\n N3[label=Sample];\n N4[label=Exp];\n N5[label="+"];\n N6[label=Query];\n N0 -> N5[label=left];\n N1 -> N2[label=alpha];\n N1 -> N2[label=beta];\n N2 -> N3[label=operand];\n N3 -> N4[label=operand];\n N4 -> N5[label=right];\n N5 -> N6[label=operator];\n}\n'
        self.assertEqual(expected.strip(), dot.strip())
    def test_function_transformation_6(self) -> None:
        """Random variables defined on a class are lifted like free functions."""
        self.maxDiff = None
        rt = BMGRuntime()
        queries = [coin_with_class()]
        observations = {}
        bmg = rt.accumulate_graph(queries, observations)
        dot = to_dot(bmg)
        expected = '\ndigraph "graph" {\n N0[label=2.0];\n N1[label=Beta];\n N2[label=Sample];\n N3[label=Query];\n N0 -> N1[label=alpha];\n N0 -> N1[label=beta];\n N1 -> N2[label=operand];\n N2 -> N3[label=operator];\n}\n'
        self.assertEqual(dot.strip(), expected.strip())
    def test_bad_control_flow_1(self) -> None:
        """Stochastic control flow with infinite support is rejected."""
        self.maxDiff = None
        bmg = BMGRuntime()
        queries = [bad_functional_1()]
        observations = {}
        with self.assertRaises(ValueError) as ex:
            bmg.accumulate_graph(queries, observations)
        self.assertEqual(str(ex.exception), 'Stochastic control flow must have finite support.')
    def test_bad_control_flow_2(self) -> None:
        """Overly complex stochastic control flow is rejected."""
        self.maxDiff = None
        bmg = BMGRuntime()
        queries = [bad_functional_2()]
        observations = {}
        with self.assertRaises(ValueError) as ex:
            bmg.accumulate_graph(queries, observations)
        self.assertEqual(str(ex.exception), 'Stochastic control flow is too complex.')
    def test_bad_control_flow_3(self) -> None:
        """Named arguments to rv calls are rejected."""
        self.maxDiff = None
        bmg = BMGRuntime()
        queries = [bad_functional_3()]
        observations = {}
        with self.assertRaises(ValueError) as ex:
            bmg.accumulate_graph(queries, observations)
        self.assertEqual(str(ex.exception), 'Random variable function calls must not have named arguments.')
    def test_bad_control_flow_4(self) -> None:
        """Named arguments to functional calls are rejected."""
        self.maxDiff = None
        bmg = BMGRuntime()
        queries = [bad_functional_4()]
        observations = {}
        with self.assertRaises(ValueError) as ex:
            bmg.accumulate_graph(queries, observations)
        self.assertEqual(str(ex.exception), 'Functional calls must not have named arguments.')
    def test_rv_identity(self) -> None:
        """Two functionals over the same rv share a single Sample node."""
        self.maxDiff = None
        rt = BMGRuntime()
        queries = [beta_tensor_1a(), beta_tensor_1b()]
        observations = {}
        bmg = rt.accumulate_graph(queries, observations)
        observed = to_dot(bmg)
        expected = '\ndigraph "graph" {\n N0[label=2.0];\n N1[label=Beta];\n N2[label=Sample];\n N3[label=Log];\n N4[label=Query];\n N5[label=Exp];\n N6[label=Query];\n N0 -> N1[label=alpha];\n N0 -> N1[label=beta];\n N1 -> N2[label=operand];\n N2 -> N3[label=operand];\n N2 -> N5[label=operand];\n N3 -> N4[label=operator];\n N5 -> N6[label=operator];\n}'
        self.assertEqual(expected.strip(), observed.strip())
    def test_assertions_are_removed(self) -> None:
        """The JIT strips assert statements from lifted functions, so their
        side effects never run during accumulation."""
        global observable_side_effect
        self.maxDiff = None
        self.assertEqual(observable_side_effect, 0)
        # Run the assertion directly: its side effect fires.
        assert cause_side_effect()
        self.assertEqual(observable_side_effect, 1)
        observable_side_effect = 0
        bmg = BMGRuntime()
        bmg.accumulate_graph([assertions_are_removed()], {})
        # Inside the lifted function the assert was removed: no side effect.
        self.assertEqual(observable_side_effect, 0)
    def test_nested_functions_and_comprehensions(self) -> None:
        """Nested defs and comprehensions inside rvs accumulate without error."""
        self.maxDiff = None
        bmg = BMGRuntime()
        bmg.accumulate_graph([flip_with_nested_function()], {})
        bmg = BMGRuntime()
        bmg.accumulate_graph([flip_with_comprehension()], {})
    def test_aliased_rv(self) -> None:
        """An rv reached through an alias accumulates the same graph."""
        self.maxDiff = None
        rt = BMGRuntime()
        queries = [aliased_rv()]
        observations = {}
        bmg = rt.accumulate_graph(queries, observations)
        observed = to_dot(bmg)
        expected = '\ndigraph "graph" {\n N0[label=0.5];\n N1[label=Bernoulli];\n N2[label=Sample];\n N3[label=Query];\n N0 -> N1[label=probability];\n N1 -> N2[label=operand];\n N2 -> N3[label=operator];\n}\n'
        self.assertEqual(expected.strip(), observed.strip())
    def test_undecorated_rv(self) -> None:
        """An rv wrapped without decorator syntax accumulates the same graph."""
        self.maxDiff = None
        rt = BMGRuntime()
        queries = [undecorated_rv()]
        observations = {}
        bmg = rt.accumulate_graph(queries, observations)
        observed = to_dot(bmg)
        expected = '\ndigraph "graph" {\n N0[label=0.5];\n N1[label=Bernoulli];\n N2[label=Sample];\n N3[label=Query];\n N0 -> N1[label=probability];\n N1 -> N2[label=operand];\n N2 -> N3[label=operator];\n}\n'
        self.assertEqual(expected.strip(), observed.strip())
    def test_nested_rv(self) -> None:
        """Random variables defined inside functions (closures, lambdas,
        rvs nested in rvs) are lifted correctly."""
        self.maxDiff = None
        prob = 0.5
        _variable
        def nested_flip():
            return Bernoulli(prob)
        observed = to_dot(BMGRuntime().accumulate_graph([nested_flip()], {}))
        expected = '\ndigraph "graph" {\n N0[label=0.5];\n N1[label=Bernoulli];\n N2[label=Sample];\n N3[label=Query];\n N0 -> N1[label=probability];\n N1 -> N2[label=operand];\n N2 -> N3[label=operator];\n}\n'
        self.assertEqual(expected.strip(), observed.strip())
        # Lambdas wrapped explicitly also lift, including captured locals.
        mean = 0.0
        sigma = 1.0
        shift = 2.0
        lambda_norm = bm.random_variable((lambda : Normal(mean, sigma)))
        lambda_mult = (lambda x, y: (x * y))
        lambda_sum = bm.functional((lambda : lambda_mult((lambda_norm() + shift), 4.0)))
        observed = to_dot(BMGRuntime().accumulate_graph([lambda_sum()], {}))
        expected = '\ndigraph "graph" {\n N0[label=0.0];\n N1[label=1.0];\n N2[label=Normal];\n N3[label=Sample];\n N4[label=2.0];\n N5[label="+"];\n N6[label=4.0];\n N7[label="*"];\n N8[label=Query];\n N0 -> N2[label=mu];\n N1 -> N2[label=sigma];\n N2 -> N3[label=operand];\n N3 -> N5[label=left];\n N4 -> N5[label=right];\n N5 -> N7[label=left];\n N6 -> N7[label=right];\n N7 -> N8[label=operator];\n}\n'
        self.assertEqual(expected.strip(), observed.strip())
        _variable
        def norm1():
            return Normal(0.0, 1.0)
        _variable
        def norm2():
            # Helper defined inside the rv body must also be lifted.
            def mult(x, y):
                return (x * y)
            return Normal(mult(norm1(), 2.0), 3.0)
        observed = to_dot(BMGRuntime().accumulate_graph([norm2()], {}))
        expected = '\ndigraph "graph" {\n N0[label=0.0];\n N1[label=1.0];\n N2[label=Normal];\n N3[label=Sample];\n N4[label=2.0];\n N5[label="*"];\n N6[label=3.0];\n N7[label=Normal];\n N8[label=Sample];\n N9[label=Query];\n N0 -> N2[label=mu];\n N1 -> N2[label=sigma];\n N2 -> N3[label=operand];\n N3 -> N5[label=left];\n N4 -> N5[label=right];\n N5 -> N7[label=mu];\n N6 -> N7[label=sigma];\n N7 -> N8[label=operand];\n N8 -> N9[label=operator];\n}\n'
        self.assertEqual(expected.strip(), observed.strip())
        _variable
        def norm3():
            # An rv defined inside another rv's body.
            _variable
            def norm4():
                return Normal(0.0, 1.0)
            return Normal((norm4() * 5.0), 6.0)
        observed = to_dot(BMGRuntime().accumulate_graph([norm3()], {}))
        expected = '\ndigraph "graph" {\n N0[label=0.0];\n N1[label=1.0];\n N2[label=Normal];\n N3[label=Sample];\n N4[label=5.0];\n N5[label="*"];\n N6[label=6.0];\n N7[label=Normal];\n N8[label=Sample];\n N9[label=Query];\n N0 -> N2[label=mu];\n N1 -> N2[label=sigma];\n N2 -> N3[label=operand];\n N3 -> N5[label=left];\n N4 -> N5[label=right];\n N5 -> N7[label=mu];\n N6 -> N7[label=sigma];\n N7 -> N8[label=operand];\n N8 -> N9[label=operator];\n}\n\n'
        self.assertEqual(expected.strip(), observed.strip())
class User(SoftDeletionModel):
    """Application user account: credentials, profile and billing data, plus
    the role- and permission-checking helpers used throughout the app.

    NOTE(review): the bare ``_property`` statements and the paired
    getter/setter ``def``s of the same name below look like stripped
    ``@property`` / ``@x.setter`` decorators — as written, the later
    definition shadows the earlier one; confirm against upstream.
    """
    __tablename__ = 'users'

    # --- identity and credentials ---
    id = db.Column(db.Integer, primary_key=True, autoincrement=True)
    _email = db.Column(CIText, unique=True, nullable=False)  # exposed via the email getter/setter below
    _password = db.Column(db.String(128), nullable=False)  # salted hash, never the clear text (see password setter)
    facebook_id = db.Column(db.BigInteger, unique=True, nullable=True, name='facebook_id')
    facebook_login_hash = db.Column(db.String, nullable=True)
    reset_password = db.Column(db.String(128))  # random token, rotated whenever the password is set
    salt = db.Column(db.String(128))

    # --- profile ---
    avatar_url = db.Column(db.String)
    tokens = db.Column(db.Text)
    first_name = db.Column(db.String, nullable=True)
    last_name = db.Column(db.String, nullable=True)
    details = db.Column(db.String)  # HTML-sanitized on assignment via __setattr__ below
    contact = db.Column(db.String)
    facebook_url = db.Column(db.String)
    twitter_url = db.Column(db.String)
    instagram_url = db.Column(db.String)
    google_plus_url = db.Column(db.String)
    original_image_url = db.Column(db.String, nullable=True, default=None)
    thumbnail_image_url = db.Column(db.String)
    small_image_url = db.Column(db.String)
    icon_image_url = db.Column(db.String)

    # --- global flags and system roles ---
    is_super_admin = db.Column(db.Boolean, default=False)
    is_admin = db.Column(db.Boolean, default=False)
    is_sales_admin = db.Column(db.Boolean, default=False)
    is_marketer = db.Column(db.Boolean, default=False)
    is_verified = db.Column(db.Boolean, default=False)
    is_blocked = db.Column(db.Boolean, nullable=False, default=False)
    is_profile_public = db.Column(db.Boolean, nullable=False, default=False, server_default='False')
    public_name = db.Column(db.String)
    was_registered_with_order = db.Column(db.Boolean, default=False)
    last_accessed_at = db.Column(db.DateTime(timezone=True))
    created_at = db.Column(db.DateTime(timezone=True), default=func.now())

    # --- billing ---
    billing_contact_name = db.Column(db.String)
    billing_phone = db.Column(db.String)
    billing_state = db.Column(db.String)
    billing_country = db.Column(db.String)
    billing_tax_info = db.Column(db.String)
    company = db.Column(db.String)
    billing_address = db.Column(db.String)
    billing_city = db.Column(db.String)
    language_prefrence = db.Column(db.String)  # (sic) attribute name keeps the historical typo
    billing_zip_code = db.Column(db.String)
    billing_additional_info = db.Column(db.String)
    rocket_chat_token = db.Column(db.String)

    # --- relationships ---
    speaker = db.relationship('Speaker', backref='user')
    favourite_events = db.relationship('UserFavouriteEvent', backref='user')
    session = db.relationship('Session', backref='user')
    feedback = db.relationship('Feedback', backref='user')
    access_codes = db.relationship('AccessCode', backref='user')
    discount_codes = db.relationship('DiscountCode', backref='user')
    # Read-only views of events where this user holds the matching custom system role.
    marketer_events = db.relationship('Event', viewonly=True, secondary='join(UserSystemRole, CustomSysRole, and_(CustomSysRole.id == UserSystemRole.role_id, CustomSysRole.name == "Marketer"))', primaryjoin='UserSystemRole.user_id == User.id', secondaryjoin='Event.id == UserSystemRole.event_id')
    sales_admin_events = db.relationship('Event', viewonly=True, secondary='join(UserSystemRole, CustomSysRole, and_(CustomSysRole.id == UserSystemRole.role_id, CustomSysRole.name == "Sales Admin"))', primaryjoin='UserSystemRole.user_id == User.id', secondaryjoin='Event.id == UserSystemRole.event_id')

    _property
    def password(self):
        """Return the stored password hash (never the clear text)."""
        return self._password

    def password(self, password):
        """Hash *password* with a freshly generated salt and store it.

        Also rotates ``reset_password`` (invalidating outstanding reset
        links) and records the salt used.
        """
        salt = str(generate_random_salt(), 'utf-8')
        self._password = str(generate_password_hash(password, salt), 'utf-8')
        hash_ = random.getrandbits(128)
        self.reset_password = str(hash_)
        self.salt = salt

    _property
    def email(self):
        """Return the account's e-mail address."""
        return self._email

    def email(self, email):
        """Update the address; any change drops the verified status."""
        if (self._email != email):
            self._email = email
            self.is_verified = False

    def can_publish_event(self):
        """Whether this user may publish events.

        Falls back to the verified flag when no 'publish_event'
        UserPermission row exists; otherwise unverified users are allowed
        only if the permission says so.
        """
        perm = UserPermission.query.filter_by(name='publish_event').first()
        if (not perm):
            return self.is_verified
        if (self.is_verified is False):
            return perm.unverified_user
        return True

    def can_create_event(self):
        """Whether this user may create events (same policy as publishing)."""
        perm = UserPermission.query.filter_by(name='create_event').first()
        if (not perm):
            return self.is_verified
        if (self.is_verified is False):
            return perm.unverified_user
        return True

    def _is_role(self, role_name, event_id=None):
        """True when the user holds *role_name* directly (UER) or through an
        accepted group role; scoped to *event_id* when one is given."""
        from app.models.users_groups_role import UsersGroupsRoles
        role = Role.query.filter_by(name=role_name).first()
        uer = UER.query.filter_by(user=self, role=role)
        ugr = UsersGroupsRoles.query.filter_by(user=self, role=role, accepted=True)
        if event_id:
            uer = uer.filter_by(event_id=event_id)
            event = Event.query.get(event_id)
            if ((event is not None) and (event.group is not None)):
                # Group roles apply to all events of the event's group.
                ugr = ugr.filter_by(group=event.group)
        return bool((uer.first() or ugr.first()))

    def is_owner(self, event_id):
        """True when the user owns the given event."""
        return self._is_role(Role.OWNER, event_id)

    def is_organizer(self, event_id):
        return self._is_role(Role.ORGANIZER, event_id)

    def is_coorganizer(self, event_id):
        return self._is_role(Role.COORGANIZER, event_id)

    def is_track_organizer(self, event_id):
        return self._is_role(TRACK_ORGANIZER, event_id)

    def is_moderator(self, event_id):
        return self._is_role(MODERATOR, event_id)

    def is_registrar(self, event_id):
        return self._is_role(REGISTRAR, event_id)

    def has_event_access(self, event_id):
        """True for owner, organizer or co-organizer of the event."""
        return (self._is_role(Role.OWNER, event_id) or self._is_role(Role.ORGANIZER, event_id) or self._is_role(Role.COORGANIZER, event_id))

    # The is_user_* variants check the role across ALL events (no event scope).
    _property
    def is_user_owner(self):
        return self._is_role(Role.OWNER)

    _property
    def is_user_organizer(self):
        return self._is_role(Role.ORGANIZER)

    _property
    def is_user_coorganizer(self):
        return self._is_role(Role.COORGANIZER)

    _property
    def is_user_track_organizer(self):
        return self._is_role(TRACK_ORGANIZER)

    _property
    def is_user_moderator(self):
        return self._is_role(MODERATOR)

    _property
    def is_user_registrar(self):
        return self._is_role(REGISTRAR)

    def _has_perm(self, operation, service_class, event_id):
        """CRUD permission check for *service_class* within *event_id*.

        Super admins always pass.  Otherwise every role the user holds on
        the event is checked against the Permission table for the service.
        Raises ValueError for an unknown *operation*.
        """
        operations = {'create': 'can_create', 'read': 'can_read', 'update': 'can_update', 'delete': 'can_delete'}
        if (operation not in list(operations.keys())):
            raise ValueError('No such operation defined')
        try:
            service_name = service_class.get_service_name()
        except AttributeError:
            # Classes without a service name have no permission entries.
            return False
        if self.is_super_admin:
            return True
        service = Service.query.filter_by(name=service_name).first()
        uer_querylist = UER.query.filter_by(user=self, event_id=event_id)
        for uer in uer_querylist:
            role = uer.role
            perm = Permission.query.filter_by(role=role, service=service).first()
            if getattr(perm, operations[operation]):
                return True
        return False

    def can_create(self, service_class, event_id):
        return self._has_perm('create', service_class, event_id)

    def can_read(self, service_class, event_id):
        return self._has_perm('read', service_class, event_id)

    def can_update(self, service_class, event_id):
        return self._has_perm('update', service_class, event_id)

    def can_delete(self, service_class, event_id):
        return self._has_perm('delete', service_class, event_id)

    def is_speaker_at_session(self, session_id):
        """True when the user speaks at exactly that session; any lookup
        ambiguity or miss yields False."""
        try:
            session = Session.query.filter(Session.speakers.any((Speaker.user_id == self.id))).filter((Session.id == session_id)).one()
            return bool(session)
        except MultipleResultsFound:
            return False
        except NoResultFound:
            return False

    def is_speaker_at_event(self, event_id):
        """True when the user speaks at any session of the event."""
        try:
            session = Session.query.filter(Session.speakers.any((Speaker.user_id == self.id))).filter((Session.event_id == event_id)).first()
            return bool(session)
        except MultipleResultsFound:
            return False
        except NoResultFound:
            return False

    # --- Flask-Login interface ---
    def is_authenticated(self):
        return True

    def is_active(self):
        return True

    def is_anonymous(self):
        return False

    def get_id(self):
        return self.id

    def is_correct_password(self, password):
        """Hash the candidate with the stored salt and compare."""
        salt = self.salt
        password = str(generate_password_hash(password, salt), 'utf-8')
        if (password == self._password):
            return True
        return False

    def is_staff(self):
        """Super admins and admins count as staff."""
        return (self.is_super_admin or self.is_admin)

    def is_sys_role(self, role_id):
        """True when the user holds the given custom system role."""
        role = UserSystemRole.query.filter_by(user=self, role_id=role_id).first()
        return bool(role)

    def first_access_panel(self):
        """Name of the first admin panel the user's first custom system role
        can access, or False when there is none."""
        custom_role = UserSystemRole.query.filter_by(user=self).first()
        if (not custom_role):
            return False
        perm = PanelPermission.query.filter(PanelPermission.custom_system_roles.any(id=custom_role.role_id)).first()
        if (not perm):
            return False
        return perm.panel_name

    def can_access_panel(self, panel_name):
        """Staff can access everything; otherwise any custom system role that
        grants the panel suffices.

        NOTE(review): ``is_staff`` is a plain method here, so this bound-method
        truthiness check is always True — presumably a stripped @property;
        confirm against upstream.
        """
        if self.is_staff:
            return True
        custom_sys_roles = UserSystemRole.query.filter_by(user=self)
        for custom_role in custom_sys_roles:
            if custom_role.role.can_access(panel_name):
                return True
        return False

    def get_unread_notif_count(self):
        """Number of unread notifications for this user."""
        return get_count(Notification.query.filter_by(user=self, is_read=False))

    def get_unread_notifs(self):
        """Unread notifications, newest first, as display-ready dicts."""
        notifs = []
        unread_notifs = Notification.query.filter_by(user=self, is_read=False).order_by(desc(Notification.received_at))
        for notif in unread_notifs:
            notifs.append({'title': notif.title, 'received_at': humanize.naturaltime((datetime.now(pytz.utc) - notif.received_at)), 'mark_read': url_for('notifications.mark_as_read', notification_id=notif.id)})
        return notifs

    def update_lat(self):
        """Refresh the last-accessed timestamp.

        NOTE(review): uses a naive datetime while the column is
        timezone-aware — confirm intended.
        """
        self.last_accessed_at = datetime.now()

    def fullname(self):
        """Legacy alias for full_name."""
        return self.full_name

    def full_name(self):
        """First and last name joined, skipping missing parts."""
        return ' '.join(filter(None, [self.first_name, self.last_name]))

    def get_full_billing_address(self, sep: str='\n') -> str:
        """Billing address lines joined by *sep*, skipping empty fields."""
        return sep.join(filter(None, [self.billing_address, self.billing_city, self.billing_state, self.billing_zip_code, self.billing_country]))

    full_billing_address = property(get_full_billing_address)

    def anonymous_name(self):
        """Two random capitalized words, e.g. for anonymized display."""
        return ' '.join(map((lambda x: x.capitalize()), generate(2)))

    def rocket_chat_username(self):
        """Slugified display name (max 32 chars) used for Rocket.Chat."""
        name = (self.public_name or self.full_name or f'user_{self.id}')
        return slugify(name, word_boundary=True, max_length=32, separator='.')

    def rocket_chat_password(self):
        """Deterministic signed token used as the Rocket.Chat password."""
        return get_serializer().dumps(f'rocket_chat_user_{self.id}')

    def is_rocket_chat_registered(self) -> bool:
        """True once a Rocket.Chat token has been stored."""
        return (self.rocket_chat_token is not None)

    def __repr__(self):
        return ('<User %r>' % self.email)

    def __setattr__(self, name, value):
        # 'details' is user-supplied rich text: sanitize it on every write.
        if (name == 'details'):
            super().__setattr__(name, clean_html(clean_up_string(value)))
        else:
            super().__setattr__(name, value)
class petsc_LU(KSP_Preconditioner):
    """Direct LU factorization preconditioner backed by a petsc4py ``PC``.

    The PC is created with type 'lu' and picks up any PETSc command-line
    options registered under *prefix*.
    """

    def __init__(self, L, prefix=None):
        # Record the type tag and the operator before building the PETSc PC.
        self.PCType = 'lu'
        self.L = L
        self._initializePC(prefix)
        # Apply runtime PETSc options (e.g. factor solver choices).
        self.pc.setFromOptions()

    def _initializePC(self, prefix):
        """Create the underlying PETSc PC object and mark it as LU."""
        pc_object = p4pyPETSc.PC().create()
        pc_object.setOptionsPrefix(prefix)
        pc_object.setType('lu')
        self.pc = pc_object

    def setUp(self, global_ksp=None, newton_its=None):
        """No per-solve setup is needed for a direct LU factorization."""
        pass
.scheduler()
.integration_test
.usefixtures('copy_poly_case', 'try_queue_and_scheduler', 'monkeypatch')
def test_failing_job_cli_error_message():
with open('poly_eval.py', mode='a', encoding='utf-8') as poly_script:
poly_script.writelines([" raise RuntimeError('Argh')"])
args = Mock()
args.config = 'poly_high_min_reals.ert'
parser = ArgumentParser(prog='test_main')
parsed = ert_parser(parser, [TEST_RUN_MODE, 'poly.ert'])
expected_substrings = ['Realization: 0 failed after reaching max submit (2)', 'job poly_eval failed', 'Process exited with status code 1', 'Traceback', "raise RuntimeError('Argh')", 'RuntimeError: Argh']
try:
run_cli(parsed)
except ErtCliError as error:
for substring in expected_substrings:
assert (substring in f'{error}')
else:
pytest.fail(msg='Expected run cli to raise ErtCliError!') |
def tri_occ(pos, ind, val, num):
    """Accumulate truncated Fourier coefficients of per-pixel value intervals
    on a 512x512 grid.

    ``pos[i]`` encodes a pixel as row*512+col; ``val`` holds flat
    [start, end] pairs and ``ind[i]`` is the (exclusive) end offset of
    pixel i's pairs.  Channel 0 accumulates total interval length,
    channels 1..num-1 the sine terms and num+1..2*num-1 the cosine terms.
    Returns (coefficients, boolean mask of touched pixels).
    """
    coeffs = np.zeros((512, 512, (num * 2)), dtype=np.float32)
    touched = np.zeros((512, 512), dtype=np.bool_)
    freq = np.zeros(num, dtype=np.float32)
    for k in range(1, num, 1):
        freq[k] = (k * np.pi)  # angular frequency of harmonic k
    start = 0
    for i in range(len(pos)):
        row = (pos[i] // 512)
        col = (pos[i] % 512)
        touched[(row, col)] = 1
        # Walk this pixel's [lo, hi] pairs.
        for j in range(start, ind[i], 2):
            lo = val[j]
            hi = val[(j + 1)]
            coeffs[(row, col, 0)] += (hi - lo)
            for k in range(1, num, 1):
                coeffs[(row, col, k)] += ((np.sin((freq[k] * hi)) - np.sin((freq[k] * lo))) / freq[k])
                coeffs[(row, col, (k + num))] += ((np.cos((freq[k] * lo)) - np.cos((freq[k] * hi))) / freq[k])
        start = ind[i]
    return (coeffs, touched)
class ChannelRedis(Channel):
    """Bidirectional message channel built on two Redis-backed queues:
    messages go out via *channel_out* and come in via *channel_in*."""

    def __init__(self, redis: Redis, channel_in: str, channel_out: str):
        self._outgoing = MessageQueue(redis, channel_out)
        self._incoming = MessageQueue(redis, channel_in)

    def send_message(self, message: Any):
        """Push *message* onto the outgoing queue."""
        self._outgoing.push(message)

    def recv_message(self, timeout_epoch: Optional[float]=None) -> Any:
        """Pop the next incoming message; *timeout_epoch* is forwarded to
        MessageQueue.pop (its semantics are defined there)."""
        return self._incoming.pop(timeout_epoch)
def group_gemm_instance(op_def: str, func_attrs: Dict[(str, Any)], for_profiler: bool, cutlass_3x: bool=False):
    """Rewrite a plain GEMM operator definition into its grouped-GEMM form.

    Alignments are first patched by update_alignments_in_group_gemm_instance,
    then the emitted template text is adjusted for the grouped kernel.
    (``cutlass_3x`` is accepted for interface compatibility but unused here.)
    """
    patched = update_alignments_in_group_gemm_instance(op_def, func_attrs, for_profiler)
    # Plain-string substitutions first ...
    patched = patched.replace('DefaultGemmUniversal', 'DefaultGemmGrouped')
    patched = patched.replace('false,', '')
    # ... then the regex-based rewrites.
    patched = re.sub('cutlass::layout::ColumnMajor,\\n', 'cutlass::layout::RowMajor,\n', patched)
    patched = re.sub('GemmIdentityThreadblockSwizzle<\\d>', 'GemmBatchedIdentityThreadblockSwizzle', patched)
    patched = re.sub('cutlass::arch::OpMultiplyAdd', ('cutlass::gemm::kernel::GroupScheduleMode::kDeviceOnly,\n' + 'cutlass::arch::OpMultiplyAdd'), patched)
    return patched
class Post(Base):
    """SQLAlchemy model for a post authored by a Pessoa."""
    __tablename__ = 'post'
    # Surrogate primary key.
    id = Column(Integer, primary_key=True)
    titulo = Column(String)  # title
    conteudo = Column(String)  # body/content
    # Author foreign key plus ORM relationship (reachable as Pessoa.post).
    autor_id = Column(Integer, ForeignKey('pessoa.id'))
    autor = relationship('Pessoa', backref='post')

    def __repr__(self):
        return f'Post({self.titulo})'
def extractWwwFishytranslationCom(item):
    """Map a feed *item* from fishytranslation.com to a release message.

    Returns None for items with no chapter/volume info or preview posts,
    a built release message when a known series tag matches, and False
    when nothing matches.
    """
    vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])
    if (not (chp or vol)) or ('preview' in item['title'].lower()):
        return None
    known_series = [('Op Waifus', 'Being Able to Edit Skills in Another World, I Gained OP Waifus', 'translated'), ('Rebirth Junior High School', 'Rebirth Junior High School: The Exceling Top Student Goddess', 'translated'), ('How to Raise a Silver-Haired Loli', 'How to Raise a Silver-Haired Loli', 'translated'), ('The Pitiful Me Does Not Need a Dazzling Life', 'The Pitiful Me Does Not Need a Dazzling Life', 'translated'), ('PRC', 'PRC', 'translated'), ('Loiterous', 'Loiterous', 'oel')]
    for series_tag, series_name, translation_type in known_series:
        if series_tag in item['tags']:
            return buildReleaseMessageWithType(item, series_name, vol, chp, frag=frag, postfix=postfix, tl_type=translation_type)
    return False
class Web3ModuleTest():
    """Shared test mixin exercising core Web3 module behavior.

    NOTE(review): the bare ``.parametrize(...)`` lines look like stripped
    ``@pytest.mark.parametrize`` decorators — confirm against upstream.
    """

    def test_web3_client_version(self, w3: Web3) -> None:
        """Client version string must satisfy the subclass's check."""
        client_version = w3.client_version
        self._check_web3_client_version(client_version)

    def _check_web3_client_version(self, client_version: str) -> NoReturn:
        # Each concrete test suite validates its own client's version string.
        raise NotImplementedError('Must be implemented by subclasses')

    .parametrize('types,values,expected', ((['bool'], [True], HexBytes('0x5fe7f977e71dba2ea1a68e21057beebb9be2ac30c6410aa38d4f3fbe41dcffd2')), (['uint8', 'uint8', 'uint8'], [97, 98, 99], HexBytes('0x4e03657aea45a94fc7d47ba826c8d667c0d1e6e33a64a036ec44f58fa12d6c45')), (['uint248'], [30], HexBytes('0x30f95deb33ae4d53d405b26f920e765dff87cca8e9a4aec99f82671')), (['bool', 'uint16'], [True, 299], HexBytes('0xed18599ccd80ee9fae9a28b0e34a5573c3233d7468f808fd659bc171cf0b43bd')), (['int256'], [(- 10)], HexBytes('0xd6fb717f7e270a360f5093ce6a7a3752183e89c9a9afe5c0cb54b458a304d3d5')), (['int256'], [10], HexBytes('0xc65a7bb8d6351c1cf70c95a316cc6a92839c986682d98bc35f958f4883f9d2a8')), (['int8', 'uint8'], [(- 10), 18], HexBytes('0x5c6ab1e634c08d9c0f4df4d789e8727943ef010dd7ca8e3c89de197a26d148be')), (['address'], ['0x49eddd3769cd86597b84ac5c2f5614'], InvalidAddress), (['address'], ['0x49EdDD3769cD86597B84ac5c2F5614'], HexBytes('0x2ff37b5607484cd4eecf6d13292e22bd6e5401eaffcc07e279583bc742c68882')), (['bytes2'], ['0x5402'], HexBytes('0x4ed9171bda52fca71ab28e7f452bd6eacc3e5a568a47e0fa53b503159a9b8910')), (['bytes3'], ['0x5402'], HexBytes('0x4ed9171bda52fca71ab28e7f452bd6eacc3e5a568a47e0fa53b503159a9b8910')), (['bytes'], ['0xb6c6f6eee7374736f6ce6374696f6e'], HexBytes('0xd78a84d65721b67e4011b10c99dafdedcdcd7cbf773e210b4762e22f')), (['string'], ['testing a string!'], HexBytes('0xe8c275c0b4070a5ec6cfcb83f0ba394b30ddd283de785d43f2eabfb04bd96747')), (['string', 'bool', 'uint16', 'bytes2', 'address'], ['testing a string!', False, 299, '0x5402', '0x49eddd3769cd86597b84ac5c2f5614'], InvalidAddress), (['string', 'bool', 'uint16', 'bytes2', 'address'], ['testing a string!', False, 299, '0x5402', '0x49EdDD3769cD86597B84ac5c2F5614'], HexBytes('0x8cc6eabb25b842715e8ca39e2524ed946759aa37bfb7d4b81829cf5a7e266103')), (['bool[2][]'], [[[True, False], [False, True]]], HexBytes('0x1eef261f2eb51a8c736d52be3f91ff79e78a9ec5df2b7f50d0c6f98ed1e2bc06')), (['bool[]'], [[True, False, True]], HexBytes('0x5c6090c0461491a2941743bda5c3658bf1ea53bbd3edcde54e16205e18b45792')), (['uint24[]'], [[1, 0, 1]], HexBytes('0x5c6090c0461491a2941743bda5c3658bf1ea53bbd3edcde54e16205e18b45792')), (['uint8[2]'], [[8, 9]], HexBytes('0xc7694af312c4ffd0ba6a52461fcee8ab19a343af92538a')), (['uint256[2]'], [[8, 9]], HexBytes('0xc7694af312c4ffd0ba6a52461fcee8ab19a343af92538a')), (['uint8[]'], [[8]], HexBytes('0xf3f7a9fe364faab93b216da50a3214154f22a0a2b415b23a84c8169e8b636ee3')), (['address[]'], [['0x49EdDD3769cD86597B84ac5c2F5614', '0xA6b759bBbf4B59D24acf7E06e79f3a5D104fdCE5']], HexBytes('0xb98565c0c26a962fd54d93b0ed6fb9296e03e9da29d2281ed3e3473109ef7dde')), (['address[]'], [['0x49EdDD3769cD86597B84ac5c2F5614', '0xa6b759bbbf4b59d24acf7e06e79f3a5d104fdce5']], InvalidAddress)))
    .parametrize('w3', (Web3, AsyncWeb3))
    def test_solidity_keccak(self, w3: Union[('Web3', 'AsyncWeb3')], types: Sequence[TypeStr], values: Sequence[Any], expected: HexBytes) -> None:
        """solidity_keccak must hash ABI-encoded values; exception entries in
        the table must be raised instead."""
        if (isinstance(expected, type) and issubclass(expected, Exception)):
            with pytest.raises(expected):
                w3.solidity_keccak(types, values)
            return
        actual = w3.solidity_keccak(types, values)
        assert (actual == expected)

    .parametrize('types, values, expected', ((['address'], ['one.eth'], HexBytes('0x2ff37b5607484cd4eecf6d13292e22bd6e5401eaffcc07e279583bc742c68882')), (['address[]'], [['one.eth', 'two.eth']], HexBytes('0xb98565c0c26a962fd54d93b0ed6fb9296e03e9da29d2281ed3e3473109ef7dde'))))
    .parametrize('w3', (Web3(), AsyncWeb3()))
    def test_solidity_keccak_ens(self, w3: Union[('Web3', 'AsyncWeb3')], types: Sequence[TypeStr], values: Sequence[str], expected: HexBytes) -> None:
        """ENS names resolve only on an instance configured with ENS; the
        classmethod call (no ENS) must raise InvalidAddress."""
        with ens_addresses(w3, {'one.eth': ChecksumAddress(HexAddress(HexStr('0x49EdDD3769cD86597B84ac5c2F5614'))), 'two.eth': ChecksumAddress(HexAddress(HexStr('0xA6b759bBbf4B59D24acf7E06e79f3a5D104fdCE5')))}):
            with pytest.raises(InvalidAddress):
                Web3.solidity_keccak(types, values)
            actual = w3.solidity_keccak(types, values)
            assert (actual == expected)

    .parametrize('types,values', ((['address'], ['0xA6b759bBbf4B59D24acf7E06e79f3a5D104fdCE5', True]), (['address', 'bool'], ['0xA6b759bBbf4B59D24acf7E06e79f3a5D104fdCE5']), ([], ['0xA6b759bBbf4B59D24acf7E06e79f3a5D104fdCE5'])))
    def test_solidity_keccak_same_number_of_types_and_values(self, w3: 'Web3', types: Sequence[TypeStr], values: Sequence[Any]) -> None:
        """Mismatched types/values lengths must raise ValueError."""
        with pytest.raises(ValueError):
            w3.solidity_keccak(types, values)

    def test_is_connected(self, w3: 'Web3') -> None:
        """The provider under test must report a live connection."""
        assert w3.is_connected()
def extractSadstranslatesPage(item):
    """Parse a 'sadstranslates' feed *item* into a release message.

    Returns None when no chapter/volume is present or the title is a
    preview; otherwise matches known tags and returns the built release,
    or False when nothing matches.
    """
    vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])
    title_is_preview = ('preview' in item['title'].lower())
    if (not (chp or vol)) or title_is_preview:
        return None
    known_series = [('PRC', 'PRC', 'translated'), ('Loiterous', 'Loiterous', 'oel')]
    for series_tag, series_name, translation_type in known_series:
        if series_tag in item['tags']:
            return buildReleaseMessageWithType(item, series_name, vol, chp, frag=frag, postfix=postfix, tl_type=translation_type)
    return False
class LogFile():
    """A single gzipped CloudTrail JSON log file addressed by path."""

    # Compiled once at class-creation time instead of on every
    # has_valid_filename() call.
    _FILENAME_RE = re.compile('[0-9]+_CloudTrail_[a-z0-9-]+_[0-9TZ]+_[a-zA-Z0-9]+\\.json\\.gz')

    def __init__(self, path):
        self._path = path

    def timestamp(self):
        """UTC timestamp parsed from the 4th underscore-separated filename
        field (YYYYMMDDTHHMMZ layout)."""
        dstr = self.filename().split('_')[3]
        return datetime.datetime(*map(int, [dstr[:4], dstr[4:6], dstr[6:8], dstr[9:11], dstr[11:13]])).replace(tzinfo=pytz.utc)

    def filename(self):
        """Base name of the log file (directories stripped)."""
        return os.path.split(self._path)[-1]

    def has_valid_filename(self):
        """Match object when the name follows the CloudTrail naming scheme
        (account_CloudTrail_region_date_hash.json.gz), else None; intended
        for truthiness tests."""
        return self._FILENAME_RE.match(self.filename())

    def records(self):
        """Load the file and return its parsed 'Records'; [] when the file
        cannot be read."""
        logging.debug('Loading %s', self._path)
        try:
            with gzip.open(self._path, 'rt') as unzipped:
                json_data = json.load(unzipped)
                records = json_data['Records']
                return parse_records(records)
        except (IOError, OSError) as error:
            # Unreadable/corrupt files are skipped with a warning, not fatal.
            logging.warning('Could not load %s: %s', self._path, error)
            return []

    def contains_events_for_timeframe(self, from_date, to_date):
        """True when this file's timestamp falls within [from_date,
        to_date + 1h]; the extra hour presumably covers events delivered
        shortly after to_date — confirm against caller expectations."""
        return (from_date <= self.timestamp() <= (to_date + datetime.timedelta(hours=1)))
.parametrize('value,expected', (('', False), ('0x', True), ('0X', True), ('abcdef', True), ('ABCDEF', True), ('AbCdEf', True), ('0xabcdef', True), ('0xABCDEF', True), ('0xAbCdEf', True), ('12345', True), ('0x12345', True), ('123456xx', False), ('0x123456xx', False), ('0\x80', False)))
def test_is_hex(value, expected):
actual = is_hex(value)
assert (actual is expected) |
def graphs_with_cast_dereference_assignments():
    """Build an (input, expected-output) CFG pair for the cast-dereference
    assignment case; the transformation is expected to leave it unchanged,
    so both graphs hold identical instruction sequences."""
    x = vars('x', 2)
    ptr = vars('ptr', 1, type=Pointer(int32))

    def build_block():
        # Fresh instruction objects each call so the two graphs share nothing.
        return BasicBlock(0, [_assign(x[0], _cast(int64, _deref(_add(ptr[0], _mul(x[1], Constant(4)))))), _call('func_modifying_pointer', [], [ptr[0]]), _ret(x[0])])

    in_cfg = ControlFlowGraph()
    in_cfg.add_node(build_block())
    out_cfg = ControlFlowGraph()
    out_cfg.add_node(build_block())
    return (in_cfg, out_cfg)
class PhiDependencyResolver():
    """Orders — and, where necessary, breaks cycles between — the
    Phi-functions at the head of each basic block so they can be lowered
    to sequential copies.

    Phi-functions reading a variable defined by another Phi in the same
    block form a dependency graph; cycles are broken by renaming a Phi's
    destination to a fresh 'copy_' variable and inserting a compensating
    plain assignment right after the Phi region.
    """

    def __init__(self, phi_functions: Dict[(BasicBlock, List[Phi])]):
        # Mapping: basic block -> the list of Phi-functions it starts with.
        self._phi_functions_of = phi_functions

    def resolve(self) -> None:
        """Break dependency cycles and topologically sort every block's Phis."""
        for (basic_block, phi_instructions) in self._phi_functions_of.items():
            dependency_graph = PhiDependencyGraph(phi_instructions)
            # Directed feedback vertex set: removing these nodes makes the graph acyclic.
            directed_fvs = dependency_graph.compute_directed_feedback_vertex_set_of()
            for phi_function in directed_fvs:
                self._remove_dependency_of(phi_function, basic_block, dependency_graph)
            topological_order = self._get_topological_order_for(dependency_graph)
            self._sort_phi_functions_using(topological_order, basic_block)

    def _remove_dependency_of(self, phi_function: Phi, basic_block: BasicBlock, dependency_graph: PhiDependencyGraph):
        """Break the cycles through *phi_function* by renaming its destination.

        The Phi now defines 'copy_<name>'; a compensating assignment
        '<name> = copy_<name>' is inserted after the Phi region so later
        readers still see the original variable.
        """
        successors = dependency_graph.successors(phi_function)
        dependency_graph.remove_node(phi_function)
        variable = phi_function.definitions[0]
        copy_variable = Variable(('copy_' + variable.name), variable.type, variable.ssa_label, variable.is_aliased)
        phi_function.rename_destination(variable, copy_variable)
        # Re-add outgoing edges: the renamed Phi still feeds its successors.
        dependency_graph.add_edges_from([(phi_function, succ) for succ in successors])
        self._add_definition_to_cfg(variable, copy_variable, basic_block)

    def _add_definition_to_cfg(self, definition: Variable, value: Variable, basic_block: BasicBlock):
        """Insert 'definition = value' directly after the block's Phi region
        (index == number of Phis in the block)."""
        assignment = Assignment(definition, value)
        basic_block.instructions.insert(len(self._phi_functions_of[basic_block]), assignment)

    def _get_topological_order_for(self, dependency_graph: PhiDependencyGraph) -> List[Phi]:
        """Topological order of the (now acyclic) dependency graph."""
        return list(topological_sort(dependency_graph))

    def _sort_phi_functions_using(self, sorted_phi_functions: List[Phi], basic_block: BasicBlock) -> None:
        """Replace the block's Phi region with the sorted list, verifying no
        Phi was lost or duplicated along the way."""
        if (len(sorted_phi_functions) != len(self._phi_functions_of[basic_block])):
            error_message = f'The length of our ordered list of Phi-functions {sorted_phi_functions} is different from our original list of Phi-functions {self._phi_functions_of[basic_block]}'
            logging.error(error_message)
            raise ValueError(error_message)
        self._phi_functions_of[basic_block] = sorted_phi_functions
        basic_block.instructions[:len(sorted_phi_functions)] = sorted_phi_functions
class InitalizeTests(unittest.TestCase):
    """Tests for anitya.db.meta.initialize.

    NOTE(review): the bare string lines before each test look like stripped
    ``@mock.patch(...)`` decorators (mocks injected bottom-up) — confirm
    against upstream.
    """
    ('anitya.db.meta.create_engine')
    ('anitya.db.meta.Session')
    def test_initialize(self, mock_session, mock_create_engine):
        """initialize() creates an engine from DB_URL (echo off by default)
        and binds the scoped session to it."""
        config = {'DB_URL': 'postgresql://postgres:/mydb'}
        engine = meta.initialize(config)
        mock_create_engine.assert_called_once_with(config['DB_URL'], echo=False)
        self.assertEqual(engine, mock_create_engine.return_value)
        mock_session.configure.assert_called_once_with(bind=engine)

    ('anitya.db.meta.create_engine')
    ('anitya.db.meta.event.listen')
    ('anitya.db.meta.Session')
    def test_initalize_sqlite(self, mock_session, mock_listen, mock_create_engine):
        """For SQLite URLs an extra event listener is installed on the
        engine's 'connect' event; SQL_DEBUG toggles echo."""
        config = {'DB_URL': 'sqlite://', 'SQL_DEBUG': True}
        engine = meta.initialize(config)
        mock_create_engine.assert_called_once_with(config['DB_URL'], echo=True)
        mock_session.configure.assert_called_once_with(bind=engine)
        # Exactly one listener, registered on (engine, 'connect', ...).
        self.assertEqual(1, mock_listen.call_count)
        self.assertEqual(engine, mock_listen.call_args_list[0][0][0])
        self.assertEqual('connect', mock_listen.call_args_list[0][0][1])
def parse_args():
    """Build and parse the command-line interface for image generation.

    Returns the argparse namespace.  A positional PROMPT, when supplied,
    overrides the --prompt option.
    """
    cli = argparse.ArgumentParser(description='Create images from a text prompt.')
    add = cli.add_argument
    add('--attention-slicing', action='store_true', help='Use less memory at the expense of inference speed')
    add('--device', type=str, nargs='?', default='cuda', help='The cpu or cuda device to use to render images')
    add('--half', action='store_true', help='Use float16 (half-sized) tensors instead of float32')
    add('--height', type=int, nargs='?', default=512, help='Image height in pixels')
    add('--image', type=str, nargs='?', help='The input image to use for image-to-image diffusion')
    add('--image-scale', type=float, nargs='?', help='How closely the image should follow the original image')
    add('--iters', type=int, nargs='?', default=1, help='Number of times to run pipeline')
    add('--mask', type=str, nargs='?', help='The input mask to use for diffusion inpainting')
    add('--model', type=str, nargs='?', default='CompVis/stable-diffusion-v1-4', help='The model used to render images')
    add('--negative-prompt', type=str, nargs='?', help='The prompt to not render into an image')
    add('--onnx', action='store_true', help='Use the onnx runtime for inference')
    add('--prompt', type=str, nargs='?', help='The prompt to render into an image')
    add('--samples', type=int, nargs='?', default=1, help='Number of images to create per run')
    add('--scale', type=float, nargs='?', default=7.5, help='How closely the image should follow the prompt')
    add('--scheduler', type=str, nargs='?', help='Override the scheduler used to denoise the image')
    add('--seed', type=int, nargs='?', default=0, help='RNG seed for repeatability')
    add('--skip', action='store_true', help='Skip the safety checker')
    add('--steps', type=int, nargs='?', default=50, help='Number of sampling steps')
    add('--strength', type=float, default=0.75, help='Diffusion strength to apply to the input image')
    add('--token', type=str, nargs='?', help='Huggingface user access token')
    add('--vae-slicing', action='store_true', help='Use less memory when creating large batches of images')
    add('--vae-tiling', action='store_true', help='Use less memory when creating ultra-high resolution images')
    add('--width', type=int, nargs='?', default=512, help='Image width in pixels')
    add('--xformers-memory-efficient-attention', action='store_true', help='Use less memory but require the xformers library')
    add('prompt0', metavar='PROMPT', type=str, nargs='?', help='The prompt to render into an image')
    parsed = cli.parse_args()
    # The positional prompt wins over --prompt when both are supplied.
    if parsed.prompt0 is not None:
        parsed.prompt = parsed.prompt0
    return parsed
.django_db
def test_new_award_count_specific_award_type(client, monkeypatch, new_award_data, helpers, elasticsearch_award_index):
setup_elasticsearch_test(monkeypatch, elasticsearch_award_index)
resp = client.get(url.format(code='123', filter='?fiscal_year=2020&award_type_codes=[A,B]'))
assert (resp.status_code == status.HTTP_200_OK)
assert (resp.data['new_award_count'] == 2)
resp = client.get(url.format(code='789', filter='?fiscal_year=2020&award_type_codes=[07,08]'))
assert (resp.status_code == status.HTTP_200_OK)
assert (resp.data['new_award_count'] == 0)
resp = client.get(url.format(code='789', filter='?fiscal_year=2021&award_type_codes=[08]'))
assert (resp.status_code == status.HTTP_200_OK)
assert (resp.data['new_award_count'] == 1) |
class CustomField(BaseObject):
    """Custom field wrapper; arbitrary extra attributes from the API payload
    are attached dynamically.

    Attributes left at None are not considered modified, so they are pruned
    from the dirty-attribute set maintained by the BaseObject machinery.
    """

    def __init__(self, api=None, id=None, value=None, **kwargs):
        self.api = api
        self.id = id
        self.value = value
        for extra_name, extra_value in kwargs.items():
            setattr(self, extra_name, extra_value)
        # Unset (None) attributes must not be flagged as dirty.
        for attribute_name in self.to_dict():
            if getattr(self, attribute_name) is not None:
                continue
            try:
                self._dirty_attributes.remove(attribute_name)
            except KeyError:
                continue
class ListUsersPage():
    """One page of results from a paged user-export download.

    *download* is a callable taking (page_token, max_results) and returning
    a dict with optional 'users' and 'nextPageToken' keys.

    NOTE(review): upstream these accessors may have been ``@property``; as
    written here they are plain methods, so they must be *called* — the
    previous code tested/passed the bound methods themselves.
    """

    def __init__(self, download, page_token, max_results):
        self._download = download
        self._max_results = max_results
        # Fetch this page eagerly so the accessors below are pure lookups.
        self._current = download(page_token, max_results)

    def users(self):
        """Users on this page, wrapped as ExportedUserRecord."""
        return [ExportedUserRecord(user) for user in self._current.get('users', [])]

    def next_page_token(self):
        """Token of the page after this one; '' when this is the last page."""
        return self._current.get('nextPageToken', '')

    def has_next_page(self):
        """Whether another page follows this one.

        Fix: the old ``bool(self.next_page_token)`` tested the truthiness of
        the bound method (always True); the token value must be obtained by
        calling it.
        """
        return bool(self.next_page_token())

    def get_next_page(self):
        """Fetch and return the next page, or None on the last page."""
        if self.has_next_page():
            # Fix: pass the token *value*, not the method object.
            return ListUsersPage(self._download, self.next_page_token(), self._max_results)
        return None

    def iterate_all(self):
        """Iterator over every user across all pages."""
        return _UserIterator(self)
class OptionSeriesScatterClusterDatalabels(Options):
    """Configuration holder for scatter-cluster data labels.

    NOTE(review): each option appears as a getter/setter ``def`` pair with
    the same name — upstream these are presumably ``@property`` /
    ``@x.setter`` pairs whose decorators were stripped; as written the
    setter shadows the getter.  Confirm against upstream.
    """

    def align(self):
        """Horizontal alignment of the label; defaults to 'center'."""
        return self._config_get('center')

    def align(self, text: str):
        self._config(text, js_type=False)

    def enabled(self):
        """Whether cluster data labels are shown; defaults to True."""
        return self._config_get(True)

    def enabled(self, flag: bool):
        self._config(flag, js_type=False)

    def format(self):
        """Label format string; defaults to '{point.clusterPointsAmount}'."""
        return self._config_get('{point.clusterPointsAmount}')

    def format(self, text: str):
        self._config(text, js_type=False)

    def inside(self):
        """Whether the label is drawn inside the marker; defaults to True."""
        return self._config_get(True)

    def inside(self, flag: bool):
        self._config(flag, js_type=False)

    def style(self) -> 'OptionSeriesScatterClusterDatalabelsStyle':
        """Nested style sub-configuration object."""
        return self._config_sub_data('style', OptionSeriesScatterClusterDatalabelsStyle)

    def verticalAlign(self):
        """Vertical alignment of the label; defaults to 'middle'."""
        return self._config_get('middle')

    def verticalAlign(self, text: str):
        self._config(text, js_type=False)
.parametrize('matrix', [matrix])
.parametrize('outFileName', [outfile])
.parametrize('chromosomes', ['chrX', 'chr3R'])
.parametrize('action', ['keep', 'remove', 'mask'])
.parametrize('regions', [bed_file, None])
def test_trivial_run(matrix, outFileName, chromosomes, action, regions):
args = '--matrix {} --outFileName {} --chromosomes {} --action {}'.format(matrix, outFileName.name, chromosomes, action).split()
if regions:
args = '--matrix {} --outFileName {} --regions {} --action {}'.format(matrix, outFileName.name, regions, action).split()
compute(hicAdjustMatrix.main, args, 5) |
class MyComboBox(QComboBox):
    """Editable combo box with a copy/paste/browse context menu and
    Shift+Delete removal of the current entry."""
    # Emitted with the text of an item removed via Shift+Delete.
    remove_item_signal = Signal(str)

    def __init__(self, parent=None):
        QComboBox.__init__(self, parent=parent)
        copy_action = QAction('Copy all', self, statusTip='', triggered=self._on_copy)
        self.addAction(copy_action)
        paste_action = QAction('Paste && replace all', self, statusTip='', triggered=self._on_paste)
        self.addAction(paste_action)
        # Visual separator between the clipboard actions and Browse.
        sep_action = QAction('', self)
        sep_action.setSeparator(True)
        self.addAction(sep_action)
        browse_action = QAction('Browse', self, statusTip='', triggered=self._on_browse)
        self.addAction(browse_action)
        # Expose the QActions above as the right-click context menu.
        self.setContextMenuPolicy(Qt.ActionsContextMenu)

    def _on_browse(self):
        """Open a file dialog, starting in the directory derived from the
        current text, and put the chosen file into the edit field."""
        dialog = QFileDialog(self)
        dialog.setOption(QFileDialog.HideNameFilterDetails, True)
        pkg_dir = os.path.dirname(interpret_path(self.currentText()))
        if pkg_dir:
            dialog.setDirectory(pkg_dir)
        if dialog.exec_():
            fileNames = dialog.selectedFiles()
            self.setEditText(fileNames[0])

    def _on_copy(self):
        """Copy the current item's data to the clipboard."""
        QApplication.clipboard().setText(self.currentData())

    def _on_paste(self):
        """Replace the edit text with the clipboard contents."""
        self.setEditText(QApplication.clipboard().text())

    def keyPressEvent(self, event):
        """Shift+Delete removes items matching the current text and emits
        remove_item_signal; all other keys get default handling.

        NOTE(review): removeItem() shrinks the list while the loop iterates
        the original count() and indices shift — could skip adjacent
        duplicates; confirm intended.
        """
        key_mod = QApplication.keyboardModifiers()
        if ((key_mod & Qt.ShiftModifier) and (event.key() == Qt.Key_Delete)):
            try:
                curr_text = self.currentText()
                if curr_text:
                    for i in range(self.count()):
                        if (curr_text == self.itemText(i)):
                            self.removeItem(i)
                            self.remove_item_signal.emit(curr_text)
                    self.clearEditText()
            except Exception:
                print(traceback.format_exc(1))
        QComboBox.keyPressEvent(self, event)
def test_mesh_fs_gced():
    """Creating and dropping meshes/function spaces must not leak: after
    forced garbage collection, live object counts may grow only by a small
    bounded amount."""
    from firedrake.functionspacedata import FunctionSpaceData
    # Collect twice so reference cycles are fully torn down before counting.
    gc.collect()
    gc.collect()
    meshes_before = howmany((MeshTopology, MeshGeometry))
    fs_before = howmany(FunctionSpaceData)
    for _ in range(10):
        mesh = UnitIntervalMesh(5)
        for family in ['CG', 'DG']:
            space = FunctionSpace(mesh, family, 1)
        del mesh, space
    gc.collect()
    gc.collect()
    meshes_after = howmany((MeshTopology, MeshGeometry))
    fs_after = howmany(FunctionSpaceData)
    assert (meshes_after - meshes_before) < 5
    assert (fs_after - fs_before) < 10
def test_quoting_of_filenames():
    """File paths must be percent-encoded consistently on upload and download."""
    quoted_name = 'prod2_api_v2_reports_query?#2022-06-27T22_24_18.528205Z.json'
    plain_name = 'All 123 - BLUB Extract_new (Export 2021-12-21 18_25)-.140314.csv'
    with requests_mock.mock(case_sensitive=True) as m:
        client = FoundryRestClient()
        # Upload: the logicalPath query parameter must be URL-encoded.
        m.post(url=ANY, status_code=204)
        for name in (quoted_name, plain_name):
            client.upload_dataset_file(dataset_rid='rid', transaction_rid='transaction_rid', path_or_buf=io.BytesIO(), path_in_foundry_dataset=name)
        history = m.request_history
        assert history[0].query == 'logicalPath=prod2_api_v2_reports_query%3F%232022-06-27T22_24_18.528205Z.json'
        assert history[1].query == 'logicalPath=All+123+-+BLUB+Extract_new+%28Export+2021-12-21+18_25%29-.140314.csv'
        # Download: the last path segment must match quote() of the file name.
        m.get(url=ANY, status_code=200, body=io.BytesIO(b'some-content'))
        for name in (quoted_name, plain_name):
            client.download_dataset_file(dataset_rid='rid', output_directory=None, view='master', foundry_file_path=name)
        history = m.request_history
        assert history[2].path.split('/')[-1] == quote(quoted_name)
        assert history[3].path.split('/')[-1] == quote(plain_name)
def test_convert_to_flyte_state():
    """Every recognised status string maps to its Flyte phase; anything else raises."""
    expected = {
        'FAILED': RETRYABLE_FAILURE,
        'TIMEDOUT': RETRYABLE_FAILURE,
        'CANCELED': RETRYABLE_FAILURE,
        'DONE': SUCCEEDED,
        'SUCCEEDED': SUCCEEDED,
        'SUCCESS': SUCCEEDED,
        'RUNNING': RUNNING,
    }
    for state, phase in expected.items():
        assert convert_to_flyte_state(state) == phase
    invalid_state = 'INVALID_STATE'
    # The error message reports the state lower-cased.
    with pytest.raises(Exception, match=f'Unrecognized state: {invalid_state.lower()}'):
        convert_to_flyte_state(invalid_state)
class LiveVideoTargeting(AbstractObject):
    """Graph-API `LiveVideoTargeting` node (generated-SDK style wrapper).

    Holds audience-targeting fields for a live video; the field names and
    their Graph API types are declared in the nested `Field` class.
    """

    def __init__(self, api=None):
        super(LiveVideoTargeting, self).__init__()
        # Type-marker flag used to identify this wrapper class.
        self._isLiveVideoTargeting = True
        self._api = api

    class Field(AbstractObject.Field):
        # Graph API field names exposed by this node.
        age_max = 'age_max'
        age_min = 'age_min'
        excluded_countries = 'excluded_countries'
        geo_locations = 'geo_locations'

    # Maps each field name to its Graph API type string.
    _field_types = {'age_max': 'unsigned int', 'age_min': 'unsigned int', 'excluded_countries': 'list<string>', 'geo_locations': 'TargetingGeoLocation'}

    # NOTE(review): takes `cls` but carries no @classmethod decorator here —
    # presumably stripped in transit; confirm against the generated SDK source.
    def _get_field_enum_info(cls):
        # No enum-typed fields on this node.
        field_enum_info = {}
        return field_enum_info
# NOTE(review): the bare `_required` line below looks like a decorator whose
# leading `@` (and possibly prefix) was lost in transit — confirm upstream.
_required
def is_registrar(view, view_args, view_kwargs, *args, **kwargs):
    """Gate `view` behind registrar-level access for the event in `kwargs`.

    Staff users always pass; otherwise the current user must be a registrar
    for, or have access to, the event identified by `kwargs['event_id']`.

    Raises:
        ForbiddenError: when the current user has none of the above roles.
    """
    user = current_user
    event_id = kwargs['event_id']
    # Staff bypass all event-level permission checks.
    if user.is_staff:
        return view(*view_args, **view_kwargs)
    if (user.is_registrar(event_id) or user.has_event_access(event_id)):
        return view(*view_args, **view_kwargs)
    raise ForbiddenError({'source': ''}, 'Registrar Access is Required.')
class StaticStorageBackend(AbstractStateBackend):
    """Maintenance-mode state backend persisted via Django's staticfiles storage.

    The state lives in a single file named by
    ``settings.MAINTENANCE_MODE_STATE_FILE_NAME``; a missing file means "off".
    """

    def get_value(self):
        """Return the stored boolean state, or False when no state file exists."""
        filename = settings.MAINTENANCE_MODE_STATE_FILE_NAME
        if not staticfiles_storage.exists(filename):
            return False
        with staticfiles_storage.open(filename, 'r') as statefile:
            return self.from_str_to_bool_value(statefile.read())

    def set_value(self, value):
        """Persist *value*, replacing any previously stored state file."""
        filename = settings.MAINTENANCE_MODE_STATE_FILE_NAME
        # Remove any existing file first so save() writes under the same name.
        if staticfiles_storage.exists(filename):
            staticfiles_storage.delete(filename)
        staticfiles_storage.save(filename, ContentFile(self.from_bool_to_str_value(value).encode()))
class TestLedgerApiHandler(ERC1155ClientTestCase):
    """Unit tests for the ERC1155 client skill's ledger_api handler.

    Each test feeds a synthetic LedgerApiMessage into the handler and
    asserts on the exact log line it emits.
    """
    # ledger_api exchanges are with the ledger connection, not another agent.
    is_agent_to_agent_messages = False

    def test_setup(self):
        """setup() is a no-op and enqueues nothing."""
        assert (self.ledger_api_handler.setup() is None)
        self.assert_quantity_in_outbox(0)

    def test_handle_unidentified_dialogue(self):
        """A message with an empty dialogue reference is logged as unidentified."""
        incorrect_dialogue_reference = ('', '')
        incoming_message = cast(LedgerApiMessage, self.build_incoming_message(message_type=LedgerApiMessage, dialogue_reference=incorrect_dialogue_reference, performative=LedgerApiMessage.Performative.BALANCE, ledger_id=self.ledger_id, balance=10))
        with patch.object(self.logger, 'log') as mock_logger:
            self.ledger_api_handler.handle(incoming_message)
        mock_logger.assert_any_call(logging.INFO, f'received invalid ledger_api message={incoming_message}, unidentified dialogue.')

    def test_handle_balance(self):
        """A BALANCE reply in a valid dialogue logs the reported balance."""
        balance = 10
        ledger_api_dialogue = cast(LedgerApiDialogue, self.prepare_skill_dialogue(dialogues=self.ledger_api_dialogues, messages=self.list_of_ledger_api_messages[:1], counterparty=LEDGER_API_ADDRESS))
        incoming_message = cast(LedgerApiMessage, self.build_incoming_message_for_skill_dialogue(dialogue=ledger_api_dialogue, performative=LedgerApiMessage.Performative.BALANCE, ledger_id=self.ledger_id, balance=balance))
        with patch.object(self.logger, 'log') as mock_logger:
            self.ledger_api_handler.handle(incoming_message)
        mock_logger.assert_any_call(logging.INFO, f'starting balance on {self.ledger_id} ledger={incoming_message.balance}.')

    def test_handle_error(self):
        """An ERROR performative is logged with the message and its dialogue."""
        ledger_api_dialogue = cast(LedgerApiDialogue, self.prepare_skill_dialogue(dialogues=self.ledger_api_dialogues, messages=self.list_of_ledger_api_messages[:1]))
        incoming_message = cast(LedgerApiMessage, self.build_incoming_message_for_skill_dialogue(dialogue=ledger_api_dialogue, performative=LedgerApiMessage.Performative.ERROR, code=1))
        with patch.object(self.logger, 'log') as mock_logger:
            self.ledger_api_handler.handle(incoming_message)
        mock_logger.assert_any_call(logging.INFO, f'received ledger_api error message={incoming_message} in dialogue={ledger_api_dialogue}.')

    def test_handle_invalid(self):
        """A performative the handler cannot process triggers a warning."""
        # GET_BALANCE is a request performative, not one the handler expects.
        invalid_performative = LedgerApiMessage.Performative.GET_BALANCE
        incoming_message = cast(LedgerApiMessage, self.build_incoming_message(message_type=LedgerApiMessage, dialogue_reference=('1', ''), performative=invalid_performative, ledger_id=self.ledger_id, address=self.address, to=str(self.skill.skill_context.skill_id)))
        with patch.object(self.logger, 'log') as mock_logger:
            self.ledger_api_handler.handle(incoming_message)
        mock_logger.assert_any_call(logging.WARNING, f'cannot handle ledger_api message of performative={invalid_performative} in dialogue={self.ledger_api_dialogues.get_dialogue(incoming_message)}.')

    def test_teardown(self):
        """teardown() is a no-op and enqueues nothing."""
        assert (self.ledger_api_handler.teardown() is None)
        self.assert_quantity_in_outbox(0)
# NOTE(review): the two bare lines below look like decorators whose leading
# `@` was lost in transit (a route registration and an auth guard) — confirm
# against the original source.
_ns.route('/reschedule_build_chroot/', methods=['POST', 'PUT'])
_authenticated
def reschedule_build_chroot():
    """Re-queue a build task (source build or single chroot) back to 'pending'.

    Expects JSON with `build_id`, `task_id` and `chroot`; replies with
    {'result': 'done'} on success, or {'result': 'noop', 'msg': ...} when
    nothing needed rescheduling.
    """
    response = {}
    build_id = flask.request.json.get('build_id')
    task_id = flask.request.json.get('task_id')
    chroot = flask.request.json.get('chroot')
    try:
        build = ComplexLogic.get_build(build_id)
    except ObjectNotFound:
        response['result'] = 'noop'
        response['msg'] = "Build {} wasn't found".format(build_id)
        return flask.jsonify(response)
    if build.canceled:
        response['result'] = 'noop'
        response['msg'] = 'build was cancelled, ignoring'
        return flask.jsonify(response)
    # Only tasks currently in-flight may be rescheduled.
    run_statuses = set([StatusEnum('starting'), StatusEnum('running')])
    if (task_id == build.task_id):
        # task_id matches the build itself -> this is the source (SRPM) build.
        if (build.source_status in run_statuses):
            app.logger.info('rescheduling source build %s', build.id)
            BuildsLogic.update_state_from_dict(build, {'task_id': task_id, 'status': StatusEnum('pending')})
            db.session.commit()
            response['result'] = 'done'
        else:
            response['result'] = 'noop'
            response['msg'] = 'build is not in running states, ignoring'
    else:
        # Otherwise the task targets one particular chroot of the build.
        build_chroot = build.chroots_dict_by_name.get(chroot)
        if (build_chroot and (build_chroot.status in run_statuses)):
            app.logger.info('rescheduling build {} chroot: {}'.format(build.id, build_chroot.name))
            BuildsLogic.update_state_from_dict(build, {'task_id': task_id, 'chroot': chroot, 'status': StatusEnum('pending')})
            db.session.commit()
            response['result'] = 'done'
        else:
            response['result'] = 'noop'
            response['msg'] = 'build chroot is not in running states, ignoring'
    return flask.jsonify(response)
class TestSuperFencesCustomLegacyArithmatexPreview(util.MdCase):
    """Regression test: the legacy MathJax-preview fence formatter still
    renders correctly but must emit a DeprecationWarning."""
    extension = ['pymdownx.superfences']
    # Register a custom `math` fence using the deprecated preview formatter.
    extension_configs = {'pymdownx.superfences': {'custom_fences': [{'name': 'math', 'class': 'arithmatex', 'format': arithmatex.fence_mathjax_preview_format}]}}

    def test_legacy_arithmatex_preview(self):
        """The math fence renders preview div + script block, with exactly one warning."""
        with warnings.catch_warnings(record=True) as w:
            self.check_markdown('\n    ```math\n    E(\\mathbf{v}, \\mathbf{h}) = -\\sum_{i,j}w_{ij}v_i h_j - \\sum_i b_i v_i - \\sum_j c_j h_j\n    ```\n    ', '\n    <div class="arithmatex">\n    <div class="MathJax_Preview">\n    E(\\mathbf{v}, \\mathbf{h}) = -\\sum_{i,j}w_{ij}v_i h_j - \\sum_i b_i v_i - \\sum_j c_j h_j\n    </div>\n    <script type="math/tex; mode=display">\n    E(\\mathbf{v}, \\mathbf{h}) = -\\sum_{i,j}w_{ij}v_i h_j - \\sum_i b_i v_i - \\sum_j c_j h_j\n    </script>\n    </div>\n    ', True)
        # Exactly one warning, and it is the deprecation of the legacy format.
        self.assertTrue((len(w) == 1))
        self.assertTrue(issubclass(w[(- 1)].category, DeprecationWarning))
# NOTE(review): the bare `.parametrize(...)` line below appears to be a
# `@pytest.mark.parametrize` decorator with its prefix stripped in transit —
# confirm against the original source.
.parametrize('widget_class,kwargs', parameters)
def test_widget_init_config(manager_nospawn, minimal_conf_noscreen, widget_class, kwargs):
    """Instantiate each widget with its kwargs, verify the kwargs round-trip
    as attributes, and confirm the widget mounts in a bar."""
    # Some widgets only work under one backend; skip elsewhere.
    if (widget_class in exclusive_backend):
        if (exclusive_backend[widget_class] != manager_nospawn.backend.name):
            pytest.skip('Unsupported backend')
    widget = widget_class(**kwargs)
    # Drawing is irrelevant here; stub it out.
    widget.draw = no_op
    if isinstance(widget, ImportErrorWidget):
        pytest.skip(f"{kwargs['widgetname']} skipped: ImportError")
    # Every constructor kwarg must be stored verbatim on the instance.
    for (k, v) in kwargs.items():
        assert (getattr(widget, k) == v)
    config = minimal_conf_noscreen
    config.screens = [libqtile.config.Screen(top=libqtile.bar.Bar([widget], 10))]
    manager_nospawn.start(config)
    i = manager_nospawn.c.bar['top'].info()
    # Widgets without a derived name report '<no name>'.
    allowed_names = [widget.name, '<no name>']
    assert (i['widgets'][0]['name'] in allowed_names)
class BucketData(object):
    """Accumulates (datum, label, plain-label, comment) examples and packs
    them into a batch dict for a bucketed seq2seq decoder."""

    def __init__(self):
        self.data_list = []          # raw input data, one entry per example
        self.label_list = []         # encoded label sequences (ints)
        self.label_list_plain = []   # human-readable label strings
        self.comment_list = []       # free-form per-example comments

    def append(self, datum, label, label_plain, comment):
        """Add one example; returns the new number of buffered examples."""
        self.data_list.append(datum)
        self.label_list.append(label)
        self.label_list_plain.append(label_plain)
        self.comment_list.append(comment)
        return len(self.data_list)

    def flush_out(self, bucket_specs, valid_target_length=float('inf'), go_shift=1):
        """Pack the buffered examples into a batch dict and reset the bucket.

        Args:
            bucket_specs: bucket shape specs; only bucket_specs[0][1] is used,
                as the decoder input length labels are padded to.
            valid_target_length: cap on how many target positions get weight
                1.0 (default: no cap).
            go_shift: leading positions (the GO symbol) excluded from weights.

        Returns:
            dict with 'data', 'labels', 'comments', 'decoder_inputs' (list of
            time-major int32 arrays) and 'target_weights' (matching float32
            masks).

        Raises:
            NotImplementedError: if a label exceeds the decoder input length.
        """
        res = {}
        decoder_input_len = bucket_specs[0][1]
        res['data'] = np.array(self.data_list)
        res['labels'] = self.label_list_plain
        res['comments'] = self.comment_list
        target_weights = []
        for l_idx in range(len(self.label_list)):
            label_len = len(self.label_list[l_idx])
            if (label_len <= decoder_input_len):
                # Zero-pad each label up to the decoder length.
                self.label_list[l_idx] = np.concatenate((self.label_list[l_idx], np.zeros((decoder_input_len - label_len), dtype=np.int32)))
                # Weight 1.0 for real target positions (after the GO shift),
                # 0.0 for padding.
                one_mask_len = min((label_len - go_shift), valid_target_length)
                target_weights.append(np.concatenate((np.ones(one_mask_len, dtype=np.float32), np.zeros((decoder_input_len - one_mask_len), dtype=np.float32))))
            else:
                raise NotImplementedError
        # Transpose to time-major: one array of batch values per decoder step.
        res['decoder_inputs'] = [a.astype(np.int32) for a in np.array(self.label_list).T]
        res['target_weights'] = [a.astype(np.float32) for a in np.array(target_weights).T]
        assert (len(res['decoder_inputs']) == len(res['target_weights']))
        (self.data_list, self.label_list, self.label_list_plain, self.comment_list) = ([], [], [], [])
        return res

    def __len__(self):
        return len(self.data_list)

    def __iadd__(self, other):
        """In-place extend with *other*'s examples.

        Fix: this previously returned None, which made `a += b` rebind `a`
        to None — augmented assignment must return the updated object.
        """
        self.data_list += other.data_list
        self.label_list += other.label_list
        self.label_list_plain += other.label_list_plain
        self.comment_list += other.comment_list
        return self

    def __add__(self, other):
        """Return a new BucketData holding both operands' examples."""
        res = BucketData()
        res.data_list = (self.data_list + other.data_list)
        res.label_list = (self.label_list + other.label_list)
        res.label_list_plain = (self.label_list_plain + other.label_list_plain)
        res.comment_list = (self.comment_list + other.comment_list)
        return res
def test_get_messages_with_round(conversation_with_messages):
    """get_messages_with_round(n) returns the messages of the last n rounds."""
    one_round = conversation_with_messages.get_messages_with_round(1)
    assert len(one_round) == 2
    assert [msg.content for msg in one_round] == ['How are you?', "I'm good, thanks"]
    two_rounds = conversation_with_messages.get_messages_with_round(2)
    assert len(two_rounds) == 4
    # The older round comes first.
    assert [msg.content for msg in two_rounds[:2]] == ['Hello', 'Hi']
def test_int():
    """Int-typed config entries coerce floats/numeric strings and reject junk."""
    cfg = Config('testconfig', foo=(1, int, ''), bar=('1', int, ''))
    assert cfg.foo == 1
    assert cfg.bar == 1
    # Valid assignments are coerced to int.
    for given, expected in ((12.1, 12), ('7', 7), ('-23', -23)):
        cfg.foo = given
        assert cfg.foo == expected
    # Non-integer values must be rejected.
    for bad in ([], None, '1e2', '12.1', 'a'):
        with raises(ValueError):
            cfg.foo = bad
def is_sim_folder_ok(sim_folder):
    """Check that *sim_folder* is a usable DispaSET simulation environment.

    The folder must exist and contain both ``Inputs.gdx`` (the pre-processed
    GDX input) and ``UCM_h.gms`` (the GAMS model file).

    :param sim_folder: path to the simulation environment folder
    :return: True when all required files are present, False otherwise
    """
    # Idiom fix: f-strings replace the Py2-era u'' literals and the
    # triple-nested string concatenation; logged messages are unchanged.
    if not os.path.exists(sim_folder):
        logging.error(f'The provided DispaSET simulation environment folder ({sim_folder}) does not exist')
        return False
    if not os.path.exists(os.path.join(sim_folder, 'Inputs.gdx')):
        logging.error(f'There is no Inputs.gdx file within the specified DispaSET simulation environment folder ({sim_folder}). Check that the GDX output is activated in the option file and that no error stated during the pre-processing')
        return False
    if not os.path.exists(os.path.join(sim_folder, 'UCM_h.gms')):
        logging.error(f'There is no UCM_h.gms file within the specified DispaSET simulation environment folder ({sim_folder})')
        return False
    return True
def extractYummytlsWordpressCom(item):
    """Parse release info from a yummytls.wordpress.com feed item.

    Returns None when no chapter/volume is found (or the title is a preview),
    a release message when a known tag matches, and False otherwise.
    """
    vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])
    if not (chp or vol) or 'preview' in item['title'].lower():
        return None
    # Known series tags -> (release name, translation type).
    releases = {'PRC': ('PRC', 'translated'), 'Loiterous': ('Loiterous', 'oel')}
    for tagname, (name, tl_type) in releases.items():
        if tagname in item['tags']:
            return buildReleaseMessageWithType(item, name, vol, chp, frag=frag, postfix=postfix, tl_type=tl_type)
    return False
# NOTE(review): the bare tuple line below and the bare list line inside the
# class look like stripped decorators (a skip-on-ROCm guard and a
# parameterized.expand) whose prefixes were lost in transit — confirm upstream.
((detect_target().name() == 'rocm'), 'Not supported by ROCM.')
class GroupGEMMRcrTestCase(unittest.TestCase):
    """Tests grouped row-column-row GEMM: two independent matmuls compiled and
    executed as one grouped op, checked against torch.nn.functional.linear."""
    ([param(False, 'group_gemm_rcr_run_once', 'float16'), param(True, 'group_gemm_rcr_run_twice', 'float16'), param(False, 'group_gemm_rcr_run_once_fp32', 'float32'), param(False, 'group_gemm_rcr_run_once_bf16', 'bfloat16')])
    def test_group_gemm_rcr(self, run_twice: bool, test_name: str, dtype: str):
        """Compile a grouped GEMM (optionally reusing the op twice) and compare to PyTorch."""
        # Problem sizes for the two groups.
        M = 256
        K1 = 128
        N1 = 60
        K2 = 192
        N2 = 64
        target = detect_target()
        # Grouped GEMM kernels require SM80 (Ampere) or newer.
        if (int(target._arch) < 80):
            _LOGGER.warning('Group Gemm need SM80 HW')
            return
        X1 = Tensor(shape=[M, K1], dtype=dtype, name='x1', is_input=True)
        X2 = Tensor(shape=[M, K2], dtype=dtype, name='x2', is_input=True)
        W1 = Tensor(shape=[N1, K1], dtype=dtype, name='w1', is_input=True)
        W2 = Tensor(shape=[N2, K2], dtype=dtype, name='w2', is_input=True)
        OP = ops.group_gemm_rcr()
        (Y1, Y2) = OP(operand_groups=[[X1, W1], [X2, W2]])
        Y1._attrs['name'] = 'y1'
        Y1._attrs['is_output'] = True
        Y2._attrs['name'] = 'y2'
        Y2._attrs['is_output'] = True
        graph_outputs = [Y1, Y2]
        if run_twice:
            # Second instantiation of the op sharing the same inputs.
            Y3 = ops.group_gemm_rcr()(operand_groups=[[X1, W1]])[0]
            Y3._attrs['name'] = 'y3'
            Y3._attrs['is_output'] = True
            graph_outputs.append(Y3)
        module = compile_model(graph_outputs, target, './tmp', test_name)
        X1_pt = get_random_torch_tensor(shape=(M, K1), dtype=dtype)
        X2_pt = get_random_torch_tensor(shape=(M, K2), dtype=dtype)
        W1_pt = get_random_torch_tensor(shape=(N1, K1), dtype=dtype)
        W2_pt = get_random_torch_tensor(shape=(N2, K2), dtype=dtype)
        # Reference results: linear() computes x @ w.T, matching the RCR layout.
        Y1_pt = torch.nn.functional.linear(X1_pt, W1_pt)
        Y2_pt = torch.nn.functional.linear(X2_pt, W2_pt)
        inputs = {'x1': X1_pt, 'w1': W1_pt, 'x2': X2_pt, 'w2': W2_pt}
        y1 = torch.empty_like(Y1_pt)
        y2 = torch.empty_like(Y2_pt)
        outputs = {'y1': y1, 'y2': y2}
        if run_twice:
            outputs['y3'] = torch.empty_like(y1)
        module.run_with_tensors(inputs, outputs)
        # Loose tolerances account for low-precision dtypes.
        torch.testing.assert_close(Y1_pt, y1, atol=0.1, rtol=0.1)
        torch.testing.assert_close(Y2_pt, y2, atol=0.1, rtol=0.1)
        if run_twice:
            torch.testing.assert_close(Y1_pt, outputs['y3'], atol=0.1, rtol=0.1)
class OptionNavigationAnnotationsoptionsControlpointoptionsStyle(Options):
    """Style options for annotation control points (cursor, fill, stroke).

    NOTE(review): each getter/setter pair below is presumably decorated with
    @property / @<name>.setter in the upstream generated source — the
    decorators appear stripped here (see the bare `_width.setter` line, likely
    a remnant of `@stroke_width.setter`); confirm against the original.
    """

    def cursor(self):
        # Default cursor shown over the control point.
        return self._config_get('pointer')

    def cursor(self, text: str):
        self._config(text, js_type=False)

    def fill(self):
        # Default fill colour.
        return self._config_get('#ffffff')

    def fill(self, text: str):
        self._config(text, js_type=False)

    def stroke(self):
        # Default stroke colour.
        return self._config_get('#000000')

    def stroke(self, text: str):
        self._config(text, js_type=False)

    def stroke_width(self):
        # Default stroke width in pixels.
        return self._config_get(2)

    _width.setter
    def stroke_width(self, num: float):
        self._config(num, js_type=False)
# NOTE(review): the bare `.integration()` line below looks like a pytest
# marker decorator (e.g. `@pytest.mark.integration()`) with its prefix
# stripped in transit — confirm against the original source.
.integration()
def test_two_instances(random_file, fsspec_write_test_folder):
    """Open transactions are visible through the cached filesystem instance
    but not through an instance created with skip_instance_cache=True."""
    random_file = random_file.get()
    fs = FoundryFileSystem(dataset=fsspec_write_test_folder[0], branch='master')
    # Bypasses fsspec's instance cache, so it gets a fresh object.
    fs_other_instance = FoundryFileSystem(dataset=fsspec_write_test_folder[0], branch='master', skip_instance_cache=True)
    with fs.transaction:
        with fs.open(random_file, 'w') as f:
            f.write('content')
        # Same constructor args -> fsspec returns the cached `fs` object,
        # which is mid-transaction and already sees the new file.
        fs_same_instance = FoundryFileSystem(dataset=fsspec_write_test_folder[0], branch='master')
        assert (fs_same_instance._intrans is True)
        assert (fs_same_instance.exists(random_file) is True)
        # The uncached instance is outside the transaction and sees nothing.
        assert (fs_other_instance._intrans is False)
        assert (fs_other_instance.exists(random_file) is False)
    # Cleanup, then reading via the uncached instance must fail.
    fs.delete(random_file)
    with pytest.raises(FileNotFoundError), fs_other_instance.open(random_file, 'r') as f:
        f.read()
class PluginAgent(ConversableAgent):
    """Conversable agent that selects and invokes a tool (plugin) based on the
    user's goal, expecting the LLM to answer in a strict JSON format."""
    # Prompt template; rendered with a tool list before use. Kept verbatim
    # (including its typos) because it is runtime behavior.
    DEFAULT_SYSTEM_MESSAGE = '\n    You are a useful artificial intelligence tool agent assistant.\n    You have been assigned the following list of tools, please select the most appropriate tool to complete the task based on the current user\'s goals:\n        {tool_list}\n\n    *** IMPORTANT REMINDER ***\n    Please read the parameter definition of the tool carefully and extract the specific parameters required to execute the tool from the user gogal.\n    Please output the selected tool name and specific parameter information in json in the following required format, refer to the following example:\n        user: Search for the latest hot financial news\n        assisant: {{\n            "tool_name":"The chart rendering method currently selected by SQL",\n            "args": "{{\n                "query": "latest hot financial news",\n            }}",\n            "thought":"I will use the google-search tool to search for the latest hot financial news."\n        }}\n\n    Please think step by step and return it in the following json format\n        {{\n            "tool_name":"The chart rendering method currently selected by SQL",\n            "args": "{{\n                "arg name1": "arg value1",\n                "arg name2": "arg value2",\n            }}",\n            "thought":"Summary of thoughts to the user"\n        }}\n    Make sure the response is correct json and can be parsed by Python json.loads.\n    '
    # NOTE(review): the template field is `{tool-infos}` (hyphen), while
    # a_system_fill_param supplies `tool_infos` — looks inconsistent; confirm.
    DEFAULT_DESCRIBE = 'You can use the following tools to complete the task objectives, tool information: {tool-infos}'
    NAME = 'ToolScientist'

    def __init__(self, memory: GptsMemory, agent_context: AgentContext, describe: Optional[str]=DEFAULT_DESCRIBE, is_termination_msg: Optional[Callable[([Dict], bool)]]=None, max_consecutive_auto_reply: Optional[int]=None, human_input_mode: Optional[str]='NEVER', **kwargs):
        super().__init__(name=self.NAME, memory=memory, describe=describe, system_message=self.DEFAULT_SYSTEM_MESSAGE, is_termination_msg=is_termination_msg, max_consecutive_auto_reply=max_consecutive_auto_reply, human_input_mode=human_input_mode, agent_context=agent_context, **kwargs)
        # Register tool_call as this agent's reply handler.
        self.register_reply(Agent, PluginAgent.tool_call)
        self.agent_context = agent_context

    async def a_system_fill_param(self):
        # NOTE(review): `self.db_connect` is not defined in this class and the
        # template keys do not match DEFAULT_SYSTEM_MESSAGE's `{tool_list}` —
        # this looks like copied-over DB-agent code; confirm upstream.
        params = {'tool_infos': self.db_connect.get_table_info(), 'dialect': self.db_connect.db_type}
        self.update_system_message(self.DEFAULT_SYSTEM_MESSAGE.format(**params))

    async def tool_call(self, message: Optional[str]=None, sender: Optional[Agent]=None, reviewer: Optional[Agent]=None, config: Optional[Union[(Dict, Literal[False])]]=None):
        """Parse the LLM reply: succeed only when exactly one JSON object is found."""
        json_objects = find_json_objects(message)
        fail_reason = 'The required json format answer was not generated.'
        json_count = len(json_objects)
        response_success = True
        view = None
        content = None
        if (json_count != 1):
            # Zero or multiple JSON objects -> ambiguous answer, mark failure.
            response_success = False
        else:
            try:
                view = ''
            except Exception as e:
                # NOTE(review): `content` is still None here, so this error
                # view renders 'None'; the try body also cannot raise — this
                # branch looks like dead/incomplete code; confirm upstream.
                view = f'''```vis-convert-error
                {content}
                ```'''
        return (True, {'is_exe_success': response_success, 'content': content, 'view': view})
def email(entry, option_key='Email Address', **kwargs):
    """Validate *entry* as an email address and return it unchanged.

    Raises ValueError (with a translatable message) when the entry is empty
    or fails address validation.
    """
    if not entry:
        raise ValueError(_('Email address field empty!'))
    if not validate_email_address(entry):
        raise ValueError(_("That isn't a valid {option_key}!").format(option_key=option_key))
    return entry
class OptionSeriesColumnrangeMarkerStates(Options):
    """Sub-options container for the `marker.states` node of a columnrange series.

    NOTE(review): these accessors are presumably decorated with @property in
    the upstream generated source — the decorators appear stripped here;
    confirm against the original file.
    """

    def hover(self) -> 'OptionSeriesColumnrangeMarkerStatesHover':
        # Lazily builds and returns the nested `hover` options object.
        return self._config_sub_data('hover', OptionSeriesColumnrangeMarkerStatesHover)

    def normal(self) -> 'OptionSeriesColumnrangeMarkerStatesNormal':
        # Lazily builds and returns the nested `normal` options object.
        return self._config_sub_data('normal', OptionSeriesColumnrangeMarkerStatesNormal)

    def select(self) -> 'OptionSeriesColumnrangeMarkerStatesSelect':
        # Lazily builds and returns the nested `select` options object.
        return self._config_sub_data('select', OptionSeriesColumnrangeMarkerStatesSelect)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.