| blob_id (string, len 40) | directory_id (string, len 40) | path (string, len 4-721) | content_id (string, len 40) | detected_licenses (list, len 0-57) | license_type (string, 2 classes) | repo_name (string, len 5-91) | snapshot_id (string, len 40) | revision_id (string, len 40) | branch_name (string, 321 classes) | visit_date (timestamp[ns], 2016-08-12 09:31:09 to 2023-09-06 10:45:07) | revision_date (timestamp[ns], 2010-09-28 14:01:40 to 2023-09-06 06:22:19) | committer_date (timestamp[ns], 2010-09-28 14:01:40 to 2023-09-06 06:22:19) | github_id (int64, 426 to 681M) | star_events_count (int64, 101 to 243k) | fork_events_count (int64, 0 to 110k) | gha_license_id (string, 23 classes) | gha_event_created_at (timestamp[ns], 2012-06-28 18:51:49 to 2023-09-14 21:59:16, nullable) | gha_created_at (timestamp[ns], 2008-02-11 22:55:26 to 2023-08-10 11:14:58, nullable) | gha_language (string, 147 classes) | src_encoding (string, 26 classes) | language (string, 2 classes) | is_vendor (bool) | is_generated (bool) | length_bytes (int64, 6 to 10.2M) | extension (string, 115 classes) | filename (string, len 3-113) | content (string, len 6-10.2M) |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
| cb2478a5255dc00006b64d19687fcc61e6587261 | 5e601244fbf32ee5190fb5210a0cd334473a0abe | /functions/performance/concurrence/celery/celeryOps.py | e487e0e0227ad1661ff2af5467d499341f1a2e42 | [] | no_license | DingGuodong/LinuxBashShellScriptForOps | 69ebe45cf3f92b741a078b9b78c2600328ce9b9e | b2ca1e4c870626dd078d447e2d1479b08602bdf6 | refs/heads/master | 2023-08-21T20:53:40.617397 | 2023-07-17T01:41:05 | 2023-07-17T01:41:05 | 57,015,255 | 453 | 343 | null | 2023-02-16T01:29:23 | 2016-04-25T05:55:28 | Python | UTF-8 | Python | false | false | 650 | py | celeryOps.py |
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
Created by PyCharm.
File: LinuxBashShellScriptForOps:celeryOps.py
User: Guodong
Create Date: 2017/1/13
Create Time: 18:14
"""
from celery import Celery
import time
# redis://:password@hostname:port/db_number
# transport://userid:password@hostname:port/virtual_host
broker = 'amqp://rabbit:rabbitmq@10.20.0.129:5672//'
backend = 'redis://10.20.0.129:6379/0'
celery = Celery('tasks', broker=broker, backend=backend)
@celery.task
def sendmail(mail):
print('sending mail to %s...' % mail['to'])
time.sleep(2.0)
print('mail sent.')
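# Usage sketch (added for illustration, not part of the original file): with
# a worker running, e.g. `celery -A celeryOps worker --loglevel=info`, the
# task can be enqueued from another process and its (None) return value
# fetched from the Redis result backend.
if __name__ == '__main__':
    async_result = sendmail.delay({'to': 'celery@python.org'})
    print(async_result.get(timeout=10))  # blocks until the worker finishes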
|
| fa3039c1bda18ec361c6feeecae8a3d560a53673 | e3eecf0f0cc96bc983cb16126001ce727e8a9e67 | /Lilygo-7000g/MicroPython_LoBo/sd.py | fa8b57c8dbdcc18e544a84de7a3870279096c8df | [] | no_license | mudmin/AnotherMaker | f18137b838c43f9a93319bd94a676624d612381f | f159ff3a6aefe54d38ca2923356e14346070a2f1 | refs/heads/master | 2023-09-04T04:58:12.002916 | 2023-08-23T19:26:03 | 2023-08-23T19:26:03 | 202,879,733 | 179 | 60 | null | 2022-04-09T19:55:31 | 2019-08-17T12:43:19 | C++ | UTF-8 | Python | false | false | 273 | py | sd.py |
import os
import uos
import machine
# The default SD SPI pins in the LoBo-compiled firmware are not the pins
# wired to the SD slot on the T-SIM7000, so we configure them explicitly
# through uos before mounting the card.
uos.sdconfig(uos.SDMODE_SPI, clk=14, mosi=15, miso=2, cs=13, maxspeed=16)
os.mountsd()
os.listdir('/sd')
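# Defensive variant (added for illustration, using only the calls above):
# mounting raises OSError when no card is inserted or the SPI wiring is
# wrong, so a guarded version fails more gracefully:
# try:
#     os.mountsd()
#     print(os.listdir('/sd'))
# except OSError as exc:
#     print('SD card not available:', exc)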
|
| 18ab42dcf92cf65ab02d0242d77368f3ab7c8f2c | af101b467134e10270bb72d02f41f07daa7f57d8 | /tests/test_datasets/test_transforms/test_formatting.py | b1e146c20491d6327857a3843eb6fa97c4fcd65d | ["Apache-2.0"] | permissive | open-mmlab/mmagic | 4d864853417db300de4dfe7e83ce380fd1557a23 | a382f143c0fd20d227e1e5524831ba26a568190d | refs/heads/main | 2023-08-31T14:40:24.936423 | 2023-08-30T05:05:56 | 2023-08-30T05:05:56 | 203,999,962 | 1,370 | 192 | Apache-2.0 | 2023-09-14T11:39:18 | 2019-08-23T13:04:29 | Jupyter Notebook | UTF-8 | Python | false | false | 3,948 | py | test_formatting.py |
# Copyright (c) OpenMMLab. All rights reserved.
import numpy as np
import torch
from mmcv.transforms import to_tensor
from mmagic.datasets.transforms import PackInputs
from mmagic.structures.data_sample import DataSample
def assert_tensor_equal(img, ref_img, ratio_thr=0.999):
"""Check if img and ref_img are matched approximately."""
assert img.shape == ref_img.shape
assert img.dtype == ref_img.dtype
area = ref_img.shape[-1] * ref_img.shape[-2]
diff = torch.abs(img - ref_img)
assert torch.sum(diff <= 1) / float(area) > ratio_thr
def test_pack_inputs():
pack_inputs = PackInputs(meta_keys='a', data_keys='numpy')
assert repr(pack_inputs) == 'PackInputs'
ori_results = dict(
img=np.random.rand(64, 64, 3),
gt=[np.random.rand(64, 61, 3),
np.random.rand(64, 61, 3)],
img_lq=np.random.rand(64, 64, 3),
ref=np.random.rand(64, 62, 3),
ref_lq=np.random.rand(64, 62, 3),
mask=np.random.rand(64, 63, 3),
gt_heatmap=np.random.rand(64, 65, 3),
gt_unsharp=np.random.rand(64, 65, 3),
merged=np.random.rand(64, 64, 3),
trimap=np.random.rand(64, 66, 3),
alpha=np.random.rand(64, 67, 3),
fg=np.random.rand(64, 68, 3),
bg=np.random.rand(64, 69, 3),
img_shape=(64, 64),
a='b',
numpy=np.random.rand(48, 48, 3))
results = ori_results.copy()
packed_results = pack_inputs(results)
target_keys = ['inputs', 'data_samples']
assert set(target_keys).issubset(set(packed_results.keys()))
data_sample = packed_results['data_samples']
assert isinstance(data_sample, DataSample)
assert data_sample.img_shape == (64, 64)
assert data_sample.a == 'b'
numpy_tensor = to_tensor(ori_results['numpy'])
numpy_tensor = numpy_tensor.permute(2, 0, 1)
assert_tensor_equal(data_sample.numpy, numpy_tensor)
gt_tensors = [to_tensor(v) for v in ori_results['gt']]
gt_tensors = [v.permute(2, 0, 1) for v in gt_tensors]
gt_tensor = torch.stack(gt_tensors, dim=0)
assert_tensor_equal(data_sample.gt_img, gt_tensor)
img_lq_tensor = to_tensor(ori_results['ref'])
img_lq_tensor = img_lq_tensor.permute(2, 0, 1)
assert_tensor_equal(data_sample.ref_img, img_lq_tensor)
ref_lq_tensor = to_tensor(ori_results['ref'])
ref_lq_tensor = ref_lq_tensor.permute(2, 0, 1)
assert_tensor_equal(data_sample.ref_img, ref_lq_tensor)
ref_tensor = to_tensor(ori_results['ref'])
ref_tensor = ref_tensor.permute(2, 0, 1)
assert_tensor_equal(data_sample.ref_img, ref_tensor)
mask_tensor = to_tensor(ori_results['mask'])
mask_tensor = mask_tensor.permute(2, 0, 1)
assert_tensor_equal(data_sample.mask, mask_tensor)
gt_heatmap_tensor = to_tensor(ori_results['gt_heatmap'])
gt_heatmap_tensor = gt_heatmap_tensor.permute(2, 0, 1)
assert_tensor_equal(data_sample.gt_heatmap, gt_heatmap_tensor)
gt_unsharp_tensor = to_tensor(ori_results['gt_heatmap'])
gt_unsharp_tensor = gt_unsharp_tensor.permute(2, 0, 1)
assert_tensor_equal(data_sample.gt_heatmap, gt_unsharp_tensor)
gt_merged_tensor = to_tensor(ori_results['merged'])
gt_merged_tensor = gt_merged_tensor.permute(2, 0, 1)
assert_tensor_equal(data_sample.gt_merged, gt_merged_tensor)
trimap_tensor = to_tensor(ori_results['trimap'])
trimap_tensor = trimap_tensor.permute(2, 0, 1)
assert_tensor_equal(data_sample.trimap, trimap_tensor)
gt_alpha_tensor = to_tensor(ori_results['alpha'])
gt_alpha_tensor = gt_alpha_tensor.permute(2, 0, 1)
assert_tensor_equal(data_sample.gt_alpha, gt_alpha_tensor)
gt_fg_tensor = to_tensor(ori_results['fg'])
gt_fg_tensor = gt_fg_tensor.permute(2, 0, 1)
assert_tensor_equal(data_sample.gt_fg, gt_fg_tensor)
gt_bg_tensor = to_tensor(ori_results['bg'])
gt_bg_tensor = gt_bg_tensor.permute(2, 0, 1)
assert_tensor_equal(data_sample.gt_bg, gt_bg_tensor)
|
| b5beae7924a7fa4fc87ec4a356557e073fda339c | c0f0cab1645b4770366bddcb31f8807ea6b3dff3 | /rexmex/metrics/coverage.py | f4a14197d74bde4b6c6b8e991734c8c32cdda093 | ["Apache-2.0"] | permissive | AstraZeneca/rexmex | 376eab25082cbe719a27681bfa4a1dd359b58860 | 9a4538f8529a6b022ad848a1c47ed21b034f7171 | refs/heads/main | 2023-06-07T22:30:04.684295 | 2023-06-02T13:57:57 | 2023-06-02T13:57:57 | 420,948,708 | 266 | 25 | null | 2023-08-22T07:11:27 | 2021-10-25T08:56:25 | Python | UTF-8 | Python | false | false | 2,916 | py | coverage.py |
from typing import List, Tuple, Union
import numpy as np
def user_coverage(
possible_users_items: Tuple[List[Union[int, str]], List[Union[int, str]]],
recommendations: List[Tuple[Union[int, str], Union[int, str]]],
) -> float:
"""
Calculates the coverage value for users in possible_users_items[0] given the collection of recommendations.
Recommendations over users/items not in possible_users_items are discarded.
Args:
possible_users_items (Tuple[List[Union[int, str]], List[Union[int, str]]]): contains exactly TWO sub-lists,
first one with users, second with items
recommendations (List[Tuple[Union[int, str], Union[int, str]]]): contains user-item recommendation tuples,
e.g. [(user1, item1),(user2, item2),]
Returns: user coverage (float): a metric showing the fraction of users who got at least one recommendation out
of all possible users.
"""
if len(possible_users_items) != 2:
raise ValueError("possible_users_items must be of length 2: [users, items]")
if np.any([len(x) == 0 for x in possible_users_items]):
raise ValueError("possible_users_items cannot hold empty lists!")
possible_users = set(possible_users_items[0])
users_with_recommendations = set([x[0] for x in recommendations])
users_without_recommendations = possible_users.difference(users_with_recommendations)
user_cov = 1 - len(users_without_recommendations) / len(possible_users)
return round(user_cov, 3)
def item_coverage(
possible_users_items: Tuple[List[Union[int, str]], List[Union[int, str]]],
recommendations: List[Tuple[Union[int, str], Union[int, str]]],
) -> float:
"""
Calculates the coverage value for items in possible_users_items[1] given the collection of recommendations.
Recommendations over users/items not in possible_users_items are discarded.
Args:
possible_users_items (Tuple[List[Union[int, str]], List[Union[int, str]]]): contains exactly TWO sub-lists,
first one with users, second with items
recommendations (List[Tuple[Union[int, str], Union[int, str]]]): contains user-item recommendation tuples,
e.g. [(user1, item1),(user2, item2),]
Returns: item coverage (float): a metric showing the fraction of items which got recommended at least once.
"""
if len(possible_users_items) != 2:
raise ValueError("possible_users_items must be of length 2: [users, items]")
if np.any([len(x) == 0 for x in possible_users_items]):
raise ValueError("possible_users_items cannot hold empty lists!")
possible_items = set(possible_users_items[1])
items_with_recommendations = set([x[1] for x in recommendations])
items_without_recommendations = possible_items.difference(items_with_recommendations)
item_cov = 1 - len(items_without_recommendations) / len(possible_items)
return round(item_cov, 3)
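# Usage sketch (added for illustration): two users, three items, and
# recommendations that reach one user and two items.
if __name__ == "__main__":
    users_items = (["u1", "u2"], ["i1", "i2", "i3"])
    recs = [("u1", "i1"), ("u1", "i2")]
    print(user_coverage(users_items, recs))  # 0.5: one of two users covered
    print(item_coverage(users_items, recs))  # 0.667: two of three items covered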
|
| 28eb5865ffec93eb098e7bd7752444abd134617e | 578db86c51d44ebddd0dc7b1738985b3dc69eb74 | /corehq/apps/sso/tests/generator.py | afed5371241aceb44cf4e828aefe605cb67214d8 | ["BSD-3-Clause"] | permissive | dimagi/commcare-hq | a43c7dd32b5f89c89fd5aa1b1359ab7301f4ff6b | e7391ddae1af1dbf118211ecb52c83fc508aa656 | refs/heads/master | 2023-08-16T22:38:27.853437 | 2023-08-16T19:07:19 | 2023-08-16T19:07:19 | 247,278 | 499 | 203 | BSD-3-Clause | 2023-09-14T19:03:24 | 2009-07-09T17:00:07 | Python | UTF-8 | Python | false | false | 4,331 | py | generator.py |
from django.core.files.uploadedfile import SimpleUploadedFile
from django_prbac.models import Role
from django.contrib.sessions.middleware import SessionMiddleware
from corehq.apps.accounting.models import (
SoftwarePlan,
SoftwarePlanEdition,
SoftwarePlanVisibility,
SoftwareProductRate,
SoftwarePlanVersion,
)
from corehq.apps.sso import certificates
from corehq.apps.accounting.tests import generator as accounting_gen
from corehq.util.test_utils import unit_testing_only
from corehq.apps.sso.models import (
IdentityProvider,
)
@unit_testing_only
def create_idp(slug, account, include_certs=False):
idp = IdentityProvider(
name=f"Azure AD for {account.name}",
slug=slug,
owner=account,
)
idp.save()
if include_certs:
idp.create_service_provider_certificate()
idp.entity_id = "https://testidp.com/saml2/entity_id"
idp.login_url = "https://testidp.com/saml2/login"
idp.logout_url = "https://testidp.com/saml2/logout"
key_pair = certificates.create_key_pair()
cert = certificates.create_self_signed_cert(key_pair)
idp.idp_cert_public = certificates.get_public_key(cert)
idp.date_idp_cert_expiration = certificates.get_expiration_date(cert)
idp.save()
return idp
@unit_testing_only
def get_billing_account_for_idp():
billing_contact = accounting_gen.create_arbitrary_web_user_name()
dimagi_user = accounting_gen.create_arbitrary_web_user_name(is_dimagi=True)
return accounting_gen.billing_account(
dimagi_user, billing_contact, is_customer_account=True
)
@unit_testing_only
def get_enterprise_plan():
enterprise_plan = SoftwarePlan.objects.create(
name="Helping Earth INGO Enterprise Plan",
description="Enterprise plan for Helping Earth",
edition=SoftwarePlanEdition.ENTERPRISE,
visibility=SoftwarePlanVisibility.INTERNAL,
is_customer_software_plan=True,
)
first_product_rate = SoftwareProductRate.objects.create(
monthly_fee=3000,
name="HQ Enterprise"
)
return SoftwarePlanVersion.objects.create(
plan=enterprise_plan,
role=Role.objects.first(),
product_rate=first_product_rate
)
@unit_testing_only
def create_request_session(request, use_saml_sso=False, use_oidc_sso=False):
def get_response(request):
raise AssertionError("should not get here")
SessionMiddleware(get_response).process_request(request)
request.session.save()
if use_saml_sso:
request.session['samlSessionIndex'] = '_7c84c96e-8774-4e64-893c-06f91d285100'
if use_oidc_sso:
request.session["oidc_state"] = '_7c84c96e-8774-4e64-893c-06f91d285100'
@unit_testing_only
def store_full_name_in_saml_user_data(request, first_name, last_name):
request.session['samlUserdata'] = {
'http://schemas.xmlsoap.org/ws/2005/05/identity/claims/givenname': [first_name],
'http://schemas.xmlsoap.org/ws/2005/05/identity/claims/surname': [last_name],
}
@unit_testing_only
def store_display_name_in_saml_user_data(request, display_name):
request.session['samlUserdata'] = {
'http://schemas.microsoft.com/identity/claims/displayname': [display_name],
}
@unit_testing_only
def store_full_name_in_oidc_user_data(request, first_name, last_name):
request.session['oidcUserData'] = {
'given_name': first_name,
'family_name': last_name,
}
@unit_testing_only
def store_display_name_in_oidc_user_data(request, display_name):
request.session['oidcUserData'] = {
'name': display_name,
}
@unit_testing_only
def get_public_cert_file(expiration_in_seconds=certificates.DEFAULT_EXPIRATION):
key_pair = certificates.create_key_pair()
cert = certificates.create_self_signed_cert(
key_pair,
expiration_in_seconds
)
cert_bytes = certificates.crypto.dump_certificate(
certificates.crypto.FILETYPE_PEM,
cert
)
return SimpleUploadedFile(
"certificate.cer",
cert_bytes,
content_type="application/x-x509-ca-cert",
)
@unit_testing_only
def get_bad_cert_file(bad_cert_data):
return SimpleUploadedFile(
"certificate.cer",
bad_cert_data,
content_type="application/x-x509-ca-cert",
)
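# Usage sketch (added for illustration): the helpers above compose naturally
# inside a test setup, e.g.:
# account = get_billing_account_for_idp()
# idp = create_idp('azure-test', account, include_certs=True)
# plan_version = get_enterprise_plan()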
|
| 9021b6261675932351a66e9aef6d88f240751c8d | 704976ea552111c6a5af9cd7cb62b9d9abaf3996 | /rpython/jit/metainterp/test/test_fficall.py | 01c44df61676ed9997ba934d0b23d5435250a811 | ["BSD-3-Clause"] | permissive | mesalock-linux/mesapy | 4f02c5819ce7f2f6e249d34840f1aa097577645d | ed546d59a21b36feb93e2309d5c6b75aa0ad95c9 | refs/heads/mesapy2.7 | 2023-08-16T21:33:02.239581 | 2019-08-13T10:29:43 | 2019-08-13T18:06:45 | 136,080,721 | 396 | 33 | NOASSERTION | 2020-04-01T03:05:18 | 2018-06-04T20:45:17 | Python | UTF-8 | Python | false | false | 15,855 | py | test_fficall.py |
import py
from _pytest.monkeypatch import monkeypatch
import sys
import ctypes, math
from rpython.rtyper.lltypesystem import lltype, rffi
from rpython.rtyper.annlowlevel import llhelper
from rpython.jit.metainterp.test.support import LLJitMixin
from rpython.jit.codewriter.longlong import is_longlong, is_64_bit
from rpython.rlib import jit
from rpython.rlib import jit_libffi
from rpython.rlib.jit_libffi import (types, CIF_DESCRIPTION, FFI_TYPE_PP,
jit_ffi_call)
from rpython.rlib.unroll import unrolling_iterable
from rpython.rlib.rarithmetic import intmask, r_longlong, r_singlefloat, r_uint
from rpython.rlib.longlong2float import float2longlong
def get_description(atypes, rtype):
p = lltype.malloc(CIF_DESCRIPTION, len(atypes),
flavor='raw', immortal=True)
p.abi = 1 # default
p.nargs = len(atypes)
p.rtype = rtype
p.atypes = lltype.malloc(FFI_TYPE_PP.TO, len(atypes),
flavor='raw', immortal=True)
for i in range(len(atypes)):
p.atypes[i] = atypes[i]
return p
class FakeFFI(object):
"""
Context manager to monkey patch jit_libffi with our custom "libffi-like"
function
"""
def __init__(self, fake_call_impl_any):
self.fake_call_impl_any = fake_call_impl_any
self.monkey = monkeypatch()
def __enter__(self, *args):
self.monkey.setattr(jit_libffi, 'jit_ffi_call_impl_any', self.fake_call_impl_any)
def __exit__(self, *args):
self.monkey.undo()
class FfiCallTests(object):
def _run(self, atypes, rtype, avalues, rvalue,
expected_call_release_gil_i=1,
expected_call_release_gil_f=0,
expected_call_release_gil_n=0,
expected_call_may_force_f=0,
supports_floats=True,
supports_longlong=False,
supports_singlefloats=False):
cif_description = get_description(atypes, rtype)
def verify(*args):
for a, exp_a in zip(args, avalues):
if (lltype.typeOf(exp_a) == rffi.ULONG and
lltype.typeOf(a) == lltype.Signed):
a = rffi.cast(rffi.ULONG, a)
assert a == exp_a
return rvalue
FUNC = lltype.FuncType([lltype.typeOf(avalue) for avalue in avalues],
lltype.typeOf(rvalue))
func = lltype.functionptr(FUNC, 'verify', _callable=verify)
func_addr = rffi.cast(rffi.VOIDP, func)
for i in range(len(avalues)):
cif_description.exchange_args[i] = (i+1) * 16
cif_description.exchange_result = (len(avalues)+1) * 16
unroll_avalues = unrolling_iterable(avalues)
BIG_ENDIAN = (sys.byteorder == 'big')
def fake_call_impl_any(cif_description, func_addr, exchange_buffer):
ofs = 16
for avalue in unroll_avalues:
TYPE = rffi.CArray(lltype.typeOf(avalue))
data = rffi.ptradd(exchange_buffer, ofs)
got = rffi.cast(lltype.Ptr(TYPE), data)[0]
if lltype.typeOf(avalue) is lltype.SingleFloat:
got = float(got)
avalue = float(avalue)
elif (lltype.typeOf(avalue) is rffi.SIGNEDCHAR or
lltype.typeOf(avalue) is rffi.UCHAR):
got = intmask(got)
avalue = intmask(avalue)
assert got == avalue
ofs += 16
write_to_ofs = 0
if rvalue is not None:
write_rvalue = rvalue
if BIG_ENDIAN:
if (lltype.typeOf(write_rvalue) is rffi.SIGNEDCHAR or
lltype.typeOf(write_rvalue) is rffi.UCHAR):
# 'write_rvalue' is an int type smaller than Signed
write_to_ofs = rffi.sizeof(rffi.LONG) - 1
else:
write_rvalue = 12923 # ignored
TYPE = rffi.CArray(lltype.typeOf(write_rvalue))
data = rffi.ptradd(exchange_buffer, ofs)
rffi.cast(lltype.Ptr(TYPE), data)[write_to_ofs] = write_rvalue
def f(i):
exbuf = lltype.malloc(rffi.CCHARP.TO, (len(avalues)+2) * 16,
flavor='raw')
targetptr = rffi.ptradd(exbuf, 16)
for avalue in unroll_avalues:
TYPE = rffi.CArray(lltype.typeOf(avalue))
if i >= 9: # a guard that can fail
pass
rffi.cast(lltype.Ptr(TYPE), targetptr)[0] = avalue
targetptr = rffi.ptradd(targetptr, 16)
jit_ffi_call(cif_description, func_addr, exbuf)
if rvalue is None:
res = 654321
else:
TYPE = rffi.CArray(lltype.typeOf(rvalue))
res = rffi.cast(lltype.Ptr(TYPE), targetptr)[0]
lltype.free(exbuf, flavor='raw')
if lltype.typeOf(res) is lltype.SingleFloat:
res = float(res)
return res
def matching_result(res, rvalue):
if rvalue is None:
return res == 654321
if isinstance(rvalue, r_singlefloat):
rvalue = float(rvalue)
if lltype.typeOf(rvalue) is rffi.ULONG:
res = intmask(res)
rvalue = intmask(rvalue)
return res == rvalue
with FakeFFI(fake_call_impl_any):
res = f(-42)
assert matching_result(res, rvalue)
res = self.interp_operations(f, [-42],
supports_floats = supports_floats,
supports_longlong = supports_longlong,
supports_singlefloats = supports_singlefloats)
if is_longlong(FUNC.RESULT):
# longlongs are returned as floats, but that's just
# an inconvenience of interp_operations(). Normally both
# longlong and floats are passed around as longlongs.
res = float2longlong(res)
assert matching_result(res, rvalue)
self.check_operations_history(call_may_force_i=0,
call_may_force_f=expected_call_may_force_f,
call_may_force_n=0,
call_release_gil_i=expected_call_release_gil_i,
call_release_gil_f=expected_call_release_gil_f,
call_release_gil_n=expected_call_release_gil_n)
##################################################
driver = jit.JitDriver(reds=['i'], greens=[])
def main():
i = 0
while 1:
driver.jit_merge_point(i=i)
res = f(i)
i += 1
if i == 12:
return res
self.meta_interp(main, [])
def test_simple_call_int(self):
self._run([types.signed] * 2, types.signed, [456, 789], -42)
def test_many_arguments(self):
for i in [0, 6, 20]:
self._run([types.signed] * i, types.signed,
[-123456*j for j in range(i)],
-42434445)
def test_simple_call_float(self, **kwds):
kwds.setdefault('supports_floats', True)
kwds['expected_call_release_gil_f'] = kwds.pop('expected_call_release_gil', 1)
kwds['expected_call_release_gil_i'] = 0
self._run([types.double] * 2, types.double, [45.6, 78.9], -4.2, **kwds)
def test_simple_call_longlong(self, **kwds):
kwds.setdefault('supports_longlong', True)
if is_64_bit:
kwds['expected_call_release_gil_i'] = kwds.pop('expected_call_release_gil', 1)
else:
kwds['expected_call_release_gil_f'] = kwds.pop('expected_call_release_gil', 1)
kwds['expected_call_release_gil_i'] = 0
maxint32 = 2147483647
a = r_longlong(maxint32) + 1
b = r_longlong(maxint32) + 2
self._run([types.slonglong] * 2, types.slonglong, [a, b], a, **kwds)
def test_simple_call_singlefloat_args(self, **kwds):
kwds.setdefault('supports_singlefloats', True)
kwds['expected_call_release_gil_f'] = kwds.pop('expected_call_release_gil', 1)
kwds['expected_call_release_gil_i'] = 0
self._run([types.float] * 2, types.double,
[r_singlefloat(10.5), r_singlefloat(31.5)],
-4.5, **kwds)
def test_simple_call_singlefloat(self, **kwds):
kwds.setdefault('supports_singlefloats', True)
kwds['expected_call_release_gil_i'] = kwds.pop('expected_call_release_gil', 1)
self._run([types.float] * 2, types.float,
[r_singlefloat(10.5), r_singlefloat(31.5)],
r_singlefloat(-4.5), **kwds)
def test_simple_call_longdouble(self):
# longdouble is not supported, so we expect NOT to generate a call_release_gil
self._run([types.longdouble] * 2, types.longdouble, [12.3, 45.6], 78.9,
expected_call_release_gil_i=0, expected_call_release_gil_f=0,
)
def test_returns_none(self):
self._run([types.signed] * 2, types.void, [456, 789], None,
expected_call_release_gil_i=0, expected_call_release_gil_n=1)
def test_returns_signedchar(self):
self._run([types.sint8], types.sint8,
[rffi.cast(rffi.SIGNEDCHAR, -28)],
rffi.cast(rffi.SIGNEDCHAR, -42))
def test_handle_unsigned(self):
self._run([types.ulong], types.ulong,
[rffi.cast(rffi.ULONG, r_uint(sys.maxint + 91348))],
rffi.cast(rffi.ULONG, r_uint(sys.maxint + 4242)))
def test_handle_unsignedchar(self):
self._run([types.uint8], types.uint8,
[rffi.cast(rffi.UCHAR, 191)],
rffi.cast(rffi.UCHAR, 180))
def _add_libffi_types_to_ll2types_maybe(self):
# not necessary on the llgraph backend, but needed for x86.
# see rpython/jit/backend/x86/test/test_fficall.py
pass
def test_guard_not_forced_fails(self):
self._add_libffi_types_to_ll2types_maybe()
FUNC = lltype.FuncType([lltype.Signed], lltype.Signed)
cif_description = get_description([types.slong], types.slong)
cif_description.exchange_args[0] = 16
cif_description.exchange_result = 32
ARRAY = lltype.Ptr(rffi.CArray(lltype.Signed))
@jit.dont_look_inside
def fn(n):
if n >= 50:
exctx.m = exctx.topframeref().n # forces the frame
return n*2
# this function simulates what a real libffi_call does: reading from
# the buffer, calling a function (which can potentially call callbacks
# and force frames) and write back to the buffer
def fake_call_impl_any(cif_description, func_addr, exchange_buffer):
# read the args from the buffer
data_in = rffi.ptradd(exchange_buffer, 16)
n = rffi.cast(ARRAY, data_in)[0]
#
# logic of the function
func_ptr = rffi.cast(lltype.Ptr(FUNC), func_addr)
n = func_ptr(n)
#
# write the result to the buffer
data_out = rffi.ptradd(exchange_buffer, 32)
rffi.cast(ARRAY, data_out)[0] = n
def do_call(n):
func_ptr = llhelper(lltype.Ptr(FUNC), fn)
exbuf = lltype.malloc(rffi.CCHARP.TO, 48, flavor='raw', zero=True)
data_in = rffi.ptradd(exbuf, 16)
rffi.cast(ARRAY, data_in)[0] = n
jit_ffi_call(cif_description, func_ptr, exbuf)
data_out = rffi.ptradd(exbuf, 32)
res = rffi.cast(ARRAY, data_out)[0]
lltype.free(exbuf, flavor='raw')
return res
#
#
class XY:
pass
class ExCtx:
pass
exctx = ExCtx()
myjitdriver = jit.JitDriver(greens = [], reds = ['n'])
def f():
n = 0
while n < 100:
myjitdriver.jit_merge_point(n=n)
xy = XY()
xy.n = n
exctx.topframeref = vref = jit.virtual_ref(xy)
res = do_call(n) # this is equivalent of a cffi call which
# sometimes forces a frame
# when n==50, fn() will force the frame, so guard_not_forced
# fails and we enter blackholing: this test makes sure that
# the result of call_release_gil is kept alive before the
# raw_store, and that the corresponding box is passed
# in the fail_args. Before the fix, the result of
# call_release_gil was simply lost and when guard_not_forced
# failed, and the value of "res" was unpredictable.
# See commit b84ff38f34bd and subsequents.
assert res == n*2
jit.virtual_ref_finish(vref, xy)
exctx.topframeref = jit.vref_None
n += 1
return n
with FakeFFI(fake_call_impl_any):
assert f() == 100
res = self.meta_interp(f, [])
assert res == 100
class TestFfiCall(FfiCallTests, LLJitMixin):
def test_jit_ffi_vref(self):
py.test.skip("unsupported so far")
from rpython.rlib import clibffi
from rpython.rlib.jit_libffi import jit_ffi_prep_cif, jit_ffi_call
math_sin = intmask(ctypes.cast(ctypes.CDLL(None).sin,
ctypes.c_void_p).value)
math_sin = rffi.cast(rffi.VOIDP, math_sin)
cd = lltype.malloc(CIF_DESCRIPTION, 1, flavor='raw')
cd.abi = clibffi.FFI_DEFAULT_ABI
cd.nargs = 1
cd.rtype = clibffi.cast_type_to_ffitype(rffi.DOUBLE)
atypes = lltype.malloc(clibffi.FFI_TYPE_PP.TO, 1, flavor='raw')
atypes[0] = clibffi.cast_type_to_ffitype(rffi.DOUBLE)
cd.atypes = atypes
cd.exchange_size = 64 # 64 bytes of exchange data
cd.exchange_result = 24
cd.exchange_args[0] = 16
def f():
#
jit_ffi_prep_cif(cd)
#
assert rffi.sizeof(rffi.DOUBLE) == 8
exb = lltype.malloc(rffi.DOUBLEP.TO, 8, flavor='raw')
exb[2] = 1.23
jit_ffi_call(cd, math_sin, rffi.cast(rffi.CCHARP, exb))
res = exb[3]
lltype.free(exb, flavor='raw')
#
return res
#
res = self.interp_operations(f, [])
lltype.free(cd, flavor='raw')
assert res == math.sin(1.23)
lltype.free(atypes, flavor='raw')
def test_simple_call_float_unsupported(self):
self.test_simple_call_float(supports_floats=False,
expected_call_release_gil=0)
def test_simple_call_longlong_unsupported(self):
self.test_simple_call_longlong(supports_longlong=False,
expected_call_release_gil=is_64_bit)
def test_simple_call_singlefloat_unsupported(self):
self.test_simple_call_singlefloat(supports_singlefloats=False,
expected_call_release_gil=0)
def test_calldescrof_dynamic_returning_none(self):
from rpython.jit.backend.llgraph.runner import LLGraphCPU
old = LLGraphCPU.calldescrof_dynamic
try:
LLGraphCPU.calldescrof_dynamic = lambda *args: None
self.test_simple_call_float(expected_call_release_gil=0,
expected_call_may_force_f=1)
finally:
LLGraphCPU.calldescrof_dynamic = old
|
| 0ff27f1f00a593df49290302bfdfb6cd82638db0 | 45de7d905486934629730945619f49281ad19359 | /xlsxwriter/test/workbook/test_write_sheet.py | d8206524f9deb1bf254f917ddf884200c78d21a8 | ["BSD-2-Clause"] | permissive | jmcnamara/XlsxWriter | 599e1d225d698120ef931a776a9d93a6f60186ed | ab13807a1be68652ffc512ae6f5791d113b94ee1 | refs/heads/main | 2023-09-04T04:21:04.559742 | 2023-08-31T19:30:52 | 2023-08-31T19:30:52 | 7,433,211 | 3,251 | 712 | BSD-2-Clause | 2023-08-28T18:52:14 | 2013-01-04T01:07:06 | Python | UTF-8 | Python | false | false | 1,419 | py | test_write_sheet.py |
###############################################################################
#
# Tests for XlsxWriter.
#
# SPDX-License-Identifier: BSD-2-Clause
# Copyright (c), 2013-2023, John McNamara, jmcnamara@cpan.org
#
import unittest
from io import StringIO
from ...workbook import Workbook
class TestWriteSheet(unittest.TestCase):
"""
Test the Workbook _write_sheet() method.
"""
def setUp(self):
self.fh = StringIO()
self.workbook = Workbook()
self.workbook._set_filehandle(self.fh)
def test_write_sheet1(self):
"""Test the _write_sheet() method"""
self.workbook._write_sheet("Sheet1", 1, 0)
exp = """<sheet name="Sheet1" sheetId="1" r:id="rId1"/>"""
got = self.fh.getvalue()
self.assertEqual(got, exp)
def test_write_sheet2(self):
"""Test the _write_sheet() method"""
self.workbook._write_sheet("Sheet1", 1, 1)
exp = """<sheet name="Sheet1" sheetId="1" state="hidden" r:id="rId1"/>"""
got = self.fh.getvalue()
self.assertEqual(got, exp)
def test_write_sheet3(self):
"""Test the _write_sheet() method"""
self.workbook._write_sheet("Bits & Bobs", 1, 0)
exp = """<sheet name="Bits & Bobs" sheetId="1" r:id="rId1"/>"""
got = self.fh.getvalue()
self.assertEqual(got, exp)
def tearDown(self):
self.workbook.fileclosed = 1
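# Context sketch (added for illustration): the private _write_sheet() calls
# exercised above produce the <sheet> entries that the public API writes
# into workbook.xml, e.g.:
# import xlsxwriter
# workbook = xlsxwriter.Workbook('hello.xlsx')
# worksheet = workbook.add_worksheet('Sheet1')  # serialized via _write_sheet()
# workbook.close()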
|
| e9e08e5ebc3d9376a9bc1b6c43f13140a394b625 | 2337351b228818e41be3002bd38f68f77c2aa074 | /sa/interfaces/igetlldpneighbors.py | 418b3dda58bfcf5826a9fd817ad9556585a31247 | ["BSD-3-Clause"] | permissive | nocproject/noc | 57d40c680a1499374463e472434f9595ed6d1374 | 6e6d71574e9b9d822bec572cc629a0ea73604a59 | refs/heads/master | 2023-08-31T01:11:33.544573 | 2023-08-30T17:31:11 | 2023-08-30T17:31:11 | 107,815,776 | 105 | 33 | BSD-3-Clause | 2023-07-31T07:57:45 | 2017-10-21T21:04:33 | Python | UTF-8 | Python | false | false | 3,004 | py | igetlldpneighbors.py |
# ---------------------------------------------------------------------
# IGetLLDPNeighbors
# ---------------------------------------------------------------------
# Copyright (C) 2007-2018 The NOC Project
# See LICENSE for details
# ---------------------------------------------------------------------
# NOC Modules
from noc.core.interface.base import BaseInterface
from .base import (
DictListParameter,
StringParameter,
IntParameter,
InterfaceNameParameter,
MACAddressParameter,
IPv4Parameter,
)
#
# LLDP neighbor information
#
# Rules:
# local_interface must be filled with the interface name (will be cleaned automatically)
#
# local_interface_id depends upon how the box advertises its own interfaces:
#
# If interfaces advertised with macAddress(3) LldpPortIdSubtype,
# local_interface_id must be set to interface MAC address
# (will be cleaned automatically)
#
# If interface advertised with networkAddress(4) LldpPortIdSubtype,
# local_interface_id must be set to interface IP address
#
# If interfaces advertised with interfaceName(5) LldpPortIdSubtype,
# local_interface_id must be left empty or omitted.
#
# If interfaces advertised with local(7) LldpPortIdSubtype,
# local_interface_id must be set to local identifier
#
# Remote port handling solely depends upon remote_port_subtype:
#
# For macAddress(3) - convert to common normalized form
#
# For networkAddress(4) - return as IP address
#
# For interfaceName(5) - return untouched
#
# For local(7) - convert to integer and return untouched
#
class IGetLLDPNeighbors(BaseInterface):
returns = DictListParameter(
attrs={
"local_interface": InterfaceNameParameter(),
            # Should be set when the platform advertises a LldpPortIdSubtype other than interfaceName(5)
"local_interface_id": IntParameter(required=False)
| MACAddressParameter(required=False)
| IPv4Parameter(required=False),
"neighbors": DictListParameter(
attrs={
# LldpChassisIdSubtype TC, macAddress(4)
"remote_chassis_id_subtype": IntParameter(default=4),
# Remote chassis ID
"remote_chassis_id": MACAddressParameter(accept_bin=False)
| IPv4Parameter()
| StringParameter(),
# LldpPortIdSubtype TC, interfaceName(5)
"remote_port_subtype": IntParameter(default=5),
"remote_port": MACAddressParameter(accept_bin=False)
| IPv4Parameter()
| StringParameter(),
"remote_port_description": StringParameter(required=False),
"remote_system_name": StringParameter(required=False),
"remote_system_description": StringParameter(required=False),
# LldpSystemCapabilitiesMap TC bitmask
"remote_capabilities": IntParameter(default=0),
}
),
}
)
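# Illustrative return value (added; not part of the original interface
# definition): a single neighbor whose remote port is advertised as
# interfaceName(5), which would validate against the schema above:
# [{
#     "local_interface": "GigabitEthernet0/1",
#     "neighbors": [{
#         "remote_chassis_id_subtype": 4,
#         "remote_chassis_id": "00:1B:21:7B:91:12",
#         "remote_port_subtype": 5,
#         "remote_port": "Gi0/24",
#         "remote_system_name": "switch-2",
#     }],
# }]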
|
| 2a13526e60492b8ba021391cecdfac43e9a813de | 6b551bec528a1d6544201d3c6d86835e885343b5 | /deep_privacy/dataset/transforms/transforms.py | 942bfa76c214116909e8ecef081e30c3fc9f2fd8 | ["MIT", "Apache-2.0"] | permissive | hukkelas/DeepPrivacy | 9471c8e9389828aa09330905081205b061161d81 | 5ee3f1b0608f03ac54d5694b6421f6132cb63f0e | refs/heads/master | 2023-08-16T00:41:02.366235 | 2023-03-28T06:23:34 | 2023-03-28T06:23:34 | 206,106,232 | 1,288 | 194 | MIT | 2021-08-18T08:21:33 | 2019-09-03T15:08:27 | Python | UTF-8 | Python | false | false | 3,576 | py | transforms.py |
import numpy as np
import albumentations
import cv2
from .build import TRANSFORM_REGISTRY
@TRANSFORM_REGISTRY.register_module
class RandomFlip:
def __init__(self, flip_ratio=None, **kwargs):
self.flip_ratio = flip_ratio
if self.flip_ratio is None:
self.flip_ratio = 0.5
assert 0 <= self.flip_ratio <= 1
def __call__(self, container):
if np.random.rand() > self.flip_ratio:
return container
img = container["img"]
container["img"] = np.flip(img, axis=1)
if "landmarks" in container:
landmarks_XY = container["landmarks"]
landmarks_XY[:, 0] = 1 - landmarks_XY[:, 0]
container["landmarks"] = landmarks_XY
if "mask" in container:
mask = container["mask"]
mask = np.flip(mask, axis=1)
container["mask"] = mask
return container
@TRANSFORM_REGISTRY.register_module
class FlattenLandmark:
def __init__(self, *args, **kwargs):
return
def __call__(self, container, **kwargs):
assert "landmarks" in container,\
f"Did not find landmarks in container. {container.keys()}"
landmarks_XY = container["landmarks"]
landmarks_XY = landmarks_XY.reshape(-1)
        landmarks_XY = landmarks_XY.clip(-1, 1)  # ndarray.clip returns a copy; assign it back
container["landmarks"] = landmarks_XY
return container
def _resize(im, imsize):
min_size = min(im.shape[:2])
factor = imsize / min_size
new_size = [int(size * factor) + 1 for size in im.shape[:2]]
im = albumentations.augmentations.functional.resize(im, *new_size)
return im
@TRANSFORM_REGISTRY.register_module
class RandomCrop:
def __init__(self, imsize, **kwargs):
self.imsize = imsize
def __call__(self, container, **kwargs):
im = container["img"]
if any(size < self.imsize for size in im.shape[:2]):
im = _resize(im, self.imsize)
im = albumentations.augmentations.functional.random_crop(
im, self.imsize, self.imsize, 0, 0)
container["img"] = im
return container
@TRANSFORM_REGISTRY.register_module
class CenterCrop:
def __init__(self, imsize, **kwargs):
self.imsize = imsize
def __call__(self, container, **kwargs):
im = container["img"]
if any(size < self.imsize for size in im.shape[:2]):
im = _resize(im, self.imsize)
im = albumentations.augmentations.functional.center_crop(
im, self.imsize, self.imsize)
container["img"] = im
return container
@TRANSFORM_REGISTRY.register_module
class RandomResize:
def __init__(self, resize_ratio, min_imsize: int, max_imsize: int,
imsize: int, **kwargs):
self.resize_ratio = resize_ratio
imsize = min(min_imsize, imsize)
self.possible_shapes = []
while imsize <= max_imsize:
self.possible_shapes.append(imsize)
imsize *= 2
def __call__(self, container, **kwargs):
if np.random.rand() > self.resize_ratio:
return container
im = container["img"]
shape = im.shape
imsize = np.random.choice(self.possible_shapes)
orig_imsize = im.shape[0]
im = albumentations.augmentations.functional.resize(
im, imsize, imsize, interpolation=cv2.INTER_LINEAR)
im = albumentations.augmentations.functional.resize(
im, orig_imsize, orig_imsize, interpolation=cv2.INTER_LINEAR)
assert im.shape == shape
container["img"] = im
return container
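# Usage sketch (added for illustration): with flip_ratio=1.0 the flip in
# RandomFlip always triggers, so the image columns come back mirrored.
if __name__ == "__main__":
    container = {"img": np.arange(12, dtype=np.float32).reshape(2, 2, 3)}
    flipped = RandomFlip(flip_ratio=1.0)(container)
    print(flipped["img"][0, :, 0])  # [3. 0.]: columns swapped vs. the input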
|
| 8d3c513b4ff4df80b707739ab3469b0ab3c5ffd0 | 578db86c51d44ebddd0dc7b1738985b3dc69eb74 | /corehq/apps/translations/migrations/0005_auto_20190405_1747.py | 7487792986d8aaf409cbb285b06007ce91f557b3 | ["BSD-3-Clause"] | permissive | dimagi/commcare-hq | a43c7dd32b5f89c89fd5aa1b1359ab7301f4ff6b | e7391ddae1af1dbf118211ecb52c83fc508aa656 | refs/heads/master | 2023-08-16T22:38:27.853437 | 2023-08-16T19:07:19 | 2023-08-16T19:07:19 | 247,278 | 499 | 203 | BSD-3-Clause | 2023-09-14T19:03:24 | 2009-07-09T17:00:07 | Python | UTF-8 | Python | false | false | 976 | py | 0005_auto_20190405_1747.py |
# flake8: noqa
# Generated by Django 1.11.20 on 2019-04-05 17:47
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('translations', '0004_auto_20190318_1905'),
]
operations = [
migrations.AlterField(
model_name='transifexblacklist',
name='field_name',
field=models.TextField(help_text="\nThis is the same string that appears in the bulk translations download.\nUsually the string in either case list or detail under 'property'.\nThis could be an xpath or case property name.\nIf it is an ID Mapping then the property should be '<property> (ID Mapping Text)'.\nFor the values each value should be '<id mapping value> (ID Mapping Value)'.\n<br>\nExample: case detail for tasks_type could have entries:\n<ul>\n <li>tasks_type (ID Mapping Text)</li>\n <li>child (ID Mapping Value)</li>\n <li>pregnancy (ID Mapping Value)</li>\n</ul>\n"),
),
]
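# Usage note (added for illustration): this migration is applied with the
# standard Django command:
# python manage.py migrate translations 0005_auto_20190405_1747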
|
| 5fed9ca29c795c2303f2f1af2e6ad14767b877c3 | 71fb04f723b46a1bf45295be239bcec25e07f98c | /keras_cv/losses/smooth_l1.py | acdb705a3e71c0ddf816d827c42ba543db5da3f1 | ["Apache-2.0"] | permissive | keras-team/keras-cv | 9bca4479474e853ec3a1c541b8be20fea2447a1a | e83f229f1b7b847cd712d5cd4810097d3e06d14e | refs/heads/master | 2023-08-31T10:22:08.406394 | 2023-08-30T20:24:57 | 2023-08-30T20:24:57 | 265,079,853 | 818 | 287 | NOASSERTION | 2023-09-12T16:49:01 | 2020-05-18T22:39:21 | Python | UTF-8 | Python | false | false | 1,913 | py | smooth_l1.py |
# Copyright 2022 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from keras_cv.api_export import keras_cv_export
from keras_cv.backend import keras
from keras_cv.backend import ops
@keras_cv_export("keras_cv.losses.SmoothL1Loss")
class SmoothL1Loss(keras.losses.Loss):
"""Implements Smooth L1 loss.
SmoothL1Loss implements the SmoothL1 function, where values less than
`l1_cutoff` contribute to the overall loss based on their squared
difference, and values greater than l1_cutoff contribute based on their raw
difference.
Args:
l1_cutoff: differences between y_true and y_pred that are larger than
`l1_cutoff` are treated as `L1` values
"""
def __init__(self, l1_cutoff=1.0, **kwargs):
super().__init__(**kwargs)
self.l1_cutoff = l1_cutoff
def call(self, y_true, y_pred):
difference = y_true - y_pred
absolute_difference = ops.abs(difference)
squared_difference = difference**2
loss = ops.where(
absolute_difference < self.l1_cutoff,
0.5 * squared_difference,
absolute_difference - 0.5,
)
return ops.mean(loss, axis=-1)
def get_config(self):
config = {
"l1_cutoff": self.l1_cutoff,
}
base_config = super().get_config()
return dict(list(base_config.items()) + list(config.items()))
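# Numeric sanity check (added for illustration; plain NumPy, independent of
# the keras backend): for |d| < l1_cutoff the loss is 0.5 * d**2, otherwise
# |d| - 0.5, averaged over the last axis.
# import numpy as np
# d = np.array([0.2, 2.0])
# np.mean(np.where(np.abs(d) < 1.0, 0.5 * d**2, np.abs(d) - 0.5))  # -> 0.76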
|
| c7225d21c405653cdc079e433e8b1a316e60406f | dab10c721000fd9eb38676d6b2730f155eedd54e | /recirq/qaoa/gates_and_compilation_test.py | edff2155932a40c091a426f3a7c4babc778d6d26 | ["Apache-2.0"] | permissive | quantumlib/ReCirq | f45e55e432f2e29fb8f2fe35a3d436a629219e86 | d021621a3837693ae9c5fdc5c05058de20fba314 | refs/heads/master | 2023-09-03T19:35:55.281836 | 2023-09-01T01:12:40 | 2023-09-01T01:12:40 | 246,951,354 | 260 | 116 | Apache-2.0 | 2023-09-09T00:41:35 | 2020-03-12T23:51:33 | Python | UTF-8 | Python | false | false | 15,119 | py | gates_and_compilation_test.py |
import random
import networkx as nx
import numpy as np
import pytest
import cirq
from cirq.testing import random_special_unitary
from recirq.qaoa.circuit_structure import validate_well_structured
from recirq.qaoa.gates_and_compilation import ZZSwap, compile_problem_unitary_to_zzswap, \
ProblemUnitary, DriverUnitary, SwapNetworkProblemUnitary, \
compile_problem_unitary_to_swap_network, compile_swap_network_to_zzswap, \
single_qubit_matrix_to_phased_x_z_const_depth, zzswap_as_syc, zz_as_syc, \
compile_driver_unitary_to_rx, compile_single_qubit_gates, compile_to_syc, \
measure_with_final_permutation, compile_out_virtual_z, compile_to_non_negligible, \
_hardware_graph, compile_problem_unitary_to_hardware_graph
from recirq.qaoa.problems import random_plus_minus_1_weights
def test_zz_swap():
q1, q2 = cirq.LineQubit.range(2)
circuit1 = cirq.Circuit(ZZSwap(zz_exponent=0.123).on(q1, q2))
u1 = circuit1.unitary(dtype=np.complex128)
circuit2 = cirq.Circuit(
cirq.ZZ(q1, q2) ** 0.123,
cirq.SWAP(q1, q2)
)
u2 = circuit2.unitary(dtype=np.complex128)
circuit3 = cirq.Circuit(
cirq.SWAP(q1, q2),
cirq.ZZ(q1, q2) ** 0.123
)
u3 = circuit3.unitary(dtype=np.complex128)
np.testing.assert_allclose(u1, u2)
np.testing.assert_allclose(u2, u3)
np.testing.assert_allclose(u3, u1)
def test_compile_problem_unitary_to_zzswap():
n = 5
q = cirq.LineQubit.range(n)
problem = nx.complete_graph(n=n)
problem = random_plus_minus_1_weights(problem)
gamma = 0.123
circuit1 = cirq.Circuit()
for i1, i2, w in problem.edges.data('weight'):
circuit1.append(
cirq.ZZPowGate(exponent=2 * gamma * w / np.pi, global_shift=-0.5).on(q[i1], q[i2]))
u1 = circuit1.unitary(dtype=np.complex128)
circuit2 = cirq.Circuit(
compile_problem_unitary_to_zzswap(problem_graph=problem, gamma=gamma, qubits=q),
compile_problem_unitary_to_zzswap(problem_graph=problem, gamma=0, qubits=q))
u2 = circuit2.unitary(dtype=np.complex128)
np.testing.assert_allclose(u1, u2)
def test_problem_unitary():
n = 5
q = cirq.LineQubit.range(n)
problem = nx.complete_graph(n=n)
problem = random_plus_minus_1_weights(problem, rs=np.random.RandomState(52))
gamma = 0.151
problem_unitary = ProblemUnitary(problem_graph=problem, gamma=gamma)
u1 = cirq.Circuit(problem_unitary.on(*q)).unitary(qubit_order=q, dtype=np.complex128)
circuit = cirq.Circuit()
for i1, i2, w in problem.edges.data('weight'):
circuit.append(
cirq.ZZPowGate(exponent=2 * gamma * w / np.pi, global_shift=-0.5).on(q[i1], q[i2]))
u2 = circuit.unitary(dtype=np.complex128)
np.testing.assert_allclose(u1, u2)
def test_swap_network_problem_unitary():
n = 5
q = cirq.LineQubit.range(n)
problem = nx.complete_graph(n=n)
problem = random_plus_minus_1_weights(problem, rs=np.random.RandomState(52))
gamma = 0.151
spu = SwapNetworkProblemUnitary(problem_graph=problem, gamma=gamma)
u1 = cirq.Circuit(spu.on(*q)).unitary(qubit_order=q, dtype=np.complex128)
circuit = cirq.Circuit()
for i1, i2, w in problem.edges.data('weight'):
circuit.append(
cirq.ZZPowGate(exponent=2 * gamma * w / np.pi, global_shift=-0.5).on(q[i1], q[i2]))
circuit += cirq.QubitPermutationGate(list(range(n))[::-1]).on(*q)
u2 = circuit.unitary(dtype=np.complex128)
np.testing.assert_allclose(u1, u2)
def test_compile_problem_unitary_to_swap_network_p1():
n = 5
q = cirq.LineQubit.range(n)
problem = nx.complete_graph(n=n)
problem = random_plus_minus_1_weights(problem)
c1 = cirq.Circuit(ProblemUnitary(problem_graph=problem, gamma=0.123).on(*q))
c2 = compile_problem_unitary_to_swap_network(c1)
assert c1 != c2
assert isinstance(c2.moments[-1].operations[0].gate, cirq.QubitPermutationGate)
u1 = c1.unitary(dtype=np.complex128)
u2 = c2.unitary(dtype=np.complex128)
np.testing.assert_allclose(u1, u2)
def test_compile_problem_unitary_to_swap_network_p2():
n = 5
q = cirq.LineQubit.range(n)
problem = nx.complete_graph(n=n)
problem = random_plus_minus_1_weights(problem)
c1 = cirq.Circuit(
ProblemUnitary(problem_graph=problem, gamma=0.123).on(*q),
ProblemUnitary(problem_graph=problem, gamma=0.321).on(*q),
)
c2 = compile_problem_unitary_to_swap_network(c1)
assert c1 != c2
assert not isinstance(c2.moments[-1].operations[0].gate, cirq.QubitPermutationGate)
u1 = c1.unitary(dtype=np.complex128)
u2 = c2.unitary(dtype=np.complex128)
np.testing.assert_allclose(u1, u2)
def test_compile_swap_network_to_zzswap():
n = 5
q = cirq.LineQubit.range(n)
problem = nx.complete_graph(n=n)
problem = random_plus_minus_1_weights(problem)
c1 = cirq.Circuit(ProblemUnitary(problem_graph=problem, gamma=0.123).on(*q))
c2 = compile_problem_unitary_to_swap_network(c1)
assert c1 != c2
c3 = compile_swap_network_to_zzswap(c2)
assert c2 != c3
u1 = c1.unitary(dtype=np.complex128)
u3 = c3.unitary(dtype=np.complex128)
np.testing.assert_allclose(u1, u3)
def test_compile_sk_problem_unitary_to_zzswap_2():
n = 5
q = cirq.LineQubit.range(n)
problem = nx.complete_graph(n=n)
problem = random_plus_minus_1_weights(problem)
c1 = cirq.Circuit(
ProblemUnitary(problem_graph=problem, gamma=0.123).on(*q),
DriverUnitary(num_qubits=n, beta=0.456).on(*q),
ProblemUnitary(problem_graph=problem, gamma=0.789).on(*q),
)
c2 = compile_problem_unitary_to_swap_network(c1)
assert c1 != c2
c3 = compile_swap_network_to_zzswap(c2)
assert c2 != c3
u1 = c1.unitary(dtype=np.complex128)
u3 = c3.unitary(dtype=np.complex128)
np.testing.assert_allclose(u1, u3)
def test_hardware_graph():
coordinates = [
(10, 10), (10, 11),
(9, 10), (9, 11),
]
problem = nx.from_edgelist([
(0, 1), # 10,10 - 10,11
(1, 3), # 10,11 - 9,11
(0, 2), # 10,10 - 9,10
(2, 3), # 9,10, 9,11
])
nx.set_edge_attributes(problem, 1, name='weight')
qubits = cirq.LineQubit.range(4)
circuit = cirq.Circuit(_hardware_graph(problem, 0.123, coordinates, qubits))
assert circuit.to_text_diagram(transpose=True) == """ 0 1 2 3
│ │ │ │
ZZ─ZZ^0.078 ZZ───────ZZ^0.078
│ │ │ │
│ │ │ │
│ │ │ │
┌╴│ │ │ │ ╶┐
│ ZZ─┼────────ZZ^0.078 │ │
│ │ ZZ───────┼────────ZZ^0.078 │
└╴│ │ │ │ ╶┘
│ │ │ │
│ │ │ │
│ │ │ │"""
def test_compile_problem_unitary_to_hardware_graph():
problem = nx.grid_2d_graph(3, 3)
coordinates = sorted(problem.nodes)
problem = nx.relabel_nodes(problem, {coord: i for i, coord in enumerate(coordinates)})
problem = random_plus_minus_1_weights(problem)
qubits = cirq.LineQubit.range(problem.number_of_nodes())
c1 = cirq.Circuit(ProblemUnitary(problem, gamma=random.random()).on(*qubits))
c2 = compile_problem_unitary_to_hardware_graph(c1, coordinates)
assert c1 != c2
np.testing.assert_allclose(c1.unitary(dtype=np.complex128), c2.unitary(dtype=np.complex128))
def test_driver_unitary():
n = 5
q = cirq.LineQubit.range(n)
du = DriverUnitary(num_qubits=n, beta=0.123).on(*q)
circuit = cirq.Circuit(cirq.rx(2 * 0.123).on_each(*q))
u1 = cirq.Circuit(du).unitary(qubit_order=q, dtype=np.complex128)
u2 = circuit.unitary(dtype=np.complex128)
np.testing.assert_allclose(u1, u2)
def test_compile_driver_unitary_to_rx():
n = 5
q = cirq.LineQubit.range(n)
c1 = cirq.Circuit(DriverUnitary(num_qubits=n, beta=0.123).on(*q))
c2 = cirq.Circuit(cirq.rx(2 * 0.123).on_each(*q))
c3 = compile_driver_unitary_to_rx(c1)
u1 = c1.unitary(dtype=np.complex128)
u2 = c2.unitary(dtype=np.complex128)
u3 = c3.unitary(dtype=np.complex128)
np.testing.assert_allclose(u1, u2)
np.testing.assert_allclose(u1, u3)
def test_single_q_const_depth():
su = random_special_unitary(2)
ops = single_qubit_matrix_to_phased_x_z_const_depth(su)
assert len(ops) == 2
circuit = cirq.Circuit([op.on(cirq.LineQubit(1)) for op in ops])
u2 = circuit.unitary(dtype=np.complex128)
cirq.testing.assert_allclose_up_to_global_phase(su, u2, atol=1e-8)
def test_compile_single_qubit_gates():
q = cirq.LineQubit(0)
c1 = cirq.Circuit()
for _ in range(10):
c1.append(random.choice([cirq.X, cirq.Y, cirq.Z])(q) ** random.random())
c2 = compile_single_qubit_gates(c1)
assert c1 != c2
assert len(c2) == 2
assert isinstance(c2[0].operations[0].gate, cirq.PhasedXPowGate)
assert isinstance(c2[1].operations[0].gate, cirq.ZPowGate)
u1 = c1.unitary(dtype=np.complex128)
u2 = c2.unitary(dtype=np.complex128)
cirq.testing.assert_allclose_up_to_global_phase(u1, u2, atol=1e-8)
def test_zzswap_as_syc():
q1, q2 = cirq.LineQubit.range(2)
zzs = ZZSwap(zz_exponent=np.random.rand(1))
circuit = zzswap_as_syc(zzs.theta, q1, q2)
assert len(circuit) == 3 * 3 + 2
u1 = cirq.Circuit(zzs.on(q1, q2)).unitary(qubit_order=(q1, q2), dtype=np.complex128)
u2 = circuit.unitary(dtype=np.complex128)
cirq.testing.assert_allclose_up_to_global_phase(u1, u2, atol=1e-8)
@pytest.mark.skip("KAK instability")
def test_zzswap_as_syc_2():
q1, q2 = cirq.LineQubit.range(2)
zzs = ZZSwap(zz_exponent=0.123)
circuit = zzswap_as_syc(zzs.theta, q1, q2)
assert str(circuit) == """\
0: ───PhX(0.145)^(0)───Z^-0.2──────SYC───PhX(0.214)^0.576───Z^-0.131───SYC─────────────────────────SYC───PhX(-0.0833)^0.576───Z^-0.548───
│ │ │
1: ───PhX(0.973)^(0)───Z^(-1/14)───SYC───PhX(-0.369)────────Z^0.869────SYC───PhX(0.0)^0.52───Z^0───SYC───PhX(-0.394)──────────Z^0.036────\
"""
def test_zz_as_syc():
q1, q2 = cirq.LineQubit.range(2)
zz = cirq.ZZPowGate(exponent=np.random.rand(1))
circuit = zz_as_syc(zz.exponent * np.pi / 2, q1, q2)
assert len(circuit) == 3 * 2 + 2
u1 = cirq.Circuit(zz.on(q1, q2)).unitary(qubit_order=(q1, q2), dtype=np.complex128)
u2 = circuit.unitary(dtype=np.complex128)
cirq.testing.assert_allclose_up_to_global_phase(u1, u2, atol=1e-8)
@pytest.mark.skip("KAK instability")
def test_zz_as_syc_2():
q1, q2 = cirq.LineQubit.range(2)
zz = cirq.ZZPowGate(exponent=0.123)
circuit = zz_as_syc(zz.exponent * np.pi / 2, q1, q2)
assert len(circuit) == 3 * 2 + 2
validate_well_structured(circuit)
cirq.testing.assert_has_diagram(circuit, """
0: ───PhX(1)^0.483───Z^(1/12)─────SYC────────────────────────SYC───PhX(0.917)^0.483───Z^(1/12)───
│ │
1: ───PhX(-0.583)────Z^(-11/12)───SYC───PhX(0)^0.873───Z^0───SYC───PhX(0)─────────────T^-1───────
""")
@pytest.mark.parametrize("p", [1, 2, 3])
def test_compile_to_syc(p):
problem = nx.complete_graph(n=5)
problem = random_plus_minus_1_weights(problem)
qubits = cirq.LineQubit.range(5)
c1 = cirq.Circuit(
cirq.H.on_each(qubits),
[
[
ProblemUnitary(problem, gamma=np.random.random()).on(*qubits),
DriverUnitary(5, beta=np.random.random()).on(*qubits)
]
for _ in range(p)
]
)
c2 = compile_problem_unitary_to_swap_network(c1)
c3 = compile_swap_network_to_zzswap(c2)
c4 = compile_driver_unitary_to_rx(c3)
c5 = compile_to_syc(c4)
validate_well_structured(c5, allow_terminal_permutations=True)
np.testing.assert_allclose(c1.unitary(dtype=np.complex128), c2.unitary(dtype=np.complex128))
np.testing.assert_allclose(c1.unitary(dtype=np.complex128), c3.unitary(dtype=np.complex128))
np.testing.assert_allclose(c1.unitary(dtype=np.complex128), c4.unitary(dtype=np.complex128))
# Single qubit throws out global phase
cirq.testing.assert_allclose_up_to_global_phase(
c1.unitary(dtype=np.complex128), c5.unitary(dtype=np.complex128), atol=1e-8)
@pytest.mark.parametrize("p", [1, 2, 3])
def test_measure_with_final_permutation(p):
problem = nx.complete_graph(n=5)
problem = random_plus_minus_1_weights(problem)
qubits = cirq.LineQubit.range(5)
c1 = cirq.Circuit(
cirq.H.on_each(qubits),
[
[
ProblemUnitary(problem, gamma=np.random.random()).on(*qubits),
DriverUnitary(5, beta=np.random.random()).on(*qubits)
]
for _ in range(p)
]
)
c2 = compile_problem_unitary_to_swap_network(c1)
c3 = compile_swap_network_to_zzswap(c2)
c4 = compile_driver_unitary_to_rx(c3)
c5 = compile_to_syc(c4)
validate_well_structured(c5, allow_terminal_permutations=True)
c6, final_qubits = measure_with_final_permutation(c5, qubits)
validate_well_structured(c6, allow_terminal_permutations=False)
if p % 2 == 1:
assert final_qubits == qubits[::-1]
else:
assert final_qubits == qubits
permutation = []
for q in qubits:
permutation.append(final_qubits.index(q))
c1_prime = (
c1
+ cirq.QubitPermutationGate(permutation).on(*qubits)
+ cirq.measure(*qubits, key='z')
)
cirq.testing.assert_circuits_with_terminal_measurements_are_equivalent(c1_prime, c6, atol=1e-5)
def test_compile_out_virtual_z():
qubits = cirq.LineQubit.range(2)
circuit = cirq.Circuit(ZZSwap(zz_exponent=0.123).on(*qubits))
c1 = compile_to_syc(circuit)
assert len(c1) == 3 * 3 + 2
c2 = compile_out_virtual_z(c1)
assert c1 != c2
assert len(c2) == 3 * 2 + 2
def test_structured():
qubits = cirq.LineQubit.range(2)
circuit = cirq.Circuit(ZZSwap(zz_exponent=0.123).on(*qubits))
circuit = compile_to_syc(circuit)
validate_well_structured(circuit)
def test_compile_to_non_negligible():
qubits = cirq.LineQubit.range(2)
circuit = cirq.Circuit(ZZSwap(zz_exponent=0.123).on(*qubits))
c1 = compile_to_syc(circuit)
validate_well_structured(c1)
assert len(c1) == 3 * 3 + 2
c2 = compile_to_non_negligible(c1)
assert c1 != c2
# KAK instability (https://github.com/quantumlib/Cirq/issues/1647)
# means the first layer of PhX gets removed on mpharrigan's machine
# but not in docker / in CI.
assert len(c2) in [9, 10]
|
| 8123caffa83dee1a71a6441d9f93192eefb246ed | 091a6200be74bf6577c86f623665bcc24e16b02b | /Adafruit_Proximity_Trinkey/Proximity_Spacebar_Game/code.py | f9a5b893fa29d4d5313866c4dc273e39b2cfb1a1 | ["MIT"] | permissive | adafruit/Adafruit_Learning_System_Guides | b5f7bce40a16da64e7a79d4b39de032f2cca41d4 | 5eaa7a15a437c533b89f359a25983e24bb6b5438 | refs/heads/main | 2023-09-05T18:31:41.621956 | 2023-09-05T15:36:09 | 2023-09-05T15:36:09 | 105,065,494 | 937 | 937 | MIT | 2023-09-12T18:48:53 | 2017-09-27T20:22:44 | C | UTF-8 | Python | false | false | 1,170 | py | code.py |
# SPDX-FileCopyrightText: 2021 Kattni Rembor for Adafruit Industries
#
# SPDX-License-Identifier: MIT
"""
Proximity spacebar dino game example. Sends a space when you move your hand close to the proximity
sensor and turns the LEDs on to let you know you're in the right range. For use with the Chrome
Dino game, reachable in Chrome with chrome://dino or when you have no network connectivity.
"""
import board
import usb_hid
from adafruit_hid.keyboard import Keyboard
from adafruit_hid.keycode import Keycode
import neopixel
from adafruit_apds9960.apds9960 import APDS9960
i2c = board.I2C() # uses board.SCL and board.SDA
# i2c = board.STEMMA_I2C() # For using the built-in STEMMA QT connector on a microcontroller
apds = APDS9960(i2c)
pixels = neopixel.NeoPixel(board.NEOPIXEL, 2)
keyboard = Keyboard(usb_hid.devices)
apds.enable_proximity = True
space = False
while True:
    current_proximity = apds.proximity
    print(current_proximity)
    # Hysteresis: fire once above 100, re-arm only after dropping below 50,
    # so a single hand pass sends exactly one spacebar press.
    if current_proximity > 100 and not space:
pixels.fill((255, 0, 0))
keyboard.send(Keycode.SPACE)
space = True
elif current_proximity < 50 and space:
pixels.fill(0)
space = False
|
| 3431cc01f18d44f40f6081820f2fab665b70f3dd | e986b7650d2317f740d65f44b19e663cb646d85e | /jdaviz/core/region_translators.py | 2522b4ab17a7703ef93fcf26eeff10f599928c7c | ["BSD-3-Clause"] | permissive | spacetelescope/jdaviz | a223230d2296f7fdee17a43ae1a4bee45452ec13 | 17a864ed7d64cece18fbc29f3561c137e6bf0942 | refs/heads/main | 2023-08-17T05:59:55.109052 | 2023-08-15T19:46:49 | 2023-08-15T19:46:49 | 185,452,341 | 105 | 70 | BSD-3-Clause | 2023-09-14T15:20:16 | 2019-05-07T17:54:06 | Python | UTF-8 | Python | false | false | 14,300 | py | region_translators.py |
"""The ``region_translators`` module houses translations of
:ref:`regions:shapes` to :ref:`photutils:photutils-aperture` apertures.
"""
from astropy import units as u
from astropy.coordinates import Angle
from glue.core.roi import CircularROI, EllipticalROI, RectangularROI, CircularAnnulusROI
from photutils.aperture import (CircularAperture, SkyCircularAperture,
EllipticalAperture, SkyEllipticalAperture,
RectangularAperture, SkyRectangularAperture,
CircularAnnulus, SkyCircularAnnulus,
EllipticalAnnulus, SkyEllipticalAnnulus,
RectangularAnnulus, SkyRectangularAnnulus)
from regions import (CirclePixelRegion, CircleSkyRegion,
EllipsePixelRegion, EllipseSkyRegion,
RectanglePixelRegion, RectangleSkyRegion,
CircleAnnulusPixelRegion, CircleAnnulusSkyRegion,
EllipseAnnulusPixelRegion, EllipseAnnulusSkyRegion,
RectangleAnnulusPixelRegion, RectangleAnnulusSkyRegion, PixCoord)
__all__ = ['regions2roi', 'regions2aperture', 'aperture2regions']
def _get_region_from_spatial_subset(plugin_obj, subset_state):
"""Convert the given ``glue`` ROI subset state to ``regions`` shape.
.. note:: This is for internal use only in Imviz plugins.
Parameters
----------
plugin_obj : obj
Plugin instance that needs this translation.
The plugin is assumed to have a special setup that gives
it access to these attributes: ``app`` and ``dataset_selected``.
The ``app._jdaviz_helper.get_link_type`` method must also
exist.
subset_state : obj
ROI subset state to translate.
Returns
-------
reg : `regions.Region`
An equivalent ``regions`` shape. This can be a pixel or sky
region, so the plugin needs to be able to deal with both.
See Also
--------
regions2roi
"""
from glue_astronomy.translators.regions import roi_subset_state_to_region
# Subset is defined against its parent. This is not necessarily
# the current viewer reference data, which can be changed.
# See https://github.com/spacetelescope/jdaviz/issues/2230
link_type = plugin_obj.app._jdaviz_helper.get_link_type(
subset_state.xatt.parent.label, plugin_obj.dataset_selected)
return roi_subset_state_to_region(subset_state, to_sky=(link_type == 'wcs'))
def regions2roi(region_shape, wcs=None):
"""Convert a given ``regions`` shape to ``glue`` ROI.
This is the opposite of what is offered by
``glue_astronomy.translators.regions.AstropyRegionsHandler.to_object``
but does not cover all the same shapes exactly.
Parameters
----------
region_shape : `regions.Region`
A supported ``regions`` shape.
wcs : `~astropy.wcs.WCS` or `None`
A compatible WCS object, if required.
**This is only used for sky aperture.**
Returns
-------
roi : `glue.core.roi.Roi`
An equivalent ``glue`` ROI.
Raises
------
ValueError
WCS is required but not provided.
NotImplementedError
The given ``regions`` shape is not supported.
Examples
--------
Translate a `regions.CirclePixelRegion` to `glue.core.roi.CircularROI`:
>>> from regions import CirclePixelRegion, PixCoord
>>> from jdaviz.core.region_translators import regions2roi
>>> region_shape = CirclePixelRegion(center=PixCoord(x=42, y=43), radius=4.2)
>>> regions2roi(region_shape) # doctest: +ELLIPSIS
<glue.core.roi.CircularROI object at ...>
"""
if isinstance(region_shape, (CircleSkyRegion, EllipseSkyRegion, RectangleSkyRegion,
CircleAnnulusSkyRegion)):
if wcs is None:
raise ValueError(f'WCS must be provided for {region_shape}')
# Convert sky to pixel region first, if necessary.
region_shape = region_shape.to_pixel(wcs)
if isinstance(region_shape, CirclePixelRegion):
roi = CircularROI(
xc=region_shape.center.x, yc=region_shape.center.y, radius=region_shape.radius)
elif isinstance(region_shape, EllipsePixelRegion):
roi = EllipticalROI(
xc=region_shape.center.x, yc=region_shape.center.y,
radius_x=region_shape.width * 0.5, radius_y=region_shape.height * 0.5,
theta=region_shape.angle.to_value(u.radian))
elif isinstance(region_shape, RectanglePixelRegion):
half_w = region_shape.width * 0.5
half_h = region_shape.height * 0.5
xmin = region_shape.center.x - half_w
xmax = region_shape.center.x + half_w
ymin = region_shape.center.y - half_h
ymax = region_shape.center.y + half_h
roi = RectangularROI(
xmin=xmin, xmax=xmax, ymin=ymin, ymax=ymax,
theta=region_shape.angle.to_value(u.radian))
elif isinstance(region_shape, CircleAnnulusPixelRegion):
roi = CircularAnnulusROI(
xc=region_shape.center.x, yc=region_shape.center.y,
inner_radius=region_shape.inner_radius, outer_radius=region_shape.outer_radius)
else:
raise NotImplementedError(f'{region_shape.__class__.__name__} is not supported')
return roi
def regions2aperture(region_shape):
"""Convert a given ``regions`` shape to ``photutils`` aperture.
Parameters
----------
region_shape : `regions.Region`
A supported ``regions`` shape.
Returns
-------
aperture : `photutils.aperture.Aperture`
An equivalent ``photutils`` aperture.
Raises
------
NotImplementedError
The given ``regions`` shape is not supported.
Examples
--------
Translate a `regions.CirclePixelRegion` to `photutils.aperture.CircularAperture`:
>>> from regions import CirclePixelRegion, PixCoord
>>> from jdaviz.core.region_translators import regions2aperture
>>> region_shape = CirclePixelRegion(center=PixCoord(x=42, y=43), radius=4.2)
>>> regions2aperture(region_shape)
<CircularAperture([42., 43.], r=4.2)>
See Also
--------
aperture2regions
"""
if isinstance(region_shape, CirclePixelRegion):
aperture = CircularAperture(region_shape.center.xy, region_shape.radius)
elif isinstance(region_shape, CircleSkyRegion):
aperture = SkyCircularAperture(region_shape.center, region_shape.radius)
elif isinstance(region_shape, EllipsePixelRegion):
aperture = EllipticalAperture(
region_shape.center.xy, region_shape.width * 0.5, region_shape.height * 0.5,
theta=region_shape.angle.to_value(u.radian))
elif isinstance(region_shape, EllipseSkyRegion):
aperture = SkyEllipticalAperture(
region_shape.center, region_shape.width * 0.5, region_shape.height * 0.5,
theta=(region_shape.angle - (90 * u.deg)))
elif isinstance(region_shape, RectanglePixelRegion):
aperture = RectangularAperture(
region_shape.center.xy, region_shape.width, region_shape.height,
theta=region_shape.angle.to_value(u.radian))
elif isinstance(region_shape, RectangleSkyRegion):
aperture = SkyRectangularAperture(
region_shape.center, region_shape.width, region_shape.height,
theta=(region_shape.angle - (90 * u.deg)))
elif isinstance(region_shape, CircleAnnulusPixelRegion):
aperture = CircularAnnulus(
region_shape.center.xy, region_shape.inner_radius, region_shape.outer_radius)
elif isinstance(region_shape, CircleAnnulusSkyRegion):
aperture = SkyCircularAnnulus(
region_shape.center, region_shape.inner_radius, region_shape.outer_radius)
elif isinstance(region_shape, EllipseAnnulusPixelRegion):
aperture = EllipticalAnnulus(
region_shape.center.xy, region_shape.inner_width * 0.5, region_shape.outer_width * 0.5,
region_shape.outer_height * 0.5, b_in=region_shape.inner_height * 0.5,
theta=region_shape.angle.to_value(u.radian))
elif isinstance(region_shape, EllipseAnnulusSkyRegion):
aperture = SkyEllipticalAnnulus(
region_shape.center, region_shape.inner_width * 0.5, region_shape.outer_width * 0.5,
region_shape.outer_height * 0.5, b_in=region_shape.inner_height * 0.5,
theta=(region_shape.angle - (90 * u.deg)))
elif isinstance(region_shape, RectangleAnnulusPixelRegion):
aperture = RectangularAnnulus(
region_shape.center.xy, region_shape.inner_width, region_shape.outer_width,
region_shape.outer_height, h_in=region_shape.inner_height,
theta=region_shape.angle.to_value(u.radian))
elif isinstance(region_shape, RectangleAnnulusSkyRegion):
aperture = SkyRectangularAnnulus(
region_shape.center, region_shape.inner_width, region_shape.outer_width,
region_shape.outer_height, h_in=region_shape.inner_height,
theta=(region_shape.angle - (90 * u.deg)))
else:
raise NotImplementedError(f'{region_shape.__class__.__name__} is not supported')
return aperture
def aperture2regions(aperture):
"""Convert a given ``photutils`` aperture to ``regions`` shape.
Parameters
----------
aperture : `photutils.aperture.Aperture`
An equivalent ``photutils`` aperture.
Returns
-------
region_shape : `regions.Region`
A supported ``regions`` shape.
Raises
------
NotImplementedError
The given ``photutils`` aperture is not supported.
ValueError
Invalid inputs.
Examples
--------
Translate a `photutils.aperture.CircularAperture` to `regions.CirclePixelRegion`:
>>> from photutils.aperture import CircularAperture
>>> from jdaviz.core.region_translators import aperture2regions
>>> aperture = CircularAperture((42, 43), 4.2)
>>> aperture2regions(aperture)
<CirclePixelRegion(center=PixCoord(x=42.0, y=43.0), radius=4.2)>
See Also
--------
regions2aperture
"""
if isinstance(aperture, CircularAperture):
region_shape = CirclePixelRegion(
center=positions2pixcoord(aperture.positions), radius=aperture.r)
elif isinstance(aperture, SkyCircularAperture):
region_shape = CircleSkyRegion(center=aperture.positions, radius=aperture.r)
elif isinstance(aperture, EllipticalAperture):
region_shape = EllipsePixelRegion(
center=positions2pixcoord(aperture.positions), width=aperture.a * 2,
height=aperture.b * 2, angle=theta2angle(aperture.theta))
elif isinstance(aperture, SkyEllipticalAperture):
region_shape = EllipseSkyRegion(
center=aperture.positions, width=aperture.a * 2, height=aperture.b * 2,
angle=(aperture.theta + (90 * u.deg)))
elif isinstance(aperture, RectangularAperture):
region_shape = RectanglePixelRegion(
center=positions2pixcoord(aperture.positions), width=aperture.w, height=aperture.h,
angle=theta2angle(aperture.theta))
elif isinstance(aperture, SkyRectangularAperture):
region_shape = RectangleSkyRegion(
center=aperture.positions, width=aperture.w, height=aperture.h,
angle=(aperture.theta + (90 * u.deg)))
elif isinstance(aperture, CircularAnnulus):
region_shape = CircleAnnulusPixelRegion(
center=positions2pixcoord(aperture.positions), inner_radius=aperture.r_in,
outer_radius=aperture.r_out)
elif isinstance(aperture, SkyCircularAnnulus):
region_shape = CircleAnnulusSkyRegion(
center=aperture.positions, inner_radius=aperture.r_in, outer_radius=aperture.r_out)
elif isinstance(aperture, EllipticalAnnulus):
region_shape = EllipseAnnulusPixelRegion(
center=positions2pixcoord(aperture.positions), inner_width=aperture.a_in * 2,
inner_height=aperture.b_in * 2, outer_width=aperture.a_out * 2,
outer_height=aperture.b_out * 2, angle=theta2angle(aperture.theta))
elif isinstance(aperture, SkyEllipticalAnnulus):
region_shape = EllipseAnnulusSkyRegion(
center=aperture.positions, inner_width=aperture.a_in * 2,
inner_height=aperture.b_in * 2, outer_width=aperture.a_out * 2,
outer_height=aperture.b_out * 2,
angle=(aperture.theta + (90 * u.deg)))
elif isinstance(aperture, RectangularAnnulus):
region_shape = RectangleAnnulusPixelRegion(
center=positions2pixcoord(aperture.positions), inner_width=aperture.w_in,
inner_height=aperture.h_in, outer_width=aperture.w_out, outer_height=aperture.h_out,
angle=theta2angle(aperture.theta))
elif isinstance(aperture, SkyRectangularAnnulus):
region_shape = RectangleAnnulusSkyRegion(
center=aperture.positions, inner_width=aperture.w_in, inner_height=aperture.h_in,
outer_width=aperture.w_out, outer_height=aperture.h_out,
angle=(aperture.theta + (90 * u.deg)))
else: # pragma: no cover
raise NotImplementedError(f'{aperture.__class__.__name__} is not supported')
return region_shape
def positions2pixcoord(positions):
"""Convert ``photutils`` aperture positions to `~regions.PixCoord`
that is acceptable by ``regions`` shape.
"""
if positions.shape != (2, ):
raise ValueError('regions shape only accepts scalar x and y positions')
if isinstance(positions, u.Quantity):
pixcoord = PixCoord(x=positions[0].value, y=positions[1].value)
else:
pixcoord = PixCoord(x=positions[0], y=positions[1])
return pixcoord
def theta2angle(theta):
"""Convert ``photutils`` theta to ``regions`` angle for pixel regions."""
return Angle(theta, u.radian)
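# Round-trip sketch (illustrative values, not from this module's tests):
# regions2aperture and aperture2regions invert each other for the supported
# shapes, e.g.
#   reg = CirclePixelRegion(center=PixCoord(x=1, y=2), radius=3.0)
#   aperture2regions(regions2aperture(reg))  # -> an equivalent CirclePixelRegion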
|
ca84df779ce21282964a520ea146169677e280d1
|
753cd066a9bd26b6c37c8d53a86c7a9c659ec18c
|
/gnn/ogb_lsc_pcqm4mv2/tensorflow2/custom_callbacks.py
|
600f1bbe57ed4284cbabb7c06676158279060e6b
|
[
"MIT",
"CC-BY-4.0",
"Apache-2.0"
] |
permissive
|
graphcore/examples
|
ac872015808ed2a913d4d7bf0d63202ce15ebbae
|
e2f834dd60e7939672c1795b4ac62e89ad0bca49
|
refs/heads/master
| 2023-08-05T02:08:12.341836
| 2023-07-27T11:13:10
| 2023-07-27T11:13:10
| 143,977,106
| 311
| 80
|
MIT
| 2023-09-11T16:42:56
| 2018-08-08T07:29:17
|
Python
|
UTF-8
|
Python
| false
| false
| 1,601
|
py
|
custom_callbacks.py
|
# Copyright (c) 2022 Graphcore Ltd. All rights reserved.
import logging
import os
import tensorflow as tf
import wandb
class CheckpointCallback(tf.keras.callbacks.ModelCheckpoint):
def __init__(
self, use_wandb=False, upload_to_wandb=False, save_checkpoints_locally=False, total_epochs=None, *args, **kwargs
) -> None:
super().__init__(*args, **kwargs)
self.use_wandb = use_wandb
self.upload_to_wandb = upload_to_wandb
self.save_checkpoints_locally = save_checkpoints_locally
self.epochs = total_epochs
def on_train_end(self, epoch, logs=None):
"""Overwrite the on train end method to save the last checkpoint
and then save the checkpoint to wandb
"""
filepath = self.filepath.replace("{epoch:05d}", "FINAL")
self.model.save_weights(filepath, overwrite=True, options=self._options)
if self.use_wandb and self.upload_to_wandb:
logging.info(f"Saving model weights from {filepath} to wandb...")
# Save all model checkpoints with string from above
if self.save_checkpoints_locally:
# This allows the saving format to be the same when coming from a tmp dir
splits = filepath.split("/")
base_path = os.path.join(*splits[:-1])
else:
# If the checkpoint is saved in 'tmp/' no base_path is needed
base_path = None
# Final checkpoints uploaded to wandb in root directory of wandb run
wandb.save(filepath + "*", policy="now", base_path=base_path)
|
49b42ae18b7a677f6133a3ad9f75b369e97a2359
|
e90bf4b372da78ceec15282d060b48d18ba8d4e9
|
/tests/mounts/test_manager.py
|
9341a26122de162d0c057aebcdfafd09d7a56ad2
|
[
"Apache-2.0"
] |
permissive
|
home-assistant/supervisor
|
67f2e1755ff5fbf7cf2084351e1c32c6995274e0
|
4838b280adafed0997f32e021274b531178386cd
|
refs/heads/main
| 2023-08-31T22:51:25.949277
| 2023-08-31T08:01:42
| 2023-08-31T08:01:42
| 84,926,758
| 928
| 477
|
Apache-2.0
| 2023-09-14T17:11:27
| 2017-03-14T08:54:15
|
Python
|
UTF-8
|
Python
| false
| false
| 22,379
|
py
|
test_manager.py
|
"""Tests for mount manager."""
import json
import os
from pathlib import Path
from dbus_fast import DBusError, ErrorType, Variant
from dbus_fast.aio.message_bus import MessageBus
import pytest
from supervisor.coresys import CoreSys
from supervisor.dbus.const import UnitActiveState
from supervisor.exceptions import (
MountActivationError,
MountError,
MountJobError,
MountNotFound,
)
from supervisor.mounts.manager import MountManager
from supervisor.mounts.mount import Mount
from supervisor.resolution.const import ContextType, IssueType, SuggestionType
from supervisor.resolution.data import Issue, Suggestion
from tests.common import mock_dbus_services
from tests.dbus_service_mocks.base import DBusServiceMock
from tests.dbus_service_mocks.systemd import Systemd as SystemdService
from tests.dbus_service_mocks.systemd_unit import SystemdUnit as SystemdUnitService
ERROR_NO_UNIT = DBusError("org.freedesktop.systemd1.NoSuchUnit", "error")
BACKUP_TEST_DATA = {
"name": "backup_test",
"type": "cifs",
"usage": "backup",
"server": "backup.local",
"share": "backups",
}
MEDIA_TEST_DATA = {
"name": "media_test",
"type": "nfs",
"usage": "media",
"server": "media.local",
"path": "/media",
}
SHARE_TEST_DATA = {
"name": "share_test",
"type": "nfs",
"usage": "share",
"server": "share.local",
"path": "/share",
}
@pytest.fixture(name="mount")
async def fixture_mount(
coresys: CoreSys, tmp_supervisor_data, path_extern, mount_propagation
) -> Mount:
"""Add an initial mount and load mounts."""
mount = Mount.from_dict(coresys, MEDIA_TEST_DATA)
coresys.mounts._mounts = {"media_test": mount} # pylint: disable=protected-access
await coresys.mounts.load()
yield mount
async def test_load(
coresys: CoreSys,
all_dbus_services: dict[str, DBusServiceMock],
tmp_supervisor_data,
path_extern,
mount_propagation,
):
"""Test mount manager loading."""
systemd_service: SystemdService = all_dbus_services["systemd"]
systemd_service.StartTransientUnit.calls.clear()
backup_test = Mount.from_dict(coresys, BACKUP_TEST_DATA)
media_test = Mount.from_dict(coresys, MEDIA_TEST_DATA)
# pylint: disable=protected-access
coresys.mounts._mounts = {
"backup_test": backup_test,
"media_test": media_test,
}
# pylint: enable=protected-access
assert coresys.mounts.backup_mounts == [backup_test]
assert coresys.mounts.media_mounts == [media_test]
assert backup_test.state is None
assert media_test.state is None
assert not backup_test.local_where.exists()
assert not media_test.local_where.exists()
assert not any(coresys.config.path_media.iterdir())
systemd_service.response_get_unit = {
"mnt-data-supervisor-mounts-backup_test.mount": [
ERROR_NO_UNIT,
"/org/freedesktop/systemd1/unit/tmp_2dyellow_2emount",
],
"mnt-data-supervisor-mounts-media_test.mount": [
ERROR_NO_UNIT,
"/org/freedesktop/systemd1/unit/tmp_2dyellow_2emount",
],
"mnt-data-supervisor-media-media_test.mount": [
ERROR_NO_UNIT,
"/org/freedesktop/systemd1/unit/tmp_2dyellow_2emount",
],
}
await coresys.mounts.load()
assert backup_test.state == UnitActiveState.ACTIVE
assert media_test.state == UnitActiveState.ACTIVE
assert backup_test.local_where.is_dir()
assert media_test.local_where.is_dir()
assert (coresys.config.path_media / "media_test").is_dir()
assert systemd_service.StartTransientUnit.calls == [
(
"mnt-data-supervisor-mounts-backup_test.mount",
"fail",
[
["Options", Variant("s", "noserverino,guest")],
["Type", Variant("s", "cifs")],
["Description", Variant("s", "Supervisor cifs mount: backup_test")],
["What", Variant("s", "//backup.local/backups")],
],
[],
),
(
"mnt-data-supervisor-mounts-media_test.mount",
"fail",
[
["Options", Variant("s", "soft,timeo=200")],
["Type", Variant("s", "nfs")],
["Description", Variant("s", "Supervisor nfs mount: media_test")],
["What", Variant("s", "media.local:/media")],
],
[],
),
(
"mnt-data-supervisor-media-media_test.mount",
"fail",
[
["Options", Variant("s", "bind")],
["Description", Variant("s", "Supervisor bind mount: bind_media_test")],
["What", Variant("s", "/mnt/data/supervisor/mounts/media_test")],
],
[],
),
]
async def test_load_share_mount(
coresys: CoreSys,
all_dbus_services: dict[str, DBusServiceMock],
tmp_supervisor_data,
path_extern,
mount_propagation,
):
"""Test mount manager loading with share mount."""
systemd_service: SystemdService = all_dbus_services["systemd"]
systemd_service.StartTransientUnit.calls.clear()
share_test = Mount.from_dict(coresys, SHARE_TEST_DATA)
# pylint: disable=protected-access
coresys.mounts._mounts = {
"share_test": share_test,
}
# pylint: enable=protected-access
assert coresys.mounts.share_mounts == [share_test]
assert share_test.state is None
assert not share_test.local_where.exists()
assert not any(coresys.config.path_share.iterdir())
systemd_service.response_get_unit = {
"mnt-data-supervisor-mounts-share_test.mount": [
ERROR_NO_UNIT,
"/org/freedesktop/systemd1/unit/tmp_2dyellow_2emount",
],
"mnt-data-supervisor-share-share_test.mount": [
ERROR_NO_UNIT,
"/org/freedesktop/systemd1/unit/tmp_2dyellow_2emount",
],
}
await coresys.mounts.load()
assert share_test.state == UnitActiveState.ACTIVE
assert share_test.local_where.is_dir()
assert (coresys.config.path_share / "share_test").is_dir()
assert systemd_service.StartTransientUnit.calls == [
(
"mnt-data-supervisor-mounts-share_test.mount",
"fail",
[
["Options", Variant("s", "soft,timeo=200")],
["Type", Variant("s", "nfs")],
["Description", Variant("s", "Supervisor nfs mount: share_test")],
["What", Variant("s", "share.local:/share")],
],
[],
),
(
"mnt-data-supervisor-share-share_test.mount",
"fail",
[
["Options", Variant("s", "bind")],
["Description", Variant("s", "Supervisor bind mount: bind_share_test")],
["What", Variant("s", "/mnt/data/supervisor/mounts/share_test")],
],
[],
),
]
async def test_mount_failed_during_load(
coresys: CoreSys,
all_dbus_services: dict[str, DBusServiceMock],
dbus_session_bus: MessageBus,
tmp_supervisor_data,
path_extern,
mount_propagation,
):
"""Test mount failed during load."""
await mock_dbus_services(
{"systemd_unit": "/org/freedesktop/systemd1/unit/tmp_test"}, dbus_session_bus
)
systemd_service: SystemdService = all_dbus_services["systemd"]
systemd_unit_service: SystemdUnitService = all_dbus_services["systemd_unit"]
systemd_service.StartTransientUnit.calls.clear()
backup_test = Mount.from_dict(coresys, BACKUP_TEST_DATA)
media_test = Mount.from_dict(coresys, MEDIA_TEST_DATA)
# pylint: disable=protected-access
coresys.mounts._mounts = {
"backup_test": backup_test,
"media_test": media_test,
}
# pylint: enable=protected-access
assert backup_test.state is None
assert media_test.state is None
assert not backup_test.local_where.exists()
assert not media_test.local_where.exists()
assert not any(coresys.config.path_emergency.iterdir())
assert not any(coresys.config.path_media.iterdir())
assert coresys.resolution.issues == []
assert coresys.resolution.suggestions == []
systemd_service.response_get_unit = {
"mnt-data-supervisor-mounts-backup_test.mount": [
ERROR_NO_UNIT,
"/org/freedesktop/systemd1/unit/tmp_2dyellow_2emount",
],
"mnt-data-supervisor-mounts-media_test.mount": [
ERROR_NO_UNIT,
"/org/freedesktop/systemd1/unit/tmp_2dyellow_2emount",
],
"mnt-data-supervisor-media-media_test.mount": [
ERROR_NO_UNIT,
"/org/freedesktop/systemd1/unit/tmp_test",
],
}
systemd_unit_service.active_state = "failed"
await coresys.mounts.load()
assert backup_test.state == UnitActiveState.FAILED
assert media_test.state == UnitActiveState.FAILED
assert backup_test.local_where.is_dir()
assert media_test.local_where.is_dir()
assert (coresys.config.path_media / "media_test").is_dir()
emergency_dir = coresys.config.path_emergency / "media_test"
assert emergency_dir.is_dir()
assert os.access(emergency_dir, os.R_OK)
assert not os.access(emergency_dir, os.W_OK)
assert (
Issue(IssueType.MOUNT_FAILED, ContextType.MOUNT, reference="backup_test")
in coresys.resolution.issues
)
assert (
Suggestion(
SuggestionType.EXECUTE_RELOAD, ContextType.MOUNT, reference="backup_test"
)
in coresys.resolution.suggestions
)
assert (
Suggestion(
SuggestionType.EXECUTE_REMOVE, ContextType.MOUNT, reference="backup_test"
)
in coresys.resolution.suggestions
)
assert (
Issue(IssueType.MOUNT_FAILED, ContextType.MOUNT, reference="media_test")
in coresys.resolution.issues
)
assert (
Suggestion(
SuggestionType.EXECUTE_RELOAD, ContextType.MOUNT, reference="media_test"
)
in coresys.resolution.suggestions
)
assert (
Suggestion(
SuggestionType.EXECUTE_REMOVE, ContextType.MOUNT, reference="media_test"
)
in coresys.resolution.suggestions
)
assert len(systemd_service.StartTransientUnit.calls) == 3
assert systemd_service.StartTransientUnit.calls[2] == (
"mnt-data-supervisor-media-media_test.mount",
"fail",
[
["Options", Variant("s", "bind")],
[
"Description",
Variant("s", "Supervisor bind mount: emergency_media_test"),
],
["What", Variant("s", "/mnt/data/supervisor/emergency/media_test")],
],
[],
)
async def test_create_mount(
coresys: CoreSys,
all_dbus_services: dict[str, DBusServiceMock],
tmp_supervisor_data,
path_extern,
mount_propagation,
):
"""Test creating a mount."""
systemd_service: SystemdService = all_dbus_services["systemd"]
systemd_service.StartTransientUnit.calls.clear()
await coresys.mounts.load()
mount = Mount.from_dict(coresys, MEDIA_TEST_DATA)
assert mount.state is None
assert mount not in coresys.mounts
assert "media_test" not in coresys.mounts
assert not mount.local_where.exists()
assert not any(coresys.config.path_media.iterdir())
# Create the mount
systemd_service.response_get_unit = [
ERROR_NO_UNIT,
"/org/freedesktop/systemd1/unit/tmp_2dyellow_2emount",
ERROR_NO_UNIT,
"/org/freedesktop/systemd1/unit/tmp_2dyellow_2emount",
]
await coresys.mounts.create_mount(mount)
assert mount.state == UnitActiveState.ACTIVE
assert mount in coresys.mounts
assert "media_test" in coresys.mounts
assert mount.local_where.exists()
assert (coresys.config.path_media / "media_test").exists()
assert [call[0] for call in systemd_service.StartTransientUnit.calls] == [
"mnt-data-supervisor-mounts-media_test.mount",
"mnt-data-supervisor-media-media_test.mount",
]
async def test_update_mount(
coresys: CoreSys,
all_dbus_services: dict[str, DBusServiceMock],
mount: Mount,
):
"""Test updating a mount."""
systemd_service: SystemdService = all_dbus_services["systemd"]
systemd_service.StartTransientUnit.calls.clear()
systemd_service.StopUnit.calls.clear()
# Update the mount. Should be unmounted then remounted
mount_new = Mount.from_dict(coresys, MEDIA_TEST_DATA)
assert mount.state == UnitActiveState.ACTIVE
assert mount_new.state is None
systemd_service.response_get_unit = [
"/org/freedesktop/systemd1/unit/tmp_2dyellow_2emount",
"/org/freedesktop/systemd1/unit/tmp_2dyellow_2emount",
ERROR_NO_UNIT,
"/org/freedesktop/systemd1/unit/tmp_2dyellow_2emount",
ERROR_NO_UNIT,
"/org/freedesktop/systemd1/unit/tmp_2dyellow_2emount",
]
await coresys.mounts.create_mount(mount_new)
assert mount.state is None
assert mount_new.state == UnitActiveState.ACTIVE
assert [call[0] for call in systemd_service.StartTransientUnit.calls] == [
"mnt-data-supervisor-mounts-media_test.mount",
"mnt-data-supervisor-media-media_test.mount",
]
assert [call[0] for call in systemd_service.StopUnit.calls] == [
"mnt-data-supervisor-media-media_test.mount",
"mnt-data-supervisor-mounts-media_test.mount",
]
async def test_reload_mount(
coresys: CoreSys,
all_dbus_services: dict[str, DBusServiceMock],
mount: Mount,
):
"""Test reloading a mount."""
systemd_service: SystemdService = all_dbus_services["systemd"]
systemd_service.ReloadOrRestartUnit.calls.clear()
# Reload the mount
systemd_service.response_get_unit = [
"/org/freedesktop/systemd1/unit/tmp_2dyellow_2emount"
]
await coresys.mounts.reload_mount(mount.name)
assert len(systemd_service.ReloadOrRestartUnit.calls) == 1
assert (
systemd_service.ReloadOrRestartUnit.calls[0][0]
== "mnt-data-supervisor-mounts-media_test.mount"
)
async def test_remove_mount(
coresys: CoreSys, all_dbus_services: dict[str, DBusServiceMock], mount: Mount
):
"""Test removing a mount."""
systemd_service: SystemdService = all_dbus_services["systemd"]
systemd_service.StopUnit.calls.clear()
# Remove the mount
assert mount == await coresys.mounts.remove_mount(mount.name)
assert mount.state is None
assert mount not in coresys.mounts
assert [call[0] for call in systemd_service.StopUnit.calls] == [
"mnt-data-supervisor-media-media_test.mount",
"mnt-data-supervisor-mounts-media_test.mount",
]
async def test_remove_reload_mount_missing(coresys: CoreSys, mount_propagation):
"""Test removing or reloading a non existent mount errors."""
await coresys.mounts.load()
with pytest.raises(MountNotFound):
await coresys.mounts.remove_mount("does_not_exist")
with pytest.raises(MountNotFound):
await coresys.mounts.reload_mount("does_not_exist")
async def test_save_data(
coresys: CoreSys, tmp_supervisor_data: Path, path_extern, mount_propagation
):
"""Test saving mount config data."""
# Replace mount manager with one that doesn't have save_data mocked
coresys._mounts = MountManager(coresys) # pylint: disable=protected-access
path = tmp_supervisor_data / "mounts.json"
assert not path.exists()
await coresys.mounts.load()
await coresys.mounts.create_mount(
Mount.from_dict(
coresys,
{
"name": "auth_test",
"type": "cifs",
"usage": "backup",
"server": "backup.local",
"share": "backups",
"username": "admin",
"password": "password",
},
)
)
coresys.mounts.save_data()
assert path.exists()
with path.open() as file:
config = json.load(file)
assert config["mounts"] == [
{
"version": None,
"name": "auth_test",
"type": "cifs",
"usage": "backup",
"server": "backup.local",
"share": "backups",
"username": "admin",
"password": "password",
}
]
async def test_create_mount_start_unit_failure(
coresys: CoreSys,
all_dbus_services: dict[str, DBusServiceMock],
tmp_supervisor_data,
path_extern,
mount_propagation,
):
"""Test failure to start mount unit does not add mount to the list."""
systemd_service: SystemdService = all_dbus_services["systemd"]
systemd_service.StartTransientUnit.calls.clear()
systemd_service.ResetFailedUnit.calls.clear()
systemd_service.StopUnit.calls.clear()
systemd_service.response_get_unit = ERROR_NO_UNIT
systemd_service.response_start_transient_unit = DBusError(ErrorType.FAILED, "fail")
await coresys.mounts.load()
mount = Mount.from_dict(coresys, BACKUP_TEST_DATA)
with pytest.raises(MountError):
await coresys.mounts.create_mount(mount)
assert mount.state is None
assert mount not in coresys.mounts
assert len(systemd_service.StartTransientUnit.calls) == 1
assert not systemd_service.ResetFailedUnit.calls
assert not systemd_service.StopUnit.calls
async def test_create_mount_activation_failure(
coresys: CoreSys,
all_dbus_services: dict[str, DBusServiceMock],
tmp_supervisor_data,
path_extern,
mount_propagation,
):
"""Test activation failure during create mount does not add mount to the list and unmounts new mount."""
systemd_service: SystemdService = all_dbus_services["systemd"]
systemd_unit_service: SystemdUnitService = all_dbus_services["systemd_unit"]
systemd_service.StartTransientUnit.calls.clear()
systemd_service.ResetFailedUnit.calls.clear()
systemd_service.StopUnit.calls.clear()
systemd_service.response_get_unit = [
ERROR_NO_UNIT,
"/org/freedesktop/systemd1/unit/tmp_2dyellow_2emount",
"/org/freedesktop/systemd1/unit/tmp_2dyellow_2emount",
]
systemd_unit_service.active_state = "failed"
await coresys.mounts.load()
mount = Mount.from_dict(coresys, BACKUP_TEST_DATA)
with pytest.raises(MountActivationError):
await coresys.mounts.create_mount(mount)
assert mount.state is None
assert mount not in coresys.mounts
assert len(systemd_service.StartTransientUnit.calls) == 1
assert len(systemd_service.ResetFailedUnit.calls) == 1
assert not systemd_service.StopUnit.calls
async def test_reload_mounts(
coresys: CoreSys, all_dbus_services: dict[str, DBusServiceMock], mount: Mount
):
"""Test reloading mounts."""
systemd_unit_service: SystemdUnitService = all_dbus_services["systemd_unit"]
systemd_service: SystemdService = all_dbus_services["systemd"]
systemd_service.ReloadOrRestartUnit.calls.clear()
await coresys.mounts.load()
assert mount.state == UnitActiveState.ACTIVE
assert mount.failed_issue not in coresys.resolution.issues
systemd_unit_service.active_state = "failed"
await coresys.mounts.reload()
assert mount.state == UnitActiveState.FAILED
assert mount.failed_issue in coresys.resolution.issues
assert len(coresys.resolution.suggestions_for_issue(mount.failed_issue)) == 2
assert len(systemd_service.ReloadOrRestartUnit.calls) == 1
# This shouldn't reload the mount again since this isn't a new failure
await coresys.mounts.reload()
assert len(systemd_service.ReloadOrRestartUnit.calls) == 1
# This should now remove the issue from the list
systemd_unit_service.active_state = "active"
await coresys.mounts.reload()
assert mount.state == UnitActiveState.ACTIVE
assert mount.failed_issue not in coresys.resolution.issues
assert not coresys.resolution.suggestions_for_issue(mount.failed_issue)
@pytest.mark.parametrize("os_available", ["9.5"], indirect=True)
async def test_mounting_not_supported(
coresys: CoreSys,
caplog: pytest.LogCaptureFixture,
os_available,
):
"""Test mounting not supported on system."""
caplog.clear()
await coresys.mounts.load()
assert not caplog.text
mount = Mount.from_dict(coresys, MEDIA_TEST_DATA)
coresys.mounts._mounts = {"media_test": mount} # pylint: disable=protected-access
# Only tell the user about an issue here if they actually have mounts we couldn't load
    # This is an edge case, but users can downgrade the OS, so it's possible
await coresys.mounts.load()
assert "Cannot load configured mounts" in caplog.text
with pytest.raises(MountJobError):
await coresys.mounts.create_mount(mount)
with pytest.raises(MountJobError):
await coresys.mounts.reload_mount("media_test")
with pytest.raises(MountJobError):
await coresys.mounts.remove_mount("media_test")
async def test_create_share_mount(
coresys: CoreSys,
all_dbus_services: dict[str, DBusServiceMock],
tmp_supervisor_data,
path_extern,
mount_propagation,
):
"""Test creating a share mount."""
systemd_service: SystemdService = all_dbus_services["systemd"]
systemd_service.StartTransientUnit.calls.clear()
await coresys.mounts.load()
mount = Mount.from_dict(coresys, SHARE_TEST_DATA)
assert mount.state is None
assert mount not in coresys.mounts
assert "share_test" not in coresys.mounts
assert not mount.local_where.exists()
assert not any(coresys.config.path_share.iterdir())
# Create the mount
systemd_service.response_get_unit = [
ERROR_NO_UNIT,
"/org/freedesktop/systemd1/unit/tmp_2dyellow_2emount",
ERROR_NO_UNIT,
"/org/freedesktop/systemd1/unit/tmp_2dyellow_2emount",
]
await coresys.mounts.create_mount(mount)
assert mount.state == UnitActiveState.ACTIVE
assert mount in coresys.mounts
assert "share_test" in coresys.mounts
assert mount.local_where.exists()
assert (coresys.config.path_share / "share_test").exists()
assert [call[0] for call in systemd_service.StartTransientUnit.calls] == [
"mnt-data-supervisor-mounts-share_test.mount",
"mnt-data-supervisor-share-share_test.mount",
]
|
0e7f3a1bc8913af6168723ea54810c1eed3c0a7c
|
e627710c2c3bf663b50b762ade435b4cae9800a8
|
/code/patrol/src/shapes.py
|
5b346a77173112baacc5b2e9300fad9b3a2e9657
|
[
"Apache-2.0"
] |
permissive
|
osrf/rosbook
|
9996e007f02f633b7d820fc300896699e59f8fb5
|
92c247447676db4cc2aed4fdd02832ed1355c4d4
|
refs/heads/master
| 2023-08-09T06:13:02.504080
| 2023-07-24T14:28:36
| 2023-07-24T14:28:36
| 44,305,519
| 512
| 276
|
Apache-2.0
| 2023-07-24T14:28:37
| 2015-10-15T08:57:17
|
Python
|
UTF-8
|
Python
| false
| false
| 2,028
|
py
|
shapes.py
|
# BEGIN ALL
#!/usr/bin/env python
import rospy
from smach import State,StateMachine
from time import sleep
class Drive(State):
def __init__(self, distance):
State.__init__(self, outcomes=['success'])
self.distance = distance
def execute(self, userdata):
        print('Driving', self.distance)
sleep(1)
return 'success'
class Turn(State):
def __init__(self, angle):
State.__init__(self, outcomes=['success'])
self.angle = angle
def execute(self, userdata):
        print('Turning', self.angle)
sleep(1)
return 'success'
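# Each State subclass returns one of its declared outcomes; StateMachine.add
# below maps that outcome to the label of the next state, and mapping
# 'success' on the final state to the machine's own 'success' outcome ends
# the machine.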
if __name__ == '__main__':
# BEGIN PART_2
triangle = StateMachine(outcomes=['success'])
with triangle:
StateMachine.add('SIDE1', Drive(1), transitions={'success':'TURN1'})
StateMachine.add('TURN1', Turn(120), transitions={'success':'SIDE2'})
StateMachine.add('SIDE2', Drive(1), transitions={'success':'TURN2'})
StateMachine.add('TURN2', Turn(120), transitions={'success':'SIDE3'})
StateMachine.add('SIDE3', Drive(1), transitions={'success':'success'})
# END PART_2
square = StateMachine(outcomes=['success'])
with square:
StateMachine.add('SIDE1', Drive(1), transitions={'success':'TURN1'})
StateMachine.add('TURN1', Turn(90), transitions={'success':'SIDE2'})
StateMachine.add('SIDE2', Drive(1), transitions={'success':'TURN2'})
StateMachine.add('TURN2', Turn(90), transitions={'success':'SIDE3'})
StateMachine.add('SIDE3', Drive(1), transitions={'success':'TURN3'})
StateMachine.add('TURN3', Turn(90), transitions={'success':'SIDE4'})
StateMachine.add('SIDE4', Drive(1), transitions={'success':'success'})
# BEGIN PART_3
shapes = StateMachine(outcomes=['success'])
with shapes:
StateMachine.add('TRIANGLE', triangle, transitions={'success':'SQUARE'})
StateMachine.add('SQUARE', square, transitions={'success':'success'})
shapes.execute()
# END PART_3
# END ALL
|
061f50e05f60f8713f1bb9a703019f676ecf48f3
|
5a52ccea88f90dd4f1acc2819997fce0dd5ffb7d
|
/alipay/aop/api/response/AlipayUserCertdocQueryResponse.py
|
5244264cb91344aa90cce32dc376ecd85e7f3ccc
|
[
"Apache-2.0"
] |
permissive
|
alipay/alipay-sdk-python-all
|
8bd20882852ffeb70a6e929038bf88ff1d1eff1c
|
1fad300587c9e7e099747305ba9077d4cd7afde9
|
refs/heads/master
| 2023-08-27T21:35:01.778771
| 2023-08-23T07:12:26
| 2023-08-23T07:12:26
| 133,338,689
| 247
| 70
|
Apache-2.0
| 2023-04-25T04:54:02
| 2018-05-14T09:40:54
|
Python
|
UTF-8
|
Python
| false
| false
| 5,449
|
py
|
AlipayUserCertdocQueryResponse.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.response.AlipayResponse import AlipayResponse
from alipay.aop.api.domain.AlipayUserCertDocDrivingLicense import AlipayUserCertDocDrivingLicense
from alipay.aop.api.domain.AlipayUserCertDocIDCard import AlipayUserCertDocIDCard
from alipay.aop.api.domain.AlipayUserCertDocPassport import AlipayUserCertDocPassport
from alipay.aop.api.domain.AlipayUserCertDocVehicleLicense import AlipayUserCertDocVehicleLicense
class AlipayUserCertdocQueryResponse(AlipayResponse):
def __init__(self):
super(AlipayUserCertdocQueryResponse, self).__init__()
self._driving_license = None
self._driving_license_list = None
self._identity_card = None
self._identity_card_list = None
self._passport_list = None
self._self_vehicle_license_list = None
self._vehicle_license_list = None
@property
def driving_license(self):
return self._driving_license
@driving_license.setter
def driving_license(self, value):
if isinstance(value, AlipayUserCertDocDrivingLicense):
self._driving_license = value
else:
self._driving_license = AlipayUserCertDocDrivingLicense.from_alipay_dict(value)
@property
def driving_license_list(self):
return self._driving_license_list
@driving_license_list.setter
def driving_license_list(self, value):
if isinstance(value, list):
self._driving_license_list = list()
for i in value:
if isinstance(i, AlipayUserCertDocDrivingLicense):
self._driving_license_list.append(i)
else:
self._driving_license_list.append(AlipayUserCertDocDrivingLicense.from_alipay_dict(i))
@property
def identity_card(self):
return self._identity_card
@identity_card.setter
def identity_card(self, value):
if isinstance(value, AlipayUserCertDocIDCard):
self._identity_card = value
else:
self._identity_card = AlipayUserCertDocIDCard.from_alipay_dict(value)
@property
def identity_card_list(self):
return self._identity_card_list
@identity_card_list.setter
def identity_card_list(self, value):
if isinstance(value, list):
self._identity_card_list = list()
for i in value:
if isinstance(i, AlipayUserCertDocIDCard):
self._identity_card_list.append(i)
else:
self._identity_card_list.append(AlipayUserCertDocIDCard.from_alipay_dict(i))
@property
def passport_list(self):
return self._passport_list
@passport_list.setter
def passport_list(self, value):
if isinstance(value, list):
self._passport_list = list()
for i in value:
if isinstance(i, AlipayUserCertDocPassport):
self._passport_list.append(i)
else:
self._passport_list.append(AlipayUserCertDocPassport.from_alipay_dict(i))
@property
def self_vehicle_license_list(self):
return self._self_vehicle_license_list
@self_vehicle_license_list.setter
def self_vehicle_license_list(self, value):
if isinstance(value, list):
self._self_vehicle_license_list = list()
for i in value:
if isinstance(i, AlipayUserCertDocVehicleLicense):
self._self_vehicle_license_list.append(i)
else:
self._self_vehicle_license_list.append(AlipayUserCertDocVehicleLicense.from_alipay_dict(i))
@property
def vehicle_license_list(self):
return self._vehicle_license_list
@vehicle_license_list.setter
def vehicle_license_list(self, value):
if isinstance(value, list):
self._vehicle_license_list = list()
for i in value:
if isinstance(i, AlipayUserCertDocVehicleLicense):
self._vehicle_license_list.append(i)
else:
self._vehicle_license_list.append(AlipayUserCertDocVehicleLicense.from_alipay_dict(i))
def parse_response_content(self, response_content):
response = super(AlipayUserCertdocQueryResponse, self).parse_response_content(response_content)
if 'driving_license' in response:
self.driving_license = response['driving_license']
if 'driving_license_list' in response:
self.driving_license_list = response['driving_license_list']
if 'identity_card' in response:
self.identity_card = response['identity_card']
if 'identity_card_list' in response:
self.identity_card_list = response['identity_card_list']
if 'passport_list' in response:
self.passport_list = response['passport_list']
if 'self_vehicle_license_list' in response:
self.self_vehicle_license_list = response['self_vehicle_license_list']
if 'vehicle_license_list' in response:
self.vehicle_license_list = response['vehicle_license_list']
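# Note: each setter above accepts either a ready domain object or the raw
# dict parsed from the gateway JSON (coerced via from_alipay_dict), and
# parse_response_content only assigns the keys actually present in the
# response payload.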
|
3efde495960557be1139c1b8bc4fe7f2431b78ef
|
96dcea595e7c16cec07b3f649afd65f3660a0bad
|
/homeassistant/components/recollect_waste/config_flow.py
|
c3e770cc4583fe9399eb4c0024f0769e40eea550
|
[
"Apache-2.0"
] |
permissive
|
home-assistant/core
|
3455eac2e9d925c92d30178643b1aaccf3a6484f
|
80caeafcb5b6e2f9da192d0ea6dd1a5b8244b743
|
refs/heads/dev
| 2023-08-31T15:41:06.299469
| 2023-08-31T14:50:53
| 2023-08-31T14:50:53
| 12,888,993
| 35,501
| 20,617
|
Apache-2.0
| 2023-09-14T21:50:15
| 2013-09-17T07:29:48
|
Python
|
UTF-8
|
Python
| false
| false
| 3,139
|
py
|
config_flow.py
|
"""Config flow for ReCollect Waste integration."""
from __future__ import annotations
from typing import Any
from aiorecollect.client import Client
from aiorecollect.errors import RecollectError
import voluptuous as vol
from homeassistant import config_entries
from homeassistant.const import CONF_FRIENDLY_NAME
from homeassistant.core import callback
from homeassistant.data_entry_flow import FlowResult
from homeassistant.helpers import aiohttp_client
from .const import CONF_PLACE_ID, CONF_SERVICE_ID, DOMAIN, LOGGER
DATA_SCHEMA = vol.Schema(
{vol.Required(CONF_PLACE_ID): str, vol.Required(CONF_SERVICE_ID): str}
)
class ConfigFlow(config_entries.ConfigFlow, domain=DOMAIN):
"""Handle a config flow for ReCollect Waste."""
VERSION = 2
@staticmethod
@callback
def async_get_options_flow(
config_entry: config_entries.ConfigEntry,
) -> config_entries.OptionsFlow:
"""Define the config flow to handle options."""
return RecollectWasteOptionsFlowHandler(config_entry)
async def async_step_user(
self, user_input: dict[str, Any] | None = None
) -> FlowResult:
"""Handle configuration via the UI."""
if user_input is None:
return self.async_show_form(
step_id="user", data_schema=DATA_SCHEMA, errors={}
)
unique_id = f"{user_input[CONF_PLACE_ID]}, {user_input[CONF_SERVICE_ID]}"
await self.async_set_unique_id(unique_id)
self._abort_if_unique_id_configured()
session = aiohttp_client.async_get_clientsession(self.hass)
client = Client(
user_input[CONF_PLACE_ID], user_input[CONF_SERVICE_ID], session=session
)
try:
await client.async_get_pickup_events()
except RecollectError as err:
LOGGER.error("Error during setup of integration: %s", err)
return self.async_show_form(
step_id="user",
data_schema=DATA_SCHEMA,
errors={"base": "invalid_place_or_service_id"},
)
return self.async_create_entry(
title=unique_id,
data={
CONF_PLACE_ID: user_input[CONF_PLACE_ID],
CONF_SERVICE_ID: user_input[CONF_SERVICE_ID],
},
)
class RecollectWasteOptionsFlowHandler(config_entries.OptionsFlow):
"""Handle a Recollect Waste options flow."""
def __init__(self, entry: config_entries.ConfigEntry) -> None:
"""Initialize."""
self._entry = entry
async def async_step_init(
self, user_input: dict[str, Any] | None = None
) -> FlowResult:
"""Manage the options."""
if user_input is not None:
return self.async_create_entry(title="", data=user_input)
return self.async_show_form(
step_id="init",
data_schema=vol.Schema(
{
vol.Optional(
CONF_FRIENDLY_NAME,
default=self._entry.options.get(CONF_FRIENDLY_NAME),
): bool
}
),
)
|
cd3ae2989d1bc389fbb1a64d818746f179d352a1
|
790856ce4e4f38e67875fd9e0b337ab2b714268e
|
/benchmarks/isoneutral_mixing/isoneutral_numba.py
|
cfbe9244bc2fbd3b1d6eff74897bbadea1fc8d39
|
[
"Unlicense"
] |
permissive
|
dionhaefner/pyhpc-benchmarks
|
43dec379e35d7f869e75886af95547838f503e6d
|
4ae8685768a012a8af76b19da1fa551780b89373
|
refs/heads/master
| 2023-02-05T08:30:08.785398
| 2023-01-30T11:36:44
| 2023-01-30T11:36:44
| 212,333,820
| 279
| 29
|
Unlicense
| 2023-01-10T12:37:26
| 2019-10-02T12:22:16
|
Python
|
UTF-8
|
Python
| false
| false
| 9,047
|
py
|
isoneutral_numba.py
|
import numpy as np
import numba as nb
@nb.jit(nopython=True, fastmath=True)
def get_drhodT(salt, temp, p):
rho0 = 1024.0
z0 = 0.0
theta0 = 283.0 - 273.15
grav = 9.81
betaT = 1.67e-4
betaTs = 1e-5
gammas = 1.1e-8
zz = -p - z0
thetas = temp - theta0
return -(betaTs * thetas + betaT * (1 - gammas * grav * zz * rho0)) * rho0
@nb.jit(nopython=True, fastmath=True)
def get_drhodS(salt, temp, p):
betaS = 0.78e-3
rho0 = 1024.0
return betaS * rho0
@nb.jit(nopython=True, fastmath=True)
def dm_taper(sx):
"""
tapering function for isopycnal slopes
"""
iso_slopec = 1e-3
iso_dslope = 1e-3
return 0.5 * (1.0 + np.tanh((-np.abs(sx) + iso_slopec) / iso_dslope))
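# In math form the same expression reads
#   taper(s) = (1/2) * (1 + tanh((s_c - |s|) / d_s))
# with s_c = iso_slopec and d_s = iso_dslope: slopes well below s_c pass
# through (taper ~ 1) while steeper slopes are smoothly suppressed (taper ~ 0).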
@nb.jit(nopython=True, boundscheck=False, nogil=True, fastmath=True, cache=True)
def isoneutral_diffusion_pre(
maskT,
maskU,
maskV,
maskW,
dxt,
dxu,
dyt,
dyu,
dzt,
dzw,
cost,
cosu,
salt,
temp,
zt,
K_iso,
K_11,
K_22,
K_33,
Ai_ez,
Ai_nz,
Ai_bx,
Ai_by,
):
"""
    Isopycnal diffusion for a tracer,
    following the functional formulation by Griffies et al.
    Code adapted from MOM2.1
"""
nx, ny, nz = maskT.shape
epsln = 1e-20
K_iso_steep = 50.0
tau = 0
drdT = np.empty_like(K_11)
drdS = np.empty_like(K_11)
dTdx = np.empty_like(K_11)
dSdx = np.empty_like(K_11)
dTdy = np.empty_like(K_11)
dSdy = np.empty_like(K_11)
dTdz = np.empty_like(K_11)
dSdz = np.empty_like(K_11)
"""
drho_dt and drho_ds at centers of T cells
"""
for i in range(nx):
for j in range(ny):
for k in range(nz):
drdT[i, j, k] = maskT[i, j, k] * get_drhodT(
salt[i, j, k, tau], temp[i, j, k, tau], np.abs(zt[k])
)
drdS[i, j, k] = maskT[i, j, k] * get_drhodS(
salt[i, j, k, tau], temp[i, j, k, tau], np.abs(zt[k])
)
"""
gradients at top face of T cells
"""
for i in range(nx):
for j in range(ny):
for k in range(nz - 1):
dTdz[i, j, k] = (
maskW[i, j, k]
* (temp[i, j, k + 1, tau] - temp[i, j, k, tau])
/ dzw[k]
)
dSdz[i, j, k] = (
maskW[i, j, k]
* (salt[i, j, k + 1, tau] - salt[i, j, k, tau])
/ dzw[k]
)
dTdz[i, j, -1] = 0.0
dSdz[i, j, -1] = 0.0
"""
gradients at eastern face of T cells
"""
for i in range(nx - 1):
for j in range(ny):
for k in range(nz):
dTdx[i, j, k] = (
maskU[i, j, k]
* (temp[i + 1, j, k, tau] - temp[i, j, k, tau])
/ (dxu[i] * cost[j])
)
dSdx[i, j, k] = (
maskU[i, j, k]
* (salt[i + 1, j, k, tau] - salt[i, j, k, tau])
/ (dxu[i] * cost[j])
)
dTdx[-1, :, :] = 0.0
dSdx[-1, :, :] = 0.0
"""
gradients at northern face of T cells
"""
for i in range(nx):
for j in range(ny - 1):
for k in range(nz):
dTdy[i, j, k] = (
maskV[i, j, k]
* (temp[i, j + 1, k, tau] - temp[i, j, k, tau])
/ dyu[j]
)
dSdy[i, j, k] = (
maskV[i, j, k]
* (salt[i, j + 1, k, tau] - salt[i, j, k, tau])
/ dyu[j]
)
dTdy[:, -1, :] = 0.0
dSdy[:, -1, :] = 0.0
"""
Compute Ai_ez and K11 on center of east face of T cell.
"""
for i in range(1, nx - 2):
for j in range(2, ny - 2):
for k in range(0, nz):
if k == 0:
diffloc = 0.5 * (K_iso[i, j, k] + K_iso[i + 1, j, k])
else:
diffloc = 0.25 * (
K_iso[i, j, k]
+ K_iso[i, j, k - 1]
+ K_iso[i + 1, j, k]
+ K_iso[i + 1, j, k - 1]
)
sumz = 0.0
for kr in (0, 1):
if k == 0 and kr == 0:
continue
for ip in (0, 1):
drodxe = (
drdT[i + ip, j, k] * dTdx[i, j, k]
+ drdS[i + ip, j, k] * dSdx[i, j, k]
)
drodze = (
drdT[i + ip, j, k] * dTdz[i + ip, j, k + kr - 1]
+ drdS[i + ip, j, k] * dSdz[i + ip, j, k + kr - 1]
)
sxe = -drodxe / (min(0.0, drodze) - epsln)
taper = dm_taper(sxe)
sumz += (
dzw[k + kr - 1]
* maskU[i, j, k]
* max(K_iso_steep, diffloc * taper)
)
Ai_ez[i, j, k, ip, kr] = taper * sxe * maskU[i, j, k]
K_11[i, j, k] = sumz / (4.0 * dzt[k])
"""
Compute Ai_nz and K_22 on center of north face of T cell.
"""
for i in range(2, nx - 2):
for j in range(1, ny - 2):
for k in range(0, nz):
if k == 0:
diffloc = 0.5 * (K_iso[i, j, k] + K_iso[i, j + 1, k])
else:
diffloc = 0.25 * (
K_iso[i, j, k]
+ K_iso[i, j, k - 1]
+ K_iso[i, j + 1, k]
+ K_iso[i, j + 1, k - 1]
)
sumz = 0.0
for kr in (0, 1):
if k == 0 and kr == 0:
continue
for jp in (0, 1):
drodyn = (
drdT[i, j + jp, k] * dTdy[i, j, k]
+ drdS[i, j + jp, k] * dSdy[i, j, k]
)
drodzn = (
drdT[i, j + jp, k] * dTdz[i, j + jp, k + kr - 1]
+ drdS[i, j + jp, k] * dSdz[i, j + jp, k + kr - 1]
)
syn = -drodyn / (min(0.0, drodzn) - epsln)
taper = dm_taper(syn)
sumz += (
dzw[k + kr - 1]
* maskV[i, j, k]
* max(K_iso_steep, diffloc * taper)
)
Ai_nz[i, j, k, jp, kr] = taper * syn * maskV[i, j, k]
K_22[i, j, k] = sumz / (4.0 * dzt[k])
"""
compute Ai_bx, Ai_by and K33 on top face of T cell.
"""
for i in range(2, nx - 2):
for j in range(2, ny - 2):
for k in range(nz - 1):
sumx = 0.0
sumy = 0.0
for kr in (0, 1):
drodzb = (
drdT[i, j, k + kr] * dTdz[i, j, k]
+ drdS[i, j, k + kr] * dSdz[i, j, k]
)
# eastward slopes at the top of T cells
for ip in (0, 1):
drodxb = (
drdT[i, j, k + kr] * dTdx[i - 1 + ip, j, k + kr]
+ drdS[i, j, k + kr] * dSdx[i - 1 + ip, j, k + kr]
)
sxb = -drodxb / (min(0.0, drodzb) - epsln)
taper = dm_taper(sxb)
sumx += (
dxu[i - 1 + ip]
* K_iso[i, j, k]
* taper
* sxb ** 2
* maskW[i, j, k]
)
Ai_bx[i, j, k, ip, kr] = taper * sxb * maskW[i, j, k]
# northward slopes at the top of T cells
for jp in (0, 1):
facty = cosu[j - 1 + jp] * dyu[j - 1 + jp]
drodyb = (
drdT[i, j, k + kr] * dTdy[i, j + jp - 1, k + kr]
+ drdS[i, j, k + kr] * dSdy[i, j + jp - 1, k + kr]
)
syb = -drodyb / (min(0.0, drodzb) - epsln)
taper = dm_taper(syb)
sumy += (
facty * K_iso[i, j, k] * taper * syb ** 2 * maskW[i, j, k]
)
Ai_by[i, j, k, jp, kr] = taper * syb * maskW[i, j, k]
K_33[i, j, k] = sumx / (4 * dxt[i]) + sumy / (4 * dyt[j] * cost[j])
K_33[i, j, -1] = 0.0
def run(*inputs, device="cpu"):
isoneutral_diffusion_pre(*inputs)
return inputs[-7:]
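# Note: the kernel fills its output arrays in place; run() returns the last
# seven inputs (K_11, K_22, K_33, Ai_ez, Ai_nz, Ai_bx, Ai_by) so the
# surrounding benchmark harness can compare results across backends, and
# `device` is accepted for interface parity but unused here since this Numba
# kernel targets the CPU.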
|
05b8d0d5f2c971024e3c6148f9bd9ff8de7f193e
|
c2d48caa5db7e746a38beca625406fcf47379d3c
|
/src/olympia/constants/reviewers.py
|
b553f26cb8ed311767ea14b9b004dad688f80f5e
|
[
"CC-BY-4.0",
"CC-BY-NC-4.0",
"CC-BY-ND-3.0",
"MIT",
"CC-BY-SA-3.0",
"LGPL-3.0-only",
"MPL-2.0",
"CC-BY-NC-ND-4.0",
"MPL-1.1",
"BSD-3-Clause",
"CC-BY-NC-SA-3.0",
"GPL-3.0-only",
"GPL-2.0-only",
"LGPL-2.0-only"
] |
permissive
|
mozilla/addons-server
|
1f6269ec0a4aa5a0142a5f81978ef674daf213a7
|
e0f043bca8a64478e2ba62f877c9dc28620be22f
|
refs/heads/master
| 2023-09-01T09:34:41.867534
| 2023-09-01T07:21:22
| 2023-09-01T07:21:22
| 16,416,867
| 920
| 590
|
BSD-3-Clause
| 2023-09-14T16:15:01
| 2014-01-31T18:44:15
|
Python
|
UTF-8
|
Python
| false
| false
| 2,057
|
py
|
reviewers.py
|
from django.utils.translation import gettext_lazy as _
from .base import ADDON_ANY, ADDON_EXTENSION, ADDON_STATICTHEME
# Reviewer Tools
REVIEWER_VIEWING_INTERVAL = 8 # How often we ping for "who's watching?"
REVIEWER_REVIEW_LOCK_LIMIT = 3 # How many pages can a reviewer "watch"
# Default delayed rejection period in days
REVIEWER_DELAYED_REJECTION_PERIOD_DAYS_DEFAULT = 14
REVIEWER_STANDARD_REVIEW_TIME = 3 # How many (week)days we expect to review within
REVIEWER_STANDARD_REPLY_TIME = 2 # How many (week)days we expect to reply within
# Risk tiers for post-review weight.
POST_REVIEW_WEIGHT_HIGHEST_RISK = 275
POST_REVIEW_WEIGHT_HIGH_RISK = 175
POST_REVIEW_WEIGHT_MEDIUM_RISK = 90
REPUTATION_CHOICES = {
0: _('No Reputation'),
1: _('Good (1)'),
2: _('Very Good (2)'),
3: _('Excellent (3)'),
}
# Review queue pagination
REVIEWS_PER_PAGE = 200
REVIEWS_PER_PAGE_MAX = 400
VERSIONS_PER_REVIEW_PAGE = 10
ACTION_MOREINFO = 0
ACTION_FLAG = 1
ACTION_DUPLICATE = 2
ACTION_REJECT = 3
ACTION_APPROVE = 4
REVIEW_ACTIONS = {
ACTION_MOREINFO: _('Request More Info'),
ACTION_FLAG: _('Flag'),
ACTION_DUPLICATE: _('Duplicate'),
ACTION_REJECT: _('Reject'),
ACTION_APPROVE: _('Approve'),
}
WOULD_NOT_HAVE_BEEN_AUTO_APPROVED = 0
WOULD_HAVE_BEEN_AUTO_APPROVED = 1
AUTO_APPROVED = 2
NOT_AUTO_APPROVED = 3
AUTO_APPROVAL_VERDICT_CHOICES = (
(
WOULD_NOT_HAVE_BEEN_AUTO_APPROVED,
        'Would *not* have been auto-approved (dry-run mode was in effect)',
),
(
WOULD_HAVE_BEEN_AUTO_APPROVED,
        'Would have been auto-approved (dry-run mode was in effect)',
),
(AUTO_APPROVED, 'Was auto-approved'),
(NOT_AUTO_APPROVED, 'Was *not* auto-approved'),
)
# Types of Add-ons for Reasons.
REASON_ADDON_TYPE_CHOICES = {
ADDON_ANY: _('All'),
ADDON_EXTENSION: _('Extension'),
ADDON_STATICTHEME: _('Theme'),
}
# Target number of reviews each task that adds extra versions to the review
# queue will add per day.
EXTRA_REVIEW_TARGET_PER_DAY_CONFIG_KEY = 'extra-review-target-per-day'
|
d3ffe1e667fcdd38fce7283e08144dc24040a276
|
61004e474b7b2ad0071c16766f0f7874f04f9466
|
/examples/cloudml-collaborative-filtering/preprocessing/preprocess_test.py
|
cfbd14458174dd506cfd85df9b3547be64ba6a75
|
[
"Apache-2.0"
] |
permissive
|
GoogleCloudPlatform/professional-services
|
eb79751efae765a8c691a745e520f44f51bd715c
|
0f51121b945bd74c7f667e74e8861fceda87565c
|
refs/heads/main
| 2023-09-05T02:57:33.328973
| 2023-08-30T14:40:30
| 2023-08-30T14:40:30
| 91,730,359
| 2,626
| 1,381
|
Apache-2.0
| 2023-09-14T20:13:42
| 2017-05-18T19:29:27
|
Python
|
UTF-8
|
Python
| false
| false
| 2,363
|
py
|
preprocess_test.py
|
# Copyright 2019 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unit tests for the beam pipeline."""
from __future__ import absolute_import
import unittest
import apache_beam as beam
from apache_beam.testing.test_pipeline import TestPipeline
from apache_beam.testing.util import assert_that
from apache_beam.testing.util import equal_to
from constants import constants
from preprocessing import preprocess
class TestPreprocess(unittest.TestCase):
"""Tests beam pipeline functions."""
def test_handle_null_user_tags(self):
"""Tests the _handle_null_user_tags function."""
source = [
{constants.USER_TAGS_KEY: []},
{constants.USER_TAGS_KEY: [1] * constants.USER_TAGS_LENGTH},
]
target = [
{constants.USER_TAGS_KEY: [0] * constants.USER_TAGS_LENGTH},
{constants.USER_TAGS_KEY: [1] * constants.USER_TAGS_LENGTH},
]
with TestPipeline() as p:
features = (p
| "CreateStubs" >> beam.Create(source)
| "HandleNullUserTags" >> beam.Map(
preprocess._handle_null_user_tags))
assert_that(features, equal_to(target))
def test_normalize_user_tags(self):
"""Tests the _normalize_user_tags function."""
n_tags = constants.USER_TAGS_LENGTH
source = [
{constants.USER_TAGS_KEY: [0] * n_tags},
{constants.USER_TAGS_KEY: [1] * n_tags},
]
target = [
{constants.USER_TAGS_KEY: [0] * n_tags},
{constants.USER_TAGS_KEY: [n_tags**-1] * n_tags},
]
with TestPipeline() as p:
features = (p
| "CreateStubs" >> beam.Create(source)
| "NormalizeUserTags" >> beam.Map(
preprocess._normalize_user_tags))
assert_that(features, equal_to(target))
if __name__ == "__main__":
unittest.main()
|
cd345172f704f72c9fb22c2612476aed88cad4bf
|
11cd362cdd78c2fc48042ed203614b201ac94aa6
|
/desktop/core/ext-py3/pysaml2-7.3.1/src/saml2/response.py
|
2bc147dbba953a4f9cabd94e9532667dc7dc891d
|
[
"Apache-2.0",
"CC-BY-3.0",
"LicenseRef-scancode-other-copyleft",
"LicenseRef-scancode-unknown-license-reference",
"ZPL-2.0",
"Unlicense",
"LGPL-3.0-only",
"CC0-1.0",
"LicenseRef-scancode-other-permissive",
"CNRI-Python",
"LicenseRef-scancode-warranty-disclaimer",
"GPL-2.0-or-later",
"Python-2.0",
"GPL-3.0-only",
"CC-BY-4.0",
"LicenseRef-scancode-jpython-1.1",
"AFL-2.1",
"JSON",
"WTFPL",
"MIT",
"LicenseRef-scancode-generic-exception",
"LicenseRef-scancode-jython",
"GPL-3.0-or-later",
"LicenseRef-scancode-python-cwi",
"BSD-3-Clause",
"LGPL-3.0-or-later",
"Zlib",
"LicenseRef-scancode-free-unknown",
"Classpath-exception-2.0",
"LicenseRef-scancode-proprietary-license",
"GPL-1.0-or-later",
"LGPL-2.0-or-later",
"MPL-2.0",
"ISC",
"GPL-2.0-only",
"ZPL-2.1",
"BSL-1.0",
"LGPL-2.0-only",
"LicenseRef-scancode-public-domain",
"Xnet",
"BSD-2-Clause"
] |
permissive
|
cloudera/hue
|
b42343d0e03d2936b5a9a32f8ddb3e9c5c80c908
|
dccb9467675c67b9c3399fc76c5de6d31bfb8255
|
refs/heads/master
| 2023-08-31T06:49:25.724501
| 2023-08-28T20:45:00
| 2023-08-28T20:45:00
| 732,593
| 5,655
| 2,244
|
Apache-2.0
| 2023-09-14T03:05:41
| 2010-06-21T19:46:51
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 48,112
|
py
|
response.py
|
#!/usr/bin/env python
#
import calendar
import logging
from saml2 import SAMLError
from saml2 import class_name
from saml2 import extension_elements_to_elements
from saml2 import saml
from saml2 import samlp
from saml2 import time_util
from saml2 import xmldsig as ds
from saml2 import xmlenc as xenc
from saml2.attribute_converter import to_local
from saml2.s_utils import RequestVersionTooHigh
from saml2.s_utils import RequestVersionTooLow
from saml2.saml import SCM_BEARER
from saml2.saml import SCM_HOLDER_OF_KEY
from saml2.saml import SCM_SENDER_VOUCHES
from saml2.saml import XSI_TYPE
from saml2.saml import attribute_from_string
from saml2.saml import encrypted_attribute_from_string
from saml2.samlp import STATUS_AUTHN_FAILED
from saml2.samlp import STATUS_INVALID_ATTR_NAME_OR_VALUE
from saml2.samlp import STATUS_INVALID_NAMEID_POLICY
from saml2.samlp import STATUS_NO_AUTHN_CONTEXT
from saml2.samlp import STATUS_NO_AVAILABLE_IDP
from saml2.samlp import STATUS_NO_PASSIVE
from saml2.samlp import STATUS_NO_SUPPORTED_IDP
from saml2.samlp import STATUS_PARTIAL_LOGOUT
from saml2.samlp import STATUS_PROXY_COUNT_EXCEEDED
from saml2.samlp import STATUS_REQUEST_DENIED
from saml2.samlp import STATUS_REQUEST_UNSUPPORTED
from saml2.samlp import STATUS_REQUEST_VERSION_DEPRECATED
from saml2.samlp import STATUS_REQUEST_VERSION_TOO_HIGH
from saml2.samlp import STATUS_REQUEST_VERSION_TOO_LOW
from saml2.samlp import STATUS_RESOURCE_NOT_RECOGNIZED
from saml2.samlp import STATUS_RESPONDER
from saml2.samlp import STATUS_TOO_MANY_RESPONSES
from saml2.samlp import STATUS_UNKNOWN_ATTR_PROFILE
from saml2.samlp import STATUS_UNKNOWN_PRINCIPAL
from saml2.samlp import STATUS_UNSUPPORTED_BINDING
from saml2.samlp import STATUS_VERSION_MISMATCH
from saml2.sigver import DecryptError
from saml2.sigver import SignatureError
from saml2.sigver import security_context
from saml2.sigver import signed
from saml2.time_util import later_than
from saml2.time_util import str_to_time
from saml2.validate import NotValid
from saml2.validate import valid_address
from saml2.validate import valid_instance
from saml2.validate import validate_before
from saml2.validate import validate_on_or_after
logger = logging.getLogger(__name__)
# ---------------------------------------------------------------------------
class IncorrectlySigned(SAMLError):
pass
class InvalidAssertion(SAMLError):
pass
class DecryptionFailed(SAMLError):
pass
class VerificationError(SAMLError):
pass
class StatusError(SAMLError):
pass
class UnsolicitedResponse(SAMLError):
pass
class StatusVersionMismatch(StatusError):
pass
class StatusAuthnFailed(StatusError):
pass
class StatusInvalidAttrNameOrValue(StatusError):
pass
class StatusInvalidAuthnResponseStatement(StatusError):
pass
class StatusInvalidNameidPolicy(StatusError):
pass
class StatusNoAuthnContext(StatusError):
pass
class StatusNoAvailableIdp(StatusError):
pass
class StatusNoPassive(StatusError):
pass
class StatusNoSupportedIdp(StatusError):
pass
class StatusPartialLogout(StatusError):
pass
class StatusProxyCountExceeded(StatusError):
pass
class StatusRequestDenied(StatusError):
pass
class StatusRequestUnsupported(StatusError):
pass
class StatusRequestVersionDeprecated(StatusError):
pass
class StatusRequestVersionTooHigh(StatusError):
pass
class StatusRequestVersionTooLow(StatusError):
pass
class StatusResourceNotRecognized(StatusError):
pass
class StatusTooManyResponses(StatusError):
pass
class StatusUnknownAttrProfile(StatusError):
pass
class StatusUnknownPrincipal(StatusError):
pass
class StatusUnsupportedBinding(StatusError):
pass
class StatusResponder(StatusError):
pass
STATUSCODE2EXCEPTION = {
STATUS_VERSION_MISMATCH: StatusVersionMismatch,
STATUS_AUTHN_FAILED: StatusAuthnFailed,
STATUS_INVALID_ATTR_NAME_OR_VALUE: StatusInvalidAttrNameOrValue,
STATUS_INVALID_NAMEID_POLICY: StatusInvalidNameidPolicy,
STATUS_NO_AUTHN_CONTEXT: StatusNoAuthnContext,
STATUS_NO_AVAILABLE_IDP: StatusNoAvailableIdp,
STATUS_NO_PASSIVE: StatusNoPassive,
STATUS_NO_SUPPORTED_IDP: StatusNoSupportedIdp,
STATUS_PARTIAL_LOGOUT: StatusPartialLogout,
STATUS_PROXY_COUNT_EXCEEDED: StatusProxyCountExceeded,
STATUS_REQUEST_DENIED: StatusRequestDenied,
STATUS_REQUEST_UNSUPPORTED: StatusRequestUnsupported,
STATUS_REQUEST_VERSION_DEPRECATED: StatusRequestVersionDeprecated,
STATUS_REQUEST_VERSION_TOO_HIGH: StatusRequestVersionTooHigh,
STATUS_REQUEST_VERSION_TOO_LOW: StatusRequestVersionTooLow,
STATUS_RESOURCE_NOT_RECOGNIZED: StatusResourceNotRecognized,
STATUS_TOO_MANY_RESPONSES: StatusTooManyResponses,
STATUS_UNKNOWN_ATTR_PROFILE: StatusUnknownAttrProfile,
STATUS_UNKNOWN_PRINCIPAL: StatusUnknownPrincipal,
STATUS_UNSUPPORTED_BINDING: StatusUnsupportedBinding,
STATUS_RESPONDER: StatusResponder,
}
# ---------------------------------------------------------------------------
def _dummy(_):
return None
def for_me(conditions, myself):
"""Am I among the intended audiences"""
if not conditions.audience_restriction: # No audience restriction
return True
for restriction in conditions.audience_restriction:
if not restriction.audience:
continue
for audience in restriction.audience:
if audience.text and audience.text.strip() == myself:
return True
else:
logger.debug(f"AudienceRestriction - One condition not satisfied: {audience.text} != {myself}")
logger.debug("AudienceRestrictions not satisfied!")
return False
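# A hedged, comment-only sketch of for_me() above, using the module's own
# saml import. The Conditions object is built directly with pysaml2's
# generated classes; the entity ids are hypothetical placeholders.
#
#     conds = saml.Conditions(
#         audience_restriction=[
#             saml.AudienceRestriction(
#                 audience=[saml.Audience(text="https://sp.example.org")])])
#     assert for_me(conds, "https://sp.example.org")
#     assert not for_me(conds, "https://other.example.org")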
def authn_response(
conf,
return_addrs,
outstanding_queries=None,
timeslack=0,
asynchop=True,
allow_unsolicited=False,
want_assertions_signed=False,
conv_info=None,
):
sec = security_context(conf)
if not timeslack:
try:
timeslack = int(conf.accepted_time_diff)
except TypeError:
timeslack = 0
return AuthnResponse(
sec,
conf.attribute_converters,
conf.entityid,
return_addrs,
outstanding_queries,
timeslack,
asynchop=asynchop,
allow_unsolicited=allow_unsolicited,
want_assertions_signed=want_assertions_signed,
conv_info=conv_info,
)
# comes in over SOAP so synchronous
def attribute_response(conf, return_addrs, timeslack=0, asynchop=False, test=False, conv_info=None):
sec = security_context(conf)
if not timeslack:
try:
timeslack = int(conf.accepted_time_diff)
except TypeError:
timeslack = 0
return AttributeResponse(
sec,
conf.attribute_converters,
conf.entityid,
return_addrs,
timeslack,
asynchop=asynchop,
test=test,
conv_info=conv_info,
)
class StatusResponse:
msgtype = "status_response"
def __init__(self, sec_context, return_addrs=None, timeslack=0, request_id=0, asynchop=True, conv_info=None):
self.sec = sec_context
self.return_addrs = return_addrs or []
self.timeslack = timeslack
self.request_id = request_id
self.xmlstr = ""
self.origxml = ""
self.name_id = None
self.response = None
self.not_on_or_after = 0
self.in_response_to = None
self.signature_check = self.sec.correctly_signed_response
self.require_signature = False
self.require_response_signature = False
self.require_signature_or_response_signature = False
self.not_signed = False
self.asynchop = asynchop
self.do_not_verify = False
self.conv_info = conv_info or {}
def _clear(self):
self.xmlstr = ""
self.name_id = None
self.response = None
self.not_on_or_after = 0
def _postamble(self):
if not self.response:
logger.warning("Response was not correctly signed")
if self.xmlstr:
logger.debug("Response: %s", self.xmlstr)
raise IncorrectlySigned()
logger.debug("response: %s", self.response)
try:
valid_instance(self.response)
except NotValid as exc:
logger.warning("Not valid response: %s", exc.args[0])
self._clear()
return self
self.in_response_to = self.response.in_response_to
return self
def load_instance(self, instance):
if signed(instance):
# This will check signature on Assertion which is the default
try:
self.response = self.sec.check_signature(instance)
except SignatureError:
# The response as a whole might be signed or not
self.response = self.sec.check_signature(instance, f"{samlp.NAMESPACE}:Response")
else:
self.not_signed = True
self.response = instance
return self._postamble()
def _loads(self, xmldata, decode=True, origxml=None):
# own copy
if isinstance(xmldata, bytes):
self.xmlstr = xmldata[:].decode("utf-8")
else:
self.xmlstr = xmldata[:]
logger.debug("xmlstr: %s", self.xmlstr)
if origxml:
self.origxml = origxml
else:
self.origxml = self.xmlstr
if self.do_not_verify:
args = {"do_not_verify": True}
else:
args = {}
try:
self.response = self.signature_check(
xmldata,
origdoc=origxml,
must=self.require_signature,
require_response_signature=self.require_response_signature,
**args,
)
except TypeError:
raise
except SignatureError:
raise
except Exception as excp:
logger.exception("EXCEPTION: %s", str(excp))
raise
return self._postamble()
def status_ok(self):
status = self.response.status
logger.debug("status: %s", status)
if not status or status.status_code.value == samlp.STATUS_SUCCESS:
return True
err_code = status.status_code.status_code.value if status.status_code.status_code else None
err_msg = status.status_message.text if status.status_message else err_code or "Unknown error"
err_cls = STATUSCODE2EXCEPTION.get(err_code, StatusError)
msg = f"Unsuccessful operation: {status}\n{err_msg} from {err_code}"
logger.debug(msg)
raise err_cls(msg)
def issue_instant_ok(self):
"""Check that the response was issued at a reasonable time"""
upper = time_util.shift_time(time_util.time_in_a_while(days=1), self.timeslack).timetuple()
lower = time_util.shift_time(time_util.time_a_while_ago(days=1), -self.timeslack).timetuple()
# print("issue_instant: %s" % self.response.issue_instant)
# print("%s < x < %s" % (lower, upper))
issued_at = str_to_time(self.response.issue_instant)
return lower < issued_at < upper
def _verify(self):
if self.request_id and self.in_response_to and self.in_response_to != self.request_id:
logger.error("Not the id I expected: %s != %s", self.in_response_to, self.request_id)
return None
if self.response.version != "2.0":
_ver = float(self.response.version)
if _ver < 2.0:
raise RequestVersionTooLow()
else:
raise RequestVersionTooHigh()
if self.asynchop:
if self.response.destination and self.response.destination not in self.return_addrs:
logger.error(
"destination '%s' not in return addresses '%s'", self.response.destination, self.return_addrs
)
return None
valid = self.issue_instant_ok() and self.status_ok()
return valid
def loads(self, xmldata, decode=True, origxml=None):
return self._loads(xmldata, decode, origxml)
def verify(self, keys=None):
try:
return self._verify()
except AssertionError:
logger.exception("verify")
return None
def update(self, mold):
self.xmlstr = mold.xmlstr
self.in_response_to = mold.in_response_to
self.response = mold.response
def issuer(self):
issuer_value = (self.response.issuer.text if self.response.issuer is not None else "").strip()
return issuer_value
class LogoutResponse(StatusResponse):
msgtype = "logout_response"
def __init__(self, sec_context, return_addrs=None, timeslack=0, asynchop=True, conv_info=None):
StatusResponse.__init__(self, sec_context, return_addrs, timeslack, asynchop=asynchop, conv_info=conv_info)
self.signature_check = self.sec.correctly_signed_logout_response
class NameIDMappingResponse(StatusResponse):
msgtype = "name_id_mapping_response"
def __init__(self, sec_context, return_addrs=None, timeslack=0, request_id=0, asynchop=True, conv_info=None):
StatusResponse.__init__(self, sec_context, return_addrs, timeslack, request_id, asynchop, conv_info=conv_info)
self.signature_check = self.sec.correctly_signed_name_id_mapping_response
class ManageNameIDResponse(StatusResponse):
msgtype = "manage_name_id_response"
def __init__(self, sec_context, return_addrs=None, timeslack=0, request_id=0, asynchop=True, conv_info=None):
StatusResponse.__init__(self, sec_context, return_addrs, timeslack, request_id, asynchop, conv_info=conv_info)
self.signature_check = self.sec.correctly_signed_manage_name_id_response
# ----------------------------------------------------------------------------
class AuthnResponse(StatusResponse):
"""This is where all the profile compliance is checked.
This one does saml2int compliance."""
msgtype = "authn_response"
def __init__(
self,
sec_context,
attribute_converters,
entity_id,
return_addrs=None,
outstanding_queries=None,
timeslack=0,
asynchop=True,
allow_unsolicited=False,
test=False,
allow_unknown_attributes=False,
want_assertions_signed=False,
want_assertions_or_response_signed=False,
want_response_signed=False,
conv_info=None,
**kwargs,
):
StatusResponse.__init__(self, sec_context, return_addrs, timeslack, asynchop=asynchop, conv_info=conv_info)
self.entity_id = entity_id
self.attribute_converters = attribute_converters
if outstanding_queries:
self.outstanding_queries = outstanding_queries
else:
self.outstanding_queries = {}
self.context = "AuthnReq"
self.came_from = None
self.ava = None
self.assertion = None
self.assertions = []
self.session_not_on_or_after = 0
self.allow_unsolicited = allow_unsolicited
self.require_signature = want_assertions_signed
self.require_signature_or_response_signature = want_assertions_or_response_signed
self.require_response_signature = want_response_signed
self.test = test
self.allow_unknown_attributes = allow_unknown_attributes
#
try:
self.extension_schema = kwargs["extension_schema"]
except KeyError:
self.extension_schema = {}
def check_subject_confirmation_in_response_to(self, irp):
for assertion in self.response.assertion:
for _sc in assertion.subject.subject_confirmation:
if _sc.subject_confirmation_data.in_response_to != irp:
return False
return True
def loads(self, xmldata, decode=True, origxml=None):
self._loads(xmldata, decode, origxml)
if self.asynchop:
if self.in_response_to in self.outstanding_queries:
self.came_from = self.outstanding_queries[self.in_response_to]
# del self.outstanding_queries[self.in_response_to]
try:
if not self.check_subject_confirmation_in_response_to(self.in_response_to):
raise UnsolicitedResponse(f"Unsolicited response: {self.in_response_to}")
except AttributeError:
pass
elif self.allow_unsolicited:
# Should check that I haven't seen this before
pass
else:
raise UnsolicitedResponse(f"Unsolicited response: {self.in_response_to}")
return self
def clear(self):
self._clear()
self.came_from = None
self.ava = None
self.assertion = None
def authn_statement_ok(self, optional=False):
n_authn_statements = len(self.assertion.authn_statement)
if n_authn_statements != 1:
if optional:
return True
else:
msg = f"Invalid number of AuthnStatement found in Response: {n_authn_statements}"
raise ValueError(msg)
authn_statement = self.assertion.authn_statement[0]
if authn_statement.session_not_on_or_after:
if validate_on_or_after(authn_statement.session_not_on_or_after, self.timeslack):
self.session_not_on_or_after = calendar.timegm(
time_util.str_to_time(authn_statement.session_not_on_or_after)
)
else:
return False
return True
# check authn_statement.session_index
def condition_ok(self, lax=False):
if not self.assertion.conditions:
# Conditions is Optional for Assertion, so, if it's absent, then we
            # assume that it's valid
return True
if self.test:
lax = True
conditions = self.assertion.conditions
logger.debug("conditions: %s", conditions)
# if no sub-elements or elements are supplied, then the
# assertion is considered to be valid.
if not conditions.keyswv():
return True
# if both are present NotBefore must be earlier than NotOnOrAfter
if conditions.not_before and conditions.not_on_or_after:
if not later_than(conditions.not_on_or_after, conditions.not_before):
return False
try:
if conditions.not_on_or_after:
self.not_on_or_after = validate_on_or_after(conditions.not_on_or_after, self.timeslack)
if conditions.not_before:
validate_before(conditions.not_before, self.timeslack)
except Exception as excp:
logger.error("Exception on conditions: %s", str(excp))
if not lax:
raise
else:
self.not_on_or_after = 0
if not for_me(conditions, self.entity_id):
if not lax:
raise Exception(f"AudienceRestrictions conditions not satisfied! (Local entity_id={self.entity_id})")
if conditions.condition: # extra conditions
for cond in conditions.condition:
try:
if cond.extension_attributes[XSI_TYPE] in self.extension_schema:
pass
else:
raise Exception("Unknown condition")
except KeyError:
raise Exception("Missing xsi:type specification")
return True
def decrypt_attributes(self, attribute_statement, keys=None):
"""
Decrypts possible encrypted attributes and adds the decrypts to the
list of attributes.
:param attribute_statement: A SAML.AttributeStatement which might
contain both encrypted attributes and attributes.
"""
# _node_name = [
# "urn:oasis:names:tc:SAML:2.0:assertion:EncryptedData",
# "urn:oasis:names:tc:SAML:2.0:assertion:EncryptedAttribute"]
for encattr in attribute_statement.encrypted_attribute:
if not encattr.encrypted_key:
_decr = self.sec.decrypt_keys(encattr.encrypted_data, keys=keys)
_attr = attribute_from_string(_decr)
attribute_statement.attribute.append(_attr)
else:
_decr = self.sec.decrypt_keys(encattr, keys=keys)
enc_attr = encrypted_attribute_from_string(_decr)
attrlist = enc_attr.extensions_as_elements("Attribute", saml)
attribute_statement.attribute.extend(attrlist)
def read_attribute_statement(self, attr_statem):
logger.debug("Attribute Statement: %s", attr_statem)
# for aconv in self.attribute_converters:
# logger.debug("Converts name format: %s", aconv.name_format)
self.decrypt_attributes(attr_statem)
return to_local(self.attribute_converters, attr_statem, self.allow_unknown_attributes)
def get_identity(self):
"""The assertion can contain zero or more attributeStatements"""
ava = {}
for _assertion in self.assertions:
if _assertion.advice:
if _assertion.advice.assertion:
for tmp_assertion in _assertion.advice.assertion:
if tmp_assertion.attribute_statement:
n_attr_statements = len(tmp_assertion.attribute_statement)
if n_attr_statements != 1:
msg = "Invalid number of AuthnStatement found in Response: {n}".format(
n=n_attr_statements
)
raise ValueError(msg)
ava.update(self.read_attribute_statement(tmp_assertion.attribute_statement[0]))
if _assertion.attribute_statement:
logger.debug("Assertion contains %s attribute statement(s)", (len(self.assertion.attribute_statement)))
for _attr_statem in _assertion.attribute_statement:
logger.debug(f"Attribute Statement: {_attr_statem}")
ava.update(self.read_attribute_statement(_attr_statem))
if not ava:
logger.debug("Assertion contains no attribute statements")
return ava
def _bearer_confirmed(self, data):
if not data:
return False
if data.address:
if not valid_address(data.address):
return False
# verify that I got it from the correct sender
# These two will raise exception if untrue
validate_on_or_after(data.not_on_or_after, self.timeslack)
validate_before(data.not_before, self.timeslack)
# not_before must be < not_on_or_after
if not later_than(data.not_on_or_after, data.not_before):
return False
if self.asynchop and self.came_from is None:
if data.in_response_to:
if data.in_response_to in self.outstanding_queries:
self.came_from = self.outstanding_queries[data.in_response_to]
# del self.outstanding_queries[data.in_response_to]
elif self.allow_unsolicited:
pass
else:
                    # This is where I don't allow unsolicited responses
# Either in_response_to == None or has a value I don't
# recognize
logger.debug("in response to: '%s'", data.in_response_to)
logger.info("outstanding queries: %s", self.outstanding_queries.keys())
raise Exception("Combination of session id and requestURI I don't " "recall")
return True
def _holder_of_key_confirmed(self, data):
if not data or not data.extension_elements:
return False
has_keyinfo = False
for element in extension_elements_to_elements(data.extension_elements, [samlp, saml, xenc, ds]):
if isinstance(element, ds.KeyInfo):
has_keyinfo = True
return has_keyinfo
def get_subject(self, keys=None):
"""The assertion must contain a Subject"""
if not self.assertion:
raise ValueError("Missing assertion")
if not self.assertion.subject:
raise ValueError(f"Invalid assertion subject: {self.assertion.subject}")
subject = self.assertion.subject
subjconf = []
if not self.verify_attesting_entity(subject.subject_confirmation):
raise VerificationError("No valid attesting address")
for subject_confirmation in subject.subject_confirmation:
_data = subject_confirmation.subject_confirmation_data
if subject_confirmation.method == SCM_BEARER:
if not self._bearer_confirmed(_data):
continue
elif subject_confirmation.method == SCM_HOLDER_OF_KEY:
if not self._holder_of_key_confirmed(_data):
continue
elif subject_confirmation.method == SCM_SENDER_VOUCHES:
pass
else:
raise ValueError(f"Unknown subject confirmation method: {subject_confirmation.method}")
_recip = _data.recipient
if not _recip or not self.verify_recipient(_recip):
raise VerificationError("No valid recipient")
subjconf.append(subject_confirmation)
if not subjconf:
raise VerificationError("No valid subject confirmation")
subject.subject_confirmation = subjconf
# The subject may contain a name_id
if subject.name_id:
self.name_id = subject.name_id
elif subject.encrypted_id:
# decrypt encrypted ID
_name_id_str = self.sec.decrypt_keys(subject.encrypted_id.encrypted_data.to_string(), keys=keys)
_name_id = saml.name_id_from_string(_name_id_str)
self.name_id = _name_id
logger.info("Subject NameID: %s", self.name_id)
return self.name_id
def _assertion(self, assertion, verified=False):
"""
Check the assertion
:param assertion:
        :return: True/False depending on whether the assertion is sane or not
"""
if not hasattr(assertion, "signature") or not assertion.signature:
logger.debug("unsigned")
if self.require_signature:
raise SignatureError("Signature missing for assertion")
else:
logger.debug("signed")
if not verified and self.do_not_verify is False:
try:
self.sec.check_signature(assertion, class_name(assertion), self.xmlstr)
except Exception as exc:
logger.error("correctly_signed_response: %s", exc)
raise
self.assertion = assertion
logger.debug("assertion context: %s", self.context)
logger.debug("assertion keys: %s", assertion.keyswv())
logger.debug("outstanding_queries: %s", self.outstanding_queries)
# if self.context == "AuthnReq" or self.context == "AttrQuery":
if self.context == "AuthnReq":
self.authn_statement_ok()
# elif self.context == "AttrQuery":
# self.authn_statement_ok(True)
if not self.condition_ok():
raise VerificationError("Condition not OK")
logger.debug("--- Getting Identity ---")
# if self.context == "AuthnReq" or self.context == "AttrQuery":
# self.ava = self.get_identity()
# logger.debug("--- AVA: {0}".format(self.ava))
try:
self.get_subject()
if self.asynchop:
if self.allow_unsolicited:
pass
elif self.came_from is None:
raise VerificationError("Came from")
return True
except Exception:
logger.exception("get subject")
raise
def decrypt_assertions(self, encrypted_assertions, decr_txt, issuer=None, verified=False):
"""Moves the decrypted assertion from the encrypted assertion to a
list.
:param encrypted_assertions: A list of encrypted assertions.
:param decr_txt: The string representation containing the decrypted
data. Used when verifying signatures.
:param issuer: The issuer of the response.
:param verified: If True do not verify signatures, otherwise verify
the signature if it exists.
:return: A list of decrypted assertions.
"""
res = []
for encrypted_assertion in encrypted_assertions:
if encrypted_assertion.extension_elements:
assertions = extension_elements_to_elements(encrypted_assertion.extension_elements, [saml, samlp])
for assertion in assertions:
if assertion.signature and not verified:
if not self.sec.check_signature(
assertion, origdoc=decr_txt, node_name=class_name(assertion), issuer=issuer
):
logger.error("Failed to verify signature on '%s'", assertion)
raise SignatureError()
res.append(assertion)
return res
def find_encrypt_data_assertion(self, enc_assertions):
"""Verifies if a list of encrypted assertions contains encrypted data.
:param enc_assertions: A list of encrypted assertions.
        :return: True if encrypted data exists, otherwise False.
"""
for _assertion in enc_assertions:
if _assertion.encrypted_data is not None:
return True
def find_encrypt_data_assertion_list(self, _assertions):
"""Verifies if a list of assertions contains encrypted data in the
advice element.
:param _assertions: A list of assertions.
        :return: True if encrypted data exists, otherwise False.
"""
for _assertion in _assertions:
if _assertion.advice:
if _assertion.advice.encrypted_assertion:
res = self.find_encrypt_data_assertion(_assertion.advice.encrypted_assertion)
if res:
return True
def find_encrypt_data(self, resp):
"""Verifies if a saml response contains encrypted assertions with
encrypted data.
:param resp: A saml response.
        :return: True if encrypted data exists, otherwise False.
"""
if resp.encrypted_assertion:
res = self.find_encrypt_data_assertion(resp.encrypted_assertion)
if res:
return True
if resp.assertion:
for tmp_assertion in resp.assertion:
if tmp_assertion.advice:
if tmp_assertion.advice.encrypted_assertion:
res = self.find_encrypt_data_assertion(tmp_assertion.advice.encrypted_assertion)
if res:
return True
return False
def parse_assertion(self, keys=None):
"""Parse the assertions for a saml response.
        :param keys: A string representing an RSA key or a list of strings
containing RSA keys.
:return: True if the assertions are parsed otherwise False.
"""
if self.context == "AuthnQuery":
# can contain one or more assertions
pass
else:
# This is a saml2int limitation
n_assertions = len(self.response.assertion)
n_assertions_enc = len(self.response.encrypted_assertion)
if n_assertions != 1 and n_assertions_enc != 1 and self.assertion is None:
raise InvalidAssertion(f"Invalid number of assertions in Response: {n_assertions + n_assertions_enc}")
if self.response.assertion:
logger.debug("***Unencrypted assertion***")
for assertion in self.response.assertion:
if not self._assertion(assertion, False):
return False
if self.find_encrypt_data(self.response):
logger.debug("***Encrypted assertion/-s***")
_enc_assertions = []
resp = self.response
decr_text = str(self.response)
decr_text_old = None
while self.find_encrypt_data(resp) and decr_text_old != decr_text:
decr_text_old = decr_text
try:
decr_text = self.sec.decrypt_keys(decr_text, keys=keys)
except DecryptError:
continue
else:
resp = samlp.response_from_string(decr_text)
# check and prepare for comparison between str and unicode
if type(decr_text_old) != type(decr_text):
if isinstance(decr_text_old, bytes):
decr_text_old = decr_text_old.decode("utf-8")
else:
decr_text_old = decr_text_old.encode("utf-8")
_enc_assertions = self.decrypt_assertions(resp.encrypted_assertion, decr_text)
decr_text_old = None
while (
self.find_encrypt_data(resp) or self.find_encrypt_data_assertion_list(_enc_assertions)
) and decr_text_old != decr_text:
decr_text_old = decr_text
try:
decr_text = self.sec.decrypt_keys(decr_text, keys=keys)
except DecryptError:
continue
else:
resp = samlp.response_from_string(decr_text)
_enc_assertions = self.decrypt_assertions(resp.encrypted_assertion, decr_text, verified=True)
# check and prepare for comparison between str and unicode
if type(decr_text_old) != type(decr_text):
if isinstance(decr_text_old, bytes):
decr_text_old = decr_text_old.decode("utf-8")
else:
decr_text_old = decr_text_old.encode("utf-8")
all_assertions = _enc_assertions
if resp.assertion:
all_assertions = all_assertions + resp.assertion
if len(all_assertions) > 0:
for tmp_ass in all_assertions:
if tmp_ass.advice and tmp_ass.advice.encrypted_assertion:
advice_res = self.decrypt_assertions(
tmp_ass.advice.encrypted_assertion, decr_text, tmp_ass.issuer
)
if tmp_ass.advice.assertion:
tmp_ass.advice.assertion.extend(advice_res)
else:
tmp_ass.advice.assertion = advice_res
if len(advice_res) > 0:
tmp_ass.advice.encrypted_assertion = []
self.response.assertion = resp.assertion
for assertion in _enc_assertions:
if not self._assertion(assertion, True):
return False
else:
self.assertions.append(assertion)
self.xmlstr = decr_text
if len(_enc_assertions) > 0:
self.response.encrypted_assertion = []
if self.response.assertion:
for assertion in self.response.assertion:
self.assertions.append(assertion)
if self.assertions and len(self.assertions) > 0:
self.assertion = self.assertions[0]
if self.context == "AuthnReq" or self.context == "AttrQuery":
self.ava = self.get_identity()
logger.debug(f"--- AVA: {self.ava}")
return True
def verify(self, keys=None):
"""Verify that the assertion is syntactically correct and the
signature is correct if present.
:param keys: If not the default key file should be used then use one
of these.
"""
try:
res = self._verify()
except AssertionError as err:
logger.error("Verification error on the response: %s", str(err))
raise
else:
if not res:
return None
if not isinstance(self.response, samlp.Response):
return self
if self.parse_assertion(keys):
return self
else:
logger.error("Could not parse the assertion")
return None
def session_id(self):
"""Returns the SessionID of the response"""
return self.response.in_response_to
def id(self):
"""Return the ID of the response"""
return self.response.id
def authn_info(self):
res = []
for statement in getattr(self.assertion, "authn_statement", []):
authn_instant = getattr(statement, "authn_instant", "")
context = statement.authn_context
if not context:
continue
authn_class = (
getattr(context.authn_context_class_ref, "text", None)
or getattr(context.authn_context_decl_ref, "text", None)
or ""
)
authenticating_authorities = getattr(context, "authenticating_authority", [])
authn_auth = [authority.text for authority in authenticating_authorities]
res.append((authn_class, authn_auth, authn_instant))
return res
def authz_decision_info(self):
res = {"permit": [], "deny": [], "indeterminate": []}
for adstat in self.assertion.authz_decision_statement:
# one of 'Permit', 'Deny', 'Indeterminate'
            res[adstat.decision.text.lower()].append(adstat)
return res
def session_info(self):
"""Returns a predefined set of information gleened from the
response.
:returns: Dictionary with information
"""
if self.session_not_on_or_after > 0:
nooa = self.session_not_on_or_after
else:
nooa = self.not_on_or_after
if self.context == "AuthzQuery":
return {
"name_id": self.name_id,
"came_from": self.came_from,
"issuer": self.issuer(),
"not_on_or_after": nooa,
"authz_decision_info": self.authz_decision_info(),
}
elif getattr(self.assertion, "authn_statement", None):
authn_statement = self.assertion.authn_statement[0]
return {
"ava": self.ava,
"name_id": self.name_id,
"came_from": self.came_from,
"issuer": self.issuer(),
"not_on_or_after": nooa,
"authn_info": self.authn_info(),
"session_index": authn_statement.session_index,
}
else:
raise StatusInvalidAuthnResponseStatement("The Authn Response Statement is not valid")
def __str__(self):
return self.xmlstr
def verify_recipient(self, recipient):
"""
Verify that I'm the recipient of the assertion
:param recipient: A URI specifying the entity or location to which an
attesting entity can present the assertion.
:return: True/False
"""
if not self.conv_info:
return True
_info = self.conv_info
try:
if recipient == _info["entity_id"]:
return True
except KeyError:
pass
try:
if recipient in self.return_addrs:
return True
except KeyError:
pass
return False
def verify_attesting_entity(self, subject_confirmation):
"""
At least one address specification has to be correct.
        :param subject_confirmation: A SubjectConfirmation instance
:return: True/False
"""
try:
address = self.conv_info["remote_addr"]
except KeyError:
address = "0.0.0.0"
correct = 0
for subject_conf in subject_confirmation:
if subject_conf.subject_confirmation_data is None:
correct += 1 # In reality undefined
elif subject_conf.subject_confirmation_data.address:
if address == "0.0.0.0": # accept anything
correct += 1
elif subject_conf.subject_confirmation_data.address == address:
correct += 1
else:
correct += 1
if correct:
return True
else:
return False
class AuthnQueryResponse(AuthnResponse):
msgtype = "authn_query_response"
def __init__(
self,
sec_context,
attribute_converters,
entity_id,
return_addrs=None,
timeslack=0,
asynchop=False,
test=False,
conv_info=None,
):
AuthnResponse.__init__(
self,
sec_context,
attribute_converters,
entity_id,
return_addrs,
timeslack=timeslack,
asynchop=asynchop,
test=test,
conv_info=conv_info,
)
self.entity_id = entity_id
self.attribute_converters = attribute_converters
self.assertion = None
self.context = "AuthnQuery"
def condition_ok(self, lax=False): # Should I care about conditions ?
return True
class AttributeResponse(AuthnResponse):
msgtype = "attribute_response"
def __init__(
self,
sec_context,
attribute_converters,
entity_id,
return_addrs=None,
timeslack=0,
asynchop=False,
test=False,
conv_info=None,
):
AuthnResponse.__init__(
self,
sec_context,
attribute_converters,
entity_id,
return_addrs,
timeslack=timeslack,
asynchop=asynchop,
test=test,
conv_info=conv_info,
)
self.entity_id = entity_id
self.attribute_converters = attribute_converters
self.assertion = None
self.context = "AttrQuery"
class AuthzResponse(AuthnResponse):
"""A successful response will be in the form of assertions containing
authorization decision statements."""
msgtype = "authz_decision_response"
def __init__(
self,
sec_context,
attribute_converters,
entity_id,
return_addrs=None,
timeslack=0,
asynchop=False,
conv_info=None,
):
AuthnResponse.__init__(
self,
sec_context,
attribute_converters,
entity_id,
return_addrs,
timeslack=timeslack,
asynchop=asynchop,
conv_info=conv_info,
)
self.entity_id = entity_id
self.attribute_converters = attribute_converters
self.assertion = None
self.context = "AuthzQuery"
class ArtifactResponse(AuthnResponse):
msgtype = "artifact_response"
def __init__(
self,
sec_context,
attribute_converters,
entity_id,
return_addrs=None,
timeslack=0,
asynchop=False,
test=False,
conv_info=None,
):
AuthnResponse.__init__(
self,
sec_context,
attribute_converters,
entity_id,
return_addrs,
timeslack=timeslack,
asynchop=asynchop,
test=test,
conv_info=conv_info,
)
self.entity_id = entity_id
self.attribute_converters = attribute_converters
self.assertion = None
self.context = "ArtifactResolve"
def response_factory(
xmlstr,
conf,
return_addrs=None,
outstanding_queries=None,
timeslack=0,
decode=True,
request_id=0,
origxml=None,
asynchop=True,
allow_unsolicited=False,
want_assertions_signed=False,
conv_info=None,
):
sec_context = security_context(conf)
if not timeslack:
try:
timeslack = int(conf.accepted_time_diff)
except TypeError:
timeslack = 0
attribute_converters = conf.attribute_converters
entity_id = conf.entityid
extension_schema = conf.extension_schema
response = StatusResponse(sec_context, return_addrs, timeslack, request_id, asynchop, conv_info=conv_info)
try:
response.loads(xmlstr, decode, origxml)
if response.response.assertion or response.response.encrypted_assertion:
authnresp = AuthnResponse(
sec_context,
attribute_converters,
entity_id,
return_addrs,
outstanding_queries,
timeslack,
asynchop,
allow_unsolicited,
extension_schema=extension_schema,
want_assertions_signed=want_assertions_signed,
conv_info=conv_info,
)
authnresp.update(response)
return authnresp
except TypeError:
response.signature_check = sec_context.correctly_signed_logout_response
response.loads(xmlstr, decode, origxml)
logoutresp = LogoutResponse(sec_context, return_addrs, timeslack, asynchop=asynchop, conv_info=conv_info)
logoutresp.update(response)
return logoutresp
return response
# ===========================================================================
# A class of its own
class AssertionIDResponse:
msgtype = "assertion_id_response"
def __init__(self, sec_context, attribute_converters, timeslack=0, **kwargs):
self.sec = sec_context
self.timeslack = timeslack
self.xmlstr = ""
self.origxml = ""
self.name_id = ""
self.response = None
self.not_signed = False
self.attribute_converters = attribute_converters
self.assertion = None
self.context = "AssertionIdResponse"
self.signature_check = self.sec.correctly_signed_assertion_id_response
# Because this class is not a subclass of StatusResponse we need
# to add these attributes directly so that the _parse_response()
# method of the Entity class can treat instances of this class
# like all other responses.
self.require_signature = False
self.require_response_signature = False
self.require_signature_or_response_signature = False
def loads(self, xmldata, decode=True, origxml=None):
# own copy
self.xmlstr = xmldata[:]
logger.debug("xmlstr: %s", self.xmlstr)
self.origxml = origxml
try:
self.response = self.signature_check(xmldata, origdoc=origxml)
self.assertion = self.response
except TypeError:
raise
except SignatureError:
raise
except Exception as excp:
logger.exception("EXCEPTION: %s", str(excp))
raise
# print("<", self.response)
return self._postamble()
def verify(self, keys=None):
try:
valid_instance(self.response)
except NotValid as exc:
logger.error("Not valid response: %s", exc.args[0])
raise
return self
def _postamble(self):
if not self.response:
logger.warning("Response was not correctly signed")
if self.xmlstr:
logger.debug("Response: %s", self.xmlstr)
raise IncorrectlySigned()
logger.debug("response: %s", self.response)
return self
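# A minimal, comment-only usage sketch of authn_response() above: build an
# SP configuration, parse an incoming SAMLResponse, then verify it. The
# config file name, ACS URL, outstanding-query id and xmlstr variable are
# hypothetical placeholders, not values taken from this module.
#
#     from saml2.config import config_factory
#
#     conf = config_factory("sp", "sp_conf.py")          # hypothetical config
#     resp = authn_response(
#         conf,
#         return_addrs=["https://sp.example.org/acs"],   # hypothetical ACS
#         outstanding_queries={"id-123": "https://sp.example.org/"},
#     )
#     resp.loads(xmlstr, decode=True)  # xmlstr: the raw response document
#     if resp.verify():
#         print(resp.ava)              # mapped identity attributes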
|
955d7579fb96e237df5f24275d521265d79640f3
|
84724b34b3f1e84dc53cbca5f3660590dbc34a9f
|
/nova/notifications/objects/image.py
|
01c86d1cb04e2ea7e4424457549d351393975df2
|
[
"Apache-2.0"
] |
permissive
|
openstack/nova
|
2c24b64e3677595611715bae6dda14edd3f90a24
|
065c5906d2da3e2bb6eeb3a7a15d4cd8d98b35e9
|
refs/heads/master
| 2023-08-28T15:10:05.126314
| 2023-08-25T20:31:27
| 2023-08-25T20:31:27
| 790,031
| 2,287
| 2,320
|
Apache-2.0
| 2023-07-08T02:10:29
| 2010-07-22T02:04:27
|
Python
|
UTF-8
|
Python
| false
| false
| 6,465
|
py
|
image.py
|
# Copyright 2018 NTT Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from nova.notifications.objects import base
from nova.objects import base as nova_base
from nova.objects import fields
from nova.objects import image_meta
@nova_base.NovaObjectRegistry.register_notification
class ImageMetaPayload(base.NotificationPayloadBase):
# Version 1.0: Initial version
VERSION = '1.0'
SCHEMA = {
'id': ('image_meta', 'id'),
'name': ('image_meta', 'name'),
'status': ('image_meta', 'status'),
'visibility': ('image_meta', 'visibility'),
'protected': ('image_meta', 'protected'),
'checksum': ('image_meta', 'checksum'),
'owner': ('image_meta', 'owner'),
'size': ('image_meta', 'size'),
'virtual_size': ('image_meta', 'virtual_size'),
'container_format': ('image_meta', 'container_format'),
'disk_format': ('image_meta', 'disk_format'),
'created_at': ('image_meta', 'created_at'),
'updated_at': ('image_meta', 'updated_at'),
'tags': ('image_meta', 'tags'),
'direct_url': ('image_meta', 'direct_url'),
'min_ram': ('image_meta', 'min_ram'),
'min_disk': ('image_meta', 'min_disk')
}
# NOTE(takashin): The reason that each field is nullable is as follows.
#
# a. It is defined as "The value might be null (JSON null data type)."
# in the "Show image" API (GET /v2/images/{image_id})
# in the glance API v2 Reference.
# (https://docs.openstack.org/api-ref/image/v2/index.html)
#
# * checksum
# * container_format
# * disk_format
# * min_disk
# * min_ram
# * name
# * owner
# * size
# * updated_at
# * virtual_size
#
# b. It is optional in the response from glance.
# * direct_url
#
    # c. It is defined as nullable in the ImageMeta object.
# * created_at
#
    # d. It cannot be obtained in the boot from volume case.
# See VIM_IMAGE_ATTRIBUTES in nova/block_device.py.
#
# * id (not 'image_id')
# * visibility
# * protected
# * status
# * tags
fields = {
'id': fields.UUIDField(nullable=True),
'name': fields.StringField(nullable=True),
'status': fields.StringField(nullable=True),
'visibility': fields.StringField(nullable=True),
'protected': fields.FlexibleBooleanField(nullable=True),
'checksum': fields.StringField(nullable=True),
'owner': fields.StringField(nullable=True),
'size': fields.IntegerField(nullable=True),
'virtual_size': fields.IntegerField(nullable=True),
'container_format': fields.StringField(nullable=True),
'disk_format': fields.StringField(nullable=True),
'created_at': fields.DateTimeField(nullable=True),
'updated_at': fields.DateTimeField(nullable=True),
'tags': fields.ListOfStringsField(nullable=True),
'direct_url': fields.StringField(nullable=True),
'min_ram': fields.IntegerField(nullable=True),
'min_disk': fields.IntegerField(nullable=True),
'properties': fields.ObjectField('ImageMetaPropsPayload')
}
def __init__(self, image_meta):
super(ImageMetaPayload, self).__init__()
self.properties = ImageMetaPropsPayload(
image_meta_props=image_meta.properties)
self.populate_schema(image_meta=image_meta)
@nova_base.NovaObjectRegistry.register_notification
class ImageMetaPropsPayload(base.NotificationPayloadBase):
"""Built dynamically from ImageMetaProps.
This has the following implications:
* When you make a versioned update to ImageMetaProps, you must *also* bump
the version of this object, even though you didn't make any explicit
changes here. There's an object hash test that should catch this for you.
* As currently written, this relies on all of the fields of ImageMetaProps
being initialized with no arguments. If you add one with arguments (e.g.
``nullable=True`` or with a ``default``), something needs to change here.
"""
# Version 1.0: Initial version
# Version 1.1: Added 'gop', 'virtio' and 'none' to hw_video_model field
# Version 1.2: Added hw_pci_numa_affinity_policy field
# Version 1.3: Added hw_mem_encryption, hw_pmu and hw_time_hpet fields
# Version 1.4: Added 'mixed' to hw_cpu_policy field
# Version 1.5: Added 'hw_tpm_model' and 'hw_tpm_version' fields
# Version 1.6: Added 'socket' to hw_pci_numa_affinity_policy
# Version 1.7: Added 'hw_input_bus' field
# Version 1.8: Added 'bochs' as an option to 'hw_video_model'
# Version 1.9: Added 'hw_emulation_architecture' field
# Version 1.10: Added 'hw_ephemeral_encryption' and
# 'hw_ephemeral_encryption_format' fields
# Version 1.11: Added 'hw_locked_memory' field
# Version 1.12: Added 'hw_viommu_model' field
VERSION = '1.12'
SCHEMA = {
k: ('image_meta_props', k) for k in image_meta.ImageMetaProps.fields}
# NOTE(efried): This logic currently relies on all of the fields of
# ImageMetaProps being initialized with no arguments. See the docstring.
# NOTE(efried): It's possible this could just be:
# fields = image_meta.ImageMetaProps.fields
# But it is not clear that OVO can tolerate the same *instance* of a type
# class being used in more than one place.
fields = {
k: v.__class__() for k, v in image_meta.ImageMetaProps.fields.items()}
def __init__(self, image_meta_props):
super(ImageMetaPropsPayload, self).__init__()
# NOTE(takashin): If fields are not set in the ImageMetaProps object,
# it will not set the fields in the ImageMetaPropsPayload
# in order to avoid too many fields whose values are None.
self.populate_schema(set_none=False, image_meta_props=image_meta_props)
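# A hedged, self-contained illustration of the fields construction above.
# ImageMetaPropsPayload instantiates a *fresh* field object per key instead
# of reusing the instances from ImageMetaProps.fields, since it is unclear
# whether OVO tolerates the same type-class instance being used in two
# places. The same pattern with plain stand-in classes:
#
#     class Field:
#         pass
#
#     source_fields = {'a': Field(), 'b': Field()}
#     copied = {k: v.__class__() for k, v in source_fields.items()}
#     assert all(copied[k] is not source_fields[k] for k in source_fields)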
|
27ad9fc1419ef42e51fc75b3093e628a89f56489
|
943fa4169d201d054f3d035007181bbb78429752
|
/tinydb/storages.py
|
d5a2db7804444a430534219838e55fcfeee7d305
|
[
"MIT"
] |
permissive
|
msiemens/tinydb
|
9b10563b0dc925470313f5c4d0713328b0e1b365
|
3dc6a952ef8700706909bf60a1b15cf21af47608
|
refs/heads/master
| 2023-08-24T09:39:56.747943
| 2023-07-24T18:53:19
| 2023-07-24T18:53:19
| 11,380,094
| 6,284
| 675
|
MIT
| 2023-09-03T18:56:40
| 2013-07-12T23:31:13
|
Python
|
UTF-8
|
Python
| false
| false
| 5,084
|
py
|
storages.py
|
"""
Contains the :class:`base class <tinydb.storages.Storage>` for storages and
implementations.
"""
import io
import json
import os
import warnings
from abc import ABC, abstractmethod
from typing import Dict, Any, Optional
__all__ = ('Storage', 'JSONStorage', 'MemoryStorage')
def touch(path: str, create_dirs: bool):
"""
Create a file if it doesn't exist yet.
:param path: The file to create.
:param create_dirs: Whether to create all missing parent directories.
"""
if create_dirs:
base_dir = os.path.dirname(path)
# Check if we need to create missing parent directories
if not os.path.exists(base_dir):
os.makedirs(base_dir)
# Create the file by opening it in 'a' mode which creates the file if it
# does not exist yet but does not modify its contents
with open(path, 'a'):
pass
class Storage(ABC):
"""
The abstract base class for all Storages.
A Storage (de)serializes the current state of the database and stores it in
some place (memory, file on disk, ...).
"""
# Using ABCMeta as metaclass allows instantiating only storages that have
# implemented read and write
@abstractmethod
def read(self) -> Optional[Dict[str, Dict[str, Any]]]:
"""
Read the current state.
Any kind of deserialization should go here.
Return ``None`` here to indicate that the storage is empty.
"""
raise NotImplementedError('To be overridden!')
@abstractmethod
def write(self, data: Dict[str, Dict[str, Any]]) -> None:
"""
Write the current state of the database to the storage.
Any kind of serialization should go here.
:param data: The current state of the database.
"""
raise NotImplementedError('To be overridden!')
def close(self) -> None:
"""
Optional: Close open file handles, etc.
"""
pass
class JSONStorage(Storage):
"""
Store the data in a JSON file.
"""
def __init__(self, path: str, create_dirs=False, encoding=None, access_mode='r+', **kwargs):
"""
Create a new instance.
Also creates the storage file, if it doesn't exist and the access mode
is appropriate for writing.
        Note: Using an access mode other than `r`, `rb`, `r+` or `rb+` will
        probably lead to data loss or data corruption!
        :param path: Where to store the JSON data.
        :param create_dirs: Whether to create all missing parent directories.
        :param encoding: encoding used when opening the storage file.
        :param access_mode: mode in which the file is opened (r, rb, r+, rb+)
        :type access_mode: str
"""
super().__init__()
self._mode = access_mode
self.kwargs = kwargs
if access_mode not in ('r', 'rb', 'r+', 'rb+'):
warnings.warn(
'Using an `access_mode` other than \'r\', \'rb\', \'r+\' '
'or \'rb+\' can cause data loss or corruption'
)
# Create the file if it doesn't exist and creating is allowed by the
# access mode
        if any(character in self._mode for character in ('+', 'w', 'a')):  # any of the writing modes
touch(path, create_dirs=create_dirs)
# Open the file for reading/writing
self._handle = open(path, mode=self._mode, encoding=encoding)
def close(self) -> None:
self._handle.close()
def read(self) -> Optional[Dict[str, Dict[str, Any]]]:
# Get the file size by moving the cursor to the file end and reading
# its location
self._handle.seek(0, os.SEEK_END)
size = self._handle.tell()
if not size:
# File is empty, so we return ``None`` so TinyDB can properly
# initialize the database
return None
else:
# Return the cursor to the beginning of the file
self._handle.seek(0)
# Load the JSON contents of the file
return json.load(self._handle)
def write(self, data: Dict[str, Dict[str, Any]]):
# Move the cursor to the beginning of the file just in case
self._handle.seek(0)
# Serialize the database state using the user-provided arguments
serialized = json.dumps(data, **self.kwargs)
# Write the serialized data to the file
try:
self._handle.write(serialized)
except io.UnsupportedOperation:
raise IOError('Cannot write to the database. Access mode is "{0}"'.format(self._mode))
# Ensure the file has been written
self._handle.flush()
os.fsync(self._handle.fileno())
# Remove data that is behind the new cursor in case the file has
# gotten shorter
self._handle.truncate()
class MemoryStorage(Storage):
"""
Store the data as JSON in memory.
"""
def __init__(self):
"""
Create a new instance.
"""
super().__init__()
self.memory = None
def read(self) -> Optional[Dict[str, Dict[str, Any]]]:
return self.memory
def write(self, data: Dict[str, Dict[str, Any]]):
self.memory = data
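if __name__ == '__main__':
    # A hedged demo of the two storages above: round-trip a small database
    # state through each. The temporary file path below exists only for this
    # illustration.
    import tempfile
    state = {'_default': {'1': {'key': 'value'}}}
    memory = MemoryStorage()
    memory.write(state)
    assert memory.read() == state
    path = os.path.join(tempfile.mkdtemp(), 'db.json')
    storage = JSONStorage(path, create_dirs=True)  # touch() creates the file
    storage.write(state)
    assert storage.read() == state
    storage.close()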
|
8a933587db5b47b66cf2b289bfecc66f5e26c024
|
0f85c7bfd4f29bcd856adc316cecc097fda744dc
|
/tests/test_loaders.py
|
f4122d2f721c44629e8484ea38b1870affe8bcba
|
[
"MIT"
] |
permissive
|
yandex/yandex-taxi-testsuite
|
260f46731c9888a9efcc3372c3d92329f2fb4d56
|
8befda8c13ef58d83b2ea7d0444e34de0f67ac7f
|
refs/heads/develop
| 2023-08-31T23:28:31.874786
| 2023-08-14T16:00:53
| 2023-08-14T16:00:53
| 244,937,107
| 150
| 41
|
MIT
| 2023-09-13T16:34:07
| 2020-03-04T15:35:09
|
Python
|
UTF-8
|
Python
| false
| false
| 858
|
py
|
test_loaders.py
|
import pytest
from testsuite.plugins import common
def test_when_loading_invalid_yaml_then_error_specifies_file(load_yaml):
with pytest.raises(common.LoadYamlError) as err:
load_yaml('invalid.yaml')
assert 'invalid.yaml' in str(err)
def test_when_yaml_file_not_found_then_error_is_specific(load_yaml):
with pytest.raises(FileNotFoundError) as err:
load_yaml('non_existing.yaml')
assert 'non_existing.yaml' in str(err)
def test_when_loading_invalid_json_then_error_specifies_file(load_json):
with pytest.raises(common.LoadJsonError) as err:
load_json('invalid.json')
assert 'invalid.json' in str(err)
def test_when_json_file_not_found_then_error_is_specific(load_json):
with pytest.raises(FileNotFoundError) as err:
load_json('non_existing.json')
assert 'non_existing.json' in str(err)
|
8d3d28e50fea7030c82d5ffc5afc8c4cd3bd6cf3
|
f5de163623c0e1f2207104a69fea499b2d4b3948
|
/nikola/data/themes/base/messages/messages_da.py
|
b070db5680c966e6307b4f213a96f4b0b028f37c
|
[
"MIT"
] |
permissive
|
getnikola/nikola
|
cbc233706ebd52fe76bc14b2ff3c5a7c27678275
|
2b10e9952bac5a1119e6845c7a2c28273aca9775
|
refs/heads/master
| 2023-09-03T12:52:22.617757
| 2023-08-05T19:24:18
| 2023-08-05T19:24:18
| 4,025,121
| 2,142
| 467
|
MIT
| 2023-09-13T12:38:11
| 2012-04-14T13:53:22
|
Python
|
UTF-8
|
Python
| false
| false
| 1,665
|
py
|
messages_da.py
|
# -*- encoding:utf-8 -*-
"""Autogenerated file, do not edit. Submit translations on Transifex."""
MESSAGES = {
"%d min remaining to read": "%d min. tilbage at læse",
"(active)": "",
"Also available in:": "Fås også i:",
"Archive": "Arkiv",
"Atom feed": "",
"Authors": "",
"Categories": "Kategorier",
"Comments": "Kommentarer",
"LANGUAGE": "Dansk",
"Languages:": "Sprog:",
"More posts about %s": "Yderligere indlæg om %s",
"Newer posts": "Nyere indlæg",
"Next post": "Næste indlæg",
"Next": "",
"No posts found.": "Søgningen gav ingen resultater.",
"Nothing found.": "Søgningen gav ingen resultater.",
"Older posts": "Ældre indlæg",
"Original site": "Oprindeligt hjemmeside",
"Posted:": "Opslået:",
"Posts about %s": "Indlæg om %s",
"Posts by %s": "",
"Posts for year %s": "Indlæg for %s",
"Posts for {month_day_year}": "Indlæs for {month_day_year}",
"Posts for {month_year}": "Indlæg for {month_year}",
"Previous post": "Tidligere indlæg",
"Previous": "",
"Publication date": "Udgivelsesdato",
"RSS feed": "RSS-nyhedskilde",
"Read in English": "Læs på dansk",
"Read more": "Læs mere",
"Skip to main content": "Hop direkte til hovedindhold",
"Source": "Kilde",
"Subcategories:": "",
"Tags and Categories": "Nøgleord og kategorier",
"Tags": "Nøgleord",
"Toggle navigation": "",
"Uncategorized": "",
"Up": "",
"Updates": "",
"Write your page here.": "",
"Write your post here.": "",
"old posts, page %d": "gamle indlæg, side %d",
"page %d": "side %d",
"updated": "",
}
|
b86c3a2efd19904e9f8b0400df28f8d35226e6b4
|
e72db16e9747386afd01edd1dd36d9589a3f71ef
|
/src/test/ex_libpmem2/TESTS.py
|
7b4bbd50baa824933f91d959bf8107c4104e60f1
|
[
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
pmem/pmdk
|
5e49feee8017574340db2a0f9b4e2eab99f8581c
|
ccadb16227bddea1bdcc518bd8f298d551b2baad
|
refs/heads/master
| 2023-08-31T02:28:06.884934
| 2023-08-30T14:52:25
| 2023-08-30T14:52:25
| 23,637,153
| 1,017
| 402
|
NOASSERTION
| 2023-09-07T11:52:42
| 2014-09-03T20:55:23
|
C
|
UTF-8
|
Python
| false
| false
| 4,519
|
py
|
TESTS.py
|
#!../env.py
# SPDX-License-Identifier: BSD-3-Clause
# Copyright 2019-2023, Intel Corporation
#
import futils
import testframework as t
from testframework import granularity as g
@t.require_build(['debug', 'nondebug'])
class EX_LIBPMEM2(t.Test):
test_type = t.Medium
file_size = 1 * t.MiB
offset = str(97 * t.KiB)
length = str(65 * t.KiB)
class TEST0(EX_LIBPMEM2):
def run(self, ctx):
example_path = futils.get_example_path(ctx, 'pmem2', 'basic')
file_path = ctx.create_non_zero_file(self.file_size, 'testfile0')
ctx.exec(example_path, file_path)
class TEST1(EX_LIBPMEM2):
def run(self, ctx):
example_path = futils.get_example_path(ctx, 'pmem2', 'advanced')
file_path = ctx.create_non_zero_file(self.file_size, 'testfile0')
ctx.exec(example_path, file_path, self.offset, self.length)
class TEST2(EX_LIBPMEM2):
file_size = 16 * t.MiB
def run(self, ctx):
example_path = futils.get_example_path(ctx, 'pmem2', 'log')
file_path = ctx.create_holey_file(self.file_size, 'testfile0')
args = ['appendv', '4', 'PMDK ', 'is ', 'the best ', 'open source ',
'append', 'project in the world.', 'dump', 'rewind', 'dump',
'appendv', '2', 'End of ', 'file.', 'dump']
ctx.exec(example_path, file_path, *args, stdout_file='out2.log')
class TEST3(EX_LIBPMEM2):
def run(self, ctx):
example_path = futils.get_example_path(ctx, 'pmem2', 'redo')
file_path = ctx.create_holey_file(self.file_size, 'testfile0')
for x in range(1, 100):
ctx.exec(example_path, "add", file_path, x, x)
ctx.exec(example_path, "check", file_path)
ctx.exec(example_path, "print", file_path, stdout_file='out3.log')
class TEST4(EX_LIBPMEM2):
def run(self, ctx):
example_path = futils.get_example_path(ctx, 'pmem2',
'map_multiple_files')
args = []
for x in range(1, 10):
file_path = ctx.create_holey_file(self.file_size,
'testfile{}'.format(x))
args.append(file_path)
ctx.exec(example_path, *args, stdout_file='out4.log')
# XXX Disable the test execution under pmemcheck with g.PAGE until the issue
# https://github.com/pmem/pmdk/issues/5641 is fixed.
# additional test TEST501 has been added to cover non-pmemcheck configs.
class EX_LIBPMEM2_TEST5(EX_LIBPMEM2):
def run(self, ctx):
example_path = futils.get_example_path(ctx, 'pmem2', 'unsafe_shutdown')
file_path = ctx.create_holey_file(self.file_size, 'testfile0')
ctx.exec(example_path, "write", file_path, "foobar")
ctx.exec(example_path, "read", file_path, stdout_file='out5.log')
@g.require_granularity(g.CACHELINE, g.BYTE) # to be removed when fixed
@t.require_valgrind_enabled('pmemcheck') # to be removed when fixed
class TEST5(EX_LIBPMEM2_TEST5):
pass
# XXX Disable the test execution with 'memcheck' until the issue:
# https://github.com/pmem/pmdk/issues/5635 is fixed.
# additional test TEST501 has been added to cover non-pmemcheck configs.
# @t.require_valgrind_disabled('memcheck') # to be removed when fixed
# @t.require_valgrind_disabled('pmemcheck')
@t.require_valgrind_disabled('pmemcheck', 'memcheck')
class TEST501(EX_LIBPMEM2_TEST5): # to be removed when fixed
pass
# XXX disable the test for `memcheck' and 'helgrind'
# until https://github.com/pmem/pmdk/issues/5638 is fixed.
# @t.require_valgrind_disabled('memcheck', 'helgrind')
# XXX disable the test for `drd'
# until https://github.com/pmem/pmdk/issues/5593 is fixed.
# @t.require_valgrind_disabled('drd')
# This test case would require two VALGRIND_SET_CLEAN() calls
# to be added to the "src/examples/libpmem2/ringbuf/ringbuf.c"
# example (see https://github.com/pmem/pmdk/pull/5604)
# in order to pass under pmemcheck, but examples
# do not use valgrind macros on purpose (to avoid unnecessary
# complication), so this test case just should not be run under pmemcheck.
# @t.require_valgrind_disabled('pmemcheck')
# XXX _disabled() can be used only once.
@t.require_valgrind_disabled('memcheck', 'drd', 'pmemcheck', 'helgrind')
class TEST6(EX_LIBPMEM2):
def run(self, ctx):
example_path = futils.get_example_path(ctx, 'pmem2', 'ringbuf')
file_path = ctx.create_holey_file(self.file_size, 'testfile0')
ctx.exec(example_path, file_path, 10000, 4096, stdout_file='out6.log')
|
dff51987130f11100e3dfbb121e3817b7230c026
|
fa1ad2e2ac7e376fc7cb3b3a6e1bb88eed3e80be
|
/studio/micro-services/SREWorks/saas/aiops/api/aiops-server/models/redis_conn.py
|
8c7f3c17d99d39d1b576f99e44d5c44d93a8ad52
|
[
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0",
"EPL-1.0",
"LGPL-2.0-or-later",
"BSD-3-Clause",
"MIT",
"MPL-2.0",
"GPL-2.0-only",
"JSON"
] |
permissive
|
alldatacenter/alldata
|
7bc7713c9f1d56ad6b8e59ea03206d1073b7e047
|
8d5f9a2d49ab8f9e85ccf058cb02c2fda287afc6
|
refs/heads/master
| 2023-08-05T07:32:25.442740
| 2023-08-03T13:17:24
| 2023-08-03T13:17:24
| 213,321,771
| 774
| 250
|
Apache-2.0
| 2023-09-06T17:35:32
| 2019-10-07T07:36:18
| null |
UTF-8
|
Python
| false
| false
| 1,261
|
py
|
redis_conn.py
|
#!/usr/bin/env python
# encoding: utf-8
""" """
__author__ = 'sreworks'
import redis
import threading
from common.config import get_config
# Synchronization lock decorator: serializes calls to the decorated function.
# The lock is created once per decorated function; creating a new
# threading.Lock() inside wrapper() on every call would always acquire a
# fresh, uncontended lock and provide no mutual exclusion.
def synchronous_lock(func):
    lock = threading.Lock()
    def wrapper(*args, **kwargs):
        with lock:
            return func(*args, **kwargs)
    return wrapper
class RedisConnFactory(object):
instance = None
@synchronous_lock
def __new__(cls, *args, **kwargs):
if cls.instance is None:
cls.instance = object.__new__(cls)
return cls.instance
def __init__(self):
self._object_map = {}
config = get_config()
self.redis_conn_config = config.get('redis')
def get_tsp_redis_conn(self):
return self._get_redis_conn('tsp')
def _get_redis_conn(self, name):
if name in self._object_map:
return self._object_map[name]
else:
r_config = self.redis_conn_config.get(name)
r_conn = redis.Redis(
host=r_config['host'],
port=r_config['port'],
db=r_config['db'],
password=r_config['password']
)
self._object_map[name] = r_conn
return r_conn
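if __name__ == '__main__':
    # A hedged demo of the singleton behavior above: repeated construction
    # returns the same process-wide object. Running this requires a config
    # with a 'redis' section reachable via get_config(); no connection is
    # opened until _get_redis_conn() is called.
    factory_a = RedisConnFactory()
    factory_b = RedisConnFactory()
    assert factory_a is factory_b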
|
1a2939b0e69cc76103378f069cc3021fafcbdf5d
|
bb0e88e72382e27c48222d1e7a308476794740fe
|
/core/tests/base_payload.py
|
044c82d20afef8d8591f9945b30559d61755b80c
|
[
"BSD-3-Clause"
] |
permissive
|
facebookincubator/OnlineSchemaChange
|
b6b0e62618a54bc5a7058a8742a9c5eef395d22a
|
99c27961ec3a87c7d1bb650107e79971ae8c465f
|
refs/heads/main
| 2023-09-01T21:09:11.494282
| 2023-09-01T05:53:07
| 2023-09-01T05:53:07
| 79,285,486
| 998
| 167
|
NOASSERTION
| 2023-01-24T03:10:54
| 2017-01-18T00:06:19
|
Python
|
UTF-8
|
Python
| false
| false
| 1,439
|
py
|
base_payload.py
|
#!/usr/bin/env python3
"""
Copyright (c) 2017-present, Facebook, Inc.
All rights reserved.
This source code is licensed under the BSD-style license found in the
LICENSE file in the root directory of this source tree.
"""
import unittest
from unittest.mock import Mock
from ..lib.error import OSCError
from ..lib.payload.base import Payload
class BasePayloadTestCase(unittest.TestCase):
def test_failed_to_get_name_lock(self):
payload = Payload()
payload.skip_named_lock = False
with self.assertRaises(OSCError) as err_context:
payload.query = Mock(return_value=None)
payload.get_osc_lock()
self.assertEqual(err_context.exception.err_key, "UNABLE_TO_GET_LOCK")
with self.assertRaises(OSCError) as err_context:
payload.query = Mock(return_value=[{"lockstatus": 0}])
payload.get_osc_lock()
self.assertEqual(err_context.exception.err_key, "UNABLE_TO_GET_LOCK")
def test_successfully_get_name_lock(self):
payload = Payload()
payload.skip_named_lock = False
payload.query = Mock(return_value=[{"lockstatus": 1}])
payload.get_osc_lock()
    def test_get_name_lock_ignore_failure(self):
        payload = Payload()
        payload.skip_named_lock = True
        # Nothing should happen if we skip named lock: get_osc_lock should
        # return without ever querying the database
        payload.query = Mock(return_value=None)
        payload.get_osc_lock()
        self.assertFalse(payload.query.called)
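# --- Self-contained sketch of the mocking pattern above (all names here are
# hypothetical, not OnlineSchemaChange's real implementation). The stubbed
# return values mirror the shape of a MySQL named-lock query, where
# GET_LOCK() yields 1 on success and 0 on contention.
import unittest
from unittest.mock import Mock as _Mock

class FakePayload:
    def get_osc_lock(self):
        rows = self.query("SELECT GET_LOCK('osc', 0) AS lockstatus")
        if not rows or rows[0]["lockstatus"] != 1:
            raise RuntimeError("UNABLE_TO_GET_LOCK")

class SketchTestCase(unittest.TestCase):
    def test_lock_paths(self):
        payload = FakePayload()
        payload.query = _Mock(return_value=[{"lockstatus": 1}])
        payload.get_osc_lock()  # lock granted, no exception
        payload.query = _Mock(return_value=[{"lockstatus": 0}])
        self.assertRaises(RuntimeError, payload.get_osc_lock)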
|
59f0d7c0b3e1852207749d81b900c5791853a83c
|
6416b746ee71d897789eab1e450000831674dbd0
|
/src/otx/hpo/utils.py
|
886cfb44853902831f0ebff25f92c02e3cc5d2df
|
[
"Apache-2.0"
] |
permissive
|
openvinotoolkit/training_extensions
|
c921f83ad52311af96ff45ae0b88d0aecddd855b
|
80454808b38727e358e8b880043eeac0f18152fb
|
refs/heads/develop
| 2023-08-31T06:29:07.229339
| 2023-08-31T01:57:26
| 2023-08-31T01:57:26
| 154,843,614
| 397
| 230
|
Apache-2.0
| 2023-09-14T06:17:01
| 2018-10-26T14:02:29
|
Python
|
UTF-8
|
Python
| false
| false
| 3,194
|
py
|
utils.py
|
"""Collections of Utils for HPO."""
# Copyright (C) 2022 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions
# and limitations under the License.
from typing import Literal, Optional
def left_value_is_better(val1, val2, mode: Literal["max", "min"]) -> bool:
    """Check whether the left value is better than the right value.
    Whether "better" means greater or lesser depends on 'mode'.
    Args:
        val1 : value to compare.
        val2 : value to compare val1 against.
        mode (Literal['max', 'min']): decides whether better means greater or lesser.
    Returns:
        bool: whether val1 is better than val2.
    """
    check_mode_input(mode)
    if mode == "max":
        return val1 > val2
    return val1 < val2
def check_positive(value, variable_name: Optional[str] = None, error_message: Optional[str] = None):
"""Validate that value is positivle.
Args:
value (Any): value to validate.
variable_name (Optional[str], optional): name of value. It's used for error message. Defaults to None.
error_message (Optional[str], optional): Error message to use when type is different. Defaults to None.
Raises:
ValueError: If value isn't positive, the error is raised.
"""
if value <= 0:
if error_message is not None:
message = error_message
elif variable_name:
message = f"{variable_name} should be positive.\n" f"your value : {value}"
else:
raise ValueError
raise ValueError(message)
def check_not_negative(value, variable_name: Optional[str] = None, error_message: Optional[str] = None):
"""Validate that value isn't negative.
Args:
value (Any): value to validate.
variable_name (Optional[str], optional): name of value. It's used for error message. Defaults to None.
error_message (Optional[str], optional): Error message to use when type is different. Defaults to None.
Raises:
ValueError: If value is negative, the error is raised.
"""
if value < 0:
if error_message is not None:
message = error_message
elif variable_name:
message = f"{variable_name} should be positive.\n" f"your value : {value}"
else:
raise ValueError
raise ValueError(message)
def check_mode_input(mode: str):
"""Validate that mode is 'max' or 'min'.
Args:
mode (str): string to validate.
Raises:
ValueError: If 'mode' is not both 'max' and 'min', the error is raised.
"""
if mode not in ["max", "min"]:
raise ValueError("mode should be max or min.\n" f"Your value : {mode}")
|
0c56229721e086a94a6a283720f712846f77d71a
|
307d3837d31f9e3728af2b62ca51ebf63fe6ec6b
|
/hall_of_fame/lysuk96/Week12/12_BOJ_전설의JBNU.py
|
fefc81badf9cd6eeb524d0ce2d23038e757f5847
|
[] |
no_license
|
ellynhan/challenge100-codingtest-study
|
905043497d154b8a7333ca536e536d013f6e7454
|
bcdc6d04f13b12ba80b42e066f9d244d7c2cc698
|
refs/heads/master
| 2023-09-01T14:10:13.481013
| 2023-08-27T14:38:52
| 2023-08-27T14:38:52
| 401,561,230
| 162
| 176
| null | 2023-09-09T14:56:25
| 2021-08-31T03:30:36
|
C++
|
UTF-8
|
Python
| false
| false
| 1,310
|
py
|
12_BOJ_전설의JBNU.py
|
from collections import defaultdict
from bisect import insort,bisect_right
# def is_pruning(a, b):
# if abs(b - a) <= K:
# return True
# return False
def find(x):
    # Return the key nearest to x within distance K; '?' on a tie, -1 if none.
    idx = bisect_right(keys, x)
    if idx == 0:
        right = keys[idx]
        if right - x <= K:  # x lies left of every key, so the distance is right - x
            return right
elif idx == len(keys):
left = keys[idx-1]
if x - left <= K:
return left
else:
left = keys[idx-1]
right = keys[idx]
if right -x <= K or x-left <= K:
if right -x == x - left:
return '?'
elif x - left < right -x:
return left
else:
return right
return -1
N, M, K = map(int, input().split(" "))
dic = defaultdict(int)
keys = []
for _ in range(N):
a, b = map(int, input().split(" "))
dic[a] = b
insort(keys, a)
for _ in range(M):
    command, *tmp = map(int, input().split(" "))
if command == 1:
dic[tmp[0]] = tmp[1]
insort(keys, tmp[0])
elif command == 2:
now = find(tmp[0])
if now != '?' and now != -1:
dic[now] = tmp[1]
# print(dic)
    elif command == 3:
now = find(tmp[0])
if now != '?' and now != -1:
print(dic[now])
else:
print(now)
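# --- Self-contained sketch of the nearest-key rule that find() implements
# (the helper name and sample numbers are invented for illustration; it
# reuses bisect_right, which is imported at the top of this script):
def nearest_within_k(sorted_keys, x, k):
    i = bisect_right(sorted_keys, x)
    candidates = [key for key in sorted_keys[max(i - 1, 0):i + 1]
                  if abs(key - x) <= k]
    if not candidates:
        return -1
    best = min(abs(key - x) for key in candidates)
    winners = {key for key in candidates if abs(key - x) == best}
    return '?' if len(winners) == 2 else winners.pop()

assert nearest_within_k([2, 5, 9], 4, 2) == 5    # 5 is strictly closer than 2
assert nearest_within_k([2, 5, 9], 7, 2) == '?'  # tie between 5 and 9
assert nearest_within_k([2, 5, 9], 12, 2) == -1  # nothing within distance 2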
|
d6a5c0240576f85e4dd4de8316e903c649e109f0
|
2337351b228818e41be3002bd38f68f77c2aa074
|
/core/interface/loader.py
|
fdf9e0050a7cd8031cf510e8518f9f38ec5b48d1
|
[
"BSD-3-Clause"
] |
permissive
|
nocproject/noc
|
57d40c680a1499374463e472434f9595ed6d1374
|
6e6d71574e9b9d822bec572cc629a0ea73604a59
|
refs/heads/master
| 2023-08-31T01:11:33.544573
| 2023-08-30T17:31:11
| 2023-08-30T17:31:11
| 107,815,776
| 105
| 33
|
BSD-3-Clause
| 2023-07-31T07:57:45
| 2017-10-21T21:04:33
|
Python
|
UTF-8
|
Python
| false
| false
| 4,266
|
py
|
loader.py
|
# ----------------------------------------------------------------------
# Interface loader
# ----------------------------------------------------------------------
# Copyright (C) 2007-2020 The NOC Project
# See LICENSE for details
# ----------------------------------------------------------------------
# Python modules
import sys
import glob
import logging
import inspect
import threading
import os
import re
import importlib
# NOC modules
from .base import BaseInterface
from noc.config import config
logger = logging.getLogger(__name__)
class InterfaceLoader(object):
rx_class = re.compile(r"^class\s+(?P<name>\S+)\(", re.MULTILINE)
def __init__(self):
        self.interfaces = {}  # cache: interface name -> class (or None on failure)
self.lock = threading.Lock()
self.all_interfaces = set()
def get_interface(self, name):
"""
        Load an interface and return the BaseInterface subclass.
        Returns None when no interface is found or a loading error occurred
"""
with self.lock:
interface = self.interfaces.get(name)
if interface:
return interface
logger.info("Loading interface %s", name)
if not self.is_valid_name(name):
logger.error("Invalid interface name")
return None
imname = name.lower()
for p in config.get_customized_paths("", prefer_custom=True):
if os.path.exists(os.path.join(p, "sa", "interfaces", "%s.py" % imname)):
if p:
# Custom script
base_name = os.path.basename(os.path.dirname(config.path.custom_path))
else:
# Common script
base_name = "noc"
module_name = "%s.sa.interfaces.%s" % (base_name, imname)
break
else:
logger.error("Interface not found: %s", name)
self.interfaces[name] = None
return None
try:
sm = __import__(module_name, {}, {}, "*")
for n in dir(sm):
o = getattr(sm, n)
if (
inspect.isclass(o)
and issubclass(o, BaseInterface)
and o.__module__ == sm.__name__
):
self.interfaces[name] = o
return o
except Exception as e:
logger.error("Failed to load interface %s: %s", name, e)
self.interfaces[name] = None
return None
    def reload(self):
        """
        Reset interface cache and release all modules
        """
        with self.lock:
            logger.info("Reloading interfaces")
            for iname, iface in self.interfaces.items():
                if iface is None:
                    continue  # skip names that previously failed to load
                logger.debug("Reload interface %s", iname)
                importlib.reload(sys.modules[iface.__module__])
self.interfaces = {}
self.all_interfaces = set()
def is_valid_name(self, name):
return ".." not in name
def find_interfaces(self):
"""
        Scan all available interfaces
"""
ns = set()
for gx in config.get_customized_paths(os.path.join("sa", "interfaces", "*.py")):
for path in glob.glob(gx):
if path in ("base.py", "__init__.py"):
continue
with open(path) as f:
data = f.read()
for match in self.rx_class.finditer(data):
iname = match.group("name")
fname = os.path.split(path)[1]
if iname.lower() == fname[:-3]:
ns.add(iname)
with self.lock:
self.all_interfaces = ns
def iter_interfaces(self):
"""
        Yield all available interface names
"""
if not self.all_interfaces:
self.find_interfaces()
yield from sorted(self.all_interfaces)
def has_interface(self, name):
"""
        Check whether an interface exists
"""
if not self.all_interfaces:
self.find_interfaces()
return name in self.all_interfaces
# Create singleton object
loader = InterfaceLoader()
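# --- Hedged usage sketch (not part of the original module; "IGetVersion" is
# used as a plausible interface name for illustration). The module-level
# singleton above is the intended entry point:
if __name__ == "__main__":
    for name in loader.iter_interfaces():
        print(name)
    iface = loader.get_interface("IGetVersion")
    if iface is None:
        print("interface missing or failed to load")
    else:
        assert issubclass(iface, BaseInterface)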
|
ebd7518565c265a1dd51e4cc620cd939980e5381
|
6793f3b093478fdde550d8669b9b955081af5e0e
|
/nbconvert/exporters/slides.py
|
dacc2aaba0490282e4b0b6cc3230f970aafe80e0
|
[
"BSD-3-Clause"
] |
permissive
|
jupyter/nbconvert
|
0afe110c4ec39b68661c601f8f3b20fd21a9ba13
|
51c6e0a7d40918366e2a68c5ea471fd2c65722cb
|
refs/heads/main
| 2023-09-03T16:05:25.981152
| 2023-08-29T13:57:58
| 2023-08-29T13:57:58
| 33,653,617
| 1,645
| 654
|
BSD-3-Clause
| 2023-09-11T10:42:26
| 2015-04-09T06:58:23
|
Python
|
UTF-8
|
Python
| false
| false
| 6,465
|
py
|
slides.py
|
"""HTML slide show Exporter class"""
# Copyright (c) Jupyter Development Team.
# Distributed under the terms of the Modified BSD License.
from copy import deepcopy
from warnings import warn
from traitlets import Bool, Unicode, default
from nbconvert.preprocessors.base import Preprocessor
from .html import HTMLExporter
class _RevealMetadataPreprocessor(Preprocessor):
# A custom preprocessor adding convenience metadata to cells
def preprocess(self, nb, resources=None):
nb = deepcopy(nb)
for cell in nb.cells:
# Make sure every cell has a slide_type
try:
slide_type = cell.metadata.get("slideshow", {}).get("slide_type", "-")
except AttributeError:
slide_type = "-"
cell.metadata.slide_type = slide_type
# Find the first visible cell
for index, cell in enumerate(nb.cells):
if cell.metadata.slide_type not in {"notes", "skip"}:
cell.metadata.slide_type = "slide"
cell.metadata.slide_start = True
cell.metadata.subslide_start = True
first_slide_ix = index
break
else:
msg = "All cells are hidden, cannot create slideshow"
raise ValueError(msg)
in_fragment = False
for index, cell in enumerate(nb.cells[first_slide_ix + 1 :], start=(first_slide_ix + 1)):
previous_cell = nb.cells[index - 1]
# Slides are <section> elements in the HTML, subslides (the vertically
# stacked slides) are also <section> elements inside the slides,
# and fragments are <div>s within subslides. Subslide and fragment
# elements can contain content:
# <section>
# <section>
# (content)
# <div class="fragment">(content)</div>
# </section>
# </section>
# Get the slide type. If type is subslide or slide,
# end the last slide/subslide/fragment as applicable.
if cell.metadata.slide_type == "slide":
previous_cell.metadata.slide_end = True
cell.metadata.slide_start = True
if cell.metadata.slide_type in {"subslide", "slide"}:
previous_cell.metadata.fragment_end = in_fragment
previous_cell.metadata.subslide_end = True
cell.metadata.subslide_start = True
in_fragment = False
elif cell.metadata.slide_type == "fragment":
cell.metadata.fragment_start = True
if in_fragment:
previous_cell.metadata.fragment_end = True
else:
in_fragment = True
# The last cell will always be the end of a slide
nb.cells[-1].metadata.fragment_end = in_fragment
nb.cells[-1].metadata.subslide_end = True
nb.cells[-1].metadata.slide_end = True
return nb, resources
class SlidesExporter(HTMLExporter):
"""Exports HTML slides with reveal.js"""
# Overrides from HTMLExporter
#################################
export_from_notebook = "Reveal.js slides"
@default("template_name")
def _template_name_default(self):
return "reveal"
@default("file_extension")
def _file_extension_default(self):
return ".slides.html"
@default("template_extension")
def _template_extension_default(self):
return ".html.j2"
# Extra resources
#################################
reveal_url_prefix = Unicode(
help="""The URL prefix for reveal.js (version 3.x).
This defaults to the reveal CDN, but can be any url pointing to a copy
of reveal.js.
For speaker notes to work, this must be a relative path to a local
copy of reveal.js: e.g., "reveal.js".
If a relative path is given, it must be a subdirectory of the
current directory (from which the server is run).
See the usage documentation
(https://nbconvert.readthedocs.io/en/latest/usage.html#reveal-js-html-slideshow)
for more details.
"""
).tag(config=True)
@default("reveal_url_prefix")
def _reveal_url_prefix_default(self):
if "RevealHelpPreprocessor.url_prefix" in self.config:
warn(
"Please update RevealHelpPreprocessor.url_prefix to "
"SlidesExporter.reveal_url_prefix in config files.",
stacklevel=2,
)
return self.config.RevealHelpPreprocessor.url_prefix
return "https://unpkg.com/reveal.js@4.0.2"
reveal_theme = Unicode(
"simple",
help="""
Name of the reveal.js theme to use.
We look for a file with this name under
``reveal_url_prefix``/css/theme/``reveal_theme``.css.
https://github.com/hakimel/reveal.js/tree/master/css/theme has
list of themes that ship by default with reveal.js.
""",
).tag(config=True)
reveal_transition = Unicode(
"slide",
help="""
Name of the reveal.js transition to use.
The list of transitions that ships by default with reveal.js are:
none, fade, slide, convex, concave and zoom.
""",
).tag(config=True)
reveal_scroll = Bool(
False,
help="""
If True, enable scrolling within each slide
""",
).tag(config=True)
reveal_number = Unicode(
"",
help="""
slide number format (e.g. 'c/t'). Choose from:
'c': current, 't': total, 'h': horizontal, 'v': vertical
""",
).tag(config=True)
font_awesome_url = Unicode(
"https://cdnjs.cloudflare.com/ajax/libs/font-awesome/4.7.0/css/font-awesome.css",
help="""
URL to load font awesome from.
Defaults to loading from cdnjs.
""",
).tag(config=True)
def _init_resources(self, resources):
resources = super()._init_resources(resources)
if "reveal" not in resources:
resources["reveal"] = {}
resources["reveal"]["url_prefix"] = self.reveal_url_prefix
resources["reveal"]["theme"] = self.reveal_theme
resources["reveal"]["transition"] = self.reveal_transition
resources["reveal"]["scroll"] = self.reveal_scroll
resources["reveal"]["number"] = self.reveal_number
return resources
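# --- Minimal usage sketch (the notebook filename is hypothetical). The
# standard nbconvert exporter API applies, so converting a notebook to
# reveal.js slides takes a few lines:
if __name__ == "__main__":
    exporter = SlidesExporter(reveal_theme="night", reveal_scroll=True)
    body, resources = exporter.from_filename("example.ipynb")
    with open("example" + exporter.file_extension, "w", encoding="utf-8") as f:
        f.write(body)  # open example.slides.html in a browser to present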
|
0e29461ac713de451279167415753c37e4a7c911
|
549270020f6c8724e2ef1b12e38d11b025579f8d
|
/recipes/google-cloud-cpp/2.x/components_2_12_0.py
|
943f3345fcb419584035458a0e79b127e7f4a37a
|
[
"MIT"
] |
permissive
|
conan-io/conan-center-index
|
1bcec065ccd65aa38b1fed93fbd94d9d5fe6bc43
|
3b17e69bb4e5601a850b6e006e44775e690bac33
|
refs/heads/master
| 2023-08-31T11:34:45.403978
| 2023-08-31T11:13:23
| 2023-08-31T11:13:23
| 204,671,232
| 844
| 1,820
|
MIT
| 2023-09-14T21:22:42
| 2019-08-27T09:43:58
|
Python
|
UTF-8
|
Python
| false
| false
| 43,236
|
py
|
components_2_12_0.py
|
# Automatically generated by /usr/local/google/home/coryan/cci-develop/recipes/google-cloud-cpp/2.x/extract_dependencies.py DO NOT EDIT
DEPENDENCIES = {
"accessapproval_protos": ['api_annotations_protos', 'api_client_protos', 'api_field_behavior_protos', 'api_http_protos', 'api_launch_stage_protos', 'api_resource_protos', 'grpc::_grpc', 'grpc::grpc++', 'protobuf::libprotobuf'],
"accesscontextmanager_protos": ['api_annotations_protos', 'api_client_protos', 'api_field_behavior_protos', 'api_http_protos', 'api_launch_stage_protos', 'api_resource_protos', 'grpc::_grpc', 'grpc::grpc++', 'iam_v1_iam_policy_protos', 'iam_v1_options_protos', 'iam_v1_policy_protos', 'longrunning_operations_protos', 'protobuf::libprotobuf', 'rpc_status_protos', 'type_expr_protos'],
"advisorynotifications_protos": ['api_annotations_protos', 'api_client_protos', 'api_field_behavior_protos', 'api_http_protos', 'api_launch_stage_protos', 'api_resource_protos', 'grpc::_grpc', 'grpc::grpc++', 'protobuf::libprotobuf'],
"aiplatform_protos": ['api_annotations_protos', 'api_client_protos', 'api_field_behavior_protos', 'api_http_protos', 'api_httpbody_protos', 'api_launch_stage_protos', 'api_resource_protos', 'grpc::_grpc', 'grpc::grpc++', 'longrunning_operations_protos', 'protobuf::libprotobuf', 'rpc_status_protos', 'type_interval_protos', 'type_money_protos'],
"alloydb_protos": ['api_annotations_protos', 'api_client_protos', 'api_field_behavior_protos', 'api_http_protos', 'api_launch_stage_protos', 'api_resource_protos', 'grpc::_grpc', 'grpc::grpc++', 'longrunning_operations_protos', 'protobuf::libprotobuf', 'rpc_status_protos', 'type_dayofweek_protos', 'type_timeofday_protos'],
"apigateway_protos": ['api_annotations_protos', 'api_client_protos', 'api_field_behavior_protos', 'api_http_protos', 'api_launch_stage_protos', 'api_resource_protos', 'grpc::_grpc', 'grpc::grpc++', 'longrunning_operations_protos', 'protobuf::libprotobuf', 'rpc_status_protos'],
"apigeeconnect_protos": ['api_annotations_protos', 'api_client_protos', 'api_field_behavior_protos', 'api_http_protos', 'api_launch_stage_protos', 'api_resource_protos', 'grpc::_grpc', 'grpc::grpc++', 'protobuf::libprotobuf', 'rpc_status_protos'],
"apikeys_protos": ['api_annotations_protos', 'api_client_protos', 'api_field_behavior_protos', 'api_http_protos', 'api_launch_stage_protos', 'api_resource_protos', 'grpc::_grpc', 'grpc::grpc++', 'longrunning_operations_protos', 'protobuf::libprotobuf', 'rpc_status_protos'],
"appengine_protos": ['api_annotations_protos', 'api_client_protos', 'api_field_behavior_protos', 'api_http_protos', 'api_launch_stage_protos', 'api_resource_protos', 'grpc::_grpc', 'grpc::grpc++', 'logging_type_type_protos', 'longrunning_operations_protos', 'protobuf::libprotobuf', 'rpc_status_protos'],
"artifactregistry_protos": ['api_annotations_protos', 'api_client_protos', 'api_field_behavior_protos', 'api_http_protos', 'api_launch_stage_protos', 'api_resource_protos', 'grpc::_grpc', 'grpc::grpc++', 'iam_v1_iam_policy_protos', 'iam_v1_options_protos', 'iam_v1_policy_protos', 'longrunning_operations_protos', 'protobuf::libprotobuf', 'rpc_status_protos', 'type_expr_protos'],
"asset_protos": ['accesscontextmanager_protos', 'api_annotations_protos', 'api_client_protos', 'api_field_behavior_protos', 'api_http_protos', 'api_launch_stage_protos', 'api_resource_protos', 'grpc::_grpc', 'grpc::grpc++', 'iam_v1_iam_policy_protos', 'iam_v1_options_protos', 'iam_v1_policy_protos', 'longrunning_operations_protos', 'osconfig_protos', 'protobuf::libprotobuf', 'rpc_code_protos', 'rpc_status_protos', 'type_date_protos', 'type_datetime_protos', 'type_dayofweek_protos', 'type_expr_protos', 'type_timeofday_protos'],
"assuredworkloads_protos": ['api_annotations_protos', 'api_client_protos', 'api_field_behavior_protos', 'api_http_protos', 'api_launch_stage_protos', 'api_resource_protos', 'grpc::_grpc', 'grpc::grpc++', 'longrunning_operations_protos', 'protobuf::libprotobuf', 'rpc_status_protos'],
"automl_protos": ['api_annotations_protos', 'api_client_protos', 'api_field_behavior_protos', 'api_http_protos', 'api_launch_stage_protos', 'api_resource_protos', 'grpc::_grpc', 'grpc::grpc++', 'longrunning_operations_protos', 'protobuf::libprotobuf', 'rpc_status_protos'],
"baremetalsolution_protos": ['api_annotations_protos', 'api_client_protos', 'api_field_behavior_protos', 'api_http_protos', 'api_launch_stage_protos', 'api_resource_protos', 'grpc::_grpc', 'grpc::grpc++', 'longrunning_operations_protos', 'protobuf::libprotobuf', 'rpc_status_protos'],
"batch_protos": ['api_annotations_protos', 'api_client_protos', 'api_field_behavior_protos', 'api_http_protos', 'api_launch_stage_protos', 'api_resource_protos', 'grpc::_grpc', 'grpc::grpc++', 'longrunning_operations_protos', 'protobuf::libprotobuf', 'rpc_status_protos'],
"beyondcorp_protos": ['api_annotations_protos', 'api_client_protos', 'api_field_behavior_protos', 'api_http_protos', 'api_launch_stage_protos', 'api_resource_protos', 'grpc::_grpc', 'grpc::grpc++', 'longrunning_operations_protos', 'protobuf::libprotobuf', 'rpc_status_protos'],
"cloud_bigquery_protos": ['api_annotations_protos', 'api_client_protos', 'api_distribution_protos', 'api_field_behavior_protos', 'api_http_protos', 'api_label_protos', 'api_launch_stage_protos', 'api_metric_protos', 'api_resource_protos', 'grpc::_grpc', 'grpc::grpc++', 'iam_v1_iam_policy_protos', 'iam_v1_options_protos', 'iam_v1_policy_protos', 'protobuf::libprotobuf', 'rpc_error_details_protos', 'rpc_status_protos', 'type_expr_protos'],
"bigtable_protos": ['api_annotations_protos', 'api_client_protos', 'api_field_behavior_protos', 'api_http_protos', 'api_launch_stage_protos', 'api_resource_protos', 'api_routing_protos', 'grpc::_grpc', 'grpc::grpc++', 'iam_v1_iam_policy_protos', 'iam_v1_options_protos', 'iam_v1_policy_protos', 'longrunning_operations_protos', 'protobuf::libprotobuf', 'rpc_status_protos', 'type_expr_protos'],
"billing_protos": ['api_annotations_protos', 'api_client_protos', 'api_field_behavior_protos', 'api_http_protos', 'api_launch_stage_protos', 'api_resource_protos', 'grpc::_grpc', 'grpc::grpc++', 'iam_v1_iam_policy_protos', 'iam_v1_options_protos', 'iam_v1_policy_protos', 'protobuf::libprotobuf', 'type_date_protos', 'type_expr_protos', 'type_money_protos'],
"binaryauthorization_protos": ['api_annotations_protos', 'api_client_protos', 'api_field_behavior_protos', 'api_http_protos', 'api_launch_stage_protos', 'api_resource_protos', 'grafeas_protos', 'grpc::_grpc', 'grpc::grpc++', 'protobuf::libprotobuf', 'rpc_status_protos'],
"certificatemanager_protos": ['api_annotations_protos', 'api_client_protos', 'api_field_behavior_protos', 'api_http_protos', 'api_launch_stage_protos', 'api_resource_protos', 'grpc::_grpc', 'grpc::grpc++', 'longrunning_operations_protos', 'protobuf::libprotobuf', 'rpc_status_protos'],
"channel_protos": ['api_annotations_protos', 'api_client_protos', 'api_field_behavior_protos', 'api_http_protos', 'api_launch_stage_protos', 'api_resource_protos', 'grpc::_grpc', 'grpc::grpc++', 'longrunning_operations_protos', 'protobuf::libprotobuf', 'rpc_status_protos', 'type_date_protos', 'type_datetime_protos', 'type_decimal_protos', 'type_money_protos', 'type_postal_address_protos'],
"cloudbuild_protos": ['api_annotations_protos', 'api_client_protos', 'api_field_behavior_protos', 'api_http_protos', 'api_httpbody_protos', 'api_launch_stage_protos', 'api_resource_protos', 'grpc::_grpc', 'grpc::grpc++', 'longrunning_operations_protos', 'protobuf::libprotobuf', 'rpc_status_protos'],
"cloud_common_common_protos": ['api_field_behavior_protos', 'grpc::_grpc', 'grpc::grpc++', 'protobuf::libprotobuf'],
"composer_protos": ['api_annotations_protos', 'api_client_protos', 'api_field_behavior_protos', 'api_http_protos', 'api_launch_stage_protos', 'api_resource_protos', 'grpc::_grpc', 'grpc::grpc++', 'longrunning_operations_protos', 'protobuf::libprotobuf', 'rpc_status_protos', 'type_date_protos'],
"confidentialcomputing_protos": ['api_annotations_protos', 'api_client_protos', 'api_field_behavior_protos', 'api_http_protos', 'api_launch_stage_protos', 'api_resource_protos', 'grpc::_grpc', 'grpc::grpc++', 'protobuf::libprotobuf'],
"connectors_protos": ['api_annotations_protos', 'api_client_protos', 'api_field_behavior_protos', 'api_http_protos', 'api_launch_stage_protos', 'api_resource_protos', 'grpc::_grpc', 'grpc::grpc++', 'longrunning_operations_protos', 'protobuf::libprotobuf', 'rpc_status_protos'],
"contactcenterinsights_protos": ['api_annotations_protos', 'api_client_protos', 'api_field_behavior_protos', 'api_http_protos', 'api_launch_stage_protos', 'api_resource_protos', 'grpc::_grpc', 'grpc::grpc++', 'longrunning_operations_protos', 'protobuf::libprotobuf', 'rpc_status_protos'],
"container_protos": ['api_annotations_protos', 'api_client_protos', 'api_field_behavior_protos', 'api_http_protos', 'api_launch_stage_protos', 'api_resource_protos', 'grpc::_grpc', 'grpc::grpc++', 'protobuf::libprotobuf', 'rpc_code_protos', 'rpc_status_protos'],
"containeranalysis_protos": ['api_annotations_protos', 'api_client_protos', 'api_field_behavior_protos', 'api_http_protos', 'api_launch_stage_protos', 'api_resource_protos', 'grafeas_protos', 'grpc::_grpc', 'grpc::grpc++', 'iam_v1_iam_policy_protos', 'iam_v1_options_protos', 'iam_v1_policy_protos', 'protobuf::libprotobuf', 'rpc_status_protos', 'type_expr_protos'],
"contentwarehouse_protos": ['api_annotations_protos', 'api_client_protos', 'api_field_behavior_protos', 'api_http_protos', 'api_launch_stage_protos', 'api_resource_protos', 'documentai_protos', 'grpc::_grpc', 'grpc::grpc++', 'iam_v1_policy_protos', 'longrunning_operations_protos', 'protobuf::libprotobuf', 'rpc_status_protos', 'type_color_protos', 'type_date_protos', 'type_datetime_protos', 'type_expr_protos', 'type_interval_protos', 'type_money_protos', 'type_postal_address_protos'],
"datacatalog_protos": ['api_annotations_protos', 'api_client_protos', 'api_field_behavior_protos', 'api_http_protos', 'api_launch_stage_protos', 'api_resource_protos', 'grpc::_grpc', 'grpc::grpc++', 'iam_v1_iam_policy_protos', 'iam_v1_options_protos', 'iam_v1_policy_protos', 'longrunning_operations_protos', 'protobuf::libprotobuf', 'rpc_status_protos', 'type_expr_protos'],
"datamigration_protos": ['api_annotations_protos', 'api_client_protos', 'api_field_behavior_protos', 'api_http_protos', 'api_launch_stage_protos', 'api_resource_protos', 'grpc::_grpc', 'grpc::grpc++', 'iam_v1_iam_policy_protos', 'iam_v1_options_protos', 'iam_v1_policy_protos', 'longrunning_operations_protos', 'protobuf::libprotobuf', 'rpc_status_protos', 'type_expr_protos'],
"dataplex_protos": ['api_annotations_protos', 'api_client_protos', 'api_field_behavior_protos', 'api_http_protos', 'api_launch_stage_protos', 'api_resource_protos', 'grpc::_grpc', 'grpc::grpc++', 'iam_v1_iam_policy_protos', 'iam_v1_options_protos', 'iam_v1_policy_protos', 'longrunning_operations_protos', 'protobuf::libprotobuf', 'rpc_status_protos', 'type_expr_protos'],
"dataproc_protos": ['api_annotations_protos', 'api_client_protos', 'api_field_behavior_protos', 'api_http_protos', 'api_launch_stage_protos', 'api_resource_protos', 'grpc::_grpc', 'grpc::grpc++', 'longrunning_operations_protos', 'protobuf::libprotobuf', 'rpc_status_protos'],
"datastream_protos": ['api_annotations_protos', 'api_client_protos', 'api_field_behavior_protos', 'api_http_protos', 'api_launch_stage_protos', 'api_resource_protos', 'grpc::_grpc', 'grpc::grpc++', 'longrunning_operations_protos', 'protobuf::libprotobuf', 'rpc_status_protos'],
"deploy_protos": ['api_annotations_protos', 'api_client_protos', 'api_field_behavior_protos', 'api_http_protos', 'api_launch_stage_protos', 'api_resource_protos', 'grpc::_grpc', 'grpc::grpc++', 'longrunning_operations_protos', 'protobuf::libprotobuf', 'rpc_status_protos', 'type_date_protos'],
"cloud_dialogflow_v2_protos": ['api_annotations_protos', 'api_client_protos', 'api_field_behavior_protos', 'api_http_protos', 'api_launch_stage_protos', 'api_resource_protos', 'grpc::_grpc', 'grpc::grpc++', 'longrunning_operations_protos', 'protobuf::libprotobuf', 'rpc_status_protos', 'type_latlng_protos'],
"dialogflow_cx_protos": ['api_annotations_protos', 'api_client_protos', 'api_field_behavior_protos', 'api_http_protos', 'api_launch_stage_protos', 'api_resource_protos', 'grpc::_grpc', 'grpc::grpc++', 'longrunning_operations_protos', 'protobuf::libprotobuf', 'rpc_status_protos', 'type_latlng_protos'],
"dlp_protos": ['api_annotations_protos', 'api_client_protos', 'api_field_behavior_protos', 'api_http_protos', 'api_launch_stage_protos', 'api_resource_protos', 'grpc::_grpc', 'grpc::grpc++', 'protobuf::libprotobuf', 'rpc_status_protos', 'type_date_protos', 'type_dayofweek_protos', 'type_timeofday_protos'],
"documentai_protos": ['api_annotations_protos', 'api_client_protos', 'api_field_behavior_protos', 'api_http_protos', 'api_launch_stage_protos', 'api_resource_protos', 'grpc::_grpc', 'grpc::grpc++', 'longrunning_operations_protos', 'protobuf::libprotobuf', 'rpc_status_protos', 'type_color_protos', 'type_date_protos', 'type_datetime_protos', 'type_money_protos', 'type_postal_address_protos'],
"domains_protos": ['api_annotations_protos', 'api_client_protos', 'api_field_behavior_protos', 'api_http_protos', 'api_launch_stage_protos', 'api_resource_protos', 'grpc::_grpc', 'grpc::grpc++', 'longrunning_operations_protos', 'protobuf::libprotobuf', 'rpc_status_protos', 'type_money_protos', 'type_postal_address_protos'],
"edgecontainer_protos": ['api_annotations_protos', 'api_client_protos', 'api_field_behavior_protos', 'api_http_protos', 'api_launch_stage_protos', 'api_resource_protos', 'grpc::_grpc', 'grpc::grpc++', 'longrunning_operations_protos', 'protobuf::libprotobuf', 'rpc_status_protos'],
"essentialcontacts_protos": ['api_annotations_protos', 'api_client_protos', 'api_field_behavior_protos', 'api_http_protos', 'api_launch_stage_protos', 'api_resource_protos', 'grpc::_grpc', 'grpc::grpc++', 'protobuf::libprotobuf'],
"eventarc_protos": ['api_annotations_protos', 'api_client_protos', 'api_field_behavior_protos', 'api_http_protos', 'api_launch_stage_protos', 'api_resource_protos', 'grpc::_grpc', 'grpc::grpc++', 'longrunning_operations_protos', 'protobuf::libprotobuf', 'rpc_code_protos', 'rpc_status_protos'],
"filestore_protos": ['api_annotations_protos', 'api_client_protos', 'api_field_behavior_protos', 'api_http_protos', 'api_launch_stage_protos', 'api_resource_protos', 'cloud_common_common_protos', 'grpc::_grpc', 'grpc::grpc++', 'longrunning_operations_protos', 'protobuf::libprotobuf', 'rpc_status_protos'],
"functions_protos": ['api_annotations_protos', 'api_client_protos', 'api_field_behavior_protos', 'api_http_protos', 'api_launch_stage_protos', 'api_resource_protos', 'grpc::_grpc', 'grpc::grpc++', 'iam_v1_iam_policy_protos', 'iam_v1_options_protos', 'iam_v1_policy_protos', 'longrunning_operations_protos', 'protobuf::libprotobuf', 'rpc_status_protos', 'type_expr_protos'],
"gameservices_protos": ['api_annotations_protos', 'api_client_protos', 'api_field_behavior_protos', 'api_http_protos', 'api_launch_stage_protos', 'api_resource_protos', 'grpc::_grpc', 'grpc::grpc++', 'longrunning_operations_protos', 'protobuf::libprotobuf', 'rpc_status_protos'],
"gkebackup_protos": ['api_annotations_protos', 'api_client_protos', 'api_field_behavior_protos', 'api_http_protos', 'api_launch_stage_protos', 'api_resource_protos', 'grpc::_grpc', 'grpc::grpc++', 'longrunning_operations_protos', 'protobuf::libprotobuf', 'rpc_status_protos'],
"gkehub_protos": ['api_annotations_protos', 'api_client_protos', 'api_field_behavior_protos', 'api_http_protos', 'api_launch_stage_protos', 'api_resource_protos', 'grpc::_grpc', 'grpc::grpc++', 'longrunning_operations_protos', 'protobuf::libprotobuf', 'rpc_status_protos'],
"gkemulticloud_protos": ['api_annotations_protos', 'api_client_protos', 'api_field_behavior_protos', 'api_http_protos', 'api_launch_stage_protos', 'api_resource_protos', 'grpc::_grpc', 'grpc::grpc++', 'longrunning_operations_protos', 'protobuf::libprotobuf', 'rpc_status_protos'],
"grafeas_protos": ['api_annotations_protos', 'api_client_protos', 'api_field_behavior_protos', 'api_http_protos', 'api_launch_stage_protos', 'api_resource_protos', 'grpc::_grpc', 'grpc::grpc++', 'protobuf::libprotobuf', 'rpc_status_protos'],
"iam_protos": ['api_annotations_protos', 'api_client_protos', 'api_field_behavior_protos', 'api_http_protos', 'api_launch_stage_protos', 'api_resource_protos', 'grpc::_grpc', 'grpc::grpc++', 'iam_v1_iam_policy_protos', 'iam_v1_options_protos', 'iam_v1_policy_protos', 'longrunning_operations_protos', 'protobuf::libprotobuf', 'rpc_status_protos', 'type_expr_protos'],
"iap_protos": ['api_annotations_protos', 'api_client_protos', 'api_field_behavior_protos', 'api_http_protos', 'api_launch_stage_protos', 'api_resource_protos', 'grpc::_grpc', 'grpc::grpc++', 'iam_v1_iam_policy_protos', 'iam_v1_options_protos', 'iam_v1_policy_protos', 'protobuf::libprotobuf', 'type_expr_protos'],
"ids_protos": ['api_annotations_protos', 'api_client_protos', 'api_field_behavior_protos', 'api_http_protos', 'api_launch_stage_protos', 'api_resource_protos', 'grpc::_grpc', 'grpc::grpc++', 'longrunning_operations_protos', 'protobuf::libprotobuf', 'rpc_status_protos'],
"iot_protos": ['api_annotations_protos', 'api_client_protos', 'api_field_behavior_protos', 'api_http_protos', 'api_launch_stage_protos', 'api_resource_protos', 'grpc::_grpc', 'grpc::grpc++', 'iam_v1_iam_policy_protos', 'iam_v1_options_protos', 'iam_v1_policy_protos', 'protobuf::libprotobuf', 'rpc_status_protos', 'type_expr_protos'],
"kms_protos": ['api_annotations_protos', 'api_client_protos', 'api_field_behavior_protos', 'api_http_protos', 'api_launch_stage_protos', 'api_resource_protos', 'grpc::_grpc', 'grpc::grpc++', 'protobuf::libprotobuf'],
"language_protos": ['api_annotations_protos', 'api_client_protos', 'api_field_behavior_protos', 'api_http_protos', 'api_launch_stage_protos', 'grpc::_grpc', 'grpc::grpc++', 'protobuf::libprotobuf'],
"logging_protos": ['api_annotations_protos', 'api_client_protos', 'api_distribution_protos', 'api_field_behavior_protos', 'api_http_protos', 'api_label_protos', 'api_launch_stage_protos', 'api_metric_protos', 'api_monitored_resource_protos', 'api_resource_protos', 'grpc::_grpc', 'grpc::grpc++', 'logging_type_type_protos', 'longrunning_operations_protos', 'protobuf::libprotobuf', 'rpc_status_protos'],
"logging_type_type_protos": ['grpc::_grpc', 'grpc::grpc++', 'protobuf::libprotobuf'],
"managedidentities_protos": ['api_annotations_protos', 'api_client_protos', 'api_field_behavior_protos', 'api_http_protos', 'api_launch_stage_protos', 'api_resource_protos', 'grpc::_grpc', 'grpc::grpc++', 'longrunning_operations_protos', 'protobuf::libprotobuf', 'rpc_status_protos'],
"memcache_protos": ['api_annotations_protos', 'api_client_protos', 'api_field_behavior_protos', 'api_http_protos', 'api_launch_stage_protos', 'api_resource_protos', 'grpc::_grpc', 'grpc::grpc++', 'longrunning_operations_protos', 'protobuf::libprotobuf', 'rpc_status_protos', 'type_dayofweek_protos', 'type_timeofday_protos'],
"monitoring_protos": ['api_annotations_protos', 'api_client_protos', 'api_distribution_protos', 'api_field_behavior_protos', 'api_http_protos', 'api_label_protos', 'api_launch_stage_protos', 'api_metric_protos', 'api_monitored_resource_protos', 'api_resource_protos', 'grpc::_grpc', 'grpc::grpc++', 'longrunning_operations_protos', 'protobuf::libprotobuf', 'rpc_status_protos', 'type_calendar_period_protos'],
"networkconnectivity_protos": ['api_annotations_protos', 'api_client_protos', 'api_field_behavior_protos', 'api_http_protos', 'api_launch_stage_protos', 'api_resource_protos', 'grpc::_grpc', 'grpc::grpc++', 'longrunning_operations_protos', 'protobuf::libprotobuf', 'rpc_status_protos'],
"networkmanagement_protos": ['api_annotations_protos', 'api_client_protos', 'api_field_behavior_protos', 'api_http_protos', 'api_launch_stage_protos', 'api_resource_protos', 'grpc::_grpc', 'grpc::grpc++', 'longrunning_operations_protos', 'protobuf::libprotobuf', 'rpc_status_protos'],
"networkservices_protos": ['api_annotations_protos', 'api_client_protos', 'api_field_behavior_protos', 'api_http_protos', 'api_launch_stage_protos', 'api_resource_protos', 'grpc::_grpc', 'grpc::grpc++', 'longrunning_operations_protos', 'protobuf::libprotobuf', 'rpc_status_protos'],
"notebooks_protos": ['api_annotations_protos', 'api_client_protos', 'api_field_behavior_protos', 'api_http_protos', 'api_launch_stage_protos', 'api_resource_protos', 'grpc::_grpc', 'grpc::grpc++', 'longrunning_operations_protos', 'protobuf::libprotobuf', 'rpc_status_protos'],
"optimization_protos": ['api_annotations_protos', 'api_client_protos', 'api_field_behavior_protos', 'api_http_protos', 'api_launch_stage_protos', 'grpc::_grpc', 'grpc::grpc++', 'longrunning_operations_protos', 'protobuf::libprotobuf', 'rpc_status_protos', 'type_latlng_protos'],
"orgpolicy_protos": ['api_annotations_protos', 'api_client_protos', 'api_field_behavior_protos', 'api_http_protos', 'api_launch_stage_protos', 'api_resource_protos', 'grpc::_grpc', 'grpc::grpc++', 'protobuf::libprotobuf', 'type_expr_protos'],
"osconfig_protos": ['api_annotations_protos', 'api_client_protos', 'api_field_behavior_protos', 'api_http_protos', 'api_launch_stage_protos', 'api_resource_protos', 'grpc::_grpc', 'grpc::grpc++', 'longrunning_operations_protos', 'protobuf::libprotobuf', 'rpc_status_protos', 'type_date_protos', 'type_datetime_protos', 'type_dayofweek_protos', 'type_timeofday_protos'],
"oslogin_protos": ['api_annotations_protos', 'api_client_protos', 'api_field_behavior_protos', 'api_http_protos', 'api_launch_stage_protos', 'api_resource_protos', 'grpc::_grpc', 'grpc::grpc++', 'protobuf::libprotobuf'],
"policytroubleshooter_protos": ['api_annotations_protos', 'api_client_protos', 'api_field_behavior_protos', 'api_http_protos', 'api_launch_stage_protos', 'grpc::_grpc', 'grpc::grpc++', 'iam_v1_policy_protos', 'protobuf::libprotobuf', 'type_expr_protos'],
"privateca_protos": ['api_annotations_protos', 'api_client_protos', 'api_field_behavior_protos', 'api_http_protos', 'api_launch_stage_protos', 'api_resource_protos', 'grpc::_grpc', 'grpc::grpc++', 'longrunning_operations_protos', 'protobuf::libprotobuf', 'rpc_status_protos', 'type_expr_protos'],
"profiler_protos": ['api_annotations_protos', 'api_client_protos', 'api_field_behavior_protos', 'api_http_protos', 'api_launch_stage_protos', 'api_resource_protos', 'grpc::_grpc', 'grpc::grpc++', 'protobuf::libprotobuf'],
"pubsub_protos": ['api_annotations_protos', 'api_client_protos', 'api_field_behavior_protos', 'api_http_protos', 'api_launch_stage_protos', 'api_resource_protos', 'grpc::_grpc', 'grpc::grpc++', 'protobuf::libprotobuf'],
"recaptchaenterprise_protos": ['api_annotations_protos', 'api_client_protos', 'api_field_behavior_protos', 'api_http_protos', 'api_launch_stage_protos', 'api_resource_protos', 'grpc::_grpc', 'grpc::grpc++', 'protobuf::libprotobuf'],
"recommender_protos": ['api_annotations_protos', 'api_client_protos', 'api_field_behavior_protos', 'api_http_protos', 'api_launch_stage_protos', 'api_resource_protos', 'grpc::_grpc', 'grpc::grpc++', 'protobuf::libprotobuf', 'type_money_protos'],
"redis_protos": ['api_annotations_protos', 'api_client_protos', 'api_field_behavior_protos', 'api_http_protos', 'api_launch_stage_protos', 'api_resource_protos', 'grpc::_grpc', 'grpc::grpc++', 'longrunning_operations_protos', 'protobuf::libprotobuf', 'rpc_status_protos', 'type_dayofweek_protos', 'type_timeofday_protos'],
"resourcemanager_protos": ['api_annotations_protos', 'api_client_protos', 'api_field_behavior_protos', 'api_http_protos', 'api_launch_stage_protos', 'api_resource_protos', 'grpc::_grpc', 'grpc::grpc++', 'iam_v1_iam_policy_protos', 'iam_v1_options_protos', 'iam_v1_policy_protos', 'longrunning_operations_protos', 'protobuf::libprotobuf', 'rpc_status_protos', 'type_expr_protos'],
"resourcesettings_protos": ['api_annotations_protos', 'api_client_protos', 'api_field_behavior_protos', 'api_http_protos', 'api_launch_stage_protos', 'api_resource_protos', 'grpc::_grpc', 'grpc::grpc++', 'protobuf::libprotobuf'],
"retail_protos": ['api_annotations_protos', 'api_client_protos', 'api_field_behavior_protos', 'api_http_protos', 'api_httpbody_protos', 'api_launch_stage_protos', 'api_resource_protos', 'grpc::_grpc', 'grpc::grpc++', 'longrunning_operations_protos', 'protobuf::libprotobuf', 'rpc_status_protos', 'type_date_protos'],
"run_protos": ['api_annotations_protos', 'api_client_protos', 'api_field_behavior_protos', 'api_http_protos', 'api_launch_stage_protos', 'api_resource_protos', 'api_routing_protos', 'grpc::_grpc', 'grpc::grpc++', 'iam_v1_iam_policy_protos', 'iam_v1_options_protos', 'iam_v1_policy_protos', 'longrunning_operations_protos', 'protobuf::libprotobuf', 'rpc_status_protos', 'type_expr_protos'],
"scheduler_protos": ['api_annotations_protos', 'api_client_protos', 'api_field_behavior_protos', 'api_http_protos', 'api_launch_stage_protos', 'api_resource_protos', 'grpc::_grpc', 'grpc::grpc++', 'protobuf::libprotobuf', 'rpc_status_protos'],
"secretmanager_protos": ['api_annotations_protos', 'api_client_protos', 'api_field_behavior_protos', 'api_http_protos', 'api_launch_stage_protos', 'api_resource_protos', 'grpc::_grpc', 'grpc::grpc++', 'iam_v1_iam_policy_protos', 'iam_v1_options_protos', 'iam_v1_policy_protos', 'protobuf::libprotobuf', 'type_expr_protos'],
"securitycenter_protos": ['api_annotations_protos', 'api_client_protos', 'api_field_behavior_protos', 'api_http_protos', 'api_launch_stage_protos', 'api_resource_protos', 'grpc::_grpc', 'grpc::grpc++', 'iam_v1_iam_policy_protos', 'iam_v1_options_protos', 'iam_v1_policy_protos', 'longrunning_operations_protos', 'protobuf::libprotobuf', 'rpc_status_protos', 'type_expr_protos'],
"servicecontrol_protos": ['api_annotations_protos', 'api_client_protos', 'api_distribution_protos', 'api_http_protos', 'api_launch_stage_protos', 'grpc::_grpc', 'grpc::grpc++', 'logging_type_type_protos', 'protobuf::libprotobuf', 'rpc_context_attribute_context_protos', 'rpc_status_protos'],
"servicedirectory_protos": ['api_annotations_protos', 'api_client_protos', 'api_field_behavior_protos', 'api_http_protos', 'api_launch_stage_protos', 'api_resource_protos', 'grpc::_grpc', 'grpc::grpc++', 'iam_v1_iam_policy_protos', 'iam_v1_options_protos', 'iam_v1_policy_protos', 'protobuf::libprotobuf', 'type_expr_protos'],
"servicemanagement_protos": ['api_annotations_protos', 'api_auth_protos', 'api_backend_protos', 'api_billing_protos', 'api_client_protos', 'api_config_change_protos', 'api_context_protos', 'api_control_protos', 'api_documentation_protos', 'api_endpoint_protos', 'api_field_behavior_protos', 'api_http_protos', 'api_label_protos', 'api_launch_stage_protos', 'api_log_protos', 'api_logging_protos', 'api_metric_protos', 'api_monitored_resource_protos', 'api_monitoring_protos', 'api_quota_protos', 'api_resource_protos', 'api_service_protos', 'api_source_info_protos', 'api_system_parameter_protos', 'api_usage_protos', 'api_visibility_protos', 'grpc::_grpc', 'grpc::grpc++', 'longrunning_operations_protos', 'protobuf::libprotobuf', 'rpc_status_protos'],
"serviceusage_protos": ['api_annotations_protos', 'api_auth_protos', 'api_client_protos', 'api_documentation_protos', 'api_endpoint_protos', 'api_http_protos', 'api_label_protos', 'api_launch_stage_protos', 'api_monitored_resource_protos', 'api_monitoring_protos', 'api_quota_protos', 'api_usage_protos', 'api_visibility_protos', 'grpc::_grpc', 'grpc::grpc++', 'longrunning_operations_protos', 'protobuf::libprotobuf', 'rpc_status_protos'],
"shell_protos": ['api_annotations_protos', 'api_client_protos', 'api_field_behavior_protos', 'api_http_protos', 'api_launch_stage_protos', 'api_resource_protos', 'grpc::_grpc', 'grpc::grpc++', 'longrunning_operations_protos', 'protobuf::libprotobuf', 'rpc_status_protos'],
"spanner_protos": ['api_annotations_protos', 'api_client_protos', 'api_field_behavior_protos', 'api_http_protos', 'api_launch_stage_protos', 'api_resource_protos', 'grpc::_grpc', 'grpc::grpc++', 'iam_v1_iam_policy_protos', 'iam_v1_options_protos', 'iam_v1_policy_protos', 'longrunning_operations_protos', 'protobuf::libprotobuf', 'rpc_status_protos', 'type_expr_protos'],
"cloud_speech_protos": ['api_annotations_protos', 'api_client_protos', 'api_field_behavior_protos', 'api_http_protos', 'api_launch_stage_protos', 'api_resource_protos', 'grpc::_grpc', 'grpc::grpc++', 'longrunning_operations_protos', 'protobuf::libprotobuf', 'rpc_status_protos'],
"storage_protos": ['api_annotations_protos', 'api_client_protos', 'api_field_behavior_protos', 'api_http_protos', 'api_launch_stage_protos', 'api_resource_protos', 'api_routing_protos', 'grpc::_grpc', 'grpc::grpc++', 'iam_v1_iam_policy_protos', 'iam_v1_options_protos', 'iam_v1_policy_protos', 'protobuf::libprotobuf', 'type_date_protos', 'type_expr_protos'],
"storageinsights_protos": ['api_annotations_protos', 'api_client_protos', 'api_field_behavior_protos', 'api_http_protos', 'api_launch_stage_protos', 'api_resource_protos', 'grpc::_grpc', 'grpc::grpc++', 'longrunning_operations_protos', 'protobuf::libprotobuf', 'rpc_status_protos', 'type_date_protos', 'type_datetime_protos'],
"storagetransfer_protos": ['api_annotations_protos', 'api_client_protos', 'api_field_behavior_protos', 'api_http_protos', 'api_launch_stage_protos', 'api_resource_protos', 'grpc::_grpc', 'grpc::grpc++', 'longrunning_operations_protos', 'protobuf::libprotobuf', 'rpc_code_protos', 'rpc_status_protos', 'type_date_protos', 'type_timeofday_protos'],
"support_protos": ['api_annotations_protos', 'api_client_protos', 'api_field_behavior_protos', 'api_http_protos', 'api_launch_stage_protos', 'api_resource_protos', 'grpc::_grpc', 'grpc::grpc++', 'longrunning_operations_protos', 'protobuf::libprotobuf', 'rpc_status_protos'],
"talent_protos": ['api_annotations_protos', 'api_client_protos', 'api_field_behavior_protos', 'api_http_protos', 'api_launch_stage_protos', 'api_resource_protos', 'grpc::_grpc', 'grpc::grpc++', 'longrunning_operations_protos', 'protobuf::libprotobuf', 'rpc_status_protos', 'type_latlng_protos', 'type_money_protos', 'type_postal_address_protos', 'type_timeofday_protos'],
"tasks_protos": ['api_annotations_protos', 'api_client_protos', 'api_field_behavior_protos', 'api_http_protos', 'api_launch_stage_protos', 'api_resource_protos', 'grpc::_grpc', 'grpc::grpc++', 'iam_v1_iam_policy_protos', 'iam_v1_options_protos', 'iam_v1_policy_protos', 'protobuf::libprotobuf', 'rpc_status_protos', 'type_expr_protos'],
"cloud_texttospeech_protos": ['api_annotations_protos', 'api_client_protos', 'api_field_behavior_protos', 'api_http_protos', 'api_launch_stage_protos', 'api_resource_protos', 'grpc::_grpc', 'grpc::grpc++', 'longrunning_operations_protos', 'protobuf::libprotobuf', 'rpc_status_protos'],
"timeseriesinsights_protos": ['api_annotations_protos', 'api_client_protos', 'api_field_behavior_protos', 'api_http_protos', 'api_launch_stage_protos', 'api_resource_protos', 'grpc::_grpc', 'grpc::grpc++', 'protobuf::libprotobuf', 'rpc_status_protos'],
"tpu_protos": ['api_annotations_protos', 'api_client_protos', 'api_field_behavior_protos', 'api_http_protos', 'api_launch_stage_protos', 'api_resource_protos', 'grpc::_grpc', 'grpc::grpc++', 'longrunning_operations_protos', 'protobuf::libprotobuf', 'rpc_status_protos'],
"devtools_cloudtrace_v2_trace_protos": ['api_annotations_protos', 'api_client_protos', 'api_field_behavior_protos', 'api_http_protos', 'api_launch_stage_protos', 'api_resource_protos', 'grpc::_grpc', 'grpc::grpc++', 'protobuf::libprotobuf', 'rpc_status_protos'],
"translate_protos": ['api_annotations_protos', 'api_client_protos', 'api_field_behavior_protos', 'api_http_protos', 'api_launch_stage_protos', 'api_resource_protos', 'grpc::_grpc', 'grpc::grpc++', 'longrunning_operations_protos', 'protobuf::libprotobuf', 'rpc_status_protos'],
"video_protos": ['api_annotations_protos', 'api_client_protos', 'api_field_behavior_protos', 'api_http_protos', 'api_launch_stage_protos', 'api_resource_protos', 'grpc::_grpc', 'grpc::grpc++', 'longrunning_operations_protos', 'protobuf::libprotobuf', 'rpc_status_protos', 'type_datetime_protos'],
"videointelligence_protos": ['api_annotations_protos', 'api_client_protos', 'api_field_behavior_protos', 'api_http_protos', 'api_launch_stage_protos', 'grpc::_grpc', 'grpc::grpc++', 'longrunning_operations_protos', 'protobuf::libprotobuf', 'rpc_status_protos'],
"vision_protos": ['api_annotations_protos', 'api_client_protos', 'api_field_behavior_protos', 'api_http_protos', 'api_launch_stage_protos', 'api_resource_protos', 'grpc::_grpc', 'grpc::grpc++', 'longrunning_operations_protos', 'protobuf::libprotobuf', 'rpc_status_protos', 'type_color_protos', 'type_latlng_protos'],
"vmmigration_protos": ['api_annotations_protos', 'api_client_protos', 'api_field_behavior_protos', 'api_http_protos', 'api_launch_stage_protos', 'api_resource_protos', 'grpc::_grpc', 'grpc::grpc++', 'longrunning_operations_protos', 'protobuf::libprotobuf', 'rpc_error_details_protos', 'rpc_status_protos'],
"vmwareengine_protos": ['api_annotations_protos', 'api_client_protos', 'api_field_behavior_protos', 'api_http_protos', 'api_launch_stage_protos', 'api_resource_protos', 'grpc::_grpc', 'grpc::grpc++', 'longrunning_operations_protos', 'protobuf::libprotobuf', 'rpc_status_protos'],
"vpcaccess_protos": ['api_annotations_protos', 'api_client_protos', 'api_field_behavior_protos', 'api_http_protos', 'api_launch_stage_protos', 'api_resource_protos', 'grpc::_grpc', 'grpc::grpc++', 'longrunning_operations_protos', 'protobuf::libprotobuf', 'rpc_status_protos'],
"webrisk_protos": ['api_annotations_protos', 'api_client_protos', 'api_field_behavior_protos', 'api_http_protos', 'api_launch_stage_protos', 'api_resource_protos', 'grpc::_grpc', 'grpc::grpc++', 'longrunning_operations_protos', 'protobuf::libprotobuf', 'rpc_status_protos'],
"websecurityscanner_protos": ['api_annotations_protos', 'api_client_protos', 'api_field_behavior_protos', 'api_http_protos', 'api_launch_stage_protos', 'api_resource_protos', 'grpc::_grpc', 'grpc::grpc++', 'protobuf::libprotobuf'],
"workflows_protos": ['api_annotations_protos', 'api_client_protos', 'api_field_behavior_protos', 'api_http_protos', 'api_launch_stage_protos', 'api_resource_protos', 'grpc::_grpc', 'grpc::grpc++', 'longrunning_operations_protos', 'protobuf::libprotobuf', 'rpc_status_protos'],
"workstations_protos": ['api_annotations_protos', 'api_client_protos', 'api_field_behavior_protos', 'api_http_protos', 'api_launch_stage_protos', 'api_resource_protos', 'grpc::_grpc', 'grpc::grpc++', 'longrunning_operations_protos', 'protobuf::libprotobuf', 'rpc_status_protos'],
"api_annotations_protos": ['api_http_protos'],
"api_auth_protos": ['api_annotations_protos'],
"api_billing_protos": ['api_annotations_protos', 'api_metric_protos'],
"api_client_protos": ['api_launch_stage_protos'],
"api_distribution_protos": ['api_annotations_protos'],
"api_endpoint_protos": ['api_annotations_protos'],
"api_log_protos": ['api_label_protos'],
"api_logging_protos": ['api_annotations_protos', 'api_label_protos'],
"api_metric_protos": ['api_label_protos', 'api_launch_stage_protos'],
"api_monitored_resource_protos": ['api_label_protos', 'api_launch_stage_protos'],
"api_monitoring_protos": ['api_annotations_protos'],
"api_quota_protos": ['api_annotations_protos'],
"api_service_protos": ['api_annotations_protos', 'api_auth_protos', 'api_backend_protos', 'api_billing_protos', 'api_client_protos', 'api_context_protos', 'api_control_protos', 'api_documentation_protos', 'api_endpoint_protos', 'api_http_protos', 'api_label_protos', 'api_log_protos', 'api_logging_protos', 'api_metric_protos', 'api_monitored_resource_protos', 'api_monitoring_protos', 'api_quota_protos', 'api_resource_protos', 'api_source_info_protos', 'api_system_parameter_protos', 'api_usage_protos'],
"api_usage_protos": ['api_annotations_protos', 'api_visibility_protos'],
"devtools_cloudtrace_v2_tracing_protos": ['api_client_protos', 'api_field_behavior_protos', 'devtools_cloudtrace_v2_trace_protos', 'devtools_cloudtrace_v2_trace_protos', 'rpc_status_protos'],
}
PROTO_COMPONENTS = {
"accessapproval_protos",
"accesscontextmanager_protos",
"advisorynotifications_protos",
"aiplatform_protos",
"alloydb_protos",
"api_annotations_protos",
"api_auth_protos",
"api_backend_protos",
"api_billing_protos",
"api_client_protos",
"api_config_change_protos",
"api_context_protos",
"api_control_protos",
"api_distribution_protos",
"api_documentation_protos",
"api_endpoint_protos",
"api_field_behavior_protos",
"api_http_protos",
"api_httpbody_protos",
"api_label_protos",
"api_launch_stage_protos",
"api_log_protos",
"api_logging_protos",
"api_metric_protos",
"api_monitored_resource_protos",
"api_monitoring_protos",
"api_quota_protos",
"api_resource_protos",
"api_routing_protos",
"api_service_protos",
"api_source_info_protos",
"api_system_parameter_protos",
"api_usage_protos",
"api_visibility_protos",
"apigateway_protos",
"apigeeconnect_protos",
"apikeys_protos",
"appengine_protos",
"artifactregistry_protos",
"asset_protos",
"assuredworkloads_protos",
"automl_protos",
"baremetalsolution_protos",
"batch_protos",
"beyondcorp_protos",
"bigtable_protos",
"billing_protos",
"binaryauthorization_protos",
"certificatemanager_protos",
"channel_protos",
"cloud_bigquery_protos",
"cloud_common_common_protos",
"cloud_dialogflow_v2_protos",
"cloud_speech_protos",
"cloud_texttospeech_protos",
"cloudbuild_protos",
"composer_protos",
"confidentialcomputing_protos",
"connectors_protos",
"contactcenterinsights_protos",
"container_protos",
"containeranalysis_protos",
"contentwarehouse_protos",
"datacatalog_protos",
"datamigration_protos",
"dataplex_protos",
"dataproc_protos",
"datastream_protos",
"deploy_protos",
"devtools_cloudtrace_v2_trace_protos",
"devtools_cloudtrace_v2_tracing_protos",
"devtools_source_v1_source_context_protos",
"dialogflow_cx_protos",
"dlp_protos",
"documentai_protos",
"domains_protos",
"edgecontainer_protos",
"essentialcontacts_protos",
"eventarc_protos",
"filestore_protos",
"functions_protos",
"gameservices_protos",
"gkebackup_protos",
"gkehub_protos",
"gkemulticloud_protos",
"grafeas_protos",
"iam_protos",
"iam_v1_iam_policy_protos",
"iam_v1_options_protos",
"iam_v1_policy_protos",
"iap_protos",
"ids_protos",
"iot_protos",
"kms_protos",
"language_protos",
"logging_protos",
"logging_type_type_protos",
"longrunning_operations_protos",
"managedidentities_protos",
"memcache_protos",
"monitoring_protos",
"networkconnectivity_protos",
"networkmanagement_protos",
"networkservices_protos",
"notebooks_protos",
"optimization_protos",
"orgpolicy_protos",
"osconfig_protos",
"oslogin_protos",
"policytroubleshooter_protos",
"privateca_protos",
"profiler_protos",
"pubsub_protos",
"recaptchaenterprise_protos",
"recommender_protos",
"redis_protos",
"resourcemanager_protos",
"resourcesettings_protos",
"retail_protos",
"rpc_code_protos",
"rpc_context_attribute_context_protos",
"rpc_error_details_protos",
"rpc_status_protos",
"run_protos",
"scheduler_protos",
"secretmanager_protos",
"securitycenter_protos",
"servicecontrol_protos",
"servicedirectory_protos",
"servicemanagement_protos",
"serviceusage_protos",
"shell_protos",
"spanner_protos",
"storage_protos",
"storageinsights_protos",
"storagetransfer_protos",
"support_protos",
"talent_protos",
"tasks_protos",
"timeseriesinsights_protos",
"tpu_protos",
"translate_protos",
"type_calendar_period_protos",
"type_color_protos",
"type_date_protos",
"type_datetime_protos",
"type_dayofweek_protos",
"type_decimal_protos",
"type_expr_protos",
"type_interval_protos",
"type_latlng_protos",
"type_money_protos",
"type_postal_address_protos",
"type_timeofday_protos",
"video_protos",
"videointelligence_protos",
"vision_protos",
"vmmigration_protos",
"vmwareengine_protos",
"vpcaccess_protos",
"webrisk_protos",
"websecurityscanner_protos",
"workflows_protos",
"workstations_protos"
}
COMPONENTS = {
"accessapproval",
"accesscontextmanager",
"advisorynotifications",
"aiplatform",
"alloydb",
"apigateway",
"apigeeconnect",
"apikeys",
"appengine",
"artifactregistry",
"asset",
"assuredworkloads",
"automl",
"baremetalsolution",
"batch",
"beyondcorp",
"bigquery",
"bigtable",
"billing",
"binaryauthorization",
"certificatemanager",
"channel",
"cloudbuild",
"composer",
"confidentialcomputing",
"connectors",
"contactcenterinsights",
"container",
"containeranalysis",
"contentwarehouse",
"datacatalog",
"datamigration",
"dataplex",
"dataproc",
"datastream",
"deploy",
"dialogflow_cx",
"dialogflow_es",
"dlp",
"documentai",
"domains",
"edgecontainer",
"essentialcontacts",
"eventarc",
"filestore",
"functions",
"gameservices",
"gkebackup",
"gkehub",
"gkemulticloud",
"iam",
"iap",
"ids",
"iot",
"kms",
"language",
"logging",
"managedidentities",
"memcache",
"monitoring",
"networkconnectivity",
"networkmanagement",
"networkservices",
"notebooks",
"optimization",
"orgpolicy",
"osconfig",
"oslogin",
"policytroubleshooter",
"privateca",
"profiler",
"pubsub",
"recaptchaenterprise",
"recommender",
"redis",
"resourcemanager",
"resourcesettings",
"retail",
"run",
"scheduler",
"secretmanager",
"securitycenter",
"servicecontrol",
"servicedirectory",
"servicemanagement",
"serviceusage",
"shell",
"spanner",
"speech",
"storage",
"storageinsights",
"storagetransfer",
"support",
"talent",
"tasks",
"texttospeech",
"timeseriesinsights",
"tpu",
"trace",
"translate",
"video",
"videointelligence",
"vision",
"vmmigration",
"vmwareengine",
"vpcaccess",
"webrisk",
"websecurityscanner",
"workflows",
"workstations"
}
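# --- Illustrative helper (not part of the generated file): DEPENDENCIES maps
# each proto component to its direct dependencies, so a recipe can expand the
# transitive closure with a simple worklist. External targets such as
# "grpc::grpc++" or "protobuf::libprotobuf" have no entry of their own and
# terminate the walk.
def transitive_deps(component):
    seen, stack = set(), [component]
    while stack:
        for dep in DEPENDENCIES.get(stack.pop(), []):
            if dep not in seen:
                seen.add(dep)
                stack.append(dep)
    return seen

# api_auth_protos depends only on api_annotations_protos directly, but the
# closure pulls in api_http_protos as well:
assert transitive_deps("api_auth_protos") == {"api_annotations_protos", "api_http_protos"}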
|
b1743b5c7b2847e1c95a6425cb14381e89bcb075
|
13800b7827598e76428a335559b7bf11867ec2f0
|
/python/ccxt/test/base/test_order.py
|
7ce6e8fd8186d58be90732e839084eca88597990
|
[
"MIT"
] |
permissive
|
ccxt/ccxt
|
b40a0466f5c430a3c0c6026552ae697aa80ba6c6
|
e4065f6a490e6fc4dd7a72b375428b2faa570668
|
refs/heads/master
| 2023-09-04T03:41:29.787733
| 2023-09-03T19:25:57
| 2023-09-03T19:25:57
| 91,253,698
| 30,798
| 8,190
|
MIT
| 2023-09-14T21:59:09
| 2017-05-14T15:41:56
|
Python
|
UTF-8
|
Python
| false
| false
| 3,947
|
py
|
test_order.py
|
import os
import sys
root = os.path.dirname(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))
sys.path.append(root)
# ----------------------------------------------------------------------------
# PLEASE DO NOT EDIT THIS FILE, IT IS GENERATED AND WILL BE OVERWRITTEN:
# https://github.com/ccxt/ccxt/blob/master/CONTRIBUTING.md#how-to-contribute-code
# ----------------------------------------------------------------------------
# -*- coding: utf-8 -*-
from ccxt.test.base import test_shared_methods # noqa E402
from ccxt.test.base.test_trade import test_trade # noqa E402
def test_order(exchange, skipped_properties, method, entry, symbol, now):
format = {
'info': {},
'id': '123',
'clientOrderId': '1234',
'timestamp': 1649373600000,
'datetime': '2022-04-07T23:20:00.000Z',
'lastTradeTimestamp': 1649373610000,
'symbol': 'XYZ/USDT',
'type': 'limit',
'timeInForce': 'GTC',
'postOnly': True,
'side': 'sell',
'price': exchange.parse_number('1.23456'),
'stopPrice': exchange.parse_number('1.1111'),
'amount': exchange.parse_number('1.23'),
'cost': exchange.parse_number('2.34'),
'average': exchange.parse_number('1.234'),
'filled': exchange.parse_number('1.23'),
'remaining': exchange.parse_number('0.123'),
'status': 'ok',
'fee': {},
'trades': [],
}
    empty_allowed_for = ['clientOrderId', 'stopPrice', 'trades', 'timestamp', 'datetime', 'lastTradeTimestamp', 'average', 'type', 'timeInForce', 'postOnly', 'side', 'price', 'amount', 'cost', 'filled', 'remaining', 'status', 'fee']  # some exchanges return only the order id, so we don't strictly require all props to be set.
test_shared_methods.assert_structure(exchange, skipped_properties, method, entry, format, empty_allowed_for)
test_shared_methods.assert_timestamp(exchange, skipped_properties, method, entry, now)
#
test_shared_methods.assert_in_array(exchange, skipped_properties, method, entry, 'timeInForce', ['GTC', 'GTK', 'IOC', 'FOK'])
test_shared_methods.assert_in_array(exchange, skipped_properties, method, entry, 'status', ['open', 'closed', 'canceled'])
test_shared_methods.assert_in_array(exchange, skipped_properties, method, entry, 'side', ['buy', 'sell'])
test_shared_methods.assert_in_array(exchange, skipped_properties, method, entry, 'postOnly', [True, False])
test_shared_methods.assert_symbol(exchange, skipped_properties, method, entry, 'symbol', symbol)
test_shared_methods.assert_greater(exchange, skipped_properties, method, entry, 'price', '0')
test_shared_methods.assert_greater(exchange, skipped_properties, method, entry, 'stopPrice', '0')
test_shared_methods.assert_greater_or_equal(exchange, skipped_properties, method, entry, 'cost', '0')
test_shared_methods.assert_greater(exchange, skipped_properties, method, entry, 'average', '0')
test_shared_methods.assert_greater_or_equal(exchange, skipped_properties, method, entry, 'filled', '0')
test_shared_methods.assert_greater_or_equal(exchange, skipped_properties, method, entry, 'remaining', '0')
test_shared_methods.assert_greater_or_equal(exchange, skipped_properties, method, entry, 'amount', '0')
test_shared_methods.assert_greater_or_equal(exchange, skipped_properties, method, entry, 'amount', exchange.safe_string(entry, 'remaining'))
test_shared_methods.assert_greater_or_equal(exchange, skipped_properties, method, entry, 'amount', exchange.safe_string(entry, 'filled'))
if not ('trades' in skipped_properties):
if entry['trades'] is not None:
for i in range(0, len(entry['trades'])):
test_trade(exchange, skipped_properties, method, entry['trades'][i], symbol, now)
test_shared_methods.assert_fee_structure(exchange, skipped_properties, method, entry, 'fee')
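# Editor-added note (illustrative): this validator is normally driven by the
# shared ccxt test harness rather than called directly. A hypothetical direct
# call would look like:
#   test_order(exchange, {}, 'fetchOrder', parsed_order, 'BTC/USDT', exchange.milliseconds())
# where `exchange` is a ccxt exchange instance and `parsed_order` is the
# structure returned by its order-parsing methods.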
|
0589600af033f28e6b1b4a7e5c6f7a9118de0898
|
67b4ea0c739de7ae48da9778f276d1d8cf486721
|
/src/pipgrip/libs/mixology/result.py
|
d79ce6dab670dd47c1749f3da113537f601384f3
|
[
"BSD-3-Clause"
] |
permissive
|
ddelange/pipgrip
|
316c533b37497eddc2308fcd0a6478b39ae9d21d
|
cd66e71302d0b184597f573b3e4976bcbd1459f1
|
refs/heads/master
| 2023-08-24T15:15:36.579900
| 2023-08-12T20:22:11
| 2023-08-12T20:22:11
| 230,768,810
| 155
| 13
|
BSD-3-Clause
| 2023-09-02T07:09:29
| 2019-12-29T15:28:50
|
Python
|
UTF-8
|
Python
| false
| false
| 483
|
py
|
result.py
|
from typing import Any, Dict, Hashable
class SolverResult:
def __init__(
self, decisions, attempted_solutions
): # type: (Dict[Hashable, Any], int) -> None
self._decisions = decisions
self._attempted_solutions = attempted_solutions
@property
def decisions(self): # type: () -> Dict[Hashable, Any]
return self._decisions
@property
def attempted_solutions(self): # type: () -> int
return self._attempted_solutions
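# Editor-added usage sketch (illustrative, not part of the original module):
# the package/version decisions below are made-up values.
if __name__ == "__main__":
    result = SolverResult({"root": "0.0.0", "requests": "2.31.0"}, 3)
    print(result.decisions)            # {'root': '0.0.0', 'requests': '2.31.0'}
    print(result.attempted_solutions)  # 3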
|
25343cb1e8aef4d45a33cbb232bad2ab02c5a475
|
3196488df20871d5196e7a7224577c6bb345477e
|
/darwin/future/data_objects/release.py
|
bbd6562536a8ad496e953b2342a584bd0af610aa
|
[
"MIT"
] |
permissive
|
v7labs/darwin-py
|
60360d94c12fc5170643588a2fa890981aeab075
|
3cc2d5299fb48d48aeac10e01f79f49e856e6967
|
refs/heads/master
| 2023-08-31T10:06:55.334381
| 2023-08-31T09:51:01
| 2023-08-31T09:51:01
| 192,462,056
| 110
| 36
|
MIT
| 2023-09-13T17:25:24
| 2019-06-18T03:58:27
|
Python
|
UTF-8
|
Python
| false
| false
| 680
|
py
|
release.py
|
from typing import List
from pydantic import validator
from darwin.future.data_objects import validators as darwin_validators
from darwin.future.pydantic_base import DefaultDarwin
class Release(DefaultDarwin):
"""A class to manage all the information around a release on the darwin platform, including validation
Attributes
----------
name : str
Methods
----------
_name_validator: validates and auto formats the name variable
"""
name: str
def __str__(self) -> str:
return self.name
# Data Validation
_name_validator = validator("name", allow_reuse=True)(darwin_validators.parse_name)
ReleaseList = List[Release]
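# Editor-added usage sketch (illustrative, not part of the original module):
# constructing a Release runs the name validator automatically; the release
# name below is made up, and the printed value reflects whatever
# normalization darwin_validators.parse_name applies.
if __name__ == "__main__":
    release = Release(name="My Release 1")
    print(release)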
|
4815acf4daf60552dd086ce669adf53c75c2eefb
|
4d8df3fd1c531ea93068b56218b6f9aa44fd898e
|
/test/fake_pebble.py
|
f35b1973cd5407250730bd8a623b2e865e1d636e
|
[
"Apache-2.0"
] |
permissive
|
canonical/operator
|
f7adb02afe5209f1dd711a6b4c25bfaedacb2d02
|
c4e3266a6568ba310064ca8b9bff7adb89676224
|
refs/heads/main
| 2023-09-01T14:07:37.685404
| 2023-08-30T19:55:19
| 2023-08-30T19:55:19
| 212,098,176
| 226
| 109
|
Apache-2.0
| 2023-08-30T19:55:20
| 2019-10-01T13:06:11
|
Python
|
UTF-8
|
Python
| false
| false
| 5,746
|
py
|
fake_pebble.py
|
# Copyright 2021 Canonical Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Fake (partial) Pebble server to allow testing the HTTP-over-Unix-socket protocol."""
import http.server
import json
import os
import re
import socketserver
import tempfile
import threading
import urllib.parse
class Handler(http.server.BaseHTTPRequestHandler):
def __init__(self, request, client_address, server):
self.routes = [
('GET', re.compile(r'^/system-info$'), self.get_system_info),
('POST', re.compile(r'^/services$'), self.services_action),
]
self._services = ['foo']
super().__init__(request, ('unix-socket', 80), server)
def log_message(self, format, *args):
# Disable logging for tests
pass
def respond(self, d, status=200):
self.send_response(status)
self.send_header('Content-Type', 'application/json')
self.end_headers()
d_json = json.dumps(d, indent=4, sort_keys=True)
self.wfile.write(d_json.encode('utf-8'))
def bad_request(self, message):
d = {
"result": {
"message": message,
},
"status": "Bad Request",
"status-code": 400,
"type": "error"
}
self.respond(d, 400)
def not_found(self):
d = {
"result": {
"message": "invalid API endpoint requested"
},
"status": "Not Found",
"status-code": 404,
"type": "error"
}
self.respond(d, 404)
def method_not_allowed(self):
d = {
"result": {
"message": 'method "PUT" not allowed'
},
"status": "Method Not Allowed",
"status-code": 405,
"type": "error"
}
self.respond(d, 405)
def internal_server_error(self, msg):
d = {
"result": {
"message": f"internal server error: {msg}",
},
"status": "Internal Server Error",
"status-code": 500,
"type": "error"
}
self.respond(d, 500)
def do_GET(self): # noqa: N802 ("should be lowercase")
self.do_request('GET')
def do_POST(self): # noqa: N802 ("should be lowercase")
self.do_request('POST')
def do_request(self, request_method):
path, _, query = self.path.partition('?')
path = urllib.parse.unquote(path)
query = dict(urllib.parse.parse_qsl(query))
if not path.startswith('/v1/'):
self.not_found()
return
path = path[3:]
allowed = []
for method, regex, func in self.routes:
match = regex.match(path)
if match:
if request_method == method:
data = self.read_body_json()
try:
func(match, query, data)
except Exception as e:
self.internal_server_error(e)
raise
return
allowed.append(method)
if allowed:
self.method_not_allowed()
return
self.not_found()
def read_body_json(self):
try:
content_len = int(self.headers.get('Content-Length', ''))
except ValueError:
content_len = 0
if not content_len:
return None
body = self.rfile.read(content_len)
if isinstance(body, bytes):
body = body.decode('utf-8')
return json.loads(body)
def get_system_info(self, match, query, data):
self.respond({
"result": {
"version": "3.14.159"
},
"status": "OK",
"status-code": 200,
"type": "sync"
})
def services_action(self, match, query, data):
action = data['action']
services = data['services']
if action == 'start':
for service in services:
if service not in self._services:
self.bad_request(f'service "{service}" does not exist')
return
self.respond({
"change": "1234",
"result": None,
"status": "Accepted",
"status-code": 202,
"type": "async"
})
else:
self.bad_request(f'action "{action}" not implemented')
def start_server():
socket_dir = tempfile.mkdtemp(prefix='test-ops.pebble')
socket_path = os.path.join(socket_dir, 'test.socket')
server = socketserver.UnixStreamServer(socket_path, Handler)
thread = threading.Thread(target=server.serve_forever)
thread.start()
def shutdown():
server.shutdown()
server.server_close()
thread.join()
os.remove(socket_path)
os.rmdir(socket_dir)
return (shutdown, socket_path)
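# Editor-added illustration (not part of the original module): a minimal
# stdlib-only client for exercising the fake server over its Unix socket.
# UnixHTTPConnection is hypothetical glue code, not Pebble or ops API.
# e.g.: shutdown, path = start_server(); print(_demo_system_info(path)); shutdown()
def _demo_system_info(socket_path):
    import http.client
    import socket
    class UnixHTTPConnection(http.client.HTTPConnection):
        def __init__(self, path):
            super().__init__('localhost')
            self._path = path
        def connect(self):
            # Connect over the Unix domain socket instead of TCP.
            self.sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
            self.sock.connect(self._path)
    conn = UnixHTTPConnection(socket_path)
    conn.request('GET', '/v1/system-info')
    return conn.getresponse().read()  # e.g. b'{"result": {"version": "3.14.159"}, ...}'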
if __name__ == '__main__':
import time
shutdown, socket_path = start_server()
print('Serving HTTP over socket', socket_path)
# Wait forever (or till Ctrl-C pressed)
try:
while True:
time.sleep(1)
finally:
shutdown()
|
ac6b971d156d79cd16cd8a83751a92db4cdf07f0
|
88a39b8ec20b386400bd8b1d5fc1d5ad3314681d
|
/codalab/rest/worksheet_block_schemas.py
|
d570b812428c3cc3f8109aaa2297651af98ea1fe
|
[
"Apache-2.0"
] |
permissive
|
codalab/codalab-worksheets
|
bb35681454a0d74903aaa7468e17303986793464
|
5be8cb3fa4b43c9e7e8f0a3b217644a7f0a39628
|
refs/heads/master
| 2023-08-18T10:16:01.766541
| 2023-08-06T20:02:30
| 2023-08-06T20:02:30
| 27,352,490
| 126
| 65
|
NOASSERTION
| 2023-09-14T14:54:07
| 2014-11-30T22:33:18
|
Python
|
UTF-8
|
Python
| false
| false
| 6,935
|
py
|
worksheet_block_schemas.py
|
"""
Marshmallow schemas that represent worksheet block.
Used for serializing resource dicts into JSON API documents, and vice-versa.
The schemas also perform some basic validation.
"""
from marshmallow import Schema as PlainSchema, validate
from marshmallow_jsonapi import fields
# Enum that represents different modes for a block.
class BlockModes:
markup_block = 'markup_block'
record_block = 'record_block'
table_block = 'table_block'
contents_block = 'contents_block'
image_block = 'image_block'
graph_block = 'graph_block'
schema_block = 'schema_block'
subworksheets_block = 'subworksheets_block'
placeholder_block = 'placeholder_block'
values = (
markup_block,
record_block,
table_block,
contents_block,
image_block,
graph_block,
schema_block,
subworksheets_block,
placeholder_block,
)
class FetchStatusCodes:
"""
The values here correspond with FETCH_STATUS_SCHEMA in the frontend.
"""
unknown = 'unknown'
pending = 'pending'
briefly_loaded = 'briefly_loaded'
ready = 'ready'
not_found = 'not_found'
no_permission = 'no_permission'
values = (unknown, pending, briefly_loaded, ready, not_found, no_permission)
class FetchStatusSchema(PlainSchema):
"""
Schema that represents the status of fetching a resource.
"""
code = fields.String(validate=validate.OneOf(set(FetchStatusCodes.values)))
error_message = fields.String()
@staticmethod
def get_unknown_status():
return {'code': FetchStatusCodes.unknown, 'error_message': ''}
@staticmethod
def get_pending_status():
return {'code': FetchStatusCodes.pending, 'error_message': ''}
@staticmethod
def get_briefly_loaded_status():
return {'code': FetchStatusCodes.briefly_loaded, 'error_message': ''}
@staticmethod
def get_ready_status():
return {'code': FetchStatusCodes.ready, 'error_message': ''}
class BundlesSpecSchema(PlainSchema):
uuid_spec_type = 'uuid_spec'
    spec_types = (uuid_spec_type,)  # a tuple, so OneOf(set(...)) validates whole spec names rather than characters
# Fields
spec_type = fields.String(validate=validate.OneOf(set(spec_types)))
bundle_infos = fields.List(fields.Dict())
fetch_status = fields.Nested(FetchStatusSchema, required=True)
class BundleUUIDSpecSchema(BundlesSpecSchema):
spec_type = fields.Constant(BundlesSpecSchema.uuid_spec_type)
uuid_spec = fields.List(fields.String(), required=True)
@staticmethod
def create_json(bundle_infos):
return {
'spec_type': BundlesSpecSchema.uuid_spec_type,
'uuid_spec': [bundle_info['uuid'] for bundle_info in bundle_infos],
'bundle_infos': bundle_infos,
'fetch_status': FetchStatusSchema.get_ready_status(),
}
class WorksheetBlockSchema(PlainSchema):
"""
Parent schema for all worksheet blocks.
"""
mode = fields.String(validate=validate.OneOf(set(BlockModes.values)))
is_refined = fields.Bool(default=False)
class Meta:
type_ = 'worksheet-block'
class MarkupBlockSchema(WorksheetBlockSchema):
"""
Schema for blocks that contain markup.
Does not need refining, contains markup text as payload.
"""
ids = fields.List(fields.Integer())
sort_keys = fields.List(fields.Integer())
mode = fields.Constant(BlockModes.markup_block)
is_refined = fields.Bool(validate=validate.Equal(True)) # always refined
text = fields.String()
error = fields.Bool(default=False) # True if the markdown shows an error
class BundleBlockSchema(WorksheetBlockSchema):
"""
Parent schema for blocks that load data from a single bundle.
Stores state relevant to fetching information from bundle.
"""
bundles_spec = fields.Nested(BundlesSpecSchema, required=True)
target_genpath = fields.String(required=True)
status = fields.Nested(FetchStatusSchema, required=True)
class BundleContentsBlockSchema(BundleBlockSchema):
mode = fields.Constant(BlockModes.contents_block)
max_lines = fields.Integer()
lines = fields.List(fields.String())
class BundleImageBlockSchema(BundleBlockSchema):
mode = fields.Constant(BlockModes.image_block)
image_data = fields.String()
height = fields.Integer()
width = fields.Integer()
class SchemaBlockSchema(WorksheetBlockSchema):
"""
Schema for user-defined schemas in worksheets
"""
mode = fields.Constant(BlockModes.schema_block)
schema_name = fields.String(required=True)
header = fields.List(fields.String(), required=True)
field_rows = fields.List(fields.Dict(), required=True)
sort_keys = fields.List(fields.Integer())
ids = fields.List(fields.Integer())
class TableBlockSchema(WorksheetBlockSchema):
mode = fields.Constant(BlockModes.table_block)
bundles_spec = fields.Nested(BundlesSpecSchema, required=True)
status = fields.Nested(FetchStatusSchema, required=True)
header = fields.List(fields.String(), required=True)
rows = fields.List(fields.Dict(), required=True)
sort_keys = fields.List(fields.Integer())
first_bundle_source_index = fields.Integer() # index for the first bundle in source
using_schemas = fields.List(fields.String())
class RecordsRowSchema(PlainSchema):
key = fields.String(required=True)
value = fields.Raw(required=True)
class RecordsBlockSchema(BundleBlockSchema):
mode = fields.Constant(BlockModes.record_block)
bundles_spec = fields.Nested(BundlesSpecSchema, required=True)
status = fields.Nested(FetchStatusSchema, required=True)
header = fields.Constant(('key', 'value'))
rows = fields.Nested(RecordsRowSchema, many=True, required=True)
sort_keys = fields.List(fields.Integer())
first_bundle_source_index = fields.Integer() # index for the first bundle in source
using_schemas = fields.List(fields.String())
class GraphTrajectorySchema(PlainSchema):
bundle_uuid = fields.String(required=True)
display_name = fields.String(required=True)
target_genpath = fields.String()
points = fields.List(fields.String())
class GraphBlockSchema(BundleBlockSchema):
mode = fields.Constant(BlockModes.graph_block)
bundles_spec = fields.Nested(BundlesSpecSchema, required=True)
status = fields.Nested(FetchStatusSchema, required=True)
trajectories = fields.Nested(GraphTrajectorySchema, many=True, required=True)
max_lines = fields.Integer()
xlabel = fields.String()
ylabel = fields.String()
class SubworksheetsBlock(WorksheetBlockSchema):
mode = fields.Constant(BlockModes.subworksheets_block)
subworksheet_infos = fields.List(fields.Dict, required=True)
sort_keys = fields.List(fields.Integer())
class PlaceholderBlockSchema(WorksheetBlockSchema):
mode = fields.Constant(BlockModes.placeholder_block)
directive = fields.String()
sort_keys = fields.List(fields.Integer())
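# Editor-added serialization sketch (illustrative, not part of the original
# module): dumping a markup block dict through its schema. The text payload
# is made up; on marshmallow 2.x, .dump() returns a MarshalResult rather than
# a plain dict.
if __name__ == "__main__":
    block = {"is_refined": True, "text": "## Results", "ids": [1, 2], "error": False}
    print(MarkupBlockSchema().dump(block))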
|
e4920c8f2efd61ad5d05485f5ceb52ddb848fba9
|
6ffc81125d6bb5f8476f95b2334a27807b8290de
|
/scripts/giza/sample_and_filter_long_bitext.py
|
8d653c5aadfec511befd5bbf974cd951260b5731
|
[
"BSD-2-Clause",
"Apache-2.0"
] |
permissive
|
oaqa/FlexNeuART
|
4cb341ca3c3f94fa28a7cfd4aef5451de3a4a2cb
|
0bd3e06735ff705731fb6cee62d3486276beccdf
|
refs/heads/master
| 2023-09-01T00:19:33.980081
| 2023-05-26T19:19:30
| 2023-05-26T19:19:30
| 64,071,121
| 156
| 21
|
Apache-2.0
| 2023-09-10T01:27:05
| 2016-07-24T15:08:03
|
Java
|
UTF-8
|
Python
| false
| false
| 2,377
|
py
|
sample_and_filter_long_bitext.py
|
#!/usr/bin/env python
#
# Copyright 2014+ Carnegie Mellon University
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Just a simple script to
1. sample bitext
2. ignore pairs where a question is much longer
than the respective document (or vice versa)
"""
import sys
import random
from flexneuart.io import open_with_default_enc
if len(sys.argv) != 8:
    print("Usage: <questions> <answers> <maximum fertility> <output questions> <output answers> <symmetrize? 1 or 0> <sample prob>")
    sys.exit(1)
quest_file = open_with_default_enc(sys.argv[1], 'r')
answ_file = open_with_default_enc(sys.argv[2], 'r')
max_fert = int(sys.argv[3])
out_file_quest = open_with_default_enc(sys.argv[4], "w")
out_file_answ = open_with_default_enc(sys.argv[5], "w")
symmetr = int(sys.argv[6]) != 0
sample_prob = float(sys.argv[7])
assert sample_prob > 0
assert sample_prob <= 1 + 1e-6
print("Symmetrizing %d, max. fertility %d" % (symmetr, max_fert))
qty_orig = 0
qty_direct = 0
qty_flip = 0
random.seed(0) # always use the same zero seed for reproducibility
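# Editor-added illustration of the fertility filter below: a pair is kept only
# when neither side is more than max_fert times longer than the other. For
# example, with max_fert = 3, a 2-token question paired with a 7-token answer
# is dropped (7 > 2 * 3), while the same question with a 6-token answer is
# kept (6 <= 2 * 3 and 2 <= 6 * 3).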
for quest in quest_file:
answ = answ_file.readline()
qty_orig += 1
# We read the question and the answer,
    # here's time to make a random decision:
# to sample or not to sample
if random.random() <= sample_prob:
len1 = len(answ.split())
len2 = len(quest.split())
if len2 <= len1 * max_fert and len1 <= len2 * max_fert:
out_file_quest.write(quest)
out_file_answ.write(answ)
qty_direct += 1
if symmetr:
out_file_quest.write(answ)
out_file_answ.write(quest)
qty_flip += 1
print(f'The sampling and filtering script processed {qty_orig} QA pairs and ' +
f'wrote {qty_direct} original and {qty_flip} flipped pairs')
out_file_quest.close()
out_file_answ.close()
|
8010657d9f2b6412a52309c765616ea2fc554983
|
d1c2d00078520cd556f60b7213c27856f8b3460d
|
/sdks/python/apache_beam/examples/complete/game/user_score.py
|
564cea8c425c37f45a8d47f4848a2d762390114f
|
[
"BSD-3-Clause",
"MIT",
"LicenseRef-scancode-protobuf",
"Apache-2.0",
"Python-2.0"
] |
permissive
|
apache/beam
|
ed11b9e043465c720659eac20ac71b5b171bfa88
|
6d5048e05087ea54abc889ce402ae2a0abb9252b
|
refs/heads/master
| 2023-09-04T07:41:07.002653
| 2023-09-01T23:01:05
| 2023-09-01T23:01:05
| 50,904,245
| 7,061
| 4,522
|
Apache-2.0
| 2023-09-14T21:43:38
| 2016-02-02T08:00:06
|
Java
|
UTF-8
|
Python
| false
| false
| 6,295
|
py
|
user_score.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""First in a series of four pipelines that tell a story in a 'gaming' domain.
Concepts: batch processing; reading input from Google Cloud Storage or from a
local text file, and writing output to a text file; using standalone DoFns; use
of the CombinePerKey transform.
In this gaming scenario, many users play, as members of different teams, over
the course of a day, and their actions are logged for processing. Some of the
logged game events may be late-arriving, if users play on mobile devices and go
transiently offline for a period of time.
This pipeline does batch processing of data collected from gaming events. It
calculates the sum of scores per user, over an entire batch of gaming data
(collected, say, for each day). The batch processing will not include any late
data that arrives after the day's cutoff point.
For a description of the usage and options, use -h or --help.
To specify a different runner:
--runner YOUR_RUNNER
NOTE: When specifying a different runner, additional runner-specific options
may have to be passed in as well
EXAMPLES
--------
# DirectRunner
python user_score.py \
--output /local/path/user_score/output
# DataflowRunner
python user_score.py \
--output gs://$BUCKET/user_score/output \
--runner DataflowRunner \
--project $PROJECT_ID \
--region $REGION_ID \
--temp_location gs://$BUCKET/user_score/temp
"""
# pytype: skip-file
# beam-playground:
# name: UserScore
# description: batch processing; reading input from Google Cloud Storage or
# from a local text file, and writing output to a text file; using
# standalone DoFns; use of the CombinePerKey transform.
# multifile: false
# pipeline_options: --output output.txt
# context_line: 174
# categories:
# - Batch
# - Combiners
# - Options
# complexity: ADVANCED
# tags:
# - batch
# - combine
# - io
# - strings
import argparse
import csv
import logging
import apache_beam as beam
from apache_beam.metrics.metric import Metrics
from apache_beam.options.pipeline_options import PipelineOptions
from apache_beam.options.pipeline_options import SetupOptions
class ParseGameEventFn(beam.DoFn):
"""Parses the raw game event info into a Python dictionary.
Each event line has the following format:
username,teamname,score,timestamp_in_ms,readable_time
e.g.:
user2_AsparagusPig,AsparagusPig,10,1445230923951,2015-11-02 09:09:28.224
The human-readable time string is not used here.
"""
def __init__(self):
# TODO(BEAM-6158): Revert the workaround once we can pickle super() on py3.
# super().__init__()
beam.DoFn.__init__(self)
self.num_parse_errors = Metrics.counter(self.__class__, 'num_parse_errors')
def process(self, elem):
try:
row = list(csv.reader([elem]))[0]
yield {
'user': row[0],
'team': row[1],
'score': int(row[2]),
'timestamp': int(row[3]) / 1000.0,
}
except: # pylint: disable=bare-except
# Log and count parse errors
self.num_parse_errors.inc()
logging.error('Parse error on "%s"', elem)
# [START extract_and_sum_score]
class ExtractAndSumScore(beam.PTransform):
"""A transform to extract key/score information and sum the scores.
The constructor argument `field` determines whether 'team' or 'user' info is
extracted.
"""
def __init__(self, field):
# TODO(BEAM-6158): Revert the workaround once we can pickle super() on py3.
# super().__init__()
beam.PTransform.__init__(self)
self.field = field
def expand(self, pcoll):
return (
pcoll
| beam.Map(lambda elem: (elem[self.field], elem['score']))
| beam.CombinePerKey(sum))
# [END extract_and_sum_score]
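# Editor-added illustration (not part of the original example): running the
# transform above on an in-memory PCollection; the players and scores are
# made up. Call _demo_extract_and_sum() manually to try it with the default
# (direct) runner.
def _demo_extract_and_sum():
    with beam.Pipeline() as p:
        (  # pylint: disable=expression-not-assigned
            p
            | beam.Create([
                {'user': 'alice', 'team': 'red', 'score': 10},
                {'user': 'alice', 'team': 'red', 'score': 5},
                {'user': 'bob', 'team': 'blue', 'score': 3}])
            | ExtractAndSumScore('user')
            | beam.Map(print))  # prints ('alice', 15) and ('bob', 3)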
class UserScore(beam.PTransform):
def expand(self, pcoll):
return (
pcoll
| 'ParseGameEventFn' >> beam.ParDo(ParseGameEventFn())
# Extract and sum username/score pairs from the event data.
| 'ExtractAndSumScore' >> ExtractAndSumScore('user'))
# [START main]
def run(argv=None, save_main_session=True):
"""Main entry point; defines and runs the user_score pipeline."""
parser = argparse.ArgumentParser()
    # The default maps to a small sample Google Cloud Storage file holding
    # gaming event data.
parser.add_argument(
'--input',
type=str,
default='gs://apache-beam-samples/game/small/gaming_data.csv',
help='Path to the data file(s) containing game data.')
parser.add_argument(
'--output', type=str, required=True, help='Path to the output file(s).')
args, pipeline_args = parser.parse_known_args(argv)
options = PipelineOptions(pipeline_args)
# We use the save_main_session option because one or more DoFn's in this
# workflow rely on global context (e.g., a module imported at module level).
options.view_as(SetupOptions).save_main_session = save_main_session
with beam.Pipeline(options=options) as p:
def format_user_score_sums(user_score):
(user, score) = user_score
return 'user: %s, total_score: %s' % (user, score)
( # pylint: disable=expression-not-assigned
p
| 'ReadInputText' >> beam.io.ReadFromText(args.input)
| 'UserScore' >> UserScore()
| 'FormatUserScoreSums' >> beam.Map(format_user_score_sums)
| 'WriteUserScoreSums' >> beam.io.WriteToText(args.output))
# [END main]
if __name__ == '__main__':
logging.getLogger().setLevel(logging.INFO)
run()
|
4a5621df47061d174bcb2e82181ecc5f6a43ef36
|
0e92203844a29b8c36d2c289ef0658204f80c127
|
/utils/Test_Rail.py
|
a3c11d37a08214b0afbd09b10a333d9429a0b40f
|
[
"MIT"
] |
permissive
|
qxf2/qxf2-page-object-model
|
b61ae2bd77cb2e6b97db991707945779c6254224
|
17b9d6095b881c6e9f25f8a467d90fc4bb6cef91
|
refs/heads/master
| 2023-08-08T08:30:10.739019
| 2023-07-27T14:15:07
| 2023-07-27T14:15:07
| 77,039,202
| 263
| 197
|
MIT
| 2023-07-27T14:15:08
| 2016-12-21T09:50:23
|
Python
|
UTF-8
|
Python
| false
| false
| 9,150
|
py
|
Test_Rail.py
|
"""
TestRail integration:
* limited to what we need at this time
* we assume TestRail operates in single suite mode
i.e., the default, recommended mode
API reference: http://docs.gurock.com/testrail-api2/start
"""
import os,sys
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from utils import testrail
import conf.testrailenv_conf as conf_file
class Test_Rail:
"Wrapper around TestRail's API"
# Added below to fix PytestCollectionWarning
__test__ = False
def __init__(self):
"Initialize the TestRail objects"
self.set_testrail_conf()
def set_testrail_conf(self):
"Set the TestRail URL and username, password"
#Set the TestRail URL
self.testrail_url = conf_file.testrail_url
self.client = testrail.APIClient(self.testrail_url)
#TestRail User and Password
self.client.user = conf_file.testrail_user
self.client.password = conf_file.testrail_password
def get_project_id(self,project_name):
"Get the project ID using project name"
project_id=None
projects = self.client.send_get('get_projects')
for project in projects:
if project['name'] == project_name:
project_id = project['id']
break
return project_id
def get_suite_id(self,project_name,suite_name):
"Get the suite ID using project name and suite name"
suite_id=None
project_id = self.get_project_id(project_name)
suites = self.client.send_get('get_suites/%s'%(project_id))
for suite in suites:
if suite['name'] == suite_name:
suite_id = suite['id']
break
return suite_id
def get_milestone_id(self,project_name,milestone_name):
"Get the milestone ID using project name and milestone name"
milestone_id = None
project_id = self.get_project_id(project_name)
milestones = self.client.send_get('get_milestones/%s'%(project_id))
for milestone in milestones:
if milestone['name'] == milestone_name:
milestone_id = milestone['id']
break
return milestone_id
def get_user_id(self,user_name):
"Get the user ID using user name"
user_id=None
users = self.client.send_get('get_users')
for user in users:
if user['name'] == user_name:
user_id = user['id']
break
return user_id
def get_run_id(self,project_name,test_run_name):
"Get the run ID using test name and project name"
run_id=None
project_id = self.get_project_id(project_name)
try:
test_runs = self.client.send_get('get_runs/%s'%(project_id))
except Exception as e:
            print('Exception in get_run_id() fetching test runs from TestRail.')
print('PYTHON SAYS: ')
print(e)
else:
for test_run in test_runs:
if test_run['name'] == test_run_name:
run_id = test_run['id']
break
return run_id
def create_milestone(self,project_name,milestone_name,milestone_description=""):
"Create a new milestone if it does not already exist"
milestone_id = self.get_milestone_id(project_name,milestone_name)
if milestone_id is None:
project_id = self.get_project_id(project_name)
if project_id is not None:
try:
data = {'name':milestone_name,
'description':milestone_description}
self.client.send_post('add_milestone/%s'%str(project_id),
data)
except Exception as e:
                    print('Exception in create_milestone() creating new milestone.')
print('PYTHON SAYS: ')
print(e)
else:
print('Created the milestone: %s'%milestone_name)
else:
print("Milestone '%s' already exists"%milestone_name)
def create_new_project(self,new_project_name,project_description,show_announcement,suite_mode):
"Create a new project if it does not already exist"
project_id = self.get_project_id(new_project_name)
if project_id is None:
try:
self.client.send_post('add_project',
{'name': new_project_name,
'announcement': project_description,
'show_announcement': show_announcement,
'suite_mode': suite_mode,})
except Exception as e:
print('Exception in create_new_project() creating new project.')
print('PYTHON SAYS: ')
print(e)
else:
print("Project already exists %s"%new_project_name)
def create_test_run(self,project_name,test_run_name,milestone_name=None,description="",suite_name=None,case_ids=[],assigned_to=None):
"Create a new test run if it does not already exist"
#reference: http://docs.gurock.com/testrail-api2/reference-runs
project_id = self.get_project_id(project_name)
test_run_id = self.get_run_id(project_name,test_run_name)
if project_id is not None and test_run_id is None:
data = {}
if suite_name is not None:
suite_id = self.get_suite_id(project_name,suite_name)
if suite_id is not None:
data['suite_id'] = suite_id
data['name'] = test_run_name
data['description'] = description
if milestone_name is not None:
milestone_id = self.get_milestone_id(project_name,milestone_name)
if milestone_id is not None:
data['milestone_id'] = milestone_id
if assigned_to is not None:
assignedto_id = self.get_user_id(assigned_to)
if assignedto_id is not None:
data['assignedto_id'] = assignedto_id
if len(case_ids) > 0:
data['case_ids'] = case_ids
data['include_all'] = False
try:
self.client.send_post('add_run/%s'%(project_id),data)
except Exception as e:
print('Exception in create_test_run() Creating Test Run.')
print('PYTHON SAYS: ')
print(e)
else:
print('Created the test run: %s'%test_run_name)
else:
if project_id is None:
print("Cannot add test run %s because Project %s was not found"%(test_run_name,project_name))
elif test_run_id is not None:
print("Test run '%s' already exists"%test_run_name)
def delete_project(self,new_project_name,project_description):
"Delete an existing project"
project_id = self.get_project_id(new_project_name)
if project_id is not None:
try:
self.client.send_post('delete_project/%s'%(project_id),project_description)
except Exception as e:
print('Exception in delete_project() deleting project.')
print('PYTHON SAYS: ')
print(e)
else:
            print("Can't delete the project for the given project name: %s"%(new_project_name))
def delete_test_run(self,test_run_name,project_name):
"Delete an existing test run"
        run_id = self.get_run_id(project_name,test_run_name)
if run_id is not None:
try:
self.client.send_post('delete_run/%s'%(run_id),test_run_name)
except Exception as e:
print('Exception in update_testrail() updating TestRail.')
print('PYTHON SAYS: ')
print(e)
else:
            print("Can't delete the test run for the given project and test run name: %s , %s"%(project_name,test_run_name))
def update_testrail(self,case_id,run_id,result_flag,msg=""):
"Update TestRail for a given run_id and case_id"
update_flag = False
#Update the result in TestRail using send_post function.
#Parameters for add_result_for_case is the combination of runid and case id.
#status_id is 1 for Passed, 2 For Blocked, 4 for Retest and 5 for Failed
status_id = 1 if result_flag is True else 5
if ((run_id is not None) and (case_id != 'None')) :
try:
self.client.send_post(
'add_result_for_case/%s/%s'%(run_id,case_id),
{'status_id': status_id, 'comment': msg })
except Exception as e:
print('Exception in update_testrail() updating TestRail.')
print('PYTHON SAYS: ')
print(e)
            else:
                print('Updated test result for case: %s in test run: %s\n'%(case_id,run_id))
                update_flag = True
return update_flag
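# Editor-added usage sketch (illustrative): the project, run and case values
# below are hypothetical and assume a reachable TestRail instance configured
# in conf/testrailenv_conf.py.
if __name__ == '__main__':
    test_rail = Test_Rail()
    run_id = test_rail.get_run_id(project_name='Demo Project',
                                  test_run_name='Nightly run')
    test_rail.update_testrail(case_id='1234', run_id=run_id,
                              result_flag=True, msg='Passed in CI')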
|
fc2b3550ee6c26d74c025ad8f68f7311a98f7c24
|
7860d9fba242d9bdcb7c06c32ee4064e4a7fa2f1
|
/litex_boards/platforms/hackaday_hadbadge.py
|
4602b3a47a1d22a57cacce08fe646b7e28175016
|
[
"BSD-3-Clause",
"BSD-2-Clause"
] |
permissive
|
litex-hub/litex-boards
|
ef1f200fd6d34c96621f4efa094ede874f4c34ab
|
b92c96b3a445fde31037f593a40fe621f85cb58c
|
refs/heads/master
| 2023-09-03T15:09:11.198560
| 2023-08-30T15:22:11
| 2023-08-30T15:22:11
| 191,191,221
| 291
| 283
|
BSD-2-Clause
| 2023-09-03T20:32:58
| 2019-06-10T15:09:10
|
Python
|
UTF-8
|
Python
| false
| false
| 8,596
|
py
|
hackaday_hadbadge.py
|
#
# This file is part of LiteX-Boards.
#
# Copyright (c) 2020 Michael Welling <mwelling@ieee.org>
# Copyright (c) 2020 Sean Cross <sean@xobs.io>
# Copyright (c) 2020 Drew Fustini <drew@pdp7.com>
# Copyright (c) 2020 Florent Kermarrec <florent@enjoy-digital.fr>
# SPDX-License-Identifier: BSD-2-Clause
from litex.build.generic_platform import *
from litex.build.lattice import LatticeECP5Platform
# IOs ----------------------------------------------------------------------------------------------
_io = [
# Clk / Rst
("clk8", 0, Pins("U18"), IOStandard("LVCMOS33")),
("programn", 0, Pins("R1"), IOStandard("LVCMOS33")),
# Leds
("led", 0, Pins("E3 D3 C3 C4 C2 B1 B20 B19 A18 K20 K19"), IOStandard("LVCMOS33")), # Anodes
("led", 1, Pins("P19 L18 K18"), IOStandard("LVCMOS33")), # Cathodes via FET
# Serial
("serial", 0,
Subsignal("rx", Pins("U2"), IOStandard("LVCMOS33"), Misc("PULLMODE=UP")),
Subsignal("tx", Pins("U1"), IOStandard("LVCMOS33")),
),
# USB
("usb", 0,
Subsignal("d_p", Pins("F3")),
Subsignal("d_n", Pins("G3")),
Subsignal("pullup", Pins("E4")),
Subsignal("vbusdet", Pins("F4")),
IOStandard("LVCMOS33")
),
# KeyPad
("keypad", 0,
Subsignal("left", Pins("G2"), Misc("PULLMODE=UP")),
Subsignal("right", Pins("F2"), Misc("PULLMODE=UP")),
Subsignal("up", Pins("F1"), Misc("PULLMODE=UP")),
Subsignal("down", Pins("C1"), Misc("PULLMODE=UP")),
Subsignal("start", Pins("E1"), Misc("PULLMODE=UP")),
Subsignal("select", Pins("D2"), Misc("PULLMODE=UP")),
Subsignal("a", Pins("D1"), Misc("PULLMODE=UP")),
Subsignal("b", Pins("E2"), Misc("PULLMODE=UP")),
),
# HDMI
("hdmi_out", 0,
Subsignal("clk_p", Pins("P20"), Inverted(), IOStandard("TMDS_33")),
Subsignal("clk_n", Pins("R20"), Inverted(), IOStandard("TMDS_33")),
Subsignal("data0_p", Pins("N19"), IOStandard("TMDS_33")),
Subsignal("data0_n", Pins("N20"), IOStandard("TMDS_33")),
Subsignal("data1_p", Pins("L20"), IOStandard("TMDS_33")),
Subsignal("data1_n", Pins("M20"), IOStandard("TMDS_33")),
Subsignal("data2_p", Pins("L16"), IOStandard("TMDS_33")),
Subsignal("data2_n", Pins("L17"), IOStandard("TMDS_33")),
Subsignal("hpd_notif", Pins("R18"), IOStandard("LVCMOS33")), # Also called HDMI_HEAC_n
Subsignal("hdmi_heac_p", Pins("T19"), IOStandard("LVCMOS33")),
Misc("DRIVE=4"),
),
# LCD
("lcd", 0,
Subsignal("db", Pins(
"J3 H1 K4 J1 K3 K2 L4 K1",
"L3 L2 M4 L1 M3 M1 N4 N2",
"N3 N1")),
Subsignal("rd", Pins("P2")),
Subsignal("wr", Pins("P4")),
Subsignal("rs", Pins("P1")),
Subsignal("cs", Pins("P3")),
Subsignal("id", Pins("J4")),
Subsignal("rst", Pins("H2")),
Subsignal("fmark", Pins("G1")),
Subsignal("blen", Pins("P5")),
IOStandard("LVCMOS33")
),
# SPIFlash
("spiflash", 0, # Clock needs to be accessed through USRMCLK
Subsignal("cs_n", Pins("R2")),
Subsignal("mosi", Pins("W2")),
Subsignal("miso", Pins("V2")),
Subsignal("wp", Pins("Y2")),
Subsignal("hold", Pins("W1")),
IOStandard("LVCMOS33")
),
("spiflash4x", 0, # Clock needs to be accessed through USRMCLK
Subsignal("cs_n", Pins("R2")),
Subsignal("dq", Pins("W2 V2 Y2 W1")),
IOStandard("LVCMOS33")
),
# SPIRam
("spiram4x", 0,
Subsignal("cs_n", Pins("D20")),
Subsignal("clk", Pins("E20")),
Subsignal("dq", Pins("E19 D19 C20 F19"), Misc("PULLMODE=UP")),
IOStandard("LVCMOS33"), Misc("SLEWRATE=SLOW")
),
("spiram4x", 1,
Subsignal("cs_n", Pins("F20")),
Subsignal("clk", Pins("J19")),
Subsignal("dq", Pins("J20 G19 G20 H20"), Misc("PULLMODE=UP")),
IOStandard("LVCMOS33"), Misc("SLEWRATE=SLOW")
),
# SDR SDRAM
("sdram_clock", 0, Pins("D11"), IOStandard("LVCMOS33")),
("sdram", 0,
Subsignal("a", Pins("A8 D9 C9 B9 C14 E17 A12 B12 H17 G18 B8 A11 B11")),
Subsignal("dq", Pins("C5 B5 A5 C6 B10 C10 D10 A9")),
Subsignal("we_n", Pins("B6")),
Subsignal("ras_n", Pins("D6")),
Subsignal("cas_n", Pins("A6")),
Subsignal("cs_n", Pins("C7")),
Subsignal("cke", Pins("C11")),
Subsignal("ba", Pins("A7 C8")),
Subsignal("dm", Pins("A10")),
IOStandard("LVCMOS33"), Misc("SLEWRATE=FAST")
),
# SAO
("sao", 0,
Subsignal("sda", Pins("B3")),
Subsignal("scl", Pins("B2")),
Subsignal("gpio", Pins("A2 A3 B4")),
Subsignal("drm", Pins("A4")),
IOStandard("LVCMOS33"),
),
("sao", 1,
Subsignal("sda", Pins("A16")),
Subsignal("scl", Pins("B17")),
Subsignal("gpio", Pins("B18 A17 B16")),
Subsignal("drm", Pins("C17")),
IOStandard("LVCMOS33"),
),
# Test Points
("testpts", 0,
Subsignal("a1", Pins("A15")),
Subsignal("a2", Pins("C16")),
Subsignal("a3", Pins("A14")),
Subsignal("a4", Pins("D16")),
Subsignal("b1", Pins("B15")),
Subsignal("b2", Pins("C15")),
Subsignal("b3", Pins("A13")),
Subsignal("b4", Pins("B13")),
IOStandard("LVCMOS33"),
),
]
# Connectors ---------------------------------------------------------------------------------------
_connectors = [
("pmod", "A15 C16 A14 D16 B15 C15 A13 B13"),
("genio", "C5 B5 A5 C6 B6 A6 D6 C7 ", # 0-7
"A7 C8 B8 A8 D9 C9 B9 A9 ", # 8-15
"D10 C10 B10 A10 D11 C11 B11 A11", # 16-23
"G18 H17 B12 A12 E17 C14"), # 24-29
]
# PMODs --------------------------------------------------------------------------------------------
_pmod_gpio = [
("pmod_gpio", 0,
Subsignal("p0", Pins("pmod:0")),
Subsignal("p1", Pins("pmod:1")),
Subsignal("p2", Pins("pmod:2")),
Subsignal("p3", Pins("pmod:3")),
Subsignal("p4", Pins("pmod:4")),
Subsignal("p5", Pins("pmod:5")),
Subsignal("p6", Pins("pmod:6")),
Subsignal("p7", Pins("pmod:7")),
IOStandard("LVCMOS33")
),
]
# Generic IOs --------------------------------------------------------------------------------------
_genio_gpio = [
("genio_gpio", 0,
Subsignal("p0", Pins("genio:0")),
Subsignal("p1", Pins("genio:1")),
Subsignal("p2", Pins("genio:2")),
Subsignal("p3", Pins("genio:3")),
Subsignal("p4", Pins("genio:4")),
Subsignal("p5", Pins("genio:5")),
Subsignal("p6", Pins("genio:6")),
Subsignal("p7", Pins("genio:7")),
Subsignal("p8", Pins("genio:8")),
Subsignal("p9", Pins("genio:9")),
Subsignal("p10", Pins("genio:10")),
Subsignal("p11", Pins("genio:11")),
Subsignal("p12", Pins("genio:12")),
Subsignal("p13", Pins("genio:13")),
Subsignal("p14", Pins("genio:14")),
Subsignal("p15", Pins("genio:15")),
Subsignal("p16", Pins("genio:16")),
Subsignal("p17", Pins("genio:17")),
Subsignal("p18", Pins("genio:18")),
Subsignal("p19", Pins("genio:19")),
Subsignal("p20", Pins("genio:20")),
Subsignal("p21", Pins("genio:21")),
Subsignal("p22", Pins("genio:22")),
Subsignal("p23", Pins("genio:23")),
Subsignal("p24", Pins("genio:24")),
Subsignal("p25", Pins("genio:25")),
Subsignal("p26", Pins("genio:26")),
Subsignal("p27", Pins("genio:27")),
Subsignal("p28", Pins("genio:28")),
Subsignal("p29", Pins("genio:29")),
)
]
# Platform -----------------------------------------------------------------------------------------
class Platform(LatticeECP5Platform):
default_clk_name = "clk8"
default_clk_period = 1e9/8e6
def __init__(self, toolchain="trellis", **kwargs):
LatticeECP5Platform.__init__(self, "LFE5U-45F-8CABGA381", io=_io, connectors=_connectors,
toolchain=toolchain, **kwargs)
def create_programmer(self):
raise ValueError("{} programmer is not supported"
.format(self.programmer))
def do_finalize(self, fragment):
LatticeECP5Platform.do_finalize(self, fragment)
self.add_period_constraint(self.lookup_request("clk8", loose=True), 1e9/8e6)
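# Editor-added usage sketch (illustrative, not part of the original file):
# instantiating the platform and requesting an IO group. Building a real
# bitstream additionally requires a Migen/LiteX design, which is out of
# scope here.
if __name__ == "__main__":
    platform = Platform(toolchain="trellis")
    serial = platform.request("serial")
    print(serial.tx, serial.rx)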
|
7ee6a4f46c7b5365ee573dd31f42ef1ddc5d44e0
|
20f125a17856c1251727314c571091a59bc770f0
|
/Chapter 03/3.05.py
|
735d0880d2b292c2b177575feeec423705c6253e
|
[
"MIT"
] |
permissive
|
PacktPublishing/Tkinter-GUI-Application-Development-Blueprints-Second-Edition
|
310983285d54c59bdd02e69b9a913aa9372c869a
|
1e160c0575028e446295c121a84142164ee5ced2
|
refs/heads/master
| 2023-07-10T05:34:39.159752
| 2023-01-30T09:20:16
| 2023-01-30T09:20:16
| 123,231,531
| 142
| 94
|
MIT
| 2023-07-03T23:09:32
| 2018-02-28T04:59:53
|
Python
|
UTF-8
|
Python
| false
| false
| 11,346
|
py
|
3.05.py
|
"""
Code illustration: 3.05
- Loading drum samples
New modules imported here:
- os, tkinter.filedialog
New methods implemented here
on_open_file_button_clicked():
display_all_drum_file_names():
display_drum_name():
Chapter 3 : Programmable Drum Machine
Tkinter GUI Application Development Blueprints
"""
import os
from tkinter import Tk, Entry, W, E, N, S, PhotoImage, Checkbutton, Button, \
Menu, Frame, Label, Spinbox, END
from tkinter import filedialog
PROGRAM_NAME = ' Explosion Drum Machine '
MAX_NUMBER_OF_PATTERNS = 10
MAX_NUMBER_OF_DRUM_SAMPLES = 5
MAX_NUMBER_OF_UNITS = 5
MAX_BPU = 5
INITIAL_NUMBER_OF_UNITS = 4
INITIAL_BPU = 4
INITIAL_BEATS_PER_MINUTE = 240
MIN_BEATS_PER_MINUTE = 80
MAX_BEATS_PER_MINUTE = 360
COLOR_1 = 'grey55'
COLOR_2 = 'khaki'
BUTTON_CLICKED_COLOR = 'green'
class DrumMachine:
def __init__(self, root):
self.root = root
self.root.title(PROGRAM_NAME)
self.all_patterns = [None] * MAX_NUMBER_OF_PATTERNS
self.beats_per_minute = INITIAL_BEATS_PER_MINUTE
self.current_pattern_index = 0
self.drum_load_entry_widget = [None] * MAX_NUMBER_OF_DRUM_SAMPLES
self.init_all_patterns()
self.init_gui()
def on_open_file_button_clicked(self, drum_index):
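        # Editor note: this method is a closure factory; each call returns a
        # fresh event_handler bound to its own drum_index, so every open-file
        # button updates the correct row (avoiding Python's late-binding
        # pitfall when wiring per-row callbacks in a loop).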
def event_handler():
file_path = filedialog.askopenfilename(defaultextension=".wav",
filetypes=[("Wave Files", "*.wav"), ("OGG Files", "*.ogg")])
if not file_path:
return
self.set_drum_file_path(drum_index, file_path)
self.display_all_drum_file_names()
return event_handler
def display_all_drum_file_names(self):
for i, drum_name in enumerate(self.get_list_of_drum_files()):
self.display_drum_name(i, drum_name)
def display_drum_name(self, text_widget_num, file_path):
if file_path is None:
return
drum_name = os.path.basename(file_path)
self.drum_load_entry_widget[text_widget_num].delete(0, END)
self.drum_load_entry_widget[text_widget_num].insert(0, drum_name)
#
# getters and setters begins
#
def get_current_pattern_dict(self):
return self.all_patterns[self.current_pattern_index]
def get_bpu(self):
return self.get_current_pattern_dict()['bpu']
def set_bpu(self):
self.get_current_pattern_dict()['bpu'] = int(self.bpu_widget.get())
def get_number_of_units(self):
return self.get_current_pattern_dict()['number_of_units']
def set_number_of_units(self):
self.get_current_pattern_dict(
)['number_of_units'] = int(self.number_of_units_widget.get())
def get_list_of_drum_files(self):
return self.get_current_pattern_dict()['list_of_drum_files']
def get_drum_file_path(self, drum_index):
return self.get_list_of_drum_files()[drum_index]
def set_drum_file_path(self, drum_index, file_path):
self.get_list_of_drum_files()[drum_index] = file_path
def get_is_button_clicked_list(self):
return self.get_current_pattern_dict()['is_button_clicked_list']
def set_is_button_clicked_list(self, num_of_rows, num_of_columns):
self.get_current_pattern_dict()['is_button_clicked_list'] = [
[False] * num_of_columns for x in range(num_of_rows)]
def init_all_patterns(self):
self.all_patterns = [
{
'list_of_drum_files': [None] * MAX_NUMBER_OF_DRUM_SAMPLES,
'number_of_units': INITIAL_NUMBER_OF_UNITS,
'bpu': INITIAL_BPU,
'is_button_clicked_list':
self.init_is_button_clicked_list(
MAX_NUMBER_OF_DRUM_SAMPLES,
INITIAL_NUMBER_OF_UNITS * INITIAL_BPU
)
}
for k in range(MAX_NUMBER_OF_PATTERNS)]
def on_pattern_changed(self):
pass
def on_number_of_units_changed(self):
self.set_number_of_units()
self.set_is_button_clicked_list(MAX_NUMBER_OF_DRUM_SAMPLES,
self.find_number_of_columns())
self.create_right_button_matrix()
def on_bpu_changed(self):
self.set_bpu()
self.set_is_button_clicked_list(MAX_NUMBER_OF_DRUM_SAMPLES,
self.find_number_of_columns())
self.create_right_button_matrix()
def on_play_button_clicked(self):
pass
def on_stop_button_clicked(self):
pass
def on_loop_button_toggled(self):
pass
def on_beats_per_minute_changed(self):
pass
def init_is_button_clicked_list(self, num_of_rows, num_of_columns):
return [[False] * num_of_columns for x in range(num_of_rows)]
def get_button_value(self, row, col):
return self.all_patterns[self.current_pattern_index][
'is_button_clicked_list'][row][col]
def find_number_of_columns(self):
return int(self.number_of_units_widget.get()) * int(self.bpu_widget.get())
def process_button_clicked(self, row, col):
self.set_button_value(row, col, not self.get_button_value(row, col))
self.display_button_color(row, col)
def set_button_value(self, row, col, bool_value):
self.all_patterns[self.current_pattern_index][
'is_button_clicked_list'][row][col] = bool_value
def on_button_clicked(self, row, col):
def event_handler():
self.process_button_clicked(row, col)
return event_handler
def display_all_button_colors(self):
number_of_columns = self.find_number_of_columns()
for r in range(MAX_NUMBER_OF_DRUM_SAMPLES):
for c in range(number_of_columns):
self.display_button_color(r, c)
def display_button_color(self, row, col):
bpu = int(self.bpu_widget.get())
original_color = COLOR_1 if ((col//bpu) % 2) else COLOR_2
button_color = BUTTON_CLICKED_COLOR if self.get_button_value(
row, col) else original_color
self.buttons[row][col].config(background=button_color)
def create_play_bar(self):
playbar_frame = Frame(self.root, height=15)
start_row = MAX_NUMBER_OF_DRUM_SAMPLES + 10
playbar_frame.grid(row=start_row, columnspan=13,
sticky=W + E, padx=15, pady=10)
self.play_icon = PhotoImage(file="images/play.gif")
self.play_button = Button(
playbar_frame, text='Play', image=self.play_icon, compound='left', command=self.on_play_button_clicked)
self.play_button.grid(row=start_row, column=1, padx=2)
Button(playbar_frame, text='Stop', command=self.on_stop_button_clicked).grid(
row=start_row, column=3, padx=2)
self.loopbutton = Checkbutton(
playbar_frame, text='Loop', command=self.on_loop_button_toggled)
self.loopbutton.grid(row=start_row, column=16, padx=5)
Label(playbar_frame, text='Beats Per Minute').grid(
row=start_row, column=25)
self.beats_per_minute_widget = Spinbox(playbar_frame, from_=MIN_BEATS_PER_MINUTE, to=MAX_BEATS_PER_MINUTE, width=5,
increment=5.0, command=self.on_beats_per_minute_changed)
self.beats_per_minute_widget.grid(row=start_row, column=30)
self.beats_per_minute_widget.delete(0,"end")
self.beats_per_minute_widget.insert(0,INITIAL_BEATS_PER_MINUTE)
photo = PhotoImage(file='images/signature.gif')
label = Label(playbar_frame, image=photo)
label.image = photo
label.grid(row=start_row, column=50, padx=1, sticky='w')
def create_right_button_matrix(self):
right_frame = Frame(self.root)
right_frame.grid(row=10, column=6, sticky=W +
E + N + S, padx=15, pady=4)
self.buttons = [[None for x in range(
self.find_number_of_columns())] for x in range(MAX_NUMBER_OF_DRUM_SAMPLES)]
for row in range(MAX_NUMBER_OF_DRUM_SAMPLES):
for col in range(self.find_number_of_columns()):
self.buttons[row][col] = Button(
right_frame, command=self.on_button_clicked(row, col))
self.buttons[row][col].grid(row=row, column=col)
self.display_button_color(row, col)
def create_left_drum_loader(self):
left_frame = Frame(self.root)
left_frame.grid(row=10, column=0, columnspan=6, sticky=W + E + N + S)
open_file_icon = PhotoImage(file='images/openfile.gif')
for i in range(MAX_NUMBER_OF_DRUM_SAMPLES):
open_file_button = Button(left_frame, image=open_file_icon,
command=self.on_open_file_button_clicked(i))
open_file_button.image = open_file_icon
open_file_button.grid(row=i, column=0, padx=5, pady=4)
self.drum_load_entry_widget[i] = Entry(left_frame)
self.drum_load_entry_widget[i].grid(
row=i, column=4, padx=7, pady=4)
def create_top_bar(self):
topbar_frame = Frame(self.root, height=25)
topbar_frame.grid(row=0, columnspan=12, rowspan=10, padx=5, pady=5)
Label(topbar_frame, text='Pattern Number:').grid(row=0, column=1)
self.pattern_index_widget = Spinbox(topbar_frame, from_=0, to=MAX_NUMBER_OF_PATTERNS - 1, width=5,
command=self.on_pattern_changed)
self.pattern_index_widget.grid(row=0, column=2)
self.current_pattern_name_widget = Entry(topbar_frame)
self.current_pattern_name_widget.grid(row=0, column=3, padx=7, pady=2)
Label(topbar_frame, text='Number of Units:').grid(row=0, column=4)
self.number_of_units_widget = Spinbox(topbar_frame, from_=1, to=MAX_NUMBER_OF_UNITS, width=5,
command=self.on_number_of_units_changed)
self.number_of_units_widget.delete(0,"end")
self.number_of_units_widget.insert(0,INITIAL_NUMBER_OF_UNITS)
self.number_of_units_widget.grid(row=0, column=5)
Label(topbar_frame, text='BPUs:').grid(row=0, column=6)
self.bpu_widget = Spinbox(topbar_frame, from_=1, to=MAX_BPU, width=5,
command=self.on_bpu_changed)
self.bpu_widget.grid(row=0, column=7)
self.bpu_widget.delete(0,"end")
self.bpu_widget.insert(0,INITIAL_BPU)
def create_top_menu(self):
self.menu_bar = Menu(self.root)
self.file_menu = Menu(self.menu_bar, tearoff=0)
self.file_menu.add_command(label="Load Project")
self.file_menu.add_command(label="Save Project")
self.file_menu.add_separator()
self.file_menu.add_command(label="Exit")
self.menu_bar.add_cascade(label="File", menu=self.file_menu)
self.about_menu = Menu(self.menu_bar, tearoff=0)
self.about_menu.add_command(label="About")
self.menu_bar.add_cascade(label="About", menu=self.about_menu)
self.root.config(menu=self.menu_bar)
def init_gui(self):
self.create_top_menu()
self.create_top_bar()
self.create_left_drum_loader()
self.create_right_button_matrix()
self.create_play_bar()
if __name__ == '__main__':
root = Tk()
DrumMachine(root)
root.mainloop()
|
9ea9f5adf157d1a29233988bf04bb393fa858d87
|
09fa8d62c28752670f516e5aa0d65c4d2fb15824
|
/avod/core/trainer_test.py
|
107b64d46bebd41419d8424ec0a442db79e748e5
|
[
"MIT"
] |
permissive
|
melfm/avod-ssd
|
76457b449611c04eafc054e7719a57fc6641281d
|
69804f1f7c2edf505f27e46d477e0936a9591b0d
|
refs/heads/master
| 2022-05-11T06:27:53.248229
| 2022-05-03T15:38:27
| 2022-05-03T15:38:27
| 129,295,659
| 104
| 27
|
MIT
| 2022-05-03T15:38:28
| 2018-04-12T18:36:56
|
Python
|
UTF-8
|
Python
| false
| false
| 3,687
|
py
|
trainer_test.py
|
"""Tests for avod.core.trainer with a dummy Detection Model"""
import tensorflow as tf
import numpy as np
from tensorflow.contrib.layers.python.layers import layers
from tensorflow.python.framework import random_seed
from tensorflow.python.ops import math_ops
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from google.protobuf import text_format
from avod.core import trainer
from avod.core import model
from avod.protos import train_pb2
from avod.protos import model_pb2
class FakeBatchNormClassifier(model.DetectionModel):
def __init__(self, model_config, num_classes=1):
# Sets model configs (_config and _num_classes)
super(FakeBatchNormClassifier, self).__init__(model_config)
self.tf_inputs, self.tf_labels = self.get_input()
self._train_op = None
self._loss = None
def BatchNormClassifier(self, inputs):
inputs = layers.batch_norm(inputs, decay=0.1, fused=None)
return layers.fully_connected(inputs, 1, activation_fn=math_ops.sigmoid)
def get_input(self):
"""Creates an easy training set."""
np.random.seed(0)
inputs = np.zeros((16, 4))
labels = np.random.randint(
0, 2, size=(16, 1)).astype(
np.float32)
for i in range(16):
j = int(2 * labels[i] + np.random.randint(0, 2))
inputs[i, j] = 1
random_seed.set_random_seed(0)
tf_inputs = constant_op.constant(inputs, dtype=dtypes.float32)
tf_labels = constant_op.constant(labels, dtype=dtypes.float32)
return tf_inputs, tf_labels
def build(self):
"""Prediction tensors from inputs tensor.
Args:
preprocessed_inputs: a [batch, 28, 28, channels] float32 tensor.
Returns:
prediction_dict: a dictionary holding prediction tensors to be
passed to the Loss or Postprocess functions.
"""
tf_predictions = self.BatchNormClassifier(self.tf_inputs)
return tf_predictions
def loss(self, tf_predictions):
"""Compute scalar loss tensors with respect to provided groundtruth.
"""
# trainer expects two losses, pass in a dummy one
dummy_loss_dict = {}
total_loss = tf.losses.log_loss(self.tf_labels,
tf_predictions,
scope='BatchNormLoss')
return dummy_loss_dict, total_loss
class ClassifierTrainerTest(tf.test.TestCase):
def test_batch_norm_class(self):
# This tests the model and trainer set up
train_config_text_proto = """
optimizer {
gradient_descent {
learning_rate {
constant_learning_rate {
learning_rate: 1.0
}
}
}
}
max_iterations: 5
"""
model_config_text_proto = """
path_drop_probabilities: [1.0, 1.0]
"""
train_config = train_pb2.TrainConfig()
text_format.Merge(train_config_text_proto, train_config)
model_config = model_pb2.ModelConfig()
text_format.Merge(model_config_text_proto, model_config)
train_config.overwrite_checkpoints = True
test_root_dir = '/tmp/avod_unit_test/'
paths_config = model_config.paths_config
paths_config.logdir = test_root_dir + 'logs/'
paths_config.checkpoint_dir = test_root_dir
classifier = FakeBatchNormClassifier(model_config)
trainer.train(classifier,
train_config)
if __name__ == '__main__':
tf.test.main()
|
5915cd1a3f96fe1f380e40f2dc3fd6564655d598
|
cfda7e170793992b3b0359fdd775b03c0b6e91a5
|
/fitlog/fastserver/summary_app.py
|
77340ce78171c2763113763f9388075c8f56efc5
|
[
"Apache-2.0"
] |
permissive
|
fastnlp/fitlog
|
8c91e1ee6e5ffc4b6396b96020594800dfc34ed2
|
def816d968e6f688ea769dcca8c2475ad56ae693
|
refs/heads/master
| 2023-06-23T18:05:37.674425
| 2022-11-17T13:38:19
| 2022-11-17T13:38:19
| 179,245,899
| 1,409
| 138
|
Apache-2.0
| 2022-11-17T13:30:58
| 2019-04-03T08:31:57
|
Python
|
UTF-8
|
Python
| false
| false
| 8,255
|
py
|
summary_app.py
|
# This module mainly serves summary requests and computes summaries
from flask import render_template
import traceback
from flask import request, jsonify
from flask import Blueprint
from .server.data_container import all_data
from .server.server_config import _get_config_names
from .server.summary_utils import _get_all_summuries
from .server.summary_utils import check_uuid_summary
from .server.summary_utils import get_summary_selection_from_logs
from .server.summary_utils import read_logs
from .server.summary_utils import generate_summary_table
from .server.summary_utils import _summary_eq
from .server.summary_utils import read_summary
from ..fastgit.committer import _colored_string
from .server.summary_utils import save_summary
from werkzeug.utils import secure_filename
from .server.summary_utils import delete_summary
from .server.utils import stringify_dict_key
summary_page = Blueprint('summary_page', __name__, template_folder='templates')
SUMMARIES = {}
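# Editor note (illustrative): this blueprint is registered on the main
# fastserver Flask app elsewhere, roughly via
# app.register_blueprint(summary_page); the exact wiring lives outside this
# module.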
@summary_page.route('/summary', methods=['GET', 'POST'])
def summary_index():
    # In this case, look up the current default_config directly and check whether a summary exists
ids = {}
if request.method=='POST':
        # the table should pass in ids as {'ids': [xxx]},
        # i.e. a list of ids
for id in request.values['ids'].split(','):
ids[id] = 1
return render_template('summary.html', server_uuid=all_data['uuid'], log_names=ids,
settings={key.replace('_', ' '):value for key, value in all_data['settings'].items()})
# Fetch the selectable configs and summaries, along with all existing summaries
@summary_page.route('/summary/summary_config', methods=['POST'])
def summaries_configs():
res = check_uuid_summary(all_data['uuid'], request.json['uuid'])
    if res is not None:
return jsonify(res)
root_log_dir = all_data['root_log_dir']
config_names = {}
for name in _get_config_names(root_log_dir):
if name == all_data['log_config_name']:
config_names[name] = 1
else:
config_names[name] = 0
summary_names = {key:0 for key in _get_all_summuries(root_log_dir)}
summary_names['Create New Summary'] = 1
return jsonify({'status':'success',
'summary_names':summary_names,
'config_names':config_names})
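# Illustrative only: a minimal sketch of how a client could call the endpoint
# above; the host, port and uuid value are hypothetical, not part of fitlog.
#
#     import requests
#     resp = requests.post('http://localhost:5000/summary/summary_config',
#                          json={'uuid': '<server-uuid>'})
#     if resp.json()['status'] == 'success':
#         print(resp.json()['summary_names'])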
@summary_page.route('/summary/summary_json', methods=['POST'])
def summary_json():
    # Fetch the content of a given summary
res = check_uuid_summary(all_data['uuid'], request.json['uuid'])
    if res is not None:
return jsonify(res)
summary_name = request.json['summary_name']
summary_names = _get_all_summuries(all_data['root_log_dir'])
    if summary_name not in summary_names:
return jsonify(status='fail', msg='There is no summary named `{}`.'.format(summary_name))
else:
summary = read_summary(all_data['root_log_dir'], summary_name)
summary.pop('extra_data', None)
return jsonify(status='success', summary=summary)
@summary_page.route('/summary/selections', methods=['POST'])
def summary_selections():
    # Generate the axis and metric options from the data
    res = check_uuid_summary(all_data['uuid'], request.json['uuid'])
    if res is not None:
return jsonify(res)
try:
if 'config_name' in request.json:
logs = read_logs(request.json['config_name'], all_data['root_log_dir'])
elif 'log_names' in request.json:
logs = read_logs(request.json['log_names'], all_data['root_log_dir'], all_data['extra_data'])
else:
raise ValueError("Corrupted request.")
if isinstance(logs, dict):
return jsonify(logs)
if len(logs)==0:
return jsonify(status='fail', msg='No valid log found.')
axises, metrics = get_summary_selection_from_logs(logs)
if len(metrics)==0:
return jsonify(status='fail', msg='No valid metric.')
if len(axises)==0:
            return jsonify(status='fail', msg='No valid hypers or other fields.')
return jsonify(status='success', metrics=metrics, axises=axises)
    except Exception as e:
        traceback.print_exc()
        print(e)
        return jsonify(status='fail', msg="Unknown error from the server.")
@summary_page.route('/summary/new_summary', methods=['POST'])
def new_summary():
    # Generate a new summary from the data sent by the front end and return the summary data
    res = check_uuid_summary(all_data['uuid'], request.json['uuid'])
    if res is not None:
return jsonify(res)
try:
vertical = request.json['vertical']
horizontals = request.json['horizontals']
method = request.json['method']
criteria = request.json['criteria']
results = request.json['results']
result_maps = request.json['result_maps']
selected_data = request.json['selected_data']
summary_name = request.json['summary_name']
extra_summary = []
summary_names = _get_all_summuries(all_data['root_log_dir'])
if summary_name in summary_names:
request_summary = {'vertical': vertical,
'horizontals': horizontals,
'method': method,
'criteria':criteria,
'results': results,
'result_maps':result_maps}
summary = read_summary(all_data['root_log_dir'], summary_name)
if _summary_eq(request_summary, summary):
extra_summary = summary.pop('extra_data', {})
# {'data': data, 'unchanged_columns':unchange_columns, 'column_order': new_column_order, 'column_dict':new_column_dict,
# 'hidden_columns': new_hidden_columns, 'status':}
summary_table = generate_summary_table(vertical, horizontals, method, criteria, results, result_maps, selected_data,
all_data['root_log_dir'], all_data['extra_data'], extra_summary)
        # Fix for the bug that bool values cannot be used as dict keys
summary_table = stringify_dict_key(summary_table)
def change_order_keys_to_str(_dict):
for key, value in _dict.copy().items():
if key == 'OrderKeys':
value = list(map(str, value))
_dict[key] = value
if isinstance(value, dict):
change_order_keys_to_str(value)
change_order_keys_to_str(summary_table)
return jsonify(summary_table)
except Exception as e:
print(e)
traceback.print_exc()
return jsonify(status='fail', msg="Please refer to your server for exception reason.")
@summary_page.route('/summary/save_summary', methods=['POST'])
def save_summary_api():
res = check_uuid_summary(all_data['uuid'], request.json['uuid'])
    if res is not None:
return jsonify(res)
summary = request.json['summary']
summary_name = request.json['summary_name']
try:
summary_name = secure_filename(summary_name)
save_summary(all_data['root_log_dir'], summary_name, summary)
return jsonify(status='success', summary_name=summary_name)
    except Exception as e:
        print(_colored_string("Save summary failed.", 'red'))
        print(e)
        traceback.print_exc()
        return jsonify(status='fail', msg='Failed to save summary, check the server log.')
@summary_page.route('/summary/delete_summary', methods=['POST'])
def delete_summary_api():
res = check_uuid_summary(all_data['uuid'], request.json['uuid'])
    if res is not None:
return jsonify(res)
summary_names = request.json['summary_names']
    fail_to_delete = []
    for summary_name in summary_names:
        try:
            flag = delete_summary(all_data['root_log_dir'], summary_name)
            if not flag:
                fail_to_delete.append(summary_name)
        except Exception as e:
            print(_colored_string("Deleting summary {} encountered an error.".format(summary_name), 'red'))
            print(repr(e))
            fail_to_delete.append(summary_name)
if fail_to_delete:
return jsonify(status='fail', msg="Fail to delete {}.".format(fail_to_delete))
else:
return jsonify(status='success')
|
80f728c979fa01aa584d071077bb3178a434132a
|
bb33e6be8316f35decbb2b81badf2b6dcf7df515
|
/source/res/scripts/client/gui/miniclient/login/pointcuts.py
|
b6ecd1b5f1873c8fa4614c1a6fdef0259020b159
|
[] |
no_license
|
StranikS-Scan/WorldOfTanks-Decompiled
|
999c9567de38c32c760ab72c21c00ea7bc20990c
|
d2fe9c195825ececc728e87a02983908b7ea9199
|
refs/heads/1.18
| 2023-08-25T17:39:27.718097
| 2022-09-22T06:49:44
| 2022-09-22T06:49:44
| 148,696,315
| 103
| 39
| null | 2022-09-14T17:50:03
| 2018-09-13T20:49:11
|
Python
|
UTF-8
|
Python
| false
| false
| 385
|
py
|
pointcuts.py
|
# Python bytecode 2.7 (decompiled from Python 2.7)
# Embedded file name: scripts/client/gui/miniclient/login/pointcuts.py
import aspects
from helpers import aop
class ShowBGWallpaper(aop.Pointcut):
def __init__(self):
aop.Pointcut.__init__(self, 'gui.Scaleform.daapi.view.login.login_modes.view_background', 'ViewBackground', 'show$', aspects=(aspects.ShowBGWallpaper,))
|
3abf5544766ad42499ec2e7108188970a5cf104f
|
d32b0c51323e4a2106f82d3fdc81094fb03f989e
|
/guillotina/events.py
|
cc2f4934c485baaa2b2a5093ccc4cf01826cf83b
|
[
"BSD-2-Clause"
] |
permissive
|
plone/guillotina
|
69f768bddc53b397471c3748eee6f01e7db619ab
|
9085dd8b788a5f081db5d799965d39831b2d4ee2
|
refs/heads/master
| 2023-05-01T13:23:30.323625
| 2022-12-12T14:47:56
| 2022-12-12T14:47:56
| 83,574,500
| 185
| 61
|
NOASSERTION
| 2023-04-16T11:58:58
| 2017-03-01T16:12:44
|
Python
|
UTF-8
|
Python
| false
| false
| 8,182
|
py
|
events.py
|
from guillotina.component.interfaces import IObjectEvent
from guillotina.db.orm.interfaces import IBaseObject
from guillotina.interfaces import IAfterAsyncUtilityLoadedEvent
from guillotina.interfaces import IApplicationCleanupEvent
from guillotina.interfaces import IApplicationConfiguredEvent
from guillotina.interfaces import IApplicationEvent
from guillotina.interfaces import IApplicationInitializedEvent
from guillotina.interfaces import IBeforeAsyncUtilityLoadedEvent
from guillotina.interfaces import IBeforeObjectAddedEvent
from guillotina.interfaces import IBeforeObjectModifiedEvent
from guillotina.interfaces import IBeforeObjectMovedEvent
from guillotina.interfaces import IBeforeObjectRemovedEvent
from guillotina.interfaces import IBeforeRenderViewEvent
from guillotina.interfaces import IDatabaseInitializedEvent
from guillotina.interfaces import IFileBeforeFinishUploaded
from guillotina.interfaces import IFileFinishUploaded
from guillotina.interfaces import IFileStartedUpload
from guillotina.interfaces import INewUserAdded
from guillotina.interfaces import IObjectAddedEvent
from guillotina.interfaces import IObjectDuplicatedEvent
from guillotina.interfaces import IObjectLoadedEvent
from guillotina.interfaces import IObjectLocationEvent
from guillotina.interfaces import IObjectModifiedEvent
from guillotina.interfaces import IObjectMovedEvent
from guillotina.interfaces import IObjectPermissionsModifiedEvent
from guillotina.interfaces import IObjectPermissionsViewEvent
from guillotina.interfaces import IObjectRemovedEvent
from guillotina.interfaces import IObjectVisitedEvent
from guillotina.interfaces import IRegistry
from guillotina.interfaces import IRegistryEditedEvent
from guillotina.interfaces import ITraversalMissEvent
from guillotina.interfaces import ITraversalResourceMissEvent
from guillotina.interfaces import ITraversalRouteMissEvent
from guillotina.interfaces import ITraversalViewMissEvent
from guillotina.interfaces import IUserLogin
from guillotina.interfaces import IUserRefreshToken
from guillotina.interfaces import IValidationEvent
from zope.interface import implementer
import typing
@implementer(IObjectEvent)
class ObjectEvent:
def __init__(self, object, **kwargs):
self.object = object
self.data = kwargs
@implementer(IFileStartedUpload)
class FileUploadStartedEvent(ObjectEvent):
pass
@implementer(IFileFinishUploaded)
class FileUploadFinishedEvent(ObjectEvent):
pass
@implementer(IFileBeforeFinishUploaded)
class FileBeforeUploadFinishedEvent(ObjectEvent):
pass
@implementer(IObjectLocationEvent)
class ObjectLocationEvent(ObjectEvent):
"""An object has been moved"""
def __init__(self, object, old_parent, old_name, new_parent, new_name, payload=None):
ObjectEvent.__init__(self, object)
self.old_parent = old_parent
self.old_name = old_name
self.new_parent = new_parent
self.new_name = new_name
self.payload = payload
@implementer(IObjectMovedEvent)
class ObjectMovedEvent(ObjectLocationEvent):
"""An object has been moved"""
@implementer(IBeforeRenderViewEvent)
class BeforeRenderViewEvent:
def __init__(self, request, view):
self.request = request
self.view = view
@implementer(IBeforeObjectMovedEvent)
class BeforeObjectMovedEvent(ObjectLocationEvent):
pass
class BaseAddedEvent(ObjectLocationEvent):
"""An object has been added to a container"""
def __init__(self, object, new_parent=None, new_name=None, payload=None):
if new_parent is None:
new_parent = object.__parent__
if new_name is None:
new_name = object.__name__
super().__init__(object, None, None, new_parent, new_name, payload=payload)
@implementer(IObjectAddedEvent)
class ObjectAddedEvent(BaseAddedEvent):
"""An object has been added to a container"""
@implementer(IObjectDuplicatedEvent)
class ObjectDuplicatedEvent(ObjectAddedEvent):
    def __init__(self, object, original_object, new_parent=None, new_name=None, payload=None):
        super().__init__(object, new_parent, new_name, payload)
        self.original_object = original_object
@implementer(IBeforeObjectAddedEvent)
class BeforeObjectAddedEvent(BaseAddedEvent):
pass
class BaseObjectRemovedEvent(ObjectLocationEvent):
"""An object has been removed from a container"""
def __init__(self, object, old_parent=None, old_name=None, payload=None):
if old_parent is None:
old_parent = object.__parent__
if old_name is None:
old_name = object.__name__
        super().__init__(object, old_parent, old_name, None, None, payload=payload)
@implementer(IObjectRemovedEvent)
class ObjectRemovedEvent(BaseObjectRemovedEvent):
"""An object has been removed from a container"""
@implementer(IBeforeObjectRemovedEvent)
class BeforeObjectRemovedEvent(BaseObjectRemovedEvent):
pass
@implementer(IBeforeObjectModifiedEvent)
class BeforeObjectModifiedEvent(object):
def __init__(self, object, payload=None):
self.object = object
self.payload = payload or {}
@implementer(IObjectModifiedEvent)
class ObjectModifiedEvent(object):
def __init__(self, object, payload=None):
self.object = object
self.payload = payload or {}
@implementer(IObjectLoadedEvent)
class ObjectLoadedEvent(ObjectEvent):
"""An object has been modified."""
@implementer(IObjectVisitedEvent)
class ObjectVisitedEvent(ObjectEvent):
"""An object has been modified."""
@implementer(IObjectPermissionsViewEvent)
class ObjectPermissionsViewEvent(ObjectEvent):
"""An object has been modified."""
@implementer(IObjectPermissionsModifiedEvent)
class ObjectPermissionsModifiedEvent(ObjectModifiedEvent):
"""An object has been modified."""
@implementer(INewUserAdded)
class NewUserAdded(object):
"""An object has been created."""
def __init__(self, user):
self.user = user
@implementer(IUserLogin)
class UserLogin(object):
"""An object has logged in."""
def __init__(self, user, token):
self.user = user
self.token = token
@implementer(IUserRefreshToken)
class UserRefreshToken(object):
"""An object has been created."""
def __init__(self, user, token):
self.user = user
self.token = token
@implementer(IApplicationEvent)
class ApplicationEvent:
def __init__(self, app, loop=None, **kwargs):
self.app = app
self.loop = loop
self.data = kwargs
@implementer(IApplicationConfiguredEvent)
class ApplicationConfiguredEvent(ApplicationEvent):
pass
@implementer(IApplicationInitializedEvent)
class ApplicationInitializedEvent(ApplicationEvent):
pass
@implementer(IApplicationCleanupEvent)
class ApplicationCleanupEvent(ApplicationEvent):
pass
@implementer(ITraversalMissEvent)
class TraversalMissEvent:
def __init__(self, request, tail):
self.request = request
self.tail = tail
@implementer(ITraversalResourceMissEvent)
class TraversalResourceMissEvent(TraversalMissEvent):
pass
@implementer(ITraversalViewMissEvent)
class TraversalViewMissEvent(TraversalMissEvent):
pass
@implementer(ITraversalRouteMissEvent)
class TraversalRouteMissEvent(TraversalMissEvent):
pass
@implementer(IDatabaseInitializedEvent)
class DatabaseInitializedEvent:
def __init__(self, database):
self.database = database
@implementer(IRegistryEditedEvent)
class RegistryEditedEvent(ObjectEvent):
"""
Registry has been edited
"""
def __init__(self, object: IBaseObject, registry: IRegistry, changes: typing.Dict):
        ObjectEvent.__init__(self, object)
        self.registry = registry
        self.changes = changes
@implementer(IBeforeAsyncUtilityLoadedEvent)
class BeforeAsyncUtilityLoadedEvent:
def __init__(self, name, config):
self.name = name
self.config = config
@implementer(IAfterAsyncUtilityLoadedEvent)
class AfterAsyncUtilityLoadedEvent:
def __init__(self, name, config, utility, task):
self.name = name
self.config = config
self.utility = utility
self.task = task
@implementer(IValidationEvent)
class ValidationEvent:
def __init__(self, data):
self.data = data
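# Illustrative only (not part of guillotina): a tiny demo of constructing and
# inspecting one of the events above; `_Folder` is a hypothetical stand-in for
# a real resource object.
if __name__ == '__main__':
    class _Folder:
        pass
    evt = ObjectModifiedEvent(_Folder(), payload={'title': 'New title'})
    assert IObjectModifiedEvent.providedBy(evt)
    print(evt.object, evt.payload)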
|
89c29d742895d436f870526e31cd440b80d56762
|
f509ab9825c542e09b0c6591d86ef1f9feb540a6
|
/pkgs/sdk-pkg/src/genie/libs/sdk/libs/abstracted_libs/nxos/clear_logging.py
|
c33356ba5b2bc3b1c6160f36d418e05d472cdb77
|
[
"Apache-2.0"
] |
permissive
|
CiscoTestAutomation/genielibs
|
97f597117193aaa18028defeb69078ebb241173a
|
e42e51475cddcb10f5c7814d0fe892ac865742ba
|
refs/heads/master
| 2023-08-11T16:39:41.959947
| 2023-07-27T17:58:42
| 2023-07-27T17:58:42
| 130,717,047
| 109
| 60
|
Apache-2.0
| 2023-08-29T22:32:08
| 2018-04-23T15:21:56
|
Python
|
UTF-8
|
Python
| false
| false
| 292
|
py
|
clear_logging.py
|
# python
import logging
log = logging.getLogger(__name__)
class ClearLogging(object):
def clear_logging(self, device):
try:
device.execute('clear logging logfile')
except Exception as e:
self.failed('Failed to clear logging', from_exception=e)
|
4d1c1c41938379219e1acaecf010a4a1a707b013
|
8287ced5b14dd2a0060545b1fb53aa8a8deb878b
|
/precise/annoyance_estimator.py
|
b06f6be415e7db3d2a7a9c76d637623f329f0706
|
[
"Apache-2.0"
] |
permissive
|
MycroftAI/mycroft-precise
|
30fa89e632189e27d9fa2dabcef82a7867d4f6ae
|
e1a635e9675047eb86f64ca489a1b941321c489a
|
refs/heads/dev
| 2023-07-19T06:19:58.303669
| 2020-08-05T02:09:50
| 2020-08-05T02:09:50
| 101,455,393
| 777
| 233
|
Apache-2.0
| 2023-06-22T16:14:39
| 2017-08-26T01:56:48
|
Python
|
UTF-8
|
Python
| false
| false
| 5,562
|
py
|
annoyance_estimator.py
|
# Copyright 2020 Mycroft AI Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from collections import namedtuple
from glob import glob
from os.path import join
import numpy as np
from precise.params import pr
from precise.util import load_audio
from precise.vectorization import vectorize_raw
AnnoyanceEstimate = namedtuple(
'AnnoyanceEstimate',
'annoyance ww_annoyance nww_annoyance threshold'
)
class AnnoyanceEstimator:
"""
This class attempts to estimate the "annoyance" of a user
of a given network. It models annoyance as follows:
Annoyance from false negatives (not activating when it should):
We assume that the annoyance incurred by each subsequent failed
activation attempt is double that of the previous attempt. ie.
two failed activations causes 1 + 2 = 3 annoyance units but three
failed activations causes 1 + 2 + 4 = 7 annoyance units.
Annoyance from false positives (activating when it should not):
We assume that each false positive incurs some constant annoyance
With this, we can compute net annoyance from false positives
    and negatives individually, and combine them for the total annoyance.
Finally, we can recompute this annoyance for each threshold
value to find the threshold that yields the lowest net annoyance
"""
def __init__(self, model, interaction_estimate, ambient_annoyance):
self.thresholds = 1 / (1 + np.exp(-np.linspace(-20, 20, 1000)))
self.interaction_estimate = interaction_estimate
self.ambient_annoyance = ambient_annoyance
def compute_nww_annoyances(self, model, noise_folder, batch_size):
"""
Given some number, x, of ambient activations per hour, we can
compute the annoyance per day from false positives as 24 * x
times the annoyance incurred per ambient activation.
"""
nww_seconds = 0.0
nww_buckets = np.zeros_like(self.thresholds)
for i in glob(join(noise_folder, '*.wav')):
print('Evaluating ambient activations on {}...'.format(i))
inputs, audio_len = self._load_inputs(i)
nww_seconds += audio_len / pr.sample_rate
ambient_predictions = model.predict(inputs, batch_size=batch_size)
del inputs
nww_buckets += (ambient_predictions.reshape((-1, 1))
> self.thresholds.reshape((1, -1))).sum(axis=0)
nww_acts_per_hour = nww_buckets * 60 * 60 / nww_seconds
return self.ambient_annoyance * nww_acts_per_hour * 24
def compute_ww_annoyances(self, ww_predictions):
"""
Given some proportion, p, of not recognizing the wake word, our
        total annoyance per interaction is modelled by the geometric series
        (2p)^1 + (2p)^2 + ... which converges to 1 / (1 - 2p) - 1 for p < 0.5.
Given some number of interactions per day we can then find the
expected annoyance per day from false negatives.
"""
ww_buckets = (ww_predictions.reshape((-1, 1)) >
self.thresholds.reshape((1, -1))).sum(axis=0)
ww_fail_ratios = 1 - ww_buckets / len(ww_predictions)
# Performs 1 / (1 - 2 * ww_fail_ratios) - 1, handling edge case
ann_per_interaction = np.divide(
1, 1 - 2 * ww_fail_ratios,
where=ww_fail_ratios < 0.5
) - 1
ann_per_interaction[ww_fail_ratios >= 0.5] = float('inf')
return self.interaction_estimate * ann_per_interaction
def estimate(self, model, predictions, targets, noise_folder, batch_size):
"""
Estimates the annoyance a model incurs according to the model
described in the class documentation
"""
ww_predictions = predictions[np.where(targets > 0.5)]
ww_annoyances = self.compute_ww_annoyances(ww_predictions)
nww_annoyances = self.compute_nww_annoyances(
model, noise_folder, batch_size
)
annoyance_by_threshold = ww_annoyances + nww_annoyances
best_threshold_id = np.argmin(annoyance_by_threshold)
min_annoyance = annoyance_by_threshold[best_threshold_id]
return AnnoyanceEstimate(
annoyance=min_annoyance,
ww_annoyance=ww_annoyances[best_threshold_id],
nww_annoyance=nww_annoyances[best_threshold_id],
threshold=self.thresholds[best_threshold_id]
)
@staticmethod
def _load_inputs(audio_file, chunk_size=4096):
"""
Loads network inputs from an audio file without caching
Handles data conservatively in case the audio file is large
Args:
audio_file: Filename to load
            chunk_size: Samples to skip forward when loading network inputs
"""
audio = load_audio(audio_file)
audio_len = len(audio)
mfccs = vectorize_raw(audio)
del audio
mfcc_hops = chunk_size // pr.hop_samples
return np.array([
mfccs[i - pr.n_features:i] for i in range(pr.n_features, len(mfccs), mfcc_hops)
]), audio_len
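# Illustrative only (not part of the library): a quick numeric check that the
# closed form used in compute_ww_annoyances matches the geometric series
# sum over i >= 1 of (2p)^i for p < 0.5.
if __name__ == '__main__':
    p = 0.1
    closed_form = 1 / (1 - 2 * p) - 1  # expected annoyance per interaction
    series = sum((2 * p) ** i for i in range(1, 60))  # truncated series
    assert abs(closed_form - series) < 1e-9
    print('closed form:', closed_form, 'series:', series)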
|
923511f71c43d12790f86c4ed3a938b82011fa44
|
4bcc9806152542ab43fc2cf47c499424f200896c
|
/tensorflow/python/framework/summary_test_util.py
|
caaa255f48af3aa576666e7ab517b48b35e26d25
|
[
"Apache-2.0",
"LicenseRef-scancode-generic-cla",
"BSD-2-Clause"
] |
permissive
|
tensorflow/tensorflow
|
906276dbafcc70a941026aa5dc50425ef71ee282
|
a7f3934a67900720af3d3b15389551483bee50b8
|
refs/heads/master
| 2023-08-25T04:24:41.611870
| 2023-08-25T04:06:24
| 2023-08-25T04:14:08
| 45,717,250
| 208,740
| 109,943
|
Apache-2.0
| 2023-09-14T20:55:50
| 2015-11-07T01:19:20
|
C++
|
UTF-8
|
Python
| false
| false
| 1,812
|
py
|
summary_test_util.py
|
# Copyright 2022 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utilities to test summaries."""
import os
from tensorflow.core.util import event_pb2
from tensorflow.python.lib.io import tf_record
from tensorflow.python.platform import gfile
def events_from_file(filepath):
"""Returns all events in a single event file.
Args:
filepath: Path to the event file.
Returns:
A list of all tf.compat.v1.Event protos in the event file.
"""
records = list(tf_record.tf_record_iterator(filepath))
result = []
for r in records:
event = event_pb2.Event()
event.ParseFromString(r)
result.append(event)
return result
def events_from_logdir(logdir):
"""Returns all events in the single eventfile in logdir.
Args:
logdir: The directory in which the single event file is sought.
Returns:
A list of all tf.compat.v1.Event protos from the single event file.
Raises:
AssertionError: If logdir does not contain exactly one file.
"""
assert gfile.Exists(logdir)
files = gfile.ListDirectory(logdir)
  assert len(files) == 1, 'Expected exactly one file in logdir, found: %s' % files
return events_from_file(os.path.join(logdir, files[0]))
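# Illustrative only (not part of TensorFlow): a minimal sketch of exercising
# events_from_logdir via the public tf.summary API; the logdir is hypothetical.
#
#   import tensorflow as tf
#   writer = tf.summary.create_file_writer('/tmp/summary_demo')
#   with writer.as_default():
#     tf.summary.scalar('loss', 0.5, step=0)
#   writer.flush()
#   events = events_from_logdir('/tmp/summary_demo')  # events[0] is the header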
|
2269fa5a84be4a56726a6e43bb83e12fa68714b7
|
30112ee67896c0cf08940cb01febc0e56774e9c9
|
/lib/datasets/__init__.py
|
91a9be629c828727ee5b1b132c80e7f839cae110
|
[
"BSD-3-Clause",
"MIT"
] |
permissive
|
HRNet/HRNet-Semantic-Segmentation
|
543d1d9de2e9be03aa8478f515631f2c4d9292c2
|
0bbb2880446ddff2d78f8dd7e8c4c610151d5a51
|
refs/heads/HRNet-OCR
| 2023-05-30T23:08:16.903008
| 2021-05-04T11:21:17
| 2021-05-04T11:21:17
| 180,372,347
| 3,173
| 825
|
NOASSERTION
| 2022-11-17T13:07:11
| 2019-04-09T13:24:09
|
Python
|
UTF-8
|
Python
| false
| false
| 588
|
py
|
__init__.py
|
# ------------------------------------------------------------------------------
# Copyright (c) Microsoft
# Licensed under the MIT License.
# Written by Ke Sun (sunk@mail.ustc.edu.cn)
# ------------------------------------------------------------------------------
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from .cityscapes import Cityscapes as cityscapes
from .lip import LIP as lip
from .pascal_ctx import PASCALContext as pascal_ctx
from .ade20k import ADE20K as ade20k
from .cocostuff import COCOStuff as cocostuff
|
a9f1bc90d49660fbb68aa391dd5a3ede5e91eb17
|
dac12c9178b13d60f401c4febff5569af8aa2719
|
/cvat/apps/events/export.py
|
07adc468570220ca39214d3836f9f139a0e933fd
|
[
"LGPL-2.0-or-later",
"MIT",
"GPL-1.0-or-later"
] |
permissive
|
opencv/cvat
|
39dc66ca20f972ba40b79c44d7ce43590dc0b0b5
|
899c9fd75146744def061efd7ab1b1c6c9f6942f
|
refs/heads/develop
| 2023-08-19T04:27:56.974498
| 2023-08-18T09:58:25
| 2023-08-18T09:58:25
| 139,156,354
| 6,558
| 1,887
|
MIT
| 2023-09-14T12:44:39
| 2018-06-29T14:02:45
|
TypeScript
|
UTF-8
|
Python
| false
| false
| 6,020
|
py
|
export.py
|
# Copyright (C) 2023 CVAT.ai Corporation
#
# SPDX-License-Identifier: MIT
import os
import csv
from datetime import datetime, timedelta, timezone
from dateutil import parser
import uuid
import django_rq
from django.conf import settings
import clickhouse_connect
from rest_framework import serializers, status
from rest_framework.response import Response
from cvat.apps.dataset_manager.views import clear_export_cache, log_exception
from cvat.apps.engine.log import slogger
from cvat.apps.engine.utils import sendfile
DEFAULT_CACHE_TTL = timedelta(hours=1)
def _create_csv(query_params, output_filename, cache_ttl):
try:
clickhouse_settings = settings.CLICKHOUSE['events']
time_filter = {
'from': query_params.pop('from'),
'to': query_params.pop('to'),
}
query = "SELECT * FROM events"
conditions = []
parameters = {}
        if time_filter['from']:
            conditions.append("timestamp >= {from:DateTime64}")
            parameters['from'] = time_filter['from']
        if time_filter['to']:
            conditions.append("timestamp <= {to:DateTime64}")
            parameters['to'] = time_filter['to']
for param, value in query_params.items():
if value:
conditions.append(f"{param} = {{{param}:UInt64}}")
parameters[param] = value
if conditions:
query += " WHERE " + " AND ".join(conditions)
query += " ORDER BY timestamp ASC"
with clickhouse_connect.get_client(
host=clickhouse_settings['HOST'],
database=clickhouse_settings['NAME'],
port=clickhouse_settings['PORT'],
username=clickhouse_settings['USER'],
password=clickhouse_settings['PASSWORD'],
) as client:
result = client.query(query, parameters=parameters)
with open(output_filename, 'w', encoding='UTF8') as f:
writer = csv.writer(f)
writer.writerow(result.column_names)
writer.writerows(result.result_rows)
archive_ctime = os.path.getctime(output_filename)
scheduler = django_rq.get_scheduler(settings.CVAT_QUEUES.EXPORT_DATA.value)
cleaning_job = scheduler.enqueue_in(time_delta=cache_ttl,
func=clear_export_cache,
file_path=output_filename,
file_ctime=archive_ctime,
)
slogger.glob.info(
f"The {output_filename} is created "
f"and available for downloading for the next {cache_ttl}. "
f"Export cache cleaning job is enqueued, id '{cleaning_job.id}'"
)
return output_filename
except Exception:
log_exception(slogger.glob)
raise
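# Illustrative only: for filter_query={'org_id': 1} plus an explicit time range,
# _create_csv above builds a parameterized ClickHouse query of the form
#     SELECT * FROM events
#     WHERE timestamp >= {from:DateTime64} AND timestamp <= {to:DateTime64}
#       AND org_id = {org_id:UInt64}
#     ORDER BY timestamp ASC
# with the actual values passed separately via `parameters`, never interpolated
# into the SQL string itself.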
def export(request, filter_query, queue_name):
action = request.query_params.get('action', None)
filename = request.query_params.get('filename', None)
query_params = {
'org_id': filter_query.get('org_id', None),
'project_id': filter_query.get('project_id', None),
'task_id': filter_query.get('task_id', None),
'job_id': filter_query.get('job_id', None),
'user_id': filter_query.get('user_id', None),
'from': filter_query.get('from', None),
'to': filter_query.get('to', None),
}
try:
if query_params['from']:
query_params['from'] = parser.parse(query_params['from']).timestamp()
except parser.ParserError:
raise serializers.ValidationError(
f"Cannot parse 'from' datetime parameter: {query_params['from']}"
)
try:
if query_params['to']:
query_params['to'] = parser.parse(query_params['to']).timestamp()
except parser.ParserError:
raise serializers.ValidationError(
f"Cannot parse 'to' datetime parameter: {query_params['to']}"
)
if query_params['from'] and query_params['to'] and query_params['from'] > query_params['to']:
raise serializers.ValidationError("'from' must be before than 'to'")
# Set the default time interval to last 30 days
if not query_params["from"] and not query_params["to"]:
query_params["to"] = datetime.now(timezone.utc)
query_params["from"] = query_params["to"] - timedelta(days=30)
if action not in (None, 'download'):
raise serializers.ValidationError(
"Unexpected action specified for the request")
query_id = request.query_params.get('query_id', None) or uuid.uuid4()
rq_id = f"export:csv-logs-{query_id}-by-{request.user}"
response_data = {
'query_id': query_id,
}
queue = django_rq.get_queue(queue_name)
rq_job = queue.fetch_job(rq_id)
if rq_job:
if rq_job.is_finished:
file_path = rq_job.return_value
if action == "download" and os.path.exists(file_path):
rq_job.delete()
timestamp = datetime.strftime(datetime.now(), "%Y_%m_%d_%H_%M_%S")
filename = filename or f"logs_{timestamp}.csv"
return sendfile(request, file_path, attachment=True,
attachment_filename=filename)
else:
if os.path.exists(file_path):
return Response(status=status.HTTP_201_CREATED)
elif rq_job.is_failed:
exc_info = rq_job.meta.get('formatted_exception', str(rq_job.exc_info))
rq_job.delete()
return Response(exc_info,
status=status.HTTP_500_INTERNAL_SERVER_ERROR)
else:
return Response(data=response_data, status=status.HTTP_202_ACCEPTED)
ttl = DEFAULT_CACHE_TTL.total_seconds()
output_filename = os.path.join(settings.TMP_FILES_ROOT, f"{query_id}.csv")
queue.enqueue_call(
func=_create_csv,
args=(query_params, output_filename, DEFAULT_CACHE_TTL),
job_id=rq_id,
meta={},
result_ttl=ttl, failure_ttl=ttl)
return Response(data=response_data, status=status.HTTP_202_ACCEPTED)
|
240f8fd0798eb346dd3373da6e7ae31f47788200
|
d2943a542496fe6060f471cf511735a1b8c3ada9
|
/qualcoder/GUI/ui_dialog_report_compare_coder_file.py
|
4c49a5e4f1b4f1fd1c932333ad631a4fcbb84270
|
[
"MIT"
] |
permissive
|
ccbogel/QualCoder
|
7dc7f5e2aac077ee16f39eb45ebc82bd884fc735
|
87e4f6d8dd9287c7b3558000af10e6b16f7b955b
|
refs/heads/master
| 2023-09-04T23:04:05.254037
| 2023-08-30T22:09:55
| 2023-08-30T22:09:55
| 165,788,605
| 264
| 56
|
MIT
| 2023-07-18T22:11:30
| 2019-01-15T05:04:43
|
Python
|
UTF-8
|
Python
| false
| false
| 7,129
|
py
|
ui_dialog_report_compare_coder_file.py
|
# Form implementation generated from reading ui file 'ui_dialog_report_compare_coder_file.ui'
#
# Created by: PyQt6 UI code generator 6.3.1
#
# WARNING: Any manual changes made to this file will be lost when pyuic6 is
# run again. Do not edit this file unless you know what you are doing.
from PyQt6 import QtCore, QtGui, QtWidgets
class Ui_Dialog_reportCompareCoderFile(object):
def setupUi(self, Dialog_reportCompareCoderFile):
Dialog_reportCompareCoderFile.setObjectName("Dialog_reportCompareCoderFile")
Dialog_reportCompareCoderFile.setWindowModality(QtCore.Qt.WindowModality.NonModal)
Dialog_reportCompareCoderFile.resize(989, 580)
self.verticalLayout = QtWidgets.QVBoxLayout(Dialog_reportCompareCoderFile)
self.verticalLayout.setContentsMargins(1, 1, 1, 1)
self.verticalLayout.setSpacing(1)
self.verticalLayout.setObjectName("verticalLayout")
self.groupBox = QtWidgets.QGroupBox(Dialog_reportCompareCoderFile)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Policy.Preferred, QtWidgets.QSizePolicy.Policy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.groupBox.sizePolicy().hasHeightForWidth())
self.groupBox.setSizePolicy(sizePolicy)
self.groupBox.setMinimumSize(QtCore.QSize(0, 100))
self.groupBox.setMaximumSize(QtCore.QSize(16777215, 100))
self.groupBox.setTitle("")
self.groupBox.setObjectName("groupBox")
self.label_2 = QtWidgets.QLabel(self.groupBox)
self.label_2.setGeometry(QtCore.QRect(10, 27, 101, 28))
self.label_2.setObjectName("label_2")
self.comboBox_coders = QtWidgets.QComboBox(self.groupBox)
self.comboBox_coders.setGeometry(QtCore.QRect(115, 27, 211, 28))
self.comboBox_coders.setObjectName("comboBox_coders")
self.label_title = QtWidgets.QLabel(self.groupBox)
self.label_title.setGeometry(QtCore.QRect(10, -1, 291, 24))
self.label_title.setObjectName("label_title")
self.label_selections = QtWidgets.QLabel(self.groupBox)
self.label_selections.setGeometry(QtCore.QRect(333, 27, 400, 28))
self.label_selections.setObjectName("label_selections")
self.pushButton_clear = QtWidgets.QPushButton(self.groupBox)
self.pushButton_clear.setGeometry(QtCore.QRect(50, 62, 32, 32))
self.pushButton_clear.setText("")
self.pushButton_clear.setObjectName("pushButton_clear")
self.pushButton_export_odt = QtWidgets.QPushButton(self.groupBox)
self.pushButton_export_odt.setGeometry(QtCore.QRect(90, 62, 32, 32))
self.pushButton_export_odt.setText("")
self.pushButton_export_odt.setObjectName("pushButton_export_odt")
self.pushButton_run = QtWidgets.QPushButton(self.groupBox)
self.pushButton_run.setGeometry(QtCore.QRect(10, 62, 32, 32))
self.pushButton_run.setText("")
self.pushButton_run.setObjectName("pushButton_run")
self.pushButton_help1 = QtWidgets.QPushButton(self.groupBox)
self.pushButton_help1.setGeometry(QtCore.QRect(130, 62, 32, 32))
self.pushButton_help1.setText("")
self.pushButton_help1.setObjectName("pushButton_help1")
self.verticalLayout.addWidget(self.groupBox)
self.groupBox_2 = QtWidgets.QGroupBox(Dialog_reportCompareCoderFile)
self.groupBox_2.setTitle("")
self.groupBox_2.setObjectName("groupBox_2")
self.gridLayout = QtWidgets.QGridLayout(self.groupBox_2)
self.gridLayout.setContentsMargins(0, 0, 0, 0)
self.gridLayout.setSpacing(0)
self.gridLayout.setObjectName("gridLayout")
self.splitter = QtWidgets.QSplitter(self.groupBox_2)
self.splitter.setOrientation(QtCore.Qt.Orientation.Horizontal)
self.splitter.setObjectName("splitter")
self.splitter_vert = QtWidgets.QSplitter(self.splitter)
self.splitter_vert.setOrientation(QtCore.Qt.Orientation.Vertical)
self.splitter_vert.setObjectName("splitter_vert")
self.listWidget_files = QtWidgets.QListWidget(self.splitter_vert)
self.listWidget_files.setObjectName("listWidget_files")
self.treeWidget = QtWidgets.QTreeWidget(self.splitter_vert)
self.treeWidget.setObjectName("treeWidget")
self.treeWidget.headerItem().setText(0, "Code Tree")
self.textEdit = QtWidgets.QTextEdit(self.splitter)
self.textEdit.setReadOnly(True)
self.textEdit.setObjectName("textEdit")
self.gridLayout.addWidget(self.splitter, 0, 0, 1, 1)
self.verticalLayout.addWidget(self.groupBox_2)
self.retranslateUi(Dialog_reportCompareCoderFile)
QtCore.QMetaObject.connectSlotsByName(Dialog_reportCompareCoderFile)
Dialog_reportCompareCoderFile.setTabOrder(self.comboBox_coders, self.pushButton_run)
Dialog_reportCompareCoderFile.setTabOrder(self.pushButton_run, self.pushButton_clear)
Dialog_reportCompareCoderFile.setTabOrder(self.pushButton_clear, self.pushButton_export_odt)
Dialog_reportCompareCoderFile.setTabOrder(self.pushButton_export_odt, self.pushButton_help1)
Dialog_reportCompareCoderFile.setTabOrder(self.pushButton_help1, self.listWidget_files)
Dialog_reportCompareCoderFile.setTabOrder(self.listWidget_files, self.treeWidget)
Dialog_reportCompareCoderFile.setTabOrder(self.treeWidget, self.textEdit)
def retranslateUi(self, Dialog_reportCompareCoderFile):
_translate = QtCore.QCoreApplication.translate
Dialog_reportCompareCoderFile.setWindowTitle(_translate("Dialog_reportCompareCoderFile", "Reports"))
self.label_2.setText(_translate("Dialog_reportCompareCoderFile", "Coders:"))
self.comboBox_coders.setToolTip(_translate("Dialog_reportCompareCoderFile", "<html><head/><body><p>Select two coders</p></body></html>"))
self.label_title.setToolTip(_translate("Dialog_reportCompareCoderFile", "To compare coding.\n"
"Select two coders, one file, one code."))
self.label_title.setText(_translate("Dialog_reportCompareCoderFile", "Coder comparisons by file"))
self.label_selections.setText(_translate("Dialog_reportCompareCoderFile", "Coders selected"))
self.pushButton_clear.setToolTip(_translate("Dialog_reportCompareCoderFile", "<html><head/><body><p>Clear selection</p></body></html>"))
self.pushButton_export_odt.setToolTip(_translate("Dialog_reportCompareCoderFile", "Export ODT file"))
self.pushButton_run.setToolTip(_translate("Dialog_reportCompareCoderFile", "<html><head/><body><p>Run comparison.</p><p>Select 2 coders, 1 file, 1 code.</p></body></html>"))
self.pushButton_help1.setToolTip(_translate("Dialog_reportCompareCoderFile", "Statistics explanation"))
if __name__ == "__main__":
import sys
app = QtWidgets.QApplication(sys.argv)
Dialog_reportCompareCoderFile = QtWidgets.QDialog()
ui = Ui_Dialog_reportCompareCoderFile()
ui.setupUi(Dialog_reportCompareCoderFile)
Dialog_reportCompareCoderFile.show()
sys.exit(app.exec())
|
9e53c8cbf7c3c158ddb4b090a88e6ea7d3b66088
|
de7db88ca700cb8d1c5f1cf64f8b181c4073cc1f
|
/decompiler/codegen.py
|
72747488742d1fec110f67b5402b94e51c9c0032
|
[
"MIT"
] |
permissive
|
CensoredUsername/unrpyc
|
b77759eb2b976a8b33d8f4ec5f8624a05935a681
|
2f9810c104d88982e7a63e9efbb52a33a9f76032
|
refs/heads/master
| 2023-07-05T21:13:30.807565
| 2022-03-31T22:50:34
| 2022-03-31T22:50:34
| 3,176,344
| 708
| 168
|
NOASSERTION
| 2022-12-04T20:45:15
| 2012-01-14T04:00:32
|
Python
|
UTF-8
|
Python
| false
| false
| 37,803
|
py
|
codegen.py
|
# License
# Copyright (c) 2008, Armin Ronacher
# All rights reserved.
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
# - Redistributions of source code must retain the above copyright notice, this list of
# conditions and the following disclaimer.
# - Redistributions in binary form must reproduce the above copyright notice, this list of
# conditions and the following disclaimer in the documentation and/or other materials
# provided with the distribution.
# - Neither the name of the <ORGANIZATION> nor the names of its contributors may be used to
# endorse or promote products derived from this software without specific prior written
# permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR
# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
# FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
# IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF
# THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# Taken from http://github.com/jonathaneunice/codegen
"""
codegen
~~~~~~~
Extension to ast that allow ast -> python code generation.
:copyright: Copyright 2008 by Armin Ronacher.
:license: BSD.
"""
import sys
PY3 = sys.version_info >= (3, 0)
# These might not exist, so we put them equal to NoneType
Try = TryExcept = TryFinally = YieldFrom = MatMult = Await = type(None)
from ast import *
class Sep(object):
# Performs the common pattern of returning a different symbol the first
# time the object is called
def __init__(self, last, first=''):
self.last = last
self.first = first
self.begin = True
def __call__(self):
if self.begin:
self.begin = False
return self.first
return self.last
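# Illustrative only: Sep yields `first` on the first call and `last` afterwards,
# which keeps separator bookkeeping out of loop bodies:
#
#     sep = Sep(', ')
#     out = ''.join(sep() + item for item in ['a', 'b', 'c'])  # 'a, b, c'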
def to_source(node, indent_with=' ' * 4, add_line_information=False, correct_line_numbers=False):
"""This function can convert a node tree back into python sourcecode.
This is useful for debugging purposes, especially if you're dealing with
custom asts not generated by python itself.
It could be that the sourcecode is evaluable when the AST itself is not
compilable / evaluable. The reason for this is that the AST contains some
more data than regular sourcecode does, which is dropped during
conversion.
Each level of indentation is replaced with `indent_with`. Per default this
parameter is equal to four spaces as suggested by PEP 8, but it might be
adjusted to match the application's styleguide.
If `add_line_information` is set to `True` comments for the line numbers
of the nodes are added to the output. This can be used to spot wrong line
number information of statement nodes.
"""
if correct_line_numbers:
if hasattr(node, 'lineno'):
return SourceGenerator(indent_with, add_line_information, True, node.lineno).process(node)
else:
return SourceGenerator(indent_with, add_line_information, True).process(node)
else:
return SourceGenerator(indent_with, add_line_information).process(node)
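# Illustrative only: a minimal round-trip through the standard `ast` module
# (`parse` is available here via the `from ast import *` above):
#
#     tree = parse('x = [i ** 2 for i in range(10) if i % 2]')
#     print(to_source(tree))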
class SourceGenerator(NodeVisitor):
"""This visitor is able to transform a well formed syntax tree into python
sourcecode. For more details have a look at the docstring of the
`node_to_source` function.
"""
COMMA = ', '
COLON = ': '
ASSIGN = ' = '
SEMICOLON = '; '
ARROW = ' -> '
BOOLOP_SYMBOLS = {
And: (' and ', 5),
Or: (' or ', 4)
}
BINOP_SYMBOLS = {
Add: (' + ', 12),
Sub: (' - ', 12),
Mult: (' * ', 13),
MatMult: (' @ ', 13),
Div: (' / ', 13),
FloorDiv: (' // ', 13),
Mod: (' % ', 13),
Pow: (' ** ', 15),
LShift: (' << ', 11),
RShift: (' >> ', 11),
BitOr: (' | ', 8),
BitAnd: (' & ', 10),
BitXor: (' ^ ', 9)
}
CMPOP_SYMBOLS = {
Eq: (' == ', 7),
Gt: (' > ', 7),
GtE: (' >= ', 7),
In: (' in ', 7),
Is: (' is ', 7),
IsNot: (' is not ', 7),
Lt: (' < ', 7),
LtE: (' <= ', 7),
NotEq: (' != ', 7),
NotIn: (' not in ', 7)
}
UNARYOP_SYMBOLS = {
Invert: ('~', 14),
Not: ('not ', 6),
UAdd: ('+', 14),
USub: ('-', 14)
}
BLOCK_NODES = (If, For, While, With, Try, TryExcept, TryFinally,
FunctionDef, ClassDef)
def __init__(self, indent_with, add_line_information=False, correct_line_numbers=False, line_number=1):
self.result = []
self.indent_with = indent_with
self.add_line_information = add_line_information
self.indentation = 0
self.new_lines = 0
# precedence_stack: what precedence level are we on, could we safely newline before and is this operator left-to-right
self.precedence_stack = [[0, False, None]]
self.correct_line_numbers = correct_line_numbers
# The current line number we *think* we are on. As in it's most likely
# the line number of the last node we passed which can differ when
# the ast is broken
self.line_number = line_number
# Can we insert a newline here without having to escape it?
# (are we between delimiting characters)
self.can_newline = False
# after a colon, we don't have to print a semi colon. set to 1 when self.body() is called,
# set to 2 or 0 when it's actually used. set to 0 at the end of the body
self.after_colon = 0
# reset by a call to self.newline, set by the first call to write() afterwards
# determines if we have to print the newlines and indent
self.indented = False
# the amount of newlines to be printed
self.newlines = 0
# force the printing of a proper newline (and not a semicolon)
self.force_newline = False
def process(self, node):
self.visit(node)
result = ''.join(self.result)
self.result = []
return result
# Precedence management
def prec_start(self, value, ltr=None):
newline = self.can_newline
if value < self.precedence_stack[-1][0]:
self.write('(')
self.can_newline = True
        if ltr is False:
value += 1
self.precedence_stack.append([value, newline, ltr])
def prec_middle(self, level=None):
if level is not None:
self.precedence_stack[-1][0] = level
elif self.precedence_stack[-1][2]:
self.precedence_stack[-1][0] += 1
elif self.precedence_stack[-1][2] is False:
self.precedence_stack[-1][0] -= 1
def prec_end(self):
precedence, newline, ltr = self.precedence_stack.pop()
if ltr:
precedence -= 1
if precedence < self.precedence_stack[-1][0]:
self.write(')')
self.can_newline = newline
def paren_start(self, symbol='('):
self.precedence_stack.append([0, self.can_newline, None])
self.write(symbol)
self.can_newline = True
def paren_end(self, symbol=')'):
_, self.can_newline, _ = self.precedence_stack.pop()
self.write(symbol)
# convenience functions
def write(self, x):
# ignore empty writes
if not x:
return
# Before we write, we must check if newlines have been queued.
# If this is the case, we have to handle them properly
if self.correct_line_numbers:
if not self.indented:
self.new_lines = max(self.new_lines, 1 if self.force_newline else 0)
self.force_newline = False
if self.new_lines:
# we have new lines to print
if self.after_colon == 2:
self.result.append(';'+'\\\n' * self.new_lines)
else:
self.after_colon = 0
self.result.append('\n' * self.new_lines)
self.result.append(self.indent_with * self.indentation)
elif self.after_colon == 1:
# we're directly after a block-having statement and can write on the same line
self.after_colon = 2
self.result.append(' ')
elif self.result:
# we're after any statement. or at the start of the file
self.result.append(self.SEMICOLON)
self.indented = True
elif self.new_lines > 0:
if self.can_newline:
self.result.append('\n' * self.new_lines)
self.result.append(self.indent_with * (self.indentation + 1))
else:
self.result.append('\\\n' * self.new_lines)
self.result.append(self.indent_with * (self.indentation + 1))
self.new_lines = 0
elif self.new_lines:
# normal behaviour
self.result.append('\n' * self.new_lines)
self.result.append(self.indent_with * self.indentation)
self.new_lines = 0
self.result.append(x)
def newline(self, node=None, extra=0, force=False):
if not self.correct_line_numbers:
self.new_lines = max(self.new_lines, 1 + extra)
if not self.result:
self.new_lines = 0
if node is not None and self.add_line_information:
self.write('# line: %s' % node.lineno)
self.new_lines = 1
else:
if extra:
#Ignore extra
return
self.indented = False
if node is None:
# else/finally statement. insert one true newline. body is implicit
self.force_newline = True
self.new_lines += 1
self.line_number += 1
elif force:
# statement with a block: needs a true newline before it
self.force_newline = True
self.new_lines += node.lineno - self.line_number
self.line_number = node.lineno
else:
# block-less statement: needs a semicolon, colon, or newline in front of it
self.new_lines += node.lineno - self.line_number
self.line_number = node.lineno
def maybe_break(self, node):
if self.correct_line_numbers:
self.new_lines += node.lineno - self.line_number
self.line_number = node.lineno
def body(self, statements):
self.force_newline = any(isinstance(i, self.BLOCK_NODES) for i in statements)
self.indentation += 1
self.after_colon = 1
for stmt in statements:
self.visit(stmt)
self.indentation -= 1
self.force_newline = True
self.after_colon = 0 # do empty blocks even exist?
def body_or_else(self, node):
self.body(node.body)
if node.orelse:
self.newline()
self.write('else:')
self.body(node.orelse)
def visit_bare(self, node):
# this node is allowed to be a bare tuple
if isinstance(node, Tuple):
self.visit_Tuple(node, False)
else:
self.visit(node)
def visit_bareyield(self, node):
if isinstance(node, Yield):
self.visit_Yield(node, False)
elif isinstance(node, YieldFrom):
self.visit_YieldFrom(node, False)
else:
self.visit_bare(node)
def decorators(self, node):
for decorator in node.decorator_list:
self.newline(decorator, force=True)
self.write('@')
self.visit(decorator)
if node.decorator_list:
self.newline()
else:
self.newline(node, force=True)
# Module
def visit_Module(self, node):
self.generic_visit(node)
self.write('\n')
self.line_number += 1
# Statements
def visit_Assert(self, node):
self.newline(node)
self.write('assert ')
self.visit(node.test)
if node.msg:
self.write(self.COMMA)
self.visit(node.msg)
def visit_Assign(self, node):
self.newline(node)
for target in node.targets:
self.visit_bare(target)
self.write(self.ASSIGN)
self.visit_bareyield(node.value)
def visit_AugAssign(self, node):
self.newline(node)
self.visit_bare(node.target)
self.write(self.BINOP_SYMBOLS[type(node.op)][0].rstrip() + self.ASSIGN.lstrip())
self.visit_bareyield(node.value)
def visit_Await(self, node):
self.maybe_break(node)
self.prec_start(16, True)
self.prec_middle()
self.write('await ')
self.visit(node.value)
self.prec_end()
def visit_ImportFrom(self, node):
self.newline(node)
self.write('from ')
self.write('%s%s' % ('.' * node.level, node.module or ''))
self.write(' import ')
sep = Sep(self.COMMA)
for item in node.names:
self.write(sep())
self.visit(item)
def visit_Import(self, node):
self.newline(node)
self.write('import ')
sep = Sep(self.COMMA)
for item in node.names:
self.write(sep())
self.visit(item)
def visit_Exec(self, node):
self.newline(node)
self.write('exec ')
self.visit(node.body)
if node.globals:
self.write(' in ')
self.visit(node.globals)
if node.locals:
self.write(self.COMMA)
self.visit(node.locals)
def visit_Expr(self, node):
self.newline(node)
self.visit_bareyield(node.value)
def visit_AsyncFunctionDef(self, node):
self.visit_FunctionDef(node, True)
    def visit_FunctionDef(self, node, is_async=False):
        # `async` is a reserved word on python 3.7+, so the flag is named is_async
        self.newline(extra=1)
        # first decorator line number will be used
        self.decorators(node)
        if is_async:
            self.write('async ')
self.write('def ')
self.write(node.name)
self.paren_start()
self.visit_arguments(node.args)
self.paren_end()
if hasattr(node, 'returns') and node.returns is not None:
self.write(self.ARROW)
self.visit(node.returns)
self.write(':')
self.body(node.body)
def visit_arguments(self, node):
sep = Sep(self.COMMA)
padding = [None] * (len(node.args) - len(node.defaults))
if hasattr(node, 'kwonlyargs'):
for arg, default in zip(node.args, padding + node.defaults):
self.write(sep())
self.visit(arg)
if default is not None:
self.write('=')
self.visit(default)
if node.vararg is not None:
self.write(sep())
if hasattr(node, 'varargannotation'):
if node.varargannotation is None:
self.write('*' + node.vararg)
else:
self.maybe_break(node.varargannotation)
self.write('*' + node.vararg)
self.write(self.COLON)
self.visit(node.varargannotation)
else:
self.maybe_break(node.vararg)
self.write('*')
self.visit(node.vararg)
elif node.kwonlyargs:
self.write(sep() + '*')
for arg, default in zip(node.kwonlyargs, node.kw_defaults):
self.write(sep())
self.visit(arg)
if default is not None:
self.write('=')
self.visit(default)
if node.kwarg is not None:
self.write(sep())
if hasattr(node, 'kwargannotation'):
if node.kwargannotation is None:
self.write('**' + node.kwarg)
else:
self.maybe_break(node.kwargannotation)
self.write('**' + node.kwarg)
self.write(self.COLON)
self.visit(node.kwargannotation)
else:
self.maybe_break(node.kwarg)
self.write('**')
self.visit(node.kwarg)
else:
for arg, default in zip(node.args, padding + node.defaults):
self.write(sep())
self.visit(arg)
if default is not None:
self.write('=')
self.visit(default)
if node.vararg is not None:
self.write(sep())
self.write('*' + node.vararg)
if node.kwarg is not None:
self.write(sep())
self.write('**' + node.kwarg)
def visit_arg(self, node):
# Py3 only
self.maybe_break(node)
self.write(node.arg)
if node.annotation is not None:
self.write(self.COLON)
self.visit(node.annotation)
def visit_keyword(self, node):
self.maybe_break(node.value)
if node.arg is not None:
self.write(node.arg + '=')
else:
self.write('**')
self.visit(node.value)
def visit_ClassDef(self, node):
self.newline(extra=2)
# first decorator line number will be used
self.decorators(node)
self.write('class %s' % node.name)
if (node.bases or (hasattr(node, 'keywords') and node.keywords) or
(hasattr(node, 'starargs') and (node.starargs or node.kwargs))):
self.paren_start()
sep = Sep(self.COMMA)
for base in node.bases:
self.write(sep())
self.visit(base)
# XXX: the if here is used to keep this module compatible
# with python 2.6.
if hasattr(node, 'keywords'):
for keyword in node.keywords:
self.write(sep())
self.visit(keyword)
if hasattr(node, 'starargs'):
if node.starargs is not None:
self.write(sep())
self.maybe_break(node.starargs)
self.write('*')
self.visit(node.starargs)
if node.kwargs is not None:
self.write(sep())
self.maybe_break(node.kwargs)
self.write('**')
self.visit(node.kwargs)
self.paren_end()
self.write(':')
self.body(node.body)
def visit_If(self, node):
self.newline(node, force=True)
self.write('if ')
self.visit(node.test)
self.write(':')
self.body(node.body)
while True:
if len(node.orelse) == 1 and isinstance(node.orelse[0], If):
node = node.orelse[0]
self.newline(node.test, force=True)
self.write('elif ')
self.visit(node.test)
self.write(':')
self.body(node.body)
else:
if node.orelse:
self.newline()
self.write('else:')
self.body(node.orelse)
break
def visit_AsyncFor(self, node):
self.visit_For(node, True)
    def visit_For(self, node, is_async=False):
        self.newline(node, force=True)
        if is_async:
            self.write('async ')
self.write('for ')
self.visit_bare(node.target)
self.write(' in ')
self.visit(node.iter)
self.write(':')
self.body_or_else(node)
def visit_While(self, node):
self.newline(node, force=True)
self.write('while ')
self.visit(node.test)
self.write(':')
self.body_or_else(node)
def visit_AsyncWith(self, node):
self.visit_With(node, True)
    def visit_With(self, node, is_async=False):
        self.newline(node, force=True)
        if is_async:
            self.write('async ')
self.write('with ')
if hasattr(node, 'items'):
sep = Sep(self.COMMA)
for item in node.items:
self.write(sep())
self.visit_withitem(item)
else:
# in python 2, similarly to the elif statement, multiple nested context managers
# are generally the multi-form of a single with statement
self.visit_withitem(node)
while len(node.body) == 1 and isinstance(node.body[0], With):
node = node.body[0]
self.write(self.COMMA)
self.visit_withitem(node)
self.write(':')
self.body(node.body)
def visit_withitem(self, node):
self.visit(node.context_expr)
if node.optional_vars is not None:
self.write(' as ')
self.visit(node.optional_vars)
def visit_Pass(self, node):
self.newline(node)
self.write('pass')
def visit_Print(self, node):
# XXX: python 2 only
self.newline(node)
self.write('print ')
sep = Sep(self.COMMA)
if node.dest is not None:
self.write(' >> ')
self.visit(node.dest)
sep()
for value in node.values:
self.write(sep())
self.visit(value)
if not node.nl:
self.write(',')
def visit_Delete(self, node):
self.newline(node)
self.write('del ')
sep = Sep(self.COMMA)
for target in node.targets:
self.write(sep())
self.visit(target)
def visit_Try(self, node):
# Python 3 only. exploits the fact that TryExcept uses the same attribute names
self.visit_TryExcept(node)
if node.finalbody:
self.newline()
self.write('finally:')
self.body(node.finalbody)
def visit_TryExcept(self, node):
self.newline(node, force=True)
self.write('try:')
self.body(node.body)
for handler in node.handlers:
self.visit(handler)
if node.orelse:
self.newline()
self.write('else:')
self.body(node.orelse)
def visit_TryFinally(self, node):
# Python 2 only
if len(node.body) == 1 and isinstance(node.body[0], TryExcept):
self.visit_TryExcept(node.body[0])
else:
self.newline(node, force=True)
self.write('try:')
self.body(node.body)
self.newline()
self.write('finally:')
self.body(node.finalbody)
def visit_ExceptHandler(self, node):
self.newline(node, force=True)
self.write('except')
if node.type:
self.write(' ')
self.visit(node.type)
if node.name:
self.write(' as ')
            # Compatibility
if isinstance(node.name, AST):
self.visit(node.name)
else:
self.write(node.name)
self.write(':')
self.body(node.body)
def visit_Global(self, node):
self.newline(node)
self.write('global ' + self.COMMA.join(node.names))
def visit_Nonlocal(self, node):
self.newline(node)
self.write('nonlocal ' + self.COMMA.join(node.names))
def visit_Return(self, node):
self.newline(node)
if node.value is not None:
self.write('return ')
self.visit(node.value)
else:
self.write('return')
def visit_Break(self, node):
self.newline(node)
self.write('break')
def visit_Continue(self, node):
self.newline(node)
self.write('continue')
def visit_Raise(self, node):
# XXX: Python 2.6 / 3.0 compatibility
self.newline(node)
if hasattr(node, 'exc') and node.exc is not None:
self.write('raise ')
self.visit(node.exc)
if node.cause is not None:
self.write(' from ')
self.visit(node.cause)
elif hasattr(node, 'type') and node.type is not None:
self.write('raise ')
self.visit(node.type)
if node.inst is not None:
self.write(self.COMMA)
self.visit(node.inst)
if node.tback is not None:
self.write(self.COMMA)
self.visit(node.tback)
else:
self.write('raise')
# Expressions
def visit_Attribute(self, node):
self.maybe_break(node)
        # Edge case: because floats are tokenized as \d*[.]\d* (so `1.attr`
        # would read as a float), you have to put parentheses around an
        # integer literal to get an attribute from it
if isinstance(node.value, Num):
self.paren_start()
self.visit(node.value)
self.paren_end()
else:
self.prec_start(17)
self.visit(node.value)
self.prec_end()
self.write('.' + node.attr)
def visit_Call(self, node):
self.maybe_break(node)
        # need to put parentheses around number literals being called
        # (nonsensical, but it can appear in an AST)
if isinstance(node.func, Num):
self.paren_start()
self.visit_Num(node.func)
self.paren_end()
else:
self.prec_start(17)
self.visit(node.func)
self.prec_end()
# special case generator expressions as only argument
if (len(node.args) == 1 and isinstance(node.args[0], GeneratorExp) and
not node.keywords and hasattr(node, 'starargs') and
not node.starargs and not node.kwargs):
self.visit_GeneratorExp(node.args[0])
return
self.paren_start()
sep = Sep(self.COMMA)
for arg in node.args:
self.write(sep())
self.maybe_break(arg)
self.visit(arg)
for keyword in node.keywords:
self.write(sep())
self.visit(keyword)
if hasattr(node, 'starargs'):
if node.starargs is not None:
self.write(sep())
self.maybe_break(node.starargs)
self.write('*')
self.visit(node.starargs)
if node.kwargs is not None:
self.write(sep())
self.maybe_break(node.kwargs)
self.write('**')
self.visit(node.kwargs)
self.paren_end()
def visit_Name(self, node):
self.maybe_break(node)
self.write(node.id)
def visit_NameConstant(self, node):
self.maybe_break(node)
self.write(repr(node.value))
def visit_Str(self, node, frombytes=False):
self.maybe_break(node)
if frombytes:
newline_count = node.s.count('\n'.encode('utf-8'))
else:
newline_count = node.s.count('\n')
# heuristic, expand when more than 1 newline and when at least 80%
# of the characters aren't newlines
expand = newline_count > 1 and len(node.s) > 5 * newline_count
if self.correct_line_numbers:
            # Also check that we have enough newlines to expand into if we're going for correct line numbers
if self.after_colon:
# Although this makes little sense just after a colon
expand = expand and self.new_lines > newline_count
else:
expand = expand and self.new_lines >= newline_count
if expand and (not self.correct_line_numbers or self.new_lines >= newline_count):
if self.correct_line_numbers:
self.new_lines -= newline_count
a = repr(node.s)
delimiter = a[-1]
header, content = a[:-1].split(delimiter, 1)
lines = []
chain = False
for i in content.split('\\n'):
if chain:
i = lines.pop() + i
chain = False
if (len(i) - len(i.rstrip('\\'))) % 2:
i += '\\n'
chain = True
lines.append(i)
assert newline_count + 1 == len(lines)
self.write(header)
self.write(delimiter * 3)
self.write('\n'.join(lines))
self.write(delimiter * 3)
else:
self.write(repr(node.s))
def visit_Bytes(self, node):
self.visit_Str(node, True)
def visit_Num(self, node):
self.maybe_break(node)
negative = (node.n.imag or node.n.real) < 0 and not PY3
if negative:
self.prec_start(self.UNARYOP_SYMBOLS[USub][1])
# 1e999 and related friends are parsed into inf
if abs(node.n) == 1e999:
if negative:
self.write('-')
self.write('1e999')
if node.n.imag:
self.write('j')
else:
self.write(repr(node.n))
if negative:
self.prec_end()
def visit_Tuple(self, node, guard=True):
if guard or not node.elts:
self.paren_start()
sep = Sep(self.COMMA)
for item in node.elts:
self.write(sep())
self.visit(item)
if len(node.elts) == 1:
self.write(',')
if guard or not node.elts:
self.paren_end()
def _sequence_visit(left, right): # pylint: disable=E0213
def visit(self, node):
self.maybe_break(node)
self.paren_start(left)
sep = Sep(self.COMMA)
for item in node.elts:
self.write(sep())
self.visit(item)
self.paren_end(right)
return visit
visit_List = _sequence_visit('[', ']')
visit_Set = _sequence_visit('{', '}')
def visit_Dict(self, node):
self.maybe_break(node)
self.paren_start('{')
sep = Sep(self.COMMA)
for key, value in zip(node.keys, node.values):
self.write(sep())
self.visit(key)
self.write(self.COLON)
self.visit(value)
self.paren_end('}')
def visit_BinOp(self, node):
self.maybe_break(node)
symbol, precedence = self.BINOP_SYMBOLS[type(node.op)]
self.prec_start(precedence, type(node.op) != Pow)
# work around python's negative integer literal optimization
if isinstance(node.op, Pow):
self.visit(node.left)
self.prec_middle(14)
else:
self.visit(node.left)
self.prec_middle()
self.write(symbol)
self.visit(node.right)
self.prec_end()
def visit_BoolOp(self, node):
self.maybe_break(node)
symbol, precedence = self.BOOLOP_SYMBOLS[type(node.op)]
self.prec_start(precedence, True)
self.prec_middle()
sep = Sep(symbol)
for value in node.values:
self.write(sep())
self.visit(value)
self.prec_end()
def visit_Compare(self, node):
self.maybe_break(node)
self.prec_start(7, True)
self.prec_middle()
self.visit(node.left)
for op, right in zip(node.ops, node.comparators):
self.write(self.CMPOP_SYMBOLS[type(op)][0])
self.visit(right)
self.prec_end()
def visit_UnaryOp(self, node):
self.maybe_break(node)
symbol, precedence = self.UNARYOP_SYMBOLS[type(node.op)]
self.prec_start(precedence)
self.write(symbol)
# workaround: in python 2, an explicit USub node around a number literal
# indicates the literal was surrounded by parenthesis
if (not PY3 and isinstance(node.op, USub) and isinstance(node.operand, Num)
and (node.operand.n.real or node.operand.n.imag) >= 0):
self.paren_start()
self.visit(node.operand)
self.paren_end()
else:
self.visit(node.operand)
self.prec_end()
def visit_Subscript(self, node):
self.maybe_break(node)
# have to surround literals by parenthesis (at least in Py2)
if isinstance(node.value, Num):
self.paren_start()
self.visit_Num(node.value)
self.paren_end()
else:
self.prec_start(17)
self.visit(node.value)
self.prec_end()
self.paren_start('[')
self.visit(node.slice)
self.paren_end(']')
def visit_Index(self, node, guard=False):
# Index has no lineno information
# When a subscript includes a tuple directly, the parenthesis can be dropped
if not guard:
self.visit_bare(node.value)
else:
self.visit(node.value)
def visit_Slice(self, node):
# Slice has no lineno information
if node.lower is not None:
self.visit(node.lower)
self.write(':')
if node.upper is not None:
self.visit(node.upper)
if node.step is not None:
self.write(':')
if not (isinstance(node.step, Name) and node.step.id == 'None'):
self.visit(node.step)
def visit_Ellipsis(self, node):
# Ellipsis has no lineno information
self.write('...')
def visit_ExtSlice(self, node):
# Extslice has no lineno information
for idx, item in enumerate(node.dims):
if idx:
self.write(self.COMMA)
if isinstance(item, Index):
self.visit_Index(item, True)
else:
self.visit(item)
def visit_Yield(self, node, paren=True):
        # yield can only be used in a statement context, or when we're between parentheses
self.maybe_break(node)
if paren:
self.paren_start()
if node.value is not None:
self.write('yield ')
self.visit_bare(node.value)
else:
self.write('yield')
if paren:
self.paren_end()
def visit_YieldFrom(self, node, paren=True):
# Even though yield and yield from technically occupy precedence level 1, certain code
# using them is illegal e.g. "return yield from a()" will not work unless you
# put the yield from statement within parenthesis.
self.maybe_break(node)
if paren:
self.paren_start()
self.write('yield from ')
self.visit(node.value)
if paren:
self.paren_end()
def visit_Lambda(self, node):
self.maybe_break(node)
self.prec_start(2)
self.write('lambda ')
self.visit_arguments(node.args)
self.write(self.COLON)
self.visit(node.body)
self.prec_end()
def _generator_visit(left, right):
def visit(self, node):
self.maybe_break(node)
self.paren_start(left)
self.visit(node.elt)
for comprehension in node.generators:
self.visit(comprehension)
self.paren_end(right)
return visit
visit_ListComp = _generator_visit('[', ']')
visit_GeneratorExp = _generator_visit('(', ')')
visit_SetComp = _generator_visit('{', '}')
def visit_DictComp(self, node):
self.maybe_break(node)
self.paren_start('{')
self.visit(node.key)
self.write(self.COLON)
self.visit(node.value)
for comprehension in node.generators:
self.visit(comprehension)
self.paren_end('}')
def visit_IfExp(self, node):
self.maybe_break(node)
self.prec_start(3, False)
self.visit(node.body)
self.write(' if ')
self.visit(node.test)
self.prec_middle(2)
self.write(' else ')
self.visit(node.orelse)
self.prec_end()
def visit_Starred(self, node):
self.maybe_break(node)
self.write('*')
self.visit(node.value)
def visit_Repr(self, node):
# XXX: python 2.6 only
self.maybe_break(node)
self.write('`')
self.visit(node.value)
self.write('`')
# Helper Nodes
def visit_alias(self, node):
# alias does not have line number information
self.write(node.name)
if node.asname is not None:
self.write(' as ' + node.asname)
def visit_comprehension(self, node):
self.maybe_break(node.target)
self.write(' for ')
self.visit_bare(node.target)
self.write(' in ')
# workaround: lambda and ternary need to be within parenthesis here
self.prec_start(4)
self.visit(node.iter)
self.prec_end()
for if_ in node.ifs:
self.write(' if ')
self.visit(if_)
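# Note: the `Sep` helper used throughout the visitor methods above is defined
# elsewhere in this module. From its usage (``sep = Sep(self.COMMA)`` followed
# by ``self.write(sep())`` inside a loop), it returns the empty string on its
# first call and the configured separator on every later call. A minimal
# sketch of such a helper (an assumption, not the original definition):
#
# class Sep(object):
#     def __init__(self, separator):
#         self.separator = separator
#         self.first = True
#
#     def __call__(self):
#         if self.first:
#             self.first = False
#             return ''
#         return self.separator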
|
76a065635787e7086739281fc8bb63cd0c677d75
|
fbbead70a9ae48838c621f5d83a1433f1c545487
|
/tests/monad_test.py
|
8f137644f2bda329ed7716f7c2439204022bee4b
|
[
"MIT"
] |
permissive
|
suned/pfun
|
fbd033221a400f0b77533179be36a7881dbc4e29
|
bebd4b73bac20243a0143de6f493d2b30e4e1434
|
refs/heads/master
| 2023-08-09T10:19:50.415670
| 2022-12-09T11:35:34
| 2022-12-09T11:35:34
| 200,543,653
| 156
| 15
|
MIT
| 2023-07-22T12:41:48
| 2019-08-04T21:32:12
|
Python
|
UTF-8
|
Python
| false
| false
| 418
|
py
|
monad_test.py
|
from abc import ABC, abstractmethod
from .functor_test import FunctorTest
class MonadTest(FunctorTest, ABC):
@abstractmethod
def test_right_identity_law(self, *args):
raise NotImplementedError()
@abstractmethod
def test_left_identity_law(self, *args):
raise NotImplementedError()
@abstractmethod
def test_associativity_law(self, *args):
raise NotImplementedError()
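# A minimal sketch of a concrete subclass (the Identity monad below is a
# hypothetical example, not part of pfun's API), showing how the three laws
# are typically exercised:
#
# class Identity:
#     def __init__(self, value):
#         self.value = value
#
#     def and_then(self, f):  # monadic bind
#         return f(self.value)
#
# class IdentityMonadTest(MonadTest):
#     def test_left_identity_law(self):
#         f = lambda x: Identity(x + 1)
#         assert Identity(2).and_then(f).value == f(2).value
#
#     def test_right_identity_law(self):
#         assert Identity(2).and_then(Identity).value == Identity(2).value
#
#     def test_associativity_law(self):
#         f = lambda x: Identity(x + 1)
#         g = lambda x: Identity(x * 2)
#         m = Identity(2)
#         lhs = m.and_then(f).and_then(g).value
#         rhs = m.and_then(lambda x: f(x).and_then(g)).value
#         assert lhs == rhs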
|
9cb7b38700df37c36c1ea0fc115d1fb6500e6dae
|
b28bc89706650e5014839b99be9ce2c02fc04119
|
/django_mongoengine/forms/__init__.py
|
7f9df9c7e98c042d1235720aea90a158ac082304
|
[
"BSD-3-Clause"
] |
permissive
|
MongoEngine/django-mongoengine
|
9458e7dcb780a5cd040aea4e72067e1eaa1116b4
|
83e06c6dc772fca629c48c4ae1e75e3c00c7f051
|
refs/heads/master
| 2023-08-19T20:01:13.829717
| 2023-08-16T10:11:30
| 2023-08-16T10:11:30
| 4,334,103
| 739
| 278
|
NOASSERTION
| 2023-08-16T09:40:03
| 2012-05-15T09:28:40
|
Python
|
UTF-8
|
Python
| false
| false
| 46
|
py
|
__init__.py
|
from .documents import *
from .utils import *
|
831952a5cf9b74c47c0b9064cf7df5b284384899
|
f04babfe5351de24f6b09709895f5412af93dbf6
|
/diverging_map.py
|
c3a22b4ebb2c7d70fd27882ccf47fb321b5cfb9a
|
[
"MIT"
] |
permissive
|
ethankruse/kepler_orrery
|
3106fd5f577efda8256771b06704bd36d6167d0e
|
73280e5ceef88380b65a8c1ebb14b617eeecce88
|
refs/heads/master
| 2022-11-08T23:17:46.915578
| 2022-10-27T19:18:39
| 2022-10-27T19:18:39
| 47,040,248
| 109
| 22
|
MIT
| 2020-11-05T10:56:13
| 2015-11-28T21:32:47
|
Python
|
UTF-8
|
Python
| false
| false
| 12,013
|
py
|
diverging_map.py
|
#!/usr/bin/env python
#------------------------------------------------------------------------------
# Name: colorMapCreator.py
# Purpose: Generate reasonable diverging colormaps using the technique
# presented in "Diverging Color Maps for Scientific Visualization
# (Expanded)" by Kenneth Moreland.
#
# Author: Carlo Barth
#
# Created: 22.10.2013
# Copyright: (c) 2013
#------------------------------------------------------------------------------
# main() (diverge_map) function modified by Ethan Kruse 2015
# to return a colormap directly. Also found some bugs, but am hacking around
# that for now
# Imports
import numpy as np
# =============================================================================
# ====================== The Class ColorMapCreator ============================
# =============================================================================
class ColorMapCreator:
"""
Class ColorMapCreator:
Create diverging colormaps from RGB1 to RGB2 using the method of Moreland
or a simple CIELAB-interpolation. numColors controls the number of color
values to output (odd number) and divide gives the possibility to output
    RGB-values from 0.0-1.0 instead of 0-255. If a filename other than
"" is given, the colormap will be saved to this file, otherwise a simple
output using print will be given.
"""
# ======================== Global Variables ===============================
# Reference white-point D65
Xn, Yn, Zn = [95.047, 100.0, 108.883] # from Adobe Cookbook
# Transfer-matrix for the conversion of RGB to XYZ color space
transM = np.array([[0.4124564, 0.2126729, 0.0193339],
[0.3575761, 0.7151522, 0.1191920],
[0.1804375, 0.0721750, 0.9503041]])
# ============================= Functions =================================
    def __init__(self, RGB1, RGB2, numColors = 33, divide = 255.,
                 method = "moreland", filename = ""):
# create a class variable for the number of colors
self.numColors = numColors
# assert an odd number of points
assert np.mod(numColors,2) == 1, \
"For diverging colormaps odd numbers of colors are desireable!"
# assert a known method was specified
knownMethods = ["moreland", "lab"]
assert method in knownMethods, "Unknown method was specified!"
if method == knownMethods[0]:
            # generate the Msh diverging colormap
self.colorMap = self.generateColorMap(RGB1, RGB2, divide)
elif method == knownMethods[1]:
# generate the Lab diverging colormap
self.colorMap = self.generateColorMapLab(RGB1, RGB2, divide)
        # print out the colormap or save it to the file named filename
if filename == "":
for c in self.colorMap:
pass
# print "{0}, {1}, {2}".format(c[0], c[1], c[2])
else:
with open(filename, "w") as f:
for c in self.colorMap:
f.write("{0}, {1}, {2}\n".format(c[0], c[1], c[2]))
#-
def rgblinear(self, RGB):
"""
Conversion from the sRGB components to RGB components with physically
linear properties.
"""
# initialize the linear RGB array
RGBlinear = np.zeros((3,))
# calculate the linear RGB values
for i,value in enumerate(RGB):
value = float(value) / 255.
if value > 0.04045 :
value = ( ( value + 0.055 ) / 1.055 ) ** 2.4
else :
value = value / 12.92
RGBlinear[i] = value * 100.
return RGBlinear
#-
def sRGB(self, RGBlinear):
"""
Back conversion from linear RGB to sRGB.
"""
# initialize the sRGB array
RGB = np.zeros((3,))
# calculate the sRGB values
for i,value in enumerate(RGBlinear):
value = float(value) / 100.
if value > 0.00313080495356037152:
value = (1.055 * np.power(value,1./2.4) ) - 0.055
else :
value = value * 12.92
RGB[i] = round(value * 255.)
return RGB
#-
def rgb2xyz(self, RGB):
"""
Conversion of RGB to XYZ using the transfer-matrix
"""
return np.dot(self.rgblinear(RGB), self.transM)
#-
def xyz2rgb(self, XYZ):
"""
        Conversion of XYZ back to RGB using the inverse of the transfer-matrix
"""
#return np.round(np.dot(XYZ, np.array(np.matrix(transM).I)))
return self.sRGB(np.dot(XYZ, np.array(np.matrix(self.transM).I)))
#-
def rgb2Lab(self, RGB):
"""
Conversion of RGB to CIELAB
"""
# convert RGB to XYZ
X, Y, Z = (self.rgb2xyz(RGB)).tolist()
# helper function
def f(x):
limit = 0.008856
if x> limit:
return np.power(x, 1./3.)
else:
return 7.787*x + 16./116.
# calculation of L, a and b
L = 116. * ( f(Y/self.Yn) - (16./116.) )
a = 500. * ( f(X/self.Xn) - f(Y/self.Yn) )
b = 200. * ( f(Y/self.Yn) - f(Z/self.Zn) )
return np.array([L, a, b])
#-
def Lab2rgb(self, Lab):
"""
Conversion of CIELAB to RGB
"""
# unpack the Lab-array
L, a, b = Lab.tolist()
# helper function
def finverse(x):
xlim = 0.008856
a = 7.787
b = 16./116.
ylim = a*xlim+b
if x > ylim:
return np.power(x, 3)
else:
return ( x - b ) / a
# calculation of X, Y and Z
X = self.Xn * finverse( (a/500.) + (L+16.)/116. )
Y = self.Yn * finverse( (L+16.)/116. )
Z = self.Zn * finverse( (L+16.)/116. - (b/200.) )
# conversion of XYZ to RGB
return self.xyz2rgb(np.array([X,Y,Z]))
#-
def Lab2Msh(self, Lab):
"""
Conversion of CIELAB to Msh
"""
# unpack the Lab-array
L, a, b = Lab.tolist()
# calculation of M, s and h
M = np.sqrt(np.sum(np.power(Lab, 2)))
s = np.arccos(L/M)
h = np.arctan2(b,a)
return np.array([M,s,h])
#-
def Msh2Lab(self, Msh):
"""
Conversion of Msh to CIELAB
"""
# unpack the Msh-array
M, s, h = Msh.tolist()
# calculation of L, a and b
L = M*np.cos(s)
a = M*np.sin(s)*np.cos(h)
b = M*np.sin(s)*np.sin(h)
return np.array([L,a,b])
#-
def rgb2Msh(self, RGB):
""" Direct conversion of RGB to Msh. """
return self.Lab2Msh(self.rgb2Lab(RGB))
#-
def Msh2rgb(self, Msh):
""" Direct conversion of Msh to RGB. """
return self.Lab2rgb(self.Msh2Lab(Msh))
#-
def adjustHue(self, MshSat, Munsat):
"""
Function to provide an adjusted hue when interpolating to an
unsaturated color in Msh space.
"""
# unpack the saturated Msh-array
Msat, ssat, hsat = MshSat.tolist()
if Msat >= Munsat:
return hsat
else:
hSpin = ssat * np.sqrt(Munsat**2 - Msat**2) / \
(Msat * np.sin(ssat))
if hsat > -np.pi/3:
return hsat + hSpin
else:
return hsat - hSpin
#-
def interpolateColor(self, RGB1, RGB2, interp):
"""
Interpolation algorithm to automatically create continuous diverging
color maps.
"""
# convert RGB to Msh and unpack
Msh1 = self.rgb2Msh(RGB1)
M1, s1, h1 = Msh1.tolist()
Msh2 = self.rgb2Msh(RGB2)
M2, s2, h2 = Msh2.tolist()
# If points saturated and distinct, place white in middle
if (s1>0.05) and (s2>0.05) and ( np.abs(h1-h2) > np.pi/3. ):
Mmid = max([M1, M2, 88.])
if interp < 0.5:
M2 = Mmid
s2 = 0.
h2 = 0.
interp = 2*interp
else:
M1 = Mmid
s1 = 0.
h1 = 0.
interp = 2*interp-1.
# Adjust hue of unsaturated colors
if (s1 < 0.05) and (s2 > 0.05):
h1 = self.adjustHue(np.array([M2,s2,h2]), M1)
elif (s2 < 0.05) and (s1 > 0.05):
h2 = self.adjustHue(np.array([M1,s1,h1]), M2)
# Linear interpolation on adjusted control points
MshMid = (1-interp)*np.array([M1,s1,h1]) + \
interp*np.array([M2,s2,h2])
return self.Msh2rgb(MshMid)
#-
def generateColorMap(self, RGB1, RGB2, divide):
"""
Generate the complete diverging color map using the Moreland-technique
from RGB1 to RGB2, placing "white" in the middle. The number of points
given by "numPoints" controls the resolution of the colormap. The
optional parameter "divide" gives the possibility to scale the whole
colormap, for example to have float values from 0 to 1.
"""
# calculate
scalars = np.linspace(0., 1., self.numColors)
RGBs = np.zeros((self.numColors, 3))
for i,s in enumerate(scalars):
RGBs[i,:] = self.interpolateColor(RGB1, RGB2, s)
return RGBs/divide
#-
def generateColorMapLab(self, RGB1, RGB2, divide):
"""
Generate the complete diverging color map using a transition from
        Lab1 to Lab2, transitioning through true RGB-white. The number of points
given by "numPoints" controls the resolution of the colormap. The
optional parameter "divide" gives the possibility to scale the whole
colormap, for example to have float values from 0 to 1.
"""
# convert to Lab-space
Lab1 = self.rgb2Lab(RGB1)
Lab2 = self.rgb2Lab(RGB2)
LabWhite = np.array([100., 0., 0.])
# initialize the resulting arrays
Lab = np.zeros((self.numColors ,3))
RGBs = np.zeros((self.numColors ,3))
        N2 = int(np.floor(self.numColors/2.))  # int: used as a slice index below
# calculate
for i in range(3):
Lab[0:N2+1, i] = np.linspace(Lab1[i], LabWhite[i], N2+1)
Lab[N2:, i] = np.linspace(LabWhite[i], Lab2[i], N2+1)
for i,l in enumerate(Lab):
RGBs[i,:] = self.Lab2rgb(l)
return RGBs/divide
#-
# =============================================================================
# ========================== The Main-Function ================================
# =============================================================================
# define the initial and final RGB-colors (low and high end of the diverging
# colormap)
def diverge_map(RGB1=np.array([59, 76, 192]), RGB2=np.array([180, 4, 38]),
numColors=101):
# create a new instance of the ColorMapCreator-class using the desired
# options
colormap = ColorMapCreator(RGB1, RGB2, numColors=numColors)
    # there are clearly some bugs, since it's possible to get values > 1
# e.g. with starting values RGB1 = [1,185,252], RGB2 = [220, 55, 19],
# numColors > 3
# but this is good enough for now
colormap.colorMap = np.clip(colormap.colorMap, 0, 1)
cdict = {'red': [], 'green': [], 'blue': []}
inds = np.linspace(0.,1.,numColors)
# create a matplotlib colormap
for ii, ind in enumerate(inds):
cdict['red'].append([ind, colormap.colorMap[ii, 0],
colormap.colorMap[ii, 0]])
cdict['green'].append([ind, colormap.colorMap[ii, 1],
colormap.colorMap[ii, 1]])
cdict['blue'].append([ind, colormap.colorMap[ii, 2],
colormap.colorMap[ii, 2]])
from matplotlib.colors import LinearSegmentedColormap
mycmap = LinearSegmentedColormap('BlueRed1', cdict)
return mycmap
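# Example usage (a sketch; assumes matplotlib is installed):
#
# import matplotlib.pyplot as plt
# cmap = diverge_map()                 # default blue -> red map with 101 colors
# data = np.random.randn(32, 32)
# im = plt.imshow(data, cmap=cmap)
# plt.colorbar(im)
# plt.show()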
|
52dae5d66a9953345dd6bd28b99299dba6e53387
|
876452eccd9f13719d2b9f4784fa932eba0872fc
|
/eco/optimize_score.py
|
c75ecc2f46d2d00234c64d3a9cd46e71aea19c2c
|
[
"MIT"
] |
permissive
|
StrangerZhang/pyECO
|
89c1c21ebdbabb73d7615ff7f7ef629d4c9999df
|
9179450159d8b4a5dd654d089af0cb5b75c70e9e
|
refs/heads/master
| 2021-07-11T15:35:12.638263
| 2019-01-07T14:25:09
| 2019-01-07T14:25:09
| 143,963,585
| 203
| 48
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,746
|
py
|
optimize_score.py
|
from .fourier_tools import sample_fs
from .config import config
import numpy as np
if config.use_gpu:
import cupy as cp
"""
code no problem
"""
def optimize_score(scores_fs, iterations):
"""
Maximizes the continuous convolution response (classification scores)
"""
if config.use_gpu:
xp = cp.get_array_module(scores_fs)
else:
xp = np
if len(scores_fs.shape) == 2:
scores_fs = scores_fs[:, :, xp.newaxis]
output_sz = scores_fs.shape[:2]
# do the grid search step by finding the maximum in the sampled response for each scale
sampled_scores = sample_fs(scores_fs)
init_max_score = xp.max(sampled_scores, axis=(0, 1))
max_idx = xp.reshape(sampled_scores, (-1, sampled_scores.shape[2])).argmax(axis=0)
max_pos = xp.column_stack(xp.unravel_index(max_idx, sampled_scores[:,:,0].shape))
row = max_pos[:, 0:1]
col = max_pos[:, 1:2]
    # shift and rescale the coordinate system to [-pi, pi)
    trans_row = (row + np.floor((output_sz[0] - 1)/2)) % output_sz[0] - np.floor((output_sz[0]-1)/2)
trans_col = (col + np.floor((output_sz[1] - 1)/2)) % output_sz[1] - np.floor((output_sz[1]-1)/2)
init_pos_y = 2 * np.pi * trans_row / output_sz[0]
init_pos_x = 2 * np.pi * trans_col / output_sz[1]
max_pos_y = init_pos_y
max_pos_x = init_pos_x
# construct grid
    ky = xp.arange(- np.ceil((output_sz[0] - 1)/2), np.floor((output_sz[0]-1)/2) + 1).reshape(1, -1)
    kx = xp.arange(- np.ceil((output_sz[1] - 1)/2), np.floor((output_sz[1]-1)/2) + 1).reshape(-1, 1)
exp_iky = xp.exp(1j * max_pos_y * ky)[:, xp.newaxis, :].astype(xp.complex64)
exp_ikx = xp.exp(1j * kx * max_pos_x.T).transpose()[:, :, xp.newaxis].astype(xp.complex64)
ky2 = ky * ky
kx2 = kx * kx
max_pos_y = max_pos_y[:, :, xp.newaxis]
max_pos_x = max_pos_x[:, :, xp.newaxis]
init_pos_y = init_pos_y[:, :, xp.newaxis]
init_pos_x = init_pos_x[:, :, xp.newaxis]
scores_fs = scores_fs.transpose(2, 0, 1)
for _ in range(iterations):
# compute gradient
ky_exp_ky = ky * exp_iky
kx_exp_kx = kx * exp_ikx
y_resp = xp.matmul(exp_iky, scores_fs)
resp_x = xp.matmul(scores_fs, exp_ikx)
grad_y = -xp.imag(xp.matmul(ky_exp_ky, resp_x))
grad_x = -xp.imag(xp.matmul(y_resp, kx_exp_kx))
# compute hessian
ival = 1j * xp.matmul(exp_iky, resp_x)
H_yy = xp.real(-xp.matmul(ky2 * exp_iky, resp_x) + ival)
H_xx = xp.real(-xp.matmul(y_resp, kx2 * exp_ikx) + ival)
H_xy = xp.real(-xp.matmul(ky_exp_ky, xp.matmul(scores_fs, kx_exp_kx)))
det_H = H_yy * H_xx - H_xy * H_xy
# compute new position using newtons method
max_pos_y = max_pos_y - (H_xx * grad_y - H_xy * grad_x) / det_H
max_pos_x = max_pos_x - (H_yy * grad_x - H_xy * grad_y) / det_H
# evaluate maximum
exp_iky = xp.exp(1j * ky * max_pos_y).astype(xp.complex64)
exp_ikx = xp.exp(1j * kx * max_pos_x).astype(xp.complex64)
max_score = xp.real(xp.matmul(xp.matmul(exp_iky, scores_fs), exp_ikx)).flatten()
# check for scales that have not increased in score
idx = max_score < init_max_score
max_score[idx] = init_max_score[idx]
max_pos_y[idx] = init_pos_y[idx]
max_pos_x[idx] = init_pos_x[idx]
scale_idx = xp.argmax(max_score)
max_scale_response = max_score[scale_idx]
disp_row = ((max_pos_y[scale_idx][0][0] + np.pi) % (2 * np.pi) - np.pi) / (2 * np.pi) * output_sz[0]
disp_col = ((max_pos_x[scale_idx][0][0] + np.pi) % (2 * np.pi) - np.pi) / (2 * np.pi) * output_sz[1]
if xp is np:
return disp_row, disp_col, scale_idx
else:
return disp_row.get(), disp_col.get(), scale_idx.get()
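# A self-contained 1-D toy (an illustration only, not this module's API) of the
# refinement idea above: start from the grid argmax of a band-limited signal and
# apply Newton's method to its Fourier-series derivatives to find the true peak.
#
# def _toy_newton_peak():
#     N = 16
#     n = np.arange(N)
#     true_peak = 5.3
#     y = np.cos(2 * np.pi * (n - true_peak) / N)  # band-limited, peak at 5.3
#     c = np.fft.fft(y) / N                        # Fourier coefficients
#     k = np.fft.fftfreq(N, d=1.0 / N)             # integer frequencies
#     x = float(np.argmax(y))                      # grid-level initial guess (5.0)
#     for _ in range(5):
#         e = np.exp(2j * np.pi * k * x / N)
#         grad = np.real(np.sum(c * (2j * np.pi * k / N) * e))       # f'(x)
#         hess = np.real(np.sum(c * (2j * np.pi * k / N) ** 2 * e))  # f''(x)
#         x -= grad / hess                         # Newton step toward the maximum
#     return x  # converges to ~5.3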
|
02fdfb24467970e4f47aee3ac0310b9622881dff
|
ff7f0f16a3632b218bba659405e30b1923c3b3ba
|
/smac/env/starcraft2/render.py
|
8fb12162fdbf7b2b16e9f44d38b09e959344f34f
|
[
"MIT"
] |
permissive
|
oxwhirl/smac
|
52694823528a6be2c31b2e5723f32e3d4e828be2
|
12614f1760427026cce82083dc3f0ab3ff1d939e
|
refs/heads/master
| 2023-08-30T22:03:34.506707
| 2023-08-29T09:11:01
| 2023-08-29T09:11:01
| 164,697,388
| 932
| 245
|
MIT
| 2023-08-29T09:11:02
| 2019-01-08T17:12:37
|
Python
|
UTF-8
|
Python
| false
| false
| 11,378
|
py
|
render.py
|
import numpy as np
import re
import subprocess
import platform
from absl import logging
import math
import time
import collections
import os
import pygame
import queue
from pysc2.lib import colors
from pysc2.lib import point
from pysc2.lib.renderer_human import _Surface
from pysc2.lib import transform
from pysc2.lib import features
def clamp(n, smallest, largest):
return max(smallest, min(n, largest))
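# For example: clamp(5, 0, 3) -> 3, clamp(-1, 0, 3) -> 0, clamp(2, 0, 3) -> 2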
def _get_desktop_size():
"""Get the desktop size."""
if platform.system() == "Linux":
try:
xrandr_query = subprocess.check_output(["xrandr", "--query"])
sizes = re.findall(
r"\bconnected primary (\d+)x(\d+)", str(xrandr_query)
)
if sizes[0]:
return point.Point(int(sizes[0][0]), int(sizes[0][1]))
except ValueError:
logging.error("Failed to get the resolution from xrandr.")
# Most general, but doesn't understand multiple monitors.
display_info = pygame.display.Info()
return point.Point(display_info.current_w, display_info.current_h)
class StarCraft2Renderer:
def __init__(self, env, mode):
os.environ["PYGAME_HIDE_SUPPORT_PROMPT"] = "hide"
self.env = env
self.mode = mode
self.obs = None
self._window_scale = 0.75
self.game_info = game_info = self.env._controller.game_info()
self.static_data = self.env._controller.data()
self._obs_queue = queue.Queue()
self._game_times = collections.deque(
maxlen=100
) # Avg FPS over 100 frames. # pytype: disable=wrong-keyword-args
self._render_times = collections.deque(
maxlen=100
) # pytype: disable=wrong-keyword-args
self._last_time = time.time()
self._last_game_loop = 0
self._name_lengths = {}
self._map_size = point.Point.build(game_info.start_raw.map_size)
self._playable = point.Rect(
point.Point.build(game_info.start_raw.playable_area.p0),
point.Point.build(game_info.start_raw.playable_area.p1),
)
window_size_px = point.Point(
self.env.window_size[0], self.env.window_size[1]
)
window_size_px = self._map_size.scale_max_size(
window_size_px * self._window_scale
).ceil()
self._scale = window_size_px.y // 32
self.display = pygame.Surface(window_size_px)
if mode == "human":
self.display = pygame.display.set_mode(window_size_px, 0, 32)
pygame.display.init()
pygame.display.set_caption("Starcraft Viewer")
pygame.font.init()
self._world_to_world_tl = transform.Linear(
point.Point(1, -1), point.Point(0, self._map_size.y)
)
self._world_tl_to_screen = transform.Linear(scale=window_size_px / 32)
self.screen_transform = transform.Chain(
self._world_to_world_tl, self._world_tl_to_screen
)
surf_loc = point.Rect(point.origin, window_size_px)
sub_surf = self.display.subsurface(
pygame.Rect(surf_loc.tl, surf_loc.size)
)
self._surf = _Surface(
sub_surf,
None,
surf_loc,
self.screen_transform,
None,
self.draw_screen,
)
self._font_small = pygame.font.Font(None, int(self._scale * 0.5))
self._font_large = pygame.font.Font(None, self._scale)
def close(self):
pygame.display.quit()
pygame.quit()
def _get_units(self):
for u in sorted(
self.obs.observation.raw_data.units,
key=lambda u: (u.pos.z, u.owner != 16, -u.radius, u.tag),
):
yield u, point.Point.build(u.pos)
def get_unit_name(self, surf, name, radius):
"""Get a length limited unit name for drawing units."""
key = (name, radius)
if key not in self._name_lengths:
max_len = surf.world_to_surf.fwd_dist(radius * 1.6)
for i in range(len(name)):
if self._font_small.size(name[: i + 1])[0] > max_len:
self._name_lengths[key] = name[:i]
break
else:
self._name_lengths[key] = name
return self._name_lengths[key]
def render(self, mode):
self.obs = self.env._obs
self.score = self.env.reward
self.step = self.env._episode_steps
now = time.time()
self._game_times.append(
(
now - self._last_time,
max(
1,
self.obs.observation.game_loop
                    - self._last_game_loop,
),
)
)
if mode == "human":
pygame.event.pump()
self._surf.draw(self._surf)
observation = np.array(pygame.surfarray.pixels3d(self.display))
if mode == "human":
pygame.display.flip()
self._last_time = now
self._last_game_loop = self.obs.observation.game_loop
# self._obs_queue.put(self.obs)
return (
np.transpose(observation, axes=(1, 0, 2))
if mode == "rgb_array"
else None
)
def draw_base_map(self, surf):
"""Draw the base map."""
hmap_feature = features.SCREEN_FEATURES.height_map
hmap = self.env.terrain_height * 255
hmap = hmap.astype(np.uint8)
if (
self.env.map_name == "corridor"
or self.env.map_name == "so_many_baneling"
or self.env.map_name == "2s_vs_1sc"
):
hmap = np.flip(hmap)
else:
hmap = np.rot90(hmap, axes=(1, 0))
if not hmap.any():
hmap = hmap + 100 # pylint: disable=g-no-augmented-assignment
hmap_color = hmap_feature.color(hmap)
out = hmap_color * 0.6
surf.blit_np_array(out)
def draw_units(self, surf):
"""Draw the units."""
unit_dict = None # Cache the units {tag: unit_proto} for orders.
tau = 2 * math.pi
for u, p in self._get_units():
fraction_damage = clamp(
(u.health_max - u.health) / (u.health_max or 1), 0, 1
)
surf.draw_circle(
colors.PLAYER_ABSOLUTE_PALETTE[u.owner], p, u.radius
)
if fraction_damage > 0:
surf.draw_circle(
colors.PLAYER_ABSOLUTE_PALETTE[u.owner] // 2,
p,
u.radius * fraction_damage,
)
surf.draw_circle(colors.black, p, u.radius, thickness=1)
if self.static_data.unit_stats[u.unit_type].movement_speed > 0:
surf.draw_arc(
colors.white,
p,
u.radius,
u.facing - 0.1,
u.facing + 0.1,
thickness=1,
)
def draw_arc_ratio(
color, world_loc, radius, start, end, thickness=1
):
surf.draw_arc(
color, world_loc, radius, start * tau, end * tau, thickness
)
if u.shield and u.shield_max:
draw_arc_ratio(
colors.blue, p, u.radius - 0.05, 0, u.shield / u.shield_max
)
if u.energy and u.energy_max:
draw_arc_ratio(
colors.purple * 0.9,
p,
u.radius - 0.1,
0,
u.energy / u.energy_max,
)
elif u.orders and 0 < u.orders[0].progress < 1:
draw_arc_ratio(
colors.cyan, p, u.radius - 0.15, 0, u.orders[0].progress
)
if u.buff_duration_remain and u.buff_duration_max:
draw_arc_ratio(
colors.white,
p,
u.radius - 0.2,
0,
u.buff_duration_remain / u.buff_duration_max,
)
if u.attack_upgrade_level:
draw_arc_ratio(
self.upgrade_colors[u.attack_upgrade_level],
p,
u.radius - 0.25,
0.18,
0.22,
thickness=3,
)
if u.armor_upgrade_level:
draw_arc_ratio(
self.upgrade_colors[u.armor_upgrade_level],
p,
u.radius - 0.25,
0.23,
0.27,
thickness=3,
)
if u.shield_upgrade_level:
draw_arc_ratio(
self.upgrade_colors[u.shield_upgrade_level],
p,
u.radius - 0.25,
0.28,
0.32,
thickness=3,
)
def write_small(loc, s):
surf.write_world(self._font_small, colors.white, loc, str(s))
name = self.get_unit_name(
surf,
self.static_data.units.get(u.unit_type, "<none>"),
u.radius,
)
if name:
write_small(p, name)
start_point = p
for o in u.orders:
target_point = None
if o.HasField("target_unit_tag"):
if unit_dict is None:
unit_dict = {
t.tag: t
for t in self.obs.observation.raw_data.units
}
target_unit = unit_dict.get(o.target_unit_tag)
if target_unit:
target_point = point.Point.build(target_unit.pos)
if target_point:
surf.draw_line(colors.cyan, start_point, target_point)
start_point = target_point
else:
break
def draw_overlay(self, surf):
"""Draw the overlay describing resources."""
obs = self.obs.observation
times, steps = zip(*self._game_times)
sec = obs.game_loop // 22.4
surf.write_screen(
self._font_large,
colors.green,
(-0.2, 0.2),
"Score: %s, Step: %s, %.1f/s, Time: %d:%02d"
% (
self.score,
self.step,
sum(steps) / (sum(times) or 1),
sec // 60,
sec % 60,
),
align="right",
)
surf.write_screen(
self._font_large,
colors.green * 0.8,
(-0.2, 1.2),
"APM: %d, EPM: %d, FPS: O:%.1f, R:%.1f"
% (
obs.score.score_details.current_apm,
obs.score.score_details.current_effective_apm,
len(times) / (sum(times) or 1),
len(self._render_times) / (sum(self._render_times) or 1),
),
align="right",
)
def draw_screen(self, surf):
"""Draw the screen area."""
self.draw_base_map(surf)
self.draw_units(surf)
self.draw_overlay(surf)
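# A minimal usage sketch (names and flow are illustrative; assumes a live SMAC
# environment whose controller and observations are already available):
#
# renderer = StarCraft2Renderer(env, mode="rgb_array")
# frame = renderer.render("rgb_array")   # (height, width, 3) numpy array
# renderer.close()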
|
3e242bf43645a317f6129884a7fbbacb555d92e7
|
96dcea595e7c16cec07b3f649afd65f3660a0bad
|
/tests/components/melnor/conftest.py
|
3e87a4e646fab1a855802abea06b9cb5e848dabb
|
[
"Apache-2.0"
] |
permissive
|
home-assistant/core
|
3455eac2e9d925c92d30178643b1aaccf3a6484f
|
80caeafcb5b6e2f9da192d0ea6dd1a5b8244b743
|
refs/heads/dev
| 2023-08-31T15:41:06.299469
| 2023-08-31T14:50:53
| 2023-08-31T14:50:53
| 12,888,993
| 35,501
| 20,617
|
Apache-2.0
| 2023-09-14T21:50:15
| 2013-09-17T07:29:48
|
Python
|
UTF-8
|
Python
| false
| false
| 8,358
|
py
|
conftest.py
|
"""Tests for the melnor integration."""
from __future__ import annotations
from collections.abc import Generator
from datetime import UTC, datetime, time, timedelta
from unittest.mock import AsyncMock, patch
from melnor_bluetooth.device import Device
import pytest
from homeassistant.components.bluetooth.models import BluetoothServiceInfoBleak
from homeassistant.components.melnor.const import DOMAIN
from homeassistant.const import CONF_ADDRESS
from homeassistant.core import HomeAssistant
from tests.common import MockConfigEntry
from tests.components.bluetooth import generate_advertisement_data, generate_ble_device
FAKE_ADDRESS_1 = "FAKE-ADDRESS-1"
FAKE_ADDRESS_2 = "FAKE-ADDRESS-2"
FAKE_SERVICE_INFO_1 = BluetoothServiceInfoBleak(
name="YM_TIMER%",
address=FAKE_ADDRESS_1,
rssi=-63,
manufacturer_data={
13: b"Y\x08\x02\x8f\x00\x00\x00\x00\x00\x00\xf0\x00\x00\xf0\x00\x00\xf0\x00\x00\xf0*\x9b\xcf\xbc"
},
service_uuids=[],
service_data={},
source="local",
device=generate_ble_device(FAKE_ADDRESS_1, None),
advertisement=generate_advertisement_data(local_name=""),
time=0,
connectable=True,
)
FAKE_SERVICE_INFO_2 = BluetoothServiceInfoBleak(
name="YM_TIMER%",
address=FAKE_ADDRESS_2,
rssi=-63,
manufacturer_data={
13: b"Y\x08\x02\x8f\x00\x00\x00\x00\x00\x00\xf0\x00\x00\xf0\x00\x00\xf0\x00\x00\xf0*\x9b\xcf\xbc"
},
service_uuids=[],
service_data={},
source="local",
device=generate_ble_device(FAKE_ADDRESS_2, None),
advertisement=generate_advertisement_data(local_name=""),
time=0,
connectable=True,
)
@pytest.fixture(autouse=True)
def mock_bluetooth(enable_bluetooth):
"""Auto mock bluetooth."""
class MockFrequency:
"""Mocked class for a Frequency."""
_duration: int
_interval: int
_is_watering: bool
_start_time: time
_next_run_time: datetime
def __init__(self) -> None:
"""Initialize a mocked frequency."""
self._duration = 0
self._interval = 0
self._is_watering = False
self._start_time = time(12, 0)
self._next_run_time = datetime(2021, 1, 1, 12, 0, tzinfo=UTC)
@property
def duration_minutes(self) -> int:
"""Return the duration in minutes."""
return self._duration
@duration_minutes.setter
def duration_minutes(self, duration: int) -> None:
"""Set the duration in minutes."""
self._duration = duration
@property
def interval_hours(self) -> int:
"""Return the interval in hours."""
return self._interval
@interval_hours.setter
def interval_hours(self, interval: int) -> None:
"""Set the interval in hours."""
self._interval = interval
@property
def start_time(self) -> time:
"""Return the start time."""
return self._start_time
@start_time.setter
def start_time(self, start_time: time) -> None:
"""Set the start time."""
self._start_time = start_time
@property
def is_watering(self) -> bool:
"""Return true if the frequency is currently watering."""
return self._is_watering
@property
def next_run_time(self) -> datetime:
"""Return the next run time."""
return self._next_run_time
@property
def schedule_end_time(self) -> datetime:
"""Return the schedule end time."""
return self._next_run_time + timedelta(minutes=self._duration)
class MockValve:
"""Mocked class for a Valve."""
_id: int
_is_watering: bool
_manual_watering_minutes: int
_end_time: int
_frequency: MockFrequency
_schedule_enabled: bool
def __init__(self, identifier: int) -> None:
"""Initialize a mocked valve."""
self._end_time = 0
self._id = identifier
self._is_watering = False
self._manual_watering_minutes = 0
self._schedule_enabled = False
self._frequency = MockFrequency()
@property
def id(self) -> int:
"""Return the valve id."""
return self._id
@property
def frequency(self):
"""Return the frequency."""
return self._frequency
@property
def is_watering(self):
"""Return true if the valve is currently watering."""
return self._is_watering
@property
def manual_watering_minutes(self):
"""Return the number of minutes the valve is set to manual watering."""
return self._manual_watering_minutes
@property
def next_cycle(self):
"""Return the end time of the current watering cycle."""
return self._frequency.next_run_time
@property
def schedule_enabled(self) -> bool:
"""Return true if the schedule is enabled."""
return self._schedule_enabled
@property
def watering_end_time(self) -> int:
"""Return the end time of the current watering cycle."""
return self._end_time
async def set_is_watering(self, is_watering: bool):
"""Set the valve to manual watering."""
self._is_watering = is_watering
async def set_manual_watering_minutes(self, minutes: int):
"""Set the valve to manual watering."""
self._manual_watering_minutes = minutes
async def set_frequency_interval_hours(self, interval: int):
"""Set the frequency interval in hours."""
self._frequency.interval_hours = interval
async def set_frequency_duration_minutes(self, duration: int):
"""Set the frequency duration in minutes."""
self._frequency.duration_minutes = duration
async def set_frequency_enabled(self, enabled: bool):
"""Set the frequency schedule enabled."""
self._schedule_enabled = enabled
async def set_frequency_start_time(self, value: time):
"""Set the frequency schedule enabled."""
self._frequency.start_time = value
def mock_config_entry(hass: HomeAssistant):
"""Return a mock config entry."""
entry = MockConfigEntry(
domain=DOMAIN,
unique_id=FAKE_ADDRESS_1,
data={CONF_ADDRESS: FAKE_ADDRESS_1},
)
entry.add_to_hass(hass)
return entry
def mock_melnor_device():
"""Return a mocked Melnor device."""
with patch("melnor_bluetooth.device.Device") as mock:
device = mock.return_value
device.connect = AsyncMock(return_value=True)
device.disconnect = AsyncMock(return_value=True)
device.fetch_state = AsyncMock(return_value=device)
device.push_state = AsyncMock(return_value=None)
device.battery_level = 80
device.mac = FAKE_ADDRESS_1
device.model = "test_model"
device.name = "test_melnor"
device.rssi = -50
device.zone1 = MockValve(0)
device.zone2 = MockValve(1)
device.zone3 = MockValve(2)
device.zone4 = MockValve(3)
device.__getitem__.side_effect = lambda key: getattr(device, key)
return device
@pytest.fixture
def mock_setup_entry() -> Generator[AsyncMock, None, None]:
"""Patch async setup entry to return True."""
with patch(
"homeassistant.components.melnor.async_setup_entry", return_value=True
) as mock_setup:
yield mock_setup
# pylint: disable=dangerous-default-value
def patch_async_discovered_service_info(
return_value: list[BluetoothServiceInfoBleak] = [FAKE_SERVICE_INFO_1],
):
"""Patch async_discovered_service_info a mocked device info."""
return patch(
"homeassistant.components.melnor.config_flow.async_discovered_service_info",
return_value=return_value,
)
def patch_async_ble_device_from_address(
return_value: BluetoothServiceInfoBleak | None = FAKE_SERVICE_INFO_1,
):
"""Patch async_ble_device_from_address to return a mocked BluetoothServiceInfoBleak."""
return patch(
"homeassistant.components.bluetooth.async_ble_device_from_address",
return_value=return_value,
)
def patch_melnor_device(device: Device = mock_melnor_device()):
"""Patch melnor_bluetooth.device to return a mocked Melnor device."""
return patch("homeassistant.components.melnor.Device", return_value=device)
def patch_async_register_callback():
"""Patch async_register_callback to return True."""
return patch("homeassistant.components.bluetooth.async_register_callback")
|
0f29eedc576c31ccc791a0b0065f36c384eafd6e
|
73a0f661f1423d63e86489d4b2673f0103698aab
|
/python/oneflow/nn/modules/unique.py
|
b4d0cefde6ca3796a82435f92a52266160c99ba8
|
[
"Apache-2.0"
] |
permissive
|
Oneflow-Inc/oneflow
|
4fc3e081e45db0242a465c4330d8bcc8b21ee924
|
0aab78ea24d4b1c784c30c57d33ec69fe5605e4a
|
refs/heads/master
| 2023-08-25T16:58:30.576596
| 2023-08-22T14:15:46
| 2023-08-22T14:15:46
| 81,634,683
| 5,495
| 786
|
Apache-2.0
| 2023-09-14T09:44:31
| 2017-02-11T06:09:53
|
C++
|
UTF-8
|
Python
| false
| false
| 3,205
|
py
|
unique.py
|
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import oneflow as flow
def unique_op(
input, sorted=True, return_inverse=False, return_counts=False, dtype=flow.int
):
r"""
Returns the unique elements of the input tensor.
The documentation is referenced from: https://pytorch.org/docs/1.10/generated/torch.unique.html.
Args:
input (Tensor): The input tensor.
sorted (bool): Whether to sort the unique elements in ascending order before returning as output.
return_inverse (bool): Whether to also return the indices for where elements in the original input ended up in the returned unique list.
return_counts (bool): Whether to also return the counts for each unique element.
dtype (flow.dtype): Dtype of the returned indices and counts.
Returns:
oneflow.Tensor or List of oneflow.Tensor:
- **output** (Tensor): the output list of unique scalar elements.
- **inverse_indices** (Tensor): (optional) if return_inverse is True,
there will be an additional returned tensor (same shape as input) representing
the indices for where elements in the original input map to in the output;
otherwise, this function will only return a single tensor.
- **counts** (Tensor): (optional) if return_counts is True, there will be an additional
returned tensor (same shape as output or output.size(dim), if dim was specified)
representing the number of occurrences for each unique value or tensor.
For example:
.. code-block:: python
>>> import oneflow as flow
>>> x = flow.tensor([3, 1, 2, 0 ,2])
>>> flow.unique(x)
tensor([0, 1, 2, 3], dtype=oneflow.int64)
>>> flow.unique(x, sorted=False)
tensor([3, 1, 2, 0], dtype=oneflow.int64)
>>> results, indices = flow.unique(x, return_inverse=True)
>>> indices
tensor([3, 1, 2, 0, 2], dtype=oneflow.int32)
>>> results, counts = flow.unique(x, return_counts=True)
>>> counts
tensor([1, 1, 2, 1], dtype=oneflow.int32)
>>> results, indices = flow.unique(x, return_inverse=True, dtype=flow.long)
>>> indices
tensor([3, 1, 2, 0, 2], dtype=oneflow.int64)
"""
if not return_inverse and not return_counts:
return flow._C.unique(input, sorted, dtype=dtype)
else:
return flow._C.unique(
input,
sorted,
return_inverse=return_inverse,
return_counts=return_counts,
dtype=dtype,
)
if __name__ == "__main__":
import doctest
doctest.testmod(raise_on_error=True)
|
b484afe883702849dddd3c983c9b7578b0c2360d
|
6186a3787d1e74f1866844491da48b9643c8f1a9
|
/ghostwriter/api/migrations/0001_initial.py
|
e0f7462a5829cf33660d3b9de3d4d66b2ff1ad80
|
[
"BSD-3-Clause"
] |
permissive
|
GhostManager/Ghostwriter
|
b46b2421e5737ed0afbf49182dce9eeb5eb31936
|
b9eae4459ba192fbb2d4a5b66f8210d57fd7112a
|
refs/heads/master
| 2023-09-04T02:34:54.085997
| 2023-07-13T22:38:44
| 2023-07-13T22:38:44
| 197,269,443
| 1,011
| 197
|
BSD-3-Clause
| 2023-09-08T00:19:52
| 2019-07-16T21:19:43
|
Python
|
UTF-8
|
Python
| false
| false
| 1,824
|
py
|
0001_initial.py
|
# Generated by Django 3.2.11 on 2022-04-05 21:22
import django.db.models.deletion
from django.conf import settings
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name="APIKey",
fields=[
("id", models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name="ID")),
("name", models.CharField(default=None, help_text="A name to identify this API key", max_length=255)),
("token", models.TextField(editable=False)),
("created", models.DateTimeField(auto_now_add=True, db_index=True)),
(
"expiry_date",
models.DateTimeField(
blank=True,
help_text="Once API key expires, clients cannot use it anymore",
null=True,
verbose_name="Expires",
),
),
(
"revoked",
models.BooleanField(
blank=True,
default=False,
help_text="If the API key is revoked, clients cannot use it anymore (this is irreversible)",
),
),
("user", models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
options={
"verbose_name": "API key",
"verbose_name_plural": "API keys",
"ordering": ("-created",),
"abstract": False,
},
),
]
|
0f957b24404bdc125275f43cf56e933e1ad2694e
|
2f679ea4787bcd765dc9d3025a03f15a25d360cb
|
/docker/lr-cartographer/python/extract_bounded_read_sections.py
|
9e6ae40290ec3bc1384e6f6c55efe0428796ce2c
|
[
"BSD-3-Clause"
] |
permissive
|
broadinstitute/long-read-pipelines
|
f7d0958c23b68c4143d350c0b77b62d0bbea914e
|
9620d58f49f29dd2f27fa5f30f72c8257aa2064b
|
refs/heads/main
| 2023-08-31T20:46:00.456332
| 2023-08-15T18:36:14
| 2023-08-15T18:36:14
| 186,657,809
| 101
| 23
|
BSD-3-Clause
| 2023-09-06T14:39:55
| 2019-05-14T16:12:33
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 89,355
|
py
|
extract_bounded_read_sections.py
|
#!/usr/bin/env python3.8
import time
import argparse
import math
import logging
import os
import sys
import inspect
import subprocess
import tempfile
import re
import uuid
from collections import OrderedDict
from collections import namedtuple
from enum import Enum
import numpy as np
import pysam
from tesserae import Tesserae
from tesserae.sequence import Sequence
from tesserae.tesserae import TesseraeAlignmentResult
from tesserae.tesserae import DEFAULT_REC
################################################################################
LOGGER = logging.getLogger("extract_bounded_read_sections")
################################################################################
BARCODE_NAME_IDENTIFIER = "BARCODE"
UNKNOWN_NAME_IDENTIFIER = "UNKNOWN"
# Make a random string at the end to attempt to prevent unplanned matches:
RC_READ_NAME_IDENTIFIER_BASE = "_RC"
RC_READ_NAME_IDENTIFIER = RC_READ_NAME_IDENTIFIER_BASE + "_" + str(uuid.uuid1())[:8]
class AlignmentAlgorithm(Enum):
"""
Specifier for an alignment algorithm.
"""
TESSERAE = 1
MOSAIC_ALIGNER = 2
SMITH_WATERMAN = 3
NEEDLEMAN_WUNSCH = 4
BWA_MEM = 5
BWA_ALN = 6
VALID_DNA_SEQUENCE_PATTERN = re.compile(r"^[ATGCNatgcn]+$")
MOSAIC_ALIGNER_LINE_MATCH_PATTERN = re.compile(r"^\s*(.*?)\s\[\s*(\d+)-\s*(\d+)\]\t+(.*)\s*$")
# Default max PL:
MAX_ALIGNMENT_PL = 60
# Minimum reported PL quality value (will override lower values):
MIN_ALIGNMENT_PL = 0
# Min PL for an alignment to be kept as "good":
# This should be equal to the base read quality.
MIN_GOOD_ALIGNMENT_PL = 7.0
# Min number of bases required to be in an alignment for it to be kept in consideration.
# TODO: Make the default based on the number of bases in each known segment.
MIN_ALIGNMENT_LENGTH = 4
P_REC_KNOWN = DEFAULT_REC
P_REC_UNKNOWN = DEFAULT_REC / 10
# Named tuple to store alignment information:
DetailedAlignmentInfo = namedtuple(
"DetailedAlignmentInfo", ["start_pos", "end_pos", "template_length", "cigar", "qual_pl"]
)
class IdentityMap:
"""A map to that returns exactly what is given."""
def __init__(self):
pass
def __getitem__(self, key):
return key
class ReadFile:
"""
A class to abstract away the differences between reading from bam/sam files and fasta/fastq files with pysam.
"""
def __init__(self, file_name):
self.file_name = file_name
self._file_object = None
self._is_alignment_file = file_name.lower().endswith(".sam") or file_name.lower().endswith(".bam")
def __enter__(self):
if self._is_alignment_file:
# Determine read flags (is it a bam or a sam file?)
file_flags = ''
if self.file_name.endswith('.bam'):
file_flags = 'b'
self._file_object = pysam.AlignmentFile(self.file_name, 'r' + file_flags, check_sq=False)
else:
self._file_object = pysam.FastxFile(self.file_name)
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self._file_object.close()
def is_sam(self):
"""
:return: True iff this file is a sam/bam file.
"""
return self._is_alignment_file
def get_reads(self):
"""
Generator function to yield a Sequence object for every read in the
supporting read file corresponding to self._file_name
"""
if self._is_alignment_file:
for read in self._file_object.fetch(until_eof=True):
yield Sequence(read.query_name, read.query_sequence)
else:
for read in self._file_object:
yield Sequence(read.name, read.sequence)
# IUPAC RC's from: http://arep.med.harvard.edu/labgc/adnan/projects/Utilities/revcomp.html
# and https://www.dnabaser.com/articles/IUPAC%20ambiguity%20codes.html
RC_BASE_MAP = {"N": "N", "A": "T", "T": "A", "G": "C", "C": "G", "Y": "R", "R": "Y", "S": "S", "W": "W", "K": "M",
"M": "K", "B": "V", "V": "B", "D": "H", "H": "D", "n": "n", "a": "t", "t": "a", "g": "c", "c": "g",
"y": "r", "r": "y", "s": "s", "w": "w", "k": "m", "m": "k", "b": "v", "v": "b", "d": "h", "h": "d"}
def reverse_complement(base_string):
"""
Reverse complements the given base_string.
:param base_string: String of bases to be reverse-complemented.
:return: The reverse complement of the given base string.
"""
return ''.join(map(lambda b: RC_BASE_MAP[b], base_string[::-1]))
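# For example: reverse_complement("AACG") -> "CGTT"; a palindromic site maps to
# itself: reverse_complement("GAATTC") -> "GAATTC" (the EcoRI recognition site).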
def _log_var(var):
"""
Logs the given variable's name and contents at the DEBUG log level.
Supports logging even if var is an expression in the invocation.
:param var: The variable whose contents are to be logged.
:return: None
"""
# prev_frame = inspect.currentframe().f_back
# Get the name of this function:
fn_name = inspect.currentframe().f_code.co_name
# Get the source code line of where this function was called:
call_line = inspect.stack()[1][4][0].strip()
# Make sure we're in the right place:
assert call_line.startswith(f"{fn_name}(")
# Pull out the variable name from the function call:
var_name = call_line[len(f"{fn_name}("):][:-1].strip()
LOGGER.debug("%s = %s", var_name, var)
CIGAR_ELEMENT_STRING_MAP = {
pysam.CDEL: "D",
pysam.CMATCH: "M",
pysam.CBACK: "B",
pysam.CINS: "I",
pysam.CPAD: "P",
pysam.CEQUAL: "=",
pysam.CSOFT_CLIP: "S",
pysam.CHARD_CLIP: "H",
pysam.CREF_SKIP: "N",
pysam.CDIFF: "X"
}
STRING_CIGAR_ELEMENT_MAP = {v: k for (k, v) in CIGAR_ELEMENT_STRING_MAP.items()}
def cigar_tuple_to_string(cigar_tuples):
"""
Converts a given list of cigar tuples to a string.
:param cigar_tuples: list of tuples, each containing a cigar element and count.
:return: A string representing the given cigar tuple list.
"""
cigar_string_elements = []
for element, count in cigar_tuples:
cigar_string_elements.append(f"{count}{CIGAR_ELEMENT_STRING_MAP[element]}")
return "".join(cigar_string_elements)
class ProcessedAlignmentResult(namedtuple(
"ProcessedAlignmentResult", ["seq_name", "alignment_string", "target_start_index", "target_end_index",
"read_start_pos", "read_end_pos", "template_length", "cigar", "overall_quality"]
)):
__slots__ = ()
def __str__(self):
return (
f"ProcessedAlignmentResult({self.seq_name}, {self.alignment_string}, {self.target_start_index}, "
f"{self.target_end_index}, {self.read_start_pos}, {self.read_end_pos}, "
f"{self.template_length}, {cigar_tuple_to_string(self.cigar)}, {self.overall_quality})"
)
MetaSequenceInfo = namedtuple(
"MetaSequenceInfo", ["name", "raw_read_alignment_string", "alignment_start_index", "alignment_end_index"]
)
################################################################################
def configure_logging(args):
"""Set up logging for the module"""
format_string = '%(asctime)s %(name)-12s %(levelname)-8s %(message)s'
# Set logging level:
log_level = logging.INFO
if args.quiet:
log_level = logging.CRITICAL
elif args.verbose:
log_level = logging.DEBUG
elif args.veryverbose:
log_level = logging.NOTSET
logging.basicConfig(level=log_level, format=format_string)
def print_logo(alignment_type):
"""Print the logo to the log."""
alignment_algorithm = AlignmentAlgorithm.TESSERAE
if alignment_type:
try:
alignment_algorithm = AlignmentAlgorithm[alignment_type]
except KeyError:
LOGGER.error("Error: You must provide a valid alignment algorithm. Options are: %s",
", ".join([e.name for e in AlignmentAlgorithm]))
sys.exit(1)
LOGGER.info("====================================================================")
LOGGER.info(" ____ _ _")
LOGGER.info(" / ___|__ _ _ __| |_ ___ __ _ _ __ __ _ _ __ | |__ ___ _ __")
LOGGER.info(" | | / _` | '__| __/ _ \\ / _` | '__/ _` | '_ \\| '_ \\ / _ \\ '__|")
LOGGER.info(" | |__| (_| | | | || (_) | (_| | | | (_| | |_) | | | | __/ |")
LOGGER.info(" \\____\\__,_|_| \\__\\___/ \\__, |_| \\__,_| .__/|_| |_|\\___|_|")
LOGGER.info(" |___/ |_|")
LOGGER.info(" ___________________________________________________________________")
LOGGER.info("")
LOGGER.info(" Extract Bounded Read Sections")
LOGGER.info(" ___________________________________________________________________")
LOGGER.info("/ _____.----------._ \\")
LOGGER.info("| _ __,---'_ \" `. _,--. __,---. |")
LOGGER.info("| | \\___ / ( ) \" \" `-.,' (') \\ ( / _\\ \\ |")
LOGGER.info("| \\ \\__/_ __(( _)_ ( \" \" (_\\_) \\___ `-.___,' |")
LOGGER.info("| \\ ( )_(__)_|( )) \" )) \" | \" \\ _ |")
LOGGER.info("| \\__ (( _( ( )) ) _) (( \\\\// \" | \" \\_____,' | |")
LOGGER.info("| \\ ( ))(_)(_)_)| \" )) //\\\\ \" __,---._ \" \" \" / |")
LOGGER.info("| |(_ _)| | | | \" ( \" ,-' `-. ___ / |")
LOGGER.info("| | | | | _,--- ,--. _ \" ( ) /___\\ \\ |")
LOGGER.info("| / | _,----._,'`--'\\.`-._ `._ _ __ _,-' |H__| \\ |")
LOGGER.info("| / \" _,-' / `\\ ,' / _' \\`.---.._ __ \" \\ |")
LOGGER.info("| / / .-' , / ' _,'_ - _ '- _`._ `.`-._ _/- `--. \" \" \\ |")
LOGGER.info("| / / _-- `---, .-' __ -- _,---. `-._ _,-'- / ` \\ \\_ \" | |")
LOGGER.info("| | | -- _ / / `-_- _ _,' ' \\ \\_`-._,-' / -- \\ - \\_ / |")
LOGGER.info("| | \\ - / | \" ,-'_ /- `_ ._`._`-...._____...._,--' / |")
LOGGER.info("| \\ \\_ / / / ___ `--- --- - - ' ,--. ___ | |")
LOGGER.info("| \\ ,' | \" (o o) \" \" \" | \\_,-' `. ,' |")
LOGGER.info("| |__,-' \\ \\\"/ \" \" \" / O `-.__/ |")
LOGGER.info("| `.______________________/ | |")
LOGGER.info("\\___________________________________________________________________/")
LOGGER.info("")
if alignment_algorithm == AlignmentAlgorithm.MOSAIC_ALIGNER:
LOGGER.info(r" +---------------------------+")
LOGGER.info(r" |__ \__/ \__ \__/ \__ |")
LOGGER.info(r" powered | \__/ \ \__/ \ \| ")
LOGGER.info(r" |__/ / __/ / __/|")
LOGGER.info(r" by | \__ \__/ \__ \__/ \| ")
LOGGER.info(r" | \__/ \ \__/ \ |")
LOGGER.info(r" MosaicAligner | __/ / __/ / |")
LOGGER.info(r" |__/ \__ \__/ \__ \__/|")
LOGGER.info(r" | \ \__/ \ \__/ \| ")
LOGGER.info(r" +---------------------------+")
elif alignment_algorithm == AlignmentAlgorithm.TESSERAE:
LOGGER.info(" +-----+")
LOGGER.info(" | \\ | \\")
LOGGER.info(" powered | +-----+")
LOGGER.info(" +--|--+ |")
LOGGER.info(" by \\ | \\ |")
LOGGER.info(" +-----+")
LOGGER.info(" tesserae")
else:
LOGGER.info("")
LOGGER.info(" powered by %s", alignment_algorithm.name)
LOGGER.info("====================================================================")
LOGGER.info("")
def compute_detailed_alignment_info(
query_alignment_string, target_alignment_string, target_start_index, target_end_index,
):
"""Compute detailed alignment information from the given information.
Alignment details are based off the differences between the alignment
strings.
This method returns a tuple containing:
- The Start Position in the reference of the alignment.
- The Template Length of this alignment.
- The Cigar representing this alignment.
- The Phred-Scaled quality score of this alignment.
Where:
- The Start Position is the 0-based, inclusive position in the reference
at which this alignment begins.
- The End Position is the 0-based, inclusive position in the reference
at which this alignment ends.
- The Template Length is the number of bases accounted by this alignment
with respect to the reference.
- The Cigar is a list of tuples: (CIGAR_ELEMENT, COUNT) where each
CIGAR_ELEMENT is defined in pysam.
- The Phred-Scaled quality score is defined by the following formula:
-10 log_10((# mismatches + # insertions + # deletions)/target_length)
However, because of how Tesserae works, we ignore leading and trailing deletions.
"""
no_alignment = DetailedAlignmentInfo(-1, -1, 0, (), MIN_ALIGNMENT_PL)
# Do some basic checks here:
if len(query_alignment_string) == 0:
return no_alignment
start_index = get_start_index_from_alignment_start_string(target_alignment_string)
# Now that we know where in the reference this target begins, we can start
# to loop through both alignment strings at the same time.
# In this loop we will:
# construct a cigar string
# determine counts for alignment quality score
# determine template length
num_errors = 0
cigar = []
current_cigar_element = None
current_cigar_element_count = 0
num_leading_deletions = 0
num_trailing_deletions = 0
num_query_bases_used = 0
have_only_seen_deletions = True
for query_base, target_base in zip(
query_alignment_string[start_index:].upper(), target_alignment_string[start_index:].upper()
):
# The Tesserae2 / Mosaic Alignment algorithm can only produce "-" or
# <BASE> for any position (other than blanks / spaces). Therefore we
# only have to check the following 4 cases:
if query_base == "-":
# We have an insertion relative to the reference:
num_errors += 1
cigar_element = pysam.CINS
have_only_seen_deletions = False
num_trailing_deletions = 0
elif query_base == target_base:
# Bases match:
            # We use CEQUAL here to explicitly record a base match
            # (CMATCH would lump matches and mismatches together).
cigar_element = pysam.CEQUAL
have_only_seen_deletions = False
num_trailing_deletions = 0
num_query_bases_used += 1
elif target_base == "-":
# We have a deletion relative to the reference:
num_errors += 1
cigar_element = pysam.CDEL
if have_only_seen_deletions:
num_leading_deletions += 1
num_trailing_deletions += 1
num_query_bases_used += 1
else:
# We have a mismatch relative to the reference:
num_errors += 1
# We use CMATCH here because that cigar operator accounts for
# BOTH matches and mismatches.
cigar_element = pysam.CMATCH
have_only_seen_deletions = False
num_trailing_deletions = 0
num_query_bases_used += 1
# Accumulate our cigar elements:
if cigar_element != current_cigar_element:
if current_cigar_element is not None:
cigar.append((current_cigar_element, current_cigar_element_count))
current_cigar_element = cigar_element
current_cigar_element_count = 1
else:
current_cigar_element_count += 1
# Add the last remaining cigar element to our list:
cigar.append((current_cigar_element, current_cigar_element_count))
# Adjust cigar for leading / trailing deletions:
if num_leading_deletions != 0:
cigar = cigar[1:]
if num_trailing_deletions != 0:
cigar = cigar[:-1]
# Our template length is the number of bases accounted by this alignment
# with respect to the reference.
# We add one because the end index is inclusive.
template_length = target_end_index - target_start_index + 1
# Compute end index (subtract 1 because of inclusive coordinates):
end_index = start_index + num_query_bases_used - num_leading_deletions - num_trailing_deletions - 1
# Make sure we have something to work with:
if end_index < start_index:
return no_alignment
num_errors -= num_leading_deletions
num_errors -= num_trailing_deletions
# Compute PL score:
aligned_qual_pl = get_qual_pl(num_errors, template_length)
# Return our detailed info.
return DetailedAlignmentInfo(start_index, end_index, template_length, tuple(cigar), aligned_qual_pl)
def create_pretty_alignment_string(query_alignment_info, target_aligned_info):
"""
Create a pretty string containing all the information comparing the target alignments to the query alignment.
:param query_alignment_info: A TesseraeAlignmentResult object representing the query string.
:param target_aligned_info: A list[TesseraeAlignmentResult] representing the alignments of all targets to the
query.
:return: A string representing the alignment of all given targets against the given query.
"""
out_string_list = ["Alignment:"]
pos_label = "Position"
# Get the width of the name field:
name_width = max(len(query_alignment_info.seq_name), max([len(a.seq_name) for a in target_aligned_info]))
# Get width of the numbering of the targets:
    tgt_num_width = len(str(len(target_aligned_info) - 1))
# To deal with identifying the length of the string, we add numbers above the alignment representing the position
# of each base and add start with that string.
align_len = len(query_alignment_info.alignment_string)
num_places = math.floor(math.log10(align_len))
have_printed_pos_label = False
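    # Illustrative ruler for align_len = 25 (tens row above ones row; the last
    # column always gets a digit, hence the extra "2" at position 24):
    #     0         1         2   2
    #     0123456789012345678901234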
for exponent in range(num_places, -1, -1):
s = np.full(align_len, " ", dtype=str)
ten_pow = int(math.pow(10, exponent))
        for i in range(0, align_len, min(ten_pow, 10)):
            s[i] = (i // ten_pow) % 10
        s[-1] = ((align_len - 1) // ten_pow) % 10
if not have_printed_pos_label:
out_string_list.append(f"{pos_label:>{tgt_num_width + 2 + name_width}} : " + "".join(s))
have_printed_pos_label = True
else:
out_string_list.append(((tgt_num_width + 2 + name_width+3) * " ") + "".join(s))
# Now that we have our numbered strings, we can add in the query sequence and the alignment string:
out_string_list.append(f"{(tgt_num_width + 2) * ' '}{query_alignment_info.seq_name:>{name_width}} : "
f"{query_alignment_info.alignment_string}")
    # Start the comparison row as all spaces; aligned positions are filled in below:
s = np.full(align_len, " ", dtype=str)
for t in target_aligned_info:
# The starting position is the index of the first character in the alignment string of the first target that
# is not a space:
pos = get_start_index_from_alignment_start_string(t.alignment_string)
# We take advantage of the fact that there will be only ONE alignment to each position and they are ordered:
for t_base in t.alignment_string.lower()[pos:]:
q_base = query_alignment_info.alignment_string[pos].lower()
if q_base == "-":
s[pos] = "~"
elif t_base == "-":
s[pos] = "^"
elif t_base == q_base:
s[pos] = "|"
pos += 1
out_string_list.append(((tgt_num_width + 2 + name_width+3) * " ") + "".join(s))
# Now append all the targets:
for i, target in enumerate(target_aligned_info):
padding = " " * (align_len - len(target.alignment_string))
out_string_list.append(f"{i:>{tgt_num_width}}: {target.seq_name:>{name_width}} : "
f"{target.alignment_string}{padding}")
return "\n".join(out_string_list)
def get_start_index_from_alignment_start_string(target_alignment_string):
"""Get the alignment start index from the given alignment string.
We know that the alignment strings will always start with spaces until
the region that actually aligns to the reference, so we count the number
of spaces in the target_alignment_string to get our starting alignment
position.
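    For example, "  ACGT" yields a start index of 2, while a string of all
    spaces falls back to len(target_alignment_string) - 1.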
"""
try:
start_index = next(i for i, v in enumerate(target_alignment_string) if v != " ")
except StopIteration:
# If we have a string of all spaces, we just need to skip it:
start_index = len(target_alignment_string) - 1
return start_index
def process_raw_results(raw_results_list, minqual, minbases):
"""
Read through the raw results, computing additional statistics and filtering reads we shouldn't consider.
:param raw_results_list: list of TesseraeAlignmentResults.
:param minqual: minimum quality alignment to include (Phred scaled likelihood)
:param minbases: minimum number of bases for an alignment to be retained.
:return: A list of ProcessedAlignmentResult
"""
# The read should be the first entry in the alignment:
read_result = raw_results_list[0] # TesseraeAlignmentResult
last_target_name = None
last_seq = None
last_start_indx = None
last_end_index = None
added_last = False
results = []
for target_name, sequence, target_start_index, target_end_index in raw_results_list[1:]:
# Merge adjacent sequences together!
# Rules for merging:
# Same sequence identifier
# last sequence ends at base just before current sequence starts
# last sequence does not end in a deletion ("-")
# current sequence does not start with a deletion ("-")
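        # e.g. "seg1" aligned over [10-19] followed by "seg1" over [20-30] merges into one "seg1" alignment [10-30].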
if target_name == last_target_name \
and target_start_index == (last_end_index + 1) \
and last_seq[-1] != "-" and sequence[0] != "-":
LOGGER.info("Merging adjacent alignments: %s: [%d - %d] + [%d - %d]", target_name,
last_start_indx, last_end_index, target_start_index, target_end_index)
# Now we can merge the sequences together.
# We append the sequences together trimming whitespace from appropriate parts,
# then we overwrite the start offset:
sequence = last_seq + sequence.lstrip()
target_start_index = last_start_indx
LOGGER.debug(" Merged sequence: %s", sequence)
LOGGER.debug(" Merged target start index: %d", target_start_index)
# We must also remove the most recent entry in our results list, since it corresponds to the
# entry for the "first half" of this alignment:
# TODO: Fix this loop to not duplicate the alignment work.
if added_last and results[-1].seq_name == target_name:
results.pop()
LOGGER.debug("Seq: %s", target_name)
detailed_alignment_info = compute_detailed_alignment_info(
read_result.alignment_string, sequence, target_start_index, target_end_index
)
if detailed_alignment_info.qual_pl > minqual and detailed_alignment_info.template_length >= minbases:
processed_result = ProcessedAlignmentResult(
target_name, sequence, target_start_index, target_end_index,
detailed_alignment_info.start_pos, detailed_alignment_info.end_pos,
detailed_alignment_info.template_length, detailed_alignment_info.cigar,
detailed_alignment_info.qual_pl)
LOGGER.debug("Adding in sequence: %s(q=%d-%d, tgt=%d-%d)", processed_result.seq_name,
processed_result.read_start_pos, processed_result.read_end_pos,
processed_result.target_start_index, processed_result.target_end_index)
results.append(processed_result)
added_last = True
else:
if LOGGER.isEnabledFor(logging.DEBUG):
reason_string = ""
if detailed_alignment_info.template_length < minbases:
reason_string = "[min aligned base: " + \
str(detailed_alignment_info.template_length) + \
" < " + \
str(minbases) + "]"
if detailed_alignment_info.qual_pl <= minqual:
sep = ""
if len(reason_string) > 0:
sep = " ,"
reason_string = reason_string + sep + "[quality - " + str(detailed_alignment_info.qual_pl) + \
" <= " + str(minqual) + "]"
LOGGER.debug("Target does not pass threshold: %s: %s (%s)",
target_name, reason_string, detailed_alignment_info)
added_last = False
# Track our info to the next iteration:
last_target_name = target_name
last_seq = sequence
last_start_indx = target_start_index
last_end_index = target_end_index
# Since the alignments come out in the order they appear in the read sequence,
# we should NOT sort them.
# This will make sequence consolidation and order detection easier.
return results
def remove_overlapping_bounded_seqs(bounded_seq_tuple_list):
"""
Filters the given bounded sequence tuple list such that no sequences overlap.
    If two bounded regions overlap, one is selected by comparing, in order:
        read region length (long to short)
        average alignment quality of the two bounding segments (high to low)
        combined template length (long to short)
        order in the given tuple list (earlier wins remaining ties).
:param bounded_seq_tuple_list: A list of tuples, with each tuple containing exactly two
ProcessedAlignmentResult and representing an alignment to be excised from the parent read.
:return: A set of tuples, with each tuple containing exactly two
ProcessedAlignmentResult and representing an alignment to be excised from the parent read.
Where no tuple overlaps the bounds of another tuple.
"""
filtered_regions = []
filtered_region_set = set()
overlap_list_list = []
overlapping_region_map = set()
# 1 - find all overlapping tuples.
for i, (result_start, result_end) in enumerate(bounded_seq_tuple_list):
overlaps = []
# Adjust for forward / reverse direction:
if result_start.read_start_pos < result_start.read_end_pos:
start = result_start.read_start_pos
end = result_end.read_end_pos
else:
start = result_end.read_end_pos
end = result_start.read_start_pos
for other_result_start, other_result_end in bounded_seq_tuple_list[i+1:]:
# Adjust for forward / reverse direction:
if other_result_start.read_start_pos < other_result_end.read_end_pos:
other_start = other_result_start.read_start_pos
other_end = other_result_end.read_end_pos
else:
other_start = other_result_end.read_end_pos
other_end = other_result_start.read_start_pos
# Other fragment overlaps the start
# Other fragment overlaps the end
# current fragment overlaps the other's start
# current fragment overlaps the other's end
# current fragment is the same as the other's end
            if ((other_result_start, other_result_end) not in overlapping_region_map) and \
                    ((other_start < start < other_end) or (other_start < end < other_end) or
                     (start < other_start < end) or (start < other_end < end) or
                     (start == other_start and end == other_end)):
overlaps.append((other_result_start, other_result_end))
# Do our best not to account for regions more than once:
if (result_start, result_end) in overlapping_region_map:
continue
elif len(overlaps) == 0:
filtered_regions.append((result_start, result_end))
filtered_region_set.add((result_start, result_end))
else:
overlaps.insert(0, (result_start, result_end))
overlap_list_list.append(overlaps)
for r in overlaps:
overlapping_region_map.add(r)
LOGGER.debug("Overlap list lengths: %s", [len(x) for x in overlap_list_list])
if LOGGER.isEnabledFor(logging.DEBUG):
LOGGER.debug("Overlap lists:")
for i, tuple_list in enumerate(overlap_list_list):
LOGGER.debug("List %d:", i)
for t in tuple_list:
LOGGER.debug(" (%s -> %s)", t[0].seq_name, t[1].seq_name)
# Now we have a list of overlapping regions and non-overlapping regions.
# We must resolve the overlaps, and chose only one alignment from each of the overlap lists:
for overlap_list in overlap_list_list:
current_best_region = overlap_list[0]
        best_read_region_len = abs(current_best_region[1].read_end_pos - current_best_region[0].read_start_pos)
        best_avg_qual = (current_best_region[0].overall_quality + current_best_region[1].overall_quality) / 2
        best_template_len = current_best_region[0].template_length + current_best_region[1].template_length
        for overlap_region in overlap_list[1:]:
            # Compute the metrics from the candidate region (not the current best) on each iteration:
            read_region_len = abs(overlap_region[1].read_end_pos - overlap_region[0].read_start_pos)
            avg_qual = (overlap_region[0].overall_quality + overlap_region[1].overall_quality) / 2
            template_len = overlap_region[0].template_length + overlap_region[1].template_length
new_best = False
if read_region_len > best_read_region_len:
new_best = True
elif read_region_len == best_read_region_len:
if avg_qual > best_avg_qual:
new_best = True
elif avg_qual == best_avg_qual:
if template_len > best_template_len:
new_best = True
if new_best:
best_read_region_len = read_region_len
best_avg_qual = avg_qual
best_template_len = template_len
current_best_region = overlap_region
# We have our best. Now we add it:
filtered_regions.append(current_best_region)
LOGGER.debug(" Best region: %s", current_best_region)
if LOGGER.isEnabledFor(logging.DEBUG):
LOGGER.debug("Best region list:")
for r in filtered_regions:
LOGGER.debug(" %s", r)
return filtered_regions
def find_all_bounded_seqs_in_alignment_results(processed_results, seq_boundaries):
"""
Search the given processed results for sequences with the given seq_boundaries.
Accounts for reverse-complemented read sections as well as the given (assumed forward) direction
(as specified by the RC_READ_NAME_IDENTIFIER suffix).
:param processed_results: A list of ProcessedAlignmentResult containing alignments to search.
:param seq_boundaries: List of tuples, with each tuple containing exactly two sequence names.
:return: A list of tuples, with each tuple containing exactly two ProcessedAlignmentResult and representing
an alignment to be excised from the parent read.
"""
hits = []
start_seqs = [b[0] for b in seq_boundaries]
end_seqs = [b[1] for b in seq_boundaries]
# Go through the results and get the position of the first start sequence.
# Then get the first position of corresponding end sequence.
# Continue until you get to the end of the list.
is_open_segment = False
cur_start_result = None
cur_indx = 0
for i, result in enumerate(processed_results):
if is_open_segment:
if (result.seq_name in end_seqs) and ((cur_start_result.seq_name, result.seq_name) in seq_boundaries):
hits.append((cur_start_result, result))
is_open_segment = False
LOGGER.debug("Found bounded subread: [%d - %d]", cur_indx, i)
LOGGER.debug(" %s", cur_start_result)
LOGGER.debug(" %s", result)
elif result.seq_name in start_seqs:
cur_start_result = result
cur_indx = i
is_open_segment = True
num_forward_hits = len(hits)
LOGGER.debug("Found %d forward direction segments for extraction.", num_forward_hits)
# Now we account for the RC versions:
is_open_segment = False
cur_start_result = None
cur_start_name_f = None
cur_indx = 0
for i, result in enumerate(processed_results[::-1]):
# Correct the index for our traversal direction:
i = len(processed_results) - i - 1
        # Skip hits that aren't reverse-complement so we don't mangle forward names:
        if not result.seq_name.endswith(RC_READ_NAME_IDENTIFIER):
            continue
        # Get the equivalent forward sequence name:
        seg_name_f = result.seq_name[:-len(RC_READ_NAME_IDENTIFIER)]
if is_open_segment:
if (seg_name_f in end_seqs) and \
((cur_start_name_f, seg_name_f) in seq_boundaries):
hits.append((cur_start_result, result))
is_open_segment = False
LOGGER.debug("Found bounded subread: [%d - %d]", cur_indx, i)
LOGGER.debug(" %s", cur_start_result)
LOGGER.debug(" %s", result)
elif seg_name_f in start_seqs:
cur_start_result = result
cur_start_name_f = seg_name_f
cur_indx = i
is_open_segment = True
LOGGER.debug("Found %d reverse complement direction segments for extraction.", len(hits) - num_forward_hits)
return hits
def dump_seq_map(seq_map, name='Reads'):
"""Dumps the given sequence map to the log as a DEBUG message."""
if LOGGER.isEnabledFor(logging.DEBUG):
LOGGER.debug('%s:', name)
for e in seq_map.items():
LOGGER.debug(' %s -> %s', e[0], e[1])
def dump_sequence_boundaries(seq_boundaries):
"""Dumps the given sequence list to the log as a DEBUG message."""
if LOGGER.isEnabledFor(logging.DEBUG):
LOGGER.debug('Sequence Boundaries:')
for i, (s, e) in enumerate(seq_boundaries):
LOGGER.debug(' % 2d: %s : %s', i+1, s, e)
def dump_results(results_tuple_list):
"""Dumps the results tuple to the log as a DEBUG message."""
if LOGGER.isEnabledFor(logging.DEBUG):
LOGGER.debug('Results:')
for r in results_tuple_list:
LOGGER.debug(' %s', str(r))
def assert_valid_sequence_boundaries(seq_boundaries, known_segment_names_to_seq_dict):
"""
Validate that each boundary list in the given seq_boundaries contains only known sequences
in the given known_segment_names_to_seq_dict.
:param seq_boundaries: List of tuples, with each tuple containing exactly two sequence names.
:param known_segment_names_to_seq_dict: dictionary of sequence name -> sequence upon which to validate seq_list.
:return: None. Raises a ValueError if the given seq_list is not valid.
"""
for i, (s, e) in enumerate(seq_boundaries):
if s not in known_segment_names_to_seq_dict:
raise ValueError(
f"Boundary {i+1} start is not in the known sequence list: {s}. "
"Sequence names must correspond to a sequence in the given Segments file."
)
elif e not in known_segment_names_to_seq_dict:
raise ValueError(
f"Boundary {i+1} end is not in the known sequence list: {e}. "
"Sequence names must correspond to a sequence in the given Segments file."
)
def create_alignment_targets(known_segments_to_seq_dict, sequence_boundaries):
"""
Create Tesserae Sequence objects to align known segments with Tesserae.
Adds both the given direction and the reverse-complemented direction so that the sequences can be aligned in either
direction. This is significantly faster than running the whole alignment twice.
:param known_segments_to_seq_dict: dict of sequence_name -> sequence_bases
:param sequence_boundaries: List of tuples, with each tuple containing exactly two sequence names.
    :return: list(tesserae.Sequence) containing each boundary sequence in both forward and reverse-complement orientation.
"""
alignment_target_seqs = []
for bound_seqs in sequence_boundaries:
for s in bound_seqs:
alignment_target_seqs.append(Sequence(s, known_segments_to_seq_dict[s]))
alignment_target_seqs.append(
Sequence(s + RC_READ_NAME_IDENTIFIER, reverse_complement(known_segments_to_seq_dict[s]))
)
return alignment_target_seqs
def ingest_fastx_file(file_path):
"""Ingest the contents of a FASTA/FASTQ file and return two dictionaries
onc
1: Mapping from read name to read sequence
2: Mapping from template name to read name
The `template name` is simply the word 'template' with the
order in which a given read occurs in the file
(e.g. 'template0' or 'template10').
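    For example, a two-record file yields template keys 'template0' and
    'template1' mapping to the first and second read names respectively.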
"""
t_num = 0
_read_to_sequence_dict = OrderedDict()
_template_to_read_name_dict = dict()
with pysam.FastxFile(file_path) as file_handle:
for entry in file_handle:
_read_to_sequence_dict[entry.name] = entry.sequence
_template_to_read_name_dict[f"template{t_num}"] = entry.name
t_num += 1
return _read_to_sequence_dict, _template_to_read_name_dict
def ingest_sequence_boundaries(file_path):
"""Ingest the contents of a boundaries file.
A boundaries file is a plain text file with two comma separated sequence names per line.
These sequence names should correspond to the names of sequences in a FASTX
file that is given as an argument to Cartographer.
Sequence names will have all whitespace at the start and end stripped off (after splitting by ",")
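    Example file contents (hypothetical segment names):
        adapter_5p, adapter_3p
        promoter_A, terminator_A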
The validity of sequence names is not checked here."""
seq_boundaries = []
with open(file_path, 'r') as f:
for i, line in enumerate(f.readlines()):
bounds = line.strip().split(",")
if len(bounds) != 2:
raise ValueError(
"Each line in the boundaries file must contain exactly two sequences. "
f"On line {i} found {len(bounds)}",
)
seq_boundaries.append((bounds[0].strip(), bounds[1].strip()))
return seq_boundaries
################################################################################
def extract_read_sections(args):
"""Main CLI call for the Extract Bounded Read Sections tool."""
    # Set up multi-threading if the system will support it:
LOGGER.info("Python version: %s", sys.version.replace('\n', ''))
min_multithread_version = (3, 8)
    if sys.version_info >= min_multithread_version:
        num_threads = max(1, (os.cpu_count() or 2) - 1)
else:
LOGGER.warning(
"Python version to early for multithreading (%d.%d.%d<%d.%d).",
sys.version_info[0],
sys.version_info[1],
sys.version_info[2],
min_multithread_version[0],
min_multithread_version[1],
)
num_threads = 1
LOGGER.info("Setting thread count to: %d", num_threads)
if args.max_read_length:
LOGGER.info("Filtering out reads of length > %d to file: %s", args.max_read_length, args.rejected_outfile)
LOGGER.info("Writing output to %s", args.outfile)
if os.path.exists(args.outfile):
LOGGER.warning("Outfile already exists. Will overwrite: %s", args.outfile)
alignment_algorithm = AlignmentAlgorithm.TESSERAE
if args.aligner:
try:
alignment_algorithm = AlignmentAlgorithm[args.aligner]
except KeyError:
LOGGER.error("Error: You must provide a valid alignment algorithm. Options are: %s",
", ".join([e.name for e in AlignmentAlgorithm]))
sys.exit(1)
LOGGER.info("Alignment Algorithm: %s", alignment_algorithm.name)
LOGGER.info("Ingesting boundaries from %s ...", args.boundaries)
sequence_boundaries = ingest_sequence_boundaries(args.boundaries)
LOGGER.info("Ingested %d sequence boundaries.", len(sequence_boundaries))
dump_sequence_boundaries(sequence_boundaries)
LOGGER.info("Ingesting known segments from %s ...", args.segments)
known_segment_names_to_seq_dict, _ = ingest_fastx_file(args.segments)
LOGGER.info("Ingested %d known segments.", len(known_segment_names_to_seq_dict))
dump_seq_map(known_segment_names_to_seq_dict, "Known Segments")
LOGGER.info("Validating given sequence boundaries...")
assert_valid_sequence_boundaries(sequence_boundaries, known_segment_names_to_seq_dict)
LOGGER.debug("Boundary list is valid.")
# Create an ordered dict of our ordered sequences:
alignment_target_seqs = create_alignment_targets(known_segment_names_to_seq_dict, sequence_boundaries)
# A couple of spacing variables for nice looking logs:
spacing_one = " " * 4
# Open all our files here so they'll be automatically closed:
with open(args.outfile, 'w') as out_file, \
open(args.rejected_outfile, 'w') as rejected_out_file, \
open(args.raw_marker_alignments, 'w') as raw_marker_alignments_file, \
open(args.initial_section_alignments, 'w') as initial_section_alignments, \
open(args.final_section_alignments, 'w') as final_section_alignments_file:
num_sequences_extracted = 0
num_reads_with_sub_sequences = 0
num_forward_subsequences_extracted = 0
num_rc_subsequences_extracted = 0
num_rejected = 0
LOGGER.info("Processing reads...")
with ReadFile(args.reads) as reads_file:
num_reads = 0
for read_num, read_sequence in enumerate(reads_file.get_reads()):
num_reads += 1
LOGGER.debug(
"%sProcessing read %d: %s (len=%d)",
spacing_one,
read_num,
read_sequence.name,
len(read_sequence.sequence)
)
if args.max_read_length and len(read_sequence.sequence) > args.max_read_length:
LOGGER.warning("Ignoring read %d - %s: Length too long: %d > %d", read_num, read_sequence.name,
len(read_sequence.sequence), args.max_read_length)
rejected_out_file.write(f">{read_sequence.name}\n")
rejected_out_file.write(f">{read_sequence.sequence}\n")
num_rejected += 1
continue
bounded_seq_tuple_list = []
LOGGER.info("%sInitial alignment of known segments ...", spacing_one)
segment_alignment_results, query_result = align_sequences(
read_sequence, alignment_target_seqs, args.minqual, args.minbases, p_rec=P_REC_KNOWN,
alignment_type=alignment_algorithm, threads=num_threads
)
if len(segment_alignment_results) != 0:
LOGGER.debug("%sDetermining if expected sequence appears in processed alignment results...",
spacing_one)
                    # Create a position map so we can get real read positions from the alignment string positions:
alignment_read_pos_map = IdentityMap()
if alignment_algorithm != AlignmentAlgorithm.BWA_MEM and \
alignment_algorithm != AlignmentAlgorithm.BWA_ALN:
                        # We only need to adjust the positions if we're not using the BWA aligners:
alignment_read_pos_map = create_alignment_to_base_map(query_result.alignment_string)
# Write our raw marker alignments:
write_marker_alignments_to_output_file(
raw_marker_alignments_file,
read_sequence.name,
segment_alignment_results,
alignment_read_pos_map
)
bounded_seq_tuple_list = find_all_bounded_seqs_in_alignment_results(
segment_alignment_results, sequence_boundaries
)
# Write our initial section alignments:
write_section_alignments_to_output_file(
initial_section_alignments,
read_sequence.name,
bounded_seq_tuple_list,
alignment_read_pos_map
)
LOGGER.info("%sExpected ordered sequences occur %d time(s).", spacing_one, len(bounded_seq_tuple_list))
if len(bounded_seq_tuple_list) == 0:
LOGGER.info("%sThis read has no complete matches for the given known ordered sequences.",
spacing_one)
else:
# We now have a list of ALL matching sequences.
# We need to filter it so that none of the matches overlap
# (that would double-count and would also be silly).
filtered_bounded_seq_tuple_list = remove_overlapping_bounded_seqs(bounded_seq_tuple_list)
                    # Write our final (filtered) section alignments:
                    write_section_alignments_to_output_file(
                        final_section_alignments_file,
                        read_sequence.name,
                        filtered_bounded_seq_tuple_list,
alignment_read_pos_map
)
# Write out our new subsequences to the output fasta file:
write_sub_sequences(read_sequence, filtered_bounded_seq_tuple_list, alignment_read_pos_map, out_file)
# Track subsequence statistics:
num_forward_subsequences_extracted += sum(
not b[0].seq_name.endswith(RC_READ_NAME_IDENTIFIER) for b in filtered_bounded_seq_tuple_list
)
num_rc_subsequences_extracted += sum(
b[0].seq_name.endswith(RC_READ_NAME_IDENTIFIER) for b in filtered_bounded_seq_tuple_list
)
num_sequences_extracted_this_read = len(filtered_bounded_seq_tuple_list)
num_sequences_extracted += num_sequences_extracted_this_read
num_reads_with_sub_sequences += 1
LOGGER.info("Processed %d reads.", num_reads)
if args.max_read_length:
LOGGER.info("Rejected %d reads.", num_rejected)
LOGGER.info("# Reads containing sub-sequences: %d", num_reads_with_sub_sequences)
LOGGER.info("# forward direction sub-sequences extracted: %d", num_forward_subsequences_extracted)
LOGGER.info("# reverse-complemented direction sub-sequences extracted: %d", num_rc_subsequences_extracted)
LOGGER.info("Total # sub-sequences extracted: %d", num_sequences_extracted)
def write_marker_alignments_to_output_file(out_file, read_name, alignments, align_pos_read_pos_map):
"""
Writes the given marker alignments to the given out_file.
Writes one line: the read_name, followed by a basic string representation of each alignment separated by tabs.
If len(alignments) == 0, then does not write anything.
:param out_file: An open File object to which to write the data.
:param read_name: The name of the read to which the given alignments belong.
:param alignments: list(ProcessedAlignmentResult) representing alignments to write to the file.
:param align_pos_read_pos_map: Map from alignment string position to read position.
"""
if len(alignments) > 0:
out_file.write(read_name)
for a in alignments:
LOGGER.debug(f"a.seq_name = {a.seq_name}")
LOGGER.debug(f"a.read_start_pos = {a.read_start_pos}")
LOGGER.debug(f"a.read_end_pos = {a.read_end_pos}")
LOGGER.debug(f"a.overall_quality = {a.overall_quality}")
out_file.write(f"\t{a.seq_name}:"
f"{align_pos_read_pos_map[a.read_start_pos]}"
f"-{align_pos_read_pos_map[a.read_end_pos]}"
f"@{a.overall_quality}")
out_file.write("\n")
def write_section_alignments_to_output_file(out_file, read_name, section_tuples, align_pos_read_pos_map):
"""
    Writes the given section alignments to the given out_file.
Writes one line: the read_name, followed by a basic string representation of each alignment separated by tabs.
If len(section_tuples) == 0, then does not write anything.
:param out_file: An open File object to which to write the data.
:param read_name: The name of the read to which the given alignments belong.
:param section_tuples: A list of tuples, with each tuple containing exactly two ProcessedAlignmentResult and
representing an alignment to be excised from the parent read.
:param align_pos_read_pos_map: Map from alignment string position to read position.
"""
if len(section_tuples) != 0:
out_file.write(read_name)
for s1, s2 in section_tuples:
out_file.write("\t")
out_file.write(f"[{s1.seq_name}:"
f"{align_pos_read_pos_map[s1.read_start_pos]}"
f"-{align_pos_read_pos_map[s1.read_end_pos]}"
f"@{s1.overall_quality}")
out_file.write("<>")
out_file.write(f"{s2.seq_name}:"
f"{align_pos_read_pos_map[s2.read_start_pos]}"
f"-{align_pos_read_pos_map[s2.read_end_pos]}"
f"@{s2.overall_quality}]")
out_file.write("\n")
def create_alignment_to_base_map(alignment_string):
"""
Create a dictionary mapping positions in the given alignment string back to the original string it came from.
Assumes that every base in the original string appears in the alignment string and the only additional characters
are '-' to represent insertions.
:param alignment_string: Alignment string containing original bases or '-' (to indicate insertions).
:return: A dict() mapping positions in the given alignment string back to the original string.
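    For example, the alignment string "AC-GT" yields
    {0: 0, 1: 1, 2: 2, 3: 2, 4: 3}: the '-' at index 2 consumes no original
    base, so alignment indices 2 and 3 both map back to original position 2.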
"""
    position_map = {}
    o_pos = 0
    for i, c in enumerate(alignment_string):
        position_map[i] = o_pos
        if c != "-":
            o_pos += 1
return position_map
def write_sub_sequences(read_sequence, bounded_seq_tuple_list, alignment_read_pos_map, out_fasta_file):
"""
Write out the sections of the given read_sequence as bounded by bounded_seq_list to the given out_file in
FASTA format.
:param read_sequence: A tesserae.Sequence object representing the parent read to the given bounded_seq_list.
:param bounded_seq_tuple_list: A list of tuples, with each tuple containing exactly two ProcessedAlignmentResult
and representing an alignment to be excised from the parent read.
:param alignment_read_pos_map: Dictionary mapping positions in the given alignment string back to the original
string. This is required to decode all positions from alignment string positions to original read positions.
:param out_fasta_file: An open file object to which to write results.
:return: None
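    Output FASTA headers take the form (hypothetical values):
        >read123_100-250_adapter_5p-adapter_3p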
"""
for start_alignment, end_alignment in bounded_seq_tuple_list:
# Do some math here to account for how we create the tuple list:
# And adjust for reverse complements:
start_coord = alignment_read_pos_map[start_alignment.read_start_pos]
end_coord = alignment_read_pos_map[end_alignment.read_end_pos]
start_name = start_alignment.seq_name
end_name = end_alignment.seq_name
if start_alignment.seq_name.endswith(RC_READ_NAME_IDENTIFIER):
            # The coordinates were already mapped above, so just swap them for reverse-complement hits:
            start_coord, end_coord = end_coord, start_coord
# Clean up the disambiguators (md5sums) from the RC names:
decorator_length = len(RC_READ_NAME_IDENTIFIER) - len(RC_READ_NAME_IDENTIFIER_BASE)
start_name = start_alignment.seq_name[:-decorator_length]
end_name = end_alignment.seq_name[:-decorator_length]
out_fasta_file.write(f">{read_sequence.name}_{start_coord}-{end_coord}_"
f"{start_name}-{end_name}\n")
# Quick bounds check on the coords for the read sequence:
if start_coord < end_coord:
out_fasta_file.write(f"{read_sequence.sequence[start_coord:end_coord+1]}\n")
else:
out_fasta_file.write(f"{read_sequence.sequence[end_coord:start_coord+1]}\n")
def get_qual_pl(num_errors, seq_length):
"""
Computes the Phred-Scaled quality score of an alignment.
:param num_errors: Number of errors in the alignment.
:param seq_length: Length of the alignment.
:return: The PL quality.
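    Worked example: 1 error over a 100-base alignment gives
    -10 * log10(1 / 100) = 20.0, so get_qual_pl(1, 100) returns 20.0
    (assuming MIN_ALIGNMENT_PL <= 20.0).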
"""
if num_errors == 0:
return MAX_ALIGNMENT_PL
    q = -10 * math.log10(num_errors / seq_length)
    return max(q, MIN_ALIGNMENT_PL)
def create_alignment_with_bwa_mem(read_sequence, target_sequences, minqual, minbases, threads=1, log_spacing=" "):
"""
    Perform a BWA-MEM2 alignment of the given target sequences against the given read.
    :param read_sequence: tesserae Sequence object against which to align all sequences in target_sequences
    :param target_sequences: ordered list of tesserae Sequence objects to align.
:param minqual: Minimum quality for an alignment to be retained.
:param minbases: Minimum number of bases for an alignment to be retained.
:param threads: number of threads to use when aligning.
:param log_spacing: Spacing to precede any log statements.
:return: A list of ProcessedAlignmentResult objects.
"""
out_file_name = "tmp.sam"
# Write sequences to tmp fasta file:
_, ref_file = tempfile.mkstemp()
_, seq_file = tempfile.mkstemp()
try:
LOGGER.debug("%sCreating tmp \"reference\" fasta file: %s", log_spacing, ref_file)
with open(ref_file, "w", encoding="ascii") as tmp:
tmp.write(f">{read_sequence.name}\n")
tmp.write(f"{read_sequence.sequence}\n")
bwa_index_args = ["/bwa-mem2-2.0pre2_x64-linux/bwa-mem2", "index", ref_file]
LOGGER.debug(
"%sRunning BWA Index on tmp file (%s): %s", log_spacing, ref_file, " ".join(bwa_index_args)
)
_ = subprocess.run(bwa_index_args, capture_output=True, check=True)
LOGGER.debug("%sCreating tmp known segment \"read\" fasta file: %s", log_spacing, seq_file)
seen_targets = set()
with open(seq_file, "w", encoding="ascii") as tmp:
for t in target_sequences:
# We don't need to explicitly align reverse-complemented sequences here:
if t.name.endswith(RC_READ_NAME_IDENTIFIER):
continue
elif t.name not in seen_targets:
tmp.write(f">{t.name}\n")
tmp.write(f"{t.sequence}\n")
seen_targets.add(t.name)
LOGGER.debug("Contents of tmp \"reference\" fasta file:")
with open(ref_file, "r") as f:
for l in f.readlines():
LOGGER.debug(l.rstrip())
LOGGER.debug("Contents of known segment \"read\" fasta file:")
with open(seq_file, "r") as f:
for l in f.readlines():
LOGGER.debug(l.rstrip())
bwa_mem_args = ["/bwa-mem2-2.0pre2_x64-linux/bwa-mem2", "mem",
"-a", # Output all found alignments for single-end or unpaired paired-end reads.
# These alignments will be flagged as secondary alignments.
"-S", # skip mate rescue
"-P", # skip pairing; mate rescue performed unless -S also in use
"-k8", # minimum seed length
"-A", "1", # Matching score.
"-B", "4", # Mismatch penalty.
# The sequence error rate is approximately: {.75 * exp[-log(4) * B/A]}.
"-O", "6,6", # Gap open penalty.
"-E", "1,1", # gap extension penalty; a gap of size k cost '{-O} + {-E}*k'
"-L", "5,5", # penalty for 5'- and 3'-end clipping
"-U", "17", # penalty for an unpaired read pair
"-T", "30", # minimum score to output
"-c", "1000", # skip seeds with more than INT occurrences
"-t", str(threads),
"-o", out_file_name,
ref_file, seq_file]
LOGGER.debug(
"%sRunning BWA Mem on tmp file (%s): %s", log_spacing, ref_file, " ".join(bwa_mem_args)
)
completed_process = subprocess.run(bwa_mem_args,
stdout=subprocess.PIPE, stderr=subprocess.STDOUT, check=True, text=True)
if LOGGER.isEnabledFor(logging.DEBUG):
LOGGER.debug("BWA Mem output:")
for l in completed_process.stdout.split("\n"):
LOGGER.debug(l)
with open(out_file_name, 'rb') as f:
LOGGER.debug("=" * 80)
LOGGER.debug("Raw BWA MEM Alignment:")
for line in f.readlines():
LOGGER.debug("%s", line.decode("ascii").rstrip())
LOGGER.debug("=" * 80)
return get_processed_results_from_bwa_mem_file(out_file_name, minqual, minbases)
except subprocess.CalledProcessError as e:
LOGGER.error("Could not align with BWA Mem!")
LOGGER.error("Stdout: %s", e.stdout.decode("utf-8"))
LOGGER.error("Stderr: %s", e.stderr.decode("utf-8"))
raise e
finally:
os.remove(ref_file)
os.remove(seq_file)
        try:
            os.remove(out_file_name)
        except FileNotFoundError:
            # If the alignment failed, we won't necessarily have an output file:
            pass
def create_alignment_with_bwa_aln(read_sequence, target_sequences, minqual, minbases, threads=1, log_spacing=" "):
"""
    Perform a BWA ALN alignment of the given target sequences against the given read.
    :param read_sequence: tesserae Sequence object against which to align all sequences in target_sequences
    :param target_sequences: ordered list of tesserae Sequence objects to align.
:param minqual: Minimum quality for an alignment to be retained.
:param minbases: Minimum number of bases for an alignment to be retained.
:param threads: number of threads to use when aligning.
:param log_spacing: Spacing to precede any log statements.
    :return: A list of ProcessedAlignmentResult objects.
"""
out_sai_file = "tmp.sai"
out_file_name = "tmp.sam"
# Write sequences to tmp fasta file:
_, ref_file = tempfile.mkstemp()
_, seq_file = tempfile.mkstemp()
try:
LOGGER.debug("%sCreating tmp \"reference\" fasta file: %s", log_spacing, ref_file)
with open(ref_file, "w", encoding="ascii") as tmp:
tmp.write(f">{read_sequence.name}\n")
tmp.write(f"{read_sequence.sequence}\n")
bwa_index_args = ["/bwa/bwa", "index", ref_file]
LOGGER.debug(
"%sRunning BWA Index on tmp file (%s): %s", log_spacing, ref_file, " ".join(bwa_index_args)
)
_ = subprocess.run(bwa_index_args, capture_output=True, check=True)
LOGGER.debug("%sCreating tmp known segment \"read\" fasta file: %s", log_spacing, seq_file)
seen_targets = set()
with open(seq_file, "w", encoding="ascii") as tmp:
for t in target_sequences:
# We don't need to explicitly align reverse-complemented sequences here:
if t.name.endswith(RC_READ_NAME_IDENTIFIER):
continue
elif t.name not in seen_targets:
tmp.write(f">{t.name}\n")
tmp.write(f"{t.sequence}\n")
seen_targets.add(t.name)
LOGGER.debug("Contents of tmp \"reference\" fasta file:")
with open(ref_file, "r") as f:
for l in f.readlines():
LOGGER.debug(l.rstrip())
LOGGER.debug("Contents of known segment \"read\" fasta file:")
with open(seq_file, "r") as f:
for l in f.readlines():
LOGGER.debug(l.rstrip())
# Run the alignment:
bwa_aln_args = [
"/bwa/bwa", "aln",
"-n 0.04", # max #diff (int) or missing prob under 0.02 err rate (float) [0.04]
"-o 1", # maximum number or fraction of gap opens [1]
"-e -1", # maximum number of gap extensions, -1 for disabling long gaps [-1]
"-d 10", # maximum occurrences for extending a long deletion [10]
"-k 2", # maximum differences in the seed [2]
"-M 3", # mismatch penalty [3]
"-O 11", # gap open penalty [11]
"-E 4", # gap extension penalty [4]
"-l 8", # seed length [32]
"-i 1", # do not put an indel within INT bp towards the ends [5]
"-R 30", # stop searching when there are >INT equally best hits
"-q 0", # quality threshold for read trimming down to 35bp [0]
"-t", str(threads),
"-f", out_sai_file,
ref_file,
seq_file
]
LOGGER.debug(
"%sRunning BWA ALN on tmp file (%s): %s", log_spacing, ref_file, " ".join(bwa_aln_args)
)
subprocess.run(bwa_aln_args, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, check=True, text=True)
# Convert the alignment to sam file:
bwa_samse_args = [
"/bwa/bwa", "samse",
f"-n {len(target_sequences)}", # Maximum number of alignments to output in the XA tag for
# reads paired properly. If a read has more than INT hits, the
# XA tag will not be written. [3]
f"-f{out_file_name}",
ref_file,
out_sai_file,
seq_file
]
LOGGER.debug(
f"{log_spacing}Running BWA SAMSE on tmp SAI file ({out_sai_file}): {' '.join(bwa_samse_args)}"
)
completed_process = subprocess.run(bwa_samse_args,
stdout=subprocess.PIPE, stderr=subprocess.STDOUT, check=True, text=True)
if LOGGER.isEnabledFor(logging.DEBUG):
LOGGER.debug("BWA ALN output:")
for l in completed_process.stdout.split("\n"):
LOGGER.debug(l)
with open(out_file_name, 'rb') as f:
LOGGER.debug("=" * 80)
LOGGER.debug("Raw BWA ALN Alignment:")
for line in f.readlines():
LOGGER.debug("%s", line.decode("ascii").rstrip())
LOGGER.debug("=" * 80)
return get_processed_results_from_bwa_aln_file(out_file_name, minqual, minbases)
except subprocess.CalledProcessError as e:
LOGGER.error("Could not align with BWA ALN!")
LOGGER.error("Stdout: %s", e.stdout.decode("utf-8"))
LOGGER.error("Stderr: %s", e.stderr.decode("utf-8"))
raise e
finally:
os.remove(ref_file)
os.remove(seq_file)
        try:
            os.remove(out_sai_file)
        except FileNotFoundError:
            # If the alignment failed, we won't necessarily have an output file:
            pass
        try:
            os.remove(out_file_name)
        except FileNotFoundError:
            # If the alignment failed, we won't necessarily have an output file:
            pass
def create_raw_alignment_with_mosaic_aligner(read_sequence, target_sequences, log_spacing=" "):
"""
    Perform a MosaicAligner alignment of the given target sequences against the given read.
    :param read_sequence: tesserae Sequence object against which to align all sequences in target_sequences
    :param target_sequences: ordered list of tesserae Sequence objects to align.
:param log_spacing: Spacing to precede any log statements.
:return: A list of TesseraeAlignmentResult objects.
"""
out_file_name = "align.txt"
# Write sequences to tmp fasta file:
fd, seq_file = tempfile.mkstemp()
LOGGER.debug("%sCreating tmp fasta file: %s", log_spacing, seq_file)
seen_targets = set()
try:
with open(seq_file, "w", encoding="ascii") as tmp:
tmp.write(f">{read_sequence.name}\n")
tmp.write(f"{read_sequence.sequence}\n")
for t in target_sequences:
if t.name not in seen_targets:
tmp.write(f">{t.name}\n")
tmp.write(f"{t.sequence}\n")
seen_targets.add(t.name)
LOGGER.debug("Contents of tmp fasta file:")
with open(seq_file, "r") as f:
for l in f.readlines():
LOGGER.debug(l.rstrip())
mosaic_aligner_args = ["/MosaicAligner/mosaic", "-breakOnLowerSeqPos", "-nt", "-seq", seq_file]
LOGGER.debug(
"%sRunning MosaicAligner on tmp file (%s): %s", log_spacing, seq_file, " ".join(mosaic_aligner_args)
)
_ = subprocess.run(mosaic_aligner_args, capture_output=False, check=True)
if LOGGER.isEnabledFor(logging.DEBUG):
with open(out_file_name, 'rb') as f:
LOGGER.debug("=" * 80)
LOGGER.debug("Raw MosaicAligner Alignment:")
for line in f.readlines():
LOGGER.debug("%s", line.decode("ascii").rstrip())
LOGGER.debug("=" * 80)
return get_raw_results_from_mosaic_aligner_file(out_file_name)
except subprocess.CalledProcessError as e:
LOGGER.error("Could not align with MosaicAligner!")
LOGGER.error("Stdout: %s", e.stdout.decode("utf-8"))
LOGGER.error("Stderr: %s", e.stderr.decode("utf-8"))
raise e
finally:
os.remove(seq_file)
        try:
            os.remove(out_file_name)
        except FileNotFoundError:
            # If the alignment failed, we won't necessarily have an output file:
            pass
def get_processed_results_from_bwa_mem_file(file_path, minqual, minbases):
"""
Ingests the given file and creates raw alignments from it.
:param file_path: Path to the sam file of a BWA MEM run.
:param minqual: Minimum quality for an alignment to be retained.
:param minbases: Minimum number of bases for an alignment to be retained.
:return: A list of ProcessedAlignmentResult objects.
"""
processed_results = []
    # Only primary hits carry sequence information, so we have to read it off first.
    # There shouldn't be very many reads, so this extra pass is a little slow but OK.
read_seqs = dict()
with pysam.AlignmentFile(file_path, 'r', check_sq=False) as f:
for read in f.fetch(until_eof=True):
if read.query_sequence:
read_seqs[read.query_name] = read.query_sequence
with pysam.AlignmentFile(file_path, 'r', check_sq=False) as f:
for read in f.fetch(until_eof=True):
if read.is_unmapped:
continue
seq_name = read.query_name
if read.is_reverse:
seq_name = seq_name + RC_READ_NAME_IDENTIFIER
bases = read_seqs[read.query_name]
template_length = read.infer_read_length()
qual_pl = get_qual_pl(read.get_tag("NM"), template_length)
# Get the leading and trailing clips so we can remove them from the aligned string:
leading_clips = 0
for e in read.cigartuples:
if e[0] == pysam.CSOFT_CLIP or e[0] == pysam.CHARD_CLIP:
leading_clips += e[1]
else:
break
trailing_clips = 0
for e in read.cigartuples[::-1]:
if e[0] == pysam.CSOFT_CLIP or e[0] == pysam.CHARD_CLIP:
trailing_clips += e[1]
else:
break
# Note - must adjust ref start/end pos to align properly with conventions from other aligners
# (other aligner conventions: 1-based coordinates, inclusive end positions)
p = ProcessedAlignmentResult(
seq_name, bases, leading_clips, len(bases)-trailing_clips-1,
int(read.reference_start), int(read.reference_end-1),
template_length, tuple(read.cigartuples), qual_pl
)
# Check against thresholds to make sure we should report the alignment:
if qual_pl < minqual or template_length < minbases:
if qual_pl < minqual and template_length < minbases:
reason_string = f"qual too low ({qual_pl} < {minqual}) " \
f"AND aligment too short ({template_length} < {minbases})"
elif template_length < minbases:
reason_string = f"aligment too short ({template_length} < {minbases})"
else:
reason_string = f"qual too low ({qual_pl} < {minqual})"
LOGGER.debug("Target does not pass threshold: %s: %s (%s)", seq_name, reason_string, p)
else:
processed_results.append(p)
# Sort by the order in which they appear in the read.
# This is _VERY_IMPORTANT_ for finding the ordered regions in a following step.
processed_results.sort(key=lambda x: x.read_start_pos)
for r in processed_results:
LOGGER.debug(" %s", r)
return processed_results
def parse_cigar_string_to_tuples(cigar_string):
"""
Parse the given cigar string into a list of cigar tuples.
:param cigar_string: String containing CIGAR information.
:return: Tuple of cigar tuples corresponding to the given CIGAR string.
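    For example, assuming STRING_CIGAR_ELEMENT_MAP maps 'M', 'I', and 'D' to
    the pysam constants, "10M2I3D" parses to
    ((pysam.CMATCH, 10), (pysam.CINS, 2), (pysam.CDEL, 3)).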
"""
    cigar_tuple_list = []
    buf = []
    for c in cigar_string:
        if c.isdigit():
            # Still in the count portion of the cigar element:
            buf.append(c)
        else:
            # Now we're at the operator character:
            try:
                v = STRING_CIGAR_ELEMENT_MAP[c]
            except KeyError:
                raise KeyError(f"Error: No such cigar element: {c}")
            cigar_tuple_list.append((v, int("".join(buf))))
            buf = []
    return tuple(cigar_tuple_list)
def get_processed_results_from_bwa_aln_file(file_path, minqual, minbases):
"""
Ingests the given file and creates raw alignments from it.
This method is VERY similar to the BWA MEM equivalent, but with an epilog to handle XA tags for each read.
:param file_path: Path to the sam file of a BWA ALN run.
:param minqual: Minimum quality for an alignment to be retained.
:param minbases: Minimum number of bases for an alignment to be retained.
:return: A list of ProcessedAlignmentResult objects.
"""
processed_results = []
    # Only primary hits carry sequence information, so we have to read it off first.
    # There shouldn't be very many reads, so this extra pass is a little slow but OK.
read_seqs = dict()
with pysam.AlignmentFile(file_path, 'r', check_sq=False) as f:
for read in f.fetch(until_eof=True):
if read.query_sequence:
read_seqs[read.query_name] = read.query_sequence
with pysam.AlignmentFile(file_path, 'r', check_sq=False) as f:
for read in f.fetch(until_eof=True):
if read.is_unmapped:
continue
seq_name = read.query_name
if read.is_reverse:
seq_name = seq_name + RC_READ_NAME_IDENTIFIER
bases = read_seqs[read.query_name]
template_length = read.infer_read_length()
qual_pl = get_qual_pl(read.get_tag("NM"), template_length)
# Get the leading and trailing clips so we can remove them from the aligned string:
leading_clips = 0
for e in read.cigartuples:
if e[0] == pysam.CSOFT_CLIP or e[0] == pysam.CHARD_CLIP:
leading_clips += e[1]
else:
break
trailing_clips = 0
for e in read.cigartuples[::-1]:
if e[0] == pysam.CSOFT_CLIP or e[0] == pysam.CHARD_CLIP:
trailing_clips += e[1]
else:
break
# Make a list of candidate read alignments so we can filter them all later:
candidate_alignments = []
# Note - must adjust ref start/end pos to align properly with conventions from other aligners
# (other aligner conventions: 1-based coordinates, inclusive end positions)
p = ProcessedAlignmentResult(
seq_name, bases, leading_clips, len(bases)-trailing_clips-1,
int(read.reference_start), int(read.reference_end-1),
template_length, tuple(read.cigartuples), qual_pl
)
candidate_alignments.append(p)
# Get all XA alignments as well:
candidate_alignments.extend(process_xa_tags(read, read_seqs))
# Process all alignments with the same rules:
for p in candidate_alignments:
# Check against thresholds to make sure we should report the alignment:
if p.overall_quality < minqual or p.template_length < minbases:
if p.overall_quality < minqual and p.template_length < minbases:
reason_string = f"qual too low ({p.overall_quality} < {minqual}) " \
f"AND aligment too short ({p.template_length} < {minbases})"
elif p.template_length < minbases:
reason_string = f"aligment too short ({p.template_length} < {minbases})"
else:
reason_string = f"qual too low ({p.overall_quality} < {minqual})"
LOGGER.debug("Target does not pass threshold: %s: %s (%s)", p.seq_name, reason_string, p)
else:
processed_results.append(p)
# Sort by the order in which they appear in the read.
# This is _VERY_IMPORTANT_ for finding the ordered regions in a following step.
processed_results.sort(key=lambda x: x.read_start_pos)
for r in processed_results:
LOGGER.debug(" %s", r)
return processed_results
def process_xa_tags(read, read_seqs):
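    """
    Expand the XA (alternative hits) tag of the given read into ProcessedAlignmentResult objects.
    BWA encodes alternative alignments in the XA tag as a semicolon-separated
    list of "ref,(+|-)pos,CIGAR,NM" entries, where a '-' position indicates a
    reverse-strand hit. XA entries carry no bases, so sequences are looked up
    in read_seqs. Returns an empty list if the read has no XA tag.
    :param read: pysam.AlignedSegment whose XA tag should be expanded.
    :param read_seqs: dict of read name -> read bases.
    :return: A list of ProcessedAlignmentResult objects for the alternative hits.
    """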
xa_processed_reads = []
try:
# split the XA tags by delimiters:
alternate_alignments = read.get_tag("XA").split(';')
for xa_alignment_string in alternate_alignments:
if len(xa_alignment_string) == 0:
continue
LOGGER.debug("XA Alignment: %s", xa_alignment_string)
# Parse the data into an aligned segment object:
ref_name, pos, cigar, edit_dist = xa_alignment_string.split(',')
pos = int(pos)
edit_dist = int(edit_dist)
seq_name = read.query_name
xa_read = pysam.AlignedSegment()
xa_read.query_name = seq_name
xa_read.query_sequence = read_seqs[seq_name]
            xa_read.is_reverse = (pos < 0)
            # XA positions are 1-based and signed by strand; pysam's reference_start is 0-based:
            xa_read.reference_start = abs(pos) - 1
xa_read.cigartuples = parse_cigar_string_to_tuples(cigar)
xa_read.set_tag("NM", edit_dist, value_type='i')
if pos < 0:
seq_name = seq_name + RC_READ_NAME_IDENTIFIER
template_length = xa_read.infer_read_length()
qual_pl = get_qual_pl(edit_dist, template_length)
# Get the leading and trailing clips so we can remove them from the aligned string:
leading_clips = 0
for e in xa_read.cigartuples:
if e[0] == pysam.CSOFT_CLIP or e[0] == pysam.CHARD_CLIP:
leading_clips += e[1]
else:
break
trailing_clips = 0
for e in xa_read.cigartuples[::-1]:
if e[0] == pysam.CSOFT_CLIP or e[0] == pysam.CHARD_CLIP:
trailing_clips += e[1]
else:
break
# Note - must adjust ref start/end pos to align properly with conventions from other aligners
# (other aligner conventions: 1-based coordinates, inclusive end positions)
p = ProcessedAlignmentResult(
seq_name, xa_read.query_sequence, leading_clips, len(xa_read.query_sequence) - trailing_clips - 1,
int(xa_read.reference_start), int(xa_read.reference_end-1),
template_length, tuple(xa_read.cigartuples), qual_pl
)
LOGGER.debug(f"Adding: {p}")
xa_processed_reads.append(p)
    except KeyError:
        # The read has no XA tag (or no stored sequence), so there is nothing to expand:
        return []
return xa_processed_reads
def get_raw_results_from_mosaic_aligner_file(file_path="align.txt"):
"""
Ingests the given file and creates raw alignments from it.
:param file_path: Path to the results of a MosaicAligner run.
:return: A list of TesseraeAlignmentResult objects.
"""
raw_results = []
with open(file_path, "r") as f:
        # The only result we need is the first one in the file, so skip ahead
        # to its "Target" header (guarding against a missing header at EOF):
        line = f.readline()
        while line and not line.startswith("Target"):
            line = f.readline()
# Now we're at our first alignment line (corresponding to the query line).
# We can now iterate through the rest of the lines:
line = f.readline().rstrip()
while len(line) > 1:
# Add in all our results:
match = MOSAIC_ALIGNER_LINE_MATCH_PATTERN.match(line)
if match:
t = TesseraeAlignmentResult(
match.group(1),
match.group(4),
int(match.group(2)),
int(match.group(3)),
)
raw_results.append(t)
line = f.readline().rstrip()
return raw_results
def align_sequences(read_sequence, target_sequences, minqual, minbases,
p_rec=DEFAULT_REC, alignment_type=AlignmentAlgorithm.TESSERAE,
threads=1, log_spacing=" "):
"""
    Perform an alignment of the given target sequences against the given read.
    :param read_sequence: tesserae Sequence object against which to align all sequences in target_sequences
    :param target_sequences: ordered list of tesserae Sequence objects to align.
:param minqual: Minimum quality for an alignment to be retained.
:param minbases: Minimum number of bases for an alignment to be retained.
:param p_rec: Prior probability of recombination event taking place.
:param alignment_type: AlignmentAlgorithm object specifying which alignment algorithm to use.
:param threads: Number of threads with which to run Tesserae alignment.
:param log_spacing: Spacing to precede any log statements.
:return: A tuple of results and the read sequence result (list(ProcessedAlignmentResult), TesseraeAlignmentResult)
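    Illustrative call (hypothetical read and segment; Sequence is the tesserae type used throughout):
        results, query = align_sequences(
            Sequence("read1", "ACGTACGTACGT"), [Sequence("seg1", "ACGT")],
            minqual=7.0, minbases=3)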
"""
# BWA MEM Alignments are sufficiently different that we have to handle them separately:
if alignment_type == AlignmentAlgorithm.BWA_MEM:
start_time = time.time()
processed_results = create_alignment_with_bwa_mem(read_sequence, target_sequences, minqual, minbases,
threads=threads)
end_time = time.time()
LOGGER.info("%sCreated %d Alignments. Alignment took %fs", log_spacing, len(processed_results),
end_time - start_time)
        query_result = TesseraeAlignmentResult(
            read_sequence.name, read_sequence.sequence, 0, len(read_sequence.sequence)
        )
return processed_results, query_result
# BWA ALN Alignments are sufficiently different that we have to handle them separately:
if alignment_type == AlignmentAlgorithm.BWA_ALN:
start_time = time.time()
processed_results = create_alignment_with_bwa_aln(read_sequence, target_sequences, minqual, minbases,
threads=threads)
end_time = time.time()
LOGGER.info("%sCreated %d Alignments. Alignment took %fs", log_spacing, len(processed_results),
end_time - start_time)
query_result = TesseraeAlignmentResult(read_sequence.name,
read_sequence.sequence,
0,
len(read_sequence.sequence))
return processed_results, query_result
# Handle all other alignment types here:
start_time = time.time()
if alignment_type == AlignmentAlgorithm.MOSAIC_ALIGNER:
raw_results = create_raw_alignment_with_mosaic_aligner(read_sequence, target_sequences)
elif alignment_type == AlignmentAlgorithm.TESSERAE:
raw_results = Tesserae(prho=p_rec, threads=threads).align(read_sequence, target_sequences)
elif alignment_type == AlignmentAlgorithm.SMITH_WATERMAN:
LOGGER.error("SMITH WATERMAN not implemented yet for structural alignment.")
sys.exit(1)
else:
LOGGER.error("%s not implemented yet for structural alignment.", alignment_type.name)
sys.exit(1)
end_time = time.time()
LOGGER.info("%sCreated %d Alignments. Alignment took %fs", log_spacing, len(raw_results), end_time - start_time)
dump_results(raw_results)
query_result = raw_results[0]
LOGGER.debug(create_pretty_alignment_string(query_result, raw_results[1:]))
# Clean alignment results and calculate qualities:
LOGGER.debug("%sProcessing raw results...", log_spacing)
start_time = time.time()
alignment_results = process_raw_results(raw_results, minqual, minbases)
end_time = time.time()
LOGGER.debug("%sProcessing results took %fs", log_spacing, end_time - start_time)
LOGGER.debug("%sDumping processed results:", log_spacing)
dump_results(alignment_results)
if len(alignment_results) != 0:
LOGGER.debug(create_pretty_alignment_string(query_result, alignment_results))
return alignment_results, query_result
################################################################################
def main(raw_args):
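    """Parse command-line arguments and run the Extract Bounded Read Sections tool."""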
# Get our start time:
overall_start = time.time()
parser = argparse.ArgumentParser(
description="Ingests three files: "
"one read file(FASTA/FASTQ/SAM/BAM) containing reads to be mapped, "
"one FASTA file containing known possible sequences that can occur in the read,"
"and a file containing read sequence boundaries. This sequence boundaries file is"
"a plain text file with two comma separated sequence names per line. The names should correspond"
"to the sequence names in the given sequence FASTA file.",
usage="extract read sections bounded by given known sequences into a new fasta file",
)
align_required_args = parser.add_argument_group("required arguments")
align_required_args.add_argument(
"-r", "--reads", help="Reads SAM/BAM/FASTA/FASTQ file.", required=True
)
align_required_args.add_argument(
"-s", "--segments", help="Segments FASTA/FASTQ file.", required=True
)
align_required_args.add_argument(
"-b", "--boundaries", help="Sequence boundaries file.", required=True
)
parser.add_argument(
"-o",
"--outfile",
help="Output file in which to store alignment results.",
default="extracted_bounded_sub_reads.fasta",
required=False,
)
parser.add_argument(
"-m",
"--minqual",
help=f"Minimum quality for good alignment (default: {MIN_GOOD_ALIGNMENT_PL})",
default=MIN_GOOD_ALIGNMENT_PL,
type=float
)
parser.add_argument(
"-n",
"--minbases",
help=f"Minimum number of bases for an alignment to be retained (default: {MIN_ALIGNMENT_LENGTH})",
default=MIN_ALIGNMENT_LENGTH,
type=float
)
parser.add_argument(
"--prec_known",
help=f"Probability of recombination for known segment alignment (default: {P_REC_KNOWN})",
default=P_REC_KNOWN,
type=float
)
parser.add_argument(
"--prec_unknown",
help=f"Probability of recombination for UNKNOWN segment alignment (default: {P_REC_UNKNOWN})",
default=P_REC_UNKNOWN,
type=float
)
parser.add_argument(
"--max_read_length",
help=f"Maximum read length to be processed. Reads exceeding this length will be written to a rejects file. "
f"This option is off by default (no filtering will occur).",
type=int,
required=False,
)
parser.add_argument(
"--rejected_outfile",
help="Output file in which to store rejected reads.",
default="extracted_bounded_sub_reads.rejected.fasta",
required=False,
)
parser.add_argument(
"--raw_marker_alignments",
help="Output file in which to store raw alignments of markers for each section for each read.",
default="extracted_bounded_sub_reads.raw_marker_alignments.txt",
required=False,
)
parser.add_argument(
"--initial_section_alignments",
help="Output file in which to store initial section alignments per read.",
default="extracted_bounded_sub_reads.initial_section_alignments.txt",
required=False,
)
parser.add_argument(
"--final_section_alignments",
help="Output file in which to store filtered/final section alignments per read.",
default="extracted_bounded_sub_reads.final_section_alignments.txt",
required=False,
)
parser.add_argument(
"-A",
"--aligner",
help="Use the given aligner. [" + ", ".join([e.name for e in AlignmentAlgorithm]) + "]",
type=str,
required=False
)
verbosity_group = parser.add_mutually_exclusive_group()
verbosity_group.add_argument(
"-q", "--quiet", help="silence logging except errors", action="store_true"
)
verbosity_group.add_argument(
"-v", "--verbose", help="increase output verbosity", action="store_true"
)
verbosity_group.add_argument(
"-vv", "--veryverbose", help="maximal output verbosity", action="store_true"
)
# ---------------------------------------
# Parse args
# args = parser.parse_args(args=raw_args)
args = parser.parse_args()
configure_logging(args)
# Print logo:
print_logo(args.aligner)
# Log our command-line and log level so we can have it in the log file:
LOGGER.info("Invoked by: %s", " ".join(raw_args))
LOGGER.info("Complete runtime configuration settings:")
for name, val in vars(args).items():
LOGGER.info(" %s = %s", name, val)
LOGGER.info("Log level set to: %s", logging.getLevelName(logging.getLogger().level))
# Call our main method:
extract_read_sections(args)
overall_end = time.time()
LOGGER.info("Elapsed time: %f", overall_end - overall_start)
################################################################################
if __name__ == '__main__':
main(sys.argv)
|
18a19f36cd8224d63d51990fa9a77b0b5805a129
|
61b95ee2aefbcfbd6c4abf9511d976d0b9d0e100
|
/faker/providers/automotive/fi_FI/__init__.py
|
aa9420bf49498bfffbcc8dba68c79fe18d6ef484
|
[
"MIT"
] |
permissive
|
joke2k/faker
|
fed7472580ced2bce326fe4ea0c3d1c810853d5e
|
33e36b1b6cc9c6f039fe387988853771bab60624
|
refs/heads/master
| 2023-09-04T00:43:33.599705
| 2023-08-31T16:15:04
| 2023-08-31T16:15:04
| 6,662,075
| 14,544
| 2,215
|
MIT
| 2023-09-11T16:06:14
| 2012-11-12T23:00:09
|
Python
|
UTF-8
|
Python
| false
| false
| 276
|
py
|
__init__.py
|
from .. import Provider as AutomotiveProvider
class Provider(AutomotiveProvider):
"""Implement automotive provider for ``fi_FI`` locale.
Source:
- https://en.wikipedia.org/wiki/Vehicle_registration_plates_of_Finland
"""
license_formats = ("???-###",)
|
882f036711c73391ffeb058f2b390a5c54efa313
|
0c8ac66ae050e1a98dd8afd7525c9ed74ec5d300
|
/django_school_management/articles/migrations/0003_auto_20210125_1402.py
|
ce29b97aca11f5cb3ae4af73744b37537f13ebc3
|
[] |
no_license
|
TareqMonwer/Django-School-Management
|
5b1c8145d04082063bc14fc9db1ce38b4db97a9d
|
3d425d300a77ad505089a3a4c0a9dc71cacbe89a
|
refs/heads/master
| 2023-08-19T23:36:34.359488
| 2023-08-13T05:53:42
| 2023-08-13T05:53:42
| 221,053,244
| 409
| 163
| null | 2023-08-13T05:53:44
| 2019-11-11T19:22:28
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 545
|
py
|
0003_auto_20210125_1402.py
|
# Generated by Django 2.2.13 on 2021-01-25 08:02
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('articles', '0002_category'),
]
operations = [
migrations.AlterModelOptions(
name='category',
options={'verbose_name_plural': 'categories'},
),
migrations.AddField(
model_name='article',
name='categories',
field=models.ManyToManyField(blank=True, to='articles.Category'),
),
]
|
cee960061b36791acaa63e0658cb2cbbd98fe802
|
71c970991d67ea8998ef685f83fe585b7825a791
|
/python/runtime/xgboost/predict.py
|
79e4b63e1b06b3daff1e61f25420a6368b712ee5
|
[
"Apache-2.0"
] |
permissive
|
sql-machine-learning/sqlflow
|
a0ba932a8b0605700d5aca591802f7a99e0ea1ae
|
6c492098320875427b08ad82ce3f874c0b6aaa7a
|
refs/heads/develop
| 2023-03-09T04:58:03.460647
| 2022-05-13T01:29:48
| 2022-05-13T01:29:48
| 151,525,500
| 5,257
| 799
|
Apache-2.0
| 2023-02-25T11:53:32
| 2018-10-04T06:00:50
|
Go
|
UTF-8
|
Python
| false
| false
| 6,774
|
py
|
predict.py
|
# Copyright 2020 The SQLFlow Authors. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from datetime import datetime
import numpy as np
import xgboost as xgb
from runtime import db
from runtime.dbapi.paiio import PaiIOConnection
from runtime.model.metadata import load_metadata
from runtime.xgboost.dataset import DMATRIX_FILE_SEP, xgb_dataset
DEFAULT_PREDICT_BATCH_SIZE = 10000
def pred(datasource,
select,
feature_metas,
feature_column_names,
train_label_meta,
pred_label_meta,
result_table,
is_pai=False,
pai_table="",
model_params=None,
train_params=None,
transform_fn=None,
feature_column_code="",
flags=None):
rank = 0
nworkers = len(flags.worker_hosts.split(",")) if flags else 1
if nworkers > 1:
if not is_pai:
raise Exception(
"XGBoost distributed predict is only supported on PAI")
if flags.job_name != "worker":
return # ignore ps
rank = flags.task_index
pred_imp(datasource, select, feature_metas, feature_column_names,
train_label_meta, pred_label_meta, result_table, is_pai,
pai_table, model_params, train_params, transform_fn,
feature_column_code, rank, nworkers)
def pred_imp(datasource,
select,
feature_metas,
feature_column_names,
train_label_meta,
pred_label_meta,
result_table,
is_pai=False,
pai_table="",
model_params=None,
train_params=None,
transform_fn=None,
feature_column_code="",
rank=0,
nworkers=1):
print("rank={} nworkers={}".format(rank, nworkers))
if not is_pai:
conn = db.connect_with_data_source(datasource)
else:
conn = PaiIOConnection.from_table(pai_table)
dpred = xgb_dataset(
datasource=datasource,
fn='predict.txt',
dataset_sql=select,
feature_metas=feature_metas,
feature_column_names=feature_column_names,
label_meta=None,
is_pai=is_pai,
pai_table=pai_table,
pai_single_file=True,
cache=True,
batch_size=DEFAULT_PREDICT_BATCH_SIZE,
rank=rank,
nworkers=nworkers,
transform_fn=transform_fn,
feature_column_code=feature_column_code,
raw_data_dir="predict.raw.dir") # NOTE: default to use external memory
    bst = xgb.Booster({'nthread': 4})  # init model
    bst.load_model("my_model")  # load the trained model
print("{} Start predicting XGBoost model...".format(datetime.now()))
if not model_params:
model_params = load_metadata("model_meta.json")["attributes"]
selected_cols = db.selected_cols(conn, select)
feature_file_id = 0
train_label_name = train_label_meta["feature_name"]
pred_label_name = pred_label_meta["feature_name"]
for pred_dmatrix in dpred:
predict_and_store_result(bst, pred_dmatrix, feature_file_id,
model_params, selected_cols, train_label_name,
pred_label_name, feature_column_names,
feature_metas, is_pai, conn, result_table,
rank)
feature_file_id += 1
print("{} Done predicting. Predict table: {}".format(
datetime.now(), result_table))
def predict_and_store_result(bst,
dpred,
feature_file_id,
model_params,
selected_cols,
train_label_name,
pred_label_name,
feature_column_names,
feature_metas,
is_pai,
conn,
result_table,
slice_id=0):
preds = bst.predict(dpred)
if model_params:
obj = model_params["objective"]
        # binary:logistic outputs probabilities, so threshold at 0.5 to get
        # class labels (binary:hinge would output class labels directly)
        if obj == "binary:logistic":
            preds = (preds > 0.5).astype(int)
elif obj == "multi:softprob":
preds = np.argmax(np.array(preds), axis=1)
elif obj == "multi:softmax":
# multi:softmax output class labels
# Need to convert to int. Otherwise, the
# table writer of MaxCompute would cause
# error because of writing float values.
preds = np.array(preds).astype(int)
# TODO(typhoonzero): deal with binary:logitraw when needed.
else:
# prediction output with multi-class job has two dimensions, this
# is a temporary way, can remove this else branch when we can load
# the model meta not only on PAI submitter.
if len(preds.shape) == 2:
preds = np.argmax(np.array(preds), axis=1)
if is_pai:
feature_file_read = open("predict.txt.raw", "r")
else:
feature_file_read = open(
"predict.raw.dir/predict.txt_%d" % feature_file_id, "r")
result_column_names = selected_cols[:]
    # Remove train_label_name from the result columns. If train_label_name is
    # "" or it was not among the selected columns, index() raises ValueError
    # and the index is set to -1 below.
try:
train_label_index = selected_cols.index(train_label_name)
except ValueError:
train_label_index = -1
if train_label_index != -1:
del result_column_names[train_label_index]
result_column_names.append(pred_label_name)
line_no = 0
with db.buffered_db_writer(conn, result_table, result_column_names, 100,
slice_id) as w:
while True:
line = feature_file_read.readline()
if not line:
break
# FIXME(typhoonzero): how to output columns that are not used
# as features, like ids?
row = [
item
for i, item in enumerate(line.strip().split(DMATRIX_FILE_SEP))
if i != train_label_index
]
row.append(preds[line_no])
w.write(row)
line_no += 1
|
09720e64bde8a62ef6db342b09a5e89bb2909f28
|
4129d5b10c0ac8288db205f91ed45a40b812ef5c
|
/photutils/detection/__init__.py
|
76e39be3a57007d855e5d9543baae9e8bbe9a49c
|
[
"BSD-3-Clause"
] |
permissive
|
astropy/photutils
|
163762aa560fd13c8a4a49aff2d6b0a522cedbcc
|
a6d629774c52cc82af18d0444c6e5584e5d0b492
|
refs/heads/main
| 2023-09-01T20:51:05.823954
| 2023-08-28T19:35:54
| 2023-08-28T19:35:54
| 2,640,766
| 204
| 130
|
BSD-3-Clause
| 2023-09-13T22:46:41
| 2011-10-25T02:39:28
|
Python
|
UTF-8
|
Python
| false
| false
| 379
|
py
|
__init__.py
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This subpackage contains tools for detecting sources in an astronomical
image.
"""
from .core import * # noqa: F401, F403
from .daofinder import * # noqa: F401, F403
from .irafstarfinder import * # noqa: F401, F403
from .peakfinder import * # noqa: F401, F403
from .starfinder import * # noqa: F401, F403
|
ce9460f91008de9ed3905cf28d9445b2cdf5ad87
|
0d6e9c15edafe14b5d373f2180809b94786cdb88
|
/torch_mimicry/datasets/imagenet/__init__.py
|
5eb944a90687c208291e32ca5b5f8635588ccce2
|
[
"MIT"
] |
permissive
|
kwotsin/mimicry
|
6190e159f96418e9773a453aa17450f372767ffa
|
a7fda06c4aff1e6af8dc4c4a35ed6636e434c766
|
refs/heads/master
| 2023-08-02T21:49:54.864461
| 2022-08-07T18:28:05
| 2022-08-07T18:28:05
| 251,697,485
| 621
| 70
|
MIT
| 2022-08-07T18:30:23
| 2020-03-31T18:35:55
|
Python
|
UTF-8
|
Python
| false
| false
| 24
|
py
|
__init__.py
|
from .imagenet import *
|
7df870a4acdf3b19d15e6d03f44f2cf3f70f5010
|
4578be5ff20640cd0940faa27901489daa471ffe
|
/S08 - Padrões de projeto/interpreter/multiplicacao.py
|
f557206cbb1d8ea5017bfda9596b01564a1d22ee
|
[] |
no_license
|
CAECOMP/provas
|
cd31c48a912ad5e73f5bf8b826db40cf895f46b1
|
3f5eb4ec63fc91ad2c2e4ae6e5b3ac87c09ca916
|
refs/heads/master
| 2023-07-07T11:53:38.798374
| 2023-04-27T03:13:57
| 2023-04-27T03:13:57
| 55,001,094
| 125
| 83
| null | 2023-07-04T21:21:05
| 2016-03-29T18:38:41
|
HTML
|
UTF-8
|
Python
| false
| false
| 319
|
py
|
multiplicacao.py
|
from operador import Operador
class Multiplicar(Operador):
def __init__(self, esquerda: Operador, direita: Operador):
self._esquerda: Operador = esquerda
self._direita: Operador = direita
def interpretar(self) -> int:
return self._esquerda.interpretar() * self._direita.interpretar()
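# Illustrative usage (added; "Numero" is a hypothetical terminal expression
# wrapping a literal value and is not part of the original file):
#
#     class Numero(Operador):
#         def __init__(self, valor: int):
#             self._valor = valor
#
#         def interpretar(self) -> int:
#             return self._valor
#
#     expressao = Multiplicar(Numero(6), Numero(7))
#     print(expressao.interpretar())  # 42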
|
d133ed5d56a16c4d7e696e5760c11891d8066ea2
|
0e083f405af00029c9ec31849f0f7f81c56844b5
|
/tests/test_codebase/test_mmdet3d/data/centerpoint_pillar02_second_secfpn_nus.py
|
18fe7853235071788151c8d9a27a00b51f2d28d1
|
[
"Apache-2.0"
] |
permissive
|
open-mmlab/mmdeploy
|
39b9e7b611caab2c76a6142fcb99f0bf1d92ad24
|
5479c8774f5b88d7ed9d399d4e305cb42cc2e73a
|
refs/heads/main
| 2023-09-01T21:29:25.315371
| 2023-08-31T09:59:29
| 2023-08-31T09:59:29
| 441,467,833
| 2,164
| 605
|
Apache-2.0
| 2023-09-14T10:39:04
| 2021-12-24T13:04:44
|
Python
|
UTF-8
|
Python
| false
| false
| 3,371
|
py
|
centerpoint_pillar02_second_secfpn_nus.py
|
# Copyright (c) OpenMMLab. All rights reserved.
voxel_size = [0.2, 0.2, 8]
model = dict(
type='CenterPoint',
data_preprocessor=dict(
type='Det3DDataPreprocessor',
voxel=True,
voxel_layer=dict(
max_num_points=20,
voxel_size=voxel_size,
max_voxels=(30000, 40000))),
pts_voxel_encoder=dict(
type='PillarFeatureNet',
in_channels=5,
feat_channels=[64],
with_distance=False,
voxel_size=(0.2, 0.2, 8),
norm_cfg=dict(type='BN1d', eps=1e-3, momentum=0.01),
legacy=False),
pts_middle_encoder=dict(
type='PointPillarsScatter', in_channels=64, output_shape=(512, 512)),
pts_backbone=dict(
type='SECOND',
in_channels=64,
out_channels=[64, 128, 256],
layer_nums=[3, 5, 5],
layer_strides=[2, 2, 2],
norm_cfg=dict(type='BN', eps=1e-3, momentum=0.01),
conv_cfg=dict(type='Conv2d', bias=False)),
pts_neck=dict(
type='SECONDFPN',
in_channels=[64, 128, 256],
out_channels=[128, 128, 128],
upsample_strides=[0.5, 1, 2],
norm_cfg=dict(type='BN', eps=1e-3, momentum=0.01),
upsample_cfg=dict(type='deconv', bias=False),
use_conv_for_no_stride=True),
pts_bbox_head=dict(
type='CenterHead',
in_channels=sum([128, 128, 128]),
tasks=[
dict(num_class=1, class_names=['car']),
dict(num_class=2, class_names=['truck', 'construction_vehicle']),
dict(num_class=2, class_names=['bus', 'trailer']),
dict(num_class=1, class_names=['barrier']),
dict(num_class=2, class_names=['motorcycle', 'bicycle']),
dict(num_class=2, class_names=['pedestrian', 'traffic_cone']),
],
common_heads=dict(
reg=(2, 2), height=(1, 2), dim=(3, 2), rot=(2, 2), vel=(2, 2)),
share_conv_channel=64,
bbox_coder=dict(
type='CenterPointBBoxCoder',
post_center_range=[-61.2, -61.2, -10.0, 61.2, 61.2, 10.0],
max_num=500,
score_threshold=0.1,
out_size_factor=4,
voxel_size=voxel_size[:2],
code_size=9),
separate_head=dict(
type='SeparateHead', init_bias=-2.19, final_kernel=3),
loss_cls=dict(type='mmdet.GaussianFocalLoss', reduction='mean'),
loss_bbox=dict(
type='mmdet.L1Loss', reduction='mean', loss_weight=0.25),
norm_bbox=True),
# model training and testing settings
train_cfg=dict(
pts=dict(
grid_size=[512, 512, 1],
voxel_size=voxel_size,
out_size_factor=4,
dense_reg=1,
gaussian_overlap=0.1,
max_objs=500,
min_radius=2,
code_weights=[1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.2, 0.2])),
test_cfg=dict(
pts=dict(
post_center_limit_range=[-61.2, -61.2, -10.0, 61.2, 61.2, 10.0],
max_per_img=500,
max_pool_nms=False,
min_radius=[4, 12, 10, 1, 0.85, 0.175],
score_threshold=0.1,
pc_range=[-51.2, -51.2],
out_size_factor=4,
voxel_size=voxel_size[:2],
nms_type='rotate',
pre_max_size=1000,
post_max_size=83,
nms_thr=0.2)))
|
13e95d74865443ff868348672c78748af30ac710
|
100bfa827dacb23637d3dd2d1396a830c7d9a4b2
|
/mode/examples/Basics/Math/Map/Map.pyde
|
9f576be915b66c42fd4a09d5b21892219ef28152
|
[
"Apache-2.0"
] |
permissive
|
jdf/processing.py
|
82b37e5b1f4ce68825b5fe919205362ecdc16993
|
f38544c70892c7534f059e8acc1c9a492e2b7c86
|
refs/heads/master
| 2023-08-26T01:42:50.442853
| 2023-02-15T21:33:12
| 2023-02-15T21:33:12
| 833,574
| 1,399
| 246
|
Apache-2.0
| 2023-02-21T12:28:09
| 2010-08-12T14:29:22
|
Python
|
UTF-8
|
Python
| false
| false
| 864
|
pyde
|
Map.pyde
|
'''
Map.
Use the map() function to take any number and scale it to a new number
that is more useful for the project that you are working on. For example, use the
numbers from the mouse position to control the size or color of a shape.
In this example, the mouse’s x-coordinate (a number between 0 and 640) is scaled to a
new number to define the color and size of a circle.
Example ReWritten in Python By: Prabhjot Singh (NITH)
example original in Java mode examples: Math->Maps
'''
def setup():
size(640, 360)
noStroke()
def draw():
background(0)
# Scale the mouseX value from 0 to 640 to a range between 0 and 175
c = map(mouseX, 0, width, 0, 175)
# Scale the mouseX value from 0 to 640 to a range between 40 and 300
d = map(mouseX, 0, width, 40, 300)
fill(255, c, 0)
ellipse(width / 2, height / 2, d, d)
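# Worked example (added for clarity; not part of the original sketch): with
# mouseX = 320 on this 640-pixel-wide canvas, map(320, 0, 640, 0, 175)
# returns 87.5 and map(320, 0, 640, 40, 300) returns 170.0.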
|
217b3ef7b9853382a26a828218028d33e36e703e
|
8ca19f1a31070738b376c0370c4bebf6b7efcb43
|
/office365/sharepoint/social/following/manager.py
|
a7c6be847cbe49845ba93cd0d41d6b70f0158560
|
[
"MIT"
] |
permissive
|
vgrem/Office365-REST-Python-Client
|
2ef153d737c6ed5445ba1e446aeaec39c4ef4ed3
|
cbd245d1af8d69e013c469cfc2a9851f51c91417
|
refs/heads/master
| 2023-09-02T14:20:40.109462
| 2023-08-31T19:14:05
| 2023-08-31T19:14:05
| 51,305,798
| 1,006
| 326
|
MIT
| 2023-08-28T05:38:02
| 2016-02-08T15:24:51
|
Python
|
UTF-8
|
Python
| false
| false
| 1,540
|
py
|
manager.py
|
from office365.runtime.client_result import ClientResult
from office365.runtime.client_value_collection import ClientValueCollection
from office365.runtime.paths.resource_path import ResourcePath
from office365.runtime.queries.service_operation import ServiceOperationQuery
from office365.sharepoint.base_entity import BaseEntity
from office365.sharepoint.social.actor import SocialActor
class SocialFollowingManager(BaseEntity):
"""The SocialFollowingManager class provides properties and methods for managing a user's list of followed actors.
Actors can be users, documents, sites, and tags."""
def __init__(self, context):
super(SocialFollowingManager, self).__init__(context, ResourcePath("SP.Social.SocialFollowingManager"))
def get_followers(self):
"""
The GetFollowers method returns the users who are followers of the current user.
"""
return_type = ClientResult(self.context, ClientValueCollection(SocialActor))
qry = ServiceOperationQuery(self, "GetFollowers", None, None, None, return_type)
self.context.add_query(qry)
return return_type
def get_suggestions(self):
"""
The GetSuggestions method returns a list of actors that are suggestions for the current user to follow.
"""
return_type = ClientResult(self.context, ClientValueCollection(SocialActor))
qry = ServiceOperationQuery(self, "GetSuggestions", None, None, None, return_type)
self.context.add_query(qry)
return return_type
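# A hypothetical usage sketch (added; not part of the original file), assuming
# an already-authenticated ClientContext instance named "ctx":
#
#     manager = SocialFollowingManager(ctx)
#     followers = manager.get_followers()
#     ctx.execute_query()  # submits the queued request
#     for actor in followers.value:
#         print(actor)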
|
017259193e77a390c2c6e2a794856e1bb66a5069
|
c2d8cef05ef7a1d6db780f6168a0cac62c4a04ba
|
/squirrel/benchmark/quantify_randomness.py
|
e8ee6b13636fcea989f0ee38927181c1b0b0fa6a
|
[
"Apache-2.0"
] |
permissive
|
merantix-momentum/squirrel-core
|
f212aa75ebc64e14f961b3b6cbefc181793b208d
|
1fc20b7ca7d5854a7514f6dc0383b250a1e8ff2d
|
refs/heads/main
| 2023-08-17T04:45:37.391472
| 2023-08-04T12:50:39
| 2023-08-04T12:50:39
| 458,099,869
| 253
| 5
|
Apache-2.0
| 2023-09-11T18:27:05
| 2022-02-11T08:15:36
|
Python
|
UTF-8
|
Python
| false
| false
| 3,253
|
py
|
quantify_randomness.py
|
from typing import Callable, Iterable
import numpy as np
from scipy.stats import kendalltau
from squirrel.constants import SeedType
from squirrel.driver import MapDriver
from squirrel.iterstream.base import Composable
class DummyShardedDriver(MapDriver):
"""Return integer elements in shards"""
name = "dummy_sharded_driver"
def __init__(self, num_shard: int, shard_size: int) -> None:
"""Init dummy sharded driver"""
self.shard_size = shard_size
self.key_it = range(num_shard)
self.data = np.arange(num_shard * shard_size)
def get(self, key: str) -> int:
"""Get item with key"""
return self.data[int(key) * self.shard_size : (int(key) + 1) * self.shard_size]
def keys(self) -> Iterable:
"""Get key iterator"""
yield from map(str, self.key_it)
def get_iter(self, flatten: bool = True, **kwargs) -> Composable:
"""Get iterator"""
return super().get_iter(flatten=flatten, **kwargs)
def kendalltau_metric(result1: np.ndarray, result2: np.ndarray) -> float:
"""Compute the kendall tau randomness metric"""
tau, _ = kendalltau(result1, result2)
return tau
def quantify_randomness(
num_shard: int,
shard_size: int,
buffer_size: int,
initial: int,
n_samples: int = 250,
metric: Callable = kendalltau_metric,
seed1: SeedType = None,
seed2: SeedType = None,
) -> float:
"""Quantify the randomness of sampling from a driver with the given shuffle parameters.
    This function assumes that we always fully shuffle all keys; the parameters
    of the item shuffle buffer are what we are interested in.
Args:
num_shard (int): number of shards
shard_size (int): size of each shard assuming that all shards are of equal size
buffer_size (int): buffer size for item shuffle buffer
initial (int): initial size of item shuffle buffer
n_samples (int): influences the accuracy of the estimate by controlling the number of sampled trajectories
metric (Callable): how to measure the distance
seed1 (SeedType): seed for the first trajectory
seed2 (SeedType): seed for the second trajectory
Returns:
        float: randomness measure computed from the kendall tau coefficient. Values range from 0 to 1, where 1
            means completely deterministic and 0 means completely random.
"""
driver = DummyShardedDriver(num_shard, shard_size)
distances = []
for _ in range(n_samples):
# sample two random trajectories
result1 = driver.get_iter(
shuffle_key_buffer=num_shard,
shuffle_item_buffer=buffer_size,
item_shuffle_kwargs={"initial": initial, "seed": seed1},
key_shuffle_kwargs={"seed": seed1},
).collect()
result2 = driver.get_iter(
shuffle_key_buffer=num_shard,
shuffle_item_buffer=buffer_size,
item_shuffle_kwargs={"initial": initial, "seed": seed2},
key_shuffle_kwargs={"seed": seed2},
).collect()
# and get their distance via the kendall tau function
distances.append(metric(result1, result2))
# return the median of distances
return np.abs(np.median(np.array(distances)))
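# A minimal usage sketch (added for illustration; the parameter values are
# arbitrary assumptions, not recommendations):
#
#     if __name__ == "__main__":
#         tau = quantify_randomness(
#             num_shard=10, shard_size=100, buffer_size=100, initial=10,
#             n_samples=50, seed1=1, seed2=2,
#         )
#         print("randomness (1 = deterministic, 0 = random):", tau)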
|
92461943ab17ca705b2fffbe8abd4c10370fd01e
|
807b63a4dda1e4bcf33a9b456cb9535eb69625fc
|
/python/stencila/types/if_.py
|
1c2e6161b0c9457e99af7774c91c817ffde23b8b
|
[
"Apache-2.0"
] |
permissive
|
stencila/stencila
|
4d63a5653adb67a45dd5eb11c7a27d569f57a49e
|
eac602910d009d7db7048b28b4049ecc952ecd32
|
refs/heads/main
| 2023-08-30T18:34:19.055238
| 2023-08-30T07:14:02
| 2023-08-30T07:14:02
| 4,503,128
| 719
| 51
|
Apache-2.0
| 2023-09-14T21:35:38
| 2012-05-31T02:43:31
|
Rust
|
UTF-8
|
Python
| false
| false
| 449
|
py
|
if_.py
|
# Generated file; do not edit. See the Rust `schema-gen` crate.
from .prelude import *
from .executable import Executable
from .if_clause import IfClause
@dataclass(kw_only=True, frozen=True)
class If(Executable):
"""
Show and execute alternative content conditional upon an executed expression
"""
type: Literal["If"] = field(default="If", init=False)
clauses: List[IfClause]
"""The clauses making up the `If` node"""
|
b80e9b88015030ef4a459307e47b5e28105b9f01
|
bdf3364eb293abcb02aca9b1594e7181ecbc651f
|
/ds-phasespace-drawing-exercise.py
|
6f916665e7690e4efabe1e76c8fbe8d2754ec732
|
[
"BSD-2-Clause",
"BSD-2-Clause-Views"
] |
permissive
|
hsayama/PyCX
|
f5ee2ec1d02b1ec7529725fc7c175b431a45ef06
|
f61a56fcb5b79734216daf08203f7c08c9662dfb
|
refs/heads/master
| 2023-03-16T20:59:55.706196
| 2023-03-08T15:19:38
| 2023-03-08T15:19:38
| 228,787,396
| 210
| 77
|
NOASSERTION
| 2023-01-17T14:32:30
| 2019-12-18T07:54:03
|
Python
|
UTF-8
|
Python
| false
| false
| 687
|
py
|
ds-phasespace-drawing-exercise.py
|
from pylab import *
def initialize(x0, y0):
global x, y, xresult, yresult
x = x0
y = y0
xresult = [x]
yresult = [y]
def observe():
global x, y, xresult, yresult
xresult.append(x)
yresult.append(y)
def update():
global x, y, xresult, yresult
nextx = x + 0.1 * (x - x * y)
nexty = y + 0.1 * (y - x * y)
x, y = nextx, nexty
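# Note (added for clarity): update() is one forward-Euler step with dt = 0.1
# of the system dx/dt = x - x*y, dy/dt = y - x*y, so each curve plotted below
# is a single trajectory of this system in (x, y) phase space.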
for x0 in arange(0, 2, .1):
for y0 in arange(0, 2, .1):
initialize(x0, y0)
for t in range(30):
update()
observe()
plot(xresult, yresult, 'b')
axis([0, 2, 0, 2]) ### added to zoom in the area within which
### initial states were varied
show()
|
8ad9ad68fcce3f09f06524f82c8308939b1097f5
|
93713f46f16f1e29b725f263da164fed24ebf8a8
|
/Library/lib/python3.7/site-packages/bokeh-1.4.0-py3.7.egg/bokeh/protocol/versions.py
|
ac01d01004b9b6de54ab58e25a4a59bdb49baac9
|
[
"BSD-3-Clause"
] |
permissive
|
holzschu/Carnets
|
b83d15136d25db640cea023abb5c280b26a9620e
|
1ad7ec05fb1e3676ac879585296c513c3ee50ef9
|
refs/heads/master
| 2023-02-20T12:05:14.980685
| 2023-02-13T15:59:23
| 2023-02-13T15:59:23
| 167,671,526
| 541
| 36
|
BSD-3-Clause
| 2022-11-29T03:08:22
| 2019-01-26T09:26:46
|
Python
|
UTF-8
|
Python
| false
| false
| 4,696
|
py
|
versions.py
|
#-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2019, Anaconda, Inc., and Bokeh Contributors.
# All rights reserved.
#
# The full license is in the file LICENSE.txt, distributed with this software.
#-----------------------------------------------------------------------------
''' Provide definitions for Bokeh WebSocket protocol versions.
A *protocol specification* is a sequence of tuples of the form:
.. code-block:: python
(
(<message_type>, <revision>),
(<message_type>, <revision>),
...
)
Where ``<message_type>`` is string that identifies a message type, e.g,
``'ACK'``, ``'SERVER-INFO-REQ'``, etc. and ``<revision>`` is an integer that
identifies what revision of the message this version of the protocol uses.
A *protocol version* is a string of the form ``'<major>.<minor>'``. The
guidelines for updating the major or minor version are:
``<major>``
bump when new messages are added or deleted (and reset minor
version to zero)
``<minor>``
bump when existing message revisions change
.. data:: spec
:annotation:
A mapping of protocol versions to protocol specifications.
.. code-block:: python
{
"1.0" : (
("ACK", 1),
("OK", 1),
("ERROR", 1),
("EVENT", 1),
('SERVER-INFO-REPLY', 1),
('SERVER-INFO-REQ', 1),
('PULL-DOC-REQ', 1),
('PULL-DOC-REPLY', 1),
('PUSH-DOC', 1),
('PATCH-DOC', 1)
),
}
'''
###############################################################################
# #
# #
# #
# ******************************* #
# ****** !!! IMPORTANT !!! ****** #
# ******************************* #
# #
# #
# #
# ANY update to this file MUST be accompanied by the "PROTOCOL" tag. #
# #
# #
# #
# #
###############################################################################
# Please update the docstring above if anything here is changed
#-----------------------------------------------------------------------------
# Boilerplate
#-----------------------------------------------------------------------------
from __future__ import absolute_import, division, print_function, unicode_literals
import logging
log = logging.getLogger(__name__)
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
# Standard library imports
# External imports
# Bokeh imports
#-----------------------------------------------------------------------------
# Globals and constants
#-----------------------------------------------------------------------------
__all__ = (
'spec',
)
#-----------------------------------------------------------------------------
# General API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Dev API
#-----------------------------------------------------------------------------
spec = {
"1.0" : (
("ACK", 1),
("OK", 1),
("ERROR", 1),
("EVENT", 1),
('SERVER-INFO-REPLY', 1),
('SERVER-INFO-REQ', 1),
('PULL-DOC-REQ', 1),
('PULL-DOC-REPLY', 1),
('PUSH-DOC', 1),
('PATCH-DOC', 1)
),
}
#-----------------------------------------------------------------------------
# Private API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------
|
261eb1845f82bd6f7bf6b3f5e1416a428adcfc25
|
a203fb3348c5113aec0d5be0038372a23993e381
|
/docs/source/asgi/examples/fastapi/movies/tables.py
|
e07b7585b97db0cf96a45f365631c5a190e6e812
|
[
"MIT"
] |
permissive
|
piccolo-orm/piccolo_admin
|
0c2aa2f5e423da884cff597dd5d4865658bf820d
|
0636086db5d0c99b3d20be512817fb1e8599b48c
|
refs/heads/master
| 2023-08-24T13:10:38.577849
| 2023-08-23T14:36:06
| 2023-08-23T14:36:06
| 193,770,368
| 246
| 35
|
MIT
| 2023-09-11T11:43:15
| 2019-06-25T19:24:31
|
Python
|
UTF-8
|
Python
| false
| false
| 219
|
py
|
tables.py
|
from piccolo.columns.column_types import ForeignKey, Varchar
from piccolo.table import Table
class Director(Table):
name = Varchar()
class Movie(Table):
title = Varchar()
director = ForeignKey(Director)
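# A hypothetical query sketch (added for illustration; not part of the
# original example): piccolo can traverse the foreign key in a select, e.g.
#
#     rows = await Movie.select(Movie.title, Movie.director.name)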
|
855bb9ee07f12aa916cf63bf138d06ddcefaee02
|
ae33222bcdecc375ff8800f0c6fee70d90120f60
|
/benchmarks/benchmarking/cases/regex_effbot_base.py
|
03a9d3d1696f909910dd376fe88145fed60666bf
|
[
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference",
"LicenseRef-scancode-free-unknown"
] |
permissive
|
bloomberg/memray
|
43d5296c6ff5f73b527d45468d4b28e74e92c561
|
427b60840cb406d011a650ce142845897cb542e2
|
refs/heads/main
| 2023-08-17T03:49:51.354674
| 2023-08-16T18:06:01
| 2023-08-16T19:43:46
| 479,491,550
| 10,817
| 343
|
Apache-2.0
| 2023-09-13T20:02:39
| 2022-04-08T18:04:11
|
Python
|
UTF-8
|
Python
| false
| false
| 4,677
|
py
|
regex_effbot_base.py
|
"""Benchmarks for Python's regex engine.
These are some of the original benchmarks used to tune Python's regex engine
in 2000 written by Fredrik Lundh. Retrieved from
http://mail.python.org/pipermail/python-dev/2000-August/007797.html and
integrated into Unladen Swallow's pyperf.py in 2009 by David Laing.
These benchmarks are of interest since they helped to guide the original
optimization of the sre engine, and we shouldn't necessarily ignore them just
because they're "old".
"""
# Python imports
import re
# Local imports
USE_BYTES = False
def re_compile(s):
if USE_BYTES:
return re.compile(s.encode("latin1"))
else:
return re.compile(s)
# These are the regular expressions to be tested. These sync up,
# index-for-index with the list of strings generated by gen_string_table()
# below.
def gen_regex_table():
return [
re_compile("Python|Perl"),
re_compile("Python|Perl"),
re_compile("(Python|Perl)"),
re_compile("(?:Python|Perl)"),
re_compile("Python"),
re_compile("Python"),
re_compile(".*Python"),
re_compile(".*Python.*"),
re_compile(".*(Python)"),
re_compile(".*(?:Python)"),
re_compile("Python|Perl|Tcl"),
re_compile("Python|Perl|Tcl"),
re_compile("(Python|Perl|Tcl)"),
re_compile("(?:Python|Perl|Tcl)"),
re_compile("(Python)\\1"),
re_compile("(Python)\\1"),
re_compile("([0a-z][a-z0-9]*,)+"),
re_compile("(?:[0a-z][a-z0-9]*,)+"),
re_compile("([a-z][a-z0-9]*,)+"),
re_compile("(?:[a-z][a-z0-9]*,)+"),
re_compile(".*P.*y.*t.*h.*o.*n.*"),
]
def gen_string_table(n):
"""Generates the list of strings that will be used in the benchmarks.
    All strings have repeated prefixes and suffixes, and n specifies the
number of repetitions.
"""
strings = []
def append(s):
if USE_BYTES:
strings.append(s.encode("latin1"))
else:
strings.append(s)
append("-" * n + "Perl" + "-" * n)
append("P" * n + "Perl" + "P" * n)
append("-" * n + "Perl" + "-" * n)
append("-" * n + "Perl" + "-" * n)
append("-" * n + "Python" + "-" * n)
append("P" * n + "Python" + "P" * n)
append("-" * n + "Python" + "-" * n)
append("-" * n + "Python" + "-" * n)
append("-" * n + "Python" + "-" * n)
append("-" * n + "Python" + "-" * n)
append("-" * n + "Perl" + "-" * n)
append("P" * n + "Perl" + "P" * n)
append("-" * n + "Perl" + "-" * n)
append("-" * n + "Perl" + "-" * n)
append("-" * n + "PythonPython" + "-" * n)
append("P" * n + "PythonPython" + "P" * n)
append("-" * n + "a5,b7,c9," + "-" * n)
append("-" * n + "a5,b7,c9," + "-" * n)
append("-" * n + "a5,b7,c9," + "-" * n)
append("-" * n + "a5,b7,c9," + "-" * n)
append("-" * n + "Python" + "-" * n)
return strings
def init_benchmarks(n_values=None):
"""Initialize the strings we'll run the regexes against.
The strings used in the benchmark are prefixed and suffixed by
strings that are repeated n times.
The sequence n_values contains the values for n.
If n_values is None the values of n from the original benchmark
are used.
The generated list of strings is cached in the string_tables
variable, which is indexed by n.
Returns:
    A list of (compiled regex, test string) pairs to benchmark.
"""
if n_values is None:
n_values = (0, 5, 50, 250, 1000, 5000, 10000)
string_tables = {n: gen_string_table(n) for n in n_values}
regexs = gen_regex_table()
data = []
for n in n_values:
for id in range(len(regexs)):
regex = regexs[id]
string = string_tables[n][id]
data.append((regex, string))
return data
def bench_regex_effbot(loops):
if bench_regex_effbot.data is None:
bench_regex_effbot.data = init_benchmarks()
data = bench_regex_effbot.data
range_it = range(loops)
search = re.search
for _ in range_it:
# Runs all of the benchmarks for a given value of n.
for regex, string in data:
# search 10 times
search(regex, string)
search(regex, string)
search(regex, string)
search(regex, string)
search(regex, string)
search(regex, string)
search(regex, string)
search(regex, string)
search(regex, string)
search(regex, string)
# cached data, generated at the first call
bench_regex_effbot.data = None
def run_benchmark():
bench_regex_effbot(1)
if __name__ == "__main__":
run_benchmark()
|
2c2d621f71a114e3193be447aad05d5a01bc670d
|
23895eba556353a116d97a3e9fa60f7ed9c9f693
|
/Paths/Find Close Encounters of Orthogonal Line Segments.py
|
6ef8b20fa66aa6f5184038baaf02f47289d33472
|
[
"Apache-2.0"
] |
permissive
|
mekkablue/Glyphs-Scripts
|
9970200e6b7223be58ff9122dd519af176f210de
|
fe09b4cf3754bc10c3037c3312a19c1b909a74d6
|
refs/heads/master
| 2023-08-28T15:02:21.931491
| 2023-08-25T17:12:34
| 2023-08-25T17:12:34
| 2,517,418
| 322
| 108
|
Apache-2.0
| 2023-08-15T15:24:50
| 2011-10-05T07:12:37
|
Python
|
UTF-8
|
Python
| false
| false
| 9,317
|
py
|
Find Close Encounters of Orthogonal Line Segments.py
|
#MenuTitle: Find Close Encounters of Orthogonal Line Segments
# -*- coding: utf-8 -*-
from __future__ import division, print_function, unicode_literals
__doc__ = """
Goes through all vertical and horizontal line segments and finds pairs that are close but do not align completely.
"""
import vanilla, sys, math
def angle(firstPoint, secondPoint):
xDiff = secondPoint.x - firstPoint.x
yDiff = secondPoint.y - firstPoint.y
return math.degrees(math.atan2(yDiff, xDiff))
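# Note (added for clarity): angle() returns degrees, e.g. (0, 0) -> (10, 0)
# gives 0.0 and (0, 0) -> (0, 10) gives 90.0; the script below only collects
# segments whose angle modulo 180 is exactly 0 or 90.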
class FindCloseEncounters(object):
prefID = "com.mekkablue.FindCloseEncounters"
prefDict = {
# "prefName": defaultValue,
"threshold": 2,
"includeComposites": True,
"includeNonExporting": True,
"excludeGlyphs": True,
"excludeGlyphsContaining": ".ornm, .dnom, .numr, superior, inferior, .blackCircled",
"reuseTab": True,
}
markerEmoji = "😰"
def __init__(self):
# Window 'self.w':
windowWidth = 370
windowHeight = 200
windowWidthResize = 500 # user can resize width by this value
windowHeightResize = 0 # user can resize height by this value
self.w = vanilla.FloatingWindow(
(windowWidth, windowHeight), # default window size
"Find Close Encounters of Orthogonal Line Segments", # window title
minSize=(windowWidth, windowHeight), # minimum size (for resizing)
maxSize=(windowWidth + windowWidthResize, windowHeight + windowHeightResize), # maximum size (for resizing)
autosaveName=self.domain("mainwindow") # stores last window position and size
)
# UI elements:
linePos, inset, lineHeight = 12, 15, 22
self.w.descriptionText = vanilla.TextBox(
(inset, linePos + 2, -inset, 14), "Find orthogonal line segments that are close but not aligning:", sizeStyle='small', selectable=True
)
linePos += lineHeight
self.w.thresholdText = vanilla.TextBox((inset, linePos + 2, 120, 14), "Max distance in units:", sizeStyle='small', selectable=True)
self.w.threshold = vanilla.EditText((inset + 120, linePos - 1, 50, 19), self.prefDict["threshold"], callback=self.SavePreferences, sizeStyle='small')
linePos += lineHeight
self.w.includeComposites = vanilla.CheckBox(
(inset, linePos - 1, -inset, 20),
"Include composites (otherwise only glyphs with paths)",
value=self.prefDict["includeComposites"],
callback=self.SavePreferences,
sizeStyle='small'
)
linePos += lineHeight
self.w.includeNonExporting = vanilla.CheckBox(
(inset, linePos - 1, -inset, 20), "Include non-exporting glyphs", value=self.prefDict["includeNonExporting"], callback=self.SavePreferences, sizeStyle='small'
)
linePos += lineHeight
self.w.excludeGlyphs = vanilla.CheckBox((inset, linePos - 1, 160, 20), "Exclude glyphs containing:", value=True, callback=self.SavePreferences, sizeStyle='small')
self.w.excludeGlyphsContaining = vanilla.EditText(
(inset + 160, linePos - 1, -inset, 19), self.prefDict["excludeGlyphsContaining"], callback=self.SavePreferences, sizeStyle='small'
)
linePos += lineHeight
self.w.reuseTab = vanilla.CheckBox((inset, linePos - 1, -inset, 20), "Reuse current tab for report", value=False, callback=self.SavePreferences, sizeStyle='small')
linePos += lineHeight
# Run Button:
self.w.runButton = vanilla.Button((-100 - inset, -20 - inset, -inset, -inset), "Find", sizeStyle='regular', callback=self.FindCloseEncountersMain)
self.w.setDefaultButton(self.w.runButton)
# Load Settings:
if not self.LoadPreferences():
print("⚠️ ‘Find Close Encounters of Orthogonal Line Segments’ could not load preferences. Will resort to defaults.")
# Open window and focus on it:
self.w.open()
self.w.makeKey()
def updateGUI(self, sender=None):
self.w.excludeGlyphsContaining.enable(self.pref("excludeGlyphs"))
self.w.runButton.enable(abs(float(self.pref("threshold"))) > 0)
def domain(self, prefName):
prefName = prefName.strip().strip(".")
return self.prefID + "." + prefName.strip()
def pref(self, prefName):
prefDomain = self.domain(prefName)
return Glyphs.defaults[prefDomain]
def SavePreferences(self, sender=None):
try:
# write current settings into prefs:
for prefName in self.prefDict.keys():
Glyphs.defaults[self.domain(prefName)] = getattr(self.w, prefName).get()
self.updateGUI()
return True
except:
import traceback
print(traceback.format_exc())
return False
def LoadPreferences(self):
try:
for prefName in self.prefDict.keys():
# register defaults:
Glyphs.registerDefault(self.domain(prefName), self.prefDict[prefName])
# load previously written prefs:
getattr(self.w, prefName).set(self.pref(prefName))
self.updateGUI()
return True
except:
import traceback
print(traceback.format_exc())
return False
def FindCloseEncountersMain(self, sender=None):
try:
# clear macro window log:
Glyphs.clearLog()
# update settings to the latest user input:
if not self.SavePreferences():
print("⚠️ ‘Find Close Encounters of Orthogonal Line Segments’ could not write preferences.")
# read prefs:
for prefName in self.prefDict.keys():
try:
setattr(sys.modules[__name__], prefName, self.pref(prefName))
except:
fallbackValue = self.prefDict[prefName]
print("⚠️ Could not set pref ‘%s’, resorting to default value: ‘%s’." % (prefName, fallbackValue))
setattr(sys.modules[__name__], prefName, fallbackValue)
thisFont = Glyphs.font # frontmost font
if thisFont is None:
Message(title="No Font Open", message="The script requires a font. Open a font and run the script again.", OKButton=None)
else:
filePath = thisFont.filepath
if filePath:
report = "%s\n📄 %s" % (filePath.lastPathComponent(), filePath)
else:
report = "%s\n⚠️ The font file has not been saved yet." % thisFont.familyName
print("Find Close Encounters of Orthogonal Line Segments Report for %s" % report)
print()
# query user prefs:
threshold = abs(float(self.pref("threshold")))
includeComposites = self.pref("includeComposites")
includeNonExporting = self.pref("includeNonExporting")
excludeGlyphs = self.pref("excludeGlyphs")
excludeGlyphsContaining = [particle.strip() for particle in self.pref("excludeGlyphsContaining").split(",")]
reuseTab = self.pref("reuseTab")
collectedLayers = []
for g in thisFont.glyphs:
# cleaning up existing guide markers
for l in g.layers:
if l.guides:
for i in range(len(l.guides) - 1, -1, -1):
guide = l.guides[i]
if guide.name and guide.name.startswith(self.markerEmoji):
del l.guides[i]
# see if glyph/layer needs to be skipped:
if not g.export and not includeNonExporting:
continue
if excludeGlyphs:
particleInGlyphName = [particle in g.name for particle in excludeGlyphsContaining]
if any(particleInGlyphName):
continue
for l in g.layers:
if not l.paths and not includeComposites:
continue
# look for line segments:
decomposedLayer = l.copyDecomposedLayer()
segmentDict = {
0: [],
90: [],
}
for path in decomposedLayer.paths:
for segment in path.segments:
if len(segment) == 2:
segmentAngle = int(angle(segment[0], segment.lastPoint()) % 180)
if segmentAngle in segmentDict.keys():
# 0 deg --> compare y
# 90 deg --> compare x
segmentDict[segmentAngle].append(segment.middlePoint()[int(1 - segmentAngle / 90)])
for segmentAngle in segmentDict.keys():
coords = sorted(set(segmentDict[segmentAngle]))
if len(coords) > 1:
for i in range(1, len(coords)):
prevCoord = coords[i - 1]
thisCoord = coords[i]
dist = abs(thisCoord - prevCoord)
if dist <= threshold:
print("%s 📐%i 🔢 %i ~ %i 🔠 %s (%s)" % (
self.markerEmoji,
segmentAngle,
prevCoord,
thisCoord,
g.name,
l.name,
))
for coord in prevCoord, thisCoord:
gd = GSGuide()
gd.angle = segmentAngle
gd.name = "%s %i" % (self.markerEmoji, coord)
gd.position = NSPoint(
0 if segmentAngle == 0 else coord,
0 if segmentAngle == 90 else coord,
)
l.guides.append(gd)
										if l not in collectedLayers:
											collectedLayers.append(l)
if collectedLayers:
if reuseTab and thisFont.currentTab:
tab = thisFont.currentTab
else:
tab = thisFont.newTab()
tab.layers = collectedLayers
else:
Message(
title="No close encounters found",
message="Congratulations! No non-aliging line segments not further than %iu in this font." % abs(threshold),
OKButton=None,
)
self.w.close() # delete if you want window to stay open
# Final report:
Glyphs.showNotification(
"%s: Done" % (thisFont.familyName),
"Find Close Encounters of Orthogonal Line Segments is finished. Details in Macro Window",
)
print("\nDone.")
except Exception as e:
# brings macro window to front and reports error:
Glyphs.showMacroWindow()
print("Find Close Encounters of Orthogonal Line Segments Error: %s" % e)
import traceback
print(traceback.format_exc())
FindCloseEncounters()
|
1152647304854e1c74a68d368f20efe9b1fca6af
|
9784a90cac667e8e0aaba0ca599b4255b215ec67
|
/gluon/gluoncv2/models/shufflenetv2.py
|
53f7cd3efd32e4a544efc374c68097a5b6326ef9
|
[
"MIT"
] |
permissive
|
osmr/imgclsmob
|
d2f48f01ca541b20119871393eca383001a96019
|
f2993d3ce73a2f7ddba05da3891defb08547d504
|
refs/heads/master
| 2022-07-09T14:24:37.591824
| 2021-12-14T10:15:31
| 2021-12-14T10:15:31
| 140,285,687
| 3,017
| 624
|
MIT
| 2022-07-04T15:18:37
| 2018-07-09T12:57:46
|
Python
|
UTF-8
|
Python
| false
| false
| 12,524
|
py
|
shufflenetv2.py
|
"""
ShuffleNet V2 for ImageNet-1K, implemented in Gluon.
Original paper: 'ShuffleNet V2: Practical Guidelines for Efficient CNN Architecture Design,'
https://arxiv.org/abs/1807.11164.
"""
__all__ = ['ShuffleNetV2', 'shufflenetv2_wd2', 'shufflenetv2_w1', 'shufflenetv2_w3d2', 'shufflenetv2_w2']
import os
from mxnet import cpu
from mxnet.gluon import nn, HybridBlock
from .common import conv1x1, depthwise_conv3x3, conv1x1_block, conv3x3_block, ChannelShuffle, SEBlock
class ShuffleUnit(HybridBlock):
"""
ShuffleNetV2 unit.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
downsample : bool
Whether do downsample.
use_se : bool
Whether to use SE block.
use_residual : bool
Whether to use residual connection.
"""
def __init__(self,
in_channels,
out_channels,
downsample,
use_se,
use_residual,
**kwargs):
super(ShuffleUnit, self).__init__(**kwargs)
self.downsample = downsample
self.use_se = use_se
self.use_residual = use_residual
mid_channels = out_channels // 2
with self.name_scope():
self.compress_conv1 = conv1x1(
in_channels=(in_channels if self.downsample else mid_channels),
out_channels=mid_channels)
self.compress_bn1 = nn.BatchNorm(in_channels=mid_channels)
self.dw_conv2 = depthwise_conv3x3(
channels=mid_channels,
strides=(2 if self.downsample else 1))
self.dw_bn2 = nn.BatchNorm(in_channels=mid_channels)
self.expand_conv3 = conv1x1(
in_channels=mid_channels,
out_channels=mid_channels)
self.expand_bn3 = nn.BatchNorm(in_channels=mid_channels)
if self.use_se:
self.se = SEBlock(channels=mid_channels)
if downsample:
self.dw_conv4 = depthwise_conv3x3(
channels=in_channels,
strides=2)
self.dw_bn4 = nn.BatchNorm(in_channels=in_channels)
self.expand_conv5 = conv1x1(
in_channels=in_channels,
out_channels=mid_channels)
self.expand_bn5 = nn.BatchNorm(in_channels=mid_channels)
self.activ = nn.Activation("relu")
self.c_shuffle = ChannelShuffle(
channels=out_channels,
groups=2)
def hybrid_forward(self, F, x):
if self.downsample:
y1 = self.dw_conv4(x)
y1 = self.dw_bn4(y1)
y1 = self.expand_conv5(y1)
y1 = self.expand_bn5(y1)
y1 = self.activ(y1)
x2 = x
else:
y1, x2 = F.split(x, axis=1, num_outputs=2)
y2 = self.compress_conv1(x2)
y2 = self.compress_bn1(y2)
y2 = self.activ(y2)
y2 = self.dw_conv2(y2)
y2 = self.dw_bn2(y2)
y2 = self.expand_conv3(y2)
y2 = self.expand_bn3(y2)
y2 = self.activ(y2)
if self.use_se:
y2 = self.se(y2)
if self.use_residual and not self.downsample:
y2 = y2 + x2
x = F.concat(y1, y2, dim=1)
x = self.c_shuffle(x)
return x
class ShuffleInitBlock(HybridBlock):
"""
ShuffleNetV2 specific initial block.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
"""
def __init__(self,
in_channels,
out_channels,
**kwargs):
super(ShuffleInitBlock, self).__init__(**kwargs)
with self.name_scope():
self.conv = conv3x3_block(
in_channels=in_channels,
out_channels=out_channels,
strides=2)
self.pool = nn.MaxPool2D(
pool_size=3,
strides=2,
padding=0,
ceil_mode=True)
def hybrid_forward(self, F, x):
x = self.conv(x)
x = self.pool(x)
return x
class ShuffleNetV2(HybridBlock):
"""
ShuffleNetV2 model from 'ShuffleNet V2: Practical Guidelines for Efficient CNN Architecture Design,'
https://arxiv.org/abs/1807.11164.
Parameters:
----------
channels : list of list of int
Number of output channels for each unit.
init_block_channels : int
Number of output channels for the initial unit.
final_block_channels : int
Number of output channels for the final block of the feature extractor.
use_se : bool, default False
Whether to use SE block.
use_residual : bool, default False
Whether to use residual connections.
in_channels : int, default 3
Number of input channels.
in_size : tuple of two ints, default (224, 224)
Spatial size of the expected input image.
classes : int, default 1000
Number of classification classes.
"""
def __init__(self,
channels,
init_block_channels,
final_block_channels,
use_se=False,
use_residual=False,
in_channels=3,
in_size=(224, 224),
classes=1000,
**kwargs):
super(ShuffleNetV2, self).__init__(**kwargs)
self.in_size = in_size
self.classes = classes
with self.name_scope():
self.features = nn.HybridSequential(prefix="")
self.features.add(ShuffleInitBlock(
in_channels=in_channels,
out_channels=init_block_channels))
in_channels = init_block_channels
for i, channels_per_stage in enumerate(channels):
stage = nn.HybridSequential(prefix="stage{}_".format(i + 1))
with stage.name_scope():
for j, out_channels in enumerate(channels_per_stage):
downsample = (j == 0)
stage.add(ShuffleUnit(
in_channels=in_channels,
out_channels=out_channels,
downsample=downsample,
use_se=use_se,
use_residual=use_residual))
in_channels = out_channels
self.features.add(stage)
self.features.add(conv1x1_block(
in_channels=in_channels,
out_channels=final_block_channels))
in_channels = final_block_channels
self.features.add(nn.AvgPool2D(
pool_size=7,
strides=1))
self.output = nn.HybridSequential(prefix="")
self.output.add(nn.Flatten())
self.output.add(nn.Dense(
units=classes,
in_units=in_channels))
def hybrid_forward(self, F, x):
x = self.features(x)
x = self.output(x)
return x
def get_shufflenetv2(width_scale,
model_name=None,
pretrained=False,
ctx=cpu(),
root=os.path.join("~", ".mxnet", "models"),
**kwargs):
"""
Create ShuffleNetV2 model with specific parameters.
Parameters:
----------
width_scale : float
Scale factor for width of layers.
model_name : str or None, default None
Model name for loading pretrained model.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
init_block_channels = 24
final_block_channels = 1024
layers = [4, 8, 4]
channels_per_layers = [116, 232, 464]
channels = [[ci] * li for (ci, li) in zip(channels_per_layers, layers)]
if width_scale != 1.0:
channels = [[int(cij * width_scale) for cij in ci] for ci in channels]
if width_scale > 1.5:
final_block_channels = int(final_block_channels * width_scale)
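    # Illustrative arithmetic (added; not part of the original source): for
    # shufflenetv2_wd2, width_scale is 12/29, so the per-stage channels
    # [116, 232, 464] scale to [48, 96, 192] (e.g. 116 * 12/29 = 1392/29 = 48),
    # while final_block_channels stays at 1024 because 12/29 < 1.5.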
net = ShuffleNetV2(
channels=channels,
init_block_channels=init_block_channels,
final_block_channels=final_block_channels,
**kwargs)
if pretrained:
if (model_name is None) or (not model_name):
raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
from .model_store import get_model_file
net.load_parameters(
filename=get_model_file(
model_name=model_name,
local_model_store_dir_path=root),
ctx=ctx)
return net
def shufflenetv2_wd2(**kwargs):
"""
ShuffleNetV2 0.5x model from 'ShuffleNet V2: Practical Guidelines for Efficient CNN Architecture Design,'
https://arxiv.org/abs/1807.11164.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_shufflenetv2(width_scale=(12.0 / 29.0), model_name="shufflenetv2_wd2", **kwargs)
def shufflenetv2_w1(**kwargs):
"""
ShuffleNetV2 1x model from 'ShuffleNet V2: Practical Guidelines for Efficient CNN Architecture Design,'
https://arxiv.org/abs/1807.11164.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_shufflenetv2(width_scale=1.0, model_name="shufflenetv2_w1", **kwargs)
def shufflenetv2_w3d2(**kwargs):
"""
ShuffleNetV2 1.5x model from 'ShuffleNet V2: Practical Guidelines for Efficient CNN Architecture Design,'
https://arxiv.org/abs/1807.11164.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_shufflenetv2(width_scale=(44.0 / 29.0), model_name="shufflenetv2_w3d2", **kwargs)
def shufflenetv2_w2(**kwargs):
"""
ShuffleNetV2 2x model from 'ShuffleNet V2: Practical Guidelines for Efficient CNN Architecture Design,'
https://arxiv.org/abs/1807.11164.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_shufflenetv2(width_scale=(61.0 / 29.0), model_name="shufflenetv2_w2", **kwargs)
def _test():
import numpy as np
import mxnet as mx
pretrained = False
models = [
shufflenetv2_wd2,
shufflenetv2_w1,
shufflenetv2_w3d2,
shufflenetv2_w2,
]
for model in models:
net = model(pretrained=pretrained)
ctx = mx.cpu()
if not pretrained:
net.initialize(ctx=ctx)
# net.hybridize()
net_params = net.collect_params()
weight_count = 0
for param in net_params.values():
if (param.shape is None) or (not param._differentiable):
continue
weight_count += np.prod(param.shape)
print("m={}, {}".format(model.__name__, weight_count))
assert (model != shufflenetv2_wd2 or weight_count == 1366792)
assert (model != shufflenetv2_w1 or weight_count == 2278604)
assert (model != shufflenetv2_w3d2 or weight_count == 4406098)
assert (model != shufflenetv2_w2 or weight_count == 7601686)
x = mx.nd.zeros((1, 3, 224, 224), ctx=ctx)
y = net(x)
assert (y.shape == (1, 1000))
if __name__ == "__main__":
_test()
|
f2931bb92adf0266227e3289d81ce57d31533f5a
|
a3d6556180e74af7b555f8d47d3fea55b94bcbda
|
/build/android/gyp/java_cpp_enum.py
|
9098cfc82b1b0b969f906a525d21c99a4444a6d5
|
[
"BSD-3-Clause"
] |
permissive
|
chromium/chromium
|
aaa9eda10115b50b0616d2f1aed5ef35d1d779d6
|
a401d6cf4f7bf0e2d2e964c512ebb923c3d8832c
|
refs/heads/main
| 2023-08-24T00:35:12.585945
| 2023-08-23T22:01:11
| 2023-08-23T22:01:11
| 120,360,765
| 17,408
| 7,102
|
BSD-3-Clause
| 2023-09-10T23:44:27
| 2018-02-05T20:55:32
| null |
UTF-8
|
Python
| false
| false
| 15,564
|
py
|
java_cpp_enum.py
|
#!/usr/bin/env python3
#
# Copyright 2014 The Chromium Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import collections
from datetime import date
import re
import optparse
import os
from string import Template
import sys
import textwrap
import zipfile
from util import build_utils
from util import java_cpp_utils
import action_helpers # build_utils adds //build to sys.path.
import zip_helpers
# List of C++ types that are compatible with the Java code generated by this
# script.
#
# This script can parse .idl files however, at present it ignores special
# rules such as [cpp_enum_prefix_override="ax_attr"].
ENUM_FIXED_TYPE_ALLOWLIST = [
'char', 'unsigned char', 'short', 'unsigned short', 'int', 'int8_t',
'int16_t', 'int32_t', 'uint8_t', 'uint16_t'
]
class EnumDefinition:
def __init__(self, original_enum_name=None, class_name_override=None,
enum_package=None, entries=None, comments=None, fixed_type=None):
self.original_enum_name = original_enum_name
self.class_name_override = class_name_override
self.enum_package = enum_package
self.entries = collections.OrderedDict(entries or [])
self.comments = collections.OrderedDict(comments or [])
self.prefix_to_strip = None
self.fixed_type = fixed_type
def AppendEntry(self, key, value):
if key in self.entries:
raise Exception('Multiple definitions of key %s found.' % key)
self.entries[key] = value
def AppendEntryComment(self, key, value):
if key in self.comments:
raise Exception('Multiple definitions of key %s found.' % key)
self.comments[key] = value
@property
def class_name(self):
return self.class_name_override or self.original_enum_name
def Finalize(self):
self._Validate()
self._AssignEntryIndices()
self._StripPrefix()
self._NormalizeNames()
def _Validate(self):
assert self.class_name
assert self.enum_package
assert self.entries
if self.fixed_type and self.fixed_type not in ENUM_FIXED_TYPE_ALLOWLIST:
raise Exception('Fixed type %s for enum %s not in allowlist.' %
(self.fixed_type, self.class_name))
def _AssignEntryIndices(self):
# Enums, if given no value, are given the value of the previous enum + 1.
if not all(self.entries.values()):
prev_enum_value = -1
for key, value in self.entries.items():
if not value:
self.entries[key] = prev_enum_value + 1
elif value in self.entries:
self.entries[key] = self.entries[value]
else:
try:
self.entries[key] = int(value)
except ValueError as e:
raise Exception('Could not interpret integer from enum value "%s" '
'for key %s.' % (value, key)) from e
prev_enum_value = self.entries[key]
def _StripPrefix(self):
prefix_to_strip = self.prefix_to_strip
if not prefix_to_strip:
shout_case = self.original_enum_name
shout_case = re.sub('(?!^)([A-Z]+)', r'_\1', shout_case).upper()
shout_case += '_'
prefixes = [shout_case, self.original_enum_name,
'k' + self.original_enum_name]
for prefix in prefixes:
if all(w.startswith(prefix) for w in self.entries.keys()):
prefix_to_strip = prefix
break
else:
prefix_to_strip = ''
def StripEntries(entries):
ret = collections.OrderedDict()
for k, v in entries.items():
stripped_key = k.replace(prefix_to_strip, '', 1)
if isinstance(v, str):
stripped_value = v.replace(prefix_to_strip, '')
else:
stripped_value = v
ret[stripped_key] = stripped_value
return ret
self.entries = StripEntries(self.entries)
self.comments = StripEntries(self.comments)
def _NormalizeNames(self):
self.entries = _TransformKeys(self.entries, java_cpp_utils.KCamelToShouty)
self.comments = _TransformKeys(self.comments, java_cpp_utils.KCamelToShouty)
def _TransformKeys(d, func):
"""Normalize keys in |d| and update references to old keys in |d| values."""
keys_map = {k: func(k) for k in d}
ret = collections.OrderedDict()
for k, v in d.items():
# Need to transform values as well when the entry value was explicitly set
# (since it could contain references to other enum entry values).
if isinstance(v, str):
# First check if a full replacement is available. This avoids issues when
# one key is a substring of another.
if v in d:
v = keys_map[v]
else:
for old_key, new_key in keys_map.items():
v = v.replace(old_key, new_key)
ret[keys_map[k]] = v
return ret
class DirectiveSet:
class_name_override_key = 'CLASS_NAME_OVERRIDE'
enum_package_key = 'ENUM_PACKAGE'
prefix_to_strip_key = 'PREFIX_TO_STRIP'
known_keys = [class_name_override_key, enum_package_key, prefix_to_strip_key]
def __init__(self):
self._directives = {}
def Update(self, key, value):
if key not in DirectiveSet.known_keys:
raise Exception("Unknown directive: " + key)
self._directives[key] = value
@property
def empty(self):
return len(self._directives) == 0
def UpdateDefinition(self, definition):
definition.class_name_override = self._directives.get(
DirectiveSet.class_name_override_key, '')
definition.enum_package = self._directives.get(
DirectiveSet.enum_package_key)
definition.prefix_to_strip = self._directives.get(
DirectiveSet.prefix_to_strip_key)
class HeaderParser:
single_line_comment_re = re.compile(r'\s*//\s*([^\n]*)')
multi_line_comment_start_re = re.compile(r'\s*/\*')
enum_line_re = re.compile(r'^\s*(\w+)(\s*\=\s*([^,\n]+))?,?')
enum_end_re = re.compile(r'^\s*}\s*;\.*$')
generator_error_re = re.compile(r'^\s*//\s+GENERATED_JAVA_(\w+)\s*:\s*$')
generator_directive_re = re.compile(
r'^\s*//\s+GENERATED_JAVA_(\w+)\s*:\s*([\.\w]+)$')
multi_line_generator_directive_start_re = re.compile(
r'^\s*//\s+GENERATED_JAVA_(\w+)\s*:\s*\(([\.\w]*)$')
multi_line_directive_continuation_re = re.compile(r'^\s*//\s+([\.\w]+)$')
multi_line_directive_end_re = re.compile(r'^\s*//\s+([\.\w]*)\)$')
optional_class_or_struct_re = r'(class|struct)?'
enum_name_re = r'(\w+)'
optional_fixed_type_re = r'(\:\s*(\w+\s*\w+?))?'
enum_start_re = re.compile(r'^\s*(?:\[cpp.*\])?\s*enum\s+' +
optional_class_or_struct_re + r'\s*' + enum_name_re + r'\s*' +
optional_fixed_type_re + r'\s*{\s*')
enum_single_line_re = re.compile(
r'^\s*(?:\[cpp.*\])?\s*enum.*{(?P<enum_entries>.*)}.*$')
def __init__(self, lines, path=''):
self._lines = lines
self._path = path
self._enum_definitions = []
self._in_enum = False
self._current_definition = None
self._current_comments = []
self._generator_directives = DirectiveSet()
self._multi_line_generator_directive = None
self._current_enum_entry = ''
def _ApplyGeneratorDirectives(self):
self._generator_directives.UpdateDefinition(self._current_definition)
self._generator_directives = DirectiveSet()
def ParseDefinitions(self):
for line in self._lines:
self._ParseLine(line)
return self._enum_definitions
def _ParseLine(self, line):
if self._multi_line_generator_directive:
self._ParseMultiLineDirectiveLine(line)
elif not self._in_enum:
self._ParseRegularLine(line)
else:
self._ParseEnumLine(line)
def _ParseEnumLine(self, line):
if HeaderParser.multi_line_comment_start_re.match(line):
raise Exception('Multi-line comments in enums are not supported in ' +
self._path)
enum_comment = HeaderParser.single_line_comment_re.match(line)
if enum_comment:
comment = enum_comment.groups()[0]
if comment:
self._current_comments.append(comment)
elif HeaderParser.enum_end_re.match(line):
self._FinalizeCurrentEnumDefinition()
else:
self._AddToCurrentEnumEntry(line)
if ',' in line:
self._ParseCurrentEnumEntry()
def _ParseSingleLineEnum(self, line):
for entry in line.split(','):
self._AddToCurrentEnumEntry(entry)
self._ParseCurrentEnumEntry()
self._FinalizeCurrentEnumDefinition()
def _ParseCurrentEnumEntry(self):
if not self._current_enum_entry:
return
enum_entry = HeaderParser.enum_line_re.match(self._current_enum_entry)
if not enum_entry:
raise Exception('Unexpected error while attempting to parse %s as enum '
'entry.' % self._current_enum_entry)
enum_key = enum_entry.groups()[0]
enum_value = enum_entry.groups()[2]
self._current_definition.AppendEntry(enum_key, enum_value)
if self._current_comments:
self._current_definition.AppendEntryComment(
enum_key, ' '.join(self._current_comments))
self._current_comments = []
self._current_enum_entry = ''
def _AddToCurrentEnumEntry(self, line):
self._current_enum_entry += ' ' + line.strip()
def _FinalizeCurrentEnumDefinition(self):
if self._current_enum_entry:
self._ParseCurrentEnumEntry()
self._ApplyGeneratorDirectives()
self._current_definition.Finalize()
self._enum_definitions.append(self._current_definition)
self._current_definition = None
self._in_enum = False
def _ParseMultiLineDirectiveLine(self, line):
multi_line_directive_continuation = (
HeaderParser.multi_line_directive_continuation_re.match(line))
multi_line_directive_end = (
HeaderParser.multi_line_directive_end_re.match(line))
if multi_line_directive_continuation:
value_cont = multi_line_directive_continuation.groups()[0]
self._multi_line_generator_directive[1].append(value_cont)
elif multi_line_directive_end:
directive_name = self._multi_line_generator_directive[0]
directive_value = "".join(self._multi_line_generator_directive[1])
directive_value += multi_line_directive_end.groups()[0]
self._multi_line_generator_directive = None
self._generator_directives.Update(directive_name, directive_value)
else:
raise Exception('Malformed multi-line directive declaration in ' +
self._path)
def _ParseRegularLine(self, line):
enum_start = HeaderParser.enum_start_re.match(line)
generator_directive_error = HeaderParser.generator_error_re.match(line)
generator_directive = HeaderParser.generator_directive_re.match(line)
multi_line_generator_directive_start = (
HeaderParser.multi_line_generator_directive_start_re.match(line))
single_line_enum = HeaderParser.enum_single_line_re.match(line)
if generator_directive_error:
raise Exception('Malformed directive declaration in ' + self._path +
'. Use () for multi-line directives. E.g.\n' +
'// GENERATED_JAVA_ENUM_PACKAGE: (\n' +
'// foo.package)')
if generator_directive:
directive_name = generator_directive.groups()[0]
directive_value = generator_directive.groups()[1]
self._generator_directives.Update(directive_name, directive_value)
elif multi_line_generator_directive_start:
directive_name = multi_line_generator_directive_start.groups()[0]
directive_value = multi_line_generator_directive_start.groups()[1]
self._multi_line_generator_directive = (directive_name, [directive_value])
elif enum_start or single_line_enum:
if self._generator_directives.empty:
return
self._current_definition = EnumDefinition(
original_enum_name=enum_start.groups()[1],
fixed_type=enum_start.groups()[3])
self._in_enum = True
if single_line_enum:
self._ParseSingleLineEnum(single_line_enum.group('enum_entries'))
def DoGenerate(source_paths):
for source_path in source_paths:
enum_definitions = DoParseHeaderFile(source_path)
if not enum_definitions:
raise Exception('No enums found in %s\n'
'Did you forget prefixing enums with '
'"// GENERATED_JAVA_ENUM_PACKAGE: foo"?' %
source_path)
for enum_definition in enum_definitions:
output_path = java_cpp_utils.GetJavaFilePath(enum_definition.enum_package,
enum_definition.class_name)
output = GenerateOutput(source_path, enum_definition)
yield output_path, output
def DoParseHeaderFile(path):
with open(path) as f:
return HeaderParser(f.readlines(), path).ParseDefinitions()
def GenerateOutput(source_path, enum_definition):
template = Template("""
// Copyright ${YEAR} The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
// This file is autogenerated by
// ${SCRIPT_NAME}
// From
// ${SOURCE_PATH}
package ${PACKAGE};
import androidx.annotation.IntDef;
import java.lang.annotation.Retention;
import java.lang.annotation.RetentionPolicy;
@IntDef({
${INT_DEF}
})
@Retention(RetentionPolicy.SOURCE)
public @interface ${CLASS_NAME} {
${ENUM_ENTRIES}
}
""")
enum_template = Template(' int ${NAME} = ${VALUE};')
enum_entries_string = []
enum_names = []
for enum_name, enum_value in enum_definition.entries.items():
values = {
'NAME': enum_name,
'VALUE': enum_value,
}
enum_comments = enum_definition.comments.get(enum_name)
if enum_comments:
enum_comments_indent = ' * '
comments_line_wrapper = textwrap.TextWrapper(
initial_indent=enum_comments_indent,
subsequent_indent=enum_comments_indent,
width=100)
enum_entries_string.append(' /**')
enum_entries_string.append('\n'.join(
comments_line_wrapper.wrap(enum_comments)))
enum_entries_string.append(' */')
enum_entries_string.append(enum_template.substitute(values))
if enum_name != "NUM_ENTRIES":
enum_names.append(enum_definition.class_name + '.' + enum_name)
enum_entries_string = '\n'.join(enum_entries_string)
enum_names_indent = ' ' * 4
wrapper = textwrap.TextWrapper(initial_indent = enum_names_indent,
subsequent_indent = enum_names_indent,
width = 100)
enum_names_string = '\n'.join(wrapper.wrap(', '.join(enum_names)))
values = {
'CLASS_NAME': enum_definition.class_name,
'ENUM_ENTRIES': enum_entries_string,
'PACKAGE': enum_definition.enum_package,
'INT_DEF': enum_names_string,
'SCRIPT_NAME': java_cpp_utils.GetScriptName(),
'SOURCE_PATH': source_path,
'YEAR': str(date.today().year)
}
return template.substitute(values)
def DoMain(argv):
usage = 'usage: %prog [options] [output_dir] input_file(s)...'
parser = optparse.OptionParser(usage=usage)
parser.add_option('--srcjar',
help='When specified, a .srcjar at the given path is '
'created instead of individual .java files.')
options, args = parser.parse_args(argv)
if not args:
parser.error('Need to specify at least one input file')
input_paths = args
with action_helpers.atomic_output(options.srcjar) as f:
with zipfile.ZipFile(f, 'w', zipfile.ZIP_STORED) as srcjar:
for output_path, data in DoGenerate(input_paths):
zip_helpers.add_to_zip_hermetic(srcjar, output_path, data=data)
if __name__ == '__main__':
DoMain(sys.argv[1:])
|
07a31bc70ed43b6ae8ff30bf9fde6bc72beeed51
|
974d04d2ea27b1bba1c01015a98112d2afb78fe5
|
/test/legacy_test/test_fuse_gemm_epilogue_pass.py
|
13480e3d75dedd9a8b51f1f6236e4a0281837c15
|
[
"Apache-2.0"
] |
permissive
|
PaddlePaddle/Paddle
|
b3d2583119082c8e4b74331dacc4d39ed4d7cff0
|
22a11a60e0e3d10a3cf610077a3d9942a6f964cb
|
refs/heads/develop
| 2023-08-17T21:27:30.568889
| 2023-08-17T12:38:22
| 2023-08-17T12:38:22
| 65,711,522
| 20,414
| 5,891
|
Apache-2.0
| 2023-09-14T19:20:51
| 2016-08-15T06:59:08
|
C++
|
UTF-8
|
Python
| false
| false
| 13,474
|
py
|
test_fuse_gemm_epilogue_pass.py
|
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
# Copyright (c) 2022 NVIDIA Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test cases for role makers."""
import unittest
import numpy as np
import paddle
from paddle.fluid import core
def compare(ref, res, atol, rtol):
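# Elementwise tolerance check in the spirit of np.allclose: an element
# passes when |res - ref| <= atol + rtol * |ref|, and the comparison
# succeeds only if every element is within tolerance.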
ref = np.array(ref).flatten()
res = np.array(res).flatten()
tmp_ref = ref.astype(np.float64)
tol = atol + rtol * abs(tmp_ref)
diff = abs(res - ref)
indices = np.transpose(np.where(diff > tol))
if len(indices) == 0:
return True
return False
def verify_node_count(graph, node_name, target_count):
count = 0
for node in graph.nodes():
if node.name() == node_name:
count += 1
return count == target_count
class MultiFCLayer(paddle.nn.Layer):
def __init__(self, hidden, Activation):
super().__init__()
self.linear1 = paddle.nn.Linear(hidden, 4 * hidden)
self.linear2 = paddle.nn.Linear(4 * hidden, hidden)
self.linear3 = paddle.nn.Linear(hidden, hidden)
self.relu1 = Activation()
self.relu2 = Activation()
self.relu3 = Activation()
def forward(self, x, matmul_y, ele_y):
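# Two branches share the linear1/linear2 trunk: output1 keeps an early
# matmul result, while the main path runs through linear3, an activation,
# another matmul and an elementwise add. The three Linear layers are what
# give the fuse_gemm_epilogue pass exactly three GEMM+bias(+activation)
# patterns to fuse, matching the node-count assertions in the tests below.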
output = self.linear1(x)
output = self.relu1(output)
output = self.linear2(output)
output1 = paddle.matmul(output, matmul_y)
output = self.linear3(output)
output = self.relu2(output)
output = paddle.matmul(output, matmul_y)
output = paddle.add(output, ele_y)
output = self.relu3(output)
output = paddle.add(output, output1)
return output
@unittest.skipIf(
not core.is_compiled_with_cuda(), "core is not compiled with CUDA"
)
class TestFuseGemmEpilogueFWDBase(unittest.TestCase):
def setUp(self):
self.batch = 64
self.seqlen = 128
self.hidden = 768
paddle.enable_static()
self.main_prog = paddle.static.Program()
self.startup_prog = paddle.static.Program()
with paddle.static.program_guard(self.main_prog, self.startup_prog):
data = paddle.static.data(
name="_data",
shape=[-1, self.seqlen, self.hidden],
dtype='float32',
)
matmul_y = paddle.static.data(
name="_matmul_y",
shape=[1, self.hidden, self.hidden],
dtype='float32',
)
ele_y = paddle.static.data(
name="_ele_y",
shape=[
self.hidden,
],
dtype='float32',
)
multi_layer = MultiFCLayer(self.hidden, self._get_act_type()[0])
with paddle.static.amp.fp16_guard():
out = multi_layer(data, matmul_y, ele_y)
self.loss = paddle.mean(out)
self.data_arr = (
np.random.random((self.batch, self.seqlen, self.hidden)).astype(
"float32"
)
- 0.5
)
self.matmul_y_arr = (
np.random.random((1, self.hidden, self.hidden)).astype("float32")
- 0.5
)
self.ele_y_arr = (
np.random.random((self.hidden,)).astype("float32") - 0.5
)
self.place = paddle.CUDAPlace(0)
self.exe = paddle.static.Executor(self.place)
self.exe.run(self.startup_prog)
self._pre_test_hooks()
self.feed = {
"_data": self.data_arr,
"_matmul_y": self.matmul_y_arr,
"_ele_y": self.ele_y_arr,
}
self.reference = paddle.static.Executor(self.place).run(
self.main_prog, feed=self.feed, fetch_list=[self.loss.name]
)
@unittest.skipIf(
not core.is_compiled_with_cuda(), "core is not compiled with CUDA"
)
def _test_output(self):
build_strategy = paddle.static.BuildStrategy()
build_strategy.fuse_gemm_epilogue = True
program = paddle.static.CompiledProgram(
self.main_prog, build_strategy=build_strategy
)
result = self.exe.run(
program, feed=self.feed, fetch_list=[self.loss.name]
)
self.assertTrue(
compare(self.reference, result, self.atol, self.rtol),
f"[{type(self).__name__}] outputs are miss-matched.",
)
self.assertTrue(
verify_node_count(program._graph, "fused_gemm_epilogue", 3),
"[{}] The number of fused_gemm_epilogue is miss-matched in the computing graph.".format(
type(self).__name__
),
)
act_fwd_name = self._get_act_type()[1]
self.assertTrue(
verify_node_count(program._graph, act_fwd_name, 1),
"[{}] The number of {} is miss-matched in the computing graph.".format(
type(self).__name__, act_fwd_name
),
)
def _pre_test_hooks(self):
self.atol = 1e-4
self.rtol = 1e-3
def _get_act_type(self):
return paddle.nn.ReLU, "relu"
@unittest.skipIf(
not core.is_compiled_with_cuda(), "core is not compiled with CUDA"
)
class TestFuseGemmEpilogueReluFWDFP32(TestFuseGemmEpilogueFWDBase):
def _pre_test_hooks(self):
self.atol = 1e-3
self.rtol = 1e-2
def _get_act_type(self):
return paddle.nn.ReLU, "relu"
def test_output(self):
self._test_output()
@unittest.skipIf(
not core.is_compiled_with_cuda(), "core is not compiled with CUDA"
)
class TestFuseGemmEpilogueReluFWDFP16(TestFuseGemmEpilogueReluFWDFP32):
def _pre_test_hooks(self):
self.atol = 1e-3
self.rtol = 1e-2
fp16_var_list = paddle.static.amp.cast_model_to_fp16(self.main_prog)
paddle.static.amp.cast_parameters_to_fp16(
self.place, self.main_prog, to_fp16_var_names=fp16_var_list
)
@unittest.skipIf(
not core.is_compiled_with_cuda(), "core is not compiled with CUDA"
)
class TestFuseGemmEpilogueGeluFWDFP32(TestFuseGemmEpilogueFWDBase):
def _pre_test_hooks(self):
self.atol = 1e-4
self.rtol = 1e-3
def _get_act_type(self):
return paddle.nn.GELU, "gelu"
def test_output(self):
self._test_output()
@unittest.skipIf(
not core.is_compiled_with_cuda(), "core is not compiled with CUDA"
)
class TestFuseGemmEpilogueGeluFWDFP16(TestFuseGemmEpilogueGeluFWDFP32):
def _pre_test_hooks(self):
self.atol = 1e-3
self.rtol = 1e-2
fp16_var_list = paddle.static.amp.cast_model_to_fp16(self.main_prog)
paddle.static.amp.cast_parameters_to_fp16(
self.place, self.main_prog, to_fp16_var_names=fp16_var_list
)
@unittest.skipIf(
not core.is_compiled_with_cuda(), "core is not compiled with CUDA"
)
class TestFuseGemmEpilogueBWDBase(unittest.TestCase):
def setUp(self):
self.batch = 64
self.seqlen = 128
self.hidden = 768
paddle.enable_static()
self.main_prog = paddle.static.Program()
self.startup_prog = paddle.static.Program()
with paddle.static.program_guard(self.main_prog, self.startup_prog):
data = paddle.static.data(
name="_data",
shape=[-1, self.seqlen, self.hidden],
dtype='float32',
)
matmul_y = paddle.static.data(
name="_matmul_y",
shape=[1, self.hidden, self.hidden],
dtype='float32',
)
ele_y = paddle.static.data(
name="_ele_y",
shape=[
self.hidden,
],
dtype='float32',
)
multi_layer = MultiFCLayer(self.hidden, self._get_act_type()[0])
with paddle.static.amp.fp16_guard():
out = multi_layer(data, matmul_y, ele_y)
self.loss = paddle.mean(out)
paddle.static.append_backward(loss=self.loss)
self.data_arr = (
np.random.random((self.batch, self.seqlen, self.hidden)).astype(
"float32"
)
- 0.5
)
self.matmul_y_arr = (
np.random.random((1, self.hidden, self.hidden)).astype("float32")
- 0.5
)
self.ele_y_arr = (
np.random.random((self.hidden,)).astype("float32") - 0.5
)
self.place = paddle.CUDAPlace(0)
self.exe = paddle.static.Executor(self.place)
self.exe.run(self.startup_prog)
self._pre_test_hooks()
self.feed = {
"_data": self.data_arr,
"_matmul_y": self.matmul_y_arr,
"_ele_y": self.ele_y_arr,
}
self.fetch = [
self.loss.name,
f'{multi_layer.linear1.full_name()}.w_0@GRAD',
f'{multi_layer.linear1.full_name()}.b_0@GRAD',
f'{multi_layer.linear2.full_name()}.w_0@GRAD',
f'{multi_layer.linear2.full_name()}.b_0@GRAD',
f'{multi_layer.linear3.full_name()}.w_0@GRAD',
f'{multi_layer.linear3.full_name()}.b_0@GRAD',
]
self.outs_ref = paddle.static.Executor(self.place).run(
self.main_prog, feed=self.feed, fetch_list=self.fetch
)
@unittest.skipIf(
not core.is_compiled_with_cuda(), "core is not compiled with CUDA"
)
def _test_output(self):
build_strategy = paddle.static.BuildStrategy()
build_strategy.fuse_gemm_epilogue = True
program = paddle.static.CompiledProgram(
self.main_prog, build_strategy=build_strategy
)
outs_res = self.exe.run(program, feed=self.feed, fetch_list=self.fetch)
for ref, res in zip(self.outs_ref, outs_res):
self.assertTrue(
compare(ref, res, self.atol, self.rtol),
f"[{type(self).__name__}] output is miss-matched.",
)
self.assertTrue(
verify_node_count(program._graph, "fused_gemm_epilogue", 3),
"[{}] The number of fused_gemm_epilogue is miss-matched in the computing graph.".format(
type(self).__name__
),
)
self.assertTrue(
verify_node_count(program._graph, "fused_gemm_epilogue_grad", 3),
"[{}] The number of fused_gemm_epilogue_grad is miss-matched in the computing graph.".format(
type(self).__name__
),
)
_, act_fwd_name, act_bwd_name = self._get_act_type()
self.assertTrue(
verify_node_count(program._graph, act_fwd_name, 1),
"[{}] The number of {} is miss-matched in the computing graph.".format(
type(self).__name__, act_fwd_name
),
)
self.assertTrue(
verify_node_count(program._graph, act_bwd_name, 2),
"[{}] The number of {} is miss-matched in the computing graph.".format(
type(self).__name__, act_bwd_name
),
)
def _pre_test_hooks(self):
self.atol = 1e-4
self.rtol = 1e-3
def _get_act_type(self):
return paddle.nn.ReLU, "relu", "relu_grad"
@unittest.skipIf(
not core.is_compiled_with_cuda(), "core is not compiled with CUDA"
)
class TestFuseGemmEpilogueReLUBWDFP32(TestFuseGemmEpilogueBWDBase):
def _pre_test_hooks(self):
self.atol = 1e-4
self.rtol = 1e-3
def _get_act_type(self):
return paddle.nn.ReLU, "relu", "relu_grad"
def test_output(self):
self._test_output()
@unittest.skipIf(
not core.is_compiled_with_cuda(), "core is not compiled with CUDA"
)
class TestFuseGemmEpilogueReLUBWDFP16(TestFuseGemmEpilogueReLUBWDFP32):
def _pre_test_hooks(self):
self.atol = 1e-3
self.rtol = 1e-2
fp16_var_list = paddle.static.amp.cast_model_to_fp16(self.main_prog)
paddle.static.amp.cast_parameters_to_fp16(
self.place, self.main_prog, to_fp16_var_names=fp16_var_list
)
@unittest.skipIf(
not core.is_compiled_with_cuda(), "core is not compiled with CUDA"
)
class TestFuseGemmEpilogueGeLUBWDFP32(TestFuseGemmEpilogueBWDBase):
def _pre_test_hooks(self):
self.atol = 5e-4
self.rtol = 1e-3
def _get_act_type(self):
return paddle.nn.GELU, "gelu", "gelu_grad"
def test_output(self):
self._test_output()
@unittest.skipIf(
not core.is_compiled_with_cuda(), "core is not compiled with CUDA"
)
class TestFuseGemmEpilogueGeLUBWDFP16(TestFuseGemmEpilogueGeLUBWDFP32):
def _pre_test_hooks(self):
self.atol = 1e-3
self.rtol = 1e-2
fp16_var_list = paddle.static.amp.cast_model_to_fp16(self.main_prog)
paddle.static.amp.cast_parameters_to_fp16(
self.place, self.main_prog, to_fp16_var_names=fp16_var_list
)
if __name__ == "__main__":
np.random.seed(0)
unittest.main()
|
7ddef4a44b8d782d4a805d830fe269d982857ec3
|
21b1ebd3c489b99d834f08c63387b045116a01ed
|
/tests/unit/operations/test_upgradeops.py
|
4ef948b0ad0f0cb7ff50e44798a61e0616499d44
|
[
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] |
permissive
|
aws/aws-elastic-beanstalk-cli
|
8d1f34fbec3e89164d8b607666c0800c28e334f7
|
252101641a7b6acb5de17fafd6adf8b96418426f
|
refs/heads/master
| 2023-09-03T15:04:58.036979
| 2023-08-31T17:26:43
| 2023-08-31T17:26:43
| 175,470,423
| 149
| 84
|
Apache-2.0
| 2023-09-12T14:53:42
| 2019-03-13T17:42:43
|
Python
|
UTF-8
|
Python
| false
| false
| 24,058
|
py
|
test_upgradeops.py
|
# Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
import os
import shutil
import mock
import unittest
from ebcli.operations import upgradeops
from ebcli.objects.platform import PlatformVersion
from ebcli.objects.environment import Environment
class TestUpgradeOps(unittest.TestCase):
def setUp(self):
self.root_dir = os.getcwd()
if not os.path.exists('testDir'):
os.mkdir('testDir')
os.chdir('testDir')
def tearDown(self):
os.chdir(self.root_dir)
shutil.rmtree('testDir')
def test__get_warning_message__confirm(self):
self.assertIsNone(
upgradeops._get_warning_message(
confirm=True,
single=False,
rolling_enabled=False,
webserver=False,
noroll=False
)
)
def test__get_warning_message__single_instance(self):
self.assertEqual(
'This operation causes application downtime while Elastic Beanstalk '
'replaces the instance.',
upgradeops._get_warning_message(
confirm=False,
single=True,
rolling_enabled=False,
webserver=False,
noroll=False
)
)
def test__get_warning_message__not_rolling_enabled__noroll_set(self):
self.assertEqual(
'This operation causes application downtime while Elastic Beanstalk '
'replaces your instances.',
upgradeops._get_warning_message(
confirm=False,
single=False,
rolling_enabled=False,
webserver=False,
noroll=True
)
)
def test__get_warning_message__rolling_enabled(self):
self.assertEqual(
'This operation replaces your instances with minimal or zero downtime. '
'You may cancel the upgrade after it has started by typing "eb abort".',
upgradeops._get_warning_message(
confirm=False,
single=False,
rolling_enabled=True,
webserver=False,
noroll=False
)
)
def test__get_warning_message__not_rolling_enabled__noroll_not_specified__webserver_environment(self):
self.assertEqual(
'Elastic Beanstalk will enable Health-based rolling updates to avoid '
'application downtime while it replaces your instances. You may cancel '
'the upgrade after it has started by typing "eb abort". To upgrade '
'without rolling updates, type "eb upgrade --noroll".',
upgradeops._get_warning_message(
confirm=False,
single=False,
rolling_enabled=False,
webserver=True,
noroll=False
)
)
def test__get_warning_message__not_rolling_enabled__noroll_not_specified__non_webserver_environment(self):
self.assertEqual(
'Elastic Beanstalk will enable Time-based rolling updates to avoid '
'application downtime while it replaces your instances. You may cancel '
'the upgrade after it has started by typing "eb abort". To upgrade '
'without rolling updates, type "eb upgrade --noroll".',
upgradeops._get_warning_message(
confirm=False,
single=False,
rolling_enabled=False,
webserver=False,
noroll=False
)
)
def test__should_add_rolling__noroll_set(self):
self.assertFalse(upgradeops._should_add_rolling(single=False, rolling_enabled=False, noroll=True))
def test__should_add_rolling__single_environment(self):
self.assertFalse(upgradeops._should_add_rolling(single=True, rolling_enabled=False, noroll=False))
def test__should_add_rolling__rolling_enabled(self):
self.assertFalse(upgradeops._should_add_rolling(single=False, rolling_enabled=True, noroll=False))
def test__should_add_rolling(self):
self.assertTrue(upgradeops._should_add_rolling(single=False, rolling_enabled=False, noroll=False))
@mock.patch('ebcli.operations.upgradeops.elasticbeanstalk.get_environment_settings')
@mock.patch('ebcli.operations.upgradeops.solution_stack_ops.find_solution_stack_from_string')
@mock.patch('ebcli.operations.upgradeops.io.echo')
@mock.patch('ebcli.operations.upgradeops.io.validate_action')
@mock.patch('ebcli.operations.upgradeops.io.log_warning')
@mock.patch('ebcli.operations.upgradeops._get_warning_message')
@mock.patch('ebcli.operations.upgradeops._should_add_rolling')
@mock.patch('ebcli.operations.upgradeops.do_upgrade')
def test_upgrade_env__load_balanced_webserver_environment__rolling_update_not_enabled(
self,
do_upgrade_mock,
_should_add_rolling_mock,
_get_warning_message_mock,
log_warning_mock,
validate_action_mock,
echo_mock,
find_solution_stack_from_string_mock,
get_environment_settings_mock
):
find_solution_stack_from_string_mock.return_value = PlatformVersion(
'arn:aws:elasticbeanstalk:us-west-2::platform/Node.js '
'running on 64bit Amazon Linux/4.5.2'
)
describe_configuration_settings_response = {
'ConfigurationSettings': [
{
'PlatformArn': 'arn:aws:elasticbeanstalk:us-west-2::platform/Node.js running on 64bit Amazon Linux/4.5.1',
'SolutionStackName': '64bit Amazon Linux 2017.09 v4.5.1 running Node.js',
'EnvironmentName': 'my-environment',
'Tier': {
'Type': 'Standard',
'Name': 'WebServer',
'Version': '1.0'
},
'OptionSettings': [
{
'ResourceName': 'AWSEBAutoScalingGroup',
'Namespace': 'aws:autoscaling:updatepolicy:rollingupdate',
'OptionName': 'RollingUpdateEnabled',
'Value': 'false'
}
]
}
]
}
get_environment_settings_mock.return_value = Environment.json_to_environment_object(
describe_configuration_settings_response['ConfigurationSettings'][0]
)
_get_warning_message_mock.return_value = 'some warning message'
_should_add_rolling_mock.return_value = True
upgradeops.upgrade_env('my-application', 'my-environment', 10, False, False)
get_environment_settings_mock.assert_called_once_with('my-application', 'my-environment')
echo_mock.assert_has_calls(
[
mock.call(),
mock.call('The environment "my-environment" will be updated to use the most recent platform version.'),
mock.call(
'Current platform:',
PlatformVersion(
'arn:aws:elasticbeanstalk:us-west-2::platform/Node.js running on 64bit Amazon Linux/4.5.1'
)
),
mock.call(
'Latest platform: ',
'arn:aws:elasticbeanstalk:us-west-2::platform/Node.js running on 64bit Amazon Linux/4.5.2'
),
mock.call(),
mock.call('You can also change your platform version by typing "eb clone" and then "eb swap".'),
mock.call()
]
)
_get_warning_message_mock.assert_called_once_with(False, False, False, True, False)
log_warning_mock.assert_called_once_with('some warning message')
validate_action_mock.assert_called_once_with('To continue, type the environment name', 'my-environment')
_should_add_rolling_mock.assert_called_once_with(False, False, False)
do_upgrade_mock.assert_called_once_with(
'my-environment',
True,
10,
'arn:aws:elasticbeanstalk:us-west-2::platform/Node.js running on 64bit Amazon Linux/4.5.2',
health_based=True,
platform_arn='arn:aws:elasticbeanstalk:us-west-2::platform/Node.js running on 64bit Amazon Linux/4.5.2'
)
@mock.patch('ebcli.operations.upgradeops.elasticbeanstalk.get_environment_settings')
@mock.patch('ebcli.operations.upgradeops.solution_stack_ops.find_solution_stack_from_string')
@mock.patch('ebcli.operations.upgradeops.io.echo')
@mock.patch('ebcli.operations.upgradeops.io.validate_action')
@mock.patch('ebcli.operations.upgradeops.io.log_warning')
@mock.patch('ebcli.operations.upgradeops._get_warning_message')
@mock.patch('ebcli.operations.upgradeops._should_add_rolling')
@mock.patch('ebcli.operations.upgradeops.do_upgrade')
def test_upgrade_env__single_instance_webserver_environment__rolling_update_not_enabled(
self,
do_upgrade_mock,
_should_add_rolling_mock,
_get_warning_message_mock,
log_warning_mock,
validate_action_mock,
echo_mock,
find_solution_stack_from_string_mock,
get_environment_settings_mock
):
find_solution_stack_from_string_mock.return_value = PlatformVersion(
'arn:aws:elasticbeanstalk:us-west-2::platform/Node.js '
'running on 64bit Amazon Linux/4.5.2'
)
describe_configuration_settings_response = {
'ConfigurationSettings': [
{
'PlatformArn': 'arn:aws:elasticbeanstalk:us-west-2::platform/Node.js running on 64bit Amazon Linux/4.5.1',
'SolutionStackName': '64bit Amazon Linux 2017.09 v4.5.1 running Node.js',
'EnvironmentName': 'my-environment',
'Tier': {
'Type': 'Standard',
'Name': 'WebServer',
'Version': '1.0'
},
'OptionSettings': [
{
'ResourceName': 'AWSEBAutoScalingGroup',
'Namespace': 'aws:autoscaling:updatepolicy:rollingupdate',
'OptionName': 'RollingUpdateEnabled',
'Value': 'false'
},
{
'Namespace': 'aws:elasticbeanstalk:environment',
'OptionName': 'EnvironmentType',
'Value': 'SingleInstance'
}
]
}
]
}
get_environment_settings_mock.return_value = Environment.json_to_environment_object(
describe_configuration_settings_response['ConfigurationSettings'][0]
)
_get_warning_message_mock.return_value = 'some warning message'
_should_add_rolling_mock.return_value = False
upgradeops.upgrade_env('my-application', 'my-environment', 10, False, False)
get_environment_settings_mock.assert_called_once_with('my-application', 'my-environment')
echo_mock.assert_has_calls(
[
mock.call(),
mock.call('The environment "my-environment" will be updated to use the most recent platform version.'),
mock.call(
'Current platform:',
PlatformVersion(
'arn:aws:elasticbeanstalk:us-west-2::platform/Node.js running on 64bit Amazon Linux/4.5.1'
)
),
mock.call(
'Latest platform: ',
'arn:aws:elasticbeanstalk:us-west-2::platform/Node.js running on 64bit Amazon Linux/4.5.2'
),
mock.call(),
mock.call('You can also change your platform version by typing "eb clone" and then "eb swap".'),
mock.call()
]
)
_get_warning_message_mock.assert_called_once_with(False, True, False, True, False)
log_warning_mock.assert_called_once_with('some warning message')
validate_action_mock.assert_called_once_with('To continue, type the environment name', 'my-environment')
_should_add_rolling_mock.assert_called_once_with(True, False, False)
do_upgrade_mock.assert_called_once_with(
'my-environment',
False,
10,
'arn:aws:elasticbeanstalk:us-west-2::platform/Node.js running on 64bit Amazon Linux/4.5.2',
health_based=True,
platform_arn='arn:aws:elasticbeanstalk:us-west-2::platform/Node.js running on 64bit Amazon Linux/4.5.2'
)
@mock.patch('ebcli.operations.upgradeops.elasticbeanstalk.get_environment_settings')
@mock.patch('ebcli.operations.upgradeops.solution_stack_ops.find_solution_stack_from_string')
@mock.patch('ebcli.operations.upgradeops.io.echo')
@mock.patch('ebcli.operations.upgradeops.io.validate_action')
@mock.patch('ebcli.operations.upgradeops.io.log_warning')
@mock.patch('ebcli.operations.upgradeops._should_add_rolling')
@mock.patch('ebcli.operations.upgradeops.do_upgrade')
def test_upgrade_env__force_confirm(
self,
do_upgrade_mock,
_should_add_rolling_mock,
log_warning_mock,
validate_action_mock,
echo_mock,
find_solution_stack_from_string_mock,
get_environment_settings_mock
):
find_solution_stack_from_string_mock.return_value = PlatformVersion(
'arn:aws:elasticbeanstalk:us-west-2::platform/Node.js '
'running on 64bit Amazon Linux/4.5.2'
)
describe_configuration_settings_response = {
'ConfigurationSettings': [
{
'PlatformArn': 'arn:aws:elasticbeanstalk:us-west-2::platform/Node.js running on 64bit Amazon Linux/4.5.1',
'SolutionStackName': '64bit Amazon Linux 2017.09 v4.5.1 running Node.js',
'EnvironmentName': 'my-environment',
'Tier': {
'Type': 'Standard',
'Name': 'WebServer',
'Version': '1.0'
},
'OptionSettings': [
{
'ResourceName': 'AWSEBAutoScalingGroup',
'Namespace': 'aws:autoscaling:updatepolicy:rollingupdate',
'OptionName': 'RollingUpdateEnabled',
'Value': 'false'
}
]
}
]
}
get_environment_settings_mock.return_value = Environment.json_to_environment_object(
describe_configuration_settings_response['ConfigurationSettings'][0]
)
_should_add_rolling_mock.return_value = True
upgradeops.upgrade_env('my-application', 'my-environment', 10, True, False)
get_environment_settings_mock.assert_called_once_with('my-application', 'my-environment')
echo_mock.assert_has_calls(
[
mock.call(),
mock.call('The environment "my-environment" will be updated to use the most recent platform version.'),
mock.call(
'Current platform:',
PlatformVersion(
'arn:aws:elasticbeanstalk:us-west-2::platform/Node.js running on 64bit Amazon Linux/4.5.1'
)
),
mock.call(
'Latest platform: ',
'arn:aws:elasticbeanstalk:us-west-2::platform/Node.js running on 64bit Amazon Linux/4.5.2'
),
mock.call()
]
)
validate_action_mock.assert_not_called()
_should_add_rolling_mock.assert_called_once_with(False, False, False)
do_upgrade_mock.assert_called_once_with(
'my-environment',
True,
10,
'arn:aws:elasticbeanstalk:us-west-2::platform/Node.js running on 64bit Amazon Linux/4.5.2',
health_based=True,
platform_arn='arn:aws:elasticbeanstalk:us-west-2::platform/Node.js running on 64bit Amazon Linux/4.5.2'
)
@mock.patch('ebcli.operations.upgradeops.elasticbeanstalk.get_environment_settings')
@mock.patch('ebcli.operations.upgradeops.solution_stack_ops.find_solution_stack_from_string')
@mock.patch('ebcli.operations.upgradeops.io.echo')
@mock.patch('ebcli.operations.upgradeops.do_upgrade')
def test_upgrade_env__using_latest_platform(
self,
do_upgrade_mock,
echo_mock,
find_solution_stack_from_string_mock,
get_environment_settings_mock
):
find_solution_stack_from_string_mock.return_value = PlatformVersion(
'arn:aws:elasticbeanstalk:us-west-2::platform/Node.js '
'running on 64bit Amazon Linux/4.5.2'
)
describe_configuration_settings_response = {
'ConfigurationSettings': [
{
'PlatformArn': 'arn:aws:elasticbeanstalk:us-west-2::platform/Node.js running on 64bit Amazon Linux/4.5.2',
'SolutionStackName': '64bit Amazon Linux 2017.09 v4.5.2 running Node.js',
'EnvironmentName': 'my-environment',
'Tier': {
'Type': 'Standard',
'Name': 'WebServer',
'Version': '1.0'
}
}
]
}
get_environment_settings_mock.return_value = Environment.json_to_environment_object(
describe_configuration_settings_response['ConfigurationSettings'][0]
)
upgradeops.upgrade_env('my-application', 'my-environment', 10, True, False)
get_environment_settings_mock.assert_called_once_with('my-application', 'my-environment')
echo_mock.assert_called_once_with('Environment already on most recent platform version.')
do_upgrade_mock.assert_not_called()
@mock.patch('ebcli.operations.upgradeops.commonops.update_environment')
@mock.patch('ebcli.operations.upgradeops.io.log_warning')
def test_do_upgrade__add_rolling(
self,
log_warning_mock,
update_environment_mock
):
upgradeops.do_upgrade(
'my-environment',
True,
10,
'arn:aws:elasticbeanstalk:us-west-2::platform/Node.js running on 64bit Amazon Linux/4.5.2',
health_based=True,
platform_arn='arn:aws:elasticbeanstalk:us-west-2::platform/Node.js running on 64bit Amazon Linux/4.5.2'
)
update_environment_mock.assert_called_once_with(
'my-environment',
[
{
'Namespace': 'aws:autoscaling:updatepolicy:rollingupdate',
'OptionName': 'RollingUpdateEnabled',
'Value': 'true'
},
{
'Namespace': 'aws:autoscaling:updatepolicy:rollingupdate',
'OptionName': 'RollingUpdateType',
'Value': 'Health'
}
],
None,
platform_arn='arn:aws:elasticbeanstalk:us-west-2::platform/Node.js '
'running on 64bit Amazon Linux/4.5.2',
timeout=10
)
log_warning_mock.assert_called_once_with(
'Enabling Health-based rolling updates to environment.'
)
@mock.patch('ebcli.operations.upgradeops.commonops.update_environment')
@mock.patch('ebcli.operations.upgradeops.io.log_warning')
def test_do_upgrade__update_environment_with_solution_stack(
self,
log_warning_mock,
update_environment_mock
):
upgradeops.do_upgrade(
'my-environment',
True,
10,
'64bit Amazon Linux 2017.09 v4.5.2 running Node.js',
health_based=True,
platform_arn=''
)
update_environment_mock.assert_called_once_with(
'my-environment',
[
{
'Namespace': 'aws:autoscaling:updatepolicy:rollingupdate',
'OptionName': 'RollingUpdateEnabled',
'Value': 'true'
},
{
'Namespace': 'aws:autoscaling:updatepolicy:rollingupdate',
'OptionName': 'RollingUpdateType',
'Value': 'Health'
}
],
None,
solution_stack_name='64bit Amazon Linux 2017.09 v4.5.2 running Node.js',
timeout=10
)
log_warning_mock.assert_called_once_with(
'Enabling Health-based rolling updates to environment.'
)
@mock.patch('ebcli.operations.upgradeops.commonops.update_environment')
@mock.patch('ebcli.operations.upgradeops.io.log_warning')
def test_do_upgrade__time_based_update(
self,
log_warning_mock,
update_environment_mock
):
upgradeops.do_upgrade(
'my-environment',
True,
10,
'arn:aws:elasticbeanstalk:us-west-2::platform/Node.js running on 64bit Amazon Linux/4.5.2',
health_based=False,
platform_arn='arn:aws:elasticbeanstalk:us-west-2::platform/Node.js running on 64bit Amazon Linux/4.5.2'
)
update_environment_mock.assert_called_once_with(
'my-environment',
[
{
'Namespace': 'aws:autoscaling:updatepolicy:rollingupdate',
'OptionName': 'RollingUpdateEnabled',
'Value': 'true'
},
{
'Namespace': 'aws:autoscaling:updatepolicy:rollingupdate',
'OptionName': 'RollingUpdateType',
'Value': 'Time'
}
],
None,
platform_arn='arn:aws:elasticbeanstalk:us-west-2::platform/Node.js '
'running on 64bit Amazon Linux/4.5.2',
timeout=10
)
log_warning_mock.assert_called_once_with(
'Enabling Time-based rolling updates to environment.'
)
@mock.patch('ebcli.operations.upgradeops.commonops.update_environment')
def test_do_upgrade__no_rolling_updates(
self,
update_environment_mock
):
upgradeops.do_upgrade(
'my-environment',
False,
10,
'arn:aws:elasticbeanstalk:us-west-2::platform/Node.js running on 64bit Amazon Linux/4.5.2',
health_based=True,
platform_arn='arn:aws:elasticbeanstalk:us-west-2::platform/Node.js running on 64bit Amazon Linux/4.5.2'
)
update_environment_mock.assert_called_once_with(
'my-environment',
None,
None,
platform_arn='arn:aws:elasticbeanstalk:us-west-2::platform/Node.js running on 64bit Amazon Linux/4.5.2',
timeout=10
)
|
b87a130dc28ec2c2eb01868c77689bce6eba9071
|
bdaa910baf85fba41e44849d4037d9940e03e4f6
|
/popmon/pipeline/report.py
|
fe29b925610b1c2a4773b55af7a28b86b35c2947
|
[
"LicenseRef-scancode-free-unknown",
"MIT",
"Apache-2.0",
"LicenseRef-scancode-proprietary-license"
] |
permissive
|
ing-bank/popmon
|
428693596663405e2e3a290d4ae6afa96762b088
|
ac79d212a519368d01525950142e0a282f5287c3
|
refs/heads/master
| 2023-08-08T03:06:42.714926
| 2023-07-18T10:21:06
| 2023-07-18T10:24:07
| 258,180,016
| 463
| 38
|
MIT
| 2023-07-18T10:05:37
| 2020-04-23T11:21:14
|
Python
|
UTF-8
|
Python
| false
| false
| 11,282
|
py
|
report.py
|
# Copyright (c) 2023 ING Analytics Wholesale Banking
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
# the Software, and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
# FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
from __future__ import annotations
import logging
from histogrammar.dfinterface.make_histograms import get_bin_specs, make_histograms
from popmon.config import Settings
from popmon.pipeline.dataset_splitter import split_dataset
from popmon.pipeline.report_pipelines import ReportPipe, get_report_pipeline_class
from popmon.resources import templates_env
logging.basicConfig(
level=logging.INFO, format="%(asctime)s %(levelname)s [%(module)s]: %(message)s"
)
logger = logging.getLogger()
def stability_report(
hists,
settings: Settings | None = None,
reference=None,
**kwargs,
):
"""Create a data stability monitoring html report for given dict of input histograms.
:param dict hists: input histograms to be profiled and monitored over time.
:param popmon.config.Settings settings: popmon configuration object
:param reference: histograms used as reference. default is None
:param kwargs: when settings=None, parameters such as `features` and `time_axis` can be passed
:return: StabilityReport object with the results of the reporting pipeline
"""
if settings is None:
settings = Settings(**kwargs)
# perform basic input checks
if not isinstance(hists, dict):
raise TypeError("hists should be a dict of histogrammar histograms.")
if isinstance(settings.time_axis, str) and len(settings.time_axis) == 0:
settings._set_time_axis_hists(hists)
# configuration and datastore for report pipeline
cfg = {
"hists_key": "hists",
"settings": settings,
}
datastore = {"hists": hists}
if settings.reference_type in ["external", "self_split"]:
cfg["ref_hists_key"] = "ref_hists"
datastore["ref_hists"] = reference
# execute reporting pipeline
pipeline = get_report_pipeline_class(settings.reference_type, reference)(**cfg)
result = pipeline.transform(datastore)
stability_report_result = StabilityReport(datastore=result)
return stability_report_result
def df_stability_report(
df,
settings: Settings | None = None,
time_width=None,
time_offset: int = 0,
var_dtype=None,
reference=None,
split=None,
**kwargs,
):
"""Create a data stability monitoring html report for given pandas or spark dataframe.
:param df: input pandas/spark dataframe to be profiled and monitored over time.
:param popmon.config.Settings settings: popmon configuration object
:param time_width: bin width of time axis. str or number (ns). note: bin_specs takes precedence. (optional)
.. code-block:: text
Examples: '1w', 3600e9 (number of ns),
anything understood by pd.Timedelta(time_width).value
:param time_offset: bin offset of time axis. str or number (ns). note: bin_specs takes precedence. (optional)
.. code-block:: text
Examples: '1-1-2020', 0 (number of ns since 1-1-1970),
anything parsed by pd.Timestamp(time_offset).value
:param dict var_dtype: dictionary with specified datatype per feature. auto-guessed when not provided.
:param reference: reference dataframe or histograms. default is None
:param split: specification passed to ``split_dataset`` to carve a self-reference out of ``df``. default is None
:param kwargs: when settings=None, parameters such as `features` and `time_axis` can be passed
:return: StabilityReport object with the results of the reporting pipeline
"""
if settings is None:
settings = Settings(**kwargs)
if len(settings.time_axis) == 0:
settings._set_time_axis_dataframe(df)
logger.info(f'Time-axis automatically set to "{settings.time_axis}"')
if settings.time_axis not in df.columns:
raise ValueError(
f'time_axis "{settings.time_axis}" not found in columns of dataframe.'
)
if (
reference is not None
and not isinstance(reference, dict)
and settings.time_axis not in reference.columns
):
raise ValueError(
f'time_axis "{settings.time_axis}" not found in columns of reference dataframe.'
)
if settings.features is not None:
# by now time_axis is defined. ensure that all histograms start with it.
settings._ensure_features_time_axis()
# interpret time_width and time_offset
if time_width is not None:
if not isinstance(time_width, (str, int, float)):
raise TypeError
if not isinstance(time_offset, (str, int, float)):
raise TypeError
settings._set_bin_specs_by_time_width_and_offset(time_width, time_offset)
reference_hists = None
if settings.reference_type == "self" and split is not None and reference is None:
settings.reference_type = "self_split"
reference, df = split_dataset(df, split, settings.time_axis)
if reference is not None:
if settings.reference_type != "self_split":
settings.reference_type = "external"
if isinstance(reference, dict):
# 1. reference is dict of histograms
# extract features and bin_specs from reference histograms
reference_hists = reference
if settings.features is not None or settings.bin_specs != {}:
raise ValueError(
"When providing a reference, the `features` and `bin_specs` settings should be default (as they are overriden)"
)
settings.features = list(reference_hists.keys())
settings.bin_specs = get_bin_specs(reference_hists)
else:
# 2. reference is pandas or spark dataframe
# generate histograms and return updated features, bin_specs, time_axis, etc.
(
reference_hists,
settings.features,
settings.bin_specs,
settings.time_axis,
var_dtype,
) = make_histograms(
reference,
settings.features,
settings.binning,
settings.bin_specs,
settings.time_axis,
var_dtype,
ret_specs=True,
)
# use the same features, bin_specs, time_axis, etc as for reference hists
hists = make_histograms(
df,
features=settings.features,
binning=settings.binning,
bin_specs=settings.bin_specs,
time_axis=settings.time_axis,
var_dtype=var_dtype,
)
# generate data stability report
return stability_report(
hists=hists,
settings=settings,
reference=reference_hists,
)
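# Minimal usage sketch (assuming a pandas DataFrame ``df`` with a datetime
# column named "date"; the names are illustrative):
#
#   report = df_stability_report(df, time_axis="date", time_width="1w",
#                                 time_offset="2015-1-1")
#   report.to_file("report.html")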
class StabilityReport:
"""Representation layer of the report.
Stability report module wraps the representation functionality of the report
after running the pipeline and generating the report. Report can be represented
as a HTML string, HTML file or Jupyter notebook's cell output.
"""
def __init__(self, datastore, read_key: str = "html_report") -> None:
"""Initialize an instance of StabilityReport.
:param str read_key: key of HTML report data to read from data store. default is html_report.
"""
self.read_key = read_key
self.datastore = datastore
self.logger = logging.getLogger()
@property
def html_report(self):
return self.datastore[self.read_key]
def _repr_html_(self):
"""HTML representation of the class (report) embedded in an iframe.
:return HTML: HTML report in an iframe
"""
from IPython.core.display import display
return display(self.to_notebook_iframe())
def __repr__(self) -> str:
"""Override so that Jupyter Notebook does not print the object."""
return ""
def to_html(self, escape: bool = False):
"""HTML code representation of the report (represented as a string).
:param bool escape: escape characters which could conflict with other HTML code. default: False
:return str: HTML code of the report
"""
if escape:
import html
return html.escape(self.html_report)
return self.html_report
def to_file(self, filename) -> None:
"""Store HTML report in the local file system.
:param str filename: filename for the HTML report
"""
with open(filename, "w+") as file:
file.write(self.to_html())
def to_notebook_iframe(self, width: str = "100%", height: str = "100%"):
"""HTML representation of the class (report) embedded in an iframe.
:param str width: width of the frame to be shown
:param str height: height of the frame to be shown
:return HTML: HTML report in an iframe
"""
from IPython.core.display import HTML
# get iframe's snippet code, insert report's HTML code and display it as HTML
return HTML(
templates_env(
filename="notebook_iframe.html",
src=self.to_html(escape=True),
width=width,
height=height,
)
)
def regenerate(
self,
store_key: str = "html_report",
sections_key: str = "report_sections",
settings: Settings | None = None,
):
"""Regenerate HTML report with different plot settings
:param str sections_key: key to store sections data in the datastore. default is 'report_sections'.
:param str store_key: key to store the HTML report data in the datastore. default is 'html_report'
:param Settings settings: configuration to regenerate the report
:return HTML: HTML report in an iframe
"""
# basic checks
if not self.datastore:
self.logger.warning("Empty datastore, could not regenerate report.")
return None
# start from clean slate
if sections_key in self.datastore:
del self.datastore[sections_key]
if store_key in self.datastore:
del self.datastore[store_key]
if settings is None:
settings = Settings()
pipeline = ReportPipe(
sections_key=sections_key,
settings=settings,
)
result = pipeline.transform(self.datastore)
stability_report = StabilityReport(datastore=result)
return stability_report
|
8d45fd4b38f98de604ad8c5463ed0bf33e5e5999
|
ed62b03278a3dec0237e9a405e624baf11724469
|
/fastmri_recon/tests/models/subclassed_models/unet_complex_test.py
|
2733e41a32b1c845bc27d537dff483b9c7630732
|
[
"MIT"
] |
permissive
|
zaccharieramzi/fastmri-reproducible-benchmark
|
f93b460bade2b6301caa9526e5c6385369971366
|
4a4ec09524437d11153fc5a525621783689bed38
|
refs/heads/master
| 2023-05-01T01:27:19.117953
| 2022-05-04T11:31:11
| 2022-05-04T11:31:11
| 193,113,083
| 147
| 50
|
MIT
| 2023-04-12T12:17:00
| 2019-06-21T14:36:25
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 499
|
py
|
unet_complex_test.py
|
import pytest
import tensorflow as tf
from fastmri_recon.models.subclassed_models.unet import UnetComplex
@pytest.mark.parametrize('model_kwargs', [
{},
{'n_input_channels': 6},
{'res': True},
{'non_linearity': 'prelu'},
{'channel_attention_kwargs': {'dense': True}},
])
def test_cnn_complex_init_call(model_kwargs):
model = UnetComplex(**model_kwargs)
model(tf.zeros(
[1, 640, 320, model_kwargs.get('n_input_channels', 1)],
dtype=tf.complex64,
))
|
a380c6b8d611bbfbaabda932379b60154ba0759c
|
04667453a54bbefbbfc5b8cb5c2fb5392f9ca8c5
|
/json_schema_for_humans/schema/schema_keyword.py
|
1bd977d496d30e343dc200ce2190fd93bb7ee10d
|
[
"Apache-2.0"
] |
permissive
|
coveooss/json-schema-for-humans
|
74c375d385d124c6736ea7fe510a9b7a5015c13c
|
6a467492d697c4a44a409409ab0391a4a4acd291
|
refs/heads/main
| 2023-08-09T22:06:21.609887
| 2023-07-17T13:52:54
| 2023-07-17T13:52:54
| 202,809,346
| 371
| 81
|
NOASSERTION
| 2023-09-07T14:51:52
| 2019-08-16T22:58:32
|
Python
|
UTF-8
|
Python
| false
| false
| 745
|
py
|
schema_keyword.py
|
from enum import Enum
class SchemaKeyword(Enum):
REQUIRED = "required"
TITLE = "title"
CONTAINS = "contains"
EXAMPLES = "examples"
ITEMS = "items"
PREFIX_ITEMS = "prefixItems"
UNIQUE_ITEMS = "uniqueItems"
ADDITIONAL_ITEMS = "additionalItems"
MAX_ITEMS = "maxItems"
MIN_ITEMS = "minItems"
MAX_LENGTH = "maxLength"
MIN_LENGTH = "minLength"
PATTERN = "pattern"
CONST = "const"
ENUM = "enum"
ELSE = "else"
THEN = "then"
IF = "if"
NOT = "not"
ONE_OF = "oneOf"
ANY_OF = "anyOf"
ALL_OF = "allOf"
PROPERTIES = "properties"
PATTERN_PROPERTIES = "patternProperties"
ADDITIONAL_PROPERTIES = "additionalProperties"
FORMAT = "format"
TYPE = "type"
|
4027755ae2159a1895022e2223e273b3d89cf03f
|
23eafaa60b66769190227c80ac1b087697c72ed8
|
/server/test_devpi_server/conftest.py
|
55ff108423fd5fa814a0c6aa43b0dd80e4726fd5
|
[
"MIT"
] |
permissive
|
devpi/devpi
|
fe4d76f2ab57da3a766a9e13c51cd4f229598083
|
56c266744ddfd182d46ca480b787ab44a6ee4692
|
refs/heads/main
| 2023-09-04T02:57:22.075986
| 2023-08-06T10:32:46
| 2023-08-06T10:32:46
| 86,787,680
| 760
| 148
| null | 2023-09-09T19:53:10
| 2017-03-31T06:51:39
|
Python
|
UTF-8
|
Python
| false
| false
| 46,442
|
py
|
conftest.py
|
import re
from webtest.forms import Upload
import json
import webtest
import mimetypes
import subprocess
import pytest
import py
import requests
import socket
import sys
import time
from .functional import MappMixin
from .reqmock import reqmock, patch_reqsessionmock # noqa
from bs4 import BeautifulSoup
from contextlib import closing
from devpi_server import mirror
from devpi_server.config import get_pluginmanager
from devpi_server.main import XOM, parseoptions
from devpi_common.validation import normalize_name
from devpi_common.url import URL
from devpi_server.log import threadlog, thread_clear_log
from io import BytesIO
from pyramid.authentication import b64encode
from pyramid.httpexceptions import status_map
from queue import Queue as BaseQueue
from webtest import TestApp as TApp
from webtest import TestResponse
import hashlib
@pytest.fixture(scope="session")
def server_version():
from devpi_server import __version__
from devpi_common.metadata import parse_version
return parse_version(__version__)
def make_file_url(basename, content, stagename=None, baseurl="http://localhost/", add_hash=True):
from devpi_server.filestore import get_default_hash_spec, make_splitdir
hash_spec = get_default_hash_spec(content)
hashdir = "/".join(make_splitdir(hash_spec))
if add_hash:
s = "%s{stage}/+f/%s/%s#%s" % (baseurl, hashdir, basename, hash_spec)
else:
s = "%s{stage}/+f/%s/%s" % (baseurl, hashdir, basename)
if stagename is not None:
s = s.format(stage=stagename)
return s
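# Shape of the result (digest abbreviated, assuming the default hash type
# is sha256):
#   make_file_url("pkg-1.0.zip", b"123", stagename="user/dev")
#   -> "http://localhost/user/dev/+f/<d1>/<d2>/pkg-1.0.zip#sha256=<digest>"
# where <d1>/<d2> are the leading digest characters produced by
# make_splitdir.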
class _TimeoutQueue(BaseQueue):
def get(self, timeout=2):
return BaseQueue.get(self, timeout=timeout)
log = threadlog
@pytest.fixture(autouse=True)
def _clear():
thread_clear_log()
LOWER_ARGON2_MEMORY_COST = 8
LOWER_ARGON2_PARALLELISM = 1
LOWER_ARGON2_TIME_COST = 1
@pytest.fixture(autouse=True)
def lower_argon2_parameters(monkeypatch):
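# Production-strength Argon2 parameters are deliberately expensive to
# compute; patching in minimal time/memory cost keeps password hashing
# from dominating test runtime.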
from devpi_server.config import Config
import argon2
secret_parameters = argon2.Parameters(
type=argon2.low_level.Type.ID,
version=argon2.low_level.ARGON2_VERSION,
salt_len=16,
hash_len=16,
time_cost=LOWER_ARGON2_TIME_COST,
memory_cost=LOWER_ARGON2_MEMORY_COST,
parallelism=LOWER_ARGON2_PARALLELISM)
monkeypatch.setattr(
Config, "_secret_parameters", secret_parameters)
@pytest.fixture
def TimeoutQueue():
return _TimeoutQueue
@pytest.fixture()
def caplog(caplog):
"""Enrich the pytest-catchlog funcarg."""
import logging
def getrecords(msgrex=None, minlevel="DEBUG"):
if msgrex is not None:
msgrex = re.compile(msgrex, re.DOTALL)
minlevelno = {"DEBUG": 10, "INFO": 20, "WARNING": 30,
"ERROR": 40, "FATAL": 50}.get(minlevel)
recs = []
for rec in caplog.records:
if rec.levelno < minlevelno:
continue
if msgrex is not None and not msgrex.search(rec.getMessage()):
continue
recs.append(rec)
return recs
caplog.getrecords = getrecords
caplog.set_level(logging.NOTSET)
return caplog
@pytest.fixture
def gentmp(request, tmpdir_factory):
cache = []
def gentmp(name=None):
if not cache:
prefix = re.sub(r"[\W]", "_", request.node.name)
basedir = tmpdir_factory.mktemp(prefix, numbered=True)
cache.append(basedir)
else:
basedir = cache[0]
if name:
return basedir.mkdir(name)
return py.path.local.make_numbered_dir(prefix="gentmp",
keep=0, rootdir=basedir, lock_timeout=None)
return gentmp
@pytest.fixture(autouse=True)
def auto_transact(request):
names = request.fixturenames
if ("xom" not in names and "keyfs" not in names) or (
request.node.get_closest_marker("notransaction")):
yield
return
keyfs = request.getfixturevalue("keyfs")
write = bool(request.node.get_closest_marker("writetransaction"))
keyfs.begin_transaction_in_thread(write=write)
yield
try:
keyfs.rollback_transaction_in_thread()
except AttributeError: # already finished within the test
pass
@pytest.fixture
def xom(request, makexom):
xom = makexom([])
request.addfinalizer(xom.keyfs.release_all_wait_tx)
return xom
def _speed_up_sqlite(cls):
old = cls.ensure_tables_exist
def make_unsynchronous(self, old=old):
        old(self)
with self.get_connection() as conn:
conn._sqlconn.execute("PRAGMA synchronous=OFF")
cls.ensure_tables_exist = make_unsynchronous
return old
@pytest.fixture(autouse=True, scope="session")
def speed_up_sqlite():
from devpi_server.keyfs_sqlite import Storage
old = _speed_up_sqlite(Storage)
yield
Storage.ensure_tables_exist = old
@pytest.fixture(autouse=True, scope="session")
def speed_up_sqlite_fs():
from devpi_server.keyfs_sqlite_fs import Storage
old = _speed_up_sqlite(Storage)
yield
Storage.ensure_tables_exist = old
@pytest.fixture(scope="session")
def mock():
from unittest import mock
return mock
@pytest.fixture(scope="session")
def storage_info(request):
from pydoc import locate
backend = getattr(request.config.option, 'backend', None)
if backend is None:
backend = 'devpi_server.keyfs_sqlite_fs'
plugin = locate(backend)
if plugin is None:
raise RuntimeError("Couldn't find storage backend '%s'" % backend)
result = plugin.devpiserver_storage_backend(settings=None)
result["_test_plugin"] = plugin
return result
@pytest.fixture(scope="session")
def storage(storage_info):
return storage_info['storage']
@pytest.fixture
def makexom(request, gentmp, httpget, monkeypatch, storage_info):
def makexom(opts=(), httpget=httpget, plugins=()): # noqa: PLR0912
from devpi_server import auth_basic
from devpi_server import auth_devpi
from devpi_server import model
from devpi_server import replica
from devpi_server import view_auth
from devpi_server import views
from devpi_server.interfaces import verify_connection_interface
plugins = [
plugin[0] if isinstance(plugin, tuple) else plugin
for plugin in plugins]
default_plugins = [
auth_basic, auth_devpi, mirror, model, replica, view_auth, views,
storage_info["_test_plugin"]]
for plugin in default_plugins:
if plugin not in plugins:
plugins.append(plugin)
pm = get_pluginmanager(load_entrypoints=False)
for plugin in plugins:
pm.register(plugin)
serverdir = gentmp()
if "--serverdir" in opts:
fullopts = ["devpi-server"] + list(opts)
else:
fullopts = ["devpi-server", "--serverdir", serverdir] + list(opts)
if request.node.get_closest_marker("with_replica_thread"):
fullopts.append("--master=http://localhost")
if not request.node.get_closest_marker("no_storage_option"):
if storage_info["name"] != "sqlite":
fullopts.append("--storage=%s" % storage_info["name"])
fullopts = [str(x) for x in fullopts]
config = parseoptions(pm, fullopts)
config.init_nodeinfo()
for marker in ("storage_with_filesystem",):
if request.node.get_closest_marker(marker):
info = config._storage_info()
markers = info.get("_test_markers", [])
if marker not in markers:
pytest.skip("The storage doesn't have marker '%s'." % marker)
if not request.node.get_closest_marker("no_storage_option"):
assert storage_info["storage"] is config.storage
if request.node.get_closest_marker("nomocking"):
xom = XOM(config)
else:
xom = XOM(config, httpget=httpget)
add_pypistage_mocks(monkeypatch, httpget)
# verify storage interface
with xom.keyfs.get_connection() as conn:
verify_connection_interface(conn)
# initialize default indexes
from devpi_server.main import init_default_indexes
if not xom.config.args.master_url:
init_default_indexes(xom)
if xom.is_replica() and request.node.get_closest_marker("with_replica_thread"):
xom.thread_pool.start_one(xom.replica_thread)
if request.node.get_closest_marker("start_threads"):
xom.thread_pool.start()
elif request.node.get_closest_marker("with_notifier"):
xom.thread_pool.start_one(xom.keyfs.notifier)
if not request.node.get_closest_marker("start_threads"):
# we always need the async_thread
xom.thread_pool.start_one(xom.async_thread)
request.addfinalizer(xom.thread_pool.shutdown)
request.addfinalizer(xom._close_sessions)
return xom
return makexom
@pytest.fixture
def replica_xom(request, makexom, secretfile):
from devpi_server.replica import register_key_subscribers
master_url = "http://localhost:3111"
xom = makexom(["--master", master_url, "--secretfile", secretfile.strpath])
register_key_subscribers(xom)
return xom
@pytest.fixture
def makefunctionaltestapp(request):
def makefunctionaltestapp(host_port):
mt = MyFunctionalTestApp(host_port)
mt.xom = None
return mt
return makefunctionaltestapp
@pytest.fixture
def maketestapp(request):
def maketestapp(xom):
app = xom.create_app()
mt = MyTestApp(app)
mt.xom = xom
return mt
return maketestapp
@pytest.fixture
def makemapp(request, maketestapp, makexom):
def makemapp(testapp=None, options=()):
if testapp is None:
testapp = maketestapp(makexom(options))
m = Mapp(testapp)
m.xom = testapp.xom
return m
return makemapp
@pytest.fixture
def httpget(pypiurls):
from .simpypi import make_simple_pkg_info
class MockHTTPGet:
def __init__(self):
self.url2response = {}
self._md5 = hashlib.md5()
self.call_log = []
async def async_httpget(self, url, allow_redirects, timeout=None, extra_headers=None):
response = self.__call__(url, allow_redirects, extra_headers, timeout=timeout)
if response.status_code < 300:
text = response.text
else:
text = None
return (response, text)
def __call__(self, url, allow_redirects=False, extra_headers=None, **kw):
class mockresponse:
def __init__(xself, url):
fakeresponse = self.url2response.get(url)
if isinstance(fakeresponse, list):
if not fakeresponse:
pytest.fail(
f"http_api call to {url} has no further replies")
fakeresponse = fakeresponse.pop(0)
if fakeresponse is None:
fakeresponse = dict(
status_code=404,
reason="Not Found")
fakeresponse["headers"] = requests.structures.CaseInsensitiveDict(
fakeresponse.setdefault("headers", {}))
xself.__dict__.update(fakeresponse)
if "url" not in fakeresponse:
xself.url = url
xself.allow_redirects = allow_redirects
if "content" in fakeresponse:
xself.raw = BytesIO(fakeresponse["content"])
xself.headers.setdefault('content-type', fakeresponse.get(
'content_type', 'text/html'))
if "etag" in fakeresponse:
fakeresponse["headers"]["ETag"] = fakeresponse["etag"]
def close(xself):
return
def json(xself):
return json.loads(xself.text)
@property
def status(xself):
return xself.status_code
def __repr__(xself):
return "<mockresponse %s url=%s>" % (xself.status_code,
xself.url)
r = mockresponse(url)
log.debug("returning %s", r)
self.call_log.append(dict(
url=url,
allow_redirects=allow_redirects,
extra_headers=extra_headers,
kw=kw,
response=r))
return r
def _prepare_kw(self, kw):
kw.setdefault("status_code", kw.pop("code", 200))
kw.setdefault("reason", getattr(
status_map.get(kw["status_code"]),
"title",
"Devpi Mock Error"))
def set(self, url, **kw):
""" Set a reply for all future uses. """
self._prepare_kw(kw)
log.debug("set mocking response %s %s", url, kw)
self.url2response[url] = kw
def add(self, url, **kw):
""" Add a one time use reply to the url. """
self._prepare_kw(kw)
log.debug("add mocking response %s %s", url, kw)
self.url2response.setdefault(url, []).append(kw)
def mockresponse(self, url, **kw):
self.set(url, **kw)
def mock_simple(self, name, text="", pkgver=None, hash_type=None,
pypiserial=10000, remoteurl=None, requires_python=None,
**kw):
ret, text = make_simple_pkg_info(
name, text=text, pkgver=pkgver, hash_type=hash_type,
pypiserial=pypiserial, requires_python=requires_python)
if remoteurl is None:
remoteurl = pypiurls.simple
headers = kw.setdefault("headers", {})
if pypiserial is not None:
headers["X-PYPI-LAST-SERIAL"] = str(pypiserial)
kw.setdefault("url", URL(remoteurl).joinpath(name).asdir().url)
if "etag" in kw:
etag = kw.pop("etag")
self.add(text=text, **kw, etag=etag)
self.add(text=text, **kw, status_code=304)
else:
self.mockresponse(text=text, **kw)
return ret
def _getmd5digest(self, s):
self._md5.update(s.encode("utf8"))
return self._md5.hexdigest()
return MockHTTPGet()
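# Usage sketch (URLs are illustrative): tests preload canned replies before
# exercising code that talks through xom.httpget:
#   httpget.mockresponse("https://pypi.org/simple/foo/", code=200, text="<html/>")
#   httpget.add("https://pypi.org/simple/bar/", code=503)  # one-shot reply
#   httpget.mock_simple("foo", pkgver="foo-1.0.zip")       # fake simple page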
@pytest.fixture
def keyfs(xom):
return xom.keyfs
@pytest.fixture
def model(xom):
return xom.model
@pytest.fixture
def pypistage(devpiserver_makepypistage, xom):
return devpiserver_makepypistage(xom)
def add_pypistage_mocks(monkeypatch, httpget):
_projects = set()
# add some mocking helpers
mirror.MirrorStage.url2response = httpget.url2response
def mock_simple(self, name, text=None, pypiserial=10000, **kw):
cache_expire = kw.pop("cache_expire", True)
if cache_expire:
self.cache_retrieve_times.expire(name)
self.cache_retrieve_times.release(name)
add_to_projects = kw.pop("add_to_projects", True)
if add_to_projects:
self.mock_simple_projects(
_projects.union([name]), cache_expire=cache_expire)
return self.xom.httpget.mock_simple(
name, text=text, pypiserial=pypiserial, **kw)
monkeypatch.setattr(
mirror.MirrorStage, "mock_simple", mock_simple, raising=False)
def mock_simple_projects(self, projectlist, cache_expire=True):
if cache_expire:
self.cache_projectnames.expire()
_projects.clear()
_projects.update(projectlist)
t = "".join(
'<a href="%s">%s</a>\n' % (normalize_name(name), name)
for name in projectlist)
threadlog.debug("patching simple page with: %s" % t)
self.xom.httpget.mockresponse(self.mirror_url, code=200, text=t)
monkeypatch.setattr(
mirror.MirrorStage, "mock_simple_projects",
mock_simple_projects, raising=False)
def mock_extfile(self, path, content, **kw):
headers = {"content-length": len(content),
"content-type": mimetypes.guess_type(path),
"last-modified": "today"}
url = URL(self.mirror_url).joinpath(path)
return self.xom.httpget.mockresponse(
url.url, content=content, headers=headers, **kw)
monkeypatch.setattr(
mirror.MirrorStage, "mock_extfile", mock_extfile, raising=False)
@pytest.fixture
def pypiurls():
from devpi_server.main import _pypi_ixconfig_default
class MirrorURL:
def __init__(self):
self.simple = _pypi_ixconfig_default['mirror_url']
return MirrorURL()
@pytest.fixture
def mapp(makemapp, testapp):
return makemapp(testapp)
class Mapp(MappMixin):
def __init__(self, testapp):
self.testapp = testapp
self.current_stage = ""
def _getindexname(self, indexname):
if not indexname:
assert self.current_stage, "no index in use, none specified"
return self.current_stage
return indexname
def _wait_for_serial_in_result(self, r):
commit_serial = int(r.headers["X-DEVPI-SERIAL"])
self.xom.keyfs.notifier.wait_event_serial(commit_serial)
def makepkg(self, basename, content, name, version):
return content
def delete_user(self, user, code=200):
r = self.testapp.delete_json("/%s" % user, expect_errors=True)
assert r.status_code == code
def login(self, user="root", password="", code=200):
api = self.getapi()
r = self.testapp.post_json(
api.login,
{"user": user, "password": password},
expect_errors=True,
headers={'Accept': 'application/json'})
assert r.status_code == code
if code == 200:
password = r.json.get("result", r.json)["password"]
self.testapp.set_auth(user, password)
self.auth = user, password
def login_root(self):
self.login("root", "")
def logout(self):
self.auth = self.testapp.auth = None
def getuserlist(self):
r = self.testapp.get_json("/", {"indexes": False})
assert r.status_code == 200
return r.json["result"]
def getindexlist(self, user=None):
if user is None:
user = self.testapp.auth[0]
r = self.testapp.get_json("/%s" % user)
assert r.status_code == 200
name = r.json["result"]["username"]
result = {}
for index, data in r.json["result"].get("indexes", {}).items():
result["%s/%s" % (name, index)] = data
return result
def getpkglist(self, user=None, indexname=None):
indexname = self._getindexname(indexname)
if user is None:
user = self.testapp.auth[0]
r = self.testapp.get_json("/%s" % indexname)
assert r.status_code == 200
return r.json["result"]["projects"]
def getreleaseslist(self, name, code=200, user=None, indexname=None):
indexname = self._getindexname(indexname)
if user is None:
user = self.testapp.auth[0]
r = self.testapp.get_json("/%s/%s" % (indexname, name))
assert r.status_code == code
if r.status_code >= 300:
return
result = r.json["result"]
links = set()
for version in result.values():
for link in version["+links"]:
links.add(link["href"])
return sorted(links)
def downloadrelease(self, code, url):
r = self.testapp.get(url, expect_errors=True)
if isinstance(code, tuple):
assert r.status_code in code
else:
assert r.status_code == code
if r.status_code < 300:
return r.body
return r.json
def change_password(self, user, password):
r = self.testapp.patch_json("/%s" % user, dict(password=password))
assert r.status_code == 200
self.testapp.auth = (self.testapp.auth[0],
r.json["result"]["password"])
def create_user(self, user, password, email="hello@example.com", code=201):
reqdict = dict(password=password)
if email:
reqdict["email"] = email
r = self.testapp.put_json("/%s" % user, reqdict, expect_errors=True)
assert r.status_code == code
if code == 201:
res = r.json["result"]
assert res["username"] == user
assert res.get("email") == email
def modify_user(self, user, code=200, password=None, **kwargs):
reqdict = {}
if password:
reqdict["password"] = password
for key, value in kwargs.items():
reqdict[key] = value
r = self.testapp.patch_json("/%s" % user, reqdict, expect_errors=True)
assert r.status_code == code
if code == 200:
assert r.json == dict(message="user updated")
def create_user_fails(self, user, password, email="hello@example.com"):
with pytest.raises(webtest.AppError) as excinfo:
self.create_user(user, password)
assert "409" in excinfo.value.args[0]
def create_and_login_user(self, user="someuser", password="123"):
self.create_user(user, password)
self.login(user, password)
def use(self, stagename):
stagename = stagename.strip("/")
assert stagename.count("/") == 1, stagename
self.api = self.getapi(stagename)
self.api.stagename = stagename
self.current_stage = stagename
return self.api
def getjson(self, path, code=200):
r = self.testapp.get_json(path, {}, expect_errors=True)
assert r.status_code == code
if r.status_code == 302:
return r.headers["location"]
return r.json
def create_index(self, indexname, indexconfig=None, use=True, code=200):
if indexconfig is None:
indexconfig = {}
if "/" in indexname:
user, index = indexname.split("/")
else:
user, password = self.testapp.auth
index = indexname
r = self.testapp.put_json("/%s/%s" % (user, index), indexconfig,
expect_errors=True)
assert r.status_code == code
        if code in (200, 201):
assert r.json["result"]["type"] == indexconfig.get("type", "stage")
if use:
return self.use("%s/%s" % (user, index))
if code in (400,):
return r.json["message"]
def modify_index(self, indexname, indexconfig, code=200):
if "/" in indexname:
user, index = indexname.split("/")
else:
user, password = self.testapp.auth
index = indexname
r = self.testapp.patch_json("/%s/%s" % (user, index), indexconfig,
expect_errors=True)
assert r.status_code == code
        if code in (200, 201):
if isinstance(indexconfig, dict):
assert r.json["result"]["type"] == indexconfig.get("type", "stage")
return r.json["result"]
if code in (400,):
return r.json["message"]
def delete_index(self, indexname, code=201, waithooks=False):
if "/" in indexname:
user, index = indexname.split("/")
else:
user, password = self.testapp.auth
index = indexname
r = self.testapp.delete_json("/%s/%s" % (user, index),
expect_errors=True)
if waithooks:
self._wait_for_serial_in_result(r)
assert r.status_code == code
def set_custom_data(self, data, indexname=None):
return self.set_key_value("custom_data", data, indexname=indexname)
def set_key_value(self, key, value, indexname=None):
indexname = self._getindexname(indexname)
indexurl = "/" + indexname
r = self.testapp.get_json(indexurl)
result = r.json["result"]
result[key] = value
r = self.testapp.patch_json(indexurl, result)
assert r.status_code == 200
def set_indexconfig_option(self, key, value, indexname=None):
indexname = self._getindexname(indexname)
indexurl = "/" + indexname
r = self.testapp.get_json(indexurl)
result = r.json["result"]
result[key] = value
r = self.testapp.patch_json(indexurl, result)
assert r.status_code == 200
assert r.json["result"][key] == value
def set_mirror_whitelist(self, whitelist, indexname=None):
indexname = self._getindexname(indexname)
r = self.testapp.get_json("/%s" % indexname)
result = r.json["result"]
result["mirror_whitelist"] = whitelist
r = self.testapp.patch_json("/%s" % (indexname,), result)
assert r.status_code == 200
def set_acl(self, users, acltype="upload", indexname=None):
indexname = self._getindexname(indexname)
r = self.testapp.get_json("/%s" % indexname)
result = r.json["result"]
if not isinstance(users, list):
users = users.split(",")
assert isinstance(users, list)
result["acl_" + acltype] = users
r = self.testapp.patch_json("/%s" % (indexname,), result)
assert r.status_code == 200
def get_acl(self, acltype="upload", indexname=None):
indexname = self._getindexname(indexname)
r = self.testapp.get_json("/%s" % indexname)
return r.json["result"].get("acl_" + acltype, None)
def get_mirror_whitelist(self, indexname=None):
indexname = self._getindexname(indexname)
r = self.testapp.get_json("/%s" % indexname)
return r.json["result"]["mirror_whitelist"]
def delete_project(self, project, code=200, indexname=None,
waithooks=False):
indexname = self._getindexname(indexname)
r = self.testapp.delete_json("/%s/%s" % (indexname,
project), {}, expect_errors=True)
assert r.status_code == code
if waithooks:
self._wait_for_serial_in_result(r)
def set_versiondata(self, metadata, indexname=None, code=200,
waithooks=False,
set_whitelist=True):
indexname = self._getindexname(indexname)
metadata = metadata.copy()
metadata[":action"] = "submit"
r = self.testapp.post("/%s/" % indexname, metadata,
expect_errors=True)
assert r.status_code == code
if r.status_code == 200 and set_whitelist:
whitelist = set(self.get_mirror_whitelist(indexname=indexname))
whitelist.add(metadata["name"])
self.set_mirror_whitelist(sorted(whitelist), indexname=indexname)
if waithooks:
self._wait_for_serial_in_result(r)
return r
def upload_file_pypi(self, basename, content,
name=None, version=None, indexname=None,
register=True, code=200, waithooks=False,
set_whitelist=True):
assert isinstance(content, bytes)
indexname = self._getindexname(indexname)
#name_version = splitbasename(basename, checkarch=False)
#if not name:
# name = name_version[0]
#if not version:
# version = name_version[1]
if register and code == 200:
self.set_versiondata(
dict(name=name, version=version), set_whitelist=set_whitelist)
r = self.testapp.post("/%s/" % indexname,
{":action": "file_upload", "name": name, "version": version,
"content": Upload(basename, content)}, expect_errors=True)
assert r.status_code == code
if waithooks:
self._wait_for_serial_in_result(r)
# return the file url so users/callers can easily use it
# (probably the official server response should include the url)
r.file_url = make_file_url(basename, content, stagename=indexname)
r.file_url_no_hash = make_file_url(
basename, content, stagename=indexname, add_hash=False)
return r
def push(self, name, version, index, indexname=None, code=200):
indexname = self._getindexname(indexname)
req = dict(name=name, version=version, targetindex=index)
r = self.testapp.push(
'/%s' % indexname, json.dumps(req), expect_errors=True)
assert r.status_code == code
return r
def get_release_paths(self, project):
r = self.get_simple(project)
pkg_url = URL(r.request.url)
paths = [pkg_url.joinpath(link["href"]).path
for link in BeautifulSoup(r.body, "html.parser").find_all("a")]
return paths
def upload_doc(self, basename, content, name, version, indexname=None,
code=200, waithooks=False):
indexname = self._getindexname(indexname)
form = {":action": "doc_upload", "name": name,
"content": Upload(basename, content)}
if version:
form["version"] = version
r = self.testapp.post("/%s/" % indexname, form, expect_errors=True)
assert r.status_code == code
if waithooks:
self._wait_for_serial_in_result(r)
return r
def upload_toxresult(self, path, content, code=200, waithooks=False):
r = self.testapp.post(path, content, expect_errors=True)
assert r.status_code == code
if waithooks:
self._wait_for_serial_in_result(r)
return r
def get_simple(self, project, code=200):
r = self.testapp.get(self.api.simpleindex + project + '/',
expect_errors=True)
assert r.status_code == code
return r
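# Typical Mapp flow in a test (user/index names are illustrative):
#   mapp.create_and_login_user("alice", "123")
#   mapp.create_index("alice/dev")
#   mapp.upload_file_pypi("pkg-1.0.tar.gz", b"content", "pkg", "1.0")
#   mapp.get_release_paths("pkg")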
@pytest.fixture
def noiter(monkeypatch, request):
    app_iters = []
@property
def body(self):
if self.headers["Content-Type"] != "application/octet-stream":
return self.body_old
if self.app_iter:
            app_iters.append(self.app_iter)
monkeypatch.setattr(TestResponse, "body_old", TestResponse.body,
raising=False)
monkeypatch.setattr(TestResponse, "body", body)
yield
    for x in app_iters:
x.close()
class MyTestApp(TApp):
auth = None
def __init__(self, *args, **kwargs):
super(MyTestApp, self).__init__(*args, **kwargs)
self.headers = {}
def set_auth(self, user, password):
self.auth = (user, password)
def set_header_default(self, name, value):
self.headers[str(name)] = str(value)
def _gen_request(self, method, url, params=None, headers=None, **kw):
headers = {} if headers is None else headers.copy()
if self.auth:
if not headers:
headers = kw["headers"] = {}
headers["X-Devpi-Auth"] = b64encode("%s:%s" % self.auth)
#print ("setting auth header %r %s %s" % (auth, method, url))
# fill headers with defaults
for name, val in self.headers.items():
headers.setdefault(name, val)
kw["headers"] = headers
if params is not None:
kw["params"] = params
return super(MyTestApp, self)._gen_request(method, url, **kw)
def post(self, *args, **kwargs):
code = kwargs.pop("code", None)
if code is not None and code >= 300:
kwargs.setdefault("expect_errors", True)
r = self._gen_request("POST", *args, **kwargs)
if code is not None:
assert r.status_code == code
return r
def push(self, url, params=None, **kw):
kw.setdefault("expect_errors", True)
return self._gen_request("POST", url, params=params, **kw)
def get(self, *args, **kwargs):
kwargs.setdefault("expect_errors", True)
accept = kwargs.pop("accept", None)
if accept is not None:
headers = kwargs.setdefault("headers", {})
headers[str("Accept")] = str(accept)
follow = kwargs.pop("follow", True)
response = self._gen_request("GET", *args, **kwargs)
if follow and response.status_code == 302:
assert response.location != args[0]
return self.get(response.location, *args[1:], **kwargs)
return response
def xget(self, code, *args, **kwargs):
if code == 302:
kwargs["follow"] = False
r = self.get(*args, **kwargs)
assert r.status_code == code
return r
def xdel(self, code, *args, **kwargs):
kwargs.setdefault("expect_errors", True)
r = self._gen_request("DELETE", *args, **kwargs)
assert r.status_code == code
return r
def get_json(self, *args, **kwargs):
headers = kwargs.setdefault("headers", {})
headers["Accept"] = "application/json"
return self.get(*args, **kwargs)
class FunctionalResponseWrapper(object):
def __init__(self, response):
self.res = response
@property
def status_code(self):
return self.res.status_code
@property
def body(self):
return self.res.content
@property
def json(self):
return self.res.json()
class MyFunctionalTestApp(MyTestApp):
def __init__(self, host_port):
import json
self.base_url = "http://%s:%s" % host_port
self.headers = {}
self.JSONEncoder = json.JSONEncoder
def _gen_request(self, method, url, params=None,
headers=None, extra_environ=None, status=None,
upload_files=None, expect_errors=False,
content_type=None):
headers = {} if headers is None else headers.copy()
if self.auth:
headers["X-Devpi-Auth"] = b64encode("%s:%s" % self.auth)
# fill headers with defaults
for name, val in self.headers.items():
headers.setdefault(name, val)
kw = dict(headers=headers)
if params and params is not webtest.utils.NoDefault:
if method.lower() in ('post', 'put', 'patch'):
kw['data'] = params
else:
kw['params'] = params
meth = getattr(requests, method.lower())
if '://' not in url:
url = self.base_url + url
r = meth(url, **kw)
return FunctionalResponseWrapper(r)
@pytest.fixture
def testapp(request, maketestapp, xom):
return maketestapp(xom)
def get_open_port(host):
with closing(socket.socket(socket.AF_INET, socket.SOCK_STREAM)) as s:
s.bind((host, 0))
s.listen(1)
port = s.getsockname()[1]
return port
def wait_for_port(host, port, timeout=60):
while timeout > 0:
with closing(socket.socket(socket.AF_INET, socket.SOCK_STREAM)) as s:
s.settimeout(1)
if s.connect_ex((host, port)) == 0:
return
time.sleep(1)
timeout -= 1
raise RuntimeError(
"The port %s on host %s didn't become accessible" % (port, host))
@pytest.fixture(scope="class")
def server_directory():
import tempfile
srvdir = py.path.local(
tempfile.mkdtemp(prefix='test-', suffix='-server-directory'))
yield srvdir
srvdir.remove(ignore_errors=True)
@pytest.fixture(scope="module")
def call_devpi_in_dir():
# let xproc find the correct executable instead of py.test
devpigenconfig = str(py.path.local.sysfind("devpi-gen-config"))
devpiimport = str(py.path.local.sysfind("devpi-import"))
devpiinit = str(py.path.local.sysfind("devpi-init"))
devpiserver = str(py.path.local.sysfind("devpi-server"))
def devpi(server_dir, args):
from devpi_server.genconfig import genconfig
from devpi_server.importexport import import_
from devpi_server.init import init
from devpi_server.main import main
from _pytest.monkeypatch import MonkeyPatch
from _pytest.pytester import RunResult
m = MonkeyPatch()
m.setenv("DEVPISERVER_SERVERDIR", getattr(server_dir, 'strpath', server_dir))
cap = py.io.StdCaptureFD()
cap.startall()
now = time.time()
if args[0] == 'devpi-gen-config':
m.setattr("sys.argv", [devpigenconfig])
entry_point = genconfig
elif args[0] == 'devpi-import':
m.setattr("sys.argv", [devpiimport])
entry_point = import_
elif args[0] == 'devpi-init':
m.setattr("sys.argv", [devpiinit])
entry_point = init
elif args[0] == 'devpi-server':
m.setattr("sys.argv", [devpiserver])
entry_point = main
try:
entry_point(argv=args)
finally:
m.undo()
out, err = cap.reset()
del cap
return RunResult(
0, out.split("\n"), err.split("\n"), time.time() - now)
return devpi
@pytest.fixture(scope="class")
def master_serverdir(server_directory):
return server_directory.join("master")
@pytest.fixture(scope="class")
def secretfile(server_directory):
import base64
import secrets
secretfile = server_directory.join('testserver.secret')
if not secretfile.exists():
secretfile.write(base64.b64encode(secrets.token_bytes(32)))
if sys.platform != "win32":
secretfile.chmod(0o600)
return secretfile
@pytest.fixture(scope="class")
def master_host_port(request, call_devpi_in_dir, master_serverdir, server_directory, secretfile, storage_info):
host = 'localhost'
port = get_open_port(host)
args = [
"devpi-server",
"--role", "master",
"--secretfile", secretfile.strpath,
"--argon2-memory-cost", str(LOWER_ARGON2_MEMORY_COST),
"--argon2-parallelism", str(LOWER_ARGON2_PARALLELISM),
"--argon2-time-cost", str(LOWER_ARGON2_TIME_COST),
"--host", host,
"--port", str(port),
"--requests-only"]
storage_args = [
"--serverdir", master_serverdir.strpath]
if storage_info["name"] != "sqlite":
storage_option = "--storage=%s" % storage_info["name"]
_get_test_storage_options = getattr(
storage_info["storage"], "_get_test_storage_options", None)
if _get_test_storage_options:
storage_options = _get_test_storage_options(master_serverdir)
storage_option = storage_option + storage_options
storage_args.append(storage_option)
if not master_serverdir.join('.nodeinfo').exists():
subprocess.check_call(
["devpi-init"] + storage_args)
p = subprocess.Popen(args + storage_args)
try:
wait_for_port(host, port)
yield (host, port)
finally:
p.terminate()
p.wait()
@pytest.fixture(scope="class")
def replica_serverdir(server_directory):
return server_directory.join("replica")
@pytest.fixture(scope="class")
def replica_host_port(request, call_devpi_in_dir, master_host_port, replica_serverdir, secretfile, storage_info):
host = 'localhost'
port = get_open_port(host)
args = [
"devpi-server",
"--secretfile", secretfile.strpath,
"--argon2-memory-cost", str(LOWER_ARGON2_MEMORY_COST),
"--argon2-parallelism", str(LOWER_ARGON2_PARALLELISM),
"--argon2-time-cost", str(LOWER_ARGON2_TIME_COST),
"--host", host, "--port", str(port)]
storage_args = [
"--serverdir", replica_serverdir.strpath]
if storage_info["name"] != "sqlite":
storage_option = "--storage=%s" % storage_info["name"]
_get_test_storage_options = getattr(
storage_info["storage"], "_get_test_storage_options", None)
if _get_test_storage_options:
storage_options = _get_test_storage_options(replica_serverdir)
storage_option = storage_option + storage_options
storage_args.append(storage_option)
if not replica_serverdir.join('.nodeinfo').exists():
subprocess.check_call([
"devpi-init",
"--role", "replica",
"--master-url", "http://%s:%s" % master_host_port] + storage_args)
p = subprocess.Popen(args + storage_args)
try:
wait_for_port(host, port)
yield (host, port)
finally:
p.terminate()
p.wait()
nginx_conf_content = """
worker_processes 1;
daemon off;
pid nginx.pid;
error_log nginx_error.log;
events {
worker_connections 32;
}
http {
access_log off;
default_type application/octet-stream;
sendfile on;
keepalive_timeout 0;
include nginx-devpi.conf;
}
"""
@pytest.fixture(scope="class")
def adjust_nginx_conf_content():
def adjust_nginx_conf_content(content):
return content
return adjust_nginx_conf_content
def _nginx_host_port(host, port, call_devpi_in_dir, server_directory, adjust_nginx_conf_content):
# let xproc find the correct executable instead of py.test
nginx = py.path.local.sysfind("nginx")
if nginx is None:
pytest.skip("No nginx executable found.")
nginx = str(nginx)
orig_dir = server_directory.chdir()
try:
args = ["devpi-gen-config", "--host", host, "--port", str(port)]
if not server_directory.join('.nodeinfo').exists():
call_devpi_in_dir(server_directory.strpath, ["devpi-init"])
call_devpi_in_dir(
server_directory.strpath,
args)
finally:
orig_dir.chdir()
nginx_directory = server_directory.join("gen-config")
nginx_devpi_conf = nginx_directory.join("nginx-devpi.conf")
nginx_port = get_open_port(host)
nginx_devpi_conf_content = nginx_devpi_conf.read()
nginx_devpi_conf_content = nginx_devpi_conf_content.replace(
"listen 80;",
"listen %s;" % nginx_port)
nginx_devpi_conf_content = adjust_nginx_conf_content(nginx_devpi_conf_content)
nginx_devpi_conf.write(nginx_devpi_conf_content)
nginx_conf = nginx_directory.join("nginx.conf")
nginx_conf.write(nginx_conf_content)
try:
subprocess.check_output([
nginx, "-t",
"-c", nginx_conf.strpath,
"-p", nginx_directory.strpath], stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as e:
print(e.output, file=sys.stderr)
raise
p = subprocess.Popen([
nginx, "-c", nginx_conf.strpath, "-p", nginx_directory.strpath])
return (p, nginx_port)
@pytest.fixture(scope="class")
def nginx_host_port(request, call_devpi_in_dir, server_directory, adjust_nginx_conf_content):
if sys.platform.startswith("win"):
pytest.skip("no nginx on windows")
# we need the skip above before master_host_port is called
(host, port) = request.getfixturevalue("master_host_port")
(p, nginx_port) = _nginx_host_port(
host, port, call_devpi_in_dir, server_directory, adjust_nginx_conf_content)
try:
wait_for_port(host, nginx_port)
yield (host, nginx_port)
finally:
p.terminate()
p.wait()
@pytest.fixture(scope="class")
def nginx_replica_host_port(request, call_devpi_in_dir, server_directory, adjust_nginx_conf_content):
if sys.platform.startswith("win"):
pytest.skip("no nginx on windows")
# we need the skip above before master_host_port is called
(host, port) = request.getfixturevalue("replica_host_port")
(p, nginx_port) = _nginx_host_port(
host, port, call_devpi_in_dir, server_directory, adjust_nginx_conf_content)
try:
wait_for_port(host, nginx_port)
yield (host, nginx_port)
finally:
p.terminate()
p.wait()
@pytest.fixture(scope="session")
def simpypiserver():
from .simpypi import httpserver, SimPyPIRequestHandler
import threading
host = 'localhost'
port = get_open_port(host)
server = httpserver.HTTPServer((host, port), SimPyPIRequestHandler)
thread = threading.Thread(target=server.serve_forever)
thread.daemon = True
thread.start()
wait_for_port(host, port, 5)
print("Started simpypi server %s:%s" % server.server_address)
return server
@pytest.fixture
def simpypi(simpypiserver):
from .simpypi import SimPyPI
simpypiserver.simpypi = SimPyPI(simpypiserver.server_address)
return simpypiserver.simpypi
# incremental testing
def pytest_runtest_makereport(item, call):
if "incremental" in item.keywords:
if call.excinfo is not None:
parent = item.parent
parent._previousfailed = item
def pytest_runtest_setup(item):
if "incremental" in item.keywords:
previousfailed = getattr(item.parent, "_previousfailed", None)
if previousfailed is not None:
            pytest.xfail("previous test failed (%s)" % previousfailed.name)
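# Usage sketch for the incremental marker (the standard pytest-docs pattern
# the two hooks above implement):
#   @pytest.mark.incremental
#   class TestSequence:
#       def test_first(self): ...
#       def test_second(self): ...  # xfails automatically if test_first failed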
@pytest.fixture
def gen():
return Gen()
class Gen:
def __init__(self):
self._md5 = hashlib.md5()
def pypi_package_link(self, pkgname, md5=True):
link = "https://pypi.org/package/some/%s" % pkgname
if md5 is True:
self._md5.update(link.encode("utf8")) # basically random
link += "#md5=%s" % self._md5.hexdigest()
elif md5:
link += "#md5=%s" % md5
return URL(link)
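# Sketch: gen.pypi_package_link("pkg") returns a URL object such as
#   https://pypi.org/package/some/pkg#md5=<rolling hexdigest>
# while md5=False omits the fragment and md5="abc" pins it to #md5=abc.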
@pytest.fixture
def pyramidconfig():
from pyramid.testing import setUp, tearDown
config = setUp()
yield config
tearDown()
@pytest.fixture
def dummyrequest(pyramidconfig):
from pyramid.testing import DummyRequest
request = DummyRequest()
pyramidconfig.begin(request=request)
yield request
@pytest.fixture
def blank_request():
from pyramid.request import Request
def blank_request(*args, **kwargs):
return Request.blank("/blankpath", *args, **kwargs)
return blank_request
@pytest.fixture(params=[None, "tox38"])
def tox_result_data(request):
from test_devpi_server.example import tox_result_data
import copy
tox_result_data = copy.deepcopy(tox_result_data)
if request.param == "tox38":
retcode = int(tox_result_data['testenvs']['py27']['test'][0]['retcode'])
tox_result_data['testenvs']['py27']['test'][0]['retcode'] = retcode
return tox_result_data
|
06cc6e49bca363980bc24f2b2d59596148c0c410
|
c22256d3e8d566e75e8246cc8b62db798e88e9a3
|
/core/migrations/0027_alter_timer_options_remove_timer_duration_and_more.py
|
ff174e73136c9c40e824fd36ea14ed7a6ab56825
|
[
"BSD-2-Clause-Views",
"BSD-2-Clause"
] |
permissive
|
babybuddy/babybuddy
|
20a4648397b2dbb105b09172f317764eb7ff4955
|
b5163c236019f3f77667e04e4ea09621593914fe
|
refs/heads/master
| 2023-08-25T10:14:02.341213
| 2023-08-15T16:18:06
| 2023-08-15T16:38:34
| 107,898,477
| 901
| 199
|
BSD-2-Clause
| 2023-08-15T16:38:35
| 2017-10-22T20:02:57
|
Python
|
UTF-8
|
Python
| false
| false
| 1,000
|
py
|
0027_alter_timer_options_remove_timer_duration_and_more.py
|
from django.db import migrations
def delete_inactive_timers(apps, schema_editor):
from core import models
for timer in models.Timer.objects.filter(active=False):
timer.delete()
class Migration(migrations.Migration):
dependencies = [
("core", "0026_alter_feeding_end_alter_feeding_start_and_more"),
]
operations = [
migrations.AlterModelOptions(
name="timer",
options={
"default_permissions": ("view", "add", "change", "delete"),
"ordering": ["-start"],
"verbose_name": "Timer",
"verbose_name_plural": "Timers",
},
),
migrations.RemoveField(
model_name="timer",
name="duration",
),
migrations.RemoveField(
model_name="timer",
name="end",
),
migrations.RunPython(
delete_inactive_timers, reverse_code=migrations.RunPython.noop
),
]
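# Hedged note: on a forward migrate (e.g. the standard `python manage.py
# migrate core`) RunPython executes delete_inactive_timers once; the reverse
# direction is declared a no-op, so rolling back cannot restore the deleted
# inactive timers.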
|
925b84899d2daaad1d0308d59c1e16c57110c798
|
7a7ee4b7c551ce92483e7162b3064f30219fbddc
|
/tests/manual_checks/fancy_indexing.py
|
8a109aade4faa956a39742a87acc2c7f2cefa20b
|
[
"Apache-2.0"
] |
permissive
|
mratsim/Arraymancer
|
ccd8267fa2869d73a5a028ecceabf9e96dfdb69c
|
e2df3dd7509588e3a863c690389649ced790344a
|
refs/heads/master
| 2023-09-02T06:39:26.116762
| 2023-08-31T15:56:39
| 2023-08-31T15:56:39
| 88,188,361
| 1,258
| 125
|
Apache-2.0
| 2023-09-12T13:09:07
| 2017-04-13T17:10:19
|
Nim
|
UTF-8
|
Python
| false
| false
| 5,553
|
py
|
fancy_indexing.py
|
import numpy as np
def index_select():
print('Index select')
print('--------------------------')
x = np.array([[ 4, 99, 2],
[ 3, 4, 99],
[ 1, 8, 7],
[ 8, 6, 8]])
print(x)
print('--------------------------')
print('x[:, [0, 2]]')
print(x[:, [0, 2]])
print('--------------------------')
print('x[[1, 3], :]')
print(x[[1, 3], :])
def masked_select():
print('Masked select')
print('--------------------------')
x = np.array([[ 4, 99, 2],
[ 3, 4, 99],
[ 1, 8, 7],
[ 8, 6, 8]])
print(x)
print('--------------------------')
print('x[x > 50]')
print(x[x > 50])
print('--------------------------')
print('x[x < 50]')
print(x[x < 50])
def masked_axis_select():
print('Masked axis select')
print('--------------------------')
x = np.array([[ 4, 99, 2],
[ 3, 4, 99],
[ 1, 8, 7],
[ 8, 6, 8]])
print(x)
print('--------------------------')
print('x[:, np.sum(x, axis = 0) > 50]')
print(x[:, np.sum(x, axis = 0) > 50])
print('--------------------------')
print('x[np.sum(x, axis = 1) > 50, :]')
print(x[np.sum(x, axis = 1) > 50, :])
# index_select()
# masked_select()
# masked_axis_select()
print('\n#########################################\n')
print('Fancy mutation')
def index_fill():
print('Index fill')
print('--------------------------')
x = np.array([[ 4, 99, 2],
[ 3, 4, 99],
[ 1, 8, 7],
[ 8, 6, 8]])
print(x)
print('--------------------------')
y = x.copy()
print('y[:, [0, 2]] = -100')
y[:, [0, 2]] = -100
print(y)
print('--------------------------')
y = x.copy()
print('y[[1, 3], :] = -100')
y[[1, 3], :] = -100
print(y)
print('--------------------------')
def masked_fill():
print('Masked fill')
print('--------------------------')
x = np.array([[ 4, 99, 2],
[ 3, 4, 99],
[ 1, 8, 7],
[ 8, 6, 8]])
print(x)
print('--------------------------')
y = x.copy()
print('y[y > 50] = -100')
y[y > 50] = -100
print(y)
print('--------------------------')
y = x.copy()
print('y[y < 50] = -100')
y[y < 50] = -100
print(y)
print('--------------------------')
def masked_axis_fill_value():
print('Masked axis fill with value')
print('--------------------------')
x = np.array([[ 4, 99, 2],
[ 3, 4, 99],
[ 1, 8, 7],
[ 8, 6, 8]])
print(x)
print('--------------------------')
y = x.copy()
print('y[:, y.sum(axis = 0) > 50] = -100')
y[:, y.sum(axis = 0) > 50] = -100
print(y)
print('--------------------------')
y = x.copy()
print('y[y.sum(axis = 1) > 50, :] = -100')
y[y.sum(axis = 1) > 50, :] = -100
print(y)
print('--------------------------')
def masked_axis_fill_tensor_invalid_1():
# ValueError: shape mismatch:
# value array of shape (4,) could not be broadcast
# to indexing result of shape (2,4)
print('Masked axis fill with tensor - invalid numpy syntax')
print('--------------------------')
x = np.array([[ 4, 99, 2],
[ 3, 4, 99],
[ 1, 8, 7],
[ 8, 6, 8]])
print(x)
print('--------------------------')
y = x.copy()
print('y[:, y.sum(axis = 0) > 50] = np.array([10, 20, 30, 40])')
y[:, y.sum(axis = 0) > 50] = np.array([10, 20, 30, 40])
print(y)
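# Why the call above raises: the boolean mask keeps n_true columns, so the
# assignment target is a 2-D block, and the flat (4,) value does not
# broadcast against it. The *_valid_1 variant below reshapes the value to a
# (4, 1) column so ordinary broadcasting applies.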
def masked_axis_fill_tensor_valid_1():
print('Masked axis fill with tensor - 1d tensor broadcasting')
print('--------------------------')
x = np.array([[ 4, 99, 2],
[ 3, 4, 99],
[ 1, 8, 7],
[ 8, 6, 8]])
print(x)
print('--------------------------')
y = x.copy()
print('y[:, y.sum(axis = 0) > 50] = np.array([[10], [20], [30], [40]])')
y[:, y.sum(axis = 0) > 50] = np.array([[10], [20], [30], [40]])
print(y)
print('--------------------------')
y = x.copy()
print('y[y.sum(axis = 1) > 50, :] = np.array([-10, -20, -30])')
y[y.sum(axis = 1) > 50, :] = np.array([-10, -20, -30])
print(y)
print('--------------------------')
def masked_axis_fill_tensor_valid_2():
print('Masked axis fill with tensor - multidimensional tensor')
print('--------------------------')
x = np.array([[ 4, 99, 2],
[ 3, 4, 99],
[ 1, 8, 7],
[ 8, 6, 8]])
print(x)
print('--------------------------')
y = x.copy()
print('y[:, y.sum(axis = 0) > 50] = np.array([[10, 50], [20, 60], [30, 70], [40, 80]])')
y[:, y.sum(axis = 0) > 50] = np.array([[10, 50],
[20, 60],
[30, 70],
[40, 80]])
print(y)
print('--------------------------')
y = x.copy()
    print('y[y.sum(axis = 1) > 50, :] = np.array([[-10, -20, -30], [-40, -50, -60]])')
y[y.sum(axis = 1) > 50, :] = np.array([[-10, -20, -30],
[-40, -50, -60]])
print(y)
print('--------------------------')
# index_fill()
# masked_fill()
# masked_axis_fill_value()
masked_axis_fill_tensor_invalid_1()
# masked_axis_fill_tensor_valid_1()
# masked_axis_fill_tensor_valid_2()
|
3967894960bcea734e40fcb20cd44ec09f83c19e
|
017090be7ab186cb6b47f49e1066ac5cfec3a542
|
/tests/unit/neptune/new/types/atoms/test_git_ref.py
|
6d2fdfc5d2560898f32ce624c3413e37e93213d4
|
[
"Apache-2.0"
] |
permissive
|
neptune-ai/neptune-client
|
9a79f9d93c84b3a20114e6e49a80652930399ece
|
9b697ce548634c30dbc5881d4a0b223c8987515d
|
refs/heads/master
| 2023-08-18T01:48:22.634432
| 2023-08-17T11:55:57
| 2023-08-17T11:55:57
| 170,117,229
| 408
| 55
|
Apache-2.0
| 2023-09-13T12:51:03
| 2019-02-11T11:25:57
|
Python
|
UTF-8
|
Python
| false
| false
| 1,054
|
py
|
test_git_ref.py
|
#
# Copyright (c) 2023, Neptune Labs Sp. z o.o.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from pathlib import Path
from neptune.types import GitRef
from neptune.vendor.lib_programname import get_path_executed_script
class TestGitRef:
def test_resolve_path_default(self):
assert GitRef().resolve_path() == get_path_executed_script().resolve()
def test_resolve_path_provided(self):
assert GitRef("path").resolve_path() == Path("path").resolve()
assert GitRef(Path("path")).resolve_path() == Path("path").resolve()
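# Hedged usage sketch (the keyword name follows Neptune's public docs, not
# this test module): GitRef is normally passed when starting a run, e.g.
#   import neptune
#   run = neptune.init_run(git_ref=GitRef("/path/to/repo"))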
|
c8e7e6997ed090c5cee7971fa777efcfca242948
|
bf5acb19d44d031e2d8a9e37266acd55c5697863
|
/pybamm/models/full_battery_models/lead_acid/full.py
|
927f9a20281ecff935364801f35b96ca3eac4f7e
|
[
"LicenseRef-scancode-unknown-license-reference",
"BSD-3-Clause"
] |
permissive
|
pybamm-team/PyBaMM
|
82ecf9bebb580aab1a4e67aa7d0297d2698a0b51
|
b4432b6da7331f992b1831912a9cf89be1f7578f
|
refs/heads/develop
| 2023-08-19T04:29:21.151964
| 2023-08-18T22:43:38
| 2023-08-18T22:43:38
| 155,538,761
| 713
| 362
|
BSD-3-Clause
| 2023-09-14T18:20:04
| 2018-10-31T10:26:29
|
Python
|
UTF-8
|
Python
| false
| false
| 5,199
|
py
|
full.py
|
#
# Lead-acid Full model
#
import pybamm
from .base_lead_acid_model import BaseModel
class Full(BaseModel):
"""
Porous electrode model for lead-acid, from :footcite:t:`Sulzer2019asymptotic`,
based on the Newman-Tiedemann model. See :class:`pybamm.lead_acid.BaseModel`
for more details.
"""
def __init__(self, options=None, name="Full model", build=True):
super().__init__(options, name)
self.set_external_circuit_submodel()
self.set_open_circuit_potential_submodel()
self.set_intercalation_kinetics_submodel()
self.set_interface_utilisation_submodel()
self.set_porosity_submodel()
self.set_active_material_submodel()
self.set_transport_efficiency_submodels()
self.set_convection_submodel()
self.set_electrolyte_submodel()
self.set_solid_submodel()
self.set_thermal_submodel()
self.set_side_reaction_submodels()
self.set_current_collector_submodel()
self.set_sei_submodel()
self.set_lithium_plating_submodel()
self.set_total_interface_submodel()
if build:
self.build_model()
pybamm.citations.register("Sulzer2019physical")
def set_porosity_submodel(self):
self.submodels["porosity"] = pybamm.porosity.ReactionDrivenODE(
self.param, self.options, False
)
def set_convection_submodel(self):
if self.options["convection"] == "none":
self.submodels[
"transverse convection"
] = pybamm.convection.transverse.NoConvection(self.param)
self.submodels[
"through-cell convection"
] = pybamm.convection.through_cell.NoConvection(self.param)
else:
if self.options["convection"] == "uniform transverse":
self.submodels[
"transverse convection"
] = pybamm.convection.transverse.Uniform(self.param)
elif self.options["convection"] == "full transverse":
self.submodels[
"transverse convection"
] = pybamm.convection.transverse.Full(self.param)
self.submodels[
"through-cell convection"
] = pybamm.convection.through_cell.Full(self.param)
def set_intercalation_kinetics_submodel(self):
for domain in ["negative", "positive"]:
intercalation_kinetics = self.get_intercalation_kinetics(domain)
self.submodels[f"{domain} interface"] = intercalation_kinetics(
self.param, domain, "lead-acid main", self.options, "primary"
)
def set_solid_submodel(self):
if self.options["surface form"] == "false":
submod_n = pybamm.electrode.ohm.Full(self.param, "negative")
submod_p = pybamm.electrode.ohm.Full(self.param, "positive")
else:
submod_n = pybamm.electrode.ohm.SurfaceForm(self.param, "negative")
submod_p = pybamm.electrode.ohm.SurfaceForm(self.param, "positive")
self.submodels["negative electrode potential"] = submod_n
self.submodels["positive electrode potential"] = submod_p
def set_electrolyte_submodel(self):
surf_form = pybamm.electrolyte_conductivity.surface_potential_form
self.submodels["electrolyte diffusion"] = pybamm.electrolyte_diffusion.Full(
self.param
)
if self.options["surface form"] == "false":
self.submodels[
"electrolyte conductivity"
] = pybamm.electrolyte_conductivity.Full(self.param)
surf_model = surf_form.Explicit
elif self.options["surface form"] == "differential":
surf_model = surf_form.FullDifferential
elif self.options["surface form"] == "algebraic":
surf_model = surf_form.FullAlgebraic
for domain in ["negative", "separator", "positive"]:
self.submodels[f"{domain} surface potential difference"] = surf_model(
self.param, domain, self.options
)
def set_side_reaction_submodels(self):
if self.options["hydrolysis"] == "true":
self.submodels["oxygen diffusion"] = pybamm.oxygen_diffusion.Full(
self.param
)
self.submodels["positive oxygen interface"] = pybamm.kinetics.ForwardTafel(
self.param, "positive", "lead-acid oxygen", self.options, "primary"
)
self.submodels[
"negative oxygen interface"
] = pybamm.kinetics.DiffusionLimited(
self.param, "negative", "lead-acid oxygen", self.options, order="full"
)
else:
self.submodels["oxygen diffusion"] = pybamm.oxygen_diffusion.NoOxygen(
self.param
)
self.submodels["positive oxygen interface"] = pybamm.kinetics.NoReaction(
self.param, "positive", "lead-acid oxygen", "primary"
)
self.submodels["negative oxygen interface"] = pybamm.kinetics.NoReaction(
self.param, "negative", "lead-acid oxygen", "primary"
)
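# Minimal usage sketch (standard top-level PyBaMM API; the time window is
# illustrative and not specific to this model file):
#   model = pybamm.lead_acid.Full()
#   sim = pybamm.Simulation(model)
#   sim.solve([0, 3600])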
|
74c4099a4cd3f524e0afbd0c3ec0a7c3e37dba52
|
6e964d46b8fab9bccbd199ea7ade41297282b0a7
|
/setup.py
|
2b515ed820cf0761cf3bf83c9df8275354ef5929
|
[
"MIT"
] |
permissive
|
donkirkby/live-py-plugin
|
1a4cb87a796983245094d7c97c3e72f3cea0c540
|
165b447cc1288c94f24f1e660e0c45a6ef476826
|
refs/heads/master
| 2023-08-29T15:14:37.585327
| 2023-07-23T21:12:19
| 2023-07-23T21:12:19
| 4,332,096
| 257
| 59
|
MIT
| 2023-09-09T18:18:40
| 2012-05-15T04:41:18
|
Python
|
UTF-8
|
Python
| false
| false
| 1,661
|
py
|
setup.py
|
import setuptools
with open("space_tracer.md") as f:
long_description = f.read()
about = {}
with open("plugin/PySrc/space_tracer/about.py") as f:
exec(f.read(), about)
# noinspection PyUnresolvedReferences
setuptools.setup(
name=about['__title__'],
version=about['__version__'],
author=about['__author__'],
author_email=about['__author_email__'],
description=about['__description__'],
long_description=long_description,
long_description_content_type="text/markdown",
url=about['__url__'],
packages=setuptools.find_packages('plugin/PySrc/'),
package_data={'space_tracer': ['py.typed']},
package_dir={'': 'plugin/PySrc/'},
entry_points=dict(console_scripts=[
'space_tracer = space_tracer:main']),
classifiers=[ # from https://pypi.org/classifiers/
"Intended Audience :: Developers",
"Topic :: Software Development :: Debuggers",
# Synchronize Python versions with py-build.yml workflow and tox.ini.
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
"Programming Language :: Python :: 3.10",
"Programming Language :: Python :: 3.11",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
"Development Status :: 5 - Production/Stable",
"Environment :: Console"
],
license="MIT",
project_urls={
'Bug Reports': 'https://github.com/donkirkby/live-py-plugin/issues',
'Source': 'https://github.com/donkirkby/live-py-plugin'}
)
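# Hedged usage note: with this metadata the package builds via the generic
# packaging commands, e.g. `python -m build` for an sdist/wheel or
# `pip install -e .` for a development install (standard tooling, nothing
# project-specific).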
|
8f17f458d6325aed081eded3ef7f90da47bc08fd
|
26bbcfdb811f7df13f7b5a95ba551da7adac4e9b
|
/src/certfuzz/campaign/campaign_base.py
|
804c29d777ced74c68974018708e06cf5828f433
|
[
"LicenseRef-scancode-warranty-disclaimer"
] |
no_license
|
CERTCC/certfuzz
|
080c3a5448a39d02049253fad96498ba50191586
|
892dae8676535b0ae5b77eea95ffbc21e9e1c959
|
refs/heads/develop
| 2022-11-11T06:12:09.032184
| 2020-06-10T19:57:26
| 2020-06-10T19:57:26
| 20,684,363
| 161
| 25
|
NOASSERTION
| 2023-05-10T14:27:00
| 2014-06-10T12:29:53
|
Python
|
UTF-8
|
Python
| false
| false
| 18,923
|
py
|
campaign_base.py
|
'''
Created on Feb 9, 2012
@organization: cert.org
'''
import abc
import logging
import os
import re
import shutil
import tempfile
import traceback
import signal
from certfuzz.campaign.errors import CampaignError
from certfuzz.file_handlers.seedfile_set import SeedfileSet
from certfuzz.file_handlers.errors import SeedfileSetError
from certfuzz.fuzztools import filetools
from certfuzz.runners.errors import RunnerArchitectureError, \
RunnerPlatformVersionError
from certfuzz.version import __version__
from certfuzz.file_handlers.tmp_reaper import TmpReaper
import gc
from certfuzz.config.simple_loader import load_and_fix_config
from certfuzz.helpers.misc import import_module_by_name
from certfuzz.fuzztools.object_caching import dump_obj_to_file,\
load_obj_from_file
import json
from certfuzz.fuzztools.filetools import write_file
logger = logging.getLogger(__name__)
class CampaignBase(object):
'''
Provides a fuzzing campaign object.
'''
__metaclass__ = abc.ABCMeta
def __init__(self, config_file, result_dir=None, debug=False):
'''
Typically one would invoke a campaign as follows:
with CampaignBase(params) as campaign:
campaign.go()
This will ensure that the runtime context is established properly, and
that any cleanup activities can be completed if exceptions occur.
@param config_file: path to a config file
@param result_dir: path to a result directory
(will be created if necessary)
@param debug: boolean indicating whether we are in debug mode
'''
logger.debug('initialize %s', self.__class__.__name__)
# Read the cfg file
self.config_file = config_file
self.config = None
self.cached_state_file = None
self.debug = debug
self._version = __version__
self.testcases_seen = set()
self.runner_module_name = None
self.runner_module = None
self.runner_cls = None
self.seedfile_set = None
self.working_dir = None
self.seed_dir_local = None
# flag to indicate whether this is a fresh script start up or not
self.first_chunk = True
# TODO: consider making this configurable
self.status_interval = 100
self.gui_app = False
self.seed_interval = None
self.current_seed = None
self.outdir_base = None
self.outdir = None
self.sf_set_out = None
if result_dir:
self.outdir_base = os.path.abspath(result_dir)
self._read_config_file()
# Create a debugger timeout that allows for slack space to account
# for the difference between a zzuf-invoked iteration and a
# debugger-invoked iteration
debugger_timeout = self.config['runner']['runtimeout'] * 2
if debugger_timeout < 10:
debugger_timeout = 10
self.config['debugger']['runtimeout'] = debugger_timeout
self.campaign_id = self.config['campaign']['id']
self.current_seed = self.config['runoptions']['first_iteration']
self.seed_interval = self.config['runoptions']['seed_interval']
self.seed_dir_in = self.config['directories']['seedfile_dir']
if self.outdir_base is None:
# it wasn't spec'ed on the command line so use the config
self.outdir_base = self.config['directories']['results_dir']
self.work_dir_base = self.config['directories']['working_dir']
self.program = self.config['target']['program']
self.cmd_template = self.config['target']['cmdline_template']
        _campaign_id_no_space = re.sub(r'\s', '_', self.campaign_id)
        _campaign_id_with_underscores = re.sub(r'\W', '_', self.campaign_id)
self.outdir = os.path.join(self.outdir_base, _campaign_id_no_space)
logger.debug('outdir=%s', self.outdir)
self.sf_set_out = os.path.join(self.outdir, 'seedfiles')
if not self.cached_state_file:
cachefile = 'campaign_%s.json' % _campaign_id_with_underscores
self.cached_state_file = os.path.join(
self.work_dir_base, cachefile)
if not self.seed_interval:
self.seed_interval = 1
if not self.current_seed:
self.current_seed = 0
self.fuzzer_module_name = 'certfuzz.fuzzers.{}'.format(
self.config['fuzzer']['fuzzer'])
def _read_config_file(self):
logger.info('Reading config from %s', self.config_file)
self.config = load_and_fix_config(self.config_file)
logger.info(
'Using target program: %s', self.config['target']['program'])
@abc.abstractmethod
def _pre_enter(self):
'''
Callback for class-specific tasks that happen before
CampaignBase.__enter__() does its work. If self is modified it must
return self, otherwise no return value is needed.
@return: None or self
'''
@abc.abstractmethod
def _post_enter(self):
'''
Callback for class-specific tasks that happen after
CampaignBase.__enter__() does its work. If self is modified it must
return self, otherwise no return value is needed.
@return: None or self
'''
def __enter__(self):
'''
Creates a runtime context for the campaign.
'''
_result = self._pre_enter()
if _result is not None:
self = _result
self._check_prog()
self._setup_workdir()
self._set_fuzzer()
self._set_runner()
self._check_runner()
self._setup_output()
self._create_seedfile_set()
self._read_state()
_result = self._post_enter()
if _result is not None:
self = _result
return self
def _handle_common_errors(self, etype, value, mytraceback):
'''
Handles errors common to this class and all its subclasses
:param etype:
:param value:
'''
handled = False
if etype is KeyboardInterrupt:
logger.warning('Keyboard interrupt - exiting')
handled = True
elif etype is RunnerArchitectureError:
logger.error('Unsupported architecture: %s', value)
logger.error(
'Set "verify_architecture=false" in the runner section of your config to override this check')
handled = True
elif etype is RunnerPlatformVersionError:
logger.error('Unsupported platform: %s', value)
handled = True
elif etype is SeedfileSetError:
logger.error('No seedfiles available')
handled = True
return handled
def _handle_errors(self, etype, value, mytraceback):
'''
Callback to handle class-specific errors. If used, it should be
overridden by subclasses. Will be called after _handle_common_errors
:param etype:
:param value:
:param mytraceback:
'''
def _log_unhandled_exception(self, etype, value, mytraceback):
logger.debug('Unhandled exception:')
logger.debug(' type: %s', etype)
logger.debug(' value: %s', value)
        for line in traceback.format_exception(etype, value, mytraceback):
            logger.debug(line.rstrip())
@abc.abstractmethod
def _pre_exit(self):
'''
Implements methods to be completed prior to handling errors in the
__exit__ method. No return value.
'''
def __exit__(self, etype, value, mytraceback):
'''
Handles known exceptions gracefully, attempts to clean up temp files
before exiting.
'''
self._pre_exit()
# handle common errors
handled = self._handle_common_errors(etype, value, mytraceback)
if etype and not handled:
# call the class-specific error handler
handled = self._handle_errors(etype, value, mytraceback)
cleanup = True
if etype and not handled:
# if you got here, nothing has handled the error
# so log it and keep going
self._log_unhandled_exception(etype, value, mytraceback)
if self.debug and etype:
# short out if in debug mode and an error occurred
logger.debug('Skipping cleanup since we are in debug mode.')
return handled
# debug not set, so we should clean up
self._cleanup_workdir()
return handled
def _check_prog(self):
if not os.path.exists(self.program):
msg = 'Cannot find program "%s" (resolves to "%s")' % (
self.program, os.path.abspath(self.program))
raise CampaignError(msg)
def _set_fuzzer(self):
self.fuzzer_module = import_module_by_name(self.fuzzer_module_name)
self.fuzzer_cls = self.fuzzer_module._fuzzer_class
def _set_runner(self):
if self.runner_module_name:
self.runner_module = import_module_by_name(self.runner_module_name)
self.runner_cls = self.runner_module._runner_class
def _check_runner(self):
# try to run the runner module's check_runner method
try:
self.runner_module.check_runner()
except AttributeError:
# not a big deal if it's not there, just note it and keep going.
            logger.warning(
                'Runner module %s has no check_runner method. '
                'Skipping runner check.', self.runner_module_name)
@property
def _version_file(self):
return os.path.join(self.outdir, 'version.txt')
def _write_version(self):
version_string = 'Results produced by %s v%s' % (__name__, __version__)
filetools.write_file(version_string, self._version_file)
def _setup_output(self):
# construct run output directory
filetools.make_directories(self.outdir)
# copy config to run output dir
filetools.copy_file(self.config_file, self.outdir)
self._write_version()
def _setup_workdir(self):
# make_directories silently skips existing dirs, so it's okay to call
# it even if work_dir_base already exists
filetools.make_directories(self.work_dir_base)
# now we're sure work_dir_base exists, so it's safe to create temp dirs
self.working_dir = tempfile.mkdtemp(
prefix='campaign_', dir=self.work_dir_base)
self.seed_dir_local = os.path.join(self.working_dir, 'seedfiles')
def _cleanup_workdir(self):
        try:
            shutil.rmtree(self.working_dir)
        except OSError:
            # swallow the error here; the existence check below logs a
            # warning if the directory could not be removed
            pass
if os.path.exists(self.working_dir):
logger.warning(
"Unable to remove campaign working dir: %s", self.working_dir)
else:
logger.debug('Removed campaign working dir: %s', self.working_dir)
def _create_seedfile_set(self):
if self.seedfile_set is not None:
return
logger.info('Building seedfile set')
with SeedfileSet(campaign_id=self.campaign_id,
originpath=self.seed_dir_in,
localpath=self.seed_dir_local,
outputpath=self.sf_set_out) as sfset:
self.seedfile_set = sfset
def _read_cached_data(self, cachefile):
try:
with open(cachefile, 'rb') as fp:
cached_data = json.load(fp)
except (IOError, ValueError) as e:
logger.info(
'No cached campaign data found, will proceed as new campaign: %s', e)
return
return cached_data
def _restore_seedfile_scores(self, sf_scores):
for sf_md5, sf_score in sf_scores.iteritems():
# is this seedfile still around?
try:
arm_to_update = self.seedfile_set.arms[sf_md5]
except KeyError:
# if not, just skip it
logger.warning(
'Skipping seedfile score recovery for %s: maybe seedfile was removed?', sf_md5)
continue
cached_successes = sf_score['successes']
cached_trials = sf_score['trials']
arm_to_update.update(
successes=cached_successes, trials=cached_trials)
def _restore_rangefinder_scores(self, rf_scores):
for sf_md5, rangelist in rf_scores.iteritems():
# is this seedfile still around?
try:
sf_to_update = self.seedfile_set.things[sf_md5]
except KeyError:
logger.warning(
'Skipping rangefinder score recovery for %s: maybe seedfile was removed?', sf_md5)
continue
# if you got here, you have a seedfile to update
# we're going to need its rangefinder
rangefinder = sf_to_update.rangefinder
# construct a rangefinder key lookup table
rf_lookup = {}
for key, item in rangefinder.things.iteritems():
lookup_key = (item.min, item.max)
rf_lookup[lookup_key] = key
for r in rangelist:
# is this range still correct?
cached_rmin = r['range_key']['range_min']
cached_rmax = r['range_key']['range_max']
lkey = (cached_rmin, cached_rmax)
try:
rk = rf_lookup[lkey]
except KeyError:
logger.warning(
'Skipping rangefinder score recovery for %s range %s: range not found', sf_md5, lkey)
continue
# if you got here you have a matching range to update
# fyi: .arms and .things have the same keys
arm_to_update = rangefinder.arms[rk]
cached_successes = r['range_score']['successes']
cached_trials = r['range_score']['trials']
arm_to_update.update(
successes=cached_successes, trials=cached_trials)
def _restore_campaign_from_cache(self, cached_data):
self.current_seed = cached_data['current_seed']
self._restore_seedfile_scores(cached_data['seedfile_scores'])
self._restore_rangefinder_scores(cached_data['rangefinder_scores'])
logger.info('Restoring cached campaign data done')
def _read_state(self, cachefile=None):
if not cachefile:
cachefile = self.cached_state_file
cached_data = self._read_cached_data(cachefile)
if cached_data is None:
return
# check the timestamp
# if the cache is older than the current config file, we should
# ignore the cached data and just start fresh
cached_cfg_ts = cached_data['config_timestamp']
if self.config['config_timestamp'] != cached_cfg_ts:
logger.warning(
'Config file modified since campaign data cache was created. Discarding cached campaign data. Will proceed as new campaign.')
return 2
# if you got here, the cached file is ok to use
self._restore_campaign_from_cache(cached_data)
def _get_state_as_dict(self):
state = {'current_seed': self.current_seed,
'config_timestamp': self.config['config_timestamp'],
'seedfile_scores': self.seedfile_set.arms_as_dict(),
'rangefinder_scores': None
}
# add rangefinder scores from each seedfile
d = {}
for k, sf in self.seedfile_set.things.iteritems():
d[k] = []
for rk, rf in sf.rangefinder.things.iteritems():
arm = sf.rangefinder.arms[rk]
rkey = {'range_min': rf.min, 'range_max': rf.max}
rdata = {'range_key': rkey,
'range_score': dict(arm.__dict__)}
d[k].append(rdata)
state['rangefinder_scores'] = d
return state
def _get_state_as_json(self):
state = self._get_state_as_dict()
return json.dumps(state, indent=4, sort_keys=True)
def _save_state(self, cachefile=None):
if not cachefile:
cachefile = self.cached_state_file
state_as_json = self._get_state_as_json()
write_file(state_as_json, cachefile)
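
    # Illustrative shape of the JSON that _save_state() writes, pieced
    # together from _get_state_as_dict(); the md5 keys and numbers are
    # made-up examples, and arm dicts may carry extra fields:
    # {
    #     "config_timestamp": 1381867328.0,
    #     "current_seed": 1500,
    #     "seedfile_scores": {"<seedfile md5>": {"successes": 2, "trials": 500}},
    #     "rangefinder_scores": {
    #         "<seedfile md5>": [
    #             {"range_key": {"range_min": 0.001, "range_max": 0.002},
    #              "range_score": {"successes": 1, "trials": 120}}
    #         ]
    #     }
    # }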
def _testcase_is_unique(self, testcase_id, exploitability='UNKNOWN'):
'''
If testcase_id represents a new testcase, add the testcase_id to testcases_seen
and return True. Otherwise return False.
@param testcase_id: the testcase_id to look up
@param exploitability: not used at this time
'''
if testcase_id not in self.testcases_seen:
self.testcases_seen.add(testcase_id)
logger.debug(
"%s did not exist in cache, testcase is unique", testcase_id)
return True
logger.debug('%s was found, not unique', testcase_id)
return False
def _keep_going(self):
'''
Returns True if a campaign should proceed. False otherwise.
'''
return True
def _do_interval(self):
'''
Implements a loop over a set of iterations
'''
# wipe the tmp dir clean to try to avoid filling the VM disk
TmpReaper().clean_tmp()
# choose seedfile
sf = self.seedfile_set.next_item()
logger.info('Selected seedfile: %s', sf.basename)
if (self.current_seed > 0) and (self.current_seed % self.status_interval == 0):
# cache our current state
self._save_state()
r = sf.rangefinder.next_item()
# rng_seed = int(sf.md5, 16)
interval_limit = self.current_seed + self.seed_interval
# start an iteration interval
# note that range does not include interval_limit
logger.debug(
'Starting interval %d-%d', self.current_seed, interval_limit)
for seednum in xrange(self.current_seed, interval_limit):
if sf.md5 not in self.seedfile_set.things:
# We've exhausted what we can do with this seedfile
break
self._do_iteration(sf, r, seednum)
del sf
# manually collect garbage
gc.collect()
self.current_seed = interval_limit
self.first_chunk = False
@abc.abstractmethod
    def _do_iteration(self, seedfile, range_obj, seednum):
        '''
        Implements a single iteration of the fuzzing process for the
        given seedfile, rangefinder range, and seed number.
        '''
    def signal_handler(self, signum, frame):
        logger.debug('KeyboardInterrupt detected')
        raise KeyboardInterrupt
def go(self):
'''
Starts campaign
'''
signal.signal(signal.SIGINT, self.signal_handler)
while self._keep_going():
self._do_interval()
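

# --- Hedged usage sketch, not part of the original module ---
# The campaign class above is abstract, so a concrete subclass must fill
# in the abstract hooks before it can run. Every name below (MyCampaign,
# CampaignBase as a stand-in for the class defined above, the config
# path) is an illustrative assumption, not code from this project:
#
#     class MyCampaign(CampaignBase):
#         def _pre_enter(self):
#             pass  # setup before the __enter__ machinery runs
#         def _post_enter(self):
#             pass  # setup after seedfiles and cached state are loaded
#         def _pre_exit(self):
#             pass  # cleanup before __exit__ error handling
#         def _do_iteration(self, seedfile, range_obj, seednum):
#             pass  # fuzz one seed here
#
#     with MyCampaign(config_file='campaign.yaml') as campaign:
#         campaign.go()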
|
1e55d918253a06be1ea378757ae836a006ad07ce
|
a0eb6744e6f7f509b96d21f0bc8b3f8387f6861c
|
/notebook/jupyter_precision_numpy.py
|
5a55a3ee39b67ef0edacfda4258ce070352fae51
|
[
"MIT"
] |
permissive
|
nkmk/python-snippets
|
a6c66bdf999502e52f4795a3074ced63bf440817
|
f9dd286a9cf93f474e20371f8fffc4732cb3c4d5
|
refs/heads/master
| 2023-08-03T04:20:05.606293
| 2023-07-26T13:21:11
| 2023-07-26T13:21:11
| 98,900,570
| 253
| 77
|
MIT
| 2020-10-25T01:12:53
| 2017-07-31T14:54:47
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 601
|
py
|
jupyter_precision_numpy.py
|
import numpy as np
a = np.array([0.123456789, 0.987654321])
a
# array([0.12345679, 0.98765432])
%precision 3
# '%.3f'
a
# array([0.123, 0.988])
print(a)
# [0.123 0.988]
print(np.get_printoptions()['precision'])
# 3
np.set_printoptions(precision=5)
a
# array([0.12346, 0.98765])
print(a)
# [0.12346 0.98765]
print(a[0])
# 0.123456789
%precision
# '%r'
a
# array([0.12345679, 0.98765432])
print(np.get_printoptions()['precision'])
# 8
%precision %.2e
# '%.2e'
a
# array([0.12345679, 0.98765432])
np.set_printoptions(formatter={'float': '{:.2e}'.format})
a
# array([1.23e-01, 9.88e-01])
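
# Hedged aside, not in the original snippet: on NumPy 1.15+ the
# printoptions context manager scopes settings to a with-block, so the
# options set here revert automatically when the block exits.
with np.printoptions(precision=2):
    print(a)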
|
0cfc4286aa3aa69d0627b0ddfc904678dd7eaae5
|
e7efae2b83216d9621bd93390959d652de779c3d
|
/proxysql/datadog_checks/proxysql/proxysql.py
|
0fd8d4f12175148b185e63219310503c95733177
|
[
"BSD-3-Clause",
"MIT",
"BSD-3-Clause-Modification",
"Unlicense",
"Apache-2.0",
"LGPL-3.0-only",
"LicenseRef-scancode-public-domain",
"BSD-2-Clause",
"CC0-1.0",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
DataDog/integrations-core
|
ee1886cc7655972b2791e6ab8a1c62ab35afdb47
|
406072e4294edff5b46b513f0cdf7c2c00fac9d2
|
refs/heads/master
| 2023-08-31T04:08:06.243593
| 2023-08-30T18:22:10
| 2023-08-30T18:22:10
| 47,203,045
| 852
| 1,548
|
BSD-3-Clause
| 2023-09-14T16:39:54
| 2015-12-01T16:41:45
|
Python
|
UTF-8
|
Python
| false
| false
| 4,851
|
py
|
proxysql.py
|
# (C) Datadog, Inc. 2020-present
# All rights reserved
# Licensed under a 3-clause BSD style license (see LICENSE)
from contextlib import closing, contextmanager
import pymysql
import pymysql.cursors
from datadog_checks.base import AgentCheck, ConfigurationError, is_affirmative
from datadog_checks.base.utils.db import QueryManager
from .queries import (
STATS_COMMAND_COUNTERS,
STATS_MEMORY_METRICS,
STATS_MYSQL_BACKENDS,
STATS_MYSQL_CONNECTION_POOL,
STATS_MYSQL_GLOBAL,
STATS_MYSQL_QUERY_RULES,
STATS_MYSQL_USERS,
VERSION_METADATA,
)
from .ssl_utils import make_insecure_ssl_client_context
ADDITIONAL_METRICS_MAPPING = {
'command_counters_metrics': STATS_COMMAND_COUNTERS,
'connection_pool_metrics': STATS_MYSQL_CONNECTION_POOL,
'users_metrics': STATS_MYSQL_USERS,
'memory_metrics': STATS_MEMORY_METRICS,
'backends_metrics': STATS_MYSQL_BACKENDS,
'query_rules_metrics': STATS_MYSQL_QUERY_RULES,
}
class ProxysqlCheck(AgentCheck):
SERVICE_CHECK_NAME = "can_connect"
__NAMESPACE__ = "proxysql"
# This remapper is used to support legacy Proxysql integration config values
TLS_CONFIG_REMAPPER = {
'validate_hostname': {'name': 'tls_validate_hostname'},
}
def __init__(self, name, init_config, instances):
super(ProxysqlCheck, self).__init__(name, init_config, instances)
self.host = self.instance.get("host", "")
self.port = int(self.instance.get("port", 0))
self.user = self.instance.get("username", "")
self.password = str(self.instance.get("password", ""))
if not all((self.host, self.port, self.user, self.password)):
raise ConfigurationError("ProxySQL host, port, username and password are needed")
self.tls_verify = is_affirmative(self.instance.get('tls_verify', False))
self.connect_timeout = self.instance.get("connect_timeout", 10)
self.read_timeout = self.instance.get("read_timeout")
self.tags = self.instance.get("tags", [])
self.tags.append("proxysql_server:{}".format(self.host))
self.tags.append("proxysql_port:{}".format(self.port))
manager_queries = [STATS_MYSQL_GLOBAL]
if self.is_metadata_collection_enabled():
# Add the query to collect the ProxySQL version
manager_queries.append(VERSION_METADATA)
additional_metrics = self.instance.get("additional_metrics", [])
for additional_group in additional_metrics:
if additional_group not in ADDITIONAL_METRICS_MAPPING:
raise ConfigurationError(
"There is no additional metric group called '{}' for the ProxySQL integration, it should be one "
"of ({})".format(
additional_group,
", ".join(ADDITIONAL_METRICS_MAPPING),
)
)
manager_queries.append(ADDITIONAL_METRICS_MAPPING[additional_group])
self._connection = None
self._query_manager = QueryManager(self, self.execute_query_raw, queries=manager_queries, tags=self.tags)
self.check_initializations.append(self._query_manager.compile_queries)
def check(self, _):
with self.connect() as conn:
self._connection = conn
self._query_manager.execute()
def execute_query_raw(self, query):
with closing(self._connection.cursor()) as cursor:
cursor.execute(query)
if cursor.rowcount < 1:
self.log.warning("Failed to fetch records from query: `%s`.", query)
return []
return cursor.fetchall()
@contextmanager
def connect(self):
if self.tls_verify:
self.log.debug("Connecting to ProxySQL via SSL/TLS")
# If ca_cert is None, will load the default certificates
ssl_context = self.get_tls_context()
else:
self.log.debug("Connecting to ProxySQL without SSL/TLS")
ssl_context = make_insecure_ssl_client_context()
db = None
try:
db = pymysql.connect(
host=self.host,
user=self.user,
port=self.port,
passwd=self.password,
connect_timeout=self.connect_timeout,
read_timeout=self.read_timeout,
ssl=ssl_context,
)
self.log.debug("Connected to ProxySQL")
yield db
except Exception:
self.service_check(self.SERVICE_CHECK_NAME, AgentCheck.CRITICAL, tags=self.tags)
self.log.exception("Can't connect to ProxySQL")
raise
else:
self.service_check(self.SERVICE_CHECK_NAME, AgentCheck.OK, tags=self.tags)
finally:
if db:
db.close()
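

# --- Hedged local-test sketch, not part of the shipped check ---
# The instance keys mirror what __init__ reads above; the host, port and
# credentials are placeholder assumptions, not real defaults:
#
#     instance = {
#         'host': 'localhost',
#         'port': 6032,
#         'username': 'admin',
#         'password': 'admin',
#         'additional_metrics': ['connection_pool_metrics'],
#     }
#     check = ProxysqlCheck('proxysql', {}, [instance])
#     check.check(instance)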
|
84376415d6f425c71e94e56b5e84f901a23a9fde
|
69818aed99b6bedf41ba6c65474b3825fb65cbcf
|
/tools/programs/grab_get.py
|
d6bf784b6861e97d103815c2cd287e35d2b3333b
|
[] |
no_license
|
Realm667/WolfenDoom
|
26ccebaed04f0815892d57e11b93873e94efd737
|
e829080145016a14ae32727f533624a65b88b0b2
|
refs/heads/master
| 2023-09-04T03:13:47.044568
| 2023-09-03T00:43:37
| 2023-09-03T00:43:37
| 30,589,550
| 276
| 55
| null | 2021-11-27T15:57:19
| 2015-02-10T11:43:37
|
C#
|
UTF-8
|
Python
| false
| false
| 618
|
py
|
grab_get.py
|
#!/usr/bin/env python3
"Inject a grAb chunk into a PNG file"
import argparse
import png as png_util
import struct
if __name__ == "__main__":
parser = argparse.ArgumentParser(description=
"Get offsets from a PNG file")
parser.add_argument('png', help="The PNG file to get the grAb offset "
"info from")
args = parser.parse_args()
png = png_util.PNGFile()
png.read(args.png)
grab_index = png.chunk_index(b"grAb")
if grab_index >= 0:
x, y = struct.unpack(">2i", png.chunks[grab_index].data)
print(x, y)
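
# Hedged aside, not part of the original tool: writing offsets back would
# pack them with the same big-endian layout before inserting a b"grAb"
# chunk, e.g.:
#     data = struct.pack(">2i", x, y)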
|
7e795a734807629ec8d46b5660561baf1de254ac
|
4ece75389e2822d63699333f4595ade12bd8632d
|
/personal/compare_lines.py
|
98ac8eacddef59acd8c1c58dd423e55690b3f8e2
|
[
"MIT"
] |
permissive
|
Watchful1/PushshiftDumps
|
3a797a4dce87a8a04ac24623c02a56ab7d955c84
|
4a50ca6605eb88e82e2406cf57c2a160f8a091e3
|
refs/heads/master
| 2023-06-08T20:30:45.689214
| 2023-05-26T01:28:37
| 2023-05-26T01:28:37
| 403,231,429
| 144
| 30
|
MIT
| 2023-06-17T04:55:01
| 2021-09-05T06:17:43
|
Python
|
UTF-8
|
Python
| false
| false
| 775
|
py
|
compare_lines.py
|
import discord_logging
log = discord_logging.init_logging()
if __name__ == "__main__":
file_one = open(r"\\MYCLOUDPR4100\Public\reddit_final\RelationshipsOver35_comments_dump.txt", 'r')
file_two = open(r"\\MYCLOUDPR4100\Public\reddit_final\RelationshipsOver35_comments_mongo.txt", 'r')
file_lines = 0
while True:
file_lines += 1
		# read raw lines so the EOF check below only fires at end of file,
		# not on a blank line in the middle
		line_one = file_one.readline()
		line_two = file_two.readline()
		if line_one.rstrip() != line_two.rstrip():
			log.info(f"lines not matching: {file_lines}")
			log.info(line_one.rstrip())
			log.info(line_two.rstrip())
#break
if file_lines % 100000 == 0:
log.info(f"{file_lines:,}")
if not line_one:
break
log.info(f"{file_lines:,}")
file_one.close()
file_two.close()
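
# Hedged alternative, not in the original script: itertools.zip_longest
# walks both files in lockstep and makes the unequal-length case explicit;
# path_one and path_two are placeholder names:
#
#     from itertools import zip_longest
#     with open(path_one) as f1, open(path_two) as f2:
#         for num, (l1, l2) in enumerate(zip_longest(f1, f2, fillvalue=""), 1):
#             if l1.rstrip() != l2.rstrip():
#                 log.info(f"lines not matching: {num}")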
|
7a30d0ebc8091dfff8d39f8118b08705b1ad8493
|
749af8e81d5ccd2d8714a34434a9c77772df551b
|
/statsmodels/datasets/strikes/data.py
|
4eaa1513492b1df50bf962ae91acc96ef18f0d93
|
[
"BSD-3-Clause"
] |
permissive
|
statsmodels/statsmodels
|
98ca67192c08bcc611ed3a75edaded2c7181ab98
|
01b19d7d111b29c183f620ff0a949ef6391ff8ee
|
refs/heads/main
| 2023-09-05T13:05:49.497076
| 2023-09-01T10:54:50
| 2023-09-01T10:54:50
| 1,885,237
| 8,666
| 3,023
|
BSD-3-Clause
| 2023-09-13T17:51:48
| 2011-06-12T17:04:50
|
Python
|
UTF-8
|
Python
| false
| false
| 1,631
|
py
|
data.py
|
"""U.S. Strike Duration Data"""
from statsmodels.datasets import utils as du
__docformat__ = 'restructuredtext'
COPYRIGHT = """This is public domain."""
TITLE = __doc__
SOURCE = """
This is a subset of the data used in Kennan (1985). It was originally
published by the Bureau of Labor Statistics.
::
Kennan, J. 1985. "The duration of contract strikes in US manufacturing.
`Journal of Econometrics` 28.1, 5-28.
"""
DESCRSHORT = """Contains data on the length of strikes in US manufacturing and
unanticipated industrial production."""
DESCRLONG = """Contains data on the length of strikes in US manufacturing and
unanticipated industrial production. The data is a subset of the data originally
used by Kennan. The data here is data for the months of June only to avoid
seasonal issues."""
#suggested notes
NOTE = """::
Number of observations - 62
Number of variables - 2
Variable name definitions::
duration - duration of the strike in days
iprod - unanticipated industrial production
"""
def load_pandas():
"""
Load the strikes data and return a Dataset class instance.
Returns
-------
Dataset
See DATASET_PROPOSAL.txt for more information.
"""
data = _get_data()
return du.process_pandas(data, endog_idx=0)
def load():
"""
Load the strikes data and return a Dataset class instance.
Returns
-------
Dataset
See DATASET_PROPOSAL.txt for more information.
"""
return load_pandas()
def _get_data():
    return du.load_csv(__file__, 'strikes.csv').astype(float)
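

# Brief usage sketch; mirrors the statsmodels dataset API defined above:
#     from statsmodels.datasets import strikes
#     dataset = strikes.load_pandas()
#     dataset.data.head()  # columns: duration (endog), iprod (exog)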
|