Dataset schema (one record per source file; ⌀ marks nullable columns):

| column | dtype | range / classes |
|---|---|---|
| blob_id | string | length 40 |
| directory_id | string | length 40 |
| path | string | length 4 to 721 |
| content_id | string | length 40 |
| detected_licenses | list | length 0 to 57 |
| license_type | string | 2 classes |
| repo_name | string | length 5 to 91 |
| snapshot_id | string | length 40 |
| revision_id | string | length 40 |
| branch_name | string | 321 classes |
| visit_date | timestamp[ns] | 2016-08-12 09:31:09 to 2023-09-06 10:45:07 |
| revision_date | timestamp[ns] | 2010-09-28 14:01:40 to 2023-09-06 06:22:19 |
| committer_date | timestamp[ns] | 2010-09-28 14:01:40 to 2023-09-06 06:22:19 |
| github_id | int64 | 426 to 681M |
| star_events_count | int64 | 101 to 243k |
| fork_events_count | int64 | 0 to 110k |
| gha_license_id | string | 23 classes |
| gha_event_created_at | timestamp[ns] | 2012-06-28 18:51:49 to 2023-09-14 21:59:16 ⌀ |
| gha_created_at | timestamp[ns] | 2008-02-11 22:55:26 to 2023-08-10 11:14:58 ⌀ |
| gha_language | string | 147 classes |
| src_encoding | string | 26 classes |
| language | string | 2 classes |
| is_vendor | bool | 2 classes |
| is_generated | bool | 2 classes |
| length_bytes | int64 | 6 to 10.2M |
| extension | string | 115 classes |
| filename | string | length 3 to 113 |
| content | string | length 6 to 10.2M |

Each record below is rendered as labelled field: value lines, followed by the file content.
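For orientation, a minimal sketch of filtering records with this schema, assuming the dump has been exported to a Parquet file (the file name is hypothetical):

```python
import pandas as pd

# Hypothetical export of the records shown below.
df = pd.read_parquet("stack_sample.parquet")

# Keep permissively licensed, non-vendored, non-generated Python files.
mask = (
    (df["license_type"] == "permissive")
    & (df["language"] == "Python")
    & ~df["is_vendor"]
    & ~df["is_generated"]
)
for _, row in df[mask].iterrows():
    print(row["repo_name"], row["path"], row["length_bytes"])
```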

---
repo_name: pybites/challenges | path: /26/clamytoe/pytrack/pytrack.py | filename: pytrack.py | extension: py | length_bytes: 6,451 | language: Python | src_encoding: UTF-8 | is_vendor: false | is_generated: false
blob_id: 7d1280b66eb2d11555e0da8bd69608aa9a3f6ebb | directory_id: 1180c0bfe29959d95f3c131e6e839950e528d4ee | content_id: 5dfb5b313f2d5f918fa0db219397e97d880174f0
snapshot_id: e3e461accd8e7f890aee8007ba5070086ef983fc | revision_id: 02b77652d0901e6e06cb9b1e7cb3e59c675445c2 | branch_name: refs/heads/community
detected_licenses: ["MIT"] | license_type: permissive | gha_license_id: null | gha_language: Jupyter Notebook
visit_date: 2023-08-20T18:19:02.982214 | revision_date: 2022-11-17T09:23:31 | committer_date: 2022-11-17T09:23:31 | gha_event_created_at: 2023-07-21T05:58:19 | gha_created_at: 2017-01-07T07:17:50
github_id: 78,264,928 | star_events_count: 764 | fork_events_count: 3,115
content:
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from maya import get_localzone, MayaInterval, now, parse
from pytrack.models import Log, Project
STATE = ('INACTIVE', 'ACTIVE')
def get_projects(display=False):
projects = []
in_db = Project.select()
if in_db:
for project in Project.select():
projects.append(project)
if display:
if project.selected:
print('*[{}] {} {}: {}'.format(project.id, project.duration, STATE[project.status], project.name))
else:
print(' [{}] {} {}: {}'.format(project.id, project.duration, STATE[project.status], project.name))
else:
print('You are not currently tracking any projects.')
return projects
def get_selected(display=True):
"""Returns the currently selected project"""
selected = None
projects = get_projects()
for project in projects:
if project.selected:
selected = project
if display:
if selected:
print('Selected: {}'.format(selected.name))
else:
print('Selected: {}'.format(selected))
return selected
def get_active():
"""Returns the currently active project"""
active = None
projects = get_projects()
for project in projects:
if project.status == 1:
active = project
return active
def update_project(project):
"""Updates the duration of the project's time"""
duration = None
# set local timezone
timezone = get_localzone()
local_tz = timezone.zone
# collect all of the logs that are part of this project
logs = Log.select().where(Log.project_id == project.id)
# iterate over the logs and accumulate the duration of each log
for n, log in enumerate(logs):
start = parse(log.start_time).datetime(to_timezone=local_tz, naive=True)
stop = parse(log.stop_time).datetime(to_timezone=local_tz, naive=True)
if n == 0:
duration = MayaInterval.from_datetime(start, stop).timedelta
else:
duration += MayaInterval.from_datetime(start, stop).timedelta
# update the project
project.duration = duration
project.status = 0
project.save()
print('Deactivating: {} with total time of {}'.format(project.name, project.duration))
def add_project(name):
"""Add a new project"""
# ensure that there are no active projects
active = get_active()
if active:
print('There is an active project: [{}] {}'.format(active.id, active.name))
print('Please close that out before adding another project.')
else:
project = Project.create(name=name)
project.save()
print('Added Project: [{}] {}'.format(project.id, project.name))
select_project(project.id)
def select_project(id):
"""Marks the given project ID as selected"""
# ensure that there are no active projects
active = get_active()
if active:
print('Cannot make project selection while there is an active project!')
print('Currently tracking: {}'.format(active.name))
else:
projects = get_projects()
# iterate over projects to see if id is a valid entry
valid = False
for project in projects:
if project.id == id:
valid = True
if valid:
for project in projects:
if project.id == id:
project.selected = True
project.save()
print('Selected: [{}] {}'.format(project.id, project.name))
else:
# unselect all others
project.selected = False
project.save()
else:
print('[{}] is not a valid entry. \nChoose from the following:\n'.format(id))
_ = get_projects(display=True)
def remove_project(id, safe=True):
"""Remove the project by the entered ID"""
project = False
selected = 0
select = None
projects = get_projects()
for proj in projects:
if proj.id == id:
project = proj
selected = proj.selected
else:
select = proj.id
if project:
if safe:
print('About to remove [{}] {}'.format(project.id, project.name))
answer = input('Are you sure (y/n): ')
if 'y' in answer.lower():
project.delete_instance()
print('Removed [{}] {}'.format(project.id, project.name))
if selected and select:
select_project(select)
else:
print('Aborted')
else:
project.delete_instance()
if selected and select:
select_project(select)
else:
print('Project [{}] does not exist!'.format(id))
def reset_db(safe=True):
"""Reset the database"""
if safe:
print('WARNING: You are about to delete all records!')
answer = input('Are you sure (y/n): ')
if 'y' in answer.lower():
p = Project.delete()
p.execute()
l = Log.delete()
l.execute()
print('All records have been removed.')
else:
print('Aborted')
else:
p = Project.delete()
p.execute()
l = Log.delete()
l.execute()
def start_tracking():
"""Starts active tracking of project"""
# ensure that there are no current active projects
active = get_active()
if active:
print('Already tracking {}!'.format(active.name))
else:
project = get_selected(display=False)
log = Log.create(project=project, start_time=now().datetime())
log.save()
project.status = 1
project.save()
print('Activating: {}'.format(project.name))
def stop_tracking():
"""Stops active tracking of project"""
# ensure that we are closing an active project
active = get_active()
if active:
logs = Log.select().where(Log.project_id == active.id)
# close out the log that doesn't have a stop_time entry
for log in logs:
if not log.stop_time:
log.stop_time = now().datetime()
log.save()
# update the project's status and duration time
update_project(active)
else:
print('There are currently no active projects...')
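For illustration, the duration accumulation performed by update_project above (parse each log's start/stop time, sum the intervals) can be reproduced with the standard library alone; a minimal sketch with made-up log values:

```python
from datetime import datetime, timedelta

# Made-up (start_time, stop_time) pairs standing in for Log rows.
logs = [
    ("2022-11-17T09:00:00", "2022-11-17T09:45:00"),
    ("2022-11-17T13:00:00", "2022-11-17T13:30:00"),
]

duration = timedelta()
for start, stop in logs:
    duration += datetime.fromisoformat(stop) - datetime.fromisoformat(start)

print(duration)  # 1:15:00
```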

---
repo_name: 981377660LMT/algorithm-study | path: /20_杂题/atc競プロ/AtCoder Beginner Contest/271/D - Flip and Adjust.py | filename: D - Flip and Adjust.py | extension: py | length_bytes: 1,418 | language: Python | src_encoding: UTF-8 | is_vendor: false | is_generated: false
blob_id: 2feed536922631da290848d9286ebd4757c2328c | directory_id: f305f84ea6f721c2391300f0a60e21d2ce14f2a5 | content_id: 092e9820efe549da20d4f431b3a4c10cce79f075
snapshot_id: f2ada3e6959338ae1bc21934a84f7314a8ecff82 | revision_id: 7e79e26bb8f641868561b186e34c1127ed63c9e0 | branch_name: refs/heads/master
detected_licenses: [] | license_type: no_license | gha_license_id: null | gha_language: null
visit_date: 2023-09-01T18:26:16.525579 | revision_date: 2023-09-01T12:21:58 | committer_date: 2023-09-01T12:21:58 | gha_event_created_at: null | gha_created_at: null
github_id: 385,861,235 | star_events_count: 225 | fork_events_count: 24
content:
"""
有n个卡牌,正面和反面都有一个权值,
你可以选择一个面朝上,最后使得n张牌朝上的总和为m,输出任意一组方案数。
dp复原
"""
import sys
sys.setrecursionlimit(int(1e9))
input = lambda: sys.stdin.readline().rstrip("\r\n")
MOD = 998244353
INF = int(4e18)
# Decide whether the cards can be placed so that the sum of the integers written
# on the upward-facing sides is exactly S; if possible, output one such placement.
# !H: front (Head), T: back (Tail)
if __name__ == "__main__":
n, s = map(int, input().split())
goods = [tuple(map(int, input().split())) for _ in range(n)]
dp = [[False] * (s + 1) for _ in range(n + 1)]
dp[0][0] = True
for i, (a, b) in enumerate(goods):
for cap in range(s + 1):
if dp[i][cap]:
if cap + a <= s:
dp[i + 1][cap + a] = True
if cap + b <= s:
dp[i + 1][cap + b] = True
if not dp[n][s]:
print("No")
exit(0)
print("Yes")
res = []
cur = s
for i in range(n - 1, -1, -1):
if cur - goods[i][0] >= 0 and dp[i][cur - goods[i][0]]:
res.append("H")
cur -= goods[i][0]
else:
res.append("T")
cur -= goods[i][1]
print("".join(res[::-1]))

---
repo_name: matchms/matchms | path: /tests/importing/test_load_from_mgf.py | filename: test_load_from_mgf.py | extension: py | length_bytes: 775 | language: Python | src_encoding: UTF-8 | is_vendor: false | is_generated: false
blob_id: 7600c689f8d88e09b189ab4cc4fd9cb92ee80d12 | directory_id: e9f8704efe416f28dfd868f8a3c4ecf876809af7 | content_id: 921fedee56a90cf0503bdc93ccab07720c6dac16
snapshot_id: 8ee1285f19bb0bbd340562cc7d702803972eba5c | revision_id: a161325b2edfa35e2a6f3fb2de30e1de171ba676 | branch_name: refs/heads/master
detected_licenses: ["Apache-2.0"] | license_type: permissive | gha_license_id: Apache-2.0 | gha_language: Python
visit_date: 2023-09-01T20:08:05.453420 | revision_date: 2023-09-01T11:30:07 | committer_date: 2023-09-01T11:30:07 | gha_event_created_at: 2023-09-12T08:48:10 | gha_created_at: 2020-05-20T14:55:48
github_id: 265,598,917 | star_events_count: 140 | fork_events_count: 55
content:
import os
from matchms import Spectrum
from matchms.importing import load_from_mgf
def test_load_from_mgf_using_filepath():
module_root = os.path.join(os.path.dirname(__file__), "..")
spectra_file = os.path.join(module_root, "testdata", "pesticides.mgf")
spectra = list(load_from_mgf(spectra_file))
assert len(spectra) > 0
assert isinstance(spectra[0], Spectrum)
def test_load_from_mgf_using_file():
module_root = os.path.join(os.path.dirname(__file__), "..")
spectra_filepath = os.path.join(module_root, "testdata", "pesticides.mgf")
with open(spectra_filepath, "r", encoding="utf-8") as spectra_file:
spectra = list(load_from_mgf(spectra_file))
assert len(spectra) > 0
assert isinstance(spectra[0], Spectrum)

---
repo_name: CiscoTestAutomation/genieparser | path: /src/genie/libs/parser/iosxe/tests/ShowPmPortInterface/cli/equal/golden_output_expected.py | filename: golden_output_expected.py | extension: py | length_bytes: 2,229 | language: Python | src_encoding: UTF-8 | is_vendor: false | is_generated: false
blob_id: 0408d3750ea9a2fa7a419c17f79e8235aadcb99b | directory_id: 5ef6c8d47864f471e26b9902d61f8c687e941f05 | content_id: 05c10e056f638b3e596654c5d12cf24d3e3a6e5b
snapshot_id: 169c196558f1c1a0f0d10650876096f993224917 | revision_id: b531eff760b2e44cd69d7a2716db6f866907c239 | branch_name: refs/heads/master
detected_licenses: ["Apache-2.0"] | license_type: permissive | gha_license_id: Apache-2.0 | gha_language: Python
visit_date: 2023-09-03T08:56:18.831340 | revision_date: 2023-08-29T22:32:02 | committer_date: 2023-08-29T22:32:02 | gha_event_created_at: 2023-08-29T22:32:04 | gha_created_at: 2018-04-30T16:51:50
github_id: 131,621,824 | star_events_count: 247 | fork_events_count: 409
content:
expected_output={
"pm_port_info": {
"port": "1/24",
"pd": "0x7F837FEABD78",
"sw_idb": "0x7F837EBFA020(switch)",
"sb": "0x7F837EBFCA40",
"hw_idb": "0x7F837EBF8C38",
"if_num": 32,
"hw_if_index": 31,
"snmp_if_index": "32(32)",
"ptrunk_group": "0(port)",
"admin": "up(up)",
"line": "up(up)",
"oper_err": "none",
"port_mac": "683b.78f3.3118",
"idb_port_vlan": 1,
"def_vlan_id": 1,
"internal_vlan": "0x0",
"dtp_special": "no",
"pagp_special": "no",
"speed": "100M",
"duplex": "full",
"mode": "access",
"encap": "native",
"dtp_nonego": "FALSE",
"flow_ctrl_receive": "on",
"flow_ctrl_send": "off",
"link_flap_cnt": 0,
"dtp_flap_cnt": 0,
"pagp_flap_cnt": 0,
"unidirectional": "off",
"oper_vlan": 0,
"flag": 0,
"sm": "pm_port 1/24",
"running": "yes",
"state": "access_multi",
"last_transition": "(cfg_access_vvlanid)-> pagp_port_cleanup (cfg_access_vvlanid)-> pagp (cfg_access_vvlanid)-> pre_pagp_may_suspend (cfg_access_vvlanid)-> pagp_may_suspend (pagp_continue)-> start_pagp (pagp_continue)-> pagp (dont_bundle)-> pre_post_pagp (dont_bundle)-> post_pagp (dtp_access_multi)-> access_multi (bulk_sync)-> access_multi",
"vp": "1 100",
"vlans": "1 100",
"trunk_vlans": "1 100",
"fwd_vlans": 100,
"current_pruned_vlans": "none",
"previous_pruned_vlans": "none",
"protocols": "ip=on ipx=on misc=on other=on"
},
"config_values": {
"access_mode": "unknown",
"access_vlan_id": 1,
"native_vlan_id": 1,
"trunk_vlans": "1-4094",
"prune_vlans": "2-1001",
"primary_host_vlan": 32767,
"sec_host_vlan": 32767,
"pri_promiscuous_vlan": 32767,
"sec_prom_vlan": "none",
"speed": "auto",
"speed_auto": "auto-default",
"duplex": "auto",
"mode": "access",
"encap": "dot1q",
"nonego": "false",
"jumbo_cap": "true",
"jumbo": "false",
"mtu": 1500,
"sync_delay": 210,
"hol": "Enable",
"bcast_sup_level": 10000,
"mcast_sup_level": 10000,
"ucast_sup_level": 10000,
"disl": "off",
"dtp_nonego": "FALSE",
"media": "unknown",
"dualmode": 0,
"tdr_ever_run": "FALSE",
"tdr_in_progress": "FALSE",
"tdr_result_valid": "FALSE",
"tdr_error_code": 0,
"prbs_err_code": 0,
"prbs": "Stopped PRBS - port was admin down"
}
}

---
repo_name: dimagi/commcare-hq | path: /corehq/apps/oauth_integrations/migrations/0004_auto_20220304_1139.py | filename: 0004_auto_20220304_1139.py | extension: py | length_bytes: 869 | language: Python | src_encoding: UTF-8 | is_vendor: false | is_generated: false
blob_id: b23882baaa083123e74fc41dd0d112f4f06ce57a | directory_id: 578db86c51d44ebddd0dc7b1738985b3dc69eb74 | content_id: 19d83b2b61bc38472435e0bf2669527ba678bf87
snapshot_id: a43c7dd32b5f89c89fd5aa1b1359ab7301f4ff6b | revision_id: e7391ddae1af1dbf118211ecb52c83fc508aa656 | branch_name: refs/heads/master
detected_licenses: ["BSD-3-Clause"] | license_type: permissive | gha_license_id: BSD-3-Clause | gha_language: Python
visit_date: 2023-08-16T22:38:27.853437 | revision_date: 2023-08-16T19:07:19 | committer_date: 2023-08-16T19:07:19 | gha_event_created_at: 2023-09-14T19:03:24 | gha_created_at: 2009-07-09T17:00:07
github_id: 247,278 | star_events_count: 499 | fork_events_count: 203
content:
# Generated by Django 2.2.27 on 2022-03-04 11:39
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('oauth_integrations', '0003_livegooglesheetrefreshstatus'),
]
operations = [
migrations.AlterField(
model_name='livegooglesheetrefreshstatus',
name='refresh_error_reason',
field=models.CharField(choices=[(None, 'No Error'), ('token', 'Invalid Token'), ('timeout', 'Data Timeout'), ('other', 'Other...')], default=None, max_length=16, null=True),
),
migrations.AlterField(
model_name='livegooglesheetrefreshstatus',
name='schedule',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='oauth_integrations.LiveGoogleSheetSchedule'),
),
]

---
repo_name: RustPython/RustPython | path: /Lib/copyreg.py | filename: copyreg.py | extension: py | length_bytes: 7,135 | language: Python | src_encoding: UTF-8 | is_vendor: false | is_generated: false
blob_id: 1e8079a0d47fc243c1b3bd1cfefe076a8ed70248 | directory_id: 3dc647cd07a7361ed401e40d2b7cce8c826c8f6c | content_id: dfc463c49a389d59789c982846c3697c456646f1
snapshot_id: 5ddce4a9848b9de8c041ffd2634f83c0105d3f39 | revision_id: b864e5da1f18897fc884180b7093df5aa170024f | branch_name: refs/heads/main
detected_licenses: ["Python-2.0", "CC-BY-4.0", "MIT"] | license_type: permissive | gha_license_id: MIT | gha_language: Rust
visit_date: 2023-09-04T12:38:29.458699 | revision_date: 2023-09-03T12:33:42 | committer_date: 2023-09-03T12:33:42 | gha_event_created_at: 2023-09-14T08:11:45 | gha_created_at: 2018-05-28T19:27:01
github_id: 135,201,145 | star_events_count: 15,815 | fork_events_count: 1,302
content:
"""Helper to provide extensibility for pickle.
This is only useful to add pickle support for extension types defined in
C, not for instances of user-defined classes.
"""
__all__ = ["pickle", "constructor",
"add_extension", "remove_extension", "clear_extension_cache"]
dispatch_table = {}
def pickle(ob_type, pickle_function, constructor_ob=None):
if not callable(pickle_function):
raise TypeError("reduction functions must be callable")
dispatch_table[ob_type] = pickle_function
# The constructor_ob function is a vestige of safe for unpickling.
# There is no reason for the caller to pass it anymore.
if constructor_ob is not None:
constructor(constructor_ob)
def constructor(object):
if not callable(object):
raise TypeError("constructors must be callable")
# Example: provide pickling support for complex numbers.
try:
complex
except NameError:
pass
else:
def pickle_complex(c):
return complex, (c.real, c.imag)
pickle(complex, pickle_complex, complex)
# Support for pickling new-style objects
def _reconstructor(cls, base, state):
if base is object:
obj = object.__new__(cls)
else:
obj = base.__new__(cls, state)
if base.__init__ != object.__init__:
base.__init__(obj, state)
return obj
_HEAPTYPE = 1<<9
# Python code for object.__reduce_ex__ for protocols 0 and 1
def _reduce_ex(self, proto):
assert proto < 2
cls = self.__class__
for base in cls.__mro__:
if hasattr(base, '__flags__') and not base.__flags__ & _HEAPTYPE:
break
else:
base = object # not really reachable
if base is object:
state = None
else:
if base is cls:
raise TypeError(f"cannot pickle {cls.__name__!r} object")
state = base(self)
args = (cls, base, state)
try:
getstate = self.__getstate__
except AttributeError:
if getattr(self, "__slots__", None):
raise TypeError(f"cannot pickle {cls.__name__!r} object: "
f"a class that defines __slots__ without "
f"defining __getstate__ cannot be pickled "
f"with protocol {proto}") from None
try:
dict = self.__dict__
except AttributeError:
dict = None
else:
dict = getstate()
if dict:
return _reconstructor, args, dict
else:
return _reconstructor, args
# Helper for __reduce_ex__ protocol 2
def __newobj__(cls, *args):
return cls.__new__(cls, *args)
def __newobj_ex__(cls, args, kwargs):
"""Used by pickle protocol 4, instead of __newobj__ to allow classes with
keyword-only arguments to be pickled correctly.
"""
return cls.__new__(cls, *args, **kwargs)
def _slotnames(cls):
"""Return a list of slot names for a given class.
This needs to find slots defined by the class and its bases, so we
can't simply return the __slots__ attribute. We must walk down
the Method Resolution Order and concatenate the __slots__ of each
class found there. (This assumes classes don't modify their
__slots__ attribute to misrepresent their slots after the class is
defined.)
"""
# Get the value from a cache in the class if possible
names = cls.__dict__.get("__slotnames__")
if names is not None:
return names
# Not cached -- calculate the value
names = []
if not hasattr(cls, "__slots__"):
# This class has no slots
pass
else:
# Slots found -- gather slot names from all base classes
for c in cls.__mro__:
if "__slots__" in c.__dict__:
slots = c.__dict__['__slots__']
# if class has a single slot, it can be given as a string
if isinstance(slots, str):
slots = (slots,)
for name in slots:
# special descriptors
if name in ("__dict__", "__weakref__"):
continue
# mangled names
elif name.startswith('__') and not name.endswith('__'):
stripped = c.__name__.lstrip('_')
if stripped:
names.append('_%s%s' % (stripped, name))
else:
names.append(name)
else:
names.append(name)
# Cache the outcome in the class if at all possible
try:
cls.__slotnames__ = names
except:
pass # But don't die if we can't
return names
# A registry of extension codes. This is an ad-hoc compression
# mechanism. Whenever a global reference to <module>, <name> is about
# to be pickled, the (<module>, <name>) tuple is looked up here to see
# if it is a registered extension code for it. Extension codes are
# universal, so that the meaning of a pickle does not depend on
# context. (There are also some codes reserved for local use that
# don't have this restriction.) Codes are positive ints; 0 is
# reserved.
_extension_registry = {} # key -> code
_inverted_registry = {} # code -> key
_extension_cache = {} # code -> object
# Don't ever rebind those names: pickling grabs a reference to them when
# it's initialized, and won't see a rebinding.
def add_extension(module, name, code):
"""Register an extension code."""
code = int(code)
if not 1 <= code <= 0x7fffffff:
raise ValueError("code out of range")
key = (module, name)
if (_extension_registry.get(key) == code and
_inverted_registry.get(code) == key):
return # Redundant registrations are benign
if key in _extension_registry:
raise ValueError("key %s is already registered with code %s" %
(key, _extension_registry[key]))
if code in _inverted_registry:
raise ValueError("code %s is already in use for key %s" %
(code, _inverted_registry[code]))
_extension_registry[key] = code
_inverted_registry[code] = key
def remove_extension(module, name, code):
"""Unregister an extension code. For testing only."""
key = (module, name)
if (_extension_registry.get(key) != code or
_inverted_registry.get(code) != key):
raise ValueError("key %s is not registered with code %s" %
(key, code))
del _extension_registry[key]
del _inverted_registry[code]
if code in _extension_cache:
del _extension_cache[code]
def clear_extension_cache():
_extension_cache.clear()
# Standard extension code assignments
# Reserved ranges
# First Last Count Purpose
# 1 127 127 Reserved for Python standard library
# 128 191 64 Reserved for Zope
# 192 239 48 Reserved for 3rd parties
# 240 255 16 Reserved for private use (will never be assigned)
# 256 Inf Inf Reserved for future assignment
# Extension codes are assigned by the Python Software Foundation.
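As a usage note, a reduce function registered through pickle() above takes effect for regular pickling as well; a minimal sketch (the Point class is made up for illustration):

```python
import copyreg
import pickle

class Point:
    def __init__(self, x, y):
        self.x, self.y = x, y

def pickle_point(p):
    # Same (callable, args) shape as pickle_complex above.
    return Point, (p.x, p.y)

copyreg.pickle(Point, pickle_point)

restored = pickle.loads(pickle.dumps(Point(1, 2)))
assert (restored.x, restored.y) == (1, 2)
```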

---
repo_name: hhursev/recipe-scrapers | path: /tests/test_matprat.py | filename: test_matprat.py | extension: py | length_bytes: 3,728 | language: Python | src_encoding: UTF-8 | is_vendor: false | is_generated: false
blob_id: b2a33f5227e9ecaf6fdf0c30d7221d0de419f145 | directory_id: c2c212ba42ebfa35f3b6122344978bc94ec8fa67 | content_id: 41a5c3a247fb26daf6f325bbb156a9e9c125c296
snapshot_id: 0cd6b7db4ef23ca825f2354f5d1ba76076a14813 | revision_id: 8ced0227b3b16c532fc5ebf3060c99ee0452adab | branch_name: refs/heads/main
detected_licenses: ["MIT"] | license_type: permissive | gha_license_id: MIT | gha_language: Python
visit_date: 2023-09-03T07:33:29.684121 | revision_date: 2023-09-01T21:15:50 | committer_date: 2023-09-01T21:15:50 | gha_event_created_at: 2023-09-14T16:34:09 | gha_created_at: 2015-09-14T12:05:00
github_id: 42,446,168 | star_events_count: 1,276 | fork_events_count: 443
content:
from recipe_scrapers.matprat import Matprat
from tests import ScraperTest
class TestMatprat(ScraperTest):
scraper_class = Matprat
def test_host(self):
self.assertEqual("matprat.no", self.harvester_class.host())
def test_canonical_url(self):
self.assertEqual(
"https://www.matprat.no/oppskrifter/gjester/butter-chicken---indisk-smorkylling/",
self.harvester_class.canonical_url(),
)
def test_title(self):
self.assertEqual(
self.harvester_class.title(), "Butter chicken - indisk smørkylling"
)
def test_author(self):
self.assertEqual(self.harvester_class.author(), "MatPrat")
def test_total_time(self):
self.assertEqual(160, self.harvester_class.total_time())
def test_yields(self):
self.assertEqual("6 servings", self.harvester_class.yields())
def test_image(self):
self.assertEqual(
"https://images.matprat.no/dxgehtetqy",
self.harvester_class.image(),
)
def test_ingredients(self):
self.assertEqual(
[
"1 stk. kylling (ca. 1300 g)",
"4 båter hvitløk",
"0,5 ss revet frisk ingefær",
"2 ss sitronsaft",
"1 dl gresk yoghurt",
"0,5 ss chilipulver (helst indisk)",
"1 ss garam masala",
"1 ss rapsolje eller sennepsolje",
"100 g cashewnøtter",
"8 stk. tomat",
"2 ss olje",
"4 båter finhakket hvitløk",
"0,5 ss revet frisk ingefær",
"0,5 ss chilipulver (helst indisk)",
"2 stk. hel kardemomme",
"2 ts hel bukkehornkløver",
"2 ss flytende honning",
"1 stk. grønn chili",
"2 ss smør",
"1 dl fløte",
"0,5 potte frisk koriander",
],
self.harvester_class.ingredients(),
)
def test_instructions(self):
return self.assertEqual(
"""Fjern skinnet på kyllingen og tørk den godt. Kutt dype snitt i kjøttet på hele kyllingen. Finhakk hvitløk og bland med revet ingefær og sitronsaft, og gni kyllingen godt inn med blandingen. Dryss over litt salt. Sett kaldt i 20 minutter.\nBland sammen chilipulver, garam masala, olje og yoghurt. Tørk kyllingen med litt kjøkkenpapir og gni den godt inn med yoghurtblandingen. Plasser kyllingen på et fat og sett den kaldt i minst 4-6 timer, helst over natten.\nStek kyllingen midt i stekeovnen ved 200 °C, eller grill den til den er gyllenbrun og gjennomstekt, ca. 50 min. - 1 time.\nAvkjøl og plukk kjøttet av beina i store biter.\nLag smør- og tomatcurry: Bløtlegg cashewnøtter i litt lunkent vann i minst 30 minutter. Hell av vannet og mos nøttene i hurtigmikser eller med stavmikser. Sett til side.\nDel tomater i biter. Varm en sauteringspanne med olje og fres tomater, hvitløk, ingefær, chilipulver, Kardemomme og bukkehornkløver på middels varme til tomatene er helt myke.\nTilsett cashewpuré og bruk stavmikser eller hurtigmikser til å finmose sausen. Sil gjerne sausen gjennom en grov sikt for å få ut rester av skall, hvis du vil ha den ekstra fin.\nHa sausen tilbake i kjelen og la den småkoke i i noen minutter. Smak til med honning, finhakket grønn chili, salt og pepper. Visp inn fløte og romtemperert smør i den varme sausen.\nLegg kyllingbitene i sausen og la alt bli gjennomvarmt. Pynt med koriander.""",
self.harvester_class.instructions(),
)
def test_ratings(self):
self.assertEqual(5, self.harvester_class.ratings())

---
repo_name: deepwel/Chinese-Annotator | path: /chi_annotator/task_center/task_center_webapi/apis/urls.py | filename: urls.py | extension: py | length_bytes: 260 | language: Python | src_encoding: UTF-8 | is_vendor: false | is_generated: false
blob_id: df3e05e552f79b02eec5c497ed0b48c66e19d614 | directory_id: bf8b77adbb1a6db404339bf70cead22b8e7ff633 | content_id: 656d4b162c5de7f931cf1252663237d2563fe56a
snapshot_id: c93e628435b939e36cc7073f2b6d7a66b2d0a4a8 | revision_id: fb0b2814afbea1622f9270360b025928fed186cf | branch_name: refs/heads/master
detected_licenses: ["Apache-2.0"] | license_type: permissive | gha_license_id: Apache-2.0 | gha_language: JavaScript
visit_date: 2023-09-06T00:17:26.893233 | revision_date: 2023-07-17T06:14:57 | committer_date: 2023-07-17T06:14:57 | gha_event_created_at: 2023-07-17T06:14:59 | gha_created_at: 2017-11-09T03:17:45
github_id: 110,061,626 | star_events_count: 1,054 | fork_events_count: 227
content:
from django.urls import path
from rest_framework.routers import DefaultRouter
from . import views
urlpatterns = [
# First index page ####################################################
path('test_connect/', views.test_connect, name='test_connect'),
]

---
repo_name: microsoftgraph/msgraph-sdk-python | path: /msgraph/generated/models/time_off_request.py | filename: time_off_request.py | extension: py | length_bytes: 2,982 | language: Python | src_encoding: UTF-8 | is_vendor: false | is_generated: false
blob_id: a16bd30e87bfbe53fbdf046a723fe64414623bab | directory_id: 975b2d421d3661e6770b601929d5f11d981d8985 | content_id: c5783d955743d6015214ec4113e102214f9389b9
snapshot_id: a7c551b85daadeebf76ec4ae12668664ea639b42 | revision_id: 27de7ccbe688d7614b2f6bde0fdbcda4bc5cc949 | branch_name: refs/heads/main
detected_licenses: ["MIT"] | license_type: permissive | gha_license_id: MIT | gha_language: Python
visit_date: 2023-09-03T21:45:27.989672 | revision_date: 2023-08-31T06:22:18 | committer_date: 2023-08-31T06:22:18 | gha_event_created_at: 2023-09-14T11:04:11 | gha_created_at: 2022-09-09T14:00:17
github_id: 534,665,999 | star_events_count: 135 | fork_events_count: 18
content:
from __future__ import annotations
import datetime
from dataclasses import dataclass, field
from kiota_abstractions.serialization import Parsable, ParseNode, SerializationWriter
from typing import Any, Callable, Dict, List, Optional, TYPE_CHECKING, Union
if TYPE_CHECKING:
from .schedule_change_request import ScheduleChangeRequest
from .schedule_change_request import ScheduleChangeRequest
@dataclass
class TimeOffRequest(ScheduleChangeRequest):
# The OdataType property
odata_type: Optional[str] = "#microsoft.graph.timeOffRequest"
# The Timestamp type represents date and time information using ISO 8601 format and is always in UTC time. For example, midnight UTC on Jan 1, 2014 is 2014-01-01T00:00:00Z
end_date_time: Optional[datetime.datetime] = None
# The Timestamp type represents date and time information using ISO 8601 format and is always in UTC time. For example, midnight UTC on Jan 1, 2014 is 2014-01-01T00:00:00Z
start_date_time: Optional[datetime.datetime] = None
# The reason for the time off.
time_off_reason_id: Optional[str] = None
@staticmethod
def create_from_discriminator_value(parse_node: Optional[ParseNode] = None) -> TimeOffRequest:
"""
Creates a new instance of the appropriate class based on discriminator value
Args:
parse_node: The parse node to use to read the discriminator value and create the object
Returns: TimeOffRequest
"""
if not parse_node:
raise TypeError("parse_node cannot be null.")
return TimeOffRequest()
def get_field_deserializers(self,) -> Dict[str, Callable[[ParseNode], None]]:
"""
The deserialization information for the current model
Returns: Dict[str, Callable[[ParseNode], None]]
"""
from .schedule_change_request import ScheduleChangeRequest
fields: Dict[str, Callable[[Any], None]] = {
"endDateTime": lambda n : setattr(self, 'end_date_time', n.get_datetime_value()),
"startDateTime": lambda n : setattr(self, 'start_date_time', n.get_datetime_value()),
"timeOffReasonId": lambda n : setattr(self, 'time_off_reason_id', n.get_str_value()),
}
super_fields = super().get_field_deserializers()
fields.update(super_fields)
return fields
def serialize(self,writer: SerializationWriter) -> None:
"""
Serializes information about the current object
Args:
writer: Serialization writer to use to serialize this model
"""
if not writer:
raise TypeError("writer cannot be null.")
super().serialize(writer)
writer.write_datetime_value("endDateTime", self.end_date_time)
writer.write_datetime_value("startDateTime", self.start_date_time)
writer.write_str_value("timeOffReasonId", self.time_off_reason_id)
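A minimal construction sketch for the model above, assuming the generated SDK package is installed (field values are made up):

```python
import datetime

from msgraph.generated.models.time_off_request import TimeOffRequest

req = TimeOffRequest(
    start_date_time=datetime.datetime(2024, 1, 1, tzinfo=datetime.timezone.utc),
    end_date_time=datetime.datetime(2024, 1, 2, tzinfo=datetime.timezone.utc),
    time_off_reason_id="TOR_1",  # made-up reason id
)
print(req.odata_type)  # #microsoft.graph.timeOffRequest
```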

---
repo_name: jart/cosmopolitan | path: /third_party/python/Lib/test/ssltests.py | filename: ssltests.py | extension: py | length_bytes: 1,051 | language: Python | src_encoding: UTF-8 | is_vendor: false | is_generated: false
blob_id: a8f92565f01b36a630b92804996a640b9251a6a6 | directory_id: fdbb74a95924e2677466614f6ab6e2bb13b2a95a | content_id: 5073ae12204b1642b5bfbdb33bdb97ff90f58ef2
snapshot_id: fb11b5658939023977060a7c6c71a74093d9cb44 | revision_id: 0d748ad58e1063dd1f8560f18a0c75293b9415b7 | branch_name: refs/heads/master
detected_licenses: ["ISC", "Python-2.0", "GPL-1.0-or-later", "LicenseRef-scancode-python-cwi", "LicenseRef-scancode-free-unknown", "LicenseRef-scancode-other-copyleft"] | license_type: permissive | gha_license_id: ISC | gha_language: C
visit_date: 2023-09-06T09:17:29.303607 | revision_date: 2023-09-02T03:49:13 | committer_date: 2023-09-02T03:50:18 | gha_event_created_at: 2023-09-14T17:47:58 | gha_created_at: 2020-06-15T14:16:13
github_id: 272,457,606 | star_events_count: 11,887 | fork_events_count: 435
content:
# Convenience test module to run all of the OpenSSL-related tests in the
# standard library.
import ssl
import sys
import subprocess
TESTS = [
'test_asyncio', 'test_ensurepip.py', 'test_ftplib', 'test_hashlib',
'test_hmac', 'test_httplib', 'test_imaplib', 'test_nntplib',
'test_poplib', 'test_ssl', 'test_smtplib', 'test_smtpnet',
'test_urllib2_localnet', 'test_venv', 'test_xmlrpc'
]
def run_regrtests(*extra_args):
print(ssl.OPENSSL_VERSION)
args = [
sys.executable,
'-Werror', '-bb', # turn warnings into exceptions
'-m', 'test',
]
if not extra_args:
args.extend([
'-r', # randomize
'-w', # re-run failed tests with -v
'-u', 'network', # use network
'-u', 'urlfetch', # download test vectors
'-j', '0' # use multiple CPUs
])
else:
args.extend(extra_args)
args.extend(TESTS)
result = subprocess.call(args)
sys.exit(result)
if __name__ == '__main__':
run_regrtests(*sys.argv[1:])

---
repo_name: mountwebs/timeglass | path: /timekeeper.py | filename: timekeeper.py | extension: py | length_bytes: 1,562 | language: Python | src_encoding: UTF-8 | is_vendor: false | is_generated: false
blob_id: 1b0c6b6b80c5292f76505d5561fdf6d730178a22 | directory_id: 2465564c2c22ab2bc210532d44efcbc22a9696fe | content_id: 7112682aadadb0e143cdd43fd542276ec0cdbcff
snapshot_id: 5eb4dd9b7501e8fb44c485e1bb1f23eaee47767b | revision_id: 2f527e190cb90199cdd3a29ea7625e1f561fe01c | branch_name: refs/heads/master
detected_licenses: ["MIT"] | license_type: permissive | gha_license_id: MIT | gha_language: Python
visit_date: 2022-07-16T12:17:19.249978 | revision_date: 2020-05-19T19:29:01 | committer_date: 2020-05-19T19:29:01 | gha_event_created_at: 2020-05-19T17:06:32 | gha_created_at: 2020-04-26T21:51:58
github_id: 259,140,875 | star_events_count: 114 | fork_events_count: 7
content:
import time
from datetime import timedelta
class Timer():
def __init__(self,seconds):
self.initial = seconds
self.remaining = seconds
self.elapsed = 0
self.pause = False
self.done = False
self.active = False
self.last_tick = time.time()
self.start_time = None
self.elapsed_at_pause = 0
def get_remaining_string(self):
conversion = timedelta(seconds=int(self.remaining))
return str(conversion)
def start(self):
self.active = True
self.pause = False
self.done = False
self.last_tick = time.time()
self.start_time = time.time()
def pause_timer(self):
self.active = False
self.pause = True
self.elapsed_at_pause = self.elapsed
def tick(self):
if self.remaining <= 0 and self.active:
self.remaining = 0
self.done = True
self.active = False
elif self.active and not self.done:
now = time.time()
self.elapsed = now - self.start_time + self.elapsed_at_pause
self.remaining = self.initial - self.elapsed
return True
return False
def set_time(self, seconds):
self.initial = seconds
self.reset()
def reset(self):
self.done = False
self.active = False
self.elapsed = 0
self.remaining = self.initial
self.elapsed_at_pause = 0
def __str__(self):
return(f"remaining: {self.remaining}, elapsed: {self.elapsed}")

---
repo_name: open-goal/jak-project | path: /scripts/gsrc/check-for-conflicts.py | filename: check-for-conflicts.py | extension: py | length_bytes: 810 | language: Python | src_encoding: UTF-8 | is_vendor: false | is_generated: false
blob_id: ce638b2a37fdfc23527f3160d76c2576dfa85aaf | directory_id: 54c67306d63bb69a5cf381d12108d3dc98ae0f5d | content_id: eb60925f25292138822e302f8eb694e6b3ee323c
snapshot_id: adf30a3459c24afda5b180e3abe1583c93458a37 | revision_id: d96dce27149fbf58586160cfecb634614f055943 | branch_name: refs/heads/master
detected_licenses: ["ISC"] | license_type: permissive | gha_license_id: ISC | gha_language: Common Lisp
visit_date: 2023-09-01T21:51:16.736237 | revision_date: 2023-09-01T16:10:59 | committer_date: 2023-09-01T16:10:59 | gha_event_created_at: 2023-09-14T13:27:47 | gha_created_at: 2020-08-22T23:55:21
github_id: 289,585,720 | star_events_count: 1,826 | fork_events_count: 131
content:
# Merge tools use specific algorithms or assumptions to detect conflicts
# and not all of them will obviously flag them, even if they use the standard format
#
# So this is to ensure no conflict markers get ignored, at least in goal_src
import os
files_with_unresolved_conflicts = []
for dirpath, subdirs, files in os.walk("./goal_src"):
for filename in files:
# Get the file contents
with open(os.path.join(dirpath, filename), "r") as f:
lines = f.readlines()
for line in lines:
if "<<<<<<<" in line:
files_with_unresolved_conflicts.append(os.path.join(dirpath, filename))
break
if len(files_with_unresolved_conflicts) == 0:
exit(0)
print("There are unresolved conflicts in ./goal_src/")
for file in files_with_unresolved_conflicts:
print(file)
exit(1)

---
repo_name: microsoft/muzic | path: /museformer/museformer/attention/common/blocksparse_common_operations/av_mul/av_mul_1.py | filename: av_mul_1.py | extension: py | length_bytes: 1,875 | language: Python | src_encoding: UTF-8 | is_vendor: false | is_generated: false
blob_id: e354b546b10ca6c2da6822023f4676ba0ddae46e | directory_id: c9cdc07694c4cb60025f7a471d9f7baf06ea48ac | content_id: f2f8b146913b28c8b57616592be1656015118682
snapshot_id: 60d48e562e0c196dd65932c7127801811d8ed2dc | revision_id: bf469715c07c905d24319c10e9a93c5a7cb04979 | branch_name: refs/heads/main
detected_licenses: ["LicenseRef-scancode-generic-cla", "MIT"] | license_type: permissive | gha_license_id: MIT | gha_language: Python
visit_date: 2023-08-18T08:47:38.831559 | revision_date: 2023-08-12T09:58:26 | committer_date: 2023-08-12T09:58:26 | gha_event_created_at: 2023-09-01T10:29:22 | gha_created_at: 2021-06-03T10:06:54
github_id: 373,462,930 | star_events_count: 3,453 | fork_events_count: 327
content:
import torch
from .....blocksparse import BlocksparseMatMul
def do_sample_av_mul_base(self, sample_attn_weights, sample_v, sample_layout, real_part, sample_idx, tgt_len):
if sample_layout is None:
_, head, head_dim = sample_v.shape
return sample_v.new_zeros(tgt_len, 1, head, head_dim)
sample_v = sample_v.transpose(0, 1)[:, None] # (head, 1, reg_len, head_dim)
dsd_matmul_key = (real_part, 'dsd_matmul', self.layer_sv, sample_idx)
if dsd_matmul_key in self.instant_pocket:
dsd_matmul = self.instant_pocket[dsd_matmul_key]
else:
dsd_matmul = BlocksparseMatMul(sample_layout, self.block_size, 'dsd',
device=sample_v.device)
self.instant_pocket[dsd_matmul_key] = dsd_matmul
sample_out = dsd_matmul(sample_attn_weights, sample_v) # (head, 1, tgt_len, head_dim)
sample_out = sample_out.permute(2, 1, 0, 3) # (tgt_len, 1, head, head_dim)
return sample_out
def do_av_mul_for_part(self, attn_weights_inc_part, v, attn_mask, real_part, tgt_len):
attn_weights_for_part = attn_weights_inc_part[real_part]
# a per-sample list of tensors shaped (head, head_selected_blocks, block, block)
bsz = len(attn_weights_for_part)
attn_mask = attn_mask[real_part]
result = []
for sample_idx in range(bsz):
sample_v = v[:, sample_idx]
sample_attn_weights = attn_weights_for_part[sample_idx] # (head, head_selected_blocks, block, block)
sample_layout = attn_mask[sample_idx][0]
sample_out = do_sample_av_mul_base(self, sample_attn_weights, sample_v, sample_layout, real_part,
sample_idx, tgt_len)
result.append(sample_out)
if bsz > 1:
result = torch.cat(result, dim=1) # (tgt_len, bsz, num_heads, head_dim)
else:
result = result[0].contiguous()
return result

---
repo_name: allegro/ralph | path: /src/ralph/lib/polymorphic/tests/tests_models.py | filename: tests_models.py | extension: py | length_bytes: 6,951 | language: Python | src_encoding: UTF-8 | is_vendor: false | is_generated: false
blob_id: 19985df6aa038b957655d7c6a04680ad594514d2 | directory_id: 3abc1fef99ac6ce0b845a1090fae7f6875fee729 | content_id: 3ff7825d6f4e11500b571b711d1fa967f31b433c
snapshot_id: 5ff9165a202e836061c99e8af20214e0d651622f | revision_id: b4a72356f527b1f12c7babd7465d2d7fa3ffb0d3 | branch_name: refs/heads/ng
detected_licenses: ["Apache-2.0"] | license_type: permissive | gha_license_id: Apache-2.0 | gha_language: Python
visit_date: 2023-09-02T01:13:43.672554 | revision_date: 2023-09-01T09:48:38 | committer_date: 2023-09-01T09:48:38 | gha_event_created_at: 2023-09-01T09:44:39 | gha_created_at: 2012-05-17T14:04:57
github_id: 4,359,038 | star_events_count: 1,970 | fork_events_count: 617
content:
# -*- coding: utf-8 -*-
from django.contrib.contenttypes.models import ContentType
from django.db.models import Prefetch
from django.test import TestCase
from ralph.lib.polymorphic.models import Polymorphic
from ralph.lib.polymorphic.tests.models import (
PolymorphicModelBaseTest,
PolymorphicModelTest,
PolymorphicModelTest2,
SomeM2MModel,
SomethingRelated
)
class PolymorphicTestCase(TestCase):
@classmethod
def setUpClass(cls):
super().setUpClass()
cls.sth_related = SomethingRelated.objects.create(name='Rel1')
cls.pol_1 = PolymorphicModelTest.objects.create(
name='Pol1',
sth_related=cls.sth_related
)
cls.pol_2 = PolymorphicModelTest.objects.create(
name='Pol2',
sth_related=cls.sth_related
)
cls.pol_3 = PolymorphicModelTest2.objects.create(
name='Pol3',
another_related=cls.sth_related,
)
def test_polymorphic_metaclass(self):
self.assertIn(
Polymorphic, list(getattr(self.pol_1, '_polymorphic_models'))
)
def test_content_type_save(self):
self.assertEqual(
self.pol_1.content_type,
ContentType.objects.get_for_model(PolymorphicModelTest)
)
def test_get_descendants_models(self):
base = PolymorphicModelBaseTest
self.assertIn(PolymorphicModelTest, base._polymorphic_descendants)
self.assertIn(PolymorphicModelTest2, base._polymorphic_descendants)
def test_polymorphic_queryset(self):
result = []
with self.assertNumQueries(7):
# queries:
# select PolymorphicModelBaseTest
# select content types
# select PolymorphicModelTest
# select SomethingRelated (from sth_related) x2
# select PolymorphicModelTest2
# select SomethingRelated (from another_related)
for item in PolymorphicModelBaseTest.polymorphic_objects.all():
result.append(str(item))
# just get related attribute to force fetching it from DB
item.sth_related
if isinstance(item, PolymorphicModelTest2):
item.another_related
self.assertIn(
'PolymorphicModelTest: {} ({})'.format(
self.pol_1.name, self.pol_1.pk
),
result
)
self.assertIn(
'PolymorphicModelTest2: {} ({})'.format(
self.pol_3.name, self.pol_3.pk
),
result
)
def test_polymorphic_queryset_with_select_related(self):
with self.assertNumQueries(4):
# queries:
# select PolymorphicModelBaseTest
# select content types
# select PolymorphicModelTest
# select PolymorphicModelTest2
for item in PolymorphicModelBaseTest.polymorphic_objects.polymorphic_select_related( # noqa
PolymorphicModelTest=['sth_related'],
PolymorphicModelTest2=['sth_related', 'another_related'],
):
# just get related attribute to force fetching it from DB
item.sth_related
if isinstance(item, PolymorphicModelTest2):
item.another_related
def test_polymorphic_queryset_ordering(self):
r = list(PolymorphicModelBaseTest.polymorphic_objects.order_by('-name'))
self.assertEqual(r, [self.pol_3, self.pol_2, self.pol_1])
def test_polymorphic_queryset_use_regular_iterator(self):
with self.assertNumQueries(1):
list(PolymorphicModelTest.polymorphic_objects.all())
def test_m2m_with_prefetch_related_on_polymorphic_object(self):
sm2mm_1 = SomeM2MModel.objects.create(name='abc')
sm2mm_1.polymorphics = [self.pol_1, self.pol_2]
sm2mm_1 = SomeM2MModel.objects.get(name='abc')
sm2mm_2 = SomeM2MModel.objects.create(name='def')
sm2mm_2.polymorphics = [self.pol_2, self.pol_3]
with self.assertNumQueries(5):
# 5 queries:
# 1) SomeM2MModel
# 2) Content Types (usually cached, but turned off in tests)
# 3) PolymorphicModelBaseTest ids
# 4) PolymorphicModelTest based on 3)
# 5) PolymorphicModelTest2 based on 3)
result = {
sm.name: sm for sm in
SomeM2MModel.objects.prefetch_related(Prefetch(
lookup='polymorphics',
queryset=PolymorphicModelBaseTest.polymorphic_objects.polymorphic_filter( # noqa
some_m2m__in=SomeM2MModel.objects.all()
).all(),
)).order_by('name')
}
self.assertCountEqual(
result['abc'].polymorphics.all(),
[self.pol_1, self.pol_2]
)
self.assertCountEqual(
[inst._meta.model for inst in result['abc'].polymorphics.all()],
[PolymorphicModelTest, PolymorphicModelTest]
)
self.assertCountEqual(
result['def'].polymorphics.all(),
[self.pol_2, self.pol_3]
)
self.assertCountEqual(
[inst._meta.model for inst in result['def'].polymorphics.all()],
[PolymorphicModelTest, PolymorphicModelTest2]
)
def test_m2m_with_prefetch_related_on_polymorphic_object_with_subset(self):
"""
Test if PolymorphicModelBaseTest instance is properly fetched in
prefetch_related when base model (SomeM2MModel in this case) is
filtered and polymorphic_filter still contains filter for all
SomeM2MModel objects.
"""
sm2mm_1 = SomeM2MModel.objects.create(name='abc')
sm2mm_1.polymorphics = [self.pol_1, self.pol_2]
sm2mm_1 = SomeM2MModel.objects.get(name='abc')
sm2mm_2 = SomeM2MModel.objects.create(name='def')
sm2mm_2.polymorphics = [self.pol_2, self.pol_3]
sm2mm_3 = SomeM2MModel.objects.create(name='xyz')
sm2mm_3.polymorphics = [self.pol_2]
sm2mm_4 = SomeM2MModel.objects.create(name='qwerty')
sm2mm_4.polymorphics = [self.pol_2]
with self.assertNumQueries(4):
result = {
sm.name: sm for sm in
SomeM2MModel.objects.filter(name='xyz').prefetch_related(
Prefetch(
lookup='polymorphics',
queryset=PolymorphicModelBaseTest.polymorphic_objects.polymorphic_filter( # noqa
some_m2m__in=SomeM2MModel.objects.all()
).all(),
)
).order_by('name')
}
self.assertCountEqual(
result['xyz'].polymorphics.all(),
[self.pol_2]
)

---
repo_name: qzed/irl-maxent | path: /src/irl_maxent/__init__.py | filename: __init__.py | extension: py | length_bytes: 134 | language: Python | src_encoding: UTF-8 | is_vendor: false | is_generated: false
blob_id: 41e51b3d7b69df93dec316955210f920119d561e | directory_id: 98f26251ba440790de20466afd223178cf5d5e0e | content_id: 8c8e3a521f9e2e810ffad98338c00d1c7cf1841f
snapshot_id: edff21b1a5f09d75f1c1b76aa69ddb9a555ccc0f | revision_id: 0c49544a69228f0462d41c9b3312e550e394cbab | branch_name: refs/heads/master
detected_licenses: ["MIT"] | license_type: permissive | gha_license_id: MIT | gha_language: Jupyter Notebook
visit_date: 2023-07-06T12:00:06.789134 | revision_date: 2023-06-25T14:06:00 | committer_date: 2023-06-25T14:06:00 | gha_event_created_at: 2022-04-12T21:23:25 | gha_created_at: 2019-05-06T23:57:32
github_id: 185,288,428 | star_events_count: 149 | fork_events_count: 45
content:
from . import gridworld
from . import maxent
from . import optimizer
from . import plot
from . import solver
from . import trajectory

---
repo_name: bitdefender/bddisasm | path: /isagenerator/disasmlib.py | filename: disasmlib.py | extension: py | length_bytes: 58,061 | language: Python | src_encoding: UTF-8 | is_vendor: false | is_generated: false
blob_id: 2cece52fa1746592aafc9e431a6ba465f33928cd | directory_id: 7b5a7ff0aededc3afbe9ffc461e5747c26611efb | content_id: af3af0bdbfceba87a3397130052603d8a476769a
snapshot_id: 12ef02756235dcc29e2ebb62478d8b3d769e53bc | revision_id: 727c87ecc44a9035ca01e68a213d8460ee2aecd2 | branch_name: refs/heads/master
detected_licenses: ["Apache-2.0"] | license_type: permissive | gha_license_id: Apache-2.0 | gha_language: C
visit_date: 2023-08-05T12:04:25.453259 | revision_date: 2023-07-21T07:14:31 | committer_date: 2023-07-21T07:14:31 | gha_event_created_at: 2023-06-27T11:57:17 | gha_created_at: 2020-07-09T12:30:49
github_id: 278,359,094 | star_events_count: 817 | fork_events_count: 117
content:
#
# Copyright (c) 2020 Bitdefender
# SPDX-License-Identifier: Apache-2.0
#
import os
import sys
import re
import glob
valid_attributes = {
'MODRM', # Mod r/m is present.
'II64', # Instruction invalid in 64 bit mode.
'F64', # Operand size forced to 64 bit.
'D64', # Operand size defaults to 64 bit.
'O64', # Instruction valid only in 64 bit mode.
'SSECONDB', # Instruction has condition byte.
'COND', # Instruction has the predicate encoded in the lower 4 bits of the opcode.
'VSIB', # Instruction uses VSIB addressing.
'MIB', # Instruction uses MIB addressing.
'LIG', # *vex.L is ignored.
'WIG', # *vex.W is ignored.
'3DNOW', # Instruction uses 3dnow encoding.
'MMASK', # Instruction must have mask specified (mask cannot be k0).
'NOMZ', # Zeroing not allowed with memory addressing.
'LOCKSP', # Special lock - MOV CR on AMD can use LOCK to access CR8 in 32 bit mode.
'NOL0', # Vector length 128 not supported.
'NOA16', # 16 bit addressing not supported.
'NO66', # 0x66 prefix causes #UD.
'NORIPREL', # RIP relative addressing not supported.
'VECT', # Vector instruction.
'S66', # 0x66 prefix changes length even if it is in special map (66, f2, f3).
'BITBASE', # Instruction uses bitbase addressing.
'AG', # Instruction uses address generation, no memory access.
'SHS', # Instruction accesses the shadow stack.
'MFR', # The Mod inside Mod R/M is forced to register. No SIB/disp present.
'CETT', # Instruction is CET tracked.
'OP1DEF', # Operand 1 is default (implicit).
'OP2DEF', # Operand 2 is default (implicit).
'OP2SEXO1', # Operand 2 is sign-extended to the size of the first operand.
'OP3SEXO1', # Operand 3 is sign-extended to the size of the first operand.
'OP1SEXDW', # Operand 1 is sign-extended to the size of the default word.
'PREFIX', # Prefix.
'SERIAL', # Instruction is serializing.
'SIBMEM', # Instruction uses sibmem addressing (AMX instructions).
'I67', # Ignore the address size override (0x67) prefix in 64 bit mode.
'IER', # Ignore embedded rounding for the instruction.
'IWO64', # The VEX/EVEX.W field is ignored outside 64 bit mode, and behaves as if it's 0.
}
#
# Explicit operands types.
#
valid_optype = [
'A', # Direct addressing. Used by far branches.
'B', # The vvvv field inside VEX/EVEX encodes a general purpose register.
'C', # The reg field inside Mod R/M encodes a control register.
'D', # The reg field inside Mod R/M encodes a debug register.
'E', # The rm field inside Mod R/M encodes a general purpose register or memory.
'F', # Implicit flags register.
'G', # The reg field inside Mod R/M encodes a general purpose register.
'H', # The vvvv field inside VEX/EVEX encodes a SIMD register.
'I', # Immediate encoded in instruction bytes.
'J', # Relative offset encoded in instruction bytes.
'K', # The operand is the stack.
'L', # The upper 4-bit of an immediate encode a SIMD register.
'M', # The rm field inside Mod R/M encodes memory.
'N', # The rm field inside Mod R/M encodes a MMX register.
'O', # Moffset addressing.
'P', # The reg field inside Mod R/M encodes a MMX register.
'Q', # The rm field inside Mod R/M encodes a MMX register or memory.
'R', # The rm field inside Mod R/M encodes a general purpose register.
'S', # The reg field inside Mod R/M encodes a segment register.
'T', # The reg field inside Mod R/M encodes a test register.
'U', # The rm field inside Mod R/M encodes a SIMD register.
'V', # The reg field inside Mod R/M encodes a SIMD register.
'W', # The rm field inside Mod R/M encodes a SIMD register or memory.
'X', # DS:rSI addressing.
'Y', # ES:rDI addressing.
'Z', # The low 3 bits inside the opcode encode a general purpose register.
'rB', # The reg field inside Mod R/M encodes a bound register.
'mB', # The rm field inside Mod R/M encodes a bound register or memory.
'rK', # The reg field inside Mod R/M encodes a mask register.
'vK', # The vvvv field inside VEX/EVEX encodes a mask register.
'mK', # The rm field inside Mod R/M encodes a mask register.
'aK', # The aaa field inside EVEX encodes a mask register.
'rM', # The reg field inside Mod R/M encodes the base address of a memory operand.
# Default segment is ES.
'mM', # The rm field inside Mod R/M encodes the base address of a memory operand.
# Default segment is DS.
'rT', # The reg field inside Mod R/M encodes a tile register (AMX extension).
'mT', # The rm field inside Mod R/M encodes a tile register (AMX extension).
'vT', # The vvvv field inside VEX/EVEX encodes a tile register (AMX extension).
'm2zI', # Bits [1,0] of the immediate byte which encodes the fourth register.
]
# Operand sizes.
# Unless otherwise stated, where multiple sizes are given, the correct size is selected by the
# operand size or vector length as follows:
# - the first size if operand size is 16-bit or vector length is 128-bit
# - the second size if operand size is 32-bit or vector length is 256-bit
# - the third size if the operand size is 64-bit or vector length is 512-bit.
# If only two sizes are given, only 16-bit and 32-bit operand sizes are considered, unless otherwise
# indicated.
# If only a size is given, that is available in all modes and with all operand sizes.
valid_opsize = [
'a', # 2 x 16 bits (16-bit opsize) or 2 x 32 bits (32-bit opsize).
# Fixed integer sizes.
'b', # 8 bits.
'w', # 16 bits.
'd', # 32 bits.
'q', # 64 bits.
# Variable integer sizes.
'z', # 16 bits (16-bit opsize) or 32 bits (32 or 64-bit opsize).
'v', # 16, 32 or 64 bits.
'y', # 64 bits (64-bit opsize), 32 bits otherwise.
'yf', # 64 bits (64-bit mode), 32 bits (16, 32-bit opsize).
's', # 48 or 80 bits descriptor.
'p', # 32, 48 or 80 bits pointer.
'l', # 64 (16 or 32-bit opsize) or 128 bits (64-bit opsize).
# FPU sizes.
'fa', # 80 bits packed BCD.
'fw', # 16 bits real number.
'fd', # 32 bits real number.
'fq', # 64 bits real number.
'ft', # 80 bits real number.
'fe', # 14 bytes or 28 bytes FPU environment.
'fs', # 94 bytes or 108 bytes FPU state.
# SIMD sizes.
'dq', # 128 bits.
'qq', # 256 bits.
'oq', # 512 bits.
'ev', # 1/8 of vlen: 16, 32 or 64 bits.
'qv', # 1/4 of vlen: 32, 64 or 128 bits.
'hv', # 1/2 of vlen: 64, 128 or 256 bits.
'x', # 128 bits (128-bit vlen) or 256 bits (256-bit vlen).
'uv', # 256 bits (256-bit vlen) or 512 bits (512-bit vlen).
'fv', # 128, 256 or 512 bits.
'pd', # 128 or 256 bits.
'ps', # 128 or 256 bits.
'ph', # Packed FP16 values.
'sd', # 128 bits scalar element (double precision).
'ss', # 128 bits scalar element (single precision).
'sh', # FP16 Scalar element.
# VSIB addressing.
'vm32x', # VSIB addressing, using DWORD indices in XMM register, select 32/64 bit.
'vm32y', # VSIB addressing, using DWORD indices in YMM register, select 32/64 bit.
'vm32z', # VSIB addressing, using DWORD indices in ZMM register, select 32/64 bit.
'vm32h', # VSIB addressing, using DWORD indices in half register, select 32/64 bit.
'vm32n', # VSIB addressing, using DWORD indices in normal register, select 32/64 bit.
'vm64x', # VSIB addressing, using QWORD indices in XMM register, select 32/64 bit.
'vm64y', # VSIB addressing, using QWORD indices in YMM register, select 32/64 bit.
'vm64z', # VSIB addressing, using QWORD indices in ZMM register, select 32/64 bit.
'vm64h', # VSIB addressing, using QWORD indices in half register, select 32/64 bit.
'vm64n', # VSIB addressing, using QWORD indices in normal register, select 32/64 bit.
# MIB addressing.
'mib', # MIB addressing, the base & the index are used to form a pointer.
# Stack sizes and partial access.
'v2', # Two stack words.
'v3', # Three stack words.
'v4', # Four stack words.
'v5', # Five stack words.
'v8', # Eight stack words.
# Misc and special sizes.
'?', # Unknown operand size. Depends on many factors (for example, XSAVE).
'0', # Used for instructions that do not actually access any memory.
'asz', # The size of the operand is given by the current addressing mode.
'ssz', # The size of the operand is given by the current stack mode.
'rx', # 512 bytes extended state.
'cl', # 32/64/128 bytes - the size of one cache line.
'12', # 4 bytes (0) + 8 bytes (old SSP), used by SAVEPREVSSP.
't', # A tile register. The size varies depending on execution environment, but can be as high as 1K.
'384', # 384 bits representing a Key Locker handle.
'512', # 512 bits representing a Key Locker handle.
'4096', # 4096 bits representing an MSR address/value table.
]
# Implicit/fixed operands. Self explanatory.
valid_impops = {# register size
'AH' : ('AH', 'b'), # AH register.
'AL' : ('rAX', 'b'), # AL register.
'AX' : ('rAX', 'w'), # AX register.
'EAX' : ('rAX', 'd'), # EAX register.
'RAX' : ('rAX', 'q'), # RAX register.
'eAX' : ('rAX', 'z'), # AX or EAX register, depending on op size.
'rAX' : ('rAX', 'v'), # AX, EAX or RAX register, depending on op size.
'yAX' : ('rAX', 'y'), # EAX or RAX register, depending on op size.
'CL' : ('rCX', 'b'), # CL register.
'ECX' : ('rCX', 'd'), # ECX register.
'RCX' : ('rCX', 'q'), # RCX register.
'eCX' : ('rCX', 'z'), # CX or ECX register.
'rCX' : ('rCX', 'v'), # CX, ECX or RCX register, depending on op size.
'yCX' : ('rCX', 'y'), # ECX or RCX register, depending on op size.
'aCX' : ('rCX', 'asz'), # CX, ECX or RCX register, depending on address size.
'DX' : ('rDX', 'w'), # DX register.
'EDX' : ('rDX', 'd'), # EDX register.
'RDX' : ('rDX', 'q'), # RDX register.
'eDX' : ('rDX', 'z'), # DX or EDX register, depending on op size.
'rDX' : ('rDX', 'v'), # DX, EDX or RDX register, depending on op size.
'yDX' : ('rDX', 'y'), # EDX or RDX register, depending on op size.
'EBX' : ('rBX', 'd'), # EBX register.
'RBX' : ('rBX', 'q'), # RBX register.
'rBX' : ('rBX', 'v'), # BX, EBX or RBX register, depending on op size.
'yBX' : ('rBX', 'y'), # EBX or RBX register, depending on op size.
'rBP' : ('rBP', 'v'), # BP, EBP or RBP register, depending on op size.
'sBP' : ('rBP', 'ssz'), # BP, EBP or RBP register, depending on stack size.
'rSP' : ('rSP', 'v'), # SP, ESP or RSP register, depending on op size.
'sSP' : ('rSP', 'ssz'), # SP, ESP or RSP register, depending on stack size.
'aSI' : ('rSI', 'asz'), # SI, ESI, or RSI register, depending on address size.
'aDI' : ('rDI', 'asz'), # DI, EDI, or RDI register, depending on address size.
'R8' : ('rR8', 'q'), # R8 register.
'R9' : ('rR9', 'q'), # R9 register.
'R11' : ('rR11', 'q'), # R11 register.
'rIP' : ('rIP', 'v'), # IP, EIP or RIP, depending on op size.
'yIP' : ('rIP', 'yf'), # EIP in 16/32 bit mode, or RIP in 64 bit mode.
'1' : ('1', 'b'), # Constant 1.
'XMM0' : ('XMM0', 'dq'), # XMM0 register.
'XMM1' : ('XMM1', 'dq'), # XMM1 register.
'XMM2' : ('XMM2', 'dq'), # XMM2 register.
'XMM3' : ('XMM3', 'dq'), # XMM3 register.
'XMM4' : ('XMM4', 'dq'), # XMM4 register.
'XMM5' : ('XMM5', 'dq'), # XMM5 register.
'XMM6' : ('XMM6', 'dq'), # XMM6 register.
'XMM7' : ('XMM7', 'dq'), # XMM7 register.
'ST(0)' : ('ST(0)', 'ft'), # ST(0) register.
'ST(i)' : ('ST(i)', 'ft'), # ST(i) register.
'CS' : ('CS', 'v'), # CS register.
'SS' : ('SS', 'v'), # SS register.
'DS' : ('DS', 'v'), # DS register.
'ES' : ('ES', 'v'), # ES register.
'FS' : ('FS', 'v'), # FS register.
'GS' : ('GS', 'v'), # GS register.
'CR0' : ('CR0', 'yf'), # CR0 register.
'XCR' : ('XCR', 'q'), # An XCR register.
'XCR0' : ('XCR0', 'q'), # XCR0 register.
'MSR' : ('MSR', 'q'), # A MSR.
'TSC' : ('TSC', 'q'), # TSC register.
'TSCAUX' : ('TSCAUX', 'q'), # TSC_AUX register.
'SCS' : ('SCS', 'q'), # IA32_SYSENTER_CS register.
'SEIP' : ('SEIP', 'q'), # IA32_SYSENTER_EIP register.
'SESP' : ('SESP', 'q'), # IA32_SYSENTER_ESP register.
'FSBASE' : ('FSBASE', 'q'), # IA32_FS_BASE register.
'GSBASE' : ('GSBASE', 'q'), # IA32_GS_BASE register.
'KGSBASE' : ('KGSBASE', 'q'), # IA32_KERNEL_GS_BASE register.
'STAR' : ('STAR', 'q'), # IA32_STAR register.
'LSTAR' : ('LSTAR', 'q'), # IA32_LSTAR register.
'FMASK' : ('FMASK', 'q'), # IA32_FMASK register.
'GDTR' : ('GDTR', 's'), # GDT register.
'IDTR' : ('IDTR', 's'), # IDT register.
'LDTR' : ('LDTR', 'w'), # LDT register.
'TR' : ('TR', 'w'), # Task register.
'BANK' : ('BANK', '?'), # A register bank.
'X87CONTROL':('X87CONTROL', 'w'), # X87 control register.
'X87TAG' : ('X87TAG', 'w'), # X87 tag register.
'X87STATUS': ('X87STATUS', 'w'), # X87 status register.
'MXCSR' : ('MXCSR', 'd'), # MXCSR register.
'PKRU' : ('PKRU', 'd'), # PKRU register.
'SSP' : ('SSP', 'yf'), # Shadow stack pointer. 32 bit in protected/compat mode, 64 in long mode.
'SMT' : ('SMT', '4096'),# Source MSR table, encoded in [RSI], up to 4096 bits long (64 entries x 64 bits per entry).
'DMT' : ('DMT', '4096'),# Value MSR table, encoded in [RDI], up to 4096 bits long (64 entries x 64 bits per entry).
# Implicit memory operands.
'pAXb' : ('pAX', 'b'), # Implicit byte [rAX], used by MONITOR and MONITORX. Can be overridden.
'pCXdq' : ('pCX', 'dq'), # Implicit xmmword [rCX], used by RMPADJUST. Can be overridden.
'pBXALb' : ('pBXAL', 'b'), # Implicit [RBX + AL], as used by XLAT.
'pDIq' : ('pDI', 'q'), # Implicit qword [RDI].
'pDIdq' : ('pDI', 'dq'), # Implicit xmmword [RDI].
# Implicit shadow stack accesses.
'SHS' : ('SHS', 'q'), # Shadow stack (SSP) implicit access, 1 qword (use by CET instructions).
'SHS0' : ('SHS0', 'q'), # Shadow stack (IA32_PL0_SSP) implicit access, 1 qword (use by CET instructions).
'SHSI' : ('SHS', 'v2'), # Shadow stack load & discard, 2 elements (INCSSPD/INCSSPQ).
'SHSS' : ('SHS', '12'), # Shadow stack read & store 4 + 8 bytes (SAVEPREVSSP).
'SHS1' : ('SHSP', 'v'), # Shadow stack push/pop, 1 word.
'SHS2' : ('SHSP', 'v2'), # Shadow stack push/pop, 2 words.
'SHS3' : ('SHSP', 'v3'), # Shadow stack push/pop, 3 words.
'SHS4' : ('SHSP', 'v4'), # Shadow stack push/pop, 4 words.
# User Interrupt Flag.
'UIF' : ('UIF', 'b'), # User Interrupt Flag, stored with size of 1 byte, although it is 1 bit.
}
# If an operand type is not present here, then that operand is implicit & it's not encoded inside the instruction.
operand_encoding = {
'A' : 'D', # Immediate, encoded directly in the instruction bytes.
'B' : 'V', # VEX/EVEX.vvvv encoded general purpose register.
'C' : 'R', # Modrm.reg encoded control register.
'D' : 'R', # Modrm.reg encoded debug register.
'E' : 'M', # Modrm.rm encoded general purpose register or memory.
'G' : 'R', # Modrm.reg encoded general purpose register.
'H' : 'V', # VEX/EVEX.vvvv encoded vector register.
'I' : 'I', # Immediate, encoded directly in the instruction bytes.
'J' : 'D', # Relative offset, encoded directly in the instruction bytes.
'L' : 'L', # Register encoded in an immediate.
'M' : 'M', # Modrm.rm encoded memory.
'N' : 'M', # Modrm.rm encoded MMX register.
'O' : 'D', # Absolute memory encoded directly in the instruction.
'P' : 'R', # Modrm.reg encoded MMX register.
'Q' : 'M', # Modrm.rm encoded MMX register or memory.
'R' : 'M', # Modrm.rm encoded general purpose register.
'S' : 'R', # Modrm.reg encoded segment register.
'T' : 'R', # Modrm.reg encoded test register.
'U' : 'M', # Modrm.rm encoded vector register.
'V' : 'R', # Modrm.reg encoded vector register.
'W' : 'M', # Modrm.rm encoded vector register or memory.
'Z' : 'O', # General purpose register encoded in opcode low 3 bit.
'rB' : 'R', # Modrm.reg encoded bound register.
'mB' : 'M', # Modrm.rm encoded bound register or memory.
'rK' : 'R', # Modrm.reg encoded mask register.
'vK' : 'V', # VEX/EVEX.vvvv encoded mask register.
'mK' : 'M', # Modrm.rm encoded mask register or memory.
'aK' : 'A', # EVEX.aaa encoded mask register.
'mR' : 'R', # Modrm.reg encoded memory.
'mM' : 'M', # Modrm.rm encoded memory (always).
'1' : '1', # Constant 1.
'CL' : 'C', # CL register.
'ST(i)' : 'M', # Modrm.rm encoded FPU register.
}
valid_prefixes = [
'REP', # Rep prefix is accepted.
'REPC', # Conditional rep prefix is accepted.
'HLE', # Hardware Lock Elision accepted.
'BND', # Bound prefix accepted (MPX).
'LOCK', # Lock prefix accepted.
'BH', # Branch hints accepted.
'XACQUIRE', # Xacquire prefix accepted.
'XRELEASE', # Xrelease prefix accepted.
'HLEWOL', # HLE prefix is accepted without lock - used by MOV instructions.
'DNT', # Do Not Track prefix accepted (CET).
]
valid_access = [
'N', # No access.
'P', # Prefetch access.
'R', # Read.
'W', # Write.
'CR', # Conditional read.
'CW', # Conditional write.
'RW', # Read-Write.
'CRW', # Conditional Read-Write.
'RCW', # Read-Conditional Write.
'CRCW', # Conditional Read-Conditional Write.
]
valid_flags = [
'CF', # Carry.
'PF', # Parity.
'AF', # Auxiliary.
'ZF', # Zero.
'SF', # Sign.
'TF', # Trap.
'IF', # Interrupt.
'DF', # Direction.
'OF', # Overflow.
'IOPL', # I/O privilege level.
'NT', # Nested Task.
'RF', # Resume Flag.
    'VM',   # V8086 mode.
'AC', # Alignment Check.
'VIF', # Virtual IF.
'VIP', # Virtual IP.
'ID' # CPUID ID flag.
]
valid_flag_op = [
'm', # modified.
't', # tested.
'0', # cleared.
'1', # set.
'u', # undefined.
'n', # not accessed.
]
valid_cpu_modes = [
'r0', # Ring 0.
'r1', # Ring 1.
'r2', # Ring 2.
'r3', # Ring 3.
'real', # Real mode.
'v8086', # V8086 mode.
'prot', # Protected mode.
'compat', # Compatibility mode.
'long', # Long mode.
'smm', # System Management Mode.
'smm_off', # Outside SMM.
'sgx', # Software Guard Extensions SGX enclave.
'sgx_off', # Outside SGX.
'tsx', # Transactional Synchronization Extensions.
'tsx_off', # Outside TSX.
'vmxr', # VMX root.
'vmxn', # VMX non-root.
'vmxr_seam', # VMX root SEAM.
'vmxn_seam', # VMX non-root SEAM.
'vmx_off', # Outside VMX operation.
]
valid_mode_groups = [
"ring",
"mode",
"vmx",
"other",
]
valid_ring_modes = [
"r0",
"r1",
"r2",
"r3",
]
valid_mode_modes = [
"real",
"v8086",
"prot",
"compat",
"long",
]
valid_vmx_modes = [
"vmxr",
"vmxn",
"vmxr_seam",
"vmxn_seam",
"vmx_off",
]
valid_other_modes = [
"smm",
"smm_off",
"sgx",
"sgx_off",
"tsx",
"tsx_off",
]
valid_mode_map = {
"ring" : valid_ring_modes,
"mode" : valid_mode_modes,
"vmx" : valid_vmx_modes,
"other" : valid_other_modes,
}
valid_decorators = [
'{K}', # Masking support.
'{z}', # Zeroing support.
    '{sae}',    # Suppress All Exceptions.
'{er}', # Embedded Rounding.
'|B32', # Broadcast 32.
'|B64', # Broadcast 64.
'|B16', # Broadcast 16.
]
valid_tuples = [
'fv', # Full Vector, Load+Op (Full Vector Dword/Qword).
'hv', # Half Vector, Load+Op (Half Vector).
    'qv',   # Quarter Vector, Load+Op (Quarter Vector, FP16).
'fvm', # Full Vector Memory, Load/store or subDword full vector.
'hvm', # Half Vector Memory, SubQword Conversion.
'qvm', # Quarter Vector Memory, SubDword Conversion.
'ovm', # Oct Vector Memory, SubWord Conversion.
'dup', # Dup, VMOVDDUP.
'm128', # Mem 128, Shift count from memory.
't1s8', # Tuple 1 Scalar, 8 bit, 1Tuple less than Full Vector.
't1s16', # Tuple 1 Scalar, 16 bit, 1Tuple less than Full Vector.
't1s', # Tuple 1 Scalar, 32/64 bit, 1Tuple less than Full Vector.
't1f', # Tuple 1 Fixed, 1 Tuple memsize not affected by EVEX.W.
't2', # Tuple 2, Broadcast (2 elements).
't4', # Tuple 4, Broadcast (4 elements).
't8', # Tuple 8, Broadcast (8 elements).
    't1_4x',    # Tuple 1 scalar, 4 x 32 bit (as used by the 4FMA instructions).
]
absent_op = ['n/a', 'nil']
class InvalidEncodingException(Exception):
def __init__(self, value):
self.value = value
def __str__(self):
return repr(self.value)
class ParseLineException(Exception):
def __init__(self, value):
self.value = value
def __str__(self):
return repr(self.value)
def reverse_dict(d):
r = {}
for k in d:
r[d[k]] = k
return r
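# Example (illustrative): reverse_dict({'CF': 'm', 'ZF': 't'}) == {'m': 'CF', 't': 'ZF'}.
# This assumes the values are unique and hashable; duplicate values keep only
# the last key encountered.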
def my_str(x):
if x is None:
return x
else:
return str(x)
#
# CPUID feature flags.
#
class CpuidFeatureFlag():
def __init__(self, finfo):
self.Name = finfo["name"]
self.Leaf = finfo["leaf"]
self.SubLeaf = finfo["subleaf"]
self.Reg = finfo["reg"]
self.Bit = finfo["bit"]
def __str__(self):
return "%s: %s, %s, %s, %s" % (self.Name, self.Leaf, self.SubLeaf, self.Reg, self.Bit)
#
# Operand description
#
class Operand():
def __init__(self, op, access, flags, imp = False):
self.Raw = op
self.Type = 0
self.Size = 0
self.Flags = flags
self.Decorators = []
self.Access = []
self.Block = 0
self.Encoding = 'S'
self.Implicit = imp
orig = op
# Handle block registers.
if op.endswith('+3'):
self.Block = 4
op = op.replace('+3', '')
elif op.endswith('+1'):
self.Block = 2
op = op.replace('+1', '')
else:
m = re.match(r'XMM(\d)-(\d)', op)
if m:
start = m.group(1)
end = m.group(2)
self.Block = int(end) - int(start) + 1
op = 'XMM' + start
# Handle the decorators.
for dec in valid_decorators:
if -1 != op.find(dec):
# Found decorator.
self.Decorators.append(dec)
# Remove it from the opstring.
op = op.replace(dec, "")
# Handle hard-coded operators - those that are implicit/are not encoded anywhere.
if op in valid_impops:
self.Type, self.Size = valid_impops[op][0], valid_impops[op][1]
# Now handle explicit operators.
else:
# Attempt a match inside the explicit operands map.
for opt in valid_optype:
if op.startswith(opt):
self.Type = opt
op = op.replace(opt, "")
break
# Now the operand size. After parsing the decorator and the operand type, we should be left with
# the operand size only.
if self.Type in ['rK', 'mK', 'vK', 'aK'] and not op in valid_opsize:
self.Size = 'q'
elif op in valid_opsize:
self.Size = op
else:
raise InvalidEncodingException('Invalid operand size specified: ' + orig)
if self.Type in operand_encoding:
self.Encoding = operand_encoding[self.Type]
elif self.Raw in operand_encoding:
self.Encoding = operand_encoding[self.Raw]
if imp and 'OPDEF' not in self.Flags:
self.Flags.append('OPDEF')
self.Access = access
def __str__(self):
if True:
return self.Raw
#
# Prefixes.
#
class Prefix():
def __init__(self, prefix):
self.Mnemonic = prefix["mnemonic"]
self.Encoding = prefix["encoding"]
def __str__(self):
return self.Mnemonic
#
# Instructions.
#
class Instruction():
def __init__(self, iinfo):
# Fill in raw instruction information
self.Mnemonic = iinfo["mnemonic"]
self.RawEnc = iinfo["encoding"]
self.Flags = iinfo["flags"]
self.Prefmap = iinfo["prefixes"]
self.Set = iinfo["set"]
self.Category = iinfo["cat"]
self.Class = iinfo["class"]
self.Rwm = iinfo["rwm"]
self.Id = iinfo["cff"] or self.Set
self.Tuple = iinfo["tuple"]
self.ExClass = iinfo["exclass"]
self.RevFlagsAccess = iinfo["flgaccess"]
self.Modes = iinfo["modes"]
self.FpuFlags = iinfo["fpuflg"]
        # First redirection class: opcodes
self.Opcodes = []
self.Prefixes = []
self.DecoFlags = []
# Second redirection class: Modrm
self.HasModrm = self.ModrmRedirAfterMpref = False
self.Mod = self.Reg = self.Rm = None
# Third redirection class: mandatory prefix.
self.Np = self.MustHave66 = self.MustHaveF2 = self.MustHaveF3 = False
# Fourth redirection class: operating mode
self.RedM16 = self.RedM32 = self.RedM64 = False
# Fifth redirection class: default operand size
self.RedDs16 = self.RedDs32 = self.RedDs64 = self.RedDDs64 = self.RedFDs64 = False
# Sixth redirection class: default address size
self.RedAs16 = self.RedAs32 = self.RedAs64 = False
        # Seventh redirection class: rex, rex.w, rep, repz, rip rel
self.RedRexB = self.RedRexW = self.RedRep = self.Red64 = self.RedF3 = self.RedRipRel = False
# Misc - vendor
self.Vendor = None
# Misc - feature.
self.Feature = None
# XOP, VEX and EVEX classes.
self.Vex = self.Xop = self.Evex = self.Mvex = False
self.M = self.P = self.L = self.W = None
# Now parse each info chunk and extract the actual data
for t in iinfo["encoding"].split(' '):
if '0x66' == t and not self.Opcodes and not (self.Xop or self.Vex or self.Evex):
self.Prefixes.append(0x66)
self.MustHave66 = True
elif '0xF3' == t and not self.Opcodes and not (self.Xop or self.Vex or self.Evex):
self.Prefixes.append(0xF3)
self.MustHaveF3 = True
elif '0xF2' == t and not self.Opcodes and not (self.Xop or self.Vex or self.Evex):
self.Prefixes.append(0xF2)
self.MustHaveF2 = True
elif 'NP' == t:
self.Np = True
elif 'a0xF3' == t:
self.Prefixes.append(0xF3)
self.RedF3 = True
elif 'o64' == t:
self.Red64 = True
elif 'rexw' == t:
self.RedRexW = True
elif 'rexb' == t:
self.RedRexB = True
elif 'rep' == t:
self.RedRep = True
elif 'riprel' == t:
self.RedRipRel = True
elif 'ds16' == t:
self.RedDs16 = True
elif 'ds32' == t:
self.RedDs32 = True
elif 'ds64' == t:
self.RedDs64 = True
elif 'dds64' == t:
self.RedDDs64 = True
elif 'fds64' == t:
self.RedFDs64 = True
elif 'as16' == t:
self.RedAs16 = True
elif 'as32' == t:
self.RedAs32 = True
elif 'as64' == t:
self.RedAs64 = True
elif t.startswith('/'):
self.HasModrm = True
self.Flags.append('MODRM')
if t.endswith(':mem'):
self.Mod = 'mem'
if t.endswith('reg'):
self.Mod = 'reg'
t = t.replace(':mem', '').replace(':reg', '')
for i in range(0, 8):
if '/%d' % i == t:
self.Reg = i
if re.match(r'0x[0-9a-fA-F]{2}', t[1:]):
mrm = int(t[1:], 16)
if 0xC0 == (mrm & 0xC0):
self.Mod = 'reg'
else:
self.Mod = 'mem'
self.Rm = mrm & 7
self.Reg = (mrm >> 3) & 7
elif 'modrm' == t:
self.HasModrm = True
self.Flags.append('MODRM')
elif t.startswith('mod:'):
self.Mod = t[4:]
if self.Mod not in ['mem', 'reg']:
raise InvalidEncodingException('Invalid encoding: illegal "mod" modifier')
elif t.startswith('reg:'):
self.Reg = t[4:]
if self.Reg not in ['0', '1', '2', '3', '4', '5', '6', '7']:
raise InvalidEncodingException('Invalid encoding: illegal "reg" value')
self.Reg = int(self.Reg)
elif t.startswith('rm:'):
self.Rm = t[3:]
if self.Rm not in ['0', '1', '2', '3', '4', '5', '6', '7']:
raise InvalidEncodingException('Invalid encoding: illegal "rm" value')
self.Rm = int(self.Rm)
elif t.startswith('modrmpmp'):
self.ModrmRedirAfterMpref = True
elif t == 'xop':
self.Xop = True
elif t == 'vex':
self.Vex = True
elif t == 'evex':
self.Evex = True
elif t == 'mvex':
self.Mvex = True
elif t.startswith('m:'):
self.M = t[2:]
if self.M not in ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'A', 'B', 'C']:
raise InvalidEncodingException('Invalid encoding: illegal "mmmmm" value')
self.M = int(self.M, 16)
elif t.startswith('p:'):
self.P = t[2:]
if not self.P in ['0', '1', '2', '3']:
raise InvalidEncodingException('Invalid encoding: illegal "pp" value!')
self.P = int(self.P)
elif t.startswith('l:'):
self.L = t[2:]
if self.L == '128':
self.L = 0
elif self.L == '256':
self.L = 1
elif self.L == '512':
self.L = 2
elif self.L == 'x':
self.L = None
elif self.L == 'i':
self.L = None
if 'LIG' not in self.Flags:
self.Flags.append('LIG')
elif self.L in ['0', '1', '2', '3']:
self.L = int(self.L)
else:
raise InvalidEncodingException('Invalid encoding: illegal "l" value!')
elif t.startswith('w:'):
self.W = self.RawW = t[2:]
if self.W == 'x':
self.W = None
elif self.W == 'i':
self.W = None
if 'WIG' not in self.Flags:
self.Flags.append('WIG')
elif self.W in ['0', '1']:
self.W = int(self.W)
else:
raise InvalidEncodingException('Invalid encoding: illegal "w" value!')
elif re.match(r'0x[0-9a-fA-F]{2}', t):
self.Opcodes.append(int(t, 16))
elif t in ['intel', 'amd', 'via', 'cyrix']:
self.Vendor = t
elif t in ['mpx', 'cet', 'cldm', 'piti']:
self.Feature = t
elif 'vsib' == t:
self.HasVsib = True
if 'VSIB' not in self.Flags:
self.Flags.append('VSIB')
elif 'mib' == t:
self.HasMib = True
if 'MIB' not in self.Flags:
self.Flags.append('MIB')
elif 'bitbase' == t:
self.HasBitbase = True
if 'BITBASE' not in self.Flags:
self.Flags.append('BITBASE')
elif 'sibmem' == t:
self.HasSibMem = True
if 'SIBMEM' not in self.Flags:
self.Flags.append('SIBMEM')
elif t in ['ib', 'iw', 'iz', 'iv', 'id', 'cb', 'cz', 'cv', 'cp', 'is4']:
# Not used for now, but they must be specified, for a complete instruction encoding specification.
pass
elif t.startswith('evex.'):
tokens2 = t.split('.')
self.Evex = True
self.M = self.P = self.L = self.W = 0
for t2 in tokens2[1:]:
# Handle the L specifier
if t2 == 'LIG':
self.L = None
if 'LIG' not in self.Flags:
self.Flags.append('LIG')
elif t2 == 'LANY':
self.L = None
elif t2 == '128' or t2 == 'LZ' or t2 == 'L0':
self.L = 0
elif t2 == '256' or t2 == 'L1':
self.L = 1
elif t2 == '512' or t2 == 'L2':
self.L = 2
# Handle the W specifier
elif t2 == 'WIG':
self.W = None
if 'WIG' not in self.Flags:
self.Flags.append('WIG')
elif t2 == 'WANY':
self.W = None
elif t2 == 'W0':
self.W = 0
elif t2 == 'W1':
self.W = 1
# Handle compressed prefix
elif t2 == '66':
self.P = 1
elif t2 == 'F3':
self.P = 2
elif t2 == 'F2':
self.P = 3
# Handle opcode map
elif t2 == '0F':
self.M = 1
elif t2 == '0F38':
self.M = 2
elif t2 == '0F3A':
self.M = 3
elif t2 in ['NDS', 'NDD', 'DDS']:
pass
else:
raise InvalidEncodingException('Invalid encoding: unknown evex token: %s/%s' % (t, t2))
else:
raise InvalidEncodingException('Invalid encoding: Unknown token: %s' % t)
# Pre-process the explicit operands. The mask register is contained as a decorator, but put it as a direct
# operand as well. The access flag is already present in rwm.
if len(iinfo["expops"]) >= 1 and iinfo["expops"][0].find("{K") > 0:
iinfo["expops"].insert(1, 'aKq')
# Parse the explicit instruction operands.
self.ExpOps = self.process_operands(iinfo["expops"], False)
# Parse the implicit instruction operands.
self.ImpOps = self.process_operands(iinfo["impops"], True)
# Post-process the operands. We fill up the flags with additional info based on the operands.
for op in self.ExpOps:
for deco in op.Decorators:
self.DecoFlags.append({'{K}':'MASK', '{z}':'ZERO', '{sae}':'SAE', '{er}':'ER', '|B32':'BROADCAST', '|B64':'BROADCAST', '|B16':'BROADCAST'}[deco])
if op.Type in ['U', 'V', 'W', 'H', 'L'] and 'VECT' not in self.Flags:
self.Flags.append('VECT')
# VEX, XOP, EVEX and MVEX instructions are not valid in real or v8086 modes.
if self.Vex or self.Xop or self.Evex or self.Mvex:
if 'real' in self.Modes:
self.Modes.remove('real')
if 'v8086' in self.Modes:
self.Modes.remove('v8086')
if 'long' not in self.Modes and 'II64' not in self.Flags:
self.Flags.append('II64')
if 'long' in self.Modes and 'prot' not in self.Modes and 'O64' not in self.Flags:
self.Flags.append('O64')
# Split the instruction into encoding entities.
e = self.split_encoding()
if self.Vex or self.Xop or self.Evex:
self.Spec = {
"mmmmm" : e[0],
"opcodes" : e[1],
"modrm" : e[2],
"pp" : e[3],
"l" : e[4],
"w" : e[5],
}
else:
self.Spec = {
"opcodes" : e[0],
"modrm" : e[1],
"mpre" : e[2],
"mode" : e[3],
"dsize" : e[4],
"asize" : e[5],
"opre" : e[6],
"vendor" : e[7],
"feature": e[8]
}
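    # Illustrative encoding string (hypothetical instruction): the tokens
    # "vex m:1 p:1 l:128 w:0 0x58 /r" would be parsed above into a VEX
    # instruction with opcode map 1, pp=1 (conventionally the 0x66 compressed
    # prefix), L=0 (128-bit), W=0, opcode 0x58 and a modrm byte with no fixed
    # reg/rm fields.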
def process_operands(self, ops, imp = False):
p = 1
res = []
for op in ops:
if op in absent_op:
break
flags = []
if not imp:
                # Iterate over a copy, since matching flags are removed from self.Flags below.
                for f in list(self.Flags):
if f.startswith('OP%d' % p):
flags.append('OP' + f[3:])
self.Flags.remove(f)
else:
flags.append('OPDEF')
if not imp:
res.append(Operand(op, self.Rwm[p - 1], flags, imp))
else:
res.append(Operand(op, self.Rwm[len(self.ExpOps) + p - 1], flags, imp))
p += 1
return res
def split_encoding(self):
if self.Vex or self.Xop or self.Evex or self.Mvex:
return self.split_encoding_vex()
else:
return self.split_encoding_legacy()
def split_encoding_vex(self):
# First, get the 'mmmmm' - VEX decoding table.
mmmmm = '%x' % self.M
# Now get the opcode. Should be only one.
opcodes = ['%02x' % x for x in self.Opcodes]
# Get the modrm redirections.
modrm = { "mod": self.Mod, "reg": my_str(self.Reg), "rm": my_str(self.Rm), "modpost": None }
# Get the pp, if any.
pp = my_str(self.P)
# Get the l, if any.
l = my_str(self.L)
# Get the w, if any.
w = my_str(self.W)
return (mmmmm, opcodes, modrm, pp, l, w)
def split_encoding_legacy(self):
# First redirection class, the opcode.
opcodes = ['%02x' % x for x in self.Opcodes]
# Second redirection class, modrm
modrm = { "mod": self.Mod, "reg": my_str(self.Reg), "rm": my_str(self.Rm), "modpost": None }
# Third redirection class, mandatory prefixes
mprefixes = []
if self.MustHaveF2:
mprefixes.append('F2')
if self.MustHaveF3:
mprefixes.append('F3')
if self.MustHave66:
mprefixes.append('66')
if self.Np:
mprefixes.append('NP')
if len(mprefixes) == 0 and (not (self.Xop or self.Vex or self.Evex or self.Mvex)) and\
(self.Opcodes[0] == 0x0F and self.Opcodes[1] in [0x3A, 0x38]):
mprefixes.append(None)
# Fourth redirection class, operating mode.
mode = []
if self.RedM16:
mode.append('m16')
elif self.RedM32:
mode.append('m32')
elif self.RedM64:
mode.append('m64')
# Fifth redirection class, default operand size.
dsize = []
if self.RedDs16:
dsize.append('ds16')
elif self.RedDs32:
dsize.append('ds32')
elif self.RedDs64:
dsize.append('ds64')
elif self.RedDDs64:
dsize.append('dds64')
elif self.RedFDs64:
dsize.append('fds64')
# Sixth redirection class, default address size.
asize = []
if self.RedAs16:
asize.append('as16')
elif self.RedAs32:
asize.append('as32')
elif self.RedAs64:
asize.append('as64')
# Seventh redirection class, REX prefix, REX.W, 64 bit mode, 0xF3, SIB. The important aspect here is that unlike
# the other classes, this is not exhaustive - if an instruction does not fit in any of the entries, it
# will default to index 0 (and it will not return invalid encoding, unless entry 0 is invalid).
oprefixes = []
if self.RedRexB:
oprefixes.append('rexb')
if self.RedRexW:
oprefixes.append('rexw')
if self.Red64:
oprefixes.append('64')
if self.RedF3:
oprefixes.append('aF3')
if self.RedRep:
oprefixes.append('rep')
if self.RedRipRel:
oprefixes.append('riprel')
# Vendor redirection, if any.
return (opcodes, modrm, mprefixes, mode, dsize, asize, oprefixes, self.Vendor, self.Feature)
def __str__(self):
# Get the operands
ops = ''
for o in self.ExpOps:
ops += o.__str__() + ','
ops = ops[:-1]
# Return a text reprezentation of the encoding
return (self.Mnemonic + ' ' + ops).strip()
def parse_entry(entry, template_flags = {}, template_cpuid = {}, template_modes = {}):
# Make sure this is not a comment. Skip comments.
if entry.startswith('#') or len(entry) < 4:
return None
# Preprocess: remove comments, CR/LF
entry = entry.replace('\x0D', '').replace('\x0A', '')
com = entry.find('#')
    if -1 != com: entry = entry[:com]
try:
# Space can't be the first character.
if entry[0] == ' ':
raise ParseLineException('Space cannot be the first character!')
components = entry.split(';')
if len(components) != 5:
raise ParseLineException('Expected 5 components per line, but found %d (missing semicolon?)!' % len(components))
mnemonic = components[0].strip()
expops = components[1].strip().split(',')
impops = components[2].strip().split(',')
encoding = components[3].strip()
misc = components[4].strip().split(',')
if len(expops) == 1 and expops[0] in absent_op:
expops = []
if len(impops) == 1 and impops[0] in absent_op:
impops = []
# Extract the flags, class, set, category, encoding, prefmap
attributes = prefmap = isaset = category = iclass = adop = rwm = None
cff = tuple = flgaccess = modes = exclass = fpuflg = None
for y in misc:
y = y.strip()
token, value = y.split(':')
# parse token
if token == 'a': # Instruction attributes.
attributes = value.split('|')
elif token == 'p': # Accepted prefixes.
prefmap = value.split('|')
elif token == 's': # Instruction set
isaset = value
elif token == 't': # Instruction type
category = value
elif token == 'c': # Instruction class. Defaults to the mnemonic if not specified.
iclass = value
elif token == 'w': # Read/write map
rwm = value.split('|')
elif token == 'i': # CPUID.
cff = value
elif token == 'l': # tuple
tuple = value
elif token == 'e':
exclass = value
elif token == 'f': # Flags access
flgaccess = []
for v in value.split('|'):
if v in template_flags:
flgaccess += template_flags[v].split('|')
else:
flgaccess.append(v)
elif token == 'u':
fpuflg = ['u', 'u', 'u', 'u'] # each one is undefined.
for v in value.split('|'):
flg, acc = v.split('=')
if flg not in ['C0', 'C1', 'C2', 'C3']:
raise ParseLineException('Unknown FPU flag: %s' % flg)
if acc not in ['0', '1', 'm', 'u']:
raise ParseLineException('Unknown FPU flag access: %s' % acc)
fpuflg[int(flg[1])] = acc
elif token == 'm': # CPU modes.
# Example: m:ring=0,1,2,3|vmx=root,nonroot|mode=real,v8086,smm,prot,compat,long|other=sgx,tsx
# Note: any group that is not specified is considered entirely valid
# Note: any group that is specified overrides all the other fields in the group; example:
# mode=real - this means the instruction is valid ONLY in real mode.
                # mode=!v8086 - this means the instruction is valid in ANY mode except for V8086
tmodes = []
for t in value.split('|'):
if t in template_modes:
tmodes += template_modes[t].split('|')
else:
tmodes.append(t)
modes = []
groups = {}
for g in valid_mode_groups:
groups[g] = {}
groups[g]["negated"] = False
groups[g]["specified"] = False
groups[g]["modes"] = []
for tm in tmodes:
                    m, v = tm.split('=')
                    if m not in valid_mode_groups:
                        raise ParseLineException('Unknown CPU mode group specified: %s' % m)
                    for vx in v.split('+'):
                        if vx.startswith('!'):
                            vx = vx[1:]
                            groups[m]["negated"] = True
                        if vx not in valid_mode_map[m]:
                            raise ParseLineException('Mode %s is not valid for mode group %s; it can be one of [%s]' %
                                (vx, m, ','.join(valid_mode_map[m])))
                        groups[m]["specified"] = True
                        groups[m]["modes"].append(vx)
for g in groups:
if not groups[g]["specified"]:
modes += valid_mode_map[g]
elif not groups[g]["negated"]:
modes += groups[g]["modes"]
else:
modes += [x for x in valid_mode_map[g] if x not in groups[g]["modes"]]
else:
raise ParseLineException('Unknown token specified: %s' % token)
if attributes is None:
attributes = []
if prefmap is None:
prefmap = []
if isaset is None:
isaset = 'UNKNOWN'
if category is None:
category = 'UNKNOWN'
if iclass is None:
iclass = mnemonic
if rwm is None:
rwm = []
if modes is None:
# No mode specified, assume validity in all modes.
modes = []
modes += valid_cpu_modes
if flgaccess is None:
flgaccess = []
if fpuflg is None:
# fpuflg[x] is for Cx (fpuflg[0] = C0, fpuflg[1] = C1, etc.)
# u = undefined, m = modified, 0 = cleared to 0, 1 = set to 1.
fpuflg = ['u', 'u', 'u', 'u']
# Validate the tokens.
# The set can be anything.
# The type can be anything.
# The iclass can be missing, it will default to the mnemonic.
# The read/write map must have the same size as the number of operands.
if len(rwm) < len(expops) + len(impops):
raise ParseLineException('Invalid number of operand access specifiers: provided %d, expecting %d' %
(len(rwm), len(expops) + len(impops)))
for r in rwm:
if r not in valid_access:
raise ParseLineException('Unknown operand access specifier "%s", expecting one of [%s]' %
(r, ','.join(valid_access)))
# The CPUID can be anything, even if it doesn't match something specified in cpuid.dat.
# The modes must be one of the valid modes.
for m in modes:
if m.startswith('!'):
m = m[1:]
if m not in valid_cpu_modes:
raise ParseLineException('Unknown CPU mode specifier "%s", expecting one of [%s]' %
(m, ','.join(valid_cpu_modes)))
# Validate the prefixes.
for p in prefmap:
if p not in valid_prefixes:
raise ParseLineException('Unknown prefix specifier "%s", expecting one of [%s]' %
(p, ','.join(valid_prefixes)))
# Validate the tuples.
if tuple and tuple not in valid_tuples:
raise ParseLineException('Unknown tuple specifier "%s", expecting one of [%s]' %
(tuple, ','.join(valid_tuples)))
# Validate the attributes.
for a in attributes:
if a not in valid_attributes:
raise ParseLineException('Unknown attribute specifier "%s", expecting one of [%s]' %
(a, ','.join(valid_attributes)))
# Validate the flags.
revflg = {}
for m in valid_flag_op:
revflg[m] = []
for flg in flgaccess:
f, m = flg.split('=')
if m not in valid_flag_op:
                raise ParseLineException('Unknown flag access specifier "%s", expecting one of [%s]' %
(m, ','.join(valid_flag_op)))
if f not in valid_flags:
                raise ParseLineException('Unknown flag specifier "%s", expecting one of [%s]' %
                    (f, ','.join(valid_flags)))
revflg[m].append(f)
flgaccess = revflg
iinfo = {
"mnemonic" : mnemonic, # Mnemonic
"expops" : expops, # Explicit operands
"impops" : impops, # Implicit operands
"encoding" : encoding, # Encoding
"flags" : attributes, # Instruction attributes
"prefixes" : prefmap, # Accepted prefixes
"set" : isaset, # Instruction set
"cat" : category, # Instruction category
"class" : iclass, # Instruction class
"rwm" : rwm, # Read/write operands map
"cff" : cff, # CPUID feature flag
"tuple" : tuple, # Tuple type, for EVEX instruxtions
"exclass" : exclass, # Exception class, for SSE/VEX/EVEX instructions
"flgaccess" : flgaccess, # RFLAGS access
"modes" : modes, # Valid operating modes
"fpuflg" : fpuflg, # FPU flags access (C0, C1, C2, C3), valid for x87 instructions only
}
if 'PREFIX' in attributes:
return None
        ins = Instruction(iinfo)
except Exception as e:
raise
return ins
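# Illustrative entry format (hypothetical values; the real tables live in the
# table*.dat files): five ';'-separated components - mnemonic, explicit
# operands, implicit operands, encoding, and the comma-separated attribute list:
#
#   ADD ; Gv,Ev ; nil ; 0x03 /r ; s:I86, t:ARITH, w:RW|R, f:ARITH
#
# Here 'nil' marks the absent implicit-operand list, '0x03 /r' is the opcode
# plus a modrm byte, and 'f:ARITH' would have to name a template from flags.dat.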
def parse_ins_file(fpath, template_flags = {}, template_cpuid = {}, template_modes = {}):
instructions = []
lcount = 0
for line in open(fpath, 'rt'):
lcount += 1
try:
ins = parse_entry(line, template_flags, template_cpuid, template_modes)
if ins: instructions.append(ins)
except Exception as e:
print('ERROR: Parsing failed at %s:%d: %s' % (fpath, lcount, e))
raise
return instructions
def parse_pre_file(fpath):
prefixes = []
for line in open(fpath, 'rt'):
# Ignore comments.
if line.startswith('#'):
continue
res = re.findall(r'([^\s]+)\s*\[\s*(0x[0-9a-fA-F]+)\]', line)
if not res:
continue
res = res[0]
pref = {}
pref["mnemonic"] = res[0]
pref["encoding"] = res[1]
prefixes.append(Prefix(pref))
return prefixes
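# Illustrative prefix.dat line matching the regex above (hypothetical file
# contents): a mnemonic followed by its encoding byte in brackets, e.g.
#
#   LOCK          [0xF0]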
def parse_cff_file(fpath):
features = []
for line in open(fpath, 'rt'):
if line.startswith('#'):
continue
res = re.findall(r'([^\s]+)\s+:\s+(0x[0-9a-fA-F]+),\s+(0x[0-9a-fA-F]+),\s+(EAX|ECX|EDX|EBX),\s+(\d+)', line)
if not res:
continue
res = res[0]
cffi = {}
cffi["name"] = res[0]
cffi["leaf"] = res[1]
cffi["subleaf"] = res[2]
cffi["reg"] = res[3]
cffi["bit"] = res[4]
features.append(CpuidFeatureFlag(cffi))
return features
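# Illustrative cpuid.dat line matching the regex above (hypothetical file
# contents): a name, then the leaf, subleaf, output register and bit index, e.g.
#
#   AVX2          : 0x00000007, 0x00000000, EBX, 5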
def parse_flags_file(fpath):
flags = {}
for line in open(fpath, 'rt'):
if line.startswith('#'):
continue
res = re.findall(r'([^\s]+)\s+:([^$]+)', line)
if not res:
continue
res = res[0]
flags[res[0]] = res[1].strip('\n\r ')
return flags
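# Illustrative flags.dat line (hypothetical): a template name mapped to a
# '|'-separated list of FLAG=op pairs, each later validated against
# valid_flags/valid_flag_op in parse_entry, e.g.
#
#   ARITH         : CF=m|OF=m|SF=m|ZF=m|AF=m|PF=m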
def parse_modes_file(fpath):
modes = {}
for line in open(fpath, 'rt'):
if line.startswith('#'):
continue
res = re.findall(r'([^\s]+)\s+:([^$]+)', line)
if not res:
continue
res = res[0]
modes[res[0]] = res[1].strip('\n\r ')
return modes
#
# =============================================================================
# Main
# =============================================================================
#
if __name__ == "__main__":
if len(sys.argv) < 2:
print('Usage: %s defs-file' % os.path.basename(sys.argv[0]))
sys.exit(-1)
# Parse the flags file.
flags = parse_flags_file('%s/flags.dat' % sys.argv[1])
# Parse the cpuid feature flags and extract each feature
features = parse_cff_file('%s/cpuid.dat' % sys.argv[1])
# Parse the modes file.
    modes = parse_modes_file('%s/modes.dat' % sys.argv[1])
# Parse the instruction file and extract the instructions
instructions = []
for fn in glob.glob('%s/table*.dat' % sys.argv[1]):
instructions += parse_ins_file(fn, flags, features, modes)
# Sort the instructions.
instructions = sorted(instructions, key = lambda x: x.Mnemonic)
for i in range(0, len(instructions)):
print(instructions[i])
features = sorted(features, key = lambda x: x.Name)
for i in range(0, len(features)):
print(features[i])
|
5bcf391b0b0120ae42310daf5dfaa8c143d1fece
|
97d7455fbaa56813e97cf601e4a23786d47c2e2c
|
/paasta_tools/async_utils.py
|
fa2eb8fb3d0c9f8745f5ab2d8b14e09ebbb28e72
|
[
"Apache-2.0"
] |
permissive
|
Yelp/paasta
|
9138fbb0beaaa6146520c1483144679f9d5d4941
|
6fafc7c86073f136e64b959b963994be3d6160ab
|
refs/heads/master
| 2023-08-17T00:00:47.610727
| 2023-08-10T21:40:26
| 2023-08-10T21:40:26
| 44,998,824
| 1,805
| 291
|
Apache-2.0
| 2023-09-13T20:40:04
| 2015-10-26T21:35:53
|
Python
|
UTF-8
|
Python
| false
| false
| 3,514
|
py
|
async_utils.py
|
import asyncio
import functools
import time
import weakref
from collections import defaultdict
from typing import AsyncIterable
from typing import Awaitable
from typing import Callable
from typing import Dict
from typing import List
from typing import Optional
from typing import TypeVar
T = TypeVar("T")
# NOTE: this method is not thread-safe due to lack of locking while checking
# and updating the cache
def async_ttl_cache(
ttl: Optional[float] = 300,
cleanup_self: bool = False,
*,
cache: Optional[Dict] = None,
) -> Callable[
[Callable[..., Awaitable[T]]], Callable[..., Awaitable[T]] # wrapped # inner
]:
async def call_or_get_from_cache(cache, async_func, args_for_key, args, kwargs):
# Please note that anything which is put into `key` will be in the
# cache forever, potentially causing memory leaks. The most common
        # case is the `self` arg pointing to a huge object. To mitigate that,
        # we're using `args_for_key`, which is not supposed to contain any huge
        # objects.
key = functools._make_key(args_for_key, kwargs, typed=False)
try:
future, last_update = cache[key]
if ttl is not None and time.time() - last_update > ttl:
raise KeyError
except KeyError:
future = asyncio.ensure_future(async_func(*args, **kwargs))
# set the timestamp to +infinity so that we always wait on the in-flight request.
cache[key] = (future, float("Inf"))
try:
value = await future
except Exception:
# Only update the cache if it's the same future we awaited and
# it hasn't already been updated by another coroutine
# Note also that we use get() in case the key was deleted from the
# cache by another coroutine
if cache.get(key) == (future, float("Inf")):
del cache[key]
raise
else:
if cache.get(key) == (future, float("Inf")):
cache[key] = (future, time.time())
return value
if cleanup_self:
instance_caches: Dict = cache if cache is not None else defaultdict(dict)
def on_delete(w):
del instance_caches[w]
def outer(wrapped):
@functools.wraps(wrapped)
async def inner(self, *args, **kwargs):
w = weakref.ref(self, on_delete)
self_cache = instance_caches[w]
return await call_or_get_from_cache(
self_cache, wrapped, args, (self,) + args, kwargs
)
return inner
else:
cache2: Dict = (
cache if cache is not None else {}
) # Should be Dict[Any, T] but that doesn't work.
def outer(wrapped):
@functools.wraps(wrapped)
async def inner(*args, **kwargs):
return await call_or_get_from_cache(cache2, wrapped, args, args, kwargs)
return inner
return outer
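# Usage sketch (illustrative; get_cluster_status is a hypothetical coroutine):
#
#   @async_ttl_cache(ttl=60)
#   async def get_cluster_status(cluster: str) -> dict:
#       ...  # expensive lookup, re-executed at most once per minute per key
#
# Concurrent callers awaiting the same key share one in-flight future, since
# the cache entry keeps a +infinity timestamp until the call resolves.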
async def aiter_to_list(
aiter: AsyncIterable[T],
) -> List[T]:
return [x async for x in aiter]
def async_timeout(
seconds: int = 10,
) -> Callable[
[Callable[..., Awaitable[T]]], Callable[..., Awaitable[T]] # wrapped # inner
]:
def outer(wrapped):
@functools.wraps(wrapped)
async def inner(*args, **kwargs):
return await asyncio.wait_for(wrapped(*args, **kwargs), timeout=seconds)
return inner
return outer
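# Usage sketch (illustrative; fetch_logs is a hypothetical coroutine): the
# wrapped call raises asyncio.TimeoutError if it exceeds the budget.
#
#   @async_timeout(5)
#   async def fetch_logs(service: str) -> str:
#       ...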
|
dbefe24e0112f76e158afbdcc0c5e2822cfc88af
|
2f4605e878c073d7f735eed1d675c2ee454ad68e
|
/sdk/python/pulumi_kubernetes/events/v1/_inputs.py
|
72d5a50b0e1640d976f3c5b8d5898d094b62b50c
|
[
"Apache-2.0"
] |
permissive
|
pulumi/pulumi-kubernetes
|
3c0c82e03a19f4077625d2ff6dae5ea4dbf90243
|
b5d76f0731383f39903f35a6c1566f2f4344c944
|
refs/heads/master
| 2023-08-17T16:57:11.845935
| 2023-08-16T00:55:18
| 2023-08-16T00:55:18
| 116,869,354
| 353
| 128
|
Apache-2.0
| 2023-09-13T21:42:01
| 2018-01-09T20:50:33
|
Java
|
UTF-8
|
Python
| false
| false
| 19,318
|
py
|
_inputs.py
|
# coding=utf-8
# *** WARNING: this file was generated by pulumigen. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import copy
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from ... import core as _core
from ... import meta as _meta
__all__ = [
'EventSeriesPatchArgs',
'EventSeriesArgs',
'EventArgs',
]
@pulumi.input_type
class EventSeriesPatchArgs:
def __init__(__self__, *,
count: Optional[pulumi.Input[int]] = None,
last_observed_time: Optional[pulumi.Input[str]] = None):
"""
EventSeries contain information on series of events, i.e. thing that was/is happening continuously for some time. How often to update the EventSeries is up to the event reporters. The default event reporter in "k8s.io/client-go/tools/events/event_broadcaster.go" shows how this struct is updated on heartbeats and can guide customized reporter implementations.
:param pulumi.Input[int] count: count is the number of occurrences in this series up to the last heartbeat time.
:param pulumi.Input[str] last_observed_time: lastObservedTime is the time when last Event from the series was seen before last heartbeat.
"""
if count is not None:
pulumi.set(__self__, "count", count)
if last_observed_time is not None:
pulumi.set(__self__, "last_observed_time", last_observed_time)
@property
@pulumi.getter
def count(self) -> Optional[pulumi.Input[int]]:
"""
count is the number of occurrences in this series up to the last heartbeat time.
"""
return pulumi.get(self, "count")
@count.setter
def count(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "count", value)
@property
@pulumi.getter(name="lastObservedTime")
def last_observed_time(self) -> Optional[pulumi.Input[str]]:
"""
lastObservedTime is the time when last Event from the series was seen before last heartbeat.
"""
return pulumi.get(self, "last_observed_time")
@last_observed_time.setter
def last_observed_time(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "last_observed_time", value)
@pulumi.input_type
class EventSeriesArgs:
def __init__(__self__, *,
count: pulumi.Input[int],
last_observed_time: pulumi.Input[str]):
"""
EventSeries contain information on series of events, i.e. thing that was/is happening continuously for some time. How often to update the EventSeries is up to the event reporters. The default event reporter in "k8s.io/client-go/tools/events/event_broadcaster.go" shows how this struct is updated on heartbeats and can guide customized reporter implementations.
:param pulumi.Input[int] count: count is the number of occurrences in this series up to the last heartbeat time.
:param pulumi.Input[str] last_observed_time: lastObservedTime is the time when last Event from the series was seen before last heartbeat.
"""
pulumi.set(__self__, "count", count)
pulumi.set(__self__, "last_observed_time", last_observed_time)
@property
@pulumi.getter
def count(self) -> pulumi.Input[int]:
"""
count is the number of occurrences in this series up to the last heartbeat time.
"""
return pulumi.get(self, "count")
@count.setter
def count(self, value: pulumi.Input[int]):
pulumi.set(self, "count", value)
@property
@pulumi.getter(name="lastObservedTime")
def last_observed_time(self) -> pulumi.Input[str]:
"""
lastObservedTime is the time when last Event from the series was seen before last heartbeat.
"""
return pulumi.get(self, "last_observed_time")
@last_observed_time.setter
def last_observed_time(self, value: pulumi.Input[str]):
pulumi.set(self, "last_observed_time", value)
@pulumi.input_type
class EventArgs:
def __init__(__self__, *,
event_time: pulumi.Input[str],
action: Optional[pulumi.Input[str]] = None,
api_version: Optional[pulumi.Input[str]] = None,
deprecated_count: Optional[pulumi.Input[int]] = None,
deprecated_first_timestamp: Optional[pulumi.Input[str]] = None,
deprecated_last_timestamp: Optional[pulumi.Input[str]] = None,
deprecated_source: Optional[pulumi.Input['_core.v1.EventSourceArgs']] = None,
kind: Optional[pulumi.Input[str]] = None,
metadata: Optional[pulumi.Input['_meta.v1.ObjectMetaArgs']] = None,
note: Optional[pulumi.Input[str]] = None,
reason: Optional[pulumi.Input[str]] = None,
regarding: Optional[pulumi.Input['_core.v1.ObjectReferenceArgs']] = None,
related: Optional[pulumi.Input['_core.v1.ObjectReferenceArgs']] = None,
reporting_controller: Optional[pulumi.Input[str]] = None,
reporting_instance: Optional[pulumi.Input[str]] = None,
series: Optional[pulumi.Input['EventSeriesArgs']] = None,
type: Optional[pulumi.Input[str]] = None):
"""
Event is a report of an event somewhere in the cluster. It generally denotes some state change in the system. Events have a limited retention time and triggers and messages may evolve with time. Event consumers should not rely on the timing of an event with a given Reason reflecting a consistent underlying trigger, or the continued existence of events with that Reason. Events should be treated as informative, best-effort, supplemental data.
:param pulumi.Input[str] event_time: eventTime is the time when this Event was first observed. It is required.
:param pulumi.Input[str] action: action is what action was taken/failed regarding to the regarding object. It is machine-readable. This field cannot be empty for new Events and it can have at most 128 characters.
:param pulumi.Input[str] api_version: APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
:param pulumi.Input[int] deprecated_count: deprecatedCount is the deprecated field assuring backward compatibility with core.v1 Event type.
:param pulumi.Input[str] deprecated_first_timestamp: deprecatedFirstTimestamp is the deprecated field assuring backward compatibility with core.v1 Event type.
:param pulumi.Input[str] deprecated_last_timestamp: deprecatedLastTimestamp is the deprecated field assuring backward compatibility with core.v1 Event type.
:param pulumi.Input['_core.v1.EventSourceArgs'] deprecated_source: deprecatedSource is the deprecated field assuring backward compatibility with core.v1 Event type.
:param pulumi.Input[str] kind: Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
:param pulumi.Input['_meta.v1.ObjectMetaArgs'] metadata: Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
:param pulumi.Input[str] note: note is a human-readable description of the status of this operation. Maximal length of the note is 1kB, but libraries should be prepared to handle values up to 64kB.
:param pulumi.Input[str] reason: reason is why the action was taken. It is human-readable. This field cannot be empty for new Events and it can have at most 128 characters.
:param pulumi.Input['_core.v1.ObjectReferenceArgs'] regarding: regarding contains the object this Event is about. In most cases it's an Object reporting controller implements, e.g. ReplicaSetController implements ReplicaSets and this event is emitted because it acts on some changes in a ReplicaSet object.
:param pulumi.Input['_core.v1.ObjectReferenceArgs'] related: related is the optional secondary object for more complex actions. E.g. when regarding object triggers a creation or deletion of related object.
:param pulumi.Input[str] reporting_controller: reportingController is the name of the controller that emitted this Event, e.g. `kubernetes.io/kubelet`. This field cannot be empty for new Events.
:param pulumi.Input[str] reporting_instance: reportingInstance is the ID of the controller instance, e.g. `kubelet-xyzf`. This field cannot be empty for new Events and it can have at most 128 characters.
:param pulumi.Input['EventSeriesArgs'] series: series is data about the Event series this event represents or nil if it's a singleton Event.
:param pulumi.Input[str] type: type is the type of this event (Normal, Warning), new types could be added in the future. It is machine-readable. This field cannot be empty for new Events.
"""
pulumi.set(__self__, "event_time", event_time)
if action is not None:
pulumi.set(__self__, "action", action)
if api_version is not None:
pulumi.set(__self__, "api_version", 'events.k8s.io/v1')
if deprecated_count is not None:
pulumi.set(__self__, "deprecated_count", deprecated_count)
if deprecated_first_timestamp is not None:
pulumi.set(__self__, "deprecated_first_timestamp", deprecated_first_timestamp)
if deprecated_last_timestamp is not None:
pulumi.set(__self__, "deprecated_last_timestamp", deprecated_last_timestamp)
if deprecated_source is not None:
pulumi.set(__self__, "deprecated_source", deprecated_source)
if kind is not None:
pulumi.set(__self__, "kind", 'Event')
if metadata is not None:
pulumi.set(__self__, "metadata", metadata)
if note is not None:
pulumi.set(__self__, "note", note)
if reason is not None:
pulumi.set(__self__, "reason", reason)
if regarding is not None:
pulumi.set(__self__, "regarding", regarding)
if related is not None:
pulumi.set(__self__, "related", related)
if reporting_controller is not None:
pulumi.set(__self__, "reporting_controller", reporting_controller)
if reporting_instance is not None:
pulumi.set(__self__, "reporting_instance", reporting_instance)
if series is not None:
pulumi.set(__self__, "series", series)
if type is not None:
pulumi.set(__self__, "type", type)
@property
@pulumi.getter(name="eventTime")
def event_time(self) -> pulumi.Input[str]:
"""
eventTime is the time when this Event was first observed. It is required.
"""
return pulumi.get(self, "event_time")
@event_time.setter
def event_time(self, value: pulumi.Input[str]):
pulumi.set(self, "event_time", value)
@property
@pulumi.getter
def action(self) -> Optional[pulumi.Input[str]]:
"""
action is what action was taken/failed regarding to the regarding object. It is machine-readable. This field cannot be empty for new Events and it can have at most 128 characters.
"""
return pulumi.get(self, "action")
@action.setter
def action(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "action", value)
@property
@pulumi.getter(name="apiVersion")
def api_version(self) -> Optional[pulumi.Input[str]]:
"""
APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
"""
return pulumi.get(self, "api_version")
@api_version.setter
def api_version(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "api_version", value)
@property
@pulumi.getter(name="deprecatedCount")
def deprecated_count(self) -> Optional[pulumi.Input[int]]:
"""
deprecatedCount is the deprecated field assuring backward compatibility with core.v1 Event type.
"""
return pulumi.get(self, "deprecated_count")
@deprecated_count.setter
def deprecated_count(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "deprecated_count", value)
@property
@pulumi.getter(name="deprecatedFirstTimestamp")
def deprecated_first_timestamp(self) -> Optional[pulumi.Input[str]]:
"""
deprecatedFirstTimestamp is the deprecated field assuring backward compatibility with core.v1 Event type.
"""
return pulumi.get(self, "deprecated_first_timestamp")
@deprecated_first_timestamp.setter
def deprecated_first_timestamp(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "deprecated_first_timestamp", value)
@property
@pulumi.getter(name="deprecatedLastTimestamp")
def deprecated_last_timestamp(self) -> Optional[pulumi.Input[str]]:
"""
deprecatedLastTimestamp is the deprecated field assuring backward compatibility with core.v1 Event type.
"""
return pulumi.get(self, "deprecated_last_timestamp")
@deprecated_last_timestamp.setter
def deprecated_last_timestamp(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "deprecated_last_timestamp", value)
@property
@pulumi.getter(name="deprecatedSource")
def deprecated_source(self) -> Optional[pulumi.Input['_core.v1.EventSourceArgs']]:
"""
deprecatedSource is the deprecated field assuring backward compatibility with core.v1 Event type.
"""
return pulumi.get(self, "deprecated_source")
@deprecated_source.setter
def deprecated_source(self, value: Optional[pulumi.Input['_core.v1.EventSourceArgs']]):
pulumi.set(self, "deprecated_source", value)
@property
@pulumi.getter
def kind(self) -> Optional[pulumi.Input[str]]:
"""
Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
"""
return pulumi.get(self, "kind")
@kind.setter
def kind(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "kind", value)
@property
@pulumi.getter
def metadata(self) -> Optional[pulumi.Input['_meta.v1.ObjectMetaArgs']]:
"""
Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
"""
return pulumi.get(self, "metadata")
@metadata.setter
def metadata(self, value: Optional[pulumi.Input['_meta.v1.ObjectMetaArgs']]):
pulumi.set(self, "metadata", value)
@property
@pulumi.getter
def note(self) -> Optional[pulumi.Input[str]]:
"""
note is a human-readable description of the status of this operation. Maximal length of the note is 1kB, but libraries should be prepared to handle values up to 64kB.
"""
return pulumi.get(self, "note")
@note.setter
def note(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "note", value)
@property
@pulumi.getter
def reason(self) -> Optional[pulumi.Input[str]]:
"""
reason is why the action was taken. It is human-readable. This field cannot be empty for new Events and it can have at most 128 characters.
"""
return pulumi.get(self, "reason")
@reason.setter
def reason(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "reason", value)
@property
@pulumi.getter
def regarding(self) -> Optional[pulumi.Input['_core.v1.ObjectReferenceArgs']]:
"""
regarding contains the object this Event is about. In most cases it's an Object reporting controller implements, e.g. ReplicaSetController implements ReplicaSets and this event is emitted because it acts on some changes in a ReplicaSet object.
"""
return pulumi.get(self, "regarding")
@regarding.setter
def regarding(self, value: Optional[pulumi.Input['_core.v1.ObjectReferenceArgs']]):
pulumi.set(self, "regarding", value)
@property
@pulumi.getter
def related(self) -> Optional[pulumi.Input['_core.v1.ObjectReferenceArgs']]:
"""
related is the optional secondary object for more complex actions. E.g. when regarding object triggers a creation or deletion of related object.
"""
return pulumi.get(self, "related")
@related.setter
def related(self, value: Optional[pulumi.Input['_core.v1.ObjectReferenceArgs']]):
pulumi.set(self, "related", value)
@property
@pulumi.getter(name="reportingController")
def reporting_controller(self) -> Optional[pulumi.Input[str]]:
"""
reportingController is the name of the controller that emitted this Event, e.g. `kubernetes.io/kubelet`. This field cannot be empty for new Events.
"""
return pulumi.get(self, "reporting_controller")
@reporting_controller.setter
def reporting_controller(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "reporting_controller", value)
@property
@pulumi.getter(name="reportingInstance")
def reporting_instance(self) -> Optional[pulumi.Input[str]]:
"""
reportingInstance is the ID of the controller instance, e.g. `kubelet-xyzf`. This field cannot be empty for new Events and it can have at most 128 characters.
"""
return pulumi.get(self, "reporting_instance")
@reporting_instance.setter
def reporting_instance(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "reporting_instance", value)
@property
@pulumi.getter
def series(self) -> Optional[pulumi.Input['EventSeriesArgs']]:
"""
series is data about the Event series this event represents or nil if it's a singleton Event.
"""
return pulumi.get(self, "series")
@series.setter
def series(self, value: Optional[pulumi.Input['EventSeriesArgs']]):
pulumi.set(self, "series", value)
@property
@pulumi.getter
def type(self) -> Optional[pulumi.Input[str]]:
"""
type is the type of this event (Normal, Warning), new types could be added in the future. It is machine-readable. This field cannot be empty for new Events.
"""
return pulumi.get(self, "type")
@type.setter
def type(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "type", value)
|
21a582210037559c287ea1aa5cae2a1ebbf252de
|
96dcea595e7c16cec07b3f649afd65f3660a0bad
|
/homeassistant/components/doorbird/button.py
|
1c69429d3c70877ad3434443382def0981878ff6
|
[
"Apache-2.0"
] |
permissive
|
home-assistant/core
|
3455eac2e9d925c92d30178643b1aaccf3a6484f
|
80caeafcb5b6e2f9da192d0ea6dd1a5b8244b743
|
refs/heads/dev
| 2023-08-31T15:41:06.299469
| 2023-08-31T14:50:53
| 2023-08-31T14:50:53
| 12,888,993
| 35,501
| 20,617
|
Apache-2.0
| 2023-09-14T21:50:15
| 2013-09-17T07:29:48
|
Python
|
UTF-8
|
Python
| false
| false
| 2,661
|
py
|
button.py
|
"""Support for powering relays in a DoorBird video doorbell."""
from collections.abc import Callable
from dataclasses import dataclass
from doorbirdpy import DoorBird
from homeassistant.components.button import ButtonEntity, ButtonEntityDescription
from homeassistant.config_entries import ConfigEntry
from homeassistant.core import HomeAssistant
from homeassistant.helpers.entity_platform import AddEntitiesCallback
from .const import DOMAIN
from .entity import DoorBirdEntity
from .models import DoorBirdData
IR_RELAY = "__ir_light__"
@dataclass
class DoorbirdButtonEntityDescriptionMixin:
"""Mixin to describe a Doorbird Button entity."""
press_action: Callable[[DoorBird, str], None]
@dataclass
class DoorbirdButtonEntityDescription(
ButtonEntityDescription, DoorbirdButtonEntityDescriptionMixin
):
"""Class to describe a Doorbird Button entity."""
RELAY_ENTITY_DESCRIPTION = DoorbirdButtonEntityDescription(
key="relay",
press_action=lambda device, relay: device.energize_relay(relay),
icon="mdi:dip-switch",
)
IR_ENTITY_DESCRIPTION = DoorbirdButtonEntityDescription(
key="ir",
press_action=lambda device, _: device.turn_light_on(),
icon="mdi:lightbulb",
)
async def async_setup_entry(
hass: HomeAssistant,
config_entry: ConfigEntry,
async_add_entities: AddEntitiesCallback,
) -> None:
"""Set up the DoorBird button platform."""
config_entry_id = config_entry.entry_id
door_bird_data: DoorBirdData = hass.data[DOMAIN][config_entry_id]
relays = door_bird_data.door_station_info["RELAYS"]
entities = [
DoorBirdButton(door_bird_data, relay, RELAY_ENTITY_DESCRIPTION)
for relay in relays
]
entities.append(DoorBirdButton(door_bird_data, IR_RELAY, IR_ENTITY_DESCRIPTION))
async_add_entities(entities)
class DoorBirdButton(DoorBirdEntity, ButtonEntity):
"""A relay in a DoorBird device."""
entity_description: DoorbirdButtonEntityDescription
def __init__(
self,
door_bird_data: DoorBirdData,
relay: str,
entity_description: DoorbirdButtonEntityDescription,
) -> None:
"""Initialize a relay in a DoorBird device."""
super().__init__(door_bird_data)
self._relay = relay
self.entity_description = entity_description
if self._relay == IR_RELAY:
self._attr_name = "IR"
else:
self._attr_name = f"Relay {self._relay}"
self._attr_unique_id = f"{self._mac_addr}_{self._relay}"
def press(self) -> None:
"""Power the relay."""
self.entity_description.press_action(self._door_station.device, self._relay)
|
8f1715369d7252e96d75c6dd744724e037b904ca
|
6f36df6219f8e50374068bb4b3e1a5387c7a2f34
|
/fipy/meshes/nonUniformGrid2D.py
|
f0210924b734f2e27013766215065c0c36116c91
|
[
"NIST-PD"
] |
permissive
|
usnistgov/fipy
|
0a3db715fea452ae710eea3999d9cd42dfe76fe7
|
fdc17193bc293da7511be9021e6d4766757e1966
|
refs/heads/master
| 2023-08-31T21:59:36.611448
| 2023-06-27T16:28:58
| 2023-06-27T16:28:58
| 23,316,495
| 444
| 171
|
NOASSERTION
| 2023-09-06T19:21:19
| 2014-08-25T14:27:58
|
Python
|
UTF-8
|
Python
| false
| false
| 13,613
|
py
|
nonUniformGrid2D.py
|
"""
2D rectangular Mesh
"""
from __future__ import unicode_literals
__docformat__ = 'restructuredtext'
from fipy.tools import parallelComm
from fipy.meshes.mesh2D import Mesh2D
from fipy.meshes.builders import _NonuniformGrid2DBuilder
from fipy.meshes.representations.gridRepresentation import _Grid2DRepresentation
from fipy.meshes.topologies.gridTopology import _Grid2DTopology
__all__ = ["NonUniformGrid2D"]
from future.utils import text_to_native_str
__all__ = [text_to_native_str(n) for n in __all__]
class NonUniformGrid2D(Mesh2D):
"""
Creates a 2D grid mesh with horizontal faces numbered
first and then vertical faces.
"""
def __init__(self, dx=1., dy=1., nx=None, ny=None, overlap=2, communicator=parallelComm,
_RepresentationClass=_Grid2DRepresentation, _TopologyClass=_Grid2DTopology):
builder = _NonuniformGrid2DBuilder()
self.args = {
'dx': dx,
'dy': dy,
'nx': nx,
'ny': ny,
'overlap': overlap
}
if self.args['nx'] is None:
self.args['nx'] = len(self.args['dx'])
if self.args['ny'] is None:
self.args['ny'] = len(self.args['dy'])
builder.buildGridData([dx, dy], [nx, ny], overlap, communicator)
([self.dx, self.dy],
[self.nx, self.ny],
self.dim,
scale,
self.globalNumberOfCells,
self.globalNumberOfFaces,
self.overlap,
self.offset,
self.numberOfVertices,
self.numberOfFaces,
self.numberOfCells,
self.shape,
self.physicalShape,
self._meshSpacing,
self.numberOfHorizontalRows,
self.numberOfVerticalColumns,
self.numberOfHorizontalFaces,
vertices,
faces,
cells,
[self.Xoffset, self.Yoffset]) = builder.gridData
Mesh2D.__init__(self, vertices, faces, cells, communicator=communicator,
_RepresentationClass=_RepresentationClass, _TopologyClass=_TopologyClass)
self.scale = scale
def _test(self):
"""
These tests are not useful as documentation, but are here to ensure
everything works as expected.
>>> dx = 0.5
>>> dy = 2.
>>> nx = 3
>>> ny = 2
>>> mesh = NonUniformGrid2D(nx = nx, ny = ny, dx = dx, dy = dy)
>>> from fipy import numerix
>>> vertices = numerix.array(((0., 1., 2., 3., 0., 1., 2., 3., 0., 1., 2., 3.),
... (0., 0., 0., 0., 1., 1., 1., 1., 2., 2., 2., 2.)))
>>> vertices *= numerix.array(((dx,), (dy,)))
>>> print(numerix.allequal(vertices,
... mesh.vertexCoords)) # doctest: +PROCESSOR_0
True
>>> faces = numerix.array(((1, 2, 3, 4, 5, 6, 8, 9, 10, 0, 5, 6, 7, 4, 9, 10, 11),
... (0, 1, 2, 5, 6, 7, 9, 10, 11, 4, 1, 2, 3, 8, 5, 6, 7)))
>>> print(numerix.allequal(faces,
... mesh.faceVertexIDs)) # doctest: +PROCESSOR_0
True
>>> cells = numerix.array(((0, 1, 2, 3, 4, 5),
... (10, 11, 12, 14, 15, 16),
... (3, 4, 5, 6, 7, 8),
... (9, 10, 11, 13, 14, 15)))
>>> print(numerix.allequal(cells,
... mesh.cellFaceIDs)) # doctest: +PROCESSOR_0
True
>>> externalFaces = numerix.array((0, 1, 2, 6, 7, 8, 9, 12, 13, 16))
>>> print(numerix.allequal(externalFaces,
... numerix.nonzero(mesh.exteriorFaces))) # doctest: +PROCESSOR_0
True
>>> internalFaces = numerix.array((3, 4, 5, 10, 11, 14, 15))
>>> print(numerix.allequal(internalFaces,
... numerix.nonzero(mesh.interiorFaces))) # doctest: +PROCESSOR_0
True
>>> from fipy.tools.numerix import MA
>>> faceCellIds = MA.masked_values(((0, 1, 2, 0, 1, 2, 3, 4, 5, 0, 0, 1, 2, 3, 3, 4, 5),
... (-1, -1, -1, 3, 4, 5, -1, -1, -1, -1, 1, 2, -1, -1, 4, 5, -1)), -1)
>>> print(numerix.allequal(faceCellIds, mesh.faceCellIDs)) # doctest: +PROCESSOR_0
True
>>> faceAreas = numerix.array((dx, dx, dx, dx, dx, dx, dx, dx, dx,
... dy, dy, dy, dy, dy, dy, dy, dy))
>>> print(numerix.allclose(faceAreas, mesh._faceAreas, atol = 1e-10, rtol = 1e-10)) # doctest: +PROCESSOR_0
True
>>> faceCoords = numerix.take(vertices, faces, axis=1)
>>> faceCenters = (faceCoords[..., 0,:] + faceCoords[..., 1,:]) / 2.
>>> print(numerix.allclose(faceCenters, mesh.faceCenters, atol = 1e-10, rtol = 1e-10))
True
>>> faceNormals = numerix.array(((0., 0., 0., 0., 0., 0., 0., 0., 0., -1., 1., 1., 1., -1., 1., 1., 1.),
... (-1., -1., -1., 1., 1., 1., 1., 1., 1., 0., 0., 0., 0., 0., 0., 0., 0.)))
>>> print(numerix.allclose(faceNormals, mesh.faceNormals, atol = 1e-10, rtol = 1e-10)) # doctest: +PROCESSOR_0
True
>>> cellToFaceOrientations = numerix.array(((1, 1, 1, -1, -1, -1),
... (1, 1, 1, 1, 1, 1),
... (1, 1, 1, 1, 1, 1),
... (1, -1, -1, 1, -1, -1)))
>>> print(numerix.allequal(cellToFaceOrientations, mesh._cellToFaceOrientations)) # doctest: +PROCESSOR_0
True
>>> cellVolumes = numerix.array((dx*dy, dx*dy, dx*dy, dx*dy, dx*dy, dx*dy))
>>> print(numerix.allclose(cellVolumes, mesh.cellVolumes, atol = 1e-10, rtol = 1e-10)) # doctest: +PROCESSOR_0
True
>>> cellCenters = numerix.array(((dx/2., 3.*dx/2., 5.*dx/2., dx/2., 3.*dx/2., 5.*dx/2.),
... (dy/2., dy/2., dy/2., 3.*dy/2., 3.*dy/2., 3.*dy/2.)))
>>> print(numerix.allclose(mesh.cellCenters, cellCenters, atol = 1e-10, rtol = 1e-10))
True
>>> faceToCellDistances = MA.masked_values(((dy / 2., dy / 2., dy / 2., dy / 2., dy / 2., dy / 2., dy / 2., dy / 2., dy / 2., dx / 2., dx / 2., dx / 2., dx / 2., dx / 2., dx / 2., dx / 2., dx / 2.),
... (-1, -1, -1, dy / 2., dy / 2., dy / 2., -1, -1, -1, -1, dx / 2., dx / 2., -1, -1, dx / 2., dx / 2., -1)), -1)
>>> print(numerix.allclose(faceToCellDistances, mesh._faceToCellDistances, atol = 1e-10, rtol = 1e-10)) # doctest: +PROCESSOR_0
True
>>> cellDistances = numerix.array((dy / 2., dy / 2., dy / 2.,
... dy, dy, dy,
... dy / 2., dy / 2., dy / 2.,
... dx / 2., dx, dx,
... dx / 2.,
... dx / 2., dx, dx,
... dx / 2.))
>>> print(numerix.allclose(cellDistances, mesh._cellDistances, atol = 1e-10, rtol = 1e-10)) # doctest: +PROCESSOR_0
True
>>> faceToCellDistanceRatios = faceToCellDistances[0] / cellDistances
>>> print(numerix.allclose(faceToCellDistanceRatios, mesh._faceToCellDistanceRatio, atol = 1e-10, rtol = 1e-10)) # doctest: +PROCESSOR_0
True
>>> areaProjections = faceNormals * faceAreas
>>> print(numerix.allclose(areaProjections, mesh._areaProjections, atol = 1e-10, rtol = 1e-10)) # doctest: +PROCESSOR_0
True
>>> tangents1 = numerix.array(((1., 1., 1., -1., -1., -1., -1., -1., -1., 0., 0., 0., 0., 0., 0., 0., 0.),
... (0., 0., 0., 0., 0., 0., 0., 0., 0., -1., 1., 1., 1., -1., 1., 1., 1.)))
>>> print(numerix.allclose(tangents1, mesh._faceTangents1, atol = 1e-10, rtol = 1e-10)) # doctest: +PROCESSOR_0
True
>>> tangents2 = numerix.zeros((2, 17), 'd')
>>> print(numerix.allclose(tangents2, mesh._faceTangents2, atol = 1e-10, rtol = 1e-10)) # doctest: +PROCESSOR_0
True
>>> cellToCellIDs = MA.masked_values(((-1, -1, -1, 0, 1, 2),
... (1, 2, -1, 4, 5, -1),
... (3, 4, 5, -1, -1, -1),
... (-1, 0, 1, -1, 3, 4)), -1)
>>> print(numerix.allequal(cellToCellIDs, mesh._cellToCellIDs)) # doctest: +PROCESSOR_0
True
>>> cellToCellDistances = MA.masked_values(((dy / 2., dy / 2., dy / 2., dy, dy, dy),
... ( dx, dx, dx / 2., dx, dx, dx / 2.),
... ( dy, dy, dy, dy / 2., dy / 2., dy / 2.),
... (dx / 2., dx, dx, dx / 2., dx, dx)), -1)
>>> print(numerix.allclose(cellToCellDistances, mesh._cellToCellDistances, atol = 1e-10, rtol = 1e-10)) # doctest: +PROCESSOR_0
True
>>> interiorCellIDs = numerix.array(())
>>> print(numerix.allequal(interiorCellIDs, mesh._interiorCellIDs)) # doctest: +PROCESSOR_0
True
>>> exteriorCellIDs = numerix.array((0, 1, 2, 3, 4, 5))
>>> print(numerix.allequal(exteriorCellIDs, mesh._exteriorCellIDs)) # doctest: +PROCESSOR_0
True
>>> cellNormals = numerix.array(((( 0, 0, 0, 0, 0, 0),
... ( 1, 1, 1, 1, 1, 1),
... ( 0, 0, 0, 0, 0, 0),
... (-1, -1, -1, -1, -1, -1)),
... ((-1, -1, -1, -1, -1, -1),
... ( 0, 0, 0, 0, 0, 0),
... ( 1, 1, 1, 1, 1, 1),
... ( 0, 0, 0, 0, 0, 0))))
>>> print(numerix.allclose(cellNormals, mesh._cellNormals, atol = 1e-10, rtol = 1e-10)) # doctest: +PROCESSOR_0
True
>>> cellAreaProjections = numerix.array(((( 0, 0, 0, 0, 0, 0),
... ( dy, dy, dy, dy, dy, dy),
... ( 0, 0, 0, 0, 0, 0),
... (-dy, -dy, -dy, -dy, -dy, -dy)),
... ((-dx, -dx, -dx, -dx, -dx, -dx),
... ( 0, 0, 0, 0, 0, 0),
... ( dx, dx, dx, dx, dx, dx),
... ( 0, 0, 0, 0, 0, 0))))
>>> print(numerix.allclose(cellAreaProjections, mesh._cellAreaProjections, atol = 1e-10, rtol = 1e-10)) # doctest: +PROCESSOR_0
True
>>> cellVertexIDs = MA.masked_values(((5, 6, 7, 9, 10, 11),
... (4, 5, 6, 8, 9, 10),
... (1, 2, 3, 5, 6, 7),
... (0, 1, 2, 4, 5, 6)), -1000)
>>> print(numerix.allclose(mesh._cellVertexIDs, cellVertexIDs)) # doctest: +PROCESSOR_0
True
>>> from fipy.tools import dump
>>> (f, filename) = dump.write(mesh, extension = '.gz')
>>> unpickledMesh = dump.read(filename, f)
>>> print(numerix.allclose(mesh.cellCenters, unpickledMesh.cellCenters))
True
Test for https://github.com/usnistgov/fipy/issues/364.
>>> from fipy.meshes.nonUniformGrid2D import NonUniformGrid2D
>>> m = NonUniformGrid2D(nx=1, ny=9, overlap=1)
>>> print(min(m.y) == 0.5) # doctest: +SERIAL
True
>>> print(min(m.y) == 3.5) # doctest: +PROCESSOR_1_OF_2
True
>>> print(min(m.y) == 5.5) # doctest: +PROCESSOR_2_OF_3
True
Ensure that ghost faces are excluded from accumulating operations
(#856). The four exterior surfaces of a :math:`10\times 10` square mesh
should each have a total area of 10, regardless of partitioning.
>>> square = NonUniformGrid2D(nx=10, dx=1., ny=10, dy=1.)
>>> area = (square._faceAreas * square.facesBottom).sum()
>>> print(numerix.allclose(area, 10))
True
>>> area = (square._faceAreas * square.facesTop).sum()
>>> print(numerix.allclose(area, 10))
True
>>> area = (square._faceAreas * square.facesLeft).sum()
>>> print(numerix.allclose(area, 10))
True
>>> area = (square._faceAreas * square.facesRight).sum()
>>> print(numerix.allclose(area, 10))
True
"""
def _test():
import fipy.tests.doctestPlus
return fipy.tests.doctestPlus.testmod()
if __name__ == "__main__":
_test()
|
63e85f0413382595773f93c5b0a78456401a2a41
|
e3a97b316fdf07b170341da206163a865f9e812c
|
/python/kwiver/sprokit/tests/sprokit/pipeline/test-process_registry.py
|
165cfa31f7c820f4b916285151ffd787068cb514
|
[
"BSD-3-Clause"
] |
permissive
|
Kitware/kwiver
|
09133ede9d05c33212839cc29d396aa8ca21baaf
|
a422409b83f78f31cda486e448e8009513e75427
|
refs/heads/master
| 2023-08-28T10:41:58.077148
| 2023-07-28T21:18:52
| 2023-07-28T21:18:52
| 23,229,909
| 191
| 92
|
NOASSERTION
| 2023-06-26T17:18:20
| 2014-08-22T15:22:20
|
C++
|
UTF-8
|
Python
| false
| false
| 12,655
|
py
|
test-process_registry.py
|
#!/usr/bin/env python
#ckwg +28
# Copyright 2011-2020 by Kitware, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither name of Kitware, Inc. nor the names of any contributors may be used
# to endorse or promote products derived from this software without specific
# prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ``AS IS''
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from kwiver.sprokit.util.test import expect_exception, find_tests, run_test, test_error
def test_import():
try:
from kwiver.vital.config import config
import kwiver.sprokit.pipeline.process_factory
except:
test_error("Failed to import the process_factory module")
def test_api_calls():
from kwiver.vital.config import config
from kwiver.vital.modules import modules
from kwiver.sprokit.pipeline import process
from kwiver.sprokit.pipeline import process_factory
modules.load_known_modules()
proc_type = 'orphan'
c = config.empty_config()
process_factory.create_process(proc_type, '')
process_factory.create_process(proc_type, '', c)
process_factory.types()
process_factory.description(proc_type)
process_factory.Process.property_no_threads
process_factory.Process.property_no_reentrancy
process_factory.Process.property_unsync_input
process_factory.Process.property_unsync_output
process_factory.Process.port_heartbeat
process_factory.Process.config_name
process_factory.Process.config_type
process_factory.Process.type_any
process_factory.Process.type_none
process_factory.Process.type_data_dependent
process_factory.Process.type_flow_dependent
process_factory.Process.flag_output_const
process_factory.Process.flag_input_static
process_factory.Process.flag_input_mutable
process_factory.Process.flag_input_nodep
process_factory.Process.flag_required
def example_process(check_init):
from kwiver.sprokit.pipeline import process
class PythonExample(process.PythonProcess):
def __init__(self, conf):
process.PythonProcess.__init__(self, conf)
self.ran_configure = check_init
self.ran_init = check_init
self.ran_reset = check_init
self.ran_step = check_init
self.ran_finalize = check_init
self.ran_reconfigure = check_init
self.ran_properties = check_init
self.ran_input_ports = check_init
self.ran_output_ports = check_init
self.ran_input_port_info = check_init
self.ran_output_port_info = check_init
self.ran_set_input_port_type = check_init
self.ran_set_output_port_type = check_init
self.ran_available_config = check_init
self.ran_conf_info = check_init
def _configure(self):
self.ran_configure = True
self._base_configure()
def _init(self):
self.ran_init = True
self._base_init()
def _reset(self):
self.ran_reset = True
self._base_reset()
def _step(self):
self.ran_step = True
self._base_step()
def _finalize(self):
self.ran_finalize = True
self._base_finalize()
def _reconfigure(self, conf):
self.ran_reconfigure = True
self._base_reconfigure(conf)
def _properties(self):
self.ran_properties = True
return self._base_properties()
def _input_ports(self):
self.ran_input_ports = True
return self._base_input_ports()
def _output_ports(self):
self.ran_output_ports = True
return self._base_output_ports()
def _input_port_info(self, port):
self.ran_input_port_info = True
return self._base_input_port_info(port)
def _output_port_info(self, port):
self.ran_output_port_info = True
return self._base_output_port_info(port)
def _set_input_port_type(self, port, type):
self.ran_set_input_port_type = True
return self._base_set_input_port_type(port, type)
def _set_output_port_type(self, port, type):
self.ran_set_output_port_type = True
return self._base_set_output_port_type(port, type)
def _available_config(self):
self.ran_available_config = True
return self._base_available_config()
def _config_info(self, key):
self.ran_conf_info = True
return self._base_config_info(key)
def __del__(self):
if not self.ran_configure:
test_error("_configure override was not called")
if not self.ran_init:
test_error("_init override was not called")
if not self.ran_reset:
test_error("_reset override was not called")
# TODO: See TODO below.
#if not self.ran_step:
# test_error("_step override was not called")
#if not self.ran_reconfigure:
# test_error("_reconfigure override was not called")
if not self.ran_properties:
test_error("_properties override was not called")
if not self.ran_input_ports:
test_error("_input_ports override was not called")
if not self.ran_output_ports:
test_error("_output_ports override was not called")
if not self.ran_input_port_info:
test_error("_input_port_info override was not called")
if not self.ran_output_port_info:
test_error("_output_port_info override was not called")
if not self.ran_set_input_port_type:
test_error("_set_input_port_type override was not called")
if not self.ran_set_output_port_type:
test_error("_set_output_port_type override was not called")
if not self.ran_available_config:
test_error("_available_config override was not called")
if not self.ran_conf_info:
test_error("_conf_info override was not called")
return PythonExample
def base_example_process():
from kwiver.sprokit.pipeline import process
class PythonBaseExample(process.PythonProcess):
def __init__(self, conf):
process.PythonProcess.__init__(self, conf)
def __del__(self):
pass
return PythonBaseExample
def base_example_process_cluster():
from kwiver.sprokit.pipeline import process
from kwiver.sprokit.pipeline import process_cluster
class PythonBaseClusterExample(process_cluster.PythonProcessCluster):
def __init__(self, conf):
process_cluster.PythonProcessCluster.__init__(self, conf)
def __del__(self):
pass
return PythonBaseClusterExample
def test_register():
from kwiver.vital.config import config
from kwiver.sprokit.pipeline import process
from kwiver.sprokit.pipeline import process_factory
proc_type = 'python_example'
proc_desc = 'simple description'
process_factory.add_process(proc_type, proc_desc, example_process(True))
if not proc_desc == process_factory.description(proc_type):
test_error("Description was not preserved when registering")
try:
p = process_factory.create_process(proc_type, '')
if p is None:
raise Exception()
except:
test_error("Could not create newly registered process type")
def test_register_cluster():
from kwiver.vital.config import config
from kwiver.sprokit.pipeline import process
from kwiver.sprokit.pipeline import process_cluster
from kwiver.sprokit.pipeline import process_factory
proc_type = 'python_example'
proc_desc = 'simple description'
process_factory.add_process(proc_type, proc_desc, base_example_process_cluster())
if not proc_desc == process_factory.description(proc_type):
test_error("Description was not preserved when registering")
p = None
try:
p = process_factory.create_process(proc_type, '')
if p is None:
raise Exception()
except BaseException:
import sys
e = sys.exc_info()[1]
test_error("Could not create newly registered process cluster type: %s" % str(e))
if process_cluster.cluster_from_process(p) is None:
test_error("A cluster process from the registry was not detected as a cluster process")
def test_wrapper_api():
from kwiver.vital.config import config
from kwiver.sprokit.pipeline import edge
from kwiver.sprokit.pipeline import process
from kwiver.sprokit.pipeline import process_factory
proc_type = 'python_example'
proc_desc = 'simple description'
proc_base_type = 'python_base_example'
proc_base_desc = 'simple base description'
iport = 'no_such_iport'
oport = 'no_such_oport'
key = 'no_such_key'
ptype = 'no_type'
process_factory.add_process(proc_type, proc_desc, example_process(False))
process_factory.add_process(proc_base_type, proc_base_desc, base_example_process())
def check_process(p):
if p is None:
test_error("Got a 'None' process")
return
p.properties()
p.input_ports()
p.output_ports()
expect_exception("asking for info on a non-existant input port", RuntimeError,
p.input_port_info, iport)
expect_exception("asking for info on a non-existant output port", RuntimeError,
p.output_port_info, oport)
e = edge.Edge()
expect_exception("connecting to a non-existant input port", RuntimeError,
p.connect_input_port, iport, e)
expect_exception("connecting to a non-existant output port", RuntimeError,
p.connect_output_port, oport, e)
p.available_config()
p.available_tunable_config()
expect_exception("asking for info on a non-existant config key", RuntimeError,
p.config_info, key)
expect_exception("setting a type on a non-existent input port", RuntimeError,
p.set_input_port_type, iport, ptype)
expect_exception("setting a type on a non-existent output port", RuntimeError,
p.set_output_port_type, oport, ptype)
p.reset()
p.configure()
p.init()
# TODO: Can't check this because the core frequency of the process
# cannot be set. Needs to be stepped within a pipeline to verify this.
# Enable the ran_step check in p.check when this is fixed.
#p.step()
# TODO: Can't check this because only the process_cluster base class
# and the pipeline may reconfigure a process. Needs to be stepped
# within a pipeline to verify this. Enable the ran_step check in
# p.check when this is fixed.
#p.reconfigure(reconf)
del p
p = process_factory.create_process(proc_type, '')
check_process(p)
p = process_factory.create_process(proc_base_type, '')
check_process(p)
if __name__ == '__main__':
import sys
if len(sys.argv) != 2:
test_error("Expected two arguments")
sys.exit(1)
testname = sys.argv[1]
run_test(testname, find_tests(locals()))
|
25d45aef80dec156ec87dc8f584f71b54f81090a
|
a9fdace9236af6c73133fd8dddb80843697efc7d
|
/examples/detection/models/__init__.py
|
525223853a12c6683d9ffc89b48df9ebcf694790
|
[
"Apache-2.0"
] |
permissive
|
catalyst-team/catalyst
|
026c38f26dad471cd77347adbc13423b156a5d8b
|
e99f90655d0efcf22559a46e928f0f98c9807ebf
|
refs/heads/master
| 2023-08-26T23:12:49.277005
| 2022-04-29T04:19:24
| 2022-04-29T04:19:24
| 145,385,156
| 3,038
| 487
|
Apache-2.0
| 2023-08-12T03:40:14
| 2018-08-20T07:56:13
|
Python
|
UTF-8
|
Python
| false
| false
| 197
|
py
|
__init__.py
|
# flake8: noqa
from .centernet import CenterNet
from .ssd import SingleShotDetector
from .yolo_x import (
yolo_x_tiny,
yolo_x_small,
yolo_x_medium,
yolo_x_large,
yolo_x_big,
)
|
7b5418c75dc3b86270b21817b9f08c7c8cb34144
|
f9d564f1aa83eca45872dab7fbaa26dd48210d08
|
/huaweicloud-sdk-hilens/huaweicloudsdkhilens/v1/__init__.py
|
8f73fa59569a3618d44140d847d8f9e4f1b86b1b
|
[
"Apache-2.0"
] |
permissive
|
huaweicloud/huaweicloud-sdk-python-v3
|
cde6d849ce5b1de05ac5ebfd6153f27803837d84
|
f69344c1dadb79067746ddf9bfde4bddc18d5ecf
|
refs/heads/master
| 2023-09-01T19:29:43.013318
| 2023-08-31T08:28:59
| 2023-08-31T08:28:59
| 262,207,814
| 103
| 44
|
NOASSERTION
| 2023-06-22T14:50:48
| 2020-05-08T02:28:43
|
Python
|
UTF-8
|
Python
| false
| false
| 746
|
py
|
__init__.py
|
# coding: utf-8
from __future__ import absolute_import
from huaweicloudsdkhilens.v1.hilens_client import HiLensClient
from huaweicloudsdkhilens.v1.hilens_async_client import HiLensAsyncClient
from huaweicloudsdkhilens.v1.model.get_device_alarm_array_object import GetDeviceAlarmArrayObject
from huaweicloudsdkhilens.v1.model.get_devices_list_array_object import GetDevicesListArrayObject
from huaweicloudsdkhilens.v1.model.list_device_alarms_request import ListDeviceAlarmsRequest
from huaweicloudsdkhilens.v1.model.list_device_alarms_response import ListDeviceAlarmsResponse
from huaweicloudsdkhilens.v1.model.list_devices_request import ListDevicesRequest
from huaweicloudsdkhilens.v1.model.list_devices_response import ListDevicesResponse
|
5360fbaa8b813e70f2ac7acf574a7a5988bf22e4
|
1742b6719b988e5519373002305e31d28b8bd691
|
/sdk/python/pulumi_aws/cognito/outputs.py
|
cc5eeabb5340e0aae04d18e556923ffaba486258
|
[
"MPL-2.0",
"BSD-3-Clause",
"Apache-2.0"
] |
permissive
|
pulumi/pulumi-aws
|
4f7fdb4a816c5ea357cff2c2e3b613c006e49f1a
|
42b0a0abdf6c14da248da22f8c4530af06e67b98
|
refs/heads/master
| 2023-08-03T23:08:34.520280
| 2023-08-01T18:09:58
| 2023-08-01T18:09:58
| 97,484,940
| 384
| 171
|
Apache-2.0
| 2023-09-14T14:48:40
| 2017-07-17T14:20:33
|
Java
|
UTF-8
|
Python
| false
| false
| 104,793
|
py
|
outputs.py
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import copy
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from . import outputs
__all__ = [
'IdentityPoolCognitoIdentityProvider',
'IdentityPoolRoleAttachmentRoleMapping',
'IdentityPoolRoleAttachmentRoleMappingMappingRule',
'ResourceServerScope',
'RiskConfigurationAccountTakeoverRiskConfiguration',
'RiskConfigurationAccountTakeoverRiskConfigurationActions',
'RiskConfigurationAccountTakeoverRiskConfigurationActionsHighAction',
'RiskConfigurationAccountTakeoverRiskConfigurationActionsLowAction',
'RiskConfigurationAccountTakeoverRiskConfigurationActionsMediumAction',
'RiskConfigurationAccountTakeoverRiskConfigurationNotifyConfiguration',
'RiskConfigurationAccountTakeoverRiskConfigurationNotifyConfigurationBlockEmail',
'RiskConfigurationAccountTakeoverRiskConfigurationNotifyConfigurationMfaEmail',
'RiskConfigurationAccountTakeoverRiskConfigurationNotifyConfigurationNoActionEmail',
'RiskConfigurationCompromisedCredentialsRiskConfiguration',
'RiskConfigurationCompromisedCredentialsRiskConfigurationActions',
'RiskConfigurationRiskExceptionConfiguration',
'UserPoolAccountRecoverySetting',
'UserPoolAccountRecoverySettingRecoveryMechanism',
'UserPoolAdminCreateUserConfig',
'UserPoolAdminCreateUserConfigInviteMessageTemplate',
'UserPoolClientAnalyticsConfiguration',
'UserPoolClientTokenValidityUnits',
'UserPoolDeviceConfiguration',
'UserPoolEmailConfiguration',
'UserPoolLambdaConfig',
'UserPoolLambdaConfigCustomEmailSender',
'UserPoolLambdaConfigCustomSmsSender',
'UserPoolPasswordPolicy',
'UserPoolSchema',
'UserPoolSchemaNumberAttributeConstraints',
'UserPoolSchemaStringAttributeConstraints',
'UserPoolSmsConfiguration',
'UserPoolSoftwareTokenMfaConfiguration',
'UserPoolUserAttributeUpdateSettings',
'UserPoolUserPoolAddOns',
'UserPoolUsernameConfiguration',
'UserPoolVerificationMessageTemplate',
'GetUserPoolClientAnalyticsConfigurationResult',
'GetUserPoolClientTokenValidityUnitResult',
]
@pulumi.output_type
class IdentityPoolCognitoIdentityProvider(dict):
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "clientId":
suggest = "client_id"
elif key == "providerName":
suggest = "provider_name"
elif key == "serverSideTokenCheck":
suggest = "server_side_token_check"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in IdentityPoolCognitoIdentityProvider. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
IdentityPoolCognitoIdentityProvider.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
IdentityPoolCognitoIdentityProvider.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
client_id: Optional[str] = None,
provider_name: Optional[str] = None,
server_side_token_check: Optional[bool] = None):
"""
:param str client_id: The client ID for the Amazon Cognito Identity User Pool.
:param str provider_name: The provider name for an Amazon Cognito Identity User Pool.
        :param bool server_side_token_check: Whether server-side token validation is enabled for the identity provider’s token.
"""
if client_id is not None:
pulumi.set(__self__, "client_id", client_id)
if provider_name is not None:
pulumi.set(__self__, "provider_name", provider_name)
if server_side_token_check is not None:
pulumi.set(__self__, "server_side_token_check", server_side_token_check)
@property
@pulumi.getter(name="clientId")
def client_id(self) -> Optional[str]:
"""
The client ID for the Amazon Cognito Identity User Pool.
"""
return pulumi.get(self, "client_id")
@property
@pulumi.getter(name="providerName")
def provider_name(self) -> Optional[str]:
"""
The provider name for an Amazon Cognito Identity User Pool.
"""
return pulumi.get(self, "provider_name")
@property
@pulumi.getter(name="serverSideTokenCheck")
def server_side_token_check(self) -> Optional[bool]:
"""
        Whether server-side token validation is enabled for the identity provider’s token.
"""
return pulumi.get(self, "server_side_token_check")
@pulumi.output_type
class IdentityPoolRoleAttachmentRoleMapping(dict):
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "identityProvider":
suggest = "identity_provider"
elif key == "ambiguousRoleResolution":
suggest = "ambiguous_role_resolution"
elif key == "mappingRules":
suggest = "mapping_rules"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in IdentityPoolRoleAttachmentRoleMapping. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
IdentityPoolRoleAttachmentRoleMapping.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
IdentityPoolRoleAttachmentRoleMapping.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
identity_provider: str,
type: str,
ambiguous_role_resolution: Optional[str] = None,
mapping_rules: Optional[Sequence['outputs.IdentityPoolRoleAttachmentRoleMappingMappingRule']] = None):
"""
:param str identity_provider: A string identifying the identity provider, for example, "graph.facebook.com" or "cognito-idp.us-east-1.amazonaws.com/us-east-1_abcdefghi:app_client_id". Depends on `cognito_identity_providers` set on `cognito.IdentityPool` resource or a `cognito.IdentityProvider` resource.
:param str type: The role mapping type.
:param str ambiguous_role_resolution: Specifies the action to be taken if either no rules match the claim value for the Rules type, or there is no cognito:preferred_role claim and there are multiple cognito:roles matches for the Token type. `Required` if you specify Token or Rules as the Type.
:param Sequence['IdentityPoolRoleAttachmentRoleMappingMappingRuleArgs'] mapping_rules: The Rules Configuration to be used for mapping users to roles. You can specify up to 25 rules per identity provider. Rules are evaluated in order. The first one to match specifies the role.
"""
pulumi.set(__self__, "identity_provider", identity_provider)
pulumi.set(__self__, "type", type)
if ambiguous_role_resolution is not None:
pulumi.set(__self__, "ambiguous_role_resolution", ambiguous_role_resolution)
if mapping_rules is not None:
pulumi.set(__self__, "mapping_rules", mapping_rules)
@property
@pulumi.getter(name="identityProvider")
def identity_provider(self) -> str:
"""
A string identifying the identity provider, for example, "graph.facebook.com" or "cognito-idp.us-east-1.amazonaws.com/us-east-1_abcdefghi:app_client_id". Depends on `cognito_identity_providers` set on `cognito.IdentityPool` resource or a `cognito.IdentityProvider` resource.
"""
return pulumi.get(self, "identity_provider")
@property
@pulumi.getter
def type(self) -> str:
"""
The role mapping type.
"""
return pulumi.get(self, "type")
@property
@pulumi.getter(name="ambiguousRoleResolution")
def ambiguous_role_resolution(self) -> Optional[str]:
"""
Specifies the action to be taken if either no rules match the claim value for the Rules type, or there is no cognito:preferred_role claim and there are multiple cognito:roles matches for the Token type. `Required` if you specify Token or Rules as the Type.
"""
return pulumi.get(self, "ambiguous_role_resolution")
@property
@pulumi.getter(name="mappingRules")
def mapping_rules(self) -> Optional[Sequence['outputs.IdentityPoolRoleAttachmentRoleMappingMappingRule']]:
"""
The Rules Configuration to be used for mapping users to roles. You can specify up to 25 rules per identity provider. Rules are evaluated in order. The first one to match specifies the role.
"""
return pulumi.get(self, "mapping_rules")
@pulumi.output_type
class IdentityPoolRoleAttachmentRoleMappingMappingRule(dict):
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "matchType":
suggest = "match_type"
elif key == "roleArn":
suggest = "role_arn"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in IdentityPoolRoleAttachmentRoleMappingMappingRule. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
IdentityPoolRoleAttachmentRoleMappingMappingRule.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
IdentityPoolRoleAttachmentRoleMappingMappingRule.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
claim: str,
match_type: str,
role_arn: str,
value: str):
"""
:param str claim: The claim name that must be present in the token, for example, "isAdmin" or "paid".
:param str match_type: The match condition that specifies how closely the claim value in the IdP token must match Value.
:param str role_arn: The role ARN.
:param str value: A brief string that the claim must match, for example, "paid" or "yes".
"""
pulumi.set(__self__, "claim", claim)
pulumi.set(__self__, "match_type", match_type)
pulumi.set(__self__, "role_arn", role_arn)
pulumi.set(__self__, "value", value)
@property
@pulumi.getter
def claim(self) -> str:
"""
The claim name that must be present in the token, for example, "isAdmin" or "paid".
"""
return pulumi.get(self, "claim")
@property
@pulumi.getter(name="matchType")
def match_type(self) -> str:
"""
The match condition that specifies how closely the claim value in the IdP token must match Value.
"""
return pulumi.get(self, "match_type")
@property
@pulumi.getter(name="roleArn")
def role_arn(self) -> str:
"""
The role ARN.
"""
return pulumi.get(self, "role_arn")
@property
@pulumi.getter
def value(self) -> str:
"""
A brief string that the claim must match, for example, "paid" or "yes".
"""
return pulumi.get(self, "value")
@pulumi.output_type
class ResourceServerScope(dict):
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "scopeDescription":
suggest = "scope_description"
elif key == "scopeName":
suggest = "scope_name"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in ResourceServerScope. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
ResourceServerScope.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
ResourceServerScope.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
scope_description: str,
scope_name: str):
"""
:param str scope_description: The scope description.
:param str scope_name: The scope name.
"""
pulumi.set(__self__, "scope_description", scope_description)
pulumi.set(__self__, "scope_name", scope_name)
@property
@pulumi.getter(name="scopeDescription")
def scope_description(self) -> str:
"""
The scope description.
"""
return pulumi.get(self, "scope_description")
@property
@pulumi.getter(name="scopeName")
def scope_name(self) -> str:
"""
The scope name.
"""
return pulumi.get(self, "scope_name")
@pulumi.output_type
class RiskConfigurationAccountTakeoverRiskConfiguration(dict):
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "notifyConfiguration":
suggest = "notify_configuration"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in RiskConfigurationAccountTakeoverRiskConfiguration. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
RiskConfigurationAccountTakeoverRiskConfiguration.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
RiskConfigurationAccountTakeoverRiskConfiguration.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
actions: 'outputs.RiskConfigurationAccountTakeoverRiskConfigurationActions',
notify_configuration: 'outputs.RiskConfigurationAccountTakeoverRiskConfigurationNotifyConfiguration'):
"""
:param 'RiskConfigurationAccountTakeoverRiskConfigurationActionsArgs' actions: Account takeover risk configuration actions. See details below.
:param 'RiskConfigurationAccountTakeoverRiskConfigurationNotifyConfigurationArgs' notify_configuration: The notify configuration used to construct email notifications. See details below.
"""
pulumi.set(__self__, "actions", actions)
pulumi.set(__self__, "notify_configuration", notify_configuration)
@property
@pulumi.getter
def actions(self) -> 'outputs.RiskConfigurationAccountTakeoverRiskConfigurationActions':
"""
Account takeover risk configuration actions. See details below.
"""
return pulumi.get(self, "actions")
@property
@pulumi.getter(name="notifyConfiguration")
def notify_configuration(self) -> 'outputs.RiskConfigurationAccountTakeoverRiskConfigurationNotifyConfiguration':
"""
The notify configuration used to construct email notifications. See details below.
"""
return pulumi.get(self, "notify_configuration")
@pulumi.output_type
class RiskConfigurationAccountTakeoverRiskConfigurationActions(dict):
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "highAction":
suggest = "high_action"
elif key == "lowAction":
suggest = "low_action"
elif key == "mediumAction":
suggest = "medium_action"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in RiskConfigurationAccountTakeoverRiskConfigurationActions. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
RiskConfigurationAccountTakeoverRiskConfigurationActions.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
RiskConfigurationAccountTakeoverRiskConfigurationActions.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
high_action: Optional['outputs.RiskConfigurationAccountTakeoverRiskConfigurationActionsHighAction'] = None,
low_action: Optional['outputs.RiskConfigurationAccountTakeoverRiskConfigurationActionsLowAction'] = None,
medium_action: Optional['outputs.RiskConfigurationAccountTakeoverRiskConfigurationActionsMediumAction'] = None):
"""
:param 'RiskConfigurationAccountTakeoverRiskConfigurationActionsHighActionArgs' high_action: Action to take for a high risk. See action block below.
:param 'RiskConfigurationAccountTakeoverRiskConfigurationActionsLowActionArgs' low_action: Action to take for a low risk. See action block below.
:param 'RiskConfigurationAccountTakeoverRiskConfigurationActionsMediumActionArgs' medium_action: Action to take for a medium risk. See action block below.
"""
if high_action is not None:
pulumi.set(__self__, "high_action", high_action)
if low_action is not None:
pulumi.set(__self__, "low_action", low_action)
if medium_action is not None:
pulumi.set(__self__, "medium_action", medium_action)
@property
@pulumi.getter(name="highAction")
def high_action(self) -> Optional['outputs.RiskConfigurationAccountTakeoverRiskConfigurationActionsHighAction']:
"""
Action to take for a high risk. See action block below.
"""
return pulumi.get(self, "high_action")
@property
@pulumi.getter(name="lowAction")
def low_action(self) -> Optional['outputs.RiskConfigurationAccountTakeoverRiskConfigurationActionsLowAction']:
"""
Action to take for a low risk. See action block below.
"""
return pulumi.get(self, "low_action")
@property
@pulumi.getter(name="mediumAction")
def medium_action(self) -> Optional['outputs.RiskConfigurationAccountTakeoverRiskConfigurationActionsMediumAction']:
"""
Action to take for a medium risk. See action block below.
"""
return pulumi.get(self, "medium_action")
@pulumi.output_type
class RiskConfigurationAccountTakeoverRiskConfigurationActionsHighAction(dict):
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "eventAction":
suggest = "event_action"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in RiskConfigurationAccountTakeoverRiskConfigurationActionsHighAction. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
RiskConfigurationAccountTakeoverRiskConfigurationActionsHighAction.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
RiskConfigurationAccountTakeoverRiskConfigurationActionsHighAction.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
event_action: str,
notify: bool):
"""
        :param str event_action: The action to take in response to the account takeover action. Valid values are `BLOCK`, `MFA_IF_CONFIGURED`, `MFA_REQUIRED` and `NO_ACTION`.
        :param bool notify: Whether to send a notification.
"""
pulumi.set(__self__, "event_action", event_action)
pulumi.set(__self__, "notify", notify)
@property
@pulumi.getter(name="eventAction")
def event_action(self) -> str:
"""
The action to take in response to the account takeover action. Valid values are `BLOCK`, `MFA_IF_CONFIGURED`, `MFA_REQUIRED` and `NO_ACTION`.
"""
return pulumi.get(self, "event_action")
@property
@pulumi.getter
def notify(self) -> bool:
return pulumi.get(self, "notify")
@pulumi.output_type
class RiskConfigurationAccountTakeoverRiskConfigurationActionsLowAction(dict):
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "eventAction":
suggest = "event_action"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in RiskConfigurationAccountTakeoverRiskConfigurationActionsLowAction. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
RiskConfigurationAccountTakeoverRiskConfigurationActionsLowAction.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
RiskConfigurationAccountTakeoverRiskConfigurationActionsLowAction.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
event_action: str,
notify: bool):
"""
        :param str event_action: The action to take in response to the account takeover action. Valid values are `BLOCK`, `MFA_IF_CONFIGURED`, `MFA_REQUIRED` and `NO_ACTION`.
        :param bool notify: Whether to send a notification.
"""
pulumi.set(__self__, "event_action", event_action)
pulumi.set(__self__, "notify", notify)
@property
@pulumi.getter(name="eventAction")
def event_action(self) -> str:
"""
The action to take in response to the account takeover action. Valid values are `BLOCK`, `MFA_IF_CONFIGURED`, `MFA_REQUIRED` and `NO_ACTION`.
"""
return pulumi.get(self, "event_action")
@property
@pulumi.getter
def notify(self) -> bool:
return pulumi.get(self, "notify")
@pulumi.output_type
class RiskConfigurationAccountTakeoverRiskConfigurationActionsMediumAction(dict):
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "eventAction":
suggest = "event_action"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in RiskConfigurationAccountTakeoverRiskConfigurationActionsMediumAction. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
RiskConfigurationAccountTakeoverRiskConfigurationActionsMediumAction.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
RiskConfigurationAccountTakeoverRiskConfigurationActionsMediumAction.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
event_action: str,
notify: bool):
"""
        :param str event_action: The action to take in response to the account takeover action. Valid values are `BLOCK`, `MFA_IF_CONFIGURED`, `MFA_REQUIRED` and `NO_ACTION`.
        :param bool notify: Whether to send a notification.
"""
pulumi.set(__self__, "event_action", event_action)
pulumi.set(__self__, "notify", notify)
@property
@pulumi.getter(name="eventAction")
def event_action(self) -> str:
"""
The action to take in response to the account takeover action. Valid values are `BLOCK`, `MFA_IF_CONFIGURED`, `MFA_REQUIRED` and `NO_ACTION`.
"""
return pulumi.get(self, "event_action")
@property
@pulumi.getter
def notify(self) -> bool:
return pulumi.get(self, "notify")
@pulumi.output_type
class RiskConfigurationAccountTakeoverRiskConfigurationNotifyConfiguration(dict):
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "sourceArn":
suggest = "source_arn"
elif key == "blockEmail":
suggest = "block_email"
elif key == "from":
suggest = "from_"
elif key == "mfaEmail":
suggest = "mfa_email"
elif key == "noActionEmail":
suggest = "no_action_email"
elif key == "replyTo":
suggest = "reply_to"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in RiskConfigurationAccountTakeoverRiskConfigurationNotifyConfiguration. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
RiskConfigurationAccountTakeoverRiskConfigurationNotifyConfiguration.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
RiskConfigurationAccountTakeoverRiskConfigurationNotifyConfiguration.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
source_arn: str,
block_email: Optional['outputs.RiskConfigurationAccountTakeoverRiskConfigurationNotifyConfigurationBlockEmail'] = None,
from_: Optional[str] = None,
mfa_email: Optional['outputs.RiskConfigurationAccountTakeoverRiskConfigurationNotifyConfigurationMfaEmail'] = None,
no_action_email: Optional['outputs.RiskConfigurationAccountTakeoverRiskConfigurationNotifyConfigurationNoActionEmail'] = None,
reply_to: Optional[str] = None):
"""
:param str source_arn: The Amazon Resource Name (ARN) of the identity that is associated with the sending authorization policy. This identity permits Amazon Cognito to send for the email address specified in the From parameter.
:param 'RiskConfigurationAccountTakeoverRiskConfigurationNotifyConfigurationBlockEmailArgs' block_email: Email template used when a detected risk event is blocked. See notify email type below.
:param str from_: The email address that is sending the email. The address must be either individually verified with Amazon Simple Email Service, or from a domain that has been verified with Amazon SES.
:param 'RiskConfigurationAccountTakeoverRiskConfigurationNotifyConfigurationMfaEmailArgs' mfa_email: The multi-factor authentication (MFA) email template used when MFA is challenged as part of a detected risk. See notify email type below.
:param 'RiskConfigurationAccountTakeoverRiskConfigurationNotifyConfigurationNoActionEmailArgs' no_action_email: The email template used when a detected risk event is allowed. See notify email type below.
        :param str reply_to: The destination to which the receiver of an email should reply.
"""
pulumi.set(__self__, "source_arn", source_arn)
if block_email is not None:
pulumi.set(__self__, "block_email", block_email)
if from_ is not None:
pulumi.set(__self__, "from_", from_)
if mfa_email is not None:
pulumi.set(__self__, "mfa_email", mfa_email)
if no_action_email is not None:
pulumi.set(__self__, "no_action_email", no_action_email)
if reply_to is not None:
pulumi.set(__self__, "reply_to", reply_to)
@property
@pulumi.getter(name="sourceArn")
def source_arn(self) -> str:
"""
The Amazon Resource Name (ARN) of the identity that is associated with the sending authorization policy. This identity permits Amazon Cognito to send for the email address specified in the From parameter.
"""
return pulumi.get(self, "source_arn")
@property
@pulumi.getter(name="blockEmail")
def block_email(self) -> Optional['outputs.RiskConfigurationAccountTakeoverRiskConfigurationNotifyConfigurationBlockEmail']:
"""
Email template used when a detected risk event is blocked. See notify email type below.
"""
return pulumi.get(self, "block_email")
@property
@pulumi.getter(name="from")
def from_(self) -> Optional[str]:
"""
The email address that is sending the email. The address must be either individually verified with Amazon Simple Email Service, or from a domain that has been verified with Amazon SES.
"""
return pulumi.get(self, "from_")
@property
@pulumi.getter(name="mfaEmail")
def mfa_email(self) -> Optional['outputs.RiskConfigurationAccountTakeoverRiskConfigurationNotifyConfigurationMfaEmail']:
"""
The multi-factor authentication (MFA) email template used when MFA is challenged as part of a detected risk. See notify email type below.
"""
return pulumi.get(self, "mfa_email")
@property
@pulumi.getter(name="noActionEmail")
def no_action_email(self) -> Optional['outputs.RiskConfigurationAccountTakeoverRiskConfigurationNotifyConfigurationNoActionEmail']:
"""
The email template used when a detected risk event is allowed. See notify email type below.
"""
return pulumi.get(self, "no_action_email")
@property
@pulumi.getter(name="replyTo")
def reply_to(self) -> Optional[str]:
"""
        The destination to which the receiver of an email should reply.
"""
return pulumi.get(self, "reply_to")
@pulumi.output_type
class RiskConfigurationAccountTakeoverRiskConfigurationNotifyConfigurationBlockEmail(dict):
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "htmlBody":
suggest = "html_body"
elif key == "textBody":
suggest = "text_body"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in RiskConfigurationAccountTakeoverRiskConfigurationNotifyConfigurationBlockEmail. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
RiskConfigurationAccountTakeoverRiskConfigurationNotifyConfigurationBlockEmail.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
RiskConfigurationAccountTakeoverRiskConfigurationNotifyConfigurationBlockEmail.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
html_body: str,
subject: str,
text_body: str):
"""
:param str html_body: The email HTML body.
:param str subject: The email subject.
:param str text_body: The email text body.
"""
pulumi.set(__self__, "html_body", html_body)
pulumi.set(__self__, "subject", subject)
pulumi.set(__self__, "text_body", text_body)
@property
@pulumi.getter(name="htmlBody")
def html_body(self) -> str:
"""
The email HTML body.
"""
return pulumi.get(self, "html_body")
@property
@pulumi.getter
def subject(self) -> str:
"""
The email subject.
"""
return pulumi.get(self, "subject")
@property
@pulumi.getter(name="textBody")
def text_body(self) -> str:
"""
The email text body.
"""
return pulumi.get(self, "text_body")
@pulumi.output_type
class RiskConfigurationAccountTakeoverRiskConfigurationNotifyConfigurationMfaEmail(dict):
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "htmlBody":
suggest = "html_body"
elif key == "textBody":
suggest = "text_body"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in RiskConfigurationAccountTakeoverRiskConfigurationNotifyConfigurationMfaEmail. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
RiskConfigurationAccountTakeoverRiskConfigurationNotifyConfigurationMfaEmail.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
RiskConfigurationAccountTakeoverRiskConfigurationNotifyConfigurationMfaEmail.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
html_body: str,
subject: str,
text_body: str):
"""
:param str html_body: The email HTML body.
:param str subject: The email subject.
:param str text_body: The email text body.
"""
pulumi.set(__self__, "html_body", html_body)
pulumi.set(__self__, "subject", subject)
pulumi.set(__self__, "text_body", text_body)
@property
@pulumi.getter(name="htmlBody")
def html_body(self) -> str:
"""
The email HTML body.
"""
return pulumi.get(self, "html_body")
@property
@pulumi.getter
def subject(self) -> str:
"""
The email subject.
"""
return pulumi.get(self, "subject")
@property
@pulumi.getter(name="textBody")
def text_body(self) -> str:
"""
The email text body.
"""
return pulumi.get(self, "text_body")
@pulumi.output_type
class RiskConfigurationAccountTakeoverRiskConfigurationNotifyConfigurationNoActionEmail(dict):
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "htmlBody":
suggest = "html_body"
elif key == "textBody":
suggest = "text_body"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in RiskConfigurationAccountTakeoverRiskConfigurationNotifyConfigurationNoActionEmail. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
RiskConfigurationAccountTakeoverRiskConfigurationNotifyConfigurationNoActionEmail.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
RiskConfigurationAccountTakeoverRiskConfigurationNotifyConfigurationNoActionEmail.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
html_body: str,
subject: str,
text_body: str):
"""
:param str html_body: The email HTML body.
:param str subject: The email subject.
:param str text_body: The email text body.
"""
pulumi.set(__self__, "html_body", html_body)
pulumi.set(__self__, "subject", subject)
pulumi.set(__self__, "text_body", text_body)
@property
@pulumi.getter(name="htmlBody")
def html_body(self) -> str:
"""
The email HTML body.
"""
return pulumi.get(self, "html_body")
@property
@pulumi.getter
def subject(self) -> str:
"""
The email subject.
"""
return pulumi.get(self, "subject")
@property
@pulumi.getter(name="textBody")
def text_body(self) -> str:
"""
The email text body.
"""
return pulumi.get(self, "text_body")
@pulumi.output_type
class RiskConfigurationCompromisedCredentialsRiskConfiguration(dict):
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "eventFilters":
suggest = "event_filters"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in RiskConfigurationCompromisedCredentialsRiskConfiguration. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
RiskConfigurationCompromisedCredentialsRiskConfiguration.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
RiskConfigurationCompromisedCredentialsRiskConfiguration.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
actions: 'outputs.RiskConfigurationCompromisedCredentialsRiskConfigurationActions',
event_filters: Optional[Sequence[str]] = None):
"""
:param 'RiskConfigurationCompromisedCredentialsRiskConfigurationActionsArgs' actions: The compromised credentials risk configuration actions. See details below.
        :param Sequence[str] event_filters: Perform the action for these events. If no event filter is specified, the action applies to all events. Valid values are `SIGN_IN`, `PASSWORD_CHANGE`, and `SIGN_UP`.
"""
pulumi.set(__self__, "actions", actions)
if event_filters is not None:
pulumi.set(__self__, "event_filters", event_filters)
@property
@pulumi.getter
def actions(self) -> 'outputs.RiskConfigurationCompromisedCredentialsRiskConfigurationActions':
"""
The compromised credentials risk configuration actions. See details below.
"""
return pulumi.get(self, "actions")
@property
@pulumi.getter(name="eventFilters")
def event_filters(self) -> Optional[Sequence[str]]:
"""
        Perform the action for these events. If no event filter is specified, the action applies to all events. Valid values are `SIGN_IN`, `PASSWORD_CHANGE`, and `SIGN_UP`.
"""
return pulumi.get(self, "event_filters")
@pulumi.output_type
class RiskConfigurationCompromisedCredentialsRiskConfigurationActions(dict):
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "eventAction":
suggest = "event_action"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in RiskConfigurationCompromisedCredentialsRiskConfigurationActions. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
RiskConfigurationCompromisedCredentialsRiskConfigurationActions.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
RiskConfigurationCompromisedCredentialsRiskConfigurationActions.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
event_action: str):
"""
:param str event_action: The event action. Valid values are `BLOCK` or `NO_ACTION`.
"""
pulumi.set(__self__, "event_action", event_action)
@property
@pulumi.getter(name="eventAction")
def event_action(self) -> str:
"""
The event action. Valid values are `BLOCK` or `NO_ACTION`.
"""
return pulumi.get(self, "event_action")
@pulumi.output_type
class RiskConfigurationRiskExceptionConfiguration(dict):
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "blockedIpRangeLists":
suggest = "blocked_ip_range_lists"
elif key == "skippedIpRangeLists":
suggest = "skipped_ip_range_lists"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in RiskConfigurationRiskExceptionConfiguration. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
RiskConfigurationRiskExceptionConfiguration.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
RiskConfigurationRiskExceptionConfiguration.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
blocked_ip_range_lists: Optional[Sequence[str]] = None,
skipped_ip_range_lists: Optional[Sequence[str]] = None):
"""
:param Sequence[str] blocked_ip_range_lists: Overrides the risk decision to always block the pre-authentication requests.
The IP range is in CIDR notation, a compact representation of an IP address and its routing prefix.
Can contain a maximum of 200 items.
:param Sequence[str] skipped_ip_range_lists: Risk detection isn't performed on the IP addresses in this range list.
The IP range is in CIDR notation.
Can contain a maximum of 200 items.
"""
if blocked_ip_range_lists is not None:
pulumi.set(__self__, "blocked_ip_range_lists", blocked_ip_range_lists)
if skipped_ip_range_lists is not None:
pulumi.set(__self__, "skipped_ip_range_lists", skipped_ip_range_lists)
@property
@pulumi.getter(name="blockedIpRangeLists")
def blocked_ip_range_lists(self) -> Optional[Sequence[str]]:
"""
Overrides the risk decision to always block the pre-authentication requests.
The IP range is in CIDR notation, a compact representation of an IP address and its routing prefix.
Can contain a maximum of 200 items.
"""
return pulumi.get(self, "blocked_ip_range_lists")
@property
@pulumi.getter(name="skippedIpRangeLists")
def skipped_ip_range_lists(self) -> Optional[Sequence[str]]:
"""
Risk detection isn't performed on the IP addresses in this range list.
The IP range is in CIDR notation.
Can contain a maximum of 200 items.
"""
return pulumi.get(self, "skipped_ip_range_lists")
@pulumi.output_type
class UserPoolAccountRecoverySetting(dict):
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "recoveryMechanisms":
suggest = "recovery_mechanisms"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in UserPoolAccountRecoverySetting. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
UserPoolAccountRecoverySetting.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
UserPoolAccountRecoverySetting.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
recovery_mechanisms: Optional[Sequence['outputs.UserPoolAccountRecoverySettingRecoveryMechanism']] = None):
"""
:param Sequence['UserPoolAccountRecoverySettingRecoveryMechanismArgs'] recovery_mechanisms: List of Account Recovery Options of the following structure:
"""
if recovery_mechanisms is not None:
pulumi.set(__self__, "recovery_mechanisms", recovery_mechanisms)
@property
@pulumi.getter(name="recoveryMechanisms")
def recovery_mechanisms(self) -> Optional[Sequence['outputs.UserPoolAccountRecoverySettingRecoveryMechanism']]:
"""
List of Account Recovery Options of the following structure:
"""
return pulumi.get(self, "recovery_mechanisms")
@pulumi.output_type
class UserPoolAccountRecoverySettingRecoveryMechanism(dict):
def __init__(__self__, *,
name: str,
priority: int):
"""
        :param str name: Recovery method for a user. Valid values are `verified_email`, `verified_phone_number`, and `admin_only`.
:param int priority: Positive integer specifying priority of a method with 1 being the highest priority.
"""
pulumi.set(__self__, "name", name)
pulumi.set(__self__, "priority", priority)
@property
@pulumi.getter
def name(self) -> str:
"""
        Recovery method for a user. Valid values are `verified_email`, `verified_phone_number`, and `admin_only`.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter
def priority(self) -> int:
"""
Positive integer specifying priority of a method with 1 being the highest priority.
"""
return pulumi.get(self, "priority")
@pulumi.output_type
class UserPoolAdminCreateUserConfig(dict):
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "allowAdminCreateUserOnly":
suggest = "allow_admin_create_user_only"
elif key == "inviteMessageTemplate":
suggest = "invite_message_template"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in UserPoolAdminCreateUserConfig. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
UserPoolAdminCreateUserConfig.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
UserPoolAdminCreateUserConfig.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
allow_admin_create_user_only: Optional[bool] = None,
invite_message_template: Optional['outputs.UserPoolAdminCreateUserConfigInviteMessageTemplate'] = None):
"""
:param bool allow_admin_create_user_only: Set to True if only the administrator is allowed to create user profiles. Set to False if users can sign themselves up via an app.
:param 'UserPoolAdminCreateUserConfigInviteMessageTemplateArgs' invite_message_template: Invite message template structure. Detailed below.
"""
if allow_admin_create_user_only is not None:
pulumi.set(__self__, "allow_admin_create_user_only", allow_admin_create_user_only)
if invite_message_template is not None:
pulumi.set(__self__, "invite_message_template", invite_message_template)
@property
@pulumi.getter(name="allowAdminCreateUserOnly")
def allow_admin_create_user_only(self) -> Optional[bool]:
"""
Set to True if only the administrator is allowed to create user profiles. Set to False if users can sign themselves up via an app.
"""
return pulumi.get(self, "allow_admin_create_user_only")
@property
@pulumi.getter(name="inviteMessageTemplate")
def invite_message_template(self) -> Optional['outputs.UserPoolAdminCreateUserConfigInviteMessageTemplate']:
"""
Invite message template structure. Detailed below.
"""
return pulumi.get(self, "invite_message_template")
@pulumi.output_type
class UserPoolAdminCreateUserConfigInviteMessageTemplate(dict):
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "emailMessage":
suggest = "email_message"
elif key == "emailSubject":
suggest = "email_subject"
elif key == "smsMessage":
suggest = "sms_message"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in UserPoolAdminCreateUserConfigInviteMessageTemplate. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
UserPoolAdminCreateUserConfigInviteMessageTemplate.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
UserPoolAdminCreateUserConfigInviteMessageTemplate.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
email_message: Optional[str] = None,
email_subject: Optional[str] = None,
sms_message: Optional[str] = None):
"""
:param str email_message: Message template for email messages. Must contain `{username}` and `{####}` placeholders, for username and temporary password, respectively.
:param str email_subject: Subject line for email messages.
:param str sms_message: Message template for SMS messages. Must contain `{username}` and `{####}` placeholders, for username and temporary password, respectively.
"""
if email_message is not None:
pulumi.set(__self__, "email_message", email_message)
if email_subject is not None:
pulumi.set(__self__, "email_subject", email_subject)
if sms_message is not None:
pulumi.set(__self__, "sms_message", sms_message)
@property
@pulumi.getter(name="emailMessage")
def email_message(self) -> Optional[str]:
"""
Message template for email messages. Must contain `{username}` and `{####}` placeholders, for username and temporary password, respectively.
"""
return pulumi.get(self, "email_message")
@property
@pulumi.getter(name="emailSubject")
def email_subject(self) -> Optional[str]:
"""
Subject line for email messages.
"""
return pulumi.get(self, "email_subject")
@property
@pulumi.getter(name="smsMessage")
def sms_message(self) -> Optional[str]:
"""
Message template for SMS messages. Must contain `{username}` and `{####}` placeholders, for username and temporary password, respectively.
"""
return pulumi.get(self, "sms_message")
@pulumi.output_type
class UserPoolClientAnalyticsConfiguration(dict):
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "applicationArn":
suggest = "application_arn"
elif key == "applicationId":
suggest = "application_id"
elif key == "externalId":
suggest = "external_id"
elif key == "roleArn":
suggest = "role_arn"
elif key == "userDataShared":
suggest = "user_data_shared"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in UserPoolClientAnalyticsConfiguration. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
UserPoolClientAnalyticsConfiguration.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
UserPoolClientAnalyticsConfiguration.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
application_arn: Optional[str] = None,
application_id: Optional[str] = None,
external_id: Optional[str] = None,
role_arn: Optional[str] = None,
user_data_shared: Optional[bool] = None):
"""
:param str application_arn: Application ARN for an Amazon Pinpoint application. Conflicts with `external_id` and `role_arn`.
:param str application_id: Application ID for an Amazon Pinpoint application.
:param str external_id: ID for the Analytics Configuration. Conflicts with `application_arn`.
:param str role_arn: ARN of an IAM role that authorizes Amazon Cognito to publish events to Amazon Pinpoint analytics. Conflicts with `application_arn`.
:param bool user_data_shared: If set to `true`, Amazon Cognito will include user data in the events it publishes to Amazon Pinpoint analytics.
"""
if application_arn is not None:
pulumi.set(__self__, "application_arn", application_arn)
if application_id is not None:
pulumi.set(__self__, "application_id", application_id)
if external_id is not None:
pulumi.set(__self__, "external_id", external_id)
if role_arn is not None:
pulumi.set(__self__, "role_arn", role_arn)
if user_data_shared is not None:
pulumi.set(__self__, "user_data_shared", user_data_shared)
@property
@pulumi.getter(name="applicationArn")
def application_arn(self) -> Optional[str]:
"""
Application ARN for an Amazon Pinpoint application. Conflicts with `external_id` and `role_arn`.
"""
return pulumi.get(self, "application_arn")
@property
@pulumi.getter(name="applicationId")
def application_id(self) -> Optional[str]:
"""
Application ID for an Amazon Pinpoint application.
"""
return pulumi.get(self, "application_id")
@property
@pulumi.getter(name="externalId")
def external_id(self) -> Optional[str]:
"""
ID for the Analytics Configuration. Conflicts with `application_arn`.
"""
return pulumi.get(self, "external_id")
@property
@pulumi.getter(name="roleArn")
def role_arn(self) -> Optional[str]:
"""
ARN of an IAM role that authorizes Amazon Cognito to publish events to Amazon Pinpoint analytics. Conflicts with `application_arn`.
"""
return pulumi.get(self, "role_arn")
@property
@pulumi.getter(name="userDataShared")
def user_data_shared(self) -> Optional[bool]:
"""
If set to `true`, Amazon Cognito will include user data in the events it publishes to Amazon Pinpoint analytics.
"""
return pulumi.get(self, "user_data_shared")
@pulumi.output_type
class UserPoolClientTokenValidityUnits(dict):
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "accessToken":
suggest = "access_token"
elif key == "idToken":
suggest = "id_token"
elif key == "refreshToken":
suggest = "refresh_token"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in UserPoolClientTokenValidityUnits. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
UserPoolClientTokenValidityUnits.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
UserPoolClientTokenValidityUnits.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
access_token: Optional[str] = None,
id_token: Optional[str] = None,
refresh_token: Optional[str] = None):
"""
        :param str access_token: Time unit for the value in `access_token_validity`, defaults to `hours`.
        :param str id_token: Time unit for the value in `id_token_validity`, defaults to `hours`.
        :param str refresh_token: Time unit for the value in `refresh_token_validity`, defaults to `days`.
"""
if access_token is not None:
pulumi.set(__self__, "access_token", access_token)
if id_token is not None:
pulumi.set(__self__, "id_token", id_token)
if refresh_token is not None:
pulumi.set(__self__, "refresh_token", refresh_token)
@property
@pulumi.getter(name="accessToken")
def access_token(self) -> Optional[str]:
"""
        Time unit for the value in `access_token_validity`, defaults to `hours`.
"""
return pulumi.get(self, "access_token")
@property
@pulumi.getter(name="idToken")
def id_token(self) -> Optional[str]:
"""
        Time unit for the value in `id_token_validity`, defaults to `hours`.
"""
return pulumi.get(self, "id_token")
@property
@pulumi.getter(name="refreshToken")
def refresh_token(self) -> Optional[str]:
"""
        Time unit for the value in `refresh_token_validity`, defaults to `days`.
"""
return pulumi.get(self, "refresh_token")
@pulumi.output_type
class UserPoolDeviceConfiguration(dict):
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "challengeRequiredOnNewDevice":
suggest = "challenge_required_on_new_device"
elif key == "deviceOnlyRememberedOnUserPrompt":
suggest = "device_only_remembered_on_user_prompt"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in UserPoolDeviceConfiguration. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
UserPoolDeviceConfiguration.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
UserPoolDeviceConfiguration.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
challenge_required_on_new_device: Optional[bool] = None,
device_only_remembered_on_user_prompt: Optional[bool] = None):
"""
        :param bool challenge_required_on_new_device: Whether a challenge is required on a new device.
:param bool device_only_remembered_on_user_prompt: Whether a device is only remembered on user prompt. `false` equates to "Always" remember, `true` is "User Opt In," and not using a `device_configuration` block is "No."
"""
if challenge_required_on_new_device is not None:
pulumi.set(__self__, "challenge_required_on_new_device", challenge_required_on_new_device)
if device_only_remembered_on_user_prompt is not None:
pulumi.set(__self__, "device_only_remembered_on_user_prompt", device_only_remembered_on_user_prompt)
@property
@pulumi.getter(name="challengeRequiredOnNewDevice")
def challenge_required_on_new_device(self) -> Optional[bool]:
"""
        Whether a challenge is required on a new device.
"""
return pulumi.get(self, "challenge_required_on_new_device")
@property
@pulumi.getter(name="deviceOnlyRememberedOnUserPrompt")
def device_only_remembered_on_user_prompt(self) -> Optional[bool]:
"""
Whether a device is only remembered on user prompt. `false` equates to "Always" remember, `true` is "User Opt In," and not using a `device_configuration` block is "No."
"""
return pulumi.get(self, "device_only_remembered_on_user_prompt")
@pulumi.output_type
class UserPoolEmailConfiguration(dict):
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "configurationSet":
suggest = "configuration_set"
elif key == "emailSendingAccount":
suggest = "email_sending_account"
elif key == "fromEmailAddress":
suggest = "from_email_address"
elif key == "replyToEmailAddress":
suggest = "reply_to_email_address"
elif key == "sourceArn":
suggest = "source_arn"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in UserPoolEmailConfiguration. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
UserPoolEmailConfiguration.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
UserPoolEmailConfiguration.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
configuration_set: Optional[str] = None,
email_sending_account: Optional[str] = None,
from_email_address: Optional[str] = None,
reply_to_email_address: Optional[str] = None,
source_arn: Optional[str] = None):
"""
:param str configuration_set: Email configuration set name from SES.
:param str email_sending_account: Email delivery method to use. `COGNITO_DEFAULT` for the default email functionality built into Cognito or `DEVELOPER` to use your Amazon SES configuration.
:param str from_email_address: Sender’s email address or sender’s display name with their email address (e.g., `john@example.com`, `John Smith <john@example.com>` or `\\"John Smith Ph.D.\\" <john@example.com>`). Escaped double quotes are required around display names that contain certain characters as specified in [RFC 5322](https://tools.ietf.org/html/rfc5322).
:param str reply_to_email_address: REPLY-TO email address.
:param str source_arn: ARN of the SES verified email identity to use. Required if `email_sending_account` is set to `DEVELOPER`.
"""
if configuration_set is not None:
pulumi.set(__self__, "configuration_set", configuration_set)
if email_sending_account is not None:
pulumi.set(__self__, "email_sending_account", email_sending_account)
if from_email_address is not None:
pulumi.set(__self__, "from_email_address", from_email_address)
if reply_to_email_address is not None:
pulumi.set(__self__, "reply_to_email_address", reply_to_email_address)
if source_arn is not None:
pulumi.set(__self__, "source_arn", source_arn)
@property
@pulumi.getter(name="configurationSet")
def configuration_set(self) -> Optional[str]:
"""
Email configuration set name from SES.
"""
return pulumi.get(self, "configuration_set")
@property
@pulumi.getter(name="emailSendingAccount")
def email_sending_account(self) -> Optional[str]:
"""
Email delivery method to use. `COGNITO_DEFAULT` for the default email functionality built into Cognito or `DEVELOPER` to use your Amazon SES configuration.
"""
return pulumi.get(self, "email_sending_account")
@property
@pulumi.getter(name="fromEmailAddress")
def from_email_address(self) -> Optional[str]:
"""
Sender’s email address or sender’s display name with their email address (e.g., `john@example.com`, `John Smith <john@example.com>` or `\\"John Smith Ph.D.\\" <john@example.com>`). Escaped double quotes are required around display names that contain certain characters as specified in [RFC 5322](https://tools.ietf.org/html/rfc5322).
"""
return pulumi.get(self, "from_email_address")
@property
@pulumi.getter(name="replyToEmailAddress")
def reply_to_email_address(self) -> Optional[str]:
"""
REPLY-TO email address.
"""
return pulumi.get(self, "reply_to_email_address")
@property
@pulumi.getter(name="sourceArn")
def source_arn(self) -> Optional[str]:
"""
ARN of the SES verified email identity to use. Required if `email_sending_account` is set to `DEVELOPER`.
"""
return pulumi.get(self, "source_arn")
@pulumi.output_type
class UserPoolLambdaConfig(dict):
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "createAuthChallenge":
suggest = "create_auth_challenge"
elif key == "customEmailSender":
suggest = "custom_email_sender"
elif key == "customMessage":
suggest = "custom_message"
elif key == "customSmsSender":
suggest = "custom_sms_sender"
elif key == "defineAuthChallenge":
suggest = "define_auth_challenge"
elif key == "kmsKeyId":
suggest = "kms_key_id"
elif key == "postAuthentication":
suggest = "post_authentication"
elif key == "postConfirmation":
suggest = "post_confirmation"
elif key == "preAuthentication":
suggest = "pre_authentication"
elif key == "preSignUp":
suggest = "pre_sign_up"
elif key == "preTokenGeneration":
suggest = "pre_token_generation"
elif key == "userMigration":
suggest = "user_migration"
elif key == "verifyAuthChallengeResponse":
suggest = "verify_auth_challenge_response"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in UserPoolLambdaConfig. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
UserPoolLambdaConfig.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
UserPoolLambdaConfig.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
create_auth_challenge: Optional[str] = None,
custom_email_sender: Optional['outputs.UserPoolLambdaConfigCustomEmailSender'] = None,
custom_message: Optional[str] = None,
custom_sms_sender: Optional['outputs.UserPoolLambdaConfigCustomSmsSender'] = None,
define_auth_challenge: Optional[str] = None,
kms_key_id: Optional[str] = None,
post_authentication: Optional[str] = None,
post_confirmation: Optional[str] = None,
pre_authentication: Optional[str] = None,
pre_sign_up: Optional[str] = None,
pre_token_generation: Optional[str] = None,
user_migration: Optional[str] = None,
verify_auth_challenge_response: Optional[str] = None):
"""
        :param str create_auth_challenge: ARN of the Lambda function that creates an authentication challenge.
        :param 'UserPoolLambdaConfigCustomEmailSenderArgs' custom_email_sender: A custom email sender AWS Lambda trigger. See custom_email_sender below.
        :param str custom_message: Custom Message AWS Lambda trigger.
        :param 'UserPoolLambdaConfigCustomSmsSenderArgs' custom_sms_sender: A custom SMS sender AWS Lambda trigger. See custom_sms_sender below.
        :param str define_auth_challenge: Defines the authentication challenge.
        :param str kms_key_id: Amazon Resource Name (ARN) of a Key Management Service (KMS) key. Amazon Cognito uses the key to encrypt codes and temporary passwords sent to CustomEmailSender and CustomSMSSender.
        :param str post_authentication: Post-authentication AWS Lambda trigger.
        :param str post_confirmation: Post-confirmation AWS Lambda trigger.
        :param str pre_authentication: Pre-authentication AWS Lambda trigger.
        :param str pre_sign_up: Pre-registration AWS Lambda trigger.
        :param str pre_token_generation: Allows customizing identity token claims before token generation.
        :param str user_migration: User migration Lambda config type.
        :param str verify_auth_challenge_response: Verifies the authentication challenge response.
"""
if create_auth_challenge is not None:
pulumi.set(__self__, "create_auth_challenge", create_auth_challenge)
if custom_email_sender is not None:
pulumi.set(__self__, "custom_email_sender", custom_email_sender)
if custom_message is not None:
pulumi.set(__self__, "custom_message", custom_message)
if custom_sms_sender is not None:
pulumi.set(__self__, "custom_sms_sender", custom_sms_sender)
if define_auth_challenge is not None:
pulumi.set(__self__, "define_auth_challenge", define_auth_challenge)
if kms_key_id is not None:
pulumi.set(__self__, "kms_key_id", kms_key_id)
if post_authentication is not None:
pulumi.set(__self__, "post_authentication", post_authentication)
if post_confirmation is not None:
pulumi.set(__self__, "post_confirmation", post_confirmation)
if pre_authentication is not None:
pulumi.set(__self__, "pre_authentication", pre_authentication)
if pre_sign_up is not None:
pulumi.set(__self__, "pre_sign_up", pre_sign_up)
if pre_token_generation is not None:
pulumi.set(__self__, "pre_token_generation", pre_token_generation)
if user_migration is not None:
pulumi.set(__self__, "user_migration", user_migration)
if verify_auth_challenge_response is not None:
pulumi.set(__self__, "verify_auth_challenge_response", verify_auth_challenge_response)
@property
@pulumi.getter(name="createAuthChallenge")
def create_auth_challenge(self) -> Optional[str]:
"""
        ARN of the Lambda function that creates an authentication challenge.
"""
return pulumi.get(self, "create_auth_challenge")
@property
@pulumi.getter(name="customEmailSender")
def custom_email_sender(self) -> Optional['outputs.UserPoolLambdaConfigCustomEmailSender']:
"""
        A custom email sender AWS Lambda trigger. See custom_email_sender below.
"""
return pulumi.get(self, "custom_email_sender")
@property
@pulumi.getter(name="customMessage")
def custom_message(self) -> Optional[str]:
"""
Custom Message AWS Lambda trigger.
"""
return pulumi.get(self, "custom_message")
@property
@pulumi.getter(name="customSmsSender")
def custom_sms_sender(self) -> Optional['outputs.UserPoolLambdaConfigCustomSmsSender']:
"""
        A custom SMS sender AWS Lambda trigger. See custom_sms_sender below.
"""
return pulumi.get(self, "custom_sms_sender")
@property
@pulumi.getter(name="defineAuthChallenge")
def define_auth_challenge(self) -> Optional[str]:
"""
Defines the authentication challenge.
"""
return pulumi.get(self, "define_auth_challenge")
@property
@pulumi.getter(name="kmsKeyId")
def kms_key_id(self) -> Optional[str]:
"""
        Amazon Resource Name (ARN) of a Key Management Service (KMS) key. Amazon Cognito uses the key to encrypt codes and temporary passwords sent to CustomEmailSender and CustomSMSSender.
"""
return pulumi.get(self, "kms_key_id")
@property
@pulumi.getter(name="postAuthentication")
def post_authentication(self) -> Optional[str]:
"""
Post-authentication AWS Lambda trigger.
"""
return pulumi.get(self, "post_authentication")
@property
@pulumi.getter(name="postConfirmation")
def post_confirmation(self) -> Optional[str]:
"""
Post-confirmation AWS Lambda trigger.
"""
return pulumi.get(self, "post_confirmation")
@property
@pulumi.getter(name="preAuthentication")
def pre_authentication(self) -> Optional[str]:
"""
Pre-authentication AWS Lambda trigger.
"""
return pulumi.get(self, "pre_authentication")
@property
@pulumi.getter(name="preSignUp")
def pre_sign_up(self) -> Optional[str]:
"""
Pre-registration AWS Lambda trigger.
"""
return pulumi.get(self, "pre_sign_up")
@property
@pulumi.getter(name="preTokenGeneration")
def pre_token_generation(self) -> Optional[str]:
"""
        Allows customizing identity token claims before token generation.
"""
return pulumi.get(self, "pre_token_generation")
@property
@pulumi.getter(name="userMigration")
def user_migration(self) -> Optional[str]:
"""
User migration Lambda config type.
"""
return pulumi.get(self, "user_migration")
@property
@pulumi.getter(name="verifyAuthChallengeResponse")
def verify_auth_challenge_response(self) -> Optional[str]:
"""
Verifies the authentication challenge response.
"""
return pulumi.get(self, "verify_auth_challenge_response")
@pulumi.output_type
class UserPoolLambdaConfigCustomEmailSender(dict):
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "lambdaArn":
suggest = "lambda_arn"
elif key == "lambdaVersion":
suggest = "lambda_version"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in UserPoolLambdaConfigCustomEmailSender. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
UserPoolLambdaConfigCustomEmailSender.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
UserPoolLambdaConfigCustomEmailSender.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
lambda_arn: str,
lambda_version: str):
"""
        :param str lambda_arn: The Amazon Resource Name (ARN) of the Lambda function that Amazon Cognito triggers to send email notifications to users.
:param str lambda_version: The Lambda version represents the signature of the "request" attribute in the "event" information Amazon Cognito passes to your custom email Lambda function. The only supported value is `V1_0`.
"""
pulumi.set(__self__, "lambda_arn", lambda_arn)
pulumi.set(__self__, "lambda_version", lambda_version)
@property
@pulumi.getter(name="lambdaArn")
def lambda_arn(self) -> str:
"""
        The Amazon Resource Name (ARN) of the Lambda function that Amazon Cognito triggers to send email notifications to users.
"""
return pulumi.get(self, "lambda_arn")
@property
@pulumi.getter(name="lambdaVersion")
def lambda_version(self) -> str:
"""
The Lambda version represents the signature of the "request" attribute in the "event" information Amazon Cognito passes to your custom email Lambda function. The only supported value is `V1_0`.
"""
return pulumi.get(self, "lambda_version")
@pulumi.output_type
class UserPoolLambdaConfigCustomSmsSender(dict):
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "lambdaArn":
suggest = "lambda_arn"
elif key == "lambdaVersion":
suggest = "lambda_version"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in UserPoolLambdaConfigCustomSmsSender. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
UserPoolLambdaConfigCustomSmsSender.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
UserPoolLambdaConfigCustomSmsSender.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
lambda_arn: str,
lambda_version: str):
"""
        :param str lambda_arn: The Amazon Resource Name (ARN) of the Lambda function that Amazon Cognito triggers to send SMS notifications to users.
:param str lambda_version: The Lambda version represents the signature of the "request" attribute in the "event" information Amazon Cognito passes to your custom SMS Lambda function. The only supported value is `V1_0`.
"""
pulumi.set(__self__, "lambda_arn", lambda_arn)
pulumi.set(__self__, "lambda_version", lambda_version)
@property
@pulumi.getter(name="lambdaArn")
def lambda_arn(self) -> str:
"""
        The Amazon Resource Name (ARN) of the Lambda function that Amazon Cognito triggers to send SMS notifications to users.
"""
return pulumi.get(self, "lambda_arn")
@property
@pulumi.getter(name="lambdaVersion")
def lambda_version(self) -> str:
"""
The Lambda version represents the signature of the "request" attribute in the "event" information Amazon Cognito passes to your custom SMS Lambda function. The only supported value is `V1_0`.
"""
return pulumi.get(self, "lambda_version")
@pulumi.output_type
class UserPoolPasswordPolicy(dict):
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "minimumLength":
suggest = "minimum_length"
elif key == "requireLowercase":
suggest = "require_lowercase"
elif key == "requireNumbers":
suggest = "require_numbers"
elif key == "requireSymbols":
suggest = "require_symbols"
elif key == "requireUppercase":
suggest = "require_uppercase"
elif key == "temporaryPasswordValidityDays":
suggest = "temporary_password_validity_days"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in UserPoolPasswordPolicy. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
UserPoolPasswordPolicy.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
UserPoolPasswordPolicy.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
minimum_length: Optional[int] = None,
require_lowercase: Optional[bool] = None,
require_numbers: Optional[bool] = None,
require_symbols: Optional[bool] = None,
require_uppercase: Optional[bool] = None,
temporary_password_validity_days: Optional[int] = None):
"""
:param int minimum_length: Minimum length of the password policy that you have set.
:param bool require_lowercase: Whether you have required users to use at least one lowercase letter in their password.
:param bool require_numbers: Whether you have required users to use at least one number in their password.
:param bool require_symbols: Whether you have required users to use at least one symbol in their password.
:param bool require_uppercase: Whether you have required users to use at least one uppercase letter in their password.
        :param int temporary_password_validity_days: In the password policy you have set, refers to the number of days a temporary password is valid. If the user does not sign in during this time, their password will need to be reset by an administrator.
"""
if minimum_length is not None:
pulumi.set(__self__, "minimum_length", minimum_length)
if require_lowercase is not None:
pulumi.set(__self__, "require_lowercase", require_lowercase)
if require_numbers is not None:
pulumi.set(__self__, "require_numbers", require_numbers)
if require_symbols is not None:
pulumi.set(__self__, "require_symbols", require_symbols)
if require_uppercase is not None:
pulumi.set(__self__, "require_uppercase", require_uppercase)
if temporary_password_validity_days is not None:
pulumi.set(__self__, "temporary_password_validity_days", temporary_password_validity_days)
@property
@pulumi.getter(name="minimumLength")
def minimum_length(self) -> Optional[int]:
"""
Minimum length of the password policy that you have set.
"""
return pulumi.get(self, "minimum_length")
@property
@pulumi.getter(name="requireLowercase")
def require_lowercase(self) -> Optional[bool]:
"""
Whether you have required users to use at least one lowercase letter in their password.
"""
return pulumi.get(self, "require_lowercase")
@property
@pulumi.getter(name="requireNumbers")
def require_numbers(self) -> Optional[bool]:
"""
Whether you have required users to use at least one number in their password.
"""
return pulumi.get(self, "require_numbers")
@property
@pulumi.getter(name="requireSymbols")
def require_symbols(self) -> Optional[bool]:
"""
Whether you have required users to use at least one symbol in their password.
"""
return pulumi.get(self, "require_symbols")
@property
@pulumi.getter(name="requireUppercase")
def require_uppercase(self) -> Optional[bool]:
"""
Whether you have required users to use at least one uppercase letter in their password.
"""
return pulumi.get(self, "require_uppercase")
@property
@pulumi.getter(name="temporaryPasswordValidityDays")
def temporary_password_validity_days(self) -> Optional[int]:
"""
        In the password policy you have set, refers to the number of days a temporary password is valid. If the user does not sign in during this time, their password will need to be reset by an administrator.
"""
return pulumi.get(self, "temporary_password_validity_days")
@pulumi.output_type
class UserPoolSchema(dict):
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "attributeDataType":
suggest = "attribute_data_type"
elif key == "developerOnlyAttribute":
suggest = "developer_only_attribute"
elif key == "numberAttributeConstraints":
suggest = "number_attribute_constraints"
elif key == "stringAttributeConstraints":
suggest = "string_attribute_constraints"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in UserPoolSchema. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
UserPoolSchema.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
UserPoolSchema.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
attribute_data_type: str,
name: str,
developer_only_attribute: Optional[bool] = None,
mutable: Optional[bool] = None,
number_attribute_constraints: Optional['outputs.UserPoolSchemaNumberAttributeConstraints'] = None,
required: Optional[bool] = None,
string_attribute_constraints: Optional['outputs.UserPoolSchemaStringAttributeConstraints'] = None):
"""
:param str attribute_data_type: Attribute data type. Must be one of `Boolean`, `Number`, `String`, `DateTime`.
:param str name: Name of the attribute.
:param bool developer_only_attribute: Whether the attribute type is developer only.
:param bool mutable: Whether the attribute can be changed once it has been created.
:param 'UserPoolSchemaNumberAttributeConstraintsArgs' number_attribute_constraints: Configuration block for the constraints for an attribute of the number type. Detailed below.
:param bool required: Whether a user pool attribute is required. If the attribute is required and the user does not provide a value, registration or sign-in will fail.
:param 'UserPoolSchemaStringAttributeConstraintsArgs' string_attribute_constraints: Constraints for an attribute of the string type. Detailed below.
"""
pulumi.set(__self__, "attribute_data_type", attribute_data_type)
pulumi.set(__self__, "name", name)
if developer_only_attribute is not None:
pulumi.set(__self__, "developer_only_attribute", developer_only_attribute)
if mutable is not None:
pulumi.set(__self__, "mutable", mutable)
if number_attribute_constraints is not None:
pulumi.set(__self__, "number_attribute_constraints", number_attribute_constraints)
if required is not None:
pulumi.set(__self__, "required", required)
if string_attribute_constraints is not None:
pulumi.set(__self__, "string_attribute_constraints", string_attribute_constraints)
@property
@pulumi.getter(name="attributeDataType")
def attribute_data_type(self) -> str:
"""
Attribute data type. Must be one of `Boolean`, `Number`, `String`, `DateTime`.
"""
return pulumi.get(self, "attribute_data_type")
@property
@pulumi.getter
def name(self) -> str:
"""
Name of the attribute.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="developerOnlyAttribute")
def developer_only_attribute(self) -> Optional[bool]:
"""
Whether the attribute type is developer only.
"""
return pulumi.get(self, "developer_only_attribute")
@property
@pulumi.getter
def mutable(self) -> Optional[bool]:
"""
Whether the attribute can be changed once it has been created.
"""
return pulumi.get(self, "mutable")
@property
@pulumi.getter(name="numberAttributeConstraints")
def number_attribute_constraints(self) -> Optional['outputs.UserPoolSchemaNumberAttributeConstraints']:
"""
Configuration block for the constraints for an attribute of the number type. Detailed below.
"""
return pulumi.get(self, "number_attribute_constraints")
@property
@pulumi.getter
def required(self) -> Optional[bool]:
"""
Whether a user pool attribute is required. If the attribute is required and the user does not provide a value, registration or sign-in will fail.
"""
return pulumi.get(self, "required")
@property
@pulumi.getter(name="stringAttributeConstraints")
def string_attribute_constraints(self) -> Optional['outputs.UserPoolSchemaStringAttributeConstraints']:
"""
Constraints for an attribute of the string type. Detailed below.
"""
return pulumi.get(self, "string_attribute_constraints")
@pulumi.output_type
class UserPoolSchemaNumberAttributeConstraints(dict):
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "maxValue":
suggest = "max_value"
elif key == "minValue":
suggest = "min_value"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in UserPoolSchemaNumberAttributeConstraints. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
UserPoolSchemaNumberAttributeConstraints.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
UserPoolSchemaNumberAttributeConstraints.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
max_value: Optional[str] = None,
min_value: Optional[str] = None):
"""
:param str max_value: Maximum value of an attribute that is of the number data type.
:param str min_value: Minimum value of an attribute that is of the number data type.
"""
if max_value is not None:
pulumi.set(__self__, "max_value", max_value)
if min_value is not None:
pulumi.set(__self__, "min_value", min_value)
@property
@pulumi.getter(name="maxValue")
def max_value(self) -> Optional[str]:
"""
Maximum value of an attribute that is of the number data type.
"""
return pulumi.get(self, "max_value")
@property
@pulumi.getter(name="minValue")
def min_value(self) -> Optional[str]:
"""
Minimum value of an attribute that is of the number data type.
"""
return pulumi.get(self, "min_value")
@pulumi.output_type
class UserPoolSchemaStringAttributeConstraints(dict):
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "maxLength":
suggest = "max_length"
elif key == "minLength":
suggest = "min_length"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in UserPoolSchemaStringAttributeConstraints. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
UserPoolSchemaStringAttributeConstraints.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
UserPoolSchemaStringAttributeConstraints.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
max_length: Optional[str] = None,
min_length: Optional[str] = None):
"""
:param str max_length: Maximum length of an attribute value of the string type.
:param str min_length: Minimum length of an attribute value of the string type.
"""
if max_length is not None:
pulumi.set(__self__, "max_length", max_length)
if min_length is not None:
pulumi.set(__self__, "min_length", min_length)
@property
@pulumi.getter(name="maxLength")
def max_length(self) -> Optional[str]:
"""
Maximum length of an attribute value of the string type.
"""
return pulumi.get(self, "max_length")
@property
@pulumi.getter(name="minLength")
def min_length(self) -> Optional[str]:
"""
Minimum length of an attribute value of the string type.
"""
return pulumi.get(self, "min_length")
@pulumi.output_type
class UserPoolSmsConfiguration(dict):
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "externalId":
suggest = "external_id"
elif key == "snsCallerArn":
suggest = "sns_caller_arn"
elif key == "snsRegion":
suggest = "sns_region"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in UserPoolSmsConfiguration. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
UserPoolSmsConfiguration.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
UserPoolSmsConfiguration.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
external_id: str,
sns_caller_arn: str,
sns_region: Optional[str] = None):
"""
:param str external_id: External ID used in IAM role trust relationships. For more information about using external IDs, see [How to Use an External ID When Granting Access to Your AWS Resources to a Third Party](http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_create_for-user_externalid.html).
:param str sns_caller_arn: ARN of the Amazon SNS caller. This is usually the IAM role that you've given Cognito permission to assume.
:param str sns_region: The AWS Region to use with Amazon SNS integration. You can choose the same Region as your user pool, or a supported Legacy Amazon SNS alternate Region. Amazon Cognito resources in the Asia Pacific (Seoul) AWS Region must use your Amazon SNS configuration in the Asia Pacific (Tokyo) Region. For more information, see [SMS message settings for Amazon Cognito user pools](https://docs.aws.amazon.com/cognito/latest/developerguide/user-pool-sms-settings.html).
"""
pulumi.set(__self__, "external_id", external_id)
pulumi.set(__self__, "sns_caller_arn", sns_caller_arn)
if sns_region is not None:
pulumi.set(__self__, "sns_region", sns_region)
@property
@pulumi.getter(name="externalId")
def external_id(self) -> str:
"""
External ID used in IAM role trust relationships. For more information about using external IDs, see [How to Use an External ID When Granting Access to Your AWS Resources to a Third Party](http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_create_for-user_externalid.html).
"""
return pulumi.get(self, "external_id")
@property
@pulumi.getter(name="snsCallerArn")
def sns_caller_arn(self) -> str:
"""
ARN of the Amazon SNS caller. This is usually the IAM role that you've given Cognito permission to assume.
"""
return pulumi.get(self, "sns_caller_arn")
@property
@pulumi.getter(name="snsRegion")
def sns_region(self) -> Optional[str]:
"""
The AWS Region to use with Amazon SNS integration. You can choose the same Region as your user pool, or a supported Legacy Amazon SNS alternate Region. Amazon Cognito resources in the Asia Pacific (Seoul) AWS Region must use your Amazon SNS configuration in the Asia Pacific (Tokyo) Region. For more information, see [SMS message settings for Amazon Cognito user pools](https://docs.aws.amazon.com/cognito/latest/developerguide/user-pool-sms-settings.html).
"""
return pulumi.get(self, "sns_region")
@pulumi.output_type
class UserPoolSoftwareTokenMfaConfiguration(dict):
def __init__(__self__, *,
enabled: bool):
"""
        :param bool enabled: Boolean whether to enable software token Multi-Factor (MFA) tokens, such as Time-based One-Time Password (TOTP). To disable software token MFA when `sms_configuration` is not present, the `mfa_configuration` argument must be set to `OFF` and the `software_token_mfa_configuration` configuration block must be fully removed.
"""
pulumi.set(__self__, "enabled", enabled)
@property
@pulumi.getter
def enabled(self) -> bool:
"""
        Boolean whether to enable software token Multi-Factor (MFA) tokens, such as Time-based One-Time Password (TOTP). To disable software token MFA when `sms_configuration` is not present, the `mfa_configuration` argument must be set to `OFF` and the `software_token_mfa_configuration` configuration block must be fully removed.
"""
return pulumi.get(self, "enabled")
@pulumi.output_type
class UserPoolUserAttributeUpdateSettings(dict):
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "attributesRequireVerificationBeforeUpdates":
suggest = "attributes_require_verification_before_updates"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in UserPoolUserAttributeUpdateSettings. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
UserPoolUserAttributeUpdateSettings.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
UserPoolUserAttributeUpdateSettings.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
attributes_require_verification_before_updates: Sequence[str]):
"""
:param Sequence[str] attributes_require_verification_before_updates: A list of attributes requiring verification before update. If set, the provided value(s) must also be set in `auto_verified_attributes`. Valid values: `email`, `phone_number`.
"""
pulumi.set(__self__, "attributes_require_verification_before_updates", attributes_require_verification_before_updates)
@property
@pulumi.getter(name="attributesRequireVerificationBeforeUpdates")
def attributes_require_verification_before_updates(self) -> Sequence[str]:
"""
A list of attributes requiring verification before update. If set, the provided value(s) must also be set in `auto_verified_attributes`. Valid values: `email`, `phone_number`.
"""
return pulumi.get(self, "attributes_require_verification_before_updates")
@pulumi.output_type
class UserPoolUserPoolAddOns(dict):
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "advancedSecurityMode":
suggest = "advanced_security_mode"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in UserPoolUserPoolAddOns. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
UserPoolUserPoolAddOns.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
UserPoolUserPoolAddOns.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
advanced_security_mode: str):
"""
:param str advanced_security_mode: Mode for advanced security, must be one of `OFF`, `AUDIT` or `ENFORCED`.
"""
pulumi.set(__self__, "advanced_security_mode", advanced_security_mode)
@property
@pulumi.getter(name="advancedSecurityMode")
def advanced_security_mode(self) -> str:
"""
Mode for advanced security, must be one of `OFF`, `AUDIT` or `ENFORCED`.
"""
return pulumi.get(self, "advanced_security_mode")
@pulumi.output_type
class UserPoolUsernameConfiguration(dict):
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "caseSensitive":
suggest = "case_sensitive"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in UserPoolUsernameConfiguration. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
UserPoolUsernameConfiguration.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
UserPoolUsernameConfiguration.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
case_sensitive: bool):
"""
:param bool case_sensitive: Whether username case sensitivity will be applied for all users in the user pool through Cognito APIs.
"""
pulumi.set(__self__, "case_sensitive", case_sensitive)
@property
@pulumi.getter(name="caseSensitive")
def case_sensitive(self) -> bool:
"""
Whether username case sensitivity will be applied for all users in the user pool through Cognito APIs.
"""
return pulumi.get(self, "case_sensitive")
@pulumi.output_type
class UserPoolVerificationMessageTemplate(dict):
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "defaultEmailOption":
suggest = "default_email_option"
elif key == "emailMessage":
suggest = "email_message"
elif key == "emailMessageByLink":
suggest = "email_message_by_link"
elif key == "emailSubject":
suggest = "email_subject"
elif key == "emailSubjectByLink":
suggest = "email_subject_by_link"
elif key == "smsMessage":
suggest = "sms_message"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in UserPoolVerificationMessageTemplate. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
UserPoolVerificationMessageTemplate.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
UserPoolVerificationMessageTemplate.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
default_email_option: Optional[str] = None,
email_message: Optional[str] = None,
email_message_by_link: Optional[str] = None,
email_subject: Optional[str] = None,
email_subject_by_link: Optional[str] = None,
sms_message: Optional[str] = None):
"""
:param str default_email_option: Default email option. Must be either `CONFIRM_WITH_CODE` or `CONFIRM_WITH_LINK`. Defaults to `CONFIRM_WITH_CODE`.
:param str email_message: Email message template. Must contain the `{####}` placeholder. Conflicts with `email_verification_message` argument.
        :param str email_message_by_link: Email message template for sending a confirmation link to the user; it must contain the `{##Click Here##}` placeholder.
:param str email_subject: Subject line for the email message template. Conflicts with `email_verification_subject` argument.
:param str email_subject_by_link: Subject line for the email message template for sending a confirmation link to the user.
:param str sms_message: SMS message template. Must contain the `{####}` placeholder. Conflicts with `sms_verification_message` argument.
"""
if default_email_option is not None:
pulumi.set(__self__, "default_email_option", default_email_option)
if email_message is not None:
pulumi.set(__self__, "email_message", email_message)
if email_message_by_link is not None:
pulumi.set(__self__, "email_message_by_link", email_message_by_link)
if email_subject is not None:
pulumi.set(__self__, "email_subject", email_subject)
if email_subject_by_link is not None:
pulumi.set(__self__, "email_subject_by_link", email_subject_by_link)
if sms_message is not None:
pulumi.set(__self__, "sms_message", sms_message)
@property
@pulumi.getter(name="defaultEmailOption")
def default_email_option(self) -> Optional[str]:
"""
Default email option. Must be either `CONFIRM_WITH_CODE` or `CONFIRM_WITH_LINK`. Defaults to `CONFIRM_WITH_CODE`.
"""
return pulumi.get(self, "default_email_option")
@property
@pulumi.getter(name="emailMessage")
def email_message(self) -> Optional[str]:
"""
Email message template. Must contain the `{####}` placeholder. Conflicts with `email_verification_message` argument.
"""
return pulumi.get(self, "email_message")
@property
@pulumi.getter(name="emailMessageByLink")
def email_message_by_link(self) -> Optional[str]:
"""
        Email message template for sending a confirmation link to the user; it must contain the `{##Click Here##}` placeholder.
"""
return pulumi.get(self, "email_message_by_link")
@property
@pulumi.getter(name="emailSubject")
def email_subject(self) -> Optional[str]:
"""
Subject line for the email message template. Conflicts with `email_verification_subject` argument.
"""
return pulumi.get(self, "email_subject")
@property
@pulumi.getter(name="emailSubjectByLink")
def email_subject_by_link(self) -> Optional[str]:
"""
Subject line for the email message template for sending a confirmation link to the user.
"""
return pulumi.get(self, "email_subject_by_link")
@property
@pulumi.getter(name="smsMessage")
def sms_message(self) -> Optional[str]:
"""
SMS message template. Must contain the `{####}` placeholder. Conflicts with `sms_verification_message` argument.
"""
return pulumi.get(self, "sms_message")
@pulumi.output_type
class GetUserPoolClientAnalyticsConfigurationResult(dict):
def __init__(__self__, *,
application_arn: str,
application_id: str,
external_id: str,
role_arn: str,
user_data_shared: bool):
"""
        :param str application_arn: Application ARN for an Amazon Pinpoint application. Conflicts with `external_id` and `role_arn`.
        :param str application_id: Application ID for an Amazon Pinpoint application.
        :param str external_id: ID for the Analytics Configuration. Conflicts with `application_arn`.
        :param str role_arn: ARN of an IAM role that authorizes Amazon Cognito to publish events to Amazon Pinpoint analytics. Conflicts with `application_arn`.
        :param bool user_data_shared: If set to `true`, Amazon Cognito will include user data in the events it publishes to Amazon Pinpoint analytics.
"""
pulumi.set(__self__, "application_arn", application_arn)
pulumi.set(__self__, "application_id", application_id)
pulumi.set(__self__, "external_id", external_id)
pulumi.set(__self__, "role_arn", role_arn)
pulumi.set(__self__, "user_data_shared", user_data_shared)
@property
@pulumi.getter(name="applicationArn")
def application_arn(self) -> str:
"""
        Application ARN for an Amazon Pinpoint application. Conflicts with `external_id` and `role_arn`.
"""
return pulumi.get(self, "application_arn")
@property
@pulumi.getter(name="applicationId")
def application_id(self) -> str:
"""
        Application ID for an Amazon Pinpoint application.
"""
return pulumi.get(self, "application_id")
@property
@pulumi.getter(name="externalId")
def external_id(self) -> str:
"""
        ID for the Analytics Configuration. Conflicts with `application_arn`.
"""
return pulumi.get(self, "external_id")
@property
@pulumi.getter(name="roleArn")
def role_arn(self) -> str:
"""
        ARN of an IAM role that authorizes Amazon Cognito to publish events to Amazon Pinpoint analytics. Conflicts with `application_arn`.
"""
return pulumi.get(self, "role_arn")
@property
@pulumi.getter(name="userDataShared")
def user_data_shared(self) -> bool:
"""
        If set to `true`, Amazon Cognito will include user data in the events it publishes to Amazon Pinpoint analytics.
"""
return pulumi.get(self, "user_data_shared")
@pulumi.output_type
class GetUserPoolClientTokenValidityUnitResult(dict):
def __init__(__self__, *,
access_token: str,
id_token: str,
refresh_token: str):
"""
        :param str access_token: Time unit for the value in `access_token_validity`, defaults to `hours`.
        :param str id_token: Time unit for the value in `id_token_validity`, defaults to `hours`.
        :param str refresh_token: Time unit for the value in `refresh_token_validity`, defaults to `days`.
"""
pulumi.set(__self__, "access_token", access_token)
pulumi.set(__self__, "id_token", id_token)
pulumi.set(__self__, "refresh_token", refresh_token)
@property
@pulumi.getter(name="accessToken")
def access_token(self) -> str:
"""
        Time unit for the value in `access_token_validity`, defaults to `hours`.
"""
return pulumi.get(self, "access_token")
@property
@pulumi.getter(name="idToken")
def id_token(self) -> str:
"""
        Time unit for the value in `id_token_validity`, defaults to `hours`.
"""
return pulumi.get(self, "id_token")
@property
@pulumi.getter(name="refreshToken")
def refresh_token(self) -> str:
"""
        Time unit for the value in `refresh_token_validity`, defaults to `days`.
"""
return pulumi.get(self, "refresh_token")
|
7b9fd475d59aa9d4cd5d54b51f84827cc027614c
|
40dd8330e5f78c4348bbddc2c5acfd59d793dd51
|
/mmseg/models/utils/res_layer.py
|
3dd7a6f75a168f2f7e3c61f82d309b1cf0d502bc
|
[
"Apache-2.0"
] |
permissive
|
open-mmlab/mmsegmentation
|
0d12092312e2c465ede1fd7dd9847b6f2b37049c
|
30a3f94f3e2916e27fa38c67cc3b8c69c1893fe8
|
refs/heads/main
| 2023-09-04T10:54:52.299711
| 2023-07-24T07:28:21
| 2023-07-24T07:28:21
| 272,133,018
| 6,534
| 2,375
|
Apache-2.0
| 2023-09-14T01:22:32
| 2020-06-14T04:32:33
|
Python
|
UTF-8
|
Python
| false
| false
| 3,384
|
py
|
res_layer.py
|
# Copyright (c) OpenMMLab. All rights reserved.
from mmcv.cnn import build_conv_layer, build_norm_layer
from mmengine.model import Sequential
from torch import nn as nn
class ResLayer(Sequential):
"""ResLayer to build ResNet style backbone.
Args:
block (nn.Module): block used to build ResLayer.
inplanes (int): inplanes of block.
planes (int): planes of block.
num_blocks (int): number of blocks.
        stride (int): stride of the first block. Default: 1
        dilation (int): dilation of the first block. Default: 1
        avg_down (bool): Use AvgPool instead of stride conv when
            downsampling in the bottleneck. Default: False
        conv_cfg (dict): dictionary to construct and config conv layer.
            Default: None
        norm_cfg (dict): dictionary to construct and config norm layer.
            Default: dict(type='BN')
        multi_grid (tuple[int] | None): Multi grid dilation rates of last
            stage. Default: None
        contract_dilation (bool): Whether to contract the first dilation of
            each layer. Default: False
"""
def __init__(self,
block,
inplanes,
planes,
num_blocks,
stride=1,
dilation=1,
avg_down=False,
conv_cfg=None,
norm_cfg=dict(type='BN'),
multi_grid=None,
contract_dilation=False,
**kwargs):
self.block = block
downsample = None
if stride != 1 or inplanes != planes * block.expansion:
downsample = []
conv_stride = stride
if avg_down:
conv_stride = 1
downsample.append(
nn.AvgPool2d(
kernel_size=stride,
stride=stride,
ceil_mode=True,
count_include_pad=False))
downsample.extend([
build_conv_layer(
conv_cfg,
inplanes,
planes * block.expansion,
kernel_size=1,
stride=conv_stride,
bias=False),
build_norm_layer(norm_cfg, planes * block.expansion)[1]
])
downsample = nn.Sequential(*downsample)
layers = []
if multi_grid is None:
if dilation > 1 and contract_dilation:
first_dilation = dilation // 2
else:
first_dilation = dilation
else:
first_dilation = multi_grid[0]
layers.append(
block(
inplanes=inplanes,
planes=planes,
stride=stride,
dilation=first_dilation,
downsample=downsample,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
**kwargs))
inplanes = planes * block.expansion
for i in range(1, num_blocks):
layers.append(
block(
inplanes=inplanes,
planes=planes,
stride=1,
dilation=dilation if multi_grid is None else multi_grid[i],
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
**kwargs))
super().__init__(*layers)
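# Hedged usage sketch; the BasicBlock import path is assumed from mmseg's
# ResNet backbone and the shapes are illustrative:
#
#   from mmseg.models.backbones.resnet import BasicBlock
#
#   layer = ResLayer(BasicBlock, inplanes=64, planes=128, num_blocks=2, stride=2)
#   # -> a Sequential of two BasicBlocks; the first carries the stride-2
#   #    downsample branch built above, the second runs at stride 1.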
|
ca49a40de3e05eba0787737878e095a6427434d9
|
1299ffaa8bb1cd13db0ed53598b638ec36c555ac
|
/tests/dicts/io/test_io_dict_plist.py
|
bb08902b7b05fc421c2cdc3943b73c10a548696a
|
[
"MIT"
] |
permissive
|
fabiocaccamo/python-benedict
|
c93240bf526696c7b11043fef898a461d3fd6f14
|
27d76331a00fff1fffe7890a77ffd93c8833aeda
|
refs/heads/main
| 2023-08-31T04:06:24.451591
| 2023-08-22T20:59:04
| 2023-08-22T20:59:04
| 187,202,744
| 1,118
| 51
|
MIT
| 2023-09-08T12:43:04
| 2019-05-17T11:13:40
|
Python
|
UTF-8
|
Python
| false
| false
| 7,755
|
py
|
test_io_dict_plist.py
|
import datetime as dt
from benedict.dicts.io import IODict
from .test_io_dict import io_dict_test_case
class io_dict_plist_test_case(io_dict_test_case):
"""
This class describes an IODict / plist test case.
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self._dict = {
"aString": "Doodah",
"aList": [
"A",
"B",
12,
32.1,
[1, 2, 3],
],
"aFloat": 0.1,
"anInt": 728,
"aDict": {
"anotherString": "<hello & hi there!>",
"aThirdString": "M\xe4ssig, Ma\xdf",
"aTrueValue": True,
"aFalseValue": False,
},
"someData": bytes("<binary gunk>", encoding="utf-8"),
"someMoreData": bytes("<lots of binary gunk>" * 10, encoding="utf-8"),
"aDate": dt.datetime(
1985, 4, 3, 23, 55
), # dt.datetime.fromtimestamp(481413300),
}
# self._dict = {
# 'aString': 'Doodah',
# 'aList': ['A', 'B', 12, 32.1, [1, 2, 3]],
# 'aFloat': 0.1,
# 'anInt': 728,
# 'aDict': {
# 'anotherString': '<hello & hi there!>',
# 'aThirdString': 'M\xe4ssig, Ma\xdf',
# 'aTrueValue': True,
# 'aFalseValue': False,
# },
# 'someData': b'<binary gunk>',
# 'someMoreData': b'<lots of binary gunk>' * 10,
# 'aDate': dt.datetime.fromtimestamp(481406100),
# }
self._plist = """
<!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
<plist version="1.0">
<dict>
<key>aDate</key>
<date>1985-04-03T23:55:00Z</date>
<key>aDict</key>
<dict>
<key>aFalseValue</key>
<false/>
<key>aThirdString</key>
<string>Mässig, Maß</string>
<key>aTrueValue</key>
<true/>
<key>anotherString</key>
        <string><hello & hi there!></string>
</dict>
<key>aFloat</key>
<real>0.1</real>
<key>aList</key>
<array>
<string>A</string>
<string>B</string>
<integer>12</integer>
<real>32.1</real>
<array>
<integer>1</integer>
<integer>2</integer>
<integer>3</integer>
</array>
</array>
<key>aString</key>
<string>Doodah</string>
<key>anInt</key>
<integer>728</integer>
<key>someData</key>
<data>
PGJpbmFyeSBndW5rPg==
</data>
<key>someMoreData</key>
<data>
PGxvdHMgb2YgYmluYXJ5IGd1bms+PGxvdHMgb2YgYmluYXJ5IGd1bms+PGxvdHMgb2Yg
YmluYXJ5IGd1bms+PGxvdHMgb2YgYmluYXJ5IGd1bms+PGxvdHMgb2YgYmluYXJ5IGd1
bms+PGxvdHMgb2YgYmluYXJ5IGd1bms+PGxvdHMgb2YgYmluYXJ5IGd1bms+PGxvdHMg
b2YgYmluYXJ5IGd1bms+PGxvdHMgb2YgYmluYXJ5IGd1bms+PGxvdHMgb2YgYmluYXJ5
IGd1bms+
</data>
</dict>
</plist>
"""
def test_from_plist_with_valid_data(self):
j = self._plist
# static method
d = IODict.from_plist(j)
self.assertTrue(isinstance(d, dict))
self.assertEqual(d.get("aDate"), self._dict.get("aDate"))
self.assertEqual(d, self._dict)
# constructor
d = IODict(j, format="plist")
self.assertTrue(isinstance(d, dict))
self.assertEqual(d, self._dict)
def test_from_plist_with_invalid_data(self):
j = "Lorem ipsum est in ea occaecat nisi officia."
# static method
with self.assertRaises(ValueError):
IODict.from_plist(j)
# constructor
with self.assertRaises(ValueError):
IODict(j, format="plist")
def test_from_plist_with_valid_file_valid_content(self):
filepath = self.input_path("valid-content.plist")
# static method
d = IODict.from_plist(filepath)
self.assertTrue(isinstance(d, dict))
# constructor
d = IODict(filepath, format="plist")
self.assertTrue(isinstance(d, dict))
# constructor with format autodetection
d = IODict(filepath)
self.assertTrue(isinstance(d, dict))
def test_from_plist_with_valid_file_valid_content_invalid_format(self):
filepath = self.input_path("valid-content.base64")
with self.assertRaises(ValueError):
IODict.from_plist(filepath)
filepath = self.input_path("valid-content.csv")
with self.assertRaises(ValueError):
IODict.from_plist(filepath)
filepath = self.input_path("valid-content.json")
with self.assertRaises(ValueError):
IODict.from_plist(filepath)
filepath = self.input_path("valid-content.pickle")
with self.assertRaises(ValueError):
IODict.from_plist(filepath)
filepath = self.input_path("valid-content.qs")
with self.assertRaises(ValueError):
IODict.from_plist(filepath)
filepath = self.input_path("valid-content.toml")
with self.assertRaises(ValueError):
IODict.from_plist(filepath)
filepath = self.input_path("valid-content.xml")
with self.assertRaises(ValueError):
IODict.from_plist(filepath)
filepath = self.input_path("valid-content.yml")
with self.assertRaises(ValueError):
IODict.from_plist(filepath)
def test_from_plist_with_valid_file_invalid_content(self):
filepath = self.input_path("invalid-content.plist")
# static method
with self.assertRaises(ValueError):
IODict.from_plist(filepath)
# constructor
with self.assertRaises(ValueError):
IODict(filepath, format="plist")
def test_from_plist_with_invalid_file(self):
filepath = self.input_path("invalid-file.plist")
# static method
with self.assertRaises(ValueError):
IODict.from_plist(filepath)
# constructor
with self.assertRaises(ValueError):
IODict(filepath, format="plist")
def test_from_plist_with_valid_url_valid_content(self):
url = self.input_url("valid-content.plist")
# static method
d = IODict.from_plist(url)
self.assertTrue(isinstance(d, dict))
# constructor
d = IODict(url, format="plist")
self.assertTrue(isinstance(d, dict))
# constructor with format autodetection
d = IODict(url)
self.assertTrue(isinstance(d, dict))
def test_from_plist_with_valid_url_invalid_content(self):
url = "https://github.com/fabiocaccamo/python-benedict"
# static method
with self.assertRaises(ValueError):
IODict.from_plist(url)
# constructor
with self.assertRaises(ValueError):
IODict(url, format="plist")
def test_from_plist_with_invalid_url(self):
url = "https://github.com/fabiocaccamo/python-benedict-invalid"
# static method
with self.assertRaises(ValueError):
IODict.from_plist(url)
# constructor
with self.assertRaises(ValueError):
IODict(url, format="plist")
def test_to_plist(self):
# example data taken from:
# https://docs.python.org/3/library/plistlib.html#examples
d = IODict(self._dict)
s = d.to_plist()
# print(s)
self.assertEqual(d, IODict.from_plist(s))
def test_to_plist_file(self):
d = IODict(self._dict)
filepath = self.output_path("test_to_plist_file.plist")
d.to_plist(filepath=filepath)
self.assertFileExists(filepath)
self.assertEqual(d, IODict.from_plist(filepath))
|
02f07fca283ed706211b05c87f3b03c9cec7d643
|
ca54045faebf354767db22518faf2cbe17797f86
|
/cmake/simgen/sim_collection.py
|
d4ca850c0c7f7b3e5e059a596b9ca0a3229295a4
|
[
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
open-simh/simh
|
f659873614bf54538f2c4fb7d92c8673ddcdfb85
|
d4f85d01bdf7301d5f7a2c4e51c3a84024561b26
|
refs/heads/master
| 2023-09-05T07:04:48.971084
| 2023-09-04T04:39:17
| 2023-09-04T04:39:17
| 498,052,344
| 288
| 87
|
NOASSERTION
| 2023-09-10T11:14:16
| 2022-05-30T18:16:34
|
C
|
UTF-8
|
Python
| false
| false
| 6,282
|
py
|
sim_collection.py
|
import pprint
import simgen.parse_makefile as SPM
import simgen.basic_simulator as SBS
import simgen.vax_simulators as VAXen
import simgen.utils as SU
## Special variables that should __not__ expand into their definitions:
_special_vars = frozenset(['DISPLAYL',
'DISPLAYVT',
'DISPLAY340',
'DISPLAYNG',
'DISPLAYIII'])
## Map simulator name to its class, for special cases
_special_simulators = {
"besm6": SBS.BESM6Simulator,
"i650": SBS.IBM650Simulator,
"ibm1130": SBS.IBM1130Simulator,
"pdp10-ka": SBS.KA10Simulator,
"vax": VAXen.VAXSimulator,
"vax730": VAXen.BasicVAXSimulator
}
ignored_display_macros = {
'DISPLAYVT': ['${DISPLAYD}/vt11.c'],
'DISPLAY340': ['${DISPLAYD}/type340.c'],
'DISPLAYNG': ['${DISPLAYD}/ng.c'],
'DISPLAYIII': ['${DISPLAYD}/iii.c']
}
def get_simulator_ctor(name):
"""Return the class object for special case simulators, otherwise
return the base 'SIMHBasicSimulator'
"""
return _special_simulators.get(name) or SBS.SIMHBasicSimulator
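# Illustrative behavior (a sketch, not used by the generator itself;
# 'some-ordinary-simulator' is a hypothetical name):
#   get_simulator_ctor('vax')                     -> VAXen.VAXSimulator
#   get_simulator_ctor('some-ordinary-simulator') -> SBS.SIMHBasicSimulator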
class SimCollection:
"""A collection of simulators.
"""
def __init__(self, dir_macro):
self.source_macros = {}
self.macro_uses = {}
self.simulators = {}
def get_simulator(self, name, dir_macro, _dir_path, test_name, buildrom):
sim = self.simulators.get(name)
if sim is None:
sim = (get_simulator_ctor(name))(name, dir_macro, test_name, buildrom)
self.simulators[name] = sim
return sim
def add_source_macro(self, macro, macro_def, sim):
if macro not in self.source_macros:
self.source_macros[macro] = macro_def
used = self.macro_uses.get(macro)
if used is None:
self.macro_uses[macro] = []
used = self.macro_uses[macro]
used.append(sim)
def get_simulator_vars(self, debug=0):
simvars = set()
ignored = set(self.source_macros.keys())
for macval in self.source_macros.values():
## This could be replaced by a functools.reduce()
for val in macval:
simvars = simvars.union(set(SPM.extract_variables(val)))
for sim in self.simulators.values():
simvars = simvars.union(sim.get_source_vars().union(sim.get_include_vars()))
simvars = simvars.difference(ignored).difference(_special_vars)
SU.emit_debug(debug, 2, 'simvars {0}'.format(simvars))
return simvars
def write_simulators(self, stream, debug=0, test_label='default'):
## Emit source macros
dontexpand = set([smac for smac, uses in self.macro_uses.items() if smac not in ignored_display_macros and len(uses) > 1])
SU.emit_debug(debug, 2, "{0}: dontexpand {1}".format(self.__class__.__name__, dontexpand))
if len(dontexpand) > 0:
smac_sorted = list(dontexpand)
smac_sorted.sort()
for smac in smac_sorted:
stream.write('\n\n')
stream.write('set({0}\n'.format(smac))
stream.write('\n'.join([' ' * 4 + f for f in self.source_macros[smac]]))
stream.write(')')
stream.write('\n\n')
## Emit the simulators
simnames = list(self.simulators.keys())
simnames.sort()
SU.emit_debug(debug, 2, "{0}: Writing {1}".format(self.__class__.__name__, simnames))
for simname in simnames:
sim = self.simulators[simname]
## Patch up the simulator source lists, expanding macros that aren't
## in the macro sources:
sim.sources = self.expand_sources(sim.sources, dontexpand, debug)
stream.write('\n')
sim.write_simulator(stream, 0, test_label)
def write_unit_tests(self, stream, debug=0, test_label='default'):
dontexpand = set([smac for smac, uses in self.macro_uses.items() if len(uses) > 1])
simnames = list(self.simulators.keys())
simnames.sort()
SU.emit_debug(debug, 2, "{0}: Writing {1}".format(self.__class__.__name__, simnames))
for simname in simnames:
sim = self.simulators[simname]
## Patch up the simulator source lists, expanding macros that aren't
## in the macro sources:
sim.sources = self.expand_sources(sim.sources, dontexpand, debug)
sim.write_unit_test(stream, 0, test_label)
def expand_sources(self, srcs, dontexpand, debug=0):
updated_srcs = []
for src in srcs:
SU.emit_debug(debug, 2, "{0}: Source {1}".format(self.__class__.__name__, src))
m = SPM._var_rx.match(src)
if m and m[1] not in dontexpand.union(_special_vars):
SU.emit_debug(debug, 2, "{0}: Expanding {1}".format(self.__class__.__name__, m[1]))
varexp = self.source_macros.get(m[1])
if varexp is not None:
updated_srcs.extend(self.source_macros[m[1]])
else:
print('!! Could not expand {0}'.format(m[1]))
else:
updated_srcs.append(src)
if updated_srcs == srcs:
return srcs
else:
return self.expand_sources(updated_srcs, dontexpand, debug)
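    # Example of the fixed-point expansion above (hypothetical macro table):
    #   self.source_macros = {'PDP11D': ['pdp11/pdp11_cpu.c']}
    #   self.expand_sources(['${PDP11D}', 'scp.c'], dontexpand=set())
    #     -> ['pdp11/pdp11_cpu.c', 'scp.c']
    # '${PDP11D}' expands once; literal file names pass through unchanged.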
def __len__(self):
return len(self.simulators)
if '_dispatch' in pprint.PrettyPrinter.__dict__:
def simcoll_pprinter(pprinter, simcoll, stream, indent, allowance, context, level):
cls = simcoll.__class__
stream.write(cls.__name__ + '(')
indent += len(cls.__name__) + 1
pprinter._format(simcoll.source_macros, stream, indent, allowance + 2, context, level)
stream.write(',\n' + ' ' * indent)
uses_dict = dict([(sim, len(uses)) for (sim, uses) in simcoll.macro_uses.items()])
pprinter._format(uses_dict, stream, indent, allowance + 2, context, level)
stream.write(',\n' + ' ' * indent)
pprinter._format(simcoll.simulators, stream, indent, allowance + 2, context, level)
stream.write(')')
pprint.PrettyPrinter._dispatch[SimCollection.__repr__] = simcoll_pprinter
|
2ca5a3f6a8904e588cd71a8858eb4247df066b50
|
eda6e7b8f399dedcdb960f4b48a2134b978f8d83
|
/bnpy/init/FromLP.py
|
3aaf5945df9ec9da60112614118fb6a69392031c
|
[
"BSD-3-Clause"
] |
permissive
|
bnpy/bnpy
|
8ed61bc4fe2f0ed99e0254c11a21c27c0cee59b2
|
ffc2242427451aa6a61dcac1473c47577a5ade6f
|
refs/heads/master
| 2023-08-16T06:49:58.716279
| 2022-10-15T15:59:12
| 2022-10-15T15:59:12
| 75,731,181
| 197
| 54
|
NOASSERTION
| 2023-07-21T20:59:10
| 2016-12-06T12:56:07
|
Python
|
UTF-8
|
Python
| false
| false
| 4,915
|
py
|
FromLP.py
|
'''
FromLP.py
Initialize global params of a bnpy model using a set of local parameters
'''
import numpy as np
from bnpy.init.FromTruth import convertLPFromHardToSoft
import logging
Log = logging.getLogger('bnpy')
Log.setLevel(logging.DEBUG)
def init_global_params(hmodel, Data, initname='', initLP=None,
**kwargs):
''' Initialize (in-place) the global params of the given hmodel.
Parameters
-------
hmodel : bnpy.HModel
model object to initialize
Data : bnpy.data.DataObj
Dataset to use to drive initialization.
hmodel.obsModel dimensions must match this dataset.
initname : str, ['contigblocksLP', 'sacbLP']
name for the routine to use
Post Condition
--------
hmodel has valid global parameters.
'''
if isinstance(initLP, dict):
return initHModelFromLP(hmodel, Data, initLP)
elif initname == 'sacbLP':
Log.info('Initialization: Sequential Allocation of Contig Blocks')
SS = initSS_SeqAllocContigBlocks(Data, hmodel, **kwargs)
hmodel.update_global_params(SS)
return None
elif initname == 'contigblocksLP':
LP = makeLP_ContigBlocks(Data, **kwargs)
return initHModelFromLP(hmodel, Data, LP)
else:
raise ValueError('Unrecognized initname: %s' % (initname))
def initHModelFromLP(hmodel, Data, LP):
''' Initialize provided bnpy HModel given data and local params.
Executes summary step and global step given the provided LP.
Post Condition
------
hmodel has valid global parameters.
'''
if 'resp' not in LP:
if 'Z' not in LP:
raise ValueError("Bad LP. Require either 'resp' or 'Z' fields.")
LP = convertLPFromHardToSoft(LP, Data)
assert 'resp' in LP
if hasattr(hmodel.allocModel, 'initLPFromResp'):
LP = hmodel.allocModel.initLPFromResp(Data, LP)
SS = hmodel.get_global_suff_stats(Data, LP)
hmodel.update_global_params(SS)
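# Typical calls (sketch; Zhard and resp are hypothetical local-parameter arrays):
#   initHModelFromLP(hmodel, Data, dict(Z=Zhard))    # hard labels, converted
#   initHModelFromLP(hmodel, Data, dict(resp=resp))  # soft resp, used as-is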
def makeLP_ContigBlocks(Data, K=0, KperSeq=None, initNumSeq=None, **kwargs):
''' Create local parameters via a contiguous block hard segmentation.
Divide chosen sequences up into KperSeq contiguous blocks,
each block evenly sized, and assign each block to a unique state.
Returns
-------
LP : dict of local parameters
* resp : 2D array, Natom x K
'''
if initNumSeq is None:
initNumSeq = Data.nDoc
initNumSeq = np.minimum(initNumSeq, Data.nDoc)
if KperSeq is None:
assert K > 0
KperSeq = int(np.ceil(K / float(initNumSeq)))
if KperSeq * initNumSeq > K:
print('WARNING: using initial K larger than suggested.')
K = KperSeq * initNumSeq
assert KperSeq > 0
# Select subset of all sequences to use for initialization
    if initNumSeq == Data.nDoc:
        chosenSeqIDs = np.arange(initNumSeq)
    else:
        # PRNG was referenced here without being defined; seed a local
        # generator (assumed fix) so the subset selection is reproducible.
        PRNG = np.random.RandomState(int(kwargs.get('seed', 0)))
        chosenSeqIDs = PRNG.choice(Data.nDoc, initNumSeq, replace=False)
# Make hard segmentation at each chosen sequence
resp = np.zeros((Data.nObs, K))
jstart = 0
for n in chosenSeqIDs:
start = int(Data.doc_range[n])
curT = Data.doc_range[n + 1] - start
# Determine how long each block is for blocks 0, 1, ... KperSeq-1
cumsumBlockSizes = calcBlockSizesForCurSeq(KperSeq, curT)
for j in range(KperSeq):
Tstart = start + cumsumBlockSizes[j]
Tend = start + cumsumBlockSizes[j + 1]
resp[Tstart:Tend, jstart + j] = 1.0
jstart = jstart + j + 1
return dict(resp=resp)
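# Example (sketch, assuming a Data object with 2 sequences of 10 atoms each):
#   LP = makeLP_ContigBlocks(Data, K=4, initNumSeq=2)
#   LP['resp'].shape   -> (20, 4)
# Each chosen sequence is cut into KperSeq=2 contiguous blocks, and each block
# is assigned wholly to its own state.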
def calcBlockSizesForCurSeq(KperSeq, curT):
''' Divide a sequence of length curT into KperSeq contig blocks
Examples
---------
    >>> calcBlockSizesForCurSeq(3, 20)
[0, 7, 14, 20]
Returns
---------
c : 1D array, size KperSeq+1
* block t indices are selected by c[t]:c[t+1]
'''
blockSizes = (curT // KperSeq) * np.ones(KperSeq)
    # Cast to int: numpy rejects float slice indices, and curT - sum() is float.
    remMass = int(curT - np.sum(blockSizes))
    blockSizes[:remMass] += 1
cumsumBlockSizes = np.cumsum(np.hstack([0, blockSizes]))
return np.asarray(cumsumBlockSizes, dtype=np.int32)
def initSS_SeqAllocContigBlocks(Data, hmodel, **kwargs):
if 'seed' in kwargs:
seed = int(kwargs['seed'])
else:
seed = 0
# Traverse sequences in a random order
PRNG = np.random.RandomState(seed)
assert hasattr(Data, 'nDoc')
randOrderIDs = list(range(Data.nDoc))
PRNG.shuffle(randOrderIDs)
SS = None
for orderID, n in enumerate(randOrderIDs):
hmodel, SS = initSingleSeq_SeqAllocContigBlocks(
n, Data, hmodel,
SS=SS,
**kwargs)
if orderID == len(randOrderIDs) - 1 \
or (orderID + 1) % 5 == 0 or orderID < 2:
Log.info(' seq. %3d/%d | Ktotal=%d'
% (orderID + 1, len(randOrderIDs), SS.K))
return SS
|
677d98e5a6ca6f94f06c279884232439dd2898d6
|
e26973efecccf121ec6e6b1b39f1dc35021571cb
|
/runtests.py
|
a53352c32242c79bf3c148ad51d27230df5c7337
|
[
"BSD-2-Clause"
] |
permissive
|
jambonsw/django-improved-user
|
d9eeffd06d14fc22dcdd03b775b040ccd8c0c29c
|
26bb15e0de9957364f5ce5841ef49f9a059128f4
|
refs/heads/development
| 2023-08-30T23:29:39.003139
| 2022-05-19T11:27:12
| 2022-05-19T11:27:12
| 71,849,663
| 126
| 19
|
BSD-2-Clause
| 2023-03-06T12:57:04
| 2016-10-25T02:00:47
|
Python
|
UTF-8
|
Python
| false
| false
| 3,892
|
py
|
runtests.py
|
#!/usr/bin/env python3
"""Utility script to setup Django and run tests against package"""
import sys
from os.path import dirname, join
try:
from django import setup
from django.apps import apps
from django.conf import settings
from django.core.management import execute_from_command_line
except ImportError:
print(
"Could not load Django.\n"
"Try running `flit install --symlink` before `./runtests.py`\n"
"or run `make test` (or `make tox`) for an all in one solution",
)
exit(-1)
try:
import improved_user # noqa: F401 pylint: disable=unused-import
except ImportError:
print(
"Could not load improved_user!\n"
"Try running `flit install --symlink` before `./runtests.py`\n"
"or run `make test` (or `make tox`) for an all in one solution",
)
exit(-1)
def configure_django():
"""Configure Django before tests"""
settings.configure(
SECRET_KEY="m-4Umd2!_nQQX.Ux6dYaiffmgRFpFxri!hqmffqBAhuAu*-!9n",
DATABASES={
"default": {
"ENGINE": "django.db.backends.sqlite3",
"NAME": ":memory:",
},
},
INSTALLED_APPS=[
"django.contrib.admin",
"django.contrib.auth",
"django.contrib.contenttypes",
"django.contrib.messages",
"django.contrib.sessions",
"django.contrib.sites",
"improved_user.apps.ImprovedUserConfig",
],
SITE_ID=1,
AUTH_USER_MODEL="improved_user.User",
FIXTURE_DIRS=(join(dirname(__file__), "tests", "fixtures"),),
TEMPLATES=[
{
"BACKEND": "django.template.backends.django.DjangoTemplates",
"DIRS": [],
"APP_DIRS": True,
"OPTIONS": {
"context_processors": [
"django.contrib.auth.context_processors.auth",
"django.contrib.messages.context_processors.messages",
"django.template.context_processors.request",
],
},
}
],
MIDDLEWARE=[
"django.contrib.sessions.middleware.SessionMiddleware",
"django.contrib.auth.middleware.AuthenticationMiddleware",
"django.contrib.messages.middleware.MessageMiddleware",
],
)
setup()
def run_test_suite(*args):
"""Run the test suite"""
test_args = list(args) or []
execute_from_command_line(["manage.py", "test"] + test_args)
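# Example invocations (sketch; the module path is hypothetical):
#   ./runtests.py                   # run the whole suite
#   ./runtests.py tests.test_basic  # run a single test module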
def check_missing_migrations():
"""Check that user model and migration files are in sync"""
from django.db.migrations.autodetector import MigrationAutodetector
from django.db.migrations.loader import MigrationLoader
from django.db.migrations.questioner import (
NonInteractiveMigrationQuestioner as Questioner,
)
from django.db.migrations.state import ProjectState
loader = MigrationLoader(None, ignore_no_migrations=True)
conflicts = loader.detect_conflicts()
if conflicts:
raise Exception(
"Migration conflicts detected. Please fix your migrations."
)
questioner = Questioner(dry_run=True, specified_apps=None)
autodetector = MigrationAutodetector(
loader.project_state(),
ProjectState.from_apps(apps),
questioner,
)
changes = autodetector.changes(
graph=loader.graph,
trim_to_apps=None,
convert_apps=None,
migration_name=None,
)
if changes:
raise Exception(
"Migration changes detected. "
"Please update or add to the migration file as appropriate"
)
print("Migration-checker detected no problems.")
if __name__ == "__main__":
configure_django()
check_missing_migrations()
run_test_suite(*sys.argv[1:])
|
54fa143b6cd4aaf4f308dc643cda61bc49c295b5
|
749af8e81d5ccd2d8714a34434a9c77772df551b
|
/statsmodels/sandbox/tsa/__init__.py
|
aa72b549e7d93cc6737eaf6966d3e52a6c744aaf
|
[
"BSD-3-Clause"
] |
permissive
|
statsmodels/statsmodels
|
98ca67192c08bcc611ed3a75edaded2c7181ab98
|
01b19d7d111b29c183f620ff0a949ef6391ff8ee
|
refs/heads/main
| 2023-09-05T13:05:49.497076
| 2023-09-01T10:54:50
| 2023-09-01T10:54:50
| 1,885,237
| 8,666
| 3,023
|
BSD-3-Clause
| 2023-09-13T17:51:48
| 2011-06-12T17:04:50
|
Python
|
UTF-8
|
Python
| false
| false
| 840
|
py
|
__init__.py
|
'''functions and classes time series analysis
Status
------
work in progress
arima.py
^^^^^^^^
ARIMA : initial class, uses conditional least squares, needs merging with new class
arma2ar
arma2ma
arma_acf
arma_acovf
arma_generate_sample
arma_impulse_response
deconvolve
index2lpol
lpol2index
mcarma22
movstat.py
^^^^^^^^^^
I had tested the next group against matlab, but where are the tests?
acf
acovf
ccf
ccovf
pacf_ols
pacf_yw
These had incorrect array sizes and were my first implementation; slow compared
to the cumsum version in la and the cython version in pandas.
These need checking, and merging/comparing with the new class MovStats
check_movorder
expandarr
movmean :
movmoment : corrected cutoff
movorder
movvar
'''
#from arima import *
from .movstat import movorder, movmean, movvar, movmoment # noqa:F401
#from stattools import *
|
6be75bba09a8292f7389c7b15ed6b95a646b9ab9
|
fa1ad2e2ac7e376fc7cb3b3a6e1bb88eed3e80be
|
/olap/ByConity/tests/testflows/map_type/tests/feature.py
|
5d7c900d59110e469dd2a36bcaae3e86451a7069
|
[
"Apache-2.0",
"BSD-3-Clause",
"MIT"
] |
permissive
|
alldatacenter/alldata
|
7bc7713c9f1d56ad6b8e59ea03206d1073b7e047
|
8d5f9a2d49ab8f9e85ccf058cb02c2fda287afc6
|
refs/heads/master
| 2023-08-05T07:32:25.442740
| 2023-08-03T13:17:24
| 2023-08-03T13:17:24
| 213,321,771
| 774
| 250
|
Apache-2.0
| 2023-09-06T17:35:32
| 2019-10-07T07:36:18
| null |
UTF-8
|
Python
| false
| false
| 64,379
|
py
|
feature.py
|
# -*- coding: utf-8 -*-
import time
from testflows.core import *
from testflows.asserts import error
from map_type.requirements import *
from map_type.tests.common import *
@TestOutline
def select_map(self, map, output, exitcode=0, message=None):
"""Create a map using select statement.
"""
node = self.context.node
with When("I create a map using select", description=map):
r = node.query(f"SELECT {map}", exitcode=exitcode, message=message)
with Then("I expect output to match", description=output):
assert r.output == output, error()
@TestOutline
def table_map(self, type, data, select, filter, exitcode, message, check_insert=False, order_by=None):
"""Check using a map column in a table.
"""
uid = getuid()
node = self.context.node
if order_by is None:
order_by = "m"
with Given(f"table definition with {type}"):
sql = "CREATE TABLE {name} (m " + type + ") ENGINE = MergeTree() ORDER BY " + order_by
with And(f"I create a table", description=sql):
table = create_table(name=uid, statement=sql)
with When("I insert data into the map column"):
if check_insert:
node.query(f"INSERT INTO {table} VALUES {data}", exitcode=exitcode, message=message)
else:
node.query(f"INSERT INTO {table} VALUES {data}")
if not check_insert:
with And("I try to read from the table"):
node.query(f"SELECT {select} FROM {table} WHERE {filter} FORMAT JSONEachRow", exitcode=exitcode, message=message)
@TestOutline(Scenario)
@Requirements(
RQ_SRS_018_ClickHouse_Map_DataType_Key_String("1.0")
)
@Examples("map output", [
("map('',1)", "{'':1}", Name("empty string")),
("map('hello',1)", "{'hello':1}", Name("non-empty string")),
("map('Gãńdåłf_Thê_Gręât',1)", "{'Gãńdåłf_Thê_Gręât':1}", Name("utf-8 string")),
("map('hello there',1)", "{'hello there':1}", Name("multi word string")),
("map('hello',1,'there',2)", "{'hello':1,'there':2}", Name("multiple keys")),
("map(toString(1),1)", "{'1':1}", Name("toString")),
("map(toFixedString('1',1),1)", "{'1':1}", Name("toFixedString")),
("map(toNullable('1'),1)", "{'1':1}", Name("Nullable")),
("map(toNullable(NULL),1)", "{NULL:1}", Name("Nullable(NULL)")),
("map(toLowCardinality('1'),1)", "{'1':1}", Name("LowCardinality(String)")),
("map(toLowCardinality(toFixedString('1',1)),1)", "{'1':1}", Name("LowCardinality(FixedString)")),
], row_format="%20s,%20s")
def select_map_with_key_string(self, map, output):
"""Create a map using select that has key string type.
"""
select_map(map=map, output=output)
@TestOutline(Scenario)
@Requirements(
RQ_SRS_018_ClickHouse_Map_DataType_Value_String("1.0")
)
@Examples("map output", [
("map('key','')", "{'key':''}", Name("empty string")),
("map('key','hello')", "{'key':'hello'}", Name("non-empty string")),
("map('key','Gãńdåłf_Thê_Gręât')", "{'key':'Gãńdåłf_Thê_Gręât'}", Name("utf-8 string")),
("map('key','hello there')", "{'key':'hello there'}", Name("multi word string")),
("map('key','hello','key2','there')", "{'key':'hello','key2':'there'}", Name("multiple keys")),
("map('key',toString(1))", "{'key':'1'}", Name("toString")),
("map('key',toFixedString('1',1))", "{'key':'1'}", Name("toFixedString")),
("map('key',toNullable('1'))", "{'key':'1'}", Name("Nullable")),
("map('key',toNullable(NULL))", "{'key':NULL}", Name("Nullable(NULL)")),
("map('key',toLowCardinality('1'))", "{'key':'1'}", Name("LowCardinality(String)")),
("map('key',toLowCardinality(toFixedString('1',1)))", "{'key':'1'}", Name("LowCardinality(FixedString)")),
], row_format="%20s,%20s")
def select_map_with_value_string(self, map, output):
"""Create a map using select that has value string type.
"""
select_map(map=map, output=output)
@TestOutline(Scenario)
@Requirements(
RQ_SRS_018_ClickHouse_Map_DataType_Value_Array("1.0")
)
@Examples("map output", [
("map('key',[])", "{'key':[]}", Name("empty Array")),
("map('key',[1,2,3])", "{'key':[1,2,3]}", Name("non-empty array of ints")),
("map('key',['1','2','3'])", "{'key':['1','2','3']}", Name("non-empty array of strings")),
("map('key',[map(1,2),map(2,3)])", "{'key':[{1:2},{2:3}]}", Name("non-empty array of maps")),
("map('key',[map(1,[map(1,[1])]),map(2,[map(2,[3])])])", "{'key':[{1:[{1:[1]}]},{2:[{2:[3]}]}]}", Name("non-empty array of maps of array of maps")),
])
def select_map_with_value_array(self, map, output):
"""Create a map using select that has value array type.
"""
select_map(map=map, output=output)
@TestOutline(Scenario)
@Requirements(
RQ_SRS_018_ClickHouse_Map_DataType_Value_Integer("1.0")
)
@Examples("map output", [
("(map(1,127,2,0,3,-128))", '{1:127,2:0,3:-128}', Name("Int8")),
("(map(1,0,2,255))", '{1:0,2:255}', Name("UInt8")),
("(map(1,32767,2,0,3,-32768))", '{1:32767,2:0,3:-32768}', Name("Int16")),
("(map(1,0,2,65535))", '{1:0,2:65535}', Name("UInt16")),
("(map(1,2147483647,2,0,3,-2147483648))", '{1:2147483647,2:0,3:-2147483648}', Name("Int32")),
("(map(1,0,2,4294967295))", '{1:0,2:4294967295}', Name("UInt32")),
("(map(1,9223372036854775807,2,0,3,-9223372036854775808))", '{1:"9223372036854775807",2:"0",3:"-9223372036854775808"}', Name("Int64")),
("(map(1,0,2,18446744073709551615))", '{1:0,2:18446744073709551615}', Name("UInt64")),
("(map(1,170141183460469231731687303715884105727,2,0,3,-170141183460469231731687303715884105728))", '{1:1.7014118346046923e38,2:0,3:-1.7014118346046923e38}', Name("Int128")),
("(map(1,57896044618658097711785492504343953926634992332820282019728792003956564819967,2,0,3,-57896044618658097711785492504343953926634992332820282019728792003956564819968))", '{1:5.78960446186581e76,2:0,3:-5.78960446186581e76}', Name("Int256")),
("(map(1,0,2,115792089237316195423570985008687907853269984665640564039457584007913129639935))", '{1:0,2:1.157920892373162e77}', Name("UInt256")),
("(map(1,toNullable(1)))", '{1:1}', Name("toNullable")),
("(map(1,toNullable(NULL)))", '{1:NULL}', Name("toNullable(NULL)")),
])
def select_map_with_value_integer(self, map, output):
"""Create a map using select that has value integer type.
"""
select_map(map=map, output=output)
@TestOutline(Scenario)
@Requirements(
RQ_SRS_018_ClickHouse_Map_DataType_Key_Integer("1.0")
)
@Examples("map output", [
("(map(127,1,0,1,-128,1))", '{127:1,0:1,-128:1}', Name("Int8")),
("(map(0,1,255,1))", '{0:1,255:1}', Name("UInt8")),
("(map(32767,1,0,1,-32768,1))", '{32767:1,0:1,-32768:1}', Name("Int16")),
("(map(0,1,65535,1))", '{0:1,65535:1}', Name("UInt16")),
("(map(2147483647,1,0,1,-2147483648,1))", '{2147483647:1,0:1,-2147483648:1}', Name("Int32")),
("(map(0,1,4294967295,1))", '{0:1,4294967295:1}', Name("UInt32")),
("(map(9223372036854775807,1,0,1,-9223372036854775808,1))", '{"9223372036854775807":1,"0":1,"-9223372036854775808":1}', Name("Int64")),
("(map(0,1,18446744073709551615,1))", '{0:1,18446744073709551615:1}', Name("UInt64")),
("(map(170141183460469231731687303715884105727,1,0,1,-170141183460469231731687303715884105728,1))", '{1.7014118346046923e38:1,0:1,-1.7014118346046923e38:1}', Name("Int128")),
("(map(57896044618658097711785492504343953926634992332820282019728792003956564819967,1,0,1,-57896044618658097711785492504343953926634992332820282019728792003956564819968,1))", '{5.78960446186581e76:1,0:1,-5.78960446186581e76:1}', Name("Int256")),
("(map(0,1,115792089237316195423570985008687907853269984665640564039457584007913129639935,1))", '{0:1,1.157920892373162e77:1}', Name("UInt256")),
("(map(toNullable(1),1))", '{1:1}', Name("toNullable")),
("(map(toNullable(NULL),1))", '{NULL:1}', Name("toNullable(NULL)")),
])
def select_map_with_key_integer(self, map, output):
"""Create a map using select that has key integer type.
"""
select_map(map=map, output=output)
@TestOutline(Scenario)
@Requirements(
RQ_SRS_018_ClickHouse_Map_DataType_Key_String("1.0")
)
@Examples("type data output", [
("Map(String, Int8)", "('2020-01-01', map('',1))", '{"d":"2020-01-01","m":{"":1}}', Name("empty string")),
("Map(String, Int8)", "('2020-01-01', map('hello',1))", '{"d":"2020-01-01","m":{"hello":1}}', Name("non-empty string")),
("Map(String, Int8)", "('2020-01-01', map('Gãńdåłf_Thê_Gręât',1))", '{"d":"2020-01-01","m":{"Gãńdåłf_Thê_Gręât":1}}', Name("utf-8 string")),
("Map(String, Int8)", "('2020-01-01', map('hello there',1))", '{"d":"2020-01-01","m":{"hello there":1}}', Name("multi word string")),
("Map(String, Int8)", "('2020-01-01', map('hello',1,'there',2))", '{"d":"2020-01-01","m":{"hello":1,"there":2}}', Name("multiple keys")),
("Map(String, Int8)", "('2020-01-01', map(toString(1),1))", '{"d":"2020-01-01","m":{"1":1}}', Name("toString")),
("Map(FixedString(1), Int8)", "('2020-01-01', map(toFixedString('1',1),1))", '{"d":"2020-01-01","m":{"1":1}}', Name("FixedString")),
("Map(Nullable(String), Int8)", "('2020-01-01', map(toNullable('1'),1))", '{"d":"2020-01-01","m":{"1":1}}', Name("Nullable")),
("Map(Nullable(String), Int8)", "('2020-01-01', map(toNullable(NULL),1))", '{"d":"2020-01-01","m":{null:1}}', Name("Nullable(NULL)")),
("Map(LowCardinality(String), Int8)", "('2020-01-01', map(toLowCardinality('1'),1))", '{"d":"2020-01-01","m":{"1":1}}', Name("LowCardinality(String)")),
("Map(LowCardinality(String), Int8)", "('2020-01-01', map('1',1))", '{"d":"2020-01-01","m":{"1":1}}', Name("LowCardinality(String) cast from String")),
("Map(LowCardinality(String), LowCardinality(String))", "('2020-01-01', map('1','1'))", '{"d":"2020-01-01","m":{"1":"1"}}', Name("LowCardinality(String) for key and value")),
("Map(LowCardinality(FixedString(1)), Int8)", "('2020-01-01', map(toLowCardinality(toFixedString('1',1)),1))", '{"d":"2020-01-01","m":{"1":1}}', Name("LowCardinality(FixedString)")),
])
def table_map_with_key_string(self, type, data, output):
"""Check what values we can insert into map type column with key string.
"""
insert_into_table(type=type, data=data, output=output)
@TestOutline(Scenario)
@Requirements(
RQ_SRS_018_ClickHouse_Map_DataType_Key_String("1.0")
)
@Examples("type data output select", [
("Map(String, Int8)", "('2020-01-01', map('',1))", '{"m":1}', "m[''] AS m", Name("empty string")),
("Map(String, Int8)", "('2020-01-01', map('hello',1))", '{"m":1}', "m['hello'] AS m", Name("non-empty string")),
("Map(String, Int8)", "('2020-01-01', map('Gãńdåłf_Thê_Gręât',1))", '{"m":1}', "m['Gãńdåłf_Thê_Gręât'] AS m", Name("utf-8 string")),
("Map(String, Int8)", "('2020-01-01', map('hello there',1))", '{"m":1}', "m['hello there'] AS m", Name("multi word string")),
("Map(String, Int8)", "('2020-01-01', map('hello',1,'there',2))", '{"m":1}', "m['hello'] AS m", Name("multiple keys")),
("Map(String, Int8)", "('2020-01-01', map(toString(1),1))", '{"m":1}', "m['1'] AS m", Name("toString")),
("Map(FixedString(1), Int8)", "('2020-01-01', map(toFixedString('1',1),1))", '{"m":1}', "m['1'] AS m", Name("FixedString")),
("Map(Nullable(String), Int8)", "('2020-01-01', map(toNullable('1'),1))", '{"m":1}}', "m['1'] AS m", Name("Nullable")),
("Map(Nullable(String), Int8)", "('2020-01-01', map(toNullable(NULL),1))", '{"m":1}', "m[null] AS m", Name("Nullable(NULL)")),
("Map(LowCardinality(String), Int8)", "('2020-01-01', map(toLowCardinality('1'),1))", '{"m":1}}', "m['1'] AS m", Name("LowCardinality(String)")),
("Map(LowCardinality(String), Int8)", "('2020-01-01', map('1',1))", '{"m":1}', "m['1'] AS m", Name("LowCardinality(String) cast from String")),
("Map(LowCardinality(String), LowCardinality(String))", "('2020-01-01', map('1','1'))", '{"m":"1"}', "m['1'] AS m", Name("LowCardinality(String) for key and value")),
("Map(LowCardinality(FixedString(1)), Int8)", "('2020-01-01', map(toLowCardinality(toFixedString('1',1)),1))", '{"m":1}', "m['1'] AS m", Name("LowCardinality(FixedString)")),
])
def table_map_select_key_with_key_string(self, type, data, output, select):
"""Check what values we can insert into map type column with key string and if key can be selected.
"""
insert_into_table(type=type, data=data, output=output, select=select)
@TestOutline(Scenario)
@Requirements(
RQ_SRS_018_ClickHouse_Map_DataType_Value_String("1.0")
)
@Examples("type data output", [
("Map(String, String)", "('2020-01-01', map('key',''))", '{"d":"2020-01-01","m":{"key":""}}', Name("empty string")),
("Map(String, String)", "('2020-01-01', map('key','hello'))", '{"d":"2020-01-01","m":{"key":"hello"}}', Name("non-empty string")),
("Map(String, String)", "('2020-01-01', map('key','Gãńdåłf_Thê_Gręât'))", '{"d":"2020-01-01","m":{"key":"Gãńdåłf_Thê_Gręât"}}', Name("utf-8 string")),
("Map(String, String)", "('2020-01-01', map('key', 'hello there'))", '{"d":"2020-01-01","m":{"key":"hello there"}}', Name("multi word string")),
("Map(String, String)", "('2020-01-01', map('key','hello','key2','there'))", '{"d":"2020-01-01","m":{"key":"hello","key2":"there"}}', Name("multiple keys")),
("Map(String, String)", "('2020-01-01', map('key', toString(1)))", '{"d":"2020-01-01","m":{"key":"1"}}', Name("toString")),
("Map(String, FixedString(1))", "('2020-01-01', map('key',toFixedString('1',1)))", '{"d":"2020-01-01","m":{"key":"1"}}', Name("FixedString")),
("Map(String, Nullable(String))", "('2020-01-01', map('key',toNullable('1')))", '{"d":"2020-01-01","m":{"key":"1"}}', Name("Nullable")),
("Map(String, Nullable(String))", "('2020-01-01', map('key',toNullable(NULL)))", '{"d":"2020-01-01","m":{"key":null}}', Name("Nullable(NULL)")),
("Map(String, LowCardinality(String))", "('2020-01-01', map('key',toLowCardinality('1')))", '{"d":"2020-01-01","m":{"key":"1"}}', Name("LowCardinality(String)")),
("Map(String, LowCardinality(String))", "('2020-01-01', map('key','1'))", '{"d":"2020-01-01","m":{"key":"1"}}', Name("LowCardinality(String) cast from String")),
("Map(LowCardinality(String), LowCardinality(String))", "('2020-01-01', map('1','1'))", '{"d":"2020-01-01","m":{"1":"1"}}', Name("LowCardinality(String) for key and value")),
("Map(String, LowCardinality(FixedString(1)))", "('2020-01-01', map('key',toLowCardinality(toFixedString('1',1))))", '{"d":"2020-01-01","m":{"key":"1"}}', Name("LowCardinality(FixedString)"))
])
def table_map_with_value_string(self, type, data, output):
"""Check what values we can insert into map type column with value string.
"""
insert_into_table(type=type, data=data, output=output)
@TestOutline(Scenario)
@Requirements(
RQ_SRS_018_ClickHouse_Map_DataType_Value_String("1.0")
)
@Examples("type data output", [
("Map(String, String)", "('2020-01-01', map('key',''))", '{"m":""}', Name("empty string")),
("Map(String, String)", "('2020-01-01', map('key','hello'))", '{"m":"hello"}', Name("non-empty string")),
("Map(String, String)", "('2020-01-01', map('key','Gãńdåłf_Thê_Gręât'))", '{"m":"Gãńdåłf_Thê_Gręât"}', Name("utf-8 string")),
("Map(String, String)", "('2020-01-01', map('key', 'hello there'))", '{"m":"hello there"}', Name("multi word string")),
("Map(String, String)", "('2020-01-01', map('key','hello','key2','there'))", '{"m":"hello"}', Name("multiple keys")),
("Map(String, String)", "('2020-01-01', map('key', toString(1)))", '{"m":"1"}', Name("toString")),
("Map(String, FixedString(1))", "('2020-01-01', map('key',toFixedString('1',1)))", '{"m":"1"}', Name("FixedString")),
("Map(String, Nullable(String))", "('2020-01-01', map('key',toNullable('1')))", '{"m":"1"}', Name("Nullable")),
("Map(String, Nullable(String))", "('2020-01-01', map('key',toNullable(NULL)))", '{"m":null}', Name("Nullable(NULL)")),
("Map(String, LowCardinality(String))", "('2020-01-01', map('key',toLowCardinality('1')))", '{"m":"1"}', Name("LowCardinality(String)")),
("Map(String, LowCardinality(String))", "('2020-01-01', map('key','1'))", '{"m":"1"}', Name("LowCardinality(String) cast from String")),
("Map(LowCardinality(String), LowCardinality(String))", "('2020-01-01', map('key','1'))", '{"m":"1"}', Name("LowCardinality(String) for key and value")),
("Map(String, LowCardinality(FixedString(1)))", "('2020-01-01', map('key',toLowCardinality(toFixedString('1',1))))", '{"m":"1"}', Name("LowCardinality(FixedString)"))
])
def table_map_select_key_with_value_string(self, type, data, output):
"""Check what values we can insert into map type column with value string and if it can be selected by key.
"""
insert_into_table(type=type, data=data, output=output, select="m['key'] AS m")
@TestOutline(Scenario)
@Requirements(
RQ_SRS_018_ClickHouse_Map_DataType_Value_Integer("1.0")
)
@Examples("type data output", [
("Map(Int8, Int8)", "('2020-01-01', map(1,127,2,0,3,-128))", '{"d":"2020-01-01","m":{"1":127,"2":0,"3":-128}}', Name("Int8")),
("Map(Int8, UInt8)", "('2020-01-01', map(1,0,2,255))", '{"d":"2020-01-01","m":{"1":0,"2":255}}', Name("UInt8")),
("Map(Int8, Int16)", "('2020-01-01', map(1,127,2,0,3,-128))", '{"d":"2020-01-01","m":{"1":32767,"2":0,"3":-32768}}', Name("Int16")),
("Map(Int8, UInt16)", "('2020-01-01', map(1,0,2,65535))", '{"d":"2020-01-01","m":{"1":0,"2":65535}}', Name("UInt16")),
("Map(Int8, Int32)", "('2020-01-01', map(1,127,2,0,3,-128))", '{"d":"2020-01-01","m":{"1":2147483647,"2":0,"3":-2147483648}}', Name("Int32")),
("Map(Int8, UInt32)", "('2020-01-01', map(1,0,2,4294967295))", '{"d":"2020-01-01","m":{"1":0,"2":4294967295}}', Name("UInt32")),
("Map(Int8, Int64)", "('2020-01-01', map(1,9223372036854775807,2,0,3,-9223372036854775808))", '{"d":"2020-01-01","m":{1:"9223372036854775807",2:"0",3:"-9223372036854775808"}}', Name("Int64")),
("Map(Int8, UInt64)", "('2020-01-01', map(1,0,2,18446744073709551615))", '{"d":"2020-01-01","m":{1:"0",2:"18446744073709551615"}}', Name("UInt64")),
("Map(Int8, Int128)", "('2020-01-01', map(1,170141183460469231731687303715884105727,2,0,3,-170141183460469231731687303715884105728))", '{"d":"2020-01-01","m":{1:"170141183460469231731687303715884105727",2:"0",3:"-170141183460469231731687303715884105728"}}', Name("Int128")),
("Map(Int8, Int256)", "('2020-01-01', map(1,57896044618658097711785492504343953926634992332820282019728792003956564819967,2,0,3,-57896044618658097711785492504343953926634992332820282019728792003956564819968))", '{"d":"2020-01-01","m":{1:"57896044618658097711785492504343953926634992332820282019728792003956564819967",2:"0",3:"-57896044618658097711785492504343953926634992332820282019728792003956564819968"}}', Name("Int256")),
("Map(Int8, UInt256)", "('2020-01-01', map(1,0,2,115792089237316195423570985008687907853269984665640564039457584007913129639935))", '{"d":"2020-01-01","m":{1:"0",2:"115792089237316195423570985008687907853269984665640564039457584007913129639935"}}', Name("UInt256")),
("Map(Int8, Nullable(Int8))", "('2020-01-01', map(1,toNullable(1)))", '{"d":"2020-01-01","m":{"1":1}}', Name("toNullable")),
("Map(Int8, Nullable(Int8))", "('2020-01-01', map(1,toNullable(NULL)))", '{"d":"2020-01-01","m":{"1":null}}', Name("toNullable(NULL)")),
])
def table_map_with_value_integer(self, type, data, output):
"""Check what values we can insert into map type column with value integer.
"""
insert_into_table(type=type, data=data, output=output)
@TestOutline(Scenario)
@Requirements(
RQ_SRS_018_ClickHouse_Map_DataType_Value_Array("1.0")
)
@Examples("type data output", [
("Map(String, Array(Int8))", "('2020-01-01', map('key',[]))", '{"d":"2020-01-01","m":{"key":[]}}', Name("empty array")),
("Map(String, Array(Int8))", "('2020-01-01', map('key',[1,2,3]))", '{"d":"2020-01-01","m":{"key":[1,2,3]}}', Name("non-empty array of ints")),
("Map(String, Array(String))", "('2020-01-01', map('key',['1','2','3']))", '{"d":"2020-01-01","m":{"key":["1","2","3"]}}', Name("non-empty array of strings")),
("Map(String, Array(Map(Int8, Int8)))", "('2020-01-01', map('key',[map(1,2),map(2,3)]))", '{"d":"2020-01-01","m":{"key":[{"1":2},{"2":3}]}}', Name("non-empty array of maps")),
("Map(String, Array(Map(Int8, Array(Map(Int8, Array(Int8))))))", "('2020-01-01', map('key',[map(1,[map(1,[1])]),map(2,[map(2,[3])])]))", '{"d":"2020-01-01","m":{"key":[{"1":[{"1":[1]}]},{"2":[{"2":[3]}]}]}}', Name("non-empty array of maps of array of maps")),
])
def table_map_with_value_array(self, type, data, output):
"""Check what values we can insert into map type column with value Array.
"""
insert_into_table(type=type, data=data, output=output)
@TestOutline(Scenario)
@Requirements(
RQ_SRS_018_ClickHouse_Map_DataType_Key_Integer("1.0")
)
@Examples("type data output", [
("Map(Int8, Int8)", "('2020-01-01', map(127,1,0,1,-128,1))", '{"d":"2020-01-01","m":{"127":1,"0":1,"-128":1}}', Name("Int8")),
("Map(UInt8, Int8)", "('2020-01-01', map(0,1,255,1))", '{"d":"2020-01-01","m":{"0":1,"255":1}}', Name("UInt8")),
("Map(Int16, Int8)", "('2020-01-01', map(127,1,0,1,-128,1))", '{"d":"2020-01-01","m":{"32767":1,"0":1,"-32768":1}}', Name("Int16")),
("Map(UInt16, Int8)", "('2020-01-01', map(0,1,65535,1))", '{"d":"2020-01-01","m":{"0":1,"65535":1}}', Name("UInt16")),
("Map(Int32, Int8)", "('2020-01-01', map(2147483647,1,0,1,-2147483648,1))", '{"d":"2020-01-01","m":{"2147483647":1,"0":1,"-2147483648":1}}', Name("Int32")),
("Map(UInt32, Int8)", "('2020-01-01', map(0,1,4294967295,1))", '{"d":"2020-01-01","m":{"0":1,"4294967295":1}}', Name("UInt32")),
("Map(Int64, Int8)", "('2020-01-01', map(9223372036854775807,1,0,1,-9223372036854775808,1))", '{"d":"2020-01-01","m":{"9223372036854775807":1,"0":1,"-9223372036854775808":1}}', Name("Int64")),
("Map(UInt64, Int8)", "('2020-01-01', map(0,1,18446744073709551615,1))", '{"d":"2020-01-01","m":{"0":1,"18446744073709551615":1}}', Name("UInt64")),
("Map(Int128, Int8)", "('2020-01-01', map(170141183460469231731687303715884105727,1,0,1,-170141183460469231731687303715884105728,1))", '{"d":"2020-01-01","m":{170141183460469231731687303715884105727:1,0:1,"-170141183460469231731687303715884105728":1}}', Name("Int128")),
("Map(Int256, Int8)", "('2020-01-01', map(57896044618658097711785492504343953926634992332820282019728792003956564819967,1,0,1,-57896044618658097711785492504343953926634992332820282019728792003956564819968,1))", '{"d":"2020-01-01","m":{"57896044618658097711785492504343953926634992332820282019728792003956564819967":1,"0":1,"-57896044618658097711785492504343953926634992332820282019728792003956564819968":1}}', Name("Int256")),
("Map(UInt256, Int8)", "('2020-01-01', map(0,1,115792089237316195423570985008687907853269984665640564039457584007913129639935,1))", '{"d":"2020-01-01","m":{"0":1,"115792089237316195423570985008687907853269984665640564039457584007913129639935":1}}', Name("UInt256")),
("Map(Nullable(Int8), Int8)", "('2020-01-01', map(toNullable(1),1))", '{"d":"2020-01-01","m":{1:1}}', Name("toNullable")),
("Map(Nullable(Int8), Int8)", "('2020-01-01', map(toNullable(NULL),1))", '{"d":"2020-01-01","m":{null:1}}', Name("toNullable(NULL)")),
])
def table_map_with_key_integer(self, type, data, output):
"""Check what values we can insert into map type column with key integer.
"""
insert_into_table(type=type, data=data, output=output)
@TestOutline(Scenario)
@Requirements(
RQ_SRS_018_ClickHouse_Map_DataType_Key_Integer("1.0")
)
@Examples("type data output select", [
("Map(Int8, Int8)", "('2020-01-01', map(127,1,0,1,-128,1))", '{"m":1}', "m[127] AS m", Name("Int8")),
("Map(UInt8, Int8)", "('2020-01-01', map(0,1,255,1))", '{"m":2}', "(m[255] + m[0]) AS m", Name("UInt8")),
("Map(Int16, Int8)", "('2020-01-01', map(127,1,0,1,-128,1))", '{"m":3}', "(m[-128] + m[0] + m[-128]) AS m", Name("Int16")),
("Map(UInt16, Int8)", "('2020-01-01', map(0,1,65535,1))", '{"m":2}', "(m[0] + m[65535]) AS m", Name("UInt16")),
("Map(Int32, Int8)", "('2020-01-01', map(2147483647,1,0,1,-2147483648,1))", '{"m":3}', "(m[2147483647] + m[0] + m[-2147483648]) AS m", Name("Int32")),
("Map(UInt32, Int8)", "('2020-01-01', map(0,1,4294967295,1))", '{"m":2}', "(m[0] + m[4294967295]) AS m", Name("UInt32")),
("Map(Int64, Int8)", "('2020-01-01', map(9223372036854775807,1,0,1,-9223372036854775808,1))", '{"m":3}', "(m[9223372036854775807] + m[0] + m[-9223372036854775808]) AS m", Name("Int64")),
("Map(UInt64, Int8)", "('2020-01-01', map(0,1,18446744073709551615,1))", '{"m":2}', "(m[0] + m[18446744073709551615]) AS m", Name("UInt64")),
("Map(Int128, Int8)", "('2020-01-01', map(170141183460469231731687303715884105727,1,0,1,-170141183460469231731687303715884105728,1))", '{"m":3}', "(m[170141183460469231731687303715884105727] + m[0] + m[-170141183460469231731687303715884105728]) AS m", Name("Int128")),
("Map(Int256, Int8)", "('2020-01-01', map(57896044618658097711785492504343953926634992332820282019728792003956564819967,1,0,1,-57896044618658097711785492504343953926634992332820282019728792003956564819968,1))", '{"m":3}', "(m[57896044618658097711785492504343953926634992332820282019728792003956564819967] + m[0] + m[-57896044618658097711785492504343953926634992332820282019728792003956564819968]) AS m", Name("Int256")),
("Map(UInt256, Int8)", "('2020-01-01', map(0,1,115792089237316195423570985008687907853269984665640564039457584007913129639935,1))", '{"m":2}', "(m[0] + m[115792089237316195423570985008687907853269984665640564039457584007913129639935]) AS m", Name("UInt256")),
("Map(Nullable(Int8), Int8)", "('2020-01-01', map(toNullable(1),1))", '{"m":1}', "m[1] AS m", Name("toNullable")),
("Map(Nullable(Int8), Int8)", "('2020-01-01', map(toNullable(NULL),1))", '{"m":1}', "m[null] AS m", Name("toNullable(NULL)")),
])
def table_map_select_key_with_key_integer(self, type, data, output, select):
"""Check what values we can insert into map type column with key integer and if we can use the key to select the value.
"""
insert_into_table(type=type, data=data, output=output, select=select)
@TestOutline(Scenario)
@Requirements(
RQ_SRS_018_ClickHouse_Map_DataType_ArrayOfMaps("1.0"),
RQ_SRS_018_ClickHouse_Map_DataType_NestedWithMaps("1.0")
)
@Examples("type data output partition_by", [
("Array(Map(String, Int8))",
"('2020-01-01', [map('hello',1),map('hello',1,'there',2)])",
'{"d":"2020-01-01","m":[{"hello":1},{"hello":1,"there":2}]}',
"m",
Name("Array(Map(String, Int8))")),
("Nested(x Map(String, Int8))",
"('2020-01-01', [map('hello',1)])",
'{"d":"2020-01-01","m.x":[{"hello":1}]}',
"m.x",
Name("Nested(x Map(String, Int8)"))
])
def table_with_map_inside_another_type(self, type, data, output, partition_by):
"""Check what values we can insert into a type that has map type.
"""
insert_into_table(type=type, data=data, output=output, partition_by=partition_by)
@TestOutline
def insert_into_table(self, type, data, output, partition_by="m", select="*"):
"""Check we can insert data into a table.
"""
uid = getuid()
node = self.context.node
with Given(f"table definition with {type}"):
sql = "CREATE TABLE {name} (d DATE, m " + type + ") ENGINE = MergeTree() PARTITION BY " + partition_by + " ORDER BY d"
with Given(f"I create a table", description=sql):
table = create_table(name=uid, statement=sql)
with When("I insert data", description=data):
sql = f"INSERT INTO {table} VALUES {data}"
node.query(sql)
with And("I select rows from the table"):
r = node.query(f"SELECT {select} FROM {table} FORMAT JSONEachRow")
with Then("I expect output to match", description=output):
assert r.output == output, error()
@TestScenario
@Requirements(
RQ_SRS_018_ClickHouse_Map_DataType_Functions_Map_MixedKeyOrValueTypes("1.0")
)
def select_map_with_invalid_mixed_key_and_value_types(self):
"""Check that creating a map with mixed key types fails.
"""
node = self.context.node
exitcode = 130
message = "DB::Exception: There is no supertype for types String, UInt8 because some of them are String/FixedString and some of them are not"
with Check("attempt to create a map using SELECT with mixed key types then it fails"):
node.query("SELECT map('hello',1,2,3)", exitcode=exitcode, message=message)
with Check("attempt to create a map using SELECT with mixed value types then it fails"):
node.query("SELECT map(1,'hello',2,2)", exitcode=exitcode, message=message)
@TestScenario
@Requirements(
RQ_SRS_018_ClickHouse_Map_DataType_Functions_Map_InvalidNumberOfArguments("1.0")
)
def select_map_with_invalid_number_of_arguments(self):
"""Check that creating a map with invalid number of arguments fails.
"""
node = self.context.node
exitcode = 42
message = "DB::Exception: Function map requires even number of arguments"
with When("I create a map using SELECT with invalid number of arguments"):
node.query("SELECT map(1,2,3)", exitcode=exitcode, message=message)
@TestScenario
def select_map_empty(self):
"""Check that we can can create a empty map by not passing any arguments.
"""
node = self.context.node
with When("I create a map using SELECT with no arguments"):
r = node.query("SELECT map()")
with Then("it should create an empty map"):
assert r.output == "{}", error()
@TestScenario
def insert_invalid_mixed_key_and_value_types(self):
"""Check that inserting a map with mixed key or value types fails.
"""
uid = getuid()
node = self.context.node
exitcode = 130
message = "DB::Exception: There is no supertype for types String, UInt8 because some of them are String/FixedString and some of them are not"
with Given(f"table definition with {type}"):
sql = "CREATE TABLE {name} (d DATE, m Map(String, Int8)) ENGINE = MergeTree() PARTITION BY m ORDER BY d"
with And(f"I create a table", description=sql):
table = create_table(name=uid, statement=sql)
with When("I insert a map with mixed key types then it should fail"):
sql = f"INSERT INTO {table} VALUES ('2020-01-01', map('hello',1,2,3))"
node.query(sql, exitcode=exitcode, message=message)
with When("I insert a map with mixed value types then it should fail"):
sql = f"INSERT INTO {table} VALUES ('2020-01-01', map(1,'hello',2,2))"
node.query(sql, exitcode=exitcode, message=message)
@TestOutline(Scenario)
@Requirements(
RQ_SRS_018_ClickHouse_Map_DataType_DuplicatedKeys("1.0")
)
@Examples("type data output", [
("Map(String, String)",
"('2020-01-01', map('hello','there','hello','over there'))",
'{"d":"2020-01-01","m":{"hello":"there","hello":"over there"}}',
Name("Map(String, String))")),
("Map(Int64, String)",
"('2020-01-01', map(12345,'there',12345,'over there'))",
'{"d":"2020-01-01","m":{"12345":"there","12345":"over there"}}',
Name("Map(Int64, String))")),
])
def table_map_with_duplicated_keys(self, type, data, output):
"""Check that map supports duplicated keys.
"""
insert_into_table(type=type, data=data, output=output)
@TestOutline(Scenario)
@Requirements(
RQ_SRS_018_ClickHouse_Map_DataType_DuplicatedKeys("1.0")
)
@Examples("map output", [
("map('hello','there','hello','over there')", "{'hello':'there','hello':'over there'}", Name("String")),
("map(12345,'there',12345,'over there')", "{12345:'there',12345:'over there'}", Name("Integer"))
])
def select_map_with_duplicated_keys(self, map, output):
"""Check creating a map with duplicated keys.
"""
select_map(map=map, output=output)
@TestOutline(Scenario)
@Requirements(
RQ_SRS_018_ClickHouse_Map_DataType_Value_Retrieval_KeyNotFound("1.0")
)
def select_map_key_not_found(self):
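    """Check the value returned when a map key is not found.
    """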
node = self.context.node
with When("map is empty"):
node.query("SELECT map() AS m, m[1]", exitcode=43, message="DB::Exception: Illegal types of arguments")
with When("map has integer values"):
r = node.query("SELECT map(1,2) AS m, m[2] FORMAT Values")
with Then("zero should be returned for key that is not found"):
assert r.output == "({1:2},0)", error()
with When("map has string values"):
r = node.query("SELECT map(1,'2') AS m, m[2] FORMAT Values")
with Then("empty string should be returned for key that is not found"):
assert r.output == "({1:'2'},'')", error()
with When("map has array values"):
r = node.query("SELECT map(1,[2]) AS m, m[2] FORMAT Values")
with Then("empty array be returned for key that is not found"):
assert r.output == "({1:[2]},[])", error()
@TestOutline(Scenario)
@Requirements(
RQ_SRS_018_ClickHouse_Map_DataType_Value_Retrieval_KeyNotFound("1.0")
)
@Examples("type data select exitcode message", [
("Map(UInt8, UInt8), y Int8", "(y) VALUES (1)", "m[1] AS v", 0, '{"v":0}', Name("empty map")),
("Map(UInt8, UInt8)", "VALUES (map(1,2))", "m[2] AS v", 0, '{"v":0}', Name("map has integer values")),
("Map(UInt8, String)", "VALUES (map(1,'2'))", "m[2] AS v", 0, '{"v":""}', Name("map has string values")),
("Map(UInt8, Array(Int8))", "VALUES (map(1,[2]))", "m[2] AS v", 0, '{"v":[]}', Name("map has array values")),
])
def table_map_key_not_found(self, type, data, select, exitcode, message, order_by=None):
"""Check values returned from a map column when key is not found.
"""
uid = getuid()
node = self.context.node
if order_by is None:
order_by = "m"
with Given(f"table definition with {type}"):
sql = "CREATE TABLE {name} (m " + type + ") ENGINE = MergeTree() ORDER BY " + order_by
with And(f"I create a table", description=sql):
table = create_table(name=uid, statement=sql)
with When("I insert data into the map column"):
node.query(f"INSERT INTO {table} {data}")
with And("I try to read from the table"):
node.query(f"SELECT {select} FROM {table} FORMAT JSONEachRow", exitcode=exitcode, message=message)
@TestScenario
@Requirements(
RQ_SRS_018_ClickHouse_Map_DataType_Value_Retrieval_KeyInvalid("1.0")
)
def invalid_key(self):
"""Check when key is not valid.
"""
node = self.context.node
with When("I try to use an integer key that is too large"):
node.query("SELECT map(1,2) AS m, m[256]", exitcode=43, message="DB::Exception: Illegal types of arguments")
with When("I try to use an integer key that is negative when key is unsigned"):
node.query("SELECT map(1,2) AS m, m[-1]", exitcode=43, message="DB::Exception: Illegal types of arguments")
with When("I try to use a string key when key is an integer"):
node.query("SELECT map(1,2) AS m, m['1']", exitcode=43, message="DB::Exception: Illegal types of arguments")
with When("I try to use an integer key when key is a string"):
r = node.query("SELECT map('1',2) AS m, m[1]", exitcode=43, message="DB::Exception: Illegal types of arguments")
with When("I try to use an empty key when key is a string"):
r = node.query("SELECT map('1',2) AS m, m[]", exitcode=62, message="DB::Exception: Syntax error: failed at position")
with When("I try to use wrong type conversion in key"):
r = node.query("SELECT map(1,2) AS m, m[toInt8('1')]", exitcode=43, message="DB::Exception: Illegal types of arguments")
with When("in array of maps I try to use an integer key that is negative when key is unsigned"):
node.query("SELECT [map(1,2)] AS m, m[1][-1]", exitcode=43, message="DB::Exception: Illegal types of arguments")
with When("I try to use a NULL key when key is not nullable"):
r = node.query("SELECT map(1,2) AS m, m[NULL] FORMAT Values")
with Then("it should return NULL"):
assert r.output == "({1:2},NULL)", error()
@TestOutline(Scenario)
@Requirements(
RQ_SRS_018_ClickHouse_Map_DataType_Value_Retrieval_KeyInvalid("1.0")
)
@Examples("type data select exitcode message order_by", [
("Map(UInt8, UInt8)", "(map(1,2))", "m[256] AS v", 0, '{"v":0}', "m", Name("key too large)")),
("Map(UInt8, UInt8)", "(map(1,2))", "m[-1] AS v", 0, '{"v":0}', "m", Name("key is negative")),
("Map(UInt8, UInt8)", "(map(1,2))", "m['1'] AS v", 43, "DB::Exception: Illegal types of arguments", "m", Name("string when key is integer")),
("Map(String, UInt8)", "(map('1',2))", "m[1] AS v", 43, "DB::Exception: Illegal types of arguments", "m", Name("integer when key is string")),
("Map(String, UInt8)", "(map('1',2))", "m[] AS v", 62, "DB::Exception: Syntax error: failed at position", "m", Name("empty when key is string")),
("Map(UInt8, UInt8)", "(map(1,2))", "m[toInt8('1')] AS v", 0, '{"v":2}', "m", Name("wrong type conversion when key is integer")),
("Map(String, UInt8)", "(map('1',2))", "m[toFixedString('1',1)] AS v", 0, '{"v":2}', "m", Name("wrong type conversion when key is string")),
("Map(UInt8, UInt8)", "(map(1,2))", "m[NULL] AS v", 0, '{"v":null}', "m", Name("NULL key when key is not nullable")),
("Array(Map(UInt8, UInt8))", "([map(1,2)])", "m[1]['1'] AS v", 43, "DB::Exception: Illegal types of arguments", "m", Name("string when key is integer in array of maps")),
("Nested(x Map(UInt8, UInt8))", "([map(1,2)])", "m.x[1]['1'] AS v", 43, "DB::Exception: Illegal types of arguments", "m.x", Name("string when key is integer in nested map")),
])
def table_map_invalid_key(self, type, data, select, exitcode, message, order_by="m"):
"""Check selecting values from a map column using an invalid key.
"""
uid = getuid()
node = self.context.node
with Given(f"table definition with {type}"):
sql = "CREATE TABLE {name} (m " + type + ") ENGINE = MergeTree() ORDER BY " + order_by
with And(f"I create a table", description=sql):
table = create_table(name=uid, statement=sql)
with When("I insert data into the map column"):
node.query(f"INSERT INTO {table} VALUES {data}")
with And("I try to read from the table"):
node.query(f"SELECT {select} FROM {table} FORMAT JSONEachRow", exitcode=exitcode, message=message)
@TestOutline(Scenario)
@Requirements(
RQ_SRS_018_ClickHouse_Map_DataType_Value_Retrieval("1.0")
)
@Examples("type data select filter exitcode message order_by", [
("Map(UInt8, UInt8)", "(map(1,1)),(map(1,2)),(map(2,3))", "m[1] AS v", "1=1 ORDER BY m[1]", 0, '{"v":0}\n{"v":1}\n{"v":2}', None,
Name("select the same key from all the rows")),
("Map(String, String)", "(map('a','b')),(map('c','d','e','f')),(map('e','f'))", "m", "m = map('e','f','c','d')", 0, '', None,
Name("filter rows by map having different pair order")),
("Map(String, String)", "(map('a','b')),(map('c','d','e','f')),(map('e','f'))", "m", "m = map('c','d','e','f')", 0, '{"m":{"c":"d","e":"f"}}', None,
Name("filter rows by map having the same pair order")),
("Map(String, String)", "(map('a','b')),(map('e','f'))", "m", "m = map()", 0, '', None,
Name("filter rows by empty map")),
("Map(String, Int8)", "(map('a',1,'b',2)),(map('a',2)),(map('b',3))", "m", "m['a'] = 1", 0, '{"m":{"a":1,"b":2}}', None,
Name("filter rows by map key value")),
("Map(String, Int8)", "(map('a',1,'b',2)),(map('a',2)),(map('b',3))", "m", "m['a'] = 1 AND m['b'] = 2", 0, '{"m":{"a":1,"b":2}}', None,
Name("filter rows by map multiple key value combined with AND")),
("Map(String, Int8)", "(map('a',1,'b',2)),(map('a',2)),(map('b',3))", "m", "m['a'] = 1 OR m['b'] = 3", 0, '{"m":{"a":1,"b":2}}\n{"m":{"b":3}}', None,
Name("filter rows by map multiple key value combined with OR")),
("Map(String, Array(Int8))", "(map('a',[])),(map('b',[1])),(map('c',[2]))", "m['b'] AS v", "m['b'] IN ([1],[2])", 0, '{"v":[1]}', None,
Name("filter rows by map array value using IN")),
("Map(String, Nullable(String))", "(map('a',NULL)),(map('a',1))", "m", "isNull(m['a']) = 1", 0, '{"m":{"a":null}}', None,
Name("select map with nullable value"))
])
def table_map_queries(self, type, data, select, filter, exitcode, message, order_by=None):
"""Check retrieving map values and using maps in queries.
"""
uid = getuid()
node = self.context.node
if order_by is None:
order_by = "m"
with Given(f"table definition with {type}"):
sql = "CREATE TABLE {name} (m " + type + ") ENGINE = MergeTree() ORDER BY " + order_by
with And(f"I create a table", description=sql):
table = create_table(name=uid, statement=sql)
with When("I insert data into the map column"):
node.query(f"INSERT INTO {table} VALUES {data}")
with And("I try to read from the table"):
node.query(f"SELECT {select} FROM {table} WHERE {filter} FORMAT JSONEachRow", exitcode=exitcode, message=message)
@TestOutline(Scenario)
@Requirements(
RQ_SRS_018_ClickHouse_Map_DataType_Invalid_Nullable("1.0"),
RQ_SRS_018_ClickHouse_Map_DataType_Invalid_NothingNothing("1.0")
)
@Examples("type exitcode message", [
("Nullable(Map(String, String))",
43, "DB::Exception: Nested type Map(String,String) cannot be inside Nullable type",
Name("nullable map")),
("Map(Nothing, Nothing)",
37, "DB::Exception: Column `m` with type Map(Nothing,Nothing) is not allowed in key expression, it's not comparable",
Name("map with nothing type for key and value"))
])
def table_map_unsupported_types(self, type, exitcode, message):
"""Check creating a table with unsupported map column types.
"""
uid = getuid()
node = self.context.node
try:
with When(f"I create a table definition with {type}"):
sql = f"CREATE TABLE {uid} (m " + type + ") ENGINE = MergeTree() ORDER BY m"
node.query(sql, exitcode=exitcode, message=message)
finally:
with Finally("drop table if any"):
node.query(f"DROP TABLE IF EXISTS {uid}")
@TestOutline(Scenario)
@Requirements(
RQ_SRS_018_ClickHouse_Map_DataType_Conversion_From_TupleOfArraysToMap("1.0"),
RQ_SRS_018_ClickHouse_Map_DataType_Conversion_From_TupleOfArraysMap_Invalid("1.0")
)
@Examples("tuple type exitcode message", [
("([1, 2, 3], ['Ready', 'Steady', 'Go'])", "Map(UInt8, String)",
0, "{1:'Ready',2:'Steady',3:'Go'}", Name("int -> int")),
("([1, 2, 3], ['Ready', 'Steady', 'Go'])", "Map(String, String)",
0, "{'1':'Ready','2':'Steady','3':'Go'}", Name("int -> string")),
("(['1', '2', '3'], ['Ready', 'Steady', 'Go'])", "Map(UInt8, String)",
0, "{1:'Ready',187:'Steady',143:'Go'}", Name("string -> int")),
("([],[])", "Map(String, String)",
0, "{}", Name("empty arrays to map str:str")),
("([],[])", "Map(UInt8, Array(Int8))",
0, "{}", Name("empty arrays to map uint8:array")),
("([[1]],['hello'])", "Map(String, String)",
0, "{'[1]':'hello'}", Name("array -> string")),
("([(1,2),(3,4)])", "Map(UInt8, UInt8)",
0, "{1:2,3:4}", Name("array of two tuples")),
("([1, 2], ['Ready', 'Steady', 'Go'])", "Map(UInt8, String)",
53, "DB::Exception: CAST AS Map can only be performed from tuple of arrays with equal sizes",
Name("unequal array sizes")),
])
def cast_tuple_of_two_arrays_to_map(self, tuple, type, exitcode, message):
"""Check casting Tuple(Array, Array) to a map type.
"""
node = self.context.node
with When("I try to cast tuple", description=tuple):
node.query(f"SELECT CAST({tuple}, '{type}') AS map", exitcode=exitcode, message=message)
@TestOutline(Scenario)
@Requirements(
RQ_SRS_018_ClickHouse_Map_DataType_Conversion_From_TupleOfArraysToMap("1.0"),
RQ_SRS_018_ClickHouse_Map_DataType_Conversion_From_TupleOfArraysMap_Invalid("1.0")
)
@Examples("tuple type exitcode message check_insert", [
("(([1, 2, 3], ['Ready', 'Steady', 'Go']))", "Map(UInt8, String)",
0, '{"m":{"1":"Ready","2":"Steady","3":"Go"}}', False, Name("int -> int")),
("(([1, 2, 3], ['Ready', 'Steady', 'Go']))", "Map(String, String)",
0, '{"m":{"1":"Ready","2":"Steady","3":"Go"}}', False, Name("int -> string")),
("((['1', '2', '3'], ['Ready', 'Steady', 'Go']))", "Map(UInt8, String)",
0, '', True, Name("string -> int")),
("(([],[]))", "Map(String, String)",
0, '{"m":{}}', False, Name("empty arrays to map str:str")),
("(([],[]))", "Map(UInt8, Array(Int8))",
0, '{"m":{}}', False, Name("empty arrays to map uint8:array")),
("(([[1]],['hello']))", "Map(String, String)",
53, 'DB::Exception: Type mismatch in IN or VALUES section', True, Name("array -> string")),
("(([(1,2),(3,4)]))", "Map(UInt8, UInt8)",
0, '{"m":{"1":2,"3":4}}', False, Name("array of two tuples")),
("(([1, 2], ['Ready', 'Steady', 'Go']))", "Map(UInt8, String)",
53, "DB::Exception: CAST AS Map can only be performed from tuple of arrays with equal sizes", True,
Name("unequal array sizes")),
])
def table_map_cast_tuple_of_arrays_to_map(self, tuple, type, exitcode, message, check_insert):
"""Check converting Tuple(Array, Array) into map on insert into a map type column.
"""
table_map(type=type, data=tuple, select="*", filter="1=1", exitcode=exitcode, message=message, check_insert=check_insert)
@TestOutline(Scenario)
@Requirements(
RQ_SRS_018_ClickHouse_Map_DataType_Conversion_From_ArrayOfTuplesToMap("1.0"),
RQ_SRS_018_ClickHouse_Map_DataType_Conversion_From_ArrayOfTuplesToMap_Invalid("1.0")
)
@Examples("tuple type exitcode message", [
("([(1,2),(3,4)])", "Map(UInt8, UInt8)", 0, "{1:2,3:4}",
Name("array of two tuples")),
("([(1,2),(3)])", "Map(UInt8, UInt8)", 130,
"DB::Exception: There is no supertype for types Tuple(UInt8, UInt8), UInt8 because some of them are Tuple and some of them are not",
Name("not a tuple")),
("([(1,2),(3,)])", "Map(UInt8, UInt8)", 130,
"DB::Exception: There is no supertype for types Tuple(UInt8, UInt8), Tuple(UInt8) because Tuples have different sizes",
Name("invalid tuple")),
])
def cast_array_of_two_tuples_to_map(self, tuple, type, exitcode, message):
"""Check casting Array(Tuple(K,V)) to a map type.
"""
node = self.context.node
with When("I try to cast tuple", description=tuple):
node.query(f"SELECT CAST({tuple}, '{type}') AS map", exitcode=exitcode, message=message)
@TestOutline(Scenario)
@Requirements(
RQ_SRS_018_ClickHouse_Map_DataType_Conversion_From_ArrayOfTuplesToMap("1.0"),
RQ_SRS_018_ClickHouse_Map_DataType_Conversion_From_ArrayOfTuplesToMap_Invalid("1.0")
)
@Examples("tuple type exitcode message check_insert", [
("(([(1,2),(3,4)]))", "Map(UInt8, UInt8)", 0, '{"m":{"1":2,"3":4}}', False,
Name("array of two tuples")),
("(([(1,2),(3)]))", "Map(UInt8, UInt8)", 130,
"DB::Exception: There is no supertype for types Tuple(UInt8, UInt8), UInt8 because some of them are Tuple and some of them are not", True,
Name("not a tuple")),
("(([(1,2),(3,)]))", "Map(UInt8, UInt8)", 130,
"DB::Exception: There is no supertype for types Tuple(UInt8, UInt8), Tuple(UInt8) because Tuples have different sizes", True,
Name("invalid tuple")),
])
def table_map_cast_array_of_two_tuples_to_map(self, tuple, type, exitcode, message, check_insert):
"""Check converting Array(Tuple(K,V),...) into map on insert into a map type column.
"""
table_map(type=type, data=tuple, select="*", filter="1=1", exitcode=exitcode, message=message, check_insert=check_insert)
@TestScenario
@Requirements(
RQ_SRS_018_ClickHouse_Map_DataType_SubColumns_Keys_InlineDefinedMap("1.0")
)
def subcolumns_keys_using_inline_defined_map(self):
    """Check accessing the keys sub-column of an inline defined map.
    """
node = self.context.node
exitcode = 47
message = "DB::Exception: Missing columns: 'c.keys'"
with When("I try to access keys sub-column using an inline defined map"):
node.query("SELECT map( 'aa', 4, '44' , 5) as c, c.keys", exitcode=exitcode, message=message)
@TestScenario
@Requirements(
RQ_SRS_018_ClickHouse_Map_DataType_SubColumns_Values_InlineDefinedMap("1.0")
)
def subcolumns_values_using_inline_defined_map(self):
    """Check accessing the values sub-column of an inline defined map.
    """
node = self.context.node
exitcode = 47
message = "DB::Exception: Missing columns: 'c.values'"
with When("I try to access values sub-column using an inline defined map"):
node.query("SELECT map( 'aa', 4, '44' , 5) as c, c.values", exitcode=exitcode, message=message)
@TestOutline(Scenario)
@Requirements(
RQ_SRS_018_ClickHouse_Map_DataType_SubColumns_Keys("1.0"),
RQ_SRS_018_ClickHouse_Map_DataType_SubColumns_Keys_ArrayFunctions("1.0"),
RQ_SRS_018_ClickHouse_Map_DataType_SubColumns_Values("1.0"),
RQ_SRS_018_ClickHouse_Map_DataType_SubColumns_Values_ArrayFunctions("1.0")
)
@Examples("type data select filter exitcode message", [
# keys
("Map(String, String)", "(map('a','b','c','d')),(map('e','f'))", "m.keys AS keys", "1=1",
0, '{"keys":["a","c"]}\n{"keys":["e"]}', Name("select keys")),
("Map(String, String)", "(map('a','b','c','d')),(map('e','f'))", "m.keys AS keys", "has(m.keys, 'e')",
0, '{"keys":["e"]}', Name("filter by using keys in an array function")),
("Map(String, String)", "(map('a','b','c','d')),(map('e','f'))", "has(m.keys, 'e') AS r", "1=1",
0, '{"r":0}\n{"r":1}', Name("column that uses keys in an array function")),
# values
("Map(String, String)", "(map('a','b','c','d')),(map('e','f'))", "m.values AS values", "1=1",
0, '{"values":["b","d"]}\n{"values":["f"]}', Name("select values")),
("Map(String, String)", "(map('a','b','c','d')),(map('e','f'))", "m.values AS values", "has(m.values, 'f')",
0, '{"values":["f"]}', Name("filter by using values in an array function")),
("Map(String, String)", "(map('a','b','c','d')),(map('e','f'))", "has(m.values, 'f') AS r", "1=1",
0, '{"r":0}\n{"r":1}', Name("column that uses values in an array function"))
])
def subcolumns(self, type, data, select, filter, exitcode, message, order_by=None):
"""Check usage of sub-columns in queries.
"""
table_map(type=type, data=data, select=select, filter=filter, exitcode=exitcode, message=message, order_by=order_by)
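# Illustrative only: the .keys and .values sub-columns behave like ordinary Array columns,
# so array functions apply directly, e.g. for a hypothetical table t:
#   SELECT m.keys, m.values, has(m.keys, 'e') FROM t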
@TestScenario
@Requirements(
RQ_SRS_018_ClickHouse_Map_DataType_Functions_Length("1.0")
)
def length(self):
"""Check usage of length function with map data type.
"""
table_map(type="Map(String, String)",
data="(map('a','b','c','d')),(map('e','f'))",
select="length(m) AS len, m",
filter="length(m) = 1",
exitcode=0, message='{"len":"1","m":{"e":"f"}}')
@TestScenario
@Requirements(
RQ_SRS_018_ClickHouse_Map_DataType_Functions_Empty("1.0")
)
def empty(self):
"""Check usage of empty function with map data type.
"""
table_map(type="Map(String, String)",
data="(map('e','f'))",
select="empty(m) AS em, m",
filter="empty(m) <> 1",
exitcode=0, message='{"em":0,"m":{"e":"f"}}')
@TestScenario
@Requirements(
RQ_SRS_018_ClickHouse_Map_DataType_Functions_NotEmpty("1.0")
)
def notempty(self):
"""Check usage of notEmpty function with map data type.
"""
table_map(type="Map(String, String)",
data="(map('e','f'))",
select="notEmpty(m) AS em, m",
filter="notEmpty(m) = 1",
exitcode=0, message='{"em":1,"m":{"e":"f"}}')
@TestScenario
@Requirements(
RQ_SRS_018_ClickHouse_Map_DataType_Functions_Map_MapAdd("1.0")
)
def cast_from_mapadd(self):
"""Check converting the result of mapAdd function to a map data type.
"""
select_map(map="CAST(mapAdd(([toUInt8(1), 2], [1, 1]), ([toUInt8(1), 2], [1, 1])), 'Map(Int8, Int8)')", output="{1:2,2:2}")
@TestScenario
@Requirements(
RQ_SRS_018_ClickHouse_Map_DataType_Functions_Map_MapSubstract("1.0")
)
def cast_from_mapsubstract(self):
"""Check converting the result of mapSubstract function to a map data type.
"""
select_map(map="CAST(mapSubtract(([toUInt8(1), 2], [toInt32(1), 1]), ([toUInt8(1), 2], [toInt32(2), 1])), 'Map(Int8, Int8)')", output="{1:-1,2:0}")
@TestScenario
@Requirements(
RQ_SRS_018_ClickHouse_Map_DataType_Functions_Map_MapPopulateSeries("1.0")
)
def cast_from_mappopulateseries(self):
"""Check converting the result of mapPopulateSeries function to a map data type.
"""
select_map(map="CAST(mapPopulateSeries([1,2,4], [11,22,44], 5), 'Map(Int8, Int8)')", output="{1:11,2:22,3:0,4:44,5:0}")
@TestScenario
@Requirements(
RQ_SRS_018_ClickHouse_Map_DataType_Functions_MapContains("1.0")
)
def mapcontains(self):
"""Check usages of mapContains function with map data type.
"""
node = self.context.node
with Example("key in map"):
table_map(type="Map(String, String)",
data="(map('e','f')),(map('a','b'))",
select="m",
filter="mapContains(m, 'a')",
exitcode=0, message='{"m":{"a":"b"}}')
with Example("key not in map"):
table_map(type="Map(String, String)",
data="(map('e','f')),(map('a','b'))",
select="m",
filter="NOT mapContains(m, 'a')",
exitcode=0, message='{"m":{"e":"f"}}')
with Example("null key not in map"):
table_map(type="Map(Nullable(String), String)",
data="(map('e','f')),(map('a','b'))",
select="m",
filter="mapContains(m, NULL)",
exitcode=0, message='')
with Example("null key in map"):
table_map(type="Map(Nullable(String), String)",
data="(map('e','f')),(map('a','b')),(map(NULL,'c'))",
select="m",
filter="mapContains(m, NULL)",
exitcode=0, message='{null:"c"}')
with Example("select nullable key"):
node.query("SELECT map(NULL, 1, 2, 3) AS m, mapContains(m, toNullable(toUInt8(2)))", exitcode=0, message="{2:3}")
@TestScenario
@Requirements(
RQ_SRS_018_ClickHouse_Map_DataType_Functions_MapKeys("1.0")
)
def mapkeys(self):
"""Check usages of mapKeys function with map data type.
"""
with Example("key in map"):
table_map(type="Map(String, String)",
data="(map('e','f')),(map('a','b'))",
select="m",
filter="has(mapKeys(m), 'a')",
exitcode=0, message='{"m":{"a":"b"}}')
with Example("key not in map"):
table_map(type="Map(String, String)",
data="(map('e','f')),(map('a','b'))",
select="m",
filter="NOT has(mapKeys(m), 'a')",
exitcode=0, message='{"m":{"e":"f"}}')
with Example("null key not in map"):
table_map(type="Map(Nullable(String), String)",
data="(map('e','f')),(map('a','b'))",
select="m",
filter="has(mapKeys(m), NULL)",
exitcode=0, message='')
with Example("null key in map"):
table_map(type="Map(Nullable(String), String)",
data="(map('e','f')),(map('a','b')),(map(NULL,'c'))",
select="m",
filter="has(mapKeys(m), NULL)",
exitcode=0, message='{"m":{null:"c"}}')
with Example("select keys from column"):
table_map(type="Map(Nullable(String), String)",
data="(map('e','f')),(map('a','b')),(map(NULL,'c'))",
select="mapKeys(m) AS keys",
filter="1 = 1",
exitcode=0, message='{"keys":["a"]}\n{"keys":["e"]}\n{"keys":[null]}')
@TestScenario
@Requirements(
RQ_SRS_018_ClickHouse_Map_DataType_Functions_MapValues("1.0")
)
def mapvalues(self):
"""Check usages of mapValues function with map data type.
"""
with Example("value in map"):
table_map(type="Map(String, String)",
data="(map('e','f')),(map('a','b'))",
select="m",
filter="has(mapValues(m), 'b')",
exitcode=0, message='{"m":{"a":"b"}}')
with Example("value not in map"):
table_map(type="Map(String, String)",
data="(map('e','f')),(map('a','b'))",
select="m",
filter="NOT has(mapValues(m), 'b')",
exitcode=0, message='{"m":{"e":"f"}}')
with Example("null value not in map"):
table_map(type="Map(String, Nullable(String))",
data="(map('e','f')),(map('a','b'))",
select="m",
filter="has(mapValues(m), NULL)",
exitcode=0, message='')
with Example("null value in map"):
table_map(type="Map(String, Nullable(String))",
data="(map('e','f')),(map('a','b')),(map('c',NULL))",
select="m",
filter="has(mapValues(m), NULL)",
exitcode=0, message='{"m":{"c":null}}')
with Example("select values from column"):
table_map(type="Map(String, Nullable(String))",
data="(map('e','f')),(map('a','b')),(map('c',NULL))",
select="mapValues(m) AS values",
filter="1 = 1",
exitcode=0, message='{"values":["b"]}\n{"values":[null]}\n{"values":["f"]}')
@TestScenario
@Requirements(
RQ_SRS_018_ClickHouse_Map_DataType_Functions_InlineDefinedMap("1.0")
)
def functions_with_inline_defined_map(self):
"""Check that a map defined inline inside the select statement
can be used with functions that work with maps.
"""
with Example("mapKeys"):
select_map(map="map(1,2,3,4) as map, mapKeys(map) AS keys", output="{1:2,3:4}\t[1,3]")
with Example("mapValyes"):
select_map(map="map(1,2,3,4) as map, mapValues(map) AS values", output="{1:2,3:4}\t[2,4]")
with Example("mapContains"):
select_map(map="map(1,2,3,4) as map, mapContains(map, 1) AS contains", output="{1:2,3:4}\t1")
@TestScenario
def empty_map(self):
"""Check creating of an empty map `{}` using the map() function
when inserting data into a map type table column.
"""
table_map(type="Map(String, String)",
data="(map('e','f')),(map())",
select="m",
filter="1=1",
exitcode=0, message='{"m":{}}\n{"m":{"e":"f"}}')
@TestScenario
@Requirements(
RQ_SRS_018_ClickHouse_Map_DataType_Performance_Vs_TupleOfArrays("1.0")
)
def performance_vs_two_tuple_of_arrays(self, len=10, rows=6000000):
"""Check performance of using map data type vs Tuple(Array, Array).
"""
uid = getuid()
node = self.context.node
with Given(f"table with Tuple(Array(Int8),Array(Int8))"):
sql = "CREATE TABLE {name} (pairs Tuple(Array(Int8),Array(Int8))) ENGINE = MergeTree() ORDER BY pairs"
tuple_table = create_table(name=f"tuple_{uid}", statement=sql)
with And(f"table with Map(Int8,Int8)"):
sql = "CREATE TABLE {name} (pairs Map(Int8,Int8)) ENGINE = MergeTree() ORDER BY pairs"
map_table = create_table(name=f"map_{uid}", statement=sql)
with When("I insert data into table with tuples"):
keys = range(len)
values = range(len)
start_time = time.time()
node.query(f"INSERT INTO {tuple_table} SELECT ({keys},{values}) FROM numbers({rows})")
tuple_insert_time = time.time() - start_time
metric("tuple insert time", tuple_insert_time, "sec")
with When("I insert data into table with a map"):
keys = range(len)
values = range(len)
start_time = time.time()
node.query(f"INSERT INTO {map_table} SELECT ({keys},{values}) FROM numbers({rows})")
map_insert_time = time.time() - start_time
metric("map insert time", map_insert_time, "sec")
with And("I retrieve particular key value from table with tuples"):
start_time = time.time()
node.query(f"SELECT sum(arrayFirst((v, k) -> k = {len-1}, tupleElement(pairs, 2), tupleElement(pairs, 1))) AS sum FROM {tuple_table}",
exitcode=0, message=f"{rows*(len-1)}")
tuple_select_time = time.time() - start_time
metric("tuple(array, array) select time", tuple_select_time, "sec")
with And("I retrieve particular key value from table with map"):
start_time = time.time()
node.query(f"SELECT sum(pairs[{len-1}]) AS sum FROM {map_table}",
exitcode=0, message=f"{rows*(len-1)}")
map_select_time = time.time() - start_time
metric("map select time", map_select_time, "sec")
metric("insert difference", (1 - map_insert_time/tuple_insert_time) * 100, "%")
metric("select difference", (1 - map_select_time/tuple_select_time) * 100, "%")
@TestScenario
@Requirements(
RQ_SRS_018_ClickHouse_Map_DataType_Performance_Vs_ArrayOfTuples("1.0")
)
def performance_vs_array_of_tuples(self, len=10, rows=6000000):
"""Check performance of using map data type vs Array(Tuple(K,V)).
"""
uid = getuid()
node = self.context.node
with Given(f"table with Array(Tuple(K,V))"):
sql = "CREATE TABLE {name} (pairs Array(Tuple(Int8, Int8))) ENGINE = MergeTree() ORDER BY pairs"
array_table = create_table(name=f"tuple_{uid}", statement=sql)
with And(f"table with Map(Int8,Int8)"):
sql = "CREATE TABLE {name} (pairs Map(Int8,Int8)) ENGINE = MergeTree() ORDER BY pairs"
map_table = create_table(name=f"map_{uid}", statement=sql)
with When("I insert data into table with an array of tuples"):
pairs = list(zip(range(len),range(len)))
start_time = time.time()
node.query(f"INSERT INTO {array_table} SELECT ({pairs}) FROM numbers({rows})")
array_insert_time = time.time() - start_time
metric("array insert time", array_insert_time, "sec")
with When("I insert data into table with a map"):
keys = range(len)
values = range(len)
start_time = time.time()
node.query(f"INSERT INTO {map_table} SELECT ({keys},{values}) FROM numbers({rows})")
map_insert_time = time.time() - start_time
metric("map insert time", map_insert_time, "sec")
with And("I retrieve particular key value from table with an array of tuples"):
start_time = time.time()
node.query(f"SELECT sum(arrayFirst((v) -> v.1 = {len-1}, pairs).2) AS sum FROM {array_table}",
exitcode=0, message=f"{rows*(len-1)}")
array_select_time = time.time() - start_time
metric("array(tuple(k,v)) select time", array_select_time, "sec")
with And("I retrieve particular key value from table with map"):
start_time = time.time()
node.query(f"SELECT sum(pairs[{len-1}]) AS sum FROM {map_table}",
exitcode=0, message=f"{rows*(len-1)}")
map_select_time = time.time() - start_time
metric("map select time", map_select_time, "sec")
metric("insert difference", (1 - map_insert_time/array_insert_time) * 100, "%")
metric("select difference", (1 - map_select_time/array_select_time) * 100, "%")
@TestScenario
def performance(self, len=10, rows=6000000):
"""Check insert and select performance of using map data type.
"""
uid = getuid()
node = self.context.node
with Given("table with Map(Int8,Int8)"):
sql = "CREATE TABLE {name} (pairs Map(Int8,Int8)) ENGINE = MergeTree() ORDER BY pairs"
map_table = create_table(name=f"map_{uid}", statement=sql)
with When("I insert data into table with a map"):
values = [x for pair in zip(range(len),range(len)) for x in pair]
start_time = time.time()
node.query(f"INSERT INTO {map_table} SELECT (map({','.join([str(v) for v in values])})) FROM numbers({rows})")
map_insert_time = time.time() - start_time
metric("map insert time", map_insert_time, "sec")
with And("I retrieve particular key value from table with map"):
start_time = time.time()
node.query(f"SELECT sum(pairs[{len-1}]) AS sum FROM {map_table}",
exitcode=0, message=f"{rows*(len-1)}")
map_select_time = time.time() - start_time
metric("map select time", map_select_time, "sec")
# FIXME: add tests for different table engines
@TestFeature
@Name("tests")
@Requirements(
RQ_SRS_018_ClickHouse_Map_DataType("1.0"),
RQ_SRS_018_ClickHouse_Map_DataType_Functions_Map("1.0")
)
def feature(self, node="clickhouse1"):
    """Run all map data type scenarios.
    """
self.context.node = self.context.cluster.node(node)
for scenario in loads(current_module(), Scenario):
scenario()
|
6945e23a43adb5020f30975cecea9ed8e131ed73
|
184b40438287d124117dcd48cf9abdab71e116c7
|
/src/cosmic_ray/tools/html.py
|
f00893d83e27771f58aa964fe5214e6b83b243bc
|
[
"MIT"
] |
permissive
|
sixty-north/cosmic-ray
|
45bea97513eb75d00c514e0df2c5c3d156268f0f
|
aa63d36ef84659d941c22da9d4d39ae0408d488e
|
refs/heads/master
| 2023-08-26T07:57:57.939516
| 2023-03-14T13:49:49
| 2023-03-14T13:49:49
| 34,157,278
| 569
| 77
|
MIT
| 2023-01-02T07:58:54
| 2015-04-18T07:44:21
|
Python
|
UTF-8
|
Python
| false
| false
| 13,871
|
py
|
html.py
|
"A tool for generating HTML reports."
import datetime
from itertools import chain
import click
from yattag import Doc
from cosmic_ray.work_db import WorkDB, use_db
from cosmic_ray.work_item import TestOutcome
from cosmic_ray.tools.survival_rate import kills_count, survival_rate
@click.command()
@click.option("--only-completed/--not-only-completed", default=False)
@click.option("--skip-success/--include-success", default=False)
@click.argument("session-file", type=click.Path(dir_okay=False, readable=True, exists=True))
def report_html(only_completed, skip_success, session_file):
"""Print an HTML formatted report of test results."""
with use_db(session_file, WorkDB.Mode.open) as db:
doc = _generate_html_report(db, only_completed, skip_success)
print(doc.getvalue())
# TODO: Redo this with jinja?
def _generate_html_report(db, only_completed, skip_success):
# pylint: disable=too-many-statements
doc, tag, text = Doc().tagtext()
doc.asis("<!DOCTYPE html>")
with tag("html", lang="en"):
with tag("head"):
doc.stag("meta", charset="utf-8")
doc.stag("meta", name="viewport", content="width=device-width, initial-scale=1, shrink-to-fit=no")
doc.stag(
"link",
rel="stylesheet",
href="https://stackpath.bootstrapcdn.com/bootstrap/4.3.1/css/bootstrap.min.css",
integrity="sha384-ggOyR0iXCbMQv3Xipma34MD+dH/1fQ784/j6cY/iJTQUOhcWr7x9JvoRxT2MZw1T",
crossorigin="anonymous",
)
with tag("title"):
text("Cosmic Ray Report")
with tag("body"):
with tag("div", klass="container"):
with tag("h1"):
with tag("p", klass="text-dark"):
text("Cosmic Ray Report")
all_items = db.completed_work_items
if not only_completed:
incomplete = ((item, None) for item in db.pending_work_items)
all_items = chain(all_items, incomplete)
with tag("div", klass="container"):
# Summary info
_generate_summary(doc, db)
# Job list
                _generate_job_list(doc, all_items, skip_success)
with tag("script"):
doc.attr(src="https://code.jquery.com/jquery-3.3.1.slim.min.js")
doc.attr(("integrity", "sha384-q8i/X+965DzO0rT7abK41JStQIAqVgRVzpbzo5smXKp4YfRvH+8abtTE1Pi6jizo"))
doc.attr(("crossorigin", "anonymous"))
with tag("script"):
doc.attr(src="https://cdnjs.cloudflare.com/ajax/libs/popper.js/1.14.7/umd/popper.min.js")
doc.attr(("integrity", "sha384-UO2eT0CpHqdSJQ6hJty5KVphtPhzWj9WO1clHTMGa3JDZwrnQq4sF86dIHNDz0W1"))
doc.attr(("crossorigin", "anonymous"))
with tag("script"):
doc.attr(src="https://stackpath.bootstrapcdn.com/bootstrap/4.3.1/js/bootstrap.min.js")
doc.attr(("integrity", "sha384-JjSmVgyd0p3pXB1rRibZUAYoIIy6OrQ6VrjIEaFf/nJGzIxFDsf4x0xIM+B07jRM"))
doc.attr(("crossorigin", "anonymous"))
with tag("script", type="text/javascript"):
doc.asis(
"$('div.job_list___sub_multi_collapse').on('shown.bs.collapse',"
" function () {"
" correct_behavior_functional_buttons();"
" });"
"$('div.job_list___sub_multi_collapse').on('hidden.bs.collapse',"
" function () {"
" correct_behavior_functional_buttons();"
" });"
"function correct_behavior_functional_buttons() {"
" var expand = false;"
" var collapse = false;"
" $('a.job_list___sub_multi_heading').each(function(index) {"
" if ($(this).attr('aria-expanded') == 'false') {"
" expand = true;"
" return false;"
" };"
" });"
" $('a.job_list___sub_multi_heading').each(function(index) {"
" if ($(this).attr('aria-expanded') == 'true') {"
" collapse = true;"
" return false;"
" };"
" });"
" if (expand) {"
" $('div#job_item_expand_all').css('display', 'inline-block');"
" } else {"
" $('div#job_item_expand_all').css('display', 'none');"
" };"
" if (collapse) {"
" $('div#job_item_collapse_all').css('display', 'inline-block');"
" } else {"
" $('div#job_item_collapse_all').css('display', 'none');"
" };"
" };"
"correct_behavior_functional_buttons();"
)
return doc
def _generate_job_list(doc, all_items, skip_success):
    """Generate the collapsible job list with one card per work item."""
doc, tag, text = doc.tagtext()
with tag("div", klass="mb-1", id="job_list___accordion"):
with tag("div", klass="card"):
with tag(
"a",
("data-toggle", "collapse"),
("data-target", "#job_list___collapse_1"),
("aria-expanded", "false"),
("aria-controls", "job_list___collapse_1"),
href="#",
):
with tag("div", klass="card-header", id="job_list___heading_1"):
with tag("button", klass="btn btn-outline-dark"):
with tag("h4", klass="m-0"):
text("Job list")
with tag(
"div",
("aria-labelledby", "job_list___heading_1"),
("data-parent", "#job_list___accordion"),
klass="collapse",
id="job_list___collapse_1",
):
with tag("div", klass="card-body"):
with tag("div", klass="text-right mb-1"):
with tag("div", klass="mx-1", id="job_item_expand_all"):
with tag(
"a",
href="#",
onclick="$('div.job_list___sub_multi_collapse').collapse('show');",
):
with tag("button", klass="btn btn-outline-dark"):
with tag("span"):
text("Expand All")
with tag("div", klass="mx-1", id="job_item_collapse_all"):
with tag(
"a",
href="#",
onclick="$('div.job_list___sub_multi_collapse').collapse('hide');",
):
with tag("button", klass="btn btn-outline-dark"):
with tag("span"):
text("Collapse All")
# Job item
for index, (work_item, result) in enumerate(all_items, start=1):
_generate_work_item_card(doc, index, work_item, result, skip_success)
# flake8: noqa: C901
def _generate_work_item_card(doc, index, work_item, result, skip_success):
    """Generate a collapsible card for a single work item and its result, if any."""
doc, tag, text = doc.tagtext()
if result is not None:
if result.is_killed:
if result.test_outcome == TestOutcome.INCOMPETENT:
level = "info"
else:
level = "success"
if skip_success:
return
else:
level = "danger"
with tag("div", klass="mb-1", id="job_list___sub_accordion_{}".format(index)):
with tag("div", klass="card"):
with tag(
"a",
("data-toggle", "collapse"),
("data-target", "#job_list___sub_collapse_{}_1".format(index)),
("aria-expanded", "false"),
("aria-controls", "job_list___sub_collapse_{}_1".format(index)),
href="#",
klass="job_list___sub_multi_heading",
):
with tag(
"div",
("role", "alert"),
klass="card-header alert-{}".format(level),
id="job_list___sub_heading_{}_1".format(index),
):
with tag("button", klass="btn btn-outline-{}".format(level)):
with tag("span", klass="job_id"):
text("{} : Job ID {}".format(index, work_item.job_id))
with tag(
"div",
("aria-labelledby", "job_list___sub_heading_{}_1".format(index)),
("data-parent", "#job_list___sub_accordion_{}".format(index)),
klass="collapse job_list___sub_multi_collapse",
id="job_list___sub_collapse_{}_1".format(index),
):
with tag("div", klass="card-body"):
with tag("div", klass="work-item"):
with tag(
"div",
klass="alert alert-{} test-outcome".format(level),
role="alert",
):
if result is not None:
if not result.is_killed:
with tag("p"):
text("SURVIVED")
with tag("p"):
text("worker outcome: {}".format(result.worker_outcome))
with tag("p"):
text("test outcome: {}".format(result.test_outcome))
else:
with tag("p"):
text("No result")
for mutation in work_item.mutations:
with tag("pre", klass="location"):
with tag(
"a",
href=pycharm_url(str(mutation.module_path), mutation.start_pos[0]),
klass="text-secondary",
):
with tag("button", klass="btn btn-outline-dark"):
text(
"{}, start pos: {}, end pos: {}".format(
mutation.module_path,
mutation.start_pos,
mutation.end_pos,
)
)
with tag("pre"):
text("operator: {}, occurrence: {}".format(mutation.operator_name, mutation.occurrence))
if result is not None:
if result.diff:
with tag("div", klass="alert alert-secondary"):
with tag("pre", klass="diff"):
text(result.diff)
if result.output:
with tag("div", klass="alert alert-secondary"):
with tag("pre", klass="diff"):
text(result.output)
def _generate_summary(doc, db):
    """Generate the summary card with job counts and the survival rate."""
doc, tag, text = doc.tagtext()
num_items = db.num_work_items
num_complete = db.num_results
with tag("div", klass="mb-1", id="summary_info___accordion"):
with tag("div", klass="card"):
with tag(
"a",
("data-toggle", "collapse"),
("data-target", "#summary_info___collapse_1"),
("aria-expanded", "true"),
("aria-controls", "summary_info___collapse_1"),
href="#",
):
with tag("div", klass="card-header", id="summary_info___heading_1"):
with tag("button", klass="btn btn-outline-dark"):
with tag("h4", klass="m-0"):
text("Summary info")
with tag(
"div",
("aria-labelledby", "summary_info___heading_1"),
("data-parent", "#summary_info___accordion"),
klass="collapse show",
id="summary_info___collapse_1",
):
with tag("div", klass="card-body"):
with tag("p"):
text("Date time: {}".format(datetime.datetime.now().strftime("%d/%m/%Y %H:%M:%S")))
with tag("p"):
text("Total jobs: {}".format(num_items))
if num_complete > 0:
with tag("p"):
text("Complete: {} ({:.2f}%)".format(num_complete, num_complete / num_items * 100))
with tag("p"):
num_killed = kills_count(db)
text("Surviving mutants: {} ({:.2f}%)".format(num_complete - num_killed, survival_rate(db)))
else:
with tag("p"):
text("No jobs completed")
def pycharm_url(filename, line_number):
"Get a URL for opening a file in Pycharm."
return "pycharm://open?file={}&line={}".format(filename, line_number)
|
94d646cb0989cf6c9c8cc1801d9f65878e1a6e61
|
1577e1cf4e89584a125cffb855ca50a9654c6d55
|
/pyobjc/pyobjc/pyobjc-framework-Cocoa-2.5.1/PyObjCTest/test_nsrunningapplication.py
|
48c6f007d87b39c3ba7073ddddbe01aa533a33a7
|
[
"MIT"
] |
permissive
|
apple-open-source/macos
|
a4188b5c2ef113d90281d03cd1b14e5ee52ebffb
|
2d2b15f13487673de33297e49f00ef94af743a9a
|
refs/heads/master
| 2023-08-01T11:03:26.870408
| 2023-03-27T00:00:00
| 2023-03-27T00:00:00
| 180,595,052
| 124
| 24
| null | 2022-12-27T14:54:09
| 2019-04-10T14:06:23
| null |
UTF-8
|
Python
| false
| false
| 1,306
|
py
|
test_nsrunningapplication.py
|
from PyObjCTools.TestSupport import *
from AppKit import *
class TestNSRunningApplication (TestCase):
@min_os_level('10.6')
def testConstants(self):
self.assertEqual(NSApplicationActivateAllWindows, 1<<0)
self.assertEqual(NSApplicationActivateIgnoringOtherApps, 1<<1)
self.assertEqual(NSApplicationActivationPolicyRegular, 0)
self.assertEqual(NSApplicationActivationPolicyAccessory, 1)
self.assertEqual(NSApplicationActivationPolicyProhibited, 2)
@min_os_level('10.6')
def testMethods(self):
self.assertResultIsBOOL(NSRunningApplication.isTerminated)
self.assertResultIsBOOL(NSRunningApplication.isFinishedLaunching)
self.assertResultIsBOOL(NSRunningApplication.isHidden)
self.assertResultIsBOOL(NSRunningApplication.isActive)
self.assertResultIsBOOL(NSRunningApplication.hide)
self.assertResultIsBOOL(NSRunningApplication.unhide)
self.assertResultIsBOOL(NSRunningApplication.activateWithOptions_)
self.assertResultIsBOOL(NSRunningApplication.terminate)
self.assertResultIsBOOL(NSRunningApplication.forceTerminate)
@min_os_level('10.7')
    def testMethods10_7(self):
self.assertResultIsBOOL(NSRunningApplication.ownsMenuBar)
if __name__ == "__main__":
main()
|
2ae3eb4068e374e87a4c7523145928899a5e281d
|
bbd69601912a3361d788efd03a47f9d4e3bac09e
|
/demo/MaskedNumCtrl.py
|
987939510770faf76d828e309d5ea222d59d3454
|
[] |
no_license
|
wxWidgets/Phoenix
|
56929484460a0399a8f1d9582bc77c20aa14748d
|
a1184286703cf24c4b88e5bc14cf2979c1b1ea00
|
refs/heads/master
| 2023-09-01T07:10:17.437093
| 2023-08-31T05:38:01
| 2023-08-31T05:38:01
| 5,078,061
| 2,268
| 677
| null | 2023-09-09T17:06:59
| 2012-07-17T06:22:25
|
Python
|
UTF-8
|
Python
| false
| false
| 15,516
|
py
|
MaskedNumCtrl.py
|
#!/usr/bin/env python
import string
import sys
import traceback
import wx
from wx.lib import masked
#----------------------------------------------------------------------
class TestPanel( wx.Panel ):
def __init__( self, parent, log ):
wx.Panel.__init__( self, parent, -1 )
self.log = log
panel = wx.Panel( self, -1 )
header = wx.StaticText(panel, -1, """\
This shows the various options for masked.NumCtrl.
The controls at the top reconfigure the resulting control at the bottom.
""")
header.SetForegroundColour( "Blue" )
intlabel = wx.StaticText( panel, -1, "Integer width:" )
self.integerwidth = masked.NumCtrl(
panel, value=10, integerWidth=2, allowNegative=False
)
fraclabel = wx.StaticText( panel, -1, "Fraction width:" )
self.fractionwidth = masked.NumCtrl(
panel, value=0, integerWidth=2, allowNegative=False
)
groupcharlabel = wx.StaticText( panel,-1, "Grouping char:" )
self.groupchar = masked.TextCtrl(
panel, -1, value=',', mask='*', includeChars = ' ', excludeChars = '-()0123456789',
formatcodes='F', emptyInvalid=False, validRequired=True
)
decimalcharlabel = wx.StaticText( panel,-1, "Decimal char:" )
self.decimalchar = masked.TextCtrl(
panel, -1, value='.', mask='&', excludeChars = '-()',
formatcodes='F', emptyInvalid=True, validRequired=True
)
self.set_min = wx.CheckBox( panel, -1, "Set minimum value:" )
# Create this masked.NumCtrl using factory, to show how:
self.min = masked.Ctrl( panel, integerWidth=5, fractionWidth=2, controlType=masked.controlTypes.NUMBER )
self.min.Enable( False )
self.set_max = wx.CheckBox( panel, -1, "Set maximum value:" )
self.max = masked.NumCtrl( panel, integerWidth=5, fractionWidth=2 )
self.max.Enable( False )
self.limit_target = wx.CheckBox( panel, -1, "Limit control" )
self.limit_on_field_change = wx.CheckBox( panel, -1, "Limit on field change" )
self.allow_none = wx.CheckBox( panel, -1, "Allow empty control" )
self.group_digits = wx.CheckBox( panel, -1, "Group digits" )
self.group_digits.SetValue( True )
self.allow_negative = wx.CheckBox( panel, -1, "Allow negative values" )
self.allow_negative.SetValue( True )
self.use_parens = wx.CheckBox( panel, -1, "Use parentheses" )
self.select_on_entry = wx.CheckBox( panel, -1, "Select on entry" )
self.select_on_entry.SetValue( True )
label = wx.StaticText( panel, -1, "Resulting numeric control:" )
font = label.GetFont()
font.SetWeight(wx.FONTWEIGHT_BOLD)
label.SetFont(font)
self.target_ctl = masked.NumCtrl( panel, -1, name="target control" )
label_numselect = wx.StaticText( panel, -1, """\
Programmatically set the above
value entry ctrl:""")
self.numselect = wx.ComboBox(panel, -1, choices = [ '0', '111', '222.22', '-3', '54321.666666666', '-1353.978',
'1234567', '-1234567', '123456789', '-123456789.1',
'1234567890.', '-9876543210.9' ])
grid1 = wx.FlexGridSizer( cols=4 )
grid1.Add( intlabel, 0, wx.ALIGN_LEFT|wx.ALIGN_CENTER_VERTICAL|wx.ALL, 5)
grid1.Add( self.integerwidth, 0, wx.ALIGN_LEFT|wx.ALL, 5 )
grid1.Add( groupcharlabel, 0, wx.ALIGN_LEFT|wx.ALIGN_CENTER_VERTICAL|wx.ALL, 5)
grid1.Add( self.groupchar, 0, wx.ALIGN_LEFT|wx.ALL, 5 )
grid1.Add( fraclabel, 0, wx.ALIGN_LEFT|wx.ALIGN_CENTER_VERTICAL|wx.ALL, 5 )
grid1.Add( self.fractionwidth, 0, wx.ALIGN_LEFT|wx.ALL, 5 )
grid1.Add( decimalcharlabel, 0, wx.ALIGN_LEFT|wx.ALIGN_CENTER_VERTICAL|wx.ALL, 5)
grid1.Add( self.decimalchar, 0, wx.ALIGN_LEFT|wx.ALL, 5 )
grid1.Add( self.set_min, 0, wx.ALIGN_LEFT|wx.ALIGN_CENTER_VERTICAL|wx.ALL, 5 )
grid1.Add( self.min, 0, wx.ALIGN_LEFT|wx.ALL, 5 )
grid1.Add( (5,5), 0, wx.ALIGN_LEFT|wx.ALL, 5)
grid1.Add( (5,5), 0, wx.ALIGN_LEFT|wx.ALL, 5)
grid1.Add( self.set_max, 0, wx.ALIGN_LEFT|wx.ALIGN_CENTER_VERTICAL|wx.ALL, 5 )
grid1.Add( self.max, 0, wx.ALIGN_LEFT|wx.ALL, 5 )
grid1.Add( (5,5), 0, wx.ALIGN_LEFT|wx.ALL, 5)
grid1.Add( (5,5), 0, wx.ALIGN_LEFT|wx.ALL, 5)
grid1.Add( self.limit_target, 0, wx.ALIGN_LEFT|wx.ALL, 5 )
grid1.Add( self.limit_on_field_change, 0, wx.ALIGN_LEFT|wx.ALL, 5 )
hbox1 = wx.BoxSizer( wx.HORIZONTAL )
hbox1.Add( (17,5), 0, wx.ALIGN_LEFT|wx.ALL, 5)
hbox1.Add( self.allow_none, 0, wx.ALIGN_LEFT|wx.ALL, 5 )
grid1.Add( hbox1, 0, wx.ALIGN_LEFT|wx.ALL, 5)
grid1.Add( (5,5), 0, wx.ALIGN_LEFT|wx.ALL, 5)
grid1.Add( self.group_digits, 0, wx.ALIGN_LEFT|wx.LEFT, 5 )
grid1.Add( self.allow_negative, 0, wx.ALIGN_LEFT|wx.ALL, 5 )
hbox2 = wx.BoxSizer( wx.HORIZONTAL )
hbox2.Add( (17,5), 0, wx.ALIGN_LEFT|wx.ALL, 5)
hbox2.Add( self.use_parens, 0, wx.ALIGN_LEFT|wx.ALL, 5 )
grid1.Add( hbox2, 0, wx.ALIGN_LEFT|wx.ALL, 5)
grid1.Add( (5,5), 0, wx.ALIGN_LEFT|wx.ALL, 5)
grid1.Add( self.select_on_entry, 0, wx.ALIGN_LEFT|wx.LEFT, 5 )
grid1.Add( (5,5), 0, wx.ALIGN_LEFT|wx.ALL, 5)
grid1.Add( (5,5), 0, wx.ALIGN_LEFT|wx.ALL, 5)
grid1.Add( (5,5), 0, wx.ALIGN_LEFT|wx.ALL, 5)
grid2 = wx.FlexGridSizer( cols=2 )
grid2.Add( label, 0, wx.ALIGN_LEFT|wx.ALIGN_CENTER_VERTICAL|wx.ALL, 5 )
grid2.Add( self.target_ctl, 0, wx.ALIGN_LEFT|wx.ALL, 5 )
grid2.Add( (5,5), 0, wx.ALIGN_LEFT|wx.ALL, 5)
grid2.Add( (5,5), 0, wx.ALIGN_LEFT|wx.ALL, 5)
grid2.Add( label_numselect, 0, wx.ALIGN_LEFT|wx.ALIGN_CENTER_VERTICAL|wx.ALL, 5 )
grid2.Add( self.numselect, 0, wx.ALIGN_LEFT|wx.ALL, 5 )
grid2.Add( (5,5), 0, wx.ALIGN_LEFT|wx.ALL, 5)
grid2.Add( (5,5), 0, wx.ALIGN_LEFT|wx.ALL, 5)
grid2.AddGrowableCol(1)
self.outer_box = wx.BoxSizer( wx.VERTICAL )
self.outer_box.Add(header, 0, wx.ALIGN_LEFT|wx.TOP|wx.LEFT, 20)
self.outer_box.Add( grid1, 0, wx.ALIGN_CENTRE|wx.LEFT|wx.BOTTOM|wx.RIGHT, 20 )
self.outer_box.Add( grid2, 0, wx.ALIGN_LEFT|wx.ALL, 20 )
self.grid2 = grid2
panel.SetAutoLayout( True )
panel.SetSizer( self.outer_box )
self.outer_box.Fit( panel )
panel.Move( (50,10) )
self.panel = panel
self.Bind(masked.EVT_NUM, self.OnSetIntWidth, self.integerwidth )
self.Bind(masked.EVT_NUM, self.OnSetFractionWidth, self.fractionwidth )
self.Bind(wx.EVT_TEXT, self.OnSetGroupChar, self.groupchar )
self.Bind(wx.EVT_TEXT, self.OnSetDecimalChar, self.decimalchar )
self.Bind(wx.EVT_CHECKBOX, self.OnSetMin, self.set_min )
self.Bind(wx.EVT_CHECKBOX, self.OnSetMax, self.set_max )
self.Bind(masked.EVT_NUM, self.SetTargetMinMax, self.min )
self.Bind(masked.EVT_NUM, self.SetTargetMinMax, self.max )
self.Bind(wx.EVT_CHECKBOX, self.OnSetLimited, self.limit_target )
self.Bind(wx.EVT_CHECKBOX, self.OnSetLimitOnFieldChange, self.limit_on_field_change )
self.Bind(wx.EVT_CHECKBOX, self.OnSetAllowNone, self.allow_none )
self.Bind(wx.EVT_CHECKBOX, self.OnSetGroupDigits, self.group_digits )
self.Bind(wx.EVT_CHECKBOX, self.OnSetAllowNegative, self.allow_negative )
self.Bind(wx.EVT_CHECKBOX, self.OnSetUseParens, self.use_parens )
self.Bind(wx.EVT_CHECKBOX, self.OnSetSelectOnEntry, self.select_on_entry )
self.Bind(masked.EVT_NUM, self.OnTargetChange, self.target_ctl )
self.Bind(wx.EVT_COMBOBOX, self.OnNumberSelect, self.numselect )
def OnSetIntWidth(self, event ):
width = self.integerwidth.GetValue()
if width < 1:
self.log.write("integer width must be positive\n")
self.integerwidth.SetForegroundColour(wx.RED)
else:
self.integerwidth.SetForegroundColour(wx.BLACK)
self.log.write("setting integer width to %d\n" % width)
self.target_ctl.SetParameters( integerWidth = width)
# Now resize and fit the dialog as appropriate:
self.grid2.SetItemMinSize(self.target_ctl, self.target_ctl.GetSize())
self.outer_box.Fit( self.panel )
self.outer_box.SetSizeHints( self.panel )
def OnSetFractionWidth(self, event ):
width = self.fractionwidth.GetValue()
self.log.write("setting fraction width to %d\n" % width)
self.target_ctl.SetParameters( fractionWidth = width)
# Now resize and fit the dialog as appropriate:
self.grid2.SetItemMinSize(self.target_ctl, self.target_ctl.GetSize())
self.outer_box.Fit( self.panel )
self.outer_box.SetSizeHints( self.panel )
def OnSetGroupChar( self, event ):
char = self.groupchar.GetValue()
if self.target_ctl.GetDecimalChar() == char:
self.log.write("group and decimal chars must be different\n")
self.groupchar.SetForegroundColour(wx.RED)
else:
self.groupchar.SetForegroundColour(wx.BLACK)
self.log.write("setting group char to %s\n" % char)
self.target_ctl.SetGroupChar( char )
def OnSetDecimalChar( self, event ):
char = self.decimalchar.GetValue()
if self.target_ctl.GetGroupChar() == char:
self.log.write("group and decimal chars must be different\n")
self.decimalchar.SetForegroundColour(wx.RED)
else:
self.decimalchar.SetForegroundColour(wx.BLACK)
self.log.write("setting decimal char to %s\n" % char)
self.target_ctl.SetDecimalChar( char )
def OnSetMin( self, event ):
self.min.Enable( self.set_min.GetValue() )
self.SetTargetMinMax()
def OnSetMax( self, event ):
self.max.Enable( self.set_max.GetValue() )
self.SetTargetMinMax()
def OnSetLimited( self, event ):
limited = self.limit_target.GetValue()
self.target_ctl.SetLimited( limited )
limit_on_field_change = self.limit_on_field_change.GetValue()
if limited and limit_on_field_change:
self.limit_on_field_change.SetValue(False)
self.target_ctl.SetLimitOnFieldChange( False )
self.SetTargetMinMax()
def OnSetLimitOnFieldChange( self, event ):
limit_on_field_change = self.limit_on_field_change.GetValue()
self.target_ctl.SetLimitOnFieldChange( limit_on_field_change )
limited = self.limit_target.GetValue()
if limited and limit_on_field_change:
self.limit_target.SetValue(False)
self.target_ctl.SetLimited( False )
def SetTargetMinMax( self, event=None ):
min = max = None
if self.set_min.GetValue():
min = self.min.GetValue()
if self.set_max.GetValue():
max = self.max.GetValue()
cur_min, cur_max = self.target_ctl.GetBounds()
if min != cur_min and not self.target_ctl.SetMin( min ):
if self.target_ctl.GetMax() is None and cur_max > min:
self.log.write( "min (%d) won't fit in control -- bound not set\n" % min )
else:
self.log.write( "min (%d) > current max (%d) -- bound not set\n" % ( min, self.target_ctl.GetMax() ) )
self.min.SetParameters( signedForegroundColour=wx.RED, foregroundColour=wx.RED )
else:
self.min.SetParameters( signedForegroundColour=wx.BLACK, foregroundColour=wx.BLACK )
self.min.Refresh()
if max != cur_max and not self.target_ctl.SetMax( max ):
            if self.target_ctl.GetMin() is None and cur_min < max:
self.log.write( "max (%d) won't fit in control -- bound not set\n" % max )
else:
self.log.write( "max (%d) < current min (%d) -- bound not set\n" % ( max, self.target_ctl.GetMin() ) )
self.max.SetParameters( signedForegroundColour=wx.RED, foregroundColour=wx.RED )
else:
self.max.SetParameters( signedForegroundColour=wx.BLACK, foregroundColour=wx.BLACK )
self.max.Refresh()
if min != cur_min or max != cur_max:
new_min, new_max = self.target_ctl.GetBounds()
self.log.write( "current min, max: (%s, %s)\n" % ( str(new_min), str(new_max) ) )
def OnSetAllowNone( self, event ):
self.target_ctl.SetAllowNone( self.allow_none.GetValue() )
def OnSetGroupDigits( self, event ):
self.target_ctl.SetGroupDigits( self.group_digits.GetValue() )
# Now resize and fit the dialog as appropriate:
self.grid2.SetItemMinSize(self.target_ctl, self.target_ctl.GetSize())
self.outer_box.Fit( self.panel )
self.outer_box.SetSizeHints( self.panel )
def OnSetAllowNegative( self, event ):
if self.allow_negative.GetValue():
self.use_parens.Enable(True)
self.target_ctl.SetParameters(allowNegative=True,
useParensForNegatives = self.use_parens.GetValue())
else:
self.target_ctl.SetAllowNegative(False)
# Now resize and fit the dialog as appropriate:
self.grid2.SetItemMinSize(self.target_ctl, self.target_ctl.GetSize())
self.outer_box.Fit( self.panel )
self.outer_box.SetSizeHints( self.panel )
def OnSetUseParens( self, event ):
self.target_ctl.SetUseParensForNegatives( self.use_parens.GetValue() )
# Now resize and fit the dialog as appropriate:
self.grid2.SetItemMinSize(self.target_ctl, self.target_ctl.GetSize())
self.outer_box.Fit( self.panel )
self.outer_box.SetSizeHints( self.panel )
def OnSetSelectOnEntry( self, event ):
self.target_ctl.SetSelectOnEntry( self.select_on_entry.GetValue() )
def OnTargetChange( self, event ):
ctl = event.GetEventObject()
value = ctl.GetValue()
ib_str = [ " (out of bounds)", "" ]
self.log.write( "value = %s (%s)%s\n" % ( repr(value), repr(type(value)), ib_str[ ctl.IsInBounds(value) ] ) )
def OnNumberSelect( self, event ):
value = event.GetString()
if value:
if value.find('.') != -1:
numvalue = float(value)
else:
numvalue = int(value)
else:
numvalue = value # try to clear the value again
try:
self.target_ctl.SetValue(numvalue)
except:
type, value, tb = sys.exc_info()
for line in traceback.format_exception_only(type, value):
self.log.write(line)
#----------------------------------------------------------------------
def runTest( frame, nb, log ):
win = TestPanel( nb, log )
return win
#----------------------------------------------------------------------
import wx.lib.masked.numctrl as mnum
overview = """<html>
<PRE><FONT SIZE=-1>
""" + mnum.__doc__ + """
</FONT></PRE>"""
if __name__ == '__main__':
import sys,os
import run
run.main(['', os.path.basename(sys.argv[0])] + sys.argv[1:])
|
d0013095386705bc4218f1757e79cc3d5c61a826
|
8a4b17f89a8b24e6e1f8073f4df9d3bee7c546ec
|
/tests/support/__init__.py
|
6ba871daf2835ce4bdf296c1603f1fd7fa6d182d
|
[
"Apache-2.0"
] |
permissive
|
biocommons/hgvs
|
f8a600f15657b7f6aaa7c913d55d3acc43c1cb51
|
697b32bba2b191c3e10c44d408030927f031c03e
|
refs/heads/main
| 2023-08-19T12:40:15.503258
| 2023-05-23T21:46:06
| 2023-05-23T21:46:06
| 84,496,560
| 228
| 95
|
Apache-2.0
| 2023-09-14T05:01:26
| 2017-03-09T22:58:27
|
Python
|
UTF-8
|
Python
| false
| false
| 35
|
py
|
__init__.py
|
CACHE = "tests/data/cache-py3.hdp"
|
556f9b4fc3317719d9eb09e0ded7e33dd4156087
|
20dda4f19ec777d1a69ae20b5e2a48b9b28bb4a4
|
/flexbe_testing/src/flexbe_testing/test/import_only_state.py
|
6d2a99feb524a55fcd68de337ecfa4ed19237159
|
[] |
permissive
|
team-vigir/flexbe_behavior_engine
|
fd94ac2b75bfef6ca318d700d94b76f16cfd6552
|
6028c8585d852be55f4512024dcca5caa53e57c2
|
refs/heads/main
| 2023-05-12T20:25:50.388882
| 2022-03-09T22:19:43
| 2022-03-09T22:19:43
| 38,892,260
| 131
| 72
|
BSD-3-Clause
| 2023-06-23T03:06:37
| 2015-07-10T17:06:37
|
Python
|
UTF-8
|
Python
| false
| false
| 261
|
py
|
import_only_state.py
|
#!/usr/bin/env python
import rospy
from flexbe_core import EventState
class ImportOnlyState(EventState):
def __init__(self):
'''Constructor'''
super(ImportOnlyState, self).__init__(outcomes=['done'])
raise Exception('Test should be import only!')
|
411c33e1f18641c1973cd6b2e0f1ae2ec8387b0a
|
3c6bca8e9f4be0034e30673eeaf4659c59f6b93e
|
/tests/test_ssl.py
|
b9f768d95d7730fbd009210d1bd434f1395883a7
|
[
"MIT"
] |
permissive
|
miguelgrinberg/greenletio
|
4e846647e02d9879e3914340af9a7e98051b599c
|
db38bd2c5315cd0b9d515665f19256424fe655ee
|
refs/heads/main
| 2023-08-25T17:08:16.093632
| 2023-07-12T06:59:05
| 2023-07-12T06:59:05
| 279,851,837
| 143
| 8
|
MIT
| 2023-07-25T18:22:58
| 2020-07-15T11:36:43
|
Python
|
UTF-8
|
Python
| false
| false
| 2,510
|
py
|
test_ssl.py
|
import asyncio
import sys
import unittest
from greenletio.core import bridge, async_
from greenletio.green import socket, ssl
# Tests in this module use server and client certificates
#
# To generate server certificate:
# openssl req -x509 -newkey rsa:4096 -keyout key.pem -out cert.pem -days 365
# -nodes -subj "/CN=example.com"
#
# To generate client certificate:
# openssl req -x509 -newkey rsa:4096 -keyout client.key -out client.crt
# -days 365 -nodes -subj "/CN=example.com"
if not hasattr(asyncio, 'create_task'):
asyncio.create_task = asyncio.ensure_future
class TestSSL(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
bridge.stop()
def test_sendall_recv(self):
var = None
@async_
def server():
server_socket = socket.socket()
server_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
server_socket.bind(('127.0.0.1', 7000))
server_socket.listen(5)
context = ssl.create_default_context(ssl.Purpose.CLIENT_AUTH)
context.load_cert_chain('tests/server.crt', 'tests/server.key')
context.load_verify_locations('tests/client.crt')
ssl_socket = context.wrap_socket(server_socket, server_side=True)
conn, _ = ssl_socket.accept()
data = conn.recv(1024)
conn.sendall(data.upper())
conn.close()
ssl_socket.close()
@async_
def client():
nonlocal var
client_socket = socket.socket()
context = ssl.create_default_context(ssl.Purpose.SERVER_AUTH,
cafile='tests/server.crt')
context.load_cert_chain('tests/client.crt', 'tests/client.key')
ssl_socket = context.wrap_socket(client_socket,
server_hostname='example.com')
ssl_socket.connect(('127.0.0.1', 7000))
ssl_socket.sendall(b'hello')
var = ssl_socket.recv(1024)
ssl_socket.close()
async def main():
nonlocal var
asyncio.create_task(server())
asyncio.create_task(client())
while var is None:
await asyncio.sleep(0)
if sys.platform == 'win32':
loop = asyncio.SelectorEventLoop()
asyncio.set_event_loop(loop)
asyncio.get_event_loop().run_until_complete(main())
assert var == b'HELLO'
|
28a4b5f059c6a204ad49e150549f8e6e94200da0
|
e6e99b42bf2912cac9b6f16441031d51073c9092
|
/downloader_cli/__version__.py
|
0efd2876a832869fcb7dbd1267d7d1b6a3d7cbd0
|
[
"MIT"
] |
permissive
|
deepjyoti30/downloader-cli
|
0b256928ab90eeb11a5dc7f57653fdc8c2d33e50
|
801091d019f6913b219d0189ef6ce8e24677902b
|
refs/heads/master
| 2022-06-01T14:59:51.749283
| 2022-05-27T05:16:49
| 2022-05-27T05:16:49
| 221,208,977
| 333
| 32
|
MIT
| 2022-01-17T13:40:22
| 2019-11-12T12:07:34
|
Python
|
UTF-8
|
Python
| false
| false
| 64
|
py
|
__version__.py
|
"""Contiain the version of the package"""
__version__ = "0.3.3"
|
e21bcf5793db0086a3984fc9df5e816dc225dff3
|
10cb11f83e1c8b51b9d72c28d6259a56ff1a97c8
|
/samcli/commands/package/__init__.py
|
6e45897bbb676ddab042df1fed189038ff25c44a
|
[
"Apache-2.0",
"BSD-3-Clause",
"MIT",
"BSD-2-Clause"
] |
permissive
|
aws/aws-sam-cli
|
6d4411aacf7f861e75e5cf4882a32858797a276d
|
b297ff015f2b69d7c74059c2d42ece1c29ea73ee
|
refs/heads/develop
| 2023-08-30T23:28:36.179932
| 2023-08-30T21:58:26
| 2023-08-30T21:58:26
| 92,205,085
| 1,402
| 470
|
Apache-2.0
| 2023-09-14T21:14:23
| 2017-05-23T18:16:23
|
Python
|
UTF-8
|
Python
| false
| false
| 93
|
py
|
__init__.py
|
"""
`sam package` command
"""
# Expose the cli object here
from .command import cli # noqa
|
76eb6edec09f3fdf33fee275e049e226fcc45add
|
e61e664d95af3b93150cda5b92695be6551d2a7c
|
/vega/algorithms/nas/opt_nas/__init__.py
|
fe30849ffaaec917b56f1242f089e9702ad1a4ed
|
[
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0",
"BSD-3-Clause",
"MIT"
] |
permissive
|
huawei-noah/vega
|
44aaf8bb28b45f707ed6cd4e871ba70fc0c04846
|
12e37a1991eb6771a2999fe0a46ddda920c47948
|
refs/heads/master
| 2023-09-01T20:16:28.746745
| 2023-02-15T09:36:59
| 2023-02-15T09:36:59
| 273,667,533
| 850
| 184
|
NOASSERTION
| 2023-02-15T09:37:01
| 2020-06-20T08:20:06
|
Python
|
UTF-8
|
Python
| false
| false
| 66
|
py
|
__init__.py
|
from .ops_nas import OperatorSearchSpace, OperatorReplaceCallback
|
676644d1dfd3538487e7442d84d5661cb3680db3
|
952dc66c61966f099756cdb6c2d13b40352f63cc
|
/zerver/lib/pysa.py
|
c0fa12484a2a42419c2200cb81c8a70db7dca054
|
[
"Apache-2.0",
"LicenseRef-scancode-free-unknown"
] |
permissive
|
zulip/zulip
|
5ae6aad35fd9f72996c0a2a9cdd674400966ebf6
|
965a25d91b6ee2db54038f5df855215fa25146b0
|
refs/heads/main
| 2023-08-28T23:43:00.971110
| 2023-08-28T16:47:09
| 2023-08-28T19:33:02
| 43,160,685
| 20,239
| 8,996
|
Apache-2.0
| 2023-09-14T20:57:47
| 2015-09-25T16:37:25
|
Python
|
UTF-8
|
Python
| false
| false
| 95
|
py
|
pysa.py
|
from typing import TypeVar
T = TypeVar("T")
def mark_sanitized(arg: T) -> T:
    """Mark a value as sanitized for the Pysa taint checker; a no-op at runtime."""
return arg
|
d11269f755ddba1488e3cfd0dc8bfcd4d2142072
|
1b94c7cfd66804fe8d40b5def35e4b9b18d69ba2
|
/stubs/google/appengine/datastore/datastore_index_xml.pyi
|
46d0aa6a00acd4a19730ee162267126a446a7a3a
|
[
"MIT"
] |
permissive
|
the-blue-alliance/the-blue-alliance
|
3dc210a9611ce9b240907ffd420f78040318dcdc
|
6d42f3cdb2f785d192f2871419e58aaae3445029
|
refs/heads/py3
| 2023-08-22T21:02:36.398100
| 2023-08-22T19:14:01
| 2023-08-22T19:14:01
| 888,427
| 344
| 263
|
MIT
| 2023-09-14T18:35:20
| 2010-09-04T20:34:11
|
HTML
|
UTF-8
|
Python
| false
| false
| 593
|
pyi
|
datastore_index_xml.pyi
|
from google.appengine.api.validation import ValidationError as ValidationError
from google.appengine.datastore.datastore_index import Index as Index, IndexDefinitions as IndexDefinitions, Property as Property
from typing import Any
MISSING_KIND: str
BAD_DIRECTION: str
BAD_MODE: str
NAME_MISSING: str
MODE_AND_DIRECTION_SPECIFIED: str
MODE_AND_ANCESTOR_SPECIFIED: str
def IndexesXmlToIndexDefinitions(xml_str): ...
def IsAutoGenerated(xml_str): ...
class IndexesXmlParser:
indexes: Any
errors: Any
def Parse(self, xml_str): ...
def ProcessIndexNode(self, node) -> None: ...
|
ea631181b950e97e4f18c8821737e9958ac1b61d
|
40282fc3afc28166ce01cdf2240d445a1930f2b0
|
/plugins/ipynb/ipynb.py
|
1e625a119964e6517ced7f272999d0797ddb4503
|
[
"MIT"
] |
permissive
|
Harvard-IACS/2020-CS109A
|
7dac61f88aefe9647fe7e3eabb3dc6ef85cc8d73
|
665100fec24309edb818a51bc8c29db2912d370f
|
refs/heads/master
| 2022-07-31T18:05:47.127653
| 2021-11-17T22:30:00
| 2021-11-17T22:30:00
| 287,811,847
| 114
| 123
|
MIT
| 2022-05-04T06:26:14
| 2020-08-15T19:28:34
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 72
|
py
|
ipynb.py
|
"""
This file is needed to make pelican work :)
"""
from .core import *
|
39ac2e31f129ac600a6a057558477a197aaa9f62
|
bfa4447ec5e92017aec95ee6d349d91b5733afca
|
/otter/utils.py
|
bea73199924ab876332c28b5bfe0af21230e86c4
|
[
"BSD-3-Clause"
] |
permissive
|
ucbds-infra/otter-grader
|
4020c14614fc62a93ce564c6b8ad88269defac97
|
e6ece6b53ef2291f2724ff9965f09d910ad10e7e
|
refs/heads/master
| 2023-08-23T22:46:15.793814
| 2023-08-18T21:53:52
| 2023-08-18T21:53:52
| 208,363,438
| 112
| 62
|
BSD-3-Clause
| 2023-09-12T00:01:41
| 2019-09-13T23:40:57
|
Python
|
UTF-8
|
Python
| false
| false
| 11,413
|
py
|
utils.py
|
"""Various utilities for Otter-Grader"""
import importlib
import logging
import logging.handlers
import os
import pathlib
import random
import re
import string
import shutil
import tempfile
import yaml
from contextlib import contextmanager, redirect_stdout
from functools import lru_cache
from IPython import get_ipython
# TODO: migrate other uses to this constant
NBFORMAT_VERSION = 4
"""the version of the Jupyter notebook format to use"""
NOTEBOOK_METADATA_KEY = "otter"
"""the key used for all Otter metadata added to a notebook"""
REQUIRE_CONFIRMATION_NO_PDF_EXPORT_KEY = "require_no_pdf_confirmation"
"""
the key in Otter's notebook metadata for requiring students to acknowledge that their notebook could
not be exported as a PDF before creating the submission zip file
"""
NO_PDF_EXPORT_MESSAGE_KEY = "export_pdf_failure_message"
"""
the key in Otter's notebook metadata for the message to show if a notebook cannot be exported as a
PDF
"""
@contextmanager
def hide_outputs():
"""
Context manager for hiding outputs from ``display()`` calls. IPython handles matplotlib outputs
    specially, so those are suppressed too.
"""
ipy = get_ipython()
if ipy is None:
# Not running inside ipython!
yield
return
old_formatters = ipy.display_formatter.formatters
ipy.display_formatter.formatters = {}
try:
yield
finally:
ipy.display_formatter.formatters = old_formatters
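# Usage sketch (illustrative, inside an IPython session):
#   with hide_outputs():
#       display(some_object)  # rendered output is suppressed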
def id_generator(size=6, chars=string.ascii_uppercase + string.digits):
"""
    Used to generate a dynamic variable name for grading functions.
This function generates a random name using the given length and character set.
Args:
size (``int``): length of output name
chars (``str``, optional): set of characters used to create function name
Returns:
``str``: randomized string name for grading function
"""
return ''.join(random.choice(chars) for _ in range(size))
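# Usage sketch (illustrative):
#   id_generator()                     # e.g. "A1B2C3" -- six chars from A-Z and 0-9
#   id_generator(size=4, chars="abc")  # e.g. "bacb" -- four chars drawn from "abc"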
def get_variable_type(obj):
"""
Returns the fully-qualified type string of an object ``obj``
Args:
obj (object): the object in question
Returns:
``str``: the fully-qualified type string
"""
return type(obj).__module__ + "." + type(obj).__name__
def get_relpath(src, dst):
"""
Returns the relative path from ``src`` to ``dst``
Args:
src (``pathlib.Path``): the source directory
dst (``pathlib.Path``): the destination directory
Returns:
``pathlib.Path``: the relative path
"""
# osrc = src
ups = 0
while True:
try:
dst.relative_to(src)
break
except ValueError:
src = src.parent
ups += 1
return pathlib.Path(("../" * ups) / dst.relative_to(src))
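# Illustrative example: get_relpath(pathlib.Path("/a/b/c"), pathlib.Path("/a/d")) walks up
# two levels from /a/b/c to the common ancestor /a and returns Path("../../d").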
@contextmanager
def chdir(new_dir):
"""
Create a context with a different working directory, resetting the working directory on exit.
Args:
new_dir (path-like): the directory for the context
"""
curr_dir = os.getcwd()
os.chdir(new_dir)
try:
yield
finally:
os.chdir(curr_dir)
def get_source(cell):
"""
Returns the source code of a cell in a way that works for both nbformat and JSON
Args:
cell (``nbformat.NotebookNode``): notebook cell
Returns:
``list`` of ``str``: each line of the cell source stripped of ending line breaks
"""
source = cell.source
if isinstance(source, str):
return re.split("\r?\n", source)
elif isinstance(source, list):
return [line.strip("\r\n") for line in source]
raise TypeError(f"Unknown cell source type: {type(source)}")
@contextmanager
def nullcontext():
"""
Yields an empty context. Added because ``contextlib.nullcontext`` was added in Python 3.7, so
earlier versions of Python require this patch.
"""
yield
@contextmanager
def load_default_file(provided_fn, default_fn, default_disabled=False):
"""
    Reads the contents of a file with an optional default path. If ``provided_fn`` is not specified
and ``default_fn`` is an existing file path, the contents of ``default_fn`` are read in place
of ``provided_fn``. The use of ``default_fn`` can be disabled by setting ``default_disabled``
to ``True``.
"""
if provided_fn is None and os.path.isfile(default_fn) and not default_disabled:
provided_fn = default_fn
if provided_fn is not None:
if not os.path.isfile(provided_fn):
raise FileNotFoundError(f"Could not find specified file: {provided_fn}")
with open(provided_fn) as f:
yield f.read()
else:
yield
def print_full_width(char, mid_text="", whitespace=" ", ret_str=False, **kwargs):
"""
Prints a character at the full terminal width. If ``mid_text`` is supplied, this text is printed
in the middle of the terminal, surrounded by ``whitespace``. Additional kwargs passed to
``print``.
If ``ret_str`` is true, the string is returned; if not, it is printed directly to the console.
"""
cols, _ = shutil.get_terminal_size()
if mid_text:
left = cols - len(mid_text) - 2 * len(whitespace)
if left <= 0:
left = 2
l, r = left // 2, left // 2
if left % 2 == 1:
r += 1
out = char * l + whitespace + mid_text + whitespace + char * r
else:
out = char * cols
if ret_str:
return out
print(out, **kwargs)
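# Illustrative sketch: in an 80-column terminal,
#     print_full_width('-', mid_text='tests')
# emits 36 dashes, ' tests ', then 37 dashes (the odd leftover column is given
# to the right-hand side).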
def assert_path_exists(path_tuples):
"""
Ensure that a series of file paths exist and are of a specific type, or raise a ``ValueError``.
Elements of ``path_tuples`` should be 2-tuples where the first element is a string representing
the file path and the second element is ``True`` if the path should be a directory, ``False`` if
it should be a file, and ``None`` if it doesn't matter.
Args:
path_tuples (``list[tuple[str, bool]]``): the list of paths as described above
Raises:
``FileNotFoundError``: if the path does not exist or it is not of the correct type
"""
for path, is_dir in path_tuples:
if not os.path.exists(path):
raise FileNotFoundError(f"Path {path} does not exist")
if is_dir and not os.path.isdir(path):
raise FileNotFoundError(f"Path {path} is not a directory")
if is_dir is False and not os.path.isfile(path):
raise FileNotFoundError(f"Path {path} is not a file")
def knit_rmd_file(rmd_path, pdf_path):
"""
Use ``rpy2`` and ``rmarkdown::render`` to knit an RMarkdown file to a PDF, allowing errors.
Args:
rmd_path (``str``): the path to the Rmd file
pdf_path (``str``): the path at which to write the PDF
"""
from rpy2.robjects.packages import importr
with tempfile.NamedTemporaryFile(mode="w", suffix=".Rmd") as ntf:
with open(rmd_path) as f:
contents = f.read()
contents = "```{r cache = F, include = F}\nknitr::opts_chunk$set(error = TRUE)\n```\n" + \
contents
ntf.write(contents)
ntf.seek(0)
pdf_path = os.path.abspath(pdf_path)
rmarkdown = importr("rmarkdown")
rmarkdown.render(ntf.name, "pdf_document", pdf_path)
class loggers:
_instances = {}
_log_level = logging.WARNING
_formatter = logging.Formatter("[%(levelname)s %(name)s.%(funcName)s] %(message)s")
_socket_handler = None
@staticmethod
def __new__(cls, *args, **kwargs):
raise NotImplementedError("This class is not meant to be instantiated")
@classmethod
def send_logs(cls, host, port):
"""
Add a ``SocketHandler`` to all loggers that sends their logs to a TCP socket at the
specified host and port.
"""
cls._socket_handler = logging.handlers.SocketHandler(host, port)
for logger in cls._instances.values():
logger.addHandler(cls._socket_handler)
@classmethod
def get_logger(cls, name):
"""
Retrieve ``logging.Logger`` with name ``name`` and return it, setting the log level to the
class log level.
"""
if name in cls._instances:
return cls._instances[name]
logger = logging.getLogger(name)
        logger.propagate = False  # don't propagate records to ancestor loggers (avoids duplicate output)
logger.setLevel(cls._log_level)
handler = logging.StreamHandler()
handler.setFormatter(cls._formatter)
logger.addHandler(handler)
if cls._socket_handler:
logger.addHandler(cls._socket_handler)
cls._instances[name] = logger
return logger
@classmethod
def get_level(cls):
"""
Return the current log level of these loggers.
"""
return cls._log_level
@classmethod
def set_level(cls, log_level):
"""
Set the log levels for all ``Logger``s created by this class (existing and future).
"""
cls._log_level = log_level
for logger in cls._instances.values():
logger.setLevel(log_level)
@classmethod
@contextmanager
def level_context(cls, log_level):
"""
Set the log level to a new value temporarily in a context.
"""
        curr_level = cls.get_level()
        cls.set_level(log_level)
        try:
            yield
        finally:
            cls.set_level(curr_level)
@classmethod
def reset_level(cls):
"""
Set the log levels for all ``Loggers`` created by this class (existing and future) back to
``logging.WARNING``.
"""
cls.set_level(logging.WARNING)
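# Illustrative usage sketch (hypothetical logger name, not in the original file):
#     log = loggers.get_logger("otter.example")
#     with loggers.level_context(logging.DEBUG):
#         log.debug("visible only while this context is active")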
class Loggable:
"""
A class for inheriting from that provides a logger via a class- and instance-accessible field.
"""
_logger_instance = None
@classmethod
def _load_logger(cls):
"""
Set-up the ``_logger`` field.
"""
if cls._logger_instance is None:
name = cls.__module__ + "." + cls.__name__
cls._logger_instance = loggers.get_logger(name)
@property
def _logger(self):
"""
``logging.Logger``: the logger instance for this class
"""
return self._get_logger()
@classmethod
def _get_logger(cls):
"""
Load and return the logger for this class.
Returns:
``logging.Logger``: the logger instance for this class
"""
cls._load_logger()
return cls._logger_instance
@lru_cache(None)
def import_or_raise(module):
"""
Import a module or raise an ``ImportError`` if it is unable to be imported. Return values are
stored in an LRU cache.
"""
try:
return importlib.import_module(module)
    except ImportError as e:
        raise ImportError(f"Could not import required module: {module}") from e
class _CorrectIndentationDumper(yaml.Dumper):
def increase_indent(self, flow=False, *args, **kwargs):
return super().increase_indent(flow=flow, indentless=False)
def dump_yaml(o, **kwargs):
"""
Dump an object to a YAML string using the ``_CorrectIndentationDumper`` dumper.
Args:
o (``object``): the object to dump
**kwargs: additional keyword arguments passed to ``yaml.dump``
Returns:
``str``: the YAML representation of ``o``
"""
return yaml.dump(o, sort_keys=False, Dumper=_CorrectIndentationDumper, **kwargs)
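# Illustrative sketch: sequences are indented under their key, which the
# default ``yaml.Dumper`` would not do, e.g. (approximate output):
#     dump_yaml({"tests": ["q1", "q2"]})
#     # -> 'tests:\n  - q1\n  - q2\n'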
|
eb89777b00b5234a7283ed105f6111c5215503e1
|
6fdb4eaf5b0e6dbd7db4bf947547541e9aebf110
|
/g-code-testing/g_code_parsing/g_code_functionality_defs/smoothie/set_max_speed_g_code_functionality_def.py
|
ced60700ccac4a0f0fd48db502bf18078444eada
|
[
"LicenseRef-scancode-warranty-disclaimer",
"Apache-2.0"
] |
permissive
|
Opentrons/opentrons
|
874321e01149184960eeaeaa31b1d21719a1ceda
|
026b523c8c9e5d45910c490efb89194d72595be9
|
refs/heads/edge
| 2023-09-02T02:51:49.579906
| 2023-08-31T16:02:45
| 2023-08-31T16:02:45
| 38,644,841
| 326
| 174
|
Apache-2.0
| 2023-09-14T21:47:20
| 2015-07-06T20:41:01
|
Python
|
UTF-8
|
Python
| false
| false
| 920
|
py
|
set_max_speed_g_code_functionality_def.py
|
from typing import Dict
from string import Template
from g_code_parsing.g_code_functionality_defs.g_code_functionality_def_base import (
GCodeFunctionalityDefBase,
)
class SetMaxSpeedGCodeFunctionalityDef(GCodeFunctionalityDefBase):
    # This list fixes the order in which axes appear in the output string
EXPECTED_ARGS = ["X", "Y", "Z", "A", "B", "C"]
VAL_DEFINED_MESSAGE = Template("$name-Axis: $speed")
@classmethod
def _generate_command_explanation(cls, g_code_args: Dict[str, str]) -> str:
message_list = []
for arg in cls.EXPECTED_ARGS:
g_code_arg_val = g_code_args.get(arg)
if g_code_arg_val is not None:
message_list.append(
cls.VAL_DEFINED_MESSAGE.substitute(name=arg, speed=g_code_arg_val)
)
return "Setting the max speed for the following axes:\n\t" + "\n\t".join(
message_list
)
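# Illustrative sketch (made-up arguments, not part of the original file): for
# g_code_args = {"X": "600", "A": "125"} the explanation reads (tab-indented):
#     Setting the max speed for the following axes:
#         X-Axis: 600
#         A-Axis: 125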
|
5dc4173f0e7c1152f4af2a7e2ec7b91d8d2fc61a
|
88ae8695987ada722184307301e221e1ba3cc2fa
|
/third_party/angle/src/libANGLE/renderer/vulkan/gen_vk_mandatory_format_support_table.py
|
f8233b038e5f78c04d1f0881e99e6db2d142f81a
|
[
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0",
"LGPL-2.0-or-later",
"MIT",
"GPL-1.0-or-later"
] |
permissive
|
iridium-browser/iridium-browser
|
71d9c5ff76e014e6900b825f67389ab0ccd01329
|
5ee297f53dc7f8e70183031cff62f37b0f19d25f
|
refs/heads/master
| 2023-08-03T16:44:16.844552
| 2023-07-20T15:17:00
| 2023-07-23T16:09:30
| 220,016,632
| 341
| 40
|
BSD-3-Clause
| 2021-08-13T13:54:45
| 2019-11-06T14:32:31
| null |
UTF-8
|
Python
| false
| false
| 5,361
|
py
|
gen_vk_mandatory_format_support_table.py
|
#!/usr/bin/python3
# Copyright 2018 The ANGLE Project Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
#
# gen_vk_mandatory_format_support_table.py:
# Code generation for mandatory formats supported by Vulkan.
# NOTE: don't run this script directly. Run scripts/run_code_generation.py.
import sys
sys.path.append('..')
import angle_format
import xml.etree.ElementTree as etree
import os
TEMPLATE_TABLE_AUTOGEN_CPP = """// GENERATED FILE - DO NOT EDIT.
// Generated by {script_name} using data from {input_file_name} and
// the vk.xml file situated at
// /third_party/vulkan-validation-layers/src/scripts/vk.xml
//
// Copyright 2020 The ANGLE Project Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
//
// {out_file_name}:
// Queries for full Vulkan mandatory format support information based on VK format.
#include "libANGLE/renderer/vulkan/vk_format_utils.h"
using namespace angle;
namespace rx
{{
namespace vk
{{
namespace
{{
constexpr VkFormatFeatureFlagBits BLIT_DST = VK_FORMAT_FEATURE_BLIT_DST_BIT;
constexpr VkFormatFeatureFlagBits BLIT_SRC = VK_FORMAT_FEATURE_BLIT_SRC_BIT;
constexpr VkFormatFeatureFlagBits COLOR_ATTACHMENT = VK_FORMAT_FEATURE_COLOR_ATTACHMENT_BIT;
constexpr VkFormatFeatureFlagBits COLOR_ATTACHMENT_BLEND = VK_FORMAT_FEATURE_COLOR_ATTACHMENT_BLEND_BIT;
constexpr VkFormatFeatureFlagBits DEPTH_STENCIL_ATTACHMENT = VK_FORMAT_FEATURE_DEPTH_STENCIL_ATTACHMENT_BIT;
constexpr VkFormatFeatureFlagBits SAMPLED_IMAGE = VK_FORMAT_FEATURE_SAMPLED_IMAGE_BIT;
constexpr VkFormatFeatureFlagBits SAMPLED_IMAGE_FILTER_LINEAR = VK_FORMAT_FEATURE_SAMPLED_IMAGE_FILTER_LINEAR_BIT;
constexpr VkFormatFeatureFlagBits STORAGE_IMAGE = VK_FORMAT_FEATURE_STORAGE_IMAGE_BIT;
constexpr VkFormatFeatureFlagBits STORAGE_IMAGE_ATOMIC = VK_FORMAT_FEATURE_STORAGE_IMAGE_ATOMIC_BIT;
constexpr VkFormatFeatureFlagBits STORAGE_TEXEL_BUFFER = VK_FORMAT_FEATURE_STORAGE_TEXEL_BUFFER_BIT;
constexpr VkFormatFeatureFlagBits STORAGE_TEXEL_BUFFER_ATOMIC = VK_FORMAT_FEATURE_STORAGE_TEXEL_BUFFER_ATOMIC_BIT;
constexpr VkFormatFeatureFlagBits UNIFORM_TEXEL_BUFFER = VK_FORMAT_FEATURE_UNIFORM_TEXEL_BUFFER_BIT;
constexpr VkFormatFeatureFlagBits VERTEX_BUFFER = VK_FORMAT_FEATURE_VERTEX_BUFFER_BIT;
using namespace angle;
constexpr FormatMap<VkFormatProperties> kFormatProperties = {{
{format_case_data}
}};
}} // anonymous namespace
const VkFormatProperties& GetMandatoryFormatSupport(FormatID formatID)
{{
return kFormatProperties[formatID];
}}
}} // namespace vk
}} // namespace rx
"""
TEMPLATE_FORMAT_PROPERTY = """{{FormatID::{format_id}, {{0, {optimal_features}, {buffer_features}}}}}"""
def script_relative(path):
return os.path.join(os.path.dirname(sys.argv[0]), path)
def gen_format_case(format_id, vk_format, vk_map):
    def de(name):
        return name.replace("VK_FORMAT_FEATURE_", "").replace("_BIT", "")
if vk_format in vk_map and len(vk_map[vk_format]) > 0:
# Check which feature is a buffer feature or not.
buffer_features = [de(x) for x in vk_map[vk_format] if x.find("_BUFFER_") != -1]
optimal_features = [de(x) for x in vk_map[vk_format] if x.find("_BUFFER_") == -1]
optimal_features_str = "|".join(sorted(optimal_features)) if len(optimal_features) else "0"
buffer_features_str = "|".join(sorted(buffer_features)) if len(buffer_features) else "0"
else:
optimal_features_str = "0"
buffer_features_str = "0"
return TEMPLATE_FORMAT_PROPERTY.format(
format_id=format_id,
vk_format=vk_format,
optimal_features=optimal_features_str,
buffer_features=buffer_features_str)
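# Illustrative sketch (hypothetical map entry, not in the original file): a
# format whose mandatory features are SAMPLED_IMAGE (optimal tiling) and
# UNIFORM_TEXEL_BUFFER (buffer) would emit:
#     {FormatID::R8_UNORM, {0, SAMPLED_IMAGE, UNIFORM_TEXEL_BUFFER}}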
def main():
input_file_name = 'vk_mandatory_format_support_data.json'
vk_format_map_path = 'vk_format_map.json'
out_file_name = 'vk_mandatory_format_support_table_autogen.cpp'
vk_xml_file = '../../../../third_party/vulkan-deps/vulkan-headers/src/registry/vk.xml'
# auto_script parameters.
if len(sys.argv) > 1:
inputs = [
'../angle_format.py',
input_file_name,
vk_format_map_path,
vk_xml_file,
]
outputs = [out_file_name]
if sys.argv[1] == 'inputs':
print(','.join(inputs))
elif sys.argv[1] == 'outputs':
print(','.join(outputs))
else:
print('Invalid script parameters')
return 1
return 0
tree = etree.parse(script_relative(vk_xml_file))
root = tree.getroot()
vk_format_enums = root.findall(".//enums[@name='VkFormat']/enum")
vk_map = angle_format.load_json(input_file_name)
vk_format_map = angle_format.load_json(vk_format_map_path)
vk_cases = [
gen_format_case(format_id, vk_format, vk_map)
for format_id, vk_format in sorted(vk_format_map["map"].items())
]
output_cpp = TEMPLATE_TABLE_AUTOGEN_CPP.format(
format_case_data=",\n".join(vk_cases),
script_name=os.path.basename(__file__),
out_file_name=out_file_name,
input_file_name=input_file_name)
    with open(out_file_name, 'wt') as out_file:
        out_file.write(output_cpp)
return 0
if __name__ == '__main__':
sys.exit(main())
|
cd500eadfd08e22d9f47c1354303afe9a946ed86
|
68b20a02c7aa438c65eed5b8095ca566383f2343
|
/emotion-cause-extraction/ECPE-2D/utils/prepare_data.py
|
ea9efbf958a1831bc9a7b4663dc24b5d9e7884bd
|
[
"MIT"
] |
permissive
|
declare-lab/conv-emotion
|
68163b8ac523e346715d2b2649a6e97430c30325
|
8851bbde8bedd0fe07beec72d74b3b3624c9c729
|
refs/heads/master
| 2023-08-23T03:18:47.452652
| 2022-06-29T01:51:41
| 2022-06-29T01:51:41
| 155,637,781
| 791
| 210
|
MIT
| 2023-06-12T21:32:08
| 2018-10-31T23:54:41
|
Python
|
UTF-8
|
Python
| false
| false
| 13,705
|
py
|
prepare_data.py
|
# encoding: utf-8
# @author: zxding
# email: d.z.x@qq.com
import codecs
import random
import numpy as np
import pickle as pk
from sklearn.metrics import precision_score, recall_score, f1_score
import time
def print_time():
print('\n----------{}----------'.format(time.strftime("%Y-%m-%d %X", time.localtime())))
def batch_index(length, batch_size, test=False):
index = list(range(length))
if not test: np.random.shuffle(index)
for i in range(int( (length + batch_size -1) / batch_size ) ):
ret = index[i * batch_size : (i + 1) * batch_size]
if not test and len(ret) < batch_size : break
yield ret
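# Illustrative sketch: batch_index(5, 2, test=True) yields [0, 1], [2, 3], [4];
# in training mode (test=False) the indices are shuffled first and the short
# trailing batch is dropped.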
def load_w2v(embedding_dim, embedding_dim_pos, data_file_path, embedding_path):
print('\nload embedding...')
words = []
inputFile = open(data_file_path, 'r')
while True:
line = inputFile.readline()
if line == '': break
d_len = int(line.strip().split()[1])
inputFile.readline()
for i in range(d_len):
words.extend(inputFile.readline().strip().split(',')[-1].split())
words = set(words)
word_idx = dict((c, k + 1) for k, c in enumerate(words))
word_idx_rev = dict((k + 1, c) for k, c in enumerate(words))
w2v = {}
inputFile = open(embedding_path, 'r')
inputFile.readline()
for line in inputFile.readlines():
line = line.strip().split()
w, ebd = line[0], line[1:]
w2v[w] = ebd
embedding = [list(np.zeros(embedding_dim))]
hit = 0
for item in words:
if item in w2v:
vec = list(map(float, w2v[item]))
hit += 1
else:
            vec = list(np.random.rand(embedding_dim) / 5. - 0.1)  # sampled uniformly from [-0.1, 0.1)
embedding.append(vec)
print('w2v_file: {}\nall_words: {} hit_words: {}'.format(embedding_path, len(words), hit))
embedding_pos = [list(np.zeros(embedding_dim_pos))]
embedding_pos.extend( [list(np.random.normal(loc=0.0, scale=0.1, size=embedding_dim_pos)) for i in range(200)] )
embedding, embedding_pos = np.array(embedding), np.array(embedding_pos)
print("embedding.shape: {} embedding_pos.shape: {}".format(embedding.shape, embedding_pos.shape))
print("load embedding done!\n")
return word_idx_rev, word_idx, embedding, embedding_pos
def load_data(input_file, word_idx, max_doc_len, max_sen_len):
print('load data_file: {}'.format(input_file))
doc_id, y_emotion, y_cause, y_pairs, x, sen_len, doc_len = [[] for i in range(7)]
n_cut = 0
inputFile = open(input_file, 'r')
while True:
line = inputFile.readline()
if line == '': break
line = line.strip().split()
doc_id.append(line[0])
d_len = int(line[1])
pairs = eval('[' + inputFile.readline().strip() + ']')
doc_len.append(d_len)
y_pairs.append(pairs)
emo, cause = zip(*pairs)
        y_em = np.zeros((max_doc_len, 2))
        y_ca = np.zeros((max_doc_len, 2))
        sen_len_tmp = np.zeros(max_doc_len, dtype=np.int32)
        x_tmp = np.zeros((max_doc_len, max_sen_len), dtype=np.int32)
for i in range(d_len):
y_em[i][int(i+1 in emo)]=1
y_ca[i][int(i+1 in cause)]=1
words = inputFile.readline().strip().split(',')[-1]
sen_len_tmp[i] = min(len(words.split()), max_sen_len)
for j, word in enumerate(words.split()):
if j >= max_sen_len:
n_cut += 1
break
x_tmp[i][j] = int(word_idx[word])
y_emotion.append(y_em)
y_cause.append(y_ca)
x.append(x_tmp)
sen_len.append(sen_len_tmp)
print('n_cut {}'.format(n_cut))
return doc_id, y_emotion, y_cause, y_pairs, x, sen_len, doc_len
def get_y_pair_CR(doc_len, max_doc_len, y_pairs):
y_pair = []
for i in range(len(doc_len)):
y_tmp = np.zeros((max_doc_len*max_doc_len, 2))
for j in range(doc_len[i]):
for k in range(doc_len[i]):
if (j+1,k+1) in y_pairs[i]:
y_tmp[j*max_doc_len+k][1] = 1
else :
y_tmp[j*max_doc_len+k][0] = 1
y_pair.append(y_tmp)
return y_pair
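# Illustrative note (not in the original file): the pair label for emotion
# clause j+1 and cause clause k+1 lives at flat index j * max_doc_len + k,
# i.e. a row-major max_doc_len x max_doc_len grid flattened to one axis.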
def get_y_pair_WC(doc_len, max_doc_len, window_size, y_pairs):
y_pair, pair_cnt, pair_left_cnt = [], 0, 0
for i in range(len(doc_len)):
y_tmp = np.zeros((max_doc_len*(window_size*2+1), 2))
for j in range(doc_len[i]):
for k in range(-window_size,window_size+1):
if (j+k) in range(doc_len[i]):
if (j+1,j+k+1) in y_pairs[i]:
y_tmp[j*(window_size*2+1)+k+window_size][1] = 1
else :
y_tmp[j*(window_size*2+1)+k+window_size][0] = 1
y_pair.append(y_tmp)
for j, k in y_pairs[i]:
pair_cnt += 1
if k-j not in range(-window_size,window_size+1):
pair_left_cnt += 1
print('pair_cnt {}, pair_left_cnt {}'.format(pair_cnt, pair_left_cnt))
return y_pair, pair_left_cnt
def load_data_CR(input_file, word_idx, max_doc_len = 75, max_sen_len = 45):
doc_id, y_emotion, y_cause, y_pairs, x, sen_len, doc_len = load_data(input_file, word_idx, max_doc_len, max_sen_len)
y_pair = get_y_pair_CR(doc_len, max_doc_len, y_pairs)
y_emotion, y_cause, y_pair, x, sen_len, doc_len = map(np.array, [y_emotion, y_cause, y_pair, x, sen_len, doc_len])
for var in ['y_emotion', 'y_cause', 'y_pair', 'x', 'sen_len', 'doc_len']:
print('{}.shape {}'.format(var, eval(var).shape))
print('load data done!\n')
return doc_id, y_emotion, y_cause, y_pair, y_pairs, x, sen_len, doc_len
def load_data_WC(input_file, word_idx, max_doc_len = 75, max_sen_len = 45, window_size = 3):
doc_id, y_emotion, y_cause, y_pairs, x, sen_len, doc_len = load_data(input_file, word_idx, max_doc_len, max_sen_len)
y_pair, pair_left_cnt = get_y_pair_WC(doc_len, max_doc_len, window_size, y_pairs)
y_emotion, y_cause, y_pair, x, sen_len, doc_len = map(np.array, [y_emotion, y_cause, y_pair, x, sen_len, doc_len])
for var in ['y_emotion', 'y_cause', 'y_pair', 'x', 'sen_len', 'doc_len']:
print('{}.shape {}'.format(var, eval(var).shape))
print('load data done!\n')
return doc_id, y_emotion, y_cause, y_pair, y_pairs, x, sen_len, doc_len, pair_left_cnt
def cal_prf(pred_y, true_y, doc_len, average='binary'):
pred_num, acc_num, true_num = 0, 0, 0
for i in range(pred_y.shape[0]):
for j in range(doc_len[i]):
if pred_y[i][j]:
pred_num += 1
if true_y[i][j]:
true_num += 1
if pred_y[i][j] and true_y[i][j]:
acc_num += 1
p, r = acc_num/(pred_num+1e-8), acc_num/(true_num+1e-8)
f = 2*p*r/(p+r+1e-8)
return p, r, f
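# Illustrative note: precision/recall/F1 are accumulated only over the first
# doc_len[i] clauses of each document; the 1e-8 terms guard against division
# by zero when nothing is predicted or labeled.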
def _pair_prf_CR(pred_y, true_y, doc_len, nonneutral, threshold = 0.5):
pred_num, acc_num, true_num = 0, 0, 0
max_doc_len = int(np.sqrt(pred_y.shape[1]))
for i in range(pred_y.shape[0]):
for j in range(doc_len[i]):
for k in range(doc_len[i]):
idx = j*max_doc_len+k
if nonneutral[i][j][1] == 1:
if pred_y[i][idx][1] > threshold:
pred_num += 1
if true_y[i][idx][1]>0.5:
true_num += 1
if true_y[i][idx][1]>0.5 and pred_y[i][idx][1] > threshold:
acc_num += 1
p, r = acc_num/(pred_num+1e-8), acc_num/(true_num+1e-8)
f = 2*p*r/(p+r+1e-8)
return p, r, f
def pair_prf_CR(pred_y_pair, true_y_pair, doc_len_batch, nonneutral, threshold = 0.5):
p_p, p_r, p_f = _pair_prf_CR(pred_y_pair, true_y_pair, doc_len_batch, nonneutral, threshold = threshold)
pred_y_pair = 1 - pred_y_pair
true_y_pair = 1 - true_y_pair
n_p, n_r, n_f = _pair_prf_CR(pred_y_pair, true_y_pair, doc_len_batch, nonneutral, threshold = threshold)
return p_f, n_f, (n_f + p_f) / 2
def pair_prf_WC(pred_y, true_y, doc_len, pair_left_cnt = 0, threshold = 0.5, window_size = 3):
p_p, p_r, p_f = _pair_prf_WC(pred_y, true_y, doc_len, pair_left_cnt, threshold, window_size)
pred_y = 1 - pred_y
true_y = 1 - true_y
n_p, n_r, n_f = _pair_prf_WC(pred_y, true_y, doc_len, pair_left_cnt, threshold, window_size)
return p_f, n_f, (n_f + p_f) / 2
def _pair_prf_WC(pred_y, true_y, doc_len, pair_left_cnt = 0, threshold = 0.5, window_size = 3):
pred_num, acc_num, true_num = 0, 0, pair_left_cnt
for i in range(pred_y.shape[0]):
for j in range(doc_len[i]*(window_size*2+1)):
if max(true_y[i][j]) > 1e-8:
if pred_y[i][j][1] > threshold:
pred_num += 1
if true_y[i][j][1]>0.5:
true_num += 1
if true_y[i][j][1]>0.5 and pred_y[i][j][1] > threshold:
acc_num += 1
p, r = acc_num/(pred_num+1e-8), acc_num/(true_num+1e-8)
f = 2*p*r/(p+r+1e-8)
return p, r, f
def bert_word2id(words, max_sen_len_bert, tokenizer, i, x_tmp, sen_len_tmp):
    # first convert the input to unicode
tokens_a, ret = tokenizer.tokenize(words), 0
if len(tokens_a) > max_sen_len_bert - 2:
ret += 1
tokens_a = tokens_a[0:(max_sen_len_bert - 2)]
tokens_a = ["[CLS]"] + tokens_a + ["[SEP]"]
input_ids = tokenizer.convert_tokens_to_ids(tokens_a)
sen_len_tmp[i] = len(input_ids)
for j in range(len(input_ids)):
x_tmp[i][j] = input_ids[j]
return ret
def load_data_bert(input_file, tokenizer, word_idx, max_doc_len, max_sen_len_bert, max_sen_len):
print('load data_file: {}'.format(input_file))
doc_id, y_emotion, y_cause, y_pairs, x_bert, sen_len_bert, x, sen_len, doc_len = [[] for i in range(9)]
choice_len = []
n_cut = 0
inputFile = open(input_file, 'r')
while True:
line = inputFile.readline()
if line == '': break
line = line.strip().split()
doc_id.append(line[0])
d_len = int(line[1])
pairs = eval('[' + inputFile.readline().strip() + ']')
doc_len.append(d_len)
y_pairs.append(pairs)
emo, cause = zip(*pairs)
y_emotion_tmp, y_cause_tmp = np.zeros((max_doc_len, 2)), np.zeros((max_doc_len, 2))
x_bert_tmp, sen_len_bert_tmp = np.zeros((max_doc_len, max_sen_len_bert),dtype=np.int32), np.zeros(max_doc_len,dtype=np.int32)
x_tmp, sen_len_tmp = np.zeros((max_doc_len, max_sen_len),dtype=np.int32), np.zeros(max_doc_len,dtype=np.int32)
choice_len_tmp = np.zeros(max_doc_len, dtype=np.int32)
for i in range(d_len):
y_emotion_tmp[i][int(i+1 in emo)]=1
y_cause_tmp[i][int(i+1 in cause)]=1
text = inputFile.readline().strip().split(',')
words = text[-1]
n_cut += bert_word2id(words, max_sen_len_bert, tokenizer, i, x_bert_tmp, sen_len_bert_tmp)
sen_len_tmp[i] = min(len(words.split()), max_sen_len)
choice_len_tmp[i] = min(int(text[0]), max_sen_len)
for j, word in enumerate(words.split()):
if j >= max_sen_len:
break
x_tmp[i][j] = int(word_idx[word])
y_emotion.append(y_emotion_tmp)
y_cause.append(y_cause_tmp)
x_bert.append(x_bert_tmp)
sen_len_bert.append(sen_len_bert_tmp)
x.append(x_tmp)
sen_len.append(sen_len_tmp)
choice_len.append(choice_len_tmp)
print('n_cut {}'.format(n_cut))
return doc_id, y_emotion, y_cause, y_pairs, x_bert, sen_len_bert, x, sen_len, doc_len, choice_len
def load_data_CR_Bert(input_file, tokenizer, word_idx, max_doc_len = 75, max_sen_len_bert = 60, max_sen_len = 30):
doc_id, y_emotion, y_cause, y_pairs, x_bert, sen_len_bert, x, sen_len, doc_len, choice_len = load_data_bert(input_file, tokenizer, word_idx, max_doc_len, max_sen_len_bert, max_sen_len)
y_pair = get_y_pair_CR(doc_len, max_doc_len, y_pairs)
y_emotion, y_cause, y_pair, x_bert, sen_len_bert, x, sen_len, doc_len, choice_len = map(np.array, [y_emotion, y_cause, y_pair, x_bert, sen_len_bert, x, sen_len, doc_len, choice_len])
for var in ['y_emotion', 'y_cause', 'y_pair', 'x_bert', 'sen_len_bert', 'x', 'sen_len', 'doc_len', 'choice_len']:
print('{}.shape {}'.format(var, eval(var).shape))
print('load data done!\n')
return doc_id, y_emotion, y_cause, y_pair, y_pairs, x_bert, sen_len_bert, x, sen_len, doc_len, choice_len
def load_data_WC_Bert(input_file, tokenizer, word_idx, max_doc_len = 75, max_sen_len_bert = 60, max_sen_len = 30, window_size = 3):
doc_id, y_emotion, y_cause, y_pairs, x_bert, sen_len_bert, x, sen_len, doc_len, choice_len = load_data_bert(input_file, tokenizer, word_idx, max_doc_len, max_sen_len_bert, max_sen_len)
y_pair, pair_left_cnt = get_y_pair_WC(doc_len, max_doc_len, window_size, y_pairs)
y_emotion, y_cause, y_pair, x_bert, sen_len_bert, x, sen_len, doc_len, choice_len = map(np.array, [y_emotion, y_cause, y_pair, x_bert, sen_len_bert, x, sen_len, doc_len, choice_len])
for var in ['y_emotion', 'y_cause', 'y_pair', 'x_bert', 'sen_len_bert', 'x', 'sen_len', 'doc_len', 'choice_len']:
print('{}.shape {}'.format(var, eval(var).shape))
print('load data done!\n')
return doc_id, y_emotion, y_cause, y_pair, y_pairs, x_bert, sen_len_bert, x, sen_len, doc_len, pair_left_cnt, choice_len
|
4c5a4ee6cc84c25ac0f63a051811db538951421d
|
8a87f5b889a9ce7d81421515f06d9c9cbf6ce64a
|
/3rdParty/V8/v7.9.317/src/snapshot/DEPS
|
810dfd6e84e91eac83f001735e2a828b0ad50364
|
[
"bzip2-1.0.6",
"BSD-3-Clause",
"Apache-2.0",
"SunPro",
"ICU",
"Zlib",
"GPL-1.0-or-later",
"OpenSSL",
"ISC",
"LicenseRef-scancode-gutenberg-2020",
"MIT",
"GPL-2.0-only",
"CC0-1.0",
"BSL-1.0",
"LicenseRef-scancode-autoconf-simple-exception",
"LicenseRef-scancode-pcre",
"Bison-exception-2.2",
"LicenseRef-scancode-public-domain",
"JSON",
"BSD-2-Clause",
"LicenseRef-scancode-unknown-license-reference",
"Unlicense",
"BSD-4-Clause",
"Python-2.0",
"LGPL-2.1-or-later"
] |
permissive
|
arangodb/arangodb
|
0980625e76c56a2449d90dcb8d8f2c485e28a83b
|
43c40535cee37fc7349a21793dc33b1833735af5
|
refs/heads/devel
| 2023-08-31T09:34:47.451950
| 2023-08-31T07:25:02
| 2023-08-31T07:25:02
| 2,649,214
| 13,385
| 982
|
Apache-2.0
| 2023-09-14T17:02:16
| 2011-10-26T06:42:00
|
C++
|
UTF-8
|
Python
| false
| false
| 98
|
DEPS
|
specific_include_rules = {
"mksnapshot\.cc": [
"+include/libplatform/libplatform.h",
],
}
|
|
18d24b4f5fef41c4f73a35fd44ef91e994b7a851
|
9468849850c7c2b2040835eb9496bfb716a98c21
|
/cea/optimization/slave/seasonal_storage/SolarPowerHandler_incl_Losses.py
|
f1c9c26b3d5b5b549308a7a412f6f006614864cd
|
[
"MIT"
] |
permissive
|
architecture-building-systems/CityEnergyAnalyst
|
e6532c0c794538dbb665366ccf6d783e0d9d1345
|
b84bcefdfdfc2bc0e009b5284b74391a957995ac
|
refs/heads/master
| 2023-08-30T19:57:47.445797
| 2023-08-25T13:30:28
| 2023-08-25T13:30:28
| 49,491,341
| 166
| 60
|
MIT
| 2023-09-11T11:10:00
| 2016-01-12T10:02:17
|
Python
|
UTF-8
|
Python
| false
| false
| 16,405
|
py
|
SolarPowerHandler_incl_Losses.py
|
"""
Slave Sub Function - Treat solar power!
This file contains all sub-functions used for storage design and operation.
They are called by either the operation or the optimization of the storage.
"""
import numpy as np
from cea.constants import *
from cea.optimization.constants import *
def StorageGateway(Q_PVT_gen_W, Q_SC_ET_gen_W, Q_SC_FP_gen_W, Q_server_gen_W, Q_network_demand_W, P_HP_max_W):
"""
    This function is a first filter for solar and waste heat handling:
    if there is excess heat, the surplus is quantified and sent to storage;
    if there is not enough, the shortfall to be covered is calculated.
    :param Q_PVT_gen_W: PVT heat generated at a given time step
    :param Q_SC_ET_gen_W: evacuated-tube solar collector heat generated at a given time step
    :param Q_SC_FP_gen_W: flat-plate solar collector heat generated at a given time step
    :param Q_server_gen_W: server waste heat recovered at a given time step
    :param Q_network_demand_W: network load at a given time step
    :param P_HP_max_W: maximum thermal power the storage heat pump can charge or discharge
    :type Q_PVT_gen_W: float
    :type Q_SC_ET_gen_W: float
    :type Q_SC_FP_gen_W: float
    :type Q_server_gen_W: float
    :type Q_network_demand_W: float
    :type P_HP_max_W: float
    :return: Q_to_storage_W: thermal energy going to the storage tanks (excl. conversion losses),
             Q_from_storage_W: thermal energy required from storage (excl. conversion losses),
             storage_active_flag: = 1 --> charge storage
                                  = 0 --> ask energy from storage or other plant,
             plus the direct-load / to-storage split for the server, PVT, SC-ET and SC-FP sources
    :rtype: float, float, int, plus eight floats
"""
Q_SC_FP_to_directload_W = 0
Q_SC_FP_to_storage_W = 0
Q_to_storage_W = 0
storage_active_flag = 0
if Q_server_gen_W <= Q_network_demand_W:
Q_network_demand_W = Q_network_demand_W - Q_server_gen_W
Q_server_to_directload_W = Q_server_gen_W
Q_server_to_storage_W = 0
else:
Q_network_demand_W = max(Q_network_demand_W - Q_server_gen_W, 0)
Q_to_storage_W = Q_to_storage_W + Q_server_gen_W - Q_network_demand_W + Q_PVT_gen_W + Q_SC_ET_gen_W + Q_SC_FP_gen_W
storage_active_flag = 1
Q_server_to_directload_W = Q_network_demand_W
Q_server_to_storage_W = Q_server_gen_W - Q_network_demand_W
Q_PVT_to_directload_W = 0
Q_PVT_to_storage_W = Q_PVT_gen_W
Q_SC_ET_to_directload_W = 0
Q_SC_ET_to_storage_W = Q_SC_ET_gen_W
Q_SC_FP_to_directload_W = 0
Q_SC_FP_to_storage_W = Q_SC_FP_gen_W
if Q_PVT_gen_W <= Q_network_demand_W:
Q_network_demand_W = Q_network_demand_W - Q_PVT_gen_W
Q_PVT_to_directload_W = Q_PVT_gen_W
Q_PVT_to_storage_W = 0
else:
Q_network_demand_W = max(Q_network_demand_W - Q_PVT_gen_W, 0)
Q_to_storage_W = Q_to_storage_W + Q_PVT_gen_W - Q_network_demand_W + Q_SC_ET_gen_W + Q_SC_FP_gen_W
storage_active_flag = 1
Q_PVT_to_directload_W = Q_network_demand_W
Q_PVT_to_storage_W = Q_PVT_gen_W - Q_network_demand_W
Q_SC_ET_to_directload_W = 0
Q_SC_ET_to_storage_W = Q_SC_ET_gen_W
Q_SC_FP_to_directload_W = 0
Q_SC_FP_to_storage_W = Q_SC_FP_gen_W
if Q_SC_ET_gen_W <= Q_network_demand_W:
Q_network_demand_W = Q_network_demand_W - Q_SC_ET_gen_W
Q_SC_ET_to_directload_W = Q_SC_ET_gen_W
Q_SC_ET_to_storage_W = 0
else:
Q_network_demand_W = max(Q_network_demand_W - Q_SC_ET_gen_W, 0)
Q_to_storage_W = Q_to_storage_W + Q_SC_ET_gen_W - Q_network_demand_W
storage_active_flag = 1
Q_SC_ET_to_directload_W = Q_network_demand_W
Q_SC_ET_to_storage_W = Q_SC_ET_gen_W - Q_network_demand_W
Q_SC_FP_to_directload_W = 0
Q_SC_FP_to_storage_W = Q_SC_FP_gen_W
if Q_SC_FP_gen_W <= Q_network_demand_W:
Q_network_demand_W = Q_network_demand_W - Q_SC_FP_gen_W
else:
Q_network_demand_W = max(Q_network_demand_W - Q_SC_FP_gen_W, 0)
Q_to_storage_W = Q_to_storage_W + Q_SC_FP_gen_W - Q_network_demand_W
storage_active_flag = 1
Q_SC_FP_to_directload_W = Q_network_demand_W
Q_SC_FP_to_storage_W = Q_SC_FP_gen_W - Q_network_demand_W
Q_from_storage_W = Q_network_demand_W
if Q_to_storage_W < (Q_PVT_to_storage_W + Q_SC_FP_to_storage_W + Q_SC_ET_to_storage_W):
        print(Q_to_storage_W)
if STORAGE_MAX_UPTAKE_LIMIT_FLAG == 1:
if Q_to_storage_W >= P_HP_max_W:
Q_to_storage_W = P_HP_max_W
# print "Storage charging at full power!"
if Q_from_storage_W >= P_HP_max_W:
Q_from_storage_W = P_HP_max_W
# print "Storage discharging at full power!"
return Q_to_storage_W, \
Q_from_storage_W, \
storage_active_flag, \
Q_server_to_directload_W, \
Q_server_to_storage_W, \
Q_PVT_to_directload_W, \
Q_PVT_to_storage_W, \
Q_SC_ET_to_directload_W, \
Q_SC_ET_to_storage_W, \
Q_SC_FP_to_directload_W, \
Q_SC_FP_to_storage_W
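# Illustrative sketch (made-up numbers, not part of the original file): with
# Q_server_gen_W = 10000 W against a 25000 W network demand and no solar input,
# the full 10000 W goes to the direct load, nothing is stored
# (storage_active_flag = 0), and Q_from_storage_W covers the remaining 15000 W
# (assuming P_HP_max_W is not binding).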
def Temp_before_Powerplant(Q_network_demand, Q_solar_available, mdot_DH, T_return_DH):
"""
USE ONLY IF Q solar is not sufficient!
This function derives the temperature just before the power plant, after solar energy is injected.
:param Q_network_demand: network load at a given time step
:param Q_solar_available: solar energy available at a given time step
    :param mdot_DH: mass flow rate in the district heating network
    :param T_return_DH: return temperature of the district heating network
:type Q_network_demand: float
:type Q_solar_available: float
:type mdot_DH: float
:type T_return_DH: float
:return: temperature before powerplant
:rtype: float
"""
    if Q_network_demand < Q_solar_available:
        T_before_PP = T_return_DH
    else:
        T_before_PP = T_return_DH + Q_solar_available / (mdot_DH * HEAT_CAPACITY_OF_WATER_JPERKGK)
return T_before_PP
def Storage_Charger(T_storage_old_K, Q_to_storage_lossfree_W, T_DH_ret_K, Q_in_storage_old_W, STORAGE_SIZE_m3, context):
"""
calculates the temperature of storage when charging
Q_to_storage_new_W = including losses
:param T_storage_old_K:
:param Q_to_storage_lossfree_W:
:param T_DH_ret_K:
:param Q_in_storage_old_W:
:param STORAGE_SIZE_m3:
:param context:
:type T_storage_old_K: float
:type Q_to_storage_lossfree_W: float
:type T_DH_ret_K: float
:type Q_in_storage_old_W: float
:type STORAGE_SIZE_m3: float
:type context: string
    :return: T_storage_new_K, Q_to_storage_new_W, E_aux_W, Q_in_storage_new_W
    :rtype: float, float, float, float
"""
MS_Var = context
if T_storage_old_K > T_DH_ret_K:
COP_th = T_storage_old_K / (T_storage_old_K - T_DH_ret_K)
COP = HP_ETA_EX * COP_th
E_aux_W = Q_to_storage_lossfree_W * (1 + MS_Var.Storage_conv_loss) * (
1 / COP) # assuming the losses occur after the heat pump
Q_to_storage_new_W = (E_aux_W + Q_to_storage_lossfree_W) * (1 - MS_Var.Storage_conv_loss)
# print "HP operation Charging"
else:
E_aux_W = 0
Q_to_storage_new_W = Q_to_storage_lossfree_W * (1 - MS_Var.Storage_conv_loss)
# print "HEX charging"
Q_in_storage_new_W = Q_in_storage_old_W + Q_to_storage_new_W
T_storage_new_K = MS_Var.T_storage_zero + Q_in_storage_new_W * WH_TO_J / (
float(STORAGE_SIZE_m3) * float(HEAT_CAPACITY_OF_WATER_JPERKGK) * float(DENSITY_OF_WATER_AT_60_DEGREES_KGPERM3))
return T_storage_new_K, Q_to_storage_new_W, E_aux_W, Q_in_storage_new_W
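# Illustrative sketch (made-up temperatures): charging a storage at 333.15 K
# from a 323.15 K DH return gives COP_th = 333.15 / 10 ~ 33, so the auxiliary
# electricity E_aux_W is only a small fraction of the heat charged.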
def Storage_DeCharger(T_storage_old_K, Q_from_storage_req_W, T_DH_sup_K, Q_in_storage_old_W, STORAGE_SIZE, context):
"""
discharging of the storage, no outside thermal losses in the model
:param T_storage_old_K:
:param Q_from_storage_req_W:
:param T_DH_sup_K:
:param Q_in_storage_old_W:
:param STORAGE_SIZE:
:param context:
    :type T_storage_old_K: float
    :type Q_from_storage_req_W: float
    :type T_DH_sup_K: float
    :type Q_in_storage_old_W: float
    :type STORAGE_SIZE: float
    :type context: the MS_Var configuration object (provides Storage_conv_loss and T_storage_zero)
    :return: E_aux_W, Q_from_storage_used_W, Q_in_storage_new_W, T_storage_new_K, COP
    :rtype: float, float, float, float, float
"""
MS_Var = context
if T_DH_sup_K > T_storage_old_K: # using a heat pump if the storage temperature is below the desired distribution temperature
        COP_th = T_DH_sup_K / (T_DH_sup_K - T_storage_old_K)  # low-temperature side taken at the old storage temperature
COP = HP_ETA_EX * COP_th
# print COP
E_aux_W = Q_from_storage_req_W / COP * (1 + MS_Var.Storage_conv_loss)
Q_from_storage_used_W = Q_from_storage_req_W * (1 - 1 / COP) * (1 + MS_Var.Storage_conv_loss)
# print "HP operation de-Charging"
# print "Wh used from Storage", Q_from_storage_used
else: # assume perfect heat exchanger that provides the heat to the distribution
Q_from_storage_used_W = Q_from_storage_req_W * (1 + MS_Var.Storage_conv_loss)
E_aux_W = 0.0
COP = 0.0
# print "HEX-Operation Decharging"
Q_in_storage_new_W = Q_in_storage_old_W - Q_from_storage_used_W
T_storage_new_K = MS_Var.T_storage_zero + Q_in_storage_new_W * WH_TO_J / (
float(STORAGE_SIZE) * float(HEAT_CAPACITY_OF_WATER_JPERKGK) * float(DENSITY_OF_WATER_AT_60_DEGREES_KGPERM3))
# print Q_in_storage_new, "energy in storage left"
return E_aux_W, Q_from_storage_used_W, Q_in_storage_new_W, T_storage_new_K, COP
def Storage_Loss(T_storage_old_K, T_amb_K, STORAGE_SIZE_m3, context, T_ground):
"""
Calculates the storage Loss for every time step, assume D : H = 3 : 1
:param T_storage_old_K: temperature of storage at time step, without any losses
:param T_amb_K: ambient temperature
:param STORAGE_SIZE_m3:
:param context:
:type T_storage_old_K: float
:type T_amb_K: float
:type STORAGE_SIZE_m3: float
:type context:
    :return: Q_loss_W, energy loss due to imperfect insulation in Wh/h, and T_loss_K, the corresponding temperature drop
    :rtype: float, float
"""
MS_Var = context
V_storage_m3 = STORAGE_SIZE_m3
H_storage_m = (2.0 * V_storage_m3 / (9.0 * np.pi)) ** (1.0 / 3.0) # assume 3 : 1 (D : H)
# D_storage = 3.0 * H_storage
A_storage_ground_m2 = V_storage_m3 / H_storage_m
if V_storage_m3 == 0:
A_storage_rest_m2 = 0
else:
A_storage_rest_m2 = 2.0 * (H_storage_m * np.pi * V_storage_m3) ** (1.0 / 2.0)
Q_loss_uppersurf_W = MS_Var.alpha_loss * A_storage_ground_m2 * (T_storage_old_K - T_amb_K)
Q_loss_rest_W = MS_Var.alpha_loss * A_storage_rest_m2 * (T_storage_old_K - T_ground) # calculated by EnergyPRO
Q_loss_W = abs(float(Q_loss_uppersurf_W + Q_loss_rest_W))
T_loss_K = abs(float(Q_loss_W / (STORAGE_SIZE_m3 * HEAT_CAPACITY_OF_WATER_JPERKGK * DENSITY_OF_WATER_AT_60_DEGREES_KGPERM3 * WH_TO_J)))
return Q_loss_W, T_loss_K
def Storage_Operator(Q_PVT_gen_W, Q_SC_ET_gen_W, Q_SC_FP_gen_W, Q_server_gen_W, Q_network_demand_W,
T_storage_old_K, T_DH_sup_K, T_amb_K, Q_in_storage_old_W, T_DH_return_K,
mdot_DH_kgpers, STORAGE_SIZE_m3, context, P_HP_max_W, T_ground_K):
"""
    :param Q_PVT_gen_W:
    :param Q_SC_ET_gen_W:
    :param Q_SC_FP_gen_W:
    :param Q_server_gen_W:
    :param Q_network_demand_W:
    :param T_storage_old_K:
    :param T_DH_sup_K:
    :param T_amb_K:
    :param Q_in_storage_old_W:
    :param T_DH_return_K:
    :param mdot_DH_kgpers:
    :param STORAGE_SIZE_m3:
    :param context:
    :param P_HP_max_W:
    :param T_ground_K:
:return:
:rtype:
"""
Q_to_storage_W, \
Q_from_storage_W, \
storage_active_flag, \
Q_server_to_directload_W, \
Q_server_to_storage_W, \
Q_PVT_to_directload_W, \
Q_PVT_to_storage_W, \
Q_SC_ET_to_directload_W, \
Q_SC_ET_to_storage_W, \
Q_SC_FP_to_directload_W, \
Q_SC_FP_to_storage_W = StorageGateway(Q_PVT_gen_W, Q_SC_ET_gen_W, Q_SC_FP_gen_W,
Q_server_gen_W, Q_network_demand_W,
P_HP_max_W)
Q_missing_W = 0 # amount of heat required from heating plants
Q_from_storage_req_W = 0
E_aux_dech_W = 0
E_aux_ch_W = 0
# mdot_DH_missing_kgpers = Q_network_demand_W # TODO: TO DELETE
if storage_active_flag == 1: # charging the storage
T_storage_new_K, Q_to_storage_new_W, E_aux_ch_W, Q_in_storage_new_W = \
Storage_Charger(T_storage_old_K, Q_to_storage_W, T_DH_return_K, Q_in_storage_old_W, STORAGE_SIZE_m3,
context)
# calculating thermal loss
Q_loss_W, T_loss_K = Storage_Loss(T_storage_old_K, T_amb_K, STORAGE_SIZE_m3, context, T_ground_K)
T_storage_new_K -= T_loss_K
Q_in_storage_new_W -= Q_loss_W
Q_from_storage_req_W = 0
mdot_DH_missing_kgpers = 0
else: # discharging the storage
if Q_in_storage_old_W > 0.0: # Start de-Charging
E_aux_dech_W, Q_from_storage_req_W, Q_in_storage_new_W, T_storage_new_K, COP = \
Storage_DeCharger(T_storage_old_K, Q_from_storage_W, T_DH_sup_K, Q_in_storage_old_W,
STORAGE_SIZE_m3, context)
# calculating thermal loss
Q_loss_W, T_loss_K = Storage_Loss(T_storage_old_K, T_amb_K, STORAGE_SIZE_m3, context, T_ground_K)
T_storage_new_K -= T_loss_K
Q_in_storage_new_W = Q_in_storage_old_W - Q_loss_W - Q_from_storage_req_W
if Q_network_demand_W == 0:
mdot_DH_missing_kgpers = 0
else:
mdot_DH_missing_kgpers = mdot_DH_kgpers * (Q_network_demand_W - Q_from_storage_req_W) / Q_network_demand_W # TODO: CHECK CALCULATION
if Q_in_storage_new_W < 0:
# if storage is almost empty after the discharge calculation, only discharge the amount that is possible
# to not go below 10 degC
Q_from_storage_poss = Q_in_storage_old_W
E_aux_dech_W, Q_from_storage_req_W, Q_in_storage_new_W, T_storage_new_K, COP = \
Storage_DeCharger(T_storage_old_K, Q_from_storage_poss, T_DH_sup_K, Q_in_storage_old_W,
STORAGE_SIZE_m3, context)
Q_loss_W, T_loss_K = Storage_Loss(T_storage_old_K, T_amb_K, STORAGE_SIZE_m3, context, T_ground_K)
Q_missing_W = Q_network_demand_W - (Q_PVT_to_directload_W +
Q_SC_ET_to_directload_W +
Q_SC_FP_to_directload_W +
Q_server_to_directload_W) - Q_from_storage_req_W
Q_in_storage_new_W = Q_in_storage_old_W - Q_loss_W - Q_from_storage_req_W
T_storage_new_K -= T_loss_K
if Q_network_demand_W == 0:
mdot_DH_missing_kgpers = 0
else:
mdot_DH_missing_kgpers = mdot_DH_kgpers * (Q_missing_W / Q_network_demand_W)
else: # neither storage charging nor decharging
E_aux_ch_W = 0
E_aux_dech_W = 0
Q_loss_W, T_loss_K = Storage_Loss(T_storage_old_K, T_amb_K, STORAGE_SIZE_m3, context, T_ground_K)
T_storage_new_K = T_storage_old_K - T_loss_K
Q_in_storage_new_W = Q_in_storage_old_W - Q_loss_W
Q_missing_W = Q_network_demand_W - (Q_PVT_to_directload_W +
Q_SC_ET_to_directload_W +
Q_SC_FP_to_directload_W +
Q_server_to_directload_W
)
if Q_missing_W < 0: # catch numerical errors (leading to very low (absolute) negative numbers)
Q_missing_W = 0
if Q_network_demand_W == 0:
mdot_DH_missing_kgpers = 0
else:
mdot_DH_missing_kgpers = mdot_DH_kgpers * (Q_missing_W / Q_network_demand_W)
# print "mdot_DH_missing", mdot_DH_missing
return Q_in_storage_new_W, \
T_storage_new_K,\
Q_from_storage_req_W,\
Q_to_storage_W, \
E_aux_ch_W,\
E_aux_dech_W, \
Q_missing_W, \
Q_loss_W,\
mdot_DH_missing_kgpers, \
Q_server_to_directload_W,\
Q_server_to_storage_W, \
Q_PVT_to_directload_W, \
Q_PVT_to_storage_W, \
Q_SC_ET_to_directload_W,\
Q_SC_ET_to_storage_W, \
Q_SC_FP_to_directload_W, \
Q_SC_FP_to_storage_W
|
00c53c28e124fcbbdab1a54a217d09bccf1ce54c
|
3c41443364da8b44c74dce08ef94a1acd1b66b3e
|
/api/registrations/views.py
|
d3e7f8664dcb5c1c9180d93a49473620764a2a59
|
[
"MIT",
"BSD-3-Clause",
"LicenseRef-scancode-free-unknown",
"LicenseRef-scancode-warranty-disclaimer",
"AGPL-3.0-only",
"LGPL-2.0-or-later",
"LicenseRef-scancode-proprietary-license",
"MPL-1.1",
"CPAL-1.0",
"LicenseRef-scancode-unknown-license-reference",
"BSD-2-Clause",
"Apache-2.0"
] |
permissive
|
CenterForOpenScience/osf.io
|
71d9540be7989f7118a33e15bc4a6ce2d2492ac1
|
a3e0a0b9ddda5dd75fc8248d58f3bcdeece0323e
|
refs/heads/develop
| 2023-09-04T03:21:14.970917
| 2023-08-31T14:49:20
| 2023-08-31T14:49:20
| 10,199,599
| 683
| 390
|
Apache-2.0
| 2023-09-14T17:07:52
| 2013-05-21T15:53:37
|
Python
|
UTF-8
|
Python
| false
| false
| 37,843
|
py
|
views.py
|
from rest_framework import generics, permissions as drf_permissions
from rest_framework.exceptions import ValidationError, NotFound, PermissionDenied
from framework.auth.oauth_scopes import CoreScopes
from osf.models import Registration, OSFUser, RegistrationProvider, OutcomeArtifact
from osf.utils.permissions import WRITE_NODE
from osf.utils.workflows import ApprovalStates
from api.base import permissions as base_permissions
from api.base import generic_bulk_views as bulk_views
from api.base.exceptions import Gone
from api.base.filters import ListFilterMixin
from api.base.views import (
JSONAPIBaseView,
BaseChildrenList,
BaseContributorDetail,
BaseContributorList,
BaseNodeLinksDetail,
BaseNodeLinksList,
WaterButlerMixin,
)
from api.base.serializers import HideIfWithdrawal, LinkedRegistrationsRelationshipSerializer
from api.base.serializers import LinkedNodesRelationshipSerializer
from api.base.pagination import NodeContributorPagination
from api.base.exceptions import Conflict
from api.base.parsers import (
JSONAPIRelationshipParser,
JSONAPIMultipleRelationshipsParser,
JSONAPIRelationshipParserForRegularJSON,
JSONAPIMultipleRelationshipsParserForRegularJSON,
)
from api.base.utils import (
get_user_auth,
default_node_list_permission_queryset,
is_bulk_request,
is_truthy,
)
from api.comments.serializers import RegistrationCommentSerializer, CommentCreateSerializer
from api.draft_registrations.views import DraftMixin
from api.identifiers.serializers import RegistrationIdentifierSerializer
from api.nodes.views import NodeIdentifierList, NodeBibliographicContributorsList, NodeSubjectsList, NodeSubjectsRelationship
from api.users.views import UserMixin
from api.users.serializers import UserSerializer
from api.nodes.permissions import (
ReadOnlyIfRegistration,
ContributorDetailPermissions,
ContributorOrPublic,
ContributorOrPublicForRelationshipPointers,
AdminOrPublic,
ExcludeWithdrawals,
NodeLinksShowIfVersion,
)
from api.registrations.permissions import ContributorOrModerator, ContributorOrModeratorOrPublic
from api.registrations.serializers import (
RegistrationSerializer,
RegistrationDetailSerializer,
RegistrationContributorsSerializer,
RegistrationCreateSerializer,
RegistrationStorageProviderSerializer,
)
from api.nodes.filters import NodesFilterMixin
from api.nodes.views import (
NodeMixin, NodeRegistrationsList, NodeLogList,
NodeCommentsList, NodeStorageProvidersList, NodeFilesList, NodeFileDetail,
NodeInstitutionsList, NodeForksList, NodeWikiList, LinkedNodesList,
NodeViewOnlyLinksList, NodeViewOnlyLinkDetail, NodeCitationDetail, NodeCitationStyleDetail,
NodeLinkedRegistrationsList, NodeLinkedByNodesList, NodeLinkedByRegistrationsList, NodeInstitutionsRelationship,
)
from api.registrations.serializers import RegistrationNodeLinksSerializer, RegistrationFileSerializer
from api.wikis.serializers import RegistrationWikiSerializer
from api.base.utils import get_object_or_error
from api.actions.serializers import RegistrationActionSerializer
from api.requests.serializers import RegistrationRequestSerializer
from framework.sentry import log_exception
from osf.utils.permissions import ADMIN
from api.providers.permissions import MustBeModerator
from api.providers.views import ProviderMixin
from api.registrations import annotations
from api.resources import annotations as resource_annotations
from api.resources.permissions import RegistrationResourceListPermission
from api.resources.serializers import ResourceSerializer
from api.schema_responses import annotations as schema_response_annotations
from api.schema_responses.permissions import (
MODERATOR_VISIBLE_STATES,
RegistrationSchemaResponseListPermission,
)
from api.schema_responses.serializers import RegistrationSchemaResponseSerializer
class RegistrationMixin(NodeMixin):
"""Mixin with convenience methods for retrieving the current registration based on the
current URL. By default, fetches the current registration based on the node_id kwarg.
"""
serializer_class = RegistrationSerializer
node_lookup_url_kwarg = 'node_id'
def get_node(self, check_object_permissions=True, **annotations):
guid = self.kwargs[self.node_lookup_url_kwarg]
node = Registration.objects.filter(guids___id=guid).annotate(**annotations)
try:
node = node.get()
except Registration.DoesNotExist:
raise NotFound
if node.deleted:
raise Gone(detail='The requested registration is no longer available.')
if check_object_permissions:
self.check_object_permissions(self.request, node)
return node
class RegistrationList(JSONAPIBaseView, generics.ListCreateAPIView, bulk_views.BulkUpdateJSONAPIView, NodesFilterMixin, DraftMixin):
"""The documentation for this endpoint can be found [here](https://developer.osf.io/#operation/registrations_list).
"""
permission_classes = (
drf_permissions.IsAuthenticatedOrReadOnly,
base_permissions.TokenHasScope,
)
required_read_scopes = [CoreScopes.NODE_REGISTRATIONS_READ]
required_write_scopes = [CoreScopes.NODE_REGISTRATIONS_WRITE]
serializer_class = RegistrationSerializer
view_category = 'registrations'
view_name = 'registration-list'
ordering = ('-modified',)
model_class = Registration
parser_classes = (JSONAPIMultipleRelationshipsParser, JSONAPIMultipleRelationshipsParserForRegularJSON,)
# overrides BulkUpdateJSONAPIView
def get_serializer_class(self):
"""
Use RegistrationDetailSerializer which requires 'id'
"""
if self.request.method in ('PUT', 'PATCH'):
return RegistrationDetailSerializer
elif self.request.method == 'POST':
return RegistrationCreateSerializer
else:
return RegistrationSerializer
# overrides NodesFilterMixin
def get_default_queryset(self):
return default_node_list_permission_queryset(
user=self.request.user,
model_cls=Registration,
revision_state=annotations.REVISION_STATE,
**resource_annotations.make_open_practice_badge_annotations(),
)
def is_blacklisted(self):
query_params = self.parse_query_params(self.request.query_params)
for key, field_names in query_params.items():
for field_name, data in field_names.items():
field = self.serializer_class._declared_fields.get(field_name)
if isinstance(field, HideIfWithdrawal):
return True
return False
# overrides ListAPIView, ListBulkCreateJSONAPIView
def get_queryset(self):
# For bulk requests, queryset is formed from request body.
if is_bulk_request(self.request):
auth = get_user_auth(self.request)
registrations = Registration.objects.filter(guids___id__in=[registration['id'] for registration in self.request.data])
# If skip_uneditable=True in query_params, skip nodes for which the user
# does not have EDIT permissions.
if is_truthy(self.request.query_params.get('skip_uneditable', False)):
return Registration.objects.get_nodes_for_user(auth.user, WRITE_NODE, registrations)
for registration in registrations:
if not registration.can_edit(auth):
raise PermissionDenied
return registrations
blacklisted = self.is_blacklisted()
registrations = self.get_queryset_from_request()
# If attempting to filter on a blacklisted field, exclude withdrawals.
if blacklisted:
registrations = registrations.exclude(retraction__isnull=False)
return registrations.select_related(
'root',
'root__embargo',
'root__embargo_termination_approval',
'root__retraction',
'root__registration_approval',
)
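    # Note (editorial assumption, not in the original file): select_related on
    # the root's embargo/retraction/approval rows is intended to avoid an extra
    # query per registration when serializers derive each registration's state.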
# overrides ListCreateJSONAPIView
def perform_create(self, serializer):
"""Create a registration from a draft.
"""
draft_id = self.request.data.get('draft_registration', None) or self.request.data.get('draft_registration_id', None)
draft = self.get_draft(draft_id)
user = get_user_auth(self.request).user
        # A user must have admin permissions on the draft to register it
if draft.has_permission(user, ADMIN):
try:
serializer.save(draft=draft)
except ValidationError as e:
log_exception()
raise e
else:
raise PermissionDenied(
'You must be an admin contributor on the draft registration to create a registration.',
)
def check_branched_from(self, draft):
# Overrides DraftMixin - no node_id in kwargs
return
class RegistrationDetail(JSONAPIBaseView, generics.RetrieveUpdateAPIView, RegistrationMixin, WaterButlerMixin):
"""The documentation for this endpoint can be found [here](https://developer.osf.io/#operation/registrations_read).
"""
permission_classes = (
drf_permissions.IsAuthenticatedOrReadOnly,
ContributorOrModeratorOrPublic,
base_permissions.TokenHasScope,
)
required_read_scopes = [CoreScopes.NODE_REGISTRATIONS_READ]
required_write_scopes = [CoreScopes.NODE_REGISTRATIONS_WRITE]
serializer_class = RegistrationDetailSerializer
view_category = 'registrations'
view_name = 'registration-detail'
parser_classes = (JSONAPIMultipleRelationshipsParser, JSONAPIMultipleRelationshipsParserForRegularJSON,)
# overrides RetrieveAPIView
def get_object(self):
registration = self.get_node(
revision_state=annotations.REVISION_STATE,
**resource_annotations.make_open_practice_badge_annotations(),
)
if not registration.is_registration:
raise ValidationError('This is not a registration.')
return registration
def get_serializer_context(self):
context = super().get_serializer_context()
show_counts = is_truthy(self.request.query_params.get('related_counts', False))
if show_counts:
registration = self.get_object()
context['meta'] = {
'templated_by_count': registration.templated_list.count(),
}
return context
class RegistrationContributorsList(BaseContributorList, RegistrationMixin, UserMixin):
"""The documentation for this endpoint can be found [here](https://developer.osf.io/#operation/registrations_contributors_list).
"""
view_category = 'registrations'
view_name = 'registration-contributors'
pagination_class = NodeContributorPagination
serializer_class = RegistrationContributorsSerializer
required_read_scopes = [CoreScopes.NODE_REGISTRATIONS_READ]
required_write_scopes = [CoreScopes.NODE_REGISTRATIONS_WRITE]
permission_classes = (
ContributorDetailPermissions,
drf_permissions.IsAuthenticatedOrReadOnly,
ReadOnlyIfRegistration,
base_permissions.TokenHasScope,
)
def get_default_queryset(self):
node = self.get_node(check_object_permissions=False)
return node.contributor_set.all().prefetch_related('user__guids')
class RegistrationContributorDetail(BaseContributorDetail, RegistrationMixin, UserMixin):
"""The documentation for this endpoint can be found [here](https://developer.osf.io/#operation/registrations_contributors_read).
"""
view_category = 'registrations'
view_name = 'registration-contributor-detail'
serializer_class = RegistrationContributorsSerializer
required_read_scopes = [CoreScopes.NODE_REGISTRATIONS_READ]
required_write_scopes = [CoreScopes.NODE_REGISTRATIONS_WRITE]
permission_classes = (
ContributorDetailPermissions,
drf_permissions.IsAuthenticatedOrReadOnly,
ReadOnlyIfRegistration,
base_permissions.TokenHasScope,
)
class RegistrationBibliographicContributorsList(NodeBibliographicContributorsList, RegistrationMixin):
pagination_class = NodeContributorPagination
serializer_class = RegistrationContributorsSerializer
view_category = 'registrations'
view_name = 'registration-bibliographic-contributors'
class RegistrationImplicitContributorsList(JSONAPIBaseView, generics.ListAPIView, ListFilterMixin, RegistrationMixin):
permission_classes = (
AdminOrPublic,
drf_permissions.IsAuthenticatedOrReadOnly,
base_permissions.TokenHasScope,
)
required_read_scopes = [CoreScopes.NODE_CONTRIBUTORS_READ]
required_write_scopes = [CoreScopes.NULL]
model_class = OSFUser
serializer_class = UserSerializer
view_category = 'registrations'
view_name = 'registration-implicit-contributors'
ordering = ('contributor___order',) # default ordering
def get_default_queryset(self):
node = self.get_node()
return node.parent_admin_contributors
def get_queryset(self):
queryset = self.get_queryset_from_request()
return queryset
class RegistrationChildrenList(BaseChildrenList, generics.ListAPIView, RegistrationMixin):
"""The documentation for this endpoint can be found [here](https://developer.osf.io/#operation/registrations_children_list).
"""
view_category = 'registrations'
view_name = 'registration-children'
serializer_class = RegistrationSerializer
required_read_scopes = [CoreScopes.NODE_REGISTRATIONS_READ]
required_write_scopes = [CoreScopes.NULL]
model_class = Registration
class RegistrationCitationDetail(NodeCitationDetail, RegistrationMixin):
"""The documentation for this endpoint can be found [here](https://developer.osf.io/#operation/registrations_citations_list).
"""
required_read_scopes = [CoreScopes.NODE_REGISTRATIONS_READ]
view_category = 'registrations'
view_name = 'registration-citation'
class RegistrationCitationStyleDetail(NodeCitationStyleDetail, RegistrationMixin):
"""The documentation for this endpoint can be found [here](https://developer.osf.io/#operation/registrations_citation_read).
"""
required_read_scopes = [CoreScopes.NODE_REGISTRATIONS_READ]
view_category = 'registrations'
view_name = 'registration-style-citation'
class RegistrationForksList(NodeForksList, RegistrationMixin):
"""The documentation for this endpoint can be found [here](https://developer.osf.io/#operation/registrations_forks_list).
"""
view_category = 'registrations'
view_name = 'registration-forks'
class RegistrationCommentsList(NodeCommentsList, RegistrationMixin):
"""The documentation for this endpoint can be found [here](https://developer.osf.io/#operation/registrations_comments_list).
"""
serializer_class = RegistrationCommentSerializer
view_category = 'registrations'
view_name = 'registration-comments'
def get_serializer_class(self):
if self.request.method == 'POST':
return CommentCreateSerializer
else:
return RegistrationCommentSerializer
class RegistrationLogList(NodeLogList, RegistrationMixin):
"""The documentation for this endpoint can be found [here](https://developer.osf.io/#operation/registrations_logs_list).
"""
view_category = 'registrations'
view_name = 'registration-logs'
class RegistrationStorageProvidersList(NodeStorageProvidersList, RegistrationMixin):
"""The documentation for this endpoint can be found [here](https://developer.osf.io/#operation/registrations_providers_list).
"""
serializer_class = RegistrationStorageProviderSerializer
view_category = 'registrations'
view_name = 'registration-storage-providers'
class RegistrationNodeLinksList(BaseNodeLinksList, RegistrationMixin):
"""Node Links to other nodes. *Writeable*.
Node Links act as pointers to other nodes. Unlike Forks, they are not copies of nodes;
Node Links are a direct reference to the node that they point to.
##Node Link Attributes
`type` is "node_links"
None
##Links
See the [JSON-API spec regarding pagination](http://jsonapi.org/format/1.0/#fetching-pagination).
##Relationships
### Target Node
This endpoint shows the target node detail and is automatically embedded.
##Actions
###Adding Node Links
Method: POST
URL: /links/self
Query Params: <none>
Body (JSON): {
"data": {
"type": "node_links", # required
"relationships": {
"nodes": {
"data": {
"type": "nodes", # required
"id": "{target_node_id}", # required
}
}
}
}
}
Success: 201 CREATED + node link representation
To add a node link (a pointer to another node), issue a POST request to this endpoint. This effectively creates a
relationship between the node and the target node. The target node must be described as a relationship object with
a "data" member, containing the nodes `type` and the target node `id`.
##Query Params
+ `page=<Int>` -- page number of results to view, default 1
+ `filter[<fieldname>]=<Str>` -- fields and values to filter the search results on.
#This Request/Response
"""
view_category = 'registrations'
view_name = 'registration-pointers'
serializer_class = RegistrationNodeLinksSerializer
permission_classes = (
drf_permissions.IsAuthenticatedOrReadOnly,
ContributorOrPublic,
ReadOnlyIfRegistration,
base_permissions.TokenHasScope,
ExcludeWithdrawals,
NodeLinksShowIfVersion,
)
required_read_scopes = [CoreScopes.NODE_REGISTRATIONS_READ]
required_write_scopes = [CoreScopes.NULL]
# TODO: This class doesn't exist
# model_class = Pointer
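# Illustrative sketch (not part of the original module): building the POST
# body documented in the docstring above. The target node id is a hypothetical
# placeholder; note that the ReadOnlyIfRegistration permission above means a
# registration itself will reject this write.
def _example_node_link_payload(target_node_id='abc12'):
    """Return the relationship payload shape expected by the endpoint above."""
    return {
        'data': {
            'type': 'node_links',
            'relationships': {
                'nodes': {
                    'data': {'type': 'nodes', 'id': target_node_id},
                },
            },
        },
    }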
class RegistrationNodeLinksDetail(BaseNodeLinksDetail, RegistrationMixin):
"""Node Link details. *Writeable*.
Node Links act as pointers to other nodes. Unlike Forks, they are not copies of nodes;
Node Links are a direct reference to the node that they point to.
##Attributes
`type` is "node_links"
None
##Links
*None*
##Relationships
###Target node
This endpoint shows the target node detail and is automatically embedded.
##Actions
###Remove Node Link
Method: DELETE
URL: /links/self
Query Params: <none>
Success: 204 No Content
To remove a node link from a node, issue a DELETE request to the `self` link. This request will remove the
relationship between the node and the target node, not the nodes themselves.
##Query Params
*None*.
#This Request/Response
"""
view_category = 'registrations'
view_name = 'registration-pointer-detail'
serializer_class = RegistrationNodeLinksSerializer
permission_classes = (
drf_permissions.IsAuthenticatedOrReadOnly,
base_permissions.TokenHasScope,
ExcludeWithdrawals,
NodeLinksShowIfVersion,
)
required_read_scopes = [CoreScopes.NODE_REGISTRATIONS_READ]
required_write_scopes = [CoreScopes.NULL]
# TODO: this class doesn't exist
# model_class = Pointer
# overrides RetrieveAPIView
def get_object(self):
registration = self.get_node()
if not registration.is_registration:
raise ValidationError('This is not a registration.')
return registration
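# Illustrative note (not part of the original module): per the docstring above,
# removing a node link is a DELETE against the pointer's `self` link, e.g.
#   DELETE /v2/registrations/{registration_id}/node_links/{node_link_id}/
# (hypothetical URL shape), returning 204 No Content; only the relationship is
# removed, never the referenced nodes themselves.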
class RegistrationLinkedByNodesList(NodeLinkedByNodesList, RegistrationMixin):
view_category = 'registrations'
view_name = 'registration-linked-by-nodes'
class RegistrationLinkedByRegistrationsList(NodeLinkedByRegistrationsList, RegistrationMixin):
view_category = 'registrations'
view_name = 'registration-linked-by-registrations'
class RegistrationRegistrationsList(NodeRegistrationsList, RegistrationMixin):
"""List of registrations of a registration."""
view_category = 'registrations'
view_name = 'registration-registrations'
class RegistrationFilesList(NodeFilesList, RegistrationMixin):
"""The documentation for this endpoint can be found [here](https://developer.osf.io/#operation/registrations_files_list).
"""
view_category = 'registrations'
view_name = 'registration-files'
ordering_fields = ['modified', 'name', 'date_modified']
serializer_class = RegistrationFileSerializer
class RegistrationFileDetail(NodeFileDetail, RegistrationMixin):
"""The documentation for this endpoint can be found [here](https://developer.osf.io/#operation/registrations_files_read).
"""
view_category = 'registrations'
view_name = 'registration-file-detail'
serializer_class = RegistrationFileSerializer
class RegistrationInstitutionsList(NodeInstitutionsList, RegistrationMixin):
"""The documentation for this endpoint can be found [here](https://developer.osf.io/#operation/registrations_institutions_list).
"""
view_category = 'registrations'
view_name = 'registration-institutions'
class RegistrationSubjectsList(NodeSubjectsList, RegistrationMixin):
"""The documentation for this endpoint can be found [here](https://developer.osf.io/#operation/registrations_subjects_list).
"""
view_category = 'registrations'
view_name = 'registration-subjects'
required_read_scopes = [CoreScopes.NODE_REGISTRATIONS_READ]
class RegistrationSubjectsRelationship(NodeSubjectsRelationship, RegistrationMixin):
"""The documentation for this endpoint can be found [here](https://developer.osf.io/#operation/registrations_subjects_relationship).
"""
required_read_scopes = [CoreScopes.NODE_REGISTRATIONS_READ]
required_write_scopes = [CoreScopes.NODE_REGISTRATIONS_WRITE]
view_category = 'registrations'
view_name = 'registration-relationships-subjects'
class RegistrationInstitutionsRelationship(NodeInstitutionsRelationship, RegistrationMixin):
"""The documentation for this endpoint can be found [here](https://developer.osf.io/#operation/registrations_institutions_relationship).
"""
view_category = 'registrations'
view_name = 'registration-relationships-institutions'
permission_classes = (
drf_permissions.IsAuthenticatedOrReadOnly,
base_permissions.TokenHasScope,
AdminOrPublic,
)
class RegistrationWikiList(NodeWikiList, RegistrationMixin):
"""The documentation for this endpoint can be found [here](https://developer.osf.io/#operation/registrations_wikis_list).
"""
view_category = 'registrations'
view_name = 'registration-wikis'
serializer_class = RegistrationWikiSerializer
class RegistrationLinkedNodesList(LinkedNodesList, RegistrationMixin):
"""The documentation for this endpoint can be found [here](https://developer.osf.io/#operation/registrations_linked_nodes_list).
"""
view_category = 'registrations'
view_name = 'linked-nodes'
class RegistrationLinkedNodesRelationship(JSONAPIBaseView, generics.RetrieveAPIView, RegistrationMixin):
""" Relationship Endpoint for Nodes -> Linked Node relationships
Used to retrieve the ids of the linked nodes attached to this collection. For each id, there
exists a node link that contains that node.
##Actions
"""
view_category = 'registrations'
view_name = 'node-pointer-relationship'
permission_classes = (
ContributorOrPublicForRelationshipPointers,
drf_permissions.IsAuthenticatedOrReadOnly,
base_permissions.TokenHasScope,
ReadOnlyIfRegistration,
)
required_read_scopes = [CoreScopes.NODE_LINKS_READ]
required_write_scopes = [CoreScopes.NULL]
serializer_class = LinkedNodesRelationshipSerializer
parser_classes = (JSONAPIRelationshipParser, JSONAPIRelationshipParserForRegularJSON, )
def get_object(self):
node = self.get_node(check_object_permissions=False)
auth = get_user_auth(self.request)
obj = {
'data': [
linked_node for linked_node in
node.linked_nodes.filter(is_deleted=False).exclude(type='osf.collection').exclude(type='osf.registration')
if linked_node.can_view(auth)
], 'self': node,
}
self.check_object_permissions(self.request, obj)
return obj
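# Shape of the object assembled above (illustration): a plain dict of the form
# {'data': [<visible linked AbstractNode>, ...], 'self': <Registration>};
# check_object_permissions then runs the relationship permission checks against
# that dict rather than against a single model instance.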
class RegistrationLinkedRegistrationsRelationship(JSONAPIBaseView, generics.RetrieveAPIView, RegistrationMixin):
"""Relationship Endpoint for Registration -> Linked Registration relationships. *Read-only*
Used to retrieve the ids of the linked registrations attached to this collection. For each id, there
exists a node link that contains that registration.
"""
view_category = 'registrations'
view_name = 'node-registration-pointer-relationship'
permission_classes = (
ContributorOrPublicForRelationshipPointers,
drf_permissions.IsAuthenticatedOrReadOnly,
base_permissions.TokenHasScope,
ReadOnlyIfRegistration,
)
required_read_scopes = [CoreScopes.NODE_LINKS_READ]
required_write_scopes = [CoreScopes.NULL]
serializer_class = LinkedRegistrationsRelationshipSerializer
parser_classes = (JSONAPIRelationshipParser, JSONAPIRelationshipParserForRegularJSON,)
def get_object(self):
node = self.get_node(check_object_permissions=False)
auth = get_user_auth(self.request)
obj = {
'data': [
linked_registration for linked_registration in
node.linked_nodes.filter(is_deleted=False, type='osf.registration').exclude(type='osf.collection')
if linked_registration.can_view(auth)
],
'self': node,
}
self.check_object_permissions(self.request, obj)
return obj
class RegistrationLinkedRegistrationsList(NodeLinkedRegistrationsList, RegistrationMixin):
"""List of registrations linked to this registration. *Read-only*.
Linked registrations are the registration nodes pointed to by node links.
<!--- Copied Spiel from RegistrationDetail -->
Registrations are read-only snapshots of a project. This view shows details about the given registration.
Each resource contains the full representation of the registration, meaning additional requests to an individual
registration's detail view are not necessary. A withdrawn registration will display a limited subset of information,
namely, title, description, created, registration, withdrawn, date_registered, withdrawal_justification, and
registration supplement. All other fields will be displayed as null. Additionally, the only relationships permitted
to be accessed for a withdrawn registration are the contributors - other relationships will return a 403.
##Linked Registration Attributes
<!--- Copied Attributes from RegistrationDetail -->
Registrations have the "registrations" `type`.
    name                         type               description
    =======================================================================================================
    title                        string             title of the registered project or component
    description                  string             description of the registered node
    category                     string             node category, must be one of the allowed values
    date_created                 iso8601 timestamp  timestamp that the node was created
    date_modified                iso8601 timestamp  timestamp when the node was last updated
    tags                         array of strings   list of tags that describe the registered node
    current_user_can_comment     boolean            whether the current user is allowed to post comments
    current_user_permissions     array of strings   list of strings representing the permissions for the current user on this node
    fork                         boolean            is this project a fork?
    registration                 boolean            has this project been registered? (always true - may be deprecated in future versions)
    collection                   boolean            is this registered node a collection? (always false - may be deprecated in future versions)
    node_license                 object             details of the license applied to the node
        year                     string             date range of the license
        copyright_holders        array of strings   holders of the applied license
    public                       boolean            has this registration been made publicly-visible?
    withdrawn                    boolean            has this registration been withdrawn?
    date_registered              iso8601 timestamp  timestamp that the registration was created
    embargo_end_date             iso8601 timestamp  when the embargo on this registration will be lifted (if applicable)
    withdrawal_justification     string             reasons for withdrawing the registration
    pending_withdrawal           boolean            is this registration pending withdrawal?
    pending_withdrawal_approval  boolean            is this registration pending approval?
    pending_embargo_approval     boolean            is the associated Embargo awaiting approval by project admins?
    registered_meta              dictionary         registration supplementary information
    registration_supplement      string             registration template
##Links
See the [JSON-API spec regarding pagination](http://jsonapi.org/format/1.0/#fetching-pagination).
##Query Params
+ `page=<Int>` -- page number of results to view, default 1
+ `filter[<fieldname>]=<Str>` -- fields and values to filter the search results on.
Nodes may be filtered by their `title`, `category`, `description`, `public`, `registration`, or `tags`. `title`,
`description`, and `category` are string fields and will be filtered using simple substring matching. `public` and
`registration` are booleans, and can be filtered using truthy values, such as `true`, `false`, `0`, or `1`. Note
that quoting `true` or `false` in the query will cause the match to fail regardless. `tags` is an array of simple strings.
#This Request/Response
"""
serializer_class = RegistrationSerializer
view_category = 'registrations'
view_name = 'linked-registrations'
class RegistrationViewOnlyLinksList(NodeViewOnlyLinksList, RegistrationMixin):
"""The documentation for this endpoint can be found [here](https://developer.osf.io/#operation/registrations_view_only_links_list).
"""
required_read_scopes = [CoreScopes.REGISTRATION_VIEW_ONLY_LINKS_READ]
required_write_scopes = [CoreScopes.REGISTRATION_VIEW_ONLY_LINKS_WRITE]
view_category = 'registrations'
view_name = 'registration-view-only-links'
class RegistrationViewOnlyLinkDetail(NodeViewOnlyLinkDetail, RegistrationMixin):
"""The documentation for this endpoint can be found [here](https://developer.osf.io/#operation/registrations_view_only_links_read).
"""
required_read_scopes = [CoreScopes.REGISTRATION_VIEW_ONLY_LINKS_READ]
required_write_scopes = [CoreScopes.REGISTRATION_VIEW_ONLY_LINKS_WRITE]
view_category = 'registrations'
view_name = 'registration-view-only-link-detail'
class RegistrationIdentifierList(RegistrationMixin, NodeIdentifierList):
"""The documentation for this endpoint can be found [here](https://developer.osf.io/#operation/registrations_identifiers_list).
"""
serializer_class = RegistrationIdentifierSerializer
class RegistrationActionList(JSONAPIBaseView, ListFilterMixin, generics.ListCreateAPIView, ProviderMixin):
provider_class = RegistrationProvider
permission_classes = (
drf_permissions.IsAuthenticated,
base_permissions.TokenHasScope,
ContributorOrModerator,
)
parser_classes = (JSONAPIMultipleRelationshipsParser, JSONAPIMultipleRelationshipsParserForRegularJSON,)
required_read_scopes = [CoreScopes.ACTIONS_READ]
required_write_scopes = [CoreScopes.ACTIONS_WRITE]
view_category = 'registrations'
view_name = 'registration-actions-list'
serializer_class = RegistrationActionSerializer
ordering = ('-created',)
node_lookup_url_kwarg = 'node_id'
def get_registration(self):
registration = get_object_or_error(
Registration,
self.kwargs[self.node_lookup_url_kwarg],
self.request,
check_deleted=False,
)
# May raise a permission denied
self.check_object_permissions(self.request, registration)
return registration
def get_default_queryset(self):
return self.get_registration().actions.all()
def get_queryset(self):
return self.get_queryset_from_request()
def perform_create(self, serializer):
target = serializer.validated_data['target']
self.check_object_permissions(self.request, target)
if not target.provider.is_reviewed:
            raise Conflict(f'{target.provider.name} is an unmoderated provider. If you believe this is an error, contact OSF Support.')
serializer.save(user=self.request.user)
class RegistrationRequestList(JSONAPIBaseView, ListFilterMixin, generics.ListCreateAPIView, RegistrationMixin, ProviderMixin):
provider_class = RegistrationProvider
required_read_scopes = [CoreScopes.NODE_REQUESTS_READ]
required_write_scopes = [CoreScopes.NULL]
permission_classes = (
drf_permissions.IsAuthenticated,
base_permissions.TokenHasScope,
MustBeModerator,
)
view_category = 'registrations'
view_name = 'registration-requests-list'
serializer_class = RegistrationRequestSerializer
def get_default_queryset(self):
return self.get_node().requests.all()
def get_queryset(self):
return self.get_queryset_from_request()
class RegistrationSchemaResponseList(JSONAPIBaseView, generics.ListAPIView, ListFilterMixin, RegistrationMixin):
required_read_scopes = [CoreScopes.READ_SCHEMA_RESPONSES]
required_write_scopes = [CoreScopes.NULL]
permission_classes = (
RegistrationSchemaResponseListPermission,
drf_permissions.IsAuthenticatedOrReadOnly,
base_permissions.TokenHasScope,
ExcludeWithdrawals,
)
view_category = 'registrations'
view_name = 'schema-responses-list'
serializer_class = RegistrationSchemaResponseSerializer
def get_object(self):
return self.get_node()
def get_default_queryset(self):
'''Return all SchemaResponses on the Registration that should be visible to the user.
For contributors to the Registration, this should be all of its SchemaResponses.
        For moderators, this should be all PENDING_MODERATION or APPROVED SchemaResponses.
For all others, this should be only the APPROVED responses.
'''
user = self.request.user
registration = self.get_node()
# Get the SchemaResponses from the root
all_responses = registration.root.schema_responses.annotate(
is_pending_current_user_approval=(
schema_response_annotations.is_pending_current_user_approval(user)
),
is_original_response=schema_response_annotations.IS_ORIGINAL_RESPONSE,
)
is_contributor = registration.has_permission(user, 'read') if user else False
if is_contributor:
return all_responses
is_moderator = (
user and
registration.is_moderated and
user.has_perm('view_submissions', registration.provider)
)
if is_moderator:
return all_responses.filter(
reviews_state__in=[state.db_name for state in MODERATOR_VISIBLE_STATES],
)
return all_responses.filter(reviews_state=ApprovalStates.APPROVED.db_name)
def get_queryset(self):
return self.get_queryset_from_request()
class RegistrationResourceList(JSONAPIBaseView, generics.ListAPIView, ListFilterMixin, RegistrationMixin):
permission_classes = (
RegistrationResourceListPermission,
drf_permissions.IsAuthenticatedOrReadOnly,
base_permissions.TokenHasScope,
)
required_read_scopes = [CoreScopes.READ_REGISTRATION_RESOURCES]
required_write_scopes = [CoreScopes.WRITE_REGISTRATION_RESOURCES]
view_category = 'registrations'
view_name = 'resource-list'
serializer_class = ResourceSerializer
parser_classes = (JSONAPIMultipleRelationshipsParser, JSONAPIMultipleRelationshipsParserForRegularJSON)
def get_node(self):
return super().get_node(check_object_permissions=False)
def get_default_queryset(self):
root_registration = self.get_node()
return OutcomeArtifact.objects.for_registration(root_registration).filter(
finalized=True,
deleted__isnull=True,
)
def get_queryset(self):
return self.get_queryset_from_request()
def get_permissions_proxy(self):
return self.get_node()
|
76f7006cec8b4dce881a764873775cde00f894ad
|
1f3715bc6c1137b605959ea938aae18ce357ea18
|
/pubgate/db/user.py
|
bde702b5828bf138907af84761409ac48c87ff69
|
[
"BSD-3-Clause"
] |
permissive
|
autogestion/pubgate
|
72453b09cc2a7070a1c4aa2c8089fca4fc25706a
|
e9a1850a60bb34aac59542b97f730c08dc8d46fa
|
refs/heads/master
| 2022-12-09T23:40:06.857427
| 2020-06-28T20:01:01
| 2020-06-28T20:01:01
| 137,607,267
| 110
| 8
|
BSD-3-Clause
| 2022-12-08T03:14:56
| 2018-06-16T20:03:57
|
Python
|
UTF-8
|
Python
| false
| false
| 3,102
|
py
|
user.py
|
from sanic_motor import BaseModel
from simple_bcrypt import generate_password_hash
# import flask_admin
# from flask_admin.contrib.pymongo.view import ModelView
from pubgate.utils.user import UserUtils
from pubgate.db.boxes import Outbox, Inbox
from pubgate.db.managers import BaseManager
def actor_clean(data):
return [item["activity"]["object"]["actor"] for item in data]
def actor_clean_inbox(data):
return [item["activity"]["object"]["object"] for item in data]
def actor_clean_liked(data):
return [item["activity"]["object"] for item in data]
class User(BaseModel, UserUtils, BaseManager):
__coll__ = 'users'
__unique_fields__ = ['name']
# @classmethod
# async def get(cls, name):
# user = await cls.find_one(dict(name=name))
# return user
@classmethod
async def create(cls, user_data, base_url):
user_data["name"] = user_data.pop("username")
user_data["password"] = generate_password_hash(user_data["password"])
user_data["alias"] = f"{base_url}/{user_data['name']}"
user_data["uri"] = f"{base_url}/@{user_data['name']}"
await cls.insert_one(user_data)
user = await cls.find_one({"name": user_data['name']})
return user
def follow_filter(self, model):
return {
"deleted": False,
**model.by_user(self.name),
"activity.type": "Accept",
"activity.object.type": "Follow"
}
async def followers_get(self):
data = await Outbox.find(filter=self.follow_filter(Outbox))
return list(set(actor_clean(data.objects)))
async def followers_paged(self, request):
return await self.get_ordered(request, Outbox,
self.follow_filter(Outbox),
actor_clean, self.followers)
async def following_paged(self, request):
return await self.get_ordered(request, Inbox,
self.follow_filter(Inbox),
actor_clean_inbox, self.following)
async def liked_paged(self, request):
filters = {
"deleted": False,
**Outbox.by_user(self.name),
"activity.type": "Like"
}
return await self.get_ordered(request, Outbox, filters,
actor_clean_liked, self.liked)
async def outbox_paged(self, request):
filters = {
"deleted": False,
**Outbox.by_user(self.name),
"activity.type": {'$in': ["Create", "Announce", "Like"]}
}
return await self.get_ordered(request, Outbox, filters,
self.activity_clean, self.outbox)
async def inbox_paged(self, request):
filters = {
"deleted": False,
**Inbox.by_user(self.name),
"activity.type": {'$in': ["Create", "Announce", "Like"]}
}
return await self.get_ordered(request, Inbox, filters,
self.activity_clean, self.inbox)
|
cf73fd92e072b22c37d60250bd590c8c1a44ce49
|
4e6f104621ce510157eda4ccce1984dbebd0d4fa
|
/tests/test_saved_analysis.py
|
0ffbd584f3b1097fc493153b602a414fdcfde1d6
|
[
"MIT"
] |
permissive
|
AMDResearch/omniperf
|
c9d8b145e8cdc99c82ac999da19596375e4683a2
|
eb3dc981b0e2ec3a8844145da55b600b86cca26d
|
refs/heads/main
| 2023-08-18T12:21:10.181384
| 2023-08-17T17:53:25
| 2023-08-17T17:53:25
| 561,919,887
| 106
| 20
|
MIT
| 2023-09-11T19:16:10
| 2022-11-04T19:53:55
|
Python
|
UTF-8
|
Python
| false
| false
| 37,638
|
py
|
test_saved_analysis.py
|
import sys
import glob
def compare(prev, cur):
    # Gather the per-metric CSV files produced by each analysis run.
    prev_csvs = glob.glob(prev + "/*")
    cur_csvs = glob.glob(cur + "/*")
    for prev_csv in prev_csvs:
        # Bare file name, so the counterpart can be opened from `cur`.
        csv_name = prev_csv[prev_csv.rfind("/") + 1 :]
        with open(prev_csv, "r") as csv1, open(
            cur + "/" + csv_name, "r"
        ) as csv2:  # Import CSV files
            import1 = csv1.readlines()
            import2 = csv2.readlines()
            # Every row of the saved analysis must also appear in the previous
            # analysis; exit non-zero so pytest reports the run as a failure.
            for row in import2:
                if row not in import1:
                    sys.exit(1)
##################################################
## Generated tests ##
## Meant to run after test_analyze_workloads ##
##################################################
def test_saved_D_str_inv1_mi100():
compare(
"cmake/workloads/D_str_inv1/mi100/prev_analysis",
"cmake/workloads/D_str_inv1/mi100/saved_analysis",
)
def test_saved_D_str_inv1_mi200():
compare(
"cmake/workloads/D_str_inv1/mi200/prev_analysis",
"cmake/workloads/D_str_inv1/mi200/saved_analysis",
)
def test_saved_dev01p3_mi100():
compare(
"cmake/workloads/dev01p3/mi100/prev_analysis",
"cmake/workloads/dev01p3/mi100/saved_analysis",
)
def test_saved_SQC_mi100():
compare(
"cmake/workloads/SQC/mi100/prev_analysis",
"cmake/workloads/SQC/mi100/saved_analysis",
)
def test_saved_SQC_mi200():
compare(
"cmake/workloads/SQC/mi200/prev_analysis",
"cmake/workloads/SQC/mi200/saved_analysis",
)
def test_saved_Axes2_mi100():
compare(
"cmake/workloads/Axes2/mi100/prev_analysis",
"cmake/workloads/Axes2/mi100/saved_analysis",
)
def test_saved_Axes2_mi200():
compare(
"cmake/workloads/Axes2/mi200/prev_analysis",
"cmake/workloads/Axes2/mi200/saved_analysis",
)
def test_saved_no_roof_SQ_mi100():
compare(
"cmake/workloads/no_roof_SQ/mi100/prev_analysis",
"cmake/workloads/no_roof_SQ/mi100/saved_analysis",
)
def test_saved_no_roof_SQ_mi200():
compare(
"cmake/workloads/no_roof_SQ/mi200/prev_analysis",
"cmake/workloads/no_roof_SQ/mi200/saved_analysis",
)
def test_saved_CPF_mi100():
compare(
"cmake/workloads/CPF/mi100/prev_analysis",
"cmake/workloads/CPF/mi100/saved_analysis",
)
def test_saved_CPF_mi200():
compare(
"cmake/workloads/CPF/mi200/prev_analysis",
"cmake/workloads/CPF/mi200/saved_analysis",
)
def test_saved_no_roof_LDS_mi100():
compare(
"cmake/workloads/no_roof_LDS/mi100/prev_analysis",
"cmake/workloads/no_roof_LDS/mi100/saved_analysis",
)
def test_saved_no_roof_LDS_mi200():
compare(
"cmake/workloads/no_roof_LDS/mi200/prev_analysis",
"cmake/workloads/no_roof_LDS/mi200/saved_analysis",
)
def test_saved_D_str_inv4_mi100():
compare(
"cmake/workloads/D_str_inv4/mi100/prev_analysis",
"cmake/workloads/D_str_inv4/mi100/saved_analysis",
)
def test_saved_D_str_inv4_mi200():
compare(
"cmake/workloads/D_str_inv4/mi200/prev_analysis",
"cmake/workloads/D_str_inv4/mi200/saved_analysis",
)
def test_saved_roof_only_K_int_inv2_mi200():
compare(
"cmake/workloads/roof_only_K_int_inv2/mi200/prev_analysis",
"cmake/workloads/roof_only_K_int_inv2/mi200/saved_analysis",
)
def test_saved_no_roof_SPI_mi100():
compare(
"cmake/workloads/no_roof_SPI/mi100/prev_analysis",
"cmake/workloads/no_roof_SPI/mi100/saved_analysis",
)
def test_saved_no_roof_SPI_mi200():
compare(
"cmake/workloads/no_roof_SPI/mi200/prev_analysis",
"cmake/workloads/no_roof_SPI/mi200/saved_analysis",
)
def test_saved_no_roof_K_str_valid_2_mi100():
compare(
"cmake/workloads/no_roof_K_str_valid_2/mi100/prev_analysis",
"cmake/workloads/no_roof_K_str_valid_2/mi100/saved_analysis",
)
def test_saved_no_roof_K_str_valid_2_mi200():
compare(
"cmake/workloads/no_roof_K_str_valid_2/mi200/prev_analysis",
"cmake/workloads/no_roof_K_str_valid_2/mi200/saved_analysis",
)
def test_saved_no_roof_mixbench1_mi100():
compare(
"cmake/workloads/no_roof_mixbench1/mi100/prev_analysis",
"cmake/workloads/no_roof_mixbench1/mi100/saved_analysis",
)
def test_saved_no_roof_mixbench1_mi200():
compare(
"cmake/workloads/no_roof_mixbench1/mi200/prev_analysis",
"cmake/workloads/no_roof_mixbench1/mi200/saved_analysis",
)
def test_saved_no_roof_TA_mi100():
compare(
"cmake/workloads/no_roof_TA/mi100/prev_analysis",
"cmake/workloads/no_roof_TA/mi100/saved_analysis",
)
def test_saved_no_roof_TA_mi200():
compare(
"cmake/workloads/no_roof_TA/mi200/prev_analysis",
"cmake/workloads/no_roof_TA/mi200/saved_analysis",
)
def test_saved_no_roof_CPF_mi100():
compare(
"cmake/workloads/no_roof_CPF/mi100/prev_analysis",
"cmake/workloads/no_roof_CPF/mi100/saved_analysis",
)
def test_saved_no_roof_CPF_mi200():
compare(
"cmake/workloads/no_roof_CPF/mi200/prev_analysis",
"cmake/workloads/no_roof_CPF/mi200/saved_analysis",
)
def test_saved_no_roof_CPC_mi100():
compare(
"cmake/workloads/no_roof_CPC/mi100/prev_analysis",
"cmake/workloads/no_roof_CPC/mi100/saved_analysis",
)
def test_saved_no_roof_CPC_mi200():
compare(
"cmake/workloads/no_roof_CPC/mi200/prev_analysis",
"cmake/workloads/no_roof_CPC/mi200/saved_analysis",
)
def test_saved_K_str_inv3_mi100():
compare(
"cmake/workloads/K_str_inv3/mi100/prev_analysis",
"cmake/workloads/K_str_inv3/mi100/saved_analysis",
)
def test_saved_K_str_inv3_mi200():
compare(
"cmake/workloads/K_str_inv3/mi200/prev_analysis",
"cmake/workloads/K_str_inv3/mi200/saved_analysis",
)
def test_saved_LDS_mi100():
compare(
"cmake/workloads/LDS/mi100/prev_analysis",
"cmake/workloads/LDS/mi100/saved_analysis",
)
def test_saved_LDS_mi200():
compare(
"cmake/workloads/LDS/mi200/prev_analysis",
"cmake/workloads/LDS/mi200/saved_analysis",
)
def test_saved_no_roof_K_str_valid_3_mi100():
compare(
"cmake/workloads/no_roof_K_str_valid_3/mi100/prev_analysis",
"cmake/workloads/no_roof_K_str_valid_3/mi100/saved_analysis",
)
def test_saved_roof_only_D_int_inv2_mi200():
compare(
"cmake/workloads/roof_only_D_int_inv2/mi200/prev_analysis",
"cmake/workloads/roof_only_D_int_inv2/mi200/saved_analysis",
)
def test_saved_roof_only_K_str_inv1_mi200():
compare(
"cmake/workloads/roof_only_K_str_inv1/mi200/prev_analysis",
"cmake/workloads/roof_only_K_str_inv1/mi200/saved_analysis",
)
def test_saved_roof_only_SQC_mi200():
compare(
"cmake/workloads/roof_only_SQC/mi200/prev_analysis",
"cmake/workloads/roof_only_SQC/mi200/saved_analysis",
)
def test_saved_no_roof_Axes2_mi100():
compare(
"cmake/workloads/no_roof_Axes2/mi100/prev_analysis",
"cmake/workloads/no_roof_Axes2/mi100/saved_analysis",
)
def test_saved_no_roof_Axes2_mi200():
compare(
"cmake/workloads/no_roof_Axes2/mi200/prev_analysis",
"cmake/workloads/no_roof_Axes2/mi200/saved_analysis",
)
def test_saved_HBM_mi100():
compare(
"cmake/workloads/HBM/mi100/prev_analysis",
"cmake/workloads/HBM/mi100/saved_analysis",
)
def test_saved_HBM_mi200():
compare(
"cmake/workloads/HBM/mi200/prev_analysis",
"cmake/workloads/HBM/mi200/saved_analysis",
)
def test_saved_roof_only_TA_CPC_mi200():
compare(
"cmake/workloads/roof_only_TA_CPC/mi200/prev_analysis",
"cmake/workloads/roof_only_TA_CPC/mi200/saved_analysis",
)
def test_saved_roof_only_D_val_int_mi200():
compare(
"cmake/workloads/roof_only_D_val_int/mi200/prev_analysis",
"cmake/workloads/roof_only_D_val_int/mi200/saved_analysis",
)
def test_saved_no_roof_L2_mi100():
compare(
"cmake/workloads/no_roof_L2/mi100/prev_analysis",
"cmake/workloads/no_roof_L2/mi100/saved_analysis",
)
def test_saved_no_roof_L2_mi200():
compare(
"cmake/workloads/no_roof_L2/mi200/prev_analysis",
"cmake/workloads/no_roof_L2/mi200/saved_analysis",
)
def test_saved_L2_mi100():
compare(
"cmake/workloads/L2/mi100/prev_analysis",
"cmake/workloads/L2/mi100/saved_analysis",
)
def test_saved_L2_mi200():
compare(
"cmake/workloads/L2/mi200/prev_analysis",
"cmake/workloads/L2/mi200/saved_analysis",
)
def test_saved_no_roof_dev1_mi100():
compare(
"cmake/workloads/no_roof_dev1/mi100/prev_analysis",
"cmake/workloads/no_roof_dev1/mi100/saved_analysis",
)
def test_saved_no_roof_dev1_mi200():
compare(
"cmake/workloads/no_roof_dev1/mi200/prev_analysis",
"cmake/workloads/no_roof_dev1/mi200/saved_analysis",
)
def test_saved_roof_only_K_str_inv3_mi200():
compare(
"cmake/workloads/roof_only_K_str_inv3/mi200/prev_analysis",
"cmake/workloads/roof_only_K_str_inv3/mi200/saved_analysis",
)
def test_saved_roof_only_K_str_valid_1_mi200():
compare(
"cmake/workloads/roof_only_K_str_valid_1/mi200/prev_analysis",
"cmake/workloads/roof_only_K_str_valid_1/mi200/saved_analysis",
)
def test_saved_roof_only_CPC_mi200():
compare(
"cmake/workloads/roof_only_CPC/mi200/prev_analysis",
"cmake/workloads/roof_only_CPC/mi200/saved_analysis",
)
def test_saved_no_roof_Axes3_mi100():
compare(
"cmake/workloads/no_roof_Axes3/mi100/prev_analysis",
"cmake/workloads/no_roof_Axes3/mi100/saved_analysis",
)
def test_saved_no_roof_Axes3_mi200():
compare(
"cmake/workloads/no_roof_Axes3/mi200/prev_analysis",
"cmake/workloads/no_roof_Axes3/mi200/saved_analysis",
)
def test_saved_no_roof_D_str_inv3_mi100():
compare(
"cmake/workloads/no_roof_D_str_inv3/mi100/prev_analysis",
"cmake/workloads/no_roof_D_str_inv3/mi100/saved_analysis",
)
def test_saved_no_roof_D_str_inv3_mi200():
compare(
"cmake/workloads/no_roof_D_str_inv3/mi200/prev_analysis",
"cmake/workloads/no_roof_D_str_inv3/mi200/saved_analysis",
)
def test_saved_no_roof_D_int_inv2_mi100():
compare(
"cmake/workloads/no_roof_D_int_inv2/mi100/prev_analysis",
"cmake/workloads/no_roof_D_int_inv2/mi100/saved_analysis",
)
def test_saved_no_roof_D_int_inv2_mi200():
compare(
"cmake/workloads/no_roof_D_int_inv2/mi200/prev_analysis",
"cmake/workloads/no_roof_D_int_inv2/mi200/saved_analysis",
)
def test_saved_TD_mi100():
compare(
"cmake/workloads/TD/mi100/prev_analysis",
"cmake/workloads/TD/mi100/saved_analysis",
)
def test_saved_TD_mi200():
compare(
"cmake/workloads/TD/mi200/prev_analysis",
"cmake/workloads/TD/mi200/saved_analysis",
)
def test_saved_roof_only_D_int_inv1_mi200():
compare(
"cmake/workloads/roof_only_D_int_inv1/mi200/prev_analysis",
"cmake/workloads/roof_only_D_int_inv1/mi200/saved_analysis",
)
def test_saved_D_val_int2_mi100():
compare(
"cmake/workloads/D_val_int2/mi100/prev_analysis",
"cmake/workloads/D_val_int2/mi100/saved_analysis",
)
def test_saved_D_val_int2_mi200():
compare(
"cmake/workloads/D_val_int2/mi200/prev_analysis",
"cmake/workloads/D_val_int2/mi200/saved_analysis",
)
def test_saved_no_roof_mixbench2_mi100():
compare(
"cmake/workloads/no_roof_mixbench2/mi100/prev_analysis",
"cmake/workloads/no_roof_mixbench2/mi100/saved_analysis",
)
def test_saved_no_roof_mixbench2_mi200():
compare(
"cmake/workloads/no_roof_mixbench2/mi200/prev_analysis",
"cmake/workloads/no_roof_mixbench2/mi200/saved_analysis",
)
def test_saved_roof_only_SPI_mi200():
compare(
"cmake/workloads/roof_only_SPI/mi200/prev_analysis",
"cmake/workloads/roof_only_SPI/mi200/saved_analysis",
)
def test_saved_no_roof_D_val_int2_mi100():
compare(
"cmake/workloads/no_roof_D_val_int2/mi100/prev_analysis",
"cmake/workloads/no_roof_D_val_int2/mi100/saved_analysis",
)
def test_saved_no_roof_D_val_int2_mi200():
compare(
"cmake/workloads/no_roof_D_val_int2/mi200/prev_analysis",
"cmake/workloads/no_roof_D_val_int2/mi200/saved_analysis",
)
def test_saved_K_str_inv1_mi100():
compare(
"cmake/workloads/K_str_inv1/mi100/prev_analysis",
"cmake/workloads/K_str_inv1/mi100/saved_analysis",
)
def test_saved_K_str_inv1_mi200():
compare(
"cmake/workloads/K_str_inv1/mi200/prev_analysis",
"cmake/workloads/K_str_inv1/mi200/saved_analysis",
)
def test_saved_roof_only_TA_mi200():
compare(
"cmake/workloads/roof_only_TA/mi200/prev_analysis",
"cmake/workloads/roof_only_TA/mi200/saved_analysis",
)
def test_saved_K_str_valid_3_mi100():
compare(
"cmake/workloads/K_str_valid_3/mi100/prev_analysis",
"cmake/workloads/K_str_valid_3/mi100/saved_analysis",
)
def test_saved_SQ_mi100():
compare(
"cmake/workloads/SQ/mi100/prev_analysis",
"cmake/workloads/SQ/mi100/saved_analysis",
)
def test_saved_SQ_mi200():
compare(
"cmake/workloads/SQ/mi200/prev_analysis",
"cmake/workloads/SQ/mi200/saved_analysis",
)
def test_saved_no_roof_D_str_inv1_mi100():
compare(
"cmake/workloads/no_roof_D_str_inv1/mi100/prev_analysis",
"cmake/workloads/no_roof_D_str_inv1/mi100/saved_analysis",
)
def test_saved_no_roof_D_str_inv1_mi200():
compare(
"cmake/workloads/no_roof_D_str_inv1/mi200/prev_analysis",
"cmake/workloads/no_roof_D_str_inv1/mi200/saved_analysis",
)
def test_saved_no_roof_dev01p3_mi100():
compare(
"cmake/workloads/no_roof_dev01p3/mi100/prev_analysis",
"cmake/workloads/no_roof_dev01p3/mi100/saved_analysis",
)
def test_saved_roof_only_D_val_int2_mi200():
compare(
"cmake/workloads/roof_only_D_val_int2/mi200/prev_analysis",
"cmake/workloads/roof_only_D_val_int2/mi200/saved_analysis",
)
def test_saved_no_roof_D_str_inv4_mi100():
compare(
"cmake/workloads/no_roof_D_str_inv4/mi100/prev_analysis",
"cmake/workloads/no_roof_D_str_inv4/mi100/saved_analysis",
)
def test_saved_no_roof_D_str_inv4_mi200():
compare(
"cmake/workloads/no_roof_D_str_inv4/mi200/prev_analysis",
"cmake/workloads/no_roof_D_str_inv4/mi200/saved_analysis",
)
def test_saved_roof_only_CPF_mi200():
compare(
"cmake/workloads/roof_only_CPF/mi200/prev_analysis",
"cmake/workloads/roof_only_CPF/mi200/saved_analysis",
)
def test_saved_mixbench_mi100():
compare(
"cmake/workloads/mixbench/mi100/prev_analysis",
"cmake/workloads/mixbench/mi100/saved_analysis",
)
def test_saved_mixbench_mi200():
compare(
"cmake/workloads/mixbench/mi200/prev_analysis",
"cmake/workloads/mixbench/mi200/saved_analysis",
)
def test_saved_roof_only_D_str_inv4_mi200():
compare(
"cmake/workloads/roof_only_D_str_inv4/mi200/prev_analysis",
"cmake/workloads/roof_only_D_str_inv4/mi200/saved_analysis",
)
def test_saved_no_roof_kernels_mi100():
compare(
"cmake/workloads/no_roof_kernels/mi100/prev_analysis",
"cmake/workloads/no_roof_kernels/mi100/saved_analysis",
)
def test_saved_no_roof_kernels_mi200():
compare(
"cmake/workloads/no_roof_kernels/mi200/prev_analysis",
"cmake/workloads/no_roof_kernels/mi200/saved_analysis",
)
def test_saved_roof_only_TCC_mi200():
compare(
"cmake/workloads/roof_only_TCC/mi200/prev_analysis",
"cmake/workloads/roof_only_TCC/mi200/saved_analysis",
)
def test_saved_TA_CPC_mi100():
compare(
"cmake/workloads/TA_CPC/mi100/prev_analysis",
"cmake/workloads/TA_CPC/mi100/saved_analysis",
)
def test_saved_TA_CPC_mi200():
compare(
"cmake/workloads/TA_CPC/mi200/prev_analysis",
"cmake/workloads/TA_CPC/mi200/saved_analysis",
)
def test_saved_roof_only_SQ_mi200():
compare(
"cmake/workloads/roof_only_SQ/mi200/prev_analysis",
"cmake/workloads/roof_only_SQ/mi200/saved_analysis",
)
def test_saved_K_int_inv2_mi100():
compare(
"cmake/workloads/K_int_inv2/mi100/prev_analysis",
"cmake/workloads/K_int_inv2/mi100/saved_analysis",
)
def test_saved_K_int_inv2_mi200():
compare(
"cmake/workloads/K_int_inv2/mi200/prev_analysis",
"cmake/workloads/K_int_inv2/mi200/saved_analysis",
)
def test_saved_roof_only_TCP_mi200():
compare(
"cmake/workloads/roof_only_TCP/mi200/prev_analysis",
"cmake/workloads/roof_only_TCP/mi200/saved_analysis",
)
def test_saved_roof_only_K_str_valid_2_mi200():
compare(
"cmake/workloads/roof_only_K_str_valid_2/mi200/prev_analysis",
"cmake/workloads/roof_only_K_str_valid_2/mi200/saved_analysis",
)
def test_saved_D_int_inv2_mi100():
compare(
"cmake/workloads/D_int_inv2/mi100/prev_analysis",
"cmake/workloads/D_int_inv2/mi100/saved_analysis",
)
def test_saved_D_int_inv2_mi200():
compare(
"cmake/workloads/D_int_inv2/mi200/prev_analysis",
"cmake/workloads/D_int_inv2/mi200/saved_analysis",
)
def test_saved_roof_only_Axes3_mi200():
compare(
"cmake/workloads/roof_only_Axes3/mi200/prev_analysis",
"cmake/workloads/roof_only_Axes3/mi200/saved_analysis",
)
def test_saved_dev0_mi100():
compare(
"cmake/workloads/dev0/mi100/prev_analysis",
"cmake/workloads/dev0/mi100/saved_analysis",
)
def test_saved_dev0_mi200():
compare(
"cmake/workloads/dev0/mi200/prev_analysis",
"cmake/workloads/dev0/mi200/saved_analysis",
)
def test_saved_roof_only_K_str_inv2_mi200():
compare(
"cmake/workloads/roof_only_K_str_inv2/mi200/prev_analysis",
"cmake/workloads/roof_only_K_str_inv2/mi200/saved_analysis",
)
def test_saved_Axes1_mi100():
compare(
"cmake/workloads/Axes1/mi100/prev_analysis",
"cmake/workloads/Axes1/mi100/saved_analysis",
)
def test_saved_Axes1_mi200():
compare(
"cmake/workloads/Axes1/mi200/prev_analysis",
"cmake/workloads/Axes1/mi200/saved_analysis",
)
def test_saved_roof_only_HBM_mi200():
compare(
"cmake/workloads/roof_only_HBM/mi200/prev_analysis",
"cmake/workloads/roof_only_HBM/mi200/saved_analysis",
)
def test_saved_D_val_int_mi100():
compare(
"cmake/workloads/D_val_int/mi100/prev_analysis",
"cmake/workloads/D_val_int/mi100/saved_analysis",
)
def test_saved_D_val_int_mi200():
compare(
"cmake/workloads/D_val_int/mi200/prev_analysis",
"cmake/workloads/D_val_int/mi200/saved_analysis",
)
def test_saved_no_roof_TCC_mi100():
compare(
"cmake/workloads/no_roof_TCC/mi100/prev_analysis",
"cmake/workloads/no_roof_TCC/mi100/saved_analysis",
)
def test_saved_no_roof_TCC_mi200():
compare(
"cmake/workloads/no_roof_TCC/mi200/prev_analysis",
"cmake/workloads/no_roof_TCC/mi200/saved_analysis",
)
def test_saved_no_roof_SQC_mi100():
compare(
"cmake/workloads/no_roof_SQC/mi100/prev_analysis",
"cmake/workloads/no_roof_SQC/mi100/saved_analysis",
)
def test_saved_no_roof_SQC_mi200():
compare(
"cmake/workloads/no_roof_SQC/mi200/prev_analysis",
"cmake/workloads/no_roof_SQC/mi200/saved_analysis",
)
def test_saved_roof_only_TD_mi200():
compare(
"cmake/workloads/roof_only_TD/mi200/prev_analysis",
"cmake/workloads/roof_only_TD/mi200/saved_analysis",
)
def test_saved_no_roof_K_int_inv1_mi100():
compare(
"cmake/workloads/no_roof_K_int_inv1/mi100/prev_analysis",
"cmake/workloads/no_roof_K_int_inv1/mi100/saved_analysis",
)
def test_saved_no_roof_K_int_inv1_mi200():
compare(
"cmake/workloads/no_roof_K_int_inv1/mi200/prev_analysis",
"cmake/workloads/no_roof_K_int_inv1/mi200/saved_analysis",
)
def test_saved_no_roof_Axes1_mi100():
compare(
"cmake/workloads/no_roof_Axes1/mi100/prev_analysis",
"cmake/workloads/no_roof_Axes1/mi100/saved_analysis",
)
def test_saved_no_roof_Axes1_mi200():
compare(
"cmake/workloads/no_roof_Axes1/mi200/prev_analysis",
"cmake/workloads/no_roof_Axes1/mi200/saved_analysis",
)
def test_saved_SPI_mi100():
compare(
"cmake/workloads/SPI/mi100/prev_analysis",
"cmake/workloads/SPI/mi100/saved_analysis",
)
def test_saved_SPI_mi200():
compare(
"cmake/workloads/SPI/mi200/prev_analysis",
"cmake/workloads/SPI/mi200/saved_analysis",
)
def test_saved_roof_only_D_str_inv3_mi200():
compare(
"cmake/workloads/roof_only_D_str_inv3/mi200/prev_analysis",
"cmake/workloads/roof_only_D_str_inv3/mi200/saved_analysis",
)
def test_saved_no_roof_D_val_int_mi100():
compare(
"cmake/workloads/no_roof_D_val_int/mi100/prev_analysis",
"cmake/workloads/no_roof_D_val_int/mi100/saved_analysis",
)
def test_saved_no_roof_D_val_int_mi200():
compare(
"cmake/workloads/no_roof_D_val_int/mi200/prev_analysis",
"cmake/workloads/no_roof_D_val_int/mi200/saved_analysis",
)
def test_saved_K_str_inv2_mi100():
compare(
"cmake/workloads/K_str_inv2/mi100/prev_analysis",
"cmake/workloads/K_str_inv2/mi100/saved_analysis",
)
def test_saved_K_str_inv2_mi200():
compare(
"cmake/workloads/K_str_inv2/mi200/prev_analysis",
"cmake/workloads/K_str_inv2/mi200/saved_analysis",
)
def test_saved_CPC_mi100():
compare(
"cmake/workloads/CPC/mi100/prev_analysis",
"cmake/workloads/CPC/mi100/saved_analysis",
)
def test_saved_CPC_mi200():
compare(
"cmake/workloads/CPC/mi200/prev_analysis",
"cmake/workloads/CPC/mi200/saved_analysis",
)
def test_saved_roof_only_dispatches_mi200():
compare(
"cmake/workloads/roof_only_dispatches/mi200/prev_analysis",
"cmake/workloads/roof_only_dispatches/mi200/saved_analysis",
)
def test_saved_roof_only_mixbench2_mi200():
compare(
"cmake/workloads/roof_only_mixbench2/mi200/prev_analysis",
"cmake/workloads/roof_only_mixbench2/mi200/saved_analysis",
)
def test_saved_Axes4_mi100():
compare(
"cmake/workloads/Axes4/mi100/prev_analysis",
"cmake/workloads/Axes4/mi100/saved_analysis",
)
def test_saved_Axes4_mi200():
compare(
"cmake/workloads/Axes4/mi200/prev_analysis",
"cmake/workloads/Axes4/mi200/saved_analysis",
)
def test_saved_no_roof_TCP_mi100():
compare(
"cmake/workloads/no_roof_TCP/mi100/prev_analysis",
"cmake/workloads/no_roof_TCP/mi100/saved_analysis",
)
def test_saved_no_roof_TCP_mi200():
compare(
"cmake/workloads/no_roof_TCP/mi200/prev_analysis",
"cmake/workloads/no_roof_TCP/mi200/saved_analysis",
)
def test_saved_roof_only_LDS_mi200():
compare(
"cmake/workloads/roof_only_LDS/mi200/prev_analysis",
"cmake/workloads/roof_only_LDS/mi200/saved_analysis",
)
def test_saved_invdev_mi100():
compare(
"cmake/workloads/invdev/mi100/prev_analysis",
"cmake/workloads/invdev/mi100/saved_analysis",
)
def test_saved_invdev_mi200():
compare(
"cmake/workloads/invdev/mi200/prev_analysis",
"cmake/workloads/invdev/mi200/saved_analysis",
)
def test_saved_no_roof_dev0_mi100():
compare(
"cmake/workloads/no_roof_dev0/mi100/prev_analysis",
"cmake/workloads/no_roof_dev0/mi100/saved_analysis",
)
def test_saved_no_roof_dev0_mi200():
compare(
"cmake/workloads/no_roof_dev0/mi200/prev_analysis",
"cmake/workloads/no_roof_dev0/mi200/saved_analysis",
)
def test_saved_roof_only_Axes1_mi200():
compare(
"cmake/workloads/roof_only_Axes1/mi200/prev_analysis",
"cmake/workloads/roof_only_Axes1/mi200/saved_analysis",
)
def test_saved_roof_only_invdev_mi200():
compare(
"cmake/workloads/roof_only_invdev/mi200/prev_analysis",
"cmake/workloads/roof_only_invdev/mi200/saved_analysis",
)
def test_saved_roof_only_D_str_inv2_mi200():
compare(
"cmake/workloads/roof_only_D_str_inv2/mi200/prev_analysis",
"cmake/workloads/roof_only_D_str_inv2/mi200/saved_analysis",
)
def test_saved_no_roof_K_str_inv3_mi100():
compare(
"cmake/workloads/no_roof_K_str_inv3/mi100/prev_analysis",
"cmake/workloads/no_roof_K_str_inv3/mi100/saved_analysis",
)
def test_saved_no_roof_K_str_inv3_mi200():
compare(
"cmake/workloads/no_roof_K_str_inv3/mi200/prev_analysis",
"cmake/workloads/no_roof_K_str_inv3/mi200/saved_analysis",
)
def test_saved_no_roof_K_str_inv2_mi100():
compare(
"cmake/workloads/no_roof_K_str_inv2/mi100/prev_analysis",
"cmake/workloads/no_roof_K_str_inv2/mi100/saved_analysis",
)
def test_saved_no_roof_K_str_inv2_mi200():
compare(
"cmake/workloads/no_roof_K_str_inv2/mi200/prev_analysis",
"cmake/workloads/no_roof_K_str_inv2/mi200/saved_analysis",
)
def test_saved_D_str_inv2_mi100():
compare(
"cmake/workloads/D_str_inv2/mi100/prev_analysis",
"cmake/workloads/D_str_inv2/mi100/saved_analysis",
)
def test_saved_D_str_inv2_mi200():
compare(
"cmake/workloads/D_str_inv2/mi200/prev_analysis",
"cmake/workloads/D_str_inv2/mi200/saved_analysis",
)
def test_saved_kernels_mi100():
compare(
"cmake/workloads/kernels/mi100/prev_analysis",
"cmake/workloads/kernels/mi100/saved_analysis",
)
def test_saved_kernels_mi200():
compare(
"cmake/workloads/kernels/mi200/prev_analysis",
"cmake/workloads/kernels/mi200/saved_analysis",
)
def test_saved_no_roof_Axes4_mi100():
compare(
"cmake/workloads/no_roof_Axes4/mi100/prev_analysis",
"cmake/workloads/no_roof_Axes4/mi100/saved_analysis",
)
def test_saved_no_roof_Axes4_mi200():
compare(
"cmake/workloads/no_roof_Axes4/mi200/prev_analysis",
"cmake/workloads/no_roof_Axes4/mi200/saved_analysis",
)
def test_saved_CMD_INV_mi100():
compare(
"cmake/workloads/CMD_INV/mi100/prev_analysis",
"cmake/workloads/CMD_INV/mi100/saved_analysis",
)
def test_saved_K_int_inv1_mi100():
compare(
"cmake/workloads/K_int_inv1/mi100/prev_analysis",
"cmake/workloads/K_int_inv1/mi100/saved_analysis",
)
def test_saved_K_int_inv1_mi200():
compare(
"cmake/workloads/K_int_inv1/mi200/prev_analysis",
"cmake/workloads/K_int_inv1/mi200/saved_analysis",
)
def test_saved_mixbench2_mi100():
compare(
"cmake/workloads/mixbench2/mi100/prev_analysis",
"cmake/workloads/mixbench2/mi100/saved_analysis",
)
def test_saved_mixbench2_mi200():
compare(
"cmake/workloads/mixbench2/mi200/prev_analysis",
"cmake/workloads/mixbench2/mi200/saved_analysis",
)
def test_saved_roof_only_Double_N_flag_mi200():
compare(
"cmake/workloads/roof_only_Double_N_flag/mi200/prev_analysis",
"cmake/workloads/roof_only_Double_N_flag/mi200/saved_analysis",
)
def test_saved_no_roof_TD_mi100():
compare(
"cmake/workloads/no_roof_TD/mi100/prev_analysis",
"cmake/workloads/no_roof_TD/mi100/saved_analysis",
)
def test_saved_no_roof_TD_mi200():
compare(
"cmake/workloads/no_roof_TD/mi200/prev_analysis",
"cmake/workloads/no_roof_TD/mi200/saved_analysis",
)
def test_saved_TCC_mi100():
compare(
"cmake/workloads/TCC/mi100/prev_analysis",
"cmake/workloads/TCC/mi100/saved_analysis",
)
def test_saved_TCC_mi200():
compare(
"cmake/workloads/TCC/mi200/prev_analysis",
"cmake/workloads/TCC/mi200/saved_analysis",
)
def test_saved_roof_only_dev0_mi200():
compare(
"cmake/workloads/roof_only_dev0/mi200/prev_analysis",
"cmake/workloads/roof_only_dev0/mi200/saved_analysis",
)
def test_saved_no_roof_D_str_inv2_mi100():
compare(
"cmake/workloads/no_roof_D_str_inv2/mi100/prev_analysis",
"cmake/workloads/no_roof_D_str_inv2/mi100/saved_analysis",
)
def test_saved_no_roof_D_str_inv2_mi200():
compare(
"cmake/workloads/no_roof_D_str_inv2/mi200/prev_analysis",
"cmake/workloads/no_roof_D_str_inv2/mi200/saved_analysis",
)
def test_saved_roof_only_L2_mi200():
compare(
"cmake/workloads/roof_only_L2/mi200/prev_analysis",
"cmake/workloads/roof_only_L2/mi200/saved_analysis",
)
def test_saved_no_roof_TA_CPC_mi100():
compare(
"cmake/workloads/no_roof_TA_CPC/mi100/prev_analysis",
"cmake/workloads/no_roof_TA_CPC/mi100/saved_analysis",
)
def test_saved_no_roof_TA_CPC_mi200():
compare(
"cmake/workloads/no_roof_TA_CPC/mi200/prev_analysis",
"cmake/workloads/no_roof_TA_CPC/mi200/saved_analysis",
)
def test_saved_no_roof_Double_N_flag_mi100():
compare(
"cmake/workloads/no_roof_Double_N_flag/mi100/prev_analysis",
"cmake/workloads/no_roof_Double_N_flag/mi100/saved_analysis",
)
def test_saved_no_roof_Double_N_flag_mi200():
compare(
"cmake/workloads/no_roof_Double_N_flag/mi200/prev_analysis",
"cmake/workloads/no_roof_Double_N_flag/mi200/saved_analysis",
)
def test_saved_Double_N_flag_mi100():
compare(
"cmake/workloads/Double_N_flag/mi100/prev_analysis",
"cmake/workloads/Double_N_flag/mi100/saved_analysis",
)
def test_saved_Double_N_flag_mi200():
compare(
"cmake/workloads/Double_N_flag/mi200/prev_analysis",
"cmake/workloads/Double_N_flag/mi200/saved_analysis",
)
def test_saved_roof_only_K_int_inv1_mi200():
compare(
"cmake/workloads/roof_only_K_int_inv1/mi200/prev_analysis",
"cmake/workloads/roof_only_K_int_inv1/mi200/saved_analysis",
)
def test_saved_no_roof_K_str_valid_1_mi100():
compare(
"cmake/workloads/no_roof_K_str_valid_1/mi100/prev_analysis",
"cmake/workloads/no_roof_K_str_valid_1/mi100/saved_analysis",
)
def test_saved_no_roof_K_str_valid_1_mi200():
compare(
"cmake/workloads/no_roof_K_str_valid_1/mi200/prev_analysis",
"cmake/workloads/no_roof_K_str_valid_1/mi200/saved_analysis",
)
def test_saved_roof_only_mixbench1_mi200():
compare(
"cmake/workloads/roof_only_mixbench1/mi200/prev_analysis",
"cmake/workloads/roof_only_mixbench1/mi200/saved_analysis",
)
def test_saved_dev1_mi100():
compare(
"cmake/workloads/dev1/mi100/prev_analysis",
"cmake/workloads/dev1/mi100/saved_analysis",
)
def test_saved_dev1_mi200():
compare(
"cmake/workloads/dev1/mi200/prev_analysis",
"cmake/workloads/dev1/mi200/saved_analysis",
)
def test_saved_no_roof_K_str_inv1_mi100():
compare(
"cmake/workloads/no_roof_K_str_inv1/mi100/prev_analysis",
"cmake/workloads/no_roof_K_str_inv1/mi100/saved_analysis",
)
def test_saved_no_roof_K_str_inv1_mi200():
compare(
"cmake/workloads/no_roof_K_str_inv1/mi200/prev_analysis",
"cmake/workloads/no_roof_K_str_inv1/mi200/saved_analysis",
)
def test_saved_K_str_valid_1_mi100():
compare(
"cmake/workloads/K_str_valid_1/mi100/prev_analysis",
"cmake/workloads/K_str_valid_1/mi100/saved_analysis",
)
def test_saved_K_str_valid_1_mi200():
compare(
"cmake/workloads/K_str_valid_1/mi200/prev_analysis",
"cmake/workloads/K_str_valid_1/mi200/saved_analysis",
)
def test_saved_mixbench1_mi100():
compare(
"cmake/workloads/mixbench1/mi100/prev_analysis",
"cmake/workloads/mixbench1/mi100/saved_analysis",
)
def test_saved_mixbench1_mi200():
compare(
"cmake/workloads/mixbench1/mi200/prev_analysis",
"cmake/workloads/mixbench1/mi200/saved_analysis",
)
def test_saved_no_roof_CMD_INV_mi100():
compare(
"cmake/workloads/no_roof_CMD_INV/mi100/prev_analysis",
"cmake/workloads/no_roof_CMD_INV/mi100/saved_analysis",
)
def test_saved_roof_only_D_str_inv1_mi200():
compare(
"cmake/workloads/roof_only_D_str_inv1/mi200/prev_analysis",
"cmake/workloads/roof_only_D_str_inv1/mi200/saved_analysis",
)
def test_saved_no_roof_HBM_mi100():
compare(
"cmake/workloads/no_roof_HBM/mi100/prev_analysis",
"cmake/workloads/no_roof_HBM/mi100/saved_analysis",
)
def test_saved_no_roof_HBM_mi200():
compare(
"cmake/workloads/no_roof_HBM/mi200/prev_analysis",
"cmake/workloads/no_roof_HBM/mi200/saved_analysis",
)
def test_saved_roof_only_kernels_mi200():
compare(
"cmake/workloads/roof_only_kernels/mi200/prev_analysis",
"cmake/workloads/roof_only_kernels/mi200/saved_analysis",
)
def test_saved_D_int_inv1_mi100():
compare(
"cmake/workloads/D_int_inv1/mi100/prev_analysis",
"cmake/workloads/D_int_inv1/mi100/saved_analysis",
)
def test_saved_D_int_inv1_mi200():
compare(
"cmake/workloads/D_int_inv1/mi200/prev_analysis",
"cmake/workloads/D_int_inv1/mi200/saved_analysis",
)
def test_saved_K_str_valid_2_mi100():
compare(
"cmake/workloads/K_str_valid_2/mi100/prev_analysis",
"cmake/workloads/K_str_valid_2/mi100/saved_analysis",
)
def test_saved_K_str_valid_2_mi200():
compare(
"cmake/workloads/K_str_valid_2/mi200/prev_analysis",
"cmake/workloads/K_str_valid_2/mi200/saved_analysis",
)
def test_saved_TCP_mi100():
compare(
"cmake/workloads/TCP/mi100/prev_analysis",
"cmake/workloads/TCP/mi100/saved_analysis",
)
def test_saved_TCP_mi200():
compare(
"cmake/workloads/TCP/mi200/prev_analysis",
"cmake/workloads/TCP/mi200/saved_analysis",
)
def test_saved_Axes3_mi100():
compare(
"cmake/workloads/Axes3/mi100/prev_analysis",
"cmake/workloads/Axes3/mi100/saved_analysis",
)
def test_saved_Axes3_mi200():
compare(
"cmake/workloads/Axes3/mi200/prev_analysis",
"cmake/workloads/Axes3/mi200/saved_analysis",
)
def test_saved_no_roof_invdev_mi100():
compare(
"cmake/workloads/no_roof_invdev/mi100/prev_analysis",
"cmake/workloads/no_roof_invdev/mi100/saved_analysis",
)
def test_saved_no_roof_invdev_mi200():
compare(
"cmake/workloads/no_roof_invdev/mi200/prev_analysis",
"cmake/workloads/no_roof_invdev/mi200/saved_analysis",
)
def test_saved_no_roof_dispatches_mi100():
compare(
"cmake/workloads/no_roof_dispatches/mi100/prev_analysis",
"cmake/workloads/no_roof_dispatches/mi100/saved_analysis",
)
def test_saved_no_roof_dispatches_mi200():
compare(
"cmake/workloads/no_roof_dispatches/mi200/prev_analysis",
"cmake/workloads/no_roof_dispatches/mi200/saved_analysis",
)
def test_saved_D_str_inv3_mi100():
compare(
"cmake/workloads/D_str_inv3/mi100/prev_analysis",
"cmake/workloads/D_str_inv3/mi100/saved_analysis",
)
def test_saved_D_str_inv3_mi200():
compare(
"cmake/workloads/D_str_inv3/mi200/prev_analysis",
"cmake/workloads/D_str_inv3/mi200/saved_analysis",
)
def test_saved_TA_mi100():
compare(
"cmake/workloads/TA/mi100/prev_analysis",
"cmake/workloads/TA/mi100/saved_analysis",
)
def test_saved_TA_mi200():
compare(
"cmake/workloads/TA/mi200/prev_analysis",
"cmake/workloads/TA/mi200/saved_analysis",
)
def test_saved_no_roof_D_int_inv1_mi100():
compare(
"cmake/workloads/no_roof_D_int_inv1/mi100/prev_analysis",
"cmake/workloads/no_roof_D_int_inv1/mi100/saved_analysis",
)
def test_saved_no_roof_D_int_inv1_mi200():
compare(
"cmake/workloads/no_roof_D_int_inv1/mi200/prev_analysis",
"cmake/workloads/no_roof_D_int_inv1/mi200/saved_analysis",
)
def test_saved_dispatches_mi100():
compare(
"cmake/workloads/dispatches/mi100/prev_analysis",
"cmake/workloads/dispatches/mi100/saved_analysis",
)
def test_saved_dispatches_mi200():
compare(
"cmake/workloads/dispatches/mi200/prev_analysis",
"cmake/workloads/dispatches/mi200/saved_analysis",
)
def test_saved_roof_only_dev1_mi200():
compare(
"cmake/workloads/roof_only_dev1/mi200/prev_analysis",
"cmake/workloads/roof_only_dev1/mi200/saved_analysis",
)
def test_saved_no_roof_K_int_inv2_mi100():
compare(
"cmake/workloads/no_roof_K_int_inv2/mi100/prev_analysis",
"cmake/workloads/no_roof_K_int_inv2/mi100/saved_analysis",
)
def test_saved_no_roof_K_int_inv2_mi200():
compare(
"cmake/workloads/no_roof_K_int_inv2/mi200/prev_analysis",
"cmake/workloads/no_roof_K_int_inv2/mi200/saved_analysis",
)
|
ed664fa612a27bf681cd068d8f836fd9f8ee38ba
|
38e277526f58eb08ac12257c0297b9d46c421c75
|
/himl/inject_secrets.py
|
029c2f514325d1053d201f2af9c8b99c1f643caf
|
[
"Apache-2.0"
] |
permissive
|
adobe/himl
|
fdfdbc83caea55d67df8a3bf885b56d80b6bf8f9
|
eb447d157ddd02ef41a07ae0d705e4d3bf6e8d23
|
refs/heads/master
| 2023-09-04T03:35:15.369780
| 2023-08-06T19:35:55
| 2023-08-06T19:35:55
| 202,412,608
| 107
| 31
|
Apache-2.0
| 2023-09-12T12:51:41
| 2019-08-14T19:23:44
|
Python
|
UTF-8
|
Python
| false
| false
| 2,581
|
py
|
inject_secrets.py
|
# Copyright 2019 Adobe. All rights reserved.
# This file is licensed to you under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License. You may obtain a copy
# of the License at http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software distributed under
# the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR REPRESENTATIONS
# OF ANY KIND, either express or implied. See the License for the specific language
# governing permissions and limitations under the License.
import re
from .secret_resolvers import AggregatedSecretResolver
try:
from functools import lru_cache
except ImportError:
from backports.functools_lru_cache import lru_cache
class SecretInjector(object):
"""
Resolve secrets in the form:
{{ssm.path(/aam/artifactory/grafana/password).aws_profile(aam-npe)}}
or
{{vault.kv2.path(ethos/k8s-ethos-config/thrash/aws/ClusterIngressTLS).field(Key)}}
"""
def __init__(self, default_aws_profile=None):
self.resolver = AggregatedSecretResolver(default_aws_profile)
def is_interpolation(self, value):
return value.startswith('{{') and value.endswith('}}')
@lru_cache(maxsize=2048)
def inject_secret(self, line):
"""
Check if value is an interpolation and try to resolve it.
Uses a cache, in order to not fetch same secret multiple times.
"""
if not self.is_interpolation(line):
return line
# remove {{ and }}
updated_line = line[2:-2]
        # parse each key/value pair (e.g. path(my_pwd))
parts = self.split_dot_not_within_parentheses(updated_line)
if len(parts) <= 1:
return line
secret_type = parts[0]
secret_params = {}
for part in parts:
if '(' not in part:
secret_params[part] = None
else:
key = part.split('(')[0]
value = part.split('(')[1].split(')')[0]
secret_params[key] = value
if self.resolver.supports(secret_type):
return self.resolver.resolve(secret_type, secret_params)
else:
return line
def split_dot_not_within_parentheses(self, line):
"""
s3.bucket(my-bucket).path(path/to/file.txt).aws_profile(myprofile)
will result in:
        ['s3', 'bucket(my-bucket)', 'path(path/to/file.txt)', 'aws_profile(myprofile)']
"""
pattern = r'\.\s*(?![^()]*\))'
return re.split(pattern, line)
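# Hypothetical usage sketch (the SSM path below is a placeholder, not from the
# source; actual resolution depends on a resolver that supports "ssm"):
#   injector = SecretInjector(default_aws_profile="my-profile")
#   injector.inject_secret("{{ssm.path(/team/app/password)}}")
# Values that are not wrapped in {{ }} pass through unchanged:
#   injector.inject_secret("plain-value")  # -> "plain-value"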
|
583e972b0ecc9df4714db729c1963c7078c01b4b
|
5fdfa2069b1aa05f61852b498328366d3dcfeb2a
|
/2021_05_12/dojo.py
|
7eb53fb625b64e99359d1f22ec025f90def48713
|
[
"MIT"
] |
permissive
|
globocom/dojo
|
5110b5ed86734d49fd0934d8701d5016e7e27e0d
|
8df96c932f61645e9717197e5b58ca60909c7fc1
|
refs/heads/master
| 2022-07-21T17:59:16.133549
| 2022-06-22T18:17:01
| 2022-06-22T18:17:01
| 2,145,424
| 121
| 40
|
MIT
| 2022-02-17T17:21:46
| 2011-08-02T22:11:54
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 1,314
|
py
|
dojo.py
|
import time
def main():
cells = 50000
func1(cells)
# Function func1 finished in 12.51378083
func2(cells)
# Function func2 finished in 0.20699382
func3(cells)
# Function func3 finished in 0.00020552
    func4(cells)
    # Function func4 finished in 0.00004649
    func5(cells)
    # Function func5 finished in 0.00002432
return True
def time_decorator(func):
def wrapper(arg):
start = time.time()
result = func(arg)
finish = time.time()
print(f"Function {func.__name__} finished in {finish - start:.8f}")
return result
return wrapper
@time_decorator
def func1(cells):
    su = 0
    for i in range(cells):
        su += 2**i
    return su//12//1000
# shift bits
# 1: 0001
# 2: 0010 <-
# 4: 0100
# 8: 1000 # 2^3
#
# def func2(cells):
# su = 0
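# Worked check (illustration, not in the original dojo file): the loop sum of
# powers of two equals the all-ones bit pattern, so
#   sum(2**i for i in range(4)) == 0b1111 == (1 << 4) - 1 == 15
# which is why func3/func4 can replace the loop with a single expression.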
@time_decorator
def func2(cells):
su = 0
for i in range(cells):
su += 1<<i
return su//12//1000
@time_decorator
def func3(cells):
    su = 2 ** cells - 1
return su//12//1000
@time_decorator
def func4(cells):
su = (1 << cells) - 1
return su//12//1000
@time_decorator
def func5(cells):
su = (1 << cells) - 1
return su//12000
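# Why func3/func4/func5 need no loop: the loops above accumulate the
# geometric series 2**0 + 2**1 + ... + 2**(cells - 1), which closes to
# 2**cells - 1 (equivalently (1 << cells) - 1), so the whole sum collapses
# to a single expression.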
if __name__ == "__main__":
main()
|
b8ffbb96f21e85cda1994133aea61fbb05e5107f
|
b056263a62d97b90103a5fa89adb90ed765ac72a
|
/meetings/read_minutes.py
|
389199de65949905d89b63d2c34d1cdee48e8204
|
[
"Apache-2.0"
] |
permissive
|
ansible/community
|
faa67ac0e5731e89d250cbad276906ce0cd86a07
|
8cd4f2e31bd32d28b2602de64f9d4521c69c70bf
|
refs/heads/main
| 2023-06-22T07:26:15.032372
| 2023-06-13T09:59:30
| 2023-06-13T09:59:30
| 52,299,866
| 527
| 164
|
Apache-2.0
| 2023-09-14T10:33:22
| 2016-02-22T19:35:08
|
HTML
|
UTF-8
|
Python
| false
| false
| 2,834
|
py
|
read_minutes.py
|
#!/usr/bin/env python3
import argparse
import requests
TEMPLATE = """
{date}
==========
{content}
Logs
----
{logs}
"""
LOG_MAP = [
("Minutes:", "html"),
("Minutes (text):", "txt"),
("Log:", "log.html"),
]
def read(url):
date = url.split("/")[-2]
minutes = requests.get(url).text
sections = minutes.split("\n\n\n\n")
topics = format_topics(sections[1])
actions = format_actions(sections[2])
content = topics
if actions:
content += f"\n\n{actions}"
logs = []
for prefix, extension in LOG_MAP:
mangled_url = url.rsplit(".", 1)[0]
logs.append(f"{prefix} {mangled_url}.{extension}")
print(TEMPLATE.format(date=date, content=content, logs="\n".join(logs)))
def format_topics(summary):
topics = []
for line in summary.split("\n"):
if not line.strip():
continue
if line[0] == "*":
topics.append((clean_line(line), []))
elif line.startswith(" *"):
topics[-1][1].append(line)
elif line.startswith(" "):
topics[-1][1][-1] += f" {line.strip()}"
topics_text = []
for topic_name, items in topics:
items = [clean_line(item) for item in items]
if not any(items):
continue
topics_text.extend(["", topic_name, '-' * len(topic_name), ""])
for item in items:
if item:
topics_text.append(f"* {item}")
return "\n".join(topics_text)
def format_actions(action_items):
actions = []
last_action = None
for line in action_items.split("\n"):
if line.startswith("* "):
assignee, action = line[2:].split(" ", 1)
last_action = len(actions)
actions.append(f"- [ ] @{assignee} {action}")
elif line.startswith(" ") and last_action is not None:
actions[last_action] += f" {line[2:]}"
else:
last_action = None
if any(actions):
actions = ["Actions", "-------", ""] + actions
return "\n".join(actions)
def clean_line(line):
trimmed = line.strip()[2:]
if trimmed.startswith("ACTION:"):
# Actions get handled later
return ""
trimmed = trimmed.replace("AGREED:", ":+1:")
trimmed = trimmed.replace("IDEA:", "💡")
trimmed = trimmed.replace("LINK:", "")
return trimmed.rsplit(" (", 1)[0].strip()
def txt_url(string):
if string[-4:] != ".txt":
raise argparse.ArgumentTypeError("URL must end in `.txt`")
return string
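# Example invocation (the URL is hypothetical, for illustration only):
#   ./read_minutes.py https://meetbot.example.org/channel/2021-01-01/channel.2021-01-01-18.00.txt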
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Fetches MeetBot minutes and generates a Markdown summary for adding to GitHub")
parser.add_argument("url", type=txt_url, help="URL to 'Minutes (text)' log. Must end with `.txt`")
args = parser.parse_args()
read(args.url)
|
484fc3f9ae706e2ac39ccef30dead75c838e3317
|
5a52ccea88f90dd4f1acc2819997fce0dd5ffb7d
|
/alipay/aop/api/response/AlipayCommerceEducateCampusIdentityQueryResponse.py
|
84d0e4d9fcc97be75dbe5133716c77ac9f7ae7c3
|
[
"Apache-2.0"
] |
permissive
|
alipay/alipay-sdk-python-all
|
8bd20882852ffeb70a6e929038bf88ff1d1eff1c
|
1fad300587c9e7e099747305ba9077d4cd7afde9
|
refs/heads/master
| 2023-08-27T21:35:01.778771
| 2023-08-23T07:12:26
| 2023-08-23T07:12:26
| 133,338,689
| 247
| 70
|
Apache-2.0
| 2023-04-25T04:54:02
| 2018-05-14T09:40:54
|
Python
|
UTF-8
|
Python
| false
| false
| 841
|
py
|
AlipayCommerceEducateCampusIdentityQueryResponse.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.response.AlipayResponse import AlipayResponse
class AlipayCommerceEducateCampusIdentityQueryResponse(AlipayResponse):
def __init__(self):
super(AlipayCommerceEducateCampusIdentityQueryResponse, self).__init__()
self._college_online_tag = None
@property
def college_online_tag(self):
return self._college_online_tag
@college_online_tag.setter
def college_online_tag(self, value):
self._college_online_tag = value
def parse_response_content(self, response_content):
response = super(AlipayCommerceEducateCampusIdentityQueryResponse, self).parse_response_content(response_content)
if 'college_online_tag' in response:
self.college_online_tag = response['college_online_tag']
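# Illustrative sketch: parse_response_content() is normally invoked by the
# SDK's client with the raw gateway JSON (assuming the base class
# JSON-decodes the body into a dict):
#   resp = AlipayCommerceEducateCampusIdentityQueryResponse()
#   resp.parse_response_content('{"college_online_tag": "Y"}')
#   print(resp.college_online_tag)  # -> Y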
|
50918f129705c1f4ac69d3ca4df26471b754ddd0
|
e7bf1ff05319acc59bba5af5890041bd82c3e197
|
/mne/io/egi/tests/test_egi.py
|
7ed2743249ddcf5a5b1c8fd11d81b04122f3de00
|
[
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
mne-tools/mne-python
|
7e8d7e945dfbbee6432a4955cf050fa823f2d34b
|
f44636f00666b8eb869417960926d01690ff4f42
|
refs/heads/main
| 2023-09-04T03:05:37.402100
| 2023-09-03T14:15:18
| 2023-09-03T14:15:18
| 1,301,584
| 2,437
| 1,418
|
BSD-3-Clause
| 2023-09-14T19:23:38
| 2011-01-28T03:31:13
|
Python
|
UTF-8
|
Python
| false
| false
| 20,110
|
py
|
test_egi.py
|
# Authors: Denis A. Engemann <denis.engemann@gmail.com>
# simplified BSD-3 license
from copy import deepcopy
from pathlib import Path
import os
import shutil
from datetime import datetime, timezone
import numpy as np
from numpy.testing import assert_array_equal, assert_allclose
import pytest
from scipy import io as sio
from mne import find_events, pick_types
from mne.io import read_raw_egi, read_evokeds_mff, read_raw_fif
from mne._fiff.constants import FIFF
from mne.io.egi.egi import _combine_triggers
from mne.io.tests.test_raw import _test_raw_reader
from mne.utils import object_diff
from mne.datasets.testing import data_path, requires_testing_data
base_dir = Path(__file__).parent / "data"
egi_fname = base_dir / "test_egi.raw"
egi_txt_fname = base_dir / "test_egi.txt"
testing_path = data_path(download=False)
egi_path = testing_path / "EGI"
egi_mff_fname = egi_path / "test_egi.mff"
egi_mff_pns_fname = egi_path / "test_egi_pns.mff"
egi_pause_fname = egi_path / "test_egi_multiepoch_paused.mff"
egi_eprime_pause_fname = egi_path / "test_egi_multiepoch_eprime.mff"
egi_pause_w1337_fname = egi_path / "w1337_20191014_105416.mff"
egi_mff_evoked_fname = egi_path / "test_egi_evoked.mff"
egi_txt_evoked_cat1_fname = egi_path / "test_egi_evoked_cat1.txt"
egi_txt_evoked_cat2_fname = egi_path / "test_egi_evoked_cat2.txt"
# absolute event times from NetStation
egi_pause_events = {
"AM40": [7.224, 11.928, 14.413, 16.848],
"bgin": [6.121, 8.434, 13.369, 15.815, 18.094],
"FIX+": [6.225, 10.929, 13.414, 15.849],
"ITI+": [8.293, 12.997, 15.482, 17.918],
}
# absolute epoch times
egi_pause_skips = [(1304000.0, 1772000.0), (8660000.0, 12296000.0)]
egi_eprime_pause_events = {
"AM40": [6.049, 8.434, 10.936, 13.321],
"bgin": [4.902, 7.381, 9.901, 12.268, 14.619],
"FIX+": [5.050, 7.435, 9.937, 12.322],
"ITI+": [7.185, 9.503, 12.005, 14.391],
}
egi_eprime_pause_skips = [(1344000.0, 1804000.0)]
egi_pause_w1337_events = None
egi_pause_w1337_skips = [(21956000.0, 40444000.0), (60936000.0, 89332000.0)]
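# The skip intervals above are (onset, offset) pairs in microseconds, matching
# the `* 1e6` seconds-to-microseconds conversion applied when the annotations
# are checked in test_egi_mff_pause below.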
@requires_testing_data
@pytest.mark.parametrize(
"fname, skip_times, event_times",
[
(egi_pause_fname, egi_pause_skips, egi_pause_events),
(egi_eprime_pause_fname, egi_eprime_pause_skips, egi_eprime_pause_events),
(egi_pause_w1337_fname, egi_pause_w1337_skips, egi_pause_w1337_events),
],
)
def test_egi_mff_pause(fname, skip_times, event_times):
"""Test EGI MFF with pauses."""
if fname == egi_pause_w1337_fname:
        # too slow to run through _test_raw_reader
raw = read_raw_egi(fname).load_data()
else:
with pytest.warns(RuntimeWarning, match="Acquisition skips detected"):
raw = _test_raw_reader(
read_raw_egi,
input_fname=fname,
test_scaling=False, # XXX probably some bug
test_rank="less",
)
assert raw.info["sfreq"] == 250.0 # true for all of these files
assert len(raw.annotations) == len(skip_times)
# assert event onsets match expected times
if event_times is None:
with pytest.raises(ValueError, match="Consider using .*events_from"):
find_events(raw)
else:
events = find_events(raw)
for event_type in event_times.keys():
ns_samples = np.floor(np.array(event_times[event_type]) * raw.info["sfreq"])
assert_array_equal(
events[events[:, 2] == raw.event_id[event_type], 0], ns_samples
)
# read some data from the middle of the skip, assert it's all zeros
stim_picks = pick_types(raw.info, meg=False, stim=True, exclude=())
other_picks = np.setdiff1d(np.arange(len(raw.ch_names)), stim_picks)
for ii, annot in enumerate(raw.annotations):
assert annot["description"] == "BAD_ACQ_SKIP"
start, stop = raw.time_as_index(
[annot["onset"], annot["onset"] + annot["duration"]]
)
data, _ = raw[:, start:stop]
assert_array_equal(data[other_picks], 0.0)
if event_times is not None:
assert raw.ch_names[-1] == "STI 014"
assert not np.array_equal(data[stim_picks], 0.0)
# assert skips match expected onset and duration
skip = (
(start + 1) / raw.info["sfreq"] * 1e6,
(stop + 1) / raw.info["sfreq"] * 1e6,
)
assert skip == skip_times[ii]
@requires_testing_data
@pytest.mark.parametrize(
"fname",
[
egi_pause_fname,
egi_eprime_pause_fname,
egi_pause_w1337_fname,
],
)
def test_egi_mff_pause_chunks(fname, tmp_path):
"""Test that on-demand of all short segments works (via I/O)."""
fname_temp = tmp_path / "test_raw.fif"
raw_data = read_raw_egi(fname, preload=True).get_data()
raw = read_raw_egi(fname)
with pytest.warns(RuntimeWarning, match="Acquisition skips detected"):
raw.save(fname_temp)
del raw
raw_data_2 = read_raw_fif(fname_temp).get_data()
assert_allclose(raw_data, raw_data_2)
@requires_testing_data
def test_io_egi_mff():
"""Test importing EGI MFF simple binary files."""
# want vars for n chans
n_ref = 1
n_eeg = 128
n_card = 3
raw = read_raw_egi(egi_mff_fname, include=None)
assert "RawMff" in repr(raw)
assert raw.orig_format == "single"
include = ["DIN1", "DIN2", "DIN3", "DIN4", "DIN5", "DIN7"]
raw = _test_raw_reader(
read_raw_egi,
input_fname=egi_mff_fname,
include=include,
channel_naming="EEG %03d",
test_scaling=False, # XXX probably some bug
)
assert raw.info["sfreq"] == 1000.0
assert len(raw.info["dig"]) == n_card + n_eeg + n_ref
assert raw.info["dig"][0]["ident"] == FIFF.FIFFV_POINT_LPA
assert raw.info["dig"][0]["kind"] == FIFF.FIFFV_POINT_CARDINAL
assert raw.info["dig"][3]["kind"] == FIFF.FIFFV_POINT_EEG
assert raw.info["dig"][-1]["ident"] == 129
# This is not a custom reference, it's consistent across all channels
assert raw.info["custom_ref_applied"] == FIFF.FIFFV_MNE_CUSTOM_REF_OFF
ref_loc = raw.info["dig"][-1]["r"]
eeg_picks = pick_types(raw.info, eeg=True)
assert len(eeg_picks) == n_eeg + n_ref # 129
    # the ref channel stores its own loc as the ref location, so test it too
for i in eeg_picks:
loc = raw.info["chs"][i]["loc"]
assert loc[:3].any(), loc[:3]
assert_array_equal(loc[3:6], ref_loc, err_msg=f"{i}")
assert raw.info["device_info"]["type"] == "HydroCel GSN 128 1.0"
assert "eeg" in raw
# test our custom channel naming logic functionality
eeg_chan = [c for c in raw.ch_names if "EEG" in c]
assert len(eeg_chan) == n_eeg # 128: VREF will not match in comprehension
assert "STI 014" in raw.ch_names
events = find_events(raw, stim_channel="STI 014")
assert len(events) == 8
assert np.unique(events[:, 1])[0] == 0
assert np.unique(events[:, 0])[0] != 0
assert np.unique(events[:, 2])[0] != 0
with pytest.raises(ValueError, match="Could not find event"):
read_raw_egi(egi_mff_fname, include=["Foo"])
with pytest.raises(ValueError, match="Could not find event"):
read_raw_egi(egi_mff_fname, exclude=["Bar"])
for ii, k in enumerate(include, 1):
assert k in raw.event_id
assert raw.event_id[k] == ii
def test_io_egi():
"""Test importing EGI simple binary files."""
# test default
with open(egi_txt_fname) as fid:
data = np.loadtxt(fid)
t = data[0]
data = data[1:]
data *= 1e-6 # µV
with pytest.warns(RuntimeWarning, match="Did not find any event code"):
raw = read_raw_egi(egi_fname, include=None)
# The reader should accept a Path, too.
with pytest.warns(RuntimeWarning, match="Did not find any event code"):
raw = read_raw_egi(Path(egi_fname), include=None)
assert "RawEGI" in repr(raw)
data_read, t_read = raw[:256]
assert_allclose(t_read, t)
assert_allclose(data_read, data, atol=1e-10)
include = ["TRSP", "XXX1"]
raw = _test_raw_reader(
read_raw_egi,
input_fname=egi_fname,
include=include,
test_rank="less",
test_scaling=False, # XXX probably some bug
)
assert "eeg" in raw
assert raw.orig_format == "single"
eeg_chan = [c for c in raw.ch_names if c.startswith("E")]
assert len(eeg_chan) == 256
picks = pick_types(raw.info, eeg=True)
assert len(picks) == 256
assert "STI 014" in raw.ch_names
events = find_events(raw, stim_channel="STI 014")
assert len(events) == 2 # ground truth
assert np.unique(events[:, 1])[0] == 0
assert np.unique(events[:, 0])[0] != 0
assert np.unique(events[:, 2])[0] != 0
# test trigger functionality
triggers = np.array([[0, 1, 0, 0], [0, 0, 1, 0]])
events_ids = [12, 24]
new_trigger = _combine_triggers(triggers, events_ids)
assert_array_equal(np.unique(new_trigger), np.unique([0, 12, 24]))
pytest.raises(ValueError, read_raw_egi, egi_fname, include=["Foo"], preload=False)
pytest.raises(ValueError, read_raw_egi, egi_fname, exclude=["Bar"], preload=False)
for ii, k in enumerate(include, 1):
assert k in raw.event_id
assert raw.event_id[k] == ii
@requires_testing_data
def test_io_egi_pns_mff(tmp_path):
"""Test importing EGI MFF with PNS data."""
raw = read_raw_egi(egi_mff_pns_fname, include=None, preload=True, verbose="error")
assert "RawMff" in repr(raw)
pns_chans = pick_types(raw.info, ecg=True, bio=True, emg=True)
assert len(pns_chans) == 7
names = [raw.ch_names[x] for x in pns_chans]
pns_names = [
"Resp. Temperature",
"Resp. Pressure",
"ECG",
"Body Position",
"Resp. Effort Chest",
"Resp. Effort Abdomen",
"EMG-Leg",
]
_test_raw_reader(
read_raw_egi,
input_fname=egi_mff_pns_fname,
channel_naming="EEG %03d",
verbose="error",
test_rank="less",
test_scaling=False, # XXX probably some bug
)
assert names == pns_names
mat_names = [
"Resp_Temperature",
"Resp_Pressure",
"ECG",
"Body_Position",
"Resp_Effort_Chest",
"Resp_Effort_Abdomen",
"EMGLeg",
]
egi_fname_mat = testing_path / "EGI" / "test_egi_pns.mat"
mc = sio.loadmat(egi_fname_mat)
for ch_name, ch_idx, mat_name in zip(pns_names, pns_chans, mat_names):
print("Testing {}".format(ch_name))
mc_key = [x for x in mc.keys() if mat_name in x][0]
cal = raw.info["chs"][ch_idx]["cal"]
mat_data = mc[mc_key] * cal
raw_data = raw[ch_idx][0]
assert_array_equal(mat_data, raw_data)
# EEG missing
new_mff = tmp_path / "temp.mff"
shutil.copytree(egi_mff_pns_fname, new_mff)
read_raw_egi(new_mff, verbose="error")
os.remove(new_mff / "info1.xml")
os.remove(new_mff / "signal1.bin")
with pytest.raises(FileNotFoundError, match="Could not find any EEG"):
read_raw_egi(new_mff, verbose="error")
@requires_testing_data
@pytest.mark.parametrize("preload", (True, False))
def test_io_egi_pns_mff_bug(preload):
"""Test importing EGI MFF with PNS data (BUG)."""
egi_fname_mff = testing_path / "EGI" / "test_egi_pns_bug.mff"
with pytest.warns(RuntimeWarning, match="EGI PSG sample bug"):
raw = read_raw_egi(
egi_fname_mff, include=None, preload=preload, verbose="warning"
)
assert len(raw.annotations) == 1
assert_allclose(raw.annotations.duration, [0.004])
assert_allclose(raw.annotations.onset, [13.948])
egi_fname_mat = testing_path / "EGI" / "test_egi_pns.mat"
mc = sio.loadmat(egi_fname_mat)
pns_chans = pick_types(raw.info, ecg=True, bio=True, emg=True)
pns_names = [
"Resp. Temperature"[:15],
"Resp. Pressure",
"ECG",
"Body Position",
"Resp. Effort Chest"[:15],
"Resp. Effort Abdomen"[:15],
"EMG-Leg",
]
mat_names = [
"Resp_Temperature"[:15],
"Resp_Pressure",
"ECG",
"Body_Position",
"Resp_Effort_Chest"[:15],
"Resp_Effort_Abdomen"[:15],
"EMGLeg",
]
for ch_name, ch_idx, mat_name in zip(pns_names, pns_chans, mat_names):
print("Testing {}".format(ch_name))
mc_key = [x for x in mc.keys() if mat_name in x][0]
cal = raw.info["chs"][ch_idx]["cal"]
mat_data = mc[mc_key] * cal
mat_data[:, -1] = 0 # The MFF has one less sample, the last one
raw_data = raw[ch_idx][0]
assert_array_equal(mat_data, raw_data)
@requires_testing_data
def test_io_egi_crop_no_preload():
"""Test crop non-preloaded EGI MFF data (BUG)."""
raw = read_raw_egi(egi_mff_fname, preload=False)
raw.crop(17.5, 20.5)
raw.load_data()
raw_preload = read_raw_egi(egi_mff_fname, preload=True)
raw_preload.crop(17.5, 20.5)
raw_preload.load_data()
assert_allclose(raw._data, raw_preload._data)
@pytest.mark.filterwarnings("ignore::FutureWarning")
@requires_testing_data
@pytest.mark.parametrize(
"idx, cond, tmax, signals, bads",
[
(
0,
"Category 1",
0.016,
egi_txt_evoked_cat1_fname,
["E8", "E11", "E17", "E28", "ECG"],
),
(1, "Category 2", 0.0, egi_txt_evoked_cat2_fname, ["VREF", "EMG"]),
],
)
def test_io_egi_evokeds_mff(idx, cond, tmax, signals, bads):
"""Test reading evoked MFF file."""
pytest.importorskip("mffpy", "0.5.7")
# expected n channels
n_eeg = 256
n_ref = 1
n_card = 3
n_pns = 2 # 1 ECG + 1 EMG
# Test reading all conditions from evokeds
evokeds = read_evokeds_mff(egi_mff_evoked_fname)
assert len(evokeds) == 2
# Test reading list of conditions from evokeds
evokeds = read_evokeds_mff(egi_mff_evoked_fname, condition=[0, 1])
assert len(evokeds) == 2
# Test invalid condition
with pytest.raises(ValueError) as exc_info:
read_evokeds_mff(egi_mff_evoked_fname, condition="Invalid Condition")
message = (
"Invalid value for the 'condition' parameter provided as "
"category name. Allowed values are 'Category 1' and "
"'Category 2', but got 'Invalid Condition' instead."
)
assert str(exc_info.value) == message
with pytest.raises(ValueError) as exc_info:
read_evokeds_mff(egi_mff_evoked_fname, condition=2)
message = (
'"condition" parameter (2), provided as epoch index, '
"is out of range for available epochs (2)."
)
assert str(exc_info.value) == message
with pytest.raises(TypeError) as exc_info:
read_evokeds_mff(egi_mff_evoked_fname, condition=1.2)
message = '"condition" parameter must be either int or str.'
assert str(exc_info.value) == message
# Test reading evoked data from single condition
evoked_cond = read_evokeds_mff(egi_mff_evoked_fname, condition=cond)
evoked_idx = read_evokeds_mff(egi_mff_evoked_fname, condition=idx)
for evoked in [evoked_cond, evoked_idx]:
assert evoked.comment == cond
assert evoked.nave == 3
assert evoked.tmin == 0.0
assert evoked.tmax == tmax
# Check signal data
data = np.loadtxt(signals, ndmin=2).T * 1e-6 # convert to volts
assert_allclose(evoked_cond.data, data, atol=1e-12)
assert_allclose(evoked_idx.data, data, atol=1e-12)
# Check info
assert object_diff(evoked_cond.info, evoked_idx.info) == ""
assert evoked_cond.info["description"] == cond
assert evoked_cond.info["bads"] == bads
assert len(evoked_cond.info["ch_names"]) == n_eeg + n_ref + n_pns # 259
assert "ECG" in evoked_cond.info["ch_names"]
assert "EMG" in evoked_cond.info["ch_names"]
assert "ecg" in evoked_cond
assert "emg" in evoked_cond
pick_eeg = pick_types(evoked_cond.info, eeg=True, exclude=[])
assert len(pick_eeg) == n_eeg + n_ref # 257
assert evoked_cond.info["nchan"] == n_eeg + n_ref + n_pns # 259
assert evoked_cond.info["sfreq"] == 250.0
assert not evoked_cond.info["custom_ref_applied"]
assert len(evoked_cond.info["dig"]) == n_card + n_eeg + n_ref
assert evoked_cond.info["device_info"]["type"] == "HydroCel GSN 256 1.0"
@pytest.mark.filterwarnings("ignore::FutureWarning")
@requires_testing_data
def test_read_evokeds_mff_bad_input():
"""Test errors are thrown when reading invalid input file."""
pytest.importorskip("mffpy", "0.5.7")
# Test file that is not an MFF
with pytest.raises(ValueError) as exc_info:
read_evokeds_mff(egi_fname)
message = 'fname must be an MFF file with extension ".mff".'
assert str(exc_info.value) == message
# Test continuous MFF
with pytest.raises(ValueError) as exc_info:
read_evokeds_mff(egi_mff_fname)
message = (
f"{egi_mff_fname} is a continuous MFF file. "
"fname must be the path to an averaged MFF file."
)
assert str(exc_info.value) == message
@requires_testing_data
def test_egi_coord_frame():
"""Test that EGI coordinate frame is changed to head."""
info = read_raw_egi(egi_mff_fname).info
want_idents = (
FIFF.FIFFV_POINT_LPA,
FIFF.FIFFV_POINT_NASION,
FIFF.FIFFV_POINT_RPA,
)
for ii, want in enumerate(want_idents):
d = info["dig"][ii]
assert d["kind"] == FIFF.FIFFV_POINT_CARDINAL
assert d["ident"] == want
loc = d["r"]
if ii == 0:
assert 0.05 < -loc[0] < 0.1, "LPA"
assert_allclose(loc[1:], 0, atol=1e-7, err_msg="LPA")
elif ii == 1:
assert 0.05 < loc[1] < 0.11, "Nasion"
assert_allclose(loc[::2], 0, atol=1e-7, err_msg="Nasion")
else:
assert ii == 2
assert 0.05 < loc[0] < 0.1, "RPA"
assert_allclose(loc[1:], 0, atol=1e-7, err_msg="RPA")
for d in info["dig"][3:]:
assert d["kind"] == FIFF.FIFFV_POINT_EEG
@requires_testing_data
@pytest.mark.parametrize(
"fname, timestamp, utc_offset",
[
(egi_mff_fname, "2017-02-23T11:35:13.220824+01:00", "+0100"),
(egi_mff_pns_fname, "2017-09-20T09:55:44.072000+01:00", "+0100"),
(egi_eprime_pause_fname, "2018-07-30T10:46:09.621673-04:00", "-0400"),
(egi_pause_w1337_fname, "2019-10-14T10:54:27.395210-07:00", "-0700"),
],
)
def test_meas_date(fname, timestamp, utc_offset):
"""Test meas date conversion."""
raw = read_raw_egi(fname, verbose="warning")
dt = datetime.strptime(timestamp, "%Y-%m-%dT%H:%M:%S.%f%z")
measdate = dt.astimezone(timezone.utc)
hour_local = int(dt.strftime("%H"))
hour_utc = int(raw.info["meas_date"].strftime("%H"))
local_utc_diff = hour_local - hour_utc
assert raw.info["meas_date"] == measdate
assert raw.info["utc_offset"] == utc_offset
assert local_utc_diff == int(utc_offset[:-2])
@requires_testing_data
@pytest.mark.parametrize(
"fname, standard_montage",
[
(egi_mff_fname, "GSN-HydroCel-129"), # 129 chan EGI file
(egi_mff_pns_fname, "GSN-HydroCel-257"), # 257 chan EGI file
],
)
def test_set_standard_montage_mff(fname, standard_montage):
"""Test setting a standard montage."""
raw = read_raw_egi(fname, verbose="warning")
n_eeg = int(standard_montage.split("-")[-1])
n_dig = n_eeg + 3
dig_before_mon = deepcopy(raw.info["dig"])
assert len(dig_before_mon) == n_dig
ref_loc = dig_before_mon[-1]["r"]
picks = pick_types(raw.info, eeg=True)
assert len(picks) == n_eeg
for pick in picks:
assert_allclose(raw.info["chs"][pick]["loc"][3:6], ref_loc)
raw.set_montage(standard_montage, match_alias=True, on_missing="ignore")
dig_after_mon = raw.info["dig"]
# No dig entries should have been dropped while setting montage
assert len(dig_before_mon) == n_dig
assert len(dig_after_mon) == n_dig
# Check that the reference remained
for pick in picks:
assert_allclose(raw.info["chs"][pick]["loc"][3:6], ref_loc)
|
e1f30f54b69c386cd1bb49f936440a65ee37d3ee
|
0db05f7b843e8450bafd5ae23f8f70f9a9a8c151
|
/Src/StdLib/Lib/test/test_ttk_guionly.py
|
3f7ff65f2472dac75deb7d501f4ea672f1e77b26
|
[
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] |
permissive
|
IronLanguages/ironpython2
|
9c7f85bd8e6bca300e16f8c92f6384cecb979a6a
|
d00111890ce41b9791cb5bc55aedd071240252c4
|
refs/heads/master
| 2023-01-21T21:17:59.439654
| 2023-01-13T01:52:15
| 2023-01-13T01:52:15
| 91,620,472
| 1,171
| 288
|
Apache-2.0
| 2023-01-13T01:52:16
| 2017-05-17T21:11:51
|
Python
|
UTF-8
|
Python
| false
| false
| 999
|
py
|
test_ttk_guionly.py
|
import os
import unittest
from test import test_support
# Skip this test if _tkinter wasn't built or gui resource is not available.
test_support.import_module('_tkinter')
test_support.requires('gui')
this_dir = os.path.dirname(os.path.abspath(__file__))
lib_tk_test = os.path.abspath(os.path.join(this_dir, os.path.pardir,
'lib-tk', 'test'))
with test_support.DirsOnSysPath(lib_tk_test):
import runtktests
import Tkinter as tkinter
import ttk
from _tkinter import TclError
root = None
try:
root = tkinter.Tk()
button = ttk.Button(root)
button.destroy()
del button
except TclError as msg:
# assuming ttk is not available
raise unittest.SkipTest("ttk not available: %s" % msg)
finally:
if root is not None:
root.destroy()
del root
def test_main():
with test_support.DirsOnSysPath(lib_tk_test):
test_support.run_unittest(
*runtktests.get_tests(text=False, packages=['test_ttk']))
if __name__ == '__main__':
test_main()
|
0738d02533c82996e3bc09cc7a17c37619d700b3
|
ec85250addb7357dfe7bb3e0680d53fc7b0fd8fb
|
/python_modules/dagster/dagster/_serdes/utils.py
|
0844979b99a30f8718d191df522e73ad22c9da96
|
[
"Apache-2.0"
] |
permissive
|
dagster-io/dagster
|
6adb5deee8bcf3ea1866a6a64f2ed81e1db5e73a
|
fe21995e0402878437a828c6a4244025eac8c43b
|
refs/heads/master
| 2023-09-05T20:46:08.203794
| 2023-09-05T19:54:52
| 2023-09-05T19:54:52
| 131,619,646
| 8,565
| 1,154
|
Apache-2.0
| 2023-09-14T21:57:37
| 2018-04-30T16:30:04
|
Python
|
UTF-8
|
Python
| false
| false
| 674
|
py
|
utils.py
|
import hashlib
from typing import NamedTuple, Optional
from .serdes import WhitelistMap, serialize_value
def create_snapshot_id(snapshot: NamedTuple, whitelist_map: Optional[WhitelistMap] = None) -> str:
kwargs = dict(whitelist_map=whitelist_map) if whitelist_map else {}
json_rep = serialize_value(snapshot, **kwargs)
return hash_str(json_rep)
def hash_str(in_str: str) -> str:
    m = hashlib.sha1()  # so that the hex digest is 40 characters, not 64
m.update(in_str.encode("utf-8"))
return m.hexdigest()
def serialize_pp(value: NamedTuple) -> str:
"""Serialize and pretty print."""
return serialize_value(value, indent=2, separators=(",", ": "))
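# Illustrative example: hash_str is a plain SHA-1 hex digest of the UTF-8
# bytes, e.g. hash_str("hello") == "aaf4c61ddcc5e8a2dabede0f3b482cd9aea9434d"
# (40 hex characters).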
|
b37f148cc3686621b2fbf435b3c2e74f24a37496
|
3d262f92d72d1e91485af583cd4613c46bf02835
|
/Raspberry-Pi-Sonoff/Main.py
|
a780a014612206f7e08423fd17efb9ab7f3b5870
|
[
"MIT"
] |
permissive
|
Arbazkhan4712/Python-Quarantine-Projects
|
7b6e3933e442174435a37b7800263daecc3fae7d
|
1ce699cca76fb83b00785f02607f68e939285164
|
refs/heads/master
| 2023-05-12T02:26:19.493520
| 2021-12-16T06:27:34
| 2021-12-16T06:27:34
| 249,693,718
| 296
| 132
|
MIT
| 2023-05-01T22:16:45
| 2020-03-24T11:47:10
|
Python
|
UTF-8
|
Python
| false
| false
| 539
|
py
|
Main.py
|
from flask import Flask, render_template, request, redirect
from gpiozero import LED
from time import sleep
led = LED(2)
app = Flask(__name__)
@app.route("/")
def home():
if led.value == 1:
status = 'ON'
else:
status = 'OFF'
return render_template('home.html', status=status)
@app.route("/on")
def on():
led.on()
return "LED on"
@app.route("/off")
def off():
led.off()
return "LED off"
if __name__ == "__main__":
app.run(host='0.0.0.0', port=8000)
|
3728ac071dec7d7b88eb2c3ace6ace953b9a5779
|
f9e7d65cb784c01a0200145ba8d289afe41d4a56
|
/extra/usb_power/stats_manager.py
|
69fb33f292e1b4f923dd32edd73dffdb7c603ed2
|
[
"BSD-3-Clause"
] |
permissive
|
FrameworkComputer/EmbeddedController
|
ad7086769e87d0a4179eae96a7c9ff5e383ff54e
|
f6d6b927eed71550d3475411cfc3e59abe5cef2a
|
refs/heads/hx20-hx30
| 2023-08-08T20:45:10.621169
| 2023-05-26T07:03:59
| 2023-05-26T07:03:59
| 447,021,040
| 846
| 48
|
BSD-3-Clause
| 2023-05-26T07:04:59
| 2022-01-12T00:11:14
|
C
|
UTF-8
|
Python
| false
| false
| 11,449
|
py
|
stats_manager.py
|
# Copyright 2017 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Calculates statistics for lists of data and pretty print them."""
# Note: This is a py2/3 compatible file.
from __future__ import print_function
import collections
import json
import logging
import math
import os
import numpy
STATS_PREFIX = '@@'
NAN_TAG = '*'
NAN_DESCRIPTION = '%s domains contain NaN samples' % NAN_TAG
LONG_UNIT = {
'': 'N/A',
'mW': 'milliwatt',
'uW': 'microwatt',
'mV': 'millivolt',
'uA': 'microamp',
'uV': 'microvolt'
}
class StatsManagerError(Exception):
"""Errors in StatsManager class."""
pass
class StatsManager(object):
"""Calculates statistics for several lists of data(float).
Example usage:
>>> stats = StatsManager(title='Title Banner')
>>> stats.AddSample(TIME_KEY, 50.0)
>>> stats.AddSample(TIME_KEY, 25.0)
>>> stats.AddSample(TIME_KEY, 40.0)
    >>> stats.AddSample(TIME_KEY, 10.0)
>>> stats.AddSample('frobnicate', 11.5)
>>> stats.AddSample('frobnicate', 9.0)
>>> stats.AddSample('foobar', 11111.0)
>>> stats.AddSample('foobar', 22222.0)
>>> stats.CalculateStats()
>>> print(stats.SummaryToString())
    @@-------------------------------------------------------------
    @@                       Title Banner
    @@-------------------------------------------------------------
    @@           NAME  COUNT      MEAN   STDDEV       MAX       MIN
    @@   sample_msecs      4     31.25    15.16     50.00     10.00
    @@         foobar      2  16666.50  5555.50  22222.00  11111.00
    @@     frobnicate      2     10.25     1.25     11.50      9.00
    @@-------------------------------------------------------------
Attributes:
_data: dict of list of readings for each domain(key)
_unit: dict of unit for each domain(key)
_smid: id supplied to differentiate data output to other StatsManager
instances that potentially save to the same directory
      if set, all output files will be named |smid|_|fname|
_title: title to add as banner to formatted summary. If no title,
no banner gets added
_order: list of formatting order for domains. Domains not listed are
displayed in sorted order
_hide_domains: collection of domains to hide when formatting summary string
_accept_nan: flag to indicate if NaN samples are acceptable
_nan_domains: set to keep track of which domains contain NaN samples
_summary: dict of stats per domain (key): min, max, count, mean, stddev
    _logger: StatsManager logger
Note:
_summary is empty until CalculateStats() is called, and is updated when
CalculateStats() is called.
"""
# pylint: disable=W0102
def __init__(self, smid='', title='', order=[], hide_domains=[],
accept_nan=True):
"""Initialize infrastructure for data and their statistics."""
self._title = title
self._data = collections.defaultdict(list)
self._unit = collections.defaultdict(str)
self._smid = smid
self._order = order
self._hide_domains = hide_domains
self._accept_nan = accept_nan
self._nan_domains = set()
self._summary = {}
self._logger = logging.getLogger('StatsManager')
def AddSample(self, domain, sample):
"""Add one sample for a domain.
Args:
domain: the domain name for the sample.
sample: one time sample for domain, expect type float.
Raises:
StatsManagerError: if trying to add NaN and |_accept_nan| is false
"""
try:
sample = float(sample)
except ValueError:
# if we don't accept nan this will be caught below
self._logger.debug('sample %s for domain %s is not a number. Making NaN',
sample, domain)
sample = float('NaN')
if not self._accept_nan and math.isnan(sample):
raise StatsManagerError('accept_nan is false. Cannot add NaN sample.')
self._data[domain].append(sample)
if math.isnan(sample):
self._nan_domains.add(domain)
def SetUnit(self, domain, unit):
"""Set the unit for a domain.
There can be only one unit for each domain. Setting unit twice will
overwrite the original unit.
Args:
domain: the domain name.
unit: unit of the domain.
"""
if domain in self._unit:
self._logger.warning('overwriting the unit of %s, old unit is %s, new '
'unit is %s.', domain, self._unit[domain], unit)
self._unit[domain] = unit
def CalculateStats(self):
"""Calculate stats for all domain-data pairs.
First erases all previous stats, then calculate stats for all data.
"""
self._summary = {}
for domain, data in self._data.items():
data_np = numpy.array(data)
self._summary[domain] = {
'mean': numpy.nanmean(data_np),
'min': numpy.nanmin(data_np),
'max': numpy.nanmax(data_np),
'stddev': numpy.nanstd(data_np),
'count': data_np.size,
}
def SummaryToString(self, prefix=STATS_PREFIX):
"""Format summary into a string, ready for pretty print.
See class description for format example.
Args:
prefix: start every row in summary string with prefix, for easier reading.
Returns:
formatted summary string.
"""
headers = ('NAME', 'COUNT', 'MEAN', 'STDDEV', 'MAX', 'MIN')
table = [headers]
# determine what domains to display & and the order
domains_to_display = set(self._summary.keys()) - set(self._hide_domains)
display_order = [key for key in self._order if key in domains_to_display]
domains_to_display -= set(display_order)
display_order.extend(sorted(domains_to_display))
nan_in_output = False
for domain in display_order:
stats = self._summary[domain]
if not domain.endswith(self._unit[domain]):
domain = '%s_%s' % (domain, self._unit[domain])
if domain in self._nan_domains:
domain = '%s%s' % (domain, NAN_TAG)
nan_in_output = True
row = [domain]
row.append(str(stats['count']))
for entry in headers[2:]:
row.append('%.2f' % stats[entry.lower()])
table.append(row)
max_col_width = []
for col_idx in range(len(table[0])):
col_item_widths = [len(row[col_idx]) for row in table]
max_col_width.append(max(col_item_widths))
formatted_lines = []
for row in table:
formatted_row = prefix + ' '
for i in range(len(row)):
formatted_row += row[i].rjust(max_col_width[i] + 2)
formatted_lines.append(formatted_row)
if nan_in_output:
formatted_lines.append('%s %s' % (prefix, NAN_DESCRIPTION))
if self._title:
line_length = len(formatted_lines[0])
dec_length = len(prefix)
# trim title to be at most as long as the longest line without the prefix
title = self._title[:(line_length - dec_length)]
      # line is a separator line consisting of -----
line = '%s%s' % (prefix, '-' * (line_length - dec_length))
# prepend the prefix to the centered title
padded_title = '%s%s' % (prefix, title.center(line_length)[dec_length:])
formatted_lines = [line, padded_title, line] + formatted_lines + [line]
formatted_output = '\n'.join(formatted_lines)
return formatted_output
def GetSummary(self):
"""Getter for summary."""
return self._summary
def _MakeUniqueFName(self, fname):
"""prepend |_smid| to fname & rotate fname to ensure uniqueness.
Before saving a file through the StatsManager, make sure that the filename
is unique, first by prepending the smid if any and otherwise by appending
increasing integer suffixes until the filename is unique.
If |smid| is defined /path/to/example/file.txt becomes
/path/to/example/{smid}_file.txt.
The rotation works by changing /path/to/example/somename.txt to
/path/to/example/somename1.txt if the first one already exists on the
system.
    Note: this is not thread-safe. While it makes sense to use StatsManager
    during threaded data collection, data retrieval should happen in a
    single-threaded environment so that files don't get clobbered.
Args:
fname: filename to ensure uniqueness.
Returns:
{smid_}fname{tag}.ext
the smid portion gets prepended if |smid| is defined
the tag portion gets appended if necessary to ensure unique fname
"""
fdir = os.path.dirname(fname)
base, ext = os.path.splitext(os.path.basename(fname))
if self._smid:
base = '%s_%s' % (self._smid, base)
unique_fname = os.path.join(fdir, '%s%s' % (base, ext))
tag = 0
while os.path.exists(unique_fname):
old_fname = unique_fname
unique_fname = os.path.join(fdir, '%s%d%s' % (base, tag, ext))
self._logger.warning('Attempted to store stats information at %s, but '
'file already exists. Attempting to store at %s '
'now.', old_fname, unique_fname)
tag += 1
return unique_fname
def SaveSummary(self, directory, fname='summary.txt', prefix=STATS_PREFIX):
"""Save summary to file.
Args:
directory: directory to save the summary in.
fname: filename to save summary under.
prefix: start every row in summary string with prefix, for easier reading.
Returns:
full path of summary save location
"""
summary_str = self.SummaryToString(prefix=prefix) + '\n'
if not os.path.exists(directory):
os.makedirs(directory)
fname = self._MakeUniqueFName(os.path.join(directory, fname))
with open(fname, 'w') as f:
f.write(summary_str)
return fname
def SaveSummaryJSON(self, directory, fname='summary.json'):
"""Save summary (only MEAN) into a JSON file.
Args:
directory: directory to save the JSON summary in.
fname: filename to save summary under.
Returns:
full path of summary save location
"""
data = {}
for domain in self._summary:
unit = LONG_UNIT.get(self._unit[domain], self._unit[domain])
data_entry = {'mean': self._summary[domain]['mean'], 'unit': unit}
data[domain] = data_entry
if not os.path.exists(directory):
os.makedirs(directory)
fname = self._MakeUniqueFName(os.path.join(directory, fname))
with open(fname, 'w') as f:
json.dump(data, f)
return fname
def GetRawData(self):
"""Getter for all raw_data."""
return self._data
def SaveRawData(self, directory, dirname='raw_data'):
"""Save raw data to file.
Args:
directory: directory to create the raw data folder in.
dirname: folder in which raw data live.
Returns:
list of full path of each domain's raw data save location
"""
if not os.path.exists(directory):
os.makedirs(directory)
dirname = os.path.join(directory, dirname)
if not os.path.exists(dirname):
os.makedirs(dirname)
fnames = []
for domain, data in self._data.items():
if not domain.endswith(self._unit[domain]):
domain = '%s_%s' % (domain, self._unit[domain])
fname = self._MakeUniqueFName(os.path.join(dirname, '%s.txt' % domain))
with open(fname, 'w') as f:
f.write('\n'.join('%.2f' % sample for sample in data) + '\n')
fnames.append(fname)
return fnames
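# Typical end-of-run flow (illustrative; '/tmp/stats' is an assumed directory):
#   stats.CalculateStats()
#   stats.SaveSummary('/tmp/stats')      # pretty-printed table, prefixed rows
#   stats.SaveSummaryJSON('/tmp/stats')  # means only, with long-form units
#   stats.SaveRawData('/tmp/stats')      # one raw-data file per domain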
|
8380b83b3b56807a22ef7db073a8aa74c767f85f
|
eeabb0f83e700da0b4c71b62e7af4c17db158543
|
/test-crates/pyo3-mixed/check_installed/check_installed.py
|
428b7f811e823c3044946bd7b927bd7e6a93b6b8
|
[
"MIT",
"Apache-2.0"
] |
permissive
|
PyO3/maturin
|
9a9a79d419a6638e6aae0dfffe07c021649f999d
|
cffc837c456f403ad38a395f1743e851bfc345b6
|
refs/heads/main
| 2023-09-05T07:05:41.910307
| 2023-09-05T05:20:41
| 2023-09-05T05:20:41
| 141,846,986
| 2,331
| 182
|
Apache-2.0
| 2023-09-12T13:50:23
| 2018-07-21T21:27:46
|
Rust
|
UTF-8
|
Python
| false
| false
| 2,117
|
py
|
check_installed.py
|
#!/usr/bin/env python3
import json
import os.path
import platform
import sys
from pathlib import Path
from subprocess import check_output
from boltons.strutils import slugify
import pyo3_mixed
assert pyo3_mixed.get_42() == 42
assert slugify("First post! Hi!!!!~1 ") == "first_post_hi_1"
script_name = "print_cli_args"
args = ["a", "b", "c"]
[rust_args, python_args] = check_output([script_name, *args], text=True).splitlines()
# The rust vec debug format is also valid json
rust_args = json.loads(rust_args)
python_args = json.loads(python_args)
# On alpine/musl, rust_args is empty so we skip all tests on musl
if len(rust_args) > 0:
    # On Linux we get sys.executable, Windows resolves the path, and macOS
    # gives us a third path (
    # {prefix}/Python.framework/Versions/3.10/Resources/Python.app/Contents/MacOS/Python
    # vs
    # {prefix}/Python.framework/Versions/3.10/bin/python3.10
    # on cirrus ci)
# On windows, cpython resolves while pypy doesn't.
# The script for cpython is actually a distinct file from the system interpreter for
# windows and mac
if platform.system() == "Linux":
assert os.path.samefile(rust_args[0], sys.executable), (
rust_args,
sys.executable,
os.path.realpath(rust_args[0]),
os.path.realpath(sys.executable),
)
    # Windows can't decide if it's with or without .exe; FreeBSD just doesn't work for some reason
if platform.system() in ["Darwin", "Linux"]:
# Unix venv layout (and hopefully also on more exotic platforms)
print_cli_args = str(Path(sys.prefix).joinpath("bin").joinpath(script_name))
assert rust_args[1] == print_cli_args, (rust_args, print_cli_args)
assert python_args[0] == print_cli_args, (python_args, print_cli_args)
    # FreeBSD just doesn't work for some reason
if platform.system() in ["Darwin", "Linux", "Windows"]:
# Rust contains the python executable as first argument but python does not
assert rust_args[2:] == args, rust_args
assert python_args[1:] == args, python_args
print("SUCCESS")
|
ec82582beed6466782afeab7d3592d262c130cd3
|
6f2fef1b207299681f8d67d3831c400bb91de04b
|
/data_collection/gazette/spiders/sc_capao_alto.py
|
4f20081e7233ee83c7f1698a404cda0378d5fe0a
|
[
"MIT"
] |
permissive
|
okfn-brasil/querido-diario
|
76177747aa5ad47e99514f38402e6bc747b9a715
|
548a9b1b2718dc78ba8ccb06b36cf337543ad71d
|
refs/heads/main
| 2023-08-22T04:26:30.798196
| 2023-08-18T14:12:37
| 2023-08-18T14:12:37
| 127,598,755
| 402
| 233
|
MIT
| 2023-09-14T18:56:02
| 2018-04-01T05:01:21
|
Python
|
UTF-8
|
Python
| false
| false
| 197
|
py
|
sc_capao_alto.py
|
from gazette.spiders.base.fecam import FecamGazetteSpider
class ScCapaoAltoSpider(FecamGazetteSpider):
name = "sc_capao_alto"
FECAM_QUERY = "cod_entidade:63"
TERRITORY_ID = "4203253"
|
1acde067c23f155b435b7d67ca2900b14ce35807
|
3c2cc8910c4a333a44d2d7b22489ef8d5ddb6a13
|
/src/zvt/ui/__init__.py
|
7eeeff8822f170b37939e5ba7bd1bc6c4fa0c77d
|
[
"MIT"
] |
permissive
|
zvtvz/zvt
|
6341dc765177b1e99727207f1608b730cbbb705a
|
03aee869fd432bb933d59ba419401cfc11501392
|
refs/heads/master
| 2023-08-28T10:05:29.185590
| 2023-08-01T10:19:03
| 2023-08-01T10:19:03
| 179,451,497
| 2,782
| 922
|
MIT
| 2023-04-04T09:31:03
| 2019-04-04T08:06:57
|
Python
|
UTF-8
|
Python
| false
| false
| 435
|
py
|
__init__.py
|
# -*- coding: utf-8 -*-
import os
import dash
import dash_bootstrap_components as dbc
assets_path = os.path.abspath(os.path.join(os.path.dirname(__file__), "assets"))
zvt_app = dash.Dash(
__name__,
meta_tags=[{"name": "viewport", "content": "width=device-width"}],
assets_folder=assets_path,
external_stylesheets=[dbc.themes.BOOTSTRAP],
)
zvt_app.config.suppress_callback_exceptions = True
server = zvt_app.server
|
cb6b4c5a69e31d68ae60216e52216bf294ca89fc
|
2d5a3cde8291c1f733f63b83e3d02f77321a9f12
|
/python/mysql/demo_mysqlconnector.py
|
3e8124e7e80263740a6b3fa568f4f1a025c4890d
|
[
"BSD-3-Clause"
] |
permissive
|
DataDog/trace-examples
|
99d5e6e0984beefb08a2a3ead0dc35e19798d932
|
121636bbae446fb93f56c14a83ba819faf327d1f
|
refs/heads/master
| 2023-08-19T00:30:52.632661
| 2023-03-23T15:14:13
| 2023-03-23T15:14:13
| 61,754,713
| 106
| 73
|
BSD-3-Clause
| 2023-03-08T14:06:45
| 2016-06-22T22:08:05
|
Python
|
UTF-8
|
Python
| false
| false
| 993
|
py
|
demo_mysqlconnector.py
|
import logging.config
from ddtrace import Pin, patch
import mysql.connector
logging.config.dictConfig({
'version': 1,
'formatters': {
'verbose': {
'format': '%(levelname)s %(asctime)s %(module)s %(process)d %(thread)d %(message)s'
},
},
'handlers': {
'console': {
'level': 'DEBUG',
'class': 'logging.StreamHandler',
'formatter': 'verbose',
},
},
'loggers': {
'ddtrace': {
'handlers': ['console'],
'level': 'DEBUG',
'propagate': True,
},
}
})
# If not patched yet, you can patch mysql specifically
patch(mysql=True)
# This will report a span with the default settings
conn = mysql.connector.connect(user="test", password="test", host="localhost", port=3306, database="test")
cursor = conn.cursor()
cursor.execute("SHOW TABLES")
# Use a pin to specify metadata related to this connection
Pin.override(conn, service='mysql-users')
|
6aa7ecebd200cbb01bca75b9a61fea240fd4dc38
|
26c6d869abdc3a797dd3564165ff1744be58794c
|
/tests/test_digest_ha1_password.py
|
51f1c31a1d7824925767de0401acd3c55ae3ec62
|
[
"MIT"
] |
permissive
|
miguelgrinberg/Flask-HTTPAuth
|
8b00a40d3e01fdb4839c30d999649cb17cf8d85d
|
36fe7aa6cdce721a01e32c416593f8437c8ac386
|
refs/heads/main
| 2023-08-14T13:07:17.006849
| 2023-06-23T18:29:28
| 2023-06-23T18:29:28
| 10,136,908
| 1,237
| 267
|
MIT
| 2023-04-27T09:21:22
| 2013-05-18T05:10:52
|
Python
|
UTF-8
|
Python
| false
| false
| 2,425
|
py
|
test_digest_ha1_password.py
|
import unittest
from hashlib import md5 as basic_md5
from flask import Flask
from flask_httpauth import HTTPDigestAuth
from werkzeug.http import parse_dict_header
def md5(data):
    # avoid shadowing the built-in str; accept both str and bytes
    if isinstance(data, str):
        data = data.encode('utf-8')
    return basic_md5(data)
def get_ha1(user, pw, realm):
a1 = user + ":" + realm + ":" + pw
return md5(a1).hexdigest()
class HTTPAuthTestCase(unittest.TestCase):
def setUp(self):
app = Flask(__name__)
app.config['SECRET_KEY'] = 'my secret'
digest_auth_ha1_pw = HTTPDigestAuth(use_ha1_pw=True)
@digest_auth_ha1_pw.get_password
def get_digest_password(username):
if username == 'susan':
return get_ha1(username, 'hello', digest_auth_ha1_pw.realm)
elif username == 'john':
return get_ha1(username, 'bye', digest_auth_ha1_pw.realm)
else:
return None
@app.route('/')
def index():
return 'index'
@app.route('/digest_ha1_pw')
@digest_auth_ha1_pw.login_required
def digest_auth_ha1_pw_route():
return 'digest_auth_ha1_pw:' + digest_auth_ha1_pw.username()
self.app = app
self.client = app.test_client()
def test_digest_ha1_pw_auth_login_valid(self):
response = self.client.get('/digest_ha1_pw')
self.assertTrue(response.status_code == 401)
header = response.headers.get('WWW-Authenticate')
auth_type, auth_info = header.split(None, 1)
d = parse_dict_header(auth_info)
a1 = 'john:' + d['realm'] + ':bye'
ha1 = md5(a1).hexdigest()
a2 = 'GET:/digest_ha1_pw'
ha2 = md5(a2).hexdigest()
a3 = ha1 + ':' + d['nonce'] + ':' + ha2
auth_response = md5(a3).hexdigest()
response = self.client.get(
'/digest_ha1_pw', headers={
'Authorization': 'Digest username="john",realm="{0}",'
'nonce="{1}",uri="/digest_ha1_pw",'
'response="{2}",'
'opaque="{3}"'.format(d['realm'],
d['nonce'],
auth_response,
d['opaque'])})
self.assertEqual(response.data, b'digest_auth_ha1_pw:john')
|
62431c1e527bb2f38e4362b73243710c78255ee8
|
cf2756bdc6c366715f8e8bc953ba4b8e19765a4a
|
/docs/dalapi/extension.py
|
befe3cd99947f3f405b865e30aa86c2abab804aa
|
[
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0",
"Intel",
"LicenseRef-scancode-warranty-disclaimer",
"BSD-2-Clause",
"MIT",
"Zlib"
] |
permissive
|
oneapi-src/oneDAL
|
c88b1f59218aa3b3b624a7b9f457bfc5823d583b
|
f4abbf2a18e27fa4165eb6b91b3456b5039e03a6
|
refs/heads/master
| 2023-09-06T00:47:52.411627
| 2023-09-05T22:29:42
| 2023-09-05T22:29:42
| 54,928,587
| 260
| 115
|
Apache-2.0
| 2023-09-14T17:51:26
| 2016-03-28T22:39:32
|
C++
|
UTF-8
|
Python
| false
| false
| 7,312
|
py
|
extension.py
|
# file: extension.py
#===============================================================================
# Copyright 2019 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#===============================================================================
import os
import time
from typing import (Dict, Tuple, Text)
from . import doxypy
from . import utils
from . import roles
from . import directives
from . import transformers
class PathResolver(object):
def __init__(self, app,
relative_doxyfile_dir,
relative_sources_dir):
self.base_dir = app.confdir
self.doxyfile_dir = self.absjoin(self.base_dir, relative_doxyfile_dir)
self.sources_dir = self.absjoin(self.base_dir, relative_sources_dir)
self.doxygen_xml = self.absjoin(self.doxyfile_dir, 'doxygen', 'xml')
def __call__(self, relative_name):
return self.absjoin(self.base_dir, relative_name)
def absjoin(self, *args):
return os.path.abspath(os.path.join(*args))
class ProjectWatcher(object):
def __init__(self, ctx, path_resolver):
self.ctx = ctx
self._path_resolver = path_resolver
self._xml_timer = utils.FileModificationTimer(
path_resolver.doxygen_xml, '*.xml')
self._hpp_timer = utils.FileModificationTimer(
path_resolver.sources_dir, '*.hpp')
self._doxygen = utils.ProcessHandle(
'doxygen', path_resolver.doxyfile_dir)
def link_docname(self, docname):
full_path = self._path_resolver(f'{docname}.rst')
self._linked_docnames[docname] = (full_path, time.time())
def get_outdated_docnames(self, modified_docnames):
# We do not need to check the modified documents,
# they should be updated by Sphinx in any way
for docname in modified_docnames:
if docname in self._linked_docnames:
del self._linked_docnames[docname]
xml_mtime = self._xml_timer()
hpp_mtime = self._hpp_timer()
if xml_mtime < hpp_mtime:
self.ctx.log('Run Doxygen')
self._doxygen.run()
outdated_docnames = []
for docname, info in self._linked_docnames.items():
_, link_time = info
if (self.ctx.always_rebuild or
link_time < xml_mtime or link_time < hpp_mtime):
outdated_docnames.append(docname)
if self.ctx.debug:
for docname in outdated_docnames:
self.ctx.log('OUTDATED', docname)
return outdated_docnames
def _update_linked_docnames(self):
relevant_linked_docnames = {}
for docname, info in self._linked_docnames.items():
docfilename = info[0]
if os.path.exists(docfilename):
relevant_linked_docnames[docname] = info
self._linked_docnames = relevant_linked_docnames
@property
def _linked_docnames(self) -> Dict[Text, Tuple[Text, float]]:
if not hasattr(self.ctx.app.env, 'dalapi_linked_docnames'):
self.ctx.app.env.dalapi_linked_docnames = {}
return self.ctx.app.env.dalapi_linked_docnames
@_linked_docnames.setter
def _linked_docnames(self, value):
self.ctx.app.env.dalapi_linked_docnames = value
class Context(object):
def __init__(self, app):
self.app = app
self._index = None
self._watcher = None
self._doxygen = None
self._listing = None
self._path_resolver = None
self._is_listing_enabled = False
self._read_env()
def configure(self, relative_doxyfile_dir, relative_sources_dir, is_listing_enabled):
self._path_resolver = PathResolver(
self.app,
relative_doxyfile_dir,
relative_sources_dir
)
self._is_listing_enabled = is_listing_enabled
@property
def current_docname(self):
return self.app.env.docname
@property
def index(self) -> doxypy.Index:
if self._index is None:
self._index = doxypy.index(self.path_resolver.doxygen_xml,
name_transformer=transformers.NameTransformer(),
                                       transformer_passes=[
transformers.PropertyTransformer(),
transformers.RstDescriptionTransformer(),
]
)
return self._index
@property
def watcher(self) -> ProjectWatcher:
if self._watcher is None:
self._watcher = ProjectWatcher(self, self.path_resolver)
return self._watcher
@property
def listing(self) -> doxypy.ListingReader:
if self._listing is None:
self._listing = doxypy.ListingReader(self.path_resolver.sources_dir)
return self._listing
@property
def listing_enabled(self) -> bool:
return self._is_listing_enabled
@property
def path_resolver(self):
if not self._path_resolver:
raise Exception('Context is not configured')
return self._path_resolver
def log(self, *args):
if self.debug:
print('[dalapi]:', *args)
def _read_env(self):
def get_env_flag(env_var):
value = os.environ.get(env_var, '0')
return value.lower() in ['1', 'yes', 'y']
self.debug = get_env_flag('DALAPI_DEBUG')
self.always_rebuild = get_env_flag('DALAPI_ALWAYS_REBUILD')
class EventHandler(object):
def __init__(self, ctx: Context):
self.ctx = ctx
def env_get_outdated(self, app, env, added, changed, removed):
return self.ctx.watcher.get_outdated_docnames(added | changed | removed)
def get_config_values(self, app):
self.ctx.configure(
relative_doxyfile_dir=app.config.onedal_relative_doxyfile_dir,
relative_sources_dir=app.config.onedal_relative_sources_dir,
is_listing_enabled=app.config.onedal_enable_listing
)
def setup(app):
ctx = Context(app)
app.add_role('capterm', roles.capterm_role)
app.add_role('txtref', roles.txtref_role)
app.add_directive('onedal_class', directives.ClassDirective(ctx))
app.add_directive('onedal_func', directives.FunctionDirective(ctx))
app.add_directive('onedal_code', directives.ListingDirective(ctx))
app.add_directive('onedal_tags_namespace', directives.TagsNamespaceDirective(ctx))
app.add_directive('onedal_enumclass', directives.EnumClassDirective(ctx))
app.add_config_value('onedal_relative_doxyfile_dir', '.', 'env')
app.add_config_value('onedal_relative_sources_dir', '.', 'env')
app.add_config_value('onedal_enable_listing', True, 'env')
handler = EventHandler(ctx)
app.connect("builder-inited", handler.get_config_values)
app.connect('env-get-outdated', handler.env_get_outdated)
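# Typical wiring from a Sphinx conf.py (the module path and directory values
# here are assumptions for illustration):
#   extensions = ['dalapi.extension']
#   onedal_relative_doxyfile_dir = 'doxygen'
#   onedal_relative_sources_dir = '../../cpp/oneapi'
#   onedal_enable_listing = True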
|
7d77b45a7afb1cbfe3557246bd49d2cc9f9bc56a
|
a2b20597759990445081057d35d113434cfcf970
|
/stubs/integration_test/fixture_stubs/django/utils/functional.pyi
|
db6bae5e8877567c034fa0a468773d73b579b287
|
[
"MIT"
] |
permissive
|
facebook/pyre-check
|
34059599c02b65605c574f13555229f3b931fd4e
|
fe8ccedc572cc1faa1fd01e9138f65e982875002
|
refs/heads/main
| 2023-09-03T19:10:11.587028
| 2023-09-02T07:40:35
| 2023-09-02T07:40:35
| 110,274,488
| 6,703
| 575
|
MIT
| 2023-09-13T17:02:32
| 2017-11-10T17:31:36
|
OCaml
|
UTF-8
|
Python
| false
| false
| 667
|
pyi
|
functional.pyi
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
# pyre-unsafe
from typing import Any, TypeVar
_T = TypeVar("_T")
def curry(_curried_func, *args, **kwargs) -> Any: ...
def memoize(func: _T, cache, num_args) -> _T: ...
class cached_property:
func: Any
name: str
def __get__(self, instance, type=...) -> Any: ...
# In the future, we'll properly type promises as parametric types.
Promise = Any
def lazy(func: _T, *resultclasses) -> _T: ...
class LazyObject: ...
def partition(predicate: Any, values: Any) -> Any: ...
|
246ac239e0aefe49414524f13332c0fda887e628
|
11f7558e56bcfb742495cec766baeea7650a3103
|
/python/Multi-Service/speech_call_center.py
|
7d33ee5b74b8c7bc5cb483dee5bdb2d88dc77e10
|
[
"MIT"
] |
permissive
|
Azure-Samples/cognitive-services-quickstart-code
|
ee6936fae3bdfc902e6e8c74080f598845eb9c49
|
3ec40229ae753720605319e2e4d0955f9039449a
|
refs/heads/master
| 2023-08-21T21:19:28.114774
| 2023-06-06T20:12:38
| 2023-06-06T20:12:38
| 198,896,181
| 323
| 511
|
MIT
| 2023-09-08T06:29:25
| 2019-07-25T20:20:09
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 3,224
|
py
|
speech_call_center.py
|
# Copyright (c) Microsoft. All rights reserved.
# Licensed under the MIT license. See LICENSE.md file in the project root for full license information.
import os
from azure.cognitiveservices.language.textanalytics import TextAnalyticsClient
import azure.cognitiveservices.speech as speechsdk
from msrest.authentication import CognitiveServicesCredentials
'''
Azure Speech recognition and Text Analytics sample.
Performs one-shot speech recognition from the default microphone and then analyzes that response text.
Include these libraries:
pip install --upgrade azure-cognitiveservices-speech
pip install azure-cognitiveservices-language-textanalytics
Speech SDK: https://docs.microsoft.com/en-us/python/api/azure-cognitiveservices-speech/?view=azure-python
Text Analytics SDK: https://docs.microsoft.com/en-us/python/api/azure-cognitiveservices-language-textanalytics/?view=azure-python
Text Analytics: https://azuresdkdocs.blob.core.windows.net/$web/python/azure-ai-textanalytics/1.0.0b1/azure.ai.textanalytics.html
'''
speech_subscription_key = 'PASTE_YOUR_SPEECH_SUBSCRIPTION_KEY_HERE'
# Set this to the region for your Speech resource (for example, westus, eastus, and so on).
speech_region = 'westus'
text_analytics_endpoint = 'PASTE_YOUR_TEXT_ANALYTICS_ENDPOINT_HERE'
text_analytics_subscription_key = 'PASTE_YOUR_TEXT_ANALYTICS_SUBSCRIPTION_KEY_HERE'
# Authenticate; you may need to change the region to your own.
speech_config = speechsdk.SpeechConfig(subscription=speech_subscription_key, region=speech_region)
# Creates a speech recognizer using a microphone as audio input.
# The default language is "en-us".
speech_recognizer = speechsdk.SpeechRecognizer(speech_config=speech_config)
text_analytics_client = TextAnalyticsClient(text_analytics_endpoint, CognitiveServicesCredentials(text_analytics_subscription_key))
# Starts speech recognition, and returns after a single utterance is recognized.
# For long-running multi-utterance recognition, use start_continuous_recognition() instead.
print('Speak a phrase into your microphone...')
result = speech_recognizer.recognize_once()
print()
# Check the result
if result.reason == speechsdk.ResultReason.RecognizedSpeech:
print("Recognized: {}".format(result.text))
elif result.reason == speechsdk.ResultReason.NoMatch:
print("No speech could be recognized")
elif result.reason == speechsdk.ResultReason.Canceled:
cancellation_details = result.cancellation_details
print("Speech Recognition canceled: {}".format(
cancellation_details.reason))
if cancellation_details.reason == speechsdk.CancellationReason.Error:
print("Error details: {}".format(
cancellation_details.error_details))
print()
# Text Analytics, analyze the Speech to Text response in terms of sentiment.
documents = [{"id": "1", "language": "en", "text": result.text}]
analytics_response = text_analytics_client.sentiment(documents)
print("Document Sentiment: {}".format(analytics_response.sentiment))
print("Overall scores: positive={0:.3f}; neutral={1:.3f}; negative={2:.3f} \n".format(
analytics_response.document_scores.positive,
analytics_response.document_scores.neutral,
analytics_response.document_scores.negative,
))
|
ca2a64f7577ffd58f4c6c0120483021d355b164f
|
0e4860fecfdd34a3255003cc8c8df086c14083dd
|
/python/source_code/AutomatePython/07-regex/stripRe.py
|
c4077ea92e48eb91e027eb59efd42105fbcc6fc5
|
[] |
no_license
|
anzhihe/learning
|
503ab9a58f280227011da5eaa4b14b46c678e6f3
|
66f7f801e1395207778484e1543ea26309d4b354
|
refs/heads/master
| 2023-08-08T11:42:11.983677
| 2023-07-29T09:19:47
| 2023-07-29T09:19:47
| 188,768,643
| 1,443
| 617
| null | 2023-08-24T02:10:34
| 2019-05-27T04:04:10
|
Python
|
UTF-8
|
Python
| false
| false
| 241
|
py
|
stripRe.py
|
import re
def stripRe(s):
    # Use \s* (not \s+) so strings without surrounding whitespace still match,
    # and a non-greedy (.*?) so trailing whitespace stays out of the group.
    stripRegex = re.compile(r'^\s*(.*?)\s*$')
    mo = stripRegex.search(s)
    return mo.group(1)
strTest = ' hello world '
print(strTest)
print(stripRe(strTest))
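# Hedged extension (an addition, not in the original exercise): like
# str.strip(chars), strip a caller-supplied set of characters instead of
# whitespace; stripChars is a hypothetical name.
def stripChars(s, chars=None):
    if chars is None:
        return stripRe(s)
    pattern = re.compile(r'^[{0}]*(.*?)[{0}]*$'.format(re.escape(chars)))
    return pattern.search(s).group(1)

print(stripChars('xxhello worldxx', 'x'))  # hello world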
|
6902619908bee3a92d600a5b4b25903b1b97b885
|
31cf77b4c0342c6148b35ae2613d5e2501d5e755
|
/src/encoded/tests/test_audit_file.py
|
1df86a070fb976e57a8aac8750d9a60917264624
|
[
"MIT"
] |
permissive
|
ENCODE-DCC/encoded
|
096de8a6d60c959a783cc9517f1d60bd6c21b71f
|
80e05610c79b46d0890228555bb03e436b2fef11
|
refs/heads/dev
| 2023-08-08T15:45:07.493187
| 2023-08-03T20:01:24
| 2023-08-03T20:01:24
| 7,045,549
| 110
| 69
|
MIT
| 2023-09-12T23:59:45
| 2012-12-07T00:52:21
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 45,161
|
py
|
test_audit_file.py
|
import pytest
def collect_audit_errors(result, error_types=None):
errors = result.json['audit']
errors_list = []
if error_types:
for error_type in error_types:
errors_list.extend(errors[error_type])
else:
for error_type in errors:
errors_list.extend(errors[error_type])
return errors_list
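# Usage note (an addition): the helper above can replace the error-collection
# loop that many of the tests below still write out inline, e.g.:
#
#     res = testapp.get(file1['@id'] + '@@index-data')
#     assert any(error['category'] == 'inconsistent paired_with'
#                for error in collect_audit_errors(res))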
def test_audit_file_mismatched_paired_with(testapp, file1, file4):
testapp.patch_json(file1['@id'], {
'run_type': 'paired-ended', 'paired_end': '2', 'paired_with': file4['uuid']})
res = testapp.get(file1['@id'] + '@@index-data')
errors = res.json['audit']
errors_list = []
for error_type in errors:
errors_list.extend(errors[error_type])
assert any(error['category'] ==
'inconsistent paired_with' for error in errors_list)
def test_audit_file_inconsistent_paired_with(testapp, file1, file3):
testapp.patch_json(file1['@id'], {
'run_type': 'paired-ended', 'paired_end': '1', 'paired_with': file3['uuid']})
testapp.patch_json(file3['@id'], {
'run_type': 'paired-ended', 'paired_end': '1'})
res = testapp.get(file1['@id'] + '@@index-data')
errors = res.json['audit']
errors_list = []
for error_type in errors:
errors_list.extend(errors[error_type])
assert any(error['category'] ==
'inconsistent paired_with' for error in errors_list)
testapp.patch_json(file1['@id'], {
'run_type': 'paired-ended', 'paired_end': '2', 'paired_with': file3['uuid']})
testapp.patch_json(file3['@id'], {
'run_type': 'paired-ended', 'paired_end': '2', 'paired_with': file1['uuid']})
res2 = testapp.get(file1['@id'] + '@@index-data')
errors = res2.json['audit']
errors_list = []
for error_type in errors:
errors_list.extend(errors[error_type])
assert any(error['category'] ==
'inconsistent paired_with' for error in errors_list)
testapp.patch_json(file1['@id'], {
'run_type': 'paired-ended', 'paired_end': '1'})
res3 = testapp.get(file1['@id'] + '@@index-data')
errors = res3.json['audit']
errors_list = []
for error_type in errors:
errors_list.extend(errors[error_type])
assert all(error['category'] !=
'inconsistent paired_with' for error in errors_list)
def test_audit_missing_paired_with(testapp, file2, file4):
testapp.patch_json(file2['@id'], {
'run_type': 'paired-ended', 'paired_end': '1'})
res = testapp.get(file2['@id'] + '@@index-data')
errors = res.json['audit']
errors_list = []
for error_type in errors:
errors_list.extend(errors[error_type])
assert any(error['category'] ==
'missing paired_with' for error in errors_list)
testapp.patch_json(file2['@id'], {
'paired_with': file4['uuid']})
res2 = testapp.get(file2['@id'] + '@@index-data')
errors = res2.json['audit']
errors_list = []
for error_type in errors:
errors_list.extend(errors[error_type])
assert all(error['category'] !=
'missing paired_with' for error in errors_list)
def test_audit_paired_with_non_fastq(testapp, file1, file6, platform1):
testapp.patch_json(
file1['@id'],
{
'run_type': 'paired-ended',
'paired_end': '1'
}
)
testapp.patch_json(
file6['@id'],
{
'run_type': 'paired-ended',
'platform': platform1['uuid'],
'paired_end': '2',
'paired_with': file1['uuid']
}
)
res = testapp.get(file1['@id'] + '@@index-data')
errors = res.json['audit']
errors_list = []
for error_type in errors:
errors_list.extend(errors[error_type])
assert any(
error['category'] == 'paired with non-fastq'
for error in errors_list
)
def test_audit_paired_with_fastq(testapp, file1, file4):
testapp.patch_json(
file1['@id'],
{
'run_type': 'paired-ended',
'paired_end': '1'
}
)
testapp.patch_json(
file4['@id'],
{
'run_type': 'paired-ended',
'paired_end': '2',
'paired_with': file1['uuid']
}
)
res2 = testapp.get(file1['@id'] + '@@index-data')
errors = res2.json['audit']
errors_list = []
for error_type in errors:
errors_list.extend(errors[error_type])
assert all(
error['category'] != 'paired with non-fastq'
for error in errors_list
)
def test_audit_file_inconsistent_read_count_paired_with(testapp, file1, file4):
testapp.patch_json(file1['@id'], {
'run_type': 'paired-ended',
'read_count': 20,
'paired_end': '2',
'paired_with': file4['uuid']})
testapp.patch_json(file4['@id'], {
'read_count': 21})
res = testapp.get(file1['@id'] + '@@index-data')
errors = res.json['audit']
errors_list = []
for error_type in errors:
errors_list.extend(errors[error_type])
assert any(error['category'] ==
'inconsistent read count' for error in errors_list)
testapp.patch_json(file4['@id'], {
'read_count': 20})
res2 = testapp.get(file1['@id'] + '@@index-data')
errors = res2.json['audit']
errors_list = []
for error_type in errors:
errors_list.extend(errors[error_type])
assert all(error['category'] !=
'inconsistent read count' for error in errors_list)
def test_audit_file_missing_controlled_by(testapp, file3):
res = testapp.get(file3['@id'] + '@@index-data')
errors = res.json['audit']
errors_list = []
for error_type in errors:
errors_list.extend(errors[error_type])
assert any(error['category'] ==
'missing controlled_by' for error in errors_list)
def test_audit_file_mismatched_controlled_by(testapp, file1):
res = testapp.get(file1['@id'] + '@@index-data')
errors = res.json['audit']
errors_list = []
for error_type in errors:
errors_list.extend(errors[error_type])
assert any(error['category'] ==
'inconsistent control' for error in errors_list)
def test_audit_file_read_length_controlled_by(testapp, file1_2,
file2, file_exp,
file_exp2, ileum):
testapp.patch_json(file1_2['@id'], {'read_length': 50,
'run_type': 'single-ended'})
testapp.patch_json(file2['@id'], {'read_length': 150,
'run_type': 'single-ended'})
testapp.patch_json(file1_2['@id'], {'controlled_by': [file2['@id']]})
testapp.patch_json(file_exp['@id'], {
'possible_controls': [file_exp2['@id']]})
testapp.patch_json(file_exp2['@id'], {'assay_term_name': 'RAMPAGE',
'biosample_ontology': ileum['uuid']})
res = testapp.get(file1_2['@id'] + '@@index-data')
errors = res.json['audit']
errors_list = []
for error_type in errors:
errors_list.extend(errors[error_type])
assert any(error['category'] ==
'inconsistent control read length' for error in errors_list)
def test_audit_file_read_length_controlled_by_exclusion(testapp, file1_2,
file2, file_exp,
file_exp2, ileum):
testapp.patch_json(file1_2['@id'], {'read_length': 50,
'run_type': 'single-ended'})
testapp.patch_json(file2['@id'], {'read_length': 52,
'run_type': 'single-ended'})
testapp.patch_json(file1_2['@id'], {'controlled_by': [file2['@id']]})
testapp.patch_json(file_exp['@id'], {
'possible_controls': [file_exp2['@id']]})
testapp.patch_json(file_exp2['@id'], {'assay_term_name': 'RAMPAGE',
'biosample_ontology': ileum['uuid']})
res = testapp.get(file1_2['@id'] + '@@index-data')
errors = res.json['audit']
errors_list = []
for error_type in errors:
errors_list.extend(errors[error_type])
    assert all(error['category'] !=
               'inconsistent control read length' for error in errors_list)
def test_audit_file_replicate_match_inconsistent(testapp, file1, file_rep2):
testapp.patch_json(file1['@id'], {'replicate': file_rep2['uuid']})
res = testapp.get(file1['@id'] + '@@index-data')
errors = res.json['audit']
errors_list = []
for error_type in errors:
errors_list.extend(errors[error_type])
assert any(error['category'] ==
'inconsistent replicate' for error in errors_list)
def test_audit_file_replicate_match_consistent(testapp, file1, file_rep2):
#testapp.patch_json(file1['@id'], {'replicate': file_rep2['uuid']})
res = testapp.get(file1['@id'] + '@@index-data')
errors = res.json['audit']
errors_list = []
for error_type in errors:
errors_list.extend(errors[error_type])
assert all(error['category'] !=
'inconsistent replicate' for error in errors_list)
'''
def test_audit_modERN_missing_step_run(testapp, file_exp, file3, award):
testapp.patch_json(award['@id'], {'rfa': 'modERN'})
testapp.patch_json(file_exp['@id'], {'assay_term_name': 'ChIP-seq'})
testapp.patch_json(file3['@id'], {'dataset': file_exp['@id'], 'file_format': 'bam',
'assembly': 'ce10', 'output_type': 'alignments'})
res = testapp.get(file3['@id'] + '@@index-data')
errors = res.json['audit']
errors_list = []
for error_type in errors:
errors_list.extend(errors[error_type])
assert any(error['category'] == 'missing step_run' for error in errors_list)
def test_audit_modERN_missing_derived_from(testapp, file_exp, file3, award, analysis_step_version_bam, analysis_step_bam, analysis_step_run_bam):
testapp.patch_json(award['@id'], {'rfa': 'modERN'})
testapp.patch_json(file_exp['@id'], {'assay_term_name': 'ChIP-seq'})
testapp.patch_json(file3['@id'], {'dataset': file_exp['@id'], 'file_format': 'bam', 'assembly': 'ce10',
'output_type': 'alignments', 'step_run': analysis_step_run_bam['@id']})
res = testapp.get(file3['@id'] + '@@index-data')
errors = res.json['audit']
errors_list = []
for error_type in errors:
errors_list.extend(errors[error_type])
assert any(error['category'] == 'missing derived_from' for error in errors_list)
def test_audit_modERN_wrong_step_run(testapp, file_exp, file3, file4, award, analysis_step_version_bam, analysis_step_bam, analysis_step_run_bam):
testapp.patch_json(award['@id'], {'rfa': 'modERN'})
testapp.patch_json(file_exp['@id'], {'assay_term_name': 'ChIP-seq'})
testapp.patch_json(file3['@id'], {'dataset': file_exp['@id'], 'file_format': 'bed',
'file_format_type': 'narrowPeak', 'output_type': 'peaks',
'step_run': analysis_step_run_bam['@id'], 'assembly': 'ce11',
'derived_from': [file4['@id']]})
res = testapp.get(file3['@id'] + '@@index-data')
errors = res.json['audit']
errors_list = []
for error_type in errors:
errors_list.extend(errors[error_type])
assert any(error['category'] == 'wrong step_run for peaks' for error in errors_list)
def test_audit_modERN_unexpected_step_run(testapp, file_exp, file2, award, analysis_step_run_bam):
testapp.patch_json(award['@id'], {'rfa': 'modERN'})
testapp.patch_json(file_exp['@id'], {'assay_term_name': 'ChIP-seq'})
testapp.patch_json(file2['@id'], {'dataset': file_exp['@id'], 'step_run': analysis_step_run_bam['@id']})
res = testapp.get(file2['@id'] + '@@index-data')
errors = res.json['audit']
errors_list = []
for error_type in errors:
errors_list.extend(errors[error_type])
assert any(error['category'] == 'unexpected step_run' for error in errors_list)
'''
def test_audit_file_assembly(testapp, file6, file7):
testapp.patch_json(file6['@id'], {'assembly': 'GRCh38'})
testapp.patch_json(file7['@id'], {'derived_from': [file6['@id']],
'assembly': 'hg19'})
res = testapp.get(file7['@id'] + '@@index-data')
errors = res.json['audit']
errors_list = []
for error_type in errors:
errors_list.extend(errors[error_type])
assert any(error['category'] == 'inconsistent assembly'
for error in errors_list)
def test_audit_file_step_run(testapp, bam_file, analysis_step_run_bam):
res = testapp.get(bam_file['@id'] + '@@index-data')
errors = res.json['audit']
errors_list = []
for error_type in errors:
errors_list.extend(errors[error_type])
assert any(error['category'] == 'missing analysis_step_run'
for error in errors_list)
testapp.patch_json(bam_file['@id'], {'step_run': analysis_step_run_bam['@id']})
res = testapp.get(bam_file['@id'] + '@@index-data')
errors = res.json['audit']
errors_list = []
for error_type in errors:
errors_list.extend(errors[error_type])
assert all(error['category'] != 'missing analysis_step_run'
for error in errors_list)
def test_audit_raw_file_step_run(testapp, file_fastq_2, file_subreads_posted, analysis_step_run_bam):
# https://encodedcc.atlassian.net/browse/ENCD-5927
res = testapp.get(file_fastq_2['@id'] + '@@index-data')
assert all(error['category'] != 'missing analysis_step_run'
for error in collect_audit_errors(res))
testapp.patch_json(file_fastq_2['@id'], {'derived_from': [file_subreads_posted['@id']]})
res = testapp.get(file_fastq_2['@id'] + '@@index-data')
assert any(error['category'] == 'missing analysis_step_run'
for error in collect_audit_errors(res))
testapp.patch_json(file_fastq_2['@id'], {'step_run': analysis_step_run_bam['@id']})
res = testapp.get(file_fastq_2['@id'] + '@@index-data')
assert all(error['category'] != 'missing analysis_step_run'
for error in collect_audit_errors(res))
def test_audit_file_derived_from_empty(testapp, file7):
res = testapp.get(file7['@id'] + '@@index-data')
errors = res.json['audit']
errors_list = []
for error_type in errors:
errors_list.extend(errors[error_type])
assert any(error['category'] == 'missing derived_from'
for error in errors_list)
def test_audit_file_bam_derived_from_no_fastq(testapp, file7, file6):
testapp.patch_json(file6['@id'], {'derived_from': [file7['@id']],
'status': 'released',
'file_format': 'bam',
'assembly': 'hg19'})
testapp.patch_json(
file7['@id'], {'file_format': 'tsv', 'assembly': 'hg19'})
res = testapp.get(file6['@id'] + '@@index-data')
errors = res.json['audit']
errors_list = []
for error_type in errors:
errors_list.extend(errors[error_type])
assert any(error['category'] == 'missing derived_from'
for error in errors_list)
def test_audit_file_bam_derived_from_fastq(testapp, file4, file6):
testapp.patch_json(file6['@id'], {'derived_from': [file4['@id']],
'status': 'released',
'file_format': 'bam',
'assembly': 'hg19'})
res = testapp.get(file6['@id'] + '@@index-data')
errors = res.json['audit']
errors_list = []
for error_type in errors:
errors_list.extend(errors[error_type])
assert all(error['category'] != 'missing derived_from'
for error in errors_list)
def test_audit_file_bam_derived_from_csfasta(testapp, file4, file6):
testapp.patch_json(file6['@id'], {'derived_from': [file4['@id']],
'status': 'released',
'file_format': 'bam',
'assembly': 'hg19'})
testapp.patch_json(file4['@id'], {'file_format': 'csfasta'})
res = testapp.get(file6['@id'] + '@@index-data')
errors = res.json['audit']
errors_list = []
for error_type in errors:
errors_list.extend(errors[error_type])
assert all(error['category'] != 'missing derived_from'
for error in errors_list)
def test_audit_file_bam_derived_from_csqual(testapp, file4, file6):
testapp.patch_json(file6['@id'], {'derived_from': [file4['@id']],
'status': 'released',
'file_format': 'bam',
'assembly': 'hg19'})
testapp.patch_json(file4['@id'], {'file_format': 'csqual'})
res = testapp.get(file6['@id'] + '@@index-data')
errors = res.json['audit']
errors_list = []
for error_type in errors:
errors_list.extend(errors[error_type])
assert all(error['category'] != 'missing derived_from'
for error in errors_list)
def test_audit_file_released_bam_derived_from_revoked_fastq(testapp, file4, file6):
testapp.patch_json(file6['@id'], {'derived_from': [file4['@id']],
'status': 'released',
'file_format': 'bam',
'assembly': 'hg19'})
testapp.patch_json(file4['@id'], {'status': 'revoked'})
res = testapp.get(file6['@id'] + '@@index-data')
errors = res.json['audit']
errors_list = []
for error_type in errors:
errors_list.extend(errors[error_type])
assert any(error['category'] == 'missing derived_from'
for error in errors_list)
def test_audit_file_deleted_bam_derived_from_revoked_fastq(testapp, file4, file6):
testapp.patch_json(file6['@id'], {'derived_from': [file4['@id']],
'status': 'deleted',
'file_format': 'bam',
'assembly': 'hg19'})
testapp.patch_json(file4['@id'], {'status': 'revoked'})
res = testapp.get(file6['@id'] + '@@index-data')
errors = res.json['audit']
errors_list = []
for error_type in errors:
errors_list.extend(errors[error_type])
assert all(error['category'] != 'missing derived_from'
for error in errors_list)
def test_audit_file_released_bam_derived_from_deleted_fastq(testapp, file4, file6):
testapp.patch_json(file6['@id'], {'derived_from': [file4['@id']],
'status': 'released',
'file_format': 'bam',
'assembly': 'hg19'})
testapp.patch_json(file4['@id'], {'status': 'deleted'})
res = testapp.get(file6['@id'] + '@@index-data')
errors = res.json['audit']
errors_list = []
for error_type in errors:
errors_list.extend(errors[error_type])
assert any(error['category'] == 'missing derived_from'
for error in errors_list)
def test_audit_file_deleted_bam_derived_from_deleted_fastq(testapp, file4, file6):
testapp.patch_json(file6['@id'], {'derived_from': [file4['@id']],
'status': 'deleted',
'file_format': 'bam',
'assembly': 'hg19'})
testapp.patch_json(file4['@id'], {'status': 'deleted'})
res = testapp.get(file6['@id'] + '@@index-data')
errors = res.json['audit']
errors_list = []
for error_type in errors:
errors_list.extend(errors[error_type])
assert all(error['category'] != 'missing derived_from'
for error in errors_list)
def test_audit_file_deleted_bam_derived_from_released_fastq(testapp, file4, file6):
testapp.patch_json(file6['@id'], {'derived_from': [file4['@id']],
'status': 'deleted',
'file_format': 'bam',
'assembly': 'hg19'})
testapp.patch_json(file4['@id'], {'status': 'released'})
res = testapp.get(file6['@id'] + '@@index-data')
errors = res.json['audit']
errors_list = []
for error_type in errors:
errors_list.extend(errors[error_type])
assert all(error['category'] != 'missing derived_from'
for error in errors_list)
def test_audit_file_missing_derived_from_ignores_replaced_bams(testapp, file4, file6):
testapp.patch_json(file6['@id'], {'derived_from': [file4['@id']],
'status': 'replaced',
'file_format': 'bam',
'assembly': 'hg19'})
testapp.patch_json(file4['@id'], {'status': 'deleted'})
res = testapp.get('/{}/@@index-data'.format(file6['uuid']))
errors = res.json['audit']
errors_list = []
for error_type in errors:
errors_list.extend(errors[error_type])
assert all(error['category'] != 'missing derived_from'
for error in errors_list)
def test_audit_file_bam_derived_from_bam_no_fastq(testapp, file7, file6):
testapp.patch_json(file6['@id'], {'derived_from': [file7['@id']],
'status': 'released',
'file_format': 'bam',
'assembly': 'hg19'})
testapp.patch_json(
file7['@id'], {'file_format': 'bam', 'assembly': 'hg19'})
res = testapp.get(file6['@id'] + '@@index-data')
errors = res.json['audit']
errors_list = []
for error_type in errors:
errors_list.extend(errors[error_type])
assert all(error['category'] != 'missing derived_from'
for error in errors_list)
def test_audit_file_bam_derived_from_revoked_bam_no_fastq(testapp, file7, file6):
testapp.patch_json(file6['@id'], {'derived_from': [file7['@id']],
'status': 'released',
'file_format': 'bam',
'assembly': 'hg19'})
testapp.patch_json(file7['@id'], {'file_format': 'bam',
'assembly': 'hg19',
'status': 'revoked'})
res = testapp.get(file6['@id'] + '@@index-data')
errors = res.json['audit']
errors_list = []
for error_type in errors:
errors_list.extend(errors[error_type])
assert any(error['category'] == 'missing derived_from'
for error in errors_list)
def test_audit_file_revoked_bam_derived_from_revoked_bam_no_fastq(testapp, file7, file6):
testapp.patch_json(file6['@id'], {'derived_from': [file7['@id']],
'status': 'revoked',
'file_format': 'bam',
'assembly': 'hg19'})
testapp.patch_json(file7['@id'], {'file_format': 'bam',
'assembly': 'hg19',
'status': 'revoked'})
res = testapp.get(file6['@id'] + '@@index-data')
errors = res.json['audit']
errors_list = []
for error_type in errors:
errors_list.extend(errors[error_type])
assert all(error['category'] != 'missing derived_from'
for error in errors_list)
def test_audit_file_bam_derived_from_different_experiment(testapp, file6, file4, file_exp):
testapp.patch_json(file4['@id'], {'dataset': file_exp['@id']})
testapp.patch_json(file6['@id'], {'derived_from': [file4['@id']],
'assembly': 'hg19',
'status': 'released'})
res = testapp.get(file6['@id'] + '@@index-data')
errors = res.json['audit']
errors_list = []
for error_type in errors:
errors_list.extend(errors[error_type])
assert any(error['category'] == 'inconsistent derived_from'
for error in errors_list)
'''
def test_audit_file_md5sum(testapp, file1):
testapp.patch_json(file1['@id'], {'md5sum': 'some_random_text'})
res = testapp.get(file1['@id'] + '@@index-data')
errors = res.json['audit']
errors_list = []
for error_type in errors:
errors_list.extend(errors[error_type])
assert any(error['category'] == 'inconsistent md5sum'
for error in errors_list)
'''
def test_audit_file_missing_derived_from_audit_with_made_up_status(testapp, file4, file6):
# This tests that a status that's not in STATUS_LEVEL dict won't break missing derived_from
# audit.
testapp.patch_json(file6['@id'], {'derived_from': [file4['@id']],
'status': 'released',
'file_format': 'bam',
'assembly': 'hg19'})
testapp.patch_json(file4['@id'] + '?validate=false',
{'status': 'new made up status'})
res = testapp.get(file6['@id'] + '@@index-data')
errors = res.json['audit']
errors_list = []
for error_type in errors:
errors_list.extend(errors[error_type])
assert all(error['category'] != 'missing derived_from'
for error in errors_list)
def test_audit_file_duplicate_quality_metrics(testapp,
file_exp,
file2,
file6,
chipseq_bam_quality_metric,
chipseq_bam_quality_metric_2,
analysis_step_run_bam):
testapp.patch_json(
file_exp['@id'],
{
'assay_term_name': 'ChIP-seq'
}
)
testapp.patch_json(
chipseq_bam_quality_metric['@id'],
{
'quality_metric_of': [file6['@id']],
'processing_stage': 'filtered'
}
)
testapp.patch_json(
chipseq_bam_quality_metric_2['@id'],
{
'quality_metric_of': [file6['@id']],
'processing_stage': 'filtered'
}
)
testapp.patch_json(
file6['@id'],
{
'dataset': file_exp['@id'],
'file_format': 'bam',
'output_type': 'alignments',
'assembly': 'GRCh38',
'derived_from': [file2['@id']],
'step_run': analysis_step_run_bam['@id']
}
)
res = testapp.get(file6['@id'] + '@@index-data')
errors = res.json['audit']
errors_list = [error for v in errors.values() for error in v]
assert any(
error['category'] == 'duplicate quality metric'
for error in errors_list
)
def test_audit_file_no_duplicate_quality_metrics(testapp,
file_exp,
file2,
file6,
chipseq_bam_quality_metric,
chipseq_bam_quality_metric_2,
analysis_step_run_bam):
testapp.patch_json(
file_exp['@id'],
{
'assay_term_name': 'ChIP-seq'
}
)
testapp.patch_json(
chipseq_bam_quality_metric['@id'],
{
'quality_metric_of': [file6['@id']],
'processing_stage': 'filtered'
}
)
testapp.patch_json(
chipseq_bam_quality_metric_2['@id'],
{
'quality_metric_of': [file6['@id']],
'processing_stage': 'unfiltered'
}
)
testapp.patch_json(
file6['@id'],
{
'dataset': file_exp['@id'],
'file_format': 'bam',
'output_type': 'alignments',
'assembly': 'GRCh38',
'derived_from': [file2['@id']],
'step_run': analysis_step_run_bam['@id']
}
)
res = testapp.get(file6['@id'] + '@@index-data')
errors = res.json['audit']
errors_list = [error for v in errors.values() for error in v]
assert all(
error['category'] != 'duplicate quality metric'
for error in errors_list
)
def test_audit_public_file_in_private_bucket(testapp, dummy_request, file_with_external_sheet):
testapp.patch_json(
file_with_external_sheet['@id'],
{
'status': 'released'
}
)
dummy_request.registry.settings['pds_public_bucket'] = 'pds_public_bucket_test'
dummy_request.registry.settings['pds_private_bucket'] = 'pds_private_bucket_test'
res = testapp.get(file_with_external_sheet['@id'] + '@@index-data')
errors = res.json['audit']
errors_list = [error for v in errors.values() for error in v if error['category'] == 'incorrect file bucket']
assert errors_list
assert errors_list[0]['detail'].split('to')[-1].strip() == 's3://pds_public_bucket_test/xyz.bed'
def test_audit_public_file_in_public_bucket(testapp, dummy_request, public_file_with_public_external_sheet):
dummy_request.registry.settings['pds_public_bucket'] = 'pds_public_bucket_test'
dummy_request.registry.settings['pds_private_bucket'] = 'pds_private_bucket_test'
res = testapp.get(public_file_with_public_external_sheet['@id'] + '@@index-data')
errors = res.json['audit']
errors_list = [error for v in errors.values() for error in v]
assert all([
error['category'] != 'incorrect file bucket'
for error in errors_list]
)
def test_audit_private_file_in_public_bucket(testapp, dummy_request, file_with_external_sheet):
testapp.patch_json(
file_with_external_sheet['@id'],
{
'status': 'deleted'
}
)
dummy_request.registry.settings['pds_public_bucket'] = 'pds_public_bucket_test'
dummy_request.registry.settings['pds_private_bucket'] = 'pds_private_bucket_test'
res = testapp.get(file_with_external_sheet['@id'] + '@@index-data')
errors = res.json['audit']
errors_list = [error for v in errors.values() for error in v]
assert errors_list
assert errors_list[0]['detail'].split('to')[-1].strip() == 's3://pds_private_bucket_test/xyz.bed'
def test_audit_file_statuses_in_s3_statuses(testapp):
    # Make sure public_s3_statuses and private_s3_statuses lists in File item include
    # all statuses in File schema, except content error, upload failed and uploading.
from encoded.types.file import File
public_s3_statuses = File.public_s3_statuses
private_s3_statuses = File.private_s3_statuses
assert public_s3_statuses
assert private_s3_statuses
file_schema = testapp.get('/profiles/file.json').json
file_statuses = file_schema.get('properties', {}).get('status', {}).get('enum')
assert file_statuses
file_statuses = [f for f in file_statuses if f not in ['content error', 'upload failed', 'uploading']]
    # If this fails, sync public/private_s3_statuses with the statuses in the file schema.
assert not set(file_statuses) - set(public_s3_statuses + private_s3_statuses)
def test_audit_incorrect_bucket_file_no_external_sheet(testapp, dummy_request, file_with_no_external_sheet):
testapp.patch_json(
file_with_no_external_sheet['@id'],
{
'status': 'released'
}
)
dummy_request.registry.settings['pds_public_bucket'] = 'pds_public_bucket_test'
dummy_request.registry.settings['pds_private_bucket'] = 'pds_private_bucket_test'
res = testapp.get(file_with_no_external_sheet['@id'] + '@@index-data')
errors = res.json['audit']
errors_list = [error for v in errors.values() for error in v if error['category'] == 'incorrect file bucket']
assert not errors_list
def test_audit_uploading_file_no_incorrect_bucket_audit(testapp, dummy_request, file_with_external_sheet):
testapp.patch_json(
file_with_external_sheet['@id'],
{
'status': 'uploading'
}
)
dummy_request.registry.settings['pds_public_bucket'] = 'pds_public_bucket_test'
dummy_request.registry.settings['pds_private_bucket'] = 'pds_private_bucket_test'
res = testapp.get(file_with_external_sheet['@id'] + '@@index-data')
errors = res.json['audit']
errors_list = [error for v in errors.values() for error in v if error['category'] == 'incorrect file bucket']
assert not errors_list
def test_audit_incorrect_file_bucket_no_audit_restricted_file(testapp, dummy_request, file_with_external_sheet):
testapp.patch_json(
file_with_external_sheet['@id'],
{
'status': 'released',
'restricted': True
}
)
dummy_request.registry.settings['pds_public_bucket'] = 'pds_public_bucket_test'
dummy_request.registry.settings['pds_private_bucket'] = 'pds_private_bucket_test'
res = testapp.get(file_with_external_sheet['@id'] + '@@index-data')
errors = res.json['audit']
errors_list = [error for v in errors.values() for error in v if error['category'] == 'incorrect file bucket']
assert not errors_list
def test_audit_incorrect_file_bucket_no_audit_no_file_available_true(testapp, dummy_request, file_with_external_sheet):
testapp.patch_json(
file_with_external_sheet['@id'],
{
'status': 'released',
'no_file_available': True
}
)
dummy_request.registry.settings['pds_public_bucket'] = 'pds_public_bucket_test'
dummy_request.registry.settings['pds_private_bucket'] = 'pds_private_bucket_test'
res = testapp.get(file_with_external_sheet['@id'] + '@@index-data')
errors = res.json['audit']
errors_list = [error for v in errors.values() for error in v if error['category'] == 'incorrect file bucket']
assert not errors_list
def test_audit_incorrect_file_bucket_has_audit_no_file_available_false(testapp, dummy_request, file_with_external_sheet):
testapp.patch_json(
file_with_external_sheet['@id'],
{
'status': 'released',
'no_file_available': False
}
)
dummy_request.registry.settings['pds_public_bucket'] = 'pds_public_bucket_test'
dummy_request.registry.settings['pds_private_bucket'] = 'pds_private_bucket_test'
res = testapp.get(file_with_external_sheet['@id'] + '@@index-data')
errors = res.json['audit']
errors_list = [error for v in errors.values() for error in v if error['category'] == 'incorrect file bucket']
assert errors_list
def test_audit_read_structure(testapp, file1_2):
testapp.patch_json(
file1_2['@id'],
{
'read_structure': [{
'sequence_element': 'adapter',
'start': -5,
'end': -1
}]
}
)
res = testapp.get(file1_2['@id'] + '@@index-data')
errors = [error for v in res.json['audit'].values() for error in v]
assert all(
error['category'] != 'invalid read_structure'
for error in errors
)
testapp.patch_json(
file1_2['@id'],
{
'read_structure': [{
'sequence_element': 'adapter',
'start': 0,
'end': 5
}]
}
)
res = testapp.get(file1_2['@id'] + '@@index-data')
errors = [error for v in res.json['audit'].values() for error in v]
assert any(
error['category'] == 'invalid read_structure'
and error['detail'].startswith('The read_stucture is 1-based.')
for error in errors
)
testapp.patch_json(
file1_2['@id'],
{
'read_structure': [{
'sequence_element': 'adapter',
'start': 1,
'end': -1
}]
}
)
res = testapp.get(file1_2['@id'] + '@@index-data')
errors = [error for v in res.json['audit'].values() for error in v]
assert any(
error['category'] == 'invalid read_structure'
and error['detail'].startswith(
'The start coordinate is bigger than the end coordinate'
)
for error in errors
)
def test_audit_matching_md5sum(testapp, file7, file6):
testapp.patch_json(
file7['@id'],
{
'matching_md5sum': [file6['@id']]
}
)
res = testapp.get(file7['@id'] + '@@index-data')
errors = res.json['audit']
errors_list = []
for error_type in errors:
errors_list.extend(errors[error_type])
assert any(error['category'] == 'inconsistent matching_md5sum'
for error in errors_list)
testapp.patch_json(
file6['@id'],
{
'lab': '/labs/encode-processing-pipeline/',
'md5sum': '91be74b6e11515394507f4ebfa66d78a',
}
)
res = testapp.get(file7['@id'] + '@@index-data')
errors = res.json['audit']
errors_list = []
for error_type in errors:
errors_list.extend(errors[error_type])
assert any(error['category'] == 'matching md5 sums'
for error in errors_list)
def test_audit_self_matching_md5sum(testapp, file7):
testapp.patch_json(
file7['@id'],
{
'matching_md5sum': [file7['@id']]
}
)
res = testapp.get(file7['@id'] + '@@index-data')
errors = res.json['audit']
errors_list = []
for error_type in errors:
errors_list.extend(errors[error_type])
assert any(error['category'] == 'inconsistent matching_md5sum'
for error in errors_list)
def test_audit_correct_index(testapp, fastq_index,
single_fastq_indexed,
pacbio_fastq_indexed,
second_fastq_indexed,
correct_paired_fastq_indexed):
# One SE fastq is allowed (must belong to same dataset)
res = testapp.get(fastq_index['@id'] + '@@index-data')
errors = res.json['audit']
errors_list = []
for error_type in errors:
errors_list.extend(errors[error_type])
assert 'inconsistent index file' not in (error['category']
for error in errors_list)
# One PacBio or Oxford Nanopore fastq is allowed
testapp.patch_json(
fastq_index['@id'],
{
'index_of': [
pacbio_fastq_indexed['@id']]
}
)
res = testapp.get(fastq_index['@id'] + '@@index-data')
errors = res.json['audit']
errors_list = []
for error_type in errors:
errors_list.extend(errors[error_type])
assert 'inconsistent index file' not in (error['category']
for error in errors_list)
# 2 PE fastq paired_with each other are allowed
testapp.patch_json(
fastq_index['@id'],
{
'index_of': [
second_fastq_indexed['@id'],
correct_paired_fastq_indexed['@id']]
}
)
res = testapp.get(fastq_index['@id'] + '@@index-data')
errors = res.json['audit']
errors_list = []
for error_type in errors:
errors_list.extend(errors[error_type])
assert 'inconsistent index file' not in (error['category']
for error in errors_list)
def test_audit_incorrect_index(testapp,
fastq_index,
single_fastq_indexed,
second_fastq_indexed,
incorrect_paired_fastq_indexed,
pacbio_fastq_indexed,
oxford_nanopore_fastq_indexed,
bam_file, ATAC_experiment):
# One SE and one PE fastq together is disallowed
testapp.patch_json(
fastq_index['@id'],
{
'index_of': [
single_fastq_indexed['@id'],
second_fastq_indexed['@id']]
}
)
res = testapp.get(fastq_index['@id'] + '@@index-data')
errors = res.json['audit']
errors_list = []
for error_type in errors:
errors_list.extend(errors[error_type])
assert any(error['category'] == 'inconsistent index file'
and 'both single- and paired-end fastq files' in error['detail']
for error in errors_list)
# Two PE fastq that aren't paired with each other is disallowed
testapp.patch_json(
fastq_index['@id'],
{
'index_of': [
incorrect_paired_fastq_indexed['@id'],
second_fastq_indexed['@id']]
}
)
res = testapp.get(fastq_index['@id'] + '@@index-data')
errors = res.json['audit']
errors_list = []
for error_type in errors:
errors_list.extend(errors[error_type])
assert any(error['category'] == 'inconsistent index file'
and 'fastq that are not paired with each other' in error['detail']
for error in errors_list)
# A PE fastq without its mate is disallowed
testapp.patch_json(
fastq_index['@id'],
{
'index_of': [
incorrect_paired_fastq_indexed['@id']]
}
)
res = testapp.get(fastq_index['@id'] + '@@index-data')
errors = res.json['audit']
errors_list = []
for error_type in errors:
errors_list.extend(errors[error_type])
assert any(error['category'] == 'inconsistent index file'
and 'index_of only one paired-end fastq file' in error['detail']
for error in errors_list)
# Any non-Illumina fastq with a Illumina fastq is disallowed
testapp.patch_json(
fastq_index['@id'],
{
'index_of': [
pacbio_fastq_indexed['@id'],
single_fastq_indexed['@id']]
}
)
res = testapp.get(fastq_index['@id'] + '@@index-data')
errors = res.json['audit']
errors_list = []
for error_type in errors:
errors_list.extend(errors[error_type])
assert any(error['category'] == 'inconsistent index file'
and 'fastq files sequenced using different platforms' in error['detail']
and 'multiple non-Illumina fastq' not in error['detail']
for error in errors_list)
# Any non-fastq file is disallowed
testapp.patch_json(
fastq_index['@id'],
{
'index_of': [
bam_file['@id']]
}
)
res = testapp.get(fastq_index['@id'] + '@@index-data')
errors = res.json['audit']
errors_list = []
for error_type in errors:
errors_list.extend(errors[error_type])
assert any(error['category'] == 'inconsistent index file'
and 'incorrectly specified for non-fastq file(s)' in error['detail']
for error in errors_list)
# Indexed files from other experiments are disallowed
testapp.patch_json(
single_fastq_indexed['@id'],
{
'dataset': ATAC_experiment['@id']
}
)
testapp.patch_json(
fastq_index['@id'],
{
'index_of': [
single_fastq_indexed['@id']]
}
)
res = testapp.get(fastq_index['@id'] + '@@index-data')
errors = res.json['audit']
errors_list = []
for error_type in errors:
errors_list.extend(errors[error_type])
assert any(error['category'] == 'inconsistent index file'
and 'is from experiment' in error['detail']
for error in errors_list)
# Two non-Illumina fastq are disallowed (PacBio, Oxford Nanopore)
testapp.patch_json(
fastq_index['@id'],
{
'index_of': [
pacbio_fastq_indexed['@id'],
oxford_nanopore_fastq_indexed['@id']]
}
)
res = testapp.get(fastq_index['@id'] + '@@index-data')
errors = res.json['audit']
errors_list = []
for error_type in errors:
errors_list.extend(errors[error_type])
assert any(error['category'] == 'inconsistent index file'
and 'multiple non-Illumina fastq' in error['detail']
for error in errors_list)
def test_audit_index_reads_read_structure(testapp, fastq_index):
res = testapp.get(fastq_index['@id'] + '@@index-data')
errors = res.json['audit']
errors_list = [error for v in errors.values() for error in v if error['category'] == 'missing read structure']
assert errors_list
testapp.patch_json(
fastq_index['@id'],
{
'read_structure': [{
'sequence_element': 'cell barcode',
'start': 1,
'end': 20
}]
}
)
res = testapp.get(fastq_index['@id'] + '@@index-data')
errors = [error for v in res.json['audit'].values() for error in v]
assert not any(
error['category'] == 'missing read structure' for error in errors
)
|
abd4d182e1819cc95acf8581af0d504a5fce607c
|
965efc4d7a83c2b5592417aa7e0d25a51f5a8108
|
/backend/metering_billing/migrations/0099_remove_historicalsubscriptionrecord_flat_fee_already_billed_and_more.py
|
3b2fe959860d72ffd18f663d9d3fc964ee2daa45
|
[
"MIT"
] |
permissive
|
uselotus/lotus
|
f4ee23bb828605215f18aacd1d6fcff8e0986c53
|
c065fb33ee1a870d72bbd2adfddc08d50ca049b6
|
refs/heads/main
| 2023-08-17T03:38:35.770580
| 2023-07-26T18:50:17
| 2023-07-26T18:50:17
| 516,192,901
| 1,447
| 100
|
MIT
| 2023-06-25T22:53:06
| 2022-07-21T02:06:46
|
Python
|
UTF-8
|
Python
| false
| false
| 4,680
|
py
|
0099_remove_historicalsubscriptionrecord_flat_fee_already_billed_and_more.py
|
# Generated by Django 4.0.5 on 2022-12-05 00:29
import django.db.models.deletion
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
(
"metering_billing",
"0098_rename_plan_version_historicalsubscriptionrecord_billing_plan_and_more",
),
]
operations = [
migrations.RemoveField(
model_name="historicalsubscriptionrecord",
name="flat_fee_already_billed",
),
migrations.RemoveField(
model_name="historicalsubscriptionrecord",
name="prorated_flat_costs_dict",
),
migrations.RemoveField(
model_name="invoicelineitem",
name="associated_plan_version",
),
migrations.RemoveField(
model_name="subscriptionrecord",
name="flat_fee_already_billed",
),
migrations.RemoveField(
model_name="subscriptionrecord",
name="prorated_flat_costs_dict",
),
migrations.AddField(
model_name="historicalsubscriptionrecord",
name="flat_fee_behavior",
field=models.CharField(
choices=[
("refund", "Refund"),
("prorate", "Prorate"),
("charge_full", "Charge Full"),
],
default="prorate",
max_length=20,
),
),
migrations.AddField(
model_name="historicalsubscriptionrecord",
name="invoice_usage_charges",
field=models.BooleanField(default=True),
),
migrations.AddField(
model_name="invoicelineitem",
name="associated_subscription_record",
field=models.ForeignKey(
null=True,
on_delete=django.db.models.deletion.CASCADE,
related_name="line_items",
to="metering_billing.subscription",
),
),
migrations.AddField(
model_name="invoicelineitem",
name="chargeable_item_type",
field=models.CharField(
blank=True,
choices=[
("usage_charge", "Usage Charge"),
("recurring_charge", "Recurring Charge"),
("one_time_charge", "One Time Charge"),
("plan_adjustment", "Plan Adjustment"),
("customer_adjustment", "Customer Adjustment"),
],
max_length=40,
null=True,
),
),
migrations.AddField(
model_name="subscriptionrecord",
name="flat_fee_behavior",
field=models.CharField(
choices=[
("refund", "Refund"),
("prorate", "Prorate"),
("charge_full", "Charge Full"),
],
default="prorate",
max_length=20,
),
),
migrations.AddField(
model_name="subscriptionrecord",
name="invoice_usage_charges",
field=models.BooleanField(default=True),
),
migrations.AlterField(
model_name="historicalinvoice",
name="subscription",
field=models.ForeignKey(
blank=True,
db_constraint=False,
null=True,
on_delete=django.db.models.deletion.DO_NOTHING,
related_name="+",
to="metering_billing.subscription",
),
),
migrations.AlterField(
model_name="invoice",
name="subscription",
field=models.ForeignKey(
null=True,
on_delete=django.db.models.deletion.CASCADE,
related_name="invoices",
to="metering_billing.subscription",
),
),
migrations.AlterField(
model_name="invoicelineitem",
name="billing_type",
field=models.CharField(
blank=True,
choices=[("in_arrears", "In Arrears"), ("in_advance", "In Advance")],
max_length=40,
null=True,
),
),
migrations.AlterField(
model_name="invoicelineitem",
name="invoice",
field=models.ForeignKey(
null=True,
on_delete=django.db.models.deletion.CASCADE,
related_name="line_items",
to="metering_billing.invoice",
),
),
]
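# Hedged sketch (an addition, not generated by Django): on the model side, the
# AddField operations above correspond to declarations like the following on
# SubscriptionRecord; the surrounding model body is illustrative only.
#
#     class SubscriptionRecord(models.Model):
#         flat_fee_behavior = models.CharField(
#             choices=[
#                 ("refund", "Refund"),
#                 ("prorate", "Prorate"),
#                 ("charge_full", "Charge Full"),
#             ],
#             default="prorate",
#             max_length=20,
#         )
#         invoice_usage_charges = models.BooleanField(default=True)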
|
f7e142bac8ccf9296c23720b3ecd995b95e7d766
|
5b6ba0f288b1e2ac236af846a9bf546a63228476
|
/mmtbx/refinement/real_space/weight.py
|
ac2bfd3f8ca5020afd95b3b3edc68d8b70d76ccb
|
[
"BSD-3-Clause-LBNL"
] |
permissive
|
cctbx/cctbx_project
|
5b547b416cadbdf95cca21dace9f54272a08d98a
|
7f4dfb6c873fd560920f697cbfd8a5ff6eed82fa
|
refs/heads/master
| 2023-08-17T17:44:05.077010
| 2023-08-16T22:40:22
| 2023-08-16T22:40:22
| 39,508,026
| 206
| 131
|
NOASSERTION
| 2023-09-14T17:12:55
| 2015-07-22T13:36:27
|
Python
|
UTF-8
|
Python
| false
| false
| 4,093
|
py
|
weight.py
|
from __future__ import absolute_import, division, print_function
from cctbx.array_family import flex
import mmtbx.refinement.real_space.individual_sites
import random
from six.moves import range
class run(object):
def __init__(
self,
map_data,
xray_structure,
pdb_hierarchy,
geometry_restraints_manager,
gradients_method="fd",
ncs_groups=None,
rms_bonds_limit=0.015,
rms_angles_limit=2.0,
real_space_gradients_delta=1./4,
max_iterations = 100,
range_size=10,
n_ranges=10,
default_weight=50):
"""
Fast determination of optimal data/restraints weight for real-space refinement
of individual sites.
"""
self.msg_strings = []
# split chains into chunks
result = []
for model in pdb_hierarchy.models():
for chain in model.chains():
if(chain.is_protein() or chain.is_na()):
residue_range_sel = flex.size_t()
cntr = 0
for rg in chain.residue_groups():
i_seqs = rg.atoms().extract_i_seq()
cntr += 1
if(cntr<10):
residue_range_sel.extend(i_seqs)
else:
result.append(residue_range_sel)
residue_range_sel = flex.size_t()
residue_range_sel.extend(i_seqs)
cntr = 0
if(len(result)==0):
assert residue_range_sel.size()>0
result.append(residue_range_sel)
self.msg_strings.append("number of chunks: %d"%len(result))
# randomly pick chunks
random_chunks = []
if(len(result)>0):
if len(result) <= n_ranges:
# just try them all, no need to randomize
random_chunks = list(range(len(result)))
else:
while len(random_chunks) <= n_ranges:
          # Add only unique choices until we have enough chunks.
          # Rejection sampling; may be slow when n_ranges is close to len(result).
rc = random.choice(range(len(result)))
if rc not in random_chunks:
random_chunks.append(rc)
    self.msg_strings.append("random chunks: %s" % random_chunks)
# setup refinery
xrs_dc = xray_structure.deep_copy_scatterers()
sel_all = flex.bool(xrs_dc.scatterers().size(), True)
grm_dc = geometry_restraints_manager.select(sel_all)
ro = mmtbx.refinement.real_space.individual_sites.box_refinement_manager(
xray_structure = xrs_dc,
target_map = map_data,
geometry_restraints_manager = grm_dc.geometry,
real_space_gradients_delta = real_space_gradients_delta,
max_iterations = max_iterations,
ncs_groups = ncs_groups,
gradients_method = gradients_method)
optimal_weights = flex.double()
# loop over chunks: determine best weight for each chunk
if(len(result)==0):
random_chunks = [None]
for chunk in random_chunks:
if(chunk is None): sel = flex.bool(xrs_dc.scatterers().size(), True)
else:
sel = result[chunk]
sel = flex.bool(xrs_dc.scatterers().size(), sel)
ro.refine(
selection = sel,
rms_bonds_limit = rms_bonds_limit,
rms_angles_limit = rms_angles_limit)
self.msg_strings.append("chunk %s optimal weight: %9.4f"%(
str(chunk), ro.weight_optimal))
if(ro.weight_optimal is not None):
optimal_weights.append(ro.weight_optimal)
# select overall best weight
sel = flex.sort_permutation(optimal_weights)
optimal_weights = optimal_weights.select(sel)
self.weight = flex.mean_default(
optimal_weights[:optimal_weights.size()//2], default_weight)
#mean = flex.mean(optimal_weights)
#sel = optimal_weights < mean*3
#sel &= optimal_weights > mean/3
#if(sel.count(True)>0):
# optimal_weights = optimal_weights.select(sel)
#self.weight = flex.mean_default(optimal_weights, default_weight)
self.msg_strings.append("overall best weight: %9.4f"%self.weight)
def show(self, log, prefix=""):
for m in self.msg_strings:
print("%s %s"%(prefix, m), file=log)
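# Hedged illustration (an addition, not part of mmtbx): the overall weight
# above is the mean of the lower half of the sorted per-chunk optima, with a
# fallback to default_weight when that half is empty. In plain Python:
def _overall_weight_example(optimal_weights, default_weight=50):
    ws = sorted(optimal_weights)
    lower_half = ws[:len(ws) // 2]
    if not lower_half:  # mirrors flex.mean_default's fallback
        return default_weight
    return sum(lower_half) / len(lower_half)

assert _overall_weight_example([]) == 50
assert _overall_weight_example([10.0, 20.0, 30.0, 40.0]) == 15.0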
|
dcee52d512abcdf976b383a95e3876aa14bbfbd5
|
ab85c30d0dbcf7e369fe6116100501f4b30e1f43
|
/lagom/experimental/definitions.py
|
0ef8f2ba392788ee7a9fd2d7a6fc7c4343784a49
|
[
"MIT"
] |
permissive
|
meadsteve/lagom
|
3bac4a87dc68f17abb0c1f867f6d93f9e9a56804
|
e8929861f18a0d295cdcc6385a898319c7e2f702
|
refs/heads/master
| 2023-07-22T08:05:27.086831
| 2023-07-17T09:15:57
| 2023-07-17T09:15:57
| 189,738,708
| 203
| 11
|
MIT
| 2023-07-17T09:15:59
| 2019-06-01T13:54:42
|
Python
|
UTF-8
|
Python
| false
| false
| 644
|
py
|
definitions.py
|
"""
Similar to the main definitions module but these definitions do not
yet have a stable interface.
"""
from ..definitions import X, ConstructionWithContainer
from ..interfaces import SpecialDepDefinition, ReadableContainer
class PlainFunction(SpecialDepDefinition[X]):
"""Preserves a function without any dep injection performed on it"""
callable_func: X
def __init__(self, callable_func: X):
""""""
self.callable_func = callable_func
def get_instance(self, _container: ReadableContainer) -> X:
return self.callable_func
class AsyncConstructionWithContainer(ConstructionWithContainer):
pass
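# Hedged usage sketch (an addition): PlainFunction hands the wrapped callable
# back untouched, so get_instance returns the function itself rather than
# calling it. The container argument is ignored (note the _container name), so
# any value works for this illustration.
def _greet(name: str) -> str:
    return "hello " + name

assert PlainFunction(_greet).get_instance(None) is _greet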
|
63eb90ee19440927951bcfc43c958d4f4c25fb4f
|
52b89a544a47d514cd1e15196ee6cf01c1148072
|
/aws_lambda_builders/workflows/java_maven/maven_resolver.py
|
3a0a1edf0bb7fc35d7a4963c6c5ca54c6b6c99bf
|
[
"Apache-2.0"
] |
permissive
|
aws/aws-lambda-builders
|
1c2d8c32d464a2326fa9685c514a915ff8c2f427
|
c90c9bc8953aa684a23a3b438c7a71cc41cce809
|
refs/heads/develop
| 2023-08-31T02:19:45.135785
| 2023-08-30T17:12:27
| 2023-08-30T17:12:27
| 156,287,142
| 144
| 65
|
Apache-2.0
| 2023-09-11T19:05:13
| 2018-11-05T21:42:39
|
Python
|
UTF-8
|
Python
| false
| false
| 632
|
py
|
maven_resolver.py
|
"""
Maven executable resolution
"""
from aws_lambda_builders.workflows.java.utils import OSUtils
class MavenResolver(object):
def __init__(self, executable_search_paths=None, os_utils=None):
self.binary = "mvn"
self.executables = [self.binary]
self.executable_search_paths = executable_search_paths
self.os_utils = os_utils if os_utils else OSUtils()
@property
def exec_paths(self):
paths = self.os_utils.which("mvn", executable_search_paths=self.executable_search_paths)
if not paths:
raise ValueError("No Maven executable found!")
return paths
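# Hedged usage sketch (an addition, not part of the package): exec_paths
# delegates to OSUtils.which, so a stub with the same signature is enough to
# exercise the lookup; _StubOSUtils and the path below are illustrative.
class _StubOSUtils:
    def which(self, executable, executable_search_paths=None):
        return ["/usr/local/bin/mvn"]

resolver = MavenResolver(os_utils=_StubOSUtils())
assert resolver.exec_paths == ["/usr/local/bin/mvn"]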
|
26ffd37e07d3e053e4bfc852ec6167d951e60650
|
20283acdba47ebcdc339dee36a4aba07064add73
|
/web/ckpts/dumper/checkpoint_dumper.py
|
0152ac75cd2d38076bf9fac193ca12b16fc1bad8
|
[
"MIT"
] |
permissive
|
chrisdonahue/wavegan
|
95bc928f28c32f5cbdf63e2662335f12a1394bf0
|
40e85e8adf6207ee194e976f844e257970c6c5dc
|
refs/heads/master
| 2022-12-02T11:44:32.063120
| 2022-11-27T22:29:32
| 2022-11-27T22:29:32
| 120,957,530
| 1,278
| 289
|
MIT
| 2019-03-05T05:42:19
| 2018-02-09T21:27:42
|
Python
|
UTF-8
|
Python
| false
| false
| 3,897
|
py
|
checkpoint_dumper.py
|
# Copyright 2018 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""This script defines CheckpointDumper class.
This class serves as a base class for other deeplearning checkpoint dumper
classes and defines common methods, attributes etc.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import json
import os
import re
import string
class CheckpointDumper(object):
"""Base Checkpoint Dumper class.
Attributes
----------
checkpoint_file : str
Path to the model checkpoint
FILENAME_CHARS : str
Allowed file char names
manifest : dict
Manifest file defining variables
output_dir : str
Output directory path
remove_variables_regex : str
Regex expression for variables to be ignored
remove_variables_regex_re : sre.SRE_Pattern
Compiled `remove variable` regex
"""
FILENAME_CHARS = string.ascii_letters + string.digits + '_'
def __init__(self, checkpoint_file, output_dir, remove_variables_regex):
"""Constructs object for Checkpoint Dumper.
Parameters
----------
checkpoint_file : str
Path to the model checkpoint
output_dir : str
Output directory path
remove_variables_regex : str
Regex expression for variables to be ignored
"""
self.checkpoint_file = os.path.expanduser(checkpoint_file)
self.output_dir = os.path.expanduser(output_dir)
self.remove_variables_regex = remove_variables_regex
self.manifest = {}
self.remove_variables_regex_re = re.compile(self.remove_variables_regex)
self.make_dir(self.output_dir)
@staticmethod
def make_dir(directory):
"""Makes directory if not existing.
Parameters
----------
directory : str
Path to directory
"""
if not os.path.exists(directory):
os.makedirs(directory)
def should_ignore(self, name):
"""Checks whether name should be ignored or not.
Parameters
----------
name : str
Name to be checked
Returns
-------
bool
Whether to ignore the name or not
"""
return self.remove_variables_regex and re.match(self.remove_variables_regex_re, name)
def dump_weights(self, variable_name, filename, shape, weights):
"""Creates a file with given name and dumps byte weights in it.
Parameters
----------
variable_name : str
Name of given variable
filename : str
File name for given variable
shape : list
Shape of given variable
weights : ndarray
Weights for given variable
"""
self.manifest[variable_name] = {'filename': filename, 'shape': shape}
print('Writing variable ' + variable_name + '...')
with open(os.path.join(self.output_dir, filename), 'wb') as f:
f.write(weights.tobytes())
def dump_manifest(self, filename='manifest.json'):
"""Creates a manifest file with given name and dumps meta information
related to model.
Parameters
----------
filename : str, optional
Manifest file name
"""
manifest_fpath = os.path.join(self.output_dir, filename)
print('Writing manifest to ' + manifest_fpath)
with open(manifest_fpath, 'w') as f:
f.write(json.dumps(self.manifest, indent=2, sort_keys=True))
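# Hedged usage sketch (an addition): dumping one weight tensor plus the
# manifest; the checkpoint path, output directory, variable name, and regex
# below are all illustrative.
if __name__ == '__main__':
    import numpy as np

    dumper = CheckpointDumper(
        checkpoint_file='model.ckpt',
        output_dir='./dumped_weights',
        remove_variables_regex=r'.*Adam.*')  # e.g. skip optimizer slots
    if not dumper.should_ignore('dense/kernel'):
        weights = np.zeros((4, 4), dtype=np.float32)
        dumper.dump_weights('dense/kernel', 'dense_kernel',
                            list(weights.shape), weights)
    dumper.dump_manifest()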
|
7ed4d4bbca4037e520a188d297a31a0970f3e516
|
554718851656376ad2bceb282de30459167ffeb2
|
/smdebug/core/reader.py
|
37b83cd3e4edec88680c058fa8638611d20e45ce
|
[
"Apache-2.0"
] |
permissive
|
awslabs/sagemaker-debugger
|
d6ae6a6177a6cb457972772e2b3021e8a9dcc621
|
37ecf0aaeb24ab2adbe7f0ad664d0e50fa4154f2
|
refs/heads/master
| 2023-09-05T05:20:02.458427
| 2023-04-20T20:48:11
| 2023-04-20T20:48:11
| 222,554,670
| 162
| 89
|
Apache-2.0
| 2023-08-23T14:31:27
| 2019-11-18T22:12:36
|
Python
|
UTF-8
|
Python
| false
| false
| 3,531
|
py
|
reader.py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""APIs for logging data in the event file."""
# First Party
from smdebug.core.tfevent.event_file_reader import EventFileReader, get_tensor_data
from smdebug.exceptions import SMDebugNotImplementedError
# Local
from .utils import match_inc
class FileReader:
def __init__(self, fname, wtype="tfevent"):
"""Creates a `FileWriter` and an file.
On construction the summary writer creates a new event file in `logdir`.
Parameters
----------
logdir : str
Directory where event file will be written.
max_queue : int
Size of the queue for pending events and summaries.
flush_secs: Number
How often, in seconds, to flush the pending events and summaries to disk.
filename_suffix : str
Every event file's name is suffixed with `filename_suffix` if provided.
"""
if wtype == "tfevent":
self._reader = EventFileReader(fname=fname)
else:
raise SMDebugNotImplementedError("Only tfevent format is supported for now.")
def __enter__(self):
"""Make usable with "with" statement."""
return self
def __exit__(self, exc_type, exc_value, traceback):
"""Make usable with "with" statement."""
self._reader.__exit__(exc_type, exc_value, traceback)
def read_tensors(self, check="minimal"):
if check is True:
check = "minimal"
return self._reader.read_tensors(check=check)
def read_events(self, check="minimal", regex_list=None):
"""
Args:
check: default value = 'minimal'
regex_list: default value = None
When check is 'minimal' the crc checksum of the read payload is compared with CHECKSUM_MAGIC_BYTES.
Returns: List of scalar events. Each scalar event is a dictionary containing following keys:
scalar_event{
"timestamp"
"step"
"name"
"value"
}
"""
if check.lower() == "minimal":
check = "minimal"
tf_events = self._reader.read_events(check=check)
scalar_events = list()
for tf_event in tf_events:
for v in tf_event.summary.value:
event_name = v.tag
if regex_list is None or match_inc(event_name, regex_list):
scalar_events.append(
{
"timestamp": tf_event.wall_time,
"step": tf_event.step,
"name": event_name,
"value": get_tensor_data(v.tensor),
}
)
return scalar_events
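# Hedged usage sketch (an addition): FileReader is a context manager, so a
# typical read loop looks like the following; 'events.tfevents' is an
# illustrative file name.
#
#     with FileReader(fname="events.tfevents") as reader:
#         for event in reader.read_events(check="minimal"):
#             print(event["step"], event["name"], event["value"])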
|
2263f1200fc2f1d3b9e545d5cfc7f55057ef5c1a
|
fb05fb9f9f7fe7eb91072ad62c10200cae10acc6
|
/src/borg/helpers/process.py
|
ff828f30a9c2a8e91bfc3c5a517c367f826a5c27
|
[
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
borgbackup/borg
|
c83f2a34e8bcc19859e9696a9425cbf4e23a743c
|
4ded3620c5e9cd930d2e07e912af6c894abe6d5d
|
refs/heads/master
| 2023-09-03T20:36:44.300124
| 2023-09-03T18:49:50
| 2023-09-03T18:49:50
| 35,517,126
| 10,379
| 1,053
|
NOASSERTION
| 2023-09-14T21:52:33
| 2015-05-12T23:10:47
|
Python
|
UTF-8
|
Python
| false
| false
| 14,327
|
py
|
process.py
|
import contextlib
import os
import os.path
import shlex
import signal
import subprocess
import sys
import time
import traceback
from .. import __version__
from ..platformflags import is_win32
from ..logger import create_logger
logger = create_logger()
from ..helpers import EXIT_SUCCESS, EXIT_WARNING, EXIT_SIGNAL_BASE, Error
@contextlib.contextmanager
def _daemonize():
from ..platform import get_process_id
old_id = get_process_id()
pid = os.fork()
if pid:
exit_code = EXIT_SUCCESS
try:
yield old_id, None
except _ExitCodeException as e:
exit_code = e.exit_code
finally:
logger.debug("Daemonizing: Foreground process (%s, %s, %s) is now dying." % old_id)
os._exit(exit_code)
os.setsid()
pid = os.fork()
if pid:
os._exit(0)
os.chdir("/")
os.close(0)
os.close(1)
fd = os.open(os.devnull, os.O_RDWR)
os.dup2(fd, 0)
os.dup2(fd, 1)
new_id = get_process_id()
try:
yield old_id, new_id
finally:
# Close / redirect stderr to /dev/null only now
# for the case that we want to log something before yield returns.
os.close(2)
os.dup2(fd, 2)
def daemonize():
"""Detach process from controlling terminal and run in background
Returns: old and new get_process_id tuples
"""
with _daemonize() as (old_id, new_id):
return old_id, new_id
@contextlib.contextmanager
def daemonizing(*, timeout=5):
"""Like daemonize(), but as context manager.
The with-body is executed in the background process,
while the foreground process survives until the body is left
or the given timeout is exceeded. In the latter case a warning is
reported by the foreground.
Context variable is (old_id, new_id) get_process_id tuples.
An exception raised in the body is reported by the foreground
as a warning as well as propagated outside the body in the background.
In case of a warning, the foreground exits with exit code EXIT_WARNING
instead of EXIT_SUCCESS.
"""
with _daemonize() as (old_id, new_id):
if new_id is None:
# The original / parent process, waiting for a signal to die.
logger.debug("Daemonizing: Foreground process (%s, %s, %s) is waiting for background process..." % old_id)
exit_code = EXIT_SUCCESS
# Indeed, SIGHUP and SIGTERM handlers should have been set on archiver.run(). Just in case...
with signal_handler("SIGINT", raising_signal_handler(KeyboardInterrupt)), signal_handler(
"SIGHUP", raising_signal_handler(SigHup)
), signal_handler("SIGTERM", raising_signal_handler(SigTerm)):
try:
if timeout > 0:
time.sleep(timeout)
except SigTerm:
# Normal termination; expected from grandchild, see 'os.kill()' below
pass
except SigHup:
# Background wants to indicate a problem; see 'os.kill()' below,
# log message will come from grandchild.
exit_code = EXIT_WARNING
except KeyboardInterrupt:
# Manual termination.
logger.debug("Daemonizing: Foreground process (%s, %s, %s) received SIGINT." % old_id)
exit_code = EXIT_SIGNAL_BASE + 2
except BaseException as e:
# Just in case...
logger.warning(
"Daemonizing: Foreground process received an exception while waiting:\n"
+ "".join(traceback.format_exception(e.__class__, e, e.__traceback__))
)
exit_code = EXIT_WARNING
else:
logger.warning("Daemonizing: Background process did not respond (timeout). Is it alive?")
exit_code = EXIT_WARNING
finally:
# Don't call with-body, but die immediately!
# return would be sufficient, but we want to pass the exit code.
raise _ExitCodeException(exit_code)
# The background / grandchild process.
sig_to_foreground = signal.SIGTERM
logger.debug("Daemonizing: Background process (%s, %s, %s) is starting..." % new_id)
try:
yield old_id, new_id
except BaseException as e:
sig_to_foreground = signal.SIGHUP
logger.warning(
"Daemonizing: Background process raised an exception while starting:\n"
+ "".join(traceback.format_exception(e.__class__, e, e.__traceback__))
)
raise e
else:
logger.debug("Daemonizing: Background process (%s, %s, %s) has started." % new_id)
finally:
try:
os.kill(old_id[1], sig_to_foreground)
except BaseException as e:
logger.error(
"Daemonizing: Trying to kill the foreground process raised an exception:\n"
+ "".join(traceback.format_exception(e.__class__, e, e.__traceback__))
)
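def _example_daemonizing_usage():
    # Hedged usage sketch (not part of the original module): enter daemonizing()
    # and do the daemon's work in the background process. `serve_forever` is a
    # hypothetical stand-in for the real payload.
    def serve_forever():
        while True:
            time.sleep(60)
    with daemonizing(timeout=5) as (old_id, new_id):
        # this body already runs in the background (grandchild) process; the
        # foreground process exits once it is signalled in the finally clause above
        logger.debug("daemon %s started (was %s)" % (new_id, old_id))
        serve_forever()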
class _ExitCodeException(BaseException):
def __init__(self, exit_code):
self.exit_code = exit_code
class SignalException(BaseException):
"""base class for all signal-based exceptions"""
class SigHup(SignalException):
"""raised on SIGHUP signal"""
class SigTerm(SignalException):
"""raised on SIGTERM signal"""
@contextlib.contextmanager
def signal_handler(sig, handler):
"""
when entering context, set up signal handler <handler> for signal <sig>.
when leaving context, restore original signal handler.
    <sig> can be either a str giving a signal.SIGXXX attribute name (it
    won't crash if the attribute name does not exist, as some names are platform
    specific) or an int giving a signal number.
    <handler> is any handler value accepted by signal.signal(sig, handler).
"""
if isinstance(sig, str):
sig = getattr(signal, sig, None)
if sig is not None:
orig_handler = signal.signal(sig, handler)
try:
yield
finally:
if sig is not None:
signal.signal(sig, orig_handler)
def raising_signal_handler(exc_cls):
def handler(sig_no, frame):
        # setting SIG_IGN prevents an incoming second signal of this
        # kind from raising a 2nd exception while we are still processing
        # the exception handler for exc_cls for the 1st signal.
signal.signal(sig_no, signal.SIG_IGN)
raise exc_cls
return handler
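def _example_signal_handler_usage():
    # Hedged usage sketch (not part of the original module): temporarily turn
    # SIGTERM into a SigTerm exception, mirroring the pattern used inside
    # daemonizing() above.
    with signal_handler("SIGTERM", raising_signal_handler(SigTerm)):
        try:
            time.sleep(1)
        except SigTerm:
            logger.debug("got SIGTERM while waiting")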
class SigIntManager:
def __init__(self):
self._sig_int_triggered = False
self._action_triggered = False
self._action_done = False
self.ctx = signal_handler("SIGINT", self.handler)
def __bool__(self):
# this will be True (and stay True) after the first Ctrl-C/SIGINT
return self._sig_int_triggered
def action_triggered(self):
# this is True to indicate that the action shall be done
return self._action_triggered
def action_done(self):
# this will be True after the action has completed
return self._action_done
def action_completed(self):
# this must be called when the action triggered is completed,
# to avoid repeatedly triggering the action.
self._action_triggered = False
self._action_done = True
def handler(self, sig_no, stack):
# handle the first ctrl-c / SIGINT.
self.__exit__(None, None, None)
self._sig_int_triggered = True
self._action_triggered = True
def __enter__(self):
self.ctx.__enter__()
def __exit__(self, exception_type, exception_value, traceback):
# restore the original ctrl-c handler, so the next ctrl-c / SIGINT does the normal thing:
if self.ctx:
self.ctx.__exit__(exception_type, exception_value, traceback)
self.ctx = None
# global flag which might trigger some special behaviour on first ctrl-c / SIGINT,
# e.g. if this is interrupting "borg create", it shall try to create a checkpoint.
sig_int = SigIntManager()
def ignore_sigint():
"""
Ignore SIGINT, see also issue #6912.
Ctrl-C will send a SIGINT to both the main process (borg) and subprocesses
(e.g. ssh for remote ssh:// repos), but often we do not want the subprocess
getting killed (e.g. because it is still needed to shut down borg cleanly).
To avoid that: Popen(..., preexec_fn=ignore_sigint)
"""
signal.signal(signal.SIGINT, signal.SIG_IGN)
def popen_with_error_handling(cmd_line: str, log_prefix="", **kwargs):
"""
Handle typical errors raised by subprocess.Popen. Return None if an error occurred,
otherwise return the Popen object.
*cmd_line* is split using shlex (e.g. 'gzip -9' => ['gzip', '-9']).
Log messages will be prefixed with *log_prefix*; if set, it should end with a space
(e.g. log_prefix='--some-option: ').
Does not change the exit code.
"""
assert not kwargs.get("shell"), "Sorry pal, shell mode is a no-no"
try:
command = shlex.split(cmd_line)
if not command:
raise ValueError("an empty command line is not permitted")
except ValueError as ve:
logger.error("%s%s", log_prefix, ve)
return
logger.debug("%scommand line: %s", log_prefix, command)
try:
return subprocess.Popen(command, **kwargs)
except FileNotFoundError:
logger.error("%sexecutable not found: %s", log_prefix, command[0])
return
except PermissionError:
logger.error("%spermission denied: %s", log_prefix, command[0])
return
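def _example_popen_with_error_handling():
    # Hedged usage sketch (not part of the original module): start a compressor
    # and feed it some bytes; the helper logs and returns None instead of
    # raising if the command line is bad or the binary is missing.
    proc = popen_with_error_handling(
        "gzip -9", log_prefix="example: ", stdin=subprocess.PIPE, stdout=subprocess.PIPE
    )
    if proc is not None:
        compressed, _ = proc.communicate(b"some data")
        return compressed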
def is_terminal(fd=sys.stdout):
return hasattr(fd, "isatty") and fd.isatty() and (not is_win32 or "ANSICON" in os.environ)
def prepare_subprocess_env(system, env=None):
"""
Prepare the environment for a subprocess we are going to create.
:param system: True for preparing to invoke system-installed binaries,
False for stuff inside the pyinstaller environment (like borg, python).
:param env: optionally give a environment dict here. if not given, default to os.environ.
:return: a modified copy of the environment
"""
env = dict(env if env is not None else os.environ)
if system:
        # a pyinstaller binary's bootloader modifies LD_LIBRARY_PATH=/tmp/_MEIXXXXXX,
        # but we do not want system binaries (like ssh) to pick up
        # (non-matching) libraries from there.
        # thus we restore the original LDLP, as it was before pyinstaller modified it:
lp_key = "LD_LIBRARY_PATH"
lp_orig = env.get(lp_key + "_ORIG") # pyinstaller >= 20160820 / v3.2.1 has this
if lp_orig is not None:
env[lp_key] = lp_orig
else:
# We get here in 2 cases:
# 1. when not running a pyinstaller-made binary.
# in this case, we must not kill LDLP.
# 2. when running a pyinstaller-made binary and there was no LDLP
# in the original env (in this case, the pyinstaller bootloader
# does *not* put ..._ORIG into the env either).
# in this case, we must kill LDLP.
# We can recognize this via sys.frozen and sys._MEIPASS being set.
lp = env.get(lp_key)
if lp is not None and getattr(sys, "frozen", False) and hasattr(sys, "_MEIPASS"):
env.pop(lp_key)
# security: do not give secrets to subprocess
env.pop("BORG_PASSPHRASE", None)
# for information, give borg version to the subprocess
env["BORG_VERSION"] = __version__
return env
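def _example_prepare_subprocess_env():
    # Hedged usage sketch (not part of the original module): run a
    # system-installed binary with the sanitized environment; "true" is just an
    # illustrative POSIX no-op command.
    env = prepare_subprocess_env(system=True)
    return subprocess.run(["true"], env=env)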
@contextlib.contextmanager
def create_filter_process(cmd, stream, stream_close, inbound=True):
if cmd:
# put a filter process between stream and us (e.g. a [de]compression command)
# inbound: <stream> --> filter --> us
# outbound: us --> filter --> <stream>
filter_stream = stream
filter_stream_close = stream_close
env = prepare_subprocess_env(system=True)
# There is no deadlock potential here (the subprocess docs warn about this), because
# communication with the process is a one-way road, i.e. the process can never block
# for us to do something while we block on the process for something different.
if inbound:
proc = popen_with_error_handling(
cmd,
stdout=subprocess.PIPE,
stdin=filter_stream,
log_prefix="filter-process: ",
env=env,
preexec_fn=None if is_win32 else ignore_sigint,
)
else:
proc = popen_with_error_handling(
cmd,
stdin=subprocess.PIPE,
stdout=filter_stream,
log_prefix="filter-process: ",
env=env,
preexec_fn=None if is_win32 else ignore_sigint,
)
if not proc:
raise Error(f"filter {cmd}: process creation failed")
stream = proc.stdout if inbound else proc.stdin
# inbound: do not close the pipe (this is the task of the filter process [== writer])
# outbound: close the pipe, otherwise the filter process would not notice when we are done.
stream_close = not inbound
try:
yield stream
except Exception:
# something went wrong with processing the stream by borg
logger.debug("Exception, killing the filter...")
if cmd:
proc.kill()
borg_succeeded = False
raise
else:
borg_succeeded = True
finally:
if stream_close:
stream.close()
if cmd:
logger.debug("Done, waiting for filter to die...")
rc = proc.wait()
logger.debug("filter cmd exited with code %d", rc)
if filter_stream_close:
filter_stream.close()
if borg_succeeded and rc:
# if borg did not succeed, we know that we killed the filter process
raise Error("filter %s failed, rc=%d" % (cmd, rc))
|
772b96c3af7276b63a0e6e15844ed6fccfcd1a65
|
2e4e193a74a5e0acee1374adc4085f81c14e7a33
|
/ocotillo/model_loader.py
|
62c170765754f6e6f76d6c452b829721df98e69f
|
[
"Apache-2.0"
] |
permissive
|
neonbjb/ocotillo
|
84045592cf6e7cfeb95387db57366a3da319c808
|
3fa8513436a1a41c82069be31a317ca6da229110
|
refs/heads/main
| 2023-05-22T02:26:34.410019
| 2022-05-19T17:03:28
| 2022-05-19T17:03:28
| 447,307,140
| 194
| 19
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,645
|
py
|
model_loader.py
|
import os
from time import time
import torch
from transformers import Wav2Vec2ForCTC, Wav2Vec2FeatureExtractor, Wav2Vec2CTCTokenizer, Wav2Vec2Processor
def load_model(device, phonetic=False, use_torchscript=False):
"""
Utility function to load the model and corresponding processor to the specified device. Supports loading
torchscript models when they have been pre-built (which is accomplished by running this file.)
"""
model_name = "facebook/wav2vec2-lv-60-espeak-cv-ft" if phonetic else "jbetker/wav2vec2-large-robust-ft-libritts-voxpopuli"
if use_torchscript:
model = trace_torchscript_model(model_name.split('/')[-1].replace('-', '_'), 'cuda' if 'cuda' in device else 'cpu')
model = model.to(device)
else:
model = Wav2Vec2ForCTC.from_pretrained(model_name).to(device)
model.config.return_dict = False
model.eval()
if phonetic:
processor = Wav2Vec2Processor.from_pretrained("facebook/wav2vec2-lv-60-espeak-cv-ft")
else:
        feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained("facebook/wav2vec2-large-960h")
tokenizer = Wav2Vec2CTCTokenizer.from_pretrained('jbetker/tacotron-symbols')
processor = Wav2Vec2Processor(feature_extractor, tokenizer)
return model, processor
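def _example_load_model_usage():
    # Hedged usage sketch (not part of the original module): load the eager CPU
    # model and decode one second of (random, hence meaningless) 16kHz audio.
    # Real input should be normalized mono audio as in test_onnx_model() below.
    model, processor = load_model('cpu')
    with torch.no_grad():
        logits = model(torch.randn(1, 16000))[0]
    tokens = torch.argmax(logits, dim=-1)
    return processor.batch_decode(tokens)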
def trace_torchscript_model(model_name, dev_type='cpu', load_from_cache=True):
output_trace_cache_file = f'torchscript/traced_{model_name}_{dev_type}.pth'
if load_from_cache and os.path.exists(output_trace_cache_file):
return torch.jit.load(output_trace_cache_file)
print("Model hasn't been traced. Doing so now.")
model, extractor = load_model(dev_type, use_torchscript=False)
    with torch.autocast(dev_type), torch.no_grad():
traced_model = torch.jit.trace(model, (torch.randn((1,16000), device=dev_type)))
os.makedirs('torchscript', exist_ok=True)
torch.jit.save(traced_model, output_trace_cache_file)
print("Done tracing.")
    return traced_model
def trace_onnx_model(dev_type='cpu'):
model, extractor = load_model(dev_type, use_torchscript=False)
torch.onnx.export(model, torch.randn((2,16000), device=dev_type), 'ocotillo.onnx', export_params=True, opset_version=13,
do_constant_folding=True, input_names=['input'], output_names=['logits'],
dynamic_axes={'input': {0: 'batch_size', 1: 'input_length'}, 'output': {0: 'batch_size', 1: 'sequence_length'}})
def test_onnx_model():
# Test whether the model can be loaded and use the ONNX checker.
import onnx
model = onnx.load("ocotillo.onnx")
onnx.checker.check_model(model)
import onnxruntime
from ocotillo.utils import load_audio
from tqdm import tqdm
onnx_model = onnxruntime.InferenceSession('../ocotillo.onnx')
torch_model, _ = load_model('cpu', use_torchscript=True)
audio = load_audio('data/obama.mp3', 16000).unsqueeze(0)
audio_norm = (audio - audio.mean()) / torch.sqrt(audio.var() + 1e-7)
with torch.no_grad():
start = time()
for k in tqdm(range(100)):
logits = torch_model(audio_norm)[0]
print(f'Elapsed torchscript: {time()-start}')
tokens = torch.argmax(logits, dim=-1)
onnx_inputs = {'input': audio_norm.numpy()}
start = time()
for k in tqdm(range(100)):
onnx_outputs = onnx_model.run(None, onnx_inputs)
print(f'Elapsed ONNX: {time() - start}')
onnx_tokens = torch.argmax(torch.tensor(onnx_outputs[0]), dim=-1)
assert torch.all(onnx_tokens == tokens)
if __name__ == '__main__':
trace_onnx_model()
test_onnx_model()
|
67332c2d2540f0efb07768631538b9ff6cf71cc1
|
cfdd6d24f7139d057d13afe32f1dc1de64b33c3f
|
/src/pyglow/constants.py
|
5da036ed3539b2a48581eb4ec88b740787baf60a
|
[
"MIT"
] |
permissive
|
timduly4/pyglow
|
cec15d7afe7f90dc5f2f019626f63622da4c3de0
|
1988757f3b6a4bd5ed98266a3fb1dc64f2513fc5
|
refs/heads/master
| 2023-05-10T19:02:52.777677
| 2023-05-02T19:07:51
| 2023-05-02T19:07:51
| 12,006,247
| 105
| 60
|
MIT
| 2023-05-02T19:09:55
| 2013-08-09T17:06:55
|
Fortran
|
UTF-8
|
Python
| false
| false
| 139
|
py
|
constants.py
|
import os
# Pyglow version:
VERSION = '2.2'
# Directory of pyglow files:
DIR_FILE = os.path.dirname(__file__)
# NaN:
nan = float('nan')
|
6bbdb3b43f49d9290dbe0608e54617c83dd99e19
|
d41c7f7f0730e5802c3cd5c58213674b4e7ef11e
|
/attach_objects.py
|
a35bae8da70a8309479f42ee0abeb74b97b2c981
|
[] |
no_license
|
Tlousky/blender_scripts
|
98c3bd6014d1c086f89dcb46d47445cf3d665bee
|
d79725302e9f9e14869d5a9a3c5717f4dbfb1a64
|
refs/heads/master
| 2022-11-28T04:51:15.975390
| 2022-11-26T12:34:21
| 2022-11-26T12:34:21
| 8,910,696
| 167
| 42
| null | 2022-11-26T12:34:22
| 2013-03-20T17:53:04
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 972
|
py
|
attach_objects.py
|
# Go over a list of objects, and attach them to another object via shrinkwrap constraint
import bpy
objs = [ o for o in bpy.context.scene.objects if 'House' in o.name ]
counter = 1
for o in objs:
bpy.ops.object.select_all(action='DESELECT')
o.select = True
bpy.context.scene.objects.active = o
bpy.context.scene.cursor_location = bpy.context.object.location
bpy.ops.object.empty_add()
e = bpy.context.scene.objects[ bpy.context.object.name ]
bpy.ops.object.select_all(action='DESELECT')
o.select = True
e.select = True
bpy.context.scene.objects.active = e
bpy.ops.object.parent_set(keep_transform=True)
bpy.ops.object.select_all(action='DESELECT')
e.select = True
bpy.context.scene.objects.active = e
const = bpy.context.object.constraints.new('SHRINKWRAP')
const.target = bpy.context.scene.objects['Sphere']
print( "done with object %s of %s" % (counter, len(objs) ) )
counter += 1
|
6893647a2c8d8e05e7575b3e4dd3df3bf5859a68
|
057e555023112b7409de8e3e1908670cbce01d5b
|
/frag_gt/frag_gt/tests/src/test_scorers.py
|
cb22137cf896a7ead54cceb391970202c2609727
|
[
"MIT"
] |
permissive
|
BenevolentAI/guacamol_baselines
|
4826ebd3b5d5fe0b0b5dd8e7065094ee25d28e95
|
44d24c53f3acf9266eb2fb06dbff909836549291
|
refs/heads/master
| 2023-08-07T07:13:08.549075
| 2022-02-22T10:54:16
| 2022-02-22T10:54:16
| 157,866,504
| 108
| 39
|
MIT
| 2023-03-24T23:15:14
| 2018-11-16T12:45:46
|
Python
|
UTF-8
|
Python
| false
| false
| 390
|
py
|
test_scorers.py
|
from frag_gt.src.scorers import MolecularWeightScorer
def test_molecular_weight_scorer():
# Given
smiles = 'c1ccccc1'
# When
scoring_function = MolecularWeightScorer()
list_score = scoring_function.score_list([smiles])[0]
single_score = scoring_function.score(smiles)
# Then
assert list_score == single_score
assert single_score == 78.11399999999999
|
c91acd33dadebdda65a3c275bda39b917fff3363
|
92da6d0aab06e2d8ed6ec4b7e81d01c77b875bb8
|
/gui/configuration_tab.py
|
542755e725acbc0fe7676ece20d2362dee88082c
|
[] |
no_license
|
Quitten/Autorize
|
b4629b295da1b20d6b9344471e321c9fefbaec0f
|
65b5cfdf764a92c053994ebc0ce295868fcc6d1e
|
refs/heads/master
| 2023-08-10T09:09:33.216274
| 2023-04-11T17:37:35
| 2023-04-11T17:37:35
| 30,739,183
| 764
| 162
| null | 2023-06-18T20:55:55
| 2015-02-13T03:47:50
|
Python
|
UTF-8
|
Python
| false
| false
| 9,286
|
py
|
configuration_tab.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from javax.swing import DefaultComboBoxModel
from java.awt.event import ActionListener
from javax.swing import SwingUtilities
from javax.swing import JToggleButton
from javax.swing import JScrollPane
from javax.swing import JTabbedPane
from javax.swing import JOptionPane
from javax.swing import JSplitPane
from javax.swing import JComboBox
from javax.swing import JTextArea
from javax.swing import JCheckBox
from javax.swing import JButton
from javax.swing import JPanel
from javax.swing import JLabel
from table import UpdateTableEDT
class ConfigurationTab():
def __init__(self, extender):
self._extender = extender
def draw(self):
""" init configuration tab
"""
        self.DEFAULT_REPLACE_TEXT = "Cookie: Insert=injected; cookie=or;\nHeader: here"
self._extender.startButton = JToggleButton("Autorize is off",
actionPerformed=self.startOrStop)
self._extender.startButton.setBounds(10, 20, 230, 30)
self._extender.clearButton = JButton("Clear List", actionPerformed=self.clearList)
self._extender.clearButton.setBounds(10, 80, 100, 30)
self._extender.autoScroll = JCheckBox("Auto Scroll")
self._extender.autoScroll.setBounds(145, 80, 130, 30)
self._extender.ignore304 = JCheckBox("Ignore 304/204 status code responses")
self._extender.ignore304.setBounds(280, 5, 300, 30)
self._extender.ignore304.setSelected(True)
self._extender.prevent304 = JCheckBox("Prevent 304 Not Modified status code")
self._extender.prevent304.setBounds(280, 25, 300, 30)
self._extender.interceptRequestsfromRepeater = JCheckBox("Intercept requests from Repeater")
self._extender.interceptRequestsfromRepeater.setBounds(280, 45, 300, 30)
self._extender.doUnauthorizedRequest = JCheckBox("Check unauthenticated")
self._extender.doUnauthorizedRequest.setBounds(280, 65, 300, 30)
self._extender.doUnauthorizedRequest.setSelected(True)
        self._extender.replaceQueryParam = JCheckBox("Replace query params", actionPerformed=self.replaceQueryHandler)
self._extender.replaceQueryParam.setBounds(280, 85, 300, 30)
self._extender.replaceQueryParam.setSelected(False)
self._extender.saveHeadersButton = JButton("Add",
actionPerformed=self.saveHeaders)
self._extender.saveHeadersButton.setBounds(315, 115, 80, 30)
self._extender.removeHeadersButton = JButton("Remove",
actionPerformed=self.removeHeaders)
self._extender.removeHeadersButton.setBounds(400, 115, 80, 30)
savedHeadersTitles = self.getSavedHeadersTitles()
self._extender.savedHeadersTitlesCombo = JComboBox(savedHeadersTitles)
self._extender.savedHeadersTitlesCombo.addActionListener(SavedHeaderChange(self._extender))
self._extender.savedHeadersTitlesCombo.setBounds(10, 115, 300, 30)
        self._extender.replaceString = JTextArea(self.DEFAULT_REPLACE_TEXT, 5, 30)
self._extender.replaceString.setWrapStyleWord(True)
self._extender.replaceString.setLineWrap(True)
scrollReplaceString = JScrollPane(self._extender.replaceString)
scrollReplaceString.setVerticalScrollBarPolicy(JScrollPane.VERTICAL_SCROLLBAR_AS_NEEDED)
scrollReplaceString.setBounds(10, 150, 470, 150)
fromLastRequestLabel = JLabel("From last request:")
fromLastRequestLabel.setBounds(10, 305, 250, 30)
self._extender.fetchCookiesHeaderButton = JButton("Fetch Cookies header",
actionPerformed=self.fetchCookiesHeader)
self._extender.fetchCookiesHeaderButton.setEnabled(False)
self._extender.fetchCookiesHeaderButton.setBounds(10, 330, 220, 30)
self._extender.fetchAuthorizationHeaderButton = JButton("Fetch Authorization header",
actionPerformed=self.fetchAuthorizationHeader)
self._extender.fetchAuthorizationHeaderButton.setEnabled(False)
self._extender.fetchAuthorizationHeaderButton.setBounds(260, 330, 220, 30)
self._extender.filtersTabs = JTabbedPane()
self._extender.filtersTabs.addTab("Enforcement Detector", self._extender.EDPnl)
self._extender.filtersTabs.addTab("Detector Unauthenticated", self._extender.EDPnlUnauth)
self._extender.filtersTabs.addTab("Interception Filters", self._extender.filtersPnl)
self._extender.filtersTabs.addTab("Match/Replace", self._extender.MRPnl)
self._extender.filtersTabs.addTab("Table Filter", self._extender.filterPnl)
self._extender.filtersTabs.addTab("Save/Restore", self._extender.exportPnl)
self._extender.filtersTabs.setSelectedIndex(2)
self._extender.filtersTabs.setBounds(0, 350, 2000, 700)
self.config_pnl = JPanel()
self.config_pnl.setBounds(0, 0, 1000, 1000)
self.config_pnl.setLayout(None)
self.config_pnl.add(self._extender.startButton)
self.config_pnl.add(self._extender.clearButton)
self.config_pnl.add(scrollReplaceString)
self.config_pnl.add(fromLastRequestLabel)
self.config_pnl.add(self._extender.saveHeadersButton)
self.config_pnl.add(self._extender.removeHeadersButton)
self.config_pnl.add(self._extender.savedHeadersTitlesCombo)
self.config_pnl.add(self._extender.fetchCookiesHeaderButton)
self.config_pnl.add(self._extender.fetchAuthorizationHeaderButton)
self.config_pnl.add(self._extender.autoScroll)
self.config_pnl.add(self._extender.interceptRequestsfromRepeater)
self.config_pnl.add(self._extender.ignore304)
self.config_pnl.add(self._extender.prevent304)
self.config_pnl.add(self._extender.doUnauthorizedRequest)
self.config_pnl.add(self._extender.replaceQueryParam)
self._extender._cfg_splitpane = JSplitPane(JSplitPane.VERTICAL_SPLIT)
self._extender._cfg_splitpane.setResizeWeight(0.5)
self._extender._cfg_splitpane.setBounds(0, 0, 1000, 1000)
self._extender._cfg_splitpane.setRightComponent(self._extender.filtersTabs)
self._extender._cfg_splitpane.setLeftComponent(self.config_pnl)
def startOrStop(self, event):
if self._extender.startButton.getText() == "Autorize is off":
self._extender.startButton.setText("Autorize is on")
self._extender.startButton.setSelected(True)
self._extender.intercept = 1
else:
self._extender.startButton.setText("Autorize is off")
self._extender.startButton.setSelected(False)
self._extender.intercept = 0
def clearList(self, event):
self._extender._lock.acquire()
oldSize = self._extender._log.size()
self._extender._log.clear()
SwingUtilities.invokeLater(UpdateTableEDT(self._extender,"delete",0, oldSize - 1))
self._extender._lock.release()
    def replaceQueryHandler(self, event):
if self._extender.replaceQueryParam.isSelected():
self._extender.replaceString.setText("paramName=paramValue")
else:
            self._extender.replaceString.setText(self.DEFAULT_REPLACE_TEXT)
def saveHeaders(self, event):
savedHeadersTitle = JOptionPane.showInputDialog("Please provide saved headers title:")
self._extender.savedHeaders.append({'title': savedHeadersTitle, 'headers': self._extender.replaceString.getText()})
self._extender.savedHeadersTitlesCombo.setModel(DefaultComboBoxModel(self.getSavedHeadersTitles()))
self._extender.savedHeadersTitlesCombo.getModel().setSelectedItem(savedHeadersTitle)
def removeHeaders(self, event):
model = self._extender.savedHeadersTitlesCombo.getModel()
selectedItem = model.getSelectedItem()
if selectedItem == "Temporary headers":
return
delObject = None
for savedHeaderObj in self._extender.savedHeaders:
if selectedItem == savedHeaderObj['title']:
delObject = savedHeaderObj
self._extender.savedHeaders.remove(delObject)
model.removeElement(selectedItem)
def getSavedHeadersTitles(self):
titles = []
for savedHeaderObj in self._extender.savedHeaders:
titles.append(savedHeaderObj['title'])
return titles
def fetchCookiesHeader(self, event):
if self._extender.lastCookiesHeader:
self._extender.replaceString.setText(self._extender.lastCookiesHeader)
def fetchAuthorizationHeader(self, event):
if self._extender.lastAuthorizationHeader:
self._extender.replaceString.setText(self._extender.lastAuthorizationHeader)
class SavedHeaderChange(ActionListener):
def __init__(self, extender):
self._extender = extender
def actionPerformed(self, e):
selectedTitle = self._extender.savedHeadersTitlesCombo.getSelectedItem()
headers = [x for x in self._extender.savedHeaders if x['title'] == selectedTitle]
self._extender.replaceString.setText(headers[0]['headers'])
|
b97331e873bf59ef1724a6631c0440980f8a7e61
|
6b97499b67fa3de10eb9449f4805100cfca0bbbd
|
/account/conf.py
|
ff711b1f3e83a93d02b2d703ecf5757732149e92
|
[
"MIT"
] |
permissive
|
pinax/django-user-accounts
|
c9b9b74bf14c1f117766b89829055c35cf794345
|
a69832facfb511ee9347fea7bc4303a8729c97a7
|
refs/heads/master
| 2023-08-17T06:51:55.779202
| 2023-02-09T09:22:00
| 2023-02-09T09:22:00
| 3,682,622
| 976
| 334
|
MIT
| 2023-09-14T18:07:35
| 2012-03-10T21:41:14
|
Python
|
UTF-8
|
Python
| false
| false
| 1,948
|
py
|
conf.py
|
import importlib
from django.conf import settings # noqa
from django.core.exceptions import ImproperlyConfigured
from account.languages import LANGUAGES
from account.timezones import TIMEZONES
from appconf import AppConf
def load_path_attr(path):
i = path.rfind(".")
module, attr = path[:i], path[i + 1:]
try:
mod = importlib.import_module(module)
except ImportError as e:
raise ImproperlyConfigured("Error importing {0}: '{1}'".format(module, e))
try:
attr = getattr(mod, attr)
except AttributeError:
raise ImproperlyConfigured("Module '{0}' does not define a '{1}'".format(module, attr))
return attr
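def _example_load_path_attr():
    # Hedged usage sketch (not part of the original module): resolve the default
    # hookset dotted path, just like configure_hookset() below does for the
    # ACCOUNT_HOOKSET setting.
    hookset_class = load_path_attr("account.hooks.AccountDefaultHookSet")
    return hookset_class()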
class AccountAppConf(AppConf):
OPEN_SIGNUP = True
LOGIN_URL = "account_login"
LOGOUT_URL = "account_logout"
SIGNUP_REDIRECT_URL = "/"
LOGIN_REDIRECT_URL = "/"
LOGOUT_REDIRECT_URL = "/"
PASSWORD_CHANGE_REDIRECT_URL = "account_password"
PASSWORD_RESET_REDIRECT_URL = "account_login"
PASSWORD_RESET_TOKEN_URL = "account_password_reset_token"
PASSWORD_EXPIRY = 0
PASSWORD_USE_HISTORY = False
PASSWORD_STRIP = True
REMEMBER_ME_EXPIRY = 60 * 60 * 24 * 365 * 10
USER_DISPLAY = lambda user: user.username # noqa
CREATE_ON_SAVE = True
EMAIL_UNIQUE = True
EMAIL_CONFIRMATION_REQUIRED = False
EMAIL_CONFIRMATION_EMAIL = True
EMAIL_CONFIRMATION_EXPIRE_DAYS = 3
EMAIL_CONFIRMATION_AUTO_LOGIN = False
EMAIL_CONFIRMATION_ANONYMOUS_REDIRECT_URL = "account_login"
EMAIL_CONFIRMATION_AUTHENTICATED_REDIRECT_URL = None
EMAIL_CONFIRMATION_URL = "account_confirm_email"
SETTINGS_REDIRECT_URL = "account_settings"
NOTIFY_ON_PASSWORD_CHANGE = True
DELETION_EXPUNGE_HOURS = 48
DEFAULT_HTTP_PROTOCOL = "https"
HOOKSET = "account.hooks.AccountDefaultHookSet"
TIMEZONES = TIMEZONES
LANGUAGES = LANGUAGES
def configure_hookset(self, value):
return load_path_attr(value)()
|
91238da51b27bc950b6fa4830b9f8b85b00ef337
|
554718851656376ad2bceb282de30459167ffeb2
|
/smdebug/rules/action/message_action.py
|
36d1005b09a0ede36858483e796aff7b9a2b37b5
|
[
"Apache-2.0"
] |
permissive
|
awslabs/sagemaker-debugger
|
d6ae6a6177a6cb457972772e2b3021e8a9dcc621
|
37ecf0aaeb24ab2adbe7f0ad664d0e50fa4154f2
|
refs/heads/master
| 2023-09-05T05:20:02.458427
| 2023-04-20T20:48:11
| 2023-04-20T20:48:11
| 222,554,670
| 162
| 89
|
Apache-2.0
| 2023-08-23T14:31:27
| 2019-11-18T22:12:36
|
Python
|
UTF-8
|
Python
| false
| false
| 5,703
|
py
|
message_action.py
|
# Standard Library
import json
import os
# Third Party
import boto3
# First Party
from smdebug.core.logger import get_logger
# action:
#   {'name': 'sms' or 'email', 'endpoint': 'phone number or email id'}
class MessageAction:
def __init__(self, rule_name, message_type, message_endpoint):
self._topic_name = "SMDebugRules"
self._logger = get_logger()
if message_type == "sms" or message_type == "email":
self._protocol = message_type
else:
self._protocol = None
self._logger.info(
f"Unsupported message type:{message_type} in MessageAction. Returning"
)
return
self._message_endpoint = message_endpoint
        # The two attributes below exist to help in tests
self._last_send_mesg_response = None
self._last_subscription_response = None
env_region_name = os.getenv("AWS_REGION", "us-east-1")
self._sns_client = boto3.client("sns", region_name=env_region_name)
self._topic_arn = self._create_sns_topic_if_not_exists()
self._subscribe_mesgtype_endpoint()
self._logger.info(
f"Registering messageAction with protocol:{self._protocol} endpoint:{self._message_endpoint} and topic_arn:{self._topic_arn} region:{env_region_name}"
)
self._rule_name = rule_name
def _create_sns_topic_if_not_exists(self):
try:
topic = self._sns_client.create_topic(Name=self._topic_name)
self._logger.info(
f"topic_name: {self._topic_name} , creating topic returned response:{topic}"
)
if topic:
return topic["TopicArn"]
except Exception as e:
self._logger.info(
f"Caught exception while creating topic:{self._topic_name} exception is: \n {e}"
)
return None
def _check_subscriptions(self, topic_arn, protocol, endpoint):
try:
next_token = "random"
subs = self._sns_client.list_subscriptions()
sub_array = subs["Subscriptions"]
while next_token is not None:
for sub_dict in sub_array:
proto = sub_dict["Protocol"]
ep = sub_dict["Endpoint"]
topic = sub_dict["TopicArn"]
if proto == protocol and topic == topic_arn and ep == endpoint:
self._logger.info(f"Existing Subscription found: {sub_dict}")
return True
if "NextToken" in subs:
next_token = subs["NextToken"]
subs = self._sns_client.list_subscriptions(NextToken=next_token)
sub_array = subs["Subscriptions"]
continue
else:
next_token = None
except Exception as e:
self._logger.info(
f"Caught exception while list subscription topic:{self._topic_name} exception is: \n {e}"
)
return False
def _subscribe_mesgtype_endpoint(self):
response = None
try:
if self._topic_arn and self._protocol and self._message_endpoint:
filter_policy = {}
if self._protocol == "sms":
filter_policy["phone_num"] = [self._message_endpoint]
else:
filter_policy["email"] = [self._message_endpoint]
if not self._check_subscriptions(
self._topic_arn, self._protocol, self._message_endpoint
):
response = self._sns_client.subscribe(
TopicArn=self._topic_arn,
Protocol=self._protocol, # sms or email
                        Endpoint=self._message_endpoint,  # phone number or email address
Attributes={"FilterPolicy": json.dumps(filter_policy)},
ReturnSubscriptionArn=False, # True means always return ARN
)
else:
response = f"Subscription exists for topic:{self._topic_arn}, protocol:{self._protocol}, endpoint:{self._message_endpoint}"
except Exception as e:
self._logger.info(
f"Caught exception while subscribing endpoint on topic:{self._topic_arn} exception is: \n {e}"
)
self._logger.info(f"response for sns subscribe is {response} ")
self._last_subscription_response = response
def _send_message(self, message):
response = None
message = f"SMDebugRule:{self._rule_name} fired. {message}"
try:
if self._protocol == "sms":
msg_attributes = {
"phone_num": {"DataType": "String", "StringValue": self._message_endpoint}
}
else:
msg_attributes = {
"email": {"DataType": "String", "StringValue": self._message_endpoint}
}
response = self._sns_client.publish(
TopicArn=self._topic_arn,
Message=message,
Subject=f"SMDebugRule:{self._rule_name} fired",
# MessageStructure='json',
MessageAttributes=msg_attributes,
)
except Exception as e:
self._logger.info(
f"Caught exception while publishing message on topic:{self._topic_arn} exception is: \n {e}"
)
self._logger.info(f"Response of send message:{response}")
self._last_send_mesg_response = response
return response
def invoke(self, message):
self._send_message(message)
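def _example_message_action_usage():
    # Hedged usage sketch (not part of the original module): create an email
    # action for a rule and fire it once. The rule name and address are
    # illustrative; real use needs AWS credentials with SNS permissions.
    action = MessageAction(
        rule_name="LossNotDecreasing",
        message_type="email",
        message_endpoint="user@example.com",
    )
    action.invoke("Rule condition met at step 100.")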
|
1bb6b0129eaa5692a07fdcecda09489b4b337829
|
eb9f655206c43c12b497c667ba56a0d358b6bc3a
|
/python/testData/psi/ErrorInParameterList.py
|
7520dc9c0d8fbaf0556a292a99437aafc7988595
|
[
"Apache-2.0"
] |
permissive
|
JetBrains/intellij-community
|
2ed226e200ecc17c037dcddd4a006de56cd43941
|
05dbd4575d01a213f3f4d69aa4968473f2536142
|
refs/heads/master
| 2023-09-03T17:06:37.560889
| 2023-09-03T11:51:00
| 2023-09-03T12:12:27
| 2,489,216
| 16,288
| 6,635
|
Apache-2.0
| 2023-09-12T07:41:58
| 2011-09-30T13:33:05
| null |
UTF-8
|
Python
| false
| false
| 60
|
py
|
ErrorInParameterList.py
|
def select2 (self,filds=None, from=''):
print (sql)
|
e7a0b8fc74f7caca0c635573bdac2b3e7dfb4758
|
4d3a077a439df835ce475efe7824ef6d3046b81c
|
/script/vul/mysql/mysql_burst.py
|
f763bf132004c67ddac446c836edc5ce8ec68acf
|
[] |
no_license
|
orleven/Tentacle
|
f00bc62278e462a3be4bfc4378f34c95d5419617
|
0b364caa7272030e03b876caf71bc9026e3ba57a
|
refs/heads/master
| 2023-09-01T11:07:33.020640
| 2023-08-22T03:37:35
| 2023-08-22T03:37:35
| 85,373,049
| 383
| 129
| null | 2023-08-21T09:26:03
| 2017-03-18T03:27:59
|
Python
|
UTF-8
|
Python
| false
| false
| 1,380
|
py
|
mysql_burst.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @author: orleven
import aiomysql
from lib.core.env import *
from lib.core.enums import ServicePortMap
from script import BaseScript
class Script(BaseScript):
def __init__(self):
BaseScript.__init__(self)
self.service_type = ServicePortMap.MYSQL
def load_dict(self):
username_txt_path = self.parameter.get("U", None)
if username_txt_path:
self.username_list = self.read_file(username_txt_path)
else:
self.username_list = self.get_default_dict("mysql_usernames.txt")
password_txt_path = self.parameter.get("P", None)
if password_txt_path:
self.password_list = self.read_file(password_txt_path)
else:
self.password_list = self.get_default_dict("mysql_passwords.txt")
async def prove(self):
if self.base_url is None:
async for (username, password) in self.generate_auth_dict(self.username_list, self.password_list):
try:
async with aiomysql.create_pool(host=self.host, port=self.port, user=username,
password=password, timeout=self.timeout) as res:
yield username + "/" + password
return
except Exception:
pass
|
048a87cec69cccdd28e672959e4eeb09efa4d170
|
0032d988541e85c47b5034c20ecf88220dde5a95
|
/openbook_communities/tests/views/community/banned_users/test_views.py
|
0f59b659ef56550f26f9a02ece0c3165f39f1eb4
|
[
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
OkunaOrg/okuna-api
|
eabd37fef9d2be59b590ed8d72bee084ac377997
|
f87d8e80d2f182c01dbce68155ded0078ee707e4
|
refs/heads/master
| 2022-02-04T21:31:10.577601
| 2021-12-28T18:20:39
| 2021-12-28T18:20:39
| 151,052,951
| 185
| 92
|
MIT
| 2022-01-13T01:00:40
| 2018-10-01T07:44:46
|
Python
|
UTF-8
|
Python
| false
| false
| 24,949
|
py
|
test_views.py
|
import random
from django.urls import reverse
from faker import Faker
from rest_framework import status
from openbook_common.tests.models import OpenbookAPITestCase
import logging
import json
from openbook_common.tests.helpers import make_user, make_authentication_headers_for_user, \
make_community
logger = logging.getLogger(__name__)
fake = Faker()
class Communitybanned_usersAPITest(OpenbookAPITestCase):
def test_cannot_retrieve_banned_users_of_private_community(self):
"""
should not be able to retrieve the banned users of a private community and return 400
"""
user = make_user()
headers = make_authentication_headers_for_user(user)
other_user = make_user()
community = make_community(creator=other_user, type='T')
community_name = community.name
user_to_ban = make_user()
other_user.ban_user_with_username_from_community_with_name(username=user_to_ban.username,
community_name=community_name)
url = self._get_url(community_name=community.name)
response = self.client.get(url, **headers)
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
def test_cannot_retrieve_banned_users_of_public_community(self):
"""
should not be able to retrieve the banned users of a public community and return 400
:return:
"""
user = make_user()
headers = make_authentication_headers_for_user(user)
other_user = make_user()
community = make_community(creator=other_user, type='P')
community_name = community.name
user_to_ban = make_user()
other_user.ban_user_with_username_from_community_with_name(username=user_to_ban.username,
community_name=community_name)
url = self._get_url(community_name=community.name)
response = self.client.get(url, **headers)
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
def test_cannot_retrieve_banned_users_of_community_member_of(self):
"""
        should not be able to retrieve the banned users of a community one is a member of and return 400
:return:
"""
user = make_user()
headers = make_authentication_headers_for_user(user)
other_user = make_user()
community = make_community(creator=other_user, type='P')
community_name = community.name
user.join_community_with_name(community_name)
user_to_ban = make_user()
other_user.ban_user_with_username_from_community_with_name(username=user_to_ban.username,
community_name=community_name)
url = self._get_url(community_name=community.name)
response = self.client.get(url, **headers)
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
def test_can_retrieve_banned_users_of_community_if_admin(self):
"""
should be able to retrieve the banned users of a community if is an admin and return 200
:return:
"""
user = make_user()
headers = make_authentication_headers_for_user(user)
other_user = make_user()
community = make_community(creator=other_user)
community_name = community.name
user.join_community_with_name(community_name)
other_user.add_administrator_with_username_to_community_with_name(username=user.username,
community_name=community.name)
amount_of_banned_users = 5
banned_users_ids = []
for i in range(0, amount_of_banned_users):
community_member = make_user()
other_user.ban_user_with_username_from_community_with_name(username=community_member.username,
community_name=community_name)
banned_users_ids.append(community_member.pk)
url = self._get_url(community_name=community.name)
response = self.client.get(url, **headers)
self.assertEqual(response.status_code, status.HTTP_200_OK)
response_banned_users = json.loads(response.content)
self.assertEqual(len(response_banned_users), len(banned_users_ids))
for response_banned_user in response_banned_users:
response_member_id = response_banned_user.get('id')
self.assertIn(response_member_id, banned_users_ids)
def test_can_retrieve_banned_users_of_community_if_mod(self):
"""
should be able to retrieve the banned users of a community if is a moderator and return 200
:return:
"""
user = make_user()
headers = make_authentication_headers_for_user(user)
other_user = make_user()
community = make_community(creator=other_user)
community_name = community.name
user.join_community_with_name(community_name)
other_user.add_moderator_with_username_to_community_with_name(username=user.username,
community_name=community.name)
amount_of_banned_users = 5
banned_users_ids = []
for i in range(0, amount_of_banned_users):
community_member = make_user()
other_user.ban_user_with_username_from_community_with_name(username=community_member.username,
community_name=community_name)
banned_users_ids.append(community_member.pk)
url = self._get_url(community_name=community.name)
response = self.client.get(url, **headers)
self.assertEqual(response.status_code, status.HTTP_200_OK)
response_banned_users = json.loads(response.content)
self.assertEqual(len(response_banned_users), len(banned_users_ids))
for response_banned_user in response_banned_users:
response_member_id = response_banned_user.get('id')
self.assertIn(response_member_id, banned_users_ids)
def _get_url(self, community_name):
return reverse('community-banned-users', kwargs={
'community_name': community_name
})
class BanCommunityUserAPITest(OpenbookAPITestCase):
def test_can_ban_user_from_community_if_mod(self):
"""
should be able to ban user from a community if is moderator and return 200
"""
user = make_user()
headers = make_authentication_headers_for_user(user)
other_user = make_user()
community = make_community(creator=other_user, type='P')
community_name = community.name
user.join_community_with_name(community_name)
other_user.add_moderator_with_username_to_community_with_name(username=user.username,
community_name=community.name)
user_to_ban = make_user()
url = self._get_url(community_name=community.name)
response = self.client.post(url, {
'username': user_to_ban.username
}, **headers)
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertTrue(user_to_ban.is_banned_from_community_with_name(community.name))
def test_logs_user_banned(self):
"""
should create a log when a community user is banned
"""
user = make_user()
headers = make_authentication_headers_for_user(user)
other_user = make_user()
community = make_community(creator=other_user, type='P')
community_name = community.name
user.join_community_with_name(community_name)
other_user.add_moderator_with_username_to_community_with_name(username=user.username,
community_name=community.name)
user_to_ban = make_user()
url = self._get_url(community_name=community.name)
self.client.post(url, {
'username': user_to_ban.username
}, **headers)
self.assertTrue(community.logs.filter(action_type='B',
source_user=user,
target_user=user_to_ban).exists())
def test_cant_ban_user_from_community_if_already_banned(self):
"""
should not be able to ban user from a community if is already banned and return 400
"""
user = make_user()
headers = make_authentication_headers_for_user(user)
community = make_community(creator=user, type='P')
community_name = community.name
user_to_ban = make_user()
user.ban_user_with_username_from_community_with_name(username=user_to_ban.username,
community_name=community_name)
url = self._get_url(community_name=community.name)
response = self.client.post(url, **headers)
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
self.assertTrue(user_to_ban.is_banned_from_community_with_name(community.name))
def test_can_ban_user_from_community_if_admin(self):
"""
should be able to ban user from a community if is admin and return 200
"""
user = make_user()
headers = make_authentication_headers_for_user(user)
other_user = make_user()
community = make_community(creator=other_user, type='P')
community_name = community.name
user.join_community_with_name(community_name)
other_user.add_administrator_with_username_to_community_with_name(username=user.username,
community_name=community.name)
user_to_ban = make_user()
url = self._get_url(community_name=community.name)
response = self.client.post(url, {
'username': user_to_ban.username
}, **headers)
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertTrue(user_to_ban.is_banned_from_community_with_name(community.name))
def test_cant_ban_user_from_community_if_member(self):
"""
        should not be able to ban user from a community if is member and return 400
"""
user = make_user()
headers = make_authentication_headers_for_user(user)
other_user = make_user()
community = make_community(creator=other_user, type='P')
community_name = community.name
user.join_community_with_name(community_name)
user_to_ban = make_user()
url = self._get_url(community_name=community.name)
response = self.client.post(url, {
'username': user_to_ban.username
}, **headers)
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
self.assertFalse(user_to_ban.is_banned_from_community_with_name(community.name))
def test_cant_ban_user_from_community(self):
"""
        should not be able to ban user from a community if not a member and return 400
"""
user = make_user()
headers = make_authentication_headers_for_user(user)
other_user = make_user()
community = make_community(creator=other_user, type='P')
user_to_ban = make_user()
url = self._get_url(community_name=community.name)
response = self.client.post(url, {
'username': user_to_ban.username
}, **headers)
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
self.assertFalse(user_to_ban.is_banned_from_community_with_name(community.name))
def test_ban_user_makes_it_no_longer_a_member_of_community(self):
"""
should remove membership of a user when banned from a community
"""
user = make_user()
headers = make_authentication_headers_for_user(user)
other_user = make_user()
community = make_community(creator=other_user)
community_name = community.name
user.join_community_with_name(community_name)
other_user.add_moderator_with_username_to_community_with_name(username=user.username,
community_name=community.name)
user_to_ban = make_user()
url = self._get_url(community_name=community.name)
response = self.client.post(url, {
'username': user_to_ban.username
}, **headers)
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertFalse(user_to_ban.is_member_of_community_with_name(community_name))
def _get_url(self, community_name):
return reverse('community-ban-user', kwargs={
'community_name': community_name
})
class UnbanCommunityUserAPITest(OpenbookAPITestCase):
def test_can_unban_user_from_community_if_mod(self):
"""
should be able to unban user from a community if is moderator and return 200
"""
user = make_user()
headers = make_authentication_headers_for_user(user)
other_user = make_user()
community = make_community(creator=other_user, type='P')
community_name = community.name
user.join_community_with_name(community_name)
other_user.add_moderator_with_username_to_community_with_name(username=user.username,
community_name=community.name)
user_to_unban = make_user()
other_user.ban_user_with_username_from_community_with_name(username=user_to_unban.username,
community_name=community_name)
url = self._get_url(community_name=community.name)
response = self.client.post(url, {
'username': user_to_unban.username
}, **headers)
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertFalse(user_to_unban.is_banned_from_community_with_name(community.name))
def test_logs_user_unbanned(self):
"""
should create a log when a community user is unbanned
"""
user = make_user()
headers = make_authentication_headers_for_user(user)
other_user = make_user()
community = make_community(creator=other_user, type='P')
community_name = community.name
user.join_community_with_name(community_name)
other_user.add_moderator_with_username_to_community_with_name(username=user.username,
community_name=community.name)
user_to_unban = make_user()
other_user.ban_user_with_username_from_community_with_name(username=user_to_unban.username,
community_name=community_name)
url = self._get_url(community_name=community.name)
self.client.post(url, {
'username': user_to_unban.username
}, **headers)
self.assertTrue(community.logs.filter(action_type='U',
source_user=user,
target_user=user_to_unban).exists())
    def test_cant_unban_user_from_community_if_not_banned(self):
"""
should not be able to unban user from a community if is not banned and return 400
"""
user = make_user()
headers = make_authentication_headers_for_user(user)
other_user = make_user()
community = make_community(creator=other_user, type='P')
community_name = community.name
user.join_community_with_name(community_name)
other_user.add_moderator_with_username_to_community_with_name(username=user.username,
community_name=community.name)
user_to_unban = make_user()
url = self._get_url(community_name=community.name)
response = self.client.post(url, {
'username': user_to_unban.username
}, **headers)
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
self.assertFalse(user_to_unban.is_banned_from_community_with_name(community.name))
def test_can_unban_user_from_community_if_admin(self):
"""
should be able to unban user from a community if is admin and return 200
"""
user = make_user()
headers = make_authentication_headers_for_user(user)
other_user = make_user()
community = make_community(creator=other_user, type='P')
community_name = community.name
user.join_community_with_name(community_name)
other_user.add_administrator_with_username_to_community_with_name(username=user.username,
community_name=community.name)
user_to_unban = make_user()
other_user.ban_user_with_username_from_community_with_name(username=user_to_unban.username,
community_name=community_name)
url = self._get_url(community_name=community.name)
response = self.client.post(url, {
'username': user_to_unban.username
}, **headers)
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertFalse(user_to_unban.is_banned_from_community_with_name(community.name))
def test_cant_unban_user_from_community_if_member(self):
"""
        should not be able to unban user from a community if is member and return 400
"""
user = make_user()
headers = make_authentication_headers_for_user(user)
other_user = make_user()
community = make_community(creator=other_user, type='P')
community_name = community.name
user.join_community_with_name(community_name)
user_to_unban = make_user()
other_user.ban_user_with_username_from_community_with_name(username=user_to_unban.username,
community_name=community_name)
url = self._get_url(community_name=community.name)
response = self.client.post(url, {
'username': user_to_unban.username
}, **headers)
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
self.assertTrue(user_to_unban.is_banned_from_community_with_name(community.name))
    def test_cant_unban_user_from_community(self):
        """
        should not be able to unban user from a community if not a member and return 400
"""
user = make_user()
headers = make_authentication_headers_for_user(user)
other_user = make_user()
community = make_community(creator=other_user, type='P')
community_name = community.name
user_to_unban = make_user()
other_user.ban_user_with_username_from_community_with_name(username=user_to_unban.username,
community_name=community_name)
url = self._get_url(community_name=community.name)
response = self.client.post(url, {
'username': user_to_unban.username
}, **headers)
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
self.assertTrue(user_to_unban.is_banned_from_community_with_name(community.name))
def _get_url(self, community_name):
return reverse('community-unban-user', kwargs={
'community_name': community_name
})
class SearchCommunityBannedUsersAPITests(OpenbookAPITestCase):
"""
SearchCommunityBannedUsersAPITests
"""
def test_can_search_community_banned_users_by_name(self):
"""
should be able to search for community banned users by their name and return 200
"""
user = make_user()
headers = make_authentication_headers_for_user(user)
community = make_community(creator=user)
amount_of_community_banned_users_to_search_for = 5
for i in range(0, amount_of_community_banned_users_to_search_for):
banned_user = make_user()
banned_user.join_community_with_name(community_name=community.name)
user.ban_user_with_username_from_community_with_name(username=banned_user.username,
community_name=community.name)
banned_user_username = banned_user.profile.name
amount_of_characters_to_query = random.randint(1, len(banned_user_username))
query = banned_user_username[0:amount_of_characters_to_query]
final_query = ''
for character in query:
final_query = final_query + (character.upper() if fake.boolean() else character.lower())
url = self._get_url(community_name=community.name)
response = self.client.get(url, {
'query': final_query
}, **headers)
self.assertEqual(response.status_code, status.HTTP_200_OK)
response_banned_users = json.loads(response.content)
response_banned_users_count = len(response_banned_users)
if response_banned_users_count == 1:
# Our community creator was not retrieved
self.assertEqual(response_banned_users_count, 1)
retrieved_banned_user = response_banned_users[0]
self.assertEqual(retrieved_banned_user['id'], banned_user.id)
else:
# Our community creator was retrieved too
for response_banned_user in response_banned_users:
response_banned_user_id = response_banned_user['id']
self.assertTrue(
response_banned_user_id == banned_user.id or response_banned_user_id == user.id)
user.unban_user_with_username_from_community_with_name(username=banned_user.username,
community_name=community.name)
def test_can_search_community_banned_users_by_username(self):
"""
should be able to search for community banned_users by their username and return 200
"""
user = make_user()
headers = make_authentication_headers_for_user(user)
community = make_community(creator=user)
amount_of_community_banned_users_to_search_for = 5
for i in range(0, amount_of_community_banned_users_to_search_for):
banned_user = make_user()
banned_user.join_community_with_name(community_name=community.name)
user.ban_user_with_username_from_community_with_name(username=banned_user.username,
community_name=community.name)
banned_user_username = banned_user.username
amount_of_characters_to_query = random.randint(1, len(banned_user_username))
query = banned_user_username[0:amount_of_characters_to_query]
final_query = ''
for character in query:
final_query = final_query + (character.upper() if fake.boolean() else character.lower())
url = self._get_url(community_name=community.name)
response = self.client.get(url, {
'query': final_query
}, **headers)
self.assertEqual(response.status_code, status.HTTP_200_OK)
response_banned_users = json.loads(response.content)
response_banned_users_count = len(response_banned_users)
if response_banned_users_count == 1:
# Our community creator was not retrieved
self.assertEqual(response_banned_users_count, 1)
retrieved_banned_user = response_banned_users[0]
self.assertEqual(retrieved_banned_user['id'], banned_user.id)
else:
# Our community creator was retrieved too
for response_banned_user in response_banned_users:
response_banned_user_id = response_banned_user['id']
self.assertTrue(
response_banned_user_id == banned_user.id or response_banned_user_id == user.id)
user.unban_user_with_username_from_community_with_name(username=banned_user.username,
community_name=community.name)
def _get_url(self, community_name):
return reverse('search-community-banned-users', kwargs={
'community_name': community_name,
})
|
5a8bca996786104ff7707cc94edaa22658501023
|
01184c7098e40569dd48219fbe3012321cf31244
|
/test/test_literal/test_uriref_literal_comparison.py
|
2dfdb734d19b7efe7cfb0ecfbe64764fa11823f0
|
[
"BSD-3-Clause"
] |
permissive
|
RDFLib/rdflib
|
1c81136f2656207042f81374540d8e1f02be28f5
|
077f4ac3abb3038b266f40dc95a8ccf9f4e9a84c
|
refs/heads/main
| 2023-08-30T11:22:00.041615
| 2023-08-29T21:31:43
| 2023-08-29T21:31:43
| 3,342,046
| 1,754
| 562
|
BSD-3-Clause
| 2023-09-12T14:58:35
| 2012-02-03T05:49:13
|
Python
|
UTF-8
|
Python
| false
| false
| 2,588
|
py
|
test_uriref_literal_comparison.py
|
from rdflib.graph import Graph
from rdflib.namespace import RDF
from rdflib.plugins.parsers.rdfxml import CORE_SYNTAX_TERMS
from rdflib.term import BNode, Literal, URIRef
"""
Ah... it's coming back to me...
[6:32p] eikeon: think it's so transitivity holds...
[6:32p] eikeon: if a==b and b==c then a should == c
[6:32p] eikeon: "foo"==Literal("foo")
[6:33p] eikeon: We don't want URIRef("foo")==Literal("foo")
[6:33p] eikeon: But if we have URIRef("foo")=="foo" then it implies it.
[6:33p] chimezie: yes, definately not the other RDFLib 'typed' RDF (and N3) terms
[6:34p] eikeon: Why do you need URIRef("foo")=="foo" ?
[6:34p] chimezie: i'm just wondering if a URI and a string with the same lexical value, are by definition 'different'
[6:35p] eikeon: Think so, actually. Think of trying to serialize some triples.
[6:36p] eikeon: If they are the same you'd serialize them the same, no?
[6:36p] chimezie: I guess I was thinking of a 'string' in a native datatype sense, not in the RDF sense (where they would be distinctly different)
[6:37p] eikeon: We should try and brain dump some of this...
[6:37p] eikeon: it look a fairly long time to work out.
[6:37p] eikeon: But think we finally landed in the right spot.
[6:38p] eikeon: I know many of the backends break if URIRef("foo")==Literal("foo")
[6:39p] eikeon: And if we want "foo"==Literal("foo") --- then we really can't have URIRef("foo") also == "foo"
"""
class TestIdentifierEquality:
def setup_method(self):
self.uriref = URIRef("http://example.org/")
self.bnode = BNode()
self.literal = Literal("http://example.org/")
self.python_literal = "http://example.org/"
self.python_literal_2 = "foo"
def testA(self):
assert self.uriref != self.literal
def testB(self):
assert self.literal != self.uriref
def testC(self):
assert self.uriref != self.python_literal
def testD(self):
assert self.python_literal != self.uriref
def testE(self):
assert self.literal != self.python_literal
def testE2(self):
assert self.literal.eq(self.python_literal)
def testF(self):
assert self.python_literal != self.literal
def testG(self):
assert "foo" not in CORE_SYNTAX_TERMS
def testH(self):
assert (
URIRef("http://www.w3.org/1999/02/22-rdf-syntax-ns#RDF")
in CORE_SYNTAX_TERMS
)
def testI(self):
g = Graph()
g.add((self.uriref, RDF.value, self.literal))
g.add((self.uriref, RDF.value, self.uriref))
assert len(g) == 2
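def _example_identifier_equality():
    # Hedged sketch (not part of the original test module) of the semantics the
    # chat log above settles on: URIRef and Literal never compare equal, a
    # Literal is not `==` a plain Python string, but .eq() compares by value.
    assert URIRef("foo") != Literal("foo")
    assert Literal("foo") != "foo"
    assert Literal("foo").eq("foo")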
|
a15ce1e79121515b951acf5ce85e2da8f2d87400
|
3db15c39d9fb11b98ffc5e43ac1f6d697ccd93ea
|
/demo_nodes_py/demo_nodes_py/logging/use_logger_service.py
|
df979e9032ff90bd59bb55cd57f9634d8d32252b
|
[
"Apache-2.0"
] |
permissive
|
ros2/demos
|
e1967e0232a8d6ef4d8cb9bc914cf75735415674
|
7ecb54e14e1726d543f77c798bf41e19efc1b9b4
|
refs/heads/rolling
| 2023-08-05T20:24:32.172085
| 2023-08-04T17:07:50
| 2023-08-04T17:07:50
| 39,266,291
| 415
| 328
|
Apache-2.0
| 2023-09-14T11:28:15
| 2015-07-17T17:24:00
|
C++
|
UTF-8
|
Python
| false
| false
| 6,098
|
py
|
use_logger_service.py
|
# Copyright 2023 Sony Group Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import threading
import time
from rcl_interfaces.msg import LoggerLevel
from rcl_interfaces.srv import GetLoggerLevels
from rcl_interfaces.srv import SetLoggerLevels
import rclpy
from rclpy.executors import SingleThreadedExecutor
from rclpy.impl.logging_severity import LoggingSeverity
from rclpy.node import Node
from std_msgs.msg import String
"""
This demo program shows how to enable the logger service and control logger levels via it.
Class LoggerServiceNode enables the logger service and creates a subscription; the
subscription callback outputs each received message through the different log functions.
Class TestNode can set/get the logger level of LoggerServiceNode and send messages to it.
"""
class LoggerServiceNode(Node):
def __init__(self):
super().__init__('LoggerServiceNode', enable_logger_service=True)
self.sub = self.create_subscription(String, 'output', self.callback, 10)
def callback(self, msg):
self.get_logger().debug(msg.data + ' with DEBUG logger level.')
self.get_logger().info(msg.data + ' with INFO logger level.')
self.get_logger().warn(msg.data + ' with WARN logger level.')
self.get_logger().error(msg.data + ' with ERROR logger level.')
class TestNode(Node):
def __init__(self, remote_node_name):
super().__init__('TestNode')
self.pub = self.create_publisher(String, 'output', 10)
self.logger_get_client = self.create_client(
GetLoggerLevels, remote_node_name + '/get_logger_levels')
self._logger_set_client = self.create_client(
SetLoggerLevels, remote_node_name + '/set_logger_levels')
self._remote_node_name = remote_node_name
def set_logger_level_on_remote_node(self, logger_level) -> bool:
if not self._logger_set_client.service_is_ready():
return False
request = SetLoggerLevels.Request()
set_logger_level = LoggerLevel()
set_logger_level.name = self._remote_node_name
set_logger_level.level = logger_level
request.levels.append(set_logger_level)
future = self._logger_set_client.call_async(request)
rclpy.spin_until_future_complete(self, future)
ret_results = future.result()
if not ret_results:
return False
if not ret_results.results[0].successful:
self.get_logger().error('Failed to change logger level: '
+ ret_results.results[0].reason)
return False
return True
def get_logger_level_on_remote_node(self):
if not self.logger_get_client.service_is_ready():
return [False, None]
request = GetLoggerLevels.Request()
request.names.append(self._remote_node_name)
future = self.logger_get_client.call_async(request)
rclpy.spin_until_future_complete(self, future)
ret_results = future.result()
if not ret_results:
return [False, None]
return [True, ret_results.levels[0].level]
def get_logger_level_func(test_node):
ret, level = test_node.get_logger_level_on_remote_node()
if ret:
test_node.get_logger().info('Current logger level: ' + str(level))
else:
test_node.get_logger().error('Failed to get logger level via logger service !')
def main(args=None):
rclpy.init(args=args)
logger_service_node = LoggerServiceNode()
test_node = TestNode('LoggerServiceNode')
executor = SingleThreadedExecutor()
executor.add_node(logger_service_node)
thread = threading.Thread(target=executor.spin)
thread.start()
# Output with default logger level
test_node.get_logger().info('Output with default logger level:')
msg = String()
msg.data = 'Output 1'
test_node.pub.publish(msg)
time.sleep(0.5)
# Get logger level. Logger level should be 0 (Unset)
get_logger_level_func(test_node)
# Output with debug logger level
test_node.get_logger().info('Output with debug logger level:')
if test_node.set_logger_level_on_remote_node(LoggingSeverity.DEBUG):
msg = String()
msg.data = 'Output 2'
test_node.pub.publish(msg)
time.sleep(0.5)
else:
test_node.get_logger().error('Failed to set debug logger level via logger service !')
# Get logger level. Logger level should be 10 (Debug)
get_logger_level_func(test_node)
# Output with warn logger level
test_node.get_logger().info('Output with warn logger level:')
if test_node.set_logger_level_on_remote_node(LoggingSeverity.WARN):
msg = String()
msg.data = 'Output 3'
test_node.pub.publish(msg)
time.sleep(0.5)
else:
test_node.get_logger().error('Failed to set warn logger level via logger service !')
# Get logger level. Logger level should be 30 (warn)
get_logger_level_func(test_node)
# Output with error logger level
test_node.get_logger().info('Output with error logger level:')
if test_node.set_logger_level_on_remote_node(LoggingSeverity.ERROR):
msg = String()
msg.data = 'Output 4'
test_node.pub.publish(msg)
time.sleep(0.5)
else:
test_node.get_logger().error('Failed to set error logger level via logger service !')
# Get logger level. Logger level should be 40 (Error)
get_logger_level_func(test_node)
executor.shutdown()
if thread.is_alive():
thread.join()
rclpy.try_shutdown()
if __name__ == '__main__':
main()
|
1437428ea6ceebb2afbe3737c36cc165db14ddea
|
a3d6556180e74af7b555f8d47d3fea55b94bcbda
|
/net/data/ssl/scripts/asn1.py
|
358efbf342b1eda55ebf2d27aede8f16784ff666
|
[
"BSD-3-Clause"
] |
permissive
|
chromium/chromium
|
aaa9eda10115b50b0616d2f1aed5ef35d1d779d6
|
a401d6cf4f7bf0e2d2e964c512ebb923c3d8832c
|
refs/heads/main
| 2023-08-24T00:35:12.585945
| 2023-08-23T22:01:11
| 2023-08-23T22:01:11
| 120,360,765
| 17,408
| 7,102
|
BSD-3-Clause
| 2023-09-10T23:44:27
| 2018-02-05T20:55:32
| null |
UTF-8
|
Python
| false
| false
| 3,907
|
py
|
asn1.py
|
# Copyright 2012 The Chromium Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# This file implements very minimal ASN.1, DER serialization.
def ToDER(obj):
'''ToDER converts the given object into DER encoding'''
if obj is None:
# None turns into NULL
return TagAndLength(5, 0)
if isinstance(obj, (str, bytes)):
# There are many ASN.1 string types, so rather than pick one implicitly,
# require the caller explicitly specify the encoding with asn1.UTF8String,
# etc., below.
raise TypeError("String types must be specified explicitly")
if isinstance(obj, bool):
val = b"\x00"
if obj:
val = b"\xff"
return TagAndData(1, val)
if isinstance(obj, int):
big_endian = bytearray()
val = obj
while val != 0:
big_endian.append(val & 0xff)
val >>= 8
if len(big_endian) == 0 or big_endian[-1] >= 128:
big_endian.append(0)
big_endian.reverse()
return TagAndData(2, bytes(big_endian))
return obj.ToDER()
def TagAndLength(tag, length):
der = bytearray([tag])
if length < 128:
der.append(length)
elif length < 256:
der.append(0x81)
der.append(length)
  elif length < 65536:
der.append(0x82)
der.append(length >> 8)
der.append(length & 0xff)
else:
assert False
return bytes(der)
def TagAndData(tag, data):
return TagAndLength(tag, len(data)) + data
class Raw(object):
'''Raw contains raw DER encoded bytes that are used verbatim'''
def __init__(self, der):
self.der = der
def ToDER(self):
return self.der
class Explicit(object):
'''Explicit prepends an explicit tag'''
def __init__(self, tag, child):
self.tag = tag
self.child = child
def ToDER(self):
der = ToDER(self.child)
tag = self.tag
tag |= 0x80 # content specific
tag |= 0x20 # complex
return TagAndData(tag, der)
class ENUMERATED(object):
def __init__(self, value):
self.value = value
def ToDER(self):
return TagAndData(10, bytes([self.value]))
class SEQUENCE(object):
def __init__(self, children):
self.children = children
def ToDER(self):
der = b''.join([ToDER(x) for x in self.children])
return TagAndData(0x30, der)
class SET(object):
def __init__(self, children):
self.children = children
def ToDER(self):
der = b''.join([ToDER(x) for x in self.children])
return TagAndData(0x31, der)
class OCTETSTRING(object):
def __init__(self, val):
self.val = val
def ToDER(self):
return TagAndData(4, self.val)
class PrintableString(object):
def __init__(self, val):
self.val = val
def ToDER(self):
return TagAndData(19, self.val)
class UTF8String(object):
def __init__(self, val):
self.val = val
def ToDER(self):
return TagAndData(12, self.val)
class OID(object):
def __init__(self, parts):
self.parts = parts
def ToDER(self):
if len(self.parts) < 2 or self.parts[0] > 6 or self.parts[1] >= 40:
assert False
der = bytearray([self.parts[0] * 40 + self.parts[1]])
for x in self.parts[2:]:
if x == 0:
der.append(0)
else:
octets = bytearray()
while x != 0:
v = x & 0x7f
if len(octets) > 0:
v |= 0x80
octets.append(v)
x >>= 7
octets.reverse()
der = der + octets
return TagAndData(6, bytes(der))
class UTCTime(object):
def __init__(self, time_str):
self.time_str = time_str
def ToDER(self):
return TagAndData(23, self.time_str.encode('ascii'))
class GeneralizedTime(object):
def __init__(self, time_str):
self.time_str = time_str
def ToDER(self):
return TagAndData(24, self.time_str.encode('ascii'))
class BitString(object):
def __init__(self, bits):
self.bits = bits
def ToDER(self):
return TagAndData(3, b"\x00" + self.bits)
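A small usage sketch for the helpers above, using only names defined in this file (the OID arcs and string value are illustrative):

# Serialize a tiny X.501-style attribute: SET of SEQUENCE(OID, UTF8String).
name = SEQUENCE([
    SET([
        SEQUENCE([
            OID([2, 5, 4, 3]),       # 2.5.4.3 = id-at-commonName
            UTF8String(b'example'),  # the string wrappers expect raw bytes
        ]),
    ]),
])
print(ToDER(name).hex())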
|
12dff3c99ca2ae2c571c314d6192d6bfa09d75d7
|
08f8d1203bae66dbb692f8c085b6bbe25934195a
|
/tool_generate_packages.py
|
57a05dec7613e2dd0d900e3d7925d979fed9c51c
|
[
"LGPL-2.1-or-later",
"BSD-3-Clause",
"ISC",
"Apache-2.0",
"BSD-2-Clause",
"LicenseRef-scancode-unknown-license-reference",
"MIT",
"LicenseRef-scancode-generic-cla"
] |
permissive
|
Azure/azure-storage-azcopy
|
a747b43ad5cf1162dafbb308024f864baec13ae1
|
f8b3875a8d30bb56fecc3b70023d4a20c53bc7d5
|
refs/heads/main
| 2023-08-16T18:55:37.997091
| 2023-08-15T19:08:15
| 2023-08-15T19:08:15
| 114,798,676
| 552
| 218
|
MIT
| 2023-09-14T21:46:12
| 2017-12-19T18:33:42
|
Go
|
UTF-8
|
Python
| false
| false
| 3,558
|
py
|
tool_generate_packages.py
|
#!/usr/bin/env python
# -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
from subprocess import check_call
import argparse
import os
import shutil
DEFAULT_DESTINATION_FOLDER = "./dist"
DEFAULT_SOURCE_FOLDER = "./"
THIRD_PARTY_NOTICE_FILE_NAME = "ThirdPartyNotice.txt"
# the list of executables to package are listed here
EXECUTABLES_TO_ZIP = ["azcopy_darwin_amd64", "azcopy_windows_386.exe", "azcopy_windows_amd64.exe"]
EXECUTABLES_TO_TAR = ["azcopy_linux_amd64"]
def create_directory(dir):
os.mkdir(dir)
def remove_directory(dir):
shutil.rmtree(dir)
def copy_file(src, dst):
shutil.copy(src, dst)
def rename_file(src, dst):
shutil.move(src, dst)
def tar_dir(dst, src, cwd):
check_call(["tar", "--exclude='*.DS_Store'", "-czvf", dst, src], cwd=cwd)
def zip_dir(dst, src, cwd):
check_call(["zip", "-r", "-X", "-x='*.DS_Store'", dst, src], cwd=cwd)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Create packages for AzCopyV10")
parser.add_argument("--version", "-v", help="The version of the package", default="10.0.0")
parser.add_argument("--input-folder", "-i", help="Where the executables are located", default=DEFAULT_SOURCE_FOLDER)
    parser.add_argument("--output-folder", "-o", help="Where the generated packages should be placed", default=DEFAULT_DESTINATION_FOLDER)
# step 1: parse the command line arguments
args = parser.parse_args()
print("Starting package generation: version={0}, input folder={1}, output folder={2}"
.format(args.version, args.input_folder, args.output_folder))
# step 2: delete output folder if present
if os.path.exists(args.output_folder):
print("Deleting existing output folder: " + args.output_folder)
remove_directory(args.output_folder)
# step 3: create package for each environment
print("Creating output folder: " + args.output_folder)
create_directory(args.output_folder)
for executable in EXECUTABLES_TO_ZIP + EXECUTABLES_TO_TAR:
output_folder_name = "{}_{}".format(executable.replace('.exe', ''), args.version)
output_folder_path = os.path.join(args.output_folder, output_folder_name)
# each executable should be in a different folder
create_directory(output_folder_path)
# copy the executable into the right folder
copy_file(os.path.join(args.input_folder, executable), output_folder_path)
# rename executables to the standard name
rename_file(os.path.join(output_folder_path, executable), os.path.join(output_folder_path, "azcopy.exe" if ".exe" in executable else "azcopy"))
# copy the third party notice over
copy_file(os.path.join(args.input_folder, THIRD_PARTY_NOTICE_FILE_NAME), output_folder_path)
# compress the folder accordingly
if executable in EXECUTABLES_TO_TAR:
tar_dir("{}.tar.gz".format(output_folder_name), output_folder_name,
cwd=os.path.abspath(args.output_folder))
else:
zip_dir("{}.zip".format(output_folder_name), output_folder_name,
cwd=os.path.abspath(args.output_folder))
# step 4: create version file
with open(os.path.join(args.output_folder, "latest_version.txt"), "w+") as f:
f.write(args.version + "\n")
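For reference, a typical invocation of this script might look like the following; the version number and folder paths are illustrative only:

python tool_generate_packages.py --version 10.3.0 --input-folder ./bin --output-folder ./dist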
|
f0dcf06acfaa254245236ff7385cf662c489dbb4
|
916c1313c623c799e98d1bd897b3aef510172639
|
/py/abd/abdcmd_fetch.py
|
48773a9c1e3e2320dc90e8d7ed6405ca576ca817
|
[
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
bloomberg/phabricator-tools
|
377ba3dba299c5d21a015bb039ae920fae5478ef
|
09bd1587fe8945d93a891162fd4c89640c6fada7
|
refs/heads/master
| 2021-01-02T19:43:48.274684
| 2019-01-11T13:34:55
| 2019-01-11T13:34:55
| 8,464,182
| 154
| 40
|
Apache-2.0
| 2022-02-14T09:57:48
| 2013-02-27T20:02:50
|
Python
|
UTF-8
|
Python
| false
| false
| 4,203
|
py
|
abdcmd_fetch.py
|
"""Fetch managed repos.
This can be useful if you are switching from one arcyd instance to
another, to 'pre-fetch' before actually moving over.
"""
# =============================================================================
# CONTENTS
# -----------------------------------------------------------------------------
# abdcmd_fetch
#
# Public Functions:
# getFromfilePrefixChars
# setupParser
# process
#
# -----------------------------------------------------------------------------
# (this contents block is generated, edits will be lost)
# =============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys
import phlgitx_refcache
import phlsys_git
import phlsys_pid
import phlsys_subprocess
import phlurl_watcher
import abdi_processrepoarglist
import abdi_repoargs
import abdt_differresultcache
import abdt_fs
import abdt_git
def getFromfilePrefixChars():
return None
def setupParser(parser):
pass
def process(args):
_ = args # NOQA
fs = abdt_fs.make_default_accessor()
any_failed = False
with fs.lockfile_context():
pid = fs.get_pid_or_none()
if pid is not None and phlsys_pid.is_running(pid):
raise Exception("cannot fetch whilst arcyd is running.")
repo_config_path_list = fs.repo_config_path_list()
repo_name_config_list = abdi_repoargs.parse_config_file_list(
repo_config_path_list)
url_watcher_wrapper = phlurl_watcher.FileCacheWatcherWrapper(
fs.layout.urlwatcher_cache_path)
# Let the user know what's happening before potentially blocking for a
# while.
print('Refreshing repository snoop status ..', end=' ')
# Make sure that the output is actually visible by flushing stdout
# XXX: Will use 'flush' parameter to 'print()' in Python 3.3
sys.stdout.flush()
        url_watcher_wrapper.watcher.refresh()
        print("done")
for repo_name, repo_config in repo_name_config_list:
print(repo_name + ' ..', end=' ')
# Make sure that the output is actually visible by flushing stdout
# XXX: Will use 'flush' parameter to 'print()' in Python 3.3
sys.stdout.flush()
snoop_url = abdi_repoargs.get_repo_snoop_url(repo_config)
sys_repo = phlsys_git.Repo(repo_config.repo_path)
refcache_repo = phlgitx_refcache.Repo(sys_repo)
differ_cache = abdt_differresultcache.Cache(refcache_repo)
abd_repo = abdt_git.Repo(
refcache_repo,
differ_cache,
"origin",
repo_config.repo_desc)
try:
did_fetch = abdi_processrepoarglist.fetch_if_needed(
url_watcher_wrapper.watcher,
snoop_url,
abd_repo,
repo_config.repo_desc)
if did_fetch:
print('fetched')
else:
print('skipped')
except phlsys_subprocess.CalledProcessError as e:
print('failed')
print(e)
any_failed = True
url_watcher_wrapper.save()
if any_failed:
print("Some repositories failed to be fetched.")
return 1
else:
return 0
# -----------------------------------------------------------------------------
# Copyright (C) 2014-2016 Bloomberg Finance L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ------------------------------ END-OF-FILE ----------------------------------
|
f5ff9150a726520b910c59820003a8b5d22d5e96
|
d139ef8d18fcde584b06c1d7d25477d7d31ee59b
|
/google/ads/googleads/v14/resources/types/custom_interest.py
|
d3404a33c840d8b621a2c67e1bcdfa67edf0d0db
|
[
"Apache-2.0"
] |
permissive
|
googleads/google-ads-python
|
a53993e6be057d3aa61f276b69e97b8b338d1c12
|
146d7070c1ea2140555d49d73c77892430b37314
|
refs/heads/main
| 2023-08-31T01:58:16.738997
| 2023-06-05T08:18:42
| 2023-08-28T19:08:38
| 143,435,091
| 422
| 525
|
Apache-2.0
| 2023-09-12T17:46:52
| 2018-08-03T14:08:04
|
Python
|
UTF-8
|
Python
| false
| false
| 5,112
|
py
|
custom_interest.py
|
# -*- coding: utf-8 -*-
# Copyright 2023 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import annotations
from typing import MutableSequence
import proto # type: ignore
from google.ads.googleads.v14.enums.types import custom_interest_member_type
from google.ads.googleads.v14.enums.types import custom_interest_status
from google.ads.googleads.v14.enums.types import custom_interest_type
__protobuf__ = proto.module(
package="google.ads.googleads.v14.resources",
marshal="google.ads.googleads.v14",
manifest={
"CustomInterest",
"CustomInterestMember",
},
)
class CustomInterest(proto.Message):
r"""A custom interest. This is a list of users by interest.
.. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields
Attributes:
resource_name (str):
Immutable. The resource name of the custom interest. Custom
interest resource names have the form:
``customers/{customer_id}/customInterests/{custom_interest_id}``
id (int):
Output only. Id of the custom interest.
This field is a member of `oneof`_ ``_id``.
status (google.ads.googleads.v14.enums.types.CustomInterestStatusEnum.CustomInterestStatus):
Status of this custom interest. Indicates
whether the custom interest is enabled or
removed.
name (str):
Name of the custom interest. It should be
unique across the same custom affinity audience.
This field is required for create operations.
This field is a member of `oneof`_ ``_name``.
type_ (google.ads.googleads.v14.enums.types.CustomInterestTypeEnum.CustomInterestType):
Type of the custom interest, CUSTOM_AFFINITY or
CUSTOM_INTENT. By default the type is set to
CUSTOM_AFFINITY.
description (str):
Description of this custom interest audience.
This field is a member of `oneof`_ ``_description``.
members (MutableSequence[google.ads.googleads.v14.resources.types.CustomInterestMember]):
List of custom interest members that this
custom interest is composed of. Members can be
added during CustomInterest creation. If members
are presented in UPDATE operation, existing
members will be overridden.
"""
resource_name: str = proto.Field(
proto.STRING,
number=1,
)
id: int = proto.Field(
proto.INT64,
number=8,
optional=True,
)
status: custom_interest_status.CustomInterestStatusEnum.CustomInterestStatus = proto.Field(
proto.ENUM,
number=3,
enum=custom_interest_status.CustomInterestStatusEnum.CustomInterestStatus,
)
name: str = proto.Field(
proto.STRING,
number=9,
optional=True,
)
type_: custom_interest_type.CustomInterestTypeEnum.CustomInterestType = (
proto.Field(
proto.ENUM,
number=5,
enum=custom_interest_type.CustomInterestTypeEnum.CustomInterestType,
)
)
description: str = proto.Field(
proto.STRING,
number=10,
optional=True,
)
members: MutableSequence["CustomInterestMember"] = proto.RepeatedField(
proto.MESSAGE,
number=7,
message="CustomInterestMember",
)
class CustomInterestMember(proto.Message):
r"""A member of custom interest audience. A member can be a
keyword or url. It is immutable, that is, it can only be created
or removed but not changed.
.. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields
Attributes:
member_type (google.ads.googleads.v14.enums.types.CustomInterestMemberTypeEnum.CustomInterestMemberType):
The type of custom interest member, KEYWORD
or URL.
parameter (str):
Keyword text when member_type is KEYWORD or URL string when
member_type is URL.
This field is a member of `oneof`_ ``_parameter``.
"""
member_type: custom_interest_member_type.CustomInterestMemberTypeEnum.CustomInterestMemberType = proto.Field(
proto.ENUM,
number=1,
enum=custom_interest_member_type.CustomInterestMemberTypeEnum.CustomInterestMemberType,
)
parameter: str = proto.Field(
proto.STRING,
number=3,
optional=True,
)
__all__ = tuple(sorted(__protobuf__.manifest))
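As a rough sketch, proto-plus messages such as these accept keyword construction; all field values below are purely illustrative:

interest = CustomInterest(
    resource_name="customers/1234567890/customInterests/111",  # hypothetical IDs
    name="Example custom affinity",
    description="Illustrative audience",
    members=[CustomInterestMember(parameter="running shoes")],
)
print(interest.name)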
|
e16c4997d87c96320142d896e1a776dccd63be77
|
a3d6556180e74af7b555f8d47d3fea55b94bcbda
|
/testing/unexpected_passes_common/builders_unittest.py
|
3e5d38c714289c0060db6b170083f94152cc1802
|
[
"BSD-3-Clause"
] |
permissive
|
chromium/chromium
|
aaa9eda10115b50b0616d2f1aed5ef35d1d779d6
|
a401d6cf4f7bf0e2d2e964c512ebb923c3d8832c
|
refs/heads/main
| 2023-08-24T00:35:12.585945
| 2023-08-23T22:01:11
| 2023-08-23T22:01:11
| 120,360,765
| 17,408
| 7,102
|
BSD-3-Clause
| 2023-09-10T23:44:27
| 2018-02-05T20:55:32
| null |
UTF-8
|
Python
| false
| false
| 23,292
|
py
|
builders_unittest.py
|
#!/usr/bin/env vpython3
# Copyright 2020 The Chromium Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from __future__ import print_function
import json
import os
import sys
from typing import Any, Dict, Set, Tuple
import unittest
if sys.version_info[0] == 2:
import mock
else:
import unittest.mock as mock
from pyfakefs import fake_filesystem_unittest
from unexpected_passes_common import builders
from unexpected_passes_common import constants
from unexpected_passes_common import data_types
from unexpected_passes_common import multiprocessing_utils
from unexpected_passes_common import unittest_utils
class FakeFilesystemTestCaseWithFileCreation(fake_filesystem_unittest.TestCase):
def CreateFile(self, *args, **kwargs):
# TODO(crbug.com/1156806): Remove this and just use fs.create_file() when
# Catapult is updated to a newer version of pyfakefs that is compatible with
# Chromium's version.
if hasattr(self.fs, 'create_file'):
self.fs.create_file(*args, **kwargs)
else:
self.fs.CreateFile(*args, **kwargs)
class GetCiBuildersUnittest(FakeFilesystemTestCaseWithFileCreation):
def setUp(self) -> None:
self._builders_instance = unittest_utils.GenericBuilders(
suite='webgl_conformance')
self._isolate_patcher = mock.patch.object(
self._builders_instance,
'GetIsolateNames',
return_value={'telemetry_gpu_integration_test'})
self._isolate_mock = self._isolate_patcher.start()
self.addCleanup(self._isolate_patcher.stop)
def testJsonContentLoaded(self) -> None:
"""Tests that the correct JSON data is loaded in."""
self.setUpPyfakefs()
gpu_json = {
'AAAAA1 AUTOGENERATED FILE DO NOT EDIT': {},
'Android Release (Nexus 5X)': {
'isolated_scripts': [{
'args': [
'webgl_conformance',
],
'isolate_name':
'telemetry_gpu_integration_test',
}],
},
'GPU Linux Builder': {},
}
gpu_fyi_json = {
'AAAAA1 AUTOGENERATED FILE DO NOT EDIT': {},
'ANGLE GPU Android Release (Nexus 5X)': {
'isolated_scripts': [{
'args': [
'webgl_conformance',
],
'isolate_name':
'telemetry_gpu_integration_test',
}],
},
'GPU FYI Linux Builder': {},
}
# Should be ignored.
tryserver_json = {
'AAAAA1 AUTOGENERATED FILE DO NOT EDIT': {},
'Trybot': {
'isolated_scripts': [{
'args': [
'webgl_conformance',
],
'isolate_name':
'telemetry_gpu_integration_test',
}],
},
}
# Also should be ignored.
not_buildbot_json = {
'Not buildbot': {
'isolated_scripts': [{
'args': [
'webgl_conformance',
],
'isolate_name':
'telemetry_gpu_integration_test',
}],
},
}
self.CreateFile(os.path.join(builders.TESTING_BUILDBOT_DIR,
'chromium.gpu.json'),
contents=json.dumps(gpu_json))
self.CreateFile(os.path.join(builders.TESTING_BUILDBOT_DIR,
'chromium.gpu.fyi.json'),
contents=json.dumps(gpu_fyi_json))
self.CreateFile(os.path.join(builders.TESTING_BUILDBOT_DIR,
'tryserver.gpu.json'),
contents=json.dumps(tryserver_json))
self.CreateFile(os.path.join(builders.TESTING_BUILDBOT_DIR,
'not_buildbot.json'),
contents=json.dumps(not_buildbot_json))
gpu_builders = self._builders_instance.GetCiBuilders()
self.assertEqual(
gpu_builders,
set([
data_types.BuilderEntry('Android Release (Nexus 5X)',
constants.BuilderTypes.CI, False),
data_types.BuilderEntry('ANGLE GPU Android Release (Nexus 5X)',
constants.BuilderTypes.CI, False),
data_types.BuilderEntry('GPU Linux Builder',
constants.BuilderTypes.CI, False),
data_types.BuilderEntry('GPU FYI Linux Builder',
constants.BuilderTypes.CI, False),
]))
def testPublicInternalBuilders(self) -> None:
"""Tests that public internal builders are treated as internal."""
self.setUpPyfakefs()
gpu_json = {
'AAAAA1 AUTOGENERATED FILE DO NOT EDIT': {},
'Android Release (Nexus 5X)': {
'isolated_scripts': [{
'args': [
'webgl_conformance',
],
'isolate_name':
'telemetry_gpu_integration_test',
}],
},
'GPU Linux Builder': {},
}
gpu_internal_json = {
'AAAAA1 AUTOGENERATED FILE DO NOT EDIT': {},
'Android Chrome Release (Nexus 5X)': {
'isolated_scripts': [{
'args': [
'webgl_conformance',
],
'isolate_name':
'telemetry_gpu_integration_test',
}],
},
'GPU Chrome Linux Builder': {},
}
internal_json = {
'AAAAA1 AUTOGENERATED FILE DO NOT EDIT': {},
'Android Internal Release (Nexus 5X)': {
'isolated_scripts': [{
'args': [
'webgl_conformance',
],
'isolate_name':
'telemetry_gpu_integration_test',
}],
},
'GPU Internal Linux Builder': {},
}
self.CreateFile(os.path.join(builders.TESTING_BUILDBOT_DIR,
'chromium.gpu.json'),
contents=json.dumps(gpu_json))
self.CreateFile(os.path.join(builders.TESTING_BUILDBOT_DIR,
'chrome.gpu.fyi.json'),
contents=json.dumps(gpu_internal_json))
self.CreateFile(os.path.join(builders.INTERNAL_TESTING_BUILDBOT_DIR,
'internal.json'),
contents=json.dumps(internal_json))
gpu_builders = self._builders_instance.GetCiBuilders()
self.assertEqual(
gpu_builders,
set([
data_types.BuilderEntry('Android Release (Nexus 5X)',
constants.BuilderTypes.CI, False),
data_types.BuilderEntry('GPU Linux Builder',
constants.BuilderTypes.CI, False),
]))
internal_instance = unittest_utils.GenericBuilders(
suite='webgl_conformance', include_internal_builders=True)
with mock.patch.object(internal_instance,
'GetIsolateNames',
return_value={'telemetry_gpu_integration_test'}):
gpu_builders = internal_instance.GetCiBuilders()
self.assertEqual(
gpu_builders,
set([
data_types.BuilderEntry('Android Release (Nexus 5X)',
constants.BuilderTypes.CI, False),
data_types.BuilderEntry('Android Chrome Release (Nexus 5X)',
constants.BuilderTypes.CI, True),
data_types.BuilderEntry('Android Internal Release (Nexus 5X)',
constants.BuilderTypes.CI, True),
data_types.BuilderEntry('GPU Linux Builder',
constants.BuilderTypes.CI, False),
data_types.BuilderEntry('GPU Chrome Linux Builder',
constants.BuilderTypes.CI, True),
data_types.BuilderEntry('GPU Internal Linux Builder',
constants.BuilderTypes.CI, True),
]))
def testFilterBySuite(self) -> None:
"""Tests that only builders that run the given suite are returned."""
def SideEffect(tm: Dict[str, Any]) -> bool:
tests = tm.get('isolated_scripts', [])
for t in tests:
if t.get('isolate_name') == 'foo_integration_test':
if 'webgl_conformance' in t.get('args', []):
return True
return False
self.setUpPyfakefs()
gpu_json = {
'AAAAA1 AUTOGENERATED FILE DO NOT EDIT': {},
'Android Tester': {
'isolated_scripts': [
{
'args': [
'webgl_conformance',
],
'isolate_name': 'not_telemetry',
},
],
},
'Linux Tester': {
'isolated_scripts': [
{
'args': [
'not_a_suite',
],
'isolate_name': 'foo_integration_test',
},
],
},
'Windows Tester': {
'isolated_scripts': [
{
'args': [
'webgl_conformance',
],
'isolate_name': 'foo_integration_test',
},
],
},
}
self.CreateFile(os.path.join(builders.TESTING_BUILDBOT_DIR,
'chromium.json'),
contents=json.dumps(gpu_json))
with mock.patch.object(self._builders_instance,
'_BuilderRunsTestOfInterest',
side_effect=SideEffect):
gpu_builders = self._builders_instance.GetCiBuilders()
self.assertEqual(
gpu_builders,
set([
data_types.BuilderEntry('Windows Tester', constants.BuilderTypes.CI,
False)
]))
def testRealContentCanBeLoaded(self) -> None:
"""Tests that *something* from the real JSON files can be loaded."""
# This directory is not available on swarming, so if it doesn't exist, just
# skip the test.
if not os.path.exists(builders.TESTING_BUILDBOT_DIR):
return
self.assertNotEqual(len(self._builders_instance.GetCiBuilders()), 0)
class GetMirroredBuildersForCiBuilderUnittest(unittest.TestCase):
def setUp(self) -> None:
self._builders_instance = builders.Builders('suite', False)
self._bb_patcher = mock.patch.object(self._builders_instance,
'_GetBuildbucketOutputForCiBuilder')
self._bb_mock = self._bb_patcher.start()
self.addCleanup(self._bb_patcher.stop)
self._fake_ci_patcher = mock.patch.object(self._builders_instance,
'GetFakeCiBuilders',
return_value={})
self._fake_ci_mock = self._fake_ci_patcher.start()
self.addCleanup(self._fake_ci_patcher.stop)
self._non_chromium_patcher = mock.patch.object(
self._builders_instance,
'GetNonChromiumBuilders',
return_value={'foo_non_chromium'})
self._non_chromium_mock = self._non_chromium_patcher.start()
self.addCleanup(self._non_chromium_patcher.stop)
def testFakeCiBuilder(self) -> None:
"""Tests that a fake CI builder gets properly mapped."""
self._fake_ci_mock.return_value = {
data_types.BuilderEntry('foo_ci', constants.BuilderTypes.CI, False):
{data_types.BuilderEntry('foo_try', constants.BuilderTypes.TRY, False)}
}
try_builder, found_mirror = (
self._builders_instance._GetMirroredBuildersForCiBuilder(
data_types.BuilderEntry('foo_ci', constants.BuilderTypes.CI,
False)))
self.assertTrue(found_mirror)
self.assertEqual(
try_builder,
set([
data_types.BuilderEntry('foo_try', constants.BuilderTypes.TRY,
False)
]))
self._bb_mock.assert_not_called()
def testNoBuildbucketOutput(self) -> None:
"""Tests that a failure to get Buildbucket output is surfaced."""
self._bb_mock.return_value = ''
builder_entry = data_types.BuilderEntry('nonexistent',
constants.BuilderTypes.CI, False)
try_builder, found_mirror = (
self._builders_instance._GetMirroredBuildersForCiBuilder(builder_entry))
self.assertFalse(found_mirror)
self.assertEqual(try_builder, set([builder_entry]))
def testBuildbucketOutput(self):
"""Tests that Buildbucket output is parsed correctly."""
self._bb_mock.return_value = json.dumps({
'output': {
'properties': {
'mirrored_builders': [
'try:foo_try',
'try:bar_try',
]
}
}
})
try_builders, found_mirror = (
self._builders_instance._GetMirroredBuildersForCiBuilder(
data_types.BuilderEntry('foo_ci', constants.BuilderTypes.CI,
False)))
self.assertTrue(found_mirror)
self.assertEqual(
try_builders,
set([
data_types.BuilderEntry('foo_try', constants.BuilderTypes.TRY,
False),
data_types.BuilderEntry('bar_try', constants.BuilderTypes.TRY,
False)
]))
def testBuildbucketOutputInternal(self) -> None:
"""Tests that internal Buildbucket output is parsed correctly."""
self._bb_mock.return_value = json.dumps({
'output': {
'properties': {
'mirrored_builders': [
'try:foo_try',
'try:bar_try',
]
}
}
})
try_builders, found_mirror = (
self._builders_instance._GetMirroredBuildersForCiBuilder(
data_types.BuilderEntry('foo_ci', constants.BuilderTypes.CI, True)))
self.assertTrue(found_mirror)
self.assertEqual(
try_builders,
set([
data_types.BuilderEntry('foo_try', constants.BuilderTypes.TRY,
True),
data_types.BuilderEntry('bar_try', constants.BuilderTypes.TRY, True)
]))
class GetTryBuildersUnittest(FakeFilesystemTestCaseWithFileCreation):
def setUp(self) -> None:
self._builders_instance = builders.Builders('suite', False)
self._get_patcher = mock.patch.object(self._builders_instance,
'_GetMirroredBuildersForCiBuilder')
self._get_mock = self._get_patcher.start()
self.addCleanup(self._get_patcher.stop)
self._runs_test_patcher = mock.patch.object(self._builders_instance,
'_BuilderRunsTestOfInterest')
self._runs_test_mock = self._runs_test_patcher.start()
self.addCleanup(self._runs_test_patcher.stop)
self._pool_patcher = mock.patch.object(multiprocessing_utils,
'GetProcessPool')
self._pool_mock = self._pool_patcher.start()
self._pool_mock.return_value = unittest_utils.FakePool()
self.addCleanup(self._pool_patcher.stop)
self.setUpPyfakefs()
# Make sure the directory exists.
self.CreateFile(
os.path.join(builders.TESTING_BUILDBOT_DIR, 'placeholder.txt'))
def testMirrorNoOutputCausesFailure(self) -> None:
"""Tests that a failure to get Buildbot output raises an exception."""
builder = data_types.BuilderEntry('foo_ci', constants.BuilderTypes.CI,
False)
self._get_mock.return_value = (set([builder]), False)
self._runs_test_mock.return_value = True
with self.assertRaises(RuntimeError):
self._builders_instance.GetTryBuilders([builder])
def testMirrorOutputReturned(self) -> None:
"""Tests that parsed, mirrored builders get returned on success."""
def SideEffect(ci_builder: data_types.BuilderEntry
) -> Tuple[Set[data_types.BuilderEntry], bool]:
b = [
data_types.BuilderEntry(ci_builder.name.replace('ci', 'try'),
constants.BuilderTypes.TRY, False),
data_types.BuilderEntry(ci_builder.name.replace('ci', 'try2'),
constants.BuilderTypes.TRY, False),
]
return set(b), True
self._get_mock.side_effect = SideEffect
self._runs_test_mock.return_value = False
mirrored_builders = self._builders_instance.GetTryBuilders([
data_types.BuilderEntry('foo_ci', constants.BuilderTypes.CI, False),
data_types.BuilderEntry('bar_ci', constants.BuilderTypes.CI, False),
])
self.assertEqual(
mirrored_builders,
set([
data_types.BuilderEntry('foo_try', constants.BuilderTypes.TRY,
False),
data_types.BuilderEntry('foo_try2', constants.BuilderTypes.TRY,
False),
data_types.BuilderEntry('bar_try', constants.BuilderTypes.TRY,
False),
data_types.BuilderEntry('bar_try2', constants.BuilderTypes.TRY,
False),
]))
def testDedicatedJsonContentLoaded(self) -> None:
"""Tests that tryserver JSON content is loaded."""
def SideEffect(test_spec: Dict[str, Any]) -> bool:
# Treat non-empty test specs as valid.
return bool(test_spec)
self._runs_test_mock.side_effect = SideEffect
# Should be ignored.
gpu_json = {
'AAAAA1 AUTOGENERATED FILE DO NOT EDIT': {},
'Android Release (Nexus 5X)': {
'isolated_scripts': [{
'args': [
'webgl_conformance',
],
'isolate_name':
'telemetry_gpu_integration_test',
}],
},
'GPU Linux Builder': {},
}
# Should be ignored.
gpu_fyi_json = {
'AAAAA1 AUTOGENERATED FILE DO NOT EDIT': {},
'ANGLE GPU Android Release (Nexus 5X)': {
'isolated_scripts': [{
'args': [
'webgl_conformance',
],
'isolate_name':
'telemetry_gpu_integration_test',
}],
},
'GPU FYI Linux Builder': {},
}
tryserver_json = {
'AAAAA1 AUTOGENERATED FILE DO NOT EDIT': {},
'Trybot': {
'isolated_scripts': [{
'args': [
'webgl_conformance',
],
'isolate_name':
'telemetry_gpu_integration_test',
}],
},
'Trybot Empty': {},
}
# Also should be ignored.
not_buildbot_json = {
'Not buildbot': {
'isolated_scripts': [{
'args': [
'webgl_conformance',
],
'isolate_name':
'telemetry_gpu_integration_test',
}],
},
}
self.CreateFile(os.path.join(builders.TESTING_BUILDBOT_DIR,
'chromium.gpu.json'),
contents=json.dumps(gpu_json))
self.CreateFile(os.path.join(builders.TESTING_BUILDBOT_DIR,
'chromium.gpu.fyi.json'),
contents=json.dumps(gpu_fyi_json))
self.CreateFile(os.path.join(builders.TESTING_BUILDBOT_DIR,
'tryserver.gpu.json'),
contents=json.dumps(tryserver_json))
self.CreateFile(os.path.join(builders.TESTING_BUILDBOT_DIR,
'not_buildbot.json'),
contents=json.dumps(not_buildbot_json))
gpu_builders = self._builders_instance.GetTryBuilders({})
self.assertEqual(
gpu_builders,
set([
data_types.BuilderEntry('Trybot', constants.BuilderTypes.TRY,
False),
]))
def testDedicatedFilterBySuite(self) -> None:
"""Tests that only builders that run the given suite are returned."""
def SideEffect(tm: Dict[str, Any]) -> bool:
tests = tm.get('isolated_scripts', [])
for t in tests:
if t.get('isolate_name') == 'foo_integration_test':
if 'webgl_conformance' in t.get('args', []):
return True
return False
self._runs_test_mock.side_effect = SideEffect
gpu_json = {
'AAAAA1 AUTOGENERATED FILE DO NOT EDIT': {},
'Android Tester': {
'isolated_scripts': [
{
'args': [
'webgl_conformance',
],
'isolate_name': 'not_telemetry',
},
],
},
'Linux Tester': {
'isolated_scripts': [
{
'args': [
'not_a_suite',
],
'isolate_name': 'foo_integration_test',
},
],
},
'Windows Tester': {
'isolated_scripts': [
{
'args': [
'webgl_conformance',
],
'isolate_name': 'foo_integration_test',
},
],
},
}
self.CreateFile(os.path.join(builders.TESTING_BUILDBOT_DIR,
'tryserver.chromium.json'),
contents=json.dumps(gpu_json))
gpu_builders = self._builders_instance.GetTryBuilders({})
self.assertEqual(
gpu_builders,
set([
data_types.BuilderEntry('Windows Tester',
constants.BuilderTypes.TRY, False)
]))
def testDedicatedAndMirroredCombined(self) -> None:
"""Tests that both dedicated and mirrored trybots are returned."""
def SideEffect(_: Any) -> Tuple[Set[data_types.BuilderEntry], bool]:
return set({
data_types.BuilderEntry('mirrored_trybot', constants.BuilderTypes.TRY,
False)
}), True
self._get_mock.side_effect = SideEffect
self._runs_test_mock.return_value = True
tryserver_json = {
'AAAAA1 AUTOGENERATED FILE DO NOT EDIT': {},
'Trybot': {
'isolated_scripts': [{
'args': [
'webgl_conformance',
],
'isolate_name':
'telemetry_gpu_integration_test',
}],
},
}
self.CreateFile(os.path.join(builders.TESTING_BUILDBOT_DIR,
'tryserver.chromium.json'),
contents=json.dumps(tryserver_json))
try_builders = self._builders_instance.GetTryBuilders({
data_types.BuilderEntry('ci_builder', constants.BuilderTypes.CI, False)
})
self.assertEqual(
try_builders, {
data_types.BuilderEntry('mirrored_trybot',
constants.BuilderTypes.TRY, False),
data_types.BuilderEntry('Trybot', constants.BuilderTypes.TRY, False)
})
if __name__ == '__main__':
unittest.main(verbosity=2)
|
40d86c73520f53f62e1a505dc87399d1930cf4cf
|
95b5388157d6c0b14883bfc7102d54f8c5737b0a
|
/src/opnsense/scripts/filter/download_geoip.py
|
c19873838d5b2f42c7b0df6ccfa1c83881ae634f
|
[
"BSD-2-Clause",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
opnsense/core
|
f071a087984a8d8b2b2e6ca152cf73645f9bc68a
|
a702cf9fb3300e125cd7acc8af3813474606e509
|
refs/heads/master
| 2023-09-01T05:20:10.627528
| 2023-08-31T13:10:59
| 2023-08-31T13:10:59
| 27,965,134
| 2,778
| 1,014
|
BSD-2-Clause
| 2023-09-14T15:45:24
| 2014-12-13T15:43:50
|
PHP
|
UTF-8
|
Python
| false
| false
| 1,958
|
py
|
download_geoip.py
|
#!/usr/local/bin/python3
"""
Copyright (c) 2016 Ad Schellevis <ad@opnsense.org>
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES,
INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY
AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
--------------------------------------------------------------------------------------
Download the MaxMind GeoLite2 Free database into easy-to-use alias files [<COUNTRY>-<PROTO>] located
in /usr/local/share/GeoIP/alias
"""
from lib.alias.geoip import GEOIP
# output files and lines processed
data = GEOIP().download()
print("%(file_count)d files written, with a total number of %(address_count)d lines" % data)
print("locations filename : %(locations_filename)s" % data)
print("IPv4 filename : %(IPv4)s" % data['address_sources'])
print("IPv6 filename : %(IPv6)s" % data['address_sources'])
|
737b8832a1ebbb0215c062586cb36266d72d9cd0
|
ea49dd7d31d2e0b65ce6aadf1274f3bb70abfaf9
|
/problems/0026_Remove_Duplicates_from_Sorted_Array/solution.py
|
0b24f615d9bf67809a84fac1693d11e8ad32ee18
|
[] |
no_license
|
yychuyu/LeetCode
|
907a3d7d67ada9714e86103ac96422381e75d683
|
48384483a55e120caf5d8d353e9aa287fce3cf4a
|
refs/heads/master
| 2020-03-30T15:02:12.492378
| 2019-06-19T01:52:45
| 2019-06-19T01:52:45
| 151,345,944
| 134
| 331
| null | 2019-08-01T02:56:10
| 2018-10-03T01:26:28
|
C++
|
UTF-8
|
Python
| false
| false
| 1,238
|
py
|
solution.py
|
# Use index to record the position of the last deduplicated element. If a later
# element differs from the element at that position, write the current element
# into the slot right after it, so no duplicates remain between any two kept elements.
# index is therefore the subscript of the last element of the deduplicated array,
# so to return the deduplicated length we simply return index+1.
# For the edge case where the array is empty, we obviously cannot return index+1;
# that case needs special handling and returns 0.
class Solution:
def removeDuplicates(self, nums):
index = 0
if not nums:
return index
for n in nums:
if nums[index] != n:
index += 1
nums[index] = n
return index+1
# The empty array needs special handling because an empty array and a one-element
# array both leave index at 0, so the return value index+1 would be 1 in both cases,
# making them indistinguishable. An improved version initializes index to -1, so the
# empty and one-element cases end up with different index values.
class Solution:
def removeDuplicates(self, nums):
index = -1
for n in nums:
if index < 0 or n != nums[index]:
index += 1
nums[index] = n
return index + 1
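A quick check of the second variant, matching the behavior described in the comments above:

sol = Solution()
nums = [1, 1, 2, 2, 3]
length = sol.removeDuplicates(nums)
print(length, nums[:length])          # 3 [1, 2, 3]
assert sol.removeDuplicates([]) == 0  # the empty array no longer needs a special case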
|
8baddf744fadf7a898c96d9c853995d7de6de882
|
749af8e81d5ccd2d8714a34434a9c77772df551b
|
/statsmodels/stats/tests/test_effectsize.py
|
e843c7017524069cf591ab9282c1e86ba37fee36
|
[
"BSD-3-Clause"
] |
permissive
|
statsmodels/statsmodels
|
98ca67192c08bcc611ed3a75edaded2c7181ab98
|
01b19d7d111b29c183f620ff0a949ef6391ff8ee
|
refs/heads/main
| 2023-09-05T13:05:49.497076
| 2023-09-01T10:54:50
| 2023-09-01T10:54:50
| 1,885,237
| 8,666
| 3,023
|
BSD-3-Clause
| 2023-09-13T17:51:48
| 2011-06-12T17:04:50
|
Python
|
UTF-8
|
Python
| false
| false
| 2,282
|
py
|
test_effectsize.py
|
# -*- coding: utf-8 -*-
"""
Created on Mon Oct 5 13:13:59 2020
Author: Josef Perktold
License: BSD-3
"""
from scipy import stats
from numpy.testing import assert_allclose
from statsmodels.stats.effect_size import (
_noncentrality_chisquare, _noncentrality_f, _noncentrality_t)
def test_noncent_chi2():
# > lochi(7.5,2,.95)
# [1] 0.03349255 0.97499458
# > hichi(7.5,2,.95)
# [1] 20.76049805 0.02500663
chi2_stat, df = 7.5, 2
ci_nc = [0.03349255, 20.76049805]
res = _noncentrality_chisquare(chi2_stat, df, alpha=0.05)
assert_allclose(res.confint, ci_nc, rtol=0.005)
# verify umvue unbiased
mean = stats.ncx2.mean(df, res.nc)
assert_allclose(chi2_stat, mean, rtol=1e-8)
assert_allclose(stats.ncx2.cdf(chi2_stat, df, res.confint), [0.975, 0.025],
rtol=1e-8)
def test_noncent_f():
# F(4, 75) = 3.5, confidence level = .95, two-sided CI:
# > lof(3.5,4,75,.95)
# [1] 0.7781436 0.9750039
# > hif(3.5,4,75,.95)
# [1] 29.72949219 0.02499965
f_stat, df1, df2 = 3.5, 4, 75
ci_nc = [0.7781436, 29.72949219]
res = _noncentrality_f(f_stat, df1, df2, alpha=0.05)
assert_allclose(res.confint, ci_nc, rtol=0.005)
# verify umvue unbiased
mean = stats.ncf.mean(df1, df2, res.nc)
assert_allclose(f_stat, mean, rtol=1e-8)
# Relax tolerance due to changes in SciPy and Boost
assert_allclose(stats.ncf.cdf(f_stat, df1, df2, res.confint),
[0.975, 0.025], rtol=5e-5)
def test_noncent_t():
# t(98) = 1.5, confidence level = .95, two-sided CI:
# > lot(1.5,98,.95)
# [1] -0.4749756 0.9750024
# > hit(1.5,98,.95)
# [1] 3.467285 0.025005
# > conf.limits.nct(1.5,98,.95)
# Lower.Limit Prob.Low.Limit Upper.Limit Prob.Up.Limit
# Values -0.474934 0.975 3.467371 0.02499999
t_stat, df = 1.5, 98
ci_nc = [-0.474934, 3.467371]
res = _noncentrality_t(t_stat, df, alpha=0.05)
assert_allclose(res.confint, ci_nc, rtol=0.005)
# verify umvue unbiased
mean = stats.nct.mean(df, res.nc)
assert_allclose(t_stat, mean, rtol=1e-8)
    # Tolerance relaxed due to Boost integration in SciPy
assert_allclose(stats.nct.cdf(t_stat, df, res.confint), [0.975, 0.025],
rtol=1e-6)
|
5bd994206ba97459b5ec6b2f8dd865e991b78481
|
71fb04f723b46a1bf45295be239bcec25e07f98c
|
/keras_cv/point_cloud/within_box_3d_test.py
|
a547b94a53c2e6b345b1187a7e66eec2d3f50a29
|
[
"Apache-2.0"
] |
permissive
|
keras-team/keras-cv
|
9bca4479474e853ec3a1c541b8be20fea2447a1a
|
e83f229f1b7b847cd712d5cd4810097d3e06d14e
|
refs/heads/master
| 2023-08-31T10:22:08.406394
| 2023-08-30T20:24:57
| 2023-08-30T20:24:57
| 265,079,853
| 818
| 287
|
NOASSERTION
| 2023-09-12T16:49:01
| 2020-05-18T22:39:21
|
Python
|
UTF-8
|
Python
| false
| false
| 7,808
|
py
|
within_box_3d_test.py
|
# Copyright 2022 The KerasCV Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import os
import numpy as np
import pytest
import tensorflow as tf
import keras_cv
from keras_cv.tests.test_case import TestCase
num_points = 200000
num_boxes = 1000
box_dimension = 20.0
def get_points_boxes():
points = tf.random.uniform(
shape=[num_points, 2], minval=0, maxval=box_dimension, dtype=tf.float32
)
points_z = 5.0 * np.ones(shape=[num_points, 1], dtype="float32")
points = tf.concat([points, points_z], axis=-1)
boxes_x = tf.random.uniform(
shape=[num_boxes, 1],
minval=0,
maxval=box_dimension - 1.0,
dtype=tf.float32,
)
boxes_y = tf.random.uniform(
shape=[num_boxes, 1],
minval=0,
maxval=box_dimension - 1.0,
dtype=tf.float32,
)
boxes_dx = tf.random.uniform(
shape=[num_boxes, 1], minval=0, maxval=5.0, dtype=tf.float32
)
boxes_dx = tf.math.minimum(box_dimension - boxes_x, boxes_dx)
boxes_dy = tf.random.uniform(
shape=[num_boxes, 1], minval=0, maxval=5.0, dtype=tf.float32
)
boxes_dy = tf.math.minimum(box_dimension - boxes_y, boxes_dy)
boxes_z = 5.0 * np.ones([num_boxes, 1], dtype="float32")
boxes_dz = 3.0 * np.ones([num_boxes, 1], dtype="float32")
boxes_angle = np.zeros([num_boxes, 1], dtype="float32")
boxes = tf.concat(
[boxes_x, boxes_y, boxes_z, boxes_dx, boxes_dy, boxes_dz, boxes_angle],
axis=-1,
)
return points, boxes
class WithinBox3DTest(TestCase):
@pytest.mark.skipif(
"TEST_CUSTOM_OPS" not in os.environ
or os.environ["TEST_CUSTOM_OPS"] != "true",
reason="Requires binaries compiled from source",
)
def test_unbatched_unrotated(self):
boxes = np.array(
[
[0, 0, 0, 4, 4, 4, 0],
[5, 5, 5, 1, 1, 1, 0],
]
).astype("float32")
points = np.array(
[
[0, 0, 0],
[0, 0, 2],
# this point has z value larger than box top z
[0, 0, 2.1],
[2, 0, 0],
[2.01, 0, 0],
# this point belongs to 2nd box
[5.5, 5.5, 5.5],
# this point doesn't belong to 2nd box
[5.6, 5.5, 5.5],
]
).astype("float32")
res = keras_cv.point_cloud.within_box3d_index(points, boxes)
self.assertAllEqual([0, 0, -1, 0, -1, 1, -1], res)
@pytest.mark.skipif(
"TEST_CUSTOM_OPS" not in os.environ
or os.environ["TEST_CUSTOM_OPS"] != "true",
reason="Requires binaries compiled from source",
)
def test_unbatched_rotated(self):
# a box rotated with 45 degree, the intersection with x and y axis
# is [2*sqrt(2), 0] and [0, 2*sqrt(2)]
boxes = np.array(
[
[0, 0, 0, 4, 4, 4, np.pi / 4],
]
).astype("float32")
points = np.array(
[
[0, 0, 0],
[0, 0, 2],
# this point has z value larger than box top z
[0, 0, 2.1],
[2.82, 0, 0],
# this point has x value larger than rotated box
[2.83, 0, 0],
]
).astype("float32")
res = keras_cv.point_cloud.within_box3d_index(points, boxes)
self.assertAllClose([0, 0, -1, 0, -1], res)
@pytest.mark.skipif(
"TEST_CUSTOM_OPS" not in os.environ
or os.environ["TEST_CUSTOM_OPS"] != "true",
reason="Requires binaries compiled from source",
)
def test_batched_unrotated(self):
boxes = np.array(
[
[[0, 0, 0, 4, 4, 4, 0]],
[[5, 5, 5, 1, 1, 1, 0]],
]
).astype("float32")
points = np.array(
[
[
[0, 0, 0],
[0, 0, 2],
# this point has z value larger than box top z
[0, 0, 2.1],
[2, 0, 0],
[2.01, 0, 0],
# this point belongs to 2nd box
[5.5, 5.5, 5.5],
# this point doesn't belong to 2nd box
[5.6, 5.5, 5.5],
]
]
* 2
).astype("float32")
res = keras_cv.point_cloud.within_box3d_index(points, boxes)
self.assertAllEqual(
[[0, 0, -1, 0, -1, -1, -1], [-1, -1, -1, -1, -1, 0, -1]], res
)
@pytest.mark.skipif(
"TEST_CUSTOM_OPS" not in os.environ
or os.environ["TEST_CUSTOM_OPS"] != "true",
reason="Requires binaries compiled from source",
)
def test_batched_rotated(self):
        # a box rotated by 45 degrees; its intersections with the x and y axes
        # are at [2*sqrt(2), 0] and [0, 2*sqrt(2)]
boxes = np.array(
[
[[0, 0, 0, 4, 4, 4, np.pi / 4]],
[[5, 5, 5, 1, 1, 1, 0]],
]
).astype("float32")
points = np.array(
[
[
[0, 0, 0],
[0, 0, 2],
# this point has z value larger than box top z
[0, 0, 2.1],
[2.82, 0, 0],
# this point has x value larger than rotated box
[2.83, 0, 0],
]
]
* 2
).astype("float32")
res = keras_cv.point_cloud.within_box3d_index(points, boxes)
self.assertAllEqual([[0, 0, -1, 0, -1], [-1, -1, -1, -1, -1]], res)
@pytest.mark.skipif(
"TEST_CUSTOM_OPS" not in os.environ
or os.environ["TEST_CUSTOM_OPS"] != "true",
reason="Requires binaries compiled from source",
)
def test_many_points(self):
points, boxes = get_points_boxes()
for _ in range(5):
res = keras_cv.point_cloud.within_box3d_index(points, boxes)
self.assertAllClose(res.shape, points.shape[:1])
@pytest.mark.skipif(
"TEST_CUSTOM_OPS" not in os.environ
or os.environ["TEST_CUSTOM_OPS"] != "true",
reason="Requires binaries compiled from source",
)
@pytest.mark.extra_large
def test_equal(self):
for _ in range(10000):
with tf.device("cpu:0"):
box_center = tf.random.uniform(
shape=[1, 3], minval=-10.0, maxval=10.0
)
box_dim = tf.random.uniform(
shape=[1, 3], minval=0.1, maxval=10.0
)
boxes = tf.concat([box_center, box_dim, [[0.0]]], axis=-1)
points = tf.random.normal([32, 3])
res = keras_cv.point_cloud.is_within_any_box3d(points, boxes)
res_v2 = keras_cv.point_cloud.is_within_any_box3d_v2(
points, boxes
)
res_v3 = keras_cv.point_cloud.is_within_any_box3d_v3(
points, boxes
)
self.assertAllEqual(res, res_v2)
self.assertAllEqual(res, res_v3)
|
927dc3fba5570b47d461f9a9b4f9c13b32827b72
|
6923f79f1eaaba0ab28b25337ba6cb56be97d32d
|
/Introduction_to_Python_for_Econometrics/Python_introduction/file_system_and_navigation_basic.py
|
5d72f40111c296ccaeee294759b01f938587a7ca
|
[] |
no_license
|
burakbayramli/books
|
9fe7ba0cabf06e113eb125d62fe16d4946f4a4f0
|
5e9a0e03aa7ddf5e5ddf89943ccc68d94b539e95
|
refs/heads/master
| 2023-08-17T05:31:08.885134
| 2023-08-14T10:05:37
| 2023-08-14T10:05:37
| 72,460,321
| 223
| 174
| null | 2022-10-24T12:15:06
| 2016-10-31T17:24:00
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 2,835
|
py
|
file_system_and_navigation_basic.py
|
# Create file.csv.gz from file.csv
# Extract csv from gzipped csv
from __future__ import division
from __future__ import print_function
import glob
import gzip
import os
import shutil
import subprocess
import sys
import tarfile
import zipfile
from pylab import *
from numpy import *
# End Imports
pwd = os.getcwd()
os.chdir('c:\\temp')
os.chdir('c:/temp') # Identical
os.chdir('..')
os.getcwd() # Now in 'c:\\'
os.mkdir('c:\\temp\\test')
os.makedirs('c:/temp/test/level2/level3') # mkdir will fail
os.rmdir('c:\\temp\\test\\level2\\level3')
shutil.rmtree('c:\\temp\\test') # rmdir fails, since not empty
os.chdir('c:\\temp')
files = os.listdir('.')
for f in files:
if os.path.isdir(f):
print(f, ' is a directory.')
elif os.path.isfile(f):
print(f, ' is a file.')
else:
        print(f, ' is something else.')
files = glob.glob('c:\\temp\\*.txt')
for file in files:
print(file)
os.chdir('c:\\temp\\python')
# Make an empty file
f = open('file.ext','w')
f.close()
# Copies file.ext to 'c:\temp\'
shutil.copy('file.ext','c:\\temp\\')
# Copies file.ext to 'c:\temp\\python\file2.ext'
shutil.copy('file.ext','file2.ext')
# Copies file.ext to 'c:\\temp\\file3.ext', plus metadata
shutil.copy2('file.ext','file3.ext')
shutil.copytree('c:\\temp\\python\\','c:\\temp\\newdir\\')
shutil.move('c:\\temp\\newdir\\','c:\\temp\\newdir2\\')
# Copy using xcopy
os.system('xcopy /S /I c:\\temp c:\\temp4')
subprocess.call('xcopy /S /I c:\\temp c:\\temp5',shell=True)
# Extract using 7-zip
subprocess.call('"C:\\Program Files\\7-Zip\\7z.exe" e -y c:\\temp\\zip.7z')
# Creates files.zip
shutil.make_archive('files','zip','c:\\temp\\folder_to_archive')
# Creates files.tar.gz
shutil.make_archive('files','gztar','c:\\temp\\folder_to_archive')
csvin = open('file.csv','rb')
gz = gzip.GzipFile('file.csv.gz','wb')
gz.write(csvin.read())
gz.close()
csvin.close()
# Extract zip
zf = zipfile.ZipFile('files.zip')
zf.extractall('c:\\temp\\zip\\')
zf.close()
# Extract gzip tar 'r:gz' indicates read gzipped
gztar = tarfile.open('file.tar.gz', 'r:gz')
gztar.extractall('c:\\temp\\gztar\\')
gztar.close()
gz = gzip.GzipFile('file.csv.gz','rb')
csvout = open('file.csv','wb')
csvout.write(gz.read())
csvout.close()
gz.close()
# Read all lines using readlines()
f = open('file.csv','r')
lines = f.readlines()
for line in lines:
print(line)
f.close()
# Using blocking via readline()
f = open('file.csv','r')
line = f.readline()
while line != '':
print(line)
line = f.readline()
f.close()
# Using larger blocks via readlines(n)
f = open('file.csv','r')
lines = f.readlines(2)
while lines:
    for line in lines:
        print(line)
    lines = f.readlines(2)
f.close()
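The gzip round-trip above predates context managers; a more modern sketch of the same csv/gz conversion (gzip and shutil are already imported above) would be:

# Compress file.csv into file.csv.gz, then decompress it back.
with open('file.csv', 'rb') as src, gzip.open('file.csv.gz', 'wb') as dst:
    shutil.copyfileobj(src, dst)
with gzip.open('file.csv.gz', 'rb') as src, open('file_copy.csv', 'wb') as dst:
    shutil.copyfileobj(src, dst)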
|