Dataset schema (one record per source file):

blob_id: string (length 40)
directory_id: string (length 40)
path: string (length 4 to 721)
content_id: string (length 40)
detected_licenses: list (0 to 57 items)
license_type: string (2 classes)
repo_name: string (length 5 to 91)
snapshot_id: string (length 40)
revision_id: string (length 40)
branch_name: string (321 classes)
visit_date: timestamp[ns] (2016-08-12 09:31:09 to 2023-09-06 10:45:07)
revision_date: timestamp[ns] (2010-09-28 14:01:40 to 2023-09-06 06:22:19)
committer_date: timestamp[ns] (2010-09-28 14:01:40 to 2023-09-06 06:22:19)
github_id: int64 (426 to 681M)
star_events_count: int64 (101 to 243k)
fork_events_count: int64 (0 to 110k)
gha_license_id: string (23 classes)
gha_event_created_at: timestamp[ns] (2012-06-28 18:51:49 to 2023-09-14 21:59:16, nullable)
gha_created_at: timestamp[ns] (2008-02-11 22:55:26 to 2023-08-10 11:14:58, nullable)
gha_language: string (147 classes)
src_encoding: string (26 classes)
language: string (2 classes)
is_vendor: bool
is_generated: bool
length_bytes: int64 (6 to 10.2M)
extension: string (115 classes)
filename: string (length 3 to 113)
content: string (length 6 to 10.2M)

blob_id: 0467caeb8e03b495ae77e8ee00893b02ec306b65
directory_id: faada8dcb8fbb4de1db595ba5849789bb0ecc07d
path: /Regression/multiple linear/multiple_linear_rgression_with_sklearn.py
content_id: a34b45b23fa3b20784b418c755d81771805a582b
detected_licenses: []
license_type: no_license
repo_name: yug95/MachineLearning
snapshot_id: 77eec8b8b629ef4854b88b2db058f7136a882946
revision_id: e9a1f9bd4bfaba3bf294f2c818eb313c2f373d35
branch_name: refs/heads/master
visit_date: 2023-06-21T19:26:29.885091
revision_date: 2023-01-26T07:07:43
committer_date: 2023-01-26T07:07:43
github_id: 141,684,199
star_events_count: 113
fork_events_count: 133
gha_license_id: null
gha_event_created_at: 2023-09-07T14:47:52
gha_created_at: 2018-07-20T08:16:58
gha_language: Jupyter Notebook
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 1,705
extension: py
filename: multiple_linear_rgression_with_sklearn.py
content:
#Multiple linear regression using the sklearn library
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
data = pd.read_csv('50_Startups.csv')
x = data.iloc[:,:-1].values
y = data.iloc[:,4].values
#encoding of categorical variable (State, column index 3)
#note: OneHotEncoder(categorical_features=...) was removed in scikit-learn 0.22;
#ColumnTransformer is the supported replacement and also places the encoded
#columns first, so the dummy-trap slice below still works
from sklearn.compose import ColumnTransformer
from sklearn.preprocessing import OneHotEncoder
ct = ColumnTransformer([('state', OneHotEncoder(), [3])], remainder='passthrough')
x = ct.fit_transform(x)
#avoiding dummy variable trap
x = x[:,1:]
#splitting data into train and test set
from sklearn.model_selection import train_test_split
x_train, x_test, y_train, y_test = train_test_split(x,y,test_size=0.2,random_state=0)
#fit model
from sklearn.linear_model import LinearRegression
regressor = LinearRegression()
regressor.fit(x_train,y_train)
y_pred = regressor.predict(x_test)
print(y_pred)
print(np.sum(y_test-y_pred))
#check with normal way
# x_transpose = np.transpose(x_train)
# XTX = np.dot(x_transpose,x_train)
#
# IXTX = np.linalg.pinv(XTX)
#
# IXTXXT = np.dot(IXTX,x_transpose)
#
# IXTXXTY = np.dot(IXTXXT,y_train)
#
# B = IXTXXTY
#
# pred_y = np.dot(x_test,B)
# print(pred_y)
# print(np.sum(y_test-pred_y))
# # print(y_test)
plt.scatter(x_test[:,2], y_test, c='green', label="actual values")
plt.scatter(x_test[:,3], y_test, c='blue', label="actual values")
plt.scatter(x_test[:,4], y_test, c='yellow', label="actual values")
plt.plot(x_test[:,2], y_pred, 'r', label="regression prediction")
plt.plot(x_test[:,3], y_pred, 'b', label="regression prediction")
plt.plot(x_test[:,4], y_pred, 'y', label="regression prediction")
plt.xlabel("X parameters")
plt.ylabel("Y parameters")
plt.legend()
plt.show()
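# A hedged follow-up sketch: the commented-out "normal way" check above omits
# the intercept column, so its predictions cannot match LinearRegression,
# which fits an intercept by default. Below is a version with a bias column;
# the matrices are synthetic stand-ins, not the 50_Startups data.
rng = np.random.default_rng(0)
xn_train = rng.random((40, 5))
yn_train = xn_train @ np.array([1.0, 2.0, 3.0, 4.0, 5.0]) + 7.0
xn_test = rng.random((10, 5))
def add_bias(x):
    # prepend a column of ones so the normal equation also fits an intercept
    return np.hstack([np.ones((x.shape[0], 1)), x])
xb = add_bias(xn_train)
B = np.linalg.pinv(xb.T @ xb) @ xb.T @ yn_train  # B = (X'X)^+ X'y
pred_yn = add_bias(xn_test) @ B
print(B[0])  # recovered intercept, approximately 7.0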

blob_id: 6bc32862d5fba942ad795918c00b419510909153
directory_id: dac12c9178b13d60f401c4febff5569af8aa2719
path: /utils/dataset_manifest/core.py
content_id: 1be0976849c68a4514bf5e6d18f6054c19fec19f
detected_licenses: ["LGPL-2.0-or-later", "MIT", "GPL-1.0-or-later"]
license_type: permissive
repo_name: opencv/cvat
snapshot_id: 39dc66ca20f972ba40b79c44d7ce43590dc0b0b5
revision_id: 899c9fd75146744def061efd7ab1b1c6c9f6942f
branch_name: refs/heads/develop
visit_date: 2023-08-19T04:27:56.974498
revision_date: 2023-08-18T09:58:25
committer_date: 2023-08-18T09:58:25
github_id: 139,156,354
star_events_count: 6,558
fork_events_count: 1,887
gha_license_id: MIT
gha_event_created_at: 2023-09-14T12:44:39
gha_created_at: 2018-06-29T14:02:45
gha_language: TypeScript
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 29,673
extension: py
filename: core.py
content:
# Copyright (C) 2021-2022 Intel Corporation
# Copyright (C) 2022-2023 CVAT.ai Corporation
#
# SPDX-License-Identifier: MIT
from enum import Enum
from io import StringIO
import av
import json
import os
from abc import ABC, abstractmethod, abstractproperty, abstractstaticmethod
from contextlib import closing
from PIL import Image
from json.decoder import JSONDecodeError
from io import BytesIO
from .errors import InvalidManifestError, InvalidVideoFrameError
from .utils import SortingMethod, md5_hash, rotate_image, sort
from typing import Dict, List, Union, Optional
class VideoStreamReader:
def __init__(self, source_path, chunk_size, force):
self._source_path = source_path
self._frames_number = None
self._force = force
self._upper_bound = 3 * chunk_size + 1
with closing(av.open(self.source_path, mode='r')) as container:
video_stream = VideoStreamReader._get_video_stream(container)
for packet in container.demux(video_stream):
for frame in packet.decode():
# check type of first frame
if not frame.pict_type.name == 'I':
raise InvalidVideoFrameError('First frame is not key frame')
# get video resolution
if video_stream.metadata.get('rotate'):
frame = av.VideoFrame().from_ndarray(
rotate_image(
frame.to_ndarray(format='bgr24'),
360 - int(container.streams.video[0].metadata.get('rotate')),
),
format ='bgr24',
)
self.height, self.width = (frame.height, frame.width)
# not all videos contain information about numbers of frames
if video_stream.frames:
self._frames_number = video_stream.frames
return
@property
def source_path(self):
return self._source_path
@staticmethod
def _get_video_stream(container):
video_stream = next(stream for stream in container.streams if stream.type == 'video')
video_stream.thread_type = 'AUTO'
return video_stream
def __len__(self):
return self._frames_number
@property
def resolution(self):
return (self.width, self.height)
def validate_key_frame(self, container, video_stream, key_frame):
for packet in container.demux(video_stream):
for frame in packet.decode():
if md5_hash(frame) != key_frame['md5'] or frame.pts != key_frame['pts']:
return False
return True
def __iter__(self):
with closing(av.open(self.source_path, mode='r')) as container:
video_stream = self._get_video_stream(container)
frame_pts, frame_dts = -1, -1
index, key_frame_number = 0, 0
for packet in container.demux(video_stream):
for frame in packet.decode():
if None not in {frame.pts, frame_pts} and frame.pts <= frame_pts:
raise InvalidVideoFrameError('Invalid pts sequences')
if None not in {frame.dts, frame_dts} and frame.dts <= frame_dts:
raise InvalidVideoFrameError('Invalid dts sequences')
frame_pts, frame_dts = frame.pts, frame.dts
if frame.key_frame:
key_frame_number += 1
ratio = (index + 1) // key_frame_number
if ratio >= self._upper_bound and not self._force:
raise AssertionError('Too few keyframes')
key_frame = {
'index': index,
'pts': frame.pts,
'md5': md5_hash(frame)
}
with closing(av.open(self.source_path, mode='r')) as checked_container:
checked_container.seek(offset=key_frame['pts'], stream=video_stream)
isValid = self.validate_key_frame(checked_container, video_stream, key_frame)
if isValid:
yield (index, key_frame['pts'], key_frame['md5'])
else:
yield index
index += 1
if not self._frames_number:
self._frames_number = index
class KeyFramesVideoStreamReader(VideoStreamReader):
def __init__(self, **kwargs):
super().__init__(**kwargs)
def __iter__(self):
with closing(av.open(self.source_path, mode='r')) as container:
video_stream = self._get_video_stream(container)
frame_pts, frame_dts = -1, -1
index, key_frame_number = 0, 0
for packet in container.demux(video_stream):
for frame in packet.decode():
if None not in {frame.pts, frame_pts} and frame.pts <= frame_pts:
raise InvalidVideoFrameError('Invalid pts sequences')
if None not in {frame.dts, frame_dts} and frame.dts <= frame_dts:
raise InvalidVideoFrameError('Invalid dts sequences')
frame_pts, frame_dts = frame.pts, frame.dts
if frame.key_frame:
key_frame_number += 1
ratio = (index + 1) // key_frame_number
if ratio >= self._upper_bound and not self._force:
raise AssertionError('Too few keyframes')
key_frame = {
'index': index,
'pts': frame.pts,
'md5': md5_hash(frame)
}
with closing(av.open(self.source_path, mode='r')) as checked_container:
checked_container.seek(offset=key_frame['pts'], stream=video_stream)
isValid = self.validate_key_frame(checked_container, video_stream, key_frame)
if isValid:
yield (index, key_frame['pts'], key_frame['md5'])
index += 1
class DatasetImagesReader:
def __init__(self,
sources: Union[List[str], List[BytesIO]],
*,
start: int = 0,
step: int = 1,
stop: Optional[int] = None,
meta: Optional[Dict[str, List[str]]] = None,
sorting_method: SortingMethod =SortingMethod.PREDEFINED,
use_image_hash: bool = False,
**kwargs):
self._raw_data_used = not isinstance(sources[0], str)
func = (lambda x: x.filename) if self._raw_data_used else None
self._sources = sort(sources, sorting_method, func=func)
self._meta = meta
self._data_dir = kwargs.get('data_dir', None)
self._use_image_hash = use_image_hash
self._start = start
self._stop = stop if stop else len(sources)
self._step = step
@property
def start(self):
return self._start
@start.setter
def start(self, value):
self._start = int(value)
@property
def stop(self):
return self._stop
@stop.setter
def stop(self, value):
self._stop = int(value)
@property
def step(self):
return self._step
@step.setter
def step(self, value):
self._step = int(value)
def __iter__(self):
sources = (i for i in self._sources)
for idx in range(self._stop):
if idx in self.range_:
image = next(sources)
img = Image.open(image, mode='r')
img_name = os.path.relpath(image, self._data_dir) if self._data_dir \
else os.path.basename(image) if not self._raw_data_used else image.filename
name, extension = os.path.splitext(img_name)
image_properties = {
'name': name.replace('\\', '/'),
'extension': extension,
}
width, height = img.width, img.height
orientation = img.getexif().get(274, 1)
if orientation > 4:
width, height = height, width
image_properties['width'] = width
image_properties['height'] = height
if self._meta and img_name in self._meta:
image_properties['meta'] = self._meta[img_name]
if self._use_image_hash:
image_properties['checksum'] = md5_hash(img)
yield image_properties
else:
yield dict()
@property
def range_(self):
return range(self._start, self._stop, self._step)
def __len__(self):
return len(self.range_)
class Dataset3DImagesReader(DatasetImagesReader):
def __init__(self, **kwargs):
super().__init__(**kwargs)
def __iter__(self):
sources = (i for i in self._sources)
for idx in range(self._stop):
if idx in self.range_:
image = next(sources)
img_name = os.path.relpath(image, self._data_dir) if self._data_dir \
else os.path.basename(image)
name, extension = os.path.splitext(img_name)
image_properties = {
'name': name,
'extension': extension,
}
if self._meta and img_name in self._meta:
image_properties['meta'] = self._meta[img_name]
yield image_properties
else:
yield dict()
class _Manifest:
class SupportedVersion(str, Enum):
V1 = '1.0'
V1_1 = '1.1'
@classmethod
def choices(cls):
return (x.value for x in cls)
def __str__(self):
return self.value
FILE_NAME = 'manifest.jsonl'
VERSION = SupportedVersion.V1_1
TYPE: str # must be set externally
def __init__(self, path, upload_dir=None):
        assert path, 'A path to the manifest file was not provided'
self._path = os.path.join(path, self.FILE_NAME) if os.path.isdir(path) else path
self._upload_dir = upload_dir
@property
def path(self):
return self._path
@property
def name(self):
return os.path.basename(self._path) if not self._upload_dir \
else os.path.relpath(self._path, self._upload_dir)
def get_header_lines_count(self) -> int:
if self.TYPE == 'video':
return 3
elif self.TYPE == 'images':
return 2
assert False, f"Unknown manifest type '{self.TYPE}'"
# Needed for faster iteration over the manifest file, will be generated to work inside CVAT
# and will not be generated when manually creating a manifest
class _Index:
FILE_NAME = 'index.json'
def __init__(self, path):
assert path and os.path.isdir(path), 'No index directory path'
self._path = os.path.join(path, self.FILE_NAME)
self._index = {}
@property
def path(self):
return self._path
def dump(self):
with open(self._path, 'w') as index_file:
json.dump(self._index, index_file, separators=(',', ':'))
def load(self):
with open(self._path, 'r') as index_file:
self._index = json.load(index_file,
object_hook=lambda d: {int(k): v for k, v in d.items()})
def remove(self):
os.remove(self._path)
def create(self, manifest, *, skip):
        assert os.path.exists(manifest), 'The manifest file does not exist, so the index cannot be created'
with open(manifest, 'r+') as manifest_file:
while skip:
manifest_file.readline()
skip -= 1
image_number = 0
position = manifest_file.tell()
line = manifest_file.readline()
while line:
if line.strip():
self._index[image_number] = position
image_number += 1
position = manifest_file.tell()
line = manifest_file.readline()
def partial_update(self, manifest, number):
        assert os.path.exists(manifest), 'The manifest file does not exist, so the index cannot be updated'
with open(manifest, 'r+') as manifest_file:
manifest_file.seek(self._index[number])
line = manifest_file.readline()
while line:
if line.strip():
self._index[number] = manifest_file.tell()
number += 1
line = manifest_file.readline()
def __getitem__(self, number):
if not 0 <= number < len(self):
raise IndexError('Invalid index number: {}\nMax: {}'.format(number, len(self) - 1))
return self._index[number]
def __len__(self):
return len(self._index)
class _ManifestManager(ABC):
BASE_INFORMATION = {
'version' : 1,
'type': 2,
}
def _json_item_is_valid(self, **state):
for item in self._required_item_attributes:
if state.get(item, None) is None:
raise InvalidManifestError(
f"Invalid '{self.manifest.name}' file structure: "
f"'{item}' is required, but not found"
)
def __init__(self, path, create_index, upload_dir=None):
self._manifest = _Manifest(path, upload_dir)
self._index = _Index(os.path.dirname(self._manifest.path))
self._reader = None
self._create_index = create_index
@property
def reader(self):
return self._reader
def _parse_line(self, line):
""" Getting a random line from the manifest file """
with open(self._manifest.path, 'r') as manifest_file:
if isinstance(line, str):
assert line in self.BASE_INFORMATION.keys(), \
'An attempt to get non-existent information from the manifest'
for _ in range(self.BASE_INFORMATION[line]):
fline = manifest_file.readline()
return json.loads(fline)[line]
else:
assert self._index, 'No prepared index'
offset = self._index[line]
manifest_file.seek(offset)
properties = manifest_file.readline()
parsed_properties = ImageProperties(json.loads(properties))
self._json_item_is_valid(**parsed_properties)
return parsed_properties
def init_index(self):
if os.path.exists(self._index.path):
self._index.load()
else:
self._index.create(self._manifest.path, skip=self._manifest.get_header_lines_count())
if self._create_index:
self._index.dump()
def reset_index(self):
if self._create_index and os.path.exists(self._index.path):
self._index.remove()
def set_index(self):
self.reset_index()
self.init_index()
def remove(self):
self.reset_index()
if os.path.exists(self.manifest.path):
os.remove(self.manifest.path)
@abstractmethod
def create(self, content=None, _tqdm=None):
...
@abstractmethod
def partial_update(self, number, properties):
...
def __iter__(self):
self.set_index()
with open(self._manifest.path, 'r') as manifest_file:
manifest_file.seek(self._index[0])
for idx, line_start in enumerate(self._index):
manifest_file.seek(line_start)
line = manifest_file.readline()
item = ImageProperties(json.loads(line))
self._json_item_is_valid(**item)
yield (idx, item)
@property
def manifest(self):
return self._manifest
def __len__(self):
if hasattr(self, '_index'):
return len(self._index)
else:
return None
def __getitem__(self, item):
if isinstance(item, slice):
return [self._parse_line(i) for i in range(item.start or 0, item.stop or len(self), item.step or 1)]
return self._parse_line(item)
@property
def index(self):
return self._index
@abstractproperty
def data(self):
...
@abstractmethod
def get_subset(self, subset_names):
...
@property
def exists(self):
return os.path.exists(self._manifest.path)
class VideoManifestManager(_ManifestManager):
_required_item_attributes = {'number', 'pts'}
def __init__(self, manifest_path, create_index=True):
super().__init__(manifest_path, create_index)
setattr(self._manifest, 'TYPE', 'video')
self.BASE_INFORMATION['properties'] = 3
def link(self, media_file, upload_dir=None, chunk_size=36, force=False, only_key_frames=False, **kwargs):
ReaderClass = VideoStreamReader if not only_key_frames else KeyFramesVideoStreamReader
self._reader = ReaderClass(
os.path.join(upload_dir, media_file) if upload_dir else media_file,
chunk_size,
force)
def _write_base_information(self, file):
base_info = {
'version': self._manifest.VERSION,
'type': self._manifest.TYPE,
'properties': {
'name': os.path.basename(self._reader.source_path),
'resolution': self._reader.resolution,
'length': len(self._reader),
},
}
for key, value in base_info.items():
json_item = json.dumps({key: value}, separators=(',', ':'))
file.write(f'{json_item}\n')
def _write_core_part(self, file, _tqdm):
iterable_obj = self._reader if _tqdm is None else \
_tqdm(self._reader, desc="Manifest creating", total=len(self._reader))
for item in iterable_obj:
if isinstance(item, tuple):
json_item = json.dumps({
'number': item[0],
'pts': item[1],
'checksum': item[2]
}, separators=(',', ':'))
file.write(f"{json_item}\n")
def create(self, *, _tqdm=None): # pylint: disable=arguments-differ
""" Creating and saving a manifest file """
if not len(self._reader):
tmp_file = StringIO()
self._write_core_part(tmp_file, _tqdm)
with open(self._manifest.path, 'w') as manifest_file:
self._write_base_information(manifest_file)
manifest_file.write(tmp_file.getvalue())
else:
with open(self._manifest.path, 'w') as manifest_file:
self._write_base_information(manifest_file)
self._write_core_part(manifest_file, _tqdm)
self.set_index()
def partial_update(self, number, properties):
pass
@property
def video_name(self):
return self['properties']['name']
@property
def video_resolution(self):
return self['properties']['resolution']
@property
def video_length(self):
return self['properties']['length']
@property
def data(self):
return self.video_name
def get_subset(self, subset_names):
raise NotImplementedError()
class VideoManifestValidator(VideoManifestManager):
def __init__(self, source_path, manifest_path):
self._source_path = source_path
super().__init__(manifest_path)
@staticmethod
def _get_video_stream(container):
video_stream = next(stream for stream in container.streams if stream.type == 'video')
video_stream.thread_type = 'AUTO'
return video_stream
def validate_key_frame(self, container, video_stream, key_frame):
for packet in container.demux(video_stream):
for frame in packet.decode():
assert frame.pts == key_frame['pts'], "The uploaded manifest does not match the video"
return
def validate_seek_key_frames(self):
with closing(av.open(self._source_path, mode='r')) as container:
video_stream = self._get_video_stream(container)
last_key_frame = None
for _, key_frame in self:
# check that key frames sequence sorted
if last_key_frame and last_key_frame['number'] >= key_frame['number']:
raise AssertionError('Invalid saved key frames sequence in manifest file')
container.seek(offset=key_frame['pts'], stream=video_stream)
self.validate_key_frame(container, video_stream, key_frame)
last_key_frame = key_frame
def validate_frame_numbers(self):
with closing(av.open(self._source_path, mode='r')) as container:
video_stream = self._get_video_stream(container)
# not all videos contain information about numbers of frames
frames = video_stream.frames
if frames:
assert frames == self.video_length, "The uploaded manifest does not match the video"
return
class ImageProperties(dict):
@property
def full_name(self):
return f"{self['name']}{self['extension']}"
class ImageManifestManager(_ManifestManager):
_required_item_attributes = {'name', 'extension'}
def __init__(self, manifest_path, upload_dir=None, create_index=True):
super().__init__(manifest_path, create_index, upload_dir)
setattr(self._manifest, 'TYPE', 'images')
def link(self, **kwargs):
ReaderClass = DatasetImagesReader if not kwargs.get('DIM_3D', None) else Dataset3DImagesReader
self._reader = ReaderClass(**kwargs)
def _write_base_information(self, file):
base_info = {
'version': self._manifest.VERSION,
'type': self._manifest.TYPE,
}
for key, value in base_info.items():
json_line = json.dumps({key: value}, separators=(',', ':'))
file.write(f'{json_line}\n')
def _write_core_part(self, file, obj, _tqdm):
iterable_obj = obj if _tqdm is None else \
_tqdm(obj, desc="Manifest creating",
total=None if not hasattr(obj, '__len__') else len(obj))
for image_properties in iterable_obj:
json_line = json.dumps({
key: value for key, value in image_properties.items()
}, separators=(',', ':'))
file.write(f"{json_line}\n")
def create(self, content=None, _tqdm=None):
""" Creating and saving a manifest file for the specialized dataset"""
with open(self._manifest.path, 'w') as manifest_file:
self._write_base_information(manifest_file)
obj = content if content else self._reader
self._write_core_part(manifest_file, obj, _tqdm)
self.set_index()
def partial_update(self, number, properties):
pass
@property
def data(self):
return (f"{image.full_name}" for _, image in self)
def get_subset(self, subset_names):
index_list = []
subset = []
for _, image in self:
image_name = f"{image.full_name}"
if image_name in subset_names:
index_list.append(subset_names.index(image_name))
properties = {
'name': f"{image['name']}",
'extension': f"{image['extension']}",
'width': image['width'],
'height': image['height'],
}
for optional_field in {'meta', 'checksum'}:
value = image.get(optional_field)
if value:
properties[optional_field] = value
subset.append(properties)
return index_list, subset
def emulate_hierarchical_structure(
self,
page_size: int,
manifest_prefix: Optional[str] = None,
prefix: Optional[str] = None,
start_index: Optional[int] = None,
) -> Dict:
next_start_index = None
# get part of manifest content
# generally we cannot rely to slice with manifest content because it may not be sorted.
# And then this can lead to incorrect index calculation.
if manifest_prefix:
content = [os.path.join(manifest_prefix, f[1].full_name) for f in self]
else:
content = [f[1].full_name for f in self]
if prefix:
content = list(filter(lambda x: x.startswith(prefix), content))
if os.path.sep in prefix:
last_dir_symbol = prefix.rindex(os.path.sep)
content = [f[last_dir_symbol + 1:] for f in content]
files_in_root, files_in_directories = [], []
for f in content:
if os.path.sep in f:
files_in_directories.append(f)
else:
files_in_root.append(f)
directories = list(set([d.split(os.path.sep)[0] for d in files_in_directories]))
level_in_hierarchical_structure = [{'name': d, 'type': 'DIR'} for d in sort(directories, SortingMethod.NATURAL)]
level_in_hierarchical_structure.extend([{'name': f, 'type': 'REG'} for f in sort(files_in_root, SortingMethod.NATURAL)])
level_in_hierarchical_structure = level_in_hierarchical_structure[start_index:]
if len(level_in_hierarchical_structure) > page_size:
level_in_hierarchical_structure = level_in_hierarchical_structure[:page_size]
next_start_index = start_index + page_size
return {
'content': level_in_hierarchical_structure,
'next': next_start_index,
}
class _BaseManifestValidator(ABC):
def __init__(self, full_manifest_path):
self._manifest = _Manifest(full_manifest_path)
def validate(self):
try:
# we cannot use index in general because manifest may be e.g. in share point with ro mode
with open(self._manifest.path, 'r') as manifest:
for validator in self.validators:
line = json.loads(manifest.readline().strip())
validator(line)
return True
except (ValueError, KeyError, JSONDecodeError, InvalidManifestError):
return False
@staticmethod
def _validate_version(_dict):
if not _dict['version'] in _Manifest.SupportedVersion.choices():
raise InvalidManifestError('Incorrect version field')
def _validate_type(self, _dict):
if not _dict['type'] == self.TYPE:
raise InvalidManifestError('Incorrect type field')
@abstractproperty
def validators(self):
pass
@abstractstaticmethod
def _validate_first_item(_dict):
pass
class _VideoManifestStructureValidator(_BaseManifestValidator):
TYPE = 'video'
@property
def validators(self):
return (
self._validate_version,
self._validate_type,
self._validate_properties,
self._validate_first_item,
)
@staticmethod
def _validate_properties(_dict):
properties = _dict['properties']
if not isinstance(properties['name'], str):
raise InvalidManifestError('Incorrect name field')
if not isinstance(properties['resolution'], list):
raise InvalidManifestError('Incorrect resolution field')
if not isinstance(properties['length'], int) or properties['length'] == 0:
raise InvalidManifestError('Incorrect length field')
@staticmethod
def _validate_first_item(_dict):
if not isinstance(_dict['number'], int):
raise InvalidManifestError('Incorrect number field')
if not isinstance(_dict['pts'], int):
raise InvalidManifestError('Incorrect pts field')
class _DatasetManifestStructureValidator(_BaseManifestValidator):
TYPE = 'images'
@property
def validators(self):
return (
self._validate_version,
self._validate_type,
self._validate_first_item,
)
@staticmethod
def _validate_first_item(_dict):
if not isinstance(_dict['name'], str):
raise InvalidManifestError('Incorrect name field')
if not isinstance(_dict['extension'], str):
raise InvalidManifestError('Incorrect extension field')
# FIXME
# Width and height are required for 2D data, but
# for 3D these parameters are not saved now.
# It is necessary to uncomment these restrictions when manual preparation for 3D data is implemented.
# if not isinstance(_dict['width'], int):
# raise InvalidManifestError('Incorrect width field')
# if not isinstance(_dict['height'], int):
# raise InvalidManifestError('Incorrect height field')
def is_manifest(full_manifest_path):
return is_video_manifest(full_manifest_path) or \
is_dataset_manifest(full_manifest_path)
def is_video_manifest(full_manifest_path):
validator = _VideoManifestStructureValidator(full_manifest_path)
return validator.validate()
def is_dataset_manifest(full_manifest_path):
validator = _DatasetManifestStructureValidator(full_manifest_path)
return validator.validate()
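# A hedged usage sketch for the managers defined above; the media path and
# manifest path are hypothetical placeholders, and the call sequence follows
# link()/create() exactly as defined in this module.
if __name__ == '__main__':
    manager = VideoManifestManager('/tmp/manifest.jsonl')
    manager.link(media_file='/tmp/video.mp4', chunk_size=36)
    manager.create()
    print(manager.video_name, manager.video_resolution, manager.video_length)
    for number, item in manager:  # yields (index, ImageProperties) pairs
        print(number, item['pts'])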

blob_id: 98fd7959a6025719d91f5b3c03db17d6a8f3f375
directory_id: d139ef8d18fcde584b06c1d7d25477d7d31ee59b
path: /google/ads/googleads/v14/errors/types/ad_group_criterion_error.py
content_id: 06062defdcdcfb91b10e608277f72f04e554d8fc
detected_licenses: ["Apache-2.0"]
license_type: permissive
repo_name: googleads/google-ads-python
snapshot_id: a53993e6be057d3aa61f276b69e97b8b338d1c12
revision_id: 146d7070c1ea2140555d49d73c77892430b37314
branch_name: refs/heads/main
visit_date: 2023-08-31T01:58:16.738997
revision_date: 2023-06-05T08:18:42
committer_date: 2023-08-28T19:08:38
github_id: 143,435,091
star_events_count: 422
fork_events_count: 525
gha_license_id: Apache-2.0
gha_event_created_at: 2023-09-12T17:46:52
gha_created_at: 2018-08-03T14:08:04
gha_language: Python
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 2,569
extension: py
filename: ad_group_criterion_error.py
content:
# -*- coding: utf-8 -*-
# Copyright 2023 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import annotations
import proto # type: ignore
__protobuf__ = proto.module(
package="google.ads.googleads.v14.errors",
marshal="google.ads.googleads.v14",
manifest={
"AdGroupCriterionErrorEnum",
},
)
class AdGroupCriterionErrorEnum(proto.Message):
r"""Container for enum describing possible ad group criterion
errors.
"""
class AdGroupCriterionError(proto.Enum):
r"""Enum describing possible ad group criterion errors."""
UNSPECIFIED = 0
UNKNOWN = 1
AD_GROUP_CRITERION_LABEL_DOES_NOT_EXIST = 2
AD_GROUP_CRITERION_LABEL_ALREADY_EXISTS = 3
CANNOT_ADD_LABEL_TO_NEGATIVE_CRITERION = 4
TOO_MANY_OPERATIONS = 5
CANT_UPDATE_NEGATIVE = 6
CONCRETE_TYPE_REQUIRED = 7
BID_INCOMPATIBLE_WITH_ADGROUP = 8
CANNOT_TARGET_AND_EXCLUDE = 9
ILLEGAL_URL = 10
INVALID_KEYWORD_TEXT = 11
INVALID_DESTINATION_URL = 12
MISSING_DESTINATION_URL_TAG = 13
KEYWORD_LEVEL_BID_NOT_SUPPORTED_FOR_MANUALCPM = 14
INVALID_USER_STATUS = 15
CANNOT_ADD_CRITERIA_TYPE = 16
CANNOT_EXCLUDE_CRITERIA_TYPE = 17
CAMPAIGN_TYPE_NOT_COMPATIBLE_WITH_PARTIAL_FAILURE = 27
OPERATIONS_FOR_TOO_MANY_SHOPPING_ADGROUPS = 28
CANNOT_MODIFY_URL_FIELDS_WITH_DUPLICATE_ELEMENTS = 29
CANNOT_SET_WITHOUT_FINAL_URLS = 30
CANNOT_CLEAR_FINAL_URLS_IF_FINAL_MOBILE_URLS_EXIST = 31
CANNOT_CLEAR_FINAL_URLS_IF_FINAL_APP_URLS_EXIST = 32
CANNOT_CLEAR_FINAL_URLS_IF_TRACKING_URL_TEMPLATE_EXISTS = 33
CANNOT_CLEAR_FINAL_URLS_IF_URL_CUSTOM_PARAMETERS_EXIST = 34
CANNOT_SET_BOTH_DESTINATION_URL_AND_FINAL_URLS = 35
CANNOT_SET_BOTH_DESTINATION_URL_AND_TRACKING_URL_TEMPLATE = 36
FINAL_URLS_NOT_SUPPORTED_FOR_CRITERION_TYPE = 37
FINAL_MOBILE_URLS_NOT_SUPPORTED_FOR_CRITERION_TYPE = 38
__all__ = tuple(sorted(__protobuf__.manifest))
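# A hedged usage sketch: proto.Enum members behave like IntEnum values, so
# callers can match error codes by name or by number.
if __name__ == '__main__':
    err = AdGroupCriterionErrorEnum.AdGroupCriterionError.ILLEGAL_URL
    print(err.name, err.value)  # -> ILLEGAL_URL 10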

blob_id: 8782e95b4865552c8f35156979699ee41c8c1ff0
directory_id: f62cd59f7e5c8d22b4d9ad130eff0a70d0272341
path: /tests/test_tools.py
content_id: 8bd022cf2be61adcacbe57d97df83e7992336e78
detected_licenses: ["MIT"]
license_type: permissive
repo_name: alexanderlerch/pyACA
snapshot_id: 61ea3c1b9350562bc248fa45b7bba3246e98686d
revision_id: 3ee1dde02999cf0a0e512ee73e021ea618303a89
branch_name: refs/heads/master
visit_date: 2023-05-22T18:43:09.506175
revision_date: 2022-09-12T18:37:24
committer_date: 2022-09-12T18:37:24
github_id: 195,197,713
star_events_count: 141
fork_events_count: 37
gha_license_id: MIT
gha_event_created_at: 2022-01-06T15:19:40
gha_created_at: 2019-07-04T08:09:05
gha_language: Python
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 18,513
extension: py
filename: test_tools.py
content:
import unittest
import numpy as np
import numpy.testing as npt
import pyACA
class TestTools(unittest.TestCase):
def test_nmf(self):
np.random.seed(42)
X = np.random.rand(128, 6) / 20
X[np.arange(4, 128, 4), 0:2] = 1
X[np.arange(7, 128, 7), 2:4] = 1
X[:, 4:6] = 0.7 * X[:, 0:2] + 0.3 * X[:, 2:4]
W, H, err = pyACA.ToolSimpleNmf(X, 2)
npt.assert_almost_equal(W[8, 1], np.mean(W[np.arange(4, 128, 4), 1]), decimal=3, err_msg="NMF 1: dictionary incorrect")
npt.assert_almost_equal(W[12, 1], np.mean(W[np.arange(4, 128, 4), 1]), decimal=3, err_msg="NMF 2: dictionary incorrect")
npt.assert_almost_equal(W[124, 1], np.mean(W[np.arange(4, 128, 4), 1]), decimal=3, err_msg="NMF 3: dictionary incorrect")
npt.assert_almost_equal(W[21, 0], np.mean(W[np.arange(7, 128, 7), 0]), decimal=3, err_msg="NMF 4: dictionary incorrect")
npt.assert_almost_equal(W[84, 0], np.mean(W[np.arange(7, 128, 7), 0]), decimal=3, err_msg="NMF 5: dictionary incorrect")
npt.assert_almost_equal(W[105, 0], np.mean(W[np.arange(7, 128, 7), 0]), decimal=3, err_msg="NMF 6: dictionary incorrect")
self.assertEqual(np.max(np.diff(err)) < 0, True, "NMF 7: loss incorrect")
for n in range(3):
npt.assert_almost_equal(H[1, 2*n]-H[1, 2*n+1], 0, decimal=0, err_msg="NMF 8: activation incorrect")
npt.assert_almost_equal(H[0, 2*n]-H[0, 2*n+1], 0, decimal=0, err_msg="NMF 9: activation incorrect")
self.assertEqual(np.mean(H[0, 0:2]) < 1, True, "NMF 10: activation incorrect")
self.assertEqual(np.mean(H[1, 2:4]) < 1, True, "NMF 11: activation incorrect")
self.assertEqual(np.mean(H[1, 0:2]) > np.mean(H[1, 4:6]), True, "NMF 12: activation incorrect")
self.assertEqual(np.mean(H[0, 2:4]) > np.mean(H[0, 4:6]), True, "NMF 13: activation incorrect")
def test_resample(self):
fs_in = 8000
fs_out = 44100
fFreq = 100
t_in = np.arange(0, fs_in) / fs_in
x = np.sin(2 * np.pi * fFreq * t_in)
[x_out, t_out] = pyACA.ToolResample(x, fs_out, fs_in)
x_gt = np.sin(2 * np.pi * fFreq * t_out)
# check output sample rate
npt.assert_almost_equal(1 / (t_out[1]-t_out[0]), fs_out, decimal=7, err_msg="RS 1: sample rate incorrect")
# check output samples
npt.assert_almost_equal(np.mean(np.abs(x_gt - x_out)[10:-10]), 0, decimal=3, err_msg="RS 2: interpolation incorrect")
fs_in = 48000
fs_out = 7003
fFreq = 100
t_in = np.arange(0, fs_in) / fs_in
x = np.sin(2 * np.pi * fFreq * t_in)
[x_out, t_out] = pyACA.ToolResample(x, fs_out, fs_in)
x_gt = np.sin(2 * np.pi * fFreq * t_out)
# check output sample rate
npt.assert_almost_equal(1 / (t_out[1]-t_out[0]), fs_out, decimal=7, err_msg="RS 3: sample rate incorrect")
# check output samples
npt.assert_almost_equal(np.mean(np.abs(x_gt - x_out)[10:-10]), 0, decimal=3, err_msg="RS 4: interpolation incorrect")
def test_viterbi(self):
# states: healthy: 0, fever: 1
# obs: normal: 0, cold: 1, dizzy: 2
# V = np.array([0, 1, 2])
# start prob: healthy: 0.6, fever: 0.4
p_s = np.array([0.6, 0.4])
# emission prob: normal|healthy: 0.5, cold|healthy: 0.4, dizzy|healthy: 0.1
# normal|fever: 0.1, cold|fever: 0.3, dizzy|fever: 0.6
P_E = np.array([[0.5, 0.4, 0.1],
[0.1, 0.3, 0.6]])
# trans prob: healthy->healthy: 0.7, healthy->fever: 0.3, fever->healthy: 0.4, fever->fever: 0.6
P_T = np.array([[0.7, 0.3],
[0.4, 0.6]])
p, P_res = pyACA.ToolViterbi(P_E, P_T, p_s)
npt.assert_almost_equal(np.sum(np.abs(p - np.array([0, 0, 1]))), 0, decimal=7, err_msg="V 1: state sequence incorrect")
        npt.assert_almost_equal(np.sum(np.abs(P_res - np.array([[0.3000, 0.0840, 0.0059], [0.04, 0.0270, 0.0151]]))), 0, decimal=4, err_msg="V 2: probability matrix incorrect")
p, P_res = pyACA.ToolViterbi(P_E, P_T, p_s, True)
        npt.assert_almost_equal(np.sum(np.abs(p - np.array([0, 0, 1]))), 0, decimal=7, err_msg="V 3: state sequence incorrect")
        npt.assert_almost_equal(np.sum(np.abs(P_res - np.log(np.array([[0.3000, 0.0840, 0.0059], [0.04, 0.0270, 0.0151]])))), 0, decimal=2, err_msg="V 4: log probability matrix incorrect")
def test_kmeans(self):
mu = np.array([[-5, 5],
[5, -5]])
iNumObs = 32
phase = np.arange(0, iNumObs)*2*np.pi / iNumObs
r = np.array([.1, .5])
# generate data points for two clusters
cluster1 = np.zeros([2, 2*iNumObs])
cluster2 = np.zeros([2, 2*iNumObs])
cluster1[:, 0:iNumObs] = mu[:, [0]] + r[0] * np.squeeze(np.array([[np.exp(1j*phase).real], [np.exp(1j*phase).imag]]))
cluster1[:, iNumObs:2*iNumObs] = mu[:, [0]] + r[1] * np.squeeze(np.array([[np.exp(1j*phase).real], [np.exp(1j*phase).imag]]))
cluster2[:, 0:iNumObs] = mu[:, [1]] + r[0] * np.squeeze(np.array([[np.exp(1j*phase).real], [np.exp(1j*phase).imag]]))
cluster2[:, iNumObs:2*iNumObs] = mu[:, [1]] + r[1] * np.squeeze(np.array([[np.exp(1j*phase).real], [np.exp(1j*phase).imag]]))
V = np.concatenate((cluster1, cluster2), axis=1)
[clusterIdx, state] = pyACA.ToolSimpleKmeans(V, 2)
self.assertEqual(np.sum(np.diff(clusterIdx[0:2*iNumObs])), 0, "KM 1: block content incorrect")
self.assertEqual(np.sum(np.diff(clusterIdx[2*iNumObs:-1])), 0, "KM 2: block content incorrect")
self.assertEqual(np.abs(clusterIdx[0]-clusterIdx[-1]), 1, "KM 3: block content incorrect")
def test_blockaudio(self):
iBlockLength = 20
iHopLength = 10
fs = 1
numSamples = 101
x = np.arange(0, numSamples)
[xb, time] = pyACA.ToolBlockAudio(x, iBlockLength, iHopLength, fs)
xb = np.squeeze(xb)
# check dimensions
targetNumBlocks = np.ceil(numSamples / iHopLength).astype(int)
dim = xb.shape
self.assertEqual(dim[0], targetNumBlocks, "TB 1: number of blocks incorrect")
self.assertEqual(dim[1], iBlockLength, "TB 2: block length incorrect")
# block content
self.assertEqual(xb[targetNumBlocks - 2][0], numSamples - 11, "TB 3: block content incorrect")
# time stamps
self.assertEqual(time[0], iBlockLength / (2*fs), "TB 4: time stamp incorrect")
self.assertEqual(time[1], time[0] + iHopLength / fs, "TB 5: time stamp incorrect")
fs = 40000
iBlockLength = 1024
iHopLength = 512
numSamples = 40000
x = np.arange(0, numSamples)
[xb, t] = pyACA.ToolBlockAudio(x, iBlockLength, iHopLength, fs)
targetNumBlocks = np.ceil(numSamples / iHopLength).astype(int)
dim = xb.shape
self.assertEqual(dim[0], targetNumBlocks, "TB 6: number of blocks incorrect")
self.assertEqual(dim[1], iBlockLength, "TB 7: block length incorrect")
def test_freq2bin2freq(self):
iUpsample = 10
iFftLength = 256
f_s = 8000
bins = np.arange(0, 1281)/iUpsample
fftres = f_s / iFftLength / iUpsample
hzout = pyACA.ToolBin2Freq(bins, iFftLength, f_s)
npt.assert_almost_equal(hzout[1]-hzout[0], fftres, decimal=7, err_msg="FBIN 1: frequency resolution incorrect")
self.assertEqual(len(hzout), len(bins), "FBIN 2: output dimension incorrect")
npt.assert_almost_equal(hzout[0], 0, decimal=7, err_msg="FBIN 3: frequency values incorrect")
npt.assert_almost_equal(hzout[-1], f_s * 0.5, decimal=7, err_msg="FBIN 4: frequency values incorrect")
# check back and forth conversion
npt.assert_almost_equal(bins, pyACA.ToolFreq2Bin(pyACA.ToolBin2Freq(bins, iFftLength, f_s), iFftLength, f_s), decimal=7, err_msg="FBIN 5: bin to frequency to bin conversion incorrect")
def test_freq2midi2freq(self):
# check concert pitch
npt.assert_almost_equal(69, pyACA.ToolFreq2Midi(440), decimal=7, err_msg="FMIDI 1: frequency to pitch conversion incorrect")
npt.assert_almost_equal(440, pyACA.ToolMidi2Freq(69), decimal=7, err_msg="FMIDI 2: pitch to frequency conversion incorrect")
# generate high resolution pitch vector and corresponding frequencies
midi = np.arange(0, 1280)/10
hz = 2**((midi-69)/12) * 440
midiout = pyACA.ToolFreq2Midi(hz)
hzout = pyACA.ToolMidi2Freq(midi)
# find maximum deviation
diffmidi = np.abs(midiout-midi).max()
diffhz = np.abs(hzout-hz).max()
npt.assert_almost_equal(diffmidi, 0, decimal=7, err_msg="FMIDI 3: frequency to pitch conversion incorrect")
npt.assert_almost_equal(diffhz, 0, decimal=7, err_msg="FMIDI 4: pitch to frequency conversion incorrect")
def test_freq2mel2freq(self):
# check reference point at 1000Hz
npt.assert_almost_equal(1000, pyACA.ToolFreq2Mel(1000), decimal=7, err_msg="FMEL 1: frequency to pitch conversion incorrect")
npt.assert_almost_equal(1000, pyACA.ToolMel2Freq(1000), decimal=7, err_msg="FMEL 2: pitch to frequency conversion incorrect")
mel = np.arange(0, 3000)
# check back and forth conversion
npt.assert_almost_equal(mel, pyACA.ToolFreq2Mel(pyACA.ToolMel2Freq(mel)), decimal=7, err_msg="FMEL 3: pitch to frequency to pitch conversion incorrect")
def test_knn(self):
train_data = np.array([[1, 1, 1], [2, 2, 2], [3, 3, 3], [.8, .9, .8], [1.8, 2.0, 1.9], [3, 3, 3]]).T
train_label1 = np.array([0, 1, 2, 0, 1, 2])
train_label2 = train_label1 + 5
test_data = np.array([[10, 10, 10], [2, 2, 2], [1.1, 0.95, 1.3], [0, 0, 0], [1.5, 1.5, 1.5]]).T
ground_truth = np.array([2, 1, 0, 0, 1])
est_class = pyACA.ToolSimpleKnn(test_data, train_data, train_label2, 1)
# dimensions test
self.assertEqual(len(est_class), len(ground_truth), "KNN 1: incorrect dimensions")
# label test
self.assertEqual(min(est_class), 5+min(ground_truth), "KNN 2: incorrect labels")
self.assertEqual(max(est_class), 5+max(ground_truth), "KNN 3: incorrect labels")
# content test
est_class = pyACA.ToolSimpleKnn(test_data, train_data, train_label1, 1)
self.assertEqual(sum(abs(est_class - ground_truth)), 0, "KNN 4: incorrect result")
est_class = pyACA.ToolSimpleKnn(test_data, train_data, train_label1, 2)
self.assertEqual(sum(abs(est_class - ground_truth)), 0, "KNN 5: incorrect result")
est_class = pyACA.ToolSimpleKnn(test_data, train_data, train_label1, 5)
self.assertEqual(sum(abs(est_class - ground_truth)), 0, "KNN 6: incorrect result")
# different dimensionality and labels
train_data = np.array([[1, 0], [1, 2], [-1, 2], [2.1, 1]]).T
train_label = np.array([0, 1, 1, 0])
test_data = np.array([[0, 0]]).T
ground_truth1 = np.array([0])
ground_truth3 = np.array([1])
ground_truth4 = np.array([0])
# content test
est_class = pyACA.ToolSimpleKnn(test_data, train_data, train_label, 1)
self.assertEqual(sum(abs(est_class - ground_truth1)), 0, "KNN 7: incorrect result")
est_class = pyACA.ToolSimpleKnn(test_data, train_data, train_label, 3)
self.assertEqual(sum(abs(est_class - ground_truth3)), 0, "KNN 8: incorrect result")
est_class = pyACA.ToolSimpleKnn(test_data, train_data, train_label, 4)
self.assertEqual(sum(abs(est_class - ground_truth4)), 0, "KNN 9: incorrect result")
def test_loocv(self):
data = np.array([[0, 1, 2, 3, 4, 5, 6, 7, 8], [2, 1, 0, 5, 4, 3, 8, 7, 6]])
gt = np.array([0, 0, 0, 1, 1, 1, 2, 2, 2])
[avg_acc, accuracies, confmat] = pyACA.ToolLooCrossVal(data, gt)
self.assertEqual(len(accuracies)-len(gt), 0, "CV 1: incorrect result dimensions")
def test_pca(self):
V1 = np.array([[1, 2, 3, 4], [0.5, 1, 1.5, 2]])
V2 = -V1
V = np.concatenate((V1, V2), axis=1)
# dimensions
u_pc, T, ev = pyACA.ToolPca(V)
self.assertEqual(u_pc.shape[0], V.shape[0], "PCA 1: component dimensions incorrect")
self.assertEqual(u_pc.shape[1], V.shape[1], "PCA 2: component dimensions incorrect")
self.assertEqual(T.shape[0], V.shape[0], "PCA 3: transformation matrix dimensions incorrect")
self.assertEqual(T.shape[1], V.shape[0], "PCA 4: transformation matrix dimensions incorrect")
self.assertEqual(ev.shape[0], V.shape[0], "PCA 5: eigenvalue dimensions incorrect")
# only one component
npt.assert_almost_equal(ev[1], 0, decimal=7, err_msg="PCA 6: incorrect eigenvalue")
fScale = 0.5
V1 = np.array([[-2, -1, 1, 2], [-2, -1, 1, 2]])
V2 = fScale * np.vstack((-V1[0, :], V1[1, :]))
V = np.concatenate((V1, V2), axis=1)
# two perfectly orthogonal components
u_pc, T, ev = pyACA.ToolPca(V)
npt.assert_almost_equal(ev[0], ev[1] / fScale**2, decimal=7, err_msg="PCA 7: incorrect eigenvalues")
npt.assert_almost_equal(np.abs(np.max(T)), np.abs(np.min(T)), decimal=7, err_msg="PCA 8: incorrect transformation matrix")
npt.assert_almost_equal(np.sum(np.abs(u_pc[0, :])), np.sum(np.abs(u_pc[1, :])) / fScale, decimal=7, err_msg="PCA 9: incorrect component scaling")
npt.assert_almost_equal(np.sum(u_pc), 0, decimal=7, err_msg="PCA 10: incorrect component mean")
def test_feature_selection(self):
        # generate 3D features (dim 1 not separable, dim 2 separable, dim 3 noise)
np.random.seed(11)
sep = np.array([[.1, .2, .3, .4]])
offset = .12
f1 = np.concatenate((.2*(sep + .5*offset), sep - .5*offset), axis=1)
f2 = np.concatenate((sep - 2*offset, sep + 2*offset), axis=1)
f3 = 0.3 * np.random.rand(1, f1.shape[1])
V = np.concatenate((f1, f2, f3), axis=0)
# assign class labels (last one is wrong)
classIdx = np.array([0, 0, 0, 0, 1, 1, 1, 0])
# dimensions
featIdx, acc = pyACA.ToolSeqFeatureSel(V, classIdx, 2)
self.assertEqual(len(featIdx), 2, "FeS 1: output dimensions incorrect")
self.assertEqual(len(featIdx), 2, "FeS 2: output dimensions incorrect")
# selected features
featIdx, acc = pyACA.ToolSeqFeatureSel(V, classIdx)
self.assertEqual(len(featIdx), 3, "FeS 3: output dimensions incorrect")
self.assertEqual(featIdx[0], 1, "FeS 4: selected features incorrect")
self.assertEqual(featIdx[1], 0, "FeS 5: selected features incorrect")
self.assertEqual(featIdx[2], 2, "FeS 6: selected features incorrect")
def test_hann(self):
iBlockLength = np.asarray([2, 16, 128, 1024, 16384])
for b in iBlockLength:
w = pyACA.ToolComputeHann(b)
self.assertEqual(len(w), b, "HN 1: window dimension incorrect")
# note that the window should be periodic
npt.assert_almost_equal(w[0], 0, decimal=7, err_msg="HN 2: window does not start with 0")
npt.assert_almost_equal(np.max(w), 1, decimal=7, err_msg="HN 3: window maximum incorrect")
npt.assert_almost_equal(w[int(iBlockLength[-1]/4)], .5, decimal=7, err_msg="HN 4: window shape incorrect")
def test_instfreq(self):
iBlockLength = 1024
iHopLength = 128
f_s = 48000
fFreqRes = f_s/iBlockLength
# select freqs to generate
bins = iBlockLength / np.asarray([32, 8, 4])
fFreq = fFreqRes * (bins + np.asarray([.5, .25, 0]))
# generate audio
t = np.arange(0, iBlockLength + iHopLength) / f_s
x = np.zeros([len(fFreq), len(t)])
for i, f in enumerate(fFreq):
x[i, :] = np.sin(2 * np.pi * f * t)
X = np.zeros([2, iBlockLength]).astype(complex)
w = pyACA.ToolComputeHann(iBlockLength)
X[0, :] = np.fft.fft(np.sum(x[:, 0:iBlockLength], axis=0) * w) * 2 / iBlockLength
X[1, :] = np.fft.fft(np.sum(x[:, iHopLength:iHopLength+iBlockLength], axis=0) * w) * 2 / iBlockLength
iSpecDim = np.int_(iBlockLength / 2 + 1)
X = X[:, 0:iSpecDim]
f_I = pyACA.ToolInstFreq(X, iHopLength, f_s)
for i, f in enumerate(fFreq):
npt.assert_almost_equal(f_I[bins[i].astype(int)], f, decimal=4, err_msg="IF 1: incorrect result")
def test_gmm(self):
mu = np.array([[1, 2],
[-1, -2]])
sigma = np.array([[[3, .2],
[.2, 2]],
[[2, 0],
[0, 1]]])
N = np.array([2000,
1000])
np.random.seed(11)
points = []
for i in range(len(mu)):
x = np.random.multivariate_normal(mu[i], sigma[i], N[i])
points.append(x)
V = np.concatenate(points).T
mu_hat, sigma_hat, state = pyACA.ToolGmm(V, 2)
diffm0 = np.min(np.array([np.sum(np.abs(mu[0] - mu_hat[:, 0])), np.sum(np.abs(mu[0] - mu_hat[:, 1]))]))
diffm1 = np.min(np.array([np.sum(np.abs(mu[1] - mu_hat[:, 0])), np.sum(np.abs(mu[1] - mu_hat[:, 1]))]))
npt.assert_almost_equal(diffm0, 0, decimal=1, err_msg="GMM 1: incorrect result")
npt.assert_almost_equal(diffm1, 0, decimal=1, err_msg="GMM 2: incorrect result")
diffs0 = np.min(np.array([np.max(np.abs(sigma[0] - sigma_hat[0])), np.max(np.abs(sigma[0] - sigma_hat[1]))]))
diffs1 = np.min(np.array([np.max(np.abs(sigma[1] - sigma_hat[0])), np.max(np.abs(sigma[1] - sigma_hat[1]))]))
npt.assert_almost_equal(diffs0, 0, decimal=1, err_msg="GMM 3: incorrect result")
npt.assert_almost_equal(diffs1, 0, decimal=1, err_msg="GMM 4: incorrect result")
def test_normalize(self):
x = np.array([.1, .2, -.8])
x_norm = pyACA.ToolNormalizeAudio(x)
npt.assert_almost_equal(np.max(abs(x_norm)), 1, decimal=7, err_msg="Norm 1: incorrect result")
npt.assert_almost_equal(x_norm[1], x[1] / abs(x[2]), decimal=7, err_msg="Norm 2: incorrect result")
def test_downmix(self):
x = np.array([[.8, -.4], [.4, 0], [.15, .25]])
x_downmix = pyACA.ToolDownmix(x)
npt.assert_almost_equal(np.mean(x_downmix), .2, decimal=7, err_msg="DM 1: incorrect result")
npt.assert_almost_equal(np.max(x_downmix), .2, decimal=7, err_msg="DM 2: incorrect result")
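# A hedged standalone sketch of the healthy/fever HMM exercised by
# test_viterbi above: each column of P_E holds the state likelihoods for one
# observation (normal, cold, dizzy), P_T is the transition matrix, and p_s
# the start distribution. The most likely state path is healthy, healthy,
# fever, i.e. [0, 0, 1].
if __name__ == '__main__':
    P_E = np.array([[0.5, 0.4, 0.1],
                    [0.1, 0.3, 0.6]])
    P_T = np.array([[0.7, 0.3],
                    [0.4, 0.6]])
    p_s = np.array([0.6, 0.4])
    p, P_res = pyACA.ToolViterbi(P_E, P_T, p_s)
    print(p)  # expected: [0 0 1]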

blob_id: 320d0005ea1e0e02c1bac8907ce78edced889988
directory_id: 3afe7348e830a0c5139fb7cf393736e18b59ab4a
path: /src/clusterfuzz/_internal/platforms/android/settings.py
content_id: a70c75ddb6620660f0a5fbb0d4b2319e2d7e7a76
detected_licenses: ["Apache-2.0"]
license_type: permissive
repo_name: google/clusterfuzz
snapshot_id: 00845899e081dbbb89b70a75ce0b7eba3da73b02
revision_id: 6501a839b27a264500244f32bace8bee4d5cb9a2
branch_name: refs/heads/master
visit_date: 2023-09-03T17:34:17.821599
revision_date: 2023-09-01T16:11:51
committer_date: 2023-09-01T16:11:51
github_id: 168,060,021
star_events_count: 5,420
fork_events_count: 639
gha_license_id: Apache-2.0
gha_event_created_at: 2023-09-13T16:40:54
gha_created_at: 2019-01-29T00:19:40
gha_language: Python
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 6,241
extension: py
filename: settings.py
content:
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Settings change related functions."""
import re
from clusterfuzz._internal.system import environment
from . import adb
BUILD_FINGERPRINT_REGEX = re.compile(
r'(?P<vendor>.+)\/(?P<target>.+)'
r'\/(?P<flavor>.+)\/(?P<name_name>.+)'
r'\/(?P<build_id>.+):(?P<type>.+)\/(?P<keys>.+)')
def change_se_linux_to_permissive_mode():
"""Switch SELinux to permissive mode for working around local file access and
other issues."""
adb.run_shell_command(['setenforce', '0'])
def get_build_fingerprint():
"""Return build's fingerprint."""
return adb.get_property('ro.build.fingerprint')
def get_build_flavor():
"""Return the build flavor."""
return adb.get_property('ro.build.flavor')
def get_build_parameters():
"""Return build_id, target and type from the device's fingerprint"""
build_fingerprint = environment.get_value('BUILD_FINGERPRINT',
get_build_fingerprint())
build_fingerprint_match = BUILD_FINGERPRINT_REGEX.match(build_fingerprint)
if not build_fingerprint_match:
return None
build_id = build_fingerprint_match.group('build_id')
target = build_fingerprint_match.group('target')
build_type = build_fingerprint_match.group('type')
return {'build_id': build_id, 'target': target, 'type': build_type}
def get_build_version():
"""Return the build version of the system as a character.
  K = Kitkat, L = Lollipop, M = Marshmallow, MASTER = Master.
"""
build_version = adb.get_property('ro.build.id')
if not build_version:
return None
if build_version == 'MASTER':
return build_version
match = re.match('^([A-Z])', build_version)
if not match:
return None
return match.group(1)
def get_cpu_arch():
"""Return cpu architecture."""
return adb.get_property('ro.product.cpu.abi')
def get_device_codename():
"""Return the device codename."""
serial = environment.get_value('ANDROID_SERIAL')
devices_output = adb.run_command(['devices', '-l'])
serial_pattern = r'(^|\s){serial}\s'.format(serial=re.escape(serial))
serial_regex = re.compile(serial_pattern)
for line in devices_output.splitlines():
values = line.strip().split()
if not serial_regex.search(line):
continue
for value in values:
if not value.startswith('device:'):
continue
device_codename = value.split(':')[-1]
if device_codename:
return device_codename
# Unable to get code name.
return ''
def get_platform_id():
"""Return a string as |android:{codename}_{sanitizer}:{build_version}|."""
platform_id = 'android'
# Add codename and sanitizer tool information.
platform_id += ':%s' % get_device_codename()
sanitizer_tool_name = get_sanitizer_tool_name()
if sanitizer_tool_name:
platform_id += '_%s' % sanitizer_tool_name
# Add build version.
build_version = get_build_version()
if build_version:
platform_id += ':%s' % build_version
return platform_id
def get_product_brand():
"""Return product's brand."""
return adb.get_property('ro.product.brand')
def get_product_name():
"""Return product's name."""
return adb.get_property('ro.product.name')
def get_product_model():
"""Return product's model."""
return adb.get_property('ro.product.model')
def get_build_product():
"""Return builds's product."""
return adb.get_property('ro.build.product')
def get_sanitizer_tool_name():
"""Return sanitizer tool name e.g. ASAN if found on device."""
build_flavor = get_build_flavor()
if 'hwasan' in build_flavor:
return 'hwasan'
if 'kasan' in build_flavor:
return 'kasan'
if 'asan' in build_flavor:
return 'asan'
return None
def get_security_patch_level():
"""Return the security patch level reported by the device."""
return adb.get_property('ro.build.version.security_patch')
def get_kernel_version_string():
return adb.run_shell_command('cat /proc/version').strip()
def is_google_device():
"""Return true if this is a google branded device."""
# If a build branch is already set, then this is a Google device. No need to
# query device which can fail if the device is failing on recovery mode.
build_branch = environment.get_value('BUILD_BRANCH')
if build_branch:
return True
product_brand = environment.get_value('PRODUCT_BRAND', get_product_brand())
if product_brand is None:
return None
return product_brand in ('google', 'generic')
def is_automotive():
"""Returns if we are running in Android Automotive OS, currently only for
Osprey or Seahawk."""
product_model = get_product_model()
return product_model in ('Osprey', 'Seahawk')
def set_content_setting(table, key, value):
"""Set a device content setting. The input is not sanitized, so make sure to
use with trusted input key and value pair only."""
def _get_type_binding(value):
"""Return binding type for content setting."""
if isinstance(value, bool):
return 'b'
if isinstance(value, float):
return 'f'
if isinstance(value, int):
return 'i'
# Default to string.
return 's'
content_setting_command = (
'content insert --uri content://%s --bind name:s:%s --bind value:%s:%s' %
(table, key, _get_type_binding(value), str(value)))
adb.run_shell_command(content_setting_command)
def set_database_setting(database_path, table, key, value):
"""Update a key in a database. The input is not sanitized, so make sure to use
with trusted input key and value pair only."""
sql_command_string = ('"UPDATE %s SET value=\'%s\' WHERE name=\'%s\'"') % (
table, str(value), key)
adb.run_shell_command(['sqlite3', database_path, sql_command_string])
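# A hedged sketch of what BUILD_FINGERPRINT_REGEX extracts. The sample
# fingerprint is an illustrative assumption following the usual Android
# brand/product/device:version/build_id/incremental:type/tags layout; note
# that the group named 'build_id' lands on the incremental build number.
if __name__ == '__main__':
  sample = 'google/walleye/walleye:8.1.0/OPM1.171019.011/4448085:user/release-keys'
  match = BUILD_FINGERPRINT_REGEX.match(sample)
  print({'build_id': match.group('build_id'),
         'target': match.group('target'),
         'type': match.group('type')})
  # -> {'build_id': '4448085', 'target': 'walleye', 'type': 'user'}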

blob_id: 061ce8aa8c674df67813e759a5c5d6456dc5aa90
directory_id: 88dd4380e0d33d4a118ca4e69e4ca9b1c8f45e1f
path: /pyspedas/polar/config.py
content_id: 82982557a9a73019d80ff3ee3c48b5c118460ab0
detected_licenses: ["MIT"]
license_type: permissive
repo_name: spedas/pyspedas
snapshot_id: 16d34015961e3a4d3eaf8637d3cb6abca95df1b1
revision_id: 1d07b148753afa96e148c5835ed9545c507577da
branch_name: refs/heads/master
visit_date: 2023-09-01T16:07:47.131334
revision_date: 2023-08-25T17:15:35
committer_date: 2023-08-25T17:15:35
github_id: 167,614,292
star_events_count: 125
fork_events_count: 61
gha_license_id: MIT
gha_event_created_at: 2023-09-08T18:41:27
gha_created_at: 2019-01-25T21:11:14
gha_language: Python
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 409
extension: py
filename: config.py
content:
import os
CONFIG = {'local_data_dir': 'polar_data/',
'remote_data_dir': 'https://spdf.gsfc.nasa.gov/pub/data/polar/'}
# override local data directory with environment variables
if os.environ.get('SPEDAS_DATA_DIR'):
CONFIG['local_data_dir'] = os.sep.join([os.environ['SPEDAS_DATA_DIR'], 'polar'])
if os.environ.get('POLAR_DATA_DIR'):
CONFIG['local_data_dir'] = os.environ['POLAR_DATA_DIR']
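# A hedged usage sketch (directory values are hypothetical). The environment
# variable must be set before this module is imported, because the override
# runs at import time:
#
#   import os
#   os.environ['SPEDAS_DATA_DIR'] = '/data/spedas'
#   from pyspedas.polar.config import CONFIG
#   print(CONFIG['local_data_dir'])  # -> /data/spedas/polar
#
# POLAR_DATA_DIR, if also set, takes precedence for this mission.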

blob_id: 3b9d44f9dc4f5d362ca6c830e76093a16f31f372
directory_id: 657541b2f4ba1939b6be28231f721a1251a7062e
path: /Chapter 06/ch06_r04.py
content_id: a4bd3a9bca5a835bf822bb6023add4e4d727d49b
detected_licenses: ["MIT"]
license_type: permissive
repo_name: PacktPublishing/Modern-Python-Cookbook
snapshot_id: 0562237aa479b07ac4ea65000c3c9942fd36383f
revision_id: 5b76b34b3020789ca1d0412b7cfd9527a7b344a1
branch_name: refs/heads/master
visit_date: 2023-02-05T01:00:37.040849
revision_date: 2023-01-30T10:09:08
committer_date: 2023-01-30T10:09:08
github_id: 74,134,183
star_events_count: 121
fork_events_count: 79
gha_license_id: MIT
gha_event_created_at: 2019-03-09T16:17:28
gha_created_at: 2016-11-18T13:57:34
gha_language: Python
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 1,590
extension: py
filename: ch06_r04.py
content:
"""Python Cookbook
Chapter 6, recipes 3 and 4
"""
import random
from collections import namedtuple
Card = namedtuple('Card', ('rank', 'suit'))
class Hand:
"""
>>> h = Hand(1)
>>> h.deal( Card(1,'\N{white heart suit}'))
>>> h.deal( Card(10, '\N{black club suit}'))
>>> h
Hand(1, [Card(rank=1, suit='♡'), Card(rank=10, suit='♣')])
>>> h.total = 11 #doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
File "/Library/Frameworks/Python.framework/Versions/3.4/lib/python3.4/doctest.py", line 1318, in __run
compileflags, 1), test.globs)
File "<doctest __main__.Hand[4]>", line 1, in <module>
h.total = 11 #doctest: +IGNORE_EXCEPTION_DETAIL
AttributeError: 'Hand' object has no attribute 'total'
"""
__slots__ = ('hand', 'bet')
def __init__(self, bet, hand=None):
        self.hand = hand or []
        self.bet = bet
def deal(self, card):
self.hand.append(card)
def __repr__(self):
return "{class_}({0}, {1})".format(
self.bet, self.hand,
class_= self.__class__.__name__,
)
def test():
import doctest
doctest.testmod()
if __name__ == "__main__":
test()
SUITS = (
'\N{black spade suit}',
'\N{white heart suit}',
'\N{white diamond suit}',
'\N{black club suit}',
)
deck = [
Card(r,s) for r in range(1,14) for s in SUITS
]
random.seed(2)
random.shuffle(deck)
dealer = iter(deck)
h = Hand(2)
h.deal(next(dealer))
h.deal(next(dealer))
print(h)
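# A hedged standalone sketch of the __slots__ behavior the doctest above
# relies on: assigning an attribute that is not listed in __slots__ raises
# AttributeError.
class _Point:
    __slots__ = ('x', 'y')  # the only attributes instances may carry
    def __init__(self, x, y):
        self.x, self.y = x, y
_p = _Point(1, 2)
try:
    _p.z = 3  # not listed in __slots__
except AttributeError as error:
    print(error)  # '_Point' object has no attribute 'z'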

blob_id: b905c1c8eefeca74a68ace255fa3bfad3d4fed4c
directory_id: 6a468c1650b3c083f102f19ace0b0d6e4d0686f7
path: /sympy/calculus/singularities.py
content_id: 3a2ea1a17c3b69d9e49a481cec794c0d86df834c
detected_licenses: ["BSD-3-Clause", "MIT"]
license_type: permissive
repo_name: sympy/sympy
snapshot_id: a5f8accaa7686c59d9b5c94212fef60d746dac4b
revision_id: 69f98fb2b0d845e76874067a381dba37b577e8c5
branch_name: refs/heads/master
visit_date: 2023-09-01T15:51:37.886107
revision_date: 2023-08-31T20:54:33
committer_date: 2023-08-31T20:54:33
github_id: 640,534
star_events_count: 10,928
fork_events_count: 5,362
gha_license_id: NOASSERTION
gha_event_created_at: 2023-09-14T17:29:13
gha_created_at: 2010-04-30T20:37:14
gha_language: Python
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 11,757
extension: py
filename: singularities.py
content:
"""
Singularities
=============
This module implements algorithms for finding singularities for a function
and identifying types of functions.
The differential calculus methods in this module include methods to identify
the following function types in the given ``Interval``:
- Increasing
- Strictly Increasing
- Decreasing
- Strictly Decreasing
- Monotonic
"""
from sympy.core.power import Pow
from sympy.core.singleton import S
from sympy.core.symbol import Symbol
from sympy.core.sympify import sympify
from sympy.functions.elementary.exponential import log
from sympy.functions.elementary.trigonometric import sec, csc, cot, tan, cos
from sympy.utilities.misc import filldedent
def singularities(expression, symbol, domain=None):
"""
Find singularities of a given function.
Parameters
==========
expression : Expr
The target function in which singularities need to be found.
    symbol : Symbol
        The symbol over whose values the singularities of the
        expression are being searched for.
    domain : Set, optional
        The domain over which to search for singularities; defaults
        to the reals if ``symbol`` is real, and to the complexes
        otherwise.
Returns
=======
Set
A set of values for ``symbol`` for which ``expression`` has a
singularity. An ``EmptySet`` is returned if ``expression`` has no
singularities for any given value of ``Symbol``.
Raises
======
NotImplementedError
Methods for determining the singularities of this function have
not been developed.
Notes
=====
This function does not find non-isolated singularities
nor does it find branch points of the expression.
Currently supported functions are:
- univariate continuous (real or complex) functions
References
==========
.. [1] https://en.wikipedia.org/wiki/Mathematical_singularity
Examples
========
>>> from sympy import singularities, Symbol, log
>>> x = Symbol('x', real=True)
>>> y = Symbol('y', real=False)
>>> singularities(x**2 + x + 1, x)
EmptySet
>>> singularities(1/(x + 1), x)
{-1}
>>> singularities(1/(y**2 + 1), y)
{-I, I}
>>> singularities(1/(y**3 + 1), y)
{-1, 1/2 - sqrt(3)*I/2, 1/2 + sqrt(3)*I/2}
>>> singularities(log(x), x)
{0}
"""
from sympy.solvers.solveset import solveset
if domain is None:
domain = S.Reals if symbol.is_real else S.Complexes
try:
sings = S.EmptySet
for i in expression.rewrite([sec, csc, cot, tan], cos).atoms(Pow):
if i.exp.is_infinite:
raise NotImplementedError
if i.exp.is_negative:
sings += solveset(i.base, symbol, domain)
for i in expression.atoms(log):
sings += solveset(i.args[0], symbol, domain)
return sings
except NotImplementedError:
raise NotImplementedError(filldedent('''
Methods for determining the singularities
of this function have not been developed.'''))
###########################################################################
# DIFFERENTIAL CALCULUS METHODS #
###########################################################################
def monotonicity_helper(expression, predicate, interval=S.Reals, symbol=None):
"""
Helper function for functions checking function monotonicity.
Parameters
==========
expression : Expr
The target function which is being checked
predicate : function
The property being tested for. The function takes in an integer
and returns a boolean. The integer input is the derivative and
the boolean result should be true if the property is being held,
and false otherwise.
interval : Set, optional
The range of values in which we are testing, defaults to all reals.
symbol : Symbol, optional
The symbol present in expression which gets varied over the given range.
It returns a boolean indicating whether the interval in which
the function's derivative satisfies the given predicate is a superset
of the given interval.
Returns
=======
Boolean
True if ``predicate`` is true for all the derivatives when ``symbol``
is varied in ``range``, False otherwise.
"""
from sympy.solvers.solveset import solveset
expression = sympify(expression)
free = expression.free_symbols
if symbol is None:
if len(free) > 1:
raise NotImplementedError(
'The function has not yet been implemented'
' for all multivariate expressions.'
)
variable = symbol or (free.pop() if free else Symbol('x'))
derivative = expression.diff(variable)
predicate_interval = solveset(predicate(derivative), variable, S.Reals)
return interval.is_subset(predicate_interval)
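# Illustrative sketch (not part of the original module): the public helpers
# below are thin wrappers around monotonicity_helper with different predicates,
# e.g. (assuming the usual sympy imports):
#
#   >>> from sympy import Interval, oo
#   >>> from sympy.abc import x
#   >>> monotonicity_helper(x**2, lambda d: d >= 0, Interval(0, oo))
#   True
#   >>> monotonicity_helper(x**2, lambda d: d <= 0, Interval(0, oo))
#   False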
def is_increasing(expression, interval=S.Reals, symbol=None):
"""
Return whether the function is increasing in the given interval.
Parameters
==========
expression : Expr
The target function which is being checked.
interval : Set, optional
The range of values in which we are testing (defaults to set of
all real numbers).
symbol : Symbol, optional
The symbol present in expression which gets varied over the given range.
Returns
=======
Boolean
True if ``expression`` is increasing (either strictly increasing or
constant) in the given ``interval``, False otherwise.
Examples
========
>>> from sympy import is_increasing
>>> from sympy.abc import x, y
>>> from sympy import S, Interval, oo
>>> is_increasing(x**3 - 3*x**2 + 4*x, S.Reals)
True
>>> is_increasing(-x**2, Interval(-oo, 0))
True
>>> is_increasing(-x**2, Interval(0, oo))
False
>>> is_increasing(4*x**3 - 6*x**2 - 72*x + 30, Interval(-2, 3))
False
>>> is_increasing(x**2 + y, Interval(1, 2), x)
True
"""
return monotonicity_helper(expression, lambda x: x >= 0, interval, symbol)
def is_strictly_increasing(expression, interval=S.Reals, symbol=None):
"""
Return whether the function is strictly increasing in the given interval.
Parameters
==========
expression : Expr
The target function which is being checked.
interval : Set, optional
The range of values in which we are testing (defaults to set of
all real numbers).
symbol : Symbol, optional
The symbol present in expression which gets varied over the given range.
Returns
=======
Boolean
True if ``expression`` is strictly increasing in the given ``interval``,
False otherwise.
Examples
========
>>> from sympy import is_strictly_increasing
>>> from sympy.abc import x, y
>>> from sympy import Interval, oo
>>> is_strictly_increasing(4*x**3 - 6*x**2 - 72*x + 30, Interval.Ropen(-oo, -2))
True
>>> is_strictly_increasing(4*x**3 - 6*x**2 - 72*x + 30, Interval.Lopen(3, oo))
True
>>> is_strictly_increasing(4*x**3 - 6*x**2 - 72*x + 30, Interval.open(-2, 3))
False
>>> is_strictly_increasing(-x**2, Interval(0, oo))
False
>>> is_strictly_increasing(-x**2 + y, Interval(-oo, 0), x)
False
"""
return monotonicity_helper(expression, lambda x: x > 0, interval, symbol)
def is_decreasing(expression, interval=S.Reals, symbol=None):
"""
Return whether the function is decreasing in the given interval.
Parameters
==========
expression : Expr
The target function which is being checked.
interval : Set, optional
The range of values in which we are testing (defaults to set of
all real numbers).
symbol : Symbol, optional
The symbol present in expression which gets varied over the given range.
Returns
=======
Boolean
True if ``expression`` is decreasing (either strictly decreasing or
constant) in the given ``interval``, False otherwise.
Examples
========
>>> from sympy import is_decreasing
>>> from sympy.abc import x, y
>>> from sympy import S, Interval, oo
>>> is_decreasing(1/(x**2 - 3*x), Interval.open(S(3)/2, 3))
True
>>> is_decreasing(1/(x**2 - 3*x), Interval.open(1.5, 3))
True
>>> is_decreasing(1/(x**2 - 3*x), Interval.Lopen(3, oo))
True
>>> is_decreasing(1/(x**2 - 3*x), Interval.Ropen(-oo, S(3)/2))
False
>>> is_decreasing(1/(x**2 - 3*x), Interval.Ropen(-oo, 1.5))
False
>>> is_decreasing(-x**2, Interval(-oo, 0))
False
>>> is_decreasing(-x**2 + y, Interval(-oo, 0), x)
False
"""
return monotonicity_helper(expression, lambda x: x <= 0, interval, symbol)
def is_strictly_decreasing(expression, interval=S.Reals, symbol=None):
"""
Return whether the function is strictly decreasing in the given interval.
Parameters
==========
expression : Expr
The target function which is being checked.
interval : Set, optional
The range of values in which we are testing (defaults to set of
all real numbers).
symbol : Symbol, optional
The symbol present in expression which gets varied over the given range.
Returns
=======
Boolean
True if ``expression`` is strictly decreasing in the given ``interval``,
False otherwise.
Examples
========
>>> from sympy import is_strictly_decreasing
>>> from sympy.abc import x, y
>>> from sympy import S, Interval, oo
>>> is_strictly_decreasing(1/(x**2 - 3*x), Interval.Lopen(3, oo))
True
>>> is_strictly_decreasing(1/(x**2 - 3*x), Interval.Ropen(-oo, S(3)/2))
False
>>> is_strictly_decreasing(1/(x**2 - 3*x), Interval.Ropen(-oo, 1.5))
False
>>> is_strictly_decreasing(-x**2, Interval(-oo, 0))
False
>>> is_strictly_decreasing(-x**2 + y, Interval(-oo, 0), x)
False
"""
return monotonicity_helper(expression, lambda x: x < 0, interval, symbol)
def is_monotonic(expression, interval=S.Reals, symbol=None):
"""
Return whether the function is monotonic in the given interval.
Parameters
==========
expression : Expr
The target function which is being checked.
interval : Set, optional
The range of values in which we are testing (defaults to set of
all real numbers).
symbol : Symbol, optional
The symbol present in expression which gets varied over the given range.
Returns
=======
Boolean
True if ``expression`` is monotonic in the given ``interval``,
False otherwise.
Raises
======
NotImplementedError
Monotonicity check has not been implemented for the queried function.
Examples
========
>>> from sympy import is_monotonic
>>> from sympy.abc import x, y
>>> from sympy import S, Interval, oo
>>> is_monotonic(1/(x**2 - 3*x), Interval.open(S(3)/2, 3))
True
>>> is_monotonic(1/(x**2 - 3*x), Interval.open(1.5, 3))
True
>>> is_monotonic(1/(x**2 - 3*x), Interval.Lopen(3, oo))
True
>>> is_monotonic(x**3 - 3*x**2 + 4*x, S.Reals)
True
>>> is_monotonic(-x**2, S.Reals)
False
>>> is_monotonic(x**2 + y + 1, Interval(1, 2), x)
True
"""
from sympy.solvers.solveset import solveset
expression = sympify(expression)
free = expression.free_symbols
if symbol is None and len(free) > 1:
raise NotImplementedError(
'is_monotonic has not yet been implemented'
' for all multivariate expressions.'
)
variable = symbol or (free.pop() if free else Symbol('x'))
turning_points = solveset(expression.diff(variable), variable, interval)
return interval.intersection(turning_points) is S.EmptySet
|
5a1350c1e6617ed1233c6e3e269e11ccc02e96c1
|
4daab5ba90185bae65169ebb8183c635385ab3f8
|
/autode/thermochemistry/symmetry.py
|
81356a925e6e998bcc93bfb07862720bcbe4dce9
|
[
"MIT"
] |
permissive
|
duartegroup/autodE
|
bcf69440bd04411f97d39df0df0ae1f2bf6feb8c
|
4d6667592f083dfcf38de6b75c4222c0a0e7b60b
|
refs/heads/master
| 2023-09-01T15:08:16.028378
| 2023-07-25T08:09:05
| 2023-07-25T08:09:05
| 196,085,570
| 132
| 42
|
MIT
| 2023-09-12T15:20:54
| 2019-07-09T21:20:27
|
Python
|
UTF-8
|
Python
| false
| false
| 8,062
|
py
|
symmetry.py
|
import numpy as np
from scipy.spatial import distance_matrix
from autode.geom import get_rot_mat_euler
def strip_identical_and_inv_axes(axes, sim_axis_tol):
"""
For a list of axes remove those which are similar to within some distance
tolerance, or are inverses to within that tolerance
---------------------------------------------------------------------------
Arguments:
axes: list of axes
sim_axis_tol: distance tolerance in Å
Returns:
(list(np.ndarray)):
"""
unique_possible_axes = []
for i in range(len(axes)):
unique = True
for unique_axis in unique_possible_axes:
if np.linalg.norm(axes[i] - unique_axis) < sim_axis_tol:
unique = False
if np.linalg.norm(-axes[i] - unique_axis) < sim_axis_tol:
unique = False
if unique:
unique_possible_axes.append(axes[i])
return unique_possible_axes
def get_possible_axes(coords, max_triple_dist=2.0, sim_axis_tol=0.1):
r"""
Possible rotation axes in a molecule. Currently limited to average vectors
and cross products i.e.::
          Y            Y --->
         / \          / \
        X   Y        X   Z
                         |
                         |
                         ,
---------------------------------------------------------------------------
Arguments:
coords (np.ndarray):
max_triple_dist (float):
sim_axis_tol (float):
Returns:
(list(np.ndarray)):
"""
possible_axes = []
n_atoms = len(coords)
for i in range(n_atoms):
for j in range(n_atoms):
if i > j: # For the unique pairs add the i–j vector
vec = coords[j] - coords[i]
vec /= np.linalg.norm(vec)
possible_axes.append(vec)
for k in range(n_atoms):
# Triple must not have any of the same atoms
if any((i == j, i == k, j == k)):
continue
vec1 = coords[j] - coords[i]
vec2 = coords[k] - coords[i]
if all(
np.linalg.norm(vec) < max_triple_dist
for vec in (vec1, vec2)
):
avg_vec = (vec1 + vec2) / 2.0
possible_axes.append(avg_vec / np.linalg.norm(avg_vec))
perp_vec = np.cross(vec1, vec2)
possible_axes.append(perp_vec / np.linalg.norm(perp_vec))
unique_possible_axes = strip_identical_and_inv_axes(
possible_axes, sim_axis_tol
)
return unique_possible_axes
def is_same_under_n_fold(
pcoords, axis, n, m=1, tol=0.25, excluded_pcoords=None
):
"""
Does applying an n-fold rotation about an axis generate the same structure
back again?
---------------------------------------------------------------------------
Arguments:
pcoords (np.ndarray): shape = (n_unique_atom_types, n_atoms, 3)
axis (np.ndarray): shape = (3,)
n (int): n-fold of this rotation
m (int): Apply this n-fold rotation m times
tol (float):
excluded_pcoords (list):
Returns:
(bool):
"""
n_unique, n_atoms, _ = pcoords.shape
rotated_coords = np.array(pcoords, copy=True)
rot_mat = get_rot_mat_euler(axis, theta=(2.0 * np.pi * m / n))
excluded = [False for _ in range(n_unique)]
for i in range(n_unique):
# Rotate these coordinates
rotated_coords[i] = rot_mat.dot(rotated_coords[i].T).T
dist_mat = distance_matrix(pcoords[i], rotated_coords[i])
# If all elements are identical then carry on with the next element
if np.linalg.norm(dist_mat) < tol:
continue
# If the RMS between the closest pairwise distance for each atom is
# above the threshold then these structures are not the same
if np.linalg.norm(np.min(dist_mat, axis=1)) > tol:
return False
if excluded_pcoords is not None:
# If these rotated coordinates are similar to those on the excluded
# list then these should not be considered identical
if any(
np.linalg.norm(rotated_coords[i] - pcoords[i]) < tol
for pcoords in excluded_pcoords
):
excluded[i] = True
# This permutation has already been found - return False even though
# it's the same, because there is an excluded list
if all(excluded):
return False
# Add to a list of structures that have already been generated by rotations
if excluded_pcoords is not None:
excluded_pcoords.append(rotated_coords)
return True
def cn_and_axes(species, pcoords, max_n, dist_tol):
"""
Find all Cn rotation axes (2 <= n <= max_n) present in the species
---------------------------------------------------------------------------
Arguments:
species (autode.species.Species):
pcoords (np.ndarray): shape = (n_unique_atom_types, n_atoms, 3)
max_n (int): Maximum n-fold rotation searched for
dist_tol (float): Distance tolerance (Å)
Returns:
(dict(int: list(np.ndarray))): Cn axes found, keyed by n
"""
axes = get_possible_axes(coords=species.coordinates)
# Cn numbers and their associated axes
cn_assos_axes = {i: [] for i in range(2, max_n + 1)}
for axis in axes:
# Minimum n-fold rotation is 2
for n in range(2, max_n + 1):
if is_same_under_n_fold(pcoords, axis, n=n, tol=dist_tol):
cn_assos_axes[n].append(axis)
return cn_assos_axes
def create_pcoords(species):
"""
Return a tensor whose first dimension is the number of unique atom types
in the molecule, whose second runs over all atoms (zeroed where the atom is
not of that type) and whose third is the Cartesian coordinate dimension (3)
:return: (np.ndarray) shape (n, m, 3)
"""
atom_symbols = list(set(atom.label for atom in species.atoms))
n_symbols = len(atom_symbols)
pcoords = np.zeros(shape=(n_symbols, species.n_atoms, 3))
for i in range(n_symbols):
for j in range(species.n_atoms):
# Atom symbol needs to match the leading dimension
if species.atoms[j].label != atom_symbols[i]:
continue
pcoords[i, j, :] = species.atoms[j].coord
return pcoords
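# Illustrative sketch (not part of the original module), assuming a hypothetical
# water molecule `water` with atoms ordered [H, H, O]:
#
#   pcoords = create_pcoords(water)
#   pcoords.shape        # (2, 3, 3): unique atom types x atoms x Cartesian xyz
#   pcoords[0]           # e.g. the H coordinates, with the O row left as zeros
#
# The ordering of the leading dimension follows set() iteration and is therefore
# not guaranteed.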
def symmetry_number(species, max_n_fold_rot_searched=6, dist_tol=0.25):
"""
Calculate the symmetry number of a molecule. See:
Theor Chem Account (2007) 118:813–826. 10.1007/s00214-007-0328-0
---------------------------------------------------------------------------
Arguments:
species (autode.atoms.Species):
Keyword Arguments:
max_n_fold_rot_searched (int):
dist_tol (float): Distance tolerance (Å)
Returns:
(int):
"""
species.translate(vec=-species.com)
pcoords = create_pcoords(species)
# Get the highest Cn-fold rotation axis
cn_axes = cn_and_axes(species, pcoords, max_n_fold_rot_searched, dist_tol)
# If there are no C2 or greater axes then this molecule is C1 → σ=1
if all(len(cn_axes[n]) == 0 for n in cn_axes.keys()):
return 1
sigma_r = 1 # Already has E symmetry
added_pcoords = []
# For every possible axis apply C2, C3...C_n_max rotations
for n, axes in cn_axes.items():
for axis in axes:
# Apply this rotation m times e.g. once for a C2 etc.
for m in range(1, n):
# If the structure is the same and has *not* been generated
# by another rotation, increment the symmetry number by 1
if is_same_under_n_fold(
pcoords,
axis,
n=n,
m=m,
tol=dist_tol,
excluded_pcoords=added_pcoords,
):
sigma_r += 1
if species.is_linear():
# If there are perpendicular C2 axes then the point group is D∞h
if sigma_r > 2:
return 2
# If not then C∞v and the symmetry number is 1
else:
return 1
return sigma_r
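# Illustrative usage sketch (not part of the original module); the Molecule
# construction and SMILES string are assumptions, and the exact value of sigma
# depends on the geometry:
#
#   import autode as ade
#   benzene = ade.Molecule(smiles='c1ccccc1')
#   sigma = symmetry_number(benzene)   # expected to be 12 for D6h benzene
#
# Note that symmetry_number() translates the species to its centre of mass in place.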
|
f0e368fb823a43aa9998d8ac5a27f05c1ca67776
|
bad467391171ac97cb51e1110971e0cc45262526
|
/phi/vis/_viewer.py
|
1c4b79a0d047b154d9cdb26e64a141a6daa625e3
|
[
"MIT"
] |
permissive
|
tum-pbs/PhiFlow
|
d28f2f3de6ce385c3fa1214966b764ef18f86455
|
66f8e5e63ab7a920f60b43230b24f80b8e78286f
|
refs/heads/master
| 2023-09-03T17:00:48.321319
| 2023-08-29T12:07:25
| 2023-08-30T13:47:50
| 225,903,949
| 982
| 163
|
MIT
| 2023-08-30T13:47:51
| 2019-12-04T15:55:57
|
Python
|
UTF-8
|
Python
| false
| false
| 10,708
|
py
|
_viewer.py
|
import itertools
import sys
import time
import warnings
from functools import partial
from threading import Event
from typing import Tuple, Union
from ._log import SceneLog
from ._user_namespace import UserNamespace
from ._vis_base import VisModel, Control, Action
from .. import field, math
from ..field import Scene, SampledField
from phiml.math import batch, Tensor
def create_viewer(namespace: UserNamespace,
fields: dict,
name: str,
description: str,
scene: Union[Scene, None],
asynchronous: bool,
controls: tuple,
actions: dict,
log_performance: bool) -> 'Viewer':
cls = AsyncViewer if asynchronous else Viewer
viewer = cls(namespace, fields, name, description, scene, controls, actions, log_performance)
return viewer
class Viewer(VisModel):
"""
Shows variables from the user namespace.
To create a `Viewer`, call `phi.vis.view()` from the top-level Python script or from a notebook.
Use `Viewer.range()` to control the loop execution from the user interface.
Also see the user interface documentation at https://tum-pbs.github.io/PhiFlow/Visualization.html
"""
def __init__(self,
namespace: UserNamespace,
fields: dict,
name: str,
description: str,
scene: Scene,
controls: tuple,
actions: dict,
log_performance: bool,
):
VisModel.__init__(self, name, description, scene=scene)
self.initial_field_values = fields
self._controls = controls
self.namespace = namespace
self.log_performance = log_performance
self._rec = None
self._in_loop = False
self._log = SceneLog(self.scene)
self.log_file = self._log.log_file
self._elapsed = None
self.reset_step = 0
self._actions = {}
custom_reset = False
self.reset_count = 0
for action, function in actions.items():
if action.name == 'reset':
self._actions[action] = partial(self.reset, custom_reset=function)
custom_reset = True
else:
self._actions[action] = function
if not custom_reset:
self._actions[Action('reset', Viewer.reset.__doc__)] = self.reset
def log_scalars(self, reduce=math.mean, **values):
self._log.log_scalars(self.steps, reduce=reduce, **values)
def info(self, message: str): # may be replaced by a different solution later on
"""
Update the status message.
The status message is written to the console and the log file.
Additionally, it may be displayed by the user interface.
See `debug()`.
Args:
message: Message to display
"""
message = str(message)
self.message = message
self._log.log(message)
def __rrshift__(self, other):
self.info(other)
@property
def field_names(self) -> tuple:
return tuple(self.initial_field_values.keys())
def get_field(self, name, dim_selection: dict) -> SampledField:
if name not in self.initial_field_values:
raise KeyError(name)
if self._rec:
value = self._rec[name]
else:
value = self.namespace.get_variable(name)
if callable(value):
value = value()
if isinstance(value, (SampledField, Tensor)):
value = value[dim_selection]
return value
@property
def curve_names(self) -> tuple:
return self._log.scalar_curve_names
def get_curve(self, name: str) -> tuple:
return self._log.get_scalar_curve(name)
@property
def controls(self) -> Tuple[Control]:
return self._controls
def get_control_value(self, name):
return self.namespace.get_variable(name)
def set_control_value(self, name, value):
self.namespace.set_variable(name, value)
@property
def actions(self) -> tuple:
return tuple(self._actions.keys())
def run_action(self, name):
for action, fun in self._actions.items():
if action.name == name:
fun()
return
raise KeyError(name)
def range(self, *args, warmup=0, **rec_dim):
"""
Similar to the built-in `range()`, this returns a generator that can be used in a `for` loop.
>>> for step in ModuleViewer().range(100):
>>> print(f'Running step {step}')
However, `Viewer.range()` enables controlling the flow via the user interface.
Each element returned by the generator waits for `progress` to be invoked once.
Note that `step` is always equal to `Viewer.steps`.
This method can be invoked multiple times.
However, do not call this method while one `range` is still active.
Args:
*args: Either no arguments for infinite loop or single `int` argument `stop`.
Must be empty if `rec_dim` is used.
**rec_dim: Can be used instead of `*args` to record values along a new batch dimension of this name.
The recorded values can be accessed as `Viewer.rec.<name>` or `Viewer.rec['<name>']`.
warmup: Number of uncounted loop iterations to perform before `step()` is invoked for the first time.
Yields:
Step count of `Viewer`.
"""
for _ in range(warmup):
yield self.steps
self._in_loop = True
self._call(self.progress_available)
if rec_dim:
assert len(rec_dim) == 1, f"Only one rec_dim allowed but got {rec_dim}"
assert not args, f"No positional arguments are allowed when a rec_dim is specified. {rec_dim}"
rec_dim_name = next(iter(rec_dim.keys()))
size = rec_dim[rec_dim_name]
assert isinstance(size, int)
self._rec = Record(rec_dim_name)
self._rec.append(self.initial_field_values, warn_missing=False)
args = [size]
self.growing_dims = [rec_dim_name]
if len(args) == 0:
def count():
i = 0
while True:
yield i
i += 1
step_source = count()
else:
step_source = range(*args)
try:
for step in step_source:
self.steps = step - self.reset_step
try:
self._pre_step()
t = time.perf_counter()
yield step - self.reset_step
self._elapsed = time.perf_counter() - t
self.steps = step - self.reset_step + 1
if rec_dim:
self._rec.append({name: self.namespace.get_variable(name) for name in self.field_names})
if self.log_performance:
self._log.log_scalars(self.steps, reduce=None, step_time=self._elapsed)
finally:
self._post_step()
finally:
self._in_loop = False
self._call(self.progress_unavailable)
def _pre_step(self):
self._call(self.pre_step)
def _post_step(self):
self._call(self.post_step)
@property
def rec(self) -> 'Record':
"""
Read recorded fields as `viewer.rec.<name>`.
Accessing `rec` without having started a recording using `Viewer.range()` raises an `AssertionError`.
"""
assert self._rec, "Enable recording by calling range() with a dimension name, e.g. 'range(frames=10)'."
return self._rec
def progress(self):
raise AssertionError("progress() not supported by synchronous Viewer.")
@property
def can_progress(self) -> bool:
return self._in_loop
def reset(self, custom_reset=None):
"""
Restores all viewed fields to the states they were in when the viewer was created.
Changes variable values in the user namespace.
"""
if custom_reset:
custom_reset()
for name, value in self.initial_field_values.items():
self.namespace.set_variable(name, value)
self.reset_step += self.steps
self.steps = 0
self.reset_count += 1
class AsyncViewer(Viewer):
def __init__(self, *args):
Viewer.__init__(self, *args)
self.step_exec_event = Event()
self.step_finished_event = Event()
def _pre_step(self):
self.step_exec_event.wait()
self._call(self.pre_step)
def _post_step(self):
self._call(self.post_step)
self.step_exec_event.clear()
self.step_finished_event.set()
def progress(self): # called by the GUI
"""
Allows the generator returned by `ModuleViewer.range()` to advance one element.
In typical scenarios, this will run one loop iteration in the top-level script.
"""
self.step_finished_event.clear()
self.step_exec_event.set()
self.step_finished_event.wait()
@property
def can_progress(self) -> bool:
return True
class Record:
def __init__(self, dim: Union[str, None]):
self.dim = dim
self.history = {}
def append(self, variables: dict, warn_missing=True):
if not self.history:
self.history = {name: [] for name in variables.keys()}
for name, val in variables.items():
self.history[name].append(val)
if val is None and warn_missing:
warnings.warn(f"None value encountered for variable '{name}' at step {self.viewer.steps}. This value will not show up in the recording.", RuntimeWarning)
@property
def recorded_fields(self):
return tuple(self.history.keys())
def get_snapshot(self, name: str, frame: int):
return self.history[name][frame]
def recording_size(self, name: str):
return len(self.history[name])
def __getattr__(self, item: str):
assert item in self.history, f"No recording available for '{item}'. The following fields were recorded: {self.recorded_fields}"
snapshots = [v for v in self.history[item] if v is not None]
if snapshots:
return field.stack(snapshots, batch(self.dim))
else:
return None
def __getitem__(self, item):
assert isinstance(item, str)
return self.__getattr__(item)
def __repr__(self):
return ", ".join([f"{name} ({len(values)})" for name, values in self.history.items()])
|
28cce9c2c360af44c8745739fe9c2b147b5c241a
|
fbbe424559f64e9a94116a07eaaa555a01b0a7bb
|
/Tensorflow/source/tensorflow/contrib/layers/__init__.py
|
d8ab7c2d70d8a7346c04d326f3a51b40a4f900ea
|
[
"MIT"
] |
permissive
|
ryfeus/lambda-packs
|
6544adb4dec19b8e71d75c24d8ed789b785b0369
|
cabf6e4f1970dc14302f87414f170de19944bac2
|
refs/heads/master
| 2022-12-07T16:18:52.475504
| 2022-11-29T13:35:35
| 2022-11-29T13:35:35
| 71,386,735
| 1,283
| 263
|
MIT
| 2022-11-26T05:02:14
| 2016-10-19T18:22:39
|
Python
|
UTF-8
|
Python
| false
| false
| 3,296
|
py
|
__init__.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Ops for building neural network layers, regularizers, summaries, etc.
See the @{$python/contrib.layers} guide.
@@avg_pool2d
@@avg_pool3d
@@batch_norm
@@convolution2d
@@convolution3d
@@conv2d_in_plane
@@convolution2d_in_plane
@@conv2d_transpose
@@convolution2d_transpose
@@conv3d_transpose
@@convolution3d_transpose
@@dropout
@@elu
@@embedding_lookup_unique
@@flatten
@@fully_connected
@@GDN
@@gdn
@@layer_norm
@@linear
@@max_pool2d
@@max_pool3d
@@one_hot_encoding
@@relu
@@relu6
@@repeat
@@safe_embedding_lookup_sparse
@@scale_gradient
@@separable_conv2d
@@separable_convolution2d
@@softmax
@@stack
@@unit_norm
@@bow_encoder
@@embed_sequence
@@maxout
@@apply_regularization
@@l1_l2_regularizer
@@l1_regularizer
@@l2_regularizer
@@sum_regularizer
@@xavier_initializer
@@xavier_initializer_conv2d
@@variance_scaling_initializer
@@optimize_loss
@@summarize_activation
@@summarize_tensor
@@summarize_tensors
@@summarize_collection
@@summarize_activations
@@bucketized_column
@@check_feature_columns
@@create_feature_spec_for_parsing
@@crossed_column
@@embedding_column
@@scattered_embedding_column
@@input_from_feature_columns
@@transform_features
@@joint_weighted_sum_from_feature_columns
@@make_place_holder_tensors_for_base_features
@@multi_class_target
@@one_hot_column
@@parse_feature_columns_from_examples
@@parse_feature_columns_from_sequence_examples
@@real_valued_column
@@shared_embedding_columns
@@sparse_column_with_hash_bucket
@@sparse_column_with_integerized_feature
@@sparse_column_with_keys
@@sparse_column_with_vocabulary_file
@@weighted_sparse_column
@@weighted_sum_from_feature_columns
@@infer_real_valued_columns
@@sequence_input_from_feature_columns
@@instance_norm
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: disable=unused-import,wildcard-import
from tensorflow.contrib.layers.python.layers import *
# pylint: enable=unused-import,wildcard-import
from tensorflow.python.util.all_util import remove_undocumented
_allowed_symbols = ['bias_add',
'conv2d',
'conv3d',
'elu',
'feature_column',
'instance_norm',
'legacy_fully_connected',
'legacy_linear',
'legacy_relu',
'OPTIMIZER_CLS_NAMES',
'OPTIMIZER_SUMMARIES',
'regression_target',
'SPARSE_FEATURE_CROSS_DEFAULT_HASH_KEY',
'summaries']
remove_undocumented(__name__, _allowed_symbols)
|
1372a54483d2d4b34a55e634a0b62acd4d767c2d
|
f312fcd24d94be8b32e2d1e50643b01c619aa23b
|
/tensorboard/examples/plugins/example_basic/tensorboard_plugin_example/demo.py
|
d3333380d4fc898ffe91de6a9c051c2b706cd704
|
[
"Apache-2.0"
] |
permissive
|
tensorflow/tensorboard
|
bf316fc5d47f78ef980dd2106c99207892a508d5
|
5961c76dca0fb9bb40d146f5ce13834ac29d8ddb
|
refs/heads/master
| 2023-09-03T23:59:03.264261
| 2023-08-30T22:24:07
| 2023-08-30T22:24:07
| 91,379,993
| 6,766
| 2,063
|
Apache-2.0
| 2023-09-14T20:55:56
| 2017-05-15T20:08:07
|
TypeScript
|
UTF-8
|
Python
| false
| false
| 1,335
|
py
|
demo.py
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Demo code."""
from absl import app
import tensorflow as tf
from tensorboard_plugin_example import summary_v2
def main(unused_argv):
writer = tf.summary.create_file_writer("demo_logs")
with writer.as_default():
summary_v2.greeting(
"guestbook",
"Alice",
step=0,
description="Sign your name!",
)
summary_v2.greeting(
"guestbook", "Bob", step=1
) # no need for `description`
summary_v2.greeting("guestbook", "Cheryl", step=2)
summary_v2.greeting("more_names", "David", step=4)
if __name__ == "__main__":
app.run(main)
|
36dc3201359562b30cf7568928fdb3d5c08bbc69
|
d5693d58b6a0389aa098876a6d465753482e2c8e
|
/Exec/science/flame_wave/analysis/ash_timeseries.py
|
299428e33a075d146e64bfc4c19f44b8cb2bd8ee
|
[
"BSD-3-Clause-LBNL",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
AMReX-Astro/Castro
|
4cebacd8e62638db8779c4d44c4ec0829b24b5d1
|
60a058987642dde487acd8e365923654b8918c87
|
refs/heads/main
| 2023-08-31T07:05:21.158032
| 2023-08-01T22:54:59
| 2023-08-01T22:54:59
| 29,609,157
| 236
| 106
|
NOASSERTION
| 2023-09-12T16:26:21
| 2015-01-21T20:10:37
|
C++
|
UTF-8
|
Python
| false
| false
| 3,199
|
py
|
ash_timeseries.py
|
#!/usr/bin/env python3
import argparse
import os
import re
import sys
from functools import reduce
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
from mpl_toolkits.axes_grid1 import ImageGrid
import yt
from yt.frontends.boxlib.api import CastroDataset
# assume that our data is in CGS
from yt.units import amu, cm
matplotlib.use('agg')
# define ash
def _ash(field, data):
"""ash is anything beyond O, excluding Fe and Ni"""
ash_sum = None
for f in data.ds.field_list:
field_name = f[-1]
# matches names like "X(ne21)" or "X(He4)"
m = re.match(r"^X\(([A-Za-z]+)(\d+)\)$", field_name)
if m is None:
continue
element = m[1].lower()
aion = int(m[2])
if element not in {"h", "he", "c", "n", "o", "fe", "ni"}:
if ash_sum is None:
ash_sum = data[f]
else:
ash_sum += data[f]
if ash_sum is None:
raise ValueError("no ash found")
return ash_sum
def doit(plotfiles):
ds = CastroDataset(plotfiles[0])
xmin = ds.domain_left_edge[0]
xmax = ds.domain_right_edge[0]
xctr = 0.5*(xmin + xmax)
L_x = xmax - xmin
ymin = 0.0*cm
ymax = 2.5e4*cm
yctr = 0.5*(ymin + ymax)
L_y = ymax - ymin
fig = plt.figure()
fig.set_size_inches(12.0, 9.0)
grid = ImageGrid(fig, 111, nrows_ncols=(len(plotfiles), 1),
axes_pad=0.25, label_mode="L", cbar_mode="single", cbar_size="0.5%")
for i, pf in enumerate(plotfiles):
ds = CastroDataset(pf)
ds.add_field(("gas", "ash"), function=_ash, display_name="ash", units="(dimensionless)", sampling_type="cell")
field = "ash"
sp = yt.SlicePlot(ds, "theta", field, center=[xctr, yctr, 0.0*cm], width=[L_x, L_y, 0.0*cm], fontsize="12")
sp.set_buff_size((2400,2400))
sp.set_zlim(field, 1.e-5, 0.1)
sp.set_log(field, True)
sp.set_cmap(field, "plasma_r")
sp.set_axes_unit("cm")
sp.annotate_text((0.85, 0.8), "{:5.2f} ms".format(1000.0*float(ds.current_time.in_cgs())),
coord_system="axis", text_args={"color": "black", "size": 10})
plot = sp.plots[field]
plot.figure = fig
plot.axes = grid[i].axes
plot.cax = grid.cbar_axes[i]
if i < len(plotfiles)-1:
grid[i].axes.xaxis.offsetText.set_visible(False)
sp._setup_plots()
fig.set_size_inches(10.0, 15.0)
fig.subplots_adjust(left=0.075, right=0.925, top=0.95, bottom=0.05)
fig.savefig("time_series_ash.pdf")
if __name__ == "__main__":
p = argparse.ArgumentParser()
p.add_argument("--skip", type=int, default=1,
help="interval between plotfiles")
p.add_argument("plotfiles", type=str, nargs="+",
help="list of plotfiles to plot")
args = p.parse_args()
plot_prefix = args.plotfiles[0].split("plt")[0] + "plt"
plot_nums = sorted([p.split("plt")[1] for p in args.plotfiles], key=int)
plotfiles = []
for n in range(0, len(plot_nums), args.skip):
plotfiles.append("{}{}".format(plot_prefix, plot_nums[n]))
doit(plotfiles)
|
9e2f77b093dbf60e27f4d3963ad2e1cfe54e0c23
|
6cbb11d41a69ce704529a99c762e474edf789505
|
/sqlite3_to_mysql/debug_info.py
|
8d0316a428fa44ed96767e137b9cfe3edd4ad628
|
[
"MIT"
] |
permissive
|
techouse/sqlite3-to-mysql
|
a2538103d28b8578c82d2e041df05b1833aa60e0
|
9349a0f5503c9746b3cf6203c42f18244926247f
|
refs/heads/master
| 2023-08-22T12:51:58.877617
| 2023-08-04T05:39:14
| 2023-08-04T05:39:14
| 135,895,949
| 287
| 51
|
MIT
| 2023-08-19T00:03:50
| 2018-06-03T10:53:17
|
Python
|
UTF-8
|
Python
| false
| false
| 3,303
|
py
|
debug_info.py
|
"""Module containing bug report helper(s).
Adapted from https://github.com/psf/requests/blob/master/requests/help.py
"""
import platform
import sqlite3
import sys
import typing as t
from distutils.spawn import find_executable # pylint: disable=W0402
from subprocess import check_output
import click
import mysql.connector
import pytimeparse2
import simplejson
import tabulate
import tqdm
from . import __version__ as package_version
def _implementation() -> str:
"""Return a dict with the Python implementation and version.
Provide both the name and the version of the Python implementation
currently running. For example, on CPython 2.7.5 it will return
{'name': 'CPython', 'version': '2.7.5'}.
This function works best on CPython and PyPy: in particular, it probably
doesn't work for Jython or IronPython. Future investigation should be done
to work out the correct shape of the code for those platforms.
"""
implementation: str = platform.python_implementation()
if implementation == "CPython":
implementation_version = platform.python_version()
elif implementation == "PyPy":
implementation_version = "%s.%s.%s" % (
sys.pypy_version_info.major, # type: ignore # noqa: ignore=E1101 pylint: disable=E1101
sys.pypy_version_info.minor, # type: ignore # noqa: ignore=E1101 pylint: disable=E1101
sys.pypy_version_info.micro, # type: ignore # noqa: ignore=E1101 pylint: disable=E1101
)
rel = sys.pypy_version_info.releaselevel # type: ignore # noqa: ignore=E1101 pylint: disable=E1101
if rel != "final":
implementation_version = "".join([implementation_version, rel])
elif implementation == "Jython":
implementation_version = platform.python_version() # Complete Guess
elif implementation == "IronPython":
implementation_version = platform.python_version() # Complete Guess
else:
implementation_version = "Unknown"
return f"{implementation} {implementation_version}"
def _mysql_version() -> str:
if find_executable("mysql"):
try:
mysql_version: t.Union[str, bytes] = check_output(["mysql", "-V"])
try:
return mysql_version.decode().strip() # type: ignore
except (UnicodeDecodeError, AttributeError):
return str(mysql_version)
except Exception: # nosec pylint: disable=W0703
pass
return "MySQL client not found on the system"
def info() -> t.List[t.List[str]]:
"""Generate information for a bug report."""
try:
platform_info = f"{platform.system()} {platform.release()}"
except IOError:
platform_info = "Unknown"
return [
["sqlite3-to-mysql", package_version],
["", ""],
["Operating System", platform_info],
["Python", _implementation()],
["MySQL", _mysql_version()],
["SQLite", sqlite3.sqlite_version],
["", ""],
["click", click.__version__],
["mysql-connector-python", mysql.connector.__version__],
["pytimeparse2", pytimeparse2.__version__],
["simplejson", simplejson.__version__], # type: ignore
["tabulate", tabulate.__version__],
["tqdm", tqdm.__version__],
]
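# Illustrative sketch (not part of the original module): the nested list returned
# by info() is shaped for tabulate; the header names below are an assumption:
#
#   print(tabulate.tabulate(info(), headers=["software", "version"]))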
|
25b83a7cd420489430b37780a9d9bea361540f5c
|
5a52ccea88f90dd4f1acc2819997fce0dd5ffb7d
|
/alipay/aop/api/domain/TradePrecreateConfirmExtendParam.py
|
f8deb4fa665f4fc0dc525f44472cad3f1b59f579
|
[
"Apache-2.0"
] |
permissive
|
alipay/alipay-sdk-python-all
|
8bd20882852ffeb70a6e929038bf88ff1d1eff1c
|
1fad300587c9e7e099747305ba9077d4cd7afde9
|
refs/heads/master
| 2023-08-27T21:35:01.778771
| 2023-08-23T07:12:26
| 2023-08-23T07:12:26
| 133,338,689
| 247
| 70
|
Apache-2.0
| 2023-04-25T04:54:02
| 2018-05-14T09:40:54
|
Python
|
UTF-8
|
Python
| false
| false
| 1,056
|
py
|
TradePrecreateConfirmExtendParam.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
class TradePrecreateConfirmExtendParam(object):
def __init__(self):
self._precreate_code_from = None
@property
def precreate_code_from(self):
return self._precreate_code_from
@precreate_code_from.setter
def precreate_code_from(self, value):
self._precreate_code_from = value
def to_alipay_dict(self):
params = dict()
if self.precreate_code_from:
if hasattr(self.precreate_code_from, 'to_alipay_dict'):
params['precreate_code_from'] = self.precreate_code_from.to_alipay_dict()
else:
params['precreate_code_from'] = self.precreate_code_from
return params
@staticmethod
def from_alipay_dict(d):
if not d:
return None
o = TradePrecreateConfirmExtendParam()
if 'precreate_code_from' in d:
o.precreate_code_from = d['precreate_code_from']
return o
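# Illustrative round-trip sketch (not part of the original file); the value
# 'MINI_APP' is a made-up placeholder:
#
#   param = TradePrecreateConfirmExtendParam.from_alipay_dict({'precreate_code_from': 'MINI_APP'})
#   assert param.to_alipay_dict() == {'precreate_code_from': 'MINI_APP'}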
|
cc7637b09d9776f78ff106126915c3ac9a000971
|
eb76f82c474a327759888306910ccf584aee7ba1
|
/ocrd_network/ocrd_network/rabbitmq_utils/__init__.py
|
2d5f55e62cbf2db6397ef176f19e6277efd95923
|
[
"Apache-2.0"
] |
permissive
|
OCR-D/core
|
cc2aa388f43823529437924c1d653e48387a180a
|
5d627396a5dfe9abcf11840608c87c94e1353647
|
refs/heads/master
| 2023-08-19T01:39:53.219760
| 2023-08-18T14:37:02
| 2023-08-18T14:37:02
| 112,337,283
| 112
| 26
|
Apache-2.0
| 2023-09-13T16:12:03
| 2017-11-28T13:13:24
|
Python
|
UTF-8
|
Python
| false
| false
| 299
|
py
|
__init__.py
|
__all__ = [
'RMQConsumer',
'RMQConnector',
'RMQPublisher',
'OcrdProcessingMessage',
'OcrdResultMessage'
]
from .consumer import RMQConsumer
from .connector import RMQConnector
from .publisher import RMQPublisher
from .ocrd_messages import (
OcrdProcessingMessage,
OcrdResultMessage
)
|
15b3481f88eafd26aa1ad6fd89398a388309620b
|
b8bbdfc593b6d816e67a344f720f90ec05236778
|
/tests/providers/google/cloud/sensors/test_bigquery.py
|
5fe40227c53aed30b15b959b942ae7ad5f1a444a
|
[
"Apache-2.0",
"BSD-3-Clause",
"MIT"
] |
permissive
|
apache/airflow
|
ed78db0a8bab7e096990e143926e52f518e288ab
|
1b122c15030e99cef9d4ff26d3781a7a9d6949bc
|
refs/heads/main
| 2023-09-01T08:37:34.556097
| 2023-09-01T06:49:05
| 2023-09-01T06:49:05
| 33,884,891
| 22,756
| 11,558
|
Apache-2.0
| 2023-09-14T20:12:36
| 2015-04-13T18:04:58
|
Python
|
UTF-8
|
Python
| false
| false
| 17,718
|
py
|
test_bigquery.py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
from unittest import mock
import pytest
from airflow.exceptions import AirflowException, AirflowProviderDeprecationWarning, TaskDeferred
from airflow.providers.google.cloud.sensors.bigquery import (
BigQueryTableExistenceAsyncSensor,
BigQueryTableExistencePartitionAsyncSensor,
BigQueryTableExistenceSensor,
BigQueryTablePartitionExistenceSensor,
)
from airflow.providers.google.cloud.triggers.bigquery import (
BigQueryTableExistenceTrigger,
BigQueryTablePartitionExistenceTrigger,
)
TEST_PROJECT_ID = "test_project"
TEST_DATASET_ID = "test_dataset"
TEST_TABLE_ID = "test_table"
TEST_GCP_CONN_ID = "test_gcp_conn_id"
TEST_PARTITION_ID = "20200101"
TEST_IMPERSONATION_CHAIN = ["ACCOUNT_1", "ACCOUNT_2", "ACCOUNT_3"]
class TestBigqueryTableExistenceSensor:
@mock.patch("airflow.providers.google.cloud.sensors.bigquery.BigQueryHook")
def test_passing_arguments_to_hook(self, mock_hook):
task = BigQueryTableExistenceSensor(
task_id="task-id",
project_id=TEST_PROJECT_ID,
dataset_id=TEST_DATASET_ID,
table_id=TEST_TABLE_ID,
gcp_conn_id=TEST_GCP_CONN_ID,
impersonation_chain=TEST_IMPERSONATION_CHAIN,
)
mock_hook.return_value.table_exists.return_value = True
results = task.poke(mock.MagicMock())
assert results is True
mock_hook.assert_called_once_with(
gcp_conn_id=TEST_GCP_CONN_ID,
impersonation_chain=TEST_IMPERSONATION_CHAIN,
)
mock_hook.return_value.table_exists.assert_called_once_with(
project_id=TEST_PROJECT_ID, dataset_id=TEST_DATASET_ID, table_id=TEST_TABLE_ID
)
@mock.patch("airflow.providers.google.cloud.sensors.bigquery.BigQueryHook")
@mock.patch("airflow.providers.google.cloud.sensors.bigquery.BigQueryTableExistenceSensor.defer")
def test_table_existence_sensor_finish_before_deferred(self, mock_defer, mock_hook):
task = BigQueryTableExistenceSensor(
task_id="task-id",
project_id=TEST_PROJECT_ID,
dataset_id=TEST_DATASET_ID,
table_id=TEST_TABLE_ID,
gcp_conn_id=TEST_GCP_CONN_ID,
impersonation_chain=TEST_IMPERSONATION_CHAIN,
deferrable=True,
)
mock_hook.return_value.table_exists.return_value = True
task.execute(mock.MagicMock())
assert not mock_defer.called
@mock.patch("airflow.providers.google.cloud.sensors.bigquery.BigQueryHook")
def test_execute_deferred(self, mock_hook):
"""
Asserts that a task is deferred and a BigQueryTableExistenceTrigger will be fired
when the BigQueryTableExistenceAsyncSensor is executed.
"""
task = BigQueryTableExistenceSensor(
task_id="check_table_exists",
project_id=TEST_PROJECT_ID,
dataset_id=TEST_DATASET_ID,
table_id=TEST_TABLE_ID,
deferrable=True,
)
mock_hook.return_value.table_exists.return_value = False
with pytest.raises(TaskDeferred) as exc:
task.execute(mock.MagicMock())
assert isinstance(
exc.value.trigger, BigQueryTableExistenceTrigger
), "Trigger is not a BigQueryTableExistenceTrigger"
def test_execute_deferred_failure(self):
"""Tests that an AirflowException is raised in case of error event"""
task = BigQueryTableExistenceSensor(
task_id="task-id",
project_id=TEST_PROJECT_ID,
dataset_id=TEST_DATASET_ID,
table_id=TEST_TABLE_ID,
deferrable=True,
)
with pytest.raises(AirflowException):
task.execute_complete(context={}, event={"status": "error", "message": "test failure message"})
def test_execute_complete(self):
"""Asserts that logging occurs as expected"""
task = BigQueryTableExistenceSensor(
task_id="task-id",
project_id=TEST_PROJECT_ID,
dataset_id=TEST_DATASET_ID,
table_id=TEST_TABLE_ID,
deferrable=True,
)
table_uri = f"{TEST_PROJECT_ID}:{TEST_DATASET_ID}.{TEST_TABLE_ID}"
with mock.patch.object(task.log, "info") as mock_log_info:
task.execute_complete(context={}, event={"status": "success", "message": "Job completed"})
mock_log_info.assert_called_with("Sensor checks existence of table: %s", table_uri)
def test_execute_deferred_complete_event_none(self):
"""Asserts that logging occurs as expected"""
task = BigQueryTableExistenceSensor(
task_id="task-id",
project_id=TEST_PROJECT_ID,
dataset_id=TEST_DATASET_ID,
table_id=TEST_TABLE_ID,
)
with pytest.raises(AirflowException):
task.execute_complete(context={}, event=None)
class TestBigqueryTablePartitionExistenceSensor:
@mock.patch("airflow.providers.google.cloud.sensors.bigquery.BigQueryHook")
def test_passing_arguments_to_hook(self, mock_hook):
task = BigQueryTablePartitionExistenceSensor(
task_id="task-id",
project_id=TEST_PROJECT_ID,
dataset_id=TEST_DATASET_ID,
table_id=TEST_TABLE_ID,
partition_id=TEST_PARTITION_ID,
gcp_conn_id=TEST_GCP_CONN_ID,
impersonation_chain=TEST_IMPERSONATION_CHAIN,
)
mock_hook.return_value.table_partition_exists.return_value = True
results = task.poke(mock.MagicMock())
assert results is True
mock_hook.assert_called_once_with(
gcp_conn_id=TEST_GCP_CONN_ID,
impersonation_chain=TEST_IMPERSONATION_CHAIN,
)
mock_hook.return_value.table_partition_exists.assert_called_once_with(
project_id=TEST_PROJECT_ID,
dataset_id=TEST_DATASET_ID,
table_id=TEST_TABLE_ID,
partition_id=TEST_PARTITION_ID,
)
@mock.patch("airflow.providers.google.cloud.sensors.bigquery.BigQueryHook")
@mock.patch("airflow.providers.google.cloud.sensors.bigquery.BigQueryTablePartitionExistenceSensor.defer")
def test_table_partition_existence_sensor_finish_before_deferred(self, mock_defer, mock_hook):
"""
Asserts that the sensor completes without being deferred when the table
partition already exists and deferrable is set to True.
"""
task = BigQueryTablePartitionExistenceSensor(
task_id="test_task_id",
project_id=TEST_PROJECT_ID,
dataset_id=TEST_DATASET_ID,
table_id=TEST_TABLE_ID,
partition_id=TEST_PARTITION_ID,
deferrable=True,
)
mock_hook.return_value.table_partition_exists.return_value = True
task.execute(mock.MagicMock())
assert not mock_defer.called
@mock.patch("airflow.providers.google.cloud.sensors.bigquery.BigQueryHook")
def test_execute_with_deferrable_mode(self, mock_hook):
"""
Asserts that a task is deferred and a BigQueryTablePartitionExistenceTrigger will be fired
when the BigQueryTablePartitionExistenceSensor is executed and deferrable is set to True.
"""
task = BigQueryTablePartitionExistenceSensor(
task_id="test_task_id",
project_id=TEST_PROJECT_ID,
dataset_id=TEST_DATASET_ID,
table_id=TEST_TABLE_ID,
partition_id=TEST_PARTITION_ID,
deferrable=True,
)
mock_hook.return_value.table_partition_exists.return_value = False
with pytest.raises(TaskDeferred) as exc:
task.execute(context={})
assert isinstance(
exc.value.trigger, BigQueryTablePartitionExistenceTrigger
), "Trigger is not a BigQueryTablePartitionExistenceTrigger"
def test_execute_with_deferrable_mode_execute_failure(self):
"""Tests that an AirflowException is raised in case of error event"""
task = BigQueryTablePartitionExistenceSensor(
task_id="test_task_id",
project_id=TEST_PROJECT_ID,
dataset_id=TEST_DATASET_ID,
table_id=TEST_TABLE_ID,
partition_id=TEST_PARTITION_ID,
deferrable=True,
)
with pytest.raises(AirflowException):
task.execute_complete(context={}, event={"status": "error", "message": "test failure message"})
def test_execute_complete_event_none(self):
"""Asserts that logging occurs as expected"""
task = BigQueryTablePartitionExistenceSensor(
task_id="task-id",
project_id=TEST_PROJECT_ID,
dataset_id=TEST_DATASET_ID,
table_id=TEST_TABLE_ID,
partition_id=TEST_PARTITION_ID,
deferrable=True,
)
with pytest.raises(AirflowException, match="No event received in trigger callback"):
task.execute_complete(context={}, event=None)
def test_execute_complete(self):
"""Asserts that logging occurs as expected"""
task = BigQueryTablePartitionExistenceSensor(
task_id="task-id",
project_id=TEST_PROJECT_ID,
dataset_id=TEST_DATASET_ID,
table_id=TEST_TABLE_ID,
partition_id=TEST_PARTITION_ID,
deferrable=True,
)
table_uri = f"{TEST_PROJECT_ID}:{TEST_DATASET_ID}.{TEST_TABLE_ID}"
with mock.patch.object(task.log, "info") as mock_log_info:
task.execute_complete(context={}, event={"status": "success", "message": "test"})
mock_log_info.assert_called_with(
'Sensor checks existence of partition: "%s" in table: %s', TEST_PARTITION_ID, table_uri
)
@pytest.fixture()
def context():
"""
Creates an empty context.
"""
context = {}
yield context
class TestBigQueryTableExistenceAsyncSensor:
deprecation_message = (
"Class `BigQueryTableExistenceAsyncSensor` is deprecated and "
"will be removed in a future release. "
"Please use `BigQueryTableExistenceSensor` and "
"set `deferrable` attribute to `True` instead"
)
@mock.patch("airflow.providers.google.cloud.sensors.bigquery.BigQueryHook")
def test_big_query_table_existence_sensor_async(self, mock_hook):
"""
Asserts that a task is deferred and a BigQueryTableExistenceTrigger will be fired
when the BigQueryTableExistenceAsyncSensor is executed.
"""
with pytest.warns(AirflowProviderDeprecationWarning, match=self.deprecation_message):
task = BigQueryTableExistenceAsyncSensor(
task_id="check_table_exists",
project_id=TEST_PROJECT_ID,
dataset_id=TEST_DATASET_ID,
table_id=TEST_TABLE_ID,
)
mock_hook.return_value.table_exists.return_value = False
with pytest.raises(TaskDeferred) as exc:
task.execute(context={})
assert isinstance(
exc.value.trigger, BigQueryTableExistenceTrigger
), "Trigger is not a BigQueryTableExistenceTrigger"
def test_big_query_table_existence_sensor_async_execute_failure(self):
"""Tests that an AirflowException is raised in case of error event"""
with pytest.warns(AirflowProviderDeprecationWarning, match=self.deprecation_message):
task = BigQueryTableExistenceAsyncSensor(
task_id="task-id",
project_id=TEST_PROJECT_ID,
dataset_id=TEST_DATASET_ID,
table_id=TEST_TABLE_ID,
)
with pytest.raises(AirflowException):
task.execute_complete(context={}, event={"status": "error", "message": "test failure message"})
def test_big_query_table_existence_sensor_async_execute_complete(self):
"""Asserts that logging occurs as expected"""
with pytest.warns(AirflowProviderDeprecationWarning, match=self.deprecation_message):
task = BigQueryTableExistenceAsyncSensor(
task_id="task-id",
project_id=TEST_PROJECT_ID,
dataset_id=TEST_DATASET_ID,
table_id=TEST_TABLE_ID,
)
table_uri = f"{TEST_PROJECT_ID}:{TEST_DATASET_ID}.{TEST_TABLE_ID}"
with mock.patch.object(task.log, "info") as mock_log_info:
task.execute_complete(context={}, event={"status": "success", "message": "Job completed"})
mock_log_info.assert_called_with("Sensor checks existence of table: %s", table_uri)
def test_big_query_sensor_async_execute_complete_event_none(self):
"""Asserts that logging occurs as expected"""
with pytest.warns(AirflowProviderDeprecationWarning, match=self.deprecation_message):
task = BigQueryTableExistenceAsyncSensor(
task_id="task-id",
project_id=TEST_PROJECT_ID,
dataset_id=TEST_DATASET_ID,
table_id=TEST_TABLE_ID,
)
with pytest.raises(AirflowException):
task.execute_complete(context={}, event=None)
class TestBigQueryTableExistencePartitionAsyncSensor:
deprecation_message = (
"Class `BigQueryTableExistencePartitionAsyncSensor` is deprecated and "
"will be removed in a future release. "
"Please use `BigQueryTablePartitionExistenceSensor` and "
"set `deferrable` attribute to `True` instead"
)
@mock.patch("airflow.providers.google.cloud.sensors.bigquery.BigQueryHook")
def test_big_query_table_existence_partition_sensor_async(self, mock_hook):
"""
Asserts that a task is deferred and a BigQueryTablePartitionExistenceTrigger will be fired
when the BigQueryTableExistencePartitionAsyncSensor is executed.
"""
with pytest.warns(AirflowProviderDeprecationWarning, match=self.deprecation_message):
task = BigQueryTableExistencePartitionAsyncSensor(
task_id="test_task_id",
project_id=TEST_PROJECT_ID,
dataset_id=TEST_DATASET_ID,
table_id=TEST_TABLE_ID,
partition_id=TEST_PARTITION_ID,
)
mock_hook.return_value.table_partition_exists.return_value = False
with pytest.raises(TaskDeferred) as exc:
task.execute(mock.MagicMock())
assert isinstance(
exc.value.trigger, BigQueryTablePartitionExistenceTrigger
), "Trigger is not a BigQueryTablePartitionExistenceTrigger"
def test_big_query_table_existence_partition_sensor_async_execute_failure(self):
"""Tests that an AirflowException is raised in case of error event"""
with pytest.warns(AirflowProviderDeprecationWarning, match=self.deprecation_message):
task = BigQueryTableExistencePartitionAsyncSensor(
task_id="test_task_id",
project_id=TEST_PROJECT_ID,
dataset_id=TEST_DATASET_ID,
table_id=TEST_TABLE_ID,
partition_id=TEST_PARTITION_ID,
)
with pytest.raises(AirflowException):
task.execute_complete(context={}, event={"status": "error", "message": "test failure message"})
def test_big_query_table_existence_partition_sensor_async_execute_complete_event_none(self):
"""Asserts that logging occurs as expected"""
with pytest.warns(AirflowProviderDeprecationWarning, match=self.deprecation_message):
task = BigQueryTableExistencePartitionAsyncSensor(
task_id="task-id",
project_id=TEST_PROJECT_ID,
dataset_id=TEST_DATASET_ID,
table_id=TEST_TABLE_ID,
partition_id=TEST_PARTITION_ID,
)
with pytest.raises(AirflowException, match="No event received in trigger callback"):
task.execute_complete(context={}, event=None)
def test_big_query_table_existence_partition_sensor_async_execute_complete(self):
"""Asserts that logging occurs as expected"""
with pytest.warns(AirflowProviderDeprecationWarning, match=self.deprecation_message):
task = BigQueryTableExistencePartitionAsyncSensor(
task_id="task-id",
project_id=TEST_PROJECT_ID,
dataset_id=TEST_DATASET_ID,
table_id=TEST_TABLE_ID,
partition_id=TEST_PARTITION_ID,
)
table_uri = f"{TEST_PROJECT_ID}:{TEST_DATASET_ID}.{TEST_TABLE_ID}"
with mock.patch.object(task.log, "info") as mock_log_info:
task.execute_complete(context={}, event={"status": "success", "message": "test"})
mock_log_info.assert_called_with(
'Sensor checks existence of partition: "%s" in table: %s', TEST_PARTITION_ID, table_uri
)
|
45a6283e4fa1066995c2c8e349ee227b463ec526
|
5419846a9e7010847fdbd232aa031fbd3e53e2bd
|
/AWS SageMaker/Jupyter-Folder/Boosting.py
|
2f1f04af3e72eb643589f7068fe9ea7befd7b182
|
[] |
no_license
|
RubensZimbres/Repo-2018
|
432d492df83bb6cf9268131d34b03241e0a2cbf5
|
4880ff9f85e5a190f6515d16a810b1cda9288c68
|
refs/heads/master
| 2021-12-14T08:25:45.171826
| 2021-12-06T22:14:48
| 2021-12-06T22:14:48
| 114,630,397
| 180
| 88
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,648
|
py
|
Boosting.py
|
import pandas as pd
import numpy as np
import sagemaker
from sagemaker.predictor import csv_serializer
import boto3
import re
import os
from sagemaker import get_execution_role
role = get_execution_role()
df=pd.read_csv('s3://your-repo/DadosTeseLogit.csv',sep=',',header=0)
sel=np.where(df.corr()['selected']>.5)[0][0:-1]
df=df.iloc[:,np.concatenate([[30],sel])]
containers = {'us-east-1': 'aws_account_id.dkr.ecr.us-east-1.amazonaws.com/decision-trees:latest'}
bucket = 'your-repo'
prefix = 'sagemaker/xgboost-churn'
df.to_csv('train.csv',header=False, index=False)
boto3.Session().resource('s3').Bucket(bucket).Object(os.path.join(prefix, 'train/train.csv')).upload_file('train.csv')
s3_input_train = sagemaker.s3_input(s3_data='s3://{}/{}/train'.format(bucket, prefix), content_type='csv')
containers[boto3.Session().region_name]
sess = sagemaker.Session()
xgb = sagemaker.estimator.Estimator(containers[boto3.Session().region_name],
role,
train_instance_count=1,
train_instance_type='ml.m4.xlarge',
output_path='s3://{}/{}/output'.format(bucket, prefix),
sagemaker_session=sess)
xgb.fit({'train': s3_input_train})
xgb_predictor = xgb.deploy(initial_instance_count=1,instance_type='ml.m4.xlarge')
xgb_predictor.content_type = 'text/csv'
xgb_predictor.serializer = csv_serializer
xgb_predictor.deserializer = None
df3=df.iloc[:,[1,2,3]]
predictions =xgb_predictor.predict(df3.values).decode('utf-8')
predictions
|
8e5011abcf13440fae8e6046b9729d3d91578439
|
1d88dfbd224ba0cdfaf4834dc4202410cba3db16
|
/examples/python/visualization/demo_scene.py
|
cfb5d3d23931c096db0a473923012b3256bde0f4
|
[
"MIT"
] |
permissive
|
isl-org/Open3D
|
5217d525f505839777078127b5da7adc7b4bba2f
|
b620539a445206a7e07fed33fb4dcc6dff06c1a5
|
refs/heads/master
| 2023-09-01T21:42:37.981501
| 2023-09-01T15:08:08
| 2023-09-01T15:08:08
| 75,413,130
| 4,535
| 908
|
NOASSERTION
| 2023-09-14T18:07:41
| 2016-12-02T16:40:38
|
C++
|
UTF-8
|
Python
| false
| false
| 7,628
|
py
|
demo_scene.py
|
# ----------------------------------------------------------------------------
# - Open3D: www.open3d.org -
# ----------------------------------------------------------------------------
# Copyright (c) 2018-2023 www.open3d.org
# SPDX-License-Identifier: MIT
# ----------------------------------------------------------------------------
"""Demo scene demonstrating models, built-in shapes, and materials"""
import math
import numpy as np
import os
import open3d as o3d
import open3d.visualization as vis
def convert_material_record(mat_record):
mat = vis.Material('defaultLit')
# Copy the base color, scalar PBR parameters and texture maps from the record
mat.vector_properties['base_color'] = mat_record.base_color
mat.scalar_properties['metallic'] = mat_record.base_metallic
mat.scalar_properties['roughness'] = mat_record.base_roughness
mat.scalar_properties['reflectance'] = mat_record.base_reflectance
mat.texture_maps['albedo'] = o3d.t.geometry.Image.from_legacy(
mat_record.albedo_img)
mat.texture_maps['normal'] = o3d.t.geometry.Image.from_legacy(
mat_record.normal_img)
mat.texture_maps['ao_rough_metal'] = o3d.t.geometry.Image.from_legacy(
mat_record.ao_rough_metal_img)
return mat
def create_scene():
'''
Creates the geometry and materials for the demo scene and returns a list of geometry dicts suitable for passing to the draw call
'''
# Create some shapes for our scene
a_cube = o3d.geometry.TriangleMesh.create_box(2,
4,
4,
create_uv_map=True,
map_texture_to_each_face=True)
a_cube.compute_triangle_normals()
a_cube.translate((-5, 0, -2))
a_cube = o3d.t.geometry.TriangleMesh.from_legacy(a_cube)
a_sphere = o3d.geometry.TriangleMesh.create_sphere(2.5,
resolution=40,
create_uv_map=True)
a_sphere.compute_vertex_normals()
rotate_90 = o3d.geometry.get_rotation_matrix_from_xyz((-math.pi / 2, 0, 0))
a_sphere.rotate(rotate_90)
a_sphere.translate((5, 2.4, 0))
a_sphere = o3d.t.geometry.TriangleMesh.from_legacy(a_sphere)
a_cylinder = o3d.geometry.TriangleMesh.create_cylinder(
1.0, 4.0, 30, 4, True)
a_cylinder.compute_triangle_normals()
a_cylinder.rotate(rotate_90)
a_cylinder.translate((10, 2, 0))
a_cylinder = o3d.t.geometry.TriangleMesh.from_legacy(a_cylinder)
a_ico = o3d.geometry.TriangleMesh.create_icosahedron(1.25,
create_uv_map=True)
a_ico.compute_triangle_normals()
a_ico.translate((-10, 2, 0))
a_ico = o3d.t.geometry.TriangleMesh.from_legacy(a_ico)
# Load an OBJ model for our scene
helmet_data = o3d.data.FlightHelmetModel()
helmet = o3d.io.read_triangle_model(helmet_data.path)
helmet_parts = []
for m in helmet.meshes:
# m.mesh.paint_uniform_color((1.0, 0.75, 0.3))
m.mesh.scale(10.0, (0.0, 0.0, 0.0))
helmet_parts.append(m)
# Create a ground plane
ground_plane = o3d.geometry.TriangleMesh.create_box(
50.0, 0.1, 50.0, create_uv_map=True, map_texture_to_each_face=True)
ground_plane.compute_triangle_normals()
rotate_180 = o3d.geometry.get_rotation_matrix_from_xyz((-math.pi, 0, 0))
ground_plane.rotate(rotate_180)
ground_plane.translate((-25.0, -0.1, -25.0))
ground_plane.paint_uniform_color((1, 1, 1))
ground_plane = o3d.t.geometry.TriangleMesh.from_legacy(ground_plane)
# Material to make ground plane more interesting - a rough piece of glass
ground_plane.material = vis.Material("defaultLitSSR")
ground_plane.material.scalar_properties['roughness'] = 0.15
ground_plane.material.scalar_properties['reflectance'] = 0.72
ground_plane.material.scalar_properties['transmission'] = 0.6
ground_plane.material.scalar_properties['thickness'] = 0.3
ground_plane.material.scalar_properties['absorption_distance'] = 0.1
ground_plane.material.vector_properties['absorption_color'] = np.array(
[0.82, 0.98, 0.972, 1.0])
painted_plaster_texture_data = o3d.data.PaintedPlasterTexture()
ground_plane.material.texture_maps['albedo'] = o3d.t.io.read_image(
painted_plaster_texture_data.albedo_texture_path)
ground_plane.material.texture_maps['normal'] = o3d.t.io.read_image(
painted_plaster_texture_data.normal_texture_path)
ground_plane.material.texture_maps['roughness'] = o3d.t.io.read_image(
painted_plaster_texture_data.roughness_texture_path)
# Load textures and create materials for each of our demo items
wood_floor_texture_data = o3d.data.WoodFloorTexture()
a_cube.material = vis.Material('defaultLit')
a_cube.material.texture_maps['albedo'] = o3d.t.io.read_image(
wood_floor_texture_data.albedo_texture_path)
a_cube.material.texture_maps['normal'] = o3d.t.io.read_image(
wood_floor_texture_data.normal_texture_path)
a_cube.material.texture_maps['roughness'] = o3d.t.io.read_image(
wood_floor_texture_data.roughness_texture_path)
tiles_texture_data = o3d.data.TilesTexture()
a_sphere.material = vis.Material('defaultLit')
a_sphere.material.texture_maps['albedo'] = o3d.t.io.read_image(
tiles_texture_data.albedo_texture_path)
a_sphere.material.texture_maps['normal'] = o3d.t.io.read_image(
tiles_texture_data.normal_texture_path)
a_sphere.material.texture_maps['roughness'] = o3d.t.io.read_image(
tiles_texture_data.roughness_texture_path)
terrazzo_texture_data = o3d.data.TerrazzoTexture()
a_ico.material = vis.Material('defaultLit')
a_ico.material.texture_maps['albedo'] = o3d.t.io.read_image(
terrazzo_texture_data.albedo_texture_path)
a_ico.material.texture_maps['normal'] = o3d.t.io.read_image(
terrazzo_texture_data.normal_texture_path)
a_ico.material.texture_maps['roughness'] = o3d.t.io.read_image(
terrazzo_texture_data.roughness_texture_path)
metal_texture_data = o3d.data.MetalTexture()
a_cylinder.material = vis.Material('defaultLit')
a_cylinder.material.texture_maps['albedo'] = o3d.t.io.read_image(
metal_texture_data.albedo_texture_path)
a_cylinder.material.texture_maps['normal'] = o3d.t.io.read_image(
metal_texture_data.normal_texture_path)
a_cylinder.material.texture_maps['roughness'] = o3d.t.io.read_image(
metal_texture_data.roughness_texture_path)
a_cylinder.material.texture_maps['metallic'] = o3d.t.io.read_image(
metal_texture_data.metallic_texture_path)
geoms = [{
"name": "plane",
"geometry": ground_plane
}, {
"name": "cube",
"geometry": a_cube
}, {
"name": "cylinder",
"geometry": a_cylinder
}, {
"name": "ico",
"geometry": a_ico
}, {
"name": "sphere",
"geometry": a_sphere
}]
# Load the helmet
for part in helmet_parts:
name = part.mesh_name
tgeom = o3d.t.geometry.TriangleMesh.from_legacy(part.mesh)
tgeom.material = convert_material_record(
helmet.materials[part.material_idx])
geoms.append({"name": name, "geometry": tgeom})
return geoms
if __name__ == "__main__":
geoms = create_scene()
vis.draw(geoms,
bg_color=(0.8, 0.9, 0.9, 1.0),
show_ui=True,
width=1920,
height=1080)
|
4f783cb3504eb786d1a0402fd4ee5934807d1cac
|
f6aac61a48a87743be9c40fecdc24344bae4d263
|
/scripts/iemre/merge_narr.py
|
7a77af64f2195b21485a965e1cb563dcc24d6f10
|
[
"MIT"
] |
permissive
|
akrherz/iem
|
8714d99b371c8818f7cdde73dd24639e9fc7d42b
|
178015584b7fb5b585f65be6013eaf16fb6db0c7
|
refs/heads/main
| 2023-08-19T02:58:24.507782
| 2023-08-18T12:08:31
| 2023-08-18T12:08:31
| 4,253,774
| 118
| 74
|
MIT
| 2023-09-14T18:28:41
| 2012-05-07T20:32:59
|
Python
|
UTF-8
|
Python
| false
| false
| 1,374
|
py
|
merge_narr.py
|
"""Merge the NARR precip files to netcdf.
Unsure if I have any code that uses these .nc files, alas.
Called from dl/download_narr.py each month around the 9th.
"""
import datetime
import os
import sys
import numpy as np
import pygrib
from pyiem import iemre
from pyiem.util import logger, ncopen
LOG = logger()
def to_netcdf(valid):
"""Persist this 1 hour precip information to the netcdf storage
Recall that this timestep has data for the previous hour"""
fn = (
f"/mesonet/ARCHIVE/data/{valid:%Y/%m/%d}/model/NARR/"
f"apcp_{valid:%Y%m%d%H%M}.grib"
)
if not os.path.isfile(fn):
LOG.warning("Missing file %s", fn)
return False
gribs = pygrib.open(fn)
grb = gribs[1]
val = grb.values
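# NARR precip files are 3-hourly, so convert the hourly offset into a 3-hour timestep index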
tidx = int((iemre.hourly_offset(valid) + 1) / 3)
LOG.info("%s np.min: %s np.max: %s", tidx, np.min(val), np.max(val))
with ncopen(
f"/mesonet/data/iemre/{valid.year}_narr.nc", "a", timeout=300
) as nc:
nc.variables["apcp"][tidx, :, :] = val
return True
def main(argv):
"""Go Main"""
sts = datetime.datetime(int(argv[1]), int(argv[2]), 1)
ets = (sts + datetime.timedelta(days=33)).replace(day=1)
interval = datetime.timedelta(hours=3)
now = sts
while now < ets:
to_netcdf(now)
now += interval
if __name__ == "__main__":
main(sys.argv)
|
c43068153abb7bc52fa5e493f1bbec37a5b7128d
|
997645d6bb9c404f2f195328f29afa0eaa3c55b4
|
/piccolo/columns/readable.py
|
2748648d8c2e473112bee6957f9a43739b5e0f77
|
[
"MIT"
] |
permissive
|
piccolo-orm/piccolo
|
e43ea13c05c53ac00d9d20474c53ad2c49a40e80
|
83ea66323ef5a8e4010ea3ee19f34163bc881ace
|
refs/heads/master
| 2023-08-08T19:31:37.783445
| 2023-07-28T06:19:35
| 2023-07-28T06:19:35
| 155,008,334
| 1,139
| 90
|
MIT
| 2023-09-08T16:54:45
| 2018-10-27T20:53:26
|
Python
|
UTF-8
|
Python
| false
| false
| 1,485
|
py
|
readable.py
|
from __future__ import annotations
import typing as t
from dataclasses import dataclass
from piccolo.columns.base import Selectable
if t.TYPE_CHECKING: # pragma: no cover
from piccolo.columns.base import Column
@dataclass
class Readable(Selectable):
"""
This allows a table to specify a 'readable' representation, which can be
used instead of the primary key in GUIs. See the 'get_readable' Table
method.
"""
template: str
columns: t.Sequence[Column]
output_name: str = "readable"
@property
def _columns_string(self) -> str:
return ", ".join(
i._meta.get_full_name(with_alias=False) for i in self.columns
)
def _get_string(self, operator: str) -> str:
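# Builds SQL of the form <operator>('<template>', <columns>) AS <output_name>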
return (
f"{operator}('{self.template}', {self._columns_string}) AS "
f"{self.output_name}"
)
@property
def sqlite_string(self) -> str:
return self._get_string(operator="PRINTF")
@property
def postgres_string(self) -> str:
return self._get_string(operator="FORMAT")
@property
def cockroach_string(self) -> str:
return self._get_string(operator="FORMAT")
def get_select_string(self, engine_type: str, with_alias=True) -> str:
try:
return getattr(self, f"{engine_type}_string")
except AttributeError as e:
raise ValueError(
f"Unrecognised engine_type - received {engine_type}"
) from e
|
56534a5e7aad3fe209ebaae69c8259a80393594a
|
091a6200be74bf6577c86f623665bcc24e16b02b
|
/BLE_MIDI_Robot_Xylophone/code.py
|
9ddac4bb7ee00f5a22d33521e36a9a0ca8106b6f
|
[
"MIT"
] |
permissive
|
adafruit/Adafruit_Learning_System_Guides
|
b5f7bce40a16da64e7a79d4b39de032f2cca41d4
|
5eaa7a15a437c533b89f359a25983e24bb6b5438
|
refs/heads/main
| 2023-09-05T18:31:41.621956
| 2023-09-05T15:36:09
| 2023-09-05T15:36:09
| 105,065,494
| 937
| 937
|
MIT
| 2023-09-12T18:48:53
| 2017-09-27T20:22:44
|
C
|
UTF-8
|
Python
| false
| false
| 3,288
|
py
|
code.py
|
# SPDX-FileCopyrightText: 2020 Liz Clark for Adafruit Industries
#
# SPDX-License-Identifier: MIT
import time
import board
import busio
from adafruit_mcp230xx.mcp23017 import MCP23017
from digitalio import Direction
import adafruit_ble
from adafruit_ble.advertising.standard import ProvideServicesAdvertisement
import adafruit_ble_midi
# These import auto-register the message type with the MIDI machinery.
# pylint: disable=unused-import
import adafruit_midi
from adafruit_midi.control_change import ControlChange
from adafruit_midi.midi_message import MIDIUnknownEvent
from adafruit_midi.note_off import NoteOff
from adafruit_midi.note_on import NoteOn
from adafruit_midi.pitch_bend import PitchBend
# i2c setup
i2c = busio.I2C(board.SCL, board.SDA)
# i2c addresses for muxes
mcp1 = MCP23017(i2c, address=0x20)
mcp2 = MCP23017(i2c, address=0x21)
# 1st solenoid array, corresponds with 1st mux
noids0 = []
for pin in range(16):
noids0.append(mcp1.get_pin(pin))
for n in noids0:
n.direction = Direction.OUTPUT
# 2nd solenoid array, corresponds with 2nd mux
noids1 = []
for pin in range(16):
noids1.append(mcp2.get_pin(pin))
for n in noids1:
n.direction = Direction.OUTPUT
# MIDI note arrays. notes0 = noids0; notes1 = noids1
notes0 = [55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70]
notes1 = [71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86]
# setup MIDI BLE service
midi_service = adafruit_ble_midi.MIDIService()
advertisement = ProvideServicesAdvertisement(midi_service)
# BLE connection setup
ble = adafruit_ble.BLERadio()
if ble.connected:
for c in ble.connections:
c.disconnect()
# MIDI in setup
midi = adafruit_midi.MIDI(midi_in=midi_service, in_channel=0)
# start BLE advertising
print("advertising")
ble.start_advertising(advertisement)
# delay for solenoids
speed = 0.01
while True:
# waiting for BLE connection
print("Waiting for connection")
while not ble.connected:
pass
print("Connected")
# delay after connection established
time.sleep(1.0)
while ble.connected:
# msg holds MIDI messages
msg = midi.receive()
for i in range(16):
# states for solenoid on/off
# noid0 = mux1
# noid1 = mux2
noid0_output = noids0[i]
noid1_output = noids1[i]
# states for MIDI note received
# notes0 = mux1
# notes1 = mux2
notes0_played = notes0[i]
notes1_played = notes1[i]
# if NoteOn msg comes in and the MIDI note # matches with predefined notes:
if isinstance(msg, NoteOn) and msg.note == notes0_played:
print(time.monotonic(), msg.note)
# solenoid is triggered
noid0_output.value = True
# quick delay
time.sleep(speed)
# solenoid retracts
noid0_output.value = False
# identical to above if statement but for mux2
if isinstance(msg, NoteOn) and msg.note == notes1_played:
print(time.monotonic(), msg.note)
noid1_output.value = True
time.sleep(speed)
noid1_output.value = False
# if BLE disconnects try reconnecting
print("Disconnected")
print()
ble.start_advertising(advertisement)
|
b36db3ecfd2dd3f22ea0cf7a080cbffbd4b1b5b2
|
6c37d1d2437a08e43b13d621d4a8da4da7135b3a
|
/yt_dlp/extractor/freespeech.py
|
aea551379b84dfa4f7fdd133d499b85cbccf54fb
|
[
"Unlicense",
"GPL-2.0-or-later",
"MPL-2.0",
"BSD-3-Clause",
"GPL-3.0-or-later",
"LGPL-2.1-only",
"BSD-2-Clause",
"MIT"
] |
permissive
|
yt-dlp/yt-dlp
|
be040bde10cc40258c879c75ab30215686352824
|
d3d81cc98f554d0adb87d24bfd6fabaaa803944d
|
refs/heads/master
| 2023-09-05T21:15:21.050538
| 2023-09-05T20:35:23
| 2023-09-05T20:35:23
| 307,260,205
| 52,742
| 5,376
|
Unlicense
| 2023-09-14T05:22:08
| 2020-10-26T04:22:55
|
Python
|
UTF-8
|
Python
| false
| false
| 1,016
|
py
|
freespeech.py
|
from .common import InfoExtractor
from .youtube import YoutubeIE
class FreespeechIE(InfoExtractor):
IE_NAME = 'freespeech.org'
_VALID_URL = r'https?://(?:www\.)?freespeech\.org/stories/(?P<id>.+)'
_TEST = {
'add_ie': ['Youtube'],
'url': 'http://www.freespeech.org/stories/fcc-announces-net-neutrality-rollback-whats-stake/',
'info_dict': {
'id': 'waRk6IPqyWM',
'ext': 'mp4',
'title': 'What\'s At Stake - Net Neutrality Special',
'description': 'Presented by MNN and FSTV',
'upload_date': '20170728',
'uploader_id': 'freespeechtv',
'uploader': 'freespeechtv',
},
}
def _real_extract(self, url):
display_id = self._match_id(url)
webpage = self._download_webpage(url, display_id)
youtube_url = self._search_regex(
r'data-video-url="([^"]+)"',
webpage, 'youtube url')
return self.url_result(youtube_url, YoutubeIE.ie_key())
|
76bbfb598c975cc61811fd291a8620626af3a33f
|
376e1818d427b5e4d32fa6dd6c7b71e9fd88afdb
|
/lang/python27/patches/patch-Lib___osx__support.py
|
c377d2a50665e06c6361d690391aed35389fb26e
|
[] |
no_license
|
NetBSD/pkgsrc
|
a0732c023519650ef821ab89c23ab6ab59e25bdb
|
d042034ec4896cc5b47ed6f2e5b8802d9bc5c556
|
refs/heads/trunk
| 2023-09-01T07:40:12.138283
| 2023-09-01T05:25:19
| 2023-09-01T05:25:19
| 88,439,572
| 321
| 138
| null | 2023-07-12T22:34:14
| 2017-04-16T20:04:15
| null |
UTF-8
|
Python
| false
| false
| 1,003
|
py
|
patch-Lib___osx__support.py
|
$NetBSD: patch-Lib___osx__support.py,v 1.1 2021/06/23 18:30:24 schmonz Exp $
macOS arm64 support, via MacPorts.
--- Lib/_osx_support.py.orig 2021-06-22 19:20:03.000000000 +0000
+++ Lib/_osx_support.py
@@ -470,6 +470,8 @@ def get_platform_osx(_config_vars, osnam
if len(archs) == 1:
machine = archs[0]
+ elif archs == ('arm64', 'x86_64'):
+ machine = 'universal2'
elif archs == ('i386', 'ppc'):
machine = 'fat'
elif archs == ('i386', 'x86_64'):
@@ -484,6 +486,10 @@ def get_platform_osx(_config_vars, osnam
raise ValueError(
"Don't know machine value for archs=%r" % (archs,))
+ elif machine == 'arm':
+ # No 32-bit arm support on macOS
+ machine = 'arm64'
+
elif machine == 'i386':
# On OSX the machine type returned by uname is always the
# 32-bit variant, even if the executable architecture is
|
2669318c27591fc5ccdafd55becea285de3d9358
|
e5bfe32c855cfda798a064d8076339b46f21c53c
|
/routeros_api/api_structure.py
|
7899956d7f667abd16b5151236f9371b202cec9a
|
[
"Python-2.0",
"MIT"
] |
permissive
|
socialwifi/RouterOS-api
|
04e4012db578c9176de8bb6186ff52b67bf3c1b9
|
5580d046c96867380546ca76699337b50e74aefc
|
refs/heads/master
| 2023-07-08T23:39:14.875793
| 2023-03-15T19:14:24
| 2023-03-15T19:14:24
| 17,862,006
| 234
| 106
|
MIT
| 2023-06-28T20:24:09
| 2014-03-18T10:33:59
|
Python
|
UTF-8
|
Python
| false
| false
| 3,342
|
py
|
api_structure.py
|
import abc
import collections
import datetime
import ipaddress
import re
class Field(object):
__metaclass__ = abc.ABCMeta
@abc.abstractmethod
def get_mikrotik_value(self, arg):
"""
:rtype: bytes
"""
raise NotImplementedError()
@abc.abstractmethod
def get_python_value(self, bytes):
"""
:type bytes: bytes
"""
raise NotImplementedError()
class StringField(Field):
def get_mikrotik_value(self, string):
return string.encode()
def get_python_value(self, bytes):
return bytes.decode()
class BytesField(Field):
def get_mikrotik_value(self, bytes):
return bytes
def get_python_value(self, bytes):
return bytes
class BooleanField(Field):
def get_mikrotik_value(self, condition):
return b'yes' if condition else b'no'
def get_python_value(self, bytes):
assert bytes in (b'yes', b'true', b'no', b'false')
return bytes in (b'yes', b'true')
class IntegerField(Field):
def get_mikrotik_value(self, number):
return str(number).encode()
def get_python_value(self, bytes):
return int(bytes.decode())
class TimedeltaField(Field):
def get_mikrotik_value(self, timedelta):
if timedelta is None:
return b'none'
else:
seconds = int(timedelta.total_seconds())
return '{}s'.format(seconds).encode()
def get_python_value(self, bytes):
if bytes == b'none':
return None
else:
return self.parse_mikrotik_timedelta(bytes.decode())
def parse_mikrotik_timedelta(self, time_string):
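# Accepts both the new '4w2d1h30m10s' style and the old '1d02:30:10.500' style uptime strings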
new_timedelta_format = (
r'^((?P<weeks>\d+)w)?((?P<days>\d+)d)?'
r'((?P<hours>\d+)h)?((?P<minutes>\d+)m)?((?P<seconds>\d+)s)?'
r'((?P<milliseconds>\d+)ms)?$')
old_timedelta_format = (
r'^((?P<weeks>\d+)w)?((?P<days>\d+)d)?'
r'(?P<hours>\d+):(?P<minutes>\d+):(?P<seconds>\d+)'
r'(\.(?P<milliseconds>\d+))?$')
match = re.match(new_timedelta_format, time_string)
if not match:
match = re.match(old_timedelta_format, time_string)
if match:
groups = dict((k, int(v)) for k, v in match.groupdict('0').items())
return datetime.timedelta(**groups)
else:
raise ValueError('{} does not match any mikrotik uptime format'
.format(time_string))
class IpNetworkField(Field):
def get_mikrotik_value(self, ip_network):
if ip_network:
return str(ip_network).encode()
else:
return b''
def get_python_value(self, bytes):
if bytes:
return ipaddress.ip_network(bytes.decode())
else:
return None
class ListField(Field):
def __init__(self, subfield):
self.subfield = subfield
def get_mikrotik_value(self, objects):
return b','.join(
self.subfield.get_mikrotik_value(obj) for obj in objects)
def get_python_value(self, bytes):
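# Prefer ';' as the separator when the value contains one, otherwise split on ','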
separator = b',' if b';' not in bytes else b';'
return [
self.subfield.get_python_value(serialized)
for serialized in bytes.split(separator)]
default_structure = collections.defaultdict(StringField)
|
005e7f955ddc86fd961e21d56c4eb51a44e310a4
|
00efa24c6c89c2c41e641431c3f685f02f7a26be
|
/expects/texts.py
|
3b1a620b5ec56dab10d98e316451e1a38b87fd70
|
[
"Apache-2.0"
] |
permissive
|
jaimegildesagredo/expects
|
0e80c3ed4e3063abcbe45fc3b1af79bd248eb832
|
7c1bbb1833fa5f5cd4e3428cfd8859b122011f53
|
refs/heads/master
| 2022-11-24T23:48:11.458697
| 2022-11-15T17:44:07
| 2022-11-15T17:44:07
| 10,266,355
| 208
| 34
|
NOASSERTION
| 2022-11-15T09:14:42
| 2013-05-24T12:55:59
|
Python
|
UTF-8
|
Python
| false
| false
| 658
|
py
|
texts.py
|
# -*- coding: utf-8 -*-
from .matchers import default_matcher
def plain_enumerate(args, kwargs=None):
if kwargs is None:
kwargs = {}
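# e.g. plain_enumerate((1, 2, 3)) -> '1, 2 and 3'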
tokens = []
for arg in args:
tokens.append(repr(arg))
for k, v in _sorted_items(kwargs):
tokens.append('{0!r} {1!r}'.format(k, default_matcher(v)))
total = len(args) + len(kwargs)
result = ''
for i, token in enumerate(tokens):
result += token
if i == total - 2:
result += ' and '
elif i != total - 1:
result += ', '
return result
def _sorted_items(dct):
return sorted(dct.items(), key=lambda args: args[0])
|
7730b01829e31edc09ce6e127ed7a7b8f7fc2faa
|
88ae8695987ada722184307301e221e1ba3cc2fa
|
/third_party/catapult/skia_bridge/tests/perf_api/query_anomalies_test.py
|
2d0dcdcd8aaf3650d26578f8e3560d1c7acab09c
|
[
"Apache-2.0",
"LGPL-2.0-or-later",
"MIT",
"GPL-1.0-or-later",
"BSD-3-Clause"
] |
permissive
|
iridium-browser/iridium-browser
|
71d9c5ff76e014e6900b825f67389ab0ccd01329
|
5ee297f53dc7f8e70183031cff62f37b0f19d25f
|
refs/heads/master
| 2023-08-03T16:44:16.844552
| 2023-07-20T15:17:00
| 2023-07-23T16:09:30
| 220,016,632
| 341
| 40
|
BSD-3-Clause
| 2021-08-13T13:54:45
| 2019-11-06T14:32:31
| null |
UTF-8
|
Python
| false
| false
| 7,434
|
py
|
query_anomalies_test.py
|
# Copyright 2023 The Chromium Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from __future__ import absolute_import
import json
import os
import sys
from pathlib import Path
import unittest
app_path = Path(__file__).parent.parent.parent
if str(app_path) not in sys.path:
sys.path.insert(0, str(app_path))
from application import app
from application.perf_api import query_anomalies
from google.cloud import datastore
import mock
class QueryAnomaliesTest(unittest.TestCase):
def setUp(self):
self.client = app.Create().test_client()
os.environ['DISABLE_METRICS'] = 'True'
def testInvalidRequestJson(self):
test_name = 'master/bot/test1/metric'
with mock.patch('application.perf_api.auth_helper.AuthorizeBearerToken') \
as auth_mock:
auth_mock.return_value = True
response = self.client.post(
'/anomalies/find',
data='{"tests":["%s"],' # Wrong request param
'"max_revision":"1234", "min_revision":"1233" add invalid}'
% test_name)
self.assertEqual(400, response.status_code)
self.assertEqual("Malformed Json", response.get_data(as_text=True))
def testInvalidRequestParam(self):
test_name = 'master/bot/test1/metric'
with mock.patch('application.perf_api.auth_helper.AuthorizeBearerToken') \
as auth_mock:
auth_mock.return_value = True
response = self.client.post(
'/anomalies/find',
data='{"SearchTests":["%s"],' # Wrong request param
'"max_revision":"1234", "min_revision":"1233"}'
% test_name)
self.assertEqual(400, response.status_code)
self.assertTrue("['tests']" in response.get_data(as_text=True))
@mock.patch('application.perf_api.datastore_client'
'.DataStoreClient.QueryAnomalies')
def testNoAnomaliesExist(self, query_mock):
query_mock.return_value = []
test_name = 'master/bot/test1/metric'
with mock.patch('application.perf_api.auth_helper.AuthorizeBearerToken') \
as auth_mock:
auth_mock.return_value = True
response = self.client.post(
'/anomalies/find',
data='{"tests":["%s"], "max_revision":"1234", "min_revision":"1233"}'
% test_name)
data = json.loads(response.get_data(as_text=True))
self.assertEqual({}, data["anomalies"], 'No anomalies expected')
@mock.patch('application.perf_api.datastore_client'
'.DataStoreClient.QueryAnomalies')
def testNoAnomaliesFound(self, query_mock):
test_name = 'master/bot/test1/metric'
client = datastore.Client()
def mock_query(tests, min_revision, max_revision):
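# Return a fake anomaly only when the queried tests and revision range cover it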
start_rev = 1233
end_rev = 1234
if test_name in tests and \
start_rev >= int(min_revision) and end_rev <= int(max_revision):
test_key1 = client.key('TestMetadata', test_name)
anomaly_key = client.key('Anomaly', '1111')
test_anomaly = datastore.entity.Entity(anomaly_key)
test_anomaly['start_revision'] = start_rev
test_anomaly['end_revision'] = end_rev
test_anomaly['test'] = test_key1
return [test_anomaly]
return []
query_mock.side_effect = mock_query
test_name_2 = 'some/other/test'
with mock.patch('application.perf_api.auth_helper.AuthorizeBearerToken') \
as auth_mock:
auth_mock.return_value = True
# Search for a test for which anomaly does not exist
response = self.client.post(
'/anomalies/find',
data='{"tests":["%s"], "max_revision":"1234", "min_revision":"1233"}'
% test_name_2)
data = json.loads(response.get_data(as_text=True))
self.assertEqual({}, data["anomalies"], 'No anomalies expected')
# Search for an existing test anomaly, but a different revision
response = self.client.post(
'/anomalies/find',
data='{"tests":["%s"], "max_revision":"1232", "min_revision":"1230"}'
% test_name)
data = json.loads(response.get_data(as_text=True))
self.assertEqual({}, data["anomalies"], 'No anomalies expected')
@mock.patch('application.perf_api.datastore_client'
'.DataStoreClient.QueryAnomalies')
def testAnomalyRequestBatching(self, query_mock):
query_mock.return_value = []
batch_size = query_anomalies.DATASTORE_TEST_BATCH_SIZE
batch_count = 2
test_count = batch_size*batch_count
tests = []
for i in range(test_count):
tests.append('master/bot/benchmark/test_%i' % i)
with mock.patch('application.perf_api.auth_helper.AuthorizeBearerToken') \
as auth_mock:
auth_mock.return_value = True
# Replace the single inverted comma with double to render the json
test_str = str(tests).replace('\'', '"')
request_data = \
'{"tests":%s, "max_revision":"1234", "min_revision":"1233"}'% test_str
response = self.client.post(
'/anomalies/find',
data=request_data)
data = json.loads(response.get_data(as_text=True))
self.assertEqual({}, data["anomalies"], 'No anomalies expected')
self.assertEqual(batch_count, query_mock.call_count,
'Datastore expected to be queried exactly %i times' %
batch_count)
@mock.patch('application.perf_api.datastore_client'
'.DataStoreClient.QueryAnomalies')
def testAnomaliesFound(self, query_mock):
test_name = 'master/bot/test1/metric'
client = datastore.Client()
def create_anomaly(key, start_rev, end_rev):
test_anomaly = datastore.entity.Entity(key)
test_anomaly['start_revision'] = start_rev
test_anomaly['end_revision'] = end_rev
test_anomaly['test'] = test_key1
return test_anomaly
start_rev = 1233
end_rev = 1234
test_key1 = client.key('TestMetadata', test_name)
anomaly_key_1 = client.key('Anomaly', 1111)
anomaly_key_2 = client.key('Anomaly', 2222)
test_anomaly_1 = create_anomaly(anomaly_key_1, start_rev, end_rev)
test_anomaly_2 = create_anomaly(anomaly_key_2, start_rev, end_rev)
def mock_query(tests, min_revision, max_revision):
if test_name in tests and \
start_rev >= int(min_revision) and end_rev <= int(max_revision):
return [test_anomaly_1, test_anomaly_2]
return []
query_mock.side_effect = mock_query
with mock.patch('application.perf_api.auth_helper.AuthorizeBearerToken') \
as auth_mock:
auth_mock.return_value = True
response = self.client.post(
'/anomalies/find',
data='{"tests":["%s"], "max_revision":"1234", "min_revision":"1233"}'
% test_name)
data = response.get_data(as_text=True)
response_data = json.loads(data)
self.assertIsNotNone(response_data)
anomaly_list = response_data["anomalies"][test_name]
self.assertIsNotNone(anomaly_list, 'Anomaly list for test expected.')
self.assertEqual(2, len(anomaly_list), 'Two anomalies expected in list')
anomaly_data = anomaly_list[0]
self.assertEqual(test_name, anomaly_data['test_path'])
self.assertEqual(test_anomaly_1['start_revision'],
anomaly_data['start_revision'])
self.assertEqual(test_anomaly_1['end_revision'],
anomaly_data['end_revision'])
if __name__ == '__main__':
unittest.main()
|
d543e11fe3117371e15b2a6521ae874473ac4872
|
5a12bac57dcd949c67de7916e784b06fdbfa25c2
|
/kik_unofficial/protobuf/contactlist/v2/contact_list_common_pb2.py
|
542876842da507ca28c14b72bc6faa954cc46262
|
[
"MIT"
] |
permissive
|
tomer8007/kik-bot-api-unofficial
|
582f5e7b06640682873dd6e09bd8174e776f1c56
|
c860cfb59c20f5e351add17edb56b814c3d594a6
|
refs/heads/new
| 2023-08-30T12:26:08.351092
| 2023-08-18T00:20:06
| 2023-08-18T00:20:06
| 95,992,460
| 148
| 120
|
MIT
| 2023-08-18T00:20:08
| 2017-07-01T23:26:15
|
Python
|
UTF-8
|
Python
| false
| true
| 7,218
|
py
|
contact_list_common_pb2.py
|
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: contactlist/v2/contact_list_common.proto
"""Generated protocol buffer code."""
from google.protobuf.internal import builder as _builder
from google.protobuf import descriptor as _descriptor
from google.protobuf import descriptor_pool as _descriptor_pool
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
import kik_unofficial.protobuf.common_model_pb2 as common__model__pb2
import kik_unofficial.protobuf.protobuf_validation_pb2 as protobuf__validation__pb2
import kik_unofficial.protobuf.common.v2_pb2 as model
_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, globals())
_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'contactlist.v2.contact_list_common_pb2', globals())
if _descriptor._USE_C_DESCRIPTORS == False:
DESCRIPTOR._options = None
DESCRIPTOR._serialized_options = b'\n com.kik.gen.contactlist.v2.modelZVgithub.com/kikinteractive/xiphias-model-common/generated/go/contactlist/v2;contactlist\240\001\001\242\002\016KPBCommonCNLV2'
_USERFRIENDCONTEXT_SCAN.fields_by_name['scan_code']._options = None
_USERFRIENDCONTEXT_SCAN.fields_by_name['scan_code']._serialized_options = b'\312\235%\005\010\0010\364\003'
_USERFRIENDCONTEXT_SCAN.fields_by_name['nonce_key']._options = None
_USERFRIENDCONTEXT_SCAN.fields_by_name['nonce_key']._serialized_options = b'\312\235%\005\010\0010\364\003'
_USERFRIENDCONTEXT_WEBKIKME.fields_by_name['related_url']._options = None
_USERFRIENDCONTEXT_WEBKIKME.fields_by_name['related_url']._serialized_options = b'\312\235%\005\010\0010\210\''
_USERFRIENDCONTEXT_WEBPROFILEKIKME.fields_by_name['related_url']._options = None
_USERFRIENDCONTEXT_WEBPROFILEKIKME.fields_by_name['related_url']._serialized_options = b'\312\235%\005\010\0010\210\''
_USERFRIENDCONTEXT_CARDPROFILEKIKPROTOCOL.fields_by_name['related_card_name']._options = None
_USERFRIENDCONTEXT_CARDPROFILEKIKPROTOCOL.fields_by_name['related_card_name']._serialized_options = b'\312\235%\005\010\0010\200('
_USERFRIENDCONTEXT_CARDPROFILEKIKPROTOCOL.fields_by_name['related_url']._options = None
_USERFRIENDCONTEXT_CARDPROFILEKIKPROTOCOL.fields_by_name['related_url']._serialized_options = b'\312\235%\0030\210\''
_USERFRIENDCONTEXT_CARDSENDKIKTOUSER.fields_by_name['related_card_name']._options = None
_USERFRIENDCONTEXT_CARDSENDKIKTOUSER.fields_by_name['related_card_name']._serialized_options = b'\312\235%\005\010\0010\200('
_USERFRIENDCONTEXT_CARDSENDKIKTOUSER.fields_by_name['related_url']._options = None
_USERFRIENDCONTEXT_CARDSENDKIKTOUSER.fields_by_name['related_url']._serialized_options = b'\312\235%\0030\210\''
_USERFRIENDCONTEXT_CARDOPENCONVERSATION.fields_by_name['related_card_name']._options = None
_USERFRIENDCONTEXT_CARDOPENCONVERSATION.fields_by_name['related_card_name']._serialized_options = b'\312\235%\005\010\0010\200('
_USERFRIENDCONTEXT_CARDOPENCONVERSATION.fields_by_name['related_url']._options = None
_USERFRIENDCONTEXT_CARDOPENCONVERSATION.fields_by_name['related_url']._serialized_options = b'\312\235%\0030\210\''
_USERFRIENDCONTEXT_CARDOPENPROFILE.fields_by_name['related_card_name']._options = None
_USERFRIENDCONTEXT_CARDOPENPROFILE.fields_by_name['related_card_name']._serialized_options = b'\312\235%\005\010\0010\200('
_USERFRIENDCONTEXT_CARDOPENPROFILE.fields_by_name['related_url']._options = None
_USERFRIENDCONTEXT_CARDOPENPROFILE.fields_by_name['related_url']._serialized_options = b'\312\235%\0030\210\''
_USERFRIENDCONTEXT._serialized_start=138
_USERFRIENDCONTEXT._serialized_end=3856
_USERFRIENDCONTEXT_DEFAULTCONTEXT._serialized_start=2716
_USERFRIENDCONTEXT_DEFAULTCONTEXT._serialized_end=2732
_USERFRIENDCONTEXT_SCAN._serialized_start=2734
_USERFRIENDCONTEXT_SCAN._serialized_end=2800
_USERFRIENDCONTEXT_EXPLICITUSERNAMESEARCH._serialized_start=2802
_USERFRIENDCONTEXT_EXPLICITUSERNAMESEARCH._serialized_end=2826
_USERFRIENDCONTEXT_INLINEUSERNAMESEARCH._serialized_start=2828
_USERFRIENDCONTEXT_INLINEUSERNAMESEARCH._serialized_end=2850
_USERFRIENDCONTEXT_INLINEPROMOTED._serialized_start=2852
_USERFRIENDCONTEXT_INLINEPROMOTED._serialized_end=2868
_USERFRIENDCONTEXT_FUZZYMATCHING._serialized_start=2870
_USERFRIENDCONTEXT_FUZZYMATCHING._serialized_end=2885
_USERFRIENDCONTEXT_ADDRESSBOOKMATCHING._serialized_start=2887
_USERFRIENDCONTEXT_ADDRESSBOOKMATCHING._serialized_end=2908
_USERFRIENDCONTEXT_PROMOTEDCHATSLIST._serialized_start=2910
_USERFRIENDCONTEXT_PROMOTEDCHATSLIST._serialized_end=2929
_USERFRIENDCONTEXT_TALKTOAD._serialized_start=2931
_USERFRIENDCONTEXT_TALKTOAD._serialized_end=2941
_USERFRIENDCONTEXT_FINDPEOPLEAD._serialized_start=2943
_USERFRIENDCONTEXT_FINDPEOPLEAD._serialized_end=2957
_USERFRIENDCONTEXT_GROUPADDALL._serialized_start=2959
_USERFRIENDCONTEXT_GROUPADDALL._serialized_end=3017
_USERFRIENDCONTEXT_GROUPINFOADD._serialized_start=3019
_USERFRIENDCONTEXT_GROUPINFOADD._serialized_end=3078
_USERFRIENDCONTEXT_WEBKIKME._serialized_start=3080
_USERFRIENDCONTEXT_WEBKIKME._serialized_end=3122
_USERFRIENDCONTEXT_WEBPROFILEKIKME._serialized_start=3124
_USERFRIENDCONTEXT_WEBPROFILEKIKME._serialized_end=3173
_USERFRIENDCONTEXT_CARDPROFILEKIKPROTOCOL._serialized_start=3175
_USERFRIENDCONTEXT_CARDPROFILEKIKPROTOCOL._serialized_end=3267
_USERFRIENDCONTEXT_CARDSENDKIKTOUSER._serialized_start=3269
_USERFRIENDCONTEXT_CARDSENDKIKTOUSER._serialized_end=3356
_USERFRIENDCONTEXT_CARDOPENCONVERSATION._serialized_start=3358
_USERFRIENDCONTEXT_CARDOPENCONVERSATION._serialized_end=3448
_USERFRIENDCONTEXT_CARDOPENPROFILE._serialized_start=3450
_USERFRIENDCONTEXT_CARDOPENPROFILE._serialized_end=3535
_USERFRIENDCONTEXT_SUGGESTFRIEND._serialized_start=3537
_USERFRIENDCONTEXT_SUGGESTFRIEND._serialized_end=3552
_USERFRIENDCONTEXT_FRIENDPICKER._serialized_start=3554
_USERFRIENDCONTEXT_FRIENDPICKER._serialized_end=3568
_USERFRIENDCONTEXT_LINKATTRIBUTION._serialized_start=3570
_USERFRIENDCONTEXT_LINKATTRIBUTION._serialized_end=3587
_USERFRIENDCONTEXT_USERNAMEMENTION._serialized_start=3589
_USERFRIENDCONTEXT_USERNAMEMENTION._serialized_end=3606
_USERFRIENDCONTEXT_BOTMENTION._serialized_start=3608
_USERFRIENDCONTEXT_BOTMENTION._serialized_end=3620
_USERFRIENDCONTEXT_BOTMENTIONREPLY._serialized_start=3622
_USERFRIENDCONTEXT_BOTMENTIONREPLY._serialized_end=3639
_USERFRIENDCONTEXT_BOTSHOP._serialized_start=3641
_USERFRIENDCONTEXT_BOTSHOP._serialized_end=3650
_USERFRIENDCONTEXT_GROUPMEMBERADD._serialized_start=3652
_USERFRIENDCONTEXT_GROUPMEMBERADD._serialized_end=3668
_USERFRIENDCONTEXT_GROUPINFOMENUADD._serialized_start=3670
_USERFRIENDCONTEXT_GROUPINFOMENUADD._serialized_end=3733
_USERFRIENDCONTEXT_GROUPMENUADD._serialized_start=3735
_USERFRIENDCONTEXT_GROUPMENUADD._serialized_end=3794
_USERFRIENDCONTEXT_PULLUSERNAMESEARCH._serialized_start=3796
_USERFRIENDCONTEXT_PULLUSERNAMESEARCH._serialized_end=3816
_USERFRIENDCONTEXT_SENDTOUSERNAMESEARCH._serialized_start=3818
_USERFRIENDCONTEXT_SENDTOUSERNAMESEARCH._serialized_end=3840
# @@protoc_insertion_point(module_scope)
|
7788cdd30507cd92bdbac653243b7cee5745d238
|
407d194b52fe9cf75cca9d6f3c162a565549a1ae
|
/Utils/test/test_redacted_settings.py
|
46962ff6a69a6fb3537559d2a57c83640b1bdd45
|
[
"Apache-2.0"
] |
permissive
|
Azure/azure-linux-extensions
|
808761f927045f00548aa68e38d4bec8651c0eba
|
3cea1567fc4f4eb5beea9884153e92d70610394d
|
refs/heads/master
| 2023-08-27T14:06:05.775617
| 2023-08-23T01:56:05
| 2023-08-23T01:56:05
| 19,841,123
| 300
| 314
|
Apache-2.0
| 2023-09-14T04:21:26
| 2014-05-16T01:38:49
|
Python
|
UTF-8
|
Python
| false
| false
| 2,015
|
py
|
test_redacted_settings.py
|
#!/usr/bin/env python
#
# Tests for redacted settings
#
# Copyright 2014 Microsoft Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import Utils.HandlerUtil as Util
class TestRedactedProtectedSettings(unittest.TestCase):
def test_redacted_protected_settings(self):
redacted = Util.HandlerUtility.redact_protected_settings(settings_original)
self.assertIn('"protectedSettings": "*** REDACTED ***"', redacted)
self.assertIn('"protectedSettingsCertThumbprint": "*** REDACTED ***"', redacted)
settings_original = """\
{
"runtimeSettings": [{
"handlerSettings": {
"protectedSettingsCertThumbprint": "9310D2O49D7216D4A1CEDCE9D8A7CE5DBD7FB7BF",
"protectedSettings": "MIIC4AYJKoZIhvcNAQcWoIIB0TCDEc0CAQAxggFpMIIBZQIBADBNMDkxNzA1BgoJkiaJk/IsZAEZFidXaW5kb3dzIEF6dXJlIENSUCBDZXJ0aWZpY2F0ZSBHZW5lcmF0b3ICEB8f7DyzHLGjSDLnEWd4YeAwDQYJKoZIhvcNAQEBBQAEggEAiZj2gQtT4MpdTaEH8rUVFB/8Ucc8OxGFWu8VKbIdoHLKp1WcDb7Vlzv6fHLBIccgXGuR1XHTvtlD4QiKpSet341tPPug/R5ZtLSRz1pqtXZdrFcuuSxOa6ib/+la5ukdygcVwkEnmNSQaiipPKyqPH2JsuhmGCdXFiKwCSTrgGE6GyCBtaK9KOf48V/tYXHnDGrS9q5a1gRF5KVI2B26UYSO7V7pXjzYCd/Sp9yGj7Rw3Kqf9Lpix/sPuqWjV6e2XFlD3YxaHSeHVnLI/Bkz2E6Ri8yfPYus52r/mECXPL2YXqY9dGyrlKKIaD9AuzMyvvy1A74a9VBq7zxQQ4adEzBbBgkqhkiG9w0BBwEwFAYIKoZIhvcNAwcECDyEf4mRrmWJgDhW4j2nRNTJU4yXxocQm/PhAr39Um7n0pgI2Cn28AabYtsHWjKqr8Al9LX6bKm8cnmnLjqTntphCw==",
"publicSettings": {}
}
}]
}
"""
if __name__ == '__main__':
unittest.main()
|
25840b4641e9bbbd9e3f61a45e12f757913a4063
|
87f6712f676d2b1aad7921e1b4d4387195a3bf96
|
/tests/test_helper.py
|
b1af2859a7e6a093bd232fb220ed73bab66c1d18
|
[
"Apache-2.0"
] |
permissive
|
Fatal1ty/mashumaro
|
e3d53b52029a29a3b495132ff2ea46ef07081549
|
c0a4cf8943de51a626bc30681c935a628565cecf
|
refs/heads/master
| 2023-08-07T08:43:30.227999
| 2023-08-02T19:59:28
| 2023-08-02T19:59:28
| 156,855,347
| 617
| 40
|
Apache-2.0
| 2023-09-13T18:26:28
| 2018-11-09T11:50:56
|
Python
|
UTF-8
|
Python
| false
| false
| 1,918
|
py
|
test_helper.py
|
from dataclasses import dataclass, field
from datetime import date, datetime
import pytest
from mashumaro import DataClassDictMixin, field_options
from mashumaro.helper import pass_through
from mashumaro.types import SerializationStrategy
def test_field_options_helper():
assert field_options() == {
"serialize": None,
"deserialize": None,
"serialization_strategy": None,
"alias": None,
}
def serialize(x):
return x # pragma no cover
def deserialize(x):
return x # pragma no cover
class TestSerializationStrategy(SerializationStrategy): # pragma no cover
def deserialize(self, value):
return value
def serialize(self, value):
return value
serialization_strategy = TestSerializationStrategy()
alias = "alias"
assert field_options(
serialize=serialize,
deserialize=deserialize,
serialization_strategy=serialization_strategy,
alias=alias,
) == {
"serialize": serialize,
"deserialize": deserialize,
"serialization_strategy": serialization_strategy,
"alias": alias,
}
def test_pass_through():
with pytest.raises(NotImplementedError):
pass_through()
assert pass_through.serialize(123) == 123
assert pass_through.deserialize(123) == 123
def test_dataclass_with_pass_through():
@dataclass
class DataClass(DataClassDictMixin):
x: datetime = field(
metadata=field_options(
serialize=pass_through,
deserialize=pass_through,
)
)
y: date = field(
metadata=field_options(serialization_strategy=pass_through)
)
x = datetime.utcnow()
y = x.date()
instance = DataClass(x, y)
assert instance.to_dict() == {"x": x, "y": y}
assert instance.from_dict({"x": x, "y": y}) == instance
|
d7119b6ea08919a7b66e75349c5b5c0f9a4f9f5d
|
150a7b11cb531f8bc2a045aefcf2ebe1d151efa3
|
/ocs_ci/deployment/helpers/lso_helpers.py
|
bf8b4f22e0f1c9cb1eb7ce2a4b8f660d0cb41d47
|
[
"MIT"
] |
permissive
|
red-hat-storage/ocs-ci
|
c7ac414e1b86552da0439223dfa9bca39977f31a
|
5e9e504957403148e413326f65c3769bf9d8eb39
|
refs/heads/master
| 2023-08-17T16:19:51.154403
| 2023-08-17T13:27:12
| 2023-08-17T13:27:12
| 179,558,938
| 146
| 210
|
MIT
| 2023-09-14T16:38:44
| 2019-04-04T19:00:02
|
Python
|
UTF-8
|
Python
| false
| false
| 16,713
|
py
|
lso_helpers.py
|
"""
This module contains helper functions needed for
LSO (local storage operator) deployment.
"""
import json
import logging
import tempfile
from ocs_ci.deployment.disconnected import prune_and_mirror_index_image
from ocs_ci.framework import config
from ocs_ci.ocs import constants, ocp, defaults
from ocs_ci.ocs.exceptions import CommandFailed, UnsupportedPlatformError
from ocs_ci.ocs.node import get_nodes, get_compute_node_names
from ocs_ci.utility import templating, version
from ocs_ci.utility.deployment import get_ocp_ga_version
from ocs_ci.utility.localstorage import get_lso_channel
from ocs_ci.utility.retry import retry
from ocs_ci.utility.utils import (
run_cmd,
wait_for_machineconfigpool_status,
wipe_all_disk_partitions_for_node,
)
logger = logging.getLogger(__name__)
def setup_local_storage(storageclass):
"""
Set up the necessary resources for enabling local storage.
Args:
storageclass (string): storageClassName value to be used in
LocalVolume CR based on LOCAL_VOLUME_YAML
"""
# Get the worker nodes
workers = get_nodes(node_type="worker")
worker_names = [worker.name for worker in workers]
logger.debug("Workers: %s", worker_names)
ocp_version = version.get_semantic_ocp_version_from_config()
ocs_version = version.get_semantic_ocs_version_from_config()
ocp_ga_version = get_ocp_ga_version(ocp_version)
if not ocp_ga_version:
create_optional_operators_catalogsource_non_ga()
logger.info("Retrieving local-storage-operator data from yaml")
lso_data = list(
templating.load_yaml(constants.LOCAL_STORAGE_OPERATOR, multi_document=True)
)
# ensure namespace is correct
lso_namespace = config.ENV_DATA["local_storage_namespace"]
for data in lso_data:
if data["kind"] == "Namespace":
data["metadata"]["name"] = lso_namespace
else:
data["metadata"]["namespace"] = lso_namespace
if data["kind"] == "OperatorGroup":
data["spec"]["targetNamespaces"] = [lso_namespace]
# Update local-storage-operator subscription data with channel
for data in lso_data:
if data["kind"] == "Subscription":
data["spec"]["channel"] = get_lso_channel()
if not ocp_ga_version:
if data["kind"] == "Subscription":
data["spec"]["source"] = "optional-operators"
# Create temp yaml file and create local storage operator
logger.info(
"Creating temp yaml file with local-storage-operator data:\n %s", lso_data
)
lso_data_yaml = tempfile.NamedTemporaryFile(
mode="w+", prefix="local_storage_operator", delete=False
)
templating.dump_data_to_temp_yaml(lso_data, lso_data_yaml.name)
with open(lso_data_yaml.name, "r") as f:
logger.info(f.read())
logger.info("Creating local-storage-operator")
run_cmd(f"oc create -f {lso_data_yaml.name}")
local_storage_operator = ocp.OCP(kind=constants.POD, namespace=lso_namespace)
assert local_storage_operator.wait_for_resource(
condition=constants.STATUS_RUNNING,
selector=constants.LOCAL_STORAGE_OPERATOR_LABEL,
timeout=600,
), "Local storage operator did not reach running phase"
# Add disks for vSphere/RHV platform
platform = config.ENV_DATA.get("platform").lower()
lso_type = config.DEPLOYMENT.get("type")
if platform == constants.VSPHERE_PLATFORM:
add_disk_for_vsphere_platform()
if platform == constants.RHV_PLATFORM:
add_disk_for_rhv_platform()
if (ocp_version >= version.VERSION_4_6) and (ocs_version >= version.VERSION_4_6):
# Pull local volume discovery yaml data
logger.info("Pulling LocalVolumeDiscovery CR data from yaml")
lvd_data = templating.load_yaml(constants.LOCAL_VOLUME_DISCOVERY_YAML)
# Set local-volume-discovery namespace
lvd_data["metadata"]["namespace"] = lso_namespace
worker_nodes = get_compute_node_names(no_replace=True)
# Update local volume discovery data with Worker node Names
logger.info(
"Updating LocalVolumeDiscovery CR data with worker nodes Name: %s",
worker_nodes,
)
lvd_data["spec"]["nodeSelector"]["nodeSelectorTerms"][0]["matchExpressions"][0][
"values"
] = worker_nodes
lvd_data_yaml = tempfile.NamedTemporaryFile(
mode="w+", prefix="local_volume_discovery", delete=False
)
templating.dump_data_to_temp_yaml(lvd_data, lvd_data_yaml.name)
logger.info("Creating LocalVolumeDiscovery CR")
run_cmd(f"oc create -f {lvd_data_yaml.name}")
# Pull local volume set yaml data
logger.info("Pulling LocalVolumeSet CR data from yaml")
lvs_data = templating.load_yaml(constants.LOCAL_VOLUME_SET_YAML)
# Since we don't have datastore with SSD on our current VMware machines, localvolumeset doesn't detect
# NonRotational disk. As a workaround we are setting Rotational to device MechanicalProperties to detect
# HDD disk
if platform == constants.VSPHERE_PLATFORM or config.ENV_DATA.get(
"local_storage_allow_rotational_disks"
):
logger.info(
"Adding Rotational for deviceMechanicalProperties spec"
" to detect HDD disk"
)
lvs_data["spec"]["deviceInclusionSpec"][
"deviceMechanicalProperties"
].append("Rotational")
# Update local volume set data with Worker node Names
logger.info(
"Updating LocalVolumeSet CR data with worker nodes Name: %s", worker_nodes
)
lvs_data["spec"]["nodeSelector"]["nodeSelectorTerms"][0]["matchExpressions"][0][
"values"
] = worker_nodes
# Set storage class
logger.info(
"Updating LocalVolumeSet CR data with LSO storageclass: %s", storageclass
)
lvs_data["spec"]["storageClassName"] = storageclass
# set volumeMode to Filesystem for MCG only deployment
if config.ENV_DATA["mcg_only_deployment"]:
lvs_data["spec"]["volumeMode"] = constants.VOLUME_MODE_FILESYSTEM
lvs_data_yaml = tempfile.NamedTemporaryFile(
mode="w+", prefix="local_volume_set", delete=False
)
templating.dump_data_to_temp_yaml(lvs_data, lvs_data_yaml.name)
logger.info("Creating LocalVolumeSet CR")
run_cmd(f"oc create -f {lvs_data_yaml.name}")
else:
# Retrieve NVME device path ID for each worker node
device_paths = get_device_paths(worker_names)
# Pull local volume yaml data
logger.info("Pulling LocalVolume CR data from yaml")
lv_data = templating.load_yaml(constants.LOCAL_VOLUME_YAML)
# Set local-volume namespace
lv_data["metadata"]["namespace"] = lso_namespace
# Set storage class
logger.info(
"Updating LocalVolume CR data with LSO storageclass: %s", storageclass
)
for scd in lv_data["spec"]["storageClassDevices"]:
scd["storageClassName"] = storageclass
# Update local volume data with NVME IDs
logger.info("Updating LocalVolume CR data with device paths: %s", device_paths)
lv_data["spec"]["storageClassDevices"][0]["devicePaths"] = device_paths
# Create temp yaml file and create local volume
lv_data_yaml = tempfile.NamedTemporaryFile(
mode="w+", prefix="local_volume", delete=False
)
templating.dump_data_to_temp_yaml(lv_data, lv_data_yaml.name)
logger.info("Creating LocalVolume CR")
run_cmd(f"oc create -f {lv_data_yaml.name}")
logger.info("Waiting 30 seconds for PVs to create")
storage_class_device_count = 1
if (
platform == constants.AWS_PLATFORM
and lso_type == constants.AWS_EBS
and (config.DEPLOYMENT.get("arbiter_deployment", False))
):
storage_class_device_count = config.ENV_DATA.get("extra_disks", 1)
elif platform == constants.AWS_PLATFORM and not lso_type == constants.AWS_EBS:
storage_class_device_count = 2
elif platform == constants.IBM_POWER_PLATFORM:
numberofstoragedisks = config.ENV_DATA.get("number_of_storage_disks", 1)
storage_class_device_count = numberofstoragedisks
elif platform == constants.VSPHERE_PLATFORM:
# extra_disks is used in vSphere attach_disk() method
storage_class_device_count = config.ENV_DATA.get("extra_disks", 1)
expected_pvs = len(worker_names) * storage_class_device_count
if platform == constants.BAREMETAL_PLATFORM:
verify_pvs_created(expected_pvs, storageclass, False)
else:
verify_pvs_created(expected_pvs, storageclass)
def create_optional_operators_catalogsource_non_ga(force=False):
"""
Creating optional operators CatalogSource and ImageContentSourcePolicy
for non-ga OCP.
Args:
force (bool): enable/disable lso catalog setup
"""
ocp_version = version.get_semantic_ocp_version_from_config()
ocp_ga_version = get_ocp_ga_version(ocp_version)
if ocp_ga_version and not force:
return
optional_operators_data = list(
templating.load_yaml(
constants.LOCAL_STORAGE_OPTIONAL_OPERATORS, multi_document=True
)
)
optional_operators_yaml = tempfile.NamedTemporaryFile(
mode="w+", prefix="optional_operators", delete=False
)
if config.DEPLOYMENT.get("optional_operators_image"):
for _dict in optional_operators_data:
if _dict.get("kind").lower() == "catalogsource":
_dict["spec"]["image"] = config.DEPLOYMENT.get(
"optional_operators_image"
)
if config.DEPLOYMENT.get("disconnected"):
# in case of disconnected environment, we have to mirror all the
# optional_operators images
icsp = None
for _dict in optional_operators_data:
if _dict.get("kind").lower() == "catalogsource":
index_image = _dict["spec"]["image"]
if _dict.get("kind").lower() == "imagecontentsourcepolicy":
icsp = _dict
mirrored_index_image = (
f"{config.DEPLOYMENT['mirror_registry']}/"
f"{index_image.split('/', 1)[-1]}"
)
prune_and_mirror_index_image(
index_image,
mirrored_index_image,
constants.DISCON_CL_REQUIRED_PACKAGES,
icsp,
)
_dict["spec"]["image"] = mirrored_index_image
templating.dump_data_to_temp_yaml(
optional_operators_data, optional_operators_yaml.name
)
with open(optional_operators_yaml.name, "r") as f:
logger.info(f.read())
logger.info(
"Creating optional operators CatalogSource and ImageContentSourcePolicy"
)
run_cmd(f"oc create -f {optional_operators_yaml.name}")
wait_for_machineconfigpool_status("all")
def get_device_paths(worker_names):
"""
Retrieve a list of the device paths for each worker node
Args:
worker_names (list): worker node names
Returns:
list: device path ids
"""
device_paths = []
platform = config.ENV_DATA.get("platform").lower()
if platform == constants.IBM_POWER_PLATFORM:
device_paths = config.ENV_DATA.get("disk_pattern").lower()
return [device_paths]
if platform == "aws":
pattern = "nvme-Amazon_EC2_NVMe_Instance_Storage"
elif platform == "vsphere":
pattern = "wwn"
elif platform == "baremetal":
pattern = config.ENV_DATA.get("disk_pattern")
elif platform == "baremetalpsi":
pattern = "virtio"
# TODO: add patterns bare metal
else:
raise UnsupportedPlatformError(
"LSO deployment is not supported for platform: %s", platform
)
for worker in worker_names:
logger.info("Retrieving device path for node: %s", worker)
out = _get_disk_by_id(worker)
out_lines = out.split("\n")
nvme_lines = [
line
for line in out_lines
if (pattern in line and constants.ROOT_DISK_NAME not in line)
]
for nvme_line in nvme_lines:
device_path = [part for part in nvme_line.split(" ") if pattern in part][0]
logger.info("Adding %s to device paths", device_path)
device_paths.append(f"/dev/disk/by-id/{device_path}")
return device_paths
@retry(CommandFailed)
def _get_disk_by_id(worker):
"""
Retrieve disk by-id on a worker node using the debug pod
Args:
worker: worker node to get disks by-id for
Returns:
str: stdout of disk by-id command
"""
cmd = (
f"oc debug nodes/{worker} --to-namespace={config.ENV_DATA['cluster_namespace']} "
f"-- chroot /host ls -la /dev/disk/by-id/"
)
return run_cmd(cmd)
@retry(AssertionError, 120, 10, 1)
def verify_pvs_created(expected_pvs, storageclass, exact_count_pvs=True):
"""
Verify that PVs were created and are in the Available state
Args:
expected_pvs (int): number of PVs to verify
storageclass (str): Name of storageclass
exact_count_pvs (bool): True if expected_pvs should match exactly with PVs created,
False, if PVs created is more than or equal to expected_pvs
Raises:
AssertionError: if any PVs are not in the Available state or if the
number of PVs does not match the given parameter.
"""
logger.info("Verifying PVs are created")
out = run_cmd("oc get pv -o json")
pv_json = json.loads(out)
assert pv_json["items"], f"No PVs created but we are expecting {expected_pvs}"
# checks the state of PV
available_pvs = []
for pv in pv_json["items"]:
pv_state = pv["status"]["phase"]
pv_name = pv["metadata"]["name"]
sc_name = pv["spec"]["storageClassName"]
if sc_name != storageclass:
logger.info(f"Skipping check for {pv_name}")
continue
logger.info(f"{pv_name} is in {pv_state} state")
available_pvs.append(pv_name)
assert (
pv_state == "Available"
), f"{pv_name} not in 'Available' state. Current state is {pv_state}"
# check number of PVs created
num_pvs = len(available_pvs)
if exact_count_pvs:
condition_to_check = num_pvs == expected_pvs
else:
condition_to_check = num_pvs >= expected_pvs
assert (
condition_to_check
), f"{num_pvs} PVs created but we are expecting {expected_pvs}"
logger.debug("PVs, Workers: %s, %s", num_pvs, expected_pvs)
def add_disk_for_vsphere_platform():
"""
Add RDM/VMDK disk for vSphere platform
"""
platform = config.ENV_DATA.get("platform").lower()
lso_type = config.DEPLOYMENT.get("type")
if platform == constants.VSPHERE_PLATFORM:
# Types of LSO Deployment
# Importing here to avoid circular dependency
from ocs_ci.deployment.vmware import VSPHEREBASE
vsphere_base = VSPHEREBASE()
if lso_type == constants.RDM:
logger.info(f"LSO Deployment type: {constants.RDM}")
vsphere_base.add_rdm_disks()
if lso_type == constants.VMDK:
logger.info(f"LSO Deployment type: {constants.VMDK}")
vsphere_base.attach_disk(
config.ENV_DATA.get("device_size", defaults.DEVICE_SIZE),
config.DEPLOYMENT.get("provision_type", constants.VM_DISK_TYPE),
)
if lso_type == constants.DIRECTPATH:
logger.info(f"LSO Deployment type: {constants.DIRECTPATH}")
vsphere_base.add_pci_devices()
# wipe partition table on newly added PCI devices
compute_nodes = get_compute_node_names()
for compute_node in compute_nodes:
wipe_all_disk_partitions_for_node(compute_node)
def add_disk_for_rhv_platform():
"""
Add disk for RHV platform
"""
platform = config.ENV_DATA.get("platform").lower()
if platform == constants.RHV_PLATFORM:
# Importing here to avoid circular dependency
from ocs_ci.deployment.rhv import RHVBASE
rhv_base = RHVBASE()
rhv_base.attach_disks(
config.ENV_DATA.get("device_size", defaults.DEVICE_SIZE),
config.ENV_DATA.get("disk_format", constants.RHV_DISK_FORMAT_RAW),
config.ENV_DATA.get(
"disk_interface", constants.RHV_DISK_INTERFACE_VIRTIO_SCSI
),
config.ENV_DATA.get("sparse"),
config.ENV_DATA.get("pass_discard"),
)
|
f7cfce83ce0a41a4de308764e62f242a5653c211
|
c9ff14ff176600169b6e9f6490ab32f5c3af60e0
|
/jcvi/projects/misc.py
|
0a287434f9d1f8a6ac75732c0b36b45f6738cd74
|
[
"BSD-3-Clause",
"BSD-2-Clause"
] |
permissive
|
tanghaibao/jcvi
|
c7a070692d53784a34378e19e435cb9a86d2cd2e
|
695bd2eee98b14118b54fc37e38cd0222ce6a5e9
|
refs/heads/main
| 2023-09-01T01:22:04.353148
| 2023-08-30T01:59:11
| 2023-08-30T01:59:11
| 1,130,393
| 641
| 193
|
BSD-2-Clause
| 2023-09-01T03:17:24
| 2010-12-01T23:18:02
|
Python
|
UTF-8
|
Python
| false
| false
| 22,201
|
py
|
misc.py
|
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
"""
Functions in this script produce figures in various manuscripts.
"""
import os.path as op
import sys
import logging
import numpy as np
from jcvi.graphics.base import Polygon, normalize_axes, panel_labels, plt, savefig
from jcvi.graphics.glyph import GeneGlyph, RoundRect, TextCircle, DoubleSquare, plot_cap
from jcvi.graphics.karyotype import Karyotype
from jcvi.graphics.synteny import Synteny, draw_gene_legend
from jcvi.apps.base import OptionParser, ActionDispatcher, fname
def main():
actions = (
# Epoch paper (Woodhouse et al., 2012 Plant Cell)
("epoch", "show the methods used in epoch paper"),
# Cotton paper (Paterson et al., 2012 Nature)
("cotton", "plot cotton macro- and micro-synteny (requires data)"),
# Amborella paper (Albert et al., 2013 Science)
("amborella", "plot amborella macro- and micro-synteny (requires data)"),
# Mt4.0 paper (Tang et al., 2014 BMC Genomics)
("mtdotplots", "plot Mt3.5 and Mt4.0 side-by-side"),
# Oropetium paper (Vanburen et al., 2015 Nature)
("oropetium", "plot oropetium micro-synteny (requires data)"),
# Pomegranate paper (Qin et al., 2017 Plant Journal)
("pomegranate", "plot pomegranate macro- and micro-synteny (requires data)"),
# Unpublished
("birch", "plot birch macro-synteny (requires data)"),
("litchi", "plot litchi micro-synteny (requires data)"),
("utricularia", "plot utricularia micro-synteny (requires data)"),
(
"waterlilyGOM",
"waterlily phylogeny and related infographics (requires data)",
),
)
p = ActionDispatcher(actions)
p.dispatch(globals())
def waterlilyGOM(args):
"""
%prog mcmctree.tre table.csv
Customized figure to plot phylogeny and related infographics.
"""
from jcvi.graphics.tree import (
LeafInfoFile,
WGDInfoFile,
draw_tree,
parse_tree,
draw_wgd_xy,
)
from jcvi.graphics.table import CsvTable, draw_table
p = OptionParser(waterlilyGOM.__doc__)
opts, args, iopts = p.set_image_options(args, figsize="12x9")
if len(args) != 2:
sys.exit(not p.print_help())
(datafile, csvfile) = args
outgroup = ["ginkgo"]
logging.debug("Load tree file `{0}`".format(datafile))
t, hpd = parse_tree(datafile)
pf = datafile.rsplit(".", 1)[0]
fig = plt.figure(1, (iopts.w, iopts.h))
root = fig.add_axes([0, 0, 1, 1])
margin, rmargin = 0.15, 0.19 # Left and right margin
leafinfo = LeafInfoFile("leafinfo.csv").cache
wgdinfo = WGDInfoFile("wgdinfo.csv").cache
groups = "Monocots,Eudicots,ANA-grade,Gymnosperms"
draw_tree(
root,
t,
hpd=hpd,
margin=margin,
rmargin=rmargin,
supportcolor=None,
internal=False,
outgroup=outgroup,
leafinfo=leafinfo,
wgdinfo=wgdinfo,
geoscale=True,
groups=groups.split(","),
)
# Bottom right show legends for the WGD circles
pad = 0.02
ypad = 0.04
xstart = 1 - rmargin + pad
ystart = 0.2
waterlily_wgdline = wgdinfo["waterlily"][0]
ypos = ystart - 2 * ypad
draw_wgd_xy(root, xstart, ypos, waterlily_wgdline)
root.text(
xstart + pad,
ypos,
"Nymphaealean WGD",
color=waterlily_wgdline.color,
va="center",
)
other_wgdline = wgdinfo["banana"][0]
ypos = ystart - 3 * ypad
draw_wgd_xy(root, xstart, ypos, other_wgdline)
root.text(
xstart + pad,
ypos,
"Other known WGDs",
color=other_wgdline.color,
va="center",
)
# Top left draw the comparison table
csv_table = CsvTable(csvfile)
draw_table(
root,
csv_table,
extent=(0.02, 0.44, 0.55, 0.985),
stripe_color="lavender",
yinflation=iopts.w / iopts.h,
)
normalize_axes(root)
image_name = pf + "." + iopts.format
savefig(image_name, dpi=iopts.dpi, iopts=iopts)
def pomegranate(args):
"""
%prog pomegranate seqids karyotype.layout mcscan.out all.bed synteny.layout
Build a figure that calls graphics.karyotype to illustrate the high-ploidy
WGD history of the pomegranate genome. The script calls both
graphics.karyotype and graphics.synteny.
"""
p = OptionParser(pomegranate.__doc__)
opts, args, iopts = p.set_image_options(args, figsize="9x7")
if len(args) != 5:
sys.exit(not p.print_help())
seqidsfile, klayout, datafile, bedfile, slayout = args
fig = plt.figure(1, (iopts.w, iopts.h))
root = fig.add_axes([0, 0, 1, 1])
Karyotype(fig, root, seqidsfile, klayout)
Synteny(fig, root, datafile, bedfile, slayout)
# legend showing the orientation of the genes
draw_gene_legend(root, 0.42, 0.52, 0.48)
labels = ((0.04, 0.96, "A"), (0.04, 0.52, "B"))
panel_labels(root, labels)
root.set_xlim(0, 1)
root.set_ylim(0, 1)
root.set_axis_off()
pf = "pomegranate-karyotype"
image_name = pf + "." + iopts.format
savefig(image_name, dpi=iopts.dpi, iopts=iopts)
def utricularia(args):
from jcvi.graphics.synteny import main as synteny_main
p = OptionParser(synteny_main.__doc__)
p.add_option("--switch", help="Rename the seqid with two-column file")
opts, args, iopts = p.set_image_options(args, figsize="8x7")
if len(args) != 3:
sys.exit(not p.print_help())
datafile, bedfile, layoutfile = args
switch = opts.switch
pf = datafile.rsplit(".", 1)[0]
fig = plt.figure(1, (iopts.w, iopts.h))
root = fig.add_axes([0, 0, 1, 1])
s = Synteny(
fig, root, datafile, bedfile, layoutfile, loc_label=False, switch=switch
)
light = "lightslategrey"
RoundRect(root, (0.02, 0.69), 0.96, 0.24, fill=False, lw=2, ec=light)
RoundRect(root, (0.02, 0.09), 0.96, 0.48, fill=False, lw=2, ec=light)
za, zb = s.layout[1].ratio, s.layout[-1].ratio # zoom level
if za != 1:
root.text(
0.96,
0.89,
"{}x zoom".format(za).replace(".0x", "x"),
color=light,
ha="right",
va="center",
size=14,
)
if zb != 1:
root.text(
0.96,
0.12,
"{}x zoom".format(zb).replace(".0x", "x"),
color=light,
ha="right",
va="center",
size=14,
)
# legend showing the orientation of the genes
draw_gene_legend(root, 0.22, 0.3, 0.64, text=True)
root.set_xlim(0, 1)
root.set_ylim(0, 1)
root.set_axis_off()
image_name = pf + "." + iopts.format
savefig(image_name, dpi=iopts.dpi, iopts=iopts)
def join_nodes(
root, coords, a, b, x, slope=2.4, fc="lightslategray", rectangle=True, circle=True
):
# Join node a and b to make an internal node
ax, ay = coords[a]
bx, by = coords[b]
if ay < by:
ax, ay, bx, by = bx, by, ax, ay
if rectangle:
nx, ny = x, (ay + by) / 2
root.plot((nx, ax), (ay, ay), lw=2, color=fc)
root.plot((nx, bx), (by, by), lw=2, color=fc)
root.plot((nx, nx), (ay, by), lw=2, color=fc)
else:
dx = (abs(ay - by) / slope - abs(ax - bx)) / 2
nx = max(ax, bx) + dx
ny = by + (nx - bx) * slope
root.plot((nx, ax), (ny, ay), lw=2, color=fc)
root.plot((nx, bx), (ny, by), lw=2, color=fc)
if circle:
DoubleSquare(root, nx, ny, fc=fc)
return nx, ny
def branch_length(ax, start, end, text, ha="left", va="bottom", color="r"):
xs, ys = start
xe, ye = end
text = r"$\mathsf{" + text + "}$"
ax.text((xs + xe) / 2, (ys + ye) / 2, text, ha=ha, va=va, color=color)
def birch(args):
"""
%prog birch seqids layout
Plot birch macro-synteny, with an embedded phylogenetic tree to the right.
"""
p = OptionParser(birch.__doc__)
opts, args, iopts = p.set_image_options(args, figsize="8x6")
if len(args) != 2:
sys.exit(not p.print_help())
seqids, layout = args
fig = plt.figure(1, (iopts.w, iopts.h))
root = fig.add_axes([0, 0, 1, 1])
K = Karyotype(fig, root, seqids, layout)
L = K.layout
xs = 0.79
dt = dict(rectangle=False, circle=False)
# Embed a phylogenetic tree to the right
coords = {}
coords["Amborella"] = (xs, L[0].y)
coords["Vitis"] = (xs, L[1].y)
coords["Prunus"] = (xs, L[2].y)
coords["Betula"] = (xs, L[3].y)
coords["Populus"] = (xs, L[4].y)
coords["Arabidopsis"] = (xs, L[5].y)
coords["fabids"] = join_nodes(root, coords, "Prunus", "Betula", xs, **dt)
coords["malvids"] = join_nodes(root, coords, "Populus", "Arabidopsis", xs, **dt)
coords["rosids"] = join_nodes(root, coords, "fabids", "malvids", xs, **dt)
coords["eudicots"] = join_nodes(root, coords, "rosids", "Vitis", xs, **dt)
coords["angiosperm"] = join_nodes(root, coords, "eudicots", "Amborella", xs, **dt)
# Show branch length
branch_length(root, coords["Amborella"], coords["angiosperm"], ">160.0")
branch_length(root, coords["eudicots"], coords["angiosperm"], ">78.2", va="top")
branch_length(root, coords["Vitis"], coords["eudicots"], "138.5")
branch_length(root, coords["rosids"], coords["eudicots"], "19.8", va="top")
branch_length(
root, coords["Prunus"], coords["fabids"], "104.2", ha="right", va="top"
)
branch_length(root, coords["Arabidopsis"], coords["malvids"], "110.2", va="top")
branch_length(
root, coords["fabids"], coords["rosids"], "19.8", ha="right", va="top"
)
branch_length(root, coords["malvids"], coords["rosids"], "8.5", va="top")
root.set_xlim(0, 1)
root.set_ylim(0, 1)
root.set_axis_off()
pf = "birch"
image_name = pf + "." + iopts.format
savefig(image_name, dpi=iopts.dpi, iopts=iopts)
def mtdotplots(args):
"""
%prog mtdotplots Mt3.5 Mt4.0 medicago.medicago.lifted.1x1.anchors
Plot Mt3.5 and Mt4.0 side-by-side. This is essentially combined from two
graphics.dotplot() function calls, as panels A and B.
"""
from jcvi.graphics.dotplot import check_beds, dotplot
p = OptionParser(mtdotplots.__doc__)
p.set_beds()
opts, args, iopts = p.set_image_options(args, figsize="16x8", dpi=90)
if len(args) != 3:
sys.exit(not p.print_help())
a, b, ac = args
fig = plt.figure(1, (iopts.w, iopts.h))
root = fig.add_axes([0, 0, 1, 1])
r1 = fig.add_axes([0, 0, 0.5, 1])
r2 = fig.add_axes([0.5, 0, 0.5, 1])
a1 = fig.add_axes([0.05, 0.1, 0.4, 0.8])
a2 = fig.add_axes([0.55, 0.1, 0.4, 0.8])
anchorfile = op.join(a, ac)
qbed, sbed, qorder, sorder, is_self = check_beds(anchorfile, p, opts)
dotplot(
anchorfile, qbed, sbed, fig, r1, a1, is_self=is_self, genomenames="Mt3.5_Mt3.5"
)
opts.qbed = opts.sbed = None
anchorfile = op.join(b, ac)
qbed, sbed, qorder, sorder, is_self = check_beds(anchorfile, p, opts)
dotplot(
anchorfile, qbed, sbed, fig, r2, a2, is_self=is_self, genomenames="Mt4.0_Mt4.0"
)
root.text(0.03, 0.95, "A", ha="center", va="center", size=36)
root.text(0.53, 0.95, "B", ha="center", va="center", size=36)
root.set_xlim(0, 1)
root.set_ylim(0, 1)
root.set_axis_off()
pf = "mtdotplots"
image_name = pf + "." + iopts.format
savefig(image_name, dpi=iopts.dpi, iopts=iopts)
def oropetium(args):
"""
%prog oropetium mcscan.out all.bed layout switch.ids
Build a composite figure that calls graphics.synteny.
"""
p = OptionParser(oropetium.__doc__)
p.add_option("--extra", help="Extra features in BED format")
opts, args, iopts = p.set_image_options(args, figsize="9x6")
if len(args) != 4:
sys.exit(not p.print_help())
datafile, bedfile, slayout, switch = args
fig = plt.figure(1, (iopts.w, iopts.h))
root = fig.add_axes([0, 0, 1, 1])
Synteny(
fig, root, datafile, bedfile, slayout, switch=switch, extra_features=opts.extra
)
# legend showing the orientation of the genes
draw_gene_legend(root, 0.4, 0.57, 0.74, text=True, repeat=True)
# On the left panel, make a species tree
fc = "lightslategrey"
coords = {}
xs, xp = 0.16, 0.03
coords["oropetium"] = (xs, 0.7)
coords["setaria"] = (xs, 0.6)
coords["sorghum"] = (xs, 0.5)
coords["rice"] = (xs, 0.4)
coords["brachypodium"] = (xs, 0.3)
xs -= xp
coords["Panicoideae"] = join_nodes(root, coords, "setaria", "sorghum", xs)
xs -= xp
coords["BEP"] = join_nodes(root, coords, "rice", "brachypodium", xs)
coords["PACMAD"] = join_nodes(root, coords, "oropetium", "Panicoideae", xs)
xs -= xp
coords["Poaceae"] = join_nodes(root, coords, "BEP", "PACMAD", xs)
# Names of the internal nodes
for tag in ("BEP", "Poaceae"):
nx, ny = coords[tag]
nx, ny = nx - 0.005, ny - 0.02
root.text(nx, ny, tag, rotation=90, ha="right", va="top", color=fc)
for tag in ("PACMAD",):
nx, ny = coords[tag]
nx, ny = nx - 0.005, ny + 0.02
root.text(nx, ny, tag, rotation=90, ha="right", va="bottom", color=fc)
root.set_xlim(0, 1)
root.set_ylim(0, 1)
root.set_axis_off()
pf = "oropetium"
image_name = pf + "." + iopts.format
savefig(image_name, dpi=iopts.dpi, iopts=iopts)
def litchi(args):
"""
%prog litchi mcscan.out all.bed layout switch.ids
Build a composite figure that calls graphics.synteny.
"""
p = OptionParser(litchi.__doc__)
opts, args, iopts = p.set_image_options(args, figsize="9x6")
if len(args) != 4:
sys.exit(not p.print_help())
datafile, bedfile, slayout, switch = args
fig = plt.figure(1, (iopts.w, iopts.h))
root = fig.add_axes([0, 0, 1, 1])
Synteny(fig, root, datafile, bedfile, slayout, switch=switch)
# legend showing the orientation of the genes
draw_gene_legend(root, 0.4, 0.7, 0.82)
# On the left panel, make a species tree
fc = "lightslategrey"
coords = {}
xs, xp = 0.16, 0.03
coords["lychee"] = (xs, 0.37)
coords["clementine"] = (xs, 0.5)
coords["cacao"] = (xs, 0.6)
coords["strawberry"] = (xs, 0.7)
coords["grape"] = (xs, 0.8)
xs -= xp
coords["Sapindales"] = join_nodes(root, coords, "clementine", "lychee", xs)
xs -= xp
coords["Rosid-II"] = join_nodes(root, coords, "cacao", "Sapindales", xs)
xs -= xp
coords["Rosid"] = join_nodes(root, coords, "strawberry", "Rosid-II", xs)
xs -= xp
coords["crown"] = join_nodes(root, coords, "grape", "Rosid", xs, circle=False)
# Names of the internal nodes
for tag in ("Rosid", "Rosid-II", "Sapindales"):
nx, ny = coords[tag]
nx, ny = nx - 0.01, ny - 0.02
root.text(nx, ny, tag, rotation=90, ha="right", va="top", color=fc)
root.set_xlim(0, 1)
root.set_ylim(0, 1)
root.set_axis_off()
pf = "litchi"
image_name = pf + "." + iopts.format
savefig(image_name, dpi=iopts.dpi, iopts=iopts)
def amborella(args):
"""
%prog amborella seqids karyotype.layout mcscan.out all.bed synteny.layout
Build a composite figure that calls graphics.karyotype and graphics.synteny.
"""
p = OptionParser(amborella.__doc__)
p.add_option("--tree", help="Display trees on the bottom of the figure")
p.add_option("--switch", help="Rename the seqid with two-column file")
opts, args, iopts = p.set_image_options(args, figsize="8x7")
if len(args) != 5:
sys.exit(not p.print_help())
seqidsfile, klayout, datafile, bedfile, slayout = args
switch = opts.switch
tree = opts.tree
fig = plt.figure(1, (iopts.w, iopts.h))
root = fig.add_axes([0, 0, 1, 1])
Karyotype(fig, root, seqidsfile, klayout)
Synteny(fig, root, datafile, bedfile, slayout, switch=switch, tree=tree)
# legend showing the orientation of the genes
draw_gene_legend(root, 0.5, 0.68, 0.5)
# annotate the WGD events
fc = "lightslategrey"
x = 0.05
radius = 0.012
TextCircle(root, x, 0.86, r"$\gamma$", radius=radius)
TextCircle(root, x, 0.95, r"$\epsilon$", radius=radius)
root.plot([x, x], [0.83, 0.9], ":", color=fc, lw=2)
pts = plot_cap((x, 0.95), np.radians(range(-70, 250)), 0.02)
x, y = zip(*pts)
root.plot(x, y, ":", color=fc, lw=2)
root.set_xlim(0, 1)
root.set_ylim(0, 1)
root.set_axis_off()
pf = "amborella"
image_name = pf + "." + iopts.format
savefig(image_name, dpi=iopts.dpi, iopts=iopts)
def cotton(args):
"""
%prog cotton seqids karyotype.layout mcscan.out all.bed synteny.layout
Build a composite figure that calls graphics.karyotype and graphics.synteny.
"""
p = OptionParser(cotton.__doc__)
p.add_option("--depthfile", help="Use depth info in this file")
p.add_option("--switch", help="Rename the seqid with two-column file")
opts, args, iopts = p.set_image_options(args, figsize="8x7")
if len(args) != 5:
sys.exit(p.print_help())
seqidsfile, klayout, datafile, bedfile, slayout = args
switch = opts.switch
depthfile = opts.depthfile
fig = plt.figure(1, (iopts.w, iopts.h))
root = fig.add_axes([0, 0, 1, 1])
kt = Karyotype(fig, root, seqidsfile, klayout)
Synteny(fig, root, datafile, bedfile, slayout, switch=switch)
light = "lightslategrey"
# Show the dup depth along the cotton chromosomes
if depthfile:
ymin, ymax = 0.9, 0.95
root.text(0.11, 0.96, "Cotton duplication level", color="gray", size=10)
root.plot([0.1, 0.95], [ymin, ymin], color="gray")
root.text(0.96, 0.9, "1x", color="gray", va="center")
root.plot([0.1, 0.95], [ymax, ymax], color="gray")
root.text(0.96, 0.95, "6x", color="gray", va="center")
fp = open(depthfile)
track = kt.tracks[0] # Cotton
depths = []
for row in fp:
a, b, depth = row.split()
depth = int(depth)
try:
p = track.get_coords(a)
depths.append((p, depth))
except KeyError:
pass
depths.sort(key=lambda x: (x[0], -x[1]))
xx, yy = zip(*depths)
yy = [ymin + 0.01 * (x - 1) for x in yy]
root.plot(xx, yy, "-", color=light)
# legend showing the orientation of the genes
draw_gene_legend(root, 0.5, 0.68, 0.5)
# Zoom
xpos = 0.835
ytop = 0.9
xmin, xmax = 0.18, 0.82
ymin, ymax = ytop, 0.55
lc = "k"
kwargs = dict(lw=3, color=lc, mec=lc, mfc="w", zorder=3)
root.plot((xpos, xpos), (ymax, 0.63), ":o", **kwargs)
root.plot((xpos, xmin), (ymax, ymin), ":o", **kwargs)
root.plot((xpos, xmax), (ymax, ymin), ":o", **kwargs)
RoundRect(root, (0.06, 0.17), 0.92, 0.35, fill=False, lw=2, ec=light)
# Panels
root.text(0.05, 0.95, "a", size=20, fontweight="bold")
root.text(0.1, 0.45, "b", size=20, fontweight="bold")
root.set_xlim(0, 1)
root.set_ylim(0, 1)
root.set_axis_off()
pf = "cotton"
image_name = pf + "." + iopts.format
savefig(image_name, dpi=iopts.dpi, iopts=iopts)
def plot_diagram(ax, x, y, label="S", title="syntenic", gradient=True):
"""
Part of the diagrams that are re-used. (x, y) marks the center of the
diagram. Label determines the modification to the "S" graph.
"""
trackgap = 0.06
tracklen = 0.12
xa, xb = x - tracklen, x + tracklen
ya, yb = y + trackgap, y - trackgap
hsps = (((60, 150), (50, 130)), ((190, 225), (200, 240)), ((330, 280), (360, 310)))
for yy in (ya, yb):
ax.plot((xa, xb), (yy, yy), "-", color="gray", lw=2, zorder=1)
ytip = 0.015
mrange = 400
m = lambda t: xa + t * 1.0 / mrange * tracklen * 2
for i, ((a, b), (c, d)) in enumerate(hsps):
fb = False
if label == "FB" and i == 1:
c, d = 270, 280
fb = True
if label == "G" and i == 0:
c, d = 120, 65
a, b, c, d = [m(t) for t in (a, b, c, d)]
color = "g" if i == 1 else "r"
GeneGlyph(ax, a, b, ya, 2 * ytip, fc=color, gradient=gradient, zorder=10)
if i == 1 and label in ("F", "G", "FN"):
pass
else:
if fb:
GeneGlyph(
ax, c, d, yb, 2 * ytip, fc="w", tip=0, gradient=gradient, zorder=10
)
else:
GeneGlyph(ax, c, d, yb, 2 * ytip, fc="r", gradient=gradient, zorder=10)
r = Polygon(
((a, ya - ytip), (c, yb + ytip), (d, yb + ytip), (b, ya - ytip)),
fc="r",
alpha=0.2,
)
if i == 1 and label not in ("S", "FB"):
pass
elif i == 0 and label == "G":
pass
else:
ax.add_patch(r)
if label == "FN":
ax.text(x + 0.005, yb, "NNNNN", ha="center", size=7)
title = "{0}: {1}".format(label, title)
ax.text(x, ya + 5 * ytip, title, size=8, ha="center")
def epoch(args):
"""
%prog epoch
Illustrate the methods used in Maggie's epoch paper, in particular, how to
classify S/G/F/FB/FN for the genes.
"""
p = OptionParser(__doc__)
p.parse_args(args)
fig = plt.figure(1, (6, 4))
root = fig.add_axes([0, 0, 1, 1])
# Separators
linestyle = dict(lw=2, color="b", alpha=0.2, zorder=2)
root.plot((0, 1), (0.5, 0.5), "--", **linestyle)
for i in (1.0 / 3, 2.0 / 3):
root.plot((i, i), (0.5, 1), "--", **linestyle)
for i in (1.0 / 6, 3.0 / 6, 5.0 / 6):
root.plot((i, i), (0, 0.5), "--", **linestyle)
# Diagrams
plot_diagram(root, 1.0 / 6, 3.0 / 4, "S", "syntenic")
plot_diagram(root, 3.0 / 6, 3.0 / 4, "F", "missing, with both flankers")
plot_diagram(root, 5.0 / 6, 3.0 / 4, "G", "missing, with one flanker")
plot_diagram(root, 2.0 / 6, 1.0 / 4, "FB", "has non-coding matches")
plot_diagram(root, 4.0 / 6, 1.0 / 4, "FN", "syntenic region has gap")
root.set_xlim(0, 1)
root.set_ylim(0, 1)
root.set_axis_off()
figname = fname() + ".pdf"
savefig(figname, dpi=300)
if __name__ == "__main__":
main()
|
db4f797428e98a86ff62b2be5b34361b814b9a51
|
6c2dbc8d4e536220fb3b1cc72aa8104aea8b0698
|
/tests/test_utils/test_magic_filter.py
|
097c8365a1dfb6740feeb4791e3261088ab6d17d
|
[
"MIT"
] |
permissive
|
aiogram/aiogram
|
f8f98a0beb63bd4d93ea810638d5792569bf354b
|
04bd0c9e7c5421c060183b90d515050f41377bc1
|
refs/heads/dev-3.x
| 2023-08-30T21:20:13.018174
| 2023-08-28T23:01:54
| 2023-08-28T23:01:54
| 111,210,856
| 4,287
| 1,250
|
MIT
| 2023-09-10T21:34:03
| 2017-11-18T14:11:13
|
Python
|
UTF-8
|
Python
| false
| false
| 935
|
py
|
test_magic_filter.py
|
from dataclasses import dataclass
from re import Match
from aiogram import F
from aiogram.utils.magic_filter import MagicFilter
@dataclass
class MyObject:
text: str
class TestMagicFilter:
def test_operation_as(self):
magic: MagicFilter = F.text.regexp(r"^(\d+)$").as_("match")
assert not magic.resolve(MyObject(text="test"))
result = magic.resolve(MyObject(text="123"))
assert isinstance(result, dict)
assert isinstance(result["match"], Match)
def test_operation_as_not_none(self):
# Issue: https://github.com/aiogram/aiogram/issues/1281
magic = F.cast(int).as_("value")
result = magic.resolve("0")
assert result == {"value": 0}
def test_operation_as_not_none_iterable(self):
# Issue: https://github.com/aiogram/aiogram/issues/1281
magic = F.as_("value")
result = magic.resolve([])
assert result is None
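# Illustrative sketch, not part of the original test module: based on the
# cases above, <magic>.as_(name) resolves to a {name: value} dict when the
# wrapped operation produces a usable value, and to a falsy result otherwise.
# The extra case below is hypothetical and only restates that behaviour.
#
#   magic = F.text.regexp(r"^(\w+)$").as_("word")
#   resolved = magic.resolve(MyObject(text="hello"))
#   assert resolved["word"].group(1) == "hello"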
|
d9a36b29f5ab981a7ee13c26e6ea4e152faea5e5
|
ecc09ad8fca4d48dc905b549b421917c17d57985
|
/tests/model/eventsources/test_cloudwatch_event_source.py
|
55a7f3245889a5570492eb36858e28e1fccc0f38
|
[
"MIT",
"Apache-2.0"
] |
permissive
|
aws/serverless-application-model
|
8fd3ea2016560da4929653da2c30298cced41b64
|
0bb862ea715a4aafbb7984b407a81856b3ae19c4
|
refs/heads/develop
| 2023-08-31T02:22:14.862372
| 2023-08-30T17:52:46
| 2023-08-30T17:52:46
| 70,530,854
| 2,055
| 704
|
Apache-2.0
| 2023-09-14T18:02:21
| 2016-10-10T21:36:18
|
Python
|
UTF-8
|
Python
| false
| false
| 969
|
py
|
test_cloudwatch_event_source.py
|
from unittest import TestCase
from samtranslator.model.eventsources.push import CloudWatchEvent
from samtranslator.model.lambda_ import LambdaFunction
class CloudWatchEventSourceTests(TestCase):
def setUp(self):
self.logical_id = "EventLogicalId"
self.func = LambdaFunction("func")
def test_target_id_when_not_provided(self):
cloudwatch_event_source = CloudWatchEvent(self.logical_id)
cfn = cloudwatch_event_source.to_cloudformation(function=self.func)
target_id = cfn[0].Targets[0]["Id"]
self.assertEqual(target_id, "{}{}".format(self.logical_id, "LambdaTarget"))
def test_target_id_when_provided(self):
cloudwatch_event_source = CloudWatchEvent(self.logical_id)
cloudwatch_event_source.Target = {"Id": "MyTargetId"}
cfn = cloudwatch_event_source.to_cloudformation(function=self.func)
target_id = cfn[0].Targets[0]["Id"]
self.assertEqual(target_id, "MyTargetId")
|
a09acd1e21b5aea5065098dbb7e65c61507e2153
|
091a6200be74bf6577c86f623665bcc24e16b02b
|
/IoT_Environment_Sensor/secrets.py
|
f55951b186671dae1ba494434e851cea808a745a
|
[
"MIT"
] |
permissive
|
adafruit/Adafruit_Learning_System_Guides
|
b5f7bce40a16da64e7a79d4b39de032f2cca41d4
|
5eaa7a15a437c533b89f359a25983e24bb6b5438
|
refs/heads/main
| 2023-09-05T18:31:41.621956
| 2023-09-05T15:36:09
| 2023-09-05T15:36:09
| 105,065,494
| 937
| 937
|
MIT
| 2023-09-12T18:48:53
| 2017-09-27T20:22:44
|
C
|
UTF-8
|
Python
| false
| false
| 410
|
py
|
secrets.py
|
# SPDX-FileCopyrightText: 2019 Dave Astels for Adafruit Industries
#
# SPDX-License-Identifier: MIT
# This file is where you keep secret settings, passwords, and tokens!
# If you put them in the code you risk committing that info or sharing it
secrets = {
"ssid": b"My_SSID",
"password": b"My_WIFI_Password",
"timezone": "Area/City",
"aio_username": "my_username",
"aio_key": "my_key",
}
|
d00cba73bb2eb90902ac518d04bb6d60ba39b8f6
|
182bbadb0ee7f59f1abd154d06484e555a30c6d8
|
/bingo/bingo-elastic/python/bingo_elastic/model/helpers.py
|
986b17b7b14f6d06c8954f8af595d1f6eb63a6db
|
[
"Apache-2.0"
] |
permissive
|
epam/Indigo
|
08559861adf474122366b6e2e499ed3aa56272d1
|
8e473e69f393c3a57ff75b7728999c5fb4cbf1a3
|
refs/heads/master
| 2023-09-02T10:14:46.843829
| 2023-08-25T08:39:24
| 2023-08-25T08:39:24
| 37,536,320
| 265
| 106
|
Apache-2.0
| 2023-09-14T17:34:00
| 2015-06-16T14:45:56
|
C++
|
UTF-8
|
Python
| false
| false
| 2,810
|
py
|
helpers.py
|
from pathlib import Path
from typing import Callable, Generator, Optional, Union
from indigo import Indigo, IndigoObject # type: ignore
from bingo_elastic.model.record import (
IndigoRecordMolecule,
IndigoRecordReaction,
)
def iterate_file(
file: Path,
iterator: Optional[str] = None,
error_handler: Optional[Callable[[object, BaseException], None]] = None,
) -> Generator[IndigoRecordMolecule, None, None]:
"""
:param file:
:param iterator: supported iterators are sdf, smiles, smi and cml.
    If not set, the iterator is inferred from the file extension
:type iterator: str
:param error_handler: lambda for catching exceptions
:type error_handler: Optional[Callable[[object, BaseException], None]]
:return:
"""
iterators = {
"sdf": "iterateSDFile",
"smiles": "iterateSmilesFile",
"smi": "iterateSmilesFile",
"cml": "iterateCMLFile",
}
if not iterator:
iterator = file.suffix[1:]
iterator_fn = iterators.get(iterator)
if not iterator_fn:
raise AttributeError(f"Unsupported iterator {iterator}")
indigo_object: IndigoObject
for indigo_object in getattr(Indigo(), iterator_fn)(str(file)):
yield IndigoRecordMolecule(
indigo_object=indigo_object, error_handler=error_handler
)
def iterate_sdf(
file: Union[Path, str],
error_handler: Optional[Callable[[object, BaseException], None]] = None,
) -> Generator:
yield from iterate_file(
Path(file) if isinstance(file, str) else file,
"sdf",
error_handler=error_handler,
)
def iterate_smiles(
file: Union[Path, str],
error_handler: Optional[Callable[[object, BaseException], None]] = None,
) -> Generator:
yield from iterate_file(
Path(file) if isinstance(file, str) else file,
"smiles",
error_handler=error_handler,
)
def iterate_cml(
file: Union[Path, str],
error_handler: Optional[Callable[[object, BaseException], None]] = None,
) -> Generator:
yield from iterate_file(
Path(file) if isinstance(file, str) else file,
"cml",
error_handler=error_handler,
)
def load_molecule(
file_: Union[str, Path], session: Indigo
) -> IndigoRecordMolecule:
"""
Helper for loading molecules from file into IndigoRecordMolecule object
"""
molecule = session.loadMoleculeFromFile(file_)
return IndigoRecordMolecule(indigo_object=molecule)
def load_reaction(
file_: Union[str, Path], session: Indigo
) -> IndigoRecordReaction:
"""
Helper for loading reactions into IndigoRecordReaction object
"""
reaction = session.loadReactionFromFile(str(file_))
return IndigoRecordReaction(indigo_object=reaction)
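# Illustrative usage sketch, not part of the original helpers module; the file
# names below are hypothetical.
#
#   session = Indigo()
#   for record in iterate_sdf("molecules.sdf"):
#       ...  # each record is an IndigoRecordMolecule
#   mol = load_molecule("caffeine.mol", session)
#   rxn = load_reaction("reaction.rxn", session)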
|
53fce635e9d65e1c3a765f6d7847dd5612a42400
|
cb6d0a660cfcb28ee9e8a1c0266925f8f541edfb
|
/tests/python/test_brb2.py
|
89783f308cf7528ff8ea27271f25b04ab39d0126
|
[
"Apache-2.0",
"LicenseRef-scancode-proprietary-license",
"GPL-1.0-or-later",
"BSD-3-Clause",
"LicenseRef-scancode-other-copyleft",
"GPL-2.0-only"
] |
permissive
|
iovisor/bcc
|
0e002769364523caeb731216021b0a3c881a723f
|
ec49363e2e9daec026ee6cae4c5fc316f8fab0ff
|
refs/heads/master
| 2023-09-03T22:37:47.238198
| 2023-08-31T14:44:55
| 2023-09-01T11:21:30
| 34,921,116
| 18,467
| 3,907
|
Apache-2.0
| 2023-09-13T21:22:53
| 2015-05-01T19:52:32
|
C
|
UTF-8
|
Python
| false
| false
| 9,397
|
py
|
test_brb2.py
|
#!/usr/bin/env python3
# Copyright (c) PLUMgrid, Inc.
# Licensed under the Apache License, Version 2.0 (the "License")
# This program implements a topology like the one below:
# pem: physical endpoint manager, implemented as a bpf program
#
# vm1 <--------+ +----> bridge1 <----+
# V V V
# pem router
# ^ ^ ^
# vm2 <--------+ +----> bridge2 <----+
#
# The vm1, vm2 and router are implemented as namespaces.
# The linux bridge device is used to provide bridge functionality.
# pem bpf will be attached to related network devices for vm1, vm2, bridge1 and bridge2.
#
# vm1 and vm2 are in different subnets. For vm1 to communicate with vm2,
# the packet will have to travel from vm1 to pem, bridge1, router, bridge2, pem, and
# then come to vm2.
#
# When this test is run with verbose mode (ctest -R <test_name> -V),
# the following printout is observed on my local box:
#
# ......
# 9: PING 200.1.1.1 (200.1.1.1) 56(84) bytes of data.
# 9: 64 bytes from 200.1.1.1: icmp_req=1 ttl=63 time=0.090 ms
# 9: 64 bytes from 200.1.1.1: icmp_req=2 ttl=63 time=0.032 ms
# 9:
# 9: --- 200.1.1.1 ping statistics ---
# 9: 2 packets transmitted, 2 received, 0% packet loss, time 999ms
# 9: rtt min/avg/max/mdev = 0.032/0.061/0.090/0.029 ms
# 9: [ ID] Interval Transfer Bandwidth
# 9: [ 5] 0.0- 1.0 sec 3.80 GBytes 32.6 Gbits/sec
# 9: Starting netserver with host 'IN(6)ADDR_ANY' port '12865' and family AF_UNSPEC
# 9: MIGRATED TCP STREAM TEST from 0.0.0.0 (0.0.0.0) port 0 AF_INET to 200.1.1.1 (200.1.1.1) port 0 AF_INET : demo
# 9: Recv Send Send
# 9: Socket Socket Message Elapsed
# 9: Size Size Size Time Throughput
# 9: bytes bytes bytes secs. 10^6bits/sec
# 9:
# 9: 87380 16384 65160 1.00 39940.46
# 9: MIGRATED TCP REQUEST/RESPONSE TEST from 0.0.0.0 (0.0.0.0) port 0 AF_INET to 200.1.1.1 (200.1.1.1) port 0 AF_INET : demo : first burst 0
# 9: Local /Remote
# 9: Socket Size Request Resp. Elapsed Trans.
# 9: Send Recv Size Size Time Rate
# 9: bytes Bytes bytes bytes secs. per sec
# 9:
# 9: 16384 87380 1 1 1.00 46387.80
# 9: 16384 87380
# 9: .
# 9: ----------------------------------------------------------------------
# 9: Ran 1 test in 7.495s
# 9:
# 9: OK
from ctypes import c_uint
from bcc import BPF
from pyroute2 import IPRoute, NetNS, IPDB, NSPopen
from utils import NSPopenWithCheck, mayFail
import sys
from time import sleep
from unittest import main, TestCase
import subprocess
from simulation import Simulation
arg1 = sys.argv.pop(1)
ipr = IPRoute()
ipdb = IPDB(nl=ipr)
sim = Simulation(ipdb)
allocated_interfaces = set(ipdb.interfaces.keys())
def get_next_iface(prefix):
i = 0
while True:
iface = "{0}{1}".format(prefix, i)
if iface not in allocated_interfaces:
allocated_interfaces.add(iface)
return iface
i += 1
class TestBPFSocket(TestCase):
def setup_br(self, br, veth_rt_2_br, veth_pem_2_br, veth_br_2_pem):
# create veth which connecting pem and br
with ipdb.create(ifname=veth_pem_2_br, kind="veth", peer=veth_br_2_pem) as v:
v.up()
ipdb.interfaces[veth_br_2_pem].up().commit()
subprocess.call(["sysctl", "-q", "-w", "net.ipv6.conf." + veth_pem_2_br + ".disable_ipv6=1"])
subprocess.call(["sysctl", "-q", "-w", "net.ipv6.conf." + veth_br_2_pem + ".disable_ipv6=1"])
# set up the bridge and add router interface as one of its slaves
with ipdb.create(ifname=br, kind="bridge") as br1:
br1.add_port(ipdb.interfaces[veth_pem_2_br])
br1.add_port(ipdb.interfaces[veth_rt_2_br])
br1.up()
subprocess.call(["sysctl", "-q", "-w", "net.ipv6.conf." + br + ".disable_ipv6=1"])
def set_default_const(self):
self.ns1 = "ns1"
self.ns2 = "ns2"
self.ns_router = "ns_router"
self.br1 = get_next_iface("br")
self.veth_pem_2_br1 = "v20"
self.veth_br1_2_pem = "v21"
self.br2 = get_next_iface("br")
self.veth_pem_2_br2 = "v22"
self.veth_br2_2_pem = "v23"
self.vm1_ip = "100.1.1.1"
self.vm2_ip = "200.1.1.1"
self.vm1_rtr_ip = "100.1.1.254"
self.vm2_rtr_ip = "200.1.1.254"
self.vm1_rtr_mask = "100.1.1.0/24"
self.vm2_rtr_mask = "200.1.1.0/24"
def attach_filter(self, ifname, fd, name):
ifindex = ipdb.interfaces[ifname].index
ipr.tc("add", "ingress", ifindex, "ffff:")
ipr.tc("add-filter", "bpf", ifindex, ":1", fd=fd, name=name,
parent="ffff:", action="drop", classid=1)
def config_maps(self):
# pem just relays packets between VM and its corresponding
# slave link in the bridge interface
ns1_ifindex = self.ns1_eth_out.index
ns2_ifindex = self.ns2_eth_out.index
br1_ifindex = ipdb.interfaces[self.veth_br1_2_pem].index
br2_ifindex = ipdb.interfaces[self.veth_br2_2_pem].index
self.pem_dest[c_uint(ns1_ifindex)] = c_uint(br1_ifindex)
self.pem_dest[c_uint(br1_ifindex)] = c_uint(ns1_ifindex)
self.pem_dest[c_uint(ns2_ifindex)] = c_uint(br2_ifindex)
self.pem_dest[c_uint(br2_ifindex)] = c_uint(ns2_ifindex)
# tc filter setup with bpf programs attached
self.attach_filter(self.veth_br1_2_pem, self.pem_fn.fd, self.pem_fn.name)
self.attach_filter(self.veth_br2_2_pem, self.pem_fn.fd, self.pem_fn.name)
@mayFail("This fails on github actions environment, and needs to be fixed")
def test_brb2(self):
try:
b = BPF(src_file=arg1.encode(), debug=0)
self.pem_fn = b.load_func(b"pem", BPF.SCHED_CLS)
self.pem_dest= b.get_table(b"pem_dest")
self.pem_stats = b.get_table(b"pem_stats")
# set up the topology
self.set_default_const()
(ns1_ipdb, self.ns1_eth_out, _) = sim._create_ns(self.ns1, ipaddr=self.vm1_ip+'/24',
fn=self.pem_fn, action='drop',
disable_ipv6=True)
(ns2_ipdb, self.ns2_eth_out, _) = sim._create_ns(self.ns2, ipaddr=self.vm2_ip+'/24',
fn=self.pem_fn, action='drop',
disable_ipv6=True)
ns1_ipdb.routes.add({'dst': self.vm2_rtr_mask, 'gateway': self.vm1_rtr_ip}).commit()
ns2_ipdb.routes.add({'dst': self.vm1_rtr_mask, 'gateway': self.vm2_rtr_ip}).commit()
(_, self.nsrtr_eth0_out, _) = sim._create_ns(self.ns_router, ipaddr=self.vm1_rtr_ip+'/24',
disable_ipv6=True)
(rt_ipdb, self.nsrtr_eth1_out, _) = sim._ns_add_ifc(self.ns_router, "eth1", "ns_router2",
ipaddr=self.vm2_rtr_ip+'/24',
disable_ipv6=True)
# enable ip forwarding in router ns
nsp = NSPopen(rt_ipdb.nl.netns, ["sysctl", "-w", "net.ipv4.ip_forward=1"])
nsp.wait(); nsp.release()
# for each VM connecting to pem, there will be a corresponding veth connecting to the bridge
self.setup_br(self.br1, self.nsrtr_eth0_out.ifname, self.veth_pem_2_br1, self.veth_br1_2_pem)
self.setup_br(self.br2, self.nsrtr_eth1_out.ifname, self.veth_pem_2_br2, self.veth_br2_2_pem)
# load the program and configure maps
self.config_maps()
# ping
nsp = NSPopen(ns1_ipdb.nl.netns, ["ping", self.vm2_ip, "-c", "2"]); nsp.wait(); nsp.release()
# one arp request/reply, 2 icmp request/reply per VM, total 6 packets per VM, 12 packets total
self.assertEqual(self.pem_stats[c_uint(0)].value, 12)
nsp_server = NSPopenWithCheck(ns2_ipdb.nl.netns, ["iperf", "-s", "-xSC"])
sleep(1)
nsp = NSPopen(ns1_ipdb.nl.netns, ["iperf", "-c", self.vm2_ip, "-t", "1", "-xSC"])
nsp.wait(); nsp.release()
nsp_server.kill(); nsp_server.wait(); nsp_server.release()
nsp_server = NSPopenWithCheck(ns2_ipdb.nl.netns, ["netserver", "-D"])
sleep(1)
nsp = NSPopenWithCheck(ns1_ipdb.nl.netns, ["netperf", "-l", "1", "-H", self.vm2_ip, "--", "-m", "65160"])
nsp.wait(); nsp.release()
nsp = NSPopen(ns1_ipdb.nl.netns, ["netperf", "-l", "1", "-H", self.vm2_ip, "-t", "TCP_RR"])
nsp.wait(); nsp.release()
nsp_server.kill(); nsp_server.wait(); nsp_server.release()
finally:
if self.br1 in ipdb.interfaces: ipdb.interfaces[self.br1].remove().commit()
if self.br2 in ipdb.interfaces: ipdb.interfaces[self.br2].remove().commit()
if self.veth_pem_2_br1 in ipdb.interfaces: ipdb.interfaces[self.veth_pem_2_br1].remove().commit()
if self.veth_pem_2_br2 in ipdb.interfaces: ipdb.interfaces[self.veth_pem_2_br2].remove().commit()
sim.release()
ipdb.release()
if __name__ == "__main__":
main()
|
b0fd3399970f04a0ef4a665850c23d3b3c5856f9
|
96dcea595e7c16cec07b3f649afd65f3660a0bad
|
/homeassistant/components/rympro/const.py
|
ed7e2801a1b9f6fb7ad644a40b0ddf48675615c0
|
[
"Apache-2.0"
] |
permissive
|
home-assistant/core
|
3455eac2e9d925c92d30178643b1aaccf3a6484f
|
80caeafcb5b6e2f9da192d0ea6dd1a5b8244b743
|
refs/heads/dev
| 2023-08-31T15:41:06.299469
| 2023-08-31T14:50:53
| 2023-08-31T14:50:53
| 12,888,993
| 35,501
| 20,617
|
Apache-2.0
| 2023-09-14T21:50:15
| 2013-09-17T07:29:48
|
Python
|
UTF-8
|
Python
| false
| false
| 76
|
py
|
const.py
|
"""Constants for the Read Your Meter Pro integration."""
DOMAIN = "rympro"
|
2c8a1b67d960953eabab897c2de869d2eb79e785
|
019f03d6713a2bc5344b644aeb5ebe70aaf7cfd0
|
/src/super_gradients/common/data_interface/__init__.py
|
24e57e03122134b2334c458ab581dd8445562567
|
[
"LicenseRef-scancode-proprietary-license",
"Apache-2.0"
] |
permissive
|
Deci-AI/super-gradients
|
6f52cd15bc2f9f39e3cdc6067292b6512aba5dd0
|
7240726cf6425b53a26ed2faec03672f30fee6be
|
refs/heads/master
| 2023-08-25T17:47:02.595029
| 2023-08-24T11:50:50
| 2023-08-24T11:50:50
| 432,652,408
| 3,237
| 331
|
Apache-2.0
| 2023-09-14T11:24:46
| 2021-11-28T07:58:02
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 325
|
py
|
__init__.py
|
# PACKAGE IMPORTS FOR EXTERNAL USAGE
from super_gradients.common.data_interface.dataset_data_interface import DatasetDataInterface
from super_gradients.common.data_interface.adnn_model_repository_data_interface import ADNNModelRepositoryDataInterfaces
__all__ = ["DatasetDataInterface", "ADNNModelRepositoryDataInterfaces"]
|
7b33b31243e543495692e03acfd29b5883df7170
|
5c363c50c54175a982330ec888401b3e394373ab
|
/benchmarking/nursery/othpo/plotting/collect_yahpo_evaluations_for_plotting.py
|
253ba69ff7023850e3e9d15df18300c2c2aa6106
|
[
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
awslabs/syne-tune
|
b14fb008f63def6a172bea6cc451f4e1906647f5
|
c35686e1b5947d45384fd1d41a44e013da53ef43
|
refs/heads/main
| 2023-08-14T14:21:48.995716
| 2023-08-03T12:57:13
| 2023-08-03T12:57:13
| 417,499,108
| 313
| 47
|
Apache-2.0
| 2023-09-14T14:06:54
| 2021-10-15T12:54:45
|
Python
|
UTF-8
|
Python
| false
| false
| 4,206
|
py
|
collect_yahpo_evaluations_for_plotting.py
|
# Copyright 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
# Generating hp evaluations
import os
import pandas as pd
import numpy as np
from yahpo_gym import BenchmarkSet, local_config
from syne_tune.blackbox_repository.conversion_scripts.utils import (
repository_path,
blackbox_local_path,
)
from syne_tune.blackbox_repository.conversion_scripts.scripts.yahpo_import import (
serialize_yahpo,
)
hp_names = {
"rbv2_svm": [
"cost",
"kernel",
"num.impute.selected.cpo",
"tolerance",
"gamma",
"degree",
],
"rbv2_aknn": [
"M",
"distance",
"ef",
"ef_construction",
"k",
"num.impute.selected.cpo",
],
"rbv2_ranger": [
"num.trees",
"sample.fraction",
"mtry.power",
"respect.unordered.factors",
"min.node.size",
"splitrule",
"num.impute.selected.cpo",
],
"rbv2_glmnet": ["alpha", "s", "num.impute.selected.cpo"],
"lcbench": [
"batch_size",
"learning_rate",
"momentum",
"weight_decay",
"num_layers",
"max_units",
"max_dropout",
],
}
# Table 5 in the paper reports surrogate quality; we use the scenarios with good quality
# https://arxiv.org/pdf/2109.03670.pdf
# For the rbv2 benchmarks, refer to https://rdrr.io/cran/kernlab/man/ksvm.html
def hp_values_hash(scenario, hp_dict):
return {hp_name: hp_dict.get(hp_name, None) for hp_name in hp_names[scenario]}
def get_rbv_result(scenario, benchmark, hp, trainsize):
hp["trainsize"] = trainsize
f1_list = []
auc_list = []
acc_list = []
for repl in range(1, 11):
hp["repl"] = repl
metric_dict = benchmark.objective_function(hp)[0]
acc_list.append(metric_dict["acc"])
f1_list.append(metric_dict["f1"])
auc_list.append(metric_dict["auc"])
hp_key = hp_values_hash(scenario, hp)
return {
"hp_key": hp_key,
"train_frac": trainsize,
"f1": np.mean(f1_list),
"f1_std": np.std(f1_list),
"acc": np.mean(acc_list),
"acc_std": np.std(acc_list),
"auc": np.mean(auc_list),
"auc_std": np.std(auc_list),
}
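# Illustrative note, not part of the original script: get_rbv_result averages
# the acc/f1/auc surrogate predictions over replications 1..10 for a single
# hyperparameter configuration and train fraction, and returns one flat dict
# per (hp, trainsize) pair, e.g. (values are hypothetical):
#
#   {"hp_key": {...}, "train_frac": 0.25,
#    "f1": 0.91, "f1_std": 0.01, "acc": 0.93, "acc_std": 0.01,
#    "auc": 0.95, "auc_std": 0.01}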
scenarios = ["rbv2_svm", "rbv2_aknn", "rbv2_ranger", "rbv2_glmnet"]
def run():
for scenario in scenarios:
b = BenchmarkSet(scenario=scenario)
print(b.targets)
print(b.instances)
config_space = b.get_opt_space(drop_fidelity_params=True)
config_space.seed(666)
hps = config_space.sample_configuration(3000)
for instance in b.instances[:10]:
b.set_instance(instance)
results = []
for hp in hps:
hp = hp.get_dictionary()
# Evaluate the configuration
for trainsize in [0.05, 0.25, 0.5, 0.75, 1.0]:
result = get_rbv_result(scenario, b, hp, trainsize)
results.append(result)
if not os.path.exists("yahpo_data"):
os.makedirs("yahpo_data")
if not os.path.exists("yahpo_data/" + scenario):
os.makedirs("yahpo_data/" + scenario)
pd.DataFrame(results).to_csv(
f"yahpo_data/{scenario}/{instance}.csv", index=False
)
if __name__ == "__main__":
local_config.init_config()
local_config.set_data_path(str(repository_path / "yahpo"))
for scenario in scenarios:
# Use syne-tune to download yahpo data
scenario_long = "yahpo-" + scenario
serialize_yahpo(
scenario_long, target_path=blackbox_local_path(name=scenario_long)
)
run()
|
c75aa28e875d510d9e5a9efe3fc37c41ac616c44
|
6b27c39edc10b1353104043b7a523f4981c99ef2
|
/pytype/pytd/main.py
|
bacf4f267739730db0768f8d9b51280adebf189d
|
[
"Apache-2.0",
"MIT"
] |
permissive
|
google/pytype
|
ad0ff0b6c1083b4f0a1af1747869d422f2b5f4d8
|
bda0b9547af9a084bb2bd1427f58dcde968e48b5
|
refs/heads/main
| 2023-08-26T17:52:23.546035
| 2023-08-24T22:48:00
| 2023-08-24T22:48:00
| 32,483,713
| 4,595
| 367
|
NOASSERTION
| 2023-09-13T04:40:45
| 2015-03-18T20:52:08
|
Python
|
UTF-8
|
Python
| false
| false
| 3,389
|
py
|
main.py
|
"""Tool for processing pytd files.
pytd is a type declaration language for Python. Each .py file can have an
accompanying .pytd file that specifies classes, argument types, return types
and exceptions.
This binary processes pytd files, typically to optimize them.
Usage:
pytd_tool [flags] <inputfile> <outputfile>
"""
import argparse
import sys
from pytype import utils
from pytype.imports import builtin_stubs
from pytype.pyi import parser
from pytype.pytd import optimize
from pytype.pytd import pytd_utils
def make_parser():
"""Use argparse to make a parser for command line options."""
o = argparse.ArgumentParser(
usage="%(prog)s [options] infile.pytd [outfile.pytd]")
# Input and output filenames
o.add_argument("input", help="File to process")
o.add_argument("output", nargs="?",
help=("Output file (or - for stdout). If output is omitted, "
"the input file will be checked for errors."))
o.add_argument(
"-O", "--optimize", action="store_true",
dest="optimize", default=False,
help="Optimize pytd file.")
o.add_argument(
"--lossy", action="store_true",
dest="lossy", default=False,
help="Allow lossy optimizations, such as merging classes.")
o.add_argument(
"--max-union", type=int, action="store",
dest="max_union", default=4,
help="Maximum number of objects in an 'or' clause.\nUse with --lossy.")
o.add_argument(
"--use-abcs", action="store_true",
dest="use_abcs", default=False,
help="Inject abstract bases classes for type merging.\nUse with --lossy.")
o.add_argument(
"--remove-mutable", action="store_true",
dest="remove_mutable", default=False,
help="Remove mutable parameters.")
o.add_argument(
"-V", "--python_version", type=str, action="store",
dest="python_version", default=None,
help=("Python version to target (\"major.minor\", e.g. \"3.10\")"))
o.add_argument(
"--multiline-args", action="store_true",
dest="multiline_args", default=False,
help="Print function arguments one to a line.")
return o
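# Illustrative sketch, not part of the original tool: exercising the parser
# built above on a hypothetical command line.
#
#   opts = make_parser().parse_args(["foo.pytd", "out.pytd", "-O", "--lossy"])
#   assert opts.optimize and opts.lossy and opts.max_union == 4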
def main():
argument_parser = make_parser()
opts = argument_parser.parse_args()
if opts.python_version:
python_version = utils.version_from_string(opts.python_version)
else:
python_version = sys.version_info[:2]
try:
utils.validate_version(python_version)
except utils.UsageError as e:
sys.stderr.write(f"Usage error: {e}\n")
sys.exit(1)
options = parser.PyiOptions(python_version=python_version)
with open(opts.input) as fi:
sourcecode = fi.read()
try:
parsed = parser.parse_string(
sourcecode, filename=opts.input, options=options)
except parser.ParseError as e:
sys.stderr.write(str(e))
sys.exit(1)
if opts.optimize:
parsed = optimize.Optimize(
parsed,
pytd_utils.Concat(*builtin_stubs.GetBuiltinsAndTyping(options)),
lossy=opts.lossy,
use_abcs=opts.use_abcs,
max_union=opts.max_union,
remove_mutable=opts.remove_mutable,
can_do_lookup=False)
if opts.output is not None:
out_text = pytd_utils.Print(parsed, opts.multiline_args)
if opts.output == "-":
sys.stdout.write(out_text)
else:
with open(opts.output, "w") as out:
out.write(out_text)
if __name__ == "__main__":
main()
|
8fb06f716de4c92bf35e911ba57c2a8e298d74a8
|
010279e2ba272d09e9d2c4e903722e5faba2cf7a
|
/contrib/python/scipy/py3/scipy/integrate/_ivp/dop853_coefficients.py
|
f39f2f3650d321e2c475d4e220f9769139118a5e
|
[
"Apache-2.0",
"Python-2.0",
"LicenseRef-scancode-unknown-license-reference",
"MIT",
"Qhull",
"BSD-3-Clause",
"BSL-1.0",
"BSD-2-Clause"
] |
permissive
|
catboost/catboost
|
854c1a1f439a96f1ae6b48e16644be20aa04dba2
|
f5042e35b945aded77b23470ead62d7eacefde92
|
refs/heads/master
| 2023-09-01T12:14:14.174108
| 2023-09-01T10:01:01
| 2023-09-01T10:22:12
| 97,556,265
| 8,012
| 1,425
|
Apache-2.0
| 2023-09-11T03:32:32
| 2017-07-18T05:29:04
|
Python
|
UTF-8
|
Python
| false
| false
| 7,237
|
py
|
dop853_coefficients.py
|
import numpy as np
N_STAGES = 12
N_STAGES_EXTENDED = 16
INTERPOLATOR_POWER = 7
C = np.array([0.0,
0.526001519587677318785587544488e-01,
0.789002279381515978178381316732e-01,
0.118350341907227396726757197510,
0.281649658092772603273242802490,
0.333333333333333333333333333333,
0.25,
0.307692307692307692307692307692,
0.651282051282051282051282051282,
0.6,
0.857142857142857142857142857142,
1.0,
1.0,
0.1,
0.2,
0.777777777777777777777777777778])
A = np.zeros((N_STAGES_EXTENDED, N_STAGES_EXTENDED))
A[1, 0] = 5.26001519587677318785587544488e-2
A[2, 0] = 1.97250569845378994544595329183e-2
A[2, 1] = 5.91751709536136983633785987549e-2
A[3, 0] = 2.95875854768068491816892993775e-2
A[3, 2] = 8.87627564304205475450678981324e-2
A[4, 0] = 2.41365134159266685502369798665e-1
A[4, 2] = -8.84549479328286085344864962717e-1
A[4, 3] = 9.24834003261792003115737966543e-1
A[5, 0] = 3.7037037037037037037037037037e-2
A[5, 3] = 1.70828608729473871279604482173e-1
A[5, 4] = 1.25467687566822425016691814123e-1
A[6, 0] = 3.7109375e-2
A[6, 3] = 1.70252211019544039314978060272e-1
A[6, 4] = 6.02165389804559606850219397283e-2
A[6, 5] = -1.7578125e-2
A[7, 0] = 3.70920001185047927108779319836e-2
A[7, 3] = 1.70383925712239993810214054705e-1
A[7, 4] = 1.07262030446373284651809199168e-1
A[7, 5] = -1.53194377486244017527936158236e-2
A[7, 6] = 8.27378916381402288758473766002e-3
A[8, 0] = 6.24110958716075717114429577812e-1
A[8, 3] = -3.36089262944694129406857109825
A[8, 4] = -8.68219346841726006818189891453e-1
A[8, 5] = 2.75920996994467083049415600797e1
A[8, 6] = 2.01540675504778934086186788979e1
A[8, 7] = -4.34898841810699588477366255144e1
A[9, 0] = 4.77662536438264365890433908527e-1
A[9, 3] = -2.48811461997166764192642586468
A[9, 4] = -5.90290826836842996371446475743e-1
A[9, 5] = 2.12300514481811942347288949897e1
A[9, 6] = 1.52792336328824235832596922938e1
A[9, 7] = -3.32882109689848629194453265587e1
A[9, 8] = -2.03312017085086261358222928593e-2
A[10, 0] = -9.3714243008598732571704021658e-1
A[10, 3] = 5.18637242884406370830023853209
A[10, 4] = 1.09143734899672957818500254654
A[10, 5] = -8.14978701074692612513997267357
A[10, 6] = -1.85200656599969598641566180701e1
A[10, 7] = 2.27394870993505042818970056734e1
A[10, 8] = 2.49360555267965238987089396762
A[10, 9] = -3.0467644718982195003823669022
A[11, 0] = 2.27331014751653820792359768449
A[11, 3] = -1.05344954667372501984066689879e1
A[11, 4] = -2.00087205822486249909675718444
A[11, 5] = -1.79589318631187989172765950534e1
A[11, 6] = 2.79488845294199600508499808837e1
A[11, 7] = -2.85899827713502369474065508674
A[11, 8] = -8.87285693353062954433549289258
A[11, 9] = 1.23605671757943030647266201528e1
A[11, 10] = 6.43392746015763530355970484046e-1
A[12, 0] = 5.42937341165687622380535766363e-2
A[12, 5] = 4.45031289275240888144113950566
A[12, 6] = 1.89151789931450038304281599044
A[12, 7] = -5.8012039600105847814672114227
A[12, 8] = 3.1116436695781989440891606237e-1
A[12, 9] = -1.52160949662516078556178806805e-1
A[12, 10] = 2.01365400804030348374776537501e-1
A[12, 11] = 4.47106157277725905176885569043e-2
A[13, 0] = 5.61675022830479523392909219681e-2
A[13, 6] = 2.53500210216624811088794765333e-1
A[13, 7] = -2.46239037470802489917441475441e-1
A[13, 8] = -1.24191423263816360469010140626e-1
A[13, 9] = 1.5329179827876569731206322685e-1
A[13, 10] = 8.20105229563468988491666602057e-3
A[13, 11] = 7.56789766054569976138603589584e-3
A[13, 12] = -8.298e-3
A[14, 0] = 3.18346481635021405060768473261e-2
A[14, 5] = 2.83009096723667755288322961402e-2
A[14, 6] = 5.35419883074385676223797384372e-2
A[14, 7] = -5.49237485713909884646569340306e-2
A[14, 10] = -1.08347328697249322858509316994e-4
A[14, 11] = 3.82571090835658412954920192323e-4
A[14, 12] = -3.40465008687404560802977114492e-4
A[14, 13] = 1.41312443674632500278074618366e-1
A[15, 0] = -4.28896301583791923408573538692e-1
A[15, 5] = -4.69762141536116384314449447206
A[15, 6] = 7.68342119606259904184240953878
A[15, 7] = 4.06898981839711007970213554331
A[15, 8] = 3.56727187455281109270669543021e-1
A[15, 12] = -1.39902416515901462129418009734e-3
A[15, 13] = 2.9475147891527723389556272149
A[15, 14] = -9.15095847217987001081870187138
B = A[N_STAGES, :N_STAGES]
E3 = np.zeros(N_STAGES + 1)
E3[:-1] = B.copy()
E3[0] -= 0.244094488188976377952755905512
E3[8] -= 0.733846688281611857341361741547
E3[11] -= 0.220588235294117647058823529412e-1
E5 = np.zeros(N_STAGES + 1)
E5[0] = 0.1312004499419488073250102996e-1
E5[5] = -0.1225156446376204440720569753e+1
E5[6] = -0.4957589496572501915214079952
E5[7] = 0.1664377182454986536961530415e+1
E5[8] = -0.3503288487499736816886487290
E5[9] = 0.3341791187130174790297318841
E5[10] = 0.8192320648511571246570742613e-1
E5[11] = -0.2235530786388629525884427845e-1
# First 3 coefficients are computed separately.
D = np.zeros((INTERPOLATOR_POWER - 3, N_STAGES_EXTENDED))
D[0, 0] = -0.84289382761090128651353491142e+1
D[0, 5] = 0.56671495351937776962531783590
D[0, 6] = -0.30689499459498916912797304727e+1
D[0, 7] = 0.23846676565120698287728149680e+1
D[0, 8] = 0.21170345824450282767155149946e+1
D[0, 9] = -0.87139158377797299206789907490
D[0, 10] = 0.22404374302607882758541771650e+1
D[0, 11] = 0.63157877876946881815570249290
D[0, 12] = -0.88990336451333310820698117400e-1
D[0, 13] = 0.18148505520854727256656404962e+2
D[0, 14] = -0.91946323924783554000451984436e+1
D[0, 15] = -0.44360363875948939664310572000e+1
D[1, 0] = 0.10427508642579134603413151009e+2
D[1, 5] = 0.24228349177525818288430175319e+3
D[1, 6] = 0.16520045171727028198505394887e+3
D[1, 7] = -0.37454675472269020279518312152e+3
D[1, 8] = -0.22113666853125306036270938578e+2
D[1, 9] = 0.77334326684722638389603898808e+1
D[1, 10] = -0.30674084731089398182061213626e+2
D[1, 11] = -0.93321305264302278729567221706e+1
D[1, 12] = 0.15697238121770843886131091075e+2
D[1, 13] = -0.31139403219565177677282850411e+2
D[1, 14] = -0.93529243588444783865713862664e+1
D[1, 15] = 0.35816841486394083752465898540e+2
D[2, 0] = 0.19985053242002433820987653617e+2
D[2, 5] = -0.38703730874935176555105901742e+3
D[2, 6] = -0.18917813819516756882830838328e+3
D[2, 7] = 0.52780815920542364900561016686e+3
D[2, 8] = -0.11573902539959630126141871134e+2
D[2, 9] = 0.68812326946963000169666922661e+1
D[2, 10] = -0.10006050966910838403183860980e+1
D[2, 11] = 0.77771377980534432092869265740
D[2, 12] = -0.27782057523535084065932004339e+1
D[2, 13] = -0.60196695231264120758267380846e+2
D[2, 14] = 0.84320405506677161018159903784e+2
D[2, 15] = 0.11992291136182789328035130030e+2
D[3, 0] = -0.25693933462703749003312586129e+2
D[3, 5] = -0.15418974869023643374053993627e+3
D[3, 6] = -0.23152937917604549567536039109e+3
D[3, 7] = 0.35763911791061412378285349910e+3
D[3, 8] = 0.93405324183624310003907691704e+2
D[3, 9] = -0.37458323136451633156875139351e+2
D[3, 10] = 0.10409964950896230045147246184e+3
D[3, 11] = 0.29840293426660503123344363579e+2
D[3, 12] = -0.43533456590011143754432175058e+2
D[3, 13] = 0.96324553959188282948394950600e+2
D[3, 14] = -0.39177261675615439165231486172e+2
D[3, 15] = -0.14972683625798562581422125276e+3
|
e003123a0b611afb903c5aadf36d0bf40d46506b
|
b097b7caa954a0447bef9a7144e15fbc1b08a96b
|
/test/torchaudio_unittest/assets/wav2vec2/huggingface/generate_huggingface_model_config.py
|
68f15a6e70437c6220ffc995239beb218a228cb8
|
[
"CC-BY-NC-4.0",
"BSD-2-Clause",
"CC-BY-4.0"
] |
permissive
|
pytorch/audio
|
3fa7006404020c9ce731f27b94f0257195d2efe3
|
e057d7d144e2716588b80255f0a143662fd5c10d
|
refs/heads/main
| 2023-09-03T15:46:06.918708
| 2023-09-02T00:39:15
| 2023-09-02T00:39:15
| 90,321,822
| 2,319
| 675
|
BSD-2-Clause
| 2023-09-13T22:09:20
| 2017-05-05T00:38:05
|
Python
|
UTF-8
|
Python
| false
| false
| 1,041
|
py
|
generate_huggingface_model_config.py
|
import json
import os
from transformers import Wav2Vec2Model
_THIS_DIR = os.path.dirname(os.path.abspath(__file__))
def _main():
keys = [
# pretrained
"facebook/wav2vec2-base",
"facebook/wav2vec2-large",
"facebook/wav2vec2-large-lv60",
"facebook/wav2vec2-base-10k-voxpopuli",
"facebook/wav2vec2-large-xlsr-53",
# finetuned
"facebook/wav2vec2-base-960h",
"facebook/wav2vec2-large-960h",
"facebook/wav2vec2-large-960h-lv60",
"facebook/wav2vec2-large-960h-lv60-self",
"facebook/wav2vec2-large-xlsr-53-german",
]
for key in keys:
path = os.path.join(_THIS_DIR, f"{key}.json")
print("Generating ", path)
cfg = Wav2Vec2Model.from_pretrained(key).config
cfg = json.loads(cfg.to_json_string())
del cfg["_name_or_path"]
with open(path, "w") as file_:
file_.write(json.dumps(cfg, indent=4, sort_keys=True))
file_.write("\n")
if __name__ == "__main__":
_main()
|
09d31ee9072ca924ffb2597ab49a201ec1f9305e
|
baa2c6f22ff563d417e34692bf3345077eb8fa5f
|
/IPython/core/tests/test_extension.py
|
24ecf7e97e3e56ea51327cc4704ff1fa749c15aa
|
[
"BSD-3-Clause"
] |
permissive
|
ipython/ipython
|
c42ea223b6e391bb7dd39888cb959d4d5d6b21a1
|
e5103f971233fd66b558585cce7a4f52a716cd56
|
refs/heads/main
| 2023-08-30T18:27:18.436521
| 2023-08-29T12:16:00
| 2023-08-29T12:16:00
| 658,518
| 13,673
| 4,729
|
BSD-3-Clause
| 2023-09-12T20:22:09
| 2010-05-10T04:46:06
|
Python
|
UTF-8
|
Python
| false
| false
| 3,026
|
py
|
test_extension.py
|
import os.path
from tempfile import TemporaryDirectory
import IPython.testing.tools as tt
from IPython.utils.syspathcontext import prepended_to_syspath
ext1_content = """
def load_ipython_extension(ip):
print("Running ext1 load")
def unload_ipython_extension(ip):
print("Running ext1 unload")
"""
ext2_content = """
def load_ipython_extension(ip):
print("Running ext2 load")
"""
ext3_content = """
def load_ipython_extension(ip):
ip2 = get_ipython()
print(ip is ip2)
"""
def test_extension_loading():
em = get_ipython().extension_manager
with TemporaryDirectory() as td:
ext1 = os.path.join(td, "ext1.py")
with open(ext1, "w", encoding="utf-8") as f:
f.write(ext1_content)
ext2 = os.path.join(td, "ext2.py")
with open(ext2, "w", encoding="utf-8") as f:
f.write(ext2_content)
with prepended_to_syspath(td):
assert 'ext1' not in em.loaded
assert 'ext2' not in em.loaded
# Load extension
with tt.AssertPrints("Running ext1 load"):
assert em.load_extension('ext1') is None
assert 'ext1' in em.loaded
# Should refuse to load it again
with tt.AssertNotPrints("Running ext1 load"):
assert em.load_extension('ext1') == 'already loaded'
# Reload
with tt.AssertPrints("Running ext1 unload"):
with tt.AssertPrints("Running ext1 load", suppress=False):
em.reload_extension('ext1')
# Unload
with tt.AssertPrints("Running ext1 unload"):
assert em.unload_extension('ext1') is None
# Can't unload again
with tt.AssertNotPrints("Running ext1 unload"):
assert em.unload_extension('ext1') == 'not loaded'
assert em.unload_extension('ext2') == 'not loaded'
# Load extension 2
with tt.AssertPrints("Running ext2 load"):
assert em.load_extension('ext2') is None
# Can't unload this
assert em.unload_extension('ext2') == 'no unload function'
# But can reload it
with tt.AssertPrints("Running ext2 load"):
em.reload_extension('ext2')
def test_extension_builtins():
em = get_ipython().extension_manager
with TemporaryDirectory() as td:
ext3 = os.path.join(td, "ext3.py")
with open(ext3, "w", encoding="utf-8") as f:
f.write(ext3_content)
assert 'ext3' not in em.loaded
with prepended_to_syspath(td):
# Load extension
with tt.AssertPrints("True"):
assert em.load_extension('ext3') is None
assert 'ext3' in em.loaded
def test_non_extension():
em = get_ipython().extension_manager
assert em.load_extension("sys") == "no load function"
|
f25edf20dfc3be5a15a7d5781ee1a942a22260f2
|
2d9a3ce2a04190d0032e8a298829022260b1d76b
|
/indra/sources/omnipath/processor.py
|
02bac3709514fa4f6001e4b6b77ffb94ee010653
|
[
"BSD-2-Clause",
"BSD-2-Clause-Views"
] |
permissive
|
sorgerlab/indra
|
f127a0f9bdd2d3f48df14575883fd31e2f4de4bf
|
6d6ca1174792b6c5a05cbf3afcb9f138fabcec6a
|
refs/heads/master
| 2023-08-21T13:25:54.654995
| 2023-06-11T16:46:41
| 2023-06-11T16:46:41
| 22,848,436
| 158
| 61
|
BSD-2-Clause
| 2023-08-30T21:47:59
| 2014-08-11T17:44:05
|
Python
|
UTF-8
|
Python
| false
| false
| 8,626
|
py
|
processor.py
|
from __future__ import unicode_literals
import copy
import logging
from indra.statements.validate import validate_text_refs
from indra.ontology.standardize import standardize_agent_name
from indra.statements import modtype_to_modclass, Agent, Evidence, Complex, \
get_statement_by_name as stmt_by_name, BoundCondition
logger = logging.getLogger(__name__)
ignore_srcs = [db.lower() for db in ['NetPath', 'SIGNOR', 'ProtMapper',
'BioGRID', 'HPRD-phos', 'phosphoELM']]
class OmniPathProcessor(object):
"""Class to process OmniPath JSON into INDRA Statements."""
def __init__(self, ptm_json=None, ligrec_json=None):
self.statements = []
self.ptm_json = ptm_json
self.ligrec_json = ligrec_json
def process_ptm_mods(self):
"""Process ptm json if present"""
if self.ptm_json:
self.statements += self._stmts_from_op_mods(self.ptm_json)
def process_ligrec_interactions(self):
"""Process ligand-receptor json if present"""
if self.ligrec_json:
self.statements += self._stmt_from_op_lr(self.ligrec_json)
def _stmts_from_op_mods(self, ptm_json):
"""Build Modification Statements from a list of Omnipath PTM entries
"""
ptm_stmts = []
unhandled_mod_types = []
annot_ignore = {'enzyme', 'substrate', 'residue_type',
'residue_offset', 'references', 'modification'}
if ptm_json is None:
return []
for mod_entry in ptm_json:
# Skip entries without references
if not mod_entry['references']:
continue
enz = self._agent_from_up_id(mod_entry['enzyme'])
sub = self._agent_from_up_id(mod_entry['substrate'])
res = mod_entry['residue_type']
pos = mod_entry['residue_offset']
evidence = []
for source_pmid in mod_entry['references']:
source_db, pmid_ref = source_pmid.split(':', 1)
# Skip evidence from already known sources
if source_db.lower() in ignore_srcs:
continue
if 'pmc' in pmid_ref.lower():
text_refs = {'PMCID': pmid_ref.split('/')[-1]}
pmid = None
elif not validate_text_refs({'PMID': pmid_ref}):
pmid = None
text_refs = None
else:
pmid = pmid_ref
text_refs = {'PMID': pmid}
evidence.append(Evidence(
source_api='omnipath',
source_id=source_db,
pmid=pmid,
text_refs=text_refs,
annotations={k: v for k, v in mod_entry.items() if k not
in annot_ignore}
))
mod_type = mod_entry['modification']
modclass = modtype_to_modclass.get(mod_type)
if modclass is None:
unhandled_mod_types.append(mod_type)
continue
else:
# All evidences filtered out
if not evidence:
continue
stmt = modclass(enz, sub, res, pos, evidence)
ptm_stmts.append(stmt)
return ptm_stmts
def _stmt_from_op_lr(self, ligrec_json):
"""Make ligand-receptor Complexes from Omnipath API interactions db"""
ligrec_stmts = []
ign_annot = {'source_sub_id', 'source', 'target', 'references'}
no_refs = 0
bad_pmid = 0
no_consensus = 0
if ligrec_json is None:
return ligrec_stmts
for lr_entry in ligrec_json:
if not lr_entry['references']:
no_refs += 1
continue
if len(lr_entry['sources']) == 1 and \
lr_entry['sources'][0].lower() in ignore_srcs:
continue
# Assemble evidence
evidence = []
for source_pmid in lr_entry['references']:
source_db, pmid = source_pmid.split(':')
# Skip evidence from already known sources
if source_db.lower() in ignore_srcs:
continue
if len(pmid) > 8:
bad_pmid += 1
continue
annot = {k: v for k, v in lr_entry.items() if k not in
ign_annot}
annot['source_sub_id'] = source_db
evidence.append(Evidence(source_api='omnipath', pmid=pmid,
annotations=annot))
# Get statements if we have evidences
if evidence:
# Get complexes
ligrec_stmts.append(self._get_op_complex(lr_entry['source'],
lr_entry['target'],
evidence))
# On consensus, make Activations or Inhibitions as well
if bool(lr_entry['consensus_stimulation']) ^ \
bool(lr_entry['consensus_inhibition']):
activation = True if lr_entry['consensus_stimulation'] else \
False
ligrec_stmts.append(self._get_ligrec_regs(
lr_entry['source'], lr_entry['target'],
# Make sure we decouple evidences from the above
copy.deepcopy(evidence),
activation=activation))
elif lr_entry['consensus_stimulation'] and \
lr_entry['consensus_inhibition']:
no_consensus += 1
# All evidences were filtered out
else:
no_refs += 1
if no_refs:
logger.warning(f'{no_refs} entries without references were '
f'skipped')
if bad_pmid:
logger.warning(f'{bad_pmid} references with bad pmids were '
f'skipped')
if no_consensus:
logger.warning(f'{no_consensus} entries with conflicting '
f'regulation were skipped')
return ligrec_stmts
@staticmethod
def _agent_from_up_id(up_id):
"""Build an Agent object from a Uniprot ID. Adds db_refs for both
Uniprot and HGNC where available."""
db_refs = {'UP': up_id}
ag = Agent(up_id, db_refs=db_refs)
standardize_agent_name(ag)
return ag
def _bc_agent_from_up_list(self, up_id_list):
# Return the first agent with the remaining agents as a bound condition
agents_list = [self._agent_from_up_id(up_id) for up_id in up_id_list]
agent = agents_list[0]
agent.bound_conditions = \
[BoundCondition(a, True) for a in agents_list[1:]]
return agent
def _complex_agents_from_op_complex(self, up_id_str):
"""Return a list of agents from a string containing multiple UP ids
"""
# Get agents
if 'complex' in up_id_str.lower():
up_id_list = [up for up in up_id_str.split(':')[1].split('_')]
else:
up_id_list = [up_id_str]
return [self._agent_from_up_id(up_id) for up_id in up_id_list]
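    # Illustrative sketch (hypothetical ids, not taken from the OmniPath data):
    # a complex id string such as "COMPLEX:P12345_Q67890" is split on ':' and '_'
    # so that every UniProt id becomes its own Agent, e.g.
    #
    #     self._complex_agents_from_op_complex("COMPLEX:P12345_Q67890")
    #     # -> [Agent for P12345, Agent for Q67890]
    #
    # while a plain id such as "P12345" is wrapped in a single-element list.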
def _get_op_complex(self, source, target, evidence_list):
ag_list = self._complex_agents_from_op_complex(source) + \
self._complex_agents_from_op_complex(target)
return Complex(members=ag_list,
evidence=evidence_list)
def _get_ligrec_regs(self, source, target, evidence_list, activation=True):
# Check if any of the agents is a complex
# Source
if 'complex' in source.lower():
# Make bound condition agent
up_id_list = [up for up in source.split(':')[1].split('_')]
subj = self._bc_agent_from_up_list(up_id_list)
else:
subj = self._agent_from_up_id(source)
# Target
if 'complex' in target.lower():
# Make bound condition agent
up_id_list = [up for up in target.split(':')[1].split('_')]
obj = self._bc_agent_from_up_list(up_id_list)
else:
obj = self._agent_from_up_id(target)
# Regular case:
Regulation = stmt_by_name('activation') if activation else \
stmt_by_name('inhibition')
regulation = Regulation(subj=subj, obj=obj, evidence=evidence_list)
return regulation
|
0595f567e8d4ffc9219bdd86b191bd25d710de83
|
62f756687af4f522b78e4963a6d2262136a9b65d
|
/tests/import/import3a.py
|
2e9d41f71dbdfabb08e12c1cf94ec8dc26db616b
|
[
"MIT"
] |
permissive
|
micropython/micropython-esp32
|
48ce07832c8d44fcdc530962bd9ae087949b957b
|
2f4dac5f121a59fc187c1d9c1f9eade365b3aba1
|
refs/heads/esp32
| 2023-08-28T12:00:44.867727
| 2018-02-27T04:06:10
| 2018-02-27T04:06:10
| 76,828,539
| 737
| 238
|
MIT
| 2023-09-06T02:31:38
| 2016-12-19T04:38:26
|
C
|
UTF-8
|
Python
| false
| false
| 34
|
py
|
import3a.py
|
from import1b import *
print(var)
|
8ebe0bd2a1803269bae21a6695f3a2cedbc25ac7
|
3079aff5691cc3452c86c9a76c0309bf66c920a1
|
/python/finufft/examples/simple1d1.py
|
fea8e5beebfea5ed9aea7cb9fd77bda1a5f77ba4
|
[
"Apache-2.0"
] |
permissive
|
flatironinstitute/finufft
|
dd332853a8900b9ea8a5a6ad21a3842f07bc9fc8
|
0e5f3f3b3ec07ac8b7e7ccd26d88d2c912c1b9a6
|
refs/heads/master
| 2023-08-30T12:19:36.644780
| 2023-08-22T11:44:39
| 2023-08-23T10:08:35
| 78,682,506
| 209
| 80
|
NOASSERTION
| 2023-09-08T21:51:05
| 2017-01-11T21:42:14
|
C++
|
UTF-8
|
Python
| false
| false
| 821
|
py
|
simple1d1.py
|
# demo of 1D type 1 FINUFFT in python. Should stay close to docs/python.rst
# Barnett 8/19/20
import numpy as np
import finufft
import time
np.random.seed(42)
# number of nonuniform points
M = 100000
# input nonuniform points
x = 2 * np.pi * np.random.uniform(size=M)
# their complex strengths
c = (np.random.standard_normal(size=M)
+ 1J * np.random.standard_normal(size=M))
# desired number of output Fourier modes
N = 1000000
# calculate the transform
t0 = time.time()
f = finufft.nufft1d1(x, c, N, eps=1e-9)
print("finufft1d1 done in {0:.2g} s.".format(time.time()-t0))
n = 142519 # do a math check, for a single output mode index n
assert((n>=-N/2.) & (n<N/2.))
ftest = sum(c * np.exp(1.j*n*x))
err = np.abs(f[n + N // 2] - ftest) / np.max(np.abs(f))
print("Error relative to max: {0:.2e}".format(err))
|
98ddcd723497cabe7efd0b0df5957aab757bd6b0
|
9ed3b16b3da72e4c47a04f2f2e3ef395e9fd9f20
|
/main/fontconfig/template.py
|
ef769d3f2648459ec35347e23385f5a456b003c0
|
[
"BSD-2-Clause"
] |
permissive
|
chimera-linux/cports
|
fdae59dc25856942be3041e10e3533dbf8f883c3
|
714680161cd719dd047452c95fbb9b447bc23a86
|
refs/heads/master
| 2023-09-03T19:30:40.720670
| 2023-09-03T15:07:40
| 2023-09-03T15:07:40
| 374,000,317
| 118
| 37
|
BSD-2-Clause
| 2023-09-14T20:31:08
| 2021-06-05T02:07:34
|
Python
|
UTF-8
|
Python
| false
| false
| 1,067
|
py
|
template.py
|
pkgname = "fontconfig"
pkgver = "2.14.2"
pkgrel = 0
build_style = "gnu_configure"
configure_args = [
"--enable-static",
"--enable-docs",
f"--with-cache-dir=/var/cache/{pkgname}",
]
make_cmd = "gmake"
hostmakedepends = ["pkgconf", "gperf", "gmake", "python"]
makedepends = ["libexpat-devel", "freetype-bootstrap", "libuuid-devel"]
triggers = ["/usr/share/fonts/*"]
pkgdesc = "Library for configuring and customizing font access"
maintainer = "q66 <q66@chimera-linux.org>"
license = "MIT"
url = "https://www.fontconfig.org"
source = f"$(FREEDESKTOP_SITE)/{pkgname}/release/{pkgname}-{pkgver}.tar.gz"
sha256 = "3ba2dd92158718acec5caaf1a716043b5aa055c27b081d914af3ccb40dce8a55"
def post_install(self):
self.install_license("COPYING")
# reject bitmap fonts by default, preventing them from being preferred
self.install_link(
"/usr/share/fontconfig/conf.avail/70-no-bitmaps.conf",
"etc/fonts/conf.d/70-no-bitmaps.conf",
)
@subpackage("fontconfig-devel")
def _devel(self):
return self.default_devel()
configure_gen = []
|
4296a398ed34f4195e955cc1ea73729b9ec59fa5
|
76fb0a3cfc9d9362ab29174bd1d55e888ea4d7f6
|
/tfx/dsl/input_resolution/ops/group_by_lineage_op.py
|
af8ed7c02970d114dff2486ee93cc11eb483442b
|
[
"Apache-2.0"
] |
permissive
|
tensorflow/tfx
|
0cfc9c55171352ecc98c9dfa8ffe976c689d7073
|
1b328504fa08a70388691e4072df76f143631325
|
refs/heads/master
| 2023-08-30T11:56:50.894497
| 2023-08-29T22:47:19
| 2023-08-29T22:48:26
| 169,116,405
| 2,116
| 899
|
Apache-2.0
| 2023-09-14T21:51:42
| 2019-02-04T17:14:36
|
Python
|
UTF-8
|
Python
| false
| false
| 8,869
|
py
|
group_by_lineage_op.py
|
# Copyright 2023 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module for GroupByDisjointLineage operator."""
import collections
from typing import List, Iterable, Tuple
from tfx.dsl.input_resolution import resolver_op
from tfx.orchestration.portable.input_resolution import exceptions
from tfx.utils import typing_utils
from ml_metadata.proto import metadata_store_pb2
def _get_neighbor_artifact_pairs(
events: List[metadata_store_pb2.Event],
) -> Iterable[Tuple[int, int]]:
"""Gets artifact_id pair of neighbors from the list of Events.
  Artifacts a and b are considered neighbors if there exist events e1 and e2 s.t.
(e1.artifact_id = a) AND (e2.artifact_id = b) AND
(e1.execution_id = e2.execution_id)
Args:
events: A list of MLMD Events.
Yields:
Edge as a tuple (artifact_id_1, artifact_id_2).
"""
execs_by_art = collections.defaultdict(set)
arts_by_exec = collections.defaultdict(set)
for event in events:
execs_by_art[event.artifact_id].add(event.execution_id)
arts_by_exec[event.execution_id].add(event.artifact_id)
for a1 in execs_by_art:
for a2 in set.union(*[arts_by_exec[e] for e in execs_by_art[a1]]):
if a1 < a2: # Skip symmetric or self edge.
yield a1, a2
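# Illustrative sketch (hypothetical ids): if artifacts 1 and 2 both have events on
# execution 10 and artifact 3 only has events on execution 11, the generator above
# yields the single pair (1, 2); the `a1 < a2` check drops self and symmetric pairs.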
def _find_disjoint_sets(
verts: Iterable[int], edges: Iterable[Tuple[int, int]]
) -> List[List[int]]:
"""Finds disjoint sets."""
parents = {a: a for a in verts}
def find(a: int):
if parents[a] != a:
parents[a] = find(parents[a])
return parents[a]
def union(a: int, b: int):
x, y = find(a), find(b)
if x != y:
# Union in a direction that smaller number node becomes the parent node.
# By result, the root node of each disjoint set will be the one with the
# smallest number.
parents[max(x, y)] = min(x, y)
for a, b in edges:
union(a, b)
# Python dict "order is guaranteed to be insertion order" from python 3.7
# (https://docs.python.org/3/library/stdtypes.html#dict).
# As it loops over the sorted node number, and since the root node of each
# disjoint set is the one with the smallest node number, both the inner and
# the outer lists of the result would be sorted.
disjoint_sets = {}
for a in sorted(verts):
disjoint_sets.setdefault(find(a), []).append(a)
return list(disjoint_sets.values())
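# Illustrative sketch (hypothetical ids): with vertices [1, 2, 3, 4, 5] and edges
# [(1, 2), (2, 3)], union-find keeps the smallest id as the root of each set, so
#
#     _find_disjoint_sets([1, 2, 3, 4, 5], [(1, 2), (2, 3)])
#     # -> [[1, 2, 3], [4], [5]]
#
# with both the outer and the inner lists returned in sorted order.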
class GroupByDisjointLineage(
resolver_op.ResolverOp,
canonical_name='tfx.GroupByDisjointLineage',
arg_data_types=(resolver_op.DataType.ARTIFACT_MULTIMAP,),
return_data_type=resolver_op.DataType.ARTIFACT_MULTIMAP_LIST,
):
"""GroupByDisjointLineage operator.
Let's say we have a lineage of artifacts (executions omitted for brevity):
```dot
digraph {
a1 -> b1 -> c1
a2 -> b2 -> c2
a2 -> b3 -> c3
a3 -> b4 -> c4
}
```
Then `GroupByDisjointLineage` would group artifacts by each disjoint lineage
  where artifacts from different groups are not reachable from one another.
```python
GroupByDisjointLineage({
'a': [a1, a2, a3],
'b': [b1, b2, b3, b4],
'c': [c1, c2, c3, c4],
}) == [
{'a': [a1], 'b': [b1], 'c': [c1]},
{'a': [a2], 'b': [b2, b3], 'c': [c2, c3]},
{'a': [a3], 'b': [b4], 'c': [c4]},
]
```
CAVEAT: Lineage is only searched for the 2-hop distances (i.e. artifact ->
execution -> artifact), so in order to traverse for the deeper relationships,
provide the intermediate artifacts as well so that there exists a chain of
2-hop connections.
"""
# If require_all is True, then any dictionary from the result that contains
  # an empty list would be dropped. In other words, at least 1 artifact should be
# present from each key of each result dictionary.
require_all = resolver_op.Property(type=bool, default=False)
def apply(
self, artifact_map: typing_utils.ArtifactMultiMap
) -> List[typing_utils.ArtifactMultiDict]:
artifacts_by_id = {}
input_keys_by_id = collections.defaultdict(set)
for input_key, artifacts in artifact_map.items():
for a in artifacts:
input_keys_by_id[a.id].add(input_key)
artifacts_by_id[a.id] = a
if not artifacts_by_id:
return []
events = self.context.store.get_events_by_artifact_ids(
artifact_ids=artifacts_by_id
)
result = []
for disjoint_set in _find_disjoint_sets(
artifacts_by_id, _get_neighbor_artifact_pairs(events)
):
result_item = {input_key: [] for input_key in artifact_map}
for artifact_id in disjoint_set:
for input_key in input_keys_by_id[artifact_id]:
result_item[input_key].append(artifacts_by_id[artifact_id])
if not self.require_all or all(result_item.values()):
result.append(result_item)
return result
class GroupByPivot(
resolver_op.ResolverOp,
canonical_name='tfx.GroupByPivot',
arg_data_types=(resolver_op.DataType.ARTIFACT_MULTIMAP,),
return_data_type=resolver_op.DataType.ARTIFACT_MULTIMAP_LIST,
):
"""GroupByPivot operator.
Let's say we have a lineage of artifacts (executions omitted for brevity):
```dot
digraph {
a1 -> b1 -> c1
a2 -> b2 -> c2
a2 -> b3 -> c3
a3 -> b4 -> c4
}
```
Then `GroupByPivot` would group artifacts by each pivot artifact from
  the input artifacts where artifacts in the same group are reachable from the
pivot.
```python
inputs = {
'a': [a1, a2, a3],
'b': [b1, b2, b3, b4],
'c': [c1, c2, c3, c4],
}
  # 'c' is empty because they are not adjacent provenance from 'a'.
GroupByPivot(inputs, pivot_key='a') == [
{'a': [a1], 'b': [b1], 'c': []},
{'a': [a2], 'b': [b2, b3], 'c': []},
{'a': [a3], 'b': [b4], 'c': []},
]
  # Both 'a' and 'c' are non-empty as they are adjacent provenance from 'b'.
GroupByPivot(inputs, pivot_key='b') == [
{'a': [a1], 'b': [b1], 'c': [c1]},
{'a': [a2], 'b': [b2], 'c': [c2]},
{'a': [a2], 'b': [b3], 'c': [c3]},
{'a': [a3], 'b': [b4], 'c': [c4]},
]
```
  The result of the operator is a list of dictionaries, where each dictionary
  contains an individual pivot artifact. Non-pivot artifacts may be included in
  multiple dictionaries if they are associated with multiple pivots, and a
  dictionary may have an empty artifact list for non-pivot artifacts if no
  adjacent provenance is found.
CAVEAT: Lineage is only searched for the 2-hop distances (i.e. artifact ->
execution -> artifact) and the artifacts farther than 2 hops from the pivot
artifacts would NOT be included in the result.
"""
# Input key that is used for a pivot.
pivot_key = resolver_op.Property(type=str)
# If require_all is True, then any dictionary from the result that contains
  # an empty list would be dropped. In other words, at least 1 artifact should be
# present from each key of each result dictionary.
require_all = resolver_op.Property(type=bool, default=False)
def apply(
self, artifact_map: typing_utils.ArtifactMultiMap
) -> List[typing_utils.ArtifactMultiDict]:
if self.pivot_key not in artifact_map:
raise exceptions.FailedPreconditionError(
f'Pivot "{self.pivot_key}" does not exist in the artifact map. '
f'Containing keys: {list(artifact_map.keys())}'
)
if not artifact_map[self.pivot_key]:
return []
artifacts_by_id = {}
input_keys_by_id = collections.defaultdict(set)
for input_key, artifacts in artifact_map.items():
for a in artifacts:
input_keys_by_id[a.id].add(input_key)
artifacts_by_id[a.id] = a
events = self.context.store.get_events_by_artifact_ids(
artifact_ids=artifacts_by_id
)
neighbors = collections.defaultdict(set)
for a, b in _get_neighbor_artifact_pairs(events):
neighbors[a].add(b)
neighbors[b].add(a)
result = []
# Preserve the initial order in artifact_map[pivot_key].
for pivot in artifact_map[self.pivot_key]:
result_item = {input_key: [] for input_key in artifact_map}
result_item[self.pivot_key].append(pivot)
# Sort for deterministic result.
for artifact_id in sorted(neighbors[pivot.id]):
for input_key in input_keys_by_id[artifact_id]:
result_item[input_key].append(artifacts_by_id[artifact_id])
if not self.require_all or all(result_item.values()):
result.append(result_item)
return result
|
7149c7c3938bdeada661cebcc1a87d1461053bbb
|
fa1ad2e2ac7e376fc7cb3b3a6e1bb88eed3e80be
|
/studio/micro-services/cube-studio/myapp/views/view_images.py
|
e386c6227ccb140157076ad77b658fa30659e16e
|
[
"Apache-2.0",
"BSD-3-Clause",
"EPL-2.0",
"MIT",
"LGPL-2.1-only",
"LicenseRef-scancode-unknown-license-reference",
"LicenseRef-scancode-public-domain",
"BSD-2-Clause"
] |
permissive
|
alldatacenter/alldata
|
7bc7713c9f1d56ad6b8e59ea03206d1073b7e047
|
8d5f9a2d49ab8f9e85ccf058cb02c2fda287afc6
|
refs/heads/master
| 2023-08-05T07:32:25.442740
| 2023-08-03T13:17:24
| 2023-08-03T13:17:24
| 213,321,771
| 774
| 250
|
Apache-2.0
| 2023-09-06T17:35:32
| 2019-10-07T07:36:18
| null |
UTF-8
|
Python
| false
| false
| 7,827
|
py
|
view_images.py
|
from flask import render_template,redirect
from flask_appbuilder.models.sqla.interface import SQLAInterface
from flask_appbuilder import ModelView, ModelRestApi
from flask_appbuilder import ModelView,AppBuilder,expose,BaseView,has_access
from importlib import reload
from flask_babel import gettext as __
from flask_babel import lazy_gettext as _
from flask_appbuilder.forms import GeneralModelConverter
import uuid
import re
from kfp import compiler
from sqlalchemy.exc import InvalidRequestError
# Register the models as views and control how they are displayed in the frontend
from myapp.models.model_job import Repository,Images
from myapp.views.view_team import Project_Filter
from myapp import app, appbuilder,db,event_logger
from wtforms import BooleanField, IntegerField,StringField, SelectField,FloatField,DateField,DateTimeField,SelectMultipleField,FormField,FieldList
from flask_appbuilder.fieldwidgets import BS3TextFieldWidget,BS3PasswordFieldWidget,DatePickerWidget,DateTimePickerWidget,Select2ManyWidget,Select2Widget
from myapp.forms import MyBS3TextAreaFieldWidget,MySelect2Widget,MyCodeArea,MyLineSeparatedListField,MyJSONField,MyBS3TextFieldWidget,MySelectMultipleField
from .baseApi import (
MyappModelRestApi
)
from flask import (
current_app,
abort,
flash,
g,
Markup,
make_response,
redirect,
render_template,
request,
send_from_directory,
Response,
url_for,
)
from myapp import security_manager
import kfp  # when using the customized kfp, the pip-installed one has to be removed
from werkzeug.datastructures import FileStorage
from .base import (
api,
BaseMyappView,
check_ownership,
data_payload_response,
DeleteMixin,
generate_download_headers,
get_error_msg,
get_user_roles,
handle_api_exception,
json_error_response,
json_success,
MyappFilter,
MyappModelView,
)
from flask_appbuilder import CompactCRUDMixin, expose
import pysnooper,datetime,time,json
conf = app.config
logging = app.logger
# Define the database views
class Repository_ModelView_Base():
datamodel = SQLAInterface(Repository)
label_title='仓库'
check_redirect_list_url = '/repository_modelview/list/'
    base_permissions = ['can_add', 'can_edit', 'can_delete', 'can_list', 'can_show'] # defaults to these
base_order = ('id', 'desc')
order_columns = ['id']
list_columns = ['name','hubsecret','creator','modified']
show_exclude_columns = ['password']
add_columns = ['name','server','user','password','hubsecret']
edit_columns = add_columns
add_form_extra_fields = {
"password": StringField(
_(datamodel.obj.lab('password')),
            widget=BS3TextFieldWidget() # the widget function receives the outer field object plus the widget function's own arguments
)
}
edit_form_extra_fields = add_form_extra_fields
# @pysnooper.snoop()
def set_column(self):
self.add_form_extra_fields['name'] = StringField(
_(self.datamodel.obj.lab('name')),
default=g.user.username+"-",
            widget=BS3TextFieldWidget() # the widget function receives the outer field object plus the widget function's own arguments
)
self.add_form_extra_fields['hubsecret'] = StringField(
_(self.datamodel.obj.lab('hubsecret')),
default=g.user.username + "-",
            widget=BS3TextFieldWidget() # the widget function receives the outer field object plus the widget function's own arguments
)
pre_add_get = set_column
    # Create the hubsecret directly
# @pysnooper.snoop()
def apply_hubsecret(self,hubsecret):
from myapp.utils.py.py_k8s import K8s
all_cluster=conf.get('CLUSTERS',{})
all_kubeconfig = [all_cluster[cluster].get('KUBECONFIG','') for cluster in all_cluster]+['']
all_kubeconfig = list(set(all_kubeconfig))
for kubeconfig in all_kubeconfig:
k8s = K8s(kubeconfig)
namespaces = conf.get('HUBSECRET_NAMESPACE')
for namespace in namespaces:
k8s.apply_hubsecret(namespace=namespace,
name=hubsecret.hubsecret,
user=hubsecret.user,
password=hubsecret.password,
server=hubsecret.server
)
def post_add(self, item):
self.apply_hubsecret(item)
def post_update(self, item):
self.apply_hubsecret(item)
class Repository_ModelView(Repository_ModelView_Base,MyappModelView,DeleteMixin):
datamodel = SQLAInterface(Repository)
# Register the view and add it to the menu
appbuilder.add_view(Repository_ModelView,"仓库",icon = 'fa-shopping-basket',category = '训练',category_icon = 'fa-sitemap')
# Register the API
class Repository_ModelView_Api(Repository_ModelView_Base,MyappModelRestApi):
datamodel = SQLAInterface(Repository)
route_base = '/repository_modelview/api'
appbuilder.add_api(Repository_ModelView_Api)
# Users can only see images that belong to their own project groups
class Images_Filter(MyappFilter):
# @pysnooper.snoop(watch_explode=('result'))
def apply(self, query, func):
user_roles = [role.name.lower() for role in list(self.get_user_roles())]
if "admin" in user_roles:
return query
result = query.order_by(self.model.id.desc())
return result
# Define the database views
class Images_ModelView_Base():
label_title='镜像'
datamodel = SQLAInterface(Images)
check_redirect_list_url = '/images_modelview/list/?_flt_2_name='
help_url = conf.get('HELP_URL', {}).get(datamodel.obj.__tablename__, '') if datamodel else ''
list_columns = ['images_url','creator','modified']
base_order = ('id', 'desc')
order_columns = ['id']
add_columns = ['repository', 'name', 'describe', 'dockerfile', 'gitpath']
edit_columns = add_columns
add_form_extra_fields = {
"dockerfile": StringField(
_(datamodel.obj.lab('dockerfile')),
description='镜像的构建Dockerfile全部内容',
            widget=MyBS3TextAreaFieldWidget(rows=10), # the widget function receives the outer field object plus the widget function's own arguments
),
"name": StringField(
_(datamodel.obj.lab('name')),
description='镜像名称全称,例如ubuntu:20.04',
            widget=BS3TextFieldWidget(), # the widget function receives the outer field object plus the widget function's own arguments
),
"entrypoint": StringField(
_(datamodel.obj.lab('entrypoint')),
description='镜像的入口命令,直接写成单行字符串,例如python xx.py,无需添加[]',
            widget=BS3TextFieldWidget(), # the widget function receives the outer field object plus the widget function's own arguments
)
}
edit_form_extra_fields = add_form_extra_fields
    base_filters = [["id", Images_Filter, lambda: []]] # set the permission filter
class Images_ModelView(Images_ModelView_Base,MyappModelView,DeleteMixin):
datamodel = SQLAInterface(Images)
appbuilder.add_view(Images_ModelView,"模板镜像",href="/images_modelview/list/?_flt_2_name=",icon = 'fa-file-image-o',category = '训练')
# Register the API
class Images_ModelView_Api(Images_ModelView_Base,MyappModelRestApi):
datamodel = SQLAInterface(Images)
route_base = '/images_modelview/api'
list_columns = ['images_url', 'repository', 'name', 'describe', 'dockerfile', 'gitpath','modified','creator']
appbuilder.add_api(Images_ModelView_Api)
appbuilder.add_separator("训练") # display a divider between the submenus under the specified menu
|
2072de8a1a8c18f310685cd3d30ad7ece1e19710
|
444a9480bce2035565332d4d4654244c0b5cd47b
|
/research/cv/slowfast/src/datasets/build.py
|
0ecb1e8e6d65ad47ac775927d055c2b030715230
|
[
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference",
"LicenseRef-scancode-proprietary-license"
] |
permissive
|
mindspore-ai/models
|
7ede9c6454e77e995e674628204e1c6e76bd7b27
|
eab643f51336dbf7d711f02d27e6516e5affee59
|
refs/heads/master
| 2023-07-20T01:49:34.614616
| 2023-07-17T11:43:18
| 2023-07-17T11:43:18
| 417,393,380
| 301
| 92
|
Apache-2.0
| 2023-05-17T11:22:28
| 2021-10-15T06:38:37
|
Python
|
UTF-8
|
Python
| false
| false
| 2,495
|
py
|
build.py
|
# Copyright 2022 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Build dataset"""
import mindspore.dataset as ds
from src.datasets.ava_dataset import Ava
ds.config.set_prefetch_size(8)
ds.config.set_numa_enable(True)
def build_dataset(cfg, split, num_shards=None, shard_id=None, device_target='Ascend'):
"""
Args:
cfg (CfgNode): configs. Details can be found in
slowfast/config/defaults.py
split (str): the split of the data loader. Options include `train`,
`val`, and `test`.
Returns:
Dataset: a constructed dataset.
"""
assert split in ["train", "test"], 'split must be in train or test'
dataset_generator = Ava(cfg, split)
if split == 'train':
dataset = ds.GeneratorDataset(dataset_generator,
["slowpath", "fastpath", "boxes", "labels", "mask"],
num_parallel_workers=16 if device_target == 'Ascend' else 6,
python_multiprocessing=False,
shuffle=True,
num_shards=num_shards,
shard_id=shard_id)
dataset = dataset.batch(cfg.TRAIN.BATCH_SIZE)
else:
dataset = ds.GeneratorDataset(dataset_generator,
["slowpath", "fastpath", "boxes", "labels", "ori_boxes", "metadata", "mask"],
num_parallel_workers=16 if device_target == 'Ascend' else 6,
python_multiprocessing=False,
shuffle=False)
dataset = dataset.batch(cfg.TEST.BATCH_SIZE)
if dataset.get_dataset_size() == 0:
raise ValueError("dataset size is 0, please check dataset size > 0 and batch_size <= dataset size")
return dataset
|
33d4bf1bbb28f09367d99814295c91684c22d65f
|
e30874b3aa20804833dd11788176f839fcd08690
|
/python/cudf/cudf/_lib/pylibcudf/__init__.py
|
3edff9a53e8c48100e2f5edad1390843eee3de73
|
[
"Apache-2.0"
] |
permissive
|
rapidsai/cudf
|
eaba8948cddde8161c3b02b1b972dab3df8d95b3
|
c51633627ee7087542ad4c315c0e139dea58e408
|
refs/heads/branch-23.10
| 2023-09-04T07:18:27.194295
| 2023-09-03T06:20:33
| 2023-09-03T06:20:33
| 90,506,918
| 5,386
| 751
|
Apache-2.0
| 2023-09-14T00:27:03
| 2017-05-07T03:43:37
|
C++
|
UTF-8
|
Python
| false
| false
| 302
|
py
|
__init__.py
|
# Copyright (c) 2023, NVIDIA CORPORATION.
from . import copying
from .column import Column
from .gpumemoryview import gpumemoryview
from .table import Table
from .types import DataType, TypeId
__all__ = [
"Column",
"DataType",
"Table",
"TypeId",
"copying",
"gpumemoryview",
]
|
a0ed0a6453e604ba5f0ace192b29b72fc5841762
|
c058f51b99f91faebf27183b2b579e9f96e0d8f5
|
/botorch/test_functions/multi_fidelity.py
|
97b0f139286dbc6321892929c5525c6ace26e465
|
[
"MIT"
] |
permissive
|
pytorch/botorch
|
255d62f698cc615c750e9343c278a63c7e96a586
|
4cc5ed59b2e8a9c780f786830c548e05cc74d53c
|
refs/heads/main
| 2023-08-22T15:23:51.071048
| 2023-08-22T05:30:38
| 2023-08-22T05:30:38
| 142,940,093
| 2,891
| 373
|
MIT
| 2023-09-13T00:16:13
| 2018-07-30T23:59:57
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 5,330
|
py
|
multi_fidelity.py
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
r"""
Synthetic functions for multi-fidelity optimization benchmarks.
"""
from __future__ import annotations
import math
from typing import Optional
import torch
from botorch.test_functions.synthetic import SyntheticTestFunction
from torch import Tensor
class AugmentedBranin(SyntheticTestFunction):
r"""Augmented Branin test function for multi-fidelity optimization.
3-dimensional function with domain `[-5, 10] x [0, 15] * [0,1]`, where
    the last dimension is the fidelity parameter:
B(x) = (x_2 - (b - 0.1 * (1 - x_3))x_1^2 + c x_1 - r)^2 +
10 (1-t) cos(x_1) + 10
Here `b`, `c`, `r` and `t` are constants where `b = 5.1 / (4 * math.pi ** 2)`
`c = 5 / math.pi`, `r = 6`, `t = 1 / (8 * math.pi)`.
B has infinitely many minimizers with `x_1 = -pi, pi, 3pi`
and `B_min = 0.397887`
"""
dim = 3
_bounds = [(-5.0, 10.0), (0.0, 15.0), (0.0, 1.0)]
_optimal_value = 0.397887
    _optimizers = [  # this is a subset; there are infinitely many optimizers
(-math.pi, 12.275, 1),
(math.pi, 1.3867356039019576, 0.1),
(math.pi, 1.781519779945532, 0.5),
(math.pi, 2.1763039559891064, 0.9),
]
def evaluate_true(self, X: Tensor) -> Tensor:
t1 = (
X[..., 1]
- (5.1 / (4 * math.pi**2) - 0.1 * (1 - X[:, 2])) * X[:, 0] ** 2
+ 5 / math.pi * X[..., 0]
- 6
)
t2 = 10 * (1 - 1 / (8 * math.pi)) * torch.cos(X[..., 0])
return t1**2 + t2 + 10
class AugmentedHartmann(SyntheticTestFunction):
r"""Augmented Hartmann synthetic test function.
7-dimensional function (typically evaluated on `[0, 1]^7`), where the last
dimension is the fidelity parameter.
H(x) = -(ALPHA_1 - 0.1 * (1-x_7)) * exp(- sum_{j=1}^6 A_1j (x_j - P_1j) ** 2) -
sum_{i=2}^4 ALPHA_i exp( - sum_{j=1}^6 A_ij (x_j - P_ij) ** 2)
H has a unique global minimizer
`x = [0.20169, 0.150011, 0.476874, 0.275332, 0.311652, 0.6573, 1.0]`
with `H_min = -3.32237`
"""
dim = 7
_bounds = [(0.0, 1.0) for _ in range(7)]
_optimal_value = -3.32237
_optimizers = [(0.20169, 0.150011, 0.476874, 0.275332, 0.311652, 0.6573, 1.0)]
_check_grad_at_opt = False
def __init__(self, noise_std: Optional[float] = None, negate: bool = False) -> None:
r"""
Args:
noise_std: Standard deviation of the observation noise.
negate: If True, negate the function.
"""
super().__init__(noise_std=noise_std, negate=negate)
self.register_buffer("ALPHA", torch.tensor([1.0, 1.2, 3.0, 3.2]))
A = [
[10, 3, 17, 3.5, 1.7, 8],
[0.05, 10, 17, 0.1, 8, 14],
[3, 3.5, 1.7, 10, 17, 8],
[17, 8, 0.05, 10, 0.1, 14],
]
P = [
[1312, 1696, 5569, 124, 8283, 5886],
[2329, 4135, 8307, 3736, 1004, 9991],
[2348, 1451, 3522, 2883, 3047, 6650],
[4047, 8828, 8732, 5743, 1091, 381],
]
self.register_buffer("A", torch.tensor(A, dtype=torch.float))
self.register_buffer("P", torch.tensor(P, dtype=torch.float))
def evaluate_true(self, X: Tensor) -> Tensor:
self.to(device=X.device, dtype=X.dtype)
inner_sum = torch.sum(
self.A * (X[..., :6].unsqueeze(-2) - 0.0001 * self.P) ** 2, dim=-1
)
alpha1 = self.ALPHA[0] - 0.1 * (1 - X[..., 6])
H = (
-(torch.sum(self.ALPHA[1:] * torch.exp(-inner_sum)[..., 1:], dim=-1))
- alpha1 * torch.exp(-inner_sum)[..., 0]
)
return H
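# A minimal usage sketch (assumes torch and botorch are importable): evaluating the
# optimizer listed in the docstring should reproduce the documented optimum of
# roughly -3.32237.
#
#     f = AugmentedHartmann()
#     X = torch.tensor([[0.20169, 0.150011, 0.476874, 0.275332, 0.311652, 0.6573, 1.0]])
#     f.evaluate_true(X)  # ~ -3.32237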
class AugmentedRosenbrock(SyntheticTestFunction):
r"""Augmented Rosenbrock synthetic test function for multi-fidelity optimization.
d-dimensional function (usually evaluated on `[-5, 10]^(d-2) * [0, 1]^2`),
where the last two dimensions are the fidelity parameters:
f(x) = sum_{i=1}^{d-1} (100 (x_{i+1} - x_i^2 + 0.1 * (1-x_{d-1}))^2 +
(x_i - 1 + 0.1 * (1 - x_d)^2)^2)
f has one minimizer for its global minimum at `z_1 = (1, 1, ..., 1)` with
    `f(z_1) = 0.0`.
"""
_optimal_value = 0.0
def __init__(
self, dim=3, noise_std: Optional[float] = None, negate: bool = False
) -> None:
r"""
Args:
dim: The (input) dimension. Must be at least 3.
noise_std: Standard deviation of the observation noise.
negate: If True, negate the function.
"""
if dim < 3:
raise ValueError(
"AugmentedRosenbrock must be defined it at least 3 dimensions"
)
self.dim = dim
self._bounds = [(-5.0, 10.0) for _ in range(self.dim)]
self._optimizers = [tuple(1.0 for _ in range(self.dim))]
super().__init__(noise_std=noise_std, negate=negate)
def evaluate_true(self, X: Tensor) -> Tensor:
X_curr = X[..., :-3]
X_next = X[..., 1:-2]
t1 = 100 * (X_next - X_curr**2 + 0.1 * (1 - X[..., -2:-1])) ** 2
t2 = (X_curr - 1 + 0.1 * (1 - X[..., -1:]) ** 2) ** 2
return -((t1 + t2).sum(dim=-1))
|
c174f6d58696e77346579dc5de21e564837eb8f4
|
944a49e62bc79622fe01abee62403397a1b0504d
|
/openstackclient/tests/functional/network/v2/test_subnet_pool.py
|
f7cb1d74268ba2cee90a80acc9a54e839078375f
|
[
"Apache-2.0"
] |
permissive
|
openstack/python-openstackclient
|
1c22984f9b29ae8ff9bbea26067981e2130ed039
|
78988d1786c0634ee055714910d1e6187f941673
|
refs/heads/master
| 2023-08-28T15:10:05.542862
| 2023-08-26T12:44:20
| 2023-08-26T12:44:20
| 4,170,310
| 286
| 224
|
Apache-2.0
| 2022-09-19T13:29:49
| 2012-04-28T21:07:25
|
Python
|
UTF-8
|
Python
| false
| false
| 10,486
|
py
|
test_subnet_pool.py
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import random
import uuid
from openstackclient.tests.functional.network.v2 import common
class SubnetPoolTests(common.NetworkTagTests):
"""Functional tests for subnet pool"""
base_command = 'subnet pool'
def setUp(self):
super(SubnetPoolTests, self).setUp()
# Nothing in this class works with Nova Network
if not self.haz_network:
self.skipTest("No Network service present")
def test_subnet_pool_create_delete(self):
"""Test create, delete"""
name1 = uuid.uuid4().hex
cmd_output, pool_prefix = self._subnet_pool_create("", name1)
self.assertEqual(name1, cmd_output["name"])
self.assertEqual([pool_prefix], cmd_output["prefixes"])
name2 = uuid.uuid4().hex
cmd_output, pool_prefix = self._subnet_pool_create("", name2)
self.assertEqual(name2, cmd_output["name"])
self.assertEqual([pool_prefix], cmd_output["prefixes"])
del_output = self.openstack(
'subnet pool delete ' + name1 + ' ' + name2,
)
self.assertOutput('', del_output)
def test_subnet_pool_list(self):
"""Test create, list filter"""
cmd_output = self.openstack('token issue', parse_output=True)
auth_project_id = cmd_output['project_id']
cmd_output = self.openstack('project list', parse_output=True)
admin_project_id = None
demo_project_id = None
for p in cmd_output:
if p['Name'] == 'admin':
admin_project_id = p['ID']
if p['Name'] == 'demo':
demo_project_id = p['ID']
# Verify assumptions:
# * admin and demo projects are present
# * demo and admin are distinct projects
# * tests run as admin
self.assertIsNotNone(admin_project_id)
self.assertIsNotNone(demo_project_id)
self.assertNotEqual(admin_project_id, demo_project_id)
self.assertEqual(admin_project_id, auth_project_id)
name1 = uuid.uuid4().hex
name2 = uuid.uuid4().hex
cmd_output, pool_prefix = self._subnet_pool_create(
'--project ' + demo_project_id + ' --no-share ',
name1,
)
self.addCleanup(self.openstack, 'subnet pool delete ' + name1)
self.assertEqual(
name1,
cmd_output["name"],
)
self.assertEqual(
False,
cmd_output["shared"],
)
self.assertEqual(
demo_project_id,
cmd_output["project_id"],
)
self.assertEqual(
[pool_prefix],
cmd_output["prefixes"],
)
cmd_output, pool_prefix = self._subnet_pool_create(
' --share ',
name2,
)
self.addCleanup(self.openstack, 'subnet pool delete ' + name2)
self.assertEqual(
name2,
cmd_output["name"],
)
self.assertEqual(
True,
cmd_output["shared"],
)
self.assertEqual(
admin_project_id,
cmd_output["project_id"],
)
self.assertEqual(
[pool_prefix],
cmd_output["prefixes"],
)
# Test list --project
cmd_output = self.openstack(
'subnet pool list ' + '--project ' + demo_project_id,
parse_output=True,
)
names = [x["Name"] for x in cmd_output]
self.assertIn(name1, names)
self.assertNotIn(name2, names)
# Test list --share
cmd_output = self.openstack(
'subnet pool list ' + '--share',
parse_output=True,
)
names = [x["Name"] for x in cmd_output]
self.assertNotIn(name1, names)
self.assertIn(name2, names)
# Test list --name
cmd_output = self.openstack(
'subnet pool list ' + '--name ' + name1,
parse_output=True,
)
names = [x["Name"] for x in cmd_output]
self.assertIn(name1, names)
self.assertNotIn(name2, names)
# Test list --long
cmd_output = self.openstack(
'subnet pool list ' + '--long ',
parse_output=True,
)
names = [x["Name"] for x in cmd_output]
self.assertIn(name1, names)
self.assertIn(name2, names)
def test_subnet_pool_set_show(self):
"""Test create, delete, set, show, unset"""
name = uuid.uuid4().hex
new_name = name + "_"
cmd_output, pool_prefix = self._subnet_pool_create(
'--default-prefix-length 16 '
+ '--min-prefix-length 16 '
+ '--max-prefix-length 32 '
+ '--description aaaa '
+ '--default-quota 10 ',
name,
)
self.addCleanup(
self.openstack,
'subnet pool delete ' + cmd_output['id'],
)
self.assertEqual(
name,
cmd_output["name"],
)
self.assertEqual(
'aaaa',
cmd_output["description"],
)
self.assertEqual(
[pool_prefix],
cmd_output["prefixes"],
)
self.assertEqual(
16,
cmd_output["default_prefixlen"],
)
self.assertEqual(
16,
cmd_output["min_prefixlen"],
)
self.assertEqual(
32,
cmd_output["max_prefixlen"],
)
self.assertEqual(
10,
cmd_output["default_quota"],
)
# Test set
cmd_output = self.openstack(
'subnet pool set '
+ '--name '
+ new_name
+ ' --description bbbb '
+ ' --pool-prefix 10.110.0.0/16 '
+ '--default-prefix-length 8 '
+ '--min-prefix-length 8 '
+ '--max-prefix-length 16 '
+ '--default-quota 20 '
+ name,
)
self.assertOutput('', cmd_output)
cmd_output = self.openstack(
'subnet pool show ' + new_name,
parse_output=True,
)
self.assertEqual(
new_name,
cmd_output["name"],
)
self.assertEqual(
'bbbb',
cmd_output["description"],
)
self.assertEqual(
sorted(["10.110.0.0/16", pool_prefix]),
sorted(cmd_output["prefixes"]),
)
self.assertEqual(
8,
cmd_output["default_prefixlen"],
)
self.assertEqual(
8,
cmd_output["min_prefixlen"],
)
self.assertEqual(
16,
cmd_output["max_prefixlen"],
)
self.assertEqual(
20,
cmd_output["default_quota"],
)
# Test unset
# NOTE(dtroyer): The unset command --default-quota option DOES NOT
# WORK after a default quota has been set once on a
# pool. The error appears to be in a lower layer,
# once that is fixed add a test for subnet pool unset
# --default-quota.
# The unset command of --pool-prefixes also doesn't work
# right now. It would be fixed in a separate patch once
# the lower layer is fixed.
# cmd_output = self.openstack(
# '--debug ' +
# 'subnet pool unset ' +
# ' --pool-prefix 10.110.0.0/16 ' +
# new_name,
# )
# self.assertOutput('', cmd_output)
# self.assertNone(cmd_output["prefixes"])
def _subnet_pool_create(self, cmd, name, is_type_ipv4=True):
"""Make a random subnet pool
:param string cmd:
The options for a subnet pool create command, not including
--pool-prefix and <name>
:param string name:
The name of the subnet pool
:param bool is_type_ipv4:
Creates an IPv4 pool if True, creates an IPv6 pool otherwise
        Try random subnet ranges because we cannot determine ahead of time
        what subnets are already in use, possibly by another test running in
        parallel; try 4 times before failing.
"""
for i in range(4):
# Create a random prefix
if is_type_ipv4:
pool_prefix = (
".".join(
map(
str,
(random.randint(0, 223) for _ in range(2)),
)
)
+ ".0.0/16"
)
else:
pool_prefix = (
":".join(
map(
str,
(
hex(random.randint(0, 65535))[2:]
for _ in range(6)
),
)
)
+ ":0:0/96"
)
try:
cmd_output = self.openstack(
'subnet pool create '
+ cmd
+ ' '
+ '--pool-prefix '
+ pool_prefix
+ ' '
+ name,
parse_output=True,
)
except Exception:
if i == 3:
# Raise the exception the last time
raise
pass
else:
# Break and no longer retry if create is successful
break
return cmd_output, pool_prefix
def _create_resource_for_tag_test(self, name, args):
cmd_output, _pool_prefix = self._subnet_pool_create(args, name)
return cmd_output
|
764ab7f7f763f254a9fd703abe87b8fea1fb7c2d
|
4d28185e7a78a569f9a449f39f183cac3024f711
|
/packages/Python/lldbsuite/test/commands/expression/call-throws/TestCallThatThrows.py
|
c6b90ba5ba020e692468f2da4022e511e5ae5b5f
|
[
"NCSA",
"Apache-2.0",
"LLVM-exception"
] |
permissive
|
apple/swift-lldb
|
2789bf44f648609a1674ee520ac20b64c95de072
|
d74be846ef3e62de946df343e8c234bde93a8912
|
refs/heads/stable
| 2023-04-06T00:28:15.882479
| 2019-10-25T22:46:59
| 2019-10-25T22:46:59
| 44,838,862
| 780
| 291
|
Apache-2.0
| 2020-01-10T19:28:43
| 2015-10-23T21:13:18
|
C++
|
UTF-8
|
Python
| false
| false
| 3,778
|
py
|
TestCallThatThrows.py
|
"""
Test calling a function that throws an ObjC exception, and make sure that it doesn't propagate the exception.
"""
from __future__ import print_function
import lldb
from lldbsuite.test.decorators import *
from lldbsuite.test.lldbtest import *
from lldbsuite.test import lldbutil
class ExprCommandWithThrowTestCase(TestBase):
mydir = TestBase.compute_mydir(__file__)
def setUp(self):
# Call super's setUp().
TestBase.setUp(self)
self.main_source = "call-throws.m"
self.main_source_spec = lldb.SBFileSpec(self.main_source)
@skipUnlessDarwin
def test(self):
"""Test calling a function that throws and ObjC exception."""
self.build()
self.call_function()
def check_after_call(self):
# Check that we are back where we were before:
frame = self.thread.GetFrameAtIndex(0)
self.assertTrue(
self.orig_frame_pc == frame.GetPC(),
"Restored the zeroth frame correctly")
def call_function(self):
"""Test calling function that throws."""
(target, process, self.thread, bkpt) = lldbutil.run_to_source_breakpoint(self,
'I am about to throw.', self.main_source_spec)
options = lldb.SBExpressionOptions()
options.SetUnwindOnError(True)
frame = self.thread.GetFrameAtIndex(0)
# Store away the PC to check that the functions unwind to the right
# place after calls
self.orig_frame_pc = frame.GetPC()
value = frame.EvaluateExpression("[my_class callMeIThrow]", options)
self.assertTrue(value.IsValid())
self.assertTrue(value.GetError().Success() == False)
self.check_after_call()
# Okay, now try with a breakpoint in the called code in the case where
# we are ignoring breakpoint hits.
handler_bkpt = target.BreakpointCreateBySourceRegex(
"I felt like it", self.main_source_spec)
self.assertTrue(handler_bkpt.GetNumLocations() > 0)
options.SetIgnoreBreakpoints(True)
options.SetUnwindOnError(True)
value = frame.EvaluateExpression("[my_class callMeIThrow]", options)
self.assertTrue(
value.IsValid() and value.GetError().Success() == False)
self.check_after_call()
# Now set the ObjC language breakpoint and make sure that doesn't
# interfere with the call:
exception_bkpt = target.BreakpointCreateForException(
lldb.eLanguageTypeObjC, False, True)
self.assertTrue(exception_bkpt.GetNumLocations() > 0)
options.SetIgnoreBreakpoints(True)
options.SetUnwindOnError(True)
value = frame.EvaluateExpression("[my_class callMeIThrow]", options)
self.assertTrue(
value.IsValid() and value.GetError().Success() == False)
self.check_after_call()
# Now turn off exception trapping, and call a function that catches the exceptions,
# and make sure the function actually completes, and we get the right
# value:
options.SetTrapExceptions(False)
value = frame.EvaluateExpression("[my_class iCatchMyself]", options)
self.assertTrue(value.IsValid())
self.assertTrue(value.GetError().Success())
self.assertTrue(value.GetValueAsUnsigned() == 57)
self.check_after_call()
options.SetTrapExceptions(True)
# Now set this unwind on error to false, and make sure that we stop
# where the exception was thrown
options.SetUnwindOnError(False)
value = frame.EvaluateExpression("[my_class callMeIThrow]", options)
self.assertTrue(
value.IsValid() and value.GetError().Success() == False)
self.check_after_call()
|
e290fd98c11b1cab20e3f57a5be0b6a0e95d02a3
|
e22eeb5256e17a96a98b3ff25433aec2d641cd2c
|
/openstack/tests/functional/network/v2/test_bgp.py
|
5c169862027c27b2c319fe669504a1298fff92a9
|
[
"Apache-2.0"
] |
permissive
|
openstack/openstacksdk
|
b4b95fd7869653feea5a3b783e9a5c588235c039
|
d474eb84c605c429bb9cccb166cabbdd1654d73c
|
refs/heads/master
| 2023-09-03T22:50:03.398512
| 2023-07-27T14:09:35
| 2023-08-29T16:28:46
| 16,223,378
| 124
| 130
|
Apache-2.0
| 2023-09-06T02:52:47
| 2014-01-25T02:48:00
|
Python
|
UTF-8
|
Python
| false
| false
| 5,346
|
py
|
test_bgp.py
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from openstack.network.v2 import bgp_peer as _bgp_peer
from openstack.network.v2 import bgp_speaker as _bgp_speaker
from openstack.tests.functional import base
class TestBGPSpeaker(base.BaseFunctionalTest):
def setUp(self):
super().setUp()
self.LOCAL_AS = 101
self.IP_VERSION = 4
self.REMOTE_AS = 42
self.PEER_IP = '172.200.12.3'
self.SPEAKER_NAME = 'my_speaker' + self.getUniqueString()
self.PEER_NAME = 'my_peer' + self.getUniqueString()
if not self.user_cloud.network.find_extension("bgp"):
self.skipTest("Neutron BGP Dynamic Routing Extension disabled")
bgp_speaker = self.operator_cloud.network.create_bgp_speaker(
ip_version=self.IP_VERSION,
local_as=self.LOCAL_AS,
name=self.SPEAKER_NAME,
)
assert isinstance(bgp_speaker, _bgp_speaker.BgpSpeaker)
self.SPEAKER = bgp_speaker
bgp_peer = self.operator_cloud.network.create_bgp_peer(
name=self.PEER_NAME,
auth_type='none',
remote_as=self.REMOTE_AS,
peer_ip=self.PEER_IP,
)
assert isinstance(bgp_peer, _bgp_peer.BgpPeer)
self.PEER = bgp_peer
def tearDown(self):
sot = self.operator_cloud.network.delete_bgp_peer(self.PEER.id)
self.assertIsNone(sot)
sot = self.operator_cloud.network.delete_bgp_speaker(self.SPEAKER.id)
self.assertIsNone(sot)
super().tearDown()
def test_find_bgp_speaker(self):
sot = self.operator_cloud.network.find_bgp_speaker(self.SPEAKER.name)
self.assertEqual(self.IP_VERSION, sot.ip_version)
self.assertEqual(self.LOCAL_AS, sot.local_as)
# Check defaults
self.assertTrue(sot.advertise_floating_ip_host_routes)
self.assertTrue(sot.advertise_tenant_networks)
def test_get_bgp_speaker(self):
sot = self.operator_cloud.network.get_bgp_speaker(self.SPEAKER.id)
self.assertEqual(self.IP_VERSION, sot.ip_version)
self.assertEqual(self.LOCAL_AS, sot.local_as)
def test_list_bgp_speakers(self):
speaker_ids = [
sp.id for sp in self.operator_cloud.network.bgp_speakers()
]
self.assertIn(self.SPEAKER.id, speaker_ids)
def test_update_bgp_speaker(self):
sot = self.operator_cloud.network.update_bgp_speaker(
self.SPEAKER.id, advertise_floating_ip_host_routes=False
)
self.assertFalse(sot.advertise_floating_ip_host_routes)
def test_find_bgp_peer(self):
sot = self.operator_cloud.network.find_bgp_peer(self.PEER.name)
self.assertEqual(self.PEER_IP, sot.peer_ip)
self.assertEqual(self.REMOTE_AS, sot.remote_as)
def test_get_bgp_peer(self):
sot = self.operator_cloud.network.get_bgp_peer(self.PEER.id)
self.assertEqual(self.PEER_IP, sot.peer_ip)
self.assertEqual(self.REMOTE_AS, sot.remote_as)
def test_list_bgp_peers(self):
peer_ids = [pe.id for pe in self.operator_cloud.network.bgp_peers()]
self.assertIn(self.PEER.id, peer_ids)
def test_update_bgp_peer(self):
name = 'new_peer_name' + self.getUniqueString()
sot = self.operator_cloud.network.update_bgp_peer(
self.PEER.id, name=name
)
self.assertEqual(name, sot.name)
def test_add_remove_peer_to_speaker(self):
self.operator_cloud.network.add_bgp_peer_to_speaker(
self.SPEAKER.id, self.PEER.id
)
sot = self.operator_cloud.network.get_bgp_speaker(self.SPEAKER.id)
self.assertEqual([self.PEER.id], sot.peers)
# Remove the peer
self.operator_cloud.network.remove_bgp_peer_from_speaker(
self.SPEAKER.id, self.PEER.id
)
sot = self.operator_cloud.network.get_bgp_speaker(self.SPEAKER.id)
self.assertEqual([], sot.peers)
def test_add_remove_gw_network_to_speaker(self):
net_name = 'my_network' + self.getUniqueString()
net = self.user_cloud.create_network(name=net_name)
self.operator_cloud.network.add_gateway_network_to_speaker(
self.SPEAKER.id, net.id
)
sot = self.operator_cloud.network.get_bgp_speaker(self.SPEAKER.id)
self.assertEqual([net.id], sot.networks)
# Remove the network
self.operator_cloud.network.remove_gateway_network_from_speaker(
self.SPEAKER.id, net.id
)
sot = self.operator_cloud.network.get_bgp_speaker(self.SPEAKER.id)
self.assertEqual([], sot.networks)
def test_get_advertised_routes_of_speaker(self):
sot = self.operator_cloud.network.get_advertised_routes_of_speaker(
self.SPEAKER.id
)
self.assertEqual({'advertised_routes': []}, sot)
|
d9b9d062c5e64d2e87d0d099394e11f0e20486a1
|
643207c42f07413c4723b49426faa91f3c47ff66
|
/tests/test_api.py
|
ccaf321df635c1e10155b8e092e0a02e4e6aa482
|
[
"BSD-3-Clause",
"LicenseRef-scancode-public-domain"
] |
permissive
|
sourmash-bio/sourmash
|
74da4910c0bad3e14655be2d92c2ba8534813bfa
|
ba581ea8c6f046b7e19da274155f326d2bdbc8e7
|
refs/heads/latest
| 2023-09-03T13:13:02.374178
| 2023-08-27T15:01:49
| 2023-08-27T15:01:49
| 55,858,065
| 149
| 23
|
NOASSERTION
| 2023-09-13T22:25:54
| 2016-04-09T17:35:30
|
Python
|
UTF-8
|
Python
| false
| false
| 2,475
|
py
|
test_api.py
|
import pytest
import sourmash
import sourmash_tst_utils as utils
@utils.in_tempdir
def test_sourmash_signature_api(c):
e = sourmash.MinHash(n=1, ksize=20)
sig = sourmash.SourmashSignature(e)
with open(c.output('xxx.sig'), 'wt') as fp:
sourmash.save_signatures([sig], fp)
sig_x1 = sourmash.load_one_signature(c.output('xxx.sig'))
sig_x2 = list(sourmash.load_file_as_signatures(c.output('xxx.sig')))[0]
assert sig_x1 == sig
assert sig_x2 == sig
@utils.in_tempdir
def test_load_index_0_no_file(c):
with pytest.raises(ValueError) as exc:
idx = sourmash.load_file_as_index(c.output('does-not-exist'))
assert 'Error while reading signatures from ' in str(exc.value)
def test_load_index_1():
testfile = utils.get_test_data('prot/protein.sbt.zip')
idx = sourmash.load_file_as_index(testfile)
sigs = list(idx.signatures())
assert len(sigs) == 2
def test_load_index_2():
testfile = utils.get_test_data('prot/protein.lca.json.gz')
idx = sourmash.load_file_as_index(testfile)
sigs = list(idx.signatures())
assert len(sigs) == 2
def test_load_index_3():
testfile = utils.get_test_data('prot/protein/')
idx = sourmash.load_file_as_index(testfile)
sigs = list(idx.signatures())
assert len(sigs) == 2
def test_load_index_4():
testfile = utils.get_test_data('prot/all.zip')
idx = sourmash.load_file_as_index(testfile)
sigs = list(idx.signatures())
assert len(sigs) == 8
def test_load_index_4_b():
testfile = utils.get_test_data('prot/protein.zip')
idx = sourmash.load_file_as_index(testfile)
sigs = list(idx.signatures())
assert len(sigs) == 2
def test_load_fasta_as_signature():
# try loading a fasta file - should fail with informative exception
testfile = utils.get_test_data('short.fa')
with pytest.raises(Exception) as exc:
idx = sourmash.load_file_as_index(testfile)
print(exc.value)
assert f"Error while reading signatures from '{testfile}' - got sequences instead! Is this a FASTA/FASTQ file?" in str(exc.value)
def test_load_and_search_sbt_api():
treefile = utils.get_test_data('prot/protein.sbt.zip')
queryfile = utils.get_test_data('prot/protein/GCA_001593925.1_ASM159392v1_protein.faa.gz.sig')
tree = sourmash.load_sbt_index(treefile)
query = sourmash.load_one_signature(queryfile)
results = list(sourmash.search_sbt_index(tree, query, 0))
assert len(results) == 2
|
649bc63d25859e304edfe739a0a535964c6b01d3
|
d3ef2463f556d6cd166eb29d3a5f5b210a6402e7
|
/cupy/linalg/_product.py
|
aef7e10cd457b7cbf0d350f49bd04f9395e7278d
|
[
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
cupy/cupy
|
ce7a010a57504dbfe4fb5af10d354a22e79f4907
|
96105afb78aa3f8380834d2516184b8365e23fcb
|
refs/heads/main
| 2023-08-31T00:36:47.967611
| 2023-08-30T09:19:27
| 2023-08-30T09:19:27
| 72,523,920
| 7,505
| 1,072
|
MIT
| 2023-09-14T01:04:42
| 2016-11-01T09:54:45
|
Python
|
UTF-8
|
Python
| false
| false
| 12,853
|
py
|
_product.py
|
import collections.abc
import numbers
import numpy
import cupy
from cupy import _core
from cupy._core import internal
from cupy._core._gufuncs import _GUFunc
from cupy.linalg import _solve
from cupy.linalg import _util
matmul = _GUFunc(
_core.matmul,
'(n?,k),(k,m?)->(n?,m?)',
supports_batched=True,
supports_out=True,
doc="""matmul(x1, x2, /, out=None, \\*\\*kwargs)
Matrix product of two arrays.
Returns the matrix product of two arrays and is the implementation of
the `@` operator introduced in Python 3.5 following PEP465.
    The main difference from cupy.dot is the handling of arrays with more
than 2 dimensions. For more information see :func:`numpy.matmul`.
Args:
x1 (cupy.ndarray): The left argument.
x2 (cupy.ndarray): The right argument.
out (cupy.ndarray, optional): Output array.
\\*\\*kwargs: ufunc keyword arguments.
Returns:
cupy.ndarray: Output array.
.. seealso:: :func:`numpy.matmul`
"""
)
def dot(a, b, out=None):
"""Returns a dot product of two arrays.
For arrays with more than one axis, it computes the dot product along the
last axis of ``a`` and the second-to-last axis of ``b``. This is just a
matrix product if the both arrays are 2-D. For 1-D arrays, it uses their
unique axis as an axis to take dot product over.
Args:
a (cupy.ndarray): The left argument.
b (cupy.ndarray): The right argument.
out (cupy.ndarray): Output array.
Returns:
cupy.ndarray: The dot product of ``a`` and ``b``.
.. seealso:: :func:`numpy.dot`
"""
# TODO(okuta): check type
return a.dot(b, out)
def vdot(a, b):
"""Returns the dot product of two vectors.
The input arrays are flattened into 1-D vectors and then it performs inner
product of these vectors.
Args:
a (cupy.ndarray): The first argument.
b (cupy.ndarray): The second argument.
Returns:
cupy.ndarray: Zero-dimensional array of the dot product result.
.. seealso:: :func:`numpy.vdot`
"""
if a.size != b.size:
raise ValueError('Axis dimension mismatch')
if a.dtype.kind == 'c':
a = a.conj()
return _core.tensordot_core(a, b, None, 1, 1, a.size, ())
def cross(a, b, axisa=-1, axisb=-1, axisc=-1, axis=None):
"""Returns the cross product of two vectors.
The cross product of ``a`` and ``b`` in :math:`R^3` is a vector
perpendicular to both ``a`` and ``b``. If ``a`` and ``b`` are arrays
of vectors, the vectors are defined by the last axis of ``a`` and ``b``
by default, and these axes can have dimensions 2 or 3. Where the
dimension of either ``a`` or ``b`` is 2, the third component of the input
vector is assumed to be zero and the cross product calculated accordingly.
In cases where both input vectors have dimension 2, the z-component of
the cross product is returned.
Args:
a (cupy.ndarray): Components of the first vector(s).
b (cupy.ndarray): Components of the second vector(s).
axisa (int, optional):
Axis of ``a`` that defines the vector(s).
By default, the last axis.
axisb (int, optional):
Axis of ``b`` that defines the vector(s).
By default, the last axis.
axisc (int, optional):
Axis of ``c`` containing the cross product vector(s). Ignored if
both input vectors have dimension 2, as the return is scalar.
By default, the last axis.
axis (int, optional):
If defined, the axis of ``a``, ``b`` and ``c``
that defines the vector(s) and cross product(s).
Overrides ``axisa``, ``axisb`` and ``axisc``.
Returns:
cupy.ndarray :
Vector cross product(s).
.. seealso:: :func:`numpy.cross`
"""
if axis is not None:
axisa, axisb, axisc = (axis,) * 3
a = cupy.asarray(a)
b = cupy.asarray(b)
# Check axisa and axisb are within bounds
axisa = internal._normalize_axis_index(axisa, a.ndim)
axisb = internal._normalize_axis_index(axisb, b.ndim)
# Move working axis to the end of the shape
a = cupy.moveaxis(a, axisa, -1)
b = cupy.moveaxis(b, axisb, -1)
if a.shape[-1] not in (2, 3) or b.shape[-1] not in (2, 3):
msg = ('incompatible dimensions for cross product\n'
'(dimension must be 2 or 3)')
raise ValueError(msg)
# Create the output array
shape = cupy.broadcast(a[..., 0], b[..., 0]).shape
if a.shape[-1] == 3 or b.shape[-1] == 3:
shape += (3,)
# Check axisc is within bounds
axisc = internal._normalize_axis_index(axisc, len(shape))
dtype = cupy.promote_types(a.dtype, b.dtype)
cp = cupy.empty(shape, dtype)
# create local aliases for readability
a0 = a[..., 0]
a1 = a[..., 1]
if a.shape[-1] == 3:
a2 = a[..., 2]
b0 = b[..., 0]
b1 = b[..., 1]
if b.shape[-1] == 3:
b2 = b[..., 2]
if cp.ndim != 0 and cp.shape[-1] == 3:
cp0 = cp[..., 0]
cp1 = cp[..., 1]
cp2 = cp[..., 2]
if a.shape[-1] == 2:
if b.shape[-1] == 2:
# a0 * b1 - a1 * b0
cupy.multiply(a0, b1, out=cp)
cp -= a1 * b0
return cp
else:
assert b.shape[-1] == 3
# cp0 = a1 * b2 - 0 (a2 = 0)
# cp1 = 0 - a0 * b2 (a2 = 0)
# cp2 = a0 * b1 - a1 * b0
cupy.multiply(a1, b2, out=cp0)
cupy.multiply(a0, b2, out=cp1)
cupy.negative(cp1, out=cp1)
cupy.multiply(a0, b1, out=cp2)
cp2 -= a1 * b0
else:
assert a.shape[-1] == 3
if b.shape[-1] == 3:
# cp0 = a1 * b2 - a2 * b1
# cp1 = a2 * b0 - a0 * b2
# cp2 = a0 * b1 - a1 * b0
cupy.multiply(a1, b2, out=cp0)
tmp = a2 * b1
cp0 -= tmp
cupy.multiply(a2, b0, out=cp1)
cupy.multiply(a0, b2, out=tmp)
cp1 -= tmp
cupy.multiply(a0, b1, out=cp2)
cupy.multiply(a1, b0, out=tmp)
cp2 -= tmp
else:
assert b.shape[-1] == 2
# cp0 = 0 - a2 * b1 (b2 = 0)
# cp1 = a2 * b0 - 0 (b2 = 0)
# cp2 = a0 * b1 - a1 * b0
cupy.multiply(a2, b1, out=cp0)
cupy.negative(cp0, out=cp0)
cupy.multiply(a2, b0, out=cp1)
cupy.multiply(a0, b1, out=cp2)
cp2 -= a1 * b0
return cupy.moveaxis(cp, -1, axisc)
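# Illustrative sketch (example values): for two 3-D unit vectors the result is the
# usual right-handed cross product, while for two 2-D vectors only the scalar
# z-component is returned.
#
#     cross(cupy.array([1., 0., 0.]), cupy.array([0., 1., 0.]))  # -> [0., 0., 1.]
#     cross(cupy.array([1., 0.]), cupy.array([0., 1.]))          # -> 1.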
def inner(a, b):
"""Returns the inner product of two arrays.
It uses the last axis of each argument to take sum product.
Args:
a (cupy.ndarray): The first argument.
b (cupy.ndarray): The second argument.
Returns:
cupy.ndarray: The inner product of ``a`` and ``b``.
.. seealso:: :func:`numpy.inner`
"""
a_ndim = a.ndim
b_ndim = b.ndim
if a_ndim == 0 or b_ndim == 0:
return cupy.multiply(a, b)
a_axis = a_ndim - 1
b_axis = b_ndim - 1
if a.shape[-1] != b.shape[-1]:
raise ValueError('Axis dimension mismatch')
if a_axis:
a = cupy.rollaxis(a, a_axis, 0)
if b_axis:
b = cupy.rollaxis(b, b_axis, 0)
ret_shape = a.shape[1:] + b.shape[1:]
k = a.shape[0]
n = a.size // k
m = b.size // k
return _core.tensordot_core(a, b, None, n, m, k, ret_shape)
def outer(a, b, out=None):
"""Returns the outer product of two vectors.
The input arrays are flattened into 1-D vectors and then it performs outer
product of these vectors.
Args:
a (cupy.ndarray): The first argument.
b (cupy.ndarray): The second argument.
out (cupy.ndarray): Output array.
Returns:
cupy.ndarray: 2-D array of the outer product of ``a`` and ``b``.
.. seealso:: :func:`numpy.outer`
"""
return cupy.multiply(a.ravel()[:, None], b.ravel()[None, :], out=out)
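# Illustrative usage (not part of the original source): both inputs are
# flattened first, so the result is always 2-D.
#
# >>> import cupy
# >>> cupy.outer(cupy.arange(3), cupy.arange(2)).shape
# (3, 2)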
def tensordot(a, b, axes=2):
"""Returns the tensor dot product of two arrays along specified axes.
This is equivalent to compute dot product along the specified axes which
are treated as one axis by reshaping.
Args:
a (cupy.ndarray): The first argument.
b (cupy.ndarray): The second argument.
axes:
- If it is an integer, then ``axes`` axes at the last of ``a`` and
the first of ``b`` are used.
- If it is a pair of sequences of integers, then these two
sequences specify the list of axes for ``a`` and ``b``. The
corresponding axes are paired for sum-product.
Returns:
cupy.ndarray: The tensor dot product of ``a`` and ``b`` along the
axes specified by ``axes``.
.. seealso:: :func:`numpy.tensordot`
"""
a_ndim = a.ndim
b_ndim = b.ndim
if a_ndim == 0 or b_ndim == 0:
if axes != 0 and axes != ((), ()):
raise ValueError('An input is zero-dim while axes has dimensions')
return cupy.multiply(a, b)
if isinstance(axes, collections.abc.Sequence):
if len(axes) != 2:
raise ValueError('Axes must consist of two arrays.')
a_axes, b_axes = axes
if numpy.isscalar(a_axes):
a_axes = a_axes,
if numpy.isscalar(b_axes):
b_axes = b_axes,
else:
a_axes = tuple(range(a_ndim - axes, a_ndim))
b_axes = tuple(range(axes))
sum_ndim = len(a_axes)
if sum_ndim != len(b_axes):
raise ValueError('Axes length mismatch')
for a_axis, b_axis in zip(a_axes, b_axes):
if a.shape[a_axis] != b.shape[b_axis]:
raise ValueError('Axis dimension mismatch')
# Make the axes non-negative
a = _move_axes_to_head(a, [axis % a_ndim for axis in a_axes])
b = _move_axes_to_head(b, [axis % b_ndim for axis in b_axes])
ret_shape = a.shape[sum_ndim:] + b.shape[sum_ndim:]
k = internal.prod(a.shape[:sum_ndim])
# Avoid division by zero: _core.tensordot_core returns zeros without
# checking n, m consistency, thus allowing 0-length dimensions to work
n = a.size // k if k != 0 else 0
m = b.size // k if k != 0 else 0
return _core.tensordot_core(a, b, None, n, m, k, ret_shape)
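# Illustrative usage (not part of the original source): the two accepted forms
# of ``axes``, sketched with small arrays. With an integer, the last ``axes``
# axes of ``a`` pair with the first ``axes`` axes of ``b``; with a pair of
# sequences, the listed axes are paired position by position.
#
# >>> import cupy
# >>> a = cupy.ones((2, 3, 4)); b = cupy.ones((3, 4, 5))
# >>> cupy.tensordot(a, b, axes=2).shape # sum over the shared (3, 4) axes
# (2, 5)
# >>> cupy.tensordot(a, b, axes=([1], [0])).shape # pair a's axis 1 with b's axis 0
# (2, 4, 4, 5)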
# TODO: rename `M` to `a`
def matrix_power(M, n):
"""Raise a square matrix to the (integer) power `n`.
Args:
M (~cupy.ndarray): Matrix to raise by power n.
n (~int): Power to raise matrix to.
Returns:
~cupy.ndarray: Output array.
..seealso:: :func:`numpy.linalg.matrix_power`
"""
_util._assert_cupy_array(M)
_util._assert_stacked_2d(M)
_util._assert_stacked_square(M)
if not isinstance(n, int):
raise TypeError('exponent must be an integer')
if n == 0:
return _util.stacked_identity_like(M)
elif n < 0:
M = _solve.inv(M)
n *= -1
# short-cuts
if n <= 3:
if n == 1:
return M
elif n == 2:
return cupy.matmul(M, M)
else:
return cupy.matmul(cupy.matmul(M, M), M)
# binary decomposition to reduce the number of Matrix
# multiplications for n > 3.
result, Z = None, None
for b in cupy.binary_repr(n)[::-1]:
Z = M if Z is None else cupy.matmul(Z, Z)
if b == '1':
result = Z if result is None else cupy.matmul(result, Z)
return result
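# Worked example (not part of the original source) of the binary decomposition
# above: for n = 13 (binary '1101', iterated in reverse as '1011') the loop
# visits Z = M, M^2, M^4, M^8 and multiplies in the powers whose bit is set,
# giving result = M @ M^4 @ M^8 = M^13 with 5 matmuls instead of 12. A quick
# hedged check, assuming CuPy is available:
#
# >>> import cupy
# >>> M = cupy.eye(2) * 2
# >>> cupy.linalg.matrix_power(M, 13)[0, 0].item()
# 8192.0 # == 2 ** 13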
def kron(a, b):
"""Returns the kronecker product of two arrays.
Args:
a (~cupy.ndarray): The first argument.
b (~cupy.ndarray): The second argument.
Returns:
~cupy.ndarray: Output array.
.. seealso:: :func:`numpy.kron`
"""
a_isnumber = isinstance(a, numbers.Number)
b_isnumber = isinstance(b, numbers.Number)
if a_isnumber and b_isnumber:
return a * b
if a_isnumber or b_isnumber:
return cupy.multiply(a, b)
a_ndim = a.ndim
b_ndim = b.ndim
if a_ndim == 0 or b_ndim == 0:
return cupy.multiply(a, b)
ndim = b_ndim
a_shape = a.shape
b_shape = b.shape
if a_ndim != b_ndim:
if b_ndim > a_ndim:
a_shape = (1,) * (b_ndim - a_ndim) + a_shape
else:
b_shape = (1,) * (a_ndim - b_ndim) + b_shape
ndim = a_ndim
axis = ndim - 1
out = _core.tensordot_core(
a, b, None, a.size, b.size, 1, a_shape + b_shape)
for _ in range(ndim):
out = _core.concatenate_method(out, axis=axis)
return out
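# Illustrative usage (not part of the original source): the Kronecker product's
# shape is the element-wise product of the (padded) input shapes.
#
# >>> import cupy
# >>> cupy.kron(cupy.ones((2, 3)), cupy.ones((4, 5))).shape
# (8, 15)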
def _move_axes_to_head(a, axes):
# This function moves the axes of ``s`` to the head of the shape.
for idx, axis in enumerate(axes):
if idx != axis:
break
else:
return a
return a.transpose(
axes + [i for i in range(a.ndim) if i not in axes])
|
1a9cdf1bddc05b9d9962a848ed35cc26d30894e4
|
6c29f457a5e787309b344fec53c133845d8985e8
|
/fairscale/nn/pipe/rpc.py
|
f7d7f37b6b5be5379eb3393108e04448aeed4483
|
[
"Apache-2.0",
"MIT",
"BSD-3-Clause",
"BSD-2-Clause"
] |
permissive
|
facebookresearch/fairscale
|
eb378e44cca951e242fb58f82522d9ba8e87d732
|
164cc0f3170b4a3951dd84dda29c3e1504ac4d6e
|
refs/heads/main
| 2023-09-04T12:48:14.924836
| 2023-04-20T03:41:53
| 2023-04-20T03:41:53
| 277,899,703
| 2,553
| 257
|
NOASSERTION
| 2023-08-28T19:02:48
| 2020-07-07T19:02:01
|
Python
|
UTF-8
|
Python
| false
| false
| 10,227
|
py
|
rpc.py
|
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
from threading import Event, Lock, Thread
from typing import Any, Callable, Dict, List, Optional, Tuple, Union, cast
import torch
from torch import nn
from torch.distributed import ProcessGroup, rpc
from torch.distributed.distributed_c10d import _get_global_rank
from fairscale.nn.model_parallel.initialize import get_pipeline_parallel_group
from .async_pipe import AsyncPipe
from .types import EVENT_LOOP_QUEUE, PipeMessage, TensorOrTensors
DEFAULT_MAX_SOURCE_POSITIONS = 1024
DEFAULT_MAX_TARGET_POSITIONS = 1024
PipeModel: AsyncPipe
PipeResult: TensorOrTensors
SizeOrSizes = Union[torch.Size, List[torch.Size]]
DtypeOrDtypes = Union[torch.dtype, List[torch.dtype]]
def set_device_based_on_group(group: ProcessGroup) -> None:
# torch.cuda.set_device(group.rank() % torch.cuda.device_count())
torch.cuda.set_device(torch.distributed.get_rank() % torch.cuda.device_count())
def get_shapes(tensor: TensorOrTensors) -> SizeOrSizes:
if isinstance(tensor, torch.Tensor):
return tensor.shape
else:
return [t.shape for t in tensor]
def get_dtype(tensor: TensorOrTensors) -> DtypeOrDtypes:
if isinstance(tensor, torch.Tensor):
return tensor.dtype
else:
return [t.dtype for t in tensor]
def get_global_ranks_from_group(group: ProcessGroup) -> List[int]:
return [_get_global_rank(group, r) for r in range(group.size())]
class PipeBackRedirect(torch.autograd.Function):
@staticmethod
# type: ignore
def forward(ctx, inputs, dest, event, message, transport, futures):
ctx.dest = dest
ctx.event = event
ctx.message = message
ctx.transport = transport
ctx.futures = futures
return inputs
@staticmethod
# type: ignore
def backward(ctx, *grad):
ctx.message.tensors = tuple(grad)
ctx.transport.send_message(ctx.message, sync=False, skip_header=True)
ctx.event.set()
# torch.futures.wait_all(ctx.futures)
return (None, None, None, None, None, None)
def callback_with_model(callback: Callable[[Any, AsyncPipe], None], ctx: Any) -> None:
try:
group = get_pipeline_parallel_group() # FIXME(tom) handle dynamic group
set_device_based_on_group(group)
with PipeModel.lock:
callback(ctx, PipeModel)
except Exception as e:
print(f"callback_with_model got {e}")
class PipeRPCWrapper(nn.Module):
"""A wrapper for Pipe to control the entire pipeline from a single process.
A typical use case would have rank 0 construct `PipeRPCWrapper` and run the
training loop as normal, while all other ranks call
`torch.distributed.rpc.shutdown()`.
To run code on each worker, e.g. to run the optimizer, use `foreach_worker`
"""
def __init__(self, *args: Any, **kwargs: Any):
super().__init__()
self.group = cast(ProcessGroup, kwargs.get("group")) or get_pipeline_parallel_group()
assert self.group.rank() == 0
self.lock = Lock()
if True:
assert (
self.group == get_pipeline_parallel_group()
), "Can't pickle groups, so group must be `get_pipeline_parallel_group()`"
kwargs["group"] = None
else:
kwargs["group"] = self.group
kwargs["input_device"] = torch.device("cuda", torch.cuda.current_device())
self.model = AsyncPipe(*args, **kwargs)
self.worker_map = kwargs["worker_map"]
self._foreach_worker(self._register_remote_model, args=(args, kwargs))
self.model.cuda()
def _get_rpc_name(self, rank: int) -> str:
return self.worker_map[_get_global_rank(self.group, rank)]
def _foreach_worker(self, callback: Callable, args: Any = None) -> None:
futures = [rpc.rpc_async(self._get_rpc_name(rank), callback, args=args) for rank in range(1, self.group.size())]
futures = [f.wait() for f in futures]
def foreach_worker(
self, callback: Callable[[Any, AsyncPipe], None], ctx: Any = None, *, include_self: bool = False
) -> None:
"""Call `callback` on each worker with the `ctx` and model local to that
worker. e.g.
def register_optimizer(ctx, model):
args, kwargs = ctx
model.optimizer = torch.optim.SGD(model.parameters(), *args, **kwargs)
pipe_model = PipeRPCWrapper( ... )
pipe_model.foreach_worker(
register_optimizer,
([], {"lr" : 0.01, "momentum" : 0.9})
)
"""
self._foreach_worker(callback_with_model, args=(callback, ctx))
if include_self:
with self.model.lock:
callback(ctx, self.model)
def forward(self, tensor: TensorOrTensors) -> TensorOrTensors: # type: ignore
shape = get_shapes(tensor)
dtype = get_dtype(tensor)
if isinstance(tensor, torch.Tensor):
num_tensors = 1
else:
num_tensors = len(tensor)
futures = [
rpc.rpc_async(self._get_rpc_name(rank), self._model_forward, args=(self.model.training, shape, dtype))
for rank in range(1, self.group.size())
]
if self.model.final_stage:
return self.model(tensor)
else:
event = Event()
t = Thread(target=self._model_forward_first_stage, args=(tensor, event))
t.start()
shape, dtype = futures.pop().wait()
dest_rank = self.group.size() - 1
dest = self._get_rpc_name(dest_rank)
dest_global_rank = _get_global_rank(self.group, dest_rank)
src_global_rank = torch.distributed.get_rank()
queue = EVENT_LOOP_QUEUE
activations = PipeMessage(dest_global_rank, src_global_rank, queue_name=queue, tensor_count=num_tensors)
grads = PipeMessage(src_global_rank, dest_global_rank, queue_name=queue, tensor_count=num_tensors)
back_fut = rpc.rpc_async(
dest, self._send_result_and_do_backwards, args=(self.model.training, activations, grads)
)
futures.append(back_fut)
result = self._recv_result(self.model, shape, dtype, activations)
if isinstance(result, torch.Tensor):
result.requires_grad_()
else:
for r in result:
r.requires_grad_()
assert self.model.pipeline
return PipeBackRedirect.apply(
result, dest_global_rank, event, grads, self.model.pipeline.transport, futures
)
@property
def final_stage(self) -> bool:
return self.model.final_stage
@staticmethod
def _recv_result(
model: AsyncPipe, shapes: SizeOrSizes, dtypes: DtypeOrDtypes, message: PipeMessage
) -> TensorOrTensors:
group = get_pipeline_parallel_group()
set_device_based_on_group(group)
assert model.pipeline
transport = model.pipeline.transport
if isinstance(shapes, torch.Size):
message.tensor_shapes = [cast(torch.Size, shapes)]
message.tensor_dtypes = [cast(torch.dtype, dtypes)]
message = transport.recv_message_tensors(message)
return message.tensors[0]
else:
message.tensor_shapes = cast(List[torch.Size], shapes)
message.tensor_dtypes = cast(List[torch.dtype], dtypes)
message = transport.recv_message_tensors(message)
return message.tensors
@staticmethod
def _send_result_and_do_backwards(training: bool, message: PipeMessage, grads_message: PipeMessage) -> None:
group = get_pipeline_parallel_group()
set_device_based_on_group(group)
result = PipeResult
model = PipeModel
if isinstance(result, torch.Tensor):
result = tuple([result])
message.tensors = tuple(result)
assert model.pipeline
transport = model.pipeline.transport
transport.send_message(message, sync=False, skip_header=True)
if training:
grads_message.tensor_shapes = [r.shape for r in result]
grads_message.tensor_dtypes = [r.dtype for r in result]
grads_message = transport.recv_message_tensors(grads_message)
with model.lock:
torch.autograd.backward(result, grads_message.tensors, retain_graph=True)
@staticmethod
def _register_remote_model(args: List[Any], kwargs: Dict[str, Any]) -> None:
group = get_pipeline_parallel_group() # FIXME(tom) handle dynamic group
set_device_based_on_group(group)
kwargs["group"] = group
kwargs["input_device"] = torch.device("cuda", torch.cuda.current_device())
model = AsyncPipe(*args, **kwargs)
model.cuda()
global PipeModel
PipeModel = model
@staticmethod
def _model_forward(
training: bool, shape: torch.Size, dtype: torch.dtype
) -> Optional[Tuple[SizeOrSizes, DtypeOrDtypes]]:
try:
if isinstance(shape, torch.Size):
tensor = torch.empty(shape, dtype=dtype)
else:
tensor = tuple([torch.empty(s, dtype=d) for s, d in zip(shape, dtype)])
model = PipeModel
assert model.group
set_device_based_on_group(model.group)
model.train(training)
result = model(tensor)
if model.final_stage:
global PipeResult
PipeResult = result
return (get_shapes(result), get_dtype(result))
return None
except Exception as e:
print(f"_model_forward got {e}")
raise e
def _model_forward_first_stage(self, tensor: TensorOrTensors, event: Event) -> None:
try:
assert self.model.group
set_device_based_on_group(self.model.group)
self.model(tensor, event=event)
except Exception as e:
print(f"_model_forward got {e}")
raise e
|
320c232b34cf96d3419b9271cc5104007b422ecd
|
eecd5e4c50d8b78a769bcc2675250576bed34066
|
/src/binding/petsc4py/demo/legacy/wrap-swig/run_demo.py
|
24597e33c4725d2f8733b2bf8e9e1851d32edbcb
|
[
"BSD-2-Clause"
] |
permissive
|
petsc/petsc
|
3b1a04fea71858e0292f9fd4d04ea11618c50969
|
9c5460f9064ca60dd71a234a1f6faf93e7a6b0c9
|
refs/heads/main
| 2023-08-17T20:51:16.507070
| 2023-08-17T16:08:06
| 2023-08-17T16:08:06
| 8,691,401
| 341
| 169
|
NOASSERTION
| 2023-03-29T11:02:58
| 2013-03-10T20:55:21
|
C
|
UTF-8
|
Python
| false
| false
| 1,915
|
py
|
run_demo.py
|
import sys, petsc4py
petsc4py.init(sys.argv)
from petsc4py import PETSc
import Bratu3D as Bratu3D
class App(object):
def __init__(self, da, lambda_):
assert da.getDim() == 3
self.da = da
self.params = Bratu3D.Params()
self.params.lambda_ = lambda_
def formInitGuess(self, X):
X.zeroEntries() # just in case
Bratu3D.FormInitGuess(self.da, X, self.params)
def formFunction(self, snes, X, F):
F.zeroEntries() # just in case
Bratu3D.FormFunction(self.da, X, F, self.params)
def formJacobian(self, snes, X, J, P):
P.zeroEntries() # just in case
Bratu3D.FormJacobian(self.da, X, P, self.params)
if J != P: J.assemble() # matrix-free operator
return PETSc.Mat.Structure.SAME_NONZERO_PATTERN
OptDB = PETSc.Options()
N = OptDB.getInt('N', 16)
lambda_ = OptDB.getReal('lambda', 6.0)
do_plot = OptDB.getBool('plot', False)
da = PETSc.DMDA().create([N, N, N], stencil_width=1)
app = App(da, lambda_)
snes = PETSc.SNES().create()
F = da.createGlobalVec()
snes.setFunction(app.formFunction, F)
J = da.createMat()
snes.setJacobian(app.formJacobian, J)
snes.setFromOptions()
X = da.createGlobalVec()
app.formInitGuess(X)
snes.solve(None, X)
U = da.createNaturalVec()
da.globalToNatural(X, U)
def plot(da, U):
scatter, U0 = PETSc.Scatter.toZero(U)
scatter.scatter(U, U0, False, PETSc.Scatter.Mode.FORWARD)
rank = PETSc.COMM_WORLD.getRank()
if rank == 0:
solution = U0[...].reshape(da.sizes, order='f').copy()
try:
from matplotlib import pyplot
pyplot.contourf(solution[:, :, N//2])
pyplot.axis('equal')
pyplot.show()
except:
pass
PETSc.COMM_WORLD.barrier()
scatter.destroy()
U0.destroy()
if do_plot: plot(da, U)
U.destroy()
X.destroy()
F.destroy()
J.destroy()
da.destroy()
snes.destroy()
|
1835c396c92d47d614aba98d97bad902f78733f2
|
5dc77586e3e0f9de1f032fd2ca68494d8e58928f
|
/tests/datasource/test_batch_kwargs.py
|
191821c75bd5189d512248df032920082a05d237
|
[
"Apache-2.0"
] |
permissive
|
great-expectations/great_expectations
|
dd7c22e6277d6b08bee3ff38a015e6e8cd434df6
|
b0290e2fd2aa05aec6d7d8871b91cb4478e9501d
|
refs/heads/develop
| 2023-09-04T09:30:26.395518
| 2023-09-02T00:00:13
| 2023-09-02T00:00:13
| 103,071,520
| 8,931
| 1,535
|
Apache-2.0
| 2023-09-14T19:57:16
| 2017-09-11T00:18:46
|
Python
|
UTF-8
|
Python
| false
| false
| 2,286
|
py
|
test_batch_kwargs.py
|
import pytest
from great_expectations.datasource.types import * # noqa: F403
@pytest.mark.unit
def test_batch_kwargs_id():
test_batch_kwargs = PathBatchKwargs({"path": "/data/test.csv"}) # noqa: F405
# When there is only a single "important" key used in batch_kwargs, the ID can prominently include it
assert test_batch_kwargs.to_id() == "path=/data/test.csv"
test_batch_kwargs = PathBatchKwargs( # noqa: F405
{
"path": "/data/test.csv",
"reader_method": "read_csv",
"reader_options": {
"iterator": True,
"chunksize": 2e7,
"parse_dates": [0, 3],
"names": ["start", "type", "quantity", "end"],
},
}
)
# When there are multiple relevant keys we use the hash of the batch_kwargs dictionary
print(test_batch_kwargs.to_id())
assert test_batch_kwargs.to_id() == "8607e071c6383509c8cd8f4c1ea65518"
@pytest.mark.unit
def test_batch_kwargs_attributes_and_keys():
# When BatchKwargs are typed, the required keys should become accessible via dot notation and immutable
test_batch_kwargs = PathBatchKwargs( # noqa: F405
{
"path": "/data/test.csv",
"reader_method": "read_csv",
"reader_options": {
"iterator": True,
"chunksize": 2e7,
"parse_dates": [0, 3],
"names": ["start", "type", "quantity", "end"],
},
}
)
assert test_batch_kwargs.path == "/data/test.csv"
assert test_batch_kwargs["path"] == test_batch_kwargs.path
# We do not allow setting the special attributes this way
with pytest.raises(AttributeError):
test_batch_kwargs.path = "/a/new/path.csv"
# Nor do we provide attribute-style access to unreserved names
with pytest.raises(AttributeError):
assert test_batch_kwargs.names == ["start", "type", "quantity", "end"]
# But we can access and set even protected names using dictionary notation
assert test_batch_kwargs["reader_options"]["names"] == [
"start",
"type",
"quantity",
"end",
]
test_batch_kwargs["path"] = "/a/new/path.csv"
assert test_batch_kwargs.path == "/a/new/path.csv"
|
45c0e4a6c6cb678790b8dd19fd4c5923a22196bd
|
529e713a78e82de2ae5d44cfb8ef209e0894d72a
|
/intro-to-bokeh/code-snippets/linked-selection.py
|
745457a15c9beaf1c7abd97d694b824275ff381a
|
[
"MIT",
"CC-BY-SA-4.0"
] |
permissive
|
realpython/materials
|
cd2f548276be2c82f134ca03eadb1cd279e0f26e
|
d2d62756d3854f54a12a767f2bf9470486c0ceef
|
refs/heads/master
| 2023-09-05T22:12:29.806738
| 2023-08-31T20:56:28
| 2023-08-31T20:56:28
| 132,374,697
| 4,678
| 6,482
|
MIT
| 2023-09-12T22:22:06
| 2018-05-06T20:46:18
|
HTML
|
UTF-8
|
Python
| false
| false
| 1,745
|
py
|
linked-selection.py
|
# Bokeh Libraries
from bokeh.plotting import figure, show
from bokeh.io import output_file
from bokeh.models import (
ColumnDataSource,
CategoricalColorMapper,
NumeralTickFormatter,
)
from bokeh.layouts import gridplot
# Output to a static HTML file
output_file(
"phi-gm-linked-selections.html", title="76ers Percentages vs. Win-Loss"
)
# Store the data in a ColumnDataSource
gm_stats_cds = ColumnDataSource(phi_gm_stats_2) # noqa
# Create a CategoricalColorMapper that assigns specific
# colors to wins and losses
win_loss_mapper = CategoricalColorMapper(
factors=["W", "L"], palette=["Green", "Red"]
)
# Specify the tools
toolList = ["lasso_select", "tap", "reset", "save"]
# Create a figure relating the percentages
pctFig = figure(
title="2PT FG % vs 3PT FG %, 2017-18 Regular Season",
plot_height=400,
plot_width=400,
tools=toolList,
x_axis_label="2PT FG%",
y_axis_label="3PT FG%",
)
# Draw with circle markers
pctFig.circle(
x="team2P%", y="team3P%", source=gm_stats_cds, size=12, color="black"
)
# Format the tick labels on both axes as percentages
pctFig.xaxis[0].formatter = NumeralTickFormatter(format="00.0%")
pctFig.yaxis[0].formatter = NumeralTickFormatter(format="00.0%")
# Create a figure relating the totals
totFig = figure(
title="Team Points vs Opponent Points, 2017-18 Regular Season",
plot_height=400,
plot_width=400,
tools=toolList,
x_axis_label="Team Points",
y_axis_label="Opponent Points",
)
# Draw with square markers
totFig.square(
x="teamPTS",
y="opptPTS",
source=gm_stats_cds,
size=10,
color=dict(field="winLoss", transform=win_loss_mapper),
)
# Create layout
grid = gridplot([[pctFig, totFig]])
# Visualize
show(grid)
|
4078f71769bc42b43538b168ff02fa2dcceaf286
|
53a83642c01a8828e3d7bd0b18e33c3b694c2b84
|
/Python/GeeksforGeeks/count_of_smaller_elements.py
|
4c5c5e115f8b81dcf6f13fccf56eb7e64ca150ac
|
[] |
no_license
|
anantkaushik/Competitive_Programming
|
1dcd60a28b5b951c23024d6090942be081ad249f
|
6dba38fd7aa4e71b5196d01d64e81f9336d08b13
|
refs/heads/master
| 2022-03-06T15:36:23.797340
| 2022-02-21T12:00:37
| 2022-02-21T12:00:37
| 82,700,948
| 271
| 95
| null | 2020-10-27T17:34:39
| 2017-02-21T16:18:16
|
Python
|
UTF-8
|
Python
| false
| false
| 1,049
|
py
|
count_of_smaller_elements.py
|
"""
Given a sorted array of size n, find the number of elements which are less than or equal to a given element.
Input:
The first line of input contains an integer T denoting the number of test cases. Then T test cases follow.
Each test case contains an integer n denoting the size of the array. Then the next line contains n space separated integers forming the array.
Output:
Print the number of elements which are less than or equal to the given element.
Example:
Input:
2
6
1 2 4 5 8 10
9
7
1 2 2 2 5 7 9
2
Output:
5
4
"""
t = int(input())
def binarysearch(n,nums,target):
first = 0
last = n-1
middle = int((first+last)/2)
while first<= last:
if nums[middle] <= target:
first = middle + 1
else:
last = middle - 1
middle = int((first+last)/2)
if first > last:
return first
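# Equivalent check (not part of the original source): the loop above returns the
# count of elements <= target in a sorted list, i.e. the insertion point to the
# right of existing entries. The standard library gives the same answer for the
# first sample case from the docstring:
#
# >>> import bisect
# >>> bisect.bisect_right([1, 2, 4, 5, 8, 10], 9)
# 5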
while t > 0:
n = int(input())
arr = list(map(int,input().split()))
target = int(input())
print(binarysearch(n,arr,target))
t -= 1
|
969b5ce5b08b0ea77da64eb29bfb1036250a1d0d
|
3dc647cd07a7361ed401e40d2b7cce8c826c8f6c
|
/Lib/sre_parse.py
|
83119168e6376ee83bb41f7b75d2c9a0cd3058d7
|
[
"Python-2.0",
"CC-BY-4.0",
"MIT"
] |
permissive
|
RustPython/RustPython
|
5ddce4a9848b9de8c041ffd2634f83c0105d3f39
|
b864e5da1f18897fc884180b7093df5aa170024f
|
refs/heads/main
| 2023-09-04T12:38:29.458699
| 2023-09-03T12:33:42
| 2023-09-03T12:33:42
| 135,201,145
| 15,815
| 1,302
|
MIT
| 2023-09-14T08:11:45
| 2018-05-28T19:27:01
|
Rust
|
UTF-8
|
Python
| false
| false
| 40,230
|
py
|
sre_parse.py
|
#
# Secret Labs' Regular Expression Engine
#
# convert re-style regular expression to sre pattern
#
# Copyright (c) 1998-2001 by Secret Labs AB. All rights reserved.
#
# See the sre.py file for information on usage and redistribution.
#
"""Internal support module for sre"""
# XXX: show string offset and offending character for all errors
from sre_constants import *
SPECIAL_CHARS = ".\\[{()*+?^$|"
REPEAT_CHARS = "*+?{"
DIGITS = frozenset("0123456789")
OCTDIGITS = frozenset("01234567")
HEXDIGITS = frozenset("0123456789abcdefABCDEF")
ASCIILETTERS = frozenset("abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ")
WHITESPACE = frozenset(" \t\n\r\v\f")
_REPEATCODES = frozenset({MIN_REPEAT, MAX_REPEAT})
_UNITCODES = frozenset({ANY, RANGE, IN, LITERAL, NOT_LITERAL, CATEGORY})
ESCAPES = {
r"\a": (LITERAL, ord("\a")),
r"\b": (LITERAL, ord("\b")),
r"\f": (LITERAL, ord("\f")),
r"\n": (LITERAL, ord("\n")),
r"\r": (LITERAL, ord("\r")),
r"\t": (LITERAL, ord("\t")),
r"\v": (LITERAL, ord("\v")),
r"\\": (LITERAL, ord("\\"))
}
CATEGORIES = {
r"\A": (AT, AT_BEGINNING_STRING), # start of string
r"\b": (AT, AT_BOUNDARY),
r"\B": (AT, AT_NON_BOUNDARY),
r"\d": (IN, [(CATEGORY, CATEGORY_DIGIT)]),
r"\D": (IN, [(CATEGORY, CATEGORY_NOT_DIGIT)]),
r"\s": (IN, [(CATEGORY, CATEGORY_SPACE)]),
r"\S": (IN, [(CATEGORY, CATEGORY_NOT_SPACE)]),
r"\w": (IN, [(CATEGORY, CATEGORY_WORD)]),
r"\W": (IN, [(CATEGORY, CATEGORY_NOT_WORD)]),
r"\Z": (AT, AT_END_STRING), # end of string
}
FLAGS = {
# standard flags
"i": SRE_FLAG_IGNORECASE,
"L": SRE_FLAG_LOCALE,
"m": SRE_FLAG_MULTILINE,
"s": SRE_FLAG_DOTALL,
"x": SRE_FLAG_VERBOSE,
# extensions
"a": SRE_FLAG_ASCII,
"t": SRE_FLAG_TEMPLATE,
"u": SRE_FLAG_UNICODE,
}
TYPE_FLAGS = SRE_FLAG_ASCII | SRE_FLAG_LOCALE | SRE_FLAG_UNICODE
GLOBAL_FLAGS = SRE_FLAG_DEBUG | SRE_FLAG_TEMPLATE
class Verbose(Exception):
pass
class State:
# keeps track of state for parsing
def __init__(self):
self.flags = 0
self.groupdict = {}
self.groupwidths = [None] # group 0
self.lookbehindgroups = None
@property
def groups(self):
return len(self.groupwidths)
def opengroup(self, name=None):
gid = self.groups
self.groupwidths.append(None)
if self.groups > MAXGROUPS:
raise error("too many groups")
if name is not None:
ogid = self.groupdict.get(name, None)
if ogid is not None:
raise error("redefinition of group name %r as group %d; "
"was group %d" % (name, gid, ogid))
self.groupdict[name] = gid
return gid
def closegroup(self, gid, p):
self.groupwidths[gid] = p.getwidth()
def checkgroup(self, gid):
return gid < self.groups and self.groupwidths[gid] is not None
def checklookbehindgroup(self, gid, source):
if self.lookbehindgroups is not None:
if not self.checkgroup(gid):
raise source.error('cannot refer to an open group')
if gid >= self.lookbehindgroups:
raise source.error('cannot refer to group defined in the same '
'lookbehind subpattern')
class SubPattern:
# a subpattern, in intermediate form
def __init__(self, state, data=None):
self.state = state
if data is None:
data = []
self.data = data
self.width = None
def dump(self, level=0):
nl = True
seqtypes = (tuple, list)
for op, av in self.data:
print(level*" " + str(op), end='')
if op is IN:
# member sublanguage
print()
for op, a in av:
print((level+1)*" " + str(op), a)
elif op is BRANCH:
print()
for i, a in enumerate(av[1]):
if i:
print(level*" " + "OR")
a.dump(level+1)
elif op is GROUPREF_EXISTS:
condgroup, item_yes, item_no = av
print('', condgroup)
item_yes.dump(level+1)
if item_no:
print(level*" " + "ELSE")
item_no.dump(level+1)
elif isinstance(av, seqtypes):
nl = False
for a in av:
if isinstance(a, SubPattern):
if not nl:
print()
a.dump(level+1)
nl = True
else:
if not nl:
print(' ', end='')
print(a, end='')
nl = False
if not nl:
print()
else:
print('', av)
def __repr__(self):
return repr(self.data)
def __len__(self):
return len(self.data)
def __delitem__(self, index):
del self.data[index]
def __getitem__(self, index):
if isinstance(index, slice):
return SubPattern(self.state, self.data[index])
return self.data[index]
def __setitem__(self, index, code):
self.data[index] = code
def insert(self, index, code):
self.data.insert(index, code)
def append(self, code):
self.data.append(code)
def getwidth(self):
# determine the width (min, max) for this subpattern
if self.width is not None:
return self.width
lo = hi = 0
for op, av in self.data:
if op is BRANCH:
i = MAXREPEAT - 1
j = 0
for av in av[1]:
l, h = av.getwidth()
i = min(i, l)
j = max(j, h)
lo = lo + i
hi = hi + j
elif op is CALL:
i, j = av.getwidth()
lo = lo + i
hi = hi + j
elif op is SUBPATTERN:
i, j = av[-1].getwidth()
lo = lo + i
hi = hi + j
elif op in _REPEATCODES:
i, j = av[2].getwidth()
lo = lo + i * av[0]
hi = hi + j * av[1]
elif op in _UNITCODES:
lo = lo + 1
hi = hi + 1
elif op is GROUPREF:
i, j = self.state.groupwidths[av]
lo = lo + i
hi = hi + j
elif op is GROUPREF_EXISTS:
i, j = av[1].getwidth()
if av[2] is not None:
l, h = av[2].getwidth()
i = min(i, l)
j = max(j, h)
else:
i = 0
lo = lo + i
hi = hi + j
elif op is SUCCESS:
break
self.width = min(lo, MAXREPEAT - 1), min(hi, MAXREPEAT)
return self.width
class Tokenizer:
def __init__(self, string):
self.istext = isinstance(string, str)
self.string = string
if not self.istext:
string = str(string, 'latin1')
self.decoded_string = string
self.index = 0
self.next = None
self.__next()
def __next(self):
index = self.index
try:
char = self.decoded_string[index]
except IndexError:
self.next = None
return
if char == "\\":
index += 1
try:
char += self.decoded_string[index]
except IndexError:
raise error("bad escape (end of pattern)",
self.string, len(self.string) - 1) from None
self.index = index + 1
self.next = char
def match(self, char):
if char == self.next:
self.__next()
return True
return False
def get(self):
this = self.next
self.__next()
return this
def getwhile(self, n, charset):
result = ''
for _ in range(n):
c = self.next
if c not in charset:
break
result += c
self.__next()
return result
def getuntil(self, terminator, name):
result = ''
while True:
c = self.next
self.__next()
if c is None:
if not result:
raise self.error("missing " + name)
raise self.error("missing %s, unterminated name" % terminator,
len(result))
if c == terminator:
if not result:
raise self.error("missing " + name, 1)
break
result += c
return result
@property
def pos(self):
return self.index - len(self.next or '')
def tell(self):
return self.index - len(self.next or '')
def seek(self, index):
self.index = index
self.__next()
def error(self, msg, offset=0):
return error(msg, self.string, self.tell() - offset)
def _class_escape(source, escape):
# handle escape code inside character class
code = ESCAPES.get(escape)
if code:
return code
code = CATEGORIES.get(escape)
if code and code[0] is IN:
return code
try:
c = escape[1:2]
if c == "x":
# hexadecimal escape (exactly two digits)
escape += source.getwhile(2, HEXDIGITS)
if len(escape) != 4:
raise source.error("incomplete escape %s" % escape, len(escape))
return LITERAL, int(escape[2:], 16)
elif c == "u" and source.istext:
# unicode escape (exactly four digits)
escape += source.getwhile(4, HEXDIGITS)
if len(escape) != 6:
raise source.error("incomplete escape %s" % escape, len(escape))
return LITERAL, int(escape[2:], 16)
elif c == "U" and source.istext:
# unicode escape (exactly eight digits)
escape += source.getwhile(8, HEXDIGITS)
if len(escape) != 10:
raise source.error("incomplete escape %s" % escape, len(escape))
c = int(escape[2:], 16)
chr(c) # raise ValueError for invalid code
return LITERAL, c
elif c == "N" and source.istext:
import unicodedata
# named unicode escape e.g. \N{EM DASH}
if not source.match('{'):
raise source.error("missing {")
charname = source.getuntil('}', 'character name')
try:
c = ord(unicodedata.lookup(charname))
except KeyError:
raise source.error("undefined character name %r" % charname,
len(charname) + len(r'\N{}'))
return LITERAL, c
elif c in OCTDIGITS:
# octal escape (up to three digits)
escape += source.getwhile(2, OCTDIGITS)
c = int(escape[1:], 8)
if c > 0o377:
raise source.error('octal escape value %s outside of '
'range 0-0o377' % escape, len(escape))
return LITERAL, c
elif c in DIGITS:
raise ValueError
if len(escape) == 2:
if c in ASCIILETTERS:
raise source.error('bad escape %s' % escape, len(escape))
return LITERAL, ord(escape[1])
except ValueError:
pass
raise source.error("bad escape %s" % escape, len(escape))
def _escape(source, escape, state):
# handle escape code in expression
code = CATEGORIES.get(escape)
if code:
return code
code = ESCAPES.get(escape)
if code:
return code
try:
c = escape[1:2]
if c == "x":
# hexadecimal escape
escape += source.getwhile(2, HEXDIGITS)
if len(escape) != 4:
raise source.error("incomplete escape %s" % escape, len(escape))
return LITERAL, int(escape[2:], 16)
elif c == "u" and source.istext:
# unicode escape (exactly four digits)
escape += source.getwhile(4, HEXDIGITS)
if len(escape) != 6:
raise source.error("incomplete escape %s" % escape, len(escape))
return LITERAL, int(escape[2:], 16)
elif c == "U" and source.istext:
# unicode escape (exactly eight digits)
escape += source.getwhile(8, HEXDIGITS)
if len(escape) != 10:
raise source.error("incomplete escape %s" % escape, len(escape))
c = int(escape[2:], 16)
chr(c) # raise ValueError for invalid code
return LITERAL, c
elif c == "N" and source.istext:
import unicodedata
# named unicode escape e.g. \N{EM DASH}
if not source.match('{'):
raise source.error("missing {")
charname = source.getuntil('}', 'character name')
try:
c = ord(unicodedata.lookup(charname))
except KeyError:
raise source.error("undefined character name %r" % charname,
len(charname) + len(r'\N{}'))
return LITERAL, c
elif c == "0":
# octal escape
escape += source.getwhile(2, OCTDIGITS)
return LITERAL, int(escape[1:], 8)
elif c in DIGITS:
# octal escape *or* decimal group reference (sigh)
if source.next in DIGITS:
escape += source.get()
if (escape[1] in OCTDIGITS and escape[2] in OCTDIGITS and
source.next in OCTDIGITS):
# got three octal digits; this is an octal escape
escape += source.get()
c = int(escape[1:], 8)
if c > 0o377:
raise source.error('octal escape value %s outside of '
'range 0-0o377' % escape,
len(escape))
return LITERAL, c
# not an octal escape, so this is a group reference
group = int(escape[1:])
if group < state.groups:
if not state.checkgroup(group):
raise source.error("cannot refer to an open group",
len(escape))
state.checklookbehindgroup(group, source)
return GROUPREF, group
raise source.error("invalid group reference %d" % group, len(escape) - 1)
if len(escape) == 2:
if c in ASCIILETTERS:
raise source.error("bad escape %s" % escape, len(escape))
return LITERAL, ord(escape[1])
except ValueError:
pass
raise source.error("bad escape %s" % escape, len(escape))
def _uniq(items):
return list(dict.fromkeys(items))
def _parse_sub(source, state, verbose, nested):
# parse an alternation: a|b|c
items = []
itemsappend = items.append
sourcematch = source.match
start = source.tell()
while True:
itemsappend(_parse(source, state, verbose, nested + 1,
not nested and not items))
if not sourcematch("|"):
break
if len(items) == 1:
return items[0]
subpattern = SubPattern(state)
# check if all items share a common prefix
while True:
prefix = None
for item in items:
if not item:
break
if prefix is None:
prefix = item[0]
elif item[0] != prefix:
break
else:
# all subitems start with a common "prefix".
# move it out of the branch
for item in items:
del item[0]
subpattern.append(prefix)
continue # check next one
break
# check if the branch can be replaced by a character set
set = []
for item in items:
if len(item) != 1:
break
op, av = item[0]
if op is LITERAL:
set.append((op, av))
elif op is IN and av[0][0] is not NEGATE:
set.extend(av)
else:
break
else:
# we can store this as a character set instead of a
# branch (the compiler may optimize this even more)
subpattern.append((IN, _uniq(set)))
return subpattern
subpattern.append((BRANCH, (None, items)))
return subpattern
def _parse(source, state, verbose, nested, first=False):
# parse a simple pattern
subpattern = SubPattern(state)
# precompute constants into local variables
subpatternappend = subpattern.append
sourceget = source.get
sourcematch = source.match
_len = len
_ord = ord
while True:
this = source.next
if this is None:
break # end of pattern
if this in "|)":
break # end of subpattern
sourceget()
if verbose:
# skip whitespace and comments
if this in WHITESPACE:
continue
if this == "#":
while True:
this = sourceget()
if this is None or this == "\n":
break
continue
if this[0] == "\\":
code = _escape(source, this, state)
subpatternappend(code)
elif this not in SPECIAL_CHARS:
subpatternappend((LITERAL, _ord(this)))
elif this == "[":
here = source.tell() - 1
# character set
set = []
setappend = set.append
## if sourcematch(":"):
## pass # handle character classes
if source.next == '[':
import warnings
warnings.warn(
'Possible nested set at position %d' % source.tell(),
FutureWarning, stacklevel=nested + 6
)
negate = sourcematch("^")
# check remaining characters
while True:
this = sourceget()
if this is None:
raise source.error("unterminated character set",
source.tell() - here)
if this == "]" and set:
break
elif this[0] == "\\":
code1 = _class_escape(source, this)
else:
if set and this in '-&~|' and source.next == this:
import warnings
warnings.warn(
'Possible set %s at position %d' % (
'difference' if this == '-' else
'intersection' if this == '&' else
'symmetric difference' if this == '~' else
'union',
source.tell() - 1),
FutureWarning, stacklevel=nested + 6
)
code1 = LITERAL, _ord(this)
if sourcematch("-"):
# potential range
that = sourceget()
if that is None:
raise source.error("unterminated character set",
source.tell() - here)
if that == "]":
if code1[0] is IN:
code1 = code1[1][0]
setappend(code1)
setappend((LITERAL, _ord("-")))
break
if that[0] == "\\":
code2 = _class_escape(source, that)
else:
if that == '-':
import warnings
warnings.warn(
'Possible set difference at position %d' % (
source.tell() - 2),
FutureWarning, stacklevel=nested + 6
)
code2 = LITERAL, _ord(that)
if code1[0] != LITERAL or code2[0] != LITERAL:
msg = "bad character range %s-%s" % (this, that)
raise source.error(msg, len(this) + 1 + len(that))
lo = code1[1]
hi = code2[1]
if hi < lo:
msg = "bad character range %s-%s" % (this, that)
raise source.error(msg, len(this) + 1 + len(that))
setappend((RANGE, (lo, hi)))
else:
if code1[0] is IN:
code1 = code1[1][0]
setappend(code1)
set = _uniq(set)
# XXX: <fl> should move set optimization to compiler!
if _len(set) == 1 and set[0][0] is LITERAL:
# optimization
if negate:
subpatternappend((NOT_LITERAL, set[0][1]))
else:
subpatternappend(set[0])
else:
if negate:
set.insert(0, (NEGATE, None))
# charmap optimization can't be added here because
# global flags still are not known
subpatternappend((IN, set))
elif this in REPEAT_CHARS:
# repeat previous item
here = source.tell()
if this == "?":
min, max = 0, 1
elif this == "*":
min, max = 0, MAXREPEAT
elif this == "+":
min, max = 1, MAXREPEAT
elif this == "{":
if source.next == "}":
subpatternappend((LITERAL, _ord(this)))
continue
min, max = 0, MAXREPEAT
lo = hi = ""
while source.next in DIGITS:
lo += sourceget()
if sourcematch(","):
while source.next in DIGITS:
hi += sourceget()
else:
hi = lo
if not sourcematch("}"):
subpatternappend((LITERAL, _ord(this)))
source.seek(here)
continue
if lo:
min = int(lo)
if min >= MAXREPEAT:
raise OverflowError("the repetition number is too large")
if hi:
max = int(hi)
if max >= MAXREPEAT:
raise OverflowError("the repetition number is too large")
if max < min:
raise source.error("min repeat greater than max repeat",
source.tell() - here)
else:
raise AssertionError("unsupported quantifier %r" % (char,))
# figure out which item to repeat
if subpattern:
item = subpattern[-1:]
else:
item = None
if not item or item[0][0] is AT:
raise source.error("nothing to repeat",
source.tell() - here + len(this))
if item[0][0] in _REPEATCODES:
raise source.error("multiple repeat",
source.tell() - here + len(this))
if item[0][0] is SUBPATTERN:
group, add_flags, del_flags, p = item[0][1]
if group is None and not add_flags and not del_flags:
item = p
if sourcematch("?"):
subpattern[-1] = (MIN_REPEAT, (min, max, item))
else:
subpattern[-1] = (MAX_REPEAT, (min, max, item))
elif this == ".":
subpatternappend((ANY, None))
elif this == "(":
start = source.tell() - 1
group = True
name = None
add_flags = 0
del_flags = 0
if sourcematch("?"):
# options
char = sourceget()
if char is None:
raise source.error("unexpected end of pattern")
if char == "P":
# python extensions
if sourcematch("<"):
# named group: skip forward to end of name
name = source.getuntil(">", "group name")
if not name.isidentifier():
msg = "bad character in group name %r" % name
raise source.error(msg, len(name) + 1)
elif sourcematch("="):
# named backreference
name = source.getuntil(")", "group name")
if not name.isidentifier():
msg = "bad character in group name %r" % name
raise source.error(msg, len(name) + 1)
gid = state.groupdict.get(name)
if gid is None:
msg = "unknown group name %r" % name
raise source.error(msg, len(name) + 1)
if not state.checkgroup(gid):
raise source.error("cannot refer to an open group",
len(name) + 1)
state.checklookbehindgroup(gid, source)
subpatternappend((GROUPREF, gid))
continue
else:
char = sourceget()
if char is None:
raise source.error("unexpected end of pattern")
raise source.error("unknown extension ?P" + char,
len(char) + 2)
elif char == ":":
# non-capturing group
group = None
elif char == "#":
# comment
while True:
if source.next is None:
raise source.error("missing ), unterminated comment",
source.tell() - start)
if sourceget() == ")":
break
continue
elif char in "=!<":
# lookahead assertions
dir = 1
if char == "<":
char = sourceget()
if char is None:
raise source.error("unexpected end of pattern")
if char not in "=!":
raise source.error("unknown extension ?<" + char,
len(char) + 2)
dir = -1 # lookbehind
lookbehindgroups = state.lookbehindgroups
if lookbehindgroups is None:
state.lookbehindgroups = state.groups
p = _parse_sub(source, state, verbose, nested + 1)
if dir < 0:
if lookbehindgroups is None:
state.lookbehindgroups = None
if not sourcematch(")"):
raise source.error("missing ), unterminated subpattern",
source.tell() - start)
if char == "=":
subpatternappend((ASSERT, (dir, p)))
else:
subpatternappend((ASSERT_NOT, (dir, p)))
continue
elif char == "(":
# conditional backreference group
condname = source.getuntil(")", "group name")
if condname.isidentifier():
condgroup = state.groupdict.get(condname)
if condgroup is None:
msg = "unknown group name %r" % condname
raise source.error(msg, len(condname) + 1)
else:
try:
condgroup = int(condname)
if condgroup < 0:
raise ValueError
except ValueError:
msg = "bad character in group name %r" % condname
raise source.error(msg, len(condname) + 1) from None
if not condgroup:
raise source.error("bad group number",
len(condname) + 1)
if condgroup >= MAXGROUPS:
msg = "invalid group reference %d" % condgroup
raise source.error(msg, len(condname) + 1)
state.checklookbehindgroup(condgroup, source)
item_yes = _parse(source, state, verbose, nested + 1)
if source.match("|"):
item_no = _parse(source, state, verbose, nested + 1)
if source.next == "|":
raise source.error("conditional backref with more than two branches")
else:
item_no = None
if not source.match(")"):
raise source.error("missing ), unterminated subpattern",
source.tell() - start)
subpatternappend((GROUPREF_EXISTS, (condgroup, item_yes, item_no)))
continue
elif char in FLAGS or char == "-":
# flags
flags = _parse_flags(source, state, char)
if flags is None: # global flags
if not first or subpattern:
import warnings
warnings.warn(
'Flags not at the start of the expression %r%s' % (
source.string[:20], # truncate long regexes
' (truncated)' if len(source.string) > 20 else '',
),
DeprecationWarning, stacklevel=nested + 6
)
if (state.flags & SRE_FLAG_VERBOSE) and not verbose:
raise Verbose
continue
add_flags, del_flags = flags
group = None
else:
raise source.error("unknown extension ?" + char,
len(char) + 1)
# parse group contents
if group is not None:
try:
group = state.opengroup(name)
except error as err:
raise source.error(err.msg, len(name) + 1) from None
sub_verbose = ((verbose or (add_flags & SRE_FLAG_VERBOSE)) and
not (del_flags & SRE_FLAG_VERBOSE))
p = _parse_sub(source, state, sub_verbose, nested + 1)
if not source.match(")"):
raise source.error("missing ), unterminated subpattern",
source.tell() - start)
if group is not None:
state.closegroup(group, p)
subpatternappend((SUBPATTERN, (group, add_flags, del_flags, p)))
elif this == "^":
subpatternappend((AT, AT_BEGINNING))
elif this == "$":
subpatternappend((AT, AT_END))
else:
raise AssertionError("unsupported special character %r" % (char,))
# unpack non-capturing groups
for i in range(len(subpattern))[::-1]:
op, av = subpattern[i]
if op is SUBPATTERN:
group, add_flags, del_flags, p = av
if group is None and not add_flags and not del_flags:
subpattern[i: i+1] = p
return subpattern
def _parse_flags(source, state, char):
sourceget = source.get
add_flags = 0
del_flags = 0
if char != "-":
while True:
flag = FLAGS[char]
if source.istext:
if char == 'L':
msg = "bad inline flags: cannot use 'L' flag with a str pattern"
raise source.error(msg)
else:
if char == 'u':
msg = "bad inline flags: cannot use 'u' flag with a bytes pattern"
raise source.error(msg)
add_flags |= flag
if (flag & TYPE_FLAGS) and (add_flags & TYPE_FLAGS) != flag:
msg = "bad inline flags: flags 'a', 'u' and 'L' are incompatible"
raise source.error(msg)
char = sourceget()
if char is None:
raise source.error("missing -, : or )")
if char in ")-:":
break
if char not in FLAGS:
msg = "unknown flag" if char.isalpha() else "missing -, : or )"
raise source.error(msg, len(char))
if char == ")":
state.flags |= add_flags
return None
if add_flags & GLOBAL_FLAGS:
raise source.error("bad inline flags: cannot turn on global flag", 1)
if char == "-":
char = sourceget()
if char is None:
raise source.error("missing flag")
if char not in FLAGS:
msg = "unknown flag" if char.isalpha() else "missing flag"
raise source.error(msg, len(char))
while True:
flag = FLAGS[char]
if flag & TYPE_FLAGS:
msg = "bad inline flags: cannot turn off flags 'a', 'u' and 'L'"
raise source.error(msg)
del_flags |= flag
char = sourceget()
if char is None:
raise source.error("missing :")
if char == ":":
break
if char not in FLAGS:
msg = "unknown flag" if char.isalpha() else "missing :"
raise source.error(msg, len(char))
assert char == ":"
if del_flags & GLOBAL_FLAGS:
raise source.error("bad inline flags: cannot turn off global flag", 1)
if add_flags & del_flags:
raise source.error("bad inline flags: flag turned on and off", 1)
return add_flags, del_flags
def fix_flags(src, flags):
# Check and fix flags according to the type of pattern (str or bytes)
if isinstance(src, str):
if flags & SRE_FLAG_LOCALE:
raise ValueError("cannot use LOCALE flag with a str pattern")
if not flags & SRE_FLAG_ASCII:
flags |= SRE_FLAG_UNICODE
elif flags & SRE_FLAG_UNICODE:
raise ValueError("ASCII and UNICODE flags are incompatible")
else:
if flags & SRE_FLAG_UNICODE:
raise ValueError("cannot use UNICODE flag with a bytes pattern")
if flags & SRE_FLAG_LOCALE and flags & SRE_FLAG_ASCII:
raise ValueError("ASCII and LOCALE flags are incompatible")
return flags
def parse(str, flags=0, state=None):
# parse 're' pattern into list of (opcode, argument) tuples
source = Tokenizer(str)
if state is None:
state = State()
state.flags = flags
state.str = str
try:
p = _parse_sub(source, state, flags & SRE_FLAG_VERBOSE, 0)
except Verbose:
# the VERBOSE flag was switched on inside the pattern. to be
# on the safe side, we'll parse the whole thing again...
state = State()
state.flags = flags | SRE_FLAG_VERBOSE
state.str = str
source.seek(0)
p = _parse_sub(source, state, True, 0)
p.state.flags = fix_flags(str, p.state.flags)
if source.next is not None:
assert source.next == ")"
raise source.error("unbalanced parenthesis")
if flags & SRE_FLAG_DEBUG:
p.dump()
return p
def parse_template(source, state):
# parse 're' replacement string into list of literals and
# group references
s = Tokenizer(source)
sget = s.get
groups = []
literals = []
literal = []
lappend = literal.append
def addgroup(index, pos):
if index > state.groups:
raise s.error("invalid group reference %d" % index, pos)
if literal:
literals.append(''.join(literal))
del literal[:]
groups.append((len(literals), index))
literals.append(None)
groupindex = state.groupindex
while True:
this = sget()
if this is None:
break # end of replacement string
if this[0] == "\\":
# group
c = this[1]
if c == "g":
name = ""
if not s.match("<"):
raise s.error("missing <")
name = s.getuntil(">", "group name")
if name.isidentifier():
try:
index = groupindex[name]
except KeyError:
raise IndexError("unknown group name %r" % name)
else:
try:
index = int(name)
if index < 0:
raise ValueError
except ValueError:
raise s.error("bad character in group name %r" % name,
len(name) + 1) from None
if index >= MAXGROUPS:
raise s.error("invalid group reference %d" % index,
len(name) + 1)
addgroup(index, len(name) + 1)
elif c == "0":
if s.next in OCTDIGITS:
this += sget()
if s.next in OCTDIGITS:
this += sget()
lappend(chr(int(this[1:], 8) & 0xff))
elif c in DIGITS:
isoctal = False
if s.next in DIGITS:
this += sget()
if (c in OCTDIGITS and this[2] in OCTDIGITS and
s.next in OCTDIGITS):
this += sget()
isoctal = True
c = int(this[1:], 8)
if c > 0o377:
raise s.error('octal escape value %s outside of '
'range 0-0o377' % this, len(this))
lappend(chr(c))
if not isoctal:
addgroup(int(this[1:]), len(this) - 1)
else:
try:
this = chr(ESCAPES[this][1])
except KeyError:
if c in ASCIILETTERS:
raise s.error('bad escape %s' % this, len(this))
lappend(this)
else:
lappend(this)
if literal:
literals.append(''.join(literal))
if not isinstance(source, str):
# The tokenizer implicitly decodes bytes objects as latin-1, we must
# therefore re-encode the final representation.
literals = [None if s is None else s.encode('latin-1') for s in literals]
return groups, literals
def expand_template(template, match):
g = match.group
empty = match.string[:0]
groups, literals = template
literals = literals[:]
try:
for index, group in groups:
literals[index] = g(group) or empty
except IndexError:
raise error("invalid group reference %d" % index)
return empty.join(literals)
|
b4d47b18393f1f5cc91b55b5d14f67fd1b370803
|
aa2dd0720ac3cf261c7e2d2cdf3d88dee68360d5
|
/kinto/plugins/openid/__init__.py
|
40806616ee4fdc0fe0a59233bcbdf13f5dd1cdfc
|
[
"Apache-2.0"
] |
permissive
|
Kinto/kinto
|
3025e269a5f2ecc8077fd44fbb1e6c38ae6a4a8b
|
6edf6453033e0106410fe1f8c70323b6fea2f2fe
|
refs/heads/master
| 2023-08-31T13:36:10.987472
| 2023-08-22T09:37:52
| 2023-08-22T09:37:52
| 31,315,021
| 4,764
| 575
|
NOASSERTION
| 2023-09-13T14:41:08
| 2015-02-25T13:34:23
|
Python
|
UTF-8
|
Python
| false
| false
| 4,863
|
py
|
__init__.py
|
import requests
from pyramid import authentication as base_auth
from pyramid.interfaces import IAuthenticationPolicy
from pyramid.settings import aslist
from zope.interface import implementer
from kinto.core import logger
from kinto.core import utils as core_utils
from kinto.core.openapi import OpenAPI
from .utils import fetch_openid_config
@implementer(IAuthenticationPolicy)
class OpenIDConnectPolicy(base_auth.CallbackAuthenticationPolicy):
def __init__(self, issuer, client_id, realm="Realm", **kwargs):
self.realm = realm
self.issuer = issuer
self.client_id = client_id
self.client_secret = kwargs.get("client_secret", "")
self.header_type = kwargs.get("header_type", "Bearer")
self.userid_field = kwargs.get("userid_field", "sub")
self.verification_ttl = int(kwargs.get("verification_ttl_seconds", 86400))
# Fetch OpenID config (at instantiation, ie. startup)
self.oid_config = fetch_openid_config(issuer)
self._jwt_keys = None
def unauthenticated_userid(self, request):
"""Return the userid or ``None`` if token could not be verified."""
settings = request.registry.settings
hmac_secret = settings["userid_hmac_secret"]
authorization = request.headers.get("Authorization", "")
try:
authmeth, access_token = authorization.split(" ", 1)
except ValueError:
return None
if authmeth.lower() != self.header_type.lower():
return None
# XXX JWT Access token
# https://auth0.com/docs/tokens/access-token#access-token-format
# Check cache if these tokens were already verified.
hmac_tokens = core_utils.hmac_digest(hmac_secret, access_token)
cache_key = f"openid:verify:{hmac_tokens}"
payload = request.registry.cache.get(cache_key)
if payload is None:
# This can take some time.
payload = self._verify_token(access_token)
if payload is None:
return None
# Save for next time / refresh ttl.
request.registry.cache.set(cache_key, payload, ttl=self.verification_ttl)
request.bound_data["user_profile"] = payload
# Extract meaningful field from userinfo (eg. email or sub)
return payload.get(self.userid_field)
def forget(self, request):
"""A no-op. Credentials are sent on every request.
Return WWW-Authenticate Realm header for Bearer token.
"""
return [("WWW-Authenticate", '%s realm="%s"' % (self.header_type, self.realm))]
def _verify_token(self, access_token):
uri = self.oid_config["userinfo_endpoint"]
# Opaque access token string. Fetch user info from profile.
try:
resp = requests.get(uri, headers={"Authorization": "Bearer " + access_token})
resp.raise_for_status()
userprofile = resp.json()
return userprofile
except (requests.exceptions.HTTPError, ValueError, KeyError) as e:
logger.debug("Unable to fetch user profile from %s (%s)" % (uri, e))
return None
def get_user_profile(request):
return request.bound_data.get("user_profile", {})
def includeme(config):
# Activate end-points.
config.scan("kinto.plugins.openid.views")
settings = config.get_settings()
openid_policies = []
for policy in aslist(settings["multiauth.policies"]):
v = settings.get("multiauth.policy.%s.use" % policy, "")
if v.endswith("OpenIDConnectPolicy"):
openid_policies.append(policy)
if len(openid_policies) == 0:
# Do not add the capability if no policy is configured.
return
providers_infos = []
for name in openid_policies:
issuer = settings["multiauth.policy.%s.issuer" % name]
openid_config = fetch_openid_config(issuer)
client_id = settings["multiauth.policy.%s.client_id" % name]
header_type = settings.get("multiauth.policy.%s.header_type" % name, "Bearer")
providers_infos.append(
{
"name": name,
"issuer": openid_config["issuer"],
"auth_path": "/openid/%s/login" % name,
"client_id": client_id,
"header_type": header_type,
"userinfo_endpoint": openid_config["userinfo_endpoint"],
}
)
OpenAPI.expose_authentication_method(
name, {"type": "oauth2", "authorizationUrl": openid_config["authorization_endpoint"]}
)
config.add_api_capability(
"openid",
description="OpenID connect support.",
url="http://kinto.readthedocs.io/en/stable/api/1.x/authentication.html",
providers=providers_infos,
)
config.add_request_method(get_user_profile, name="get_user_profile")
|
9e8179c7c0b35029fcdde1d00e84972cdd5cdc18
|
9138a287b2553f75566cad948463b04bb6f4ca14
|
/phone_numbers.py
|
de0c83010c43b6ae70ed7cf988e3732108fe095c
|
[
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0",
"MIT"
] |
permissive
|
Charcoal-SE/SmokeDetector
|
606c3dbfb92564b22b141d9caab42ccec7eacf4d
|
83b0ea59b2e90b4dc316719b097c1f374aba1d06
|
refs/heads/master
| 2023-09-04T02:29:08.555872
| 2023-09-03T21:39:11
| 2023-09-03T21:39:11
| 16,389,919
| 510
| 343
|
Apache-2.0
| 2023-09-14T21:02:08
| 2014-01-30T20:29:37
|
Python
|
UTF-8
|
Python
| false
| false
| 21,242
|
py
|
phone_numbers.py
|
# coding=utf-8
import regex
import number_homoglyphs
from helpers import get_only_digits, remove_end_regex_comments
# The NUMBER_REGEXes are used to obtain strings within a post which are considered to be a single "number". While
# it would be nice to be able to just use a single regular expression like:
# r'(?:[(+{[]{1,2}\d|\d(?<=[^\d(+{[]\d|^\d))[\W_]*+(?:\d[\W_]*+){7,18}\d(?=\D|$)'
# Doing so won't get us all the possible matches of different lengths which start from the same character, even
# when using the regex package's overlapped=True option. In order to get all different possible lengths,
# we use multiple regular expressions, with each specifying an explicit length within the range in which we're
# interested and then combine the results.
# In order to make it more efficient, we combine those into a single regular expression using lookaheads and
# capture groups, which will put all of the different possibilites into capture groups, along with empty strings
# for each length which didn't match.
# The use of separate Unicode and ASCII flagged versions of the regexes is also because they can result in different
# start and end points for the numbers. We continue to keep that separation for the NUMBER_REGEX,
# NUMBER_REGEX_START, and NUMBER_REGEX_END in order to not have a separate source for a combined regex. This
# does result in our CI testing being a bit slower, but is a trade-off for not using two separate regexes, which
# would reduce maintainability.
# The minimum number of digits to be considered a "number":
NUMBER_REGEX_MINIMUM_DIGITS = 7
# The maximum number of digits to be considered a "number":
NUMBER_REGEX_MAXIMUM_DIGITS = 20
NUMBER_REGEX_RANGE_LOW = NUMBER_REGEX_MINIMUM_DIGITS - 2
NUMBER_REGEX_RANGE_HIGH = NUMBER_REGEX_MAXIMUM_DIGITS - 2
VALID_NON_DIGIT_START_CHARACTERS = r'(+{['
NUMBER_REGEX_START_TEXT = r'(?:[' + VALID_NON_DIGIT_START_CHARACTERS + \
r']{1,2}\d|\d(?<=[^\d' + VALID_NON_DIGIT_START_CHARACTERS + \
r']\d|^\d))(?:[\W_]*+|\D(?:(?=\d)|(?<=\d\D)))'
NUMBER_REGEX_MIDDLE_TEXT = r'(?:\d(?:[\W_]*+|\D(?:(?=\d)|(?<=\d\D)))){{{}}}'
NUMBER_REGEX_END_TEXT = r'\d(?=\D|$)'
def get_number_regex_with_quantfier(quantifier):
return NUMBER_REGEX_START_TEXT + NUMBER_REGEX_MIDDLE_TEXT.format(quantifier) + NUMBER_REGEX_END_TEXT
NUMBER_REGEX_RANGE_TEXT = "{},{}".format(NUMBER_REGEX_RANGE_LOW, NUMBER_REGEX_RANGE_HIGH)
NUMBER_REGEXTEXT_WITH_RANGE = get_number_regex_with_quantfier(NUMBER_REGEX_RANGE_TEXT)
# Starting the regex with a pattern for the entire range limits the rest of the overall regex to only being tested
# on characters where there's going to be a match.
NUMBER_REGEX_TEXT = r'(?={})'.format(NUMBER_REGEXTEXT_WITH_RANGE)
for number_regex_length in range(NUMBER_REGEX_RANGE_LOW, NUMBER_REGEX_RANGE_HIGH):
# These lookaheads all have an empty pattern as a second option. This causes all of them to
# always match, which results in the capture group having the capture and not causing evaluation
# of the regex to stop.
NUMBER_REGEX_TEXT += r'(?=({})|)'.format(get_number_regex_with_quantfier(number_regex_length))
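# Illustrative sketch (not part of the original source) of the lookahead/capture
# trick described above, using a deliberately tiny pattern rather than the real
# NUMBER_REGEX_TEXT: each lookahead either captures its fixed-length match or
# takes the empty alternative, so one scan yields every candidate length that
# starts at the same position.
#
# >>> regex.search(r'(?=(\d{3})|)(?=(\d{4})|)', '12345').groups()
# ('123', '1234')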
# The NUMBER_REGEX is used to verify that a pattern will be able to make an exact match to text strings which are
# selected by the NUMBER_REGEXes. It should be used as a test to verify patterns for number watches and
# blacklists.
NUMBER_REGEX = {
'unicode': regex.compile(NUMBER_REGEX_TEXT, flags=regex.UNICODE),
'ascii': regex.compile(NUMBER_REGEX_TEXT, flags=regex.ASCII)
}
NUMBER_REGEX_START = {
'unicode': regex.compile(r'^' + NUMBER_REGEX_START_TEXT, flags=regex.UNICODE),
'ascii': regex.compile(r'^' + NUMBER_REGEX_START_TEXT, flags=regex.ASCII)
}
NUMBER_REGEX_END = {
'unicode': regex.compile(NUMBER_REGEX_END_TEXT + r'$', flags=regex.UNICODE),
'ascii': regex.compile(NUMBER_REGEX_END_TEXT + r'$', flags=regex.ASCII)
}
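# Illustrative sketch (not part of the module's runtime logic): the lookahead-with-empty-alternative
# construction above can be seen on a toy pattern. Every lookahead always succeeds, so one search
# yields all candidate lengths anchored at the same position; a length which cannot match simply
# leaves its capture group unset. For example, with the regex package:
#   m = regex.search(r'(?=(\d{3})|)(?=(\d{4})|)', '12345')
#   m.groups()  # -> ('123', '1234'): overlapping matches of both lengths from one start point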
def matches_regex_ascii_or_unicode(regex_dict, pattern):
return regex_dict['ascii'].search(pattern) or regex_dict['unicode'].search(pattern)
def matches_number_regex(pattern):
return matches_regex_ascii_or_unicode(NUMBER_REGEX, pattern)
def matches_number_regex_start(pattern):
return matches_regex_ascii_or_unicode(NUMBER_REGEX_START, pattern)
def matches_number_regex_end(pattern):
return matches_regex_ascii_or_unicode(NUMBER_REGEX_END, pattern)
def is_digit_count_in_number_regex_range(digit_count):
return digit_count > NUMBER_REGEX_MINIMUM_DIGITS and digit_count < NUMBER_REGEX_MAXIMUM_DIGITS
def normalize(number):
return get_only_digits(number)
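# For illustration (assuming get_only_digits strips every non-digit character):
#   normalize("+1 (234) 567-8901")  -> "12345678901"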
def normalize_set(numbers):
return {normalize(num) for num in numbers}
def normalize_list_only_changed(numbers):
# We want all which were changed by normalization, even if that results
# in re-introducing something that was excluded.
# Example: original: ['12a34', '1234']
# ^want ^don't want
normalized_list = []
for num in numbers:
normalized = normalize(num)
if normalized != num:
normalized_list.append(normalized)
return normalized_list
def normalize_list(numbers):
return [normalize(num) for num in numbers]
def get_candidate_set_with_start_characters(candidate):
result = set()
base = regex.sub(r'^[' + VALID_NON_DIGIT_START_CHARACTERS + r']+', '', candidate)
result.add(base)
for first in VALID_NON_DIGIT_START_CHARACTERS:
result.add(first + base)
for second in VALID_NON_DIGIT_START_CHARACTERS:
result.add(first + second + base)
return result
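# Sketch of the expected result (illustrative): for a candidate such as "123" or "+(123", the base
# "123" is returned together with every one- and two-character prefix drawn from
# VALID_NON_DIGIT_START_CHARACTERS, i.e. 1 + 4 + 16 = 21 strings like "(123", "+(123" and "[{123".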
def get_all_candidates(text):
"""
Get unprocessed number candidates, normalized entries which are different from their unprocessed source,
and the normalized candidates which are newly generated as a result of deobfuscation.
"""
unprocessed_list, normalized_list = get_candidates(text, True)
raw_deobfuscated_list = get_deobfuscated_candidates(text)
# The raw_deobfuscated list should contain everything that's in the unprocessed list.
# We don't want to consider any entries which are identical to ones already in the unprocessed
# list. However, it's possible that an additional identical entry was created through deobfuscation.
# So, if there are 2 copies of a number on the unprocessed_list and 3 of that number on the
# raw_deobfuscated_list, then we want to end up with 1 of that number on the deobfuscated_list.
for unprocessed in unprocessed_list:
try:
raw_deobfuscated_list.remove(unprocessed)
except ValueError:
pass
# We only ever deal with the deobfuscated numbers in normalized format. Unlike the normalized list,
# we want all of them, even if unchanged.
deobfuscated_list = normalize_list(raw_deobfuscated_list)
return set(unprocessed_list), set(normalized_list), set(deobfuscated_list)
def get_candidates(text, also_normalized=False):
"""
:param text: Text from which to extract number candidates
:param also_normalized: Also return the normalized list
:return: candidate_list, or (candidate_list, normalized_list) when also_normalized is True
"""
# The differences between this implementation and the original get_candidates(), which was based on a
# regex implementation, are:
# 1. This doesn't have the same potential for catastrophic CPU usage based on input text.
# 2. When the first character in the candidate is not a digit, this returns only one candidate.
# For example "+(123..." will return ["+(123..."]. The regex version returns two candidates, but not
# the version without the non-digit start characters (i.e. it returns ["+(123...", "(123..."]).
# The characters other than digits which are valid at the start are in VALID_NON_DIGIT_START_CHARACTERS.
# The intent at that time was to generate more verbatim matches, but it's better to just have the one
# result. In the meantime, normalized matching has been improved and more emphasis placed on it.
# 3. The regex version routinely returned duplicate entries. This implementation only returns duplicate
# entries if there are duplicates in the input text.
candidates = []
candidates_normalized = []
in_process_normalized = []
in_process = []
in_process_digit_counts = []
non_digits = ''
prev_non_digit = ''
prev_prev_non_digit = ''
digits = ''
# alpha_count is, primarily, the number of alpha characters encountered since the last digit. However, it's
# also used as a flag, by setting alpha_count = max_alpha + 1, to indicate that some other criterion has
# been reached which should cause the same behavior.
# Specifically, it's used for when len_digits > NUMBER_REGEX_MAXIMUM_DIGITS or when
# len(non_digits) > max_non_digits.
alpha_count = 0
max_alpha = 1
# max_non_digits is moderately high, but is intended to account for potential zalgo text, and/or
# combining characters, which would leave the number still readable by humans.
max_non_digits = 50
def promote_any_in_process_with_appropriate_digit_count():
for index in range(len(in_process)):
cur_count = in_process_digit_counts[index]
if cur_count >= NUMBER_REGEX_MINIMUM_DIGITS and cur_count <= NUMBER_REGEX_MAXIMUM_DIGITS:
candidates.append(in_process[index])
if in_process_normalized[index][0] != 'z':
# The 'z' at the start is used as a flag that this isn't a valid normalized entry.
candidates_normalized.append(in_process_normalized[index])
def evict_any_in_process_with_too_many_digits():
for index in reversed(range(len(in_process))):
if in_process_digit_counts[index] > NUMBER_REGEX_MAXIMUM_DIGITS:
del in_process[index]
del in_process_normalized[index]
del in_process_digit_counts[index]
def clear_in_process_if_more_than_limit_alpha():
nonlocal in_process
nonlocal in_process_normalized
nonlocal in_process_digit_counts
if in_process and alpha_count > max_alpha:
# No sequences continue past the limit of alpha characters
in_process_normalized = []
in_process = []
in_process_digit_counts = []
def if_digits_add_digits_to_all_in_process_and_promote():
nonlocal in_process
nonlocal in_process_normalized
nonlocal in_process_digit_counts
nonlocal digits
nonlocal alpha_count
nonlocal prev_non_digit
nonlocal prev_prev_non_digit
if digits:
len_digits = len(digits)
if len_digits > NUMBER_REGEX_MAXIMUM_DIGITS:
# Too many digits. No need to try adding them, nor remembering the next alpha chars
alpha_count = max_alpha + 1
clear_in_process_if_more_than_limit_alpha()
else:
in_process = [to_add + digits for to_add in in_process]
in_process_normalized = [to_add + digits for to_add in in_process_normalized]
in_process_digit_counts = [to_add + len_digits for to_add in in_process_digit_counts]
# The original regex was written so that if a sequence started with '+(123...', then
# both '+(123...' and '(123...' ended up as candidates.
if prev_non_digit in VALID_NON_DIGIT_START_CHARACTERS:
if prev_prev_non_digit in VALID_NON_DIGIT_START_CHARACTERS:
in_process.append(prev_prev_non_digit + prev_non_digit + digits)
else:
in_process.append(prev_non_digit + digits)
else:
in_process.append(digits)
in_process_normalized.append(digits)
in_process_digit_counts.append(len_digits)
promote_any_in_process_with_appropriate_digit_count()
evict_any_in_process_with_too_many_digits()
digits = ''
prev_non_digit = ''
prev_prev_non_digit = ''
for char in text:
if char >= '0' and char <= '9':
# It's a digit
digits += char
alpha_count = 0
if non_digits:
in_process = [to_add + non_digits for to_add in in_process]
non_digits = ''
else:
# Not a digit
if_digits_add_digits_to_all_in_process_and_promote()
prev_prev_non_digit = prev_non_digit
prev_non_digit = char
if (char >= 'A' and char <= 'Z') or (char >= 'a' and char <= 'z'):
alpha_count += 1
clear_in_process_if_more_than_limit_alpha()
if alpha_count > max_alpha:
non_digits = ''
else:
non_digits += char
if len(non_digits) > max_non_digits:
alpha_count = max_alpha + 1 # Secondary use is as a flag that all in_process should end.
clear_in_process_if_more_than_limit_alpha()
non_digits = ''
if_digits_add_digits_to_all_in_process_and_promote()
# We can look at returning the normalized in a bit
if also_normalized:
return candidates, candidates_normalized
return candidates
def get_normalized_candidates(text):
return normalize_set(get_candidates(text))
def get_normalized_deobfuscated_candidates(text):
return normalize_set(get_candidates(number_homoglyphs.normalize(text)))
def get_deobfuscated_candidates(text):
return get_candidates(number_homoglyphs.normalize(text))
# North American phone numbers with fairly strict formatting
# The goal here is to be sure about identification, even if that leaves some numbers unidentified.
# Without a 1. It must have a separator between the 3-3-4 groupings, like \d{3}\D\d{3}\D\d{4}, but with more
# than just a single \D permitted. The start can be our normal mix.
NA_NUMBER_CENTRAL_OFFICE_AND_LINE_REGEX = r'(?<=\D)[2-9]\d{2}(?:[\W_]*+|\D(?=\d))(?<=\D)\d{4}$'
NA_NUMBER_CENTRAL_OFFICE_AND_LINE_LOOSE = r'[2-9]\d{2}(?:[\W_]*+|\D(?=\d))\d{4}$'
NA_NUMBER_WITHOUT_ONE_REGEX_START = r'^(?:[' + VALID_NON_DIGIT_START_CHARACTERS + \
r']{1,2}[2-9]|[2-9](?<=[^\d' + VALID_NON_DIGIT_START_CHARACTERS + \
r'][2-9]|^[2-9]))\d{2}' + \
r'(?:[\W_]*+|\D(?:(?=\d)|(?<=\d\D)))'
NA_NUMBER_WITHOUT_ONE_REGEX = NA_NUMBER_WITHOUT_ONE_REGEX_START + NA_NUMBER_CENTRAL_OFFICE_AND_LINE_REGEX
NA_NUMBER_WITHOUT_ONE_LOOSE = NA_NUMBER_WITHOUT_ONE_REGEX_START + NA_NUMBER_CENTRAL_OFFICE_AND_LINE_LOOSE
# With a 1. It must have a separator between the 3-3-4 groupings, like 1\d{3}\D\d{3}\D\d{4}, but with more
# than just a single \D permitted and a separator is permitted after the 1. The start can be our normal mix.
NA_NUMBER_WITH_ONE_REGEX_START = r'^(?:[' + VALID_NON_DIGIT_START_CHARACTERS + \
r']{1,2}1|1(?<=[^\d' + VALID_NON_DIGIT_START_CHARACTERS + \
r']1|^1))(?:[\W_]*+|\D(?=\d))' + \
r'[2-9]\d{2}(?:[\W_]*+|\D(?=\d))'
# There's a trend toward using a straight format of "+12345678900", which should be considered a NA number.
NA_NUMBER_WITH_ONE_NO_SEPARATORS_REGEX = r'^\+?1[2-9]\d{2}[2-9]\d{2}\d{4}$'
NA_NUMBER_WITH_ONE_AREA_CODE_SHORT_SEPARATORS_REGEX = r'^\+?1\D{0,2}[2-9]\d{2}\D{0,2}[2-9]\d{2}\d{4}$'
NA_NUMBER_WITH_ONE_REGEX = NA_NUMBER_WITH_ONE_REGEX_START + NA_NUMBER_CENTRAL_OFFICE_AND_LINE_REGEX
NA_NUMBER_WITH_ONE_LOOSE = NA_NUMBER_WITH_ONE_REGEX_START + NA_NUMBER_CENTRAL_OFFICE_AND_LINE_LOOSE
NA_NUMBER_WITH_ONE_OR_ONE_NO_SEPARATORS_REGEX = '(?:' + NA_NUMBER_WITH_ONE_REGEX + '|' + \
NA_NUMBER_WITH_ONE_AREA_CODE_SHORT_SEPARATORS_REGEX + ')'
def is_north_american_phone_number_with_one(text):
return regex.match(NA_NUMBER_WITH_ONE_OR_ONE_NO_SEPARATORS_REGEX, text) is not None
def is_north_american_phone_number_without_one(text):
return regex.match(NA_NUMBER_WITHOUT_ONE_REGEX, text) is not None
def is_north_american_phone_number_with_one_loose(text):
return regex.match(NA_NUMBER_WITH_ONE_LOOSE, text) is not None
def is_north_american_phone_number_without_one_loose(text):
return regex.match(NA_NUMBER_WITHOUT_ONE_LOOSE, text) is not None
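# Illustrative expectations (a sketch; acceptance is defined solely by the regexes above):
#   is_north_american_phone_number_with_one("1-234-567-8901")     -> True
#   is_north_american_phone_number_without_one("(234) 567-8901")  -> True
#   is_north_american_phone_number_with_one("+12345678901")       -> True  (short-separator form)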
def deobfuscate(text):
return number_homoglyphs.normalize(text)
def get_north_american_with_separators_from_normalized(normalized):
base = normalized[-10:-7] + '-' + normalized[-7:-4] + '-' + normalized[-4:]
country_code = '1-' if len(normalized) > 10 else ''
return country_code + base
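# For illustration, given digits-only (normalized) input:
#   get_north_american_with_separators_from_normalized("12345678901") -> "1-234-567-8901"
#   get_north_american_with_separators_from_normalized("2345678901")  -> "234-567-8901"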
def get_maybe_north_american_not_in_normalized_but_in_all(pattern, normalized, all_normalized=None):
without_comments, comments = split_processed_and_comments(pattern)
north_american_extra, north_american_add_type, maybe_north_american_extra = \
get_north_american_alternate_normalized(normalize(deobfuscate(without_comments)), force=True)
if maybe_north_american_extra not in normalized and \
(all_normalized is None or maybe_north_american_extra in all_normalized):
return maybe_north_american_extra
return ''
def get_north_american_alternate_normalized(non_normalized, normalized=None, force=False):
normalized = normalized if normalized else normalize(non_normalized)
north_american_extra = ''
north_american_add_type = ''
maybe_north_american_extra = ''
non_normalized = normalized if force else non_normalized
if is_north_american_phone_number_with_one(non_normalized):
# Add a version without a one
north_american_extra = normalized[1:]
north_american_add_type = 'non-1'
elif is_north_american_phone_number_without_one(non_normalized):
# Add a version with a one
north_american_extra = '1' + normalized
north_american_add_type = 'add-1'
elif is_north_american_phone_number_with_one_loose(non_normalized):
# Add a version without a one
maybe_north_american_extra = normalized[1:]
north_american_add_type = 'non-1'
elif is_north_american_phone_number_without_one_loose(non_normalized):
# Add a version with a one
maybe_north_american_extra = '1' + normalized
north_american_add_type = 'add-1'
return north_american_extra, north_american_add_type, maybe_north_american_extra
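# For illustration (a sketch; the behaviour follows the matchers above):
#   get_north_american_alternate_normalized("1-234-567-8901")  -> ("2345678901", "non-1", "")
#   get_north_american_alternate_normalized("(234) 567-8901")  -> ("12345678901", "add-1", "")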
def split_processed_and_comments(pattern):
without_comments = remove_end_regex_comments(pattern)
comment = pattern.replace(without_comments, '')
return without_comments, comment
def check_comments_for_north_american_directive(comments):
force_is_north_american = 'is noram' in comments.lower() or 'IS NA' in comments
force_no_north_american = 'no noram' in comments.lower() or 'NO NA' in comments
return force_is_north_american, force_no_north_american
def get_north_american_forced_or_no_from_pattern(pattern):
without_comments, comments = split_processed_and_comments(pattern)
return check_comments_for_north_american_directive(comments)
def process_numlist(numlist, processed=None, normalized=None):
# The normalized list does contain any processed item which is also normalized.
processed = processed if processed is not None else set()
normalized = normalized if normalized is not None else set()
unique_normalized = set()
duplicate_normalized = set()
full_list = dict()
index = 0
for entry in numlist:
index += 1
this_entry_normalized = set()
without_comments, comments = split_processed_and_comments(entry)
processed.add(without_comments)
comment = entry.replace(without_comments, '')
force_is_north_american, force_no_north_american = check_comments_for_north_american_directive(comments)
# normalized to only digits
this_entry_normalized.add(normalize(without_comments))
deobfuscated = deobfuscate(without_comments)
# deobfuscated and normalized: We don't look for the non-normalized deobfuscated
normalized_deobfuscated = normalize(deobfuscated)
this_entry_normalized.add(normalized_deobfuscated)
if not force_no_north_american:
north_american_extra, north_american_add_type, maybe_north_american_extra = \
get_north_american_alternate_normalized(deobfuscated, normalized_deobfuscated, force_is_north_american)
if maybe_north_american_extra and force_is_north_american:
north_american_extra = maybe_north_american_extra
maybe_north_american_extra = ''
if north_american_extra:
this_entry_normalized.add(north_american_extra)
# The normalized list *does* contain the processed string, if it's also normalized, as we need it to test
# against obfuscated numbers.
normalized |= this_entry_normalized
full_entry = (without_comments, this_entry_normalized)
full_list[entry] = full_entry
return full_list, processed, normalized
|
af612ccbdf372d2d3328d584f8b2d0d24e84a428
|
7944d899365f7bc849f367b72f4cc285e041826c
|
/submitit/__init__.py
|
3051ac611f7afcbfd4edc92aa3177ca605819b4b
|
[
"MIT"
] |
permissive
|
facebookincubator/submitit
|
39edc49ad3c97883dd22cb7fe625d2db55f105d7
|
109f7c7f098de6775397e2b84826d59101a29a9f
|
refs/heads/main
| 2023-08-22T05:39:19.476497
| 2023-08-16T14:00:41
| 2023-08-16T14:00:41
| 258,441,818
| 947
| 111
|
MIT
| 2023-04-05T15:24:09
| 2020-04-24T07:41:09
|
Python
|
UTF-8
|
Python
| false
| false
| 852
|
py
|
__init__.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
""""Python 3.6+ toolbox for submitting jobs to Slurm"""
# allow explicit reimports (mypy) by renaming all imports
from . import helpers as helpers
from .auto.auto import AutoExecutor as AutoExecutor
from .core.core import Executor as Executor
from .core.core import Job as Job
from .core.job_environment import JobEnvironment as JobEnvironment
from .local.debug import DebugExecutor as DebugExecutor
from .local.debug import DebugJob as DebugJob
from .local.local import LocalExecutor as LocalExecutor
from .local.local import LocalJob as LocalJob
from .slurm.slurm import SlurmExecutor as SlurmExecutor
from .slurm.slurm import SlurmJob as SlurmJob
__version__ = "1.4.5"
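# Typical usage, sketched from the upstream documentation (names such as "timeout_min" and
# "slurm_partition" are taken from the README and may vary between versions):
#
#   import submitit
#   executor = submitit.AutoExecutor(folder="submitit_logs")
#   executor.update_parameters(timeout_min=5, slurm_partition="dev")
#   job = executor.submit(sum, [1, 2, 3])
#   print(job.result())  # 6, computed on the cluster (or locally via DebugExecutor)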
|
5ac9239532fea8ebc2b4bf40ba6792ffe143e80c
|
fd8ef75bb06383538cdb21ed2a0ef88e570179b7
|
/src/openfermion/resource_estimates/pbc/thc/compute_thc_resources_test.py
|
ec0b82c087a88d48f7ac55a11187c6d59b9223c2
|
[
"LicenseRef-scancode-generic-cla",
"Apache-2.0"
] |
permissive
|
quantumlib/OpenFermion
|
d1147383f99573d19005bd0f3e0120e9e9bed04c
|
788481753c798a72c5cb3aa9f2aa9da3ce3190b0
|
refs/heads/master
| 2023-09-04T11:00:32.124157
| 2023-08-24T21:54:30
| 2023-08-24T21:54:30
| 104,403,768
| 1,481
| 406
|
Apache-2.0
| 2023-08-24T21:54:31
| 2017-09-21T22:10:28
|
Python
|
UTF-8
|
Python
| false
| false
| 3,187
|
py
|
compute_thc_resources_test.py
|
# coverage: ignore
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import pytest
from openfermion.resource_estimates import HAVE_DEPS_FOR_RESOURCE_ESTIMATES
if HAVE_DEPS_FOR_RESOURCE_ESTIMATES:
from openfermion.resource_estimates.pbc.thc.compute_thc_resources import (
_compute_cost,
compute_cost,
)
@pytest.mark.skipif(not HAVE_DEPS_FOR_RESOURCE_ESTIMATES,
reason='pyscf and/or jax not installed.')
def test_thc_resources():
lam = 307.68
dE = 0.001
n = 108
chi = 10
beta = 16
M = 350
res = _compute_cost(n, lam, dE, chi, beta, M, 1, 1, 1, 20_000)
# print(res) # 26205, 12664955115, 2069
print(res) # (80098, 38711603694, 17630)
assert np.isclose(res[0], 80098)
assert np.isclose(res[1], 38711603694)
assert np.isclose(res[2], 17630)
res = _compute_cost(n, lam, dE, chi, beta, M, 3, 3, 3, 20_000)
# print(res) # 205788, 99457957764, 78813
print(res) # (270394, 130682231382, 78815)
assert np.isclose(res[0], 270394)
assert np.isclose(res[1], 130682231382)
assert np.isclose(res[2], 78815)
res = _compute_cost(n, lam, dE, chi, beta, M, 3, 5, 1, 20_000)
# print(res) # 151622, 73279367466, 39628
print(res) # (202209, 97728216327, 77517)
assert np.isclose(res[0], 202209)
assert np.isclose(res[1], 97728216327)
assert np.isclose(res[2], 77517)
@pytest.mark.skipif(not HAVE_DEPS_FOR_RESOURCE_ESTIMATES,
reason='pyscf and/or jax not installed.')
def test_thc_resources_helper():
lam = 307.68
dE = 0.001
n = 108
chi = 10
beta = 16
M = 350
res = compute_cost(
num_spin_orbs=n,
lambda_tot=lam,
thc_dim=M,
kmesh=[1, 1, 1],
dE_for_qpe=dE,
chi=chi,
beta=beta,
)
assert np.isclose(res.toffolis_per_step, 80098)
assert np.isclose(res.total_toffolis, 38711603694)
assert np.isclose(res.logical_qubits, 17630)
res = compute_cost(
num_spin_orbs=n,
lambda_tot=lam,
thc_dim=M,
kmesh=[3, 3, 3],
dE_for_qpe=dE,
chi=chi,
beta=beta,
)
assert np.isclose(res.toffolis_per_step, 270394)
assert np.isclose(res.total_toffolis, 130682231382)
assert np.isclose(res.logical_qubits, 78815)
res = compute_cost(
num_spin_orbs=n,
lambda_tot=lam,
thc_dim=M,
kmesh=[3, 5, 1],
dE_for_qpe=dE,
chi=chi,
beta=beta,
)
assert np.isclose(res.toffolis_per_step, 202209)
assert np.isclose(res.total_toffolis, 97728216327)
assert np.isclose(res.logical_qubits, 77517)
|
de29053adcf6d7d1c7ff39a8e2f15bdfb8450add
|
d92d607fe5e4f27a2f2c5f8676e5400e5410674f
|
/fwtool/util/__init__.py
|
c3e65dd22b5ef02b32e8256b46de6d939d839c01
|
[
"MIT"
] |
permissive
|
ma1co/fwtool.py
|
38bfc3018d70421265c246a128e8137ed954d7a6
|
cdba742b73eed5981480c326aeb30033aabf0223
|
refs/heads/master
| 2022-09-21T20:40:53.945202
| 2022-09-14T10:42:23
| 2022-09-14T10:42:23
| 45,698,894
| 147
| 34
|
MIT
| 2021-05-02T08:16:12
| 2015-11-06T18:14:11
|
Python
|
UTF-8
|
Python
| false
| false
| 1,934
|
py
|
__init__.py
|
"""Some utility functions to unpack integers"""
import binascii
import struct
from collections import namedtuple
def parse64be(data):
return struct.unpack('>Q', data)[0]
def dump64be(value):
return struct.pack('>Q', value)
def parse64le(data):
return struct.unpack('<Q', data)[0]
def dump64le(value):
return struct.pack('<Q', value)
def parse32be(data):
return struct.unpack('>I', data)[0]
def dump32be(value):
return struct.pack('>I', value)
def parse32le(data):
return struct.unpack('<I', data)[0]
def dump32le(value):
return struct.pack('<I', value)
def parse16be(data):
return struct.unpack('>H', data)[0]
def dump16be(value):
return struct.pack('>H', value)
def parse16le(data):
return struct.unpack('<H', data)[0]
def dump16le(value):
return struct.pack('<H', value)
def parse16leArr(data):
return struct.unpack('<%sH' % str(len(data) // 2), data)
def parse8(data):
return ord(data)
def dump8(value):
return chr(value)
def crc32(*files):
crc = 0
for file in files:
for chunk in iter(lambda: file.read(4096), b''):
crc = binascii.crc32(chunk, crc)
return crc & 0xffffffff
class Struct(object):
LITTLE_ENDIAN = '<'
BIG_ENDIAN = '>'
PADDING = '%dx'
CHAR = 'c'
STR = '%ds'
INT64 = 'Q'
INT32 = 'I'
INT16 = 'H'
INT8 = 'B'
def __init__(self, name, fields, byteorder=LITTLE_ENDIAN):
self.tuple = namedtuple(name, (n for n, fmt in fields if not isinstance(fmt, int)))
self.format = byteorder + ''.join(self.PADDING % fmt if isinstance(fmt, int) else fmt for n, fmt in fields)
self.size = struct.calcsize(self.format)
def unpack(self, data, offset = 0):
if isinstance(data, bytes):
data = data[offset:offset+self.size]
else:
data.seek(offset)
data = data.read(self.size)
if len(data) < self.size:
return None
return self.tuple._make(struct.unpack_from(self.format, data))
def pack(self, **kwargs):
return struct.pack(self.format, *self.tuple(**kwargs))
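# Illustrative usage of Struct (a sketch; the field names and byte values are hypothetical):
#   Header = Struct('Header', [
#       ('magic', Struct.STR % 4),
#       ('version', Struct.INT32),
#       ('reserved', 8),               # an int format means padding bytes, omitted from the tuple
#   ], Struct.BIG_ENDIAN)
#   hdr = Header.unpack(b'FWv1' + b'\x00\x00\x00\x02' + b'\x00' * 8)
#   # hdr.magic == b'FWv1' and hdr.version == 2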
|
0c233916ca5b1938f1982f0295d45a35c9999531
|
08a25eadde07c7489136c61f80bf326651c293f6
|
/ros_ws/src/crazyswarm/scripts/test_yamlString.py
|
d01c8842a1996444c336e054f9a40224eaa08cd1
|
[
"MIT"
] |
permissive
|
USC-ACTLab/crazyswarm
|
36c03d308882518d998c4ab6b26e7e8f5c05745f
|
beb05492eaf226462631545bf5972199099267c2
|
refs/heads/master
| 2023-06-03T04:31:43.944450
| 2022-12-17T19:15:17
| 2022-12-17T19:15:17
| 68,335,694
| 296
| 296
|
MIT
| 2023-05-12T18:08:28
| 2016-09-15T22:18:11
|
Python
|
UTF-8
|
Python
| false
| false
| 664
|
py
|
test_yamlString.py
|
#!/usr/bin/env python
import numpy as np
from pycrazyswarm import *
def test_yaml_string_load():
crazyflies_yaml = """
crazyflies:
- channel: 100
id: 1
initialPosition: [1.0, 0.0, 0.0]
- channel: 100
id: 10
initialPosition: [0.0, -1.0, 0.0]
"""
swarm = Crazyswarm(crazyflies_yaml=crazyflies_yaml, args="--sim --vis null")
timeHelper = swarm.timeHelper
cfs = swarm.allcfs.crazyflies
byId = swarm.allcfs.crazyfliesById
assert len(cfs) == 2
cf1 = byId[1]
assert np.all(cf1.initialPosition == [1.0, 0.0, 0.0])
cf10 = byId[10]
assert np.all(cf10.initialPosition == [0.0, -1.0, 0.0])
|
bd7fc201f4a6045710b58c3d917e187bd45a082c
|
eb9f655206c43c12b497c667ba56a0d358b6bc3a
|
/python/testData/inspections/AddKwargsToIncompatibleOverridingMethod_after.py
|
fb2aea89cc45ad605e441e4f5f8f4394b6b1b1ae
|
[
"Apache-2.0"
] |
permissive
|
JetBrains/intellij-community
|
2ed226e200ecc17c037dcddd4a006de56cd43941
|
05dbd4575d01a213f3f4d69aa4968473f2536142
|
refs/heads/master
| 2023-09-03T17:06:37.560889
| 2023-09-03T11:51:00
| 2023-09-03T12:12:27
| 2,489,216
| 16,288
| 6,635
|
Apache-2.0
| 2023-09-12T07:41:58
| 2011-09-30T13:33:05
| null |
UTF-8
|
Python
| false
| false
| 103
|
py
|
AddKwargsToIncompatibleOverridingMethod_after.py
|
class Base:
def m(self, x):
pass
class Sub(Base):
def m(self, **kwargs):
pass
|
90fd153b66672fd35a957f3f27b6dee87bdfc261
|
dcbef06d5a00f07756339b9e62c684dec2fee425
|
/tests/basics/ReferencingTest.py
|
05760384d7238c5e25b9baa5c86094de254cd0dd
|
[
"Apache-2.0",
"LicenseRef-scancode-warranty-disclaimer"
] |
permissive
|
Nuitka/Nuitka
|
f9543d8d95bfa0b81d4e60af0dfad99fb72893a4
|
d87faf2f7e1d6ed9bfe4cf8c1d648f34307e33f2
|
refs/heads/develop
| 2023-08-28T14:00:32.861328
| 2023-08-27T09:16:45
| 2023-08-27T09:16:45
| 9,626,741
| 8,573
| 599
|
Apache-2.0
| 2023-09-13T02:49:41
| 2013-04-23T15:40:33
|
Python
|
UTF-8
|
Python
| false
| false
| 23,841
|
py
|
ReferencingTest.py
|
# Copyright 2023, Kay Hayen, mailto:kay.hayen@gmail.com
#
# Python tests originally created or extracted from other peoples work. The
# parts were too small to be protected.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
""" Reference counting tests.
These contain functions that do specific things, where we suspect
that references may be lost or corrupted. Executing them repeatedly and
checking the reference count is how they are used.
"""
# While we use that for comparison code, no need to compile that.
# nuitka-project: --nofollow-import-to=nuitka
import os
import sys
# Find nuitka package relative to us.
sys.path.insert(
0,
os.path.normpath(
os.path.join(os.path.dirname(os.path.abspath(__file__)), "..", "..")
),
)
from nuitka.tools.testing.Common import executeReferenceChecked
# isort:start
# Tests do all bad things:
# pylint: disable=misplaced-bare-raise,raising-bad-type,reimported,undefined-variable
# pylint: disable=broad-except,eval-used,redefined-outer-name,unused-argument,unused-variable
# pylint: disable=attribute-defined-outside-init,bare-except,lost-exception,pointless-statement
# pylint: disable=exec-used,global-statement,invalid-name,not-callable,super-init-not-called
x = 17
# Just a function returning a constant. Functions don't become any smaller. Let's
# get that right.
def simpleFunction1():
return 1
# Do a bit of math with a local variable, assigning to its value and then doing
# an overwrite of that, trying that math again. This should cover local access
# a bit.
def simpleFunction2():
y = 3 * x
y = 3
return x * 2 * y
# A local function is being returned. This covers creation of local functions
# and their release. No closure variables involved yet.
def simpleFunction3():
def contained():
return x
return contained
# Again, a local function being returned, but this time with a local variable taken
# as a closure. We use the value from a defaulted argument, so it cannot be replaced.
def simpleFunction4(a=1):
y = a
def contained():
return y
return contained
# Default argument and building a list as local variables. Also return them,
# so they are not optimized away.
def simpleFunction5(a=2):
c = 1
f = [a, a + c]
return c, f
def simpleFunction6():
for _b in range(6):
pass
for _c in (1, 2, 3, 4, 5, 6):
pass
def simpleFunction7(b=1):
for _b in range(6):
pass
def simpleFunction8():
c = []
c.append(x)
def simpleFunction9(a=1 * 2):
if a == a:
pass
u = None
def simpleFunction10(a=1 * 2):
x = [u for u in range(8)]
def simpleFunction11():
f = 1
while f < 8:
f += 1
v = None
def simpleFunction12():
a = [(u, v) for (u, v) in zip(range(8), range(8))]
return a
def cond():
return 1
def simpleFunction13(a=1 * 2):
pass
def simpleFunction14p(x):
try:
simpleFunction14p(1, 1) # pylint: disable=too-many-function-args
except TypeError as _e:
pass
try:
simpleFunction14p(1, 1) # pylint: disable=too-many-function-args
except TypeError:
pass
def simpleFunction14():
simpleFunction14p(3)
def simpleFunction15p(x):
try:
try:
x += 1
finally:
try:
x *= 1
finally:
_z = 1
except:
pass
def simpleFunction15():
simpleFunction15p([1])
def simpleFunction16():
class EmptyClass:
pass
return EmptyClass
def simpleFunction17():
class EmptyObjectClass:
pass
return EmptyObjectClass()
def simpleFunction18():
closured = 1
class NonEmptyClass:
def __init__(self, a, b):
self.a = a
self.b = b
inside = closured
return NonEmptyClass(133, 135)
def simpleFunction19():
lam = lambda l: l + 1
return lam(9), lam
def simpleFunction20():
try:
a = []
a[1]
except IndexError as _e:
pass
def simpleFunction21():
class EmptyBaseClass:
def base(self):
return 3
class EmptyObjectClass(EmptyBaseClass):
pass
result = EmptyObjectClass()
c = result.base()
return result, c
def simpleFunction22():
return True is False and False is not None
def simpleFunction23():
not 2
def simpleFunction24p(x):
pass
def simpleFunction24():
simpleFunction24p(x=3)
def simpleFunction25():
class X:
f = 1
def inplace_adder(b):
X.f += b
return inplace_adder(6**8)
def simpleFunction26():
class X:
f = [5]
def inplace_adder(b):
X.f += b
return inplace_adder([1, 2])
def simpleFunction27():
a = {"g": 8}
def inplace_adder(b):
a["g"] += b
return inplace_adder(3)
def simpleFunction28():
a = {"g": [8], "h": 2}
def inplace_adder(b):
a["g"] += b
return inplace_adder([3, 5])
def simpleFunction29():
return "3" in "7"
def simpleFunction30():
def generatorFunction():
yield 1
yield 2
yield 3
def simpleFunction31():
def generatorFunction():
yield 1
yield 2
yield 3
a = []
for y in generatorFunction():
a.append(y)
for z in generatorFunction():
a.append(z)
def simpleFunction32():
def generatorFunction():
yield 1
gen = generatorFunction()
next(gen)
def simpleFunction33():
def generatorFunction():
a = 1
yield a
a = []
for y in generatorFunction():
a.append(y)
def simpleFunction34():
try:
raise ValueError
except:
pass
def simpleFunction35():
try:
raise ValueError(1, 2, 3)
except:
pass
def simpleFunction36():
try:
raise (TypeError, (3, x, x, x))
except TypeError:
pass
def simpleFunction37():
l = [1, 2, 3]
try:
_a, _b = l
except ValueError:
pass
def simpleFunction38():
class Base:
pass
class Parent(Base):
pass
def simpleFunction39():
class Parent(object):
pass
def simpleFunction40():
def myGenerator():
yield 1
myGenerator()
def simpleFunction41():
a = b = 2
return a, b
def simpleFunction42():
a = b = 2 * x
return a, b
def simpleFunction43():
class D:
pass
a = D()
a.b = 1
def simpleFunction47():
def reraisy():
def raisingFunction():
raise ValueError(3)
def reraiser():
raise
try:
raisingFunction()
except:
reraiser()
try:
reraisy()
except:
pass
def simpleFunction48():
class BlockExceptions:
def __enter__(self):
pass
def __exit__(self, exc, val, tb):
return True
with BlockExceptions():
raise ValueError()
template = "lala %s lala"
def simpleFunction49():
c = 3
d = 4
a = x, y = b, e = (c, d)
return a, y, b, e
b = range(10)
def simpleFunction50():
def getF():
def f():
for i in b:
yield i
return f
f = getF()
for x in range(2):
_r = list(f())
def simpleFunction51():
g = (x for x in range(9))
try:
g.throw(ValueError, 9)
except ValueError as _e:
pass
def simpleFunction52():
g = (x for x in range(9))
try:
g.throw(ValueError(9))
except ValueError as _e:
pass
def simpleFunction53():
g = (x for x in range(9))
try:
g.send(9)
except TypeError as _e:
pass
def simpleFunction54():
g = (x for x in range(9))
next(g)
try:
g.send(9)
except TypeError as _e:
pass
def simpleFunction55():
g = (x for x in range(9))
try:
g.close()
except ValueError as _e:
pass
def simpleFunction56():
"""Throw into finished generator."""
g = (x for x in range(9))
list(g)
try:
g.throw(ValueError(9))
except ValueError as _e:
pass
def simpleFunction60():
x = 1
y = 2
def f(a=x, b=y):
return a, b
f()
f(2)
f(3, 4)
def simpleFunction61():
a = 3
b = 5
try:
a = a * 2
return a
finally:
a / b
def simpleFunction62():
a = 3
b = 5
try:
a = a * 2
return a
finally:
return a / b
class X:
def __del__(self):
# Super used to reference leak.
x = super()
raise ValueError(1)
def simpleFunction63():
def superUser():
X()
try:
superUser()
except Exception:
pass
def simpleFunction64():
x = 2
y = 3
z = eval("x * y")
return z
def simpleFunction65():
import array
a = array.array("b", b"")
assert a == eval(repr(a), {"array": array.array})
d = {"x": 2, "y": 3}
z = eval(repr(d), d)
return z
def simpleFunction66():
import types
return type(simpleFunction65) == types.FunctionType
def simpleFunction67():
length = 100000
pattern = "1234567890\00\01\02\03\04\05\06"
q, r = divmod(length, len(pattern))
teststring = pattern * q + pattern[:r]
return teststring
def simpleFunction68():
from random import randrange
x = randrange(18)
def simpleFunction69():
pools = [tuple()]
g = ((len(pool) == 0,) for pool in pools)
next(g)
def simpleFunction70():
def gen():
try:
undefined_yyy
except Exception:
pass
yield sys.exc_info()
try:
undefined_xxx
except Exception:
return list(gen())
def simpleFunction71():
try:
undefined_global
except Exception:
try:
try:
raise
finally:
undefined_global
except Exception:
pass
def simpleFunction72():
try:
for _i in range(10):
try:
undefined_global
finally:
break
except Exception:
pass
def simpleFunction73():
for _i in range(10):
try:
undefined_global
finally:
return 7
def simpleFunction74():
import os # @Reimport
return os
def simpleFunction75():
def raising_gen():
try:
raise TypeError
except TypeError:
yield
g = raising_gen()
next(g)
try:
g.throw(RuntimeError())
except RuntimeError:
pass
def simpleFunction76():
class MyException(Exception):
def __init__(self, obj):
self.obj = obj
class MyObj:
pass
def inner_raising_func():
raise MyException(MyObj())
try:
inner_raising_func()
except MyException:
try:
try:
raise
finally:
raise
except MyException:
pass
class weirdstr(str):
def __getitem__(self, index):
return weirdstr(str.__getitem__(self, index))
def simpleFunction77():
return filter(lambda x: x >= "33", weirdstr("1234"))
def simpleFunction78():
a = "x = 2"
exec(a)
def simpleFunction79():
"some doc"
simpleFunction79.__doc__ = simpleFunction79.__doc__.replace("doc", "dok")
simpleFunction79.__doc__ += " and more" + simpleFunction79.__name__
def simpleFunction80():
"some doc"
del simpleFunction80.__doc__
def simpleFunction81():
def f():
yield 1
j
j = 1
x = list(f())
def simpleFunction82():
def f():
yield 1
j
j = 1
x = f.__doc__
def simpleFunction83():
x = list(range(7))
x[2] = 5
j = 3
x += [h * 2 for h in range(j)]
def simpleFunction84():
x = tuple(range(7))
j = 3
x += tuple([h * 2 for h in range(j)])
def simpleFunction85():
x = list(range(7))
x[2] = 3
x *= 2
def simpleFunction86():
x = "something"
x += ""
def simpleFunction87():
x = 7
x += 2000
class C:
def f(self):
pass
def __iadd__(self, other):
return self
def method_function(*args, **kwargs):
# Make sure to mutate the list argument value
if "x" in kwargs:
x = kwargs["x"]
if type(x) is list:
x.append(1)
for x in args:
if type(x) is list:
x.append(1)
return args, kwargs
exec(
"""
def method_uncompiled_function(*args, **kwargs):
# Make sure to mutate the list argument value
if "x" in kwargs:
x = kwargs["x"]
if type(x) is list:
x.append(1)
for x in args:
if type(x) is list:
x.append(1)
return args, kwargs
"""
)
def method_function_with_defaults(self, a, b, c, d=1, e=2, f=3):
return True
def simpleFunction88():
x = C()
x += C()
def simpleFunction89():
x = [1, 2]
x += [3, 4]
def anyArgs(*args, **kw):
return kw.keys(), kw.values()
def simpleFunction90():
some_tuple = (simpleFunction89, simpleFunction89, simpleFunction89)
anyArgs(*some_tuple)
def simpleFunction91():
some_dict = {"a": simpleFunction90}
anyArgs(**some_dict)
def simpleFunction92():
some_tuple = (simpleFunction89,)
some_dict = {"a": simpleFunction90}
anyArgs(*some_tuple, **some_dict)
def simpleFunction93():
some_tuple = (simpleFunction89,)
some_dict = {"a": simpleFunction90}
anyArgs(some_tuple, *some_tuple, **some_dict)
def simpleFunction94():
some_tuple = (simpleFunction89,)
some_dict = {"a": simpleFunction90}
anyArgs(*some_tuple, b=some_dict, **some_dict)
def simpleFunction95():
some_tuple = (simpleFunction89,)
some_dict = {"a": simpleFunction90}
anyArgs(some_tuple, *some_tuple, b=some_dict, **some_dict)
def simpleFunction96():
some_tuple = (simpleFunction89,)
anyArgs(some_tuple, *some_tuple)
# Complex call with dictionary and key arguments only.
def simpleFunction97():
some_dict = {"a": simpleFunction90, "d": simpleFunction91}
anyArgs(b=some_dict, c=1, **some_dict)
def simpleFunction98():
some_tuple = (simpleFunction89,)
anyArgs(*some_tuple, b=some_tuple)
def simpleFunction99():
some_dict = {"a": simpleFunction90}
anyArgs(some_dict, **some_dict)
def simpleFunction100():
def h(f):
def g():
return f
return g
def f():
pass
h(f)
def simpleFunction101():
def orMaking(a, b):
x = "axa"
x += a or b
orMaking("x", "")
####################################
class SomeClassWithAttributeAccess(object):
READING = 1
def use(self):
return self.READING
def simpleFunction102():
SomeClassWithAttributeAccess().use()
SomeClassWithAttributeAccess().use()
####################################
def getInt():
return 3
def simpleFunction103():
try:
raise getInt()
except TypeError:
pass
####################################
class ClassWithGeneratorMethod:
def generator_method(self):
yield self
def simpleFunction104():
return list(ClassWithGeneratorMethod().generator_method())
def simpleFunction105():
"""Delete a started generator, not properly closing it before releasing."""
def generator():
yield 1
yield 2
g = generator()
next(g)
del g
def simpleFunction106():
# Call a PyCFunction with a single argument.
return sys.getsizeof(type)
def simpleFunction107():
# Call a PyCFunction with a single argument.
return sum(i for i in range(x))
def simpleFunction108():
# Call a PyCFunction with a single argument.
return sum((i for i in range(x)), 17)
def simpleFunction109():
# Call a PyCFunction that looks like a method call.
sys.exc_info()
def simpleFunction110():
def my_open(*args, **kwargs):
return (args, kwargs)
orig_open = __builtins__.open
__builtins__.open = my_open
open("me", buffering=True)
__builtins__.open = orig_open
####################################
u = "__name__"
def simpleFunction111():
return getattr(simpleFunction111, u)
####################################
def simpleFunction112():
TESTFN = "tmp.txt"
import codecs
try:
with open(TESTFN, "wb") as out_file:
out_file.write(b"\xa1")
f = codecs.open(TESTFN, encoding="cp949")
f.read(2)
except UnicodeDecodeError:
pass
finally:
try:
f.close()
except Exception:
pass
try:
os.unlink(TESTFN)
except Exception:
pass
####################################
def simpleFunction113():
class A(object):
pass
a = A()
a.a = a
return a
l = []
def simpleFunction114():
global l
l += ["something"]
# Erase it to avoid reference change.
del l[:]
i = 2**16 + 1
def simpleFunction115():
global i
i += 1
t = tuple(range(259))
def simpleFunction116():
global t
t += (2, 3)
t = tuple(range(259))
def simpleFunction117():
# Operation tuple+object, error case.
try:
return tuple(t) + i
except TypeError:
pass
def simpleFunction118():
# Operation tuple+object, error case.
try:
return i + tuple(t)
except TypeError:
pass
t2 = tuple(range(9))
def simpleFunction119():
# Operation tuple+object no error case.
return tuple(t) + t2
def simpleFunction120():
# Operation object+tuple no error case.
return t2 + tuple(t)
def simpleFunction121():
# Operation tuple+tuple
return tuple(t2) + tuple(t)
def simpleFunction122():
# Operation list+object, error case.
try:
return list(t) + i
except TypeError:
pass
def simpleFunction123():
# Operation list+object, error case.
try:
return i + list(t)
except TypeError:
pass
l2 = list(range(9))
def simpleFunction124():
# Operation list+object no error case.
return list(t) + l2
def simpleFunction125():
# Operation object+list no error case.
return l2 + list(t)
def simpleFunction126():
# Operation tuple+tuple
return list(l2) + list(t)
class TupleWithSlots(tuple):
def __add__(self, other):
return 42
def __radd__(self, other):
return 42
def simpleFunction127():
# Operation tuple+object with add slot.
return tuple(t) + TupleWithSlots()
def simpleFunction128():
# Operation object+tuple with add slot.
return TupleWithSlots() + tuple(t)
class ListWithSlots(list):
def __add__(self, other):
return 42
def __radd__(self, other):
return 42
def simpleFunction129():
# Operation list+object with add slot.
return list(t) + ListWithSlots()
def simpleFunction130():
# Operation list+object with add slot.
return ListWithSlots() + list(t)
def simpleFunction131():
try:
C().f.__reduce__()
except Exception as e:
assert sys.version_info < (3, 4)
def simpleFunction132():
C().f.__reduce_ex__(5)
x = 5
def local_function(*args, **kwargs):
# Make sure to mutate the list argument value
if "x" in kwargs:
x = kwargs["x"]
if type(x) is list:
x.append(1)
for x in args:
if type(x) is list:
x.append(1)
return args, kwargs
exec(
"""
def local_uncompiled_function(*args, **kwargs):
# Make sure to mutate the list argument value
if "x" in kwargs:
x = kwargs["x"]
if type(x) is list:
x.append(1)
for x in args:
if type(x) is list:
x.append(1)
return args, kwargs
"""
)
def simpleFunction133():
local_function(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11)
local_function(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, x=1)
local_function(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, x=x)
local_function(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, x=[])
local_function(1, 2, 3, 4, 5, 6, 7, x, 9, 10, 11, x=1)
local_function(1, 2, 3, 4, 5, 6, 7, x, 9, 10, 11, x=x)
local_function(1, 2, 3, 4, 5, 6, 7, [], 9, 10, 11, x=[])
local_function(x=1)
local_function(x=x)
local_function(x=[])
local_uncompiled_function(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11)
local_uncompiled_function(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, x=1)
local_uncompiled_function(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, x=x)
local_uncompiled_function(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, x=[])
local_uncompiled_function(1, 2, 3, 4, 5, 6, 7, x, 9, 10, 11, x=1)
local_uncompiled_function(1, 2, 3, 4, 5, 6, 7, x, 9, 10, 11, x=x)
local_uncompiled_function(1, 2, 3, 4, 5, 6, 7, [], 9, 10, 11, x=[])
local_uncompiled_function(x=1)
local_uncompiled_function(x=x)
local_uncompiled_function(x=[])
c = C()
C().method_function(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, x=1)
C.method_function(c, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, x=1)
C().method_function(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, x=x)
C().method_function(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, x=[1])
C.method_function(c, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, x=x)
C().method_function(1, 2, 3, 4, 5, 6, 7, x, 9, 10, 11, x=1)
C.method_function(c, 1, 2, 3, 4, 5, 6, 7, x, 9, 10, 11, x=1)
C().method_function(1, 2, 3, 4, 5, 6, 7, x, 9, 10, 11, x=x)
C.method_function(c, 1, 2, 3, 4, 5, 6, 7, x, 9, 10, 11, x=x)
C().method_function(1, 2, 3, 4, 5, 6, 7, [1], 9, 10, 11, x=[1])
C.method_function(c, 1, 2, 3, 4, 5, 6, 7, [1], 9, 10, 11, x=[1])
C().method_function(x=1)
C().method_function(x=x)
C().method_function(x=[1])
C().method_uncompiled_function(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, x=1)
C.method_uncompiled_function(c, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, x=1)
C().method_uncompiled_function(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, x=x)
C().method_uncompiled_function(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, x=[1])
C.method_uncompiled_function(c, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, x=x)
C().method_uncompiled_function(1, 2, 3, 4, 5, 6, 7, x, 9, 10, 11, x=1)
C.method_uncompiled_function(c, 1, 2, 3, 4, 5, 6, 7, x, 9, 10, 11, x=1)
C().method_uncompiled_function(1, 2, 3, 4, 5, 6, 7, x, 9, 10, 11, x=x)
C.method_uncompiled_function(c, 1, 2, 3, 4, 5, 6, 7, x, 9, 10, 11, x=x)
C().method_uncompiled_function(1, 2, 3, 4, 5, 6, 7, [1], 9, 10, 11, x=[1])
C.method_uncompiled_function(c, 1, 2, 3, 4, 5, 6, 7, [1], 9, 10, 11, x=[1])
C().method_uncompiled_function(x=1)
C().method_uncompiled_function(x=x)
C().method_uncompiled_function(x=[1])
C().method_function_with_defaults(1, 2, 3, d=1)
C().method_function_with_defaults(1, x, 3, d=x)
C().method_function_with_defaults(1, x, 3, d=[1])
####################################
# These need stderr to be wrapped.
tests_stderr = (63,)
# Disabled tests
tests_skipped = {}
result = executeReferenceChecked(
prefix="simpleFunction",
names=globals(),
tests_skipped=tests_skipped,
tests_stderr=tests_stderr,
)
sys.exit(0 if result else 1)
|
f57748da658712c9a03c8df0cc827ea4d2ff85e7
|
d5f7b5be2ef5e8a9d5d170cd45aa949243687d8d
|
/blog/migrations/0011_setting_enable_multi_user.py
|
b957596f001b571ac7a004b4d641554de0e17634
|
[
"MIT"
] |
permissive
|
enjoy-binbin/Django-blog
|
494af449f085e77460da500763037b28d00c560a
|
0fcf3709fabeee49874343b3a4ab80582698c466
|
refs/heads/master
| 2022-12-13T06:16:59.091138
| 2020-12-08T04:21:54
| 2020-12-08T04:21:54
| 171,880,388
| 113
| 24
|
MIT
| 2022-11-22T04:45:56
| 2019-02-21T13:51:02
|
Python
|
UTF-8
|
Python
| false
| false
| 549
|
py
|
0011_setting_enable_multi_user.py
|
# Generated by Django 2.1.5 on 2019-06-07 15:42
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('blog', '0010_setting_user_verify_email'),
]
operations = [
migrations.AddField(
model_name='setting',
name='enable_multi_user',
field=models.BooleanField(default=False, help_text='启用多用户博客系统, 用户在注册时候会获得, 相应的后台权限', verbose_name='是否启用多用户博客系统'),
),
]
|
3603c8f439317528ad8a7e7461d4f73787009cd2
|
1ec0cf2205deb58b97495e1b5d0a8df3b1f3faf1
|
/concordia/settings_ecs.py
|
2f6718ddcfb3f69f46b97ff1b60f2bb40fe5e7cf
|
[
"LicenseRef-scancode-public-domain",
"CC0-1.0"
] |
permissive
|
LibraryOfCongress/concordia
|
f9d937223320bb6b9185f764485d14d22c06f4cd
|
2f345cd177c3ae0ec3913e39c21332f5a35f634b
|
refs/heads/main
| 2023-08-22T05:28:58.717760
| 2023-08-21T18:46:22
| 2023-08-21T18:46:22
| 134,269,274
| 152
| 36
|
NOASSERTION
| 2023-09-14T18:50:49
| 2018-05-21T12:56:57
|
Python
|
UTF-8
|
Python
| false
| false
| 2,659
|
py
|
settings_ecs.py
|
import json
import os
from .secrets import get_secret
from .settings_template import * # NOQA ignore=F405
from .settings_template import CONCORDIA_ENVIRONMENT, DATABASES, INSTALLED_APPS
if os.getenv("AWS"):
ENV_NAME = os.getenv("ENV_NAME")
django_secret_json = get_secret("crowd/%s/Django/SecretKey" % ENV_NAME)
django_secret = json.loads(django_secret_json)
SECRET_KEY = django_secret["DjangoSecretKey"]
postgres_secret_json = get_secret("crowd/%s/DB/MasterUserPassword" % ENV_NAME)
postgres_secret = json.loads(postgres_secret_json)
DATABASES["default"].update({"PASSWORD": postgres_secret["password"]})
smtp_secret_json = get_secret("concordia/SMTP")
smtp_secret = json.loads(smtp_secret_json)
EMAIL_HOST = smtp_secret["Hostname"]
EMAIL_HOST_USER = smtp_secret["Username"]
EMAIL_HOST_PASSWORD = smtp_secret["Password"]
else:
EMAIL_HOST = os.environ.get("EMAIL_HOST", "localhost")
EMAIL_HOST_USER = os.environ.get("EMAIL_HOST_USER", "")
EMAIL_HOST_PASSWORD = os.environ.get("EMAIL_HOST_PASSWORD", "")
SECURE_PROXY_SSL_HEADER = ("HTTP_X_FORWARDED_PROTO", "https")
EMAIL_USE_TLS = True
EMAIL_BACKEND = "django.core.mail.backends.smtp.EmailBackend"
EMAIL_PORT = 587
DEFAULT_FROM_EMAIL = os.environ.get("DEFAULT_FROM_EMAIL", "crowd@loc.gov")
DEFAULT_TO_EMAIL = DEFAULT_FROM_EMAIL
CSRF_COOKIE_SECURE = True
CELERY_BROKER_URL = os.getenv("CELERY_BROKER_URL")
CELERY_RESULT_BACKEND = CELERY_BROKER_URL
S3_BUCKET_NAME = os.getenv("S3_BUCKET_NAME")
EXPORT_S3_BUCKET_NAME = os.getenv("EXPORT_S3_BUCKET_NAME")
DEFAULT_FILE_STORAGE = "storages.backends.s3boto3.S3Boto3Storage"
AWS_STORAGE_BUCKET_NAME = S3_BUCKET_NAME
AWS_DEFAULT_ACL = None # Don't set an ACL on the files, inherit the bucket ACLs
if CONCORDIA_ENVIRONMENT == "production":
MEDIA_URL = "https://crowd-media.loc.gov/"
else:
MEDIA_URL = "https://%s.s3.amazonaws.com/" % S3_BUCKET_NAME
ELASTICSEARCH_DSL_AUTOSYNC = os.getenv("ELASTICSEARCH_DSL_AUTOSYNC", False)
INSTALLED_APPS += ["django_elasticsearch_dsl"]
ELASTICSEARCH_DSL_SIGNAL_PROCESSOR = (
"django_elasticsearch_dsl.signals.RealTimeSignalProcessor"
)
ELASTICSEARCH_DSL = {
"default": {"hosts": os.getenv("ELASTICSEARCH_ENDPOINT", "elk:9200")}
}
# The HMAC activation flow provides the two-step registration process:
# the user signs up and then completes activation via email instructions.
REGISTRATION_SALT = "django_registration" # doesn't need to be secret
RATELIMIT_BLOCK = os.getenv("RATELIMIT_BLOCK", "").lower() not in ("false", "0")
if os.getenv("USE_PERSISTENT_DATABASE_CONNECTIONS"):
DATABASES["default"].update({"CONN_MAX_AGE": 15 * 60})
|
111b5928b7dcf453af8e1da8f423570d5d762cdd
|
0e083f405af00029c9ec31849f0f7f81c56844b5
|
/mmdeploy/codebase/mmdet3d/deploy/mmdet3d.py
|
bd817b7593db8a9bd4bf017ee5995ec3fb88a0d7
|
[
"Apache-2.0"
] |
permissive
|
open-mmlab/mmdeploy
|
39b9e7b611caab2c76a6142fcb99f0bf1d92ad24
|
5479c8774f5b88d7ed9d399d4e305cb42cc2e73a
|
refs/heads/main
| 2023-09-01T21:29:25.315371
| 2023-08-31T09:59:29
| 2023-08-31T09:59:29
| 441,467,833
| 2,164
| 605
|
Apache-2.0
| 2023-09-14T10:39:04
| 2021-12-24T13:04:44
|
Python
|
UTF-8
|
Python
| false
| false
| 691
|
py
|
mmdet3d.py
|
# Copyright (c) OpenMMLab. All rights reserved.
from mmengine.registry import Registry
from mmdeploy.codebase.base import CODEBASE, MMCodebase
from mmdeploy.utils import Codebase
MMDET3D_TASK = Registry('mmdet3d_tasks')
@CODEBASE.register_module(Codebase.MMDET3D.value)
class MMDetection3d(MMCodebase):
"""MMDetection3d codebase class."""
task_registry = MMDET3D_TASK
@classmethod
def register_deploy_modules(cls):
import mmdeploy.codebase.mmdet3d.models # noqa: F401
@classmethod
def register_all_modules(cls):
from mmdet3d.utils.setup_env import register_all_modules
cls.register_deploy_modules()
register_all_modules(True)
|
c2b79a3f9b471e97a5690cd7cd177bad2ea037b7
|
ef1f316534f2df302286eb168951255afe137a55
|
/pynetdicom/fsm.py
|
a5fd93c2d4ab4fa59665fe31c352bbc01ee8d642
|
[
"MIT"
] |
permissive
|
pydicom/pynetdicom
|
5362b4f3d03bc47e570cf3d82684925cb3d4162d
|
2aa9ed7e3f7f03a0c9af48fe8b0049c82e74ee48
|
refs/heads/master
| 2023-08-16T23:09:24.539133
| 2023-07-21T17:38:22
| 2023-07-21T17:38:22
| 69,436,608
| 342
| 122
|
MIT
| 2023-07-20T19:04:04
| 2016-09-28T07:18:02
|
Python
|
UTF-8
|
Python
| false
| false
| 35,804
|
py
|
fsm.py
|
"""
The DUL's finite state machine representation.
"""
import logging
import queue
from typing import TYPE_CHECKING, cast
from pynetdicom import evt
from pynetdicom.pdu import (
A_ASSOCIATE_RQ,
A_ASSOCIATE_RJ,
A_ASSOCIATE_AC,
P_DATA_TF,
A_RELEASE_RQ,
A_RELEASE_RP,
A_ABORT_RQ,
)
from pynetdicom.pdu_primitives import A_P_ABORT, A_ABORT
from pynetdicom.transport import T_CONNECT
if TYPE_CHECKING: # pragma: no cover
from pynetdicom.dul import DULServiceProvider
from pynetdicom.transport import AssociationSocket
from pynetdicom.pdu_primitives import A_ASSOCIATE, P_DATA, A_RELEASE
LOGGER = logging.getLogger("pynetdicom.sm")
class InvalidEventError(Exception):
"""Exception for use when an invalid event occurs for a given state."""
pass
# pylint: disable=invalid-name
class StateMachine:
"""Implementation of the DICOM Upper Layer State Machine.
Attributes
----------
current_state : str
The current state of the state machine, ``'Sta1'`` to ``'Sta13'``.
dul : dul.DULServiceProvider
The DICOM Upper Layer service instance for the local AE
References
----------
* DICOM Standard, Part 8, :dcm:`Section 9.2<part08/sect_9.2.html>`
"""
def __init__(self, dul: "DULServiceProvider"):
"""Create a new :class:`StateMachine`.
Parameters
----------
dul : dul.DULServiceProvider
The DICOM Upper Layer Service instance for the association.
"""
self.current_state = "Sta1"
self.dul = dul
def do_action(self, event: str) -> None:
"""Execute the action triggered by `event`.
Parameters
----------
event : str
The event to be processed, ``'Evt1'`` to ``'Evt19'``
"""
# Check (event + state) is valid
if (event, self.current_state) not in TRANSITION_TABLE:
msg = "Invalid event '{}' for the current state '{}'".format(
event, self.current_state
)
LOGGER.error(msg)
raise InvalidEventError(msg)
action_name = TRANSITION_TABLE[(event, self.current_state)]
# action is the (description, function, state) tuple
# associated with the action_name
action = ACTIONS[action_name]
# Attempt to execute the action and move the state machine to its
# next state
try:
# Execute the required action
next_state = action[1](self.dul)
# Event handler - FSM transition
evt.trigger(
self.dul.assoc,
evt.EVT_FSM_TRANSITION,
{
"action": action_name,
"current_state": self.current_state,
"fsm_event": event,
"next_state": next_state,
},
)
# print(
# "{}: {} + {} -> {} -> {}".format(
# self.dul.assoc.mode[0].upper(),
# self.current_state,
# event,
# action_name,
# next_state,
# )
# )
# Move the state machine to the next state
self.transition(next_state)
except Exception as exc:
LOGGER.error(
"State Machine received an exception attempting "
"to perform the action '%s' while in state '%s'",
action_name,
self.current_state,
)
LOGGER.exception(exc)
self.dul.kill_dul()
raise
def transition(self, state: str) -> None:
"""Transition the state machine to the next state.
Parameters
----------
state : str
The state to transition to, ``'Sta1'`` to ``'Sta13'``.
Raises
------
ValueError
If `state` is not a valid state.
"""
# Validate that state is acceptable
if state in STATES.keys():
self.current_state = state
else:
msg = f"Invalid state '{state}' for State Machine"
LOGGER.error(msg)
raise ValueError(msg)
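# Illustrative walk-through (a sketch derived from the action docstrings below, not a normative
# description): a requestor establishing an association typically moves
#   Sta1 --Evt1/AE-1--> Sta4 --Evt2/AE-2--> Sta5 --Evt3/AE-3--> Sta6
# i.e. the local A-ASSOCIATE request triggers the transport connect, the connection confirmation
# triggers sending the A-ASSOCIATE-RQ, and the peer's A-ASSOCIATE-AC moves the machine to Sta6,
# the data transfer state serviced by DT-1/DT-2.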
def AE_1(dul: "DULServiceProvider") -> str:
"""Association establishment action AE-1.
*Event*
Service user issued A-ASSOCIATE (request) to the service provider
*Action*
Issue TRANSPORT CONNECT request primitive to the transport service.
*State/Event Triggers*
- Sta1 + Evt1
Parameters
----------
dul : dul.DULServiceProvider
The DICOM Upper Layer Service instance for the local AE
Returns
-------
str
``'Sta4'``, the next state of the state machine.
"""
# A-ASSOCIATE (request) primitive received from local user
request = cast("A_ASSOCIATE", dul.to_provider_queue.get(False))
# Issue TRANSPORT CONNECT request primitive to local transport service
sock = cast("AssociationSocket", dul.socket)
sock.connect(T_CONNECT(request))
return "Sta4"
def AE_2(dul: "DULServiceProvider") -> str:
"""Association establishment action AE-2.
On receiving connection confirmation, send A-ASSOCIATE-RQ to the peer AE
This sends a byte stream with the format given by Table 9-11
State-event triggers: Sta4 + Evt2
Parameters
----------
dul : dul.DULServiceProvider
The DICOM Upper Layer Service instance for the local AE
Returns
-------
str
``'Sta5'``, the next state of the state machine.
"""
# TRANSPORT CONNECTION primitive received from transport service
primitive = cast("T_CONNECT", dul.to_provider_queue.get(False))
# Send A-ASSOCIATE-RQ PDU to the peer
dul._send(A_ASSOCIATE_RQ(primitive.request))
return "Sta5"
def AE_3(dul: "DULServiceProvider") -> str:
"""Association establishment action AE-3.
On receiving A-ASSOCIATE-AC, issue acceptance confirmation
State-event triggers: Sta5 + Evt3
Parameters
----------
dul : dul.DULServiceProvider
The DICOM Upper Layer Service instance for the local AE
Returns
-------
str
``'Sta6'``, the next state of the state machine
"""
# Received A-ASSOCIATE-AC PDU from the peer
pdu = dul._recv_pdu.get(False)
# Issue A-ASSOCIATE confirmation (accept) primitive
dul.to_user_queue.put(cast("A_ASSOCIATE", pdu.to_primitive()))
return "Sta6"
def AE_4(dul: "DULServiceProvider") -> str:
"""Association establishment action AE-4.
On receiving A-ASSOCIATE-RJ, issue rejection confirmation and close
connection
State-event triggers: Sta5 + Evt4
Parameters
----------
dul : dul.DULServiceProvider
The DICOM Upper Layer Service instance for the local AE
Returns
-------
str
``'Sta1'``, the next state of the state machine
"""
# Received A-ASSOCIATE-RJ PDU from the peer
pdu = dul._recv_pdu.get(False)
# Issue A-ASSOCIATE confirmation (reject) primitive and close transport
# connection
dul.to_user_queue.put(cast("A_ASSOCIATE", pdu.to_primitive()))
sock = cast("AssociationSocket", dul.socket)
sock.close()
assoc = dul.assoc
remote = assoc.acceptor if assoc.is_requestor else assoc.requestor
address = (remote.address, remote.port)
evt.trigger(dul.assoc, evt.EVT_CONN_CLOSE, {"address": address})
dul.kill_dul()
return "Sta1"
def AE_5(dul: "DULServiceProvider") -> str:
"""Association establishment action AE-5.
From Idle state, on receiving a remote connection attempt, respond and
start ARTIM. This is the first step in associating a remote AE (requestor)
to the local AE (acceptor).
State-event triggers: Sta1 + Evt5
Parameters
----------
dul : dul.DULServiceProvider
The DICOM Upper Layer Service instance for the local AE
Returns
-------
str
``'Sta2'``, the next state of the state machine
"""
# Start ARTIM timer
dul.artim_timer.start()
return "Sta2"
def AE_6(dul: "DULServiceProvider") -> str:
"""Association establishment action AE-6.
On receiving an A-ASSOCIATE-RQ PDU from the peer, stop the ARTIM timer and
then either
* issue an A-ASSOCIATE indication primitive if the -RQ is acceptable or
* issue an A-ASSOCIATE-RJ PDU to the peer and start the ARTIM timer
This is a lower-level DUL Service Provider initiated rejection - for
example this could be where the protocol version is checked
State-event triggers: Sta2 + Evt6
Parameters
----------
dul : dul.DULServiceProvider
The DICOM Upper Layer Service instance for the local AE
Returns
-------
str
Either ``'Sta3'`` or ``'Sta13'``, the next state of the state machine
"""
# Stop ARTIM timer
dul.artim_timer.stop()
recv_pdu = cast(A_ASSOCIATE_RQ, dul._recv_pdu.get(False))
primitive = recv_pdu.to_primitive()
# If A-ASSOCIATE-RQ not acceptable by service dul provider
# Then set reason and send -RJ PDU back to peer
if recv_pdu.protocol_version != 0x0001:
LOGGER.error(
"A-ASSOCIATE-RQ: Unsupported protocol version "
f"'0x{recv_pdu.protocol_version:04X}'"
)
# Send A-ASSOCIATE-RJ PDU and start ARTIM timer
primitive.result = 0x01
primitive.result_source = 0x02
primitive.diagnostic = 0x02
dul._send(A_ASSOCIATE_RJ(primitive))
dul.artim_timer.start()
return "Sta13"
# If A-ASSOCIATE-RQ acceptable by service dul provider
# issue A-ASSOCIATE indication primitive and move to Sta3
dul.to_user_queue.put(primitive)
return "Sta3"
def AE_7(dul: "DULServiceProvider") -> str:
"""Association establishment action AE-7.
On receiving association request acceptance, issue A-ASSOCIATE-AC
State-event triggers: Sta3 + Evt7
Parameters
----------
dul : dul.DULServiceProvider
The DICOM Upper Layer Service instance for the local AE
Returns
-------
str
``'Sta6'``, the next state of the state machine
"""
# Received A-ASSOCIATE (AC) primitive from local user
primitive = cast("A_ASSOCIATE", dul.to_provider_queue.get(False))
# Send A-ASSOCIATE-AC PDU
dul._send(A_ASSOCIATE_AC(primitive))
return "Sta6"
def AE_8(dul: "DULServiceProvider") -> str:
"""Association establishment action AE-8.
On receiving association request rejection, issue A-ASSOCIATE-RJ
State-event triggers: Sta3 + Evt8
Parameters
----------
dul : dul.DULServiceProvider
The DICOM Upper Layer Service instance for the local AE
Returns
-------
str
``'Sta13'``, the next state of the state machine
"""
# Received A-ASSOCIATE (RJ) primitive from local user
primitive = cast("A_ASSOCIATE", dul.to_provider_queue.get(False))
# Send A-ASSOCIATE-RJ PDU and start ARTIM timer
dul._send(A_ASSOCIATE_RJ(primitive))
dul.artim_timer.start()
return "Sta13"
def DT_1(dul: "DULServiceProvider") -> str:
"""Data transfer DT-1.
On receiving a P-DATA request, send P-DATA-TF
State-event triggers: Sta6 + Evt9
Parameters
----------
dul : dul.DULServiceProvider
The DICOM Upper Layer Service instance for the local AE
Returns
-------
str
``'Sta6'``, the next state of the state machine
"""
# P-DATA request received from local user
primitive = cast("P_DATA", dul.to_provider_queue.get(False))
# Send P-DATA-TF PDU
dul._send(P_DATA_TF(primitive))
return "Sta6"
def DT_2(dul: "DULServiceProvider") -> str:
"""Data transfer DT-2.
On receiving a P-DATA-TF PDU, send a P-DATA indication
State-event triggers: Sta6 + Evt10
Parameters
----------
dul : dul.DULServiceProvider
The DICOM Upper Layer Service instance for the local AE
Returns
-------
str
``'Sta6'``, the next state of the state machine
"""
# P-DATA-TF PDU received from peer
pdu = dul._recv_pdu.get(False)
# Send P-DATA indication primitive directly to DIMSE for processing
dul.assoc.dimse.receive_primitive(cast("P_DATA", pdu.to_primitive()))
return "Sta6"
def AR_1(dul: "DULServiceProvider") -> str:
"""Association release AR-1.
Send Association release request
State-event triggers: Sta6 + Evt11
Parameters
----------
dul : dul.DULServiceProvider
The DICOM Upper Layer Service instance for the local AE
Returns
-------
str
``'Sta7'``, the next state of the state machine
"""
# Received A-RELEASE request from local user
primitive = cast("A_RELEASE", dul.to_provider_queue.get(False))
# Send A-RELEASE-RQ PDU
dul._send(A_RELEASE_RQ(primitive))
return "Sta7"
def AR_2(dul: "DULServiceProvider") -> str:
"""Association release AR-2.
On receiving an association release request, send release indication
State-event triggers: Sta6 + Evt12
Parameters
----------
dul : dul.DULServiceProvider
The DICOM Upper Layer Service instance for the local AE
Returns
-------
str
``'Sta8'``, the next state of the state machine
"""
# A-RELEASE-RQ PDU received from peer
pdu = dul._recv_pdu.get(False)
# Send A-RELEASE indication primitive
dul.to_user_queue.put(pdu.to_primitive())
return "Sta8"
def AR_3(dul: "DULServiceProvider") -> str:
"""Association release AR-3.
On receiving an association release response, send release confirmation,
close connection and go back to Idle state
State-event triggers: Sta7 + Evt13, Sta11 + Evt13
Parameters
----------
dul : dul.DULServiceProvider
The DICOM Upper Layer Service instance for the local AE
Returns
-------
str
``'Sta1'``, the next state of the state machine
"""
# A-RELEASE-RP PDU received from peer
pdu = dul._recv_pdu.get(False)
# Issue A-RELEASE confirmation primitive and close transport connection
dul.to_user_queue.put(pdu.to_primitive())
sock = cast("AssociationSocket", dul.socket)
sock.close()
assoc = dul.assoc
remote = assoc.acceptor if assoc.is_requestor else assoc.requestor
address = (remote.address, remote.port)
evt.trigger(dul.assoc, evt.EVT_CONN_CLOSE, {"address": address})
dul.kill_dul()
return "Sta1"
def AR_4(dul: "DULServiceProvider") -> str:
"""Association release AR-4.
On receiving an association release response, send release response
State-event triggers: Sta8 + Evt14, Sta12 + Evt14
Parameters
----------
dul : dul.DULServiceProvider
The DICOM Upper Layer Service instance for the local AE
Returns
-------
str
``'Sta13'``, the next state of the state machine
"""
# A-RELEASE (response) primitive received from local user
primitive = cast("A_RELEASE", dul.to_provider_queue.get(False))
# Issue A-RELEASE-RP PDU and start ARTIM timer
dul._send(A_RELEASE_RP(primitive))
dul.artim_timer.start()
return "Sta13"
def AR_5(dul: "DULServiceProvider") -> str:
"""Association release AR-5.
On receiving transport connection closed, stop the ARTIM timer and go back
to Idle state
State-event triggers: Sta13 + Evt17
Parameters
----------
dul : dul.DULServiceProvider
The DICOM Upper Layer Service instance for the local AE
Returns
-------
str
``'Sta1'``, the next state of the state machine
"""
assoc = dul.assoc
remote = assoc.acceptor if assoc.is_requestor else assoc.requestor
address = (remote.address, remote.port)
evt.trigger(dul.assoc, evt.EVT_CONN_CLOSE, {"address": address})
# Stop ARTIM timer
dul.artim_timer.stop()
dul.kill_dul()
return "Sta1"
def AR_6(dul: "DULServiceProvider") -> str:
"""Association release AR-6.
On receiving a P-DATA-TF PDU during an attempted association release, send a
P-DATA indication
State-event triggers: Sta7 + Evt10
Parameters
----------
dul : dul.DULServiceProvider
The DICOM Upper Layer Service instance for the local AE
Returns
-------
str
``'Sta7'``, the next state of the state machine
"""
# P-DATA-TF PDU received from peer
pdu = cast("P_DATA_TF", dul._recv_pdu.get(False))
# Issue P-DATA indication
dul.to_user_queue.put(pdu.to_primitive())
return "Sta7"
def AR_7(dul: "DULServiceProvider") -> str:
"""Association release AR-7.
On receiving a P-DATA request during an attempted association release, send a
P-DATA-TF PDU
State-event triggers: Sta8 + Evt9
Parameters
----------
dul : dul.DULServiceProvider
The DICOM Upper Layer Service instance for the local AE
Returns
-------
str
``'Sta8'``, the next state of the state machine
"""
# P-DATA primitive received from local user
primitive = cast("P_DATA", dul.to_provider_queue.get(False))
# Issue P-DATA-TF PDU
dul._send(P_DATA_TF(primitive))
return "Sta8"
def AR_8(dul: "DULServiceProvider") -> str:
"""Association release AR-8.
On receiving an association release request while the local AE is also
requesting release, issue a release collision indication
State-event triggers: Sta7 + Evt12
Parameters
----------
dul : dul.DULServiceProvider
The DICOM Upper Layer Service instance for the local AE
Returns
-------
str
Either ``'Sta9'`` or ``'Sta10'``, the next state of the state machine
"""
# A-RELEASE-RQ PDU received from peer
pdu = cast("A_RELEASE_RQ", dul._recv_pdu.get(False))
# Issue A-RELEASE indication (release collision)
dul.to_user_queue.put(pdu.to_primitive())
if dul.assoc.is_requestor:
return "Sta9"
return "Sta10"
def AR_9(dul: "DULServiceProvider") -> str:
"""Association release AR-9.
On receiving A-RELEASE primitive, send release response
State-event triggers: Sta9 + Evt14
Parameters
----------
dul : dul.DULServiceProvider
The DICOM Upper Layer Service instance for the local AE
Returns
-------
str
``'Sta11'``, the next state of the state machine
"""
# A-RELEASE (response) primitive received from local user
primitive = cast("A_RELEASE", dul.to_provider_queue.get(False))
# Send A-RELEASE-RP PDU
dul._send(A_RELEASE_RP(primitive))
return "Sta11"
def AR_10(dul: "DULServiceProvider") -> str:
"""Association release AR-10.
On receiving A-RELEASE-RP, issue release confirmation
State-event triggers: Sta10 + Evt13
Parameters
----------
dul : dul.DULServiceProvider
The DICOM Upper Layer Service instance for the local AE
Returns
-------
str
``'Sta12'``, the next state of the state machine
"""
# A-RELEASE-RP PDU received from peer
pdu = cast("A_RELEASE_RP", dul._recv_pdu.get(False))
# Issue A-RELEASE confirmation primitive
dul.to_user_queue.put(pdu.to_primitive())
return "Sta12"
def AA_1(dul: "DULServiceProvider") -> str:
"""Association abort AA-1.
If, after sending an A-ASSOCIATE-RQ, an invalid reply or an abort request is
received, then abort
State-event triggers: Sta2 + Evt3/Evt4/Evt10/Evt12/Evt13/Evt19,
Sta3/Sta5/Sta6/Sta7/Sta8/Sta9/Sta10/Sta11/Sta12 + Evt15
Parameters
----------
dul : dul.DULServiceProvider
The DICOM Upper Layer Service instance for the local AE
Returns
-------
str
``'Sta13'``, the next state of the state machine
"""
# Received invalid PDU from peer or an A-ABORT primitive from local user
try:
primitive = dul.to_provider_queue.queue[0]
if isinstance(primitive, (A_ABORT, A_P_ABORT)):
primitive = dul.to_provider_queue.get(False)
except (queue.Empty, IndexError):
primitive = None
# Send A-ABORT PDU (service-user source) and start (or restart
# if already started) ARTIM timer.
pdu = A_ABORT_RQ()
if primitive is not None:
pdu.from_primitive(primitive)
else:
# Reason not specified
pdu.source = 0x00
pdu.reason_diagnostic = 0x00
dul._send(pdu)
dul.artim_timer.restart()
return "Sta13"
def AA_2(dul: "DULServiceProvider") -> str:
"""Association abort AA-2.
On receiving an A-ABORT or if the ARTIM timer expires, close connection and
return to Idle
State-event triggers: Sta2 + Evt16/Evt18, Sta4 + Evt15, Sta13 + Evt16/Evt18
Parameters
----------
dul : dul.DULServiceProvider
The DICOM Upper Layer Service instance for the local AE
Returns
-------
str
``'Sta1'``, the next state of the state machine
"""
# Stop ARTIM timer if running. Close transport connection.
dul.artim_timer.stop()
sock = cast("AssociationSocket", dul.socket)
sock.close()
assoc = dul.assoc
assoc.dimse.msg_queue.put((None, None))
remote = assoc.acceptor if assoc.is_requestor else assoc.requestor
address = (remote.address, remote.port)
evt.trigger(dul.assoc, evt.EVT_CONN_CLOSE, {"address": address})
dul.kill_dul()
return "Sta1"
def AA_3(dul: "DULServiceProvider") -> str:
"""Association abort AA-3.
On receiving A-ABORT, issue abort indication, close connection and
return to Idle
State-event triggers: Sta3/Sta5/Sta6/Sta7/Sta8/Sta9/Sta10/Sta11/Sta12 +
Evt16
Parameters
----------
dul : dul.DULServiceProvider
The DICOM Upper Layer Service instance for the local AE
Returns
-------
str
``'Sta1'``, the next state of the state machine
"""
# A-ABORT PDU received from peer
pdu = cast("A_ABORT_RQ", dul._recv_pdu.get(False))
# If (service-user initiated abort):
# - Issue A-ABORT indication and close transport connection.
# Otherwise (service-dul initiated abort):
# - Issue A-P-ABORT indication and close transport connection.
# This action is triggered by the reception of an A-ABORT PDU
dul.to_user_queue.put(pdu.to_primitive())
sock = cast("AssociationSocket", dul.socket)
sock.close()
assoc = dul.assoc
assoc.dimse.msg_queue.put((None, None))
remote = assoc.acceptor if assoc.is_requestor else assoc.requestor
address = (remote.address, remote.port)
evt.trigger(dul.assoc, evt.EVT_CONN_CLOSE, {"address": address})
dul.kill_dul()
return "Sta1"
def AA_4(dul: "DULServiceProvider") -> str:
"""Association abort AA-4.
If the connection is closed, issue an A-P-ABORT and return to Idle
State-event triggers: Sta3/Sta4/Sta5/Sta6/Sta7/Sta8/Sta9/Sta10/Sta11/Sta12
+ Evt17
Parameters
----------
dul : dul.DULServiceProvider
The DICOM Upper Layer Service instance for the local AE
Returns
-------
str
``'Sta1'``, the next state of the state machine
"""
assoc = dul.assoc
assoc.dimse.msg_queue.put((None, None))
remote = assoc.acceptor if assoc.is_requestor else assoc.requestor
address = (remote.address, remote.port)
evt.trigger(dul.assoc, evt.EVT_CONN_CLOSE, {"address": address})
# Issue A-P-ABORT indication primitive.
primitive = A_P_ABORT()
primitive.provider_reason = 0x00
dul.to_user_queue.put(primitive)
dul.kill_dul()
return "Sta1"
def AA_5(dul: "DULServiceProvider") -> str:
"""Association abort AA-5.
If the connection is closed during an association request, stop the ARTIM
timer and return to Idle
State-event triggers: Sta2 + Evt17
Parameters
----------
dul : dul.DULServiceProvider
The DICOM Upper Layer Service instance for the local AE
Returns
-------
str
``'Sta1'``, the next state of the state machine
"""
assoc = dul.assoc
remote = assoc.acceptor if assoc.is_requestor else assoc.requestor
address = (remote.address, remote.port)
evt.trigger(dul.assoc, evt.EVT_CONN_CLOSE, {"address": address})
# Stop ARTIM timer.
dul.artim_timer.stop()
dul.kill_dul()
return "Sta1"
def AA_6(dul: "DULServiceProvider") -> str:
"""Association abort AA-6.
If a PDU is received while waiting for the connection to close, ignore it
State-event triggers: Sta13 + Evt3/Evt4/Evt10/Evt12/Evt13
Parameters
----------
dul : dul.DULServiceProvider
The DICOM Upper Layer Service instance for the local AE
Returns
-------
str
``'Sta13'``, the next state of the state machine
"""
# Ignore PDU
try:
dul._recv_pdu.get(False)
except queue.Empty:
pass
return "Sta13"
def AA_7(dul: "DULServiceProvider") -> str:
"""Association abort AA-7.
If an association request or an invalid PDU is received while waiting for the
connection to close, send an A-ABORT PDU
State-event triggers: Sta13 + Evt6/Evt19
Parameters
----------
dul : dul.DULServiceProvider
The DICOM Upper Layer Service instance for the local AE
Returns
-------
str
``'Sta13'``, the next state of the state machine
"""
primitive = A_P_ABORT()
primitive.provider_reason = 0x02
# Send A-ABORT PDU
dul._send(A_ABORT_RQ(primitive))
return "Sta13"
def AA_8(dul: "DULServiceProvider") -> str:
"""Association abort AA-8.
If an invalid event is received, send an A-ABORT PDU, issue an A-P-ABORT
indication and start the ARTIM timer
State-event triggers: Evt3 + Sta3/6/7/8/9/10/11/12,
Evt4 + Sta3/5/6/7/8/9/10/11/12, Evt6 + Sta3/5/6/7/8/9/10/11/12,
Evt10 + Sta3/5/8/9/10/11/12, Evt12 + Sta3/5/8/9/10/11/12,
Evt13 + Sta3/5/6/8/9/12, Evt19 + Sta3/5/6/7/8/9/10/11/12
Parameters
----------
dul : dul.DULServiceProvider
The DICOM Upper Layer Service instance for the local AE
Returns
-------
str
``'Sta13'``, the next state of the state machine
"""
# Send A-ABORT PDU (service-dul source), issue A-P-ABORT
# indication, and start ARTIM timer.
pdu = A_ABORT_RQ()
pdu.source = 0x02 # A-P-ABORT
pdu.reason_diagnostic = 0x00
# Send A-ABORT PDU
dul._send(pdu)
# Issue A-P-ABORT to user
primitive = A_P_ABORT()
primitive.provider_reason = 0x05
dul.to_user_queue.put(primitive)
dul.artim_timer.start()
return "Sta13"
# Finite State Machine
# Machine State Definitions, PS3.8 Tables 9-1, 9-2, 9-3, 9-4, 9-5
# pylint: disable=line-too-long
STATES = {
# No association
"Sta1": "Idle",
# Association establishment
"Sta2": "Transport connection open (Awaiting A-ASSOCIATE-RQ PDU)",
"Sta3": "Awaiting local A-ASSOCIATE response primitive (from local user)",
"Sta4": "Awaiting transport connection opening to complete (from local transport service)",
"Sta5": "Awaiting A-ASSOCIATE-AC or A-ASSOCIATE-RJ PDU",
# Data transfer
"Sta6": "Association established and ready for data transfer",
# Association release
"Sta7": "Awaiting A-RELEASE-RP PDU",
"Sta8": "Awaiting local A-RELEASE response primitive (from local user)",
"Sta9": "Release collision requestor side; awaiting A-RELEASE response (from local user)",
"Sta10": "Release collision acceptor side; awaiting A-RELEASE-RP PDU",
"Sta11": "Release collision requestor side; awaiting A-RELEASE-RP PDU",
"Sta12": "Release collision acceptor side; awaiting A-RELEASE response primitive (from local user)",
# Waiting for transport connection close
"Sta13": "Awaiting Transport Connection Close Indication (Association no longer exists)",
}
# State Machine Action Definitions, PS3.8 Tables 9-6, 9-7, 9-8, 9-9
ACTIONS = {
# Association establishment related actions
"AE-1": (
"Issue TRANSPORT CONNECT request primitive to local transport service",
AE_1,
"Sta4",
),
"AE-2": ("Send A-ASSOCIATE-RQ-PDU", AE_2, "Sta5"),
"AE-3": ("Issue A-ASSOCIATE confirmation (accept) primitive", AE_3, "Sta6"),
"AE-4": (
"Issue A-ASSOCIATE confirmation (reject) primitive and close "
"transport connection",
AE_4,
"Sta1",
),
"AE-5": (
"Issue Transport connection response primitive; start ARTIM timer",
AE_5,
"Sta2",
),
"AE-6": (
"Stop ARTIM timer and if A-ASSOCIATE-RQ acceptable by "
"service-dul: issue A-ASSOCIATE indication primitive "
"otherwise issue A-ASSOCIATE-RJ-PDU and start ARTIM timer",
AE_6,
("Sta3", "Sta13"),
),
"AE-7": ("Send A-ASSOCIATE-AC PDU", AE_7, "Sta6"),
"AE-8": ("Send A-ASSOCIATE-RJ PDU and start ARTIM timer", AE_8, "Sta13"),
# Data transfer related actions
"DT-1": ("Send P-DATA-TF PDU", DT_1, "Sta6"),
"DT-2": ("Send P-DATA indication primitive", DT_2, "Sta6"),
# Association Release related actions
"AR-1": ("Send A-RELEASE-RQ PDU", AR_1, "Sta7"),
"AR-2": ("Issue A-RELEASE indication primitive", AR_2, "Sta8"),
"AR-3": (
"Issue A-RELEASE confirmation primitive and close transport connection",
AR_3,
"Sta1",
),
"AR-4": ("Issue A-RELEASE-RP PDU and start ARTIM timer", AR_4, "Sta13"),
"AR-5": ("Stop ARTIM timer", AR_5, "Sta1"),
"AR-6": ("Issue P-DATA indication", AR_6, "Sta7"),
"AR-7": ("Issue P-DATA-TF PDU", AR_7, "Sta8"),
"AR-8": (
"Issue A-RELEASE indication (release collision): if "
"association-requestor, next state is Sta9, if not next "
"state is Sta10",
AR_8,
("Sta9", "Sta10"),
),
"AR-9": ("Send A-RELEASE-RP PDU", AR_9, "Sta11"),
"AR-10": ("Issue A-RELEASE confirmation primitive", AR_10, "Sta12"),
# Association abort related actions
"AA-1": (
"Send A-ABORT PDU (service-user source) and start (or "
"restart if already started) ARTIM timer",
AA_1,
"Sta13",
),
"AA-2": ("Stop ARTIM timer if running. Close transport connection", AA_2, "Sta1"),
"AA-3": (
"If (service-user initiated abort): issue A-ABORT "
"indication and close transport connection, otherwise "
"(service-dul initiated abort): issue A-P-ABORT indication "
"and close transport connection",
AA_3,
"Sta1",
),
"AA-4": ("Issue A-P-ABORT indication primitive", AA_4, "Sta1"),
"AA-5": ("Stop ARTIM timer", AA_5, "Sta1"),
"AA-6": ("Ignore PDU", AA_6, "Sta13"),
"AA-7": ("Send A-ABORT PDU", AA_7, "Sta13"),
"AA-8": (
"Send A-ABORT PDU (service-dul source), issue an A-P-ABORT "
"indication and start ARTIM timer",
AA_8,
"Sta13",
),
}
# State Machine Event Definitions, PS3.8 Table 9-10
EVENTS = {
"Evt1": "A-ASSOCIATE request (local user)",
"Evt2": "Transport connect confirmation (local transport service)",
"Evt3": "A-ASSOCIATE-AC PDU (received on transport connection)",
"Evt4": "A-ASSOCIATE-RJ PDU (received on transport connection)",
"Evt5": "Transport connection indication (local transport service)",
"Evt6": "A-ASSOCIATE-RQ PDU (on transport connection)",
"Evt7": "A-ASSOCIATE response primitive (accept)",
"Evt8": "A-ASSOCIATE response primitive (reject)",
"Evt9": "P-DATA request primitive",
"Evt10": "P-DATA-TF PDU (on transport connection)",
"Evt11": "A-RELEASE request primitive",
"Evt12": "A-RELEASE-RQ PDU (on transport)",
"Evt13": "A-RELEASE-RP PDU (on transport)",
"Evt14": "A-RELEASE response primitive",
"Evt15": "A-ABORT request primitive",
"Evt16": "A-ABORT PDU (on transport)",
"Evt17": "Transport connection closed indication (local transport service)",
"Evt18": "ARTIM timer expired (Association reject/release timer)",
"Evt19": "Unrecognized or invalid PDU received",
}
# State Machine Transitions, PS3.8 Table 9-10
TRANSITION_TABLE = {
("Evt1", "Sta1"): "AE-1",
("Evt2", "Sta4"): "AE-2",
("Evt3", "Sta2"): "AA-1",
("Evt3", "Sta3"): "AA-8",
("Evt3", "Sta5"): "AE-3",
("Evt3", "Sta6"): "AA-8",
("Evt3", "Sta7"): "AA-8",
("Evt3", "Sta8"): "AA-8",
("Evt3", "Sta9"): "AA-8",
("Evt3", "Sta10"): "AA-8",
("Evt3", "Sta11"): "AA-8",
("Evt3", "Sta12"): "AA-8",
("Evt3", "Sta13"): "AA-6",
("Evt4", "Sta2"): "AA-1",
("Evt4", "Sta3"): "AA-8",
("Evt4", "Sta5"): "AE-4",
("Evt4", "Sta6"): "AA-8",
("Evt4", "Sta7"): "AA-8",
("Evt4", "Sta8"): "AA-8",
("Evt4", "Sta9"): "AA-8",
("Evt4", "Sta10"): "AA-8",
("Evt4", "Sta11"): "AA-8",
("Evt4", "Sta12"): "AA-8",
("Evt4", "Sta13"): "AA-6",
("Evt5", "Sta1"): "AE-5",
("Evt6", "Sta2"): "AE-6",
("Evt6", "Sta3"): "AA-8",
("Evt6", "Sta5"): "AA-8",
("Evt6", "Sta6"): "AA-8",
("Evt6", "Sta7"): "AA-8",
("Evt6", "Sta8"): "AA-8",
("Evt6", "Sta9"): "AA-8",
("Evt6", "Sta10"): "AA-8",
("Evt6", "Sta11"): "AA-8",
("Evt6", "Sta12"): "AA-8",
("Evt6", "Sta13"): "AA-7",
("Evt7", "Sta3"): "AE-7",
("Evt8", "Sta3"): "AE-8",
("Evt9", "Sta6"): "DT-1",
("Evt9", "Sta8"): "AR-7",
("Evt10", "Sta2"): "AA-1",
("Evt10", "Sta3"): "AA-8",
("Evt10", "Sta5"): "AA-8",
("Evt10", "Sta6"): "DT-2",
("Evt10", "Sta7"): "AR-6",
("Evt10", "Sta8"): "AA-8",
("Evt10", "Sta9"): "AA-8",
("Evt10", "Sta10"): "AA-8",
("Evt10", "Sta11"): "AA-8",
("Evt10", "Sta12"): "AA-8",
("Evt10", "Sta13"): "AA-6",
("Evt11", "Sta6"): "AR-1",
("Evt12", "Sta2"): "AA-1",
("Evt12", "Sta3"): "AA-8",
("Evt12", "Sta5"): "AA-8",
("Evt12", "Sta6"): "AR-2",
("Evt12", "Sta7"): "AR-8",
("Evt12", "Sta8"): "AA-8",
("Evt12", "Sta9"): "AA-8",
("Evt12", "Sta10"): "AA-8",
("Evt12", "Sta11"): "AA-8",
("Evt12", "Sta12"): "AA-8",
("Evt12", "Sta13"): "AA-6",
("Evt13", "Sta2"): "AA-1",
("Evt13", "Sta3"): "AA-8",
("Evt13", "Sta5"): "AA-8",
("Evt13", "Sta6"): "AA-8",
("Evt13", "Sta7"): "AR-3",
("Evt13", "Sta8"): "AA-8",
("Evt13", "Sta9"): "AA-8",
("Evt13", "Sta10"): "AR-10",
("Evt13", "Sta11"): "AR-3",
("Evt13", "Sta12"): "AA-8",
("Evt13", "Sta13"): "AA-6",
("Evt14", "Sta8"): "AR-4",
("Evt14", "Sta9"): "AR-9",
("Evt14", "Sta12"): "AR-4",
("Evt15", "Sta3"): "AA-1",
("Evt15", "Sta4"): "AA-2",
("Evt15", "Sta5"): "AA-1",
("Evt15", "Sta6"): "AA-1",
("Evt15", "Sta7"): "AA-1",
("Evt15", "Sta8"): "AA-1",
("Evt15", "Sta9"): "AA-1",
("Evt15", "Sta10"): "AA-1",
("Evt15", "Sta11"): "AA-1",
("Evt15", "Sta12"): "AA-1",
("Evt16", "Sta2"): "AA-2",
("Evt16", "Sta3"): "AA-3",
("Evt16", "Sta5"): "AA-3",
("Evt16", "Sta6"): "AA-3",
("Evt16", "Sta7"): "AA-3",
("Evt16", "Sta8"): "AA-3",
("Evt16", "Sta9"): "AA-3",
("Evt16", "Sta10"): "AA-3",
("Evt16", "Sta11"): "AA-3",
("Evt16", "Sta12"): "AA-3",
("Evt16", "Sta13"): "AA-2",
("Evt17", "Sta2"): "AA-5",
("Evt17", "Sta3"): "AA-4",
("Evt17", "Sta4"): "AA-4",
("Evt17", "Sta5"): "AA-4",
("Evt17", "Sta6"): "AA-4",
("Evt17", "Sta7"): "AA-4",
("Evt17", "Sta8"): "AA-4",
("Evt17", "Sta9"): "AA-4",
("Evt17", "Sta10"): "AA-4",
("Evt17", "Sta11"): "AA-4",
("Evt17", "Sta12"): "AA-4",
("Evt17", "Sta13"): "AR-5",
("Evt18", "Sta2"): "AA-2",
("Evt18", "Sta13"): "AA-2",
("Evt19", "Sta2"): "AA-1",
("Evt19", "Sta3"): "AA-8",
("Evt19", "Sta5"): "AA-8",
("Evt19", "Sta6"): "AA-8",
("Evt19", "Sta7"): "AA-8",
("Evt19", "Sta8"): "AA-8",
("Evt19", "Sta9"): "AA-8",
("Evt19", "Sta10"): "AA-8",
("Evt19", "Sta11"): "AA-8",
("Evt19", "Sta12"): "AA-8",
("Evt19", "Sta13"): "AA-7",
}
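# Illustrative sketch (added, not part of the original module): one way the
# dicts above fit together. TRANSITION_TABLE maps an (event, state) pair to an
# action name, ACTIONS maps that name to a (description, handler, next-state)
# triple, and the handler returns the next state. The helper below, including
# its name, is an assumption added for illustration only.
def _example_do_action(event: str, state: str, dul: "DULServiceProvider") -> str:
    """Resolve and run the action triggered by `event` while in `state`."""
    action_name = TRANSITION_TABLE[(event, state)]  # e.g. ("Evt1", "Sta1") -> "AE-1"
    description, handler, _next_states = ACTIONS[action_name]
    LOGGER.debug(f"{state} + {event} -> {action_name}: {description}")
    return handler(dul)  # e.g. AE_1(dul) returns "Sta4"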
----------------------------------------------------------------------
blob_id: 50acf9deb4e8c27a8f5eef4d061717f0c4a0a750
directory_id: 568fa58296378fa129ab3349adf010daa44ed45b
path: /third_party/incubator-tvm/tests/python/unittest/test_pass_split_pipeline.py
content_id: 380053420f44fc5a1ef25392ab9fb691b6e6413a
detected_licenses: Apache-2.0, BSD-3-Clause, NCSA, X11-distribute-modifications-variant, Zlib, MIT, LicenseRef-scancode-unknown-license-reference, Unlicense, LLVM-exception, BSD-2-Clause
license_type: permissive
repo_name: mindspore-ai/akg | branch_name: refs/heads/master
snapshot_id: 37f471badc66de6a831f1f45ad84344f34d23ef2 | revision_id: 99f33858d6972741748cbfc9ab0bf9600428fef7
visit_date: 2023-07-25T23:03:17.672665 | revision_date: 2023-07-11T07:33:57 | committer_date: 2023-07-11T07:33:57
github_id: 274,077,856 | star_events_count: 319 | fork_events_count: 36
gha_license_id: Apache-2.0 | gha_event_created_at: 2021-12-30T13:43:08 | gha_created_at: 2020-06-22T08:09:05 | gha_language: Python
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false
length_bytes: 2,586 | extension: py | filename: test_pass_split_pipeline.py
content:
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import tvm
def lower(s, args):
binds = {}
arg_list = []
for x in args:
assert isinstance(x, tvm.tensor.Tensor)
buf = tvm.decl_buffer(x.shape, dtype=x.dtype, name=x.op.name)
binds[x] = buf
arg_list.append(buf)
s.normalize()
bounds = tvm.schedule.InferBound(s)
stmt = tvm.schedule.ScheduleOps(s, bounds)
stmt = tvm.ir_pass.StorageFlatten(stmt, binds, 64)
stmt = tvm.ir_pass.CanonicalSimplify(stmt)
stmt = tvm.ir_pass.Simplify(stmt)
return stmt
def test_basic_pipeline():
n = tvm.convert(128)
A = tvm.placeholder((n,), name='A')
stages = []
num_stage = 3
B = A
for k in range(num_stage):
stages.append(B)
B = tvm.compute((n,), lambda i: B[i] + k, name="A%s" % k)
s = tvm.create_schedule(B.op)
xo, xi = s[B].split(B.op.axis[0], nparts=1)
s[B].bind(xo, tvm.thread_axis("pipeline"))
xo, xi = s[B].split(xi, factor=4)
for S in stages:
s[S].compute_at(s[B], xo)
stmt = lower(s, [A, B])
stmt = tvm.ir_pass.SplitPipeline(stmt, False)
print(stmt)
stmt = tvm.ir_pass.NarrowChannelAccess(stmt)
print(stmt)
assert(tvm.ir_pass.VerifySSA(stmt))
def test_conv1d():
n = tvm.var('n')
A = tvm.compute((n+2), lambda i: 1, name='A')
def computeB(ii):
i = ii + 1
return A[i-1] + A[i] + A[i+1]
B = tvm.compute(n, computeB, name='B')
s = tvm.create_schedule(B.op)
px, xi = s[B].split(B.op.axis[0], nparts=1)
s[B].bind(px, tvm.thread_axis("pipeline"))
s[A].compute_at(s[B], px)
stmt = lower(s, [B])
stmt = tvm.ir_pass.SplitPipeline(stmt, False)
print(stmt)
stmt = tvm.ir_pass.NarrowChannelAccess(stmt)
print(stmt)
if __name__ == "__main__":
test_basic_pipeline()
test_conv1d()
----------------------------------------------------------------------
blob_id: 8ebaca8d796ec8ab4e100ea952734e68f97bf9a6
directory_id: afd2087e80478010d9df66e78280f75e1ff17d45
path: /tools/stats/upload_test_stat_aggregates.py
content_id: 38e4177032f6eebe5da811159c95992197fc1fd3
detected_licenses: BSD-3-Clause, BSD-2-Clause, LicenseRef-scancode-secret-labs-2011, LicenseRef-scancode-generic-cla, BSL-1.0, Apache-2.0
license_type: permissive
repo_name: pytorch/pytorch | branch_name: refs/heads/main
snapshot_id: 7521ac50c47d18b916ae47a6592c4646c2cb69b5 | revision_id: a6f7dd4707ac116c0f5fb5f44f42429f38d23ab4
visit_date: 2023-08-03T05:05:02.822937 | revision_date: 2023-08-03T00:40:33 | committer_date: 2023-08-03T04:14:52
github_id: 65,600,975 | star_events_count: 77,092 | fork_events_count: 24,610
gha_license_id: NOASSERTION | gha_event_created_at: 2023-09-14T21:58:39 | gha_created_at: 2016-08-13T05:26:41 | gha_language: Python
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false
length_bytes: 2,962 | extension: py | filename: upload_test_stat_aggregates.py
content:
import argparse
import ast
import datetime
import json
import os
import re
from typing import Any, List, Union
import rockset # type: ignore[import]
from tools.stats.upload_stats_lib import upload_to_s3
def get_oncall_from_testfile(testfile: str) -> Union[List[str], None]:
path = f"test/{testfile}"
if not path.endswith(".py"):
path += ".py"
# get oncall on test file
try:
with open(path) as f:
for line in f:
if line.startswith("# Owner(s): "):
possible_lists = re.findall(r"\[.*\]", line)
if len(possible_lists) > 1:
raise Exception("More than one list found")
elif len(possible_lists) == 0:
raise Exception("No oncalls found or file is badly formatted")
oncalls = ast.literal_eval(possible_lists[0])
return list(oncalls)
except Exception as e:
if "." in testfile:
return [f"module: {testfile.split('.')[0]}"]
else:
return ["module: unmarked"]
return None
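# Illustrative note (added, not part of the original script): the function above
# expects an ownership comment near the top of a test file, e.g.
#
#     # Owner(s): ["module: nn", "module: autograd"]
#
# in which case it returns that list. If the file cannot be read or the line is
# malformed, it falls back to ["module: <name before the first dot>"] when the
# name contains a dot (otherwise ["module: unmarked"]); a readable file with no
# Owner(s) line at all yields None.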
def get_test_stat_aggregates(date: datetime.date) -> Any:
# Initialize the Rockset client with your API key
rockset_api_key = os.environ["ROCKSET_API_KEY"]
rockset_api_server = "api.rs2.usw2.rockset.com"
iso_date = date.isoformat()
rs = rockset.RocksetClient(
host="api.usw2a1.rockset.com", api_key=os.environ["ROCKSET_API_KEY"]
)
# Define the name of the Rockset collection and lambda function
collection_name = "commons"
lambda_function_name = "test_insights_per_daily_upload"
query_parameters = [
rockset.models.QueryParameter(name="startTime", type="string", value=iso_date)
]
api_response = rs.QueryLambdas.execute_query_lambda(
query_lambda=lambda_function_name,
version="692684fa5b37177f",
parameters=query_parameters,
)
for i in range(len(api_response["results"])):
oncalls = get_oncall_from_testfile(api_response["results"][i]["test_file"])
api_response["results"][i]["oncalls"] = oncalls
return json.loads(
json.dumps(api_response["results"], indent=4, sort_keys=True, default=str)
)
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description="Upload test stat aggregates to Rockset."
)
parser.add_argument(
"--date",
type=datetime.date.fromisoformat,
help="Date to upload test stat aggregates for (YYYY-MM-DD). Must be in the last 30 days",
required=True,
)
args = parser.parse_args()
if args.date < datetime.datetime.now().date() - datetime.timedelta(days=30):
raise ValueError("date must be in the last 30 days")
data = get_test_stat_aggregates(date=args.date)
upload_to_s3(
bucket_name="torchci-aggregated-stats",
key=f"test_data_aggregates/{str(args.date)}",
docs=data,
)
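# Example invocation (illustrative assumption based on the repo path and the
# argparse/env usage above; not part of the original file):
#
#     ROCKSET_API_KEY=<key> python -m tools.stats.upload_test_stat_aggregates --date 2023-08-01
#
# --date must be an ISO date within the last 30 days, otherwise a ValueError is raised.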
----------------------------------------------------------------------
blob_id: 66673b9edd605a7f897fb25f7ab302f3a0ac571e
directory_id: 0948f5944bcb95af55ac258d6104044ddbedab6b
path: /setup_examples/epd_waveshare_42_pico.py
content_id: 79b8f780635c7a9afecb195cc6511f1475478037
detected_licenses: MIT
license_type: permissive
repo_name: peterhinch/micropython-nano-gui | branch_name: refs/heads/master
snapshot_id: e9b7ca20535bbb52c695083deb28721074cfa71e | revision_id: 5eef93317e83bc767da88fba8acdfc2a167db794
visit_date: 2023-06-22T09:27:18.739604 | revision_date: 2023-06-12T13:43:47 | committer_date: 2023-06-12T13:43:47
github_id: 146,632,615 | star_events_count: 360 | fork_events_count: 78
gha_license_id: MIT | gha_event_created_at: 2023-09-02T09:08:16 | gha_created_at: 2018-08-29T17:07:07 | gha_language: Python
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false
length_bytes: 749 | extension: py | filename: epd_waveshare_42_pico.py
content:
# color_setup.py Customise for your hardware config
# Released under the MIT License (MIT). See LICENSE.
# Copyright (c) 2022 Peter Hinch
# Supports Waveshare 4.2" 400x300 ePaper display with Raspberry Pico
# https://thepihut.com/collections/epaper-displays-for-raspberry-pi/products/4-2-e-paper-display-module-for-raspberry-pi-pico-black-white-400x300
# Waveshare code
# https://github.com/waveshare/Pico_ePaper_Code/blob/a6b26ac714d56f958010ddfca3b7fef8410c59c2/python/Pico-ePaper-4.2.py
import machine
import gc
from drivers.epaper.pico_epaper_42 import EPD as SSD
gc.collect() # Precaution before instantiating framebuf
ssd = SSD() # Create a display instance
# Set this to run demos written for arbitrary displays:
# ssd.demo_mode = True
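# Minimal usage sketch (added for illustration; not part of this setup file).
# Saved as color_setup.py, this module exposes `ssd`, which nano-gui scripts
# refresh roughly as below. The `gui.core.nanogui` path follows the usual
# micropython-nano-gui layout and is an assumption if your tree differs.
#
# from color_setup import ssd
# from gui.core.nanogui import refresh
# refresh(ssd, True)  # Initialise and clear the display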
----------------------------------------------------------------------
blob_id: ce53daa5fd37a81a446735c8c3a99e722ca9e067
directory_id: 0b134572e3ac3903ebb44df6d4138cbab9d3327c
path: /app/grandchallenge/teams/admin.py
content_id: 567c2c34d259ad0f7f6e15e5f4ee58175251003f
detected_licenses: Apache-2.0
license_type: permissive
repo_name: comic/grand-challenge.org | branch_name: refs/heads/main
snapshot_id: 660de3bafaf8f4560317f1dfd9ae9585ec272896 | revision_id: dac25f93b395974b32ba2a8a5f9e19b84b49e09d
visit_date: 2023-09-01T15:57:14.790244 | revision_date: 2023-08-31T14:23:04 | committer_date: 2023-08-31T14:23:04
github_id: 4,557,968 | star_events_count: 135 | fork_events_count: 53
gha_license_id: Apache-2.0 | gha_event_created_at: 2023-09-14T13:41:03 | gha_created_at: 2012-06-05T09:26:39 | gha_language: Python
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false
length_bytes: 393 | extension: py | filename: admin.py
content:
from django.contrib import admin
from grandchallenge.core.admin import (
GroupObjectPermissionAdmin,
UserObjectPermissionAdmin,
)
from grandchallenge.teams.models import (
TeamGroupObjectPermission,
TeamUserObjectPermission,
)
admin.site.register(TeamUserObjectPermission, UserObjectPermissionAdmin)
admin.site.register(TeamGroupObjectPermission, GroupObjectPermissionAdmin)
----------------------------------------------------------------------
blob_id: fb13d91f01104c56e450cb7404f95ca00675312f
directory_id: bbfc9f05efefe29b6ce9832bb3506efb900c1c93
path: /tencentcloud/bsca/v20210811/models.py
content_id: 92197515fa4b92b105f634058d119e97cf0c74d7
detected_licenses: Apache-2.0
license_type: permissive
repo_name: TencentCloud/tencentcloud-sdk-python | branch_name: refs/heads/master
snapshot_id: a2fab235926b0a27e9cfdf55e085a8bb15b3f506 | revision_id: 6baf00a5a56ba58b6a1123423e0a1422d17a0201
visit_date: 2023-09-04T10:52:28.060438 | revision_date: 2023-09-01T03:09:16 | committer_date: 2023-09-01T03:09:16
github_id: 130,147,399 | star_events_count: 594 | fork_events_count: 300
gha_license_id: Apache-2.0 | gha_event_created_at: 2023-09-06T07:03:24 | gha_created_at: 2018-04-19T02:23:12 | gha_language: Python
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false
length_bytes: 49,093 | extension: py | filename: models.py
content:
# -*- coding: utf8 -*-
# Copyright (c) 2017-2021 THL A29 Limited, a Tencent company. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import warnings
from tencentcloud.common.abstract_model import AbstractModel
class CVSSV2Info(AbstractModel):
"""CVSSv2.0详细信息。
"""
def __init__(self):
r"""
:param _CVSS: CVE score.
:type CVSS: float
:param _AccessVector: AccessVector, the attack vector.
Valid values:
<li>NETWORK: remote</li>
<li>ADJACENT_NETWORK: adjacent network</li>
<li>LOCAL: local</li>
:type AccessVector: str
:param _AccessComplexity: AccessComplexity, the attack complexity.
Valid values:
<li>HIGH</li>
<li>MEDIUM</li>
<li>LOW</li>
:type AccessComplexity: str
:param _Authentication: Authentication.
Valid values:
<li>MULTIPLE: multiple-system authentication</li>
<li>SINGLE: single-system authentication</li>
<li>NONE: none</li>
:type Authentication: str
:param _ConImpact: ConfidentialityImpact, the confidentiality impact.
Valid values:
<li>NONE</li>
<li>PARTIAL</li>
<li>COMPLETE</li>
:type ConImpact: str
:param _IntegrityImpact: IntegrityImpact, the integrity impact.
Valid values:
<li>NONE</li>
<li>PARTIAL</li>
<li>COMPLETE</li>
:type IntegrityImpact: str
:param _AvailabilityImpact: AvailabilityImpact, the availability impact.
Valid values:
<li>NONE</li>
<li>PARTIAL</li>
<li>COMPLETE</li>
:type AvailabilityImpact: str
"""
self._CVSS = None
self._AccessVector = None
self._AccessComplexity = None
self._Authentication = None
self._ConImpact = None
self._IntegrityImpact = None
self._AvailabilityImpact = None
@property
def CVSS(self):
return self._CVSS
@CVSS.setter
def CVSS(self, CVSS):
self._CVSS = CVSS
@property
def AccessVector(self):
return self._AccessVector
@AccessVector.setter
def AccessVector(self, AccessVector):
self._AccessVector = AccessVector
@property
def AccessComplexity(self):
return self._AccessComplexity
@AccessComplexity.setter
def AccessComplexity(self, AccessComplexity):
self._AccessComplexity = AccessComplexity
@property
def Authentication(self):
return self._Authentication
@Authentication.setter
def Authentication(self, Authentication):
self._Authentication = Authentication
@property
def ConImpact(self):
return self._ConImpact
@ConImpact.setter
def ConImpact(self, ConImpact):
self._ConImpact = ConImpact
@property
def IntegrityImpact(self):
return self._IntegrityImpact
@IntegrityImpact.setter
def IntegrityImpact(self, IntegrityImpact):
self._IntegrityImpact = IntegrityImpact
@property
def AvailabilityImpact(self):
return self._AvailabilityImpact
@AvailabilityImpact.setter
def AvailabilityImpact(self, AvailabilityImpact):
self._AvailabilityImpact = AvailabilityImpact
def _deserialize(self, params):
self._CVSS = params.get("CVSS")
self._AccessVector = params.get("AccessVector")
self._AccessComplexity = params.get("AccessComplexity")
self._Authentication = params.get("Authentication")
self._ConImpact = params.get("ConImpact")
self._IntegrityImpact = params.get("IntegrityImpact")
self._AvailabilityImpact = params.get("AvailabilityImpact")
memeber_set = set(params.keys())
for name, value in vars(self).items():
property_name = name[1:]
if property_name in memeber_set:
memeber_set.remove(property_name)
if len(memeber_set) > 0:
warnings.warn("%s fileds are useless." % ",".join(memeber_set))
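# Illustrative sketch (added; not part of the SDK source): models in this module
# are filled from API-style dicts via _deserialize, and unrecognised keys only
# produce a warning. The literal values below are assumptions for illustration.
#
# info = CVSSV2Info()
# info._deserialize({"CVSS": 7.5, "AccessVector": "NETWORK", "AccessComplexity": "LOW"})
# print(info.CVSS, info.AccessVector)  # -> 7.5 NETWORK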
class CVSSV3Info(AbstractModel):
"""Cvssv3.0详细信息。
"""
def __init__(self):
r"""
:param _CVSS: CVE score.
:type CVSS: float
:param _AttackVector: AttackVector, the attack vector.
Valid values:
<li>NETWORK: remote</li>
<li>ADJACENT_NETWORK: adjacent network</li>
<li>LOCAL: local</li>
<li>PHYSICAL: physical</li>
:type AttackVector: str
:param _AttackComplexity: AttackComplexity, the attack complexity.
Valid values:
<li>HIGH</li>
<li>LOW</li>
:type AttackComplexity: str
:param _PrivilegesRequired: PrivilegesRequired, the privileges required.
Valid values:
<li>HIGH</li>
<li>LOW</li>
<li>NONE</li>
:type PrivilegesRequired: str
:param _UserInteraction: UserInteraction, whether user interaction is required.
Valid values:
<li>NONE: not required</li>
<li>REQUIRED: required</li>
:type UserInteraction: str
:param _Scope: Scope, whether the security boundary can be bypassed.
Valid values:
<li>UNCHANGED: no</li>
<li>CHANGED: yes</li>
:type Scope: str
:param _ConImpact: ConfidentialityImpact, the confidentiality impact.
Valid values:
<li>NONE</li>
<li>LOW</li>
<li>HIGH</li>
:type ConImpact: str
:param _IntegrityImpact: IntegrityImpact, the integrity impact.
Valid values:
<li>NONE</li>
<li>LOW</li>
<li>HIGH</li>
:type IntegrityImpact: str
:param _AvailabilityImpact: AvailabilityImpact, the availability impact.
Valid values:
<li>NONE</li>
<li>LOW</li>
<li>HIGH</li>
:type AvailabilityImpact: str
"""
self._CVSS = None
self._AttackVector = None
self._AttackComplexity = None
self._PrivilegesRequired = None
self._UserInteraction = None
self._Scope = None
self._ConImpact = None
self._IntegrityImpact = None
self._AvailabilityImpact = None
@property
def CVSS(self):
return self._CVSS
@CVSS.setter
def CVSS(self, CVSS):
self._CVSS = CVSS
@property
def AttackVector(self):
return self._AttackVector
@AttackVector.setter
def AttackVector(self, AttackVector):
self._AttackVector = AttackVector
@property
def AttackComplexity(self):
return self._AttackComplexity
@AttackComplexity.setter
def AttackComplexity(self, AttackComplexity):
self._AttackComplexity = AttackComplexity
@property
def PrivilegesRequired(self):
return self._PrivilegesRequired
@PrivilegesRequired.setter
def PrivilegesRequired(self, PrivilegesRequired):
self._PrivilegesRequired = PrivilegesRequired
@property
def UserInteraction(self):
return self._UserInteraction
@UserInteraction.setter
def UserInteraction(self, UserInteraction):
self._UserInteraction = UserInteraction
@property
def Scope(self):
return self._Scope
@Scope.setter
def Scope(self, Scope):
self._Scope = Scope
@property
def ConImpact(self):
return self._ConImpact
@ConImpact.setter
def ConImpact(self, ConImpact):
self._ConImpact = ConImpact
@property
def IntegrityImpact(self):
return self._IntegrityImpact
@IntegrityImpact.setter
def IntegrityImpact(self, IntegrityImpact):
self._IntegrityImpact = IntegrityImpact
@property
def AvailabilityImpact(self):
return self._AvailabilityImpact
@AvailabilityImpact.setter
def AvailabilityImpact(self, AvailabilityImpact):
self._AvailabilityImpact = AvailabilityImpact
def _deserialize(self, params):
self._CVSS = params.get("CVSS")
self._AttackVector = params.get("AttackVector")
self._AttackComplexity = params.get("AttackComplexity")
self._PrivilegesRequired = params.get("PrivilegesRequired")
self._UserInteraction = params.get("UserInteraction")
self._Scope = params.get("Scope")
self._ConImpact = params.get("ConImpact")
self._IntegrityImpact = params.get("IntegrityImpact")
self._AvailabilityImpact = params.get("AvailabilityImpact")
memeber_set = set(params.keys())
for name, value in vars(self).items():
property_name = name[1:]
if property_name in memeber_set:
memeber_set.remove(property_name)
if len(memeber_set) > 0:
warnings.warn("%s fileds are useless." % ",".join(memeber_set))
class Component(AbstractModel):
"""描述一个第三方组件的源信息。
"""
def __init__(self):
r"""
:param _PURL: PURL of the third-party component.
:type PURL: :class:`tencentcloud.bsca.v20210811.models.PURL`
:param _Homepage: Homepage of the third-party component.
:type Homepage: str
:param _Summary: Brief description of the third-party component.
:type Summary: str
:param _NicknameList: List of aliases of the third-party component.
Note: this field may return null, indicating that no valid value was found.
:type NicknameList: list of str
:param _CodeLocationList: List of code locations of the third-party component.
Note: this field may return null, indicating that no valid value was found.
:type CodeLocationList: list of str
:param _LicenseExpression: License expression of the third-party component.
:type LicenseExpression: str
"""
self._PURL = None
self._Homepage = None
self._Summary = None
self._NicknameList = None
self._CodeLocationList = None
self._LicenseExpression = None
@property
def PURL(self):
return self._PURL
@PURL.setter
def PURL(self, PURL):
self._PURL = PURL
@property
def Homepage(self):
return self._Homepage
@Homepage.setter
def Homepage(self, Homepage):
self._Homepage = Homepage
@property
def Summary(self):
return self._Summary
@Summary.setter
def Summary(self, Summary):
self._Summary = Summary
@property
def NicknameList(self):
return self._NicknameList
@NicknameList.setter
def NicknameList(self, NicknameList):
self._NicknameList = NicknameList
@property
def CodeLocationList(self):
return self._CodeLocationList
@CodeLocationList.setter
def CodeLocationList(self, CodeLocationList):
self._CodeLocationList = CodeLocationList
@property
def LicenseExpression(self):
return self._LicenseExpression
@LicenseExpression.setter
def LicenseExpression(self, LicenseExpression):
self._LicenseExpression = LicenseExpression
def _deserialize(self, params):
if params.get("PURL") is not None:
self._PURL = PURL()
self._PURL._deserialize(params.get("PURL"))
self._Homepage = params.get("Homepage")
self._Summary = params.get("Summary")
self._NicknameList = params.get("NicknameList")
self._CodeLocationList = params.get("CodeLocationList")
self._LicenseExpression = params.get("LicenseExpression")
memeber_set = set(params.keys())
for name, value in vars(self).items():
property_name = name[1:]
if property_name in memeber_set:
memeber_set.remove(property_name)
if len(memeber_set) > 0:
warnings.warn("%s fileds are useless." % ",".join(memeber_set))
class ComponentVulnerabilitySummary(AbstractModel):
"""与输入组件相关的漏洞信息摘要信息。
"""
def __init__(self):
r"""
:param _PURL: PURL used to match the vulnerability.
Note: this field may return null, indicating that no valid value was found.
:type PURL: :class:`tencentcloud.bsca.v20210811.models.PURL`
:param _CanBeFixed: Whether this component has an official patch that fixes the vulnerability.
:type CanBeFixed: bool
:param _FixedVersion: Component version that fixes the vulnerability.
:type FixedVersion: str
:param _AffectedVersion: Component versions affected by the vulnerability.
:type AffectedVersion: str
:param _AffectedComponent: Component affected by the vulnerability.
:type AffectedComponent: str
:param _RiskLevel: Risk level of the vulnerability in this product.
<li>Critical</li>
<li>High</li>
<li>Medium</li>
<li>Low</li>
:type RiskLevel: str
"""
self._PURL = None
self._CanBeFixed = None
self._FixedVersion = None
self._AffectedVersion = None
self._AffectedComponent = None
self._RiskLevel = None
@property
def PURL(self):
return self._PURL
@PURL.setter
def PURL(self, PURL):
self._PURL = PURL
@property
def CanBeFixed(self):
return self._CanBeFixed
@CanBeFixed.setter
def CanBeFixed(self, CanBeFixed):
self._CanBeFixed = CanBeFixed
@property
def FixedVersion(self):
return self._FixedVersion
@FixedVersion.setter
def FixedVersion(self, FixedVersion):
self._FixedVersion = FixedVersion
@property
def AffectedVersion(self):
return self._AffectedVersion
@AffectedVersion.setter
def AffectedVersion(self, AffectedVersion):
self._AffectedVersion = AffectedVersion
@property
def AffectedComponent(self):
return self._AffectedComponent
@AffectedComponent.setter
def AffectedComponent(self, AffectedComponent):
self._AffectedComponent = AffectedComponent
@property
def RiskLevel(self):
return self._RiskLevel
@RiskLevel.setter
def RiskLevel(self, RiskLevel):
self._RiskLevel = RiskLevel
def _deserialize(self, params):
if params.get("PURL") is not None:
self._PURL = PURL()
self._PURL._deserialize(params.get("PURL"))
self._CanBeFixed = params.get("CanBeFixed")
self._FixedVersion = params.get("FixedVersion")
self._AffectedVersion = params.get("AffectedVersion")
self._AffectedComponent = params.get("AffectedComponent")
self._RiskLevel = params.get("RiskLevel")
memeber_set = set(params.keys())
for name, value in vars(self).items():
property_name = name[1:]
if property_name in memeber_set:
memeber_set.remove(property_name)
if len(memeber_set) > 0:
warnings.warn("%s fileds are useless." % ",".join(memeber_set))
class ComponentVulnerabilityUnion(AbstractModel):
"""描述组件漏洞相关概览信息。
"""
def __init__(self):
r"""
:param _Summary: Vulnerability summary information.
:type Summary: :class:`tencentcloud.bsca.v20210811.models.VulnerabilitySummary`
:param _SummaryInComponent: Vulnerability summary information related to the component.
:type SummaryInComponent: :class:`tencentcloud.bsca.v20210811.models.ComponentVulnerabilitySummary`
"""
self._Summary = None
self._SummaryInComponent = None
@property
def Summary(self):
return self._Summary
@Summary.setter
def Summary(self, Summary):
self._Summary = Summary
@property
def SummaryInComponent(self):
return self._SummaryInComponent
@SummaryInComponent.setter
def SummaryInComponent(self, SummaryInComponent):
self._SummaryInComponent = SummaryInComponent
def _deserialize(self, params):
if params.get("Summary") is not None:
self._Summary = VulnerabilitySummary()
self._Summary._deserialize(params.get("Summary"))
if params.get("SummaryInComponent") is not None:
self._SummaryInComponent = ComponentVulnerabilitySummary()
self._SummaryInComponent._deserialize(params.get("SummaryInComponent"))
memeber_set = set(params.keys())
for name, value in vars(self).items():
property_name = name[1:]
if property_name in memeber_set:
memeber_set.remove(property_name)
if len(memeber_set) > 0:
warnings.warn("%s fileds are useless." % ",".join(memeber_set))
class DescribeKBComponentRequest(AbstractModel):
"""DescribeKBComponent请求参数结构体
"""
def __init__(self):
r"""
:param _PURL: PURL of the component.
:type PURL: :class:`tencentcloud.bsca.v20210811.models.PURL`
"""
self._PURL = None
@property
def PURL(self):
return self._PURL
@PURL.setter
def PURL(self, PURL):
self._PURL = PURL
def _deserialize(self, params):
if params.get("PURL") is not None:
self._PURL = PURL()
self._PURL._deserialize(params.get("PURL"))
memeber_set = set(params.keys())
for name, value in vars(self).items():
property_name = name[1:]
if property_name in memeber_set:
memeber_set.remove(property_name)
if len(memeber_set) > 0:
warnings.warn("%s fileds are useless." % ",".join(memeber_set))
class DescribeKBComponentResponse(AbstractModel):
"""DescribeKBComponent返回参数结构体
"""
def __init__(self):
r"""
:param _Component: Information about the matched component.
:type Component: :class:`tencentcloud.bsca.v20210811.models.Component`
:param _RequestId: Unique request ID, returned with every request. The RequestId is needed to locate a problem.
:type RequestId: str
"""
self._Component = None
self._RequestId = None
@property
def Component(self):
return self._Component
@Component.setter
def Component(self, Component):
self._Component = Component
@property
def RequestId(self):
return self._RequestId
@RequestId.setter
def RequestId(self, RequestId):
self._RequestId = RequestId
def _deserialize(self, params):
if params.get("Component") is not None:
self._Component = Component()
self._Component._deserialize(params.get("Component"))
self._RequestId = params.get("RequestId")
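# Hypothetical client usage (added for illustration; not a verbatim excerpt).
# The bsca_client module name and the PURL fields follow the usual
# tencentcloud-sdk-python conventions and the docstrings above, but are
# assumptions rather than confirmed API details.
#
# from tencentcloud.common import credential
# from tencentcloud.bsca.v20210811 import bsca_client, models
#
# cred = credential.Credential("SecretId", "SecretKey")
# client = bsca_client.BscaClient(cred, "ap-guangzhou")
# req = models.DescribeKBComponentRequest()
# req.PURL = models.PURL()  # PURL is defined elsewhere in this module
# req.PURL.Name, req.PURL.Version = "log4j-core", "2.14.1"
# resp = client.DescribeKBComponent(req)
# print(resp.Component.LicenseExpression)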
class DescribeKBComponentVulnerabilityRequest(AbstractModel):
"""DescribeKBComponentVulnerability请求参数结构体
"""
def __init__(self):
r"""
:param _PURL: PURL of the component; the Name and Version fields are required.
:type PURL: :class:`tencentcloud.bsca.v20210811.models.PURL`
"""
self._PURL = None
@property
def PURL(self):
return self._PURL
@PURL.setter
def PURL(self, PURL):
self._PURL = PURL
def _deserialize(self, params):
if params.get("PURL") is not None:
self._PURL = PURL()
self._PURL._deserialize(params.get("PURL"))
memeber_set = set(params.keys())
for name, value in vars(self).items():
property_name = name[1:]
if property_name in memeber_set:
memeber_set.remove(property_name)
if len(memeber_set) > 0:
warnings.warn("%s fileds are useless." % ",".join(memeber_set))
class DescribeKBComponentVulnerabilityResponse(AbstractModel):
"""DescribeKBComponentVulnerability返回参数结构体
"""
def __init__(self):
r"""
:param _VulnerabilityList: List of vulnerability information.
Note: this field may return null, indicating that no valid value was found.
:type VulnerabilityList: list of ComponentVulnerabilityUnion
:param _RequestId: Unique request ID, returned with every request. The RequestId is needed to locate a problem.
:type RequestId: str
"""
self._VulnerabilityList = None
self._RequestId = None
@property
def VulnerabilityList(self):
return self._VulnerabilityList
@VulnerabilityList.setter
def VulnerabilityList(self, VulnerabilityList):
self._VulnerabilityList = VulnerabilityList
@property
def RequestId(self):
return self._RequestId
@RequestId.setter
def RequestId(self, RequestId):
self._RequestId = RequestId
def _deserialize(self, params):
if params.get("VulnerabilityList") is not None:
self._VulnerabilityList = []
for item in params.get("VulnerabilityList"):
obj = ComponentVulnerabilityUnion()
obj._deserialize(item)
self._VulnerabilityList.append(obj)
self._RequestId = params.get("RequestId")
class DescribeKBLicenseRequest(AbstractModel):
"""DescribeKBLicense请求参数结构体
"""
def __init__(self):
r"""
:param _LicenseExpression: License expression.
:type LicenseExpression: str
"""
self._LicenseExpression = None
@property
def LicenseExpression(self):
return self._LicenseExpression
@LicenseExpression.setter
def LicenseExpression(self, LicenseExpression):
self._LicenseExpression = LicenseExpression
def _deserialize(self, params):
self._LicenseExpression = params.get("LicenseExpression")
memeber_set = set(params.keys())
for name, value in vars(self).items():
property_name = name[1:]
if property_name in memeber_set:
memeber_set.remove(property_name)
if len(memeber_set) > 0:
warnings.warn("%s fileds are useless." % ",".join(memeber_set))
class DescribeKBLicenseResponse(AbstractModel):
"""DescribeKBLicense返回参数结构体
"""
def __init__(self):
r"""
:param _LicenseList: List of licenses.
Note: this field may return null, indicating that no valid value was found.
:type LicenseList: list of LicenseUnion
:param _NormalizedLicenseExpression: License expression used for matching.
:type NormalizedLicenseExpression: str
:param _RequestId: Unique request ID, returned with every request. The RequestId is needed to locate a problem.
:type RequestId: str
"""
self._LicenseList = None
self._NormalizedLicenseExpression = None
self._RequestId = None
@property
def LicenseList(self):
return self._LicenseList
@LicenseList.setter
def LicenseList(self, LicenseList):
self._LicenseList = LicenseList
@property
def NormalizedLicenseExpression(self):
return self._NormalizedLicenseExpression
@NormalizedLicenseExpression.setter
def NormalizedLicenseExpression(self, NormalizedLicenseExpression):
self._NormalizedLicenseExpression = NormalizedLicenseExpression
@property
def RequestId(self):
return self._RequestId
@RequestId.setter
def RequestId(self, RequestId):
self._RequestId = RequestId
def _deserialize(self, params):
if params.get("LicenseList") is not None:
self._LicenseList = []
for item in params.get("LicenseList"):
obj = LicenseUnion()
obj._deserialize(item)
self._LicenseList.append(obj)
self._NormalizedLicenseExpression = params.get("NormalizedLicenseExpression")
self._RequestId = params.get("RequestId")
class DescribeKBVulnerabilityRequest(AbstractModel):
"""DescribeKBVulnerability请求参数结构体
"""
def __init__(self):
r"""
:param _CVEID: List of CVE IDs (cannot be present together with Vul IDs).
:type CVEID: list of str
:param _VulID: List of Vul IDs (cannot be present together with CVE IDs).
:type VulID: list of str
"""
self._CVEID = None
self._VulID = None
@property
def CVEID(self):
return self._CVEID
@CVEID.setter
def CVEID(self, CVEID):
self._CVEID = CVEID
@property
def VulID(self):
return self._VulID
@VulID.setter
def VulID(self, VulID):
self._VulID = VulID
def _deserialize(self, params):
self._CVEID = params.get("CVEID")
self._VulID = params.get("VulID")
memeber_set = set(params.keys())
for name, value in vars(self).items():
property_name = name[1:]
if property_name in memeber_set:
memeber_set.remove(property_name)
if len(memeber_set) > 0:
warnings.warn("%s fileds are useless." % ",".join(memeber_set))
class DescribeKBVulnerabilityResponse(AbstractModel):
"""DescribeKBVulnerability返回参数结构体
"""
def __init__(self):
r"""
:param _VulnerabilityDetailList: List of detailed vulnerability information.
Note: this field may return null, indicating that no valid value was found.
:type VulnerabilityDetailList: list of VulnerabilityUnion
:param _RequestId: Unique request ID, returned with every request. The RequestId is needed to locate a problem.
:type RequestId: str
"""
self._VulnerabilityDetailList = None
self._RequestId = None
@property
def VulnerabilityDetailList(self):
return self._VulnerabilityDetailList
@VulnerabilityDetailList.setter
def VulnerabilityDetailList(self, VulnerabilityDetailList):
self._VulnerabilityDetailList = VulnerabilityDetailList
@property
def RequestId(self):
return self._RequestId
@RequestId.setter
def RequestId(self, RequestId):
self._RequestId = RequestId
def _deserialize(self, params):
if params.get("VulnerabilityDetailList") is not None:
self._VulnerabilityDetailList = []
for item in params.get("VulnerabilityDetailList"):
obj = VulnerabilityUnion()
obj._deserialize(item)
self._VulnerabilityDetailList.append(obj)
self._RequestId = params.get("RequestId")
class LicenseDetail(AbstractModel):
"""描述许可证的详细信息。
"""
def __init__(self):
r"""
:param _Content: License content.
:type Content: str
:param _ConditionSet: List of license permission information.
:type ConditionSet: list of LicenseRestriction
:param _ForbiddenSet: List of license requirement information.
:type ForbiddenSet: list of LicenseRestriction
:param _PermissionSet: List of license prohibition information.
:type PermissionSet: list of LicenseRestriction
"""
self._Content = None
self._ConditionSet = None
self._ForbiddenSet = None
self._PermissionSet = None
@property
def Content(self):
return self._Content
@Content.setter
def Content(self, Content):
self._Content = Content
@property
def ConditionSet(self):
return self._ConditionSet
@ConditionSet.setter
def ConditionSet(self, ConditionSet):
self._ConditionSet = ConditionSet
@property
def ForbiddenSet(self):
return self._ForbiddenSet
@ForbiddenSet.setter
def ForbiddenSet(self, ForbiddenSet):
self._ForbiddenSet = ForbiddenSet
@property
def PermissionSet(self):
return self._PermissionSet
@PermissionSet.setter
def PermissionSet(self, PermissionSet):
self._PermissionSet = PermissionSet
def _deserialize(self, params):
self._Content = params.get("Content")
if params.get("ConditionSet") is not None:
self._ConditionSet = []
for item in params.get("ConditionSet"):
obj = LicenseRestriction()
obj._deserialize(item)
self._ConditionSet.append(obj)
if params.get("ForbiddenSet") is not None:
self._ForbiddenSet = []
for item in params.get("ForbiddenSet"):
obj = LicenseRestriction()
obj._deserialize(item)
self._ForbiddenSet.append(obj)
if params.get("PermissionSet") is not None:
self._PermissionSet = []
for item in params.get("PermissionSet"):
obj = LicenseRestriction()
obj._deserialize(item)
self._PermissionSet.append(obj)
memeber_set = set(params.keys())
for name, value in vars(self).items():
property_name = name[1:]
if property_name in memeber_set:
memeber_set.remove(property_name)
if len(memeber_set) > 0:
warnings.warn("%s fileds are useless." % ",".join(memeber_set))
class LicenseRestriction(AbstractModel):
"""License约束信息。
"""
def __init__(self):
r"""
:param _Name: Name of the license restriction.
:type Name: str
:param _Description: Description of the license restriction.
:type Description: str
"""
self._Name = None
self._Description = None
@property
def Name(self):
return self._Name
@Name.setter
def Name(self, Name):
self._Name = Name
@property
def Description(self):
return self._Description
@Description.setter
def Description(self, Description):
self._Description = Description
def _deserialize(self, params):
self._Name = params.get("Name")
self._Description = params.get("Description")
memeber_set = set(params.keys())
for name, value in vars(self).items():
property_name = name[1:]
if property_name in memeber_set:
memeber_set.remove(property_name)
if len(memeber_set) > 0:
warnings.warn("%s fileds are useless." % ",".join(memeber_set))
class LicenseSummary(AbstractModel):
"""描述许可证的概览信息。
"""
def __init__(self):
r"""
:param _Key: License identifier.
:type Key: str
:param _SPDXKey: SPDX identifier of the license, see https://spdx.org/licenses/
:type SPDXKey: str
:param _ShortName: Short name of the license.
:type ShortName: str
:param _Name: Full name of the license.
:type Name: str
:param _Risk: License risk level.
<li>NotDefined</li>
<li>LowRisk</li>
<li>MediumRisk</li>
<li>HighRisk</li>
:type Risk: str
:param _Source: Source URL of the license.
:type Source: str
"""
self._Key = None
self._SPDXKey = None
self._ShortName = None
self._Name = None
self._Risk = None
self._Source = None
@property
def Key(self):
return self._Key
@Key.setter
def Key(self, Key):
self._Key = Key
@property
def SPDXKey(self):
return self._SPDXKey
@SPDXKey.setter
def SPDXKey(self, SPDXKey):
self._SPDXKey = SPDXKey
@property
def ShortName(self):
return self._ShortName
@ShortName.setter
def ShortName(self, ShortName):
self._ShortName = ShortName
@property
def Name(self):
return self._Name
@Name.setter
def Name(self, Name):
self._Name = Name
@property
def Risk(self):
return self._Risk
@Risk.setter
def Risk(self, Risk):
self._Risk = Risk
@property
def Source(self):
return self._Source
@Source.setter
def Source(self, Source):
self._Source = Source
def _deserialize(self, params):
self._Key = params.get("Key")
self._SPDXKey = params.get("SPDXKey")
self._ShortName = params.get("ShortName")
self._Name = params.get("Name")
self._Risk = params.get("Risk")
self._Source = params.get("Source")
memeber_set = set(params.keys())
for name, value in vars(self).items():
property_name = name[1:]
if property_name in memeber_set:
memeber_set.remove(property_name)
if len(memeber_set) > 0:
warnings.warn("%s fileds are useless." % ",".join(memeber_set))
class LicenseUnion(AbstractModel):
"""许可证详细信息。
"""
def __init__(self):
r"""
:param _LicenseSummary: License summary information
:type LicenseSummary: :class:`tencentcloud.bsca.v20210811.models.LicenseSummary`
:param _LicenseDetail: License detail information
:type LicenseDetail: :class:`tencentcloud.bsca.v20210811.models.LicenseDetail`
"""
self._LicenseSummary = None
self._LicenseDetail = None
@property
def LicenseSummary(self):
return self._LicenseSummary
@LicenseSummary.setter
def LicenseSummary(self, LicenseSummary):
self._LicenseSummary = LicenseSummary
@property
def LicenseDetail(self):
return self._LicenseDetail
@LicenseDetail.setter
def LicenseDetail(self, LicenseDetail):
self._LicenseDetail = LicenseDetail
def _deserialize(self, params):
if params.get("LicenseSummary") is not None:
self._LicenseSummary = LicenseSummary()
self._LicenseSummary._deserialize(params.get("LicenseSummary"))
if params.get("LicenseDetail") is not None:
self._LicenseDetail = LicenseDetail()
self._LicenseDetail._deserialize(params.get("LicenseDetail"))
memeber_set = set(params.keys())
for name, value in vars(self).items():
property_name = name[1:]
if property_name in memeber_set:
memeber_set.remove(property_name)
if len(memeber_set) > 0:
warnings.warn("%s fileds are useless." % ",".join(memeber_set))
class MatchKBPURLListRequest(AbstractModel):
"""MatchKBPURLList请求参数结构体
"""
def __init__(self):
r"""
:param _SHA1: SHA1.
:type SHA1: str
"""
self._SHA1 = None
@property
def SHA1(self):
return self._SHA1
@SHA1.setter
def SHA1(self, SHA1):
self._SHA1 = SHA1
def _deserialize(self, params):
self._SHA1 = params.get("SHA1")
memeber_set = set(params.keys())
for name, value in vars(self).items():
property_name = name[1:]
if property_name in memeber_set:
memeber_set.remove(property_name)
if len(memeber_set) > 0:
warnings.warn("%s fileds are useless." % ",".join(memeber_set))
class MatchKBPURLListResponse(AbstractModel):
"""MatchKBPURLList返回参数结构体
"""
def __init__(self):
r"""
:param _PURLList: List of components.
:type PURLList: list of PURL
:param _Hit: Whether the database was hit.
:type Hit: bool
:param _RequestId: Unique request ID, returned with every request. Provide this RequestId when reporting a problem with the request.
:type RequestId: str
"""
self._PURLList = None
self._Hit = None
self._RequestId = None
@property
def PURLList(self):
return self._PURLList
@PURLList.setter
def PURLList(self, PURLList):
self._PURLList = PURLList
@property
def Hit(self):
return self._Hit
@Hit.setter
def Hit(self, Hit):
self._Hit = Hit
@property
def RequestId(self):
return self._RequestId
@RequestId.setter
def RequestId(self, RequestId):
self._RequestId = RequestId
def _deserialize(self, params):
if params.get("PURLList") is not None:
self._PURLList = []
for item in params.get("PURLList"):
obj = PURL()
obj._deserialize(item)
self._PURLList.append(obj)
self._Hit = params.get("Hit")
self._RequestId = params.get("RequestId")
class PURL(AbstractModel):
"""PURL(Package URL)用于定位一个产品或组件,见 https://github.com/package-url/purl-spec。
"""
def __init__(self):
r"""
:param _Name: Component name
:type Name: str
:param _Protocol: Type the component belongs to, e.g. github, gitlab, generic, deb, rpm, maven, etc.
:type Protocol: str
:param _Namespace: Prefix of the component name, such as the user name for github and gitlab, the operating system for deb, or the group id for maven packages
:type Namespace: str
:param _Qualifiers: Extra attributes qualifying the component
Note: this field may return null, indicating that no valid value was obtained.
:type Qualifiers: list of Qualifier
:param _Subpath: Subdirectory relative to the root of the component package
:type Subpath: str
:param _Version: Component version
:type Version: str
"""
self._Name = None
self._Protocol = None
self._Namespace = None
self._Qualifiers = None
self._Subpath = None
self._Version = None
@property
def Name(self):
return self._Name
@Name.setter
def Name(self, Name):
self._Name = Name
@property
def Protocol(self):
return self._Protocol
@Protocol.setter
def Protocol(self, Protocol):
self._Protocol = Protocol
@property
def Namespace(self):
return self._Namespace
@Namespace.setter
def Namespace(self, Namespace):
self._Namespace = Namespace
@property
def Qualifiers(self):
return self._Qualifiers
@Qualifiers.setter
def Qualifiers(self, Qualifiers):
self._Qualifiers = Qualifiers
@property
def Subpath(self):
return self._Subpath
@Subpath.setter
def Subpath(self, Subpath):
self._Subpath = Subpath
@property
def Version(self):
return self._Version
@Version.setter
def Version(self, Version):
self._Version = Version
def _deserialize(self, params):
self._Name = params.get("Name")
self._Protocol = params.get("Protocol")
self._Namespace = params.get("Namespace")
if params.get("Qualifiers") is not None:
self._Qualifiers = []
for item in params.get("Qualifiers"):
obj = Qualifier()
obj._deserialize(item)
self._Qualifiers.append(obj)
self._Subpath = params.get("Subpath")
self._Version = params.get("Version")
memeber_set = set(params.keys())
for name, value in vars(self).items():
property_name = name[1:]
if property_name in memeber_set:
memeber_set.remove(property_name)
if len(memeber_set) > 0:
warnings.warn("%s fileds are useless." % ",".join(memeber_set))
class Qualifier(AbstractModel):
"""PURL下的Qualifier属性类型,用于定义第三方组件的额外属性,见 https://github.com/package-url/purl-spec。
"""
def __init__(self):
r"""
:param _Key: Name of the extra attribute.
:type Key: str
:param _Value: Value of the extra attribute.
:type Value: str
"""
self._Key = None
self._Value = None
@property
def Key(self):
return self._Key
@Key.setter
def Key(self, Key):
self._Key = Key
@property
def Value(self):
return self._Value
@Value.setter
def Value(self, Value):
self._Value = Value
def _deserialize(self, params):
self._Key = params.get("Key")
self._Value = params.get("Value")
memeber_set = set(params.keys())
for name, value in vars(self).items():
property_name = name[1:]
if property_name in memeber_set:
memeber_set.remove(property_name)
if len(memeber_set) > 0:
warnings.warn("%s fileds are useless." % ",".join(memeber_set))
class VulnerabilityDetail(AbstractModel):
"""描述漏洞详细信息。
"""
def __init__(self):
r"""
:param _Category: Vulnerability category
:type Category: str
:param _CategoryType: Vulnerability classification
:type CategoryType: str
:param _Description: Vulnerability description
:type Description: str
:param _OfficialSolution: Official solution for the vulnerability
:type OfficialSolution: str
:param _ReferenceList: List of vulnerability references
:type ReferenceList: list of str
:param _DefenseSolution: Defense solution for the vulnerability
:type DefenseSolution: str
:param _CVSSv2Info: CVSSv2 information of the vulnerability
Note: this field may return null, indicating that no valid value was obtained.
:type CVSSv2Info: :class:`tencentcloud.bsca.v20210811.models.CVSSV2Info`
:param _CVSSv3Info: CVSSv3 information of the vulnerability
Note: this field may return null, indicating that no valid value was obtained.
:type CVSSv3Info: :class:`tencentcloud.bsca.v20210811.models.CVSSV3Info`
:param _SubmitTime: Submission time of the vulnerability
:type SubmitTime: str
:param _CWEID: CWE ID
:type CWEID: str
:param _CVSSv2Vector: CVSSv2 vector of the vulnerability
:type CVSSv2Vector: str
:param _CVSSv3Vector: CVSSv3 vector of the vulnerability
:type CVSSv3Vector: str
"""
self._Category = None
self._CategoryType = None
self._Description = None
self._OfficialSolution = None
self._ReferenceList = None
self._DefenseSolution = None
self._CVSSv2Info = None
self._CVSSv3Info = None
self._SubmitTime = None
self._CWEID = None
self._CVSSv2Vector = None
self._CVSSv3Vector = None
@property
def Category(self):
return self._Category
@Category.setter
def Category(self, Category):
self._Category = Category
@property
def CategoryType(self):
return self._CategoryType
@CategoryType.setter
def CategoryType(self, CategoryType):
self._CategoryType = CategoryType
@property
def Description(self):
return self._Description
@Description.setter
def Description(self, Description):
self._Description = Description
@property
def OfficialSolution(self):
return self._OfficialSolution
@OfficialSolution.setter
def OfficialSolution(self, OfficialSolution):
self._OfficialSolution = OfficialSolution
@property
def ReferenceList(self):
return self._ReferenceList
@ReferenceList.setter
def ReferenceList(self, ReferenceList):
self._ReferenceList = ReferenceList
@property
def DefenseSolution(self):
return self._DefenseSolution
@DefenseSolution.setter
def DefenseSolution(self, DefenseSolution):
self._DefenseSolution = DefenseSolution
@property
def CVSSv2Info(self):
return self._CVSSv2Info
@CVSSv2Info.setter
def CVSSv2Info(self, CVSSv2Info):
self._CVSSv2Info = CVSSv2Info
@property
def CVSSv3Info(self):
return self._CVSSv3Info
@CVSSv3Info.setter
def CVSSv3Info(self, CVSSv3Info):
self._CVSSv3Info = CVSSv3Info
@property
def SubmitTime(self):
return self._SubmitTime
@SubmitTime.setter
def SubmitTime(self, SubmitTime):
self._SubmitTime = SubmitTime
@property
def CWEID(self):
return self._CWEID
@CWEID.setter
def CWEID(self, CWEID):
self._CWEID = CWEID
@property
def CVSSv2Vector(self):
return self._CVSSv2Vector
@CVSSv2Vector.setter
def CVSSv2Vector(self, CVSSv2Vector):
self._CVSSv2Vector = CVSSv2Vector
@property
def CVSSv3Vector(self):
return self._CVSSv3Vector
@CVSSv3Vector.setter
def CVSSv3Vector(self, CVSSv3Vector):
self._CVSSv3Vector = CVSSv3Vector
def _deserialize(self, params):
self._Category = params.get("Category")
self._CategoryType = params.get("CategoryType")
self._Description = params.get("Description")
self._OfficialSolution = params.get("OfficialSolution")
self._ReferenceList = params.get("ReferenceList")
self._DefenseSolution = params.get("DefenseSolution")
if params.get("CVSSv2Info") is not None:
self._CVSSv2Info = CVSSV2Info()
self._CVSSv2Info._deserialize(params.get("CVSSv2Info"))
if params.get("CVSSv3Info") is not None:
self._CVSSv3Info = CVSSV3Info()
self._CVSSv3Info._deserialize(params.get("CVSSv3Info"))
self._SubmitTime = params.get("SubmitTime")
self._CWEID = params.get("CWEID")
self._CVSSv2Vector = params.get("CVSSv2Vector")
self._CVSSv3Vector = params.get("CVSSv3Vector")
memeber_set = set(params.keys())
for name, value in vars(self).items():
property_name = name[1:]
if property_name in memeber_set:
memeber_set.remove(property_name)
if len(memeber_set) > 0:
warnings.warn("%s fileds are useless." % ",".join(memeber_set))
class VulnerabilitySummary(AbstractModel):
"""描述漏洞的摘要信息。
"""
def __init__(self):
r"""
:param _VulID: Vulnerability ID
:type VulID: str
:param _CVEID: CVE ID of the vulnerability
:type CVEID: str
:param _CNVDID: CNVD ID of the vulnerability
:type CNVDID: str
:param _CNNVDID: CNNVD ID of the vulnerability
:type CNNVDID: str
:param _Name: Vulnerability name
:type Name: str
:param _IsSuggest: Whether this vulnerability needs particular attention
:type IsSuggest: bool
:param _Severity: Vulnerability risk level
<li>Critical</li>
<li>High</li>
<li>Medium</li>
<li>Low</li>
:type Severity: str
"""
self._VulID = None
self._CVEID = None
self._CNVDID = None
self._CNNVDID = None
self._Name = None
self._IsSuggest = None
self._Severity = None
@property
def VulID(self):
return self._VulID
@VulID.setter
def VulID(self, VulID):
self._VulID = VulID
@property
def CVEID(self):
return self._CVEID
@CVEID.setter
def CVEID(self, CVEID):
self._CVEID = CVEID
@property
def CNVDID(self):
return self._CNVDID
@CNVDID.setter
def CNVDID(self, CNVDID):
self._CNVDID = CNVDID
@property
def CNNVDID(self):
return self._CNNVDID
@CNNVDID.setter
def CNNVDID(self, CNNVDID):
self._CNNVDID = CNNVDID
@property
def Name(self):
return self._Name
@Name.setter
def Name(self, Name):
self._Name = Name
@property
def IsSuggest(self):
return self._IsSuggest
@IsSuggest.setter
def IsSuggest(self, IsSuggest):
self._IsSuggest = IsSuggest
@property
def Severity(self):
return self._Severity
@Severity.setter
def Severity(self, Severity):
self._Severity = Severity
def _deserialize(self, params):
self._VulID = params.get("VulID")
self._CVEID = params.get("CVEID")
self._CNVDID = params.get("CNVDID")
self._CNNVDID = params.get("CNNVDID")
self._Name = params.get("Name")
self._IsSuggest = params.get("IsSuggest")
self._Severity = params.get("Severity")
memeber_set = set(params.keys())
for name, value in vars(self).items():
property_name = name[1:]
if property_name in memeber_set:
memeber_set.remove(property_name)
if len(memeber_set) > 0:
warnings.warn("%s fileds are useless." % ",".join(memeber_set))
class VulnerabilityUnion(AbstractModel):
"""描述漏洞的详细信息。
"""
def __init__(self):
r"""
:param _Summary: Vulnerability summary information
:type Summary: :class:`tencentcloud.bsca.v20210811.models.VulnerabilitySummary`
:param _Detail: Vulnerability detail information
:type Detail: :class:`tencentcloud.bsca.v20210811.models.VulnerabilityDetail`
"""
self._Summary = None
self._Detail = None
@property
def Summary(self):
return self._Summary
@Summary.setter
def Summary(self, Summary):
self._Summary = Summary
@property
def Detail(self):
return self._Detail
@Detail.setter
def Detail(self, Detail):
self._Detail = Detail
def _deserialize(self, params):
if params.get("Summary") is not None:
self._Summary = VulnerabilitySummary()
self._Summary._deserialize(params.get("Summary"))
if params.get("Detail") is not None:
self._Detail = VulnerabilityDetail()
self._Detail._deserialize(params.get("Detail"))
memeber_set = set(params.keys())
for name, value in vars(self).items():
property_name = name[1:]
if property_name in memeber_set:
memeber_set.remove(property_name)
if len(memeber_set) > 0:
warnings.warn("%s fileds are useless." % ",".join(memeber_set))
|
3314f624bd47bb4c637d26b7fa7761e7f0b5ee92
|
fe56f956d9b0fab1f8f72647a57baaf8d0dd39b7
|
/toscaparser/policy.py
|
8c9e00a0a92aca178e43ab897d307a687e5f2030
|
[
"Apache-2.0"
] |
permissive
|
openstack/tosca-parser
|
cf05510d62e97aaeed2e4948416c8cf241e975d9
|
283cb6a375070c844547e18e119bcad53e8489ca
|
refs/heads/master
| 2023-09-04T11:02:17.299078
| 2023-07-07T07:02:05
| 2023-07-12T05:39:18
| 41,282,275
| 103
| 63
|
Apache-2.0
| 2020-11-05T12:15:17
| 2015-08-24T04:40:10
|
Python
|
UTF-8
|
Python
| false
| false
| 3,406
|
py
|
policy.py
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
from toscaparser.common.exception import ExceptionCollector
from toscaparser.common.exception import UnknownFieldError
from toscaparser.entity_template import EntityTemplate
from toscaparser.reservation import Reservation
from toscaparser.triggers import Triggers
from toscaparser.utils import validateutils
SECTIONS = (TYPE, METADATA, DESCRIPTION, PROPERTIES, TARGETS, TRIGGERS,
RESERVATION) = \
('type', 'metadata', 'description',
'properties', 'targets', 'triggers', 'reservation')
log = logging.getLogger('tosca')
class Policy(EntityTemplate):
'''Policies defined in Topology template.'''
def __init__(self, name, policy, targets=None, targets_type=None,
custom_def=None):
super(Policy, self).__init__(name,
policy,
'policy_type',
custom_def)
self.meta_data = None
if self.METADATA in policy:
self.meta_data = policy.get(self.METADATA)
validateutils.validate_map(self.meta_data)
self.targets_list = targets
self.targets_type = targets_type
self.triggers = self._triggers(policy.get(TRIGGERS))
self.reservation = self._reservation(policy.get(RESERVATION))
self.properties = None
if 'properties' in policy:
self.properties = policy['properties']
self._validate_keys()
@property
def targets(self):
return self.entity_tpl.get('targets')
@property
def description(self):
return self.entity_tpl.get('description')
@property
def metadata(self):
return self.entity_tpl.get('metadata')
def get_targets_type(self):
return self.targets_type
def get_targets_list(self):
return self.targets_list
def _triggers(self, triggers):
triggerObjs = []
if triggers:
for name, trigger_tpl in triggers.items():
triggersObj = Triggers(name, trigger_tpl)
triggerObjs.append(triggersObj)
return triggerObjs
def _reservation(self, reservation):
reservationObjs = []
if reservation:
reservationObj = Reservation(reservation)
reservationObjs.append(reservationObj)
return reservationObjs
def _validate_keys(self):
for key in self.entity_tpl.keys():
if key not in SECTIONS:
ExceptionCollector.appendException(
UnknownFieldError(what='Policy "%s"' % self.name,
field=key))
def validate(self):
self._validate_properties(self.entity_tpl, self.type_definition)
for prop in self.get_properties_objects():
prop.validate()
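# Illustrative sketch (not part of the original module): a minimal policy
# definition of the shape this class consumes, e.g.
# Policy('my_placement', _EXAMPLE_POLICY_TPL, targets=['server_1', 'server_2'],
#        targets_type='node_templates').
# The type, description and target names below are placeholders.
_EXAMPLE_POLICY_TPL = {
    'type': 'tosca.policies.Placement',
    'description': 'keep the targeted nodes on the same host',
    'metadata': {'owner': 'example-team'},
    'targets': ['server_1', 'server_2'],
}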
|
7cdc090edf0724b3d518278b38561376fc53b2af
|
95b5388157d6c0b14883bfc7102d54f8c5737b0a
|
/src/opnsense/scripts/proxy/lib/__init__.py
|
5fd16676b152039c2cb0a111f17834babcf1e439
|
[
"BSD-2-Clause",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
opnsense/core
|
f071a087984a8d8b2b2e6ca152cf73645f9bc68a
|
a702cf9fb3300e125cd7acc8af3813474606e509
|
refs/heads/master
| 2023-09-01T05:20:10.627528
| 2023-08-31T13:10:59
| 2023-08-31T13:10:59
| 27,965,134
| 2,778
| 1,014
|
BSD-2-Clause
| 2023-09-14T15:45:24
| 2014-12-13T15:43:50
|
PHP
|
UTF-8
|
Python
| false
| false
| 5,774
|
py
|
__init__.py
|
"""
Copyright (c) 2020 Ad Schellevis <ad@opnsense.org>
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES,
INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY
AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
"""
import ujson
import os
import base64
import binascii
import re
import zipfile
import glob
from io import BytesIO
class ProxyTemplates:
error_config = "/usr/local/etc/squid/error_directory.in"
def __init__(self):
self._all_src_files = dict()
self._all_ovl_files = dict()
self._overlay_status = None
self._install_overlay = False
self._overlay_data = None
self._load_config()
self.load()
def _load_config(self):
""" initialize configuration
"""
if os.path.isfile(self.error_config):
error_cfg = ujson.loads(open(self.error_config, 'rb').read())
self._install_overlay = 'install' not in error_cfg or error_cfg['install'] != 'opnsense'
self._overlay_data = error_cfg['content'] if 'content' in error_cfg else None
def load(self):
""" load (custom) error pages in memory
"""
self._overlay_status = None
self._all_src_files = dict()
self._all_ovl_files = dict()
# base (OPNsense) template
for filename in glob.glob("/usr/local/opnsense/data/proxy/template_error_pages/*"):
bfilename = os.path.basename(filename)
with open(filename, "rb") as f_in:
self._all_src_files[bfilename] = f_in.read()
# when a (valid) overlay is provided, read its contents
if self._overlay_data and self._install_overlay:
try:
input_data = BytesIO(base64.b64decode(self._overlay_data))
root_dir = ""
with zipfile.ZipFile(input_data, mode='r', compression=zipfile.ZIP_DEFLATED) as zf_in:
for zf_info in zf_in.infolist():
if not root_dir and zf_info.filename.endswith('/'):
root_dir = zf_info.filename
else:
self._all_ovl_files[zf_info.filename.replace(root_dir, "")] = zf_in.read(zf_info.filename)
except binascii.Error:
self._overlay_status = 'Not a base64 encoded file'
except zipfile.BadZipFile:
self._overlay_status = 'Illegal zip file'
except IOError:
self._overlay_status = 'Error reading file'
def templates(self, overlay=False):
""" return template html files
:param overlay: consider custom theme files when applicable
:rtype: [string, bytes]
"""
for filename in self._all_src_files:
if filename.endswith('.html'):
if overlay and filename in self._all_ovl_files:
yield filename, self._all_ovl_files[filename]
else:
yield filename, self._all_src_files[filename]
def get_file(self, filename, overlay=False):
""" return file content
:param filename: source filename
:param overlay: consider custom theme files when applicable
:return: string
"""
if filename in self._all_src_files:
if overlay and filename in self._all_ovl_files:
return self._all_ovl_files[filename]
else:
return self._all_src_files[filename]
@staticmethod
def css_section(data):
""" extract css definition block from provided data
:param data: html data
:return: MatchObject
"""
return re.search(rb'(<!--[\s]*EMBED:start.*?EMBED:end[\s]*-->)', data, re.DOTALL)
def css_dependencies(self, filename, overlay=False):
""" extract css dependencies from provided filename
:param filename: source filename
:param overlay: consider custom theme files when applicable
:rtype: list
"""
data = self.get_file(filename, overlay)
if filename.endswith('.html') and data:
match = self.css_section(data)
if match:
for href in re.findall(rb"(href[\s]*=[\s]*[\"|'])(.*?)([\"|'])", match.group(0)):
yield href[1].decode()
def overlay_enabled(self):
""" when deploying files, should we consider an overlay
:return: bool
"""
return self._install_overlay
def get_overlay_status(self):
""" return validity of the installed overlay
:return: string
"""
return self._overlay_status
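# Illustrative sketch (not part of the original module): listing the shipped
# error-page templates together with their css dependencies. It assumes it
# runs on a host where the OPNsense template directory and error_directory.in
# exist; otherwise the listing is simply empty.
if __name__ == '__main__':
    _tpl = ProxyTemplates()
    _use_overlay = _tpl.overlay_enabled()
    for _name, _content in _tpl.templates(overlay=_use_overlay):
        print(_name, list(_tpl.css_dependencies(_name, overlay=_use_overlay)))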
|
183893e67668d657083587978087649db1290d5f
|
62179a165ec620ba967dbc20016e890978fbff50
|
/examples/tensorflow/common/object_detection/utils/yolo_v4_utils.py
|
b527130a850e9df3f2c5a0b612a884777cfaa37a
|
[
"Apache-2.0"
] |
permissive
|
openvinotoolkit/nncf
|
91fcf153a96f85da166aacb7a70ca4941e4ba4a4
|
c027c8b43c4865d46b8de01d8350dd338ec5a874
|
refs/heads/develop
| 2023-08-24T11:25:05.704499
| 2023-08-23T14:44:05
| 2023-08-23T14:44:05
| 263,687,600
| 558
| 157
|
Apache-2.0
| 2023-09-14T17:06:41
| 2020-05-13T16:41:05
|
Python
|
UTF-8
|
Python
| false
| false
| 13,070
|
py
|
yolo_v4_utils.py
|
# Copyright (c) 2023 Intel Corporation
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import random
import numpy as np
from PIL import Image
def rand(a=0, b=1):
return np.random.rand() * (b - a) + a
def letterbox_resize(image, target_size):
"""
Resize an image with unchanged aspect ratio using padding.
:param image: original image to be resized,
PIL Image object containing image data
:param target_size: target image size,
tuple of format (width, height).
:return new_image: resized and padded PIL Image object.
"""
src_w, src_h = image.size
target_w, target_h = target_size
# calculate padding scale and padding offset
scale = min(target_w / src_w, target_h / src_h)
padding_w = int(src_w * scale)
padding_h = int(src_h * scale)
padding_size = (padding_w, padding_h)
dx = (target_w - padding_w) // 2
dy = (target_h - padding_h) // 2
offset = (dx, dy)
# create letterbox resized image
image = image.resize(padding_size, Image.Resampling.BICUBIC)
new_image = Image.new("RGB", target_size, (128, 128, 128))
new_image.paste(image, offset)
return new_image
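# Illustrative sketch (not part of the original file): letterbox-resizing a
# dummy 640x480 image to a 416x416 model input; the sizes are arbitrary
# placeholders.
def _example_letterbox_resize():
    dummy = Image.new("RGB", (640, 480), (0, 0, 0))
    resized = letterbox_resize(dummy, (416, 416))
    return resized.size  # -> (416, 416)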
def random_resize_crop_pad(image, target_size, aspect_ratio_jitter=0.1, scale_jitter=0.7):
"""
Randomly resize an image and crop|pad it to the target size. It can
be used for data augmentation in the training data preprocessing.
:param image: original image to be resized,
PIL Image object containing image data
:param target_size: target image size,
tuple of format (width, height).
:param aspect_ratio_jitter: jitter range for random aspect ratio,
scalar to control the aspect ratio of random resized image.
:param scale_jitter: jitter range for random resize scale,
scalar to control the resize scale of random resized image.
:return new_image: target sized PIL Image object.
:return padding_size: random generated padding image size.
will be used to reshape the ground truth bounding box
:return padding_offset: random generated offset in target image padding.
will be used to reshape the ground truth bounding box
"""
target_w, target_h = target_size
# generate random aspect ratio & scale for resize
rand_aspect_ratio = (target_w / target_h * rand(1 - aspect_ratio_jitter, 1 + aspect_ratio_jitter)) / (
rand(1 - aspect_ratio_jitter, 1 + aspect_ratio_jitter)
)
rand_scale = rand(scale_jitter, 1 / scale_jitter)
# calculate random padding size and resize
if rand_aspect_ratio < 1:
padding_h = int(rand_scale * target_h)
padding_w = int(padding_h * rand_aspect_ratio)
else:
padding_w = int(rand_scale * target_w)
padding_h = int(padding_w / rand_aspect_ratio)
padding_size = (padding_w, padding_h)
image = image.resize(padding_size, Image.Resampling.BICUBIC)
# get random offset in padding image
dx = int(rand(0, target_w - padding_w))
dy = int(rand(0, target_h - padding_h))
padding_offset = (dx, dy)
# create target image
new_image = Image.new("RGB", (target_w, target_h), (128, 128, 128))
new_image.paste(image, padding_offset)
return new_image, padding_size, padding_offset
def reshape_boxes(boxes, src_shape, target_shape, padding_shape, offset, horizontal_flip=False, vertical_flip=False):
"""
Reshape bounding boxes from src_shape image to target_shape image,
usually for training data preprocessing
:param boxes: Ground truth object bounding boxes,
numpy array of shape (num_boxes, 5),
box format (xmin, ymin, xmax, ymax, cls_id).
:param src_shape: origin image shape,
tuple of format (width, height).
:param target_shape: target image shape,
tuple of format (width, height).
:param padding_shape: padding image shape,
tuple of format (width, height).
:param offset: top-left offset when padding target image.
tuple of format (dx, dy).
:param horizontal_flip: whether to do horizontal flip.
boolean flag.
:param vertical_flip: whether to do vertical flip.
boolean flag.
:return boxes: reshaped bounding box numpy array
"""
if len(boxes) > 0:
src_w, src_h = src_shape
target_w, target_h = target_shape
padding_w, padding_h = padding_shape
dx, dy = offset
# shuffle and reshape boxes
np.random.shuffle(boxes)
boxes[:, [0, 2]] = boxes[:, [0, 2]] * padding_w / src_w + dx
boxes[:, [1, 3]] = boxes[:, [1, 3]] * padding_h / src_h + dy
# horizontal flip boxes if needed
if horizontal_flip:
boxes[:, [0, 2]] = target_w - boxes[:, [2, 0]]
# vertical flip boxes if needed
if vertical_flip:
boxes[:, [1, 3]] = target_h - boxes[:, [3, 1]]
# check box coordinate range
boxes[:, 0:2][boxes[:, 0:2] < 0] = 0
boxes[:, 2][boxes[:, 2] > target_w] = target_w
boxes[:, 3][boxes[:, 3] > target_h] = target_h
# check box width and height to discard invalid box
boxes_w = boxes[:, 2] - boxes[:, 0]
boxes_h = boxes[:, 3] - boxes[:, 1]
boxes = boxes[np.logical_and(boxes_w > 1, boxes_h > 1)] # discard invalid box
return boxes
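# Illustrative sketch (not part of the original file): mapping a single
# ground-truth box from a 200x100 source image into a 416x416 letterboxed
# target with vertical padding; all numbers are made up.
def _example_reshape_boxes():
    boxes = np.array([[10.0, 20.0, 50.0, 60.0, 0.0]])
    return reshape_boxes(boxes,
                         src_shape=(200, 100),
                         target_shape=(416, 416),
                         padding_shape=(416, 208),
                         offset=(0, 104))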
def random_horizontal_flip(image, prob=0.5):
"""
Random horizontal flip for image
:param image: origin image for horizontal flip
PIL Image object containing image data
:param prob: probability for random flip,
scalar to control the flip probability.
:return image: adjusted PIL Image object.
:return flip: boolean flag for horizontal flip action
"""
flip = rand() < prob
if flip:
image = image.transpose(Image.Transpose.FLIP_LEFT_RIGHT)
return image, flip
def box_candidates(box1, box2, wh_thr=2, ar_thr=20, area_thr=0.1): # box1(4,n), box2(4,n)
# Compute candidate boxes: box1 before augment, box2 after augment, wh_thr (pixels), aspect_ratio_thr, area_ratio
w1, h1 = box1[2] - box1[0], box1[3] - box1[1]
w2, h2 = box2[2] - box2[0], box2[3] - box2[1]
ar = np.maximum(w2 / (h2 + 1e-16), h2 / (w2 + 1e-16)) # aspect ratio
return (w2 > wh_thr) & (h2 > wh_thr) & (w2 * h2 / (w1 * h1 + 1e-16) > area_thr) & (ar < ar_thr) # candidates
def merge_mosaic_bboxes(bboxes, crop_x, crop_y, image_size): # pylint: disable=R0912
# adjust & merge mosaic samples bboxes as following area order:
# -----------
# | | |
# | 0 | 3 |
# | | |
# -----------
# | 1 | 2 |
# -----------
assert bboxes.shape[0] == 4, "mosaic sample number should be 4"
max_boxes = bboxes.shape[1]
height, width = image_size
merge_bbox = []
for i in range(bboxes.shape[0]):
for box in bboxes[i]:
x_min, y_min, x_max, y_max = box[0], box[1], box[2], box[3]
if i == 0: # bboxes[0] is for top-left area
if y_min > crop_y or x_min > crop_x:
continue
if y_min < crop_y < y_max:
y_max = crop_y
if x_min < crop_x < x_max:
x_max = crop_x
if i == 1: # bboxes[1] is for bottom-left area
if y_max < crop_y or x_min > crop_x:
continue
if y_min < crop_y < y_max:
y_min = crop_y
if x_min < crop_x < x_max:
x_max = crop_x
if i == 2: # bboxes[2] is for bottom-right area
if y_max < crop_y or x_max < crop_x:
continue
if y_min < crop_y < y_max:
y_min = crop_y
if x_min < crop_x < x_max:
x_min = crop_x
if i == 3: # bboxes[3] is for top-right area
if y_min > crop_y or x_max < crop_x:
continue
if y_min < crop_y < y_max:
y_max = crop_y
if x_min < crop_x < x_max:
x_min = crop_x
if abs(x_max - x_min) < max(10, width * 0.01) or abs(y_max - y_min) < max(10, height * 0.01):
# if the adjusted bbox is too small, bypass it
continue
merge_bbox.append([x_min, y_min, x_max, y_max, box[4]])
if len(merge_bbox) > max_boxes:
merge_bbox = merge_bbox[:max_boxes]
box_data = np.zeros((max_boxes, 5))
if len(merge_bbox) > 0:
box_data[: len(merge_bbox)] = merge_bbox
return box_data
def random_mosaic_augment(image_data, boxes_data, prob=0.1):
"""
Randomly add mosaic augmentation on batch images and boxes, from YOLOv4
reference:
https://github.com/klauspa/Yolov4-tensorflow/blob/master/data.py
https://github.com/clovaai/CutMix-PyTorch
https://github.com/AlexeyAB/darknet
:param image_data: origin images for mosaic augment
numpy array for normalized batch image data
:param boxes_data: origin bboxes for mosaic augment
numpy array for batch bboxes
:param prob: probability for augmentation,
scalar to control the augmentation probability.
:return image_data: augmented batch image data.
:return boxes_data: augmented batch bboxes data.
"""
do_augment = rand() < prob
if do_augment:
batch_size = len(image_data)
assert batch_size >= 4, "mosaic augment need batch size >= 4"
def get_mosaic_samples():
# random select 4 images from batch as mosaic samples
random_index = random.sample(list(range(batch_size)), 4)
random_images = []
random_bboxes = []
for idx in random_index:
random_images.append(image_data[idx])
random_bboxes.append(boxes_data[idx])
return random_images, np.array(random_bboxes)
min_offset = 0.2
new_images = []
new_boxes = []
height, width = image_data[0].shape[:2]
# each batch has batch_size images, so we also need to
# generate batch_size mosaic images
for _ in range(batch_size):
images, bboxes = get_mosaic_samples()
# crop_x = np.random.randint(int(width*min_offset), int(width*(1 - min_offset)))
# crop_y = np.random.randint(int(height*min_offset), int(height*(1 - min_offset)))
crop_x = int(random.uniform(int(width * min_offset), int(width * (1 - min_offset)))) # nosec
crop_y = int(random.uniform(int(height * min_offset), int(height * (1 - min_offset)))) # nosec
merged_boxes = merge_mosaic_bboxes(bboxes, crop_x, crop_y, image_size=(height, width))
# no valid bboxes, drop this loop
# if merged_boxes is None:
# i = i - 1
# continue
# crop out selected area as following mosaic sample images order:
# -----------
# | | |
# | 0 | 3 |
# | | |
# -----------
# | 1 | 2 |
# -----------
area_0 = images[0][:crop_y, :crop_x, :]
area_1 = images[1][crop_y:, :crop_x, :]
area_2 = images[2][crop_y:, crop_x:, :]
area_3 = images[3][:crop_y, crop_x:, :]
# merge selected area to new image
area_left = np.concatenate([area_0, area_1], axis=0)
area_right = np.concatenate([area_3, area_2], axis=0)
merged_image = np.concatenate([area_left, area_right], axis=1)
new_images.append(merged_image)
new_boxes.append(merged_boxes)
new_images = np.stack(new_images)
new_boxes = np.array(new_boxes)
image_data = new_images
boxes_data = new_boxes
return image_data, boxes_data
def normalize_image(image):
"""
Normalize image array from 0 ~ 255
to 0.0 ~ 1.0
:param image: origin input image
numpy image array with dtype=float, 0.0 ~ 255.0
:return image: numpy image array with dtype=float, 0.0 ~ 1.0
"""
image = image / 255.0
return image
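# Illustrative sketch (not part of the original file): forcing the mosaic
# augmentation (prob=1.0) on a dummy batch of four black 416x416 images with
# room for 20 boxes each; shapes and values are placeholders.
def _example_random_mosaic():
    images = np.zeros((4, 416, 416, 3), dtype=np.float32)
    boxes = np.zeros((4, 20, 5), dtype=np.float32)
    return random_mosaic_augment(images, boxes, prob=1.0)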
|
a9308b8e3993e756d5a1bbdece20ffb0f81ec66e
|
67ca269e39935d0c439329c3a63df859e40168bb
|
/test/test_pipeline/test_metrics.py
|
0a40d84bb82905587f634a245094117c8bb3e190
|
[
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference",
"LicenseRef-scancode-philippe-de-muyter"
] |
permissive
|
automl/Auto-PyTorch
|
2e67ffb44f40d9993470ded9b63f10a5164b41df
|
56a2ac1d69c7c61a847c678879a67f5d3672b3e8
|
refs/heads/master
| 2023-07-14T22:55:57.826602
| 2022-08-23T16:43:15
| 2022-08-23T16:43:15
| 159,791,040
| 2,214
| 280
|
Apache-2.0
| 2023-04-04T14:41:15
| 2018-11-30T08:18:34
|
Python
|
UTF-8
|
Python
| false
| false
| 13,329
|
py
|
test_metrics.py
|
import numpy as np
import pytest
import sklearn.metrics
import sktime.performance_metrics.forecasting as forecasting_metrics
from autoPyTorch.constants import (
BINARY,
CONTINUOUS,
OUTPUT_TYPES_TO_STRING,
STRING_TO_TASK_TYPES,
TABULAR_CLASSIFICATION,
TABULAR_REGRESSION,
TASK_TYPES_TO_STRING,
TIMESERIES_FORECASTING
)
from autoPyTorch.metrics import (
accuracy,
balanced_accuracy,
compute_mase_coefficient,
mean_squared_error
)
from autoPyTorch.pipeline.components.training.metrics.base import (
ForecastingMetricMixin,
_ForecastingMetric,
_PredictMetric,
_ThresholdMetric,
autoPyTorchMetric,
make_metric
)
from autoPyTorch.pipeline.components.training.metrics.utils import calculate_loss, calculate_score, get_metrics
@pytest.mark.parametrize('output_type', ['multiclass',
'multiclass-multioutput',
'binary'])
def test_get_no_name_classification(output_type):
dataset_properties = {'task_type': 'tabular_classification',
'output_type': output_type}
metrics = get_metrics(dataset_properties)
for metric in metrics:
assert isinstance(metric, autoPyTorchMetric)
@pytest.mark.parametrize('output_type', ['continuous', 'continuous-multioutput'])
def test_get_no_name_regression(output_type):
dataset_properties = {'task_type': 'tabular_regression',
'output_type': output_type}
metrics = get_metrics(dataset_properties)
for metric in metrics:
assert isinstance(metric, autoPyTorchMetric)
@pytest.mark.parametrize('output_type', ['continuous', 'continuous-multioutput'])
def test_get_no_name_forecasting(output_type):
dataset_properties = {'task_type': 'time_series_forecasting',
'output_type': output_type}
metrics = get_metrics(dataset_properties)
for metric in metrics:
assert isinstance(metric, ForecastingMetricMixin)
@pytest.mark.parametrize('metric', ['accuracy', 'average_precision',
'balanced_accuracy', 'f1'])
def test_get_name(metric):
dataset_properties = {'task_type': TASK_TYPES_TO_STRING[TABULAR_CLASSIFICATION],
'output_type': OUTPUT_TYPES_TO_STRING[BINARY]}
metrics = get_metrics(dataset_properties, [metric])
for i in range(len(metrics)):
assert isinstance(metrics[i], autoPyTorchMetric)
assert metrics[i].name.lower() == metric.lower()
def test_get_name_error():
dataset_properties = {'task_type': TASK_TYPES_TO_STRING[TABULAR_CLASSIFICATION],
'output_type': OUTPUT_TYPES_TO_STRING[BINARY]}
names = ['root_mean_sqaured_error', 'average_precision']
with pytest.raises(ValueError, match=r"Invalid name entered for task [a-z]+_[a-z]+, "):
get_metrics(dataset_properties, names)
def test_classification_metrics():
# test of all classification metrics
dataset_properties = {'task_type': TASK_TYPES_TO_STRING[TABULAR_CLASSIFICATION],
'output_type': OUTPUT_TYPES_TO_STRING[BINARY]}
y_target = np.array([0, 1, 0, 1])
y_pred = np.array([0, 0, 0, 1])
metrics = get_metrics(dataset_properties=dataset_properties, all_supported_metrics=True)
score_dict = calculate_score(y_pred, y_target, STRING_TO_TASK_TYPES[dataset_properties['task_type']], metrics)
assert isinstance(score_dict, dict)
for name, score in score_dict.items():
assert isinstance(name, str)
assert isinstance(score, float)
def test_regression_metrics():
# test of all regression metrics
dataset_properties = {'task_type': TASK_TYPES_TO_STRING[TABULAR_REGRESSION],
'output_type': OUTPUT_TYPES_TO_STRING[CONTINUOUS]}
y_target = np.array([0.1, 0.6, 0.7, 0.4])
y_pred = np.array([0.6, 0.7, 0.4, 1])
metrics = get_metrics(dataset_properties=dataset_properties, all_supported_metrics=True)
score_dict = calculate_score(y_pred, y_target, STRING_TO_TASK_TYPES[dataset_properties['task_type']], metrics)
assert isinstance(score_dict, dict)
for name, score in score_dict.items():
assert isinstance(name, str)
assert isinstance(score, float)
def test_forecasting_metric():
# test of all forecasting metrics
dataset_properties = {'task_type': TASK_TYPES_TO_STRING[TIMESERIES_FORECASTING],
'output_type': OUTPUT_TYPES_TO_STRING[CONTINUOUS]}
n_prediction_steps = 5
n_seq = 2
n_targets = 2
y_target = np.zeros([n_seq, n_prediction_steps, n_targets])
y_pred = np.ones([n_seq, n_prediction_steps, n_targets])
mase_coefficient = np.ones([n_seq, n_prediction_steps, n_targets]) * 2
metrics = get_metrics(dataset_properties=dataset_properties, all_supported_metrics=True)
forecasting_kwargs = {'sp': 4,
'n_prediction_steps': n_prediction_steps,
'mase_coefficient': mase_coefficient,
}
score_dict = calculate_score(y_pred, y_target, STRING_TO_TASK_TYPES[dataset_properties['task_type']], metrics,
**forecasting_kwargs)
assert isinstance(score_dict, dict)
for name, score in score_dict.items():
assert isinstance(name, str)
assert isinstance(score, float)
forecasting_kwargs = {'sp': 4,
'n_prediction_steps': n_prediction_steps,
'mase_coefficient': np.ones([1, n_prediction_steps, n_targets]),
}
with pytest.raises(ValueError, match="the shape of MASE coefficient and target_shape must be consistent"):
score_dict = calculate_score(y_pred, y_target, STRING_TO_TASK_TYPES[dataset_properties['task_type']], metrics,
**forecasting_kwargs)
def test_predictmetric_binary():
y_true = np.array([0, 0, 1, 1])
y_pred = np.array([[1.0, 0.0], [1.0, 0.0], [0.0, 1.0], [0.0, 1.0]])
scorer = _PredictMetric(
'accuracy', sklearn.metrics.accuracy_score, 1, 0, 1, {})
score = scorer(y_true, y_pred)
assert score == pytest.approx(1.0)
y_pred = np.array([[0.0, 0.0], [0.0, 0.0], [0.0, 0.0], [0.0, 0.0]])
score = scorer(y_true, y_pred)
assert score == pytest.approx(0.5)
y_pred = np.array([[1.0, 1.0], [1.0, 1.0], [1.0, 1.0], [1.0, 1.0]])
score = scorer(y_true, y_pred)
assert score == pytest.approx(0.5)
scorer = _PredictMetric(
'bac', sklearn.metrics.balanced_accuracy_score,
1, 0, 1, {})
score = scorer(y_true, y_pred)
assert score == pytest.approx(0.5)
scorer = _PredictMetric(
'accuracy', sklearn.metrics.accuracy_score, 1, 0, -1, {})
y_pred = np.array([[1.0, 0.0], [1.0, 0.0], [0.0, 1.0], [0.0, 1.0]])
score = scorer(y_true, y_pred)
assert score == pytest.approx(-1.0)
def test_threshold_scorer_binary():
y_true = [0, 0, 1, 1]
y_pred = np.array([[1.0, 0.0], [1.0, 0.0], [0.0, 1.0], [0.0, 1.0]])
scorer = _ThresholdMetric(
'roc_auc', sklearn.metrics.roc_auc_score, 1, 0, 1, {})
score = scorer(y_true, y_pred)
assert score == pytest.approx(1.0)
y_pred = np.array([[0.0, 0.0], [0.0, 0.0], [0.0, 0.0], [0.0, 0.0]])
score = scorer(y_true, y_pred)
assert score == pytest.approx(0.5)
y_pred = np.array([[1.0, 1.0], [1.0, 1.0], [1.0, 1.0], [1.0, 1.0]])
score = scorer(y_true, y_pred)
assert score == pytest.approx(0.5)
scorer = _ThresholdMetric(
'roc_auc', sklearn.metrics.roc_auc_score, 1, 0, -1, {})
y_pred = np.array([[1.0, 0.0], [1.0, 0.0], [0.0, 1.0], [0.0, 1.0]])
score = scorer(y_true, y_pred)
assert score == pytest.approx(-1.0)
def test_forecastingcomputation():
scorer_mean = _ForecastingMetric(
'mean_mape', forecasting_metrics.mean_absolute_percentage_error, 0.0, np.finfo(np.float64).max, 1,
kwargs=dict(aggregation='mean'),
)
scorer_median = _ForecastingMetric(
'median_mape', forecasting_metrics.mean_absolute_percentage_error, 0.0, np.finfo(np.float64).max, 1,
kwargs=dict(aggregation='median'),
)
n_seq = 3
n_prediction_steps = 5
n_targets = 2
y_true = np.expand_dims(
[np.arange(n_prediction_steps) + i * 10 for i in range(n_seq)], -1
).repeat(n_targets, axis=-1)
y_pred = y_true + 1
score_mean = scorer_mean(y_true=y_true, y_pred=y_pred, sp=1, n_prediction_steps=n_prediction_steps)
score_median = scorer_median(y_true=y_true, y_pred=y_pred, sp=1, n_prediction_steps=n_prediction_steps)
score_all = []
for true_seq, pred_seq in zip(y_true, y_pred):
score_all.append(forecasting_metrics.mean_absolute_percentage_error(y_true=true_seq, y_pred=pred_seq))
assert score_mean == np.mean(score_all)
assert score_median == np.median(score_all)
# Additional parameters
horizon_weight = [0.1, 0.2, 0.3, 0.4, 0.5]
score_mean = scorer_mean(y_true=y_true, y_pred=y_pred, sp=1,
n_prediction_steps=n_prediction_steps, horizon_weight=horizon_weight)
score_all = []
for true_seq, pred_seq in zip(y_true, y_pred):
score_all.append(forecasting_metrics.mean_absolute_percentage_error(y_true=true_seq, y_pred=pred_seq,
horizon_weight=horizon_weight))
assert score_mean == np.mean(score_all)
def test_sign_flip():
y_true = np.arange(0, 1.01, 0.1)
y_pred = y_true.copy()
scorer = make_metric(
'r2', sklearn.metrics.r2_score, greater_is_better=True)
score = scorer(y_true, y_pred + 1.0)
assert score == pytest.approx(-9.0)
score = scorer(y_true, y_pred + 0.5)
assert score == pytest.approx(-1.5)
score = scorer(y_true, y_pred)
assert score == pytest.approx(1.0)
scorer = make_metric(
'r2', sklearn.metrics.r2_score, greater_is_better=False)
score = scorer(y_true, y_pred + 1.0)
assert score == pytest.approx(9.0)
score = scorer(y_true, y_pred + 0.5)
assert score == pytest.approx(1.5)
score = scorer(y_true, y_pred)
assert score == pytest.approx(-1.0)
def test_classification_only_metric():
y_true = np.array([1.0, 1.0, 1.0, 0.0, 0.0, 0.0])
y_pred = \
np.array([[0.0, 1.0], [0.0, 1.0], [0.0, 1.0], [1.0, 0.0], [1.0, 0.0], [1.0, 0.0]])
scorer = accuracy
score = calculate_score(y_true, y_pred, TABULAR_CLASSIFICATION, [scorer])
previous_score = scorer._optimum
assert score['accuracy'] == pytest.approx(previous_score)
def test_calculate_loss():
# In a 0-1 ranged scorer, make sure that the loss
# has an expected positive value
y_pred = np.array([0, 1, 0, 1, 1, 1, 0, 0, 0, 0])
y_true = np.array([0, 1, 0, 1, 1, 0, 0, 0, 0, 0])
score = sklearn.metrics.accuracy_score(y_true, y_pred)
assert pytest.approx(score) == calculate_score(
target=y_true,
prediction=y_pred,
task_type=TABULAR_CLASSIFICATION,
metrics=[accuracy],
)['accuracy']
loss = 1.0 - score
assert pytest.approx(loss) == calculate_loss(
target=y_true,
prediction=y_pred,
task_type=TABULAR_CLASSIFICATION,
metrics=[accuracy],
)['accuracy']
# Test the dictionary case
score_dict = calculate_score(
target=y_true,
prediction=y_pred,
task_type=TABULAR_CLASSIFICATION,
metrics=[accuracy, balanced_accuracy],
)
expected_score_dict = {
'accuracy': 0.9,
'balanced_accuracy': 0.9285714285714286,
}
loss_dict = calculate_loss(
target=y_true,
prediction=y_pred,
task_type=TABULAR_CLASSIFICATION,
metrics=[accuracy, balanced_accuracy],
)
for expected_metric, expected_score in expected_score_dict.items():
assert pytest.approx(expected_score) == score_dict[expected_metric]
assert pytest.approx(1 - expected_score) == loss_dict[expected_metric]
# Lastly make sure that metrics whose optimum is zero
# are also properly working
y_true = np.array([0.1, 0.2, 0.3, 0.4, 0.5, 0.6])
y_pred = np.array([0.11, 0.22, 0.33, 0.44, 0.55, 0.66])
score = sklearn.metrics.mean_squared_error(y_true, y_pred)
assert pytest.approx(score) == calculate_score(
target=y_true,
prediction=y_pred,
task_type=TABULAR_REGRESSION,
metrics=[mean_squared_error],
)['mean_squared_error']
loss = score
assert pytest.approx(loss) == calculate_loss(
target=y_true,
prediction=y_pred,
task_type=TABULAR_REGRESSION,
metrics=[mean_squared_error],
)['mean_squared_error']
def test_compute_mase_coefficient():
past_target = np.arange(12)
mase_value_1 = compute_mase_coefficient(past_target, 15)
assert mase_value_1 == 1 / np.mean(past_target)
mase_value_2 = compute_mase_coefficient(past_target, 5)
assert mase_value_2 == 0.2
past_target = np.ones(12) * 2
assert compute_mase_coefficient(past_target, 15) == 0.5
assert compute_mase_coefficient(past_target, 5) == 0.5
past_target = np.zeros(12)
assert compute_mase_coefficient(past_target, 15) == 1.
assert compute_mase_coefficient(past_target, 5) == 1.
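# Illustrative sketch (not part of the original test module): the same helpers
# exercised above compose into a small scoring snippet; the label arrays are
# made-up placeholders.
def _example_named_metric_score():
    dataset_properties = {'task_type': TASK_TYPES_TO_STRING[TABULAR_CLASSIFICATION],
                          'output_type': OUTPUT_TYPES_TO_STRING[BINARY]}
    metrics = get_metrics(dataset_properties, ['accuracy'])
    y_true = np.array([0, 1, 1, 0])
    y_pred = np.array([0, 1, 0, 0])
    return calculate_score(target=y_true,
                           prediction=y_pred,
                           task_type=STRING_TO_TASK_TYPES[dataset_properties['task_type']],
                           metrics=metrics)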
|
f06a737c39700a7ebd24ce8cd0fa4689316f5de4
|
39220ced634414fa06f3906b4d293cfbd966dd6f
|
/rf/rfstream.py
|
00a7a55c2fba8c7b9c6e240420b9d067ea1527b0
|
[
"MIT"
] |
permissive
|
trichter/rf
|
817e60823a0a54ed86c723b98a982a0407de6807
|
bccd99759559029af4d533b1ba853fea0527c6a8
|
refs/heads/master
| 2022-11-25T05:14:49.193352
| 2022-11-16T10:36:54
| 2022-11-16T10:36:54
| 8,273,581
| 103
| 74
|
MIT
| 2021-05-20T09:04:41
| 2013-02-18T17:38:22
|
Python
|
UTF-8
|
Python
| false
| false
| 28,059
|
py
|
rfstream.py
|
# -*- coding: utf-8 -*-
# Copyright 2013-2019 Tom Eulenfeld, MIT license
"""
Classes and functions for receiver function calculation.
"""
import json
from operator import itemgetter
from pkg_resources import resource_filename
import warnings
import numpy as np
from obspy import read, Stream, Trace
from obspy.core import AttribDict
from obspy.geodetics import gps2dist_azimuth
from obspy.taup import TauPyModel
from rf.deconvolve import deconvolve
from rf.simple_model import load_model
from rf.util import DEG2KM, IterMultipleComponents, _add_processing_info
def __get_event_origin_prop(h):
def wrapper(event):
try:
r = (event.preferred_origin() or event.origins[0])[h]
except IndexError:
raise ValueError('No origin')
if r is None:
raise ValueError('No origin ' + h)
if h == 'depth':
r = r / 1000
return r
return wrapper
def __get_event_magnitude(event):
try:
return (event.preferred_magnitude() or event.magnitudes[0])['mag']
except IndexError:
raise ValueError('No magnitude')
def __get_event_id(event):
evid = event.get('resource_id')
if evid is not None:
evid = str(evid)
return evid
def __SAC2UTC(stats, head):
from obspy.io.sac.util import get_sac_reftime
return get_sac_reftime(stats.sac) + stats[head]
def __UTC2SAC(stats, head):
from obspy.io.sac.util import get_sac_reftime
return stats[head] - get_sac_reftime(stats.sac)
_STATION_GETTER = (('station_latitude', itemgetter('latitude')),
('station_longitude', itemgetter('longitude')),
('station_elevation', itemgetter('elevation')))
_EVENT_GETTER = (
('event_latitude', __get_event_origin_prop('latitude')),
('event_longitude', __get_event_origin_prop('longitude')),
('event_depth', __get_event_origin_prop('depth')),
('event_magnitude', __get_event_magnitude),
('event_time', __get_event_origin_prop('time')),
('event_id', __get_event_id))
# header values which will be written to waveform formats (SAC and Q)
# H5 simply writes all stats entries
_HEADERS = (tuple(zip(*_STATION_GETTER))[0] +
tuple(zip(*_EVENT_GETTER))[0][:-1] + ( # do not write event_id
'onset', 'type', 'phase', 'moveout',
'distance', 'back_azimuth', 'inclination', 'slowness',
'pp_latitude', 'pp_longitude', 'pp_depth',
'box_pos', 'box_length'))
# The corresponding header fields in the format
# The following headers can at the moment only be stored for H5:
# slowness_before_moveout, box_lonlat, event_id
_FORMATHEADERS = {'sac': ('stla', 'stlo', 'stel', 'evla', 'evlo',
'evdp', 'mag', 'o', 'a',
'kuser0', 'kuser1', 'kuser2',
'gcarc', 'baz', 'user0', 'user1',
'user2', 'user3', 'user4',
'user5', 'user6'),
# field 'COMMENT' is overloaded with different information
'sh': ('COMMENT', 'COMMENT', 'COMMENT',
'LAT', 'LON', 'DEPTH',
'MAGNITUDE', 'ORIGIN', 'P-ONSET',
'COMMENT', 'COMMENT', 'COMMENT',
'DISTANCE', 'AZIMUTH', 'INCI', 'SLOWNESS',
'COMMENT', 'COMMENT', 'COMMENT',
'COMMENT', 'COMMENT')}
_HEADER_CONVERSIONS = {'sac': {'onset': (__SAC2UTC, __UTC2SAC),
'event_time': (__SAC2UTC, __UTC2SAC)}}
_TF = '.datetime:%Y-%m-%dT%H:%M:%S'
_H5INDEX = {
'rf': ('waveforms/{network}.{station}.{location}/{event_time%s}/' % _TF +
'{channel}_{starttime%s}_{endtime%s}' % (_TF, _TF)),
'profile': 'waveforms/{channel[2]}_{box_pos}'
}
def read_rf(pathname_or_url=None, format=None, **kwargs):
"""
Read waveform files into RFStream object.
See :func:`~obspy.core.stream.read` in ObsPy.
"""
if pathname_or_url is None: # use example file
fname = resource_filename('rf', 'example/minimal_example.tar.gz')
pathname_or_url = fname
format = 'SAC'
stream = read(pathname_or_url, format=format, **kwargs)
return RFStream(stream)
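# Illustrative sketch (not part of the original module): calling read_rf()
# without arguments falls back to the bundled minimal example archive, which
# is handy for quick experiments.
def _example_read_bundled_data():
    stream = read_rf()
    return len(stream)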
class RFStream(Stream):
"""
Class providing the necessary functions for receiver function calculation.
:param traces: list of traces, single trace or stream object
To initialize a RFStream from a Stream object use
>>> rfstream = RFStream(stream)
To initialize a RFStream from a file use
>>> rfstream = read_rf('test.SAC')
Format specific headers are loaded into the stats object of all traces.
"""
def __init__(self, traces=None):
self.traces = []
if isinstance(traces, Trace):
traces = [traces]
if traces:
for tr in traces:
if not isinstance(tr, RFTrace):
tr = RFTrace(trace=tr)
self.traces.append(tr)
def __is_set(self, header):
return all(header in tr.stats for tr in self)
def __get_unique_header(self, header):
values = set(tr.stats[header] for tr in self if header in tr.stats)
if len(values) > 1:
warnings.warn('Header %s has different values in stream.' % header)
if len(values) == 1:
return values.pop()
@property
def type(self):
"""Property for the type of stream, 'rf', 'profile' or None"""
return self.__get_unique_header('type')
@type.setter
def type(self, value):
for tr in self:
tr.stats.type = value
@property
def method(self):
"""Property for used rf method, 'P' or 'S'"""
phase = self.__get_unique_header('phase')
if phase is not None:
return phase[-1].upper()
@method.setter
def method(self, value):
for tr in self:
tr.stats.phase = value
def write(self, filename, format, sh_compat=False, **kwargs):
"""
Save stream to file including format specific headers.
See `Stream.write() <obspy.core.stream.Stream.write>` in ObsPy.
:param sh_compat: Ensure files in Q format can be read with
SeismicHandler (default: False). If set to True the COMMENT field
will be deleted which might result in loss of some metadata.
(see issue #37).
"""
if len(self) == 0:
return
for tr in self:
tr._write_format_specific_header(format, sh_compat=sh_compat)
if format.upper() == 'Q':
tr.stats.station = tr.id
if format.upper() == 'H5':
index = self.type
if index is None and 'event_time' in self[0].stats:
index = 'rf'
if index:
import obspyh5
old_index = obspyh5._INDEX
obspyh5.set_index(_H5INDEX[index])
super(RFStream, self).write(filename, format, **kwargs)
if format.upper() == 'H5' and index:
obspyh5.set_index(old_index)
if format.upper() == 'Q':
for tr in self:
tr.stats.station = tr.stats.station.split('.')[1]
def trim2(self, starttime=None, endtime=None, reftime=None, **kwargs):
"""
Alternative trim method accepting relative times.
See :meth:`~obspy.core.stream.Stream.trim`.
:param starttime,endtime: accept UTCDateTime or seconds relative to
reftime
:param reftime: reference time, can be an UTCDateTime object or a
string. The string will be looked up in the stats dictionary
(e.g. 'starttime', 'endtime', 'onset').
"""
for tr in self.traces:
t1 = tr._seconds2utc(starttime, reftime=reftime)
t2 = tr._seconds2utc(endtime, reftime=reftime)
tr.trim(t1, t2, **kwargs)
self.traces = [_i for _i in self.traces if _i.stats.npts]
return self
def slice2(self, starttime=None, endtime=None, reftime=None,
keep_empty_traces=False, **kwargs):
"""
Alternative slice method accepting relative times.
See :meth:`~obspy.core.stream.Stream.slice` and `trim2()`.
"""
traces = []
for tr in self:
t1 = tr._seconds2utc(starttime, reftime=reftime)
t2 = tr._seconds2utc(endtime, reftime=reftime)
sliced_trace = tr.slice(t1, t2, **kwargs)
if not keep_empty_traces and not sliced_trace.stats.npts:
continue
traces.append(sliced_trace)
return self.__class__(traces)
def deconvolve(self, *args, **kwargs):
"""
Deconvolve source component of stream.
All args and kwargs are passed to the function
`~rf.deconvolve.deconvolve()`.
"""
rsp = deconvolve(self, *args, **kwargs)
self.traces = rsp.traces
return self
@_add_processing_info
def rf(self, method=None, filter=None, trim=None, downsample=None,
rotate='ZNE->LQT', deconvolve='time', source_components=None,
**kwargs):
r"""
Calculate receiver functions in-place.
:param method: 'P' for P receiver functions, 'S' for S receiver
functions, if None method will be determined from the phase
:param dict filter: filter stream with its
`~obspy.core.stream.Stream.filter` method and given kwargs
:type trim: tuple (start, end)
:param trim: trim stream relative to P- or S-onset
with `trim2()` (seconds)
:param float downsample: downsample stream with its
:meth:`~obspy.core.stream.Stream.decimate` method to the given
frequency
:type rotate: 'ZNE->LQT' or 'NE->RT'
:param rotate: rotate stream with its
:meth:`~obspy.core.stream.Stream.rotate`
method with the angles given by the back_azimuth and inclination
attributes of the traces stats objects. You can set these to your
needs or let them be computed by :func:`~rf.rfstream.rfstats`.
:param deconvolve: 'time', 'waterlevel', 'iterative' or 'multitaper'
for time domain damped, frequency domain water level,
time domain iterative, or frequency domain multitaper
deconvolution using the stream's `deconvolve()` method.
See `~.deconvolve.deconvolve()`,
`.deconv_time()`, `.deconv_waterlevel()`,
`.deconv_iterative()`, and `.deconv_multitaper()`
for further documentation.
:param source_components: parameter is passed to deconvolve.
If None, source components will be chosen depending on method.
:param \*\*kwargs: all other kwargs not mentioned here are
passed to deconvolve
After performing the deconvolution the Q/R and T components are
multiplied by -1 to get a positive phase for a Moho-like positive
velocity contrast. Furthermore for method='S' all components are
mirrored at t=0 for a better comparison with P receiver functions.
See source code of this function for the default
deconvolution windows.
"""
def iter3c(stream):
return IterMultipleComponents(stream, key='onset',
number_components=(2, 3))
if method is None:
method = self.method
if method is None or method not in 'PS':
msg = "method must be one of 'P', 'S', but is '%s'"
raise ValueError(msg % method)
if source_components is None:
source_components = 'LZ' if method == 'P' else 'QR'
if filter:
self.filter(**filter)
if trim:
self.trim2(*trim, reftime='onset')
if downsample:
for tr in self:
if downsample <= tr.stats.sampling_rate:
tr.decimate(int(tr.stats.sampling_rate) // downsample)
if rotate:
for stream3c in iter3c(self):
stream3c.rotate(rotate)
# Multiply -1 on Q component, because Q component is pointing
# towards the event after the rotation with ObsPy.
# For a positive phase at a Moho-like velocity contrast,
# the Q component has to point away from the event.
# This is not necessary for the R component which points already
# away from the event.
# (compare issue #4)
for tr in self:
if tr.stats.channel.endswith('Q'):
tr.data = -tr.data
if deconvolve:
for stream3c in iter3c(self):
kwargs.setdefault('winsrc', method)
stream3c.deconvolve(method=deconvolve,
source_components=source_components,
**kwargs)
# Mirror Q/R and T component at 0s for S-receiver method for a better
# comparison with P-receiver method (converted Sp wave arrives before
# S wave, but converted Ps wave arrives after P wave)
if method == 'S':
for tr in self:
tr.data = tr.data[::-1]
tr.stats.onset = tr.stats.starttime + (tr.stats.endtime -
tr.stats.onset)
self.type = 'rf'
if self.method != method:
self.method = method
return self
@_add_processing_info
def moveout(self, phase=None, ref=6.4, model='iasp91'):
"""
In-place moveout correction to a reference slowness.
Needs stats attributes slowness and onset.
:param phase: 'Ps', 'Sp', 'Ppss' or other multiples, if None is set to
'Ps' for P receiver functions or 'Sp' for S receiver functions
:param ref: reference ray parameter in s/deg
:param model: Path to model file
(see `.SimpleModel`, default: iasp91)
"""
if phase is None:
phase = self.method + {'P': 's', 'S': 'p'}[self.method]
model = load_model(model)
model.moveout(self, phase=phase, ref=ref)
for tr in self:
tr.stats.moveout = phase
tr.stats.slowness_before_moveout = tr.stats.slowness
tr.stats.slowness = ref
return self
def ppoints(self, pp_depth, pp_phase=None, model='iasp91'):
"""
Return coordinates of piercing point calculated by 1D ray tracing.
Piercing point coordinates are stored in the
stats attributes plat and plon. Needs stats attributes
station_latitude, station_longitude, slowness and back_azimuth.
:param pp_depth: depth of interface in km
:param pp_phase: 'P' for piercing points of P wave, 'S' for piercing
points of S wave or multiples, if None will be
set to 'S' for P receiver functions or 'P' for S receiver functions
:param model: path to model file (see `.SimpleModel`, default: iasp91)
:return: NumPy array with coordinates of piercing points
"""
if pp_phase is None:
pp_phase = {'P': 'S', 'S': 'P'}[self.method]
model = load_model(model)
for tr in self:
model.ppoint(tr.stats, pp_depth, phase=pp_phase)
return np.array([(tr.stats.pp_latitude, tr.stats.pp_longitude)
for tr in self])
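    # Hedged follow-up example (depth and reference slowness are placeholders,
    # not recommendations): after computing receiver functions one might apply
    # a moveout correction and query piercing points at an assumed depth, e.g.
    #
    #     stream.moveout(phase='Ps', ref=6.4)
    #     latlon = stream.ppoints(pp_depth=50)   # 50 km is an arbitrary choice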
@_add_processing_info
def stack(self):
"""
Return stack of traces with same seed ids.
Traces with same id need to have the same number of datapoints.
Each trace in the returned stream will correspond to one unique seed
id.
"""
ids = set(tr.id for tr in self)
tr = self[0]
traces = []
for id in ids:
net, sta, loc, cha = id.split('.')
data = np.mean([tr.data for tr in self if tr.id == id], axis=0)
header = {'network': net, 'station': sta, 'location': loc,
'channel': cha, 'sampling_rate': tr.stats.sampling_rate}
for entry in ('phase', 'moveout', 'station_latitude',
'station_longitude', 'station_elevation',
'processing'):
if entry in tr.stats:
header[entry] = tr.stats[entry]
tr2 = RFTrace(data=data, header=header)
if 'onset' in tr.stats:
onset = tr.stats.onset - tr.stats.starttime
tr2.stats.onset = tr2.stats.starttime + onset
traces.append(tr2)
return self.__class__(traces)
def profile(self, *args, **kwargs):
"""
Return profile of receiver functions in the stream.
See `.profile.profile()` for help on arguments.
"""
from rf.profile import profile
return profile(self, *args, **kwargs)
def plot_rf(self, *args, **kwargs):
"""
Create receiver function plot.
See `.imaging.plot_rf()` for help on arguments.
"""
from rf.imaging import plot_rf
return plot_rf(self, *args, **kwargs)
def plot_profile(self, *args, **kwargs):
"""
Create receiver function profile plot.
See `.imaging.plot_profile()` for help on arguments.
"""
from rf.imaging import plot_profile
return plot_profile(self, *args, **kwargs)
class RFTrace(Trace):
"""
Class providing the Trace object for receiver function calculation.
"""
def __init__(self, data=np.array([]), header=None, trace=None):
if header is None:
header = {}
if trace is not None:
data = trace.data
header = trace.stats
super(RFTrace, self).__init__(data=data, header=header)
st = self.stats
        if ('_format' in st and st._format.upper() == 'Q' and
st.station.count('.') > 1):
st.network, st.station, st.location = st.station.split('.')[:3]
self._read_format_specific_header()
def __str__(self, id_length=None):
if 'onset' not in self.stats:
return super(RFTrace, self).__str__(id_length=id_length)
out = []
type_ = self.stats.get('type')
if type_ is not None:
m = self.stats.get('phase')
m = m[-1].upper() if m is not None else ''
o1 = m + 'rf'
if type_ != 'rf':
o1 = o1 + ' ' + type_
if self.id.startswith('...'):
o1 = o1 + ' (%s)' % self.id[-1]
else:
o1 = o1 + ' ' + self.id
else:
o1 = self.id
out.append(o1)
t1 = self.stats.starttime - self.stats.onset
t2 = self.stats.endtime - self.stats.onset
o2 = '%.1fs - %.1fs' % (t1, t2)
if self.stats.starttime.timestamp != 0:
o2 = o2 + ' onset:%s' % self.stats.onset
out.append(o2)
out.append('{sampling_rate} Hz, {npts} samples')
o3 = []
if 'event_magnitude' in self.stats:
o3.append('mag:{event_magnitude:.1f}')
if 'distance' in self.stats:
o3.append('dist:{distance:.1f}')
        if 'back_azimuth' in self.stats:
o3.append('baz:{back_azimuth:.1f}')
if 'box_pos' in self.stats:
o3.append('pos:{box_pos:.2f}km')
if 'slowness' in self.stats:
o3.append('slow:{slowness:.2f}')
if 'moveout' in self.stats:
o3.append('({moveout} moveout)')
if np.ma.count_masked(self.data):
o3.append('(masked)')
out.append(' '.join(o3))
return ' | '.join(out).format(**self.stats)
def _read_format_specific_header(self, format=None):
st = self.stats
if format is None:
if '_format' not in st:
return
format = st._format
format = format.lower()
if format == 'q':
format = 'sh'
try:
header_map = zip(_HEADERS, _FORMATHEADERS[format])
except KeyError:
# file format is H5 or not supported
return
for head, head_format in header_map:
try:
value = st[format][head_format]
except KeyError:
continue
else:
if format == 'sac' and '-12345' in str(value):
pass
elif format == 'sh' and head_format == 'COMMENT':
try:
st.update(json.loads(value))
except json.JSONDecodeError:
pass
continue
else:
st[head] = value
try:
convert = _HEADER_CONVERSIONS[format][head][0]
st[head] = convert(st, head)
except KeyError:
pass
def _write_format_specific_header(self, format, sh_compat=False):
st = self.stats
format = format.lower()
if format == 'q':
format = 'sh'
elif format == 'sac':
# make sure SAC reference time is set
from obspy.io.sac.util import obspy_to_sac_header
self.stats.sac = obspy_to_sac_header(self.stats)
try:
header_map = zip(_HEADERS, _FORMATHEADERS[format])
except KeyError:
# file format is H5 or not supported
return
if format not in st:
st[format] = AttribDict({})
if format == 'sh':
comment = {}
for head, head_format in header_map:
if format == 'sh' and head_format == 'COMMENT':
try:
comment[head] = st[head]
except KeyError:
pass
continue
try:
val = st[head]
except KeyError:
continue
try:
convert = _HEADER_CONVERSIONS[format][head][1]
val = convert(st, head)
except KeyError:
pass
st[format][head_format] = val
if format == 'sh':
if sh_compat:
st[format].pop('COMMENT', None)
elif len(comment) > 0:
def default(obj): # convert numpy types
return np.asarray(obj).item()
st[format]['COMMENT'] = json.dumps(
comment, separators=(',', ':'), default=default)
def _seconds2utc(self, seconds, reftime=None):
"""Return UTCDateTime given as seconds relative to reftime"""
from collections.abc import Iterable
from obspy import UTCDateTime as UTC
if isinstance(seconds, Iterable):
return [self._seconds2utc(s, reftime=reftime) for s in seconds]
if isinstance(seconds, UTC) or reftime is None or seconds is None:
return seconds
if not isinstance(reftime, UTC):
reftime = self.stats[reftime]
return reftime + seconds
def write(self, filename, format, **kwargs):
"""
Save current trace into a file including format specific headers.
See `Trace.write() <obspy.core.trace.Trace.write>` in ObsPy.
"""
RFStream([self]).write(filename, format, **kwargs)
def obj2stats(event=None, station=None):
"""
Map event and station object to stats with attributes.
:param event: ObsPy `~obspy.core.event.event.Event` object
:param station: station object with attributes latitude, longitude and
elevation
:return: ``stats`` object with station and event attributes
"""
stats = AttribDict({})
if event is not None:
for key, getter in _EVENT_GETTER:
stats[key] = getter(event)
if station is not None:
for key, getter in _STATION_GETTER:
stats[key] = getter(station)
return stats
def rfstats(obj=None, event=None, station=None,
phase='P', dist_range='default', tt_model='iasp91',
pp_depth=None, pp_phase=None, model='iasp91'):
"""
Calculate ray specific values like slowness for given event and station.
:param obj: `~obspy.core.trace.Stats` object with event and/or station
attributes. Can be None if both event and station are given.
It is possible to specify a stream object, too. Then, rfstats will be
called for each Trace.stats object and traces outside dist_range will
be discarded.
:param event: ObsPy `~obspy.core.event.event.Event` object
:param station: dictionary like object with items latitude, longitude and
elevation
:param phase: string with phase. Usually this will be 'P' or
'S' for P and S receiver functions, respectively.
:type dist_range: tuple of length 2
    :param dist_range: if epicentral distance of event is not in this interval, None
is returned by this function,\n
if phase == 'P' defaults to (30, 90),\n
if phase == 'S' defaults to (50, 85)
:param tt_model: model for travel time calculation.
(see the `obspy.taup` module, default: iasp91)
:param pp_depth: Depth for piercing point calculation
(in km, default: None -> No calculation)
:param pp_phase: Phase for pp calculation (default: 'S' for P-receiver
function and 'P' for S-receiver function)
:param model: Path to model file for pp calculation
(see `.SimpleModel`, default: iasp91)
:return: `~obspy.core.trace.Stats` object with event and station
attributes, distance, back_azimuth, inclination, onset and
slowness or None if epicentral distance is not in the given interval.
Stream instance if stream was specified instead of stats.
"""
if isinstance(obj, (Stream, RFStream)):
stream = obj
kwargs = {'event': event, 'station': station,
'phase': phase, 'dist_range': dist_range,
'tt_model': tt_model, 'pp_depth': pp_depth,
'pp_phase': pp_phase, 'model': model}
traces = []
for tr in stream:
if rfstats(tr.stats, **kwargs) is not None:
traces.append(tr)
stream.traces = traces
return stream
if dist_range == 'default' and phase.upper() in 'PS':
dist_range = (30, 90) if phase.upper() == 'P' else (50, 85)
elif dist_range == 'default':
raise ValueError('Please specify dist_range parameter')
stats = AttribDict({}) if obj is None else obj
if event is not None and station is not None:
stats.update(obj2stats(event=event, station=station))
dist, baz, _ = gps2dist_azimuth(stats.station_latitude,
stats.station_longitude,
stats.event_latitude,
stats.event_longitude)
dist = dist / 1000 / DEG2KM
if dist_range and not dist_range[0] <= dist <= dist_range[1]:
return
tt_model = TauPyModel(model=tt_model)
arrivals = tt_model.get_travel_times(stats.event_depth, dist, (phase,))
if len(arrivals) == 0:
raise Exception('TauPy does not return phase %s at distance %s' %
(phase, dist))
if len(arrivals) > 1:
msg = ('TauPy returns more than one arrival for phase %s at '
'distance %s -> take first arrival')
warnings.warn(msg % (phase, dist))
arrival = arrivals[0]
onset = stats.event_time + arrival.time
inc = arrival.incident_angle
slowness = arrival.ray_param_sec_degree
stats.update({'distance': dist, 'back_azimuth': baz, 'inclination': inc,
'onset': onset, 'slowness': slowness, 'phase': phase})
if pp_depth is not None:
model = load_model(model)
if pp_phase is None:
pp_phase = 'S' if phase.upper().endswith('P') else 'P'
model.ppoint(stats, pp_depth, phase=pp_phase)
return stats
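# Illustrative end-to-end sketch (hypothetical variable names, not part of this
# module): given an ObsPy ``event`` and a ``station`` object with latitude,
# longitude and elevation attributes, stats for raw 3-component data could be
# prepared and turned into receiver functions roughly as
#
#     stats = rfstats(event=event, station=station, phase='P')
#     stream = RFStream(raw_stream)
#     for tr in stream:
#         tr.stats.update(stats)
#     stream.rf(deconvolve='time')
#
# The exact preparation depends on how the waveform data were obtained.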
|
afb2825c409b91730913bfc84a586a191e8362c7
|
5ef7f5ba06b98319a5406dfa3b25c985257713d4
|
/IGC/common/autogen.py
|
d2306293800d480b49ef47deeefc7b57d72d9675
|
[
"MIT"
] |
permissive
|
intel/intel-graphics-compiler
|
6a1ae1a84c541e967e70324492f22c941a02e38f
|
ea522543be6d042ec80e5db8e8878be31af68938
|
refs/heads/master
| 2023-09-03T20:31:55.215461
| 2023-08-29T18:31:52
| 2023-09-02T08:55:05
| 105,299,467
| 546
| 176
|
NOASSERTION
| 2023-08-23T08:57:03
| 2017-09-29T17:27:54
|
C++
|
UTF-8
|
Python
| false
| false
| 9,495
|
py
|
autogen.py
|
# ========================== begin_copyright_notice ============================
#
# Copyright (C) 2017-2023 Intel Corporation
#
# SPDX-License-Identifier: MIT
#
# =========================== end_copyright_notice =============================
import os
import sys
import errno
import re
from typing import List, Tuple, TextIO, Callable, Generator
class DeclHeader:
# line contains the entire string of the line the decl was found on
line: str
# declName is just the identifier name
declName: str
# fields contains a list of all the names of the fields in the structure
fields: List[str]
def __init__(self, line: str, declName: str, fields: List[str]):
self.line = line
self.declName = declName
self.fields = fields
enumNames: List[DeclHeader] = []
structureNames: List[DeclHeader] = []
def parseCmdArgs() -> Tuple[str, str]:
if (len(sys.argv) != 3):
sys.exit("usage: autogen.py <path_to_MDFrameWork.h> <path_to_MDNodeFuncs.gen>")
__MDFrameWorkFile__ = sys.argv[1]
__genFile__ = sys.argv[2]
if not os.path.isfile(__MDFrameWorkFile__):
sys.exit(f"Could not find the file {__MDFrameWorkFile__}")
__genDir__ = os.path.dirname(__genFile__)
if not os.path.exists(__genDir__):
try:
os.makedirs(__genDir__)
except OSError as err:
if err.errno != errno.EEXIST:
sys.exit(f"Failed to create the directory {__genDir__}")
return __MDFrameWorkFile__ , __genFile__
def extractStructField(line: str, declHeader: DeclHeader):
if line.strip() == '':
return
vars = line.split()
if "=" in line:
declHeader.fields.append(vars[vars.index("=") - 1] + ";")
else:
declHeader.fields.append(vars[-1])
def extractEnumVal(line: str, declHeader: DeclHeader):
vars = line.split()
if len(vars) == 0 or "{" in line:
return
val = vars[0]
if val[-1] == ',':
val = val[:-1]
declHeader.fields.append(val)
def lines(s: str) -> Generator[str, None, None]:
for line in s.split('\n'):
yield line
def parseHeader(fileContents: str):
insideIGCNameSpace = False
pcount = 0
file = lines(fileContents)
for line in file:
line = line.split("//")[0]
if "namespace IGC" in line:
while "{" not in line:
line = next(file, None)
if line is None:
sys.exit('missing opening brace!')
insideIGCNameSpace = True
pcount += 1
if insideIGCNameSpace:
blockType = re.search("struct|enum", line)
if blockType:
words = line.split()
idx = 2 if 'class' in words else 1
foundDecl = DeclHeader(line, words[idx], [])
opcount = pcount
namesList = structureNames
extractFunc = extractStructField
if blockType[0] == 'enum':
namesList = enumNames
extractFunc = extractEnumVal
while True:
line = next(file, None)
if line is None:
sys.exit(f"EOF reached with unclosed enum or struct, check formatting")
line = line.split("//")[0]
pcount += line.count("{") - line.count("}")
if pcount <= opcount:
break
extractFunc(re.sub("{|}","", line), foundDecl)
assert pcount == opcount, f"Unexpected struct/enum ending, check formatting"
namesList.append(foundDecl)
elif "}" in line and "};" not in line:
insideIGCNameSpace = False
pcount -= 1
assert pcount == 0, f"EOF reached, with unclosed IGC namespace, check formatting"
def stripBlockComments(text: str) -> str:
return re.sub(r'/\*(.|\s)*?\*/', '', text)
def expandIncludes(fileName: str) -> str:
try:
file = open(fileName, 'r')
except:
sys.exit(f"Failed to open the file {fileName}")
text = file.read()
while True:
# look for includes of the form: #include "myinclude.h" // ^MDFramework^
includes: List[Tuple[str, str]] = []
for m in re.finditer(r'#include\s+"(\S+)"\s*//\s*\^MDFramework\^:\s*(\S+)', text):
include_file = os.path.basename(m.group(1))
relative_path = m.group(2)
parent_dir = os.path.dirname(fileName)
include_file_path = os.path.normpath(
os.path.join(parent_dir, relative_path, include_file))
includes.append((m.group(0), include_file_path))
if len(includes) == 0:
break
for (include_string, include_path) in includes:
try:
file = open(include_path, 'r')
except:
sys.exit(f"Failed to open the file {include_path}")
include_contents = file.read()
text = text.replace(include_string, include_contents)
return text
def printStructCalls(structDecl: DeclHeader, outputFile: TextIO):
outputFile.write(" Metadata* v[] = \n")
outputFile.write(" { \n")
outputFile.write(" MDString::get(module->getContext(), name),\n")
for item in structDecl.fields:
item = item[:-1]
outputFile.write(f" CreateNode({structDecl.declName}Var.{item}, module, ")
outputFile.write(f'"{item}"')
outputFile.write("),\n")
outputFile.write(" };\n")
outputFile.write(" MDNode* node = MDNode::get(module->getContext(), v);\n")
outputFile.write(" return node;\n")
def printEnumCalls(enumDecl: DeclHeader, outputFile: TextIO):
outputFile.write(" StringRef enumName;\n")
outputFile.write(f" switch({enumDecl.declName}Var)\n")
outputFile.write(" {\n")
for item in enumDecl.fields:
outputFile.write(f" case IGC::{enumDecl.declName}::{item}:\n")
outputFile.write(" enumName = ")
outputFile.write(f'"{item}"')
outputFile.write(";\n")
outputFile.write(" break;\n" )
outputFile.write(" }\n")
outputFile.write(" Metadata* v[] = \n")
outputFile.write(" { \n")
outputFile.write(" MDString::get(module->getContext(), name),\n")
outputFile.write(" MDString::get(module->getContext(), enumName),\n")
outputFile.write(" };\n")
outputFile.write(" MDNode* node = MDNode::get(module->getContext(), v);\n")
outputFile.write(" return node;\n")
def printStructReadCalls(structDecl: DeclHeader, outputFile: TextIO):
for item in structDecl.fields:
item = item[:-1]
outputFile.write(f" readNode({structDecl.declName}Var.{item}, node , ")
outputFile.write(f'"{item}"')
outputFile.write(");\n")
def printEnumReadCalls(enumDecl: DeclHeader, outputFile: TextIO):
outputFile.write(" StringRef s = cast<MDString>(node->getOperand(1))->getString();\n")
outputFile.write(" std::string str = s.str();\n")
outputFile.write(f" {enumDecl.declName}Var = (IGC::{enumDecl.declName})(0);\n")
for item in enumDecl.fields:
outputFile.write(f' if((str.size() == sizeof("{item}")-1) && (::memcmp(str.c_str(),')
outputFile.write(f'"{item}"')
outputFile.write(",str.size())==0))\n")
outputFile.write(" {\n")
outputFile.write(f" {enumDecl.declName}Var = IGC::{enumDecl.declName}::{item};\n")
outputFile.write(" } else\n")
outputFile.write(" {\n")
outputFile.write(f" {enumDecl.declName}Var = (IGC::{enumDecl.declName})(0);\n")
outputFile.write(" }\n")
def emitCodeBlock(names: List[DeclHeader], fmtFn: Callable[[str], str], printFn: Callable[[DeclHeader, TextIO], None], outputFile: TextIO):
for item in names:
outputFile.write(fmtFn(item.declName))
outputFile.write("{\n")
printFn(item, outputFile)
outputFile.write("}\n\n")
def emitEnumCreateNode(outputFile: TextIO):
def fmtFn(item: str):
return f"MDNode* CreateNode(IGC::{item} {item}Var, Module* module, StringRef name)\n"
emitCodeBlock(enumNames, fmtFn, printEnumCalls, outputFile)
def emitStructCreateNode(outputFile: TextIO):
def fmtFn(item: str):
return f"MDNode* CreateNode(const IGC::{item}& {item}Var, Module* module, StringRef name)\n"
emitCodeBlock(structureNames, fmtFn, printStructCalls, outputFile)
def emitEnumReadNode(outputFile: TextIO):
def fmtFn(item: str):
return f"void readNode( IGC::{item} &{item}Var, MDNode* node)\n"
emitCodeBlock(enumNames, fmtFn, printEnumReadCalls, outputFile)
def emitStructReadNode(outputFile: TextIO):
def fmtFn(item: str):
return f"void readNode( IGC::{item} &{item}Var, MDNode* node)\n"
emitCodeBlock(structureNames, fmtFn, printStructReadCalls, outputFile)
def genCode(fileName: str):
try:
outputFile = open(fileName, 'w')
except:
sys.exit(f"Failed to open the file {fileName}")
emitEnumCreateNode(outputFile)
emitStructCreateNode(outputFile)
emitEnumReadNode(outputFile)
emitStructReadNode(outputFile)
if __name__ == '__main__':
__MDFrameWorkFile__ , __genFile__ = parseCmdArgs()
expansion = expandIncludes(__MDFrameWorkFile__)
expansion = stripBlockComments(expansion)
parseHeader(expansion)
genCode(__genFile__)
|
efafc9045e057701cecdb8f94827f192845bc507
|
3c2ee998c99a693b3b04d44f8c5af0fc5fb2c49d
|
/tests/backend/integration/api/users/test_tasks.py
|
36788d3a737719bd1104c9ad20e76856da314547
|
[
"BSD-2-Clause"
] |
permissive
|
hotosm/tasking-manager
|
4520a56b31b35ebfc82a337bc7e676f1f8bc946a
|
45bf3937c74902226096aee5b49e7abea62df524
|
refs/heads/develop
| 2023-09-01T02:43:43.875659
| 2023-08-16T21:26:02
| 2023-08-29T13:15:52
| 80,733,077
| 526
| 316
|
BSD-2-Clause
| 2023-09-14T10:15:55
| 2017-02-02T14:31:35
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 8,595
|
py
|
test_tasks.py
|
from tests.backend.base import BaseTestCase
from tests.backend.helpers.test_helpers import (
create_canned_project,
generate_encoded_token,
)
from backend.models.postgis.task import Task, TaskStatus
from backend.models.postgis.statuses import ProjectStatus
class TestUsersTasksAPI(BaseTestCase):
def setUp(self):
super().setUp()
self.test_project, self.test_author = create_canned_project()
self.user_session_token = generate_encoded_token(self.test_author.id)
self.url = f"/api/v2/users/{self.test_author.id}/tasks/"
def test_returns_401_if_no_token(self):
"""Test that the API returns a 401 if no token is provided"""
# Act
response = self.client.get(self.url)
# Assert
self.assertEqual(response.status_code, 401)
def change_task_status(self, task_id, status, project_id):
"""Helper function to change the status of a task"""
task = Task.get(task_id, project_id)
if status == TaskStatus.MAPPED:
task.lock_task_for_mapping(self.test_author.id)
elif status == TaskStatus.VALIDATED:
task.lock_task_for_validating(self.test_author.id)
task.unlock_task(self.test_author.id, status)
def test_returns_200_on_success(self):
"""Test that the API returns a 200 on success"""
# Arrange
self.change_task_status(1, TaskStatus.MAPPED, self.test_project.id)
self.change_task_status(2, TaskStatus.VALIDATED, self.test_project.id)
# Act
response = self.client.get(
self.url,
headers={"Authorization": self.user_session_token},
query_string={"project_id": self.test_project.id},
)
# Assert
self.assertEqual(response.status_code, 200)
self.assertEqual(len(response.json["tasks"]), 2)
self.assertEqual(response.json["tasks"][0]["taskId"], 2)
def test_returns_paginated_results(self):
"""Test that the API returns paginated results"""
# Arrange
self.change_task_status(1, TaskStatus.MAPPED, self.test_project.id)
self.change_task_status(2, TaskStatus.VALIDATED, self.test_project.id)
# Act
response = self.client.get(
self.url,
headers={"Authorization": self.user_session_token},
query_string={"project_id": self.test_project.id, "page_size": 1},
)
# Assert
self.assertEqual(response.status_code, 200)
self.assertEqual(len(response.json["tasks"]), 1)
self.assertEqual(response.json["pagination"]["total"], 2)
self.assertEqual(response.json["pagination"]["page"], 1)
self.assertEqual(response.json["pagination"]["perPage"], 1)
self.assertEqual(response.json["pagination"]["hasNext"], True)
def test_filters_by_project_if_project_id_passed(self):
"""Test that the API filters by project if project_id is passed"""
# Arrange
test_project_2, _ = create_canned_project()
self.change_task_status(1, TaskStatus.MAPPED, self.test_project.id)
self.change_task_status(2, TaskStatus.VALIDATED, self.test_project.id)
self.change_task_status(1, TaskStatus.MAPPED, test_project_2.id)
# Act
response = self.client.get(
self.url,
headers={"Authorization": self.user_session_token},
query_string={"project_id": test_project_2.id},
)
# Assert
self.assertEqual(response.status_code, 200)
self.assertEqual(len(response.json["tasks"]), 1)
self.assertEqual(response.json["tasks"][0]["taskId"], 1)
self.assertEqual(response.json["tasks"][0]["projectId"], test_project_2.id)
self.assertEqual(
response.json["tasks"][0]["taskStatus"], TaskStatus.MAPPED.name
)
def test_filters_by_status_if_status_passed(self):
"""Test that the API filters by status if status is passed"""
# Arrange
self.change_task_status(1, TaskStatus.MAPPED, self.test_project.id)
self.change_task_status(2, TaskStatus.VALIDATED, self.test_project.id)
# Act
response = self.client.get(
self.url,
headers={"Authorization": self.user_session_token},
query_string={
"project_id": self.test_project.id,
"status": TaskStatus.MAPPED.name,
},
)
# Assert
self.assertEqual(response.status_code, 200)
self.assertEqual(len(response.json["tasks"]), 1)
self.assertEqual(response.json["tasks"][0]["taskId"], 1)
self.assertEqual(response.json["tasks"][0]["projectId"], self.test_project.id)
self.assertEqual(
response.json["tasks"][0]["taskStatus"], TaskStatus.MAPPED.name
)
def test_filters_by_project_status_if_project_status_passed(self):
"""Test that the API filters by project status if passed"""
# Arrange
test_project_2, _ = create_canned_project()
test_project_2.status = ProjectStatus.PUBLISHED.value
test_project_2.save()
self.change_task_status(1, TaskStatus.MAPPED, self.test_project.id)
self.change_task_status(1, TaskStatus.MAPPED, test_project_2.id)
# Act
response = self.client.get(
self.url,
headers={"Authorization": self.user_session_token},
query_string={"project_status": ProjectStatus.PUBLISHED.name},
)
# Assert
self.assertEqual(response.status_code, 200)
self.assertEqual(len(response.json["tasks"]), 1)
self.assertEqual(response.json["tasks"][0]["taskId"], 1)
self.assertEqual(response.json["tasks"][0]["projectId"], test_project_2.id)
def test_sorts_results_by_project_id_in_defined_order(self):
"""Test that the API sorts results by project id in defined order"""
# Arrange
test_project_2, _ = create_canned_project()
self.change_task_status(1, TaskStatus.MAPPED, self.test_project.id)
self.change_task_status(2, TaskStatus.MAPPED, self.test_project.id)
self.change_task_status(1, TaskStatus.MAPPED, test_project_2.id)
# Act
response = self.client.get(
self.url,
headers={"Authorization": self.user_session_token},
query_string={"sort_by": "project_id"},
)
# Assert
self.assertEqual(response.status_code, 200)
self.assertEqual(len(response.json["tasks"]), 3)
self.assertEqual(response.json["tasks"][0]["projectId"], self.test_project.id)
self.assertEqual(response.json["tasks"][1]["projectId"], self.test_project.id)
self.assertEqual(response.json["tasks"][2]["projectId"], test_project_2.id)
# Act
response = self.client.get(
self.url,
headers={"Authorization": self.user_session_token},
query_string={"sort_by": "-project_id"},
)
# Assert
self.assertEqual(response.status_code, 200)
self.assertEqual(len(response.json["tasks"]), 3)
self.assertEqual(response.json["tasks"][0]["projectId"], test_project_2.id)
self.assertEqual(response.json["tasks"][1]["projectId"], self.test_project.id)
self.assertEqual(response.json["tasks"][2]["projectId"], self.test_project.id)
def test_sorts_results_by_action_date_in_defined_order(self):
"""Test that the API sorts results by action date in defined order"""
# Arrange
self.change_task_status(1, TaskStatus.MAPPED, self.test_project.id)
self.change_task_status(2, TaskStatus.MAPPED, self.test_project.id)
# Act
response = self.client.get(
self.url,
headers={"Authorization": self.user_session_token},
query_string={"sort_by": "action_date"},
)
# Assert
self.assertEqual(response.status_code, 200)
self.assertEqual(len(response.json["tasks"]), 2)
self.assertEqual(response.json["tasks"][0]["taskId"], 1)
self.assertEqual(response.json["tasks"][1]["taskId"], 2)
# Act
response = self.client.get(
self.url,
headers={"Authorization": self.user_session_token},
query_string={"sort_by": "-action_date"},
)
# Assert
self.assertEqual(response.status_code, 200)
self.assertEqual(len(response.json["tasks"]), 2)
self.assertEqual(response.json["tasks"][0]["taskId"], 2)
self.assertEqual(response.json["tasks"][1]["taskId"], 1)
|
100d7eb95b7babb92e39757b808b1de6b0a7f875
|
59bb398c5f23770e4725f35f932f3a5fd013efae
|
/jwst/scripts/migrate_data.py
|
cc9d1954896ee1f585998a857a87dfc0ba1f49b0
|
[
"BSD-2-Clause"
] |
permissive
|
spacetelescope/jwst
|
9826d86781c6e01aced951882471f8b967fa1f6e
|
a4a0e8ad2b88249f01445ee1dcf175229c51033f
|
refs/heads/master
| 2023-09-04T09:54:04.504036
| 2023-08-31T20:19:27
| 2023-08-31T20:19:27
| 60,551,519
| 449
| 106
|
NOASSERTION
| 2023-09-14T21:21:33
| 2016-06-06T18:34:23
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 6,806
|
py
|
migrate_data.py
|
#!/usr/bin/env python
"""
Migrate .fits files whose format has changed between jwst package versions.
"""
import argparse
from datetime import datetime
import os
import re
import traceback
import warnings
import asdf
from astropy.io import fits
from astropy.time import Time
import numpy as np
from packaging.specifiers import SpecifierSet
from stdatamodels.jwst import datamodels
import jwst
def parse_args():
    parser = argparse.ArgumentParser('migrate_data', description='migrate .fits files whose format has changed between jwst package versions')
parser.add_argument('files', nargs='+', help='one or more .fits files')
output_group = parser.add_mutually_exclusive_group(required=True)
output_group.add_argument('--output-dir', help='write modified files to an output directory')
output_group.add_argument('--in-place', help='modify files in-place', action='store_true')
return parser.parse_args()
# If there get to be many of these we may want to move
# them to jwst.datamodels somewhere:
def migrate_mt_table_1_2_2(hdul):
"""moving_target.schema has been filled out with actual data.
"""
schema = asdf.schema.load_schema('http://stsci.edu/schemas/jwst_datamodel/moving_target.schema')
dtype = asdf.tags.core.ndarray.asdf_datatype_to_numpy_dtype(schema['properties']['moving_target']['datatype'])
renamed_columns = {
'moving_target_Dec': 'mt_apparent_Dec',
'moving_target_RA': 'mt_apparent_RA',
'moving_target_x': 'mt_sci_x',
'moving_target_y': 'mt_sci_y',
'mt_x_helio': 'mt_apparent_x_helio',
'mt_y_helio': 'mt_apparent_y_helio',
'mt_z_helio': 'mt_apparent_z_helio',
'mt_x_jwst': 'mt_apparent_x_jwst',
'mt_y_jwst': 'mt_apparent_y_jwst',
'mt_z_jwst': 'mt_apparent_z_jwst',
'mt_jwst_distance': 'mt_apparent_jwst_distance',
'mt_sun_distance': 'mt_apparent_sun_distance',
'phase_angle': 'apparent_phase_angle',
}
for hdu in hdul:
if hdu.name == 'MOVING_TARGET_POSITION':
new_data = np.zeros(hdu.data.shape, dtype=dtype)
for column_name in hdu.data.dtype.names:
new_data[renamed_columns.get(column_name, column_name)] = hdu.data[column_name]
# Convert from MJD to ISO
time_data = Time(hdu.data['time'], format='mjd', scale='utc')
new_data['time'] = [t.isot for t in time_data]
hdu.data = new_data
def migrate_mt_table_1_4_0(hdul):
"""moving_target.schema has been filled out with actual data.
"""
schema = asdf.schema.load_schema('http://stsci.edu/schemas/jwst_datamodel/moving_target.schema')
dtype = asdf.tags.core.ndarray.asdf_datatype_to_numpy_dtype(schema['properties']['moving_target']['datatype'])
renamed_columns = {
'mt_detector_x': 'mt_sci_x',
'mt_detector_y': 'mt_sci_y',
}
for hdu in hdul:
if hdu.name == 'MOVING_TARGET_POSITION':
new_data = np.zeros(hdu.data.shape, dtype=dtype)
for column_name in hdu.data.dtype.names:
new_data[renamed_columns.get(column_name, column_name)] = hdu.data[column_name]
hdu.data = new_data
def migrate_spec_table_1_1_0(hdul):
"""
spectable.schema added additional columns and renamed
two columns.
"""
schema = asdf.schema.load_schema('http://stsci.edu/schemas/jwst_datamodel/spectable.schema')
dtype = asdf.tags.core.ndarray.asdf_datatype_to_numpy_dtype(schema['datatype'])
renamed_columns = {
'ERROR': 'FLUX_ERROR',
'BERROR': 'BKGD_ERROR',
}
for hdu in hdul:
if hdu.name == 'EXTRACT1D':
new_data = np.zeros(hdu.data.shape, dtype=dtype)
for column_name in hdu.data.dtype.names:
new_data[renamed_columns.get(column_name, column_name)] = hdu.data[column_name]
hdu.data = new_data
# The first key is a model class name, the second
# a jwst package version specifier. The value
# is a method that accepts an HDUList and modifies
# it in-place.
_MIGRATE_METHODS = {
'Level1bModel': {
'> 1.2.1, <= 1.3.3': migrate_mt_table_1_4_0,
},
'MultiSpecModel': {
'> 0.13.1, <= 1.1.0': migrate_spec_table_1_1_0,
},
'SpecModel': {
'> 0.13.1, <= 1.1.0': migrate_spec_table_1_1_0,
},
}
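# Version matching uses packaging's SpecifierSet. Illustrative check (not part
# of this script's runtime behaviour):
#
#     >>> "1.0.0" in SpecifierSet("> 0.13.1, <= 1.1.0")
#     True
#
# so a SpecModel file written by jwst 1.0.0 is routed to
# migrate_spec_table_1_1_0 below.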
def migrate_file(filename, args):
if args.in_place:
mode = 'update'
else:
mode = 'readonly'
with fits.open(filename, memmap=False, mode=mode) as hdul:
model_type = hdul[0].header.get('DATAMODL')
jwst_version = hdul[0].header.get('CAL_VER')
if not (model_type and jwst_version):
print(f'Unable to migrate {filename}: DATAMODL and CAL_VER keywords are required')
return
match = re.match(r'^[0-9]+\.[0-9]+\.[0-9]+', jwst_version)
if match is None:
print(f'Unable to migrate {filename}: CAL_VER not understood')
return
jwst_version = match.group(0)
if model_type not in _MIGRATE_METHODS:
print(f'Migration for {filename} DATAMODL {model_type} not implemented')
return
with warnings.catch_warnings():
warnings.simplefilter('ignore')
exception_raised = False
try:
getattr(datamodels, model_type)(hdul, strict_validation=True)
except Exception:
exception_raised = True
if not exception_raised:
print(f'{filename} is already valid')
return
migrate_method = next((m for s, m in _MIGRATE_METHODS[model_type].items() if jwst_version in SpecifierSet(s)), None)
if migrate_method is None:
print(f'Migration for {filename} CAL_VER {jwst_version} not implemented')
return
migrate_method(hdul)
hdul[0].header['HISTORY'] = f'Migrated with jwst {jwst.__version__} migrate_data script {datetime.utcnow().isoformat()}'
try:
getattr(datamodels, model_type)(hdul, strict_validation=True)
except Exception:
print(f'Migration for {filename} failed to produce a valid model:\n')
traceback.print_exc()
return
if args.in_place:
hdul.flush()
else:
output_filename = os.path.join(args.output_dir, os.path.basename(filename))
hdul.writeto(output_filename, checksum=True, overwrite=True)
def main():
args = parse_args()
if args.output_dir:
os.makedirs(args.output_dir, exist_ok=True)
for file in args.files:
try:
migrate_file(file, args)
except Exception:
print(f'Error migrating {file}:\n')
traceback.print_exc()
if __name__ == '__main__':
main()
|
925cc34a86ca5948a30c7203071fb271711d436e
|
9efca95a55cb4df52d895d42f1ec10331516a734
|
/tools/c7n_mailer/c7n_mailer/gcp_mailer/gcp_queue_processor.py
|
f86a01e808e4e698190b65329ecf04b924c67d6f
|
[
"Apache-2.0"
] |
permissive
|
cloud-custodian/cloud-custodian
|
519e602abe00c642786441b64cc40857ef5bc9de
|
27563cf4571040f923124e1acb2463f11e372225
|
refs/heads/main
| 2023-09-04T10:54:55.963703
| 2023-09-01T17:40:17
| 2023-09-01T17:40:17
| 52,837,350
| 3,327
| 1,096
|
Apache-2.0
| 2023-09-14T14:03:30
| 2016-03-01T01:11:20
|
Python
|
UTF-8
|
Python
| false
| false
| 3,052
|
py
|
gcp_queue_processor.py
|
# Copyright The Cloud Custodian Authors.
# SPDX-License-Identifier: Apache-2.0
"""
Google Queue Message Processing
===============================
"""
import base64
import json
import zlib
from c7n_mailer.target import MessageTargetMixin
try:
from c7n_gcp.client import Session
except ImportError:
raise ImportError(
"c7n-mailer is configured for GCP Pub/Sub, which requires additional packages. "
"Run 'pip install c7n-mailer[gcp]' to install them."
)
MAX_MESSAGES = 1000
class MailerGcpQueueProcessor(MessageTargetMixin):
def __init__(self, config, logger, session=None):
self.config = config
self.logger = logger
self.subscription = self.config["queue_url"]
self.session = session or Session()
self.client = self.session.client("pubsub", "v1", "projects.subscriptions")
def run(self):
self.logger.info("Downloading messages from the GCP PubSub Subscription.")
# Get first set of messages to process
messages = self.receive_messages()
while messages and len(messages["receivedMessages"]) > 0:
# Discard_date is the timestamp of the last published message in the messages list
# and will be the date we need to seek to when we ack_messages
discard_date = messages["receivedMessages"][-1]["message"]["publishTime"]
# Process received messages
for message in messages["receivedMessages"]:
self.process_message(message, discard_date)
# Acknowledge and purge processed messages then get next set of messages
self.ack_messages(discard_date)
messages = self.receive_messages()
self.logger.info("No messages left in the gcp topic subscription, now exiting c7n_mailer.")
    # This function, when processing gcp pubsub messages, delivers messages over email.
    # It also supports delivery to Datadog and Slack.
def process_message(self, encoded_gcp_pubsub_message, publish_date):
pubsub_message = self.unpack_to_dict(encoded_gcp_pubsub_message["message"]["data"])
self.handle_targets(pubsub_message, publish_date, email_delivery=True, sns_delivery=False)
return True
def receive_messages(self):
"""Receive messsage(s) from subscribed topic"""
return self.client.execute_command(
"pull",
{
"subscription": self.subscription,
"body": {"returnImmediately": True, "max_messages": MAX_MESSAGES},
},
)
def ack_messages(self, discard_datetime):
"""Acknowledge and Discard messages up to datetime using seek api command"""
return self.client.execute_command(
"seek", {"subscription": self.subscription, "body": {"time": discard_datetime}}
)
@staticmethod
def unpack_to_dict(encoded_gcp_pubsub_message):
"""Returns a message as a dict that been base64 decoded"""
return json.loads(zlib.decompress(base64.b64decode(encoded_gcp_pubsub_message)))
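# Sketch of the inverse packing step (hypothetical publisher side, not part of
# this module): a payload dict is JSON-encoded, zlib-compressed and
# base64-encoded before being published, so that unpack_to_dict() restores it:
#
#     import base64, json, zlib
#     packed = base64.b64encode(zlib.compress(json.dumps(payload).encode("utf-8")))
#     assert MailerGcpQueueProcessor.unpack_to_dict(packed) == payload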
|
1a18d6a3a680157cc10560d6479001bb808241c0
|
c4e93e5e88b9a38fb1b5e4a398810c9e165df80f
|
/devicedefender/script/DDTestRun.py
|
601cd88a187ffa1ef5f43e8d82d7e16469c11f2a
|
[
"Apache-2.0"
] |
permissive
|
aws/aws-iot-device-sdk-cpp-v2
|
c7c441024e19a24d3ebf4cc9f05c2ed15a4c2b89
|
1ddf4ab749155abe195acab9f9505791a93b9b45
|
refs/heads/main
| 2023-09-03T15:05:45.057875
| 2023-09-01T20:33:54
| 2023-09-01T20:33:54
| 157,451,948
| 148
| 108
|
Apache-2.0
| 2023-09-01T20:33:56
| 2018-11-13T21:51:08
|
C++
|
UTF-8
|
Python
| false
| false
| 9,165
|
py
|
DDTestRun.py
|
import boto3
import uuid
import os
import subprocess
import platform
# On something other than Linux? Pass the test instantly since Device Defender is only supported on Linux
if platform.system() != "Linux":
print("[Device Defender]Info: Skipped Test - " + platform.system() +
" not supported (Only Linux supported currently)")
exit(0)
##############################################
# Clean up the test thing and its certificate
def delete_thing_with_certi(thingName, certiId, certiArn):
client.detach_thing_principal(
thingName=thingName,
principal=certiArn)
client.update_certificate(
certificateId=certiId,
newStatus='INACTIVE')
client.delete_certificate(certificateId=certiId, forceDelete=True)
client.delete_thing(thingName=thingName)
print("[Device Defender]Info: Deleted thing with name: " + thingName)
##############################################
# Initialize variables
# create aws client
client = boto3.client('iot', region_name='us-east-1')
# create an temporary certificate/key file path
certificate_path = os.path.join(os.getcwd(), 'certificate.pem.crt')
key_path = os.path.join(os.getcwd(), 'private.pem.key')
# Other variables
metrics_added = []
thing_arn = None
client_made_thing = False
client_made_policy = False
##############################################
# create a test thing
thing_name = "DDTest_" + str(uuid.uuid4())
try:
# create_thing_response:
# {
# 'thingName': 'string',
# 'thingArn': 'string',
# 'thingId': 'string'
# }
print("[Device Defender]Info: Started to create thing...")
create_thing_response = client.create_thing(
thingName=thing_name
)
thing_arn = create_thing_response["thingArn"]
client_made_thing = True
except Exception as e:
print("[Device Defender]Error: Failed to create thing: " + thing_name)
exit(-1)
##############################################
# create certificate and keys used for testing
try:
print("[Device Defender]Info: Started to create certificate...")
# create_cert_response:
# {
# 'certificateArn': 'string',
# 'certificateId': 'string',
# 'certificatePem': 'string',
# 'keyPair':
# {
# 'PublicKey': 'string',
# 'PrivateKey': 'string'
# }
# }
create_cert_response = client.create_keys_and_certificate(
setAsActive=True
)
# write certificate to file
f = open(certificate_path, "w")
f.write(create_cert_response['certificatePem'])
f.close()
# write private key to file
f = open(key_path, "w")
f.write(create_cert_response['keyPair']['PrivateKey'])
f.close()
except:
client.delete_thing(thingName=thing_name)
print("[Device Defender]Error: Failed to create certificate.")
exit(-1)
##############################################
# Create policy
try:
print("[Device Defender]Info: Started to create policy...")
# {
# 'policyName': 'string',
# 'policyArn': 'string',
# 'policyDocument': 'string',
# 'policyVersionId': 'string'
# }
# We only need a short section of the thing arn
thing_arn_split = thing_arn.split(":")
thing_arn_short = thing_arn_split[0] + ':' + thing_arn_split[1] + ':' + thing_arn_split[2] + ':' + thing_arn_split[3] + ":" + thing_arn_split[4]
policy_document_json = (
'{'
'"Version": "2012-10-17",'
'"Statement": ['
'{'
'"Effect": "Allow",'
'"Action": ['
'"iot:Publish",'
'"iot:Subscribe",'
'"iot:RetainPublish"'
'],'
f'"Resource": "{thing_arn_short}:*/$aws/things/*/defender/metrics/*"'
'},'
'{'
'"Effect": "Allow",'
'"Action": "iot:Connect",'
f'"Resource": "{thing_arn_short}:client/*"'
'}'
']'
'}'
)
create_policy_response = client.create_policy(
policyName=thing_name + "_policy",
policyDocument=policy_document_json
)
client_made_policy = True
except Exception as e:
if client_made_thing:
client.delete_thing(thingName=thing_name)
if client_made_policy:
client.delete_policy(policyName=thing_name + "_policy")
print("[Device Defender]Error: Failed to create policy.")
exit(-1)
##############################################
##############################################
# attach certification to thing
certificate_id = None
certificate_arn = None
try:
print("[Device Defender]Info: Attach policy to certificate...")
    # attach the policy to the certificate
client.attach_policy(
policyName=thing_name + "_policy",
target=create_cert_response["certificateArn"]
)
print("[Device Defender]Info: Attach certificate to test thing...")
    # attach the certificate to the thing
client.attach_thing_principal(
thingName=thing_name,
principal=create_cert_response['certificateArn']
)
certificate_arn = create_cert_response['certificateArn']
certificate_id = create_cert_response['certificateId']
except:
if certificate_id:
delete_thing_with_certi(thing_name, certificate_id, certificate_arn)
else:
client.delete_thing(thingName=thing_name)
if client_made_policy:
client.delete_policy(policyName=thing_name + "_policy")
print("[Device Defender]Error: Failed to attach certificate.")
exit(-1)
##############################################
# Run device defender
try:
# Get the Device Defender endpoint
endpoint_response = client.describe_endpoint(
endpointType='iot:Data-ATS')["endpointAddress"]
print("[Device Defender]Info: Adding custom metrics...")
metrics_to_add = [
{"name": "CustonNumber", "display_name": "DD Custom Number", "type": "number"},
{"name": "CustomNumberTwo",
"display_name": "DD Custom Number 2", "type": "number"},
{"name": "CustomNumberList",
"display_name": "DD Custom Number List", "type": "number-list"},
{"name": "CustomStringList",
"display_name": "DD Custom String List", "type": "string-list"},
{"name": "CustomIPList", "display_name": "DD Custom IP List",
"type": "ip-address-list"},
{"name": "cpu_usage", "display_name": "DD Cpu Usage", "type": "number"},
{"name": "memory_usage", "display_name": "DD Memory Usage", "type": "number"},
{"name": "process_count", "display_name": "DD Process count", "type": "number"}
]
for current_metric in metrics_to_add:
try:
client.create_custom_metric(
metricName=current_metric["name"],
displayName=current_metric["display_name"],
metricType=current_metric["type"],
tags=[]
)
metrics_added.append(current_metric["name"])
print("[Device Defender]Info: Metric with name: " +
current_metric["name"] + " added.")
except:
print("[Device Defender]Info: Metric with name: " + current_metric["name"] +
" already present. Skipping and will not delete...")
continue
print("[Device Defender]Info: Running sample (this should take ~60 seconds).")
# Run the sample:
exe_path = "build/samples/device_defender/basic_report/"
# If running locally, comment out the line above and uncomment the line below:
#exe_path = "samples/device_defender/basic_report/build/"
# Windows has a different build folder structure, but this ONLY runs on Linux currently so we do not need to worry about it
exe_path = os.path.join(exe_path, "basic-report")
print("[Device Defender]Info: Start to run: " + exe_path)
# The Device Defender sample will take ~1 minute to run even if successful
# (since samples are sent every minute)
arguments = [exe_path, "--endpoint", endpoint_response, "--cert",
certificate_path, "--key", key_path, "--thing_name", thing_name, "--count", "2"]
result = subprocess.run(arguments, timeout=60*2, check=True)
print("[Device Defender]Info: Sample finished running.")
# There does not appear to be any way to get the metrics from the device - so we'll assume that if it didn't return -1, then it worked
# delete custom metrics we added
for metric_name in metrics_added:
client.delete_custom_metric(metricName=metric_name)
# Delete
delete_thing_with_certi(thing_name, certificate_id, certificate_arn)
client.delete_policy(policyName=thing_name + "_policy")
except Exception as e:
# delete custom metrics we added
for metric_name in metrics_added:
client.delete_custom_metric(metricName=metric_name)
if client_made_thing:
delete_thing_with_certi(thing_name, certificate_id, certificate_arn)
if client_made_policy:
client.delete_policy(policyName=thing_name + "_policy")
print("[Device Defender]Error: Failed to test: Basic Report")
exit(-1)
print("[Device Defender]Info: Basic Report sample test passed")
exit(0)
|
34e98a8b560f35ba6130efccb2bd9101402c4e6a
|
307d3837d31f9e3728af2b62ca51ebf63fe6ec6b
|
/wlwl1011/BOJ/DP/DP_12015.py
|
8d011a60a6e085d31b73613b5e902a8f01bb496d
|
[] |
no_license
|
ellynhan/challenge100-codingtest-study
|
905043497d154b8a7333ca536e536d013f6e7454
|
bcdc6d04f13b12ba80b42e066f9d244d7c2cc698
|
refs/heads/master
| 2023-09-01T14:10:13.481013
| 2023-08-27T14:38:52
| 2023-08-27T14:38:52
| 401,561,230
| 162
| 176
| null | 2023-09-09T14:56:25
| 2021-08-31T03:30:36
|
C++
|
UTF-8
|
Python
| false
| false
| 462
|
py
|
DP_12015.py
|
import sys
input = sys.stdin.readline
n = int(input())
cases = list(map(int, input().split()))
lis = [-1000000001]
for case in cases:
#print(lis)
if lis[-1]<case:
lis.append(case)
else:
left = 0
right = len(lis)
while left<right:
mid = (right+left)//2
if lis[mid]<case:
left = mid+1
else:
right = mid
lis[right] = case
print(len(lis)-1)
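# The script computes the length of the longest strictly increasing
# subsequence in O(n log n): `lis` keeps, for each length, the smallest
# possible tail value, and every element either extends `lis` or replaces the
# first tail >= it, found by binary search. Illustrative trace (hypothetical
# input, not from the judge's data):
#
#     cases = [10, 20, 10, 30]
#     lis: [-1000000001, 10] -> [..., 10, 20] -> [..., 10, 20] -> [..., 10, 20, 30]
#     answer: len(lis) - 1 = 3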
|
d7d633a99b3fb855ee64ad2ab9a4e8844e97a5c0
|
f8c5b73c9706470c4dd60d523096e18bc448a960
|
/certbot-ci/certbot_integration_tests/utils/constants.py
|
5aabe379ac9a4c0b89519be31f67829b409d9172
|
[
"MIT",
"Apache-2.0"
] |
permissive
|
certbot/certbot
|
14ab43d76fcf0242d875d551f0d98334c43e7957
|
b1978ff18837e40d16eedf2090330af53d8ceaa5
|
refs/heads/master
| 2023-09-04T00:37:03.739195
| 2023-08-26T23:19:38
| 2023-08-26T23:19:38
| 26,516,210
| 18,581
| 3,265
|
NOASSERTION
| 2023-09-12T15:18:59
| 2014-11-12T02:52:20
|
Python
|
UTF-8
|
Python
| false
| false
| 562
|
py
|
constants.py
|
"""Some useful constants to use throughout certbot-ci integration tests"""
DEFAULT_HTTP_01_PORT = 5002
BOULDER_HTTP_01_PORT = 80
TLS_ALPN_01_PORT = 5001
CHALLTESTSRV_PORT = 8055
BOULDER_V2_CHALLTESTSRV_URL = f'http://10.77.77.77:{CHALLTESTSRV_PORT}'
BOULDER_V2_DIRECTORY_URL = 'http://localhost:4001/directory'
PEBBLE_DIRECTORY_URL = 'https://localhost:14000/dir'
PEBBLE_MANAGEMENT_URL = 'https://localhost:15000'
PEBBLE_CHALLTESTSRV_URL = f'http://localhost:{CHALLTESTSRV_PORT}'
MOCK_OCSP_SERVER_PORT = 4002
PEBBLE_ALTERNATE_ROOTS = 2
MAX_SUBPROCESS_WAIT = 120
|
450dea56a1f3ab5aab0c8ca58f5505345dfcc01a
|
0f2b08b31fab269c77d4b14240b8746a3ba17d5e
|
/tools/ci_build/reduce_op_kernels.py
|
6b73b1e063e58ceeb41c3982891079e1cbaeecf6
|
[
"MIT"
] |
permissive
|
microsoft/onnxruntime
|
f75aa499496f4d0a07ab68ffa589d06f83b7db1d
|
5e747071be882efd6b54d7a7421042e68dcd6aff
|
refs/heads/main
| 2023-09-04T03:14:50.888927
| 2023-09-02T07:16:28
| 2023-09-02T07:16:28
| 156,939,672
| 9,912
| 2,451
|
MIT
| 2023-09-14T21:22:46
| 2018-11-10T02:22:53
|
C++
|
UTF-8
|
Python
| false
| false
| 14,490
|
py
|
reduce_op_kernels.py
|
#!/usr/bin/env python3
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
import argparse
import io
import re
import shutil
import sys
import typing
from pathlib import Path
import op_registration_utils
from logger import get_logger
# directory containing the reduced op files, relative to the build directory
OP_REDUCTION_DIR = "op_reduction.generated"
# add the path to tools/python so we can import the config parsing and type reduction processing
SCRIPT_DIR = Path(__file__).parent.resolve()
ORT_ROOT = SCRIPT_DIR.parents[1]
sys.path.append(str(ORT_ROOT / "tools" / "python"))
from util import parse_config # noqa: E402
from util.ort_format_model.operator_type_usage_processors import OpTypeImplFilterInterface # noqa: E402
log = get_logger("reduce_op_kernels")
def _adapt_filters_for_extended_minimal_build(
base_required_ops: typing.Optional[dict], base_op_type_impl_filter: typing.Optional[OpTypeImplFilterInterface]
):
"""
Adapts the values returned by parse_config() for an extended minimal build or higher.
In particular:
- Includes ONNX ops needed by layout transformation
- Includes MS ops needed by NHWC optimizer
"""
# graph transformations in an extended minimal build require certain ops to be available
extended_minimal_build_required_op_ids = set() # set of (domain, optype, opset)
with open(
ORT_ROOT / "onnxruntime/core/optimizer/layout_transformation/layout_transformation_potentially_added_ops.h",
) as f:
region_boundary_pattern = re.compile(r"@@region_(begin|end)\(extended_minimal_build_required_kernels\)@@")
op_id_pattern = re.compile(
r'OpIdentifierWithStringViews{(?P<domain>\w+),\s+"(?P<optype>\w+)",\s+(?P<opset>\d+)}'
)
in_region = False
for line in f:
region_boundary_match = region_boundary_pattern.search(line)
if region_boundary_match:
in_region = region_boundary_match.group(1) == "begin"
continue
if not in_region:
continue
op_id_match = op_id_pattern.search(line)
if op_id_match:
domain = op_registration_utils.map_ort_constant_to_domain(
op_id_match.group("domain"), allow_unknown_constant=False
)
optype = op_id_match.group("optype")
opset = int(op_id_match.group("opset"))
extended_minimal_build_required_op_ids.add((domain, optype, opset))
adapted_required_ops = None
if base_required_ops is not None:
adapted_required_ops = base_required_ops.copy()
for domain, optype, opset in extended_minimal_build_required_op_ids:
adapted_required_ops.setdefault(domain, dict()).setdefault(opset, set()).add(optype)
adapted_op_type_impl_filter = None
if base_op_type_impl_filter is not None:
class _AdaptedFilter(OpTypeImplFilterInterface):
def __init__(
self,
filter_to_adapt: OpTypeImplFilterInterface,
required_domain_and_optypes: typing.Set[typing.Tuple[str, str]],
):
self.filter_to_adapt = filter_to_adapt
self.required_domain_and_optypes = required_domain_and_optypes
def is_typed_registration_needed(self, domain: str, optype: str, type_registration_str: str):
# Always require registration for ops in self.required_domain_and_optypes.
if (domain, optype) in self.required_domain_and_optypes:
return True
return self.filter_to_adapt.is_typed_registration_needed(domain, optype, type_registration_str)
def get_cpp_entries(self):
# The required types for ops in self.required_optypes must be specified in the C++ implementation.
# Doing that also accounts for globally allowed types.
# We don't need to do anything special with the allowed type overrides here.
return self.filter_to_adapt.get_cpp_entries()
adapted_op_type_impl_filter = _AdaptedFilter(
base_op_type_impl_filter,
{(domain, optype) for (domain, optype, opset) in extended_minimal_build_required_op_ids},
)
return (adapted_required_ops, adapted_op_type_impl_filter)
class _ExcludingRegistrationProcessor(op_registration_utils.RegistrationProcessor):
"""Registration processor that excludes registrations and writes the result to an output file."""
def __init__(
self,
required_ops: typing.Optional[dict],
op_type_impl_filter: typing.Optional[OpTypeImplFilterInterface],
output_file: io.TextIOWrapper,
):
self._required_ops = required_ops
self._op_type_impl_filter = op_type_impl_filter
self._output_file = output_file
def _is_op_required(
self, domain: str, operator: str, start_version: int, end_version: typing.Optional[int]
) -> bool:
"""See if an op is required."""
if self._required_ops is None:
return True
if domain not in self._required_ops:
return False
for opset in self._required_ops[domain]:
if opset >= start_version and (end_version is None or opset <= end_version):
if operator in self._required_ops[domain][opset]:
return True
return False
def process_registration(
self,
lines: typing.List[str],
constant_for_domain: str,
operator: str,
start_version: int,
end_version: typing.Optional[int] = None,
type: typing.Optional[str] = None,
):
registration_identifier = "{}:{}({}){}".format(
constant_for_domain, operator, start_version, f"<{type}>" if type else ""
)
# convert from the ORT constant name to the domain string used in the config
domain = op_registration_utils.map_ort_constant_to_domain(constant_for_domain, allow_unknown_constant=False)
exclude = False
reason = ""
if domain is not None:
if not self._is_op_required(domain, operator, start_version, end_version):
exclude = True
reason = "Entire op is not required."
if not exclude and type is not None and self._op_type_impl_filter is not None:
if not self._op_type_impl_filter.is_typed_registration_needed(domain, operator, type):
exclude = True
reason = "Specific typed registration is not required."
else:
log.warning(f"Keeping {registration_identifier} registration from unknown domain: {constant_for_domain}")
if exclude:
log.info(f"Disabling {registration_identifier} registration: {reason}")
for line in lines:
self._output_file.write("// " + line)
# edge case of last entry in table where we still need the terminating }; to not be commented out
if lines[-1].rstrip().endswith("};"):
self._output_file.write("};\n")
else:
for line in lines:
self._output_file.write(line)
def process_other_line(self, line):
self._output_file.write(line)
def ok(self):
return True
def _get_op_reduction_root(build_dir: Path):
"""
Return the op reduction root directory which is a subdirectory of `build_dir`.
"""
return Path(build_dir, OP_REDUCTION_DIR)
def _get_op_reduction_file_path(ort_root: Path, build_dir: Path, original_path: Path):
"""
Return the op reduction file path corresponding to `original_path`.
Op reduction files are in the op reduction root but otherwise share the same components of `original_path`
relative to `ort_root`.
"""
return _get_op_reduction_root(build_dir) / original_path.relative_to(ort_root)
def _generate_provider_registrations(
ort_root: Path,
build_dir: Path,
use_cuda: bool,
required_ops: typing.Optional[dict],
op_type_impl_filter: typing.Optional[OpTypeImplFilterInterface],
):
"""Generate provider registration files."""
kernel_registration_files = [
Path(f) for f in op_registration_utils.get_kernel_registration_files(str(ort_root), use_cuda)
]
for kernel_registration_file in kernel_registration_files:
if not kernel_registration_file.is_file():
raise ValueError(f"Kernel registration file does not exist: {kernel_registration_file}")
log.info(f"Processing {kernel_registration_file}")
reduced_path = _get_op_reduction_file_path(ort_root, build_dir, kernel_registration_file)
reduced_path.parent.mkdir(parents=True, exist_ok=True)
# read from original and create the reduced kernel def file with commented out lines for any kernels that are
# not required
with open(reduced_path, "w") as file_to_write:
processor = _ExcludingRegistrationProcessor(required_ops, op_type_impl_filter, file_to_write)
op_registration_utils.process_kernel_registration_file(kernel_registration_file, processor)
if not processor.ok():
# error should have already been logged so just exit
sys.exit(-1)
def _generate_type_control_overrides(ort_root: Path, build_dir: Path, cpp_lines: typing.Sequence[str]):
"""
Generate type control overrides. Insert applicable C++ code to specify operator type requirements.
:param ort_root: Root of the ONNX Runtime repository
:param build_dir: Path to the build directory
:param cpp_lines: The C++ code to insert
"""
src = Path(ort_root, "onnxruntime", "core", "providers", "op_kernel_type_control_overrides.inc")
if not src.is_file():
raise ValueError(f"Op kernel type control overrides file does not exist: {src}")
# create a copy of op_kernel_type_control_overrides.inc
target = _get_op_reduction_file_path(ort_root, build_dir, src)
target.parent.mkdir(parents=True, exist_ok=True)
shutil.copyfile(src, target)
if cpp_lines:
# find the insertion block and replace any existing content in it
inserted = False
with open(src) as input, open(target, "w") as output:
inside_insertion_block = False
for line in input.readlines():
if "@@insertion_point_begin(allowed_types)@@" in line:
inside_insertion_block = True
output.write(line)
[output.write(f"{code_line}\n") for code_line in cpp_lines]
inserted = True
continue
elif inside_insertion_block:
if "@@insertion_point_end(allowed_types)@@" in line:
inside_insertion_block = False
else:
# we ignore any old lines within the insertion block
continue
output.write(line)
if not inserted:
raise RuntimeError(f"Insertion point was not found in {target}")
def reduce_ops(
config_path: str,
build_dir: str,
enable_type_reduction: bool,
use_cuda: bool,
is_extended_minimal_build_or_higher: bool,
):
"""
Reduce op kernel implementations.
:param config_path: Path to configuration file that specifies the ops to include
:param build_dir: Path to the build directory. The op reduction files will be generated under the build directory.
:param enable_type_reduction: Whether per operator type reduction is enabled
:param use_cuda: Whether to reduce op kernels for the CUDA provider
:param is_extended_minimal_build_or_higher: Whether this build has at least the features of an extended minimal
build enabled.
"""
build_dir_path = Path(build_dir).resolve()
build_dir_path.mkdir(parents=True, exist_ok=True)
required_ops, op_type_impl_filter = parse_config(config_path, enable_type_reduction)
if is_extended_minimal_build_or_higher:
required_ops, op_type_impl_filter = _adapt_filters_for_extended_minimal_build(required_ops, op_type_impl_filter)
# delete any existing generated files first
op_reduction_root = _get_op_reduction_root(build_dir_path)
if op_reduction_root.is_dir():
log.info(f"Deleting existing op reduction file root directory: {op_reduction_root}")
shutil.rmtree(op_reduction_root)
_generate_provider_registrations(ORT_ROOT, build_dir_path, use_cuda, required_ops, op_type_impl_filter)
type_control_cpp_code = op_type_impl_filter.get_cpp_entries() if op_type_impl_filter is not None else []
_generate_type_control_overrides(ORT_ROOT, build_dir_path, type_control_cpp_code)
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description="Reduces operator kernel implementations in ONNX Runtime. "
"Entire op implementations or op implementations for specific types may be pruned."
)
parser.add_argument(
"config_path",
type=str,
help="Path to configuration file. "
"Create with <ORT root>/tools/python/create_reduced_build_config.py and edit if needed. "
"See https://onnxruntime.ai/docs/reference/operators/reduced-operator-config-file.html for more "
"information.",
)
parser.add_argument(
"--cmake_build_dir",
type=str,
required=True,
help="Path to the build directory. The op reduction files will be generated under the build directory.",
)
parser.add_argument(
"--is_extended_minimal_build_or_higher",
action="store_true",
help="Whether this build has at least the features of an extended minimal build enabled.",
)
parser.add_argument(
"--enable_type_reduction", action="store_true", help="Whether per operator type reduction is enabled."
)
parser.add_argument("--use_cuda", action="store_true", help="Whether to reduce op kernels for the CUDA provider.")
args = parser.parse_args()
reduce_ops(
config_path=args.config_path,
build_dir=args.cmake_build_dir,
enable_type_reduction=args.enable_type_reduction,
use_cuda=args.use_cuda,
is_extended_minimal_build_or_higher=args.is_extended_minimal_build_or_higher,
)
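# Example invocation (a sketch; the script filename and paths below are assumptions,
# not taken from this file):
#   python reduce_op_kernels.py required_operators.config \
#       --cmake_build_dir build/Linux/MinSizeRel --enable_type_reduction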
|
9893c0b58a504634470b13dcdccf60477359728a
|
e366027ffef496d43350b044d0574127c512d690
|
/tools/device_file_generator/dfg/identifiers.py
|
2e30d572e33ecf435174839b9acb31843a4c9406
|
[] |
no_license
|
roboterclubaachen/xpcc
|
aa504f3d0a8246861dd00ea437cc7b938a869669
|
010924901947381d20e83b838502880eb2ffea72
|
refs/heads/develop
| 2021-01-24T11:18:27.233788
| 2019-01-03T18:55:35
| 2019-01-03T18:55:35
| 3,626,127
| 163
| 51
| null | 2018-06-30T19:15:55
| 2012-03-05T10:49:18
|
C
|
UTF-8
|
Python
| false
| false
| 5,597
|
py
|
identifiers.py
|
# -*- coding: utf-8 -*-
# Copyright (c) 2013, Roboterclub Aachen e.V.
# All rights reserved.
#
# The file is part of the xpcc library and is released under the 3-clause BSD
# license. See the file `LICENSE` for the full license governing this code.
# -----------------------------------------------------------------------------
from logger import Logger
from device_identifier import DeviceIdentifier
class Identifiers:
""" Identifiers
Encapsulates a list of device identifiers.
This manages filtering, merging and accessing of device ids.
"""
def __init__(self, device_id=None, logger=None):
if isinstance(device_id, Identifiers):
self.log = device_id.log
# please deep copy this list
self.ids = [DeviceIdentifier(dev, self.log) for dev in device_id.ids]
return
if logger == None:
self.log = Logger()
else:
self.log = logger
if isinstance(device_id, list):
# please deep copy this list
self.ids = [DeviceIdentifier(dev, self.log) for dev in device_id]
return
self.ids = []
if device_id != None:
self.ids.append(DeviceIdentifier(device_id, self.log))
def append(self, device_id):
assert isinstance(device_id, DeviceIdentifier)
self.ids.append(device_id)
self.ids = list(set(self.ids))
self.ids.sort(key=lambda k : k.string)
def extend(self, identifiers):
assert isinstance(identifiers, Identifiers)
for device_id in identifiers:
self.ids.append(device_id)
self.ids = list(set(self.ids))
self.ids.sort(key=lambda k : k.string)
def differenceFromIds(self, ids):
assert isinstance(ids, Identifiers)
# list all other ids that are not part of our ids
other_ids = Identifiers(None, self.log)
for device_id in ids:
if device_id.string not in self.getAttribute('string'):
other_ids.append(DeviceIdentifier(device_id))
# our devices are equal to the input
if (len(other_ids) == 0):
return Identifiers(DeviceIdentifier(None, self.log), self.log)
# create the intersection of all ids
intersection_id = ids.intersection
# strip the intersecting attributes from a copy of my own ids
own_ids = Identifiers(self)
for device_id in own_ids:
for attr in device_id.properties:
if device_id.properties[attr] == intersection_id.properties[attr]:
setattr(device_id, attr, None)
# if we only have one device_id we can stop here
if len(own_ids) == 1:
return Identifiers(own_ids[0], self.log)
# strip the intersecting attributes from other_ids
for device_id in other_ids:
for attr in device_id.properties:
if device_id.properties[attr] == intersection_id.properties[attr]:
setattr(device_id, attr, None)
own_union = own_ids.union
# check which attributes are the same in my own ids
same_attr = [ attr for attr in \
[p for p in own_union.properties if own_union.properties[p] != None] \
if '|' not in str(own_union.properties[attr]) ]
# filter out these attributes from all the other ids
if len(same_attr) and all(own_union.properties[attr] not in other_ids.getAttribute(attr) \
for attr in same_attr):
# there are no other ids that have the same common attributes
# so we can forget the uncommon ones (like type)
for attr in [p for p in own_union.properties if p not in same_attr]:
setattr(own_union, attr, None)
return Identifiers(own_union, self.log)
# merge the ids in the list until we cannot anymore
devs = Identifiers(own_ids)
unmergables = Identifiers()
while(len(devs) > 0):
current = devs[0]
if current in unmergables:
devs.remove(current)
continue
remove_devs = [current]
for dev in devs[1:]:
if (dev in unmergables):
remove_devs.append(dev)
continue
union = current.unionWithDeviceIdentifier(dev)
if all(device_id not in union for device_id in other_ids):
remove_devs.append(dev)
current = union
for dev in remove_devs:
devs.remove(dev)
# self.log.debug("\nUnmergable: %s" % current)
unmergables.append(current)
# strip the unifying attributes from unmergables
other_ids_union = ids.union
for device_id in unmergables:
for attr in device_id.properties:
if device_id.properties[attr] == other_ids_union.properties[attr]:
setattr(device_id, attr, None)
return unmergables
def filterForAttribute(self, name, value):
ids = Identifiers()
for device_id in self.ids:
if value == device_id.properties[name]:
ids.append(DeviceIdentifier(device_id, self.log))
return ids
def remove(self, device_id):
self.ids.remove(device_id)
def __contains__(self, device_id):
return any(device_id in dev for dev in self.ids)
def __iter__(self):
for device_id in self.ids:
yield device_id
def __getitem__(self, index):
return self.ids[index]
def __len__(self):
return len(self.ids)
def getAttribute(self, name):
attributes = []
for device_id in self.ids:
attributes.append(getattr(device_id, name))
if len(attributes) == 0:
return [None]
attributes = list(set(attributes))
attributes.sort()
return attributes
@property
def intersection(self):
dev = DeviceIdentifier(self.ids[0], self.log)
for device_id in self.ids[1:]:
dev = dev.intersectionWithDeviceIdentifier(device_id)
return dev
@property
def union(self):
dev = DeviceIdentifier(logger=self.log)
for device_id in self.ids:
dev = dev.unionWithDeviceIdentifier(device_id)
return dev
@property
def string(self):
return ", ".join([str(device_id.string) for device_id in self.ids])
def __repr__(self):
return self.__str__()
def __str__(self):
return ("Identifiers( %s )" % self.string)
|
4c0553cc4581a830597ced881d77af8f5c3a889c
|
6032d4c2a046e2c5601099593660807217944019
|
/elephant/test/test_spectral.py
|
d6272af97325744cd8e49b94dd42523bbd499fc0
|
[
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
NeuralEnsemble/elephant
|
6130fc70fcfd4e3e1a91add4ad0273c4bca4782d
|
2bd871aec145d897031aed327a7a4af0102c47cb
|
refs/heads/master
| 2023-09-02T03:31:14.531100
| 2023-07-20T14:00:50
| 2023-07-20T14:00:50
| 10,311,278
| 162
| 89
|
BSD-3-Clause
| 2023-09-14T13:47:26
| 2013-05-27T08:59:34
|
Python
|
UTF-8
|
Python
| false
| false
| 55,129
|
py
|
test_spectral.py
|
# -*- coding: utf-8 -*-
"""
Unit tests for the spectral module.
:copyright: Copyright 2014-2023 by the Elephant team, see `doc/authors.rst`.
:license: Modified BSD, see LICENSE.txt for details.
"""
import unittest
import neo.core
import numpy as np
import pytest
import quantities as pq
import scipy
import scipy.fft
import scipy.signal as spsig
from neo import AnalogSignal
from numpy.testing import assert_array_equal
import elephant.spectral
from elephant.datasets import download_datasets, ELEPHANT_TMP_DIR
class WelchPSDTestCase(unittest.TestCase):
def test_welch_psd_errors(self):
# generate a dummy data
data = AnalogSignal(np.zeros(5000), sampling_period=0.001 * pq.s,
units='mV')
# check for invalid parameter values
# - length of segments
self.assertRaises(ValueError, elephant.spectral.welch_psd, data,
len_seg=0)
self.assertRaises(ValueError, elephant.spectral.welch_psd, data,
len_seg=data.shape[0] * 2)
# - number of segments
self.assertRaises(ValueError, elephant.spectral.welch_psd, data,
num_seg=0)
self.assertRaises(ValueError, elephant.spectral.welch_psd, data,
num_seg=data.shape[0] * 2)
# - frequency resolution
self.assertRaises(ValueError, elephant.spectral.welch_psd, data,
freq_res=-1)
self.assertRaises(ValueError, elephant.spectral.welch_psd, data,
freq_res=data.sampling_rate / (data.shape[0] + 1))
# - overlap
self.assertRaises(ValueError, elephant.spectral.welch_psd, data,
overlap=-1.0)
self.assertRaises(ValueError, elephant.spectral.welch_psd, data,
overlap=1.1)
def test_welch_psd_warnings(self):
# generate a dummy data
data = AnalogSignal(np.zeros(5000), sampling_period=0.001 * pq.s,
units='mV')
# Test deprecation warning for 'hanning' window
self.assertWarns(DeprecationWarning, elephant.spectral.welch_psd,
data, window='hanning')
def test_welch_psd_behavior(self):
# generate data by adding white noise and a sinusoid
data_length = 5000
sampling_period = 0.001
signal_freq = 100.0
noise = np.random.normal(size=data_length)
signal = [np.sin(2 * np.pi * signal_freq * t)
for t in np.arange(0, data_length * sampling_period,
sampling_period)]
data = AnalogSignal(np.array(signal + noise),
sampling_period=sampling_period * pq.s,
units='mV')
# consistency between different ways of specifying segment length
freqs1, psd1 = elephant.spectral.welch_psd(
data, len_segment=data_length // 5, overlap=0)
freqs2, psd2 = elephant.spectral.welch_psd(
data, n_segments=5, overlap=0)
self.assertTrue((psd1 == psd2).all() and (freqs1 == freqs2).all())
# frequency resolution and consistency with data
freq_res = 1.0 * pq.Hz
freqs, psd = elephant.spectral.welch_psd(
data, frequency_resolution=freq_res)
self.assertAlmostEqual(freq_res, freqs[1] - freqs[0])
self.assertEqual(freqs[psd.argmax()], signal_freq)
freqs_np, psd_np = elephant.spectral.welch_psd(
data.magnitude.flatten(), fs=1 / sampling_period,
frequency_resolution=freq_res)
self.assertTrue((freqs == freqs_np).all() and (psd == psd_np).all())
# check of scipy.signal.welch() parameters
params = {'window': 'hamming', 'nfft': 1024, 'detrend': 'linear',
'return_onesided': False, 'scaling': 'spectrum'}
for key, val in params.items():
freqs, psd = elephant.spectral.welch_psd(
data, len_segment=1000, overlap=0, **{key: val})
freqs_spsig, psd_spsig = spsig.welch(np.rollaxis(data, 0, len(
data.shape)), fs=1 / sampling_period, nperseg=1000,
noverlap=0, **{key: val})
self.assertTrue(
(freqs == freqs_spsig).all() and (
psd == psd_spsig).all())
# - generate multidimensional data for check of parameter `axis`
num_channel = 4
data_length = 5000
data_multidim = np.random.normal(size=(num_channel, data_length))
freqs, psd = elephant.spectral.welch_psd(data_multidim)
freqs_T, psd_T = elephant.spectral.welch_psd(data_multidim.T, axis=0)
self.assertTrue(np.all(freqs == freqs_T))
self.assertTrue(np.all(psd == psd_T.T))
def test_welch_psd_input_types(self):
# generate a test data
sampling_period = 0.001
data = AnalogSignal(np.array(np.random.normal(size=5000)),
sampling_period=sampling_period * pq.s,
units='mV')
# outputs from AnalogSignal input are of Quantity type (standard usage)
freqs_neo, psd_neo = elephant.spectral.welch_psd(data)
self.assertTrue(isinstance(freqs_neo, pq.quantity.Quantity))
self.assertTrue(isinstance(psd_neo, pq.quantity.Quantity))
# outputs from Quantity array input are of Quantity type
freqs_pq, psd_pq = elephant.spectral.welch_psd(
data.magnitude.flatten() * data.units, fs=1 / sampling_period)
self.assertTrue(isinstance(freqs_pq, pq.quantity.Quantity))
self.assertTrue(isinstance(psd_pq, pq.quantity.Quantity))
# outputs from Numpy ndarray input are NOT of Quantity type
freqs_np, psd_np = elephant.spectral.welch_psd(
data.magnitude.flatten(), fs=1 / sampling_period)
self.assertFalse(isinstance(freqs_np, pq.quantity.Quantity))
self.assertFalse(isinstance(psd_np, pq.quantity.Quantity))
# check if the results from different input types are identical
self.assertTrue(
(freqs_neo == freqs_pq).all() and (
psd_neo == psd_pq).all())
self.assertTrue(
(freqs_neo == freqs_np).all() and (
psd_neo == psd_np).all())
def test_welch_psd_multidim_input(self):
# generate multidimensional data
num_channel = 4
data_length = 5000
sampling_period = 0.001
noise = np.random.normal(size=(num_channel, data_length))
data_np = np.array(noise)
# Since row-column order in AnalogSignal is different from the
# conventional one, `data_np` needs to be transposed when it's used to
# define an AnalogSignal
data_neo = AnalogSignal(data_np.T,
sampling_period=sampling_period * pq.s,
units='mV')
data_neo_1dim = AnalogSignal(data_np[0],
sampling_period=sampling_period * pq.s,
units='mV')
# check if the results from different input types are identical
freqs_np, psd_np = elephant.spectral.welch_psd(data_np,
fs=1 / sampling_period)
freqs_neo, psd_neo = elephant.spectral.welch_psd(data_neo)
freqs_neo_1dim, psd_neo_1dim = elephant.spectral.welch_psd(
data_neo_1dim)
self.assertTrue(np.all(freqs_np == freqs_neo))
self.assertTrue(np.all(psd_np == psd_neo))
self.assertTrue(np.all(psd_neo_1dim == psd_neo[0]))
class MultitaperPSDTestCase(unittest.TestCase):
def test_multitaper_psd_errors(self):
# generate dummy data
data_length = 5000
signal = AnalogSignal(np.zeros(data_length),
sampling_period=0.001 * pq.s,
units='mV')
fs = signal.sampling_rate
self.assertIsInstance(fs, pq.Quantity)
# check for invalid parameter values
# - number of tapers
self.assertRaises(ValueError, elephant.spectral.multitaper_psd, signal,
num_tapers=-5)
self.assertRaises(TypeError, elephant.spectral.multitaper_psd, signal,
num_tapers=-5.0)
# - peak resolution
self.assertRaises(ValueError, elephant.spectral.multitaper_psd, signal,
peak_resolution=-1)
def test_multitaper_psd_behavior(self):
# generate data (frequency domain to time domain)
r = np.ones(2501) * 0.2
r[0], r[500] = 0, 10 # Zero DC, peak at 100 Hz
phi = np.random.uniform(-np.pi, np.pi, len(r))
fake_coeffs = r*np.exp(1j * phi)
fake_ts = scipy.fft.irfft(fake_coeffs)
sampling_period = 0.001
freqs = scipy.fft.rfftfreq(len(fake_ts), d=sampling_period)
signal_freq = freqs[r.argmax()]
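        # Worked numbers (comment only): irfft of 2501 coefficients yields
        # 2 * (2501 - 1) = 5000 samples, so rfftfreq(5000, d=0.001) has a bin
        # spacing of 1 / (5000 * 0.001) = 0.2 Hz and bin 500 sits at 100 Hz,
        # matching the "peak at 100 Hz" set up in `r` above.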
data = AnalogSignal(fake_ts, sampling_period=sampling_period * pq.s,
units='mV')
# consistency between different ways of specifying number of tapers
freqs1, psd1 = elephant.spectral.multitaper_psd(data,
fs=data.sampling_rate,
nw=3.5)
freqs2, psd2 = elephant.spectral.multitaper_psd(data,
fs=data.sampling_rate,
nw=3.5,
num_tapers=6)
self.assertTrue((psd1 == psd2).all() and (freqs1 == freqs2).all())
# peak resolution and consistency with data
peak_res = 1.0 * pq.Hz
freqs, psd = elephant.spectral.multitaper_psd(
data, peak_resolution=peak_res)
self.assertEqual(freqs[psd.argmax()], signal_freq)
freqs_np, psd_np = elephant.spectral.multitaper_psd(
data.magnitude.flatten(), fs=1 / sampling_period,
peak_resolution=peak_res)
self.assertTrue((freqs == freqs_np).all() and (psd == psd_np).all())
def test_multitaper_psd_parameter_hierarchy(self):
# generate data by adding white noise and a sinusoid
data_length = 5000
sampling_period = 0.001
signal_freq = 100.0
noise = np.random.normal(size=data_length)
signal = [np.sin(2 * np.pi * signal_freq * t)
for t in np.arange(0, data_length * sampling_period,
sampling_period)]
data = AnalogSignal(np.array(signal + noise),
sampling_period=sampling_period * pq.s,
units='mV')
# Test num_tapers vs nw
freqs1, psd1 = elephant.spectral.multitaper_psd(data,
fs=data.sampling_rate,
nw=3,
num_tapers=9)
freqs2, psd2 = elephant.spectral.multitaper_psd(data,
fs=data.sampling_rate,
nw=3)
self.assertTrue((freqs1 == freqs2).all() and (psd1 != psd2).all())
# Test peak_resolution vs nw
freqs1, psd1 = elephant.spectral.multitaper_psd(data,
fs=data.sampling_rate,
nw=3,
num_tapers=9,
peak_resolution=1)
freqs2, psd2 = elephant.spectral.multitaper_psd(data,
fs=data.sampling_rate,
nw=3,
num_tapers=9)
self.assertTrue((freqs1 == freqs2).all() and (psd1 != psd2).all())
def test_multitaper_psd_against_nitime(self):
"""
This test assesses the match between this implementation of
multitaper against nitime (0.8) using a predefined time series
generated by an autoregressive model.
Please follow the link below for more details:
https://gin.g-node.org/INM-6/elephant-data/src/master/unittest/spectral/multitaper_psd
"""
repo_path = r"unittest/spectral/multitaper_psd/data"
files_to_download = [
("time_series.npy", "ff43797e2ac94613f510b20a31e2e80e"),
("psd_nitime.npy", "89d1f53957e66c786049ea425b53c0e8")
]
for filename, checksum in files_to_download:
download_datasets(repo_path=f"{repo_path}/{filename}",
checksum=checksum)
time_series = np.load(ELEPHANT_TMP_DIR / 'time_series.npy')
psd_nitime = np.load(ELEPHANT_TMP_DIR / 'psd_nitime.npy')
freqs, psd_multitaper = elephant.spectral.multitaper_psd(
signal=time_series, fs=0.1, nw=4, num_tapers=8)
np.testing.assert_allclose(np.squeeze(psd_multitaper), psd_nitime,
rtol=0.3, atol=0.1)
def test_multitaper_psd_input_types(self):
# generate a test data
sampling_period = 0.001
data = AnalogSignal(np.array(np.random.normal(size=5000)),
sampling_period=sampling_period * pq.s,
units='mV')
# outputs from AnalogSignal input are of Quantity type (standard usage)
freqs_neo, psd_neo = elephant.spectral.multitaper_psd(data)
self.assertTrue(isinstance(freqs_neo, pq.quantity.Quantity))
self.assertTrue(isinstance(psd_neo, pq.quantity.Quantity))
# outputs from Quantity array input are of Quantity type
freqs_pq, psd_pq = elephant.spectral.multitaper_psd(
data.magnitude.flatten() * data.units, fs=1 / sampling_period)
self.assertTrue(isinstance(freqs_pq, pq.quantity.Quantity))
self.assertTrue(isinstance(psd_pq, pq.quantity.Quantity))
# outputs from Numpy ndarray input are NOT of Quantity type
freqs_np, psd_np = elephant.spectral.multitaper_psd(
data.magnitude.flatten(), fs=1 / sampling_period)
self.assertFalse(isinstance(freqs_np, pq.quantity.Quantity))
self.assertFalse(isinstance(psd_np, pq.quantity.Quantity))
# fs with and without units
fs_hz = 1 * pq.Hz
fs_int = 1
freqs_fs_hz, psd_fs_hz = elephant.spectral.multitaper_psd(
data.magnitude.T, fs=fs_hz)
freqs_fs_int, psd_fs_int = elephant.spectral.multitaper_psd(
data.magnitude.T, fs=fs_int)
np.testing.assert_array_equal(freqs_fs_hz, freqs_fs_int)
np.testing.assert_array_equal(psd_fs_hz, psd_fs_int)
# check if the results from different input types are identical
self.assertTrue(
(freqs_neo == freqs_pq).all() and (
psd_neo == psd_pq).all())
self.assertTrue(
(freqs_neo == freqs_np).all() and (
psd_neo == psd_np).all())
class SegmentedMultitaperPSDTestCase(unittest.TestCase):
# The following assertions test _segmented_apply_func in the context
# of segmented_multitaper_psd. In other words, only the segmentation is
# addressed. The inner workings of the multitaper function are tested
# in the MultitaperPSDTestCase.
def test_segmented_multitaper_psd_errors(self):
# generate dummy data
data_length = 5000
signal = AnalogSignal(np.zeros(data_length),
sampling_period=0.001 * pq.s,
units='mV')
fs = signal.sampling_rate
# check for invalid parameter values
# - frequency resolution
self.assertRaises(ValueError,
elephant.spectral.segmented_multitaper_psd, signal,
frequency_resolution=-10)
# - n per segment
# n_per_seg = int(fs / dF), where dF is the frequency_resolution
broken_freq_resolution = fs / (data_length+1)
self.assertRaises(ValueError,
elephant.spectral.segmented_multitaper_psd, signal,
frequency_resolution=broken_freq_resolution)
# - length of segment (negative)
self.assertRaises(ValueError,
elephant.spectral.segmented_multitaper_psd, signal,
len_segment=-10)
# - length of segment (larger than data length)
self.assertRaises(ValueError,
elephant.spectral.segmented_multitaper_psd, signal,
len_segment=data_length+1)
# - number of segments (negative)
self.assertRaises(ValueError,
elephant.spectral.segmented_multitaper_psd, signal,
n_segments=-10)
# - number of segments (larger than data length)
self.assertRaises(ValueError,
elephant.spectral.segmented_multitaper_psd, signal,
n_segments=data_length+1)
def test_segmented_multitaper_psd_behavior(self):
# generate data (frequency domain to time domain)
r = np.ones(2501) * 0.2
r[0], r[500] = 0, 10 # Zero DC, peak at 100 Hz
phi = np.random.uniform(-np.pi, np.pi, len(r))
fake_coeffs = r*np.exp(1j * phi)
fake_ts = scipy.fft.irfft(fake_coeffs)
sampling_period = 0.001
freqs = scipy.fft.rfftfreq(len(fake_ts), d=sampling_period)
signal_freq = freqs[r.argmax()]
data = AnalogSignal(fake_ts, sampling_period=sampling_period * pq.s,
units='mV')
# consistency between different ways of specifying n_per_seg
# n_per_seg = int(fs/dF) and n_per_seg = len_segment
frequency_resolution = 1 * pq.Hz
len_segment = int(data.sampling_rate / frequency_resolution)
freqs_fr, psd_fr = elephant.spectral.segmented_multitaper_psd(
data, frequency_resolution=frequency_resolution)
freqs_ls, psd_ls = elephant.spectral.segmented_multitaper_psd(
data, len_segment=len_segment)
np.testing.assert_array_equal(freqs_fr, freqs_ls)
np.testing.assert_array_equal(psd_fr, psd_ls)
def test_segmented_multitaper_psd_parameter_hierarchy(self):
# generate data by adding white noise and a sinusoid
data_length = 5000
sampling_period = 0.001
signal_freq = 100.0
noise = np.random.normal(size=data_length)
signal = [np.sin(2 * np.pi * signal_freq * t)
for t in np.arange(0, data_length * sampling_period,
sampling_period)]
data = AnalogSignal(np.array(signal + noise),
sampling_period=sampling_period * pq.s,
units='mV')
# test frequency_resolution vs len_segment vs n_segments
n_segments = 5
len_segment = 2000
frequency_resolution = 0.25 * pq.Hz
freqs_ns, psd_ns = \
elephant.spectral.segmented_multitaper_psd(
data, n_segments=n_segments)
freqs_ls, psd_ls = \
elephant.spectral.segmented_multitaper_psd(
data, n_segments=n_segments, len_segment=len_segment)
freqs_fr, psd_fr = \
elephant.spectral.segmented_multitaper_psd(
data, n_segments=n_segments, len_segment=len_segment,
frequency_resolution=frequency_resolution)
self.assertTrue(freqs_ns.shape < freqs_ls.shape < freqs_fr.shape)
self.assertTrue(psd_ns.shape < psd_ls.shape < psd_fr.shape)
def test_segmented_multitaper_psd_input_types(self):
# generate a test data
sampling_period = 0.001
data = AnalogSignal(np.array(np.random.normal(size=5000)),
sampling_period=sampling_period * pq.s,
units='mV')
# outputs from AnalogSignal input are of Quantity type (standard usage)
freqs_neo, psd_neo = elephant.spectral.segmented_multitaper_psd(data)
self.assertTrue(isinstance(freqs_neo, pq.quantity.Quantity))
self.assertTrue(isinstance(psd_neo, pq.quantity.Quantity))
# outputs from Quantity array input are of Quantity type
freqs_pq, psd_pq = elephant.spectral.segmented_multitaper_psd(
data.magnitude.flatten() * data.units, fs=1 / sampling_period)
self.assertTrue(isinstance(freqs_pq, pq.quantity.Quantity))
self.assertTrue(isinstance(psd_pq, pq.quantity.Quantity))
# outputs from Numpy ndarray input are NOT of Quantity type
freqs_np, psd_np = elephant.spectral.segmented_multitaper_psd(
data.magnitude.flatten(), fs=1 / sampling_period)
self.assertFalse(isinstance(freqs_np, pq.quantity.Quantity))
self.assertFalse(isinstance(psd_np, pq.quantity.Quantity))
# frequency resolution with and without units
freq_res_hz = 1 * pq.Hz
freq_res_int = 1
freqs_int, psd_int = elephant.spectral.segmented_multitaper_psd(
data, frequency_resolution=freq_res_int)
freqs_hz, psd_hz = elephant.spectral.segmented_multitaper_psd(
data, frequency_resolution=freq_res_hz)
np.testing.assert_array_equal(freqs_int, freqs_hz)
np.testing.assert_array_equal(psd_int, psd_hz)
# fs with and without units
fs_hz = 1 * pq.Hz
fs_int = 1
freqs_fs_hz, psd_fs_hz = elephant.spectral.multitaper_psd(
data.magnitude.T, fs=fs_hz)
freqs_fs_int, psd_fs_int = elephant.spectral.multitaper_psd(
data.magnitude.T, fs=fs_int)
np.testing.assert_array_equal(freqs_fs_hz, freqs_fs_int)
np.testing.assert_array_equal(psd_fs_hz, psd_fs_int)
# check if the results from different input types are identical
self.assertTrue(
(freqs_neo == freqs_pq).all() and (
psd_neo == psd_pq).all())
self.assertTrue(
(freqs_neo == freqs_np).all() and (
psd_neo == psd_np).all())
class MultitaperCrossSpectrumTestCase(unittest.TestCase):
def test_multitaper_cross_spectrum_errors(self):
# generate dummy data
data_length = 5000
signal = AnalogSignal(np.zeros(data_length),
sampling_period=0.001 * pq.s,
units='mV')
fs = signal.sampling_rate
# check for invalid parameter values
# - number of tapers
self.assertRaises(ValueError,
elephant.spectral.multitaper_cross_spectrum, signal,
fs=fs, num_tapers=-5)
self.assertRaises(TypeError,
elephant.spectral.multitaper_cross_spectrum, signal,
fs=fs, num_tapers=-5.0)
# - peak resolution
self.assertRaises(ValueError,
elephant.spectral.multitaper_cross_spectrum, signal,
fs=fs, peak_resolution=-1)
def test_multitaper_cross_spectrum_behavior(self):
# generate data (frequency domain to time domain)
r = np.ones(2501) * 0.2
r[0], r[500] = 0, 10 # Zero DC, peak at 100 Hz
phi_x = np.random.uniform(-np.pi, np.pi, len(r))
phi_y = np.random.uniform(-np.pi, np.pi, len(r))
fake_coeffs_x = r*np.exp(1j * phi_x)
fake_coeffs_y = r*np.exp(1j * phi_y)
signal_x = scipy.fft.irfft(fake_coeffs_x)
signal_y = scipy.fft.irfft(fake_coeffs_y)
sampling_period = 0.001
freqs = scipy.fft.rfftfreq(len(signal_x), d=sampling_period)
signal_freq = freqs[r.argmax()]
data = AnalogSignal(np.vstack([signal_x, signal_y]).T,
sampling_period=sampling_period * pq.s,
units='mV')
# consistency between different ways of specifying number of tapers
freqs1, cross_spec1 = \
elephant.spectral.multitaper_cross_spectrum(
data,
fs=data.sampling_rate,
nw=3.5)
freqs2, cross_spec2 = \
elephant.spectral.multitaper_cross_spectrum(
data,
fs=data.sampling_rate,
nw=3.5,
num_tapers=6)
self.assertTrue((cross_spec1 == cross_spec2).all()
and (freqs1 == freqs2).all())
# peak resolution and consistency with data
peak_res = 1.0 * pq.Hz
freqs, cross_spec = \
elephant.spectral.multitaper_cross_spectrum(
data, peak_resolution=peak_res)
self.assertEqual(freqs[cross_spec[0, 0].argmax()], signal_freq)
freqs_np, cross_spec_np = \
elephant.spectral.multitaper_cross_spectrum(
data.magnitude.T, fs=1 / sampling_period,
peak_resolution=peak_res)
self.assertTrue((freqs == freqs_np).all()
and (cross_spec == cross_spec_np).all())
# one-sided vs two-sided spectrum
freqs_os, cross_spec_os = \
elephant.spectral.multitaper_cross_spectrum(
data, return_onesided=True)
freqs_ts, cross_spec_ts = \
elephant.spectral.multitaper_cross_spectrum(
data, return_onesided=False)
# Nyquist frequency is negative when using onesided=False (fftfreq)
# See: https://docs.scipy.org/doc/scipy/reference/generated/scipy.fft.rfftfreq.html#scipy.fft.rfftfreq # noqa
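        # Worked example of the bookkeeping below (comment only): for an even
        # number of samples N with spacing d, fftfreq orders the bins as
        #   0, 1/(N*d), ..., (N/2 - 1)/(N*d), -1/(2*d), ..., -1/(N*d)
        # so the Nyquist bin 1/(2*d) appears exactly once, with a negative sign.
        # np.abs(freqs_ts).argmax() therefore locates it, and appending its
        # absolute value to the non-negative bins reproduces the one-sided
        # rfftfreq layout used for freqs_os.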
nonnegative_freqs_indices = np.nonzero(freqs_ts >= 0)[0]
nyquist_freq_idx = np.abs(freqs_ts).argmax()
ts_freq_indices = np.append(nonnegative_freqs_indices,
nyquist_freq_idx)
ts_overlap_freqs = np.append(
freqs_ts[nonnegative_freqs_indices].rescale('Hz').magnitude,
np.abs(freqs_ts[nyquist_freq_idx].rescale('Hz').magnitude)) * pq.Hz
np.testing.assert_array_equal(freqs_os, ts_overlap_freqs)
np.testing.assert_allclose(
cross_spec_os.magnitude,
cross_spec_ts[:, :, ts_freq_indices].magnitude, rtol=1e-12, atol=0)
def test_multitaper_cross_spectrum_parameter_hierarchy(self):
# generate data by adding white noise and a sinusoid
data_length = 5000
sampling_period = 0.001
signal_freq = 100.0
noise = np.random.normal(size=(2, data_length))
time_points = np.arange(0, data_length * sampling_period,
sampling_period)
signal_x = np.sin(2 * np.pi * signal_freq * time_points) + noise[0]
signal_y = np.cos(2 * np.pi * signal_freq * time_points) + noise[1]
data = AnalogSignal(np.vstack([signal_x, signal_y]).T,
sampling_period=sampling_period * pq.s,
units='mV')
# Test num_tapers vs nw
freqs1, cross_spec1 = elephant.spectral.multitaper_cross_spectrum(
data, fs=data.sampling_rate, nw=3, num_tapers=9)
freqs2, cross_spec2 = elephant.spectral.multitaper_cross_spectrum(
data, fs=data.sampling_rate, nw=3)
self.assertTrue((freqs1 == freqs2).all()
and (cross_spec1 != cross_spec2).all())
# Test peak_resolution vs nw
freqs1, cross_spec1 = elephant.spectral.multitaper_cross_spectrum(
data, fs=data.sampling_rate, nw=3, num_tapers=9, peak_resolution=1)
freqs2, cross_spec2 = elephant.spectral.multitaper_cross_spectrum(
data, fs=data.sampling_rate, nw=3, num_tapers=9)
self.assertTrue((freqs1 == freqs2).all()
and (cross_spec1 != cross_spec2).all())
def test_multitaper_cross_spectrum_input_types(self):
# generate a test data
data_length = 5000
sampling_period = 0.001
signal_freq = 100.0
noise = np.random.normal(size=(2, data_length))
time_points = np.arange(0, data_length * sampling_period,
sampling_period)
signal_x = np.sin(2 * np.pi * signal_freq * time_points) + noise[0]
signal_y = np.cos(2 * np.pi * signal_freq * time_points) + noise[1]
data = AnalogSignal(np.vstack([signal_x, signal_y]).T,
sampling_period=sampling_period * pq.s,
units='mV')
# outputs from AnalogSignal input are of Quantity type (standard usage)
freqs_neo, cross_spec_neo \
= elephant.spectral.multitaper_cross_spectrum(data)
self.assertTrue(isinstance(freqs_neo, pq.quantity.Quantity))
self.assertTrue(isinstance(cross_spec_neo, pq.quantity.Quantity))
# outputs from Quantity array input are of Quantity type
freqs_pq, cross_spec_pq \
= elephant.spectral.multitaper_cross_spectrum(
data.magnitude.T * data.units,
fs=1 / (sampling_period * pq.s))
self.assertTrue(isinstance(freqs_pq, pq.quantity.Quantity))
self.assertTrue(isinstance(cross_spec_pq, pq.quantity.Quantity))
# outputs from Numpy ndarray input are NOT of Quantity type
freqs_np, cross_spec_np \
= elephant.spectral.multitaper_cross_spectrum(
data.magnitude.T,
fs=1 / (sampling_period * pq.s))
self.assertFalse(isinstance(freqs_np, pq.quantity.Quantity))
self.assertFalse(isinstance(cross_spec_np, pq.quantity.Quantity))
# check if the results from different input types are identical
self.assertTrue(
(freqs_neo == freqs_pq).all() and
(cross_spec_neo == cross_spec_pq).all())
self.assertTrue(
(freqs_neo == freqs_np).all() and
(cross_spec_neo == cross_spec_np).all())
class SegmentedMultitaperCrossSpectrumTestCase(unittest.TestCase):
def test_segmented_multitaper_cross_spectrum_errors(self):
# generate dummy data
data_length = 5000
signal = AnalogSignal(np.zeros(data_length),
sampling_period=0.001 * pq.s,
units='mV')
fs = signal.sampling_rate
# - frequency resolution
self.assertRaises(
ValueError, elephant.spectral.segmented_multitaper_cross_spectrum,
signal, fs=fs, frequency_resolution=-10)
# - n per segment
# n_per_seg = int(fs / dF), where dF is the frequency_resolution
broken_freq_resolution = fs / (data_length+1)
self.assertRaises(
ValueError, elephant.spectral.segmented_multitaper_cross_spectrum,
signal, fs=fs, frequency_resolution=broken_freq_resolution)
# - length of segment (negative)
self.assertRaises(
ValueError, elephant.spectral.segmented_multitaper_cross_spectrum,
signal, fs=fs, len_segment=-10)
# - length of segment (larger than data length)
self.assertRaises(
ValueError, elephant.spectral.segmented_multitaper_cross_spectrum,
signal, fs=fs, len_segment=data_length+1)
# - number of segments (negative)
self.assertRaises(
ValueError, elephant.spectral.segmented_multitaper_cross_spectrum,
signal, fs=fs, n_segments=-10)
# - number of segments (larger than data length)
self.assertRaises(
ValueError, elephant.spectral.segmented_multitaper_cross_spectrum,
signal, fs=fs, n_segments=data_length+1)
def test_segmented_multitaper_cross_spectrum_behavior(self):
# generate data (frequency domain to time domain)
r = np.ones(2501) * 0.2
r[0], r[500] = 0, 10 # Zero DC, peak at 100 Hz
phi_x = np.random.uniform(-np.pi, np.pi, len(r))
phi_y = np.random.uniform(-np.pi, np.pi, len(r))
fake_coeffs_x = r*np.exp(1j * phi_x)
fake_coeffs_y = r*np.exp(1j * phi_y)
signal_x = scipy.fft.irfft(fake_coeffs_x)
signal_y = scipy.fft.irfft(fake_coeffs_y)
sampling_period = 0.001
freqs = scipy.fft.rfftfreq(len(signal_x), d=sampling_period)
signal_freq = freqs[r.argmax()]
data = AnalogSignal(np.vstack([signal_x, signal_y]).T,
sampling_period=sampling_period * pq.s,
units='mV')
# consistency between different ways of specifying n_per_seg
# n_per_seg = int(fs/dF) and n_per_seg = len_segment
frequency_resolution = 1 * pq.Hz
len_segment = int(data.sampling_rate / frequency_resolution)
freqs_fr, cross_spec_fr = \
elephant.spectral.segmented_multitaper_cross_spectrum(
data, frequency_resolution=frequency_resolution)
freqs_ls, cross_spec_ls = \
elephant.spectral.segmented_multitaper_cross_spectrum(
data, len_segment=len_segment)
np.testing.assert_array_equal(freqs_fr, freqs_ls)
np.testing.assert_array_equal(cross_spec_fr, cross_spec_ls)
# one-sided vs two-sided spectrum
freqs_os, cross_spec_os = \
elephant.spectral.segmented_multitaper_cross_spectrum(
data, return_onesided=True)
freqs_ts, cross_spec_ts = \
elephant.spectral.segmented_multitaper_cross_spectrum(
data, return_onesided=False)
# test overlap parameter
no_overlap = 0
half_overlap = 0.5
large_overlap = 0.99
n_segments = 10
freqs_no, cross_spec_no = \
elephant.spectral.segmented_multitaper_cross_spectrum(
data, n_segments=n_segments, overlap=no_overlap)
freqs_ho, cross_spec_ho = \
elephant.spectral.segmented_multitaper_cross_spectrum(
data, n_segments=n_segments, overlap=half_overlap)
freqs_lo, cross_spec_lo = \
elephant.spectral.segmented_multitaper_cross_spectrum(
data, n_segments=n_segments, overlap=large_overlap)
self.assertTrue(freqs_no.shape < freqs_ho.shape < freqs_lo.shape)
self.assertTrue(
cross_spec_no.shape < cross_spec_ho.shape < cross_spec_lo.shape)
# Nyquist frequency is negative when using onesided=False (fftfreq)
# See: https://docs.scipy.org/doc/scipy/reference/generated/scipy.fft.rfftfreq.html#scipy.fft.rfftfreq # noqa
nonnegative_freqs_indices = np.nonzero(freqs_ts >= 0)[0]
nyquist_freq_idx = np.abs(freqs_ts).argmax()
ts_freq_indices = np.append(nonnegative_freqs_indices,
nyquist_freq_idx)
ts_overlap_freqs = np.append(
freqs_ts[nonnegative_freqs_indices].rescale('Hz').magnitude,
np.abs(freqs_ts[nyquist_freq_idx].rescale('Hz').magnitude)) * pq.Hz
np.testing.assert_array_equal(freqs_os, ts_overlap_freqs)
np.testing.assert_allclose(
cross_spec_os.magnitude,
cross_spec_ts[:, :, ts_freq_indices].magnitude, rtol=1e-12, atol=0)
def test_segmented_multitaper_cross_spectrum_parameter_hierarchy(self):
# test frequency_resolution vs len_segment vs n_segments
# generate data (frequency domain to time domain)
r = np.ones(2501) * 0.2
r[0], r[500] = 0, 10 # Zero DC, peak at 100 Hz
phi_x = np.random.uniform(-np.pi, np.pi, len(r))
phi_y = np.random.uniform(-np.pi, np.pi, len(r))
fake_coeffs_x = r*np.exp(1j * phi_x)
fake_coeffs_y = r*np.exp(1j * phi_y)
signal_x = scipy.fft.irfft(fake_coeffs_x)
signal_y = scipy.fft.irfft(fake_coeffs_y)
sampling_period = 0.001
freqs = scipy.fft.rfftfreq(len(signal_x), d=sampling_period)
signal_freq = freqs[r.argmax()]
data = AnalogSignal(np.vstack([signal_x, signal_y]).T,
sampling_period=sampling_period * pq.s,
units='mV')
n_segments = 5
len_segment = 2000
frequency_resolution = 1 * pq.Hz
freqs_ns, cross_spec_ns = \
elephant.spectral.segmented_multitaper_cross_spectrum(
data, n_segments=n_segments)
freqs_ls, cross_spec_ls = \
elephant.spectral.segmented_multitaper_cross_spectrum(
data, n_segments=n_segments, len_segment=len_segment)
freqs_fr, cross_spec_fr = \
elephant.spectral.segmented_multitaper_cross_spectrum(
data, n_segments=n_segments, len_segment=len_segment,
frequency_resolution=frequency_resolution)
self.assertNotEqual(freqs_ns.shape, freqs_ls.shape)
self.assertNotEqual(freqs_ls.shape, freqs_fr.shape)
self.assertNotEqual(freqs_fr.shape, freqs_ns.shape)
self.assertNotEqual(cross_spec_ns.shape, cross_spec_ls.shape)
self.assertNotEqual(cross_spec_ls.shape, cross_spec_fr.shape)
self.assertNotEqual(cross_spec_fr.shape, cross_spec_ns.shape)
def test_segmented_multitaper_cross_spectrum_against_multitaper_psd(self):
data_length = 5000
sampling_period = 0.001
signal_freq = 100.0
noise = np.random.normal(size=(2, data_length))
time_points = np.arange(0, data_length * sampling_period,
sampling_period)
signal_x = np.sin(2 * np.pi * signal_freq * time_points) + noise[0]
signal_y = np.cos(2 * np.pi * signal_freq * time_points) + noise[1]
data = AnalogSignal(np.vstack([signal_x, signal_y]).T,
sampling_period=sampling_period * pq.s,
units='mV')
freqs1, psd_multitaper = elephant.spectral.multitaper_psd(
signal=data, fs=data.sampling_rate, nw=4, num_tapers=8)
psd_multitaper[:, 1:] /= 2 # since comparing rfft and fft results
freqs2, cross_spec = \
elephant.spectral.segmented_multitaper_cross_spectrum(
data,
fs=data.sampling_rate,
nw=4,
num_tapers=8,
return_onesided=True)
self.assertTrue((freqs1 == freqs2).all())
np.testing.assert_allclose(psd_multitaper.magnitude,
np.diagonal(cross_spec).T.real.magnitude,
rtol=0.01,
atol=0.01)
def test_segmented_multitaper_cross_spectrum_input_types(self):
# generate a test data
data_length = 5000
sampling_period = 0.001
signal_freq = 100.0
noise = np.random.normal(size=(2, data_length))
time_points = np.arange(0, data_length * sampling_period,
sampling_period)
signal_x = np.sin(2 * np.pi * signal_freq * time_points) + noise[0]
signal_y = np.cos(2 * np.pi * signal_freq * time_points) + noise[1]
data = AnalogSignal(np.vstack([signal_x, signal_y]).T,
sampling_period=sampling_period * pq.s,
units='mV')
# frequency resolution as an integer
freq_res_int = 1
freq_res_hz = 1 * pq.Hz
freqs_int, cross_spec_int = \
elephant.spectral.segmented_multitaper_cross_spectrum(
data, frequency_resolution=freq_res_int)
freqs_hz, cross_spec_hz = \
elephant.spectral.segmented_multitaper_cross_spectrum(
data, frequency_resolution=freq_res_hz)
np.testing.assert_array_equal(freqs_int, freqs_hz)
np.testing.assert_array_equal(cross_spec_int, cross_spec_hz)
class MultitaperCoherenceTestCase(unittest.TestCase):
def test_multitaper_coherence_input_types(self):
# Generate dummy data
data_length = 10000
sampling_period = 0.001
signal_freq = 100.0
np.random.seed(123)
noise = np.random.normal(size=(2, data_length))
time_points = np.arange(0, data_length * sampling_period,
sampling_period)
# Signals are designed to have coherence peak at `signal_freq`
arr_signal_i = np.sin(2 * np.pi * signal_freq * time_points) + noise[0]
arr_signal_j = np.cos(2 * np.pi * signal_freq * time_points) + noise[1]
fs = 1000 * pq.Hz
anasig_signal_i = neo.core.AnalogSignal(arr_signal_i,
sampling_rate=fs,
units=pq.mV)
anasig_signal_j = neo.core.AnalogSignal(arr_signal_j,
sampling_rate=fs,
units=pq.mV)
arr_f, arr_coh, arr_phi = elephant.spectral.multitaper_coherence(
arr_signal_i, arr_signal_j, fs=fs)
anasig_f, anasig_coh, anasig_phi = \
elephant.spectral.multitaper_coherence(anasig_signal_i,
anasig_signal_j)
np.testing.assert_array_equal(arr_f, anasig_f)
np.testing.assert_allclose(arr_coh, anasig_coh, atol=1e-6)
np.testing.assert_array_equal(arr_phi, anasig_phi)
def test_multitaper_cohere_peak(self):
# Generate dummy data
data_length = 10000
sampling_period = 0.001
signal_freq = 100.0
noise = np.random.normal(size=(2, data_length))
time_points = np.arange(0, data_length * sampling_period,
sampling_period)
# Signals are designed to have coherence peak at `signal_freq`
signal_i = np.sin(2 * np.pi * signal_freq * time_points) + noise[0]
signal_j = np.cos(2 * np.pi * signal_freq * time_points) + noise[1]
# Estimate coherence and phase lag with the multitaper method
freq1, coh1, phase_lag1 = elephant.spectral.multitaper_coherence(
signal_i,
signal_j,
fs=1/sampling_period,
n_segments=16)
indices, vals = scipy.signal.find_peaks(coh1, height=0.8, distance=10)
peak_freqs = freq1[indices]
np.testing.assert_allclose(peak_freqs,
signal_freq*np.ones(len(peak_freqs)),
rtol=0.05)
@pytest.mark.skipif(np.__version__ in ['1.25.0', '1.25.1'],
reason="This test will fail with numpy version"
"1.25.0, 1.25.1 see issue #24000"
"https://github.com/numpy/numpy/issues/24000 ")
def test_multitaper_cohere_perfect_cohere(self):
# Generate dummy data
data_length = 10000
sampling_period = 0.001
signal_freq = 100.0
noise = np.random.normal(size=(1, data_length))
time_points = np.arange(0, data_length * sampling_period,
sampling_period)
signal = np.cos(2 * np.pi * signal_freq * time_points) + noise
# Estimate coherence and phase lag with the multitaper method
freq1, coh, phase_lag = elephant.spectral.multitaper_coherence(
signal,
signal,
fs=1/sampling_period,
n_segments=16)
np.testing.assert_array_equal(phase_lag, np.zeros(phase_lag.size))
np.testing.assert_array_equal(coh, np.ones(coh.size))
def test_multitaper_cohere_no_cohere(self):
# Generate dummy data
data_length = 10000
sampling_period = 0.001
time_points = np.arange(0, data_length * sampling_period,
sampling_period)
signal_i = np.sin(2 * np.pi * 2.5 * time_points)
signal_j = np.sin(2 * np.pi * 5 * time_points)
# Estimate coherence and phase lag with the multitaper method
freq, coh, phase_lag = elephant.spectral.multitaper_coherence(
signal_i,
signal_j,
fs=1/sampling_period,
n_segments=16)
np.testing.assert_allclose(coh, np.zeros(coh.size), atol=0.002)
def test_multitaper_cohere_phase_lag(self):
# Generate dummy data
data_length = 10000
sampling_period = 0.001
signal_freq = 100.0
time_points = np.arange(0, data_length * sampling_period,
sampling_period)
        # Signals are designed to have a maximal phase lag of pi/4 at 100 Hz
signal_i = np.sin(2 * np.pi * signal_freq * time_points + np.pi / 4)
signal_j = np.cos(2 * np.pi * signal_freq * time_points)
# Estimate coherence and phase lag with the multitaper method
freq, coh, phase_lag = elephant.spectral.multitaper_coherence(
signal_i,
signal_j,
fs=1/sampling_period,
n_segments=16,
num_tapers=8)
indices, vals = scipy.signal.find_peaks(phase_lag,
height=0.8 * np.pi / 4,
distance=10)
# Get peak frequencies and peak heights
peak_freqs = freq[indices]
peak_heights = vals['peak_heights']
np.testing.assert_allclose(peak_freqs,
signal_freq*np.ones(len(peak_freqs)),
rtol=0.05)
np.testing.assert_allclose(peak_heights,
np.pi / 4 * np.ones(len(peak_heights)),
rtol=0.05)
class WelchCohereTestCase(unittest.TestCase):
def test_welch_cohere_errors(self):
# generate a dummy data
x = AnalogSignal(np.zeros(5000), sampling_period=0.001 * pq.s,
units='mV')
y = AnalogSignal(np.zeros(5000), sampling_period=0.001 * pq.s,
units='mV')
# check for invalid parameter values
# - length of segments
self.assertRaises(ValueError, elephant.spectral.welch_coherence, x, y,
len_seg=0)
self.assertRaises(ValueError, elephant.spectral.welch_coherence, x, y,
len_seg=x.shape[0] * 2)
# - number of segments
self.assertRaises(ValueError, elephant.spectral.welch_coherence, x, y,
num_seg=0)
self.assertRaises(ValueError, elephant.spectral.welch_coherence, x, y,
num_seg=x.shape[0] * 2)
# - frequency resolution
self.assertRaises(ValueError, elephant.spectral.welch_coherence, x, y,
freq_res=-1)
self.assertRaises(ValueError, elephant.spectral.welch_coherence, x, y,
freq_res=x.sampling_rate / (x.shape[0] + 1))
# - overlap
self.assertRaises(ValueError, elephant.spectral.welch_coherence, x, y,
overlap=-1.0)
self.assertRaises(ValueError, elephant.spectral.welch_coherence, x, y,
overlap=1.1)
def test_welch_cohere_warnings(self):
# generate a dummy data
x = AnalogSignal(np.zeros(5000), sampling_period=0.001 * pq.s,
units='mV')
y = AnalogSignal(np.zeros(5000), sampling_period=0.001 * pq.s,
units='mV')
# Test deprecation warning for 'hanning' window
self.assertWarns(DeprecationWarning, elephant.spectral.welch_coherence,
x, y, window='hanning')
def test_welch_cohere_behavior(self):
# generate data by adding white noise and a sinusoid
data_length = 5000
sampling_period = 0.001
signal_freq = 100.0
noise1 = np.random.normal(size=data_length) * 0.01
noise2 = np.random.normal(size=data_length) * 0.01
signal1 = [np.cos(2 * np.pi * signal_freq * t)
for t in np.arange(0, data_length * sampling_period,
sampling_period)]
signal2 = [np.sin(2 * np.pi * signal_freq * t)
for t in np.arange(0, data_length * sampling_period,
sampling_period)]
x = AnalogSignal(np.array(signal1 + noise1), units='mV',
sampling_period=sampling_period * pq.s)
y = AnalogSignal(np.array(signal2 + noise2), units='mV',
sampling_period=sampling_period * pq.s)
# consistency between different ways of specifying segment length
freqs1, coherency1, phase_lag1 = elephant.spectral.welch_coherence(
x, y, len_segment=data_length // 5, overlap=0)
freqs2, coherency2, phase_lag2 = elephant.spectral.welch_coherence(
x, y, n_segments=5, overlap=0)
self.assertTrue((coherency1 == coherency2).all() and
(phase_lag1 == phase_lag2).all() and
(freqs1 == freqs2).all())
# frequency resolution and consistency with data
freq_res = 1.0 * pq.Hz
freqs, coherency, phase_lag = elephant.spectral.welch_coherence(
x, y, frequency_resolution=freq_res)
self.assertAlmostEqual(freq_res, freqs[1] - freqs[0])
self.assertAlmostEqual(freqs[coherency.argmax()], signal_freq,
places=2)
self.assertAlmostEqual(phase_lag[coherency.argmax()], -np.pi / 2,
places=2)
freqs_np, coherency_np, phase_lag_np = \
elephant.spectral.welch_coherence(x.magnitude.flatten(),
y.magnitude.flatten(),
fs=1 / sampling_period,
frequency_resolution=freq_res)
assert_array_equal(freqs.simplified.magnitude, freqs_np)
assert_array_equal(coherency[:, 0], coherency_np)
assert_array_equal(phase_lag[:, 0], phase_lag_np)
# - check the behavior of parameter `axis` using multidimensional data
num_channel = 4
data_length = 5000
x_multidim = np.random.normal(size=(num_channel, data_length))
y_multidim = np.random.normal(size=(num_channel, data_length))
freqs, coherency, phase_lag = \
elephant.spectral.welch_coherence(x_multidim, y_multidim)
freqs_T, coherency_T, phase_lag_T = elephant.spectral.welch_coherence(
x_multidim.T, y_multidim.T, axis=0)
assert_array_equal(freqs, freqs_T)
assert_array_equal(coherency, coherency_T.T)
assert_array_equal(phase_lag, phase_lag_T.T)
def test_welch_cohere_input_types(self):
# generate a test data
sampling_period = 0.001
x = AnalogSignal(np.array(np.random.normal(size=5000)),
sampling_period=sampling_period * pq.s,
units='mV')
y = AnalogSignal(np.array(np.random.normal(size=5000)),
sampling_period=sampling_period * pq.s,
units='mV')
# outputs from AnalogSignal input are of Quantity type
# (standard usage)
freqs_neo, coherency_neo, phase_lag_neo = \
elephant.spectral.welch_coherence(x, y)
self.assertTrue(isinstance(freqs_neo, pq.quantity.Quantity))
self.assertTrue(isinstance(phase_lag_neo, pq.quantity.Quantity))
# outputs from Quantity array input are of Quantity type
freqs_pq, coherency_pq, phase_lag_pq = elephant.spectral \
.welch_coherence(x.magnitude.flatten() * x.units,
y.magnitude.flatten() * y.units,
fs=1 / sampling_period)
self.assertTrue(isinstance(freqs_pq, pq.quantity.Quantity))
self.assertTrue(isinstance(phase_lag_pq, pq.quantity.Quantity))
# outputs from Numpy ndarray input are NOT of Quantity type
freqs_np, coherency_np, phase_lag_np = elephant.spectral \
.welch_coherence(x.magnitude.flatten(),
y.magnitude.flatten(),
fs=1 / sampling_period)
self.assertFalse(isinstance(freqs_np, pq.quantity.Quantity))
self.assertFalse(isinstance(phase_lag_np, pq.quantity.Quantity))
# check if the results from different input types are identical
self.assertTrue((freqs_neo == freqs_pq).all() and
(coherency_neo[:, 0] == coherency_pq).all() and
(phase_lag_neo[:, 0] == phase_lag_pq).all())
self.assertTrue((freqs_neo == freqs_np).all() and
(coherency_neo[:, 0] == coherency_np).all() and
(phase_lag_neo[:, 0] == phase_lag_np).all())
def test_welch_cohere_multidim_input(self):
# generate multidimensional data
num_channel = 4
data_length = 5000
sampling_period = 0.001
x_np = np.array(np.random.normal(size=(num_channel, data_length)))
y_np = np.array(np.random.normal(size=(num_channel, data_length)))
# Since row-column order in AnalogSignal is different from the
# convention in NumPy/SciPy, `data_np` needs to be transposed when it's
# used to define an AnalogSignal
x_neo = AnalogSignal(x_np.T, units='mV',
sampling_period=sampling_period * pq.s)
y_neo = AnalogSignal(y_np.T, units='mV',
sampling_period=sampling_period * pq.s)
x_neo_1dim = AnalogSignal(x_np[0], units='mV',
sampling_period=sampling_period * pq.s)
y_neo_1dim = AnalogSignal(y_np[0], units='mV',
sampling_period=sampling_period * pq.s)
# check if the results from different input types are identical
freqs_np, coherency_np, phase_lag_np = elephant.spectral \
.welch_coherence(x_np, y_np, fs=1 / sampling_period)
freqs_neo, coherency_neo, phase_lag_neo = \
elephant.spectral.welch_coherence(x_neo, y_neo)
freqs_neo_1dim, coherency_neo_1dim, phase_lag_neo_1dim = \
elephant.spectral.welch_coherence(x_neo_1dim, y_neo_1dim)
self.assertTrue(np.all(freqs_np == freqs_neo))
self.assertTrue(np.all(coherency_np.T == coherency_neo))
self.assertTrue(np.all(phase_lag_np.T == phase_lag_neo))
self.assertTrue(
np.all(coherency_neo_1dim[:, 0] == coherency_neo[:, 0]))
self.assertTrue(
np.all(phase_lag_neo_1dim[:, 0] == phase_lag_neo[:, 0]))
if __name__ == "__main__":
unittest.main(verbosity=2)
|
faf57217de842b0aa4ea07c531cb8ee24e8dec1a
|
a5a99f646e371b45974a6fb6ccc06b0a674818f2
|
/HLTriggerOffline/SMP/python/hltSMPPostProcessor_cfi.py
|
9c2388bb06f8689c5988aa0f746027b83a070a4a
|
[
"Apache-2.0"
] |
permissive
|
cms-sw/cmssw
|
4ecd2c1105d59c66d385551230542c6615b9ab58
|
19c178740257eb48367778593da55dcad08b7a4f
|
refs/heads/master
| 2023-08-23T21:57:42.491143
| 2023-08-22T20:22:40
| 2023-08-22T20:22:40
| 10,969,551
| 1,006
| 3,696
|
Apache-2.0
| 2023-09-14T19:14:28
| 2013-06-26T14:09:07
|
C++
|
UTF-8
|
Python
| false
| false
| 484
|
py
|
hltSMPPostProcessor_cfi.py
|
import FWCore.ParameterSet.Config as cms
from DQMServices.Core.DQMEDHarvester import DQMEDHarvester
hltSMPPostProcessor = DQMEDHarvester("DQMGenericClient",
subDirs = cms.untracked.vstring('HLT/SMP/*'),
verbose = cms.untracked.uint32(2),
outputFileName = cms.untracked.string(''),
resolution = cms.vstring(),
efficiency = cms.vstring(),
efficiencyProfile = cms.untracked.vstring(),
)
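# Note (assumption, not part of the original config): DQMGenericClient builds efficiency
# histograms from `efficiency` entries of the form "name 'title' numerator denominator";
# this harvester leaves the list empty so that concrete SMP configurations can clone it
# and fill in their own histogram pairs.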
|
58ba04be9f174b79256354fc7d4fb62221fac471
|
213b8cab639c7d45cbf6a4fd46eb23e379d9d374
|
/python/nsound_examples/drum.py
|
5952263de3993859017063ce9bf7afbdeaa35c5b
|
[] |
no_license
|
DevDungeon/Cookbook
|
f85b04b690ea0a202ddfaeda6460b6ba5797cb70
|
a49a1c77f2b89dc303fa9f2563bb3c19777e4c6c
|
refs/heads/master
| 2023-05-12T06:58:50.606019
| 2022-03-30T04:48:16
| 2022-03-30T04:48:16
| 34,371,982
| 307
| 94
| null | 2023-05-03T22:53:45
| 2015-04-22T06:02:53
|
HTML
|
UTF-8
|
Python
| false
| false
| 2,215
|
py
|
drum.py
|
###############################################################################
#
# $Id: example3.py 718 2012-04-15 23:59:35Z weegreenblobbie $
#
# Simulates a drum. Based on the Csound drum by Hans Mikelson.
#
# source: http://www.csounds.com/ezine/winter2001/synthesis/
#
###############################################################################
from nsound import *
sr = 44100.0
BITS_PER_SAMPLE = 16
###############################################################################
def drum(
duration,
attack_time,
high_frequency,
low_frequency,
tension,
resident_frequency):
"Simple drum"
sin = Sine(sr)
frequency_sweep = sin.drawLine(attack_time, high_frequency, low_frequency)
frequency_sweep << sin.drawLine(
(duration - attack_time), low_frequency, low_frequency)
hz_20 = sin.generate(duration, resident_frequency)
rezzy = hz_20 * frequency_sweep
parabola = sin.drawParabola(duration, 1.0, duration / 2, 0.25, 0.0)
rezzy *= parabola
temp1 = rezzy * tension
frequency_sweep -= temp1
audio = sin.generate(duration, frequency_sweep)
    audio *= sin.drawParabola(duration, 1.0, 0.5 * duration, 0.3, 0.0)
return audio
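# Descriptive summary of drum() (derived from the code above, not from the original
# comments): the pitch sweeps from high_frequency down to low_frequency over
# attack_time and then holds at low_frequency; a sine at resident_frequency, shaped
# by a parabolic envelope and scaled by `tension`, is subtracted from that sweep to
# add a resonant wobble; the output is a sine generated at the perturbed sweep
# frequency with its own parabolic amplitude envelope.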
###############################################################################
sine = Sine(sr)
bd01 = DrumBD01(sr)
dkb = DrumKickBass(sr, 266, 0.0)
out = AudioStream(sr, 1)
out << bd01.play() \
<< sine.silence(0.25) \
<< dkb.play() \
<< sine.silence(0.25)
# duration, attack, high f, low f, tension, ressonance
out << drum(0.5, 0.012, 160, 51, 0.9, 54) \
<< drum(0.5, 0.012, 160, 51, 0.9, 54) \
<< drum(0.5, 0.012, 160, 51, 0.9, 54) \
<< drum(0.5, 0.012, 160, 51, 0.9, 54) \
<< sine.silence(0.25)
out *= 0.5
hat = Hat(sr)
out << 0.666 * hat.play() << sine.silence(0.25)
out >> "example3.wav"
# ReverberationRoom(sample_rate, room_feedback, wet_percent, dry_percent, low_pass_freq)
room = ReverberationRoom(sr, 0.60, 0.5, 1.0, 100.0)
out2 = 0.5 * room.filter(out)
out2 >> "example3_reverb.wav"
pb = AudioPlayback(sr, 2, 16)
out2 >> pb
|
5f8d7315fc707e78fbc2677cc980e9a5f94e0e2d
|
e25a98b616384794ac59e1b1d18f856d2d4507e1
|
/rplugin/python3/deoplete/sources/rust.py
|
5664f3d9c2581dcb75eadf34f652e15afd2ef46d
|
[
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
sebastianmarkow/deoplete-rust
|
0ed26c1526b37382485bcbc94b52db921c513717
|
0a86e502113910c33448b337c4d50cabea120d25
|
refs/heads/master
| 2021-07-16T21:55:52.854385
| 2017-07-18T16:20:47
| 2017-07-18T16:20:47
| 63,178,377
| 146
| 14
|
MIT
| 2021-03-04T11:42:28
| 2016-07-12T17:24:29
|
Vim script
|
UTF-8
|
Python
| false
| false
| 2,745
|
py
|
rust.py
|
"""Rust completion via Racer"""
import os
import re
import subprocess
import tempfile
from .base import Base
from deoplete.logger import getLogger
logger = getLogger('rust')
VAR_RACER_BINARY = 'deoplete#sources#rust#racer_binary'
VAR_RUST_SOURCE = 'deoplete#sources#rust#rust_source_path'
VAR_DUPLICATION = 'deoplete#sources#rust#show_duplicates'
class Source(Base):
"""Deoplete Rust source"""
def __init__(self, vim):
Base.__init__(self, vim)
self.name = 'rust'
self.mark = '[Rust]'
self.filetypes = ['rust']
self.input_pattern = r'(\.|::)\w*'
self.rank = 500
self.__racer = self.vim.vars.get(VAR_RACER_BINARY)
self.__dup = self.vim.vars.get(VAR_DUPLICATION)
self.__encoding = self.vim.eval('&encoding')
self.__rust_re = re.compile(r'\w*$|(?<=")[./\-\w]*$')
if 'RUST_SRC_PATH' not in os.environ:
rust_path = self.vim.vars.get(VAR_RUST_SOURCE)
if rust_path:
os.environ['RUST_SRC_PATH'] = rust_path
def get_complete_position(self, ctx):
"""Missing"""
if not self.__check_binary():
return -1
method = self.__rust_re.search(ctx['input'])
return method.start() if method else -1
def gather_candidates(self, ctx):
"""Missing"""
candidates = []
lines = self.__retrieve()
matches = [line[6:] for line in lines if line.startswith('MATCH')]
if not bool(self.__dup):
matches = set(matches)
for match in matches:
tokens = match.split(",")
candidate = {
'word': tokens[0],
'kind': tokens[4],
'menu': tokens[5],
'info': ','.join(tokens[5:]),
'dup': self.__dup,
}
candidates.append(candidate)
return candidates
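    # Illustration (hypothetical racer output; the exact field layout is an assumption
    # inferred from the token indices used above):
    #   MATCH new,90,11,/path/to/vec.rs,Function,fn new() -> Vec<T>
    # would produce a candidate with word='new', kind='Function',
    # menu='fn new() -> Vec<T>' and info equal to everything from the
    # declaration text onwards.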
def __retrieve(self):
"""Missing"""
content = self.vim.current.buffer
line, column = self.vim.current.window.cursor
with tempfile.NamedTemporaryFile(mode='w') as buf:
buf.write("\n".join(content))
buf.flush()
args = [
self.__racer,
'complete',
str(line),
str(column),
content.name,
buf.name
]
results = []
try:
results = subprocess.check_output(args) \
.decode(self.__encoding).splitlines()
except Exception:
pass
return results
def __check_binary(self):
"""Missing"""
return os.path.isfile(self.__racer) and os.environ.get('RUST_SRC_PATH')
|
3584d9f857c4cc6aea67627e6f867a6f99538223
|
b8441dc1987be9e64fa3081d456b2a3060ec44d1
|
/mars/tensor/arithmetic/isreal.py
|
3fc405358e8107636fb00e5632929573304b45bc
|
[
"BSD-3-Clause",
"MIT",
"ISC",
"Apache-2.0",
"CC0-1.0",
"BSD-2-Clause",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
mars-project/mars
|
f99fefbce999d58a9249bc72046787a9731c9c73
|
c36c53fa22e10ef9477d9c454401a2f281375f31
|
refs/heads/master
| 2023-07-23T00:23:55.133015
| 2023-07-03T11:44:54
| 2023-07-03T11:44:54
| 160,543,708
| 2,704
| 362
|
Apache-2.0
| 2023-09-11T07:57:35
| 2018-12-05T16:04:03
|
Python
|
UTF-8
|
Python
| false
| false
| 1,661
|
py
|
isreal.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 1999-2021 Alibaba Group Holding Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
from ... import opcodes as OperandDef
from ..utils import inject_dtype
from .core import TensorUnaryOp
from .utils import arithmetic_operand
@arithmetic_operand(sparse_mode="unary")
class TensorIsReal(TensorUnaryOp):
_op_type_ = OperandDef.ISREAL
_func_name = "isreal"
@inject_dtype(np.bool_)
def isreal(x, **kwargs):
"""
Returns a bool tensor, where True if input element is real.
If element has complex type with zero complex part, the return value
for that element is True.
Parameters
----------
x : array_like
Input tensor.
Returns
-------
out : Tensor, bool
Boolean tensor of same shape as `x`.
See Also
--------
iscomplex
isrealobj : Return True if x is not a complex type.
Examples
--------
>>> import mars.tensor as mt
>>> mt.isreal([1+1j, 1+0j, 4.5, 3, 2, 2j]).execute()
array([False, True, True, True, True, False])
"""
op = TensorIsReal(**kwargs)
return op(x)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.